Diffstat (limited to 'nixpkgs/nixos')
-rw-r--r--  nixpkgs/nixos/COPYING  18
-rw-r--r--  nixpkgs/nixos/README.md  110
-rw-r--r--  nixpkgs/nixos/default.nix  20
-rw-r--r--  nixpkgs/nixos/doc/manual/README.md  3
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/boot-problems.section.md  41
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md  62
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/container-networking.section.md  44
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/containers.chapter.md  28
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md  59
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md  48
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/imperative-containers.section.md  115
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/logging.chapter.md  38
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/maintenance-mode.section.md  11
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/network-problems.section.md  21
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/rebooting.chapter.md  30
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/rollback.section.md  38
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/running.md  14
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md  150
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/store-corruption.section.md  28
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/troubleshooting.chapter.md  12
-rw-r--r--  nixpkgs/nixos/doc/manual/administration/user-sessions.chapter.md  43
-rw-r--r--  nixpkgs/nixos/doc/manual/common.nix  4
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/abstractions.section.md  80
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md  13
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ad-hoc-packages.section.md  51
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md  99
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/config-file.section.md  175
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/config-syntax.chapter.md  18
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/configuration.md  27
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md  93
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md  46
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md  42
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/firewall.section.md  32
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md  238
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md  35
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md  42
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md  96
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md  180
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md  77
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/modularity.section.md  133
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/network-manager.section.md  42
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/networking.chapter.md  16
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/package-mgmt.chapter.md  18
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md  34
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/all-hardware.section.md  11
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/base.section.md  7
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/clone-config.section.md  11
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/demo.section.md  4
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/docker-container.section.md  7
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/graphical.section.md  10
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/hardened.section.md  20
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/headless.section.md  9
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/installation-device.section.md  24
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/minimal.section.md  9
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/profiles/qemu-guest.section.md  7
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md  51
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/ssh.section.md  19
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/sshfs-file-systems.section.md  104
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md  102
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md  91
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md  27
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/wireless.section.md  67
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md  336
-rw-r--r--  nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md  57
-rw-r--r--  nixpkgs/nixos/doc/manual/contributing-to-this-manual.chapter.md  110
-rw-r--r--  nixpkgs/nixos/doc/manual/default.nix  204
-rw-r--r--  nixpkgs/nixos/doc/manual/development/activation-script.section.md  72
-rw-r--r--  nixpkgs/nixos/doc/manual/development/assertions.section.md  40
-rw-r--r--  nixpkgs/nixos/doc/manual/development/bootspec.chapter.md  36
-rw-r--r--  nixpkgs/nixos/doc/manual/development/building-parts.chapter.md  74
-rw-r--r--  nixpkgs/nixos/doc/manual/development/developing-the-test-driver.chapter.md  45
-rw-r--r--  nixpkgs/nixos/doc/manual/development/development.md  15
-rw-r--r--  nixpkgs/nixos/doc/manual/development/freeform-modules.section.md  78
-rw-r--r--  nixpkgs/nixos/doc/manual/development/importing-modules.section.md  46
-rw-r--r--  nixpkgs/nixos/doc/manual/development/linking-nixos-tests-to-packages.section.md  6
-rw-r--r--  nixpkgs/nixos/doc/manual/development/meta-attributes.section.md  68
-rw-r--r--  nixpkgs/nixos/doc/manual/development/nixos-tests.chapter.md  13
-rw-r--r--  nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md  21
-rw-r--r--  nixpkgs/nixos/doc/manual/development/option-declarations.section.md  255
-rw-r--r--  nixpkgs/nixos/doc/manual/development/option-def.section.md  109
-rw-r--r--  nixpkgs/nixos/doc/manual/development/option-types.section.md  625
-rw-r--r--  nixpkgs/nixos/doc/manual/development/replace-modules.section.md  71
-rw-r--r--  nixpkgs/nixos/doc/manual/development/running-nixos-tests-interactively.section.md  103
-rw-r--r--  nixpkgs/nixos/doc/manual/development/running-nixos-tests.section.md  20
-rw-r--r--  nixpkgs/nixos/doc/manual/development/settings-options.section.md  247
-rw-r--r--  nixpkgs/nixos/doc/manual/development/sources.chapter.md  77
-rw-r--r--  nixpkgs/nixos/doc/manual/development/testing-installer.chapter.md  18
-rw-r--r--  nixpkgs/nixos/doc/manual/development/unit-handling.section.md  65
-rw-r--r--  nixpkgs/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md  59
-rw-r--r--  nixpkgs/nixos/doc/manual/development/writing-documentation.chapter.md  93
-rw-r--r--  nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md  202
-rw-r--r--  nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md  279
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md  137
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/building-nixos.chapter.md  80
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md  105
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installation.md  12
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md  29
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md  279
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-kexec.section.md  64
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-pxe.section.md  32
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-usb.section.md  72
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md  59
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/installing.chapter.md  612
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/obtaining.chapter.md  23
-rw-r--r--  nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md  118
-rw-r--r--  nixpkgs/nixos/doc/manual/manual.md  56
-rw-r--r--  nixpkgs/nixos/doc/manual/nixos-options.md  7
-rw-r--r--  nixpkgs/nixos/doc/manual/preface.md  11
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/release-notes.md  26
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1310.section.md  3
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1404.section.md  81
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1412.section.md  171
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md  319
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1603.section.md  282
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1609.section.md  73
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md  303
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1709.section.md  316
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1803.section.md  286
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1809.section.md  332
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1903.section.md  214
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-1909.section.md  313
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2003.section.md  507
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md  747
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2105.section.md  428
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2111.section.md  583
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md  1002
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md  536
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md  664
-rw-r--r--  nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md  604
-rw-r--r--  nixpkgs/nixos/doc/manual/shell.nix  20
-rw-r--r--  nixpkgs/nixos/lib/default.nix  41
-rw-r--r--  nixpkgs/nixos/lib/eval-cacheable-options.nix  54
-rw-r--r--  nixpkgs/nixos/lib/eval-config-minimal.nix  50
-rw-r--r--  nixpkgs/nixos/lib/eval-config.nix  116
-rw-r--r--  nixpkgs/nixos/lib/from-env.nix  4
-rw-r--r--  nixpkgs/nixos/lib/make-btrfs-fs.nix  67
-rw-r--r--  nixpkgs/nixos/lib/make-channel.nix  31
-rw-r--r--  nixpkgs/nixos/lib/make-disk-image.nix  620
-rw-r--r--  nixpkgs/nixos/lib/make-ext4-fs.nix  95
-rw-r--r--  nixpkgs/nixos/lib/make-iso9660-image.nix  65
-rw-r--r--  nixpkgs/nixos/lib/make-iso9660-image.sh  130
-rw-r--r--  nixpkgs/nixos/lib/make-multi-disk-zfs-image.nix  330
-rw-r--r--  nixpkgs/nixos/lib/make-options-doc/default.nix  175
-rw-r--r--  nixpkgs/nixos/lib/make-options-doc/mergeJSON.py  104
-rw-r--r--  nixpkgs/nixos/lib/make-single-disk-zfs-image.nix  313
-rw-r--r--  nixpkgs/nixos/lib/make-squashfs.nix  45
-rw-r--r--  nixpkgs/nixos/lib/make-system-tarball.nix  56
-rw-r--r--  nixpkgs/nixos/lib/make-system-tarball.sh  57
-rw-r--r--  nixpkgs/nixos/lib/qemu-common.nix  65
-rw-r--r--  nixpkgs/nixos/lib/systemd-lib.nix  470
-rw-r--r--  nixpkgs/nixos/lib/systemd-network-units.nix  249
-rw-r--r--  nixpkgs/nixos/lib/systemd-types.nix  69
-rw-r--r--  nixpkgs/nixos/lib/systemd-unit-options.nix  735
-rw-r--r--  nixpkgs/nixos/lib/test-driver/default.nix  49
-rw-r--r--  nixpkgs/nixos/lib/test-driver/extract-docstrings.py  74
-rw-r--r--  nixpkgs/nixos/lib/test-driver/nixos-test-driver-docstrings.nix  13
-rw-r--r--  nixpkgs/nixos/lib/test-driver/pyproject.toml  44
-rw-r--r--  nixpkgs/nixos/lib/test-driver/shell.nix  2
-rwxr-xr-x  nixpkgs/nixos/lib/test-driver/test_driver/__init__.py  140
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test_driver/driver.py  260
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test_driver/logger.py  107
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test_driver/machine.py  1296
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test_driver/polling_condition.py  92
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test_driver/py.typed  0
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test_driver/qmp.py  98
-rw-r--r--  nixpkgs/nixos/lib/test-driver/test_driver/vlan.py  62
-rw-r--r--  nixpkgs/nixos/lib/test-script-prepend.py  42
-rw-r--r--  nixpkgs/nixos/lib/testing-python.nix  78
-rw-r--r--  nixpkgs/nixos/lib/testing/call-test.nix  12
-rw-r--r--  nixpkgs/nixos/lib/testing/default.nix  27
-rw-r--r--  nixpkgs/nixos/lib/testing/driver.nix  203
-rw-r--r--  nixpkgs/nixos/lib/testing/interactive.nix  45
-rw-r--r--  nixpkgs/nixos/lib/testing/legacy.nix  26
-rw-r--r--  nixpkgs/nixos/lib/testing/meta.nix  42
-rw-r--r--  nixpkgs/nixos/lib/testing/name.nix  14
-rw-r--r--  nixpkgs/nixos/lib/testing/network.nix  131
-rw-r--r--  nixpkgs/nixos/lib/testing/nixos-test-base.nix  23
-rw-r--r--  nixpkgs/nixos/lib/testing/nodes.nix  149
-rw-r--r--  nixpkgs/nixos/lib/testing/pkgs.nix  11
-rw-r--r--  nixpkgs/nixos/lib/testing/run.nix  67
-rw-r--r--  nixpkgs/nixos/lib/testing/testScript.nix  84
-rw-r--r--  nixpkgs/nixos/lib/utils.nix  236
-rw-r--r--  nixpkgs/nixos/maintainers/option-usages.nix  192
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/.gitignore  1
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/README.md  42
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/azure-new/boot-vm.sh  36
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/common.sh  7
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/image.nix  10
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/system.nix  33
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/azure-new/shell.nix  13
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/azure-new/upload-image.sh  58
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh  8
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh  22
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix  20
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix  12
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix  160
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh  362
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh  35
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix  20
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image.nix  31
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix  20
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix  31
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/lxd/nix.tpl  7
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/oci/create-image.sh  24
-rwxr-xr-x  nixpkgs/nixos/maintainers/scripts/oci/upload-image.sh  100
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix  101
-rw-r--r--  nixpkgs/nixos/maintainers/scripts/openstack/openstack-image.nix  27
-rw-r--r--  nixpkgs/nixos/modules/config/appstream.nix  25
-rw-r--r--  nixpkgs/nixos/modules/config/console.nix  251
-rw-r--r--  nixpkgs/nixos/modules/config/debug-info.nix  44
-rw-r--r--  nixpkgs/nixos/modules/config/fanout.nix  49
-rw-r--r--  nixpkgs/nixos/modules/config/fonts/fontconfig.nix  528
-rw-r--r--  nixpkgs/nixos/modules/config/fonts/fontdir.nix  67
-rw-r--r--  nixpkgs/nixos/modules/config/fonts/ghostscript.nix  23
-rw-r--r--  nixpkgs/nixos/modules/config/fonts/packages.nix  43
-rw-r--r--  nixpkgs/nixos/modules/config/gtk/gtk-icon-cache.nix  85
-rw-r--r--  nixpkgs/nixos/modules/config/i18n.nix  113
-rw-r--r--  nixpkgs/nixos/modules/config/iproute2.nix  27
-rw-r--r--  nixpkgs/nixos/modules/config/krb5/default.nix  369
-rw-r--r--  nixpkgs/nixos/modules/config/ldap.nix  303
-rw-r--r--  nixpkgs/nixos/modules/config/locale.nix  93
-rw-r--r--  nixpkgs/nixos/modules/config/malloc.nix  114
-rw-r--r--  nixpkgs/nixos/modules/config/mysql.nix  456
-rw-r--r--  nixpkgs/nixos/modules/config/networking.nix  237
-rw-r--r--  nixpkgs/nixos/modules/config/nix-channel.nix  104
-rw-r--r--  nixpkgs/nixos/modules/config/nix-flakes.nix  95
-rw-r--r--  nixpkgs/nixos/modules/config/nix-remote-build.nix  226
-rw-r--r--  nixpkgs/nixos/modules/config/nix.nix  379
-rw-r--r--  nixpkgs/nixos/modules/config/no-x-libs.nix  83
-rw-r--r--  nixpkgs/nixos/modules/config/nsswitch.nix  136
-rw-r--r--  nixpkgs/nixos/modules/config/power-management.nix  106
-rw-r--r--  nixpkgs/nixos/modules/config/pulseaudio.nix  332
-rw-r--r--  nixpkgs/nixos/modules/config/qt.nix  154
-rw-r--r--  nixpkgs/nixos/modules/config/resolvconf.nix  160
-rw-r--r--  nixpkgs/nixos/modules/config/shells-environment.nix  224
-rw-r--r--  nixpkgs/nixos/modules/config/stevenblack.nix  34
-rw-r--r--  nixpkgs/nixos/modules/config/swap.nix  304
-rw-r--r--  nixpkgs/nixos/modules/config/sysctl.nix  79
-rw-r--r--  nixpkgs/nixos/modules/config/system-environment.nix  100
-rw-r--r--  nixpkgs/nixos/modules/config/system-path.nix  189
-rw-r--r--  nixpkgs/nixos/modules/config/terminfo.nix  76
-rw-r--r--  nixpkgs/nixos/modules/config/unix-odbc-drivers.nix  38
-rw-r--r--  nixpkgs/nixos/modules/config/update-users-groups.pl  381
-rw-r--r--  nixpkgs/nixos/modules/config/users-groups.nix  935
-rw-r--r--  nixpkgs/nixos/modules/config/vte.nix  56
-rw-r--r--  nixpkgs/nixos/modules/config/xdg/autostart.nix  26
-rw-r--r--  nixpkgs/nixos/modules/config/xdg/icons.nix  48
-rw-r--r--  nixpkgs/nixos/modules/config/xdg/menus.nix  29
-rw-r--r--  nixpkgs/nixos/modules/config/xdg/mime.nix  102
-rw-r--r--  nixpkgs/nixos/modules/config/xdg/portal.nix  114
-rw-r--r--  nixpkgs/nixos/modules/config/xdg/portals/lxqt.nix  49
-rw-r--r--  nixpkgs/nixos/modules/config/xdg/portals/wlr.nix  67
-rw-r--r--  nixpkgs/nixos/modules/config/xdg/sounds.nix  30
-rw-r--r--  nixpkgs/nixos/modules/config/zram.nix  130
-rw-r--r--  nixpkgs/nixos/modules/hardware/acpilight.nix  25
-rw-r--r--  nixpkgs/nixos/modules/hardware/all-firmware.nix  79
-rw-r--r--  nixpkgs/nixos/modules/hardware/bladeRF.nix  28
-rw-r--r--  nixpkgs/nixos/modules/hardware/brillo.nix  21
-rw-r--r--  nixpkgs/nixos/modules/hardware/ckb-next.nix  53
-rw-r--r--  nixpkgs/nixos/modules/hardware/corectrl.nix  62
-rw-r--r--  nixpkgs/nixos/modules/hardware/cpu/amd-microcode.nix  29
-rw-r--r--  nixpkgs/nixos/modules/hardware/cpu/amd-sev.nix  80
-rw-r--r--  nixpkgs/nixos/modules/hardware/cpu/intel-microcode.nix  29
-rw-r--r--  nixpkgs/nixos/modules/hardware/cpu/intel-sgx.nix  69
-rw-r--r--  nixpkgs/nixos/modules/hardware/cpu/x86-msr.nix  91
-rw-r--r--  nixpkgs/nixos/modules/hardware/decklink.nix  16
-rw-r--r--  nixpkgs/nixos/modules/hardware/device-tree.nix  226
-rw-r--r--  nixpkgs/nixos/modules/hardware/digitalbitbox.nix  30
-rw-r--r--  nixpkgs/nixos/modules/hardware/flipperzero.nix  18
-rw-r--r--  nixpkgs/nixos/modules/hardware/flirc.nix  12
-rw-r--r--  nixpkgs/nixos/modules/hardware/gkraken.nix  18
-rw-r--r--  nixpkgs/nixos/modules/hardware/glasgow.nix  23
-rw-r--r--  nixpkgs/nixos/modules/hardware/gpgsmartcards.nix  37
-rw-r--r--  nixpkgs/nixos/modules/hardware/hackrf.nix  23
-rw-r--r--  nixpkgs/nixos/modules/hardware/i2c.nix  47
-rw-r--r--  nixpkgs/nixos/modules/hardware/infiniband.nix  58
-rw-r--r--  nixpkgs/nixos/modules/hardware/keyboard/qmk.nix  16
-rw-r--r--  nixpkgs/nixos/modules/hardware/keyboard/teck.nix  16
-rw-r--r--  nixpkgs/nixos/modules/hardware/keyboard/uhk.nix  22
-rw-r--r--  nixpkgs/nixos/modules/hardware/keyboard/zsa.nix  21
-rw-r--r--  nixpkgs/nixos/modules/hardware/ksm.nix  38
-rw-r--r--  nixpkgs/nixos/modules/hardware/ledger.nix  14
-rw-r--r--  nixpkgs/nixos/modules/hardware/logitech.nix  95
-rw-r--r--  nixpkgs/nixos/modules/hardware/mcelog.nix  35
-rw-r--r--  nixpkgs/nixos/modules/hardware/network/ath-user-regd.nix  31
-rw-r--r--  nixpkgs/nixos/modules/hardware/network/b43.nix  30
-rw-r--r--  nixpkgs/nixos/modules/hardware/network/broadcom-43xx.nix  3
-rw-r--r--  nixpkgs/nixos/modules/hardware/network/intel-2200bg.nix  30
-rw-r--r--  nixpkgs/nixos/modules/hardware/network/smc-2632w/default.nix  9
-rw-r--r--  nixpkgs/nixos/modules/hardware/network/smc-2632w/firmware/cis/SMC2632W-v1.02.cis  8
-rw-r--r--  nixpkgs/nixos/modules/hardware/network/zydas-zd1211.nix  5
-rw-r--r--  nixpkgs/nixos/modules/hardware/new-lg4ff.nix  29
-rw-r--r--  nixpkgs/nixos/modules/hardware/nitrokey.nix  27
-rw-r--r--  nixpkgs/nixos/modules/hardware/onlykey/default.nix  33
-rw-r--r--  nixpkgs/nixos/modules/hardware/onlykey/onlykey.udev  18
-rw-r--r--  nixpkgs/nixos/modules/hardware/opengl.nix  161
-rw-r--r--  nixpkgs/nixos/modules/hardware/openrazer.nix  146
-rw-r--r--  nixpkgs/nixos/modules/hardware/opentabletdriver.nix  69
-rw-r--r--  nixpkgs/nixos/modules/hardware/pcmcia.nix  60
-rw-r--r--  nixpkgs/nixos/modules/hardware/printers.nix  135
-rw-r--r--  nixpkgs/nixos/modules/hardware/raid/hpsa.nix  64
-rw-r--r--  nixpkgs/nixos/modules/hardware/rtl-sdr.nix  23
-rw-r--r--  nixpkgs/nixos/modules/hardware/saleae-logic.nix  25
-rw-r--r--  nixpkgs/nixos/modules/hardware/sata.nix  100
-rw-r--r--  nixpkgs/nixos/modules/hardware/sensor/hddtemp.nix  81
-rw-r--r--  nixpkgs/nixos/modules/hardware/sensor/iio.nix  35
-rw-r--r--  nixpkgs/nixos/modules/hardware/steam-hardware.nix  32
-rw-r--r--  nixpkgs/nixos/modules/hardware/system-76.nix  89
-rw-r--r--  nixpkgs/nixos/modules/hardware/tuxedo-keyboard.nix  35
-rw-r--r--  nixpkgs/nixos/modules/hardware/ubertooth.nix  29
-rw-r--r--  nixpkgs/nixos/modules/hardware/uinput.nix  19
-rw-r--r--  nixpkgs/nixos/modules/hardware/usb-modeswitch.nix  46
-rw-r--r--  nixpkgs/nixos/modules/hardware/usb-storage.nix  20
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/amdgpu-pro.nix  68
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/bumblebee.nix  93
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/capture/mwprocapture.nix  56
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/displaylink.nix  77
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/nvidia.nix  598
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/radeon.nix  3
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/switcheroo-control.nix  18
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/uvcvideo/default.nix  64
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix  47
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/webcam/facetimehd.nix  52
-rw-r--r--  nixpkgs/nixos/modules/hardware/video/webcam/ipu6.nix  57
-rw-r--r--  nixpkgs/nixos/modules/hardware/wooting.nix  12
-rw-r--r--  nixpkgs/nixos/modules/hardware/xone.nix  23
-rw-r--r--  nixpkgs/nixos/modules/hardware/xpadneo.nix  30
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/default.md  160
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/default.nix  71
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/fcitx5.nix  129
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/hime.nix  14
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/ibus.nix  85
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/kime.nix  53
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/nabi.nix  16
-rw-r--r--  nixpkgs/nixos/modules/i18n/input-method/uim.nix  37
-rw-r--r--  nixpkgs/nixos/modules/image/amend-repart-definitions.py  112
-rw-r--r--  nixpkgs/nixos/modules/image/repart.nix  214
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/channel.nix  64
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-base.nix  50
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix  69
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares-gnome.nix  54
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares-plasma5.nix  49
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares.nix  23
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix  36
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5-new-kernel.nix  7
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5.nix  48
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix  15
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel.nix  7
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix  24
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/iso-image.nix  906
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64-new-kernel.nix  14
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64.nix  14
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/sd-image-armv7l-multiplatform.nix  14
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix  14
-rw-r--r--  nixpkgs/nixos/modules/installer/cd-dvd/sd-image.nix  14
-rw-r--r--  nixpkgs/nixos/modules/installer/netboot/netboot-base.nix  17
-rw-r--r--  nixpkgs/nixos/modules/installer/netboot/netboot-minimal.nix  15
-rw-r--r--  nixpkgs/nixos/modules/installer/netboot/netboot.nix  164
-rw-r--r--  nixpkgs/nixos/modules/installer/scan/detected.nix  12
-rw-r--r--  nixpkgs/nixos/modules/installer/scan/not-detected.nix  6
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-installer.nix  10
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-installer.nix  10
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix  15
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel.nix  7
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64.nix  83
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform-installer.nix  10
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix  52
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-powerpc64le.nix  49
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi-installer.nix  10
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi.nix  41
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-riscv64-qemu-installer.nix  10
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-riscv64-qemu.nix  32
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image-x86_64.nix  27
-rw-r--r--  nixpkgs/nixos/modules/installer/sd-card/sd-image.nix  285
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/get-version-suffix  23
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/manpages/nixos-build-vms.8  105
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/manpages/nixos-enter.8  72
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/manpages/nixos-generate-config.8  165
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/manpages/nixos-install.8  191
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/manpages/nixos-version.8  86
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix  7
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix  31
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh  53
-rwxr-xr-x  nixpkgs/nixos/modules/installer/tools/nixos-enter.sh  110
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl  701
-rwxr-xr-x  nixpkgs/nixos/modules/installer/tools/nixos-install.sh  231
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/nixos-version.sh  31
-rw-r--r--  nixpkgs/nixos/modules/installer/tools/tools.nix  266
-rw-r--r--  nixpkgs/nixos/modules/installer/virtualbox-demo.nix  61
-rw-r--r--  nixpkgs/nixos/modules/misc/assertions.nix  34
-rw-r--r--  nixpkgs/nixos/modules/misc/crashdump.nix  76
-rw-r--r--  nixpkgs/nixos/modules/misc/documentation.nix  363
-rw-r--r--  nixpkgs/nixos/modules/misc/documentation/test-dummy.chapter.xml  0
-rw-r--r--  nixpkgs/nixos/modules/misc/documentation/test.nix  49
-rw-r--r--  nixpkgs/nixos/modules/misc/extra-arguments.nix  7
-rw-r--r--  nixpkgs/nixos/modules/misc/ids.nix  702
-rw-r--r--  nixpkgs/nixos/modules/misc/label.nix  76
-rw-r--r--  nixpkgs/nixos/modules/misc/lib.nix  15
-rw-r--r--  nixpkgs/nixos/modules/misc/locate.nix  311
-rw-r--r--  nixpkgs/nixos/modules/misc/man-db.nix  87
-rw-r--r--  nixpkgs/nixos/modules/misc/mandoc.nix  63
-rw-r--r--  nixpkgs/nixos/modules/misc/meta.nix  76
-rw-r--r--  nixpkgs/nixos/modules/misc/nixops-autoluks.nix  43
-rw-r--r--  nixpkgs/nixos/modules/misc/nixpkgs.nix  397
-rw-r--r--  nixpkgs/nixos/modules/misc/nixpkgs/read-only.nix  74
-rw-r--r--  nixpkgs/nixos/modules/misc/nixpkgs/test.nix  128
-rw-r--r--  nixpkgs/nixos/modules/misc/passthru.nix  16
-rw-r--r--  nixpkgs/nixos/modules/misc/version.nix  188
-rw-r--r--  nixpkgs/nixos/modules/misc/wordlist.nix  59
-rw-r--r--  nixpkgs/nixos/modules/module-list.nix  1540
-rw-r--r--  nixpkgs/nixos/modules/profiles/all-hardware.nix  126
-rw-r--r--  nixpkgs/nixos/modules/profiles/base.nix  58
-rw-r--r--  nixpkgs/nixos/modules/profiles/clone-config.nix  109
-rw-r--r--  nixpkgs/nixos/modules/profiles/demo.nix  21
-rw-r--r--  nixpkgs/nixos/modules/profiles/docker-container.nix  60
-rw-r--r--  nixpkgs/nixos/modules/profiles/graphical.nix  20
-rw-r--r--  nixpkgs/nixos/modules/profiles/hardened.nix  118
-rw-r--r--  nixpkgs/nixos/modules/profiles/headless.nix  23
-rw-r--r--  nixpkgs/nixos/modules/profiles/image-based-appliance.nix  26
-rw-r--r--  nixpkgs/nixos/modules/profiles/installation-device.nix  125
-rw-r--r--  nixpkgs/nixos/modules/profiles/keys/ssh_host_ed25519_key  7
-rw-r--r--  nixpkgs/nixos/modules/profiles/keys/ssh_host_ed25519_key.pub  1
-rw-r--r--  nixpkgs/nixos/modules/profiles/macos-builder.nix  250
-rw-r--r--  nixpkgs/nixos/modules/profiles/minimal.nix  40
-rw-r--r--  nixpkgs/nixos/modules/profiles/qemu-guest.nix  17
-rw-r--r--  nixpkgs/nixos/modules/programs/_1password-gui.nix  65
-rw-r--r--  nixpkgs/nixos/modules/programs/_1password.nix  41
-rw-r--r--  nixpkgs/nixos/modules/programs/adb.nix  29
-rw-r--r--  nixpkgs/nixos/modules/programs/appgate-sdp.nix  25
-rw-r--r--  nixpkgs/nixos/modules/programs/atop.nix  178
-rw-r--r--  nixpkgs/nixos/modules/programs/ausweisapp.nix  25
-rw-r--r--  nixpkgs/nixos/modules/programs/autojump.nix  33
-rw-r--r--  nixpkgs/nixos/modules/programs/bandwhich.nix  31
-rw-r--r--  nixpkgs/nixos/modules/programs/bash-my-aws.nix  25
-rw-r--r--  nixpkgs/nixos/modules/programs/bash/bash-completion.nix  37
-rw-r--r--  nixpkgs/nixos/modules/programs/bash/bash.nix  217
-rw-r--r--  nixpkgs/nixos/modules/programs/bash/blesh.nix  16
-rw-r--r--  nixpkgs/nixos/modules/programs/bash/inputrc  37
-rw-r--r--  nixpkgs/nixos/modules/programs/bash/ls-colors.nix  20
-rw-r--r--  nixpkgs/nixos/modules/programs/bash/undistract-me.nix  36
-rw-r--r--  nixpkgs/nixos/modules/programs/bcc.nix  9
-rw-r--r--  nixpkgs/nixos/modules/programs/browserpass.nix  32
-rw-r--r--  nixpkgs/nixos/modules/programs/calls.nix  27
-rw-r--r--  nixpkgs/nixos/modules/programs/captive-browser.nix  156
-rw-r--r--  nixpkgs/nixos/modules/programs/ccache.nix  85
-rw-r--r--  nixpkgs/nixos/modules/programs/cdemu.nix  75
-rw-r--r--  nixpkgs/nixos/modules/programs/cfs-zen-tweaks.nix  34
-rw-r--r--  nixpkgs/nixos/modules/programs/chromium.nix  115
-rw-r--r--  nixpkgs/nixos/modules/programs/clash-verge.nix  33
-rw-r--r--  nixpkgs/nixos/modules/programs/cnping.nix  21
-rw-r--r--  nixpkgs/nixos/modules/programs/command-not-found/command-not-found.nix  95
-rw-r--r--  nixpkgs/nixos/modules/programs/command-not-found/command-not-found.pl  77
-rw-r--r--  nixpkgs/nixos/modules/programs/criu.nix  27
-rw-r--r--  nixpkgs/nixos/modules/programs/darling.nix  21
-rw-r--r--  nixpkgs/nixos/modules/programs/dconf.nix  229
-rw-r--r--  nixpkgs/nixos/modules/programs/digitalbitbox/default.md  47
-rw-r--r--  nixpkgs/nixos/modules/programs/digitalbitbox/default.nix  39
-rw-r--r--  nixpkgs/nixos/modules/programs/direnv.nix  137
-rw-r--r--  nixpkgs/nixos/modules/programs/dmrconfig.nix  38
-rw-r--r--  nixpkgs/nixos/modules/programs/droidcam.nix  16
-rw-r--r--  nixpkgs/nixos/modules/programs/ecryptfs.nix  31
-rw-r--r--  nixpkgs/nixos/modules/programs/environment.nix  61
-rw-r--r--  nixpkgs/nixos/modules/programs/evince.nix  51
-rw-r--r--  nixpkgs/nixos/modules/programs/extra-container.nix  17
-rw-r--r--  nixpkgs/nixos/modules/programs/feedbackd.nix  33
-rw-r--r--  nixpkgs/nixos/modules/programs/file-roller.nix  48
-rw-r--r--  nixpkgs/nixos/modules/programs/firefox.nix  303
-rw-r--r--  nixpkgs/nixos/modules/programs/firejail.nix  104
-rw-r--r--  nixpkgs/nixos/modules/programs/fish.nix  317
-rw-r--r--  nixpkgs/nixos/modules/programs/fish_completion-generator.patch  14
-rw-r--r--  nixpkgs/nixos/modules/programs/flashrom.nix  26
-rw-r--r--  nixpkgs/nixos/modules/programs/flexoptix-app.nix  25
-rw-r--r--  nixpkgs/nixos/modules/programs/freetds.nix  61
-rw-r--r--  nixpkgs/nixos/modules/programs/fuse.nix  37
-rw-r--r--  nixpkgs/nixos/modules/programs/fzf.nix  32
-rw-r--r--  nixpkgs/nixos/modules/programs/gamemode.nix  98
-rw-r--r--  nixpkgs/nixos/modules/programs/gamescope.nix  85
-rw-r--r--  nixpkgs/nixos/modules/programs/geary.nix  24
-rw-r--r--  nixpkgs/nixos/modules/programs/git.nix  95
-rw-r--r--  nixpkgs/nixos/modules/programs/gnome-disks.nix  50
-rw-r--r--  nixpkgs/nixos/modules/programs/gnome-terminal.nix  38
-rw-r--r--  nixpkgs/nixos/modules/programs/gnupg.nix  247
-rw-r--r--  nixpkgs/nixos/modules/programs/gpaste.nix  36
-rw-r--r--  nixpkgs/nixos/modules/programs/gphoto2.nix  30
-rw-r--r--  nixpkgs/nixos/modules/programs/haguichi.nix  15
-rw-r--r--  nixpkgs/nixos/modules/programs/hamster.nix  15
-rw-r--r--  nixpkgs/nixos/modules/programs/htop.nix  59
-rw-r--r--  nixpkgs/nixos/modules/programs/hyprland.nix  80
-rw-r--r--  nixpkgs/nixos/modules/programs/i3lock.nix  58
-rw-r--r--  nixpkgs/nixos/modules/programs/iay.nix  37
-rw-r--r--  nixpkgs/nixos/modules/programs/iftop.nix  20
-rw-r--r--  nixpkgs/nixos/modules/programs/iotop.nix  19
-rw-r--r--  nixpkgs/nixos/modules/programs/java.nix  79
-rw-r--r--  nixpkgs/nixos/modules/programs/k3b.nix  52
-rw-r--r--  nixpkgs/nixos/modules/programs/k40-whisperer.nix  40
-rw-r--r--  nixpkgs/nixos/modules/programs/kbdlight.nix  21
-rw-r--r--  nixpkgs/nixos/modules/programs/kclock.nix  13
-rw-r--r--  nixpkgs/nixos/modules/programs/kdeconnect.nix  35
-rw-r--r--  nixpkgs/nixos/modules/programs/less.nix  135
-rw-r--r--  nixpkgs/nixos/modules/programs/liboping.nix  24
-rw-r--r--  nixpkgs/nixos/modules/programs/light.nix  27
-rw-r--r--  nixpkgs/nixos/modules/programs/mdevctl.nix  18
-rw-r--r--  nixpkgs/nixos/modules/programs/mepo.nix  46
-rw-r--r--  nixpkgs/nixos/modules/programs/mininet.nix  39
-rw-r--r--  nixpkgs/nixos/modules/programs/minipro.nix  29
-rw-r--r--  nixpkgs/nixos/modules/programs/miriway.nix  78
-rw-r--r--  nixpkgs/nixos/modules/programs/mosh.nix  43
-rw-r--r--  nixpkgs/nixos/modules/programs/msmtp.nix  106
-rw-r--r--  nixpkgs/nixos/modules/programs/mtr.nix  41
-rw-r--r--  nixpkgs/nixos/modules/programs/nano.nix  48
-rw-r--r--  nixpkgs/nixos/modules/programs/nbd.nix  19
-rw-r--r--  nixpkgs/nixos/modules/programs/neovim.nix  176
-rw-r--r--  nixpkgs/nixos/modules/programs/nethoscope.nix  30
-rw-r--r--  nixpkgs/nixos/modules/programs/nexttrace.nix  25
-rw-r--r--  nixpkgs/nixos/modules/programs/nix-index.nix  62
-rw-r--r--  nixpkgs/nixos/modules/programs/nix-ld.nix  61
-rw-r--r--  nixpkgs/nixos/modules/programs/nm-applet.nix  31
-rw-r--r--  nixpkgs/nixos/modules/programs/nncp.nix  101
-rw-r--r--  nixpkgs/nixos/modules/programs/noisetorch.nix  30
-rw-r--r--  nixpkgs/nixos/modules/programs/npm.nix  54
-rw-r--r--  nixpkgs/nixos/modules/programs/ns-usbloader.nix  18
-rw-r--r--  nixpkgs/nixos/modules/programs/oblogout.nix  11
-rw-r--r--  nixpkgs/nixos/modules/programs/oddjobd.nix  33
-rw-r--r--  nixpkgs/nixos/modules/programs/openvpn3.nix  45
-rw-r--r--  nixpkgs/nixos/modules/programs/pantheon-tweaks.nix  19
-rw-r--r--  nixpkgs/nixos/modules/programs/partition-manager.nix  19
-rw-r--r--  nixpkgs/nixos/modules/programs/plotinus.md  17
-rw-r--r--  nixpkgs/nixos/modules/programs/plotinus.nix  36
-rw-r--r--  nixpkgs/nixos/modules/programs/projecteur.nix  20
-rw-r--r--  nixpkgs/nixos/modules/programs/proxychains.nix  169
-rw-r--r--  nixpkgs/nixos/modules/programs/qdmr.nix  25
-rw-r--r--  nixpkgs/nixos/modules/programs/qt5ct.nix  9
-rw-r--r--  nixpkgs/nixos/modules/programs/regreet.nix  88
-rw-r--r--  nixpkgs/nixos/modules/programs/rog-control-center.nix  29
-rw-r--r--  nixpkgs/nixos/modules/programs/rust-motd.nix  149
-rw-r--r--  nixpkgs/nixos/modules/programs/screen.nix  33
-rw-r--r--  nixpkgs/nixos/modules/programs/seahorse.nix  46
-rw-r--r--  nixpkgs/nixos/modules/programs/sedutil.nix  18
-rw-r--r--  nixpkgs/nixos/modules/programs/shadow.nix  239
-rw-r--r--  nixpkgs/nixos/modules/programs/sharing.nix  19
-rw-r--r--  nixpkgs/nixos/modules/programs/singularity.nix  92
-rw-r--r--  nixpkgs/nixos/modules/programs/skim.nix  34
-rw-r--r--  nixpkgs/nixos/modules/programs/slock.nix  31
-rw-r--r--  nixpkgs/nixos/modules/programs/sniffnet.nix  24
-rw-r--r--  nixpkgs/nixos/modules/programs/spacefm.nix  55
-rw-r--r--  nixpkgs/nixos/modules/programs/ssh.nix  357
-rw-r--r--  nixpkgs/nixos/modules/programs/starship.nix  68
-rw-r--r--  nixpkgs/nixos/modules/programs/steam.nix  155
-rw-r--r--  nixpkgs/nixos/modules/programs/streamdeck-ui.nix  34
-rw-r--r--  nixpkgs/nixos/modules/programs/sysdig.nix  14
-rw-r--r--  nixpkgs/nixos/modules/programs/system-config-printer.nix  32
-rw-r--r--  nixpkgs/nixos/modules/programs/systemtap.nix  29
-rw-r--r--  nixpkgs/nixos/modules/programs/thefuck.nix  40
-rw-r--r--  nixpkgs/nixos/modules/programs/thunar.nix  45
-rw-r--r--  nixpkgs/nixos/modules/programs/tmux.nix  233
-rw-r--r--  nixpkgs/nixos/modules/programs/traceroute.nix  28
-rw-r--r--  nixpkgs/nixos/modules/programs/trippy.nix  24
-rw-r--r--  nixpkgs/nixos/modules/programs/tsm-client.nix  287
-rw-r--r--  nixpkgs/nixos/modules/programs/turbovnc.nix  54
-rw-r--r--  nixpkgs/nixos/modules/programs/udevil.nix  19
-rw-r--r--  nixpkgs/nixos/modules/programs/usbtop.nix  21
-rw-r--r--  nixpkgs/nixos/modules/programs/vim.nix  33
-rw-r--r--  nixpkgs/nixos/modules/programs/virt-manager.nix  16
-rw-r--r--  nixpkgs/nixos/modules/programs/virtualbox.nix  8
-rw-r--r--  nixpkgs/nixos/modules/programs/wavemon.nix  30
-rw-r--r--  nixpkgs/nixos/modules/programs/wayland/cardboard.nix  24
-rw-r--r--  nixpkgs/nixos/modules/programs/wayland/river.nix  59
-rw-r--r--  nixpkgs/nixos/modules/programs/wayland/sway.nix  158
-rw-r--r--  nixpkgs/nixos/modules/programs/wayland/waybar.nix  25
-rw-r--r--  nixpkgs/nixos/modules/programs/wayland/wayfire.nix  48
-rw-r--r--  nixpkgs/nixos/modules/programs/wayland/wayland-session.nix  23
-rw-r--r--  nixpkgs/nixos/modules/programs/weylus.nix  47
-rw-r--r--  nixpkgs/nixos/modules/programs/wireshark.nix  42
-rw-r--r--  nixpkgs/nixos/modules/programs/wshowkeys.nix  27
-rw-r--r--  nixpkgs/nixos/modules/programs/xastir.nix  23
-rw-r--r--  nixpkgs/nixos/modules/programs/xfconf.nix  27
-rw-r--r--  nixpkgs/nixos/modules/programs/xfs_quota.nix  110
-rw-r--r--  nixpkgs/nixos/modules/programs/xonsh.nix  85
-rw-r--r--  nixpkgs/nixos/modules/programs/xss-lock.nix  45
-rw-r--r--  nixpkgs/nixos/modules/programs/xwayland.nix  50
-rw-r--r--  nixpkgs/nixos/modules/programs/yabar.nix  163
-rw-r--r--  nixpkgs/nixos/modules/programs/yazi.nix  53
-rw-r--r--  nixpkgs/nixos/modules/programs/yubikey-touch-detector.nix  21
-rw-r--r--  nixpkgs/nixos/modules/programs/zmap.nix  18
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md  109
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.nix  146
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/zinputrc  42
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/zsh-autoenv.nix  28
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/zsh-autosuggestions.nix  73
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/zsh-syntax-highlighting.nix  108
-rw-r--r--  nixpkgs/nixos/modules/programs/zsh/zsh.nix  319
-rw-r--r--  nixpkgs/nixos/modules/rename.nix  133
-rw-r--r--  nixpkgs/nixos/modules/security/acme/default.md  354
-rw-r--r--  nixpkgs/nixos/modules/security/acme/default.nix  1035
-rw-r--r--  nixpkgs/nixos/modules/security/acme/mk-cert-ownership-assertion.nix  4
-rw-r--r--  nixpkgs/nixos/modules/security/apparmor.nix  216
-rw-r--r--  nixpkgs/nixos/modules/security/apparmor/includes.nix  322
-rw-r--r--  nixpkgs/nixos/modules/security/apparmor/profiles.nix  5
-rw-r--r--  nixpkgs/nixos/modules/security/audit.nix  123
-rw-r--r--  nixpkgs/nixos/modules/security/auditd.nix  31
-rw-r--r--  nixpkgs/nixos/modules/security/ca.nix  93
-rw-r--r--  nixpkgs/nixos/modules/security/chromium-suid-sandbox.nix  38
-rw-r--r--  nixpkgs/nixos/modules/security/dhparams.nix  185
-rw-r--r--  nixpkgs/nixos/modules/security/doas.nix  292
-rw-r--r--  nixpkgs/nixos/modules/security/duosec.nix  249
-rw-r--r--  nixpkgs/nixos/modules/security/google_oslogin.nix  75
-rw-r--r--  nixpkgs/nixos/modules/security/ipa.nix  258
-rw-r--r--  nixpkgs/nixos/modules/security/lock-kernel-modules.nix  57
-rw-r--r--  nixpkgs/nixos/modules/security/misc.nix  140
-rw-r--r--  nixpkgs/nixos/modules/security/oath.nix  50
-rw-r--r--  nixpkgs/nixos/modules/security/pam.nix  1540
-rw-r--r--  nixpkgs/nixos/modules/security/pam_mount.nix  185
-rw-r--r--  nixpkgs/nixos/modules/security/pam_usb.nix  51
-rw-r--r--  nixpkgs/nixos/modules/security/please.nix  122
-rw-r--r--  nixpkgs/nixos/modules/security/polkit.nix  119
-rw-r--r--  nixpkgs/nixos/modules/security/rngd.nix  16
-rw-r--r--  nixpkgs/nixos/modules/security/rtkit.nix  47
-rw-r--r--  nixpkgs/nixos/modules/security/sudo-rs.nix  296
-rw-r--r--  nixpkgs/nixos/modules/security/sudo.nix  280
-rw-r--r--  nixpkgs/nixos/modules/security/systemd-confinement.nix  197
-rw-r--r--  nixpkgs/nixos/modules/security/tpm2.nix  174
-rw-r--r--  nixpkgs/nixos/modules/security/wrappers/default.nix  339
-rw-r--r--  nixpkgs/nixos/modules/security/wrappers/wrapper.c  211
-rw-r--r--  nixpkgs/nixos/modules/security/wrappers/wrapper.nix  20
-rw-r--r--  nixpkgs/nixos/modules/services/admin/meshcentral.nix  51
-rw-r--r--  nixpkgs/nixos/modules/services/admin/oxidized.nix  118
-rw-r--r--  nixpkgs/nixos/modules/services/admin/pgadmin.nix  187
-rw-r--r--  nixpkgs/nixos/modules/services/admin/salt/master.nix  63
-rw-r--r--  nixpkgs/nixos/modules/services/admin/salt/minion.nix  67
-rw-r--r--  nixpkgs/nixos/modules/services/amqp/activemq/ActiveMQBroker.java  19
-rw-r--r--  nixpkgs/nixos/modules/services/amqp/activemq/default.nix  134
-rw-r--r--  nixpkgs/nixos/modules/services/amqp/rabbitmq.nix  228
-rw-r--r--  nixpkgs/nixos/modules/services/audio/alsa.nix  133
-rw-r--r--  nixpkgs/nixos/modules/services/audio/botamusique.nix  114
-rw-r--r--  nixpkgs/nixos/modules/services/audio/castopod.md  22
-rw-r--r--  nixpkgs/nixos/modules/services/audio/castopod.nix  287
-rw-r--r--  nixpkgs/nixos/modules/services/audio/gmediarender.nix  116
-rw-r--r--  nixpkgs/nixos/modules/services/audio/gonic.nix  90
-rw-r--r--  nixpkgs/nixos/modules/services/audio/goxlr-utility.nix  48
-rw-r--r--  nixpkgs/nixos/modules/services/audio/hqplayerd.nix  139
-rw-r--r--  nixpkgs/nixos/modules/services/audio/icecast.nix  131
-rw-r--r--  nixpkgs/nixos/modules/services/audio/jack.nix  294
-rw-r--r--  nixpkgs/nixos/modules/services/audio/jmusicbot.nix  48
-rw-r--r--  nixpkgs/nixos/modules/services/audio/liquidsoap.nix  72
-rw-r--r--  nixpkgs/nixos/modules/services/audio/mopidy.nix  108
-rw-r--r--  nixpkgs/nixos/modules/services/audio/mpd.nix  266
-rw-r--r--  nixpkgs/nixos/modules/services/audio/mpdscribble.nix  213
-rw-r--r--  nixpkgs/nixos/modules/services/audio/navidrome.nix  83
-rw-r--r--  nixpkgs/nixos/modules/services/audio/networkaudiod.nix  19
-rw-r--r--  nixpkgs/nixos/modules/services/audio/roon-bridge.nix  80
-rw-r--r--  nixpkgs/nixos/modules/services/audio/roon-server.nix  86
-rw-r--r--  nixpkgs/nixos/modules/services/audio/slimserver.nix  73
-rw-r--r--  nixpkgs/nixos/modules/services/audio/snapserver.nix  316
-rw-r--r--  nixpkgs/nixos/modules/services/audio/spotifyd.nix  68
-rw-r--r--  nixpkgs/nixos/modules/services/audio/squeezelite.nix  46
-rw-r--r--  nixpkgs/nixos/modules/services/audio/tts.nix  152
-rw-r--r--  nixpkgs/nixos/modules/services/audio/wyoming/faster-whisper.nix  190
-rw-r--r--  nixpkgs/nixos/modules/services/audio/wyoming/openwakeword.nix  163
-rw-r--r--  nixpkgs/nixos/modules/services/audio/wyoming/piper.nix  174
-rw-r--r--  nixpkgs/nixos/modules/services/audio/ympd.nix  95
-rw-r--r--  nixpkgs/nixos/modules/services/backup/automysqlbackup.nix  134
-rw-r--r--  nixpkgs/nixos/modules/services/backup/bacula.nix  578
-rw-r--r--  nixpkgs/nixos/modules/services/backup/borgbackup.md  163
-rw-r--r--  nixpkgs/nixos/modules/services/backup/borgbackup.nix  777
-rw-r--r--  nixpkgs/nixos/modules/services/backup/borgmatic.nix  104
-rw-r--r--  nixpkgs/nixos/modules/services/backup/btrbk.nix  273
-rw-r--r--  nixpkgs/nixos/modules/services/backup/duplicati.nix  87
-rw-r--r--  nixpkgs/nixos/modules/services/backup/duplicity.nix  190
-rw-r--r--  nixpkgs/nixos/modules/services/backup/mysql-backup.nix  130
-rw-r--r--  nixpkgs/nixos/modules/services/backup/postgresql-backup.nix  181
-rw-r--r--  nixpkgs/nixos/modules/services/backup/postgresql-wal-receiver.nix  204
-rw-r--r--  nixpkgs/nixos/modules/services/backup/restic-rest-server.nix  111
-rw-r--r--  nixpkgs/nixos/modules/services/backup/restic.nix  402
-rw-r--r--  nixpkgs/nixos/modules/services/backup/rsnapshot.nix  75
-rw-r--r--  nixpkgs/nixos/modules/services/backup/sanoid.nix  205
-rw-r--r--  nixpkgs/nixos/modules/services/backup/syncoid.nix  424
-rw-r--r--  nixpkgs/nixos/modules/services/backup/tarsnap.nix  409
-rw-r--r--  nixpkgs/nixos/modules/services/backup/tsm.nix  125
-rw-r--r--  nixpkgs/nixos/modules/services/backup/zfs-replication.nix  90
-rw-r--r--  nixpkgs/nixos/modules/services/backup/znapzend.nix  469
-rw-r--r--  nixpkgs/nixos/modules/services/backup/zrepl.nix  63
-rw-r--r--  nixpkgs/nixos/modules/services/blockchain/ethereum/erigon.nix  122
-rw-r--r--  nixpkgs/nixos/modules/services/blockchain/ethereum/geth.nix  213
-rw-r--r--  nixpkgs/nixos/modules/services/blockchain/ethereum/lighthouse.nix  315
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/corosync/default.nix  112
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/hadoop/conf.nix  45
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/hadoop/default.nix  223
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/hadoop/hbase.nix  218
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/hadoop/hdfs.nix  204
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/hadoop/yarn.nix  200
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/k3s/default.nix  181
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/addon-manager.nix  171
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dns.nix  373
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/apiserver.nix  487
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/controller-manager.nix  169
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/default.nix  315
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/flannel.nix  99
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix  387
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix  406
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/proxy.nix  102
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/kubernetes/scheduler.nix  101
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/pacemaker/default.nix  52
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/patroni/default.nix  265
-rw-r--r--  nixpkgs/nixos/modules/services/cluster/spark/default.nix  162
-rw-r--r--  nixpkgs/nixos/modules/services/computing/boinc/client.nix  119
-rw-r--r--  nixpkgs/nixos/modules/services/computing/foldingathome/client.nix  91
-rw-r--r--  nixpkgs/nixos/modules/services/computing/slurm/slurm.nix  442
-rw-r--r--  nixpkgs/nixos/modules/services/computing/torque/mom.nix  63
-rw-r--r--  nixpkgs/nixos/modules/services/computing/torque/server.nix  96
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix  313
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix  199
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/buildkite-agents.nix  219
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/gitea-actions-runner.nix  257
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/github-runner.nix  25
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/github-runner/options.nix  219
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/github-runner/service.nix  268
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/github-runners.nix  58
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/gitlab-runner.nix  616
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/gocd-agent/default.nix  218
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/gocd-server/default.nix  216
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/common.nix  118
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/default.nix  110
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/settings.nix  153
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix  506
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/jenkins/default.nix  247
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/jenkins/job-builder.nix  248
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix  82
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/woodpecker/agents.nix  167
-rw-r--r--  nixpkgs/nixos/modules/services/continuous-integration/woodpecker/server.nix  98
-rw-r--r--  nixpkgs/nixos/modules/services/databases/aerospike.nix  156
-rw-r--r--  nixpkgs/nixos/modules/services/databases/cassandra.nix  585
-rw-r--r--  nixpkgs/nixos/modules/services/databases/clickhouse.nix  85
-rw-r--r--  nixpkgs/nixos/modules/services/databases/cockroachdb.nix  225
-rw-r--r--  nixpkgs/nixos/modules/services/databases/couchdb.nix  225
-rw-r--r--  nixpkgs/nixos/modules/services/databases/dgraph.nix  148
-rw-r--r--  nixpkgs/nixos/modules/services/databases/dragonflydb.nix  152
-rw-r--r--  nixpkgs/nixos/modules/services/databases/ferretdb.nix  79
-rw-r--r--  nixpkgs/nixos/modules/services/databases/firebird.nix  168
-rw-r--r--  nixpkgs/nixos/modules/services/databases/foundationdb.md  309
-rw-r--r--  nixpkgs/nixos/modules/services/databases/foundationdb.nix  429
-rw-r--r--  nixpkgs/nixos/modules/services/databases/hbase-standalone.nix  148
-rw-r--r--  nixpkgs/nixos/modules/services/databases/influxdb.nix  195
-rw-r--r--  nixpkgs/nixos/modules/services/databases/influxdb2.nix  497
-rw-r--r--  nixpkgs/nixos/modules/services/databases/lldap.nix  121
-rw-r--r--  nixpkgs/nixos/modules/services/databases/memcached.nix  118
-rw-r--r--  nixpkgs/nixos/modules/services/databases/monetdb.nix  100
-rw-r--r--  nixpkgs/nixos/modules/services/databases/mongodb.nix  197
-rw-r--r--  nixpkgs/nixos/modules/services/databases/mysql.nix  521
-rw-r--r--  nixpkgs/nixos/modules/services/databases/neo4j.nix  641
-rw-r--r--  nixpkgs/nixos/modules/services/databases/openldap.nix  342
-rw-r--r--  nixpkgs/nixos/modules/services/databases/opentsdb.nix  102
-rw-r--r--  nixpkgs/nixos/modules/services/databases/pgbouncer.nix  632
-rw-r--r--  nixpkgs/nixos/modules/services/databases/pgmanage.nix  207
-rw-r--r--  nixpkgs/nixos/modules/services/databases/postgresql.md  329
-rw-r--r--  nixpkgs/nixos/modules/services/databases/postgresql.nix  650
-rw-r--r--  nixpkgs/nixos/modules/services/databases/redis.nix  412
-rw-r--r--  nixpkgs/nixos/modules/services/databases/rethinkdb.nix  108
-rw-r--r--  nixpkgs/nixos/modules/services/databases/surrealdb.nix  98
-rw-r--r--  nixpkgs/nixos/modules/services/databases/victoriametrics.nix  78
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/accountsservice.nix  58
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/bamf.nix  27
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/blueman.nix  25
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/cpupower-gui.nix  56
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/deepin/app-services.nix  36
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/deepin/dde-api.nix  50
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/deepin/dde-daemon.nix  40
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/dleyna-renderer.nix  28
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/dleyna-server.nix  28
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/espanso.nix  24
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/flatpak.md  39
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/flatpak.nix  56
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/geoclue2.nix  272
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/at-spi2-core.nix  60
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/evolution-data-server.nix  71
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/glib-networking.nix  45
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/gnome-browser-connector.nix  47
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/gnome-initial-setup.nix  98
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/gnome-keyring.nix  63
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/gnome-online-accounts.nix  51
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/gnome-online-miners.nix  51
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/gnome-remote-desktop.nix  32
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/gnome-settings-daemon.nix  70
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/gnome-user-share.nix  48
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/rygel.nix  44
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/sushi.nix  50
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/tracker-miners.nix  54
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gnome/tracker.nix  76
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gsignond.nix  45
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/gvfs.nix  66
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/malcontent.nix  40
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/neard.nix  23
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix  183
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix  73
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/profile-sync-daemon.nix  77
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/system-config-printer.nix  42
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/system76-scheduler.nix  296
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/telepathy.nix  48
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/tumbler.nix  52
-rw-r--r--  nixpkgs/nixos/modules/services/desktops/zeitgeist.nix  31
-rw-r--r--  nixpkgs/nixos/modules/services/development/blackfire.md  39
-rw-r--r--  nixpkgs/nixos/modules/services/development/blackfire.nix  60
-rw-r--r--  nixpkgs/nixos/modules/services/development/bloop.nix  54
-rw-r--r--  nixpkgs/nixos/modules/services/development/distccd.nix  155
-rw-r--r--  nixpkgs/nixos/modules/services/development/gemstash.nix  103
-rw-r--r--  nixpkgs/nixos/modules/services/development/hoogle.nix  81
-rw-r--r--  nixpkgs/nixos/modules/services/development/jupyter/default.nix  206
-rw-r--r--  nixpkgs/nixos/modules/services/development/jupyter/kernel-options.nix  80
-rw-r--r--  nixpkgs/nixos/modules/services/development/jupyterhub/default.nix  202
-rw-r--r--  nixpkgs/nixos/modules/services/development/livebook.md  39
-rw-r--r--  nixpkgs/nixos/modules/services/development/livebook.nix  90
-rw-r--r--  nixpkgs/nixos/modules/services/development/lorri.nix  55
-rw-r--r--  nixpkgs/nixos/modules/services/development/rstudio-server/default.nix  107
-rw-r--r--  nixpkgs/nixos/modules/services/development/zammad.nix  323
-rw-r--r--  nixpkgs/nixos/modules/services/display-managers/greetd.nix  114
-rw-r--r--  nixpkgs/nixos/modules/services/editors/emacs.md  406
-rw-r--r--  nixpkgs/nixos/modules/services/editors/emacs.nix  118
-rw-r--r--  nixpkgs/nixos/modules/services/editors/haste.nix  86
-rw-r--r--  nixpkgs/nixos/modules/services/editors/infinoted.nix  160
-rw-r--r--  nixpkgs/nixos/modules/services/finance/odoo.nix  128
-rw-r--r--  nixpkgs/nixos/modules/services/games/asf.nix  271
-rw-r--r--  nixpkgs/nixos/modules/services/games/crossfire-server.nix  179
-rw-r--r--  nixpkgs/nixos/modules/services/games/deliantra-server.nix  172
-rw-r--r--  nixpkgs/nixos/modules/services/games/factorio.nix  299
-rw-r--r--  nixpkgs/nixos/modules/services/games/freeciv.nix  187
-rw-r--r--  nixpkgs/nixos/modules/services/games/mchprs.nix  341
-rw-r--r--  nixpkgs/nixos/modules/services/games/minecraft-server.nix  285
-rw-r--r--  nixpkgs/nixos/modules/services/games/minetest-server.nix  162
-rw-r--r--  nixpkgs/nixos/modules/services/games/openarena.nix  56
-rw-r--r--  nixpkgs/nixos/modules/services/games/quake3-server.nix  116
-rw-r--r--  nixpkgs/nixos/modules/services/games/teeworlds.nix  119
-rw-r--r--  nixpkgs/nixos/modules/services/games/terraria.nix  169
-rw-r--r--  nixpkgs/nixos/modules/services/games/xonotic.nix  198
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/acpid.nix  155
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/actkbd.nix  133
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/argonone.nix  58
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/asusd.nix  104
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/auto-cpufreq.nix  51
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/bluetooth.nix  162
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/bolt.nix  34
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/brltty.nix  57
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/ddccontrol.nix  39
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/evscript.nix  51
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/fancontrol.nix  55
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/freefall.nix  64
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/fwupd.nix  210
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix  66
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/illum.nix  36
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/interception-tools.nix  62
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/iptsd.nix  53
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/irqbalance.nix  24
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/joycond.nix  33
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/kanata.nix  185
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/keyd.nix  182
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/lcd.nix  168
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/lirc.nix  100
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/nvidia-optimus.nix  43
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/openrgb.nix  60
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/pcscd.nix  77
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/pommed.nix  50
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/power-profiles-daemon.nix  55
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/rasdaemon.nix  170
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/ratbagd.nix  27
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane.nix  211
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan4.nix  112
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan4_etc_files.nix  69
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix  110
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5_etc_files.nix  77
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/sane_extra_backends/dsseries.nix  26
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/spacenavd.nix  24
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/supergfxd.nix  42
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/tcsd.nix  162
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/thermald.nix  57
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/thinkfan.nix  230
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/throttled.nix  36
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/tlp.nix  124
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/trezord.md  17
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/trezord.nix  70
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/triggerhappy.nix  122
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/tuxedo-rs.nix  49
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/udev.nix  443
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/udisks2.nix  101
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/undervolt.nix  190
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/upower.nix  237
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/usbmuxd.nix  86
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/usbrelayd.nix  43
-rw-r--r--  nixpkgs/nixos/modules/services/hardware/vdr.nix  82
-rw-r--r--  nixpkgs/nixos/modules/services/home-automation/ebusd.nix  270
-rw-r--r--  nixpkgs/nixos/modules/services/home-automation/esphome.nix  136
-rw-r--r--  nixpkgs/nixos/modules/services/home-automation/evcc.nix  96
-rw-r--r--  nixpkgs/nixos/modules/services/home-automation/home-assistant.nix  695
-rw-r--r--  nixpkgs/nixos/modules/services/home-automation/homeassistant-satellite.nix  225
-rw-r--r--  nixpkgs/nixos/modules/services/home-automation/zigbee2mqtt.nix  142
-rw-r--r--  nixpkgs/nixos/modules/services/home-automation/zwave-js.nix  152
-rw-r--r--  nixpkgs/nixos/modules/services/logging/SystemdJournal2Gelf.nix  60
-rw-r--r--  nixpkgs/nixos/modules/services/logging/awstats.nix  255
-rw-r--r--  nixpkgs/nixos/modules/services/logging/filebeat.nix  252
-rw-r--r--  nixpkgs/nixos/modules/services/logging/fluentd.nix  54
-rw-r--r--  nixpkgs/nixos/modules/services/logging/graylog.nix  169
-rw-r--r--  nixpkgs/nixos/modules/services/logging/heartbeat.nix  84
-rw-r--r--  nixpkgs/nixos/modules/services/logging/journalbeat.nix  94
-rw-r--r--  nixpkgs/nixos/modules/services/logging/journaldriver.nix  112
-rw-r--r--  nixpkgs/nixos/modules/services/logging/journalwatch.nix  265
-rw-r--r--  nixpkgs/nixos/modules/services/logging/klogd.nix  9
-rw-r--r--  nixpkgs/nixos/modules/services/logging/logcheck.nix  236
-rw-r--r--  nixpkgs/nixos/modules/services/logging/logrotate.nix  253
-rw-r--r--  nixpkgs/nixos/modules/services/logging/logstash.nix  194
-rw-r--r--  nixpkgs/nixos/modules/services/logging/promtail.nix  91
-rw-r--r--  nixpkgs/nixos/modules/services/logging/rsyslogd.nix  105
-rw-r--r--  nixpkgs/nixos/modules/services/logging/syslog-ng.nix  98
-rw-r--r--  nixpkgs/nixos/modules/services/logging/syslogd.nix  130
-rw-r--r--  nixpkgs/nixos/modules/services/logging/ulogd.nix  63
-rw-r--r--  nixpkgs/nixos/modules/services/logging/vector.nix  63
-rw-r--r--  nixpkgs/nixos/modules/services/mail/clamsmtp.nix  181
-rw-r--r--  nixpkgs/nixos/modules/services/mail/davmail.nix  126
-rw-r--r--  nixpkgs/nixos/modules/services/mail/dkimproxy-out.nix  120
-rw-r--r--  nixpkgs/nixos/modules/services/mail/dovecot.nix  462
-rw-r--r--  nixpkgs/nixos/modules/services/mail/dspam.nix  150
-rw-r--r--  nixpkgs/nixos/modules/services/mail/exim.nix  133
-rw-r--r--  nixpkgs/nixos/modules/services/mail/goeland.nix  74
-rw-r--r--  nixpkgs/nixos/modules/services/mail/listmonk.nix  222
-rw-r--r--  nixpkgs/nixos/modules/services/mail/maddy.nix  464
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mail.nix  34
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mailcatcher.nix  68
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mailhog.nix  82
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mailman.md  82
-rw-r--r--  nixpkgs/nixos/modules/services/mail/mailman.nix  652
-rw-r--r--nixpkgs/nixos/modules/services/mail/mlmmj.nix173
-rw-r--r--nixpkgs/nixos/modules/services/mail/nullmailer.nix246
-rw-r--r--nixpkgs/nixos/modules/services/mail/offlineimap.nix72
-rw-r--r--nixpkgs/nixos/modules/services/mail/opendkim.nix167
-rw-r--r--nixpkgs/nixos/modules/services/mail/opensmtpd.nix135
-rw-r--r--nixpkgs/nixos/modules/services/mail/pfix-srsd.nix56
-rw-r--r--nixpkgs/nixos/modules/services/mail/postfix.nix993
-rw-r--r--nixpkgs/nixos/modules/services/mail/postfixadmin.nix199
-rw-r--r--nixpkgs/nixos/modules/services/mail/postgrey.nix205
-rw-r--r--nixpkgs/nixos/modules/services/mail/postsrsd.nix135
-rw-r--r--nixpkgs/nixos/modules/services/mail/public-inbox.nix596
-rw-r--r--nixpkgs/nixos/modules/services/mail/roundcube.nix275
-rw-r--r--nixpkgs/nixos/modules/services/mail/rspamd.nix446
-rw-r--r--nixpkgs/nixos/modules/services/mail/rss2email.nix137
-rw-r--r--nixpkgs/nixos/modules/services/mail/schleuder.nix162
-rw-r--r--nixpkgs/nixos/modules/services/mail/spamassassin.nix194
-rw-r--r--nixpkgs/nixos/modules/services/mail/stalwart-mail.nix106
-rw-r--r--nixpkgs/nixos/modules/services/mail/sympa.nix588
-rw-r--r--nixpkgs/nixos/modules/services/mail/zeyple.nix125
-rw-r--r--nixpkgs/nixos/modules/services/matrix/appservice-discord.nix162
-rw-r--r--nixpkgs/nixos/modules/services/matrix/appservice-irc.nix236
-rw-r--r--nixpkgs/nixos/modules/services/matrix/conduit.nix160
-rw-r--r--nixpkgs/nixos/modules/services/matrix/dendrite.nix323
-rw-r--r--nixpkgs/nixos/modules/services/matrix/matrix-sliding-sync.nix101
-rw-r--r--nixpkgs/nixos/modules/services/matrix/mautrix-facebook.nix200
-rw-r--r--nixpkgs/nixos/modules/services/matrix/mautrix-telegram.nix196
-rw-r--r--nixpkgs/nixos/modules/services/matrix/mautrix-whatsapp.nix205
-rw-r--r--nixpkgs/nixos/modules/services/matrix/mjolnir.md110
-rw-r--r--nixpkgs/nixos/modules/services/matrix/mjolnir.nix242
-rw-r--r--nixpkgs/nixos/modules/services/matrix/mx-puppet-discord.nix122
-rw-r--r--nixpkgs/nixos/modules/services/matrix/pantalaimon-options.nix70
-rw-r--r--nixpkgs/nixos/modules/services/matrix/pantalaimon.nix70
-rw-r--r--nixpkgs/nixos/modules/services/matrix/synapse.md219
-rw-r--r--nixpkgs/nixos/modules/services/matrix/synapse.nix1188
-rw-r--r--nixpkgs/nixos/modules/services/misc/airsonic.nix179
-rw-r--r--nixpkgs/nixos/modules/services/misc/amazon-ssm-agent.nix81
-rw-r--r--nixpkgs/nixos/modules/services/misc/ananicy.nix148
-rw-r--r--nixpkgs/nixos/modules/services/misc/ankisyncd.nix71
-rw-r--r--nixpkgs/nixos/modules/services/misc/apache-kafka.nix151
-rw-r--r--nixpkgs/nixos/modules/services/misc/atuin.nix143
-rw-r--r--nixpkgs/nixos/modules/services/misc/autofs.nix100
-rw-r--r--nixpkgs/nixos/modules/services/misc/autorandr.nix365
-rw-r--r--nixpkgs/nixos/modules/services/misc/autosuspend.nix230
-rw-r--r--nixpkgs/nixos/modules/services/misc/bazarr.nix77
-rw-r--r--nixpkgs/nixos/modules/services/misc/bcg.nix175
-rw-r--r--nixpkgs/nixos/modules/services/misc/beanstalkd.nix63
-rw-r--r--nixpkgs/nixos/modules/services/misc/bees.nix129
-rw-r--r--nixpkgs/nixos/modules/services/misc/bepasty.nix179
-rw-r--r--nixpkgs/nixos/modules/services/misc/calibre-server.nix146
-rw-r--r--nixpkgs/nixos/modules/services/misc/canto-daemon.nix37
-rw-r--r--nixpkgs/nixos/modules/services/misc/cfdyndns.nix81
-rw-r--r--nixpkgs/nixos/modules/services/misc/cgminer.nix148
-rw-r--r--nixpkgs/nixos/modules/services/misc/clipcat.nix31
-rw-r--r--nixpkgs/nixos/modules/services/misc/clipmenu.nix31
-rw-r--r--nixpkgs/nixos/modules/services/misc/confd.nix90
-rw-r--r--nixpkgs/nixos/modules/services/misc/cpuminer-cryptonight.nix66
-rw-r--r--nixpkgs/nixos/modules/services/misc/devmon.nix25
-rw-r--r--nixpkgs/nixos/modules/services/misc/dictd.nix69
-rw-r--r--nixpkgs/nixos/modules/services/misc/disnix.nix98
-rw-r--r--nixpkgs/nixos/modules/services/misc/docker-registry.nix162
-rw-r--r--nixpkgs/nixos/modules/services/misc/domoticz.nix51
-rw-r--r--nixpkgs/nixos/modules/services/misc/duckling.nix39
-rw-r--r--nixpkgs/nixos/modules/services/misc/dwm-status.nix73
-rw-r--r--nixpkgs/nixos/modules/services/misc/dysnomia.nix265
-rw-r--r--nixpkgs/nixos/modules/services/misc/errbot.nix104
-rw-r--r--nixpkgs/nixos/modules/services/misc/etcd.nix208
-rw-r--r--nixpkgs/nixos/modules/services/misc/etebase-server.nix226
-rw-r--r--nixpkgs/nixos/modules/services/misc/etesync-dav.nix92
-rw-r--r--nixpkgs/nixos/modules/services/misc/evdevremapkeys.nix59
-rw-r--r--nixpkgs/nixos/modules/services/misc/felix.nix104
-rw-r--r--nixpkgs/nixos/modules/services/misc/forgejo.md79
-rw-r--r--nixpkgs/nixos/modules/services/misc/forgejo.nix679
-rw-r--r--nixpkgs/nixos/modules/services/misc/freeswitch.nix104
-rw-r--r--nixpkgs/nixos/modules/services/misc/fstrim.nix45
-rw-r--r--nixpkgs/nixos/modules/services/misc/gammu-smsd.nix253
-rw-r--r--nixpkgs/nixos/modules/services/misc/geoipupdate.nix221
-rw-r--r--nixpkgs/nixos/modules/services/misc/gitea.nix711
-rw-r--r--nixpkgs/nixos/modules/services/misc/gitlab.md112
-rw-r--r--nixpkgs/nixos/modules/services/misc/gitlab.nix1694
-rw-r--r--nixpkgs/nixos/modules/services/misc/gitolite.nix241
-rw-r--r--nixpkgs/nixos/modules/services/misc/gitweb.nix60
-rw-r--r--nixpkgs/nixos/modules/services/misc/gogs.nix274
-rw-r--r--nixpkgs/nixos/modules/services/misc/gollum.nix158
-rw-r--r--nixpkgs/nixos/modules/services/misc/gpsd.nix145
-rw-r--r--nixpkgs/nixos/modules/services/misc/greenclip.nix31
-rw-r--r--nixpkgs/nixos/modules/services/misc/headphones.nix89
-rw-r--r--nixpkgs/nixos/modules/services/misc/heisenbridge.nix221
-rw-r--r--nixpkgs/nixos/modules/services/misc/homepage-dashboard.nix55
-rw-r--r--nixpkgs/nixos/modules/services/misc/ihaskell.nix65
-rw-r--r--nixpkgs/nixos/modules/services/misc/input-remapper.nix30
-rw-r--r--nixpkgs/nixos/modules/services/misc/irkerd.nix67
-rw-r--r--nixpkgs/nixos/modules/services/misc/jackett.nix82
-rw-r--r--nixpkgs/nixos/modules/services/misc/jellyfin.nix131
-rw-r--r--nixpkgs/nixos/modules/services/misc/jellyseerr.nix62
-rw-r--r--nixpkgs/nixos/modules/services/misc/klipper.nix242
-rw-r--r--nixpkgs/nixos/modules/services/misc/languagetool.nix78
-rw-r--r--nixpkgs/nixos/modules/services/misc/leaps.nix62
-rw-r--r--nixpkgs/nixos/modules/services/misc/libreddit.nix91
-rw-r--r--nixpkgs/nixos/modules/services/misc/lidarr.nix89
-rw-r--r--nixpkgs/nixos/modules/services/misc/lifecycled.nix164
-rw-r--r--nixpkgs/nixos/modules/services/misc/logkeys.nix30
-rw-r--r--nixpkgs/nixos/modules/services/misc/mame.nix69
-rw-r--r--nixpkgs/nixos/modules/services/misc/mbpfan.nix95
-rw-r--r--nixpkgs/nixos/modules/services/misc/mediatomb.nix396
-rw-r--r--nixpkgs/nixos/modules/services/misc/metabase.nix103
-rw-r--r--nixpkgs/nixos/modules/services/misc/moonraker.nix215
-rw-r--r--nixpkgs/nixos/modules/services/misc/mqtt2influxdb.nix253
-rw-r--r--nixpkgs/nixos/modules/services/misc/n8n.nix92
-rw-r--r--nixpkgs/nixos/modules/services/misc/nitter.nix388
-rw-r--r--nixpkgs/nixos/modules/services/misc/nix-gc.nix104
-rw-r--r--nixpkgs/nixos/modules/services/misc/nix-optimise.nix51
-rw-r--r--nixpkgs/nixos/modules/services/misc/nix-ssh-serve.nix69
-rw-r--r--nixpkgs/nixos/modules/services/misc/novacomd.nix31
-rw-r--r--nixpkgs/nixos/modules/services/misc/ntfy-sh.nix135
-rw-r--r--nixpkgs/nixos/modules/services/misc/nzbget.nix117
-rw-r--r--nixpkgs/nixos/modules/services/misc/nzbhydra2.nix78
-rw-r--r--nixpkgs/nixos/modules/services/misc/octoprint.nix142
-rw-r--r--nixpkgs/nixos/modules/services/misc/ombi.nix81
-rw-r--r--nixpkgs/nixos/modules/services/misc/osrm.nix86
-rw-r--r--nixpkgs/nixos/modules/services/misc/owncast.nix98
-rw-r--r--nixpkgs/nixos/modules/services/misc/packagekit.nix74
-rw-r--r--nixpkgs/nixos/modules/services/misc/paperless.nix387
-rw-r--r--nixpkgs/nixos/modules/services/misc/parsoid.nix129
-rw-r--r--nixpkgs/nixos/modules/services/misc/persistent-evdev.nix60
-rw-r--r--nixpkgs/nixos/modules/services/misc/pinnwand.nix122
-rw-r--r--nixpkgs/nixos/modules/services/misc/plex.nix181
-rw-r--r--nixpkgs/nixos/modules/services/misc/plikd.nix82
-rw-r--r--nixpkgs/nixos/modules/services/misc/podgrab.nix50
-rw-r--r--nixpkgs/nixos/modules/services/misc/polaris.nix151
-rw-r--r--nixpkgs/nixos/modules/services/misc/portunus.nix289
-rw-r--r--nixpkgs/nixos/modules/services/misc/prowlarr.nix43
-rw-r--r--nixpkgs/nixos/modules/services/misc/pufferpanel.nix176
-rw-r--r--nixpkgs/nixos/modules/services/misc/pykms.nix92
-rw-r--r--nixpkgs/nixos/modules/services/misc/radarr.nix83
-rw-r--r--nixpkgs/nixos/modules/services/misc/readarr.nix88
-rw-r--r--nixpkgs/nixos/modules/services/misc/redmine.nix441
-rw-r--r--nixpkgs/nixos/modules/services/misc/ripple-data-api.nix195
-rw-r--r--nixpkgs/nixos/modules/services/misc/rippled.nix438
-rw-r--r--nixpkgs/nixos/modules/services/misc/rkvm.nix164
-rw-r--r--nixpkgs/nixos/modules/services/misc/rmfakecloud.nix147
-rw-r--r--nixpkgs/nixos/modules/services/misc/rshim.nix99
-rw-r--r--nixpkgs/nixos/modules/services/misc/safeeyes.nix49
-rw-r--r--nixpkgs/nixos/modules/services/misc/sdrplay.nix35
-rw-r--r--nixpkgs/nixos/modules/services/misc/serviio.nix87
-rw-r--r--nixpkgs/nixos/modules/services/misc/sickbeard.nix95
-rw-r--r--nixpkgs/nixos/modules/services/misc/signald.nix105
-rw-r--r--nixpkgs/nixos/modules/services/misc/siproxd.nix179
-rw-r--r--nixpkgs/nixos/modules/services/misc/snapper.nix253
-rw-r--r--nixpkgs/nixos/modules/services/misc/soft-serve.nix99
-rw-r--r--nixpkgs/nixos/modules/services/misc/sonarr.nix85
-rw-r--r--nixpkgs/nixos/modules/services/misc/sourcehut/default.md93
-rw-r--r--nixpkgs/nixos/modules/services/misc/sourcehut/default.nix1373
-rw-r--r--nixpkgs/nixos/modules/services/misc/sourcehut/service.nix407
-rw-r--r--nixpkgs/nixos/modules/services/misc/spice-autorandr.nix26
-rw-r--r--nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix30
-rw-r--r--nixpkgs/nixos/modules/services/misc/spice-webdavd.nix38
-rw-r--r--nixpkgs/nixos/modules/services/misc/sssd.nix166
-rw-r--r--nixpkgs/nixos/modules/services/misc/subsonic.nix169
-rw-r--r--nixpkgs/nixos/modules/services/misc/sundtek.nix33
-rw-r--r--nixpkgs/nixos/modules/services/misc/svnserve.nix46
-rw-r--r--nixpkgs/nixos/modules/services/misc/synergy.nix149
-rw-r--r--nixpkgs/nixos/modules/services/misc/sysprof.nix19
-rw-r--r--nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix145
-rw-r--r--nixpkgs/nixos/modules/services/misc/taskserver/default.md93
-rw-r--r--nixpkgs/nixos/modules/services/misc/taskserver/default.nix570
-rw-r--r--nixpkgs/nixos/modules/services/misc/taskserver/helper-tool.py688
-rw-r--r--nixpkgs/nixos/modules/services/misc/tautulli.nix89
-rw-r--r--nixpkgs/nixos/modules/services/misc/tiddlywiki.nix52
-rw-r--r--nixpkgs/nixos/modules/services/misc/tp-auto-kbbl.nix58
-rw-r--r--nixpkgs/nixos/modules/services/misc/tzupdate.nix45
-rw-r--r--nixpkgs/nixos/modules/services/misc/uhub.nix116
-rw-r--r--nixpkgs/nixos/modules/services/misc/weechat.md46
-rw-r--r--nixpkgs/nixos/modules/services/misc/weechat.nix63
-rw-r--r--nixpkgs/nixos/modules/services/misc/xmr-stak.nix89
-rw-r--r--nixpkgs/nixos/modules/services/misc/xmrig.nix76
-rw-r--r--nixpkgs/nixos/modules/services/misc/zoneminder.nix378
-rw-r--r--nixpkgs/nixos/modules/services/misc/zookeeper.nix161
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/alerta.nix111
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/apcupsd.nix206
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/arbtt.nix56
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/below.nix108
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/bosun.nix159
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/cadvisor.nix138
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/certspotter.md74
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/certspotter.nix143
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/cockpit.nix231
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/collectd.nix166
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix34
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix302
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/do-agent.nix25
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix63
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/goss.md44
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/goss.nix86
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/grafana-agent.nix163
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/grafana-image-renderer.nix148
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix67
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/grafana.nix1893
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/graphite.nix428
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/hdaps.nix23
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/heapster.nix55
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/incron.nix103
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/kapacitor.nix188
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/karma.nix128
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/kthxbye.nix166
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/librenms.nix624
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/loki.nix116
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/longview.nix160
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/mackerel-agent.nix110
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/metricbeat.nix151
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/mimir.nix84
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/monit.nix48
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/munin.nix409
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/nagios.nix213
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/netdata.nix366
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/ocsinventory-agent.md33
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/ocsinventory-agent.nix134
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/opentelemetry-collector.nix73
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/osquery.nix97
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/parsedmarc.md112
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/parsedmarc.nix545
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager-irc-relay.nix107
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix203
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix1855
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md180
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix442
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix38
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix59
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bind.nix54
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bird.nix50
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix82
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix70
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix64
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix77
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix117
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix38
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/domain.nix19
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix92
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix55
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix41
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/flow.nix50
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix38
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/graphite.nix41
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/idrac.nix69
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix71
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix34
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix42
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix40
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix43
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix72
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/kea.nix47
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix19
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/knot.nix58
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix46
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mail.nix190
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix66
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix64
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix37
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix60
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix72
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix68
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix51
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix53
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nut.nix50
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openldap.nix67
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openvpn.nix39
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix145
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix65
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pihole.nix78
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix100
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix86
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/process.nix46
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pve.nix120
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix53
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/redis.nix19
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix97
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix83
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix47
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix33
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/script.nix64
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/shelly.nix27
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix64
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix61
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix68
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/sql.nix108
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix19
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix31
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/systemd.nix22
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix44
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix95
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix66
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix37
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix29
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix89
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix71
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/zfs.nix44
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix166
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/sachet.nix88
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix55
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix81
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix70
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/riemann.nix100
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/scollector.nix134
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/smartd.nix252
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/statsd.nix149
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/sysstat.nix76
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/teamviewer.nix49
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/telegraf.nix90
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/thanos.nix883
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/tremor-rs.nix129
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/tuptime.nix90
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/unpoller.nix322
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/ups.nix261
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/uptime-kuma.nix81
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/uptime.nix100
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/vmagent.nix110
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/vmalert.nix136
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/vnstat.nix60
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix178
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/zabbix-proxy.nix323
-rw-r--r--nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix320
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/cachefilesd.nix63
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/ceph.nix409
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/davfs2.nix93
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/diod.nix159
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/drbd.nix63
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/eris-server.nix103
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/glusterfs.nix209
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix118
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/kubo.nix428
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md52
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/litestream/default.nix99
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/moosefs.nix249
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/netatalk.nix95
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/nfsd.nix173
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/openafs/client.nix252
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/openafs/lib.nix33
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/openafs/server.nix319
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/orangefs/client.nix96
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/orangefs/server.nix225
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/rsyncd.nix127
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/samba-wsdd.nix129
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/samba.nix252
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/tahoe.nix366
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/u9fs.nix78
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/webdav-server-rs.nix150
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/webdav.nix105
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/xtreemfs.nix495
-rw-r--r--nixpkgs/nixos/modules/services/network-filesystems/yandex-disk.nix116
-rw-r--r--nixpkgs/nixos/modules/services/networking/3proxy.nix381
-rw-r--r--nixpkgs/nixos/modules/services/networking/acme-dns.nix154
-rw-r--r--nixpkgs/nixos/modules/services/networking/adguardhome.nix175
-rw-r--r--nixpkgs/nixos/modules/services/networking/alice-lg.nix101
-rw-r--r--nixpkgs/nixos/modules/services/networking/amuled.nix83
-rw-r--r--nixpkgs/nixos/modules/services/networking/antennas.nix80
-rw-r--r--nixpkgs/nixos/modules/services/networking/aria2.nix131
-rw-r--r--nixpkgs/nixos/modules/services/networking/asterisk.nix232
-rw-r--r--nixpkgs/nixos/modules/services/networking/atftpd.nix65
-rw-r--r--nixpkgs/nixos/modules/services/networking/autossh.nix113
-rw-r--r--nixpkgs/nixos/modules/services/networking/avahi-daemon.nix314
-rw-r--r--nixpkgs/nixos/modules/services/networking/babeld.nix144
-rw-r--r--nixpkgs/nixos/modules/services/networking/bee-clef.nix107
-rw-r--r--nixpkgs/nixos/modules/services/networking/bee.nix149
-rw-r--r--nixpkgs/nixos/modules/services/networking/biboumi.nix269
-rw-r--r--nixpkgs/nixos/modules/services/networking/bind.nix287
-rw-r--r--nixpkgs/nixos/modules/services/networking/bird-lg.nix319
-rw-r--r--nixpkgs/nixos/modules/services/networking/bird.nix102
-rw-r--r--nixpkgs/nixos/modules/services/networking/birdwatcher.nix129
-rw-r--r--nixpkgs/nixos/modules/services/networking/bitcoind.nix260
-rw-r--r--nixpkgs/nixos/modules/services/networking/bitlbee.nix190
-rw-r--r--nixpkgs/nixos/modules/services/networking/blockbook-frontend.nix278
-rw-r--r--nixpkgs/nixos/modules/services/networking/blocky.nix41
-rw-r--r--nixpkgs/nixos/modules/services/networking/cgit.nix205
-rw-r--r--nixpkgs/nixos/modules/services/networking/charybdis.nix114
-rw-r--r--nixpkgs/nixos/modules/services/networking/chisel-server.nix99
-rw-r--r--nixpkgs/nixos/modules/services/networking/cjdns.nix304
-rw-r--r--nixpkgs/nixos/modules/services/networking/cloudflare-dyndns.nix93
-rw-r--r--nixpkgs/nixos/modules/services/networking/cloudflared.nix331
-rw-r--r--nixpkgs/nixos/modules/services/networking/cntlm.nix126
-rw-r--r--nixpkgs/nixos/modules/services/networking/connman.nix155
-rw-r--r--nixpkgs/nixos/modules/services/networking/consul.nix285
-rw-r--r--nixpkgs/nixos/modules/services/networking/coredns.nix60
-rw-r--r--nixpkgs/nixos/modules/services/networking/corerad.nix82
-rw-r--r--nixpkgs/nixos/modules/services/networking/coturn.nix366
-rw-r--r--nixpkgs/nixos/modules/services/networking/create_ap.nix50
-rw-r--r--nixpkgs/nixos/modules/services/networking/croc.nix86
-rw-r--r--nixpkgs/nixos/modules/services/networking/dae.nix170
-rw-r--r--nixpkgs/nixos/modules/services/networking/dante.nix62
-rw-r--r--nixpkgs/nixos/modules/services/networking/ddclient.nix234
-rw-r--r--nixpkgs/nixos/modules/services/networking/deconz.nix125
-rw-r--r--nixpkgs/nixos/modules/services/networking/dhcpcd.nix272
-rw-r--r--nixpkgs/nixos/modules/services/networking/dnscache.nix108
-rw-r--r--nixpkgs/nixos/modules/services/networking/dnscrypt-proxy2.nix123
-rw-r--r--nixpkgs/nixos/modules/services/networking/dnscrypt-wrapper.nix275
-rw-r--r--nixpkgs/nixos/modules/services/networking/dnsdist.nix53
-rw-r--r--nixpkgs/nixos/modules/services/networking/dnsmasq.nix184
-rw-r--r--nixpkgs/nixos/modules/services/networking/doh-proxy-rust.nix60
-rw-r--r--nixpkgs/nixos/modules/services/networking/ejabberd.nix157
-rw-r--r--nixpkgs/nixos/modules/services/networking/envoy.nix101
-rw-r--r--nixpkgs/nixos/modules/services/networking/epmd.nix72
-rw-r--r--nixpkgs/nixos/modules/services/networking/ergo.nix143
-rw-r--r--nixpkgs/nixos/modules/services/networking/ergochat.nix155
-rw-r--r--nixpkgs/nixos/modules/services/networking/eternal-terminal.nix95
-rw-r--r--nixpkgs/nixos/modules/services/networking/expressvpn.nix29
-rw-r--r--nixpkgs/nixos/modules/services/networking/fakeroute.nix59
-rw-r--r--nixpkgs/nixos/modules/services/networking/fastnetmon-advanced.nix222
-rw-r--r--nixpkgs/nixos/modules/services/networking/ferm.nix63
-rw-r--r--nixpkgs/nixos/modules/services/networking/firefox-syncserver.md55
-rw-r--r--nixpkgs/nixos/modules/services/networking/firefox-syncserver.nix322
-rw-r--r--nixpkgs/nixos/modules/services/networking/fireqos.nix52
-rw-r--r--nixpkgs/nixos/modules/services/networking/firewall-iptables.nix334
-rw-r--r--nixpkgs/nixos/modules/services/networking/firewall-nftables.nix174
-rw-r--r--nixpkgs/nixos/modules/services/networking/firewall.nix290
-rw-r--r--nixpkgs/nixos/modules/services/networking/flannel.nix191
-rw-r--r--nixpkgs/nixos/modules/services/networking/freenet.nix64
-rw-r--r--nixpkgs/nixos/modules/services/networking/freeradius.nix86
-rw-r--r--nixpkgs/nixos/modules/services/networking/frp.nix93
-rw-r--r--nixpkgs/nixos/modules/services/networking/frr.nix221
-rw-r--r--nixpkgs/nixos/modules/services/networking/gateone.nix59
-rw-r--r--nixpkgs/nixos/modules/services/networking/gdomap.nix29
-rw-r--r--nixpkgs/nixos/modules/services/networking/ghostunnel.nix242
-rw-r--r--nixpkgs/nixos/modules/services/networking/git-daemon.nix131
-rw-r--r--nixpkgs/nixos/modules/services/networking/globalprotect-vpn.nix60
-rw-r--r--nixpkgs/nixos/modules/services/networking/gnunet.nix170
-rw-r--r--nixpkgs/nixos/modules/services/networking/go-autoconfig.nix66
-rw-r--r--nixpkgs/nixos/modules/services/networking/go-neb.nix78
-rw-r--r--nixpkgs/nixos/modules/services/networking/go-shadowsocks2.nix30
-rw-r--r--nixpkgs/nixos/modules/services/networking/gobgpd.nix64
-rw-r--r--nixpkgs/nixos/modules/services/networking/gvpe.nix130
-rw-r--r--nixpkgs/nixos/modules/services/networking/hans.nix145
-rw-r--r--nixpkgs/nixos/modules/services/networking/haproxy.nix107
-rw-r--r--nixpkgs/nixos/modules/services/networking/harmonia.nix90
-rw-r--r--nixpkgs/nixos/modules/services/networking/headscale.nix531
-rw-r--r--nixpkgs/nixos/modules/services/networking/helpers.nix11
-rw-r--r--nixpkgs/nixos/modules/services/networking/hostapd.nix1257
-rw-r--r--nixpkgs/nixos/modules/services/networking/htpdate.nix80
-rw-r--r--nixpkgs/nixos/modules/services/networking/https-dns-proxy.nix138
-rw-r--r--nixpkgs/nixos/modules/services/networking/hylafax/default.nix31
-rw-r--r--nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix12
-rwxr-xr-xnixpkgs/nixos/modules/services/networking/hylafax/faxq-wait.sh29
-rw-r--r--nixpkgs/nixos/modules/services/networking/hylafax/hfaxd-default.nix10
-rw-r--r--nixpkgs/nixos/modules/services/networking/hylafax/modem-default.nix22
-rw-r--r--nixpkgs/nixos/modules/services/networking/hylafax/options.nix372
-rwxr-xr-xnixpkgs/nixos/modules/services/networking/hylafax/spool.sh111
-rw-r--r--nixpkgs/nixos/modules/services/networking/hylafax/systemd.nix249
-rw-r--r--nixpkgs/nixos/modules/services/networking/i2p.nix34
-rw-r--r--nixpkgs/nixos/modules/services/networking/i2pd.nix695
-rw-r--r--nixpkgs/nixos/modules/services/networking/icecream/daemon.nix155
-rw-r--r--nixpkgs/nixos/modules/services/networking/icecream/scheduler.nix101
-rw-r--r--nixpkgs/nixos/modules/services/networking/imaginary.nix113
-rw-r--r--nixpkgs/nixos/modules/services/networking/inspircd.nix62
-rw-r--r--nixpkgs/nixos/modules/services/networking/iodine.nix198
-rw-r--r--nixpkgs/nixos/modules/services/networking/iperf3.nix97
-rw-r--r--nixpkgs/nixos/modules/services/networking/ircd-hybrid/builder.sh32
-rw-r--r--nixpkgs/nixos/modules/services/networking/ircd-hybrid/control.in26
-rw-r--r--nixpkgs/nixos/modules/services/networking/ircd-hybrid/default.nix133
-rw-r--r--nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf1051
-rw-r--r--nixpkgs/nixos/modules/services/networking/iscsi/initiator.nix86
-rw-r--r--nixpkgs/nixos/modules/services/networking/iscsi/root-initiator.nix194
-rw-r--r--nixpkgs/nixos/modules/services/networking/iscsi/target.nix53
-rw-r--r--nixpkgs/nixos/modules/services/networking/ivpn.nix51
-rw-r--r--nixpkgs/nixos/modules/services/networking/iwd.nix80
-rw-r--r--nixpkgs/nixos/modules/services/networking/jibri/default.nix417
-rw-r--r--nixpkgs/nixos/modules/services/networking/jibri/logging.properties-journal32
-rw-r--r--nixpkgs/nixos/modules/services/networking/jicofo.nix166
-rw-r--r--nixpkgs/nixos/modules/services/networking/jitsi-videobridge.nix293
-rw-r--r--nixpkgs/nixos/modules/services/networking/jool.nix281
-rw-r--r--nixpkgs/nixos/modules/services/networking/kea.nix452
-rw-r--r--nixpkgs/nixos/modules/services/networking/keepalived/default.nix323
-rw-r--r--nixpkgs/nixos/modules/services/networking/keepalived/virtual-ip-options.nix50
-rw-r--r--nixpkgs/nixos/modules/services/networking/keepalived/vrrp-instance-options.nix133
-rw-r--r--nixpkgs/nixos/modules/services/networking/keepalived/vrrp-script-options.nix64
-rw-r--r--nixpkgs/nixos/modules/services/networking/keybase.nix47
-rw-r--r--nixpkgs/nixos/modules/services/networking/knot.nix274
-rw-r--r--nixpkgs/nixos/modules/services/networking/kresd.nix151
-rw-r--r--nixpkgs/nixos/modules/services/networking/lambdabot.nix82
-rw-r--r--nixpkgs/nixos/modules/services/networking/legit.nix182
-rw-r--r--nixpkgs/nixos/modules/services/networking/libreswan.nix160
-rw-r--r--nixpkgs/nixos/modules/services/networking/lldpd.nix39
-rw-r--r--nixpkgs/nixos/modules/services/networking/logmein-hamachi.nix50
-rw-r--r--nixpkgs/nixos/modules/services/networking/lokinet.nix157
-rw-r--r--nixpkgs/nixos/modules/services/networking/lxd-image-server.nix133
-rw-r--r--nixpkgs/nixos/modules/services/networking/magic-wormhole-mailbox-server.nix28
-rw-r--r--nixpkgs/nixos/modules/services/networking/matterbridge.nix120
-rw-r--r--nixpkgs/nixos/modules/services/networking/minidlna.nix148
-rw-r--r--nixpkgs/nixos/modules/services/networking/miniupnpd.nix79
-rw-r--r--nixpkgs/nixos/modules/services/networking/miredo.nix92
-rw-r--r--nixpkgs/nixos/modules/services/networking/mjpg-streamer.nix80
-rw-r--r--nixpkgs/nixos/modules/services/networking/mmsd.nix38
-rw-r--r--nixpkgs/nixos/modules/services/networking/monero.nix244
-rw-r--r--nixpkgs/nixos/modules/services/networking/morty.nix98
-rw-r--r--nixpkgs/nixos/modules/services/networking/mosquitto.md102
-rw-r--r--nixpkgs/nixos/modules/services/networking/mosquitto.nix723
-rw-r--r--nixpkgs/nixos/modules/services/networking/mozillavpn.nix14
-rw-r--r--nixpkgs/nixos/modules/services/networking/mstpd.nix33
-rw-r--r--nixpkgs/nixos/modules/services/networking/mtprotoproxy.nix110
-rw-r--r--nixpkgs/nixos/modules/services/networking/mtr-exporter.nix138
-rw-r--r--nixpkgs/nixos/modules/services/networking/mullvad-vpn.nix82
-rw-r--r--nixpkgs/nixos/modules/services/networking/multipath.nix557
-rw-r--r--nixpkgs/nixos/modules/services/networking/murmur.nix391
-rw-r--r--nixpkgs/nixos/modules/services/networking/mxisd.nix142
-rw-r--r--nixpkgs/nixos/modules/services/networking/namecoind.nix199
-rw-r--r--nixpkgs/nixos/modules/services/networking/nar-serve.nix55
-rw-r--r--nixpkgs/nixos/modules/services/networking/nat-iptables.nix191
-rw-r--r--nixpkgs/nixos/modules/services/networking/nat-nftables.nix184
-rw-r--r--nixpkgs/nixos/modules/services/networking/nat.nix196
-rw-r--r--nixpkgs/nixos/modules/services/networking/nats.nix158
-rw-r--r--nixpkgs/nixos/modules/services/networking/nbd.nix157
-rw-r--r--nixpkgs/nixos/modules/services/networking/ncdns.nix283
-rw-r--r--nixpkgs/nixos/modules/services/networking/ndppd.nix189
-rw-r--r--nixpkgs/nixos/modules/services/networking/nebula.nix253
-rw-r--r--nixpkgs/nixos/modules/services/networking/netbird.nix65
-rw-r--r--nixpkgs/nixos/modules/services/networking/netclient.nix27
-rw-r--r--nixpkgs/nixos/modules/services/networking/networkd-dispatcher.nix98
-rw-r--r--nixpkgs/nixos/modules/services/networking/networkmanager.nix652
-rw-r--r--nixpkgs/nixos/modules/services/networking/nextdns.nix44
-rw-r--r--nixpkgs/nixos/modules/services/networking/nftables.nix320
-rw-r--r--nixpkgs/nixos/modules/services/networking/nghttpx/backend-params-submodule.nix131
-rw-r--r--nixpkgs/nixos/modules/services/networking/nghttpx/backend-submodule.nix50
-rw-r--r--nixpkgs/nixos/modules/services/networking/nghttpx/default.nix118
-rw-r--r--nixpkgs/nixos/modules/services/networking/nghttpx/frontend-params-submodule.nix64
-rw-r--r--nixpkgs/nixos/modules/services/networking/nghttpx/frontend-submodule.nix36
-rw-r--r--nixpkgs/nixos/modules/services/networking/nghttpx/nghttpx-options.nix142
-rw-r--r--nixpkgs/nixos/modules/services/networking/nghttpx/server-options.nix18
-rw-r--r--nixpkgs/nixos/modules/services/networking/nghttpx/tls-submodule.nix21
-rw-r--r--nixpkgs/nixos/modules/services/networking/ngircd.nix62
-rw-r--r--nixpkgs/nixos/modules/services/networking/nix-serve.nix102
-rw-r--r--nixpkgs/nixos/modules/services/networking/nix-store-gcs-proxy.nix75
-rw-r--r--nixpkgs/nixos/modules/services/networking/nixops-dns.nix78
-rw-r--r--nixpkgs/nixos/modules/services/networking/nncp.nix131
-rw-r--r--nixpkgs/nixos/modules/services/networking/nntp-proxy.nix234
-rw-r--r--nixpkgs/nixos/modules/services/networking/nomad.nix198
-rw-r--r--nixpkgs/nixos/modules/services/networking/nsd.nix991
-rw-r--r--nixpkgs/nixos/modules/services/networking/ntopng.nix160
-rw-r--r--nixpkgs/nixos/modules/services/networking/ntp/chrony.nix274
-rw-r--r--nixpkgs/nixos/modules/services/networking/ntp/ntpd.nix147
-rw-r--r--nixpkgs/nixos/modules/services/networking/ntp/openntpd.nix85
-rw-r--r--nixpkgs/nixos/modules/services/networking/nullidentdmod.nix34
-rw-r--r--nixpkgs/nixos/modules/services/networking/nylon.nix166
-rw-r--r--nixpkgs/nixos/modules/services/networking/ocserv.nix99
-rw-r--r--nixpkgs/nixos/modules/services/networking/ofono.nix44
-rw-r--r--nixpkgs/nixos/modules/services/networking/oidentd.nix44
-rw-r--r--nixpkgs/nixos/modules/services/networking/onedrive.nix67
-rw-r--r--nixpkgs/nixos/modules/services/networking/onedrive.xml34
-rw-r--r--nixpkgs/nixos/modules/services/networking/openconnect.nix145
-rw-r--r--nixpkgs/nixos/modules/services/networking/openvpn.nix235
-rw-r--r--nixpkgs/nixos/modules/services/networking/ostinato.nix104
-rw-r--r--nixpkgs/nixos/modules/services/networking/owamp.nix45
-rw-r--r--nixpkgs/nixos/modules/services/networking/pdns-recursor.nix213
-rw-r--r--nixpkgs/nixos/modules/services/networking/pdnsd.nix91
-rw-r--r--nixpkgs/nixos/modules/services/networking/peroxide.nix131
-rw-r--r--nixpkgs/nixos/modules/services/networking/picosnitch.nix26
-rw-r--r--nixpkgs/nixos/modules/services/networking/pixiecore.nix143
-rw-r--r--nixpkgs/nixos/modules/services/networking/pleroma.md180
-rw-r--r--nixpkgs/nixos/modules/services/networking/pleroma.nix151
-rw-r--r--nixpkgs/nixos/modules/services/networking/polipo.nix108
-rw-r--r--nixpkgs/nixos/modules/services/networking/powerdns.nix69
-rw-r--r--nixpkgs/nixos/modules/services/networking/pppd.nix154
-rw-r--r--nixpkgs/nixos/modules/services/networking/pptpd.nix124
-rw-r--r--nixpkgs/nixos/modules/services/networking/privoxy.nix281
-rw-r--r--nixpkgs/nixos/modules/services/networking/prosody.md72
-rw-r--r--nixpkgs/nixos/modules/services/networking/prosody.nix905
-rw-r--r--nixpkgs/nixos/modules/services/networking/quassel.nix139
-rw-r--r--nixpkgs/nixos/modules/services/networking/quicktun.nix118
-rw-r--r--nixpkgs/nixos/modules/services/networking/quorum.nix231
-rw-r--r--nixpkgs/nixos/modules/services/networking/r53-ddns.nix72
-rw-r--r--nixpkgs/nixos/modules/services/networking/radicale.nix204
-rw-r--r--nixpkgs/nixos/modules/services/networking/radvd.nix86
-rw-r--r--nixpkgs/nixos/modules/services/networking/rdnssd.nix82
-rw-r--r--nixpkgs/nixos/modules/services/networking/redsocks.nix273
-rw-r--r--nixpkgs/nixos/modules/services/networking/resilio.nix295
-rw-r--r--nixpkgs/nixos/modules/services/networking/robustirc-bridge.nix47
-rw-r--r--nixpkgs/nixos/modules/services/networking/rosenpass.nix233
-rw-r--r--nixpkgs/nixos/modules/services/networking/routedns.nix84
-rw-r--r--nixpkgs/nixos/modules/services/networking/rpcbind.nix56
-rw-r--r--nixpkgs/nixos/modules/services/networking/rxe.nix52
-rw-r--r--nixpkgs/nixos/modules/services/networking/sabnzbd.nix77
-rw-r--r--nixpkgs/nixos/modules/services/networking/seafile.nix297
-rw-r--r--nixpkgs/nixos/modules/services/networking/searx.nix277
-rw-r--r--nixpkgs/nixos/modules/services/networking/shadowsocks.nix158
-rw-r--r--nixpkgs/nixos/modules/services/networking/shairport-sync.nix112
-rw-r--r--nixpkgs/nixos/modules/services/networking/shellhub-agent.nix100
-rw-r--r--nixpkgs/nixos/modules/services/networking/shorewall.nix69
-rw-r--r--nixpkgs/nixos/modules/services/networking/shorewall6.nix69
-rw-r--r--nixpkgs/nixos/modules/services/networking/shout.nix115
-rw-r--r--nixpkgs/nixos/modules/services/networking/sing-box.nix67
-rw-r--r--nixpkgs/nixos/modules/services/networking/sitespeed-io.nix122
-rw-r--r--nixpkgs/nixos/modules/services/networking/skydns.nix93
-rw-r--r--nixpkgs/nixos/modules/services/networking/smartdns.nix62
-rw-r--r--nixpkgs/nixos/modules/services/networking/smokeping.nix374
-rw-r--r--nixpkgs/nixos/modules/services/networking/sniproxy.nix88
-rw-r--r--nixpkgs/nixos/modules/services/networking/snowflake-proxy.nix81
-rw-r--r--nixpkgs/nixos/modules/services/networking/softether.nix163
-rw-r--r--nixpkgs/nixos/modules/services/networking/soju.nix124
-rw-r--r--nixpkgs/nixos/modules/services/networking/solanum.nix109
-rw-r--r--nixpkgs/nixos/modules/services/networking/spacecookie.nix216
-rw-r--r--nixpkgs/nixos/modules/services/networking/spiped.nix221
-rw-r--r--nixpkgs/nixos/modules/services/networking/squid.nix187
-rw-r--r--nixpkgs/nixos/modules/services/networking/ssh/lshd.nix187
-rw-r--r--nixpkgs/nixos/modules/services/networking/ssh/sshd.nix709
-rw-r--r--nixpkgs/nixos/modules/services/networking/sslh.nix227
-rw-r--r--nixpkgs/nixos/modules/services/networking/strongswan-swanctl/module.nix84
-rw-r--r--nixpkgs/nixos/modules/services/networking/strongswan-swanctl/param-constructors.nix163
-rw-r--r--nixpkgs/nixos/modules/services/networking/strongswan-swanctl/param-lib.nix82
-rw-r--r--nixpkgs/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix1265
-rw-r--r--nixpkgs/nixos/modules/services/networking/strongswan.nix170
-rw-r--r--nixpkgs/nixos/modules/services/networking/stubby.nix103
-rw-r--r--nixpkgs/nixos/modules/services/networking/stunnel.nix192
-rw-r--r--nixpkgs/nixos/modules/services/networking/supplicant.nix240
-rw-r--r--nixpkgs/nixos/modules/services/networking/supybot.nix163
-rw-r--r--nixpkgs/nixos/modules/services/networking/syncplay.nix130
-rw-r--r--nixpkgs/nixos/modules/services/networking/syncthing-relay.nix121
-rw-r--r--nixpkgs/nixos/modules/services/networking/syncthing.nix713
-rw-r--r--nixpkgs/nixos/modules/services/networking/tailscale.nix138
-rw-r--r--nixpkgs/nixos/modules/services/networking/tayga.nix195
-rw-r--r--nixpkgs/nixos/modules/services/networking/tcpcrypt.nix80
-rw-r--r--nixpkgs/nixos/modules/services/networking/teamspeak3.nix161
-rw-r--r--nixpkgs/nixos/modules/services/networking/teleport.nix107
-rw-r--r--nixpkgs/nixos/modules/services/networking/tetrd.nix96
-rw-r--r--nixpkgs/nixos/modules/services/networking/tftpd.nix46
-rw-r--r--nixpkgs/nixos/modules/services/networking/thelounge.nix110
-rw-r--r--nixpkgs/nixos/modules/services/networking/tinc.nix442
-rw-r--r--nixpkgs/nixos/modules/services/networking/tinydns.nix59
-rw-r--r--nixpkgs/nixos/modules/services/networking/tinyproxy.nix103
-rw-r--r--nixpkgs/nixos/modules/services/networking/tmate-ssh-server.nix122
-rw-r--r--nixpkgs/nixos/modules/services/networking/tox-bootstrapd.nix74
-rw-r--r--nixpkgs/nixos/modules/services/networking/tox-node.nix90
-rw-r--r--nixpkgs/nixos/modules/services/networking/toxvpn.nix70
-rw-r--r--nixpkgs/nixos/modules/services/networking/trickster.nix125
-rw-r--r--nixpkgs/nixos/modules/services/networking/trust-dns.nix176
-rw-r--r--nixpkgs/nixos/modules/services/networking/tvheadend.nix63
-rw-r--r--nixpkgs/nixos/modules/services/networking/twingate.nix24
-rw-r--r--nixpkgs/nixos/modules/services/networking/ucarp.nix183
-rw-r--r--nixpkgs/nixos/modules/services/networking/unbound.nix311
-rw-r--r--nixpkgs/nixos/modules/services/networking/unifi.nix210
-rw-r--r--nixpkgs/nixos/modules/services/networking/uptermd.nix109
-rw-r--r--nixpkgs/nixos/modules/services/networking/v2ray.nix97
-rw-r--r--nixpkgs/nixos/modules/services/networking/v2raya.nix50
-rw-r--r--nixpkgs/nixos/modules/services/networking/vdirsyncer.nix214
-rw-r--r--nixpkgs/nixos/modules/services/networking/vsftpd.nix330
-rw-r--r--nixpkgs/nixos/modules/services/networking/wasabibackend.nix160
-rw-r--r--nixpkgs/nixos/modules/services/networking/webhook.nix214
-rw-r--r--nixpkgs/nixos/modules/services/networking/websockify.nix54
-rw-r--r--nixpkgs/nixos/modules/services/networking/wg-netmanager.nix42
-rw-r--r--nixpkgs/nixos/modules/services/networking/wg-quick.nix345
-rw-r--r--nixpkgs/nixos/modules/services/networking/wgautomesh.nix163
-rw-r--r--nixpkgs/nixos/modules/services/networking/wireguard.nix602
-rw-r--r--nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix534
-rw-r--r--nixpkgs/nixos/modules/services/networking/wstunnel.nix429
-rw-r--r--nixpkgs/nixos/modules/services/networking/x2goserver.nix164
-rw-r--r--nixpkgs/nixos/modules/services/networking/xandikos.nix148
-rw-r--r--nixpkgs/nixos/modules/services/networking/xinetd.nix147
-rw-r--r--nixpkgs/nixos/modules/services/networking/xl2tpd.nix143
-rw-r--r--nixpkgs/nixos/modules/services/networking/xray.nix99
-rw-r--r--nixpkgs/nixos/modules/services/networking/xrdp.nix185
-rw-r--r--nixpkgs/nixos/modules/services/networking/yggdrasil.md141
-rw-r--r--nixpkgs/nixos/modules/services/networking/yggdrasil.nix234
-rw-r--r--nixpkgs/nixos/modules/services/networking/zerobin.nix101
-rw-r--r--nixpkgs/nixos/modules/services/networking/zeronet.nix101
-rw-r--r--nixpkgs/nixos/modules/services/networking/zerotierone.nix83
-rw-r--r--nixpkgs/nixos/modules/services/networking/znc/default.nix328
-rw-r--r--nixpkgs/nixos/modules/services/networking/znc/options.nix269
-rw-r--r--nixpkgs/nixos/modules/services/printing/cups-pdf.nix185
-rw-r--r--nixpkgs/nixos/modules/services/printing/cupsd.nix492
-rw-r--r--nixpkgs/nixos/modules/services/printing/ipp-usb.nix63
-rw-r--r--nixpkgs/nixos/modules/services/scheduling/atd.nix106
-rw-r--r--nixpkgs/nixos/modules/services/scheduling/cron.nix138
-rw-r--r--nixpkgs/nixos/modules/services/scheduling/fcron.nix170
-rw-r--r--nixpkgs/nixos/modules/services/search/elasticsearch-curator.nix95
-rw-r--r--nixpkgs/nixos/modules/services/search/elasticsearch.nix239
-rw-r--r--nixpkgs/nixos/modules/services/search/hound.nix126
-rw-r--r--nixpkgs/nixos/modules/services/search/meilisearch.md39
-rw-r--r--nixpkgs/nixos/modules/services/search/meilisearch.nix130
-rw-r--r--nixpkgs/nixos/modules/services/search/opensearch.nix267
-rw-r--r--nixpkgs/nixos/modules/services/search/qdrant.nix129
-rw-r--r--nixpkgs/nixos/modules/services/search/typesense.nix125
-rw-r--r--nixpkgs/nixos/modules/services/security/aesmd.nix251
-rw-r--r--nixpkgs/nixos/modules/services/security/authelia.nix401
-rw-r--r--nixpkgs/nixos/modules/services/security/certmgr.nix201
-rw-r--r--nixpkgs/nixos/modules/services/security/cfssl.nix222
-rw-r--r--nixpkgs/nixos/modules/services/security/clamav.nix151
-rw-r--r--nixpkgs/nixos/modules/services/security/endlessh-go.nix138
-rw-r--r--nixpkgs/nixos/modules/services/security/endlessh.nix99
-rw-r--r--nixpkgs/nixos/modules/services/security/esdm.nix102
-rw-r--r--nixpkgs/nixos/modules/services/security/fail2ban.nix414
-rw-r--r--nixpkgs/nixos/modules/services/security/fprintd.nix64
-rw-r--r--nixpkgs/nixos/modules/services/security/haka.nix156
-rw-r--r--nixpkgs/nixos/modules/services/security/haveged.nix77
-rw-r--r--nixpkgs/nixos/modules/services/security/hockeypuck.nix106
-rw-r--r--nixpkgs/nixos/modules/services/security/hologram-agent.nix58
-rw-r--r--nixpkgs/nixos/modules/services/security/hologram-server.nix130
-rw-r--r--nixpkgs/nixos/modules/services/security/infnoise.nix60
-rw-r--r--nixpkgs/nixos/modules/services/security/jitterentropy-rngd.nix18
-rw-r--r--nixpkgs/nixos/modules/services/security/kanidm.nix385
-rw-r--r--nixpkgs/nixos/modules/services/security/munge.nix68
-rw-r--r--nixpkgs/nixos/modules/services/security/nginx-sso.nix67
-rw-r--r--nixpkgs/nixos/modules/services/security/oauth2_proxy.nix593
-rw-r--r--nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix66
-rw-r--r--nixpkgs/nixos/modules/services/security/opensnitch.nix190
-rw-r--r--nixpkgs/nixos/modules/services/security/pass-secret-service.nix27
-rw-r--r--nixpkgs/nixos/modules/services/security/physlock.nix147
-rw-r--r--nixpkgs/nixos/modules/services/security/shibboleth-sp.nix75
-rw-r--r--nixpkgs/nixos/modules/services/security/sks.nix146
-rw-r--r--nixpkgs/nixos/modules/services/security/sshguard.nix161
-rw-r--r--nixpkgs/nixos/modules/services/security/sslmate-agent.nix32
-rw-r--r--nixpkgs/nixos/modules/services/security/step-ca.nix142
-rw-r--r--nixpkgs/nixos/modules/services/security/tang.nix95
-rw-r--r--nixpkgs/nixos/modules/services/security/tor.nix1031
-rw-r--r--nixpkgs/nixos/modules/services/security/torify.nix80
-rw-r--r--nixpkgs/nixos/modules/services/security/torsocks.nix121
-rw-r--r--nixpkgs/nixos/modules/services/security/usbguard.nix265
-rw-r--r--nixpkgs/nixos/modules/services/security/vault-agent.nix128
-rw-r--r--nixpkgs/nixos/modules/services/security/vault.nix234
-rw-r--r--nixpkgs/nixos/modules/services/security/vaultwarden/backup.sh17
-rw-r--r--nixpkgs/nixos/modules/services/security/vaultwarden/default.nix242
-rw-r--r--nixpkgs/nixos/modules/services/security/yubikey-agent.nix69
-rw-r--r--nixpkgs/nixos/modules/services/system/automatic-timezoned.nix92
-rw-r--r--nixpkgs/nixos/modules/services/system/bpftune.nix22
-rw-r--r--nixpkgs/nixos/modules/services/system/cachix-agent/default.nix80
-rw-r--r--nixpkgs/nixos/modules/services/system/cachix-watch-store.nix93
-rw-r--r--nixpkgs/nixos/modules/services/system/cloud-init.nix239
-rw-r--r--nixpkgs/nixos/modules/services/system/dbus.nix210
-rw-r--r--nixpkgs/nixos/modules/services/system/earlyoom.nix160
-rw-r--r--nixpkgs/nixos/modules/services/system/kerberos/default.nix75
-rw-r--r--nixpkgs/nixos/modules/services/system/kerberos/heimdal.nix68
-rw-r--r--nixpkgs/nixos/modules/services/system/kerberos/mit.nix68
-rw-r--r--nixpkgs/nixos/modules/services/system/localtimed.nix66
-rw-r--r--nixpkgs/nixos/modules/services/system/nix-daemon.nix259
-rw-r--r--nixpkgs/nixos/modules/services/system/nscd.conf34
-rw-r--r--nixpkgs/nixos/modules/services/system/nscd.nix153
-rw-r--r--nixpkgs/nixos/modules/services/system/saslauthd.nix62
-rw-r--r--nixpkgs/nixos/modules/services/system/self-deploy.nix177
-rw-r--r--nixpkgs/nixos/modules/services/system/systembus-notify.nix27
-rw-r--r--nixpkgs/nixos/modules/services/system/uptimed.nix60
-rw-r--r--nixpkgs/nixos/modules/services/system/zram-generator.nix38
-rw-r--r--nixpkgs/nixos/modules/services/torrent/deluge.nix279
-rw-r--r--nixpkgs/nixos/modules/services/torrent/flexget.nix101
-rw-r--r--nixpkgs/nixos/modules/services/torrent/magnetico.nix218
-rw-r--r--nixpkgs/nixos/modules/services/torrent/opentracker.nix45
-rw-r--r--nixpkgs/nixos/modules/services/torrent/peerflix.nix71
-rw-r--r--nixpkgs/nixos/modules/services/torrent/rtorrent.nix220
-rw-r--r--nixpkgs/nixos/modules/services/torrent/transmission.nix497
-rw-r--r--nixpkgs/nixos/modules/services/tracing/tempo.nix80
-rw-r--r--nixpkgs/nixos/modules/services/ttys/getty.nix161
-rw-r--r--nixpkgs/nixos/modules/services/ttys/gpm.nix57
-rw-r--r--nixpkgs/nixos/modules/services/ttys/kmscon.nix117
-rw-r--r--nixpkgs/nixos/modules/services/video/epgstation/default.nix346
-rw-r--r--nixpkgs/nixos/modules/services/video/epgstation/streaming.json140
-rw-r--r--nixpkgs/nixos/modules/services/video/frigate.nix381
-rw-r--r--nixpkgs/nixos/modules/services/video/go2rtc/default.nix115
-rw-r--r--nixpkgs/nixos/modules/services/video/mediamtx.nix67
-rw-r--r--nixpkgs/nixos/modules/services/video/mirakurun.nix207
-rw-r--r--nixpkgs/nixos/modules/services/video/replay-sorcery.nix72
-rw-r--r--nixpkgs/nixos/modules/services/video/unifi-video.nix271
-rw-r--r--nixpkgs/nixos/modules/services/video/v4l2-relayd.nix199
-rw-r--r--nixpkgs/nixos/modules/services/wayland/cage.nix113
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/akkoma.md332
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/akkoma.nix1093
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/alps.nix132
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/anuko-time-tracker.nix388
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/atlassian/confluence.nix228
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/atlassian/crowd.nix197
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/atlassian/jira.nix223
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/audiobookshelf.nix90
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/bookstack.nix446
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md42
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.nix125
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/calibre-web.nix170
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/changedetection-io.nix220
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/chatgpt-retrieval-plugin.nix106
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/cloudlog.nix503
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/coder.nix215
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/convos.nix72
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/dex.nix132
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/discourse.md286
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/discourse.nix1093
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/documize.nix137
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix602
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/dolibarr.nix323
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/engelsystem.nix187
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/ethercalc.nix62
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/fluidd.nix66
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/freshrss.nix312
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/galene.nix214
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/gerrit.nix242
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/gotify-server.nix49
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/gotosocial.md64
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/gotosocial.nix171
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/grocy.md66
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/grocy.nix184
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/guacamole-client.nix60
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/guacamole-server.nix83
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/healthchecks.nix276
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/hedgedoc.nix321
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/hledger-web.nix142
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/honk.md23
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/honk.nix153
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix262
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix157
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/invidious.nix313
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/invoiceplane.nix358
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/isso.nix91
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/jirafeau.nix173
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md45
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/jitsi-meet.nix599
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/kasmweb/default.nix275
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/kasmweb/initialize_kasmweb.sh114
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/kavita.nix83
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/keycloak.md141
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/keycloak.nix679
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/komga.nix99
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/lanraragi.nix100
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/lemmy.md31
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/lemmy.nix314
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/limesurvey.nix309
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/mainsail.nix66
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/mastodon.nix873
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/matomo.md77
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/matomo.nix331
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/mattermost.nix360
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/mediawiki.nix647
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/meme-bingo-web.nix93
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/microbin.nix93
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/miniflux.nix141
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/mobilizon.nix449
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/monica.nix468
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/moodle.nix318
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/netbox.nix389
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/nextcloud-notify_push.nix123
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/nextcloud.md221
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/nextcloud.nix1201
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/nexus.nix152
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/nifi.nix316
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/node-red.nix148
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/onlyoffice.nix296
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/openvscode-server.nix212
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/openwebrx.nix38
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/outline.nix758
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/peering-manager.nix343
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/peertube.nix861
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/pgpkeyserver-lite.nix78
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/photoprism.nix155
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/phylactery.nix51
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/pict-rs.md89
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/pict-rs.nix106
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/pixelfed.nix482
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/plantuml-server.nix154
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/plausible.md35
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/plausible.nix331
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/powerdns-admin.nix153
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/prosody-filer.nix86
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/restya-board.nix380
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/rimgo.nix107
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/rss-bridge.nix125
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/selfoss.nix164
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/sftpgo.nix375
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/shiori.nix103
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/slskd.nix211
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/snipe-it.nix515
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/sogo.nix271
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/trilium.nix155
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/tt-rss.nix658
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/vikunja.nix155
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/whitebophir.nix52
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/wiki-js.nix142
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/wordpress.nix573
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/writefreely.nix484
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/youtrack.nix181
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/zabbix.nix238
-rw-r--r--nixpkgs/nixos/modules/services/web-apps/zitadel.nix223
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/agate.nix148
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix842
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/apache-httpd/location-options.nix54
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/apache-httpd/vhost-options.nix291
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/caddy/default.nix413
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/caddy/vhost-options.nix77
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/darkhttpd.nix77
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/fcgiwrap.nix74
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/garage.md96
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/garage.nix100
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/hitch/default.nix111
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/hydron.nix164
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/jboss/builder.sh73
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/jboss/default.nix88
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/keter/bundle.nix40
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/keter/default.nix191
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/lighttpd/cgit.nix93
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/lighttpd/collectd.nix62
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/lighttpd/default.nix269
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/lighttpd/gitweb.nix52
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/merecat.nix55
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix132
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/minio.nix163
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/molly-brown.nix101
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/nginx/default.nix1359
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix94
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/nginx/location-options.nix141
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix358
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/phpfpm/default.nix285
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/pomerium.nix135
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/rustus.nix256
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/stargazer.nix224
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/static-web-server.nix68
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/tomcat.nix398
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/traefik.nix191
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/trafficserver/default.nix310
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/trafficserver/ip_allow.json36
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/trafficserver/logging.json37
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/ttyd.nix197
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/unit/default.nix155
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/uwsgi.nix233
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/varnish/default.nix115
-rw-r--r--nixpkgs/nixos/modules/services/web-servers/zope2.nix262
-rw-r--r--nixpkgs/nixos/modules/services/x11/clight.nix125
-rw-r--r--nixpkgs/nixos/modules/services/x11/colord.nix41
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/budgie.nix250
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/cde.nix73
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/cinnamon.nix255
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix208
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix101
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/enlightenment.nix124
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md167
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.nix570
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix41
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/lumina.nix46
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/lxqt.nix75
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/mate.nix83
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/none.nix46
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md74
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix326
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/phosh.nix223
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/plasma5.nix564
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/retroarch.nix40
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/surf-display.nix128
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix182
-rw-r--r--nixpkgs/nixos/modules/services/x11/desktop-managers/xterm.nix38
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/account-service-util.nix44
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/default.nix530
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix330
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix140
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix174
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix100
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/mobile.nix26
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix49
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/slick.nix149
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/tiny.nix90
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/lightdm.nix329
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix316
-rwxr-xr-xnixpkgs/nixos/modules/services/x11/display-managers/set-session.py89
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/slim.nix16
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/startx.nix54
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/sx.nix34
-rw-r--r--nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix259
-rw-r--r--nixpkgs/nixos/modules/services/x11/extra-layouts.nix143
-rw-r--r--nixpkgs/nixos/modules/services/x11/fractalart.nix36
-rw-r--r--nixpkgs/nixos/modules/services/x11/gdk-pixbuf.nix28
-rw-r--r--nixpkgs/nixos/modules/services/x11/hardware/cmt.nix59
-rw-r--r--nixpkgs/nixos/modules/services/x11/hardware/digimend.nix38
-rw-r--r--nixpkgs/nixos/modules/services/x11/hardware/libinput.nix304
-rw-r--r--nixpkgs/nixos/modules/services/x11/hardware/synaptics.nix218
-rw-r--r--nixpkgs/nixos/modules/services/x11/hardware/wacom.nix48
-rw-r--r--nixpkgs/nixos/modules/services/x11/imwheel.nix71
-rw-r--r--nixpkgs/nixos/modules/services/x11/picom.nix317
-rw-r--r--nixpkgs/nixos/modules/services/x11/redshift.nix138
-rw-r--r--nixpkgs/nixos/modules/services/x11/terminal-server.nix56
-rw-r--r--nixpkgs/nixos/modules/services/x11/touchegg.nix38
-rw-r--r--nixpkgs/nixos/modules/services/x11/unclutter-xfixes.nix58
-rw-r--r--nixpkgs/nixos/modules/services/x11/unclutter.nix82
-rw-r--r--nixpkgs/nixos/modules/services/x11/urserver.nix38
-rw-r--r--nixpkgs/nixos/modules/services/x11/urxvtd.nix50
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/2bwm.nix37
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/afterstep.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/awesome.nix66
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/berry.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/bspwm.nix77
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/clfswm.nix34
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/cwm.nix23
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/default.nix93
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/dk.nix27
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix58
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/e16.nix26
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/evilwm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/exwm.nix69
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/fluxbox.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/fvwm2.nix47
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/fvwm3.nix35
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/hackedbox.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/herbstluftwm.nix47
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/hypr.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/i3.nix77
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/icewm.nix27
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/jwm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/katriawm.nix27
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/leftwm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/lwm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/metacity.nix30
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/mlvwm.nix41
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/mwm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/nimdow.nix23
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/none.nix12
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/notion.nix26
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/openbox.nix24
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/pekwm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/qtile.nix71
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/ragnarwm.nix33
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/ratpoison.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/sawfish.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/smallwm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/spectrwm.nix27
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/stumpwm.nix24
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/tinywm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/twm.nix37
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/windowlab.nix22
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/windowmaker.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/wmderland.nix61
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/wmii.nix39
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/xmonad.nix204
-rw-r--r--nixpkgs/nixos/modules/services/x11/window-managers/yeahwm.nix25
-rw-r--r--nixpkgs/nixos/modules/services/x11/xautolock.nix141
-rw-r--r--nixpkgs/nixos/modules/services/x11/xbanish.nix31
-rw-r--r--nixpkgs/nixos/modules/services/x11/xfs.conf15
-rw-r--r--nixpkgs/nixos/modules/services/x11/xfs.nix46
-rw-r--r--nixpkgs/nixos/modules/services/x11/xserver.nix923
-rw-r--r--nixpkgs/nixos/modules/system/activation/activatable-system.nix79
-rw-r--r--nixpkgs/nixos/modules/system/activation/activation-script.nix284
-rw-r--r--nixpkgs/nixos/modules/system/activation/bootspec.cue31
-rw-r--r--nixpkgs/nixos/modules/system/activation/bootspec.nix118
-rw-r--r--nixpkgs/nixos/modules/system/activation/no-clone.nix8
-rw-r--r--nixpkgs/nixos/modules/system/activation/specialisation.nix85
-rwxr-xr-xnixpkgs/nixos/modules/system/activation/switch-to-configuration.pl992
-rw-r--r--nixpkgs/nixos/modules/system/activation/switchable-system.nix55
-rw-r--r--nixpkgs/nixos/modules/system/activation/test.nix27
-rw-r--r--nixpkgs/nixos/modules/system/activation/top-level.nix335
-rw-r--r--nixpkgs/nixos/modules/system/boot/binfmt.nix338
-rw-r--r--nixpkgs/nixos/modules/system/boot/emergency-mode.nix37
-rw-r--r--nixpkgs/nixos/modules/system/boot/grow-partition.nix54
-rw-r--r--nixpkgs/nixos/modules/system/boot/initrd-network.nix162
-rw-r--r--nixpkgs/nixos/modules/system/boot/initrd-openvpn.nix91
-rw-r--r--nixpkgs/nixos/modules/system/boot/initrd-ssh.nix267
-rw-r--r--nixpkgs/nixos/modules/system/boot/kernel.nix429
-rw-r--r--nixpkgs/nixos/modules/system/boot/kernel_config.nix116
-rw-r--r--nixpkgs/nixos/modules/system/boot/kexec.nix32
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/efi.nix20
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/external/external.md26
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/external/external.nix36
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/generations-dir/generations-dir-builder.sh106
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/generations-dir/generations-dir.nix62
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/default.nix82
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.nix8
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh157
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix834
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl801
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/grub/ipxe.nix60
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/grub/memtest.nix69
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/init-script/init-script-builder.sh92
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/init-script/init-script.nix52
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/loader.nix20
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix9
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.sh143
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix151
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix37
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.sh38
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py344
-rw-r--r--nixpkgs/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix312
-rw-r--r--nixpkgs/nixos/modules/system/boot/luksroot.nix1086
-rw-r--r--nixpkgs/nixos/modules/system/boot/modprobe.nix72
-rw-r--r--nixpkgs/nixos/modules/system/boot/networkd.nix3036
-rw-r--r--nixpkgs/nixos/modules/system/boot/pbkdf2-sha512.c38
-rw-r--r--nixpkgs/nixos/modules/system/boot/plymouth.nix349
-rw-r--r--nixpkgs/nixos/modules/system/boot/resolved.nix160
-rw-r--r--nixpkgs/nixos/modules/system/boot/shutdown.nix27
-rw-r--r--nixpkgs/nixos/modules/system/boot/stage-1-init.sh664
-rw-r--r--nixpkgs/nixos/modules/system/boot/stage-1.nix735
-rwxr-xr-xnixpkgs/nixos/modules/system/boot/stage-2-init.sh147
-rw-r--r--nixpkgs/nixos/modules/system/boot/stage-2.nix82
-rw-r--r--nixpkgs/nixos/modules/system/boot/stratisroot.nix64
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd.nix662
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/coredump.nix78
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/homed.nix43
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/initrd-secrets.nix36
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/initrd.nix561
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/journald.nix140
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/logind.nix205
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/nspawn.nix132
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/oomd.nix57
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/repart.nix148
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/shutdown.nix66
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/sysupdate.nix136
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/tmpfiles.nix225
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/user.nix238
-rw-r--r--nixpkgs/nixos/modules/system/boot/systemd/userdbd.nix18
-rw-r--r--nixpkgs/nixos/modules/system/boot/timesyncd.nix85
-rw-r--r--nixpkgs/nixos/modules/system/boot/tmp.nix69
-rw-r--r--nixpkgs/nixos/modules/system/boot/uvesafb.nix39
-rw-r--r--nixpkgs/nixos/modules/system/build.nix21
-rw-r--r--nixpkgs/nixos/modules/system/etc/etc-activation.nix12
-rw-r--r--nixpkgs/nixos/modules/system/etc/etc.nix201
-rw-r--r--nixpkgs/nixos/modules/system/etc/setup-etc.pl159
-rw-r--r--nixpkgs/nixos/modules/system/etc/test.nix70
-rw-r--r--nixpkgs/nixos/modules/tasks/auto-upgrade.nix262
-rw-r--r--nixpkgs/nixos/modules/tasks/bcache.nix31
-rw-r--r--nixpkgs/nixos/modules/tasks/cpu-freq.nix90
-rw-r--r--nixpkgs/nixos/modules/tasks/encrypted-devices.nix120
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems.nix438
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/apfs.nix22
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix150
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix150
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/cifs.nix27
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix24
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/envfs.nix60
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/erofs.nix21
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/exfat.nix13
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/ext.nix31
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix22
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix11
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/jfs.nix21
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/nfs.nix135
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix11
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix27
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix13
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix43
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix23
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/vfat.nix27
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/xfs.nix32
-rw-r--r--nixpkgs/nixos/modules/tasks/filesystems/zfs.nix895
-rw-r--r--nixpkgs/nixos/modules/tasks/lvm.nix144
-rw-r--r--nixpkgs/nixos/modules/tasks/network-interfaces-scripted.nix628
-rw-r--r--nixpkgs/nixos/modules/tasks/network-interfaces-systemd.nix462
-rw-r--r--nixpkgs/nixos/modules/tasks/network-interfaces.nix1580
-rw-r--r--nixpkgs/nixos/modules/tasks/powertop.nix29
-rw-r--r--nixpkgs/nixos/modules/tasks/scsi-link-power-management.nix54
-rw-r--r--nixpkgs/nixos/modules/tasks/snapraid.nix230
-rw-r--r--nixpkgs/nixos/modules/tasks/stratis.nix18
-rw-r--r--nixpkgs/nixos/modules/tasks/swraid.nix87
-rw-r--r--nixpkgs/nixos/modules/tasks/trackpoint.nix108
-rw-r--r--nixpkgs/nixos/modules/tasks/tty-backgrounds-combine.sh32
-rw-r--r--nixpkgs/nixos/modules/testing/service-runner.nix127
-rw-r--r--nixpkgs/nixos/modules/testing/test-instrumentation.nix222
-rw-r--r--nixpkgs/nixos/modules/virtualisation/amazon-ec2-amis.nix588
-rw-r--r--nixpkgs/nixos/modules/virtualisation/amazon-image.nix105
-rw-r--r--nixpkgs/nixos/modules/virtualisation/amazon-init.nix87
-rw-r--r--nixpkgs/nixos/modules/virtualisation/amazon-options.nix73
-rw-r--r--nixpkgs/nixos/modules/virtualisation/anbox.nix176
-rw-r--r--nixpkgs/nixos/modules/virtualisation/appvm.nix49
-rw-r--r--nixpkgs/nixos/modules/virtualisation/azure-agent.nix268
-rw-r--r--nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix3
-rw-r--r--nixpkgs/nixos/modules/virtualisation/azure-common.nix67
-rw-r--r--nixpkgs/nixos/modules/virtualisation/azure-config-user.nix12
-rw-r--r--nixpkgs/nixos/modules/virtualisation/azure-config.nix5
-rw-r--r--nixpkgs/nixos/modules/virtualisation/azure-image.nix41
-rw-r--r--nixpkgs/nixos/modules/virtualisation/azure-images.nix5
-rw-r--r--nixpkgs/nixos/modules/virtualisation/brightbox-config.nix5
-rw-r--r--nixpkgs/nixos/modules/virtualisation/brightbox-image.nix166
-rw-r--r--nixpkgs/nixos/modules/virtualisation/build-vm.nix58
-rw-r--r--nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix40
-rw-r--r--nixpkgs/nixos/modules/virtualisation/container-config.nix43
-rw-r--r--nixpkgs/nixos/modules/virtualisation/containerd.nix101
-rw-r--r--nixpkgs/nixos/modules/virtualisation/containers.nix142
-rw-r--r--nixpkgs/nixos/modules/virtualisation/cri-o.nix158
-rw-r--r--nixpkgs/nixos/modules/virtualisation/digital-ocean-config.nix197
-rw-r--r--nixpkgs/nixos/modules/virtualisation/digital-ocean-image.nix70
-rw-r--r--nixpkgs/nixos/modules/virtualisation/digital-ocean-init.nix95
-rw-r--r--nixpkgs/nixos/modules/virtualisation/docker-image.nix57
-rw-r--r--nixpkgs/nixos/modules/virtualisation/docker-rootless.nix101
-rw-r--r--nixpkgs/nixos/modules/virtualisation/docker.nix262
-rw-r--r--nixpkgs/nixos/modules/virtualisation/ec2-amis.nix9
-rw-r--r--nixpkgs/nixos/modules/virtualisation/ec2-data.nix92
-rw-r--r--nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.sh66
-rw-r--r--nixpkgs/nixos/modules/virtualisation/ecs-agent.nix45
-rw-r--r--nixpkgs/nixos/modules/virtualisation/gce-images.nix17
-rw-r--r--nixpkgs/nixos/modules/virtualisation/google-compute-config.nix116
-rw-r--r--nixpkgs/nixos/modules/virtualisation/google-compute-image.nix84
-rw-r--r--nixpkgs/nixos/modules/virtualisation/grow-partition.nix3
-rw-r--r--nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix64
-rw-r--r--nixpkgs/nixos/modules/virtualisation/hyperv-image.nix71
-rw-r--r--nixpkgs/nixos/modules/virtualisation/includes-to-excludes.py86
-rw-r--r--nixpkgs/nixos/modules/virtualisation/incus.nix236
-rw-r--r--nixpkgs/nixos/modules/virtualisation/kubevirt.nix30
-rw-r--r--nixpkgs/nixos/modules/virtualisation/kvmgt.nix86
-rw-r--r--nixpkgs/nixos/modules/virtualisation/libvirtd.nix502
-rw-r--r--nixpkgs/nixos/modules/virtualisation/linode-config.nix75
-rw-r--r--nixpkgs/nixos/modules/virtualisation/linode-image.nix66
-rw-r--r--nixpkgs/nixos/modules/virtualisation/lxc-container.nix121
-rw-r--r--nixpkgs/nixos/modules/virtualisation/lxc-image-metadata.nix104
-rw-r--r--nixpkgs/nixos/modules/virtualisation/lxc-instance-common.nix30
-rw-r--r--nixpkgs/nixos/modules/virtualisation/lxc.nix82
-rw-r--r--nixpkgs/nixos/modules/virtualisation/lxcfs.nix45
-rw-r--r--nixpkgs/nixos/modules/virtualisation/lxd-agent.nix91
-rw-r--r--nixpkgs/nixos/modules/virtualisation/lxd-virtual-machine.nix46
-rw-r--r--nixpkgs/nixos/modules/virtualisation/lxd.nix273
-rw-r--r--nixpkgs/nixos/modules/virtualisation/multipass.nix61
-rw-r--r--nixpkgs/nixos/modules/virtualisation/nixos-containers.nix911
-rw-r--r--nixpkgs/nixos/modules/virtualisation/oci-common.nix60
-rw-r--r--nixpkgs/nixos/modules/virtualisation/oci-config-user.nix12
-rw-r--r--nixpkgs/nixos/modules/virtualisation/oci-containers.nix386
-rw-r--r--nixpkgs/nixos/modules/virtualisation/oci-image.nix50
-rw-r--r--nixpkgs/nixos/modules/virtualisation/oci-options.nix14
-rw-r--r--nixpkgs/nixos/modules/virtualisation/openstack-config.nix90
-rw-r--r--nixpkgs/nixos/modules/virtualisation/openstack-metadata-fetcher.nix22
-rw-r--r--nixpkgs/nixos/modules/virtualisation/openstack-options.nix71
-rw-r--r--nixpkgs/nixos/modules/virtualisation/openvswitch.nix145
-rw-r--r--nixpkgs/nixos/modules/virtualisation/parallels-guest.nix145
-rw-r--r--nixpkgs/nixos/modules/virtualisation/podman/default.nix245
-rw-r--r--nixpkgs/nixos/modules/virtualisation/podman/network-socket-ghostunnel.nix34
-rw-r--r--nixpkgs/nixos/modules/virtualisation/podman/network-socket.nix95
-rw-r--r--nixpkgs/nixos/modules/virtualisation/proxmox-image.nix303
-rw-r--r--nixpkgs/nixos/modules/virtualisation/proxmox-lxc.nix75
-rw-r--r--nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix45
-rw-r--r--nixpkgs/nixos/modules/virtualisation/qemu-vm.nix1333
-rw-r--r--nixpkgs/nixos/modules/virtualisation/rosetta.nix81
-rw-r--r--nixpkgs/nixos/modules/virtualisation/spice-usb-redirection.nix26
-rw-r--r--nixpkgs/nixos/modules/virtualisation/vagrant-guest.nix59
-rw-r--r--nixpkgs/nixos/modules/virtualisation/vagrant-virtualbox-image.nix60
-rw-r--r--nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix93
-rw-r--r--nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix170
-rw-r--r--nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix256
-rw-r--r--nixpkgs/nixos/modules/virtualisation/vmware-guest.nix86
-rw-r--r--nixpkgs/nixos/modules/virtualisation/vmware-host.nix166
-rw-r--r--nixpkgs/nixos/modules/virtualisation/vmware-image.nix91
-rw-r--r--nixpkgs/nixos/modules/virtualisation/waydroid.nix71
-rw-r--r--nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix52
-rw-r--r--nixpkgs/nixos/modules/virtualisation/xen-dom0.nix453
-rw-r--r--nixpkgs/nixos/modules/virtualisation/xen-domU.nix18
-rw-r--r--nixpkgs/nixos/release-combined.nix178
-rw-r--r--nixpkgs/nixos/release-small.nix144
-rw-r--r--nixpkgs/nixos/release.nix473
-rw-r--r--nixpkgs/nixos/tests/3proxy.nix187
-rw-r--r--nixpkgs/nixos/tests/aaaaxy.nix29
-rw-r--r--nixpkgs/nixos/tests/acme-dns.nix50
-rw-r--r--nixpkgs/nixos/tests/acme.nix715
-rw-r--r--nixpkgs/nixos/tests/activation/nix-channel.nix26
-rw-r--r--nixpkgs/nixos/tests/activation/var.nix18
-rw-r--r--nixpkgs/nixos/tests/adguardhome.nix135
-rw-r--r--nixpkgs/nixos/tests/aesmd.nix102
-rw-r--r--nixpkgs/nixos/tests/agda.nix50
-rw-r--r--nixpkgs/nixos/tests/airsonic.nix29
-rw-r--r--nixpkgs/nixos/tests/akkoma.nix124
-rw-r--r--nixpkgs/nixos/tests/alice-lg.nix44
-rw-r--r--nixpkgs/nixos/tests/all-terminfo.nix31
-rw-r--r--nixpkgs/nixos/tests/all-tests.nix942
-rw-r--r--nixpkgs/nixos/tests/alps.nix108
-rw-r--r--nixpkgs/nixos/tests/amazon-init-shell.nix40
-rw-r--r--nixpkgs/nixos/tests/amazon-ssm-agent.nix17
-rw-r--r--nixpkgs/nixos/tests/amd-sev.nix56
-rw-r--r--nixpkgs/nixos/tests/anbox.nix36
-rw-r--r--nixpkgs/nixos/tests/anuko-time-tracker.nix17
-rw-r--r--nixpkgs/nixos/tests/apache_datasketches.nix29
-rw-r--r--nixpkgs/nixos/tests/apcupsd.nix41
-rw-r--r--nixpkgs/nixos/tests/apfs.nix65
-rw-r--r--nixpkgs/nixos/tests/apparmor.nix85
-rw-r--r--nixpkgs/nixos/tests/appliance-repart-image.nix116
-rw-r--r--nixpkgs/nixos/tests/archi.nix31
-rw-r--r--nixpkgs/nixos/tests/atd.nix31
-rw-r--r--nixpkgs/nixos/tests/atop.nix226
-rw-r--r--nixpkgs/nixos/tests/atuin.nix66
-rw-r--r--nixpkgs/nixos/tests/audiobookshelf.nix23
-rw-r--r--nixpkgs/nixos/tests/auth-mysql.nix177
-rw-r--r--nixpkgs/nixos/tests/authelia.nix169
-rw-r--r--nixpkgs/nixos/tests/avahi.nix79
-rw-r--r--nixpkgs/nixos/tests/babeld.nix142
-rw-r--r--nixpkgs/nixos/tests/bazarr.nix24
-rw-r--r--nixpkgs/nixos/tests/bcachefs.nix32
-rw-r--r--nixpkgs/nixos/tests/beanstalkd.nix49
-rw-r--r--nixpkgs/nixos/tests/bees.nix62
-rw-r--r--nixpkgs/nixos/tests/binary-cache.nix60
-rw-r--r--nixpkgs/nixos/tests/bind.nix28
-rw-r--r--nixpkgs/nixos/tests/bird.nix129
-rw-r--r--nixpkgs/nixos/tests/birdwatcher.nix94
-rw-r--r--nixpkgs/nixos/tests/bitcoind.nix48
-rw-r--r--nixpkgs/nixos/tests/bittorrent.nix164
-rw-r--r--nixpkgs/nixos/tests/blockbook-frontend.nix28
-rw-r--r--nixpkgs/nixos/tests/blocky.nix34
-rw-r--r--nixpkgs/nixos/tests/boot-stage1.nix164
-rw-r--r--nixpkgs/nixos/tests/boot.nix148
-rw-r--r--nixpkgs/nixos/tests/bootspec.nix172
-rw-r--r--nixpkgs/nixos/tests/borgbackup.nix230
-rw-r--r--nixpkgs/nixos/tests/botamusique.nix51
-rw-r--r--nixpkgs/nixos/tests/bpf.nix36
-rw-r--r--nixpkgs/nixos/tests/bpftune.nix20
-rw-r--r--nixpkgs/nixos/tests/breitbandmessung.nix33
-rw-r--r--nixpkgs/nixos/tests/brscan5.nix43
-rw-r--r--nixpkgs/nixos/tests/btrbk-doas.nix114
-rw-r--r--nixpkgs/nixos/tests/btrbk-no-timer.nix37
-rw-r--r--nixpkgs/nixos/tests/btrbk-section-order.nix51
-rw-r--r--nixpkgs/nixos/tests/btrbk.nix112
-rw-r--r--nixpkgs/nixos/tests/budgie.nix68
-rw-r--r--nixpkgs/nixos/tests/buildbot.nix108
-rw-r--r--nixpkgs/nixos/tests/buildkite-agents.nix29
-rw-r--r--nixpkgs/nixos/tests/c2fmzq.nix75
-rw-r--r--nixpkgs/nixos/tests/caddy.nix88
-rw-r--r--nixpkgs/nixos/tests/cadvisor.nix32
-rw-r--r--nixpkgs/nixos/tests/cage.nix38
-rw-r--r--nixpkgs/nixos/tests/cagebreak.nix65
-rw-r--r--nixpkgs/nixos/tests/calibre-server.nix104
-rw-r--r--nixpkgs/nixos/tests/calibre-web.nix42
-rw-r--r--nixpkgs/nixos/tests/cassandra.nix132
-rw-r--r--nixpkgs/nixos/tests/castopod.nix87
-rw-r--r--nixpkgs/nixos/tests/ceph-multi-node.nix233
-rw-r--r--nixpkgs/nixos/tests/ceph-single-node-bluestore.nix196
-rw-r--r--nixpkgs/nixos/tests/ceph-single-node.nix207
-rw-r--r--nixpkgs/nixos/tests/certmgr.nix155
-rw-r--r--nixpkgs/nixos/tests/cfssl.nix67
-rw-r--r--nixpkgs/nixos/tests/cgit.nix73
-rw-r--r--nixpkgs/nixos/tests/charliecloud.nix43
-rw-r--r--nixpkgs/nixos/tests/chromium.nix269
-rw-r--r--nixpkgs/nixos/tests/chrony-ptp.nix28
-rw-r--r--nixpkgs/nixos/tests/chrony.nix31
-rw-r--r--nixpkgs/nixos/tests/cinnamon.nix82
-rw-r--r--nixpkgs/nixos/tests/cjdns.nix121
-rw-r--r--nixpkgs/nixos/tests/clickhouse.nix32
-rw-r--r--nixpkgs/nixos/tests/cloud-init-hostname.nix46
-rw-r--r--nixpkgs/nixos/tests/cloud-init.nix114
-rw-r--r--nixpkgs/nixos/tests/cloudlog.nix18
-rw-r--r--nixpkgs/nixos/tests/cntr.nix75
-rw-r--r--nixpkgs/nixos/tests/cockpit.nix136
-rw-r--r--nixpkgs/nixos/tests/cockroachdb.nix124
-rw-r--r--nixpkgs/nixos/tests/coder.nix24
-rw-r--r--nixpkgs/nixos/tests/collectd.nix38
-rw-r--r--nixpkgs/nixos/tests/common/acme/client/default.nix16
-rw-r--r--nixpkgs/nixos/tests/common/acme/server/README.md21
-rw-r--r--nixpkgs/nixos/tests/common/acme/server/acme.test.cert.pem19
-rw-r--r--nixpkgs/nixos/tests/common/acme/server/acme.test.key.pem27
-rw-r--r--nixpkgs/nixos/tests/common/acme/server/ca.cert.pem20
-rw-r--r--nixpkgs/nixos/tests/common/acme/server/ca.key.pem27
-rw-r--r--nixpkgs/nixos/tests/common/acme/server/default.nix141
-rw-r--r--nixpkgs/nixos/tests/common/acme/server/generate-certs.nix33
-rw-r--r--nixpkgs/nixos/tests/common/acme/server/snakeoil-certs.nix13
-rw-r--r--nixpkgs/nixos/tests/common/auto-format-root-device.nix29
-rw-r--r--nixpkgs/nixos/tests/common/auto.nix55
-rw-r--r--nixpkgs/nixos/tests/common/ec2.nix73
-rw-r--r--nixpkgs/nixos/tests/common/gpg-keyring.nix21
-rw-r--r--nixpkgs/nixos/tests/common/resolver.nix141
-rw-r--r--nixpkgs/nixos/tests/common/user-account.nix15
-rw-r--r--nixpkgs/nixos/tests/common/wayland-cage.nix13
-rw-r--r--nixpkgs/nixos/tests/common/webroot/news-rss.xml27
-rw-r--r--nixpkgs/nixos/tests/common/x11.nix17
-rw-r--r--nixpkgs/nixos/tests/connman.nix77
-rw-r--r--nixpkgs/nixos/tests/consul-template.nix36
-rw-r--r--nixpkgs/nixos/tests/consul.nix239
-rw-r--r--nixpkgs/nixos/tests/containers-bridge.nix99
-rw-r--r--nixpkgs/nixos/tests/containers-custom-pkgs.nix34
-rw-r--r--nixpkgs/nixos/tests/containers-ephemeral.nix54
-rw-r--r--nixpkgs/nixos/tests/containers-extra_veth.nix91
-rw-r--r--nixpkgs/nixos/tests/containers-hosts.nix49
-rw-r--r--nixpkgs/nixos/tests/containers-imperative.nix169
-rw-r--r--nixpkgs/nixos/tests/containers-ip.nix74
-rw-r--r--nixpkgs/nixos/tests/containers-macvlans.nix82
-rw-r--r--nixpkgs/nixos/tests/containers-names.nix37
-rw-r--r--nixpkgs/nixos/tests/containers-nested.nix30
-rw-r--r--nixpkgs/nixos/tests/containers-physical_interfaces.nix131
-rw-r--r--nixpkgs/nixos/tests/containers-portforward.nix59
-rw-r--r--nixpkgs/nixos/tests/containers-reloadable.nix71
-rw-r--r--nixpkgs/nixos/tests/containers-restart_networking.nix113
-rw-r--r--nixpkgs/nixos/tests/containers-tmpfs.nix90
-rw-r--r--nixpkgs/nixos/tests/containers-unified-hierarchy.nix21
-rw-r--r--nixpkgs/nixos/tests/convos.nix28
-rw-r--r--nixpkgs/nixos/tests/corerad.nix90
-rw-r--r--nixpkgs/nixos/tests/coturn.nix34
-rw-r--r--nixpkgs/nixos/tests/couchdb.nix57
-rw-r--r--nixpkgs/nixos/tests/cri-o.nix19
-rw-r--r--nixpkgs/nixos/tests/croc.nix51
-rw-r--r--nixpkgs/nixos/tests/cups-pdf.nix40
-rw-r--r--nixpkgs/nixos/tests/curl-impersonate.nix157
-rw-r--r--nixpkgs/nixos/tests/custom-ca.nix195
-rw-r--r--nixpkgs/nixos/tests/dae.nix33
-rw-r--r--nixpkgs/nixos/tests/darling.nix44
-rw-r--r--nixpkgs/nixos/tests/dconf.nix34
-rw-r--r--nixpkgs/nixos/tests/deconz.nix28
-rw-r--r--nixpkgs/nixos/tests/deepin.nix57
-rw-r--r--nixpkgs/nixos/tests/deluge.nix63
-rw-r--r--nixpkgs/nixos/tests/dex-oidc.nix78
-rw-r--r--nixpkgs/nixos/tests/dhparams.nix128
-rw-r--r--nixpkgs/nixos/tests/disable-installer-tools.nix29
-rw-r--r--nixpkgs/nixos/tests/discourse.nix202
-rw-r--r--nixpkgs/nixos/tests/dnscrypt-proxy2.nix38
-rw-r--r--nixpkgs/nixos/tests/dnscrypt-wrapper/default.nix148
-rw-r--r--nixpkgs/nixos/tests/dnscrypt-wrapper/public.key1
-rw-r--r--nixpkgs/nixos/tests/dnscrypt-wrapper/secret.key1
-rw-r--r--nixpkgs/nixos/tests/dnsdist.nix48
-rw-r--r--nixpkgs/nixos/tests/doas.nix96
-rw-r--r--nixpkgs/nixos/tests/docker-registry.nix61
-rw-r--r--nixpkgs/nixos/tests/docker-rootless.nix41
-rw-r--r--nixpkgs/nixos/tests/docker-tools-cross.nix80
-rw-r--r--nixpkgs/nixos/tests/docker-tools-overlay.nix33
-rw-r--r--nixpkgs/nixos/tests/docker-tools.nix554
-rw-r--r--nixpkgs/nixos/tests/docker.nix53
-rw-r--r--nixpkgs/nixos/tests/documize.nix62
-rw-r--r--nixpkgs/nixos/tests/doh-proxy-rust.nix41
-rw-r--r--nixpkgs/nixos/tests/dokuwiki.nix161
-rw-r--r--nixpkgs/nixos/tests/dolibarr.nix59
-rw-r--r--nixpkgs/nixos/tests/domination.nix26
-rw-r--r--nixpkgs/nixos/tests/dovecot.nix82
-rw-r--r--nixpkgs/nixos/tests/drbd.nix87
-rw-r--r--nixpkgs/nixos/tests/early-mount-options.nix19
-rw-r--r--nixpkgs/nixos/tests/earlyoom.nix16
-rw-r--r--nixpkgs/nixos/tests/ec2.nix156
-rw-r--r--nixpkgs/nixos/tests/ecryptfs.nix85
-rw-r--r--nixpkgs/nixos/tests/elk.nix276
-rw-r--r--nixpkgs/nixos/tests/emacs-daemon.nix48
-rw-r--r--nixpkgs/nixos/tests/empty-file0
-rw-r--r--nixpkgs/nixos/tests/endlessh-go.nix58
-rw-r--r--nixpkgs/nixos/tests/endlessh.nix43
-rw-r--r--nixpkgs/nixos/tests/engelsystem.nix41
-rw-r--r--nixpkgs/nixos/tests/enlightenment.nix96
-rw-r--r--nixpkgs/nixos/tests/env.nix36
-rw-r--r--nixpkgs/nixos/tests/envfs.nix42
-rw-r--r--nixpkgs/nixos/tests/envoy.nix54
-rw-r--r--nixpkgs/nixos/tests/ergo.nix18
-rw-r--r--nixpkgs/nixos/tests/ergochat.nix97
-rw-r--r--nixpkgs/nixos/tests/eris-server.nix23
-rw-r--r--nixpkgs/nixos/tests/esphome.nix40
-rw-r--r--nixpkgs/nixos/tests/etcd-cluster.nix157
-rw-r--r--nixpkgs/nixos/tests/etcd.nix25
-rw-r--r--nixpkgs/nixos/tests/etebase-server.nix50
-rw-r--r--nixpkgs/nixos/tests/etesync-dav.nix21
-rw-r--r--nixpkgs/nixos/tests/evcc.nix96
-rw-r--r--nixpkgs/nixos/tests/fail2ban.nix18
-rw-r--r--nixpkgs/nixos/tests/fakeroute.nix22
-rw-r--r--nixpkgs/nixos/tests/fancontrol.nix34
-rw-r--r--nixpkgs/nixos/tests/fanout.nix30
-rw-r--r--nixpkgs/nixos/tests/fastnetmon-advanced.nix65
-rw-r--r--nixpkgs/nixos/tests/fcitx5/default.nix165
-rw-r--r--nixpkgs/nixos/tests/fenics.nix49
-rw-r--r--nixpkgs/nixos/tests/ferm.nix75
-rw-r--r--nixpkgs/nixos/tests/ferretdb.nix64
-rw-r--r--nixpkgs/nixos/tests/firefox.nix123
-rw-r--r--nixpkgs/nixos/tests/firejail.nix91
-rw-r--r--nixpkgs/nixos/tests/firewall.nix68
-rw-r--r--nixpkgs/nixos/tests/fish.nix24
-rw-r--r--nixpkgs/nixos/tests/flannel.nix57
-rw-r--r--nixpkgs/nixos/tests/fluentd.nix49
-rw-r--r--nixpkgs/nixos/tests/fluidd.nix19
-rw-r--r--nixpkgs/nixos/tests/fontconfig-default-fonts.nix32
-rw-r--r--nixpkgs/nixos/tests/forgejo.nix178
-rw-r--r--nixpkgs/nixos/tests/freenet.nix19
-rw-r--r--nixpkgs/nixos/tests/freeswitch.nix29
-rw-r--r--nixpkgs/nixos/tests/freetube.nix41
-rw-r--r--nixpkgs/nixos/tests/freshrss-http-auth.nix20
-rw-r--r--nixpkgs/nixos/tests/freshrss-pgsql.nix46
-rw-r--r--nixpkgs/nixos/tests/freshrss-sqlite.nix20
-rw-r--r--nixpkgs/nixos/tests/frigate.nix60
-rw-r--r--nixpkgs/nixos/tests/frp.nix86
-rw-r--r--nixpkgs/nixos/tests/frr.nix104
-rw-r--r--nixpkgs/nixos/tests/fsck.nix45
-rw-r--r--nixpkgs/nixos/tests/fscrypt.nix50
-rw-r--r--nixpkgs/nixos/tests/ft2-clone.nix33
-rw-r--r--nixpkgs/nixos/tests/garage/basic.nix98
-rw-r--r--nixpkgs/nixos/tests/garage/default.nix54
-rw-r--r--nixpkgs/nixos/tests/garage/with-3node-replication.nix121
-rw-r--r--nixpkgs/nixos/tests/gemstash.nix51
-rw-r--r--nixpkgs/nixos/tests/gerrit.nix54
-rw-r--r--nixpkgs/nixos/tests/geth.nix45
-rw-r--r--nixpkgs/nixos/tests/ghostunnel.nix104
-rw-r--r--nixpkgs/nixos/tests/git/hub.nix17
-rw-r--r--nixpkgs/nixos/tests/gitdaemon.nix71
-rw-r--r--nixpkgs/nixos/tests/gitea.nix165
-rw-r--r--nixpkgs/nixos/tests/github-runner.nix37
-rw-r--r--nixpkgs/nixos/tests/gitlab.nix437
-rw-r--r--nixpkgs/nixos/tests/gitolite-fcgiwrap.nix93
-rw-r--r--nixpkgs/nixos/tests/gitolite.nix138
-rw-r--r--nixpkgs/nixos/tests/glusterfs.nix68
-rw-r--r--nixpkgs/nixos/tests/gnome-flashback.nix52
-rw-r--r--nixpkgs/nixos/tests/gnome-xorg.nix99
-rw-r--r--nixpkgs/nixos/tests/gnome.nix93
-rw-r--r--nixpkgs/nixos/tests/gnupg.nix118
-rw-r--r--nixpkgs/nixos/tests/go-neb.nix44
-rw-r--r--nixpkgs/nixos/tests/gobgpd.nix71
-rw-r--r--nixpkgs/nixos/tests/gocd-agent.nix48
-rw-r--r--nixpkgs/nixos/tests/gocd-server.nix28
-rw-r--r--nixpkgs/nixos/tests/gollum.nix14
-rw-r--r--nixpkgs/nixos/tests/gonic.nix18
-rw-r--r--nixpkgs/nixos/tests/google-oslogin/default.nix74
-rw-r--r--nixpkgs/nixos/tests/google-oslogin/server.nix27
-rwxr-xr-xnixpkgs/nixos/tests/google-oslogin/server.py145
-rw-r--r--nixpkgs/nixos/tests/goss.nix53
-rw-r--r--nixpkgs/nixos/tests/gotify-server.nix50
-rw-r--r--nixpkgs/nixos/tests/grafana-agent.nix32
-rw-r--r--nixpkgs/nixos/tests/grafana/basic.nix142
-rw-r--r--nixpkgs/nixos/tests/grafana/default.nix9
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/contact-points.yaml9
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/dashboards.yaml6
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/datasources.yaml7
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/default.nix256
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/mute-timings.yaml4
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/policies.yaml4
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/rules.yaml36
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/templates.yaml5
-rw-r--r--nixpkgs/nixos/tests/grafana/provision/test_dashboard.json47
-rw-r--r--nixpkgs/nixos/tests/graphite.nix36
-rw-r--r--nixpkgs/nixos/tests/graylog.nix114
-rw-r--r--nixpkgs/nixos/tests/grocy.nix73
-rw-r--r--nixpkgs/nixos/tests/grow-partition.nix83
-rw-r--r--nixpkgs/nixos/tests/grub.nix60
-rw-r--r--nixpkgs/nixos/tests/guacamole-server.nix21
-rw-r--r--nixpkgs/nixos/tests/gvisor.nix49
-rw-r--r--nixpkgs/nixos/tests/hadoop/default.nix8
-rw-r--r--nixpkgs/nixos/tests/hadoop/hadoop.nix255
-rw-r--r--nixpkgs/nixos/tests/hadoop/hbase.nix109
-rw-r--r--nixpkgs/nixos/tests/hadoop/hdfs.nix83
-rw-r--r--nixpkgs/nixos/tests/hadoop/yarn.nix45
-rw-r--r--nixpkgs/nixos/tests/haka.nix24
-rw-r--r--nixpkgs/nixos/tests/haproxy.nix53
-rw-r--r--nixpkgs/nixos/tests/hardened.nix105
-rw-r--r--nixpkgs/nixos/tests/harmonia.nix37
-rw-r--r--nixpkgs/nixos/tests/haste-server.nix23
-rw-r--r--nixpkgs/nixos/tests/hbase.nix30
-rw-r--r--nixpkgs/nixos/tests/hddfancontrol.nix44
-rw-r--r--nixpkgs/nixos/tests/headscale.nix82
-rw-r--r--nixpkgs/nixos/tests/hedgedoc.nix96
-rw-r--r--nixpkgs/nixos/tests/herbstluftwm.nix37
-rw-r--r--nixpkgs/nixos/tests/hibernate.nix55
-rw-r--r--nixpkgs/nixos/tests/hitch/default.nix33
-rw-r--r--nixpkgs/nixos/tests/hitch/example.pem53
-rw-r--r--nixpkgs/nixos/tests/hitch/example/index.txt1
-rw-r--r--nixpkgs/nixos/tests/hledger-web.nix50
-rw-r--r--nixpkgs/nixos/tests/hocker-fetchdocker/default.nix16
-rw-r--r--nixpkgs/nixos/tests/hocker-fetchdocker/hello-world-container.nix19
-rw-r--r--nixpkgs/nixos/tests/hocker-fetchdocker/machine.nix26
-rw-r--r--nixpkgs/nixos/tests/hockeypuck.nix63
-rw-r--r--nixpkgs/nixos/tests/home-assistant.nix241
-rw-r--r--nixpkgs/nixos/tests/homepage-dashboard.nix14
-rw-r--r--nixpkgs/nixos/tests/honk.nix32
-rw-r--r--nixpkgs/nixos/tests/hostname.nix72
-rw-r--r--nixpkgs/nixos/tests/hound.nix59
-rw-r--r--nixpkgs/nixos/tests/hydra/common.nix48
-rwxr-xr-xnixpkgs/nixos/tests/hydra/create-trivial-project.sh59
-rw-r--r--nixpkgs/nixos/tests/hydra/default.nix59
-rw-r--r--nixpkgs/nixos/tests/i3wm.nix46
-rw-r--r--nixpkgs/nixos/tests/icingaweb2.nix71
-rw-r--r--nixpkgs/nixos/tests/iftop.nix31
-rw-r--r--nixpkgs/nixos/tests/image-contents.nix62
-rw-r--r--nixpkgs/nixos/tests/incron.nix52
-rw-r--r--nixpkgs/nixos/tests/incus/container.nix77
-rw-r--r--nixpkgs/nixos/tests/incus/default.nix14
-rw-r--r--nixpkgs/nixos/tests/incus/preseed.nix60
-rw-r--r--nixpkgs/nixos/tests/incus/socket-activated.nix26
-rw-r--r--nixpkgs/nixos/tests/incus/virtual-machine.nix55
-rw-r--r--nixpkgs/nixos/tests/influxdb.nix40
-rw-r--r--nixpkgs/nixos/tests/influxdb2.nix225
-rw-r--r--nixpkgs/nixos/tests/initrd-luks-empty-passphrase.nix105
-rw-r--r--nixpkgs/nixos/tests/initrd-network-openvpn/default.nix164
-rw-r--r--nixpkgs/nixos/tests/initrd-network-openvpn/initrd.ovpn30
-rw-r--r--nixpkgs/nixos/tests/initrd-network-openvpn/shared.key21
-rw-r--r--nixpkgs/nixos/tests/initrd-network-ssh/default.nix73
-rw-r--r--nixpkgs/nixos/tests/initrd-network-ssh/generate-keys.nix10
-rw-r--r--nixpkgs/nixos/tests/initrd-network-ssh/id_ed255197
-rw-r--r--nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519.pub1
-rw-r--r--nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key7
-rw-r--r--nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key.pub1
-rw-r--r--nixpkgs/nixos/tests/initrd-network.nix33
-rw-r--r--nixpkgs/nixos/tests/initrd-secrets-changing.nix57
-rw-r--r--nixpkgs/nixos/tests/initrd-secrets.nix41
-rw-r--r--nixpkgs/nixos/tests/input-remapper.nix52
-rw-r--r--nixpkgs/nixos/tests/inspircd.nix93
-rw-r--r--nixpkgs/nixos/tests/installed-tests/appstream-qt.nix9
-rw-r--r--nixpkgs/nixos/tests/installed-tests/appstream.nix9
-rw-r--r--nixpkgs/nixos/tests/installed-tests/colord.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/default.nix112
-rw-r--r--nixpkgs/nixos/tests/installed-tests/flatpak-builder.nix15
-rw-r--r--nixpkgs/nixos/tests/installed-tests/flatpak.nix17
-rw-r--r--nixpkgs/nixos/tests/installed-tests/fwupd.nix11
-rw-r--r--nixpkgs/nixos/tests/installed-tests/gcab.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/gdk-pixbuf.nix13
-rw-r--r--nixpkgs/nixos/tests/installed-tests/geocode-glib.nix13
-rw-r--r--nixpkgs/nixos/tests/installed-tests/gjs.nix12
-rw-r--r--nixpkgs/nixos/tests/installed-tests/glib-networking.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/glib-testing.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/gnome-photos.nix35
-rw-r--r--nixpkgs/nixos/tests/installed-tests/graphene.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/gsconnect.nix7
-rw-r--r--nixpkgs/nixos/tests/installed-tests/ibus.nix17
-rw-r--r--nixpkgs/nixos/tests/installed-tests/json-glib.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/libgdata.nix11
-rw-r--r--nixpkgs/nixos/tests/installed-tests/libjcat.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/libxmlb.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/malcontent.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/ostree.nix12
-rw-r--r--nixpkgs/nixos/tests/installed-tests/pipewire.nix5
-rw-r--r--nixpkgs/nixos/tests/installed-tests/upower.nix9
-rw-r--r--nixpkgs/nixos/tests/installed-tests/xdg-desktop-portal.nix9
-rw-r--r--nixpkgs/nixos/tests/installer-systemd-stage-1.nix37
-rw-r--r--nixpkgs/nixos/tests/installer.nix1213
-rw-r--r--nixpkgs/nixos/tests/installer/flake.nix20
-rw-r--r--nixpkgs/nixos/tests/invidious.nix80
-rw-r--r--nixpkgs/nixos/tests/invoiceplane.nix82
-rw-r--r--nixpkgs/nixos/tests/iodine.nix64
-rw-r--r--nixpkgs/nixos/tests/ipv6.nix130
-rw-r--r--nixpkgs/nixos/tests/iscsi-multipath-root.nix267
-rw-r--r--nixpkgs/nixos/tests/iscsi-root.nix161
-rw-r--r--nixpkgs/nixos/tests/isso.nix30
-rw-r--r--nixpkgs/nixos/tests/jackett.nix17
-rw-r--r--nixpkgs/nixos/tests/jellyfin.nix155
-rw-r--r--nixpkgs/nixos/tests/jenkins-cli.nix30
-rw-r--r--nixpkgs/nixos/tests/jenkins.nix123
-rw-r--r--nixpkgs/nixos/tests/jibri.nix66
-rw-r--r--nixpkgs/nixos/tests/jirafeau.nix20
-rw-r--r--nixpkgs/nixos/tests/jitsi-meet.nix46
-rw-r--r--nixpkgs/nixos/tests/jool.nix220
-rw-r--r--nixpkgs/nixos/tests/k3s/default.nix13
-rw-r--r--nixpkgs/nixos/tests/k3s/multi-node.nix183
-rw-r--r--nixpkgs/nixos/tests/k3s/single-node.nix85
-rw-r--r--nixpkgs/nixos/tests/kafka.nix78
-rw-r--r--nixpkgs/nixos/tests/kanidm.nix128
-rw-r--r--nixpkgs/nixos/tests/karma.nix84
-rw-r--r--nixpkgs/nixos/tests/kavita.nix36
-rw-r--r--nixpkgs/nixos/tests/kbd-setfont-decompress.nix21
-rw-r--r--nixpkgs/nixos/tests/kbd-update-search-paths-patch.nix19
-rw-r--r--nixpkgs/nixos/tests/kea.nix186
-rw-r--r--nixpkgs/nixos/tests/keepalived.nix42
-rw-r--r--nixpkgs/nixos/tests/keepassxc.nix72
-rw-r--r--nixpkgs/nixos/tests/kerberos/default.nix7
-rw-r--r--nixpkgs/nixos/tests/kerberos/heimdal.nix42
-rw-r--r--nixpkgs/nixos/tests/kerberos/mit.nix41
-rw-r--r--nixpkgs/nixos/tests/kernel-generic.nix49
-rw-r--r--nixpkgs/nixos/tests/kernel-latest-ath-user-regd.nix17
-rw-r--r--nixpkgs/nixos/tests/keter.nix43
-rw-r--r--nixpkgs/nixos/tests/kexec.nix50
-rw-r--r--nixpkgs/nixos/tests/keycloak.nix183
-rw-r--r--nixpkgs/nixos/tests/keyd.nix89
-rw-r--r--nixpkgs/nixos/tests/keymap.nix233
-rw-r--r--nixpkgs/nixos/tests/knot.nix200
-rw-r--r--nixpkgs/nixos/tests/komga.nix20
-rw-r--r--nixpkgs/nixos/tests/krb5/default.nix5
-rw-r--r--nixpkgs/nixos/tests/krb5/deprecated-config.nix50
-rw-r--r--nixpkgs/nixos/tests/krb5/example-config.nix112
-rw-r--r--nixpkgs/nixos/tests/ksm.nix22
-rw-r--r--nixpkgs/nixos/tests/kthxbye.nix110
-rw-r--r--nixpkgs/nixos/tests/kubernetes/base.nix107
-rw-r--r--nixpkgs/nixos/tests/kubernetes/default.nix13
-rw-r--r--nixpkgs/nixos/tests/kubernetes/dns.nix159
-rw-r--r--nixpkgs/nixos/tests/kubernetes/rbac.nix168
-rw-r--r--nixpkgs/nixos/tests/kubo/default.nix5
-rw-r--r--nixpkgs/nixos/tests/kubo/kubo-fuse.nix42
-rw-r--r--nixpkgs/nixos/tests/kubo/kubo.nix53
-rw-r--r--nixpkgs/nixos/tests/ladybird.nix30
-rw-r--r--nixpkgs/nixos/tests/languagetool.nix19
-rw-r--r--nixpkgs/nixos/tests/lanraragi.nix40
-rw-r--r--nixpkgs/nixos/tests/leaps.nix32
-rw-r--r--nixpkgs/nixos/tests/legit.nix54
-rw-r--r--nixpkgs/nixos/tests/lemmy.nix97
-rw-r--r--nixpkgs/nixos/tests/libinput.nix38
-rw-r--r--nixpkgs/nixos/tests/libreddit.nix19
-rw-r--r--nixpkgs/nixos/tests/librenms.nix108
-rw-r--r--nixpkgs/nixos/tests/libresprite.nix30
-rw-r--r--nixpkgs/nixos/tests/libreswan.nix136
-rw-r--r--nixpkgs/nixos/tests/libuiohook.nix21
-rw-r--r--nixpkgs/nixos/tests/libvirtd.nix68
-rw-r--r--nixpkgs/nixos/tests/lidarr.nix18
-rw-r--r--nixpkgs/nixos/tests/lightdm.nix28
-rw-r--r--nixpkgs/nixos/tests/lighttpd.nix22
-rw-r--r--nixpkgs/nixos/tests/limesurvey.nix26
-rw-r--r--nixpkgs/nixos/tests/listmonk.nix76
-rw-r--r--nixpkgs/nixos/tests/litestream.nix101
-rw-r--r--nixpkgs/nixos/tests/livebook-service.nix43
-rw-r--r--nixpkgs/nixos/tests/lldap.nix26
-rw-r--r--nixpkgs/nixos/tests/locate.nix62
-rw-r--r--nixpkgs/nixos/tests/login.nix68
-rw-r--r--nixpkgs/nixos/tests/logrotate.nix123
-rw-r--r--nixpkgs/nixos/tests/loki.nix56
-rw-r--r--nixpkgs/nixos/tests/lorri/builder.sh3
-rw-r--r--nixpkgs/nixos/tests/lorri/default.nix28
-rw-r--r--nixpkgs/nixos/tests/lorri/fake-shell.nix5
-rw-r--r--nixpkgs/nixos/tests/luks.nix73
-rw-r--r--nixpkgs/nixos/tests/lvm2/default.nix45
-rw-r--r--nixpkgs/nixos/tests/lvm2/systemd-stage-1.nix106
-rw-r--r--nixpkgs/nixos/tests/lvm2/thinpool.nix34
-rw-r--r--nixpkgs/nixos/tests/lvm2/vdo.nix27
-rw-r--r--nixpkgs/nixos/tests/lxd-image-server.nix94
-rw-r--r--nixpkgs/nixos/tests/lxd/container.nix132
-rw-r--r--nixpkgs/nixos/tests/lxd/default.nix12
-rw-r--r--nixpkgs/nixos/tests/lxd/nftables.nix50
-rw-r--r--nixpkgs/nixos/tests/lxd/preseed.nix71
-rw-r--r--nixpkgs/nixos/tests/lxd/ui.nix35
-rw-r--r--nixpkgs/nixos/tests/lxd/virtual-machine.nix64
-rw-r--r--nixpkgs/nixos/tests/maddy/default.nix6
-rw-r--r--nixpkgs/nixos/tests/maddy/tls.nix94
-rw-r--r--nixpkgs/nixos/tests/maddy/unencrypted.nix60
-rw-r--r--nixpkgs/nixos/tests/maestral.nix73
-rw-r--r--nixpkgs/nixos/tests/magic-wormhole-mailbox-server.nix38
-rw-r--r--nixpkgs/nixos/tests/magnetico.nix41
-rw-r--r--nixpkgs/nixos/tests/mailcatcher.nix35
-rw-r--r--nixpkgs/nixos/tests/mailhog.nix24
-rw-r--r--nixpkgs/nixos/tests/mailman.nix73
-rw-r--r--nixpkgs/nixos/tests/make-test-python.nix9
-rw-r--r--nixpkgs/nixos/tests/man.nix100
-rw-r--r--nixpkgs/nixos/tests/mate.nix58
-rw-r--r--nixpkgs/nixos/tests/matomo.nix50
-rw-r--r--nixpkgs/nixos/tests/matrix/appservice-irc.nix225
-rw-r--r--nixpkgs/nixos/tests/matrix/conduit.nix97
-rw-r--r--nixpkgs/nixos/tests/matrix/dendrite.nix101
-rw-r--r--nixpkgs/nixos/tests/matrix/mjolnir.nix176
-rw-r--r--nixpkgs/nixos/tests/matrix/pantalaimon.nix88
-rw-r--r--nixpkgs/nixos/tests/matrix/synapse-workers.nix50
-rw-r--r--nixpkgs/nixos/tests/matrix/synapse.nix234
-rw-r--r--nixpkgs/nixos/tests/mattermost.nix140
-rw-r--r--nixpkgs/nixos/tests/mediamtx.nix57
-rw-r--r--nixpkgs/nixos/tests/mediatomb.nix44
-rw-r--r--nixpkgs/nixos/tests/mediawiki.nix93
-rw-r--r--nixpkgs/nixos/tests/meilisearch.nix61
-rw-r--r--nixpkgs/nixos/tests/memcached.nix24
-rw-r--r--nixpkgs/nixos/tests/merecat.nix28
-rw-r--r--nixpkgs/nixos/tests/metabase.nix19
-rw-r--r--nixpkgs/nixos/tests/mimir.nix50
-rw-r--r--nixpkgs/nixos/tests/mindustry.nix28
-rw-r--r--nixpkgs/nixos/tests/minecraft-server.nix40
-rw-r--r--nixpkgs/nixos/tests/minecraft.nix28
-rw-r--r--nixpkgs/nixos/tests/minidlna.nix40
-rw-r--r--nixpkgs/nixos/tests/miniflux.nix87
-rw-r--r--nixpkgs/nixos/tests/minio.nix72
-rw-r--r--nixpkgs/nixos/tests/miriway.nix125
-rw-r--r--nixpkgs/nixos/tests/misc.nix164
-rw-r--r--nixpkgs/nixos/tests/mobilizon.nix46
-rw-r--r--nixpkgs/nixos/tests/mod_perl.nix53
-rw-r--r--nixpkgs/nixos/tests/molly-brown.nix71
-rw-r--r--nixpkgs/nixos/tests/mongodb.nix50
-rw-r--r--nixpkgs/nixos/tests/moodle.nix22
-rw-r--r--nixpkgs/nixos/tests/moonraker.nix45
-rw-r--r--nixpkgs/nixos/tests/moosefs.nix89
-rw-r--r--nixpkgs/nixos/tests/morty.nix30
-rw-r--r--nixpkgs/nixos/tests/mosquitto.nix213
-rw-r--r--nixpkgs/nixos/tests/mpd.nix134
-rw-r--r--nixpkgs/nixos/tests/mpich-example.c21
-rw-r--r--nixpkgs/nixos/tests/mpv.nix26
-rw-r--r--nixpkgs/nixos/tests/mtp.nix109
-rw-r--r--nixpkgs/nixos/tests/multipass.nix37
-rw-r--r--nixpkgs/nixos/tests/mumble.nix89
-rw-r--r--nixpkgs/nixos/tests/munin.nix44
-rw-r--r--nixpkgs/nixos/tests/musescore.nix106
-rw-r--r--nixpkgs/nixos/tests/mutable-users.nix73
-rw-r--r--nixpkgs/nixos/tests/mxisd.nix21
-rw-r--r--nixpkgs/nixos/tests/mysql/common.nix10
-rw-r--r--nixpkgs/nixos/tests/mysql/mariadb-galera.nix250
-rw-r--r--nixpkgs/nixos/tests/mysql/mysql-autobackup.nix53
-rw-r--r--nixpkgs/nixos/tests/mysql/mysql-backup.nix71
-rw-r--r--nixpkgs/nixos/tests/mysql/mysql-replication.nix101
-rw-r--r--nixpkgs/nixos/tests/mysql/mysql.nix151
-rw-r--r--nixpkgs/nixos/tests/mysql/testdb.sql11
-rw-r--r--nixpkgs/nixos/tests/n8n.nix25
-rw-r--r--nixpkgs/nixos/tests/nagios.nix116
-rw-r--r--nixpkgs/nixos/tests/nar-serve.nix50
-rw-r--r--nixpkgs/nixos/tests/nat.nix115
-rw-r--r--nixpkgs/nixos/tests/nats.nix63
-rw-r--r--nixpkgs/nixos/tests/navidrome.nix12
-rw-r--r--nixpkgs/nixos/tests/nbd.nix103
-rw-r--r--nixpkgs/nixos/tests/ncdns.nix93
-rw-r--r--nixpkgs/nixos/tests/ndppd.nix60
-rw-r--r--nixpkgs/nixos/tests/nebula.nix308
-rw-r--r--nixpkgs/nixos/tests/neo4j.nix26
-rw-r--r--nixpkgs/nixos/tests/netbird.nix21
-rw-r--r--nixpkgs/nixos/tests/netdata.nix41
-rw-r--r--nixpkgs/nixos/tests/networking-proxy.nix134
-rw-r--r--nixpkgs/nixos/tests/networking.nix1065
-rw-r--r--nixpkgs/nixos/tests/nextcloud/basic.nix118
-rw-r--r--nixpkgs/nixos/tests/nextcloud/default.nix25
-rw-r--r--nixpkgs/nixos/tests/nextcloud/with-declarative-redis-and-secrets.nix122
-rw-r--r--nixpkgs/nixos/tests/nextcloud/with-mysql-and-memcached.nix79
-rw-r--r--nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix96
-rw-r--r--nixpkgs/nixos/tests/nexus.nix32
-rw-r--r--nixpkgs/nixos/tests/nfs/default.nix9
-rw-r--r--nixpkgs/nixos/tests/nfs/kerberos.nix131
-rw-r--r--nixpkgs/nixos/tests/nfs/simple.nix95
-rw-r--r--nixpkgs/nixos/tests/nghttpx.nix61
-rw-r--r--nixpkgs/nixos/tests/nginx-auth.nix47
-rw-r--r--nixpkgs/nixos/tests/nginx-etag.nix88
-rw-r--r--nixpkgs/nixos/tests/nginx-globalredirect.nix24
-rw-r--r--nixpkgs/nixos/tests/nginx-http3.nix97
-rw-r--r--nixpkgs/nixos/tests/nginx-modsecurity.nix39
-rw-r--r--nixpkgs/nixos/tests/nginx-njs.nix27
-rw-r--r--nixpkgs/nixos/tests/nginx-proxyprotocol/_.test.nix.cert.pem20
-rw-r--r--nixpkgs/nixos/tests/nginx-proxyprotocol/_.test.nix.key.pem27
-rw-r--r--nixpkgs/nixos/tests/nginx-proxyprotocol/ca.cert.pem20
-rw-r--r--nixpkgs/nixos/tests/nginx-proxyprotocol/ca.key.pem27
-rw-r--r--nixpkgs/nixos/tests/nginx-proxyprotocol/default.nix148
-rw-r--r--nixpkgs/nixos/tests/nginx-proxyprotocol/generate-certs.nix30
-rw-r--r--nixpkgs/nixos/tests/nginx-proxyprotocol/snakeoil-certs.nix14
-rw-r--r--nixpkgs/nixos/tests/nginx-pubhtml.nix21
-rw-r--r--nixpkgs/nixos/tests/nginx-sso.nix48
-rw-r--r--nixpkgs/nixos/tests/nginx-status-page.nix72
-rw-r--r--nixpkgs/nixos/tests/nginx-tmpdir.nix60
-rw-r--r--nixpkgs/nixos/tests/nginx-unix-socket.nix27
-rw-r--r--nixpkgs/nixos/tests/nginx-variants.nix33
-rw-r--r--nixpkgs/nixos/tests/nginx.nix137
-rw-r--r--nixpkgs/nixos/tests/nitter.nix18
-rw-r--r--nixpkgs/nixos/tests/nix-ld.nix17
-rw-r--r--nixpkgs/nixos/tests/nix-serve-ssh.nix45
-rw-r--r--nixpkgs/nixos/tests/nix-serve.nix22
-rw-r--r--nixpkgs/nixos/tests/nixops/default.nix114
-rw-r--r--nixpkgs/nixos/tests/nixops/legacy/base-configuration.nix31
-rw-r--r--nixpkgs/nixos/tests/nixops/legacy/nixops.nix15
-rw-r--r--nixpkgs/nixos/tests/nixos-generate-config.nix41
-rw-r--r--nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix73
-rw-r--r--nixpkgs/nixos/tests/nixos-rebuild-specialisations.nix120
-rw-r--r--nixpkgs/nixos/tests/nixos-test-driver/busybox.nix16
-rw-r--r--nixpkgs/nixos/tests/nixos-test-driver/extra-python-packages.nix13
-rw-r--r--nixpkgs/nixos/tests/nixos-test-driver/lib-extend.nix31
-rw-r--r--nixpkgs/nixos/tests/nixos-test-driver/node-name.nix33
-rw-r--r--nixpkgs/nixos/tests/nixos-test-driver/timeout.nix15
-rw-r--r--nixpkgs/nixos/tests/node-red.nix31
-rw-r--r--nixpkgs/nixos/tests/nomad.nix97
-rw-r--r--nixpkgs/nixos/tests/non-default-filesystems.nix172
-rw-r--r--nixpkgs/nixos/tests/non-switchable-system.nix15
-rw-r--r--nixpkgs/nixos/tests/noto-fonts-cjk-qt-default-weight.nix30
-rw-r--r--nixpkgs/nixos/tests/noto-fonts.nix42
-rw-r--r--nixpkgs/nixos/tests/novacomd.nix28
-rw-r--r--nixpkgs/nixos/tests/nscd.nix142
-rw-r--r--nixpkgs/nixos/tests/nsd.nix109
-rw-r--r--nixpkgs/nixos/tests/ntfy-sh.nix26
-rw-r--r--nixpkgs/nixos/tests/nzbget.nix46
-rw-r--r--nixpkgs/nixos/tests/nzbhydra2.nix14
-rw-r--r--nixpkgs/nixos/tests/oci-containers.nix42
-rw-r--r--nixpkgs/nixos/tests/ocsinventory-agent.nix33
-rw-r--r--nixpkgs/nixos/tests/octoprint.nix61
-rw-r--r--nixpkgs/nixos/tests/odoo.nix26
-rw-r--r--nixpkgs/nixos/tests/oh-my-zsh.nix18
-rw-r--r--nixpkgs/nixos/tests/ombi.nix16
-rw-r--r--nixpkgs/nixos/tests/openarena.nix71
-rw-r--r--nixpkgs/nixos/tests/openldap.nix156
-rw-r--r--nixpkgs/nixos/tests/openresty-lua.nix101
-rw-r--r--nixpkgs/nixos/tests/opensearch.nix47
-rw-r--r--nixpkgs/nixos/tests/opensmtpd-rspamd.nix141
-rw-r--r--nixpkgs/nixos/tests/opensmtpd.nix125
-rw-r--r--nixpkgs/nixos/tests/opensnitch.nix62
-rw-r--r--nixpkgs/nixos/tests/openssh.nix187
-rw-r--r--nixpkgs/nixos/tests/openstack-image.nix98
-rw-r--r--nixpkgs/nixos/tests/opentabletdriver.nix30
-rw-r--r--nixpkgs/nixos/tests/opentelemetry-collector.nix76
-rw-r--r--nixpkgs/nixos/tests/openvscode-server.nix22
-rw-r--r--nixpkgs/nixos/tests/orangefs.nix82
-rw-r--r--nixpkgs/nixos/tests/os-prober.nix133
-rw-r--r--nixpkgs/nixos/tests/osquery.nix52
-rw-r--r--nixpkgs/nixos/tests/osrm-backend.nix57
-rw-r--r--nixpkgs/nixos/tests/outline.nix54
-rw-r--r--nixpkgs/nixos/tests/overlayfs.nix47
-rw-r--r--nixpkgs/nixos/tests/owncast.nix42
-rw-r--r--nixpkgs/nixos/tests/pacemaker.nix110
-rw-r--r--nixpkgs/nixos/tests/packagekit.nix25
-rw-r--r--nixpkgs/nixos/tests/pam/pam-file-contents.nix26
-rw-r--r--nixpkgs/nixos/tests/pam/pam-oath-login.nix108
-rw-r--r--nixpkgs/nixos/tests/pam/pam-u2f.nix26
-rw-r--r--nixpkgs/nixos/tests/pam/pam-ussh.nix70
-rw-r--r--nixpkgs/nixos/tests/pam/test_chfn.py28
-rw-r--r--nixpkgs/nixos/tests/pam/zfs-key.nix83
-rw-r--r--nixpkgs/nixos/tests/pantheon.nix85
-rw-r--r--nixpkgs/nixos/tests/paperless.nix89
-rw-r--r--nixpkgs/nixos/tests/parsedmarc/default.nix230
-rw-r--r--nixpkgs/nixos/tests/pass-secret-service.nix69
-rw-r--r--nixpkgs/nixos/tests/patroni.nix206
-rw-r--r--nixpkgs/nixos/tests/pdns-recursor.nix15
-rw-r--r--nixpkgs/nixos/tests/peerflix.nix23
-rw-r--r--nixpkgs/nixos/tests/peroxide.nix16
-rw-r--r--nixpkgs/nixos/tests/pgadmin4.nix53
-rw-r--r--nixpkgs/nixos/tests/pgbouncer.nix59
-rw-r--r--nixpkgs/nixos/tests/pgjwt.nix34
-rw-r--r--nixpkgs/nixos/tests/pgmanage.nix41
-rw-r--r--nixpkgs/nixos/tests/phosh.nix70
-rw-r--r--nixpkgs/nixos/tests/photoprism.nix23
-rw-r--r--nixpkgs/nixos/tests/php/default.nix16
-rw-r--r--nixpkgs/nixos/tests/php/fpm.nix59
-rw-r--r--nixpkgs/nixos/tests/php/httpd.nix34
-rw-r--r--nixpkgs/nixos/tests/php/pcre.nix52
-rw-r--r--nixpkgs/nixos/tests/pict-rs.nix17
-rw-r--r--nixpkgs/nixos/tests/pinnwand.nix93
-rw-r--r--nixpkgs/nixos/tests/plantuml-server.nix20
-rw-r--r--nixpkgs/nixos/tests/plasma-bigscreen.nix35
-rw-r--r--nixpkgs/nixos/tests/plasma5-systemd-start.nix40
-rw-r--r--nixpkgs/nixos/tests/plasma5.nix67
-rw-r--r--nixpkgs/nixos/tests/plausible.nix52
-rw-r--r--nixpkgs/nixos/tests/please.nix66
-rw-r--r--nixpkgs/nixos/tests/pleroma.nix254
-rw-r--r--nixpkgs/nixos/tests/plikd.nix27
-rw-r--r--nixpkgs/nixos/tests/plotinus.nix28
-rw-r--r--nixpkgs/nixos/tests/podgrab.nix34
-rw-r--r--nixpkgs/nixos/tests/podman/default.nix183
-rw-r--r--nixpkgs/nixos/tests/podman/tls-ghostunnel.nix147
-rw-r--r--nixpkgs/nixos/tests/polaris.nix29
-rw-r--r--nixpkgs/nixos/tests/pomerium.nix109
-rw-r--r--nixpkgs/nixos/tests/portunus.nix18
-rw-r--r--nixpkgs/nixos/tests/postfix-raise-smtpd-tls-security-level.nix41
-rw-r--r--nixpkgs/nixos/tests/postfix.nix77
-rw-r--r--nixpkgs/nixos/tests/postfixadmin.nix31
-rw-r--r--nixpkgs/nixos/tests/postgis.nix29
-rw-r--r--nixpkgs/nixos/tests/postgresql-jit.nix48
-rw-r--r--nixpkgs/nixos/tests/postgresql-wal-receiver.nix119
-rw-r--r--nixpkgs/nixos/tests/postgresql.nix224
-rw-r--r--nixpkgs/nixos/tests/power-profiles-daemon.nix46
-rw-r--r--nixpkgs/nixos/tests/powerdns-admin.nix139
-rw-r--r--nixpkgs/nixos/tests/powerdns.nix62
-rw-r--r--nixpkgs/nixos/tests/pppd.nix64
-rw-r--r--nixpkgs/nixos/tests/predictable-interface-names.nix60
-rw-r--r--nixpkgs/nixos/tests/printing.nix122
-rw-r--r--nixpkgs/nixos/tests/privoxy.nix113
-rw-r--r--nixpkgs/nixos/tests/prometheus-exporters.nix1692
-rw-r--r--nixpkgs/nixos/tests/prometheus.nix349
-rw-r--r--nixpkgs/nixos/tests/promscale.nix60
-rw-r--r--nixpkgs/nixos/tests/prowlarr.nix16
-rw-r--r--nixpkgs/nixos/tests/proxy.nix90
-rw-r--r--nixpkgs/nixos/tests/pt2-clone.nix35
-rw-r--r--nixpkgs/nixos/tests/public-inbox.nix230
-rw-r--r--nixpkgs/nixos/tests/pufferpanel.nix74
-rw-r--r--nixpkgs/nixos/tests/pulseaudio.nix80
-rw-r--r--nixpkgs/nixos/tests/pykms.nix14
-rw-r--r--nixpkgs/nixos/tests/qboot.nix13
-rw-r--r--nixpkgs/nixos/tests/qemu-vm-external-disk-image.nix73
-rw-r--r--nixpkgs/nixos/tests/qemu-vm-restrictnetwork.nix36
-rw-r--r--nixpkgs/nixos/tests/qemu-vm-volatile-root.nix17
-rw-r--r--nixpkgs/nixos/tests/qgis.nix30
-rw-r--r--nixpkgs/nixos/tests/qownnotes.nix70
-rw-r--r--nixpkgs/nixos/tests/quake3.nix95
-rw-r--r--nixpkgs/nixos/tests/quorum.nix102
-rw-r--r--nixpkgs/nixos/tests/rabbitmq.nix61
-rw-r--r--nixpkgs/nixos/tests/radarr.nix16
-rw-r--r--nixpkgs/nixos/tests/radicale.nix95
-rw-r--r--nixpkgs/nixos/tests/ragnarwm.nix32
-rw-r--r--nixpkgs/nixos/tests/rasdaemon.nix34
-rw-r--r--nixpkgs/nixos/tests/readarr.nix14
-rw-r--r--nixpkgs/nixos/tests/redis.nix44
-rw-r--r--nixpkgs/nixos/tests/redmine.nix44
-rw-r--r--nixpkgs/nixos/tests/restart-by-activation-script.nix73
-rw-r--r--nixpkgs/nixos/tests/restic.nix195
-rw-r--r--nixpkgs/nixos/tests/retroarch.nix49
-rw-r--r--nixpkgs/nixos/tests/rkvm/cert.pem18
-rw-r--r--nixpkgs/nixos/tests/rkvm/default.nix104
-rw-r--r--nixpkgs/nixos/tests/rkvm/key.pem28
-rw-r--r--nixpkgs/nixos/tests/robustirc-bridge.nix29
-rw-r--r--nixpkgs/nixos/tests/rosenpass.nix217
-rw-r--r--nixpkgs/nixos/tests/roundcube.nix31
-rw-r--r--nixpkgs/nixos/tests/rshim.nix25
-rw-r--r--nixpkgs/nixos/tests/rspamd.nix313
-rw-r--r--nixpkgs/nixos/tests/rss2email.nix66
-rw-r--r--nixpkgs/nixos/tests/rstudio-server.nix24
-rw-r--r--nixpkgs/nixos/tests/rsyncd.nix36
-rw-r--r--nixpkgs/nixos/tests/rsyslogd.nix40
-rw-r--r--nixpkgs/nixos/tests/rxe.nix47
-rw-r--r--nixpkgs/nixos/tests/sabnzbd.nix25
-rw-r--r--nixpkgs/nixos/tests/samba-wsdd.nix42
-rw-r--r--nixpkgs/nixos/tests/samba.nix46
-rw-r--r--nixpkgs/nixos/tests/sanoid.nix130
-rw-r--r--nixpkgs/nixos/tests/scaphandre.nix18
-rw-r--r--nixpkgs/nixos/tests/schleuder.nix126
-rw-r--r--nixpkgs/nixos/tests/sddm.nix67
-rw-r--r--nixpkgs/nixos/tests/seafile.nix115
-rw-r--r--nixpkgs/nixos/tests/searx.nix114
-rw-r--r--nixpkgs/nixos/tests/service-runner.nix36
-rw-r--r--nixpkgs/nixos/tests/sftpgo.nix382
-rw-r--r--nixpkgs/nixos/tests/sfxr-qt.nix32
-rw-r--r--nixpkgs/nixos/tests/sgt-puzzles.nix34
-rw-r--r--nixpkgs/nixos/tests/shadow.nix172
-rw-r--r--nixpkgs/nixos/tests/shadowsocks/common.nix85
-rw-r--r--nixpkgs/nixos/tests/shadowsocks/default.nix16
-rw-r--r--nixpkgs/nixos/tests/shattered-pixel-dungeon.nix28
-rw-r--r--nixpkgs/nixos/tests/shiori.nix80
-rw-r--r--nixpkgs/nixos/tests/signal-desktop.nix69
-rw-r--r--nixpkgs/nixos/tests/simple.nix17
-rw-r--r--nixpkgs/nixos/tests/sing-box.nix48
-rw-r--r--nixpkgs/nixos/tests/slimserver.nix47
-rw-r--r--nixpkgs/nixos/tests/slurm.nix168
-rw-r--r--nixpkgs/nixos/tests/smokeping.nix36
-rw-r--r--nixpkgs/nixos/tests/snapcast.nix90
-rw-r--r--nixpkgs/nixos/tests/snapper.nix35
-rw-r--r--nixpkgs/nixos/tests/soapui.nix24
-rw-r--r--nixpkgs/nixos/tests/soft-serve.nix102
-rw-r--r--nixpkgs/nixos/tests/sogo.nix58
-rw-r--r--nixpkgs/nixos/tests/solanum.nix97
-rw-r--r--nixpkgs/nixos/tests/sonarr.nix16
-rw-r--r--nixpkgs/nixos/tests/sourcehut.nix252
-rw-r--r--nixpkgs/nixos/tests/spacecookie.nix56
-rw-r--r--nixpkgs/nixos/tests/spark/default.nix28
-rw-r--r--nixpkgs/nixos/tests/spark/spark_sample.py40
-rw-r--r--nixpkgs/nixos/tests/sqlite3-to-mysql.nix65
-rw-r--r--nixpkgs/nixos/tests/ssh-audit.nix103
-rw-r--r--nixpkgs/nixos/tests/ssh-keys.nix15
-rw-r--r--nixpkgs/nixos/tests/sslh.nix75
-rw-r--r--nixpkgs/nixos/tests/sssd-ldap.nix173
-rw-r--r--nixpkgs/nixos/tests/sssd.nix18
-rw-r--r--nixpkgs/nixos/tests/stalwart-mail.nix117
-rw-r--r--nixpkgs/nixos/tests/starship.nix42
-rw-r--r--nixpkgs/nixos/tests/step-ca.nix77
-rw-r--r--nixpkgs/nixos/tests/stratis/default.nix8
-rw-r--r--nixpkgs/nixos/tests/stratis/encryption.nix32
-rw-r--r--nixpkgs/nixos/tests/stratis/simple.nix39
-rw-r--r--nixpkgs/nixos/tests/strongswan-swanctl.nix148
-rw-r--r--nixpkgs/nixos/tests/stunnel.nix179
-rw-r--r--nixpkgs/nixos/tests/sudo-rs.nix101
-rw-r--r--nixpkgs/nixos/tests/sudo.nix103
-rw-r--r--nixpkgs/nixos/tests/swap-file-btrfs.nix50
-rw-r--r--nixpkgs/nixos/tests/swap-partition.nix48
-rw-r--r--nixpkgs/nixos/tests/swap-random-encryption.nix80
-rw-r--r--nixpkgs/nixos/tests/sway.nix193
-rw-r--r--nixpkgs/nixos/tests/switch-test.nix1409
-rw-r--r--nixpkgs/nixos/tests/sympa.nix35
-rw-r--r--nixpkgs/nixos/tests/syncthing-init.nix31
-rw-r--r--nixpkgs/nixos/tests/syncthing-many-devices.nix203
-rw-r--r--nixpkgs/nixos/tests/syncthing-no-settings.nix18
-rw-r--r--nixpkgs/nixos/tests/syncthing-relay.nix26
-rw-r--r--nixpkgs/nixos/tests/syncthing.nix65
-rw-r--r--nixpkgs/nixos/tests/systemd-analyze.nix46
-rw-r--r--nixpkgs/nixos/tests/systemd-binfmt.nix90
-rw-r--r--nixpkgs/nixos/tests/systemd-boot-ovmf-broken-fat-driver.patch25
-rw-r--r--nixpkgs/nixos/tests/systemd-boot.nix325
-rw-r--r--nixpkgs/nixos/tests/systemd-bpf.nix42
-rw-r--r--nixpkgs/nixos/tests/systemd-confinement.nix184
-rw-r--r--nixpkgs/nixos/tests/systemd-coredump.nix44
-rw-r--r--nixpkgs/nixos/tests/systemd-credentials-tpm2.nix69
-rw-r--r--nixpkgs/nixos/tests/systemd-cryptenroll.nix41
-rw-r--r--nixpkgs/nixos/tests/systemd-escaping.nix45
-rw-r--r--nixpkgs/nixos/tests/systemd-homed.nix99
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-bridge.nix63
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-btrfs-raid.nix47
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-luks-fido2.nix48
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-luks-keyfile.nix56
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-luks-password.nix56
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-luks-tpm2.nix50
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-modprobe.nix24
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-networkd-ssh.nix60
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-networkd.nix93
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-simple.nix48
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-swraid.nix66
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-vconsole.nix42
-rw-r--r--nixpkgs/nixos/tests/systemd-initrd-vlan.nix59
-rw-r--r--nixpkgs/nixos/tests/systemd-journal.nix22
-rw-r--r--nixpkgs/nixos/tests/systemd-machinectl.nix114
-rw-r--r--nixpkgs/nixos/tests/systemd-misc.nix62
-rw-r--r--nixpkgs/nixos/tests/systemd-networkd-dhcpserver-static-leases.nix81
-rw-r--r--nixpkgs/nixos/tests/systemd-networkd-dhcpserver.nix109
-rw-r--r--nixpkgs/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix335
-rw-r--r--nixpkgs/nixos/tests/systemd-networkd-vrf.nix182
-rw-r--r--nixpkgs/nixos/tests/systemd-networkd.nix123
-rw-r--r--nixpkgs/nixos/tests/systemd-no-tainted.nix14
-rw-r--r--nixpkgs/nixos/tests/systemd-nspawn-configfile.nix128
-rw-r--r--nixpkgs/nixos/tests/systemd-nspawn.nix51
-rw-r--r--nixpkgs/nixos/tests/systemd-oomd.nix54
-rw-r--r--nixpkgs/nixos/tests/systemd-portabled.nix51
-rw-r--r--nixpkgs/nixos/tests/systemd-repart.nix182
-rw-r--r--nixpkgs/nixos/tests/systemd-shutdown.nix27
-rw-r--r--nixpkgs/nixos/tests/systemd-sysupdate.nix66
-rw-r--r--nixpkgs/nixos/tests/systemd-timesyncd.nix53
-rw-r--r--nixpkgs/nixos/tests/systemd-user-tmpfiles-rules.nix35
-rw-r--r--nixpkgs/nixos/tests/systemd-userdbd.nix32
-rw-r--r--nixpkgs/nixos/tests/systemd.nix205
-rw-r--r--nixpkgs/nixos/tests/tandoor-recipes.nix41
-rw-r--r--nixpkgs/nixos/tests/tang.nix81
-rw-r--r--nixpkgs/nixos/tests/taskserver.nix275
-rw-r--r--nixpkgs/nixos/tests/tayga.nix235
-rw-r--r--nixpkgs/nixos/tests/teeworlds.nix55
-rw-r--r--nixpkgs/nixos/tests/telegraf.nix33
-rw-r--r--nixpkgs/nixos/tests/teleport.nix115
-rw-r--r--nixpkgs/nixos/tests/terminal-emulators.nix216
-rw-r--r--nixpkgs/nixos/tests/thelounge.nix31
-rw-r--r--nixpkgs/nixos/tests/tiddlywiki.nix69
-rw-r--r--nixpkgs/nixos/tests/tigervnc.nix53
-rw-r--r--nixpkgs/nixos/tests/timescaledb.nix93
-rw-r--r--nixpkgs/nixos/tests/timezone.nix50
-rw-r--r--nixpkgs/nixos/tests/tinc/default.nix139
-rw-r--r--nixpkgs/nixos/tests/tinc/snakeoil-keys.nix157
-rw-r--r--nixpkgs/nixos/tests/tinydns.nix40
-rw-r--r--nixpkgs/nixos/tests/tinyproxy.nix20
-rw-r--r--nixpkgs/nixos/tests/tinywl.nix59
-rw-r--r--nixpkgs/nixos/tests/tmate-ssh-server.nix74
-rw-r--r--nixpkgs/nixos/tests/tomcat.nix26
-rw-r--r--nixpkgs/nixos/tests/tor.nix23
-rw-r--r--nixpkgs/nixos/tests/tracee.nix68
-rw-r--r--nixpkgs/nixos/tests/traefik.nix98
-rw-r--r--nixpkgs/nixos/tests/trafficserver.nix178
-rw-r--r--nixpkgs/nixos/tests/transmission.nix24
-rw-r--r--nixpkgs/nixos/tests/trezord.nix19
-rw-r--r--nixpkgs/nixos/tests/trickster.nix37
-rw-r--r--nixpkgs/nixos/tests/trilium-server.nix53
-rw-r--r--nixpkgs/nixos/tests/tsja.nix32
-rw-r--r--nixpkgs/nixos/tests/tsm-client-gui.nix57
-rw-r--r--nixpkgs/nixos/tests/tuptime.nix29
-rw-r--r--nixpkgs/nixos/tests/turbovnc-headless-server.nix172
-rw-r--r--nixpkgs/nixos/tests/tuxguitar.nix24
-rw-r--r--nixpkgs/nixos/tests/twingate.nix14
-rw-r--r--nixpkgs/nixos/tests/txredisapi.nix29
-rw-r--r--nixpkgs/nixos/tests/typesense.nix23
-rw-r--r--nixpkgs/nixos/tests/ucarp.nix66
-rw-r--r--nixpkgs/nixos/tests/udisks2.nix72
-rw-r--r--nixpkgs/nixos/tests/ulogd/ulogd.nix56
-rw-r--r--nixpkgs/nixos/tests/ulogd/ulogd.py48
-rw-r--r--nixpkgs/nixos/tests/unbound.nix315
-rw-r--r--nixpkgs/nixos/tests/unifi.nix38
-rw-r--r--nixpkgs/nixos/tests/upnp.nix96
-rw-r--r--nixpkgs/nixos/tests/uptermd.nix65
-rw-r--r--nixpkgs/nixos/tests/uptime-kuma.nix17
-rw-r--r--nixpkgs/nixos/tests/usbguard.nix62
-rw-r--r--nixpkgs/nixos/tests/user-activation-scripts.nix36
-rw-r--r--nixpkgs/nixos/tests/user-expiry.nix70
-rw-r--r--nixpkgs/nixos/tests/user-home-mode.nix27
-rw-r--r--nixpkgs/nixos/tests/uwsgi.nix81
-rw-r--r--nixpkgs/nixos/tests/v2ray.nix91
-rw-r--r--nixpkgs/nixos/tests/varnish.nix55
-rw-r--r--nixpkgs/nixos/tests/vault-agent.nix52
-rw-r--r--nixpkgs/nixos/tests/vault-dev.nix35
-rw-r--r--nixpkgs/nixos/tests/vault-postgresql.nix69
-rw-r--r--nixpkgs/nixos/tests/vault.nix25
-rw-r--r--nixpkgs/nixos/tests/vaultwarden.nix198
-rw-r--r--nixpkgs/nixos/tests/vector.nix37
-rw-r--r--nixpkgs/nixos/tests/vengi-tools.nix27
-rw-r--r--nixpkgs/nixos/tests/victoriametrics.nix33
-rw-r--r--nixpkgs/nixos/tests/vikunja.nix64
-rw-r--r--nixpkgs/nixos/tests/virtualbox.nix522
-rw-r--r--nixpkgs/nixos/tests/vscode-remote-ssh.nix124
-rw-r--r--nixpkgs/nixos/tests/vscodium.nix79
-rw-r--r--nixpkgs/nixos/tests/vsftpd.nix42
-rw-r--r--nixpkgs/nixos/tests/warzone2100.nix26
-rw-r--r--nixpkgs/nixos/tests/wasabibackend.nix38
-rw-r--r--nixpkgs/nixos/tests/web-apps/gotosocial.nix28
-rw-r--r--nixpkgs/nixos/tests/web-apps/healthchecks.nix42
-rw-r--r--nixpkgs/nixos/tests/web-apps/mastodon/default.nix9
-rw-r--r--nixpkgs/nixos/tests/web-apps/mastodon/remote-postgresql.nix162
-rw-r--r--nixpkgs/nixos/tests/web-apps/mastodon/script.nix53
-rw-r--r--nixpkgs/nixos/tests/web-apps/mastodon/standard.nix96
-rw-r--r--nixpkgs/nixos/tests/web-apps/monica.nix33
-rw-r--r--nixpkgs/nixos/tests/web-apps/netbox-upgrade.nix87
-rw-r--r--nixpkgs/nixos/tests/web-apps/netbox.nix318
-rw-r--r--nixpkgs/nixos/tests/web-apps/nifi.nix30
-rw-r--r--nixpkgs/nixos/tests/web-apps/peering-manager.nix40
-rw-r--r--nixpkgs/nixos/tests/web-apps/peertube.nix139
-rw-r--r--nixpkgs/nixos/tests/web-apps/phylactery.nix20
-rw-r--r--nixpkgs/nixos/tests/web-apps/pixelfed/default.nix8
-rw-r--r--nixpkgs/nixos/tests/web-apps/pixelfed/standard.nix38
-rw-r--r--nixpkgs/nixos/tests/web-apps/snipe-it.nix101
-rw-r--r--nixpkgs/nixos/tests/web-apps/writefreely.nix44
-rw-r--r--nixpkgs/nixos/tests/web-servers/agate.nix27
-rw-r--r--nixpkgs/nixos/tests/web-servers/stargazer.nix31
-rw-r--r--nixpkgs/nixos/tests/web-servers/static-web-server.nix32
-rw-r--r--nixpkgs/nixos/tests/web-servers/unit-php.nix52
-rw-r--r--nixpkgs/nixos/tests/webhook.nix65
-rw-r--r--nixpkgs/nixos/tests/wiki-js.nix153
-rw-r--r--nixpkgs/nixos/tests/wine.nix51
-rw-r--r--nixpkgs/nixos/tests/wireguard/basic.nix73
-rw-r--r--nixpkgs/nixos/tests/wireguard/default.nix28
-rw-r--r--nixpkgs/nixos/tests/wireguard/generated.nix63
-rw-r--r--nixpkgs/nixos/tests/wireguard/make-peer.nix23
-rw-r--r--nixpkgs/nixos/tests/wireguard/namespaces.nix83
-rw-r--r--nixpkgs/nixos/tests/wireguard/snakeoil-keys.nix12
-rw-r--r--nixpkgs/nixos/tests/wireguard/wg-quick.nix80
-rw-r--r--nixpkgs/nixos/tests/without-nix.nix32
-rw-r--r--nixpkgs/nixos/tests/wmderland.nix54
-rw-r--r--nixpkgs/nixos/tests/wordpress.nix101
-rw-r--r--nixpkgs/nixos/tests/wpa_supplicant.nix210
-rw-r--r--nixpkgs/nixos/tests/wrappers.nix112
-rw-r--r--nixpkgs/nixos/tests/xandikos.nix70
-rw-r--r--nixpkgs/nixos/tests/xautolock.nix22
-rw-r--r--nixpkgs/nixos/tests/xfce.nix75
-rw-r--r--nixpkgs/nixos/tests/xmonad-xdg-autostart.nix35
-rw-r--r--nixpkgs/nixos/tests/xmonad.nix117
-rw-r--r--nixpkgs/nixos/tests/xmpp/ejabberd.nix278
-rw-r--r--nixpkgs/nixos/tests/xmpp/prosody-mysql.nix124
-rw-r--r--nixpkgs/nixos/tests/xmpp/prosody.nix93
-rw-r--r--nixpkgs/nixos/tests/xmpp/xmpp-sendmessage.nix90
-rw-r--r--nixpkgs/nixos/tests/xpadneo.nix18
-rw-r--r--nixpkgs/nixos/tests/xrdp.nix47
-rw-r--r--nixpkgs/nixos/tests/xss-lock.nix40
-rw-r--r--nixpkgs/nixos/tests/xterm.nix23
-rw-r--r--nixpkgs/nixos/tests/xxh.nix67
-rw-r--r--nixpkgs/nixos/tests/yabar.nix27
-rw-r--r--nixpkgs/nixos/tests/yggdrasil.nix172
-rw-r--r--nixpkgs/nixos/tests/zammad.nix60
-rw-r--r--nixpkgs/nixos/tests/zeronet-conservancy.nix25
-rw-r--r--nixpkgs/nixos/tests/zfs.nix258
-rw-r--r--nixpkgs/nixos/tests/zigbee2mqtt.nix23
-rw-r--r--nixpkgs/nixos/tests/zoneminder.nix23
-rw-r--r--nixpkgs/nixos/tests/zookeeper.nix46
-rw-r--r--nixpkgs/nixos/tests/zram-generator.nix42
-rw-r--r--nixpkgs/nixos/tests/zrepl.nix68
-rw-r--r--nixpkgs/nixos/tests/zsh-history.nix35
-rw-r--r--nixpkgs/nixos/tests/zwave-js.nix31
3093 files changed, 394917 insertions, 0 deletions
diff --git a/nixpkgs/nixos/COPYING b/nixpkgs/nixos/COPYING
new file mode 100644
index 000000000000..c9b44cb8aaeb
--- /dev/null
+++ b/nixpkgs/nixos/COPYING
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/nixpkgs/nixos/README.md b/nixpkgs/nixos/README.md
new file mode 100644
index 000000000000..07e82bf0ad93
--- /dev/null
+++ b/nixpkgs/nixos/README.md
@@ -0,0 +1,110 @@
+# NixOS
+
+NixOS is a Linux distribution based on the purely functional package
+management system Nix.  More information can be found at
+https://nixos.org/nixos and in the manual in doc/manual.
+
+## Testing changes
+
+You can add a new module to your NixOS configuration file (usually `/etc/nixos/configuration.nix`) and then run `sudo nixos-rebuild test -I nixpkgs=<path to your local nixpkgs folder> --fast`, as sketched below.
+
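+A minimal sketch of such a configuration (the module path and option are hypothetical):
+
+```nix
+{
+  imports = [
+    # module under development, not yet listed in module-list.nix
+    ./my-new-module.nix
+  ];
+
+  # exercise the new option
+  services.myNewService.enable = true;
+}
+```
+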
+## Commit conventions
+
+- Make sure you read about the [commit conventions](../CONTRIBUTING.md#commit-conventions) common to Nixpkgs as a whole.
+
+- Format the commit messages in the following way:
+
+  ```
+  nixos/(module): (init module | add setting | refactor | etc)
+
+  (Motivation for change. Link to release notes. Additional information.)
+  ```
+
+  Examples:
+
+  * nixos/hydra: add bazBaz option
+
+    Dual baz behavior is needed to do foo.
+  * nixos/nginx: refactor config generation
+
+    The old config generation system used impure shell scripts and could break in specific circumstances (see #1234).
+
+## Reviewing contributions
+
+When changing the bootloader installation process, extra care must be taken. Grub installations cannot be rolled back, hence changes may break people’s installations forever. For any non-trivial change to the bootloader please file a PR asking for review, especially from \@edolstra.
+
+### Module updates
+
+Module updates are submissions that change existing modules. These often contain changes to existing options or introduce new ones.
+
+Reviewing process:
+
+- Ensure that the module maintainers are notified.
+  - [CODEOWNERS](https://help.github.com/articles/about-codeowners/) will make GitHub notify users based on the submitted changes, but it can happen that it misses some of the package maintainers.
+- Ensure that the module tests, if any, are succeeding.
+  - You may invoke OfBorg with `@ofborg test <module>` to build `nixosTests.<module>`
+- Ensure that the introduced options are correct.
+  - Type should be appropriate (string-related types differ in their merging capabilities; the `loaOf` and `string` types are deprecated).
+  - Description, default and example should be provided.
+- Ensure that option changes are backward compatible.
+  - `mkRenamedOptionModuleWith` provides a way to make a renamed option backward compatible (see the sketch after this list).
+  - Use `lib.versionAtLeast config.system.stateVersion "23.11"` on backward incompatible changes which may corrupt, change or update the state stored on existing setups.
+- Ensure that removed options are declared with `mkRemovedOptionModule`.
+- Ensure that changes that are not backward compatible are mentioned in release notes.
+- Ensure that documentation affected by the change is updated.
+
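+As a hedged illustration of the two compatibility helpers mentioned above (the option paths are hypothetical), such declarations usually live in a module's `imports` list:
+
+```nix
+{ lib, ... }:
+{
+  imports = [
+    # keep the old option name working; sinceRelease records when the rename happened
+    (lib.mkRenamedOptionModuleWith {
+      sinceRelease = 2311;
+      from = [ "services" "myservice" "enableFoo" ];
+      to = [ "services" "myservice" "foo" "enable" ];
+    })
+    # fail with a helpful message when a dropped option is still set
+    (lib.mkRemovedOptionModule [ "services" "myservice" "legacyOption" ]
+      "This option no longer has any effect; remove it from your configuration.")
+  ];
+}
+```
+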
+Sample template for a module update review is provided below.
+
+```markdown
+##### Reviewed points
+
+- [ ] changes are backward compatible
+- [ ] removed options are declared with `mkRemovedOptionModule`
+- [ ] changes that are not backward compatible are documented in release notes
+- [ ] module tests succeed on ARCHITECTURE
+- [ ] options types are appropriate
+- [ ] options description is set
+- [ ] options example is provided
+- [ ] documentation affected by the changes is updated
+
+##### Possible improvements
+
+##### Comments
+```
+
+### New modules
+
+New modules submissions introduce a new module to NixOS.
+
+Reviewing process:
+
+- Ensure that all file paths [fit the guidelines](../CONTRIBUTING.md#file-naming-and-organisation).
+- Ensure that the module tests, if any, are succeeding.
+- Ensure that the introduced options are correct.
+  - Type should be appropriate (string-related types differ in their merging capabilities; the `loaOf` and `string` types are deprecated).
+  - Description, default and example should be provided.
+- Ensure that the module's `meta` field is present (see the sketch after this list).
+  - Maintainers should be declared in `meta.maintainers`.
+  - Module documentation should be declared with `meta.doc`.
+- Ensure that the module respects other modules' functionality.
+  - For example, enabling a module should not open firewall ports by default.
+
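+A minimal sketch of such a `meta` block (the maintainer name and documentation file are hypothetical):
+
+```nix
+{ lib, ... }:
+{
+  meta = {
+    # people to ping on issues and PRs touching this module
+    maintainers = with lib.maintainers; [ alice ];
+    # manual chapter describing the module
+    doc = ./my-service.md;
+  };
+}
+```
+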
+Sample template for a new module review is provided below.
+
+```markdown
+##### Reviewed points
+
+- [ ] module path fits the guidelines
+- [ ] module tests succeed on ARCHITECTURE
+- [ ] options have appropriate types
+- [ ] options have default
+- [ ] options have example
+- [ ] options have descriptions
+- [ ] No unneeded package is added to `environment.systemPackages`
+- [ ] `meta.maintainers` is set
+- [ ] module documentation is declared in `meta.doc`
+
+##### Possible improvements
+
+##### Comments
+```
diff --git a/nixpkgs/nixos/default.nix b/nixpkgs/nixos/default.nix
new file mode 100644
index 000000000000..6beb4cd3a7df
--- /dev/null
+++ b/nixpkgs/nixos/default.nix
@@ -0,0 +1,20 @@
+{ configuration ? import ./lib/from-env.nix "NIXOS_CONFIG" <nixos-config>
+, system ? builtins.currentSystem
+}:
+
+let
+
+  eval = import ./lib/eval-config.nix {
+    inherit system;
+    modules = [ configuration ];
+  };
+
+in
+
+{
+  inherit (eval) pkgs config options;
+
+  system = eval.config.system.build.toplevel;
+
+  inherit (eval.config.system.build) vm vmWithBootLoader;
+}
diff --git a/nixpkgs/nixos/doc/manual/README.md b/nixpkgs/nixos/doc/manual/README.md
new file mode 100644
index 000000000000..bc649761df69
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/README.md
@@ -0,0 +1,3 @@
+[Moved to ./contributing-to-this-manual.chapter.md](./contributing-to-this-manual.chapter.md). Link:
+
+https://nixos.org/manual/nixos/unstable/#chap-contributing
diff --git a/nixpkgs/nixos/doc/manual/administration/boot-problems.section.md b/nixpkgs/nixos/doc/manual/administration/boot-problems.section.md
new file mode 100644
index 000000000000..bca4fdc3fb38
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/boot-problems.section.md
@@ -0,0 +1,41 @@
+# Boot Problems {#sec-boot-problems}
+
+If NixOS fails to boot, there are a number of kernel command line parameters that may help you to identify or fix the issue. You can add these parameters in the GRUB boot menu by pressing “e” to modify the selected boot entry and editing the line starting with `linux`. The following are some useful kernel command line parameters that are recognised by the NixOS boot scripts or by systemd:
+
+`boot.shell_on_fail`
+
+: Allows the user to start a root shell if something goes wrong in stage 1 of the boot process (the initial ramdisk). This is disabled by default because there is no authentication for the root shell.
+
+`boot.debug1`
+
+: Start an interactive shell in stage 1 before anything useful has been done. That is, no modules have been loaded and no file systems have been mounted, except for `/proc` and `/sys`.
+
+`boot.debug1devices`
+
+: Like `boot.debug1`, but runs stage1 until kernel modules are loaded and device nodes are created. This may help with e.g. making the keyboard work.
+
+`boot.debug1mounts`
+
+: Like `boot.debug1` or `boot.debug1devices`, but runs stage1 until all filesystems that are mounted during initrd are mounted (see [neededForBoot](#opt-fileSystems._name_.neededForBoot)). As a motivating example, this could be useful if you've forgotten to set [neededForBoot](#opt-fileSystems._name_.neededForBoot) on a file system.
+
+`boot.trace`
+
+: Print every shell command executed by the stage 1 and 2 boot scripts.
+
+`single`
+
+: Boot into rescue mode (a.k.a. single user mode). This will cause systemd to start nothing but the unit `rescue.target`, which runs `sulogin` to prompt for the root password and start a root login shell. Exiting the shell causes the system to continue with the normal boot process.
+
+`systemd.log_level=debug` `systemd.log_target=console`
+
+: Make systemd very verbose and send log messages to the console instead of the journal. For more parameters recognised by systemd, see systemd(1).
+
+In addition, these arguments are recognised by the live image only:
+
+`live.nixos.passwd=password`
+
+: Set the password for the `nixos` live user. This can be used for SSH access if there are issues using the terminal.
+
+Notice that for `boot.shell_on_fail`, `boot.debug1`, `boot.debug1devices`, and `boot.debug1mounts`, if you did **not** select "start the new shell as pid 1", and you `exit` from the new shell, boot will proceed normally from the point where it failed, as if you'd chosen "ignore the error and continue".
+
+If no login prompts or X11 login screens appear (e.g. due to hanging dependencies), you can press Alt+ArrowUp. If you’re lucky, this will start rescue mode (described above). (Also note that since most units have a 90-second timeout before systemd gives up on them, the `agetty` login prompts should appear eventually unless something is very wrong.)
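+
+If you find yourself reaching for one of these parameters repeatedly, it can be made permanent from `configuration.nix`. A minimal sketch using the standard `boot.kernelParams` option (whether you want a debug parameter on every boot is a judgement call):
+
+```nix
+{
+  # always allow a root shell if stage 1 fails
+  boot.kernelParams = [ "boot.shell_on_fail" ];
+}
+```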
diff --git a/nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md b/nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md
new file mode 100644
index 000000000000..c9140d0869c7
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/cleaning-store.chapter.md
@@ -0,0 +1,62 @@
+# Cleaning the Nix Store {#sec-nix-gc}
+
+Nix has a purely functional model, meaning that packages are never
+upgraded in place. Instead new versions of packages end up in a
+different location in the Nix store (`/nix/store`). You should
+periodically run Nix's *garbage collector* to remove old, unreferenced
+packages. This is easy:
+
+```ShellSession
+$ nix-collect-garbage
+```
+
+Alternatively, you can use a systemd unit that does the same in the
+background:
+
+```ShellSession
+# systemctl start nix-gc.service
+```
+
+You can tell NixOS in `configuration.nix` to run this unit automatically
+at certain points in time, for instance, every night at 03:15:
+
+```nix
+nix.gc.automatic = true;
+nix.gc.dates = "03:15";
+```
+
+The commands above do not remove garbage collector roots, such as old
+system configurations. Thus they do not remove the ability to roll back
+to previous configurations. The following command deletes old roots,
+removing the ability to roll back to them:
+
+```ShellSession
+$ nix-collect-garbage -d
+```
+
+You can also do this for specific profiles, e.g.
+
+```ShellSession
+$ nix-env -p /nix/var/nix/profiles/per-user/eelco/profile --delete-generations old
+```
+
+Note that NixOS system configurations are stored in the profile
+`/nix/var/nix/profiles/system`.
+
+Another way to reclaim disk space (often as much as 40% of the size of
+the Nix store) is to run Nix's store optimiser, which seeks out
+identical files in the store and replaces them with hard links to a
+single copy.
+
+```ShellSession
+$ nix-store --optimise
+```
+
+Since this command needs to read the entire Nix store, it can take quite
+a while to finish.
+
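+The garbage collector and the optimiser can also be driven declaratively. A minimal sketch, assuming the `nix.gc.options` and `nix.settings.auto-optimise-store` options (the retention period is just an example):
+
+```nix
+{
+  # extra flags passed to the automatic nix-gc.service
+  nix.gc.options = "--delete-older-than 30d";
+  # hard-link identical files as new paths are added to the store
+  nix.settings.auto-optimise-store = true;
+}
+```
+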
+## NixOS Boot Entries {#sect-nixos-gc-boot-entries}
+
+If your `/boot` partition runs out of space, then after clearing old
+profiles you must rebuild your system with `nixos-rebuild boot` or
+`nixos-rebuild switch` to update the `/boot` partition and free space.
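+
+If `/boot` keeps filling up, limiting how many generations the boot loader keeps can also help. A minimal sketch, assuming the systemd-boot loader (the limit is just an example):
+
+```nix
+{
+  # keep at most 10 generations in the boot menu
+  boot.loader.systemd-boot.configurationLimit = 10;
+}
+```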
diff --git a/nixpkgs/nixos/doc/manual/administration/container-networking.section.md b/nixpkgs/nixos/doc/manual/administration/container-networking.section.md
new file mode 100644
index 000000000000..0873768376cc
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/container-networking.section.md
@@ -0,0 +1,44 @@
+# Container Networking {#sec-container-networking}
+
+When you create a container using `nixos-container create`, it gets its
+own private IPv4 address in the range `10.233.0.0/16`. You can get the
+container's IPv4 address as follows:
+
+```ShellSession
+# nixos-container show-ip foo
+10.233.4.2
+
+$ ping -c1 10.233.4.2
+64 bytes from 10.233.4.2: icmp_seq=1 ttl=64 time=0.106 ms
+```
+
+Networking is implemented using a pair of virtual Ethernet devices. The
+network interface in the container is called `eth0`, while the matching
+interface in the host is called `ve-container-name` (e.g., `ve-foo`).
+The container has its own network namespace and the `CAP_NET_ADMIN`
+capability, so it can perform arbitrary network configuration such as
+setting up firewall rules, without affecting or having access to the
+host's network.
+
+By default, containers cannot talk to the outside network. If you want
+that, you should set up Network Address Translation (NAT) rules on the
+host to rewrite container traffic to use your external IP address. This
+can be accomplished using the following configuration on the host:
+
+```nix
+networking.nat.enable = true;
+networking.nat.internalInterfaces = ["ve-+"];
+networking.nat.externalInterface = "eth0";
+```
+
+where `eth0` should be replaced with the desired external interface.
+Note that `ve-+` is a wildcard that matches all container interfaces.
+
+If you are using Network Manager, you need to explicitly prevent it from
+managing container interfaces:
+
+```nix
+networking.networkmanager.unmanaged = [ "interface-name:ve-*" ];
+```
+
+You may need to restart your system for the changes to take effect.
diff --git a/nixpkgs/nixos/doc/manual/administration/containers.chapter.md b/nixpkgs/nixos/doc/manual/administration/containers.chapter.md
new file mode 100644
index 000000000000..50493b562b54
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/containers.chapter.md
@@ -0,0 +1,28 @@
+# Container Management {#ch-containers}
+
+NixOS allows you to easily run other NixOS instances as *containers*.
+Containers are a light-weight approach to virtualisation that runs
+software in the container at the same speed as in the host system. NixOS
+containers share the Nix store of the host, making container creation
+very efficient.
+
+::: {.warning}
+Currently, NixOS containers are not perfectly isolated from the host
+system. This means that a user with root access to the container can do
+things that affect the host. So you should not give container root
+access to untrusted users.
+:::
+
+NixOS containers can be created in two ways: imperatively, using the
+command `nixos-container`, and declaratively, by specifying them in your
+`configuration.nix`. The declarative approach implies that containers
+get upgraded along with your host system when you run `nixos-rebuild`,
+which is often not what you want. By contrast, in the imperative
+approach, containers are configured and updated independently from the
+host system.
+
+```{=include=} sections
+imperative-containers.section.md
+declarative-containers.section.md
+container-networking.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md b/nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md
new file mode 100644
index 000000000000..abe8dd80b5ab
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/control-groups.chapter.md
@@ -0,0 +1,59 @@
+# Control Groups {#sec-cgroups}
+
+To keep track of the processes in a running system, systemd uses
+*control groups* (cgroups). A control group is a set of processes used
+to allocate resources such as CPU, memory or I/O bandwidth. There can be
+multiple control group hierarchies, allowing each kind of resource to be
+managed independently.
+
+The command `systemd-cgls` lists all control groups in the `systemd`
+hierarchy, which is what systemd uses to keep track of the processes
+belonging to each service or user session:
+
+```ShellSession
+$ systemd-cgls
+├─user
+│ └─eelco
+│   └─c1
+│     ├─ 2567 -:0
+│     ├─ 2682 kdeinit4: kdeinit4 Running...
+│     ├─ ...
+│     └─10851 sh -c less -R
+└─system
+  ├─httpd.service
+  │ ├─2444 httpd -f /nix/store/3pyacby5cpr55a03qwbnndizpciwq161-httpd.conf -DNO_DETACH
+  │ └─...
+  ├─dhcpcd.service
+  │ └─2376 dhcpcd --config /nix/store/f8dif8dsi2yaa70n03xir8r653776ka6-dhcpcd.conf
+  └─ ...
+```
+
+Similarly, `systemd-cgls cpu` shows the cgroups in the CPU hierarchy,
+which allows per-cgroup CPU scheduling priorities. By default, every
+systemd service gets its own CPU cgroup, while all user sessions are in
+the top-level CPU cgroup. This ensures, for instance, that a thousand
+run-away processes in the `httpd.service` cgroup cannot starve the CPU
+for one process in the `postgresql.service` cgroup. (By contrast, if
+they were in the same cgroup, then the PostgreSQL process would get
+1/1001 of the cgroup's CPU time.) You can limit a service's CPU share in
+`configuration.nix`:
+
+```nix
+systemd.services.httpd.serviceConfig.CPUShares = 512;
+```
+
+By default, every cgroup has 1024 CPU shares, so this will halve the CPU
+allocation of the `httpd.service` cgroup.
+
+There also is a `memory` hierarchy that controls memory allocation
+limits; by default, all processes are in the top-level cgroup, so any
+service or session can exhaust all available memory. Per-cgroup memory
+limits can be specified in `configuration.nix`; for instance, to limit
+`httpd.service` to 512 MiB of RAM (excluding swap):
+
+```nix
+systemd.services.httpd.serviceConfig.MemoryLimit = "512M";
+```
+
+The command `systemd-cgtop` shows a continuously updated list of all
+cgroups with their CPU and memory usage.
diff --git a/nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md b/nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md
new file mode 100644
index 000000000000..eaa50d3c663d
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/declarative-containers.section.md
@@ -0,0 +1,48 @@
+# Declarative Container Specification {#sec-declarative-containers}
+
+You can also specify containers and their configuration in the host's
+`configuration.nix`. For example, the following specifies that there
+shall be a container named `database` running PostgreSQL:
+
+```nix
+containers.database =
+  { config =
+      { config, pkgs, ... }:
+      { services.postgresql.enable = true;
+        services.postgresql.package = pkgs.postgresql_14;
+      };
+  };
+```
+
+If you run `nixos-rebuild switch`, the container will be built. If the
+container was already running, it will be updated in place, without
+rebooting. The container can be configured to start automatically by
+setting `containers.database.autoStart = true` in its configuration.
+
+By default, declarative containers share the network namespace of the
+host, meaning that they can listen on (privileged) ports. However, they
+cannot change the network configuration. You can give a container its
+own network as follows:
+
+```nix
+containers.database = {
+  privateNetwork = true;
+  hostAddress = "192.168.100.10";
+  localAddress = "192.168.100.11";
+};
+```
+
+This gives the container a private virtual Ethernet interface with IP
+address `192.168.100.11`, which is hooked up to a virtual Ethernet
+interface on the host with IP address `192.168.100.10`. (See the next
+section for details on container networking.)
+
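+With a private network, one way to make a service inside the container reachable from outside the host is the `forwardPorts` option. A minimal sketch (the port numbers are just an example):
+
+```nix
+containers.database = {
+  privateNetwork = true;
+  hostAddress = "192.168.100.10";
+  localAddress = "192.168.100.11";
+  forwardPorts = [
+    { containerPort = 5432; hostPort = 5432; protocol = "tcp"; }
+  ];
+};
+```
+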
+To disable the container, just remove it from `configuration.nix` and
+run `nixos-rebuild
+  switch`. Note that this will not delete the root directory of the
+container in `/var/lib/nixos-containers`. Containers can be destroyed using
+the imperative method: `nixos-container destroy foo`.
+
+Declarative containers can be started and stopped using the
+corresponding systemd service, e.g.
+`systemctl start container@database`.
diff --git a/nixpkgs/nixos/doc/manual/administration/imperative-containers.section.md b/nixpkgs/nixos/doc/manual/administration/imperative-containers.section.md
new file mode 100644
index 000000000000..f45991780c4b
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/imperative-containers.section.md
@@ -0,0 +1,115 @@
+# Imperative Container Management {#sec-imperative-containers}
+
+We'll cover imperative container management using `nixos-container`
+first. Be aware that container management is currently only possible as
+`root`.
+
+You create a container with identifier `foo` as follows:
+
+```ShellSession
+# nixos-container create foo
+```
+
+This creates the container's root directory in `/var/lib/nixos-containers/foo`
+and a small configuration file in `/etc/nixos-containers/foo.conf`. It also
+builds the container's initial system configuration and stores it in
+`/nix/var/nix/profiles/per-container/foo/system`. You can modify the
+initial configuration of the container on the command line. For
+instance, to create a container that has `sshd` running, with the given
+public key for `root`:
+
+```ShellSession
+# nixos-container create foo --config '
+  services.openssh.enable = true;
+  users.users.root.openssh.authorizedKeys.keys = ["ssh-dss AAAAB3N…"];
+'
+```
+
+By default the next free address in the `10.233.0.0/16` subnet will be
+chosen as container IP. This behavior can be altered by setting
+`--host-address` and `--local-address`:
+
+```ShellSession
+# nixos-container create test --config-file test-container.nix \
+    --local-address 10.235.1.2 --host-address 10.235.1.1
+```
+
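+The file passed via `--config-file` is an ordinary NixOS module. A minimal sketch of what `test-container.nix` might contain (the choices shown are hypothetical):
+
+```nix
+{ pkgs, ... }:
+{
+  services.openssh.enable = true;
+  environment.systemPackages = [ pkgs.htop ];
+}
+```
+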
+Creating a container does not start it. To start the container, run:
+
+```ShellSession
+# nixos-container start foo
+```
+
+This command will return as soon as the container has booted and has
+reached `multi-user.target`. On the host, the container runs within a
+systemd unit called `container@container-name.service`. Thus, if
+something went wrong, you can get status info using `systemctl`:
+
+```ShellSession
+# systemctl status container@foo
+```
+
+If the container has started successfully, you can log in as root using
+the `root-login` operation:
+
+```ShellSession
+# nixos-container root-login foo
+[root@foo:~]#
+```
+
+Note that only root on the host can do this (since there is no
+authentication). You can also get a regular login prompt using the
+`login` operation, which is available to all users on the host:
+
+```ShellSession
+# nixos-container login foo
+foo login: alice
+Password: ***
+```
+
+With `nixos-container run`, you can execute arbitrary commands in the
+container:
+
+```ShellSession
+# nixos-container run foo -- uname -a
+Linux foo 3.4.82 #1-NixOS SMP Thu Mar 20 14:44:05 UTC 2014 x86_64 GNU/Linux
+```
+
+There are several ways to change the configuration of the container.
+First, on the host, you can edit
+`/var/lib/nixos-containers/foo/etc/nixos/configuration.nix`, and run
+
+```ShellSession
+# nixos-container update foo
+```
+
+This will build and activate the new configuration. You can also specify
+a new configuration on the command line:
+
+```ShellSession
+# nixos-container update foo --config '
+  services.httpd.enable = true;
+  services.httpd.adminAddr = "foo@example.org";
+  networking.firewall.allowedTCPPorts = [ 80 ];
+'
+
+# curl http://$(nixos-container show-ip foo)/
+<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">…
+```
+
+However, note that this will overwrite the container's
+`/etc/nixos/configuration.nix`.
+
+Alternatively, you can change the configuration from within the
+container itself by running `nixos-rebuild switch` inside the container.
+Note that the container by default does not have a copy of the NixOS
+channel, so you should run `nix-channel --update` first.
+
+Containers can be stopped and started using `nixos-container
+  stop` and `nixos-container start`, respectively, or by using
+`systemctl` on the container's service unit. To destroy a container,
+including its file system, do
+
+```ShellSession
+# nixos-container destroy foo
+```
diff --git a/nixpkgs/nixos/doc/manual/administration/logging.chapter.md b/nixpkgs/nixos/doc/manual/administration/logging.chapter.md
new file mode 100644
index 000000000000..4ce6f5e9fa72
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/logging.chapter.md
@@ -0,0 +1,38 @@
+# Logging {#sec-logging}
+
+System-wide logging is provided by systemd's *journal*, which subsumes
+traditional logging daemons such as syslogd and klogd. Log entries are
+kept in binary files in `/var/log/journal/`. The command `journalctl`
+allows you to see the contents of the journal. For example,
+
+```ShellSession
+$ journalctl -b
+```
+
+shows all journal entries since the last reboot. (The output of
+`journalctl` is piped into `less` by default.) You can use various
+options and match operators to restrict output to messages of interest.
+For instance, to get all messages from PostgreSQL:
+
+```ShellSession
+$ journalctl -u postgresql.service
+-- Logs begin at Mon, 2013-01-07 13:28:01 CET, end at Tue, 2013-01-08 01:09:57 CET. --
+...
+Jan 07 15:44:14 hagbard postgres[2681]: [2-1] LOG:  database system is shut down
+-- Reboot --
+Jan 07 15:45:10 hagbard postgres[2532]: [1-1] LOG:  database system was shut down at 2013-01-07 15:44:14 CET
+Jan 07 15:45:13 hagbard postgres[2500]: [1-1] LOG:  database system is ready to accept connections
+```
+
+Or to get all messages since the last reboot that have at least a
+"critical" severity level:
+
+```ShellSession
+$ journalctl -b -p crit
+Dec 17 21:08:06 mandark sudo[3673]: pam_unix(sudo:auth): auth could not identify password for [alice]
+Dec 29 01:30:22 mandark kernel[6131]: [1053513.909444] CPU6: Core temperature above threshold, cpu clock throttled (total events = 1)
+```
+
+The system journal is readable by root and by users in the `wheel` and
+`systemd-journal` groups. All users have a private journal that can be
+read using `journalctl`.
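+
+The journal's disk usage can be capped from `configuration.nix`. A minimal sketch, assuming journald's `SystemMaxUse` setting via `services.journald.extraConfig`:
+
+```nix
+{
+  # limit the persistent journal to roughly 1 GiB
+  services.journald.extraConfig = ''
+    SystemMaxUse=1G
+  '';
+}
+```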
diff --git a/nixpkgs/nixos/doc/manual/administration/maintenance-mode.section.md b/nixpkgs/nixos/doc/manual/administration/maintenance-mode.section.md
new file mode 100644
index 000000000000..0aec013c0a9b
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/maintenance-mode.section.md
@@ -0,0 +1,11 @@
+# Maintenance Mode {#sec-maintenance-mode}
+
+You can enter rescue mode by running:
+
+```ShellSession
+# systemctl rescue
+```
+
+This will eventually give you a single-user root shell. Systemd will
+stop (almost) all system services. To get out of maintenance mode, just
+exit from the rescue shell.
diff --git a/nixpkgs/nixos/doc/manual/administration/network-problems.section.md b/nixpkgs/nixos/doc/manual/administration/network-problems.section.md
new file mode 100644
index 000000000000..d360120d72d0
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/network-problems.section.md
@@ -0,0 +1,21 @@
+# Network Problems {#sec-nix-network-issues}
+
+Nix uses a so-called *binary cache* to optimise building a package from
+source into downloading it as a pre-built binary. That is, whenever a
+command like `nixos-rebuild` needs a path in the Nix store, Nix will try
+to download that path from the Internet rather than build it from
+source. The default binary cache is `https://cache.nixos.org/`. If this
+cache is unreachable, Nix operations may take a long time due to HTTP
+connection timeouts. You can disable the use of the binary cache by
+adding `--option use-binary-caches false`, e.g.
+
+```ShellSession
+# nixos-rebuild switch --option use-binary-caches false
+```
+
+If you have an alternative binary cache at your disposal, you can use it
+instead:
+
+```ShellSession
+# nixos-rebuild switch --option binary-caches http://my-cache.example.org/
+```
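+
+To use such a cache permanently rather than per invocation, a minimal sketch using the declarative `nix.settings` options (the URL and signing key are placeholders):
+
+```nix
+{
+  nix.settings.substituters = [ "http://my-cache.example.org/" ];
+  # public key the cache signs its paths with, if it is signed
+  nix.settings.trusted-public-keys = [ "my-cache.example.org-1:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX=" ];
+}
+```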
diff --git a/nixpkgs/nixos/doc/manual/administration/rebooting.chapter.md b/nixpkgs/nixos/doc/manual/administration/rebooting.chapter.md
new file mode 100644
index 000000000000..ec4b889b1648
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/rebooting.chapter.md
@@ -0,0 +1,30 @@
+# Rebooting and Shutting Down {#sec-rebooting}
+
+The system can be shut down (and automatically powered off) by doing:
+
+```ShellSession
+# shutdown
+```
+
+This is equivalent to running `systemctl poweroff`.
+
+To reboot the system, run
+
+```ShellSession
+# reboot
+```
+
+which is equivalent to `systemctl reboot`. Alternatively, you can
+quickly reboot the system using `kexec`, which bypasses the BIOS by
+directly loading the new kernel into memory:
+
+```ShellSession
+# systemctl kexec
+```
+
+The machine can be suspended to RAM (if supported) using `systemctl suspend`,
+and suspended to disk using `systemctl hibernate`.
+
+These commands can be run by any user who is logged in locally, i.e. on
+a virtual console or in X11; otherwise, the user is asked for
+authentication.
diff --git a/nixpkgs/nixos/doc/manual/administration/rollback.section.md b/nixpkgs/nixos/doc/manual/administration/rollback.section.md
new file mode 100644
index 000000000000..290d685a2a18
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/rollback.section.md
@@ -0,0 +1,38 @@
+# Rolling Back Configuration Changes {#sec-rollback}
+
+After running `nixos-rebuild` to switch to a new configuration, you may
+find that the new configuration doesn't work very well. In that case,
+there are several ways to return to a previous configuration.
+
+First, the GRUB boot manager allows you to boot into any previous
+configuration that hasn't been garbage-collected. These configurations
+can be found under the GRUB submenu "NixOS - All configurations". This
+is especially useful if the new configuration fails to boot. After the
+system has booted, you can make the selected configuration the default
+for subsequent boots:
+
+```ShellSession
+# /run/current-system/bin/switch-to-configuration boot
+```
+
+Second, you can switch to the previous configuration in a running
+system:
+
+```ShellSession
+# nixos-rebuild switch --rollback
+```
+
+This is equivalent to running:
+
+```ShellSession
+# /nix/var/nix/profiles/system-N-link/bin/switch-to-configuration switch
+```
+
+where `N` is the number of the NixOS system configuration. To get a
+list of the available configurations, do:
+
+```ShellSession
+$ ls -l /nix/var/nix/profiles/system-*-link
+...
+lrwxrwxrwx 1 root root 78 Aug 12 13:54 /nix/var/nix/profiles/system-268-link -> /nix/store/202b...-nixos-13.07pre4932_5a676e4-4be1055
+```
diff --git a/nixpkgs/nixos/doc/manual/administration/running.md b/nixpkgs/nixos/doc/manual/administration/running.md
new file mode 100644
index 000000000000..48e8c7c6668b
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/running.md
@@ -0,0 +1,14 @@
+# Administration {#ch-running}
+
+This chapter describes various aspects of managing a running NixOS system, such as how to use the {command}`systemd` service manager.
+
+```{=include=} chapters
+service-mgmt.chapter.md
+rebooting.chapter.md
+user-sessions.chapter.md
+control-groups.chapter.md
+logging.chapter.md
+cleaning-store.chapter.md
+containers.chapter.md
+troubleshooting.chapter.md
+```
diff --git a/nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md b/nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md
new file mode 100644
index 000000000000..bc9bdbe3708b
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/service-mgmt.chapter.md
@@ -0,0 +1,150 @@
+# Service Management {#sec-systemctl}
+
+In NixOS, all system services are started and monitored using the
+systemd program. systemd is the "init" process of the system (i.e. PID
+1), the parent of all other processes. It manages a set of so-called
+"units", which can be things like system services (programs), but also
+mount points, swap files, devices, targets (groups of units) and more.
+Units can have complex dependencies; for instance, one unit can require
+that another unit must be successfully started before the first unit can
+be started. When the system boots, it starts a unit named
+`default.target`; the dependencies of this unit cause all system
+services to be started, file systems to be mounted, swap files to be
+activated, and so on.
+
+## Interacting with a running systemd {#sect-nixos-systemd-general}
+
+The command `systemctl` is the main way to interact with `systemd`. The
+following paragraphs demonstrate ways to interact with any OS running
+systemd as its init system; NixOS is no exception. The [next section
+](#sect-nixos-systemd-nixos) explains NixOS-specific things worth
+knowing.
+
+Without any arguments, `systemctl` lists the status of active units:
+
+```ShellSession
+$ systemctl
+-.mount          loaded active mounted   /
+swapfile.swap    loaded active active    /swapfile
+sshd.service     loaded active running   SSH Daemon
+graphical.target loaded active active    Graphical Interface
+...
+```
+
+You can ask for detailed status information about a unit, for instance,
+the PostgreSQL database service:
+
+```ShellSession
+$ systemctl status postgresql.service
+postgresql.service - PostgreSQL Server
+          Loaded: loaded (/nix/store/pn3q73mvh75gsrl8w7fdlfk3fq5qm5mw-unit/postgresql.service)
+          Active: active (running) since Mon, 2013-01-07 15:55:57 CET; 9h ago
+        Main PID: 2390 (postgres)
+          CGroup: name=systemd:/system/postgresql.service
+                  ├─2390 postgres
+                  ├─2418 postgres: writer process
+                  ├─2419 postgres: wal writer process
+                  ├─2420 postgres: autovacuum launcher process
+                  ├─2421 postgres: stats collector process
+                  └─2498 postgres: zabbix zabbix [local] idle
+
+Jan 07 15:55:55 hagbard postgres[2394]: [1-1] LOG:  database system was shut down at 2013-01-07 15:55:05 CET
+Jan 07 15:55:57 hagbard postgres[2390]: [1-1] LOG:  database system is ready to accept connections
+Jan 07 15:55:57 hagbard postgres[2420]: [1-1] LOG:  autovacuum launcher started
+Jan 07 15:55:57 hagbard systemd[1]: Started PostgreSQL Server.
+```
+
+Note that this shows the status of the unit (active and running), all
+the processes belonging to the service, as well as the most recent log
+messages from the service.
+
+Units can be stopped, started or restarted:
+
+```ShellSession
+# systemctl stop postgresql.service
+# systemctl start postgresql.service
+# systemctl restart postgresql.service
+```
+
+These operations are synchronous: they wait until the service has
+finished starting or stopping (or has failed). Starting a unit will
+cause the dependencies of that unit to be started as well (if
+necessary).
+
+## systemd in NixOS {#sect-nixos-systemd-nixos}
+
+Packages in Nixpkgs sometimes provide systemd units with them, usually
+in e.g. `#pkg-out#/lib/systemd/`. Putting such a package in
+`environment.systemPackages` doesn't make the service available to
+users or the system.
+
+To enable a systemd *system* service provided by an upstream
+package, use (e.g.):
+
+```nix
+systemd.packages = [ pkgs.packagekit ];
+```
+
+Usually, NixOS modules written by the community do the above, plus take
+care of other details. If a module exists for the service you are
+interested in, you probably only need to set
+`services.#name#.enable = true;`, as in the sketch below. These services
+are defined in Nixpkgs'
+[`nixos/modules/` directory](https://github.com/NixOS/nixpkgs/tree/master/nixos/modules).
+If the service is simple enough, the `systemd.packages` method above
+should also work and start the service on boot.
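+
+For example, Syncthing (which also appears in the user-service example
+below) has a NixOS module, so a minimal sketch is:
+
+```nix
+# The module pulls in the package, the unit and the remaining wiring.
+services.syncthing.enable = true;
+```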
+
+*User* systemd services, on the other hand, need to be treated
+differently. Given a package that ships a systemd unit file at
+`#pkg-out#/lib/systemd/user/`, using [](#opt-systemd.packages) lets you
+start the service via `systemctl --user start`, but it won't start
+automatically on login. You can, however, enable it imperatively by
+adding the package's attribute to [](#opt-systemd.packages) and then
+running the commands shown below.
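+
+For instance, for Syncthing's user unit the Nix part would be (a sketch;
+any package that ships a unit under `lib/systemd/user/` works the same
+way):
+
+```nix
+systemd.packages = [ pkgs.syncthing ];
+```
+
+Then, as your user, run: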
+
+```ShellSession
+$ mkdir -p ~/.config/systemd/user/default.target.wants
+$ ln -s /run/current-system/sw/lib/systemd/user/syncthing.service ~/.config/systemd/user/default.target.wants/
+$ systemctl --user daemon-reload
+$ systemctl --user enable syncthing.service
+```
+
+If you are interested in a timer file, use `timers.target.wants` instead
+of `default.target.wants` in the first and second commands.
+
+Using `systemctl --user enable syncthing.service` instead of the above
+will work, but it will use the absolute path of `syncthing.service` for
+the symlink, and this path is in `/nix/store/.../lib/systemd/user/`.
+Hence [garbage collection](#sec-nix-gc) will remove that file, leaving
+you with a broken symlink in your systemd configuration, so the service
+or timer will no longer start on login.
+
+## Template units {#sect-nixos-systemd-template-units}
+
+systemd supports templated units where a base unit can be started multiple
+times with a different parameter. The syntax to accomplish this is
+`service-name@instance-name.service`. Units get the instance name passed to
+them (see `systemd.unit(5)`). NixOS has support for these kinds of units and
+for template-specific overrides. A service needs to be defined twice, once
+for the base unit and once for the instance. All instances must include
+`overrideStrategy = "asDropin"` for the change detection to work. This
+example illustrates this:
+```nix
+{
+  systemd.services = {
+    "base-unit@".serviceConfig = {
+      ExecStart = "...";
+      User = "...";
+    };
+    "base-unit@instance-a" = {
+      overrideStrategy = "asDropin"; # needed for templates to work
+      wantedBy = [ "multi-user.target" ]; # causes NixOS to manage the instance
+    };
+    "base-unit@instance-b" = {
+      overrideStrategy = "asDropin"; # needed for templates to work
+      wantedBy = [ "multi-user.target" ]; # causes NixOS to manage the instance
+      serviceConfig.User = "root"; # also override something for this specific instance
+    };
+  };
+}
+```
diff --git a/nixpkgs/nixos/doc/manual/administration/store-corruption.section.md b/nixpkgs/nixos/doc/manual/administration/store-corruption.section.md
new file mode 100644
index 000000000000..bd8a5772b37c
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/store-corruption.section.md
@@ -0,0 +1,28 @@
+# Nix Store Corruption {#sec-nix-store-corruption}
+
+After a system crash, it's possible for files in the Nix store to become
+corrupted. (For instance, the Ext4 file system has a tendency to
+replace un-synced files with zero bytes.) NixOS tries hard to prevent
+this from happening: it performs a `sync` before switching to a new
+configuration, and Nix's database is fully transactional. If corruption
+still occurs, you may be able to fix it automatically.
+
+If the corruption is in a path in the closure of the NixOS system
+configuration, you can fix it by doing
+
+```ShellSession
+# nixos-rebuild switch --repair
+```
+
+This will cause Nix to check every path in the closure, and if its
+cryptographic hash differs from the hash recorded in Nix's database, the
+path is rebuilt or redownloaded.
+
+You can also scan the entire Nix store for corrupt paths:
+
+```ShellSession
+# nix-store --verify --check-contents --repair
+```
+
+Any corrupt paths will be redownloaded if they're available in a binary
+cache; otherwise, they cannot be repaired.
diff --git a/nixpkgs/nixos/doc/manual/administration/troubleshooting.chapter.md b/nixpkgs/nixos/doc/manual/administration/troubleshooting.chapter.md
new file mode 100644
index 000000000000..1253607f8efc
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/troubleshooting.chapter.md
@@ -0,0 +1,12 @@
+# Troubleshooting {#ch-troubleshooting}
+
+This chapter describes solutions to common problems you might encounter
+when you manage your NixOS system.
+
+```{=include=} sections
+boot-problems.section.md
+maintenance-mode.section.md
+rollback.section.md
+store-corruption.section.md
+network-problems.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/administration/user-sessions.chapter.md b/nixpkgs/nixos/doc/manual/administration/user-sessions.chapter.md
new file mode 100644
index 000000000000..5ff468b30122
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/administration/user-sessions.chapter.md
@@ -0,0 +1,43 @@
+# User Sessions {#sec-user-sessions}
+
+Systemd keeps track of all users who are logged into the system (e.g. on
+a virtual console or remotely via SSH). The command `loginctl` allows
+querying and manipulating user sessions. For instance, to list all user
+sessions:
+
+```ShellSession
+$ loginctl
+   SESSION        UID USER             SEAT
+        c1        500 eelco            seat0
+        c3          0 root             seat0
+        c4        500 alice
+```
+
+This shows that two users are logged in locally, while another is logged
+in remotely. ("Seats" are essentially the combinations of displays and
+input devices attached to the system; usually, there is only one seat.)
+To get information about a session:
+
+```ShellSession
+$ loginctl session-status c3
+c3 - root (0)
+           Since: Tue, 2013-01-08 01:17:56 CET; 4min 42s ago
+          Leader: 2536 (login)
+            Seat: seat0; vc3
+             TTY: /dev/tty3
+         Service: login; type tty; class user
+           State: online
+          CGroup: name=systemd:/user/root/c3
+                  ├─ 2536 /nix/store/10mn4xip9n7y9bxqwnsx7xwx2v2g34xn-shadow-4.1.5.1/bin/login --
+                  ├─10339 -bash
+                  └─10355 w3m nixos.org
+```
+
+This shows that the user is logged in on virtual console 3. It also
+lists the processes belonging to this session. Since systemd keeps track
+of this, you can terminate a session in a way that ensures that all the
+session's processes are gone:
+
+```ShellSession
+# loginctl terminate-session c3
+```
diff --git a/nixpkgs/nixos/doc/manual/common.nix b/nixpkgs/nixos/doc/manual/common.nix
new file mode 100644
index 000000000000..48d1d909492d
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/common.nix
@@ -0,0 +1,4 @@
+{
+  outputPath = "share/doc/nixos";
+  indexPath = "index.html";
+}
diff --git a/nixpkgs/nixos/doc/manual/configuration/abstractions.section.md b/nixpkgs/nixos/doc/manual/configuration/abstractions.section.md
new file mode 100644
index 000000000000..bf26e4c51ed3
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/abstractions.section.md
@@ -0,0 +1,80 @@
+# Abstractions {#sec-module-abstractions}
+
+If you find yourself repeating yourself over and over, it’s time to abstract. Take, for instance, this Apache HTTP Server configuration:
+
+```nix
+{
+  services.httpd.virtualHosts =
+    { "blog.example.org" = {
+        documentRoot = "/webroot/blog.example.org";
+        adminAddr = "alice@example.org";
+        forceSSL = true;
+        enableACME = true;
+        enablePHP = true;
+      };
+      "wiki.example.org" = {
+        documentRoot = "/webroot/wiki.example.org";
+        adminAddr = "alice@example.org";
+        forceSSL = true;
+        enableACME = true;
+        enablePHP = true;
+      };
+    };
+}
+```
+
+It defines two virtual hosts with nearly identical configuration; the only difference is the document root directories. To prevent this duplication, we can use a `let`:
+```nix
+let
+  commonConfig =
+    { adminAddr = "alice@example.org";
+      forceSSL = true;
+      enableACME = true;
+    };
+in
+{
+  services.httpd.virtualHosts =
+    { "blog.example.org" = (commonConfig // { documentRoot = "/webroot/blog.example.org"; });
+      "wiki.example.org" = (commonConfig // { documentRoot = "/webroot/wiki.example.com"; });
+    };
+}
+```
+
+The `let commonConfig = ...` defines a variable named `commonConfig`. The `//` operator merges two attribute sets, so the configuration of the second virtual host is the set `commonConfig` extended with the document root option.
+
+You can write a `let` wherever an expression is allowed. Thus, you also could have written:
+
+```nix
+{
+  services.httpd.virtualHosts =
+    let commonConfig = ...; in
+    { "blog.example.org" = (commonConfig // { ... })
+      "wiki.example.org" = (commonConfig // { ... })
+    };
+}
+```
+
+but not `{ let commonConfig = ...; in ...; }` since attributes (as opposed to attribute values) are not expressions.
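+
+As a smaller, concrete sketch, a `let` can also scope a helper for a
+single attribute value:
+
+```nix
+{
+  services.httpd.adminAddr = let user = "alice"; in "${user}@example.org";
+}
+```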
+
+**Functions** provide another method of abstraction. For instance, suppose that we want to generate lots of different virtual hosts, all with identical configuration except for the document root. This can be done as follows:
+
+```nix
+{
+  services.httpd.virtualHosts =
+    let
+      makeVirtualHost = webroot:
+        { documentRoot = webroot;
+          adminAddr = "alice@example.org";
+          forceSSL = true;
+          enableACME = true;
+        };
+    in
+      { "example.org" = (makeVirtualHost "/webroot/example.org");
+        "example.com" = (makeVirtualHost "/webroot/example.com");
+        "example.gov" = (makeVirtualHost "/webroot/example.gov");
+        "example.nl" = (makeVirtualHost "/webroot/example.nl");
+      };
+}
+```
+
+Here, `makeVirtualHost` is a function that takes a single argument `webroot` and returns the configuration for a virtual host. That function is then called for several webroots to produce the set of virtual host configurations.
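+
+If the list of hostnames grows further, you could also generate the
+attribute set from a list, e.g. with `lib.genAttrs` (a sketch; it assumes
+`lib` is available as a module argument):
+
+```nix
+{ lib, ... }:
+
+{
+  services.httpd.virtualHosts =
+    let
+      makeVirtualHost = name:
+        { documentRoot = "/webroot/${name}";
+          adminAddr = "alice@example.org";
+          forceSSL = true;
+          enableACME = true;
+        };
+    in
+      lib.genAttrs [ "example.org" "example.com" "example.gov" "example.nl" ] makeVirtualHost;
+}
+```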
diff --git a/nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md b/nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md
new file mode 100644
index 000000000000..4478d77f361d
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/ad-hoc-network-config.section.md
@@ -0,0 +1,13 @@
+# Ad-Hoc Configuration {#ad-hoc-network-config}
+
+You can use [](#opt-networking.localCommands) to
+specify shell commands to be run at the end of `network-setup.service`. This
+is useful for doing network configuration not covered by the existing NixOS
+modules. For instance, to statically configure an IPv6 address:
+
+```nix
+networking.localCommands =
+  ''
+    ip -6 addr add 2001:610:685:1::1/64 dev eth0
+  '';
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/ad-hoc-packages.section.md b/nixpkgs/nixos/doc/manual/configuration/ad-hoc-packages.section.md
new file mode 100644
index 000000000000..e9d574903a10
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/ad-hoc-packages.section.md
@@ -0,0 +1,51 @@
+# Ad-Hoc Package Management {#sec-ad-hoc-packages}
+
+With the command `nix-env`, you can install and uninstall packages from
+the command line. For instance, to install Mozilla Thunderbird:
+
+```ShellSession
+$ nix-env -iA nixos.thunderbird
+```
+
+If you invoke this as root, the package is installed in the Nix profile
+`/nix/var/nix/profiles/default` and visible to all users of the system;
+otherwise, the package ends up in
+`/nix/var/nix/profiles/per-user/username/profile` and is not visible to
+other users. The `-A` flag specifies the package by its attribute name;
+without it, the package is installed by matching against its package
+name (e.g. `thunderbird`). The latter is slower because it requires
+matching against all available Nix packages, and is ambiguous if there
+are multiple matching packages.
+
+Packages come from the NixOS channel. You typically upgrade a package by
+updating to the latest version of the NixOS channel:
+
+```ShellSession
+$ nix-channel --update nixos
+```
+
+and then running `nix-env -i` again. Other packages in the profile are
+*not* affected; this is the crucial difference with the declarative
+style of package management, where running `nixos-rebuild switch` causes
+all packages to be updated to their current versions in the NixOS
+channel. You can however upgrade all packages for which there is a newer
+version by doing:
+
+```ShellSession
+$ nix-env -u '*'
+```
+
+A package can be uninstalled using the `-e` flag:
+
+```ShellSession
+$ nix-env -e thunderbird
+```
+
+Finally, you can roll back an undesirable `nix-env` action:
+
+```ShellSession
+$ nix-env --rollback
+```
+
+`nix-env` has many more flags. For details, see the nix-env(1) manpage or
+the Nix manual.
diff --git a/nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md b/nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md
new file mode 100644
index 000000000000..2340723e07c6
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/adding-custom-packages.section.md
@@ -0,0 +1,99 @@
+# Adding Custom Packages {#sec-custom-packages}
+
+It's possible that a package you need is not available in NixOS. In that
+case, you can do two things. Either you can package it with Nix, or you can try
+to use prebuilt packages from upstream. Due to the peculiarities of NixOS, it
+is important to note that building software from source is often easier than
+using pre-built executables.
+
+## Building with Nix {#sec-custom-packages-nix}
+
+This can be done either in-tree or out-of-tree. For an in-tree build, you can
+clone the Nixpkgs repository, add the package to your clone, and (optionally)
+submit a patch or pull request to have it accepted into the main Nixpkgs
+repository. This is described in detail in the [Nixpkgs
+manual](https://nixos.org/nixpkgs/manual). In short, you clone Nixpkgs:
+
+```ShellSession
+$ git clone https://github.com/NixOS/nixpkgs
+$ cd nixpkgs
+```
+
+Then you write and test the package as described in the Nixpkgs manual.
+Finally, you add it to [](#opt-environment.systemPackages), e.g.
+
+```nix
+environment.systemPackages = [ pkgs.my-package ];
+```
+
+and you run `nixos-rebuild`, specifying your own Nixpkgs tree:
+
+```ShellSession
+# nixos-rebuild switch -I nixpkgs=/path/to/my/nixpkgs
+```
+
+The second possibility is to add the package outside of the Nixpkgs
+tree. For instance, here is how you specify a build of the
+[GNU Hello](https://www.gnu.org/software/hello/) package directly in
+`configuration.nix`:
+
+```nix
+environment.systemPackages =
+  let
+    my-hello = with pkgs; stdenv.mkDerivation rec {
+      name = "hello-2.8";
+      src = fetchurl {
+        url = "mirror://gnu/hello/${name}.tar.gz";
+        hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
+      };
+    };
+  in
+  [ my-hello ];
+```
+
+Of course, you can also move the definition of `my-hello` into a
+separate Nix expression, e.g.
+
+```nix
+environment.systemPackages = [ (import ./my-hello.nix) ];
+```
+
+where `my-hello.nix` contains:
+
+```nix
+with import <nixpkgs> {}; # bring all of Nixpkgs into scope
+
+stdenv.mkDerivation rec {
+  name = "hello-2.8";
+  src = fetchurl {
+    url = "mirror://gnu/hello/${name}.tar.gz";
+    hash = "sha256-5rd/gffPfa761Kn1tl3myunD8TuM+66oy1O7XqVGDXM=";
+  };
+}
+```
+
+This allows testing the package easily:
+
+```ShellSession
+$ nix-build my-hello.nix
+$ ./result/bin/hello
+Hello, world!
+```
+
+## Using pre-built executables {#sec-custom-packages-prebuilt}
+
+Most pre-built executables will not work on NixOS. There are two notable
+exceptions: flatpaks and AppImages. For flatpaks see the [dedicated
+section](#module-services-flatpak). AppImages will not run "as-is" on NixOS.
+First you need to install `appimage-run` by adding the following to `/etc/nixos/configuration.nix`:
+
+```nix
+environment.systemPackages = [ pkgs.appimage-run ];
+```
+
+Then instead of running the AppImage "as-is", run `appimage-run foo.appimage`.
+
+To make other pre-built executables work on NixOS, you need to package them
+with Nix and special helpers like `autoPatchelfHook` or `buildFHSEnv`. See
+the [Nixpkgs manual](https://nixos.org/nixpkgs/manual) for details. This
+is complex and often doing a source build is easier.
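+
+For orientation, here is a minimal sketch of the `autoPatchelfHook`
+approach; the package name, URL and hash are placeholders, and
+`buildInputs` must list whatever libraries the binary actually links
+against:
+
+```nix
+{ stdenv, fetchurl, autoPatchelfHook, zlib }:
+
+stdenv.mkDerivation {
+  pname = "some-prebuilt-tool"; # hypothetical package
+  version = "1.0";
+
+  src = fetchurl {
+    url = "https://example.org/some-prebuilt-tool-1.0.tar.gz"; # placeholder
+    hash = ""; # fill in the hash reported by the first (failing) build
+  };
+
+  # autoPatchelfHook rewrites the ELF interpreter and RPATH of installed
+  # binaries so they find the libraries listed in buildInputs.
+  nativeBuildInputs = [ autoPatchelfHook ];
+  buildInputs = [ stdenv.cc.cc.lib zlib ];
+
+  installPhase = ''
+    runHook preInstall
+    install -Dm755 some-prebuilt-tool $out/bin/some-prebuilt-tool
+    runHook postInstall
+  '';
+}
+```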
diff --git a/nixpkgs/nixos/doc/manual/configuration/config-file.section.md b/nixpkgs/nixos/doc/manual/configuration/config-file.section.md
new file mode 100644
index 000000000000..b010026c5828
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/config-file.section.md
@@ -0,0 +1,175 @@
+# NixOS Configuration File {#sec-configuration-file}
+
+The NixOS configuration file generally looks like this:
+
+```nix
+{ config, pkgs, ... }:
+
+{ option definitions
+}
+```
+
+The first line (`{ config, pkgs, ... }:`) denotes that this is actually
+a function that takes at least the two arguments `config` and `pkgs`.
+(These are explained later, in chapter [](#sec-writing-modules).) The
+function returns a *set* of option definitions (`{ ... }`).
+These definitions have the form `name = value`, where `name` is the
+name of an option and `value` is its value. For example,
+
+```nix
+{ config, pkgs, ... }:
+
+{ services.httpd.enable = true;
+  services.httpd.adminAddr = "alice@example.org";
+  services.httpd.virtualHosts.localhost.documentRoot = "/webroot";
+}
+```
+
+defines a configuration with three option definitions that together
+enable the Apache HTTP Server with `/webroot` as the document root.
+
+Sets can be nested, and in fact dots in option names are shorthand for
+defining a set containing another set. For instance,
+[](#opt-services.httpd.enable) defines a set named
+`services` that contains a set named `httpd`, which in turn contains an
+option definition named `enable` with value `true`. This means that the
+example above can also be written as:
+
+```nix
+{ config, pkgs, ... }:
+
+{ services = {
+    httpd = {
+      enable = true;
+      adminAddr = "alice@example.org";
+      virtualHosts = {
+        localhost = {
+          documentRoot = "/webroot";
+        };
+      };
+    };
+  };
+}
+```
+
+which may be more convenient if you have lots of option definitions that
+share the same prefix (such as `services.httpd`).
+
+NixOS checks your option definitions for correctness. For instance, if
+you try to define an option that doesn't exist (that is, doesn't have a
+corresponding *option declaration*), `nixos-rebuild` will give an error
+like:
+
+```plain
+The option `services.httpd.enable' defined in `/etc/nixos/configuration.nix' does not exist.
+```
+
+Likewise, values in option definitions must have a correct type. For
+instance, `services.httpd.enable` must be a Boolean (`true` or `false`).
+Trying to give it a value of another type, such as a string, will cause
+an error:
+
+```plain
+The option value `services.httpd.enable' in `/etc/nixos/configuration.nix' is not a boolean.
+```
+
+Options have various types of values. The most important are:
+
+Strings
+
+:   Strings are enclosed in double quotes, e.g.
+
+    ```nix
+    networking.hostName = "dexter";
+    ```
+
+    Special characters can be escaped by prefixing them with a backslash
+    (e.g. `\"`).
+
+    Multi-line strings can be enclosed in *double single quotes*, e.g.
+
+    ```nix
+    networking.extraHosts =
+      ''
+        127.0.0.2 other-localhost
+        10.0.0.1 server
+      '';
+    ```
+
+    The main difference is that it strips from each line a number of
+    spaces equal to the minimal indentation of the string as a whole
+    (disregarding the indentation of empty lines), and that characters
+    like `"` and `\` are not special (making it more convenient for
+    including things like shell code). See more info about this in the
+    Nix manual [here](https://nixos.org/nix/manual/#ssec-values).
+
+Booleans
+
+:   These can be `true` or `false`, e.g.
+
+    ```nix
+    networking.firewall.enable = true;
+    networking.firewall.allowPing = false;
+    ```
+
+Integers
+
+:   For example,
+
+    ```nix
+    boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 60;
+    ```
+
+    (Note that here the attribute name `net.ipv4.tcp_keepalive_time` is
+    enclosed in quotes to prevent it from being interpreted as a set
+    named `net` containing a set named `ipv4`, and so on. This is
+    because it's not a NixOS option but the literal name of a Linux
+    kernel setting.)
+
+Sets
+
+:   Sets were introduced above. They are name/value pairs enclosed in
+    braces, as in the option definition
+
+    ```nix
+    fileSystems."/boot" =
+      { device = "/dev/sda1";
+        fsType = "ext4";
+        options = [ "rw" "data=ordered" "relatime" ];
+      };
+    ```
+
+Lists
+
+:   The important thing to note about lists is that list elements are
+    separated by whitespace, like this:
+
+    ```nix
+    boot.kernelModules = [ "fuse" "kvm-intel" "coretemp" ];
+    ```
+
+    List elements can be any other type, e.g. sets:
+
+    ```nix
+    swapDevices = [ { device = "/dev/disk/by-label/swap"; } ];
+    ```
+
+Packages
+
+:   Usually, the packages you need are already part of the Nix Packages
+    collection, which is a set that can be accessed through the function
+    argument `pkgs`. Typical uses:
+
+    ```nix
+    environment.systemPackages =
+      [ pkgs.thunderbird
+        pkgs.emacs
+      ];
+
+    services.postgresql.package = pkgs.postgresql_14;
+    ```
+
+    The latter option definition changes the default PostgreSQL package
+    used by NixOS's PostgreSQL service to 14.x. For more information on
+    packages, including how to add new ones, see
+    [](#sec-custom-packages).
diff --git a/nixpkgs/nixos/doc/manual/configuration/config-syntax.chapter.md b/nixpkgs/nixos/doc/manual/configuration/config-syntax.chapter.md
new file mode 100644
index 000000000000..9e606b2b82af
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/config-syntax.chapter.md
@@ -0,0 +1,18 @@
+# Configuration Syntax {#sec-configuration-syntax}
+
+The NixOS configuration file `/etc/nixos/configuration.nix` is actually
+a *Nix expression*, which is the Nix package manager's purely functional
+language for describing how to build packages and configurations. This
+means you have all the expressive power of that language at your
+disposal, including the ability to abstract over common patterns, which
+is very useful when managing complex systems. The syntax and semantics
+of the Nix language are fully described in the [Nix
+manual](https://nixos.org/nix/manual/#chap-writing-nix-expressions), but
+here we give a short overview of the most important constructs useful in
+NixOS configuration files.
+
+```{=include=} sections
+config-file.section.md
+abstractions.section.md
+modularity.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/configuration.md b/nixpkgs/nixos/doc/manual/configuration/configuration.md
new file mode 100644
index 000000000000..4c966f3325b9
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/configuration.md
@@ -0,0 +1,27 @@
+# Configuration {#ch-configuration}
+
+This chapter describes how to configure various aspects of a NixOS machine through the configuration file {file}`/etc/nixos/configuration.nix`. As described in [](#sec-changing-config), changes to this file only take effect after you run {command}`nixos-rebuild`.
+
+```{=include=} chapters
+config-syntax.chapter.md
+package-mgmt.chapter.md
+user-mgmt.chapter.md
+file-systems.chapter.md
+x-windows.chapter.md
+wayland.chapter.md
+gpu-accel.chapter.md
+xfce.chapter.md
+networking.chapter.md
+linux-kernel.chapter.md
+subversion.chapter.md
+```
+
+```{=include=} chapters
+@MODULE_CHAPTERS@
+```
+
+```{=include=} chapters
+profiles.chapter.md
+kubernetes.chapter.md
+```
+<!-- Apache; libvirtd virtualisation -->
diff --git a/nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md b/nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md
new file mode 100644
index 000000000000..76413b7d84fb
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/customizing-packages.section.md
@@ -0,0 +1,93 @@
+# Customising Packages {#sec-customising-packages}
+
+Some packages in Nixpkgs have options to enable or disable optional
+functionality or change other aspects of the package.
+
+::: {.warning}
+Unfortunately, Nixpkgs currently lacks a way to query available
+configuration options.
+:::
+
+::: {.note}
+For example, many packages come with extensions one might add.
+Examples include:
+- [`passExtensions.pass-otp`](https://search.nixos.org/packages/query=passExtensions.pass-otp)
+- [`python310Packages.requests`](https://search.nixos.org/packages/query=python310Packages.requests)
+
+You can use them like this:
+```nix
+environment.systemPackages = with pkgs; [
+  sl
+  (pass.withExtensions (subpkgs: with subpkgs; [
+    pass-audit
+    pass-otp
+    pass-genphrase
+  ]))
+  (python3.withPackages (subpkgs: with subpkgs; [
+      requests
+  ]))
+  cowsay
+];
+```
+:::
+
+Apart from high-level options, it's possible to tweak a package in
+almost arbitrary ways, such as changing or disabling dependencies of a
+package. For instance, the Emacs package in Nixpkgs by default has a
+dependency on GTK 2. If you want to build it against GTK 3, you can
+specify that as follows:
+
+```nix
+environment.systemPackages = [ (pkgs.emacs.override { gtk = pkgs.gtk3; }) ];
+```
+
+The function `override` performs the call to the Nix function that
+produces Emacs, with the original arguments amended by the set of
+arguments specified by you. So here the function argument `gtk` gets the
+value `pkgs.gtk3`, causing Emacs to depend on GTK 3. (The parentheses
+are necessary because in Nix, function application binds more weakly
+than list construction, so without them,
+[](#opt-environment.systemPackages)
+would be a list with two elements.)
+
+Even greater customisation is possible using the function
+`overrideAttrs`. While the `override` mechanism above overrides the
+arguments of a package function, `overrideAttrs` allows changing the
+*attributes* passed to `mkDerivation`. This permits changing any aspect
+of the package, such as the source code. For instance, if you want to
+override the source code of Emacs, you can say:
+
+```nix
+environment.systemPackages = [
+  (pkgs.emacs.overrideAttrs (oldAttrs: {
+    name = "emacs-25.0-pre";
+    src = /path/to/my/emacs/tree;
+  }))
+];
+```
+
+Here, `overrideAttrs` takes the Nix derivation specified by `pkgs.emacs`
+and produces a new derivation in which the original's `name` and `src`
+attribute have been replaced by the given values by re-calling
+`stdenv.mkDerivation`. The original attributes are accessible via the
+function argument, which is conventionally named `oldAttrs`.
+
+The overrides shown above are not global. They do not affect the
+original package; other packages in Nixpkgs continue to depend on the
+original rather than the customised package. This means that if another
+package in your system depends on the original package, you end up with
+two instances of the package. If you want to have everything depend on
+your customised instance, you can apply a *global* override as follows:
+
+```nix
+nixpkgs.config.packageOverrides = pkgs:
+  { emacs = pkgs.emacs.override { gtk = pkgs.gtk3; };
+  };
+```
+
+The effect of this definition is essentially equivalent to modifying the
+`emacs` attribute in the Nixpkgs source tree. Any package in Nixpkgs
+that depends on `emacs` will be passed your customised instance.
+(However, the value `pkgs.emacs` in `nixpkgs.config.packageOverrides`
+refers to the original rather than overridden instance, to prevent an
+infinite recursion.)
diff --git a/nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md b/nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md
new file mode 100644
index 000000000000..480e250da8c7
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/declarative-packages.section.md
@@ -0,0 +1,46 @@
+# Declarative Package Management {#sec-declarative-package-mgmt}
+
+With declarative package management, you specify which packages you want
+on your system by setting the option
+[](#opt-environment.systemPackages). For instance, adding the
+following line to `configuration.nix` enables the Mozilla Thunderbird
+email application:
+
+```nix
+environment.systemPackages = [ pkgs.thunderbird ];
+```
+
+The effect of this specification is that the Thunderbird package from
+Nixpkgs will be built or downloaded as part of the system when you run
+`nixos-rebuild switch`.
+
+::: {.note}
+Some packages require additional global configuration such as D-Bus or
+systemd service registration, so adding them to
+[](#opt-environment.systemPackages) might not be sufficient. Check the
+[list of options](#ch-options) to see whether a NixOS module exists for
+the package; if it does, prefer enabling it as in the example below.
+:::
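+
+For example, a daemon such as PostgreSQL is normally enabled through its
+NixOS module rather than by listing the package:
+
+```nix
+services.postgresql.enable = true;
+```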
+
+You can get a list of the available packages as follows:
+
+```ShellSession
+$ nix-env -qaP '*' --description
+nixos.firefox   firefox-23.0   Mozilla Firefox - the browser, reloaded
+...
+```
+
+The first column in the output is the *attribute name*, such as
+`nixos.thunderbird`.
+
+Note: the `nixos` prefix tells us that we want to get the package from
+the `nixos` channel and works only in CLI tools. In declarative
+configuration, use the `pkgs` prefix (variable) instead.
+
+To "uninstall" a package, remove it from
+[](#opt-environment.systemPackages) and run `nixos-rebuild switch`.
+
+```{=include=} sections
+customizing-packages.section.md
+adding-custom-packages.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md b/nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md
new file mode 100644
index 000000000000..aca978be064d
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/file-systems.chapter.md
@@ -0,0 +1,42 @@
+# File Systems {#ch-file-systems}
+
+You can define file systems using the `fileSystems` configuration
+option. For instance, the following definition causes NixOS to mount the
+Ext4 file system on device `/dev/disk/by-label/data` onto the mount
+point `/data`:
+
+```nix
+fileSystems."/data" =
+  { device = "/dev/disk/by-label/data";
+    fsType = "ext4";
+  };
+```
+
+This will create an entry in `/etc/fstab`, which will generate a
+corresponding [systemd.mount](https://www.freedesktop.org/software/systemd/man/systemd.mount.html)
+unit via [systemd-fstab-generator](https://www.freedesktop.org/software/systemd/man/systemd-fstab-generator.html).
+The filesystem will be mounted automatically unless `"noauto"` is
+present in [options](#opt-fileSystems._name_.options). `"noauto"`
+filesystems can be mounted explicitly using `systemctl` e.g.
+`systemctl start data.mount`. Mount points are created automatically if they don't
+already exist. For `device`, it's best to use the topology-independent
+device aliases in `/dev/disk/by-label` and `/dev/disk/by-uuid`, as these
+don't change if the topology changes (e.g. if a disk is moved to another
+IDE controller).
+
+You can usually omit the file system type (`fsType`), since `mount` can
+usually detect the type and load the necessary kernel module
+automatically. However, if the file system is needed at early boot (in
+the initial ramdisk) and is not `ext2`, `ext3` or `ext4`, then it's best
+to specify `fsType` to ensure that the kernel module is available.
+
+::: {.note}
+System startup will fail if any of the filesystems fails to mount,
+dropping you to the emergency shell. You can make a mount asynchronous
+and non-critical by adding `options = [ "nofail" ];`, as in the example below.
+:::
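+
+Continuing the `/data` example above, a non-critical mount might look
+like this:
+
+```nix
+fileSystems."/data" =
+  { device = "/dev/disk/by-label/data";
+    fsType = "ext4";
+    options = [ "nofail" ];
+  };
+```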
+
+```{=include=} sections
+luks-file-systems.section.md
+sshfs-file-systems.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/firewall.section.md b/nixpkgs/nixos/doc/manual/configuration/firewall.section.md
new file mode 100644
index 000000000000..dbf0ffb9273e
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/firewall.section.md
@@ -0,0 +1,32 @@
+# Firewall {#sec-firewall}
+
+NixOS has a simple stateful firewall that blocks incoming connections
+and other unexpected packets. The firewall applies to both IPv4 and IPv6
+traffic. It is enabled by default. It can be disabled as follows:
+
+```nix
+networking.firewall.enable = false;
+```
+
+If the firewall is enabled, you can open specific TCP ports to the
+outside world:
+
+```nix
+networking.firewall.allowedTCPPorts = [ 80 443 ];
+```
+
+Note that TCP port 22 (ssh) is opened automatically if the SSH daemon is
+enabled (`services.openssh.enable = true`). UDP ports can be opened through
+[](#opt-networking.firewall.allowedUDPPorts).
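+
+For example, to accept incoming DNS queries:
+
+```nix
+networking.firewall.allowedUDPPorts = [ 53 ];
+```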
+
+To open ranges of TCP ports:
+
+```nix
+networking.firewall.allowedTCPPortRanges = [
+  { from = 4000; to = 4007; }
+  { from = 8000; to = 8010; }
+];
+```
+
+Similarly, UDP port ranges can be opened through
+[](#opt-networking.firewall.allowedUDPPortRanges).
diff --git a/nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md b/nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md
new file mode 100644
index 000000000000..dfccdf291b73
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/gpu-accel.chapter.md
@@ -0,0 +1,238 @@
+# GPU acceleration {#sec-gpu-accel}
+
+NixOS provides various APIs that benefit from GPU hardware acceleration,
+such as VA-API and VDPAU for video playback; OpenGL and Vulkan for 3D
+graphics; and OpenCL for general-purpose computing. This chapter
+describes how to set up GPU hardware acceleration (as far as this is not
+done automatically) and how to verify that hardware acceleration is
+indeed used.
+
+Most of the aforementioned APIs are agnostic with regards to which
+display server is used. Consequently, these instructions should apply
+both to the X Window System and Wayland compositors.
+
+## OpenCL {#sec-gpu-accel-opencl}
+
+[OpenCL](https://en.wikipedia.org/wiki/OpenCL) is a general compute API.
+It is used by various applications such as Blender and Darktable to
+accelerate certain operations.
+
+OpenCL applications load drivers through the *Installable Client Driver*
+(ICD) mechanism. In this mechanism, an ICD file specifies the path to
+the OpenCL driver for a particular GPU family. In NixOS, there are two
+ways to make ICD files visible to the ICD loader. The first is through
+the `OCL_ICD_VENDORS` environment variable. This variable can contain a
+directory which is scanned by the ICD loader for ICD files. For example:
+
+```ShellSession
+$ export \
+  OCL_ICD_VENDORS=`nix-build '<nixpkgs>' --no-out-link -A rocmPackages.clr.icd`/etc/OpenCL/vendors/
+```
+
+The second mechanism is to add the OpenCL driver package to
+[](#opt-hardware.opengl.extraPackages).
+This links the ICD file under `/run/opengl-driver`, where it will be visible
+to the ICD loader.
+
+The proper installation of OpenCL drivers can be verified through the
+`clinfo` command of the clinfo package. This command will report the
+number of hardware devices that is found and give detailed information
+for each device:
+
+```ShellSession
+$ clinfo | head -n3
+Number of platforms  1
+Platform Name        AMD Accelerated Parallel Processing
+Platform Vendor      Advanced Micro Devices, Inc.
+```
+
+### AMD {#sec-gpu-accel-opencl-amd}
+
+Modern AMD [Graphics Core
+Next](https://en.wikipedia.org/wiki/Graphics_Core_Next) (GCN) GPUs are
+supported through the rocmPackages.clr.icd package. Adding this package to
+[](#opt-hardware.opengl.extraPackages)
+enables OpenCL support:
+
+```nix
+hardware.opengl.extraPackages = [
+  rocmPackages.clr.icd
+];
+```
+
+### Intel {#sec-gpu-accel-opencl-intel}
+
+[Intel Gen8 and later
+GPUs](https://en.wikipedia.org/wiki/List_of_Intel_graphics_processing_units#Gen8)
+are supported by the Intel NEO OpenCL runtime that is provided by the
+intel-compute-runtime package. For Gen7 GPUs, the deprecated Beignet
+runtime can be used, which is provided by the beignet package. The
+proprietary Intel OpenCL runtime, in the intel-ocl package, is an
+alternative for Gen7 GPUs.
+
+The intel-compute-runtime, beignet, or intel-ocl package can be added to
+[](#opt-hardware.opengl.extraPackages)
+to enable OpenCL support. For example, for Gen8 and later GPUs, the following
+configuration can be used:
+
+```nix
+hardware.opengl.extraPackages = [
+  intel-compute-runtime
+];
+```
+
+## Vulkan {#sec-gpu-accel-vulkan}
+
+[Vulkan](https://en.wikipedia.org/wiki/Vulkan_(API)) is a graphics and
+compute API for GPUs. It is used directly by games or indirectly through
+compatibility layers like
+[DXVK](https://github.com/doitsujin/dxvk/wiki).
+
+By default, if [](#opt-hardware.opengl.driSupport)
+is enabled, mesa is installed and provides Vulkan for supported hardware.
+
+Similar to OpenCL, Vulkan drivers are loaded through the *Installable
+Client Driver* (ICD) mechanism. ICD files for Vulkan are JSON files that
+specify the path to the driver library and the supported Vulkan version.
+All successfully loaded drivers are exposed to the application as
+different GPUs. In NixOS, there are two ways to make ICD files visible
+to Vulkan applications: an environment variable and a module option.
+
+The first option is through the `VK_ICD_FILENAMES` environment variable.
+This variable can contain multiple JSON files, separated by `:`. For
+example:
+
+```ShellSession
+$ export \
+  VK_ICD_FILENAMES=`nix-build '<nixpkgs>' --no-out-link -A amdvlk`/share/vulkan/icd.d/amd_icd64.json
+```
+
+The second mechanism is to add the Vulkan driver package to
+[](#opt-hardware.opengl.extraPackages).
+This links the ICD file under `/run/opengl-driver`, where it will be
+visible to the ICD loader.
+
+The proper installation of Vulkan drivers can be verified through the
+`vulkaninfo` command of the vulkan-tools package. This command will
+report the hardware devices and drivers found, in this example output
+amdvlk and radv:
+
+```ShellSession
+$ vulkaninfo | grep GPU
+                GPU id  : 0 (Unknown AMD GPU)
+                GPU id  : 1 (AMD RADV NAVI10 (LLVM 9.0.1))
+     ...
+GPU0:
+        deviceType     = PHYSICAL_DEVICE_TYPE_DISCRETE_GPU
+        deviceName     = Unknown AMD GPU
+GPU1:
+        deviceType     = PHYSICAL_DEVICE_TYPE_DISCRETE_GPU
+```
+
+A simple graphical application that uses Vulkan is `vkcube` from the
+vulkan-tools package.
+
+### AMD {#sec-gpu-accel-vulkan-amd}
+
+Modern AMD [Graphics Core
+Next](https://en.wikipedia.org/wiki/Graphics_Core_Next) (GCN) GPUs are
+supported through either radv, which is part of mesa, or the amdvlk
+package. Adding the amdvlk package to
+[](#opt-hardware.opengl.extraPackages)
+makes amdvlk the default driver and hides radv and lavapipe from the device list.
+A specific driver can be forced as follows:
+
+```nix
+hardware.opengl.extraPackages = [
+  pkgs.amdvlk
+];
+
+# To enable Vulkan support for 32-bit applications, also add:
+hardware.opengl.extraPackages32 = [
+  pkgs.driversi686Linux.amdvlk
+];
+
+# Force radv
+environment.variables.AMD_VULKAN_ICD = "RADV";
+# Or
+environment.variables.VK_ICD_FILENAMES =
+  "/run/opengl-driver/share/vulkan/icd.d/radeon_icd.x86_64.json";
+```
+
+## VA-API {#sec-gpu-accel-va-api}
+
+[VA-API (Video Acceleration API)](https://www.intel.com/content/www/us/en/developer/articles/technical/linuxmedia-vaapi.html)
+is an open-source library and API specification, which provides access to
+graphics hardware acceleration capabilities for video processing.
+
+VA-API drivers are loaded by `libva`. The version in nixpkgs is built to search
+the opengl driver path, so drivers can be installed in
+[](#opt-hardware.opengl.extraPackages).
+
+VA-API can be tested using:
+
+```ShellSession
+$ nix-shell -p libva-utils --run vainfo
+```
+
+### Intel {#sec-gpu-accel-va-api-intel}
+
+Modern Intel GPUs use the iHD driver, which can be installed with:
+
+```nix
+hardware.opengl.extraPackages = [
+  intel-media-driver
+];
+```
+
+Older Intel GPUs use the i965 driver, which can be installed with:
+
+```nix
+hardware.opengl.extraPackages = [
+  intel-vaapi-driver
+];
+```
+
+## Common issues {#sec-gpu-accel-common-issues}
+
+### User permissions {#sec-gpu-accel-common-issues-permissions}
+
+Except where noted explicitly, it should not be necessary to adjust user
+permissions to use these acceleration APIs. In the default
+configuration, GPU devices have world-read/write permissions
+(`/dev/dri/renderD*`) or are tagged as `uaccess` (`/dev/dri/card*`). The
+access control lists of devices with the `uaccess` tag will be updated
+automatically when a user logs in through `systemd-logind`. For example,
+if the user *alice* is logged in, the access control list should look as
+follows:
+
+```ShellSession
+$ getfacl /dev/dri/card0
+# file: dev/dri/card0
+# owner: root
+# group: video
+user::rw-
+user:alice:rw-
+group::rw-
+mask::rw-
+other::---
+```
+
+If you disabled (this functionality of) `systemd-logind`, you may need
+to add the user to the `video` group and log in again.
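+
+Declaratively, that could be a sketch like the following (assuming a
+user named `alice`, as in the example above):
+
+```nix
+users.users.alice.extraGroups = [ "video" ];
+```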
+
+### Mixing different versions of nixpkgs {#sec-gpu-accel-common-issues-mixing-nixpkgs}
+
+The *Installable Client Driver* (ICD) mechanism used by OpenCL and
+Vulkan loads runtimes into its address space using `dlopen`. Mixing an
+ICD loader mechanism and runtimes from different version of nixpkgs may
+not work. For example, if the ICD loader uses an older version of glibc
+than the runtime, the runtime may not be loadable due to missing
+symbols. Unfortunately, the loader will generally be quiet about such
+issues.
+
+If you suspect that you are running into library version mismatches
+between an ICD loader and a runtime, you could run an application with
+the `LD_DEBUG` variable set to get more diagnostic information. For
+example, OpenCL can be tested with `LD_DEBUG=files clinfo`, which should
+report missing symbols.
diff --git a/nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md b/nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md
new file mode 100644
index 000000000000..c73024b856d7
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/ipv4-config.section.md
@@ -0,0 +1,35 @@
+# IPv4 Configuration {#sec-ipv4}
+
+By default, NixOS uses DHCP (specifically, `dhcpcd`) to automatically
+configure network interfaces. However, you can configure an interface
+manually as follows:
+
+```nix
+networking.interfaces.eth0.ipv4.addresses = [ {
+  address = "192.168.1.2";
+  prefixLength = 24;
+} ];
+```
+
+Typically you'll also want to set a default gateway and set of name
+servers:
+
+```nix
+networking.defaultGateway = "192.168.1.1";
+networking.nameservers = [ "8.8.8.8" ];
+```
+
+::: {.note}
+Statically configured interfaces are set up by the systemd service
+`interface-name-cfg.service`. The default gateway and name server
+configuration is performed by `network-setup.service`.
+:::
+
+The host name is set using [](#opt-networking.hostName):
+
+```nix
+networking.hostName = "cartman";
+```
+
+The default host name is `nixos`. Set it to the empty string (`""`) to
+allow the DHCP server to provide the host name.
diff --git a/nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md b/nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md
new file mode 100644
index 000000000000..ce66f53ed472
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/ipv6-config.section.md
@@ -0,0 +1,42 @@
+# IPv6 Configuration {#sec-ipv6}
+
+IPv6 is enabled by default. Stateless address autoconfiguration is used
+to automatically assign IPv6 addresses to all interfaces, and Privacy
+Extensions (RFC 4941) are enabled by default. You can adjust the default
+for this by setting [](#opt-networking.tempAddresses). This option
+may be overridden on a per-interface basis by
+[](#opt-networking.interfaces._name_.tempAddress). You can disable
+IPv6 support globally by setting:
+
+```nix
+networking.enableIPv6 = false;
+```
+
+You can disable IPv6 on a single interface using a normal sysctl (in
+this example, we use interface `eth0`):
+
+```nix
+boot.kernel.sysctl."net.ipv6.conf.eth0.disable_ipv6" = true;
+```
+
+As with IPv4, networking interfaces are automatically configured via
+DHCPv6. You can configure an interface manually:
+
+```nix
+networking.interfaces.eth0.ipv6.addresses = [ {
+  address = "fe00:aa:bb:cc::2";
+  prefixLength = 64;
+} ];
+```
+
+For configuring a gateway, optionally with explicitly specified
+interface:
+
+```nix
+networking.defaultGateway6 = {
+  address = "fe00::1";
+  interface = "enp0s3";
+};
+```
+
+See [](#sec-ipv4) for similar examples and additional information.
diff --git a/nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md b/nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md
new file mode 100644
index 000000000000..f39726090e43
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/kubernetes.chapter.md
@@ -0,0 +1,96 @@
+# Kubernetes {#sec-kubernetes}
+
+The NixOS Kubernetes module is a collective term for a handful of
+individual submodules implementing the Kubernetes cluster components.
+
+There are generally two ways of enabling Kubernetes on NixOS. One way is
+to enable and configure cluster components appropriately by hand:
+
+```nix
+services.kubernetes = {
+  apiserver.enable = true;
+  controllerManager.enable = true;
+  scheduler.enable = true;
+  addonManager.enable = true;
+  proxy.enable = true;
+  flannel.enable = true;
+};
+```
+
+Another way is to assign cluster roles ("master" and/or "node") to
+the host. This enables apiserver, controllerManager, scheduler,
+addonManager, kube-proxy and etcd:
+
+```nix
+services.kubernetes.roles = [ "master" ];
+```
+
+While this will enable the kubelet and kube-proxy only:
+
+```nix
+services.kubernetes.roles = [ "node" ];
+```
+
+Assigning both the master and node roles is useful if you want a
+single-node Kubernetes cluster for dev or testing purposes:
+
+```nix
+services.kubernetes.roles = [ "master" "node" ];
+```
+
+Note: Assigning either role will also default both
+[](#opt-services.kubernetes.flannel.enable)
+and [](#opt-services.kubernetes.easyCerts)
+to true. This sets up flannel as CNI and activates automatic PKI bootstrapping.
+
+::: {.note}
+As of NixOS 19.03, it is mandatory to configure
+[](#opt-services.kubernetes.masterAddress).
+The masterAddress must be resolvable and routable by all cluster nodes.
+In single-node clusters, this can be set to `localhost`, as in the
+sketch below.
+:::
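+
+Putting these together, a single-node cluster could be sketched as:
+
+```nix
+services.kubernetes = {
+  roles = [ "master" "node" ];
+  masterAddress = "localhost";
+};
+```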
+
+Role-based access control (RBAC) authorization mode is enabled by
+default. This means that anonymous requests to the apiserver secure port
+will expectedly cause a permission denied error. All cluster components
+must therefore be configured with x509 certificates for two-way tls
+communication. The x509 certificate subject section determines the roles
+and permissions granted by the apiserver to perform clusterwide or
+namespaced operations. See also: [Using RBAC
+Authorization](https://kubernetes.io/docs/reference/access-authn-authz/rbac/).
+
+The NixOS kubernetes module provides an option for automatic certificate
+bootstrapping and configuration,
+[](#opt-services.kubernetes.easyCerts).
+The PKI bootstrapping process involves setting up a certificate authority (CA)
+daemon (cfssl) on the kubernetes master node. cfssl generates a CA-cert
+for the cluster, and uses the CA-cert for signing subordinate certs issued
+to each of the cluster components. Subsequently, the certmgr daemon monitors
+active certificates and renews them when needed. For single node Kubernetes
+clusters, setting [](#opt-services.kubernetes.easyCerts)
+to `true` is sufficient and no further action is required. For joining extra node
+machines to an existing cluster on the other hand, establishing initial
+trust is mandatory.
+
+To add new nodes to the cluster: On any (non-master) cluster node where
+[](#opt-services.kubernetes.easyCerts)
+is enabled, the helper script `nixos-kubernetes-node-join` is available on PATH.
+Given a token on stdin, it will copy the token to the kubernetes secrets directory
+and restart the certmgr service. As requested certificates are issued, the
+script will restart kubernetes cluster components as needed for them to
+pick up new keypairs.
+
+::: {.note}
+Multi-master (HA) clusters are not supported by the easyCerts module.
+:::
+
+In order to interact with an RBAC-enabled cluster as an administrator,
+one needs to have cluster-admin privileges. By default, when easyCerts
+is enabled, a cluster-admin kubeconfig file is generated and linked into
+`/etc/kubernetes/cluster-admin.kubeconfig` as determined by
+[](#opt-services.kubernetes.pki.etcClusterAdminKubeconfig).
+`export KUBECONFIG=/etc/kubernetes/cluster-admin.kubeconfig` will make
+kubectl use this kubeconfig to access and authenticate the cluster. The
+cluster-admin kubeconfig references an auto-generated keypair owned by
+root. Thus, only root on the kubernetes master may obtain cluster-admin
+rights by means of this file.
diff --git a/nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md b/nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md
new file mode 100644
index 000000000000..f5bce99dd1bb
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/linux-kernel.chapter.md
@@ -0,0 +1,180 @@
+# Linux Kernel {#sec-kernel-config}
+
+You can override the Linux kernel and associated packages using the
+option `boot.kernelPackages`. For instance, this selects the Linux 3.10
+kernel:
+
+```nix
+boot.kernelPackages = pkgs.linuxKernel.packages.linux_3_10;
+```
+
+Note that this not only replaces the kernel, but also packages that are
+specific to the kernel version, such as the NVIDIA video drivers. This
+ensures that driver packages are consistent with the kernel.
+
+While `pkgs.linuxKernel.packages` contains all available kernel packages,
+you may want to use one of the unversioned `pkgs.linuxPackages_*` aliases
+such as `pkgs.linuxPackages_latest`, that are kept up to date with new
+versions.
+
+Please note that the current convention in NixOS is to only keep actively
+maintained kernel versions on both unstable and the currently supported stable
+release(s) of NixOS. This means that a non-longterm kernel will be removed after it's
+abandoned by the kernel developers, even on stable NixOS versions. If you
+pin your kernel onto a non-longterm version, expect your evaluation to fail as
+soon as the version is out of maintenance.
+
+Longterm kernel versions will be removed before the next stable NixOS release
+whose support period would outlast the kernel's maintenance period.
+
+The default Linux kernel configuration should be fine for most users.
+You can see the configuration of your current kernel with the following
+command:
+
+```ShellSession
+zcat /proc/config.gz
+```
+
+If you want to change the kernel configuration, you can use the
+`packageOverrides` feature (see [](#sec-customising-packages)). For
+instance, to enable support for the kernel debugger KGDB:
+
+```nix
+nixpkgs.config.packageOverrides = pkgs: pkgs.lib.recursiveUpdate pkgs {
+  linuxKernel.kernels.linux_5_10 = pkgs.linuxKernel.kernels.linux_5_10.override {
+    extraConfig = ''
+      KGDB y
+    '';
+  };
+};
+```
+
+`extraConfig` takes a list of Linux kernel configuration options, one
+per line. The name of the option should not include the prefix
+`CONFIG_`. The option value is typically `y`, `n` or `m` (to build
+something as a kernel module).
+
+Kernel modules for hardware devices are generally loaded automatically
+by `udev`. You can force a module to be loaded via
+[](#opt-boot.kernelModules), e.g.
+
+```nix
+boot.kernelModules = [ "fuse" "kvm-intel" "coretemp" ];
+```
+
+If the module is required early during the boot (e.g. to mount the root
+file system), you can use [](#opt-boot.initrd.kernelModules):
+
+```nix
+boot.initrd.kernelModules = [ "cifs" ];
+```
+
+This causes the specified modules and their dependencies to be added to
+the initial ramdisk.
+
+Kernel runtime parameters can be set through
+[](#opt-boot.kernel.sysctl), e.g.
+
+```nix
+boot.kernel.sysctl."net.ipv4.tcp_keepalive_time" = 120;
+```
+
+sets the kernel's TCP keepalive time to 120 seconds. To see the
+available parameters, run `sysctl -a`.
+
+## Building a custom kernel {#sec-linux-config-customizing}
+
+You can customize the default kernel configuration by overriding the arguments for your kernel package:
+
+```nix
+pkgs.linux_latest.override {
+  ignoreConfigErrors = true;
+  autoModules = false;
+  kernelPreferBuiltin = true;
+  extraStructuredConfig = with lib.kernel; {
+    DEBUG_KERNEL = yes;
+    FRAME_POINTER = yes;
+    KGDB = yes;
+    KGDB_SERIAL_CONSOLE = yes;
+    DEBUG_INFO = yes;
+  };
+}
+```
+
+See `pkgs/os-specific/linux/kernel/generic.nix` for details on how these arguments
+affect the generated configuration. You can also build a custom version of Linux by calling
+`pkgs.buildLinux` directly, which requires the `src` and `version` arguments to be specified.
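+
+A minimal sketch of such a call (the version, URL and hash here are
+placeholders, not a tested configuration):
+
+```nix
+pkgs.buildLinux {
+  version = "6.6.1";        # placeholder kernel version
+  modDirVersion = "6.6.1";  # must match the kernel's reported release
+  src = pkgs.fetchurl {
+    url = "mirror://kernel/linux/kernel/v6.x/linux-6.6.1.tar.xz";
+    hash = "";              # fill in the real hash
+  };
+}
+```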
+
+To use your custom kernel package in your NixOS configuration, set
+
+```nix
+boot.kernelPackages = pkgs.linuxPackagesFor yourCustomKernel;
+```
+
+Note that this method will use the common configuration defined in `pkgs/os-specific/linux/kernel/common-config.nix`,
+which is suitable for a NixOS system.
+
+If you already have a generated configuration file, you can build a kernel that uses it with `pkgs.linuxManualConfig`:
+
+```nix
+let
+  baseKernel = pkgs.linux_latest;
+in pkgs.linuxManualConfig {
+  inherit (baseKernel) src modDirVersion;
+  version = "${baseKernel.version}-custom";
+  configfile = ./my_kernel_config;
+  allowImportFromDerivation = true;
+}
+```
+
+::: {.note}
+The build will fail if `modDirVersion` does not match the source's `kernel.release` file,
+so `modDirVersion` should remain tied to `src`.
+:::
+
+To edit the `.config` file for Linux X.Y, proceed as follows:
+
+```ShellSession
+$ nix-shell '<nixpkgs>' -A linuxKernel.kernels.linux_X_Y.configEnv
+$ unpackPhase
+$ cd linux-*
+$ make nconfig
+```
+
+## Developing kernel modules {#sec-linux-config-developing-modules}
+
+When developing kernel modules it's often convenient to run the
+edit-compile-run loop as quickly as possible. The snippet below shows an
+example of developing the `mellanox` drivers.
+
+```ShellSession
+$ nix-build '<nixpkgs>' -A linuxPackages.kernel.dev
+$ nix-shell '<nixpkgs>' -A linuxPackages.kernel
+$ unpackPhase
+$ cd linux-*
+$ make -C $dev/lib/modules/*/build M=$(pwd)/drivers/net/ethernet/mellanox modules
+# insmod ./drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.ko
+```
+
+## ZFS {#sec-linux-zfs}
+
+It's a common issue that the latest stable version of ZFS doesn't support the latest
+available Linux kernel. It is recommended to use the latest available LTS that's compatible
+with ZFS. Usually this is the default kernel provided by nixpkgs (i.e. `pkgs.linuxPackages`).
+
+Alternatively, it's possible to pin the system to the latest available kernel
+version *that is supported by ZFS* like this:
+
+```nix
+{
+  boot.kernelPackages = pkgs.zfs.latestCompatibleLinuxPackages;
+}
+```
+
+Please note that the kernel version this attribute points to isn't monotonic, because it only
+considers kernel versions that are still supported by the Linux developers. In other words,
+the latest kernel version that ZFS is compatible with may decrease over time.
+
+An example: the latest version ZFS is compatible with is 5.19 which is a non-longterm version. When 5.19
+is out of maintenance, the latest supported kernel version is 5.15 because it's longterm and the versions
+5.16, 5.17 and 5.18 are already out of maintenance because they're non-longterm.
diff --git a/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md b/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md
new file mode 100644
index 000000000000..b5d0407d1659
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/luks-file-systems.section.md
@@ -0,0 +1,77 @@
+# LUKS-Encrypted File Systems {#sec-luks-file-systems}
+
+NixOS supports file systems that are encrypted using *LUKS* (Linux
+Unified Key Setup). For example, here is how you create an encrypted
+Ext4 file system on the device
+`/dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d`:
+
+```ShellSession
+# cryptsetup luksFormat /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d
+
+WARNING!
+========
+This will overwrite data on /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d irrevocably.
+
+Are you sure? (Type uppercase yes): YES
+Enter LUKS passphrase: ***
+Verify passphrase: ***
+
+# cryptsetup luksOpen /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d crypted
+Enter passphrase for /dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d: ***
+
+# mkfs.ext4 /dev/mapper/crypted
+```
+
+The LUKS volume should be automatically picked up by
+`nixos-generate-config`, but you might want to verify that your
+`hardware-configuration.nix` looks correct. To manually ensure that the
+file system is automatically mounted at boot time as `/`, add the following
+to `configuration.nix`:
+
+```nix
+boot.initrd.luks.devices.crypted.device = "/dev/disk/by-uuid/3f6b0024-3a44-4fde-a43a-767b872abe5d";
+fileSystems."/".device = "/dev/mapper/crypted";
+```
+
+If GRUB is used as the bootloader and `/boot` is located on an
+encrypted partition, it is necessary to add the following GRUB option:
+
+```nix
+boot.loader.grub.enableCryptodisk = true;
+```
+
+## FIDO2 {#sec-luks-file-systems-fido2}
+
+NixOS also supports unlocking your LUKS-Encrypted file system using a
+FIDO2 compatible token. In the following example, we will create a new
+FIDO2 credential and add it as a new key to our existing device
+`/dev/sda2`:
+
+```ShellSession
+# export FIDO2_LABEL="/dev/sda2 @ $HOSTNAME"
+# fido2luks credential "$FIDO2_LABEL"
+f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7
+
+# fido2luks -i add-key /dev/sda2 f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7
+Password:
+Password (again):
+Old password:
+Old password (again):
+Added to key to device /dev/sda2, slot: 2
+```
+
+To ensure that this file system is decrypted using the FIDO2 compatible
+key, add the following to `configuration.nix`:
+
+```nix
+boot.initrd.luks.fido2Support = true;
+boot.initrd.luks.devices."/dev/sda2".fido2.credential = "f1d00200108b9d6e849a8b388da457688e3dd653b4e53770012d8f28e5d3b269865038c346802f36f3da7278b13ad6a3bb6a1452e24ebeeaa24ba40eef559b1b287d2a2f80b7";
+```
+
+You can also use the FIDO2 passwordless setup, but for security reasons
+you might want to enable it only if your device is PIN-protected, as is
+the case with [Trezor](https://trezor.io/), for example.
+
+```nix
+boot.initrd.luks.devices."/dev/sda2".fido2.passwordLess = true;
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/modularity.section.md b/nixpkgs/nixos/doc/manual/configuration/modularity.section.md
new file mode 100644
index 000000000000..f4a566d66973
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/modularity.section.md
@@ -0,0 +1,133 @@
+# Modularity {#sec-modularity}
+
+The NixOS configuration mechanism is modular. If your
+`configuration.nix` becomes too big, you can split it into multiple
+files. Likewise, if you have multiple NixOS configurations (e.g. for
+different computers) with some commonality, you can move the common
+configuration into a shared file.
+
+Modules have exactly the same syntax as `configuration.nix`. In fact,
+`configuration.nix` is itself a module. You can use other modules by
+including them from `configuration.nix`, e.g.:
+
+```nix
+{ config, pkgs, ... }:
+
+{ imports = [ ./vpn.nix ./kde.nix ];
+  services.httpd.enable = true;
+  environment.systemPackages = [ pkgs.emacs ];
+  ...
+}
+```
+
+Here, we include two modules from the same directory, `vpn.nix` and
+`kde.nix`. The latter might look like this:
+
+```nix
+{ config, pkgs, ... }:
+
+{ services.xserver.enable = true;
+  services.xserver.displayManager.sddm.enable = true;
+  services.xserver.desktopManager.plasma5.enable = true;
+  environment.systemPackages = [ pkgs.vim ];
+}
+```
+
+Note that both `configuration.nix` and `kde.nix` define the option
+[](#opt-environment.systemPackages). When multiple modules define an
+option, NixOS will try to *merge* the definitions. In the case of
+[](#opt-environment.systemPackages) the lists of packages will be
+concatenated. The value in `configuration.nix` is
+merged last, so for list-type options, it will appear at the end of the
+merged list. If you want it to appear first, you can use `mkBefore`:
+
+```nix
+boot.kernelModules = mkBefore [ "kvm-intel" ];
+```
+
+This causes the `kvm-intel` kernel module to be loaded before any other
+kernel modules.
+
+For other types of options, a merge may not be possible. For instance,
+if two modules define [](#opt-services.httpd.adminAddr),
+`nixos-rebuild` will give an error:
+
+```plain
+The unique option `services.httpd.adminAddr' is defined multiple times, in `/etc/nixos/httpd.nix' and `/etc/nixos/configuration.nix'.
+```
+
+When that happens, it's possible to force one definition to take
+precedence over the others:
+
+```nix
+services.httpd.adminAddr = pkgs.lib.mkForce "bob@example.org";
+```
+
+When using multiple modules, you may need to access configuration values
+defined in other modules. This is what the `config` function argument is
+for: it contains the complete, merged system configuration. That is,
+`config` is the result of combining the configurations returned by every
+module. (If you're wondering how it's possible that the (indirect) *result*
+of a function is passed as an *input* to that same function: that's
+because Nix is a "lazy" language --- it only computes values when
+they are needed. This works as long as no individual configuration
+value depends on itself.)
+
+For example, here is a module that adds some packages to
+[](#opt-environment.systemPackages) only if
+[](#opt-services.xserver.enable) is set to `true` somewhere else:
+
+```nix
+{ config, pkgs, ... }:
+
+{ environment.systemPackages =
+    if config.services.xserver.enable then
+      [ pkgs.firefox
+        pkgs.thunderbird
+      ]
+    else
+      [ ];
+}
+```
+
+With multiple modules, it may not be obvious what the final value of a
+configuration option is. The command `nixos-option` allows you to find
+out:
+
+```ShellSession
+$ nixos-option services.xserver.enable
+true
+
+$ nixos-option boot.kernelModules
+[ "tun" "ipv6" "loop" ... ]
+```
+
+Interactive exploration of the configuration is possible using
+`nix repl`, a read-eval-print loop for Nix expressions. A typical use:
+
+```ShellSession
+$ nix repl '<nixpkgs/nixos>'
+
+nix-repl> config.networking.hostName
+"mandark"
+
+nix-repl> map (x: x.hostName) config.services.httpd.virtualHosts
+[ "example.org" "example.gov" ]
+```
+
+While abstracting your configuration, you may find it useful to generate
+modules using code, instead of writing files. The example below would
+have the same effect as importing a file which sets those options.
+
+```nix
+{ config, pkgs, ... }:
+
+let netConfig = hostName: {
+  networking.hostName = hostName;
+  networking.useDHCP = false;
+};
+
+in
+
+{ imports = [ (netConfig "nixos.localdomain") ]; }
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/network-manager.section.md b/nixpkgs/nixos/doc/manual/configuration/network-manager.section.md
new file mode 100644
index 000000000000..4bda21d34a10
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/network-manager.section.md
@@ -0,0 +1,42 @@
+# NetworkManager {#sec-networkmanager}
+
+To facilitate network configuration, some desktop environments use
+NetworkManager. You can enable NetworkManager by setting:
+
+```nix
+networking.networkmanager.enable = true;
+```
+
+Some desktop environments (e.g., GNOME) enable NetworkManager
+automatically for you.
+
+All users that should have permission to change network settings must
+belong to the `networkmanager` group:
+
+```nix
+users.users.alice.extraGroups = [ "networkmanager" ];
+```
+
+NetworkManager is controlled using either `nmcli` or `nmtui`
+(curses-based terminal user interface). See their manual pages for
+details on their usage. Some desktop environments (GNOME, KDE) have
+their own configuration tools for NetworkManager. On XFCE, there is no
+configuration tool for NetworkManager by default: by enabling
+[](#opt-programs.nm-applet.enable), the graphical applet will be
+installed and will launch automatically when the graphical session is
+started.
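+
+For example, to get the applet on such a setup you would add:
+
+```nix
+programs.nm-applet.enable = true;
+```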
+
+::: {.note}
+`networking.networkmanager` and `networking.wireless` (WPA Supplicant)
+can be used together if desired. To do this you need to instruct
+NetworkManager to ignore those interfaces, e.g.:
+
+```nix
+networking.networkmanager.unmanaged = [
+   "*" "except:type:wwan" "except:type:gsm"
+];
+```
+
+Refer to the option description for the exact syntax and references to
+external documentation.
+:::
diff --git a/nixpkgs/nixos/doc/manual/configuration/networking.chapter.md b/nixpkgs/nixos/doc/manual/configuration/networking.chapter.md
new file mode 100644
index 000000000000..abbd9766f173
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/networking.chapter.md
@@ -0,0 +1,16 @@
+# Networking {#sec-networking}
+
+This section describes how to configure networking components
+on your NixOS machine.
+
+```{=include=} sections
+network-manager.section.md
+ssh.section.md
+ipv4-config.section.md
+ipv6-config.section.md
+firewall.section.md
+wireless.section.md
+ad-hoc-network-config.section.md
+renaming-interfaces.section.md
+```
+<!-- TODO: OpenVPN, NAT -->
diff --git a/nixpkgs/nixos/doc/manual/configuration/package-mgmt.chapter.md b/nixpkgs/nixos/doc/manual/configuration/package-mgmt.chapter.md
new file mode 100644
index 000000000000..1148bbe84740
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/package-mgmt.chapter.md
@@ -0,0 +1,18 @@
+# Package Management {#sec-package-management}
+
+This section describes how to add additional packages to your system.
+NixOS has two distinct styles of package management:
+
+-   *Declarative*, where you declare what packages you want in your
+    `configuration.nix`. Every time you run `nixos-rebuild`, NixOS will
+    ensure that you get a consistent set of binaries corresponding to
+    your specification.
+
+-   *Ad hoc*, where you install, upgrade and uninstall packages via the
+    `nix-env` command. This style allows mixing packages from different
+    Nixpkgs versions. It's the only choice for non-root users.
+
+```{=include=} sections
+declarative-packages.section.md
+ad-hoc-packages.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md b/nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md
new file mode 100644
index 000000000000..9f1f48f742ac
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles.chapter.md
@@ -0,0 +1,34 @@
+# Profiles {#ch-profiles}
+
+In some cases, it may be desirable to take advantage of commonly-used,
+predefined configurations provided by nixpkgs, but different from those
+that come as default. This is a role fulfilled by NixOS's Profiles,
+which come as files living in `<nixpkgs/nixos/modules/profiles>`. That
+is to say, expected usage is to add them to the imports list of your
+`/etc/configuration.nix` as such:
+
+```nix
+imports = [
+  <nixpkgs/nixos/modules/profiles/profile-name.nix>
+];
+```
+
+Even if some of these profiles seem only useful in the context of
+install media, many are actually intended to be used in real installs.
+
+What follows is a brief explanation on the purpose and use-case for each
+profile. Detailing each option configured by each one is out of scope.
+
+```{=include=} sections
+profiles/all-hardware.section.md
+profiles/base.section.md
+profiles/clone-config.section.md
+profiles/demo.section.md
+profiles/docker-container.section.md
+profiles/graphical.section.md
+profiles/hardened.section.md
+profiles/headless.section.md
+profiles/installation-device.section.md
+profiles/minimal.section.md
+profiles/qemu-guest.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/all-hardware.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/all-hardware.section.md
new file mode 100644
index 000000000000..e2dd7c76089c
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/all-hardware.section.md
@@ -0,0 +1,11 @@
+# All Hardware {#sec-profile-all-hardware}
+
+Enables all hardware supported by NixOS: i.e., all firmware is included, and
+all devices from which one may boot are enabled in the initrd. Its primary
+use is in the NixOS installation CDs.
+
+The enabled kernel modules include support for SATA and PATA, SCSI
+(partially), USB, Firewire (untested), Virtio (QEMU, KVM, etc.), VMware, and
+Hyper-V. Additionally, [](#opt-hardware.enableAllFirmware) is
+enabled, and the firmware for the ZyDAS ZD1211 chipset is specifically
+installed.
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/base.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/base.section.md
new file mode 100644
index 000000000000..59b3068fda32
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/base.section.md
@@ -0,0 +1,7 @@
+# Base {#sec-profile-base}
+
+Defines the software packages included in the "minimal" installation CD. It
+installs several utilities useful in a simple recovery or install media, such
+as a text-mode web browser, and tools for manipulating block devices,
+networking, hardware diagnostics, and filesystems (with their respective
+kernel modules).
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/clone-config.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/clone-config.section.md
new file mode 100644
index 000000000000..e2583715e517
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/clone-config.section.md
@@ -0,0 +1,11 @@
+# Clone Config {#sec-profile-clone-config}
+
+This profile is used in installer images. It provides an editable
+configuration.nix that imports all the modules that were also used when
+creating the image in the first place. As a result it allows users to edit
+and rebuild the live-system.
+
+On images where the installation media also becomes an installation target,
+copying over `configuration.nix` should be disabled by
+setting `installer.cloneConfig` to `false`.
+For example, this is done in `sd-image-aarch64-installer.nix`.
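+
+In such an image definition this amounts to:
+
+```nix
+installer.cloneConfig = false;
+```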
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/demo.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/demo.section.md
new file mode 100644
index 000000000000..0a0df483c123
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/demo.section.md
@@ -0,0 +1,4 @@
+# Demo {#sec-profile-demo}
+
+This profile just enables a `demo` user, with password `demo`, uid `1000`, `wheel` group and
+[autologin in the SDDM display manager](#opt-services.xserver.displayManager.autoLogin).
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/docker-container.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/docker-container.section.md
new file mode 100644
index 000000000000..f3e29b92f5e6
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/docker-container.section.md
@@ -0,0 +1,7 @@
+# Docker Container {#sec-profile-docker-container}
+
+This is the profile from which the Docker images are generated. It prepares a
+working system by importing the [Minimal](#sec-profile-minimal) and
+[Clone Config](#sec-profile-clone-config) profiles, and
+setting appropriate configuration options that are useful inside a container
+context, like [](#opt-boot.isContainer).
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/graphical.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/graphical.section.md
new file mode 100644
index 000000000000..aaea5c8c0288
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/graphical.section.md
@@ -0,0 +1,10 @@
+# Graphical {#sec-profile-graphical}
+
+Defines a NixOS configuration with the Plasma 5 desktop. It's used by the
+graphical installation CD.
+
+It sets [](#opt-services.xserver.enable),
+[](#opt-services.xserver.displayManager.sddm.enable),
+[](#opt-services.xserver.desktopManager.plasma5.enable),
+and [](#opt-services.xserver.libinput.enable) to true. It also
+includes glxinfo and firefox in the system packages list.
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/hardened.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/hardened.section.md
new file mode 100644
index 000000000000..2e9bb196c054
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/hardened.section.md
@@ -0,0 +1,20 @@
+# Hardened {#sec-profile-hardened}
+
+A profile with most (vanilla) hardening options enabled by default,
+potentially at the cost of stability, features and performance.
+
+This includes a hardened kernel, and limiting the system information
+available to processes through the `/sys` and
+`/proc` filesystems. It also disables the User Namespaces
+feature of the kernel, which stops Nix from being able to build anything
+(this particular setting can be overridden via
+[](#opt-security.allowUserNamespaces)). See the
+[profile source](https://github.com/nixos/nixpkgs/tree/master/nixos/modules/profiles/hardened.nix)
+for further detail on which settings are altered.
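+
+As a sketch, a configuration that imports the profile (using the import style
+from the chapter introduction) but keeps Nix able to build derivations could
+look like this, at the cost of some of the hardening:
+
+```nix
+{
+  imports = [ <nixpkgs/nixos/modules/profiles/hardened.nix> ];
+
+  # Re-enable user namespaces so sandboxed Nix builds keep working.
+  security.allowUserNamespaces = true;
+}
+```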
+
+::: {.warning}
+This profile enables options that are known to affect system
+stability. If you experience any stability issues when using the
+profile, try disabling it. If you report an issue and use this
+profile, always mention that you do.
+:::
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/headless.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/headless.section.md
new file mode 100644
index 000000000000..d185a9a774b7
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/headless.section.md
@@ -0,0 +1,9 @@
+# Headless {#sec-profile-headless}
+
+Common configuration for headless machines (e.g., Amazon EC2 instances).
+
+Disables [sound](#opt-sound.enable),
+[vesa](#opt-boot.vesa), serial consoles,
+[emergency mode](#opt-systemd.enableEmergencyMode),
+[grub splash images](#opt-boot.loader.grub.splashImage)
+and configures the kernel to reboot automatically on panic.
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/installation-device.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/installation-device.section.md
new file mode 100644
index 000000000000..ae9f8fa7757f
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/installation-device.section.md
@@ -0,0 +1,24 @@
+# Installation Device {#sec-profile-installation-device}
+
+Provides a basic configuration for installation devices like CDs.
+This enables redistributable firmware, includes the
+[Clone Config profile](#sec-profile-clone-config)
+and a copy of the Nixpkgs channel, so `nixos-install`
+works out of the box.
+
+Documentation for [Nixpkgs](#opt-documentation.enable)
+and [NixOS](#opt-documentation.nixos.enable) is
+forcibly enabled (to override the
+[Minimal profile](#sec-profile-minimal) preference); the
+NixOS manual is shown automatically on TTY 8, and udisks is disabled.
+Autologin is enabled as `nixos` user, while passwordless
+login as both `root` and `nixos` is possible.
+Passwordless `sudo` is enabled too.
+[wpa_supplicant](#opt-networking.wireless.enable) is
+enabled, but configured to not autostart.
+
+It is explained how to log in, start the ssh server, and, if available,
+how to start the display manager.
+
+Several settings are tweaked so that the installer has a better chance of
+succeeding under low-memory environments.
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/minimal.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/minimal.section.md
new file mode 100644
index 000000000000..02a3b65ae422
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/minimal.section.md
@@ -0,0 +1,9 @@
+# Minimal {#sec-profile-minimal}
+
+This profile defines a small NixOS configuration. It does not contain any
+graphical software. It's a very short file that enables
+[noXlibs](#opt-environment.noXlibs), sets
+[](#opt-i18n.supportedLocales) to
+only support the user-selected locale,
+[disables packages' documentation](#opt-documentation.enable),
+and [disables sound](#opt-sound.enable).
diff --git a/nixpkgs/nixos/doc/manual/configuration/profiles/qemu-guest.section.md b/nixpkgs/nixos/doc/manual/configuration/profiles/qemu-guest.section.md
new file mode 100644
index 000000000000..d7e3cae9cb0f
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/profiles/qemu-guest.section.md
@@ -0,0 +1,7 @@
+# QEMU Guest {#sec-profile-qemu-guest}
+
+This profile contains common configuration for virtual machines running under
+QEMU (using virtio).
+
+It makes virtio modules available on the initrd and sets the system time from
+the hardware clock to work around a bug in qemu-kvm.
diff --git a/nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md b/nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md
new file mode 100644
index 000000000000..5b515e9f82a0
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/renaming-interfaces.section.md
@@ -0,0 +1,51 @@
+# Renaming network interfaces {#sec-rename-ifs}
+
+NixOS uses the udev [predictable naming
+scheme](https://systemd.io/PREDICTABLE_INTERFACE_NAMES/) to assign names
+to network interfaces. This means that by default cards are not given
+the traditional names like `eth0` or `eth1`, whose order can change
+unpredictably across reboots. Instead, relying on physical locations and
+firmware information, the scheme produces names like `ens1`, `enp2s0`,
+etc.
+
+These names are predictable but less memorable and not necessarily
+stable: for example installing new hardware or changing firmware
+settings can result in a [name
+change](https://github.com/systemd/systemd/issues/3715#issue-165347602).
+If this is undesirable, for example if you have a single ethernet card,
+you can revert to the traditional scheme by setting
+[](#opt-networking.usePredictableInterfaceNames)
+to `false`.
+
+## Assigning custom names {#sec-custom-ifnames}
+
+In case there are multiple interfaces of the same type, it's better to
+assign custom names based on the device hardware address. For example,
+we assign the name `wan` to the interface with MAC address
+`52:54:00:12:01:01` using a systemd link unit:
+
+```nix
+systemd.network.links."10-wan" = {
+  matchConfig.PermanentMACAddress = "52:54:00:12:01:01";
+  linkConfig.Name = "wan";
+};
+```
+
+Note that links are directly read by udev, *not networkd*, and will work
+even if networkd is disabled.
+
+Alternatively, we can use a plain old udev rule:
+
+```nix
+boot.initrd.services.udev.rules = ''
+  SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", \
+  ATTR{address}=="52:54:00:12:01:01", KERNEL=="eth*", NAME="wan"
+'';
+```
+
+::: {.warning}
+The rule must be installed in the initrd using
+`boot.initrd.services.udev.rules`, not the usual `services.udev.extraRules`
+option. This is to avoid race conditions with other programs controlling
+the interface.
+:::
diff --git a/nixpkgs/nixos/doc/manual/configuration/ssh.section.md b/nixpkgs/nixos/doc/manual/configuration/ssh.section.md
new file mode 100644
index 000000000000..9e239a848178
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/ssh.section.md
@@ -0,0 +1,19 @@
+# Secure Shell Access {#sec-ssh}
+
+Secure shell (SSH) access to your machine can be enabled by setting:
+
+```nix
+services.openssh.enable = true;
+```
+
+By default, root logins using a password are disallowed. They can be
+disabled entirely by setting
+[](#opt-services.openssh.settings.PermitRootLogin) to `"no"`.
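+
+For example:
+
+```nix
+services.openssh.settings.PermitRootLogin = "no";
+```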
+
+You can declaratively specify authorised RSA/DSA public keys for a user
+as follows:
+
+```nix
+users.users.alice.openssh.authorizedKeys.keys =
+  [ "ssh-dss AAAAB3NzaC1kc3MAAACBAPIkGWVEt4..." ];
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/sshfs-file-systems.section.md b/nixpkgs/nixos/doc/manual/configuration/sshfs-file-systems.section.md
new file mode 100644
index 000000000000..d8c9dea6c337
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/sshfs-file-systems.section.md
@@ -0,0 +1,104 @@
+# SSHFS File Systems {#sec-sshfs-file-systems}
+
+[SSHFS][sshfs] is a [FUSE][fuse] filesystem that allows easy access to directories on a remote machine using the SSH File Transfer Protocol (SFTP).
+It means that if you have SSH access to a machine, no additional setup is needed to mount a directory.
+
+[sshfs]: https://github.com/libfuse/sshfs
+[fuse]: https://en.wikipedia.org/wiki/Filesystem_in_Userspace
+
+## Interactive mounting {#sec-sshfs-interactive}
+
+In NixOS, SSHFS is packaged as `sshfs`.
+Once installed, mounting a directory interactively is as simple as running:
+```ShellSession
+$ sshfs my-user@example.com:/my-dir /mnt/my-dir
+```
+Like any other FUSE file system, the directory is unmounted using:
+```ShellSession
+$ fusermount -u /mnt/my-dir
+```
+
+## Non-interactive mounting {#sec-sshfs-non-interactive}
+
+Mounting non-interactively requires some precautions because `sshfs` will run at boot and under a different user (root).
+For obvious reasons, you can't input a password, so public key authentication using an unencrypted key is needed.
+To create a new key without a passphrase you can do:
+```ShellSession
+$ ssh-keygen -t ed25519 -P '' -f example-key
+Generating public/private ed25519 key pair.
+Your identification has been saved in example-key
+Your public key has been saved in example-key.pub
+The key fingerprint is:
+SHA256:yjxl3UbTn31fLWeyLYTAKYJPRmzknjQZoyG8gSNEoIE my-user@workstation
+```
+To keep the key safe, change the ownership to `root:root` and make sure the permissions are `600`;
+OpenSSH normally refuses to use the key if it's not well-protected.
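+
+For example, assuming the key is stored at `/var/secrets/example-key` (the
+path used in the configuration below), you might run:
+
+```ShellSession
+# mv example-key /var/secrets/example-key
+# chown root:root /var/secrets/example-key
+# chmod 600 /var/secrets/example-key
+```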
+
+The file system can be configured in NixOS via the usual [fileSystems](#opt-fileSystems) option.
+Here's a typical setup:
+```nix
+{
+  system.fsPackages = [ pkgs.sshfs ];
+
+  fileSystems."/mnt/my-dir" = {
+    device = "my-user@example.com:/my-dir/";
+    fsType = "sshfs";
+    options =
+      [ # Filesystem options
+        "allow_other"          # for non-root access
+        "_netdev"              # this is a network fs
+        "x-systemd.automount"  # mount on demand
+
+        # SSH options
+        "reconnect"              # handle connection drops
+        "ServerAliveInterval=15" # keep connections alive
+        "IdentityFile=/var/secrets/example-key"
+      ];
+  };
+}
+```
+More options from `ssh_config(5)` can be given as well, for example you can change the default SSH port or specify a jump proxy:
+```nix
+{
+  options =
+    [ "ProxyJump=bastion@example.com"
+      "Port=22"
+    ];
+}
+```
+It's also possible to change the `ssh` command used by SSHFS to connect to the server.
+For example:
+```nix
+{
+  options =
+    [ (builtins.replaceStrings [" "] ["\\040"]
+        "ssh_command=${pkgs.openssh}/bin/ssh -v -L 8080:localhost:80")
+    ];
+}
+```
+
+::: {.note}
+The escaping of spaces is needed because every option is written to the `/etc/fstab` file, which is a space-separated table.
+:::
+
+### Troubleshooting {#sec-sshfs-troubleshooting}
+
+If you're having a hard time figuring out why mounting is failing, you can add the option `"debug"`.
+This enables a verbose log in SSHFS that you can access via:
+```ShellSession
+$ journalctl -u $(systemd-escape -p /mnt/my-dir/).mount
+Jun 22 11:41:18 workstation mount[87790]: SSHFS version 3.7.1
+Jun 22 11:41:18 workstation mount[87793]: executing <ssh> <-x> <-a> <-oClearAllForwardings=yes> <-oServerAliveInterval=15> <-oIdentityFile=/var/secrets/wrong-key> <-2> <my-user@example.com> <-s> <sftp>
+Jun 22 11:41:19 workstation mount[87793]: my-user@example.com: Permission denied (publickey).
+Jun 22 11:41:19 workstation mount[87790]: read: Connection reset by peer
+Jun 22 11:41:19 workstation systemd[1]: mnt-my\x2ddir.mount: Mount process exited, code=exited, status=1/FAILURE
+Jun 22 11:41:19 workstation systemd[1]: mnt-my\x2ddir.mount: Failed with result 'exit-code'.
+Jun 22 11:41:19 workstation systemd[1]: Failed to mount /mnt/my-dir.
+Jun 22 11:41:19 workstation systemd[1]: mnt-my\x2ddir.mount: Consumed 54ms CPU time, received 2.3K IP traffic, sent 2.7K IP traffic.
+```
+
+::: {.note}
+If the mount point contains special characters it needs to be escaped using `systemd-escape`.
+This is due to the way systemd converts paths into unit names.
+:::
diff --git a/nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md b/nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md
new file mode 100644
index 000000000000..ff870f5c40b9
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/subversion.chapter.md
@@ -0,0 +1,102 @@
+# Subversion {#module-services-subversion}
+
+[Subversion](https://subversion.apache.org/) is a centralized
+version-control system. It can use a [variety of
+protocols](https://svnbook.red-bean.com/en/1.7/svn-book.html#svn.serverconfig.choosing)
+for communication between client and server.
+
+## Subversion inside Apache HTTP {#module-services-subversion-apache-httpd}
+
+This section focuses on configuring a web-based server on top of the
+Apache HTTP server, which uses
+[WebDAV](http://www.webdav.org/)/[DeltaV](http://www.webdav.org/deltav/WWW10/deltav-intro.htm)
+for communication.
+
+For more information on the general setup, please refer to the [the
+appropriate section of the Subversion
+book](https://svnbook.red-bean.com/en/1.7/svn-book.html#svn.serverconfig.httpd).
+
+To configure, include in `/etc/nixos/configuration.nix` code to activate
+Apache HTTP, setting [](#opt-services.httpd.adminAddr)
+appropriately:
+
+```nix
+services.httpd.enable = true;
+services.httpd.adminAddr = ...;
+networking.firewall.allowedTCPPorts = [ 80 443 ];
+```
+
+For a simple Subversion server with basic authentication, configure the
+Subversion module for Apache as follows, setting `hostName` and
+`documentRoot` appropriately, and `SVNParentPath` to the parent
+directory of the repositories, `AuthzSVNAccessFile` to the location of
+the `.authz` file describing access permissions, and `AuthUserFile` to
+the password file.
+
+```nix
+services.httpd.extraModules = [
+  # note that order is *super* important here
+  { name = "dav_svn"; path = "${pkgs.apacheHttpdPackages.subversion}/modules/mod_dav_svn.so"; }
+  { name = "authz_svn"; path = "${pkgs.apacheHttpdPackages.subversion}/modules/mod_authz_svn.so"; }
+];
+services.httpd.virtualHosts = {
+  "svn" = {
+    hostName = HOSTNAME;
+    documentRoot = DOCUMENTROOT;
+    locations."/svn".extraConfig = ''
+      DAV svn
+      SVNParentPath REPO_PARENT
+      AuthzSVNAccessFile ACCESS_FILE
+      AuthName "SVN Repositories"
+      AuthType Basic
+      AuthUserFile PASSWORD_FILE
+      Require valid-user
+    '';
+  };
+};
+```
+
+The key `"svn"` is just a symbolic name identifying the virtual host.
+The `"/svn"` in `locations."/svn".extraConfig` is the path underneath
+which the repositories will be served.
+
+[This page](https://wiki.archlinux.org/index.php/Subversion) explains
+how to set up the Subversion configuration itself. This boils down to
+the following:
+
+Underneath `REPO_PARENT` repositories can be set up as follows:
+
+```ShellSession
+$ svnadmin create REPO_NAME
+```
+
+Repository files need to be accessible by `wwwrun`:
+
+```ShellSession
+$ chown -R wwwrun:wwwrun REPO_PARENT
+```
+
+The password file `PASSWORD_FILE` can be created as follows:
+
+```ShellSession
+$ htpasswd -cs PASSWORD_FILE USER_NAME
+```
+
+Additional users can be set up similarly, omitting the `c` flag:
+
+```ShellSession
+$ htpasswd -s PASSWORD_FILE USER_NAME
+```
+
+The file describing access permissions `ACCESS_FILE` will look something
+like the following:
+
+```plain
+[/]
+* = r
+
+[REPO_NAME:/]
+USER_NAME = rw
+```
+
+The Subversion repositories will be accessible as
+`http://HOSTNAME/svn/REPO_NAME`.
diff --git a/nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md b/nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md
new file mode 100644
index 000000000000..b35b38f6e964
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/user-mgmt.chapter.md
@@ -0,0 +1,91 @@
+# User Management {#sec-user-management}
+
+NixOS supports both declarative and imperative styles of user
+management. In the declarative style, users are specified in
+`configuration.nix`. For instance, the following states that a user
+account named `alice` shall exist:
+
+```nix
+users.users.alice = {
+  isNormalUser = true;
+  home = "/home/alice";
+  description = "Alice Foobar";
+  extraGroups = [ "wheel" "networkmanager" ];
+  openssh.authorizedKeys.keys = [ "ssh-dss AAAAB3Nza... alice@foobar" ];
+};
+```
+
+Note that `alice` is a member of the `wheel` and `networkmanager`
+groups, which allows her to use `sudo` to execute commands as `root` and
+to configure the network, respectively. Also note the SSH public key
+that allows remote logins with the corresponding private key. Users
+created in this way do not have a password by default, so they cannot
+log in via mechanisms that require a password. However, you can use the
+`passwd` program to set a password, which is retained across invocations
+of `nixos-rebuild`.
+
+If you set [](#opt-users.mutableUsers) to
+false, then the contents of `/etc/passwd` and `/etc/group` will be congruent
+to your NixOS configuration. For instance, if you remove a user from
+[](#opt-users.users) and run nixos-rebuild, the user
+account will cease to exist. Also, imperative commands for managing users and
+groups, such as useradd, are no longer available. Passwords may still be
+assigned by setting the user's
+[hashedPassword](#opt-users.users._name_.hashedPassword) option. A
+hashed password can be generated using `mkpasswd`.
+
+A user ID (uid) is assigned automatically. You can also specify a uid
+manually by adding
+
+```nix
+uid = 1000;
+```
+
+to the user specification.
+
+Groups can be specified similarly. The following states that a group
+named `students` shall exist:
+
+```nix
+users.groups.students.gid = 1000;
+```
+
+As with users, the group ID (gid) is optional and will be assigned
+automatically if it's missing.
+
+In the imperative style, users and groups are managed by commands such
+as `useradd`, `groupmod` and so on. For instance, to create a user
+account named `alice`:
+
+```ShellSession
+# useradd -m alice
+```
+
+To make all Nix tools available to this new user, use `su - USER`, which
+opens a login shell (a shell that loads the profile) for the given user.
+This will create the `~/.nix-defexpr` symlink. So run:
+
+```ShellSession
+# su - alice -c "true"
+```
+
+The flag `-m` causes the creation of a home directory for the new user,
+which is generally what you want. The user does not have an initial
+password and therefore cannot log in. A password can be set using the
+`passwd` utility:
+
+```ShellSession
+# passwd alice
+Enter new UNIX password: ***
+Retype new UNIX password: ***
+```
+
+A user can be deleted using `userdel`:
+
+```ShellSession
+# userdel -r alice
+```
+
+The flag `-r` deletes the user's home directory. Accounts can be
+modified using `usermod`. Unix groups can be managed using `groupadd`,
+`groupmod` and `groupdel`.
diff --git a/nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md b/nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md
new file mode 100644
index 000000000000..0f195bd66567
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/wayland.chapter.md
@@ -0,0 +1,27 @@
+# Wayland {#sec-wayland}
+
+While X11 (see [](#sec-x11)) is still the primary display technology
+on NixOS, Wayland support is steadily improving. Where X11 separates the
+X Server and the window manager, on Wayland those are combined: a
+Wayland Compositor is like an X11 window manager, but also embeds the
+Wayland 'Server' functionality. This means it is sufficient to install
+a Wayland Compositor such as sway without separately enabling a Wayland
+server:
+
+```nix
+programs.sway.enable = true;
+```
+
+This installs the sway compositor along with some essential utilities.
+Now you can start sway from the TTY console.
+
+If you are using a wlroots-based compositor, like sway, and want to be
+able to share your screen, you might want to activate this option:
+
+```nix
+xdg.portal.wlr.enable = true;
+```
+
+and configure Pipewire using
+[](#opt-services.pipewire.enable)
+and related options.
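+
+For example, a minimal sketch of the Pipewire side:
+
+```nix
+services.pipewire.enable = true;
+```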
diff --git a/nixpkgs/nixos/doc/manual/configuration/wireless.section.md b/nixpkgs/nixos/doc/manual/configuration/wireless.section.md
new file mode 100644
index 000000000000..3299d2d7ecb8
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/wireless.section.md
@@ -0,0 +1,67 @@
+# Wireless Networks {#sec-wireless}
+
+For a desktop installation using NetworkManager (e.g., GNOME), you just
+have to make sure the user is in the `networkmanager` group and you can
+skip the rest of this section on wireless networks.
+
+NixOS will start wpa_supplicant for you if you enable this setting:
+
+```nix
+networking.wireless.enable = true;
+```
+
+NixOS lets you specify networks for wpa_supplicant declaratively:
+
+```nix
+networking.wireless.networks = {
+  echelon = {                # SSID with no spaces or special characters
+    psk = "abcdefgh";
+  };
+  "echelon's AP" = {         # SSID with spaces and/or special characters
+    psk = "ijklmnop";
+  };
+  "echelon-hidden" = {       # Hidden SSID
+    hidden = true;
+    psk = "qrstuvwx";
+  };
+  "free.wifi" = {};          # Public wireless network
+};
+```
+
+Be aware that keys will be written to the nix store in plaintext! When
+no networks are set, it will default to using a configuration file at
+`/etc/wpa_supplicant.conf`. You should edit this file yourself to define
+wireless networks, WPA keys and so on (see wpa_supplicant.conf(5)).
+
+If you are using WPA2, you can generate the `pskRaw` key using
+`wpa_passphrase`:
+
+```ShellSession
+$ wpa_passphrase ESSID PSK
+network={
+        ssid="echelon"
+        #psk="abcdefgh"
+        psk=dca6d6ed41f4ab5a984c9f55f6f66d4efdc720ebf66959810f4329bb391c5435
+}
+```
+
+```nix
+networking.wireless.networks = {
+  echelon = {
+    pskRaw = "dca6d6ed41f4ab5a984c9f55f6f66d4efdc720ebf66959810f4329bb391c5435";
+  };
+};
+```
+
+or you can use it to directly generate the `wpa_supplicant.conf`:
+
+```ShellSession
+# wpa_passphrase ESSID PSK > /etc/wpa_supplicant.conf
+```
+
+After you have edited the `wpa_supplicant.conf`, you need to restart the
+wpa_supplicant service.
+
+```ShellSession
+# systemctl restart wpa_supplicant.service
+```
diff --git a/nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md b/nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md
new file mode 100644
index 000000000000..0451e4d25265
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/x-windows.chapter.md
@@ -0,0 +1,336 @@
+# X Window System {#sec-x11}
+
+The X Window System (X11) provides the basis of NixOS' graphical user
+interface. It can be enabled as follows:
+
+```nix
+services.xserver.enable = true;
+```
+
+The X server will automatically detect and use the appropriate video
+driver from a set of X.org drivers (such as `vesa` and `intel`). You can
+also specify a driver manually, e.g.
+
+```nix
+services.xserver.videoDrivers = [ "r128" ];
+```
+
+to enable X.org's `xf86-video-r128` driver.
+
+You also need to enable at least one desktop or window manager.
+Otherwise, you can only log into a plain undecorated `xterm` window.
+Thus you should pick one or more of the following lines:
+
+```nix
+services.xserver.desktopManager.plasma5.enable = true;
+services.xserver.desktopManager.xfce.enable = true;
+services.xserver.desktopManager.gnome.enable = true;
+services.xserver.desktopManager.mate.enable = true;
+services.xserver.windowManager.xmonad.enable = true;
+services.xserver.windowManager.twm.enable = true;
+services.xserver.windowManager.icewm.enable = true;
+services.xserver.windowManager.i3.enable = true;
+services.xserver.windowManager.herbstluftwm.enable = true;
+```
+
+NixOS's default *display manager* (the program that provides a graphical
+login prompt and manages the X server) is LightDM. You can select an
+alternative one by picking one of the following lines:
+
+```nix
+services.xserver.displayManager.sddm.enable = true;
+services.xserver.displayManager.gdm.enable = true;
+```
+
+You can set the keyboard layout (and optionally the layout variant):
+
+```nix
+services.xserver.xkb.layout = "de";
+services.xserver.xkb.variant = "neo";
+```
+
+The X server is started automatically at boot time. If you don't want
+this to happen, you can set:
+
+```nix
+services.xserver.autorun = false;
+```
+
+The X server can then be started manually:
+
+```ShellSession
+# systemctl start display-manager.service
+```
+
+On 64-bit systems, if you want OpenGL for 32-bit programs such as in
+Wine, you should also set the following:
+
+```nix
+hardware.opengl.driSupport32Bit = true;
+```
+
+## Auto-login {#sec-x11-auto-login}
+
+The X11 login screen can be skipped entirely, automatically logging you
+into your window manager and desktop environment when you boot your
+computer.
+
+This is especially helpful if you have disk encryption enabled. Since
+you already have to provide a password to decrypt your disk, entering a
+second password to login can be redundant.
+
+To enable auto-login, you need to define your default window manager and
+desktop environment. If you wanted no desktop environment and i3 as your
+window manager, you'd define:
+
+```nix
+services.xserver.displayManager.defaultSession = "none+i3";
+```
+
+Every display manager in NixOS supports auto-login, here is an example
+using lightdm for a user `alice`:
+
+```nix
+services.xserver.displayManager.lightdm.enable = true;
+services.xserver.displayManager.autoLogin.enable = true;
+services.xserver.displayManager.autoLogin.user = "alice";
+```
+
+## Intel Graphics drivers {#sec-x11--graphics-cards-intel}
+
+There are two choices for Intel Graphics drivers in X.org: `modesetting`
+(included in the xorg-server itself) and `intel` (provided by the
+package xf86-video-intel).
+
+The default and recommended is `modesetting`. It is a generic driver
+which uses the kernel [mode
+setting](https://en.wikipedia.org/wiki/Mode_setting) (KMS) mechanism. It
+supports Glamor (2D graphics acceleration via OpenGL) and is actively
+maintained but may perform worse in some cases (like in old chipsets).
+
+The second driver, `intel`, is specific to Intel GPUs, but not
+recommended by most distributions: it lacks several modern features (for
+example, it doesn't support Glamor) and the package hasn't been
+officially updated since 2015.
+
+The results vary depending on the hardware, so you may have to try both
+drivers. Use the option
+[](#opt-services.xserver.videoDrivers)
+to set one. The recommended configuration for modern systems is:
+
+```nix
+services.xserver.videoDrivers = [ "modesetting" ];
+```
+
+If you experience screen tearing no matter what, this configuration was
+reported to resolve the issue:
+
+```nix
+services.xserver.videoDrivers = [ "intel" ];
+services.xserver.deviceSection = ''
+  Option "DRI" "2"
+  Option "TearFree" "true"
+'';
+```
+
+Note that this will likely downgrade the performance compared to
+`modesetting` or `intel` with DRI 3 (default).
+
+## Proprietary NVIDIA drivers {#sec-x11-graphics-cards-nvidia}
+
+NVIDIA provides a proprietary driver for its graphics cards that has
+better 3D performance than the X.org drivers. It is not enabled by
+default because it's not free software. You can enable it as follows:
+
+```nix
+services.xserver.videoDrivers = [ "nvidia" ];
+```
+
+Or if you have an older card, you may have to use one of the legacy
+drivers:
+
+```nix
+services.xserver.videoDrivers = [ "nvidiaLegacy390" ];
+services.xserver.videoDrivers = [ "nvidiaLegacy340" ];
+services.xserver.videoDrivers = [ "nvidiaLegacy304" ];
+```
+
+You may need to reboot after enabling this driver to prevent a clash
+with other kernel modules.
+
+## Proprietary AMD drivers {#sec-x11--graphics-cards-amd}
+
+AMD provides a proprietary driver for its graphics cards that is not
+enabled by default because it's not Free Software, is often broken in
+nixpkgs and as of this writing doesn't offer more features or
+performance. If you still want to use it anyway, you need to explicitly
+set:
+
+```nix
+services.xserver.videoDrivers = [ "amdgpu-pro" ];
+```
+
+You will need to reboot after enabling this driver to prevent a clash
+with other kernel modules.
+
+## Touchpads {#sec-x11-touchpads}
+
+Support for Synaptics touchpads (found in many laptops such as the Dell
+Latitude series) can be enabled as follows:
+
+```nix
+services.xserver.libinput.enable = true;
+```
+
+The driver has many options (see [](#ch-options)).
+For instance, the following disables tap-to-click behavior:
+
+```nix
+services.xserver.libinput.touchpad.tapping = false;
+```
+
+Note: the use of `services.xserver.synaptics` is deprecated since NixOS
+17.09.
+
+## GTK/Qt themes {#sec-x11-gtk-and-qt-themes}
+
+GTK themes can be installed either to the user profile or system-wide (via
+`environment.systemPackages`). To make Qt 5 applications look similar to
+GTK ones, you can use the following configuration:
+
+```nix
+qt.enable = true;
+qt.platformTheme = "gtk2";
+qt.style = "gtk2";
+```
+
+## Custom XKB layouts {#custom-xkb-layouts}
+
+It is possible to install custom
+[XKB](https://en.wikipedia.org/wiki/X_keyboard_extension) keyboard layouts
+using the option `services.xserver.xkb.extraLayouts`.
+
+As a first example, we are going to create a layout based on the basic
+US layout, with an additional layer to type some greek symbols by
+pressing the right-alt key.
+
+Create a file called `us-greek` with the following content (under a
+directory called `symbols`; it's an XKB peculiarity that will help with
+testing):
+
+```plain
+xkb_symbols "us-greek"
+{
+  include "us(basic)"            // includes the base US keys
+  include "level3(ralt_switch)"  // configures right alt as a third level switch
+
+  key <LatA> { [ a, A, Greek_alpha ] };
+  key <LatB> { [ b, B, Greek_beta  ] };
+  key <LatG> { [ g, G, Greek_gamma ] };
+  key <LatD> { [ d, D, Greek_delta ] };
+  key <LatZ> { [ z, Z, Greek_zeta  ] };
+};
+```
+
+A minimal layout specification must include the following:
+
+```nix
+services.xserver.xkb.extraLayouts.us-greek = {
+  description = "US layout with alt-gr greek";
+  languages   = [ "eng" ];
+  symbolsFile = /yourpath/symbols/us-greek;
+};
+```
+
+::: {.note}
+The name (after `extraLayouts.`) should match the one given to the
+`xkb_symbols` block.
+:::
+
+Applying this customization requires rebuilding several packages, and a
+broken XKB file can lead to the X session crashing at login. Therefore,
+you're strongly advised to **test your layout before applying it**:
+
+```ShellSession
+$ nix-shell -p xorg.xkbcomp
+$ setxkbmap -I/yourpath us-greek -print | xkbcomp -I/yourpath - $DISPLAY
+```
+
+You can inspect the predefined XKB files for examples:
+
+```ShellSession
+$ echo "$(nix-build --no-out-link '<nixpkgs>' -A xorg.xkeyboardconfig)/etc/X11/xkb/"
+```
+
+Once the configuration is applied, and you did a logout/login cycle, the
+layout should be ready to use. You can try it by e.g. running
+`setxkbmap us-greek` and then type `<alt>+a` (it may not get applied in
+your terminal straight away). To change the default, the usual
+`services.xserver.xkb.layout` option can still be used.
+
+A layout can have several other components besides `xkb_symbols`. For
+example, we will define new keycodes for some multimedia keys and bind
+these to some symbols.
+
+Use the *xev* utility from `pkgs.xorg.xev` to find the codes of the keys
+of interest, then create a `media-key` file to hold the keycode
+definitions:
+
+```plain
+xkb_keycodes "media"
+{
+ <volUp>   = 123;
+ <volDown> = 456;
+}
+```
+
+Now use the newly defined keycodes in `media-sym`:
+
+```plain
+xkb_symbols "media"
+{
+ key.type = "ONE_LEVEL";
+ key <volUp>   { [ XF86AudioRaiseVolume ] };
+ key <volDown> { [ XF86AudioLowerVolume ] };
+}
+```
+
+As before, to install the layout do
+
+```nix
+services.xserver.xkb.extraLayouts.media = {
+  description  = "Multimedia keys remapping";
+  languages    = [ "eng" ];
+  symbolsFile  = /path/to/media-sym;
+  keycodesFile = /path/to/media-key;
+};
+```
+
+::: {.note}
+The function `pkgs.writeText <filename> <content>` can be useful if you
+prefer to keep the layout definitions inside the NixOS configuration.
+:::
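+
+For instance, a sketch that inlines a shortened version of the earlier
+`us-greek` symbols file using `pkgs.writeText`:
+
+```nix
+services.xserver.xkb.extraLayouts.us-greek = {
+  description = "US layout with alt-gr greek";
+  languages   = [ "eng" ];
+  # The symbols file is generated into the Nix store instead of living on disk.
+  symbolsFile = pkgs.writeText "us-greek" ''
+    xkb_symbols "us-greek"
+    {
+      include "us(basic)"
+      include "level3(ralt_switch)"
+      key <LatA> { [ a, A, Greek_alpha ] };
+    };
+  '';
+};
+```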
+
+Unfortunately, the Xorg server does not (currently) support setting a
+keymap directly but relies instead on XKB rules to select the matching
+components (keycodes, types, ...) of a layout. This means that
+components other than symbols won't be loaded by default. As a
+workaround, you can set the keymap using `setxkbmap` at the start of the
+session with:
+
+```nix
+services.xserver.displayManager.sessionCommands = "setxkbmap -keycodes media";
+```
+
+If you are manually starting the X server, you should set the argument
+`-xkbdir /etc/X11/xkb`, otherwise X won't find your layout files. For
+example with `xinit` run
+
+```ShellSession
+$ xinit -- -xkbdir /etc/X11/xkb
+```
+
+To learn how to write layouts take a look at the XKB [documentation
+](https://www.x.org/releases/current/doc/xorg-docs/input/XKB-Enhancing.html#Defining_New_Layouts).
+More example layouts can also be found [here
+](https://wiki.archlinux.org/index.php/X_KeyBoard_extension#Basic_examples).
diff --git a/nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md b/nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md
new file mode 100644
index 000000000000..9ec4a51d6e35
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/configuration/xfce.chapter.md
@@ -0,0 +1,57 @@
+# Xfce Desktop Environment {#sec-xfce}
+
+To enable the Xfce Desktop Environment, set
+
+```nix
+services.xserver.desktopManager.xfce.enable = true;
+services.xserver.displayManager.defaultSession = "xfce";
+```
+
+Optionally, *picom* can be enabled for nice graphical effects, some
+example settings:
+
+```nix
+services.picom = {
+  enable = true;
+  fade = true;
+  inactiveOpacity = 0.9;
+  shadow = true;
+  fadeDelta = 4;
+};
+```
+
+Some Xfce programs are not installed automatically. To install them
+manually (system wide), put them into your
+[](#opt-environment.systemPackages) from `pkgs.xfce`.
+
+## Thunar {#sec-xfce-thunar-plugins}
+
+Thunar (the Xfce file manager) is automatically enabled when Xfce is
+enabled. To enable Thunar without enabling Xfce, use the configuration
+option [](#opt-programs.thunar.enable) instead of adding
+`pkgs.xfce.thunar` to [](#opt-environment.systemPackages).
+
+If you'd like to add extra plugins to Thunar, add them to
+[](#opt-programs.thunar.plugins). You shouldn't just add them to
+[](#opt-environment.systemPackages).
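+
+For example (the plugin names below are just illustrative):
+
+```nix
+programs.thunar.plugins = with pkgs.xfce; [
+  thunar-archive-plugin
+  thunar-volman
+];
+```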
+
+## Troubleshooting {#sec-xfce-troubleshooting}
+
+Even after enabling udisks2, volume management might not work, or Thunar
+and/or the desktop may take a long time to show up. Thunar will print this
+kind of message on start (look at `journalctl --user -b`):
+
+```plain
+(Thunar:2410): GVFS-RemoteVolumeMonitor-WARNING **: remote volume monitor with dbus name org.gtk.Private.UDisks2VolumeMonitor is not supported
+```
+
+This is caused by some needed GNOME services not running. This is all
+fixed by enabling "Launch GNOME services on startup" in the Advanced
+tab of the Session and Startup settings panel. Alternatively, you can
+run this command to do the same thing.
+
+```ShellSession
+$ xfconf-query -c xfce4-session -p /compat/LaunchGNOME -s true
+```
+
+It is necessary to log out and log in again for this to take effect.
diff --git a/nixpkgs/nixos/doc/manual/contributing-to-this-manual.chapter.md b/nixpkgs/nixos/doc/manual/contributing-to-this-manual.chapter.md
new file mode 100644
index 000000000000..6245280e30f0
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/contributing-to-this-manual.chapter.md
@@ -0,0 +1,110 @@
+# Contributing to this manual {#chap-contributing}
+
+The [DocBook] and CommonMark sources of the NixOS manual are in the [nixos/doc/manual](https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual) subdirectory of the [Nixpkgs](https://github.com/NixOS/nixpkgs) repository.
+This manual uses the [Nixpkgs manual syntax](https://nixos.org/manual/nixpkgs/unstable/#sec-contributing-markup).
+
+You can quickly check your edits with the following:
+
+```ShellSession
+$ cd /path/to/nixpkgs
+$ $EDITOR nixos/doc/manual/... # edit the manual
+$ nix-build nixos/release.nix -A manual.x86_64-linux
+```
+
+If the build succeeds, the manual will be in `./result/share/doc/nixos/index.html`.
+
+There's also [a convenient development daemon](https://nixos.org/manual/nixpkgs/unstable/#sec-contributing-devmode).
+
+The above instructions don't deal with the appendix of available `configuration.nix` options, and the manual pages related to NixOS. These are built, and written in a different location and in a different format, as explained in the next sections.
+
+## Contributing to the `configuration.nix` options documentation {#sec-contributing-options}
+
+The documentation for all the different `configuration.nix` options is automatically generated by reading the `description`s of all the NixOS options defined at `nixos/modules/`. If you want to improve such a `description`, find it in the `nixos/modules/` directory, edit it, and open a pull request.
+
+To see how your changes render on the web, run again:
+
+```ShellSession
+$ nix-build nixos/release.nix -A manual.x86_64-linux
+```
+
+And you'll see the changes to the appendix in the path `result/share/doc/nixos/options.html`.
+
+You can also build only the `configuration.nix(5)` manual page, via:
+
+```ShellSession
+$ cd /path/to/nixpkgs
+$ nix-build nixos/release.nix -A nixos-configuration-reference-manpage.x86_64-linux
+```
+
+And observe the result via:
+
+```ShellSession
+$ man --local-file result/share/man/man5/configuration.nix.5
+```
+
+If you're on a different architecture that's supported by NixOS (check file `nixos/release.nix` on Nixpkgs' repository) then replace `x86_64-linux` with the architecture. `nix-build` will complain otherwise, but should also tell you which architecture you have and which ones are supported.
+
+## Contributing to `nixos-*` tools' manpages {#sec-contributing-nixos-tools}
+
+The manual pages for the tools available in the installation image can be found in Nixpkgs by running (e.g for `nixos-rebuild`):
+
+```ShellSession
+$ git ls-files | grep nixos-rebuild.8
+```
+
+Man pages are written in [`mdoc(7)` format](https://mandoc.bsd.lv/man/mdoc.7.html) and should be portable between mandoc and groff for rendering (except for minor differences, notably different spacing rules.)
+
+For a preview, run `man --local-file path/to/file.8`.
+
+Being written in `mdoc`, these manpages use semantic markup. The following subsections provide guidelines on where to apply which semantic elements.
+
+### Command lines and arguments {#ssec-contributing-nixos-tools-cli-and-args}
+
+In any manpage, commands, flags and arguments to the *current* executable should be marked according to their semantics. Commands, flags and arguments passed to *other* executables should not be marked like this and should instead be considered as code examples and marked with `Ql`.
+
+- Use `Fl` to mark flag arguments, `Ar` for their arguments.
+- Repeating arguments should be marked by adding an ellipsis (spelled with periods, `...`).
+- Use `Cm` to mark literal string arguments, e.g. the `boot` command argument passed to `nixos-rebuild`.
+- Optional flags or arguments should be marked with `Op`. This includes optional repeating arguments.
+- Required flags or arguments should not be marked.
+- Mutually exclusive groups of arguments should be enclosed in curly brackets, preferably created with `Bro`/`Brc` blocks.
+
+When an argument is used in an example, it should be marked up with `Ar` again to differentiate it from a constant. For example, a command with a `--host name` option that calls ssh to retrieve the host's local time would express this as follows:
+```
+This will run
+.Ic ssh Ar name Ic time
+to retrieve the remote time.
+```
+
+### Paths, NixOS options, environment variables {#ssec-contributing-nixos-tools-options-and-environment}
+
+Constant paths should be marked with `Pa`, NixOS options with `Va`, and environment variables with `Ev`.
+
+Generated paths, e.g. `result/bin/run-hostname-vm` (where `hostname` is a variable or arguments) should be marked as `Ql` inline literals with their variable components marked appropriately.
+
+ - When `hostname` refers to an argument, it becomes `.Ql result/bin/run- Ns Ar hostname Ns -vm`
+ - When `hostname` refers to a variable, it becomes `.Ql result/bin/run- Ns Va hostname Ns -vm`
+
+### Code examples and other commands {#ssec-contributing-nixos-tools-code-examples}
+
+In free text names and complete invocations of other commands (e.g. `ssh` or `tar -xvf src.tar`) should be marked with `Ic`, fragments of command lines should be marked with `Ql`.
+
+Larger code blocks or those that cannot be shown inline should use indented literal display block markup for their contents, i.e.
+
+```
+.Bd -literal -offset indent
+...
+.Ed
+```
+
+Contents of code blocks may be marked up further, e.g. if they refer to arguments that will be substituted into them:
+
+```
+.Bd -literal -offset indent
+{
+  config.networking.hostname = "\c
+.Ar hostname Ns \c
+";
+}
+.Ed
+```
diff --git a/nixpkgs/nixos/doc/manual/default.nix b/nixpkgs/nixos/doc/manual/default.nix
new file mode 100644
index 000000000000..a368b16201f8
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/default.nix
@@ -0,0 +1,204 @@
+{ pkgs
+, options
+, config
+, version
+, revision
+, extraSources ? []
+, baseOptionsJSON ? null
+, warningsAreErrors ? true
+, prefix ? ../../..
+}:
+
+with pkgs;
+
+let
+  inherit (lib) hasPrefix removePrefix;
+
+  lib = pkgs.lib;
+
+  common = import ./common.nix;
+
+  manpageUrls = pkgs.path + "/doc/manpage-urls.json";
+
+  # We need to strip references to /nix/store/* from options,
+  # including any `extraSources` if some modules came from elsewhere,
+  # or else the build will fail.
+  #
+  # E.g. if some `options` came from modules in ${pkgs.customModules}/nix,
+  # you'd need to include `extraSources = [ pkgs.customModules ]`
+  prefixesToStrip = map (p: "${toString p}/") ([ prefix ] ++ extraSources);
+  stripAnyPrefixes = lib.flip (lib.foldr lib.removePrefix) prefixesToStrip;
+
+  optionsDoc = buildPackages.nixosOptionsDoc {
+    inherit options revision baseOptionsJSON warningsAreErrors;
+    transformOptions = opt: opt // {
+      # Clean up declaration sites to not refer to the NixOS source tree.
+      declarations = map stripAnyPrefixes opt.declarations;
+    };
+  };
+
+  nixos-lib = import ../../lib { };
+
+  testOptionsDoc = let
+      eval = nixos-lib.evalTest {
+        # Avoid evaluating a NixOS config prototype.
+        config.node.type = lib.types.deferredModule;
+        options._module.args = lib.mkOption { internal = true; };
+      };
+    in buildPackages.nixosOptionsDoc {
+      inherit (eval) options;
+      inherit revision;
+      transformOptions = opt: opt // {
+        # Clean up declaration sites to not refer to the NixOS source tree.
+        declarations =
+          map
+            (decl:
+              if hasPrefix (toString ../../..) (toString decl)
+              then
+                let subpath = removePrefix "/" (removePrefix (toString ../../..) (toString decl));
+                in { url = "https://github.com/NixOS/nixpkgs/blob/master/${subpath}"; name = subpath; }
+              else decl)
+            opt.declarations;
+      };
+      documentType = "none";
+      variablelistId = "test-options-list";
+      optionIdPrefix = "test-opt-";
+    };
+
+  testDriverMachineDocstrings = pkgs.callPackage
+    ../../../nixos/lib/test-driver/nixos-test-driver-docstrings.nix {};
+
+  prepareManualFromMD = ''
+    cp -r --no-preserve=all $inputs/* .
+
+    substituteInPlace ./manual.md \
+      --replace '@NIXOS_VERSION@' "${version}"
+    substituteInPlace ./configuration/configuration.md \
+      --replace \
+          '@MODULE_CHAPTERS@' \
+          ${lib.escapeShellArg (lib.concatMapStringsSep "\n" (p: "${p.value}") config.meta.doc)}
+    substituteInPlace ./nixos-options.md \
+      --replace \
+        '@NIXOS_OPTIONS_JSON@' \
+        ${optionsDoc.optionsJSON}/${common.outputPath}/options.json
+    substituteInPlace ./development/writing-nixos-tests.section.md \
+      --replace \
+        '@NIXOS_TEST_OPTIONS_JSON@' \
+        ${testOptionsDoc.optionsJSON}/${common.outputPath}/options.json
+    sed -e '/@PYTHON_MACHINE_METHODS@/ {' -e 'r ${testDriverMachineDocstrings}/machine-methods.md' -e 'd' -e '}' \
+      -i ./development/writing-nixos-tests.section.md
+  '';
+
+in rec {
+  inherit (optionsDoc) optionsJSON optionsNix optionsDocBook;
+
+  # Generate the NixOS manual.
+  manualHTML = runCommand "nixos-manual-html"
+    { nativeBuildInputs = [ buildPackages.nixos-render-docs ];
+      inputs = lib.sourceFilesBySuffices ./. [ ".md" ];
+      meta.description = "The NixOS manual in HTML format";
+      allowedReferences = ["out"];
+    }
+    ''
+      # Generate the HTML manual.
+      dst=$out/${common.outputPath}
+      mkdir -p $dst
+
+      cp ${../../../doc/style.css} $dst/style.css
+      cp ${../../../doc/overrides.css} $dst/overrides.css
+      cp -r ${pkgs.documentation-highlighter} $dst/highlightjs
+
+      ${prepareManualFromMD}
+
+      nixos-render-docs -j $NIX_BUILD_CORES manual html \
+        --manpage-urls ${manpageUrls} \
+        --revision ${lib.escapeShellArg revision} \
+        --generator "nixos-render-docs ${lib.version}" \
+        --stylesheet style.css \
+        --stylesheet overrides.css \
+        --stylesheet highlightjs/mono-blue.css \
+        --script ./highlightjs/highlight.pack.js \
+        --script ./highlightjs/loader.js \
+        --toc-depth 1 \
+        --chunk-toc-depth 1 \
+        ./manual.md \
+        $dst/${common.indexPath}
+
+      mkdir -p $out/nix-support
+      echo "nix-build out $out" >> $out/nix-support/hydra-build-products
+      echo "doc manual $dst" >> $out/nix-support/hydra-build-products
+    ''; # */
+
+  # Alias for backward compatibility. TODO(@oxij): remove eventually.
+  manual = manualHTML;
+
+  # Index page of the NixOS manual.
+  manualHTMLIndex = "${manualHTML}/${common.outputPath}/${common.indexPath}";
+
+  manualEpub = runCommand "nixos-manual-epub"
+    { nativeBuildInputs = [ buildPackages.libxml2.bin buildPackages.libxslt.bin buildPackages.zip ];
+      doc = ''
+        <book xmlns="http://docbook.org/ns/docbook"
+              xmlns:xlink="http://www.w3.org/1999/xlink"
+              version="5.0"
+              xml:id="book-nixos-manual">
+          <info>
+            <title>NixOS Manual</title>
+            <subtitle>Version ${lib.version}</subtitle>
+          </info>
+          <chapter>
+            <title>Temporarily unavailable</title>
+            <para>
+              The NixOS manual is currently not available in EPUB format,
+              please use the <link xlink:href="https://nixos.org/nixos/manual">HTML manual</link>
+              instead.
+            </para>
+            <para>
+              If you've used the EPUB manual in the past and it has been useful to you, please
+              <link xlink:href="https://github.com/NixOS/nixpkgs/issues/237234">let us know</link>.
+            </para>
+          </chapter>
+        </book>
+      '';
+      passAsFile = [ "doc" ];
+    }
+    ''
+      # Generate the epub manual.
+      dst=$out/${common.outputPath}
+
+      xsltproc \
+        --param chapter.autolabel 0 \
+        --nonet --xinclude --output $dst/epub/ \
+        ${docbook_xsl_ns}/xml/xsl/docbook/epub/docbook.xsl \
+        $docPath
+
+      echo "application/epub+zip" > mimetype
+      manual="$dst/nixos-manual.epub"
+      zip -0Xq "$manual" mimetype
+      cd $dst/epub && zip -Xr9D "$manual" *
+
+      rm -rf $dst/epub
+
+      mkdir -p $out/nix-support
+      echo "doc-epub manual $manual" >> $out/nix-support/hydra-build-products
+    '';
+
+
+  # Generate the `man configuration.nix` package
+  nixos-configuration-reference-manpage = runCommand "nixos-configuration-reference-manpage"
+    { nativeBuildInputs = [
+        buildPackages.installShellFiles
+        buildPackages.nixos-render-docs
+      ];
+      allowedReferences = ["out"];
+    }
+    ''
+      # Generate manpages.
+      mkdir -p $out/share/man/man5
+      nixos-render-docs -j $NIX_BUILD_CORES options manpage \
+        --revision ${lib.escapeShellArg revision} \
+        ${optionsJSON}/${common.outputPath}/options.json \
+        $out/share/man/man5/configuration.nix.5
+    '';
+
+}
diff --git a/nixpkgs/nixos/doc/manual/development/activation-script.section.md b/nixpkgs/nixos/doc/manual/development/activation-script.section.md
new file mode 100644
index 000000000000..cc317a6a01aa
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/activation-script.section.md
@@ -0,0 +1,72 @@
+# Activation script {#sec-activation-script}
+
+The activation script is a bash script called to activate the new
+configuration; it resides in a NixOS system at `$out/activate`. Since its
+contents depend on your system configuration, the contents may differ.
+This chapter explains how the script works in general and describes some common
+NixOS snippets. Please be aware that the script is executed on every boot and
+system switch, so tasks that can be performed elsewhere should be performed
+there (for example, letting systemd create a service's directory using
+mechanisms like `StateDirectory`, `CacheDirectory`, ... or, if that's not
+possible, using the service's `preStart`).
+
+Activation scripts are defined as snippets using
+[](#opt-system.activationScripts). They can either be a simple multiline string
+or an attribute set that can depend on other snippets. The builder for the
+activation script will take these dependencies into account and order the
+snippets accordingly. As a simple example:
+
+```nix
+system.activationScripts.my-activation-script = {
+  deps = [ "etc" ];
+  # supportsDryActivation = true;
+  text = ''
+    echo "Hallo i bims"
+  '';
+};
+```
+
+This example creates an activation script snippet that is run after the `etc`
+snippet. The special variable `supportsDryActivation` can be set so the snippet
+is also run when `nixos-rebuild dry-activate` is run. To differentiate between
+real and dry activation, the `$NIXOS_ACTION` environment variable can be
+read which is set to `dry-activate` when a dry activation is done.
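+
+As an illustration, a minimal sketch of a snippet that also runs during
+`nixos-rebuild dry-activate` and only performs its side effect during a real
+activation (the snippet name and the command are made up):
+
+```nix
+system.activationScripts.my-dry-aware-script = {
+  supportsDryActivation = true;
+  text = ''
+    if [ "$NIXOS_ACTION" = "dry-activate" ]; then
+      echo "would refresh the example cache"
+    else
+      echo "refreshing the example cache"
+      refresh-example-cache   # hypothetical command doing the real work
+    fi
+  '';
+};
+```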
+
+An activation script can write to special files instructing
+`switch-to-configuration` to restart/reload units. The script will take these
+requests into account and will incorporate the unit configuration as described
+above. This means that the activation script will "fake" a modified unit file
+and `switch-to-configuration` will act accordingly. By doing so, configuration
+like [systemd.services.\<name\>.restartIfChanged](#opt-systemd.services) is
+respected. Since the activation script is run **after** services are already
+stopped, [systemd.services.\<name\>.stopIfChanged](#opt-systemd.services)
+cannot be taken into account anymore and the unit is always restarted instead
+of being stopped and started afterwards.
+
+The files that can be written to are `/run/nixos/activation-restart-list` and
+`/run/nixos/activation-reload-list` with their respective counterparts for
+dry activation being `/run/nixos/dry-activation-restart-list` and
+`/run/nixos/dry-activation-reload-list`. Those files can contain
+newline-separated lists of unit names; duplicates are ignored. These
+files are not created automatically, and activation scripts must account for
+the possibility that they have to create them first.
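+
+For example, a minimal sketch of a snippet that asks `switch-to-configuration`
+to restart a unit (the unit name is hypothetical); note that it creates the
+list file's directory first and picks the dry-activation counterpart when
+appropriate:
+
+```nix
+system.activationScripts.restart-my-daemon = {
+  supportsDryActivation = true;
+  text = ''
+    if [ "$NIXOS_ACTION" = "dry-activate" ]; then
+      list=/run/nixos/dry-activation-restart-list
+    else
+      list=/run/nixos/activation-restart-list
+    fi
+    mkdir -p /run/nixos
+    echo "my-daemon.service" >> "$list"
+  '';
+};
+```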
+
+## NixOS snippets {#sec-activation-script-nixos-snippets}
+
+There are some snippets NixOS enables by default because disabling them would
+most likely break your system. This section lists a few of them and what they
+do:
+
+- `binsh` creates `/bin/sh` which points to the runtime shell
+- `etc` sets up the contents of `/etc`, this includes systemd units and
+  excludes `/etc/passwd`, `/etc/group`, and `/etc/shadow` (which are managed by
+  the `users` snippet)
+- `hostname` sets the system's hostname in the kernel (not in `/etc`)
+- `modprobe` sets the path to the `modprobe` binary for module auto-loading
+- `nix` prepares the nix store and adds a default initial channel
+- `specialfs` is responsible for mounting filesystems like `/proc` and `/sys`
+- `users` creates and removes users and groups by managing `/etc/passwd`,
+  `/etc/group` and `/etc/shadow`. This also creates home directories
+- `usrbinenv` creates `/usr/bin/env`
+- `var` creates some directories in `/var` that are not service-specific
+- `wrappers` creates setuid wrappers like `sudo`
diff --git a/nixpkgs/nixos/doc/manual/development/assertions.section.md b/nixpkgs/nixos/doc/manual/development/assertions.section.md
new file mode 100644
index 000000000000..cc6d81e56990
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/assertions.section.md
@@ -0,0 +1,40 @@
+# Warnings and Assertions {#sec-assertions}
+
+When configuration problems are detectable in a module, it is a good idea to write an assertion or warning. Doing so provides clear feedback to the user and prevents errors after the build.
+
+Although Nix has the `abort` and `builtins.trace` [functions](https://nixos.org/nix/manual/#ssec-builtins) to perform such tasks, they are not ideally suited for NixOS modules. Instead of these functions, you can declare your warnings and assertions using the NixOS module system.
+
+## Warnings {#sec-assertions-warnings}
+
+This is an example of using `warnings`.
+
+```nix
+{ config, lib, ... }:
+{
+  config = lib.mkIf config.services.foo.enable {
+    warnings =
+      if config.services.foo.bar
+      then [ ''You have enabled the bar feature of the foo service.
+               This is known to cause some specific problems in certain situations.
+               '' ]
+      else [];
+  }
+}
+```
+
+## Assertions {#sec-assertions-assetions}
+
+This example, extracted from the [`syslogd` module](https://github.com/NixOS/nixpkgs/blob/release-17.09/nixos/modules/services/logging/syslogd.nix) shows how to use `assertions`. Since there can only be one active syslog daemon at a time, an assertion is useful to prevent such a broken system from being built.
+
+```nix
+{ config, lib, ... }:
+{
+  config = lib.mkIf config.services.syslogd.enable {
+    assertions =
+      [ { assertion = !config.services.rsyslogd.enable;
+          message = "rsyslogd conflicts with syslogd";
+        }
+      ];
+  }
+}
+```
diff --git a/nixpkgs/nixos/doc/manual/development/bootspec.chapter.md b/nixpkgs/nixos/doc/manual/development/bootspec.chapter.md
new file mode 100644
index 000000000000..96c12f24e7f1
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/bootspec.chapter.md
@@ -0,0 +1,36 @@
+# Experimental feature: Bootspec {#sec-experimental-bootspec}
+
+Bootspec is an experimental feature, introduced in the [RFC-0125 proposal](https://github.com/NixOS/rfcs/pull/125), to standardize bootloader support
+and advanced boot workflows such as SecureBoot and potentially more; the reference implementation can be found [there](https://github.com/NixOS/nixpkgs/pull/172237).
+
+You can enable the creation of bootspec documents through [`boot.bootspec.enable = true`](options.html#opt-boot.bootspec.enable), which will emit a warning until [RFC-0125](https://github.com/NixOS/rfcs/pull/125) is officially merged.
+
+## Schema {#sec-experimental-bootspec-schema}
+
+The bootspec schema is versioned and validated against [a CUE schema file](https://cuelang.org/) which should be considered the source of truth for your applications.
+
+You will find the current version [here](../../../modules/system/activation/bootspec.cue).
+
+## Extensions mechanism {#sec-experimental-bootspec-extensions}
+
+Bootspec cannot account for all use cases.
+
+For this purpose, Bootspec offers a generic extension facility [`boot.bootspec.extensions`](options.html#opt-boot.bootspec.extensions) which can be used to inject any data needed for your use cases.
+
+An example for SecureBoot is to get the Nix store path to `/etc/os-release` in order to bake it into a unified kernel image:
+
+```nix
+{ config, lib, ... }: {
+  boot.bootspec.extensions = {
+    "org.secureboot.osRelease" = config.environment.etc."os-release".source;
+  };
+}
+```
+
+To reduce incompatibility and prevent names from clashing between applications, it is **highly recommended** to use a unique namespace for your extensions.
+
+## External bootloaders {#sec-experimental-bootspec-external-bootloaders}
+
+It is possible to enable your own bootloader through [`boot.loader.external.installHook`](options.html#opt-boot.loader.external.installHook) which can wrap an existing bootloader.
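+
+For illustration, a minimal sketch wiring up a custom installer script;
+`my-bootloader-install` is a hypothetical tool and the exact arguments passed
+to the hook may differ:
+
+```nix
+{ pkgs, ... }: {
+  boot.loader.external = {
+    enable = true;
+    installHook = pkgs.writeShellScript "install-bootloader" ''
+      # called by switch-to-configuration whenever the bootloader must be updated
+      exec my-bootloader-install "$@"
+    '';
+  };
+}
+```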
+
+Currently, there is no good story to compose existing bootloaders to enrich their features, e.g. SecureBoot, etc. It will be necessary to reimplement or reuse existing parts.
diff --git a/nixpkgs/nixos/doc/manual/development/building-parts.chapter.md b/nixpkgs/nixos/doc/manual/development/building-parts.chapter.md
new file mode 100644
index 000000000000..79ddaa37140b
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/building-parts.chapter.md
@@ -0,0 +1,74 @@
+# Building Specific Parts of NixOS {#sec-building-parts}
+
+With the command `nix-build`, you can build specific parts of your NixOS
+configuration. This is done as follows:
+
+```ShellSession
+$ cd /path/to/nixpkgs/nixos
+$ nix-build -A config.option
+```
+
+where `option` is a NixOS option with type "derivation" (i.e. something
+that can be built). Attributes of interest include:
+
+`system.build.toplevel`
+
+:   The top-level option that builds the entire NixOS system. Everything
+    else in your configuration is indirectly pulled in by this option.
+    This is what `nixos-rebuild` builds and what `/run/current-system`
+    points to afterwards.
+
+    A shortcut to build this is:
+
+    ```ShellSession
+    $ nix-build -A system
+    ```
+
+`system.build.manual.manualHTML`
+
+:   The NixOS manual.
+
+`system.build.etc`
+
+:   A tree of symlinks that form the static parts of `/etc`.
+
+`system.build.initialRamdisk` , `system.build.kernel`
+
+:   The initial ramdisk and kernel of the system. This allows a quick
+    way to test whether the kernel and the initial ramdisk boot
+    correctly, by using QEMU's `-kernel` and `-initrd` options:
+
+    ```ShellSession
+    $ nix-build -A config.system.build.initialRamdisk -o initrd
+    $ nix-build -A config.system.build.kernel -o kernel
+    $ qemu-system-x86_64 -kernel ./kernel/bzImage -initrd ./initrd/initrd -hda /dev/null
+    ```
+
+`system.build.nixos-rebuild` , `system.build.nixos-install` , `system.build.nixos-generate-config`
+
+:   These build the corresponding NixOS commands.
+
+`systemd.units.unit-name.unit`
+
+:   This builds the unit with the specified name. Note that since unit
+    names contain dots (e.g. `httpd.service`), you need to put them
+    between quotes, like this:
+
+    ```ShellSession
+    $ nix-build -A 'config.systemd.units."httpd.service".unit'
+    ```
+
+    You can also test individual units, without rebuilding the whole
+    system, by putting them in `/run/systemd/system`:
+
+    ```ShellSession
+    $ cp $(nix-build -A 'config.systemd.units."httpd.service".unit')/httpd.service \
+        /run/systemd/system/tmp-httpd.service
+    # systemctl daemon-reload
+    # systemctl start tmp-httpd.service
+    ```
+
+    Note that the unit must not have the same name as any unit in
+    `/etc/systemd/system` since those take precedence over
+    `/run/systemd/system`. That's why the unit is installed as
+    `tmp-httpd.service` here.
diff --git a/nixpkgs/nixos/doc/manual/development/developing-the-test-driver.chapter.md b/nixpkgs/nixos/doc/manual/development/developing-the-test-driver.chapter.md
new file mode 100644
index 000000000000..d64574fa62aa
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/developing-the-test-driver.chapter.md
@@ -0,0 +1,45 @@
+
+# Developing the NixOS Test Driver {#chap-developing-the-test-driver}
+
+The NixOS test framework is a project of its own.
+
+It consists of roughly the following components:
+
+ - `nixos/lib/test-driver`: The Python framework that sets up the test and runs the [`testScript`](#test-opt-testScript)
+ - `nixos/lib/testing`: The Nix code responsible for the wiring, written using the (NixOS) Module System.
+
+These components are exposed publicly through:
+
+ - `nixos/lib/default.nix`: The public interface that exposes the `nixos/lib/testing` entrypoint.
+ - `flake.nix`: Exposes `lib.nixos`, including the public test interface.
+
+Beyond the test driver itself, its integration into NixOS and Nixpkgs is important.
+
+ - `pkgs/top-level/all-packages.nix`: Defines the `nixosTests` attribute, used
+   by the package `tests` attributes and OfBorg.
+ - `nixos/release.nix`: Defines the `tests` attribute built by Hydra, independently, but analogous to `nixosTests`
+ - `nixos/release-combined.nix`: Defines which tests are channel blockers.
+
+Finally, we have legacy entrypoints that users should move away from, but which are cared for on a best-effort basis.
+These include `pkgs.nixosTest`, `testing-python.nix` and `make-test-python.nix`.
+
+## Testing changes to the test framework {#sec-test-the-test-framework}
+
+We currently have limited unit tests for the framework itself. You may run these with `nix-build -A nixosTests.nixos-test-driver`.
+
+When making significant changes to the test framework, we run the tests on Hydra, to avoid disrupting the larger NixOS project.
+
+For this, we use the `python-test-refactoring` branch in the `NixOS/nixpkgs` repository, and its [corresponding Hydra jobset](https://hydra.nixos.org/jobset/nixos/python-test-refactoring).
+This branch is used as a pointer, and not as a feature branch.
+
+1. Rebase the PR onto a recent, good evaluation of `nixos-unstable`
+2. Create a baseline evaluation by force-pushing this revision of `nixos-unstable` to `python-test-refactoring`.
+3. Note the evaluation number (we'll call it `<previous>`)
+4. Push the PR to `python-test-refactoring` and evaluate the PR on Hydra
+5. Create a comparison URL by navigating to the latest build of the PR and adding to the URL `?compare=<previous>`. This is not necessary for the evaluation that comes right after the baseline.
+
+Review the removed tests and newly failed tests using the constructed URL; otherwise you will accidentally compare iterations of the PR instead of changes to the PR base.
+
+As we currently have some flaky tests, newly failing tests are expected, but should be reviewed to make sure that
+ - The number of failures did not increase significantly.
+ - All failures that do occur can reasonably be assumed to fail for a different reason than the changes.
diff --git a/nixpkgs/nixos/doc/manual/development/development.md b/nixpkgs/nixos/doc/manual/development/development.md
new file mode 100644
index 000000000000..76f405c3b29c
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/development.md
@@ -0,0 +1,15 @@
+# Development {#ch-development}
+
+This chapter describes how you can modify and extend NixOS.
+
+```{=include=} chapters
+sources.chapter.md
+writing-modules.chapter.md
+building-parts.chapter.md
+bootspec.chapter.md
+what-happens-during-a-system-switch.chapter.md
+writing-documentation.chapter.md
+nixos-tests.chapter.md
+developing-the-test-driver.chapter.md
+testing-installer.chapter.md
+```
diff --git a/nixpkgs/nixos/doc/manual/development/freeform-modules.section.md b/nixpkgs/nixos/doc/manual/development/freeform-modules.section.md
new file mode 100644
index 000000000000..4f344dd80460
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/freeform-modules.section.md
@@ -0,0 +1,78 @@
+# Freeform modules {#sec-freeform-modules}
+
+Freeform modules allow you to define values for option paths that have
+not been declared explicitly. This can be used to add attribute-specific
+types to what would otherwise have to be `attrsOf` options in order to
+accept all attribute names.
+
+This feature can be enabled by using the attribute `freeformType` to
+define a freeform type. By doing this, all assignments without an
+associated option will be merged using the freeform type and combined
+into the resulting `config` set. Since this feature nullifies name
+checking for entire option trees, it is only recommended for use in
+submodules.
+
+::: {#ex-freeform-module .example}
+### Freeform submodule
+
+The following shows a submodule assigning a freeform type that allows
+arbitrary attributes with `str` values below `settings`, but also
+declares an option for the `settings.port` attribute to have it
+type-checked and assign a default value. See
+[Example: Declaring a type-checked `settings` attribute](#ex-settings-typed-attrs)
+for a more complete example.
+
+```nix
+{ lib, config, ... }: {
+
+  options.settings = lib.mkOption {
+    type = lib.types.submodule {
+
+      freeformType = with lib.types; attrsOf str;
+
+      # We want this attribute to be checked for the correct type
+      options.port = lib.mkOption {
+        type = lib.types.port;
+        # Declaring the option also allows defining a default value
+        default = 8080;
+      };
+
+    };
+  };
+}
+```
+
+And the following shows what such a module then allows
+
+```nix
+{
+  # Not a declared option, but the freeform type allows this
+  settings.logLevel = "debug";
+
+  # Not allowed because the freeform type only allows strings
+  # settings.enable = true;
+
+  # Allowed because there is a port option declared
+  settings.port = 80;
+
+  # Not allowed because the port option doesn't allow strings
+  # settings.port = "443";
+}
+```
+:::
+
+::: {.note}
+Freeform attributes cannot depend on other attributes of the same set
+without infinite recursion:
+
+```nix
+{
+  # This throws infinite recursion encountered
+  settings.logLevel = lib.mkIf (config.settings.port == 80) "debug";
+}
+```
+
+To prevent this, declare options for all attributes that need to depend
+on others. For the above example, this means declaring `logLevel` as an
+option.
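+
+A minimal sketch of such a declaration, extending the `settings` submodule
+from the first example:
+
+```nix
+options.settings = lib.mkOption {
+  type = lib.types.submodule {
+    freeformType = with lib.types; attrsOf str;
+    # Declaring logLevel as an option allows it to depend on other
+    # attributes without infinite recursion
+    options.logLevel = lib.mkOption {
+      type = lib.types.str;
+      default = "info";
+    };
+  };
+};
+```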
+:::
diff --git a/nixpkgs/nixos/doc/manual/development/importing-modules.section.md b/nixpkgs/nixos/doc/manual/development/importing-modules.section.md
new file mode 100644
index 000000000000..65d78959b8e0
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/importing-modules.section.md
@@ -0,0 +1,46 @@
+# Importing Modules {#sec-importing-modules}
+
+Sometimes NixOS modules need to be used in configuration but exist
+outside of Nixpkgs. These modules can be imported:
+
+```nix
+{ config, lib, pkgs, ... }:
+
+{
+  imports =
+    [ # Use a locally-available module definition in
+      # ./example-module/default.nix
+        ./example-module
+    ];
+
+  services.exampleModule.enable = true;
+}
+```
+
+The environment variable `NIXOS_EXTRA_MODULE_PATH` is an absolute path
+to a NixOS module that is included alongside the Nixpkgs NixOS modules.
+Like any NixOS module, this module can import additional modules:
+
+```nix
+# ./module-list/default.nix
+[
+  ./example-module1
+  ./example-module2
+]
+```
+
+```nix
+# ./extra-module/default.nix
+{ imports = import ./module-list.nix; }
+```
+
+```nix
+# NIXOS_EXTRA_MODULE_PATH=/absolute/path/to/extra-module
+{ config, lib, pkgs, ... }:
+
+{
+  # No `imports` needed
+
+  services.exampleModule1.enable = true;
+}
+```
diff --git a/nixpkgs/nixos/doc/manual/development/linking-nixos-tests-to-packages.section.md b/nixpkgs/nixos/doc/manual/development/linking-nixos-tests-to-packages.section.md
new file mode 100644
index 000000000000..38a64027f7c5
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/linking-nixos-tests-to-packages.section.md
@@ -0,0 +1,6 @@
+# Linking NixOS tests to packages {#sec-linking-nixos-tests-to-packages}
+
+You can link NixOS module tests to the packages that they exercised,
+so that the tests can be run automatically during code review when the package gets changed.
+This is
+[described in the nixpkgs manual](https://nixos.org/manual/nixpkgs/stable/#ssec-nixos-tests-linking).
diff --git a/nixpkgs/nixos/doc/manual/development/meta-attributes.section.md b/nixpkgs/nixos/doc/manual/development/meta-attributes.section.md
new file mode 100644
index 000000000000..33b41fe74d29
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/meta-attributes.section.md
@@ -0,0 +1,68 @@
+# Meta Attributes {#sec-meta-attributes}
+
+Like Nix packages, NixOS modules can declare meta-attributes to provide
+extra information. Module meta attributes are defined in the `meta.nix`
+special module.
+
+`meta` is a top level attribute like `options` and `config`. Available
+meta-attributes are `maintainers`, `doc`, and `buildDocsInSandbox`.
+
+Each of the meta-attributes must be defined at most once per module
+file.
+
+```nix
+{ config, lib, pkgs, ... }:
+{
+  options = {
+    ...
+  };
+
+  config = {
+    ...
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ ericsagnes ];
+    doc = ./default.md;
+    buildDocsInSandbox = true;
+  };
+}
+```
+
+-   `maintainers` contains a list of the module maintainers.
+
+-   `doc` points to a valid [Nixpkgs-flavored CommonMark](
+      https://nixos.org/manual/nixpkgs/unstable/#sec-contributing-markup
+    ) file containing the module
+    documentation. Its contents are automatically added to
+    [](#ch-configuration). Changes to a module's documentation have to
+    be checked to make sure they do not break building the NixOS manual:
+
+    ```ShellSession
+    $ nix-build nixos/release.nix -A manual.x86_64-linux
+    ```
+
+-  `buildDocsInSandbox` indicates whether the option documentation for the
+   module can be built in a derivation sandbox. This option is currently only
+   honored for modules shipped by nixpkgs. User modules and modules taken from
+   `NIXOS_EXTRA_MODULE_PATH` are always built outside of the sandbox, as has
+   been the case in previous releases.
+
+   Building NixOS option documentation in a sandbox allows caching of the built
+   documentation, which greatly decreases the amount of time needed to evaluate
+   a system configuration that has NixOS documentation enabled. The sandbox also
+   restricts which attributes may be referenced by documentation attributes
+   (such as option descriptions) to the `options` and `lib` module arguments and
+   the `pkgs.formats` attribute of the `pkgs` argument; `config` and the rest of
+   `pkgs` are disallowed and will cause doc build failures when used. This
+   restriction is necessary because we cannot reproduce the full nixpkgs
+   instantiation with configuration and overlays from a system configuration
+   inside the sandbox. The `options` argument only includes options of modules
+   that are also built inside the sandbox; referencing an option of a module
+   that isn't built in the sandbox is also forbidden.
+
+   The default is `true` and should usually not be changed; set it to `false`
+   only if the module requires access to `pkgs` in its documentation (e.g.
+   because it loads information from a linked package to build an option type)
+   or if its documentation depends on other modules that also aren't sandboxed
+   (e.g. by using types defined in the other module).
diff --git a/nixpkgs/nixos/doc/manual/development/nixos-tests.chapter.md b/nixpkgs/nixos/doc/manual/development/nixos-tests.chapter.md
new file mode 100644
index 000000000000..ec0e4b9f076a
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/nixos-tests.chapter.md
@@ -0,0 +1,13 @@
+# NixOS Tests {#sec-nixos-tests}
+
+When you add some feature to NixOS, you should write a test for it.
+NixOS tests are kept in the directory `nixos/tests`, and are executed
+(using Nix) by a testing framework that automatically starts one or more
+virtual machines containing the NixOS system(s) required for the test.
+
+```{=include=} sections
+writing-nixos-tests.section.md
+running-nixos-tests.section.md
+running-nixos-tests-interactively.section.md
+linking-nixos-tests-to-packages.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md b/nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md
new file mode 100644
index 000000000000..87bb46c78909
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/non-switchable-systems.section.md
@@ -0,0 +1,21 @@
+# Non Switchable Systems {#sec-non-switchable-system}
+
+In certain systems, most notably image based appliances, updates are handled
+outside the system. This means that you do not need to rebuild your
+configuration on the system itself anymore.
+
+If you want to build such a system, you can use the `image-based-appliance`
+profile:
+
+```nix
+{ modulesPath, ... }: {
+  imports = [ "${modulesPath}/profiles/image-based-appliance.nix" ];
+}
+```
+
+The most notable deviation of this profile from a standard NixOS configuration
+is that after building it, you cannot switch *to* the configuration anymore.
+The profile sets `config.system.switch.enable = false;`, which excludes
+`switch-to-configuration`, the central script called by `nixos-rebuild`, from
+your system. Removing this script makes the image lighter and slightly more
+secure.
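+
+If you only need this particular effect without the rest of the profile, a
+minimal sketch is to set the underlying option yourself (the profile may apply
+additional settings):
+
+```nix
+{ ... }: {
+  system.switch.enable = false;
+}
+```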
diff --git a/nixpkgs/nixos/doc/manual/development/option-declarations.section.md b/nixpkgs/nixos/doc/manual/development/option-declarations.section.md
new file mode 100644
index 000000000000..762070416187
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/option-declarations.section.md
@@ -0,0 +1,255 @@
+# Option Declarations {#sec-option-declarations}
+
+An option declaration specifies the name, type and description of a
+NixOS configuration option. It is invalid to define an option that
+hasn't been declared in any module. An option declaration generally
+looks like this:
+
+```nix
+options = {
+  name = mkOption {
+    type = type specification;
+    default = default value;
+    example = example value;
+    description = lib.mdDoc "Description for use in the NixOS manual.";
+  };
+};
+```
+
+The attribute names within the `name` attribute path must be camel
+cased in general but should, as an exception, match the [package
+attribute name](https://nixos.org/nixpkgs/manual/#sec-package-naming)
+when referencing a Nixpkgs package. For example, the option
+`services.nix-serve.bindAddress` references the `nix-serve` Nixpkgs
+package.
+
+The function `mkOption` accepts the following arguments.
+
+`type`
+
+:   The type of the option (see [](#sec-option-types)). This
+    argument is mandatory for nixpkgs modules. Setting this is highly
+    recommended for the sake of documentation and type checking. In case it is
+    not set, a fallback type with unspecified behavior is used.
+
+`default`
+
+:   The default value used if no value is defined by any module. A
+    default is not required; if one is not given, users of the module
+    will have to define the value of the option themselves, otherwise an
+    error will be thrown.
+
+`defaultText`
+
+:   A textual representation of the default value to be rendered verbatim in
+    the manual. Useful if the default value is a complex expression or depends
+    on other values or packages.
+    Use `lib.literalExpression` for a Nix expression, `lib.literalMD` for
+    a plain English description in [Nixpkgs-flavored Markdown](
+    https://nixos.org/nixpkgs/manual/#sec-contributing-markup) format.
+
+`example`
+
+:   An example value that will be shown in the NixOS manual.
+    You can use `lib.literalExpression` and `lib.literalMD` in the same way
+    as in `defaultText`.
+
+`description`
+
+:   A textual description of the option, in [Nixpkgs-flavored Markdown](
+    https://nixos.org/nixpkgs/manual/#sec-contributing-markup) format, that will be
+    included in the NixOS manual. During the migration process from DocBook
+    it is necessary to mark descriptions written in CommonMark with `lib.mdDoc`.
+    The description may still be written in DocBook (without any marker), but this
+    is discouraged and will be deprecated in the future.
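+
+As an illustration, a minimal sketch of a declaration combining these
+attributes; the option path and package are hypothetical:
+
+```nix
+options.services.foo.package = lib.mkOption {
+  type = lib.types.package;
+  default = pkgs.foo;
+  defaultText = lib.literalExpression "pkgs.foo";
+  example = lib.literalExpression "pkgs.foo.override { withGui = true; }";
+  description = lib.mdDoc "The foo package to use.";
+};
+```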
+
+## Utility functions for common option patterns {#sec-option-declarations-util}
+
+### `mkEnableOption` {#sec-option-declarations-util-mkEnableOption}
+
+Creates an Option attribute set for a boolean value option, i.e. an
+option to be toggled on or off.
+
+This function takes a single string argument, the name of the thing to be toggled.
+
+The option's description is "Whether to enable \<name\>.".
+
+For example:
+
+::: {#ex-options-declarations-util-mkEnableOption-magic .example}
+### `mkEnableOption` usage
+```nix
+lib.mkEnableOption (lib.mdDoc "magic")
+# is like
+lib.mkOption {
+  type = lib.types.bool;
+  default = false;
+  example = true;
+  description = lib.mdDoc "Whether to enable magic.";
+}
+```
+:::
+
+### `mkPackageOption` {#sec-option-declarations-util-mkPackageOption}
+
+Usage:
+
+```nix
+mkPackageOption pkgs "name" { default = [ "path" "in" "pkgs" ]; example = "literal example"; }
+```
+
+Creates an Option attribute set for an option that specifies the package a module should use for some purpose.
+
+**Note**: You shouldn’t necessarily make package options for all of your modules. You can always overwrite a specific package throughout nixpkgs by using [nixpkgs overlays](https://nixos.org/manual/nixpkgs/stable/#chap-overlays).
+
+The package is specified in the third argument under `default` as a list of strings
+representing its attribute path in nixpkgs (or another package set).
+Because of this, you need to pass nixpkgs itself (or a subset) as the first argument.
+
+The second argument may be either a string or a list of strings.
+It provides the display name of the package in the description of the generated option
+(using only the last element if the passed value is a list)
+and serves as the fallback value for the `default` argument.
+
+To include extra information in the description, pass `extraDescription` to
+append arbitrary text to the generated description.
+You can also pass an `example` value, either a literal string or an attribute path.
+
+The default argument can be omitted if the provided name is
+an attribute of pkgs (if name is a string) or a
+valid attribute path in pkgs (if name is a list).
+
+If you wish to explicitly provide no default, pass `null` as `default`.
+
+[]{#ex-options-declarations-util-mkPackageOption}
+Examples:
+
+::: {#ex-options-declarations-util-mkPackageOption-hello .example}
+### Simple `mkPackageOption` usage
+```nix
+lib.mkPackageOption pkgs "hello" { }
+# is like
+lib.mkOption {
+  type = lib.types.package;
+  default = pkgs.hello;
+  defaultText = lib.literalExpression "pkgs.hello";
+  description = lib.mdDoc "The hello package to use.";
+}
+```
+:::
+
+::: {#ex-options-declarations-util-mkPackageOption-ghc .example}
+### `mkPackageOption` with explicit default and example
+```nix
+lib.mkPackageOption pkgs "GHC" {
+  default = [ "ghc" ];
+  example = "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])";
+}
+# is like
+lib.mkOption {
+  type = lib.types.package;
+  default = pkgs.ghc;
+  defaultText = lib.literalExpression "pkgs.ghc";
+  example = lib.literalExpression "pkgs.haskell.packages.ghc92.ghc.withPackages (hkgs: [ hkgs.primes ])";
+  description = lib.mdDoc "The GHC package to use.";
+}
+```
+:::
+
+::: {#ex-options-declarations-util-mkPackageOption-extraDescription .example}
+### `mkPackageOption` with additional description text
+```nix
+mkPackageOption pkgs [ "python39Packages" "pytorch" ] {
+  extraDescription = "This is an example and doesn't actually do anything.";
+}
+# is like
+lib.mkOption {
+  type = lib.types.package;
+  default = pkgs.python39Packages.pytorch;
+  defaultText = lib.literalExpression "pkgs.python39Packages.pytorch";
+  description = "The pytorch package to use. This is an example and doesn't actually do anything.";
+}
+```
+:::
+
+## Extensible Option Types {#sec-option-declarations-eot}
+
+Extensible option types are a feature that allows extending certain type
+declarations across multiple module files. This feature only works with a
+restricted set of types, namely `enum` and `submodules`, and any composed
+forms of them.
+
+Extensible option types can be used for `enum` options that affect
+multiple modules, or as an alternative to related `enable` options.
+
+As an example, we will take the case of display managers. There is a
+central display manager module for generic display manager options and a
+module file per display manager backend (sddm, gdm ...).
+
+There are two approaches we could take with this module structure:
+
+-   Configuring the display managers independently by adding an enable
+    option to every display manager module backend. (NixOS)
+
+-   Configuring the display managers in the central module by adding
+    an option to select which display manager backend to use.
+
+Both approaches have problems.
+
+Making backends independent can quickly become hard to manage. For
+display managers, there can only be one enabled at a time, but the
+type system cannot enforce this restriction as there is no relation
+between each backend's `enable` option. As a result, this restriction
+has to be enforced explicitly by adding assertions to each display manager
+backend module.
+
+On the other hand, managing the display manager backends in the
+central module will require changing the central module option every
+time a new backend is added or removed.
+
+By using extensible option types, it is possible to create a placeholder
+option in the central module
+([Example: Extensible type placeholder in the service module](#ex-option-declaration-eot-service)),
+and to extend it in each backend module
+([Example: Extending `services.xserver.displayManager.enable` in the `gdm` module](#ex-option-declaration-eot-backend-gdm),
+[Example: Extending `services.xserver.displayManager.enable` in the `sddm` module](#ex-option-declaration-eot-backend-sddm)).
+
+As a result, `displayManager.enable` option values can be added without
+changing the main service module file and the type system automatically
+enforces that there can only be a single display manager enabled.
+
+::: {#ex-option-declaration-eot-service .example}
+### Extensible type placeholder in the service module
+```nix
+services.xserver.displayManager.enable = mkOption {
+  description = "Display manager to use";
+  type = with types; nullOr (enum [ ]);
+};
+```
+:::
+
+::: {#ex-option-declaration-eot-backend-gdm .example}
+### Extending `services.xserver.displayManager.enable` in the `gdm` module
+```nix
+services.xserver.displayManager.enable = mkOption {
+  type = with types; nullOr (enum [ "gdm" ]);
+};
+```
+:::
+
+::: {#ex-option-declaration-eot-backend-sddm .example}
+### Extending `services.xserver.displayManager.enable` in the `sddm` module
+```nix
+services.xserver.displayManager.enable = mkOption {
+  type = with types; nullOr (enum [ "sddm" ]);
+};
+```
+:::
+
+The placeholder declaration is a standard `mkOption` declaration, but it
+is important that extensible option declarations only use the `type`
+argument.
+
+Extensible option types work with any of the composed variants of `enum`
+such as `with types; nullOr (enum [ "foo" "bar" ])` or `with types;
+listOf (enum [ "foo" "bar" ])`.
diff --git a/nixpkgs/nixos/doc/manual/development/option-def.section.md b/nixpkgs/nixos/doc/manual/development/option-def.section.md
new file mode 100644
index 000000000000..6a3dc26b99be
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/option-def.section.md
@@ -0,0 +1,109 @@
+# Option Definitions {#sec-option-definitions}
+
+Option definitions are generally straightforward bindings of values to
+option names, like
+
+```nix
+config = {
+  services.httpd.enable = true;
+};
+```
+
+However, sometimes you need to wrap an option definition or set of
+option definitions in a *property* to achieve certain effects:
+
+## Delaying Conditionals {#sec-option-definitions-delaying-conditionals}
+
+If a set of option definitions is conditional on the value of another
+option, you may need to use `mkIf`. Consider, for instance:
+
+```nix
+config = if config.services.httpd.enable then {
+  environment.systemPackages = [ ... ];
+  ...
+} else {};
+```
+
+This definition will cause Nix to fail with an "infinite recursion"
+error. Why? Because the value of `config.services.httpd.enable` depends
+on the value being constructed here. After all, you could also write the
+clearly circular and contradictory:
+
+```nix
+config = if config.services.httpd.enable then {
+  services.httpd.enable = false;
+} else {
+  services.httpd.enable = true;
+};
+```
+
+The solution is to write:
+
+```nix
+config = mkIf config.services.httpd.enable {
+  environment.systemPackages = [ ... ];
+  ...
+};
+```
+
+The special function `mkIf` causes the evaluation of the conditional to
+be "pushed down" into the individual definitions, as if you had written:
+
+```nix
+config = {
+  environment.systemPackages = if config.services.httpd.enable then [ ... ] else [];
+  ...
+};
+```
+
+## Setting Priorities {#sec-option-definitions-setting-priorities}
+
+A module can override the definitions of an option in other modules by
+setting an *override priority*. All option definitions that do not have the lowest
+priority value are discarded. By default, option definitions have
+priority 100 and option defaults have priority 1500.
+You can specify an explicit priority by using `mkOverride`, e.g.
+
+```nix
+services.openssh.enable = mkOverride 10 false;
+```
+
+This definition causes all other definitions with priorities above 10 to
+be discarded. The function `mkForce` is equal to `mkOverride 50`, and
+`mkDefault` is equal to `mkOverride 1000`.
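+
+For illustration, a minimal sketch using `mkMerge` (described below) to emulate
+two modules defining the same option; the `mkForce` definition has the lower
+priority value and therefore wins:
+
+```nix
+config = mkMerge
+  [ # as if defined in one module: implicit priority 100
+    { services.openssh.enable = true; }
+    # as if defined in another module: mkForce is mkOverride 50, so it wins
+    { services.openssh.enable = mkForce false; }
+  ];
+```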
+
+## Ordering Definitions {#sec-option-definitions-ordering}
+
+It is also possible to influence the order in which the definitions for an option are
+merged by setting an *order priority* with `mkOrder`. The default order priority is 1000.
+The functions `mkBefore` and `mkAfter` are equal to `mkOrder 500` and `mkOrder 1500`, respectively.
+As an example,
+
+```nix
+hardware.firmware = mkBefore [ myFirmware ];
+```
+
+This definition ensures that `myFirmware` comes before other unordered
+definitions in the final list value of `hardware.firmware`.
+
+Note that this is different from [override priorities](#sec-option-definitions-setting-priorities):
+setting an order does not affect whether the definition is included or not.
+
+## Merging Configurations {#sec-option-definitions-merging}
+
+In conjunction with `mkIf`, it is sometimes useful for a module to
+return multiple sets of option definitions, to be merged together as if
+they were declared in separate modules. This can be done using
+`mkMerge`:
+
+```nix
+config = mkMerge
+  [ # Unconditional stuff.
+    { environment.systemPackages = [ ... ];
+    }
+    # Conditional stuff.
+    (mkIf config.services.bla.enable {
+      environment.systemPackages = [ ... ];
+    })
+  ];
+```
diff --git a/nixpkgs/nixos/doc/manual/development/option-types.section.md b/nixpkgs/nixos/doc/manual/development/option-types.section.md
new file mode 100644
index 000000000000..2ad3d6c4f949
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/option-types.section.md
@@ -0,0 +1,625 @@
+# Options Types {#sec-option-types}
+
+Option types are a way to put constraints on the values a module option
+can take. Types are also responsible for how values are merged in case of
+multiple definitions.
+
+## Basic types {#sec-option-types-basic}
+
+Basic types are the simplest available types in the module system. Basic
+types include multiple string types that mainly differ in how definition
+merging is handled.
+
+`types.bool`
+
+:   A boolean, its values can be `true` or `false`.
+
+`types.path`
+
+:   A filesystem path is anything that starts with a slash when
+    coerced to a string. Even though derivations can be considered
+    paths, the more specific `types.package` should be preferred.
+
+`types.pathInStore`
+
+:   A path that is contained in the Nix store. This can be a top-level store
+    path like `pkgs.hello` or a descendant like `"${pkgs.hello}/bin/hello"`.
+
+`types.package`
+
+:   A top-level store path. This can be an attribute set pointing
+    to a store path, like a derivation or a flake input.
+
+`types.enum` *`l`*
+
+:   One element of the list *`l`*, e.g. `types.enum [ "left" "right" ]`.
+    Multiple definitions cannot be merged.
+
+`types.anything`
+
+:   A type that accepts any value and recursively merges attribute sets
+    together. This type is recommended when the option type is unknown.
+
+    ::: {#ex-types-anything .example}
+    ### `types.anything`
+
+    Two definitions of this type like
+
+    ```nix
+    {
+      str = lib.mkDefault "foo";
+      pkg.hello = pkgs.hello;
+      fun.fun = x: x + 1;
+    }
+    ```
+
+    ```nix
+    {
+      str = lib.mkIf true "bar";
+      pkg.gcc = pkgs.gcc;
+      fun.fun = lib.mkForce (x: x + 2);
+    }
+    ```
+
+    will get merged to
+
+    ```nix
+    {
+      str = "bar";
+      pkg.gcc = pkgs.gcc;
+      pkg.hello = pkgs.hello;
+      fun.fun = x: x + 2;
+    }
+    ```
+    :::
+
+`types.raw`
+
+:   A type which doesn't do any checking, merging or nested evaluation. It
+    accepts a single arbitrary value that is not recursed into, making it
+    useful for values coming from outside the module system, such as package
+    sets or arbitrary data. Options of this type are still evaluated according
+    to priorities and conditionals, so `mkForce`, `mkIf` and co. still work on
+    the option value itself, but not for any value nested within it. This type
+    should only be used when checking, merging and nested evaluation are not
+    desirable.
+
+`types.optionType`
+
+:   The type of an option's type. Its merging operation ensures that nested
+    options have the correct file location annotated, and that if possible,
+    multiple option definitions are correctly merged together. The main use
+    case is as the type of the `_module.freeformType` option.
+
+`types.attrs`
+
+:   A free-form attribute set.
+
+    ::: {.warning}
+    This type will be deprecated in the future because it doesn't
+    recurse into attribute sets, silently drops earlier attribute
+    definitions, and doesn't discharge `lib.mkDefault`, `lib.mkIf`
+    and co. For allowing arbitrary attribute sets, prefer
+    `types.attrsOf types.anything` instead which doesn't have these
+    problems.
+    :::
+
+`types.pkgs`
+
+:   A type for the top level Nixpkgs package set.
+
+### Numeric types {#sec-option-types-numeric}
+
+`types.int`
+
+:   A signed integer.
+
+`types.ints.{s8, s16, s32}`
+
+:   Signed integers with a fixed length (8, 16 or 32 bits). They go from
+    −2^(n−1) to 2^(n−1)−1 respectively (e.g. `−128` to `127` for 8 bits).
+
+`types.ints.unsigned`
+
+:   An unsigned integer (that is >= 0).
+
+`types.ints.{u8, u16, u32}`
+
+:   Unsigned integers with a fixed length (8, 16 or 32 bits). They go
+    from 0 to 2^n−1 respectively (e.g. `0`
+    to `255` for 8 bits).
+
+`types.ints.between` *`lowest highest`*
+
+:   An integer between *`lowest`* and *`highest`* (both inclusive).
+
+`types.ints.positive`
+
+:   A positive integer (that is > 0).
+
+`types.port`
+
+:   A port number. This type is an alias to
+    `types.ints.u16`.
+
+`types.float`
+
+:   A floating point number.
+
+    ::: {.warning}
+    Converting a floating point number to a string with `toString` or `toJSON`
+    may result in [precision loss](https://github.com/NixOS/nix/issues/5733).
+    :::
+
+`types.number`
+
+:   Either a signed integer or a floating point number. No implicit conversion
+    is done between the two types, and multiple equal definitions will only be
+    merged if they have the same type.
+
+`types.numbers.between` *`lowest highest`*
+
+:   An integer or floating point number between *`lowest`* and *`highest`* (both inclusive).
+
+`types.numbers.nonnegative`
+
+:   A nonnegative integer or floating point number (that is >= 0).
+
+`types.numbers.positive`
+
+:   A positive integer or floating point number (that is > 0).
+
+### String types {#sec-option-types-string}
+
+`types.str`
+
+:   A string. Multiple definitions cannot be merged.
+
+`types.separatedString` *`sep`*
+
+:   A string. Multiple definitions are concatenated with *`sep`*, e.g.
+    `types.separatedString "|"`.
+
+`types.lines`
+
+:   A string. Multiple definitions are concatenated with a new line
+    `"\n"`.
+
+`types.commas`
+
+:   A string. Multiple definitions are concatenated with a comma `","`.
+
+`types.envVar`
+
+:   A string. Multiple definitions are concatenated with a colon `":"`.
+
+`types.strMatching`
+
+:   A string matching a specific regular expression. Multiple
+    definitions cannot be merged. The regular expression is processed
+    using `builtins.match`.
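+
+As an illustration, a minimal sketch using two of these string types; the
+option names are made up:
+
+```nix
+options = {
+  listenAddress = mkOption {
+    # the regular expression must match the whole string, e.g. "127.0.0.1:8080"
+    type = types.strMatching "[0-9.]+:[0-9]+";
+    default = "127.0.0.1:8080";
+  };
+  extraConfig = mkOption {
+    # multiple definitions are concatenated with newlines
+    type = types.lines;
+    default = "";
+  };
+};
+```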
+
+## Submodule types {#sec-option-types-submodule}
+
+Submodules are detailed in [Submodule](#section-option-types-submodule).
+
+`types.submodule` *`o`*
+
+:   A set of sub options *`o`*. *`o`* can be an attribute set, a function
+    returning an attribute set, or a path to a file containing such a
+    value. Submodules are used in composed types to create modular
+    options. This is equivalent to
+    `types.submoduleWith { modules = toList o; shorthandOnlyDefinesConfig = true; }`.
+
+`types.submoduleWith` { *`modules`*, *`specialArgs`* ? {}, *`shorthandOnlyDefinesConfig`* ? false }
+
+:   Like `types.submodule`, but more flexible and with better defaults.
+    It has parameters
+
+    -   *`modules`* A list of modules to use by default for this
+        submodule type. This gets combined with all option definitions
+        to build the final list of modules that will be included.
+
+        ::: {.note}
+        Only options defined with this argument are included in rendered
+        documentation.
+        :::
+
+    -   *`specialArgs`* An attribute set of extra arguments to be passed
+        to the module functions. The option `_module.args` should be
+        used instead for most arguments since it allows overriding.
+        *`specialArgs`* should only be used for arguments that can't go
+        through the module fixed-point, because of infinite recursion or
+        other problems. An example is overriding the `lib` argument,
+        because `lib` itself is used to define `_module.args`, which
+        makes using `_module.args` to define it impossible.
+
+    -   *`shorthandOnlyDefinesConfig`* Whether definitions of this type
+        should default to the `config` section of a module (see
+        [Example: Structure of NixOS Modules](#ex-module-syntax))
+        if it is an attribute set. Enabling this only has a benefit
+        when the submodule defines an option named `config` or `options`.
+        In such a case it would allow the option to be set with
+        `the-submodule.config = "value"` instead of requiring
+        `the-submodule.config.config = "value"`. This is because
+        only when modules *don't* set the `config` or `options`
+        keys, all keys are interpreted as option definitions in the
+        `config` section. Enabling this option implicitly puts all
+        attributes in the `config` section.
+
+        With this option enabled, defining a non-`config` section
+        requires using a function:
+        `the-submodule = { ... }: { options = { ... }; }`.
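+
+    As an illustration, a minimal sketch of a `submoduleWith` declaration; the
+    module file `./instance-options.nix` is hypothetical:
+
+    ```nix
+    options.instances = mkOption {
+      type = with types; attrsOf (submoduleWith {
+        # options declared here are included in the rendered documentation
+        modules = [ ./instance-options.nix ];
+        shorthandOnlyDefinesConfig = true;
+      });
+      default = { };
+    };
+    ```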
+
+`types.deferredModule`
+
+:   Whereas `submodule` represents an option tree, `deferredModule` represents
+    a module value, such as a module file or a configuration.
+
+    It can be set multiple times.
+
+    Module authors can use its value in `imports`, in `submoduleWith`'s `modules`
+    or in `evalModules`' `modules` parameter, among other places.
+
+    Note that `imports` must be evaluated before the module fixpoint. Because
+    of this, deferred modules can only be imported into "other" fixpoints, such
+    as submodules.
+
+    One use case for this type is the type of a "default" module that allows the
+    user to affect all submodules in an `attrsOf submodule` at once. This is
+    more convenient and discoverable than expecting the module user to
+    type-merge with the `attrsOf submodule` option.
+
+## Composed types {#sec-option-types-composed}
+
+Composed types are types that take a type as parameter. `listOf int`
+and `either int str` are examples of composed types.
+
+`types.listOf` *`t`*
+
+:   A list of *`t`* type, e.g. `types.listOf int`. Multiple definitions
+    are merged with list concatenation.
+
+`types.attrsOf` *`t`*
+
+:   An attribute set where all the values are of type *`t`*. Multiple
+    definitions result in the joined attribute set.
+
+    ::: {.note}
+    This type is *strict* in its values, which in turn means attributes
+    cannot depend on other attributes. See `types.lazyAttrsOf` for a lazy
+    version.
+    :::
+
+`types.lazyAttrsOf` *`t`*
+
+:   An attribute set where all the values are of type *`t`*. Multiple
+    definitions result in the joined attribute set. This is the lazy
+    version of `types.attrsOf`, allowing attributes to depend on each other.
+
+    ::: {.warning}
+    This version does not fully support conditional definitions! With an
+    option `foo` of this type and a definition
+    `foo.attr = lib.mkIf false 10`, evaluating `foo ? attr` will return
+    `true` even though it should be false. Accessing the value will then
+    throw an error. For types *`t`* that have an `emptyValue` defined,
+    that value will be returned instead of throwing an error. So if the
+    type of `foo.attr` was `lazyAttrsOf (nullOr int)`, `null` would be
+    returned instead for the same `mkIf false` definition.
+    :::
+
+`types.nullOr` *`t`*
+
+:   `null` or type *`t`*. Multiple definitions are merged according to
+    type *`t`*.
+
+`types.uniq` *`t`*
+
+:   Ensures that type *`t`* cannot be merged. It is used to ensure option
+    definitions are declared only once.
+
+`types.unique` `{ message = m }` *`t`*
+
+:   Ensures that type *`t`* cannot be merged. Prints the message *`m`*, after
+    the line `The option <option path> is defined multiple times.` and before
+    a list of definition locations.
+
+`types.either` *`t1 t2`*
+
+:   Type *`t1`* or type *`t2`*, e.g. `with types; either int str`.
+    Multiple definitions cannot be merged.
+
+`types.oneOf` \[ *`t1 t2`* ... \]
+
+:   Type *`t1`* or type *`t2`* and so forth, e.g.
+    `with types; oneOf [ int str bool ]`. Multiple definitions cannot be
+    merged.
+
+`types.coercedTo` *`from f to`*
+
+:   Type *`to`* or type *`from`*, which will be coerced to type *`to`* using
+    function *`f`*, which takes an argument of type *`from`* and returns a
+    value of type *`to`*. Can be used to preserve backwards compatibility
+    of an option if its type was changed.
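+
+    For example, a sketch of an option that historically accepted a single
+    string but now uses a list of strings (the option name is hypothetical):
+
+    ```nix
+    options.extraFlags = lib.mkOption {
+      # A lone string is coerced into a one-element list.
+      type = with lib.types; coercedTo str lib.singleton (listOf str);
+      default = [ ];
+    };
+    ```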
+
+## Submodule {#section-option-types-submodule}
+
+`submodule` is a very powerful type that defines a set of sub-options
+that are handled like a separate module.
+
+It takes a parameter *`o`* that should be a set, or a function returning
+a set, with an `options` key defining the sub-options. Submodule option
+definitions are type-checked according to the `options` declarations.
+Of course, you can nest submodule option definitions for even higher
+modularity.
+
+The option set can be defined directly
+([Example: Directly defined submodule](#ex-submodule-direct)) or as reference
+([Example: Submodule defined as a reference](#ex-submodule-reference)).
+
+Note that even if your submodule’s options all have a default value,
+you will still need to provide a default value (e.g. an empty attribute set)
+if you want to allow users to leave it undefined.
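+
+For instance, a sketch of a submodule option that users may leave
+undefined entirely:
+
+```nix
+options.mod = mkOption {
+  description = "submodule example";
+  # Without this default, leaving `mod` undefined is an error even though
+  # `foo` itself has a default.
+  default = { };
+  type = with types; submodule {
+    options.foo = mkOption {
+      type = int;
+      default = 1;
+    };
+  };
+};
+```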
+
+::: {#ex-submodule-direct .example}
+### Directly defined submodule
+```nix
+options.mod = mkOption {
+  description = "submodule example";
+  type = with types; submodule {
+    options = {
+      foo = mkOption {
+        type = int;
+      };
+      bar = mkOption {
+        type = str;
+      };
+    };
+  };
+};
+```
+:::
+
+::: {#ex-submodule-reference .example}
+### Submodule defined as a reference
+```nix
+let
+  modOptions = {
+    options = {
+      foo = mkOption {
+        type = int;
+      };
+      bar = mkOption {
+        type = int;
+      };
+    };
+  };
+in
+options.mod = mkOption {
+  description = "submodule example";
+  type = with types; submodule modOptions;
+};
+```
+:::
+
+The `submodule` type is especially interesting when used with composed
+types like `attrsOf` or `listOf`. When composed with `listOf`
+([Example: Declaration of a list of submodules](#ex-submodule-listof-declaration)), `submodule` allows
+multiple definitions of the submodule option set
+([Example: Definition of a list of submodules](#ex-submodule-listof-definition)).
+
+::: {#ex-submodule-listof-declaration .example}
+### Declaration of a list of submodules
+```nix
+options.mod = mkOption {
+  description = "submodule example";
+  type = with types; listOf (submodule {
+    options = {
+      foo = mkOption {
+        type = int;
+      };
+      bar = mkOption {
+        type = str;
+      };
+    };
+  });
+};
+```
+:::
+
+::: {#ex-submodule-listof-definition .example}
+### Definition of a list of submodules
+```nix
+config.mod = [
+  { foo = 1; bar = "one"; }
+  { foo = 2; bar = "two"; }
+];
+```
+:::
+
+When composed with `attrsOf`
+([Example: Declaration of attribute sets of submodules](#ex-submodule-attrsof-declaration)), `submodule` allows
+multiple named definitions of the submodule option set
+([Example: Definition of attribute sets of submodules](#ex-submodule-attrsof-definition)).
+
+::: {#ex-submodule-attrsof-declaration .example}
+### Declaration of attribute sets of submodules
+```nix
+options.mod = mkOption {
+  description = "submodule example";
+  type = with types; attrsOf (submodule {
+    options = {
+      foo = mkOption {
+        type = int;
+      };
+      bar = mkOption {
+        type = str;
+      };
+    };
+  });
+};
+```
+:::
+
+::: {#ex-submodule-attrsof-definition .example}
+### Definition of attribute sets of submodules
+```nix
+config.mod.one = { foo = 1; bar = "one"; };
+config.mod.two = { foo = 2; bar = "two"; };
+```
+:::
+
+## Extending types {#sec-option-types-extending}
+
+Types are mainly characterized by their `check` and `merge` functions.
+
+`check`
+
+:   The function to type check the value. Takes a value as parameter and
+    returns a boolean. It is possible to extend a type check with the
+    `addCheck` function ([Example: Adding a type check](#ex-extending-type-check-1)),
+    or to fully override the check function
+    ([Example: Overriding a type check](#ex-extending-type-check-2)).
+
+    ::: {#ex-extending-type-check-1 .example}
+    ### Adding a type check
+
+    ```nix
+    byte = mkOption {
+      description = "An integer between 0 and 255.";
+      type = types.addCheck types.int (x: x >= 0 && x <= 255);
+    };
+    ```
+    :::
+
+    ::: {#ex-extending-type-check-2 .example}
+    ### Overriding a type check
+
+    ```nix
+    nixThings = mkOption {
+      description = "words that start with 'nix'";
+      type = types.str // {
+        check = (x: lib.hasPrefix "nix" x);
+      };
+    };
+    ```
+    :::
+
+`merge`
+
+:   Function to merge the option's values when multiple values are set.
+    The function takes two parameters: `loc`, the option path as a list
+    of strings, and `defs`, the list of defined values. It is
+    possible to override a type merge function for custom needs.
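+
+    For example, a sketch of overriding the merge function of a string
+    option so that all definitions are joined with newlines (illustrative
+    only; `types.lines` already provides similar behavior):
+
+    ```nix
+    combined = mkOption {
+      description = "definitions joined with newlines";
+      type = types.str // {
+        # `defs` is a list of { file, value } sets.
+        merge = loc: defs:
+          lib.concatStringsSep "\n" (map (def: def.value) defs);
+      };
+    };
+    ```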
+
+## Custom types {#sec-option-types-custom}
+
+Custom types can be created with the `mkOptionType` function. As type
+creation includes some more complex topics such as submodule handling,
+it is recommended to get familiar with `types.nix` code before creating
+a new type.
+
+The only required parameter is `name`.
+
+`name`
+
+:   A string representation of the type function name.
+
+`description`
+
+:   Description of the type used in documentation. It should give
+    information about the type and any of its arguments.
+
+`check`
+
+:   A function to type check the definition value. Takes the definition
+    value as a parameter and returns a boolean indicating the type check
+    result, `true` for success and `false` for failure.
+
+`merge`
+
+:   A function to merge multiple definitions values. Takes two
+    parameters:
+
+    *`loc`*
+
+    :   The option path as a list of strings, e.g.
+        `["boot" "loader" "grub" "enable"]`.
+
+    *`defs`*
+
+    :   The list of sets of defined `value` and `file` where the value
+        was defined, e.g.
+        `[ { file = "/foo.nix"; value = 1; } { file = "/bar.nix"; value = 2; } ]`.
+        The `merge` function should return the merged value
+        or throw an error in case the values are impossible or not meant
+        to be merged.
+
+`getSubOptions`
+
+:   For composed types that can take a submodule as type parameter, this
+    function generates sub-options documentation. It takes the current
+    option prefix as a list and returns the set of sub-options. Usually
+    defined in a recursive manner by adding a term to the prefix, e.g.
+    `prefix: elemType.getSubOptions (prefix ++ ["prefix"])` where
+    *`"prefix"`* is the newly added prefix.
+
+`getSubModules`
+
+:   For composed types that can take a submodule as type parameter, this
+    function should return the type parameter's submodules. If the type
+    parameter is called `elemType`, the function should just recursively
+    look into submodules by returning `elemType.getSubModules`.
+
+`substSubModules`
+
+:   For composed types that can take a submodule as type parameter, this
+    function can be used to substitute the parameter of a submodule
+    type. It takes a module as parameter and returns the type with the
+    submodule options substituted. It is usually defined as a type
+    function call with a recursive call to `substSubModules`, e.g. for a
+    type `composedType` that takes an `elemType` type parameter, this
+    function should be defined as
+    `m: composedType (elemType.substSubModules m)`.
+
+`typeMerge`
+
+:   A function to merge multiple type declarations. Takes the `functor`
+    of the type to merge as parameter. A `null` return value means the
+    types cannot be merged.
+
+    *`f`*
+
+    :   The `functor` of the type to merge.
+
+    Note: There is a generic `defaultTypeMerge` that works with most
+    value and composed types.
+
+`functor`
+
+:   An attribute set representing the type. It is used for type
+    operations and has the following keys:
+
+    `type`
+
+    :   The type function.
+
+    `wrapped`
+
+    :   Holds the type parameter for composed types.
+
+    `payload`
+
+    :   Holds the value parameter for value types. The types that have a
+        `payload` are the `enum`, `separatedString` and `submodule`
+        types.
+
+    `binOp`
+
+    :   A binary operation that can merge the payloads of two types of
+        the same kind. Defined as a function that takes two payloads as
+        parameters and returns the merged payload.
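+
+As an illustration only, a sketch of a simple custom type that accepts even
+integers and requires all definitions to be equal:
+
+```nix
+evenInt = lib.mkOptionType {
+  name = "evenInt";
+  description = "even integer";
+  check = x: lib.isInt x && x / 2 * 2 == x;
+  # Accept multiple identical definitions, reject conflicting ones.
+  merge = lib.mergeEqualOption;
+};
+```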
diff --git a/nixpkgs/nixos/doc/manual/development/replace-modules.section.md b/nixpkgs/nixos/doc/manual/development/replace-modules.section.md
new file mode 100644
index 000000000000..ac9f5adbaf98
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/replace-modules.section.md
@@ -0,0 +1,71 @@
+# Replace Modules {#sec-replace-modules}
+
+Modules that are imported can also be disabled. The option declarations,
+config implementation and the imports of a disabled module will be
+ignored, allowing another to take its place. This can be used to
+import a set of modules from another channel while keeping the rest of
+the system on a stable release.
+
+`disabledModules` is a top level attribute like `imports`, `options` and
+`config`. It contains a list of modules that will be disabled. This can
+either be:
+ - the full path to the module,
+ - or a string with the filename relative to the modules path (e.g. \<nixpkgs/nixos/modules> for NixOS),
+ - or an attribute set containing a specific `key` attribute.
+
+The latter allows some modules to be disabled even though they are distributed
+via attributes instead of file paths. The `key` should be globally unique, so
+it is recommended to include a file path in it, or to rely on a framework to do
+it for you. A sketch of each form is shown below.
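+
+For illustration (the local path and the key below are hypothetical):
+
+```nix
+{
+  disabledModules = [
+    ./my-local-module.nix                 # full path to the module
+    "services/databases/postgresql.nix"   # relative to the modules path
+    { key = "my-framework#my-module"; }   # module identified by its key
+  ];
+}
+```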
+
+This example will replace the existing postgresql module with the
+version defined in the nixos-unstable channel while keeping the rest of
+the modules and packages from the original nixos channel. This only
+overrides the module definition; it won't use the postgresql package from
+nixos-unstable unless explicitly configured to do so.
+
+```nix
+{ config, lib, pkgs, ... }:
+
+{
+  disabledModules = [ "services/databases/postgresql.nix" ];
+
+  imports =
+    [ # Use postgresql service from nixos-unstable channel.
+      # sudo nix-channel --add https://nixos.org/channels/nixos-unstable nixos-unstable
+      <nixos-unstable/nixos/modules/services/databases/postgresql.nix>
+    ];
+
+  services.postgresql.enable = true;
+}
+```
+
+This example shows how to define a custom module as a replacement for an
+existing module. Importing this module will disable the original module
+without having to know its implementation details.
+
+```nix
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.man;
+in
+
+{
+  disabledModules = [ "services/programs/man.nix" ];
+
+  options = {
+    programs.man.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = "Whether to enable manual pages.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = [ "disabled manpages for production deployments." ];
+  };
+}
+```
diff --git a/nixpkgs/nixos/doc/manual/development/running-nixos-tests-interactively.section.md b/nixpkgs/nixos/doc/manual/development/running-nixos-tests-interactively.section.md
new file mode 100644
index 000000000000..4b8385d7e0d9
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/running-nixos-tests-interactively.section.md
@@ -0,0 +1,103 @@
+# Running Tests interactively {#sec-running-nixos-tests-interactively}
+
+The test itself can be run interactively. This is particularly useful
+when developing or debugging a test:
+
+```ShellSession
+$ nix-build . -A nixosTests.login.driverInteractive
+$ ./result/bin/nixos-test-driver
+[...]
+>>>
+```
+
+You can then take any Python statement, e.g.
+
+```py
+>>> start_all()
+>>> test_script()
+>>> machine.succeed("touch /tmp/foo")
+>>> print(machine.succeed("pwd")) # Show stdout of command
+```
+
+The function `test_script` executes the entire test script and drops you
+back into the test driver command line upon its completion. This allows
+you to inspect the state of the VMs after the test (e.g. to debug the
+test script).
+
+## Shell access in interactive mode {#sec-nixos-test-shell-access}
+
+The function `<yourmachine>.shell_interact()` grants access to a shell running
+inside a virtual machine. To use it, replace `<yourmachine>` with the name of a
+virtual machine defined in the test, for example: `machine.shell_interact()`.
+Keep in mind that this shell may not display everything correctly as it is
+running within an interactive Python REPL, and logging output from the virtual
+machine may overwrite input and output from the guest shell:
+
+```py
+>>> machine.shell_interact()
+machine: Terminal is ready (there is no initial prompt):
+$ hostname
+machine
+```
+
+As an alternative, you can proxy the guest shell to a local TCP server by first
+starting a TCP server in a terminal using the command:
+
+```ShellSession
+$ socat 'READLINE,PROMPT=$ ' tcp-listen:4444,reuseaddr
+```
+
+In the terminal where the test driver is running, connect to this server by
+using:
+
+```py
+>>> machine.shell_interact("tcp:127.0.0.1:4444")
+```
+
+Once the connection is established, you can enter commands in the terminal
+where socat is running.
+
+## Port forwarding to NixOS test VMs {#sec-nixos-test-port-forwarding}
+
+If your test has only a single VM, you may use e.g.
+
+```ShellSession
+$ QEMU_NET_OPTS="hostfwd=tcp:127.0.0.1:2222-:22" ./result/bin/nixos-test-driver
+```
+
+to port-forward a port in the VM (here `22`) to the host machine (here port `2222`).
+
+This naturally does not work when multiple machines are involved,
+since a single port on the host cannot forward to multiple VMs.
+
+If the test defines multiple machines, you may opt to _temporarily_ set
+`virtualisation.forwardPorts` in the test definition for debugging.
+
+Such port forwardings connect via the VM's virtual network interface.
+Thus they cannot connect to ports that are only bound to the VM's
+loopback interface (`127.0.0.1`), and the VM's NixOS firewall
+must be configured to allow these connections.
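+
+A sketch of such a temporary `virtualisation.forwardPorts` setting for a node
+named `server`, forwarding host port 8080 to guest port 80:
+
+```nix
+nodes.server = { ... }: {
+  networking.firewall.allowedTCPPorts = [ 80 ];
+  virtualisation.forwardPorts = [
+    { from = "host"; host.port = 8080; guest.port = 80; }
+  ];
+};
+```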
+
+## Reuse VM state {#sec-nixos-test-reuse-vm-state}
+
+You can re-use the VM state from a previous run by passing the
+`--keep-vm-state` flag.
+
+```ShellSession
+$ ./result/bin/nixos-test-driver --keep-vm-state
+```
+
+The machine state is stored in the `$TMPDIR/vm-state-machinename`
+directory.
+
+## Interactive-only test configuration {#sec-nixos-test-interactive-configuration}
+
+The `.driverInteractive` attribute combines the regular test configuration with
+definitions from the [`interactive` submodule](#test-opt-interactive). This gives you
+a more usable, graphical, but slightly different configuration.
+
+You can add your own interactive-only test configuration by adding extra
+configuration to the [`interactive` submodule](#test-opt-interactive).
+
+To interactively run only the regular configuration, build the `<test>.driver` attribute
+instead, and run it with the `--interactive` flag: `./result/bin/nixos-test-driver --interactive`.
diff --git a/nixpkgs/nixos/doc/manual/development/running-nixos-tests.section.md b/nixpkgs/nixos/doc/manual/development/running-nixos-tests.section.md
new file mode 100644
index 000000000000..33076f5dc2a7
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/running-nixos-tests.section.md
@@ -0,0 +1,20 @@
+# Running Tests {#sec-running-nixos-tests}
+
+You can run tests using `nix-build`. For example, to run the test
+[`login.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests/login.nix),
+you do:
+
+```ShellSession
+$ cd /my/git/clone/of/nixpkgs
+$ nix-build -A nixosTests.login
+```
+
+After building/downloading all required dependencies, this will perform
+a build that starts a QEMU/KVM virtual machine containing a NixOS
+system. The virtual machine mounts the Nix store of the host; this makes
+VM creation very fast, as no disk image needs to be created. Afterwards,
+you can view a log of the test:
+
+```ShellSession
+$ nix-store --read-log result
+```
diff --git a/nixpkgs/nixos/doc/manual/development/settings-options.section.md b/nixpkgs/nixos/doc/manual/development/settings-options.section.md
new file mode 100644
index 000000000000..3a4800742b04
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/settings-options.section.md
@@ -0,0 +1,247 @@
+# Options for Program Settings {#sec-settings-options}
+
+Many programs have configuration files where program-specific settings
+can be declared. File formats can be separated into two categories:
+
+-   Nix-representable ones: These can trivially be mapped to a subset of
+    Nix syntax. JSON is an example, since its values like
+    `{"foo":{"bar":10}}` can be mapped directly to Nix:
+    `{ foo = { bar = 10; }; }`. Other examples are INI, YAML and TOML.
+    The following section explains the convention for these settings.
+
+-   Non-nix-representable ones: These can't be trivially mapped to a
+    subset of Nix syntax. Most generic programming languages are in this
+    group, e.g. bash, since the statement `if true; then echo hi; fi`
+    doesn't have a trivial representation in Nix.
+
+    Currently there are no fixed conventions for these, but it is common
+    to have a `configFile` option for setting the configuration file
+    path directly. The default value of `configFile` can be an
+    auto-generated file, with convenient options for controlling the
+    contents. For example an option of type `attrsOf str` can be used
+    for representing environment variables which generates a section
+    like `export FOO="foo"`. Often it can also be useful to include
+    an `extraConfig` option of type `lines` to allow arbitrary text
+    after the autogenerated part of the file, as sketched below.
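+
+A sketch of this `configFile`/`extraConfig` pattern for a hypothetical
+service `foo`, assuming the usual `{ config, lib, pkgs, ... }` module
+arguments (all names here are illustrative):
+
+```nix
+options.services.foo = {
+  extraConfig = lib.mkOption {
+    type = lib.types.lines;
+    default = "";
+    description = "Text appended verbatim to the generated configuration file.";
+  };
+  configFile = lib.mkOption {
+    type = lib.types.path;
+    # The default is an auto-generated file; users may point this elsewhere.
+    default = pkgs.writeText "foo.conf" ''
+      # Generated by NixOS
+      ${config.services.foo.extraConfig}
+    '';
+    description = "Configuration file used by foo.";
+  };
+};
+```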
+
+## Nix-representable Formats (JSON, YAML, TOML, INI, ...) {#sec-settings-nix-representable}
+
+By convention, formats like this are handled with a generic `settings`
+option, representing the full program configuration as a Nix value. The
+type of this option should represent the format. The most common formats
+have a predefined type and string generator already declared under
+`pkgs.formats`:
+
+`pkgs.formats.javaProperties` { *`comment`* ? `"Generated with Nix"` }
+
+:   A function taking an attribute set with values
+
+    `comment`
+
+    :   A string to put at the start of the
+        file in a comment. It can have multiple
+        lines.
+
+    It returns the `type`: `attrsOf str` and a function
+    `generate` to build a Java `.properties` file, taking
+    care of the correct escaping, etc.
+
+`pkgs.formats.json` { }
+
+:   A function taking an empty attribute set (for future extensibility)
+    and returning a set with JSON-specific attributes `type` and
+    `generate` as specified [below](#pkgs-formats-result).
+
+`pkgs.formats.yaml` { }
+
+:   A function taking an empty attribute set (for future extensibility)
+    and returning a set with YAML-specific attributes `type` and
+    `generate` as specified [below](#pkgs-formats-result).
+
+`pkgs.formats.ini` { *`listsAsDuplicateKeys`* ? false, *`listToValue`* ? null, \.\.\. }
+
+:   A function taking an attribute set with values
+
+    `listsAsDuplicateKeys`
+
+    :   A boolean for controlling whether list values can be used to
+        represent duplicate INI keys
+
+    `listToValue`
+
+    :   A function for turning a list of values into a single value.
+
+    It returns a set with INI-specific attributes `type` and `generate`
+    as specified [below](#pkgs-formats-result).
+
+`pkgs.formats.toml` { }
+
+:   A function taking an empty attribute set (for future extensibility)
+    and returning a set with TOML-specific attributes `type` and
+    `generate` as specified [below](#pkgs-formats-result).
+
+`pkgs.formats.elixirConf { elixir ? pkgs.elixir }`
+
+:   A function taking an attribute set with values
+
+    `elixir`
+
+    :   The Elixir package which will be used to format the generated output
+
+    It returns a set with Elixir-Config-specific attributes `type`, `lib`, and
+    `generate` as specified [below](#pkgs-formats-result).
+
+    The `lib` attribute contains functions to be used in settings, for
+    generating special Elixir values:
+
+    `mkRaw elixirCode`
+
+    :   Outputs the given string as raw Elixir code
+
+    `mkGetEnv { envVariable, fallback ? null }`
+
+    :   Makes the configuration fetch an environment variable at runtime
+
+    `mkAtom atom`
+
+    :   Outputs the given string as an Elixir atom, instead of the default
+        Elixir binary string. Note: lowercase atoms still need to be prefixed
+        with `:`
+
+    `mkTuple array`
+
+    :   Outputs the given array as an Elixir tuple, instead of the default
+        Elixir list
+
+    `mkMap attrset`
+
+    :   Outputs the given attribute set as an Elixir map, instead of the
+        default Elixir keyword list
+
+
+[]{#pkgs-formats-result}
+These functions all return an attribute set with these values:
+
+`type`
+
+:   A module system type representing a value of the format
+
+`lib`
+
+:   Utility functions for convenience, or special interactions with the format.
+    This attribute is optional. It may contain a `types` attribute with
+    types specific to this format.
+
+`generate` *`filename jsonValue`*
+
+:   A function that can render a value of the format to a file. Returns
+    a file path.
+
+    ::: {.note}
+    This function puts the value contents in the Nix store. So this
+    should be avoided for secrets.
+    :::
+
+::: {#ex-settings-nix-representable .example}
+### Module with conventional `settings` option
+
+The following shows a module for an example program that uses a JSON
+configuration file. It demonstrates how the above values can be used, along
+with some other related best practices. See the comments for
+explanations.
+
+```nix
+{ options, config, lib, pkgs, ... }:
+let
+  cfg = config.services.foo;
+  # Define the settings format used for this program
+  settingsFormat = pkgs.formats.json {};
+in {
+
+  options.services.foo = {
+    enable = lib.mkEnableOption "foo service";
+
+    settings = lib.mkOption {
+      # Setting this type allows for correct merging behavior
+      type = settingsFormat.type;
+      default = {};
+      description = ''
+        Configuration for foo, see
+        <link xlink:href="https://example.com/docs/foo"/>
+        for supported settings.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # We can assign some default settings here to make the service work by just
+    # enabling it. We use `mkDefault` for values that can be changed without
+    # problems
+    services.foo.settings = {
+      # Fails at runtime without any value set
+      log_level = lib.mkDefault "WARN";
+
+      # We assume systemd's `StateDirectory` is used, so we require this value,
+      # therefore no mkDefault
+      data_path = "/var/lib/foo";
+
+      # Since we use this to create a user we need to know the default value at
+      # eval time
+      user = lib.mkDefault "foo";
+    };
+
+    environment.etc."foo.json".source =
+      # The formats generator function takes a filename and the Nix value
+      # representing the format value and produces a filepath with that value
+      # rendered in the format
+      settingsFormat.generate "foo-config.json" cfg.settings;
+
+    # We know that the `user` attribute exists because we set a default value
+    # for it above, allowing us to use it without worries here
+    users.users.${cfg.settings.user} = { isSystemUser = true; };
+
+    # ...
+  };
+}
+```
+:::
+
+### Option declarations for attributes {#sec-settings-attrs-options}
+
+Some `settings` attributes may deserve some extra care. They may need a
+different type, default, or merging behavior, or they may be essential
+options that should show their documentation in the manual. This can be
+done using [](#sec-freeform-modules).
+
+We extend the above example using freeform modules to declare an option for
+the port, which ensures it is a valid integer and makes it show
+up in the manual.
+
+::: {#ex-settings-typed-attrs .example}
+### Declaring a type-checked `settings` attribute
+```nix
+settings = lib.mkOption {
+  type = lib.types.submodule {
+
+    freeformType = settingsFormat.type;
+
+    # Declare an option for the port such that the type is checked and this option
+    # is shown in the manual.
+    options.port = lib.mkOption {
+      type = lib.types.port;
+      default = 8080;
+      description = ''
+        Which port this service should listen on.
+      '';
+    };
+
+  };
+  default = {};
+  description = ''
+    Configuration for Foo, see
+    <link xlink:href="https://example.com/docs/foo"/>
+    for supported values.
+  '';
+};
+```
+:::
diff --git a/nixpkgs/nixos/doc/manual/development/sources.chapter.md b/nixpkgs/nixos/doc/manual/development/sources.chapter.md
new file mode 100644
index 000000000000..88173f7135bd
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/sources.chapter.md
@@ -0,0 +1,77 @@
+# Getting the Sources {#sec-getting-sources}
+
+By default, NixOS's `nixos-rebuild` command uses the NixOS and Nixpkgs
+sources provided by the `nixos` channel (kept in
+`/nix/var/nix/profiles/per-user/root/channels/nixos`). To modify NixOS,
+however, you should check out the latest sources from Git. This is as
+follows:
+
+```ShellSession
+$ git clone https://github.com/NixOS/nixpkgs
+$ cd nixpkgs
+$ git remote update origin
+```
+
+This will check out the latest Nixpkgs sources to `./nixpkgs` and the NixOS
+sources to `./nixpkgs/nixos`. (The NixOS source tree lives in a
+subdirectory of the Nixpkgs repository.) The `nixpkgs` repository has
+branches that correspond to each Nixpkgs/NixOS channel (see
+[](#sec-upgrading) for more information about channels). Thus, the
+Git branch `origin/nixos-17.03` will contain the latest built and tested
+version available in the `nixos-17.03` channel.
+
+It's often inconvenient to develop directly on the master branch, since
+if somebody has just committed (say) a change to GCC, then the binary
+cache may not have caught up yet and you'll have to rebuild everything
+from source. So you may want to create a local branch based on your
+current NixOS version:
+
+```ShellSession
+$ nixos-version
+17.09pre104379.6e0b727 (Hummingbird)
+
+$ git checkout -b local 6e0b727
+```
+
+Or, to base your local branch on the latest version available in a NixOS
+channel:
+
+```ShellSession
+$ git remote update origin
+$ git checkout -b local origin/nixos-17.03
+```
+
+(Replace `nixos-17.03` with the name of the channel you want to use.)
+You can use `git merge` or `git rebase` to keep your local branch in sync
+with the channel, e.g.
+
+```ShellSession
+$ git remote update origin
+$ git merge origin/nixos-17.03
+```
+
+You can use `git cherry-pick` to copy commits from your local branch to
+the upstream branch.
+
+If you want to rebuild your system using your (modified) sources, you
+need to tell `nixos-rebuild` about them using the `-I` flag:
+
+```ShellSession
+# nixos-rebuild switch -I nixpkgs=/my/sources/nixpkgs
+```
+
+If you want `nix-env` to use the expressions in `/my/sources`, use
+`nix-env -f /my/sources/nixpkgs`, or change the default by adding a symlink
+in `~/.nix-defexpr`:
+
+```ShellSession
+$ ln -s /my/sources/nixpkgs ~/.nix-defexpr/nixpkgs
+```
+
+You may want to delete the symlink `~/.nix-defexpr/channels_root` to
+prevent root's NixOS channel from clashing with your own tree (this may
+break the command-not-found utility though). If you want to go back to
+the default state, you can remove the `~/.nix-defexpr` directory
+completely, then log out and log in again; it will be recreated
+with a link to the root channels.
diff --git a/nixpkgs/nixos/doc/manual/development/testing-installer.chapter.md b/nixpkgs/nixos/doc/manual/development/testing-installer.chapter.md
new file mode 100644
index 000000000000..2eaa01614920
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/testing-installer.chapter.md
@@ -0,0 +1,18 @@
+# Testing the Installer {#ch-testing-installer}
+
+Building, burning, and booting from an installation CD is rather
+tedious, so here is a quick way to see if the installer works properly:
+
+```ShellSession
+# mount -t tmpfs none /mnt
+# nixos-generate-config --root /mnt
+$ nix-build '<nixpkgs/nixos>' -A config.system.build.nixos-install
+# ./result/bin/nixos-install
+```
+
+To start a login shell in the new NixOS installation in `/mnt`:
+
+```ShellSession
+$ nix-build '<nixpkgs/nixos>' -A config.system.build.nixos-enter
+# ./result/bin/nixos-enter
+```
diff --git a/nixpkgs/nixos/doc/manual/development/unit-handling.section.md b/nixpkgs/nixos/doc/manual/development/unit-handling.section.md
new file mode 100644
index 000000000000..32d44dbfff05
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/unit-handling.section.md
@@ -0,0 +1,65 @@
+# Unit handling {#sec-unit-handling}
+
+To figure out what units need to be started/stopped/restarted/reloaded, the
+script first checks the current state of the system, similar to what `systemctl
+list-units` shows. For each of the units, the script goes through the following
+checks:
+
+- Is the unit file still in the new system? If not, **stop** the service unless
+  it sets `X-StopOnRemoval` in the `[Unit]` section to `false`.
+
+- Is it a `.target` unit? If so, **start** it unless it sets
+  `RefuseManualStart` in the `[Unit]` section to `true` or `X-OnlyManualStart`
+  in the `[Unit]` section to `true`. Also **stop** the unit again unless it
+  sets `X-StopOnReconfiguration` to `false`.
+
+- Are the contents of the unit files different? They are compared by parsing
+  them and comparing their contents. If they are different but only
+  `X-Reload-Triggers` in the `[Unit]` section is changed, **reload** the unit.
+  The NixOS module system allows setting these triggers with the option
+  [systemd.services.\<name\>.reloadTriggers](#opt-systemd.services). There are
+  some additional keys in the `[Unit]` section that are ignored as well. If the
+  unit files differ in any way, the following actions are performed:
+
+  - `.path` and `.slice` units are ignored. There is no need to restart them
+    since changes in their values are applied by systemd when systemd is
+    reloaded.
+
+  - `.mount` units are **reload**ed if only their `Options` changed. If anything
+    else changed (like `What`), they are **restart**ed unless they are the mount
+    unit for `/` or `/nix` in which case they are reloaded to prevent the system
+    from crashing. Note that this is the case for `.mount` units and not for
+    mounts from `/etc/fstab`. These are explained in [](#sec-switching-systems).
+
+  - `.socket` units are currently ignored. This is to be fixed at a later
+    point.
+
+  - The rest of the units (mostly `.service` units) are then **reload**ed if
+    `X-ReloadIfChanged` in the `[Service]` section is set to `true` (exposed
+    via [systemd.services.\<name\>.reloadIfChanged](#opt-systemd.services)).
+    A small exception is made for units that were deactivated in the meantime,
+    for example because they require a unit that got stopped before. These
+    are **start**ed instead of reloaded.
+
+  - If the reload flag is not set, some more flags decide if the unit is
+    skipped. These flags are `X-RestartIfChanged` in the `[Service]` section
+    (exposed via
+    [systemd.services.\<name\>.restartIfChanged](#opt-systemd.services)),
+    `RefuseManualStop` in the `[Unit]` section, and `X-OnlyManualStart` in the
+    `[Unit]` section.
+
+  - Further behavior depends on the unit having `X-StopIfChanged` in the
+    `[Service]` section set to `true` (exposed via
+    [systemd.services.\<name\>.stopIfChanged](#opt-systemd.services); see the
+    sketch after this list). This is
+    set to `true` by default and must be explicitly turned off if not wanted.
+    If the flag is enabled, the unit is **stop**ped and then **start**ed. If
+    not, the unit is **restart**ed. The goal of the flag is to make sure that
+    the new unit never runs in the old environment which is still in place
+    before the activation script is run. This behavior is different when the
+    service is socket-activated, as outlined in the following steps.
+
+  - The last thing that is taken into account is whether the unit is a service
+    and socket-activated. If `X-StopIfChanged` is **not** set, the service
+    is **restart**ed with the others. If it is set, both the service and the
+    socket are **stop**ped and the socket is **start**ed, leaving socket
+    activation to start the service when it's needed.
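+
+The flags above are exposed as NixOS options; a minimal sketch for a
+hypothetical service `foo`, assuming the module also defines
+`environment.etc."foo.conf"` and has `config` in scope:
+
+```nix
+systemd.services.foo = {
+  # Reload instead of restarting when only the rendered config file changes.
+  reloadTriggers = [ config.environment.etc."foo.conf".source ];
+  reloadIfChanged = true;
+
+  # Alternatively, restart in place instead of stop + start on other changes:
+  # stopIfChanged = false;
+};
+```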
diff --git a/nixpkgs/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md b/nixpkgs/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
new file mode 100644
index 000000000000..ccadb819e061
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/what-happens-during-a-system-switch.chapter.md
@@ -0,0 +1,59 @@
+# What happens during a system switch? {#sec-switching-systems}
+
+Running `nixos-rebuild switch` is one of the more common tasks under NixOS.
+This chapter explains some of the internals of this command to make it simpler
+for new module developers to configure their units correctly and to make it
+easier to understand what is happening and why for curious administrators.
+
+`nixos-rebuild`, like many deployment solutions, calls `switch-to-configuration`
+which resides in a NixOS system at `$out/bin/switch-to-configuration`. The
+script is called with the action that is to be performed, such as `switch`, `test`,
+or `boot`. There is also the `dry-activate` action, which does not actually perform
+the actions but instead prints what it would do if you called it with `test`.
+This feature can be used to check which service states would change if the
+configuration were switched to.
+
+If the action is `switch` or `boot`, the bootloader is updated first so the
+configuration will be the next one to boot. Unless `NIXOS_NO_SYNC` is set to
+`1`, `/nix/store` is synced to disk.
+
+If the action is `switch` or `test`, the currently running system is inspected
+and the actions to switch to the new system are calculated. This process takes
+two data sources into account: `/etc/fstab` and the current systemd status.
+Mounts and swaps are read from `/etc/fstab` and the corresponding actions are
+generated. If the options of a mount are modified, for example, the proper `.mount`
+unit is reloaded (or restarted if anything else changed and it's neither the root
+mount nor the nix store). The current systemd state is inspected, the difference
+between the current system and the desired configuration is calculated and
+actions are generated to get to this state. There are a lot of nuances that can
+be controlled by the units which are explained here.
+
+After calculating what should be done, the actions are carried out. The order
+of actions is always the same:
+- Stop units (`systemctl stop`)
+- Run activation script (`$out/activate`)
+- See if the activation script requested more units to restart
+- Restart systemd if needed (`systemctl daemon-reexec`)
+- Forget about the failed state of units (`systemctl reset-failed`)
+- Reload systemd (`systemctl daemon-reload`)
+- Reload systemd user instances (`systemctl --user daemon-reload`)
+- Set up tmpfiles (`systemd-tmpfiles --create`)
+- Reload units (`systemctl reload`)
+- Restart units (`systemctl restart`)
+- Start units (`systemctl start`)
+- Inspect what changed during these actions and print units that failed and
+  that were newly started
+
+By default, some units are filtered from the outputs to make it less spammy.
+This can be disabled for development or testing by setting the environment variable
+`STC_DISPLAY_ALL_UNITS=1`.
+
+Most of these actions are self-explanatory, but some of them have to do
+with our units or the activation script. For this reason, these topics are
+explained in the next sections.
+
+```{=include=} sections
+unit-handling.section.md
+activation-script.section.md
+non-switchable-systems.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/development/writing-documentation.chapter.md b/nixpkgs/nixos/doc/manual/development/writing-documentation.chapter.md
new file mode 100644
index 000000000000..3d9bd318cf33
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/writing-documentation.chapter.md
@@ -0,0 +1,93 @@
+# Writing NixOS Documentation {#sec-writing-documentation}
+
+As NixOS grows, so too does the need for a catalogue and explanation of
+its extensive functionality. Collecting pertinent information from
+disparate sources and presenting it in an accessible style would be a
+worthy contribution to the project.
+
+## Building the Manual {#sec-writing-docs-building-the-manual}
+
+The DocBook sources of the [](#book-nixos-manual) are in the
+[`nixos/doc/manual`](https://github.com/NixOS/nixpkgs/tree/master/nixos/doc/manual)
+subdirectory of the Nixpkgs repository.
+
+You can quickly validate your edits with `make`:
+
+```ShellSession
+$ cd /path/to/nixpkgs/nixos/doc/manual
+$ nix-shell
+nix-shell$ devmode
+```
+
+Once you are done making modifications to the manual, it's important to
+build it before committing. You can do that as follows:
+
+```ShellSession
+nix-build nixos/release.nix -A manual.x86_64-linux
+```
+
+When this command successfully finishes, it will tell you where the
+manual got generated. The HTML will be accessible through the `result`
+symlink at `./result/share/doc/nixos/index.html`.
+
+## Editing DocBook XML {#sec-writing-docs-editing-docbook-xml}
+
+For general information on how to write in DocBook, see [DocBook 5: The
+Definitive Guide](https://tdg.docbook.org/tdg/5.1/).
+
+Emacs nXML Mode is very helpful for editing DocBook XML because it
+validates the document as you write, and precisely locates errors. To
+use it, see [](#sec-emacs-docbook-xml).
+
+[Pandoc](https://pandoc.org/) can generate DocBook XML from a multitude of
+formats, which makes a good starting point. Here is an example of Pandoc
+invocation to convert GitHub-Flavoured MarkDown to DocBook 5 XML:
+
+```ShellSession
+pandoc -f markdown_github -t docbook5 docs.md -o my-section.xml
+```
+
+Pandoc can also quickly convert a single `section.xml` to HTML, which is
+helpful when drafting.
+
+Sometimes writing valid DocBook is too difficult. In this case,
+submit your documentation updates in a [GitHub
+Issue](https://github.com/NixOS/nixpkgs/issues/new) and someone will
+handle the conversion to XML for you.
+
+## Creating a Topic {#sec-writing-docs-creating-a-topic}
+
+You can use an existing topic as a basis for the new topic or create a
+topic from scratch.
+
+Keep the following guidelines in mind when you create and add a topic:
+
+-   The NixOS [`book`](https://tdg.docbook.org/tdg/5.0/book.html)
+    element is in `nixos/doc/manual/manual.xml`. It includes several
+    [`parts`](https://tdg.docbook.org/tdg/5.0/book.html) which are in
+    subdirectories.
+
+-   Store the topic file in the same directory as the `part` to which it
+    belongs. If your topic is about configuring a NixOS module, then the
+    XML file can be stored alongside the module definition `nix` file.
+
+-   If you include multiple words in the file name, separate the words
+    with a dash. For example: `ipv6-config.xml`.
+
+-   Make sure that the `xml:id` value is unique. You can use abbreviations
+    if the ID is too long. For example: `nixos-config`.
+
+-   Determine whether your topic is a chapter or a section. If you are
+    unsure, open an existing topic file and check whether the main
+    element is chapter or section.
+
+## Adding a Topic to the Book {#sec-writing-docs-adding-a-topic}
+
+Open the parent CommonMark file and add a line to the list of
+chapters with the file name of the topic that you created. If you
+created a `section`, you add the file to the `chapter` file. If you created
+a `chapter`, you add the file to the `part` file.
+
+If the topic is about configuring a NixOS module, it can be
+automatically included in the manual by using the `meta.doc` attribute.
+See [](#sec-meta-attributes) for an explanation.
diff --git a/nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md b/nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md
new file mode 100644
index 000000000000..e07b899e6df7
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/writing-modules.chapter.md
@@ -0,0 +1,202 @@
+# Writing NixOS Modules {#sec-writing-modules}
+
+NixOS has a modular system for declarative configuration. This system
+combines multiple *modules* to produce the full system configuration.
+One of the modules that constitute the configuration is
+`/etc/nixos/configuration.nix`. Most of the others live in the
+[`nixos/modules`](https://github.com/NixOS/nixpkgs/tree/master/nixos/modules)
+subdirectory of the Nixpkgs tree.
+
+Each NixOS module is a file that handles one logical aspect of the
+configuration, such as a specific kind of hardware, a service, or
+network settings. A module configuration does not have to handle
+everything from scratch; it can use the functionality provided by other
+modules for its implementation. Thus a module can *declare* options that
+can be used by other modules, and conversely can *define* options
+provided by other modules in its own implementation. For example, the
+module
+[`pam.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/security/pam.nix)
+declares the option `security.pam.services` that allows other modules (e.g.
+[`sshd.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/services/networking/ssh/sshd.nix))
+to define PAM services; and it defines the option `environment.etc` (declared by
+[`etc.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/system/etc/etc.nix))
+to cause files to be created in `/etc/pam.d`.
+
+In [](#sec-configuration-syntax), we saw the following structure of
+NixOS modules:
+
+```nix
+{ config, pkgs, ... }:
+
+{ option definitions
+}
+```
+
+This is actually an *abbreviated* form of module that only defines
+options, but does not declare any. The structure of full NixOS modules
+is shown in [Example: Structure of NixOS Modules](#ex-module-syntax).
+
+::: {#ex-module-syntax .example}
+### Structure of NixOS Modules
+```nix
+{ config, pkgs, ... }:
+
+{
+  imports =
+    [ paths of other modules
+    ];
+
+  options = {
+    option declarations
+  };
+
+  config = {
+    option definitions
+  };
+}
+```
+:::
+
+The meaning of each part is as follows.
+
+-   The first line makes the current Nix expression a function. The variable
+    `pkgs` contains Nixpkgs (by default, it takes the `nixpkgs` entry of
+    `NIX_PATH`, see the [Nix manual](https://nixos.org/manual/nix/stable/#sec-common-env)
+    for further details), while `config` contains the full system
+    configuration. This line can be omitted if there is no reference to
+    `pkgs` and `config` inside the module.
+
+-   This `imports` list enumerates the paths to other NixOS modules that
+    should be included in the evaluation of the system configuration. A
+    default set of modules is defined in the file `modules/module-list.nix`.
+    These don't need to be added in the import list.
+
+-   The attribute `options` is a nested set of *option declarations*
+    (described below).
+
+-   The attribute `config` is a nested set of *option definitions* (also
+    described below).
+
+[Example: NixOS Module for the "locate" Service](#locate-example)
+shows a module that handles the regular update of the "locate" database,
+an index of all files in the file system. This module declares two
+options that can be defined by other modules (typically the user's
+`configuration.nix`): `services.locate.enable` (whether the database should
+be updated) and `services.locate.interval` (when the update should be done).
+It implements its functionality by defining two options declared by other
+modules: `systemd.services` (the set of all systemd services) and
+`systemd.timers` (the list of commands to be executed periodically by
+`systemd`).
+
+Care must be taken when writing systemd services using `Exec*` directives. By
+default systemd performs substitution on `%<char>` specifiers in these
+directives, expands environment variables from `$FOO` and `${FOO}`, splits
+arguments on whitespace, and splits commands on `;`. All of these must be escaped
+to avoid unexpected substitution or splitting when interpolating into an `Exec*`
+directive, e.g. when using an `extraArgs` option to pass additional arguments to
+the service. The functions `utils.escapeSystemdExecArg` and
+`utils.escapeSystemdExecArgs` are provided for this, see [Example: Escaping in
+Exec directives](#exec-escaping-example) for an example. When using these
+functions system environment substitution should *not* be disabled explicitly.
+
+::: {#locate-example .example}
+### NixOS Module for the "locate" Service
+```nix
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.locate;
+in {
+  options.services.locate = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        If enabled, NixOS will periodically update the database of
+        files used by the locate command.
+      '';
+    };
+
+    interval = mkOption {
+      type = types.str;
+      default = "02:15";
+      example = "hourly";
+      description = ''
+        Update the locate database at this interval. Updates by
+        default at 2:15 AM every day.
+
+        The format is described in
+        systemd.time(7).
+      '';
+    };
+
+    # Other options omitted for documentation
+  };
+
+  config = {
+    systemd.services.update-locatedb =
+      { description = "Update Locate Database";
+        path  = [ pkgs.su ];
+        script =
+          ''
+            mkdir -m 0755 -p $(dirname ${toString cfg.output})
+            exec updatedb \
+              --localuser=${cfg.localuser} \
+              ${optionalString (!cfg.includeStore) "--prunepaths='/nix/store'"} \
+              --output=${toString cfg.output} ${concatStringsSep " " cfg.extraFlags}
+          '';
+      };
+
+    systemd.timers.update-locatedb = mkIf cfg.enable
+      { description = "Update timer for locate database";
+        partOf      = [ "update-locatedb.service" ];
+        wantedBy    = [ "timers.target" ];
+        timerConfig.OnCalendar = cfg.interval;
+      };
+  };
+}
+```
+:::
+
+::: {#exec-escaping-example .example}
+### Escaping in Exec directives
+```nix
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.echo;
+  echoAll = pkgs.writeScript "echo-all" ''
+    #! ${pkgs.runtimeShell}
+    for s in "$@"; do
+      printf '%s\n' "$s"
+    done
+  '';
+  args = [ "a%Nything" "lang=\${LANG}" ";" "/bin/sh -c date" ];
+in {
+  systemd.services.echo =
+    { description = "Echo to the journal";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.Type = "oneshot";
+      serviceConfig.ExecStart = ''
+        ${echoAll} ${utils.escapeSystemdExecArgs args}
+      '';
+    };
+}
+```
+:::
+
+```{=include=} sections
+option-declarations.section.md
+option-types.section.md
+option-def.section.md
+assertions.section.md
+meta-attributes.section.md
+importing-modules.section.md
+replace-modules.section.md
+freeform-modules.section.md
+settings-options.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md b/nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md
new file mode 100644
index 000000000000..84b247fd2042
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/development/writing-nixos-tests.section.md
@@ -0,0 +1,279 @@
+# Writing Tests {#sec-writing-nixos-tests}
+
+A NixOS test is a module that has the following structure:
+
+```nix
+{
+
+  # One or more machines:
+  nodes =
+    { machine =
+        { config, pkgs, ... }: { … };
+      machine2 =
+        { config, pkgs, ... }: { … };
+      …
+    };
+
+  testScript =
+    ''
+      Python code…
+    '';
+}
+```
+
+We refer to the whole test above as a test module, whereas the values
+in [`nodes.<name>`](#test-opt-nodes) are NixOS modules themselves.
+
+The option [`testScript`](#test-opt-testScript) is a piece of Python code that executes the
+test (described below). During the test, it will start one or more
+virtual machines, the configuration of which is described by
+the option [`nodes`](#test-opt-nodes).
+
+An example of a single-node test is
+[`login.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests/login.nix).
+It only needs a single machine to test whether users can log in
+on the virtual console, whether device ownership is correctly maintained
+when switching between consoles, and so on. An interesting multi-node test is
+[`nfs/simple.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests/nfs/simple.nix).
+It uses two client nodes to test correct locking across server crashes.
+
+## Calling a test {#sec-calling-nixos-tests}
+
+Tests are invoked differently depending on whether the test is part of NixOS or lives in a different project.
+
+### Testing within NixOS {#sec-call-nixos-test-in-nixos}
+
+Tests that are part of NixOS are added to [`nixos/tests/all-tests.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests/all-tests.nix).
+
+```nix
+  hostname = runTest ./hostname.nix;
+```
+
+Overrides can be added by defining an anonymous module in `all-tests.nix`.
+
+```nix
+  hostname = runTest {
+    imports = [ ./hostname.nix ];
+    defaults.networking.firewall.enable = false;
+  };
+```
+
+You can run a test with attribute name `hostname` in `nixos/tests/all-tests.nix` by invoking:
+
+```shell
+cd /my/git/clone/of/nixpkgs
+nix-build -A nixosTests.hostname
+```
+
+### Testing outside the NixOS project {#sec-call-nixos-test-outside-nixos}
+
+Outside the `nixpkgs` repository, you can instantiate the test by first importing the NixOS library,
+
+```nix
+let nixos-lib = import (nixpkgs + "/nixos/lib") { };
+in
+
+nixos-lib.runTest {
+  imports = [ ./test.nix ];
+  hostPkgs = pkgs;  # the Nixpkgs package set used outside the VMs
+  defaults.services.foo.package = mypkg;
+}
+```
+
+`runTest` returns a derivation that runs the test.
+
+## Configuring the nodes {#sec-nixos-test-nodes}
+
+There are a few special NixOS options for test VMs:
+
+`virtualisation.memorySize`
+
+:   The memory of the VM in megabytes.
+
+`virtualisation.vlans`
+
+:   The virtual networks to which the VM is connected. See
+    [`nat.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/tests/nat.nix)
+    for an example.
+
+`virtualisation.writableStore`
+
+:   By default, the Nix store in the VM is not writable. If you enable
+    this option, a writable union file system is mounted on top of the
+    Nix store to make it appear writable. This is necessary for tests
+    that run Nix operations that modify the store.
+
+For more options, see the module
+[`qemu-vm.nix`](https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/virtualisation/qemu-vm.nix).
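+
+For instance, a sketch of a node using these options (the values are
+arbitrary):
+
+```nix
+nodes.machine = { config, pkgs, ... }: {
+  virtualisation.memorySize = 2048;     # megabytes
+  virtualisation.vlans = [ 1 ];         # attach to virtual network 1
+  virtualisation.writableStore = true;  # allow Nix operations that modify the store
+  environment.systemPackages = [ pkgs.hello ];
+};
+```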
+
+The test script is a sequence of Python statements that perform various
+actions, such as starting VMs, executing commands in the VMs, and so on.
+Each virtual machine is represented as an object stored in a variable
+named after the machine's identifier in the declarative
+config. If you specified a node `nodes.machine`, the following example starts the
+machine, waits until it has finished booting, then executes a command
+and checks that the output is more-or-less correct:
+
+```py
+machine.start()
+machine.wait_for_unit("default.target")
+if not "Linux" in machine.succeed("uname"):
+  raise Exception("Wrong OS")
+```
+
+The first line is technically unnecessary; machines are implicitly started
+when you first execute an action on them (such as `wait_for_unit` or
+`succeed`). If you have multiple machines, you can speed up the test by
+starting them in parallel:
+
+```py
+start_all()
+```
+
+If the hostname of a node contains characters that can't be used in a
+Python variable name, those characters will be replaced with
+underscores in the variable name, so `nodes.machine-a` will be exposed
+to Python as `machine_a`.
+
+## Machine objects {#ssec-machine-objects}
+
+The following methods are available on machine objects:
+
+@PYTHON_MACHINE_METHODS@
+
+To test user units declared by `systemd.user.services` the optional
+`user` argument can be used:
+
+```py
+machine.start()
+machine.wait_for_x()
+machine.wait_for_unit("xautolock.service", "x-session-user")
+```
+
+This applies to `systemctl`, `get_unit_info`, `wait_for_unit`,
+`start_job` and `stop_job`.
+
+For faster dev cycles it's also possible to disable the code-linters
+(this shouldn't be committed though):
+
+```nix
+{
+  skipLint = true;
+  nodes.machine =
+    { config, pkgs, ... }:
+    { configuration…
+    };
+
+  testScript =
+    ''
+      Python code…
+    '';
+}
+```
+
+This will produce a Nix warning at evaluation time. To fully disable the
+linter, wrap the test script in comment directives to disable the Black
+linter directly (again, don't commit this within the Nixpkgs
+repository):
+
+```nix
+  testScript =
+    ''
+      # fmt: off
+      Python code…
+      # fmt: on
+    '';
+```
+
+Similarly, the type checking of test scripts can be disabled in the following
+way:
+
+```nix
+{
+  skipTypeCheck = true;
+  nodes.machine =
+    { config, pkgs, ... }:
+    { configuration…
+    };
+}
+```
+
+## Failing tests early {#ssec-failing-tests-early}
+
+To fail tests early when certain invariants are no longer met (instead of waiting for the build to time out), the decorator `polling_condition` is provided. For example, if we are testing a program `foo` that should not quit after being started, we might write the following:
+
+```py
+@polling_condition
+def foo_running():
+    machine.succeed("pgrep -x foo")
+
+
+machine.succeed("foo --start")
+machine.wait_until_succeeds("pgrep -x foo")
+
+with foo_running:
+    ...  # Put `foo` through its paces
+```
+
+`polling_condition` takes the following (optional) arguments:
+
+`seconds_interval`
+
+:   specifies how often the condition should be polled:
+
+```py
+@polling_condition(seconds_interval=10)
+def foo_running():
+    machine.succeed("pgrep -x foo")
+```
+
+`description`
+
+:   is used in the log when the condition is checked. If this is not provided, the description is pulled from the docstring of the function. These two are therefore equivalent:
+
+```py
+@polling_condition
+def foo_running():
+    "check that foo is running"
+    machine.succeed("pgrep -x foo")
+```
+
+```py
+@polling_condition(description="check that foo is running")
+def foo_running():
+    machine.succeed("pgrep -x foo")
+```
+
+## Adding Python packages to the test script {#ssec-python-packages-in-test-script}
+
+When additional Python libraries are required in the test script, they can be
+added using the parameter `extraPythonPackages`. For example, you could add
+`numpy` like this:
+
+```nix
+{
+  extraPythonPackages = p: [ p.numpy ];
+
+  nodes = { };
+
+  # Type checking on extra packages doesn't work yet
+  skipTypeCheck = true;
+
+  testScript = ''
+    import numpy as np
+    assert str(np.zeros(4)) == "array([0., 0., 0., 0.])"
+  '';
+}
+```
+
+In that case, `numpy` is chosen from the generic `python3Packages`.
+
+## Test Options Reference {#sec-test-options-reference}
+
+The following options can be used when writing tests.
+
+```{=include=} options
+id-prefix: test-opt-
+list-id: test-options-list
+source: @NIXOS_TEST_OPTIONS_JSON@
+```
diff --git a/nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md b/nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md
new file mode 100644
index 000000000000..6d0675f21a03
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/building-images-via-systemd-repart.chapter.md
@@ -0,0 +1,137 @@
+# Building Images via `systemd-repart` {#sec-image-repart}
+
+You can build disk images in NixOS with the `image.repart` option provided by
+the module [image/repart.nix][]. This module uses `systemd-repart` to build the
+images and exposes its entire interface via the `repartConfig` option.
+
+[image/repart.nix]: https://github.com/NixOS/nixpkgs/blob/master/nixos/modules/image/repart.nix
+
+An example of how to build an image:
+
+```nix
+{ config, modulesPath, ... }: {
+
+  imports = [ "${modulesPath}/image/repart.nix" ];
+
+  image.repart = {
+    name = "image";
+    partitions = {
+      "esp" = {
+        contents = {
+          ...
+        };
+        repartConfig = {
+          Type = "esp";
+          ...
+        };
+      };
+      "root" = {
+        storePaths = [ config.system.build.toplevel ];
+        repartConfig = {
+          Type = "root";
+          Label = "nixos";
+          ...
+        };
+      };
+    };
+  };
+
+}
+```
+
+## Nix Store Partition {#sec-image-repart-store-partition}
+
+You can define a partition that only contains the Nix store and then mount it
+under `/nix/store`. Because the `/nix/store` part of the paths is already
+determined by the mount point, you have to set `stripNixStorePrefix = true;` so
+that the prefix is stripped from the paths before copying them into the image.
+
+```nix
+fileSystems."/nix/store".device = "/dev/disk/by-partlabel/nix-store";
+
+image.repart.partitions = {
+  "store" = {
+    storePaths = [ config.system.build.toplevel ];
+    stripNixStorePrefix = true;
+    repartConfig = {
+      Type = "linux-generic";
+      Label = "nix-store";
+      ...
+    };
+  };
+};
+```
+
+## Appliance Image {#sec-image-repart-appliance}
+
+The `image/repart.nix` module can also be used to build self-contained [software
+appliances][].
+
+[software appliances]: https://en.wikipedia.org/wiki/Software_appliance
+
+The generation-based update mechanism of NixOS is not suited for appliances.
+Updates of appliances are usually either performed by replacing the entire
+image with a new one or by updating partitions via an A/B scheme. See the
+[Chrome OS update process][chrome-os-update] for an example of how to achieve
+this. The appliance image built in the following example does not contain a
+`configuration.nix` and thus you will not be able to call `nixos-rebuild` from
+this system.
+
+[chrome-os-update]: https://chromium.googlesource.com/aosp/platform/system/update_engine/+/HEAD/README.md
+
+```nix
+let
+  pkgs = import <nixpkgs> { };
+  efiArch = pkgs.stdenv.hostPlatform.efiArch;
+in
+(pkgs.nixos [
+  ({ config, lib, pkgs, modulesPath, ... }: {
+
+    imports = [ "${modulesPath}/image/repart.nix" ];
+
+    boot.loader.grub.enable = false;
+
+    fileSystems."/".device = "/dev/disk/by-label/nixos";
+
+    image.repart = {
+      name = "image";
+      partitions = {
+        "esp" = {
+          contents = {
+            "/EFI/BOOT/BOOT${lib.toUpper efiArch}.EFI".source =
+              "${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${efiArch}.efi";
+
+            "/loader/entries/nixos.conf".source = pkgs.writeText "nixos.conf" ''
+              title NixOS
+              linux /EFI/nixos/kernel.efi
+              initrd /EFI/nixos/initrd.efi
+              options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
+            '';
+
+            "/EFI/nixos/kernel.efi".source =
+              "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}";
+
+            "/EFI/nixos/initrd.efi".source =
+              "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+          };
+          repartConfig = {
+            Type = "esp";
+            Format = "vfat";
+            SizeMinBytes = "96M";
+          };
+        };
+        "root" = {
+          storePaths = [ config.system.build.toplevel ];
+          repartConfig = {
+            Type = "root";
+            Format = "ext4";
+            Label = "nixos";
+            Minimize = "guess";
+          };
+        };
+      };
+    };
+
+  })
+]).image
+```
diff --git a/nixpkgs/nixos/doc/manual/installation/building-nixos.chapter.md b/nixpkgs/nixos/doc/manual/installation/building-nixos.chapter.md
new file mode 100644
index 000000000000..7b0b5ea1c447
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/building-nixos.chapter.md
@@ -0,0 +1,80 @@
+# Building a NixOS (Live) ISO {#sec-building-image}
+
+Default live installer configurations are available inside `nixos/modules/installer/cd-dvd`.
+For building other system images, [nixos-generators] is a good place to start.
+
+You have two options:
+
+- Use any of those default configurations as is
+- Combine them with (any of) your host config(s)
+
+System images, such as the live installer ones, know how to enforce configuration settings
+on which they immediately depend in order to work correctly.
+
+However, if you are confident, you can opt to override those
+enforced values with `mkForce`.
+
+[nixos-generators]: https://github.com/nix-community/nixos-generators
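+
+For instance, here is a minimal sketch of that approach (the host module
+`./my-host-config.nix` is hypothetical, and `isoImage.isoBaseName` is used
+only to illustrate the `mkForce` pattern):
+
+```nix
+{ lib, modulesPath, ... }: {
+  imports = [
+    # One of the default live installer configurations.
+    (modulesPath + "/installer/cd-dvd/installation-cd-minimal.nix")
+    # Hypothetical: your own host configuration module.
+    ./my-host-config.nix
+  ];
+
+  # Illustrative override; only force values whose enforcement you understand.
+  isoImage.isoBaseName = lib.mkForce "nixos-custom";
+}
+```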
+
+## Practical Instructions {#sec-building-image-instructions}
+
+To build an ISO image for the channel `nixos-unstable`:
+
+```ShellSession
+$ git clone https://github.com/NixOS/nixpkgs.git
+$ cd nixpkgs/nixos
+$ git switch nixos-unstable
+$ nix-build -A config.system.build.isoImage -I nixos-config=modules/installer/cd-dvd/installation-cd-minimal.nix default.nix
+```
+
+To check the content of an ISO image, mount it like so:
+
+```ShellSession
+# mount -o loop -t iso9660 ./result/iso/cd.iso /mnt/iso
+```
+
+## Additional drivers or firmware {#sec-building-image-drivers}
+
+If you need additional (non-distributable) drivers or firmware in the
+installer, you might want to extend these configurations.
+
+For example, to build the GNOME graphical installer ISO, but with support for
+certain WiFi adapters present in some MacBooks, you can create the following
+file at `modules/installer/cd-dvd/installation-cd-graphical-gnome-macbook.nix`:
+
+```nix
+{ config, ... }:
+
+{
+  imports = [ ./installation-cd-graphical-gnome.nix ];
+
+  boot.initrd.kernelModules = [ "wl" ];
+
+  boot.kernelModules = [ "kvm-intel" "wl" ];
+  boot.extraModulePackages = [ config.boot.kernelPackages.broadcom_sta ];
+}
+```
+
+Then build it like in the example above:
+
+```ShellSession
+$ git clone https://github.com/NixOS/nixpkgs.git
+$ cd nixpkgs/nixos
+$ export NIXPKGS_ALLOW_UNFREE=1
+$ nix-build -A config.system.build.isoImage -I nixos-config=modules/installer/cd-dvd/installation-cd-graphical-gnome-macbook.nix default.nix
+```
+
+## Technical Notes {#sec-building-image-tech-notes}
+
+The config value enforcement is implemented via `mkImageMediaOverride = mkOverride 60;`
+and therefore takes precedence over simple value assignments, but still yields to `mkForce`.
+
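+To illustrate the resulting priority ordering, here is a minimal sketch
+(recall that `mkForce` is `mkOverride 50`, a plain assignment has priority
+100, and `mkDefault` is `mkOverride 1000`; lower numbers win):
+
+```nix
+{ lib, ... }: {
+  # A plain assignment (priority 100) loses to a value the image sets with
+  # mkImageMediaOverride (priority 60) ...
+  fileSystems."/".device = "/dev/disk/by-label/other";
+
+  # ... whereas mkForce (priority 50) would win over it:
+  # fileSystems."/".device = lib.mkForce "/dev/disk/by-label/other";
+}
+```
+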
+This property allows image designers to enforce, in a semantically correct way,
+those configuration values upon which the correct functioning of the image depends.
+
+For example, the ISO base image overrides those file systems which it needs at a minimum
+for correct functioning, while the installer base image overrides the entire file system
+layout, because a live medium can give no guarantees beyond those provided by the live
+medium itself. The latter is especially true before formatting the target
+block device(s). On the other hand, the netboot image only overrides its minimum
+dependencies, since netboot images are always made-to-target.
diff --git a/nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md b/nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md
new file mode 100644
index 000000000000..12abf90b718f
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/changing-config.chapter.md
@@ -0,0 +1,105 @@
+# Changing the Configuration {#sec-changing-config}
+
+The file `/etc/nixos/configuration.nix` contains the current
+configuration of your machine. Whenever you've [changed
+something](#ch-configuration) in that file, you should do
+
+```ShellSession
+# nixos-rebuild switch
+```
+
+to build the new configuration, make it the default configuration for
+booting, and try to realise the configuration in the running system
+(e.g., by restarting system services).
+
+::: {.warning}
+This command doesn't start/stop [user services](#opt-systemd.user.services)
+automatically. `nixos-rebuild` only runs a `daemon-reload` for each user with running
+user services.
+:::
+
+::: {.warning}
+These commands must be executed as root, so you should either run them
+from a root shell or prefix them with `sudo -i`.
+:::
+
+You can also do
+
+```ShellSession
+# nixos-rebuild test
+```
+
+to build the configuration and switch the running system to it, but
+without making it the boot default. So if (say) the configuration locks
+up your machine, you can just reboot to get back to a working
+configuration.
+
+There is also
+
+```ShellSession
+# nixos-rebuild boot
+```
+
+to build the configuration and make it the boot default, but not switch
+to it now (so it will only take effect after the next reboot).
+
+You can make your configuration show up in a different submenu of the
+GRUB 2 boot screen by giving it a different *profile name*, e.g.
+
+```ShellSession
+# nixos-rebuild switch -p test
+```
+
+which causes the new configuration (and previous ones created using
+`-p test`) to show up in the GRUB submenu "NixOS - Profile 'test'".
+This can be useful to separate test configurations from "stable"
+configurations.
+
+Finally, you can do
+
+```ShellSession
+$ nixos-rebuild build
+```
+
+to build the configuration but nothing more. This is useful to see
+whether everything compiles cleanly.
+
+If you have a machine that supports hardware virtualisation, you can
+also test the new configuration in a sandbox by building and running a
+QEMU *virtual machine* that contains the desired configuration. Just do
+
+```ShellSession
+$ nixos-rebuild build-vm
+$ ./result/bin/run-*-vm
+```
+
+The VM does not have any data from your host system, so your existing
+user accounts and home directories will not be available unless you have
+set `mutableUsers = false`. Another way is to temporarily add the
+following to your configuration:
+
+```nix
+users.users.your-user.initialHashedPassword = "test";
+```
+
+*Important:* delete the \$hostname.qcow2 file if you have started the
+virtual machine at least once without the right users, otherwise the
+changes will not get picked up. You can forward ports on the host to the
+guest. For instance, the following will forward host port 2222 to guest
+port 22 (SSH):
+
+```ShellSession
+$ QEMU_NET_OPTS="hostfwd=tcp:127.0.0.1:2222-:22" ./result/bin/run-*-vm
+```
+
+allowing you to log in via SSH (assuming you have set the appropriate
+passwords or SSH authorized keys):
+
+```ShellSession
+$ ssh -p 2222 localhost
+```
+
+Such port forwardings connect via the VM's virtual network interface.
+Thus they cannot connect to ports that are only bound to the VM's
+loopback interface (`127.0.0.1`), and the VM's NixOS firewall
+must be configured to allow these connections.
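+
+As a minimal sketch (assuming the service in question is SSH on guest port
+22, as in the example above), the following could be added to the
+configuration before rebuilding the VM:
+
+```nix
+# Let the forwarded connections through the guest's firewall.
+networking.firewall.allowedTCPPorts = [ 22 ];
+```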
diff --git a/nixpkgs/nixos/doc/manual/installation/installation.md b/nixpkgs/nixos/doc/manual/installation/installation.md
new file mode 100644
index 000000000000..f3b1773d865c
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/installation.md
@@ -0,0 +1,12 @@
+# Installation {#ch-installation}
+
+This section describes how to obtain, install, and configure NixOS for first-time use.
+
+```{=include=} chapters
+obtaining.chapter.md
+installing.chapter.md
+changing-config.chapter.md
+upgrading.chapter.md
+building-nixos.chapter.md
+building-images-via-systemd-repart.chapter.md
+```
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md b/nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md
new file mode 100644
index 000000000000..aca151531d0f
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/installing-behind-a-proxy.section.md
@@ -0,0 +1,29 @@
+# Installing behind a proxy {#sec-installing-behind-proxy}
+
+To install NixOS behind a proxy, do the following before running
+`nixos-install`.
+
+1.  Update proxy configuration in `/mnt/etc/nixos/configuration.nix` to
+    keep the internet accessible after reboot.
+
+    ```nix
+    networking.proxy.default = "http://user:password@proxy:port/";
+    networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
+    ```
+
+1.  Setup the proxy environment variables in the shell where you are
+    running `nixos-install`.
+
+    ```ShellSession
+    # proxy_url="http://user:password@proxy:port/"
+    # export http_proxy="$proxy_url"
+    # export HTTP_PROXY="$proxy_url"
+    # export https_proxy="$proxy_url"
+    # export HTTPS_PROXY="$proxy_url"
+    ```
+
+::: {.note}
+If you are switching networks with different proxy configurations, use
+the `specialisation` option in `configuration.nix` to switch proxies at
+runtime. Refer to [](#ch-options) for more information.
+:::
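+
+As a minimal sketch of that approach (the specialisation name `work-proxy`
+and the proxy URL are hypothetical):
+
+```nix
+specialisation.work-proxy.configuration = {
+  networking.proxy.default = "http://user:password@work-proxy:port/";
+  networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
+};
+```
+
+After a rebuild, the specialisation can be activated at runtime via its
+`switch-to-configuration` script under `/run/current-system/specialisation/`.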
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md b/nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md
new file mode 100644
index 000000000000..921592fe5357
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/installing-from-other-distro.section.md
@@ -0,0 +1,279 @@
+# Installing from another Linux distribution {#sec-installing-from-other-distro}
+
+Because Nix (the package manager) and Nixpkgs (the Nix packages
+collection) can both be installed on most Linux distributions,
+they can be used to install NixOS in various creative ways. You can, for
+instance:
+
+1.  Install NixOS on another partition, from your existing Linux
+    distribution (without the use of a USB or optical device!)
+
+1.  Install NixOS on the same partition (in place!), from your existing
+    non-NixOS Linux distribution using `NIXOS_LUSTRATE`.
+
+1.  Install NixOS on your hard drive from the Live CD of any Linux
+    distribution.
+
+The first steps to all these are the same:
+
+1.  Install the Nix package manager:
+
+    Short version:
+
+    ```ShellSession
+    $ curl -L https://nixos.org/nix/install | sh
+    $ . $HOME/.nix-profile/etc/profile.d/nix.sh # …or open a fresh shell
+    ```
+
+    More details can be found in the [Nix
+    manual](https://nixos.org/nix/manual/#chap-quick-start).
+
+1.  Switch to the NixOS channel:
+
+    If you've just installed Nix on a non-NixOS distribution, you will
+    be on the `nixpkgs` channel by default.
+
+    ```ShellSession
+    $ nix-channel --list
+    nixpkgs https://nixos.org/channels/nixpkgs-unstable
+    ```
+
+    As that channel gets released without running the NixOS tests, it
+    will be safer to use the `nixos-*` channels instead:
+
+    ```ShellSession
+    $ nix-channel --add https://nixos.org/channels/nixos-version nixpkgs
+    ```
+
+    You may want to throw in a `nix-channel --update` for good measure.
+
+1.  Install the NixOS installation tools:
+
+    You'll need `nixos-generate-config` and `nixos-install`, but this
+    also makes some man pages and `nixos-enter` available, just in case
+    you want to chroot into your NixOS partition. NixOS installs these
+    by default, but you don't have NixOS yet.
+
+    ```ShellSession
+    $ nix-env -f '<nixpkgs>' -iA nixos-install-tools
+    ```
+
+1.  ::: {.note}
+    The following 5 steps are only for installing NixOS to another
+    partition. For installing NixOS in place using `NIXOS_LUSTRATE`,
+    skip ahead.
+    :::
+
+    Prepare your target partition:
+
+    At this point it is time to prepare your target partition. Please
+    refer to the partitioning, file-system creation, and mounting steps
+    of [](#sec-installation)
+
+    If you're about to install NixOS in place using `NIXOS_LUSTRATE`
+    there is nothing to do for this step.
+
+1.  Generate your NixOS configuration:
+
+    ```ShellSession
+    $ sudo `which nixos-generate-config` --root /mnt
+    ```
+
+    You'll probably want to edit the configuration files. Refer to the
+    `nixos-generate-config` step in [](#sec-installation) for more
+    information.
+
+    Consider setting up the NixOS bootloader to give you the ability to
+    boot on your existing Linux partition. For instance, if you're
+    using GRUB and your existing distribution is running Ubuntu, you may
+    want to add something like this to your `configuration.nix`:
+
+    ```nix
+    boot.loader.grub.extraEntries = ''
+      menuentry "Ubuntu" {
+        search --set=ubuntu --fs-uuid 3cc3e652-0c1f-4800-8451-033754f68e6e
+        configfile "($ubuntu)/boot/grub/grub.cfg"
+      }
+    '';
+    ```
+
+    (You can find the appropriate UUID for your partition in
+    `/dev/disk/by-uuid`)
+
+1.  Create the `nixbld` group and user on your original distribution:
+
+    ```ShellSession
+    $ sudo groupadd -g 30000 nixbld
+    $ sudo useradd -u 30000 -g nixbld -G nixbld nixbld
+    ```
+
+1.  Download/build/install NixOS:
+
+    ::: {.warning}
+    Once you complete this step, you might no longer be able to boot on
+    existing systems without the help of a rescue USB drive or similar.
+    :::
+
+    ::: {.note}
+    On some distributions there are separate `PATH`s for programs intended
+    only for root. In order for the installation to succeed, you might
+    have to use `PATH="$PATH:/usr/sbin:/sbin"` in the following command.
+    :::
+
+    ```ShellSession
+    $ sudo PATH="$PATH" NIX_PATH="$NIX_PATH" `which nixos-install` --root /mnt
+    ```
+
+    Again, please refer to the `nixos-install` step in
+    [](#sec-installation) for more information.
+
+    That should be it for installation to another partition!
+
+1.  Optionally, you may want to clean up your non-NixOS distribution:
+
+    ```ShellSession
+    $ sudo userdel nixbld
+    $ sudo groupdel nixbld
+    ```
+
+    If you do not wish to keep the Nix package manager installed either,
+    run something like `sudo rm -rv ~/.nix-* /nix` and remove the line
+    that the Nix installer added to your `~/.profile`.
+
+1.  ::: {.note}
+    The following steps are only for installing NixOS in place using
+    `NIXOS_LUSTRATE`:
+    :::
+
+    Generate your NixOS configuration:
+
+    ```ShellSession
+    $ sudo `which nixos-generate-config`
+    ```
+
+    Note that this will place the generated configuration files in
+    `/etc/nixos`. You'll probably want to edit the configuration files.
+    Refer to the `nixos-generate-config` step in
+    [](#sec-installation) for more information.
+
+    You'll likely want to set a root password for your first boot using
+    the configuration files because you won't have a chance to enter a
+    password until after you reboot. You can initialize the root password
+    to an empty one with this line: (and of course don't forget to set
+    one once you've rebooted or to lock the account with
+    `sudo passwd -l root` if you use `sudo`)
+
+    ```nix
+    users.users.root.initialHashedPassword = "";
+    ```
+
+1.  Build the NixOS closure and install it in the `system` profile:
+
+    ```ShellSession
+    $ nix-env -p /nix/var/nix/profiles/system -f '<nixpkgs/nixos>' -I nixos-config=/etc/nixos/configuration.nix -iA system
+    ```
+
+1.  Change ownership of the `/nix` tree to root (since your Nix install
+    was probably single user):
+
+    ```ShellSession
+    $ sudo chown -R 0:0 /nix
+    ```
+
+1.  Set up the `/etc/NIXOS` and `/etc/NIXOS_LUSTRATE` files:
+
+    `/etc/NIXOS` makes it official that this is now a NixOS partition (the
+    bootup scripts require its presence).
+
+    `/etc/NIXOS_LUSTRATE` tells the NixOS bootup scripts to move
+    *everything* that's in the root partition to `/old-root`. This will
+    move your existing distribution out of the way in the very early
+    stages of the NixOS bootup. There are exceptions (we do need to keep
+    NixOS there after all), so the NixOS lustrate process will not
+    touch:
+
+    -   The `/nix` directory
+
+    -   The `/boot` directory
+
+    -   Any file or directory listed in `/etc/NIXOS_LUSTRATE` (one per
+        line)
+
+    ::: {.note}
+    Support for `NIXOS_LUSTRATE` was added in NixOS 16.09. The act of
+    "lustrating" refers to the wiping of the existing distribution.
+    Creating `/etc/NIXOS_LUSTRATE` can also be done on NixOS to remove
+    all mutable files from your root partition (anything that's not in
+    `/nix` or `/boot` gets "lustrated" on the next boot).
+
+    lustrate /ˈlʌstreɪt/ verb.
+
+    purify by expiatory sacrifice, ceremonial washing, or some other
+    ritual action.
+    :::
+
+    Let's create the files:
+
+    ```ShellSession
+    $ sudo touch /etc/NIXOS
+    $ sudo touch /etc/NIXOS_LUSTRATE
+    ```
+
+    Let's also make sure the NixOS configuration files are kept once we
+    reboot on NixOS:
+
+    ```ShellSession
+    $ echo etc/nixos | sudo tee -a /etc/NIXOS_LUSTRATE
+    ```
+
+1.  Finally, move the `/boot` directory of your current distribution out
+    of the way (the lustrate process will take care of the rest once you
+    reboot, but this one must be moved out now because NixOS needs to
+    install its own boot files):
+
+    ::: {.warning}
+    Once you complete this step, your current distribution will no
+    longer be bootable! If you didn't get all the NixOS configuration
+    right, especially those settings pertaining to boot loading and root
+    partition, NixOS may not be bootable either. Have a USB rescue
+    device ready in case this happens.
+    :::
+
+    ```ShellSession
+    $ sudo mv -v /boot /boot.bak &&
+    sudo /nix/var/nix/profiles/system/bin/switch-to-configuration boot
+    ```
+
+    Cross your fingers, reboot, hopefully you should get a NixOS prompt!
+
+1.  If for some reason you want to revert to the old distribution,
+    you'll need to boot on a USB rescue disk and do something along
+    these lines:
+
+    ```ShellSession
+    # mkdir root
+    # mount /dev/sdaX root
+    # mkdir root/nixos-root
+    # mv -v root/* root/nixos-root/
+    # mv -v root/nixos-root/old-root/* root/
+    # mv -v root/boot.bak root/boot  # We had renamed this by hand earlier
+    # umount root
+    # reboot
+    ```
+
+    This may work as is or you might also need to reinstall the boot
+    loader.
+
+    And of course, if you're happy with NixOS and no longer need the
+    old distribution:
+
+    ```ShellSession
+    sudo rm -rf /old-root
+    ```
+
+1.  It's also worth noting that this whole process can be automated.
+    This is especially useful for cloud VMs, where providers do not
+    offer NixOS. For instance,
+    [nixos-infect](https://github.com/elitak/nixos-infect) uses the
+    lustrate process to convert Digital Ocean droplets to NixOS from
+    other distributions automatically.
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-kexec.section.md b/nixpkgs/nixos/doc/manual/installation/installing-kexec.section.md
new file mode 100644
index 000000000000..61d8e8e5999b
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/installing-kexec.section.md
@@ -0,0 +1,64 @@
+# "Booting" into NixOS via kexec {#sec-booting-via-kexec}
+
+In some cases, your system might already be booted into/preinstalled with
+another Linux distribution, and booting NixOS by attaching an installation
+image is quite a manual process.
+
+This is particularly useful for (cloud) providers where you can't boot a custom
+image, but get some Debian or Ubuntu installation.
+
+In these cases, it might be easier to use `kexec` to "jump into NixOS" from the
+running system, which only assumes `bash` and `kexec` to be installed on the
+machine.
+
+Note that kexec may not work correctly on some hardware, as devices are not
+fully re-initialized in the process. In practice, however, this is rarely the
+case.
+
+To build the necessary files from your current version of nixpkgs,
+you can run:
+
+```ShellSession
+nix-build -A kexec.x86_64-linux '<nixpkgs/nixos/release.nix>'
+```
+
+This will create a `result` directory containing the following:
+ - `bzImage` (the Linux kernel)
+ - `initrd` (the initrd file)
+ - `kexec-boot` (a shell script invoking `kexec`)
+
+These three files are meant to be copied over to the other already running
+Linux distribution.
+
+Note that these are symlinks pointing elsewhere, so `cd` into the `result`
+directory and use `scp * root@$destination` to copy them over, rather than `rsync`.
+
+Once you finished copying, execute `kexec-boot` *on the destination*, and after
+some seconds, the machine should be booting into an (ephemeral) NixOS
+installation medium.
+
+In case you want to describe your own system closure to kexec into, instead of
+the default installer image, you can build your own `configuration.nix`:
+
+```nix
+{ modulesPath, ... }: {
+  imports = [
+    (modulesPath + "/installer/netboot/netboot-minimal.nix")
+  ];
+
+  services.openssh.enable = true;
+  users.users.root.openssh.authorizedKeys.keys = [
+    "my-ssh-pubkey"
+  ];
+}
+```
+
+You can then build the kexec files for this configuration with:
+
+```ShellSession
+nix-build '<nixpkgs/nixos>' \
+  --arg configuration ./configuration.nix \
+  --attr config.system.build.kexecTree
+```
+
+Make sure your `configuration.nix` still imports `netboot-minimal.nix` (or
+`netboot-base.nix`).
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-pxe.section.md b/nixpkgs/nixos/doc/manual/installation/installing-pxe.section.md
new file mode 100644
index 000000000000..c1cad99d39f3
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/installing-pxe.section.md
@@ -0,0 +1,32 @@
+# Booting from the "netboot" media (PXE) {#sec-booting-from-pxe}
+
+Advanced users may wish to install NixOS using an existing PXE or iPXE
+setup.
+
+These instructions assume that you have an existing PXE or iPXE
+infrastructure and want to add the NixOS installer as another
+option. To build the necessary files from your current version of nixpkgs,
+you can run:
+
+```ShellSession
+nix-build -A netboot.x86_64-linux '<nixpkgs/nixos/release.nix>'
+```
+
+This will create a `result` directory containing:
+
+- `bzImage` -- the Linux kernel
+- `initrd` -- the initrd file
+- `netboot.ipxe` -- an example iPXE script demonstrating the appropriate
+  kernel command line arguments for this image
+
+If you're using plain PXE, configure your boot loader to use the
+`bzImage` and `initrd` files and have it provide the same kernel command
+line arguments found in `netboot.ipxe`.
+
+If you're using iPXE, depending on how your HTTP/FTP/etc. server is
+configured you may be able to use `netboot.ipxe` unmodified, or you may
+need to update the paths to the files to match your server's directory
+layout.
+
+In the future we may begin making these files available as build
+products from Hydra, at which point we will update this documentation
+with instructions on how to obtain them, either for placing on a
+dedicated TFTP server or for booting them directly over the internet.
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-usb.section.md b/nixpkgs/nixos/doc/manual/installation/installing-usb.section.md
new file mode 100644
index 000000000000..adfe22ea2f00
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/installing-usb.section.md
@@ -0,0 +1,72 @@
+# Booting from a USB flash drive {#sec-booting-from-usb}
+
+The image has to be written verbatim to the USB flash drive for it to be
+bootable on UEFI and BIOS systems. Here are the recommended tools to do that.
+
+## Creating bootable USB flash drive with a graphical tool {#sec-booting-from-usb-graphical}
+
+Etcher is a popular and user-friendly tool. It works on Linux, Windows and macOS.
+
+Download it from [balena.io](https://www.balena.io/etcher/), start the program,
+select the downloaded NixOS ISO, then select the USB flash drive and flash it.
+
+::: {.warning}
+Etcher reports errors and usage statistics by default, which can be disabled in
+the settings.
+:::
+
+An alternative is [USBImager](https://bztsrc.gitlab.io/usbimager),
+which is very simple and does not connect to the internet. Download the version
+with write-only (wo) interface for your system. Start the program,
+select the image, select the USB flash drive and click "Write".
+
+## Creating bootable USB flash drive from a Terminal on Linux {#sec-booting-from-usb-linux}
+
+1. Plug in the USB flash drive.
+2. Find the corresponding device with `lsblk`. You can distinguish them by
+   their size.
+3. Make sure all partitions on the device are properly unmounted. Replace `sdX`
+   with your device (e.g. `sdb`).
+
+  ```ShellSession
+  sudo umount /dev/sdX*
+  ```
+
+4. Then use the `dd` utility to write the image to the USB flash drive.
+
+  ```ShellSession
+  sudo dd if=<path-to-image> of=/dev/sdX bs=4M conv=fsync
+  ```
+
+## Creating bootable USB flash drive from a Terminal on macOS {#sec-booting-from-usb-macos}
+
+1. Plug in the USB flash drive.
+2. Find the corresponding device with `diskutil list`. You can distinguish them
+   by their size.
+3. Make sure all partitions on the device are properly unmounted. Replace `diskX`
+   with your device (e.g. `disk1`).
+
+  ```ShellSession
+  diskutil unmountDisk diskX
+  ```
+
+4. Then use the `dd` utility to write the image to the USB flash drive.
+
+  ```ShellSession
+  sudo dd if=<path-to-image> of=/dev/rdiskX bs=4m
+  ```
+
+  After `dd` completes, a GUI dialog "The disk
+  you inserted was not readable by this computer" will pop up, which can
+  be ignored.
+
+  ::: {.note}
+  Using the 'raw' `rdiskX` device instead of `diskX` with dd completes in
+  minutes instead of hours.
+  :::
+
+5. Eject the disk when it is finished.
+
+  ```ShellSession
+  diskutil eject /dev/diskX
+  ```
diff --git a/nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md b/nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md
new file mode 100644
index 000000000000..004838e586be
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/installing-virtualbox-guest.section.md
@@ -0,0 +1,59 @@
+# Installing in a VirtualBox guest {#sec-installing-virtualbox-guest}
+
+Installing NixOS into a VirtualBox guest is convenient for users who
+want to try NixOS without installing it on bare metal. If you want to
+use a pre-made VirtualBox appliance, it is available at [the downloads
+page](https://nixos.org/nixos/download.html). If you want to set up a
+VirtualBox guest manually, follow these instructions:
+
+1.  Add a New Machine in VirtualBox with OS Type "Linux / Other Linux"
+
+1.  Base Memory Size: 768 MB or higher.
+
+1.  New Hard Disk of 8 GB or higher.
+
+1.  Mount the CD-ROM with the NixOS ISO (by clicking on CD/DVD-ROM)
+
+1.  Click on Settings / System / Processor and enable PAE/NX
+
+1.  Click on Settings / System / Acceleration and enable "VT-x/AMD-V"
+    acceleration
+
+1.  Click on Settings / Display / Screen and select VMSVGA as Graphics
+    Controller
+
+1.  Save the settings, start the virtual machine, and continue
+    installation like normal
+
+There are a few modifications you should make in `configuration.nix`.
+Enable booting:
+
+```nix
+boot.loader.grub.device = "/dev/sda";
+```
+
+Also remove the fsck that runs at startup. It will always fail to run,
+stopping your boot until you press `*`.
+
+```nix
+boot.initrd.checkJournalingFS = false;
+```
+
+Shared folders can be given a name and a path in the host system in the
+VirtualBox settings (Machine / Settings / Shared Folders, then click on
+the "Add" icon). Add the following to the
+`/etc/nixos/configuration.nix` to auto-mount them. If you do not add
+`"nofail"`, the system will not boot properly.
+
+```nix
+{ config, pkgs, ... }:
+{
+  fileSystems."/virtualboxshare" = {
+    fsType = "vboxsf";
+    device = "nameofthesharedfolder";
+    options = [ "rw" "nofail" ];
+  };
+}
+```
+
+The folder will be available directly under the root directory.
diff --git a/nixpkgs/nixos/doc/manual/installation/installing.chapter.md b/nixpkgs/nixos/doc/manual/installation/installing.chapter.md
new file mode 100644
index 000000000000..815bcc071cd9
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/installing.chapter.md
@@ -0,0 +1,612 @@
+# Installing NixOS {#sec-installation}
+
+## Booting from the install medium {#sec-installation-booting}
+
+To begin the installation, you have to boot your computer from the install drive.
+
+1.   Plug in the install drive. Then turn on or restart your computer.
+
+2.   Open the boot menu by pressing the appropriate key, which is usually shown
+     on the display on early boot.
+     Select the USB flash drive (the option usually contains the word "USB").
+     If you choose the incorrect drive, your computer will likely continue to
+     boot as normal. In that case restart your computer and pick a
+     different drive.
+
+     ::: {.note}
+     The key to open the boot menu is different across computer brands and even
+     models. It can be [F12]{.keycap}, but also [F1]{.keycap},
+     [F9]{.keycap}, [F10]{.keycap}, [Enter]{.keycap}, [Del]{.keycap},
+     [Esc]{.keycap} or another function key. If you are unsure and don't see
+     it on the early boot screen, you can search online for your computer's
+     brand and model followed by "boot from usb".
+     The computer might not even have that feature, so you have to go into the
+     BIOS/UEFI settings to change the boot order. Again, search online for
+     details about your specific computer model.
+
+     For Apple computers with Intel processors press and hold the [⌥]{.keycap}
+     (Option or Alt) key until you see the boot menu. On Apple silicon press
+     and hold the power button.
+     :::
+
+     ::: {.note}
+     If your computer supports both BIOS and UEFI boot, choose the UEFI option.
+     :::
+
+     ::: {.note}
+     If you use a CD for the installation, the computer will probably boot from
+     it automatically. If not, choose the option containing the word "CD" from
+     the boot menu.
+     :::
+
+3.   Shortly after selecting the appropriate boot drive, you should be
+     presented with a menu with different installer options. Leave the default
+     and wait (or press [Enter]{.keycap} to speed up).
+
+4.   The graphical images will start their corresponding desktop environment
+     and the graphical installer, which can take some time. The minimal images
+     will boot to a command line. You have to follow the instructions in
+     [](#sec-installation-manual) there.
+
+## Graphical Installation {#sec-installation-graphical}
+
+The graphical installer is recommended for desktop users and will guide you
+through the installation.
+
+1.   In the "Welcome" screen, you can select the language of the Installer and
+     the installed system.
+
+     ::: {.tip}
+     Leaving the language as "American English" will make it easier to search for
+     error messages in a search engine or to report an issue.
+     :::
+
+2.   Next you should choose your location to have the timezone set correctly.
+     You can actually click on the map!
+
+     ::: {.note}
+     The installer will use an online service to guess your location based on
+     your public IP address.
+     :::
+
+3.   Then you can select the keyboard layout. The default keyboard model should
+     work well with most desktop keyboards. If you have a special keyboard or
+     notebook, your model might be in the list. Select the language you are most
+     comfortable typing in.
+
+4.   On the "Users" screen, you have to type in your display name, login name
+     and password. You can also enable an option to automatically login to the
+     desktop.
+
+5.   Then you have the option to choose a desktop environment. If you want to
+     create a custom setup with a window manager, you can select "No desktop".
+
+     ::: {.tip}
+     If you don't have a favorite desktop and don't know which one to choose,
+     you can stick to either GNOME or Plasma. They have a quite different
+     design, so you should choose whichever you like better.
+     They are both popular choices and well tested on NixOS.
+     :::
+
+6.   You have the option to allow unfree software in the next screen.
+
+7.   The easiest option in the "Partitioning" screen is "Erase disk", which will
+     delete all data from the selected disk and install the system on it.
+     Also select "Swap (with Hibernation)" in the dropdown below it.
+     You have the option to encrypt the whole disk with LUKS.
+
+     ::: {.note}
+     At the top left you see if the Installer was booted with BIOS or UEFI. If
+     you know your system supports UEFI and it shows "BIOS", reboot with the
+     correct option.
+     :::
+
+     ::: {.warning}
+     Make sure you have selected the correct disk at the top and that no
+     valuable data is still on the disk! It will be deleted when
+     formatting the disk.
+     :::
+
+8.   Check the choices you made in the "Summary" and click "Install".
+
+     ::: {.note}
+     The installation takes about 15 minutes. The time varies based on the
+     selected desktop environment, internet connection speed and disk write speed.
+     :::
+
+9.  When the install is complete, remove the USB flash drive and
+    reboot into your new system!
+
+## Manual Installation {#sec-installation-manual}
+
+NixOS can be installed on BIOS or UEFI systems. The procedure for a UEFI
+installation is broadly the same as for a BIOS installation. The differences
+are mentioned in the following steps.
+
+The NixOS manual is available by running `nixos-help` in the command line
+or from the application menu in the desktop environment.
+
+To have access to the command line on the graphical images, open
+Terminal (GNOME) or Konsole (Plasma) from the application menu.
+
+You are logged in automatically as `nixos`. The `nixos` user account has
+an empty password so you can use `sudo` without a password:
+
+```ShellSession
+$ sudo -i
+```
+
+You can use `loadkeys` to switch to your preferred keyboard layout.
+(We even provide neo2 via `loadkeys de neo`!)
+
+If the text is too small to be legible, try `setfont ter-v32n` to
+increase the font size.
+
+To install over a serial port connect with `115200n8` (e.g.
+`picocom -b 115200 /dev/ttyUSB0`). When the bootloader lists boot
+entries, select the serial console boot entry.
+
+### Networking in the installer {#sec-installation-manual-networking}
+[]{#sec-installation-booting-networking} <!-- legacy anchor -->
+
+The boot process should have brought up networking (check `ip
+a`). Networking is necessary for the installer, since it will
+download lots of stuff (such as source tarballs or Nixpkgs channel
+binaries). It's best if you have a DHCP server on your network.
+Otherwise configure networking manually using `ifconfig`.
+
+On the graphical installer, you can configure the network, wifi
+included, through NetworkManager. Using the `nmtui` program, you can do
+so even in a non-graphical session. If you prefer to configure the
+network manually, disable NetworkManager with
+`systemctl stop NetworkManager`.
+
+On the minimal installer, NetworkManager is not available, so
+configuration must be performed manually. To configure the wifi, first
+start wpa_supplicant with `sudo systemctl start wpa_supplicant`, then
+run `wpa_cli`. For most home networks, you need to type in the following
+commands:
+
+```plain
+> add_network
+0
+> set_network 0 ssid "myhomenetwork"
+OK
+> set_network 0 psk "mypassword"
+OK
+> set_network 0 key_mgmt WPA-PSK
+OK
+> enable_network 0
+OK
+```
+
+For enterprise networks, for example *eduroam*, instead do:
+
+```plain
+> add_network
+0
+> set_network 0 ssid "eduroam"
+OK
+> set_network 0 identity "myname@example.com"
+OK
+> set_network 0 password "mypassword"
+OK
+> set_network 0 key_mgmt WPA-EAP
+OK
+> enable_network 0
+OK
+```
+
+When successfully connected, you should see a line such as this one
+
+```plain
+<3>CTRL-EVENT-CONNECTED - Connection to 32:85:ab:ef:24:5c completed [id=0 id_str=]
+```
+
+you can now leave `wpa_cli` by typing `quit`.
+
+If you would like to continue the installation from a different machine
+you can use activated SSH daemon. You need to copy your ssh key to
+either `/home/nixos/.ssh/authorized_keys` or
+`/root/.ssh/authorized_keys` (Tip: For installers with a modifiable
+filesystem such as the sd-card installer image a key can be manually
+placed by mounting the image on a different machine). Alternatively you
+must set a password for either `root` or `nixos` with `passwd` to be
+able to login.
+
+### Partitioning and formatting {#sec-installation-manual-partitioning}
+[]{#sec-installation-partitioning} <!-- legacy anchor -->
+
+The NixOS installer doesn't do any partitioning or formatting, so you
+need to do that yourself.
+
+The NixOS installer ships with multiple partitioning tools. The examples
+below use `parted`, but the installer also provides `fdisk`, `gdisk`,
+`cfdisk`, and `cgdisk`.
+
+The recommended partition scheme differs depending on whether the computer
+uses *Legacy Boot* or *UEFI*.
+
+#### UEFI (GPT) {#sec-installation-manual-partitioning-UEFI}
+[]{#sec-installation-partitioning-UEFI} <!-- legacy anchor -->
+
+Here's an example partition scheme for UEFI, using `/dev/sda` as the
+device.
+
+::: {.note}
+You can safely ignore `parted`'s informational message about needing to
+update /etc/fstab.
+:::
+
+1.  Create a *GPT* partition table.
+
+    ```ShellSession
+    # parted /dev/sda -- mklabel gpt
+    ```
+
+2.  Add the *root* partition. This will fill the disk except for the end
+    part, where the swap will live, and the space left in front (512MiB)
+    which will be used by the boot partition.
+
+    ```ShellSession
+    # parted /dev/sda -- mkpart root ext4 512MB -8GB
+    ```
+
+3.  Next, add a *swap* partition. The size required will vary according
+    to needs; here an 8GB one is created.
+
+    ```ShellSession
+    # parted /dev/sda -- mkpart swap linux-swap -8GB 100%
+    ```
+
+    ::: {.note}
+    The swap partition size rules are no different than for other Linux
+    distributions.
+    :::
+
+4.  Finally, the *boot* partition. NixOS by default uses the ESP (EFI
+    system partition) as its */boot* partition. It uses the initially
+    reserved 512MiB at the start of the disk.
+
+    ```ShellSession
+    # parted /dev/sda -- mkpart ESP fat32 1MB 512MB
+    # parted /dev/sda -- set 3 esp on
+    ```
+
+Once complete, you can follow with
+[](#sec-installation-manual-partitioning-formatting).
+
+#### Legacy Boot (MBR) {#sec-installation-manual-partitioning-MBR}
+[]{#sec-installation-partitioning-MBR} <!-- legacy anchor -->
+
+Here's an example partition scheme for Legacy Boot, using `/dev/sda` as
+the device.
+
+::: {.note}
+You can safely ignore `parted`'s informational message about needing to
+update /etc/fstab.
+:::
+
+1.  Create a *MBR* partition table.
+
+    ```ShellSession
+    # parted /dev/sda -- mklabel msdos
+    ```
+
+2.  Add the *root* partition. This will fill the disk except for the
+    end part, where the swap will live.
+
+    ```ShellSession
+    # parted /dev/sda -- mkpart primary 1MB -8GB
+    ```
+
+3.  Set the root partition's boot flag to on. This allows the disk to be booted from.
+
+    ```ShellSession
+    # parted /dev/sda -- set 1 boot on
+    ```
+
+4.  Finally, add a *swap* partition. The size required will vary
+    according to needs; here an 8GB one is created.
+
+    ```ShellSession
+    # parted /dev/sda -- mkpart primary linux-swap -8GB 100%
+    ```
+
+    ::: {.note}
+    The swap partition size rules are no different than for other Linux
+    distributions.
+    :::
+
+Once complete, you can follow with
+[](#sec-installation-manual-partitioning-formatting).
+
+#### Formatting {#sec-installation-manual-partitioning-formatting}
+[]{#sec-installation-partitioning-formatting} <!-- legacy anchor -->
+
+Use the following commands:
+
+-   For initialising Ext4 partitions: `mkfs.ext4`. It is recommended
+    that you assign a unique symbolic label to the file system using the
+    option `-L label`, since this makes the file system configuration
+    independent from device changes. For example:
+
+    ```ShellSession
+    # mkfs.ext4 -L nixos /dev/sda1
+    ```
+
+-   For creating swap partitions: `mkswap`. Again it's recommended to
+    assign a label to the swap partition: `-L label`. For example:
+
+    ```ShellSession
+    # mkswap -L swap /dev/sda2
+    ```
+
+-   **UEFI systems**
+
+    For creating boot partitions: `mkfs.fat`. Again it's recommended
+    to assign a label to the boot partition: `-n label`. For
+    example:
+
+    ```ShellSession
+    # mkfs.fat -F 32 -n boot /dev/sda3
+    ```
+
+-   For creating LVM volumes, the LVM commands, e.g., `pvcreate`,
+    `vgcreate`, and `lvcreate`.
+
+-   For creating software RAID devices, use `mdadm`.
+
+### Installing {#sec-installation-manual-installing}
+[]{#sec-installation-installing} <!-- legacy anchor -->
+
+1.  Mount the target file system on which NixOS should be installed on
+    `/mnt`, e.g.
+
+    ```ShellSession
+    # mount /dev/disk/by-label/nixos /mnt
+    ```
+
+2.  **UEFI systems**
+
+    Mount the boot file system on `/mnt/boot`, e.g.
+
+    ```ShellSession
+    # mkdir -p /mnt/boot
+    # mount /dev/disk/by-label/boot /mnt/boot
+    ```
+
+3.  If your machine has a limited amount of memory, you may want to
+    activate swap devices now (`swapon device`).
+    The installer (or rather, the build actions that it
+    may spawn) may need quite a bit of RAM, depending on your
+    configuration.
+
+    ```ShellSession
+    # swapon /dev/sda2
+    ```
+
+4.  You now need to create a file `/mnt/etc/nixos/configuration.nix`
+    that specifies the intended configuration of the system. This is
+    because NixOS has a *declarative* configuration model: you create or
+    edit a description of the desired configuration of your system, and
+    then NixOS takes care of making it happen. The syntax of the NixOS
+    configuration file is described in [](#sec-configuration-syntax),
+    while a list of available configuration options appears in
+    [](#ch-options). A minimal example is shown in
+    [Example: NixOS Configuration](#ex-config).
+
+    The command `nixos-generate-config` can generate an initial
+    configuration file for you:
+
+    ```ShellSession
+    # nixos-generate-config --root /mnt
+    ```
+
+    You should then edit `/mnt/etc/nixos/configuration.nix` to suit your
+    needs:
+
+    ```ShellSession
+    # nano /mnt/etc/nixos/configuration.nix
+    ```
+
+    If you're using the graphical ISO image, other editors may be
+    available (such as `vim`). If you have network access, you can also
+    install other editors -- for instance, you can install Emacs by
+    running `nix-env -f '<nixpkgs>' -iA emacs`.
+
+    BIOS systems
+
+    :   You *must* set the option [](#opt-boot.loader.grub.device) to
+        specify on which disk the GRUB boot loader is to be installed.
+        Without it, NixOS cannot boot.
+
+        If there are other operating systems running on the machine before
+        installing NixOS, the [](#opt-boot.loader.grub.useOSProber)
+        option can be set to `true` to automatically add them to the grub
+        menu.
+
+    UEFI systems
+
+    :   You must select a boot-loader, either systemd-boot or GRUB. The recommended
+        option is systemd-boot: set the option [](#opt-boot.loader.systemd-boot.enable)
+        to `true`. `nixos-generate-config` should do this automatically
+        for new configurations when booted in UEFI mode.
+
+        You may want to look at the options starting with
+        [`boot.loader.efi`](#opt-boot.loader.efi.canTouchEfiVariables) and
+        [`boot.loader.systemd-boot`](#opt-boot.loader.systemd-boot.enable)
+        as well.
+
+        If you want to use GRUB, set [](#opt-boot.loader.grub.device) to `nodev` and
+        [](#opt-boot.loader.grub.efiSupport) to `true`.
+
+        With systemd-boot, you should not need any special configuration to detect
+        other installed systems. With GRUB, set [](#opt-boot.loader.grub.useOSProber)
+        to `true`, but this will only detect Windows partitions, not other Linux
+        distributions. If you dual boot another Linux distribution, use systemd-boot
+        instead.
+
+    If you need to configure networking for your machine the
+    configuration options are described in [](#sec-networking). In
+    particular, while wifi is supported on the installation image, it is
+    not enabled by default in the configuration generated by
+    `nixos-generate-config`.
+
+    Another critical option is `fileSystems`, specifying the file
+    systems that need to be mounted by NixOS. However, you typically
+    don't need to set it yourself, because `nixos-generate-config` sets
+    it automatically in `/mnt/etc/nixos/hardware-configuration.nix` from
+    your currently mounted file systems. (The configuration file
+    `hardware-configuration.nix` is included from `configuration.nix`
+    and will be overwritten by future invocations of
+    `nixos-generate-config`; thus, you generally should not modify it.)
+    Additionally, you may want to look at [Hardware configuration for
+    known-hardware](https://github.com/NixOS/nixos-hardware) at this
+    point or after installation.
+
+    ::: {.note}
+    Depending on your hardware configuration or type of file system, you
+    may need to set the option `boot.initrd.kernelModules` to include
+    the kernel modules that are necessary for mounting the root file
+    system, otherwise the installed system will not be able to boot. (If
+    this happens, boot from the installation media again, mount the
+    target file system on `/mnt`, fix `/mnt/etc/nixos/configuration.nix`
+    and rerun `nixos-install`.) In most cases, `nixos-generate-config`
+    will figure out the required modules.
+    :::
+
+5.  Do the installation:
+
+    ```ShellSession
+    # nixos-install
+    ```
+
+    This will install your system based on the configuration you
+    provided. If anything fails due to a configuration problem or any
+    other issue (such as a network outage while downloading binaries
+    from the NixOS binary cache), you can re-run `nixos-install` after
+    fixing your `configuration.nix`.
+
+    As the last step, `nixos-install` will ask you to set the password
+    for the `root` user, e.g.
+
+    ```plain
+    setting root password...
+    New password: ***
+    Retype new password: ***
+    ```
+
+    ::: {.note}
+    For unattended installations, it is possible to use
+    `nixos-install --no-root-passwd` in order to disable the password
+    prompt entirely.
+    :::
+
+6.  If everything went well:
+
+    ```ShellSession
+    # reboot
+    ```
+
+7.  You should now be able to boot into the installed NixOS. The GRUB
+    boot menu shows a list of *available configurations* (initially just
+    one). Every time you change the NixOS configuration (see [Changing
+    Configuration](#sec-changing-config)), a new item is added to the
+    menu. This allows you to easily roll back to a previous
+    configuration if something goes wrong.
+
+    You should log in and change the `root` password with `passwd`.
+
+    You'll probably want to create some user accounts as well, which can
+    be done with `useradd`:
+
+    ```ShellSession
+    # useradd -c 'Eelco Dolstra' -m eelco
+    # passwd eelco
+    ```
+
+    You may also want to install some software. This will be covered in
+    [](#sec-package-management).
+
+### Installation summary {#sec-installation-manual-summary}
+[]{#sec-installation-summary} <!-- legacy anchor -->
+
+To summarise, [Example: Commands for Installing NixOS on `/dev/sda`](#ex-install-sequence)
+shows a typical sequence of commands for installing NixOS on an empty hard
+drive (here `/dev/sda`). [Example: NixOS Configuration](#ex-config) shows a
+corresponding configuration Nix expression.
+
+::: {#ex-partition-scheme-MBR .example}
+### Example partition schemes for NixOS on `/dev/sda` (MBR)
+```ShellSession
+# parted /dev/sda -- mklabel msdos
+# parted /dev/sda -- mkpart primary 1MB -8GB
+# parted /dev/sda -- mkpart primary linux-swap -8GB 100%
+```
+:::
+
+::: {#ex-partition-scheme-UEFI .example}
+### Example partition schemes for NixOS on `/dev/sda` (UEFI)
+```ShellSession
+# parted /dev/sda -- mklabel gpt
+# parted /dev/sda -- mkpart root ext4 512MB -8GB
+# parted /dev/sda -- mkpart swap linux-swap -8GB 100%
+# parted /dev/sda -- mkpart ESP fat32 1MB 512MB
+# parted /dev/sda -- set 3 esp on
+```
+:::
+
+::: {#ex-install-sequence .example}
+### Commands for Installing NixOS on `/dev/sda`
+
+With a partitioned disk.
+
+```ShellSession
+# mkfs.ext4 -L nixos /dev/sda1
+# mkswap -L swap /dev/sda2
+# swapon /dev/sda2
+# mkfs.fat -F 32 -n boot /dev/sda3        # (for UEFI systems only)
+# mount /dev/disk/by-label/nixos /mnt
+# mkdir -p /mnt/boot                      # (for UEFI systems only)
+# mount /dev/disk/by-label/boot /mnt/boot # (for UEFI systems only)
+# nixos-generate-config --root /mnt
+# nano /mnt/etc/nixos/configuration.nix
+# nixos-install
+# reboot
+```
+:::
+
+::: {#ex-config .example}
+### Example: NixOS Configuration
+```nix
+{ config, pkgs, ... }: {
+  imports = [
+    # Include the results of the hardware scan.
+    ./hardware-configuration.nix
+  ];
+
+  boot.loader.grub.device = "/dev/sda";   # (for BIOS systems only)
+  boot.loader.systemd-boot.enable = true; # (for UEFI systems only)
+
+  # Note: setting fileSystems is generally not
+  # necessary, since nixos-generate-config figures them out
+  # automatically in hardware-configuration.nix.
+  #fileSystems."/".device = "/dev/disk/by-label/nixos";
+
+  # Enable the OpenSSH server.
+  services.sshd.enable = true;
+}
+```
+:::
+
+## Additional installation notes {#sec-installation-additional-notes}
+
+```{=include=} sections
+installing-usb.section.md
+installing-pxe.section.md
+installing-kexec.section.md
+installing-virtualbox-guest.section.md
+installing-from-other-distro.section.md
+installing-behind-a-proxy.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/installation/obtaining.chapter.md b/nixpkgs/nixos/doc/manual/installation/obtaining.chapter.md
new file mode 100644
index 000000000000..a72194ecf985
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/obtaining.chapter.md
@@ -0,0 +1,23 @@
+# Obtaining NixOS {#sec-obtaining}
+
+NixOS ISO images can be downloaded from the [NixOS download
+page](https://nixos.org/download.html#nixos-iso). Follow the instructions in
+[](#sec-booting-from-usb) to create a bootable USB flash drive.
+
+If you have a very old system that can't boot from USB, you can burn the image
+to an empty CD. NixOS might not work very well on such systems.
+
+As an alternative to installing NixOS yourself, you can get a running
+NixOS system through several other means:
+
+-   Using virtual appliances in Open Virtualization Format (OVF) that
+    can be imported into VirtualBox. These are available from the [NixOS
+    download page](https://nixos.org/download.html#nixos-virtualbox).
+
+-   Using AMIs for Amazon's EC2. To find one for your region, please refer
+    to the [download page](https://nixos.org/download.html#nixos-amazon).
+
+-   Using NixOps, the NixOS-based cloud deployment tool, which allows
+    you to provision VirtualBox and EC2 NixOS instances from declarative
+    specifications. Check out the [NixOps
+    homepage](https://nixos.org/nixops) for details.
diff --git a/nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md b/nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md
new file mode 100644
index 000000000000..d39e1b786d83
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/installation/upgrading.chapter.md
@@ -0,0 +1,118 @@
+# Upgrading NixOS {#sec-upgrading}
+
+The best way to keep your NixOS installation up to date is to use one of
+the NixOS *channels*. A channel is a Nix mechanism for distributing Nix
+expressions and associated binaries. The NixOS channels are updated
+automatically from NixOS's Git repository after certain tests have
+passed and all packages have been built. These channels are:
+
+-   *Stable channels*, such as [`nixos-23.05`](https://channels.nixos.org/nixos-23.05).
+    These only get conservative bug fixes and package upgrades. For
+    instance, a channel update may cause the Linux kernel on your system
+    to be upgraded from 4.19.34 to 4.19.38 (a minor bug fix), but not
+    from 4.19.x to 4.20.x (a major change that has the potential to break things).
+    Stable channels are generally maintained until the next stable
+    branch is created.
+
+-   The *unstable channel*, [`nixos-unstable`](https://channels.nixos.org/nixos-unstable).
+    This corresponds to NixOS's main development branch, and may thus see
+    radical changes between channel updates. It's not recommended for
+    production systems.
+
+-   *Small channels*, such as [`nixos-23.05-small`](https://channels.nixos.org/nixos-23.05-small)
+    or [`nixos-unstable-small`](https://channels.nixos.org/nixos-unstable-small).
+    These are identical to the stable and unstable channels described above,
+    except that they contain fewer binary packages. This means they get updated
+    faster than the regular channels (for instance, when a critical security patch
+    is committed to NixOS's source tree), but may require more packages to be
+    built from source than usual. They're mostly intended for server environments
+    and as such contain few GUI applications.
+
+To see what channels are available, go to <https://channels.nixos.org>.
+(Note that the URIs of the various channels redirect to a directory that
+contains the channel's latest version and includes ISO images and
+VirtualBox appliances.) Please note that during the release process,
+channels that are not yet released will be present here as well. See the
+Getting NixOS page <https://nixos.org/nixos/download.html> to find the
+newest supported stable release.
+
+When you first install NixOS, you're automatically subscribed to the
+NixOS channel that corresponds to your installation source. For
+instance, if you installed from a 23.05 ISO, you will be subscribed to
+the `nixos-23.05` channel. To see which NixOS channel you're subscribed
+to, run the following as root:
+
+```ShellSession
+# nix-channel --list | grep nixos
+nixos https://channels.nixos.org/nixos-unstable
+```
+
+To switch to a different NixOS channel, do
+
+```ShellSession
+# nix-channel --add https://channels.nixos.org/channel-name nixos
+```
+
+(Be sure to include the `nixos` parameter at the end.) For instance, to
+use the NixOS 23.05 stable channel:
+
+```ShellSession
+# nix-channel --add https://channels.nixos.org/nixos-23.05 nixos
+```
+
+If you have a server, you may want to use the "small" channel instead:
+
+```ShellSession
+# nix-channel --add https://channels.nixos.org/nixos-23.05-small nixos
+```
+
+And if you want to live on the bleeding edge:
+
+```ShellSession
+# nix-channel --add https://channels.nixos.org/nixos-unstable nixos
+```
+
+You can then upgrade NixOS to the latest version in your chosen channel
+by running
+
+```ShellSession
+# nixos-rebuild switch --upgrade
+```
+
+which is equivalent to the more verbose `nix-channel --update nixos; nixos-rebuild switch`.
+
+::: {.note}
+Channels are set per user. This means that running `nix-channel --add`
+as a non-root user (or without sudo) will not affect the
+configuration in `/etc/nixos/configuration.nix`.
+:::
+
+::: {.warning}
+It is generally safe to switch back and forth between channels. The only
+exception is that a newer NixOS may also have a newer Nix version, which
+may involve an upgrade of Nix's database schema. This cannot be undone
+easily, so in that case you will not be able to go back to your original
+channel.
+:::
+
+## Automatic Upgrades {#sec-upgrading-automatic}
+
+You can keep a NixOS system up-to-date automatically by adding the
+following to `configuration.nix`:
+
+```nix
+system.autoUpgrade.enable = true;
+system.autoUpgrade.allowReboot = true;
+```
+
+This enables a periodically executed systemd service named
+`nixos-upgrade.service`. If the `allowReboot` option is `false`, it runs
+`nixos-rebuild switch --upgrade` to upgrade NixOS to the latest version
+in the current channel. (To see when the service runs, see `systemctl list-timers`.)
+If `allowReboot` is `true`, then the system will automatically reboot if
+the new generation contains a different kernel, initrd or kernel
+modules. You can also specify a channel explicitly, e.g.
+
+```nix
+system.autoUpgrade.channel = "https://channels.nixos.org/nixos-23.05";
+```
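+
+For example, to check when the next automatic upgrade will run, you can inspect
+the systemd timers. This is a minimal illustration; filtering by name assumes
+the timer unit is called `nixos-upgrade`:
+
+```ShellSession
+# systemctl list-timers | grep nixos-upgrade
+```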
diff --git a/nixpkgs/nixos/doc/manual/manual.md b/nixpkgs/nixos/doc/manual/manual.md
new file mode 100644
index 000000000000..8cb766eeccf6
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/manual.md
@@ -0,0 +1,56 @@
+# NixOS Manual {#book-nixos-manual}
+## Version @NIXOS_VERSION@
+
+<!--
+  this is the top-level structure file for the nixos manual.
+
+  the manual structure extends the nixpkgs commonmark further with include
+  blocks to allow better organization of input text. there are six types of
+  include blocks: preface, parts, chapters, sections, appendix, and options.
+  each type except `options` corresponds to the docbook elements of (roughly)
+  the same name, and can itself contain further include blocks to denote its
+  substructure.
+
+  non-`options` include blocks are fenced code blocks that list a number of
+  files to include, in the form
+
+     ```{=include=} <type>
+     <file-name-1>
+     <file-name-2>
+     <...>
+     ```
+
+  `options` include blocks do not list file names but contain a list of key-value
+  pairs that describe the options to be included and how to convert them into
+  elements of the manual output type:
+
+      ```{=include=} options
+      id-prefix: <options id prefix>
+      list-id: <variable list element id>
+      source: <path to options.json>
+      ```
+
+-->
+
+```{=include=} preface
+preface.md
+```
+
+```{=include=} parts
+installation/installation.md
+configuration/configuration.md
+administration/running.md
+development/development.md
+```
+
+```{=include=} chapters
+contributing-to-this-manual.chapter.md
+```
+
+```{=include=} appendix html:into-file=//options.html
+nixos-options.md
+```
+
+```{=include=} appendix html:into-file=//release-notes.html
+release-notes/release-notes.md
+```
diff --git a/nixpkgs/nixos/doc/manual/nixos-options.md b/nixpkgs/nixos/doc/manual/nixos-options.md
new file mode 100644
index 000000000000..33b487c95a2e
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/nixos-options.md
@@ -0,0 +1,7 @@
+# Configuration Options {#ch-options}
+
+```{=include=} options
+id-prefix: opt-
+list-id: configuration-variable-list
+source: @NIXOS_OPTIONS_JSON@
+```
diff --git a/nixpkgs/nixos/doc/manual/preface.md b/nixpkgs/nixos/doc/manual/preface.md
new file mode 100644
index 000000000000..b33af979c5a9
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/preface.md
@@ -0,0 +1,11 @@
+# Preface {#preface}
+
+This manual describes how to install, use and extend NixOS, a Linux distribution based on the purely functional package management system [Nix](https://nixos.org/nix) and composed using modules and packages defined in the [Nixpkgs](https://nixos.org/nixpkgs) project.
+
+Additional information regarding the Nix package manager and the Nixpkgs project can be found in the [Nix manual](https://nixos.org/nix/manual) and the [Nixpkgs manual](https://nixos.org/nixpkgs/manual), respectively.
+
+If you encounter problems, please report them on the [`Discourse`](https://discourse.nixos.org), the [Matrix room](https://matrix.to/#/%23nix:nixos.org), or on the [`#nixos` channel on Libera.Chat](irc://irc.libera.chat/#nixos). Alternatively, consider [contributing to this manual](#chap-contributing). Bugs should be reported in [NixOS’ GitHub issue tracker](https://github.com/NixOS/nixpkgs/issues).
+
+::: {.note}
+Commands prefixed with `#` have to be run as root, either by logging in as the root user or by temporarily switching to it, for example using `sudo`.
+:::
diff --git a/nixpkgs/nixos/doc/manual/release-notes/release-notes.md b/nixpkgs/nixos/doc/manual/release-notes/release-notes.md
new file mode 100644
index 000000000000..3f926fb21a5c
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/release-notes.md
@@ -0,0 +1,26 @@
+# Release Notes {#ch-release-notes}
+
+This section lists the release notes for each stable version of NixOS and the current unstable revision.
+
+```{=include=} sections
+rl-2311.section.md
+rl-2305.section.md
+rl-2211.section.md
+rl-2205.section.md
+rl-2111.section.md
+rl-2105.section.md
+rl-2009.section.md
+rl-2003.section.md
+rl-1909.section.md
+rl-1903.section.md
+rl-1809.section.md
+rl-1803.section.md
+rl-1709.section.md
+rl-1703.section.md
+rl-1609.section.md
+rl-1603.section.md
+rl-1509.section.md
+rl-1412.section.md
+rl-1404.section.md
+rl-1310.section.md
+```
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1310.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1310.section.md
new file mode 100644
index 000000000000..9efd8f6e8a1e
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1310.section.md
@@ -0,0 +1,3 @@
+# Release 13.10 ("Aardvark", 2013/10/31) {#sec-release-13.10}
+
+This is the first stable release branch of NixOS.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1404.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1404.section.md
new file mode 100644
index 000000000000..e0a70df3a634
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1404.section.md
@@ -0,0 +1,81 @@
+# Release 14.04 ("Baboon", 2014/04/30) {#sec-release-14.04}
+
+This is the second stable release branch of NixOS. In addition to numerous new and upgraded packages and modules, this release has the following highlights:
+
+- Installation on UEFI systems is now supported. See [](#sec-installation) for details.
+
+- Systemd has been updated to version 212, which has [numerous improvements](http://cgit.freedesktop.org/systemd/systemd/plain/NEWS?id=v212). NixOS now automatically starts systemd user instances when you log in. You can define global user units through the `systemd.unit.*` options.
+
+- NixOS is now based on Glibc 2.19 and GCC 4.8.
+
+- The default Linux kernel has been updated to 3.12.
+
+- KDE has been updated to 4.12.
+
+- GNOME 3.10 experimental support has been added.
+
+- Nix has been updated to 1.7 ([details](https://nixos.org/nix/manual/#ssec-relnotes-1.7)).
+
+- NixOS now supports fully declarative management of users and groups. If you set `users.mutableUsers` to `false`, then the contents of `/etc/passwd` and `/etc/group` will be [congruent](https://www.usenix.org/legacy/event/lisa02/tech/full_papers/traugott/traugott_html/) to your NixOS configuration. For instance, if you remove a user from `users.extraUsers` and run `nixos-rebuild`, the user account will cease to exist. Also, imperative commands for managing users and groups, such as `useradd`, are no longer available. If `users.mutableUsers` is `true` (the default), then behaviour is unchanged from NixOS 13.10.
+
+- NixOS now has basic container support, meaning you can easily run a NixOS instance as a container in a NixOS host system. These containers are suitable for testing and experimentation but not production use, since they're not fully isolated from the host. See [](#ch-containers) for details.
+
+- Systemd units provided by packages can now be overridden from the NixOS configuration. For instance, if a package `foo` provides systemd units, you can say:
+
+  ```nix
+  {
+    systemd.packages = [ pkgs.foo ];
+  }
+  ```
+
+  to enable those units. You can then set or override unit options in the usual way, e.g.
+
+  ```nix
+  {
+    systemd.services.foo.wantedBy = [ "multi-user.target" ];
+    systemd.services.foo.serviceConfig.MemoryLimit = "512M";
+  }
+  ```
+
+  When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- Nixpkgs no longer exposes unfree packages by default. If your NixOS configuration requires unfree packages from Nixpkgs, you need to enable support for them explicitly by setting:
+
+  ```nix
+  {
+    nixpkgs.config.allowUnfree = true;
+  }
+  ```
+
+  Otherwise, you get an error message such as:
+
+  ```ShellSession
+      error: package ‘nvidia-x11-331.49-3.12.17’ in ‘…/nvidia-x11/default.nix:56’
+        has an unfree license, refusing to evaluate
+  ```
+
+- The Adobe Flash player is no longer enabled by default in the Firefox and Chromium wrappers. To enable it, you must set:
+
+  ```nix
+  {
+    nixpkgs.config.allowUnfree = true;
+    nixpkgs.config.firefox.enableAdobeFlash = true; # for Firefox
+    nixpkgs.config.chromium.enableAdobeFlash = true; # for Chromium
+  }
+  ```
+
+- The firewall is now enabled by default. If you don't want this, you need to disable it explicitly:
+
+  ```nix
+  {
+    networking.firewall.enable = false;
+  }
+  ```
+
+- The option `boot.loader.grub.memtest86` has been renamed to `boot.loader.grub.memtest86.enable`.
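+
+  For example, a minimal sketch of the renamed option:
+
+  ```nix
+  {
+    boot.loader.grub.memtest86.enable = true;
+  }
+  ```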
+
+- The `mysql55` service has been merged into the `mysql` service, which no longer sets a default for the option `services.mysql.package`.
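+
+  If you relied on the old default, set the package explicitly; for instance, assuming a MySQL 5.5 package such as `pkgs.mysql55` is available in your Nixpkgs:
+
+  ```nix
+  {
+    # pkgs.mysql55 is an illustrative choice; pick the MySQL package you need
+    services.mysql.package = pkgs.mysql55;
+  }
+  ```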
+
+- Package variants are now differentiated by suffixing the name, rather than the version. For instance, `sqlite-3.8.4.3-interactive` is now called `sqlite-interactive-3.8.4.3`. This ensures that `nix-env -i sqlite` is unambiguous, and that `nix-env -u` won't "upgrade" `sqlite` to `sqlite-interactive` or vice versa. Notably, this change affects the Firefox wrapper (which provides plugins), as it is now called `firefox-wrapper`. So when using `nix-env`, you should do `nix-env -e firefox; nix-env -i firefox-wrapper` if you want to keep using the wrapper. This change does not affect declarative package management, since attribute names like `pkgs.firefoxWrapper` were already unambiguous.
+
+- The symlink `/etc/ca-bundle.crt` is gone. Programs should instead use the environment variable `OPENSSL_X509_CERT_FILE` (which points to `/etc/ssl/certs/ca-bundle.crt`).
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1412.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1412.section.md
new file mode 100644
index 000000000000..683f1e45f092
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1412.section.md
@@ -0,0 +1,171 @@
+# Release 14.12 ("Caterpillar", 2014/12/30) {#sec-release-14.12}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- Systemd has been updated to version 217, which has numerous [improvements.](http://lists.freedesktop.org/archives/systemd-devel/2014-October/024662.html)
+
+- [Nix has been updated to 1.8.](https://www.mail-archive.com/nix-dev@lists.science.uu.nl/msg13957.html)
+
+- NixOS is now based on Glibc 2.20.
+
+- KDE has been updated to 4.14.
+
+- The default Linux kernel has been updated to 3.14.
+
+- If `users.mutableUsers` is enabled (the default), changes made to the declaration of a user or group will be correctly realised when running `nixos-rebuild`. For instance, removing a user specification from `configuration.nix` will cause the actual user account to be deleted. If `users.mutableUsers` is disabled, it is no longer necessary to specify UIDs or GIDs; if omitted, they are allocated dynamically.
+
+The following new services were added since the last release:
+
+- `atftpd`
+
+- `bosun`
+
+- `bspwm`
+
+- `chronos`
+
+- `collectd`
+
+- `consul`
+
+- `cpuminer-cryptonight`
+
+- `crashplan`
+
+- `dnscrypt-proxy`
+
+- `docker-registry`
+
+- `docker`
+
+- `etcd`
+
+- `fail2ban`
+
+- `fcgiwrap`
+
+- `fleet`
+
+- `fluxbox`
+
+- `gdm`
+
+- `geoclue2`
+
+- `gitlab`
+
+- `gitolite`
+
+- `gnome3.gnome-documents`
+
+- `gnome3.gnome-online-miners`
+
+- `gnome3.gvfs`
+
+- `gnome3.seahorse`
+
+- `hbase`
+
+- `i2pd`
+
+- `influxdb`
+
+- `kubernetes`
+
+- `liquidsoap`
+
+- `lxc`
+
+- `mailpile`
+
+- `mesos`
+
+- `mlmmj`
+
+- `monetdb`
+
+- `mopidy`
+
+- `neo4j`
+
+- `nsd`
+
+- `openntpd`
+
+- `opentsdb`
+
+- `openvswitch`
+
+- `parallels-guest`
+
+- `peerflix`
+
+- `phd`
+
+- `polipo`
+
+- `prosody`
+
+- `radicale`
+
+- `redmine`
+
+- `riemann`
+
+- `scollector`
+
+- `seeks`
+
+- `siproxd`
+
+- `strongswan`
+
+- `tcsd`
+
+- `teamspeak3`
+
+- `thermald`
+
+- `torque/mrom`
+
+- `torque/server`
+
+- `uhub`
+
+- `unifi`
+
+- `znc`
+
+- `zookeeper`
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- The default version of Apache httpd is now 2.4. If you use the `extraConfig` option to pass literal Apache configuration text, you may need to update it --- see [Apache's documentation](http://httpd.apache.org/docs/2.4/upgrading.html) for details. If you wish to continue to use httpd 2.2, add the following line to your NixOS configuration:
+
+  ```nix
+  {
+    services.httpd.package = pkgs.apacheHttpd_2_2;
+  }
+  ```
+
+- PHP 5.3 has been removed because it is no longer supported by the PHP project. A [migration guide](http://php.net/migration54) is available.
+
+- The host side of a container virtual Ethernet pair is now called `ve-container-name` rather than `c-container-name`.
+
+- GNOME 3.10 support has been dropped. The default GNOME version is now 3.12.
+
+- VirtualBox has been upgraded to 4.3.20 release. Users may be required to run `rm -rf /tmp/.vbox*`. The line `imports = [ <nixpkgs/nixos/modules/programs/virtualbox.nix> ]` is no longer necessary, use `services.virtualboxHost.enable = true` instead.
+
+  Also, hardening mode is now enabled by default, which means that unless you want to use USB support, you no longer need to be a member of the `vboxusers` group.
+
+- Chromium has been updated to 39.0.2171.65. `enablePepperPDF` is now enabled by default. `chromium*Wrapper` packages no longer exist, because upstream removed NSAPI support. `chromium-stable` has been renamed to `chromium`.
+
+- Python packaging documentation is now part of nixpkgs manual. To override the python packages available to a custom python you now use `pkgs.pythonFull.buildEnv.override` instead of `pkgs.pythonFull.override`.
+
+- `boot.resumeDevice = "8:6"` is no longer supported. Most users will want to leave it undefined, in which case swap partitions are picked up automatically. There is an evaluation assertion to ensure that the string starts with a slash.
+
+- The system-wide default timezone for NixOS installations changed from `CET` to `UTC`. To choose a different timezone for your system, configure `time.timeZone` in `configuration.nix`. A fairly complete list of possible values for that setting is available at <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones>.
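+
+  For example, with an illustrative value:
+
+  ```nix
+  {
+    # any entry from the tz database list works here
+    time.timeZone = "Europe/Amsterdam";
+  }
+  ```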
+
+- GNU screen has been updated to 4.2.1, which breaks the ability to connect to sessions created by older versions of screen.
+
+- The Intel GPU driver was updated to the 3.x prerelease version (used by most distributions) and supports DRI3 now.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md
new file mode 100644
index 000000000000..f47d13008185
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1509.section.md
@@ -0,0 +1,319 @@
+# Release 15.09 ("Dingo", 2015/09/30) {#sec-release-15.09}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- The [Haskell](http://haskell.org/) packages infrastructure has been re-designed from the ground up ("Haskell NG"). NixOS now distributes the latest version of every single package registered on [Hackage](http://hackage.haskell.org/) -- well in excess of 8,000 Haskell packages. Detailed instructions on how to use that infrastructure can be found in the [User's Guide to the Haskell Infrastructure](https://nixos.org/nixpkgs/manual/#users-guide-to-the-haskell-infrastructure). Users migrating from an earlier release may find helpful information below, in the list of backwards-incompatible changes. Furthermore, we distribute 51(!) additional Haskell package sets that provide every single [LTS Haskell](http://www.stackage.org/) release since version 0.0 as well as the most recent [Stackage Nightly](http://www.stackage.org/) snapshot. The announcement ["Full Stackage Support in Nixpkgs"](https://nixos.org/nix-dev/2015-September/018138.html) gives additional details.
+
+- Nix has been updated to version 1.10, which among other improvements enables cryptographic signatures on binary caches for improved security.
+
+- You can now keep your NixOS system up to date automatically by setting
+
+```nix
+{
+  system.autoUpgrade.enable = true;
+}
+```
+
+This will cause the system to periodically check for updates in your current channel and run `nixos-rebuild`.
+
+- This release is based on Glibc 2.21, GCC 4.9 and Linux 3.18.
+
+- GNOME has been upgraded to 3.16.
+
+- Xfce has been upgraded to 4.12.
+
+- KDE 5 has been upgraded to KDE Frameworks 5.10, Plasma 5.3.2 and Applications 15.04.3. KDE 4 has been updated to kdelibs-4.14.10.
+
+- E19 has been upgraded to 0.16.8.15.
+
+The following new services were added since the last release:
+
+- `services/mail/exim.nix`
+
+- `services/misc/apache-kafka.nix`
+
+- `services/misc/canto-daemon.nix`
+
+- `services/misc/confd.nix`
+
+- `services/misc/devmon.nix`
+
+- `services/misc/gitit.nix`
+
+- `services/misc/ihaskell.nix`
+
+- `services/misc/mbpfan.nix`
+
+- `services/misc/mediatomb.nix`
+
+- `services/misc/mwlib.nix`
+
+- `services/misc/parsoid.nix`
+
+- `services/misc/plex.nix`
+
+- `services/misc/ripple-rest.nix`
+
+- `services/misc/ripple-data-api.nix`
+
+- `services/misc/subsonic.nix`
+
+- `services/misc/sundtek.nix`
+
+- `services/monitoring/cadvisor.nix`
+
+- `services/monitoring/das_watchdog.nix`
+
+- `services/monitoring/grafana.nix`
+
+- `services/monitoring/riemann-tools.nix`
+
+- `services/monitoring/teamviewer.nix`
+
+- `services/network-filesystems/u9fs.nix`
+
+- `services/networking/aiccu.nix`
+
+- `services/networking/asterisk.nix`
+
+- `services/networking/bird.nix`
+
+- `services/networking/charybdis.nix`
+
+- `services/networking/docker-registry-server.nix`
+
+- `services/networking/fan.nix`
+
+- `services/networking/firefox/sync-server.nix`
+
+- `services/networking/gateone.nix`
+
+- `services/networking/heyefi.nix`
+
+- `services/networking/i2p.nix`
+
+- `services/networking/lambdabot.nix`
+
+- `services/networking/mstpd.nix`
+
+- `services/networking/nix-serve.nix`
+
+- `services/networking/nylon.nix`
+
+- `services/networking/racoon.nix`
+
+- `services/networking/skydns.nix`
+
+- `services/networking/shout.nix`
+
+- `services/networking/softether.nix`
+
+- `services/networking/sslh.nix`
+
+- `services/networking/tinc.nix`
+
+- `services/networking/tlsdated.nix`
+
+- `services/networking/tox-bootstrapd.nix`
+
+- `services/networking/tvheadend.nix`
+
+- `services/networking/zerotierone.nix`
+
+- `services/scheduling/marathon.nix`
+
+- `services/security/fprintd.nix`
+
+- `services/security/hologram.nix`
+
+- `services/security/munge.nix`
+
+- `services/system/cloud-init.nix`
+
+- `services/web-servers/shellinabox.nix`
+
+- `services/web-servers/uwsgi.nix`
+
+- `services/x11/unclutter.nix`
+
+- `services/x11/display-managers/sddm.nix`
+
+- `system/boot/coredump.nix`
+
+- `system/boot/loader/loader.nix`
+
+- `system/boot/loader/generic-extlinux-compatible`
+
+- `system/boot/networkd.nix`
+
+- `system/boot/resolved.nix`
+
+- `system/boot/timesyncd.nix`
+
+- `tasks/filesystems/exfat.nix`
+
+- `tasks/filesystems/ntfs.nix`
+
+- `tasks/filesystems/vboxsf.nix`
+
+- `virtualisation/virtualbox-host.nix`
+
+- `virtualisation/vmware-guest.nix`
+
+- `virtualisation/xen-dom0.nix`
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- `sshd` no longer supports DSA and ECDSA host keys by default. If you have existing systems with such host keys and want to continue to use them, please set
+
+```nix
+{
+  system.stateVersion = "14.12";
+}
+```
+
+The new option `system.stateVersion` ensures that certain configuration changes that could break existing systems (such as the `sshd` host key setting) will maintain compatibility with the specified NixOS release. NixOps sets the state version of existing deployments automatically.
+
+- `cron` is no longer enabled by default, unless you have a non-empty `services.cron.systemCronJobs`. To force `cron` to be enabled, set `services.cron.enable = true`.
+
+- Nix now requires binary caches to be cryptographically signed. If you have unsigned binary caches that you want to continue to use, you should set `nix.requireSignedBinaryCaches = false`.
+
+- Steam no longer needs root rights to work. Instead of using `*-steam-chrootenv`, you should now just run `steam`. The `steamChrootEnv` package was renamed to `steam`, and the old `steam` package was renamed to `steamOriginal`.
+
+- CMPlayer has been renamed to bomi upstream. The package `cmplayer` was accordingly renamed to `bomi`.
+
+- Atom Shell has been renamed to Electron upstream. The package `atom-shell` was accordingly renamed to `electron`.
+
+- Elm is not released on Hackage anymore. You should now use `elmPackages.elm` which contains the latest Elm platform.
+
+- The CUPS printing service has been updated to version `2.0.2`. Furthermore its systemd service has been renamed to `cups.service`.
+
+  Local printers are no longer shared or advertised by default. This behavior can be changed by enabling `services.printing.defaultShared` or `services.printing.browsing` respectively.
+
+- The VirtualBox host and guest options have been named more consistently. They can now found in `virtualisation.virtualbox.host.*` instead of `services.virtualboxHost.*` and `virtualisation.virtualbox.guest.*` instead of `services.virtualboxGuest.*`.
+
+  Also, there now is support for the `vboxsf` file system using the `fileSystems` configuration attribute. An example of how this can be used in a configuration:
+
+```nix
+{
+  fileSystems."/shiny" = {
+    device = "myshinysharedfolder";
+    fsType = "vboxsf";
+  };
+}
+```
+
+- "`nix-env -qa`" no longer discovers Haskell packages by name. The only packages visible in the global scope are `ghc`, `cabal-install`, and `stack`, but all other packages are hidden. The reason for this inconvenience is the sheer size of the Haskell package set. Name-based lookups are expensive, and most `nix-env -qa` operations would become much slower if we'd add the entire Hackage database into the top level attribute set. Instead, the list of Haskell packages can be displayed by running:
+
+```ShellSession
+nix-env -f "<nixpkgs>" -qaP -A haskellPackages
+```
+
+Executable programs written in Haskell can be installed with:
+
+```ShellSession
+nix-env -f "<nixpkgs>" -iA haskellPackages.pandoc
+```
+
+Installing Haskell _libraries_ this way, however, is no longer supported. See the next item for more details.
+
+- Previous versions of NixOS came with a feature called `ghc-wrapper`, a small script that allowed GHC to transparently pick up on libraries installed in the user's profile. This feature has been deprecated; `ghc-wrapper` was removed from the distribution. The proper way to register Haskell libraries with the compiler now is the `haskellPackages.ghcWithPackages` function. The [User's Guide to the Haskell Infrastructure](https://nixos.org/nixpkgs/manual/#users-guide-to-the-haskell-infrastructure) provides more information about this subject.
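+
+  A minimal sketch of such an environment (the library names are illustrative):
+
+  ```nix
+  # mtl and text are placeholders; list the Haskell libraries you need
+  haskellPackages.ghcWithPackages (ps: with ps; [ mtl text ])
+  ```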
+
+- All Haskell builds that have been generated with version 1.x of the `cabal2nix` utility are now invalid and need to be re-generated with a current version of `cabal2nix` to function. The most recent version of this tool can be installed by running `nix-env -i cabal2nix`.
+
+- The `haskellPackages` set in Nixpkgs used to have a function attribute called `extension` that users could override in their `~/.nixpkgs/config.nix` files to configure additional attributes, etc. That function still exists, but it's now called `overrides`.
+
+- The OpenBLAS library has been updated to version `0.2.14`. Support for the `x86_64-darwin` platform was added. Dynamic architecture detection was enabled; OpenBLAS now selects microarchitecture-optimized routines at runtime, so optimal performance is achieved without the need to rebuild OpenBLAS locally. OpenBLAS has replaced ATLAS in most packages which use an optimized BLAS or LAPACK implementation.
+
+- The `phpfpm` service now uses the default PHP version (`pkgs.php`) instead of PHP 5.4 (`pkgs.php54`).
+
+- The `locate` service no longer indexes the Nix store by default, preventing packages with potentially numerous versions from cluttering the output. Indexing the store can be activated by setting `services.locate.includeStore = true`.
+
+- The Nix expression search path (`NIX_PATH`) no longer contains `/etc/nixos/nixpkgs` by default. You can override `NIX_PATH` by setting `nix.nixPath`.
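+
+  For example, an illustrative value pointing at a local nixpkgs checkout:
+
+  ```nix
+  {
+    # paths are illustrative; adjust to where your nixpkgs and configuration live
+    nix.nixPath = [ "nixpkgs=/etc/nixos/nixpkgs" "nixos-config=/etc/nixos/configuration.nix" ];
+  }
+  ```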
+
+- Python 2.6 has been marked as broken (as it no longer receives security updates from upstream).
+
+- Any use of module arguments such as `pkgs` to access library functions, or to define `imports` attributes will now lead to an infinite loop at the time of the evaluation.
+
+  In case of an infinite loop, use the `--show-trace` command line argument and read the line just above the error message.
+
+  ```ShellSession
+  $ nixos-rebuild build --show-trace
+  …
+  while evaluating the module argument `pkgs' in "/etc/nixos/my-module.nix":
+  infinite recursion encountered
+  ```
+
+  Any use of `pkgs.lib` should be replaced by `lib`, after adding it as an argument of the module. The following module
+
+  ```nix
+  { config, pkgs, ... }:
+
+  with pkgs.lib;
+
+  {
+    options = {
+      foo = mkOption { … };
+    };
+    config = mkIf config.foo { … };
+  }
+  ```
+
+  should be modified to look like:
+
+  ```nix
+  { config, pkgs, lib, ... }:
+
+  with lib;
+
+  {
+    options = {
+      foo = mkOption { option declaration };
+    };
+    config = mkIf config.foo { option definition };
+  }
+  ```
+
+  When `pkgs` is used to download other projects to import their modules, and only in such cases, it should be replaced by `(import <nixpkgs> {})`. The following module
+
+  ```nix
+  { config, pkgs, ... }:
+
+  let
+    myProject = pkgs.fetchurl {
+      src = url;
+      sha256 = hash;
+    };
+  in
+
+  {
+    imports = [ "${myProject}/module.nix" ];
+  }
+  ```
+
+  should be modified to look like:
+
+  ```nix
+  { config, pkgs, ... }:
+
+  let
+    myProject = (import <nixpkgs> {}).fetchurl {
+      src = url;
+      sha256 = hash;
+    };
+  in
+
+  {
+    imports = [ "${myProject}/module.nix" ];
+  }
+  ```
+
+Other notable improvements:
+
+- The nixos and nixpkgs channels were unified, so one _can_ use `nix-env -iA nixos.bash` instead of `nix-env -iA nixos.pkgs.bash`. See [the commit](https://github.com/NixOS/nixpkgs/commit/2cd7c1f198) for details.
+
+- Users running an SSH server who worry about the quality of their `/etc/ssh/moduli` file with respect to the [vulnerabilities discovered in the Diffie-Hellman key exchange](https://stribika.github.io/2015/01/04/secure-secure-shell.html) can now replace OpenSSH's default version with one they generated themselves using the new `services.openssh.moduliFile` option.
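+
+  A minimal sketch, assuming you have generated your own moduli file at `/etc/ssh/my-moduli`:
+
+  ```nix
+  {
+    # the path is illustrative; point it at the moduli file you generated
+    services.openssh.moduliFile = "/etc/ssh/my-moduli";
+  }
+  ```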
+
+- A newly packaged TeX Live 2015 is provided in `pkgs.texlive`, split into 6500 nix packages. For basic user documentation see [the source](https://github.com/NixOS/nixpkgs/blob/release-15.09/pkgs/tools/typesetting/tex/texlive/default.nix#L1). Beware of [an issue](https://github.com/NixOS/nixpkgs/issues/9757) when installing a too large package set. The plan is to deprecate and maybe delete the original TeX packages until the next release.
+
+- `buildEnv.env` on all Python interpreters is now available for nix-shell interoperability.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1603.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1603.section.md
new file mode 100644
index 000000000000..532a16f937b0
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1603.section.md
@@ -0,0 +1,282 @@
+# Release 16.03 ("Emu", 2016/03/31) {#sec-release-16.03}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- Systemd 229, bringing [numerous improvements](https://github.com/systemd/systemd/blob/v229/NEWS) over 217.
+
+- Linux 4.4 (was 3.18).
+
+- GCC 5.3 (was 4.9). Note that GCC 5 [changes the C++ ABI in an incompatible way](https://gcc.gnu.org/onlinedocs/libstdc++/manual/using_dual_abi.html); this may cause problems if you try to link objects compiled with different versions of GCC.
+
+- Glibc 2.23 (was 2.21).
+
+- Binutils 2.26 (was 2.23.1). See \#909
+
+- Improved support for ensuring [bitwise reproducible builds](https://reproducible-builds.org/). For example, `stdenv` now sets the environment variable `SOURCE_DATE_EPOCH` to a deterministic value, and Nix has [gained an option](https://nixos.org/nix/manual/#ssec-relnotes-1.11) to repeat a build a number of times to test determinism. An ongoing project, the goal of exact reproducibility is to allow binaries to be verified independently (e.g., a user might only trust binaries that appear in three independent binary caches).
+
+- Perl 5.22.
+
+The following new services were added since the last release:
+
+- `services/monitoring/longview.nix`
+
+- `hardware/video/webcam/facetimehd.nix`
+
+- `i18n/input-method/default.nix`
+
+- `i18n/input-method/fcitx.nix`
+
+- `i18n/input-method/ibus.nix`
+
+- `i18n/input-method/nabi.nix`
+
+- `i18n/input-method/uim.nix`
+
+- `programs/fish.nix`
+
+- `security/acme.nix`
+
+- `security/audit.nix`
+
+- `security/oath.nix`
+
+- `services/hardware/irqbalance.nix`
+
+- `services/mail/dspam.nix`
+
+- `services/mail/opendkim.nix`
+
+- `services/mail/postsrsd.nix`
+
+- `services/mail/rspamd.nix`
+
+- `services/mail/rmilter.nix`
+
+- `services/misc/autofs.nix`
+
+- `services/misc/bepasty.nix`
+
+- `services/misc/calibre-server.nix`
+
+- `services/misc/cfdyndns.nix`
+
+- `services/misc/gammu-smsd.nix`
+
+- `services/misc/mathics.nix`
+
+- `services/misc/matrix-synapse.nix`
+
+- `services/misc/octoprint.nix`
+
+- `services/monitoring/hdaps.nix`
+
+- `services/monitoring/heapster.nix`
+
+- `services/monitoring/longview.nix`
+
+- `services/network-filesystems/netatalk.nix`
+
+- `services/network-filesystems/xtreemfs.nix`
+
+- `services/networking/autossh.nix`
+
+- `services/networking/dnschain.nix`
+
+- `services/networking/gale.nix`
+
+- `services/networking/miniupnpd.nix`
+
+- `services/networking/namecoind.nix`
+
+- `services/networking/ostinato.nix`
+
+- `services/networking/pdnsd.nix`
+
+- `services/networking/shairport-sync.nix`
+
+- `services/networking/supplicant.nix`
+
+- `services/search/kibana.nix`
+
+- `services/security/haka.nix`
+
+- `services/security/physlock.nix`
+
+- `services/web-apps/pump.io.nix`
+
+- `services/x11/hardware/libinput.nix`
+
+- `services/x11/window-managers/windowlab.nix`
+
+- `system/boot/initrd-network.nix`
+
+- `system/boot/initrd-ssh.nix`
+
+- `system/boot/loader/loader.nix`
+
+- `system/boot/networkd.nix`
+
+- `system/boot/resolved.nix`
+
+- `virtualisation/lxd.nix`
+
+- `virtualisation/rkt.nix`
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- We no longer produce graphical ISO images and VirtualBox images for `i686-linux`. A minimal ISO image is still provided.
+
+- Firefox and similar browsers are now _wrapped by default_. The package and attribute names are plain `firefox` or `midori`, etc. Backward-compatibility attributes were set up, but note that `nix-env -u` will _not_ update your current `firefox-with-plugins`; you have to uninstall it and install `firefox` instead.
+
+- `wmiiSnap` has been replaced with `wmii_hg`, but `services.xserver.windowManager.wmii.enable` has been updated accordingly, so this only affects you if you have explicitly installed `wmiiSnap`.
+
+- The `jobs` NixOS option has been removed. It served as a compatibility layer between Upstart jobs and systemd services. All services have been rewritten to use `systemd.services`.
+
+- `wmiimenu` is removed, as it has been removed by the developers upstream. Use `wimenu` from the `wmii-hg` package.
+
+- Gitit is no longer automatically added to the module list in NixOS and as such there will not be any manual entries for it. You will need to add an import statement to your NixOS configuration in order to use it, e.g.
+
+  ```nix
+  {
+    imports = [ <nixpkgs/nixos/modules/services/misc/gitit.nix> ];
+  }
+  ```
+
+  will include the Gitit service configuration options.
+
+- `nginx` does not accept flags for enabling and disabling modules anymore. Instead it accepts `modules` argument, which is a list of modules to be built in. All modules now reside in `nginxModules` set. Example configuration:
+
+  ```nix
+  nginx.override {
+    modules = [ nginxModules.rtmp nginxModules.dav nginxModules.moreheaders ];
+  }
+  ```
+
+- `s3sync` is removed, as it hasn't been developed by upstream for 4 years and only runs with ruby 1.8. For an actively-developed alternative, look at `tarsnap` and others.
+
+- `ruby_1_8` has been removed as it's not supported from upstream anymore and probably contains security issues.
+
+- The `tidy-html5` package is removed. Upstream only provided `(lib)tidy5` during development, and has now gone back to `(lib)tidy` to work as a drop-in replacement for the original package, which had been unmaintained for years. You can (still) use the `html-tidy` package, which got updated to a stable release from this new upstream.
+
+- `extraDeviceOptions` argument is removed from `bumblebee` package. Instead there are now two separate arguments: `extraNvidiaDeviceOptions` and `extraNouveauDeviceOptions` for setting extra X11 options for nvidia and nouveau drivers, respectively.
+
+- The `Ctrl+Alt+Backspace` key combination no longer kills the X server by default. There's a new option `services.xserver.enableCtrlAltBackspace` that allows enabling the combination again.
+
+- `emacsPackagesNg` now contains all packages from the ELPA, MELPA, and MELPA Stable repositories.
+
+- The data directory for the Postfix MTA server has moved from `/var/postfix` to `/var/lib/postfix`. Old configurations are migrated automatically. The `services.postfix` module has also received many improvements, such as correct directory access rights, new `aliasFiles` and `mapFiles` options, and more.
+
+- Filesystem options should now be configured as a list of strings, not a comma-separated string. The old style will continue to work, but print a warning, until the 16.09 release. An example of the new style:
+
+  ```nix
+  {
+    fileSystems."/example" = {
+      device = "/dev/sdc";
+      fsType = "btrfs";
+      options = [ "noatime" "compress=lzo" "space_cache" "autodefrag" ];
+    };
+  }
+  ```
+
+- CUPS, installed by `services.printing` module, now has its data directory in `/var/lib/cups`. Old configurations from `/etc/cups` are moved there automatically, but there might be problems. Also configuration options `services.printing.cupsdConf` and `services.printing.cupsdFilesConf` were removed because they had been allowing one to override configuration variables required for CUPS to work at all on NixOS. For most use cases, `services.printing.extraConf` and new option `services.printing.extraFilesConf` should be enough; if you encounter a situation when they are not, please file a bug.
+
+  There are also Gutenprint improvements; in particular, a new option `services.printing.gutenprint` is added to enable automatic updating of Gutenprint PPMs; it's strongly recommended to enable it instead of adding `gutenprint` to the `drivers` list.
+
+- `services.xserver.vaapiDrivers` has been removed. Use `hardware.opengl.extraPackages{,32}` instead. You can also specify VDPAU drivers there.
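+
+  For example, with an illustrative driver choice (`pkgs.vaapiIntel` is an assumption; pick the VA-API/VDPAU packages your hardware needs):
+
+  ```nix
+  {
+    # vaapiIntel is illustrative; substitute the driver package for your GPU
+    hardware.opengl.extraPackages = [ pkgs.vaapiIntel ];
+  }
+  ```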
+
+- `programs.ibus` moved to `i18n.inputMethod.ibus`. The option `programs.ibus.plugins` changed to `i18n.inputMethod.ibus.engines` and the option to enable ibus changed from `programs.ibus.enable` to `i18n.inputMethod.enabled`. `i18n.inputMethod.enabled` should be set to the used input method name, `"ibus"` for ibus. An example of the new style:
+
+  ```nix
+  {
+    i18n.inputMethod.enabled = "ibus";
+    i18n.inputMethod.ibus.engines = with pkgs.ibus-engines; [ anthy mozc ];
+  }
+  ```
+
+  That is equivalent to the old version:
+
+  ```nix
+  {
+    programs.ibus.enable = true;
+    programs.ibus.plugins = with pkgs; [ ibus-anthy mozc ];
+  }
+  ```
+
+- `services.udev.extraRules` option now writes rules to `99-local.rules` instead of `10-local.rules`. This makes all the user rules apply after others, so their results wouldn't be overridden by anything else.
+
+- Large parts of the `services.gitlab` module have been rewritten. There are new configuration options available. The `stateDir` option was renamed to `statePath` and the `satellitesDir` option was removed. Please review the currently available options.
+
+- The option `services.nsd.zones.<name>.data` no longer interprets the dollar sign (\$) as a shell variable, so it should not be escaped anymore. Thus the following zone data:
+
+  ```dns-zone
+  \$ORIGIN example.com.
+  \$TTL 1800
+  @       IN      SOA     ns1.vpn.nbp.name.      admin.example.com. (
+
+  ```
+
+  should be modified to look like the actual file expected by nsd:
+
+  ```dns-zone
+  $ORIGIN example.com.
+  $TTL 1800
+  @       IN      SOA     ns1.vpn.nbp.name.      admin.example.com. (
+
+  ```
+
+- The `services.syncthing.dataDir` option now has to point to the exact folder that syncthing writes to. An example configuration should look something like:
+
+  ```nix
+  {
+    services.syncthing = {
+      enable = true;
+      dataDir = "/home/somebody/.syncthing";
+      user = "somebody";
+    };
+  }
+  ```
+
+- `networking.firewall.allowPing` is now enabled by default. Users are encouraged to configure an appropriate rate limit for their machines using the Kernel interface at `/proc/sys/net/ipv4/icmp_ratelimit` and `/proc/sys/net/ipv6/icmp/ratelimit` or using the firewall itself, i.e. by setting the NixOS option `networking.firewall.pingLimit`.
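+
+  For example, an illustrative rate limit, assuming the option takes an iptables limit specification:
+
+  ```nix
+  {
+    # the values below are only an illustration; tune them to your needs
+    networking.firewall.pingLimit = "--limit 1/minute --limit-burst 5";
+  }
+  ```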
+
+- Systems with some Broadcom cards used to result in a generated config that is no longer accepted. If you get errors like
+
+  ```ShellSession
+  error: path ‘/nix/store/*-broadcom-sta-*’ does not exist and cannot be created
+  ```
+
+  you should either re-run `nixos-generate-config` or manually replace `"${config.boot.kernelPackages.broadcom_sta}"` by `config.boot.kernelPackages.broadcom_sta` in your `/etc/nixos/hardware-configuration.nix`. More discussion is on [the GitHub pull request](https://github.com/NixOS/nixpkgs/pull/12595).
+
+- The `services.xserver.startGnuPGAgent` option has been removed. GnuPG 2.1.x changed the way the gpg-agent works, and that new approach no longer requires (or even supports) the "start everything as a child of the agent" scheme we've implemented in NixOS for older versions. To configure the gpg-agent for your X session, add the following code to `~/.bashrc` or some file that's sourced when your shell is started:
+
+  ```shell
+  GPG_TTY=$(tty)
+  export GPG_TTY
+  ```
+
+  If you want to use gpg-agent for SSH, too, add the following to your session initialization (e.g. `displayManager.sessionCommands`):
+
+  ```shell
+  gpg-connect-agent /bye
+  unset SSH_AGENT_PID
+  export SSH_AUTH_SOCK="${HOME}/.gnupg/S.gpg-agent.ssh"
+  ```
+
+  and make sure that
+
+  ```conf
+  enable-ssh-support
+  ```
+
+  is included in your `~/.gnupg/gpg-agent.conf`. You will need to use `ssh-add` to re-add your ssh keys. If gpg's automatic transformation of the private keys to the new format fails, you will need to re-import your private keyring as well:
+
+  ```ShellSession
+  gpg --import ~/.gnupg/secring.gpg
+  ```
+
+  The `gpg-agent(1)` man page has more details about this subject, e.g. in the "EXAMPLES" section.
+
+Other notable improvements:
+
+- The `ejabberd` module has been brought back and now works on NixOS.
+
+- Input method support was improved. New NixOS modules (fcitx, nabi and uim), fcitx engines (chewing, hangul, m17n, mozc and table-other) and ibus engines (hangul and m17n) have been added.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1609.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1609.section.md
new file mode 100644
index 000000000000..0cbabf58ca03
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1609.section.md
@@ -0,0 +1,73 @@
+# Release 16.09 ("Flounder", 2016/09/30) {#sec-release-16.09}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- Many NixOS configurations and Nix packages now use significantly less disk space, thanks to the [extensive work on closure size reduction](https://github.com/NixOS/nixpkgs/issues/7117). For example, the closure size of a minimal NixOS container went down from \~424 MiB in 16.03 to \~212 MiB in 16.09, while the closure size of Firefox went from \~651 MiB to \~259 MiB.
+
+- To improve security, packages are now [built using various hardening features](https://github.com/NixOS/nixpkgs/pull/12895). See the Nixpkgs manual for more information.
+
+- Support for PXE netboot. See [](#sec-booting-from-pxe) for documentation.
+
+- X.org server 1.18. If you use the `ati_unfree` driver, 1.17 is still used due to an ABI incompatibility.
+
+- This release is based on Glibc 2.24, GCC 5.4.0 and systemd 231. The default Linux kernel remains 4.4.
+
+The following new services were added since the last release:
+
+- `(this will get automatically generated at release time)`
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- A large number of packages have been converted to use the multiple outputs feature of Nix to greatly reduce the amount of required disk space, as mentioned above. This may require changes to any custom packages to make them build again; see the relevant chapter in the Nixpkgs manual for more information. (Additional caveat to packagers: some packaging conventions related to multiple-output packages [were changed](https://github.com/NixOS/nixpkgs/pull/14766) late (August 2016) in the release cycle and differ from the initial introduction of multiple outputs.)
+
+- Previous versions of Nixpkgs had support for all versions of the LTS Haskell package set. That support has been dropped. The previously provided `haskell.packages.lts-x_y` package sets still exist in name to avoid breaking user code, but these package sets don't actually contain the versions mandated by the corresponding LTS release. Instead, our package set is loosely based on the latest available LTS release, i.e. LTS 7.x at the time of this writing. New releases of NixOS and Nixpkgs will drop those old names entirely. [The motivation for this change](https://nixos.org/nix-dev/2016-June/020585.html) has been discussed at length on the `nix-dev` mailing list and in [Github issue \#14897](https://github.com/NixOS/nixpkgs/issues/14897). Development strategies for Haskell hackers who want to rely on Nix and NixOS have been described in [another nix-dev article](https://nixos.org/nix-dev/2016-June/020642.html).
+
+- Shell aliases for systemd sub-commands [were dropped](https://github.com/NixOS/nixpkgs/pull/15598): `start`, `stop`, `restart`, `status`.
+
+- Redis now binds to 127.0.0.1 only instead of listening to all network interfaces. This is the default behavior of Redis 3.2.
+
+- `/var/empty` is now immutable. The activation script runs `chattr +i` to forbid any modifications inside the folder. See [the pull request](https://github.com/NixOS/nixpkgs/pull/18365) for what bugs this caused.
+
+- Gitlab's maintenance script `gitlab-runner` was removed and split up into the clearer `gitlab-run` and `gitlab-rake` scripts, because `gitlab-runner` is a component of Gitlab CI.
+
+- `services.xserver.libinput.accelProfile` default changed from `flat` to `adaptive`, as per [ official documentation](https://wayland.freedesktop.org/libinput/doc/latest/group__config.html#gad63796972347f318b180e322e35cee79).
+
+- `fonts.fontconfig.ultimate.rendering` was removed because our presets were obsolete for some time. New presets are hardcoded into FreeType; you can select a preset via `fonts.fontconfig.ultimate.preset`. You can customize those presets via ordinary environment variables, using `environment.variables`.
+
+- The `audit` service is no longer enabled by default. Use `security.audit.enable = true` to explicitly enable it.
+
+- `pkgs.linuxPackages.virtualbox` now contains only the kernel modules instead of the VirtualBox user space binaries. If you want to reference the user space binaries, you have to use the new `pkgs.virtualbox` instead.
+
+- `goPackages` was replaced with separated Go applications in appropriate `nixpkgs` categories. Each Go package uses its own dependency set. There's also a new `go2nix` tool introduced to generate a Go package definition from its Go source automatically.
+
+- `services.mongodb.extraConfig` configuration format was changed to YAML.
+
+- PHP has been upgraded to 7.0
+
+Other notable improvements:
+
+- Revamped grsecurity/PaX support. There is now only a single general-purpose distribution kernel and the configuration interface has been streamlined. Desktop users should be able to set
+
+  ```nix
+  {
+    security.grsecurity.enable = true;
+  }
+  ```
+
+  to get a reasonably secure system without having to sacrifice too much functionality.
+
+- Special filesystems, like `/proc`, `/run` and others, now have the same mount options as recommended by systemd and are unified across different places in NixOS. Mount options are updated during `nixos-rebuild switch` if possible. One benefit from this is improved security --- most such filesystems are now mounted with `noexec`, `nodev` and/or `nosuid` options.
+
+- The reverse path filter was interfering with DHCPv4 server operation in the past. An exception for DHCPv4 and a new option to log packets that were dropped due to the reverse path filter was added (`networking.firewall.logReversePathDrops`) for easier debugging.
+
+- Containers configuration within `containers.<name>.config` is [now properly typed and checked](https://github.com/NixOS/nixpkgs/pull/17365). In particular, partial configurations are merged correctly.
+
+- The directory containing setuid wrapper programs, `/var/setuid-wrappers`, [is now updated atomically to prevent failures if the switch to a new configuration is interrupted.](https://github.com/NixOS/nixpkgs/pull/18124)
+
+- `services.xserver.startGnuPGAgent` has been removed due to the GnuPG 2.1.x bump. See [how to achieve similar behavior](https://github.com/NixOS/nixpkgs/commit/5391882ebd781149e213e8817fba6ac3c503740c). You might need to `pkill gpg-agent` after the upgrade to prevent a stale agent being in the way.
+
+- [Declarative users could share a UID due to a bug in the script handling conflict resolution; this has been fixed.](https://github.com/NixOS/nixpkgs/commit/e561edc322d275c3687fec431935095cfc717147)
+
+- Gummiboot has been replaced by systemd-boot.
+
+- The Hydra package and NixOS module were added for convenience.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md
new file mode 100644
index 000000000000..b82c41e28ca3
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1703.section.md
@@ -0,0 +1,303 @@
+# Release 17.03 ("Gorilla", 2017/03/31) {#sec-release-17.03}
+
+## Highlights {#sec-release-17.03-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- Nixpkgs is now extensible through overlays. See the [Nixpkgs manual](https://nixos.org/nixpkgs/manual/#sec-overlays-install) for more information.
+
+- This release is based on Glibc 2.25, GCC 5.4.0 and systemd 232. The default Linux kernel is 4.9 and Nix is at 1.11.8.
+
+- The default desktop environment is now KDE's Plasma 5. KDE 4 has been removed.
+
+- The setuid wrapper functionality now supports setting capabilities.
+
+- X.org server uses branch 1.19. Due to ABI incompatibilities, `ati_unfree` keeps forcing 1.17 and `amdgpu-pro` starts forcing 1.18.
+
+- Cross compilation has been rewritten. See the nixpkgs manual for details. The most obvious breaking change is that derivations no longer have `.nativeDrv` or `.crossDrv` attributes and are now cross by default, not native.
+
+- The `overridePackages` function has been replaced by [overlays](https://nixos.org/nixpkgs/manual/#sec-overlays-install).
+
+- Packages in nixpkgs can be marked as insecure through listed vulnerabilities. See the [Nixpkgs manual](https://nixos.org/nixpkgs/manual/#sec-allow-insecure) for more information.
+
+- PHP now defaults to PHP 7.1.
+
+## New Services {#sec-release-17.03-new-services}
+
+The following new services were added since the last release:
+
+- `hardware/ckb.nix`
+
+- `hardware/mcelog.nix`
+
+- `hardware/usb-wwan.nix`
+
+- `hardware/video/capture/mwprocapture.nix`
+
+- `programs/adb.nix`
+
+- `programs/chromium.nix`
+
+- `programs/gphoto2.nix`
+
+- `programs/java.nix`
+
+- `programs/mtr.nix`
+
+- `programs/oblogout.nix`
+
+- `programs/vim.nix`
+
+- `programs/wireshark.nix`
+
+- `security/dhparams.nix`
+
+- `services/audio/ympd.nix`
+
+- `services/computing/boinc/client.nix`
+
+- `services/continuous-integration/buildbot/master.nix`
+
+- `services/continuous-integration/buildbot/worker.nix`
+
+- `services/continuous-integration/gitlab-runner.nix`
+
+- `services/databases/riak-cs.nix`
+
+- `services/databases/stanchion.nix`
+
+- `services/desktops/gnome3/gnome-terminal-server.nix`
+
+- `services/editors/infinoted.nix`
+
+- `services/hardware/illum.nix`
+
+- `services/hardware/trezord.nix`
+
+- `services/logging/journalbeat.nix`
+
+- `services/mail/offlineimap.nix`
+
+- `services/mail/postgrey.nix`
+
+- `services/misc/couchpotato.nix`
+
+- `services/misc/docker-registry.nix`
+
+- `services/misc/errbot.nix`
+
+- `services/misc/geoip-updater.nix`
+
+- `services/misc/gogs.nix`
+
+- `services/misc/leaps.nix`
+
+- `services/misc/nix-optimise.nix`
+
+- `services/misc/ssm-agent.nix`
+
+- `services/misc/sssd.nix`
+
+- `services/monitoring/arbtt.nix`
+
+- `services/monitoring/netdata.nix`
+
+- `services/monitoring/prometheus/default.nix`
+
+- `services/monitoring/prometheus/alertmanager.nix`
+
+- `services/monitoring/prometheus/blackbox-exporter.nix`
+
+- `services/monitoring/prometheus/json-exporter.nix`
+
+- `services/monitoring/prometheus/nginx-exporter.nix`
+
+- `services/monitoring/prometheus/node-exporter.nix`
+
+- `services/monitoring/prometheus/snmp-exporter.nix`
+
+- `services/monitoring/prometheus/unifi-exporter.nix`
+
+- `services/monitoring/prometheus/varnish-exporter.nix`
+
+- `services/monitoring/sysstat.nix`
+
+- `services/monitoring/telegraf.nix`
+
+- `services/monitoring/vnstat.nix`
+
+- `services/network-filesystems/cachefilesd.nix`
+
+- `services/network-filesystems/glusterfs.nix`
+
+- `services/network-filesystems/ipfs.nix`
+
+- `services/networking/dante.nix`
+
+- `services/networking/dnscrypt-wrapper.nix`
+
+- `services/networking/fakeroute.nix`
+
+- `services/networking/flannel.nix`
+
+- `services/networking/htpdate.nix`
+
+- `services/networking/miredo.nix`
+
+- `services/networking/nftables.nix`
+
+- `services/networking/powerdns.nix`
+
+- `services/networking/pdns-recursor.nix`
+
+- `services/networking/quagga.nix`
+
+- `services/networking/redsocks.nix`
+
+- `services/networking/wireguard.nix`
+
+- `services/system/cgmanager.nix`
+
+- `services/torrent/opentracker.nix`
+
+- `services/web-apps/atlassian/confluence.nix`
+
+- `services/web-apps/atlassian/crowd.nix`
+
+- `services/web-apps/atlassian/jira.nix`
+
+- `services/web-apps/frab.nix`
+
+- `services/web-apps/nixbot.nix`
+
+- `services/web-apps/selfoss.nix`
+
+- `services/web-apps/quassel-webserver.nix`
+
+- `services/x11/unclutter-xfixes.nix`
+
+- `services/x11/urxvtd.nix`
+
+- `system/boot/systemd-nspawn.nix`
+
+- `virtualisation/ecs-agent.nix`
+
+- `virtualisation/lxcfs.nix`
+
+- `virtualisation/openstack/keystone.nix`
+
+- `virtualisation/openstack/glance.nix`
+
+## Backward Incompatibilities {#sec-release-17.03-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- Derivations no longer have `.nativeDrv` or `.crossDrv` attributes and are now cross by default, not native.
+
+- `stdenv.overrides` is now expected to take `self` and `super` arguments. See `lib.trivial.extends` for what those parameters represent.
+
+- `ansible` now defaults to ansible version 2 as version 1 has been removed due to a serious [ vulnerability](https://www.computest.nl/advisories/CT-2017-0109_Ansible.txt) unpatched by upstream.
+
+- `gnome` alias has been removed along with `gtk`, `gtkmm` and several others. Now you need to use versioned attributes, like `gnome3`.
+
+- The attribute name of the Radicale daemon has been changed from `pythonPackages.radicale` to `radicale`.
+
+- The `stripHash` bash function in `stdenv` changed according to its documentation; it now outputs the stripped name to `stdout` instead of putting it in the variable `strippedName`.
+
+- PHP now scans for extra configuration .ini files in /etc/php.d instead of /etc. This prevents accidentally loading non-PHP .ini files that may be in /etc.
+
+- Two lone top-level dict dbs moved into `dictdDBs`. This affects: `dictdWordnet` which is now at `dictdDBs.wordnet` and `dictdWiktionary` which is now at `dictdDBs.wiktionary`
+
+- Parsoid service now uses YAML configuration format. `service.parsoid.interwikis` is now called `service.parsoid.wikis` and is a list of either API URLs or attribute sets as specified in parsoid's documentation.
+
+- `ntpd` was replaced by `systemd-timesyncd` as the default service to synchronize system time with a remote NTP server. The old behavior can be restored by setting `services.ntp.enable` to `true`. Upstream time servers for all NTP implementations are now configured using `networking.timeServers`.
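+
+  A minimal sketch of restoring the old behaviour and pinning upstream servers (the server names here are only illustrative):
+
+  ```nix
+  {
+    # switch back from systemd-timesyncd to ntpd
+    services.ntp.enable = true;
+    networking.timeServers = [ "0.pool.ntp.org" "1.pool.ntp.org" ];
+  }
+  ```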
+
+- `service.nylon` is now declared using named instances. As an example:
+
+  ```nix
+  {
+    services.nylon = {
+      enable = true;
+      acceptInterface = "br0";
+      bindInterface = "tun1";
+      port = 5912;
+    };
+  }
+  ```
+
+  should be replaced with:
+
+  ```nix
+  {
+    services.nylon.myvpn = {
+      enable = true;
+      acceptInterface = "br0";
+      bindInterface = "tun1";
+      port = 5912;
+    };
+  }
+  ```
+
+  This enables you to declare a SOCKS proxy for each uplink.
+
+- The `overridePackages` function no longer exists. It has been replaced by [overlays](https://nixos.org/nixpkgs/manual/#sec-overlays-install). For example, the following code:
+
+  ```nix
+  let
+    pkgs = import <nixpkgs> {};
+  in
+    pkgs.overridePackages (self: super: ...)
+  ```
+
+  should be replaced by:
+
+  ```nix
+  let
+    pkgs = import <nixpkgs> {};
+  in
+    import pkgs.path { overlays = [(self: super: ...)]; }
+  ```
+
+- Autoloading connection tracking helpers is now disabled by default. This default was also changed in the Linux kernel and is considered insecure if not configured properly in your firewall. If you need connection tracking helpers (i.e. for active FTP) please enable `networking.firewall.autoLoadConntrackHelpers` and tune `networking.firewall.connectionTrackingModules` to suit your needs.
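+
+  A minimal sketch of re-enabling the helpers, assuming active FTP is the use case (the `ftp` module name is only an example):
+
+  ```nix
+  {
+    networking.firewall.autoLoadConntrackHelpers = true;
+    # load only the helpers you actually need
+    networking.firewall.connectionTrackingModules = [ "ftp" ];
+  }
+  ```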
+
+- `local_recipient_maps` is no longer set to an empty value by the Postfix service, as the Postfix documentation states that this is an insecure default. Those who want to retain this setting need to set it via `services.postfix.extraConfig`.
+
+- `iputils` no longer provides `ping6` and `traceroute6`. The functionality of these tools has been integrated into `ping` and `traceroute` respectively. To enforce an address family, the new flags `-4` and `-6` have been added. One notable incompatibility is that specifying an interface (for link-local IPv6, for instance) is no longer done with the `-I` flag, but by encoding the interface into the address (`ping fe80::1%eth0`).
+
+- The socket handling of the `services.rmilter` module has been fixed and refactored. As rmilter doesn't support binding to more than one socket, the options `bindUnixSockets` and `bindInetSockets` have been replaced by `services.rmilter.bindSocket.*`. The default is still a unix socket in `/run/rmilter/rmilter.sock`. Refer to the options documentation for more information.
+
+- The `fetch*` functions no longer support md5, please use sha256 instead.
+
+- The dnscrypt-proxy module interface has been streamlined around the `extraArgs` option. Where possible, legacy option declarations are mapped to `extraArgs` but will emit warnings. The `resolverList` has been outright removed: to use an unlisted resolver, use the `customResolver` option.
+
+- torbrowser now stores local state under `~/.local/share/tor-browser` by default. Any browser profile data from the old location, `~/.torbrowser4`, must be migrated manually.
+
+- The ihaskell, monetdb, offlineimap and sitecopy services have been removed.
+
+## Other Notable Changes {#sec-release-17.03-notable-changes}
+
+- The module type system has a new extensible option types feature that allows extending certain types, such as enum, through multiple declarations of the same option across multiple modules.
+
+- `jre` now uses the GTK UI by default. This improves visual consistency and makes Java follow the system font style, improving the situation on HighDPI displays. This comes at the cost of increased closure size; for servers and other headless workloads it's recommended to use `jre_headless`.
+
+- Python 2.6 interpreter and package set have been removed.
+
+- The Python 2.7 interpreter does not use modules anymore. Instead, all CPython interpreters now include the whole standard library except for `tkinter`, which is available in the Python package set.
+
+- Python 2.7, 3.5 and 3.6 are now built deterministically, and 3.4 mostly so. Minor modifications had to be made to the interpreters in order to generate deterministic bytecode. This has security implications and is relevant for those using Python in a `nix-shell`. See the Nixpkgs manual for details.
+
+- The Python package sets now use a fixed-point combinator and the sets are available as attributes of the interpreters.
+
+- The Python function `buildPythonPackage` has been improved and can be used to build from Setuptools source, Flit source, and precompiled Wheels.
+
+- When adding new or updating current Python libraries, the expressions should be put in separate files in `pkgs/development/python-modules` and called from `python-packages.nix`.
+
+- The dnscrypt-proxy service supports synchronizing the list of public resolvers without working DNS resolution. This fixes issues caused by the resolver list becoming outdated. It also improves the viability of DNSCrypt only configurations.
+
+- Containers using bridged networking no longer lose their connection after changes to the host networking.
+
+- ZFS supports pool auto scrubbing.
+
+- The bind DNS utilities (e.g. dig) have been split into their own output and are now also available in `pkgs.dnsutils` and it is no longer necessary to pull in all of `bind` to use them.
+
+- Per-user configuration was moved from `~/.nixpkgs` to `~/.config/nixpkgs`. The former is still valid for `config.nix` for backwards compatibility.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1709.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1709.section.md
new file mode 100644
index 000000000000..f2ff8b46b83f
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1709.section.md
@@ -0,0 +1,316 @@
+# Release 17.09 ("Hummingbird", 2017/09/??) {#sec-release-17.09}
+
+## Highlights {#sec-release-17.09-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- The GNOME version is now 3.24. KDE Plasma was upgraded to 5.10, KDE Applications to 17.08.1 and KDE Frameworks to 5.37.
+
+- The user handling now keeps track of deallocated UIDs/GIDs. When a user or group is revived, this allows it to be allocated the UID/GID it had before. A consequence is that UIDs and GIDs are no longer reused.
+
+- The module option `services.xserver.xrandrHeads` now causes the first head specified in this list to be set as the primary head. Apart from that, it's now possible to also set additional options by using an attribute set, for example:
+
+  ```nix
+  { services.xserver.xrandrHeads = [
+      "HDMI-0"
+      {
+        output = "DVI-0";
+        primary = true;
+        monitorConfig = ''
+          Option "Rotate" "right"
+        '';
+      }
+    ];
+  }
+  ```
+
+  This will set the `DVI-0` output to be the primary head, even though `HDMI-0` is the first head in the list.
+
+- The handling of SSL in the `services.nginx` module has been cleaned up, renaming the misnamed `enableSSL` to `onlySSL`, which reflects its original intention. This is not to be used with the already existing `forceSSL`, which creates a second non-SSL virtual host redirecting to the SSL virtual host. This had by chance worked earlier due to specific implementation details. In case you had specified both, please remove the `enableSSL` option to keep the previous behaviour.
+
+  Another `addSSL` option has been introduced to configure both a non-SSL virtual host and an SSL virtual host with the same configuration.
+
+  Options to configure `resolver` options and `upstream` blocks have been introduced. See their information for further details.
+
+  The `port` option has been replaced by a more generic `listen` option, which makes it possible to specify multiple addresses, ports and SSL configs dependent on the new SSL handling mentioned above.
+
+## New Services {#sec-release-17.09-new-services}
+
+The following new services were added since the last release:
+
+- `config/fonts/fontconfig-penultimate.nix`
+
+- `config/fonts/fontconfig-ultimate.nix`
+
+- `config/terminfo.nix`
+
+- `hardware/sensor/iio.nix`
+
+- `hardware/nitrokey.nix`
+
+- `hardware/raid/hpsa.nix`
+
+- `programs/browserpass.nix`
+
+- `programs/gnupg.nix`
+
+- `programs/qt5ct.nix`
+
+- `programs/slock.nix`
+
+- `programs/thefuck.nix`
+
+- `security/auditd.nix`
+
+- `security/lock-kernel-modules.nix`
+
+- `service-managers/docker.nix`
+
+- `service-managers/trivial.nix`
+
+- `services/admin/salt/master.nix`
+
+- `services/admin/salt/minion.nix`
+
+- `services/audio/slimserver.nix`
+
+- `services/cluster/kubernetes/default.nix`
+
+- `services/cluster/kubernetes/dns.nix`
+
+- `services/cluster/kubernetes/dashboard.nix`
+
+- `services/continuous-integration/hail.nix`
+
+- `services/databases/clickhouse.nix`
+
+- `services/databases/postage.nix`
+
+- `services/desktops/gnome3/gnome-disks.nix`
+
+- `services/desktops/gnome3/gpaste.nix`
+
+- `services/logging/SystemdJournal2Gelf.nix`
+
+- `services/logging/heartbeat.nix`
+
+- `services/logging/journalwatch.nix`
+
+- `services/logging/syslogd.nix`
+
+- `services/mail/mailhog.nix`
+
+- `services/mail/nullmailer.nix`
+
+- `services/misc/airsonic.nix`
+
+- `services/misc/autorandr.nix`
+
+- `services/misc/exhibitor.nix`
+
+- `services/misc/fstrim.nix`
+
+- `services/misc/gollum.nix`
+
+- `services/misc/irkerd.nix`
+
+- `services/misc/jackett.nix`
+
+- `services/misc/radarr.nix`
+
+- `services/misc/snapper.nix`
+
+- `services/monitoring/osquery.nix`
+
+- `services/monitoring/prometheus/collectd-exporter.nix`
+
+- `services/monitoring/prometheus/fritzbox-exporter.nix`
+
+- `services/network-filesystems/kbfs.nix`
+
+- `services/networking/dnscache.nix`
+
+- `services/networking/fireqos.nix`
+
+- `services/networking/iwd.nix`
+
+- `services/networking/keepalived/default.nix`
+
+- `services/networking/keybase.nix`
+
+- `services/networking/lldpd.nix`
+
+- `services/networking/matterbridge.nix`
+
+- `services/networking/squid.nix`
+
+- `services/networking/tinydns.nix`
+
+- `services/networking/xrdp.nix`
+
+- `services/security/shibboleth-sp.nix`
+
+- `services/security/sks.nix`
+
+- `services/security/sshguard.nix`
+
+- `services/security/torify.nix`
+
+- `services/security/usbguard.nix`
+
+- `services/security/vault.nix`
+
+- `services/system/earlyoom.nix`
+
+- `services/system/saslauthd.nix`
+
+- `services/web-apps/nexus.nix`
+
+- `services/web-apps/pgpkeyserver-lite.nix`
+
+- `services/web-apps/piwik.nix`
+
+- `services/web-servers/lighttpd/collectd.nix`
+
+- `services/web-servers/minio.nix`
+
+- `services/x11/display-managers/xpra.nix`
+
+- `services/x11/xautolock.nix`
+
+- `tasks/filesystems/bcachefs.nix`
+
+- `tasks/powertop.nix`
+
+## Backward Incompatibilities {#sec-release-17.09-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- **In a QEMU-based virtualization environment, the network interface names changed, e.g. from `enp0s3` to `ens3`.**
+
+  This is due to a kernel configuration change. The new naming is consistent with those of other Linux distributions with systemd. See [\#29197](https://github.com/NixOS/nixpkgs/issues/29197) for more information.
+
+  A machine is affected if the `virt-what` tool either returns `qemu` or `kvm` _and_ has interface names used in any part of its NixOS configuration, in particular if a static network configuration with `networking.interfaces` is used.
+
+  Before rebooting affected machines, please do the following:
+
+  - Change the interface names in your NixOS configuration. The first interface will be called `ens3`, the second one `ens8`, and subsequent ones increment by 1 from there.
+
+  - After changing the interface names, rebuild your system with `nixos-rebuild boot` to activate the new configuration after a reboot. If you switch to the new configuration right away you might lose network connectivity! If using `nixops`, deploy with `nixops deploy --force-reboot`.
+
+- The following changes apply if the `stateVersion` is changed to 17.09 or higher. For `stateVersion = "17.03"` or lower the old behavior is preserved.
+
+  - The `postgres` default version was changed from 9.5 to 9.6.
+
+  - The `postgres` superuser name has changed from `root` to `postgres` to more closely follow what other Linux distributions are doing.
+
+  - The `postgres` default `dataDir` has changed from `/var/db/postgres` to `/var/lib/postgresql/$psqlSchema` where \$psqlSchema is 9.6 for example.
+
+  - The `mysql` default `dataDir` has changed from `/var/mysql` to `/var/lib/mysql`.
+
+  - Radicale's default package has changed from 1.x to 2.x. Instructions to migrate can be found [here](http://radicale.org/1to2/). It is also possible to use the newer version by setting the `package` to `radicale2`, which is done automatically when `stateVersion` is 17.09 or higher. The `extraArgs` option has been added to allow passing the data migration arguments specified in the instructions; see the `radicale.nix` NixOS test for an example migration.
+
+- The `aiccu` package was removed. This is due to SixXS [sunsetting](https://www.sixxs.net/main/) its IPv6 tunnel.
+
+- The `fanctl` package and `fan` module have been removed because the developers did not upstream their iproute2 patches and lagged behind recent iproute2 versions in compatibility.
+
+- Top-level `idea` package collection was renamed. All JetBrains IDEs are now at `jetbrains`.
+
+- `flexget`'s state database cannot be upgraded to its new internal format, requiring removal of any existing `db-config.sqlite` which will be automatically recreated.
+
+- The `ipfs` service no longer ignores the `dataDir` option. If you've ever set this option to anything other than the default, you'll have to either unset it (so the default gets used) or migrate the old data manually with
+
+  ```ShellSession
+  dataDir=<valueOfDataDir>
+  mv /var/lib/ipfs/.ipfs/* $dataDir
+  rmdir /var/lib/ipfs/.ipfs
+  ```
+
+- The `caddy` service was previously using an extra `.caddy` directory in the data directory specified with the `dataDir` option. The contents of the `.caddy` directory are now expected to be in the `dataDir`.
+
+- The `ssh-agent` user service is not started by default anymore. Use `programs.ssh.startAgent` to enable it if needed. There is also a new `programs.gnupg.agent` module that creates a `gpg-agent` user service. It can also serve as a SSH agent if `enableSSHSupport` is set.
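+
+  A minimal sketch of the two alternatives mentioned above (pick one; enabling both agents at once is not intended):
+
+  ```nix
+  {
+    # Option 1: keep the classic ssh-agent user service
+    programs.ssh.startAgent = true;
+
+    # Option 2: let gpg-agent provide SSH agent support instead
+    # programs.gnupg.agent = {
+    #   enable = true;
+    #   enableSSHSupport = true;
+    # };
+  }
+  ```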
+
+- The `services.tinc.networks.<name>.listenAddress` option had a misleading name that did not correspond to its behavior. It now correctly defines the IP address to listen on for incoming connections. To keep the previous behaviour, use `services.tinc.networks.<name>.bindToAddress` instead. Refer to the description of the options for more details.
+
+- `tlsdate` package and module were removed. This is due to the project being dead and not building with openssl 1.1.
+
+- `wvdial` package and module were removed. This is due to the project being dead and not building with openssl 1.1.
+
+- `cc-wrapper`'s setup-hook now exports a number of environment variables corresponding to binutils binaries (e.g. `LD`, `STRIP`, `RANLIB`, etc). This is done to keep packages' build systems from having to guess these tools, since guessing is harder to predict, especially when cross-compiling. However, some packages have broken due to this---their build systems either do not support, or claim to support without adequate testing, taking such environment variables as parameters.
+
+- `services.firefox.syncserver` now runs by default as a non-root user. To accommodate this change, the default sqlite database location has also been changed. Migration should work automatically. Refer to the description of the options for more details.
+
+- The `compiz` window manager and package was removed. The system support had been broken for several years.
+
+- Touchpad support should now be enabled through `libinput` as `synaptics` is now deprecated. See the option `services.xserver.libinput.enable`.
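+
+  For example, a minimal configuration enabling the new default input driver:
+
+  ```nix
+  {
+    services.xserver.libinput.enable = true;
+  }
+  ```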
+
+- grsecurity/PaX support has been dropped, following upstream's decision to cease free support. See [upstream's announcement](https://grsecurity.net/passing_the_baton.php) for more information. No complete replacement for grsecurity/PaX is available presently.
+
+- `services.mysql` now has declarative configuration of databases and users with the `ensureDatabases` and `ensureUsers` options (a minimal example is sketched at the end of this item).
+
+  These options will never delete existing databases and users, especially not when the values of the options are changed.
+
+  The MySQL users will be identified using [Unix socket authentication](https://mariadb.com/kb/en/library/authentication-plugin-unix-socket/). This authenticates only the Unix user of the same name, and does so without requiring a password.
+
+  If you have previously created a MySQL `root` user _with a password_, you will need to add a `root` user for Unix socket authentication before using the new options. This can be done by running the following SQL script:
+
+  ```SQL
+  CREATE USER 'root'@'%' IDENTIFIED BY '';
+  GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;
+  FLUSH PRIVILEGES;
+
+  -- Optionally, delete the password-authenticated user:
+  -- DROP USER 'root'@'localhost';
+  ```
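+
+  A minimal sketch of the declarative options mentioned above, assuming the list-of-attribute-sets shape; the database and user names are illustrative, so check the option documentation for the exact structure:
+
+  ```nix
+  {
+    services.mysql.ensureDatabases = [ "myapp" ];
+    services.mysql.ensureUsers = [
+      {
+        name = "myapp";
+        # grant the matching Unix user full access to its database
+        ensurePermissions = { "myapp.*" = "ALL PRIVILEGES"; };
+      }
+    ];
+  }
+  ```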
+
+- `services.mysqlBackup` now works by default without any user setup, including for users other than `mysql`.
+
+  By default, the `mysql` user is no longer the user which performs the backup. Instead a system account `mysqlbackup` is used.
+
+  The `mysqlBackup` service is also now using systemd timers instead of `cron`.
+
+  Therefore, the `services.mysqlBackup.period` option no longer exists, and has been replaced with `services.mysqlBackup.calendar`, which is in the format of [systemd.time(7)](https://www.freedesktop.org/software/systemd/man/systemd.time.html#Calendar%20Events).
+
+  If you expect to be sent an e-mail when the backup fails, consider using a script which monitors the systemd journal for errors. Regretfully, at present there is no built-in functionality for this.
+
+  You can check that backups still work by running `systemctl start mysql-backup` then `systemctl status mysql-backup`.
+
+- Templated systemd services, e.g. `container@name`, are now handled correctly when switching to a new configuration, resulting in them being reloaded.
+
+- Steam: the `newStdcpp` parameter was removed and should not be needed anymore.
+
+- Redis has been updated to version 4 which mandates a cluster mass-restart, due to changes in the network handling, in order to ensure compatibility with networks NATing traffic.
+
+## Other Notable Changes {#sec-release-17.09-notable-changes}
+
+- Modules can now be disabled by using [disabledModules](https://nixos.org/nixpkgs/manual/#sec-replace-modules), allowing another module to take its place. This can be used to import a set of modules from another channel while keeping the rest of the system on a stable release.
+
+- Updated to FreeType 2.7.1, including a new TrueType engine. The new engine replaces the Infinality engine which was the default in NixOS. The default font rendering settings are now provided by fontconfig-penultimate, replacing fontconfig-ultimate; the new defaults are less invasive and provide rendering that is more consistent with other systems and hopefully with each font designer's intent. Some system-wide configuration has been removed from the Fontconfig NixOS module where user Fontconfig settings are available.
+
+- ZFS/SPL have been updated to 0.7.0; `zfsUnstable` and `splUnstable` have therefore been removed.
+
+- The `time.timeZone` option now allows the value `null` in addition to timezone strings. This value allows changing the timezone of a system imperatively using `timedatectl set-timezone`. The default timezone is still UTC.
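+
+  For example, to hand timezone control over to `timedatectl` (the timezone used afterwards is up to the administrator):
+
+  ```nix
+  {
+    # With a null timezone, the zone can later be changed imperatively, e.g.:
+    #   timedatectl set-timezone Europe/Amsterdam
+    time.timeZone = null;
+  }
+  ```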
+
+- Nixpkgs overlays may now be specified with a file as well as a directory. The value of `<nixpkgs-overlays>` may be a file, and `~/.config/nixpkgs/overlays.nix` can be used instead of the `~/.config/nixpkgs/overlays` directory.
+
+  See the overlays chapter of the Nixpkgs manual for more details.
+
+- Definitions for `/etc/hosts` can now be specified declaratively with `networking.hosts`.
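+
+  A minimal sketch (the address and host names are illustrative):
+
+  ```nix
+  {
+    networking.hosts = {
+      "192.0.2.10" = [ "myhost.example.org" "myhost" ];
+    };
+  }
+  ```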
+
+- Two new options have been added to the installer loader, and the default has changed. The kernel log verbosity has been lowered to the upstream default for the default options, in order not to spam the console when e.g. joining a network.
+
+  A new `debug` option has therefore been added, which sets the log level back to the previous verbose mode, making debugging easier while remaining easily accessible.
+
+  Additionally, a `copytoram` option has been added, which makes it possible to remove the install medium after booting. This allows tethering from your phone after booting from it.
+
+- `services.gitlab-runner.configOptions` has been added to specify the configuration of gitlab-runners declaratively.
+
+- `services.jenkins.plugins` has been added to install plugins easily; its value can be generated with jenkinsPlugins2nix.
+
+- `services.postfix.config` has been added to specify `main.cf` with NixOS options. Additionally, other options have been added to the Postfix module, and it has been improved further.
+
+- The GitLab package and module have been updated to the latest 10.0 release.
+
+- The `systemd-boot` boot loader now lists the NixOS version, kernel version and build date of all bootable generations.
+
+- The dnscrypt-proxy service now defaults to using a random upstream resolver, selected from the list of public non-logging resolvers with DNSSEC support. Existing configurations can be migrated to this mode of operation by omitting the `services.dnscrypt-proxy.resolverName` option or setting it to `"random"`.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1803.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1803.section.md
new file mode 100644
index 000000000000..ecf5757bae6c
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1803.section.md
@@ -0,0 +1,286 @@
+# Release 18.03 ("Impala", 2018/04/04) {#sec-release-18.03}
+
+## Highlights {#sec-release-18.03-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- End of support is planned for end of October 2018, handing over to 18.09.
+
+- Platform support: x86_64-linux and x86_64-darwin since release time (the latter isn't NixOS, really). Binaries for aarch64-linux are available, but no channel exists yet, as it's waiting for some test fixes, etc.
+
+- Nix now defaults to 2.0; see its [release notes](https://nixos.org/nix/manual/#ssec-relnotes-2.0).
+
+- Core version changes: linux: 4.9 -\> 4.14, glibc: 2.25 -\> 2.26, gcc: 6 -\> 7, systemd: 234 -\> 237.
+
+- Desktop version changes: gnome: 3.24 -\> 3.26, (KDE) plasma-desktop: 5.10 -\> 5.12.
+
+- MariaDB 10.2, updated from 10.1, is now the default MySQL implementation. While upgrading a few changes have been made to the infrastructure involved:
+
+  - `libmysql` has been deprecated; please use `mysql.connector-c` instead. A compatibility passthru has been added to the MySQL packages.
+
+  - The `mysql57` package has a new `static` output containing the static libraries, including `libmysqld.a`.
+
+- PHP now defaults to PHP 7.2, updated from 7.1.
+
+## New Services {#sec-release-18.03-new-services}
+
+The following new services were added since the last release:
+
+- `./config/krb5/default.nix`
+
+- `./hardware/digitalbitbox.nix`
+
+- `./misc/label.nix`
+
+- `./programs/ccache.nix`
+
+- `./programs/criu.nix`
+
+- `./programs/digitalbitbox/default.nix`
+
+- `./programs/less.nix`
+
+- `./programs/npm.nix`
+
+- `./programs/plotinus.nix`
+
+- `./programs/rootston.nix`
+
+- `./programs/systemtap.nix`
+
+- `./programs/sway.nix`
+
+- `./programs/udevil.nix`
+
+- `./programs/way-cooler.nix`
+
+- `./programs/yabar.nix`
+
+- `./programs/zsh/zsh-autoenv.nix`
+
+- `./services/backup/borgbackup.nix`
+
+- `./services/backup/crashplan-small-business.nix`
+
+- `./services/desktops/dleyna-renderer.nix`
+
+- `./services/desktops/dleyna-server.nix`
+
+- `./services/desktops/pipewire.nix`
+
+- `./services/desktops/gnome3/chrome-gnome-shell.nix`
+
+- `./services/desktops/gnome3/tracker-miners.nix`
+
+- `./services/hardware/fwupd.nix`
+
+- `./services/hardware/interception-tools.nix`
+
+- `./services/hardware/u2f.nix`
+
+- `./services/hardware/usbmuxd.nix`
+
+- `./services/mail/clamsmtp.nix`
+
+- `./services/mail/dkimproxy-out.nix`
+
+- `./services/mail/pfix-srsd.nix`
+
+- `./services/misc/gitea.nix`
+
+- `./services/misc/home-assistant.nix`
+
+- `./services/misc/ihaskell.nix`
+
+- `./services/misc/logkeys.nix`
+
+- `./services/misc/novacomd.nix`
+
+- `./services/misc/osrm.nix`
+
+- `./services/misc/plexpy.nix`
+
+- `./services/misc/pykms.nix`
+
+- `./services/misc/tzupdate.nix`
+
+- `./services/monitoring/fusion-inventory.nix`
+
+- `./services/monitoring/prometheus/exporters.nix`
+
+- `./services/network-filesystems/beegfs.nix`
+
+- `./services/network-filesystems/davfs2.nix`
+
+- `./services/network-filesystems/openafs/client.nix`
+
+- `./services/network-filesystems/openafs/server.nix`
+
+- `./services/network-filesystems/ceph.nix`
+
+- `./services/networking/aria2.nix`
+
+- `./services/networking/monero.nix`
+
+- `./services/networking/nghttpx/default.nix`
+
+- `./services/networking/nixops-dns.nix`
+
+- `./services/networking/rxe.nix`
+
+- `./services/networking/stunnel.nix`
+
+- `./services/web-apps/matomo.nix`
+
+- `./services/web-apps/restya-board.nix`
+
+- `./services/web-servers/mighttpd2.nix`
+
+- `./services/x11/fractalart.nix`
+
+- `./system/boot/binfmt.nix`
+
+- `./system/boot/grow-partition.nix`
+
+- `./tasks/filesystems/ecryptfs.nix`
+
+- `./virtualisation/hyperv-guest.nix`
+
+## Backward Incompatibilities {#sec-release-18.03-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- `sound.enable` now defaults to false.
+
+- Dollar signs in options under `services.postfix` are passed verbatim to Postfix, which will interpret them as the beginning of a parameter expression. This was already true for string-valued options in the previous release, but not for list-valued options. If you need to pass literal dollar signs through Postfix, double them.
+
+- The `postage` package (for web-based PostgreSQL administration) has been renamed to `pgmanage`. The corresponding module has also been renamed. To migrate please rename all `services.postage` options to `services.pgmanage`.
+
+- Package attributes starting with a digit have been prefixed with an underscore sign. This is to avoid quoting in the configuration and other issues with command-line tools like `nix-env`. The change affects the following packages:
+
+  - `2048-in-terminal` → `_2048-in-terminal`
+
+  - `90secondportraits` → `_90secondportraits`
+
+  - `2bwm` → `_2bwm`
+
+  - `389-ds-base` → `_389-ds-base`
+
+- **The OpenSSH service no longer enables support for DSA keys by default, which could cause a system lock out. Update your keys or, unfavorably, re-enable DSA support manually.**
+
+  DSA support was [deprecated in OpenSSH 7.0](https://www.openssh.com/legacy.html), due to it being too weak. To re-enable support, add `PubkeyAcceptedKeyTypes +ssh-dss` to the end of your `services.openssh.extraConfig`, as sketched below.
+
+  After updating the keys to be stronger, anyone still on a pre-17.03 version is safe to jump to 17.03, as vetted [here](https://search.nix.gsc.io/?q=stateVersion).
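+
+  A minimal sketch of re-enabling DSA keys as described above (only do this as a stopgap while rotating keys):
+
+  ```nix
+  {
+    services.openssh.extraConfig = ''
+      PubkeyAcceptedKeyTypes +ssh-dss
+    '';
+  }
+  ```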
+
+- The `openssh` package now includes Kerberos support by default; the `openssh_with_kerberos` package is now a deprecated alias. If you do not want Kerberos support, you can do `openssh.override { withKerberos = false; }`. Note, this also applies to the `openssh_hpn` package.
+
+- `cc-wrapper` has been split in two; there is now also a `bintools-wrapper`. The most commonly used files in `nix-support` are now split between the two wrappers. Some commonly used ones, like `nix-support/dynamic-linker`, are duplicated for backwards compatibility, even though they rightly belong only in `bintools-wrapper`. Other more obscure ones are just moved.
+
+- The propagation logic has been changed. The new logic, along with the new types of dependencies that go with it, is thoroughly documented in the "Specifying dependencies" section of the "Standard Environment" chapter of the Nixpkgs manual. The old logic isn't, but it is easy to describe: dependencies were propagated as the same type of dependency no matter what. In practice, that means that many `propagatedNativeBuildInputs` should instead be `propagatedBuildInputs`. Thankfully, that was and is the least used type of dependency. Also, it means that some `propagatedBuildInputs` should instead be `depsTargetTargetPropagated`. Other types of dependencies should be unaffected.
+
+- `lib.addPassthru drv passthru` is removed. Use `lib.extendDerivation true passthru drv` instead.
+
+- The `memcached` service no longer accepts dynamic socket paths via `services.memcached.socket`. Unix sockets can still be enabled via `services.memcached.enableUnixSocket` and will be accessible at `/run/memcached/memcached.sock`.
+
+- The `hardware.amdHybridGraphics.disable` option was removed for lack of a maintainer. If you still need this module, you may wish to include a copy of it from an older version of nixos in your imports.
+
+- The merging of config options for `services.postfix.config` was buggy. Previously, if other options in the Postfix module like `services.postfix.useSrs` were set and the user set config options that were also set by such options, the resulting config wouldn't include all options that were needed. They are now merged correctly. If config options need to be overridden, `lib.mkForce` or `lib.mkOverride` can be used.
+
+- The following changes apply if the `stateVersion` is changed to 18.03 or higher. For `stateVersion = "17.09"` or lower the old behavior is preserved.
+
+  - `matrix-synapse` uses postgresql by default instead of sqlite. Migration instructions can be found [here](https://github.com/matrix-org/synapse/blob/master/docs/postgres.rst#porting-from-sqlite).
+
+- The `jid` package has been removed, due to maintenance overhead of a go package having non-versioned dependencies.
+
+- When using `services.xserver.libinput` (enabled by default in GNOME), it now handles all input devices, not just touchpads. As a result, you might need to re-evaluate any custom Xorg configuration. In particular, `Option "XkbRules" "base"` may result in broken keyboard layout.
+
+- The `attic` package was removed. A maintained fork called [Borg](https://www.borgbackup.org/) should be used instead. Migration instructions can be found [here](http://borgbackup.readthedocs.io/en/stable/usage/upgrade.html#attic-and-borg-0-xx-to-borg-1-x).
+
+- The Piwik analytics software was renamed to Matomo:
+
+  - The package `pkgs.piwik` was renamed to `pkgs.matomo`.
+
+  - The service `services.piwik` was renamed to `services.matomo`.
+
+  - The data directory `/var/lib/piwik` was renamed to `/var/lib/matomo`. All files will be moved automatically on first startup, but you might need to adjust your backup scripts.
+
+  - The default `serverName` for the nginx configuration changed from `piwik.${config.networking.hostName}` to `matomo.${config.networking.hostName}.${config.networking.domain}` if `config.networking.domain` is set, `matomo.${config.networking.hostName}` if it is not set. If you change your `serverName`, remember you'll need to update the `trustedHosts[]` array in `/var/lib/matomo/config/config.ini.php` as well.
+
+  - The `piwik` user was renamed to `matomo`. The service will adjust ownership automatically for files in the data directory. If you use unix socket authentication, remember to give the new `matomo` user access to the database and to change the `username` to `matomo` in the `[database]` section of `/var/lib/matomo/config/config.ini.php`.
+
+  - If you named your database `piwik`, you might want to rename it to `matomo` to keep things clean, but this is neither enforced nor required.
+
+- `nodejs-4_x` is end-of-life. `nodejs-4_x`, `nodejs-slim-4_x` and `nodePackages_4_x` are removed.
+
+- The `pump.io` NixOS module was removed. It is now maintained as an [external module](https://github.com/rvl/pump.io-nixos).
+
+- The Prosody XMPP server has received a major update. The following modules were renamed:
+
+  - `services.prosody.modules.httpserver` is now `services.prosody.modules.http_files`
+
+  - `services.prosody.modules.console` is now `services.prosody.modules.admin_telnet`
+
+  Many new modules are now core modules, most notably `services.prosody.modules.carbons` and `services.prosody.modules.mam`.
+
+  The better-performing `libevent` backend is now enabled by default.
+
+  `withCommunityModules` now passes through the modules to `services.prosody.extraModules`. Use `withOnlyInstalledCommunityModules` for modules that should not be enabled directly, e.g. `lib_ldap`.
+
+- All prometheus exporter modules are now defined as submodules. The exporters are configured using `services.prometheus.exporters`.
+
+## Other Notable Changes {#sec-release-18.03-notable-changes}
+
+- The ZNC option `services.znc.mutable` now defaults to `true`. That means that old configuration is not overwritten by default when updates to the ZNC options are made.
+
+- The option `networking.wireless.networks.<name>.auth` has been added for wireless networks with WPA-Enterprise authentication. There is also a new `extraConfig` option to directly configure `wpa_supplicant` and `hidden` to connect to hidden networks.
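+
+  A minimal sketch of a WPA-Enterprise network, assuming the `auth` option takes wpa_supplicant network-block directives as a string; the network name and credentials are illustrative:
+
+  ```nix
+  {
+    networking.wireless.networks."eduroam".auth = ''
+      key_mgmt=WPA-EAP
+      eap=PEAP
+      identity="user@example.org"
+      password="correct horse battery staple"
+    '';
+  }
+  ```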
+
+- In the module `networking.interfaces.<name>` the following options have been removed:
+
+  - `ipAddress`
+
+  - `ipv6Address`
+
+  - `prefixLength`
+
+  - `ipv6PrefixLength`
+
+  - `subnetMask`
+
+  To assign static addresses to an interface, the options `ipv4.addresses` and `ipv6.addresses` should be used instead. The options `ip4` and `ip6` have been renamed to `ipv4.addresses` and `ipv6.addresses` respectively. The new options `ipv4.routes` and `ipv6.routes` have been added to set up static routing.
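+
+  A minimal sketch of the new options (the interface name and addresses are illustrative):
+
+  ```nix
+  {
+    networking.interfaces.eth0.ipv4 = {
+      addresses = [ { address = "192.0.2.2"; prefixLength = 24; } ];
+      routes = [ { address = "198.51.100.0"; prefixLength = 24; via = "192.0.2.1"; } ];
+    };
+  }
+  ```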
+
+- The option `services.logstash.listenAddress` is now `127.0.0.1` by default. Previously the default behaviour was to listen on all interfaces.
+
+- `services.btrfs.autoScrub` has been added, to periodically check btrfs filesystems for data corruption. If there's a correct copy available, it will automatically repair corrupted blocks.
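+
+  For example, assuming the module follows the usual `enable` convention:
+
+  ```nix
+  {
+    services.btrfs.autoScrub.enable = true;
+  }
+  ```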
+
+- `displayManager.lightdm.greeters.gtk.clock-format` has been added: the clock format string (as expected by strftime, e.g. `%H:%M`) to use with the lightdm gtk greeter panel.
+
+  If set to null the default clock format is used.
+
+- `displayManager.lightdm.greeters.gtk.indicators` has been added: a list of allowed indicator modules to use with the lightdm gtk greeter panel.
+
+  Built-in indicators include `~a11y`, `~language`, `~session`, `~power`, `~clock`, `~host`, `~spacer`. Unity indicators can be represented by short name (e.g. `sound`, `power`), service file name, or absolute path.
+
+  If set to `null` the default indicators are used.
+
+  In order to have the previous default configuration, add
+
+  ```nix
+  {
+    services.xserver.displayManager.lightdm.greeters.gtk.indicators = [
+      "~host" "~spacer"
+      "~clock" "~spacer"
+      "~session"
+      "~language"
+      "~a11y"
+      "~power"
+    ];
+  }
+  ```
+
+  to your `configuration.nix`.
+
+- The NixOS test driver supports user services declared by `systemd.user.services`. The methods `waitForUnit`, `getUnitInfo`, `startJob` and `stopJob` provide an optional `$user` argument for that purpose.
+
+- Enabling bash completion on NixOS, `programs.bash.enableCompletion`, will now also enable completion for the Nix command line tools by installing the [nix-bash-completions](https://github.com/hedning/nix-bash-completions) package.
+
+- The vim/kakoune plugin updater now reads from a CSV file: check `pkgs/applications/editors/vim/plugins/vim-plugin-names` to see the new format.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1809.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1809.section.md
new file mode 100644
index 000000000000..71afc71d5a89
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1809.section.md
@@ -0,0 +1,332 @@
+# Release 18.09 ("Jellyfish", 2018/10/05) {#sec-release-18.09}
+
+## Highlights {#sec-release-18.09-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following notable updates:
+
+- End of support is planned for end of April 2019, handing over to 19.03.
+
+- Platform support: x86_64-linux and x86_64-darwin as always. Support for aarch64-linux is as with the previous releases: not equivalent to the x86_64-linux release, but with efforts to reach parity.
+
+- Nix has been updated to 2.1; see its [release notes](https://nixos.org/nix/manual/#ssec-relnotes-2.1).
+
+- Core versions: linux: 4.14 LTS (unchanged), glibc: 2.26 → 2.27, gcc: 7 (unchanged), systemd: 237 → 239.
+
+- Desktop version changes: gnome: 3.26 → 3.28, (KDE) plasma-desktop: 5.12 → 5.13.
+
+Notable changes and additions for 18.09 include:
+
+- Support for wrapping binaries using `firejail` has been added through `programs.firejail.wrappedBinaries`.
+
+  For example
+
+  ```nix
+  {
+    programs.firejail = {
+      enable = true;
+      wrappedBinaries = {
+        firefox = "${lib.getBin pkgs.firefox}/bin/firefox";
+        mpv = "${lib.getBin pkgs.mpv}/bin/mpv";
+      };
+    };
+  }
+  ```
+
+  This will place `firefox` and `mpv` binaries in the global path wrapped by firejail.
+
+- User channels are now in the default `NIX_PATH`, allowing users to use their personal `nix-channel` defined channels in `nix-build` and `nix-shell` commands, as well as in imports like `import <mychannel>`.
+
+  For example
+
+  ```ShellSession
+  $ nix-channel --add https://nixos.org/channels/nixpkgs-unstable nixpkgsunstable
+  $ nix-channel --update
+  $ nix-build '<nixpkgsunstable>' -A gitFull
+  $ nix run -f '<nixpkgsunstable>' gitFull
+  $ nix-instantiate -E '(import <nixpkgsunstable> {}).gitFull'
+  ```
+
+## New Services {#sec-release-18.09-new-services}
+
+A curated selection of new services that were added since the last release:
+
+- The `services.cassandra` module has been reworked and was rewritten from scratch. The service has passing tests for versions 2.1, 2.2, 3.0 and 3.11 of [Apache Cassandra](https://cassandra.apache.org/).
+
+- There is a new `services.foundationdb` module for deploying [FoundationDB](https://www.foundationdb.org) clusters.
+
+- When enabled, `iproute2` will copy the files expected by `ip route` (e.g., `rt_tables`) into `/etc/iproute2`. This allows, for instance, writing aliases for routing tables.
+
+- `services.strongswan-swanctl` is a modern replacement for `services.strongswan`. You can use either one of them to setup IPsec VPNs but not both at the same time.
+
+  `services.strongswan-swanctl` uses the [swanctl](https://wiki.strongswan.org/projects/strongswan/wiki/swanctl) command which uses the modern [vici](https://github.com/strongswan/strongswan/blob/master/src/libcharon/plugins/vici/README.md) _Versatile IKE Configuration Interface_. The deprecated `ipsec` command used in `services.strongswan` is using the legacy [stroke configuration interface](https://github.com/strongswan/strongswan/blob/master/README_LEGACY.md).
+
+- The new `services.elasticsearch-curator` service periodically curates, or manages, your Elasticsearch indices and snapshots.
+
+All new services:
+
+- `./config/xdg/autostart.nix`
+
+- `./config/xdg/icons.nix`
+
+- `./config/xdg/menus.nix`
+
+- `./config/xdg/mime.nix`
+
+- `./hardware/brightnessctl.nix`
+
+- `./hardware/onlykey.nix`
+
+- `./hardware/video/uvcvideo/default.nix`
+
+- `./misc/documentation.nix`
+
+- `./programs/firejail.nix`
+
+- `./programs/iftop.nix`
+
+- `./programs/sedutil.nix`
+
+- `./programs/singularity.nix`
+
+- `./programs/xss-lock.nix`
+
+- `./programs/zsh/zsh-autosuggestions.nix`
+
+- `./services/admin/oxidized.nix`
+
+- `./services/backup/duplicati.nix`
+
+- `./services/backup/restic.nix`
+
+- `./services/backup/restic-rest-server.nix`
+
+- `./services/cluster/hadoop/default.nix`
+
+- `./services/databases/aerospike.nix`
+
+- `./services/databases/monetdb.nix`
+
+- `./services/desktops/bamf.nix`
+
+- `./services/desktops/flatpak.nix`
+
+- `./services/desktops/zeitgeist.nix`
+
+- `./services/development/bloop.nix`
+
+- `./services/development/jupyter/default.nix`
+
+- `./services/hardware/lcd.nix`
+
+- `./services/hardware/undervolt.nix`
+
+- `./services/misc/clipmenu.nix`
+
+- `./services/misc/gitweb.nix`
+
+- `./services/misc/serviio.nix`
+
+- `./services/misc/safeeyes.nix`
+
+- `./services/misc/sysprof.nix`
+
+- `./services/misc/weechat.nix`
+
+- `./services/monitoring/datadog-agent.nix`
+
+- `./services/monitoring/incron.nix`
+
+- `./services/networking/dnsdist.nix`
+
+- `./services/networking/freeradius.nix`
+
+- `./services/networking/hans.nix`
+
+- `./services/networking/morty.nix`
+
+- `./services/networking/ndppd.nix`
+
+- `./services/networking/ocserv.nix`
+
+- `./services/networking/owamp.nix`
+
+- `./services/networking/quagga.nix`
+
+- `./services/networking/shadowsocks.nix`
+
+- `./services/networking/stubby.nix`
+
+- `./services/networking/zeronet.nix`
+
+- `./services/security/certmgr.nix`
+
+- `./services/security/cfssl.nix`
+
+- `./services/security/oauth2_proxy_nginx.nix`
+
+- `./services/web-apps/virtlyst.nix`
+
+- `./services/web-apps/youtrack.nix`
+
+- `./services/web-servers/hitch/default.nix`
+
+- `./services/web-servers/hydron.nix`
+
+- `./services/web-servers/meguca.nix`
+
+- `./services/web-servers/nginx/gitweb.nix`
+
+- `./virtualisation/kvmgt.nix`
+
+- `./virtualisation/qemu-guest-agent.nix`
+
+## Backward Incompatibilities {#sec-release-18.09-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- Some licenses that were incorrectly not marked as unfree now are. This is the case for:
+
+  - cc-by-nc-sa-20: Creative Commons Attribution Non Commercial Share Alike 2.0
+
+  - cc-by-nc-sa-25: Creative Commons Attribution Non Commercial Share Alike 2.5
+
+  - cc-by-nc-sa-30: Creative Commons Attribution Non Commercial Share Alike 3.0
+
+  - cc-by-nc-sa-40: Creative Commons Attribution Non Commercial Share Alike 4.0
+
+  - cc-by-nd-30: Creative Commons Attribution-No Derivative Works v3.00
+
+  - msrla: Microsoft Research License Agreement
+
+- The deprecated `services.cassandra` module has seen a complete rewrite. (See above.)
+
+- `lib.strict` is removed. Use `builtins.seq` instead.
+
+- The `clementine` package points now to the free derivation. `clementineFree` is removed now and `clementineUnfree` points to the package which is bundled with the unfree `libspotify` package.
+
+- The `netcat` package is now taken directly from OpenBSD's `libressl`, instead of relying on Debian's fork. The new version should be very close to the old version, but there are some minor differences. Importantly, flags like `-b`, `-q`, `-C`, and `-Z` are no longer accepted by the `nc` command.
+
+- The `services.docker-registry.extraConfig` object doesn't contain environment variables anymore. Instead it needs to provide an object structure that can be mapped onto the YAML configuration defined in [the `docker/distribution` docs](https://github.com/docker/distribution/blob/v2.6.2/docs/configuration.md).
+
+- `gnucash` has changed from version 2.4 to 3.x. If you've been using `gnucash` (version 2.4) instead of `gnucash26` (version 2.6) you must open your Gnucash data file(s) with `gnucash26` and then save them to upgrade the file format. Then you may use your data file(s) with Gnucash 3.x. See the upgrade [documentation](https://wiki.gnucash.org/wiki/FAQ#Using_Different_Versions.2C_Up_And_Downgrade). Gnucash 2.4 is still available under the attribute `gnucash24`.
+
+- `services.munge` now runs as user (and group) `munge` instead of root. Make sure the key file is accessible to the daemon.
+
+- `dockerTools.buildImage` now uses `null` as default value for `tag`, which indicates that the nix output hash will be used as tag.
+
+- The ELK stack: `elasticsearch`, `logstash` and `kibana` has been upgraded from 2.\* to 6.3.\*. The 2.\* versions have been [unsupported since last year](https://www.elastic.co/support/eol) so they have been removed. You can still use the 5.\* versions under the names `elasticsearch5`, `logstash5` and `kibana5`.
+
+  The elastic beats: `filebeat`, `heartbeat`, `metricbeat` and `packetbeat` have had the same treatment: they now target 6.3.\* as well. The 5.\* versions are available under the names: `filebeat5`, `heartbeat5`, `metricbeat5` and `packetbeat5`
+
+  The ELK-6.3 stack now comes with [X-Pack by default](https://www.elastic.co/products/x-pack/open). Since X-Pack is licensed under the [Elastic License](https://github.com/elastic/elasticsearch/blob/master/licenses/ELASTIC-LICENSE.txt) the ELK packages now have an unfree license. To use them you need to specify `allowUnfree = true;` in your nixpkgs configuration.
+
+  Fortunately there is also a free variant of the ELK stack without X-Pack. The packages are available under the names: `elasticsearch-oss`, `logstash-oss` and `kibana-oss`.
+
+- The options `boot.initrd.luks.devices.name.yubikey.ramfsMountPoint` and `boot.initrd.luks.devices.name.yubikey.storage.mountPoint` were removed. The `luksroot.nix` module never supported more than one YubiKey at a time anyway, hence those options never had any effect. You should be able to remove them from your config without any issues.
+
+- `stdenv.system` and `system` in nixpkgs now refer to the host platform instead of the build platform. For native builds this is not a change, let alone a breaking one. For cross builds, it is a breaking change, and `stdenv.buildPlatform.system` can be used instead for the old behavior. They should be using that anyway for clarity.
+
+- Groups `kvm` and `render` are introduced now, as systemd requires them.
+
+## Other Notable Changes {#sec-release-18.09-notable-changes}
+
+- `dockerTools.pullImage` relies on image digest instead of image tag to download the image. The `sha256` of a pulled image has to be updated.
+
+- `lib.attrNamesToStr` has been deprecated. Use more specific concatenation (`lib.concat(Map)StringsSep`) instead.
+
+- `lib.addErrorContextToAttrs` has been deprecated. Use `builtins.addErrorContext` directly.
+
+- `lib.showVal` has been deprecated. Use `lib.traceSeqN` instead.
+
+- `lib.traceXMLVal` has been deprecated. Use `lib.traceValFn builtins.toXml` instead.
+
+- `lib.traceXMLValMarked` has been deprecated. Use `lib.traceValFn (x: str + builtins.toXML x)` instead.
+
+- The `pkgs` argument to NixOS modules can now be set directly using `nixpkgs.pkgs`. Previously, only the `system`, `config` and `overlays` arguments could be used to influence `pkgs`.
+
+- A NixOS system can now be constructed more easily based on a preexisting invocation of Nixpkgs. For example:
+
+  ```nix
+  {
+    inherit (pkgs.nixos {
+      boot.loader.grub.enable = false;
+      fileSystems."/".device = "/dev/xvda1";
+    }) toplevel kernel initialRamdisk manual;
+  }
+  ```
+
+  This benefits evaluation performance, lets you write Nixpkgs packages that depend on NixOS images and is consistent with a deployment architecture that would be centered around Nixpkgs overlays.
+
+- `lib.traceValIfNot` has been deprecated. Use `if/then/else` and `lib.traceValSeq` instead.
+
+- `lib.traceCallXml` has been deprecated. Please complain if you use the function regularly.
+
+- The attribute `lib.nixpkgsVersion` has been deprecated in favor of `lib.version`. Please refer to the discussion in [NixOS/nixpkgs\#39416](https://github.com/NixOS/nixpkgs/pull/39416#discussion_r183845745) for further reference.
+
+- `lib.recursiveUpdateUntil` was not acting according to its specification. It has been fixed to act according to the docstring, and a test has been added.
+
+- The module for `security.dhparams` has two new options now:
+
+  `security.dhparams.stateless`
+
+  : Puts the generated Diffie-Hellman parameters into the Nix store instead of managing them in a stateful manner in `/var/lib/dhparams`.
+
+  `security.dhparams.defaultBitSize`
+
+  : The default bit size to use for the generated Diffie-Hellman parameters.
+
+  ::: {.note}
+  The path to the actual generated parameter files should now be queried using `config.security.dhparams.params.name.path` because it might be either in the Nix store or in a directory configured by `security.dhparams.path`.
+  :::
+
+  ::: {.note}
+  **For developers:**
+
+  Module implementers should not set a specific bit size in order to let users configure it by themselves if they want to have a different bit size than the default (2048).
+
+  An example usage of this would be:
+
+  ```nix
+  { config, ... }:
+
+  {
+    security.dhparams.params.myservice = {};
+    environment.etc."myservice.conf".text = ''
+      dhparams = ${config.security.dhparams.params.myservice.path}
+    '';
+  }
+  ```
+
+  :::
+
+- `networking.networkmanager.useDnsmasq` has been deprecated. Use `networking.networkmanager.dns` instead.
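+
+  For example, to keep using dnsmasq for DNS resolution (assuming `"dnsmasq"` is among the accepted values):
+
+  ```nix
+  {
+    networking.networkmanager.dns = "dnsmasq";
+  }
+  ```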
+
+- The Kubernetes package has been bumped to major version 1.11. Please consult the [release notes](https://github.com/kubernetes/kubernetes/blob/release-1.11/CHANGELOG-1.11.md) for details on new features and api changes.
+
+- The option `services.kubernetes.apiserver.admissionControl` was renamed to `services.kubernetes.apiserver.enableAdmissionPlugins`.
+
+- The recommended way to access the Kubernetes Dashboard is via HTTPS (TLS). Therefore, the public service port for the dashboard has changed to 443 (container port 8443) and the scheme to https.
+
+- The option `services.kubernetes.apiserver.address` was renamed to `services.kubernetes.apiserver.bindAddress`. Note that the default value has changed from 127.0.0.1 to 0.0.0.0.
+
+- The option `services.kubernetes.apiserver.publicAddress` was not used and thus has been removed.
+
+- The option `services.kubernetes.addons.dashboard.enableRBAC` was renamed to `services.kubernetes.addons.dashboard.rbac.enable`.
+
+- The Kubernetes Dashboard now has only minimal RBAC permissions by default. If dashboard cluster-admin rights are desired, set `services.kubernetes.addons.dashboard.rbac.clusterAdmin` to true. On existing clusters, in order for the revocation of privileges to take effect, the current ClusterRoleBinding for kubernetes-dashboard must be manually removed: `kubectl delete clusterrolebinding kubernetes-dashboard`
+
+- The `programs.screen` module allows configuring `/etc/screenrc`; however, the module behaved fairly counterintuitively, as the config existed but the package wasn't available. Since 18.09, `pkgs.screen` will be added to `environment.systemPackages`.
+
+- The module `services.networking.hostapd` now uses WPA2 by default.
+
+- `s6Dns`, `s6Networking`, `s6LinuxUtils` and `s6PortableUtils` renamed to `s6-dns`, `s6-networking`, `s6-linux-utils` and `s6-portable-utils` respectively.
+
+- The module option `nix.useSandbox` is now defaulted to `true`.
+
+- The config activation script of `nixos-rebuild` now [reloads](https://www.freedesktop.org/software/systemd/man/systemctl.html#Manager%20Lifecycle%20Commands) all user units for each authenticated user.
+
+- The default display manager is now LightDM. To use SLiM set `services.xserver.displayManager.slim.enable` to `true`.
+
+- NixOS option descriptions are now automatically broken up into individual paragraphs if the text contains two consecutive newlines, so it's no longer necessary to use `</para><para>` to start a new paragraph.
+
+- Top-level `buildPlatform`, `hostPlatform`, and `targetPlatform` in Nixpkgs are deprecated. Please use their equivalents in `stdenv` instead: `stdenv.buildPlatform`, `stdenv.hostPlatform`, and `stdenv.targetPlatform`.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1903.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1903.section.md
new file mode 100644
index 000000000000..e83a3911a5cf
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1903.section.md
@@ -0,0 +1,214 @@
+# Release 19.03 ("Koi", 2019/04/11) {#sec-release-19.03}
+
+## Highlights {#sec-release-19.03-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- End of support is planned for end of October 2019, handing over to 19.09.
+
+- The default Python 3 interpreter is now CPython 3.7 instead of CPython 3.6.
+
+- Added the Pantheon desktop environment. It can be enabled through `services.xserver.desktopManager.pantheon.enable`.
+
+  ::: {.note}
+  By default, `services.xserver.desktopManager.pantheon` enables LightDM as a display manager, as pantheon's screen locking implementation relies on it.
+  Because of that it is recommended to leave LightDM enabled. If you'd like to disable it anyway, set `services.xserver.displayManager.lightdm.enable` to `false` and enable your preferred display manager.
+  :::
+
+  Also note that Pantheon's LightDM greeter is not enabled by default, because it has numerous issues in NixOS and isn't optimal for use here yet.
+
+- A major refactoring of the Kubernetes module has been completed. Refactorings primarily focus on decoupling components and enhancing security. Two-way TLS and RBAC has been enabled by default for all components, which slightly changes the way the module is configured. See: [](#sec-kubernetes) for details.
+
+- There is now a set of `confinement` options for `systemd.services`, which allows restricting services into a chroot(2)-ed environment that only contains the store paths from the runtime closure of the service.
+
+## New Services {#sec-release-19.03-new-services}
+
+The following new services were added since the last release:
+
+- `./programs/nm-applet.nix`
+
+- There is a new `security.googleOsLogin` module for using [OS Login](https://cloud.google.com/compute/docs/instances/managing-instance-access) to manage SSH access to Google Compute Engine instances, which supersedes the imperative and broken `google-accounts-daemon` used in `nixos/modules/virtualisation/google-compute-config.nix`.
+
+- `./services/misc/beanstalkd.nix`
+
+- There is a new `services.cockroachdb` module for running CockroachDB databases. NixOS now ships with CockroachDB 2.1.x as well, available on `x86_64-linux` and `aarch64-linux`.
+
+- `./security/duosec.nix`
+
+- The [PAM module for Duo Security](https://duo.com/docs/duounix) has been enabled for use. One can configure it using the `security.duosec` options along with the corresponding PAM option in `security.pam.services.<name?>.duoSecurity.enable`.
+
+## Backward Incompatibilities {#sec-release-19.03-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- The minimum version of Nix required to evaluate Nixpkgs is now 2.0.
+
+  - For users of NixOS 18.03 and 19.03, NixOS defaults to Nix 2.0, but supports using Nix 1.11 by setting `nix.package = pkgs.nix1;`. If this option is set to a Nix 1.11 package, you will need to either unset the option or upgrade it to Nix 2.0.
+
+  - For users of NixOS 17.09, you will first need to upgrade Nix by setting `nix.package = pkgs.nixStable2;` and run `nixos-rebuild switch` as the `root` user.
+
+  - For users of a daemon-less Nix installation on Linux or macOS, you can upgrade Nix by running `curl -L https://nixos.org/nix/install | sh`, or prior to doing a channel update, running `nix-env -iA nix`. If you have already run a channel update and Nix is no longer able to evaluate Nixpkgs, the error message printed should provide adequate directions for upgrading Nix.
+
+  - For users of the Nix daemon on macOS, you can upgrade Nix by running `sudo -i sh -c 'nix-channel --update && nix-env -iA nixpkgs.nix'; sudo launchctl stop org.nixos.nix-daemon; sudo launchctl start org.nixos.nix-daemon`.
+
+- The `buildPythonPackage` function now sets `strictDeps = true` to help distinguish between native and non-native dependencies in order to improve cross-compilation compatibility. Note however that this may break user expressions.
+
+- The `buildPythonPackage` function now sets `LANG = C.UTF-8` to enable Unicode support. The `glibcLocales` package is no longer needed as a build input.
+
+- The Syncthing state and configuration data has been moved from `services.syncthing.dataDir` to the newly defined `services.syncthing.configDir`, which defaults to `/var/lib/syncthing/.config/syncthing`. This change makes it possible to share synced directories using ACLs without Syncthing resetting the permissions on every start.
+
+- The `ntp` module now has sane default restrictions. If you're relying on the previous defaults, which permitted all queries and commands from all firewall-permitted sources, you can set `services.ntp.restrictDefault` and `services.ntp.restrictSource` to `[]`.
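+
+  For example, a minimal sketch restoring the previous wide-open behaviour:
+
+  ```nix
+  {
+    services.ntp.restrictDefault = [ ];
+    services.ntp.restrictSource = [ ];
+  }
+  ```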
+
+- Package `rabbitmq_server` is renamed to `rabbitmq-server`.
+
+- The `light` module no longer uses setuid binaries, but udev rules. As a consequence users of that module have to belong to the `video` group in order to use the executable (i.e. `users.users.yourusername.extraGroups = ["video"];`).
+
+- Buildbot now supports Python 3 and its packages have been moved to `pythonPackages`. The options `services.buildbot-master.package` and `services.buildbot-worker.package` can be used to select the Python 2 or 3 version of the package.
+
+- Options `services.znc.confOptions.networks.name.userName` and `services.znc.confOptions.networks.name.modulePackages` were removed. They were never used for anything and can therefore safely be removed.
+
+- Package `wasm` has been renamed `proglodyte-wasm`. The package `wasm` will be pointed to `ocamlPackages.wasm` in 19.09, so make sure to update your configuration if you want to keep `proglodyte-wasm`.
+
+- When the `nixpkgs.pkgs` option is set, NixOS will no longer ignore the `nixpkgs.overlays` option. The old behavior can be recovered by setting `nixpkgs.overlays = lib.mkForce [];`.
+
+- OpenSMTPD has been upgraded to version 6.4.0p1. This release makes backwards-incompatible changes to the configuration file format. See `man smtpd.conf` for more information on the new file format.
+
+- The versioned `postgresql` packages have been renamed to use underscore number separators. For example, `postgresql96` has been renamed to `postgresql_9_6`.
+
+- Package `consul-ui` and passthrough `consul.ui` have been removed. The package `consul` now uses upstream releases that vendor the UI into the binary. See [\#48714](https://github.com/NixOS/nixpkgs/pull/48714#issuecomment-433454834) for details.
+
+- Slurm introduces the new option `services.slurm.stateSaveLocation`, which is now set to `/var/spool/slurm` by default (instead of `/var/spool`). Make sure to move all files to the new directory or to set the option accordingly.
+
+  The slurmctld now runs as user `slurm` instead of `root`. If you want to keep slurmctld running as `root`, set `services.slurm.user = "root";`.
+
+  The options `services.slurm.nodeName` and `services.slurm.partitionName` are now sets of strings to correctly reflect the fact that each of these options can occur more than once in the configuration.
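+
+  A minimal sketch of the new option shapes; the node and partition definitions are illustrative:
+
+  ```nix
+  {
+    services.slurm = {
+      stateSaveLocation = "/var/spool/slurm";
+      nodeName = [ "linux1 CPUs=1 State=UNKNOWN" ];
+      partitionName = [ "debug Nodes=linux1 Default=YES MaxTime=INFINITE State=UP" ];
+    };
+  }
+  ```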
+
+- The `solr` package has been upgraded from 4.10.3 to 7.5.0 and has undergone some major changes. The `services.solr` module has been updated to reflect these changes. Please review http://lucene.apache.org/solr/ carefully before upgrading.
+
+- Package `ckb` is renamed to `ckb-next`, and options `hardware.ckb.*` are renamed to `hardware.ckb-next.*`.
+
+- The option `services.xserver.displayManager.job.logToFile` which was previously set to `true` when using the display managers `lightdm`, `sddm` or `xpra` has been reset to the default value (`false`).
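+
+  A sketch of restoring the previous behaviour:
+
+  ```nix
+  {
+    services.xserver.displayManager.job.logToFile = true;
+  }
+  ```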
+
+- Network interface indiscriminate NixOS firewall options (`networking.firewall.allow*`) are now preserved when also setting interface specific rules such as `networking.firewall.interfaces.en0.allow*`. These rules continue to use the pseudo device "default" (`networking.firewall.interfaces.default.*`), and assigning to this pseudo device will override the (`networking.firewall.allow*`) options.
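+
+  For example, a sketch combining global and interface-specific rules (the interface name is illustrative):
+
+  ```nix
+  {
+    networking.firewall.allowedTCPPorts = [ 22 ];
+    networking.firewall.interfaces.en0.allowedTCPPorts = [ 80 443 ];
+  }
+  ```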
+
+- The `nscd` service now disables all caching of `passwd` and `group` databases by default. This was interfering with the correct functioning of the `libnss_systemd.so` module which is used by `systemd` to manage uids and usernames in the presence of `DynamicUser=` in systemd services. This was already the default behaviour in presence of `services.sssd.enable = true` because nscd caching would interfere with `sssd` in unpredictable ways as well. Because we're using nscd not for caching, but for convincing glibc to find NSS modules in the nix store instead of an absolute path, we have decided to disable caching globally now, as it's usually not the behaviour the user wants and can lead to surprising behaviour. Furthermore, negative caching of host lookups is also disabled now by default. This should fix the issue of dns lookups failing in the presence of an unreliable network.
+
+  If the old behaviour is desired, this can be restored by setting the `services.nscd.config` option with the desired caching parameters.
+
+  ```nix
+  {
+    services.nscd.config =
+    ''
+    server-user             nscd
+    threads                 1
+    paranoia                no
+    debug-level             0
+
+    enable-cache            passwd          yes
+    positive-time-to-live   passwd          600
+    negative-time-to-live   passwd          20
+    suggested-size          passwd          211
+    check-files             passwd          yes
+    persistent              passwd          no
+    shared                  passwd          yes
+
+    enable-cache            group           yes
+    positive-time-to-live   group           3600
+    negative-time-to-live   group           60
+    suggested-size          group           211
+    check-files             group           yes
+    persistent              group           no
+    shared                  group           yes
+
+    enable-cache            hosts           yes
+    positive-time-to-live   hosts           600
+    negative-time-to-live   hosts           5
+    suggested-size          hosts           211
+    check-files             hosts           yes
+    persistent              hosts           no
+    shared                  hosts           yes
+    '';
+  }
+  ```
+
+  See [\#50316](https://github.com/NixOS/nixpkgs/pull/50316) for details.
+
+- GitLab Shell previously used the nix store paths for the `gitlab-shell` command in its `authorized_keys` file, which might stop working after garbage collection. To circumvent that, we regenerated that file on each startup. As `gitlab-shell` has now been changed to use `/var/run/current-system/sw/bin/gitlab-shell`, this is not necessary anymore, but there might be leftover lines with a nix store path. Regenerate the `authorized_keys` file via `sudo -u git -H gitlab-rake gitlab:shell:setup` in that case.
+
+- The `pam_unix` account module is now loaded with its control field set to `required` instead of `sufficient`, so that later PAM account modules that might do more extensive checks are being executed. Previously, the whole account module verification was exited prematurely in case a nss module provided the account name to `pam_unix`. The LDAP and SSSD NixOS modules already add their NSS modules when enabled. In case your setup breaks due to some later PAM account module previously shadowed, or failing NSS lookups, please file a bug. You can get back the old behaviour by manually setting `security.pam.services.<name?>.text`.
+
+- The `pam_unix` password module is now loaded with its control field set to `sufficient` instead of `required`, so that later PAM password modules that manage a password are still executed. Previously, for example, changing an LDAP account's password through PAM was not possible: the whole password module verification was exited prematurely by `pam_unix`, preventing `pam_ldap` from managing the password as it should.
+
+- `fish` has been upgraded to 3.0. It comes with a number of improvements and backwards incompatible changes. See the `fish` [release notes](https://github.com/fish-shell/fish-shell/releases/tag/3.0.0) for more information.
+
+- The ibus-table input method has had a change in config format, which causes all previous settings to be lost. See [this commit message](https://github.com/mike-fabian/ibus-table/commit/f9195f877c5212fef0dfa446acb328c45ba5852b) for details.
+
+- NixOS module system type `types.optionSet` and `lib.mkOption` argument `options` are deprecated. Use `types.submodule` instead. ([\#54637](https://github.com/NixOS/nixpkgs/pull/54637))
+
+- `matrix-synapse` has been updated to version 0.99. It will [no longer generate a self-signed certificate on first launch](https://github.com/matrix-org/synapse/pull/4509) and will be [the last version to accept self-signed certificates](https://matrix.org/blog/2019/02/05/synapse-0-99-0/). As such, it is now recommended to use a proper certificate verified by a root CA (for example Let's Encrypt). The new [manual chapter on Matrix](#module-services-matrix) contains a working example of using nginx as a reverse proxy in front of `matrix-synapse`, using Let's Encrypt certificates.
+
+- `mailutils` now works by default when `sendmail` is not in a setuid wrapper. As a consequence, the `sendmailPath` argument, having lost its main use, has been removed.
+
+- `graylog` has been upgraded from version 2.\* to 3.\*. Some setups making use of extraConfig (especially those exposing Graylog via reverse proxies) need to be updated as upstream removed/replaced some settings. See [Upgrading Graylog](http://docs.graylog.org/en/3.0/pages/upgrade/graylog-3.0.html#simplified-http-interface-configuration) for details.
+
+- The option `users.ldap.bind.password` was renamed to `users.ldap.bind.passwordFile`, and needs to be readable by the `nslcd` user. Same applies to the new `users.ldap.daemon.rootpwmodpwFile` option.
+
+- `nodejs-6_x` is end-of-life. `nodejs-6_x`, `nodejs-slim-6_x` and `nodePackages_6_x` are removed.
+
+## Other Notable Changes {#sec-release-19.03-notable-changes}
+
+- The `services.matomo` module gained the option `services.matomo.package` which determines the used Matomo version.
+
+  The Matomo module now also comes with the systemd service `matomo-archive-processing.service` and a timer that automatically triggers archive processing every hour. This means that you can safely [ disable browser triggers for Matomo archiving ](https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour) at `Administration > System > General Settings`.
+
+  Additionally, you can enable the option to [delete old visitor logs](https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs) at `Administration > System > Privacy`, but make sure that you run `systemctl start matomo-archive-processing.service` at least once without errors if you have already collected data before, so that the reports get archived before the source data gets deleted.
+
+- `composableDerivation` along with supporting library functions has been removed.
+
+- The deprecated `truecrypt` package has been removed and the `truecrypt` attribute is now an alias for `veracrypt`. VeraCrypt is backward-compatible with TrueCrypt volumes. Note that `cryptsetup` also supports loading TrueCrypt volumes.
+
+- The Kubernetes DNS addon, kube-dns, has been replaced with CoreDNS. This change is made in accordance with Kubernetes making CoreDNS the official default starting from [Kubernetes v1.11](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG-1.11.md#sig-cluster-lifecycle). Please beware that upgrading the DNS addon on existing clusters might induce minor downtime while the DNS addon terminates and re-initializes. Also note that the DNS service now runs with 2 pod replicas by default. The desired number of replicas can be configured using `services.kubernetes.addons.dns.replicas`.
+
+- The quassel-webserver package and module was removed from nixpkgs due to the lack of maintainers.
+
+- The manual gained a [ new chapter on self-hosting `matrix-synapse` and `riot-web` ](#module-services-matrix), the most prevalent server and client implementations for the [Matrix](https://matrix.org/) federated communication network.
+
+- The astah-community package was removed from nixpkgs due to it being discontinued and the downloads not being available anymore.
+
+- The httpd service now saves log files with a .log file extension by default for easier integration with the logrotate service.
+
+- The owncloud server packages and httpd subservice module were removed from nixpkgs due to the lack of maintainers.
+
+- It is now possible to use ZRAM devices as general-purpose ephemeral block devices, not only as swap. Using more than 1 device as ZRAM swap is no longer recommended, but is still possible by setting `zramSwap.swapDevices` explicitly.
+
+  The ZRAM compression algorithm can now be changed.
+
+  Changes to ZRAM algorithm are applied during `nixos-rebuild switch`, so make sure you have enough swap space on disk to survive ZRAM device rebuild. Alternatively, use `nixos-rebuild boot; reboot`.
+
+- Flat volumes are now disabled by default in `hardware.pulseaudio`. This has been done to prevent applications that are unaware of this feature from setting their volumes to 100% on startup, potentially harming your audio hardware and your ears.
+
+  ::: {.note}
+  With this change, application-specific volumes are relative to the master volume, which can be adjusted independently, whereas before they were absolute; in effect, the device volume was scaled with the volume of the loudest application.
+  :::
+
+- The [`ndppd`](https://github.com/DanielAdolfsson/ndppd) module now supports [all config options](options.html#opt-services.ndppd.enable) provided by the current upstream version as service options. Additionally the `ndppd` package doesn't contain the systemd unit configuration from upstream anymore, the unit is completely configured by the NixOS module now.
+
+- New installs of NixOS will default to the Redmine 4.x series unless otherwise specified in `services.redmine.package` while existing installs of NixOS will default to the Redmine 3.x series.
+
+- The [Grafana module](options.html#opt-services.grafana.enable) now supports declarative [datasource and dashboard](http://docs.grafana.org/administration/provisioning/) provisioning.
+
+- The use of insecure ports on Kubernetes has been deprecated. Thus the options `services.kubernetes.apiserver.port` and `services.kubernetes.controllerManager.port` have been renamed to `.insecurePort`, and the default of both options has changed to 0 (disabled).
+
+- Note that the default value of `services.kubernetes.apiserver.bindAddress` has changed from 127.0.0.1 to 0.0.0.0, allowing the apiserver to be accessible from outside the master node itself. If the apiserver insecurePort is enabled, it is strongly recommended to only bind on the loopback interface. See `services.kubernetes.apiserver.insecureBindAddress`.
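+
+  A sketch of binding the apiserver back to the loopback interface only:
+
+  ```nix
+  {
+    services.kubernetes.apiserver.bindAddress = "127.0.0.1";
+  }
+  ```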
+
+- The options `services.kubernetes.apiserver.allowPrivileged` and `services.kubernetes.kubelet.allowPrivileged` now default to false, disallowing privileged containers on the cluster.
+
+- The Kubernetes module no longer adds the kubernetes package to `environment.systemPackages` implicitly.
+
+- The `intel` driver has been removed from the default list of [X.org video drivers](options.html#opt-services.xserver.videoDrivers). The `modesetting` driver should take over automatically; it is better maintained upstream and has fewer problems with advanced X11 features. This can lead to a change in the output names used by `xrandr`. Some performance regressions on some GPU models might happen. Some OpenCL and VA-API applications might also break (Beignet seems to provide OpenCL support with the `modesetting` driver, too). The kernel mode setting API does not support backlight control, so the `xbacklight` tool will not work; the backlight level can be controlled directly via `/sys/` or with `brightnessctl`. Users who need this functionality more than multi-output XRandR are advised to add `intel` to `videoDrivers` and report an issue (or provide additional details in an existing one).
+
+- Openmpi has been updated to version 4.0.0, which removes some deprecated MPI-1 symbols. This may break some older applications that still rely on those symbols. An upgrade guide can be found [here](https://www.open-mpi.org/faq/?category=mpi-removed).
+
+- The nginx package now relies on OpenSSL 1.1 and supports TLS 1.3 by default. You can set the protocols used by the nginx service using [services.nginx.sslProtocols](options.html#opt-services.nginx.sslProtocols).
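+
+  For example, a sketch of pinning the protocols explicitly (the value shown is illustrative, not necessarily the module default):
+
+  ```nix
+  {
+    services.nginx.sslProtocols = "TLSv1.2 TLSv1.3";
+  }
+  ```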
+
+- A new subcommand `nixos-rebuild edit` was added.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-1909.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-1909.section.md
new file mode 100644
index 000000000000..2bd04f8dd40a
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-1909.section.md
@@ -0,0 +1,313 @@
+# Release 19.09 ("Loris", 2019/10/09) {#sec-release-19.09}
+
+## Highlights {#sec-release-19.09-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- End of support is planned for end of April 2020, handing over to 20.03.
+
+- Nix has been updated to 2.3; see its [release notes](https://nixos.org/nix/manual/#ssec-relnotes-2.3).
+
+- Core version changes:
+
+  systemd: 239 -\> 243
+
+  gcc: 7 -\> 8
+
+  glibc: 2.27 (unchanged)
+
+  linux: 4.19 LTS (unchanged)
+
+  openssl: 1.0 -\> 1.1
+
+- Desktop version changes:
+
+  plasma5: 5.14 -\> 5.16
+
+  gnome3: 3.30 -\> 3.32
+
+- PHP now defaults to PHP 7.3, updated from 7.2.
+
+- PHP 7.1 is no longer supported due to upstream not supporting this version for the entire lifecycle of the 19.09 release.
+
+- The binfmt module is now easier to use. Additional systems can be added through `boot.binfmt.emulatedSystems`. For instance, `boot.binfmt.emulatedSystems = [ "wasm32-wasi" "x86_64-windows" "aarch64-linux" ];` will set up binfmt interpreters for each of those listed systems.
+
+- The installer now uses a less privileged `nixos` user whereas before we logged in as root. To gain root privileges use `sudo -i` without a password.
+
+- We've updated to Xfce 4.14, which brings a new module `services.xserver.desktopManager.xfce4-14`. If you'd like to upgrade, please switch from the `services.xserver.desktopManager.xfce` module as it will be deprecated in a future release. There are incompatibilities with the current Xfce module: it doesn't support `thunarPlugins`, and it isn't recommended to use `services.xserver.desktopManager.xfce` and `services.xserver.desktopManager.xfce4-14` simultaneously or to downgrade from Xfce 4.14 after upgrading.
+
+- The GNOME 3 desktop manager module sports an interface to enable/disable core services, applications, and optional GNOME packages like games.
+
+  - `services.gnome3.core-os-services.enable`
+
+  - `services.gnome3.core-shell.enable`
+
+  - `services.gnome3.core-utilities.enable`
+
+  - `services.gnome3.games.enable`
+
+  With these options we hope to give users finer-grained control over their systems. Prior to this change you'd either have to manually disable options or use `environment.gnome3.excludePackages`, which only excluded the optional applications. `environment.gnome3.excludePackages` is now unguarded; it can exclude any package installed with `environment.systemPackages` in the GNOME 3 module.
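+
+  For example, a minimal sketch that keeps the core utilities but skips the optional games:
+
+  ```nix
+  {
+    services.gnome3.core-utilities.enable = true;
+    services.gnome3.games.enable = false;
+  }
+  ```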
+
+- Orthogonal to the previous changes to the GNOME 3 desktop manager module, we've updated all default services and applications to match a default reference GNOME 3 experience as closely as possible.
+
+  **The following changes were enacted in `services.gnome3.core-utilities.enable`**
+
+  - `accerciser`
+
+  - `dconf-editor`
+
+  - `evolution`
+
+  - `gnome-documents`
+
+  - `gnome-nettool`
+
+  - `gnome-power-manager`
+
+  - `gnome-todo`
+
+  - `gnome-tweaks`
+
+  - `gnome-usage`
+
+  - `gucharmap`
+
+  - `nautilus-sendto`
+
+  - `vinagre`
+
+  - `cheese`
+
+  - `geary`
+
+  **The following changes were enacted in `services.gnome3.core-shell.enable`**
+
+  - `gnome-color-manager`
+
+  - `orca`
+
+  - `services.avahi.enable`
+
+## New Services {#sec-release-19.09-new-services}
+
+The following new services were added since the last release:
+
+- `./programs/dwm-status.nix`
+
+- The new `hardware.printers` module makes it possible to declaratively configure CUPS printers via the `ensurePrinters` and `ensureDefaultPrinter` options. `ensurePrinters` will never delete existing printers, but will make sure that the given printers are configured as declared.
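+
+  A minimal sketch of a declared printer; the field names follow the new module, and the printer name, URI and model are illustrative:
+
+  ```nix
+  {
+    hardware.printers.ensureDefaultPrinter = "office";
+    hardware.printers.ensurePrinters = [
+      {
+        name = "office";
+        deviceUri = "ipp://192.168.1.50/ipp/print";
+        model = "everywhere";
+      }
+    ];
+  }
+  ```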
+
+- There is a new [services.system-config-printer.enable](options.html#opt-services.system-config-printer.enable) and [programs.system-config-printer.enable](options.html#opt-programs.system-config-printer.enable) module for the program of the same name. If you previously had `system-config-printer` enabled through some other means you should migrate to using one of these modules.
+
+  - `services.xserver.desktopManager.plasma5`
+
+  - `services.xserver.desktopManager.gnome3`
+
+  - `services.xserver.desktopManager.pantheon`
+
+  - `services.xserver.desktopManager.mate` (note: MATE uses `programs.system-config-printer`, as it doesn't use it as a service but uses its graphical interface directly)
+
+- [services.blueman.enable](options.html#opt-services.blueman.enable) has been added. If you previously had blueman installed via `environment.systemPackages`, please migrate to using the NixOS module, as installing the package alone results in an insufficiently configured blueman.
+
+## Backward Incompatibilities {#sec-release-19.09-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- Buildbot no longer supports Python 2, as support was dropped upstream in version 2.0.0. Configurations may need to be modified to make them compatible with Python 3.
+
+- PostgreSQL now uses `/run/postgresql` as its socket directory instead of `/tmp`. So if you run an application like eg. Nextcloud, where you need to use the Unix socket path as the database host name, you need to change it accordingly.
+
+- PostgreSQL 9.4 is scheduled EOL during the 19.09 life cycle and has been removed.
+
+- The options `services.prometheus.alertmanager.user` and `services.prometheus.alertmanager.group` have been removed because the alertmanager service is now using systemd's [ DynamicUser mechanism](http://0pointer.net/blog/dynamic-users-with-systemd.html) which obviates these options.
+
+- The NetworkManager systemd unit was renamed back from network-manager.service to NetworkManager.service for better compatibility with other applications expecting this name. The same applies to ModemManager where modem-manager.service is now called ModemManager.service again.
+
+- The `services.nzbget.configFile` and `services.nzbget.openFirewall` options were removed, as they are managed internally by nzbget. The `services.nzbget.dataDir` option hadn't actually been used by the module for some time and so was removed as cleanup.
+
+- The `services.mysql.pidDir` option was removed, as it was only used by the wordpress apache-httpd service to wait for mysql to have started up. This can be accomplished by either describing a dependency on mysql.service (preferred) or waiting for the (hardcoded) `/run/mysqld/mysql.sock` file to appear.
+
+- The `services.emby.enable` module has been removed, see `services.jellyfin.enable` instead for a free software fork of Emby. See the Jellyfin documentation: [ Migrating from Emby to Jellyfin ](https://jellyfin.readthedocs.io/en/latest/administrator-docs/migrate-from-emby/)
+
+- IPv6 Privacy Extensions are now enabled by default for undeclared interfaces. The previous behaviour was quite misleading --- even though the default value for `networking.interfaces.*.preferTempAddress` was `true`, undeclared interfaces would not prefer temporary addresses. Now, interfaces not mentioned in the config will prefer temporary addresses. EUI64 addresses can still be set as preferred by explicitly setting the option to `false` for the interface in question.
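+
+  A sketch of preferring EUI64 addresses again on a single interface (the interface name is illustrative):
+
+  ```nix
+  {
+    networking.interfaces.eth0.preferTempAddress = false;
+  }
+  ```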
+
+- Since Bittorrent Sync was superseded by Resilio Sync in 2016, the `bittorrentSync`, `bittorrentSync14`, and `bittorrentSync16` packages have been removed in favor of `resilio-sync`.
+
+  The corresponding module, `services.btsync` has been replaced by the `services.resilio` module.
+
+- The httpd service no longer attempts to start the postgresql service. If you have come to depend on this behaviour then you can preserve the behavior with the following configuration: `systemd.services.httpd.after = [ "postgresql.service" ];`
+
+  The option `services.httpd.extraSubservices` has been marked as deprecated. You may still use this feature, but it will be removed in a future release of NixOS. You are encouraged to convert any httpd subservices you may have written to a full NixOS module.
+
+  Most of the httpd subservices packaged with NixOS have been replaced with full NixOS modules including LimeSurvey, WordPress, and Zabbix. These modules can be enabled using the `services.limesurvey.enable`, `services.mediawiki.enable`, `services.wordpress.enable`, and `services.zabbixWeb.enable` options.
+
+- The option `systemd.network.networks.<name>.routes.*.routeConfig.GatewayOnlink` was renamed to `systemd.network.networks.<name>.routes.*.routeConfig.GatewayOnLink` (capital `L`). This follows [upstream's renaming](https://github.com/systemd/systemd/commit/9cb8c5593443d24c19e40bfd4fc06d672f8c554c) of the setting.
+
+- As of this release the NixOps feature `autoLuks` is deprecated. It no longer works with our systemd version without manual intervention.
+
+  Whenever the usage of the module is detected the evaluation will fail with a message explaining why and how to deal with the situation.
+
+  A new knob named `nixops.enableDeprecatedAutoLuks` has been introduced to disable the eval failure and to acknowledge the notice was received and read. If you plan on using the feature please note that it might break with subsequent updates.
+
+  Make sure you set the `_netdev` option for each of the file systems referring to block devices provided by the autoLuks module. Not doing this might render the system in a state where it doesn't boot anymore.
+
+  If you are actively using the `autoLuks` module please let us know in [issue \#62211](https://github.com/NixOS/nixpkgs/issues/62211).
+
+- The setopt declarations will be evaluated at the end of `/etc/zshrc`, so any code in [programs.zsh.interactiveShellInit](options.html#opt-programs.zsh.interactiveShellInit), [programs.zsh.loginShellInit](options.html#opt-programs.zsh.loginShellInit) and [programs.zsh.promptInit](options.html#opt-programs.zsh.promptInit) may break if it relies on those options being set.
+
+- The `prometheus-nginx-exporter` package now uses the official exporter provided by NGINX Inc. Its metrics are differently structured and are incompatible to the old ones. For information about the metrics, have a look at the [official repo](https://github.com/nginxinc/nginx-prometheus-exporter).
+
+- The `shibboleth-sp` package has been updated to version 3. It is largely backward compatible, for further information refer to the [release notes](https://wiki.shibboleth.net/confluence/display/SP3/ReleaseNotes) and [upgrade guide](https://wiki.shibboleth.net/confluence/display/SP3/UpgradingFromV2).
+
+- Nodejs 8 is scheduled to go EOL during the lifetime of 19.09 and has been dropped.
+
+- By default, prometheus exporters are now run with `DynamicUser` enabled. Exporters that need a real user now run under a separate user and group which follow the pattern `<exporter-name>-exporter`, instead of the previous default `nobody` and `nogroup`. Only some exporters are affected by the latter, namely the exporters `dovecot`, `node`, `postfix` and `varnish`.
+
+- The `ibus-qt` package is not installed by default anymore when [i18n.inputMethod.enabled](options.html#opt-i18n.inputMethod.enabled) is set to `ibus`. If IBus support in Qt 4.x applications is required, add the `ibus-qt` package to your [environment.systemPackages](options.html#opt-environment.systemPackages) manually.
+
+- The CUPS Printing service now uses socket-based activation by default, only starting when needed. The previous behavior can be restored by setting `services.cups.startWhenNeeded` to `false`.
+
+- The `services.systemhealth` module has been removed from nixpkgs due to lack of maintainer.
+
+- The `services.mantisbt` module has been removed from nixpkgs due to lack of maintainer.
+
+- Squid 3 has been removed and the `squid` derivation now refers to Squid 4.
+
+- The `services.pdns-recursor.extraConfig` option has been replaced by `services.pdns-recursor.settings`. The new option allows setting extra configuration while being better type-checked and mergeable.
+
+- No service depends on `keys.target` anymore which is a systemd target that indicates if all [NixOps keys](https://nixos.org/nixops/manual/#idm140737322342384) were successfully uploaded. Instead, `<key-name>-key.service` should be used to define a dependency of a key in a service. The full issue behind the `keys.target` dependency is described at [NixOS/nixpkgs\#67265](https://github.com/NixOS/nixpkgs/issues/67265).
+
+  The following services are affected by this:
+
+  - [`services.dovecot2`](options.html#opt-services.dovecot2.enable)
+
+  - [`services.nsd`](options.html#opt-services.nsd.enable)
+
+  - [`services.softether`](options.html#opt-services.softether.enable)
+
+  - [`services.strongswan`](options.html#opt-services.strongswan.enable)
+
+  - [`services.strongswan-swanctl`](options.html#opt-services.strongswan-swanctl.enable)
+
+  - [`services.httpd`](options.html#opt-services.httpd.enable)
+
+- The `security.acme.directory` option has been replaced by a read-only `security.acme.certs.<cert>.directory` option for each certificate you define. This will be a subdirectory of `/var/lib/acme`. You can use this read-only option to figure out where the certificates are stored for a specific certificate. For example, the `services.nginx.virtualHosts.<name>.enableACME` option will use this directory option to find the certs for the virtual host.
+
+  `security.acme.preDelay` and `security.acme.activationDelay` options have been removed. To execute a service before certificates are provisioned or renewed add a `RequiredBy=acme-${cert}.service` to any service.
+
+  Furthermore, the acme module will not automatically add a dependency on `lighttpd.service` anymore. If you are using certificates provided by letsencrypt for lighttpd, then you should depend on the certificate service `acme-${cert}.service` manually.
+
+  For nginx, the dependencies are still automatically managed when `services.nginx.virtualhosts.<name>.enableACME` is enabled just like before. What changed is that nginx now directly depends on the specific certificates that it needs, instead of depending on the catch-all `acme-certificates.target`. This target unit was also removed from the codebase. This will mean nginx will no longer depend on certificates it isn't explicitly managing and fixes a bug with certificate renewal ordering racing with nginx restarting which could lead to nginx getting in a broken state as described at [NixOS/nixpkgs\#60180](https://github.com/NixOS/nixpkgs/issues/60180).
+
+- The old deprecated `emacs` package sets have been dropped. What used to be called `emacsPackagesNg` is now called `emacsPackages`.
+
+- `services.xserver.desktopManager.xterm` is now disabled by default if `stateVersion` is 19.09 or higher. Previously the xterm desktopManager was enabled when xserver was enabled, but it isn't useful for all people so it didn't make sense to have any desktopManager enabled by default.
+
+- The WeeChat plugin `pkgs.weechatScripts.weechat-xmpp` has been removed as it doesn't receive any updates from upstream and depends on outdated Python2-based modules.
+
+- Old unsupported versions (`logstash5`, `kibana5`, `filebeat5`, `heartbeat5`, `metricbeat5`, `packetbeat5`) of the ELK-stack and Elastic beats have been removed.
+
+- For NixOS 19.03, both Prometheus 1 and 2 were available to allow for a seamless transition from version 1 to 2 with existing setups. Because Prometheus 1 is no longer developed, it was removed. Prometheus 2 is now configured with `services.prometheus`.
+
+- Citrix Receiver (`citrix_receiver`) has been dropped in favor of Citrix Workspace (`citrix_workspace`).
+
+- The `services.gitlab` module has had its literal secret options (`services.gitlab.smtp.password`, `services.gitlab.databasePassword`, `services.gitlab.initialRootPassword`, `services.gitlab.secrets.secret`, `services.gitlab.secrets.db`, `services.gitlab.secrets.otp` and `services.gitlab.secrets.jws`) replaced by file-based versions (`services.gitlab.smtp.passwordFile`, `services.gitlab.databasePasswordFile`, `services.gitlab.initialRootPasswordFile`, `services.gitlab.secrets.secretFile`, `services.gitlab.secrets.dbFile`, `services.gitlab.secrets.otpFile` and `services.gitlab.secrets.jwsFile`). This was done so that secrets aren't stored in the world-readable nix store, but means that for each option you'll have to create a file with the same exact string, add "File" to the end of the option name, and change the definition to a string pointing to the corresponding file; e.g. `services.gitlab.databasePassword = "supersecurepassword"` becomes `services.gitlab.databasePasswordFile = "/path/to/secret_file"` where the file `secret_file` contains the string `supersecurepassword`.
+
+  The state path (`services.gitlab.statePath`) now has the following restriction: no parent directory can be owned by any other user than `root` or the user specified in `services.gitlab.user`; i.e. if `services.gitlab.statePath` is set to `/var/lib/gitlab/state`, `gitlab` and all parent directories must be owned by either `root` or the user specified in `services.gitlab.user`.
+
+- The `networking.useDHCP` option is unsupported in combination with `networking.useNetworkd`, in anticipation of defaulting to it. It has to be set to `false`, with DHCP enabled per interface via `networking.interfaces.<name>.useDHCP = true;`.
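+
+  For example, a sketch of per-interface DHCP together with networkd (the interface name is illustrative):
+
+  ```nix
+  {
+    networking.useNetworkd = true;
+    networking.useDHCP = false;
+    networking.interfaces.eth0.useDHCP = true;
+  }
+  ```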
+
+- The Twitter client `corebird` has been dropped as [it is discontinued and does not work against the new Twitter API](https://www.patreon.com/posts/corebirds-future-18921328). Please use the fork `cawbird` instead which has been adapted to the API changes and is still maintained.
+
+- The `nodejs-11_x` package has been removed as it's EOLed by upstream.
+
+- Because of the systemd upgrade, systemd-timesyncd will no longer work if `system.stateVersion` is not set correctly. When upgrading from NixOS 19.03, please make sure that `system.stateVersion` is set to `"19.03"`, or lower if the installation dates back to an earlier version of NixOS.
+
+- Due to the short lifetime of non-LTS kernel releases package attributes like `linux_5_1`, `linux_5_2` and `linux_5_3` have been removed to discourage dependence on specific non-LTS kernel versions in stable NixOS releases. Going forward, versioned attributes like `linux_4_9` will exist for LTS versions only. Please use `linux_latest` or `linux_testing` if you depend on non-LTS releases. Keep in mind that `linux_latest` and `linux_testing` will change versions under the hood during the lifetime of a stable release and might include breaking changes.
+
+- Because of the systemd upgrade, some network interfaces might change their name. For details see [ upstream docs](https://www.freedesktop.org/software/systemd/man/systemd.net-naming-scheme.html#History) or [ our ticket](https://github.com/NixOS/nixpkgs/issues/71086).
+
+## Other Notable Changes {#sec-release-19.09-notable-changes}
+
+- The `documentation` module gained an option named `documentation.nixos.includeAllModules` which makes the generated `configuration.nix(5)` manual page include all options from all NixOS modules included in a given `configuration.nix` configuration file. Currently, it is set to `false` by default as enabling it frequently prevents evaluation. But the plan is to eventually have it set to `true` by default. Please set it to `true` now in your `configuration.nix` and fix all the bugs it uncovers.
+
+- The `vlc` package gained support for Chromecast streaming, enabled by default. TCP port 8010 must be open for it to work, so something like `networking.firewall.allowedTCPPorts = [ 8010 ];` may be required in your configuration. Also consider enabling [ Accelerated Video Playback](https://nixos.wiki/wiki/Accelerated_Video_Playback) for better transcoding performance.
+
+- The following changes apply if the `stateVersion` is changed to 19.09 or higher. For `stateVersion = "19.03"` or lower the old behavior is preserved.
+
+  - `solr.package` defaults to `pkgs.solr_8`.
+
+- The `hunspellDicts.fr-any` dictionary now ships with `fr_FR.{aff,dic}` which is linked to `fr-toutesvariantes.{aff,dic}`.
+
+- The `mysql` service now runs as the `mysql` user. Previously, systemd executed it as root and mysql dropped privileges itself. This includes the `ExecStartPre=` and `ExecStartPost=` phases. To accomplish that, runtime and data directory setup was delegated to `RuntimeDirectory` and tmpfiles.
+
+- With the upgrade to systemd version 242 the `systemd-timesyncd` service is no longer using `DynamicUser=yes`. In order for the upgrade to work we rely on an activation script to move the state from the old to the new directory. The older directory (prior `19.09`) was `/var/lib/private/systemd/timesync`.
+
+  As long as the `system.config.stateVersion` is below `19.09`, the state folder will be migrated to its proper location (`/var/lib/systemd/timesync`), if required.
+
+- The package `avahi` is now built to look up service definitions from `/etc/avahi/services` instead of its output directory in the nix store. Accordingly the module `avahi` now supports custom service definitions via `services.avahi.extraServiceFiles`, which are then placed in the aforementioned directory. See `avahi.service(5)` for more information on custom service definitions.
+
+- Since version 0.1.19, `cargo-vendor` honors package includes that are specified in the `Cargo.toml` file of Rust crates. `rustPlatform.buildRustPackage` uses `cargo-vendor` to collect and build dependent crates. Since this change in `cargo-vendor` changes the set of vendored files for most Rust packages, the hash that is used to verify the dependencies, `cargoSha256`, also changes.
+
+  The `cargoSha256` hashes of all in-tree derivations that use `buildRustPackage` have been updated to reflect this change. However, third-party derivations that use `buildRustPackage` may have to be updated as well.
+
+- The `consul` package was upgraded past version `1.5`, so its deprecated legacy UI is no longer available.
+
+- The default resample-method for PulseAudio has been changed from the upstream default `speex-float-1` to `speex-float-5`. Be aware that low-powered ARM-based and MIPS-based boards will struggle with this, so you'll need to set `hardware.pulseaudio.daemon.config.resample-method` back to `speex-float-1`.
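+
+  A sketch of switching back to the lighter resampler:
+
+  ```nix
+  {
+    hardware.pulseaudio.daemon.config.resample-method = "speex-float-1";
+  }
+  ```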
+
+- The `phabricator` package and associated `httpd.extraSubservice`, as well as the `phd` service have been removed from nixpkgs due to lack of maintainer.
+
+- The `mercurial` `httpd.extraSubservice` has been removed from nixpkgs due to lack of maintainer.
+
+- The `trac` `httpd.extraSubservice` has been removed from nixpkgs because it was unmaintained.
+
+- The `foswiki` package and associated `httpd.extraSubservice` have been removed from nixpkgs due to lack of maintainer.
+
+- The `tomcat-connector` `httpd.extraSubservice` has been removed from nixpkgs.
+
+- It's now possible to change configuration in [services.nextcloud](options.html#opt-services.nextcloud.enable) after the initial deploy since all config parameters are persisted in an additional config file generated by the module. Previously core configuration like database parameters were set using their imperative installer after creating `/var/lib/nextcloud`.
+
+- There now exists `lib.forEach`, which is like `map` but with the arguments flipped. When a mapping function's body spans many lines (or has nested `map`s), it is often hard to follow which list is being mapped over.
+
+  The previous solution to this problem was either to use the `lib.flip map` idiom or to extract the anonymous mapping function into a named one. Both can still be used, but `lib.forEach` is preferred over `lib.flip map`.
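+
+  A small sketch of the equivalence (assuming `lib` is in scope):
+
+  ```nix
+  # both expressions evaluate to [ 2 4 6 ]
+  lib.forEach [ 1 2 3 ] (x: x * 2)
+  # map (x: x * 2) [ 1 2 3 ]
+  ```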
+
+- The `/etc/sysctl.d/nixos.conf` file containing all the options set via [boot.kernel.sysctl](options.html#opt-boot.kernel.sysctl) was moved to `/etc/sysctl.d/60-nixos.conf`, as `sysctl.d(5)` recommends prefixing all filenames in `/etc/sysctl.d` with a two-digit number and a dash to simplify the ordering of the files.
+
+- We now install the sysctl snippets shipped with systemd.
+
+  - Loose reverse path filtering
+
+  - Source route filtering
+
+  - `fq_codel` as a packet scheduler (this helps to fight bufferbloat)
+
+  This also configures the kernel to pass core dumps to `systemd-coredump`, and restricts the SysRq key combinations to the sync command only. These sysctl snippets can be found in `/etc/sysctl.d/50-*.conf`, and overridden via [boot.kernel.sysctl](options.html#opt-boot.kernel.sysctl) (which will place the parameters in `/etc/sysctl.d/60-nixos.conf`).
+
+- Core dumps are now processed by `systemd-coredump` by default. `systemd-coredump` behaviour can still be modified via `systemd.coredump.extraConfig`. To stick to the old behaviour (having the kernel dump to a file called `core` in the working directory), without piping it through `systemd-coredump`, set `systemd.coredump.enable` to `false`.
+
+- `systemd.packages` option now also supports generators and shutdown scripts. Old `systemd.generator-packages` option has been removed.
+
+- The `rmilter` package, along with its associated module and options, was removed due to deprecation by the upstream developer. Use `rspamd` in proxy mode instead.
+
+- systemd cgroup accounting via the [systemd.enableCgroupAccounting](options.html#opt-systemd.enableCgroupAccounting) option is now enabled by default. It now also enables the more recent Block IO and IP accounting features.
+
+- We no longer enable custom font rendering settings with `fonts.fontconfig.penultimate.enable` by default. The defaults from fontconfig are sufficient.
+
+- The `crashplan` package and the `crashplan` service have been removed from nixpkgs due to crashplan shutting down the service, while the `crashplansb` package and `crashplan-small-business` service have been removed from nixpkgs due to lack of maintainer.
+
+- The [redis module](options.html#opt-services.redis.enable) was hardcoded to use the `redis` user, `/run/redis` as its runtime directory and `/var/lib/redis` as its state directory. Note that the NixOS module for Redis now disables kernel support for Transparent Huge Pages (THP), because this feature causes major performance problems for Redis (see https://redis.io/topics/latency).
+
+- Using `fonts.enableDefaultFonts` adds a default emoji font `noto-fonts-emoji`.
+
+  - `services.xserver.enable`
+
+  - `programs.sway.enable`
+
+  - `programs.way-cooler.enable`
+
+  - `services.xrdp.enable`
+
+- The `altcoins` categorization of packages has been removed. You now access these packages at the top level, i.e. `nix-shell -p dogecoin` instead of `nix-shell -p altcoins.dogecoin`, etc.
+
+- Ceph has been upgraded to v14.2.1. See the [release notes](https://ceph.com/releases/v14-2-0-nautilus-released/) for details. The mgr dashboard as well as OSDs backed by loop devices are no longer explicitly supported by the package and module. Note: there have been some issues with python-cherrypy, which is used by the dashboard and prometheus mgr modules (and possibly others), hence `0000-dont-check-cherrypy-version.patch`.
+
+- `pkgs.weechat` is now compiled against `pkgs.python3`. WeeChat also [recommends using Python 3](https://weechat.org/scripts/python3/) in its docs.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2003.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2003.section.md
new file mode 100644
index 000000000000..695f8a2c95ca
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2003.section.md
@@ -0,0 +1,507 @@
+# Release 20.03 ("Markhor", 2020/04/20) {#sec-release-20.03}
+
+## Highlights {#sec-release-20.03-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- Support is planned until the end of October 2020, handing over to 20.09.
+
+- Core version changes:
+
+  gcc: 8.3.0 -\> 9.2.0
+
+  glibc: 2.27 -\> 2.30
+
+  linux: 4.19 -\> 5.4
+
+  mesa: 19.1.5 -\> 19.3.3
+
+  openssl: 1.0.2u -\> 1.1.1d
+
+- Desktop version changes:
+
+  plasma5: 5.16.5 -\> 5.17.5
+
+  kdeApplications: 19.08.2 -\> 19.12.3
+
+  gnome3: 3.32 -\> 3.34
+
+  pantheon: 5.0 -\> 5.1.3
+
+- Linux kernel is updated to branch 5.4 by default (from 4.19).
+
+- Grub is updated to 2.04, adding support for booting from F2FS filesystems and Btrfs volumes using zstd compression. Note that some users have been unable to boot after upgrading to 2.04 - for more information, please see [this discussion](https://github.com/NixOS/nixpkgs/issues/61718#issuecomment-617618503).
+
+- The PostgreSQL NixOS service now defaults to v11.
+
+- The graphical installer image starts the graphical session automatically. Before you'd be greeted by a tty and asked to enter `systemctl start display-manager`. It is now possible to disable the display-manager from running by selecting the `Disable display-manager` quirk in the boot menu.
+
+- GNOME 3 has been upgraded to 3.34. Please take a look at their [Release Notes](https://help.gnome.org/misc/release-notes/3.34) for details.
+
+- If you enable the Pantheon Desktop Manager via [services.xserver.desktopManager.pantheon.enable](options.html#opt-services.xserver.desktopManager.pantheon.enable), we now default to also use [ Pantheon's newly designed greeter ](https://blog.elementary.io/say-hello-to-the-new-greeter/). Contrary to NixOS's usual update policy, Pantheon will receive updates during the cycle of NixOS 20.03 when backwards compatible.
+
+- By default zfs pools will now be trimmed on a weekly basis. Trimming is only done on supported devices (i.e. NVMe or SSDs) and should improve throughput and lifetime of these devices. It is controlled by the `services.zfs.trim.enable` option. The zfs scrub service (`services.zfs.autoScrub.enable`) and the zfs autosnapshot service (`services.zfs.autoSnapshot.enable`) are now only enabled if zfs is set in `config.boot.initrd.supportedFilesystems` or `config.boot.supportedFilesystems`. These lists will automatically contain zfs as soon as any zfs mountpoint is configured in `fileSystems`.
+
+- `nixos-option` has been rewritten in C++, speeding it up, improving correctness, and adding a `-r` option which prints all options and their values recursively.
+
+- `services.xserver.desktopManager.default` and `services.xserver.windowManager.default` options were replaced by a single [services.xserver.displayManager.defaultSession](options.html#opt-services.xserver.displayManager.defaultSession) option to improve support for upstream session files. If you used something like:
+
+  ```nix
+  {
+    services.xserver.desktopManager.default = "xfce";
+    services.xserver.windowManager.default = "icewm";
+  }
+  ```
+
+  you should change it to:
+
+  ```nix
+  {
+    services.xserver.displayManager.defaultSession = "xfce+icewm";
+  }
+  ```
+
+- The testing driver implementation in NixOS is now in Python `make-test-python.nix`. This was done by Jacek Galowicz ([\@tfc](https://github.com/tfc)), and with the collaboration of Julian Stecklina ([\@blitz](https://github.com/blitz)) and Jana Traue ([\@jtraue](https://github.com/jtraue)). All documentation has been updated to use this testing driver, and a vast majority of the 286 tests in NixOS were ported to the Python driver. In 20.09 the Perl driver implementation, `make-test.nix`, is slated for removal. This should give users of the NixOS integration framework a transitory period to rewrite their tests to use the Python implementation. Users of the Perl driver will see this warning every time they use it:
+
+  ```ShellSession
+  $ warning: Perl VM tests are deprecated and will be removed for 20.09.
+  Please update your tests to use the python test driver.
+  See https://github.com/NixOS/nixpkgs/pull/71684 for details.
+  ```
+
+  API compatibility with the Perl driver is planned to be kept for at least the next release.
+
+## New Services {#sec-release-20.03-new-services}
+
+The following new services were added since the last release:
+
+- The kubernetes kube-proxy now supports a new hostname configuration `services.kubernetes.proxy.hostname`, which has to be set if the hostname of the node should be non-default.
+
+- UPower's configuration is now managed by NixOS and can be customized via `services.upower`.
+
+- To use Geary you should enable [programs.geary.enable](options.html#opt-programs.geary.enable) instead of just adding it to [environment.systemPackages](options.html#opt-environment.systemPackages). It was created so Geary could function properly outside of GNOME.
+
+- `./config/console.nix`
+
+- `./hardware/brillo.nix`
+
+- `./hardware/tuxedo-keyboard.nix`
+
+- `./programs/bandwhich.nix`
+
+- `./programs/bash-my-aws.nix`
+
+- `./programs/liboping.nix`
+
+- `./programs/traceroute.nix`
+
+- `./services/backup/sanoid.nix`
+
+- `./services/backup/syncoid.nix`
+
+- `./services/backup/zfs-replication.nix`
+
+- `./services/continuous-integration/buildkite-agents.nix`
+
+- `./services/databases/victoriametrics.nix`
+
+- `./services/desktops/gnome3/gnome-initial-setup.nix`
+
+- `./services/desktops/neard.nix`
+
+- `./services/games/openarena.nix`
+
+- `./services/hardware/fancontrol.nix`
+
+- `./services/mail/sympa.nix`
+
+- `./services/misc/freeswitch.nix`
+
+- `./services/misc/mame.nix`
+
+- `./services/monitoring/do-agent.nix`
+
+- `./services/monitoring/prometheus/xmpp-alerts.nix`
+
+- `./services/network-filesystems/orangefs/server.nix`
+
+- `./services/network-filesystems/orangefs/client.nix`
+
+- `./services/networking/3proxy.nix`
+
+- `./services/networking/corerad.nix`
+
+- `./services/networking/go-shadowsocks2.nix`
+
+- `./services/networking/ntp/openntpd.nix`
+
+- `./services/networking/shorewall.nix`
+
+- `./services/networking/shorewall6.nix`
+
+- `./services/networking/spacecookie.nix`
+
+- `./services/networking/trickster.nix`
+
+- `./services/networking/v2ray.nix`
+
+- `./services/networking/xandikos.nix`
+
+- `./services/networking/yggdrasil.nix`
+
+- `./services/web-apps/dokuwiki.nix`
+
+- `./services/web-apps/gotify-server.nix`
+
+- `./services/web-apps/grocy.nix`
+
+- `./services/web-apps/ihatemoney`
+
+- `./services/web-apps/moinmoin.nix`
+
+- `./services/web-apps/trac.nix`
+
+- `./services/web-apps/trilium.nix`
+
+- `./services/web-apps/shiori.nix`
+
+- `./services/web-servers/ttyd.nix`
+
+- `./services/x11/picom.nix`
+
+- `./services/x11/hardware/digimend.nix`
+
+- `./services/x11/imwheel.nix`
+
+- `./virtualisation/cri-o.nix`
+
+## Backward Incompatibilities {#sec-release-20.03-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- The dhcpcd package [does not request IPv4 addresses for tap and bridge interfaces anymore by default](https://roy.marples.name/archives/dhcpcd-discuss/0002621.html). In order to still get an address on a bridge interface, one has to disable `networking.useDHCP` and explicitly enable `networking.interfaces.<name>.useDHCP` on every interface that should get an address via DHCP. This way, dhcpcd is explicitly configured as to which interfaces it should run on.
+
+- GnuPG is now built without support for a graphical passphrase entry by default. Please enable the `gpg-agent` user service via the NixOS option `programs.gnupg.agent.enable`. Note that upstream recommends using `gpg-agent` and will spawn a `gpg-agent` on the first invocation of GnuPG anyway.
+
+- The `dynamicHosts` option has been removed from the [NetworkManager](options.html#opt-networking.networkmanager.enable) module. Allowing (multiple) regular users to override host entries affecting the whole system opens up a huge attack vector. There seem to be very rare cases where this might be useful. Consider setting system-wide host entries using [networking.hosts](options.html#opt-networking.hosts), provide them via the DNS server in your network, or use [environment.etc](options.html#opt-environment.etc) to add a file into `/etc/NetworkManager/dnsmasq.d` reconfiguring `hostsdir`.
+
+- The `99-main.network` file was removed. Matching all network interfaces caused many breakages, see [\#18962](https://github.com/NixOS/nixpkgs/pull/18962) and [\#71106](https://github.com/NixOS/nixpkgs/pull/71106).
+
+  We already don't support the global [networking.useDHCP](options.html#opt-networking.useDHCP), [networking.defaultGateway](options.html#opt-networking.defaultGateway) and [networking.defaultGateway6](options.html#opt-networking.defaultGateway6) options if [networking.useNetworkd](options.html#opt-networking.useNetworkd) is enabled, but direct users to configure the per-device [networking.interfaces.\<name\>....](options.html#opt-networking.interfaces) options.
+
+- The stdenv now runs all bash with `set -u`, to catch the use of undefined variables. Before, it itself used `set -u` but was careful to unset it so other packages' code ran as before. Now, all bash code is held to the same high standard, and the rather complex stateful manipulation of the options can be discarded.
+
+- The SLIM Display Manager has been removed, as it has been unmaintained since 2013. Consider migrating to a different display manager such as LightDM (current default in NixOS), SDDM, GDM, or using the startx module which uses Xinitrc.
+
+- The Way Cooler wayland compositor has been removed, as the project has been officially canceled. There are no more `way-cooler` attribute and `programs.way-cooler` options.
+
+- The BEAM package set has been deleted; only the different interpreters remain there. You should now use the build tools that come with the languages, with sandbox mode disabled.
+
+- There is now only one Xfce package-set and module. This means that the attributes `xfce4-14` and `xfceUnstable` now all point to the latest Xfce 4.14 packages. And future NixOS releases will ship the latest released version of Xfce available at the time of the release's development (if viable).
+
+- The [phpfpm](options.html#opt-services.phpfpm.pools) module now sets `PrivateTmp=true` in its systemd units for better process isolation. If you rely on `/tmp` being shared with other services, explicitly override this by setting `serviceConfig.PrivateTmp` to `false` for each phpfpm unit.
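+
+  A sketch of opting a single pool out of the isolation again; the pool name `mypool` is hypothetical:
+
+  ```nix
+  {
+    systemd.services."phpfpm-mypool".serviceConfig.PrivateTmp = false;
+  }
+  ```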
+
+- KDE's old multimedia framework Phonon no longer supports Qt 4. For that reason, Plasma desktop also does not have `enableQt4Support` option any more.
+
+- The BeeGFS module has been removed.
+
+- The osquery module has been removed.
+
+- Going forward, `~/bin` in the user's home directory will no longer be in `PATH` by default. If you depend on this you should set the option `environment.homeBinInPath` to `true`. The aforementioned option was added this release.
+
+- The `buildRustCrate` infrastructure now produces `lib` outputs in addition to the `out` output. This has led to drastically reduced closure sizes for some rust crates since development dependencies are now in the `lib` output.
+
+- Pango was upgraded to 1.44, which no longer uses freetype for font loading. This means that type1 and bitmap fonts are no longer supported in applications relying on Pango for font rendering (notably, GTK applications). See the [upstream issue](https://gitlab.gnome.org/GNOME/pango/issues/386) for more information.
+
+- The `roundcube` module has been hardened.
+
+  - The password of the database is no longer written world-readable to the store. If `database.host` is set to `localhost`, then a unix user of the same name as the database will be created and PostgreSQL peer authentication will be used, removing the need for a password. Otherwise, a password is still needed and can be provided with the new option `database.passwordFile`, which should be set to the path of a file containing the password and readable by the user `nginx` only. The `database.password` option is insecure and deprecated. Usage of this option will print a warning.
+
+  - A random `des_key` is set by default in the configuration of roundcube, instead of using the hardcoded and insecure default. To ensure a clean migration, all users will be logged out when you upgrade to this release.
+
+- The packages `openobex` and `obexftp` are no longer installed when enabling Bluetooth via `hardware.bluetooth.enable`.
+
+- The `dump1090` derivation has been changed to use FlightAware's dump1090 as its upstream. However, this version does not have an internal webserver anymore. The assets in the `share/dump1090` directory of the derivation can be used in conjunction with an external webserver to replace this functionality.
+
+- The fourStore and fourStoreEndpoint modules have been removed.
+
+- Polkit no longer has the user of uid 0 (root) as an admin identity. We now follow the upstream default of granting admin privileges only to members of the wheel group; previously both root and members of wheel had them. The positive outcome of this is that pkexec GUI popups or terminal prompts will no longer require the user to choose between two essentially equivalent choices (whether to perform the action as themselves with wheel permissions, or as the root user).
+
+- NixOS containers no longer build the NixOS manual by default. This saves evaluation time, especially if there are many declarative containers defined. Note that this is already done when the `<nixos/modules/profiles/minimal.nix>` module is included in the container config.
+
+- The `kresd` service deprecates the `interfaces` option in favor of the `listenPlain` option, which requires a full [systemd.socket-compatible](https://www.freedesktop.org/software/systemd/man/systemd.socket.html#ListenStream=) declaration that always includes a port, as in the sketch below.
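+
+  A hedged sketch of the new style (the listen addresses are placeholders):
+
+  ```nix
+  {
+    # systemd ListenStream-style declarations; a port is always required.
+    services.kresd.listenPlain = [ "127.0.0.1:53" "[::1]:53" ];
+  }
+  ```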
+
+- Virtual console options have been reorganized and can be found under a single top-level attribute: `console`. The full set of changes is as follows:
+
+  - `i18n.consoleFont` renamed to [console.font](options.html#opt-console.font)
+
+  - `i18n.consoleKeyMap` renamed to [console.keyMap](options.html#opt-console.keyMap)
+
+  - `i18n.consoleColors` renamed to [console.colors](options.html#opt-console.colors)
+
+  - `i18n.consolePackages` renamed to [console.packages](options.html#opt-console.packages)
+
+  - `i18n.consoleUseXkbConfig` renamed to [console.useXkbConfig](options.html#opt-console.useXkbConfig)
+
+  - `boot.earlyVconsoleSetup` renamed to [console.earlySetup](options.html#opt-console.earlySetup)
+
+  - `boot.extraTTYs` renamed to `console.extraTTYs`.
+
+- The [awstats](options.html#opt-services.awstats.enable) module has been rewritten to serve stats via static html pages, updated on a timer, over [nginx](options.html#opt-services.nginx.virtualHosts), instead of dynamic cgi pages over [apache](options.html#opt-services.httpd.enable).
+
+  Minor changes will be required to migrate existing configurations. Details of the required changes can be seen by looking through the [awstats](options.html#opt-services.awstats.enable) module.
+
+- The httpd module no longer provides options to support serving web content without defining a virtual host. As a result of this the [services.httpd.logPerVirtualHost](options.html#opt-services.httpd.logPerVirtualHost) option now defaults to `true` instead of `false`. Please update your configuration to make use of [services.httpd.virtualHosts](options.html#opt-services.httpd.virtualHosts).
+
+  The [services.httpd.virtualHosts.\<name\>](options.html#opt-services.httpd.virtualHosts) option has changed type from a list of submodules to an attribute set of submodules, better matching [services.nginx.virtualHosts.\<name\>](options.html#opt-services.nginx.virtualHosts).
+
+  This change comes with the addition of the following options which mimic the functionality of their `nginx` counterparts: [services.httpd.virtualHosts.\<name\>.addSSL](options.html#opt-services.httpd.virtualHosts), [services.httpd.virtualHosts.\<name\>.forceSSL](options.html#opt-services.httpd.virtualHosts), [services.httpd.virtualHosts.\<name\>.onlySSL](options.html#opt-services.httpd.virtualHosts), [services.httpd.virtualHosts.\<name\>.enableACME](options.html#opt-services.httpd.virtualHosts), [services.httpd.virtualHosts.\<name\>.acmeRoot](options.html#opt-services.httpd.virtualHosts), and [services.httpd.virtualHosts.\<name\>.useACMEHost](options.html#opt-services.httpd.virtualHosts).
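+
+  For illustration, a minimal sketch of the new attribute-set style (the host name and document root path are placeholders):
+
+  ```nix
+  {
+    services.httpd.virtualHosts."example.org" = {
+      documentRoot = "/var/www/example.org";
+      forceSSL = true;
+      enableACME = true;
+    };
+  }
+  ```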
+
+- For NixOS configuration options, the `loaOf` type has been deprecated and will be removed in a future release. In nixpkgs, options of this type will be changed to `attrsOf` instead. If you were using one of these in your configuration, you will see a warning suggesting what changes will be required.
+
+  For example, [users.users](options.html#opt-users.users) is a `loaOf` option that is commonly used as follows:
+
+  ```nix
+  {
+    users.users =
+      [ { name = "me";
+          description = "My personal user.";
+          isNormalUser = true;
+        }
+      ];
+  }
+  ```
+
+  This should be rewritten by removing the list and using the value of `name` as the name of the attribute set:
+
+  ```nix
+  {
+    users.users.me =
+      { description = "My personal user.";
+        isNormalUser = true;
+      };
+  }
+  ```
+
+  For more information on this change have a look at these links: [issue \#1800](https://github.com/NixOS/nixpkgs/issues/1800), [PR \#63103](https://github.com/NixOS/nixpkgs/pull/63103).
+
+- For NixOS modules, the types `types.submodule` and `types.submoduleWith` now support paths as allowed values, similar to how `imports` supports paths. Because of this, if you have a module that defines an option of type `either (submodule ...) path`, it will break since a path is now treated as the first type instead of the second. To fix this, change the type to `either path (submodule ...)`.
+
+- The [Buildkite Agent](options.html#opt-services.buildkite-agents) module and corresponding packages have been updated to 3.x, and to support multiple instances of the agent running at the same time. This means you will have to rename `services.buildkite-agent` to `services.buildkite-agents.<name>`. Furthermore, the following options have been changed:
+
+  - `services.buildkite-agent.meta-data` has been renamed to [services.buildkite-agents.\<name\>.tags](options.html#opt-services.buildkite-agents), to match upstream's naming for 3.x. Its type has also changed - it now accepts an attrset of strings.
+
+  - The `services.buildkite-agent.openssh.publicKeyPath` option has been removed, as it's not necessary to deploy public keys to clone private repositories.
+
+  - `services.buildkite-agent.openssh.privateKeyPath` has been renamed to [buildkite-agents.\<name\>.privateSshKeyPath](options.html#opt-services.buildkite-agents), as the `openssh` submodule then only contained that single option.
+
+  - [services.buildkite-agents.\<name\>.shell](options.html#opt-services.buildkite-agents) has been introduced, allowing a custom shell to be specified.
+
+- The `citrix_workspace_19_3_0` package has been removed as it will be EOLed within the lifespan of 20.03. For further information, please refer to the [support and maintenance information](https://www.citrix.com/de-de/support/product-lifecycle/milestones/receiver.html) from upstream.
+
+- The `gcc5` and `gfortran5` packages have been removed.
+
+- The `services.xserver.displayManager.auto` module has been removed. It was only intended for use in internal NixOS tests, and gave the false impression of it being a special display manager when it's actually LightDM. Please use the `services.xserver.displayManager.lightdm.autoLogin` options instead, or any other display manager in NixOS as they all support auto-login. If you used this module specifically because it permitted root auto-login you can override the lightdm-autologin pam module like:
+
+  ```nix
+  {
+    security.pam.services.lightdm-autologin.text = lib.mkForce ''
+        auth     requisite pam_nologin.so
+        auth     required  pam_succeed_if.so quiet
+        auth     required  pam_permit.so
+
+        account  include   lightdm
+
+        password include   lightdm
+
+        session  include   lightdm
+    '';
+  }
+  ```
+
+  The difference is this line:
+
+  ```
+  auth required pam_succeed_if.so quiet
+  ```
+
+  whereas the default is:
+
+  ```
+  auth required pam_succeed_if.so uid >= 1000 quiet
+  ```
+
+  which does not permit users with UIDs below 1000 (such as root). All other display managers in NixOS are configured like this.
+
+- There have been lots of improvements to the Mailman module. As a result,
+
+  - The `services.mailman.hyperkittyBaseUrl` option has been renamed to [services.mailman.hyperkitty.baseUrl](options.html#opt-services.mailman.hyperkitty.baseUrl).
+
+  - The `services.mailman.hyperkittyApiKey` option has been removed. This is because having an option for the Hyperkitty API key meant that the API key would be stored in the world-readable Nix store, which was a security vulnerability. A new Hyperkitty API key will be generated the first time the new Hyperkitty service is run, and it will then be persisted outside of the Nix store. To continue using Hyperkitty, you must set [services.mailman.hyperkitty.enable](options.html#opt-services.mailman.hyperkitty.enable) to `true`.
+
+  - Additionally, some Postfix configuration must now be set manually instead of automatically by the Mailman module:
+
+    ```nix
+    {
+      services.postfix.relayDomains = [ "hash:/var/lib/mailman/data/postfix_domains" ];
+      services.postfix.config.transport_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
+      services.postfix.config.local_recipient_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
+    }
+    ```
+
+    This is because some users may want to include other values in these lists as well, and this was not possible if they were set automatically by the Mailman module. It would not have been possible to just concatenate values from multiple modules each setting the values they needed, because the order of elements in the list is significant.
+
+- The LLVM versions 3.5, 3.9 and 4 (including the corresponding Clang versions) have been dropped.
+
+- The `networking.interfaces.*.preferTempAddress` option has been replaced by `networking.interfaces.*.tempAddress`. The new option allows better control of the IPv6 temporary addresses, including completely disabling them for interfaces where they are not needed.
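+
+  For example, a sketch of disabling temporary addresses on one interface (the interface name is a placeholder, and the exact accepted values are documented on the option itself):
+
+  ```nix
+  {
+    # Replaces the old boolean preferTempAddress with the new enum-style option.
+    networking.interfaces.eth0.tempAddress = "disabled";
+  }
+  ```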
+
+- Rspamd was updated to version 2.2. Read [ the upstream migration notes](https://rspamd.com/doc/migration.html#migration-to-rspamd-20) carefully. Please be especially aware that some modules were removed and the default Bayes backend is now Redis.
+
+- The `*psu` versions of oraclejdk8 have been removed as they aren't provided by upstream anymore.
+
+- The `services.dnscrypt-proxy` module has been removed as it used the deprecated version of dnscrypt-proxy. We've added [services.dnscrypt-proxy2.enable](options.html#opt-services.dnscrypt-proxy2.enable) to use the supported version. This module supports configuration via the Nix attribute set [services.dnscrypt-proxy2.settings](options.html#opt-services.dnscrypt-proxy2.settings), or by passing a TOML configuration file via [services.dnscrypt-proxy2.configFile](options.html#opt-services.dnscrypt-proxy2.configFile).
+
+  ```nix
+  {
+    # Example configuration:
+    services.dnscrypt-proxy2.enable = true;
+    services.dnscrypt-proxy2.settings = {
+      listen_addresses = [ "127.0.0.1:43" ];
+      sources.public-resolvers = {
+        urls = [ "https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md" ];
+        cache_file = "public-resolvers.md";
+        minisign_key = "RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3";
+        refresh_delay = 72;
+      };
+    };
+
+    services.dnsmasq.enable = true;
+    services.dnsmasq.servers = [ "127.0.0.1#43" ];
+  }
+  ```
+
+- `qesteidutil` has been deprecated in favor of `qdigidoc`.
+
+- sqldeveloper_18 has been removed as it's not maintained anymore, and sqldeveloper has been updated to version `19.4`. Please note that this means that oraclejdk is now required. For further information please read the [release notes](https://www.oracle.com/technetwork/developer-tools/sql-developer/downloads/sqldev-relnotes-194-5908846.html).
+
+- Haskell `env` and `shellFor` dev shell environments now organize dependencies the same way as regular builds. In particular, rather than receiving all the different lists of dependencies mashed together as one big list and then partitioning it into Haskell and non-Haskell dependencies, they work from the original, separate dependency parameters and don't need to algorithmically partition anything.
+
+  This means that if you incorrectly categorize a dependency, e.g. a non-Haskell library dependency as `buildDepends` or a run-time Haskell dependency as `setupDepends`, things that happened to work before may not work now.
+
+- The gcc-snapshot package has been removed. It had been marked as broken for more than 2 years and pointed to a fairly old snapshot from the gcc7 branch.
+
+- The nixos-build-vms(8) script now uses the python test-driver.
+
+- The riot-web package now accepts configuration overrides as an attribute set instead of a string. A formerly used JSON configuration can be converted to an attribute set with `builtins.fromJSON`.
+
+  The new default configuration also disables automatic guest account registration and analytics to improve privacy. The previous behavior can be restored by setting `config.riot-web.conf = { disable_guests = false; piwik = true; }`.
+
+- Stand-alone usage of `Upower` now requires `services.upower.enable` instead of just installing into [environment.systemPackages](options.html#opt-environment.systemPackages).
+
+- nextcloud has been updated to `v18.0.2`. This means that users coming from NixOS 19.09 can't upgrade directly, since Nextcloud only allows moving one major version forward at a time and 19.09 ships `v16.0.8`.
+
+  To provide a safe upgrade-path and to circumvent similar issues in the future, the following measures were taken:
+
+  - The `pkgs.nextcloud` attribute has been removed and replaced with versioned attributes (currently `pkgs.nextcloud17` and `pkgs.nextcloud18`). With this change, major releases can be backported without breaking existing setups, and upgrade paths become easier.
+
+  - Existing setups will be detected using [system.stateVersion](options.html#opt-system.stateVersion): by default, nextcloud17 will be used, but a warning will be raised which notes that after that deploy it's recommended to update to the latest stable version (nextcloud18) by declaring the newly introduced setting [services.nextcloud.package](options.html#opt-services.nextcloud.package), as sketched after the warning below.
+
+  - Users with an overlay (e.g. to use nextcloud at version `v18` on `19.09`) will get an evaluation error by default. This is done to ensure that our [package](options.html#opt-services.nextcloud.package)-option doesn't select an older version by accident. It's recommended to use pkgs.nextcloud18 or to set [package](options.html#opt-services.nextcloud.package) to pkgs.nextcloud explicitly.
+
+  ::: {.warning}
+  Please note that if you're coming from `19.03` or older, you have to manually upgrade to `19.09` first to upgrade your server to Nextcloud v16.
+  :::
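+
+  For example, a minimal sketch of declaring the package explicitly during the staged upgrade:
+
+  ```nix
+  { pkgs, ... }: {
+    # Pin the major version; switch to pkgs.nextcloud18 on the next deploy.
+    services.nextcloud.package = pkgs.nextcloud17;
+  }
+  ```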
+
+- Hydra has gained a massive performance improvement due to [some database schema changes](https://github.com/NixOS/hydra/pull/710) by adding several IDs and better indexing. However, it's necessary to upgrade Hydra in multiple steps:
+
+  - At first, an older version of Hydra needs to be deployed which adds those (nullable) columns. When having set [stateVersion ](options.html#opt-system.stateVersion) to a value older than `20.03`, this package will be selected by default from the module when upgrading. Otherwise, the package can be deployed using the following config:
+
+    ```nix
+    { pkgs, ... }: {
+      services.hydra.package = pkgs.hydra-migration;
+    }
+    ```
+
+  - Automatically fill the newly added ID columns on the server by running the following command:
+
+    ```ShellSession
+    $ hydra-backfill-ids
+    ```
+
+    ::: {.warning}
+    Please note that this process can take a while depending on your database size!
+    :::
+
+  - Deploy a newer version of Hydra to activate the DB optimizations. This can be done by using hydra-unstable. This package already includes [flake-support](https://github.com/nixos/rfcs/pull/49) and is therefore compiled against pkgs.nixFlakes.
+
+    ::: {.warning}
+    If your [stateVersion](options.html#opt-system.stateVersion) is set to `20.03` or greater, hydra-unstable will be used automatically! This will break your setup if you didn't run the migration.
+    :::
+
+    Please note that Hydra is currently not available with nixStable as this doesn't compile anymore.
+
+    ::: {.warning}
+    pkgs.hydra has been removed to ensure a graceful database migration using the dedicated package attributes. If you still have pkgs.hydra defined in e.g. an overlay, an assertion error will be thrown. To circumvent this, you need to set [services.hydra.package](options.html#opt-services.hydra.package) to pkgs.hydra explicitly and make sure you know what you're doing!
+    :::
+
+- The TokuDB storage engine will be disabled in mariadb 10.5. It is recommended to switch to RocksDB. See also [TokuDB](https://mariadb.com/kb/en/tokudb/).
+
+## Other Notable Changes {#sec-release-20.03-notable-changes}
+
+- SD images are now compressed by default using `bzip2`.
+
+- The nginx web server previously started its master process as root privileged, then ran worker processes as a less privileged identity user (the `nginx` user). This was changed to start all of nginx as a less privileged user (defined by `services.nginx.user` and `services.nginx.group`). As a consequence, all files that are needed for nginx to run (including configuration fragments, SSL certificates and keys, etc.) must now be readable by this less privileged user/group.
+
+  To continue to use the old approach, you can configure:
+
+  ```nix
+  {
+    services.nginx.appendConfig = let cfg = config.services.nginx; in ''user ${cfg.user} ${cfg.group};'';
+    systemd.services.nginx.serviceConfig.User = lib.mkForce "root";
+  }
+  ```
+
+- OpenSSH has been upgraded from 7.9 to 8.1, improving security and adding features but with potential incompatibilities. Consult the [release announcement](https://www.openssh.com/txt/release-8.1) for more information.
+
+- `PRETTY_NAME` in `/etc/os-release` now uses the short rather than full version string.
+
+- The ACME module has switched from simp-le to [lego](https://github.com/go-acme/lego) which allows us to support DNS-01 challenges and wildcard certificates. The following options have been added: [security.acme.acceptTerms](options.html#opt-security.acme.acceptTerms), [security.acme.certs.\<name\>.dnsProvider](options.html#opt-security.acme.certs), [security.acme.certs.\<name\>.credentialsFile](options.html#opt-security.acme.certs), [security.acme.certs.\<name\>.dnsPropagationCheck](options.html#opt-security.acme.certs). As well as this, the options `security.acme.acceptTerms` and either `security.acme.email` or `security.acme.certs.<name>.email` must be set in order to use the ACME module. Certificates will be regenerated on activation; no account or certificate will be migrated from simp-le. In particular, private keys will not be preserved. However, the credentials for simp-le are preserved and thus it is possible to roll back to previous versions without breaking certificate generation. Note also that, in contrast to simp-le, a new private key is created at each renewal by default, which can have consequences if you embed your public key in apps.
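+
+  As a hedged sketch of a DNS-01 setup (the domain, DNS provider name and credentials path are placeholders, not values from this note):
+
+  ```nix
+  {
+    security.acme.acceptTerms = true;
+    security.acme.email = "admin@example.org";
+    security.acme.certs."example.org" = {
+      dnsProvider = "cloudflare";
+      credentialsFile = "/var/lib/secrets/acme-cloudflare.env";
+      dnsPropagationCheck = true;
+    };
+  }
+  ```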
+
+- It is now possible to unlock LUKS-Encrypted file systems using a FIDO2 token via `boot.initrd.luks.fido2Support`.
+
+- Predictably named network interfaces get renamed in stage-1. This means that it is possible to use the proper interface name for e.g. Dropbear setups.
+
+  For further reference, please read [\#68953](https://github.com/NixOS/nixpkgs/pull/68953) or the corresponding [discourse thread](https://discourse.nixos.org/t/predictable-network-interface-names-in-initrd/4055).
+
+- The matrix-synapse package has been updated to [v1.11.1](https://github.com/matrix-org/synapse/releases/tag/v1.11.1). Due to [stricter requirements](https://github.com/matrix-org/synapse/releases/tag/v1.10.0rc1) for database configuration when using postgresql, the automated database setup of the module has been removed to avoid any further edge cases.
+
+  matrix-synapse expects `postgresql`-databases to have the options `LC_COLLATE` and `LC_CTYPE` set to [`'C'`](https://www.postgresql.org/docs/12/locale.html) which basically instructs `postgresql` to ignore any locale-based preferences.
+
+  Depending on your setup, you need to incorporate one of the following changes in your setup to upgrade to 20.03:
+
+  - If you use `sqlite3` you don't need to do anything.
+
+  - If you use `postgresql` on a different server, you don't need to change anything either, since this module was never designed to configure remote databases.
+
+  - If you use `postgresql` and configured your synapse initially on `19.09` or older, you need to enable postgresql-support explicitly:
+
+    ```nix
+    { ... }: {
+      services.matrix-synapse = {
+        enable = true;
+        /* and all the other config you've defined here */
+      };
+      services.postgresql.enable = true;
+    }
+    ```
+
+  - If you deploy a fresh matrix-synapse, you need to configure the database yourself (e.g. by using the [services.postgresql.initialScript](options.html#opt-services.postgresql.initialScript) option). An example for this can be found in the [documentation of the Matrix module](#module-services-matrix).
+
+  - If you initially deployed your matrix-synapse on `nixos-unstable` _after_ the `19.09`-release, your database is misconfigured due to a regression in NixOS. For now, matrix-synapse will start up with a warning, but it's recommended to reconfigure the database to set the values `LC_COLLATE` and `LC_CTYPE` to [`'C'`](https://www.postgresql.org/docs/12/locale.html).
+
+- The [systemd.network.links](options.html#opt-systemd.network.links) option is now respected even when [systemd-networkd](options.html#opt-systemd.network.enable) is disabled. This mirrors the behaviour of systemd - It's udev that parses `.link` files, not `systemd-networkd`.
+
+- mongodb has been updated to version `3.4.24`.
+
+  ::: {.warning}
+  Please note that mongodb has been relicensed under their own [`sspl`](https://www.mongodb.com/licensing/server-side-public-license/faq) license. Since it's not entirely free and not OSI-approved, it's listed as non-free. This means that Hydra doesn't provide prebuilt mongodb packages and they need to be built locally.
+  :::
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md
new file mode 100644
index 000000000000..eac02a8ff445
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2009.section.md
@@ -0,0 +1,747 @@
+# Release 20.09 ("Nightingale", 2020.10.27) {#sec-release-20.09}
+
+Support is planned until the end of June 2021, handing over to 21.05. (Plans [have shifted](https://github.com/NixOS/rfcs/blob/master/rfcs/0080-nixos-release-schedule.md#core-changes) by two months since release of 20.09.)
+
+## Highlights {#sec-release-20.09-highlights}
+
+In addition to 7349 new, 14442 updated, and 8181 removed packages, this release has the following highlights:
+
+- Core version changes:
+
+  - gcc: 9.2.0 -\> 9.3.0
+
+  - glibc: 2.30 -\> 2.31
+
+  - linux: still defaults to 5.4.x, all supported kernels available
+
+  - mesa: 19.3.5 -\> 20.1.7
+
+- Desktop Environments:
+
+  - plasma5: 5.17.5 -\> 5.18.5
+
+  - kdeApplications: 19.12.3 -\> 20.08.1
+
+  - gnome3: 3.34 -\> 3.36, see its [release notes](https://help.gnome.org/misc/release-notes/3.36/)
+
+  - cinnamon: added at 4.6
+
+  - NixOS now distributes an official [GNOME ISO](https://nixos.org/download.html#nixos-iso)
+
+- Programming Languages and Frameworks:
+
+  - Agda ecosystem was heavily reworked (see more details below)
+
+  - PHP now defaults to PHP 7.4, updated from 7.3
+
+  - PHP 7.2 is no longer supported due to upstream not supporting this version for the entire lifecycle of the 20.09 release
+
+  - Python 3 now defaults to Python 3.8 instead of 3.7
+
+  - Python 3.5 reached its upstream EOL at the end of September 2020: it has been removed from the list of available packages
+
+- Databases and Service Monitoring:
+
+  - MariaDB has been updated to 10.4, MariaDB Galera to 26.4. Please read the related upgrade instructions under [backwards incompatibilities](#sec-release-20.09-incompatibilities) before upgrading.
+
+  - Zabbix now defaults to 5.0, updated from 4.4. Please read the related sections under [backward incompatibilities](#sec-release-20.09-incompatibilities) before upgrading.
+
+- Major module changes:
+
+  - Quickly configure a complete, private, self-hosted video conferencing solution with the new Jitsi Meet module.
+
+  - Two new options, [authorizedKeysCommand](options.html#opt-services.openssh.authorizedKeysCommand) and [authorizedKeysCommandUser](options.html#opt-services.openssh.authorizedKeysCommandUser), have been added to the `openssh` module. If you have `AuthorizedKeysCommand` in your [services.openssh.extraConfig](options.html#opt-services.openssh.extraConfig) you should make use of these new options instead.
+
+  - There is a new module for Podman (`virtualisation.podman`), a drop-in replacement for the Docker command line.
+
+  - The new `virtualisation.containers` module manages configuration shared by the CRI-O and Podman modules.
+
+  - Declarative Docker containers are renamed from `docker-containers` to `virtualisation.oci-containers.containers`. This is to make it possible to use `podman` instead of `docker`.
+
+  - The new option [documentation.man.generateCaches](options.html#opt-documentation.man.generateCaches) has been added to automatically generate the `man-db` caches, which are needed by utilities like `whatis` and `apropos`. The caches are generated during the build of the NixOS configuration: since this can be expensive when a large number of packages are installed, the feature is disabled by default.
+
+  - `services.postfix.sslCACert` was replaced by `services.postfix.tlsTrustedAuthorities` which now defaults to system certificate authorities.
+
+  - The various documented workarounds to use steam have been converted to a module. `programs.steam.enable` enables steam, controller support and the workarounds.
+
+  - Support for built-in LCDs in various pieces of Logitech hardware (keyboards and USB speakers). `hardware.logitech.lcd.enable` enables support for all hardware supported by the [g15daemon project](https://sourceforge.net/projects/g15daemon/).
+
+  - The GRUB module gained support for basic password protection, which allows restricting non-default entries in the boot menu to one or more users. The users and passwords are defined via the option `boot.loader.grub.users`. Note: Password support is only available in GRUB version 2.
+
+- NixOS module changes:
+
+  - The NixOS module system now supports freeform modules as a mix between `types.attrsOf` and `types.submodule`. These allow you to explicitly declare a subset of options while still permitting definitions without an associated option. See [](#sec-freeform-modules) for how to use them.
+
+  - Following its deprecation in 20.03, the Perl NixOS test driver has been removed. All remaining tests have been ported to the Python test framework. Code outside nixpkgs using `make-test.nix` or `testing.nix` needs to be ported to `make-test-python.nix` and `testing-python.nix` respectively.
+
+  - Subordinate GID and UID mappings are now set up automatically for all normal users. This will make container tools like Podman work as non-root users out of the box.
+
+- Starting with this release, the hydra-build-result `nixos-YY.MM` branches no longer exist in the [deprecated nixpkgs-channels repository](https://github.com/nixos/nixpkgs-channels). These branches are now in [the main nixpkgs repository](https://github.com/nixos/nixpkgs).
+
+## New Services {#sec-release-20.09-new-services}
+
+In addition to 1119 new, 118 updated, and 476 removed options, 61 new modules were added since the last release:
+
+- Hardware:
+
+  - [hardware.system76.firmware-daemon.enable](options.html#opt-hardware.system76.firmware-daemon.enable) adds easy support of system76 firmware
+
+  - [hardware.uinput.enable](options.html#opt-hardware.uinput.enable) loads uinput kernel module
+
+  - [hardware.video.hidpi.enable](options.html#opt-hardware.video.hidpi.enable) enable good defaults for HiDPI displays
+
+  - [hardware.wooting.enable](options.html#opt-hardware.wooting.enable) support for Wooting keyboards
+
+  - [hardware.xpadneo.enable](options.html#opt-hardware.xpadneo.enable) xpadneo driver for Xbox One wireless controllers
+
+- Programs:
+
+  - [programs.hamster.enable](options.html#opt-programs.hamster.enable) enable hamster time tracking
+
+  - [programs.steam.enable](options.html#opt-programs.steam.enable) adds easy enablement of steam and related system configuration
+
+- Security:
+
+  - [security.doas.enable](options.html#opt-security.doas.enable) alternative to sudo, allows non-root users to execute commands as root
+
+  - [security.tpm2.enable](options.html#opt-security.tpm2.enable) add Trusted Platform Module 2 support
+
+- System:
+
+  - [boot.initrd.network.openvpn.enable](options.html#opt-boot.initrd.network.openvpn.enable) start an OpenVPN client during initrd boot
+
+- Virtualization:
+
+  - [boot.enableContainers](options.html#opt-boot.enableContainers) use nixos-containers
+
+  - [virtualisation.oci-containers.containers](options.html#opt-virtualisation.oci-containers.containers) run OCI (Docker) containers
+
+  - [virtualisation.podman.enable](options.html#opt-virtualisation.podman.enable) daemonless container engine
+
+- Services:
+
+  - [services.ankisyncd.enable](options.html#opt-services.ankisyncd.enable) Anki sync server
+
+  - [services.bazarr.enable](options.html#opt-services.bazarr.enable) Subtitle manager for Sonarr and Radarr
+
+  - [services.biboumi.enable](options.html#opt-services.biboumi.enable) Biboumi XMPP gateway to IRC
+
+  - [services.blockbook-frontend](options.html#opt-services.blockbook-frontend) Blockbook-frontend, a service for the Trezor wallet
+
+  - [services.cage.enable](options.html#opt-services.cage.enable) Wayland cage service
+
+  - [services.convos.enable](options.html#opt-services.convos.enable) IRC daemon, which can be accessed through the browser
+
+  - [services.engelsystem.enable](options.html#opt-services.engelsystem.enable) Tool for coordinating volunteers and shifts on large events
+
+  - [services.espanso.enable](options.html#opt-services.espanso.enable) text-expander written in rust
+
+  - [services.foldingathome.enable](options.html#opt-services.foldingathome.enable) Folding\@home client
+
+  - [services.gerrit.enable](options.html#opt-services.gerrit.enable) Web-based team code collaboration tool
+
+  - [services.go-neb.enable](options.html#opt-services.go-neb.enable) Matrix bot
+
+  - [services.hardware.xow.enable](options.html#opt-services.hardware.xow.enable) xow as a systemd service
+
+  - [services.hercules-ci-agent.enable](options.html#opt-services.hercules-ci-agent.enable) Hercules CI build agent
+
+  - [services.jicofo.enable](options.html#opt-services.jicofo.enable) Jitsi Conference Focus, component of Jitsi Meet
+
+  - [services.jirafeau.enable](options.html#opt-services.jirafeau.enable) A web file repository
+
+  - [services.jitsi-meet.enable](options.html#opt-services.jitsi-meet.enable) Secure, simple and scalable video conferences
+
+  - [services.jitsi-videobridge.enable](options.html#opt-services.jitsi-videobridge.enable) Jitsi Videobridge, a WebRTC compatible router
+
+  - [services.jupyterhub.enable](options.html#opt-services.jupyterhub.enable) Jupyterhub development server
+
+  - [services.k3s.enable](options.html#opt-services.k3s.enable) Lightweight Kubernetes distribution
+
+  - [services.magic-wormhole-mailbox-server.enable](options.html#opt-services.magic-wormhole-mailbox-server.enable) Magic Wormhole Mailbox Server
+
+  - [services.malcontent.enable](options.html#opt-services.malcontent.enable) Parental Control support
+
+  - [services.matrix-appservice-discord.enable](options.html#opt-services.matrix-appservice-discord.enable) Matrix and Discord bridge
+
+  - [services.mautrix-telegram.enable](options.html#opt-services.mautrix-telegram.enable) Matrix-Telegram puppeting/relaybot bridge
+
+  - [services.mirakurun.enable](options.html#opt-services.mirakurun.enable) Japanese DTV Tuner Server Service
+
+  - [services.molly-brown.enable](options.html#opt-services.molly-brown.enable) Molly-Brown Gemini server
+
+  - [services.mullvad-vpn.enable](options.html#opt-services.mullvad-vpn.enable) Mullvad VPN daemon
+
+  - [services.ncdns.enable](options.html#opt-services.ncdns.enable) Namecoin to DNS bridge
+
+  - [services.nextdns.enable](options.html#opt-services.nextdns.enable) NextDNS to DoH Proxy service
+
+  - [services.nix-store-gcs-proxy](options.html#opt-services.nix-store-gcs-proxy) Google storage bucket to be used as a nix store
+
+  - [services.onedrive.enable](options.html#opt-services.onedrive.enable) OneDrive sync service
+
+  - [services.pinnwand.enable](options.html#opt-services.pinnwand.enable) Pastebin-like service
+
+  - [services.pixiecore.enable](options.html#opt-services.pixiecore.enable) Manage network booting of machines
+
+  - [services.privacyidea.enable](options.html#opt-services.privacyidea.enable) Privacy authentication server
+
+  - [services.quorum.enable](options.html#opt-services.quorum.enable) Quorum blockchain daemon
+
+  - [services.robustirc-bridge.enable](options.html#opt-services.robustirc-bridge.enable) RobustIRC bridge
+
+  - [services.rss-bridge.enable](options.html#opt-services.rss-bridge.enable) Generate RSS and Atom feeds
+
+  - [services.rtorrent.enable](options.html#opt-services.rtorrent.enable) rTorrent service
+
+  - [services.smartdns.enable](options.html#opt-services.smartdns.enable) SmartDNS DNS server
+
+  - [services.sogo.enable](options.html#opt-services.sogo.enable) SOGo groupware
+
+  - [services.teeworlds.enable](options.html#opt-services.teeworlds.enable) Teeworlds game server
+
+  - [services.torque.mom.enable](options.html#opt-services.torque.mom.enable) torque computing node
+
+  - [services.torque.server.enable](options.html#opt-services.torque.server.enable) torque server
+
+  - [services.tuptime.enable](options.html#opt-services.tuptime.enable) A total uptime service
+
+  - [services.urserver.enable](options.html#opt-services.urserver.enable) X11 remote server
+
+  - [services.wasabibackend.enable](options.html#opt-services.wasabibackend.enable) Wasabi backend service
+
+  - [services.yubikey-agent.enable](options.html#opt-services.yubikey-agent.enable) Yubikey agent
+
+  - [services.zigbee2mqtt.enable](options.html#opt-services.zigbee2mqtt.enable) Zigbee to MQTT bridge
+
+## Backward Incompatibilities {#sec-release-20.09-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- MariaDB has been updated to 10.4, MariaDB Galera to 26.4. Before you upgrade, it would be best to take a backup of your database. For MariaDB Galera Cluster, see [Upgrading from MariaDB 10.3 to MariaDB 10.4 with Galera Cluster](https://mariadb.com/kb/en/upgrading-from-mariadb-103-to-mariadb-104-with-galera-cluster/) instead. Before doing the upgrade read [Incompatible Changes Between 10.3 and 10.4](https://mariadb.com/kb/en/upgrading-from-mariadb-103-to-mariadb-104/#incompatible-changes-between-103-and-104). After the upgrade you will need to run `mysql_upgrade`. MariaDB 10.4 introduces a number of changes to the authentication process, intended to make things easier and more intuitive. See [Authentication from MariaDB 10.4](https://mariadb.com/kb/en/authentication-from-mariadb-104/). unix_socket auth plugin does not use a password, and uses the connecting user's UID instead. When a new MariaDB data directory is initialized, two MariaDB users are created and can be used with new unix_socket auth plugin, as well as traditional mysql_native_password plugin: root\@localhost and mysql\@localhost. To actually use the traditional mysql_native_password plugin method, one must run the following:
+
+  ```nix
+  {
+    services.mysql.initialScript = pkgs.writeText "mariadb-init.sql" ''
+      ALTER USER root@localhost IDENTIFIED VIA mysql_native_password USING PASSWORD("verysecret");
+    '';
+  }
+  ```
+
+  When MariaDB data directory is just upgraded (not initialized), the users are not created or modified.
+
+- MySQL server is now started with additional systemd sandbox/hardening options for better security. The PrivateTmp, ProtectHome, and ProtectSystem options may be problematic when MySQL is attempting to read from or write to your filesystem anywhere outside of its own state directory, for example when calling `LOAD DATA INFILE` or `SELECT * INTO OUTFILE`. In this scenario a variant of the following may be required. To allow MySQL to read from the /home and /tmp directories when using `LOAD DATA INFILE`:
+
+  ```nix
+  {
+    systemd.services.mysql.serviceConfig.ProtectHome = lib.mkForce "read-only";
+  }
+  ```
+
+  To allow MySQL to write to the custom folder `/var/data` when using `SELECT * INTO OUTFILE`, assuming the mysql user has write access to `/var/data`:
+
+  ```nix
+  {
+    systemd.services.mysql.serviceConfig.ReadWritePaths = [ "/var/data" ];
+  }
+  ```
+
+  The MySQL service no longer runs its `systemd` service startup script as `root`. A dedicated non-`root` super user account is required for operation. This means users with an existing MySQL or MariaDB database server are required to run the following SQL statements as a super admin user before upgrading:
+
+  ```SQL
+  CREATE USER IF NOT EXISTS 'mysql'@'localhost' identified with unix_socket;
+  GRANT ALL PRIVILEGES ON *.* TO 'mysql'@'localhost' WITH GRANT OPTION;
+  ```
+
+  If you use MySQL instead of MariaDB please replace `unix_socket` with `auth_socket`. If you have changed the value of [services.mysql.user](options.html#opt-services.mysql.user) from the default of `mysql` to a different user please change `'mysql'@'localhost'` to the corresponding user instead.
+
+- Zabbix now defaults to 5.0, updated from 4.4. Please carefully read through [the upgrade guide](https://www.zabbix.com/documentation/current/manual/installation/upgrade/sources) and apply any changes required. Be sure to take special note of the section on [enabling extended range of numeric (float) values](https://www.zabbix.com/documentation/current/manual/installation/upgrade_notes_500#enabling_extended_range_of_numeric_float_values) as you will need to apply this database migration manually.
+
+  If you are using Zabbix Server with a MySQL or MariaDB database you should note that using a character set of `utf8` and a collate of `utf8_bin` has become mandatory with this release. See the upstream [issue](https://support.zabbix.com/browse/ZBX-17357) for further discussion. Before upgrading you should check the character set and collation used by your database and ensure they are correct:
+
+  ```SQL
+  SELECT
+    default_character_set_name,
+    default_collation_name
+  FROM
+    information_schema.schemata
+  WHERE
+    schema_name = 'zabbix';
+  ```
+
+  If these values are not correct you should take a backup of your database and convert the character set and collation as required. Here is an [example](https://www.zabbix.com/forum/zabbix-help/396573-reinstall-after-upgrade?p=396891#post396891) of how to do so, taken from the Zabbix forums:
+
+  ```SQL
+  ALTER DATABASE `zabbix` DEFAULT CHARACTER SET utf8 COLLATE utf8_bin;
+
+  -- the following will produce a list of SQL commands you should subsequently execute
+  SELECT CONCAT("ALTER TABLE ", TABLE_NAME," CONVERT TO CHARACTER SET utf8 COLLATE utf8_bin;") AS ExecuteTheString
+  FROM information_schema.`COLUMNS`
+  WHERE table_schema = "zabbix" AND COLLATION_NAME = "utf8_general_ci";
+  ```
+
+- The maxx package has been removed along with the `services.xserver.desktopManager.maxx` module. Please migrate to cdesktopenv and the `services.xserver.desktopManager.cde` module.
+
+- The [matrix-synapse](options.html#opt-services.matrix-synapse.enable) module no longer includes optional dependencies by default, they have to be added through the [plugins](options.html#opt-services.matrix-synapse.plugins) option.
+
+- `buildGoModule` now internally creates a vendor directory in the source tree for downloaded modules instead of using go's [module proxy protocol](https://golang.org/cmd/go/#hdr-Module_proxy_protocol). This storage format is simpler and therefore less likely to break with future versions of go. As a result `buildGoModule` switched from `modSha256` to the `vendorSha256` attribute to pin fetched version data.
+
+- Grafana is now built without support for phantomjs by default. Phantomjs support has been [deprecated in Grafana](https://grafana.com/docs/grafana/latest/guides/whats-new-in-v6-4/) and the phantomjs project is [currently unmaintained](https://github.com/ariya/phantomjs/issues/15344#issue-302015362). It can still be enabled by providing `phantomJsSupport = true` to the package instantiation:
+
+  ```nix
+  {
+    services.grafana.package = pkgs.grafana.overrideAttrs (oldAttrs: rec {
+      phantomJsSupport = true;
+    });
+  }
+  ```
+
+- The [supybot](options.html#opt-services.supybot.enable) module now uses `/var/lib/supybot` as its default [stateDir](options.html#opt-services.supybot.stateDir) path if `stateVersion` is 20.09 or higher. It also enables a number of [systemd sandboxing options](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#Sandboxing) which may possibly interfere with some plugins. If this is the case you can disable the options through attributes in `systemd.services.supybot.serviceConfig`.
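+
+  A hedged sketch, assuming `PrivateTmp` is one of the sandboxing options that gets in a plugin's way (check the unit to see which options actually apply):
+
+  ```nix
+  { lib, ... }: {
+    # Relax a single hardening knob for the supybot unit.
+    systemd.services.supybot.serviceConfig.PrivateTmp = lib.mkForce false;
+  }
+  ```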
+
+- The `security.duosec.skey` option, which stored a secret in the nix store, has been replaced by a new [security.duosec.secretKeyFile](options.html#opt-security.duosec.secretKeyFile) option for better security.
+
+  `security.duosec.ikey` has been renamed to [security.duosec.integrationKey](options.html#opt-security.duosec.integrationKey).
+
+- `vmware` has been removed from the `services.x11.videoDrivers` defaults. For VMWare guests set `virtualisation.vmware.guest.enable` to `true` which will include the appropriate drivers.
+
+- The initrd SSH support now uses OpenSSH rather than Dropbear to allow the use of Ed25519 keys and other OpenSSH-specific functionality. Host keys must now be in the OpenSSH format, and at least one pre-generated key must be specified.
+
+  If you used the `boot.initrd.network.ssh.host*Key` options, you'll get an error explaining how to convert your host keys and migrate to the new `boot.initrd.network.ssh.hostKeys` option. Otherwise, if you don't have any host keys set, you'll need to generate some; see the `hostKeys` option documentation for instructions.
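+
+  A minimal sketch of the new option (the key path is a placeholder for a key you pre-generate with `ssh-keygen` and keep outside of the Nix store):
+
+  ```nix
+  {
+    boot.initrd.network.ssh = {
+      enable = true;
+      hostKeys = [ "/etc/secrets/initrd/ssh_host_ed25519_key" ];
+    };
+  }
+  ```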
+
+- Since this release there's an easy way to customize your PHP install to get a much smaller base PHP with only wanted extensions enabled. See the following snippet installing a smaller PHP with the extensions `imagick`, `opcache`, `pdo` and `pdo_mysql` loaded:
+
+  ```nix
+  {
+    environment.systemPackages = [
+      (pkgs.php.withExtensions
+        ({ all, ... }: with all; [
+          imagick
+          opcache
+          pdo
+          pdo_mysql
+        ])
+      )
+    ];
+  }
+  ```
+
+  The default `php` attribute hasn't lost any extensions. The `opcache` extension has been added. All upstream PHP extensions are available under php.extensions.\<name\>.
+
+  All PHP `config` flags have been removed for the following reasons:
+
+  - The updated `php` attribute is now easily customizable to your liking by using `php.withExtensions` or `php.buildEnv` instead of writing config files or changing configure flags.
+
+  - The remaining configuration flags can now be set directly on the `php` attribute. For example, instead of
+
+    ```nix
+    php.override {
+      config.php.embed = true;
+      config.php.apxs2 = false;
+    }
+    ```
+
+    you should now write
+
+    ```nix
+    php.override {
+      embedSupport = true;
+      apxs2Support = false;
+    }
+    ```
+
+- The ACME module has been overhauled for simplicity and maintainability. Cert generation now implicitly uses the `acme` user, and the `security.acme.certs._name_.user` option has been removed. Instead, certificate access from other services is now managed through group permissions. The module no longer runs lego twice under certain conditions, and will correctly renew certificates if their configuration is changed. Services which reload nginx and httpd after certificate renewal are now properly configured too so you no longer have to do this manually if you are using HTTPS enabled virtual hosts. A mechanism for regenerating certs on demand has also been added and documented.
+
+- Gollum received a major update to version 5.x and you may have to change some links in your wiki when migrating from gollum 4.x. More information can be found [here](https://github.com/gollum/gollum/wiki/5.0-release-notes#migrating-your-wiki).
+
+- Deluge 2.x was added and is used as default for new NixOS installations where stateVersion is \>= 20.09. If you are upgrading from a previous NixOS version, you can set `services.deluge.package = pkgs.deluge-2_x` to upgrade to Deluge 2.x and migrate the state to the new format. Be aware that backwards state migrations are not supported by Deluge.
+
+- The nginx web server is now started with additional sandbox/hardening options. By default, write access to `/var/log/nginx` and `/var/cache/nginx` is allowed. To allow writing to other folders, use `systemd.services.nginx.serviceConfig.ReadWritePaths`:
+
+  ```nix
+  {
+    systemd.services.nginx.serviceConfig.ReadWritePaths = [ "/var/www" ];
+  }
+  ```
+
+  Nginx is also started with the systemd option `ProtectHome = mkDefault true;` which forbids it to read anything from `/home`, `/root` and `/run/user` (see [ProtectHome docs](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#ProtectHome=) for details). If you require serving files from home directories, you may choose to set e.g.
+
+  ```nix
+  {
+    systemd.services.nginx.serviceConfig.ProtectHome = "read-only";
+  }
+  ```
+
+- The NixOS options `nesting.clone` and `nesting.children` have been deleted, and replaced with named [specialisation](options.html#opt-specialisation) configurations.
+
+  Replace a `nesting.clone` entry with:
+
+  ```nix
+  {
+    specialisation.example-sub-configuration = {
+      configuration = {
+        ...
+      };
+    };
+  }
+  ```
+
+  Replace a `nesting.children` entry with:
+
+  ```nix
+  {
+    specialisation.example-sub-configuration = {
+      inheritParentConfig = false;
+      configuration = {
+        ...
+      };
+    };
+  }
+  ```
+
+  To switch to a specialised configuration at runtime you need to run:
+
+  ```ShellSession
+  $ sudo /run/current-system/specialisation/example-sub-configuration/bin/switch-to-configuration test
+  ```
+
+  Before you would have used:
+
+  ```ShellSession
+  $ sudo /run/current-system/fine-tune/child-1/bin/switch-to-configuration test
+  ```
+
+- The Nginx log directory has been moved to `/var/log/nginx`, the cache directory to `/var/cache/nginx`. The option `services.nginx.stateDir` has been removed.
+
+- The httpd web server previously started its main process as root privileged, then ran worker processes as a less privileged identity user. This was changed to start all of httpd as a less privileged user (defined by [services.httpd.user](options.html#opt-services.httpd.user) and [services.httpd.group](options.html#opt-services.httpd.group)). As a consequence, all files that are needed for httpd to run (including configuration fragments, SSL certificates and keys, etc.) must now be readable by this less privileged user/group.
+
+  The default value for [services.httpd.mpm](options.html#opt-services.httpd.mpm) has been changed from `prefork` to `event`. Along with this change the default value for [services.httpd.virtualHosts.\<name\>.http2](options.html#opt-services.httpd.virtualHosts) has been set to `true`.
+
+- The `systemd-networkd` option `systemd.network.networks.<name>.dhcp.CriticalConnection` has been removed following upstream systemd's deprecation of the same. It is recommended to use `systemd.network.networks.<name>.networkConfig.KeepConfiguration` instead. See systemd.network 5 for details.
+
+- The `systemd-networkd` option `systemd.network.networks._name_.dhcpConfig` has been renamed to [systemd.network.networks._name_.dhcpV4Config](options.html#opt-systemd.network.networks._name_.dhcpV4Config) following upstream systemd's documentation change. See systemd.network 5 for details.
+
+- In the `picom` module, several options that accepted floating point numbers encoded as strings (for example [services.picom.activeOpacity](options.html#opt-services.picom.activeOpacity)) have been changed to the (relatively) new native `float` type. To migrate your configuration remove the quotes around the numbers.
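+
+  For example, using the option named above:
+
+  ```nix
+  {
+    # 20.03 and earlier: services.picom.activeOpacity = "0.8";
+    services.picom.activeOpacity = 0.8;
+  }
+  ```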
+
+- When using `buildBazelPackage` from Nixpkgs, `flat` hash mode is now used for dependencies instead of `recursive`. This is to better allow using hashed mirrors where needed. As a result, these hashes will have changed.
+
+- The syntax of the PostgreSQL configuration file is now checked at build time. If your configuration includes a file inaccessible inside the build sandbox, set `services.postgresql.checkConfig` to `false`.
+
+- The rkt module has been removed, as the project was archived by upstream.
+
+- The [Bazaar](https://bazaar.canonical.com) VCS is unmaintained and, as a consequence of the Python 2 EOL, the packages `bazaar` and `bazaarTools` were removed. Breezy, the backward compatible fork of Bazaar (see the [announcement](https://www.jelmer.uk/breezy-intro.html)), was packaged as `breezy` and can be used instead.
+
+  Regarding Nixpkgs, `fetchbzr`, `nix-prefetch-bzr` and Bazaar support in Hydra will continue to work through Breezy.
+
+- In addition to the hostname, the fully qualified domain name (FQDN), which consists of `${networking.hostName}` and `${networking.domain}` is now added to `/etc/hosts`, to allow local FQDN resolution, as used by the `hostname --fqdn` command and other applications that try to determine the FQDN. These new entries take precedence over entries from the DNS which could cause regressions in some very specific setups. Additionally the hostname is now resolved to `127.0.0.2` instead of `127.0.1.1` to be consistent with what `nss-myhostname` (from systemd) returns. The old behaviour can e.g. be restored by using `networking.hosts = lib.mkForce { "127.0.1.1" = [ config.networking.hostName ]; };`.
+
+- The hostname (`networking.hostName`) must now be a valid DNS label (see RFC 1035, RFC 1123) and as such must not contain the domain part. This means that the hostname must start with a letter or digit, end with a letter or digit, and have as interior characters only letters, digits, and hyphen. The maximum length is 63 characters. Additionally it is recommended to only use lower-case characters. If (e.g. for legacy reasons) a FQDN is required as the Linux kernel network node hostname (`uname --nodename`) the option `boot.kernel.sysctl."kernel.hostname"` can be used as a workaround (but be aware of the 64 character limit).
+
+- The GRUB specific option `boot.loader.grub.extraInitrd` has been replaced with the generic option `boot.initrd.secrets`. This option creates a secondary initrd from the specified files, rather than using a manually created initrd file. Due to an existing bug with `boot.loader.grub.extraInitrd`, it is not possible to directly boot an older generation that used that option. It is still possible to rollback to that generation if the required initrd file has not been deleted.
+
+- The [DNSChain](https://github.com/okTurtles/dnschain) package and NixOS module have been removed from Nixpkgs as the software is unmaintained and can't be built. For more information see issue [\#89205](https://github.com/NixOS/nixpkgs/issues/89205).
+
+- In the `resilio` module, [services.resilio.httpListenAddr](options.html#opt-services.resilio.httpListenAddr) has been changed to listen to `[::1]` instead of `0.0.0.0`.
+
+- `sslh` has been updated to version `1.21`. The `ssl` probe must be renamed to `tls` in [services.sslh.appendConfig](options.html#opt-services.sslh.appendConfig).
+
+- Users of [OpenAFS 1.6](http://openafs.org) must upgrade their services to OpenAFS 1.8! In this release, the OpenAFS package version 1.6.24 is marked broken but can be used during transition to OpenAFS 1.8.x. Use the options `services.openafsClient.packages.module`, `services.openafsClient.packages.programs` and `services.openafsServer.package` to select a different OpenAFS package. OpenAFS 1.6 will be removed in the next release. The package `openafs` and the service options will then silently point to the OpenAFS 1.8 release.
+
+  See also the OpenAFS [Administrator Guide](http://docs.openafs.org/AdminGuide/index.html) for instructions. Beware of the following when updating servers:
+
+  - The storage format of the server key has changed and the key must be converted before running the new release.
+
+  - When updating multiple database servers, turn off the database servers from the highest IP down to the lowest with resting periods in between. Start up in reverse order. Do not concurrently run database servers working with different OpenAFS releases!
+
+  - Update servers first, then clients.
+
+- Radicale's default package has changed from 2.x to 3.x. An upgrade checklist can be found [here](https://github.com/Kozea/Radicale/blob/3.0.x/NEWS.md#upgrade-checklist). You can use the newer version in the NixOS service by setting the `package` to `radicale3`, which is done automatically if `stateVersion` is 20.09 or higher.
+
+- `udpt` experienced a complete rewrite from C++ to rust. The configuration format changed from ini to toml. The new configuration documentation can be found at [the official website](https://naim94a.github.io/udpt/config.html) and example configuration is packaged in `${udpt}/share/udpt/udpt.toml`.
+
+- We now have a unified [services.xserver.displayManager.autoLogin](options.html#opt-services.xserver.displayManager.autoLogin) option interface to be used for every display-manager in NixOS.
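+
+  A hedged sketch, assuming the unified interface exposes `enable` and `user` sub-options (the user name is a placeholder):
+
+  ```nix
+  {
+    services.xserver.displayManager.autoLogin = {
+      enable = true;
+      user = "alice";
+    };
+  }
+  ```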
+
+- The `bitcoind` module has changed to multi-instance, using submodules. Therefore, it is now mandatory to name each instance. To use this new multi-instance config with an existing bitcoind data directory and user, you have to adjust the original config, e.g.:
+
+  ```nix
+  {
+    services.bitcoind = {
+      enable = true;
+      extraConfig = "...";
+      ...
+    };
+  }
+  ```
+
+  To something similar:
+
+  ```nix
+  {
+    services.bitcoind.mainnet = {
+      enable = true;
+      dataDir = "/var/lib/bitcoind";
+      user = "bitcoin";
+      extraConfig = "...";
+      ...
+    };
+  }
+  ```
+
+  The key settings are:
+
+  - `dataDir` - to continue using the same data directory.
+
+  - `user` - to continue using the same user so that bitcoind maintains access to its files.
+
+- Graylog introduced a change in the LDAP server certificate validation behaviour for version 3.3.3 which might break existing setups. When updating Graylog from a version before 3.3.3 make sure to check the Graylog [release info](https://www.graylog.org/post/announcing-graylog-v3-3-3) for information on how to avoid the issue.
+
+- The `dokuwiki` module has changed to multi-instance, using submodules. Therefore, it is now mandatory to name each instance. Moreover, forcing SSL by default has been dropped, so `nginx.forceSSL` and `nginx.enableACME` are no longer set to `true`. To continue using your service with the original SSL settings, you have to adjust the original config, e.g.:
+
+  ```nix
+  {
+    services.dokuwiki = {
+      enable = true;
+      ...
+    };
+  }
+  ```
+
+  To something similar:
+
+  ```nix
+  {
+    services.dokuwiki."mywiki" = {
+      enable = true;
+      nginx = {
+        forceSSL = true;
+        enableACME = true;
+      };
+      ...
+    };
+  }
+  ```
+
+  The base package has also been upgraded to the 2020-07-29 "Hogfather" release. Plugins might be incompatible or require upgrading.
+
+- The [services.postgresql.dataDir](options.html#opt-services.postgresql.dataDir) option is now set to `"/var/lib/postgresql/${cfg.package.psqlSchema}"` regardless of your [system.stateVersion](options.html#opt-system.stateVersion). Users with an existing postgresql install that have a [system.stateVersion](options.html#opt-system.stateVersion) of `17.03` or below should double check what the value of their [services.postgresql.dataDir](options.html#opt-services.postgresql.dataDir) option is (`/var/db/postgresql`) and then explicitly set this value to maintain compatibility:
+
+  ```nix
+  {
+    services.postgresql.dataDir = "/var/db/postgresql";
+  }
+  ```
+
+  The postgresql module now expects there to be a database super user account called `postgres` regardless of your [system.stateVersion](options.html#opt-system.stateVersion). Users with an existing postgresql install that have a [system.stateVersion](options.html#opt-system.stateVersion) of `17.03` or below should run the following SQL statements as a database super admin user before upgrading:
+
+  ```SQL
+  CREATE ROLE postgres LOGIN SUPERUSER;
+  ```
+
+- The USBGuard module now removes options and instead hardcodes values for `IPCAccessControlFiles`, `ruleFiles`, and `auditFilePath`. Audit logs can be found in the journal.
+
+- The NixOS module system now evaluates option definitions more strictly, allowing it to detect a larger set of problems. As a result, what previously evaluated may not do so anymore. See [the PR that changed this](https://github.com/NixOS/nixpkgs/pull/82743#issuecomment-674520472) for more info.
+
+- For NixOS configuration options, the type `loaOf`, after its initial deprecation in release 20.03, has been removed. In NixOS and Nixpkgs, options using this type have been converted to `attrsOf`. For more information on this change, have a look at these links: [issue \#1800](https://github.com/NixOS/nixpkgs/issues/1800), [PR \#63103](https://github.com/NixOS/nixpkgs/pull/63103).
+
+- `config.systemd.services.${name}.path` now returns a list of paths instead of a colon-separated string.
+
+- The Caddy module now uses Caddy v2 by default. Caddy v1 can still be used by setting [services.caddy.package](options.html#opt-services.caddy.package) to `pkgs.caddy1`.
+
+  New option [services.caddy.adapter](options.html#opt-services.caddy.adapter) has been added.
+
+- The [jellyfin](options.html#opt-services.jellyfin.enable) module will use and stay on the Jellyfin version `10.5.5` if `stateVersion` is lower than `20.09`. This is because significant changes were made to the database schema, and it is highly recommended to backup your instance before upgrading. After making your backup, you can upgrade to the latest version either by setting your `stateVersion` to `20.09` or higher, or set the `services.jellyfin.package` to `pkgs.jellyfin`. If you do not wish to upgrade Jellyfin, but want to change your `stateVersion`, you can set the value of `services.jellyfin.package` to `pkgs.jellyfin_10_5`.
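+
+  For example, staying on the old release after changing your `stateVersion` might look like:
+
+  ```nix
+  {
+    services.jellyfin.package = pkgs.jellyfin_10_5;
+  }
+  ```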
+
+- The `security.rngd` service is now disabled by default. This choice was made because krngd in the Linux kernel makes it functionally redundant for most use cases.
+
+- The `hardware.nvidia.optimus_prime.enable` option has been renamed to `hardware.nvidia.prime.sync.enable` and has many new enhancements. Related nvidia prime settings may have also changed.
+
+- The package nextcloud17 has been removed and nextcloud18 was marked as insecure since both of them will [be EOL (end of life) within the lifetime of 20.09](https://docs.nextcloud.com/server/19/admin_manual/release_schedule.html).
+
+  It's necessary to upgrade to nextcloud19:
+
+  - From nextcloud17, you have to upgrade to nextcloud18 first as Nextcloud doesn't allow going multiple major revisions forward in a single upgrade. This is possible by setting [services.nextcloud.package](options.html#opt-services.nextcloud.package) to nextcloud18.
+
+  - From nextcloud18, it's possible to directly upgrade to nextcloud19 by setting [services.nextcloud.package](options.html#opt-services.nextcloud.package) to nextcloud19.
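+
+  For example, the intermediate step of pinning nextcloud18 before moving on to nextcloud19 might look like:
+
+  ```nix
+  {
+    services.nextcloud.package = pkgs.nextcloud18;
+  }
+  ```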
+
+- The GNOME desktop manager no longer installs gnome3.epiphany by default. This was done because it has a usability-breaking issue (see issue [\#98819](https://github.com/NixOS/nixpkgs/issues/98819)) that makes it unsuitable as a default app.
+
+  ::: {.note}
+  Issue [\#98819](https://github.com/NixOS/nixpkgs/issues/98819) is now fixed and gnome3.epiphany is once again installed by default.
+  :::
+
+- If you want to manage the configuration of wpa_supplicant outside of NixOS you must ensure that none of [networking.wireless.networks](options.html#opt-networking.wireless.networks), [networking.wireless.extraConfig](options.html#opt-networking.wireless.extraConfig) or [networking.wireless.userControlled.enable](options.html#opt-networking.wireless.userControlled.enable) is being used or `true`. Using any of those options will cause wpa_supplicant to be started with a NixOS generated configuration file instead of your own.
+
+## Other Notable Changes {#sec-release-20.09-notable-changes}
+
+- SD images are now compressed by default using `zstd`. The compression for ISO images has also been changed to `zstd`, but ISO images are still not compressed by default.
+
+- `services.journald.rateLimitBurst` was updated from `1000` to `10000` to follow the new upstream systemd default.
+
+- The notmuch package moves its emacs-related binaries and emacs lisp files to a separate output. They're not part of the default `out` output anymore - if you relied on the `notmuch-emacs-mua` binary or the emacs lisp files, access them via the `notmuch.emacs` output.
+
+- Device tree overlay support was improved in [\#79370](https://github.com/NixOS/nixpkgs/pull/79370) and now uses [hardware.deviceTree.kernelPackage](options.html#opt-hardware.deviceTree.kernelPackage) instead of `hardware.deviceTree.base`. [hardware.deviceTree.overlays](options.html#opt-hardware.deviceTree.overlays) configuration was extended to support `.dts` files with symbols. Device trees can now be filtered by setting [hardware.deviceTree.filter](options.html#opt-hardware.deviceTree.filter) option.
+
+- The default output of `buildGoPackage` is now `$out` instead of `$bin`.
+
+- `buildGoModule` `doCheck` now defaults to `true`.
+
+- Packages built using `buildRustPackage` now use `release` mode for the `checkPhase` by default.
+
+  Please note that Rust packages utilizing a custom build/install procedure (e.g. by using a `Makefile`) or test suites that rely on the structure of the `target/` directory may break due to those assumptions. For further information, please read the Rust section in the Nixpkgs manual.
+
+- The cc- and binutils-wrapper's "infix salt" and `_BUILD_` and `_TARGET_` user infixes have been replaced with a "suffix salt" and the suffixes `_FOR_BUILD` and `_FOR_TARGET`. This matches the autotools convention for such environment variables, making interfacing with other tools easier.
+
+- Additional Git documentation (HTML and text files) is now available via the `git-doc` package.
+
+- Default algorithm for ZRAM swap was changed to `zstd`.
+
+- The installer now enables sshd by default. This improves the installation experience on headless machines, especially ARM single-board computers. To log in through SSH, either a password or an SSH key must be set for the root user or the nixos user.
+
+- The scripted networking system now uses `.link` files in `/etc/systemd/network` to configure the MAC address and link MTU, instead of the sometimes buggy `network-link-*` units, which have been removed. Bringing the interface up has been moved to the beginning of the `network-addresses-*` unit. Note this doesn't require `systemd-networkd` - it's udev that parses `.link` files. Extra care needs to be taken in the presence of [legacy udev rules](https://wiki.debian.org/NetworkInterfaceNames#THE_.22PERSISTENT_NAMES.22_SCHEME) to rename interfaces, as the MAC address and MTU defined in these options can only match on the original link name. In such cases, you most likely want to create a `10-*.link` file through [systemd.network.links](options.html#opt-systemd.network.links) and set both name and MAC address / MTU there.
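+
+  A minimal sketch of such a link file (the MAC address and interface name below are placeholders) might look like:
+
+  ```nix
+  {
+    systemd.network.links."10-lan" = {
+      matchConfig.MACAddress = "00:11:22:33:44:55";
+      linkConfig.Name = "lan0";
+    };
+  }
+  ```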
+
+- Grafana received a major update to version 7.x. A plugin is now needed for image rendering support, and plugins must now be signed by default. More information can be found [in the Grafana documentation](https://grafana.com/docs/grafana/latest/installation/upgrading/#upgrading-to-v7-0).
+
+- The `hardware.u2f` module, which was installing udev rules, was removed, as udev gained native support to handle FIDO security tokens.
+
+- The `services.transmission` module was enhanced with the new options: [services.transmission.credentialsFile](options.html#opt-services.transmission.credentialsFile), [services.transmission.openFirewall](options.html#opt-services.transmission.openFirewall), and [services.transmission.performanceNetParameters](options.html#opt-services.transmission.performanceNetParameters).
+
+  `transmission-daemon` is now started with additional systemd sandbox/hardening options for better security. Please [report](https://github.com/NixOS/nixpkgs/issues) any use case where this is not working well. In particular, the `RootDirectory` option newly set forbids uploading or downloading a torrent outside of the default directory configured at [settings.download-dir](options.html#opt-services.transmission.settings). If you really need Transmission to access other directories, you must include those directories into the `BindPaths` of the service:
+
+  ```nix
+  {
+    systemd.services.transmission.serviceConfig.BindPaths = [ "/path/to/alternative/download-dir" ];
+  }
+  ```
+
+  Also, connection to the RPC (Remote Procedure Call) of `transmission-daemon` is now only available on the local network interface by default. Use:
+
+  ```nix
+  {
+    services.transmission.settings.rpc-bind-address = "0.0.0.0";
+  }
+  ```
+
+  to get the previous behavior of listening on all network interfaces.
+
+- With this release `systemd-networkd` (when enabled through [networking.useNetworkd](options.html#opt-networking.useNetworkd)) has its netlink socket created through a `systemd.socket` unit. This gives us control over socket buffer sizes and other parameters. For larger setups where networkd has to create a lot of (virtual) devices the default buffer size (currently 128MB) is not enough.
+
+  On a machine with \>100 virtual interfaces (e.g., wireguard tunnels, VLANs, ...) that all have to be brought up during system startup, the receive buffer usage will spike for a brief period. Eventually some of the messages will be dropped since there is not enough (permitted) buffer space available.
+
+  By having `systemd-networkd` start with a netlink socket created by `systemd` we can configure the `ReceiveBufferSize=` parameter in the socket options (i.e. `systemd.sockets.systemd-networkd.socketOptions.ReceiveBufferSize`) without recompiling `systemd-networkd`.
+
+  Since the actual memory requirements depend on hardware, timing, exact configurations etc. it isn't currently possible to infer a good default from within the NixOS module system. Administrators are advised to monitor the logs of `systemd-networkd` for `rtnl: kernel receive buffer overrun` spam and increase the memory limit as they see fit.
+
+  Note: Increasing the `ReceiveBufferSize=` doesn't allocate any memory. It just increases the upper bound on the kernel side. The memory allocation depends on the amount of messages that are queued on the kernel side of the netlink socket.
+
+- Specifying [mailboxes](options.html#opt-services.dovecot2.mailboxes) in the dovecot2 module as a list is deprecated and will break eval in 21.05. Instead, an attribute set should be specified where the `name` is the attribute's key.
+
+  This means that a configuration like this
+
+  ```nix
+  {
+    services.dovecot2.mailboxes = [
+      { name = "Junk";
+        auto = "create";
+      }
+    ];
+  }
+  ```
+
+  should now look like this:
+
+  ```nix
+  {
+    services.dovecot2.mailboxes = {
+      Junk.auto = "create";
+    };
+  }
+  ```
+
+- netbeans was upgraded to 12.0 and now defaults to OpenJDK 11. This might cause problems if your projects depend on packages that were removed in Java 11.
+
+- nextcloud has been updated to [v19](https://nextcloud.com/blog/nextcloud-hub-brings-productivity-to-home-office/).
+
+  If you have an existing installation, please make sure that you're on nextcloud18 before upgrading to nextcloud19 since Nextcloud doesn't support upgrades across multiple major versions.
+
+- The `nixos-run-vms` script now deletes the state of previously run machines on test startup. You can use the `--keep-vm-state` flag to match the previous behaviour and keep the same VM state between different test runs.
+
+- The [nix.buildMachines](options.html#opt-nix.buildMachines) option is now type-checked. There are no functional changes, however this may require updating some configurations to use correct types for all attributes.
+
+- The `fontconfig` module stopped generating config and cache files for fontconfig 2.10.x; `/etc/fonts/fonts.conf` now belongs to the latest fontconfig, just like on other Linux distributions, and we will [no longer](https://github.com/NixOS/nixpkgs/pull/95358) be versioning the config directories.
+
+  Fontconfig 2.10.x was removed from Nixpkgs since it hasn't been used in any Nixpkgs package for years now.
+
+- The Nginx module `nginxModules.fastcgi-cache-purge` has been renamed to its official name `nginxModules.cache-purge`. The Nginx module `nginxModules.ngx_aws_auth` has been renamed to its official name `nginxModules.aws-auth`.
+
+- The option `defaultPackages` was added. It installs the packages perl, rsync and strace for now. They were added unconditionally to `systemPackages` before, but are not strictly necessary for a minimal NixOS install. You can set it to an empty list to have a more minimal system. Be aware that some functionality might still have an impure dependency on those packages, so things might break.
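+
+  Assuming the option lives at `environment.defaultPackages`, a more minimal system could be configured like this:
+
+  ```nix
+  {
+    environment.defaultPackages = [ ];
+  }
+  ```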
+
+- The `undervolt` option no longer needs to apply its settings every 30s. If they still become undone, open an issue and restore the previous behaviour using `undervolt.useTimer`.
+
+- Agda has been heavily reworked.
+
+  - `agda.mkDerivation` has been heavily changed and is now located at agdaPackages.mkDerivation.
+
+  - New top-level packages agda and `agda.withPackages` have been added, the second of which sets up agda with access to chosen libraries.
+
+  - All agda libraries now live under `agdaPackages`.
+
+  - Many broken libraries have been removed.
+
+  See the [new documentation](https://nixos.org/nixpkgs/manual/#agda) for more information.
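+
+  As a sketch, installing Agda together with the standard library (assuming it is exposed as `standard-library` in `agdaPackages`) could look like:
+
+  ```nix
+  {
+    environment.systemPackages = [
+      (pkgs.agda.withPackages (p: [ p.standard-library ]))
+    ];
+  }
+  ```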
+
+- The `deepin` package set has been removed from nixpkgs. It was a work in progress to package the [Deepin Desktop Environment (DDE)](https://www.deepin.org/en/dde/), including libraries, tools and applications, and it was still missing a service to launch the desktop environment. It has shown to no longer be a feasible goal due to reasons discussed in [issue \#94870](https://github.com/NixOS/nixpkgs/issues/94870). The package `netease-cloud-music` has also been removed, as it depends on libraries from deepin.
+
+- The `opendkim` module now uses systemd sandboxing features to limit the exposure of the system towards the opendkim service.
+
+- Kubernetes has been upgraded to 1.19.1, which also means that the golang version to build it has been bumped to 1.15. This may have consequences for your existing clusters and their certificates. Please consider the [release notes for Kubernetes 1.19](https://relnotes.k8s.io/?markdown=93264) carefully before upgrading.
+
+- For AMD GPUs, Vulkan can now be used by adding `amdvlk` to `hardware.opengl.extraPackages`.
+
+- Similarly, still for AMD GPUs, the ROCm OpenCL stack can now be used by adding `rocm-opencl-icd` to `hardware.opengl.extraPackages`.
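+
+  For example, enabling both Vulkan and ROCm OpenCL on an AMD system might look like:
+
+  ```nix
+  {
+    hardware.opengl.extraPackages = with pkgs; [
+      amdvlk
+      rocm-opencl-icd
+    ];
+  }
+  ```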
+
+## Contributions {#sec-release-20.09-contributions}
+
+I, Jonathan Ringer, would like to thank the following individuals for their work on nixpkgs. This release could not be done without the hard work of the NixOS community. There were 31282 contributions across 1313 contributors.
+
+1.  2288 Mario Rodas
+
+2.  1837 Frederik Rietdijk
+
+3.  946 Jörg Thalheim
+
+4.  925 Maximilian Bosch
+
+5.  687 Jonathan Ringer
+
+6.  651 Jan Tojnar
+
+7.  622 Daniël de Kok
+
+8.  605 WORLDofPEACE
+
+9.  597 Florian Klink
+
+10. 528 José Romildo Malaquias
+
+11. 281 volth
+
+12. 101 Robert Scott
+
+13. 86 Tim Steinbach
+
+14. 76 WORLDofPEACE
+
+15. 49 Maximilian Bosch
+
+16. 42 Thomas Tuegel
+
+17. 37 Doron Behar
+
+18. 36 Vladimír Čunát
+
+19. 27 Jonathan Ringer
+
+20. 27 Maciej Krüger
+
+I, Jonathan Ringer, would also like to personally thank \@WORLDofPEACE for their help in mentoring me on the release process. Special thanks also goes to Thomas Tuegel for helping immensely with stabilizing Qt, KDE, and Plasma5; I would also like to thank Robert Scott for his numerous fixes and pull request reviews.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2105.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2105.section.md
new file mode 100644
index 000000000000..cae3f8a85011
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2105.section.md
@@ -0,0 +1,428 @@
+# Release 21.05 ("Okapi", 2021/05/31) {#sec-release-21.05}
+
+Support is planned until the end of December 2021, handing over to 21.11.
+
+## Highlights {#sec-release-21.05-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- Core version changes:
+
+  - gcc: 9.3.0 -\> 10.3.0
+
+  - glibc: 2.30 -\> 2.32
+
+  - default linux: 5.4 -\> 5.10, all supported kernels available
+
+  - mesa: 20.1.7 -\> 21.0.1
+
+- Desktop Environments:
+
+  - GNOME: 3.36 -\> 40, see its [release notes](https://help.gnome.org/misc/release-notes/40.0/)
+
+  - Plasma5: 5.18.5 -\> 5.21.3
+
+  - kdeApplications: 20.08.1 -\> 20.12.3
+
+  - cinnamon: 4.6 -\> 4.8.1
+
+- Programming Languages and Frameworks:
+
+  - Python optimizations were disabled again. Builds with optimizations enabled are not reproducible. Optimizations can now be enabled with an option.
+
+- The linux_latest kernel was updated to the 5.13 series. It currently is not officially supported for use with the zfs filesystem. If you use zfs, you should use a different kernel version (either the LTS kernel, or track a specific one).
+
+## New Services {#sec-release-21.05-new-services}
+
+The following new services were added since the last release:
+
+- [GNURadio](https://www.gnuradio.org/) 3.8 and 3.9 were [finally](https://github.com/NixOS/nixpkgs/issues/82263) packaged, along with a rewrite of the Nix expressions, allowing users to override which of the optional features supported upstream get compiled. Additionally, the attributes `gnuradio` (3.9), `gnuradio3_8` and `gnuradio3_7` now point to externally wrapped derivations by default, which allow you to also add `extraPythonPackages` to the Python interpreter used by GNURadio. Missing environment variables needed for an operational GUI were also added ([\#75478](https://github.com/NixOS/nixpkgs/issues/75478)).
+
+- [Keycloak](https://www.keycloak.org/), an open source identity and access management server with support for [OpenID Connect](https://openid.net/connect/), [OAUTH 2.0](https://oauth.net/2/) and [SAML 2.0](https://en.wikipedia.org/wiki/SAML_2.0).
+
+  See the [Keycloak section of the NixOS manual](#module-services-keycloak) for more information.
+
+- [services.samba-wsdd.enable](options.html#opt-services.samba-wsdd.enable), a Web Services Dynamic Discovery host daemon.
+
+- [Discourse](https://www.discourse.org/), a modern and open source discussion platform.
+
+  See the [Discourse section of the NixOS manual](#module-services-discourse) for more information.
+
+- [services.nebula.networks](options.html#opt-services.nebula.networks), for the [Nebula VPN](https://github.com/slackhq/nebula).
+
+## Backward Incompatibilities {#sec-release-21.05-incompatibilities}
+
+When upgrading from a previous release, please be aware of the following incompatible changes:
+
+- The GNOME desktop environment was upgraded to 40, see the release notes for [40.0](https://help.gnome.org/misc/release-notes/40.0/) and [3.38](https://help.gnome.org/misc/release-notes/3.38/). The `gnome3` attribute set has been renamed to `gnome` and so have the NixOS options.
+
+- If you are using `services.udev.extraRules` to assign custom names to network interfaces, this may stop working due to a change in the initialisation of dhcpcd and systemd networkd. To avoid this, either move them to `services.udev.initrdRules` or see the new [Assigning custom names](#sec-custom-ifnames) section of the NixOS manual for an example using networkd links.
+
+- The `security.hideProcessInformation` module has been removed. It was broken since the switch to cgroups-v2.
+
+- The `linuxPackages.ati_drivers_x11` kernel modules have been removed. The drivers only supported kernels prior to 4.2, and thus have become obsolete.
+
+- The `systemConfig` kernel parameter is no longer added to boot loader entries. It has been unused since September 2010, but if you do have system generations from that era, you will now be unable to boot into them.
+
+- `systemd-journal2gelf` no longer parses json and expects the receiving system to handle it. How to achieve this with Graylog is described in this [GitHub issue](https://github.com/parse-nl/SystemdJournal2Gelf/issues/10).
+
+- If the `services.dbus` module is enabled, then the user D-Bus session is now always socket activated. The associated options `services.dbus.socketActivated` and `services.xserver.startDbusSession` have therefore been removed and you will receive a warning if they are present in your configuration. This change makes the user D-Bus session available also for non-graphical logins.
+
+- The `networking.wireless.iwd` module now installs the upstream-provided 80-iwd.link file, which sets the NamePolicy= for all wlan devices to "keep kernel", to avoid race conditions between iwd and networkd. If you don't want this, you can set `systemd.network.links."80-iwd" = lib.mkForce {}`.
+
+- `rubyMinimal` was removed due to being unused and unusable. The default Ruby interpreter includes JIT support, which makes it reference its compiler. Since JIT support is probably needed by some gems, it was decided to enable this feature, with all its cc references, by default, and to allow building a Ruby derivation without references to cc by setting `jitSupport = false;` in an overlay. See [\#90151](https://github.com/NixOS/nixpkgs/pull/90151) for more info.
+
+- Setting `services.openssh.authorizedKeysFiles` now also affects which keys `security.pam.enableSSHAgentAuth` will use. WARNING: If you are using these options in combination do make sure that any key paths you use are present in `services.openssh.authorizedKeysFiles`!
+
+- The option `fonts.enableFontDir` has been renamed to [fonts.fontDir.enable](options.html#opt-fonts.fontDir.enable). The path of font directory has also been changed to `/run/current-system/sw/share/X11/fonts`, for consistency with other X11 resources.
+
+- A number of options have been renamed in the kicad interface. `oceSupport` has been renamed to `withOCE`, `withOCCT` has been renamed to `withOCC`, `ngspiceSupport` has been renamed to `withNgspice`, and `scriptingSupport` has been renamed to `withScripting`. Additionally, `kicad/base.nix` no longer provides default argument values since these are provided by `kicad/default.nix`.
+
+- The socket for the `pdns-recursor` module was moved from `/var/lib/pdns-recursor` to `/run/pdns-recursor` to match upstream.
+
+- Paperwork was updated to version 2. The on-disk format slightly changed, and it is not possible to downgrade from Paperwork 2 back to Paperwork 1.3. Back your documents up before upgrading. See [this thread](https://forum.openpaper.work/t/paperwork-2-0/112/5) for more details.
+
+- PowerDNS has been updated from `4.2.x` to `4.3.x`. Please be sure to review the [Upgrade Notes](https://doc.powerdns.com/authoritative/upgrading.html#x-to-4-3-0) provided by upstream before upgrading. Worth specifically noting is that the service now runs entirely as a dedicated `pdns` user, instead of starting as `root` and dropping privileges, as well as the default `socket-dir` location changing from `/var/lib/powerdns` to `/run/pdns`.
+
+- The `mediatomb` service now uses the new and maintained fork, the `gerbera` package, by default instead of the unmaintained `mediatomb` package. If you want to keep the old behavior, you must declare it with:
+
+  ```nix
+  {
+    services.mediatomb.package = pkgs.mediatomb;
+  }
+  ```
+
+  One new option `openFirewall` has been introduced which defaults to false. If you relied on the service declaration to add the firewall rules itself before, you should now declare it with:
+
+  ```nix
+  {
+    services.mediatomb.openFirewall = true;
+  }
+  ```
+
+- xfsprogs was updated from 4.19 to 5.11. It now enables reflink support by default on filesystem creation. Support for reflinks was added with an experimental status to kernel 4.9 and deemed stable in kernel 4.16. If you want to be able to mount XFS filesystems created with this release of xfsprogs on kernel releases older than those, you need to format them with `mkfs.xfs -m reflink=0`.
+
+- The uWSGI server is now built with POSIX capabilities. As a consequence, root is no longer required in emperor mode and the service defaults to running as the unprivileged `uwsgi` user. Any additional capability can be added via the new option [services.uwsgi.capabilities](options.html#opt-services.uwsgi.capabilities). The previous behaviour can be restored by setting:
+
+  ```nix
+  {
+    services.uwsgi.user = "root";
+    services.uwsgi.group = "root";
+    services.uwsgi.instance =
+      {
+        uid = "uwsgi";
+        gid = "uwsgi";
+      };
+  }
+  ```
+
+  Another incompatibility from the previous release is that vassals running under a different user or group need to use `immediate-{uid,gid}` instead of the usual `uid,gid` options.
+
+- btc1 has been abandoned upstream, and removed.
+
+- cpp_ethereum (aleth) has been abandoned upstream, and removed.
+
+- riak-cs package removed along with `services.riak-cs` module.
+
+- stanchion package removed along with `services.stanchion` module.
+
+- mutt has been updated to a new major version (2.x), which comes with some backward incompatible changes that are described in the [release notes for Mutt 2.0](http://www.mutt.org/relnotes/2.0/).
+
+- `vim` and `neovim` switched to Python 3, dropping all Python 2 support.
+
+- [networking.wireguard.interfaces.\<name\>.generatePrivateKeyFile](options.html#opt-networking.wireguard.interfaces), which is off by default, had a `chmod` race condition fixed. As an aside, the parent directory's permissions were widened, and the key files were made owner-writable. This only affects newly created keys. However, if the exact permissions are important for your setup, read [\#121294](https://github.com/NixOS/nixpkgs/pull/121294).
+
+- [boot.zfs.forceImportAll](options.html#opt-boot.zfs.forceImportAll) previously did nothing, but has been fixed. However its default has been changed to `false` to preserve the existing default behaviour. If you have this explicitly set to `true`, please note that your non-root pools will now be forcibly imported.
+
+- openafs now points to openafs_1_8, which is the new stable release. OpenAFS 1.6 was removed.
+
+- The WireGuard module gained a new option `networking.wireguard.interfaces.<name>.peers.*.dynamicEndpointRefreshSeconds` that implements refreshing the IP of DNS-based endpoints periodically (which WireGuard itself [cannot do](https://lists.zx2c4.com/pipermail/wireguard/2017-November/002028.html)).
+
+- MariaDB has been updated to 10.5. Before you upgrade, it would be best to take a backup of your database and read [ Incompatible Changes Between 10.4 and 10.5](https://mariadb.com/kb/en/upgrading-from-mariadb-104-to-mariadb-105/#incompatible-changes-between-104-and-105). After the upgrade you will need to run `mysql_upgrade`.
+
+- The TokuDB storage engine was deprecated in mariadb 10.5 and removed in mariadb 10.6. It is recommended to switch to RocksDB. See also [TokuDB](https://mariadb.com/kb/en/tokudb/) and [MDEV-19780: Remove the TokuDB storage engine](https://jira.mariadb.org/browse/MDEV-19780).
+
+- The `openldap` module now has support for OLC-style configuration, users of the `configDir` option may wish to migrate. If you continue to use `configDir`, ensure that `olcPidFile` is set to `/run/slapd/slapd.pid`.
+
+  As a result, `extraConfig` and `extraDatabaseConfig` are removed. To help with migration, you can convert your `slapd.conf` file to OLC configuration with the following script (find the location of this configuration file by running `systemctl status openldap`; it is the `-f` option):
+
+  ```ShellSession
+  $ TMPDIR=$(mktemp -d)
+  $ slaptest -f /path/to/slapd.conf -F $TMPDIR
+  $ slapcat -F $TMPDIR -n0 -H 'ldap:///???(!(objectClass=olcSchemaConfig))'
+  ```
+
+  This will dump your current configuration in LDIF format, which should be straightforward to convert into Nix settings. This does not show your schema configuration, as this is unnecessarily verbose for users of the default schemas and `slaptest` is buggy with schemas directly in the config file.
+
+- Amazon EC2 and OpenStack Compute (nova) images now re-fetch instance meta data and user data from the instance metadata service (IMDS) on each boot. For example: stopping an EC2 instance, changing its user data, and restarting the instance will now cause it to fetch and apply the new user data.
+
+  ::: {.warning}
+  Specifically, `/etc/ec2-metadata` is re-populated on each boot. Some NixOS scripts that read from this directory are guarded to only run if the files they want to manipulate do not already exist, and so will not re-apply their changes if the IMDS response changes. Examples: `root`'s SSH key is only added if `/root/.ssh/authorized_keys` does not exist, and SSH host keys are only set from user data if they do not exist in `/etc/ssh`.
+  :::
+
+- The `rspamd` service is now sandboxed. It is run as a dynamic user instead of root, so secrets and other files may have to be moved or their permissions may have to be fixed. The sockets are now located in `/run/rspamd` instead of `/run`.
+
+- Enabling the Tor client no longer silently also enables and configures Privoxy, and the `services.tor.client.privoxy.enable` option has been removed. To enable Privoxy, and to configure it to use Tor's faster port, use the following configuration:
+
+  ```nix
+  {
+    services.privoxy.enable = true;
+    services.privoxy.enableTor = true;
+  }
+  ```
+
+- The `services.tor` module has a new exhaustively typed [services.tor.settings](options.html#opt-services.tor.settings) option following RFC 0042; backward compatibility with old options has been preserved when aliasing was possible. The corresponding systemd service has been hardened, but there is a chance that the service still requires more permissions, so please report any related trouble on the bugtracker. Onion services v3 are now supported in [services.tor.relay.onionServices](options.html#opt-services.tor.relay.onionServices). A new [services.tor.openFirewall](options.html#opt-services.tor.openFirewall) option has been introduced for allowing connections on all the TCP ports configured.
+
+- The options `services.slurm.dbdserver.storagePass` and `services.slurm.dbdserver.configFile` have been removed. Use `services.slurm.dbdserver.storagePassFile` instead to provide the database password. Extra config options can be given via the option `services.slurm.dbdserver.extraConfig`. The actual configuration file is created on the fly on startup of the service. This prevents the password from being exposed in the Nix store.
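+
+  A sketch of the new option (the key file path below is only a placeholder) might look like:
+
+  ```nix
+  {
+    services.slurm.dbdserver.storagePassFile = "/run/keys/slurmdbd-storage-pass";
+  }
+  ```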
+
+- The `wafHook` hook does not wrap Python anymore. Packages depending on `wafHook` need to include any Python into their `nativeBuildInputs`.
+
+- Starting with version 1.7.0, the project formerly named `CodiMD` is now named `HedgeDoc`. New installations will no longer use the old name for users, state directories and such, this needs to be considered when moving state to a more recent NixOS installation. Based on [system.stateVersion](options.html#opt-system.stateVersion), existing installations will continue to work.
+
+- The fish-foreign-env package has been replaced with fishPlugins.foreign-env, in which the fish functions have been relocated to the `vendor_functions.d` directory to be loaded automatically.
+
+- The prometheus json exporter is now managed by the prometheus community. Together with additional features, some backwards incompatibilities were introduced. Most importantly, the exporter no longer accepts a fixed command-line parameter to specify the URL of the endpoint serving JSON. It now expects this URL to be passed as a URL parameter when scraping the exporter's `/probe` endpoint. In the prometheus scrape configuration the scrape target might look like this:
+
+  ```
+  http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/endpoint
+  ```
+
+  Existing configuration for the exporter needs to be updated, but can partially be re-used. Documentation is available in the upstream repository and a small example for NixOS is available in the corresponding NixOS test.
+
+  These changes also affect [services.prometheus.exporters.rspamd.enable](options.html#opt-services.prometheus.exporters.rspamd.enable), which is just a preconfigured instance of the json exporter.
+
+  For more information, take a look at the [ official documentation](https://github.com/prometheus-community/json_exporter) of the json_exporter.
+
+- Androidenv was updated, removing the `includeDocs` and `lldbVersions` arguments. Docs only covered a single version of the Android SDK, LLDB is now bundled with the NDK, and both are no longer available to download from the Android package repositories. Additionally, since the package lists have been updated, some older versions of Android packages may not be bundled. If you depend on older versions of Android packages, we recommend overriding the repo.
+
+  Android packages are now loaded from a repo.json file created by parsing Android repo XML files. The arguments `repoJson` and `repoXmls` have been added to allow overriding the built-in androidenv repo.json with your own. Additionally, license files are now written to allow compatibility with Gradle-based tools, and the `extraLicenses` argument has been added to accept more SDK licenses if your project requires it. See the androidenv documentation for more details.
+
+- The attribute `mpi` is now consistently used to provide a default, system-wide MPI implementation. The default implementation is openmpi, which was already used by all derivations affected by this change. Note that all packages that previously used `mpi ? null` as an input for optional MPI builds have been changed to the boolean input parameter `useMpi` to enable building with MPI. Building all packages with `mpich` instead of the default `openmpi` can now be achieved like this:
+
+  ```nix
+  self: super:
+  {
+    mpi = super.mpich;
+  }
+  ```
+
+- The Searx module has been updated with the ability to configure the service declaratively and uWSGI integration. The option `services.searx.configFile` has been renamed to [services.searx.settingsFile](options.html#opt-services.searx.settingsFile) for consistency with the new [services.searx.settings](options.html#opt-services.searx.settings). In addition, the `searx` uid and gid reservations have been removed since they were not necessary: the service is now running with a dynamically allocated uid.
+
+- The libinput module has been updated with the ability to configure mouse and touchpad settings separately. The options in `services.xserver.libinput` have been renamed to `services.xserver.libinput.touchpad`, while there is a new `services.xserver.libinput.mouse` for mouse related configuration.
+
+  Since touchpad options no longer apply to all devices, you may want to replicate your touchpad configuration in the mouse section, as sketched below.
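+
+  A minimal sketch of mirroring a single touchpad setting to mice might look like:
+
+  ```nix
+  {
+    services.xserver.libinput = {
+      enable = true;
+      touchpad.naturalScrolling = true;
+      mouse.naturalScrolling = true;
+    };
+  }
+  ```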
+
+- ALSA OSS emulation (`sound.enableOSSEmulation`) is now disabled by default.
+
+- Thinkfan has been updated to `1.2.x`, which comes with a new YAML based configuration format. For this reason, several NixOS options of the thinkfan module have been changed to non-backward compatible types. In addition, a new [services.thinkfan.settings](options.html#opt-services.thinkfan.settings) option has been added.
+
+  Please read the [ thinkfan documentation](https://github.com/vmatare/thinkfan#readme) before updating.
+
+- Adobe Flash Player support has been dropped from the tree. In particular, the following packages no longer support it:
+
+  - chromium
+
+  - firefox
+
+  - qt48
+
+  - qt5.qtwebkit
+
+  Additionally, packages flashplayer and hal-flash were removed along with the `services.flashpolicyd` module.
+
+- The `security.rngd` module has been removed. It was disabled by default in 20.09 as it was functionally redundant with krngd in the linux kernel. It is not necessary for any device that the kernel recognises as an hardware RNG, as it will automatically run the krngd task to periodically collect random data from the device and mix it into the kernel's RNG.
+
+- The default SMTP port for GitLab has been changed to `25` from its previous default of `465`. If you depended on this default, you should now set the [services.gitlab.smtp.port](options.html#opt-services.gitlab.smtp.port) option.
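+
+  For example, restoring the previous default might look like:
+
+  ```nix
+  {
+    services.gitlab.smtp.port = 465;
+  }
+  ```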
+
+- The default version of ImageMagick has been updated from 6 to 7. You can use imagemagick6, imagemagick6_light, and imagemagick6Big if you need the older version.
+
+- [services.xserver.videoDrivers](options.html#opt-services.xserver.videoDrivers) no longer uses the deprecated `cirrus` and `vesa` device dependent X drivers by default. It also enables both `amdgpu` and `nouveau` drivers by default now.
+
+- The `kindlegen` package is gone, because it is no longer supported or hosted by Amazon. Sadly, its replacement, Kindle Previewer, has no Linux support. However, there are other ways to generate MOBI files. See [the discussion](https://github.com/NixOS/nixpkgs/issues/96439) for more info.
+
+- The apacheKafka packages are now built with version-matched JREs. Versions 2.6 and above, the ones that recommend it, use jdk11, while versions below remain on jdk8. The NixOS service has been adjusted to start the service using the same version as the package, adjustable with the new [services.apache-kafka.jre](options.html#opt-services.apache-kafka.jre) option. Furthermore, the default list of [services.apache-kafka.jvmOptions](options.html#opt-services.apache-kafka.jvmOptions) have been removed. You should set your own according to the [upstream documentation](https://kafka.apache.org/documentation/#java) for your Kafka version.
+
+- The kodi package has been modified to allow concise addon management. Consider the following configuration from previous releases of NixOS to install kodi, including the kodiPackages.inputstream-adaptive and kodiPackages.vfs-sftp addons:
+
+  ```nix
+  {
+    environment.systemPackages = [
+      pkgs.kodi
+    ];
+
+    nixpkgs.config.kodi = {
+      enableInputStreamAdaptive = true;
+      enableVFSSFTP = true;
+    };
+  }
+  ```
+
+  All Kodi `config` flags have been removed, and as a result the above configuration should now be written as:
+
+  ```nix
+  {
+    environment.systemPackages = [
+      (pkgs.kodi.withPackages (p: with p; [
+        inputstream-adaptive
+        vfs-sftp
+      ]))
+    ];
+  }
+  ```
+
+- `environment.defaultPackages` now includes the nano package. If pkgs.nano is not added to the list, make sure another editor is installed and the `EDITOR` environment variable is set to it. Environment variables can be set using `environment.variables`.
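+
+  For example, keeping a usable default editor without nano (vim is only one possible choice) might look like:
+
+  ```nix
+  {
+    environment.systemPackages = [ pkgs.vim ];
+    environment.variables.EDITOR = "vim";
+  }
+  ```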
+
+- `services.minio.dataDir` changed type to a list of paths, required for specifying multiple data directories for use with erasure coding. Currently, the service neither enforces nor checks that the number of paths matches minio's requirements.
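+
+  A sketch of the new list form (the paths below are only placeholders) might look like:
+
+  ```nix
+  {
+    services.minio.dataDir = [
+      "/var/lib/minio/data1"
+      "/var/lib/minio/data2"
+    ];
+  }
+  ```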
+
+- All CUDA toolkit versions prior to CUDA 10 have been removed.
+
+- The kbdKeymaps package was removed since dvp and neo are now included in kbd. If you want to use the Programmer Dvorak Keyboard Layout, you have to use `dvorak-programmer` in `console.keyMap` now instead of `dvp`. In `services.xserver.xkbVariant` it's still `dvp`.
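+
+  For example, a configuration using the Programmer Dvorak layout in both the console and X11 might look like:
+
+  ```nix
+  {
+    console.keyMap = "dvorak-programmer";
+    services.xserver.xkbVariant = "dvp";
+  }
+  ```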
+
+- The babeld service is now being run as an unprivileged user. To achieve that the module configures `skip-kernel-setup true` and takes care of setting forwarding and rp_filter sysctls by itself as well as for each interface in `services.babeld.interfaces`.
+
+- The `services.zigbee2mqtt.config` option has been renamed to `services.zigbee2mqtt.settings` and now follows [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md).
+
+- The yadm dotfile manager has been updated from 2.x to 3.x, which has new (XDG) default locations for some data/state files. Most yadm commands will fail and print a legacy path warning (which describes how to upgrade/migrate your repository). If you have scripts, daemons, scheduled jobs, shell profiles, etc. that invoke yadm, expect them to fail or misbehave until you perform this migration and prepare accordingly.
+
+- Instead of determining `services.radicale.package` automatically based on `system.stateVersion`, the latest version is always used because old versions are not officially supported.
+
+  Furthermore, Radicale's systemd unit was hardened which might break some deployments. In particular, a non-default `filesystem_folder` has to be added to `systemd.services.radicale.serviceConfig.ReadWritePaths` if the deprecated `services.radicale.config` is used.
+
+- In the `security.acme` module, use of the `--reuse-key` parameter for Lego has been removed. It was introduced for HPKP, but this security feature is now deprecated. It is a better security practice to rotate key pairs instead of always keeping the same one. If you need to keep this parameter, you can add it back using `extraLegoRenewFlags` as an option for the appropriate certificate, as sketched below.
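+
+  A sketch of adding the flag back for one certificate (the domain is only a placeholder, and the per-certificate option path is assumed to be `security.acme.certs.<name>`):
+
+  ```nix
+  {
+    security.acme.certs."example.com".extraLegoRenewFlags = [ "--reuse-key" ];
+  }
+  ```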
+
+## Other Notable Changes {#sec-release-21.05-notable-changes}
+
+- `stdenv.lib` has been deprecated and will break eval in 21.11. Please use `pkgs.lib` instead. See [\#108938](https://github.com/NixOS/nixpkgs/issues/108938) for details.
+
+- [GNURadio](https://www.gnuradio.org/) has a `pkgs` attribute set, and there's a `gnuradio.callPackage` function that extends `pkgs` with a `mkDerivation`, and a `mkDerivationWith`, like Qt5. Now all `gnuradio.pkgs` are defined with `gnuradio.callPackage` and some packages that depend on gnuradio are defined with this as well.
+
+- [Privoxy](https://www.privoxy.org/) has been updated to version 3.0.32 (See [announcement](https://lists.privoxy.org/pipermail/privoxy-announce/2021-February/000007.html)). Compared to the previous release, Privoxy has gained support for HTTPS inspection (still experimental), Brotli decompression, several new filters and lots of bug fixes, including security ones. In addition, the package is now built with compression and external filters support, which were previously disabled.
+
+  Regarding the NixOS module, new options for HTTPS inspection have been added and `services.privoxy.extraConfig` has been replaced by the new [services.privoxy.settings](options.html#opt-services.privoxy.settings) (See [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) for the motivation).
+
+- [Kodi](https://kodi.tv/) has been updated to version 19.1 "Matrix". See the [announcement](https://kodi.tv/article/kodi-19-0-matrix-release) for further details.
+
+- The `services.packagekit.backend` option has been removed as it only supported a single setting which would always be the default. Instead new [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) compliant [services.packagekit.settings](options.html#opt-services.packagekit.settings) and [services.packagekit.vendorSettings](options.html#opt-services.packagekit.vendorSettings) options have been introduced.
+
+- [Nginx](https://nginx.org) has been updated to stable version 1.20.0. Now nginx uses the zlib-ng library by default.
+
+- KDE Gear (formerly KDE Applications) is upgraded to 21.04, see its [release notes](https://kde.org/announcements/gear/21.04/) for details.
+
+  The `kdeApplications` package set is now `kdeGear`, in keeping with the new name. The old name remains for compatibility, but it is deprecated.
+
+- [Libreswan](https://libreswan.org/) has been updated to version 4.4. The package now includes example configurations and manual pages by default. The NixOS module has been changed to use the upstream systemd units and write the configuration in the `/etc/ipsec.d/` directory. In addition, two new options have been added to specify connection policies ([services.libreswan.policies](options.html#opt-services.libreswan.policies)) and disable send/receive redirects ([services.libreswan.disableRedirects](options.html#opt-services.libreswan.disableRedirects)).
+
+- The Mailman NixOS module (`services.mailman`) has a new option [services.mailman.enablePostfix](options.html#opt-services.mailman.enablePostfix), defaulting to true, that controls integration with Postfix.
+
+  If this option is disabled, the default MTA configuration is not set, and you should set the options in `services.mailman.settings.mta` according to the desired configuration as described in the [Mailman documentation](https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html).
+
+- The default version of `nextcloud` is nextcloud21. Please note that it's _not_ possible to upgrade `nextcloud` across multiple major versions! This means that it's e.g. not possible to upgrade from nextcloud18 to nextcloud20 in a single deploy and most `20.09` users will have to upgrade to nextcloud20 first.
+
+  The package can be manually upgraded by setting [services.nextcloud.package](options.html#opt-services.nextcloud.package) to nextcloud21.
+
+- The setting [services.redis.bind](options.html#opt-services.redis.bind) defaults to `127.0.0.1` now, making Redis listen on the loopback interface only, and not all public network interfaces.
+
+- NixOS now emits a deprecation warning if systemd's `StartLimitInterval` setting is used in a `serviceConfig` section instead of in a `unitConfig`; that setting is deprecated and now undocumented for the service section by systemd upstream, but still effective and somewhat buggy there, which can be confusing. See [\#45785](https://github.com/NixOS/nixpkgs/issues/45785) for details.
+
+  All services should use [systemd.services._name_.startLimitIntervalSec](options.html#opt-systemd.services._name_.startLimitIntervalSec) or `StartLimitIntervalSec` in [systemd.services._name_.unitConfig](options.html#opt-systemd.services._name_.unitConfig) instead.
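+
+  As a sketch (`myservice` is a placeholder name), the recommended spelling looks like:
+
+  ```nix
+  {
+    systemd.services.myservice.startLimitIntervalSec = 60;
+  }
+  ```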
+
+- The `mediatomb` service declares new options. It also adapts existing options so the configuration generation is now lazy. The existing option `customCfg` (defaults to false), when enabled, stops the service configuration generation completely. It then expects the users to provide their own correct configuration at the right location (whereas the configuration was generated but not used at all before). The new option `transcodingOption` (defaults to `no`) allows a generated configuration. It makes the mediatomb service pull the necessary runtime dependencies into the nix store (whereas it was generated with hardcoded values before). The new option `mediaDirectories` allows users to declare autoscan media directories from their NixOS configuration:
+
+  ```nix
+  {
+    services.mediatomb.mediaDirectories = [
+      { path = "/var/lib/mediatomb/pictures"; recursive = false; hidden-files = false; }
+      { path = "/var/lib/mediatomb/audio"; recursive = true; hidden-files = false; }
+    ];
+  }
+  ```
+
+- The Unbound DNS resolver service (`services.unbound`) has been refactored to allow reloading, control sockets and to fix startup ordering issues.
+
+  It is now possible to enable a local UNIX control socket for unbound by setting the [services.unbound.localControlSocketPath](options.html#opt-services.unbound.localControlSocketPath) option.
+
+  Previously we just applied a very minimal set of restrictions and trusted unbound to properly drop root privs and capabilities.
+
+  As of this we are (for the most part) just using the upstream example unit file for unbound. The main difference is that we start unbound as `unbound` user with the required capabilities instead of letting unbound do the chroot & uid/gid changes.
+
+  The upstream unit configuration this is based on is a lot stricter with all kinds of permissions than our previous variant. It also came with the default of having the `Type` set to `notify`, therefore we are now also using the `unbound-with-systemd` package here. Unbound will start up, read the configuration files and start listening on the configured ports before systemd will declare the unit `active (running)`. This will likely help with startup order and the occasional race condition during system activation where the DNS service is started but not yet ready to answer queries. Services depending on `nss-lookup.target` or `unbound.service` are now able to use unbound when those targets have been reached.
+
+  In addition to the much stricter runtime environment, the `/dev/urandom` mount lines we previously had in the code (which randomly failed during the stop-phase) have been removed as systemd will take care of those for us.
+
+  The `preStart` script is now only required if we enabled the trust anchor updates (which are still enabled by default).
+
+  Another benefit of the refactoring is that we can now issue reloads via either `pkill -HUP unbound` or `systemctl reload unbound` to reload the running configuration without taking the daemon offline. A prerequisite of this was that the unbound configuration is available at a well-known path on the file system. We are using the path `/etc/unbound/unbound.conf` as that is the default in the CLI tooling, which in turn enables us to use `unbound-control` without passing a custom configuration location.
+
+  The module has also been reworked to be [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) compliant. As such, `services.unbound.extraConfig` has been removed and replaced by [services.unbound.settings](options.html#opt-services.unbound.settings). `services.unbound.interfaces` has been renamed to `services.unbound.settings.server.interface`.
+
+  `services.unbound.forwardAddresses` and `services.unbound.allowedAccess` have also been changed to use the new settings interface. You can follow the instructions when executing `nixos-rebuild` to upgrade your configuration to use the new interface.
+
+- The `services.dnscrypt-proxy2` module now takes the upstream's example configuration and updates it with the user's settings. An option has been added to restore the old behaviour if you prefer to declare the configuration from scratch.
+
+- NixOS now defaults to the unified cgroup hierarchy (cgroupsv2). See the [Fedora Article for 31](https://www.redhat.com/sysadmin/fedora-31-control-group-v2) for details on why this is desirable, and how it impacts containers.
+
+  If you want to run containers with a runtime that does not yet support cgroupsv2, you can switch back to the old behaviour by setting [systemd.enableUnifiedCgroupHierarchy](options.html#opt-systemd.enableUnifiedCgroupHierarchy) = `false`; and rebooting.
+
+- PulseAudio was upgraded to 14.0, with changes to the handling of default sinks. See its [release notes](https://www.freedesktop.org/wiki/Software/PulseAudio/Notes/14.0/).
+
+- GNOME users may wish to delete their `~/.config/pulse` due to the changes to stream routing logic. See [PulseAudio bug 832](https://gitlab.freedesktop.org/pulseaudio/pulseaudio/-/issues/832) for more information.
+
+- The zookeeper package does not provide `zooInspector.sh` anymore, as that "contrib" has been dropped from upstream releases.
+
+- In the ACME module, the data used to build the hash for the account directory has changed to accommodate new features to reduce account rate limit issues. This will trigger new account creation on the first rebuild following this update. No issues are expected to arise from this, thanks to the new account creation handling.
+
+- [users.users._name_.createHome](options.html#opt-users.users._name_.createHome) now always ensures home directory permissions to be `0700`. Permissions had previously been ignored for already existing home directories, possibly leaving them readable by others. The option's description was incorrect regarding ownership management and has been simplified greatly.
+
+- When defining a new user, one of [users.users._name_.isNormalUser](options.html#opt-users.users._name_.isNormalUser) and [users.users._name_.isSystemUser](options.html#opt-users.users._name_.isSystemUser) is now required. This is to prevent accidentally giving a UID above 1000 to system users, which could have unexpected consequences, like running user activation scripts for system users. Note that users defined with an explicit UID below 500 are exempted from this check, as [users.users._name_.isSystemUser](options.html#opt-users.users._name_.isSystemUser) has no effect for those.
+
+- The `security.apparmor` module, for the [AppArmor](https://gitlab.com/apparmor/apparmor/-/wikis/Documentation) Mandatory Access Control system, has been substantially improved along with related tools, so that module maintainers can now more easily write AppArmor profiles for NixOS. The most notable change on the user-side is the new option [security.apparmor.policies](options.html#opt-security.apparmor.policies), replacing the previous `profiles` option to provide a way to disable a profile and to select whether to confine in enforce mode (default) or in complain mode (see `journalctl -b --grep apparmor`). Security-minded users may also want to enable [security.apparmor.killUnconfinedConfinables](options.html#opt-security.apparmor.killUnconfinedConfinables), at the cost of having some of their processes killed when updating to a NixOS version introducing new AppArmor profiles.
+
+- The GNOME desktop manager once again installs gnome.epiphany by default.
+
+- NixOS now generates an empty `/etc/netgroup`. `/etc/netgroup` defines network-wide groups and may affect setups using NIS.
+
+- Platforms, like `stdenv.hostPlatform`, no longer have a `platform` attribute. It has been (mostly) flattened away:
+
+  - `platform.gcc` is now `gcc`
+
+  - `platform.kernel*` is now `linux-kernel.*`
+
+  Additionally, `platform.kernelArch` moved to the top level as `linuxArch` to match the other `*Arch` variables.
+
+  The `platform` grouping of these things never meant anything, and was just a historical/implementation artifact that was overdue for removal.
+
+- `services.restic` now uses a dedicated cache directory for every backup defined in `services.restic.backups`. The old global cache directory, `/root/.cache/restic`, is now unused and can be removed to free up disk space.
+
+- `isync`: The `isync` compatibility wrapper was removed and the Master/Slave terminology has been deprecated and should be replaced with Far/Near in the configuration file.
+
+- The nix-gc service now accepts `randomizedDelaySec` (default: `0`) and `persistent` (default: `true`) parameters. By default nix-gc will now run immediately if it would have been triggered at least once during the time when the timer was inactive.
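+
+  A sketch of using the new parameters (assuming they live under `nix.gc` and that the delay accepts a systemd time span) might look like:
+
+  ```nix
+  {
+    nix.gc = {
+      automatic = true;
+      randomizedDelaySec = "45min";
+      persistent = true;
+    };
+  }
+  ```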
+
+- The `rustPlatform.buildRustPackage` function is split into several hooks: cargoSetupHook to set up vendoring for Cargo-based projects, cargoBuildHook to build a project using Cargo, cargoInstallHook to install a project using Cargo, and cargoCheckHook to run tests in Cargo-based projects. With this change, mixed-language projects can use the relevant hooks within builders other than `buildRustPackage`. However, these changes also required several API changes to `buildRustPackage` itself:
+
+  - The `target` argument was removed. Instead, `buildRustPackage` will always use the same target as the C/C++ compiler that is used.
+
+  - The `cargoParallelTestThreads` argument was removed. Parallel tests are now disabled through `dontUseCargoParallelTests`.
+
+- The `rustPlatform.maturinBuildHook` hook was added. This hook can be used with `buildPythonPackage` to build Python packages that are written in Rust and use Maturin as their build tool.
+
+- Kubernetes has [deprecated docker](https://kubernetes.io/blog/2020/12/02/dont-panic-kubernetes-and-docker/) as container runtime. As a consequence, the Kubernetes module now has support for configuration of custom remote container runtimes and enables containerd by default. Note that containerd is more strict regarding container image OCI-compliance. As an example, images with CMD or ENTRYPOINT defined as strings (not lists) will fail on containerd, while working fine on docker. Please test your setup and container images with containerd prior to upgrading.
+
+- The GitLab module now has support for automatic backups. A schedule can be set with the [services.gitlab.backup.startAt](options.html#opt-services.gitlab.backup.startAt) option.
+
+- Prior to this release, systemd would also read system units from an undocumented `/etc/systemd-mutable/system` path. This path has been dropped from the defaults. That path (or others) can be re-enabled by adding it to the [boot.extraSystemdUnitPaths](options.html#opt-boot.extraSystemdUnitPaths) list.
+
+- PostgreSQL 9.5 is scheduled to reach end of life during the 21.05 life cycle and has been removed.
+
+- [Xfce4](https://www.xfce.org/) relies on GIO/GVfs for userspace virtual filesystem access in applications like [thunar](https://docs.xfce.org/xfce/thunar/) and [gigolo](https://docs.xfce.org/apps/gigolo/). For that to work, the gvfs nixos service is enabled by default, and it can be configured with the specific package that provides GVfs. Until now Xfce4 was setting it to use a lighter version of GVfs (without support for samba). To avoid conflicts with other desktop environments this setting has been dropped. Users that still want it should add the following to their system configuration:
+
+  ```nix
+  {
+    services.gvfs.package = pkgs.gvfs.override { samba = null; };
+  }
+  ```
+
+- The newly enabled `systemd-pstore.service` now automatically evacuates crashdumps and panic logs from the persistent storage to `/var/lib/systemd/pstore`. This prevents NVRAM from filling up, which ensures the latest diagnostic data is always stored and alleviates problems with writing new boot configurations.
+
+- Nixpkgs now contains [automatically packaged GNOME Shell extensions](https://github.com/NixOS/nixpkgs/pull/118232) from the [GNOME Extensions](https://extensions.gnome.org/) portal. You can find them, filed by their UUID, under `gnome38Extensions` attribute for GNOME 3.38 and under `gnome40Extensions` for GNOME 40. Finally, the `gnomeExtensions` attribute contains extensions for the latest GNOME Shell version in Nixpkgs, listed under a more human-friendly name. The unqualified attribute scope also contains manually packaged extensions. Note that the automatically packaged extensions are provided for convenience and are not checked or guaranteed to work.
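+
+  For instance, assuming the extension you want is packaged under that attribute set, a system-wide installation might look like the following sketch:
+
+  ```nix
+  { pkgs, ... }:
+  {
+    environment.systemPackages = with pkgs.gnomeExtensions; [
+      dash-to-dock  # hypothetical selection; attribute names vary per extension
+    ];
+  }
+  ```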
+
+- Erlang/OTP versions older than R21 got dropped. We also dropped the cuter package, as it was purely an example of how to build a package. We also dropped `lfe_1_2` as it could not build with R21+. Moving forward, we expect to only support 3 yearly releases of OTP.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2111.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2111.section.md
new file mode 100644
index 000000000000..400eb1062d9a
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2111.section.md
@@ -0,0 +1,583 @@
+# Release 21.11 (“Porcupine”, 2021/11/30) {#sec-release-21.11}
+
+- Support is planned until the end of June 2022, handing over to 22.05.
+
+## Highlights {#sec-release-21.11-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- Nix has been updated to version 2.4; refer to its [release notes](https://discourse.nixos.org/t/nix-2-4-released/15822) for more information on what has changed. The previous version of Nix, 2.3.16, remains available for the time being in the `nix_2_3` package.
+
+- `iptables` is now using `nf_tables` under the hood, by using `iptables-nft`,
+  similar to [Debian](https://wiki.debian.org/nftables#Current_status) and
+  [Fedora](https://fedoraproject.org/wiki/Changes/iptables-nft-default).
+  This means that the `ip[6]tables`, `arptables` and `ebtables` commands will actually
+  show rules from some specific tables in the `nf_tables` kernel subsystem.
+  In case you're migrating from an older release without rebooting, there might
+  be cases where you end up with iptables rules configured both in the legacy
+  `iptables` kernel backend, as well as in the `nf_tables` backend.
+  This can lead to confusing firewall behaviour. An `iptables-save` after
+  switching will complain about "iptables-legacy tables present".
+  It's probably best to reboot after the upgrade, or to manually remove all
+  legacy iptables rules (via the `iptables-legacy` package).
+
+- systemd got an `nftables` backend, and configures (networkd) rules in its
+  own `io.systemd.*` tables. Check `nft list ruleset` to see these rules, not
+  `iptables-save` (which only shows `iptables`-created rules).
+
+- PHP now defaults to PHP 8.0, updated from 7.4.
+
+- kops now defaults to 1.21.1, which uses containerd as the default runtime.
+
+- `python3` now defaults to Python 3.9, updated from Python 3.8.
+
+- PostgreSQL now defaults to major version 13.
+
+- spark now defaults to spark 3, updated from 2. A [migration guide](https://spark.apache.org/docs/latest/core-migration-guide.html#upgrading-from-core-24-to-30) is available.
+
+- Improvements have been made to the Hadoop module and package:
+  - HDFS and YARN now support production-ready highly available deployments with automatic failover.
+  - Hadoop now defaults to Hadoop 3, updated from 2.
+  - JournalNode, ZKFS and HTTPFS services have been added.
+
+- Activation scripts can now, optionally, be run during a `nixos-rebuild dry-activate` and can detect the dry activation by reading `$NIXOS_ACTION`.
+  This allows activation scripts to output what they would change if the activation was really run.
+  The users/modules activation script supports this and outputs some of its actions.
+
+- KDE Plasma now finally works on Wayland.
+
+- bash now defaults to major version 5.
+
+- Systemd was updated to version 249 (from 247).
+
+- Pantheon desktop has been updated to version 6. Due to changes in the screen locker, if locking doesn't work for you, please try `gsettings set org.gnome.desktop.lockdown disable-lock-screen false`.
+
+- `kubernetes-helm` now defaults to 3.7.0, which introduced some breaking changes to the experimental OCI manifest format. See [HIP 6](https://github.com/helm/community/blob/main/hips/hip-0006.md) for more details.
+  `helmfile` also defaults to 0.141.0, which is the minimum compatible version.
+
+- GNOME has been upgraded to 41. Please take a look at their [Release Notes](https://help.gnome.org/misc/release-notes/41.0/) for details.
+
+- LXD support was greatly improved:
+  - building LXD images from configurations is now directly possible with just nixpkgs
+  - Hydra now builds NixOS LXD images that can be used standalone with full nixos-rebuild support
+
+- OpenSSH was updated to version 8.8p1
+  - This breaks connections to old SSH daemons as ssh-rsa host keys and ssh-rsa public keys that were signed with SHA-1 are disabled by default now
+  - These can be re-enabled, see the [OpenSSH changelog](https://www.openssh.com/txt/release-8.8) for details
+
+- ORY Kratos was updated to version 0.8.0-alpha.3
+  - This release requires you to run SQL migrations. Please, as always, create a backup of your database first!
+  - The SDKs are now generated with tag v0alpha2 to reflect that some signatures have changed in a breaking fashion. Please update your imports from v0alpha1 to v0alpha2.
+  - The SMTPS scheme used in courier config URL with cleartext/StartTLS/TLS SMTP connection types is now only supporting implicit TLS. For StartTLS and cleartext SMTP, please use the SMTP scheme instead.
+  - for more details, see [Release Notes](https://github.com/ory/kratos/releases/tag/v0.8.0-alpha.1).
+
+## New Services {#sec-release-21.11-new-services}
+
+- [btrbk](https://digint.ch/btrbk/index.html), a backup tool for btrfs subvolumes, taking advantage of btrfs-specific capabilities to create atomic snapshots and transfer them incrementally to your backup locations. Available as [services.btrbk](options.html#opt-services.btrbk.instances).
+
+- [clipcat](https://github.com/xrelkd/clipcat/), an X11 clipboard manager written in Rust. Available at [services.clipcat](options.html#opt-services.clipcat.enable).
+
+- [dex](https://github.com/dexidp/dex), an OpenID Connect (OIDC) identity and OAuth 2.0 provider. Available at [services.dex](options.html#opt-services.dex.enable).
+
+- [geoipupdate](https://github.com/maxmind/geoipupdate), a GeoIP database updater from MaxMind. Available as [services.geoipupdate](options.html#opt-services.geoipupdate.enable).
+
+- [Jibri](https://github.com/jitsi/jibri), a service for recording or streaming a Jitsi Meet conference. Available as [services.jibri](options.html#opt-services.jibri.enable).
+
+- [Kea](https://www.isc.org/kea/), ISC's 2nd generation DHCP and DDNS server suite. Available at [services.kea](options.html#opt-services.kea.dhcp4).
+
+- [owncast](https://owncast.online/), self-hosted video live streaming solution. Available at [services.owncast](options.html#opt-services.owncast.enable).
+
+- [PeerTube](https://joinpeertube.org/), developed by Framasoft, is the free and decentralized alternative to video platforms. Available at [services.peertube](options.html#opt-services.peertube.enable).
+
+- [sourcehut](https://sr.ht), a collection of tools useful for software development. Available as [services.sourcehut](options.html#opt-services.sourcehut.enable).
+
+- [ucarp](https://download.pureftpd.org/pub/ucarp/README), a userspace implementation of the Common Address Redundancy Protocol (CARP). Available as [networking.ucarp](options.html#opt-networking.ucarp.enable).
+
+- Users of flashrom should migrate to [programs.flashrom.enable](options.html#opt-programs.flashrom.enable) and add themselves to the `flashrom` group to be able to access programmers supported by flashrom.
+
+- [vikunja](https://vikunja.io), a to-do list app. Available as [services.vikunja](#opt-services.vikunja.enable).
+
+- [opensnitch](https://github.com/evilsocket/opensnitch), an application firewall. Available as [services.opensnitch](#opt-services.opensnitch.enable).
+
+- [snapraid](https://www.snapraid.it/), a backup program for disk arrays.
+  Available as [snapraid](#opt-snapraid.enable).
+
+- [Hockeypuck](https://github.com/hockeypuck/hockeypuck), an OpenPGP key server. Available as [services.hockeypuck](#opt-services.hockeypuck.enable).
+
+- [buildkite-agent-metrics](https://github.com/buildkite/buildkite-agent-metrics), a command-line tool for collecting Buildkite agent metrics, now has a Prometheus exporter available as [services.prometheus.exporters.buildkite-agent](#opt-services.prometheus.exporters.buildkite-agent.enable).
+
+- [influxdb-exporter](https://github.com/prometheus/influxdb_exporter) a Prometheus exporter that exports metrics received on an InfluxDB compatible endpoint is now available as [services.prometheus.exporters.influxdb](#opt-services.prometheus.exporters.influxdb.enable).
+
+- [mx-puppet-discord](https://github.com/matrix-discord/mx-puppet-discord), a discord puppeting bridge for matrix. Available as [services.mx-puppet-discord](#opt-services.mx-puppet-discord.enable).
+
+- [MeshCentral](https://www.meshcommander.com/meshcentral2/overview), a remote administration service ("TeamViewer but self-hosted and with more features") is now available with a package and a module: [services.meshcentral.enable](#opt-services.meshcentral.enable)
+
+- [moonraker](https://github.com/Arksine/moonraker), an API web server for Klipper.
+  Available as [moonraker](#opt-services.moonraker.enable).
+
+- [influxdb2](https://github.com/influxdata/influxdb), a Scalable datastore for metrics, events, and real-time analytics. Available as [services.influxdb2](#opt-services.influxdb2.enable).
+
+- [isso](https://posativ.org/isso/), a commenting server similar to Disqus.
+  Available as [isso](#opt-services.isso.enable)
+
+- [navidrome](https://www.navidrome.org/), a personal music streaming server with
+  subsonic-compatible api. Available as [navidrome](#opt-services.navidrome.enable).
+
+- [fluidd](https://docs.fluidd.xyz/), a Klipper web interface for managing 3d printers using moonraker. Available as [fluidd](#opt-services.fluidd.enable).
+
+- [sx](https://github.com/earnestly/sx), a simple alternative to both xinit and startx for starting a Xorg server. Available as [services.xserver.displayManager.sx](#opt-services.xserver.displayManager.sx.enable)
+
+- [postfixadmin](https://postfixadmin.sourceforge.io/), a web based virtual user administration interface for Postfix mail servers. Available as [postfixadmin](#opt-services.postfixadmin.enable).
+
+- [prowlarr](https://wiki.servarr.com/prowlarr), an indexer manager/proxy built on the popular arr .net/reactjs base stack [services.prowlarr](#opt-services.prowlarr.enable).
+
+- [soju](https://sr.ht/~emersion/soju), a user-friendly IRC bouncer. Available as [services.soju](options.html#opt-services.soju.enable).
+
+- [nats](https://nats.io/), a high performance cloud and edge messaging system. Available as [services.nats](#opt-services.nats.enable).
+
+- [git](https://git-scm.com), a distributed version control system. Available as [programs.git](options.html#opt-programs.git.enable).
+
+- [parsedmarc](https://domainaware.github.io/parsedmarc/), a service
+  which parses incoming [DMARC](https://dmarc.org/) reports and stores
+  or sends them to a downstream service for further analysis.
+  Documented in [its manual entry](#module-services-parsedmarc).
+
+- [spark](https://spark.apache.org/), a unified analytics engine for large-scale data processing.
+
+- [touchegg](https://github.com/JoseExposito/touchegg), a multi-touch gesture recognizer. Available as [services.touchegg](#opt-services.touchegg.enable).
+
+- [pantheon-tweaks](https://github.com/pantheon-tweaks/pantheon-tweaks), an unofficial system settings panel for Pantheon. Available as [programs.pantheon-tweaks](#opt-programs.pantheon-tweaks.enable).
+
+- [joycond](https://github.com/DanielOgorchock/joycond), a service that uses `hid-nintendo` to provide Nintendo Joy-Con pairing and better Nintendo Switch Pro Controller support.
+
+- [multipath](https://github.com/opensvc/multipath-tools), the device mapper multipath (DM-MP) daemon. Available as [services.multipath](#opt-services.multipath.enable).
+
+- [seafile](https://www.seafile.com/en/home/), an open source file syncing & sharing software. Available as [services.seafile](options.html#opt-services.seafile.enable).
+
+- [rasdaemon](https://github.com/mchehab/rasdaemon), a hardware error logging daemon. Available as [hardware.rasdaemon](#opt-hardware.rasdaemon.enable).
+
+- The `code-server` module is now available.
+
+- [xmrig](https://github.com/xmrig/xmrig), a high performance, open source, cross platform RandomX, KawPow, CryptoNight and AstroBWT unified CPU/GPU miner and RandomX benchmark.
+
+- Auto nice daemons [ananicy](https://github.com/Nefelim4ag/Ananicy) and [ananicy-cpp](https://gitlab.com/ananicy-cpp/ananicy-cpp/). Available as [services.ananicy](#opt-services.ananicy.enable).
+
+- [smartctl_exporter](https://github.com/prometheus-community/smartctl_exporter), a Prometheus exporter for [S.M.A.R.T.](https://en.wikipedia.org/wiki/S.M.A.R.T.) data. Available as [services.prometheus.exporters.smartctl](options.html#opt-services.prometheus.exporters.smartctl.enable).
+
+- [twingate](https://docs.twingate.com/docs/linux), a high performance, easy to use zero trust solution that enables access to private resources from any device with better security than a VPN.
+
+## Backward Incompatibilities {#sec-release-21.11-incompatibilities}
+
+- The NixOS VM test framework, `pkgs.nixosTest`/`make-test-python.nix` (`pkgs.testers.nixosTest` since 22.05), now requires detaching commands such as `succeed("foo &")` and `succeed("foo | xclip -i")` to close stdout.
+  This can be done with a redirect such as `succeed("foo >&2 &")`. This breaking change was necessitated by a race condition causing tests to fail or hang.
+  It applies to all methods that invoke commands on the nodes, including `execute`, `succeed`, `fail`, `wait_until_succeeds`, `wait_until_fails`.
+
+- The `services.wakeonlan` option was removed, and replaced with `networking.interfaces.<name>.wakeOnLan`.
+
+- The `security.wrappers` option now requires you to always specify an owner, group and whether the setuid/setgid bit should be set.
+  This is motivated by the fact that before NixOS 21.11, specifying either setuid or setgid but not owner/group resulted in wrappers owned by nobody/nogroup, which is unsafe.
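+
+  A minimal sketch of the new required form (the wrapped program and ownership below are purely illustrative):
+
+  ```nix
+  { pkgs, ... }:
+  {
+    security.wrappers.example-ping = {
+      source = "${pkgs.iputils}/bin/ping";
+      owner  = "root";
+      group  = "root";
+      setuid = true;  # explicitly state whether the setuid/setgid bit is wanted
+    };
+  }
+  ```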
+
+- Since `iptables` now uses `nf_tables` backend and `ipset` doesn't support it, some applications (ferm, shorewall, firehol) may have limited functionality.
+
+- The `paperless` module and package have been removed. All users should migrate to the
+  successor `paperless-ng` instead. The Paperless project [has been
+  archived](https://github.com/the-paperless-project/paperless/commit/9b0063c9731f7c5f65b1852cb8caff97f5e40ba4)
+  and advises all users to use `paperless-ng` instead.
+
+  Users can use the `services.paperless-ng` module as a replacement while noting the following incompatibilities:
+
+  - `services.paperless.ocrLanguages` has no replacement. Users should migrate to [`services.paperless-ng.extraConfig`](options.html#opt-services.paperless-ng.extraConfig) instead:
+
+  ```nix
+  {
+    services.paperless-ng.extraConfig = {
+      # Provide languages as ISO 639-2 codes
+      # separated by a plus (+) sign.
+      # https://en.wikipedia.org/wiki/List_of_ISO_639-2_codes
+      PAPERLESS_OCR_LANGUAGE = "deu+eng+jpn"; # German & English & Japanese
+    };
+  }
+  ```
+
+  - If you previously specified `PAPERLESS_CONSUME_MAIL_*` settings in
+    `services.paperless.extraConfig` you should remove those options now. You
+    now _must_ define those settings in the admin interface of paperless-ng.
+
+  - Option `services.paperless.manage` no longer exists.
+    Use the script at `${services.paperless-ng.dataDir}/paperless-ng-manage` instead.
+    Note that this script only exists after the `paperless-ng` service has been
+    started at least once.
+
+  - After switching to the new system configuration you should run the Django
+    management command to reindex your documents and optionally create a user,
+    if you don't have one already.
+
+    To do so, enter the data directory (the value of
+    `services.paperless-ng.dataDir`, `/var/lib/paperless` by default), switch
+    to the paperless user and execute the management command like below:
+
+    ```
+    $ cd /var/lib/paperless
+    $ su paperless -s /bin/sh
+    $ ./paperless-ng-manage document_index reindex
+    # if not already done create a user account, paperless-ng requires a login
+    $ ./paperless-ng-manage createsuperuser
+    Username (leave blank to use 'paperless'): my-user-name
+    Email address: me@example.com
+    Password: **********
+    Password (again): **********
+    Superuser created successfully.
+    ```
+
+- The `staticjinja` package has been upgraded from 1.0.4 to 4.1.1
+
+- Firefox v91 does not support add-ons with an invalid signature anymore. Firefox ESR needs to be used for Nix add-on support.
+
+- The `erigon` ethereum node has moved to a new database format in `2021-05-04`, and requires a full resync
+
+- The `erigon` ethereum node has moved its database location in `2021-08-03`, users upgrading must manually move their chaindata (see [release notes](https://github.com/ledgerwatch/erigon/releases/tag/v2021.08.03)).
+
+- [users.users.&lt;name&gt;.group](options.html#opt-users.users._name_.group) no longer defaults to `nogroup`, which was insecure. Out-of-tree modules are likely to require adaptation: instead of
+  ```nix
+  {
+    users.users.foo = {
+      isSystemUser = true;
+    };
+  }
+  ```
+  also create a group for your user:
+  ```nix
+  {
+    users.users.foo = {
+      isSystemUser = true;
+      group = "foo";
+    };
+    users.groups.foo = {};
+  }
+  ```
+
+- `services.geoip-updater` was broken and has been replaced by [services.geoipupdate](options.html#opt-services.geoipupdate.enable).
+
+- `ihatemoney` has been updated to version 5.1.1 ([release notes](https://github.com/spiral-project/ihatemoney/blob/5.1.1/CHANGELOG.rst)). If you serve ihatemoney by HTTP rather than HTTPS, you must set [services.ihatemoney.secureCookie](options.html#opt-services.ihatemoney.secureCookie) to `false`.
+
+- PHP 7.3 is no longer supported due to upstream not supporting this version for the entire lifecycle of the 21.11 release.
+
+- Those making use of `buildBazelPackage` will need to regenerate the fetch hashes (preferred), or set `fetchConfigured = false;`.
+
+- `consul` was upgraded to a new major release with breaking changes, see [upstream changelog](https://github.com/hashicorp/consul/releases/tag/v1.10.0).
+
+- `fsharp41` has been removed in favour of using the latest `dotnet-sdk`
+
+- The following F#-related packages have been removed for being unmaintained. Please use `fetchNuGet` for specific packages (see the sketch after this list).
+
+  - ExtCore
+  - Fake
+  - Fantomas
+  - FsCheck
+  - FsCheck262
+  - FsCheckNunit
+  - FSharpAutoComplete
+  - FSharpCompilerCodeDom
+  - FSharpCompilerService
+  - FSharpCompilerTools
+  - FSharpCore302
+  - FSharpCore3125
+  - FSharpCore4001
+  - FSharpCore4117
+  - FSharpData
+  - FSharpData225
+  - FSharpDataSQLProvider
+  - FSharpFormatting
+  - FsLexYacc
+  - FsLexYacc706
+  - FsLexYaccRuntime
+  - FsPickler
+  - FsUnit
+  - Projekt
+  - Suave
+  - UnionArgParser
+  - ExcelDnaRegistration
+  - MathNetNumerics
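+
+  As a sketch of fetching a single NuGet package (assuming the `pname`/`version`/`sha256` interface; the values shown are placeholders):
+
+  ```nix
+  { lib, fetchNuGet }:
+
+  fetchNuGet {
+    pname = "FSharp.Core";
+    version = "6.0.1";
+    sha256 = lib.fakeSha256;  # replace with the real hash
+  }
+  ```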
+
+- `programs.x2goserver` is now `services.x2goserver`
+
+- The following dotnet-related packages have been removed for being unmaintained. Please use `fetchNuGet` for specific packages.
+  - Autofac
+  - SystemValueTuple
+  - MicrosoftDiaSymReader
+  - MicrosoftDiaSymReaderPortablePdb
+  - SystemCollectionsImmutable
+  - SystemCollectionsImmutable131
+  - SystemReflectionMetadata
+  - NUnit350
+  - Deedle
+  - ExcelDna
+  - GitVersionTree
+  - NDeskOptions
+
+- The `antlr` package now defaults to the 4.x release instead of the
+  old 2.7.7 version.
+
+- The `pulseeffects` package was updated to [version 4.x](https://github.com/wwmm/easyeffects/releases/tag/v6.0.0) and renamed to `easyeffects`.
+
+- The `libwnck` package now defaults to the 3.x release instead of the
+  old 2.31.0 version.
+
+- The `bitwarden_rs` packages and modules were renamed to `vaultwarden`
+  [following upstream](https://github.com/dani-garcia/vaultwarden/discussions/1642). More specifically,
+
+  - `pkgs.bitwarden_rs`, `pkgs.bitwarden_rs-sqlite`, `pkgs.bitwarden_rs-mysql` and
+    `pkgs.bitwarden_rs-postgresql` were renamed to `pkgs.vaultwarden`, `pkgs.vaultwarden-sqlite`,
+    `pkgs.vaultwarden-mysql` and `pkgs.vaultwarden-postgresql`, respectively.
+
+    - Old names are preserved as aliases for backwards compatibility, but may be removed in the future.
+    - The `bitwarden_rs` executable was also renamed to `vaultwarden` in all packages.
+
+  - `pkgs.bitwarden_rs-vault` was renamed to `pkgs.vaultwarden-vault`.
+
+    - `pkgs.bitwarden_rs-vault` is preserved as an alias for backwards compatibility, but may be removed in the future.
+    - The static files were moved from `/usr/share/bitwarden_rs` to `/usr/share/vaultwarden`.
+
+  - The `services.bitwarden_rs` config module was renamed to `services.vaultwarden`.
+
+    - `services.bitwarden_rs` is preserved as an alias for backwards compatibility, but may be removed in the future.
+
+  - `systemd.services.bitwarden_rs`, `systemd.services.backup-bitwarden_rs` and `systemd.timers.backup-bitwarden_rs`
+    were renamed to `systemd.services.vaultwarden`, `systemd.services.backup-vaultwarden` and
+    `systemd.timers.backup-vaultwarden`, respectively.
+
+    - Old names are preserved as aliases for backwards compatibility, but may be removed in the future.
+
+  - `users.users.bitwarden_rs` and `users.groups.bitwarden_rs` were renamed to `users.users.vaultwarden` and
+    `users.groups.vaultwarden`, respectively.
+
+  - The data directory remains located at `/var/lib/bitwarden_rs`, for backwards compatibility.
+
+- `yggdrasil` was upgraded to a new major release with breaking changes, see [upstream changelog](https://github.com/yggdrasil-network/yggdrasil-go/releases/tag/v0.4.0).
+
+- `icingaweb2` was upgraded to a new release which requires a manual database upgrade, see [upstream changelog](https://github.com/Icinga/icingaweb2/releases/tag/v2.9.0).
+
+- The `isabelle` package has been upgraded from 2020 to 2021
+
+- The `mingw-64` package has been upgraded from 6.0.0 to 9.0.0
+
+- `tt-rss` was upgraded to the commit on 2021-06-21, which has breaking changes. If you use `services.tt-rss.extraConfig` you should migrate to the `putenv`-style configuration. See [this Discourse post](https://community.tt-rss.org/t/rip-config-php-hello-classes-config-php/4337) in the tt-rss forums for more details.
+
+- The following Visual Studio Code extensions were renamed to keep the naming convention uniform.
+
+  - `bbenoist.Nix` -> `bbenoist.nix`
+  - `CoenraadS.bracket-pair-colorizer` -> `coenraads.bracket-pair-colorizer`
+  - `golang.Go` -> `golang.go`
+
+- `services.uptimed` now uses `/var/lib/uptimed` as its stateDirectory instead of `/var/spool/uptimed`. Make sure to move all files to the new directory.
+
+- Deprecated package aliases in `emacs.pkgs.*` have been removed. These aliases were remnants of the old Emacs package infrastructure. We now use exact upstream names wherever possible.
+
+- `programs.neovim.runtime` switched to a `linkFarm` internally, making it impossible to use wildcards in the `source` argument.
+
+- The `openrazer` and `openrazer-daemon` packages as well as the `hardware.openrazer` module now require users to be members of the `openrazer` group instead of `plugdev`. With this change, users no longer need be granted the entire set of `plugdev` group permissions, which can include permissions other than those required by `openrazer`. This is desirable from a security point of view. The setting [`hardware.openrazer.users`](options.html#opt-services.hardware.openrazer.users) can be used to add users to the `openrazer` group.
+
+- The fontconfig service's dpi option has been removed.
+  Fontconfig should use Xft settings by default so there's no need to override one value in multiple places.
+  The user can set DPI via `~/.Xresources` properly, or at the system level per monitor, or as a last resort at the system level with `services.xserver.dpi`.
+
+- The `yambar` package has been split into `yambar` and `yambar-wayland`, corresponding to the xorg and wayland backend respectively. Please switch to `yambar-wayland` if you are on wayland.
+
+- The `services.minio` module gained an additional option `consoleAddress`, which
+  configures the address and port the web UI listens on; it defaults to `:9001`.
+  To be able to access the web UI, this port needs to be opened in the firewall.
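+
+  A minimal sketch showing only the parts relevant to this change (other minio settings are omitted):
+
+  ```nix
+  {
+    services.minio = {
+      enable = true;
+      consoleAddress = ":9001";  # address and port of the web UI
+    };
+    # Open the console port so the web UI is reachable.
+    networking.firewall.allowedTCPPorts = [ 9001 ];
+  }
+  ```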
+
+- The `varnish` package was upgraded from 6.3.x to 7.x. `varnish60` for the last LTS release is also still available.
+
+- The `kubernetes` package was upgraded to 1.22. The `kubernetes.apiserver.kubeletHttps` option was removed and HTTPS is always used.
+
+- The attribute `linuxPackages_latest_hardened` was dropped because the hardened patches
+  lag behind the upstream kernel which made version bumps harder. If you want to use
+  a hardened kernel, please pin it explicitly with a versioned attribute such as
+  `linuxPackages_5_10_hardened`.
+
+- The `nomad` package now defaults to a 1.1.x release instead of 1.0.x
+
+- If `exfat` is included in `boot.supportedFilesystems` and kernel 5.7
+  or later is used, the `exfatprogs` user-space utilities are used instead of `exfat`.
+
+- The `todoman` package was upgraded from 3.9.0 to 4.0.0. This introduces breaking changes in the [configuration file](https://todoman.readthedocs.io/en/stable/configure.html#configuration-file) format.
+
+- The `datadog-agent`, `datadog-integrations-core` and `datadog-process-agent` packages
+  were upgraded from 6.11.2 to 7.30.2, git-2018-09-18 to 7.30.1 and 6.11.1 to 7.30.2,
+  respectively. As a result `services.datadog-agent` has had breaking changes to the
+  configuration file. For details, see the [upstream changelog](https://github.com/DataDog/datadog-agent/blob/main/CHANGELOG.rst).
+
+- `opencv2` no longer includes the non-free libraries by default, and consequently `pfstools` no longer includes OpenCV support by default. Both packages now support an `enableUnfree` option to re-enable this functionality.
+
+- `services.xserver.displayManager.defaultSession = "plasma5"` does not work anymore; instead, use either `"plasma"` for the Plasma X11 session or `"plasmawayland"` for the Plasma Wayland session.
+
+- `boot.kernelParams` now only accepts one command line parameter per string. This change is aimed at reducing common mistakes like `"param = 12"`, which would be parsed as 3 parameters.
+
+- `nix.daemonNiceLevel` and `nix.daemonIONiceLevel` have been removed in favour of the new options [`nix.daemonCPUSchedPolicy`](options.html#opt-nix.daemonCPUSchedPolicy), [`nix.daemonIOSchedClass`](options.html#opt-nix.daemonIOSchedClass) and [`nix.daemonIOSchedPriority`](options.html#opt-nix.daemonIOSchedPriority). Please refer to the options documentation and the `sched(7)` and `ioprio_set(2)` man pages for guidance on how to use them.
+
+- The `coursier` package's binary was renamed from `coursier` to `cs`. Completions which haven't worked for a while should now work with the renamed binary. To keep using `coursier`, you can create a shell alias.
+
+- The `services.mosquitto` module has been rewritten to support multiple listeners and per-listener configuration.
+  Module configurations from previous releases will no longer work and must be updated.
+
+- The `fluidsynth_1` attribute has been removed, as this legacy version is no longer needed in nixpkgs. The actively maintained 2.x series is available as `fluidsynth` unchanged.
+
+- Nextcloud 20 (`pkgs.nextcloud20`) has been dropped because it was EOLed by upstream in 2021-10.
+
+- The `virtualisation.pathsInNixDB` option was renamed
+  [`virtualisation.additionalPaths`](options.html#opt-virtualisation.additionalPaths).
+
+- The `services.ddclient.password` option was removed, and replaced with `services.ddclient.passwordFile`.
+
+- The default GNAT version has been changed: The `gnat` attribute now points to `gnat12`
+  instead of `gnat9`.
+
+- `retroArchCores` has been removed. This means that using `nixpkgs.config.retroarch` to customize RetroArch cores is not supported anymore. Instead, use package overrides, for example: `retroarch.override { cores = with libretro; [ citra snes9x ]; };`. Also, `retroarchFull` derivation is available for those who want to have all RetroArch cores available.
+
+- The Linux kernel for security reasons now restricts access to BPF syscalls via `BPF_UNPRIV_DEFAULT_OFF=y`. Unprivileged access can be reenabled via the `kernel.unprivileged_bpf_disabled` sysctl knob.
+
+- `/usr` will always be included in the initial ramdisk. See the `fileSystems.<name>.neededForBoot` option.
+  If any files exist under `/usr` (which is not typical for NixOS), they will be included in the initial ramdisk, increasing its size to a possibly problematic extent.
+
+- `pkgs.haskell-language-server` will now by default be linked dynamically to improve TemplateHaskell compatibility. To mitigate the increased closure size it will now by default only support our current default ghc (at the moment 9.0.2). Add other ghc versions via e.g. `pkgs.haskell-language-server.override { supportedGhcVersions = [ "90" "92" ]; }`.
+
+- `pkgs.redis` is now built using the system jemalloc. This disables the experimental active defragmentation feature of redis. Users who require this feature can switch back to redis' vendored version of jemalloc by setting `services.redis.package = pkgs.redis.override { useSystemJemalloc = false; };`.
+
+## Other Notable Changes {#sec-release-21.11-notable-changes}
+
+- The linux kernel package infrastructure was moved out of `all-packages.nix`, and restructured. Linux related functions and attributes now live under the `pkgs.linuxKernel` attribute set.
+  In particular the versioned `linuxPackages_*` package sets (such as `linuxPackages_5_4`) and kernels from `pkgs` were moved there and now live under `pkgs.linuxKernel.packages.*`. The unversioned ones (such as `linuxPackages_latest`) remain untouched.
+
+- In NixOS virtual machines (QEMU), the `virtualisation` module has been updated with new options:
+    - [`forwardPorts`](options.html#opt-virtualisation.forwardPorts) to configure IPv4 port forwarding,
+    - [`sharedDirectories`](options.html#opt-virtualisation.sharedDirectories) to set up shared host directories,
+    - [`resolution`](options.html#opt-virtualisation.resolution) to set the screen resolution,
+    - [`useNixStoreImage`](options.html#opt-virtualisation.useNixStoreImage) to use a disk image for the Nix store instead of 9P.
+
+  In addition, the default [`msize`](options.html#opt-virtualisation.msize) parameter in 9P filesystems (including /nix/store and all shared directories) has been increased to 16K for improved performance.
+
+- The default value of [`services.openssh.logLevel`](options.html#opt-services.openssh.logLevel) has been changed from `"VERBOSE"` to `"INFO"`. This brings NixOS in line with upstream and other Linux distributions, and reduces log spam on servers due to bruteforcing botnets.
+
+  However, if [`services.fail2ban.enable`](options.html#opt-services.fail2ban.enable) is `true`, `fail2ban` will override the verbosity back to `"VERBOSE"`, so that `fail2ban` can observe the failed login attempts from the SSH logs.
+
+- The [`services.xserver.extraLayouts`](options.html#opt-services.xserver.extraLayouts) option no longer causes additional rebuilds when a layout is added or modified.
+
+- Sway: The terminal emulator `rxvt-unicode` is no longer installed by default via `programs.sway.extraPackages`. The current default configuration uses `alacritty` (and soon `foot`) so this is only an issue when using a customized configuration and not installing `rxvt-unicode` explicitly.
+
+- `python3` now defaults to Python 3.9. Python 3.9 introduces many deprecation warnings, please look at the [What's New In Python 3.9 post](https://docs.python.org/3/whatsnew/3.9.html) for more information.
+
+- `qtile` has been updated from `0.16.0` to `0.18.0`; please check the [qtile changelog](https://github.com/qtile/qtile/blob/master/CHANGELOG) for changes.
+
+- The `claws-mail` package now references the new GTK+ 3 release branch, major version 4. To use the GTK+ 2 releases, one can install the `claws-mail-gtk2` package.
+
+- The wordpress module provides a new interface that allows the use of different webservers with the new option [`services.wordpress.webserver`](options.html#opt-services.wordpress.webserver). Currently `httpd`, `caddy` and `nginx` are supported. The definitions of wordpress sites should now be set in [`services.wordpress.sites`](options.html#opt-services.wordpress.sites).
+
+  Site definitions that use the old interface are automatically migrated to the new option. This backward compatibility will be removed in 22.05.
+
+- The dokuwiki module provides a new interface that allows the use of different webservers with the new option [`services.dokuwiki.webserver`](options.html#opt-services.dokuwiki.webserver). Currently `caddy` and `nginx` are supported. The definitions of dokuwiki sites should now be set in [`services.dokuwiki.sites`](options.html#opt-services.dokuwiki.sites).
+
+  Site definitions that use the old interface are automatically migrated to the new option. This backward compatibility will be removed in 22.05.
+
+- The order of NSS (host) modules has been brought in line with upstream
+  recommendations:
+
+  - The `myhostname` module is placed before the `resolve` (optional) and `dns`
+    entries, but after `file` (to allow overriding via `/etc/hosts` /
+    `networking.extraHosts`, and prevent ISPs with catchall-DNS resolvers from
+    hijacking `.localhost` domains)
+  - The `mymachines` module, which provides hostname resolution for local
+    containers (registered with `systemd-machined`) is placed to the front, to
+    make sure its mappings are preferred over other resolvers.
+  - If systemd-networkd is enabled, the `resolve` module is placed before
+    `files` and `myhostname`, as it provides the same logic internally, with
+    caching.
+  - The `mdns(_minimal)` module has been updated to the new priorities.
+
+  If you use your own NSS host modules, make sure to update your priorities
+  according to these rules (see the sketch after this list):
+
+  - NSS modules which should be queried before `resolved` DNS resolution should
+    use `mkBefore`.
+  - NSS modules which should be queried after `resolved`, `files` and
+    `myhostname`, but before `dns`, should use the default priority.
+  - NSS modules which should come after `dns` should use `mkAfter`.
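+
+  For example, a minimal sketch of adding custom host modules (the module names are hypothetical; only the `mkBefore`/`mkAfter` placement matters):
+
+  ```nix
+  { lib, ... }:
+  {
+    system.nssDatabases.hosts = lib.mkMerge [
+      (lib.mkBefore [ "mymodule_fast" ])      # queried before `resolve` and `dns`
+      (lib.mkAfter  [ "mymodule_fallback" ])  # queried after `dns`
+    ];
+  }
+  ```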
+
+- The [networking.wireless](options.html#opt-networking.wireless.enable) module (based on wpa_supplicant) has been heavily reworked, solving a number of issues and adding useful features:
+  - The automatic discovery of wireless interfaces at boot has been made reliable again (issues [#101963](https://github.com/NixOS/nixpkgs/issues/101963), [#23196](https://github.com/NixOS/nixpkgs/issues/23196)).
+  - WPA3 and Fast BSS Transition (802.11r) are now enabled by default for all networks.
+  - Secrets like pre-shared keys and passwords can now be handled safely, meaning without including them in a world-readable file (`wpa_supplicant.conf` under /nix/store).
+    This is achieved by storing the secrets in a secured [environmentFile](options.html#opt-networking.wireless.environmentFile) and referring to them through environment variables that are expanded inside the configuration (see the sketch after this list).
+  - With multiple interfaces declared, independent wpa_supplicant daemons are started, one for each interface (the services are named `wpa_supplicant-wlan0`, `wpa_supplicant-wlan1`, etc.).
+  - The generated `wpa_supplicant.conf` file is now formatted for easier reading.
+  - A new [scanOnLowSignal](options.html#opt-networking.wireless.scanOnLowSignal) option has been added to facilitate fast roaming between access points (enabled by default).
+  - A new [networks.&lt;name&gt;.authProtocols](options.html#opt-networking.wireless.networks._name_.authProtocols) option has been added to change the authentication protocols used when connecting to a network.
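+
+  As a sketch of the new secret handling (the file path, network name and variable name are assumptions for illustration):
+
+  ```nix
+  {
+    networking.wireless = {
+      enable = true;
+      # File created outside the Nix store, e.g. containing: PSK_HOME=my-secret-passphrase
+      environmentFile = "/run/secrets/wireless.env";
+      networks."home-wifi" = {
+        psk = "@PSK_HOME@";  # expanded from the environment file at runtime
+      };
+    };
+  }
+  ```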
+
+- The [networking.wireless.iwd](options.html#opt-networking.wireless.iwd.enable) module has a new [networking.wireless.iwd.settings](options.html#opt-networking.wireless.iwd.settings) option.
+
+- The [services.smokeping.host](options.html#opt-services.smokeping.host) option was added and defaulted to `localhost`. Before, `smokeping` listened to all interfaces by default. NixOS defaults generally aim to provide non-Internet-exposed defaults for databases and internal monitoring tools, see e.g. [#100192](https://github.com/NixOS/nixpkgs/issues/100192). Further, the systemd service for `smokeping` got reworked defaults for increased operational stability, see [PR #144127](https://github.com/NixOS/nixpkgs/pull/144127) for details.
+
+- The [services.syncoid.enable](options.html#opt-services.syncoid.enable) module now properly drops ZFS permissions after usage. Before it delegated permissions to whole pools instead of datasets and didn't clean up after execution. You can manually look this up for your pools by running `zfs allow your-pool-name` and use `zfs unallow syncoid your-pool-name` to clean this up.
+
+- ZFS: `latestCompatibleLinuxPackages` is now exported on the zfs package. One can use `boot.kernelPackages = config.boot.zfs.package.latestCompatibleLinuxPackages;` to always track the latest compatible kernel with a given version of zfs.
+
+- Nginx will use the value of `sslTrustedCertificate` if provided for a virtual host, even if `enableACME` is set. This is useful for providers not using the same certificate to sign OCSP responses and server certificates.
+
+- `lib.formats.yaml`'s `generate` will not generate JSON anymore, but instead use more of the YAML-specific syntax.
+
+- MariaDB was upgraded from 10.5.x to 10.6.x. Please read the [upstream release notes](https://mariadb.com/kb/en/changes-improvements-in-mariadb-106/) for changes and upgrade instructions.
+
+- The MariaDB C client library, also known as libmysqlclient or mariadb-connector-c, was upgraded from 3.1.x to 3.2.x. While this should hopefully not have any impact, this upgrade comes with some changes to default behavior, so you might want to review the [upstream release notes](https://mariadb.com/kb/en/changes-and-improvements-in-mariadb-connector-c-32/).
+
+- GNOME desktop environment now enables `QGnomePlatform` as the Qt platform theme, which should avoid crashes when opening file chooser dialogs in Qt apps by using XDG desktop portal. Additionally, it will make the apps fit better visually.
+
+- `rofi` has been updated from `1.6.1` to `1.7.0`; one important change is the removal of the old xresources-based configuration setup. Read more [in rofi's changelog](https://github.com/davatorium/rofi/blob/cb12e6fc058f4a0f4f/Changelog#L1).
+
+- ipfs now defaults to not listening on your local network. This setting was changed because server providers won't accept port scanning on their private networks. If you have several ipfs instances running on a network you own, feel free to change the setting `ipfs.localDiscovery = true;`. localDiscovery enables different instances to discover each other and share data.
+
+- `lua` and `luajit` interpreters have been patched to avoid looking into /usr/lib
+  directories, thus increasing the purity of the build.
+
+- Three new options, [xdg.mime.addedAssociations](#opt-xdg.mime.addedAssociations), [xdg.mime.defaultApplications](#opt-xdg.mime.defaultApplications), and [xdg.mime.removedAssociations](#opt-xdg.mime.removedAssociations) have been added to the [xdg.mime](#opt-xdg.mime.enable) module to allow the configuration of `/etc/xdg/mimeapps.list`.
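+
+  For example, a minimal sketch (the MIME type and desktop-file names below are only illustrative):
+
+  ```nix
+  {
+    xdg.mime.addedAssociations = {
+      "application/pdf" = "org.gnome.Evince.desktop";
+    };
+    xdg.mime.defaultApplications = {
+      "application/pdf" = "org.gnome.Evince.desktop";
+    };
+  }
+  ```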
+
+- Kopia was upgraded from 0.8.x to 0.9.x. Please read the [upstream release notes](https://github.com/kopia/kopia/releases/tag/v0.9.0) for changes and upgrade instructions.
+
+- The `systemd.network` module has gained support for the FooOverUDP link type.
+
+- The `networking` module has a new `networking.fooOverUDP` option to configure Foo-over-UDP encapsulations.
+
+- `networking.sits` now supports Foo-over-UDP encapsulation.
+
+- The `virtualisation.libvirtd` module has been refactored and updated with new options (a sketch follows this list):
+    - `virtualisation.libvirtd.qemu*` options (e.g.: `virtualisation.libvirtd.qemuRunAsRoot`) were moved to [`virtualisation.libvirtd.qemu`](options.html#opt-virtualisation.libvirtd.qemu) submodule,
+    - software TPM1/TPM2 support (e.g.: Windows 11 guests) ([`virtualisation.libvirtd.qemu.swtpm`](options.html#opt-virtualisation.libvirtd.qemu.swtpm)),
+    - custom OVMF package (e.g.: `pkgs.OVMFFull` with HTTP, CSM and Secure Boot support) ([`virtualisation.libvirtd.qemu.ovmf.package`](options.html#opt-virtualisation.libvirtd.qemu.ovmf.package)).
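+
+  A minimal sketch using the refactored options (the values are illustrative):
+
+  ```nix
+  { pkgs, ... }:
+  {
+    virtualisation.libvirtd = {
+      enable = true;
+      qemu = {
+        runAsRoot = false;            # formerly virtualisation.libvirtd.qemuRunAsRoot
+        swtpm.enable = true;          # software TPM, e.g. for Windows 11 guests
+        ovmf.package = pkgs.OVMFFull; # OVMF with HTTP, CSM and Secure Boot support
+      };
+    };
+  }
+  ```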
+
+- The `cawbird` Twitter client now uses its own API keys to count as different application than upstream builds. This is done to evade application-level rate limiting. While existing accounts continue to work, users may want to remove and re-register their account in the client to enjoy a better user experience and benefit from this change.
+
+- A new option `services.prometheus.enableReload` has been added which can be enabled to reload the prometheus service when its config file changes instead of restarting.
+
+- The option `services.prometheus.environmentFile` has been removed since it was causing [issues](https://github.com/NixOS/nixpkgs/issues/126083) and Prometheus now has native support for secret files, i.e. `basic_auth.password_file` and `authorization.credentials_file`.
+
+- Dokuwiki now supports caddy! However:
+  - the nginx option has been removed; in the new configuration, please use `dokuwiki.webserver = "nginx"` instead.
+  - The "${hostname}" option has been deprecated; please use `dokuwiki.sites = [ "${hostname}" ]` instead.
+
+- The [services.unifi](options.html#opt-services.unifi.enable) module has been reworked, solving a number of issues. This leads to several user facing changes:
+  - The `services.unifi.dataDir` option is removed and the data is now always located under `/var/lib/unifi/data`. This is done to make better use of the systemd state directory and thus make the service restart more reliable.
+  - The unifi logs can now be found under: `/var/log/unifi` instead of `/var/lib/unifi/logs`.
+  - The unifi run directory can now be found under: `/run/unifi` instead of `/var/lib/unifi/run`.
+
+- `security.pam.services.<name>.makeHomeDir` now uses `umask=0077` instead of `umask=0022` when creating the home directory.
+
+- Loki has had another release. Some default values have been changed for the configuration and some configuration options have been renamed. For more details, please check [the upgrade guide](https://grafana.com/docs/loki/latest/upgrading/#240).
+
+- `julia` now refers to `julia-stable` instead of `julia-lts`. In practice this means it has been upgraded from `1.0.4` to `1.5.4`.
+
+- RetroArch has been upgraded from version `1.8.5` to `1.9.13.2`. Since the previous release was quite old, if you're having issues after the upgrade, please delete your `$XDG_CONFIG_HOME/retroarch/retroarch.cfg` file.
+
+- hydrus has been upgraded from version `438` to `463`. Since upgrading between releases this old is advised against, be sure to have a backup of your data before upgrading. For details, see [the hydrus manual](https://hydrusnetwork.github.io/hydrus/help/getting_started_installing.html#big_updates).
+
+- More jdk and jre versions are now exposed via `java-packages.compiler`.
+
+- The sets `haskell.packages` and `haskell.compiler` now contain for every ghc version an attribute with the minor version dropped. E.g. for `ghc8107` there also now exists `ghc810`. Those attributes point to the same compilers and packagesets but have the advantage that e.g. `ghc92` stays stable when we update from `ghc925` to `ghc926`.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md
new file mode 100644
index 000000000000..6f5a807f478a
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2205.section.md
@@ -0,0 +1,1002 @@
+# Release 22.05 (“Quokka”, 2022/05/30) {#sec-release-22.05}
+
+- Support is planned until the end of December 2022, handing over to 22.11.
+
+## Highlights {#sec-release-22.05-highlights}
+
+In addition to numerous new and upgraded packages, this release has the following highlights:
+
+- Nix has been updated from 2.3 to 2.8. This mainly brings experimental support
+  for Flakes, but also marks the `nix` command as experimental which now has to
+  be enabled via the configuration explicitly. For more information and
+  instructions for upgrades, see the
+  release notes for [nix-2.4](https://nixos.org/manual/nix/stable/release-notes/rl-2.4.html),
+  [nix-2.5](https://nixos.org/manual/nix/stable/release-notes/rl-2.5.html),
+  [nix-2.6](https://nixos.org/manual/nix/stable/release-notes/rl-2.6.html),
+  [nix-2.7](https://nixos.org/manual/nix/stable/release-notes/rl-2.7.html) and
+  [nix-2.8](https://nixos.org/manual/nix/stable/release-notes/rl-2.8.html)
+
+- The `firefox` browser on `x86_64-linux` now makes use of profile-guided
+  optimisation, resulting in a much more responsive browsing experience.
+
+- GNOME has been upgraded to 42. Please take a look at their [Release
+  Notes](https://release.gnome.org/42/) for details. In particular, it replaces
+  gedit with GNOME Text Editor, GNOME Terminal with GNOME Console (formerly
+  King's Cross) and GNOME Screenshot by a tool integrated into the Shell.
+
+- PHP 8.1 is now available.
+
+- systemd services can now set [systemd.services.\<name\>.reloadTriggers](#opt-systemd.services) instead of `reloadIfChanged` for a more granular distinction between reloads and restarts.
+
+- Systemd has been upgraded to version 250.
+
+- Pulseaudio has been updated to version 15.0 and now optionally
+  [supports additional Bluetooth audio codecs](https://www.freedesktop.org/wiki/Software/PulseAudio/Notes/15.0/#supportforldacandaptxbluetoothcodecsplussbcxqsbcwithhigher-qualityparameters)
+  such as aptX or LDAC, with codec switching available in `pavucontrol`. This
+  feature is disabled by default, but can be enabled with the option
+  `hardware.pulseaudio.package = pkgs.pulseaudioFull;`. Existing third-party
+  modules that offered similar functions, such as `pulseaudio-modules-bt` or
+  `pulseaudio-hsphfpd`, are obsolete and have been removed.
+
+- PostgreSQL now defaults to major version 14.
+
+- Module authors can use `mkRenamedOptionModuleWith` to automate the deprecation cycle without annoying out-of-tree module authors and their users.
+
+- The default GHC version has been updated from 8.10.7 to 9.0.2. `pkgs.haskellPackages` and `pkgs.ghc` will now use this version by default.
+
+- The GNOME and Plasma installation CDs now use `pkgs.calamares` and `pkgs.calamares-nixos-extensions` to allow users to easily install and set up NixOS with a GUI.
+
+- `security.acme.defaults` has been added to simplify the configuration of
+  settings for many certificates at once. This also opens up the option to use
+  DNS-01 validation when using `enableACME` web server virtual hosts (e.g.
+  `services.nginx.virtualHosts.*.enableACME`).
+
+## New Services {#sec-release-22.05-new-services}
+
+- [1password](https://1password.com/), command-line and graphical interfaces for 1Password. Available as [programs._1password](#opt-programs._1password.enable) and [programs._1password-gui](#opt-programs._1password-gui.enable).
+
+- [aesmd](https://github.com/intel/linux-sgx#install-the-intelr-sgx-psw), the Intel SGX Architectural Enclave Service Manager. Available as [services.aesmd](#opt-services.aesmd.enable).
+
+- [agate](https://github.com/mbrubeck/agate), a very simple server for the Gemini hypertext protocol. Available as [services.agate](#opt-services.agate.enable).
+
+- [apfs](https://github.com/linux-apfs/linux-apfs-rw), a kernel module for mounting the Apple File System (APFS).
+
+- [argonone](https://gitlab.com/DarkElvenAngel/argononed), a replacement daemon for the Raspberry Pi Argon One power button and cooler. Available at [services.hardware.argonone](options.html#opt-services.hardware.argonone.enable).
+
+- [ArchiSteamFarm](https://github.com/JustArchiNET/ArchiSteamFarm), a C# application with primary purpose of idling Steam cards from multiple accounts simultaneously. Available as [services.archisteamfarm](#opt-services.archisteamfarm.enable).
+
+- [BaGet](https://loic-sharma.github.io/BaGet/), a lightweight NuGet and symbol server. Available at services.baget.
+
+- [bird-lg](https://github.com/xddxdd/bird-lg-go), a BGP looking glass for Bird Routing. Available as [services.bird-lg](#opt-services.bird-lg.package).
+
+- [blocky](https://0xerr0r.github.io/blocky/), fast and lightweight DNS proxy as ad-blocker for local network with many features. Available as [services.blocky](#opt-services.blocky.enable).
+
+- [cloudflare-dyndns](https://github.com/kissgyorgy/cloudflare-dyndns), CloudFlare Dynamic DNS client. Available as [services.cloudflare-dyndns](#opt-services.cloudflare-dyndns.enable).
+
+- [Corosync](https://corosync.github.io/corosync/) and [Pacemaker](https://clusterlabs.org/pacemaker/), an open-source high availability resource manager. Available as [services.corosync](#opt-services.corosync.enable) and [services.pacemaker](#opt-services.pacemaker.enable).
+
+- [create_ap](https://github.com/lakinduakash/linux-wifi-hotspot), a module for creating wifi hotspots using the program linux-wifi-hotspot. Available as [services.create_ap](#opt-services.create_ap.enable).
+
+- [Envoy](https://www.envoyproxy.io/), a high-performance reverse proxy. Available as [services.envoy](#opt-services.envoy.enable).
+
+- [ergochat](https://ergo.chat), a modern IRC server with IRCv3 features. Available as [services.ergochat](#opt-services.ergochat.enable).
+
+- [ethercalc](https://github.com/audreyt/ethercalc), an online collaborative spreadsheet. Available as [services.ethercalc](#opt-services.ethercalc.enable).
+
+- [filebeat](https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-overview.html), a lightweight shipper for forwarding and centralizing log data. Available as [services.filebeat](#opt-services.filebeat.enable).
+
+- [FRRouting](https://frrouting.org/), a popular suite of Internet routing protocol daemons (BGP, BFD, OSPF, IS-IS, VRRP and others). Available as [services.frr](#opt-services.frr.babel.enable).
+
+- [Grafana Mimir](https://grafana.com/oss/mimir/), an open source, horizontally scalable, highly available, multi-tenant, long-term storage for Prometheus. Available as [services.mimir](#opt-services.mimir.enable).
+
+- [Haste](https://hastebin.com/about.md), a pastebin written in node.js. Available as [services.haste](#opt-services.haste-server.enable).
+
+- [headscale](https://github.com/juanfont/headscale), an Open Source implementation of the [Tailscale](https://tailscale.io) Control Server. Available as [services.headscale](#opt-services.headscale.enable).
+
+- [heisenbridge](https://github.com/hifi/heisenbridge), a bouncer-style Matrix IRC bridge. Available as [services.heisenbridge](#opt-services.heisenbridge.enable).
+
+- [https-dns-proxy](https://github.com/aarond10/https_dns_proxy), DNS to DNS over HTTPS (DoH) proxy. Available as [services.https-dns-proxy](#opt-services.https-dns-proxy.enable).
+
+- [input-remapper](https://github.com/sezanzeb/input-remapper), an easy to use tool to change the mapping of your input device buttons. Available at [services.input-remapper](#opt-services.input-remapper.enable).
+
+- [InvoicePlane](https://invoiceplane.com), web application for managing and creating invoices. Available at [services.invoiceplane](#opt-services.invoiceplane.sites._name_.enable).
+
+- [k3b](https://userbase.kde.org/K3b), the KDE disk burning application. Available as [programs.k3b](#opt-programs.k3b.enable).
+
+- [K40-Whisperer](https://www.scorchworks.com/K40whisperer/k40whisperer.html), a program to control cheap Chinese laser cutters. Available as [programs.k40-whisperer.enable](#opt-programs.k40-whisperer.enable). Users must add themselves to the `k40` group to be able to access the device.
+
+- [kanidm](https://kanidm.github.io/kanidm/stable/), an identity management server written in Rust. Available as [services.kanidm](#opt-services.kanidm.enableServer)
+
+- [Maddy](https://maddy.email/), a free and open source mail server. Available as [services.maddy](#opt-services.maddy.enable).
+
+- [matrix-conduit](https://conduit.rs/), a simple, fast and reliable chat server powered by matrix. Available as [services.matrix-conduit](options.html#opt-services.matrix-conduit.enable).
+
+- [Moosefs](https://moosefs.com), fault tolerant petabyte distributed file system. Available as [moosefs](#opt-services.moosefs.master.enable).
+
+- [mozillavpn](https://github.com/mozilla-mobile/mozilla-vpn-client), the client for the [Mozilla VPN](https://vpn.mozilla.org/) service. Available as [services.mozillavpn](#opt-services.mozillavpn.enable).
+
+- [mtr-exporter](https://github.com/mgumz/mtr-exporter), a Prometheus exporter for mtr metrics. Available as [services.mtr-exporter](#opt-services.mtr-exporter.enable).
+
+- [nbd](https://nbd.sourceforge.io/), a Network Block Device server. Available as [services.nbd](#opt-services.nbd.server.enable).
+
+- [netbox](https://github.com/netbox-community/netbox), infrastructure resource modeling (IRM) tool. Available as [services.netbox](#opt-services.netbox.enable).
+
+- [nethoscope](https://github.com/vvilhonen/nethoscope), listen to your network traffic. Available as [programs.nethoscope](#opt-programs.nethoscope.enable).
+
+- [nifi](https://nifi.apache.org), an easy to use, powerful, and reliable system to process and distribute data. Available as [services.nifi](#opt-services.nifi.enable).
+
+- [nix-ld](https://github.com/Mic92/nix-ld), Run unpatched dynamic binaries on NixOS. Available as [programs.nix-ld](#opt-programs.nix-ld.enable).
+
+- [NNCP](http://www.nncpgo.org), NNCP (Node to Node copy) utilities and configuration. Available as [programs.nncp](#opt-programs.nncp.enable).
+
+- [pgadmin4](https://github.com/postgres/pgadmin4), an admin interface for the PostgreSQL database. Available at [services.pgadmin](#opt-services.pgadmin.enable).
+
+- [PowerDNS-Admin](https://github.com/ngoduykhanh/PowerDNS-Admin), a web interface for the PowerDNS server. Available at [services.powerdns-admin](#opt-services.powerdns-admin.enable).
+
+- [prometheus-pve-exporter](https://github.com/prometheus-pve/prometheus-pve-exporter), a tool that exposes information from the Proxmox VE API for use by Prometheus. Available as [services.prometheus.exporters.pve](#opt-services.prometheus.exporters.pve.enable).
+
+- [prosody-filer](https://github.com/ThomasLeister/prosody-filer), a server for handling XMPP HTTP Upload requests. Available at [services.prosody-filer](#opt-services.prosody-filer.enable).
+
+- [Public Inbox](https://public-inbox.org), an "archives first" approach to mailing lists. Available as [services.public-inbox](#opt-services.public-inbox.enable).
+
+- [r53-ddns](https://github.com/fleaz/r53-ddns), a small tool to run your own DDNS service via AWS Route53. Available as [services.r53-ddns](#opt-services.r53-ddns.enable).
+
+- [rmfakecloud](https://ddvk.github.io/rmfakecloud/), a clone of the cloud sync service for the reMarkable tablet. Available as [services.rmfakecloud](#opt-services.rmfakecloud.enable).
+
+- [rootless Docker](https://docs.docker.com/engine/security/rootless/), a `systemd --user` Docker service which runs without root permissions. Available as [virtualisation.docker.rootless.enable](#opt-virtualisation.docker.rootless.enable).
+
+- [rstudio-server](https://www.rstudio.com/products/rstudio/#rstudio-server), a browser-based version of the RStudio IDE for the R programming language. Available as [services.rstudio-server](#opt-services.rstudio-server.enable).
+
+- [mediamtx](https://github.com/aler9/mediamtx), a ready-to-use RTSP / RTMP / HLS server and proxy that allows you to read, publish and proxy video and audio streams. Available as [services.mediamtx](#opt-services.mediamtx.enable).
+
+- [Snipe-IT](https://snipeitapp.com), a free open source IT asset/license management system. Available as [services.snipe-it](#opt-services.snipe-it.enable).
+
+- [snowflake-proxy](https://snowflake.torproject.org/), a system to defeat internet censorship. Available as [services.snowflake-proxy](#opt-services.snowflake-proxy.enable).
+
+- [sslmate-agent](https://sslmate.com/), a daemon for managing SSL/TLS certificates on a server. Available as [services.sslmate-agent](#opt-services.sslmate-agent.enable).
+
+- [starship](https://starship.rs), a minimal, blazing-fast, and infinitely customizable prompt for any shell. Available at [programs.starship](#opt-programs.starship.enable).
+
+- [systembus-notify](https://github.com/rfjakob/systembus-notify), allows system-level notifications to reach users. Available as [services.systembus-notify](#opt-services.systembus-notify.enable). Please keep in mind that this service should only be enabled on machines with fully trusted users, as any local user is able to DoS user sessions by spamming notifications.
+
+- [teleport](https://goteleport.com), allows engineers and security professionals to unify access for SSH servers, Kubernetes clusters, web applications, and databases across all environments. Available at [services.teleport](#opt-services.teleport.enable).
+
+- [tetrd](https://tetrd.app), share your internet connection from your device to your PC and vice versa through a USB cable. Available at [services.tetrd](#opt-services.tetrd.enable).
+
+- [uptermd](https://upterm.dev), an open-source solution for sharing terminal sessions instantly over the public internet via secure tunnels. Available at [services.uptermd](#opt-services.uptermd.enable).
+
+- [usbrelayd](https://github.com/darrylb123/usbrelay), a USB relay MQTT daemon. Available as [services.usbrelayd](#opt-services.usbrelayd.enable).
+
+- [webdav-server-rs](https://github.com/miquels/webdav-server-rs), a WebDAV server written in Rust. Available as [services.webdav-server-rs](#opt-services.webdav-server-rs.enable).
+
+- [wg-netmanager](https://github.com/gin66/wg_netmanager), a WireGuard network manager. Available as [services.wg-netmanager](#opt-services.wg-netmanager.enable).
+
+- [Zammad](https://zammad.org/), a web-based, open source user support/ticketing solution. Available as [services.zammad](#opt-services.zammad.enable).
+
+<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
+
+## Backward Incompatibilities {#sec-release-22.05-incompatibilities}
+
+- `pkgs.ghc` now refers to `pkgs.targetPackages.haskellPackages.ghc`.
+  This _only_ makes a difference if you are cross-compiling and will
+  ensure that `pkgs.ghc` always runs on the host platform and compiles
+  for the target platform (similar to `pkgs.gcc` for example).
+  `haskellPackages.ghc` still behaves as before, running on the build
+  platform and compiling for the host platform (similar to `stdenv.cc`).
+  This means you don't have to adjust your derivations if you use
+  `haskellPackages.callPackage`, but when using `pkgs.callPackage` and
+  taking `ghc` as an input, you should now use `buildPackages.ghc`
+  instead to ensure cross compilation keeps working (or switch to
+  `haskellPackages.callPackage`).
+
+- `pkgs.ghc.withPackages` as well as `haskellPackages.ghcWithPackages` etc.
+  now needs be overridden directly, as opposed to overriding the result of
+  calling it. Additionally, the `withLLVM` parameter has been renamed to
+  `useLLVM`. So instead of `(ghc.withPackages (p: [])).override { withLLVM = true; }`,
+  one needs to use `(ghc.withPackages.override { useLLVM = true; }) (p: [])`.
+
+- The update of the haskell package set brings with it a new version of the `xmonad`
+  module, which will break your configuration if you use `launch` as the entrypoint. The
+  example code in the corresponding NixOS module was adjusted accordingly; you may want to have a look at it.
+
+- The `home-assistant` module now requires users that don't want their
+  configuration to be managed declaratively to set
+  `services.home-assistant.config = null;`. This is required
+  due to the way default settings are handled with the new settings style.
+
+  Additionally the default list of `extraComponents` now includes the minimal
+  dependencies to successfully complete the [onboarding](https://www.home-assistant.io/getting-started/onboarding/)
+  procedure.
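+
+  For instance, a minimal illustrative configuration that keeps managing `configuration.yaml` by hand could look like this:
+
+  ```nix
+  {
+    services.home-assistant = {
+      enable = true;
+      # Opt out of the declaratively managed configuration.yaml
+      config = null;
+    };
+  }
+  ```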
+
+- `pkgs.emacsPackages.orgPackages` is removed because org elpa is deprecated.
+  The packages in the top level of `pkgs.emacsPackages`, such as org and
+  org-contrib, refer to the ones in `pkgs.emacsPackages.elpaPackages` and
+  `pkgs.emacsPackages.nongnuPackages`, where the new versions will be released.
+
+- The configuration and state directories used by `nixos-containers` have been
+  moved from `/etc/containers` and `/var/lib/containers` to
+  `/etc/nixos-containers` and `/var/lib/nixos-containers`.
+
+  If you are changing `system.stateVersion` to `"22.05"` manually on an existing
+  system you are responsible for migrating these directories yourself.
+
+  This is to improve compatibility with `libcontainer` based software such as Podman and Skopeo
+  which assume ownership of `/etc/containers`.
+
+- `lib.systems.supported` has been removed, as it was overengineered for determining the systems to support in the nixpkgs flake. The list of systems exposed by the nixpkgs flake can now be accessed as `lib.systems.flakeExposed`.
+
+- For new installations `virtualisation.oci-containers.backend` is now set to `podman` by default.
+  If you still want to use Docker on systems where `system.stateVersion` is set to `"22.05"`, set `virtualisation.oci-containers.backend = "docker";`. Old systems with older `stateVersion`s stay with `"docker"`.
+
+- `security.klogd` was removed.  Logging of kernel messages is handled
+  by systemd since Linux 3.5.
+
+- `pkgs.ssmtp` has been dropped due to the program being unmaintained.
+  `pkgs.msmtp` can be used instead as a substitute `sendmail` implementation.
+  The corresponding options `services.ssmtp.*` have been removed as well.
+  `programs.msmtp.*` can be used instead for an equivalent setup. For example:
+
+  ```nix
+  {
+    # Original ssmtp configuration:
+    services.ssmtp = {
+      enable = true;
+      useTLS = true;
+      useSTARTTLS = true;
+      hostName = "smtp.example:587";
+      authUser = "someone";
+      authPassFile = "/secrets/password.txt";
+    };
+
+    # Equivalent msmtp configuration:
+    programs.msmtp = {
+      enable = true;
+      accounts.default = {
+        tls = true;
+        tls_starttls = true;
+        auth = true;
+        host = "smtp.example";
+        port = 587;
+        user = "someone";
+        passwordeval = "cat /secrets/password.txt";
+      };
+    };
+  }
+  ```
+
+- `services.kubernetes.addons.dashboard` was removed due to it being an outdated version.
+
+- `services.kubernetes.scheduler.{port,address}` now set `--secure-port` and `--bind-address` instead of `--port` and `--address`, since the former have been deprecated and are no longer functional in kubernetes>=1.23. Ensure that you are not relying on the insecure behaviour before upgrading.
+
+- In the PowerDNS Recursor module (`services.pdns-recursor`), default values of several IP address-related NixOS options have been updated to match the default upstream behavior.
+  In particular, Recursor by default will:
+    - listen on (and allow connections from) both IPv4 and IPv6 addresses
+      (`services.pdns-recursor.dns.address`, `services.pdns-recursor.dns.allowFrom`);
+    - allow only local connections to the REST API server (`services.pdns-recursor.api.allowFrom`).
+
+- In the ncdns module, the default value of `services.ncdns.address` has been changed to the IPv6 loopback address (`::1`).
+
+- `openldap` (and therefore the slapd LDAP server) was updated to version 2.6.2. The project introduced backwards-incompatible changes, namely the removal of the bdb, hdb, ndb, and shell backends in slapd. Therefore, before updating, dump your database with `slapcat -n 1` in LDIF format, and reimport it after updating your `services.openldap.settings`, which represents your `cn=config`.
+
+  Additionally with 2.5 the argon2 module was included in the standard distribution and renamed from `pw-argon2` to `argon2`. Remember to update your `olcModuleLoad` entry in `cn=config`.
+
+- `openssh` has been updated to 8.9p1, changing the FIDO security key middleware interface.
+
+- `git` no longer hardcodes the path to openssh's `ssh` binary, to reduce the amount of rebuilds. If you are using git with ssh remotes and do not have an `ssh` binary in your environment, consider adding `openssh` to it or switching to `gitFull`.
+
+- `services.k3s.enable` no longer implies `systemd.enableUnifiedCgroupHierarchy = false`, and will default to the 'systemd' cgroup driver when using `services.k3s.docker = true`.
+  This change may require a reboot to take effect, and k3s may not be able to run if the boot cgroup hierarchy does not match its configuration.
+  The previous behavior may be retained by explicitly setting `systemd.enableUnifiedCgroupHierarchy = false` in your configuration.
+
+- `fonts.fonts` no longer includes ancient bitmap fonts when both `config.services.xserver.enable` and `config.nixpkgs.config.allowUnfree` are enabled.
+  If you still want these fonts, use:
+
+  ```nix
+  {
+    fonts.fonts = [
+      pkgs.xorg.fontbhlucidatypewriter100dpi
+      pkgs.xorg.fontbhlucidatypewriter75dpi
+      pkgs.xorg.fontbh100dpi
+    ];
+  }
+  ```
+
+- `services.prometheus.alertManagerTimeout` has been removed as it has been deprecated upstream and has no effect.
+
+- The DHCP server (`services.dhcpd4`, `services.dhcpd6`) has been hardened.
+  The service is now using the systemd's `DynamicUser` mechanism to run as an unprivileged dynamically-allocated user with limited capabilities.
+  The dhcpd state files are now always stored in `/var/lib/dhcpd{4,6}` and the `services.dhcpd4.stateDir` and `services.dhcpd6.stateDir` options have been removed.
+  If you were depending on root privileges or set{uid,gid,cap} binaries in dhcpd shell hooks, you may give dhcpd more capabilities with e.g. `systemd.services.dhcpd6.serviceConfig.AmbientCapabilities`.
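+
+  As a rough sketch (the specific capability shown is only an example and depends on what your hooks actually need):
+
+  ```nix
+  {
+    # Grant the hardened dhcpd6 unit an additional ambient capability
+    systemd.services.dhcpd6.serviceConfig.AmbientCapabilities = [ "CAP_NET_RAW" ];
+  }
+  ```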
+
+- The `mailpile` email webclient (`services.mailpile`) has been removed due to its reliance on python2.
+
+- `services.ipfs.extraFlags` is now escaped with `utils.escapeSystemdExecArgs`. If you rely on systemd interpolating `extraFlags` in the service `ExecStart`, this will no longer work.
+
+- `hbase` version 0.98.24 has been removed. The package now defaults to version 2.4.11. Versions 1.7.1 and 3.0.0-alpha-2 are also available.
+
+- `services.paperless-ng` was renamed to `services.paperless`. Accordingly, the `paperless-ng-manage` script (located in `dataDir`) was renamed to `paperless-manage`. `services.paperless` now uses `paperless-ngx`.
+
+- The `matrix-synapse` service (`services.matrix-synapse`) has been converted to use the `settings` option defined in RFC42.
+  This means that options that are part of your `homeserver.yaml` configuration, and that were specified at the top-level of the
+  module (`services.matrix-synapse`) now need to be moved into `services.matrix-synapse.settings`. And while not all options you
+  may use are defined in there, they are still supported, because you can set arbitrary values in this freeform type.
+
+  The `listeners.*.bind_address` option was renamed to `bind_addresses` in order to match the upstream `homeserver.yaml` option
+  name. It is now also a list of strings instead of a string.
+
+  An example to make the required migration clearer:
+
+  Before:
+  ```nix
+  {
+    services.matrix-synapse = {
+      enable = true;
+
+      server_name = "example.com";
+      public_baseurl = "https://example.com:8448";
+
+      enable_registration = false;
+      registration_shared_secret = "xohshaeyui8jic7uutuDogahkee3aehuaf6ei3Xouz4iicie5thie6nohNahceut";
+      macaroon_secret_key = "xoo8eder9seivukaiPh1cheikohquuw8Yooreid0The4aifahth3Ou0aiShaiz4l";
+
+      tls_certificate_path = "/var/lib/acme/example.com/fullchain.pem";
+      tls_private_key_path = "/var/lib/acme/example.com/key.pem";
+
+      listeners = [ {
+        port = 8448;
+        bind_address = "";
+        type = "http";
+        tls = true;
+        resources = [ {
+          names = [ "client" ];
+          compress = true;
+        } {
+          names = [ "federation" ];
+          compress = false;
+        } ];
+      } ];
+
+    };
+  }
+  ```
+
+  After:
+  ```nix
+  {
+    services.matrix-synapse = {
+      enable = true;
+
+      # this attribute set holds all values that go into your homeserver.yaml configuration
+      # See https://github.com/matrix-org/synapse/blob/develop/docs/sample_config.yaml for
+      # possible values.
+      settings = {
+        server_name = "example.com";
+        public_baseurl = "https://example.com:8448";
+
+        enable_registration = false;
+        # pass `registration_shared_secret` and `macaroon_secret_key` via `extraConfigFiles` instead
+
+        tls_certificate_path = "/var/lib/acme/example.com/fullchain.pem";
+        tls_private_key_path = "/var/lib/acme/example.com/key.pem";
+
+        listeners = [ {
+          port = 8448;
+          bind_addresses = [
+            "::"
+            "0.0.0.0"
+          ];
+          type = "http";
+          tls = true;
+          resources = [ {
+            names = [ "client" ];
+            compress = true;
+          } {
+            names = [ "federation" ];
+            compress = false;
+          } ];
+        } ];
+      };
+
+      extraConfigFiles = [
+        "/run/keys/matrix-synapse/secrets.yaml"
+      ];
+    };
+  }
+  ```
+
+  The secrets in your original config should be migrated into a YAML file that is included via `extraConfigFiles`. The filename must be quoted to prevent nix from copying it to the (world readable) store.
+
+  Additionally a few option defaults have been synced up with upstream default values, for example the `max_upload_size` grew from `10M` to `50M`. For the same reason, the default
+  `media_store_path` was changed from `${dataDir}/media` to `${dataDir}/media_store` if `system.stateVersion` is at least `22.05`. Files will need to be manually moved to the new
+  location if the `stateVersion` is updated.
+
+  As of Synapse 1.58.0, the old groups/communities feature has been disabled by default. It will be completely removed with Synapse 1.61.0.
+
+- The Keycloak package (`pkgs.keycloak`) has been switched from the
+  Wildfly version, which will soon be deprecated, to the Quarkus based
+  version. The Keycloak service (`services.keycloak`) has been updated
+  to accommodate the change and now differs from the previous version
+  in a few ways:
+
+  - `services.keycloak.extraConfig` has been removed in favor of the
+    new [settings-style](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md)
+    [`services.keycloak.settings`](#opt-services.keycloak.settings)
+    option. The available options correspond directly to parameters in
+    `conf/keycloak.conf`. Some of the most important parameters are
+    documented as suboptions, the rest can be found in the [All
+    configuration section of the Keycloak Server Installation and
+    Configuration
+    Guide](https://www.keycloak.org/server/all-config). While the new
+    configuration is much simpler and cleaner than the old JBoss CLI
+    one, this unfortunately means that there's no straightforward way
+    to convert an old configuration to the new format and some
+    settings may not even be available anymore.
+
+  - `services.keycloak.frontendUrl` was removed and the frontend URL
+    is now configured through the `hostname` family of settings in
+    [`services.keycloak.settings`](#opt-services.keycloak.settings)
+    instead. See the [Hostname section of the Keycloak Server
+    Installation and Configuration
+    Guide](https://www.keycloak.org/server/hostname) for more
+    details. Additionally, `/auth` was removed from the default
+    context path and needs to be added back in
+    [`services.keycloak.settings.http-relative-path`](#opt-services.keycloak.settings.http-relative-path)
+    if you want to keep compatibility with your current clients.
+
+  - `services.keycloak.bindAddress`,
+    `services.keycloak.forceBackendUrlToFrontendUrl`,
+    `services.keycloak.httpPort` and `services.keycloak.httpsPort`
+    have been removed in favor of their equivalent options in
+    [`services.keycloak.settings`](#opt-services.keycloak.settings). `httpPort`
+    and `httpsPort` have additionally had their types changed from
+    `str` to `port`.
+
+    The new names are as follows:
+    - `bindAddress`: [`services.keycloak.settings.http-host`](#opt-services.keycloak.settings.http-host)
+    - `forceBackendUrlToFrontendUrl`: [`services.keycloak.settings.hostname-strict-backchannel`](#opt-services.keycloak.settings.hostname-strict-backchannel)
+    - `httpPort`: [`services.keycloak.settings.http-port`](#opt-services.keycloak.settings.http-port)
+    - `httpsPort`: [`services.keycloak.settings.https-port`](#opt-services.keycloak.settings.https-port)
+
+  For example, when using a reverse proxy the migration could look
+  like this:
+
+  Before:
+  ```nix
+    services.keycloak = {
+      enable = true;
+      httpPort = "8080";
+      frontendUrl = "https://keycloak.example.com/auth";
+      database.passwordFile = "/run/keys/db_password";
+      extraConfig = {
+        "subsystem=undertow"."server=default-server"."http-listener=default".proxy-address-forwarding = true;
+      };
+    };
+  ```
+
+  After:
+  ```nix
+    services.keycloak = {
+      enable = true;
+      settings = {
+        http-port = 8080;
+        hostname = "keycloak.example.com";
+        http-relative-path = "/auth";
+        proxy = "edge";
+      };
+      database.passwordFile = "/run/keys/db_password";
+    };
+  ```
+
+- The MoinMoin wiki engine (`services.moinmoin`) has been removed, because Python 2 is being retired from nixpkgs.
+
+- Services in the `hadoop` module previously set `openFirewall` to true by default.
+  This has now been changed to false. Node definitions for multi-node clusters would need
+  `openFirewall = true;` to be added to hadoop services when upgrading from NixOS 21.11.
+
+- `services.hadoop.yarn.nodemanager` now uses cgroup-based CPU limit enforcement by default.
+  Additionally, the option `useCGroups` was added to nodemanagers as an easy way to switch
+  back to the old behavior.
+
+- The `wafHook` hook now honors `NIX_BUILD_CORES` when `enableParallelBuilding` is not set explicitly. Packages can restore the old behaviour by setting `enableParallelBuilding=false`.
+
+- `pkgs.claws-mail-gtk2`, representing Claws Mail's older release version three, was removed in order to get rid of Python 2.
+  Please switch to `claws-mail`, which is Claws Mail's latest release based on GTK+3 and Python 3.
+
+- The `writers.writePython2` and corresponding `writers.writePython2Bin` convenience functions to create executable Python 2 scripts in the store were removed in preparation of removal of the Python 2 interpreter.
+  Scripts have to be converted to Python 3 for use with `writers.writePython3`, or `writers.writePyPy2` needs to be used instead.
+
+- `buildGoModule` was updated to use `go_1_17`, third party derivations that specify >= go 1.17 in the main `go.mod` will need to regenerate their `vendorSha256` hash.
+
+- The `gnome-passwordsafe` package was updated to [version 6.x](https://gitlab.gnome.org/World/secrets/-/tags/6.0) and renamed to `gnome-secrets`.
+
+- `services.gnome.experimental-features.realtime-scheduling` option has been removed, as GNOME Shell now [uses rtkit](https://gitlab.gnome.org/GNOME/mutter/-/merge_requests/2060). Use `security.rtkit.enable = true;` instead. As before, you will need to have it enabled using GSettings.
+
+- `services.telepathy` will no longer be enabled by default for GNOME desktops; enable it in your configuration if you use Empathy or Polari.
+
+- If you previously used `/etc/docker/daemon.json`, you need to incorporate the changes into the new option `virtualisation.docker.daemon.settings`.
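+
+  For example, a former `/etc/docker/daemon.json` could be expressed roughly like this (the keys shown are illustrative):
+
+  ```nix
+  {
+    virtualisation.docker = {
+      enable = true;
+      daemon.settings = {
+        # formerly the contents of /etc/docker/daemon.json
+        log-driver = "journald";
+        data-root = "/var/lib/docker";
+      };
+    };
+  }
+  ```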
+
+- Ntopng (`services.ntopng`) is updated to 5.2.1 and uses a separate Redis instance if `system.stateVersion` is at least `22.05`. Existing setups shouldn't be affected.
+
+- The backward compatibility in `services.wordpress` to configure sites with
+  the old interface has been removed. Please use `services.wordpress.sites`
+  instead.
+
+- The backward compatibility in `services.dokuwiki` to configure sites with the
+  old interface has been removed. Please use `services.dokuwiki.sites` instead.
+
+- opensmtpd-extras is no longer built with Python 2 scripting support, due to the Python 2 deprecation in nixpkgs.
+
+- `services.miniflux.adminCredentialFiles` is now required, instead of defaulting to `admin` and `password`.
+
+- The `taskserver` module no longer implicitly opens ports in the firewall
+  configuration. This is now controlled through the option
+  `services.taskserver.openFirewall`.
+
+- The `autorestic` package has been upgraded from 1.3.0 to 1.5.0, which introduces breaking changes to the config file; check [their migration guide](https://autorestic.vercel.app/migration/1.4_1.5) for more details.
+
+- `teleport` has been upgraded to major version 9. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and [release notes](https://goteleport.com/docs/changelog/#900).
+
+- For `pkgs.python3.pkgs.ipython`, its direct dependency `pkgs.python3.pkgs.matplotlib-inline`
+  (which is really an adapter to integrate matplotlib in ipython if it is installed) does
+  not depend on `pkgs.python3.pkgs.matplotlib` anymore.
+  This is closer to a non-Nix install of ipython.
+  This has the added benefit to reduce the closure size of `ipython` from ~400MB to ~160MB
+  (including ~100MB for python itself).
+
+- `documentation.man` has been refactored to support choosing a man implementation other than GNU's `man-db`. For this, `documentation.man.manualPages` has been renamed to `documentation.man.man-db.manualPages`. If you want to use the new alternative man implementation `mandoc`, add `documentation.man = { enable = true; man-db.enable = false; mandoc.enable = true; }` to your configuration.
+
+- Normal users (with `isNormalUser = true`) that have non-empty `subUidRanges` or `subGidRanges` set no longer have additional implicit ranges allocated. To re-enable automatic allocation, set `autoSubUidGidRange = true`.
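+
+  A sketch with an illustrative user name and range:
+
+  ```nix
+  {
+    users.users.alice = {
+      isNormalUser = true;
+      subUidRanges = [ { startUid = 100000; count = 65536; } ];
+      # Restore the previously implicit automatic allocation
+      autoSubUidGidRange = true;
+    };
+  }
+  ```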
+
+- `idris2` now requires `--package` when using packages `contrib` and `network`, while previously these idris2 packages were automatically loaded.
+
+- The iputils package, which is installed by default, no longer provides the
+  legacy tools `tftpd` and `traceroute6`. More tools (`ninfod`, `rarpd`, and
+  `rdisc`) are going to be removed in the next release. See
+  [upstream's release notes](https://github.com/iputils/iputils/releases/tag/20211215)
+  for more details and available replacements.
+
+- `services.thelounge.private` was removed in favor of `services.thelounge.public`, to follow with upstream changes.
+
+- `pkgs.docbookrx` was removed since it's unmaintained
+
+- `pkgs._7zz` is now correctly licensed as LGPL3+ and BSD3 with optional unfree unRAR licensed code
+
+- The `vim.customize` function produced by `vimUtils.makeCustomizable` now has a slightly different interface:
+  * The wrapper now includes everything in the given Vim derivation if `name` is `"vim"` (the default). This makes the `wrapManual` argument obsolete, but this behavior can be overridden by setting the `standalone` argument.
+  * All the executables present in the given derivation (or, in `standalone` mode, only the `*vim` ones) are wrapped. This makes the `wrapGui` argument obsolete.
+  * The `vimExecutableName` and `gvimExecutableName` arguments were replaced by a single `executableName` argument in which the shell variable `$exe` can be used to refer to the wrapped executable's name.
+
+  See the comments in `pkgs/applications/editors/vim/plugins/vim-utils.nix` for more details.
+
+  `vimUtils.vimWithRC` was removed. You should instead use `customize` on a Vim derivation, which now accepts `vimrcFile` and `gvimrcFile` arguments.
+
+- `tilp2` was removed together with its module
+
+- The F-PROT antivirus (`fprot` package) and its service module were removed because it
+  reached [end-of-life](https://kb.cyren.com/av-support/index.php?/Knowledgebase/Article/View/434/0/end-of-sale--end-of-life-for-f-prot-and-csam).
+
+- `bird1` and its modules `services.bird` as well as `services.bird6` have been removed. Upgrade to `services.bird2`.
+
+- The options `networking.interfaces.<name>.ipv4.routes` and `networking.interfaces.<name>.ipv6.routes` are no longer ignored when using networkd instead of the default scripted network backend by setting `networking.useNetworkd` to `true`.
+
+- The `miller` package has been upgraded from 5.10.3 to [6.2.0](https://github.com/johnkerl/miller/releases/tag/v6.2.0). See [What's new in Miller 6](https://miller.readthedocs.io/en/latest/new-in-miller-6).
+
+- MultiMC has been replaced with the fork PrismLauncher due to upstream
+  developers being hostile to 3rd party package maintainers. PrismLauncher
+  removes all MultiMC branding and is aimed at providing proper 3rd party
+  packages like the one contained in Nixpkgs. This change affects the data
+  folder where game instances and other save and configuration files are stored.
+  Users with existing installations should rename `~/.local/share/multimc` to
+  `~/.local/share/PrismLauncher`. The main config file's path has also moved
+  from `~/.local/share/multimc/multimc.cfg` to
+  `~/.local/share/PrismLauncher/prismlauncher.cfg`.
+
+- `systemd-nspawn@.service` settings have been reverted to the default systemd behaviour. User namespaces are now activated by default. If you want to keep running nspawn containers without user namespaces you need to set `systemd.nspawn.<name>.execConfig.PrivateUsers = false`
+
+- `systemd-shutdown` is now properly linked on shutdown to unmount all filesystems and device mapper devices cleanly. This can be disabled using `systemd.shutdownRamfs.enable`.
+
+- The Tor SOCKS proxy is now actually disabled if `services.tor.client.enable` is set to `false` (the default). If you are using this functionality but didn't change the setting or set it to `false`, you now need to set it to `true`.
+
+- `services.github-runner` has been hardened.  Notably address families and
+  system calls have been restricted, which may adversely affect some kinds of
+  testing, e.g. using `AF_BLUETOOTH` to test bluetooth devices.
+
+- The terraform 0.12 compatibility has been removed and the `terraform.withPlugins` and `terraform-providers.mkProvider` implementations simplified. Providers now need to be stored under
+`$out/libexec/terraform-providers/<registry>/<owner>/<name>/<version>/<os>_<arch>/terraform-provider-<name>_v<version>` (which mkProvider does).
+
+  This breaks back-compat so it's not possible to mix-and-match with previous versions of nixpkgs. In exchange, it now becomes possible to use the providers from [nixpkgs-terraform-providers-bin](https://github.com/numtide/nixpkgs-terraform-providers-bin) directly.
+
+- The `dendrite` package has been upgraded from 0.5.1 to
+  [0.6.5](https://github.com/matrix-org/dendrite/releases/tag/v0.6.5). Instances
+  configured with split sqlite databases, which has been the default
+  in NixOS, require merging of the federation sender and signing key
+  databases. See upstream [release
+  notes](https://github.com/matrix-org/dendrite/releases/tag/v0.6.0)
+  on version 0.6.0 for details on database changes.
+
+- The existing `pkgs.opentelemetry-collector` has been moved to
+  `pkgs.opentelemetry-collector-contrib` to match the actual source being the
+  "contrib" edition. `pkgs.opentelemetry-collector` is now the actual core
+  release of opentelemetry-collector. If you use the community contributions
+  you should change the package you refer to. If you don't need them, update your
+  commands from `otelcontribcol` to `otelcorecol` and enjoy a 7x smaller binary.
+
+- `services.zookeeper` has a new option `jre` for specifying the JRE to start
+  zookeeper with. It defaults to the JRE that `pkgs.zookeeper` was wrapped with,
+  instead of `pkgs.jre`. This changes the JRE to `pkgs.jdk11_headless` by default.
+
+- `pkgs.pgadmin` now refers to `pkgs.pgadmin4`. `pgadmin3` has been removed.
+
+- `pkgs.minetestclient_4` and `pkgs.minetestserver_4` have been removed, as the last 4.x release was in 2018. `pkgs.minetestclient` (equivalent to `pkgs.minetest`) and `pkgs.minetestserver` can be used instead.
+
+- `pkgs.noto-fonts-cjk` is now deprecated in favor of `pkgs.noto-fonts-cjk-sans`
+  and `pkgs.noto-fonts-cjk-serif` because they each have different release
+  schedules. To maintain compatibility with prior releases of Nixpkgs,
+  `pkgs.noto-fonts-cjk` is currently an alias of `pkgs.noto-fonts-cjk-sans` and
+  doesn't include serif fonts.
+
+- `pkgs.epgstation` has been upgraded from v1 to v2, resulting in incompatible
+  changes in the database schema and configuration format.
+
+- Some top-level settings under [services.epgstation](#opt-services.epgstation.enable)
+  are now deprecated because they were redundant due to the same options being
+  present in [services.epgstation.settings](#opt-services.epgstation.settings).
+
+- The option `services.epgstation.basicAuth` was removed because basic
+  authentication support was dropped by upstream.
+
+- The option [services.epgstation.database.passwordFile](#opt-services.epgstation.database.passwordFile)
+  no longer has a default value. Make sure to set this option explicitly before
+  upgrading. Change the database password if necessary.
+
+- The [services.epgstation.settings](#opt-services.epgstation.settings)
+  option now expects options for `config.yml` in EPGStation v2.
+
+- Existing data for the [services.epgstation](#opt-services.epgstation.enable)
+  module would have to be backed up prior to the upgrade. To back up existing
+  data to `/tmp/epgstation.bak`, run
+  `sudo -u epgstation epgstation run backup /tmp/epgstation.bak`.
+  To import that data after the upgrade, run
+  `sudo -u epgstation epgstation run v1migrate /tmp/epgstation.bak`.
+
+- `switch-to-configuration` (the script that is run when running `nixos-rebuild switch` for example) has been reworked
+    * The interface that allows activation scripts to restart units has been streamlined. Restarting and reloading is now done by a single file `/run/nixos/activation-restart-list` that honors `restartIfChanged` and `reloadIfChanged` of the units.
+        * Preferring to reload instead of restarting can still be achieved using `/run/nixos/activation-reload-list`.
+    * The script now uses a proper ini-file parser to parse systemd units. Some values are now only searched in one section instead of in the entire unit. This is only relevant for units that don't use the NixOS systemd module.
+        * `RefuseManualStop`, `X-OnlyManualStart`, `X-StopOnRemoval`, `X-StopOnReconfiguration` are only searched in the `[Unit]` section
+        * `X-ReloadIfChanged`, `X-RestartIfChanged`, `X-StopIfChanged` are only searched in the `[Service]` section
+
+- The `services.bookstack.cacheDir` option has been removed, since the
+  cache directory is now handled by systemd.
+
+- The `services.bookstack.extraConfig` option has been replaced by
+  `services.bookstack.config` which implements a
+  [settings-style](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md)
+  configuration.
+
+- `lib.assertMsg` and `lib.assertOneOf` no longer return `false` if the passed condition is `false`, `throw`ing the given error message instead (which makes the resulting error message less cluttered). This will not impact the behaviour of code using these functions as intended, namely as top-level wrapper for `assert` conditions.
+
+- The `vpnc` package has been changed to use GnuTLS instead of OpenSSL by default for licensing reasons.
+
+- The default version of `nextcloud` is **nextcloud24**. Please note that it's **not** possible to upgrade
+  `nextcloud` across multiple major versions! This means it's e.g. not possible to upgrade from `nextcloud22`
+  to `nextcloud24` in a single deploy and most `21.11` users will have to upgrade to `nextcloud23` first.
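+
+  For example, to stay on the previous major version for one more deploy, the package can be pinned explicitly (a sketch, assuming the standard `services.nextcloud.package` mechanism):
+
+  ```nix
+  {
+    services.nextcloud.package = pkgs.nextcloud23;
+  }
+  ```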
+
+- `pkgs.vimPlugins.onedark-nvim` now refers to [navarasu/onedark.nvim](https://github.com/navarasu/onedark.nvim)
+  (it formerly referred to [olimorris/onedarkpro.nvim](https://github.com/olimorris/onedarkpro.nvim)).
+
+- `services.pipewire.enable` will default to enabling the WirePlumber session manager instead of pipewire-media-session.
+  pipewire-media-session is deprecated by upstream and not recommended, but can still be manually enabled by setting
+  `services.pipewire.media-session.enable` to `true` and `services.pipewire.wireplumber.enable` to `false`.
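+
+  A minimal sketch of keeping the deprecated session manager, using the options named above:
+
+  ```nix
+  {
+    services.pipewire.enable = true;
+    # Not recommended: keep pipewire-media-session instead of WirePlumber
+    services.pipewire.media-session.enable = true;
+    services.pipewire.wireplumber.enable = false;
+  }
+  ```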
+
+- `pkgs.makeDesktopItem` has been refactored to provide a more idiomatic API. Specifically:
+  - All valid options as of FDO Desktop Entry specification version 1.4 can now be passed in as explicit arguments
+  - `exec` can now be null, for entries that are not of type Application
+  - `mimeType` argument is renamed to `mimeTypes` for consistency
+  - `mimeTypes`, `categories`, `implements`, `keywords`, `onlyShowIn` and `notShowIn` take lists of strings instead of one string with semicolon separators
+  - `extraDesktopEntries` renamed to `extraConfig` for consistency
+  - Actions should now be provided as an attrset `actions`; the `Actions` line will be autogenerated.
+  - `extraEntries` is removed.
+  - Additional validation is added both at eval time and at build time.
+
+  See the `vscode` package for a more detailed example.
+
+- Existing `resholve*` functions have been renamed and nested under `pkgs.resholve`. Update uses to:
+  - `resholvePackage` -> `resholve.mkDerivation`
+  - `resholveScript` -> `resholve.writeScript`
+  - `resholveScriptBin` -> `resholve.writeScriptBin`
+
+- `pkgs.cosmopolitan` no longer provides the `cosmoc` command. It has been moved to `pkgs.cosmoc`.
+
+- `pkgs.graalvmXX-ce` packages no longer provide support for Python/Ruby/WASM, instead focusing only on Java and Native Image support. If you need to add support back, please see the `pkgs.graalvmCEPackages.mkGraal` function to create your own customized version of GraalVM with support for what you need.
+
+<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
+
+## Other Notable Changes {#sec-release-22.05-notable-changes}
+
+- The option [services.redis.servers](#opt-services.redis.servers) was added
+  to support per-application `redis-server` which is more secure since Redis databases
+  are mere key prefixes without any configuration or ACLs of their own.
+  Backward-compatibility is preserved by mapping old `services.redis.settings`
+  to `services.redis.servers."".settings`, but you are strongly encouraged
+  to name each `redis-server` instance after the application using it,
+  instead of keeping that nameless one.
+  Except for the nameless `services.redis.servers.""`
+  still accessible at `127.0.0.1:6379`,
+  and to the members of the Unix group `redis`
+  through the Unix socket `/run/redis/redis.sock`,
+  all other `services.redis.servers.${serverName}`
+  are only accessible by default
+  to the members of the Unix group `redis-${serverName}`
+  through the Unix socket `/run/redis-${serverName}/redis.sock`.
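+
+  As an illustrative sketch, a dedicated instance for an application called `myapp` (the name is hypothetical) would be declared like this:
+
+  ```nix
+  {
+    # Socket: /run/redis-myapp/redis.sock, accessible to members of group "redis-myapp"
+    services.redis.servers."myapp".enable = true;
+  }
+  ```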
+
+- The option [virtualisation.vmVariant](#opt-virtualisation.vmVariant) was added
+  to allow users to make changes to the `nixos-rebuild build-vm` configuration
+  that do not apply to their normal system.
+
+  The `config.system.build.vm` attribute now always exists and defaults to the
+  value from `vmVariant`. Configurations that import the `virtualisation/qemu-vm.nix`
+  module themselves will override this value, such that `vmVariant` is not used.
+
+  Similarly [virtualisation.vmVariantWithBootloader](#opt-virtualisation.vmVariantWithBootLoader) was added.
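+
+  A short sketch (the memory size is an arbitrary example value):
+
+  ```nix
+  {
+    virtualisation.vmVariant = {
+      # Only affects the VM built by `nixos-rebuild build-vm`
+      virtualisation.memorySize = 2048;
+    };
+  }
+  ```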
+
+- The configuration portion of the `nix-daemon` module has been reworked and exposed as [nix.settings](options.html#opt-nix.settings) (see the example below):
+  * Legacy options have been mapped to the corresponding options under [nix.settings](options.html#opt-nix.settings) and will be deprecated when NixOS 21.11 reaches end of life.
+  * [nix.buildMachines.publicHostKey](options.html#opt-nix.buildMachines.publicHostKey) has been added.
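+
+  A brief sketch with illustrative `nix.conf` settings:
+
+  ```nix
+  {
+    nix.settings = {
+      auto-optimise-store = true;
+      trusted-users = [ "root" "alice" ];
+    };
+  }
+  ```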
+
+- [`kops`](https://kops.sigs.k8s.io) defaults to 1.23.2, which will enable [Instance Metadata Service Version 2](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configuring-instance-metadata-service.html) and require tokens on new clusters with Kubernetes >= 1.22. This will increase security by default, but may break some types of workloads. The default behaviour for `spec.kubeDNS.nodeLocalDNS.forwardToKubeDNS` has changed from `true` to `false`. Cilium now has `disable-cnp-status-updates: true` by default. Set this to false if you rely on the CiliumNetworkPolicy status fields. Support for Kubernetes 1.17, the Lyft CNI, Weave CNI on Kubernetes >= 1.23, CentOS 7 and 8, Debian 9, RHEL 7, and Ubuntu 16.04 (Xenial) has been removed. See the [1.22 release notes](https://kops.sigs.k8s.io/releases/1.22-notes/) and [1.23 release notes](https://kops.sigs.k8s.io/releases/1.23-notes/) for more details, including other significant changes.
+
+- Mattermost has been upgraded to extended support version 6.3 as the previously
+  packaged extended support version 5.37 is [reaching end of life](https://docs.mattermost.com/upgrade/extended-support-release.html).
+  Migration may take some time, see the [changelog](https://docs.mattermost.com/install/self-managed-changelog.html#release-v6-3-extended-support-release)
+  and [important upgrade notes](https://docs.mattermost.com/upgrade/important-upgrade-notes.html).
+
+- The `writers.writePyPy2`/`writers.writePyPy3` and corresponding `writers.writePyPy2Bin`/`writers.writePyPy3Bin` convenience functions to create executable Python 2/3 scripts using the PyPy interpreter were added.
+
+- Some improvements have been made to the `hadoop` module:
+  - A `gatewayRole` option has been added, for deploying hadoop cluster configuration files to a node that does not have any active services
+  - Support for older versions of hadoop has been added to the module
+  - Overriding and extending site XML files has been made easier
+
+- The auto-upgrade service now accepts a `persistent` parameter (default: `true`).
+  By default auto-upgrade will now run immediately if it would have been triggered at least
+  once during the time when the timer was inactive.
+
+- Mastodon now uses `services.redis.servers` to start a new redis server, instead of using a global redis server.
+  This improves compatibility with other services that use redis.
+
+  Note that this will recreate the redis database, although according to the [Mastodon docs](https://docs.joinmastodon.org/admin/backups/),
+  this is almost harmless:
+  > Losing the Redis database is almost harmless: The only irrecoverable data will be the contents of the Sidekiq queues and scheduled retries of previously failed jobs.
+  >  The home and list feeds are stored in Redis, but can be regenerated with tootctl.
+
+  If you do want to save the redis database, you can use the following commands:
+  ```bash
+  redis-cli save
+  cp /var/lib/redis/dump.rdb "/var/lib/redis-mastodon/dump.rdb"
+  ```
+- Peertube now uses `services.redis.servers` to start a new redis server, instead of using a global redis server.
+  This improves compatibility with other services that use redis.
+
+  The Redis database is only used to store the cache and the job queue. More information can be found in the [Peertube architecture documentation](https://docs.joinpeertube.org/contribute-architecture).
+
+  If you do want to save the redis database, you can use the following commands before upgrading the OS:
+  ```bash
+  redis-cli save
+  sudo mkdir /var/lib/redis-peertube
+  sudo cp /var/lib/redis/dump.rdb /var/lib/redis-peertube/dump.rdb
+  ```
+- Added the `keter` NixOS module. Keter reverse proxies requests to your loaded application based on virtual hostnames.
+
+- If you are using Wayland you can choose to use the Ozone Wayland support
+  in Chrome and several Electron apps by setting the environment variable
+  `NIXOS_OZONE_WL=1` (for example via
+  `environment.sessionVariables.NIXOS_OZONE_WL = "1"`).
+  This is not enabled by default because Ozone Wayland is
+  still under heavy development and behavior is not always flawless.
+  Furthermore, not all Electron apps use the latest Electron versions.
+
+- A new option group `systemd.network.wait-online` was added, with options to configure `systemd-networkd-wait-online.service` (see the example after this list):
+  - `anyInterface` allows specifying that the network should be considered online when *at least one* interface is online (useful on laptops)
+  - `timeout` defines how long to wait for the network to come online
+  - `extraArgs` for everything else
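+
+  An illustrative sketch (the timeout value is arbitrary):
+
+  ```nix
+  {
+    systemd.network.wait-online = {
+      # Consider the system online as soon as one interface is up
+      anyInterface = true;
+      # Give up waiting after 30 seconds
+      timeout = 30;
+    };
+  }
+  ```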
+
+- The `influxdb2` package was split into `influxdb2-server` and
+  `influxdb2-cli`, matching the split that took place upstream. A
+  combined `influxdb2` package is still provided in this release for
+  backwards compatibility, but will be removed at a later date.
+
+- The `unifi` package was switched from `unifi6` to `unifi7`.
+  Direct downgrades from Unifi 7 to Unifi 6 are not possible and require restoring from a backup made by Unifi 6.
+
+- `programs.zsh.autosuggestions.strategy` now takes a list of strings instead of a string.
+
+- The `asterisk` and `asterisk-stable` packages were switched from `asterisk_18` to the newly-packaged `asterisk_19`. Asterisk 13 and 17 have been removed as they have reached their end of life.
+
+- The `services.unifi.openPorts` option default value of `true` is now deprecated and will be changed to `false` in 22.11.
+  Configurations using this default will print a warning when rebuilt.
+
+- The `services.unifi-video.openPorts` option default value of `true` is now deprecated and will be changed to `false` in 22.11.
+  Configurations using this default will print a warning when rebuilt.
+
+- `security.acme` certificates will now correctly check for CA
+  revocation before reaching their minimum age.
+
+- Removing domains from `security.acme.certs._name_.extraDomainNames`
+  will now correctly remove those domains during rebuild/renew.
+
+- MariaDB is now offered in several versions, not just the newest one.
+  So if you have a need for running MariaDB 10.4 for example, you can now just set `services.mysql.package = pkgs.mariadb_104;`.
+  In general, it is recommended to run the newest version, to get the newest features, while sticking with an LTS version will most likely provide a more stable experience.
+  Sometimes software is also incompatible with the newest version of MariaDB.
+
+- The option
+  [programs.ssh.enableAskPassword](#opt-programs.ssh.enableAskPassword) was
+  added, decoupling the setting of `SSH_ASKPASS` from
+  `services.xserver.enable`. This allows easy usage in non-X11 environments,
+  e.g. Wayland.
+
+- [programs.ssh.knownHosts](#opt-programs.ssh.knownHosts) has gained an `extraHostNames`
+  option to augment `hostNames`. It is now possible to use the attribute name of a `knownHosts`
+  entry as the primary host name and specify secondary host names using `extraHostNames` without
+  having to duplicate the primary host name.
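+
+  A sketch with hypothetical host names and a truncated placeholder key:
+
+  ```nix
+  {
+    programs.ssh.knownHosts."server.example.org" = {
+      extraHostNames = [ "server" "192.0.2.10" ];
+      publicKey = "ssh-ed25519 AAAA... (placeholder)";
+    };
+  }
+  ```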
+
+- The `services.stubby` module was converted to a [settings-style](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) configuration.
+
+- The option
+  [services.xserver.desktopManager.runXdgAutostartIfNone](#opt-services.xserver.desktopManager.runXdgAutostartIfNone)
+  was added in order to automatically run XDG autostart files for sessions without a desktop manager.
+  This replaces helpers like the `dex` package.
+
+- When setting [i18n.inputMethod.enabled](#opt-i18n.inputMethod.enabled) to `fcitx5`,
+  it no longer creates corresponding systemd user services.
+  It now relies on XDG autostart files to start and work properly in your desktop sessions.
+  If you are using only a window manager without a desktop manager, you need to enable
+  `services.xserver.desktopManager.runXdgAutostartIfNone` or use the `dex` package to make `fcitx5` work.
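+
+  A minimal sketch for a window-manager-only setup, using the options named above:
+
+  ```nix
+  {
+    i18n.inputMethod.enabled = "fcitx5";
+    services.xserver.desktopManager.runXdgAutostartIfNone = true;
+  }
+  ```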
+
+
+- The option `services.duplicati.dataDir` has been added to allow changing the location of duplicati's files.
+
+- The options `boot.extraModprobeConfig` and `boot.blacklistedKernelModules` now also take effect in the initrd by copying the file `/etc/modprobe.d/nixos.conf` into the initrd.
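+
+  For example (the module names and options below are purely illustrative):
+
+  ```nix
+  {
+    boot.blacklistedKernelModules = [ "pcspkr" ];
+    boot.extraModprobeConfig = ''
+      options kvm_intel nested=1
+    '';
+  }
+  ```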
+
+- `nixos-generate-config` now puts the dhcp configuration in `hardware-configuration.nix` instead of `configuration.nix`.
+
+- ORY Kratos was updated to version 0.9.0-alpha.3, which introduces some breaking changes:
+  - All endpoints at the Admin API are now exposed at `/admin/`. For example, endpoint `https://kratos:4434/identities` is now exposed at `https://kratos:4434/admin/identities`
+  - Configuration key `selfservice.whitelisted_return_urls` has been renamed to `allowed_return_urls`
+  - The `password_identifier` form field of the password login strategy has been renamed to `identifier` to make compatibility with passwordless flows possible.
+  - Instead of having a global `default_schema_url` which developers used to update their schema, you now need to define `default_schema_id`, which must reference a schema ID in your config.
+  - Calling `/self-service/recovery` without flow ID or with an invalid flow ID while authenticated will now respond with an error instead of redirecting to the default page.
+  - If you are relying on the SQLite images, update your Docker Pull commands as follows:
+    - `docker pull oryd/kratos:{version}`
+  - Additionally, all passwords now have to be at least 8 characters long.
+  - For more details, see:
+    - [Release Notes for v0.8.1-alpha-1](https://github.com/ory/kratos/releases/tag/v0.8.1-alpha.1)
+    - [Release Notes for v0.8.2-alpha-1](https://github.com/ory/kratos/releases/tag/v0.8.2-alpha.1)
+    - [Release Notes for v0.9.0-alpha-1](https://github.com/ory/kratos/releases/tag/v0.9.0-alpha.1)
+    - [Release Notes for v0.9.0-alpha-3](https://github.com/ory/kratos/releases/tag/v0.9.0-alpha.3)
+
+
+- `fetchFromSourcehut` now allows fetching repositories recursively
+  using `fetchgit` or `fetchhg` if the argument `fetchSubmodules`
+  is set to `true`.
+
+- A module for declarative configuration of openconnect VPN profiles was added under `networking.openconnect`.
+
+- The `element-desktop` package now has a `useKeytar` option (defaults to `true`),
+  which allows disabling `keytar` and in turn `libsecret` usage
+  (which binds to native credential managers / keychain libraries).
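+
+  As a sketch, disabling it via a package override could look like this:
+
+  ```nix
+  {
+    environment.systemPackages = [
+      (pkgs.element-desktop.override { useKeytar = false; })
+    ];
+  }
+  ```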
+
+- The option `services.thelounge.plugins` has been added to allow installing plugins for The Lounge. Plugins can be found in `pkgs.theLoungePlugins.plugins` and `pkgs.theLoungePlugins.themes`.
+
+- The option `services.xserver.videoDriver = [ "nvidia" ];` will now also install [nvidia VA-API drivers](https://github.com/elFarto/nvidia-vaapi-driver) by default.
+
+- The `firmwareLinuxNonfree` package has been renamed to `linux-firmware`.
+
+- It is now possible to specify wordlists to include as handy-to-access environment variables using the `config.environment.wordlist` configuration options.
+
+- The `services.mbpfan` module was converted to a [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) configuration.
+
+- The default value for `programs.spacefm.settings.graphical_su` is now unset. It previously pointed to `gksu`, which has been removed.
+
+- The [Dino](https://dino.im) XMPP client was updated to 0.3, adding support for audio and video calls.
+
+- `services.mattermost.plugins` has been added to allow the declarative installation of Mattermost plugins.
+  Plugins are automatically repackaged using autoPatchelf.
+
+- [services.logrotate.enable](#opt-services.logrotate.enable) now defaults to true if any rotate path has
+  been defined, and some paths have been added by default.
+
+- The logrotate module has also been updated to a freeform syntax: `services.logrotate.paths`
+  and `services.logrotate.extraConfig` will work, but issue deprecation
+  warnings and [services.logrotate.settings](#opt-services.logrotate.settings) should now be used instead.
+
+- `security.pam.ussh` has been added, which allows authorizing PAM sessions based on SSH _certificates_ held within an SSH agent, using [pam-ussh](https://github.com/uber/pam-ussh).
+
+- The `vscode-extensions.ionide.ionide-fsharp` package has been updated to 6.0.0 and now requires .NET 6.0.
+
+- The `phpPackages.box` package has been updated from 2.7.5 to 3.16.0. See the [upgrade guide](https://github.com/box-project/box/blob/master/UPGRADE.md#from-27-to-30) for more details.
+
+- The `zrepl` package has been updated from 0.4.0 to 0.5:
+
+  - The RPC protocol version was bumped; all zrepl daemons in a setup must be updated and restarted before replication can resume.
+  - A bug involving encrypt-on-receive has been fixed. Read the [zrepl documentation](https://zrepl.github.io/configuration/sendrecvoptions.html#job-recv-options-placeholder) and check the output of `zfs get -r encryption,zrepl:placeholder PATH_TO_ROOTFS` on the receiver.
+
+- The `polybar` package has been updated from 3.5.7 to 3.6.2. See [the changelog](https://github.com/polybar/polybar/releases/tag/3.6.0) for more details.
+  - Breaking changes include changes to escaping rules in configuration values, changes in behavior when encountering invalid tag names, and changes to inter-process-messaging (IPC).
+
+- The option `services.openssh.challengeResponseAuthentication` has been renamed to `services.openssh.kbdInteractiveAuthentication`,
+  because the old name has been deprecated upstream.
+  Using the old option name will still work, but produce a warning.
+
+- `services.autorandr` now allows for adding hooks and profiles declaratively.
+
+- The `pomerium-cli` command has been moved out of the `pomerium` package into
+  the `pomerium-cli` package, following upstream's repository split. If you are
+  using the `pomerium-cli` command, you should now install the `pomerium-cli`
+  package.
+
+- The option `networking.networkmanager.enableFccUnlock`
+  was added to support FCC unlock procedures. Since release 1.18.4, the ModemManager
+  daemon no longer automatically performs the FCC unlock procedure by default. See
+  [the docs](https://modemmanager.org/docs/modemmanager/fcc-unlock/) for more details.
+
+- `programs.tmux` has a new option `plugins` that accepts a list of packages from the `tmuxPlugins` group. The specified packages are added to the system and loaded by `tmux`.
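+
+  For example (the chosen plugins are illustrative):
+
+  ```nix
+  {
+    programs.tmux = {
+      enable = true;
+      plugins = with pkgs.tmuxPlugins; [ sensible resurrect ];
+    };
+  }
+  ```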
+
+- The polkit service, available at `security.polkit.enable`, is now disabled by default. It will automatically be enabled through services and desktop environments as needed.
+
+- `mercury` was updated to 22.01.1, which has some breaking changes ([Mercury 22.01 news](https://dl.mercurylang.org/release/release-notes-22.01.html)).
+
+- xfsprogs was updated to version 5.15, which enables inobtcount and bigtime by default on filesystem creation. Support for these features was added in kernel 5.10 and deemed stable in kernel 5.15.
+  If you want to be able to mount XFS filesystems created with this release of xfsprogs on kernel releases older than 5.10, you need to format them with `mkfs.xfs -m bigtime=0 -m inobtcount=0`.
+
+- `services.xserver.desktopManager.xfce` now includes Xfce's screen locker, `xfce4-screensaver`, which is enabled by default. You can disable it by setting [services.xserver.desktopManager.xfce.enableScreensaver](#opt-services.xserver.desktopManager.xfce.enableScreensaver) to `false`.
+
+- The `hadoop` package has added support for `aarch64-linux` and `aarch64-darwin` as of 3.3.1 ([#158613](https://github.com/NixOS/nixpkgs/pull/158613)).
+
+- The `R` package now builds again on `aarch64-darwin` ([#158992](https://github.com/NixOS/nixpkgs/pull/158992)).
+
+- The `nss` package was split into `nss_esr` and `nss_latest`, with `nss` being an alias for `nss_esr`. This was done to ease maintenance of `nss` and dependent high-profile packages like `firefox`.
+
+- The default `scribus` version is now 1.5, while version 1.4 is still available as `scribus_1_4` ([#172700](https://github.com/NixOS/nixpkgs/pull/172700)).
+
+- The Nextcloud module now supports creating a MySQL database automatically
+  when `services.nextcloud.database.createLocally` is enabled.
+
+- The Nextcloud module now allows setting the value of the `max-age` directive of the `Strict-Transport-Security` HTTP header, which is now controlled by the `services.nextcloud.https` option, rather than `services.nginx.recommendedHttpHeaders`.
+
+- The `spark3` package has been updated from 3.1.2 to 3.2.1 ([#160075](https://github.com/NixOS/nixpkgs/pull/160075)):
+
+  - Testing has been enabled for `aarch64-linux` in addition to `x86_64-linux`.
+  - The `spark3` package is now usable on `aarch64-darwin` as a result of [#158613](https://github.com/NixOS/nixpkgs/pull/158613) and [#158992](https://github.com/NixOS/nixpkgs/pull/158992).
+
+- The option `services.snapserver.openFirewall` will no longer default to
+  `true` starting with NixOS 22.11. Enable it explicitly if you need to control
+  Snapserver remotely or connect streaming clients from other hosts.
+
+- The option [networking.useDHCP](options.html#opt-networking.useDHCP) isn't deprecated anymore.
+  When using [`systemd-networkd`](options.html#opt-networking.useNetworkd), a generic
+  `.network`-unit is added which enables DHCP for each interface matching `en*`, `eth*`
+  or `wl*` with priority 99 (which means that it doesn't have any effect if such an interface is matched
+  by a `.network`-unit with a lower priority). In case of scripted networking, no behavior
+  was changed.
+
+- The new [`postgresqlTestHook`](https://nixos.org/manual/nixpkgs/stable/#sec-postgresqlTestHook) runs a PostgreSQL server for the duration of package checks.
+
+- `zfs` was updated from 2.1.4 to 2.1.5, enabling it to be used with Linux kernel 5.18.
+
+- `stdenv.mkDerivation` now supports a self-referencing `finalAttrs:` parameter
+  containing the final `mkDerivation` arguments including overrides.
+  `drv.overrideAttrs` now supports two parameters `finalAttrs: previousAttrs:`.
+  This allows packaging configuration to be overridden in a consistent manner by
+  providing an alternative to `rec {}` syntax.
+
+  Additionally, `passthru` can now reference `finalAttrs.finalPackage` containing
+  the final package, including attributes such as the output paths and
+  `overrideAttrs`.
+
+  New language integrations can be simplified by overriding a "prototype"
+  package containing the language-specific logic. This removes the need for a
+  extra layer of overriding for the "generic builder" arguments, thus removing a
+  usability problem and source of error.
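+
+  As a minimal sketch (the package name and test attribute below are illustrative, not taken from a real package):
+
+  ```nix
+  stdenv.mkDerivation (finalAttrs: {
+    pname = "example";
+    version = "1.0";
+    # finalAttrs holds the final arguments, so this reference stays correct
+    # even after the derivation is changed with overrideAttrs:
+    passthru.tests.finalPackage = finalAttrs.finalPackage;
+  })
+  ```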
+
+<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md
new file mode 100644
index 000000000000..1c73d0c9790d
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2211.section.md
@@ -0,0 +1,536 @@
+# Release 22.11 (“Raccoon”, 2022.11/30) {#sec-release-22.11}
+
+The NixOS release team is happy to announce a new version of NixOS 22.11. NixOS is a Linux distribution, whose set of packages can also be used on other Linux systems and macOS.
+
+This release is supported until the end of June 2023, handing over to NixOS 23.05.
+
+To upgrade to the latest release follow the [upgrade chapter](#sec-upgrading).
+
+## Highlights {#sec-release-22.11-highlights}
+
+In addition to numerous new and upgraded packages, this release includes the following highlights:
+
+- Software that uses the `crypt` password hashing API is now using the implementation provided by [`libxcrypt`](https://github.com/besser82/libxcrypt) instead of glibc's, which enables support for more secure algorithms.
+  - Support for algorithms that `libxcrypt` [does not consider strong](https://github.com/besser82/libxcrypt/blob/v4.4.28/lib/hashes.conf#L41) are **deprecated** as of this release, and will be removed in NixOS 23.05.
+  - This includes system login passwords. Given this, we **strongly encourage** all users to update their system passwords, as you will be unable to log in if your password hashes are not migrated by the time support for them is removed.
+    - When using `users.users.<name>.hashedPassword` to configure user passwords, run `mkpasswd`, and use the yescrypt hash that is provided as the new value.
+    - On the other hand, for interactively configured user passwords, re-set the passwords for all users with `passwd`.
+    - This release introduces warnings for the use of deprecated hash algorithms for both methods of configuring passwords. To make sure you migrated correctly, run `nixos-rebuild switch`.
+
+- The NixOS documentation is now generated from markdown. While docbook is still part of the documentation build process, it's a big step towards the full migration.
+
+- `aarch64-linux` is now included in the `nixos-22.11` and `nixos-22.11-small` channels. This means that when those channels update, both `x86_64-linux` and `aarch64-linux` will be available in the binary cache.
+
+- `aarch64-linux` ISOs are now available on the [downloads page](https://nixos.org/download.html).
+
+- `nsncd` is now available as a replacement for `nscd`.
+
+  `nscd` is responsible for resolving hostnames, users and more in NixOS and has been a long standing source of bugs, such as sporadic network freezes.
+
+  More context in this [issue](https://github.com/NixOS/nixpkgs/issues/135888).
+
+  Help us test the new implementation by setting `services.nscd.enableNsncd` to `true`.
+
+  We plan to use `nsncd` by default in NixOS 23.05.
+
+- Linode cloud images are now supported by importing `${modulesPath}/virtualisation/linode-image.nix` and accessing `system.build.linodeImage` on the output.
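+
+  For example, a minimal image configuration could look like this (any further system settings are up to you); the resulting image is then available at `config.system.build.linodeImage`:
+
+  ```nix
+  { modulesPath, ... }:
+  {
+    imports = [ "${modulesPath}/virtualisation/linode-image.nix" ];
+  }
+  ```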
+
+- `hardware.nvidia` has a new option, `hardware.nvidia.open`, that can be used to enable the usage of NVIDIA's open-source kernel driver. Note that the driver's support for GeForce and Workstation GPUs is still alpha quality, see [the release announcement](https://developer.nvidia.com/blog/nvidia-releases-open-source-gpu-kernel-modules/) for more information.
+
+- The `emacs` package now makes use of native compilation which means:
+  - Emacs packages from Nixpkgs, built-in or not, will do native compilation ahead of time, so you can enjoy the benefit of native compilation without compiling them on your machine;
+  - Emacs packages from somewhere else, e.g. `package-install`, will perform asynchronously deferred native compilation. If you do not want this, maybe to avoid CPU consumption for compilation, you can use `(setq native-comp-deferred-compilation nil)` to disable it while still benefiting from native compilation for packages from Nixpkgs.
+
+## Internal changes {#sec-release-22.11-internal}
+
+- Haskell `ghcWithPackages` is now up to 15 times faster to evaluate, thanks to changing `lib.closePropagation` from quadratic to linear complexity. Please see the backward incompatibilities notes below. <https://github.com/NixOS/nixpkgs/pull/194391>
+
+- For cross-compilation targets that can also run on the building machine, we now run tests. This, for example, is the case for the `pkgsStatic` and `pkgsLLVM` package sets or i686 packages on `x86_64` machines.
+
+- To simplify cross-compilation in NixOS, this release introduces the `nixpkgs.hostPlatform` and `nixpkgs.buildPlatform` options. These cover and override the `nixpkgs.{system,localSystem,crossSystem}` options.
+
+   - `hostPlatform` is the platform or "`system`" string of the NixOS system
+     described by the configuration.
+   - `buildPlatform` is the platform that is responsible for building the NixOS
+     configuration. It defaults to the `hostPlatform`, for a non-cross
+     build configuration. To cross compile, set `buildPlatform` to a different
+     value.
+
+  The new options convey the same information, but with fewer options, and
+  following the Nixpkgs terminology.
+
+  The existing options `nixpkgs.{system,localSystem,crossSystem}` have not
+  been formally deprecated, to allow for evaluation of the change and to allow
+  for a transition period so that in time the ecosystem can switch without
+  breaking compatibility with any supported NixOS release.
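+
+  For example, to cross-compile a NixOS configuration for an AArch64 machine from an x86_64 build machine:
+
+  ```nix
+  {
+    nixpkgs.hostPlatform = "aarch64-linux";
+    nixpkgs.buildPlatform = "x86_64-linux";
+  }
+  ```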
+
+## Notable version updates {#sec-release-22.11-version-updates}
+
+- Nix has been upgraded from v2.8.1 to v2.11.0. For more information, please see the release notes for [2.9](https://nixos.org/manual/nix/stable/release-notes/rl-2.9.html), [2.10](https://nixos.org/manual/nix/stable/release-notes/rl-2.10.html) and [2.11](https://nixos.org/manual/nix/stable/release-notes/rl-2.11.html).
+
+- OpenSSL now defaults to OpenSSL 3, updated from 1.1.1.
+
+- GNOME has been upgraded to version 43. Please see the [release notes](https://release.gnome.org/43/) for details.
+
+- KDE Plasma has been upgraded from v5.24 to v5.26. Please see the release notes for [v5.25](https://kde.org/announcements/plasma/5/5.25.0/) and [v5.26](https://kde.org/announcements/plasma/5/5.26.0/) for more details on the included changes.
+
+- Cinnamon has been updated to 5.4, and the Cinnamon module now defaults to
+  Blueman as the Bluetooth manager and slick-greeter as the LightDM greeter, to match upstream.
+
+- PHP now defaults to PHP 8.1, updated from 8.0.
+
+- Perl has been updated to 5.36, and its core module `HTTP::Tiny` was patched to verify SSL/TLS certificates by default.
+
+- Python now defaults to 3.10, updated from 3.9.
+
+## Backward Incompatibilities {#sec-release-22.11-incompatibilities}
+
+- Nixpkgs now requires Nix 2.3 or newer.
+
+- The `isCompatible` predicate checking CPU compatibility is no longer exposed
+  by the platform sets generated using `lib.systems.elaborate`. In most cases
+  you will want to use the new `canExecute` predicate instead which also
+  takes the kernel / syscall interface into account.
+  `lib.systems.parse.isCompatible` still exists, but has changed semantically:
+  Architectures with differing endianness modes are *no longer considered compatible*.
+
+- `ngrok` has been upgraded from 2.3.40 to 3.0.4. Please see [the upgrade guide](https://ngrok.com/docs/guides/upgrade-v2-v3)
+  and [changelog](https://ngrok.com/docs/ngrok-agent/changelog). Notably, breaking changes are that the config file format has
+  changed and support for single hyphen arguments was dropped.
+
+- `i18n.supportedLocales` is now only generated with the locales set in `i18n.defaultLocale` and `i18n.extraLocaleSettings`.
+  - This reduces the final system closure size by up to 200MB.
+  - If you require all locales installed, set the option to ``[ "all" ]``.
+
+- Deprecated settings `logrotate.paths` and `logrotate.extraConfig` have
+  been removed. Please convert any uses to
+  [services.logrotate.settings](#opt-services.logrotate.settings) instead.
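+
+  A minimal sketch of the new format (the log path and values are placeholders):
+
+  ```nix
+  services.logrotate.settings."/var/log/myapp/*.log" = {
+    frequency = "weekly";
+    rotate = 4;
+    compress = true;
+  };
+  ```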
+
+- The `isPowerPC` predicate, found on `platform` attrsets (`hostPlatform`, `buildPlatform`, `targetPlatform`, etc.), has been removed in order to reduce confusion. The predicate was defined such that it matches only the 32-bit big-endian members of the POWER/PowerPC family, despite having a name which would imply a broader set of systems. If you were using this predicate, you can replace `foo.isPowerPC` with `(with foo; isPower && is32bit && isBigEndian)`.
+
+- The `fetchgit` fetcher now uses [cone mode](https://www.git-scm.com/docs/git-sparse-checkout/2.37.0#_internalscone_mode_handling) by default for sparse checkouts. [Non-cone mode](https://www.git-scm.com/docs/git-sparse-checkout/2.37.0#_internalsnon_cone_problems) can be enabled by passing `nonConeMode = true`, but note that non-cone mode is deprecated and this option may be removed alongside a future Git update without notice.
+
+- The `fetchgit` fetcher supports sparse checkouts via the `sparseCheckout` option. This used to accept a multi-line string with directories/patterns to check out, but now requires a list of strings.
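+
+  A minimal sketch of the new list form (the URL, revision and hash are placeholders):
+
+  ```nix
+  fetchgit {
+    url = "https://example.org/some-repo.git";            # placeholder
+    rev = "0000000000000000000000000000000000000000";     # placeholder
+    sha256 = lib.fakeSha256;                               # replace with the real hash
+    sparseCheckout = [ "docs" "src/subdir" ];              # previously a multi-line string
+  }
+  ```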
+
+- `openssh` was updated to version 9.1, disabling the generation of DSA keys when using `ssh-keygen -A` as they are insecure. Also, `SetEnv` directives in `ssh_config` and `sshd_config` are now first-match-wins.
+
+- `bsp-layout` no longer uses the command `cycle` to switch to other window layouts, as it got replaced by the commands `previous` and `next`.
+
+- The Barco ClickShare driver/client package `pkgs.clickshare-csc1` and the option `programs.clickshare-csc1.enable` have been removed,
+  as it requires `qt4`, which reached its end of life in 2015 and will no longer be supported by nixpkgs.
+  [According to Barco](https://www.barco.com/de/support/knowledge-base/4380-can-i-use-linux-os-with-clickshare-base-units) many of their base unit models can be used with Google Chrome and the Google Cast extension.
+
+- `services.hbase` has been renamed to `services.hbase-standalone`.
+  For production HBase clusters, use `services.hadoop.hbase` instead.
+
+- The `p4` package now only includes the open-source Perforce Helix Core command-line client and APIs. It no longer installs the unfree Helix Core Server binaries `p4d`, `p4broker`, and `p4p`. To install the Helix Core Server binaries, use the `p4d` package instead.
+
+- The OpenSSL extension for the PHP interpreter used by Nextcloud is built against OpenSSL 1.1 if
+  [](#opt-system.stateVersion) is below `22.11`. This is to make sure that people using [server-side encryption](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html)
+  don't lose access to their files.
+
+  In any other case, it's safe to use OpenSSL 3 for PHP's OpenSSL extension. This can be done by setting
+  `services.nextcloud.enableBrokenCiphersForSSE` to `false`.
+
+- The `coq` package and versioned variants starting at `coq_8_14` no
+  longer include CoqIDE, which is now available through
+  `coqPackages.coqide`. It is still possible to get CoqIDE as part of
+  the `coq` package by overriding the `buildIde` argument of the
+  derivation.
+
+- PHP 7.4 is no longer supported due to upstream not supporting this
+  version for the entire lifecycle of the 22.11 release.
+
+- The ipfs package and module were renamed to kubo. The kubo module now uses an RFC42-style `settings` option instead of `extraConfig` and the `gatewayAddress`, `apiAddress` and `swarmAddress` options were renamed. Using the old names will print a warning but still work.
+
+- `pkgs.cosign` does not provide the `cosigned` binary anymore. The `sget` binary has been moved into its own package.
+
+- Emacs now uses the Lucid toolkit by default instead of GTK because of stability and compatibility issues.
+  Users who still wish to remain using GTK can do so by using `emacs-gtk`.
+
+- `kanidm` has been updated to 1.1.0-alpha.10 and now requires a TLS certificate and key. It now always starts an HTTPS server and, if enabled, an LDAPS server; it no longer starts plain HTTP or LDAP servers.
+
+- riak package removed along with `services.riak` module, due to lack of maintainer to update the package.
+
+- ppd files in `pkgs.cups-drv-rastertosag-gdi` are now gzipped.  If you refer to such a ppd file with its path (e.g. via [hardware.printers.ensurePrinters](options.html#opt-hardware.printers.ensurePrinters)) you will need to append `.gz` to the path.
+
+- xow package removed along with the `hardware.xow` module, due to the project being deprecated in favor of `xone`,  which is available via the `hardware.xone` module.
+
+- dd-agent package removed along with the `services.dd-agent` module, due to the project being deprecated in favor of `datadog-agent`,  which is available via the `services.datadog-agent` module.
+
+- `teleport` has been upgraded to major version 10. Please see upstream [upgrade instructions](https://goteleport.com/docs/ver/10.0/management/operations/upgrading/) and [release notes](https://goteleport.com/docs/ver/10.0/changelog/#1000).
+
+- `lib.closePropagation` now requires that all gathered sets have an `outPath` attribute.
+
+- lemmy module option `services.lemmy.settings.database.createLocally`
+  moved to `services.lemmy.database.createLocally`.
+
+- virtlyst package and `services.virtlyst` module removed, due to lack of maintainers.
+
+- The `nix.checkConfig` option now fully disables the config check. The new `nix.checkAllErrors` option behaves like `nix.checkConfig`  previously did.
+
+- `generateOptparseApplicativeCompletions` and `generateOptparseApplicativeCompletion` from `haskell.lib.compose`
+  (and `haskell.lib`) have been deprecated in favor of `generateOptparseApplicativeCompletions` (plural!) as
+  provided by the haskell package sets (so `haskellPackages.generateOptparseApplicativeCompletions` etc.).
+  The latter allows for cross-compilation (by automatically disabling generation of completion in the cross case).
+  For it to work properly you need to make sure that the function comes from the same context as the package
+  you are trying to override, i.e. always use the same package set as your package is coming from or – even
+  better – use `self.generateOptparseApplicativeCompletions` if you are overriding a haskell package set.
+  The old functions are retained for backwards compatibility, but yield a warning.
+
+- The `services.graphite.api` and `services.graphite.beacon` NixOS options, and
+  the `python3.pkgs.graphite_api`, `python3.pkgs.graphite_beacon` and
+  `python3.pkgs.influxgraph` packages, have been removed due to lack of upstream
+  maintenance.
+
+- The `trace` binary from `perf-linux` package has been removed, due to being a duplicate of the `perf` binary.
+
+- The `aws` package has been removed due to being abandoned by the upstream. It is recommended to use `awscli` or `awscli2` instead.
+
+- The [CEmu TI-84 Plus CE emulator](https://ce-programming.github.io/CEmu) package has been renamed to `cemu-ti`. The [Cemu Wii U emulator](https://cemu.info) is now packaged as `cemu`.
+
+- `systemd-networkd` v250 deprecated, renamed, and moved some sections and settings which leads to the following breaking module changes:
+
+   * `systemd.network.networks.<name>.dhcpV6PrefixDelegationConfig` is renamed to `systemd.network.networks.<name>.dhcpPrefixDelegationConfig`.
+   * `systemd.network.networks.<name>.dhcpV6Config` no longer accepts the `ForceDHCPv6PDOtherInformation=` setting. Please use the `WithoutRA=` and `UseDelegatedPrefix=` settings in your `systemd.network.networks.<name>.dhcpV6Config` and the `DHCPv6Client=` setting in your `systemd.network.networks.<name>.ipv6AcceptRAConfig` to control when the DHCPv6 client is started and how the delegated prefixes are handled by the DHCPv6 client.
+   * `systemd.network.networks.<name>.networkConfig` no longer accepts the `IPv6Token=` setting. Use the `Token=` setting in your `systemd.network.networks.<name>.ipv6AcceptRAConfig` instead. The `systemd.network.networks.<name>.ipv6Prefixes.*.ipv6PrefixConfig` now also accepts the `Token=` setting.
+
+- `arangodb` versions 3.3, 3.4, and 3.5 have been removed because they are at EOL upstream. The default is now 3.10.0. Support for aarch64-linux has been removed since the target cannot be built reproducibly. By default `arangodb` is now built for the `haswell` architecture. If you wish to build for a different architecture, you may override the `targetArchitecture` argument with a value from [this list supported upstream](https://github.com/arangodb/arangodb/blob/207ec6937e41a46e10aea34953879341f0606841/cmake/OptimizeForArchitecture.cmake#L594). Some architecture specific optimizations are also conditionally enabled. You may alter this behavior by overriding the `asmOptimizations` parameter. You may also add additional architecture support by adding more `-DHAS_XYZ` flags to `cmakeFlags` via `overrideAttrs`.
+
+- The `meta.mainProgram` attribute of packages in `wineWowPackages` now defaults to `"wine64"`.
+
+- The `paperless` module now defaults `PAPERLESS_TIME_ZONE` to your configured system timezone.
+
+- The top-level `termonad-with-packages` alias for `termonad` has been removed.
+
+- Linux 4.9 has been removed because it will reach its end of life within the lifespan of 22.11.
+
+- (Neo)Vim can no longer be configured with `configure.pathogen`, to reduce maintenance burden.
+  Use `configure.packages` instead.
+
+- Neovim can no longer be configured with plug (this still works for Vim).
+
+- The `adguardhome` module no longer uses `host` and `port` options, use `settings.bind_host` and `settings.bind_port` instead.
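+
+  A minimal sketch of the migration (the values shown are only examples and assume the upstream AdGuard Home configuration keys):
+
+  ```nix
+  services.adguardhome.settings = {
+    bind_host = "0.0.0.0";
+    bind_port = 3000;
+  };
+  ```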
+
+- The default `kops` version is now 1.25.1 and support for 1.22 and older has been dropped.
+
+- The `zrepl` package has been updated from 0.5.0 to 0.6.0. See the [changelog](https://zrepl.github.io/changelog.html) for details.
+
+- `k3s` no longer supports Docker as runtime due to upstream dropping support.
+
+- `cassandra_2_1` and `cassandra_2_2` have been removed. Please update to `cassandra_3_11` or `cassandra_3_0`. See the [changelog](https://github.com/apache/cassandra/blob/cassandra-3.11.14/NEWS.txt) for more information about the upgrade process.
+
+- `mysql57` has been removed. Please update to `mysql80` or `mariadb`. See the [upgrade guide](https://mariadb.com/kb/en/upgrading-from-mysql-to-mariadb/) for more information.
+
+- Consequently, `cqrlog` and `amarok` now use `mariadb` instead of `mysql57` for their embedded databases. Running `mysql_upgrade` may be necessary.
+
+- `k3s` now supports the `clusterInit` option; it is enabled by default for servers.
+
+- `percona-server56` has been removed. Please migrate to `mysql` or `mariadb` if possible.
+
+- `obs-studio` has been updated to version 28. If you have packaged custom plugins, check if they are compatible. `obs-websocket` has been integrated into `obs-studio`.
+
+- `signald` has been bumped to `0.23.0`. For the upgrade, a migration process is necessary. It can be
+  done by running a command like this before starting `signald.service`:
+
+  ```
+  signald -d /var/lib/signald/db \
+    --database sqlite:/var/lib/signald/db \
+    --migrate-data
+  ```
+
+  For further information, please read the upstream changelogs.
+
+- `stylua` no longer accepts `lua52Support` and `luauSupport` overrides. Use `features` instead, which defaults to `[ "lua54" "luau" ]`.
+
+- `ocamlPackages.ocaml_extlib` has been renamed to `ocamlPackages.extlib`.
+
+- `pkgs.fetchNextcloudApp` has been rewritten to circumvent impurities in e.g. tarballs from GitHub and to make it easier to
+  apply patches. This means that your hashes are out-of-date and the (previously required) attributes `name` and `version`
+  are no longer accepted.
+
+- The Syncthing service now only allows absolute paths (starting with `/` or
+  `~/`) for `services.syncthing.folders.<name>.path`.
+  In a future release other paths will be allowed again and interpreted
+  relative to `services.syncthing.dataDir`.
+
+- `services.github-runner` and `services.github-runners.<name>` gained the option `serviceOverrides` which allows overriding the systemd `serviceConfig`. If you have been overriding the systemd service configuration (i.e., by defining `systemd.services.github-runner.serviceConfig`), you have to use the `serviceOverrides` option now. Example:
+
+  ```
+  services.github-runner.serviceOverrides.SupplementaryGroups = [
+    "docker"
+  ];
+  ```
+
+<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
+
+## Other Notable Changes {#sec-release-22.11-notable-changes}
+
+- PHP is now built in `NTS` (Non-Thread Safe) mode by default.
+  - For Apache and `mod_php` usage, we enable `ZTS` (Zend Thread Safe) mode. This has been a
+  common practice for a long time in other distributions.
+
+- `firefox`, `thunderbird` and `librewolf` now come with Wayland support by default. The `firefox-wayland`, `firefox-esr-wayland`, `thunderbird-wayland` and `librewolf-wayland` attributes are obsolete and have been aliased to their generic attribute.
+
+- The `xplr` package has been updated from 0.18.0 to 0.19.0, which brings some breaking changes. See the [upstream release notes](https://github.com/sayanarijit/xplr/releases/tag/v0.19.0) for more details.
+
+- Configuring multiple GitHub runners is now possible through `services.github-runners.<name>`. The options under `services.github-runner` remain, to configure a single runner.
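+
+  For example, two runners could be declared like this (the repository URLs and token paths are placeholders):
+
+  ```nix
+  services.github-runners = {
+    runner-one = {
+      enable = true;
+      url = "https://github.com/example/repo-one";     # placeholder
+      tokenFile = "/run/secrets/runner-one-token";     # placeholder
+    };
+    runner-two = {
+      enable = true;
+      url = "https://github.com/example/repo-two";     # placeholder
+      tokenFile = "/run/secrets/runner-two-token";     # placeholder
+    };
+  };
+  ```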
+
+- `github-runner` gained support for ephemeral runners and registrations using a personal access token (PAT) instead of a registration token. See `services.github-runner.ephemeral` and `services.github-runner.tokenFile` for details.
+
+- A new module was added to provide hardware support for the Saleae Logic device family, providing the options `hardware.saleae-logic.enable` and `hardware.saleae-logic.package`.
+
+- ZFS module will no longer allow hibernation by default.
+  - This is a safety measure to prevent data loss cases like the ones described at [OpenZFS/260](https://github.com/openzfs/zfs/issues/260) and [OpenZFS/12842](https://github.com/openzfs/zfs/issues/12842).
+  - Use the `boot.zfs.allowHibernation` option to configure this behaviour.
+
+- Mastodon now automatically removes remote media attachments older than 30 days. This is configurable through `services.mastodon.mediaAutoRemove`.
+
+- The Redis module now disables RDB persistence when `services.redis.servers.<name>.save = []` instead of using the Redis default.
+
+- Neo4j was updated from version 3 to version 4. See upstream's [migration guide](https://neo4j.com/docs/upgrade-migration-guide/current/) for information on how to migrate your instance.
+
+- The `networking.wireguard` module can now set the MTU on interfaces and tag its packets with an fwmark.
+
+- The option `overrideStrategy` was added to the different systemd unit options (`systemd.services.<name>`, `systemd.sockets.<name>`, …) to allow enforcing the creation of a dropin file, rather than the main unit file, by setting it to `asDropin`.
+  This is useful in cases where the existence of the main unit file is not known to Nix at evaluation time, for example when the main unit file is provided by adding a package to `systemd.packages`.
+  See the fix proposed in [NixOS's systemd abstraction doesn't work with systemd template units](https://github.com/NixOS/nixpkgs/issues/135557#issuecomment-1295392470) for an example.
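+
+  A minimal sketch for a template unit shipped through `systemd.packages` (the unit name is illustrative):
+
+  ```nix
+  systemd.services."my-template@".overrideStrategy = "asDropin";
+  ```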
+
+- The `polymc` package has been removed due to a rogue maintainer. It has been
+  replaced by `prismlauncher`, a fork by the rest of the maintainers. For more
+  details, see [the PR that made this change](https://github.com/NixOS/nixpkgs/pull/196624) and
+  [the issue detailing the vulnerability](https://github.com/NixOS/nixpkgs/issues/196460).
+  Users with existing installations should rename `~/.local/share/polymc` to
+  `~/.local/share/PrismLauncher`. The main config file's path has also moved
+  from `~/.local/share/polymc/polymc.cfg` to
+  `~/.local/share/PrismLauncher/prismlauncher.cfg`.
+
+- The `bloat` package has been updated from unstable-2022-03-31 to unstable-2022-10-25, which brings a breaking change. See [this upstream commit message](https://git.freesoftwareextremist.com/bloat/commit/?id=887ed241d64ba5db3fd3d87194fb5595e5ad7d73) for details.
+
+- Synapse's systemd unit has been hardened.
+
+- The module `services.grafana` was refactored to be compliant with [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md). To be precise, this means that the following things have changed:
+  - The newly introduced option [](#opt-services.grafana.settings) is an attribute-set that
+    will be converted into Grafana's INI format. This means that the configuration from
+    [Grafana's configuration reference](https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/)
+    can be directly written as attribute-set in Nix within this option.
+  - The option `services.grafana.extraOptions` has been removed. This option was an attribute set
+    of environment variables for Grafana. If you had an expression like
+
+    ```nix
+    {
+      services.grafana.extraOptions.SECURITY_ADMIN_USER = "foobar";
+    }
+    ```
+
+    your Grafana instance was running with `GF_SECURITY_ADMIN_USER=foobar` in its environment.
+
+    For the migration, it is recommended to turn it into the INI format, i.e.
+    to declare
+
+    ```nix
+    {
+      services.grafana.settings.security.admin_user = "foobar";
+    }
+    ```
+
+    instead.
+
+    The keys in `services.grafana.extraOptions` have the format `<INI section name>_<Key Name>`.
+    Further details are outlined in the [configuration reference](https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#override-configuration-with-environment-variables).
+
+    Alternatively you can also set all your values from `extraOptions` to
+    `systemd.services.grafana.environment`, make sure you don't forget to add
+    the `GF_` prefix though!
+  - Previously, the options [services.grafana.provision.datasources](#opt-services.grafana.provision.datasources) and
+    [services.grafana.provision.dashboards](#opt-services.grafana.provision.dashboards) expected lists of datasources
+    or dashboards for the [declarative provisioning](https://grafana.com/docs/grafana/latest/administration/provisioning/).
+
+    To declare lists of
+    - **datasources**, please rename your declarations to [services.grafana.provision.datasources.settings.datasources](#opt-services.grafana.provision.datasources.settings.datasources).
+    - **dashboards**, please rename your declarations to [services.grafana.provision.dashboards.settings.providers](#opt-services.grafana.provision.dashboards.settings.providers).
+
+    This change was made to support more features for declarative provisioning:
+
+    - It's possible to declare the `apiVersion` of your dashboards and datasources
+      by [services.grafana.provision.datasources.settings.apiVersion](#opt-services.grafana.provision.datasources.settings.apiVersion) (or
+      [services.grafana.provision.dashboards.settings.apiVersion](#opt-services.grafana.provision.dashboards.settings.apiVersion)).
+
+    - Instead of declaring datasources and dashboards in pure Nix, it's also possible
+      to specify configuration files (or directories) with YAML instead using
+      [services.grafana.provision.datasources.path](#opt-services.grafana.provision.datasources.path) (or
+      [services.grafana.provision.dashboards.path](#opt-services.grafana.provision.dashboards.path)). This is useful if you have
+      provisioning files from non-NixOS Grafana instances that you also want to
+      deploy to NixOS.
+
+      __Note:__ secrets from these files will be leaked into the store unless you use a
+      [**file**-provider or env-var](https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider) for secrets!
+
+    - [services.grafana.provision.notifiers](#opt-services.grafana.provision.notifiers) is not affected by this change because
+      this feature is deprecated by Grafana and will probably be removed in Grafana 10.
+      It's recommended to use `services.grafana.provision.alerting.contactPoints` instead.
+
+- The `services.grafana.provision.alerting` option was added. It includes suboptions for every alerting-related object (with the exception of `notifiers`), which means it's now possible to configure modern Grafana alerting declaratively.
+
+- Synapse now requires entries in the `state_group_edges` table to be unique, in order to prevent accidentally introducing duplicate information (for example, because a database backup was restored multiple times). If your Synapse database already has duplicate rows in this table, this could fail with an error and require manual remediation.
+
+- The `diamond` package has been updated from 0.8.36 to 2.0.15. See the [upstream release notes](https://github.com/bbuchfink/diamond/releases) for more details.
+
+- The `guake` package has been updated from 3.6.3 to 3.9.0, see the [changelog](https://github.com/Guake/guake/releases) for more details.
+
+- The `netlify-cli` package has been updated from 6.13.2 to 12.2.4, see the [changelog](https://github.com/netlify/cli/releases) for more details.
+
+- `dockerTools.buildImage`'s `contents` parameter has been deprecated in favor of `copyToRoot`.
+  Use `copyToRoot = buildEnv { ... };` or similar if you intend to add packages to `/bin`.
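+
+  For example (the image and package names are only illustrative):
+
+  ```nix
+  dockerTools.buildImage {
+    name = "hello-image";
+    copyToRoot = pkgs.buildEnv {
+      name = "image-root";
+      paths = [ pkgs.hello ];
+      pathsToLink = [ "/bin" ];
+    };
+  }
+  ```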
+
+- The `proxmox.qemuConf.bios` option was added, it corresponds to `Hardware->BIOS` field in Proxmox web interface. Use `"ovmf"` value to build UEFI image, default value remains `"bios"`. New option `proxmox.partitionTableType` defaults to either `"legacy"` or `"efi"`, depending on the `bios` value. Setting `partitionTableType` to `"hybrid"` results in an image, which supports both methods (`"bios"` and `"ovmf"`), thereby remaining bootable after change to Proxmox `Hardware->BIOS` field.
+
+- memtest86+ was updated from 5.00-coreboot-002 to 6.00-beta2. It is now the upstream version from https://www.memtest.org/, as coreboot's fork is no longer available.
+
+- Option descriptions, examples, and defaults written in DocBook are now deprecated. Using CommonMark is preferred and will become the default in a future release.
+
+- The `documentation.nixos.options.allowDocBook` option was added to ease the transition to CommonMark option documentation. Setting this option to `false` causes an error for every option included in the manual that uses DocBook documentation; it defaults to `true` to preserve the previous behavior and will be removed once the transition to CommonMark is complete.
+
+- The Redis module now persists each instance's configuration file in the state directory, in order to support some more advanced use cases like Sentinel.
+
+- `protonup` has been aliased to and replaced by `protonup-ng` due to upstream not maintaining it.
+
+- The udisks2 service, available at `services.udisks2.enable`, is now disabled by default. It will automatically be enabled through services and desktop environments as needed.
+  This also means that polkit will now actually be disabled by default. The default for `security.polkit.enable` was already flipped in the previous release, but udisks2 being enabled by default re-enabled it.
+
+- Nextcloud has been updated to version **25**. Additionally the following things have changed
+  for Nextcloud in NixOS:
+  - For Nextcloud **>=24**, the default PHP version is 8.1.
+  - Nextcloud **23** has been removed since it will reach its [end of life in December 2022](https://github.com/nextcloud/server/wiki/Maintenance-and-Release-Schedule/d76576a12a626d53305d480a6065b57cab705d3d).
+  - If `system.stateVersion` is **>=22.11**, Nextcloud 25 will be installed by default. For older versions,
+    Nextcloud 24 will be installed.
+  - Please ensure that you only upgrade one major release at a time! Nextcloud doesn't support
+    upgrades across multiple versions, i.e. an upgrade from **23** to **25** is only possible
+    when upgrading to **24** first.
+
+- systemd-oomd is enabled by default. Depending on which systemd units have
+  `ManagedOOMSwap=kill` or `ManagedOOMMemoryPressure=kill`, systemd-oomd will
+  SIGKILL all the processes under the appropriate descendant cgroups when the
+  configured limits are exceeded. NixOS does not currently configure cgroups
+  with oomd by default; this can be enabled using
+  [systemd.oomd.enableRootSlice](options.html#opt-systemd.oomd.enableRootSlice),
+  [systemd.oomd.enableSystemSlice](options.html#opt-systemd.oomd.enableSystemSlice),
+  and [systemd.oomd.enableUserServices](options.html#opt-systemd.oomd.enableUserServices).
+
+- The `tt-rss` service performs two database migrations when you first use its web UI after upgrade. Consider backing up its database before updating.
+
+- The `pass-secret-service` package now includes systemd units from upstream, so adding it to the NixOS `services.dbus.packages` option will make it start automatically as a systemd user service when an application tries to talk to the libsecret D-Bus API.
+
+- The Wordpress module now has support for installing language packs through a new option, `services.wordpress.sites.<site>.languages`.
+
+- The default package for `services.mullvad-vpn.package` was changed to `pkgs.mullvad`, allowing cross-platform usage of Mullvad. `pkgs.mullvad` only contains the Mullvad CLI tool, so users who rely on the Mullvad GUI will want to change it back to `pkgs.mullvad-vpn`, or add `pkgs.mullvad-vpn` to their environment.
+
+- PowerDNS has been updated from v4.6.2 to v4.7.2. Please be sure to review the [Upgrade Notes](https://doc.powerdns.com/authoritative/upgrading.html#to-4-7-0-or-master) provided by upstream before upgrading. Worth specifically noting is that the new Catalog Zones feature comes with a mandatory schema change for the GSQL database backends, which has to be manually applied.
+
+- There is a new module for the `thunar` program (the Xfce file manager), which depends on the `xfconf` dbus service, and also has a dbus service and a systemd unit. The option `services.xserver.desktopManager.xfce.thunarPlugins` has been renamed to `programs.thunar.plugins`, and may be removed in a future release.
+
+- There is a new module for `xfconf` (the Xfce configuration storage system), which has a dbus service.
+
+- The Mastodon package has been upgraded to v4.0.0. See the [v4.0.0 release notes](https://github.com/mastodon/mastodon/releases/tag/v4.0.0) for a list of changes. On standard setups, no manual migration steps are required. Nevertheless, a database backup is recommended.
+
+- The `nomad` package now defaults to v1.3, which no longer has a downgrade path to v1.2 or older.
+
+- The `nodePackages` package set now defaults to the LTS release in the `nodejs` package again, instead of being pinned to `nodejs-14_x`. Several updates to node2nix have been made for compatibility with newer Node.js and npm versions and a new `postRebuild` hook has been added for packages to perform extra build steps before the npm install step prunes dev dependencies.
+
+- `boot.kernel.sysctl` is defined as a freeformType and adds a custom merge option for `net.core.rmem_max` (taking the highest value defined to avoid conflicts between 2 services trying to set that value).
+
+- The `mame` package does not ship with its tools anymore in the default output. They were moved to a separate `tools` output instead. For convenience, `mame-tools` package was added for those who want to use it.
+
+- A NixOS module for Firefox has been added which allows preferences and [policies](https://github.com/mozilla/policy-templates/blob/master/README.md) to be set. This also allows extensions to be installed via the `ExtensionSettings` policy. The new options are under `programs.firefox`.
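+
+  A minimal sketch (the policy name is a standard Mozilla policy, used here for illustration only):
+
+  ```nix
+  programs.firefox = {
+    enable = true;
+    policies.DisableTelemetry = true;
+  };
+  ```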
+
+- The option `services.picom.experimentalBackends` was removed since it is now the default and the option will cause `picom` to quit instead.
+
+- `haskellPackages.callHackage` is not always invalidated if `all-cabal-hashes` changes, leading to fewer rebuilds of Haskell dependencies.
+
+- `haskellPackages.callHackage` and `haskellPackages.callCabal2nix` (and related functions) no longer keep a reference to the `cabal2nix` call used to generate them. As a result, they will be garbage collected more often.
+
+<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
+
+## New Services {#sec-release-22.11-new-services}
+
+- [alps](https://git.sr.ht/~migadu/alps), a simple and extensible webmail. Available as [services.alps](#opt-services.alps.enable).
+
+- [appvm](https://github.com/jollheef/appvm), Nix based app VMs. Available as [virtualisation.appvm](options.html#opt-virtualisation.appvm.enable).
+
+- [AusweisApp2](https://www.ausweisapp.bund.de/), the authentication software for the German ID card. Available as [programs.ausweisapp](#opt-programs.ausweisapp.enable).
+
+- [automatic-timezoned](https://github.com/maxbrunet/automatic-timezoned), a Linux daemon to automatically update the system timezone based on location. Available as [services.automatic-timezoned](#opt-services.automatic-timezoned.enable).
+
+- [Dolibarr](https://www.dolibarr.org/), an enterprise resource planning and customer relationship manager. Enable using [services.dolibarr](#opt-services.dolibarr.enable).
+
+- [dragonflydb](https://dragonflydb.io/), a modern replacement for Redis and Memcached. Available as [services.dragonflydb](#opt-services.dragonflydb.enable).
+
+- [endlessh-go](https://github.com/shizunge/endlessh-go), an SSH tarpit that exposes Prometheus metrics. Available as [services.endlessh-go](#opt-services.endlessh-go.enable).
+
+- [endlessh](https://github.com/skeeto/endlessh), an SSH tarpit. Available as [services.endlessh](#opt-services.endlessh.enable).
+
+- [EVCC](https://evcc.io) is an EV charge controller with PV integration. It supports a multitude of chargers, meters, vehicle APIs and more and ties that together with a well-tested backend and a lightweight web frontend. Available as [services.evcc](#opt-services.evcc.enable).
+
+- [expressvpn](https://www.expressvpn.com), the CLI client for ExpressVPN. Available as [services.expressvpn](#opt-services.expressvpn.enable).
+
+- [FreshRSS](https://freshrss.org/), a free, self-hostable RSS feed aggregator. Available as [services.freshrss](#opt-services.freshrss.enable).
+
+- [Garage](https://garagehq.deuxfleurs.fr/), a simple object storage server for geodistributed deployments, alternative to MinIO. Available as [services.garage](#opt-services.garage.enable).
+
+- [go-autoconfig](https://github.com/L11R/go-autoconfig), IMAP/SMTP autodiscover server. Available as [services.go-autoconfig](#opt-services.go-autoconfig.enable).
+
+- [Grafana Tempo](https://www.grafana.com/oss/tempo/), a distributed tracing store. Available as [services.tempo](#opt-services.tempo.enable).
+
+- [HBase cluster](https://hbase.apache.org/), a distributed, scalable, big data store. Available as [services.hadoop.hbase](options.html#opt-services.hadoop.hbase.enable).
+
+- [infnoise](https://github.com/leetronics/infnoise), a hardware True Random Number Generator dongle. Available as [services.infnoise](options.html#opt-services.infnoise.enable).
+
+- [kanata](https://github.com/jtroo/kanata), a tool to improve keyboard comfort and usability with advanced customization. Available as [services.kanata](options.html#opt-services.kanata.enable).
+
+- [karma](https://github.com/prymitive/karma), an alert dashboard for Prometheus Alertmanager. Available as [services.karma](options.html#opt-services.karma.enable)
+
+- [Komga](https://komga.org/), a free and open source comics/mangas media server. Available as [services.komga](#opt-services.komga.enable).
+
+- [kthxbye](https://github.com/prymitive/kthxbye), an alert acknowledgement management daemon for Prometheus Alertmanager. Available as [services.kthxbye](options.html#opt-services.kthxbye.enable)
+
+- [languagetool](https://languagetool.org/), a multilingual grammar, style, and spell checker. Available as [services.languagetool](options.html#opt-services.languagetool.enable).
+
+- [Listmonk](https://listmonk.app), a self-hosted newsletter manager. Enable using [services.listmonk](options.html#opt-services.listmonk.enable).
+
+- [Mepo](https://mepo.milesalan.com), a fast, simple, hackable OSM map viewer for mobile and desktop Linux. Available as [programs.mepo.enable](#opt-programs.mepo.enable).
+
+- [merecat](https://troglobit.com/projects/merecat/), a small and easy HTTP server based on thttpd. Available as [services.merecat](#opt-services.merecat.enable)
+
+- [netbird](https://netbird.io), a zero configuration VPN. Available as [services.netbird](options.html#opt-services.netbird.enable).
+
+- [ntfy.sh](https://ntfy.sh), a push notification service. Available as [services.ntfy-sh](#opt-services.ntfy-sh.enable)
+
+- [OpenRGB](https://gitlab.com/CalcProgrammer1/OpenRGB/-/tree/master), a FOSS tool for controlling RGB lighting. Available as [services.hardware.openrgb.enable](options.html#opt-services.hardware.openrgb.enable).
+
+- [Outline](https://www.getoutline.com/), a wiki and knowledge base similar to Notion. Available as [services.outline](#opt-services.outline.enable).
+
+- [Patroni](https://github.com/zalando/patroni), a template for PostgreSQL HA with ZooKeeper, etcd or Consul. Available as [services.patroni](options.html#opt-services.patroni.enable).
+
+- [persistent-evdev](https://github.com/aiberia/persistent-evdev), a daemon to add virtual proxy devices that mirror a physical input device but persist even if the underlying hardware is hot-plugged. Available as [services.persistent-evdev](#opt-services.persistent-evdev.enable).
+
+- [Please](https://github.com/edneville/please), a Sudo clone written in Rust. Available as [security.please](#opt-security.please.enable).
+
+- [Prometheus IPMI exporter](https://github.com/prometheus-community/ipmi_exporter), an IPMI exporter for Prometheus. Available as [services.prometheus.exporters.ipmi](#opt-services.prometheus.exporters.ipmi.enable).
+
+- [Sachet](https://github.com/messagebird/sachet/), an SMS alerting tool for the Prometheus Alertmanager. Available as [services.prometheus.sachet](#opt-services.prometheus.sachet.enable).
+
+- [schleuder](https://schleuder.org/), a mailing list manager with PGP support. Enable using [services.schleuder](#opt-services.schleuder.enable).
+
+- [syncstorage-rs](https://github.com/mozilla-services/syncstorage-rs), a self-hostable sync server for Firefox. Available as [services.firefox-syncserver](options.html#opt-services.firefox-syncserver.enable).
+
+- [Tandoor Recipes](https://tandoor.dev), a self-hosted multi-tenant recipe collection. Available as [services.tandoor-recipes](options.html#opt-services.tandoor-recipes.enable).
+
+- [TAYGA](http://www.litech.org/tayga/), an out-of-kernel stateless NAT64 implementation. Available as [services.tayga](#opt-services.tayga.enable).
+
+- [tmate-ssh-server](https://github.com/tmate-io/tmate-ssh-server), server side part of [tmate](https://tmate.io/). Available as [services.tmate-ssh-server](#opt-services.tmate-ssh-server.enable).
+
+- [Uptime Kuma](https://uptime.kuma.pet/), a fancy self-hosted monitoring tool. Available as [services.uptime-kuma](#opt-services.uptime-kuma.enable).
+
+- [WriteFreely](https://writefreely.org), a simple blogging platform with ActivityPub support. Available as [services.writefreely](options.html#opt-services.writefreely.enable).
+
+- [xray](https://github.com/XTLS/Xray-core), a fully compatible v2ray-core replacement. Features XTLS, which when enabled on server and client, brings UDP FullCone NAT to proxy setups. Available as [services.xray](options.html#opt-services.xray.enable).
+
+<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md
new file mode 100644
index 000000000000..21c798b3b4a4
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2305.section.md
@@ -0,0 +1,664 @@
+# Release 23.05 (“Stoat”, 2023.05/31) {#sec-release-23.05}
+
+The NixOS release team is happy to announce a new version of NixOS. The release is called NixOS 23.05 ("Stoat").
+
+NixOS is a Linux distribution, whose set of packages can also be used on other Linux systems and macOS.
+
+Support is planned until the end of December 2023, handing over to NixOS 23.11.
+
+To upgrade to the latest release, follow the [upgrade chapter](https://nixos.org/manual/nixos/stable/index.html#sec-upgrading).
+
+## Highlights {#sec-release-23.05-highlights}
+
+In addition to numerous new and updated packages, this release has the following highlights:
+
+- The default [Nix](https://github.com/NixOS/nix) version was updated from 2.11 to 2.13. In particular, this includes a [small language alteration](https://github.com/NixOS/nix/issues/8259) in the way floats are represented in `builtins.toJSON`. See the release notes for [2.12](https://nixos.org/manual/nix/stable/release-notes/rl-2.12.html) and [2.13](https://nixos.org/manual/nix/unstable/release-notes/rl-2.13.html) for more information.
+
+- The default [Linux Kernel](https://kernel.org/) was updated from version 5.15 to 6.1, see [Kernelnewbies](https://kernelnewbies.org/Linux_6.1) for what has changed. All Kernels currently shown on [kernel.org](https://kernel.org/) are available.
+
+- [systemd](https://systemd.io) has been updated from v252 to v253, see [the release notes](https://github.com/systemd/systemd/blob/v253/NEWS#L3-L659) for more information on the changes.
+    - Updating with `nixos-rebuild boot` and rebooting is recommended, since in some rare cases the `nixos-rebuild switch` into the new generation on a live system might fail due to missing mount units.
+
+- [glibc](https://www.gnu.org/software/libc/) has been updated from version 2.35 to 2.37, see [the release notes](https://sourceware.org/glibc/wiki/Release/2.37) for what was changed.
+
+- [libxcrypt](https://github.com/besser82/libxcrypt), the library providing the `crypt(3)` password hashing function, is now built without support for algorithms not flagged [`strong`](https://github.com/besser82/libxcrypt/blob/v4.4.33/lib/hashes.conf#L48). This affects the availability of password hashing algorithms used for system login (`login(1)`, `passwd(1)`), but also Apache2 Basic-Auth, Samba, OpenLDAP, Dovecot, and [many other packages](https://sourcegraph.com/search?q=context:global+repo:%5Egithub%5C.com/NixOS/nixpkgs%24+libxcrypt&patternType=standard&sm=1&groupBy=path).
+
+- NixOS now defaults to using [nsncd](https://github.com/twosigma/nsncd), a non-caching reimplementation of nscd in Rust, as its NSS lookup dispatcher. This replaces the buggy and deprecated nscd implementation provided through glibc. When you find problems, you can switch back by disabling it:
+  ```nix
+  services.nscd.enableNsncd = false;
+  ```
+
+- The internal option `boot.bootspec.enable` is now enabled by default because [RFC 0125](https://github.com/NixOS/rfcs/pull/125) was merged. This means you will have a bootspec document called `boot.json` generated for each system and specialisation in the top-level. This is useful to enable advanced boot use cases in NixOS, such as Secure Boot.
+
+- Two changes to `nixos-rebuild` are important to highlight as well.
+    - Support for an extra `--specialisation` option was added that can be used to change specialisation for `switch` and `test` commands.
+    - The `--target-host` and `--build-host` options no longer treat the `localhost` value specially; to build on or deploy to the local machine, omit the relevant flag.
+
+- [Python](https://www.python.org) implements [PEP 668](https://peps.python.org/pep-0668/), providing better feedback to users that try to run `pip install` for system-wide or user home installations.
+
+- [Cinnamon](https://github.com/linuxmint/Cinnamon) has been updated to version 5.6, see [the pull request](https://github.com/NixOS/nixpkgs/pull/201328#issue-1449910204) for what was changed.
+
+- [GNOME](https://www.gnome.org) has been updated to version 44, see the [the release notes](https://release.gnome.org/44/) for details.
+
+- [KDE Plasma](https://kde.org/de/plasma-desktop/) has been updated to version 5.27, see [the release notes](https://kde.org/announcements/plasma/5/5.27.0/) for what was changed.
+
+- `openra` was updated to `20230225`. Due to the large scope of the update, currently only the `openraPackages.engines.release` and `openraPackages.engines.latest` packages are available.
+  If you want to use the old engine versions or mods, they were moved to the `openraPackages_2019` namespace.
+
+## New Services {#sec-release-23.05-new-services}
+
+- [Akkoma](https://akkoma.social), an ActivityPub microblogging server. Available as [services.akkoma](options.html#opt-services.akkoma.enable).
+
+- [alertmanager-irc-relay](https://github.com/google/alertmanager-irc-relay), a Prometheus Alertmanager IRC Relay. Available as [services.prometheus.alertmanagerIrcRelay](options.html#opt-services.prometheus.alertmanagerIrcRelay.enable).
+
+- [alice-lg](https://github.com/alice-lg/alice-lg), a looking-glass for BGP sessions. Available as [services.alice-lg](#opt-services.alice-lg.enable).
+
+- [atuin](https://github.com/ellie/atuin), a sync server for shell history. Available as [services.atuin](#opt-services.atuin.enable).
+
+- [authelia](https://www.authelia.com/), an open-source authentication and authorization server. Available as [services.authelia](options.html#opt-services.authelia.enable).
+
+- [birdwatcher](https://github.com/alice-lg/birdwatcher), a small HTTP server meant to provide an API defined by Barry O'Donovan's birds-eye to the BIRD internet routing daemon. Available as [services.birdwatcher](#opt-services.birdwatcher.enable).
+
+- [blesh](https://github.com/akinomyoga/ble.sh), a line editor written in pure bash. Available as [programs.bash.blesh](#opt-programs.bash.blesh.enable).
+
+- [Budgie Desktop](https://github.com/BuddiesOfBudgie/budgie-desktop), a familiar, modern desktop environment. Available as [services.xserver.desktopManager.budgie](options.html#opt-services.xserver.desktopManager.budgie).
+
+- [clash-verge](https://github.com/zzzgydi/clash-verge), a Clash GUI based on tauri. Available as [programs.clash-verge](#opt-programs.clash-verge.enable).
+
+- [Cloudlog](https://www.magicbug.co.uk/cloudlog/), a web-based Amateur Radio logging application. Available as [services.cloudlog](#opt-services.cloudlog.enable).
+
+- [consul-template](https://github.com/hashicorp/consul-template/), a template renderer, notifier, and supervisor for HashiCorp Consul and Vault data. Available as [services.consul-template](#opt-services.consul-template.instances).
+
+- [cups-pdf-to-pdf](https://github.com/alexivkin/CUPS-PDF-to-PDF), a PDF-generating CUPS backend based on [cups-pdf](https://www.cups-pdf.de/). Available as [services.printing.cups-pdf](#opt-services.printing.cups-pdf.enable).
+
+- [Deepin Desktop Environment](https://github.com/linuxdeepin/dde), an elegant, easy to use and reliable desktop environment. Available as [services.xserver.desktopManager.deepin](options.html#opt-services.xserver.desktopManager.deepin).
+
+- [esphome](https://esphome.io), a dashboard to configure ESP8266/ESP32 devices for use with Home Automation systems. Available as [services.esphome](#opt-services.esphome.enable).
+
+- [frigate](https://frigate.video), an open source NVR built around real-time AI object detection. Available as [services.frigate](#opt-services.frigate.enable).
+
+- [fzf](https://github.com/junegunn/fzf), a command line fuzzyfinder. Available as [programs.fzf](#opt-programs.fzf.fuzzyCompletion).
+
+- [gemstash](https://github.com/rubygems/gemstash), a RubyGems.org cache and private gem server. Available as [services.gemstash](#opt-services.gemstash.enable).
+
+- [gitea-actions-runner](https://gitea.com/gitea/act_runner), a CI runner for Gitea/Forgejo Actions. Available as [services.gitea-actions-runner](#opt-services.gitea-actions-runner.instances).
+
+- [evdevremapkeys](https://github.com/philipl/evdevremapkeys), a daemon to remap key events. Available as [services.evdevremapkeys](#opt-services.evdevremapkeys.enable).
+
+- [gmediarender](https://github.com/hzeller/gmrender-resurrect), a simple, headless UPnP/DLNA renderer.  Available as [services.gmediarender](options.html#opt-services.gmediarender.enable).
+
+- [go2rtc](https://github.com/AlexxIT/go2rtc), a camera streaming application with support for RTSP, WebRTC, HomeKit, FFMPEG, RTMP and other protocols. Available as [services.go2rtc](options.html#opt-services.go2rtc.enable).
+
+- [goeland](https://github.com/slurdge/goeland), an alternative to rss2email written in Golang with many filters. Available as [services.goeland](#opt-services.goeland.enable).
+
+- [gonic](https://github.com/sentriz/gonic), a Subsonic music streaming server. Available as [services.gonic](#opt-services.gonic.enable).
+
+- [hardware.ipu6](#opt-hardware.ipu6.enable), drivers for IPU6 based webcams on Intel Tiger Lake and Alder Lake.
+
+- [harmonia](https://github.com/nix-community/harmonia/), a Nix binary cache implemented in Rust using [libnixstore](https://docs.rs/libnixstore/latest/libnixstore/). Available as [services.harmonia](options.html#opt-services.harmonia.enable).
+
+- [hyprland](https://github.com/hyprwm/hyprland), a dynamic tiling Wayland compositor that doesn't sacrifice on its looks. Available as [programs.hyprland](#opt-programs.hyprland.enable).
+
+- [imaginary](https://github.com/h2non/imaginary), a microservice for high-level image processing that Nextcloud can use to generate previews. Available as [services.imaginary](#opt-services.imaginary.enable).
+
+- [ivpn](https://www.ivpn.net/), a secure, private VPN with fast WireGuard connections. Available as [services.ivpn](#opt-services.ivpn.enable).
+
+- [vmalert](https://victoriametrics.com/), an alerting engine for VictoriaMetrics. Available as [services.vmalert](#opt-services.vmalert.enable).
+
+- [jellyseerr](https://github.com/Fallenbagel/jellyseerr), a web-based requests manager for Jellyfin, forked from Overseerr. Available as [services.jellyseerr](#opt-services.jellyseerr.enable).
+
+- [kavita](https://kavitareader.com), a self-hosted digital library. Available as [services.kavita](options.html#opt-services.kavita.enable).
+
+- [keyd](https://github.com/rvaiya/keyd), a key remapping daemon for Linux. Available as [services.keyd](#opt-services.keyd.enable).
+
+- [lldap](https://github.com/lldap/lldap), a lightweight authentication server that provides an opinionated, simplified LDAP interface for authentication. Available as [services.lldap](#opt-services.lldap.enable).
+
+- [minipro](https://gitlab.com/DavidGriffith/minipro/), an open source program for controlling the MiniPRO TL866xx series of chip programmers. Available as [programs.minipro](options.html#opt-programs.minipro.enable).
+
+- [mmsd](https://gitlab.com/kop316/mmsd), a lower level daemon that transmits and receives MMSes. Available as [services.mmsd](#opt-services.mmsd.enable).
+
+- [monica](https://www.monicahq.com), an open source personal CRM. Available as [services.monica](options.html#opt-services.monica.enable).
+
+- [networkd-dispatcher](https://gitlab.com/craftyguy/networkd-dispatcher), a dispatcher service for systemd-networkd connection status changes. Available as [services.networkd-dispatcher](#opt-services.networkd-dispatcher.enable).
+
+- [nimdow](https://github.com/avahe-kellenberger/nimdow), a window manager written in Nim, inspired by dwm. Available as [services.xserver.windowManager.nimdow.enable](options.html#opt-services.xserver.windowManager.nimdow.enable).
+
+- [opensearch](https://opensearch.org), a search server alternative to Elasticsearch. Available as [services.opensearch](options.html#opt-services.opensearch.enable).
+
+- [openvscode-server](https://github.com/gitpod-io/openvscode-server), run VS Code on a remote machine with access through a modern web browser from any device, anywhere. Available as [services.openvscode-server](#opt-services.openvscode-server.enable).
+
+- [peroxide](https://github.com/ljanyst/peroxide), a fork of the official [ProtonMail bridge](https://github.com/ProtonMail/proton-bridge) that aims to be similar to [Hydroxide](https://github.com/emersion/hydroxide). Available as [services.peroxide](#opt-services.peroxide.enable).
+
+- [photoprism](https://photoprism.app/), an AI-powered photos app for the decentralized web. Available as [services.photoprism](options.html#opt-services.photoprism.enable).
+
+- [Pixelfed](https://pixelfed.org/), an Instagram-like ActivityPub server. Available as [services.pixelfed](options.html#opt-services.pixelfed.enable).
+
+- [PufferPanel](https://pufferpanel.com), a game server management panel designed to be easy to use. Available as [services.pufferpanel](#opt-services.pufferpanel.enable).
+
+- [QDMR](https://dm3mat.darc.de/qdmr/), a GUI application and command line tool for programming DMR radios. Available as [programs.qdmr](#opt-programs.qdmr.enable).
+
+- [readarr](https://github.com/Readarr/Readarr), book manager and automation (Sonarr for ebooks). Available as [services.readarr](options.html#opt-services.readarr.enable).
+
+- [ReGreet](https://github.com/rharish101/ReGreet), a clean and customizable greeter for greetd. Available as [programs.regreet](#opt-programs.regreet.enable).
+
+- [rshim](https://github.com/Mellanox/rshim-user-space), the user-space rshim driver for the BlueField SoC. Available as [services.rshim](options.html#opt-services.rshim.enable).
+
+- [SFTPGo](https://github.com/drakkan/sftpgo), a fully featured and highly configurable SFTP server with optional HTTP/S, FTP/S and WebDAV support. Available as [services.sftpgo](options.html#opt-services.sftpgo.enable).
+
+- [sharing](https://github.com/parvardegr/sharing), a command-line tool to share directories and files from the CLI to iOS and Android devices without the need of an extra client app. Available as [programs.sharing](#opt-programs.sharing.enable).
+
+- [sniffnet](https://github.com/GyulyVGC/sniffnet), an application to monitor your network traffic. Available as [programs.sniffnet](#opt-programs.sniffnet.enable).
+
+- [stargazer](https://sr.ht/~zethra/stargazer/), a fast and easy to use Gemini server. Available as [services.stargazer](#opt-services.stargazer.enable).
+
+- [stevenblack-blocklist](https://github.com/StevenBlack/hosts), a unified hosts file with base extensions for blocking unwanted websites. Available as [networking.stevenblack](options.html#opt-networking.stevenblack.enable).
+
+- [systemd-repart](https://www.freedesktop.org/software/systemd/man/systemd-repart.service.html), a tool to grow and add partitions to a partition table. Available as [systemd.repart](options.html#opt-systemd.repart) and [boot.initrd.systemd.repart](options.html#opt-boot.initrd.systemd.repart).
+
+- [trippy](https://github.com/fujiapple852/trippy), a network diagnostic tool. Available as [programs.trippy](#opt-programs.trippy.enable).
+
+- [tts](https://github.com/coqui-ai/TTS), a battle-tested deep learning toolkit for Text-to-Speech. Multiple servers may be configured below [services.tts.servers](#opt-services.tts.servers).
+
+- [ulogd](https://www.netfilter.org/projects/ulogd/index.html), a userspace logging daemon for netfilter/iptables related logging. Available as [services.ulogd](options.html#opt-services.ulogd.enable).
+
+- [v2rayA](https://v2raya.org), a Linux web GUI client of Project V which supports V2Ray, Xray, SS, SSR, Trojan and Pingtunnel. Available as [services.v2raya](options.html#opt-services.v2raya.enable).
+
+- [v4l2-relayd](https://git.launchpad.net/v4l2-relayd), a streaming relay for v4l2loopback using gstreamer. Available as [services.v4l2-relayd](#opt-services.v4l2-relayd.instances._name_.enable).
+
+- [vault-agent](https://developer.hashicorp.com/vault/docs/agent), a template renderer and API auth proxy for HashiCorp Vault, similar to `consul-template`. Available as [services.vault-agent](#opt-services.vault-agent.instances).
+
+- [webhook](https://github.com/adnanh/webhook), a lightweight webhook server. Available as [services.webhook](#opt-services.webhook.enable).
+
+- [wgautomesh](https://git.deuxfleurs.fr/Deuxfleurs/wgautomesh), a simple utility to help connect wireguard nodes together in a full mesh topology. Available as [services.wgautomesh](options.html#opt-services.wgautomesh.enable).
+
+- [woodpecker](https://woodpecker-ci.org/), a simple CI engine with great extensibility. Available as [services.woodpecker-server](#opt-services.woodpecker-server.enable) and [services.woodpecker-agents](#opt-services.woodpecker-agents.agents._name_.enable).
+
+- [wstunnel](https://github.com/erebe/wstunnel), a proxy tunnelling arbitrary TCP or UDP traffic through a WebSocket connection. Available as [services.wstunnel](options.html#opt-services.wstunnel.enable).
+
+## Backward Incompatibilities {#sec-release-23.05-incompatibilities}
+
+- `services.asusd` configuration now uses strings instead of structured configuration, as upstream switched to the [RON](https://github.com/ron-rs/ron) configuration format. Support for structured configuration may return when [RON](https://github.com/ron-rs/ron) generation is implemented in nixpkgs.
+
+- The `borgbackup` module now has an option for inhibiting system sleep while backups are running, defaulting to off (not inhibiting sleep), available as [`services.borgbackup.jobs.<name>.inhibitsSleep`](#opt-services.borgbackup.jobs._name_.inhibitsSleep).
+
+- The `openssh` client now comes with the `~C` escape sequence disabled by default. It can be re-enabled by setting `EnableEscapeCommandline yes`.
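+
+  On NixOS this could, for example, be re-enabled system-wide via the client configuration (a minimal sketch):
+
+  ```nix
+  # Re-enable the ~C escape command line for the ssh client.
+  programs.ssh.extraConfig = ''
+    EnableEscapeCommandline yes
+  '';
+  ```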
+
+- The `programs.ssh` client module does not read `/etc/ssh/ssh_known_hosts2` anymore, since this location is [deprecated since 2001](https://marc.info/?l=openssh-unix-dev&m=100508718416162&w=2).
+
+- The `services.openssh` server module does not read `~/.ssh/authorized_keys2` anymore, since this location is [deprecated since 2001](https://marc.info/?l=openssh-unix-dev&m=100508718416162&w=2).
+
+- MAC-then-encrypt algorithms were removed from the default selection of `services.openssh.settings.Macs`. If you still require these [MACs](https://en.wikipedia.org/wiki/Message_authentication_code), for example when you are relying on libssh2 (e.g. VLC) or the SSH library shipped on the iPhone, you can re-add them like this:
+
+  ```nix
+  services.openssh.settings.Macs = [
+    "hmac-sha2-512"
+    "hmac-sha2-256"
+    "umac-128@openssh.com"
+  ];
+  ```
+
+- `podman` now uses the `netavark` network stack. Users will need to delete all of their local containers, images, volumes, etc., by running `podman system reset --force` once before upgrading their systems.
+
+- `git-bug` has been updated to at least version 0.8.0, which includes backwards incompatible changes. The `git-bug-migration` package can be used to upgrade existing repositories.
+
+- `graylog` has been updated to version 5, which cannot be upgraded directly from the previously packaged version 3.3. If you had installed the previously packaged version 3.3, please follow the [upgrade path](https://go2docs.graylog.org/5-0/upgrading_graylog/upgrade_path.htm) from 3.3 to 4.0 to 4.3 to 5.0.
+
+- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implementation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.
+
+- `nushell` has been updated to at least version 0.77.0, which includes potential breaking changes in aliases. The old aliases are now available as `old-alias` but it is recommended you migrate to the new format. See [Reworked aliases](https://www.nushell.sh/blog/2023-03-14-nushell_0_77.html#reworked-aliases-breaking-changes-kubouch).
+
+- `gajim` has been updated to version 1.7.3 which has disabled legacy ciphers. See [changelog for version 1.7.0](https://dev.gajim.org/gajim/gajim/-/releases/1.7.0).
+
+- `keepassx` and `keepassx2` have been removed, due to upstream [stopping development](https://www.keepassx.org/index.html%3Fp=636.html). Consider [KeePassXC](https://keepassxc.org) as a maintained alternative.
+
+- The [services.kubo.settings](#opt-services.kubo.settings) option is now no longer stateful. If you changed any of the options in [services.kubo.settings](#opt-services.kubo.settings) in the past and then removed them from your NixOS configuration again, those changes are still in your Kubo configuration file but will now be reset to the default. If you're unsure, you may want to make a backup of your configuration file (probably `/var/lib/ipfs/config`) and compare after the update.
+
+- The Kubo HTTP API will no longer listen on localhost and will instead only listen on a Unix domain socket by default. Read the [services.kubo.settings.Addresses.API](#opt-services.kubo.settings.Addresses.API) option description for more information.
+
+- The EC2 image module no longer fetches instance metadata in stage-1. This results in a significantly smaller initramfs, since network drivers no longer need to be included, and faster boots, since metadata fetching can happen in parallel with startup of other services.
+  This breaks services which rely on metadata being present by the time stage-2 is entered. Anything which reads EC2 metadata from `/etc/ec2-metadata` should now have an `after` dependency on `fetch-ec2-metadata.service`.
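+
+  For example, a unit that reads the metadata might declare the dependency like this (the service name `my-metadata-consumer` is purely illustrative):
+
+  ```nix
+  systemd.services.my-metadata-consumer = {
+    wantedBy = [ "multi-user.target" ];
+    # Only start once the metadata files have been fetched.
+    after = [ "fetch-ec2-metadata.service" ];
+    wants = [ "fetch-ec2-metadata.service" ];
+    script = "ls /etc/ec2-metadata";
+  };
+  ```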
+
+- The mailman service now defaults to using a randomly generated REST API password instead of a hard-coded one.
+
+- `minio` removed support for its legacy filesystem backend in [RELEASE.2022-10-29T06-21-33Z](https://github.com/minio/minio/releases/tag/RELEASE.2022-10-29T06-21-33Z). This means if your storage was created with the old format, minio will no longer start. Unfortunately, minio doesn't provide an automatic migration, they only provide [instructions how to manually convert the node](https://min.io/docs/minio/windows/operations/install-deploy-manage/migrate-fs-gateway.html). To facilitate this migration, we keep around the last version that still supports the old filesystem backend as `minio_legacy_fs`. Use it via `services.minio.package = pkgs.minio_legacy_fs;` to export your data before switching to the new version. See the corresponding [issue](https://github.com/NixOS/nixpkgs/issues/199318) for more details.
+
+- `services.sourcehut.dispatch` and the corresponding package (`sourcehut.dispatchsrht`) have been removed due to [upstream deprecation](https://sourcehut.org/blog/2022-08-01-dispatch-deprecation-plans/).
+
+- The attributes used by `services.snapper.configs.<name>` have changed. Migrate from this:
+
+  ```nix
+  services.snapper.configs.example = {
+    subvolume = "/example";
+    extraConfig = ''
+      ALLOW_USERS="alice"
+    '';
+  };
+  ```
+
+  to this:
+
+  ```nix
+  services.snapper.configs.example = {
+    SUBVOLUME = "/example";
+    ALLOW_USERS = [ "alice" ];
+  };
+  ```
+
+- The default module options for [services.snapserver.openFirewall](#opt-services.snapserver.openFirewall), [services.tmate-ssh-server.openFirewall](#opt-services.tmate-ssh-server.openFirewall) and [services.unifi-video.openFirewall](#opt-services.unifi-video.openFirewall) have been changed from `true` to `false`. You will need to explicitly set this option to `true`, or configure your firewall.
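+
+  For example, to restore the previous behaviour for Snapserver:
+
+  ```nix
+  # Explicitly open the firewall ports again (the default is now false).
+  services.snapserver.openFirewall = true;
+  ```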
+
+- The option `i18n.inputMethod.fcitx5.enableRimeData` has been removed. RIME data is now included in `fcitx5-rime` by default, and can be customized using
+
+  ```nix
+  fcitx5-rime.override {
+    rimeDataPkgs = [
+      pkgs.rime-data
+      # ...
+    ];
+  }
+  ```
+
+- The `udev` hwdb.bin file is now built with systemd-hwdb rather than the [deprecated "udevadm hwdb"](https://github.com/systemd/systemd/pull/25714). This may impact mappings where the same key is defined in multiple matching entries. The updated behavior will select the latest definition in case of conflict. In general, this should be a positive change, as the hwdb source files are designed with this ordering in mind. As an example, the mapping of the HP Dev One keyboard scan code for "mute mic" is corrected by this update. This change may impact users who have worked-around previously incorrect mappings.
+
+- Kime has been updated from 2.5.6 to 3.0.2 and the `i18n.inputMethod.kime.config` option has been removed. Users should use `daemonModules`, `iconColor`, and `extraConfig` options under `i18n.inputMethod.kime` instead.
+
+- `tut` has been updated from 1.0.34 to 2.0.0, and now uses the TOML format for the configuration file instead of INI. Additional information can be found [here](https://github.com/RasmusLindroth/tut/releases/tag/2.0.0).
+
+- `i3status-rust` has been updated from 0.22.0 to 0.30.5, and this brings many changes to its configuration format. Additional information can be found [here](https://github.com/greshake/i3status-rust/blob/v0.30.0/NEWS.md).
+
+- The `wordpress` derivation no longer contains any built-in plugins or themes. If you need them, you have to add them back to prevent your site from breaking. You can find them in `wordpressPackages.{plugins,themes}`.
+
+- `llvmPackages_rocm.llvm` will not contain `clang` or `compiler-rt`. `llvmPackages_rocm.clang` will not contain `llvm`. `llvmPackages_rocm.clangNoCompilerRt` has been removed in favor of using `llvmPackages_rocm.clang-unwrapped`.
+
+- `services.xserver.desktopManager.plasma5.excludePackages` has been moved to `environment.plasma5.excludePackages`, for consistency with other Desktop Environments.
+
+- `teleport` has been updated from major version 10 to major version 12. Please see upstream [upgrade instructions](https://goteleport.com/docs/setup/operations/upgrading/) and release notes for versions [11](https://goteleport.com/docs/changelog/#1100) and [12](https://goteleport.com/docs/changelog/#1201). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 11.x version by setting `services.teleport.package = pkgs.teleport_11`. Afterwards, this option can be removed to upgrade to the default version (12).
+
+- The EC2 image module previously detected and automatically mounted ext3-formatted instance store devices and partitions in stage-1 (initramfs), storing `/tmp` on the first discovered device. This behaviour, which only catered to very specific use cases and could not be disabled, has been removed. Users relying on this should provide their own implementation, and probably use ext4 and perform the mount in stage-2.
+
+- The EC2 image module previously detected and activated swap-formatted instance store devices and partitions in stage-1 (initramfs). This behaviour has been removed. Users relying on this should provide their own implementation.
+
+- `gitlab` has been upgraded from major version 15 to major version 16 and requires at least PostgreSQL 13.6. Check the [upgrade guide](#module-services-postgres-upgrading) in the NixOS manual on how to upgrade your PostgreSQL installation.
+
+- `gitlab` 16 deprecates the use of external container registries, in our case `pkgs.docker-distribution`. Module users who have [`services.gitlab.registry.enable`](#opt-services.gitlab.registry.enable) set to `true` are advised to back up their state and switch to gitlab's fork by setting [`services.gitlab.registry.package`](#opt-services.gitlab.registry.package) to `pkgs.gitlab-container-registry`.
+
+- `fail2ban` has been updated to 1.0.2, which has a few breaking changes compared to 0.11.2 ([changelog for 1.0.1](https://github.com/fail2ban/fail2ban/blob/1.0.1/ChangeLog), [changelog for 1.0.2](https://github.com/fail2ban/fail2ban/blob/1.0.2/ChangeLog)).
+
+- `albert` has been updated from 0.17.6 to 0.20.13, and 0.18.0 changed the config format and many plugins ([changelog for 0.18.0](https://github.com/albertlauncher/albert/blob/v0.18.0/CHANGELOG.md)).
+
+- `dokuwiki` has been updated from 2022-07-31a (Igor) to 2023-04-04 (Jack Jackrum), which has [completely removed](https://www.dokuwiki.org/changes#release_2023-04-04_jack_jackrum) the options to embed HTML and PHP for security reasons. The [htmlok plugin](https://www.dokuwiki.org/plugin:htmlok) can be used to regain this functionality.
+
+- The old unsupported version 6.x of the ELK-stack and Elastic beats have been removed. Use OpenSearch instead.
+
+- The `cosmoc` package has been removed. The upstream scripts in `cosmocc` should be used instead.
+
+- Qt 5.12 and 5.14 have been removed, as the corresponding branches have been EOL upstream for a long time. This affected fewer than 10 packages in nixpkgs, which were largely unmaintained upstream as well; however, out-of-tree package expressions may need to be updated manually.
+
+- The [services.wordpress.sites.&lt;name&gt;.plugins](#opt-services.wordpress.sites._name_.plugins) and [services.wordpress.sites.&lt;name&gt;.themes](#opt-services.wordpress.sites._name_.themes) options have been converted from sets to attribute sets to allow for consumers to specify explicit install paths via attribute name.
+
+- The `protonmail-bridge` package has been updated to major version 3.
+
+- Nebula now runs as a system user and group created for each nebula network, using the `CAP_NET_ADMIN` ambient capability on launch rather than starting as root. Ensure that any files each Nebula instance needs to access are owned by the correct user and group, by default `nebula-${networkName}`.
+
+- The `i18n.inputMethod.fcitx` option has been replaced with `i18n.inputMethod.fcitx5` because fcitx 4 (`pkgs.fcitx`) has been removed.
+
+- The `mastodon` module now requires the location of a file containing the `PostgreSQL` database password to be specified explicitly: the default value of `services.mastodon.database.passwordFile` has been changed from `/var/lib/mastodon/secrets/db-password` to `null`.
+
+- The `nix.readOnlyStore` option has been renamed to `boot.readOnlyNixStore` to clarify that it configures the NixOS boot process, not the Nix daemon.
+
+- The latest available version of Nextcloud is v26 (available as `pkgs.nextcloud26`) which uses PHP 8.2 as interpreter by default. The installation logic is as follows:
+  - If `system.stateVersion` is >=23.05, `pkgs.nextcloud26` will be installed by default.
+  - If `system.stateVersion` is >=22.11, `pkgs.nextcloud25` will be installed by default.
+  - Please note that a direct upgrade from v24 (or older) to v26 is not possible, as Nextcloud prohibits skipping major versions while upgrading. Please upgrade to `nextcloud25` (or an earlier intermediate version, if needed) first. You can do so by declaring [`services.nextcloud.package = pkgs.nextcloud25;`](options.html#opt-services.nextcloud.package).
+  - It's recommended to use the latest version available (i.e. v26) and to specify that using `services.nextcloud.package`.
+
+- .NET 5.0 and .NET 3.1 were removed due to being end-of-life; use a newer, supported .NET version. Visit the [Support Policy](https://dotnet.microsoft.com/en-us/platform/support/policy/dotnet-core) for more information.
+
+- The iputils package, which is installed by default, no longer provides the
+  `ninfod`, `rarpd` and `rdisc` tools. See [upstream's release notes](https://github.com/iputils/iputils/releases/tag/20221126) for more details and available replacements.
+
+- The ppp plugin `rp-pppoe.so` has been renamed to `pppoe.so` in ppp 2.4.9. Starting from ppp 2.5.0, there is no longer an alias for backwards compatibility. Configurations that use this plugin must be updated accordingly from `plugin rp-pppoe.so` to `plugin pppoe.so`. See [upstream change](https://github.com/ppp-project/ppp/commit/610a7bd76eb1f99f22317541b35001b1e24877ed).
+
+- [services.xserver.videoDrivers](options.html#opt-services.xserver.videoDrivers) now defaults to the `modesetting` driver over device-specific ones. The `radeon`, `amdgpu` and `nouveau` drivers are still available, but effectively unmaintained and not recommended for use. Note that this __does not__ affect your regular graphics drivers; this only concerns the DDX component of the driver, which most people are not relying on.
+
+- [services.xserver.libinput.enable](options.html#opt-services.xserver.libinput.enable) is now set by default, enabling the more actively maintained and consistently behaved input device driver.
+
+- To enable the HTTP3 (QUIC) protocol for a nginx virtual host, set the `quic` attribute on it to true, e.g. `services.nginx.virtualHosts.<name>.quic = true;`.
+
+- In `services.fail2ban`, `bantime-increment.<name>` options now default to `null` (except `bantime-increment.enable`) and are used to set the corresponding option in `jail.local` only if not `null`. Also, enforce that `bantime-increment.formula` and `bantime-increment.multipliers` are not both specified.
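+
+  A minimal sketch of enabling increasing ban times (the `multipliers` value shown here is only illustrative; check the module's option types for the exact format):
+
+  ```nix
+  services.fail2ban.bantime-increment = {
+    enable = true;
+    # Set either `formula` or `multipliers`, not both.
+    multipliers = "1 2 4 8 16 32 64";
+  };
+  ```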
+
+- The default `asterisk` package was changed to v20 from v19. Asterisk versions 16 and 19 have been dropped due to being EOL. You may need to update `/var/lib/asterisk` to match the template files in `${asterisk-20}/var/lib/asterisk`.
+
+- conntrack helper autodetection has been removed upstream from kernels 6.0 and up, and an assertion was added to ensure things don't silently stop working. Migrate your configuration to assign helpers explicitly or use an older LTS kernel branch as a temporary workaround.
+
+- The `services.pipewire.config` options have been removed, as they have basically never worked correctly. All behavior defined by the default configuration can be overridden with drop-in files as necessary - see [below](#sec-release-23.05-migration-pipewire) for details.
+
+- The catch-all `hardware.video.hidpi.enable` option was removed. Users on high density displays may want to:
+
+  - Set `services.xserver.upscaleDefaultCursor` to upscale the default X11 cursor for higher resolutions
+  - Adjust settings under `fonts.fontconfig` according to preference
+  - Adjust `console.font` according to preference, though the kernel will generally choose a reasonably sized font
+
+- `services.pipewire.media-session` and the `pipewire-media-session` package have been removed, as they are no longer supported upstream. Users are encouraged to use `services.pipewire.wireplumber` instead.
+
+- The `baget` package and module were removed due to being unmaintained.
+
+- The `qlandkartegt` and `garmindev` packages were removed due to being unmaintained and insecure.
+
+- The `go-ethereum` package has been updated to v1.11.5 and the `puppeth` command is no longer available as of v1.11.0.
+
+- The `pnpm` package has been updated from version 7.29.1 to version 8.1.1, and Node.js 14 support has been discontinued (though there are workarounds if Node.js 14 is still required).
+  - Migration instructions: ["Before updating pnpm to v8 in your CI, regenerate your pnpm-lock.yaml. To upgrade your lockfile, run pnpm install and commit the changes. Existing dependencies will not be updated; however, due to configuration changes in pnpm v8, some missing peer dependencies may be added to the lockfile and some packages may get deduplicated. You can commit the new lockfile even before upgrading Node.js in the CI, as pnpm v7 already supports the new lockfile format."](https://github.com/pnpm/pnpm/releases/tag/v8.0.0)
+
+- The `zplug` package changes its output path from `$out` to `$out/share/zplug`. Users should update their dependency on `${pkgs.zplug}/init.zsh` to `${pkgs.zplug}/share/zplug/init.zsh`.
+
+- The `pict-rs` package was updated from an 0.3 alpha release to 0.3 stable, and related environment variables now require two underscores instead of one.
+
+- The `shattered-pixel-dungeon` game was updated from 1.1.2 to 2.0.2.
+  - The location of game data has changed. To migrate it, run `mv ~/.shatteredpixel ~/.local/share/.shatteredpixel`
+  - The update will delete all your in-progress games.
+
+- `espanso` has been updated to major version 2. Therefore, migration steps may need to be performed. See [the official migration instructions](https://espanso.org/docs/migration/overview/) for how to perform these migrations. Further, `espanso-wayland` can now be used for Wayland support.
+
+- Only `k3s` version 1.26 is included. Users of the `k3s_1_24` or `k3s_1_25` packages should upgrade to use the `1.26` version of the package.
+
+- The `nerdfonts` package has been updated to major version 3, which includes potential [breaking changes](https://github.com/ryanoasis/nerd-fonts/releases/tag/v3.0.0).
+
+## Other Notable Changes {#sec-release-23.05-notable-changes}
+
+<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
+
+- To follow [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md) a few options of `openssh` have been moved from `extraConfig` to the new freeform option `settings` and renamed as follows (see the example after this list):
+  - `services.openssh.forwardX11` to `services.openssh.settings.X11Forwarding`
+  - `services.openssh.kbdInteractiveAuthentication` to `services.openssh.settings.KbdInteractiveAuthentication`
+  - `services.openssh.passwordAuthentication` to `services.openssh.settings.PasswordAuthentication`
+  - `services.openssh.useDns` to `services.openssh.settings.UseDns`
+  - `services.openssh.permitRootLogin` to `services.openssh.settings.PermitRootLogin`
+  - `services.openssh.logLevel` to `services.openssh.settings.LogLevel`
+  - `services.openssh.kexAlgorithms` to `services.openssh.settings.KexAlgorithms`
+  - `services.openssh.macs` to `services.openssh.settings.Macs`
+  - `services.openssh.ciphers` to `services.openssh.settings.Ciphers`
+  - `services.openssh.gatewayPorts` to `services.openssh.settings.GatewayPorts`
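+
+  For instance, a configuration using the old top-level options now reads like this (a minimal sketch with illustrative values):
+
+  ```nix
+  services.openssh.settings = {
+    PasswordAuthentication = false;
+    PermitRootLogin = "no";
+  };
+  ```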
+
+
+- `vim_configurable` has been renamed to `vim-full` to avoid confusion: `vim-full`'s build-time features are configurable, but both `vim` and `vim-full` are _customizable_ (in the sense of user configuration, like vimrc).
+
+- Pantheon now defaults to Mutter 43 and GNOME settings daemon 43; all Pantheon packages now track elementary OS 7 updates.
+
+- The module for the application firewall `opensnitch` gained the ability to configure rules. Available as [services.opensnitch.rules](#opt-services.opensnitch.rules).
+
+- The `usbmuxd` module now has the ability to change the package used by the daemon. In case you're experiencing issues with `usbmuxd`, you can try an alternative program like `usbmuxd2`. Available as [services.usbmuxd.package](#opt-services.usbmuxd.package).
+
+- `netbox` was updated to 3.5. NixOS' `services.netbox.package` still defaults to 3.3 if `stateVersion` is earlier than 23.05. Please review upstream's breaking changes [for 3.4.0](https://github.com/netbox-community/netbox/releases/tag/v3.4.0) and [for 3.5.0](https://github.com/netbox-community/netbox/releases/tag/v3.5.0), and upgrade NetBox by changing `services.netbox.package`. Database migrations will be run automatically.
+
+- `services.netbox` now supports RFC 0042-style options through `services.netbox.settings`.
+
+- `services.mastodon` gained a tootctl wrapper named `mastodon-tootctl`, similar to `nextcloud-occ`, which can be executed by any user; it switches to the configured Mastodon user with sudo and sources the required environment variables.
+
+- `services.borgmatic` now allows multiple configurations, placed in `/etc/borgmatic.d/`; you can define them with `services.borgmatic.configurations`.
+
+- `services.openafsServer` features a new backup server `pkgs.fabs` as a
+  replacement for openafs's own `buserver`. See
+  [FABS](https://github.com/openafs-contrib/fabs) to check whether it is a viable
+  replacement. It stores backups as volume dump files and thus integrates better
+  into contemporary backup solutions.
+
+- `services.maddy` got several updates:
+  - Configuration of users and their credentials using `services.maddy.ensureCredentials`.
+  - TLS configuration is now possible via `services.maddy.tls` with two loaders present: ACME and file based.
+
+- The `dnsmasq` service now takes configuration via the
+  `services.dnsmasq.settings` attribute set. The option
+  `services.dnsmasq.extraConfig` will be deprecated when NixOS 22.11 reaches
+  end of life.
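+
+  A minimal sketch of the new style; the keys mirror plain `dnsmasq.conf` directives and the upstream servers shown are placeholders:
+
+  ```nix
+  services.dnsmasq.settings = {
+    domain-needed = true;
+    server = [ "9.9.9.9" "149.112.112.112" ];
+  };
+  ```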
+
+- The `dokuwiki` service is now configured via `services.dokuwiki.sites.<name>.settings` attribute set; `extraConfig` has been removed.
+  The `{aclUse,superUser,disableActions}` attributes have been renamed accordingly. `pluginsConfig` now only accepts an attribute set of booleans.
+  Passing plain PHP is no longer possible.
+  The same applies to `acl`, which now also only accepts structured `settings`.
+
+- The `zsh` package changes the way to set environment variables on NixOS systems where `programs.zsh.enable` equals `false`.  It now sources `/etc/set-environment` when reading the system-level `zshenv` file.  Before, it sourced `/etc/profile` when reading the system-level `zprofile` file.
+
+- The `wordpress` service now takes configuration via the `services.wordpress.sites.<name>.settings` attribute set; `extraConfig` is still available to append additional text to `wp-config.php`.
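+
+  A minimal sketch, assuming a site named `example.org` (`WP_DEBUG` is just an illustrative `wp-config.php` constant):
+
+  ```nix
+  services.wordpress.sites."example.org".settings = {
+    WP_DEBUG = true;
+  };
+  ```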
+
+- To reduce closure size, the `nixos/modules/profiles/minimal.nix` profile now disables the installation of documentation and manuals, as well as the `logrotate` and `udisks2` services.
+
+- To reduce closure size, the `nixos/modules/installer/netboot/netboot-minimal.nix` profile now disables loading Linux firmware, pre-installing the complete stdenv, and the `networking.wireless` service.
+
+- The minimal ISO image now uses the `nixos/modules/profiles/minimal.nix` profile.
+
+- NixOS installer ISOs can now be built for `powerpc64le-linux`; see `nixos/modules/installer/sd-card/sd-image-powerpc64le.nix` and [PR 192672](https://github.com/NixOS/nixpkgs/pull/192672).  Hydra does not support this platform, so you must build the binaries yourself.
+
+- The `ghcWithPackages` and `ghcWithHoogle` wrappers will now also symlink GHC's
+  and all included libraries' documentation to `$out/share/doc` for convenience.
+  If undesired, the old behavior can be restored by overriding the builders with
+  `{ installDocumentation = false; }`.
+
+- The nftables module now validates its ruleset at build time. The new `networking.nftables.checkRuleset` option allows disabling this check, which may fail when rules have very specific requirements that the sandbox environment does not cover by default. The `networking.nftables.preCheckRuleset` option can be used to prepare the environment before the checks are run.
+
+- The `services.mastodon` module now supports connection to a remote `PostgreSQL` database.
+
+- [`services.nextcloud.database.createLocally`](#opt-services.nextcloud.database.createLocally) now uses socket authentication and is no longer compatible with password authentication.
+  - If you want the module to manage the database for you, unset [`services.nextcloud.config.dbpassFile`](#opt-services.nextcloud.config.dbpassFile) (and [`services.nextcloud.config.dbhost`](#opt-services.nextcloud.config.dbhost), if it's set).
+  - If you want to use password authentication **and** create the database locally, you will have to use [`services.mysql`](#opt-services.mysql.enable) to set it up.
+
+- [`services.nextcloud.config.objectstore.s3.sseCKeyFile`](#opt-services.nextcloud.config.objectstore.s3.sseCKeyFile) is a new option to enable server-side encryption with customer provided keys (SSE-C) for your S3 in Nextcloud.
+
+- NixOS swap partitions with random encryption can now control the sector size, cipher, and key size used to set up the plain encryption device over the underlying block device rather than allowing them to be determined by `cryptsetup(8)`. One can use these features like so:
+
+  ```nix
+  swapDevices = [ {
+    device = "/dev/disk/by-partlabel/swapspace";
+    randomEncryption = {
+      enable = true;
+      cipher = "aes-xts-plain64";
+      keySize = 512;
+      sectorSize = 4096;
+    };
+  } ];
+  ```
+
+- The new option `security.pam.zfs` enables unlocking and mounting of an encrypted ZFS home dataset at login.
+
+- `services.peertube` now requires you to specify the secret file `secrets.secretsFile`. It can be generated by running `openssl rand -hex 32`. Before upgrading, check the release notes for [PeerTube v5.0.0](https://github.com/Chocobozzz/PeerTube/releases/tag/v5.0.0) and back up your data.
+
+- `services.chronyd` is now started with additional systemd sandbox/hardening options for better security.
+
+- PostgreSQL has added opt-in support for [JIT compilation](https://www.postgresql.org/docs/current/jit-reason.html). It can be enabled like this:
+  ```nix
+  services.postgresql.enableJIT = true;
+  ```
+
+- `services.netdata` offers a [`services.netdata.deadlineBeforeStopSec`](#opt-services.netdata.deadlineBeforeStopSec) option which will control the deadline (in seconds) after which systemd will consider your netdata instance as dead if it didn't start in the elapsed time. It is helpful when your netdata instance takes longer to start because of a large amount of state or upgrades.
+
+- The `dhcpcd` service no longer solicits or accepts IPv6 Router Advertisements on interfaces that use static IPv6 addresses.
+  If your network provides both IPv6 unique local addresses (ULA) and globally unique addresses (GUA) through autoconfiguration with SLAAC, you must set `networking.dhcpcd.IPv6rs = true;`.
+
+- The module `services.headscale` was refactored to be compliant with [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md). To be precise, this means that the following things have changed:
+
+  - Most settings have been migrated below [services.headscale.settings](#opt-services.headscale.settings) which is a freeform attribute-set that will be converted into headscale's YAML config format. This means that the configuration from [headscale's example configuration](https://github.com/juanfont/headscale/blob/main/config-example.yaml) can be directly written as attribute-set in Nix within this option.
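+
+  For instance, values from headscale's example configuration translate directly into Nix (the hostnames here are placeholders):
+
+  ```nix
+  services.headscale.settings = {
+    server_url = "https://headscale.example.com";
+    dns_config.base_domain = "example.com";
+  };
+  ```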
+
+- `services.kubo` now unmounts `ipfsMountDir` and `ipnsMountDir` even if it is killed unexpectedly when `autoMount` is enabled.
+
+- `services.grafana` listens only on localhost by default again. This was changed to the upstream default of `0.0.0.0` by accident in the freeform setting conversion.
+
+- Grafana Tempo has been updated to version 2.0. See the [upstream upgrade guide](https://grafana.com/docs/tempo/latest/release-notes/v2-0/#upgrade-considerations) for migration instructions.
+
+- A new `virtualisation.rosetta` module was added to allow running `x86_64` binaries through [Rosetta](https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment) inside virtualised NixOS guests on Apple Silicon. This feature works by default with the [UTM](https://docs.getutm.app/) virtualisation [package](https://search.nixos.org/packages?channel=23.05&show=utm&from=0&size=1&sort=relevance&type=packages&query=utm).
+
+- The new option `users.motdFile` allows configuring a Message Of The Day that can be updated dynamically.
+
+- The `root` package is now built with the `"-Dgnuinstall=ON"` CMake flag, making the output conform to the `bin`, `lib`, `share` layout. In this layout, `tutorials` is under `share/doc/ROOT/`; `cmake`, `font`, `icons`, `js` and `macro` are under `share/root`; `Makefile.comp` and `Makefile.config` are under `etc/root`.
+
+- There are various new options in the `services.nginx` module:
+    - Enabling global redirect in `services.nginx.virtualHosts` now allows one to add exceptions with the `locations` option.
+    - The `proxyCachePath` option has been added to `services.nginx`. It allows configuring the [`proxy_cache_path`](https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path), that configures the storage path and various other settings for the cache.
+    - A new option `recommendedBrotliSettings` has been added to `services.nginx`. Learn more about compression in Brotli format [here](https://github.com/google/ngx_brotli/blob/master/README.md).
+    - `services.nginx.recommendedProxySettings` now removes the `Connection` header, preventing clients from closing backend connections.
+
+- The nginx module also received an update to `services.nginx.recommendedGzipSettings`:
+  - Enables gzip compression only for certain proxied requests.
+  - Allows checking and loading of precompressed files.
+  - Updates the gzip mime-types.
+  - Increases the minimum length of a response that will be gzipped.
+
+- [Garage](https://garagehq.deuxfleurs.fr/) version is based on [system.stateVersion](options.html#opt-system.stateVersion), existing installations will keep using version 0.7. New installations will use version 0.8. In order to upgrade a Garage cluster, please follow [upstream instructions](https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/) and configure [services.garage.package](options.html#opt-services.garage.package).
+
+- Nebula now supports the `services.nebula.networks.<name>.isRelay` and `services.nebula.networks.<name>.relays` configuration options for setting up or allowing traffic relaying. See the [announcement](https://www.defined.net/blog/announcing-relay-support-in-nebula/) for more details about relays.
+
+- Resilio sync secret keys can now be provided using a secrets file at runtime, preventing these secrets from ending up in the Nix store.
+
+- The `firewall` and `nat` modules can now optionally rely on an nftables based implementation. Enable `networking.nftables` to use it.
+
+- The `services.fwupd` module now allows arbitrary daemon settings to be configured in a structured manner ([`services.fwupd.daemonSettings`](#opt-services.fwupd.daemonSettings)).
+
+- `services.xserver.desktopManager.plasma5.phononBackend` now defaults to VLC, according to the [upstream recommendation](https://community.kde.org/Distributions/Packaging_Recommendations#Non-Plasma_packages).
+
+- The `zramSwap` is now implemented with `zram-generator`, and the option `zramSwap.numDevices` for using ZRAM devices as general purpose ephemeral block devices has been removed.
+
+- As Singularity has been renamed to [Apptainer](https://apptainer.org/news/community-announcement-20211130)
+  to distinguish it from [an un-renamed fork by Sylabs Inc.](https://sylabs.io/2021/05/singularity-community-edition),
+  there are now two packages of Singularity/Apptainer:
+  * `apptainer`: from `github.com/apptainer/apptainer`, which is the new repository after the rename.
+  * `singularity`: from `github.com/sylabs/singularity`, which is the fork by Sylabs Inc.
+
+  `singularity-tools.buildImage` got a new input argument `singularity` to specify which package to use.
+
+- The new option `programs.singularity.enableFakeroot`, if set to `true`, provides `--fakeroot` support for `apptainer` and `singularity`.
+
+- The new option `services.tailscale.useRoutingFeatures` controls various settings for using Tailscale features like exit nodes and subnet routers. If you wish to use your machine as an exit node, you can set this setting to `server`, otherwise if you wish to use an exit node you can set this setting to `client`. The strict RPF warning has been removed as the RPF will be loosened automatically based on the value of this setting.
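+
+  For instance, on a machine that should act as an exit node or subnet router:
+
+  ```nix
+  services.tailscale = {
+    enable = true;
+    useRoutingFeatures = "server"; # or "client" if this machine only uses an exit node
+  };
+  ```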
+
+- `openjdk` from version 11 and above is no longer built with `openjfx` (i.e. JavaFX) support by default. You can re-enable it by overriding, e.g.: `openjdk11.override { enableJavaFX = true; }`.
+
+- [Xastir](https://xastir.org/index.php/Main_Page) can now access AX.25 interfaces via the `libax25` package.
+
+- `nixos-version` now accepts `--configuration-revision` to display more information about the current generation's revision.
+
+- The option `services.nomad.extraSettingsPlugins` has been fixed to allow more than one plugin in the path.
+
+- The option `services.prometheus.exporters.pihole.interval` has been removed.
+
+- The option `services.gpsd.device` has been replaced with `services.gpsd.devices`, which supports multiple devices.
+
+- `k3s` can now be configured with an `EnvironmentFile` for its systemd service, allowing secrets to be provided without ending up in the Nix Store.
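+
+  A minimal sketch, assuming the module exposes this as `services.k3s.environmentFile` and that the secrets file lives at `/run/secrets/k3s.env`:
+
+  ```nix
+  services.k3s = {
+    enable = true;
+    # Environment file read by the k3s systemd unit; kept out of the Nix store.
+    environmentFile = "/run/secrets/k3s.env";
+  };
+  ```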
+
+- The `gitea` module options have been moved into a freeform attribute set below `services.gitea.settings`.
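+
+  For example, settings that map to sections of Gitea's `app.ini` can now be written like this (the domain is a placeholder):
+
+  ```nix
+  services.gitea.settings = {
+    server.ROOT_URL = "https://git.example.com/";
+    session.COOKIE_SECURE = true;
+  };
+  ```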
+
+- `boot.initrd.luks.devices.<name>` has a new `tryEmptyPassphrase` option; this is useful for OEMs who need to install an encrypted disk whose passphrase will be set by the end user later.
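+
+  A sketch of how an OEM image might use it (the device name and partition label are placeholders):
+
+  ```nix
+  boot.initrd.luks.devices."cryptroot" = {
+    device = "/dev/disk/by-partlabel/cryptroot";
+    # Try unlocking with an empty passphrase before prompting.
+    tryEmptyPassphrase = true;
+  };
+  ```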
+
+- The `bind` module now allows the per-zone `allow-query` setting to be configured (previously it was hard-coded to `any`; it still defaults to `any` to retain compatibility).
+
+- The option `services.jitsi-videobridge.apis` has been renamed to `colibriRestApi` and turned into a boolean. Setting it to `true` will enable the private rest API, useful for monitoring using `services.prometheus.exporters.jitsi.enable`. Learn more about the API: "[The COLIBRI control interface (/colibri/)](https://github.com/jitsi/jitsi-videobridge/blob/v2.3/doc/rest.md)".
+
+- Booting from a volume managed by the Stratis storage management daemon is now supported. Use `fileSystems.<name>.stratis.poolUuid` to configure the pool containing the file system.
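+
+  A minimal sketch; the device path, pool name and UUID below are placeholders:
+
+  ```nix
+  fileSystems."/" = {
+    device = "/dev/stratis/pool1/root";
+    fsType = "xfs";
+    # UUID of the Stratis pool that contains this file system.
+    stratis.poolUuid = "b4b4e5c4-7cf0-47ea-b1b4-45f08b67b3f0";
+  };
+  ```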
+
+## Nixpkgs internals {#sec-release-23.05-nixpkgs-internals}
+
+- `buildDunePackage` now defaults to `strictDeps = true`, which means that any library should go into `buildInputs` or `checkInputs`. Any executable that is run on the building machine should go into `nativeBuildInputs` or `nativeCheckInputs` respectively. Examples of such executables are `ocaml`, `findlib` and `menhir`. PPXs are libraries which are built by dune and should therefore not go into `nativeBuildInputs`.
+
+- `buildFHSUserEnv` is now called `buildFHSEnv` and uses FlatPak's Bubblewrap sandboxing tool rather than Nixpkgs' own chrootenv. The old chrootenv-based implementation is still available via `buildFHSEnvChroot` but is considered deprecated and will be removed when the remaining uses inside Nixpkgs have been migrated. If your FHSEnv-wrapped application misbehaves when using the new bubblewrap implementation, please create an issue in Nixpkgs.
+
+- Top-level `buildPlatform`, `hostPlatform`, `targetPlatform` have been deprecated, use `stdenv.X` instead.
+
+- `carnix` and `cratesIO` have been removed due to being unmaintained, use alternatives such as [naersk](https://github.com/nix-community/naersk) and [crate2nix](https://github.com/kolloch/crate2nix) instead.
+
+- `checkInputs` have been renamed to `nativeCheckInputs`, because they behave the same as `nativeBuildInputs` when `doCheck` is set. `checkInputs` now denote a new type of dependencies, added to `buildInputs` when `doCheck` is set. As a rule of thumb, `nativeCheckInputs` are tools on `$PATH` used during the tests, and `checkInputs` are libraries which are linked to executables built as part of the tests. Similarly, `installCheckInputs` are renamed to `nativeInstallCheckInputs`, corresponding to `nativeBuildInputs`, and `installCheckInputs` are a new type of dependencies added to `buildInputs` when `doInstallCheck` is set. (Note that this change will not cause breakage to derivations with `strictDeps` unset, which are most packages except python, rust, ocaml and go packages).
+
+- DocBook option documentation, which has been deprecated since 22.11, will now cause a warning when documentation is built. Out-of-tree modules should migrate to using CommonMark documentation as outlined in [](#sec-option-declarations) to silence this warning.
+
+  DocBook option documentation support will be removed in the next release and CommonMark will become the default. DocBook option documentation that has not been migrated until then will no longer render properly or cause errors.
+
+- `lib.systems.examples.ghcjs` and consequently `pkgsCross.ghcjs` now use the target triplet `javascript-unknown-ghcjs` instead of `js-unknown-ghcjs`. This has been done to match an [upstream decision](https://gitlab.haskell.org/ghc/ghc/-/commit/6636b670233522f01d002c9b97827d00289dbf5c) to follow Cabal's platform naming more closely. Nixpkgs will also reject `js` as an architecture name.
+
+- Lisp gained a [manual section](https://nixos.org/manual/nixpkgs/stable/#lisp), documenting a new and backwards incompatible interface. The previous interface will be removed in a future release.
+
+- Calling `makeSetupHook` without passing a `name` argument is deprecated.
+
+- `nixos/lib/make-disk-image.nix` handles `contents` arguments that are directories better, fixing a bug where it used to put them in a subdirectory of the intended `target`.
+
+- `nixos/lib/make-disk-image.nix` can now mutate EFI variables, run user-provided EFI firmware or variable templates. This is now extensively documented in the NixOS manual.
+
+- Nixpkgs now uses [IEEE-standard floating point arithmetic](https://github.com/NixOS/nixpkgs/pull/170215) on `powerpc64le-linux`.
+
+- Deprecated `xlibsWrapper` transitional package has been removed in favour of direct use of its constituents: `xorg.libX11`, `freetype` and others.
+
+## Detailed migration information {#sec-release-23.05-migration}
+
+### Pipewire configuration overrides {#sec-release-23.05-migration-pipewire}
+
+#### Why this change? {#sec-release-23.05-migration-pipewire-why}
+
+The Pipewire config semantics don't really match the NixOS module semantics, so it's extremely awkward to override the default config, especially when lists are involved. Vendoring the configuration files in nixpkgs also creates unnecessary maintenance overhead.
+
+Also, upstream added a lot of accommodations to allow doing most of the things you'd want to do with a config edit in better ways.
+
+#### Migrating your configuration {#sec-release-23.05-migration-pipewire-how}
+
+Compare your settings to [the defaults](https://gitlab.freedesktop.org/pipewire/pipewire/-/tree/master/src/daemon) and identify where your configuration differs from them.
+
+Then, create a drop-in JSON file in `/etc/pipewire/<config file name>.d/99-custom.conf` (the actual filename can be anything) and migrate your changes to it according to the following sections.
+
+Repeat for every file you've modified, changing the directory name accordingly.
+
+#### Things you can just copy over {#sec-release-23.05-migration-pipewire-simple}
+
+If you are:
+
+- setting properties via `*.properties`
+- loading a new module to `context.modules`
+- creating new objects with `context.objects`
+- declaring SPA libraries with `context.spa-libs`
+- running custom commands with `context.exec`
+- adding new rules with `*.rules`
+- running custom PulseAudio commands with `pulse.cmd`
+
+Move the definitions into the drop-in.
+
+Note that the use of `context.exec` is not recommended and other methods of running your thing are likely a better option.
+
+```json
+{
+  "context.properties": {
+    "your.property.name": "your.property.value"
+  },
+  "context.modules": [
+    { "name": "libpipewire-module-my-cool-thing" }
+  ],
+  "context.objects": [
+    { "factory": { ... } }
+  ],
+  "alsa.rules": [
+    { "matches: { ... }, "actions": { ... } }
+  ]
+}
+```
+
+#### Removing a module from `context.modules` {#sec-release-23.05-migration-pipewire-removing-modules}
+
+Look for an option to disable it via `context.properties` (`"module.x11.bell": "false"` is likely the most common use case here).
+If one is not available, proceed to [Nuclear option](#sec-release-23.05-migration-pipewire-nuclear).
+
+#### Modifying a module's parameters in `context.modules` {#sec-release-23.05-migration-pipewire-modifying-modules}
+
+For most modules (e.g. `libpipewire-module-rt`) it's enough to load the module again with the new arguments, e.g.:
+
+```json
+{
+  "context.modules": [
+    {
+      "name": "libpipewire-module-rt",
+      "args": {
+        "rt.prio": 90
+      }
+    }
+  ]
+}
+```
+
+Note that `module-rt` specifically will generally use the highest values available by default, so setting limits on the `pipewire` systemd service is preferable to reloading.
+
+If reloading the module is not an option, proceed to [Nuclear option](#sec-release-23.05-migration-pipewire-nuclear).
+
+#### Nuclear option {#sec-release-23.05-migration-pipewire-nuclear}
+If all else fails, you can still manually copy the contents of the default configuration file
+from `${pkgs.pipewire}/share/pipewire` to `/etc/pipewire` and edit it to fully override the default.
+However, this should be done only as a last resort. Please talk to the Pipewire maintainers if you ever need to do this.
diff --git a/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md b/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md
new file mode 100644
index 000000000000..e2853569423d
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/release-notes/rl-2311.section.md
@@ -0,0 +1,604 @@
+# Release 23.11 (“Tapir”, 2023.11/??) {#sec-release-23.11}
+
+## Highlights {#sec-release-23.11-highlights}
+
+- FoundationDB now defaults to major version 7.
+
+- PostgreSQL now defaults to major version 15.
+
+- Support for WiFi6 (IEEE 802.11ax) and WPA3-SAE-PK was enabled in the `hostapd` package, along with a significant rework of the hostapd module.
+
+- LXD now supports virtual machine instances to complement the existing container support.
+
+- The `nixos-rebuild` command has been given a `list-generations` subcommand. See `man nixos-rebuild` for more details.
+
+- [systemd](https://systemd.io) has been updated from v253 to v254, see [the release notes](https://github.com/systemd/systemd/blob/v254/NEWS#L3-L659) for more information on the changes.
+    - `boot.resumeDevice` **must be specified** when hibernating if not in EFI mode.
+    - systemd may warn about the permissions of your ESP partition (often `/boot`); this warning can be ignored for now, while we are looking
+      into a satisfying solution regarding this problem.
+    - Updating with `nixos-rebuild boot` and rebooting is recommended, since in some rare cases the `nixos-rebuild switch` into the new generation on a live system might fail due to missing mount units.
+
+- [`sudo-rs`], a reimplementation of `sudo` in Rust, is now supported.
+  An experimental new module `security.sudo-rs` was added.
+  Switching to it (via `security.sudo.enable = false; security.sudo-rs.enable = true;`) introduces
+  slight changes in sudo behaviour, due to `sudo-rs`' current limitations:
+  - terminfo-related environment variables aren't preserved for `root` and `wheel`;
+  - `root` and `wheel` are not given the ability to set (or preserve)
+    arbitrary environment variables.
+
+- [glibc](https://www.gnu.org/software/libc/) has been updated from version 2.37 to 2.38, see [the release notes](https://sourceware.org/glibc/wiki/Release/2.38) for what was changed.
+
+[`sudo-rs`]: https://github.com/memorysafety/sudo-rs/
+
+- `linuxPackages_testing_bcachefs` is now soft-deprecated in favour of `linuxPackages_testing`.
+  - Please consider changing your NixOS configuration's `boot.kernelPackages` to `linuxPackages_testing` until a stable kernel with bcachefs support is released.
+
+- All [ROCm](https://rocm.docs.amd.com/en/latest/) packages have been updated to 5.7.0.
+  - [ROCm](https://rocm.docs.amd.com/en/latest/) package attribute sets are versioned: `rocmPackages` -> `rocmPackages_5`.
+
+- `yarn-berry` has been updated to 4.0.1. This means that NodeJS versions less than `18.12` are no longer supported by it. More details at the [upstream changelog](https://github.com/yarnpkg/berry/blob/master/CHANGELOG.md).
+
+- If a user has a custom shell enabled via `users.users.${USERNAME}.shell = ${CUSTOMSHELL}`, an
+  assertion will now require them to also set `programs.${CUSTOMSHELL}.enable = true`. This is
+  generally safe behavior, but anyone needing to opt out of the check can set
+  `users.users.${USERNAME}.ignoreShellProgramCheck = true`.
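+
+  For example, a configuration that uses fish for a hypothetical user `alice` now also needs the matching program module enabled:
+
+  ```nix
+  users.users.alice = {
+    isNormalUser = true;
+    shell = pkgs.fish;
+  };
+  # Required by the new assertion for custom shells.
+  programs.fish.enable = true;
+  ```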
+
+- Cassandra now defaults to 4.x, updated from 3.11.x.
+
+## New Services {#sec-release-23.11-new-services}
+
+- [MCHPRS](https://github.com/MCHPR/MCHPRS), a multithreaded Minecraft server built for redstone. Available as [services.mchprs](#opt-services.mchprs.enable).
+
+- [acme-dns](https://github.com/joohoi/acme-dns), a limited DNS server to handle ACME DNS challenges easily and securely. Available as [services.acme-dns](#opt-services.acme-dns.enable).
+
+- [frp](https://github.com/fatedier/frp), a fast reverse proxy to help you expose a local server behind a NAT or firewall to the Internet. Available as [services.frp](#opt-services.frp.enable).
+
+<!-- To avoid merge conflicts, consider adding your item at an arbitrary place in the list instead. -->
+
+- [river](https://github.com/riverwm/river), a dynamic tiling Wayland compositor. Available as [programs.river](#opt-programs.river.enable).
+
+- [wayfire](https://wayfire.org), a modular and extensible Wayland compositor. Available as [programs.wayfire](#opt-programs.wayfire.enable).
+
+- [mautrix-whatsapp](https://docs.mau.fi/bridges/go/whatsapp/index.html), a Matrix-WhatsApp puppeting bridge.
+
+- [hddfancontrol](https://github.com/desbma/hddfancontrol), a service to regulate fan speeds based on hard drive temperature. Available as [services.hddfancontrol](#opt-services.hddfancontrol.enable).
+
+- [GoToSocial](https://gotosocial.org/), an ActivityPub social network server, written in Golang. Available as [services.gotosocial](#opt-services.gotosocial.enable).
+
+- [Castopod](https://castopod.org/), an open-source hosting platform made for podcasters who want to engage and interact with their audience. Available as [services.castopod](#opt-services.castopod.enable).
+
+- [Typesense](https://github.com/typesense/typesense), a fast, typo-tolerant search engine for building delightful search experiences. Available as [services.typesense](#opt-services.typesense.enable).
+
+- [NS-USBLoader](https://github.com/developersu/ns-usbloader/), an all-in-one tool for managing Nintendo Switch homebrew. Available as [programs.ns-usbloader](#opt-programs.ns-usbloader.enable).
+
+- [Mobilizon](https://joinmobilizon.org/), a Fediverse platform for publishing events.
+
+- [Anuko Time Tracker](https://github.com/anuko/timetracker), a simple, easy to use, open source time tracking system. Available as [services.anuko-time-tracker](#opt-services.anuko-time-tracker.enable).
+
+- [Prometheus MySQL exporter](https://github.com/prometheus/mysqld_exporter), a MySQL server exporter for Prometheus. Available as [services.prometheus.exporters.mysqld](#opt-services.prometheus.exporters.mysqld.enable).
+
+- [LibreNMS](https://www.librenms.org), an auto-discovering PHP/MySQL/SNMP-based network monitoring system. Available as [services.librenms](#opt-services.librenms.enable).
+
+- [Livebook](https://livebook.dev/), an interactive notebook with support for Elixir, graphs, machine learning, and more.
+
+- [sitespeed-io](https://sitespeed.io), a tool that can generate metrics (timings, diagnostics) for websites. Available as [services.sitespeed-io](#opt-services.sitespeed-io.enable).
+
+- [stalwart-mail](https://stalw.art), an all-in-one email server (SMTP, IMAP, JMAP). Available as [services.stalwart-mail](#opt-services.stalwart-mail.enable).
+
+- [tang](https://github.com/latchset/tang), a server for binding data to network presence. Available as [services.tang](#opt-services.tang.enable).
+
+- [Jool](https://nicmx.github.io/Jool/en/index.html), a kernelspace NAT64 and SIIT implementation, providing translation between IPv4 and IPv6. Available as [networking.jool.enable](#opt-networking.jool.enable).
+
+- [Home Assistant Satellite], a streaming audio satellite for Home Assistant voice pipelines, where you can reuse existing mic/speaker hardware. Available as [services.homeassistant-satellite](#opt-services.homeassistant-satellite.enable).
+
+- [Apache Guacamole](https://guacamole.apache.org/), a cross-platform, clientless remote desktop gateway. Available as [services.guacamole-server](#opt-services.guacamole-server.enable) and [services.guacamole-client](#opt-services.guacamole-client.enable) services.
+
+- [pgBouncer](https://www.pgbouncer.org), a PostgreSQL connection pooler. Available as [services.pgbouncer](#opt-services.pgbouncer.enable).
+
+- [Goss](https://goss.rocks/), a YAML based serverspec alternative tool for validating a server's configuration. Available as [services.goss](#opt-services.goss.enable).
+
+- [trust-dns](https://trust-dns.org/), a Rust based DNS server built to be safe and secure from the ground up. Available as [services.trust-dns](#opt-services.trust-dns.enable).
+
+- [osquery](https://www.osquery.io/), a tool providing SQL-powered operating system instrumentation, monitoring, and analytics.
+
+- [ebusd](https://ebusd.eu), a daemon for handling communication with eBUS devices connected to a 2-wire bus system (“energy bus” used by numerous heating systems). Available as [services.ebusd](#opt-services.ebusd.enable).
+
+- [systemd-sysupdate](https://www.freedesktop.org/software/systemd/man/systemd-sysupdate.html), a tool that atomically updates the host OS, container images, portable service images or other sources. Available as [systemd.sysupdate](#opt-systemd.sysupdate).
+
+- [eris-server](https://codeberg.org/eris/eris-go). [ERIS](https://eris.codeberg.page/) is an encoding for immutable storage and this server provides block exchange as well as content decoding over HTTP and through a FUSE file-system. Available as [services.eris-server](#opt-services.eris-server.enable).
+
+- [forgejo](https://forgejo.org/), a git forge. Previously deployed as a drop-in replacement package in the [gitea module](#opt-services.gitea.package). Available as [services.forgejo](#opt-services.forgejo.enable). See migration instructions in the [NixOS manual](#module-forgejo) on how to migrate your forgejo instance using [`services.gitea.package = pkgs.forgejo`](#opt-services.gitea.package) to [`services.forgejo`](#opt-services.forgejo.enable).
+
+- hardware/infiniband.nix adds infiniband subnet manager support using an [opensm](https://github.com/linux-rdma/opensm) systemd-template service, instantiated on card guids. The module also adds kernel modules and cli tooling to help administrators debug and measure performance. Available as [hardware.infiniband.enable](#opt-hardware.infiniband.enable).
+
+- [zwave-js](https://github.com/zwave-js/zwave-js-server), a small server wrapper around Z-Wave JS to access it via a WebSocket. Available as [services.zwave-js](#opt-services.zwave-js.enable).
+
+- [Honk](https://humungus.tedunangst.com/r/honk), a complete ActivityPub server with minimal setup and support costs.
+  Available as [services.honk](#opt-services.honk.enable).
+
+- [ferretdb](https://www.ferretdb.io/), an open-source proxy that converts MongoDB 6.0+ wire protocol queries to PostgreSQL or SQLite. Available as [services.ferretdb](#opt-services.ferretdb.enable).
+
+- [MicroBin](https://microbin.eu/), a feature rich, performant and secure text and file sharing web application, a "paste bin". Available as [services.microbin](#opt-services.microbin.enable).
+
+- [NNCP](http://www.nncpgo.org/). Added nncp-daemon and nncp-caller services. Configuration is set with [programs.nncp.settings](#opt-programs.nncp.settings) and the daemons are enabled at [services.nncp](#opt-services.nncp.caller.enable).
+
+- [FastNetMon Advanced](https://fastnetmon.com/product-overview/), a commercial high performance DDoS detector / sensor. Available as [services.fastnetmon-advanced](#opt-services.fastnetmon-advanced.enable).
+
+- [tuxedo-rs](https://github.com/AaronErhardt/tuxedo-rs), Rust utilities for interacting with hardware from TUXEDO Computers.
+
+- [certspotter](https://github.com/SSLMate/certspotter), a certificate transparency log monitor. Available as [services.certspotter](#opt-services.certspotter.enable).
+
+- [audiobookshelf](https://github.com/advplyr/audiobookshelf/), a self-hosted audiobook and podcast server. Available as [services.audiobookshelf](#opt-services.audiobookshelf.enable).
+
+- [ZITADEL](https://zitadel.com), a turnkey identity and access management platform. Available as [services.zitadel](#opt-services.zitadel.enable).
+
+- [exportarr](https://github.com/onedr0p/exportarr), Prometheus Exporters for Bazarr, Lidarr, Prowlarr, Radarr, Readarr, and Sonarr. Available as [services.prometheus.exporters.exportarr-bazarr](#opt-services.prometheus.exporters.exportarr-bazarr.enable)/[services.prometheus.exporters.exportarr-lidarr](#opt-services.prometheus.exporters.exportarr-lidarr.enable)/[services.prometheus.exporters.exportarr-prowlarr](#opt-services.prometheus.exporters.exportarr-prowlarr.enable)/[services.prometheus.exporters.exportarr-radarr](#opt-services.prometheus.exporters.exportarr-radarr.enable)/[services.prometheus.exporters.exportarr-readarr](#opt-services.prometheus.exporters.exportarr-readarr.enable)/[services.prometheus.exporters.exportarr-sonarr](#opt-services.prometheus.exporters.exportarr-sonarr.enable).
+
+- [netclient](https://github.com/gravitl/netclient), an automated WireGuard® Management Client. Available as [services.netclient](#opt-services.netclient.enable).
+
+- [trunk-ng](https://github.com/ctron/trunk), a fork of `trunk` for building, bundling and shipping your Rust WASM application to the web.
+
+- [virt-manager](https://virt-manager.org/), a UI for managing virtual machines in libvirt, is now available as `programs.virt-manager`.
+
+- [Soft Serve](https://github.com/charmbracelet/soft-serve), a tasty, self-hostable Git server for the command line. Available as [services.soft-serve](#opt-services.soft-serve.enable).
+
+- [Rosenpass](https://rosenpass.eu/), a service for post-quantum-secure VPNs with WireGuard. Available as [services.rosenpass](#opt-services.rosenpass.enable).
+
+- [c2FmZQ](https://github.com/c2FmZQ/c2FmZQ/), an application that can securely encrypt, store, and share files, including but not limited to pictures and videos. Available as [services.c2fmzq-server](#opt-services.c2fmzq-server.enable).
+
+## Backward Incompatibilities {#sec-release-23.11-incompatibilities}
+
+- `services.postgresql.ensurePermissions` has been deprecated in favor of `services.postgresql.ensureUsers.*.ensureDBOwnership`, which simplifies the setup of databases owned by a certain system user
+  in local database contexts (which make use of peer authentication via UNIX sockets). Migration guidelines are provided in the NixOS manual; please refer to them if you are affected by PostgreSQL 15 changing the way `GRANT ALL PRIVILEGES` works. `services.postgresql.ensurePermissions` will be removed in 24.05. All NixOS modules were migrated using one of these strategies, e.g. `ensureDBOwnership` or `postStart`. More about this situation can be found in https://github.com/NixOS/nixpkgs/pull/266270.
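+
+  For example, a minimal sketch of the new setup (the service user and database name `myservice` are placeholders; `ensureDBOwnership` expects the database and user to share the same name):
+
+  ```nix
+  {
+    services.postgresql.ensureDatabases = [ "myservice" ];
+    services.postgresql.ensureUsers = [
+      {
+        # placeholder name, must match the ensured database
+        name = "myservice";
+        ensureDBOwnership = true;
+      }
+    ];
+  }
+  ```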
+
+- `network-online.target` has been fixed to no longer time out for systems with `networking.useDHCP = true` and `networking.useNetworkd = true`.
+  Workarounds for this can be removed.
+
+- The `boot.loader.raspberryPi` options have been marked deprecated, with intent for removal for NixOS 24.11. They had a limited use-case, and do not work like people expect. They required either very old installs ([before mid-2019](https://github.com/NixOS/nixpkgs/pull/62462)) or customized builds out of scope of the standard and generic AArch64 support. That option set never supported the Raspberry Pi 4 family of devices.
+
+- `python3.pkgs.sequoia` was removed in favor of `python3.pkgs.pysequoia`. The latter package is based on upstream's dedicated repository for sequoia's Python bindings, where the Python bindings from [gitlab:sequoia-pgp/sequoia](https://gitlab.com/sequoia-pgp/sequoia) were removed long ago.
+
+- `writeTextFile` now requires `executable` to be a boolean; values like `null` or `""` will now fail to evaluate.
+
+- The latest version of `clonehero` now stores custom content in `~/.clonehero`. See the [migration instructions](https://clonehero.net/2022/11/29/v23-to-v1-migration-instructions.html). Typically, these content files would exist alongside the binary, but the previous build used a wrapper script that would store them in `~/.config/unity3d/srylain Inc_/Clone Hero`.
+
+- `services.mastodon` doesn't support providing a TCP port to its `streaming` component anymore, as upstream implemented parallelization by running multiple instances instead of running multiple processes in one instance. Please create a PR if you are interested in this feature.
+
+- The `services.hostapd` module was rewritten to support `passwordFile` like options, WPA3-SAE, and management of multiple interfaces. This breaks compatibility with older configurations.
+  - `hostapd` is now started with additional systemd sandbox/hardening options for better security.
+  - `services.hostapd.interface` was replaced with a per-radio and per-bss configuration scheme using [services.hostapd.radios](#opt-services.hostapd.radios).
+  - `services.hostapd.wpa` has been replaced by [services.hostapd.radios.&lt;name&gt;.networks.&lt;name&gt;.authentication.wpaPassword](#opt-services.hostapd.radios._name_.networks._name_.authentication.wpaPassword) and [services.hostapd.radios.&lt;name&gt;.networks.&lt;name&gt;.authentication.saePasswords](#opt-services.hostapd.radios._name_.networks._name_.authentication.saePasswords), which configure WPA2-PSK and WPA3-SAE respectively, as sketched below.
+  - The default authentication has been changed to WPA3-SAE. Options for other (legacy) schemes are still available.
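+
+  A minimal sketch of the new per-radio scheme (radio/interface name, SSID and password are placeholders; the `ssid` option alongside `authentication` is assumed here):
+
+  ```nix
+  {
+    services.hostapd.radios.wlan0 = {
+      networks.wlan0 = {
+        ssid = "mynetwork";             # placeholder SSID
+        authentication.saePasswords = [
+          { password = "CHANGE-ME"; }   # placeholder WPA3-SAE password
+        ];
+      };
+    };
+  }
+  ```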
+
+- `python3.pkgs.fetchPypi` (and `python3Packages.fetchPypi`) has been deprecated in favor of top-level `fetchPypi`.
+
+- `pass` no longer contains `password-store.el`. Users should get `password-store.el` from the Emacs lisp package set: `emacs.pkgs.password-store`.
+
+- `services.knot` now supports `.settings` following RFC 0042. The previous `.extraConfig` still works the same, but it now displays a warning.
+
+- `mu` no longer installs `mu4e` files by default. Users should get `mu4e` from the Emacs lisp package set: `emacs.pkgs.mu4e`.
+
+- `mariadb` now defaults to `mariadb_1011` instead of `mariadb_106`, meaning the default version was upgraded from 10.6.x to 10.11.x. See the [upgrade notes](https://mariadb.com/kb/en/upgrading-from-mariadb-10-6-to-mariadb-10-11/) for potential issues.
+
+- `getent` has been moved from `glibc`'s `bin` output to its own dedicated output, reducing closure size for many dependents. Dependents using the `getent` alias should not be affected; others should move from using `glibc.bin` or `getBin glibc` to `getent` (which also improves compatibility with non-glibc platforms).
+
+- `maintainers/scripts/update-luarocks-packages` is now a proper package
+  `luarocks-packages-updater` that can be run to maintain out-of-tree luarocks
+  packages
+
+- The `users.users.<name>.passwordFile` option has been renamed to `users.users.<name>.hashedPasswordFile` to avoid possible confusion. The option is in fact the file-based version of `hashedPassword`, not `password`, and expects a file containing the {manpage}`crypt(3)` hash of the user password.
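+
+  For example (user name and path are placeholders):
+
+  ```nix
+  {
+    # file containing a crypt(3) hash, e.g. as produced by `mkpasswd`
+    users.users.alice.hashedPasswordFile = "/persist/passwords/alice";
+  }
+  ```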
+
+- `chromiumBeta` and `chromiumDev` have been removed due to the lack of maintenance in nixpkgs. Consider using `chromium` instead.
+
+- `google-chrome-beta` and `google-chrome-dev` have been removed due to the lack of maintenance in nixpkgs. Consider using `google-chrome` instead.
+
+- The `services.ananicy.extraRules` option now has the type of `listOf attrs` instead of `string`.
+
+- `buildVimPluginFrom2Nix` has been renamed to `buildVimPlugin`, which now
+  skips `configurePhase` and `buildPhase`.
+
+- JACK tools (`jack_*` except `jack_control`) have moved from the `jack2` package to `jack-example-tools`.
+
+- The `waagent` service now performs provisioning.
+
+- The `matrix-synapse` package & module have undergone some significant internal changes, for most setups no intervention is needed, though:
+  - The option [`services.matrix-synapse.package`](#opt-services.matrix-synapse.package) is now read-only. For modifying the package, use an overlay which modifies `matrix-synapse-unwrapped` instead. More on that below.
+  - The `enableSystemd` & `enableRedis` arguments have been removed and `matrix-synapse` has been renamed to `matrix-synapse-unwrapped`. Also, several optional dependencies (such as `psycopg2` or `authlib`) have been removed.
+  - These optional dependencies are automatically added via a wrapper (`pkgs.matrix-synapse.override { extras = ["redis"]; }` for `hiredis` & `txredisapi` for instance) if the relevant config section is declared in `services.matrix-synapse.settings`. For instance, if `services.matrix-synapse.settings.redis.enabled` is set to `true`, `"redis"` will be automatically added to the `extras` list of `pkgs.matrix-synapse`.
+  - A list of all extras (and the extras enabled by default) can be found at the [option's reference for `services.matrix-synapse.extras`](#opt-services.matrix-synapse.extras).
+  - In some cases (e.g. for running synapse workers) it was necessary to reuse the `PYTHONPATH` of `matrix-synapse.service`'s environment to have all plugins available. This isn't necessary anymore; instead, `config.services.matrix-synapse.package` can be used, as it points to the wrapper with properly configured `extras` and also makes all plugins defined via [`services.matrix-synapse.plugins`](#opt-services.matrix-synapse.plugins) available. This is also why the option is now read-only; it's supposed to be set by the module only.
+
+- `netbox` was updated to 3.6. NixOS' `services.netbox.package` still defaults to 3.5 if `stateVersion` is earlier than 23.11. Please review upstream's breaking changes [for 3.6.0](https://github.com/netbox-community/netbox/releases/tag/v3.6.0) and upgrade NetBox by changing `services.netbox.package`. Database migrations will be run automatically.
+
+- `etcd` has been updated to 3.5. You will want to read the [3.3 to 3.4](https://etcd.io/docs/v3.5/upgrades/upgrade_3_4/) and [3.4 to 3.5](https://etcd.io/docs/v3.5/upgrades/upgrade_3_5/) upgrade guides.
+
+- `gitlab` installations created or updated between versions \[15.11.0, 15.11.2\] have an incorrect database schema. This will become a problem when upgrading to `gitlab` >=16.2.0. A workaround for affected users can be found in the [GitLab docs](https://docs.gitlab.com/ee/update/versions/gitlab_16_changes.html#undefined-column-error-upgrading-to-162-or-later).
+
+- `consul` has been updated to `1.16.0`. See the [release note](https://github.com/hashicorp/consul/releases/tag/v1.16.0) for more details. Once a new Consul version has started and upgraded its data directory, it generally cannot be downgraded to the previous version.
+
+- `llvmPackages_rocm` has been moved to `rocmPackages.llvm`.
+
+- `hip`, `rocm-opencl-runtime`, `rocm-opencl-icd`, and `rocclr` have been combined into `rocmPackages.clr`.
+
+- `clang-ocl`, `clr`, `composable_kernel`, `hipblas`, `hipcc`, `hip-common`, `hipcub`,
+  `hipfft`, `hipfort`, `hipify`, `hipsolver`, `hipsparse`, `migraphx`, `miopen`, `miopengemm`,
+  `rccl`, `rdc`, `rocalution`, `rocblas`, `rocdbgapi`, `rocfft`, `rocgdb`, `rocm-cmake`,
+  `rocm-comgr`, `rocm-core`, `rocm-device-libs`, `rocminfo`, `rocmlir`, `rocm-runtime`,
+  `rocm-smi`, `rocm-thunk`, `rocprim`, `rocprofiler`, `rocrand`, `rocr-debug-agent`,
+  `rocsolver`, `rocsparse`, `rocthrust`, `roctracer`, `rocwmma`, and `tensile` have been moved to `rocmPackages`.
+
+- `himalaya` has been updated to `0.8.0`, which drops native TLS support (in favor of Rustls) and adds OAuth 2.0 support. See the [release note](https://github.com/soywod/himalaya/releases/tag/v0.8.0) for more details.
+
+- `nix-prefetch-git` now ignores global and user git config, to improve reproducibility.
+
+- The [services.caddy.acmeCA](#opt-services.caddy.acmeCA) option now defaults to `null` instead of `"https://acme-v02.api.letsencrypt.org/directory"`, to use all of Caddy's default ACME CAs and enable Caddy's automatic issuer fallback feature by default, as recommended by upstream.
+
+- The default priorities of [`services.nextcloud.phpOptions`](#opt-services.nextcloud.phpOptions) have changed. This means that e.g.
+  `services.nextcloud.phpOptions."opcache.interned_strings_buffer" = "23";` no longer discards all of the other defaults from this option.
+  The attribute values of `phpOptions` are still defaults and can be overridden individually as shown above.
+
+  Overriding all of the options (including `upload_max_filesize`, `post_max_size`
+  and `memory_limit`, which all point to [`services.nextcloud.maxUploadSize`](#opt-services.nextcloud.maxUploadSize)
+  by default) can be done like this:
+
+  ```nix
+  {
+    services.nextcloud.phpOptions = lib.mkForce {
+      /* ... */
+    };
+  }
+  ```
+
+- `php80` is no longer supported due to upstream not supporting this version anymore.
+
+- PHP now defaults to PHP 8.2, updated from 8.1.
+
+- GraalVM has been updated to the latest version, and this brings significant changes. Upstream no longer releases multiple versions targeting different JVMs, so now we only have one GraalVM derivation (`graalvm-ce`). While at first glance the version may seem like a downgrade (22.3.1 -> 21.0.0), the major version now follows the JVM it targets (so this latest version targets JVM 21). Also, some products like `llvm-installable-svm` and `native-image-svm` were incorporated into the main GraalVM derivation, so they're included by default.
+
+- GraalPy (`graalCEPackages.graalpy`), TruffleRuby (`graalCEPackages.truffleruby`), GraalJS (`graalCEPackages.graaljs`) and GraalNodeJS (`graalCEPackages.graalnodejs`) are now independent from the main GraalVM derivation.
+
+- The ISC DHCP package and corresponding module have been removed, because they are end of life upstream. See https://www.isc.org/blogs/isc-dhcp-eol/ for details and switch to a different DHCP implementation like kea or dnsmasq.
+
+- `prometheus-unbound-exporter` has been replaced by the Let's Encrypt maintained version, since the previous version was archived. This requires some changes to the module configuration; most notably, `controlInterface` needs to be migrated
+   to `unbound.host` and requires either the `tcp://` or `unix://` URI scheme.
+
+- `odoo` now defaults to 16, updated from 15.
+
+- `varnish` was upgraded from 7.2.x to 7.4.x, see https://varnish-cache.org/docs/7.3/whats-new/upgrading-7.3.html and https://varnish-cache.org/docs/7.4/whats-new/upgrading-7.4.html for upgrade notes. The current LTS version is still offered as `varnish60`.
+
+- `util-linux` is now supported on Darwin and is no longer an alias to `unixtools`. Use the `unixtools.util-linux` package for access to the Apple variants of the utilities.
+
+- The `services.keyd` module's API has changed. You can now create multiple configuration files.
+
+- `baloo`, the file indexer/search engine used by KDE, now has a patch to prevent files from constantly being reindexed when the device IDs of their underlying storage change. This happens frequently when using btrfs or LVM. The patch has not yet been accepted upstream but it provides a significantly improved experience. When upgrading, reset baloo to get a clean index: `balooctl disable ; balooctl purge ; balooctl enable`.
+
+- The `vlock` program from the `kbd` package has been moved into its own package output and should now be referenced explicitly as `kbd.vlock` or replaced with an alternative such as the standalone `vlock` package or `physlock`.
+
+- `fileSystems.<name>.autoFormat` now uses `systemd-makefs`, which does not accept formatting options. Therefore, `fileSystems.<name>.formatOptions` has been removed.
+
+- `fileSystems.<name>.autoResize` now uses `systemd-growfs` to resize the file system online in stage 2. This means that `f2fs` and `ext2` can no longer be auto resized, while `xfs` and `btrfs` now can be.
+
+- `fuse3` has been updated from 3.11.0 to 3.16.2; see [ChangeLog.rst](https://github.com/libfuse/libfuse/blob/fuse-3.16.2/ChangeLog.rst#libfuse-3162-2023-10-10) for an overview of the changes.
+
+  Unsupported mount options are no longer silently accepted [(since 3.15.0)](https://github.com/libfuse/libfuse/blob/fuse-3.16.2/ChangeLog.rst#libfuse-3150-2023-06-09). The [affected mount options](https://github.com/libfuse/libfuse/commit/dba6b3983af34f30de01cf532dff0b66f0ed6045) are: `atime`, `diratime`, `lazytime`, `nolazytime`, `relatime`, `norelatime`, `strictatime`.
+
+  For example,
+
+  ```bash
+  $ sshfs 127.0.0.1:/home/test/testdir /home/test/sshfs_mnt -o atime
+  ```
+
+  would previously terminate successfully with the mount point established, now it outputs the error message ``fuse: unknown option(s): `-o atime'`` and terminates with exit status 1.
+
+- `nixos-rebuild {switch,boot,test,dry-activate}` now runs the system activation inside `systemd-run`, creating an ephemeral systemd service and protecting the system switch against issues like network disconnections during remote (e.g. SSH) sessions. This has the side effect of running the switch in an isolated environment, which could possibly break post-switch scripts that depend on things like environment variables being set. If you want to opt out of this behavior for now, you may set the `NIXOS_SWITCH_USE_DIRTY_ENV` environment variable before running `nixos-rebuild`. However, keep in mind that this option will be removed in the future.
+
+- The `services.vaultwarden.config` option default value was changed to make Vaultwarden only listen on localhost, following the [secure defaults for most NixOS services](https://github.com/NixOS/nixpkgs/issues/100192).
+
+- `services.lemmy.settings.federation` was removed in 0.17.0 and no longer has any effect. To enable federation, the hostname must be set in the configuration file and then federation must be enabled in the admin web UI. See the [release notes](https://github.com/LemmyNet/lemmy/blob/c32585b03429f0f76d1e4ff738786321a0a9df98/RELEASES.md#upgrade-instructions) for more details.
+
+- `pict-rs` was upgraded from 0.3 to 0.4 and contains an incompatible database & configuration change. To upgrade on systems with `stateVersion = "23.05";` or older follow the migration steps from https://git.asonix.dog/asonix/pict-rs#user-content-0-3-to-0-4-migration-guide and set `services.pict-rs.package = pkgs.pict-rs;`.
+
+- The following packages in `haskellPackages` now have a separate bin output: `cabal-fmt`, `calligraphy`, `eventlog2html`, `ghc-debug-brick`, `hindent`, `nixfmt`, `releaser`. This means you need to replace e.g. `"${pkgs.haskellPackages.nixfmt}/bin/nixfmt"` with `"${lib.getBin pkgs.haskellPackages.nixfmt}/bin/nixfmt"` or `"${lib.getExe pkgs.haskellPackages.nixfmt}"`. The binaries also won’t be in scope if you rely on them being installed e.g. via `ghcWithPackages`. `environment.systemPackages` picks the `bin` output automatically, so for normal installation no intervention is required. Also, toplevel attributes like `pkgs.nixfmt` are not impacted negatively by this change.
+
+- `spamassassin` no longer supports the `Hashcash` module. The module needs to be removed from the `loadplugin` list if it was copied over from the default `initPreConf` option.
+
+- `nano` was removed from `environment.defaultPackages`. To not leave systems without an editor, `programs.nano.enable` is now enabled by default.
+
+- `programs.nano.nanorc` and `programs.nano.syntaxHighlight` no longer have an effect unless `programs.nano.enable` is set to true, which is the default.
+
+- `services.outline.sequelizeArguments` has been removed, as `outline` no longer executes database migrations via the `sequelize` cli.
+
+- The binary of the package `cloud-sql-proxy` has changed from `cloud_sql_proxy` to `cloud-sql-proxy`.
+
+- Garage has been upgraded to 0.9.x. `services.garage.package` now needs to be explicitly set, so version upgrades can be done in a controlled fashion. For this, we expose `garage_x_y` attributes which can be set here.
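+
+  For example, pinning the version explicitly (assuming a `garage_0_9` attribute following the `garage_x_y` naming):
+
+  ```nix
+  {
+    # pin Garage so future nixpkgs updates don't upgrade it implicitly
+    services.garage.package = pkgs.garage_0_9;
+  }
+  ```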
+
+- `voms` and `xrootd` now move the `$out/etc` content to the `$etc` output instead of `$out/etc.orig`, when the input argument `externalEtc` is not `null`.
+
+- The `woodpecker-*` CI packages have been updated to 1.0.0. This release is wildly incompatible with the 0.15.X versions that were previously packaged. Please read [upstream's documentation](https://woodpecker-ci.org/docs/next/migrations#100) to learn how to update your CI configurations.
+
+- The Caddy module gained a new option named `services.caddy.enableReload` which is enabled by default. It allows reloading the service instead of restarting it, if only a config file has changed. This option must be disabled if you have turned off the [Caddy admin API](https://caddyserver.com/docs/caddyfile/options#admin). If you keep this option enabled, you should consider setting [`grace_period`](https://caddyserver.com/docs/caddyfile/options#grace-period) to a non-infinite value to prevent Caddy from delaying the reload indefinitely.
+
+- mdraid support is now optional. This reduces initramfs size and prevents the potentially undesired automatic detection and activation of software RAID pools. It is disabled by default in new configurations (determined by `stateVersion`), but the appropriate settings will be generated by `nixos-generate-config` when installing to a software RAID device, so the standard installation procedure should be unaffected. If you have custom configs relying on mdraid, ensure that you use `stateVersion` correctly or set `boot.swraid.enable` manually. On systems with an updated `stateVersion` we now also emit warnings if `mdadm.conf` does not contain the minimum required configuration necessary to run the dynamically enabled monitoring daemons.
+
+- The `go-ethereum` package has been updated to v1.12.0. This drops support for proof-of-work. Its GraphQL API now encodes all numeric values as hex strings and the GraphQL UI is updated to version 2.0. The default database has changed from `leveldb` to `pebble` but `leveldb` can be forced with the `--db.engine=leveldb` flag. The `checkpoint-admin` command was [removed along with trusted checkpoints](https://github.com/ethereum/go-ethereum/pull/27147).
+
+- The `aseprite-unfree` package has been upgraded from 1.2.16.3 to 1.2.40. The free version of aseprite has been dropped because it is EOL and the package attribute now points to the unfree version. A maintained fork of the last free version of Aseprite, named 'LibreSprite', is available in the `libresprite` package.
+
+- The default `kops` version is now 1.28.0 and support for 1.25 and older has been dropped.
+
+- `pharo` has been updated to the latest stable version (PharoVM 10.0.8), which is compatible with the latest stable and oldstable images (Pharo 10 and 11). The VM in question is the 64bit Spur. The 32bit version has been dropped due to lack of maintenance. The Cog VM has been deleted because it is severely outdated. Finally, the `pharo-launcher` package has been deleted because it was not compatible with the newer VM, and due to lack of maintenance.
+
+- Emacs mainline version 29 was introduced. This new version includes many major additions, most notably `tree-sitter` support (enabled by default) and the pgtk variant (useful for Wayland users), which is available under the attribute `emacs29-pgtk`.
+
+- Emacs macport version 29 was introduced.
+
+- The option `services.networking.networkmanager.enableFccUnlock` was removed in favor of `networking.networkmanager.fccUnlockScripts`, which allows specifying unlock scripts explicitly. The previous option enabled all unlock scripts bundled with ModemManager, which is risky, and didn't allow using vendor-provided unlock scripts at all.
+
+- The `html-proofer` package has been updated from major version 3 to major version 5, which includes [breaking changes](https://github.com/gjtorikian/html-proofer/blob/v5.0.8/UPGRADING.md).
+
+- `kratos` has been updated from 0.10.1 to the first stable version 1.0.0, please read the [0.10.1 to 0.11.0](https://github.com/ory/kratos/releases/tag/v0.11.0), [0.11.0 to 0.11.1](https://github.com/ory/kratos/releases/tag/v0.11.1), [0.11.1 to 0.13.0](https://github.com/ory/kratos/releases/tag/v0.13.0) and [0.13.0 to 1.0.0](https://github.com/ory/kratos/releases/tag/v1.0.0) upgrade guides. The most notable breaking change is the introduction of one-time passwords (`code`) and update of the default recovery strategy from `link` to `code`.
+
+- The `hail` NixOS module was removed, as `hail` was unmaintained since 2017.
+
+- Package `noto-fonts-emoji` was renamed to `noto-fonts-color-emoji`;
+  see [#221181](https://github.com/NixOS/nixpkgs/issues/221181).
+
+- Package `cloud-sql-proxy` was renamed to `google-cloud-sql-proxy` as it cannot be used with other cloud providers.
+
+- Package `pash` was removed due to being archived upstream. Use `powershell` as an alternative.
+
+- The option `services.plausible.releaseCookiePath` has been removed: Plausible does not use any distributed Erlang features, and does not plan to (see [discussion](https://github.com/NixOS/nixpkgs/pull/130297#issuecomment-1805851333)), so NixOS now disables them, and the Erlang cookie becomes unnecessary. You may delete the file that `releaseCookiePath` was set to.
+
+- `security.sudo.extraRules` now includes `root`'s default rule, with ordering
+  priority 400. This is functionally identical for users not specifying rule
+  order, or relying on `mkBefore` and `mkAfter`, but may impact users calling
+  `mkOrder n` with n ≤ 400.
+
+- X keyboard extension (XKB) options have been reorganized into a single attribute set, `services.xserver.xkb`. Specifically, `services.xserver.layout` is now `services.xserver.xkb.layout`, `services.xserver.extraLayouts` is now `services.xserver.xkb.extraLayouts`, `services.xserver.xkbModel` is now `services.xserver.xkb.model`, `services.xserver.xkbOptions` is now `services.xserver.xkb.options`, `services.xserver.xkbVariant` is now `services.xserver.xkb.variant`, and `services.xserver.xkbDir` is now `services.xserver.xkb.dir`.
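+
+  For example, a keyboard configuration that previously used the old options might now read (layout, variant and options values are illustrative):
+
+  ```nix
+  {
+    services.xserver.xkb = {
+      layout = "us";
+      variant = "intl";
+      options = "ctrl:nocaps";
+    };
+  }
+  ```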
+
+- `networking.networkmanager.firewallBackend` was removed as NixOS is now using iptables-nftables-compat even when using iptables, therefore NetworkManager now uses the nftables backend unconditionally.
+
+- [`lib.lists.foldl'`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl-prime) now always evaluates the initial accumulator argument first.
+  If you depend on the lazier behavior, consider using [`lib.lists.foldl`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.lists.foldl) or [`builtins.foldl'`](https://nixos.org/manual/nix/stable/language/builtins.html#builtins-foldl') instead.
+
+- [`lib.attrsets.foldlAttrs`](https://nixos.org/manual/nixpkgs/stable#function-library-lib.attrsets.foldlAttrs) now always evaluates the initial accumulator argument first.
+
+- `rome` was removed because it is no longer maintained and is succeeded by `biome`.
+
+- The `prometheus-knot-exporter` was migrated to a version maintained by CZ.NIC. Various metric names have changed, so checking existing rules is recommended.
+
+- The `services.mtr-exporter.target` has been removed in favor of `services.mtr-exporter.jobs` which allows specifying multiple targets.
+
+- `blender-with-packages` has been deprecated in favor of `blender.withPackages`, for example `blender.withPackages (ps: [ps.bpycv])`. It behaves similarly to `python3.withPackages`.
+
+- Setting `nixpkgs.config` options while providing an external `pkgs` instance will now raise an error instead of silently ignoring the options. NixOS modules no longer set `nixpkgs.config` to accommodate this. This specifically affects the `services.locate`, `services.xserver.displayManager.lightdm.greeters.tiny` and `programs.firefox` NixOS modules. No manual intervention should be required in most cases, however, configurations relying on those modules affecting packages outside the system environment should switch to explicit overlays.
+
+- `services.borgmatic.settings.location` and `services.borgmatic.configurations.<name>.location` are deprecated, please move your options out of sections to the global scope.
+
+- `privacyidea` (and the corresponding `privacyidea-ldap-proxy`) has been removed from nixpkgs because it has severely outdated dependencies that became unmaintainable with nixpkgs' python package-set.
+
+- `dagger` was removed because using a package called `dagger` and packaging it from source violates their trademark policy.
+
+- `win-virtio` package was renamed to `virtio-win` to be consistent with the upstream package name.
+
+- `ps3netsrv` has been replaced with the webman-mod fork, the executable has been renamed from `ps3netsrv++` to `ps3netsrv` and cli parameters have changed.
+
+- `ssm-agent` package and module were renamed to `amazon-ssm-agent` to be consistent with the upstream package name.
+
+- `services.kea.{ctrl-agent,dhcp-ddns,dhcp,dhcp6}` now use separate runtime directories instead of `/run/kea` to work around the runtime directory being cleared on service start.
+
+- `mkDerivation` now rejects MD5 hashes.
+
+- The `junicode` font package has been updated to [major version 2](https://github.com/psb1558/Junicode-font/releases/tag/v2.001), which is now a font family. In particular, plain `Junicode.ttf` no longer exists. In addition, TrueType font files are now placed in `font/truetype` instead of `font/junicode-ttf`; this change does not affect use via `fonts.packages` NixOS option.
+
+- The `prayer` package as well as `services.prayer` have been removed because it's been unmaintained for several years and the author's website has vanished.
+
+- The `chrony` NixOS module now tracks the Real-Time Clock drift from the System Clock with `rtcfile` and automatically adjusts it with `rtcautotrim` when it exceeds the maximum error specified in `services.chrony.autotrimThreshold` (default 30 seconds). If you enabled `rtcsync` in `extraConfig`, you should remove RTC related options from `extraConfig`. If you do not want chrony configured to keep the RTC in check, you can set `services.chrony.enableRTCTrimming = false;`
+
+## Other Notable Changes {#sec-release-23.11-notable-changes}
+
+- A new option `system.switch.enable` was added. By default, this option is
+  enabled. Disabling it makes the system unable to be reconfigured via
+  `nixos-rebuild`. This is good for image based appliances where updates are
+  handled outside the image.
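+
+  For example, an image-based appliance might set:
+
+  ```nix
+  {
+    # the system can then only be updated by replacing the image
+    system.switch.enable = false;
+  }
+  ```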
+
+- The Cinnamon module now enables XDG desktop integration by default. If you are experiencing collisions related to xdg-desktop-portal-gtk you can safely remove `xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];` from your NixOS configuration.
+
+- The GNOME, Pantheon and Cinnamon modules no longer force Qt applications to use the Adwaita style, since it was buggy and is no longer maintained upstream (specifically, Cinnamon now defaults to the gtk2 style instead, following the default in Linux Mint). If you still want it, you can add the following options to your configuration, but they will probably be removed eventually:
+
+  ```nix
+  qt = {
+    enable = true;
+    platformTheme = "gnome";
+    style = "adwaita";
+  };
+  ```
+
+- `fontconfig` now defaults to using greyscale antialiasing instead of subpixel antialiasing because of a [recommendation from one of the downstreams](https://gitlab.freedesktop.org/fontconfig/fontconfig/-/issues/337). You can change this value by configuring [](#opt-fonts.fontconfig.subpixel.rgba) accordingly.
+
+- The latest available version of Nextcloud is v27 (available as `pkgs.nextcloud27`). The installation logic is as follows:
+  - If [`services.nextcloud.package`](#opt-services.nextcloud.package) is specified explicitly, this package will be installed (**recommended**)
+  - If [`system.stateVersion`](#opt-system.stateVersion) is >=23.11, `pkgs.nextcloud27` will be installed by default.
+  - If [`system.stateVersion`](#opt-system.stateVersion) is >=23.05, `pkgs.nextcloud26` will be installed by default.
+  - Please note that an upgrade from v25 (or older) to v27 directly is not possible. Please upgrade to `nextcloud26` (or earlier) first. Nextcloud prohibits skipping major versions while upgrading. You can upgrade by declaring [`services.nextcloud.package = pkgs.nextcloud26;`](#opt-services.nextcloud.package).
+
+- New options were added to `services.searx` for better SearXNG support, including options for the built-in rate limiter and bot protection and automatically configuring a local redis server.
+
+- `jq` was updated to 1.7, its [first release in 5 years](https://github.com/jqlang/jq/releases/tag/jq-1.7).
+
+- `zfs` was updated from 2.1.x to 2.2.0, [enabling newer kernel support and adding new features](https://github.com/openzfs/zfs/releases/tag/zfs-2.2.0).
+
+- Elixir now defaults to version
+  [v1.15](https://elixir-lang.org/blog/2023/06/19/elixir-v1-15-0-released/).
+
+- A new option was added to the virtualisation module that enables specifying explicitly named network interfaces in QEMU VMs. The existing `virtualisation.vlans` is still supported for cases where the name of the network interface is irrelevant.
+
+- DocBook option documentation is no longer supported, all module documentation now uses markdown.
+
+- `services.outline` can now be configured to use local filesystem storage instead of S3 storage using [services.outline.storage.storageType](#opt-services.outline.storage.storageType).
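+
+  For example (assuming `"local"` is the value that selects filesystem storage):
+
+  ```nix
+  {
+    services.outline.storage.storageType = "local";
+  }
+  ```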
+
+- `paperwork` was updated to version 2.2. Documents scanned with this version will not be visible to previous versions if you downgrade. See the [upstream announcement](https://forum.openpaper.work/t/paperwork-2-2-testing-phase/316#important-switch-from-jpeg-to-png-for-new-pages-2) for details and workarounds.
+
+- `buildGoModule` `go-modules` attrs have been renamed to `goModules`.
+
+- The `fonts.fonts` and `fonts.enableDefaultFonts` options have been renamed to `fonts.packages` and `fonts.enableDefaultPackages` respectively.
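+
+  For example (the package choice is illustrative):
+
+  ```nix
+  {
+    fonts.enableDefaultPackages = true;
+    fonts.packages = [ pkgs.noto-fonts ];
+  }
+  ```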
+
+- The `services.sslh` module has been updated to follow [RFC 0042](https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md). As such, several options have been moved to the freeform attribute set [services.sslh.settings](#opt-services.sslh.settings), which allows changing any of the settings in {manpage}`sslh(8)`.
+  In addition, the newly added option [services.sslh.method](#opt-services.sslh.method) allows switching between the {manpage}`fork(2)`, {manpage}`select(2)` and `libev`-based connection handling methods; see the [sslh docs](https://github.com/yrutschle/sslh/blob/master/doc/INSTALL.md#binaries) for a comparison.
+
+- `pkgs.openvpn3` now optionally supports systemd-resolved. `programs.openvpn3` will automatically enable systemd-resolved support if `config.services.resolved.enable` is enabled.
+
+- `services.fail2ban.jails` can now be configured with attribute sets defining settings and filters instead of lines. The stringed options `daemonConfig` and `extraSettings` have respectively been replaced by `daemonSettings` and `jails.DEFAULT.settings` which use attribute sets.
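+
+  A minimal sketch of the attribute-set style (jail name and setting values are illustrative):
+
+  ```nix
+  {
+    services.fail2ban.jails.sshd.settings = {
+      # plain fail2ban jail settings as attribute values
+      enabled = true;
+      maxretry = 5;
+    };
+  }
+  ```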
+
+- The application firewall `opensnitch` now uses the eBPF process monitor method by default, as recommended by upstream. The method can be changed with the setting [services.opensnitch.settings.ProcMonitorMethod](#opt-services.opensnitch.settings.ProcMonitorMethod).
+
+- `services.hedgedoc` has been heavily refactored, reducing the amount of declared options in the module. Most of the options should still work without any changes. Some options have been deprecated, as they no longer have any effect. See [#244941](https://github.com/NixOS/nixpkgs/pull/244941) for more details.
+
+- The [services.woodpecker-server](#opt-services.woodpecker-server.environmentFile) type was changed to a list of paths to be more consistent with the woodpecker-agent module.
+
+- The module [services.ankisyncd](#opt-services.ankisyncd.package) has been switched to [anki-sync-server-rs](https://github.com/ankicommunity/anki-sync-server-rs) from the old python version, which was difficult to update, had not been updated in a while, and did not support recent versions of anki.
+Unfortunately, all servers supporting new clients (newer versions of anki-sync-server, anki's built-in sync server and this new Rust package) do not support the older sync protocol that was used in the old server, so such old clients will also need updating; in particular, the anki package in nixpkgs is also being updated in this release.
+The module update takes care of the new config syntax, and the data itself (user login and cards) is compatible, so users of the module will be able to just log in again after updating both client and server, without any extra action.
+
+- `services.matrix-synapse` has new options to configure worker processes for matrix-synapse using [`services.matrix-synapse.workers`](#opt-services.matrix-synapse.workers). It's also now possible to configure a local redis server using [`services.matrix-synapse.configureRedisLocally`](#opt-services.matrix-synapse.configureRedisLocally).
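+
+  For example, to let the module manage a local redis instance for the workers:
+
+  ```nix
+  {
+    services.matrix-synapse.configureRedisLocally = true;
+  }
+  ```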
+
+- `services.nginx` gained a `defaultListen` option at server-level with support for PROXY protocol listeners, also `proxyProtocol` is now exposed in `services.nginx.virtualHosts.<name>.listen` option. It is now possible to run PROXY listeners and non-PROXY listeners at a server-level, see [#213510](https://github.com/NixOS/nixpkgs/pull/213510/) for more details.
+
+- `services.restic.backups` now adds wrapper scripts to your system path, which set the same environment variables as the service, so restic operations can easily be run from the command line. This behavior can be disabled by setting `createWrapper` to `false`, per backup configuration.
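+
+  For example, with a hypothetical backup named `mybackup`:
+
+  ```nix
+  {
+    # opt out of the wrapper script for this one backup
+    services.restic.backups.mybackup.createWrapper = false;
+  }
+  ```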
+
+- `services.prometheus.exporters` has a new exporter, [Scaphandre](https://github.com/hubblo-org/scaphandre), to monitor electrical power consumption based on the PowercapRAPL sensor, see [#239803](https://github.com/NixOS/nixpkgs/pull/239803) for more details.
+
+- The MariaDB C client library was upgraded from 3.2.x to 3.3.x. It is recommended to review the [upstream release notes](https://mariadb.com/kb/en/mariadb-connector-c-33-release-notes/).
+
+- The module `services.calibre-server` has new options to configure the `host`, `port`, `auth.enable`, `auth.mode` and `auth.userDb` path, see [#216497](https://github.com/NixOS/nixpkgs/pull/216497/) for more details.
+
+- Mattermost has been upgraded to extended support version 8.1 as the previously
+  packaged extended support version 7.8 is [reaching end of life](https://docs.mattermost.com/upgrade/extended-support-release.html).
+  Migration may take some time, see the [changelog](https://docs.mattermost.com/install/self-managed-changelog.html#release-v8-1-extended-support-release)
+  and [important upgrade notes](https://docs.mattermost.com/upgrade/important-upgrade-notes.html).
+
+- `services.prometheus.exporters` has a new [exporter](https://github.com/hipages/php-fpm_exporter) to monitor PHP-FPM processes, see [#240394](https://github.com/NixOS/nixpkgs/pull/240394) for more details.
+
+- `services.github-runner` / `services.github-runners.<name>` gained the option `nodeRuntimes`. The option defaults to `[ "node20" ]`, i.e., the service supports Node.js 20 GitHub Actions only. The list of Node.js versions accepted by `nodeRuntimes` tracks the versions the upstream GitHub Actions runner supports. See [#249103](https://github.com/NixOS/nixpkgs/pull/249103) for details.
+
+- `programs.gnupg.agent.pinentryFlavor` is now set in `/etc/gnupg/gpg-agent.conf`, and will no longer take precedence over a `pinentry-program` set in `~/.gnupg/gpg-agent.conf`.
+
+- `programs.gnupg` now has the option `agent.settings` to set verbatim config values in `/etc/gnupg/gpg-agent.conf`.
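+
+  For example (the setting shown is illustrative; any {manpage}`gpg-agent(1)` option should work):
+
+  ```nix
+  {
+    programs.gnupg.agent.settings = {
+      max-cache-ttl = 10800; # seconds
+    };
+  }
+  ```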
+
+- `dockerTools.buildImage`, `dockerTools.buildLayeredImage` and `dockerTools.streamLayeredImage` now use `lib.makeOverridable` to allow `dockerTools`-based images to be customized more efficiently at the nix-level.
+
+- `services.influxdb2` now supports doing an automatic initial setup and provisioning of users, organizations, buckets and authentication tokens, see [#249502](https://github.com/NixOS/nixpkgs/pull/249502) for more details.
+
+- `wrapHelm` now exposes `passthru.pluginsDir` which can be passed to `helmfile`. For convenience, a top-level package `helmfile-wrapped` has been added, which inherits `passthru.pluginsDir` from `kubernetes-helm-wrapped`. See [#217768](https://github.com/NixOS/nixpkgs/issues/217768) for details.
+
+- `boot.initrd.network.udhcp.enable` allows control over dhcp during stage 1 regardless of what `networking.useDHCP` is set to.
+
+- Suricata was upgraded from 6.0 to 7.0 and no longer considers HTTP/2 support as experimental, see [upstream release notes](https://forum.suricata.io/t/suricata-7-0-0-released/3715) for more details.
+
+- Cloud support in the `netdata` package is now disabled by default. To enable it use the `netdataCloud` package.
+
+- `networking.nftables` now has the option `networking.nftables.table.<table>` to create tables
+  and have them be updated atomically, instead of flushing the ruleset.
+
+- `networking.nftables` is no longer flushing all rulesets on every reload.
+  Use `networking.nftables.flushRuleset = true;` to get back the old behaviour.
+
+- The `cawbird` package is dropped from nixpkgs, as it got broken by the Twitter API closing down and has been abandoned upstream.
+
+- `hardware.nvidia` gained `datacenter` options for enabling NVIDIA Data Center drivers and configuration of NVLink/NVSwitch topologies through `nv-fabricmanager`.
+
+- Certificate generation via the `security.acme` module now limits the number of concurrently running certificate renewal and generation jobs, to avoid spiking resource usage when processing many certificates at once. The limit defaults to *5* and can be adjusted via `maxConcurrentRenewals`. Setting it to *0* disables the limit altogether.
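+
+  For example (assuming the option lives directly under `security.acme`):
+
+  ```nix
+  {
+    security.acme.maxConcurrentRenewals = 3;
+  }
+  ```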
+
+- The new `boot.bcache.enable` option (enabled by default) allows completely removing `bcache` mount support.
+
+- The module `services.mbpfan` now has the option `aggressive` enabled by default for better heat moderation. You can disable it to fall back to the upstream defaults.
+
+- `security.sudo` now provides two extra options that do not change the
+  module's default behaviour (see the sketch below):
+  - `defaultOptions` controls the options used for the default rules;
+  - `keepTerminfo` controls whether `TERMINFO` and `TERMINFO_DIRS` are preserved
+    for `root` and the `wheel` group.
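+
+  For example (the values shown are illustrative):
+
+  ```nix
+  {
+    security.sudo.defaultOptions = [ "SETENV" ];
+    security.sudo.keepTerminfo = true;
+  }
+  ```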
+
+- `virtualisation.googleComputeImage` now provides an `efi` option to support UEFI booting.
+
+- CoreDNS can now be built with external plugins by overriding `externalPlugins` and `vendorHash` arguments like this:
+
+  ```nix
+  services.coredns = {
+    enable = true;
+    package = pkgs.coredns.override {
+      externalPlugins = [
+        {name = "fanout"; repo = "github.com/networkservicemesh/fanout"; version = "v1.9.1";}
+      ];
+      vendorHash = "<SRI hash>";
+    };
+  };
+  ```
+
+  To get the necessary SRI hash, set `vendorHash = "";`. The build will fail and produce the correct `vendorHash` in the error message.
+
+  If you use this feature, updates to CoreDNS may require updating `vendorHash` by following these steps again.
+
+- `postgresql_11` has been removed since it will stop receiving fixes on November 9, 2023.
+
+- `ffmpeg` default upgraded from `ffmpeg_5` to `ffmpeg_6`.
+
+- `fusuma` now enables the following plugins: [appmatcher](https://github.com/iberianpig/fusuma-plugin-appmatcher), [keypress](https://github.com/iberianpig/fusuma-plugin-keypress), [sendkey](https://github.com/iberianpig/fusuma-plugin-sendkey), [tap](https://github.com/iberianpig/fusuma-plugin-tap) and [wmctrl](https://github.com/iberianpig/fusuma-plugin-wmctrl).
+
+- `services.bitcoind` now properly respects the `enable` option.
+
+- The Home Assistant module now offers support for installing custom components and lovelace modules. Available at [`services.home-assistant.customComponents`](#opt-services.home-assistant.customComponents) and [`services.home-assistant.customLovelaceModules`](#opt-services.home-assistant.customLovelaceModules).
+
+- The argument `vendorSha256` of `buildGoModule` is deprecated. Use `vendorHash` instead. ([\#259999](https://github.com/NixOS/nixpkgs/pull/259999))
+
+## Nixpkgs internals {#sec-release-23.11-nixpkgs-internals}
+
+- Node.js v14 and v16 have been removed as they have reached end of life. Any dependent packages that contributors were not able to reasonably upgrade were, after a month of notice to their maintainers, **removed**.
+  - This includes VSCode Server.
+  - This includes Kibana 7 as the ELK stack is unmaintained in nixpkgs and is marked for slow removal.
+
+- The use of `sourceRoot = "source";`, `sourceRoot = "source/subdir";`, and similar lines in package derivations using the default `unpackPhase` is deprecated as it requires `unpackPhase` to always produce a directory named "source". Use `sourceRoot = src.name`, `sourceRoot = "${src.name}/subdir";`, or `setSourceRoot = "sourceRoot=$(echo */subdir)";` or similar instead.
+
+- The `django` alias in the python package set was upgraded to Django 4.x.
+  Applications that consume Django should always pin their python environment
+  to a compatible major version, so they can move at their own pace.
+
+  ```nix
+  python = python3.override {
+    packageOverrides = self: super: {
+      django = super.django_3;
+    };
+  };
+  ```
+
+- The `qemu-vm.nix` module by default now identifies block devices via
+  persistent names available in `/dev/disk/by-*`. Because the rootDevice is
+  identified by its filesystem label, it needs to be formatted before the VM is
+  started. The functionality of automatically formatting the rootDevice in the
+  initrd is removed from the QEMU module. However, for tests that depend on
+  this functionality, a test utility for the scripted initrd is added
+  (`nixos/tests/common/auto-format-root-device.nix`). To use this in a NixOS
+  test, import the module, e.g. `imports = [
+  ./common/auto-format-root-device.nix ];` When you use the systemd initrd, you
+  can automatically format the root device by setting
+  `virtualisation.fileSystems."/".autoFormat = true;`.
+
+- `python3.pkgs.flitBuildHook` has been removed. Use `flit-core` and `format = "pyproject"` instead.
+
+- The `extend` function of `llvmPackages` has been removed due to it coming from the `tools` attrset, thus only extending the `tools` attrset. A possible replacement is to construct the set from `libraries` and `tools`, or to patch nixpkgs.
+
+- The `qemu-vm.nix` module now supports disabling overriding `fileSystems` with
+  `virtualisation.fileSystems`. This enables the user to boot VMs from
+  "external" disk images not created by the qemu-vm module. You can stop the
+  qemu-vm module from overriding `fileSystems` by setting
+  `virtualisation.fileSystems = lib.mkForce { };`.
+
+- The `electron` packages now place their application files in `$out/libexec/electron` instead of `$out/lib/electron`. Packages using electron-builder will fail to build and need to be adjusted by changing `lib` to `libexec`.
+
+- `teleport` has been upgraded from major version 12 to major version 14. Please see upstream [upgrade instructions](https://goteleport.com/docs/management/operations/upgrading/) and release notes for versions [13](https://goteleport.com/docs/changelog/#1300-050823) and [14](https://goteleport.com/docs/changelog/#1400-092023). Note that Teleport does not officially support upgrades across more than one major version at a time. If you're running Teleport server components, it is recommended to first upgrade to an intermediate 13.x version by setting `services.teleport.package = pkgs.teleport_13`. Afterwards, this option can be removed to upgrade to the default version (14).
+
+- The Linux kernel module `msr` (see [`msr(4)`](https://man7.org/linux/man-pages/man4/msr.4.html)), which provides an interface to read and write the model-specific registers (MSRs) of an x86 CPU, can now be configured via `hardware.cpu.x86.msr`.
+
+- Docker now defaults to 24, as 20.10 will stop receiving security updates and bug fixes after [December 10, 2023](https://github.com/moby/moby/discussions/45104).
+
+- There is a new NixOS option for writing NixOS tests, `testing.initrdBackdoor`, that enables `backdoor.service` in the initrd. It requires `boot.initrd.systemd.enable` to be enabled. Boot will pause in stage 1 at `initrd.target`, and will listen for commands from the `Machine` python interface, just like stage 2 normally does. This enables commands to be sent to test and debug stage 1. Use `machine.switch_root()` to leave stage 1 and proceed to stage 2.
diff --git a/nixpkgs/nixos/doc/manual/shell.nix b/nixpkgs/nixos/doc/manual/shell.nix
new file mode 100644
index 000000000000..70500a12b037
--- /dev/null
+++ b/nixpkgs/nixos/doc/manual/shell.nix
@@ -0,0 +1,20 @@
+let
+  pkgs = import ../../.. {
+    config = {};
+    overlays = [];
+  };
+
+  common = import ./common.nix;
+  inherit (common) outputPath indexPath;
+
+  web-devmode = import ../../../pkgs/tools/nix/web-devmode.nix {
+    inherit pkgs;
+    buildArgs = "../../release.nix -A manualHTML.${builtins.currentSystem}";
+    open = "/${outputPath}/${indexPath}";
+  };
+in
+  pkgs.mkShell {
+    packages = [
+      web-devmode
+    ];
+  }
diff --git a/nixpkgs/nixos/lib/default.nix b/nixpkgs/nixos/lib/default.nix
new file mode 100644
index 000000000000..65d91342d4d1
--- /dev/null
+++ b/nixpkgs/nixos/lib/default.nix
@@ -0,0 +1,41 @@
+let
+  # The warning is in a top-level let binding so it is only printed once.
+  minimalModulesWarning = warn "lib.nixos.evalModules is experimental and subject to change. See nixos/lib/default.nix" null;
+  inherit (nonExtendedLib) warn;
+  nonExtendedLib = import ../../lib;
+in
+{ # Optional. Allows an extended `lib` to be used instead of the regular Nixpkgs lib.
+  lib ? nonExtendedLib,
+
+  # Feature flags allow you to opt in to unfinished code. These may change some
+  # behavior or disable warnings.
+  featureFlags ? {},
+
+  # This file itself is rather new, so we accept unknown parameters to be forward
+  # compatible. This is generally not recommended, because typos go undetected.
+  ...
+}:
+let
+  seqIf = cond: if cond then builtins.seq else a: b: b;
+  # If cond, force `a` before returning any attr
+  seqAttrsIf = cond: a: lib.mapAttrs (_: v: seqIf cond a v);
+
+  eval-config-minimal = import ./eval-config-minimal.nix { inherit lib; };
+
+  testing-lib = import ./testing/default.nix { inherit lib; };
+in
+/*
+  This attribute set appears as lib.nixos in the flake, or can be imported
+  using a binding like `nixosLib = import (nixpkgs + "/nixos/lib") { }`.
+*/
+{
+  inherit (seqAttrsIf (!featureFlags?minimalModules) minimalModulesWarning eval-config-minimal)
+    evalModules
+    ;
+
+  inherit (testing-lib)
+    evalTest
+    runTest
+    ;
+
+}
diff --git a/nixpkgs/nixos/lib/eval-cacheable-options.nix b/nixpkgs/nixos/lib/eval-cacheable-options.nix
new file mode 100644
index 000000000000..d26967ebe09b
--- /dev/null
+++ b/nixpkgs/nixos/lib/eval-cacheable-options.nix
@@ -0,0 +1,54 @@
+{ libPath
+, pkgsLibPath
+, nixosPath
+, modules
+, stateVersion
+, release
+}:
+
+let
+  lib = import libPath;
+  modulesPath = "${nixosPath}/modules";
+  # dummy pkgs set that contains no packages, only `pkgs.lib` from the full set.
+  # not having `pkgs.lib` causes all users of `pkgs.formats` to fail.
+  pkgs = import pkgsLibPath {
+    inherit lib;
+    pkgs = null;
+  };
+  utils = import "${nixosPath}/lib/utils.nix" {
+    inherit config lib;
+    pkgs = null;
+  };
+  # this is used both as a module and as specialArgs.
+  # as a module it sets the _module special values, as specialArgs it makes `config`
+  # unusable. this causes documentation attributes depending on `config` to fail.
+  config = {
+    _module.check = false;
+    _module.args = {};
+    system.stateVersion = stateVersion;
+  };
+  eval = lib.evalModules {
+    modules = (map (m: "${modulesPath}/${m}") modules) ++ [
+      config
+    ];
+    specialArgs = {
+      inherit config pkgs utils;
+      class = "nixos";
+    };
+  };
+  docs = import "${nixosPath}/doc/manual" {
+    pkgs = pkgs // {
+      inherit lib;
+      # duplicate of the declaration in all-packages.nix
+      buildPackages.nixosOptionsDoc = attrs:
+        (import "${nixosPath}/lib/make-options-doc")
+          ({ inherit pkgs lib; } // attrs);
+    };
+    config = config.config;
+    options = eval.options;
+    version = release;
+    revision = "release-${release}";
+    prefix = modulesPath;
+  };
+in
+  docs.optionsNix
diff --git a/nixpkgs/nixos/lib/eval-config-minimal.nix b/nixpkgs/nixos/lib/eval-config-minimal.nix
new file mode 100644
index 000000000000..036389121973
--- /dev/null
+++ b/nixpkgs/nixos/lib/eval-config-minimal.nix
@@ -0,0 +1,50 @@
+
+# DO NOT IMPORT. Use nixpkgsFlake.lib.nixos, or import (nixpkgs + "/nixos/lib")
+{ lib }: # read -^
+
+let
+
+  /*
+    Invoke NixOS. Unlike traditional NixOS, this does not include all modules.
+    Any such modules have to be explicitly added via the `modules` parameter,
+    or imported using `imports` in a module.
+
+    A minimal module list improves NixOS evaluation performance and allows
+    modules to be independently usable, supporting new use cases.
+
+    Parameters:
+
+      modules:        A list of modules that constitute the configuration.
+
+      specialArgs:    An attribute set of module arguments. Unlike
+                      `config._module.args`, these are available for use in
+                      `imports`.
+                      `config._module.args` should be preferred when possible.
+
+    Return:
+
+      An attribute set containing `config.system.build.toplevel` among other
+      attributes. See `lib.evalModules` in the Nixpkgs library.
+
+   */
+  evalModules = {
+    prefix ? [],
+    modules ? [],
+    specialArgs ? {},
+  }:
+  # NOTE: Regular NixOS currently does use this function! Don't break it!
+  #       Ideally we don't diverge, unless we learn that we should.
+  #       In other words, only the public interface of nixos.evalModules
+  #       is experimental.
+  lib.evalModules {
+    inherit prefix modules;
+    class = "nixos";
+    specialArgs = {
+      modulesPath = builtins.toString ../modules;
+    } // specialArgs;
+  };
+
+in
+{
+  inherit evalModules;
+}
diff --git a/nixpkgs/nixos/lib/eval-config.nix b/nixpkgs/nixos/lib/eval-config.nix
new file mode 100644
index 000000000000..da099f86aa2c
--- /dev/null
+++ b/nixpkgs/nixos/lib/eval-config.nix
@@ -0,0 +1,116 @@
+# From an end-user configuration file (`configuration.nix'), build a NixOS
+# configuration object (`config') from which we can retrieve option
+# values.
+
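+# A typical invocation looks roughly like this (a sketch; ./configuration.nix stands for
+# whatever NixOS configuration module you want to evaluate):
+#
+#   (import ./eval-config.nix {
+#     system = "x86_64-linux";
+#     modules = [ ./configuration.nix ];
+#   }).config.system.build.toplevel
+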
+# !!! Please think twice before adding to this argument list!
+# Ideally eval-config.nix would be an extremely thin wrapper
+# around lib.evalModules, so that modular systems that have nixos configs
+# as subcomponents (e.g. the container feature, or nixops if network
+# expressions are ever made modular at the top level) can just use
+# types.submodule instead of using eval-config.nix
+evalConfigArgs@
+{ # !!! system can be set modularly, would be nice to remove,
+  #     however, removing or changing this default is too much
+  #     of a breaking change. To set it modularly, pass `null`.
+  system ? builtins.currentSystem
+, # !!! is this argument needed any more? The pkgs argument can
+  # be set modularly anyway.
+  pkgs ? null
+, # !!! what do we gain by making this configurable?
+  #     we can add modules that are included in specialisations, regardless
+  #     of inheritParentConfig.
+  baseModules ? import ../modules/module-list.nix
+, # !!! See comment about args in lib/modules.nix
+  extraArgs ? {}
+, # !!! See comment about args in lib/modules.nix
+  specialArgs ? {}
+, modules
+, modulesLocation ? (builtins.unsafeGetAttrPos "modules" evalConfigArgs).file or null
+, # !!! See comment about check in lib/modules.nix
+  check ? true
+, prefix ? []
+, lib ? import ../../lib
+, extraModules ? let e = builtins.getEnv "NIXOS_EXTRA_MODULE_PATH";
+                 in lib.optional (e != "") (import e)
+}:
+
+let
+  inherit (lib) optional;
+
+  evalModulesMinimal = (import ./default.nix {
+    inherit lib;
+    # Implicit use of feature is noted in implementation.
+    featureFlags.minimalModules = { };
+  }).evalModules;
+
+  pkgsModule = rec {
+    _file = ./eval-config.nix;
+    key = _file;
+    config = lib.mkMerge (
+      (optional (system != null) {
+        # Explicit `nixpkgs.system` or `nixpkgs.localSystem` should override
+        # this.  Since the latter defaults to the former, the former should
+        # default to the argument. That way this new default could propagate all
+        # the way through, but has the last priority behind everything else.
+        nixpkgs.system = lib.mkDefault system;
+      })
+      ++
+      (optional (pkgs != null) {
+        # This should be default priority, so it conflicts with any user-defined pkgs.
+        nixpkgs.pkgs = pkgs;
+      })
+    );
+  };
+
+  withWarnings = x:
+    lib.warnIf (evalConfigArgs?extraArgs) "The extraArgs argument to eval-config.nix is deprecated. Please set config._module.args instead."
+    lib.warnIf (evalConfigArgs?check) "The check argument to eval-config.nix is deprecated. Please set config._module.check instead."
+    x;
+
+  legacyModules =
+    lib.optional (evalConfigArgs?extraArgs) {
+      config = {
+        _module.args = extraArgs;
+      };
+    }
+    ++ lib.optional (evalConfigArgs?check) {
+      config = {
+        _module.check = lib.mkDefault check;
+      };
+    };
+
+  allUserModules =
+    let
+      # Add the invoking file (or specified modulesLocation) as error message location
+      # for modules that don't have their own locations; presumably inline modules.
+      locatedModules =
+        if modulesLocation == null then
+          modules
+        else
+          map (lib.setDefaultModuleLocation modulesLocation) modules;
+    in
+      locatedModules ++ legacyModules;
+
+  noUserModules = evalModulesMinimal ({
+    inherit prefix specialArgs;
+    modules = baseModules ++ extraModules ++ [ pkgsModule modulesModule ];
+  });
+
+  # Extra arguments that are useful for constructing a similar configuration.
+  modulesModule = {
+    config = {
+      _module.args = {
+        inherit noUserModules baseModules extraModules modules;
+      };
+    };
+  };
+
+  nixosWithUserModules = noUserModules.extendModules { modules = allUserModules; };
+
+  withExtraAttrs = configuration: configuration // {
+    inherit extraArgs;
+    inherit (configuration._module.args) pkgs;
+    extendModules = args: withExtraAttrs (configuration.extendModules args);
+  };
+in
+withWarnings (withExtraAttrs nixosWithUserModules)
diff --git a/nixpkgs/nixos/lib/from-env.nix b/nixpkgs/nixos/lib/from-env.nix
new file mode 100644
index 000000000000..6bd71e40e9a1
--- /dev/null
+++ b/nixpkgs/nixos/lib/from-env.nix
@@ -0,0 +1,4 @@
+# TODO: remove this file. There is lib.maybeEnv now
+name: default:
+let value = builtins.getEnv name; in
+if value == "" then default else value
diff --git a/nixpkgs/nixos/lib/make-btrfs-fs.nix b/nixpkgs/nixos/lib/make-btrfs-fs.nix
new file mode 100644
index 000000000000..277ff6a4dca8
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-btrfs-fs.nix
@@ -0,0 +1,67 @@
+# Builds a btrfs image containing a populated /nix/store with the closure
+# of store paths passed in the storePaths parameter, in addition to the
+# contents of a directory that can be populated with commands. The
+# generated image is sized to only fit its contents, with the expectation
+# that a script resizes the filesystem at boot time.
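+#
+# Example invocation (a sketch; `pkgs.hello` stands in for whatever store paths you want included):
+#
+#   pkgs.callPackage ./make-btrfs-fs.nix {
+#     storePaths = [ pkgs.hello ];
+#     volumeLabel = "nixos";
+#   }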
+{ pkgs
+, lib
+# List of derivations to be included
+, storePaths
+# Whether or not to compress the resulting image with zstd
+, compressImage ? false, zstd
+# Shell commands to populate the ./files directory.
+# All files in that directory are copied to the root of the FS.
+, populateImageCommands ? ""
+, volumeLabel
+, uuid ? "44444444-4444-4444-8888-888888888888"
+, btrfs-progs
+, libfaketime
+, fakeroot
+}:
+
+let
+  sdClosureInfo = pkgs.buildPackages.closureInfo { rootPaths = storePaths; };
+in
+pkgs.stdenv.mkDerivation {
+  name = "btrfs-fs.img${lib.optionalString compressImage ".zst"}";
+
+  nativeBuildInputs = [ btrfs-progs libfaketime fakeroot ] ++ lib.optional compressImage zstd;
+
+  buildCommand =
+    ''
+      ${if compressImage then "img=temp.img" else "img=$out"}
+
+      set -x
+      (
+          mkdir -p ./files
+          ${populateImageCommands}
+      )
+
+      mkdir -p ./rootImage/nix/store
+
+      xargs -I % cp -a --reflink=auto % -t ./rootImage/nix/store/ < ${sdClosureInfo}/store-paths
+      (
+        GLOBIGNORE=".:.."
+        shopt -u dotglob
+
+        for f in ./files/*; do
+            cp -a --reflink=auto -t ./rootImage/ "$f"
+        done
+      )
+
+      cp ${sdClosureInfo}/registration ./rootImage/nix-path-registration
+
+      touch $img
+      faketime -f "1970-01-01 00:00:01" fakeroot mkfs.btrfs -L ${volumeLabel} -U ${uuid} -r ./rootImage --shrink $img
+
+      if ! btrfs check $img; then
+        echo "--- 'btrfs check' failed for BTRFS image ---"
+        return 1
+      fi
+
+      if [ ${builtins.toString compressImage} ]; then
+        echo "Compressing image"
+        zstd -v --no-progress ./$img -o $out
+      fi
+    '';
+}
diff --git a/nixpkgs/nixos/lib/make-channel.nix b/nixpkgs/nixos/lib/make-channel.nix
new file mode 100644
index 000000000000..0a511468fb2d
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-channel.nix
@@ -0,0 +1,31 @@
+/* Build a channel tarball. These contain, in addition to the nixpkgs
+ * expressions themselves, files that indicate the version of nixpkgs
+ * that they represent.
+ */
+{ pkgs, nixpkgs, version, versionSuffix }:
+
+pkgs.releaseTools.makeSourceTarball {
+  name = "nixos-channel";
+
+  src = nixpkgs;
+
+  officialRelease = false; # FIXME: fix this in makeSourceTarball
+  inherit version versionSuffix;
+
+  buildInputs = [ pkgs.nix ];
+
+  distPhase = ''
+    rm -rf .git
+    echo -n $VERSION_SUFFIX > .version-suffix
+    echo -n ${nixpkgs.rev or nixpkgs.shortRev} > .git-revision
+    releaseName=nixos-$VERSION$VERSION_SUFFIX
+    mkdir -p $out/tarballs
+    cp -prd . ../$releaseName
+    chmod -R u+w ../$releaseName
+    ln -s . ../$releaseName/nixpkgs # hack to make ‘<nixpkgs>’ work
+    NIX_STATE_DIR=$TMPDIR nix-env -f ../$releaseName/default.nix -qaP --meta --show-trace --xml \* > /dev/null
+    cd ..
+    chmod -R u+w $releaseName
+    tar cfJ $out/tarballs/$releaseName.tar.xz $releaseName
+  '';
+}
diff --git a/nixpkgs/nixos/lib/make-disk-image.nix b/nixpkgs/nixos/lib/make-disk-image.nix
new file mode 100644
index 000000000000..e5d82f4de7c9
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-disk-image.nix
@@ -0,0 +1,620 @@
+/* Technical details
+
+`make-disk-image` has a bit of magic to minimize the amount of work to do in a virtual machine.
+
+It relies on the [LKL (Linux Kernel Library) project](https://github.com/lkl/linux), which provides the Linux kernel as a userspace library.
+
+The Nix-store-only image only needs to run LKL tools to produce an image and will never spawn a virtual machine, whereas full images will always require a virtual machine, but also use LKL.
+
+### Image preparation phase
+
+The image preparation phase produces the initial image layout in a folder:
+
+- devise a root folder based on `$PWD`
+- prepare the contents by copying and restoring ACLs in this root folder
+- load in the Nix store database all additional paths computed by `pkgs.closureInfo` in a temporary Nix store
+- run `nixos-install` in a temporary folder
+- transfer from the temporary store the additional paths registered to the installed NixOS
+- compute the size of the disk image based on the apparent size of the root folder
+- partition the disk image using the corresponding script according to the partition table type
+- format the partitions if needed
+- use `cptofs` (LKL tool) to copy the root folder inside the disk image
+
+At this step, the disk image already contains the Nix store; it now only needs to be converted to the desired format to be used.
+
+### Image conversion phase
+
+Using `qemu-img`, the disk image is converted from a raw format to the desired format: qcow2(-compressed), vdi, vpc.
+
+### Image Partitioning
+
+#### `none`
+
+No partition table layout is written. The image is a bare filesystem image.
+
+#### `legacy`
+
+The image is partitioned using MBR. There is one primary ext4 partition starting at 1 MiB that fills the rest of the disk image.
+
+This partition layout is unsuitable for UEFI.
+
+#### `legacy+gpt`
+
+This partition table type uses GPT and:
+
+- create a "no filesystem" partition from 1MiB to 2MiB ;
+- set `bios_grub` flag on this "no filesystem" partition, which marks it as a [GRUB BIOS partition](https://www.gnu.org/software/parted/manual/html_node/set.html) ;
+- create a primary ext4 partition starting at 2MiB and extending to the full disk image ;
+- perform optimal alignments checks on each partition
+
+This partition layout is unsuitable for UEFI boot, because it has no ESP (EFI System Partition) partition. It can work with CSM (Compatibility Support Module) which emulates legacy (BIOS) boot for UEFI.
+
+#### `efi`
+
+This partition table type uses GPT and:
+
+- creates a FAT32 ESP partition from 8MiB to the specified `bootSize` parameter (256MiB by default) and marks it bootable;
+- creates a primary ext4 partition starting after the boot partition and extending to the end of the disk image
+
+#### `hybrid`
+
+This partition table type uses GPT and:
+
+- creates a "no filesystem" partition from 0 to 1MiB, set `bios_grub` flag on it ;
+- creates an FAT32 ESP partition from 8MiB to specified `bootSize` parameter (256MiB by default), set it bootable ;
+- creates a primary ext4 partition starting after the boot one and extending to the full disk image
+
+This partition layout can be booted by a BIOS that understands GPT layouts and recognizes the MBR at the start.
+
+### How to run determinism analysis on results?
+
+Build your derivation with `--check` to rebuild it and verify it is the same.
+
+If it fails, you will be left with two folders, one of them carrying the `.check` suffix.
+
+You can use `diffoscope` to see the differences between the folders.
+
+However, `diffoscope` is currently not able to diff two QCOW2 filesystems; it is therefore advised to use the raw format.
+
+Even if you use raw disks, `diffoscope` cannot diff the partition table and partitions recursively.
+
+To solve this, you can run `fdisk -l $image` and generate `dd if=$image of=$image-p$i.raw skip=$start count=$sectors` for each `(start, sectors)` listed in the `fdisk` output. Now, you will have each partition as a separate file and you can compare them in pairs.
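+
+For example, a sketch of that extraction loop (the image name and the start/sector values are
+placeholders; take the real numbers from your own `fdisk -l` output):
+
+    fdisk -l nixos.img                                  # note "Start" and "Sectors" per partition
+    dd if=nixos.img of=nixos-p1.raw bs=512 skip=2048 count=16384
+    dd if=nixos.img of=nixos-p2.raw bs=512 skip=18432 count=1030144
+    diffoscope nixos-p1.raw nixos-check-p1.raw          # compare against the partition extracted from the --check build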
+*/
+{ pkgs
+, lib
+
+, # The NixOS configuration to be installed onto the disk image.
+  config
+
+, # The size of the disk, in megabytes.
+  # if "auto" size is calculated based on the contents copied to it and
+  #   additionalSpace is taken into account.
+  diskSize ? "auto"
+
+, # additional disk space to be added to the image if diskSize "auto"
+  # is used
+  additionalSpace ? "512M"
+
+, # The size of the boot partition; only used if partitionTableType is
+  # either "efi" or "hybrid".
+  # This will be undersized slightly, as this is actually the offset of
+  # the end of the partition. Generally it will be 1MiB smaller.
+  bootSize ? "256M"
+
+, # The files and directories to be placed in the target file system.
+  # This is a list of attribute sets {source, target, mode, user, group} where
+  # `source' is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target', `mode' is a string containing
+  # the permissions that will be set (ex. "755"), `user' and `group' are the
+  # user and group name that will be set as owner of the files.
+  # `mode', `user', and `group' are optional.
+  # When setting one of `user' or `group', the other needs to be set too.
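+  # Example (a sketch; the motd file is hypothetical):
+  #   contents = [
+  #     { source = ./motd; target = "/etc/motd"; mode = "0644"; user = "root"; group = "root"; }
+  #   ];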
+  contents ? []
+
+, # Type of partition table to use; either "legacy", "efi", or "none".
+  # For "efi" images, the GPT partition table is used and a mandatory ESP
+  #   partition of reasonable size is created in addition to the root partition.
+  # For "legacy", the msdos partition table is used and a single large root
+  #   partition is created.
+  # For "legacy+gpt", the GPT partition table is used, a 1MiB no-fs partition for
+  #   use by the bootloader is created, and a single large root partition is
+  #   created.
+  # For "hybrid", the GPT partition table is used and a mandatory ESP
+  #   partition of reasonable size is created in addition to the root partition.
+  #   Also a legacy MBR will be present.
+  # For "none", no partition table is created. Enabling `installBootLoader`
+  #   most likely fails as GRUB will probably refuse to install.
+  partitionTableType ? "legacy"
+
+, # Whether to invoke `switch-to-configuration boot` during image creation
+  installBootLoader ? true
+
+, # Whether to make EFI variables available in $out/efi-vars.fd and use them during disk creation
+  touchEFIVars ? false
+
+, # OVMF firmware derivation
+  OVMF ? pkgs.OVMF.fd
+
+, # EFI firmware
+  efiFirmware ? OVMF.firmware
+
+, # EFI variables
+  efiVariables ? OVMF.variables
+
+, # The root file system type.
+  fsType ? "ext4"
+
+, # Filesystem label
+  label ? if onlyNixStore then "nix-store" else "nixos"
+
+, # The initial NixOS configuration file to be copied to
+  # /etc/nixos/configuration.nix.
+  configFile ? null
+
+, # Shell code executed after the VM has finished.
+  postVM ? ""
+
+, # Guest memory size
+  memSize ? 1024
+
+, # Copy the contents of the Nix store to the root of the image and
+  # skip further setup. Incompatible with `contents`,
+  # `installBootLoader` and `configFile`.
+  onlyNixStore ? false
+
+, name ? "nixos-disk-image"
+
+, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
+  format ? "raw"
+
+  # Whether to fix:
+  #   - GPT Disk Unique Identifier (diskGUID)
+  #   - GPT Partition Unique Identifier: depends on the layout, root partition UUID can be controlled through `rootGPUID` option
+  #   - GPT Partition Type Identifier: fixed according to the layout, e.g. ESP partition, etc. through `parted` invocation.
+  #   - Filesystem Unique Identifier of the *root partition* when fsType = ext4.
+  # BIOS/MBR support is "best effort" at the moment.
+  # Boot partitions may not be deterministic.
+  # Also fixes the last-checked timestamp of the ext4 partition if fsType = ext4.
+, deterministic ? true
+
+  # GPT Partition Unique Identifier for root partition.
+, rootGPUID ? "F222513B-DED1-49FA-B591-20CE86A2FE7F"
+  # When fsType = ext4, this is the root Filesystem Unique Identifier.
+  # TODO: support other filesystems someday.
+, rootFSUID ? (if fsType == "ext4" then rootGPUID else null)
+
+, # Whether a nix channel based on the current source tree should be
+  # made available inside the image. Useful for interactive use of nix
+  # utils, but changes the hash of the image when the sources are
+  # updated.
+  copyChannel ? true
+
+, # Additional store paths to copy to the image's store.
+  additionalPaths ? []
+}:
+
+assert (lib.assertOneOf "partitionTableType" partitionTableType [ "legacy" "legacy+gpt" "efi" "hybrid" "none" ]);
+assert (lib.assertMsg (fsType == "ext4" && deterministic -> rootFSUID != null) "In deterministic mode with a ext4 partition, rootFSUID must be non-null, by default, it is equal to rootGPUID.");
+  # We use -E offset=X below, which is only supported by e2fsprogs
+assert (lib.assertMsg (partitionTableType != "none" -> fsType == "ext4") "to produce a partition table, we need to use -E offset flag which is support only for fsType = ext4");
+assert (lib.assertMsg (touchEFIVars -> partitionTableType == "hybrid" || partitionTableType == "efi" || partitionTableType == "legacy+gpt") "EFI variables can be used only with a partition table of type: hybrid, efi or legacy+gpt.");
+  # For a Nix-store-only image: contents must be empty, configFile must be unset, and no bootloader should be installed.
+assert (lib.assertMsg (onlyNixStore -> contents == [] && configFile == null && !installBootLoader) "In a Nix-store-only image, the contents must be empty, no configuration may be provided, and no bootloader should be installed.");
+# Either both or none of {user,group} need to be set
+assert (lib.assertMsg (lib.all
+         (attrs: ((attrs.user  or null) == null)
+              == ((attrs.group or null) == null))
+        contents) "Contents of the disk image should set none of {user, group} or both at the same time.");
+
+with lib;
+
+let format' = format; in let
+
+  format = if format' == "qcow2-compressed" then "qcow2" else format';
+
+  compress = optionalString (format' == "qcow2-compressed") "-c";
+
+  filename = "nixos." + {
+    qcow2 = "qcow2";
+    vdi   = "vdi";
+    vpc   = "vhd";
+    raw   = "img";
+  }.${format} or format;
+
+  rootPartition = { # switch-case
+    legacy = "1";
+    "legacy+gpt" = "2";
+    efi = "2";
+    hybrid = "3";
+  }.${partitionTableType};
+
+  partitionDiskScript = { # switch-case
+    legacy = ''
+      parted --script $diskImage -- \
+        mklabel msdos \
+        mkpart primary ext4 1MiB -1
+    '';
+    "legacy+gpt" = ''
+      parted --script $diskImage -- \
+        mklabel gpt \
+        mkpart no-fs 1MB 2MB \
+        set 1 bios_grub on \
+        align-check optimal 1 \
+        mkpart primary ext4 2MB -1 \
+        align-check optimal 2 \
+        print
+      ${optionalString deterministic ''
+          sgdisk \
+          --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C \
+          --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
+          --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \
+          --partition-guid=3:${rootGPUID} \
+          $diskImage
+      ''}
+    '';
+    efi = ''
+      parted --script $diskImage -- \
+        mklabel gpt \
+        mkpart ESP fat32 8MiB ${bootSize} \
+        set 1 boot on \
+        mkpart primary ext4 ${bootSize} -1
+      ${optionalString deterministic ''
+          sgdisk \
+          --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C \
+          --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
+          --partition-guid=2:${rootGPUID} \
+          $diskImage
+      ''}
+    '';
+    hybrid = ''
+      parted --script $diskImage -- \
+        mklabel gpt \
+        mkpart ESP fat32 8MiB ${bootSize} \
+        set 1 boot on \
+        mkpart no-fs 0 1024KiB \
+        set 2 bios_grub on \
+        mkpart primary ext4 ${bootSize} -1
+      ${optionalString deterministic ''
+          sgdisk \
+          --disk-guid=97FD5997-D90B-4AA3-8D16-C1723AEA73C \
+          --partition-guid=1:1C06F03B-704E-4657-B9CD-681A087A2FDC \
+          --partition-guid=2:970C694F-AFD0-4B99-B750-CDB7A329AB6F \
+          --partition-guid=3:${rootGPUID} \
+          $diskImage
+      ''}
+    '';
+    none = "";
+  }.${partitionTableType};
+
+  useEFIBoot = touchEFIVars;
+
+  nixpkgs = cleanSource pkgs.path;
+
+  # FIXME: merge with channel.nix / make-channel.nix.
+  channelSources = pkgs.runCommand "nixos-${config.system.nixos.version}" {} ''
+    mkdir -p $out
+    cp -prd ${nixpkgs.outPath} $out/nixos
+    chmod -R u+w $out/nixos
+    if [ ! -e $out/nixos/nixpkgs ]; then
+      ln -s . $out/nixos/nixpkgs
+    fi
+    rm -rf $out/nixos/.git
+    echo -n ${config.system.nixos.versionSuffix} > $out/nixos/.version-suffix
+  '';
+
+  binPath = with pkgs; makeBinPath (
+    [ rsync
+      util-linux
+      parted
+      e2fsprogs
+      lkl
+      config.system.build.nixos-install
+      config.system.build.nixos-enter
+      nix
+      systemdMinimal
+    ]
+    ++ lib.optional deterministic gptfdisk
+    ++ stdenv.initialPath);
+
+  # I'm preserving the line below because I'm going to search for it across nixpkgs to consolidate
+  # image building logic. The comment right below this now appears in 4 different places in nixpkgs :)
+  # !!! should use XML.
+  sources = map (x: x.source) contents;
+  targets = map (x: x.target) contents;
+  modes   = map (x: x.mode  or "''") contents;
+  users   = map (x: x.user  or "''") contents;
+  groups  = map (x: x.group or "''") contents;
+
+  basePaths = [ config.system.build.toplevel ]
+    ++ lib.optional copyChannel channelSources;
+
+  additionalPaths' = subtractLists basePaths additionalPaths;
+
+  closureInfo = pkgs.closureInfo {
+    rootPaths = basePaths ++ additionalPaths';
+  };
+
+  blockSize = toString (4 * 1024); # ext4fs block size (not block device sector size)
+
+  prepareImage = ''
+    export PATH=${binPath}
+
+    # Yes, mkfs.ext4 takes different units in different contexts. Fun.
+    sectorsToKilobytes() {
+      echo $(( ( "$1" * 512 ) / 1024 ))
+    }
+
+    sectorsToBytes() {
+      echo $(( "$1" * 512  ))
+    }
+
+    # Given lines of numbers, adds them together
+    sum_lines() {
+      local acc=0
+      while read -r number; do
+        acc=$((acc+number))
+      done
+      echo "$acc"
+    }
+
+    mebibyte=$(( 1024 * 1024 ))
+
+    # Approximate percentage of reserved space in an ext4 fs over 512MiB.
+    # 0.05208587646484375
+    #  × 1000, integer part: 52
+    compute_fudge() {
+      echo $(( $1 * 52 / 1000 ))
+    }
+
+    mkdir $out
+
+    root="$PWD/root"
+    mkdir -p $root
+
+    # Copy arbitrary other files into the image
+    # Semi-shamelessly copied from make-etc.sh. I (@copumpkin) shall factor this stuff out as part of
+    # https://github.com/NixOS/nixpkgs/issues/23052.
+    set -f
+    sources_=(${concatStringsSep " " sources})
+    targets_=(${concatStringsSep " " targets})
+    modes_=(${concatStringsSep " " modes})
+    set +f
+
+    for ((i = 0; i < ''${#targets_[@]}; i++)); do
+      source="''${sources_[$i]}"
+      target="''${targets_[$i]}"
+      mode="''${modes_[$i]}"
+
+      if [ -n "$mode" ]; then
+        rsync_chmod_flags="--chmod=$mode"
+      else
+        rsync_chmod_flags=""
+      fi
+      # Unfortunately cptofs only supports modes, not ownership, so we can't use
+      # rsync's --chown option. Instead, we change the ownerships in the
+      # VM script with chown.
+      rsync_flags="-a --no-o --no-g $rsync_chmod_flags"
+      if [[ "$source" =~ '*' ]]; then
+        # If the source name contains '*', perform globbing.
+        mkdir -p $root/$target
+        for fn in $source; do
+          rsync $rsync_flags "$fn" $root/$target/
+        done
+      else
+        mkdir -p $root/$(dirname $target)
+        if [ -e $root/$target ]; then
+          echo "duplicate entry $target -> $source"
+          exit 1
+        elif [ -d $source ]; then
+          # Append a slash to the end of source to get rsync to copy the
+          # directory _to_ the target instead of _inside_ the target.
+          # (See `man rsync`'s note on a trailing slash.)
+          rsync $rsync_flags $source/ $root/$target
+        else
+          rsync $rsync_flags $source $root/$target
+        fi
+      fi
+    done
+
+    export HOME=$TMPDIR
+
+    # Provide a Nix database so that nixos-install can copy closures.
+    export NIX_STATE_DIR=$TMPDIR/state
+    nix-store --load-db < ${closureInfo}/registration
+
+    chmod 755 "$TMPDIR"
+    echo "running nixos-install..."
+    nixos-install --root $root --no-bootloader --no-root-passwd \
+      --system ${config.system.build.toplevel} \
+      ${if copyChannel then "--channel ${channelSources}" else "--no-channel-copy"} \
+      --substituters ""
+
+    ${optionalString (additionalPaths' != []) ''
+      nix --extra-experimental-features nix-command copy --to $root --no-check-sigs ${concatStringsSep " " additionalPaths'}
+    ''}
+
+    diskImage=nixos.raw
+
+    ${if diskSize == "auto" then ''
+      ${if partitionTableType == "efi" || partitionTableType == "hybrid" then ''
+        # Add the GPT at the end
+        gptSpace=$(( 512 * 34 * 1 ))
+        # Normally we'd need to account for alignment and things, if bootSize
+        # represented the actual size of the boot partition. But it instead
+        # represents the offset at which it ends.
+        # So we know bootSize is the reserved space in front of the partition.
+        reservedSpace=$(( gptSpace + $(numfmt --from=iec '${bootSize}') ))
+      '' else if partitionTableType == "legacy+gpt" then ''
+        # Add the GPT at the end
+        gptSpace=$(( 512 * 34 * 1 ))
+        # And include the bios_grub partition; the ext4 partition starts at 2MB exactly.
+        reservedSpace=$(( gptSpace + 2 * mebibyte ))
+      '' else if partitionTableType == "legacy" then ''
+        # Add the 1MiB aligned reserved space (includes MBR)
+        reservedSpace=$(( mebibyte ))
+      '' else ''
+        reservedSpace=0
+      ''}
+      additionalSpace=$(( $(numfmt --from=iec '${additionalSpace}') + reservedSpace ))
+
+      # Compute required space in filesystem blocks
+      diskUsage=$(find . ! -type d -print0 | du --files0-from=- --apparent-size --block-size "${blockSize}" | cut -f1 | sum_lines)
+      # Each inode takes space!
+      numInodes=$(find . | wc -l)
+      # Convert to bytes, inodes take two blocks each!
+      diskUsage=$(( (diskUsage + 2 * numInodes) * ${blockSize} ))
+      # Then increase the required space to account for the reserved blocks.
+      fudge=$(compute_fudge $diskUsage)
+      requiredFilesystemSpace=$(( diskUsage + fudge ))
+
+      diskSize=$(( requiredFilesystemSpace  + additionalSpace ))
+
+      # Round up to the nearest mebibyte.
+      # This ensures whole 512 bytes sector sizes in the disk image
+      # and helps towards aligning partitions optimally.
+      if (( diskSize % mebibyte )); then
+        diskSize=$(( ( diskSize / mebibyte + 1) * mebibyte ))
+      fi
+
+      truncate -s "$diskSize" $diskImage
+
+      printf "Automatic disk size...\n"
+      printf "  Closure space use: %d bytes\n" $diskUsage
+      printf "  fudge: %d bytes\n" $fudge
+      printf "  Filesystem size needed: %d bytes\n" $requiredFilesystemSpace
+      printf "  Additional space: %d bytes\n" $additionalSpace
+      printf "  Disk image size: %d bytes\n" $diskSize
+    '' else ''
+      truncate -s ${toString diskSize}M $diskImage
+    ''}
+
+    ${partitionDiskScript}
+
+    ${if partitionTableType != "none" then ''
+      # Get start & length of the root partition in sectors to $START and $SECTORS.
+      eval $(partx $diskImage -o START,SECTORS --nr ${rootPartition} --pairs)
+
+      mkfs.${fsType} -b ${blockSize} -F -L ${label} $diskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
+    '' else ''
+      mkfs.${fsType} -b ${blockSize} -F -L ${label} $diskImage
+    ''}
+
+    echo "copying staging root to image..."
+    cptofs -p ${optionalString (partitionTableType != "none") "-P ${rootPartition}"} \
+           -t ${fsType} \
+           -i $diskImage \
+           $root${optionalString onlyNixStore builtins.storeDir}/* / ||
+      (echo >&2 "ERROR: cptofs failed. diskSize might be too small for closure."; exit 1)
+  '';
+
+  moveOrConvertImage = ''
+    ${if format == "raw" then ''
+      mv $diskImage $out/${filename}
+    '' else ''
+      ${pkgs.qemu-utils}/bin/qemu-img convert -f raw -O ${format} ${compress} $diskImage $out/${filename}
+    ''}
+    diskImage=$out/${filename}
+  '';
+
+  createEFIVars = ''
+    efiVars=$out/efi-vars.fd
+    cp ${efiVariables} $efiVars
+    chmod 0644 $efiVars
+  '';
+
+  buildImage = pkgs.vmTools.runInLinuxVM (
+    pkgs.runCommand name {
+      preVM = prepareImage + lib.optionalString touchEFIVars createEFIVars;
+      buildInputs = with pkgs; [ util-linux e2fsprogs dosfstools ];
+      postVM = moveOrConvertImage + postVM;
+      QEMU_OPTS =
+        concatStringsSep " " (lib.optional useEFIBoot "-drive if=pflash,format=raw,unit=0,readonly=on,file=${efiFirmware}"
+        ++ lib.optionals touchEFIVars [
+          "-drive if=pflash,format=raw,unit=1,file=$efiVars"
+        ]
+      );
+      inherit memSize;
+    } ''
+      export PATH=${binPath}:$PATH
+
+      rootDisk=${if partitionTableType != "none" then "/dev/vda${rootPartition}" else "/dev/vda"}
+
+      # It is necessary to set the root filesystem's unique identifier in advance, otherwise
+      # the bootloader might get the wrong one and fail to boot.
+      # At the end, we reset again because we want deterministic timestamps.
+      ${optionalString (fsType == "ext4" && deterministic) ''
+        tune2fs -T now ${optionalString deterministic "-U ${rootFSUID}"} -c 0 -i 0 $rootDisk
+      ''}
+      # make systemd-boot find ESP without udev
+      mkdir /dev/block
+      ln -s /dev/vda1 /dev/block/254:1
+
+      mountPoint=/mnt
+      mkdir $mountPoint
+      mount $rootDisk $mountPoint
+
+      # Create the ESP and mount it. Unlike e2fsprogs, mkfs.vfat doesn't support an
+      # '-E offset=X' option, so we can't do this outside the VM.
+      ${optionalString (partitionTableType == "efi" || partitionTableType == "hybrid") ''
+        mkdir -p /mnt/boot
+        mkfs.vfat -n ESP /dev/vda1
+        mount /dev/vda1 /mnt/boot
+
+        ${optionalString touchEFIVars "mount -t efivarfs efivarfs /sys/firmware/efi/efivars"}
+      ''}
+
+      # Install a configuration.nix
+      mkdir -p /mnt/etc/nixos
+      ${optionalString (configFile != null) ''
+        cp ${configFile} /mnt/etc/nixos/configuration.nix
+      ''}
+
+      ${lib.optionalString installBootLoader ''
+        # In this throwaway resource, we only have /dev/vda, but the actual VM may refer to another disk for bootloader, e.g. /dev/vdb
+        # Use this option to create a symlink from vda to any arbitrary device you want.
+        ${optionalString (config.boot.loader.grub.enable && config.boot.loader.grub.device != "/dev/vda") ''
+            mkdir -p $(dirname ${config.boot.loader.grub.device})
+            ln -s /dev/vda ${config.boot.loader.grub.device}
+        ''}
+
+        # Set up core system link, bootloader (sd-boot, GRUB, uboot, etc.), etc.
+        NIXOS_INSTALL_BOOTLOADER=1 nixos-enter --root $mountPoint -- /nix/var/nix/profiles/system/bin/switch-to-configuration boot
+
+        # The above scripts will generate a random machine-id and we don't want to bake a single ID into all our images
+        rm -f $mountPoint/etc/machine-id
+      ''}
+
+      # Set the ownerships of the contents. The modes are set in preVM.
+      # No globbing on targets, so no need to set -f
+      targets_=(${concatStringsSep " " targets})
+      users_=(${concatStringsSep " " users})
+      groups_=(${concatStringsSep " " groups})
+      for ((i = 0; i < ''${#targets_[@]}; i++)); do
+        target="''${targets_[$i]}"
+        user="''${users_[$i]}"
+        group="''${groups_[$i]}"
+        if [ -n "$user$group" ]; then
+          # We have to nixos-enter since we need to use the user and group of the VM
+          nixos-enter --root $mountPoint -- chown -R "$user:$group" "$target"
+        fi
+      done
+
+      umount -R /mnt
+
+      # Make sure resize2fs works. Note that resize2fs has stricter criteria for resizing than a normal
+      # mount, so the `-c 0` and `-i 0` don't affect it. Setting it to `now` doesn't produce deterministic
+      # output, of course, but we can fix that when/if we start making images deterministic.
+      # In deterministic mode, this is fixed to 1970-01-01 (UNIX timestamp 0).
+      # This two-step approach is necessary, as otherwise `tune2fs` will want a fresher filesystem to perform
+      # some changes.
+      ${optionalString (fsType == "ext4") ''
+        tune2fs -T now ${optionalString deterministic "-U ${rootFSUID}"} -c 0 -i 0 $rootDisk
+        ${optionalString deterministic "tune2fs -f -T 19700101 $rootDisk"}
+      ''}
+    ''
+  );
+in
+  if onlyNixStore then
+    pkgs.runCommand name {}
+      (prepareImage + moveOrConvertImage + postVM)
+  else buildImage
diff --git a/nixpkgs/nixos/lib/make-ext4-fs.nix b/nixpkgs/nixos/lib/make-ext4-fs.nix
new file mode 100644
index 000000000000..b8e1b8d24c48
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-ext4-fs.nix
@@ -0,0 +1,95 @@
+# Builds an ext4 image containing a populated /nix/store with the closure
+# of store paths passed in the storePaths parameter, in addition to the
+# contents of a directory that can be populated with commands. The
+# generated image is sized to only fit its contents, with the expectation
+# that a script resizes the filesystem at boot time.
+{ pkgs
+, lib
+# List of derivations to be included
+, storePaths
+# Whether or not to compress the resulting image with zstd
+, compressImage ? false, zstd
+# Shell commands to populate the ./files directory.
+# All files in that directory are copied to the root of the FS.
+, populateImageCommands ? ""
+, volumeLabel
+, uuid ? "44444444-4444-4444-8888-888888888888"
+, e2fsprogs
+, libfaketime
+, perl
+, fakeroot
+}:
+
+let
+  sdClosureInfo = pkgs.buildPackages.closureInfo { rootPaths = storePaths; };
+in
+pkgs.stdenv.mkDerivation {
+  name = "ext4-fs.img${lib.optionalString compressImage ".zst"}";
+
+  nativeBuildInputs = [ e2fsprogs.bin libfaketime perl fakeroot ]
+  ++ lib.optional compressImage zstd;
+
+  buildCommand =
+    ''
+      ${if compressImage then "img=temp.img" else "img=$out"}
+      (
+      mkdir -p ./files
+      ${populateImageCommands}
+      )
+
+      echo "Preparing store paths for image..."
+
+      # Create nix/store before copying path
+      mkdir -p ./rootImage/nix/store
+
+      xargs -I % cp -a --reflink=auto % -t ./rootImage/nix/store/ < ${sdClosureInfo}/store-paths
+      (
+        GLOBIGNORE=".:.."
+        shopt -u dotglob
+
+        for f in ./files/*; do
+            cp -a --reflink=auto -t ./rootImage/ "$f"
+        done
+      )
+
+      # Also include a manifest of the closures in a format suitable for nix-store --load-db
+      cp ${sdClosureInfo}/registration ./rootImage/nix-path-registration
+
+      # Make a crude approximation of the size of the target image.
+      # If the script starts failing, increase the fudge factors here.
+      numInodes=$(find ./rootImage | wc -l)
+      numDataBlocks=$(du -s -c -B 4096 --apparent-size ./rootImage | tail -1 | awk '{ print int($1 * 1.10) }')
+      bytes=$((2 * 4096 * $numInodes + 4096 * $numDataBlocks))
+      echo "Creating an EXT4 image of $bytes bytes (numInodes=$numInodes, numDataBlocks=$numDataBlocks)"
+
+      truncate -s $bytes $img
+
+      faketime -f "1970-01-01 00:00:01" fakeroot mkfs.ext4 -L ${volumeLabel} -U ${uuid} -d ./rootImage $img
+
+      export EXT2FS_NO_MTAB_OK=yes
+      # I have ended up with corrupted images sometimes; I suspect that happens when the build machine's disk gets full during the build.
+      if ! fsck.ext4 -n -f $img; then
+        echo "--- Fsck failed for EXT4 image of $bytes bytes (numInodes=$numInodes, numDataBlocks=$numDataBlocks) ---"
+        cat errorlog
+        return 1
+      fi
+
+      # We may want to shrink the file system and resize the image to
+      # get rid of the unnecessary slack here--but see
+      # https://github.com/NixOS/nixpkgs/issues/125121 for caveats.
+
+      # shrink to fit
+      resize2fs -M $img
+
+      # Add 16 MiB to the current size
+      new_size=$(dumpe2fs -h $img | awk -F: \
+        '/Block count/{count=$2} /Block size/{size=$2} END{print (count*size+16*2**20)/size}')
+
+      resize2fs $img $new_size
+
+      if [ ${builtins.toString compressImage} ]; then
+        echo "Compressing image"
+        zstd -v --no-progress ./$img -o $out
+      fi
+    '';
+}
diff --git a/nixpkgs/nixos/lib/make-iso9660-image.nix b/nixpkgs/nixos/lib/make-iso9660-image.nix
new file mode 100644
index 000000000000..2f7dcf519a16
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-iso9660-image.nix
@@ -0,0 +1,65 @@
+{ stdenv, closureInfo, xorriso, syslinux, libossp_uuid
+
+, # The file name of the resulting ISO image.
+  isoName ? "cd.iso"
+
+, # The files and directories to be placed in the ISO file system.
+  # This is a list of attribute sets {source, target} where `source'
+  # is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target'.
+  contents
+
+, # In addition to `contents', the closure of the store paths listed
+  # in `storeContents' are also placed in the Nix store of the CD.
+  # This is a list of attribute sets {object, symlink} where `object'
+  # is a store path whose closure will be copied, and `symlink' is a
+  # symlink to `object' that will be added to the CD.
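+  # Example (a sketch; the exact paths depend on your configuration):
+  #   storeContents = [
+  #     { object = config.system.build.toplevel; symlink = "/run/current-system"; }
+  #   ];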
+  storeContents ? []
+
+, # Whether this should be an El-Torito bootable CD.
+  bootable ? false
+
+, # Whether this should be an efi-bootable El-Torito CD.
+  efiBootable ? false
+
+, # Whether this should be a hybrid CD (bootable from USB as well as CD).
+  usbBootable ? false
+
+, # The path (in the ISO file system) of the boot image.
+  bootImage ? ""
+
+, # The path (in the ISO file system) of the efi boot image.
+  efiBootImage ? ""
+
+, # The path (outside the ISO file system) of the isohybrid-mbr image.
+  isohybridMbrImage ? ""
+
+, # Whether to compress the resulting ISO image with zstd.
+  compressImage ? false, zstd
+
+, # The volume ID.
+  volumeID ? ""
+}:
+
+assert bootable -> bootImage != "";
+assert efiBootable -> efiBootImage != "";
+assert usbBootable -> isohybridMbrImage != "";
+
+stdenv.mkDerivation {
+  name = isoName;
+  __structuredAttrs = true;
+
+  buildCommandPath = ./make-iso9660-image.sh;
+  nativeBuildInputs = [ xorriso syslinux zstd libossp_uuid ];
+
+  inherit isoName bootable bootImage compressImage volumeID efiBootImage efiBootable isohybridMbrImage usbBootable;
+
+  sources = map (x: x.source) contents;
+  targets = map (x: x.target) contents;
+
+  objects = map (x: x.object) storeContents;
+  symlinks = map (x: x.symlink) storeContents;
+
+  # For obtaining the closure of `storeContents'.
+  closureInfo = closureInfo { rootPaths = map (x: x.object) storeContents; };
+}
diff --git a/nixpkgs/nixos/lib/make-iso9660-image.sh b/nixpkgs/nixos/lib/make-iso9660-image.sh
new file mode 100644
index 000000000000..34febe9cfe0e
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-iso9660-image.sh
@@ -0,0 +1,130 @@
+# Remove the initial slash from a path, since genisofs likes it that way.
+stripSlash() {
+    res="$1"
+    if test "${res:0:1}" = /; then res=${res:1}; fi
+}
+
+# Escape potential equal signs (=) with backslash (\=)
+escapeEquals() {
+    echo "$1" | sed -e 's/\\/\\\\/g' -e 's/=/\\=/g'
+}
+
+# Queues a file/directory to be placed on the ISO.
+# An entry consists of a local source path (2) and
+# a destination path on the ISO (1).
+addPath() {
+    target="$1"
+    source="$2"
+    echo "$(escapeEquals "$target")=$(escapeEquals "$source")" >> pathlist
+}
+
+stripSlash "$bootImage"; bootImage="$res"
+
+
+if test -n "$bootable"; then
+
+    # The -boot-info-table option modifies the $bootImage file, so
+    # find it in `contents' and make a copy of it (since the original
+    # is read-only in the Nix store...).
+    for ((i = 0; i < ${#targets[@]}; i++)); do
+        stripSlash "${targets[$i]}"
+        if test "$res" = "$bootImage"; then
+            echo "copying the boot image ${sources[$i]}"
+            cp "${sources[$i]}" boot.img
+            chmod u+w boot.img
+            sources[$i]=boot.img
+        fi
+    done
+
+    isoBootFlags="-eltorito-boot ${bootImage}
+                  -eltorito-catalog .boot.cat
+                  -no-emul-boot -boot-load-size 4 -boot-info-table
+                  --sort-weight 1 /isolinux" # Make sure isolinux is near the beginning of the ISO
+fi
+
+if test -n "$usbBootable"; then
+    usbBootFlags="-isohybrid-mbr ${isohybridMbrImage}"
+fi
+
+if test -n "$efiBootable"; then
+    efiBootFlags="-eltorito-alt-boot
+                  -e $efiBootImage
+                  -no-emul-boot
+                  -isohybrid-gpt-basdat"
+fi
+
+touch pathlist
+
+
+# Add the individual files.
+for ((i = 0; i < ${#targets[@]}; i++)); do
+    stripSlash "${targets[$i]}"
+    addPath "$res" "${sources[$i]}"
+done
+
+
+# Add the closures of the top-level store objects.
+for i in $(< $closureInfo/store-paths); do
+    addPath "${i:1}" "$i"
+done
+
+
+# Also include a manifest of the closures in a format suitable for
+# nix-store --load-db.
+if [[ ${#objects[*]} != 0 ]]; then
+    cp $closureInfo/registration nix-path-registration
+    addPath "nix-path-registration" "nix-path-registration"
+fi
+
+
+# Add symlinks to the top-level store objects.
+for ((n = 0; n < ${#objects[*]}; n++)); do
+    object=${objects[$n]}
+    symlink=${symlinks[$n]}
+    if test "$symlink" != "none"; then
+        mkdir -p $(dirname ./$symlink)
+        ln -s $object ./$symlink
+        addPath "$symlink" "./$symlink"
+    fi
+done
+
+mkdir -p $out/iso
+
+# daed2280-b91e-42c0-aed6-82c825ca41f3 is an arbitrary namespace, to prevent
+# independent applications from generating the same UUID for the same value.
+# (the chance of that being problematic seems pretty slim here, but that's how
+# version-5 UUIDs work)
+xorriso="xorriso
+ -boot_image any gpt_disk_guid=$(uuid -v 5 daed2280-b91e-42c0-aed6-82c825ca41f3 $out | tr -d -)
+ -volume_date all_file_dates =$SOURCE_DATE_EPOCH
+ -as mkisofs
+ -iso-level 3
+ -volid ${volumeID}
+ -appid nixos
+ -publisher nixos
+ -graft-points
+ -full-iso9660-filenames
+ -joliet
+ ${isoBootFlags}
+ ${usbBootFlags}
+ ${efiBootFlags}
+ -r
+ -path-list pathlist
+ --sort-weight 0 /
+"
+
+$xorriso -output $out/iso/$isoName
+
+if test -n "$compressImage"; then
+    echo "Compressing image..."
+    zstd -T$NIX_BUILD_CORES --rm $out/iso/$isoName
+fi
+
+mkdir -p $out/nix-support
+echo $system > $out/nix-support/system
+
+if test -n "$compressImage"; then
+    echo "file iso $out/iso/$isoName.zst" >> $out/nix-support/hydra-build-products
+else
+    echo "file iso $out/iso/$isoName" >> $out/nix-support/hydra-build-products
+fi
diff --git a/nixpkgs/nixos/lib/make-multi-disk-zfs-image.nix b/nixpkgs/nixos/lib/make-multi-disk-zfs-image.nix
new file mode 100644
index 000000000000..077bb8f22707
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-multi-disk-zfs-image.nix
@@ -0,0 +1,330 @@
+# Note: This is a private API, internal to NixOS. Its interface is subject
+# to change without notice.
+#
+# The result of this builder is two disk images:
+#
+#  * `boot` - a small disk formatted with FAT to be used for /boot. FAT is
+#    chosen to support EFI.
+#  * `root` - a larger disk with a zpool taking the entire disk.
+#
+# This two-disk approach is taken to satisfy ZFS's requirements for
+# autoexpand.
+#
+# # Why doesn't autoexpand work with ZFS in a partition?
+#
+# When ZFS owns the whole disk it doesn’t really use a partition: it has
+# a marker partition at the start and a marker partition at the end of
+# the disk.
+#
+# If ZFS is constrained to a partition, ZFS leaves expanding the partition
+# up to the user. Obviously, the user may not choose to do so.
+#
+# Once the user expands the partition, calling zpool online -e expands the
+# vdev to use the whole partition. It doesn’t happen automatically
+# presumably because zed doesn’t get an event saying its partition grew,
+# whereas it can and does get an event saying the whole disk it is on is
+# now larger.
+{ lib
+, pkgs
+, # The NixOS configuration to be installed onto the disk image.
+  config
+
+, # size of the FAT boot disk, in megabytes.
+  bootSize ? 1024
+
+, # The size of the root disk, in megabytes.
+  rootSize ? 2048
+
+, # The name of the ZFS pool
+  rootPoolName ? "tank"
+
+, # zpool properties
+  rootPoolProperties ? {
+    autoexpand = "on";
+  }
+, # pool-wide filesystem properties
+  rootPoolFilesystemProperties ? {
+    acltype = "posixacl";
+    atime = "off";
+    compression = "on";
+    mountpoint = "legacy";
+    xattr = "sa";
+  }
+
+, # datasets, with per-attribute options:
+  # mount: (optional) mount point in the VM
+  # properties: (optional) ZFS properties on the dataset, like filesystemProperties
+  # Notes:
+  # 1. datasets will be created from shorter to longer names as a simple topo-sort
+  # 2. you should define the root dataset's mount for `/` (see the example below)
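+  # Example (a sketch):
+  #   datasets = {
+  #     "tank/system/root" = { mount = "/"; };
+  #     "tank/system/var"  = { mount = "/var"; properties.quota = "16G"; };
+  #   };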
+  datasets ? { }
+
+, # The files and directories to be placed in the target file system.
+  # This is a list of attribute sets {source, target} where `source'
+  # is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target'.
+  contents ? []
+
+, # The initial NixOS configuration file to be copied to
+  # /etc/nixos/configuration.nix. This configuration will be embedded
+  # inside a configuration which includes the described ZFS fileSystems.
+  configFile ? null
+
+, # Shell code executed after the VM has finished.
+  postVM ? ""
+
+, # Guest memory size
+  memSize ? 1024
+
+, name ? "nixos-disk-image"
+
+, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
+  format ? "raw"
+
+, # Include a copy of Nixpkgs in the disk image
+  includeChannel ? true
+}:
+let
+  formatOpt = if format == "qcow2-compressed" then "qcow2" else format;
+
+  compress = lib.optionalString (format == "qcow2-compressed") "-c";
+
+  filenameSuffix = "." + {
+    qcow2 = "qcow2";
+    vdi = "vdi";
+    vpc = "vhd";
+    raw = "img";
+  }.${formatOpt} or formatOpt;
+  bootFilename = "nixos.boot${filenameSuffix}";
+  rootFilename = "nixos.root${filenameSuffix}";
+
+  # FIXME: merge with channel.nix / make-channel.nix.
+  channelSources =
+    let
+      nixpkgs = lib.cleanSource pkgs.path;
+    in
+      pkgs.runCommand "nixos-${config.system.nixos.version}" {} ''
+        mkdir -p $out
+        cp -prd ${nixpkgs.outPath} $out/nixos
+        chmod -R u+w $out/nixos
+        if [ ! -e $out/nixos/nixpkgs ]; then
+          ln -s . $out/nixos/nixpkgs
+        fi
+        rm -rf $out/nixos/.git
+        echo -n ${config.system.nixos.versionSuffix} > $out/nixos/.version-suffix
+      '';
+
+  closureInfo = pkgs.closureInfo {
+    rootPaths = [ config.system.build.toplevel ]
+    ++ (lib.optional includeChannel channelSources);
+  };
+
+  modulesTree = pkgs.aggregateModules
+    (with config.boot.kernelPackages; [ kernel zfs ]);
+
+  tools = lib.makeBinPath (
+    with pkgs; [
+      config.system.build.nixos-enter
+      config.system.build.nixos-install
+      dosfstools
+      e2fsprogs
+      gptfdisk
+      nix
+      parted
+      util-linux
+      zfs
+    ]
+  );
+
+  hasDefinedMount  = disk: ((disk.mount or null) != null);
+
+  stringifyProperties = prefix: properties: lib.concatStringsSep " \\\n" (
+    lib.mapAttrsToList
+      (
+        property: value: "${prefix} ${lib.escapeShellArg property}=${lib.escapeShellArg value}"
+      )
+      properties
+  );
+
+  createDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      sorted = lib.sort (left: right: (lib.stringLength left.name) < (lib.stringLength right.name)) datasetlist;
+      cmd = { name, value }:
+        let
+          properties = stringifyProperties "-o" (value.properties or {});
+        in
+          "zfs create -p ${properties} ${name}";
+    in
+      lib.concatMapStringsSep "\n" cmd sorted;
+
+  mountDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      mounts = lib.filter ({ value, ... }: hasDefinedMount value) datasetlist;
+      sorted = lib.sort (left: right: (lib.stringLength left.value.mount) < (lib.stringLength right.value.mount)) mounts;
+      cmd = { name, value }:
+        ''
+          mkdir -p /mnt${lib.escapeShellArg value.mount}
+          mount -t zfs ${name} /mnt${lib.escapeShellArg value.mount}
+        '';
+    in
+      lib.concatMapStringsSep "\n" cmd sorted;
+
+  unmountDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      mounts = lib.filter ({ value, ... }: hasDefinedMount value) datasetlist;
+      sorted = lib.sort (left: right: (lib.stringLength left.value.mount) > (lib.stringLength right.value.mount)) mounts;
+      cmd = { name, value }:
+        ''
+          umount /mnt${lib.escapeShellArg value.mount}
+        '';
+    in
+      lib.concatMapStringsSep "\n" cmd sorted;
+
+
+  fileSystemsCfgFile =
+    let
+      mountable = lib.filterAttrs (_: value: hasDefinedMount value) datasets;
+    in
+      pkgs.runCommand "filesystem-config.nix" {
+        buildInputs = with pkgs; [ jq nixpkgs-fmt ];
+        filesystems = builtins.toJSON {
+          fileSystems = lib.mapAttrs'
+            (
+              dataset: attrs:
+                {
+                  name = attrs.mount;
+                  value = {
+                    fsType = "zfs";
+                    device = "${dataset}";
+                  };
+                }
+            )
+            mountable;
+        };
+        passAsFile = [ "filesystems" ];
+      } ''
+      (
+        echo "builtins.fromJSON '''"
+        jq . < "$filesystemsPath"
+        echo "'''"
+      ) > $out
+
+      nixpkgs-fmt $out
+    '';
+
+  mergedConfig =
+    if configFile == null
+    then fileSystemsCfgFile
+    else
+      pkgs.runCommand "configuration.nix" {
+        buildInputs = with pkgs; [ nixpkgs-fmt ];
+      }
+        ''
+          (
+            echo '{ imports = ['
+            printf "(%s)\n" "$(cat ${fileSystemsCfgFile})";
+            printf "(%s)\n" "$(cat ${configFile})";
+            echo ']; }'
+          ) > $out
+
+          nixpkgs-fmt $out
+        '';
+
+  image = (
+    pkgs.vmTools.override {
+      rootModules =
+        [ "zfs" "9p" "9pnet_virtio" "virtio_pci" "virtio_blk" ] ++
+          (pkgs.lib.optional pkgs.stdenv.hostPlatform.isx86 "rtc_cmos");
+      kernel = modulesTree;
+    }
+  ).runInLinuxVM (
+    pkgs.runCommand name
+      {
+        QEMU_OPTS = "-drive file=$bootDiskImage,if=virtio,cache=unsafe,werror=report"
+         + " -drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
+         inherit memSize;
+        preVM = ''
+          PATH=$PATH:${pkgs.qemu_kvm}/bin
+          mkdir $out
+          bootDiskImage=boot.raw
+          qemu-img create -f raw $bootDiskImage ${toString bootSize}M
+
+          rootDiskImage=root.raw
+          qemu-img create -f raw $rootDiskImage ${toString rootSize}M
+        '';
+
+        postVM = ''
+          ${if formatOpt == "raw" then ''
+          mv $bootDiskImage $out/${bootFilename}
+          mv $rootDiskImage $out/${rootFilename}
+        '' else ''
+          ${pkgs.qemu_kvm}/bin/qemu-img convert -f raw -O ${formatOpt} ${compress} $bootDiskImage $out/${bootFilename}
+          ${pkgs.qemu_kvm}/bin/qemu-img convert -f raw -O ${formatOpt} ${compress} $rootDiskImage $out/${rootFilename}
+        ''}
+          bootDiskImage=$out/${bootFilename}
+          rootDiskImage=$out/${rootFilename}
+          set -x
+          ${postVM}
+        '';
+      } ''
+      export PATH=${tools}:$PATH
+      set -x
+
+      cp -sv /dev/vda /dev/sda
+      cp -sv /dev/vda /dev/xvda
+
+      parted --script /dev/vda -- \
+        mklabel gpt \
+        mkpart no-fs 1MiB 2MiB \
+        set 1 bios_grub on \
+        align-check optimal 1 \
+        mkpart ESP fat32 2MiB -1MiB \
+        align-check optimal 2 \
+        print
+
+      sfdisk --dump /dev/vda
+
+
+      zpool create \
+        ${stringifyProperties "  -o" rootPoolProperties} \
+        ${stringifyProperties "  -O" rootPoolFilesystemProperties} \
+        ${rootPoolName} /dev/vdb
+      parted --script /dev/vdb -- print
+
+      ${createDatasets}
+      ${mountDatasets}
+
+      mkdir -p /mnt/boot
+      mkfs.vfat -n ESP /dev/vda2
+      mount /dev/vda2 /mnt/boot
+
+      mount
+
+      # Install a configuration.nix
+      mkdir -p /mnt/etc/nixos
+      # `cat` so it is mutable on the fs
+      cat ${mergedConfig} > /mnt/etc/nixos/configuration.nix
+
+      export NIX_STATE_DIR=$TMPDIR/state
+      nix-store --load-db < ${closureInfo}/registration
+
+      nixos-install \
+        --root /mnt \
+        --no-root-passwd \
+        --system ${config.system.build.toplevel} \
+        --substituters "" \
+        ${lib.optionalString includeChannel ''--channel ${channelSources}''}
+
+      df -h
+
+      umount /mnt/boot
+      ${unmountDatasets}
+
+      zpool export ${rootPoolName}
+    ''
+  );
+in
+image
diff --git a/nixpkgs/nixos/lib/make-options-doc/default.nix b/nixpkgs/nixos/lib/make-options-doc/default.nix
new file mode 100644
index 000000000000..99515b5b8276
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-options-doc/default.nix
@@ -0,0 +1,175 @@
+/* Generate JSON, XML and DocBook documentation for given NixOS options.
+
+   Minimal example:
+
+    { pkgs,  }:
+
+    let
+      eval = import (pkgs.path + "/nixos/lib/eval-config.nix") {
+        baseModules = [
+          ../module.nix
+        ];
+        modules = [];
+      };
+    in pkgs.nixosOptionsDoc {
+      options = eval.options;
+    }
+
+*/
+{ pkgs
+, lib
+, options
+, transformOptions ? lib.id  # function for additional transformations of the options
+, documentType ? "appendix" # TODO deprecate "appendix" in favor of "none"
+                            #      and/or rename function to moduleOptionDoc for clean slate
+
+  # If you include more than one option list into a document, you need to
+  # provide different ids.
+, variablelistId ? "configuration-variable-list"
+  # String to prefix to the option XML/HTML id attributes.
+, optionIdPrefix ? "opt-"
+, revision ? "" # Specify revision for the options
+# a set of options the docs we are generating will be merged into, as if by recursiveUpdate.
+# used to split the options doc build into a static part (nixos/modules) and a dynamic part
+# (non-nixos modules imported via configuration.nix, other module sources).
+, baseOptionsJSON ? null
+# instead of printing warnings for e.g. options with missing descriptions (which may be lost
+# by nix build unless -L is given), emit errors and fail the build
+, warningsAreErrors ? true
+# allow docbook option docs if `true`. only markdown documentation is allowed when set to
+# `false`, and a different renderer may be used with different bugs and performance
+# characteristics but (hopefully) indistinguishable output.
+# deprecated since 23.11.
+# TODO remove in a while.
+, allowDocBook ? false
+# whether lib.mdDoc is required for descriptions to be read as markdown.
+# deprecated since 23.11.
+# TODO remove in a while.
+, markdownByDefault ? true
+}:
+
+assert markdownByDefault && ! allowDocBook;
+
+let
+  rawOpts = lib.optionAttrSetToDocList options;
+  transformedOpts = map transformOptions rawOpts;
+  filteredOpts = lib.filter (opt: opt.visible && !opt.internal) transformedOpts;
+  optionsList = lib.flip map filteredOpts
+   (opt: opt
+    // lib.optionalAttrs (opt ? relatedPackages && opt.relatedPackages != []) { relatedPackages = genRelatedPackages opt.relatedPackages opt.name; }
+   );
+
+  # Generate documentation (a list of Markdown links) for a list of
+  # packages. This is what the `relatedPackages` option of `mkOption`
+  # from ../../../lib/options.nix influences.
+  #
+  # Each element of `relatedPackages` can be either
+  # - a string:  that will be interpreted as an attribute name from `pkgs` and turned into a link
+  #              to search.nixos.org,
+  # - a list:    that will be interpreted as an attribute path from `pkgs` and turned into a link
+  #              to search.nixos.org,
+  # - an attrset: that can specify `name`, `path`, `comment`
+  #   (either of `name`, `path` is required, the rest are optional).
+  #
+  # NOTE: No checks against `pkgs` are made to ensure that the referenced package actually exists.
+  # Such checks are not compatible with option docs caching.
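+  #
+  # A hedged illustration (the package names below are hypothetical):
+  #
+  #   relatedPackages = [
+  #     "zsh"                            # string: pkgs.zsh
+  #     [ "python3Packages" "requests" ] # list:   pkgs.python3Packages.requests
+  #     { name = "nginx"; comment = "Only needed for the HTTP frontend."; }
+  #   ];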
+  genRelatedPackages = packages: optName:
+    let
+      unpack = p: if lib.isString p then { name = p; }
+                  else if lib.isList p then { path = p; }
+                  else p;
+      describe = args:
+        let
+          title = args.title or null;
+          name = args.name or (lib.concatStringsSep "." args.path);
+        in ''
+          - [${lib.optionalString (title != null) "${title} aka "}`pkgs.${name}`](
+              https://search.nixos.org/packages?show=${name}&sort=relevance&query=${name}
+            )${
+              lib.optionalString (args ? comment) "\n\n  ${args.comment}"
+            }
+        '';
+    in lib.concatMapStrings (p: describe (unpack p)) packages;
+
+  optionsNix = builtins.listToAttrs (map (o: { name = o.name; value = removeAttrs o ["name" "visible" "internal"]; }) optionsList);
+
+in rec {
+  inherit optionsNix;
+
+  optionsAsciiDoc = pkgs.runCommand "options.adoc" {
+    nativeBuildInputs = [ pkgs.nixos-render-docs ];
+  } ''
+    nixos-render-docs -j $NIX_BUILD_CORES options asciidoc \
+      --manpage-urls ${pkgs.path + "/doc/manpage-urls.json"} \
+      --revision ${lib.escapeShellArg revision} \
+      ${optionsJSON}/share/doc/nixos/options.json \
+      $out
+  '';
+
+  optionsCommonMark = pkgs.runCommand "options.md" {
+    nativeBuildInputs = [ pkgs.nixos-render-docs ];
+  } ''
+    nixos-render-docs -j $NIX_BUILD_CORES options commonmark \
+      --manpage-urls ${pkgs.path + "/doc/manpage-urls.json"} \
+      --revision ${lib.escapeShellArg revision} \
+      ${optionsJSON}/share/doc/nixos/options.json \
+      $out
+  '';
+
+  optionsJSON = pkgs.runCommand "options.json"
+    { meta.description = "List of NixOS options in JSON format";
+      nativeBuildInputs = [
+        pkgs.brotli
+        pkgs.python3Minimal
+      ];
+      options = builtins.toFile "options.json"
+        (builtins.unsafeDiscardStringContext (builtins.toJSON optionsNix));
+      # merge with an empty set if baseOptionsJSON is null to run markdown
+      # processing on the input options
+      baseJSON =
+        if baseOptionsJSON == null
+        then builtins.toFile "base.json" "{}"
+        else baseOptionsJSON;
+    }
+    ''
+      # Export the list of options in different formats.
+      dst=$out/share/doc/nixos
+      mkdir -p $dst
+
+      TOUCH_IF_DB=$dst/.used-docbook \
+      python ${./mergeJSON.py} \
+        ${lib.optionalString warningsAreErrors "--warnings-are-errors"} \
+        $baseJSON $options \
+        > $dst/options.json
+
+      if grep /nixpkgs/nixos/modules $dst/options.json; then
+        echo "The manual appears to depend on the location of Nixpkgs, which is bad"
+        echo "since this prevents sharing via the NixOS channel.  This is typically"
+        echo "caused by an option default that refers to a relative path (see above"
+        echo "for hints about the offending path)."
+        exit 1
+      fi
+
+      brotli -9 < $dst/options.json > $dst/options.json.br
+
+      mkdir -p $out/nix-support
+      echo "file json $dst/options.json" >> $out/nix-support/hydra-build-products
+      echo "file json-br $dst/options.json.br" >> $out/nix-support/hydra-build-products
+    '';
+
+  optionsDocBook = lib.warn "optionsDocBook is deprecated since 23.11 and will be removed in 24.05"
+    (pkgs.runCommand "options-docbook.xml" {
+      nativeBuildInputs = [
+        pkgs.nixos-render-docs
+      ];
+    } ''
+      nixos-render-docs -j $NIX_BUILD_CORES options docbook \
+        --manpage-urls ${pkgs.path + "/doc/manpage-urls.json"} \
+        --revision ${lib.escapeShellArg revision} \
+        --document-type ${lib.escapeShellArg documentType} \
+        --varlist-id ${lib.escapeShellArg variablelistId} \
+        --id-prefix ${lib.escapeShellArg optionIdPrefix} \
+        ${optionsJSON}/share/doc/nixos/options.json \
+        "$out"
+    '');
+}
diff --git a/nixpkgs/nixos/lib/make-options-doc/mergeJSON.py b/nixpkgs/nixos/lib/make-options-doc/mergeJSON.py
new file mode 100644
index 000000000000..4be83fcb827b
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-options-doc/mergeJSON.py
@@ -0,0 +1,104 @@
+import collections
+import json
+import os
+import sys
+from typing import Any, Dict, List
+
+JSON = Dict[str, Any]
+
+class Key:
+    def __init__(self, path: List[str]):
+        self.path = path
+    def __hash__(self):
+        result = 0
+        for id in self.path:
+            result ^= hash(id)
+        return result
+    def __eq__(self, other):
+        return type(self) is type(other) and self.path == other.path
+
+Option = collections.namedtuple('Option', ['name', 'value'])
+
+# pivot a dict of options keyed by their display name to a dict keyed by their path
+def pivot(options: Dict[str, JSON]) -> Dict[Key, Option]:
+    result: Dict[Key, Option] = dict()
+    for (name, opt) in options.items():
+        result[Key(opt['loc'])] = Option(name, opt)
+    return result
+
+# pivot back to indexed-by-full-name
+# Like the DocBook build, we'll just fail if multiple options with differing locs
+# render to the same option name.
+def unpivot(options: Dict[Key, Option]) -> Dict[str, JSON]:
+    result: Dict[str, Dict] = dict()
+    for (key, opt) in options.items():
+        if opt.name in result:
+            raise RuntimeError(
+                'multiple options with colliding ids found',
+                opt.name,
+                result[opt.name]['loc'],
+                opt.value['loc'],
+            )
+        result[opt.name] = opt.value
+    return result
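+
+# Rough illustration (not executed by the build): given the JSON
+#   { "services.foo.enable": { "loc": ["services", "foo", "enable"], ... } }
+# pivot returns { Key(["services", "foo", "enable"]): Option("services.foo.enable", {...}) },
+# and unpivot reverses the mapping, raising on colliding display names.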
+
+warningsAreErrors = False
+optOffset = 0
+for arg in sys.argv[1:]:
+    if arg == "--warnings-are-errors":
+        optOffset += 1
+        warningsAreErrors = True
+
+options = pivot(json.load(open(sys.argv[1 + optOffset], 'r')))
+overrides = pivot(json.load(open(sys.argv[2 + optOffset], 'r')))
+
+# fix up declaration paths in lazy options, since we don't eval them from a full nixpkgs dir
+for (k, v) in options.items():
+    # The _module options are not declared in nixos/modules
+    if v.value['loc'][0] != "_module":
+        v.value['declarations'] = list(map(lambda s: f'nixos/modules/{s}' if isinstance(s, str) else s, v.value['declarations']))
+
+# merge both descriptions
+for (k, v) in overrides.items():
+    cur = options.setdefault(k, v).value
+    for (ok, ov) in v.value.items():
+        if ok == 'declarations':
+            decls = cur[ok]
+            for d in ov:
+                if d not in decls:
+                    decls += [d]
+        elif ok == "type":
+            # ignore types of placeholder options
+            if ov != "_unspecified" or cur[ok] == "_unspecified":
+                cur[ok] = ov
+        elif ov is not None or cur.get(ok, None) is None:
+            cur[ok] = ov
+
+severity = "error" if warningsAreErrors else "warning"
+
+# check that every option has a description
+hasWarnings = False
+hasErrors = False
+for (k, v) in options.items():
+    if v.value.get('description', None) is None:
+        hasWarnings = True
+        print(f"\x1b[1;31m{severity}: option {v.name} has no description\x1b[0m", file=sys.stderr)
+        v.value['description'] = "This option has no description."
+    if v.value.get('type', "unspecified") == "unspecified":
+        hasWarnings = True
+        print(
+            f"\x1b[1;31m{severity}: option {v.name} has no type. Please specify a valid type, see " +
+            "https://nixos.org/manual/nixos/stable/index.html#sec-option-types\x1b[0m", file=sys.stderr)
+
+if hasErrors:
+    sys.exit(1)
+if hasWarnings and warningsAreErrors:
+    print(
+        "\x1b[1;31m" +
+        "Treating warnings as errors. Set documentation.nixos.options.warningsAreErrors " +
+        "to false to ignore these warnings." +
+        "\x1b[0m",
+        file=sys.stderr)
+    sys.exit(1)
+
+json.dump(unpivot(options), fp=sys.stdout)
diff --git a/nixpkgs/nixos/lib/make-single-disk-zfs-image.nix b/nixpkgs/nixos/lib/make-single-disk-zfs-image.nix
new file mode 100644
index 000000000000..a3564f9a8b68
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-single-disk-zfs-image.nix
@@ -0,0 +1,313 @@
+# Note: This is a private API, internal to NixOS. Its interface is subject
+# to change without notice.
+#
+# The result of this builder is a single disk image, partitioned like this:
+#
+#  * partition #1: a very small, 1MiB partition to leave room for GRUB.
+#
+#  * partition #2: boot, a partition formatted with FAT to be used for /boot.
+#      FAT is chosen to support EFI.
+#
+#  * partition #3: nixos, a partition dedicated to a zpool.
+#
+# This single-disk approach does not satisfy ZFS's requirements for autoexpand;
+# however, automation can expand it anyway, for example with
+# `services.zfs.expandOnBoot`.
+{ lib
+, pkgs
+, # The NixOS configuration to be installed onto the disk image.
+  config
+
+, # The size of the FAT partition, in megabytes.
+  bootSize ? 1024
+
+, # The size of the root partition, in megabytes.
+  rootSize ? 2048
+
+, # The name of the ZFS pool
+  rootPoolName ? "tank"
+
+, # zpool properties
+  rootPoolProperties ? {
+    autoexpand = "on";
+  }
+, # pool-wide filesystem properties
+  rootPoolFilesystemProperties ? {
+    acltype = "posixacl";
+    atime = "off";
+    compression = "on";
+    mountpoint = "legacy";
+    xattr = "sa";
+  }
+
+, # Datasets to create, with per-attribute options:
+  # mount: (optional) mount point in the VM
+  # properties: (optional) ZFS properties on the dataset, like filesystemProperties
+  # Notes:
+  # 1. datasets are created from shorter to longer names as a simple topo-sort
+  # 2. you should define a root dataset whose mount is `/`
+  datasets ? { }
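+  # For example (dataset names below are illustrative only):
+  #   datasets = {
+  #     "tank/root"      = { mount = "/"; };
+  #     "tank/root/home" = { mount = "/home"; properties.quota = "10G"; };
+  #   };
+  # "tank/root" is created and mounted before "tank/root/home", since both
+  # dataset names and mount points are sorted from shortest to longest.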
+
+, # The files and directories to be placed in the target file system.
+  # This is a list of attribute sets {source, target} where `source'
+  # is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target'.
+  contents ? [ ]
+
+, # The initial NixOS configuration file to be copied to
+  # /etc/nixos/configuration.nix. This configuration will be embedded
+  # inside a configuration which includes the described ZFS fileSystems.
+  configFile ? null
+
+, # Shell code executed after the VM has finished.
+  postVM ? ""
+
+, name ? "nixos-disk-image"
+
+, # Disk image format, one of qcow2, qcow2-compressed, vdi, vpc, raw.
+  format ? "raw"
+
+, # Include a copy of Nixpkgs in the disk image
+  includeChannel ? true
+}:
+let
+  formatOpt = if format == "qcow2-compressed" then "qcow2" else format;
+
+  compress = lib.optionalString (format == "qcow2-compressed") "-c";
+
+  filenameSuffix = "." + {
+    qcow2 = "qcow2";
+    vdi = "vdi";
+    vpc = "vhd";
+    raw = "img";
+  }.${formatOpt} or formatOpt;
+  rootFilename = "nixos.root${filenameSuffix}";
+
+  # FIXME: merge with channel.nix / make-channel.nix.
+  channelSources =
+    let
+      nixpkgs = lib.cleanSource pkgs.path;
+    in
+    pkgs.runCommand "nixos-${config.system.nixos.version}" { } ''
+      mkdir -p $out
+      cp -prd ${nixpkgs.outPath} $out/nixos
+      chmod -R u+w $out/nixos
+      if [ ! -e $out/nixos/nixpkgs ]; then
+        ln -s . $out/nixos/nixpkgs
+      fi
+      rm -rf $out/nixos/.git
+      echo -n ${config.system.nixos.versionSuffix} > $out/nixos/.version-suffix
+    '';
+
+  closureInfo = pkgs.closureInfo {
+    rootPaths = [ config.system.build.toplevel ]
+      ++ (lib.optional includeChannel channelSources);
+  };
+
+  modulesTree = pkgs.aggregateModules
+    (with config.boot.kernelPackages; [ kernel zfs ]);
+
+  tools = lib.makeBinPath (
+    with pkgs; [
+      config.system.build.nixos-enter
+      config.system.build.nixos-install
+      dosfstools
+      e2fsprogs
+      gptfdisk
+      nix
+      parted
+      util-linux
+      zfs
+    ]
+  );
+
+  hasDefinedMount = disk: ((disk.mount or null) != null);
+
+  stringifyProperties = prefix: properties: lib.concatStringsSep " \\\n" (
+    lib.mapAttrsToList
+      (
+        property: value: "${prefix} ${lib.escapeShellArg property}=${lib.escapeShellArg value}"
+      )
+      properties
+  );
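+  # For example, stringifyProperties "-O" { atime = "off"; xattr = "sa"; }
+  # produces (joined with backslash-newlines) the arguments:
+  #   -O 'atime'='off' \
+  #   -O 'xattr'='sa'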
+
+  createDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      sorted = lib.sort (left: right: (lib.stringLength left.name) < (lib.stringLength right.name)) datasetlist;
+      cmd = { name, value }:
+        let
+          properties = stringifyProperties "-o" (value.properties or { });
+        in
+        "zfs create -p ${properties} ${name}";
+    in
+    lib.concatMapStringsSep "\n" cmd sorted;
+
+  mountDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      mounts = lib.filter ({ value, ... }: hasDefinedMount value) datasetlist;
+      sorted = lib.sort (left: right: (lib.stringLength left.value.mount) < (lib.stringLength right.value.mount)) mounts;
+      cmd = { name, value }:
+        ''
+          mkdir -p /mnt${lib.escapeShellArg value.mount}
+          mount -t zfs ${name} /mnt${lib.escapeShellArg value.mount}
+        '';
+    in
+    lib.concatMapStringsSep "\n" cmd sorted;
+
+  unmountDatasets =
+    let
+      datasetlist = lib.mapAttrsToList lib.nameValuePair datasets;
+      mounts = lib.filter ({ value, ... }: hasDefinedMount value) datasetlist;
+      sorted = lib.sort (left: right: (lib.stringLength left.value.mount) > (lib.stringLength right.value.mount)) mounts;
+      cmd = { name, value }:
+        ''
+          umount /mnt${lib.escapeShellArg value.mount}
+        '';
+    in
+    lib.concatMapStringsSep "\n" cmd sorted;
+
+
+  fileSystemsCfgFile =
+    let
+      mountable = lib.filterAttrs (_: value: hasDefinedMount value) datasets;
+    in
+    pkgs.runCommand "filesystem-config.nix"
+      {
+        buildInputs = with pkgs; [ jq nixpkgs-fmt ];
+        filesystems = builtins.toJSON {
+          fileSystems = lib.mapAttrs'
+            (
+              dataset: attrs:
+                {
+                  name = attrs.mount;
+                  value = {
+                    fsType = "zfs";
+                    device = "${dataset}";
+                  };
+                }
+            )
+            mountable;
+        };
+        passAsFile = [ "filesystems" ];
+      } ''
+      (
+        echo "builtins.fromJSON '''"
+        jq . < "$filesystemsPath"
+        echo "'''"
+      ) > $out
+
+      nixpkgs-fmt $out
+    '';
+
+  mergedConfig =
+    if configFile == null
+    then fileSystemsCfgFile
+    else
+      pkgs.runCommand "configuration.nix"
+        {
+          buildInputs = with pkgs; [ nixpkgs-fmt ];
+        }
+        ''
+          (
+            echo '{ imports = ['
+            printf "(%s)\n" "$(cat ${fileSystemsCfgFile})";
+            printf "(%s)\n" "$(cat ${configFile})";
+            echo ']; }'
+          ) > $out
+
+          nixpkgs-fmt $out
+        '';
+
+  image = (
+    pkgs.vmTools.override {
+      rootModules =
+        [ "zfs" "9p" "9pnet_virtio" "virtio_pci" "virtio_blk" ] ++
+        (pkgs.lib.optional pkgs.stdenv.hostPlatform.isx86 "rtc_cmos");
+      kernel = modulesTree;
+    }
+  ).runInLinuxVM (
+    pkgs.runCommand name
+      {
+        memSize = 1024;
+        QEMU_OPTS = "-drive file=$rootDiskImage,if=virtio,cache=unsafe,werror=report";
+        preVM = ''
+          PATH=$PATH:${pkgs.qemu_kvm}/bin
+          mkdir $out
+
+          rootDiskImage=root.raw
+          qemu-img create -f raw $rootDiskImage ${toString (bootSize + rootSize)}M
+        '';
+
+        postVM = ''
+            ${if formatOpt == "raw" then ''
+            mv $rootDiskImage $out/${rootFilename}
+          '' else ''
+            ${pkgs.qemu_kvm}/bin/qemu-img convert -f raw -O ${formatOpt} ${compress} $rootDiskImage $out/${rootFilename}
+          ''}
+            rootDiskImage=$out/${rootFilename}
+            set -x
+            ${postVM}
+        '';
+      } ''
+      export PATH=${tools}:$PATH
+      set -x
+
+      cp -sv /dev/vda /dev/sda
+      cp -sv /dev/vda /dev/xvda
+
+      parted --script /dev/vda -- \
+        mklabel gpt \
+        mkpart no-fs 1MiB 2MiB \
+        set 1 bios_grub on \
+        align-check optimal 1 \
+        mkpart primary fat32 2MiB ${toString bootSize}MiB \
+        align-check optimal 2 \
+        mkpart primary fat32 ${toString bootSize}MiB -1MiB \
+        align-check optimal 3 \
+        print
+
+      sfdisk --dump /dev/vda
+
+
+      zpool create \
+        ${stringifyProperties "  -o" rootPoolProperties} \
+        ${stringifyProperties "  -O" rootPoolFilesystemProperties} \
+        ${rootPoolName} /dev/vda3
+      parted --script /dev/vda -- print
+
+      ${createDatasets}
+      ${mountDatasets}
+
+      mkdir -p /mnt/boot
+      mkfs.vfat -n ESP /dev/vda2
+      mount /dev/vda2 /mnt/boot
+
+      mount
+
+      # Install a configuration.nix
+      mkdir -p /mnt/etc/nixos
+      # `cat` so it is mutable on the fs
+      cat ${mergedConfig} > /mnt/etc/nixos/configuration.nix
+
+      export NIX_STATE_DIR=$TMPDIR/state
+      nix-store --load-db < ${closureInfo}/registration
+
+      nixos-install \
+        --root /mnt \
+        --no-root-passwd \
+        --system ${config.system.build.toplevel} \
+        --substituters "" \
+        ${lib.optionalString includeChannel ''--channel ${channelSources}''}
+
+      df -h
+
+      umount /mnt/boot
+      ${unmountDatasets}
+
+      zpool export ${rootPoolName}
+    ''
+  );
+in
+image
diff --git a/nixpkgs/nixos/lib/make-squashfs.nix b/nixpkgs/nixos/lib/make-squashfs.nix
new file mode 100644
index 000000000000..4b6b56739948
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-squashfs.nix
@@ -0,0 +1,45 @@
+{ lib, stdenv, squashfsTools, closureInfo
+
+, fileName ? "squashfs"
+, # The root directory of the squashfs filesystem is filled with the
+  # closures of the Nix store paths listed here.
+  storeContents ? []
+  # Pseudo files to be added to squashfs image
+, pseudoFiles ? []
+, noStrip ? false
+, # Compression parameters.
+  # For zstd compression you can use "zstd -Xcompression-level 6".
+  comp ? "xz -Xdict-size 100%"
+}:
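+
+# Hedged usage sketch (argument values are illustrative only):
+#
+#   pkgs.callPackage ./make-squashfs.nix {
+#     storeContents = [ config.system.build.toplevel ];
+#     comp = "zstd -Xcompression-level 6";
+#   }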
+
+let
+  pseudoFilesArgs = lib.concatMapStrings (f: ''-p "${f}" '') pseudoFiles;
+in
+stdenv.mkDerivation {
+  name = "${fileName}.img";
+  __structuredAttrs = true;
+
+  nativeBuildInputs = [ squashfsTools ];
+
+  buildCommand =
+    ''
+      closureInfo=${closureInfo { rootPaths = storeContents; }}
+
+      # Also include a manifest of the closures in a format suitable
+      # for nix-store --load-db.
+      cp $closureInfo/registration nix-path-registration
+
+    '' + lib.optionalString stdenv.buildPlatform.is32bit ''
+      # Using 64 cores on i686 does not work:
+      # it fails with FATAL ERROR: mangle2:: xz compress failed with error code 5
+      if ((NIX_BUILD_CORES > 48)); then
+        NIX_BUILD_CORES=48
+      fi
+    '' + ''
+
+      # Generate the squashfs image.
+      mksquashfs nix-path-registration $(cat $closureInfo/store-paths) $out ${pseudoFilesArgs} \
+        -no-hardlinks ${lib.optionalString noStrip "-no-strip"} -keep-as-directory -all-root -b 1048576 -comp ${comp} \
+        -processors $NIX_BUILD_CORES
+    '';
+}
diff --git a/nixpkgs/nixos/lib/make-system-tarball.nix b/nixpkgs/nixos/lib/make-system-tarball.nix
new file mode 100644
index 000000000000..325792f97e8f
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-system-tarball.nix
@@ -0,0 +1,56 @@
+{ stdenv, closureInfo, pixz
+
+, # The file name of the resulting tarball
+  fileName ? "nixos-system-${stdenv.hostPlatform.system}"
+
+, # The files and directories to be placed in the tarball.
+  # This is a list of attribute sets {source, target} where `source'
+  # is the file system object (regular file or directory) to be
+  # grafted in the file system at path `target'.
+  contents
+
+, # In addition to `contents', the closure of the store paths listed
+  # in `storeContents' is also placed in the Nix store of the tarball.  This is
+  # a list of attribute sets {object, symlink} where `object' is a
+  # store path whose closure will be copied, and `symlink' is a
+  # symlink to `object' that will be added to the tarball.
+  storeContents ? []
+
+  # Extra commands to be executed before archiving files
+, extraCommands ? ""
+
+  # Extra tar arguments
+, extraArgs ? ""
+  # Command used for compression
+, compressCommand ? "pixz -t"
+  # Extension for the compressed tarball
+, compressionExtension ? ".xz"
+  # extra inputs, like the compressor to use
+, extraInputs ? [ pixz ]
+}:
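+
+# Hedged usage sketch (paths and targets below are illustrative only):
+#
+#   pkgs.callPackage ./make-system-tarball.nix {
+#     contents = [ { source = ./extra/boot.cmd; target = "/boot/boot.cmd"; } ];
+#     storeContents = [ { object = config.system.build.toplevel; symlink = "/run/current-system"; } ];
+#   }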
+
+let
+  symlinks = map (x: x.symlink) storeContents;
+  objects = map (x: x.object) storeContents;
+in
+
+stdenv.mkDerivation {
+  name = "tarball";
+  builder = ./make-system-tarball.sh;
+  nativeBuildInputs = extraInputs;
+
+  inherit fileName extraArgs extraCommands compressCommand;
+
+  # !!! should use XML.
+  sources = map (x: x.source) contents;
+  targets = map (x: x.target) contents;
+
+  # !!! should use XML.
+  inherit symlinks objects;
+
+  closureInfo = closureInfo {
+    rootPaths = objects;
+  };
+
+  extension = compressionExtension;
+}
diff --git a/nixpkgs/nixos/lib/make-system-tarball.sh b/nixpkgs/nixos/lib/make-system-tarball.sh
new file mode 100644
index 000000000000..1a0017a1799a
--- /dev/null
+++ b/nixpkgs/nixos/lib/make-system-tarball.sh
@@ -0,0 +1,57 @@
+source $stdenv/setup
+
+sources_=($sources)
+targets_=($targets)
+
+objects=($objects)
+symlinks=($symlinks)
+
+
+# Remove the initial slash from a path, so that targets end up relative to the tarball root.
+stripSlash() {
+    res="$1"
+    if test "${res:0:1}" = /; then res=${res:1}; fi
+}
+
+# Add the individual files.
+for ((i = 0; i < ${#targets_[@]}; i++)); do
+    stripSlash "${targets_[$i]}"
+    mkdir -p "$(dirname "$res")"
+    cp -a "${sources_[$i]}" "$res"
+done
+
+
+# Add the closures of the top-level store objects.
+chmod +w .
+mkdir -p nix/store
+for i in $(< $closureInfo/store-paths); do
+    cp -a "$i" "${i:1}"
+done
+
+
+# TODO tar ruxo
+# Also include a manifest of the closures in a format suitable for
+# nix-store --load-db.
+cp $closureInfo/registration nix-path-registration
+
+# Add symlinks to the top-level store objects.
+for ((n = 0; n < ${#objects[*]}; n++)); do
+    object=${objects[$n]}
+    symlink=${symlinks[$n]}
+    if test "$symlink" != "none"; then
+        mkdir -p $(dirname ./$symlink)
+        ln -s $object ./$symlink
+    fi
+done
+
+$extraCommands
+
+mkdir -p $out/tarball
+
+rm env-vars
+
+time tar --sort=name --mtime='@1' --owner=0 --group=0 --numeric-owner -c * $extraArgs | $compressCommand > $out/tarball/$fileName.tar${extension}
+
+mkdir -p $out/nix-support
+echo $system > $out/nix-support/system
+echo "file system-tarball $out/tarball/$fileName.tar${extension}" > $out/nix-support/hydra-build-products
diff --git a/nixpkgs/nixos/lib/qemu-common.nix b/nixpkgs/nixos/lib/qemu-common.nix
new file mode 100644
index 000000000000..b946f62d93dc
--- /dev/null
+++ b/nixpkgs/nixos/lib/qemu-common.nix
@@ -0,0 +1,65 @@
+# QEMU-related utilities shared between various Nix expressions.
+{ lib, pkgs }:
+
+let
+  zeroPad = n:
+    lib.optionalString (n < 16) "0" +
+    (if n > 255
+    then throw "Can't have more than 255 nets or nodes!"
+    else lib.toHexString n);
+in
+
+rec {
+  qemuNicMac = net: machine: "52:54:00:12:${zeroPad net}:${zeroPad machine}";
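+  # For example, qemuNicMac 1 2 == "52:54:00:12:01:02" (both parts zero-padded hex).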
+
+  qemuNICFlags = nic: net: machine:
+    [
+      "-device virtio-net-pci,netdev=vlan${toString nic},mac=${qemuNicMac net machine}"
+      ''-netdev vde,id=vlan${toString nic},sock="$QEMU_VDE_SOCKET_${toString net}"''
+    ];
+
+  qemuSerialDevice =
+    if with pkgs.stdenv.hostPlatform; isx86 || isLoongArch64 || isMips64 || isRiscV then "ttyS0"
+    else if (with pkgs.stdenv.hostPlatform; isAarch || isPower) then "ttyAMA0"
+    else throw "Unknown QEMU serial device for system '${pkgs.stdenv.hostPlatform.system}'";
+
+  qemuBinary = qemuPkg:
+    let
+      hostStdenv = qemuPkg.stdenv;
+      hostSystem = hostStdenv.system;
+      guestSystem = pkgs.stdenv.hostPlatform.system;
+
+      linuxHostGuestMatrix = {
+        x86_64-linux = "${qemuPkg}/bin/qemu-kvm -cpu max";
+        armv7l-linux = "${qemuPkg}/bin/qemu-system-arm -machine virt,accel=kvm:tcg -cpu max";
+        aarch64-linux = "${qemuPkg}/bin/qemu-system-aarch64 -machine virt,gic-version=max,accel=kvm:tcg -cpu max";
+        powerpc64le-linux = "${qemuPkg}/bin/qemu-system-ppc64 -machine powernv";
+        powerpc64-linux = "${qemuPkg}/bin/qemu-system-ppc64 -machine powernv";
+        x86_64-darwin = "${qemuPkg}/bin/qemu-kvm -cpu max";
+      };
+      otherHostGuestMatrix = {
+        aarch64-darwin = {
+          aarch64-linux = "${qemuPkg}/bin/qemu-system-aarch64 -machine virt,gic-version=2,accel=hvf:tcg -cpu max";
+          inherit (otherHostGuestMatrix.x86_64-darwin) x86_64-linux;
+        };
+        x86_64-darwin = {
+          x86_64-linux = "${qemuPkg}/bin/qemu-system-x86_64 -machine type=q35,accel=hvf:tcg -cpu max";
+        };
+      };
+
+      throwUnsupportedHostSystem =
+        let
+          supportedSystems = [ "linux" ] ++ (lib.attrNames otherHostGuestMatrix);
+        in
+        throw "Unsupported host system ${hostSystem}, supported: ${lib.concatStringsSep ", " supportedSystems}";
+      throwUnsupportedGuestSystem = guestMap:
+        throw "Unsupported guest system ${guestSystem} for host ${hostSystem}, supported: ${lib.concatStringsSep ", " (lib.attrNames guestMap)}";
+    in
+    if hostStdenv.isLinux then
+      linuxHostGuestMatrix.${guestSystem} or "${qemuPkg}/bin/qemu-kvm"
+    else
+      let
+        guestMap = (otherHostGuestMatrix.${hostSystem} or throwUnsupportedHostSystem);
+      in
+      (guestMap.${guestSystem} or (throwUnsupportedGuestSystem guestMap));
+}
diff --git a/nixpkgs/nixos/lib/systemd-lib.nix b/nixpkgs/nixos/lib/systemd-lib.nix
new file mode 100644
index 000000000000..820ccbcbf72a
--- /dev/null
+++ b/nixpkgs/nixos/lib/systemd-lib.nix
@@ -0,0 +1,470 @@
+{ config, lib, pkgs }:
+
+with lib;
+
+let
+  cfg = config.systemd;
+  lndir = "${pkgs.buildPackages.xorg.lndir}/bin/lndir";
+  systemd = cfg.package;
+in rec {
+
+  shellEscape = s: (replaceStrings [ "\\" ] [ "\\\\" ] s);
+
+  mkPathSafeName = lib.replaceStrings ["@" ":" "\\" "[" "]"] ["-" "-" "-" "" ""];
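+  # e.g. mkPathSafeName "autovt@tty1.service" == "autovt-tty1.service"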
+
+  # a type for options that take a unit name
+  unitNameType = types.strMatching "[a-zA-Z0-9@%:_.\\-]+[.](service|socket|device|mount|automount|swap|target|path|timer|scope|slice)";
+
+  makeUnit = name: unit:
+    if unit.enable then
+      pkgs.runCommand "unit-${mkPathSafeName name}"
+        { preferLocalBuild = true;
+          allowSubstitutes = false;
+          # unit.text can be null. But null variables listed in passAsFile are
+          # ignored by nix, resulting in no file being created and making the
+          # mv operation below fail.
+          text = optionalString (unit.text != null) unit.text;
+          passAsFile = [ "text" ];
+        }
+        ''
+          name=${shellEscape name}
+          mkdir -p "$out/$(dirname -- "$name")"
+          mv "$textPath" "$out/$name"
+        ''
+    else
+      pkgs.runCommand "unit-${mkPathSafeName name}-disabled"
+        { preferLocalBuild = true;
+          allowSubstitutes = false;
+        }
+        ''
+          name=${shellEscape name}
+          mkdir -p "$out/$(dirname "$name")"
+          ln -s /dev/null "$out/$name"
+        '';
+
+  boolValues = [true false "yes" "no"];
+
+  digits = map toString (range 0 9);
+
+  isByteFormat = s:
+    let
+      l = reverseList (stringToCharacters s);
+      suffix = head l;
+      nums = tail l;
+    in elem suffix (["K" "M" "G" "T"] ++ digits)
+      && all (num: elem num digits) nums;
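+  # e.g. isByteFormat "512M" == true, but isByteFormat "512MB" == false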
+
+  assertByteFormat = name: group: attr:
+    optional (attr ? ${name} && ! isByteFormat attr.${name})
+      "Systemd ${group} field `${name}' must be in byte format [0-9]+[KMGT].";
+
+  hexChars = stringToCharacters "0123456789abcdefABCDEF";
+
+  isMacAddress = s: stringLength s == 17
+    && flip all (splitString ":" s) (bytes:
+      all (byte: elem byte hexChars) (stringToCharacters bytes)
+    );
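+  # e.g. isMacAddress "00:11:22:aa:bb:cc" == true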
+
+  assertMacAddress = name: group: attr:
+    optional (attr ? ${name} && ! isMacAddress attr.${name})
+      "Systemd ${group} field `${name}' must be a valid MAC address.";
+
+  assertNetdevMacAddress = name: group: attr:
+    optional (attr ? ${name} && (! isMacAddress attr.${name} && attr.${name} != "none"))
+      "Systemd ${group} field `${name}` must be a valid MAC address or the special value `none`.";
+
+
+  isPort = i: i >= 0 && i <= 65535;
+
+  assertPort = name: group: attr:
+    optional (attr ? ${name} && ! isPort attr.${name})
+      "Error on the systemd ${group} field `${name}': ${attr.name} is not a valid port number.";
+
+  assertValueOneOf = name: values: group: attr:
+    optional (attr ? ${name} && !elem attr.${name} values)
+      "Systemd ${group} field `${name}' cannot have value `${toString attr.${name}}'.";
+
+  assertValuesSomeOfOr = name: values: default: group: attr:
+    optional (attr ? ${name} && !(all (x: elem x values) (splitString " " attr.${name}) || attr.${name} == default))
+      "Systemd ${group} field `${name}' cannot have value `${toString attr.${name}}'.";
+
+  assertHasField = name: group: attr:
+    optional (!(attr ? ${name}))
+      "Systemd ${group} field `${name}' must exist.";
+
+  assertRange = name: min: max: group: attr:
+    optional (attr ? ${name} && !(min <= attr.${name} && max >= attr.${name}))
+      "Systemd ${group} field `${name}' is outside the range [${toString min},${toString max}]";
+
+  assertMinimum = name: min: group: attr:
+    optional (attr ? ${name} && attr.${name} < min)
+      "Systemd ${group} field `${name}' must be greater than or equal to ${toString min}";
+
+  assertOnlyFields = fields: group: attr:
+    let badFields = filter (name: ! elem name fields) (attrNames attr); in
+    optional (badFields != [ ])
+      "Systemd ${group} has extra fields [${concatStringsSep " " badFields}].";
+
+  assertInt = name: group: attr:
+    optional (attr ? ${name} && !isInt attr.${name})
+      "Systemd ${group} field `${name}' is not an integer";
+
+  checkUnitConfig = group: checks: attrs: let
+    # We're applied at the top-level type (attrsOf unitOption), so the actual
+    # unit options might contain attributes from mkOverride and mkIf that we need to
+    # convert into single values before checking them.
+    defs = mapAttrs (const (v:
+      if v._type or "" == "override" then v.content
+      else if v._type or "" == "if" then v.content
+      else v
+    )) attrs;
+    errors = concatMap (c: c group defs) checks;
+  in if errors == [] then true
+     else builtins.trace (concatStringsSep "\n" errors) false;
+
+  toOption = x:
+    if x == true then "true"
+    else if x == false then "false"
+    else toString x;
+
+  attrsToSection = as:
+    concatStrings (concatLists (mapAttrsToList (name: value:
+      map (x: ''
+          ${name}=${toOption x}
+        '')
+        (if isList value then value else [value]))
+        as));
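+  # Rough illustration: attrsToSection { After = "network.target"; Wants = [ "a.service" "b.service" ]; }
+  # renders one Key=value line per attribute, repeating the key for list values:
+  #   After=network.target
+  #   Wants=a.service
+  #   Wants=b.service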
+
+  generateUnits = { allowCollisions ? true, type, units, upstreamUnits, upstreamWants, packages ? cfg.packages, package ? cfg.package }:
+    let
+      typeDir = ({
+        system = "system";
+        initrd = "system";
+        user = "user";
+        nspawn = "nspawn";
+      }).${type};
+    in pkgs.runCommand "${type}-units"
+      { preferLocalBuild = true;
+        allowSubstitutes = false;
+      } ''
+      mkdir -p $out
+
+      # Copy the upstream systemd units we're interested in.
+      for i in ${toString upstreamUnits}; do
+        fn=${package}/example/systemd/${typeDir}/$i
+        if ! [ -e $fn ]; then echo "missing $fn"; false; fi
+        if [ -L $fn ]; then
+          target="$(readlink "$fn")"
+          if [ ''${target:0:3} = ../ ]; then
+            ln -s "$(readlink -f "$fn")" $out/
+          else
+            cp -pd $fn $out/
+          fi
+        else
+          ln -s $fn $out/
+        fi
+      done
+
+      # Copy .wants links, but only those that point to units that
+      # we're interested in.
+      for i in ${toString upstreamWants}; do
+        fn=${package}/example/systemd/${typeDir}/$i
+        if ! [ -e $fn ]; then echo "missing $fn"; false; fi
+        x=$out/$(basename $fn)
+        mkdir $x
+        for i in $fn/*; do
+          y=$x/$(basename $i)
+          cp -pd $i $y
+          if ! [ -e $y ]; then rm $y; fi
+        done
+      done
+
+      # Symlink all units provided by the packages listed in systemd.packages.
+      packages="${toString packages}"
+
+      # Filter duplicate directories
+      declare -A unique_packages
+      for k in $packages ; do unique_packages[$k]=1 ; done
+
+      for i in ''${!unique_packages[@]}; do
+        for fn in $i/etc/systemd/${typeDir}/* $i/lib/systemd/${typeDir}/*; do
+          if ! [[ "$fn" =~ .wants$ ]]; then
+            if [[ -d "$fn" ]]; then
+              targetDir="$out/$(basename "$fn")"
+              mkdir -p "$targetDir"
+              ${lndir} "$fn" "$targetDir"
+            else
+              ln -s $fn $out/
+            fi
+          fi
+        done
+      done
+
+      # Symlink units defined by systemd.units where override strategy
+      # shall be automatically detected. If these are also provided by
+      # systemd or systemd.packages, then add them as
+      # <unit-name>.d/overrides.conf, which makes them extend the
+      # upstream unit.
+      for i in ${toString (mapAttrsToList
+          (n: v: v.unit)
+          (lib.filterAttrs (n: v: (attrByPath [ "overrideStrategy" ] "asDropinIfExists" v) == "asDropinIfExists") units))}; do
+        fn=$(basename $i/*)
+        if [ -e $out/$fn ]; then
+          if [ "$(readlink -f $i/$fn)" = /dev/null ]; then
+            ln -sfn /dev/null $out/$fn
+          else
+            ${if allowCollisions then ''
+              mkdir -p $out/$fn.d
+              ln -s $i/$fn $out/$fn.d/overrides.conf
+            '' else ''
+              echo "Found multiple derivations configuring $fn!"
+              exit 1
+            ''}
+          fi
+       else
+          ln -fs $i/$fn $out/
+        fi
+      done
+
+      # Symlink units defined by systemd.units which shall be
+      # treated as drop-in files.
+      for i in ${toString (mapAttrsToList
+          (n: v: v.unit)
+          (lib.filterAttrs (n: v: v ? overrideStrategy && v.overrideStrategy == "asDropin") units))}; do
+        fn=$(basename $i/*)
+        mkdir -p $out/$fn.d
+        ln -s $i/$fn $out/$fn.d/overrides.conf
+      done
+
+      # Create service aliases from aliases option.
+      ${concatStrings (mapAttrsToList (name: unit:
+          concatMapStrings (name2: ''
+            ln -sfn '${name}' $out/'${name2}'
+          '') (unit.aliases or [])) units)}
+
+      # Create .wants and .requires symlinks from the wantedBy and
+      # requiredBy options.
+      ${concatStrings (mapAttrsToList (name: unit:
+          concatMapStrings (name2: ''
+            mkdir -p $out/'${name2}.wants'
+            ln -sfn '../${name}' $out/'${name2}.wants'/
+          '') (unit.wantedBy or [])) units)}
+
+      ${concatStrings (mapAttrsToList (name: unit:
+          concatMapStrings (name2: ''
+            mkdir -p $out/'${name2}.requires'
+            ln -sfn '../${name}' $out/'${name2}.requires'/
+          '') (unit.requiredBy or [])) units)}
+
+      ${optionalString (type == "system") ''
+        # Stupid misc. symlinks.
+        ln -s ${cfg.defaultUnit} $out/default.target
+        ln -s ${cfg.ctrlAltDelUnit} $out/ctrl-alt-del.target
+        ln -s rescue.target $out/kbrequest.target
+
+        mkdir -p $out/getty.target.wants/
+        ln -s ../autovt@tty1.service $out/getty.target.wants/
+
+        ln -s ../remote-fs.target $out/multi-user.target.wants/
+      ''}
+    ''; # */
+
+  makeJobScript = name: text:
+    let
+      scriptName = replaceStrings [ "\\" "@" ] [ "-" "_" ] (shellEscape name);
+      out = (pkgs.writeShellScriptBin scriptName ''
+        set -e
+        ${text}
+      '').overrideAttrs (_: {
+        # The derivation name is different from the script file name
+        # to keep the script file name short to avoid cluttering logs.
+        name = "unit-script-${scriptName}";
+      });
+    in "${out}/bin/${scriptName}";
+
+  unitConfig = { config, name, options, ... }: {
+    config = {
+      unitConfig =
+        optionalAttrs (config.requires != [])
+          { Requires = toString config.requires; }
+        // optionalAttrs (config.wants != [])
+          { Wants = toString config.wants; }
+        // optionalAttrs (config.after != [])
+          { After = toString config.after; }
+        // optionalAttrs (config.before != [])
+          { Before = toString config.before; }
+        // optionalAttrs (config.bindsTo != [])
+          { BindsTo = toString config.bindsTo; }
+        // optionalAttrs (config.partOf != [])
+          { PartOf = toString config.partOf; }
+        // optionalAttrs (config.conflicts != [])
+          { Conflicts = toString config.conflicts; }
+        // optionalAttrs (config.requisite != [])
+          { Requisite = toString config.requisite; }
+        // optionalAttrs (config ? restartTriggers && config.restartTriggers != [])
+          { X-Restart-Triggers = "${pkgs.writeText "X-Restart-Triggers-${name}" (toString config.restartTriggers)}"; }
+        // optionalAttrs (config ? reloadTriggers && config.reloadTriggers != [])
+          { X-Reload-Triggers = "${pkgs.writeText "X-Reload-Triggers-${name}" (toString config.reloadTriggers)}"; }
+        // optionalAttrs (config.description != "") {
+          Description = config.description; }
+        // optionalAttrs (config.documentation != []) {
+          Documentation = toString config.documentation; }
+        // optionalAttrs (config.onFailure != []) {
+          OnFailure = toString config.onFailure; }
+        // optionalAttrs (config.onSuccess != []) {
+          OnSuccess = toString config.onSuccess; }
+        // optionalAttrs (options.startLimitIntervalSec.isDefined) {
+          StartLimitIntervalSec = toString config.startLimitIntervalSec;
+        } // optionalAttrs (options.startLimitBurst.isDefined) {
+          StartLimitBurst = toString config.startLimitBurst;
+        };
+    };
+  };
+
+  serviceConfig = { config, ... }: {
+    config.environment.PATH = mkIf (config.path != []) "${makeBinPath config.path}:${makeSearchPathOutput "bin" "sbin" config.path}";
+  };
+
+  stage2ServiceConfig = {
+    imports = [ serviceConfig ];
+    # Default path for systemd services. Should be quite minimal.
+    config.path = mkAfter [
+      pkgs.coreutils
+      pkgs.findutils
+      pkgs.gnugrep
+      pkgs.gnused
+      systemd
+    ];
+  };
+
+  stage1ServiceConfig = serviceConfig;
+
+  mountConfig = { config, ... }: {
+    config = {
+      mountConfig =
+        { What = config.what;
+          Where = config.where;
+        } // optionalAttrs (config.type != "") {
+          Type = config.type;
+        } // optionalAttrs (config.options != "") {
+          Options = config.options;
+        };
+    };
+  };
+
+  automountConfig = { config, ... }: {
+    config = {
+      automountConfig =
+        { Where = config.where;
+        };
+    };
+  };
+
+  commonUnitText = def: ''
+      [Unit]
+      ${attrsToSection def.unitConfig}
+    '';
+
+  targetToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
+      text =
+        ''
+          [Unit]
+          ${attrsToSection def.unitConfig}
+        '';
+    };
+
+  serviceToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
+      text = commonUnitText def + ''
+        [Service]
+      '' + (let env = cfg.globalEnvironment // def.environment;
+        in concatMapStrings (n:
+          let s = optionalString (env.${n} != null)
+            "Environment=${builtins.toJSON "${n}=${env.${n}}"}\n";
+          # systemd max line length is now 1MiB
+          # https://github.com/systemd/systemd/commit/e6dde451a51dc5aaa7f4d98d39b8fe735f73d2af
+          in if stringLength s >= 1048576 then throw "The value of the environment variable ‘${n}’ in systemd service ‘${name}.service’ is too long." else s) (attrNames env))
+      + (if def ? reloadIfChanged && def.reloadIfChanged then ''
+        X-ReloadIfChanged=true
+      '' else if (def ? restartIfChanged && !def.restartIfChanged) then ''
+        X-RestartIfChanged=false
+      '' else "")
+       + optionalString (def ? stopIfChanged && !def.stopIfChanged) ''
+         X-StopIfChanged=false
+      '' + attrsToSection def.serviceConfig;
+    };
+
+  socketToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
+      text = commonUnitText def +
+        ''
+          [Socket]
+          ${attrsToSection def.socketConfig}
+          ${concatStringsSep "\n" (map (s: "ListenStream=${s}") def.listenStreams)}
+          ${concatStringsSep "\n" (map (s: "ListenDatagram=${s}") def.listenDatagrams)}
+        '';
+    };
+
+  timerToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
+      text = commonUnitText def +
+        ''
+          [Timer]
+          ${attrsToSection def.timerConfig}
+        '';
+    };
+
+  pathToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
+      text = commonUnitText def +
+        ''
+          [Path]
+          ${attrsToSection def.pathConfig}
+        '';
+    };
+
+  mountToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
+      text = commonUnitText def +
+        ''
+          [Mount]
+          ${attrsToSection def.mountConfig}
+        '';
+    };
+
+  automountToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
+      text = commonUnitText def +
+        ''
+          [Automount]
+          ${attrsToSection def.automountConfig}
+        '';
+    };
+
+  sliceToUnit = name: def:
+    { inherit (def) aliases wantedBy requiredBy enable overrideStrategy;
+      text = commonUnitText def +
+        ''
+          [Slice]
+          ${attrsToSection def.sliceConfig}
+        '';
+    };
+
+  # Create a directory that contains systemd definition files from an attrset
+  # that contains the file names as keys and the content as values. The values
+  # in that attrset are determined by the supplied format.
+  definitions = directoryName: format: definitionAttrs:
+    let
+      listOfDefinitions = lib.mapAttrsToList
+        (name: format.generate "${name}.conf")
+        definitionAttrs;
+    in
+    pkgs.runCommand directoryName { } ''
+      mkdir -p $out
+      ${(lib.concatStringsSep "\n"
+        (map (pkg: "cp ${pkg} $out/${pkg.name}") listOfDefinitions)
+      )}
+    '';
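+  # Hedged usage sketch (the file name and settings below are illustrative only):
+  #   definitions "example-confs" (pkgs.formats.ini { }) {
+  #     example = { Section.Key = "value"; };
+  #   }
+  # yields a directory containing example.conf rendered by the given format.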
+
+}
diff --git a/nixpkgs/nixos/lib/systemd-network-units.nix b/nixpkgs/nixos/lib/systemd-network-units.nix
new file mode 100644
index 000000000000..1d5f823f3678
--- /dev/null
+++ b/nixpkgs/nixos/lib/systemd-network-units.nix
@@ -0,0 +1,249 @@
+{ lib, systemdUtils }:
+
+with lib;
+
+let
+  attrsToSection = systemdUtils.lib.attrsToSection;
+  commonMatchText = def:
+    optionalString (def.matchConfig != { }) ''
+      [Match]
+      ${attrsToSection def.matchConfig}
+    '';
+in {
+  linkToUnit = def:
+    commonMatchText def + ''
+      [Link]
+      ${attrsToSection def.linkConfig}
+    '' + def.extraConfig;
+
+  netdevToUnit = def:
+    commonMatchText def + ''
+      [NetDev]
+      ${attrsToSection def.netdevConfig}
+    '' + optionalString (def.vlanConfig != { }) ''
+      [VLAN]
+      ${attrsToSection def.vlanConfig}
+    '' + optionalString (def.ipvlanConfig != { }) ''
+      [IPVLAN]
+      ${attrsToSection def.ipvlanConfig}
+    '' + optionalString (def.ipvtapConfig != { }) ''
+      [IPVTAP]
+      ${attrsToSection def.ipvtapConfig}
+    '' + optionalString (def.macvlanConfig != { }) ''
+      [MACVLAN]
+      ${attrsToSection def.macvlanConfig}
+    '' + optionalString (def.vxlanConfig != { }) ''
+      [VXLAN]
+      ${attrsToSection def.vxlanConfig}
+    '' + optionalString (def.tunnelConfig != { }) ''
+      [Tunnel]
+      ${attrsToSection def.tunnelConfig}
+    '' + optionalString (def.fooOverUDPConfig != { }) ''
+      [FooOverUDP]
+      ${attrsToSection def.fooOverUDPConfig}
+    '' + optionalString (def.peerConfig != { }) ''
+      [Peer]
+      ${attrsToSection def.peerConfig}
+    '' + optionalString (def.tunConfig != { }) ''
+      [Tun]
+      ${attrsToSection def.tunConfig}
+    '' + optionalString (def.tapConfig != { }) ''
+      [Tap]
+      ${attrsToSection def.tapConfig}
+    '' + optionalString (def.l2tpConfig != { }) ''
+      [L2TP]
+      ${attrsToSection def.l2tpConfig}
+    '' + flip concatMapStrings def.l2tpSessions (x: ''
+      [L2TPSession]
+      ${attrsToSection x.l2tpSessionConfig}
+    '') + optionalString (def.wireguardConfig != { }) ''
+      [WireGuard]
+      ${attrsToSection def.wireguardConfig}
+    '' + flip concatMapStrings def.wireguardPeers (x: ''
+      [WireGuardPeer]
+      ${attrsToSection x.wireguardPeerConfig}
+    '') + optionalString (def.bondConfig != { }) ''
+      [Bond]
+      ${attrsToSection def.bondConfig}
+    '' + optionalString (def.xfrmConfig != { }) ''
+      [Xfrm]
+      ${attrsToSection def.xfrmConfig}
+    '' + optionalString (def.vrfConfig != { }) ''
+      [VRF]
+      ${attrsToSection def.vrfConfig}
+    '' + optionalString (def.wlanConfig != { }) ''
+      [WLAN]
+      ${attrsToSection def.wlanConfig}
+    '' + optionalString (def.batmanAdvancedConfig != { }) ''
+      [BatmanAdvanced]
+      ${attrsToSection def.batmanAdvancedConfig}
+    '' + def.extraConfig;
+
+  networkToUnit = def:
+    commonMatchText def + optionalString (def.linkConfig != { }) ''
+      [Link]
+      ${attrsToSection def.linkConfig}
+    '' + ''
+      [Network]
+    '' + attrsToSection def.networkConfig
+    + optionalString (def.address != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "Address=${s}") def.address)}
+    '' + optionalString (def.gateway != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "Gateway=${s}") def.gateway)}
+    '' + optionalString (def.dns != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "DNS=${s}") def.dns)}
+    '' + optionalString (def.ntp != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "NTP=${s}") def.ntp)}
+    '' + optionalString (def.bridge != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "Bridge=${s}") def.bridge)}
+    '' + optionalString (def.bond != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "Bond=${s}") def.bond)}
+    '' + optionalString (def.vrf != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "VRF=${s}") def.vrf)}
+    '' + optionalString (def.vlan != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "VLAN=${s}") def.vlan)}
+    '' + optionalString (def.macvlan != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "MACVLAN=${s}") def.macvlan)}
+    '' + optionalString (def.macvtap != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "MACVTAP=${s}") def.macvtap)}
+    '' + optionalString (def.vxlan != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "VXLAN=${s}") def.vxlan)}
+    '' + optionalString (def.tunnel != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "Tunnel=${s}") def.tunnel)}
+    '' + optionalString (def.xfrm != [ ]) ''
+      ${concatStringsSep "\n" (map (s: "Xfrm=${s}") def.xfrm)}
+    '' + "\n" + flip concatMapStrings def.addresses (x: ''
+      [Address]
+      ${attrsToSection x.addressConfig}
+    '') + flip concatMapStrings def.routingPolicyRules (x: ''
+      [RoutingPolicyRule]
+      ${attrsToSection x.routingPolicyRuleConfig}
+    '') + flip concatMapStrings def.routes (x: ''
+      [Route]
+      ${attrsToSection x.routeConfig}
+    '') + optionalString (def.dhcpV4Config != { }) ''
+      [DHCPv4]
+      ${attrsToSection def.dhcpV4Config}
+    '' + optionalString (def.dhcpV6Config != { }) ''
+      [DHCPv6]
+      ${attrsToSection def.dhcpV6Config}
+    '' + optionalString (def.dhcpPrefixDelegationConfig != { }) ''
+      [DHCPPrefixDelegation]
+      ${attrsToSection def.dhcpPrefixDelegationConfig}
+    '' + optionalString (def.ipv6AcceptRAConfig != { }) ''
+      [IPv6AcceptRA]
+      ${attrsToSection def.ipv6AcceptRAConfig}
+    '' + optionalString (def.dhcpServerConfig != { }) ''
+      [DHCPServer]
+      ${attrsToSection def.dhcpServerConfig}
+    '' + optionalString (def.ipv6SendRAConfig != { }) ''
+      [IPv6SendRA]
+      ${attrsToSection def.ipv6SendRAConfig}
+    '' + flip concatMapStrings def.ipv6Prefixes (x: ''
+      [IPv6Prefix]
+      ${attrsToSection x.ipv6PrefixConfig}
+    '') + flip concatMapStrings def.ipv6RoutePrefixes (x: ''
+      [IPv6RoutePrefix]
+      ${attrsToSection x.ipv6RoutePrefixConfig}
+    '') + flip concatMapStrings def.dhcpServerStaticLeases (x: ''
+      [DHCPServerStaticLease]
+      ${attrsToSection x.dhcpServerStaticLeaseConfig}
+    '') + optionalString (def.bridgeConfig != { }) ''
+      [Bridge]
+      ${attrsToSection def.bridgeConfig}
+    '' + flip concatMapStrings def.bridgeFDBs (x: ''
+      [BridgeFDB]
+      ${attrsToSection x.bridgeFDBConfig}
+    '') + flip concatMapStrings def.bridgeMDBs (x: ''
+      [BridgeMDB]
+      ${attrsToSection x.bridgeMDBConfig}
+    '') + optionalString (def.lldpConfig != { }) ''
+      [LLDP]
+      ${attrsToSection def.lldpConfig}
+    '' + optionalString (def.canConfig != { }) ''
+      [CAN]
+      ${attrsToSection def.canConfig}
+    '' + optionalString (def.ipoIBConfig != { }) ''
+      [IPoIB]
+      ${attrsToSection def.ipoIBConfig}
+    '' + optionalString (def.qdiscConfig != { }) ''
+      [QDisc]
+      ${attrsToSection def.qdiscConfig}
+    '' + optionalString (def.networkEmulatorConfig != { }) ''
+      [NetworkEmulator]
+      ${attrsToSection def.networkEmulatorConfig}
+    '' + optionalString (def.tokenBucketFilterConfig != { }) ''
+      [TokenBucketFilter]
+      ${attrsToSection def.tokenBucketFilterConfig}
+    '' + optionalString (def.pieConfig != { }) ''
+      [PIE]
+      ${attrsToSection def.pieConfig}
+    '' + optionalString (def.flowQueuePIEConfig != { }) ''
+      [FlowQueuePIE]
+      ${attrsToSection def.flowQueuePIEConfig}
+    '' + optionalString (def.stochasticFairBlueConfig != { }) ''
+      [StochasticFairBlue]
+      ${attrsToSection def.stochasticFairBlueConfig}
+    '' + optionalString (def.stochasticFairnessQueueingConfig != { }) ''
+      [StochasticFairnessQueueing]
+      ${attrsToSection def.stochasticFairnessQueueingConfig}
+    '' + optionalString (def.bfifoConfig != { }) ''
+      [BFIFO]
+      ${attrsToSection def.bfifoConfig}
+    '' + optionalString (def.pfifoConfig != { }) ''
+      [PFIFO]
+      ${attrsToSection def.pfifoConfig}
+    '' + optionalString (def.pfifoHeadDropConfig != { }) ''
+      [PFIFOHeadDrop]
+      ${attrsToSection def.pfifoHeadDropConfig}
+    '' + optionalString (def.pfifoFastConfig != { }) ''
+      [PFIFOFast]
+      ${attrsToSection def.pfifoFastConfig}
+    '' + optionalString (def.cakeConfig != { }) ''
+      [CAKE]
+      ${attrsToSection def.cakeConfig}
+    '' + optionalString (def.controlledDelayConfig != { }) ''
+      [ControlledDelay]
+      ${attrsToSection def.controlledDelayConfig}
+    '' + optionalString (def.deficitRoundRobinSchedulerConfig != { }) ''
+      [DeficitRoundRobinScheduler]
+      ${attrsToSection def.deficitRoundRobinSchedulerConfig}
+    '' + optionalString (def.deficitRoundRobinSchedulerClassConfig != { }) ''
+      [DeficitRoundRobinSchedulerClass]
+      ${attrsToSection def.deficitRoundRobinSchedulerClassConfig}
+    '' + optionalString (def.enhancedTransmissionSelectionConfig != { }) ''
+      [EnhancedTransmissionSelection]
+      ${attrsToSection def.enhancedTransmissionSelectionConfig}
+    '' + optionalString (def.genericRandomEarlyDetectionConfig != { }) ''
+      [GenericRandomEarlyDetection]
+      ${attrsToSection def.genericRandomEarlyDetectionConfig}
+    '' + optionalString (def.fairQueueingControlledDelayConfig != { }) ''
+      [FairQueueingControlledDelay]
+      ${attrsToSection def.fairQueueingControlledDelayConfig}
+    '' + optionalString (def.fairQueueingConfig != { }) ''
+      [FairQueueing]
+      ${attrsToSection def.fairQueueingConfig}
+    '' + optionalString (def.trivialLinkEqualizerConfig != { }) ''
+      [TrivialLinkEqualizer]
+      ${attrsToSection def.trivialLinkEqualizerConfig}
+    '' + optionalString (def.hierarchyTokenBucketConfig != { }) ''
+      [HierarchyTokenBucket]
+      ${attrsToSection def.hierarchyTokenBucketConfig}
+    '' + optionalString (def.hierarchyTokenBucketClassConfig != { }) ''
+      [HierarchyTokenBucketClass]
+      ${attrsToSection def.hierarchyTokenBucketClassConfig}
+    '' + optionalString (def.heavyHitterFilterConfig != { }) ''
+      [HeavyHitterFilter]
+      ${attrsToSection def.heavyHitterFilterConfig}
+    '' + optionalString (def.quickFairQueueingConfig != { }) ''
+      [QuickFairQueueing]
+      ${attrsToSection def.quickFairQueueingConfig}
+    '' + optionalString (def.quickFairQueueingConfigClass != { }) ''
+      [QuickFairQueueingClass]
+      ${attrsToSection def.quickFairQueueingConfigClass}
+    '' + flip concatMapStrings def.bridgeVLANs (x: ''
+      [BridgeVLAN]
+      ${attrsToSection x.bridgeVLANConfig}
+    '') + def.extraConfig;
+
+}
diff --git a/nixpkgs/nixos/lib/systemd-types.nix b/nixpkgs/nixos/lib/systemd-types.nix
new file mode 100644
index 000000000000..a109f248b170
--- /dev/null
+++ b/nixpkgs/nixos/lib/systemd-types.nix
@@ -0,0 +1,69 @@
+{ lib, systemdUtils, pkgs }:
+
+with systemdUtils.lib;
+with systemdUtils.unitOptions;
+with lib;
+
+rec {
+  units = with types;
+    attrsOf (submodule ({ name, config, ... }: {
+      options = concreteUnitOptions;
+      config = { unit = mkDefault (systemdUtils.lib.makeUnit name config); };
+    }));
+
+  services = with types; attrsOf (submodule [ stage2ServiceOptions unitConfig stage2ServiceConfig ]);
+  initrdServices = with types; attrsOf (submodule [ stage1ServiceOptions unitConfig stage1ServiceConfig ]);
+
+  targets = with types; attrsOf (submodule [ stage2CommonUnitOptions unitConfig ]);
+  initrdTargets = with types; attrsOf (submodule [ stage1CommonUnitOptions unitConfig ]);
+
+  sockets = with types; attrsOf (submodule [ stage2SocketOptions unitConfig ]);
+  initrdSockets = with types; attrsOf (submodule [ stage1SocketOptions unitConfig ]);
+
+  timers = with types; attrsOf (submodule [ stage2TimerOptions unitConfig ]);
+  initrdTimers = with types; attrsOf (submodule [ stage1TimerOptions unitConfig ]);
+
+  paths = with types; attrsOf (submodule [ stage2PathOptions unitConfig ]);
+  initrdPaths = with types; attrsOf (submodule [ stage1PathOptions unitConfig ]);
+
+  slices = with types; attrsOf (submodule [ stage2SliceOptions unitConfig ]);
+  initrdSlices = with types; attrsOf (submodule [ stage1SliceOptions unitConfig ]);
+
+  mounts = with types; listOf (submodule [ stage2MountOptions unitConfig mountConfig ]);
+  initrdMounts = with types; listOf (submodule [ stage1MountOptions unitConfig mountConfig ]);
+
+  automounts = with types; listOf (submodule [ stage2AutomountOptions unitConfig automountConfig ]);
+  initrdAutomounts = with types; attrsOf (submodule [ stage1AutomountOptions unitConfig automountConfig ]);
+
+  initrdContents = types.attrsOf (types.submodule ({ config, options, name, ... }: {
+    options = {
+      enable = mkEnableOption (lib.mdDoc "copying of this file and symlinking it") // { default = true; };
+
+      target = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Path of the symlink.
+        '';
+        default = name;
+      };
+
+      text = mkOption {
+        default = null;
+        type = types.nullOr types.lines;
+        description = lib.mdDoc "Text of the file.";
+      };
+
+      source = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Path of the source file.";
+      };
+    };
+
+    config = {
+      source = mkIf (config.text != null) (
+        let name' = "initrd-" + baseNameOf name;
+        in mkDerivedConfig options.text (pkgs.writeText name')
+      );
+    };
+  }));
+}
diff --git a/nixpkgs/nixos/lib/systemd-unit-options.nix b/nixpkgs/nixos/lib/systemd-unit-options.nix
new file mode 100644
index 000000000000..9c69bda471bb
--- /dev/null
+++ b/nixpkgs/nixos/lib/systemd-unit-options.nix
@@ -0,0 +1,735 @@
+{ lib, systemdUtils }:
+
+with systemdUtils.lib;
+with lib;
+
+let
+  checkService = checkUnitConfig "Service" [
+    (assertValueOneOf "Type" [
+      "exec" "simple" "forking" "oneshot" "dbus" "notify" "idle"
+    ])
+    (assertValueOneOf "Restart" [
+      "no" "on-success" "on-failure" "on-abnormal" "on-abort" "always"
+    ])
+  ];
+
+in rec {
+
+  unitOption = mkOptionType {
+    name = "systemd option";
+    merge = loc: defs:
+      let
+        defs' = filterOverrides defs;
+      in
+        if isList (head defs').value
+        then concatMap (def:
+          if builtins.typeOf def.value == "list"
+          then def.value
+          else
+            throw "The definitions for systemd unit options should be either all lists, representing repeatable options, or all non-lists, but for the option ${showOption loc}, the definitions are a mix of list and non-list ${lib.options.showDefs defs'}"
+        ) defs'
+
+        else mergeEqualOption loc defs';
+  };
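+  # Merge-behaviour sketch: list-valued definitions of the same option are
+  # concatenated (repeatable settings such as ExecStartPre), while non-list
+  # definitions must all be equal. E.g. if two modules both define
+  # (service name illustrative):
+  #
+  #   systemd.services.example.serviceConfig.ExecStartPre = [ "${pkgs.coreutils}/bin/true" ];
+  #
+  # the resulting unit contains both ExecStartPre= lines.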
+
+  sharedOptions = {
+
+    enable = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        If set to false, this unit will be a symlink to
+        /dev/null. This is primarily useful to prevent specific
+        template instances
+        (e.g. `serial-getty@ttyS0`) from being
+        started. Note that `enable=true` does not
+        make a unit start by default at boot; if you want that, see
+        `wantedBy`.
+      '';
+    };
+
+    overrideStrategy = mkOption {
+      default = "asDropinIfExists";
+      type = types.enum [ "asDropinIfExists" "asDropin" ];
+      description = lib.mdDoc ''
+        Defines how unit configuration is provided for systemd:
+
+        `asDropinIfExists` creates a unit file when no unit file is provided by the package,
+        otherwise it creates a drop-in file named `overrides.conf`.
+
+        `asDropin` creates a drop-in file named `overrides.conf`.
+        Mainly needed to define instances for systemd template units (e.g. `systemd-nspawn@mycontainer.service`).
+
+        See also {manpage}`systemd.unit(5)`.
+      '';
+    };
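+    # Sketch (container name illustrative): instances of template units need
+    # a drop-in, because no `systemd-nspawn@mycontainer.service` file exists
+    # in the systemd package:
+    #
+    #   systemd.services."systemd-nspawn@mycontainer" = {
+    #     overrideStrategy = "asDropin";
+    #     wantedBy = [ "machines.target" ];
+    #   };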
+
+    requiredBy = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = lib.mdDoc ''
+        Units that require (i.e. depend on and need to go down with) this unit.
+        As discussed in the `wantedBy` option description this also creates
+        `.requires` symlinks automatically.
+      '';
+    };
+
+    wantedBy = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = lib.mdDoc ''
+        Units that want (i.e. depend on) this unit. The standard way to make
+        a unit start by default at boot time is to set this option to
+        `["multi-user.target"]` for system services. Likewise, for user units
+        (`systemd.user.<name>.*`), set it to `["default.target"]` to make a unit
+        start by default when the user `<name>` logs on.
+
+        This option creates a `.wants` symlink in the given target that exists
+        statelessly without the need for running `systemctl enable`.
+        The `[Install]` section described in {manpage}`systemd.unit(5)` however is
+        not supported because it is a stateful process that does not fit well
+        into the NixOS design.
+      '';
+    };
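+    # Sketch (service name illustrative): start a service at boot by pulling
+    # it into the default system target:
+    #
+    #   systemd.services.example.wantedBy = [ "multi-user.target" ];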
+
+    aliases = mkOption {
+      default = [];
+      type = types.listOf unitNameType;
+      description = lib.mdDoc "Aliases of that unit.";
+    };
+
+  };
+
+  concreteUnitOptions = sharedOptions // {
+
+    text = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Text of this systemd unit.";
+    };
+
+    unit = mkOption {
+      internal = true;
+      description = lib.mdDoc "The generated unit.";
+    };
+
+  };
+
+  commonUnitOptions = {
+    options = sharedOptions // {
+
+      description = mkOption {
+        default = "";
+        type = types.singleLineStr;
+        description = lib.mdDoc "Description of this unit used in systemd messages and progress indicators.";
+      };
+
+      documentation = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc "A list of URIs referencing documentation for this unit or its configuration.";
+      };
+
+      requires = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          Start the specified units when this unit is started, and stop
+          this unit when the specified units are stopped or fail.
+        '';
+      };
+
+      wants = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          Start the specified units when this unit is started.
+        '';
+      };
+
+      after = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          If the specified units are started at the same time as
+          this unit, delay this unit until they have started.
+        '';
+      };
+
+      before = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          If the specified units are started at the same time as
+          this unit, delay them until this unit has started.
+        '';
+      };
+
+      bindsTo = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          Like `requires`, but in addition, if the specified units
+          unexpectedly disappear, this unit will be stopped as well.
+        '';
+      };
+
+      partOf = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          If the specified units are stopped or restarted, then this
+          unit is stopped or restarted as well.
+        '';
+      };
+
+      conflicts = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          If the specified units are started, then this unit is stopped
+          and vice versa.
+        '';
+      };
+
+      requisite = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          Similar to `requires`. However, if the units listed here are not
+          already started, they will not be started and the start-up of this
+          unit will fail.
+        '';
+      };
+
+      unitConfig = mkOption {
+        default = {};
+        example = { RequiresMountsFor = "/data"; };
+        type = types.attrsOf unitOption;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Unit]` section of the unit.  See
+          {manpage}`systemd.unit(5)` for details.
+        '';
+      };
+
+      onFailure = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          A list of one or more units that are activated when
+          this unit enters the "failed" state.
+        '';
+      };
+
+      onSuccess = mkOption {
+        default = [];
+        type = types.listOf unitNameType;
+        description = lib.mdDoc ''
+          A list of one or more units that are activated when
+          this unit enters the "inactive" state.
+        '';
+      };
+
+      startLimitBurst = mkOption {
+         type = types.int;
+         description = lib.mdDoc ''
+           Configure unit start rate limiting. Units which are started
+           more than `startLimitBurst` times within the interval specified
+           by `startLimitIntervalSec` are not permitted to start any more.
+         '';
+      };
+
+      startLimitIntervalSec = mkOption {
+         type = types.int;
+         description = lib.mdDoc ''
+           Configure unit start rate limiting. Sets the length of the
+           interval (in seconds) within which at most `startLimitBurst`
+           starts of the unit are permitted.
+         '';
+      };
+
+    };
+  };
+
+  stage2CommonUnitOptions = {
+    imports = [
+      commonUnitOptions
+    ];
+
+    options = {
+      restartTriggers = mkOption {
+        default = [];
+        type = types.listOf types.unspecified;
+        description = lib.mdDoc ''
+          An arbitrary list of items such as derivations.  If any item
+          in the list changes between reconfigurations, the service will
+          be restarted.
+        '';
+      };
+
+      reloadTriggers = mkOption {
+        default = [];
+        type = types.listOf unitOption;
+        description = lib.mdDoc ''
+          An arbitrary list of items such as derivations.  If any item
+          in the list changes between reconfigurations, the service will
+          be reloaded.  If anything but a reload trigger changes in the
+          unit file, the unit will be restarted instead.
+        '';
+      };
+    };
+  };
+  stage1CommonUnitOptions = commonUnitOptions;
+
+  serviceOptions = { name, config, ... }: {
+    options = {
+
+      environment = mkOption {
+        default = {};
+        type = with types; attrsOf (nullOr (oneOf [ str path package ]));
+        example = { PATH = "/foo/bar/bin"; LANG = "nl_NL.UTF-8"; };
+        description = lib.mdDoc "Environment variables passed to the service's processes.";
+      };
+
+      path = mkOption {
+        default = [];
+        type = with types; listOf (oneOf [ package str ]);
+        description = lib.mdDoc ''
+          Packages added to the service's {env}`PATH`
+          environment variable.  Both the {file}`bin`
+          and {file}`sbin` subdirectories of each
+          package are added.
+        '';
+      };
+
+      serviceConfig = mkOption {
+        default = {};
+        example =
+          { RestartSec = 5;
+          };
+        type = types.addCheck (types.attrsOf unitOption) checkService;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Service]` section of the unit.  See
+          {manpage}`systemd.service(5)` for details.
+        '';
+      };
+
+      script = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Shell commands executed as the service's main process.";
+      };
+
+      scriptArgs = mkOption {
+        type = types.str;
+        default = "";
+        example = "%i";
+        description = lib.mdDoc ''
+          Arguments passed to the main process script.
+          Can contain specifiers (`%` placeholders expanded by systemd, see {manpage}`systemd.unit(5)`).
+        '';
+      };
+
+      preStart = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Shell commands executed before the service's main process
+          is started.
+        '';
+      };
+
+      postStart = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Shell commands executed after the service's main process
+          is started.
+        '';
+      };
+
+      reload = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Shell commands executed when the service's main process
+          is reloaded.
+        '';
+      };
+
+      preStop = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Shell commands executed to stop the service.
+        '';
+      };
+
+      postStop = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Shell commands executed after the service's main process
+          has exited.
+        '';
+      };
+
+      jobScripts = mkOption {
+        type = with types; coercedTo path singleton (listOf path);
+        internal = true;
+        description = lib.mdDoc "A list of all job script derivations of this unit.";
+        default = [];
+      };
+
+    };
+
+    config = mkMerge [
+      (mkIf (config.preStart != "") rec {
+        jobScripts = makeJobScript "${name}-pre-start" config.preStart;
+        serviceConfig.ExecStartPre = [ jobScripts ];
+      })
+      (mkIf (config.script != "") rec {
+        jobScripts = makeJobScript "${name}-start" config.script;
+        serviceConfig.ExecStart = jobScripts + " " + config.scriptArgs;
+      })
+      (mkIf (config.postStart != "") rec {
+        jobScripts = (makeJobScript "${name}-post-start" config.postStart);
+        serviceConfig.ExecStartPost = [ jobScripts ];
+      })
+      (mkIf (config.reload != "") rec {
+        jobScripts = makeJobScript "${name}-reload" config.reload;
+        serviceConfig.ExecReload = jobScripts;
+      })
+      (mkIf (config.preStop != "") rec {
+        jobScripts = makeJobScript "${name}-pre-stop" config.preStop;
+        serviceConfig.ExecStop = jobScripts;
+      })
+      (mkIf (config.postStop != "") rec {
+        jobScripts = makeJobScript "${name}-post-stop" config.postStop;
+        serviceConfig.ExecStopPost = jobScripts;
+      })
+    ];
+
+  };
+
+  stage2ServiceOptions = {
+    imports = [
+      stage2CommonUnitOptions
+      serviceOptions
+    ];
+
+    options = {
+      restartIfChanged = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether the service should be restarted during a NixOS
+          configuration switch if its definition has changed.
+        '';
+      };
+
+      reloadIfChanged = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether the service should be reloaded during a NixOS
+          configuration switch if its definition has changed.  If
+          enabled, the value of {option}`restartIfChanged` is
+          ignored.
+
+          This option should not be used anymore in favor of
+          {option}`reloadTriggers` which allows more granular
+          control of when a service is reloaded and when a service
+          is restarted.
+        '';
+      };
+
+      stopIfChanged = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          If set, a changed unit is restarted by calling
+          {command}`systemctl stop` in the old configuration,
+          then {command}`systemctl start` in the new one.
+          Otherwise, it is restarted in a single step using
+          {command}`systemctl restart` in the new configuration.
+          The latter is less correct because it runs the
+          `ExecStop` commands from the new
+          configuration.
+        '';
+      };
+
+      startAt = mkOption {
+        type = with types; either str (listOf str);
+        default = [];
+        example = "Sun 14:00:00";
+        description = lib.mdDoc ''
+          Automatically start this unit at the given date/time, which
+          must be in the format described in
+          {manpage}`systemd.time(7)`.  This is equivalent
+          to adding a corresponding timer unit with
+          {option}`OnCalendar` set to the value given here.
+        '';
+        apply = v: if isList v then v else [ v ];
+      };
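+      # Sketch (service name illustrative): run a oneshot service every day
+      # at 02:00; a matching timer unit with OnCalendar set to this value is
+      # generated automatically:
+      #
+      #   systemd.services.example.startAt = "*-*-* 02:00:00";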
+    };
+  };
+
+  stage1ServiceOptions = {
+    imports = [
+      stage1CommonUnitOptions
+      serviceOptions
+    ];
+  };
+
+
+  socketOptions = {
+    options = {
+
+      listenStreams = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = [ "0.0.0.0:993" "/run/my-socket" ];
+        description = lib.mdDoc ''
+          For each item in this list, a `ListenStream`
+          option in the `[Socket]` section will be created.
+        '';
+      };
+
+      listenDatagrams = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = [ "0.0.0.0:993" "/run/my-socket" ];
+        description = lib.mdDoc ''
+          For each item in this list, a `ListenDatagram`
+          option in the `[Socket]` section will be created.
+        '';
+      };
+
+      socketConfig = mkOption {
+        default = {};
+        example = { ListenStream = "/run/my-socket"; };
+        type = types.attrsOf unitOption;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Socket]` section of the unit.  See
+          {manpage}`systemd.socket(5)` for details.
+        '';
+      };
+    };
+
+  };
+
+  stage2SocketOptions = {
+    imports = [
+      stage2CommonUnitOptions
+      socketOptions
+    ];
+  };
+
+  stage1SocketOptions = {
+    imports = [
+      stage1CommonUnitOptions
+      socketOptions
+    ];
+  };
+
+
+  timerOptions = {
+    options = {
+
+      timerConfig = mkOption {
+        default = {};
+        example = { OnCalendar = "Sun 14:00:00"; Unit = "foo.service"; };
+        type = types.attrsOf unitOption;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Timer]` section of the unit.  See
+          {manpage}`systemd.timer(5)` and
+          {manpage}`systemd.time(7)` for details.
+        '';
+      };
+
+    };
+  };
+
+  stage2TimerOptions = {
+    imports = [
+      stage2CommonUnitOptions
+      timerOptions
+    ];
+  };
+
+  stage1TimerOptions = {
+    imports = [
+      stage1CommonUnitOptions
+      timerOptions
+    ];
+  };
+
+
+  pathOptions = {
+    options = {
+
+      pathConfig = mkOption {
+        default = {};
+        example = { PathChanged = "/some/path"; Unit = "changedpath.service"; };
+        type = types.attrsOf unitOption;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Path]` section of the unit.  See
+          {manpage}`systemd.path(5)` for details.
+        '';
+      };
+
+    };
+  };
+
+  stage2PathOptions = {
+    imports = [
+      stage2CommonUnitOptions
+      pathOptions
+    ];
+  };
+
+  stage1PathOptions = {
+    imports = [
+      stage1CommonUnitOptions
+      pathOptions
+    ];
+  };
+
+
+  mountOptions = {
+    options = {
+
+      what = mkOption {
+        example = "/dev/sda1";
+        type = types.str;
+        description = lib.mdDoc "Absolute path of device node, file or other resource. (Mandatory)";
+      };
+
+      where = mkOption {
+        example = "/mnt";
+        type = types.str;
+        description = lib.mdDoc ''
+          Absolute path of the directory to use as the mount point.
+          Will be created if it doesn't exist. (Mandatory)
+        '';
+      };
+
+      type = mkOption {
+        default = "";
+        example = "ext4";
+        type = types.str;
+        description = lib.mdDoc "File system type.";
+      };
+
+      options = mkOption {
+        default = "";
+        example = "noatime";
+        type = types.commas;
+        description = lib.mdDoc "Options used to mount the file system.";
+      };
+
+      mountConfig = mkOption {
+        default = {};
+        example = { DirectoryMode = "0775"; };
+        type = types.attrsOf unitOption;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Mount]` section of the unit.  See
+          {manpage}`systemd.mount(5)` for details.
+        '';
+      };
+
+    };
+  };
+
+  stage2MountOptions = {
+    imports = [
+      stage2CommonUnitOptions
+      mountOptions
+    ];
+  };
+
+  stage1MountOptions = {
+    imports = [
+      stage1CommonUnitOptions
+      mountOptions
+    ];
+  };
+
+  automountOptions = {
+    options = {
+
+      where = mkOption {
+        example = "/mnt";
+        type = types.str;
+        description = lib.mdDoc ''
+          Absolute path of the directory to use as the mount point.
+          Will be created if it doesn't exist. (Mandatory)
+        '';
+      };
+
+      automountConfig = mkOption {
+        default = {};
+        example = { DirectoryMode = "0775"; };
+        type = types.attrsOf unitOption;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Automount]` section of the unit.  See
+          {manpage}`systemd.automount(5)` for details.
+        '';
+      };
+
+    };
+  };
+
+  stage2AutomountOptions = {
+    imports = [
+      stage2CommonUnitOptions
+      automountOptions
+    ];
+  };
+
+  stage1AutomountOptions = {
+    imports = [
+      stage1CommonUnitOptions
+      automountOptions
+    ];
+  };
+
+  sliceOptions = {
+    options = {
+
+      sliceConfig = mkOption {
+        default = {};
+        example = { MemoryMax = "2G"; };
+        type = types.attrsOf unitOption;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Slice]` section of the unit.  See
+          {manpage}`systemd.slice(5)` for details.
+        '';
+      };
+
+    };
+  };
+
+  stage2SliceOptions = {
+    imports = [
+      stage2CommonUnitOptions
+      sliceOptions
+    ];
+  };
+
+  stage1SliceOptions = {
+    imports = [
+      stage1CommonUnitOptions
+      sliceOptions
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/lib/test-driver/default.nix b/nixpkgs/nixos/lib/test-driver/default.nix
new file mode 100644
index 000000000000..09d80deb8546
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/default.nix
@@ -0,0 +1,49 @@
+{ lib
+, python3Packages
+, enableOCR ? false
+, qemu_pkg ? qemu_test
+, coreutils
+, imagemagick_light
+, netpbm
+, qemu_test
+, socat
+, ruff
+, tesseract4
+, vde2
+, extraPythonPackages ? (_ : [])
+, nixosTests
+}:
+
+python3Packages.buildPythonApplication {
+  pname = "nixos-test-driver";
+  version = "1.1";
+  src = ./.;
+  format = "pyproject";
+
+  propagatedBuildInputs = [
+    coreutils
+    netpbm
+    python3Packages.colorama
+    python3Packages.ptpython
+    qemu_pkg
+    socat
+    vde2
+  ]
+    ++ (lib.optionals enableOCR [ imagemagick_light tesseract4 ])
+    ++ extraPythonPackages python3Packages;
+
+  passthru.tests = {
+    inherit (nixosTests.nixos-test-driver) driver-timeout;
+  };
+
+  doCheck = true;
+  nativeCheckInputs = with python3Packages; [ mypy ruff black ];
+  checkPhase = ''
+    echo -e "\x1b[32m## run mypy\x1b[0m"
+    mypy test_driver extract-docstrings.py
+    echo -e "\x1b[32m## run ruff\x1b[0m"
+    ruff .
+    echo -e "\x1b[32m## run black\x1b[0m"
+    black --check --diff .
+  '';
+}
diff --git a/nixpkgs/nixos/lib/test-driver/extract-docstrings.py b/nixpkgs/nixos/lib/test-driver/extract-docstrings.py
new file mode 100644
index 000000000000..64850ca711f3
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/extract-docstrings.py
@@ -0,0 +1,74 @@
+import ast
+import sys
+from pathlib import Path
+
+"""
+This program takes all the Machine class methods and prints their docstrings
+in markdown style. These can then be included in the NixOS test driver
+documentation, assuming the docstrings themselves are also in markdown.
+
+These are included in the test driver documentation in the NixOS manual.
+See https://nixos.org/manual/nixos/stable/#ssec-machine-objects
+
+The python input looks like this:
+
+```py
+...
+
+class Machine(...):
+    ...
+
+    def some_function(self, param1, param2):
+        ""
+        documentation string of some_function.
+        foo bar baz.
+        ""
+        ...
+```
+
+Output will be:
+
+```markdown
+...
+
+some_function(param1, param2)
+
+:   documentation string of some_function.
+    foo bar baz.
+
+...
+```
+
+"""
+
+
+def main() -> None:
+    if len(sys.argv) != 2:
+        print(f"Usage: {sys.argv[0]} <path-to-test-driver>")
+        sys.exit(1)
+
+    module = ast.parse(Path(sys.argv[1]).read_text())
+
+    class_definitions = (node for node in module.body if isinstance(node, ast.ClassDef))
+
+    machine_class = next(filter(lambda x: x.name == "Machine", class_definitions))
+    assert machine_class is not None
+
+    function_definitions = [
+        node for node in machine_class.body if isinstance(node, ast.FunctionDef)
+    ]
+    function_definitions.sort(key=lambda x: x.name)
+
+    for function in function_definitions:
+        docstr = ast.get_docstring(function)
+        if docstr is not None:
+            args = ", ".join(a.arg for a in function.args.args[1:])
+            args = f"({args})"
+
+            docstr = "\n".join(f"    {line}" for line in docstr.strip().splitlines())
+
+            print(f"{function.name}{args}\n\n:{docstr[1:]}\n")
+
+
+if __name__ == "__main__":
+    main()
diff --git a/nixpkgs/nixos/lib/test-driver/nixos-test-driver-docstrings.nix b/nixpkgs/nixos/lib/test-driver/nixos-test-driver-docstrings.nix
new file mode 100644
index 000000000000..a3ef50e4e820
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/nixos-test-driver-docstrings.nix
@@ -0,0 +1,13 @@
+{ runCommand
+, python3
+}:
+
+let
+  env = { nativeBuildInputs = [ python3 ]; };
+in
+
+runCommand "nixos-test-driver-docstrings" env ''
+  mkdir $out
+  python3 ${./extract-docstrings.py} ${./test_driver/machine.py} \
+    > $out/machine-methods.md
+''
diff --git a/nixpkgs/nixos/lib/test-driver/pyproject.toml b/nixpkgs/nixos/lib/test-driver/pyproject.toml
new file mode 100644
index 000000000000..8638f14dfdae
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/pyproject.toml
@@ -0,0 +1,44 @@
+[build-system]
+requires = ["setuptools"]
+build-backend = "setuptools.build_meta"
+
+[project]
+name = "nixos-test-driver"
+version = "0.0.0"
+
+[project.scripts]
+nixos-test-driver = "test_driver:main"
+generate-driver-symbols = "test_driver:generate_driver_symbols"
+
+[tool.setuptools.packages]
+find = {}
+
+[tool.setuptools.package-data]
+test_driver = ["py.typed"]
+
+[tool.ruff]
+line-length = 88
+
+select = ["E", "F", "I", "U", "N"]
+ignore = ["E501"]
+
+# xxx: we can import https://pypi.org/project/types-colorama/ here
+[[tool.mypy.overrides]]
+module = "colorama.*"
+ignore_missing_imports = true
+
+[[tool.mypy.overrides]]
+module = "ptpython.*"
+ignore_missing_imports = true
+
+[tool.black]
+line-length = 88
+target-version = ['py39']
+include = '\.pyi?$'
+
+[tool.mypy]
+python_version = "3.10"
+warn_redundant_casts = true
+disallow_untyped_calls = true
+disallow_untyped_defs = true
+no_implicit_optional = true
diff --git a/nixpkgs/nixos/lib/test-driver/shell.nix b/nixpkgs/nixos/lib/test-driver/shell.nix
new file mode 100644
index 000000000000..367bbad556c0
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/shell.nix
@@ -0,0 +1,2 @@
+with import ../../.. {};
+pkgs.callPackage ./default.nix {}
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/__init__.py b/nixpkgs/nixos/lib/test-driver/test_driver/__init__.py
new file mode 100755
index 000000000000..9daae1e941a6
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/__init__.py
@@ -0,0 +1,140 @@
+import argparse
+import os
+import time
+from pathlib import Path
+
+import ptpython.repl
+
+from test_driver.driver import Driver
+from test_driver.logger import rootlog
+
+
+class EnvDefault(argparse.Action):
+    """An argpars Action that takes values from the specified
+    environment variable as the flags default value.
+    """
+
+    def __init__(self, envvar, required=False, default=None, nargs=None, **kwargs):  # type: ignore
+        if not default and envvar:
+            if envvar in os.environ:
+                if nargs is not None and (nargs.isdigit() or nargs in ["*", "+"]):
+                    default = os.environ[envvar].split()
+                else:
+                    default = os.environ[envvar]
+                kwargs["help"] = (
+                    kwargs["help"] + f" (default from environment: {default})"
+                )
+        if required and default:
+            required = False
+        super().__init__(default=default, required=required, nargs=nargs, **kwargs)
+
+    def __call__(self, parser, namespace, values, option_string=None):  # type: ignore
+        setattr(namespace, self.dest, values)
+
+
+def writeable_dir(arg: str) -> Path:
+    """Raises an ArgumentTypeError if the given argument isn't a writeable directory
+    Note: We want to fail as early as possible if a directory isn't writeable,
+    since an executed nixos-test could fail (very late) because of the test-driver
+    writing in a directory without proper permissions.
+    """
+    path = Path(arg)
+    if not path.is_dir():
+        raise argparse.ArgumentTypeError(f"{path} is not a directory")
+    if not os.access(path, os.W_OK):
+        raise argparse.ArgumentTypeError(f"{path} is not a writeable directory")
+    return path
+
+
+def main() -> None:
+    arg_parser = argparse.ArgumentParser(prog="nixos-test-driver")
+    arg_parser.add_argument(
+        "-K",
+        "--keep-vm-state",
+        help="re-use a VM state coming from a previous run",
+        action="store_true",
+    )
+    arg_parser.add_argument(
+        "-I",
+        "--interactive",
+        help="drop into a python repl and run the tests interactively",
+        action=argparse.BooleanOptionalAction,
+    )
+    arg_parser.add_argument(
+        "--start-scripts",
+        metavar="START-SCRIPT",
+        action=EnvDefault,
+        envvar="startScripts",
+        nargs="*",
+        help="start scripts for participating virtual machines",
+    )
+    arg_parser.add_argument(
+        "--vlans",
+        metavar="VLAN",
+        action=EnvDefault,
+        envvar="vlans",
+        nargs="*",
+        help="vlans to span by the driver",
+    )
+    arg_parser.add_argument(
+        "--global-timeout",
+        type=int,
+        metavar="GLOBAL_TIMEOUT",
+        action=EnvDefault,
+        envvar="globalTimeout",
+        help="Timeout in seconds for the whole test",
+    )
+    arg_parser.add_argument(
+        "-o",
+        "--output_directory",
+        help="""The path to the directory where outputs copied from the VM will be placed.
+                By e.g. Machine.copy_from_vm or Machine.screenshot""",
+        default=Path.cwd(),
+        type=writeable_dir,
+    )
+    arg_parser.add_argument(
+        "testscript",
+        action=EnvDefault,
+        envvar="testScript",
+        help="the test script to run",
+        type=Path,
+    )
+
+    args = arg_parser.parse_args()
+
+    if not args.keep_vm_state:
+        rootlog.info("Machine state will be reset. To keep it, pass --keep-vm-state")
+
+    with Driver(
+        args.start_scripts,
+        args.vlans,
+        args.testscript.read_text(),
+        args.output_directory.resolve(),
+        args.keep_vm_state,
+        args.global_timeout,
+    ) as driver:
+        if args.interactive:
+            history_dir = os.getcwd()
+            history_path = os.path.join(history_dir, ".nixos-test-history")
+            ptpython.repl.embed(
+                driver.test_symbols(),
+                {},
+                history_filename=history_path,
+            )
+        else:
+            tic = time.time()
+            driver.run_tests()
+            toc = time.time()
+            rootlog.info(f"test script finished in {(toc-tic):.2f}s")
+
+
+def generate_driver_symbols() -> None:
+    """
+    This generates a file with symbols of the test-driver code that can be used
+    in user's test scripts. That list is then used by pyflakes to lint those
+    scripts.
+    """
+    d = Driver([], [], "", Path())
+    test_symbols = d.test_symbols()
+    with open("driver-symbols", "w") as fp:
+        fp.write(",".join(test_symbols.keys()))
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/driver.py b/nixpkgs/nixos/lib/test-driver/test_driver/driver.py
new file mode 100644
index 000000000000..786821b0cc0d
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/driver.py
@@ -0,0 +1,260 @@
+import os
+import re
+import signal
+import tempfile
+import threading
+from contextlib import contextmanager
+from pathlib import Path
+from typing import Any, Callable, ContextManager, Dict, Iterator, List, Optional, Union
+
+from test_driver.logger import rootlog
+from test_driver.machine import Machine, NixStartScript, retry
+from test_driver.polling_condition import PollingCondition
+from test_driver.vlan import VLan
+
+
+def get_tmp_dir() -> Path:
+    """Returns a temporary directory that is defined by TMPDIR, TEMP, TMP or CWD
+    Raises an exception in case the retrieved temporary directory is not writeable
+    See https://docs.python.org/3/library/tempfile.html#tempfile.gettempdir
+    """
+    tmp_dir = Path(tempfile.gettempdir())
+    tmp_dir.mkdir(mode=0o700, exist_ok=True)
+    if not tmp_dir.is_dir():
+        raise NotADirectoryError(
+            f"The directory defined by TMPDIR, TEMP, TMP or CWD: {tmp_dir} is not a directory"
+        )
+    if not os.access(tmp_dir, os.W_OK):
+        raise PermissionError(
+            f"The directory defined by TMPDIR, TEMP, TMP, or CWD: {tmp_dir} is not writeable"
+        )
+    return tmp_dir
+
+
+def pythonize_name(name: str) -> str:
+    return re.sub(r"^[^A-Za-z_]|[^A-Za-z0-9_]", "_", name)
+
+
+class Driver:
+    """A handle to the driver that sets up the environment
+    and runs the tests"""
+
+    tests: str
+    vlans: List[VLan]
+    machines: List[Machine]
+    polling_conditions: List[PollingCondition]
+    global_timeout: int
+    race_timer: threading.Timer
+
+    def __init__(
+        self,
+        start_scripts: List[str],
+        vlans: List[int],
+        tests: str,
+        out_dir: Path,
+        keep_vm_state: bool = False,
+        global_timeout: int = 24 * 60 * 60 * 7,
+    ):
+        self.tests = tests
+        self.out_dir = out_dir
+        self.global_timeout = global_timeout
+        self.race_timer = threading.Timer(global_timeout, self.terminate_test)
+
+        tmp_dir = get_tmp_dir()
+
+        with rootlog.nested("start all VLans"):
+            vlans = list(set(vlans))
+            self.vlans = [VLan(nr, tmp_dir) for nr in vlans]
+
+        def cmd(scripts: List[str]) -> Iterator[NixStartScript]:
+            for s in scripts:
+                yield NixStartScript(s)
+
+        self.polling_conditions = []
+
+        self.machines = [
+            Machine(
+                start_command=cmd,
+                keep_vm_state=keep_vm_state,
+                name=cmd.machine_name,
+                tmp_dir=tmp_dir,
+                callbacks=[self.check_polling_conditions],
+                out_dir=self.out_dir,
+            )
+            for cmd in cmd(start_scripts)
+        ]
+
+    def __enter__(self) -> "Driver":
+        return self
+
+    def __exit__(self, *_: Any) -> None:
+        with rootlog.nested("cleanup"):
+            self.race_timer.cancel()
+            for machine in self.machines:
+                machine.release()
+
+    def subtest(self, name: str) -> Iterator[None]:
+        """Group logs under a given test name"""
+        with rootlog.nested("subtest: " + name):
+            try:
+                yield
+                return True
+            except Exception as e:
+                rootlog.error(f'Test "{name}" failed with error: "{e}"')
+                raise e
+
+    def test_symbols(self) -> Dict[str, Any]:
+        @contextmanager
+        def subtest(name: str) -> Iterator[None]:
+            return self.subtest(name)
+
+        general_symbols = dict(
+            start_all=self.start_all,
+            test_script=self.test_script,
+            machines=self.machines,
+            vlans=self.vlans,
+            driver=self,
+            log=rootlog,
+            os=os,
+            create_machine=self.create_machine,
+            subtest=subtest,
+            run_tests=self.run_tests,
+            join_all=self.join_all,
+            retry=retry,
+            serial_stdout_off=self.serial_stdout_off,
+            serial_stdout_on=self.serial_stdout_on,
+            polling_condition=self.polling_condition,
+            Machine=Machine,  # for typing
+        )
+        machine_symbols = {pythonize_name(m.name): m for m in self.machines}
+        # If there's exactly one machine, make it available under the name
+        # "machine", even if it's not called that.
+        if len(self.machines) == 1:
+            (machine_symbols["machine"],) = self.machines
+        vlan_symbols = {
+            f"vlan{v.nr}": self.vlans[idx] for idx, v in enumerate(self.vlans)
+        }
+        print(
+            "additionally exposed symbols:\n    "
+            + ", ".join(map(lambda m: m.name, self.machines))
+            + ",\n    "
+            + ", ".join(map(lambda v: f"vlan{v.nr}", self.vlans))
+            + ",\n    "
+            + ", ".join(list(general_symbols.keys()))
+        )
+        return {**general_symbols, **machine_symbols, **vlan_symbols}
+
+    def test_script(self) -> None:
+        """Run the test script"""
+        with rootlog.nested("run the VM test script"):
+            symbols = self.test_symbols()  # call eagerly
+            exec(self.tests, symbols, None)
+
+    def run_tests(self) -> None:
+        """Run the test script (for non-interactive test runs)"""
+        rootlog.info(
+            f"Test will time out and terminate in {self.global_timeout} seconds"
+        )
+        self.race_timer.start()
+        self.test_script()
+        # TODO: Collect coverage data
+        for machine in self.machines:
+            if machine.is_up():
+                machine.execute("sync")
+
+    def start_all(self) -> None:
+        """Start all machines"""
+        with rootlog.nested("start all VMs"):
+            for machine in self.machines:
+                machine.start()
+
+    def join_all(self) -> None:
+        """Wait for all machines to shut down"""
+        with rootlog.nested("wait for all VMs to finish"):
+            for machine in self.machines:
+                machine.wait_for_shutdown()
+            self.race_timer.cancel()
+
+    def terminate_test(self) -> None:
+        # This will usually be running in a thread other than
+        # the one actually executing the test script.
+        with rootlog.nested("timeout reached; test terminating..."):
+            for machine in self.machines:
+                machine.release()
+            # As we cannot `sys.exit` from another thread,
+            # we can at least force the main thread to get SIGTERM'ed.
+            # This prevents a test script that catches all exceptions
+            # from swallowing the timeout and never terminating.
+            os.kill(os.getpid(), signal.SIGTERM)
+
+    def create_machine(self, args: Dict[str, Any]) -> Machine:
+        tmp_dir = get_tmp_dir()
+
+        if args.get("startCommand"):
+            start_command: str = args.get("startCommand", "")
+            cmd = NixStartScript(start_command)
+            name = args.get("name", cmd.machine_name)
+        else:
+            cmd = Machine.create_startcommand(args)  # type: ignore
+            name = args.get("name", "machine")
+
+        return Machine(
+            tmp_dir=tmp_dir,
+            out_dir=self.out_dir,
+            start_command=cmd,
+            name=name,
+            keep_vm_state=args.get("keep_vm_state", False),
+        )
+
+    def serial_stdout_on(self) -> None:
+        rootlog._print_serial_logs = True
+
+    def serial_stdout_off(self) -> None:
+        rootlog._print_serial_logs = False
+
+    def check_polling_conditions(self) -> None:
+        for condition in self.polling_conditions:
+            condition.maybe_raise()
+
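+    # Sketch of how `polling_condition` is used from a test script; the
+    # process name below is illustrative, not part of the driver:
+    #
+    #   @polling_condition
+    #   def foo_running():
+    #       machine.succeed("pgrep -x foo")
+    #
+    #   with foo_running:
+    #       ...  # foo is checked periodically while this block runs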
+    def polling_condition(
+        self,
+        fun_: Optional[Callable] = None,
+        *,
+        seconds_interval: float = 2.0,
+        description: Optional[str] = None,
+    ) -> Union[Callable[[Callable], ContextManager], ContextManager]:
+        driver = self
+
+        class Poll:
+            def __init__(self, fun: Callable):
+                self.condition = PollingCondition(
+                    fun,
+                    seconds_interval,
+                    description,
+                )
+
+            def __enter__(self) -> None:
+                driver.polling_conditions.append(self.condition)
+
+            def __exit__(self, a, b, c) -> None:  # type: ignore
+                res = driver.polling_conditions.pop()
+                assert res is self.condition
+
+            def wait(self, timeout: int = 900) -> None:
+                def condition(last: bool) -> bool:
+                    if last:
+                        rootlog.info(f"Last chance for {self.condition.description}")
+                    ret = self.condition.check(force=True)
+                    if not ret and not last:
+                        rootlog.info(
+                            f"({self.condition.description} failure not fatal yet)"
+                        )
+                    return ret
+
+                with rootlog.nested(f"waiting for {self.condition.description}"):
+                    retry(condition, timeout=timeout)
+
+        if fun_ is None:
+            return Poll
+        else:
+            return Poll(fun_)
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/logger.py b/nixpkgs/nixos/lib/test-driver/test_driver/logger.py
new file mode 100644
index 000000000000..116244b5e4ae
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/logger.py
@@ -0,0 +1,107 @@
+# mypy: disable-error-code="no-untyped-call"
+# drop the above line when mypy is upgraded to include
+# https://github.com/python/typeshed/commit/49b717ca52bf0781a538b04c0d76a5513f7119b8
+import codecs
+import os
+import sys
+import time
+import unicodedata
+from contextlib import contextmanager
+from queue import Empty, Queue
+from typing import Any, Dict, Iterator
+from xml.sax.saxutils import XMLGenerator
+
+from colorama import Fore, Style
+
+
+class Logger:
+    def __init__(self) -> None:
+        self.logfile = os.environ.get("LOGFILE", "/dev/null")
+        self.logfile_handle = codecs.open(self.logfile, "wb")
+        self.xml = XMLGenerator(self.logfile_handle, encoding="utf-8")
+        self.queue: "Queue[Dict[str, str]]" = Queue()
+
+        self.xml.startDocument()
+        self.xml.startElement("logfile", attrs={})
+
+        self._print_serial_logs = True
+
+    @staticmethod
+    def _eprint(*args: object, **kwargs: Any) -> None:
+        print(*args, file=sys.stderr, **kwargs)
+
+    def close(self) -> None:
+        self.xml.endElement("logfile")
+        self.xml.endDocument()
+        self.logfile_handle.close()
+
+    def sanitise(self, message: str) -> str:
+        return "".join(ch for ch in message if unicodedata.category(ch)[0] != "C")
+
+    def maybe_prefix(self, message: str, attributes: Dict[str, str]) -> str:
+        if "machine" in attributes:
+            return f"{attributes['machine']}: {message}"
+        return message
+
+    def log_line(self, message: str, attributes: Dict[str, str]) -> None:
+        self.xml.startElement("line", attributes)
+        self.xml.characters(message)
+        self.xml.endElement("line")
+
+    def info(self, *args, **kwargs) -> None:  # type: ignore
+        self.log(*args, **kwargs)
+
+    def warning(self, *args, **kwargs) -> None:  # type: ignore
+        self.log(*args, **kwargs)
+
+    def error(self, *args, **kwargs) -> None:  # type: ignore
+        self.log(*args, **kwargs)
+        sys.exit(1)
+
+    def log(self, message: str, attributes: Dict[str, str] = {}) -> None:
+        self._eprint(self.maybe_prefix(message, attributes))
+        self.drain_log_queue()
+        self.log_line(message, attributes)
+
+    def log_serial(self, message: str, machine: str) -> None:
+        self.enqueue({"msg": message, "machine": machine, "type": "serial"})
+        if self._print_serial_logs:
+            self._eprint(Style.DIM + f"{machine} # {message}" + Style.RESET_ALL)
+
+    def enqueue(self, item: Dict[str, str]) -> None:
+        self.queue.put(item)
+
+    def drain_log_queue(self) -> None:
+        try:
+            while True:
+                item = self.queue.get_nowait()
+                msg = self.sanitise(item["msg"])
+                del item["msg"]
+                self.log_line(msg, item)
+        except Empty:
+            pass
+
+    @contextmanager
+    def nested(self, message: str, attributes: Dict[str, str] = {}) -> Iterator[None]:
+        self._eprint(
+            self.maybe_prefix(
+                Style.BRIGHT + Fore.GREEN + message + Style.RESET_ALL, attributes
+            )
+        )
+
+        self.xml.startElement("nest", attrs={})
+        self.xml.startElement("head", attributes)
+        self.xml.characters(message)
+        self.xml.endElement("head")
+
+        tic = time.time()
+        self.drain_log_queue()
+        yield
+        self.drain_log_queue()
+        toc = time.time()
+        self.log(f"(finished: {message}, in {toc - tic:.2f} seconds)")
+
+        self.xml.endElement("nest")
+
+
+rootlog = Logger()
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/machine.py b/nixpkgs/nixos/lib/test-driver/test_driver/machine.py
new file mode 100644
index 000000000000..f430321bb607
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/machine.py
@@ -0,0 +1,1296 @@
+import base64
+import io
+import os
+import queue
+import re
+import select
+import shlex
+import shutil
+import socket
+import subprocess
+import sys
+import tempfile
+import threading
+import time
+from contextlib import _GeneratorContextManager, nullcontext
+from pathlib import Path
+from queue import Queue
+from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple
+
+from test_driver.logger import rootlog
+
+from .qmp import QMPSession
+
+CHAR_TO_KEY = {
+    "A": "shift-a",
+    "N": "shift-n",
+    "-": "0x0C",
+    "_": "shift-0x0C",
+    "B": "shift-b",
+    "O": "shift-o",
+    "=": "0x0D",
+    "+": "shift-0x0D",
+    "C": "shift-c",
+    "P": "shift-p",
+    "[": "0x1A",
+    "{": "shift-0x1A",
+    "D": "shift-d",
+    "Q": "shift-q",
+    "]": "0x1B",
+    "}": "shift-0x1B",
+    "E": "shift-e",
+    "R": "shift-r",
+    ";": "0x27",
+    ":": "shift-0x27",
+    "F": "shift-f",
+    "S": "shift-s",
+    "'": "0x28",
+    '"': "shift-0x28",
+    "G": "shift-g",
+    "T": "shift-t",
+    "`": "0x29",
+    "~": "shift-0x29",
+    "H": "shift-h",
+    "U": "shift-u",
+    "\\": "0x2B",
+    "|": "shift-0x2B",
+    "I": "shift-i",
+    "V": "shift-v",
+    ",": "0x33",
+    "<": "shift-0x33",
+    "J": "shift-j",
+    "W": "shift-w",
+    ".": "0x34",
+    ">": "shift-0x34",
+    "K": "shift-k",
+    "X": "shift-x",
+    "/": "0x35",
+    "?": "shift-0x35",
+    "L": "shift-l",
+    "Y": "shift-y",
+    " ": "spc",
+    "M": "shift-m",
+    "Z": "shift-z",
+    "\n": "ret",
+    "!": "shift-0x02",
+    "@": "shift-0x03",
+    "#": "shift-0x04",
+    "$": "shift-0x05",
+    "%": "shift-0x06",
+    "^": "shift-0x07",
+    "&": "shift-0x08",
+    "*": "shift-0x09",
+    "(": "shift-0x0A",
+    ")": "shift-0x0B",
+}
+
+
+def make_command(args: list) -> str:
+    return " ".join(map(shlex.quote, (map(str, args))))
+
+
+def _perform_ocr_on_screenshot(
+    screenshot_path: str, model_ids: Iterable[int]
+) -> List[str]:
+    if shutil.which("tesseract") is None:
+        raise Exception("OCR requested but enableOCR is false")
+
+    magick_args = (
+        "-filter Catrom -density 72 -resample 300 "
+        + "-contrast -normalize -despeckle -type grayscale "
+        + "-sharpen 1 -posterize 3 -negate -gamma 100 "
+        + "-blur 1x65535"
+    )
+
+    tess_args = "-c debug_file=/dev/null --psm 11"
+
+    cmd = f"convert {magick_args} '{screenshot_path}' 'tiff:{screenshot_path}.tiff'"
+    ret = subprocess.run(cmd, shell=True, capture_output=True)
+    if ret.returncode != 0:
+        raise Exception(f"TIFF conversion failed with exit code {ret.returncode}")
+
+    model_results = []
+    for model_id in model_ids:
+        cmd = f"tesseract '{screenshot_path}.tiff' - {tess_args} --oem '{model_id}'"
+        ret = subprocess.run(cmd, shell=True, capture_output=True)
+        if ret.returncode != 0:
+            raise Exception(f"OCR failed with exit code {ret.returncode}")
+        model_results.append(ret.stdout.decode("utf-8"))
+
+    return model_results
+
+
+def retry(fn: Callable, timeout: int = 900) -> None:
+    """Call the given function repeatedly, with 1 second intervals,
+    until it returns True or a timeout is reached.
+    """
+
+    for _ in range(timeout):
+        if fn(False):
+            return
+        time.sleep(1)
+
+    if not fn(True):
+        raise Exception(f"action timed out after {timeout} seconds")
+
+
+class StartCommand:
+    """The Base Start Command knows how to append the necessary
+    runtime qemu options as determined by a particular test driver
+    run. Any such start command is expected to happily receive and
+    append additional qemu args.
+    """
+
+    _cmd: str
+
+    def cmd(
+        self,
+        monitor_socket_path: Path,
+        qmp_socket_path: Path,
+        shell_socket_path: Path,
+        allow_reboot: bool = False,
+    ) -> str:
+        display_opts = ""
+        display_available = any(x in os.environ for x in ["DISPLAY", "WAYLAND_DISPLAY"])
+        if not display_available:
+            display_opts += " -nographic"
+
+        # qemu options
+        qemu_opts = (
+            " -device virtio-serial"
+            # Note: virtconsole will map to /dev/hvc0 in Linux guests
+            " -device virtconsole,chardev=shell"
+            " -device virtio-rng-pci"
+            " -serial stdio"
+        )
+        if not allow_reboot:
+            qemu_opts += " -no-reboot"
+        # TODO: qemu script already captures this env variable, legacy?
+        qemu_opts += " " + os.environ.get("QEMU_OPTS", "")
+
+        return (
+            f"{self._cmd}"
+            f" -qmp unix:{qmp_socket_path},server=on,wait=off"
+            f" -monitor unix:{monitor_socket_path}"
+            f" -chardev socket,id=shell,path={shell_socket_path}"
+            f"{qemu_opts}"
+            f"{display_opts}"
+        )
+
+    @staticmethod
+    def build_environment(
+        state_dir: Path,
+        shared_dir: Path,
+    ) -> dict:
+        # We make a copy to not update the current environment
+        env = dict(os.environ)
+        env.update(
+            {
+                "TMPDIR": str(state_dir),
+                "SHARED_DIR": str(shared_dir),
+                "USE_TMPDIR": "1",
+            }
+        )
+        return env
+
+    def run(
+        self,
+        state_dir: Path,
+        shared_dir: Path,
+        monitor_socket_path: Path,
+        qmp_socket_path: Path,
+        shell_socket_path: Path,
+        allow_reboot: bool,
+    ) -> subprocess.Popen:
+        return subprocess.Popen(
+            self.cmd(
+                monitor_socket_path, qmp_socket_path, shell_socket_path, allow_reboot
+            ),
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.STDOUT,
+            shell=True,
+            cwd=state_dir,
+            env=self.build_environment(state_dir, shared_dir),
+        )
+
+
+class NixStartScript(StartCommand):
+    """A start script from nixos/modules/virtualiation/qemu-vm.nix
+    that also satisfies the requirement of the BaseStartCommand.
+    These Nix commands have the particular characteristic that the
+    machine name can be extracted out of them via a regex match.
+    (Admittedly a _very_ implicit contract, evtl. TODO fix)
+    """
+
+    def __init__(self, script: str):
+        self._cmd = script
+
+    @property
+    def machine_name(self) -> str:
+        match = re.search("run-(.+)-vm$", self._cmd)
+        name = "machine"
+        if match:
+            name = match.group(1)
+        return name
+
+
+class LegacyStartCommand(StartCommand):
+    """Used in some places to create an ad-hoc machine instead of
+    using nix test instrumentation + module system for that purpose.
+    Legacy.
+    """
+
+    def __init__(
+        self,
+        netBackendArgs: Optional[str] = None,  # noqa: N803
+        netFrontendArgs: Optional[str] = None,  # noqa: N803
+        hda: Optional[Tuple[Path, str]] = None,
+        cdrom: Optional[str] = None,
+        usb: Optional[str] = None,
+        bios: Optional[str] = None,
+        qemuBinary: Optional[str] = None,  # noqa: N803
+        qemuFlags: Optional[str] = None,  # noqa: N803
+    ):
+        if qemuBinary is not None:
+            self._cmd = qemuBinary
+        else:
+            self._cmd = "qemu-kvm"
+
+        self._cmd += " -m 384"
+
+        # networking
+        net_backend = "-netdev user,id=net0"
+        net_frontend = "-device virtio-net-pci,netdev=net0"
+        if netBackendArgs is not None:
+            net_backend += "," + netBackendArgs
+        if netFrontendArgs is not None:
+            net_frontend += "," + netFrontendArgs
+        self._cmd += f" {net_backend} {net_frontend}"
+
+        # hda
+        hda_cmd = ""
+        if hda is not None:
+            hda_path = hda[0].resolve()
+            hda_interface = hda[1]
+            if hda_interface == "scsi":
+                hda_cmd += (
+                    f" -drive id=hda,file={hda_path},werror=report,if=none"
+                    " -device scsi-hd,drive=hda"
+                )
+            else:
+                hda_cmd += f" -drive file={hda_path},if={hda_interface},werror=report"
+        self._cmd += hda_cmd
+
+        # cdrom
+        if cdrom is not None:
+            self._cmd += f" -cdrom {cdrom}"
+
+        # usb
+        usb_cmd = ""
+        if usb is not None:
+            # https://github.com/qemu/qemu/blob/master/docs/usb2.txt
+            usb_cmd += (
+                " -device usb-ehci"
+                f" -drive id=usbdisk,file={usb},if=none,readonly"
+                " -device usb-storage,drive=usbdisk "
+            )
+        self._cmd += usb_cmd
+
+        # bios
+        if bios is not None:
+            self._cmd += f" -bios {bios}"
+
+        # qemu flags
+        if qemuFlags is not None:
+            self._cmd += f" {qemuFlags}"
+
+
+class Machine:
+    """A handle to the machine with this name, that also knows how to manage
+    the machine lifecycle with the help of a start script / command."""
+
+    name: str
+    out_dir: Path
+    tmp_dir: Path
+    shared_dir: Path
+    state_dir: Path
+    monitor_path: Path
+    qmp_path: Path
+    shell_path: Path
+
+    start_command: StartCommand
+    keep_vm_state: bool
+
+    process: Optional[subprocess.Popen]
+    pid: Optional[int]
+    monitor: Optional[socket.socket]
+    qmp_client: Optional[QMPSession]
+    shell: Optional[socket.socket]
+    serial_thread: Optional[threading.Thread]
+
+    booted: bool
+    connected: bool
+    # Store last serial console lines for use
+    # of wait_for_console_text
+    last_lines: Queue = Queue()
+    callbacks: List[Callable]
+
+    def __repr__(self) -> str:
+        return f"<Machine '{self.name}'>"
+
+    def __init__(
+        self,
+        out_dir: Path,
+        tmp_dir: Path,
+        start_command: StartCommand,
+        name: str = "machine",
+        keep_vm_state: bool = False,
+        callbacks: Optional[List[Callable]] = None,
+    ) -> None:
+        self.out_dir = out_dir
+        self.tmp_dir = tmp_dir
+        self.keep_vm_state = keep_vm_state
+        self.name = name
+        self.start_command = start_command
+        self.callbacks = callbacks if callbacks is not None else []
+
+        # set up directories
+        self.shared_dir = self.tmp_dir / "shared-xchg"
+        self.shared_dir.mkdir(mode=0o700, exist_ok=True)
+
+        self.state_dir = self.tmp_dir / f"vm-state-{self.name}"
+        self.monitor_path = self.state_dir / "monitor"
+        self.qmp_path = self.state_dir / "qmp"
+        self.shell_path = self.state_dir / "shell"
+        if (not self.keep_vm_state) and self.state_dir.exists():
+            self.cleanup_statedir()
+        self.state_dir.mkdir(mode=0o700, exist_ok=True)
+
+        self.process = None
+        self.pid = None
+        self.monitor = None
+        self.qmp_client = None
+        self.shell = None
+        self.serial_thread = None
+
+        self.booted = False
+        self.connected = False
+
+    @staticmethod
+    def create_startcommand(args: Dict[str, str]) -> StartCommand:
+        rootlog.warning(
+            "Using legacy create_startcommand(), "
+            "please use proper nix test vm instrumentation, instead "
+            "to generate the appropriate nixos test vm qemu startup script"
+        )
+        hda = None
+        if args.get("hda"):
+            hda_arg: str = args.get("hda", "")
+            hda_arg_path: Path = Path(hda_arg)
+            hda = (hda_arg_path, args.get("hdaInterface", ""))
+        return LegacyStartCommand(
+            netBackendArgs=args.get("netBackendArgs"),
+            netFrontendArgs=args.get("netFrontendArgs"),
+            hda=hda,
+            cdrom=args.get("cdrom"),
+            usb=args.get("usb"),
+            bios=args.get("bios"),
+            qemuBinary=args.get("qemuBinary"),
+            qemuFlags=args.get("qemuFlags"),
+        )
+
+    def is_up(self) -> bool:
+        return self.booted and self.connected
+
+    def log(self, msg: str) -> None:
+        rootlog.log(msg, {"machine": self.name})
+
+    def log_serial(self, msg: str) -> None:
+        rootlog.log_serial(msg, self.name)
+
+    def nested(self, msg: str, attrs: Dict[str, str] = {}) -> _GeneratorContextManager:
+        my_attrs = {"machine": self.name}
+        my_attrs.update(attrs)
+        return rootlog.nested(msg, my_attrs)
+
+    def wait_for_monitor_prompt(self) -> str:
+        assert self.monitor is not None
+        answer = ""
+        while True:
+            undecoded_answer = self.monitor.recv(1024)
+            if not undecoded_answer:
+                break
+            answer += undecoded_answer.decode()
+            if answer.endswith("(qemu) "):
+                break
+        return answer
+
+    def send_monitor_command(self, command: str) -> str:
+        """
+        Send a command to the QEMU monitor. This allows attaching
+        virtual USB disks to a running machine, among other things.
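+
+        For example (an illustrative monitor command; any QEMU monitor
+        command can be sent this way):
+
+        ```py
+        # list the virtual USB devices attached to the machine
+        machine.send_monitor_command("info usb")
+        ```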
+        """
+        self.run_callbacks()
+        message = f"{command}\n".encode()
+        assert self.monitor is not None
+        self.monitor.send(message)
+        return self.wait_for_monitor_prompt()
+
+    def wait_for_unit(
+        self, unit: str, user: Optional[str] = None, timeout: int = 900
+    ) -> None:
+        """
+        Wait for a systemd unit to get into "active" state.
+        Throws exceptions on "failed" and "inactive" states as well as after
+        timing out.
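+
+        For example (the unit and user names are illustrative):
+
+        ```py
+        # wait for a system unit
+        machine.wait_for_unit("nginx.service")
+
+        # wait for a user unit of `alice` with a shorter timeout
+        machine.wait_for_unit("xdg-desktop-portal.service", "alice", timeout=60)
+        ```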
+        """
+
+        def check_active(_: Any) -> bool:
+            info = self.get_unit_info(unit, user)
+            state = info["ActiveState"]
+            if state == "failed":
+                raise Exception(f'unit "{unit}" reached state "{state}"')
+
+            if state == "inactive":
+                status, jobs = self.systemctl("list-jobs --full 2>&1", user)
+                if "No jobs" in jobs:
+                    info = self.get_unit_info(unit, user)
+                    if info["ActiveState"] == state:
+                        raise Exception(
+                            f'unit "{unit}" is inactive and there are no pending jobs'
+                        )
+
+            return state == "active"
+
+        with self.nested(
+            f"waiting for unit {unit}"
+            + (f" with user {user}" if user is not None else "")
+        ):
+            retry(check_active, timeout)
+
+    def get_unit_info(self, unit: str, user: Optional[str] = None) -> Dict[str, str]:
+        status, lines = self.systemctl(f'--no-pager show "{unit}"', user)
+        if status != 0:
+            raise Exception(
+                f'retrieving systemctl info for unit "{unit}"'
+                + ("" if user is None else f' under user "{user}"')
+                + f" failed with exit code {status}"
+            )
+
+        line_pattern = re.compile(r"^([^=]+)=(.*)$")
+
+        def tuple_from_line(line: str) -> Tuple[str, str]:
+            match = line_pattern.match(line)
+            assert match is not None
+            return match[1], match[2]
+
+        return dict(
+            tuple_from_line(line)
+            for line in lines.split("\n")
+            if line_pattern.match(line)
+        )
+
+    def systemctl(self, q: str, user: Optional[str] = None) -> Tuple[int, str]:
+        """
+        Runs `systemctl` commands with optional support for
+        `systemctl --user`
+
+        ```py
+        # run `systemctl list-jobs --no-pager`
+        machine.systemctl("list-jobs --no-pager")
+
+        # spawn a shell for `any-user` and run
+        # `systemctl --user list-jobs --no-pager`
+        machine.systemctl("list-jobs --no-pager", "any-user")
+        ```
+        """
+        if user is not None:
+            q = q.replace("'", "\\'")
+            return self.execute(
+                f"su -l {user} --shell /bin/sh -c "
+                "$'XDG_RUNTIME_DIR=/run/user/`id -u` "
+                f"systemctl --user {q}'"
+            )
+        return self.execute(f"systemctl {q}")
+
+    def require_unit_state(self, unit: str, require_state: str = "active") -> None:
+        with self.nested(
+            f"checking if unit '{unit}' has reached state '{require_state}'"
+        ):
+            info = self.get_unit_info(unit)
+            state = info["ActiveState"]
+            if state != require_state:
+                raise Exception(
+                    f"Expected unit '{unit}' to to be in state "
+                    f"'{require_state}' but it is in state '{state}'"
+                )
+
+    def _next_newline_closed_block_from_shell(self) -> str:
+        assert self.shell
+        output_buffer = []
+        while True:
+            # This receives up to 4096 bytes from the socket
+            chunk = self.shell.recv(4096)
+            if not chunk:
+                # Probably a broken pipe, return the output we have
+                break
+
+            decoded = chunk.decode()
+            output_buffer += [decoded]
+            if decoded[-1] == "\n":
+                break
+        return "".join(output_buffer)
+
+    def execute(
+        self,
+        command: str,
+        check_return: bool = True,
+        check_output: bool = True,
+        timeout: Optional[int] = 900,
+    ) -> Tuple[int, str]:
+        """
+        Execute a shell command, returning a list `(status, stdout)`.
+
+        Commands are run with `set -euo pipefail` set:
+
+        -   If several commands are separated by `;` and one fails, the
+            command as a whole will fail.
+
+        -   For pipelines, the last non-zero exit status will be returned
+            (if there is one; otherwise zero will be returned).
+
+        -   Dereferencing unset variables fails the command.
+
+        -   It will wait for stdout to be closed.
+
+        If the command detaches, it must close stdout, as `execute` will wait
+        for this to consume all output reliably. This can be achieved by
+        redirecting stdout to stderr `>&2`, to `/dev/console`, `/dev/null` or
+        a file. Examples of detaching commands are `sleep 365d &`, where the
+        shell forks a new process that can write to stdout, and `xclip -i`, where
+        the `xclip` command itself forks without closing stdout.
+
+        Takes an optional parameter `check_return` that defaults to `True`.
+        Setting this parameter to `False` skips the check for the return code,
+        and -1 is returned instead. This can be used for commands that shut down
+        the VM and would therefore break the pipe that would be used for
+        retrieving the return code.
+
+        A timeout for the command can be specified (in seconds) using the optional
+        `timeout` parameter, e.g., `execute(cmd, timeout=10)` or
+        `execute(cmd, timeout=None)`. The default is 900 seconds.
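+
+        For example (a sketch; the commands themselves are arbitrary):
+
+        ```py
+        # capture both the exit status and the output
+        status, out = machine.execute("cat /etc/os-release")
+
+        # detach a long-running process, redirecting stdout so `execute` returns
+        machine.execute("sleep 365d >&2 &", check_return=False, check_output=False)
+        ```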
+        """
+        self.run_callbacks()
+        self.connect()
+
+        # Always run command with shell opts
+        command = f"set -euo pipefail; {command}"
+
+        timeout_str = ""
+        if timeout is not None:
+            timeout_str = f"timeout {timeout}"
+
+        # While sh is bash on NixOS, this is not the case for every distro.
+        # We explicitly call bash here to allow the driver to boot other distros as well.
+        out_command = (
+            f"{timeout_str} bash -c {shlex.quote(command)} | (base64 -w 0; echo)\n"
+        )
+
+        assert self.shell
+        self.shell.send(out_command.encode())
+
+        if not check_output:
+            return (-2, "")
+
+        # Get the output
+        output = base64.b64decode(self._next_newline_closed_block_from_shell())
+
+        if not check_return:
+            return (-1, output.decode())
+
+        # Get the return code
+        self.shell.send(b"echo ${PIPESTATUS[0]}\n")
+        rc = int(self._next_newline_closed_block_from_shell().strip())
+
+        return (rc, output.decode(errors="replace"))
+
+    def shell_interact(self, address: Optional[str] = None) -> None:
+        """
+        Allows you to directly interact with the guest shell. This should
+        only be used during test development, not in production tests.
+        Killing the interactive session with `Ctrl-d` or `Ctrl-c` also ends
+        the guest session.
+        """
+        self.connect()
+
+        if address is None:
+            address = "READLINE,prompt=$ "
+            self.log("Terminal is ready (there is no initial prompt):")
+
+        assert self.shell
+        try:
+            subprocess.run(
+                ["socat", address, f"FD:{self.shell.fileno()}"],
+                pass_fds=[self.shell.fileno()],
+            )
+            # allow users to cancel this command without breaking the test
+        except KeyboardInterrupt:
+            pass
+
+    def console_interact(self) -> None:
+        """
+        Allows you to directly interact with QEMU's stdin, by forwarding
+        terminal input to the QEMU process.
+        This is for use with the interactive test driver, not for production
+        tests, which run unattended.
+        Output from QEMU is only read line-wise. `Ctrl-c` kills QEMU and
+        `Ctrl-d` closes the console and returns to the test runner.
+        """
+        self.log("Terminal is ready (there is no prompt):")
+
+        assert self.process
+        assert self.process.stdin
+
+        while True:
+            try:
+                char = sys.stdin.buffer.read(1)
+            except KeyboardInterrupt:
+                break
+            if char == b"":  # ctrl+d
+                self.log("Closing connection to the console")
+                break
+            self.send_console(char.decode())
+
+    def succeed(self, *commands: str, timeout: Optional[int] = None) -> str:
+        """
+        Execute a shell command, raising an exception if the exit status is
+        not zero, otherwise returning the standard output. Similar to `execute`,
+        except that the timeout is `None` by default. See `execute` for details on
+        command execution.
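+
+        For example (illustrative commands):
+
+        ```py
+        # both commands must exit with status 0; their output is concatenated
+        out = machine.succeed("uname -a", "id -u")
+        ```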
+        """
+        output = ""
+        for command in commands:
+            with self.nested(f"must succeed: {command}"):
+                (status, out) = self.execute(command, timeout=timeout)
+                if status != 0:
+                    self.log(f"output: {out}")
+                    raise Exception(f"command `{command}` failed (exit code {status})")
+                output += out
+        return output
+
+    def fail(self, *commands: str, timeout: Optional[int] = None) -> str:
+        """
+        Like `succeed`, but raising an exception if the command returns a zero
+        status.
+        """
+        output = ""
+        for command in commands:
+            with self.nested(f"must fail: {command}"):
+                (status, out) = self.execute(command, timeout=timeout)
+                if status == 0:
+                    raise Exception(f"command `{command}` unexpectedly succeeded")
+                output += out
+        return output
+
+    def wait_until_succeeds(self, command: str, timeout: int = 900) -> str:
+        """
+        Repeat a shell command at 1-second intervals until it succeeds.
+        Has a default timeout of 900 seconds which can be modified, e.g.
+        `wait_until_succeeds(cmd, timeout=10)`. See `execute` for details on
+        command execution.
+        Throws an exception on timeout.
+        """
+        output = ""
+
+        def check_success(_: Any) -> bool:
+            nonlocal output
+            status, output = self.execute(command, timeout=timeout)
+            return status == 0
+
+        with self.nested(f"waiting for success: {command}"):
+            retry(check_success, timeout)
+            return output
+
+    def wait_until_fails(self, command: str, timeout: int = 900) -> str:
+        """
+        Like `wait_until_succeeds`, but repeating the command until it fails.
+        """
+        output = ""
+
+        def check_failure(_: Any) -> bool:
+            nonlocal output
+            status, output = self.execute(command, timeout=timeout)
+            return status != 0
+
+        with self.nested(f"waiting for failure: {command}"):
+            retry(check_failure, timeout)
+            return output
+
+    def wait_for_shutdown(self) -> None:
+        if not self.booted:
+            return
+
+        with self.nested("waiting for the VM to power off"):
+            sys.stdout.flush()
+            assert self.process
+            self.process.wait()
+
+            self.pid = None
+            self.booted = False
+            self.connected = False
+
+    def get_tty_text(self, tty: str) -> str:
+        status, output = self.execute(
+            f"fold -w$(stty -F /dev/tty{tty} size | "
+            f"awk '{{print $2}}') /dev/vcs{tty}"
+        )
+        return output
+
+    def wait_until_tty_matches(self, tty: str, regexp: str, timeout: int = 900) -> None:
+        """Wait until the visible output on the chosen TTY matches regular
+        expression. Throws an exception on timeout.
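+
+        For example (illustrative; waits for a login prompt on tty1):
+
+        ```py
+        machine.wait_until_tty_matches("1", "login: ")
+        ```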
+        """
+        matcher = re.compile(regexp)
+
+        def tty_matches(last: bool) -> bool:
+            text = self.get_tty_text(tty)
+            if last:
+                self.log(
+                    f"Last chance to match /{regexp}/ on TTY{tty}, "
+                    f"which currently contains: {text}"
+                )
+            return len(matcher.findall(text)) > 0
+
+        with self.nested(f"waiting for {regexp} to appear on tty {tty}"):
+            retry(tty_matches, timeout)
+
+    def send_chars(self, chars: str, delay: Optional[float] = 0.01) -> None:
+        """
+        Simulate typing a sequence of characters on the virtual keyboard,
+        e.g., `send_chars("foobar\n")` will type the string `foobar`
+        followed by the Enter key.
+        """
+        with self.nested(f"sending keys {repr(chars)}"):
+            for char in chars:
+                self.send_key(char, delay, log=False)
+
+    def wait_for_file(self, filename: str, timeout: int = 900) -> None:
+        """
+        Waits until the file exists in the machine's file system.
+        """
+
+        def check_file(_: Any) -> bool:
+            status, _ = self.execute(f"test -e {filename}")
+            return status == 0
+
+        with self.nested(f"waiting for file '{filename}'"):
+            retry(check_file, timeout)
+
+    def wait_for_open_port(
+        self, port: int, addr: str = "localhost", timeout: int = 900
+    ) -> None:
+        """
+        Wait until a process is listening on the given TCP port and IP address
+        (default `localhost`).
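+
+        For example (port and address are illustrative):
+
+        ```py
+        machine.wait_for_open_port(80)
+        machine.wait_for_open_port(8080, addr="127.0.0.1", timeout=60)
+        ```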
+        """
+
+        def port_is_open(_: Any) -> bool:
+            status, _ = self.execute(f"nc -z {addr} {port}")
+            return status == 0
+
+        with self.nested(f"waiting for TCP port {port} on {addr}"):
+            retry(port_is_open, timeout)
+
+    def wait_for_open_unix_socket(
+        self, addr: str, is_datagram: bool = False, timeout: int = 900
+    ) -> None:
+        """
+        Wait until a process is listening on the given UNIX-domain socket
+        (defaults to a UNIX-domain stream socket).
+        """
+
+        nc_flags = [
+            "-z",
+            "-uU" if is_datagram else "-U",
+        ]
+
+        def socket_is_open(_: Any) -> bool:
+            status, _ = self.execute(f"nc {' '.join(nc_flags)} {addr}")
+            return status == 0
+
+        with self.nested(
+            f"waiting for UNIX-domain {'datagram' if is_datagram else 'stream'} on '{addr}'"
+        ):
+            retry(socket_is_open, timeout)
+
+    def wait_for_closed_port(
+        self, port: int, addr: str = "localhost", timeout: int = 900
+    ) -> None:
+        """
+        Wait until nobody is listening on the given TCP port and IP address
+        (default `localhost`).
+        """
+
+        def port_is_closed(_: Any) -> bool:
+            status, _ = self.execute(f"nc -z {addr} {port}")
+            return status != 0
+
+        with self.nested(f"waiting for TCP port {port} on {addr} to be closed"):
+            retry(port_is_closed, timeout)
+
+    def start_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
+        return self.systemctl(f"start {jobname}", user)
+
+    def stop_job(self, jobname: str, user: Optional[str] = None) -> Tuple[int, str]:
+        return self.systemctl(f"stop {jobname}", user)
+
+    def wait_for_job(self, jobname: str) -> None:
+        self.wait_for_unit(jobname)
+
+    def connect(self) -> None:
+        def shell_ready(timeout_secs: int) -> bool:
+            """We sent some data from the backdoor service running on the guest
+            to indicate that the backdoor shell is ready.
+            As soon as we read some data from the socket here, we assume that
+            our root shell is operational.
+            """
+            (ready, _, _) = select.select([self.shell], [], [], timeout_secs)
+            return bool(ready)
+
+        if self.connected:
+            return
+
+        with self.nested("waiting for the VM to finish booting"):
+            self.start()
+
+            assert self.shell
+
+            tic = time.time()
+            # TODO: do we want to bail after a set number of attempts?
+            while not shell_ready(timeout_secs=30):
+                self.log("Guest root shell did not produce any data yet...")
+                self.log(
+                    "  To debug, enter the VM and run 'systemctl status backdoor.service'."
+                )
+
+            while True:
+                chunk = self.shell.recv(1024)
+                # No need to print empty strings, it means we are waiting.
+                if len(chunk) == 0:
+                    continue
+                self.log(f"Guest shell says: {chunk!r}")
+                # NOTE: for this to work, nothing must be printed after this line!
+                if b"Spawning backdoor root shell..." in chunk:
+                    break
+
+            toc = time.time()
+
+            self.log("connected to guest root shell")
+            self.log(f"(connecting took {toc - tic:.2f} seconds)")
+            self.connected = True
+
+    def screenshot(self, filename: str) -> None:
+        """
+        Take a picture of the display of the virtual machine, in PNG format.
+        The screenshot will be available in the derivation output.
+        """
+        if "." not in filename:
+            filename += ".png"
+        if "/" not in filename:
+            filename = os.path.join(self.out_dir, filename)
+        tmp = f"{filename}.ppm"
+
+        with self.nested(
+            f"making screenshot {filename}",
+            {"image": os.path.basename(filename)},
+        ):
+            self.send_monitor_command(f"screendump {tmp}")
+            ret = subprocess.run(f"pnmtopng '{tmp}' > '{filename}'", shell=True)
+            os.unlink(tmp)
+            if ret.returncode != 0:
+                raise Exception("Cannot convert screenshot")
+
+    def copy_from_host_via_shell(self, source: str, target: str) -> None:
+        """Copy a file from the host into the guest by piping it over the
+        shell into the destination file. Works without a host-guest shared folder.
+        Prefer copy_from_host whenever possible.
+        """
+        with open(source, "rb") as fh:
+            content_b64 = base64.b64encode(fh.read()).decode()
+            self.succeed(
+                f"mkdir -p $(dirname {target})",
+                f"echo -n {content_b64} | base64 -d > {target}",
+            )
+
+    def copy_from_host(self, source: str, target: str) -> None:
+        """
+        Copies a file from host to machine, e.g.,
+        `copy_from_host("myfile", "/etc/my/important/file")`.
+
+        The first argument is the file on the host. Note that the "host" refers
+        to the environment in which the test driver runs, which is typically the
+        Nix build sandbox.
+
+        The second argument is the location of the file on the machine that will
+        be written to.
+
+        The file is copied via the `shared_dir` directory which is shared among
+        all the VMs (using a temporary directory).
+        The access rights bits will mimic the ones from the host file and
+        user:group will be root:root.
+        """
+        host_src = Path(source)
+        vm_target = Path(target)
+        with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
+            shared_temp = Path(shared_td)
+            host_intermediate = shared_temp / host_src.name
+            vm_shared_temp = Path("/tmp/shared") / shared_temp.name
+            vm_intermediate = vm_shared_temp / host_src.name
+
+            self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
+            if host_src.is_dir():
+                shutil.copytree(host_src, host_intermediate)
+            else:
+                shutil.copy(host_src, host_intermediate)
+            self.succeed(make_command(["mkdir", "-p", vm_target.parent]))
+            self.succeed(make_command(["cp", "-r", vm_intermediate, vm_target]))
+
+    def copy_from_vm(self, source: str, target_dir: str = "") -> None:
+        """Copy a file from the VM (specified by an in-VM source path) to a path
+        relative to `$out`. The file is copied via the `shared_dir` shared among
+        all the VMs (using a temporary directory).
+        """
+        # Compute the source, target, and intermediate shared file names
+        vm_src = Path(source)
+        with tempfile.TemporaryDirectory(dir=self.shared_dir) as shared_td:
+            shared_temp = Path(shared_td)
+            vm_shared_temp = Path("/tmp/shared") / shared_temp.name
+            vm_intermediate = vm_shared_temp / vm_src.name
+            intermediate = shared_temp / vm_src.name
+            # Copy the file to the shared directory inside VM
+            self.succeed(make_command(["mkdir", "-p", vm_shared_temp]))
+            self.succeed(make_command(["cp", "-r", vm_src, vm_intermediate]))
+            abs_target = self.out_dir / target_dir / vm_src.name
+            abs_target.parent.mkdir(exist_ok=True, parents=True)
+            # Copy the file from the shared directory outside VM
+            if intermediate.is_dir():
+                shutil.copytree(intermediate, abs_target)
+            else:
+                shutil.copy(intermediate, abs_target)
+
+    def dump_tty_contents(self, tty: str) -> None:
+        """Debugging: Dump the contents of the TTY<n>"""
+        self.execute(f"fold -w 80 /dev/vcs{tty} | systemd-cat")
+
+    def _get_screen_text_variants(self, model_ids: Iterable[int]) -> List[str]:
+        with tempfile.TemporaryDirectory() as tmpdir:
+            screenshot_path = os.path.join(tmpdir, "ppm")
+            self.send_monitor_command(f"screendump {screenshot_path}")
+            return _perform_ocr_on_screenshot(screenshot_path, model_ids)
+
+    def get_screen_text_variants(self) -> List[str]:
+        """
+        Return a list of different interpretations of what is currently
+        visible on the machine's screen using optical character
+        recognition. The number and order of the interpretations is not
+        specified and is subject to change, but if no exception is raised at
+        least one will be returned.
+
+        ::: {.note}
+        This requires [`enableOCR`](#test-opt-enableOCR) to be set to `true`.
+        :::
+        """
+        return self._get_screen_text_variants([0, 1, 2])
+
+    def get_screen_text(self) -> str:
+        """
+        Return a textual representation of what is currently visible on the
+        machine's screen using optical character recognition.
+
+        ::: {.note}
+        This requires [`enableOCR`](#test-opt-enableOCR) to be set to `true`.
+        :::
+        """
+        return self._get_screen_text_variants([2])[0]
+
+    def wait_for_text(self, regex: str, timeout: int = 900) -> None:
+        """
+        Wait until the supplied regular expression matches the textual
+        contents of the screen by using optical character recognition (see
+        `get_screen_text` and `get_screen_text_variants`).
+
+        ::: {.note}
+        This requires [`enableOCR`](#test-opt-enableOCR) to be set to `true`.
+        :::
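+
+        For example (the pattern is illustrative):
+
+        ```py
+        machine.wait_for_text("Welcome to NixOS")
+        ```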
+        """
+
+        def screen_matches(last: bool) -> bool:
+            variants = self.get_screen_text_variants()
+            for text in variants:
+                if re.search(regex, text) is not None:
+                    return True
+
+            if last:
+                self.log(f"Last OCR attempt failed. Text was: {variants}")
+
+            return False
+
+        with self.nested(f"waiting for {regex} to appear on screen"):
+            retry(screen_matches, timeout)
+
+    def wait_for_console_text(self, regex: str, timeout: int | None = None) -> None:
+        """
+        Wait until the supplied regular expression matches a line of the
+        serial console output.
+        This method is useful when OCR is not possible or inaccurate.
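+
+        For example (the pattern and timeout are illustrative):
+
+        ```py
+        machine.wait_for_console_text(r"Welcome to NixOS", timeout=120)
+        ```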
+        """
+        # Buffer the console output, this is needed
+        # to match multiline regexes.
+        console = io.StringIO()
+
+        def console_matches(_: Any) -> bool:
+            nonlocal console
+            try:
+                # This will return as soon as possible and
+                # sleep 1 second.
+                console.write(self.last_lines.get(block=False))
+            except queue.Empty:
+                pass
+            console.seek(0)
+            matches = re.search(regex, console.read())
+            return matches is not None
+
+        with self.nested(f"waiting for {regex} to appear on console"):
+            if timeout is not None:
+                retry(console_matches, timeout)
+            else:
+                while not console_matches(False):
+                    pass
+
+    def send_key(
+        self, key: str, delay: Optional[float] = 0.01, log: Optional[bool] = True
+    ) -> None:
+        """
+        Simulate pressing keys on the virtual keyboard, e.g.,
+        `send_key("ctrl-alt-delete")`.
+
+        Please also refer to the QEMU documentation for more information on the
+        input syntax: https://en.wikibooks.org/wiki/QEMU/Monitor#sendkey_keys
+        """
+        key = CHAR_TO_KEY.get(key, key)
+        context = self.nested(f"sending key {repr(key)}") if log else nullcontext()
+        with context:
+            self.send_monitor_command(f"sendkey {key}")
+            if delay is not None:
+                time.sleep(delay)
+
+    def send_console(self, chars: str) -> None:
+        r"""
+        Send keys to the kernel console. This allows interaction with the systemd
+        emergency mode, for example. Takes a string that is sent, e.g.,
+        `send_console("\n\nsystemctl default\n")`.
+        """
+        assert self.process
+        assert self.process.stdin
+        self.process.stdin.write(chars.encode())
+        self.process.stdin.flush()
+
+    def start(self, allow_reboot: bool = False) -> None:
+        """
+        Start the virtual machine. This method is asynchronous --- it does
+        not wait for the machine to finish booting.
+        """
+        if self.booted:
+            return
+
+        self.log("starting vm")
+
+        def clear(path: Path) -> Path:
+            if path.exists():
+                path.unlink()
+            return path
+
+        def create_socket(path: Path) -> socket.socket:
+            s = socket.socket(family=socket.AF_UNIX, type=socket.SOCK_STREAM)
+            s.bind(str(path))
+            s.listen(1)
+            return s
+
+        monitor_socket = create_socket(clear(self.monitor_path))
+        shell_socket = create_socket(clear(self.shell_path))
+        self.process = self.start_command.run(
+            self.state_dir,
+            self.shared_dir,
+            self.monitor_path,
+            self.qmp_path,
+            self.shell_path,
+            allow_reboot,
+        )
+        self.monitor, _ = monitor_socket.accept()
+        self.shell, _ = shell_socket.accept()
+        self.qmp_client = QMPSession.from_path(self.qmp_path)
+
+        # Store the last serial console lines for use
+        # by wait_for_console_text
+        self.last_lines: Queue = Queue()
+
+        def process_serial_output() -> None:
+            assert self.process
+            assert self.process.stdout
+            for _line in self.process.stdout:
+                # Ignore undecodable bytes that may occur in boot menus
+                line = _line.decode(errors="ignore").replace("\r", "").rstrip()
+                self.last_lines.put(line)
+                self.log_serial(line)
+
+        self.serial_thread = threading.Thread(target=process_serial_output)
+        self.serial_thread.start()
+
+        self.wait_for_monitor_prompt()
+
+        self.pid = self.process.pid
+        self.booted = True
+
+        self.log(f"QEMU running (pid {self.pid})")
+
+    def cleanup_statedir(self) -> None:
+        shutil.rmtree(self.state_dir)
+        rootlog.log(f"deleting VM state directory {self.state_dir}")
+        rootlog.log("if you want to keep the VM state, pass --keep-vm-state")
+
+    def shutdown(self) -> None:
+        """
+        Shut down the machine, waiting for the VM to exit.
+        """
+        if not self.booted:
+            return
+
+        assert self.shell
+        self.shell.send(b"poweroff\n")
+        self.wait_for_shutdown()
+
+    def crash(self) -> None:
+        """
+        Simulate a sudden power failure by telling the VM to exit immediately.
+        """
+        if not self.booted:
+            return
+
+        self.log("forced crash")
+        self.send_monitor_command("quit")
+        self.wait_for_shutdown()
+
+    def reboot(self) -> None:
+        """Press Ctrl+Alt+Delete in the guest.
+
+        Prepares the machine to be reconnected, which is useful if the
+        machine was started with `allow_reboot = True`.
+        """
+        self.send_key("ctrl-alt-delete")
+        self.connected = False
+
+    def wait_for_x(self, timeout: int = 900) -> None:
+        """
+        Wait until it is possible to connect to the X server.
+        """
+
+        def check_x(_: Any) -> bool:
+            cmd = (
+                "journalctl -b SYSLOG_IDENTIFIER=systemd | "
+                + 'grep "Reached target Current graphical"'
+            )
+            status, _ = self.execute(cmd)
+            if status != 0:
+                return False
+            status, _ = self.execute("[ -e /tmp/.X11-unix/X0 ]")
+            return status == 0
+
+        with self.nested("waiting for the X11 server"):
+            retry(check_x, timeout)
+
+    def get_window_names(self) -> List[str]:
+        return self.succeed(
+            r"xwininfo -root -tree | sed 's/.*0x[0-9a-f]* \"\([^\"]*\)\".*/\1/; t; d'"
+        ).splitlines()
+
+    def wait_for_window(self, regexp: str, timeout: int = 900) -> None:
+        """
+        Wait until an X11 window has appeared whose name matches the given
+        regular expression, e.g., `wait_for_window("Terminal")`.
+        """
+        pattern = re.compile(regexp)
+
+        def window_is_visible(last_try: bool) -> bool:
+            names = self.get_window_names()
+            if last_try:
+                self.log(
+                    f"Last chance to match {regexp} on the window list,"
+                    + " which currently contains: "
+                    + ", ".join(names)
+                )
+            return any(pattern.search(name) for name in names)
+
+        with self.nested("waiting for a window to appear"):
+            retry(window_is_visible, timeout)
+
+    def sleep(self, secs: int) -> None:
+        # We want to sleep in *guest* time, not *host* time.
+        self.succeed(f"sleep {secs}")
+
+    def forward_port(self, host_port: int = 8080, guest_port: int = 80) -> None:
+        """
+        Forward a TCP port on the host to a TCP port on the guest.
+        Useful during interactive testing.
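+
+        For example (illustrative port numbers):
+
+        ```py
+        # make the guest's web server reachable on the host's port 8080
+        machine.forward_port(host_port=8080, guest_port=80)
+        ```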
+        """
+        self.send_monitor_command(f"hostfwd_add tcp::{host_port}-:{guest_port}")
+
+    def block(self) -> None:
+        """
+        Simulate unplugging the Ethernet cable that connects the machine to
+        the other machines.
+        This happens by shutting down eth1 (the multicast interface used to talk
+        to the other VMs). eth0 is kept online to still enable the test driver
+        to communicate with the machine.
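+
+        For example (a sketch; `server` is a hypothetical peer machine):
+
+        ```py
+        machine.block()
+        machine.fail("ping -c 1 server")
+        machine.unblock()
+        machine.succeed("ping -c 1 server")
+        ```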
+        """
+        self.send_monitor_command("set_link virtio-net-pci.1 off")
+
+    def unblock(self) -> None:
+        """
+        Undo the effect of `block`.
+        """
+        self.send_monitor_command("set_link virtio-net-pci.1 on")
+
+    def release(self) -> None:
+        if self.pid is None:
+            return
+        rootlog.info(f"kill machine (pid {self.pid})")
+        assert self.process
+        assert self.shell
+        assert self.monitor
+        assert self.serial_thread
+
+        self.process.terminate()
+        self.shell.close()
+        self.monitor.close()
+        self.serial_thread.join()
+
+    def run_callbacks(self) -> None:
+        for callback in self.callbacks:
+            callback()
+
+    def switch_root(self) -> None:
+        """
+        Transition from stage 1 to stage 2. This requires the
+        machine to be configured with `testing.initrdBackdoor = true`
+        and `boot.initrd.systemd.enable = true`.
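+
+        A minimal sketch of how this is typically used in a test script
+        (the target unit is illustrative):
+
+        ```py
+        machine.switch_root()
+        machine.wait_for_unit("multi-user.target")
+        ```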
+        """
+        self.wait_for_unit("initrd.target")
+        self.execute(
+            "systemctl isolate --no-block initrd-switch-root.target 2>/dev/null >/dev/null",
+            check_return=False,
+            check_output=False,
+        )
+        self.wait_for_console_text(r"systemd\[1\]:.*Switching root\.")
+        self.connected = False
+        self.connect()
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/polling_condition.py b/nixpkgs/nixos/lib/test-driver/test_driver/polling_condition.py
new file mode 100644
index 000000000000..12cbad69e34e
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/polling_condition.py
@@ -0,0 +1,92 @@
+import time
+from math import isfinite
+from typing import Callable, Optional
+
+from .logger import rootlog
+
+
+class PollingConditionError(Exception):
+    pass
+
+
+class PollingCondition:
+    condition: Callable[[], bool]
+    seconds_interval: float
+    description: Optional[str]
+
+    last_called: float
+    entry_count: int
+
+    def __init__(
+        self,
+        condition: Callable[[], Optional[bool]],
+        seconds_interval: float = 2.0,
+        description: Optional[str] = None,
+    ):
+        self.condition = condition  # type: ignore
+        self.seconds_interval = seconds_interval
+
+        if description is None:
+            if condition.__doc__:
+                self.description = condition.__doc__
+            else:
+                self.description = condition.__name__
+        else:
+            self.description = str(description)
+
+        self.last_called = float("-inf")
+        self.entry_count = 0
+
+    def check(self, force: bool = False) -> bool:
+        if (self.entered or not self.overdue) and not force:
+            return True
+
+        with self, rootlog.nested(self.nested_message):
+            time_since_last = time.monotonic() - self.last_called
+            last_message = (
+                f"Time since last: {time_since_last:.2f}s"
+                if isfinite(time_since_last)
+                else "(not called yet)"
+            )
+
+            rootlog.info(last_message)
+            try:
+                res = self.condition()  # type: ignore
+            except Exception:
+                res = False
+            res = res is None or res
+            rootlog.info(self.status_message(res))
+            return res
+
+    def maybe_raise(self) -> None:
+        if not self.check():
+            raise PollingConditionError(self.status_message(False))
+
+    def status_message(self, status: bool) -> str:
+        return f"Polling condition {'succeeded' if status else 'failed'}: {self.description}"
+
+    @property
+    def nested_message(self) -> str:
+        nested_message = ["Checking polling condition"]
+        if self.description is not None:
+            nested_message.append(repr(self.description))
+
+        return " ".join(nested_message)
+
+    @property
+    def overdue(self) -> bool:
+        return self.last_called + self.seconds_interval < time.monotonic()
+
+    @property
+    def entered(self) -> bool:
+        # entry_count should never dip *below* zero
+        assert self.entry_count >= 0
+        return self.entry_count > 0
+
+    def __enter__(self) -> None:
+        self.entry_count += 1
+
+    def __exit__(self, exc_type, exc_value, traceback) -> None:  # type: ignore
+        assert self.entered
+        self.entry_count -= 1
+        self.last_called = time.monotonic()
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/py.typed b/nixpkgs/nixos/lib/test-driver/test_driver/py.typed
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/py.typed
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/qmp.py b/nixpkgs/nixos/lib/test-driver/test_driver/qmp.py
new file mode 100644
index 000000000000..62ca6d7d5b80
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/qmp.py
@@ -0,0 +1,98 @@
+import json
+import logging
+import os
+import socket
+from collections.abc import Iterator
+from pathlib import Path
+from queue import Queue
+from typing import Any
+
+logger = logging.getLogger(__name__)
+
+
+class QMPAPIError(RuntimeError):
+    def __init__(self, message: dict[str, Any]):
+        assert "error" in message, "Not an error message!"
+        try:
+            self.class_name = message["class"]
+            self.description = message["desc"]
+            # NOTE: Some errors can occur before the Server is able to read the
+            # id member; in these cases the id member will not be part of the
+            # error response, even if provided by the client.
+            self.transaction_id = message.get("id")
+        except KeyError:
+            raise RuntimeError("Malformed QMP API error response")
+
+    def __str__(self) -> str:
+        return f"<QMP API error related to transaction {self.transaction_id} [{self.class_name}]: {self.description}>"
+
+
+class QMPSession:
+    def __init__(self, sock: socket.socket) -> None:
+        self.sock = sock
+        self.results: Queue[dict[str, str]] = Queue()
+        self.pending_events: Queue[dict[str, Any]] = Queue()
+        self.reader = sock.makefile("r")
+        self.writer = sock.makefile("w")
+        # Make the reader non-blocking so we can kind of select on it.
+        os.set_blocking(self.reader.fileno(), False)
+        hello = self._wait_for_new_result()
+        logger.debug(f"Got greeting from QMP API: {hello}")
+        # The greeting message format is:
+        # { "QMP": { "version": json-object, "capabilities": json-array } }
+        assert "QMP" in hello, f"Unexpected result: {hello}"
+        self.send("qmp_capabilities")
+
+    @classmethod
+    def from_path(cls, path: Path) -> "QMPSession":
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.connect(str(path))
+        return cls(sock)
+
+    def __del__(self) -> None:
+        self.sock.close()
+
+    def _wait_for_new_result(self) -> dict[str, str]:
+        assert self.results.empty(), "Results set is not empty, missed results!"
+        while self.results.empty():
+            self.read_pending_messages()
+        return self.results.get()
+
+    def read_pending_messages(self) -> None:
+        line = self.reader.readline()
+        if not line:
+            return
+        evt_or_result = json.loads(line)
+        logger.debug(f"Received a message: {evt_or_result}")
+
+        # It's a result
+        if "return" in evt_or_result or "QMP" in evt_or_result:
+            self.results.put(evt_or_result)
+        # It's an event
+        elif "event" in evt_or_result:
+            self.pending_events.put(evt_or_result)
+        else:
+            raise QMPAPIError(evt_or_result)
+
+    def wait_for_event(self, timeout: int = 10) -> dict[str, Any]:
+        while self.pending_events.empty():
+            self.read_pending_messages()
+
+        return self.pending_events.get(timeout=timeout)
+
+    def events(self, timeout: int = 10) -> Iterator[dict[str, Any]]:
+        while not self.pending_events.empty():
+            yield self.pending_events.get(timeout=timeout)
+
+    def send(self, cmd: str, args: dict[str, str] = {}) -> dict[str, str]:
+        self.read_pending_messages()
+        assert self.results.empty(), "Results set is not empty, missed results!"
+        data: dict[str, Any] = dict(execute=cmd)
+        if args != {}:
+            data["arguments"] = args
+
+        logger.debug(f"Sending {data} to QMP...")
+        json.dump(data, self.writer)
+        self.writer.write("\n")
+        self.writer.flush()
+        return self._wait_for_new_result()
diff --git a/nixpkgs/nixos/lib/test-driver/test_driver/vlan.py b/nixpkgs/nixos/lib/test-driver/test_driver/vlan.py
new file mode 100644
index 000000000000..ec9679108e58
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-driver/test_driver/vlan.py
@@ -0,0 +1,62 @@
+import io
+import os
+import pty
+import subprocess
+from pathlib import Path
+
+from test_driver.logger import rootlog
+
+
+class VLan:
+    """This class handles a VLAN that the run-vm scripts identify via its
+    number. The network's lifetime equals the object's lifetime.
+    """
+
+    nr: int
+    socket_dir: Path
+
+    process: subprocess.Popen
+    pid: int
+    fd: io.TextIOBase
+
+    def __repr__(self) -> str:
+        return f"<Vlan Nr. {self.nr}>"
+
+    def __init__(self, nr: int, tmp_dir: Path):
+        self.nr = nr
+        self.socket_dir = tmp_dir / f"vde{self.nr}.ctl"
+
+        # TODO: don't side-effect environment here
+        os.environ[f"QEMU_VDE_SOCKET_{self.nr}"] = str(self.socket_dir)
+
+        rootlog.info("start vlan")
+        pty_master, pty_slave = pty.openpty()
+
+        # The --hub is required for the scenario determined by
+        # nixos/tests/networking.nix vlan-ping.
+        # VLAN-tagged traffic (802.1Q) seems to be blocked if a vde_switch is
+        # used without hub mode (which floods packets to all ports).
+        self.process = subprocess.Popen(
+            ["vde_switch", "-s", self.socket_dir, "--dirmode", "0700", "--hub"],
+            stdin=pty_slave,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            shell=False,
+        )
+        self.pid = self.process.pid
+        self.fd = os.fdopen(pty_master, "w")
+        self.fd.write("version\n")
+
+        # TODO: the perl version checks if this can be read from
+        # and, if not, dies. We could hang here forever. Fix it.
+        assert self.process.stdout is not None
+        self.process.stdout.readline()
+        if not (self.socket_dir / "ctl").exists():
+            rootlog.error("cannot start vde_switch")
+
+        rootlog.info(f"running vlan (pid {self.pid}; ctl {self.socket_dir})")
+
+    def __del__(self) -> None:
+        rootlog.info(f"kill vlan (pid {self.pid})")
+        self.fd.close()
+        self.process.terminate()
diff --git a/nixpkgs/nixos/lib/test-script-prepend.py b/nixpkgs/nixos/lib/test-script-prepend.py
new file mode 100644
index 000000000000..15e59ce01047
--- /dev/null
+++ b/nixpkgs/nixos/lib/test-script-prepend.py
@@ -0,0 +1,42 @@
+# This file contains type hints that can be prepended to Nix test scripts so they can be type
+# checked.
+
+from test_driver.driver import Driver
+from test_driver.vlan import VLan
+from test_driver.machine import Machine
+from test_driver.logger import Logger
+from typing import Callable, Iterator, ContextManager, Optional, List, Dict, Any, Union
+from typing_extensions import Protocol
+from pathlib import Path
+
+
+class RetryProtocol(Protocol):
+    def __call__(self, fn: Callable, timeout: int = 900) -> None:
+        raise Exception("This is just type information for the Nix test driver")
+
+
+class PollingConditionProtocol(Protocol):
+    def __call__(
+        self,
+        fun_: Optional[Callable] = None,
+        *,
+        seconds_interval: float = 2.0,
+        description: Optional[str] = None,
+    ) -> Union[Callable[[Callable], ContextManager], ContextManager]:
+        raise Exception("This is just type information for the Nix test driver")
+
+
+start_all: Callable[[], None]
+subtest: Callable[[str], ContextManager[None]]
+retry: RetryProtocol
+test_script: Callable[[], None]
+machines: List[Machine]
+vlans: List[VLan]
+driver: Driver
+log: Logger
+create_machine: Callable[[Dict[str, Any]], Machine]
+run_tests: Callable[[], None]
+join_all: Callable[[], None]
+serial_stdout_off: Callable[[], None]
+serial_stdout_on: Callable[[], None]
+polling_condition: PollingConditionProtocol
diff --git a/nixpkgs/nixos/lib/testing-python.nix b/nixpkgs/nixos/lib/testing-python.nix
new file mode 100644
index 000000000000..f5222351518b
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing-python.nix
@@ -0,0 +1,78 @@
+args@
+{ system
+, pkgs ? import ../.. { inherit system config; }
+  # Use a minimal kernel?
+, minimal ? false
+  # Ignored
+, config ? { }
+  # !!! See comment about args in lib/modules.nix
+, specialArgs ? throw "legacy - do not use, see error below"
+  # Modules to add to each VM
+, extraConfigurations ? [ ]
+}:
+let
+  nixos-lib = import ./default.nix { inherit (pkgs) lib; };
+in
+
+pkgs.lib.throwIf (args?specialArgs) ''
+  testing-python.nix: `specialArgs` is not supported anymore. If you're looking
+  for the public interface to the NixOS test framework, use `runTest`, and
+  `node.specialArgs`.
+  See https://nixos.org/manual/nixos/unstable/index.html#sec-calling-nixos-tests
+  and https://nixos.org/manual/nixos/unstable/index.html#test-opt-node.specialArgs
+''
+rec {
+
+  inherit pkgs;
+
+  evalTest = module: nixos-lib.evalTest { imports = [ extraTestModule module ]; };
+  runTest = module: nixos-lib.runTest { imports = [ extraTestModule module ]; };
+
+  extraTestModule = {
+    config = {
+      hostPkgs = pkgs;
+    };
+  };
+
+  # Make a full-blown test (legacy)
+  # For an official public interface to the tests, see
+  # https://nixos.org/manual/nixos/unstable/index.html#sec-calling-nixos-tests
+  makeTest =
+    { machine ? null
+    , nodes ? {}
+    , testScript
+    , enableOCR ? false
+    , globalTimeout ? (60 * 60)
+    , name ? "unnamed"
+    , skipTypeCheck ? false
+      # Skip linting (mainly intended for faster dev cycles)
+    , skipLint ? false
+    , passthru ? {}
+    , meta ? {}
+    , # For meta.position
+      pos ? # position used in error messages and for meta.position
+        (if meta.description or null != null
+          then builtins.unsafeGetAttrPos "description" meta
+          else builtins.unsafeGetAttrPos "testScript" t)
+    , extraPythonPackages ? (_ : [])
+    , interactive ? {}
+    } @ t: let
+    testConfig =
+      (evalTest {
+        imports = [
+          { _file = "makeTest parameters"; config = t; }
+          {
+            defaults = {
+              _file = "makeTest: extraConfigurations";
+              imports = extraConfigurations;
+            };
+          }
+        ];
+      }).config;
+    in
+      testConfig.test   # For nix-build
+        // testConfig;  # For all-tests.nix
+
+  simpleTest = as: (makeTest as).test;
+
+}
diff --git a/nixpkgs/nixos/lib/testing/call-test.nix b/nixpkgs/nixos/lib/testing/call-test.nix
new file mode 100644
index 000000000000..9abcea07455e
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/call-test.nix
@@ -0,0 +1,12 @@
+{ config, lib, ... }:
+let
+  inherit (lib) mkOption types;
+in
+{
+  options = {
+    result = mkOption {
+      internal = true;
+      default = config;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/default.nix b/nixpkgs/nixos/lib/testing/default.nix
new file mode 100644
index 000000000000..a89f734b1e64
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/default.nix
@@ -0,0 +1,27 @@
+{ lib }:
+let
+
+  evalTest = module: lib.evalModules {
+    modules = testModules ++ [ module ];
+    class = "nixosTest";
+  };
+  runTest = module: (evalTest ({ config, ... }: { imports = [ module ]; result = config.test; })).config.result;
+
+  testModules = [
+    ./call-test.nix
+    ./driver.nix
+    ./interactive.nix
+    ./legacy.nix
+    ./meta.nix
+    ./name.nix
+    ./network.nix
+    ./nodes.nix
+    ./pkgs.nix
+    ./run.nix
+    ./testScript.nix
+  ];
+
+in
+{
+  inherit evalTest runTest testModules;
+}
diff --git a/nixpkgs/nixos/lib/testing/driver.nix b/nixpkgs/nixos/lib/testing/driver.nix
new file mode 100644
index 000000000000..b6f01c38191d
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/driver.nix
@@ -0,0 +1,203 @@
+{ config, lib, hostPkgs, ... }:
+let
+  inherit (lib) mkOption types literalMD mdDoc;
+
+  # Reifies and correctly wraps the python test driver for
+  # the respective qemu version and with or without ocr support
+  testDriver = hostPkgs.callPackage ../test-driver {
+    inherit (config) enableOCR extraPythonPackages;
+    qemu_pkg = config.qemu.package;
+    imagemagick_light = hostPkgs.imagemagick_light.override { inherit (hostPkgs) libtiff; };
+    tesseract4 = hostPkgs.tesseract4.override { enableLanguages = [ "eng" ]; };
+  };
+
+
+  vlans = map (m: (
+    m.virtualisation.vlans ++
+    (lib.mapAttrsToList (_: v: v.vlan) m.virtualisation.interfaces))) (lib.attrValues config.nodes);
+  vms = map (m: m.system.build.vm) (lib.attrValues config.nodes);
+
+  nodeHostNames =
+    let
+      nodesList = map (c: c.system.name) (lib.attrValues config.nodes);
+    in
+    nodesList ++ lib.optional (lib.length nodesList == 1 && !lib.elem "machine" nodesList) "machine";
+
+  pythonizeName = name:
+    let
+      head = lib.substring 0 1 name;
+      tail = lib.substring 1 (-1) name;
+    in
+      (if builtins.match "[A-z_]" head == null then "_" else head) +
+      lib.stringAsChars (c: if builtins.match "[A-z0-9_]" c == null then "_" else c) tail;
+
+  uniqueVlans = lib.unique (builtins.concatLists vlans);
+  vlanNames = map (i: "vlan${toString i}: VLan;") uniqueVlans;
+  pythonizedNames = map pythonizeName nodeHostNames;
+  machineNames = map (name: "${name}: Machine;") pythonizedNames;
+
+  withChecks = lib.warnIf config.skipLint "Linting is disabled";
+
+  driver =
+    hostPkgs.runCommand "nixos-test-driver-${config.name}"
+      {
+        # inherit testName; TODO (roberth): need this?
+        nativeBuildInputs = [
+          hostPkgs.makeWrapper
+        ] ++ lib.optionals (!config.skipTypeCheck) [ hostPkgs.mypy ];
+        buildInputs = [ testDriver ];
+        testScript = config.testScriptString;
+        preferLocalBuild = true;
+        passthru = config.passthru;
+        meta = config.meta // {
+          mainProgram = "nixos-test-driver";
+        };
+      }
+      ''
+        mkdir -p $out/bin
+
+        vmStartScripts=($(for i in ${toString vms}; do echo $i/bin/run-*-vm; done))
+
+        ${lib.optionalString (!config.skipTypeCheck) ''
+          # prepend type hints so the test script can be type checked with mypy
+          cat "${../test-script-prepend.py}" >> testScriptWithTypes
+          echo "${builtins.toString machineNames}" >> testScriptWithTypes
+          echo "${builtins.toString vlanNames}" >> testScriptWithTypes
+          echo -n "$testScript" >> testScriptWithTypes
+
+          echo "Running type check (enable/disable: config.skipTypeCheck)"
+          echo "See https://nixos.org/manual/nixos/stable/#test-opt-skipTypeCheck"
+
+          mypy  --no-implicit-optional \
+                --pretty \
+                --no-color-output \
+                testScriptWithTypes
+        ''}
+
+        echo -n "$testScript" >> $out/test-script
+
+        ln -s ${testDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
+
+        ${testDriver}/bin/generate-driver-symbols
+        ${lib.optionalString (!config.skipLint) ''
+          echo "Linting test script (enable/disable: config.skipLint)"
+          echo "See https://nixos.org/manual/nixos/stable/#test-opt-skipLint"
+
+          PYFLAKES_BUILTINS="$(
+            echo -n ${lib.escapeShellArg (lib.concatStringsSep "," pythonizedNames)},
+            < ${lib.escapeShellArg "driver-symbols"}
+          )" ${hostPkgs.python3Packages.pyflakes}/bin/pyflakes $out/test-script
+        ''}
+
+        # set defaults through environment
+        # see: ./test-driver/test-driver.py argparse implementation
+        wrapProgram $out/bin/nixos-test-driver \
+          --set startScripts "''${vmStartScripts[*]}" \
+          --set testScript "$out/test-script" \
+          --set globalTimeout "${toString config.globalTimeout}" \
+          --set vlans '${toString vlans}' \
+          ${lib.escapeShellArgs (lib.concatMap (arg: ["--add-flags" arg]) config.extraDriverArgs)}
+      '';
+
+in
+{
+  options = {
+
+    driver = mkOption {
+      description = mdDoc "Package containing a script that runs the test.";
+      type = types.package;
+      defaultText = literalMD "set by the test framework";
+    };
+
+    hostPkgs = mkOption {
+      description = mdDoc "Nixpkgs attrset used outside the nodes.";
+      type = types.raw;
+      example = lib.literalExpression ''
+        import nixpkgs { inherit system config overlays; }
+      '';
+    };
+
+    qemu.package = mkOption {
+      description = mdDoc "Which qemu package to use for the virtualisation of [{option}`nodes`](#test-opt-nodes).";
+      type = types.package;
+      default = hostPkgs.qemu_test;
+      defaultText = "hostPkgs.qemu_test";
+    };
+
+    globalTimeout = mkOption {
+      description = mdDoc ''
+        A global timeout for the complete test, expressed in seconds.
+        Beyond that timeout, every resource will be killed and released and the test will fail.
+
+        By default, we use a 1 hour timeout.
+      '';
+      type = types.int;
+      default = 60 * 60;
+      example = 10 * 60;
+    };
+
+    enableOCR = mkOption {
+      description = mdDoc ''
+        Whether to enable Optical Character Recognition functionality for
+        testing graphical programs. See [Machine objects](`ssec-machine-objects`).
+      '';
+      type = types.bool;
+      default = false;
+    };
+
+    extraPythonPackages = mkOption {
+      description = mdDoc ''
+        Python packages to add to the test driver.
+
+        The argument is a Python package set, similar to `pkgs.pythonPackages`.
+      '';
+      example = lib.literalExpression ''
+        p: [ p.numpy ]
+      '';
+      type = types.functionTo (types.listOf types.package);
+      default = ps: [ ];
+    };
+
+    extraDriverArgs = mkOption {
+      description = mdDoc ''
+        Extra arguments to pass to the test driver.
+
+        They become part of [{option}`driver`](#test-opt-driver) via `wrapProgram`.
+      '';
+      type = types.listOf types.str;
+      default = [];
+    };
+
+    skipLint = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Do not run the linters. This may speed up your iteration cycle, but it is not something you should commit.
+      '';
+    };
+
+    skipTypeCheck = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Disable type checking. This must not be enabled for new NixOS tests.
+
+        This may speed up your iteration cycle, unless you're working on the [{option}`testScript`](#test-opt-testScript).
+      '';
+    };
+  };
+
+  config = {
+    _module.args = {
+      hostPkgs =
+        # Comment is in nixos/modules/misc/nixpkgs.nix
+        lib.mkOverride lib.modules.defaultOverridePriority
+          config.hostPkgs.__splicedPackages;
+    };
+
+    driver = withChecks driver;
+
+    # make available on the test runner
+    passthru.driver = config.driver;
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/interactive.nix b/nixpkgs/nixos/lib/testing/interactive.nix
new file mode 100644
index 000000000000..317ed4241882
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/interactive.nix
@@ -0,0 +1,45 @@
+{ config, lib, moduleType, hostPkgs, ... }:
+let
+  inherit (lib) mkOption types mdDoc;
+in
+{
+  options = {
+    interactive = mkOption {
+      description = mdDoc ''
+        Tests [can be run interactively](#sec-running-nixos-tests-interactively)
+        using the program in the test derivation's `.driverInteractive` attribute.
+
+        When they are, the configuration will include anything set in this submodule.
+
+        You can set any top-level test option here.
+
+        Example test module:
+
+        ```nix
+        { config, lib, ... }: {
+
+          nodes.rabbitmq = {
+            services.rabbitmq.enable = true;
+          };
+
+          # When running interactively ...
+          interactive.nodes.rabbitmq = {
+            # ... enable the web ui.
+            services.rabbitmq.managementPlugin.enable = true;
+          };
+        }
+        ```
+
+        For details, see the section about [running tests interactively](#sec-running-nixos-tests-interactively).
+      '';
+      type = moduleType;
+      visible = "shallow";
+    };
+  };
+
+  config = {
+    interactive.qemu.package = hostPkgs.qemu;
+    interactive.extraDriverArgs = [ "--interactive" ];
+    passthru.driverInteractive = config.interactive.driver;
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/legacy.nix b/nixpkgs/nixos/lib/testing/legacy.nix
new file mode 100644
index 000000000000..b31057556601
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/legacy.nix
@@ -0,0 +1,26 @@
+{ config, options, lib, ... }:
+let
+  inherit (lib) mkIf mkOption types;
+in
+{
+  # This needs options.warnings and options.assertions, which we don't have (yet?).
+  # imports = [
+  #   (lib.mkRenamedOptionModule [ "machine" ] [ "nodes" "machine" ])
+  #   (lib.mkRemovedOptionModule [ "minimal" ] "The minimal kernel module was removed as it was broken and not used any more in nixpkgs.")
+  # ];
+
+  options = {
+    machine = mkOption {
+      internal = true;
+      type = types.raw;
+    };
+  };
+
+  config = {
+    nodes = mkIf options.machine.isDefined (
+      lib.warn
+        "In test `${config.name}': The `machine' attribute in NixOS tests (pkgs.nixosTest / make-test-python.nix / testing-python.nix / makeTest) is deprecated. Please set the equivalent `nodes.machine'."
+        { inherit (config) machine; }
+    );
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/meta.nix b/nixpkgs/nixos/lib/testing/meta.nix
new file mode 100644
index 000000000000..805b7520edff
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/meta.nix
@@ -0,0 +1,42 @@
+{ lib, ... }:
+let
+  inherit (lib) types mkOption mdDoc;
+in
+{
+  options = {
+    meta = lib.mkOption {
+      description = mdDoc ''
+        The [`meta`](https://nixos.org/manual/nixpkgs/stable/#chap-meta) attributes that will be set on the returned derivations.
+
+        Not all [`meta`](https://nixos.org/manual/nixpkgs/stable/#chap-meta) attributes are supported, but more can be added as desired.
+      '';
+      apply = lib.filterAttrs (k: v: v != null);
+      type = types.submodule {
+        options = {
+          maintainers = lib.mkOption {
+            type = types.listOf types.raw;
+            default = [];
+            description = mdDoc ''
+              The [list of maintainers](https://nixos.org/manual/nixpkgs/stable/#var-meta-maintainers) for this test.
+            '';
+          };
+          timeout = lib.mkOption {
+            type = types.nullOr types.int;
+            default = 3600;  # 1 hour
+            description = mdDoc ''
+              The [{option}`test`](#test-opt-test)'s [`meta.timeout`](https://nixos.org/manual/nixpkgs/stable/#var-meta-timeout) in seconds.
+            '';
+          };
+          broken = lib.mkOption {
+            type = types.bool;
+            default = false;
+            description = mdDoc ''
+              Sets the [`meta.broken`](https://nixos.org/manual/nixpkgs/stable/#var-meta-broken) attribute on the [{option}`test`](#test-opt-test) derivation.
+            '';
+          };
+        };
+      };
+      default = {};
+    };
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/name.nix b/nixpkgs/nixos/lib/testing/name.nix
new file mode 100644
index 000000000000..0af593169eec
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/name.nix
@@ -0,0 +1,14 @@
+{ lib, ... }:
+let
+  inherit (lib) mkOption types mdDoc;
+in
+{
+  options.name = mkOption {
+    description = mdDoc ''
+      The name of the test.
+
+      This is used in the derivation names of the [{option}`driver`](#test-opt-driver) and [{option}`test`](#test-opt-test) runner.
+    '';
+    type = types.str;
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/network.nix b/nixpkgs/nixos/lib/testing/network.nix
new file mode 100644
index 000000000000..1edc9e276530
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/network.nix
@@ -0,0 +1,131 @@
+{ lib, nodes, ... }:
+
+let
+  inherit (lib)
+    attrNames concatMap concatMapStrings flip forEach head
+    listToAttrs mkDefault mkOption nameValuePair optionalString
+    range toLower types zipListsWith zipLists
+    mdDoc
+    ;
+
+  nodeNumbers =
+    listToAttrs
+      (zipListsWith
+        nameValuePair
+        (attrNames nodes)
+        (range 1 254)
+      );
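+  # A sketch of the result for two hypothetical nodes named `client` and
+  # `server` (attrNames returns names in sorted order):
+  #   nodeNumbers == { client = 1; server = 2; }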
+
+  networkModule = { config, nodes, pkgs, ... }:
+    let
+      qemu-common = import ../qemu-common.nix { inherit lib pkgs; };
+
+      # Convert legacy VLANs to named interfaces and merge with explicit interfaces.
+      vlansNumbered = forEach (zipLists config.virtualisation.vlans (range 1 255)) (v: {
+        name = "eth${toString v.snd}";
+        vlan = v.fst;
+        assignIP = true;
+      });
+      explicitInterfaces = lib.mapAttrsToList (n: v: v // { name = n; }) config.virtualisation.interfaces;
+      interfaces = vlansNumbered ++ explicitInterfaces;
+      interfacesNumbered = zipLists interfaces (range 1 255);
+
+      # Automatically assign IP addresses to requested interfaces.
+      assignIPs = lib.filter (i: i.assignIP) interfaces;
+      ipInterfaces = forEach assignIPs (i:
+        nameValuePair i.name { ipv4.addresses =
+          [ { address = "192.168.${toString i.vlan}.${toString config.virtualisation.test.nodeNumber}";
+              prefixLength = 24;
+            }];
+        });
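+      # A sketch of the assignment above: with virtualisation.vlans = [ 1 2 ]
+      # on the node numbered 2, eth1 gets 192.168.1.2/24 and eth2 gets 192.168.2.2/24.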
+
+      qemuOptions = lib.flatten (forEach interfacesNumbered ({ fst, snd }:
+        qemu-common.qemuNICFlags snd fst.vlan config.virtualisation.test.nodeNumber));
+      udevRules = forEach interfacesNumbered ({ fst, snd }:
+        # MAC Addresses for QEMU network devices are lowercase, and udev string comparison is case-sensitive.
+        ''SUBSYSTEM=="net",ACTION=="add",ATTR{address}=="${toLower(qemu-common.qemuNicMac fst.vlan config.virtualisation.test.nodeNumber)}",NAME="${fst.name}"'');
+
+      networkConfig =
+        {
+          networking.hostName = mkDefault config.virtualisation.test.nodeName;
+
+          networking.interfaces = listToAttrs ipInterfaces;
+
+          networking.primaryIPAddress =
+            optionalString (ipInterfaces != [ ]) (head (head ipInterfaces).value.ipv4.addresses).address;
+
+          # Put the IP addresses of all VMs in this machine's
+          # /etc/hosts file.  If a machine has multiple
+          # interfaces, use the IP address corresponding to
+          # the first interface (i.e. the first network in its
+          # virtualisation.vlans option).
+          networking.extraHosts = flip concatMapStrings (attrNames nodes)
+            (m':
+              let config = nodes.${m'}; in
+              optionalString (config.networking.primaryIPAddress != "")
+                ("${config.networking.primaryIPAddress} " +
+                  optionalString (config.networking.domain != null)
+                    "${config.networking.hostName}.${config.networking.domain} " +
+                  "${config.networking.hostName}\n"));
+
+          virtualisation.qemu.options = qemuOptions;
+          boot.initrd.services.udev.rules = concatMapStrings (x: x + "\n") udevRules;
+        };
+
+    in
+    {
+      key = "network-interfaces";
+      config = networkConfig // {
+        # Expose the networkConfig items for tests like nixops
+        # that need to recreate the network config.
+        system.build.networkConfig = networkConfig;
+      };
+    };
+
+  nodeNumberModule = (regular@{ config, name, ... }: {
+    options = {
+      virtualisation.test.nodeName = mkOption {
+        internal = true;
+        default = name;
+        # We need to force this in specialisations, otherwise it'd be
+        # readOnly = true;
+        description = mdDoc ''
+          The `name` in `nodes.<name>`; stable across `specialisations`.
+        '';
+      };
+      virtualisation.test.nodeNumber = mkOption {
+        internal = true;
+        type = types.int;
+        readOnly = true;
+        default = nodeNumbers.${config.virtualisation.test.nodeName};
+        description = mdDoc ''
+          A unique number assigned for each node in `nodes`.
+        '';
+      };
+
+      # specialisations override the `name` module argument,
+      # so we push the real `virtualisation.test.nodeName`.
+      specialisation = mkOption {
+        type = types.attrsOf (types.submodule {
+          options.configuration = mkOption {
+            type = types.submoduleWith {
+              modules = [
+                {
+                  config.virtualisation.test.nodeName =
+                    # assert regular.config.virtualisation.test.nodeName != "configuration";
+                    regular.config.virtualisation.test.nodeName;
+                }
+              ];
+            };
+          };
+        });
+      };
+    };
+  });
+
+in
+{
+  config = {
+    extraBaseModules = { imports = [ networkModule nodeNumberModule ]; };
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/nixos-test-base.nix b/nixpkgs/nixos/lib/testing/nixos-test-base.nix
new file mode 100644
index 000000000000..59e6e3843367
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/nixos-test-base.nix
@@ -0,0 +1,23 @@
+# A module containing the base imports and overrides that
+# are always applied in NixOS VM tests, unconditionally,
+# even in `inheritParentConfig = false` specialisations.
+{ lib, ... }:
+let
+  inherit (lib) mkForce;
+in
+{
+  imports = [
+    ../../modules/virtualisation/qemu-vm.nix
+    ../../modules/testing/test-instrumentation.nix # !!! should only get added for automated test runs
+    { key = "no-manual"; documentation.nixos.enable = false; }
+    {
+      key = "no-revision";
+      # Make the revision metadata constant, in order to avoid needless retesting.
+      # The human version (e.g. 21.05-pre) is left as is, because it is useful
+      # for external modules that test with e.g. testers.nixosTest and rely on that
+      # version number.
+      config.system.nixos.revision = mkForce "constant-nixos-revision";
+    }
+
+  ];
+}
diff --git a/nixpkgs/nixos/lib/testing/nodes.nix b/nixpkgs/nixos/lib/testing/nodes.nix
new file mode 100644
index 000000000000..73e6d386fd1d
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/nodes.nix
@@ -0,0 +1,149 @@
+testModuleArgs@{ config, lib, hostPkgs, nodes, ... }:
+
+let
+  inherit (lib)
+    literalExpression
+    literalMD
+    mapAttrs
+    mdDoc
+    mkDefault
+    mkIf
+    mkOption mkForce
+    optional
+    optionalAttrs
+    types
+    ;
+
+  baseOS =
+    import ../eval-config.nix {
+      inherit lib;
+      system = null; # use modularly defined system
+      inherit (config.node) specialArgs;
+      modules = [ config.defaults ];
+      baseModules = (import ../../modules/module-list.nix) ++
+        [
+          ./nixos-test-base.nix
+          { key = "nodes"; _module.args.nodes = config.nodesCompat; }
+          ({ config, ... }:
+            {
+              virtualisation.qemu.package = testModuleArgs.config.qemu.package;
+            })
+          ({ options, ... }: {
+            key = "nodes.nix-pkgs";
+            config = optionalAttrs (!config.node.pkgsReadOnly) (
+              mkIf (!options.nixpkgs.pkgs.isDefined) {
+                # TODO: switch to nixpkgs.hostPlatform and make sure containers-imperative test still evaluates.
+                nixpkgs.system = hostPkgs.stdenv.hostPlatform.system;
+              }
+            );
+          })
+          testModuleArgs.config.extraBaseModules
+        ];
+    };
+
+
+in
+
+{
+
+  options = {
+    node.type = mkOption {
+      type = types.raw;
+      default = baseOS.type;
+      internal = true;
+    };
+
+    nodes = mkOption {
+      type = types.lazyAttrsOf config.node.type;
+      visible = "shallow";
+      description = mdDoc ''
+        An attribute set of NixOS configuration modules.
+
+        The configurations are augmented by the [`defaults`](#test-opt-defaults) option.
+
+        They are assigned network addresses according to the `nixos/lib/testing/network.nix` module.
+
+        A few special options are available that aren't in a plain NixOS configuration. See [Configuring the nodes](#sec-nixos-test-nodes).
+      '';
+    };
+
+    defaults = mkOption {
+      description = mdDoc ''
+        NixOS configuration that is applied to all [{option}`nodes`](#test-opt-nodes).
+      '';
+      type = types.deferredModule;
+      default = { };
+    };
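+    # A sketch of a possible use, applied to every node:
+    #   defaults = { services.openssh.enable = true; };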
+
+    extraBaseModules = mkOption {
+      description = mdDoc ''
+        NixOS configuration that, like [{option}`defaults`](#test-opt-defaults), is applied to all [{option}`nodes`](#test-opt-nodes) and can not be undone with [`specialisation.<name>.inheritParentConfig`](https://search.nixos.org/options?show=specialisation.%3Cname%3E.inheritParentConfig&from=0&size=50&sort=relevance&type=packages&query=specialisation).
+      '';
+      type = types.deferredModule;
+      default = { };
+    };
+
+    node.pkgs = mkOption {
+      description = mdDoc ''
+        The Nixpkgs to use for the nodes.
+
+        Setting this will make the `nixpkgs.*` options read-only, to avoid mistakenly testing with a Nixpkgs configuration that diverges from regular use.
+      '';
+      type = types.nullOr types.pkgs;
+      default = null;
+      defaultText = literalMD ''
+        `null`, so construct `pkgs` according to the `nixpkgs.*` options as usual.
+      '';
+    };
+
+    node.pkgsReadOnly = mkOption {
+      description = mdDoc ''
+        Whether to make the `nixpkgs.*` options read-only. This is only relevant when [`node.pkgs`](#test-opt-node.pkgs) is set.
+
+        Set this to `false` when any of the [`nodes`](#test-opt-nodes) needs to configure any of the `nixpkgs.*` options. This will slow down evaluation of your test a bit.
+      '';
+      type = types.bool;
+      default = config.node.pkgs != null;
+      defaultText = literalExpression ''node.pkgs != null'';
+    };
+
+    node.specialArgs = mkOption {
+      type = types.lazyAttrsOf types.raw;
+      default = { };
+      description = mdDoc ''
+        An attribute set of arbitrary values that will be made available as module arguments during the resolution of module `imports`.
+
+        Note that it is not possible to override these from within the NixOS configurations. If your argument is not relevant to `imports`, consider setting {option}`defaults._module.args.<name>` instead.
+      '';
+    };
+
+    nodesCompat = mkOption {
+      internal = true;
+      description = mdDoc ''
+        Basically `_module.args.nodes`, but with backcompat and warnings added.
+
+        This will go away.
+      '';
+    };
+  };
+
+  config = {
+    _module.args.nodes = config.nodesCompat;
+    nodesCompat =
+      mapAttrs
+        (name: config: config // {
+          config = lib.warnIf (lib.isInOldestRelease 2211)
+            "Module argument `nodes.${name}.config` is deprecated. Use `nodes.${name}` instead."
+            config;
+        })
+        config.nodes;
+
+    passthru.nodes = config.nodesCompat;
+
+    defaults = mkIf config.node.pkgsReadOnly {
+      nixpkgs.pkgs = config.node.pkgs;
+      imports = [ ../../modules/misc/nixpkgs/read-only.nix ];
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/pkgs.nix b/nixpkgs/nixos/lib/testing/pkgs.nix
new file mode 100644
index 000000000000..22dd586868e3
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/pkgs.nix
@@ -0,0 +1,11 @@
+{ config, lib, hostPkgs, ... }:
+{
+  config = {
+    # default pkgs for use in VMs
+    _module.args.pkgs = hostPkgs;
+
+    defaults = {
+      # TODO: a module to set a shared pkgs, if options.nixpkgs.* is untouched by user (highestPrio)
+    };
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/run.nix b/nixpkgs/nixos/lib/testing/run.nix
new file mode 100644
index 000000000000..9440c1acdfd8
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/run.nix
@@ -0,0 +1,67 @@
+{ config, hostPkgs, lib, ... }:
+let
+  inherit (lib) types mkOption mdDoc;
+in
+{
+  options = {
+    passthru = mkOption {
+      type = types.lazyAttrsOf types.raw;
+      description = mdDoc ''
+        Attributes to add to the returned derivations,
+        which are not necessarily part of the build.
+
+        This is a bit like doing `drv // { myAttr = true; }` (which would be lost by `overrideAttrs`).
+        It does not change the actual derivation, but adds the attribute nonetheless, so that
+        consumers of what would be `drv` have more information.
+      '';
+    };
+
+    rawTestDerivation = mkOption {
+      type = types.package;
+      description = mdDoc ''
+        Unfiltered version of `test`, for troubleshooting the test framework and `testBuildFailure` in the test framework's test suite.
+        This is not intended for general use. Use `test` instead.
+      '';
+      internal = true;
+    };
+
+    test = mkOption {
+      type = types.package;
+      # TODO: can the interactive driver be configured to access the network?
+      description = mdDoc ''
+        Derivation that runs the test as its "build" process.
+
+        This implies that NixOS tests run isolated from the network, making them
+        more dependable.
+      '';
+    };
+  };
+
+  config = {
+    rawTestDerivation = hostPkgs.stdenv.mkDerivation {
+      name = "vm-test-run-${config.name}";
+
+      requiredSystemFeatures = [ "kvm" "nixos-test" ];
+
+      buildCommand = ''
+        mkdir -p $out
+
+        # effectively mute the XMLLogger
+        export LOGFILE=/dev/null
+
+        ${config.driver}/bin/nixos-test-driver -o $out
+      '';
+
+      passthru = config.passthru;
+
+      meta = config.meta;
+    };
+    test = lib.lazyDerivation { # lazyDerivation improves performance when only passthru items and/or meta are used.
+      derivation = config.rawTestDerivation;
+      inherit (config) passthru meta;
+    };
+
+    # useful for inspection (debugging / exploration)
+    passthru.config = config;
+  };
+}
diff --git a/nixpkgs/nixos/lib/testing/testScript.nix b/nixpkgs/nixos/lib/testing/testScript.nix
new file mode 100644
index 000000000000..5c36d754d79d
--- /dev/null
+++ b/nixpkgs/nixos/lib/testing/testScript.nix
@@ -0,0 +1,84 @@
+testModuleArgs@{ config, lib, hostPkgs, nodes, moduleType, ... }:
+let
+  inherit (lib) mkOption types mdDoc;
+  inherit (types) either str functionTo;
+in
+{
+  options = {
+    testScript = mkOption {
+      type = either str (functionTo str);
+      description = mdDoc ''
+        A series of python declarations and statements that you write to perform
+        the test.
+      '';
+    };
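+    # A minimal sketch, assuming a node named `machine` (start_all and
+    # wait_for_unit are provided by the test driver):
+    #   testScript = ''
+    #     start_all()
+    #     machine.wait_for_unit("multi-user.target")
+    #   '';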
+    testScriptString = mkOption {
+      type = str;
+      readOnly = true;
+      internal = true;
+    };
+
+    includeTestScriptReferences = mkOption {
+      type = types.bool;
+      default = true;
+      internal = true;
+    };
+    withoutTestScriptReferences = mkOption {
+      type = moduleType;
+      description = mdDoc ''
+        A parallel universe where the testScript is invalid and has no references.
+      '';
+      internal = true;
+      visible = false;
+    };
+  };
+  config = {
+    withoutTestScriptReferences.includeTestScriptReferences = false;
+    withoutTestScriptReferences.testScript = lib.mkForce "testscript omitted";
+
+    testScriptString =
+      if lib.isFunction config.testScript
+      then
+        config.testScript
+          {
+            nodes =
+              lib.mapAttrs
+                (k: v:
+                  if v.virtualisation.useNixStoreImage
+                  then
+                  # prevent infinite recursion when testScript would
+                  # reference v's toplevel
+                    config.withoutTestScriptReferences.nodesCompat.${k}
+                  else
+                  # reuse memoized config
+                    v
+                )
+                config.nodesCompat;
+          }
+      else config.testScript;
+
+    defaults = { config, name, ... }: {
+      # Make sure all derivations referenced by the test
+      # script are available on the nodes. When the store is
+      # accessed through 9p, this isn't important, since
+      # everything in the store is available to the guest,
+      # but when building a root image it is, as all paths
+      # that should be available to the guest have to be
+      # copied to the image.
+      virtualisation.additionalPaths =
+        lib.optional
+          # A testScript may evaluate nodes, which has caused
+          # infinite recursions. The demand cycle involves:
+          #   testScript -->
+          #   nodes -->
+          #   toplevel -->
+          #   additionalPaths -->
+          #   hasContext testScript' -->
+          #   testScript (ad infinitum)
+          # If we don't need to build an image, we can break this
+          # cycle by short-circuiting when useNixStoreImage is false.
+          (config.virtualisation.useNixStoreImage && builtins.hasContext testModuleArgs.config.testScriptString && testModuleArgs.config.includeTestScriptReferences)
+          (hostPkgs.writeStringReferencesToFile testModuleArgs.config.testScriptString);
+    };
+  };
+}
diff --git a/nixpkgs/nixos/lib/utils.nix b/nixpkgs/nixos/lib/utils.nix
new file mode 100644
index 000000000000..e618cf2f861a
--- /dev/null
+++ b/nixpkgs/nixos/lib/utils.nix
@@ -0,0 +1,236 @@
+{ lib, config, pkgs }: with lib;
+
+rec {
+
+  # Copy configuration files to avoid having the entire sources in the system closure
+  copyFile = filePath: pkgs.runCommand (builtins.unsafeDiscardStringContext (builtins.baseNameOf filePath)) {} ''
+    cp ${filePath} $out
+  '';
+
+  # Check whether a fileSystem is needed for boot.  NOTE: Make sure
+  # pathsNeededForBoot is closed under the parent relationship, i.e. if /a/b/c
+  # is in the list, put /a and /a/b in as well.
+  pathsNeededForBoot = [ "/" "/nix" "/nix/store" "/var" "/var/log" "/var/lib" "/var/lib/nixos" "/etc" "/usr" ];
+  fsNeededForBoot = fs: fs.neededForBoot || elem fs.mountPoint pathsNeededForBoot;
+
+  # Check whether `b` depends on `a` as a fileSystem
+  fsBefore = a: b:
+    let
+      # normalisePath adds a slash at the end of the path if it didn't already
+      # have one.
+      #
+      # The reason slashes are added at the end of each path is to prevent `b`
+      # from accidentally depending on `a` in cases like
+      #    a = { mountPoint = "/aaa"; ... }
+      #    b = { device     = "/aaaa"; ... }
+      # Here a.mountPoint *is* a prefix of b.device even though a.mountPoint is
+      # *not* a parent of b.device. If we add a slash at the end of each string,
+      # though, this is not a problem: "/aaa/" is not a prefix of "/aaaa/".
+      normalisePath = path: "${path}${optionalString (!(hasSuffix "/" path)) "/"}";
+      normalise = mount: mount // { device = normalisePath (toString mount.device);
+                                    mountPoint = normalisePath mount.mountPoint;
+                                    depends = map normalisePath mount.depends;
+                                  };
+
+      a' = normalise a;
+      b' = normalise b;
+
+    in hasPrefix a'.mountPoint b'.device
+    || hasPrefix a'.mountPoint b'.mountPoint
+    || any (hasPrefix a'.mountPoint) b'.depends;
+
+  # Escape a path according to the systemd rules. FIXME: slow
+  # The rules are described in systemd.unit(5) as follows:
+  # The escaping algorithm operates as follows: given a string, any "/" character is replaced by "-", and all other characters which are not ASCII alphanumerics, ":", "_" or "." are replaced by C-style "\x2d" escapes. In addition, "." is replaced with such a C-style escape when it would appear as the first character in the escaped string.
+  # When the input qualifies as absolute file system path, this algorithm is extended slightly: the path to the root directory "/" is encoded as single dash "-". In addition, any leading, trailing or duplicate "/" characters are removed from the string before transformation. Example: /foo//bar/baz/ becomes "foo-bar-baz".
+  escapeSystemdPath = s: let
+    replacePrefix = p: r: s: (if (hasPrefix p s) then r + (removePrefix p s) else s);
+    trim = s: removeSuffix "/" (removePrefix "/" s);
+    normalizedPath = strings.normalizePath s;
+  in
+    replaceStrings ["/"] ["-"]
+    (replacePrefix "." (strings.escapeC ["."] ".")
+    (strings.escapeC (stringToCharacters " !\"#$%&'()*+,;<=>=@[\\]^`{|}~-")
+    (if normalizedPath == "/" then normalizedPath else trim normalizedPath)));
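+  # A sketch of the behaviour (comparable to `systemd-escape --path`):
+  #   escapeSystemdPath "/dev/disk/by-label/swap" => "dev-disk-by\x2dlabel-swap"
+  #   escapeSystemdPath "/" => "-"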
+
+  # Quotes an argument for use in Exec* service lines.
+  # systemd accepts "-quoted strings with escape sequences, toJSON produces
+  # a subset of these.
+  # Additionally we escape % to disallow expansion of % specifiers. Any lone ;
+  # in the input will be turned into ";" and thus lose its special meaning.
+  # Every $ is escaped to $$; this makes it unnecessary to disable environment
+  # substitution for the directive.
+  escapeSystemdExecArg = arg:
+    let
+      s = if builtins.isPath arg then "${arg}"
+        else if builtins.isString arg then arg
+        else if builtins.isInt arg || builtins.isFloat arg then toString arg
+        else throw "escapeSystemdExecArg only allows strings, paths and numbers";
+    in
+      replaceStrings [ "%" "$" ] [ "%%" "$$" ] (builtins.toJSON s);
+
+  # Quotes a list of arguments into a single string for use in an Exec*
+  # line.
+  escapeSystemdExecArgs = concatMapStringsSep " " escapeSystemdExecArg;
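+  # For example (a sketch; the double quotes are part of the output):
+  #   escapeSystemdExecArg "100%"             =>  "100%%"
+  #   escapeSystemdExecArgs [ "a b" "$HOME" ] =>  "a b" "$$HOME"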
+
+  # Returns a system path for a given shell package
+  toShellPath = shell:
+    if types.shellPackage.check shell then
+      "/run/current-system/sw${shell.shellPath}"
+    else if types.package.check shell then
+      throw "${shell} is not a shell package"
+    else
+      shell;
+
+  /* Recurse into a list or an attrset, searching for attrs named like
+     the value of the "attr" parameter, and return an attrset where the
+     names are the corresponding jq path where the attrs were found and
+     the values are the values of the attrs.
+
+     Example:
+       recursiveGetAttrWithJqPrefix {
+         example = [
+           {
+             irrelevant = "not interesting";
+           }
+           {
+             ignored = "ignored attr";
+             relevant = {
+               secret = {
+                 _secret = "/path/to/secret";
+               };
+             };
+           }
+         ];
+       } "_secret" -> { ".example[1].relevant.secret" = "/path/to/secret"; }
+  */
+  recursiveGetAttrWithJqPrefix = item: attr:
+    let
+      recurse = prefix: item:
+        if item ? ${attr} then
+          nameValuePair prefix item.${attr}
+        else if isAttrs item then
+          map (name:
+            let
+              escapedName = ''"${replaceStrings [''"'' "\\"] [''\"'' "\\\\"] name}"'';
+            in
+              recurse (prefix + "." + escapedName) item.${name}) (attrNames item)
+        else if isList item then
+          imap0 (index: item: recurse (prefix + "[${toString index}]") item) item
+        else
+          [];
+    in listToAttrs (flatten (recurse "" item));
+
+  /* Takes an attrset and a file path and generates a bash snippet that
+     outputs a JSON file at the file path with all instances of
+
+     { _secret = "/path/to/secret" }
+
+     in the attrset replaced with the contents of the file
+     "/path/to/secret" in the output JSON.
+
+     When a configuration option accepts an attrset that is finally
+     converted to JSON, this makes it possible to let the user define
+     arbitrary secret values.
+
+     Example:
+       If the file "/path/to/secret" contains the string
+       "topsecretpassword1234",
+
+       genJqSecretsReplacementSnippet {
+         example = [
+           {
+             irrelevant = "not interesting";
+           }
+           {
+             ignored = "ignored attr";
+             relevant = {
+               secret = {
+                 _secret = "/path/to/secret";
+               };
+             };
+           }
+         ];
+       } "/path/to/output.json"
+
+       would generate a snippet that, when run, outputs the following
+       JSON file at "/path/to/output.json":
+
+       {
+         "example": [
+           {
+             "irrelevant": "not interesting"
+           },
+           {
+             "ignored": "ignored attr",
+             "relevant": {
+               "secret": "topsecretpassword1234"
+             }
+           }
+         ]
+       }
+  */
+  genJqSecretsReplacementSnippet = genJqSecretsReplacementSnippet' "_secret";
+
+  # Like genJqSecretsReplacementSnippet, but allows the name of the
+  # attr which identifies the secret to be changed.
+  genJqSecretsReplacementSnippet' = attr: set: output:
+    let
+      secrets = recursiveGetAttrWithJqPrefix set attr;
+      stringOrDefault = str: def: if str == "" then def else str;
+    in ''
+      if [[ -h '${output}' ]]; then
+        rm '${output}'
+      fi
+
+      inherit_errexit_enabled=0
+      shopt -pq inherit_errexit && inherit_errexit_enabled=1
+      shopt -s inherit_errexit
+    ''
+    + concatStringsSep
+        "\n"
+        (imap1 (index: name: ''
+                  secret${toString index}=$(<'${secrets.${name}}')
+                  export secret${toString index}
+                '')
+               (attrNames secrets))
+    + "\n"
+    + "${pkgs.jq}/bin/jq >'${output}' "
+    + lib.escapeShellArg (stringOrDefault
+          (concatStringsSep
+            " | "
+            (imap1 (index: name: ''${name} = $ENV.secret${toString index}'')
+                   (attrNames secrets)))
+          ".")
+    + ''
+       <<'EOF'
+      ${builtins.toJSON set}
+      EOF
+      (( ! $inherit_errexit_enabled )) && shopt -u inherit_errexit
+    '';
+
+  /* Remove packages of packagesToRemove from packages, based on their names.
+     Relies on package names and has quadratic complexity so use with caution!
+
+     Type:
+       removePackagesByName :: [package] -> [package] -> [package]
+
+     Example:
+       removePackagesByName [ nautilus file-roller ] [ file-roller totem ]
+       => [ nautilus ]
+  */
+  removePackagesByName = packages: packagesToRemove:
+    let
+      namesToRemove = map lib.getName packagesToRemove;
+    in
+      lib.filter (x: !(builtins.elem (lib.getName x) namesToRemove)) packages;
+
+  systemdUtils = {
+    lib = import ./systemd-lib.nix { inherit lib config pkgs; };
+    unitOptions = import ./systemd-unit-options.nix { inherit lib systemdUtils; };
+    types = import ./systemd-types.nix { inherit lib systemdUtils pkgs; };
+    network = {
+      units = import ./systemd-network-units.nix { inherit lib systemdUtils; };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/option-usages.nix b/nixpkgs/nixos/maintainers/option-usages.nix
new file mode 100644
index 000000000000..11247666ecda
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/option-usages.nix
@@ -0,0 +1,192 @@
+{ configuration ? import ../lib/from-env.nix "NIXOS_CONFIG" <nixos-config>
+
+# provide an option name, as a string literal.
+, testOption ? null
+
+# provide a list of option names, as string literals.
+, testOptions ? [ ]
+}:
+
+# This file is made to be used as follows:
+#
+#   $ nix-instantiate ./option-usages.nix --argstr testOption services.xserver.enable -A txtContent --eval
+#
+# or
+#
+#   $ nix-build ./option-usages.nix --argstr testOption services.xserver.enable -A txt -o services.xserver.enable._txt
+#
+# Other targets exist, such as `dotContent`, `dot`, and `pdf`.  If you are
+# looking for the option usage of multiple options, you can provide a list
+# as an argument.
+#
+#   $ nix-build ./option-usages.nix --arg testOptions \
+#      '["boot.loader.gummiboot.enable" "boot.loader.gummiboot.timeout"]' \
+#      -A txt -o gummiboot.list
+#
+# Note, this script is slow as it has to evaluate all options of the system
+# once per queried option.
+#
+# This nix expression works by doing a first evaluation, which evaluates the
+# result of every option.
+#
+# Then, for each queried option, we evaluate the NixOS modules a second
+# time, except that we replace the `config` argument of all the modules with
+# the result of the original evaluation, except for the tested option which
+# value is replaced by a `throw` statement which is caught by the `tryEval`
+# evaluation of each option value.
+#
+# We then compare the result of the evaluation of the original module, with
+# the result of the second evaluation, and consider that the new failures are
+# caused by our mutation of the `config` argument.
+#
+# Doing so returns all option results which are directly using the
+# tested option result.
+
+with import ../../lib;
+
+let
+
+  evalFun = {
+    specialArgs ? {}
+  }: import ../lib/eval-config.nix {
+       modules = [ configuration ];
+       inherit specialArgs;
+     };
+
+  eval = evalFun {};
+  inherit (eval) pkgs;
+
+  excludedTestOptions = [
+    # We cannot evaluate _module.args, as it is used during the computation
+    # of the modules list.
+    "_module.args"
+
+    # For reasons we have yet to investigate, some options cannot
+    # be replaced by a throw without causing a non-catchable failure.
+    "networking.bonds"
+    "networking.bridges"
+    "networking.interfaces"
+    "networking.macvlans"
+    "networking.sits"
+    "networking.vlans"
+    "services.openssh.startWhenNeeded"
+  ];
+
+  # For reasons we have yet to investigate, some options are
+  # time-consuming to compute, so we filter them out for the moment.
+  excludedOptions = [
+    "boot.systemd.services"
+    "systemd.services"
+    "kde.extraPackages"
+  ];
+  excludeOptions = list:
+    filter (opt: !(elem (showOption opt.loc) excludedOptions)) list;
+
+
+  reportNewFailures = old: new:
+    let
+      filterChanges =
+        filter ({fst, snd}:
+          !(fst.success -> snd.success)
+        );
+
+      keepNames =
+        map ({fst, snd}:
+          /* assert fst.name == snd.name; */ snd.name
+        );
+
+      # Use  tryEval (strict ...)  to know if there is any failure while
+      # evaluating the option value.
+      #
+      # Note: the `strict` function is not strict enough, but using the toXML
+      # builtin would multiply the memory usage and the time needed to compute
+      # each option by 4.
+      tryCollectOptions = moduleResult:
+        forEach (excludeOptions (collect isOption moduleResult)) (opt:
+          { name = showOption opt.loc; } // builtins.tryEval (strict opt.value));
+     in
+       keepNames (
+         filterChanges (
+           zipLists (tryCollectOptions old) (tryCollectOptions new)
+         )
+       );
+
+
+  # Create a list of modules where each module contains only one failing
+  # option.
+  introspectionModules =
+    let
+      setIntrospection = opt: rec {
+        name = showOption opt.loc;
+        path = opt.loc;
+        config = setAttrByPath path
+          (throw "Usage introspection of '${name}' by forced failure.");
+      };
+    in
+      map setIntrospection (collect isOption eval.options);
+
+  overrideConfig = thrower:
+    recursiveUpdateUntil (path: old: new:
+      path == thrower.path
+    ) eval.config thrower.config;
+
+
+  graph =
+    map (thrower: {
+      option = thrower.name;
+      usedBy = assert __trace "Investigate ${thrower.name}" true;
+        reportNewFailures eval.options (evalFun {
+          specialArgs = {
+            config = overrideConfig thrower;
+          };
+        }).options;
+    }) introspectionModules;
+
+  displayOptionsGraph =
+     let
+       checkList =
+         if testOption != null then [ testOption ]
+         else testOptions;
+       checkAll = checkList == [];
+     in
+       flip filter graph ({option, ...}:
+         (checkAll || elem option checkList)
+         && !(elem option excludedTestOptions)
+       );
+
+  graphToDot = graph: ''
+    digraph "Option Usages" {
+      ${concatMapStrings ({option, usedBy}:
+          concatMapStrings (user: ''
+            "${option}" -> "${user}"''
+          ) usedBy
+        ) displayOptionsGraph}
+    }
+  '';
+
+  graphToText = graph:
+    concatMapStrings ({usedBy, ...}:
+        concatMapStrings (user: ''
+          ${user}
+        '') usedBy
+      ) displayOptionsGraph;
+
+in
+
+rec {
+  dotContent = graphToDot graph;
+  dot = pkgs.writeTextFile {
+    name = "option_usages.dot";
+    text = dotContent;
+  };
+
+  pdf = pkgs.texFunctions.dot2pdf {
+    dotGraph = dot;
+  };
+
+  txtContent = graphToText graph;
+  txt = pkgs.writeTextFile {
+    name = "option_usages.txt";
+    text = txtContent;
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/.gitignore b/nixpkgs/nixos/maintainers/scripts/azure-new/.gitignore
new file mode 100644
index 000000000000..9271abf14a0f
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/.gitignore
@@ -0,0 +1 @@
+azure
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/README.md b/nixpkgs/nixos/maintainers/scripts/azure-new/README.md
new file mode 100644
index 000000000000..e5b69dacec08
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/README.md
@@ -0,0 +1,42 @@
+# azure
+
+## Demo
+
+Here's a demo of this being used: https://asciinema.org/a/euXb9dIeUybE3VkstLWLbvhmp
+
+## Usage
+
+This is meant to be an example image that you can copy into your own
+project and modify to your own needs. Notice that the example image
+includes a built-in test user account, which by default uses your
+`~/.ssh/id_ed25519.pub` as an `authorized_key`.
+
+Build and upload the image:
+```shell
+$ ./upload-image.sh ./examples/basic/image.nix
+
+...
++ attr=azbasic
++ nix-build ./examples/basic/image.nix --out-link azure
+/nix/store/qdpzknpskzw30vba92mb24xzll1dqsmd-azure-image
+...
+95.5 %, 0 Done, 0 Failed, 1 Pending, 0 Skipped, 1 Total, 2-sec Throughput (Mb/s): 932.9565
+...
+/subscriptions/aff271ee-e9be-4441-b9bb-42f5af4cbaeb/resourceGroups/nixos-images/providers/Microsoft.Compute/images/azure-image-todo-makethisbetter
+```
+
+Take the output, boot an Azure VM:
+
+```
+img="/subscriptions/.../..." # use output from last command
+./boot-vm.sh "${img}"
+...
+=> booted
+```
+
+## Future Work
+
+1. If the user specifies a hard-coded user, then the agent could be removed.
+   Probably has security benefits; definitely has closure-size benefits.
+   (It's likely the VM will need to be booted with a special flag. See:
+   https://github.com/Azure/azure-cli/issues/12775 for details.)
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/boot-vm.sh b/nixpkgs/nixos/maintainers/scripts/azure-new/boot-vm.sh
new file mode 100755
index 000000000000..1ce3a5f9db1e
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/boot-vm.sh
@@ -0,0 +1,36 @@
+#!/usr/bin/env bash
+set -euo pipefail
+set -x
+
+image="${1}"
+location="westus2"
+group="nixos-test-vm"
+vm_size="Standard_D2s_v3";  os_size=42;
+
+# ensure group
+az group create --location "westus2" --name "${group}"
+group_id="$(az group show --name "${group}" -o tsv --query "[id]")"
+
+# (optional) identity
+if ! az identity show -n "${group}-identity" -g "${group}" &>/dev/stderr; then
+  az identity create --name "${group}-identity" --resource-group "${group}"
+fi
+
+# (optional) role assignment, to the resource group, bad but not really great alternatives
+identity_id="$(az identity show --name "${group}-identity" --resource-group "${group}" -o tsv --query "[id]")"
+principal_id="$(az identity show --name "${group}-identity" --resource-group "${group}" -o tsv --query "[principalId]")"
+until az role assignment create --assignee "${principal_id}" --role "Owner" --scope "${group_id}"; do sleep 1; done
+
+# boot vm
+az vm create \
+  --name "${group}-vm" \
+  --resource-group "${group}" \
+  --assign-identity "${identity_id}" \
+  --size "${vm_size}" \
+  --os-disk-size-gb "${os_size}" \
+  --image "${image}" \
+  --admin-username "${USER}" \
+  --location "westus2" \
+  --storage-sku "Premium_LRS" \
+  --ssh-key-values "$(ssh-add -L)"
+
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/common.sh b/nixpkgs/nixos/maintainers/scripts/azure-new/common.sh
new file mode 100644
index 000000000000..eb87c3e06501
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/common.sh
@@ -0,0 +1,7 @@
+export group="${AZURE_RESOURCE_GROUP:-"azure"}"
+export location="${AZURE_LOCATION:-"westus2"}"
+
+img_file=$(echo azure/*.vhd)
+img_name="$(basename "${img_file}")"
+img_name="${img_name%".vhd"}"
+export img_name="${img_name//[._]/-}"
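+
+# A sketch with a hypothetical file name: azure/nixos-image.2305.abc.vhd
+# results in img_name=nixos-image-2305-abc (dots and underscores become dashes).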
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/image.nix b/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/image.nix
new file mode 100644
index 000000000000..310eba3621a6
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/image.nix
@@ -0,0 +1,10 @@
+let
+  pkgs = (import ../../../../../../default.nix {});
+  machine = import (pkgs.path + "/nixos/lib/eval-config.nix") {
+    system = "x86_64-linux";
+    modules = [
+      ({config, ...}: { imports = [ ./system.nix ]; })
+    ];
+  };
+in
+  machine.config.system.build.azureImage
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/system.nix b/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/system.nix
new file mode 100644
index 000000000000..d1044802e1f0
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/examples/basic/system.nix
@@ -0,0 +1,33 @@
+{ pkgs, modulesPath, ... }:
+
+let username = "azurenixosuser";
+in
+{
+  imports = [
+    "${modulesPath}/virtualisation/azure-common.nix"
+    "${modulesPath}/virtualisation/azure-image.nix"
+  ];
+
+  ## NOTE: This is just an example of how to hard-code a user.
+  ## The normal Azure agent IS included and DOES provision a user based
+  ## on the information passed at VM creation time.
+  users.users."${username}" = {
+    isNormalUser = true;
+    home = "/home/${username}";
+    description = "Azure NixOS Test User";
+    openssh.authorizedKeys.keys = [ (builtins.readFile ~/.ssh/id_ed25519.pub) ];
+  };
+  nix.settings.trusted-users = [ username ];
+
+  virtualisation.azureImage.diskSize = 2500;
+
+  boot.kernelPackages = pkgs.linuxPackages_latest;
+
+  # test user doesn't have a password
+  services.openssh.passwordAuthentication = false;
+  security.sudo.wheelNeedsPassword = false;
+
+  environment.systemPackages = with pkgs; [
+    git file htop wget curl
+  ];
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/shell.nix b/nixpkgs/nixos/maintainers/scripts/azure-new/shell.nix
new file mode 100644
index 000000000000..592f1bf9056e
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/shell.nix
@@ -0,0 +1,13 @@
+with (import ../../../../default.nix {});
+stdenv.mkDerivation {
+  name = "nixcfg-azure-devenv";
+
+  nativeBuildInputs = [
+    azure-cli
+    bash
+    cacert
+    azure-storage-azcopy
+  ];
+
+  AZURE_CONFIG_DIR="/tmp/azure-cli/.azure";
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/azure-new/upload-image.sh b/nixpkgs/nixos/maintainers/scripts/azure-new/upload-image.sh
new file mode 100755
index 000000000000..143afbd7f962
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure-new/upload-image.sh
@@ -0,0 +1,58 @@
+#!/usr/bin/env bash
+set -euo pipefail
+set -x
+
+image_nix="${1:-"./examples/basic/image.nix"}"
+
+nix-build "${image_nix}" --out-link "azure"
+
+group="nixos-images"
+location="westus2"
+img_name="nixos-image"
+img_file="$(readlink -f ./azure/disk.vhd)"
+
+if ! az group show -n "${group}" &>/dev/null; then
+  az group create --name "${group}" --location "${location}"
+fi
+
+# note: the disk access token song/dance is tedious
+# but allows us to upload directly to a disk image,
+# thereby avoiding storage accounts (and naming them) entirely!
+if ! az disk show -g "${group}" -n "${img_name}" &>/dev/null; then
+  bytes="$(stat -c %s ${img_file})"
+  size="30"
+  az disk create \
+    --resource-group "${group}" \
+    --name "${img_name}" \
+    --for-upload true --upload-size-bytes "${bytes}"
+
+  timeout=$(( 60 * 60 )) # disk access token timeout
+  sasurl="$(\
+    az disk grant-access \
+      --access-level Write \
+      --resource-group "${group}" \
+      --name "${img_name}" \
+      --duration-in-seconds ${timeout} \
+        | jq -r '.accessSas'
+  )"
+
+  azcopy copy "${img_file}" "${sasurl}" \
+    --blob-type PageBlob
+
+  az disk revoke-access \
+    --resource-group "${group}" \
+    --name "${img_name}"
+fi
+
+if ! az image show -g "${group}" -n "${img_name}" &>/dev/null; then
+  diskid="$(az disk show -g "${group}" -n "${img_name}" -o json | jq -r .id)"
+
+  az image create \
+    --resource-group "${group}" \
+    --name "${img_name}" \
+    --source "${diskid}" \
+    --os-type "linux" >/dev/null
+fi
+
+imageid="$(az image show -g "${group}" -n "${img_name}" -o json | jq -r .id)"
+echo "${imageid}"
diff --git a/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh b/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh
new file mode 100755
index 000000000000..0558f8dfffcb
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure/create-azure.sh
@@ -0,0 +1,8 @@
+#! /bin/sh -eu
+
+export NIX_PATH=nixpkgs=$(dirname $(readlink -f $0))/../../../..
+export NIXOS_CONFIG=$(dirname $(readlink -f $0))/../../../modules/virtualisation/azure-image.nix
+export TIMESTAMP=$(date +%Y%m%d%H%M)
+
+nix-build '<nixpkgs/nixos>' \
+   -A config.system.build.azureImage --argstr system x86_64-linux -o azure -j 10
diff --git a/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh b/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh
new file mode 100755
index 000000000000..2ea35d1d4c33
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/azure/upload-azure.sh
@@ -0,0 +1,22 @@
+#! /bin/sh -e
+
+export STORAGE=${STORAGE:-nixos}
+export THREADS=${THREADS:-8}
+
+azure-vhd-utils-for-go  upload --localvhdpath azure/disk.vhd  --stgaccountname "$STORAGE"  --stgaccountkey "$KEY" \
+   --containername images --blobname nixos-unstable-nixops-updated.vhd --parallelism "$THREADS" --overwrite
+
diff --git a/nixpkgs/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix b/nixpkgs/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix
new file mode 100644
index 000000000000..b66ee5d7b9bc
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/cloudstack/cloudstack-image.nix
@@ -0,0 +1,20 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.cloudstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/cloudstack/cloudstack-image.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+
+{
+  imports =
+    [ ../../../modules/virtualisation/cloudstack-config.nix ];
+
+  system.build.cloudstackImage = import ../../../lib/make-disk-image.nix {
+    inherit lib config pkgs;
+    format = "qcow2";
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix> ];
+        }
+      '';
+  };
+
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix
new file mode 100644
index 000000000000..32dd96a7cb7e
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image-zfs.nix
@@ -0,0 +1,12 @@
+{
+  imports = [ ./amazon-image.nix ];
+  ec2.zfs = {
+    enable = true;
+    datasets = {
+      "tank/system/root".mount = "/";
+      "tank/system/var".mount = "/var";
+      "tank/local/nix".mount = "/nix";
+      "tank/user/home".mount = "/home";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix
new file mode 100644
index 000000000000..d12339bca1f8
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/ec2/amazon-image.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.amazonImage;
+  amiBootMode = if config.ec2.efi then "uefi" else "legacy-bios";
+
+in {
+
+  imports = [ ../../../modules/virtualisation/amazon-image.nix ];
+
+  # Amazon recommends setting this to the highest possible value for a good EBS
+  # experience; prior to kernel 4.15 the highest possible value was 255.
+  # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/nvme-ebs-volumes.html#timeout-nvme-ebs-volumes
+  config.boot.kernelParams =
+    let timeout =
+      if pkgs.lib.versionAtLeast config.boot.kernelPackages.kernel.version "4.15"
+      then "4294967295"
+      else  "255";
+    in [ "nvme_core.io_timeout=${timeout}" ];
+
+  options.amazonImage = {
+    name = mkOption {
+      type = types.str;
+      description = lib.mdDoc "The name of the generated derivation";
+      default = "nixos-amazon-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+    };
+
+    contents = mkOption {
+      example = literalExpression ''
+        [ { source = pkgs.memtest86 + "/memtest.bin";
+            target = "boot/memtest.bin";
+          }
+        ]
+      '';
+      default = [];
+      description = lib.mdDoc ''
+        This option lists files to be copied to fixed locations in the
+        generated image. Glob patterns work.
+      '';
+    };
+
+    sizeMB = mkOption {
+      type = with types; either (enum [ "auto" ]) int;
+      default = 3072;
+      example = 8192;
+      description = lib.mdDoc "The size in MB of the image";
+    };
+
+    format = mkOption {
+      type = types.enum [ "raw" "qcow2" "vpc" ];
+      default = "vpc";
+      description = lib.mdDoc "The image format to output";
+    };
+  };
+
+  config.system.build.amazonImage = let
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        { modulesPath, ... }: {
+          imports = [ "''${modulesPath}/virtualisation/amazon-image.nix" ];
+          ${optionalString config.ec2.efi ''
+            ec2.efi = true;
+          ''}
+          ${optionalString config.ec2.zfs.enable ''
+            ec2.zfs.enable = true;
+            networking.hostId = "${config.networking.hostId}";
+          ''}
+        }
+      '';
+
+    zfsBuilder = import ../../../lib/make-multi-disk-zfs-image.nix {
+      inherit lib config configFile;
+      inherit (cfg) contents format name;
+      pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+
+      includeChannel = true;
+
+      bootSize = 1000; # 1G is the minimum EBS volume
+
+      rootSize = cfg.sizeMB;
+      rootPoolProperties = {
+        ashift = 12;
+        autoexpand = "on";
+      };
+
+      datasets = config.ec2.zfs.datasets;
+
+      postVM = ''
+        extension=''${rootDiskImage##*.}
+        friendlyName=$out/${cfg.name}
+        rootDisk="$friendlyName.root.$extension"
+        bootDisk="$friendlyName.boot.$extension"
+        mv "$rootDiskImage" "$rootDisk"
+        mv "$bootDiskImage" "$bootDisk"
+
+        mkdir -p $out/nix-support
+        echo "file ${cfg.format} $bootDisk" >> $out/nix-support/hydra-build-products
+        echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
+
+       ${pkgs.jq}/bin/jq -n \
+         --arg system_label ${lib.escapeShellArg config.system.nixos.label} \
+         --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
+         --arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+         --arg boot_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$bootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+         --arg boot_mode "${amiBootMode}" \
+         --arg root "$rootDisk" \
+         --arg boot "$bootDisk" \
+        '{}
+          | .label = $system_label
+          | .boot_mode = $boot_mode
+          | .system = $system
+          | .disks.boot.logical_bytes = $boot_logical_bytes
+          | .disks.boot.file = $boot
+          | .disks.root.logical_bytes = $root_logical_bytes
+          | .disks.root.file = $root
+          ' > $out/nix-support/image-info.json
+      '';
+    };
+
+    extBuilder = import ../../../lib/make-disk-image.nix {
+      inherit lib config configFile;
+
+      inherit (cfg) contents format name;
+      pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+
+      fsType = "ext4";
+      partitionTableType = if config.ec2.efi then "efi" else "legacy+gpt";
+
+      diskSize = cfg.sizeMB;
+
+      postVM = ''
+        extension=''${diskImage##*.}
+        friendlyName=$out/${cfg.name}.$extension
+        mv "$diskImage" "$friendlyName"
+        diskImage=$friendlyName
+
+        mkdir -p $out/nix-support
+        echo "file ${cfg.format} $diskImage" >> $out/nix-support/hydra-build-products
+
+       ${pkgs.jq}/bin/jq -n \
+         --arg system_label ${lib.escapeShellArg config.system.nixos.label} \
+         --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
+         --arg logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$diskImage" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+         --arg boot_mode "${amiBootMode}" \
+         --arg file "$diskImage" \
+          '{}
+          | .label = $system_label
+          | .boot_mode = $boot_mode
+          | .system = $system
+          | .logical_bytes = $logical_bytes
+          | .file = $file
+          | .disks.root.logical_bytes = $logical_bytes
+          | .disks.root.file = $file
+          ' > $out/nix-support/image-info.json
+      '';
+    };
+  in if config.ec2.zfs.enable then zfsBuilder else extBuilder;
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh b/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh
new file mode 100755
index 000000000000..0c1656efaf1c
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/ec2/create-amis.sh
@@ -0,0 +1,362 @@
+#!/usr/bin/env nix-shell
+#!nix-shell -p awscli -p jq -p qemu -i bash
+# shellcheck shell=bash
+#
+# Future Deprecation?
+# This entire thing should probably be replaced with a generic terraform config
+
+# Uploads and registers NixOS images built from the
+# <nixos/release.nix> amazonImage attribute. Images are uploaded and
+# registered via a home region, and then copied to other regions.
+
+# The home region requires an S3 bucket and an IAM role named "vmimport"
+# (by default) with access to that bucket. The role name can be
+# configured with the "service_role_name" variable. Configuration of the
+# vmimport role is documented in
+# https://docs.aws.amazon.com/vm-import/latest/userguide/vmimport-image-import.html
+
+# set -x
+set -euo pipefail
+
+var () { true; }
+
+# configuration
+var ${state_dir:=$HOME/amis/ec2-images}
+var ${home_region:=eu-west-1}
+var ${bucket:=nixos-amis}
+var ${service_role_name:=vmimport}
+
+# Output of the command:
+# > aws ec2 describe-regions --all-regions --query "Regions[].{Name:RegionName}" --output text | sort
+var ${regions:=
+         af-south-1
+         ap-east-1
+         ap-northeast-1
+         ap-northeast-2
+         ap-northeast-3
+         ap-south-1
+         ap-southeast-1
+         ap-southeast-2
+         ap-southeast-3
+         ca-central-1
+         eu-central-1
+         eu-north-1
+         eu-south-1
+         eu-west-1
+         eu-west-2
+         eu-west-3
+         me-south-1
+         sa-east-1
+         us-east-1
+         us-east-2
+         us-west-1
+         us-west-2
+     }
+
+regions=($regions)
+
+log() {
+    echo "$@" >&2
+}
+
+if [ "$#" -ne 1 ]; then
+    log "Usage: ./upload-amazon-image.sh IMAGE_OUTPUT"
+    exit 1
+fi
+
+# result of the amazon-image from nixos/release.nix
+store_path=$1
+
+if [ ! -e "$store_path" ]; then
+    log "Store path: $store_path does not exist, fetching..."
+    nix-store --realise "$store_path"
+fi
+
+if [ ! -d "$store_path" ]; then
+    log "store_path: $store_path is not a directory. aborting"
+    exit 1
+fi
+
+read_image_info() {
+    if [ ! -e "$store_path/nix-support/image-info.json" ]; then
+        log "Image missing metadata"
+        exit 1
+    fi
+    jq -r "$1" "$store_path/nix-support/image-info.json"
+}
+
+# We handle a single image per invocation, store all attributes in
+# globals for convenience.
+zfs_disks=$(read_image_info .disks)
+is_zfs_image=
+if jq -e .boot <<< "$zfs_disks"; then
+  is_zfs_image=1
+  zfs_boot=".disks.boot"
+fi
+image_label="$(read_image_info .label)${is_zfs_image:+-ZFS}"
+image_system=$(read_image_info .system)
+image_files=( $(read_image_info ".disks.root.file") )
+
+image_logical_bytes=$(read_image_info "${zfs_boot:-.disks.root}.logical_bytes")
+
+if [[ -n "$is_zfs_image" ]]; then
+  image_files+=( $(read_image_info .disks.boot.file) )
+fi
+
+# Derived attributes
+
+image_logical_gigabytes=$(((image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB
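+# Worked example of the rounding above, in integer arithmetic: 2147483649
+# bytes (2 GiB + 1 byte) gives 2147483648/1073741824 + 1 = 3, while exactly
+# 2147483648 bytes (2 GiB) gives 2147483647/1073741824 + 1 = 2.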
+
+case "$image_system" in
+    aarch64-linux)
+        amazon_arch=arm64
+        ;;
+    x86_64-linux)
+        amazon_arch=x86_64
+        ;;
+    *)
+        log "Unknown system: $image_system"
+        exit 1
+esac
+
+image_name="NixOS-${image_label}-${image_system}"
+image_description="NixOS ${image_label} ${image_system}"
+
+log "Image Details:"
+log " Name: $image_name"
+log " Description: $image_description"
+log " Size (gigabytes): $image_logical_gigabytes"
+log " System: $image_system"
+log " Amazon Arch: $amazon_arch"
+
+read_state() {
+    local state_key=$1
+    local type=$2
+
+    cat "$state_dir/$state_key.$type" 2>/dev/null || true
+}
+
+write_state() {
+    local state_key=$1
+    local type=$2
+    local val=$3
+
+    mkdir -p "$state_dir"
+    echo "$val" > "$state_dir/$state_key.$type"
+}
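+# Example of the resulting state layout under $state_dir (the image label and
+# system shown here are hypothetical):
+#   eu-west-1.23.05.1234.x86_64-linux.task_id
+#   eu-west-1.23.05.1234.x86_64-linux.snapshot_id
+#   eu-west-1.23.05.1234.x86_64-linux.ami_id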
+
+wait_for_import() {
+    local region=$1
+    local task_id=$2
+    local state snapshot_id
+    log "Waiting for import task $task_id to be completed"
+    while true; do
+        read -r state message snapshot_id < <(
+            aws ec2 describe-import-snapshot-tasks --region "$region" --import-task-ids "$task_id" | \
+                jq -r '.ImportSnapshotTasks[].SnapshotTaskDetail | "\(.Status) \(.StatusMessage) \(.SnapshotId)"'
+        )
+        log " ... state=$state message=$message snapshot_id=$snapshot_id"
+        case "$state" in
+            active)
+                sleep 10
+                ;;
+            completed)
+                echo "$snapshot_id"
+                return
+                ;;
+            *)
+                log "Unexpected snapshot import state: '${state}'"
+                log "Full response: "
+                aws ec2 describe-import-snapshot-tasks --region "$region" --import-task-ids "$task_id" >&2
+                exit 1
+                ;;
+        esac
+    done
+}
+
+wait_for_image() {
+    local region=$1
+    local ami_id=$2
+    local state
+    log "Waiting for image $ami_id to be available"
+
+    while true; do
+        read -r state < <(
+            aws ec2 describe-images --image-ids "$ami_id" --region "$region" | \
+                jq -r ".Images[].State"
+        )
+        log " ... state=$state"
+        case "$state" in
+            pending)
+                sleep 10
+                ;;
+            available)
+                return
+                ;;
+            *)
+                log "Unexpected AMI state: '${state}'"
+                exit 1
+                ;;
+        esac
+    done
+}
+
+
+make_image_public() {
+    local region=$1
+    local ami_id=$2
+
+    wait_for_image "$region" "$ami_id"
+
+    log "Making image $ami_id public"
+
+    aws ec2 modify-image-attribute \
+        --image-id "$ami_id" --region "$region" --launch-permission 'Add={Group=all}' >&2
+}
+
+upload_image() {
+    local region=$1
+
+    for image_file in "${image_files[@]}"; do
+        local aws_path=${image_file#/}
+
+        if [[ -n "$is_zfs_image" ]]; then
+            local suffix=${image_file%.*}
+            suffix=${suffix##*.}
+        fi
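+        # Illustrative example (the store path is hypothetical): for
+        # image_file=/nix/store/xxxx-nixos-amazon-image.root.vhd the two
+        # expansions above first strip ".vhd" and then everything up to the
+        # last remaining ".", leaving suffix=root; the boot disk of a ZFS
+        # image yields suffix=boot.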
+
+        local state_key="$region.$image_label${suffix:+.${suffix}}.$image_system"
+        local task_id
+        task_id=$(read_state "$state_key" task_id)
+        local snapshot_id
+        snapshot_id=$(read_state "$state_key" snapshot_id)
+        local ami_id
+        ami_id=$(read_state "$state_key" ami_id)
+
+        if [ -z "$task_id" ]; then
+            log "Checking for image on S3"
+            if ! aws s3 ls --region "$region" "s3://${bucket}/${aws_path}" >&2; then
+                log "Image missing from aws, uploading"
+                aws s3 cp --region "$region" "$image_file" "s3://${bucket}/${aws_path}" >&2
+            fi
+
+            log "Importing image from S3 path s3://$bucket/$aws_path"
+
+            task_id=$(aws ec2 import-snapshot --role-name "$service_role_name" --disk-container "{
+              \"Description\": \"nixos-image-${image_label}-${image_system}\",
+              \"Format\": \"vhd\",
+              \"UserBucket\": {
+                  \"S3Bucket\": \"$bucket\",
+                  \"S3Key\": \"$aws_path\"
+              }
+            }" --region "$region" | jq -r '.ImportTaskId')
+
+            write_state "$state_key" task_id "$task_id"
+        fi
+
+        if [ -z "$snapshot_id" ]; then
+            snapshot_id=$(wait_for_import "$region" "$task_id")
+            write_state "$state_key" snapshot_id "$snapshot_id"
+        fi
+    done
+
+    if [ -z "$ami_id" ]; then
+        log "Registering snapshot $snapshot_id as AMI"
+
+        local block_device_mappings=(
+            "DeviceName=/dev/xvda,Ebs={SnapshotId=$snapshot_id,VolumeSize=$image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}"
+        )
+
+        if [[ -n "$is_zfs_image" ]]; then
+            local root_snapshot_id=$(read_state "$region.$image_label.root.$image_system" snapshot_id)
+
+            local root_image_logical_bytes=$(read_image_info ".disks.root.logical_bytes")
+            local root_image_logical_gigabytes=$(((root_image_logical_bytes-1)/1024/1024/1024+1)) # Round to the next GB
+
+            block_device_mappings+=(
+                "DeviceName=/dev/xvdb,Ebs={SnapshotId=$root_snapshot_id,VolumeSize=$root_image_logical_gigabytes,DeleteOnTermination=true,VolumeType=gp3}"
+            )
+        fi
+
+
+        local extra_flags=(
+            --root-device-name /dev/xvda
+            --sriov-net-support simple
+            --ena-support
+            --virtualization-type hvm
+        )
+
+        block_device_mappings+=("DeviceName=/dev/sdb,VirtualName=ephemeral0")
+        block_device_mappings+=("DeviceName=/dev/sdc,VirtualName=ephemeral1")
+        block_device_mappings+=("DeviceName=/dev/sdd,VirtualName=ephemeral2")
+        block_device_mappings+=("DeviceName=/dev/sde,VirtualName=ephemeral3")
+
+        ami_id=$(
+            aws ec2 register-image \
+                --name "$image_name" \
+                --description "$image_description" \
+                --region "$region" \
+                --architecture $amazon_arch \
+                --block-device-mappings "${block_device_mappings[@]}" \
+                --boot-mode $(read_image_info .boot_mode) \
+                "${extra_flags[@]}" \
+                | jq -r '.ImageId'
+              )
+
+        write_state "$state_key" ami_id "$ami_id"
+    fi
+
+    [[ -v PRIVATE ]] || make_image_public "$region" "$ami_id"
+
+    echo "$ami_id"
+}
+
+copy_to_region() {
+    local region=$1
+    local from_region=$2
+    local from_ami_id=$3
+
+    state_key="$region.$image_label.$image_system"
+    ami_id=$(read_state "$state_key" ami_id)
+
+    if [ -z "$ami_id" ]; then
+        log "Copying $from_ami_id to $region"
+        ami_id=$(
+            aws ec2 copy-image \
+                --region "$region" \
+                --source-region "$from_region" \
+                --source-image-id "$from_ami_id" \
+                --name "$image_name" \
+                --description "$image_description" \
+                | jq -r '.ImageId'
+              )
+
+        write_state "$state_key" ami_id "$ami_id"
+    fi
+
+    [[ -v PRIVATE ]] || make_image_public "$region" "$ami_id"
+
+    echo "$ami_id"
+}
+
+upload_all() {
+    home_image_id=$(upload_image "$home_region")
+    jq -n \
+       --arg key "$home_region.$image_system" \
+       --arg value "$home_image_id" \
+       '$ARGS.named'
+
+    for region in "${regions[@]}"; do
+        if [ "$region" = "$home_region" ]; then
+            continue
+        fi
+        copied_image_id=$(copy_to_region "$region" "$home_region" "$home_image_id")
+
+        jq -n \
+           --arg key "$region.$image_system" \
+           --arg value "$copied_image_id" \
+           '$ARGS.named'
+    done
+}
+
+upload_all | jq --slurp from_entries
diff --git a/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh b/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh
new file mode 100755
index 000000000000..0eec4d041108
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/gce/create-gce.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env nix-shell
+#! nix-shell -i bash -p google-cloud-sdk
+
+set -euo pipefail
+
+BUCKET_NAME="${BUCKET_NAME:-nixos-cloud-images}"
+TIMESTAMP="$(date +%Y%m%d%H%M)"
+export TIMESTAMP
+
+nix-build '<nixpkgs/nixos/lib/eval-config.nix>' \
+   -A config.system.build.googleComputeImage \
+   --arg modules "[ <nixpkgs/nixos/modules/virtualisation/google-compute-image.nix> ]" \
+   --argstr system x86_64-linux \
+   -o gce \
+   -j 10
+
+img_path=$(echo gce/*.tar.gz)
+img_name=${IMAGE_NAME:-$(basename "$img_path")}
+img_id=$(echo "$img_name" | sed 's|.raw.tar.gz$||;s|\.|-|g;s|_|-|g')
+img_family=$(echo "$img_id" | cut -d - -f1-4)
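+# Illustrative transformation (the tarball name is hypothetical):
+#   img_name:   nixos-image-23.05.1234.abcdef-x86_64-linux.raw.tar.gz
+#   img_id:     nixos-image-23-05-1234-abcdef-x86-64-linux
+#   img_family: nixos-image-23-05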
+
+if ! gsutil ls "gs://${BUCKET_NAME}/$img_name"; then
+  gsutil cp "$img_path" "gs://${BUCKET_NAME}/$img_name"
+  gsutil acl ch -u AllUsers:R "gs://${BUCKET_NAME}/$img_name"
+
+  gcloud compute images create \
+    "$img_id" \
+    --source-uri "gs://${BUCKET_NAME}/$img_name" \
+    --family="$img_family"
+
+  gcloud compute images add-iam-policy-binding \
+    "$img_id" \
+    --member='allAuthenticatedUsers' \
+    --role='roles/compute.imageUser'
+fi
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix
new file mode 100644
index 000000000000..62a6e1f9aa3a
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image-inner.nix
@@ -0,0 +1,20 @@
+# Edit this configuration file to define what should be installed on
+# your system.  Help is available in the configuration.nix(5) man page
+# and in the NixOS manual (accessible by running ‘nixos-help’).
+
+{ config, pkgs, lib, modulesPath, ... }:
+
+{
+  imports =
+    [
+      # Include the default lxd configuration.
+      "${modulesPath}/modules/virtualisation/lxc-container.nix"
+      # Include the container-specific autogenerated configuration.
+      ./lxd.nix
+    ];
+
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+
+  system.stateVersion = "@stateVersion@"; # Did you read the comment?
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image.nix b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image.nix
new file mode 100644
index 000000000000..b77f9f5aabe0
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-container-image.nix
@@ -0,0 +1,31 @@
+{ lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../../modules/virtualisation/lxc-container.nix
+  ];
+
+  virtualisation.lxc.templates.nix = {
+    enable = true;
+    target = "/etc/nixos/lxd.nix";
+    template = ./nix.tpl;
+    when = [ "create" "copy" ];
+  };
+
+  # copy the config for nixos-rebuild
+  system.activationScripts.config = let
+    config = pkgs.substituteAll {
+      src = ./lxd-container-image-inner.nix;
+      stateVersion = lib.trivial.release;
+    };
+  in ''
+    if [ ! -e /etc/nixos/configuration.nix ]; then
+      mkdir -p /etc/nixos
+      cp ${config} /etc/nixos/configuration.nix
+    fi
+  '';
+
+  # Network
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix
new file mode 100644
index 000000000000..c1c50b32ff5b
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image-inner.nix
@@ -0,0 +1,20 @@
+# Edit this configuration file to define what should be installed on
+# your system.  Help is available in the configuration.nix(5) man page
+# and in the NixOS manual (accessible by running ‘nixos-help’).
+
+{ config, pkgs, lib, modulesPath, ... }:
+
+{
+  imports =
+    [
+      # Include the default lxd configuration.
+      "${modulesPath}/virtualisation/lxd-virtual-machine.nix"
+      # Include the VM-specific autogenerated configuration.
+      ./lxd.nix
+    ];
+
+  networking.useDHCP = false;
+  networking.interfaces.eth0.useDHCP = true;
+
+  system.stateVersion = "@stateVersion@"; # Did you read the comment?
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix
new file mode 100644
index 000000000000..0d96eea0e2d2
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/lxd-virtual-machine-image.nix
@@ -0,0 +1,31 @@
+{ lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../../modules/virtualisation/lxd-virtual-machine.nix
+  ];
+
+  virtualisation.lxc.templates.nix = {
+    enable = true;
+    target = "/etc/nixos/lxd.nix";
+    template = ./nix.tpl;
+    when = ["create" "copy"];
+  };
+
+  # copy the config for nixos-rebuild
+  system.activationScripts.config = let
+    config = pkgs.substituteAll {
+      src = ./lxd-virtual-machine-image-inner.nix;
+      stateVersion = lib.trivial.release;
+    };
+  in ''
+    if [ ! -e /etc/nixos/configuration.nix ]; then
+      mkdir -p /etc/nixos
+      cp ${config} /etc/nixos/configuration.nix
+    fi
+  '';
+
+  # Network
+  networking.useDHCP = false;
+  networking.interfaces.enp5s0.useDHCP = true;
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/lxd/nix.tpl b/nixpkgs/nixos/maintainers/scripts/lxd/nix.tpl
new file mode 100644
index 000000000000..25ae1bc399f2
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/lxd/nix.tpl
@@ -0,0 +1,7 @@
+{ lib, config, pkgs, ... }:
+
+# WARNING: THIS CONFIGURATION IS AUTOGENERATED AND WILL BE OVERWRITTEN AUTOMATICALLY
+
+{
+  networking.hostName = "{{ container.name }}";
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/oci/create-image.sh b/nixpkgs/nixos/maintainers/scripts/oci/create-image.sh
new file mode 100755
index 000000000000..0d7332a0b272
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/oci/create-image.sh
@@ -0,0 +1,24 @@
+#! /usr/bin/env bash
+
+set -euo pipefail
+
+export NIX_PATH=nixpkgs=$(dirname $(readlink -f $0))/../../../..
+export NIXOS_CONFIG=$(dirname $(readlink -f $0))/../../../modules/virtualisation/oci-image.nix
+
+if (( $# < 1 )); then
+    (
+    echo "Usage: create-image.sh <architecture>"
+    echo
+    echo "Where <architecture> is one of:"
+    echo "  x86_64-linux"
+    echo "  aarch64-linux"
+    ) >&2
+    exit 1
+fi
+
+system="$1"; shift
+
+nix-build '<nixpkgs/nixos>' \
+    -A config.system.build.OCIImage \
+    --argstr system "$system" \
+    --option system-features kvm \
+    -o oci-image
diff --git a/nixpkgs/nixos/maintainers/scripts/oci/upload-image.sh b/nixpkgs/nixos/maintainers/scripts/oci/upload-image.sh
new file mode 100755
index 000000000000..e4870e94bf54
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/oci/upload-image.sh
@@ -0,0 +1,100 @@
+#! /usr/bin/env bash
+
+set -euo pipefail
+
+script_dir="$(dirname $(readlink -f $0))"
+nixpkgs_root="$script_dir/../../../.."
+export NIX_PATH="nixpkgs=$nixpkgs_root"
+
+cat - <<EOF
+This script will locally build a NixOS image and upload it as a Custom Image
+using oci-cli. Make sure that an API key for the tenancy administrator has been
+added to '~/.oci'.
+For more info about configuring oci-cli, please visit
+https://docs.cloud.oracle.com/iaas/Content/API/Concepts/apisigningkey.htm#Required_Keys_and_OCIDs
+
+EOF
+
+qcow="oci-image/nixos.qcow2"
+if [ ! -f "$qcow" ]; then
+    echo "OCI image $qcow does not exist"
+    echo "Building image with create-image.sh for 'x86_64-linux'"
+    "$script_dir/create-image.sh" x86_64-linux
+    [ -f "$qcow" ] || { echo "Build failed: image not present after build"; exit 1; }
+else
+    echo "Using prebuilt image $qcow"
+fi
+
+cli="$(
+  nix-build '<nixpkgs>' \
+    --no-out-link \
+    -A oci-cli
+)"
+
+PATH="$cli/bin:$PATH"
+bucket="_TEMP_NIXOS_IMAGES_$RANDOM"
+
+echo "Creating a temporary bucket"
+root_ocid="$(
+  oci iam compartment list \
+  --all \
+  --compartment-id-in-subtree true \
+  --access-level ACCESSIBLE \
+  --include-root \
+  --raw-output \
+  --query "data[?contains(\"id\",'tenancy')].id | [0]"
+)"
+bucket_ocid=$(
+  oci os bucket create \
+    -c "$root_ocid" \
+    --name "$bucket" \
+    --raw-output \
+    --query "data.id"
+)
+# Clean up bucket on script termination
+trap 'echo Removing temporary bucket; oci os bucket delete --force --name "$bucket"' INT TERM EXIT
+
+echo "Uploading image to temporary bucket"
+oci os object put -bn "$bucket" --file "$qcow"
+
+echo "Importing image as a Custom Image"
+bucket_ns="$(oci os ns get --query "data" --raw-output)"
+image_id="$(
+  oci compute image import from-object \
+    -c "$root_ocid" \
+    --namespace "$bucket_ns" \
+    --bucket-name "$bucket" \
+    --name nixos.qcow2 \
+    --operating-system NixOS \
+    --source-image-type QCOW2 \
+    --launch-mode PARAVIRTUALIZED \
+    --display-name NixOS \
+    --raw-output \
+    --query "data.id"
+)"
+
+cat - <<EOF
+Image created! Please mark all available shapes as compatible with this image by
+visiting the following link and by selecting the 'Edit Details' button on:
+https://cloud.oracle.com/compute/images/$image_id
+EOF
+
+# Workaround until https://github.com/oracle/oci-cli/issues/399 is addressed
+echo "Sleeping for 15 minutes before cleaning up files in the temporary bucket"
+sleep $((15 * 60))
+
+echo "Deleting image from bucket"
+par_id="$(
+  oci os preauth-request list \
+    --bucket-name "$bucket" \
+    --raw-output \
+    --query "data[0].id"
+)"
+
+if [[ -n $par_id ]]; then
+  oci os preauth-request delete \
+    --bucket-name "$bucket" \
+    --par-id "$par_id"
+fi
+
+oci os object delete -bn "$bucket" --object-name nixos.qcow2 --force
diff --git a/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix b/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
new file mode 100644
index 000000000000..936dcee12949
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image-zfs.nix
@@ -0,0 +1,101 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image-zfs.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) mkOption types;
+  copyChannel = true;
+  cfg = config.openstackImage;
+  imageBootMode = if config.openstack.efi then "uefi" else "legacy-bios";
+in
+{
+  imports = [
+    ../../../modules/virtualisation/openstack-config.nix
+  ] ++ (lib.optional copyChannel ../../../modules/installer/cd-dvd/channel.nix);
+
+
+  options.openstackImage = {
+    name = mkOption {
+      type = types.str;
+      description = lib.mdDoc "The name of the generated derivation";
+      default = "nixos-openstack-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+    };
+
+    sizeMB = mkOption {
+      type = types.int;
+      default = 8192;
+      description = lib.mdDoc "The size in MB of the image";
+    };
+
+    format = mkOption {
+      type = types.enum [ "raw" "qcow2" ];
+      default = "qcow2";
+      description = lib.mdDoc "The image format to output";
+    };
+  };
+
+  config = {
+    documentation.enable = copyChannel;
+    openstack = {
+      efi = true;
+      zfs = {
+        enable = true;
+        datasets = {
+          "tank/system/root".mount = "/";
+          "tank/system/var".mount = "/var";
+          "tank/local/nix".mount = "/nix";
+          "tank/user/home".mount = "/home";
+        };
+      };
+    };
+
+    system.build.openstackImage = import ../../../lib/make-single-disk-zfs-image.nix {
+      inherit lib config;
+      inherit (cfg) contents format name;
+      pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+
+      configFile = pkgs.writeText "configuration.nix"
+        ''
+          { modulesPath, ... }: {
+            imports = [ "''${modulesPath}/virtualisation/openstack-config.nix" ];
+            openstack.zfs.enable = true;
+          }
+        '';
+
+      includeChannel = copyChannel;
+
+      bootSize = 1000;
+
+      rootSize = cfg.sizeMB;
+      rootPoolProperties = {
+        ashift = 12;
+        autoexpand = "on";
+      };
+
+      datasets = config.openstack.zfs.datasets;
+
+      postVM = ''
+         extension=''${rootDiskImage##*.}
+         friendlyName=$out/${cfg.name}
+         rootDisk="$friendlyName.root.$extension"
+         mv "$rootDiskImage" "$rootDisk"
+
+         mkdir -p $out/nix-support
+         echo "file ${cfg.format} $rootDisk" >> $out/nix-support/hydra-build-products
+
+        ${pkgs.jq}/bin/jq -n \
+          --arg system_label ${lib.escapeShellArg config.system.nixos.label} \
+          --arg system ${lib.escapeShellArg pkgs.stdenv.hostPlatform.system} \
+          --arg root_logical_bytes "$(${pkgs.qemu_kvm}/bin/qemu-img info --output json "$rootDisk" | ${pkgs.jq}/bin/jq '."virtual-size"')" \
+          --arg boot_mode "${imageBootMode}" \
+          --arg root "$rootDisk" \
+         '{}
+           | .label = $system_label
+           | .boot_mode = $boot_mode
+           | .system = $system
+           | .disks.root.logical_bytes = $root_logical_bytes
+           | .disks.root.file = $root
+           ' > $out/nix-support/image-info.json
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image.nix b/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image.nix
new file mode 100644
index 000000000000..6728a98758b8
--- /dev/null
+++ b/nixpkgs/nixos/maintainers/scripts/openstack/openstack-image.nix
@@ -0,0 +1,27 @@
+# nix-build '<nixpkgs/nixos>' -A config.system.build.openstackImage --arg configuration "{ imports = [ ./nixos/maintainers/scripts/openstack/openstack-image.nix ]; }"
+
+{ config, lib, pkgs, ... }:
+let
+  copyChannel = true;
+in
+{
+  imports = [
+    ../../../modules/virtualisation/openstack-config.nix
+  ] ++ (lib.optional copyChannel ../../../modules/installer/cd-dvd/channel.nix);
+
+  documentation.enable = copyChannel;
+
+  system.build.openstackImage = import ../../../lib/make-disk-image.nix {
+    inherit lib config copyChannel;
+    additionalSpace = "1024M";
+    pkgs = import ../../../.. { inherit (pkgs) system; }; # ensure we use the regular qemu-kvm package
+    format = "qcow2";
+    configFile = pkgs.writeText "configuration.nix"
+      ''
+        {
+          imports = [ <nixpkgs/nixos/modules/virtualisation/openstack-config.nix> ];
+        }
+      '';
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/appstream.nix b/nixpkgs/nixos/modules/config/appstream.nix
new file mode 100644
index 000000000000..5b48f6e1705d
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/appstream.nix
@@ -0,0 +1,25 @@
+{ config, lib, ... }:
+
+with lib;
+{
+  options = {
+    appstream.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to install files to support the
+        [AppStream metadata specification](https://www.freedesktop.org/software/appstream/docs/index.html).
+      '';
+    };
+  };
+
+  config = mkIf config.appstream.enable {
+    environment.pathsToLink = [
+      # per component metadata
+      "/share/metainfo"
+      # legacy path for above
+      "/share/appdata"
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/console.nix b/nixpkgs/nixos/modules/config/console.nix
new file mode 100644
index 000000000000..442cfe9292ca
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/console.nix
@@ -0,0 +1,251 @@
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.console;
+
+  makeColor = i: concatMapStringsSep "," (x: "0x" + substring (2*i) 2 x);
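+  # For example, makeColor 0 [ "002b36" "dc322f" ] evaluates to "0x00,0xdc":
+  # channel i (0 = red, 1 = green, 2 = blue) is taken from the corresponding
+  # two hex digits of every palette entry and joined with commas, which is the
+  # format the vt.default_* kernel parameters set below expect.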
+
+  isUnicode = '' \
+    LOCALE_ARCHIVE=${config.i18n.glibcLocales}/lib/locale/locale-archive \
+    LANG=${config.i18n.defaultLocale} \
+    LC_IDENTIFICATION=${config.i18n.defaultLocale} \
+    locale -k identification-codeset | grep -i UTF-8 \
+  '';
+
+  optimizedKeymap = pkgs.runCommand "keymap" {
+    nativeBuildInputs = with pkgs.buildPackages; [ kbd locale ];
+    LOADKEYS_KEYMAP_PATH = "${consoleEnv pkgs.kbd}/share/keymaps/**";
+    preferLocalBuild = true;
+  } ''
+    if ${isUnicode} ; then
+      loadkeys -b -u "${cfg.keyMap}" > $out
+    else
+      loadkeys -b "${cfg.keyMap}" > $out
+    fi
+  '';
+
+  # Sadly, systemd-vconsole-setup doesn't support binary keymaps.
+  vconsoleConf = pkgs.writeText "vconsole.conf" ''
+    KEYMAP=${cfg.keyMap}
+    ${optionalString (cfg.font != null) "FONT=${cfg.font}"}
+  '';
+
+  consoleEnv = kbd: pkgs.buildEnv {
+    name = "console-env";
+    paths = [ kbd ] ++ cfg.packages;
+    pathsToLink = [
+      "/share/consolefonts"
+      "/share/consoletrans"
+      "/share/keymaps"
+      "/share/unimaps"
+    ];
+  };
+in
+
+{
+  ###### interface
+
+  options.console  = {
+    enable = mkEnableOption (lib.mdDoc "virtual console") // {
+      default = true;
+    };
+
+    font = mkOption {
+      type = with types; nullOr (either str path);
+      default = null;
+      example = "LatArCyrHeb-16";
+      description = mdDoc ''
+        The font used for the virtual consoles.
+        Can be `null`, a font name, or a path to a PSF font file.
+
+        Use `null` to let the kernel choose a built-in font.
+        The default is 8x16, and, as of Linux 5.3, Terminus 32 bold for display
+        resolutions of 2560x1080 and higher.
+        These fonts cover the [IBM437][] character set.
+
+        [IBM437]: https://en.wikipedia.org/wiki/Code_page_437
+      '';
+    };
+
+    keyMap = mkOption {
+      type = with types; either str path;
+      default = "us";
+      example = "fr";
+      description = lib.mdDoc ''
+        The keyboard mapping table for the virtual consoles.
+      '';
+    };
+
+    colors = mkOption {
+      type = with types; listOf (strMatching "[[:xdigit:]]{6}");
+      default = [ ];
+      example = [
+        "002b36" "dc322f" "859900" "b58900"
+        "268bd2" "d33682" "2aa198" "eee8d5"
+        "002b36" "cb4b16" "586e75" "657b83"
+        "839496" "6c71c4" "93a1a1" "fdf6e3"
+      ];
+      description = lib.mdDoc ''
+        The 16 colors palette used by the virtual consoles.
+        Leave empty to use the default colors.
+        Colors must be in hexadecimal format and listed in
+        order from color 0 to color 15.
+      '';
+
+    };
+
+    packages = mkOption {
+      type = types.listOf types.package;
+      default = [ ];
+      description = lib.mdDoc ''
+        List of additional packages that provide console fonts, keymaps and
+        other resources for virtual consoles use.
+      '';
+    };
+
+    useXkbConfig = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If set, configure the virtual console keymap from the xserver
+        keyboard settings.
+      '';
+    };
+
+    earlySetup = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enable setting virtual console options as early as possible (in initrd).
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge [
+    { console.keyMap = with config.services.xserver;
+        mkIf cfg.useXkbConfig
+          (pkgs.runCommand "xkb-console-keymap" { preferLocalBuild = true; } ''
+            '${pkgs.buildPackages.ckbcomp}/bin/ckbcomp' \
+              ${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT)
+                "-I${config.environment.sessionVariables.XKB_CONFIG_ROOT}"
+              } \
+              -model '${xkb.model}' -layout '${xkb.layout}' \
+              -option '${xkb.options}' -variant '${xkb.variant}' > "$out"
+          '');
+    }
+
+    (mkIf (!cfg.enable) {
+      systemd.services = {
+        "serial-getty@ttyS0".enable = false;
+        "serial-getty@hvc0".enable = false;
+        "getty@tty1".enable = false;
+        "autovt@".enable = false;
+        systemd-vconsole-setup.enable = false;
+      };
+    })
+
+    (mkIf cfg.enable (mkMerge [
+      { environment.systemPackages = with pkgs; [ kbd locale ];
+
+        # Let systemd-vconsole-setup.service do the work of setting up the
+        # virtual consoles.
+        environment.etc."vconsole.conf".source = vconsoleConf;
+        # Provide kbd with additional packages.
+        environment.etc.kbd.source = "${consoleEnv pkgs.kbd}/share";
+
+        boot.initrd.preLVMCommands = mkIf (!config.boot.initrd.systemd.enable) (mkBefore ''
+          if ${isUnicode} ; then
+            kbd_mode -u -C /dev/console
+            printf "\033%%G" >> /dev/console
+          else
+            kbd_mode -a -C /dev/console
+            printf "\033%%@" >> /dev/console
+          fi
+          loadkmap < ${optimizedKeymap}
+
+          ${optionalString (cfg.earlySetup && cfg.font != null) ''
+            setfont -C /dev/console $extraUtils/share/consolefonts/font.psf
+          ''}
+        '');
+
+        boot.initrd.systemd.contents = {
+          "/etc/vconsole.conf".source = vconsoleConf;
+          # Add everything if we want full console setup...
+          "/etc/kbd" = lib.mkIf cfg.earlySetup { source = "${consoleEnv config.boot.initrd.systemd.package.kbd}/share"; };
+          # ...but only the keymaps if we don't
+          "/etc/kbd/keymaps" = lib.mkIf (!cfg.earlySetup) { source = "${consoleEnv config.boot.initrd.systemd.package.kbd}/share/keymaps"; };
+        };
+        boot.initrd.systemd.additionalUpstreamUnits = [
+          "systemd-vconsole-setup.service"
+        ];
+        boot.initrd.systemd.storePaths = [
+          "${config.boot.initrd.systemd.package}/lib/systemd/systemd-vconsole-setup"
+          "${config.boot.initrd.systemd.package.kbd}/bin/setfont"
+          "${config.boot.initrd.systemd.package.kbd}/bin/loadkeys"
+          "${config.boot.initrd.systemd.package.kbd.gzip}/bin/gzip" # Fonts and keyboard layouts are compressed
+        ] ++ optionals (cfg.font != null && hasPrefix builtins.storeDir cfg.font) [
+          "${cfg.font}"
+        ] ++ optionals (hasPrefix builtins.storeDir cfg.keyMap) [
+          "${cfg.keyMap}"
+        ];
+
+        systemd.services.reload-systemd-vconsole-setup =
+          { description = "Reset console on configuration changes";
+            wantedBy = [ "multi-user.target" ];
+            restartTriggers = [ vconsoleConf (consoleEnv pkgs.kbd) ];
+            reloadIfChanged = true;
+            serviceConfig =
+              { RemainAfterExit = true;
+                ExecStart = "${pkgs.coreutils}/bin/true";
+                ExecReload = "/run/current-system/systemd/bin/systemctl restart systemd-vconsole-setup";
+              };
+          };
+      }
+
+      (mkIf (cfg.colors != []) {
+        boot.kernelParams = [
+          "vt.default_red=${makeColor 0 cfg.colors}"
+          "vt.default_grn=${makeColor 1 cfg.colors}"
+          "vt.default_blu=${makeColor 2 cfg.colors}"
+        ];
+      })
+
+      (mkIf (cfg.earlySetup && cfg.font != null && !config.boot.initrd.systemd.enable) {
+        boot.initrd.extraUtilsCommands = ''
+          mkdir -p $out/share/consolefonts
+          ${if substring 0 1 cfg.font == "/" then ''
+            font="${cfg.font}"
+          '' else ''
+            font="$(echo ${consoleEnv pkgs.kbd}/share/consolefonts/${cfg.font}.*)"
+          ''}
+          if [[ $font == *.gz ]]; then
+            gzip -cd $font > $out/share/consolefonts/font.psf
+          else
+            cp -L $font $out/share/consolefonts/font.psf
+          fi
+        '';
+      })
+    ]))
+  ];
+
+  imports = [
+    (mkRenamedOptionModule [ "i18n" "consoleFont" ] [ "console" "font" ])
+    (mkRenamedOptionModule [ "i18n" "consoleKeyMap" ] [ "console" "keyMap" ])
+    (mkRenamedOptionModule [ "i18n" "consoleColors" ] [ "console" "colors" ])
+    (mkRenamedOptionModule [ "i18n" "consolePackages" ] [ "console" "packages" ])
+    (mkRenamedOptionModule [ "i18n" "consoleUseXkbConfig" ] [ "console" "useXkbConfig" ])
+    (mkRenamedOptionModule [ "boot" "earlyVconsoleSetup" ] [ "console" "earlySetup" ])
+    (mkRenamedOptionModule [ "boot" "extraTTYs" ] [ "console" "extraTTYs" ])
+    (mkRemovedOptionModule [ "console" "extraTTYs" ] ''
+      Since NixOS switched to systemd (circa 2012), TTYs have been spawned on
+      demand, so there is no need to configure them manually.
+    '')
+  ];
+}
diff --git a/nixpkgs/nixos/modules/config/debug-info.nix b/nixpkgs/nixos/modules/config/debug-info.nix
new file mode 100644
index 000000000000..78de26fda440
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/debug-info.nix
@@ -0,0 +1,44 @@
+{ config, lib, ... }:
+
+with lib;
+
+{
+
+  options = {
+
+    environment.enableDebugInfo = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Some NixOS packages provide debug symbols. However, these are
+        not included in the system closure by default to save disk
+        space. Enabling this option causes the debug symbols to appear
+        in {file}`/run/current-system/sw/lib/debug/.build-id`,
+        where tools such as {command}`gdb` can find them.
+        If you need debug symbols for a package that doesn't
+        provide them by default, you can enable them as follows:
+
+            nixpkgs.config.packageOverrides = pkgs: {
+              hello = pkgs.hello.overrideAttrs (oldAttrs: {
+                separateDebugInfo = true;
+              });
+            };
+      '';
+    };
+
+  };
+
+
+  config = mkIf config.environment.enableDebugInfo {
+
+    # FIXME: currently disabled because /lib is already in
+    # environment.pathsToLink, and we can't have both.
+    #environment.pathsToLink = [ "/lib/debug/.build-id" ];
+
+    environment.extraOutputsToInstall = [ "debug" ];
+
+    environment.variables.NIX_DEBUG_INFO_DIRS = [ "/run/current-system/sw/lib/debug" ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/fanout.nix b/nixpkgs/nixos/modules/config/fanout.nix
new file mode 100644
index 000000000000..60ee145f19af
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/fanout.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.fanout;
+  mknodCmds = n: lib.lists.imap0 (i: s:
+    "mknod /dev/fanout${builtins.toString i} c $MAJOR ${builtins.toString i}"
+  ) (lib.lists.replicate n "");
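+  # Sketch of what the helper above produces: mknodCmds 2 evaluates to
+  # [ "mknod /dev/fanout0 c $MAJOR 0" "mknod /dev/fanout1 c $MAJOR 1" ];
+  # $MAJOR is resolved at runtime in the service script below.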
+in
+{
+  options.services.fanout = {
+    enable = lib.mkEnableOption (lib.mdDoc "fanout");
+    fanoutDevices = lib.mkOption {
+      type = lib.types.int;
+      default = 1;
+      description = "Number of /dev/fanout devices";
+    };
+    bufferSize = lib.mkOption {
+      type = lib.types.int;
+      default = 16384;
+      description = "Size of /dev/fanout buffer in bytes";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    boot.extraModulePackages = [ config.boot.kernelPackages.fanout.out ];
+
+    boot.kernelModules = [ "fanout" ];
+
+    boot.extraModprobeConfig = ''
+      options fanout buffersize=${builtins.toString cfg.bufferSize}
+    '';
+
+    systemd.services.fanout = {
+      description = "Bring up /dev/fanout devices";
+      script = ''
+        MAJOR=$(${pkgs.gnugrep}/bin/grep fanout /proc/devices | ${pkgs.gawk}/bin/awk '{print $1}')
+        ${lib.strings.concatLines (mknodCmds cfg.fanoutDevices)}
+      '';
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "root";
+        RemainAfterExit = "yes";
+        Restart = "no";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/fonts/fontconfig.nix b/nixpkgs/nixos/modules/config/fonts/fontconfig.nix
new file mode 100644
index 000000000000..5e2e054f7c4e
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/fonts/fontconfig.nix
@@ -0,0 +1,528 @@
+/*
+
+Configuration files are linked to /etc/fonts/conf.d/
+
+This module generates a package containing the configuration files and links it in /etc/fonts.
+
+Fontconfig reads files in folder name / file name order, so the number prepended to a configuration file name decides the order of parsing.
+A low number means high priority.
+
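+Illustrative resulting layout (all of these files are generated further down in
+this module; only the numeric prefixes decide the order):
+
+  /etc/fonts/conf.d/00-nixos-cache.conf          # font directories and caches
+  /etc/fonts/conf.d/10-nixos-rendering.conf      # hinting/antialiasing defaults
+  /etc/fonts/conf.d/52-nixos-default-fonts.conf  # default font families
+  /etc/fonts/conf.d/53-no-bitmaps.conf           # bitmap font policy
+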
+NOTE: Please take extreme care when adjusting the default settings of this module.
+People care a lot, and I mean A LOT, about their font rendering, and you will be
+The Person That Broke It if it changes in a way people don't like.
+
+See prior art:
+- https://github.com/NixOS/nixpkgs/pull/194594
+- https://github.com/NixOS/nixpkgs/pull/222236
+- https://github.com/NixOS/nixpkgs/pull/222689
+
+And do not repeat our mistakes.
+
+- @K900, March 2023
+
+*/
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.fonts.fontconfig;
+
+  fcBool = x: "<bool>" + (boolToString x) + "</bool>";
+  pkg = pkgs.fontconfig;
+
+  # configuration file to read fontconfig cache
+  # priority 0
+  cacheConf  = makeCacheConf {};
+
+  # generate the font cache setting file
+  # When cross-compiling, we can’t generate the cache, so we skip the
+  # <cachedir> part. fontconfig still works but is a little slower in
+  # looking things up.
+  makeCacheConf = { }:
+    let
+      makeCache = fontconfig: pkgs.makeFontsCache { inherit fontconfig; fontDirectories = config.fonts.packages; };
+      cache     = makeCache pkgs.fontconfig;
+      cache32   = makeCache pkgs.pkgsi686Linux.fontconfig;
+    in
+    pkgs.writeText "fc-00-nixos-cache.conf" ''
+      <?xml version='1.0'?>
+      <!DOCTYPE fontconfig SYSTEM 'urn:fontconfig:fonts.dtd'>
+      <fontconfig>
+        <!-- Font directories -->
+        ${concatStringsSep "\n" (map (font: "<dir>${font}</dir>") config.fonts.packages)}
+        ${optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
+        <!-- Pre-generated font caches -->
+        <cachedir>${cache}</cachedir>
+        ${optionalString (pkgs.stdenv.isx86_64 && cfg.cache32Bit) ''
+          <cachedir>${cache32}</cachedir>
+        ''}
+        ''}
+      </fontconfig>
+    '';
+
+  # rendering settings configuration file
+  # priority 10
+  renderConf = pkgs.writeText "fc-10-nixos-rendering.conf" ''
+    <?xml version='1.0'?>
+    <!DOCTYPE fontconfig SYSTEM 'urn:fontconfig:fonts.dtd'>
+    <fontconfig>
+
+      <!-- Default rendering settings -->
+      <match target="pattern">
+        <edit mode="append" name="hinting">
+          ${fcBool cfg.hinting.enable}
+        </edit>
+        <edit mode="append" name="autohint">
+          ${fcBool cfg.hinting.autohint}
+        </edit>
+      </match>
+
+    </fontconfig>
+  '';
+
+  # local configuration file
+  localConf = pkgs.writeText "fc-local.conf" cfg.localConf;
+
+  # default fonts configuration file
+  # priority 52
+  defaultFontsConf =
+    let genDefault = fonts: name:
+      optionalString (fonts != []) ''
+        <alias binding="same">
+          <family>${name}</family>
+          <prefer>
+          ${concatStringsSep ""
+          (map (font: ''
+            <family>${font}</family>
+          '') fonts)}
+          </prefer>
+        </alias>
+      '';
+    in
+    pkgs.writeText "fc-52-nixos-default-fonts.conf" ''
+    <?xml version='1.0'?>
+    <!DOCTYPE fontconfig SYSTEM 'urn:fontconfig:fonts.dtd'>
+    <fontconfig>
+
+      <!-- Default fonts -->
+      ${genDefault cfg.defaultFonts.sansSerif "sans-serif"}
+
+      ${genDefault cfg.defaultFonts.serif     "serif"}
+
+      ${genDefault cfg.defaultFonts.monospace "monospace"}
+
+      ${genDefault cfg.defaultFonts.emoji "emoji"}
+
+    </fontconfig>
+  '';
+
+  # bitmap font options
+  # priority 53
+  rejectBitmaps = pkgs.writeText "fc-53-no-bitmaps.conf" ''
+    <?xml version="1.0"?>
+    <!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
+    <fontconfig>
+
+    ${optionalString (!cfg.allowBitmaps) ''
+    <!-- Reject bitmap fonts -->
+    <selectfont>
+      <rejectfont>
+        <pattern>
+          <patelt name="scalable"><bool>false</bool></patelt>
+        </pattern>
+      </rejectfont>
+    </selectfont>
+    ''}
+
+    <!-- Use embedded bitmaps in fonts like Calibri? -->
+    <match target="font">
+      <edit name="embeddedbitmap" mode="assign">
+        ${fcBool cfg.useEmbeddedBitmaps}
+      </edit>
+    </match>
+
+    </fontconfig>
+  '';
+
+  # reject Type 1 fonts
+  # priority 53
+  rejectType1 = pkgs.writeText "fc-53-nixos-reject-type1.conf" ''
+    <?xml version="1.0"?>
+    <!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
+    <fontconfig>
+
+    <!-- Reject Type 1 fonts -->
+    <selectfont>
+      <rejectfont>
+        <pattern>
+          <patelt name="fontformat"><string>Type 1</string></patelt>
+        </pattern>
+      </rejectfont>
+    </selectfont>
+
+    </fontconfig>
+  '';
+
+  # Replace default linked config with a different variant
+  replaceDefaultConfig = defaultConfig: newConfig: ''
+    rm $dst/${defaultConfig}
+    ln -s ${pkg.out}/share/fontconfig/conf.avail/${newConfig} \
+          $dst/
+  '';
+
+  # fontconfig configuration package
+  confPkg = pkgs.runCommand "fontconfig-conf" {
+    preferLocalBuild = true;
+  } ''
+    dst=$out/etc/fonts/conf.d
+    mkdir -p $dst
+
+    # fonts.conf
+    ln -s ${pkg.out}/etc/fonts/fonts.conf \
+          $dst/../fonts.conf
+    # TODO: remove this legacy symlink once people stop using packages built before #95358 was merged
+    mkdir -p $out/etc/fonts/2.11
+    ln -s /etc/fonts/fonts.conf \
+          $out/etc/fonts/2.11/fonts.conf
+
+    # fontconfig default config files
+    ln -s ${pkg.out}/etc/fonts/conf.d/*.conf \
+          $dst/
+
+    ${optionalString (!cfg.antialias)
+      (replaceDefaultConfig "10-yes-antialias.conf"
+        "10-no-antialias.conf")
+    }
+
+    ${optionalString (cfg.hinting.style != "slight")
+      (replaceDefaultConfig "10-hinting-slight.conf"
+        "10-hinting-${cfg.hinting.style}.conf")
+    }
+
+    ${optionalString (cfg.subpixel.rgba != "none")
+      (replaceDefaultConfig "10-sub-pixel-none.conf"
+        "10-sub-pixel-${cfg.subpixel.rgba}.conf")
+    }
+
+    ${optionalString (cfg.subpixel.lcdfilter != "default")
+      (replaceDefaultConfig "11-lcdfilter-default.conf"
+        "11-lcdfilter-${cfg.subpixel.lcdfilter}.conf")
+    }
+
+    # 00-nixos-cache.conf
+    ln -s ${cacheConf}  $dst/00-nixos-cache.conf
+
+    # 10-nixos-rendering.conf
+    ln -s ${renderConf}       $dst/10-nixos-rendering.conf
+
+    # 50-user.conf
+    ${optionalString (!cfg.includeUserConf) ''
+    rm $dst/50-user.conf
+    ''}
+
+    # local.conf (indirect priority 51)
+    ${optionalString (cfg.localConf != "") ''
+    ln -s ${localConf}        $dst/../local.conf
+    ''}
+
+    # 52-nixos-default-fonts.conf
+    ln -s ${defaultFontsConf} $dst/52-nixos-default-fonts.conf
+
+    # 53-no-bitmaps.conf
+    ln -s ${rejectBitmaps} $dst/53-no-bitmaps.conf
+
+    ${optionalString (!cfg.allowType1) ''
+    # 53-nixos-reject-type1.conf
+    ln -s ${rejectType1} $dst/53-nixos-reject-type1.conf
+    ''}
+  '';
+
+  # Package with configuration files
+  # this merge all the packages in the fonts.fontconfig.confPackages list
+  fontconfigEtc = pkgs.buildEnv {
+    name  = "fontconfig-etc";
+    paths = cfg.confPackages;
+    ignoreCollisions = true;
+  };
+
+  fontconfigNote = "Consider manually configuring fonts.fontconfig according to personal preference.";
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "allowBitmaps" ] [ "fonts" "fontconfig" "allowBitmaps" ])
+    (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "allowType1" ] [ "fonts" "fontconfig" "allowType1" ])
+    (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "useEmbeddedBitmaps" ] [ "fonts" "fontconfig" "useEmbeddedBitmaps" ])
+    (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "forceAutohint" ] [ "fonts" "fontconfig" "forceAutohint" ])
+    (mkRenamedOptionModule [ "fonts" "fontconfig" "ultimate" "renderMonoTTFAsBitmap" ] [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ])
+    (mkRemovedOptionModule [ "fonts" "fontconfig" "forceAutohint" ] "")
+    (mkRemovedOptionModule [ "fonts" "fontconfig" "renderMonoTTFAsBitmap" ] "")
+    (mkRemovedOptionModule [ "fonts" "fontconfig" "dpi" ] "Use display server-specific options")
+    (mkRemovedOptionModule [ "hardware" "video" "hidpi" "enable" ] fontconfigNote)
+    (mkRemovedOptionModule [ "fonts" "optimizeForVeryHighDPI" ] fontconfigNote)
+  ] ++ lib.forEach [ "enable" "substitutions" "preset" ]
+     (opt: lib.mkRemovedOptionModule [ "fonts" "fontconfig" "ultimate" "${opt}" ] ''
+       The fonts.fontconfig.ultimate module and configuration is obsolete.
+       The repository has since been archived and activity has ceased.
+       https://github.com/bohoomil/fontconfig-ultimate/issues/171.
+       No action should be needed for font configuration, as the fonts.fontconfig
+       module is already used by default.
+     '');
+
+  options = {
+
+    fonts = {
+
+      fontconfig = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            If enabled, a Fontconfig configuration file will be built
+            pointing to a set of default fonts.  If you don't care about
+            running X11 applications or any other program that uses
+            Fontconfig, you can turn this option off and prevent a
+            dependency on all those fonts.
+          '';
+        };
+
+        confPackages = mkOption {
+          internal = true;
+          type     = with types; listOf path;
+          default  = [ ];
+          description = lib.mdDoc ''
+            Fontconfig configuration packages.
+          '';
+        };
+
+        antialias = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Enable font antialiasing. At high resolution (> 200 DPI),
+            antialiasing has no visible effect; users of such displays may want
+            to disable this option.
+          '';
+        };
+
+        localConf = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc ''
+            System-wide customization file contents, has higher priority than
+            `defaultFonts` settings.
+          '';
+        };
+
+        defaultFonts = {
+          monospace = mkOption {
+            type = types.listOf types.str;
+            default = ["DejaVu Sans Mono"];
+            description = lib.mdDoc ''
+              System-wide default monospace font(s). Multiple fonts may be
+              listed in case multiple languages must be supported.
+            '';
+          };
+
+          sansSerif = mkOption {
+            type = types.listOf types.str;
+            default = ["DejaVu Sans"];
+            description = lib.mdDoc ''
+              System-wide default sans serif font(s). Multiple fonts may be
+              listed in case multiple languages must be supported.
+            '';
+          };
+
+          serif = mkOption {
+            type = types.listOf types.str;
+            default = ["DejaVu Serif"];
+            description = lib.mdDoc ''
+              System-wide default serif font(s). Multiple fonts may be listed
+              in case multiple languages must be supported.
+            '';
+          };
+
+          emoji = mkOption {
+            type = types.listOf types.str;
+            default = ["Noto Color Emoji"];
+            description = lib.mdDoc ''
+              System-wide default emoji font(s). Multiple fonts may be listed
+              in case a font does not support all emoji.
+
+              Note that fontconfig matches color emoji fonts preferentially,
+              so if you want to use a black and white font while having
+              a color font installed (e.g. Noto Color Emoji installed alongside
+              Noto Emoji), fontconfig will still choose the color font even
+              when it is later in the list.
+            '';
+          };
+        };
+
+        hinting = {
+          enable = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc ''
+              Enable font hinting. Hinting aligns glyphs to pixel boundaries to
+              improve rendering sharpness at low resolution. At high resolution
+              (> 200 dpi) hinting will do nothing (at best); users of such
+              displays may want to disable this option.
+            '';
+          };
+
+          autohint = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Enable the autohinter in place of the default interpreter.
+              The results are usually lower quality than correctly-hinted
+              fonts, but better than unhinted fonts.
+            '';
+          };
+
+          style = mkOption {
+            type = types.enum ["none" "slight" "medium" "full"];
+            default = "slight";
+            description = lib.mdDoc ''
+              Hintstyle is the amount of font reshaping done to line up
+              to the grid.
+
+              `slight` makes the font slightly fuzzier in order to line up to
+              the grid but better retains its shape, while `full` produces a
+              crisp font that aligns well to the pixel grid but loses more of
+              its shape.
+            '';
+            apply =
+              val:
+              let
+                from = "fonts.fontconfig.hinting.style";
+                val' = lib.removePrefix "hint" val;
+                warning = "The option `${from}` contains a deprecated value `${val}`. Use `${val'}` instead.";
+              in
+              lib.warnIf (lib.hasPrefix "hint" val) warning val';
+          };
+        };
+
+        includeUserConf = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Include the user configuration from
+            {file}`~/.config/fontconfig/fonts.conf` or
+            {file}`~/.config/fontconfig/conf.d`.
+          '';
+        };
+
+        subpixel = {
+
+          rgba = mkOption {
+            default = "none";
+            type = types.enum ["rgb" "bgr" "vrgb" "vbgr" "none"];
+            description = lib.mdDoc ''
+              Subpixel order. The overwhelming majority of displays are
+              `rgb` in their normal orientation. Select
+              `vrgb` for mounting such a display 90 degrees
+              clockwise from its normal orientation or `vbgr`
+              for mounting 90 degrees counter-clockwise. Select
+              `bgr` in the unlikely event of mounting 180
+              degrees from the normal orientation. Reverse these directions in
+              the improbable event that the display's native subpixel order is
+              `bgr`.
+            '';
+          };
+
+          lcdfilter = mkOption {
+            default = "default";
+            type = types.enum ["none" "default" "light" "legacy"];
+            description = lib.mdDoc ''
+              FreeType LCD filter. At high resolution (> 200 DPI), LCD filtering
+              has no visible effect; users of such displays may want to select
+              `none`.
+            '';
+          };
+
+        };
+
+        cache32Bit = mkOption {
+          default = false;
+          type = types.bool;
+          description = lib.mdDoc ''
+            Generate system fonts cache for 32-bit applications.
+          '';
+        };
+
+        allowBitmaps = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Allow bitmap fonts. Set to `false` to ban all
+            bitmap fonts.
+          '';
+        };
+
+        allowType1 = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Allow Type-1 fonts. Default is `false` because of
+            poor rendering.
+          '';
+        };
+
+        useEmbeddedBitmaps = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Use embedded bitmaps in fonts like Calibri.";
+        };
+
+      };
+
+    };
+
+  };
+  config = mkMerge [
+    (mkIf cfg.enable {
+      environment.systemPackages    = [ pkgs.fontconfig ];
+      environment.etc.fonts.source  = "${fontconfigEtc}/etc/fonts/";
+      security.apparmor.includes."abstractions/fonts" = ''
+        # fonts.conf
+        r ${pkg.out}/etc/fonts/fonts.conf,
+
+        # fontconfig default config files
+        r ${pkg.out}/etc/fonts/conf.d/*.conf,
+
+        # 00-nixos-cache.conf
+        r ${cacheConf},
+
+        # 10-nixos-rendering.conf
+        r ${renderConf},
+
+        # 50-user.conf
+        ${optionalString cfg.includeUserConf ''
+        r ${pkg.out}/etc/fonts/conf.d.bak/50-user.conf,
+        ''}
+
+        # local.conf (indirect priority 51)
+        ${optionalString (cfg.localConf != "") ''
+        r ${localConf},
+        ''}
+
+        # 52-nixos-default-fonts.conf
+        r ${defaultFontsConf},
+
+        # 53-no-bitmaps.conf
+        r ${rejectBitmaps},
+
+        ${optionalString (!cfg.allowType1) ''
+        # 53-nixos-reject-type1.conf
+        r ${rejectType1},
+        ''}
+      '';
+    })
+    (mkIf cfg.enable {
+      fonts.fontconfig.confPackages = [ confPkg ];
+    })
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/config/fonts/fontdir.nix b/nixpkgs/nixos/modules/config/fonts/fontdir.nix
new file mode 100644
index 000000000000..3b5eaf5b2d7f
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/fonts/fontdir.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.fonts.fontDir;
+
+  x11Fonts = pkgs.runCommand "X11-fonts" { preferLocalBuild = true; } ''
+    mkdir -p "$out/share/X11/fonts"
+    font_regexp='.*\.\(ttf\|ttc\|otb\|otf\|pcf\|pfa\|pfb\|bdf\)\(\.gz\)?'
+    find ${toString config.fonts.packages} -regex "$font_regexp" \
+      -exec ln -sf -t "$out/share/X11/fonts" '{}' \;
+    cd "$out/share/X11/fonts"
+    ${optionalString cfg.decompressFonts ''
+      ${pkgs.gzip}/bin/gunzip -f *.gz
+    ''}
+    ${pkgs.xorg.mkfontscale}/bin/mkfontscale
+    ${pkgs.xorg.mkfontdir}/bin/mkfontdir
+    cat $(find ${pkgs.xorg.fontalias}/ -name fonts.alias) >fonts.alias
+  '';
+
+in
+
+{
+
+  options = {
+    fonts.fontDir = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to create a directory with links to all fonts in
+          {file}`/run/current-system/sw/share/X11/fonts`.
+        '';
+      };
+
+      decompressFonts = mkOption {
+        type = types.bool;
+        default = config.programs.xwayland.enable;
+        defaultText = literalExpression "config.programs.xwayland.enable";
+        description = lib.mdDoc ''
+          Whether to decompress fonts in
+          {file}`/run/current-system/sw/share/X11/fonts`.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ x11Fonts ];
+    environment.pathsToLink = [ "/share/X11/fonts" ];
+
+    services.xserver.filesSection = ''
+      FontPath "${x11Fonts}/share/X11/fonts"
+    '';
+
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "fonts" "enableFontDir" ] [ "fonts" "fontDir" "enable" ])
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/config/fonts/ghostscript.nix b/nixpkgs/nixos/modules/config/fonts/ghostscript.nix
new file mode 100644
index 000000000000..c41fcdaaa329
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/fonts/ghostscript.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  options = {
+    fonts.enableGhostscriptFonts = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to add the fonts provided by Ghostscript (such as
+        various URW fonts and the “Base-14†Postscript fonts) to the
+        list of system fonts, making them available to X11
+        applications.
+      '';
+    };
+
+  };
+
+  config = mkIf config.fonts.enableGhostscriptFonts {
+    fonts.packages = [ "${pkgs.ghostscript}/share/ghostscript/fonts" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/fonts/packages.nix b/nixpkgs/nixos/modules/config/fonts/packages.nix
new file mode 100644
index 000000000000..37b705ecb345
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/fonts/packages.nix
@@ -0,0 +1,43 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.fonts;
+in
+{
+  imports = [
+    (lib.mkRemovedOptionModule [ "fonts" "enableCoreFonts" ] "Use fonts.packages = [ pkgs.corefonts ]; instead.")
+    (lib.mkRenamedOptionModule [ "fonts" "enableDefaultFonts" ] [ "fonts" "enableDefaultPackages" ])
+    (lib.mkRenamedOptionModule [ "fonts" "fonts" ] [ "fonts" "packages" ])
+  ];
+
+  options = {
+    fonts = {
+      packages = lib.mkOption {
+        type = with lib.types; listOf path;
+        default = [];
+        example = lib.literalExpression "[ pkgs.dejavu_fonts ]";
+        description = lib.mdDoc "List of primary font packages.";
+      };
+
+      enableDefaultPackages = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable a basic set of fonts providing several styles
+          and families and reasonable coverage of Unicode.
+        '';
+      };
+    };
+  };
+
+  config = {
+    fonts.packages = lib.mkIf cfg.enableDefaultPackages (with pkgs; [
+      dejavu_fonts
+      freefont_ttf
+      gyre-fonts # TrueType substitutes for standard PostScript fonts
+      liberation_ttf
+      unifont
+      noto-fonts-color-emoji
+    ]);
+  };
+}
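
A short sketch combining the two options above; the extra font package shown is just an example:

  { pkgs, ... }:
  {
    # Basic set of fonts with reasonable Unicode coverage
    fonts.enableDefaultPackages = true;
    # Additional primary font packages
    fonts.packages = [ pkgs.corefonts ];
  }
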
diff --git a/nixpkgs/nixos/modules/config/gtk/gtk-icon-cache.nix b/nixpkgs/nixos/modules/config/gtk/gtk-icon-cache.nix
new file mode 100644
index 000000000000..62f0cc3f090f
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/gtk/gtk-icon-cache.nix
@@ -0,0 +1,85 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+{
+  options = {
+    gtk.iconCache.enable = mkOption {
+      type = types.bool;
+      default = config.services.xserver.enable;
+      defaultText = literalExpression "config.services.xserver.enable";
+      description = lib.mdDoc ''
+        Whether to build icon theme caches for GTK applications.
+      '';
+    };
+  };
+
+  config = mkIf config.gtk.iconCache.enable {
+
+    # (Re)build icon theme caches
+    # ---------------------------
+    # Each icon theme has its own cache. The difficulty is that many
+    # packages may contribute icons to the same theme by installing
+    # some icons into it.
+    #
+    # For instance, on my current NixOS system, the following packages
+    # (among many others) have icons installed into the hicolor icon
+    # theme: hicolor-icon-theme, psensor, wpa_gui, caja, etc.
+    #
+    # As another example, the mate icon theme has icons installed by the
+    # packages mate-icon-theme, mate-settings-daemon, and libmateweather.
+    #
+    # The HighContrast icon theme also has icons from different packages,
+    # like gnome-theme-extras and meld.
+
+    # When the cache is built, all of its icons have to be known. How to
+    # implement this?
+    #
+    # I think that most themes have all icons installed by only one
+    # package. On my system there are 71 themes installed. Only 3 of them
+    # have icons installed from more than one package.
+    #
+    # If the main package of the theme provides a cache, presumably most
+    # of its icons will be available to applications without running this
+    # module. But additional icons offered by other packages will not be
+    # available. Therefore I think that it is good that the main theme
+    # package installs a cache (although it does not completely fix the
+    # situation for packages installed with nix-env).
+    #
+    # The module solution presented here keeps the cache when there is
+    # only one package contributing icons to the theme. Otherwise it
+    # rebuilds the cache, taking into account the icons provided by all
+    # packages.
+
+    environment.extraSetup = ''
+      # For each icon theme directory ...
+      find $out/share/icons -exec test -d {} ';' -mindepth 1 -maxdepth 1 -print0 | while read -d $'\0' themedir
+      do
+        # In order to build the cache, the theme dir should be
+        # writable. When the theme dir is a symbolic link to somewhere
+        # in the nix store it is not writable and it means that only
+        # one package is contributing to the theme. If it already has
+        # a cache, no rebuild is needed. Otherwise a cache has to be
+        # built, and to be able to do that we first remove the
+        # symbolic link and make a directory, and then make symbolic
+        # links from the original directory into the new one.
+
+        if [ ! -w "$themedir" -a -L "$themedir" -a ! -r "$themedir"/icon-theme.cache ]; then
+          name=$(basename "$themedir")
+          path=$(readlink -f "$themedir")
+          rm "$themedir"
+          mkdir -p "$themedir"
+          ln -s "$path"/* "$themedir"/
+        fi
+
+        # (Re)build the cache if the theme dir is writable, replacing any
+        # existing cache for the theme
+
+        if [ -w "$themedir" ]; then
+          rm -f "$themedir"/icon-theme.cache
+          ${pkgs.buildPackages.gtk3.out}/bin/gtk-update-icon-cache --ignore-theme-index "$themedir"
+        fi
+      done
+    '';
+  };
+
+}
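
A minimal sketch of forcing the cache rebuild on a system where it would not be enabled by default (the option already follows services.xserver.enable):

  { ... }:
  {
    # Merge and (re)build icon-theme caches even without the X server module
    gtk.iconCache.enable = true;
  }
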
diff --git a/nixpkgs/nixos/modules/config/i18n.nix b/nixpkgs/nixos/modules/config/i18n.nix
new file mode 100644
index 000000000000..b19d38091e75
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/i18n.nix
@@ -0,0 +1,113 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+
+  options = {
+
+    i18n = {
+      glibcLocales = mkOption {
+        type = types.path;
+        default = pkgs.glibcLocales.override {
+          allLocales = any (x: x == "all") config.i18n.supportedLocales;
+          locales = config.i18n.supportedLocales;
+        };
+        defaultText = literalExpression ''
+          pkgs.glibcLocales.override {
+            allLocales = any (x: x == "all") config.i18n.supportedLocales;
+            locales = config.i18n.supportedLocales;
+          }
+        '';
+        example = literalExpression "pkgs.glibcLocales";
+        description = lib.mdDoc ''
+          Customized pkgs.glibcLocales package.
+
+          Changing this option can disable handling of i18n.defaultLocale
+          and i18n.supportedLocales.
+        '';
+      };
+
+      defaultLocale = mkOption {
+        type = types.str;
+        default = "en_US.UTF-8";
+        example = "nl_NL.UTF-8";
+        description = lib.mdDoc ''
+          The default locale.  It determines the language for program
+          messages, the format for dates and times, sort order, and so on.
+          It also determines the character set, such as UTF-8.
+        '';
+      };
+
+      extraLocaleSettings = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = { LC_MESSAGES = "en_US.UTF-8"; LC_TIME = "de_DE.UTF-8"; };
+        description = lib.mdDoc ''
+          A set of additional system-wide locale settings other than
+          `LANG` which can be configured with
+          {option}`i18n.defaultLocale`.
+        '';
+      };
+
+      supportedLocales = mkOption {
+        type = types.listOf types.str;
+        default = unique
+          (builtins.map (l: (replaceStrings [ "utf8" "utf-8" "UTF8" ] [ "UTF-8" "UTF-8" "UTF-8" ] l) + "/UTF-8") (
+            [
+              "C.UTF-8"
+              "en_US.UTF-8"
+              config.i18n.defaultLocale
+            ] ++ (attrValues (filterAttrs (n: v: n != "LANGUAGE") config.i18n.extraLocaleSettings))
+          ));
+        defaultText = literalExpression ''
+          unique
+            (builtins.map (l: (replaceStrings [ "utf8" "utf-8" "UTF8" ] [ "UTF-8" "UTF-8" "UTF-8" ] l) + "/UTF-8") (
+              [
+                "C.UTF-8"
+                "en_US.UTF-8"
+                config.i18n.defaultLocale
+              ] ++ (attrValues (filterAttrs (n: v: n != "LANGUAGE") config.i18n.extraLocaleSettings))
+            ))
+        '';
+        example = ["en_US.UTF-8/UTF-8" "nl_NL.UTF-8/UTF-8" "nl_NL/ISO-8859-1"];
+        description = lib.mdDoc ''
+          List of locales that the system should support.  The value
+          `"all"` means that all locales supported by
+          Glibc will be installed.  A full list of supported locales
+          can be found at <https://sourceware.org/git/?p=glibc.git;a=blob;f=localedata/SUPPORTED>.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    environment.systemPackages =
+      # We increase the priority a little, so that plain glibc in systemPackages can't win.
+      optional (config.i18n.supportedLocales != []) (lib.setPrio (-1) config.i18n.glibcLocales);
+
+    environment.sessionVariables =
+      { LANG = config.i18n.defaultLocale;
+        LOCALE_ARCHIVE = "/run/current-system/sw/lib/locale/locale-archive";
+      } // config.i18n.extraLocaleSettings;
+
+    systemd.globalEnvironment = mkIf (config.i18n.supportedLocales != []) {
+      LOCALE_ARCHIVE = "${config.i18n.glibcLocales}/lib/locale/locale-archive";
+    };
+
+    # ‘/etc/locale.conf’ is used by systemd.
+    environment.etc."locale.conf".source = pkgs.writeText "locale.conf"
+      ''
+        LANG=${config.i18n.defaultLocale}
+        ${concatStringsSep "\n" (mapAttrsToList (n: v: "${n}=${v}") config.i18n.extraLocaleSettings)}
+      '';
+
+  };
+}
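
A sketch of typical use of the locale options above; the German and Dutch locales are only illustrations:

  { ... }:
  {
    i18n.defaultLocale = "en_US.UTF-8";
    i18n.extraLocaleSettings = { LC_TIME = "de_DE.UTF-8"; };
    # supportedLocales is derived from the two settings above by default;
    # list locales explicitly only when additional ones are needed:
    i18n.supportedLocales = [ "en_US.UTF-8/UTF-8" "de_DE.UTF-8/UTF-8" "nl_NL.UTF-8/UTF-8" ];
  }
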
diff --git a/nixpkgs/nixos/modules/config/iproute2.nix b/nixpkgs/nixos/modules/config/iproute2.nix
new file mode 100644
index 000000000000..78bd07d680e2
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/iproute2.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.networking.iproute2;
+in
+{
+  options.networking.iproute2 = {
+    enable = mkEnableOption (lib.mdDoc "copying IP route configuration files");
+    rttablesExtraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Verbatim lines to add to /etc/iproute2/rt_tables
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."iproute2/rt_tables" = {
+      mode = "0644";
+      text = (fileContents "${pkgs.iproute2}/lib/iproute2/rt_tables")
+        + (optionalString (cfg.rttablesExtraConfig != "") "\n\n${cfg.rttablesExtraConfig}");
+    };
+  };
+}
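
A minimal sketch of the options above; the routing table number and name are hypothetical placeholders:

  { ... }:
  {
    networking.iproute2.enable = true;
    # Appended verbatim after the stock rt_tables entries
    networking.iproute2.rttablesExtraConfig = ''
      200 custom    # hypothetical routing table
    '';
  }
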
diff --git a/nixpkgs/nixos/modules/config/krb5/default.nix b/nixpkgs/nixos/modules/config/krb5/default.nix
new file mode 100644
index 000000000000..df7a3f48236f
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/krb5/default.nix
@@ -0,0 +1,369 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.krb5;
+
+  # This is to provide support for old configuration options (as much as is
+  # reasonable). This can be removed after 18.03 was released.
+  defaultConfig = {
+    libdefaults = optionalAttrs (cfg.defaultRealm != null)
+      { default_realm = cfg.defaultRealm; };
+
+    realms = optionalAttrs (lib.all (value: value != null) [
+      cfg.defaultRealm cfg.kdc cfg.kerberosAdminServer
+    ]) {
+      ${cfg.defaultRealm} = {
+        kdc = cfg.kdc;
+        admin_server = cfg.kerberosAdminServer;
+      };
+    };
+
+    domain_realm = optionalAttrs (lib.all (value: value != null) [
+      cfg.domainRealm cfg.defaultRealm
+    ]) {
+      ".${cfg.domainRealm}" = cfg.defaultRealm;
+      ${cfg.domainRealm} = cfg.defaultRealm;
+    };
+  };
+
+  mergedConfig = (recursiveUpdate defaultConfig {
+    inherit (config.krb5)
+      kerberos libdefaults realms domain_realm capaths appdefaults plugins
+      extraConfig config;
+  });
+
+  filterEmbeddedMetadata = value: if isAttrs value then
+    (filterAttrs
+      (attrName: attrValue: attrName != "_module" && attrValue != null)
+        value)
+    else value;
+
+  indent = "  ";
+
+  mkRelation = name: value:
+    if (isList value) then
+      concatMapStringsSep "\n" (mkRelation name) value
+    else "${name} = ${mkVal value}";
+
+  mkVal = value:
+    if (value == true) then "true"
+    else if (value == false) then "false"
+    else if (isInt value) then (toString value)
+    else if (isAttrs value) then
+      let configLines = concatLists
+        (map (splitString "\n")
+          (mapAttrsToList mkRelation value));
+      in
+      (concatStringsSep "\n${indent}"
+        ([ "{" ] ++ configLines))
+      + "\n}"
+    else value;
+
+  mkMappedAttrsOrString = value: concatMapStringsSep "\n"
+    (line: if builtins.stringLength line > 0
+      then "${indent}${line}"
+      else line)
+    (splitString "\n"
+      (if isAttrs value then
+        concatStringsSep "\n"
+            (mapAttrsToList mkRelation value)
+        else value));
+
+in {
+
+  ###### interface
+
+  options = {
+    krb5 = {
+      enable = mkEnableOption (lib.mdDoc "building krb5.conf, configuration file for Kerberos V");
+
+      kerberos = mkOption {
+        type = types.package;
+        default = pkgs.krb5;
+        defaultText = literalExpression "pkgs.krb5";
+        example = literalExpression "pkgs.heimdal";
+        description = lib.mdDoc ''
+          The Kerberos implementation that will be present in
+          `environment.systemPackages` after enabling this
+          service.
+        '';
+      };
+
+      libdefaults = mkOption {
+        type = with types; either attrs lines;
+        default = {};
+        apply = attrs: filterEmbeddedMetadata attrs;
+        example = literalExpression ''
+          {
+            default_realm = "ATHENA.MIT.EDU";
+          };
+        '';
+        description = lib.mdDoc ''
+          Settings used by the Kerberos V5 library.
+        '';
+      };
+
+      realms = mkOption {
+        type = with types; either attrs lines;
+        default = {};
+        example = literalExpression ''
+          {
+            "ATHENA.MIT.EDU" = {
+              admin_server = "athena.mit.edu";
+              kdc = [
+                "athena01.mit.edu"
+                "athena02.mit.edu"
+              ];
+            };
+          };
+        '';
+        apply = attrs: filterEmbeddedMetadata attrs;
+        description = lib.mdDoc "Realm-specific contact information and settings.";
+      };
+
+      domain_realm = mkOption {
+        type = with types; either attrs lines;
+        default = {};
+        example = literalExpression ''
+          {
+            "example.com" = "EXAMPLE.COM";
+            ".example.com" = "EXAMPLE.COM";
+          };
+        '';
+        apply = attrs: filterEmbeddedMetadata attrs;
+        description = lib.mdDoc ''
+          Map of server hostnames to Kerberos realms.
+        '';
+      };
+
+      capaths = mkOption {
+        type = with types; either attrs lines;
+        default = {};
+        example = literalExpression ''
+          {
+            "ATHENA.MIT.EDU" = {
+              "EXAMPLE.COM" = ".";
+            };
+            "EXAMPLE.COM" = {
+              "ATHENA.MIT.EDU" = ".";
+            };
+          };
+        '';
+        apply = attrs: filterEmbeddedMetadata attrs;
+        description = lib.mdDoc ''
+          Authentication paths for non-hierarchical cross-realm authentication.
+        '';
+      };
+
+      appdefaults = mkOption {
+        type = with types; either attrs lines;
+        default = {};
+        example = literalExpression ''
+          {
+            pam = {
+              debug = false;
+              ticket_lifetime = 36000;
+              renew_lifetime = 36000;
+              max_timeout = 30;
+              timeout_shift = 2;
+              initial_timeout = 1;
+            };
+          };
+        '';
+        apply = attrs: filterEmbeddedMetadata attrs;
+        description = lib.mdDoc ''
+          Settings used by some Kerberos V5 applications.
+        '';
+      };
+
+      plugins = mkOption {
+        type = with types; either attrs lines;
+        default = {};
+        example = literalExpression ''
+          {
+            ccselect = {
+              disable = "k5identity";
+            };
+          };
+        '';
+        apply = attrs: filterEmbeddedMetadata attrs;
+        description = lib.mdDoc ''
+          Controls plugin module registration.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = with types; nullOr lines;
+        default = null;
+        example = ''
+          [logging]
+            kdc          = SYSLOG:NOTICE
+            admin_server = SYSLOG:NOTICE
+            default      = SYSLOG:NOTICE
+        '';
+        description = lib.mdDoc ''
+          These lines go to the end of `krb5.conf` verbatim.
+          `krb5.conf` may include any of the relations that are
+          valid for `kdc.conf` (see `man kdc.conf`),
+          but it is not a recommended practice.
+        '';
+      };
+
+      config = mkOption {
+        type = with types; nullOr lines;
+        default = null;
+        example = ''
+          [libdefaults]
+            default_realm = EXAMPLE.COM
+
+          [realms]
+            EXAMPLE.COM = {
+              admin_server = kerberos.example.com
+              kdc = kerberos.example.com
+              default_principal_flags = +preauth
+            }
+
+          [domain_realm]
+            example.com  = EXAMPLE.COM
+            .example.com = EXAMPLE.COM
+
+          [logging]
+            kdc          = SYSLOG:NOTICE
+            admin_server = SYSLOG:NOTICE
+            default      = SYSLOG:NOTICE
+        '';
+        description = lib.mdDoc ''
+          Verbatim `krb5.conf` configuration.  Note that this
+          is mutually exclusive with configuration via
+          `libdefaults`, `realms`,
+          `domain_realm`, `capaths`,
+          `appdefaults`, `plugins` and
+          `extraConfig` configuration options.  Consult
+          `man krb5.conf` for documentation.
+        '';
+      };
+
+      defaultRealm = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "ATHENA.MIT.EDU";
+        description = lib.mdDoc ''
+          DEPRECATED, please use
+          `krb5.libdefaults.default_realm`.
+        '';
+      };
+
+      domainRealm = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "athena.mit.edu";
+        description = lib.mdDoc ''
+          DEPRECATED, please create a map of server hostnames to Kerberos realms
+          in `krb5.domain_realm`.
+        '';
+      };
+
+      kdc = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "kerberos.mit.edu";
+        description = lib.mdDoc ''
+          DEPRECATED, please pass a `kdc` attribute to a realm
+          in `krb5.realms`.
+        '';
+      };
+
+      kerberosAdminServer = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "kerberos.mit.edu";
+        description = lib.mdDoc ''
+          DEPRECATED, please pass an `admin_server` attribute
+          to a realm in `krb5.realms`.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.kerberos ];
+
+    environment.etc."krb5.conf".text = if isString cfg.config
+      then cfg.config
+      else (''
+        [libdefaults]
+        ${mkMappedAttrsOrString mergedConfig.libdefaults}
+
+        [realms]
+        ${mkMappedAttrsOrString mergedConfig.realms}
+
+        [domain_realm]
+        ${mkMappedAttrsOrString mergedConfig.domain_realm}
+
+        [capaths]
+        ${mkMappedAttrsOrString mergedConfig.capaths}
+
+        [appdefaults]
+        ${mkMappedAttrsOrString mergedConfig.appdefaults}
+
+        [plugins]
+        ${mkMappedAttrsOrString mergedConfig.plugins}
+      '' + optionalString (mergedConfig.extraConfig != null)
+          ("\n" + mergedConfig.extraConfig));
+
+    warnings = flatten [
+      (optional (cfg.defaultRealm != null) ''
+        The option krb5.defaultRealm is deprecated, please use
+        krb5.libdefaults.default_realm.
+      '')
+      (optional (cfg.domainRealm != null) ''
+        The option krb5.domainRealm is deprecated, please use krb5.domain_realm.
+      '')
+      (optional (cfg.kdc != null) ''
+        The option krb5.kdc is deprecated, please pass a kdc attribute to a
+        realm in krb5.realms.
+      '')
+      (optional (cfg.kerberosAdminServer != null) ''
+        The option krb5.kerberosAdminServer is deprecated, please pass an
+        admin_server attribute to a realm in krb5.realms.
+      '')
+    ];
+
+    assertions = [
+      { assertion = !((builtins.any (value: value != null) [
+            cfg.defaultRealm cfg.domainRealm cfg.kdc cfg.kerberosAdminServer
+          ]) && ((builtins.any (value: value != {}) [
+              cfg.libdefaults cfg.realms cfg.domain_realm cfg.capaths
+              cfg.appdefaults cfg.plugins
+            ]) || (builtins.any (value: value != null) [
+              cfg.config cfg.extraConfig
+            ])));
+        message = ''
+          Configuration of krb5.conf by deprecated options is mutually exclusive
+          with configuration by section.  Please migrate your config using the
+          attributes suggested in the warnings.
+        '';
+      }
+      { assertion = !(cfg.config != null
+          && ((builtins.any (value: value != {}) [
+              cfg.libdefaults cfg.realms cfg.domain_realm cfg.capaths
+              cfg.appdefaults cfg.plugins
+            ]) || (builtins.any (value: value != null) [
+              cfg.extraConfig cfg.defaultRealm cfg.domainRealm cfg.kdc
+              cfg.kerberosAdminServer
+            ])));
+        message = ''
+          Configuration of krb5.conf using krb.config is mutually exclusive with
+          configuration by section.  If you want to mix the two, you can pass
+          lines to any configuration section or lines to krb5.extraConfig.
+        '';
+      }
+    ];
+  };
+}
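
A sketch of the section-based configuration style that the module above assembles into krb5.conf; realm and host names are placeholders:

  { ... }:
  {
    krb5 = {
      enable = true;
      libdefaults.default_realm = "EXAMPLE.COM";
      realms."EXAMPLE.COM" = {
        kdc = [ "kerberos.example.com" ];
        admin_server = "kerberos.example.com";
      };
      domain_realm = {
        "example.com" = "EXAMPLE.COM";
        ".example.com" = "EXAMPLE.COM";
      };
    };
  }
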
diff --git a/nixpkgs/nixos/modules/config/ldap.nix b/nixpkgs/nixos/modules/config/ldap.nix
new file mode 100644
index 000000000000..d2f01fb87d32
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/ldap.nix
@@ -0,0 +1,303 @@
+{ config, lib, pkgs, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.users.ldap;
+
+  # Careful: OpenLDAP seems to be very picky about the indentation of
+  # this file.  Directives HAVE to start in the first column!
+  ldapConfig = {
+    target = "ldap.conf";
+    source = writeText "ldap.conf" ''
+      uri ${config.users.ldap.server}
+      base ${config.users.ldap.base}
+      timelimit ${toString config.users.ldap.timeLimit}
+      bind_timelimit ${toString config.users.ldap.bind.timeLimit}
+      bind_policy ${config.users.ldap.bind.policy}
+      ${optionalString config.users.ldap.useTLS ''
+        ssl start_tls
+      ''}
+      ${optionalString (config.users.ldap.bind.distinguishedName != "") ''
+        binddn ${config.users.ldap.bind.distinguishedName}
+      ''}
+      ${optionalString (cfg.extraConfig != "") cfg.extraConfig }
+    '';
+  };
+
+  nslcdConfig = writeText "nslcd.conf" ''
+    uri ${cfg.server}
+    base ${cfg.base}
+    timelimit ${toString cfg.timeLimit}
+    bind_timelimit ${toString cfg.bind.timeLimit}
+    ${optionalString (cfg.bind.distinguishedName != "")
+      "binddn ${cfg.bind.distinguishedName}" }
+    ${optionalString (cfg.daemon.rootpwmoddn != "")
+      "rootpwmoddn ${cfg.daemon.rootpwmoddn}" }
+    ${optionalString (cfg.daemon.extraConfig != "") cfg.daemon.extraConfig }
+  '';
+
+  # nslcd normally reads its configuration from /etc/nslcd.conf.
+  # That file might contain secrets, which we append at runtime,
+  # so redirect its location to something more temporary.
+  nslcdWrapped = runCommand "nslcd-wrapped" { nativeBuildInputs = [ makeWrapper ]; } ''
+    mkdir -p $out/bin
+    makeWrapper ${nss_pam_ldapd}/sbin/nslcd $out/bin/nslcd \
+      --set LD_PRELOAD    "${pkgs.libredirect}/lib/libredirect.so" \
+      --set NIX_REDIRECTS "/etc/nslcd.conf=/run/nslcd/nslcd.conf"
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    users.ldap = {
+
+      enable = mkEnableOption (lib.mdDoc "authentication against an LDAP server");
+
+      loginPam = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to include authentication against LDAP in login PAM.";
+      };
+
+      nsswitch = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to include lookup against LDAP in NSS.";
+      };
+
+      server = mkOption {
+        type = types.str;
+        example = "ldap://ldap.example.org/";
+        description = lib.mdDoc "The URL of the LDAP server.";
+      };
+
+      base = mkOption {
+        type = types.str;
+        example = "dc=example,dc=org";
+        description = lib.mdDoc "The distinguished name of the search base.";
+      };
+
+      useTLS = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If enabled, use TLS (encryption) over an LDAP (port 389)
+          connection.  The alternative is to specify an LDAPS server (port
+          636) in {option}`users.ldap.server` or to forego
+          security.
+        '';
+      };
+
+      timeLimit = mkOption {
+        default = 0;
+        type = types.int;
+        description = lib.mdDoc ''
+          Specifies the time limit (in seconds) to use when performing
+          searches. A value of zero (0), which is the default, means to
+          wait indefinitely for searches to complete.
+        '';
+      };
+
+      daemon = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to let the nslcd daemon (nss-pam-ldapd) handle the
+            LDAP lookups for NSS and PAM. This can improve performance,
+            and if you need to bind to the LDAP server with a password,
+            it increases security, since only the nslcd user needs to
+            have access to the bindpw file, not everyone who uses NSS
+            and/or PAM. If this option is enabled, a local nslcd user is
+            created automatically, and the nslcd service is started
+            automatically when the network comes up.
+          '';
+        };
+
+        extraConfig = mkOption {
+          default =  "";
+          type = types.lines;
+          description = lib.mdDoc ''
+            Extra configuration options that will be added verbatim at
+            the end of the nslcd configuration file (`nslcd.conf(5)`).
+          '' ;
+        } ;
+
+        rootpwmoddn = mkOption {
+          default = "";
+          example = "cn=admin,dc=example,dc=com";
+          type = types.str;
+          description = lib.mdDoc ''
+            The distinguished name to use to bind to the LDAP server
+            when the root user tries to modify a user's password.
+          '';
+        };
+
+        rootpwmodpwFile = mkOption {
+          default = "";
+          example = "/run/keys/nslcd.rootpwmodpw";
+          type = types.str;
+          description = lib.mdDoc ''
+            The path to a file containing the credentials with which to bind to
+            the LDAP server if the root user tries to change a user's password.
+          '';
+        };
+      };
+
+      bind = {
+        distinguishedName = mkOption {
+          default = "";
+          example = "cn=admin,dc=example,dc=com";
+          type = types.str;
+          description = lib.mdDoc ''
+            The distinguished name to bind to the LDAP server with. If this
+            is not specified, an anonymous bind will be done.
+          '';
+        };
+
+        passwordFile = mkOption {
+          default = "/etc/ldap/bind.password";
+          type = types.str;
+          description = lib.mdDoc ''
+            The path to a file containing the credentials to use when binding
+            to the LDAP server (if not binding anonymously).
+          '';
+        };
+
+        timeLimit = mkOption {
+          default = 30;
+          type = types.int;
+          description = lib.mdDoc ''
+            Specifies the time limit (in seconds) to use when connecting
+            to the directory server. This is distinct from the time limit
+            specified in {option}`users.ldap.timeLimit` and affects
+            the initial server connection only.
+          '';
+        };
+
+        policy = mkOption {
+          default = "hard_open";
+          type = types.enum [ "hard_open" "hard_init" "soft" ];
+          description = lib.mdDoc ''
+            Specifies the policy to use for reconnecting to an unavailable
+            LDAP server. The default is `hard_open`, which
+            reconnects if opening the connection to the directory server
+            failed. By contrast, `hard_init` reconnects if
+            initializing the connection failed. Initializing may not
+            actually contact the directory server, and it is possible that
+            a malformed configuration file will trigger reconnection. If
+            `soft` is specified, then
+            `nss_ldap` will return immediately on server
+            failure. All hard reconnect policies block with exponential
+            backoff before retrying.
+          '';
+        };
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration options that will be added verbatim at
+          the end of the ldap configuration file (`ldap.conf(5)`).
+          If {option}`users.ldap.daemon` is enabled, this
+          configuration will not be used. In that case, use
+          {option}`users.ldap.daemon.extraConfig` instead.
+        '' ;
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.etc = optionalAttrs (!cfg.daemon.enable) {
+      "ldap.conf" = ldapConfig;
+    };
+
+    system.activationScripts = mkIf (!cfg.daemon.enable) {
+      ldap = stringAfter [ "etc" "groups" "users" ] ''
+        if test -f "${cfg.bind.passwordFile}" ; then
+          umask 0077
+          conf="$(mktemp)"
+          printf 'bindpw %s\n' "$(cat ${cfg.bind.passwordFile})" |
+          cat ${ldapConfig.source} - >"$conf"
+          mv -fT "$conf" /etc/ldap.conf
+        fi
+      '';
+    };
+
+    system.nssModules = mkIf cfg.nsswitch (singleton (
+      if cfg.daemon.enable then nss_pam_ldapd else nss_ldap
+    ));
+
+    system.nssDatabases.group = optional cfg.nsswitch "ldap";
+    system.nssDatabases.passwd = optional cfg.nsswitch "ldap";
+    system.nssDatabases.shadow = optional cfg.nsswitch "ldap";
+
+    users = mkIf cfg.daemon.enable {
+      groups.nslcd = {
+        gid = config.ids.gids.nslcd;
+      };
+
+      users.nslcd = {
+        uid = config.ids.uids.nslcd;
+        description = "nslcd user.";
+        group = "nslcd";
+      };
+    };
+
+    systemd.services = mkIf cfg.daemon.enable {
+      nslcd = {
+        wantedBy = [ "multi-user.target" ];
+
+        preStart = ''
+          umask 0077
+          conf="$(mktemp)"
+          {
+            cat ${nslcdConfig}
+            test -z '${cfg.bind.distinguishedName}' -o ! -f '${cfg.bind.passwordFile}' ||
+            printf 'bindpw %s\n' "$(cat '${cfg.bind.passwordFile}')"
+            test -z '${cfg.daemon.rootpwmoddn}' -o ! -f '${cfg.daemon.rootpwmodpwFile}' ||
+            printf 'rootpwmodpw %s\n' "$(cat '${cfg.daemon.rootpwmodpwFile}')"
+          } >"$conf"
+          mv -fT "$conf" /run/nslcd/nslcd.conf
+        '';
+
+        restartTriggers = [
+          nslcdConfig
+          cfg.bind.passwordFile
+          cfg.daemon.rootpwmodpwFile
+        ];
+
+        serviceConfig = {
+          ExecStart = "${nslcdWrapped}/bin/nslcd";
+          Type = "forking";
+          Restart = "always";
+          User = "nslcd";
+          Group = "nslcd";
+          RuntimeDirectory = [ "nslcd" ];
+          PIDFile = "/run/nslcd/nslcd.pid";
+          AmbientCapabilities = "CAP_SYS_RESOURCE";
+        };
+      };
+
+    };
+
+  };
+
+  imports =
+    [ (mkRenamedOptionModule [ "users" "ldap" "bind" "password"] [ "users" "ldap" "bind" "passwordFile"])
+    ];
+}
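
A sketch of an nslcd-based setup with the options above; server, base, bind DN and the secret path are placeholders:

  { ... }:
  {
    users.ldap = {
      enable = true;
      server = "ldap://ldap.example.org/";
      base = "dc=example,dc=org";
      useTLS = true;
      daemon.enable = true;  # let nslcd handle NSS/PAM lookups
      bind.distinguishedName = "cn=admin,dc=example,dc=org";
      bind.passwordFile = "/run/keys/ldap-bindpw";  # hypothetical secret path
    };
  }
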
diff --git a/nixpkgs/nixos/modules/config/locale.nix b/nixpkgs/nixos/modules/config/locale.nix
new file mode 100644
index 000000000000..7716e121c712
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/locale.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  tzdir = "${pkgs.tzdata}/share/zoneinfo";
+  nospace  = str: filter (c: c == " ") (stringToCharacters str) == [];
+  timezone = types.nullOr (types.addCheck types.str nospace)
+    // { description = "null or string without spaces"; };
+
+  lcfg = config.location;
+
+in
+
+{
+  options = {
+
+    time = {
+
+      timeZone = mkOption {
+        default = null;
+        type = timezone;
+        example = "America/New_York";
+        description = lib.mdDoc ''
+          The time zone used when displaying times and dates. See <https://en.wikipedia.org/wiki/List_of_tz_database_time_zones>
+          for a comprehensive list of possible values for this setting.
+
+          If null, the timezone will default to UTC and can be set imperatively
+          using timedatectl.
+        '';
+      };
+
+      hardwareClockInLocalTime = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "If set, keep the hardware clock in local time instead of UTC.";
+      };
+
+    };
+
+    location = {
+
+      latitude = mkOption {
+        type = types.float;
+        description = lib.mdDoc ''
+          Your current latitude, between
+          `-90.0` and `90.0`. Must be provided
+          along with longitude.
+        '';
+      };
+
+      longitude = mkOption {
+        type = types.float;
+        description = lib.mdDoc ''
+          Your current longitude, between
+          `-180.0` and `180.0`. Must be
+          provided along with latitude.
+        '';
+      };
+
+      provider = mkOption {
+        type = types.enum [ "manual" "geoclue2" ];
+        default = "manual";
+        description = lib.mdDoc ''
+          The location provider to use for determining your location. If set to
+          `manual` you must also provide latitude/longitude.
+        '';
+      };
+
+    };
+  };
+
+  config = {
+
+    environment.sessionVariables.TZDIR = "/etc/zoneinfo";
+
+    services.geoclue2.enable = mkIf (lcfg.provider == "geoclue2") true;
+
+    # This way services are restarted when tzdata changes.
+    systemd.globalEnvironment.TZDIR = tzdir;
+
+    systemd.services.systemd-timedated.environment = lib.optionalAttrs (config.time.timeZone != null) { NIXOS_STATIC_TIMEZONE = "1"; };
+
+    environment.etc = {
+      zoneinfo.source = tzdir;
+    } // lib.optionalAttrs (config.time.timeZone != null) {
+        localtime.source = "/etc/zoneinfo/${config.time.timeZone}";
+        localtime.mode = "direct-symlink";
+      };
+  };
+
+}
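
A minimal sketch of the time and location options above; the coordinates are illustrative placeholders:

  { ... }:
  {
    time.timeZone = "America/New_York";
    location = {
      provider = "manual";
      latitude = 40.7;    # placeholder coordinates
      longitude = -74.0;
    };
  }
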
diff --git a/nixpkgs/nixos/modules/config/malloc.nix b/nixpkgs/nixos/modules/config/malloc.nix
new file mode 100644
index 000000000000..043f78c8214e
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/malloc.nix
@@ -0,0 +1,114 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  cfg = config.environment.memoryAllocator;
+
+  # The set of alternative malloc(3) providers.
+  providers = {
+    graphene-hardened = {
+      libPath = "${pkgs.graphene-hardened-malloc}/lib/libhardened_malloc.so";
+      description = ''
+        An allocator designed to mitigate memory corruption attacks, such as
+        those caused by use-after-free bugs.
+      '';
+    };
+
+    jemalloc = {
+      libPath = "${pkgs.jemalloc}/lib/libjemalloc.so";
+      description = ''
+        A general purpose allocator that emphasizes fragmentation avoidance
+        and scalable concurrency support.
+      '';
+    };
+
+    scudo = let
+      platformMap = {
+        aarch64-linux = "aarch64";
+        x86_64-linux  = "x86_64";
+      };
+
+      systemPlatform = platformMap.${pkgs.stdenv.hostPlatform.system} or (throw "scudo not supported on ${pkgs.stdenv.hostPlatform.system}");
+    in {
+      libPath = "${pkgs.llvmPackages_14.compiler-rt}/lib/linux/libclang_rt.scudo-${systemPlatform}.so";
+      description = ''
+        A user-mode allocator based on LLVM Sanitizer’s CombinedAllocator,
+        which aims at providing additional mitigations against heap based
+        vulnerabilities, while maintaining good performance.
+      '';
+    };
+
+    mimalloc = {
+      libPath = "${pkgs.mimalloc}/lib/libmimalloc.so";
+      description = ''
+        A compact and fast general purpose allocator, which may
+        optionally be built with mitigations against various heap
+        vulnerabilities.
+      '';
+    };
+  };
+
+  providerConf = providers.${cfg.provider};
+
+  # An output that contains only the shared library, to avoid
+  # needlessly bloating the system closure
+  mallocLib = pkgs.runCommand "malloc-provider-${cfg.provider}"
+    rec {
+      preferLocalBuild = true;
+      allowSubstitutes = false;
+      origLibPath = providerConf.libPath;
+      libName = baseNameOf origLibPath;
+    }
+    ''
+      mkdir -p $out/lib
+      cp -L $origLibPath $out/lib/$libName
+    '';
+
+  # The full path to the selected provider shlib.
+  providerLibPath = "${mallocLib}/lib/${mallocLib.libName}";
+in
+
+{
+  meta = {
+    maintainers = [ maintainers.joachifm ];
+  };
+
+  options = {
+    environment.memoryAllocator.provider = mkOption {
+      type = types.enum ([ "libc" ] ++ attrNames providers);
+      default = "libc";
+      description = lib.mdDoc ''
+        The system-wide memory allocator.
+
+        Briefly, the system-wide memory allocator providers are:
+
+        - `libc`: the standard allocator provided by libc
+        ${concatStringsSep "\n" (mapAttrsToList
+            (name: value: "- `${name}`: ${replaceStrings [ "\n" ] [ " " ] value.description}")
+            providers)}
+
+        ::: {.warning}
+        Selecting an alternative allocator (i.e., anything other than
+        `libc`) may result in instability, data loss,
+        and/or service failure.
+        :::
+      '';
+    };
+  };
+
+  config = mkIf (cfg.provider != "libc") {
+    environment.etc."ld-nix.so.preload".text = ''
+      ${providerLibPath}
+    '';
+    security.apparmor.includes = {
+      "abstractions/base" = ''
+        r /etc/ld-nix.so.preload,
+        r ${config.environment.etc."ld-nix.so.preload".source},
+        include "${pkgs.apparmorRulesFromClosure {
+            name = "mallocLib";
+            baseRules = ["mr $path/lib/**.so*"];
+          } [ mallocLib ] }"
+      '';
+    };
+  };
+}
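
A one-line sketch of switching the system-wide allocator; note the warning in the option description above:

  { ... }:
  {
    environment.memoryAllocator.provider = "graphene-hardened";
  }
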
diff --git a/nixpkgs/nixos/modules/config/mysql.nix b/nixpkgs/nixos/modules/config/mysql.nix
new file mode 100644
index 000000000000..95c9ba76663e
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/mysql.nix
@@ -0,0 +1,456 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.users.mysql;
+in
+{
+  options = {
+    users.mysql = {
+      enable = mkEnableOption (lib.mdDoc "Authentication against a MySQL/MariaDB database");
+      host = mkOption {
+        type = types.str;
+        example = "localhost";
+        description = lib.mdDoc "The hostname of the MySQL/MariaDB server";
+      };
+      database = mkOption {
+        type = types.str;
+        example = "auth";
+        description = lib.mdDoc "The name of the database containing the users";
+      };
+      user = mkOption {
+        type = types.str;
+        example = "nss-user";
+        description = lib.mdDoc "The username to use when connecting to the database";
+      };
+      passwordFile = mkOption {
+        type = types.path;
+        example = "/run/secrets/mysql-auth-db-passwd";
+        description = lib.mdDoc "The path to the file containing the password for the user";
+      };
+      pam = mkOption {
+        description = lib.mdDoc "Settings for `pam_mysql`";
+        type = types.submodule {
+          options = {
+            table = mkOption {
+              type = types.str;
+              example = "users";
+              description = lib.mdDoc "The name of table that maps unique login names to the passwords.";
+            };
+            updateTable = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "users_updates";
+              description = lib.mdDoc ''
+                The name of the table used for password alteration. If not defined, the value
+                of the `table` option will be used instead.
+              '';
+            };
+            userColumn = mkOption {
+              type = types.str;
+              example = "username";
+              description = lib.mdDoc "The name of the column that contains a unix login name.";
+            };
+            passwordColumn = mkOption {
+              type = types.str;
+              example = "password";
+              description = lib.mdDoc "The name of the column that contains a (encrypted) password string.";
+            };
+            statusColumn = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "status";
+              description = lib.mdDoc ''
+                The name of the column or an SQL expression that indicates the status of
+                the user. The status is expressed by the combination of two bitfields
+                shown below:
+
+                - `bit 0 (0x01)`:
+                   if flagged, `pam_mysql` deems the account to be expired and
+                   returns `PAM_ACCT_EXPIRED`. That is, the account is supposed
+                   to no longer be available. Note this doesn't mean that `pam_mysql`
+                   rejects further authentication operations.
+                -  `bit 1 (0x02)`:
+                   if flagged, `pam_mysql` deems the authentication token
+                   (password) to be expired and returns `PAM_NEW_AUTHTOK_REQD`.
+                   This ends up requiring that the user enter a new password.
+              '';
+            };
+            passwordCrypt = mkOption {
+              example = "2";
+              type = types.enum [
+                "0" "plain"
+                "1" "Y"
+                "2" "mysql"
+                "3" "md5"
+                "4" "sha1"
+                "5" "drupal7"
+                "6" "joomla15"
+                "7" "ssha"
+                "8" "sha512"
+                "9" "sha256"
+              ];
+              description = lib.mdDoc ''
+                The method to encrypt the user's password:
+
+                - `0` (or `"plain"`):
+                  No encryption. Passwords are stored in plaintext. HIGHLY DISCOURAGED.
+                - `1` (or `"Y"`):
+                  Use crypt(3) function.
+                - `2` (or `"mysql"`):
+                  Use the MySQL PASSWORD() function. It is possible that the encryption function used
+                  by `pam_mysql` is different from that of the MySQL server, as
+                  `pam_mysql` uses the function defined in MySQL's C-client API
+                  instead of using PASSWORD() SQL function in the query.
+                - `3` (or `"md5"`):
+                  Use plain hex MD5.
+                - `4` (or `"sha1"`):
+                  Use plain hex SHA1.
+                - `5` (or `"drupal7"`):
+                  Use Drupal7 salted passwords.
+                - `6` (or `"joomla15"`):
+                  Use Joomla15 salted passwords.
+                - `7` (or `"ssha"`):
+                  Use ssha hashed passwords.
+                - `8` (or `"sha512"`):
+                  Use sha512 hashed passwords.
+                - `9` (or `"sha256"`):
+                  Use sha256 hashed passwords.
+              '';
+            };
+            cryptDefault = mkOption {
+              type = types.nullOr (types.enum [ "md5" "sha256" "sha512" "blowfish" ]);
+              default = null;
+              example = "blowfish";
+              description = lib.mdDoc "The default encryption method to use for `passwordCrypt = 1`.";
+            };
+            where = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "host.name='web' AND user.active=1";
+              description = lib.mdDoc "Additional criteria for the query.";
+            };
+            verbose = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                If enabled, produces logs with detailed messages that describes what
+                `pam_mysql` is doing. May be useful for debugging.
+              '';
+            };
+            disconnectEveryOperation = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                By default, `pam_mysql` keeps the connection to the MySQL
+                database open until the session is closed. If this option is set to true,
+                it disconnects after every PAM operation. This option may
+                be useful when sessions last a long time.
+              '';
+            };
+            logging = {
+              enable = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc "Enables logging of authentication attempts in the MySQL database.";
+              };
+              table = mkOption {
+                type = types.str;
+                example = "logs";
+                description = lib.mdDoc "The name of the table to which logs are written.";
+              };
+              msgColumn = mkOption {
+                type = types.str;
+                example = "msg";
+                description = lib.mdDoc ''
+                  The name of the column in the log table to which the description
+                  of the performed operation is stored.
+                '';
+              };
+              userColumn = mkOption {
+                type = types.str;
+                example = "user";
+                description = lib.mdDoc ''
+                  The name of the column in the log table to which the name of the
+                  user being authenticated is stored.
+                '';
+              };
+              pidColumn = mkOption {
+                type = types.str;
+                example = "pid";
+                description = lib.mdDoc ''
+                  The name of the column in the log table to which the pid of the
+                  process utilising the `pam_mysql` authentication
+                  service is stored.
+                '';
+              };
+              hostColumn = mkOption {
+                type = types.str;
+                example = "host";
+                description = lib.mdDoc ''
+                  The name of the column in the log table to which the name of the host
+                  on which the authentication is performed is stored.
+                '';
+              };
+              rHostColumn = mkOption {
+                type = types.str;
+                example = "rhost";
+                description = lib.mdDoc ''
+                  The name of the column in the log table to which the name of the remote
+                  host that initiates the session is stored. The value is supposed to be
+                  set by the PAM-aware application with `pam_set_item(PAM_RHOST)`.
+                '';
+              };
+              timeColumn = mkOption {
+                type = types.str;
+                example = "timestamp";
+                description = lib.mdDoc ''
+                  The name of the column in the log table to which the timestamp of the
+                  log entry is stored.
+                '';
+              };
+            };
+          };
+        };
+      };
+      nss = mkOption {
+        description = lib.mdDoc ''
+          Settings for `libnss-mysql`.
+
+          All examples are from the [minimal example](https://github.com/saknopper/libnss-mysql/tree/master/sample/minimal)
+          of `libnss-mysql`, but they are modified with NixOS paths for bash.
+        '';
+        type = types.submodule {
+          options = {
+            getpwnam = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT username,'x',uid,'5000','MySQL User', CONCAT('/home/',username),'/run/current-system/sw/bin/bash' \
+                FROM users \
+                WHERE username='%1$s' \
+                LIMIT 1
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [getpwnam](https://man7.org/linux/man-pages/man3/getpwnam.3.html)
+                syscall.
+              '';
+            };
+            getpwuid = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT username,'x',uid,'5000','MySQL User', CONCAT('/home/',username),'/run/current-system/sw/bin/bash' \
+                FROM users \
+                WHERE uid='%1$u' \
+                LIMIT 1
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [getpwuid](https://man7.org/linux/man-pages/man3/getpwuid.3.html)
+                syscall.
+              '';
+            };
+            getspnam = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT username,password,'1','0','99999','0','0','-1','0' \
+                FROM users \
+                WHERE username='%1$s' \
+                LIMIT 1
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [getspnam](https://man7.org/linux/man-pages/man3/getspnam.3.html)
+                syscall.
+              '';
+            };
+            getpwent = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT username,'x',uid,'5000','MySQL User', CONCAT('/home/',username),'/run/current-system/sw/bin/bash' FROM users
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [getpwent](https://man7.org/linux/man-pages/man3/getpwent.3.html)
+                syscall.
+              '';
+            };
+            getspent = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT username,password,'1','0','99999','0','0','-1','0' FROM users
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [getspent](https://man7.org/linux/man-pages/man3/getspent.3.html)
+                syscall.
+              '';
+            };
+            getgrnam = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT name,password,gid FROM groups WHERE name='%1$s' LIMIT 1
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [getgrnam](https://man7.org/linux/man-pages/man3/getgrnam.3.html)
+                syscall.
+              '';
+            };
+            getgrgid = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT name,password,gid FROM groups WHERE gid='%1$u' LIMIT 1
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [getgrgid](https://man7.org/linux/man-pages/man3/getgrgid.3.html)
+                syscall.
+              '';
+            };
+            getgrent = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT name,password,gid FROM groups
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [getgrent](https://man7.org/linux/man-pages/man3/getgrent.3.html)
+                syscall.
+              '';
+            };
+            memsbygid = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT username FROM grouplist WHERE gid='%1$u'
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [memsbygid](https://man7.org/linux/man-pages/man3/memsbygid.3.html)
+                syscall.
+              '';
+            };
+            gidsbymem = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = literalExpression ''
+                SELECT gid FROM grouplist WHERE username='%1$s'
+              '';
+              description = lib.mdDoc ''
+                SQL query for the [gidsbymem](https://man7.org/linux/man-pages/man3/gidsbymem.3.html)
+                syscall.
+              '';
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    system.nssModules = [ pkgs.libnss-mysql ];
+    system.nssDatabases.shadow = [ "mysql" ];
+    system.nssDatabases.group = [ "mysql" ];
+    system.nssDatabases.passwd = [ "mysql" ];
+
+    environment.etc."security/pam_mysql.conf" = {
+      user = "root";
+      group = "root";
+      mode = "0600";
+      # password will be added from password file in activation script
+      text = ''
+        users.host=${cfg.host}
+        users.db_user=${cfg.user}
+        users.database=${cfg.database}
+        users.table=${cfg.pam.table}
+        users.user_column=${cfg.pam.userColumn}
+        users.password_column=${cfg.pam.passwordColumn}
+        users.password_crypt=${cfg.pam.passwordCrypt}
+        users.disconnect_every_operation=${if cfg.pam.disconnectEveryOperation then "1" else "0"}
+        verbose=${if cfg.pam.verbose then "1" else "0"}
+      '' + optionalString (cfg.pam.cryptDefault != null) ''
+        users.use_${cfg.pam.cryptDefault}=1
+      '' + optionalString (cfg.pam.where != null) ''
+        users.where_clause=${cfg.pam.where}
+      '' + optionalString (cfg.pam.statusColumn != null) ''
+        users.status_column=${cfg.pam.statusColumn}
+      '' + optionalString (cfg.pam.updateTable != null) ''
+        users.update_table=${cfg.pam.updateTable}
+      '' + optionalString cfg.pam.logging.enable ''
+        log.enabled=true
+        log.table=${cfg.pam.logging.table}
+        log.message_column=${cfg.pam.logging.msgColumn}
+        log.pid_column=${cfg.pam.logging.pidColumn}
+        log.user_column=${cfg.pam.logging.userColumn}
+        log.host_column=${cfg.pam.logging.hostColumn}
+        log.rhost_column=${cfg.pam.logging.rHostColumn}
+        log.time_column=${cfg.pam.logging.timeColumn}
+      '';
+    };
+
+    environment.etc."libnss-mysql.cfg" = {
+      mode = "0600";
+      user = config.services.nscd.user;
+      group = config.services.nscd.group;
+      text = optionalString (cfg.nss.getpwnam != null) ''
+        getpwnam ${cfg.nss.getpwnam}
+      '' + optionalString (cfg.nss.getpwuid != null) ''
+        getpwuid ${cfg.nss.getpwuid}
+      '' + optionalString (cfg.nss.getspnam != null) ''
+        getspnam ${cfg.nss.getspnam}
+      '' + optionalString (cfg.nss.getpwent != null) ''
+        getpwent ${cfg.nss.getpwent}
+      '' + optionalString (cfg.nss.getspent != null) ''
+        getspent ${cfg.nss.getspent}
+      '' + optionalString (cfg.nss.getgrnam != null) ''
+        getgrnam ${cfg.nss.getgrnam}
+      '' + optionalString (cfg.nss.getgrgid != null) ''
+        getgrgid ${cfg.nss.getgrgid}
+      '' + optionalString (cfg.nss.getgrent != null) ''
+        getgrent ${cfg.nss.getgrent}
+      '' + optionalString (cfg.nss.memsbygid != null) ''
+        memsbygid ${cfg.nss.memsbygid}
+      '' + optionalString (cfg.nss.gidsbymem != null) ''
+        gidsbymem ${cfg.nss.gidsbymem}
+      '' + ''
+        host ${cfg.host}
+        database ${cfg.database}
+      '';
+    };
+
+    environment.etc."libnss-mysql-root.cfg" = {
+      mode = "0600";
+      user = config.services.nscd.user;
+      group = config.services.nscd.group;
+      # password will be added from password file in activation script
+      text = ''
+        username ${cfg.user}
+      '';
+    };
+
+    # preStart script to append the password from the password file
+    # to the configuration files. It also fixes the owner of the
+    # libnss-mysql-root.cfg because it is changed to root after the
+    # password is appended.
+    systemd.services.mysql.preStart = ''
+      if [[ -r ${cfg.passwordFile} ]]; then
+        org_umask=$(umask)
+        umask 0077
+
+        conf_nss="$(mktemp)"
+        cp /etc/libnss-mysql-root.cfg $conf_nss
+        printf 'password %s\n' "$(cat ${cfg.passwordFile})" >> $conf_nss
+        mv -fT "$conf_nss" /etc/libnss-mysql-root.cfg
+        chown ${config.services.nscd.user}:${config.services.nscd.group} /etc/libnss-mysql-root.cfg
+
+        conf_pam="$(mktemp)"
+        cp /etc/security/pam_mysql.conf $conf_pam
+        printf 'users.db_passwd=%s\n' "$(cat ${cfg.passwordFile})" >> $conf_pam
+        mv -fT "$conf_pam" /etc/security/pam_mysql.conf
+
+        umask $org_umask
+      fi
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/networking.nix b/nixpkgs/nixos/modules/config/networking.nix
new file mode 100644
index 000000000000..fc910fee94bf
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/networking.nix
@@ -0,0 +1,237 @@
+# /etc files related to networking, such as /etc/services.
+
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking;
+  opt = options.networking;
+
+  localhostMultiple = any (elem "localhost") (attrValues (removeAttrs cfg.hosts [ "127.0.0.1" "::1" ]));
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "networking" "hostConf" ] "Use environment.etc.\"host.conf\" instead.")
+  ];
+
+  options = {
+
+    networking.hosts = lib.mkOption {
+      type = types.attrsOf (types.listOf types.str);
+      example = literalExpression ''
+        {
+          "127.0.0.1" = [ "foo.bar.baz" ];
+          "192.168.0.2" = [ "fileserver.local" "nameserver.local" ];
+          }
+      '';
+      description = lib.mdDoc ''
+        Locally defined maps of hostnames to IP addresses.
+      '';
+    };
+
+    networking.hostFiles = lib.mkOption {
+      type = types.listOf types.path;
+      defaultText = literalMD "Hosts from {option}`networking.hosts` and {option}`networking.extraHosts`";
+      example = literalExpression ''[ "''${pkgs.my-blocklist-package}/share/my-blocklist/hosts" ]'';
+      description = lib.mdDoc ''
+        Files that should be concatenated together to form {file}`/etc/hosts`.
+      '';
+    };
+
+    networking.extraHosts = lib.mkOption {
+      type = types.lines;
+      default = "";
+      example = "192.168.0.1 lanlocalhost";
+      description = lib.mdDoc ''
+        Additional verbatim entries to be appended to {file}`/etc/hosts`.
+        For adding hosts from derivation results, use {option}`networking.hostFiles` instead.
+      '';
+    };
+
+    networking.timeServers = mkOption {
+      default = [
+        "0.nixos.pool.ntp.org"
+        "1.nixos.pool.ntp.org"
+        "2.nixos.pool.ntp.org"
+        "3.nixos.pool.ntp.org"
+      ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        The set of NTP servers from which to synchronise.
+      '';
+    };
+
+    networking.proxy = {
+
+      default = lib.mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          This option specifies the default value for httpProxy, httpsProxy, ftpProxy and rsyncProxy.
+        '';
+        example = "http://127.0.0.1:3128";
+      };
+
+      httpProxy = lib.mkOption {
+        type = types.nullOr types.str;
+        default = cfg.proxy.default;
+        defaultText = literalExpression "config.${opt.proxy.default}";
+        description = lib.mdDoc ''
+          This option specifies the http_proxy environment variable.
+        '';
+        example = "http://127.0.0.1:3128";
+      };
+
+      httpsProxy = lib.mkOption {
+        type = types.nullOr types.str;
+        default = cfg.proxy.default;
+        defaultText = literalExpression "config.${opt.proxy.default}";
+        description = lib.mdDoc ''
+          This option specifies the https_proxy environment variable.
+        '';
+        example = "http://127.0.0.1:3128";
+      };
+
+      ftpProxy = lib.mkOption {
+        type = types.nullOr types.str;
+        default = cfg.proxy.default;
+        defaultText = literalExpression "config.${opt.proxy.default}";
+        description = lib.mdDoc ''
+          This option specifies the ftp_proxy environment variable.
+        '';
+        example = "http://127.0.0.1:3128";
+      };
+
+      rsyncProxy = lib.mkOption {
+        type = types.nullOr types.str;
+        default = cfg.proxy.default;
+        defaultText = literalExpression "config.${opt.proxy.default}";
+        description = lib.mdDoc ''
+          This option specifies the rsync_proxy environment variable.
+        '';
+        example = "http://127.0.0.1:3128";
+      };
+
+      allProxy = lib.mkOption {
+        type = types.nullOr types.str;
+        default = cfg.proxy.default;
+        defaultText = literalExpression "config.${opt.proxy.default}";
+        description = lib.mdDoc ''
+          This option specifies the all_proxy environment variable.
+        '';
+        example = "http://127.0.0.1:3128";
+      };
+
+      noProxy = lib.mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          This option specifies the no_proxy environment variable.
+          If a default proxy is used and noProxy is null,
+          then noProxy will be set to 127.0.0.1,localhost.
+        '';
+        example = "127.0.0.1,localhost,.localdomain";
+      };
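+
+      # A minimal sketch of how these options compose (proxy address and
+      # domain are illustrative): setting only the default routes every
+      # protocol through one proxy, while noProxy carves out exceptions:
+      #
+      #   networking.proxy.default = "http://proxy.example.com:3128";
+      #   networking.proxy.noProxy = "127.0.0.1,localhost,.example.com";
+      #
+      # httpProxy, httpsProxy, ftpProxy, rsyncProxy and allProxy all fall
+      # back to the default above unless they are set individually.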
+
+      envVars = lib.mkOption {
+        type = types.attrs;
+        internal = true;
+        default = {};
+        description = lib.mdDoc ''
+          Environment variables used for the network proxy.
+        '';
+      };
+    };
+  };
+
+  config = {
+
+    assertions = [{
+      assertion = !localhostMultiple;
+      message = ''
+        `networking.hosts` maps "localhost" to something other than "127.0.0.1"
+        or "::1". This will break some applications. Please use
+        `networking.extraHosts` if you really want to add such a mapping.
+      '';
+    }];
+
+    # These entries are required for "hostname -f" and to resolve both the
+    # hostname and FQDN correctly:
+    networking.hosts = let
+      hostnames = # Note: The FQDN (canonical hostname) has to come first:
+        optional (cfg.hostName != "" && cfg.domain != null) "${cfg.hostName}.${cfg.domain}"
+        ++ optional (cfg.hostName != "") cfg.hostName; # Then the hostname (without the domain)
+    in {
+      "127.0.0.2" = hostnames;
+    } // optionalAttrs cfg.enableIPv6 {
+      "::1" = hostnames;
+    };
+
+    networking.hostFiles = let
+      # Note: localhostHosts has to appear first in /etc/hosts so that 127.0.0.1
+      # resolves back to "localhost" (as some applications assume) instead of
+      # the FQDN! By default "networking.hosts" also contains entries for the
+      # FQDN so that e.g. "hostname -f" works correctly.
+      hosts = foldAttrs (a: e: a ++ e) [] ([ { "127.0.0.1" = [ "localhost" ]; } ]
+        ++ optional cfg.enableIPv6 { "::1" = [ "localhost" ]; }
+        ++ [ cfg.hosts ]);
+
+      stringHosts =
+        let
+          oneToString = set: ip: ip + " " + concatStringsSep " " set.${ip} + "\n";
+          allToString = set: concatMapStrings (oneToString set) (attrNames set);
+        in pkgs.writeText "string-hosts" (allToString (filterAttrs (_: v: v != []) hosts));
+      extraHosts = pkgs.writeText "extra-hosts" cfg.extraHosts;
+    in mkBefore [ stringHosts extraHosts ];
+
+    environment.etc =
+      { # /etc/services: TCP/UDP port assignments.
+        services.source = pkgs.iana-etc + "/etc/services";
+
+        # /etc/protocols: IP protocol numbers.
+        protocols.source  = pkgs.iana-etc + "/etc/protocols";
+
+        # /etc/hosts: Hostname-to-IP mappings.
+        hosts.source = pkgs.concatText "hosts" cfg.hostFiles;
+
+        # /etc/netgroup: Network-wide groups.
+        netgroup.text = mkDefault "";
+
+        # /etc/host.conf: resolver configuration file
+        "host.conf".text = ''
+          multi on
+        '';
+
+      } // optionalAttrs (pkgs.stdenv.hostPlatform.libc == "glibc") {
+        # /etc/rpc: RPC program numbers.
+        rpc.source = pkgs.stdenv.cc.libc.out + "/etc/rpc";
+      };
+
+    networking.proxy.envVars =
+      optionalAttrs (cfg.proxy.default != null) {
+        # other options already fall back to proxy.default
+        no_proxy = "127.0.0.1,localhost";
+      } // optionalAttrs (cfg.proxy.httpProxy != null) {
+        http_proxy  = cfg.proxy.httpProxy;
+      } // optionalAttrs (cfg.proxy.httpsProxy != null) {
+        https_proxy = cfg.proxy.httpsProxy;
+      } // optionalAttrs (cfg.proxy.rsyncProxy != null) {
+        rsync_proxy = cfg.proxy.rsyncProxy;
+      } // optionalAttrs (cfg.proxy.ftpProxy != null) {
+        ftp_proxy   = cfg.proxy.ftpProxy;
+      } // optionalAttrs (cfg.proxy.allProxy != null) {
+        all_proxy   = cfg.proxy.allProxy;
+      } // optionalAttrs (cfg.proxy.noProxy != null) {
+        no_proxy    = cfg.proxy.noProxy;
+      };
+
+    # Install the proxy environment variables
+    environment.sessionVariables = cfg.proxy.envVars;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/nix-channel.nix b/nixpkgs/nixos/modules/config/nix-channel.nix
new file mode 100644
index 000000000000..a7ca7a5c74a4
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/nix-channel.nix
@@ -0,0 +1,104 @@
+/*
+  Manages the things that are needed for a traditional nix-channel based
+  configuration to work.
+
+  See also
+  - ./nix.nix
+  - ./nix-flakes.nix
+ */
+{ config, lib, ... }:
+let
+  inherit (lib)
+    mkDefault
+    mkIf
+    mkOption
+    stringAfter
+    types
+    ;
+
+  cfg = config.nix;
+
+in
+{
+  options = {
+    nix = {
+      channel = {
+        enable = mkOption {
+          description = lib.mdDoc ''
+            Whether the `nix-channel` command and state files are made available on the machine.
+
+            The following files are initialized when enabled:
+              - `/nix/var/nix/profiles/per-user/root/channels`
+              - `/root/.nix-channels`
+              - `$HOME/.nix-defexpr/channels` (on login)
+
+            Disabling this option will not remove the state files from the system.
+          '';
+          type = types.bool;
+          default = true;
+        };
+      };
+
+      nixPath = mkOption {
+        type = types.listOf types.str;
+        default =
+          if cfg.channel.enable
+          then [
+            "nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos"
+            "nixos-config=/etc/nixos/configuration.nix"
+            "/nix/var/nix/profiles/per-user/root/channels"
+          ]
+          else [ ];
+        defaultText = ''
+          if nix.channel.enable
+          then [
+            "nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos"
+            "nixos-config=/etc/nixos/configuration.nix"
+            "/nix/var/nix/profiles/per-user/root/channels"
+          ]
+          else [];
+        '';
+        description = lib.mdDoc ''
+          The default Nix expression search path, used by the Nix
+          evaluator to look up paths enclosed in angle brackets
+          (e.g. `<nixpkgs>`).
+        '';
+      };
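+
+      # A sketch of the usual patterns (paths are illustrative): with channels
+      # enabled, the default above makes `<nixpkgs>` resolve to the root
+      # user's nixos channel; with channels disabled, `<nixpkgs>` can still be
+      # pinned explicitly, e.g.
+      #   nix.nixPath = [ "nixpkgs=/etc/nix/inputs/nixpkgs" ];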
+    };
+
+    system = {
+      defaultChannel = mkOption {
+        internal = true;
+        type = types.str;
+        default = "https://nixos.org/channels/nixos-unstable";
+        description = lib.mdDoc "Default NixOS channel to which the root user is subscribed.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.extraInit =
+      mkIf cfg.channel.enable ''
+        if [ -e "$HOME/.nix-defexpr/channels" ]; then
+          export NIX_PATH="$HOME/.nix-defexpr/channels''${NIX_PATH:+:$NIX_PATH}"
+        fi
+      '';
+
+    environment.extraSetup = mkIf (!cfg.channel.enable) ''
+      rm --force $out/bin/nix-channel
+    '';
+
+    # NIX_PATH has a non-empty default according to Nix docs, so we don't unset
+    # it when empty.
+    environment.sessionVariables = {
+      NIX_PATH = cfg.nixPath;
+    };
+
+    nix.settings.nix-path = mkIf (! cfg.channel.enable) (mkDefault "");
+
+    systemd.tmpfiles.rules = lib.mkIf cfg.channel.enable [
+      ''f /root/.nix-channels - - - - ${config.system.defaultChannel} nixos\n''
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/nix-flakes.nix b/nixpkgs/nixos/modules/config/nix-flakes.nix
new file mode 100644
index 000000000000..242d8d3b82b7
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/nix-flakes.nix
@@ -0,0 +1,95 @@
+/*
+  Manages the flake registry.
+
+  See also
+   - ./nix.nix
+   - ./nix-channel.nix
+ */
+{ config, lib, ... }:
+let
+  inherit (lib)
+    filterAttrs
+    literalExpression
+    mapAttrsToList
+    mkDefault
+    mkIf
+    mkOption
+    types
+    ;
+
+  cfg = config.nix;
+
+in
+{
+  options = {
+    nix = {
+      registry = mkOption {
+        type = types.attrsOf (types.submodule (
+          let
+            referenceAttrs = with types; attrsOf (oneOf [
+              str
+              int
+              bool
+              path
+              package
+            ]);
+          in
+          { config, name, ... }:
+          {
+            options = {
+              from = mkOption {
+                type = referenceAttrs;
+                example = { type = "indirect"; id = "nixpkgs"; };
+                description = lib.mdDoc "The flake reference to be rewritten.";
+              };
+              to = mkOption {
+                type = referenceAttrs;
+                example = { type = "github"; owner = "my-org"; repo = "my-nixpkgs"; };
+                description = lib.mdDoc "The flake reference {option}`from` is rewritten to.";
+              };
+              flake = mkOption {
+                type = types.nullOr types.attrs;
+                default = null;
+                example = literalExpression "nixpkgs";
+                description = lib.mdDoc ''
+                  The flake input {option}`from` is rewritten to.
+                '';
+              };
+              exact = mkOption {
+                type = types.bool;
+                default = true;
+                description = lib.mdDoc ''
+                  Whether the {option}`from` reference needs to match exactly. If set,
+                  a {option}`from` reference like `nixpkgs` does not
+                  match with a reference like `nixpkgs/nixos-20.03`.
+                '';
+              };
+            };
+            config = {
+              from = mkDefault { type = "indirect"; id = name; };
+              to = mkIf (config.flake != null) (mkDefault (
+                {
+                  type = "path";
+                  path = config.flake.outPath;
+                } // filterAttrs
+                  (n: _: n == "lastModified" || n == "rev" || n == "revCount" || n == "narHash")
+                  config.flake
+              ));
+            };
+          }
+        ));
+        default = { };
+        description = lib.mdDoc ''
+          A system-wide flake registry.
+        '';
+      };
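+
+      # A common usage sketch (assumes a flake-based system configuration
+      # with an input named `nixpkgs`): pin the system-wide registry entry so
+      # that e.g. `nix run nixpkgs#hello` uses the same nixpkgs the system
+      # was built from:
+      #   nix.registry.nixpkgs.flake = nixpkgs;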
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."nix/registry.json".text = builtins.toJSON {
+      version = 2;
+      flakes = mapAttrsToList (n: v: { inherit (v) from to exact; }) cfg.registry;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/nix-remote-build.nix b/nixpkgs/nixos/modules/config/nix-remote-build.nix
new file mode 100644
index 000000000000..98c8fc06d2ee
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/nix-remote-build.nix
@@ -0,0 +1,226 @@
+/*
+  Manages the remote build configuration, /etc/nix/machines
+
+  See also
+   - ./nix.nix
+   - nixos/modules/services/system/nix-daemon.nix
+ */
+{ config, lib, ... }:
+
+let
+  inherit (lib)
+    any
+    concatMapStrings
+    concatStringsSep
+    filter
+    getVersion
+    mkIf
+    mkMerge
+    mkOption
+    optional
+    optionalString
+    types
+    versionAtLeast
+    ;
+
+  cfg = config.nix;
+
+  nixPackage = cfg.package.out;
+
+  isNixAtLeast = versionAtLeast (getVersion nixPackage);
+
+  buildMachinesText =
+    concatMapStrings
+      (machine:
+        (concatStringsSep " " ([
+          "${optionalString (machine.protocol != null) "${machine.protocol}://"}${optionalString (machine.sshUser != null) "${machine.sshUser}@"}${machine.hostName}"
+          (if machine.system != null then machine.system else if machine.systems != [ ] then concatStringsSep "," machine.systems else "-")
+          (if machine.sshKey != null then machine.sshKey else "-")
+          (toString machine.maxJobs)
+          (toString machine.speedFactor)
+          (let res = (machine.supportedFeatures ++ machine.mandatoryFeatures);
+            in if (res == []) then "-" else (concatStringsSep "," res))
+          (let res = machine.mandatoryFeatures;
+            in if (res == []) then "-" else (concatStringsSep "," machine.mandatoryFeatures))
+        ]
+        ++ optional (isNixAtLeast "2.4pre") (if machine.publicHostKey != null then machine.publicHostKey else "-")))
+        + "\n"
+      )
+      cfg.buildMachines;
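+
+  # Each entry above becomes one line of the machines file, roughly:
+  #   [proto://][user@]host system(s) sshKey maxJobs speedFactor \
+  #     supportedFeatures mandatoryFeatures [publicHostKey]
+  # with "-" standing in for unset fields; the publicHostKey column is only
+  # emitted for Nix >= 2.4.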
+
+in
+{
+  options = {
+    nix = {
+      buildMachines = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            hostName = mkOption {
+              type = types.str;
+              example = "nixbuilder.example.org";
+              description = lib.mdDoc ''
+                The hostname of the build machine.
+              '';
+            };
+            protocol = mkOption {
+              type = types.enum [ null "ssh" "ssh-ng" ];
+              default = "ssh";
+              example = "ssh-ng";
+              description = lib.mdDoc ''
+                The protocol used for communicating with the build machine.
+                Use `ssh-ng` if your remote builder and your
+                local Nix version support that improved protocol.
+
+                Use `null` when changing the special localhost builder, which
+                has no protocol and is used, for example, by Hydra.
+              '';
+            };
+            system = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "x86_64-linux";
+              description = lib.mdDoc ''
+                The system type the build machine can execute derivations on.
+                Either this attribute or {var}`systems` must be
+                present, where {var}`system` takes precedence if
+                both are set.
+              '';
+            };
+            systems = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              example = [ "x86_64-linux" "aarch64-linux" ];
+              description = lib.mdDoc ''
+                The system types the build machine can execute derivations on.
+                Either this attribute or {var}`system` must be
+                present, where {var}`system` takes precedence if
+                both are set.
+              '';
+            };
+            sshUser = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "builder";
+              description = lib.mdDoc ''
+                The username to log in as on the remote host. This user must be
+                able to log in and run nix commands non-interactively. It must
+                also be privileged to build derivations, so must be included in
+                {option}`nix.settings.trusted-users`.
+              '';
+            };
+            sshKey = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "/root/.ssh/id_buildhost_builduser";
+              description = lib.mdDoc ''
+                The path to the SSH private key with which to authenticate on
+                the build machine. The private key must not have a passphrase.
+                If null, the building user (root on NixOS machines) must have an
+                appropriate ssh configuration to log in non-interactively.
+
+                Note that for security reasons, this path must point to a file
+                in the local filesystem, *not* to the nix store.
+              '';
+            };
+            maxJobs = mkOption {
+              type = types.int;
+              default = 1;
+              description = lib.mdDoc ''
+                The number of concurrent jobs the build machine supports. The
+                build machine will enforce its own limits, but this allows hydra
+                to schedule better since there is no work-stealing between build
+                machines.
+              '';
+            };
+            speedFactor = mkOption {
+              type = types.int;
+              default = 1;
+              description = lib.mdDoc ''
+                The relative speed of this builder. This is an arbitrary integer
+                that indicates the speed of this builder, relative to other
+                builders. Higher is faster.
+              '';
+            };
+            mandatoryFeatures = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              example = [ "big-parallel" ];
+              description = lib.mdDoc ''
+                A list of features mandatory for this builder. The builder will
+                be ignored for derivations that don't require all features in
+                this list. All mandatory features are automatically included in
+                {var}`supportedFeatures`.
+              '';
+            };
+            supportedFeatures = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              example = [ "kvm" "big-parallel" ];
+              description = lib.mdDoc ''
+                A list of features supported by this builder. The builder will
+                be ignored for derivations that require features not in this
+                list.
+              '';
+            };
+            publicHostKey = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = lib.mdDoc ''
+                The (base64-encoded) public host key of this builder. The field
+                is calculated via {command}`base64 -w0 /etc/ssh/ssh_host_type_key.pub`.
+                If null, SSH will use its regular known-hosts file when connecting.
+              '';
+            };
+          };
+        });
+        default = [ ];
+        description = lib.mdDoc ''
+          This option lists the machines to be used if distributed builds are
+          enabled (see {option}`nix.distributedBuilds`).
+          Nix will perform derivations on those machines via SSH by copying the
+          inputs to the Nix store on the remote machine, starting the build,
+          then copying the output back to the local Nix store.
+        '';
+      };
+
+      distributedBuilds = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to distribute builds to the machines listed in
+          {option}`nix.buildMachines`.
+        '';
+      };
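+
+      # A minimal sketch of a remote builder (hostname, user and key path are
+      # illustrative):
+      #   nix.distributedBuilds = true;
+      #   nix.buildMachines = [{
+      #     hostName = "builder.example.org";
+      #     system = "aarch64-linux";
+      #     sshUser = "builder";
+      #     sshKey = "/root/.ssh/id_buildhost_builduser";
+      #     maxJobs = 4;
+      #     supportedFeatures = [ "big-parallel" ];
+      #   }];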
+    };
+  };
+
+  # distributedBuilds does *not* inhibit /etc/nix/machines generation; a caller
+  # may override that nix option.
+  config = mkIf cfg.enable {
+    assertions =
+      let badMachine = m: m.system == null && m.systems == [ ];
+      in
+      [
+        {
+          assertion = !(any badMachine cfg.buildMachines);
+          message = ''
+            At least one system type (via <varname>system</varname> or
+              <varname>systems</varname>) must be set for every build machine.
+              Invalid machine specifications:
+          '' + "      " +
+          (concatStringsSep "\n      "
+            (map (m: m.hostName)
+              (filter (badMachine) cfg.buildMachines)));
+        }
+      ];
+
+    # List of machines for distributed Nix builds
+    environment.etc."nix/machines" =
+      mkIf (cfg.buildMachines != [ ]) {
+        text = buildMachinesText;
+      };
+
+    # Legacy configuration conversion.
+    nix.settings = mkIf (!cfg.distributedBuilds) { builders = null; };
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/nix.nix b/nixpkgs/nixos/modules/config/nix.nix
new file mode 100644
index 000000000000..cee4f54db0cb
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/nix.nix
@@ -0,0 +1,379 @@
+/*
+  Manages /etc/nix/nix.conf.
+
+  See also
+   - ./nix-channel.nix
+   - ./nix-flakes.nix
+   - ./nix-remote-build.nix
+   - nixos/modules/services/system/nix-daemon.nix
+ */
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    concatStringsSep
+    boolToString
+    escape
+    floatToString
+    getVersion
+    isBool
+    isDerivation
+    isFloat
+    isInt
+    isList
+    isString
+    literalExpression
+    mapAttrsToList
+    mkAfter
+    mkDefault
+    mkIf
+    mkOption
+    mkRenamedOptionModuleWith
+    optionalString
+    optionals
+    strings
+    systems
+    toPretty
+    types
+    versionAtLeast
+    ;
+
+  cfg = config.nix;
+
+  nixPackage = cfg.package.out;
+
+  isNixAtLeast = versionAtLeast (getVersion nixPackage);
+
+  legacyConfMappings = {
+    useSandbox = "sandbox";
+    buildCores = "cores";
+    maxJobs = "max-jobs";
+    sandboxPaths = "extra-sandbox-paths";
+    binaryCaches = "substituters";
+    trustedBinaryCaches = "trusted-substituters";
+    binaryCachePublicKeys = "trusted-public-keys";
+    autoOptimiseStore = "auto-optimise-store";
+    requireSignedBinaryCaches = "require-sigs";
+    trustedUsers = "trusted-users";
+    allowedUsers = "allowed-users";
+    systemFeatures = "system-features";
+  };
+
+  semanticConfType = with types;
+    let
+      confAtom = nullOr
+        (oneOf [
+          bool
+          int
+          float
+          str
+          path
+          package
+        ]) // {
+        description = "Nix config atom (null, bool, int, float, str, path or package)";
+      };
+    in
+    attrsOf (either confAtom (listOf confAtom));
+
+  nixConf =
+    assert isNixAtLeast "2.2";
+    let
+
+      mkValueString = v:
+        if v == null then ""
+        else if isInt v then toString v
+        else if isBool v then boolToString v
+        else if isFloat v then floatToString v
+        else if isList v then toString v
+        else if isDerivation v then toString v
+        else if builtins.isPath v then toString v
+        else if isString v then v
+        else if strings.isConvertibleWithToString v then toString v
+        else abort "The nix conf value: ${toPretty {} v} can not be encoded";
+
+      mkKeyValue = k: v: "${escape [ "=" ] k} = ${mkValueString v}";
+
+      mkKeyValuePairs = attrs: concatStringsSep "\n" (mapAttrsToList mkKeyValue attrs);
+
+    in
+    pkgs.writeTextFile {
+      name = "nix.conf";
+      text = ''
+        # WARNING: this file is generated from the nix.* options in
+        # your NixOS configuration, typically
+        # /etc/nixos/configuration.nix.  Do not edit it!
+        ${mkKeyValuePairs cfg.settings}
+        ${cfg.extraOptions}
+      '';
+      checkPhase = lib.optionalString cfg.checkConfig (
+        if pkgs.stdenv.hostPlatform != pkgs.stdenv.buildPlatform then ''
+          echo "Ignoring validation for cross-compilation"
+        ''
+        else ''
+          echo "Validating generated nix.conf"
+          ln -s $out ./nix.conf
+          set -e
+          set +o pipefail
+          NIX_CONF_DIR=$PWD \
+            ${cfg.package}/bin/nix show-config ${optionalString (isNixAtLeast "2.3pre") "--no-net"} \
+              ${optionalString (isNixAtLeast "2.4pre") "--option experimental-features nix-command"} \
+            |& sed -e 's/^warning:/error:/' \
+            | (! grep '${if cfg.checkAllErrors then "^error:" else "^error: unknown setting"}')
+          set -o pipefail
+        '');
+    };
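+
+  # For illustration (hypothetical cache URL), with settings such as
+  #   nix.settings = { sandbox = true; substituters = [ "https://cache.example.org" ]; };
+  # the generator above emits key/value lines like
+  #   sandbox = true
+  #   substituters = https://cache.example.org
+  # (lists are rendered space-separated via toString, booleans via boolToString).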
+
+in
+{
+  imports = [
+    (mkRenamedOptionModuleWith { sinceRelease = 2003; from = [ "nix" "useChroot" ]; to = [ "nix" "useSandbox" ]; })
+    (mkRenamedOptionModuleWith { sinceRelease = 2003; from = [ "nix" "chrootDirs" ]; to = [ "nix" "sandboxPaths" ]; })
+  ] ++
+    mapAttrsToList
+      (oldConf: newConf:
+        mkRenamedOptionModuleWith {
+          sinceRelease = 2205;
+          from = [ "nix" oldConf ];
+          to = [ "nix" "settings" newConf ];
+      })
+      legacyConfMappings;
+
+  options = {
+    nix = {
+      checkConfig = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          If enabled, checks that Nix can parse the generated nix.conf.
+        '';
+      };
+
+      checkAllErrors = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          If enabled, checks the nix.conf parsing for any kind of error. When disabled, checks only for unknown settings.
+        '';
+      };
+
+      extraOptions = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          keep-outputs = true
+          keep-derivations = true
+        '';
+        description = lib.mdDoc "Additional text appended to {file}`nix.conf`.";
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = semanticConfType;
+
+          options = {
+            max-jobs = mkOption {
+              type = types.either types.int (types.enum [ "auto" ]);
+              default = "auto";
+              example = 64;
+              description = lib.mdDoc ''
+                This option defines the maximum number of jobs that Nix will try to
+                build in parallel. The default is auto, which means it will use all
+                available logical cores. It is recommended to set it to the total
+                number of logical cores in your system (e.g., 16 for two CPUs with 4
+                cores each and hyper-threading).
+              '';
+            };
+
+            auto-optimise-store = mkOption {
+              type = types.bool;
+              default = false;
+              example = true;
+              description = lib.mdDoc ''
+                If set to true, Nix automatically detects files in the store that have
+                identical contents, and replaces them with hard links to a single copy.
+                This saves disk space. If set to false (the default), you can still run
+                nix-store --optimise to get rid of duplicate files.
+              '';
+            };
+
+            cores = mkOption {
+              type = types.int;
+              default = 0;
+              example = 64;
+              description = lib.mdDoc ''
+                This option defines the maximum number of concurrent tasks during
+                one build. It affects, e.g., -j option for make.
+                The special value 0 means that the builder should use all
+                available CPU cores in the system. Some builds may become
+                non-deterministic with this option; use with care! Packages will
+                only be affected if enableParallelBuilding is set for them.
+              '';
+            };
+
+            sandbox = mkOption {
+              type = types.either types.bool (types.enum [ "relaxed" ]);
+              default = true;
+              description = lib.mdDoc ''
+                If set, Nix will perform builds in a sandboxed environment that it
+                will set up automatically for each build. This prevents impurities
+                in builds by disallowing access to dependencies outside of the Nix
+                store by using network and mount namespaces in a chroot environment.
+
+                This is enabled by default even though it has a possible performance
+                impact due to the initial setup time of a sandbox for each build. It
+                doesn't affect derivation hashes, so changing this option will not
+                trigger a rebuild of packages.
+
+                When set to "relaxed", this option permits derivations that set
+                `__noChroot = true;` to run outside of the sandboxed environment.
+                Exercise caution when using this mode of operation! It is intended to
+                be a quick hack when building with packages that are not easily
+                set up to be built reproducibly.
+              '';
+            };
+
+            extra-sandbox-paths = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              example = [ "/dev" "/proc" ];
+              description = lib.mdDoc ''
+                Directories from the host filesystem to be included
+                in the sandbox.
+              '';
+            };
+
+            substituters = mkOption {
+              type = types.listOf types.str;
+              description = lib.mdDoc ''
+                List of binary cache URLs used to obtain pre-built binaries
+                of Nix packages.
+
+                By default https://cache.nixos.org/ is added.
+              '';
+            };
+
+            trusted-substituters = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              example = [ "https://hydra.nixos.org/" ];
+              description = lib.mdDoc ''
+                List of binary cache URLs that non-root users can use (in
+                addition to those specified using
+                {option}`nix.settings.substituters`) by passing
+                `--option binary-caches` to Nix commands.
+              '';
+            };
+
+            require-sigs = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                If enabled (the default), Nix will only download binaries from binary caches if
+                they are cryptographically signed with any of the keys listed in
+                {option}`nix.settings.trusted-public-keys`. If disabled, signatures are neither
+                required nor checked, so it's strongly recommended that you use only
+                trustworthy caches and https to prevent man-in-the-middle attacks.
+              '';
+            };
+
+            trusted-public-keys = mkOption {
+              type = types.listOf types.str;
+              example = [ "hydra.nixos.org-1:CNHJZBh9K4tP3EKF6FkkgeVYsS3ohTl+oS0Qa8bezVs=" ];
+              description = lib.mdDoc ''
+                List of public keys used to sign binary caches. If
+                {option}`nix.settings.require-sigs` is enabled,
+                then Nix will use a binary from a binary cache if and only
+                if it is signed by *any* of the keys
+                listed here. By default, only the key for
+                `cache.nixos.org` is included.
+              '';
+            };
+
+            trusted-users = mkOption {
+              type = types.listOf types.str;
+              default = [ "root" ];
+              example = [ "root" "alice" "@wheel" ];
+              description = lib.mdDoc ''
+                A list of names of users that have additional rights when
+                connecting to the Nix daemon, such as the ability to specify
+                additional binary caches, or to import unsigned NARs. You
+                can also specify groups by prefixing them with
+                `@`; for instance,
+                `@wheel` means all users in the wheel
+                group.
+              '';
+            };
+
+            system-features = mkOption {
+              type = types.listOf types.str;
+              example = [ "kvm" "big-parallel" "gccarch-skylake" ];
+              description = lib.mdDoc ''
+                The set of features supported by the machine. Derivations
+                can express dependencies on system features through the
+                `requiredSystemFeatures` attribute.
+
+                By default, pseudo-features `nixos-test`, `benchmark`,
+                and `big-parallel` used in Nixpkgs are set; `kvm`
+                is also included if it is available.
+              '';
+            };
+
+            allowed-users = mkOption {
+              type = types.listOf types.str;
+              default = [ "*" ];
+              example = [ "@wheel" "@builders" "alice" "bob" ];
+              description = lib.mdDoc ''
+                A list of names of users (separated by whitespace) that are
+                allowed to connect to the Nix daemon. As with
+                {option}`nix.settings.trusted-users`, you can specify groups by
+                prefixing them with `@`. Also, you can
+                allow all users by specifying `*`. The
+                default is `*`. Note that trusted users are
+                always allowed to connect.
+              '';
+            };
+          };
+        };
+        default = { };
+        example = literalExpression ''
+          {
+            use-sandbox = true;
+            show-trace = true;
+
+            system-features = [ "big-parallel" "kvm" "recursive-nix" ];
+            sandbox-paths = { "/bin/sh" = "''${pkgs.busybox-sandbox-shell.out}/bin/busybox"; };
+          }
+        '';
+        description = lib.mdDoc ''
+          Configuration for Nix, see
+          <https://nixos.org/manual/nix/stable/command-ref/conf-file.html> or
+          {manpage}`nix.conf(5)` for available options.
+          The value declared here will be translated directly to the key-value pairs Nix expects.
+
+          You can use {command}`nix-instantiate --eval --strict '<nixpkgs/nixos>' -A config.nix.settings`
+          to view the current value. By default it is empty.
+
+          Nix configurations defined under {option}`nix.*` will be translated and applied to this
+          option. In addition, configuration specified in {option}`nix.extraOptions` will be appended
+          verbatim to the resulting config file.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."nix/nix.conf".source = nixConf;
+    nix.settings = {
+      trusted-public-keys = [ "cache.nixos.org-1:6NCHdD59X431o0gWypbMrAURkbJ16ZPMQFGspcDShjY=" ];
+      substituters = mkAfter [ "https://cache.nixos.org/" ];
+      system-features = mkDefault (
+        [ "nixos-test" "benchmark" "big-parallel" "kvm" ] ++
+        optionals (pkgs.stdenv.hostPlatform ? gcc.arch) (
+          # a builder can run code for `gcc.arch` and inferior architectures
+          [ "gccarch-${pkgs.stdenv.hostPlatform.gcc.arch}" ] ++
+          map (x: "gccarch-${x}") (systems.architectures.inferiors.${pkgs.stdenv.hostPlatform.gcc.arch} or [])
+        )
+      );
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/no-x-libs.nix b/nixpkgs/nixos/modules/config/no-x-libs.nix
new file mode 100644
index 000000000000..b2eb46f273b1
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/no-x-libs.nix
@@ -0,0 +1,83 @@
+# This module gets rid of all dependencies on X11 client libraries
+# (including fontconfig).
+
+{ config, lib, ... }:
+
+with lib;
+
+{
+  options = {
+    environment.noXlibs = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Switch off the options in the default configuration that
+        require X11 libraries. This includes client-side font
+        configuration and SSH forwarding of X11 authentication.
+        Thus, you probably do not want to enable this option if
+        you want to run X11 programs on this machine via SSH.
+      '';
+    };
+  };
+
+  config = mkIf config.environment.noXlibs {
+    programs.ssh.setXAuthLocation = false;
+    security.pam.services.su.forwardXAuth = lib.mkForce false;
+
+    fonts.fontconfig.enable = false;
+
+    nixpkgs.overlays = singleton (const (super: {
+      beam = super.beam_nox;
+      cairo = super.cairo.override { x11Support = false; };
+      dbus = super.dbus.override { x11Support = false; };
+      ffmpeg_4 = super.ffmpeg_4.override { ffmpegVariant = "headless"; };
+      ffmpeg_5 = super.ffmpeg_5.override { ffmpegVariant = "headless"; };
+      # dep of graphviz, libXpm is optional for Xpm support
+      gd = super.gd.override { withXorg = false; };
+      gobject-introspection = super.gobject-introspection.override { x11Support = false; };
+      gpsd = super.gpsd.override { guiSupport = false; };
+      graphviz = super.graphviz-nox;
+      gst_all_1 = super.gst_all_1 // {
+        gst-plugins-bad = super.gst_all_1.gst-plugins-bad.override { guiSupport = false; };
+        gst-plugins-base = super.gst_all_1.gst-plugins-base.override { enableWayland = false; enableX11 = false; };
+        gst-plugins-good = super.gst_all_1.gst-plugins-good.override { enableX11 = false; };
+      };
+      imagemagick = super.imagemagick.override { libX11Support = false; libXtSupport = false; };
+      imagemagickBig = super.imagemagickBig.override { libX11Support = false; libXtSupport = false; };
+      libdevil = super.libdevil-nox;
+      libextractor = super.libextractor.override { gtkSupport = false; };
+      libva = super.libva-minimal;
+      limesuite = super.limesuite.override { withGui = false; };
+      mc = super.mc.override { x11Support = false; };
+      mpv-unwrapped = super.mpv-unwrapped.override { sdl2Support = false; x11Support = false; waylandSupport = false; };
+      msmtp = super.msmtp.override { withKeyring = false; };
+      neofetch = super.neofetch.override { x11Support = false; };
+      networkmanager-fortisslvpn = super.networkmanager-fortisslvpn.override { withGnome = false; };
+      networkmanager-iodine = super.networkmanager-iodine.override { withGnome = false; };
+      networkmanager-l2tp = super.networkmanager-l2tp.override { withGnome = false; };
+      networkmanager-openconnect = super.networkmanager-openconnect.override { withGnome = false; };
+      networkmanager-openvpn = super.networkmanager-openvpn.override { withGnome = false; };
+      networkmanager-sstp = super.networkmanager-sstp.override { withGnome = false; };
+      networkmanager-vpnc = super.networkmanager-vpnc.override { withGnome = false; };
+      pango = super.pango.override { x11Support = false; };
+      pinentry = super.pinentry.override { enabledFlavors = [ "curses" "tty" "emacs" ]; withLibsecret = false; };
+      pipewire = super.pipewire.override { x11Support = false; };
+      pythonPackagesExtensions = super.pythonPackagesExtensions ++ [
+        (python-final: python-prev: {
+          # tk feature requires wayland which fails to compile
+          matplotlib = python-prev.matplotlib.override { enableTk = false; };
+        })
+      ];
+      qemu = super.qemu.override { gtkSupport = false; spiceSupport = false; sdlSupport = false; };
+      qrencode = super.qrencode.overrideAttrs (_: { doCheck = false; });
+      qt5 = super.qt5.overrideScope (const (super': {
+        qtbase = super'.qtbase.override { withGtk3 = false; };
+      }));
+      stoken = super.stoken.override { withGTK3 = false; };
+      # translateManpages -> perlPackages.po4a -> texlive-combined-basic -> texlive-core-big -> libX11
+      util-linux = super.util-linux.override { translateManpages = false; };
+      vim-full = super.vim-full.override { guiSupport = false; };
+      zbar = super.zbar.override { enableVideo = false; withXorg = false; };
+    }));
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/nsswitch.nix b/nixpkgs/nixos/modules/config/nsswitch.nix
new file mode 100644
index 000000000000..b004072813bd
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/nsswitch.nix
@@ -0,0 +1,136 @@
+# Configuration for the Name Service Switch (/etc/nsswitch.conf).
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  options = {
+
+    # NSS modules.  Hacky!
+    # Only works with nscd!
+    system.nssModules = mkOption {
+      type = types.listOf types.path;
+      internal = true;
+      default = [];
+      description = lib.mdDoc ''
+        Search path for NSS (Name Service Switch) modules.  This allows
+        several DNS resolution methods to be specified via
+        {file}`/etc/nsswitch.conf`.
+      '';
+      apply = list:
+        {
+          inherit list;
+          path = makeLibraryPath list;
+        };
+    };
+
+    system.nssDatabases = {
+      passwd = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of passwd entries to configure in {file}`/etc/nsswitch.conf`.
+
+          Note that "files" is always prepended while "systemd" is appended if nscd is enabled.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+
+      group = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of group entries to configure in {file}`/etc/nsswitch.conf`.
+
+          Note that "files" is always prepended while "systemd" is appended if nscd is enabled.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+
+      shadow = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of shadow entries to configure in {file}`/etc/nsswitch.conf`.
+
+          Note that "files" is always prepended.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+
+      hosts = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of hosts entries to configure in {file}`/etc/nsswitch.conf`.
+
+          Note that "files" is always prepended, and "dns" and "myhostname" are always appended.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+
+      services = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of services entries to configure in {file}`/etc/nsswitch.conf`.
+
+          Note that "files" is always prepended.
+
+          This option only takes effect if nscd is enabled.
+        '';
+        default = [];
+      };
+    };
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "system" "nssHosts" ] [ "system" "nssDatabases" "hosts" ])
+  ];
+
+  config = {
+    assertions = [
+      {
+        assertion = config.system.nssModules.path != "" -> config.services.nscd.enable;
+        message = ''
+          Loading NSS modules from system.nssModules (${config.system.nssModules.path}),
+          requires services.nscd.enable being set to true.
+
+          If disabling nscd is really necessary, it is possible to disable loading NSS modules
+          by setting `system.nssModules = lib.mkForce [];` in your configuration.nix.
+        '';
+      }
+    ];
+
+    # Name Service Switch configuration file.  Required by the C
+    # library.
+    environment.etc."nsswitch.conf".text = ''
+      passwd:    ${concatStringsSep " " config.system.nssDatabases.passwd}
+      group:     ${concatStringsSep " " config.system.nssDatabases.group}
+      shadow:    ${concatStringsSep " " config.system.nssDatabases.shadow}
+
+      hosts:     ${concatStringsSep " " config.system.nssDatabases.hosts}
+      networks:  files
+
+      ethers:    files
+      services:  ${concatStringsSep " " config.system.nssDatabases.services}
+      protocols: files
+      rpc:       files
+    '';
+
+    system.nssDatabases = {
+      passwd = mkBefore [ "files" ];
+      group = mkBefore [ "files" ];
+      shadow = mkBefore [ "files" ];
+      hosts = mkMerge [
+        (mkOrder 998 [ "files" ])
+        (mkOrder 1499 [ "dns" ])
+      ];
+      services = mkBefore [ "files" ];
+    };
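+
+    # A sketch of how another module would extend these databases ("foo" is a
+    # hypothetical NSS plugin); the priorities above keep "files" first and
+    # "dns" near the end:
+    #   system.nssDatabases.passwd = [ "foo" ];
+    #   system.nssDatabases.hosts = [ "foo" ];   # lands between "files" and "dns"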
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/power-management.nix b/nixpkgs/nixos/modules/config/power-management.nix
new file mode 100644
index 000000000000..e7fd02920e0d
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/power-management.nix
@@ -0,0 +1,106 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.powerManagement;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    powerManagement = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            Whether to enable power management.  This includes support
+            for suspend-to-RAM and powersave features on laptops.
+          '';
+      };
+
+      resumeCommands = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Commands executed after the system resumes from suspend-to-RAM.";
+      };
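+
+      # A sketch (log path is illustrative) of recording each resume:
+      #   powerManagement.resumeCommands = "echo resumed >> /tmp/resume.log";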
+
+      powerUpCommands = mkOption {
+        type = types.lines;
+        default = "";
+        example = literalExpression ''
+          "''${pkgs.hdparm}/sbin/hdparm -B 255 /dev/sda"
+        '';
+        description =
+          lib.mdDoc ''
+            Commands executed when the machine powers up.  That is,
+            they're executed both when the system first boots and when
+            it resumes from suspend or hibernation.
+          '';
+      };
+
+      powerDownCommands = mkOption {
+        type = types.lines;
+        default = "";
+        example = literalExpression ''
+          "''${pkgs.hdparm}/sbin/hdparm -B 255 /dev/sda"
+        '';
+        description =
+          lib.mdDoc ''
+            Commands executed when the machine powers down.  That is,
+            they're executed both when the system shuts down and when
+            it goes to suspend or hibernation.
+          '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.targets.post-resume = {
+      description = "Post-Resume Actions";
+      requires = [ "post-resume.service" ];
+      after = [ "post-resume.service" ];
+      wantedBy = [ "sleep.target" ];
+      unitConfig.StopWhenUnneeded = true;
+    };
+
+    # Service executed before suspending/hibernating.
+    systemd.services.pre-sleep =
+      { description = "Pre-Sleep Actions";
+        wantedBy = [ "sleep.target" ];
+        before = [ "sleep.target" ];
+        script =
+          ''
+            ${cfg.powerDownCommands}
+          '';
+        serviceConfig.Type = "oneshot";
+      };
+
+    systemd.services.post-resume =
+      { description = "Post-Resume Actions";
+        after = [ "suspend.target" "hibernate.target" "hybrid-sleep.target" "suspend-then-hibernate.target" ];
+        script =
+          ''
+            /run/current-system/systemd/bin/systemctl try-restart --no-block post-resume.target
+            ${cfg.resumeCommands}
+            ${cfg.powerUpCommands}
+          '';
+        serviceConfig.Type = "oneshot";
+      };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/pulseaudio.nix b/nixpkgs/nixos/modules/config/pulseaudio.nix
new file mode 100644
index 000000000000..80ff6c1aabf7
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/pulseaudio.nix
@@ -0,0 +1,332 @@
+{ config, lib, pkgs, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.hardware.pulseaudio;
+  alsaCfg = config.sound;
+
+  systemWide = cfg.enable && cfg.systemWide;
+  nonSystemWide = cfg.enable && !cfg.systemWide;
+  hasZeroconf = let z = cfg.zeroconf; in z.publish.enable || z.discovery.enable;
+
+  overriddenPackage = cfg.package.override
+    (optionalAttrs hasZeroconf { zeroconfSupport = true; });
+  binary = "${getBin overriddenPackage}/bin/pulseaudio";
+  binaryNoDaemon = "${binary} --daemonize=no";
+
+  # Forces 32bit pulseaudio and alsa-plugins to be built/supported for apps
+  # using 32bit alsa on 64bit linux.
+  enable32BitAlsaPlugins = cfg.support32Bit && stdenv.isx86_64 && (pkgs.pkgsi686Linux.alsa-lib != null && pkgs.pkgsi686Linux.libpulseaudio != null);
+
+
+  myConfigFile =
+    let
+      addModuleIf = cond: mod: optionalString cond "load-module ${mod}";
+      allAnon = optional cfg.tcp.anonymousClients.allowAll "auth-anonymous=1";
+      ipAnon =  let a = cfg.tcp.anonymousClients.allowedIpRanges;
+                in optional (a != []) ''auth-ip-acl=${concatStringsSep ";" a}'';
+    in writeTextFile {
+      name = "default.pa";
+      text = ''
+        .include ${cfg.configFile}
+        ${addModuleIf cfg.zeroconf.publish.enable "module-zeroconf-publish"}
+        ${addModuleIf cfg.zeroconf.discovery.enable "module-zeroconf-discover"}
+        ${addModuleIf cfg.tcp.enable (concatStringsSep " "
+           ([ "module-native-protocol-tcp" ] ++ allAnon ++ ipAnon))}
+        ${addModuleIf config.services.jack.jackd.enable "module-jack-sink"}
+        ${addModuleIf config.services.jack.jackd.enable "module-jack-source"}
+        ${cfg.extraConfig}
+      '';
+    };
+
+  ids = config.ids;
+
+  uid = ids.uids.pulseaudio;
+  gid = ids.gids.pulseaudio;
+
+  stateDir = "/run/pulse";
+
+  # Create pulse/client.conf even if PulseAudio is disabled so
+  # that we can disable the autospawn feature in programs that
+  # are built with PulseAudio support (like KDE).
+  clientConf = writeText "client.conf" ''
+    autospawn=no
+    ${cfg.extraClientConf}
+  '';
+
+  # Write an /etc/asound.conf that causes all ALSA applications to
+  # be re-routed to the PulseAudio server through ALSA's Pulse
+  # plugin.
+  alsaConf = writeText "asound.conf" (''
+    pcm_type.pulse {
+      libs.native = ${pkgs.alsa-plugins}/lib/alsa-lib/libasound_module_pcm_pulse.so ;
+      ${lib.optionalString enable32BitAlsaPlugins
+     "libs.32Bit = ${pkgs.pkgsi686Linux.alsa-plugins}/lib/alsa-lib/libasound_module_pcm_pulse.so ;"}
+    }
+    pcm.!default {
+      type pulse
+      hint.description "Default Audio Device (via PulseAudio)"
+    }
+    ctl_type.pulse {
+      libs.native = ${pkgs.alsa-plugins}/lib/alsa-lib/libasound_module_ctl_pulse.so ;
+      ${lib.optionalString enable32BitAlsaPlugins
+     "libs.32Bit = ${pkgs.pkgsi686Linux.alsa-plugins}/lib/alsa-lib/libasound_module_ctl_pulse.so ;"}
+    }
+    ctl.!default {
+      type pulse
+    }
+    ${alsaCfg.extraConfig}
+  '');
+
+in {
+
+  options = {
+
+    hardware.pulseaudio = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the PulseAudio sound server.
+        '';
+      };
+
+      systemWide = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If false, a PulseAudio server is launched automatically for
+          each user that tries to use the sound system. The server runs
+          with user privileges. If true, one system-wide PulseAudio
+          server is launched on boot, running as the user "pulse", and
+          only users in the "pulse-access" group will have access to the server.
+          Please read the PulseAudio documentation for more details.
+
+          Don't enable this option unless you know what you are doing.
+        '';
+      };
+
+      support32Bit = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to include the 32-bit pulseaudio libraries in the system or not.
+          This is only useful on 64-bit systems and currently limited to x86_64-linux.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          The path to the default configuration options the PulseAudio server
+          should use. By default, the "default.pa" configuration
+          from the PulseAudio distribution is used.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Literal string to append to `configFile`
+          and the config file generated by the pulseaudio module.
+        '';
+      };
+
+      extraClientConf = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration appended to pulse/client.conf file.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = if config.services.jack.jackd.enable
+                  then pkgs.pulseaudioFull
+                  else pkgs.pulseaudio;
+        defaultText = literalExpression "pkgs.pulseaudio";
+        example = literalExpression "pkgs.pulseaudioFull";
+        description = lib.mdDoc ''
+          The PulseAudio derivation to use.  This can be used to enable
+          features (such as JACK support, Bluetooth) via the
+          `pulseaudioFull` package.
+        '';
+      };
+
+      extraModules = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.pulseaudio-modules-bt ]";
+        description = lib.mdDoc ''
+          Extra pulseaudio modules to use. This is intended for out-of-tree
+          pulseaudio modules like extra bluetooth codecs.
+
+          Extra modules take precedence over built-in pulseaudio modules.
+        '';
+      };
+
+      daemon = {
+        logLevel = mkOption {
+          type = types.str;
+          default = "notice";
+          description = lib.mdDoc ''
+            The log level that the system-wide pulseaudio daemon should use,
+            if activated.
+          '';
+        };
+
+        config = mkOption {
+          type = types.attrsOf types.unspecified;
+          default = {};
+          description = lib.mdDoc "Config of the pulse daemon. See `man pulse-daemon.conf`.";
+          example = literalExpression ''{ realtime-scheduling = "yes"; }'';
+        };
+      };
+
+      zeroconf = {
+        discovery.enable =
+          mkEnableOption (lib.mdDoc "discovery of pulseaudio sinks in the local network");
+        publish.enable =
+          mkEnableOption (lib.mdDoc "publishing the pulseaudio sink in the local network");
+      };
+
+      # TODO: enable by default?
+      tcp = {
+        enable = mkEnableOption (lib.mdDoc "tcp streaming support");
+
+        anonymousClients = {
+          allowAll = mkEnableOption (lib.mdDoc "all anonymous clients to stream to the server");
+          allowedIpRanges = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            example = literalExpression ''[ "127.0.0.1" "192.168.1.0/24" ]'';
+            description = lib.mdDoc ''
+              A list of IP subnets that are allowed to stream to the server.
+            '';
+          };
+        };
+      };
+
+    };
+
+  };
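+
+  # Illustrative sketch only (not part of the module's interface): a user
+  # configuration might combine the options declared above roughly like this:
+  #
+  #   hardware.pulseaudio = {
+  #     enable = true;
+  #     support32Bit = true;            # only meaningful on x86_64-linux
+  #     package = pkgs.pulseaudioFull;  # e.g. for Bluetooth/JACK support
+  #   };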
+
+
+  config = mkMerge [
+    {
+      environment.etc = {
+        "pulse/client.conf".source = clientConf;
+      };
+
+      hardware.pulseaudio.configFile = mkDefault "${getBin overriddenPackage}/etc/pulse/default.pa";
+    }
+
+    (mkIf cfg.enable {
+      environment.systemPackages = [ overriddenPackage ];
+
+      sound.enable = true;
+
+      environment.etc = {
+        "asound.conf".source = alsaConf;
+
+        "pulse/daemon.conf".source = writeText "daemon.conf"
+          (lib.generators.toKeyValue {} cfg.daemon.config);
+
+        "openal/alsoft.conf".source = writeText "alsoft.conf" "drivers=pulse";
+
+        "libao.conf".source = writeText "libao.conf" "default_driver=pulse";
+      };
+
+      # Disable flat volumes to enable relative ones
+      hardware.pulseaudio.daemon.config.flat-volumes = mkDefault "no";
+
+      # Upstream defaults to speex-float-1 which results in audible artifacts
+      hardware.pulseaudio.daemon.config.resample-method = mkDefault "speex-float-5";
+
+      # Allow PulseAudio to get realtime priority using rtkit.
+      security.rtkit.enable = true;
+
+      systemd.packages = [ overriddenPackage ];
+
+      # PulseAudio is packaged with udev rules to handle various audio device quirks
+      services.udev.packages = [ overriddenPackage ];
+    })
+
+    (mkIf (cfg.extraModules != []) {
+      hardware.pulseaudio.daemon.config.dl-search-path = let
+        overriddenModules = builtins.map
+          (drv: drv.override { pulseaudio = overriddenPackage; })
+          cfg.extraModules;
+        modulePaths = builtins.map
+          (drv: "${drv}/lib/pulseaudio/modules")
+          # User-provided extra modules take precedence
+          (overriddenModules ++ [ overriddenPackage ]);
+      in lib.concatStringsSep ":" modulePaths;
+    })
+
+    (mkIf hasZeroconf {
+      services.avahi.enable = true;
+    })
+    (mkIf cfg.zeroconf.publish.enable {
+      services.avahi.publish.enable = true;
+      services.avahi.publish.userServices = true;
+    })
+
+    (mkIf nonSystemWide {
+      environment.etc = {
+        "pulse/default.pa".source = myConfigFile;
+      };
+      systemd.user = {
+        services.pulseaudio = {
+          restartIfChanged = true;
+          serviceConfig = {
+            RestartSec = "500ms";
+            PassEnvironment = "DISPLAY";
+          };
+        } // optionalAttrs config.services.jack.jackd.enable {
+          environment.JACK_PROMISCUOUS_SERVER = "jackaudio";
+        };
+        sockets.pulseaudio = {
+          wantedBy = [ "sockets.target" ];
+        };
+      };
+    })
+
+    (mkIf systemWide {
+      users.users.pulse = {
+        # For some reason, PulseAudio wants UID == GID.
+        uid = assert uid == gid; uid;
+        group = "pulse";
+        extraGroups = [ "audio" ];
+        description = "PulseAudio system service user";
+        home = stateDir;
+        createHome = true;
+        isSystemUser = true;
+      };
+
+      users.groups.pulse.gid = gid;
+      users.groups.pulse-access = {};
+
+      systemd.services.pulseaudio = {
+        description = "PulseAudio System-Wide Server";
+        wantedBy = [ "sound.target" ];
+        before = [ "sound.target" ];
+        environment.PULSE_RUNTIME_PATH = stateDir;
+        serviceConfig = {
+          Type = "notify";
+          ExecStart = "${binaryNoDaemon} --log-level=${cfg.daemon.logLevel} --system -n --file=${myConfigFile}";
+          Restart = "on-failure";
+          RestartSec = "500ms";
+        };
+      };
+
+      environment.variables.PULSE_COOKIE = "${stateDir}/.config/pulse/cookie";
+    })
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/config/qt.nix b/nixpkgs/nixos/modules/config/qt.nix
new file mode 100644
index 000000000000..f82b7ab85a8c
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/qt.nix
@@ -0,0 +1,154 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.qt;
+
+  platformPackages = with pkgs; {
+    gnome = [ qgnomeplatform qgnomeplatform-qt6 ];
+    gtk2 = [ libsForQt5.qtstyleplugins qt6Packages.qt6gtk2 ];
+    kde = [ libsForQt5.plasma-integration libsForQt5.systemsettings ];
+    lxqt = [ lxqt.lxqt-qtplugin lxqt.lxqt-config ];
+    qt5ct = [ libsForQt5.qt5ct qt6Packages.qt6ct ];
+  };
+
+  stylePackages = with pkgs; {
+    bb10bright = [ libsForQt5.qtstyleplugins ];
+    bb10dark = [ libsForQt5.qtstyleplugins ];
+    cde = [ libsForQt5.qtstyleplugins ];
+    cleanlooks = [ libsForQt5.qtstyleplugins ];
+    gtk2 = [ libsForQt5.qtstyleplugins qt6Packages.qt6gtk2 ];
+    motif = [ libsForQt5.qtstyleplugins ];
+    plastique = [ libsForQt5.qtstyleplugins ];
+
+    adwaita = [ adwaita-qt adwaita-qt6 ];
+    adwaita-dark = [ adwaita-qt adwaita-qt6 ];
+    adwaita-highcontrast = [ adwaita-qt adwaita-qt6 ];
+    adwaita-highcontrastinverse = [ adwaita-qt adwaita-qt6 ];
+
+    breeze = [ libsForQt5.breeze-qt5 ];
+
+    kvantum = [ libsForQt5.qtstyleplugin-kvantum qt6Packages.qtstyleplugin-kvantum ];
+  };
+in
+{
+  meta.maintainers = with lib.maintainers; [ romildo thiagokokada ];
+
+  imports = [
+    (lib.mkRenamedOptionModule [ "qt5" "enable" ] [ "qt" "enable" ])
+    (lib.mkRenamedOptionModule [ "qt5" "platformTheme" ] [ "qt" "platformTheme" ])
+    (lib.mkRenamedOptionModule [ "qt5" "style" ] [ "qt" "style" ])
+  ];
+
+  options = {
+    qt = {
+      enable = lib.mkEnableOption "" // {
+        description = lib.mdDoc ''
+          Whether to enable Qt configuration, including theming.
+
+          Enabling this option is necessary for Qt plugins to work in the
+          installed profiles (e.g.: `nix-env -i` or `environment.systemPackages`).
+        '';
+      };
+
+      platformTheme = lib.mkOption {
+        type = with lib.types; nullOr (enum (lib.attrNames platformPackages));
+        default = null;
+        example = "gnome";
+        relatedPackages = [
+          "qgnomeplatform"
+          "qgnomeplatform-qt6"
+          [ "libsForQt5" "plasma-integration" ]
+          [ "libsForQt5" "qt5ct" ]
+          [ "libsForQt5" "qtstyleplugins" ]
+          [ "libsForQt5" "systemsettings" ]
+          [ "lxqt" "lxqt-config" ]
+          [ "lxqt" "lxqt-qtplugin" ]
+          [ "qt6Packages" "qt6ct" ]
+          [ "qt6Packages" "qt6gtk2" ]
+        ];
+        description = lib.mdDoc ''
+          Selects the platform theme to use for Qt applications.
+
+          The options are
+          - `gnome`: Use GNOME theme with [qgnomeplatform](https://github.com/FedoraQt/QGnomePlatform)
+          - `gtk2`: Use GTK theme with [qtstyleplugins](https://github.com/qt/qtstyleplugins)
+          - `kde`: Use Qt settings from Plasma.
+          - `lxqt`: Use LXQt style set using the [lxqt-config-appearance](https://github.com/lxqt/lxqt-config)
+             application.
+          - `qt5ct`: Use Qt style set using the [qt5ct](https://sourceforge.net/projects/qt5ct/)
+             and [qt6ct](https://github.com/trialuser02/qt6ct) applications.
+        '';
+      };
+
+      style = lib.mkOption {
+        type = with lib.types; nullOr (enum (lib.attrNames stylePackages));
+        default = null;
+        example = "adwaita";
+        relatedPackages = [
+          "adwaita-qt"
+          "adwaita-qt6"
+          [ "libsForQt5" "breeze-qt5" ]
+          [ "libsForQt5" "qtstyleplugin-kvantum" ]
+          [ "libsForQt5" "qtstyleplugins" ]
+          [ "qt6Packages" "qt6gtk2" ]
+          [ "qt6Packages" "qtstyleplugin-kvantum" ]
+        ];
+        description = lib.mdDoc ''
+          Selects the style to use for Qt applications.
+
+          The options are
+          - `adwaita`, `adwaita-dark`, `adwaita-highcontrast`, `adwaita-highcontrastinverse`:
+            Use Adwaita Qt style with
+            [adwaita](https://github.com/FedoraQt/adwaita-qt)
+          - `breeze`: Use the Breeze style from
+            [breeze](https://github.com/KDE/breeze)
+          - `bb10bright`, `bb10dark`, `cleanlooks`, `gtk2`, `motif`, `plastique`:
+            Use styles from
+            [qtstyleplugins](https://github.com/qt/qtstyleplugins)
+          - `kvantum`: Use styles from
+            [kvantum](https://github.com/tsujan/Kvantum)
+        '';
+      };
+    };
+  };
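+
+  # Illustrative sketch only: because of the assertion below, the "gnome"
+  # platform theme must be paired with a style that also themes GTK, e.g.:
+  #
+  #   qt = {
+  #     enable = true;
+  #     platformTheme = "gnome";
+  #     style = "adwaita-dark";
+  #   };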
+
+  config = lib.mkIf cfg.enable {
+    assertions =
+      let
+        gnomeStyles = [
+          "adwaita"
+          "adwaita-dark"
+          "adwaita-highcontrast"
+          "adwaita-highcontrastinverse"
+          "breeze"
+        ];
+      in
+      [
+        {
+          assertion = cfg.platformTheme == "gnome" -> (builtins.elem cfg.style gnomeStyles);
+          message = ''
+            `qt.platformTheme` "gnome" must have `qt.style` set to a theme that supports both Qt and Gtk,
+            for example: ${lib.concatStringsSep ", " gnomeStyles}.
+          '';
+        }
+      ];
+
+    environment.variables = {
+      QT_QPA_PLATFORMTHEME = lib.mkIf (cfg.platformTheme != null) cfg.platformTheme;
+      QT_STYLE_OVERRIDE = lib.mkIf (cfg.style != null) cfg.style;
+    };
+
+    environment.profileRelativeSessionVariables =
+      let
+        qtVersions = with pkgs; [ qt5 qt6 ];
+      in
+      {
+        QT_PLUGIN_PATH = map (qt: "/${qt.qtbase.qtPluginPrefix}") qtVersions;
+        QML2_IMPORT_PATH = map (qt: "/${qt.qtbase.qtQmlPrefix}") qtVersions;
+      };
+
+    environment.systemPackages =
+      lib.optionals (cfg.platformTheme != null) (platformPackages.${cfg.platformTheme})
+      ++ lib.optionals (cfg.style != null) (stylePackages.${cfg.style});
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/resolvconf.nix b/nixpkgs/nixos/modules/config/resolvconf.nix
new file mode 100644
index 000000000000..e9ae4d651d26
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/resolvconf.nix
@@ -0,0 +1,160 @@
+# /etc files related to networking, such as /etc/services.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking.resolvconf;
+
+  resolvconfOptions = cfg.extraOptions
+    ++ optional cfg.dnsSingleRequest "single-request"
+    ++ optional cfg.dnsExtensionMechanism "edns0";
+
+  configText =
+    ''
+      # This is the default, but we must set it here to prevent
+      # a collision with an apparently unrelated environment
+      # variable with the same name exported by dhcpcd.
+      interface_order='lo lo[0-9]*'
+    '' + optionalString config.services.nscd.enable ''
+      # Invalidate the nscd cache whenever resolv.conf is
+      # regenerated.
+      libc_restart='/run/current-system/systemd/bin/systemctl try-restart --no-block nscd.service 2> /dev/null'
+    '' + optionalString (length resolvconfOptions > 0) ''
+      # Options as described in resolv.conf(5)
+      resolv_conf_options='${concatStringsSep " " resolvconfOptions}'
+    '' + optionalString cfg.useLocalResolver ''
+      # This host runs a full-blown DNS resolver.
+      name_servers='127.0.0.1'
+    '' + cfg.extraConfig;
+
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "networking" "dnsSingleRequest" ] [ "networking" "resolvconf" "dnsSingleRequest" ])
+    (mkRenamedOptionModule [ "networking" "dnsExtensionMechanism" ] [ "networking" "resolvconf" "dnsExtensionMechanism" ])
+    (mkRenamedOptionModule [ "networking" "extraResolvconfConf" ] [ "networking" "resolvconf" "extraConfig" ])
+    (mkRenamedOptionModule [ "networking" "resolvconfOptions" ] [ "networking" "resolvconf" "extraOptions" ])
+    (mkRemovedOptionModule [ "networking" "resolvconf" "useHostResolvConf" ] "This option was never used for anything anyways")
+  ];
+
+  options = {
+
+    networking.resolvconf = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = !(config.environment.etc ? "resolv.conf");
+        defaultText = literalExpression ''!(config.environment.etc ? "resolv.conf")'';
+        description = lib.mdDoc ''
+          Whether DNS configuration is managed by resolvconf.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.openresolv;
+        defaultText = literalExpression "pkgs.openresolv";
+        description = lib.mdDoc ''
+          The package that provides the system-wide resolvconf command. Defaults to `openresolv`
+          if this module is enabled. Otherwise, it can be used by other modules (for example {option}`services.resolved`) to
+          provide a compatibility layer.
+
+          This option generally shouldn't be set by the user.
+        '';
+      };
+
+      dnsSingleRequest = lib.mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Recent versions of glibc will issue both ipv4 (A) and ipv6 (AAAA)
+          address queries at the same time, from the same port. Sometimes upstream
+          routers will systematically drop the ipv4 queries. The symptom of this problem is
+          that 'getent hosts example.com' only returns ipv6 (or perhaps only ipv4) addresses. The
+          workaround for this is to specify the option 'single-request' in
+          /etc/resolv.conf. This option enables that.
+        '';
+      };
+
+      dnsExtensionMechanism = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable the `edns0` option in {file}`resolv.conf`. With
+          that option set, `glibc` supports use of the extension mechanisms for
+          DNS (EDNS) specified in RFC 2671. The most popular user of that feature is DNSSEC,
+          which does not work without it.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "libc=NO";
+        description = lib.mdDoc ''
+          Extra configuration to append to {file}`resolvconf.conf`.
+        '';
+      };
+
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "ndots:1" "rotate" ];
+        description = lib.mdDoc ''
+          Set the options in {file}`/etc/resolv.conf`.
+        '';
+      };
+
+      useLocalResolver = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Use local DNS server for resolving.
+        '';
+      };
+
+    };
+
+  };
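+
+  # Illustrative sketch only of how these options might be combined in a user
+  # configuration (values are examples, not recommendations):
+  #
+  #   networking.resolvconf = {
+  #     dnsSingleRequest = true;                # send A and AAAA queries sequentially (see above)
+  #     extraOptions = [ "ndots:1" "rotate" ];
+  #   };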
+
+  config = mkMerge [
+    {
+      environment.etc."resolvconf.conf".text =
+        if !cfg.enable then
+          # Force-stop any attempts to use resolvconf
+          ''
+            echo "resolvconf is disabled on this system but was used anyway:" >&2
+            echo "$0 $*" >&2
+            exit 1
+          ''
+        else configText;
+    }
+
+    (mkIf cfg.enable {
+      networking.resolvconf.package = pkgs.openresolv;
+
+      environment.systemPackages = [ cfg.package ];
+
+      systemd.services.resolvconf = {
+        description = "resolvconf update";
+
+        before = [ "network-pre.target" ];
+        wants = [ "network-pre.target" ];
+        wantedBy = [ "multi-user.target" ];
+        restartTriggers = [ config.environment.etc."resolvconf.conf".source ];
+
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${cfg.package}/bin/resolvconf -u";
+          RemainAfterExit = true;
+        };
+      };
+
+    })
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/config/shells-environment.nix b/nixpkgs/nixos/modules/config/shells-environment.nix
new file mode 100644
index 000000000000..bc6583442edf
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/shells-environment.nix
@@ -0,0 +1,224 @@
+# This module defines a global environment configuration and
+# a common configuration for all shells.
+
+{ config, lib, utils, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.environment;
+
+  exportedEnvVars =
+    let
+      absoluteVariables =
+        mapAttrs (n: toList) cfg.variables;
+
+      suffixedVariables =
+        flip mapAttrs cfg.profileRelativeEnvVars (envVar: listSuffixes:
+          concatMap (profile: map (suffix: "${profile}${suffix}") listSuffixes) cfg.profiles
+        );
+
+      allVariables =
+        zipAttrsWith (n: concatLists) [ absoluteVariables suffixedVariables ];
+
+      exportVariables =
+        mapAttrsToList (n: v: ''export ${n}="${concatStringsSep ":" v}"'') allVariables;
+    in
+      concatStringsSep "\n" exportVariables;
+in
+
+{
+
+  options = {
+
+    environment.variables = mkOption {
+      default = {};
+      example = { EDITOR = "nvim"; VISUAL = "nvim"; };
+      description = lib.mdDoc ''
+        A set of environment variables used in the global environment.
+        These variables will be set on shell initialisation (e.g. in /etc/profile).
+        The value of each variable can be either a string or a list of
+        strings.  The latter is concatenated, interspersed with colon
+        characters.
+      '';
+      type = with types; attrsOf (oneOf [ (listOf str) str path ]);
+      apply = mapAttrs (n: v: if isList v then concatStringsSep ":" v else "${v}");
+    };
+
+    environment.profiles = mkOption {
+      default = [];
+      description = lib.mdDoc ''
+        A list of profiles used to set up the global environment.
+      '';
+      type = types.listOf types.str;
+    };
+
+    environment.profileRelativeEnvVars = mkOption {
+      type = types.attrsOf (types.listOf types.str);
+      example = { PATH = [ "/bin" ]; MANPATH = [ "/man" "/share/man" ]; };
+      description = lib.mdDoc ''
+        Attribute set of environment variables.  Each attribute maps to a list
+        of relative paths.  Each relative path is appended to each profile
+        of {option}`environment.profiles` to form the content of the
+        corresponding environment variable.
+      '';
+    };
+
+    # !!! isn't there a better way?
+    environment.extraInit = mkOption {
+      default = "";
+      description = lib.mdDoc ''
+        Shell script code called during global environment initialisation
+        after all variables and profileVariables have been set.
+        This code is assumed to be shell-independent, which means you should
+        stick to pure sh without sh word split.
+      '';
+      type = types.lines;
+    };
+
+    environment.shellInit = mkOption {
+      default = "";
+      description = lib.mdDoc ''
+        Shell script code called during shell initialisation.
+        This code is assumed to be shell-independent, which means you should
+        stick to pure sh without sh word split.
+      '';
+      type = types.lines;
+    };
+
+    environment.loginShellInit = mkOption {
+      default = "";
+      description = lib.mdDoc ''
+        Shell script code called during login shell initialisation.
+        This code is assumed to be shell-independent, which means you should
+        stick to pure sh without sh word split.
+      '';
+      type = types.lines;
+    };
+
+    environment.interactiveShellInit = mkOption {
+      default = "";
+      description = lib.mdDoc ''
+        Shell script code called during interactive shell initialisation.
+        This code is assumed to be shell-independent, which means you should
+        stick to pure sh without sh word split.
+      '';
+      type = types.lines;
+    };
+
+    environment.shellAliases = mkOption {
+      example = { l = null; ll = "ls -l"; };
+      description = lib.mdDoc ''
+        An attribute set that maps aliases (the top level attribute names in
+        this option) to command strings or directly to build outputs. The
+        aliases are added to all users' shells.
+        Aliases mapped to `null` are ignored.
+      '';
+      type = with types; attrsOf (nullOr (either str path));
+    };
+
+    environment.homeBinInPath = mkOption {
+      description = lib.mdDoc ''
+        Include ~/bin/ in $PATH.
+      '';
+      default = false;
+      type = types.bool;
+    };
+
+    environment.localBinInPath = mkOption {
+      description = lib.mdDoc ''
+        Add ~/.local/bin/ to $PATH.
+      '';
+      default = false;
+      type = types.bool;
+    };
+
+    environment.binsh = mkOption {
+      default = "${config.system.build.binsh}/bin/sh";
+      defaultText = literalExpression ''"''${config.system.build.binsh}/bin/sh"'';
+      example = literalExpression ''"''${pkgs.dash}/bin/dash"'';
+      type = types.path;
+      visible = false;
+      description = lib.mdDoc ''
+        The shell executable that is linked system-wide to
+        `/bin/sh`. Please note that NixOS assumes in many
+        places that this shell is Bash, so override the default
+        setting only if you know exactly what you're doing.
+      '';
+    };
+
+    environment.shells = mkOption {
+      default = [];
+      example = literalExpression "[ pkgs.bashInteractive pkgs.zsh ]";
+      description = lib.mdDoc ''
+        A list of permissible login shells for user accounts.
+        No need to mention `/bin/sh`
+        here, it is placed into this list implicitly.
+      '';
+      type = types.listOf (types.either types.shellPackage types.path);
+    };
+
+  };
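+
+  # Illustrative sketch only: list values of environment.variables are joined
+  # with ":" (the variable value below is hypothetical):
+  #
+  #   environment.variables.MANPATH = [ "/home/alice/man" "/usr/share/man" ];
+  #   # yields MANPATH="/home/alice/man:/usr/share/man"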
+
+  config = {
+
+    system.build.binsh = pkgs.bashInteractive;
+
+    # Set session variables in the shell as well. This is usually
+    # unnecessary, but it allows changes to session variables to take
+    # effect without restarting the session (e.g. by opening a new
+    # terminal instead of logging out of X11).
+    environment.variables = config.environment.sessionVariables;
+
+    environment.profileRelativeEnvVars = config.environment.profileRelativeSessionVariables;
+
+    environment.shellAliases = mapAttrs (name: mkDefault) {
+      ls = "ls --color=tty";
+      ll = "ls -l";
+      l  = "ls -alh";
+    };
+
+    environment.etc.shells.text =
+      ''
+        ${concatStringsSep "\n" (map utils.toShellPath cfg.shells)}
+        /bin/sh
+      '';
+
+    # For resetting environment with `. /etc/set-environment` when needed
+    # and discoverability (see motivation of #30418).
+    environment.etc.set-environment.source = config.system.build.setEnvironment;
+
+    system.build.setEnvironment = pkgs.writeText "set-environment"
+      ''
+        # DO NOT EDIT -- this file has been generated automatically.
+
+        # Prevent this file from being sourced by child shells.
+        export __NIXOS_SET_ENVIRONMENT_DONE=1
+
+        ${exportedEnvVars}
+
+        ${cfg.extraInit}
+
+        ${optionalString cfg.homeBinInPath ''
+          # ~/bin if it exists overrides other bin directories.
+          export PATH="$HOME/bin:$PATH"
+        ''}
+
+        ${optionalString cfg.localBinInPath ''
+          export PATH="$HOME/.local/bin:$PATH"
+        ''}
+      '';
+
+    system.activationScripts.binsh = stringAfter [ "stdio" ]
+      ''
+        # Create the required /bin/sh symlink; otherwise lots of things
+        # (notably the system() function) won't work.
+        mkdir -m 0755 -p /bin
+        ln -sfn "${cfg.binsh}" /bin/.sh.tmp
+        mv /bin/.sh.tmp /bin/sh # atomically replace /bin/sh
+      '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/stevenblack.nix b/nixpkgs/nixos/modules/config/stevenblack.nix
new file mode 100644
index 000000000000..7e6235169847
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/stevenblack.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) optionals mkOption mkEnableOption types mkIf elem concatStringsSep maintainers mdDoc;
+  cfg = config.networking.stevenblack;
+
+  # needs to be in a specific order
+  activatedHosts = with cfg; [ ]
+    ++ optionals (elem "fakenews" block) [ "fakenews" ]
+    ++ optionals (elem "gambling" block) [ "gambling" ]
+    ++ optionals (elem "porn" block) [ "porn" ]
+    ++ optionals (elem "social" block) [ "social" ];
+
+  hostsPath = "${pkgs.stevenblack-blocklist}/alternates/" + concatStringsSep "-" activatedHosts + "/hosts";
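+  # e.g. block = [ "porn" "gambling" ] resolves to
+  # "${pkgs.stevenblack-blocklist}/alternates/gambling-porn/hosts"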
+in
+{
+  options.networking.stevenblack = {
+    enable = mkEnableOption (mdDoc "the stevenblack hosts file blocklist");
+
+    block = mkOption {
+      type = types.listOf (types.enum [ "fakenews" "gambling" "porn" "social" ]);
+      default = [ ];
+      description = mdDoc "Additional blocklist extensions.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.hostFiles = [ ]
+      ++ optionals (activatedHosts != [ ]) [ hostsPath ]
+      ++ optionals (activatedHosts == [ ]) [ "${pkgs.stevenblack-blocklist}/hosts" ];
+  };
+
+  meta.maintainers = [ maintainers.moni maintainers.artturin ];
+}
diff --git a/nixpkgs/nixos/modules/config/swap.nix b/nixpkgs/nixos/modules/config/swap.nix
new file mode 100644
index 000000000000..8989a6408264
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/swap.nix
@@ -0,0 +1,304 @@
+{ config, lib, pkgs, utils, ... }:
+
+with utils;
+with lib;
+
+let
+
+  randomEncryptionCoerce = enable: { inherit enable; };
+
+  randomEncryptionOpts = { ... }: {
+
+    options = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Encrypt swap device with a random key. This way you won't have a persistent swap device.
+
+          WARNING: Don't try to hibernate when you have at least one swap partition with
+          this option enabled! We have no way to set the partition into which hibernation image
+          is saved, so if your image ends up on an encrypted one you would lose it!
+
+          WARNING #2: Do not use /dev/disk/by-uuid/… or /dev/disk/by-label/… as your swap device
+          when using randomEncryption as the UUIDs and labels will get erased on every boot when
+          the partition is encrypted. Best to use /dev/disk/by-partuuid/…
+        '';
+      };
+
+      cipher = mkOption {
+        default = "aes-xts-plain64";
+        example = "serpent-xts-plain64";
+        type = types.str;
+        description = lib.mdDoc ''
+          Use specified cipher for randomEncryption.
+
+          Hint: Run "cryptsetup benchmark" to see which one is fastest on your machine.
+        '';
+      };
+
+      keySize = mkOption {
+        default = null;
+        example = "512";
+        type = types.nullOr types.int;
+        description = lib.mdDoc ''
+          Set the encryption key size for the plain device.
+
+          If not specified, the amount of data to read from `source` will be
+          determined by cryptsetup.
+
+          See `cryptsetup-open(8)` for details.
+        '';
+      };
+
+      sectorSize = mkOption {
+        default = null;
+        example = "4096";
+        type = types.nullOr types.int;
+        description = lib.mdDoc ''
+          Set the sector size for the plain encrypted device type.
+
+          If not specified, the default sector size is determined from the
+          underlying block device.
+
+          See `cryptsetup-open(8)` for details.
+        '';
+      };
+
+      source = mkOption {
+        default = "/dev/urandom";
+        example = "/dev/random";
+        type = types.str;
+        description = lib.mdDoc ''
+          Define the source of randomness to obtain a random key for encryption.
+        '';
+      };
+
+      allowDiscards = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to allow TRIM requests to the underlying device. This option
+          has security implications; please read the LUKS documentation before
+          activating it.
+        '';
+      };
+    };
+
+  };
+
+  swapCfg = {config, options, ...}: {
+
+    options = {
+
+      device = mkOption {
+        example = "/dev/sda3";
+        type = types.nonEmptyStr;
+        description = lib.mdDoc "Path of the device or swap file.";
+      };
+
+      label = mkOption {
+        example = "swap";
+        type = types.str;
+        description = lib.mdDoc ''
+          Label of the device.  Can be used instead of {var}`device`.
+        '';
+      };
+
+      size = mkOption {
+        default = null;
+        example = 2048;
+        type = types.nullOr types.int;
+        description = lib.mdDoc ''
+          If this option is set, ‘device’ is interpreted as the
+          path of a swapfile that will be created automatically
+          with the indicated size (in megabytes).
+        '';
+      };
+
+      priority = mkOption {
+        default = null;
+        example = 2048;
+        type = types.nullOr types.int;
+        description = lib.mdDoc ''
+          Specify the priority of the swap device. Priority is a value between 0 and 32767.
+          Higher numbers indicate higher priority.
+          null lets the kernel choose a priority, which will show up as a negative value.
+        '';
+      };
+
+      randomEncryption = mkOption {
+        default = false;
+        example = {
+          enable = true;
+          cipher = "serpent-xts-plain64";
+          source = "/dev/random";
+        };
+        type = types.coercedTo types.bool randomEncryptionCoerce (types.submodule randomEncryptionOpts);
+        description = lib.mdDoc ''
+          Encrypt swap device with a random key. This way you won't have a persistent swap device.
+
+          HINT: run "cryptsetup benchmark" to test cipher performance on your machine.
+
+          WARNING: Don't try to hibernate when you have at least one swap partition with
+          this option enabled! We have no way to set the partition into which hibernation image
+          is saved, so if your image ends up on an encrypted one you would lose it!
+
+          WARNING #2: Do not use /dev/disk/by-uuid/… or /dev/disk/by-label/… as your swap device
+          when using randomEncryption as the UUIDs and labels will get erased on every boot when
+          the partition is encrypted. Best to use /dev/disk/by-partuuid/…
+        '';
+      };
+
+      discardPolicy = mkOption {
+        default = null;
+        example = "once";
+        type = types.nullOr (types.enum ["once" "pages" "both" ]);
+        description = lib.mdDoc ''
+          Specify the discard policy for the swap device. If "once", then the
+          whole swap space is discarded at swapon invocation. If "pages",
+          asynchronous discard on freed pages is performed, before returning to
+          the available pages pool. With "both", both policies are activated.
+          See swapon(8) for more information.
+        '';
+      };
+
+      options = mkOption {
+        default = [ "defaults" ];
+        example = [ "nofail" ];
+        type = types.listOf types.nonEmptyStr;
+        description = lib.mdDoc ''
+          Options used to mount the swap.
+        '';
+      };
+
+      deviceName = mkOption {
+        type = types.str;
+        internal = true;
+      };
+
+      realDevice = mkOption {
+        type = types.path;
+        internal = true;
+      };
+
+    };
+
+    config = {
+      device = mkIf options.label.isDefined
+        "/dev/disk/by-label/${config.label}";
+      deviceName = lib.replaceStrings ["\\"] [""] (escapeSystemdPath config.device);
+      realDevice = if config.randomEncryption.enable then "/dev/mapper/${config.deviceName}" else config.device;
+    };
+
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    swapDevices = mkOption {
+      default = [];
+      example = [
+        { device = "/dev/hda7"; }
+        { device = "/var/swapfile"; }
+        { label = "bigswap"; }
+      ];
+      description = lib.mdDoc ''
+        The swap devices and swap files.  These must have been
+        initialised using {command}`mkswap`.  Each element
+        should be an attribute set specifying either the path of the
+        swap device or file (`device`) or the label
+        of the swap device (`label`, see
+        {command}`mkswap -L`).  Using a label is
+        recommended.
+      '';
+
+      type = types.listOf (types.submodule swapCfg);
+    };
+
+  };
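+
+  # Illustrative sketch only (the partuuid below is made up): a randomly
+  # encrypted swap partition referenced by partuuid, as recommended above:
+  #
+  #   swapDevices = [
+  #     { device = "/dev/disk/by-partuuid/0fb1fef9-f587-4396-95e0-334d72dee856";
+  #       randomEncryption.enable = true;
+  #     }
+  #   ];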
+
+  config = mkIf ((length config.swapDevices) != 0) {
+    assertions = map (sw: {
+      assertion = sw.randomEncryption.enable -> builtins.match "/dev/disk/by-(uuid|label)/.*" sw.device == null;
+      message = ''
+        You cannot use swap device "${sw.device}" with randomEncryption enabled.
+        The UUIDs and labels will get erased on every boot when the partition is encrypted.
+        Use /dev/disk/by-partuuid/… instead.
+      '';
+    }) config.swapDevices;
+
+    warnings =
+      concatMap (sw:
+        if sw.size != null && hasPrefix "/dev/" sw.device
+        then [ "Setting the swap size of block device ${sw.device} has no effect" ]
+        else [ ])
+      config.swapDevices;
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "SWAP")
+    ];
+
+    # Create missing swapfiles.
+    systemd.services =
+      let
+        createSwapDevice = sw:
+          let realDevice' = escapeSystemdPath sw.realDevice;
+          in nameValuePair "mkswap-${sw.deviceName}"
+          { description = "Initialisation of swap device ${sw.device}";
+            # The mkswap service fails for file-backed swap devices if the
+            # loop module has not been loaded before the service runs.
+            # We add an ordering constraint to run after systemd-modules-load to
+            # avoid this race condition.
+            after = [ "systemd-modules-load.service" ];
+            wantedBy = [ "${realDevice'}.swap" ];
+            before = [ "${realDevice'}.swap" ];
+            path = [ pkgs.util-linux pkgs.e2fsprogs ]
+              ++ optional sw.randomEncryption.enable pkgs.cryptsetup;
+
+            environment.DEVICE = sw.device;
+
+            script =
+              ''
+                ${optionalString (sw.size != null) ''
+                  currentSize=$(( $(stat -c "%s" "$DEVICE" 2>/dev/null || echo 0) / 1024 / 1024 ))
+                  if [[ ! -b "$DEVICE" && "${toString sw.size}" != "$currentSize" ]]; then
+                    # Disable CoW for CoW based filesystems like BTRFS.
+                    truncate --size 0 "$DEVICE"
+                    chattr +C "$DEVICE" 2>/dev/null || true
+
+                    dd if=/dev/zero of="$DEVICE" bs=1M count=${toString sw.size}
+                    chmod 0600 ${sw.device}
+                    ${optionalString (!sw.randomEncryption.enable) "mkswap ${sw.realDevice}"}
+                  fi
+                ''}
+                ${optionalString sw.randomEncryption.enable ''
+                  cryptsetup plainOpen -c ${sw.randomEncryption.cipher} -d ${sw.randomEncryption.source} \
+                  ${concatStringsSep " \\\n" (flatten [
+                    (optional (sw.randomEncryption.sectorSize != null) "--sector-size=${toString sw.randomEncryption.sectorSize}")
+                    (optional (sw.randomEncryption.keySize != null) "--key-size=${toString sw.randomEncryption.keySize}")
+                    (optional sw.randomEncryption.allowDiscards "--allow-discards")
+                  ])} ${sw.device} ${sw.deviceName}
+                  mkswap ${sw.realDevice}
+                ''}
+              '';
+
+            unitConfig.RequiresMountsFor = [ "${dirOf sw.device}" ];
+            unitConfig.DefaultDependencies = false; # needed to prevent a cycle
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = sw.randomEncryption.enable;
+            serviceConfig.ExecStop = optionalString sw.randomEncryption.enable "${pkgs.cryptsetup}/bin/cryptsetup luksClose ${sw.deviceName}";
+            restartIfChanged = false;
+          };
+
+      in listToAttrs (map createSwapDevice (filter (sw: sw.size != null || sw.randomEncryption.enable) config.swapDevices));
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/sysctl.nix b/nixpkgs/nixos/modules/config/sysctl.nix
new file mode 100644
index 000000000000..0bc7ab9667f9
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/sysctl.nix
@@ -0,0 +1,79 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+
+  sysctlOption = mkOptionType {
+    name = "sysctl option value";
+    check = val:
+      let
+        checkType = x: isBool x || isString x || isInt x || x == null;
+      in
+        checkType val || (val._type or "" == "override" && checkType val.content);
+    merge = loc: defs: mergeOneOption loc (filterOverrides defs);
+  };
+
+in
+
+{
+
+  options = {
+
+    boot.kernel.sysctl = mkOption {
+      type = types.submodule {
+        freeformType = types.attrsOf sysctlOption;
+        options."net.core.rmem_max" = mkOption {
+          type = types.nullOr types.ints.unsigned // {
+            merge = loc: defs:
+              foldl
+                (a: b: if b.value == null then null else lib.max a b.value)
+                0
+                (filterOverrides defs);
+          };
+          default = null;
+          description = lib.mdDoc "The maximum socket receive buffer size. In case of conflicting values, the highest will be used.";
+        };
+      };
+      default = {};
+      example = literalExpression ''
+        { "net.ipv4.tcp_syncookies" = false; "vm.swappiness" = 60; }
+      '';
+      description = lib.mdDoc ''
+        Runtime parameters of the Linux kernel, as set by
+        {manpage}`sysctl(8)`.  Note that sysctl
+        parameter names must be enclosed in quotes
+        (e.g. `"vm.swappiness"` instead of
+        `vm.swappiness`).  The value of each
+        parameter may be a string, integer, boolean, or null
+        (signifying the option will not appear at all).
+      '';
+
+    };
+
+  };
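+
+  # Illustrative sketch only: parameter names must be quoted, and a parameter
+  # set to null is omitted from the generated sysctl.d file:
+  #
+  #   boot.kernel.sysctl."vm.swappiness" = 10;
+  #   boot.kernel.sysctl."kernel.yama.ptrace_scope" = null;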
+
+  config = {
+
+    environment.etc."sysctl.d/60-nixos.conf".text =
+      concatStrings (mapAttrsToList (n: v:
+        optionalString (v != null) "${n}=${if v == false then "0" else toString v}\n"
+      ) config.boot.kernel.sysctl);
+
+    systemd.services.systemd-sysctl =
+      { wantedBy = [ "multi-user.target" ];
+        restartTriggers = [ config.environment.etc."sysctl.d/60-nixos.conf".source ];
+      };
+
+    # Hide kernel pointers (e.g. in /proc/modules) for unprivileged
+    # users as these make it easier to exploit kernel vulnerabilities.
+    boot.kernel.sysctl."kernel.kptr_restrict" = mkDefault 1;
+
+    # Disable YAMA by default to allow easy debugging.
+    boot.kernel.sysctl."kernel.yama.ptrace_scope" = mkDefault 0;
+
+    # Improve compatibility with applications that allocate
+    # a lot of memory, like modern games
+    boot.kernel.sysctl."vm.max_map_count" = mkDefault 1048576;
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/system-environment.nix b/nixpkgs/nixos/modules/config/system-environment.nix
new file mode 100644
index 000000000000..399304185223
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/system-environment.nix
@@ -0,0 +1,100 @@
+# This module defines a system-wide environment that will be
+# initialised by pam_env (that is, not only in shells).
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.environment;
+
+in
+
+{
+
+  options = {
+
+    environment.sessionVariables = mkOption {
+      default = {};
+      description = lib.mdDoc ''
+        A set of environment variables used in the global environment.
+        These variables will be set by PAM early in the login process.
+
+        The value of each session variable can be either a string or a
+        list of strings. The latter is concatenated, interspersed with
+        colon characters.
+
+        Note that, due to limitations in the PAM format, values may not
+        contain the `"` character.
+
+        Also, these variables are merged into
+        [](#opt-environment.variables) and it is
+        therefore not possible to use PAM style variables such as
+        `@{HOME}`.
+      '';
+      inherit (options.environment.variables) type apply;
+    };
+
+    environment.profileRelativeSessionVariables = mkOption {
+      type = types.attrsOf (types.listOf types.str);
+      example = { PATH = [ "/bin" ]; MANPATH = [ "/man" "/share/man" ]; };
+      description = lib.mdDoc ''
+        Attribute set of environment variables used in the global
+        environment. These variables will be set by PAM early in the
+        login process.
+
+        Variable substitution is available as described in
+        {manpage}`pam_env.conf(5)`.
+
+        Each attribute maps to a list of relative paths. Each relative
+        path is appended to each profile of
+        {option}`environment.profiles` to form the content of
+        the corresponding environment variable.
+
+        Also, these variables are merged into
+        [](#opt-environment.profileRelativeEnvVars) and it is
+        therefore not possible to use PAM style variables such as
+        `@{HOME}`.
+      '';
+    };
+
+  };
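+
+  # Illustrative sketch only (the variable name is hypothetical): each relative
+  # path is expanded against every profile in environment.profiles:
+  #
+  #   environment.profileRelativeSessionVariables.INFOPATH = [ "/info" "/share/info" ];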
+
+  config = {
+    environment.etc."pam/environment".text = let
+      suffixedVariables =
+        flip mapAttrs cfg.profileRelativeSessionVariables (envVar: suffixes:
+          flip concatMap cfg.profiles (profile:
+            map (suffix: "${profile}${suffix}") suffixes
+          )
+        );
+
+      # We're trying to use the same syntax for PAM variables and env variables.
+      # That means we need to map the env variables that people might use to their
+      # equivalent PAM variable.
+      replaceEnvVars = replaceStrings ["$HOME" "$USER"] ["@{HOME}" "@{PAM_USER}"];
+
+      pamVariable = n: v:
+        ''${n}   DEFAULT="${concatStringsSep ":" (map replaceEnvVars (toList v))}"'';
+
+      pamVariables =
+        concatStringsSep "\n"
+        (mapAttrsToList pamVariable
+        (zipAttrsWith (n: concatLists)
+          [
+            # Make sure security wrappers are prioritized without polluting
+            # shell environments with an extra entry. Sessions which depend on
+            # pam for its environment will otherwise have eg. broken sudo. In
+            # particular Gnome Shell sometimes fails to source a proper
+            # environment from a shell.
+            { PATH = [ config.security.wrapperDir ]; }
+
+            (mapAttrs (n: toList) cfg.sessionVariables)
+            suffixedVariables
+          ]));
+    in ''
+      ${pamVariables}
+    '';
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/system-path.nix b/nixpkgs/nixos/modules/config/system-path.nix
new file mode 100644
index 000000000000..71274ea8999f
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/system-path.nix
@@ -0,0 +1,189 @@
+# This module defines the packages that appear in
+# /run/current-system/sw.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  requiredPackages = map (pkg: setPrio ((pkg.meta.priority or 5) + 3) pkg)
+    [ pkgs.acl
+      pkgs.attr
+      pkgs.bashInteractive # bash with ncurses support
+      pkgs.bzip2
+      pkgs.coreutils-full
+      pkgs.cpio
+      pkgs.curl
+      pkgs.diffutils
+      pkgs.findutils
+      pkgs.gawk
+      pkgs.stdenv.cc.libc
+      pkgs.getent
+      pkgs.getconf
+      pkgs.gnugrep
+      pkgs.gnupatch
+      pkgs.gnused
+      pkgs.gnutar
+      pkgs.gzip
+      pkgs.xz
+      pkgs.less
+      pkgs.libcap
+      pkgs.ncurses
+      pkgs.netcat
+      config.programs.ssh.package
+      pkgs.mkpasswd
+      pkgs.procps
+      pkgs.su
+      pkgs.time
+      pkgs.util-linux
+      pkgs.which
+      pkgs.zstd
+    ];
+
+  defaultPackageNames =
+    [ "perl"
+      "rsync"
+      "strace"
+    ];
+  defaultPackages =
+    map
+      (n: let pkg = pkgs.${n}; in setPrio ((pkg.meta.priority or 5) + 3) pkg)
+      defaultPackageNames;
+  defaultPackagesText = "[ ${concatMapStringsSep " " (n: "pkgs.${n}") defaultPackageNames } ]";
+
+in
+
+{
+  options = {
+
+    environment = {
+
+      systemPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.firefox pkgs.thunderbird ]";
+        description = lib.mdDoc ''
+          The set of packages that appear in
+          /run/current-system/sw.  These packages are
+          automatically available to all users, and are
+          automatically updated every time you rebuild the system
+          configuration.  (The latter is the main difference with
+          installing them in the default profile,
+          {file}`/nix/var/nix/profiles/default`.
+        '';
+      };
+
+      defaultPackages = mkOption {
+        type = types.listOf types.package;
+        default = defaultPackages;
+        defaultText = literalMD ''
+          these packages, with their `meta.priority` numerically increased
+          (thus lowering their installation priority):
+
+              ${defaultPackagesText}
+        '';
+        example = [];
+        description = lib.mdDoc ''
+          Set of default packages that aren't strictly necessary
+          for a running system. Entries can be removed for a more
+          minimal NixOS installation.
+
+          Like with systemPackages, packages are installed to
+          {file}`/run/current-system/sw`. They are
+          automatically available to all users, and are
+          automatically updated every time you rebuild the system
+          configuration.
+        '';
+      };
+
+      pathsToLink = mkOption {
+        type = types.listOf types.str;
+        # Note: We need `/lib' to be among `pathsToLink' for NSS modules
+        # to work.
+        default = [];
+        example = ["/"];
+        description = lib.mdDoc "List of directories to be symlinked in {file}`/run/current-system/sw`.";
+      };
+
+      extraOutputsToInstall = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "dev" "info" ];
+        description = lib.mdDoc ''
+          Entries listed here will be appended to the `meta.outputsToInstall` attribute for each package in `environment.systemPackages`, and the files from the corresponding derivation outputs symlinked into {file}`/run/current-system/sw`.
+
+          For example, this can be used to install the `dev` and `info` outputs for all packages in the system environment, if they are available.
+
+          To use specific outputs instead of configuring them globally, select the corresponding attribute on the package derivation, e.g. `libxml2.dev` or `coreutils.info`.
+        '';
+      };
+
+      extraSetup = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Shell fragments to be run after the system environment has been created. This should only be used for things that need to modify the internals of the environment, e.g. generating MIME caches. The environment being built can be accessed at $out.";
+      };
+
+    };
+
+    system = {
+
+      path = mkOption {
+        internal = true;
+        description = lib.mdDoc ''
+          The packages you want in the boot environment.
+        '';
+      };
+
+    };
+
+  };
+
+  config = {
+
+    environment.systemPackages = requiredPackages ++ config.environment.defaultPackages;
+
+    environment.pathsToLink =
+      [ "/bin"
+        "/etc/xdg"
+        "/etc/gtk-2.0"
+        "/etc/gtk-3.0"
+        "/lib" # FIXME: remove and update debug-info.nix
+        "/sbin"
+        "/share/emacs"
+        "/share/hunspell"
+        "/share/nano"
+        "/share/org"
+        "/share/themes"
+        "/share/vim-plugins"
+        "/share/vulkan"
+        "/share/kservices5"
+        "/share/kservicetypes5"
+        "/share/kxmlgui5"
+        "/share/systemd"
+        "/share/thumbnailers"
+      ];
+
+    system.path = pkgs.buildEnv {
+      name = "system-path";
+      paths = config.environment.systemPackages;
+      inherit (config.environment) pathsToLink extraOutputsToInstall;
+      ignoreCollisions = true;
+      # !!! Hacky, should modularise.
+      # outputs TODO: note that the tools will often not be linked by default
+      postBuild =
+        ''
+          # Remove wrapped binaries, they shouldn't be accessible via PATH.
+          find $out/bin -maxdepth 1 -name ".*-wrapped" -type l -delete
+
+          if [ -x $out/bin/glib-compile-schemas -a -w $out/share/glib-2.0/schemas ]; then
+              $out/bin/glib-compile-schemas $out/share/glib-2.0/schemas
+          fi
+
+          ${config.environment.extraSetup}
+        '';
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/terminfo.nix b/nixpkgs/nixos/modules/config/terminfo.nix
new file mode 100644
index 000000000000..ebd1aaea8f04
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/terminfo.nix
@@ -0,0 +1,76 @@
+# This module manages the terminfo database
+# and its integration in the system.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  options = with lib; {
+    environment.enableAllTerminfo = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to install all terminfo outputs.
+      '';
+    };
+
+    security.sudo.keepTerminfo = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to preserve the `TERMINFO` and `TERMINFO_DIRS`
+        environment variables, for `root` and the `wheel` group.
+      '';
+    };
+  };
+
+  config = {
+
+    # can be generated with:
+    # attrNames (filterAttrs
+    #  (_: drv: (builtins.tryEval (isDerivation drv && drv ? terminfo)).value)
+    #  pkgs)
+    environment.systemPackages = mkIf config.environment.enableAllTerminfo (map (x: x.terminfo) (with pkgs; [
+      alacritty
+      contour
+      foot
+      kitty
+      mtm
+      rio
+      rxvt-unicode-unwrapped
+      rxvt-unicode-unwrapped-emoji
+      st
+      termite
+      tmux
+      wezterm
+      yaft
+    ]));
+
+    environment.pathsToLink = [
+      "/share/terminfo"
+    ];
+
+    environment.etc.terminfo = {
+      source = "${config.system.path}/share/terminfo";
+    };
+
+    environment.profileRelativeSessionVariables = {
+      TERMINFO_DIRS = [ "/share/terminfo" ];
+    };
+
+    environment.extraInit = ''
+
+      # reset TERM with new TERMINFO available (if any)
+      export TERM=$TERM
+    '';
+
+    security.sudo.extraConfig = mkIf config.security.sudo.keepTerminfo ''
+
+      # Keep terminfo database for root and %wheel.
+      Defaults:root,%wheel env_keep+=TERMINFO_DIRS
+      Defaults:root,%wheel env_keep+=TERMINFO
+    '';
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/unix-odbc-drivers.nix b/nixpkgs/nixos/modules/config/unix-odbc-drivers.nix
new file mode 100644
index 000000000000..7bd3fa1600b0
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/unix-odbc-drivers.nix
@@ -0,0 +1,38 @@
+{ config, lib, ... }:
+
+with lib;
+
+# unixODBC drivers (this solution is not perfect, because the user has to
+# ask the admin to add a driver, but it's simple and it works)
+
+let
+  iniDescription = pkg: ''
+    [${pkg.fancyName}]
+    Description = ${pkg.meta.description}
+    Driver = ${pkg}/${pkg.driver}
+  '';
+
+in {
+  ###### interface
+
+  options = {
+    environment.unixODBCDrivers = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression "with pkgs.unixODBCDrivers; [ sqlite psql ]";
+      description = lib.mdDoc ''
+        Specifies Unix ODBC drivers to be registered in
+        {file}`/etc/odbcinst.ini`.  You may also want to
+        add `pkgs.unixODBC` to the system path to get
+        a command line client to connect to ODBC databases.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf (config.environment.unixODBCDrivers != []) {
+    environment.etc."odbcinst.ini".text = concatMapStringsSep "\n" iniDescription config.environment.unixODBCDrivers;
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/update-users-groups.pl b/nixpkgs/nixos/modules/config/update-users-groups.pl
new file mode 100644
index 000000000000..7aee58e697de
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/update-users-groups.pl
@@ -0,0 +1,381 @@
+use strict;
+use warnings;
+use File::Basename;
+use File::Path qw(make_path);
+use File::Slurp;
+use Getopt::Long;
+use JSON;
+use Time::Piece;
+
+# Keep track of deleted uids and gids.
+my $uidMapFile = "/var/lib/nixos/uid-map";
+my $uidMap = -e $uidMapFile ? decode_json(read_file($uidMapFile)) : {};
+
+my $gidMapFile = "/var/lib/nixos/gid-map";
+my $gidMap = -e $gidMapFile ? decode_json(read_file($gidMapFile)) : {};
+
+my $is_dry = ($ENV{'NIXOS_ACTION'} // "") eq "dry-activate";
+GetOptions("dry-activate" => \$is_dry);
+make_path("/var/lib/nixos", { mode => 0755 }) unless $is_dry;
+
+sub updateFile {
+    my ($path, $contents, $perms) = @_;
+    return if $is_dry;
+    write_file($path, { atomic => 1, binmode => ':utf8', perms => $perms // 0644 }, $contents) or die;
+}
+
+# Converts an ISO date to number of days since 1970-01-01
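+# (for example, dateToDays("1970-01-02") returns 1)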
+sub dateToDays {
+    my ($date) = @_;
+    my $time = Time::Piece->strptime($date, "%Y-%m-%d");
+    return $time->epoch / 60 / 60 / 24;
+}
+
+sub nscdInvalidate {
+    system("nscd", "--invalidate", $_[0]) unless $is_dry;
+}
+
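+# Hash a password with SHA-512 crypt ($6$) and a random 8-character salt.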
+sub hashPassword {
+    my ($password) = @_;
+    my $salt = "";
+    my @chars = ('.', '/', 0..9, 'A'..'Z', 'a'..'z');
+    $salt .= $chars[rand 64] for (1..8);
+    return crypt($password, '$6$' . $salt . '$');
+}
+
+sub dry_print {
+    if ($is_dry) {
+        print STDERR ("$_[1] $_[2]\n")
+    } else {
+        print STDERR ("$_[0] $_[2]\n")
+    }
+}
+
+
+# Functions for allocating free GIDs/UIDs. FIXME: respect ID ranges in
+# /etc/login.defs.
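+#
+# allocId scans the range [$idMin, $idMax] (upwards if $up is true, downwards
+# otherwise) for an ID that is not in $used, not in $prevUsed, and not already
+# present in the system database queried via the $getid callback.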
+sub allocId {
+    my ($used, $prevUsed, $idMin, $idMax, $up, $getid) = @_;
+    my $id = $up ? $idMin : $idMax;
+    while ($id >= $idMin && $id <= $idMax) {
+        if (!$used->{$id} && !$prevUsed->{$id} && !defined &$getid($id)) {
+            $used->{$id} = 1;
+            return $id;
+        }
+        $used->{$id} = 1;
+        if ($up) { $id++; } else { $id--; }
+    }
+    die "$0: out of free UIDs or GIDs\n";
+}
+
+my (%gidsUsed, %uidsUsed, %gidsPrevUsed, %uidsPrevUsed);
+
+sub allocGid {
+    my ($name) = @_;
+    my $prevGid = $gidMap->{$name};
+    if (defined $prevGid && !defined $gidsUsed{$prevGid}) {
+        dry_print("reviving", "would revive", "group '$name' with GID $prevGid");
+        $gidsUsed{$prevGid} = 1;
+        return $prevGid;
+    }
+    return allocId(\%gidsUsed, \%gidsPrevUsed, 400, 999, 0, sub { my ($gid) = @_; getgrgid($gid) });
+}
+
+sub allocUid {
+    my ($name, $isSystemUser) = @_;
+    my ($min, $max, $up) = $isSystemUser ? (400, 999, 0) : (1000, 29999, 1);
+    my $prevUid = $uidMap->{$name};
+    if (defined $prevUid && $prevUid >= $min && $prevUid <= $max && !defined $uidsUsed{$prevUid}) {
+        dry_print("reviving", "would revive", "user '$name' with UID $prevUid");
+        $uidsUsed{$prevUid} = 1;
+        return $prevUid;
+    }
+    return allocId(\%uidsUsed, \%uidsPrevUsed, $min, $max, $up, sub { my ($uid) = @_; getpwuid($uid) });
+}
+
+# Read the declared users/groups
+my $spec = decode_json(read_file($ARGV[0]));
+
+# Don't allocate UIDs/GIDs that are manually assigned.
+foreach my $g (@{$spec->{groups}}) {
+    $gidsUsed{$g->{gid}} = 1 if defined $g->{gid};
+}
+
+foreach my $u (@{$spec->{users}}) {
+    $uidsUsed{$u->{uid}} = 1 if defined $u->{uid};
+}
+
+# Likewise for previously used but deleted UIDs/GIDs.
+$uidsPrevUsed{$_} = 1 foreach values %{$uidMap};
+$gidsPrevUsed{$_} = 1 foreach values %{$gidMap};
+
+
+# Read the current /etc/group.
+sub parseGroup {
+    chomp;
+    my @f = split(':', $_, -4);
+    my $gid = $f[2] eq "" ? undef : int($f[2]);
+    $gidsUsed{$gid} = 1 if defined $gid;
+    return ($f[0], { name => $f[0], password => $f[1], gid => $gid, members => $f[3] });
+}
+
+my %groupsCur = -f "/etc/group" ? map { parseGroup } read_file("/etc/group", { binmode => ":utf8" }) : ();
+
+# Read the current /etc/passwd.
+sub parseUser {
+    chomp;
+    my @f = split(':', $_, -7);
+    my $uid = $f[2] eq "" ? undef : int($f[2]);
+    $uidsUsed{$uid} = 1 if defined $uid;
+    return ($f[0], { name => $f[0], fakePassword => $f[1], uid => $uid,
+        gid => $f[3], description => $f[4], home => $f[5], shell => $f[6] });
+}
+my %usersCur = -f "/etc/passwd" ? map { parseUser } read_file("/etc/passwd", { binmode => ":utf8" }) : ();
+
+# Read the groups that were created declaratively (i.e. not created manually)
+# in the past. These must be removed if they are no longer in the
+# current spec.
+my $declGroupsFile = "/var/lib/nixos/declarative-groups";
+my %declGroups;
+$declGroups{$_} = 1 foreach split / /, -e $declGroupsFile ? read_file($declGroupsFile, { binmode => ":utf8" }) : "";
+
+# Idem for the users.
+my $declUsersFile = "/var/lib/nixos/declarative-users";
+my %declUsers;
+$declUsers{$_} = 1 foreach split / /, -e $declUsersFile ? read_file($declUsersFile, { binmode => ":utf8" }) : "";
+
+
+# Generate a new /etc/group containing the declared groups.
+my %groupsOut;
+foreach my $g (@{$spec->{groups}}) {
+    my $name = $g->{name};
+    my $existing = $groupsCur{$name};
+
+    my %members = map { ($_, 1) } @{$g->{members}};
+
+    if (defined $existing) {
+        $g->{gid} = $existing->{gid} if !defined $g->{gid};
+        if ($g->{gid} != $existing->{gid}) {
+            dry_print("warning: not applying", "warning: would not apply", "GID change of group ‘$name’ ($existing->{gid} -> $g->{gid}) in /etc/group");
+            $g->{gid} = $existing->{gid};
+        }
+        $g->{password} = $existing->{password}; # do we want this?
+        if ($spec->{mutableUsers}) {
+            # Merge in non-declarative group members.
+            foreach my $uname (split /,/, $existing->{members} // "") {
+                $members{$uname} = 1 if !defined $declUsers{$uname};
+            }
+        }
+    } else {
+        $g->{gid} = allocGid($name) if !defined $g->{gid};
+        $g->{password} = "x";
+    }
+
+    $g->{members} = join ",", sort(keys(%members));
+    $groupsOut{$name} = $g;
+
+    $gidMap->{$name} = $g->{gid};
+}
+
+# Update the persistent list of declarative groups.
+updateFile($declGroupsFile, join(" ", sort(keys %groupsOut)));
+
+# Merge in the existing /etc/group.
+foreach my $name (keys %groupsCur) {
+    my $g = $groupsCur{$name};
+    next if defined $groupsOut{$name};
+    if (!$spec->{mutableUsers} || defined $declGroups{$name}) {
+        dry_print("removing group", "would remove group", "‘$name’");
+    } else {
+        $groupsOut{$name} = $g;
+    }
+}
+
+
+# Rewrite /etc/group. FIXME: acquire lock.
+my @lines = map { join(":", $_->{name}, $_->{password}, $_->{gid}, $_->{members}) . "\n" }
+    (sort { $a->{gid} <=> $b->{gid} } values(%groupsOut));
+updateFile($gidMapFile, to_json($gidMap, {canonical => 1}));
+updateFile("/etc/group", \@lines);
+nscdInvalidate("group");
+
+# Generate a new /etc/passwd containing the declared users.
+my %usersOut;
+foreach my $u (@{$spec->{users}}) {
+    my $name = $u->{name};
+
+    # Resolve the gid of the user.
+    if ($u->{group} =~ /^[0-9]+$/) {
+        $u->{gid} = $u->{group};
+    } elsif (defined $groupsOut{$u->{group}}) {
+        $u->{gid} = $groupsOut{$u->{group}}->{gid} // die;
+    } else {
+        warn "warning: user ‘$name’ has unknown group ‘$u->{group}’\n";
+        $u->{gid} = 65534;
+    }
+
+    my $existing = $usersCur{$name};
+    if (defined $existing) {
+        $u->{uid} = $existing->{uid} if !defined $u->{uid};
+        if ($u->{uid} != $existing->{uid}) {
+            dry_print("warning: not applying", "warning: would not apply", "UID change of user ‘$name’ ($existing->{uid} -> $u->{uid}) in /etc/passwd");
+            $u->{uid} = $existing->{uid};
+        }
+    } else {
+        $u->{uid} = allocUid($name, $u->{isSystemUser}) if !defined $u->{uid};
+
+        if (!defined $u->{hashedPassword}) {
+            if (defined $u->{initialPassword}) {
+                $u->{hashedPassword} = hashPassword($u->{initialPassword});
+            } elsif (defined $u->{initialHashedPassword}) {
+                $u->{hashedPassword} = $u->{initialHashedPassword};
+            }
+        }
+    }
+
+    # Ensure home directory incl. ownership and permissions.
+    if ($u->{createHome} and !$is_dry) {
+        make_path(dirname($u->{home}), { mode => 0755 });
+        mkdir $u->{home}, oct($u->{homeMode}) if ! -e $u->{home};
+        chown $u->{uid}, $u->{gid}, $u->{home};
+        chmod oct($u->{homeMode}), $u->{home};
+    }
+
+    if (defined $u->{hashedPasswordFile}) {
+        if (-e $u->{hashedPasswordFile}) {
+            $u->{hashedPassword} = read_file($u->{hashedPasswordFile});
+            chomp $u->{hashedPassword};
+        } else {
+            warn "warning: password file ‘$u->{hashedPasswordFile}’ does not exist\n";
+        }
+    } elsif (defined $u->{password}) {
+        $u->{hashedPassword} = hashPassword($u->{password});
+    }
+
+    if (!defined $u->{shell}) {
+        if (defined $existing) {
+            $u->{shell} = $existing->{shell};
+        } else {
+            warn "warning: no declarative or previous shell for ‘$name’, setting shell to nologin\n";
+            $u->{shell} = "/run/current-system/sw/bin/nologin";
+        }
+    }
+
+    $u->{fakePassword} = $existing->{fakePassword} // "x";
+    $usersOut{$name} = $u;
+
+    $uidMap->{$name} = $u->{uid};
+}
+
+# Update the persistent list of declarative users.
+updateFile($declUsersFile, join(" ", sort(keys %usersOut)));
+
+# Merge in the existing /etc/passwd.
+foreach my $name (keys %usersCur) {
+    my $u = $usersCur{$name};
+    next if defined $usersOut{$name};
+    if (!$spec->{mutableUsers} || defined $declUsers{$name}) {
+        dry_print("removing user", "would remove user", "‘$name’");
+    } else {
+        $usersOut{$name} = $u;
+    }
+}
+
+# Rewrite /etc/passwd. FIXME: acquire lock.
+@lines = map { join(":", $_->{name}, $_->{fakePassword}, $_->{uid}, $_->{gid}, $_->{description}, $_->{home}, $_->{shell}) . "\n" }
+    (sort { $a->{uid} <=> $b->{uid} } (values %usersOut));
+updateFile($uidMapFile, to_json($uidMap, {canonical => 1}));
+updateFile("/etc/passwd", \@lines);
+nscdInvalidate("passwd");
+
+
+# Rewrite /etc/shadow to add new accounts or remove dead ones.
+my @shadowNew;
+my %shadowSeen;
+
+foreach my $line (-f "/etc/shadow" ? read_file("/etc/shadow", { binmode => ":utf8" }) : ()) {
+    chomp $line;
+    # struct name copied from `man 3 shadow`
+    my ($sp_namp, $sp_pwdp, $sp_lstch, $sp_min, $sp_max, $sp_warn, $sp_inact, $sp_expire, $sp_flag) = split(':', $line, -9);
+    my $u = $usersOut{$sp_namp};
+    next if !defined $u;
+    $sp_pwdp = "!" if !$spec->{mutableUsers};
+    $sp_pwdp = $u->{hashedPassword} if defined $u->{hashedPassword} && !$spec->{mutableUsers}; # FIXME
+    $sp_expire = dateToDays($u->{expires}) if defined $u->{expires};
+    chomp $sp_pwdp;
+    push @shadowNew, join(":", $sp_namp, $sp_pwdp, $sp_lstch, $sp_min, $sp_max, $sp_warn, $sp_inact, $sp_expire, $sp_flag) . "\n";
+    $shadowSeen{$sp_namp} = 1;
+}
+
+foreach my $u (values %usersOut) {
+    next if defined $shadowSeen{$u->{name}};
+    my $hashedPassword = "!";
+    $hashedPassword = $u->{hashedPassword} if defined $u->{hashedPassword};
+    my $expires = "";
+    $expires = dateToDays($u->{expires}) if defined $u->{expires};
+    # FIXME: set correct value for sp_lstchg.
+    push @shadowNew, join(":", $u->{name}, $hashedPassword, "1::::", $expires, "") . "\n";
+}
+
+updateFile("/etc/shadow", \@shadowNew, 0640);
+{
+    my $uid = getpwnam "root";
+    my $gid = getgrnam "shadow";
+    my $path = "/etc/shadow";
+    (chown($uid, $gid, $path) || die "Failed to change ownership of $path: $!") unless $is_dry;
+}
+
+# Rewrite /etc/subuid & /etc/subgid to include default container mappings
+
+my $subUidMapFile = "/var/lib/nixos/auto-subuid-map";
+my $subUidMap = -e $subUidMapFile ? decode_json(read_file($subUidMapFile)) : {};
+
+my (%subUidsUsed, %subUidsPrevUsed);
+
+$subUidsPrevUsed{$_} = 1 foreach values %{$subUidMap};
+
+sub allocSubUid {
+    my ($name, @rest) = @_;
+
+    # TODO: No upper bounds?
+    my ($min, $max, $up) = (100000, 100000 * 100, 1);
+    my $prevId = $subUidMap->{$name};
+    if (defined $prevId && !defined $subUidsUsed{$prevId}) {
+        $subUidsUsed{$prevId} = 1;
+        return $prevId;
+    }
+
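+    # Allocate a slot and map it to the start of a disjoint 65536-wide range:
+    # slot 100000 -> 100000, slot 100001 -> 165536, slot 100002 -> 231072, ...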
+    my $id = allocId(\%subUidsUsed, \%subUidsPrevUsed, $min, $max, $up, sub { my ($uid) = @_; getpwuid($uid) });
+    my $offset = $id - 100000;
+    my $count = $offset * 65536;
+    my $subordinate = 100000 + $count;
+    return $subordinate;
+}
+
+my @subGids;
+my @subUids;
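+# Each entry is "user:start:count", the line format of /etc/subuid and /etc/subgid.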
+foreach my $u (values %usersOut) {
+    my $name = $u->{name};
+
+    foreach my $range (@{$u->{subUidRanges}}) {
+        my $value = join(":", ($name, $range->{startUid}, $range->{count}));
+        push @subUids, $value;
+    }
+
+    foreach my $range (@{$u->{subGidRanges}}) {
+        my $value = join(":", ($name, $range->{startGid}, $range->{count}));
+        push @subGids, $value;
+    }
+
+    if($u->{autoSubUidGidRange}) {
+        my $subordinate = allocSubUid($name);
+        $subUidMap->{$name} = $subordinate;
+        my $value = join(":", ($name, $subordinate, 65536));
+        push @subUids, $value;
+        push @subGids, $value;
+    }
+}
+
+updateFile("/etc/subuid", join("\n", @subUids) . "\n");
+updateFile("/etc/subgid", join("\n", @subGids) . "\n");
+updateFile($subUidMapFile, encode_json($subUidMap) . "\n");
diff --git a/nixpkgs/nixos/modules/config/users-groups.nix b/nixpkgs/nixos/modules/config/users-groups.nix
new file mode 100644
index 000000000000..39aac9fb821b
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/users-groups.nix
@@ -0,0 +1,935 @@
+{ config, lib, utils, pkgs, ... }:
+
+with lib;
+
+let
+  ids = config.ids;
+  cfg = config.users;
+
+  # Check whether a password hash will allow login.
+  allowsLogin = hash:
+    hash == "" # login without password
+    || !(lib.elem hash
+      [ null   # password login disabled
+        "!"    # password login disabled
+        "!!"   # a variant of "!"
+        "*"    # password unset
+      ]);
+
+  passwordDescription = ''
+    The options {option}`hashedPassword`,
+    {option}`password` and {option}`hashedPasswordFile`
+    controls what password is set for the user.
+    {option}`hashedPassword` overrides both
+    {option}`password` and {option}`hashedPasswordFile`.
+    {option}`password` overrides {option}`hashedPasswordFile`.
+    If none of these three options are set, no password is assigned to
+    the user, and the user will not be able to do password logins.
+    If the option {option}`users.mutableUsers` is true, the
+    password defined in one of the three options will only be set when
+    the user is created for the first time. After that, you are free to
+    change the password with the ordinary user management commands. If
+    {option}`users.mutableUsers` is false, you cannot change
+    user passwords; they will always be set according to the password
+    options.
+  '';
+
+  hashedPasswordDescription = ''
+    To generate a hashed password run `mkpasswd`.
+
+    If set to an empty string (`""`), this user will
+    be able to log in without being asked for a password (but not via remote
+    services such as SSH, or indirectly via {command}`su` or
+    {command}`sudo`). This should only be used for e.g. bootable
+    live systems. Note: this is different from setting an empty password,
+    which can be achieved using {option}`users.users.<name?>.password`.
+
+    If set to `null` (default) this user will not
+    be able to log in using a password (i.e. via {command}`login`
+    command).
+  '';
+
+  userOpts = { name, config, ... }: {
+
+    options = {
+
+      name = mkOption {
+        type = types.passwdEntry types.str;
+        apply = x: assert (builtins.stringLength x < 32 || abort "Username '${x}' is longer than 31 characters which is not allowed!"); x;
+        description = lib.mdDoc ''
+          The name of the user account. If undefined, the name of the
+          attribute set will be used.
+        '';
+      };
+
+      description = mkOption {
+        type = types.passwdEntry types.str;
+        default = "";
+        example = "Alice Q. User";
+        description = lib.mdDoc ''
+          A short description of the user account, typically the
+          user's full name.  This is actually the “GECOS” or “comment”
+          field in {file}`/etc/passwd`.
+        '';
+      };
+
+      uid = mkOption {
+        type = with types; nullOr int;
+        default = null;
+        description = lib.mdDoc ''
+          The account UID. If the UID is null, a free UID is picked on
+          activation.
+        '';
+      };
+
+      isSystemUser = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Indicates if the user is a system user or not. This option
+          only has an effect if {option}`uid` is
+          {option}`null`, in which case it determines whether
+          the user's UID is allocated in the range for system users
+          (below 1000) or in the range for normal users (starting at
+          1000).
+          Exactly one of `isNormalUser` and
+          `isSystemUser` must be true.
+        '';
+      };
+
+      isNormalUser = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Indicates whether this is an account for a “real” user.
+          This automatically sets {option}`group` to `users`,
+          {option}`createHome` to `true`,
+          {option}`home` to {file}`/home/«username»`,
+          {option}`useDefaultShell` to `true`,
+          and {option}`isSystemUser` to `false`.
+          Exactly one of `isNormalUser` and `isSystemUser` must be true.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        apply = x: assert (builtins.stringLength x < 32 || abort "Group name '${x}' is longer than 31 characters which is not allowed!"); x;
+        default = "";
+        description = lib.mdDoc "The user's primary group.";
+      };
+
+      extraGroups = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "The user's auxiliary groups.";
+      };
+
+      home = mkOption {
+        type = types.passwdEntry types.path;
+        default = "/var/empty";
+        description = lib.mdDoc "The user's home directory.";
+      };
+
+      homeMode = mkOption {
+        type = types.strMatching "[0-7]{1,5}";
+        default = "700";
+        description = lib.mdDoc "The user's home directory mode in numeric format. See chmod(1). The mode is only applied if {option}`users.users.<name>.createHome` is true.";
+      };
+
+      cryptHomeLuks = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Path to encrypted luks device that contains
+          the user's home directory.
+        '';
+      };
+
+      pamMount = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = lib.mdDoc ''
+          Attributes for user's entry in
+          {file}`pam_mount.conf.xml`.
+          Useful attributes might include `path`,
+          `options`, `fstype`, and `server`.
+          See <https://pam-mount.sourceforge.net/pam_mount.conf.5.html>
+          for more information.
+        '';
+      };
+
+      shell = mkOption {
+        type = types.nullOr (types.either types.shellPackage (types.passwdEntry types.path));
+        default = pkgs.shadow;
+        defaultText = literalExpression "pkgs.shadow";
+        example = literalExpression "pkgs.bashInteractive";
+        description = lib.mdDoc ''
+          The path to the user's shell. Can use shell derivations,
+          like `pkgs.bashInteractive`. Don’t
+          forget to enable your shell in
+          `programs` if necessary,
+          like `programs.zsh.enable = true;`.
+        '';
+      };
+
+      ignoreShellProgramCheck = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          By default, NixOS will check that programs.SHELL.enable is set to
+          true if the user has a custom shell specified. If that behavior isn't
+          required and there are custom overrides in place to make sure that the
+          shell is functional, set this to true.
+        '';
+      };
+
+      subUidRanges = mkOption {
+        type = with types; listOf (submodule subordinateUidRange);
+        default = [];
+        example = [
+          { startUid = 1000; count = 1; }
+          { startUid = 100001; count = 65534; }
+        ];
+        description = lib.mdDoc ''
+          Subordinate user ids that the user is allowed to use.
+          They are set into {file}`/etc/subuid` and are used
+          by `newuidmap` for user namespaces.
+        '';
+      };
+
+      subGidRanges = mkOption {
+        type = with types; listOf (submodule subordinateGidRange);
+        default = [];
+        example = [
+          { startGid = 100; count = 1; }
+          { startGid = 1001; count = 999; }
+        ];
+        description = lib.mdDoc ''
+          Subordinate group ids that the user is allowed to use.
+          They are set into {file}`/etc/subgid` and are used
+          by `newgidmap` for user namespaces.
+        '';
+      };
+
+      autoSubUidGidRange = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Automatically allocate subordinate user and group ids for this user.
+          Allocated range is currently always of size 65536.
+        '';
+      };
+
+      createHome = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to create the home directory and ensure ownership as well as
+          permissions to match the user.
+        '';
+      };
+
+      useDefaultShell = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If true, the user's shell will be set to
+          {option}`users.defaultUserShell`.
+        '';
+      };
+
+      hashedPassword = mkOption {
+        type = with types; nullOr (passwdEntry str);
+        default = null;
+        description = lib.mdDoc ''
+          Specifies the hashed password for the user.
+          ${passwordDescription}
+          ${hashedPasswordDescription}
+        '';
+      };
+
+      password = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Specifies the (clear text) password for the user.
+          Warning: do not set confidential information here
+          because it is world-readable in the Nix store. This option
+          should only be used for public accounts.
+          ${passwordDescription}
+        '';
+      };
+
+      hashedPasswordFile = mkOption {
+        type = with types; nullOr str;
+        default = cfg.users.${name}.passwordFile;
+        defaultText = literalExpression "null";
+        description = lib.mdDoc ''
+          The full path to a file that contains the hash of the user's
+          password. The password file is read on each system activation. The
+          file should contain exactly one line, which should be the password in
+          an encrypted form that is suitable for the `chpasswd -e` command.
+          ${passwordDescription}
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        visible = false;
+        description = lib.mdDoc "Deprecated alias of hashedPasswordFile";
+      };
+
+      initialHashedPassword = mkOption {
+        type = with types; nullOr (passwdEntry str);
+        default = null;
+        description = lib.mdDoc ''
+          Specifies the initial hashed password for the user, i.e. the
+          hashed password assigned if the user does not already
+          exist. If {option}`users.mutableUsers` is true, the
+          password can be changed subsequently using the
+          {command}`passwd` command. Otherwise, it's
+          equivalent to setting the {option}`hashedPassword` option.
+
+          Note that the {option}`hashedPassword` option will override
+          this option if both are set.
+
+          ${hashedPasswordDescription}
+        '';
+      };
+
+      initialPassword = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Specifies the initial password for the user, i.e. the
+          password assigned if the user does not already exist. If
+          {option}`users.mutableUsers` is true, the password
+          can be changed subsequently using the
+          {command}`passwd` command. Otherwise, it's
+          equivalent to setting the {option}`password`
+          option. The same caveat applies: the password specified here
+          is world-readable in the Nix store, so it should only be
+          used for guest accounts or passwords that will be changed
+          promptly.
+
+          Note that the {option}`password` option will override this
+          option if both are set.
+        '';
+      };
+
+      packages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.firefox pkgs.thunderbird ]";
+        description = lib.mdDoc ''
+          The set of packages that should be made available to the user.
+          This is in contrast to {option}`environment.systemPackages`,
+          which adds packages to all users.
+        '';
+      };
+
+      expires = mkOption {
+        type = types.nullOr (types.strMatching "[[:digit:]]{4}-[[:digit:]]{2}-[[:digit:]]{2}");
+        default = null;
+        description = lib.mdDoc ''
+          Set the date on which the user's account will no longer be
+          accessible. The date is expressed in the format YYYY-MM-DD, or null
+          to disable the expiry.
+          A user whose account is locked must contact the system
+          administrator before being able to use the system again.
+        '';
+      };
+
+      linger = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable lingering for this user. If true, systemd user
+          units will start at boot, rather than starting at login and stopping
+          at logout. This is the declarative equivalent of running
+          `loginctl enable-linger` for this user.
+
+          If false, user units will not be started until the user logs in, and
+          may be stopped on logout depending on the settings in `logind.conf`.
+        '';
+      };
+    };
+
+    config = mkMerge
+      [ { name = mkDefault name;
+          shell = mkIf config.useDefaultShell (mkDefault cfg.defaultUserShell);
+        }
+        (mkIf config.isNormalUser {
+          group = mkDefault "users";
+          createHome = mkDefault true;
+          home = mkDefault "/home/${config.name}";
+          homeMode = mkDefault "700";
+          useDefaultShell = mkDefault true;
+          isSystemUser = mkDefault false;
+        })
+        # If !mutableUsers, setting ‘initialPassword’ is equivalent to
+        # setting ‘password’ (and similarly for hashed passwords).
+        (mkIf (!cfg.mutableUsers && config.initialPassword != null) {
+          password = mkDefault config.initialPassword;
+        })
+        (mkIf (!cfg.mutableUsers && config.initialHashedPassword != null) {
+          hashedPassword = mkDefault config.initialHashedPassword;
+        })
+        (mkIf (config.isNormalUser && config.subUidRanges == [] && config.subGidRanges == []) {
+          autoSubUidGidRange = mkDefault true;
+        })
+      ];
+
+  };
+
+  groupOpts = { name, config, ... }: {
+
+    options = {
+
+      name = mkOption {
+        type = types.passwdEntry types.str;
+        description = lib.mdDoc ''
+          The name of the group. If undefined, the name of the attribute set
+          will be used.
+        '';
+      };
+
+      gid = mkOption {
+        type = with types; nullOr int;
+        default = null;
+        description = lib.mdDoc ''
+          The group GID. If the GID is null, a free GID is picked on
+          activation.
+        '';
+      };
+
+      members = mkOption {
+        type = with types; listOf (passwdEntry str);
+        default = [];
+        description = lib.mdDoc ''
+          The user names of the group members, added to the
+          `/etc/group` file.
+        '';
+      };
+
+    };
+
+    config = {
+      name = mkDefault name;
+
+      members = mapAttrsToList (n: u: u.name) (
+        filterAttrs (n: u: elem config.name u.extraGroups) cfg.users
+      );
+    };
+
+  };
+
+  subordinateUidRange = {
+    options = {
+      startUid = mkOption {
+        type = types.int;
+        description = lib.mdDoc ''
+          Start of the range of subordinate user ids that the user is
+          allowed to use.
+        '';
+      };
+      count = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc "Count of subordinate user ids";
+      };
+    };
+  };
+
+  subordinateGidRange = {
+    options = {
+      startGid = mkOption {
+        type = types.int;
+        description = lib.mdDoc ''
+          Start of the range of subordinate group ids that the user is
+          allowed to use.
+        '';
+      };
+      count = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc "Count of subordinate group ids";
+      };
+    };
+  };
+
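+  # Fold over the attribute names, collecting each id (as a string) into an
+  # accumulator set; the result is true iff no id occurs twice. Duplicates are
+  # reported via builtins.trace.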
+  idsAreUnique = set: idAttr: !(foldr (name: args@{ dup, acc }:
+    let
+      id = builtins.toString (builtins.getAttr idAttr (builtins.getAttr name set));
+      exists = builtins.hasAttr id acc;
+      newAcc = acc // (builtins.listToAttrs [ { name = id; value = true; } ]);
+    in if dup then args else if exists
+      then builtins.trace "Duplicate ${idAttr} ${id}" { dup = true; acc = null; }
+      else { dup = false; acc = newAcc; }
+    ) { dup = false; acc = {}; } (builtins.attrNames set)).dup;
+
+  uidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) cfg.users) "uid";
+  gidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) cfg.groups) "gid";
+  sdInitrdUidsAreUnique = idsAreUnique (filterAttrs (n: u: u.uid != null) config.boot.initrd.systemd.users) "uid";
+  sdInitrdGidsAreUnique = idsAreUnique (filterAttrs (n: g: g.gid != null) config.boot.initrd.systemd.groups) "gid";
+  groupNames = lib.mapAttrsToList (n: g: g.name) cfg.groups;
+  usersWithoutExistingGroup = lib.filterAttrs (n: u: !lib.elem u.group groupNames) cfg.users;
+
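+  # JSON consumed by update-users-groups.pl, roughly of the shape
+  # { "mutableUsers": bool, "users": [ { "name", "uid", "group", ... } ], "groups": [ ... ] }.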
+  spec = pkgs.writeText "users-groups.json" (builtins.toJSON {
+    inherit (cfg) mutableUsers;
+    users = mapAttrsToList (_: u:
+      { inherit (u)
+          name uid group description home homeMode createHome isSystemUser
+          password hashedPasswordFile hashedPassword
+          autoSubUidGidRange subUidRanges subGidRanges
+          initialPassword initialHashedPassword expires;
+        shell = utils.toShellPath u.shell;
+      }) cfg.users;
+    groups = attrValues cfg.groups;
+  });
+
+  systemShells =
+    let
+      shells = mapAttrsToList (_: u: u.shell) cfg.users;
+    in
+      filter types.shellPackage.check shells;
+
+in {
+  imports = [
+    (mkAliasOptionModuleMD [ "users" "extraUsers" ] [ "users" "users" ])
+    (mkAliasOptionModuleMD [ "users" "extraGroups" ] [ "users" "groups" ])
+    (mkRenamedOptionModule ["security" "initialRootPassword"] ["users" "users" "root" "initialHashedPassword"])
+  ];
+
+  ###### interface
+  options = {
+
+    users.mutableUsers = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        If set to `true`, you are free to add new users and groups to the system
+        with the ordinary `useradd` and
+        `groupadd` commands. On system activation, the
+        existing contents of the `/etc/passwd` and
+        `/etc/group` files will be merged with the
+        contents generated from the `users.users` and
+        `users.groups` options.
+        The initial password for a user will be set
+        according to `users.users`, but existing passwords
+        will not be changed.
+
+        ::: {.warning}
+        If set to `false`, the contents of the user and
+        group files will simply be replaced on system activation. This also
+        holds for the user passwords; all changed
+        passwords will be reset according to the
+        `users.users` configuration on activation.
+        :::
+      '';
+    };
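+    # For example, a fully declarative setup would typically pin the root
+    # password hash instead of relying on passwd(1):
+    #   users.mutableUsers = false;
+    #   users.users.root.hashedPassword = "$6$...";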
+
+    users.enforceIdUniqueness = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to require that no two users/groups share the same uid/gid.
+      '';
+    };
+
+    users.users = mkOption {
+      default = {};
+      type = with types; attrsOf (submodule userOpts);
+      example = {
+        alice = {
+          uid = 1234;
+          description = "Alice Q. User";
+          home = "/home/alice";
+          createHome = true;
+          group = "users";
+          extraGroups = ["wheel"];
+          shell = "/bin/sh";
+        };
+      };
+      description = lib.mdDoc ''
+        Additional user accounts to be created automatically by the system.
+        This can also be used to set options for root.
+      '';
+    };
+
+    users.groups = mkOption {
+      default = {};
+      example =
+        { students.gid = 1001;
+          hackers = { };
+        };
+      type = with types; attrsOf (submodule groupOpts);
+      description = lib.mdDoc ''
+        Additional groups to be created automatically by the system.
+      '';
+    };
+
+
+    users.allowNoPasswordLogin = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Disable checking that at least the `root` user or a user in the `wheel` group can log in using
+        a password or an SSH key.
+
+        WARNING: enabling this can lock you out of your system. Enable this only if you know what you are doing.
+      '';
+    };
+
+    # systemd initrd
+    boot.initrd.systemd.users = mkOption {
+      description = ''
+        Users to include in initrd.
+      '';
+      default = {};
+      type = types.attrsOf (types.submodule ({ name, ... }: {
+        options.uid = mkOption {
+          type = types.int;
+          description = ''
+            ID of the user in initrd.
+          '';
+          defaultText = literalExpression "config.users.users.\${name}.uid";
+          default = cfg.users.${name}.uid;
+        };
+        options.group = mkOption {
+          type = types.singleLineStr;
+          description = ''
+            Group the user belongs to in initrd.
+          '';
+          defaultText = literalExpression "config.users.users.\${name}.group";
+          default = cfg.users.${name}.group;
+        };
+        options.shell = mkOption {
+          type = types.passwdEntry types.path;
+          description = ''
+            The path to the user's shell in initrd.
+          '';
+          default = "${pkgs.shadow}/bin/nologin";
+          defaultText = literalExpression "\${pkgs.shadow}/bin/nologin";
+        };
+      }));
+    };
+
+    boot.initrd.systemd.groups = mkOption {
+      description = ''
+        Groups to include in initrd.
+      '';
+      default = {};
+      type = types.attrsOf (types.submodule ({ name, ... }: {
+        options.gid = mkOption {
+          type = types.int;
+          description = ''
+            ID of the group in initrd.
+          '';
+          defaultText = literalExpression "config.users.groups.\${name}.gid";
+          default = cfg.groups.${name}.gid;
+        };
+      }));
+    };
+  };
+
+
+  ###### implementation
+
+  config = let
+    cryptSchemeIdPatternGroup = "(${lib.concatStringsSep "|" pkgs.libxcrypt.enabledCryptSchemeIds})";
+  in {
+
+    users.users = {
+      root = {
+        uid = ids.uids.root;
+        description = "System administrator";
+        home = "/root";
+        shell = mkDefault cfg.defaultUserShell;
+        group = "root";
+        initialHashedPassword = mkDefault "!";
+      };
+      nobody = {
+        uid = ids.uids.nobody;
+        isSystemUser = true;
+        description = "Unprivileged account (don't use!)";
+        group = "nogroup";
+      };
+    };
+
+    users.groups = {
+      root.gid = ids.gids.root;
+      wheel.gid = ids.gids.wheel;
+      disk.gid = ids.gids.disk;
+      kmem.gid = ids.gids.kmem;
+      tty.gid = ids.gids.tty;
+      floppy.gid = ids.gids.floppy;
+      uucp.gid = ids.gids.uucp;
+      lp.gid = ids.gids.lp;
+      cdrom.gid = ids.gids.cdrom;
+      tape.gid = ids.gids.tape;
+      audio.gid = ids.gids.audio;
+      video.gid = ids.gids.video;
+      dialout.gid = ids.gids.dialout;
+      nogroup.gid = ids.gids.nogroup;
+      users.gid = ids.gids.users;
+      nixbld.gid = ids.gids.nixbld;
+      utmp.gid = ids.gids.utmp;
+      adm.gid = ids.gids.adm;
+      input.gid = ids.gids.input;
+      kvm.gid = ids.gids.kvm;
+      render.gid = ids.gids.render;
+      sgx.gid = ids.gids.sgx;
+      shadow.gid = ids.gids.shadow;
+    };
+
+    system.activationScripts.users = {
+      supportsDryActivation = true;
+      text = ''
+        install -m 0700 -d /root
+        install -m 0755 -d /home
+
+        ${pkgs.perl.withPackages (p: [ p.FileSlurp p.JSON ])}/bin/perl \
+        -w ${./update-users-groups.pl} ${spec}
+      '';
+    };
+
+    system.activationScripts.update-lingering = let
+      lingerDir = "/var/lib/systemd/linger";
+      lingeringUsers = map (u: u.name) (attrValues (flip filterAttrs cfg.users (n: u: u.linger)));
+      lingeringUsersFile = builtins.toFile "lingering-users"
+        (concatStrings (map (s: "${s}\n")
+          (sort (a: b: a < b) lingeringUsers)));  # this sorting is important for `comm` to work correctly
+    in stringAfter [ "users" ] ''
+      if [ -e ${lingerDir} ] ; then
+        cd ${lingerDir}
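+        # comm(1) the sorted directory listing against the declared list:
+        # -3 -1 keeps names only on disk (disable lingering), -3 -2 keeps names
+        # only in the declared list (enable lingering).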
+        ls ${lingerDir} | sort | comm -3 -1 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl disable-linger
+        ls ${lingerDir} | sort | comm -3 -2 ${lingeringUsersFile} - | xargs -r ${pkgs.systemd}/bin/loginctl  enable-linger
+      fi
+    '';
+
+    # Warn about user accounts with deprecated password hashing schemes
+    system.activationScripts.hashes = {
+      deps = [ "users" ];
+      text = ''
+        users=()
+        while IFS=: read -r user hash _; do
+          if [[ "$hash" = "$"* && ! "$hash" =~ ^\''$${cryptSchemeIdPatternGroup}\$ ]]; then
+            users+=("$user")
+          fi
+        done </etc/shadow
+
+        if (( "''${#users[@]}" )); then
+          echo "
+        WARNING: The following user accounts rely on password hashing algorithms
+        that have been removed. They need to be renewed as soon as possible, as
+        they prevent their users from logging in."
+          printf ' - %s\n' "''${users[@]}"
+        fi
+      '';
+    };
+
+    # for backwards compatibility
+    system.activationScripts.groups = stringAfter [ "users" ] "";
+
+    # Install all the user shells
+    environment.systemPackages = systemShells;
+
+    environment.etc = mapAttrs' (_: { packages, name, ... }: {
+      name = "profiles/per-user/${name}";
+      value.source = pkgs.buildEnv {
+        name = "user-environment";
+        paths = packages;
+        inherit (config.environment) pathsToLink extraOutputsToInstall;
+        inherit (config.system.path) ignoreCollisions postBuild;
+      };
+    }) (filterAttrs (_: u: u.packages != []) cfg.users);
+
+    environment.profiles = [
+      "$HOME/.nix-profile"
+      "\${XDG_STATE_HOME}/nix/profile"
+      "$HOME/.local/state/nix/profile"
+      "/etc/profiles/per-user/$USER"
+    ];
+
+    # systemd initrd
+    boot.initrd.systemd = lib.mkIf config.boot.initrd.systemd.enable {
+      contents = {
+        "/etc/passwd".text = ''
+          ${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: { uid, group, shell }: let
+            g = config.boot.initrd.systemd.groups.${group};
+          in "${n}:x:${toString uid}:${toString g.gid}::/var/empty:${shell}") config.boot.initrd.systemd.users)}
+        '';
+        "/etc/group".text = ''
+          ${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: { gid }: "${n}:x:${toString gid}:") config.boot.initrd.systemd.groups)}
+        '';
+        "/etc/shells".text = lib.concatStringsSep "\n" (lib.unique (lib.mapAttrsToList (_: u: u.shell) config.boot.initrd.systemd.users)) + "\n";
+      };
+
+      storePaths = [ "${pkgs.shadow}/bin/nologin" ];
+
+      users = {
+        root = { shell = lib.mkDefault "/bin/bash"; };
+        nobody = {};
+      };
+
+      groups = {
+        root = {};
+        nogroup = {};
+        systemd-journal = {};
+        tty = {};
+        dialout = {};
+        kmem = {};
+        input = {};
+        video = {};
+        render = {};
+        sgx = {};
+        audio = {};
+        lp = {};
+        disk = {};
+        cdrom = {};
+        tape = {};
+        kvm = {};
+      };
+    };
+
+    assertions = [
+      { assertion = !cfg.enforceIdUniqueness || (uidsAreUnique && gidsAreUnique);
+        message = "UIDs and GIDs must be unique!";
+      }
+      { assertion = !cfg.enforceIdUniqueness || (sdInitrdUidsAreUnique && sdInitrdGidsAreUnique);
+        message = "systemd initrd UIDs and GIDs must be unique!";
+      }
+      { assertion = usersWithoutExistingGroup == {};
+        message =
+          let
+            errUsers = lib.attrNames usersWithoutExistingGroup;
+            missingGroups = lib.unique (lib.mapAttrsToList (n: u: u.group) usersWithoutExistingGroup);
+            mkConfigHint = group: "users.groups.${group} = {};";
+          in ''
+            The following users have a primary group that is undefined: ${lib.concatStringsSep " " errUsers}
+            Hint: Add this to your NixOS configuration:
+              ${lib.concatStringsSep "\n  " (map mkConfigHint missingGroups)}
+          '';
+      }
+      { # If mutableUsers is false, to prevent users creating a
+        # configuration that locks them out of the system, ensure that
+        # there is at least one "privileged" account that has a
+        # password or an SSH authorized key. Privileged accounts are
+        # root and users in the wheel group.
+        # The check does not apply when users.allowNoPasswordLogin is set,
+        # nor when users.mutableUsers is true.
+        assertion = !cfg.mutableUsers -> !cfg.allowNoPasswordLogin ->
+          any id (mapAttrsToList (name: cfg:
+            (name == "root"
+             || cfg.group == "wheel"
+             || elem "wheel" cfg.extraGroups)
+            &&
+            (allowsLogin cfg.hashedPassword
+             || cfg.password != null
+             || cfg.hashedPasswordFile != null
+             || cfg.openssh.authorizedKeys.keys != []
+             || cfg.openssh.authorizedKeys.keyFiles != [])
+          ) cfg.users ++ [
+            config.security.googleOsLogin.enable
+          ]);
+        message = ''
+          Neither the root account nor any wheel user has a password or SSH authorized key.
+          You must set one to prevent being locked out of your system.
+          If you really want to be locked out of your system, set users.allowNoPasswordLogin = true;
+          However you are most probably better off by setting users.mutableUsers = true; and
+          manually running passwd root to set the root password.
+          '';
+      }
+    ] ++ flatten (flip mapAttrsToList cfg.users (name: user:
+      [
+          {
+            assertion = (user.hashedPassword != null)
+              -> (builtins.match ".*:.*" user.hashedPassword == null);
+            message = ''
+              The password hash of user "${user.name}" contains a ":" character.
+              This is invalid and would break the login system because the fields
+              of /etc/shadow (file where hashes are stored) are colon-separated.
+              Please check the value of option `users.users."${user.name}".hashedPassword`.'';
+          }
+          {
+            assertion = let
+              xor = a: b: a && !b || b && !a;
+              isEffectivelySystemUser = user.isSystemUser || (user.uid != null && user.uid < 1000);
+            in xor isEffectivelySystemUser user.isNormalUser;
+            message = ''
+              Exactly one of users.users.${user.name}.isSystemUser and users.users.${user.name}.isNormalUser must be set.
+            '';
+          }
+          {
+            assertion = user.group != "";
+            message = ''
+              users.users.${user.name}.group is unset. This used to default to
+              nogroup, but this is unsafe. For example you can create a group
+              for this user with:
+              users.users.${user.name}.group = "${user.name}";
+              users.groups.${user.name} = {};
+            '';
+          }
+        ] ++ (map (shell: {
+            assertion = !user.ignoreShellProgramCheck -> (user.shell == pkgs.${shell}) -> (config.programs.${shell}.enable == true);
+            message = ''
+              users.users.${user.name}.shell is set to ${shell}, but
+              programs.${shell}.enable is not true. This will cause the ${shell}
+              shell to lack the basic nix directories in its PATH and might make
+              logging in as that user impossible. You can fix it with:
+              programs.${shell}.enable = true;
+
+              If you know what you're doing and you are fine with the behavior,
+              set users.users.${user.name}.ignoreShellProgramCheck = true;
+              instead.
+            '';
+          }) [
+          "fish"
+          "xonsh"
+          "zsh"
+        ])
+    ));
+
+    warnings =
+      builtins.filter (x: x != null) (
+        flip mapAttrsToList cfg.users (_: user:
+        # This regex matches a subset of the Modular Crypto Format (MCF)[1]
+        # informal standard. Since this depends largely on the OS or the
+        # specific implementation of crypt(3) we only support the (sane)
+        # schemes implemented by glibc and BSDs. In particular the original
+        # DES hash is excluded since, having no structure, it would validate
+        # common mistakes like typing the plaintext password.
+        #
+        # [1]: https://en.wikipedia.org/wiki/Crypt_(C)
+        let
+          sep = "\\$";
+          base64 = "[a-zA-Z0-9./]+";
+          id = cryptSchemeIdPatternGroup;
+          name = "[a-z0-9-]+";
+          value = "[a-zA-Z0-9/+.-]+";
+          options = "${name}(=${value})?(,${name}=${value})*";
+          scheme  = "${id}(${sep}${options})?";
+          content = "${base64}${sep}${base64}(${sep}${base64})?";
+          mcf = "^${sep}${scheme}${sep}${content}$";
+        in
+        if (allowsLogin user.hashedPassword
+            && user.hashedPassword != ""  # login without password
+            && builtins.match mcf user.hashedPassword == null)
+        then ''
+          The password hash of user "${user.name}" may be invalid. You must set a
+          valid hash or the user will be locked out of their account. Please
+          check the value of option `users.users."${user.name}".hashedPassword`.''
+        else null)
+        ++ flip mapAttrsToList cfg.users (name: user:
+          if user.passwordFile != null then
+            ''The option `users.users."${name}".passwordFile' has been renamed '' +
+            ''to `users.users."${name}".hashedPasswordFile'.''
+          else null)
+      );
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/vte.nix b/nixpkgs/nixos/modules/config/vte.nix
new file mode 100644
index 000000000000..a969607f6e0b
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/vte.nix
@@ -0,0 +1,56 @@
+# VTE
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  vteInitSnippet = ''
+    # Show current working directory in VTE terminals window title.
+    # Supports both bash and zsh, requires interactive shell.
+    . ${pkgs.vte}/etc/profile.d/vte.sh
+  '';
+
+in
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  options = {
+
+    programs.bash.vteIntegration = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable Bash integration for VTE terminals.
+        This allows it to preserve the current directory of the shell
+        across terminals.
+      '';
+    };
+
+    programs.zsh.vteIntegration = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable Zsh integration for VTE terminals.
+        This allows it to preserve the current directory of the shell
+        across terminals.
+      '';
+    };
+
+  };
+
+  config = mkMerge [
+    (mkIf config.programs.bash.vteIntegration {
+      programs.bash.interactiveShellInit = mkBefore vteInitSnippet;
+    })
+
+    (mkIf config.programs.zsh.vteIntegration {
+      programs.zsh.interactiveShellInit = vteInitSnippet;
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/config/xdg/autostart.nix b/nixpkgs/nixos/modules/config/xdg/autostart.nix
new file mode 100644
index 000000000000..a4fdbda911a2
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/xdg/autostart.nix
@@ -0,0 +1,26 @@
+{ config, lib, ... }:
+
+with lib;
+{
+  meta = {
+    maintainers = teams.freedesktop.members;
+  };
+
+  options = {
+    xdg.autostart.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to install files to support the
+        [XDG Autostart specification](https://specifications.freedesktop.org/autostart-spec/autostart-spec-latest.html).
+      '';
+    };
+  };
+
+  config = mkIf config.xdg.autostart.enable {
+    environment.pathsToLink = [
+      "/etc/xdg/autostart"
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/xdg/icons.nix b/nixpkgs/nixos/modules/config/xdg/icons.nix
new file mode 100644
index 000000000000..8d44a431445b
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/xdg/icons.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+{
+  meta = {
+    maintainers = teams.freedesktop.members;
+  };
+
+  options = {
+    xdg.icons.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to install files to support the
+        [XDG Icon Theme specification](https://specifications.freedesktop.org/icon-theme-spec/icon-theme-spec-latest.html).
+      '';
+    };
+  };
+
+  config = mkIf config.xdg.icons.enable {
+    environment.pathsToLink = [
+      "/share/icons"
+      "/share/pixmaps"
+    ];
+
+    environment.systemPackages = [
+      # Empty icon theme that contains index.theme file describing directories
+      # where toolkits should look for icons installed by apps.
+      pkgs.hicolor-icon-theme
+    ];
+
+    # libXcursor looks for cursors in XCURSOR_PATH
+    # it mostly follows the spec for icons
+    # See: https://www.x.org/releases/current/doc/man/man3/Xcursor.3.xhtml Themes
+
+    # These are preferred so they come first in the list
+    environment.sessionVariables.XCURSOR_PATH = [
+      "$HOME/.icons"
+      "$HOME/.local/share/icons"
+    ];
+
+    environment.profileRelativeSessionVariables.XCURSOR_PATH = [
+      "/share/icons"
+      "/share/pixmaps"
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/xdg/menus.nix b/nixpkgs/nixos/modules/config/xdg/menus.nix
new file mode 100644
index 000000000000..b8f829e81547
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/xdg/menus.nix
@@ -0,0 +1,29 @@
+{ config, lib, ... }:
+
+with lib;
+{
+  meta = {
+    maintainers = teams.freedesktop.members;
+  };
+
+  options = {
+    xdg.menus.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to install files to support the
+        [XDG Desktop Menu specification](https://specifications.freedesktop.org/menu-spec/menu-spec-latest.html).
+      '';
+    };
+  };
+
+  config = mkIf config.xdg.menus.enable {
+    environment.pathsToLink = [
+      "/share/applications"
+      "/share/desktop-directories"
+      "/etc/xdg/menus"
+      "/etc/xdg/menus/applications-merged"
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/xdg/mime.nix b/nixpkgs/nixos/modules/config/xdg/mime.nix
new file mode 100644
index 000000000000..3aa863083219
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/xdg/mime.nix
@@ -0,0 +1,102 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.xdg.mime;
+  associationOptions = with types; attrsOf (
+    coercedTo (either (listOf str) str) (x: concatStringsSep ";" (toList x)) str
+  );
+in
+
+{
+  meta = {
+    maintainers = teams.freedesktop.members ++ (with maintainers; [ figsoda ]);
+  };
+
+  options = {
+    xdg.mime.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to install files to support the
+        [XDG Shared MIME-info specification](https://specifications.freedesktop.org/shared-mime-info-spec/shared-mime-info-spec-latest.html) and the
+        [XDG MIME Applications specification](https://specifications.freedesktop.org/mime-apps-spec/mime-apps-spec-latest.html).
+      '';
+    };
+
+    xdg.mime.addedAssociations = mkOption {
+      type = associationOptions;
+      default = {};
+      example = {
+        "application/pdf" = "firefox.desktop";
+        "text/xml" = [ "nvim.desktop" "codium.desktop" ];
+      };
+      description = lib.mdDoc ''
+        Adds associations between mimetypes and applications. See the
+        [specifications](https://specifications.freedesktop.org/mime-apps-spec/mime-apps-spec-latest.html#associations)
+        for more information.
+      '';
+    };
+
+    xdg.mime.defaultApplications = mkOption {
+      type = associationOptions;
+      default = {};
+      example = {
+        "application/pdf" = "firefox.desktop";
+        "image/png" = [ "sxiv.desktop" "gimp.desktop" ];
+      };
+      description = lib.mdDoc ''
+        Sets the default applications for given mimetypes. See the
+        [specifications](https://specifications.freedesktop.org/mime-apps-spec/mime-apps-spec-latest.html#default)
+        for more information.
+      '';
+    };
+
+    xdg.mime.removedAssociations = mkOption {
+      type = associationOptions;
+      default = {};
+      example = {
+        "audio/mp3" = [ "mpv.desktop" "umpv.desktop" ];
+        "inode/directory" = "codium.desktop";
+      };
+      description = lib.mdDoc ''
+        Removes associations between mimetypes and applications. See the
+        [specifications](https://specifications.freedesktop.org/mime-apps-spec/mime-apps-spec-latest.html#associations)
+        for more information.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."xdg/mimeapps.list" = mkIf (
+      cfg.addedAssociations != {}
+      || cfg.defaultApplications != {}
+      || cfg.removedAssociations != {}
+    ) {
+      text = generators.toINI { } {
+        "Added Associations" = cfg.addedAssociations;
+        "Default Applications" = cfg.defaultApplications;
+        "Removed Associations" = cfg.removedAssociations;
+      };
+    };
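+    # With the defaultApplications example above this renders roughly:
+    #   [Default Applications]
+    #   application/pdf=firefox.desktop
+    #   image/png=sxiv.desktop;gimp.desktop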
+
+    environment.pathsToLink = [ "/share/mime" ];
+
+    environment.systemPackages = [
+      # this package also installs some useful data, as well as its utilities
+      pkgs.shared-mime-info
+    ];
+
+    environment.extraSetup = ''
+      if [ -w $out/share/mime ] && [ -d $out/share/mime/packages ]; then
+          XDG_DATA_DIRS=$out/share PKGSYSTEM_ENABLE_FSYNC=0 ${pkgs.buildPackages.shared-mime-info}/bin/update-mime-database -V $out/share/mime > /dev/null
+      fi
+
+      if [ -w $out/share/applications ]; then
+          ${pkgs.buildPackages.desktop-file-utils}/bin/update-desktop-database $out/share/applications
+      fi
+    '';
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/xdg/portal.nix b/nixpkgs/nixos/modules/config/xdg/portal.nix
new file mode 100644
index 000000000000..e19e5cf28b3b
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/xdg/portal.nix
@@ -0,0 +1,114 @@
+{ config, pkgs, lib, ... }:
+
+let
+  inherit (lib)
+    mkEnableOption
+    mkIf
+    mkOption
+    mkRenamedOptionModule
+    teams
+    types;
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "flatpak" "extraPortals" ] [ "xdg" "portal" "extraPortals" ])
+
+    ({ config, lib, options, ... }:
+      let
+        from = [ "xdg" "portal" "gtkUsePortal" ];
+        fromOpt = lib.getAttrFromPath from options;
+      in
+      {
+        warnings = lib.mkIf config.xdg.portal.gtkUsePortal [
+          "The option `${lib.showOption from}' defined in ${lib.showFiles fromOpt.files} has been deprecated. Setting the variable globally with `environment.sessionVariables' NixOS option can have unforeseen side-effects."
+        ];
+      }
+    )
+  ];
+
+  meta = {
+    maintainers = teams.freedesktop.members;
+  };
+
+  options.xdg.portal = {
+    enable =
+      mkEnableOption (lib.mdDoc ''[xdg desktop integration](https://github.com/flatpak/xdg-desktop-portal)'') // {
+        default = false;
+      };
+
+    extraPortals = mkOption {
+      type = types.listOf types.package;
+      default = [ ];
+      description = lib.mdDoc ''
+        List of additional portals to add to path. Portals allow interaction
+        with the system, like choosing files or taking screenshots. At minimum,
+        a desktop portal implementation should be listed. GNOME and KDE already
+        add `xdg-desktop-portal-gtk` and
+        `xdg-desktop-portal-kde` respectively. On other desktop
+        environments you probably want to add them yourself.
+      '';
+    };
+
+    gtkUsePortal = mkOption {
+      type = types.bool;
+      visible = false;
+      default = false;
+      description = lib.mdDoc ''
+        Sets environment variable `GTK_USE_PORTAL` to `1`.
+        This will force GTK-based programs run outside Flatpak to respect and use XDG Desktop Portals
+        for features like file chooser but it is an unsupported hack that can easily break things.
+        Defaults to `false` to respect its opt-in nature.
+      '';
+    };
+
+    xdgOpenUsePortal = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Sets environment variable `NIXOS_XDG_OPEN_USE_PORTAL` to `1`.
+        This will make `xdg-open` use the portal to open programs, which resolves bugs involving
+        programs opening inside FHS envs or with unexpected env vars set from wrappers.
+        See [#160923](https://github.com/NixOS/nixpkgs/issues/160923) for more info.
+      '';
+    };
+  };
+
+  config =
+    let
+      cfg = config.xdg.portal;
+      packages = [ pkgs.xdg-desktop-portal ] ++ cfg.extraPortals;
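+      # Merge every portal package into one tree so xdg-desktop-portal can find
+      # all portal definitions through XDG_DESKTOP_PORTAL_DIR (set below).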
+      joinedPortals = pkgs.buildEnv {
+        name = "xdg-portals";
+        paths = packages;
+        pathsToLink = [ "/share/xdg-desktop-portal/portals" "/share/applications" ];
+      };
+
+    in
+    mkIf cfg.enable {
+
+      assertions = [
+        {
+          assertion = cfg.extraPortals != [ ];
+          message = "Setting xdg.portal.enable to true requires a portal implementation in xdg.portal.extraPortals such as xdg-desktop-portal-gtk or xdg-desktop-portal-kde.";
+        }
+      ];
+
+      services.dbus.packages = packages;
+      systemd.packages = packages;
+
+      environment = {
+        # fixes screen sharing on plasmawayland on non-chromium apps by linking
+        # share/applications/*.desktop files
+        # see https://github.com/NixOS/nixpkgs/issues/145174
+        systemPackages = [ joinedPortals ];
+        pathsToLink = [ "/share/applications" ];
+
+        sessionVariables = {
+          GTK_USE_PORTAL = mkIf cfg.gtkUsePortal "1";
+          NIXOS_XDG_OPEN_USE_PORTAL = mkIf cfg.xdgOpenUsePortal "1";
+          XDG_DESKTOP_PORTAL_DIR = "${joinedPortals}/share/xdg-desktop-portal/portals";
+        };
+      };
+    };
+}
diff --git a/nixpkgs/nixos/modules/config/xdg/portals/lxqt.nix b/nixpkgs/nixos/modules/config/xdg/portals/lxqt.nix
new file mode 100644
index 000000000000..18fcf3d81c02
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/xdg/portals/lxqt.nix
@@ -0,0 +1,49 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.xdg.portal.lxqt;
+
+in
+{
+  meta = {
+    maintainers = teams.lxqt.members;
+  };
+
+  options.xdg.portal.lxqt = {
+    enable = mkEnableOption (lib.mdDoc ''
+      the desktop portal for the LXQt desktop environment.
+
+      This will add the `lxqt.xdg-desktop-portal-lxqt`
+      package (with the extra Qt styles) into the
+      {option}`xdg.portal.extraPortals` option
+    '');
+
+    styles = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression ''[
+        pkgs.libsForQt5.qtstyleplugin-kvantum
+        pkgs.breeze-qt5
+        pkgs.qtcurve
+      ];
+      '';
+      description = lib.mdDoc ''
+        Extra Qt styles that will be available to the
+        `lxqt.xdg-desktop-portal-lxqt`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    xdg.portal = {
+      enable = true;
+      extraPortals = [
+        (pkgs.lxqt.xdg-desktop-portal-lxqt.override { extraQtStyles = cfg.styles; })
+      ];
+    };
+
+    environment.systemPackages = cfg.styles;
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/xdg/portals/wlr.nix b/nixpkgs/nixos/modules/config/xdg/portals/wlr.nix
new file mode 100644
index 000000000000..d84ae794e3bc
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/xdg/portals/wlr.nix
@@ -0,0 +1,67 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.xdg.portal.wlr;
+  package = pkgs.xdg-desktop-portal-wlr;
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "xdg-desktop-portal-wlr.ini" cfg.settings;
+in
+{
+  meta = {
+    maintainers = with maintainers; [ minijackson ];
+  };
+
+  options.xdg.portal.wlr = {
+    enable = mkEnableOption (lib.mdDoc ''
+      desktop portal for wlroots-based desktops
+
+      This will add the `xdg-desktop-portal-wlr` package into
+      the {option}`xdg.portal.extraPortals` option, and provide the
+      configuration file
+    '');
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Configuration for `xdg-desktop-portal-wlr`.
+
+        See `xdg-desktop-portal-wlr(5)` for supported
+        values.
+      '';
+
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+      };
+
+      default = { };
+
+      # Example taken from the manpage
+      example = literalExpression ''
+        {
+          screencast = {
+            output_name = "HDMI-A-1";
+            max_fps = 30;
+            exec_before = "disable_notifications.sh";
+            exec_after = "enable_notifications.sh";
+            chooser_type = "simple";
+            chooser_cmd = "''${pkgs.slurp}/bin/slurp -f %o -or";
+          };
+        }
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    xdg.portal = {
+      enable = true;
+      extraPortals = [ package ];
+    };
+
+    systemd.user.services.xdg-desktop-portal-wlr.serviceConfig.ExecStart = [
+      # Empty ExecStart value to override the field
+      ""
+      "${package}/libexec/xdg-desktop-portal-wlr --config=${configFile}"
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/config/xdg/sounds.nix b/nixpkgs/nixos/modules/config/xdg/sounds.nix
new file mode 100644
index 000000000000..713d68131fc0
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/xdg/sounds.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+{
+  meta = {
+    maintainers = teams.freedesktop.members;
+  };
+
+  options = {
+    xdg.sounds.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to install files to support the
+        [XDG Sound Theme specification](https://www.freedesktop.org/wiki/Specifications/sound-theme-spec/).
+      '';
+    };
+  };
+
+  config = mkIf config.xdg.sounds.enable {
+    environment.systemPackages = [
+      pkgs.sound-theme-freedesktop
+    ];
+
+    environment.pathsToLink = [
+      "/share/sounds"
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/config/zram.nix b/nixpkgs/nixos/modules/config/zram.nix
new file mode 100644
index 000000000000..ec8b4ed6e931
--- /dev/null
+++ b/nixpkgs/nixos/modules/config/zram.nix
@@ -0,0 +1,130 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  cfg = config.zramSwap;
+  devices = map (nr: "zram${toString nr}") (lib.range 0 (cfg.swapDevices - 1));
+
+in
+
+{
+
+  imports = [
+    (lib.mkRemovedOptionModule [ "zramSwap" "numDevices" ] "Using ZRAM devices as general purpose ephemeral block devices is no longer supported")
+  ];
+
+  ###### interface
+
+  options = {
+
+    zramSwap = {
+
+      enable = lib.mkOption {
+        default = false;
+        type = lib.types.bool;
+        description = lib.mdDoc ''
+          Enable in-memory compressed devices and swap space provided by the zram
+          kernel module.
+          See the [kernel zram documentation](https://www.kernel.org/doc/Documentation/blockdev/zram.txt).
+        '';
+      };
+
+      swapDevices = lib.mkOption {
+        default = 1;
+        type = lib.types.int;
+        description = lib.mdDoc ''
+          Number of zram devices to be used as swap; the recommended value is 1.
+        '';
+      };
+
+      memoryPercent = lib.mkOption {
+        default = 50;
+        type = lib.types.int;
+        description = lib.mdDoc ''
+          Maximum total amount of memory that can be stored in the zram swap devices
+          (as a percentage of your total memory). Defaults to 1/2 of your total
+          RAM. Run `zramctl` to check how well memory is compressed.
+          This doesn't define how much memory will be used by the zram swap devices.
+        '';
+      };
+
+      memoryMax = lib.mkOption {
+        default = null;
+        type = with lib.types; nullOr int;
+        description = lib.mdDoc ''
+          Maximum total amount of memory (in bytes) that can be stored in the zram
+          swap devices.
+          This doesn't define how much memory will be used by the zram swap devices.
+        '';
+      };
+
+      priority = lib.mkOption {
+        default = 5;
+        type = lib.types.int;
+        description = lib.mdDoc ''
+          Priority of the zram swap devices. It should be a number higher than
+          the priority of your disk-based swap devices (so that the system will
+          fill the zram swap devices before falling back to disk swap).
+        '';
+      };
+
+      algorithm = lib.mkOption {
+        default = "zstd";
+        example = "lz4";
+        type = with lib.types; either (enum [ "lzo" "lz4" "zstd" ]) str;
+        description = lib.mdDoc ''
+          Compression algorithm. `lzo` has good compression,
+          but is slow. `lz4` has bad compression, but is fast.
+          `zstd` offers both good compression and speed, but requires a newer kernel.
+          You can check which other algorithms are supported by your zram device with
+          {command}`cat /sys/class/block/zram*/comp_algorithm`.
+        '';
+      };
+
+      writebackDevice = lib.mkOption {
+        default = null;
+        example = "/dev/zvol/tarta-zoot/swap-writeback";
+        type = lib.types.nullOr lib.types.path;
+        description = lib.mdDoc ''
+          Write incompressible pages to this device,
+          as there's no gain from keeping them in RAM.
+        '';
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.writebackDevice == null || cfg.swapDevices <= 1;
+        message = "A single writeback device cannot be shared among multiple zram devices";
+      }
+    ];
+
+    services.zram-generator.enable = true;
+
+    services.zram-generator.settings = lib.listToAttrs
+      (builtins.map
+        (dev: {
+          name = dev;
+          value =
+            let
+              size = "${toString cfg.memoryPercent} / 100 * ram";
+            in
+            {
+              zram-size = if cfg.memoryMax != null then "min(${size}, ${toString cfg.memoryMax} / 1024 / 1024)" else size;
+              compression-algorithm = cfg.algorithm;
+              swap-priority = cfg.priority;
+            } // lib.optionalAttrs (cfg.writebackDevice != null) {
+              writeback-device = cfg.writebackDevice;
+            };
+        })
+        devices);
+
+  };
+
+}
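A minimal usage sketch of the zramSwap options above; the values simply restate the option defaults rather than a tuning recommendation.

{
  zramSwap = {
    enable = true;
    memoryPercent = 50;   # cap zram contents at half of total RAM
    algorithm = "zstd";   # default; see comp_algorithm for alternatives
    priority = 5;         # higher than typical disk-based swap
  };
}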
diff --git a/nixpkgs/nixos/modules/hardware/acpilight.nix b/nixpkgs/nixos/modules/hardware/acpilight.nix
new file mode 100644
index 000000000000..d8d82b0e81a4
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/acpilight.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.acpilight;
+in
+{
+  options = {
+    hardware.acpilight = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable acpilight.
+          This will allow brightness control via xbacklight for users in the video group.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ acpilight ];
+    services.udev.packages = with pkgs; [ acpilight ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/all-firmware.nix b/nixpkgs/nixos/modules/hardware/all-firmware.nix
new file mode 100644
index 000000000000..6f58e848b38a
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/all-firmware.nix
@@ -0,0 +1,79 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware;
+in {
+
+  imports = [
+    (mkRenamedOptionModule [ "networking" "enableRT73Firmware" ] [ "hardware" "enableRedistributableFirmware" ])
+    (mkRenamedOptionModule [ "networking" "enableIntel3945ABGFirmware" ] [ "hardware" "enableRedistributableFirmware" ])
+    (mkRenamedOptionModule [ "networking" "enableIntel2100BGFirmware" ] [ "hardware" "enableRedistributableFirmware" ])
+    (mkRenamedOptionModule [ "networking" "enableRalinkFirmware" ] [ "hardware" "enableRedistributableFirmware" ])
+    (mkRenamedOptionModule [ "networking" "enableRTL8192cFirmware" ] [ "hardware" "enableRedistributableFirmware" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    hardware.enableAllFirmware = mkEnableOption "all firmware regardless of license";
+
+    hardware.enableRedistributableFirmware = mkEnableOption "firmware with a license allowing redistribution" // {
+      default = config.hardware.enableAllFirmware;
+      defaultText = lib.literalExpression "config.hardware.enableAllFirmware";
+    };
+
+    hardware.wirelessRegulatoryDatabase = mkEnableOption "loading the wireless regulatory database at boot" // {
+      default = cfg.enableRedistributableFirmware || cfg.enableAllFirmware;
+      defaultText = literalMD "Enabled if proprietary firmware is allowed via {option}`enableRedistributableFirmware` or {option}`enableAllFirmware`.";
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge [
+    (mkIf (cfg.enableAllFirmware || cfg.enableRedistributableFirmware) {
+      hardware.firmware = with pkgs; [
+        linux-firmware
+        intel2200BGFirmware
+        rtl8192su-firmware
+        rt5677-firmware
+        rtl8761b-firmware
+        rtw88-firmware
+        zd1211fw
+        alsa-firmware
+        sof-firmware
+        libreelec-dvb-firmware
+      ] ++ optional pkgs.stdenv.hostPlatform.isAarch raspberrypiWirelessFirmware
+        ++ optionals (versionOlder config.boot.kernelPackages.kernel.version "4.13") [
+        rtl8723bs-firmware
+      ];
+    })
+    (mkIf cfg.enableAllFirmware {
+      assertions = [{
+        assertion = !cfg.enableAllFirmware || pkgs.config.allowUnfree;
+        message = ''
+          hardware.enableAllFirmware adds non-redistributable licensed firmware files.
+            This requires nixpkgs.config.allowUnfree to be true.
+            An alternative is to use the hardware.enableRedistributableFirmware option.
+        '';
+      }];
+      hardware.firmware = with pkgs; [
+        broadcom-bt-firmware
+        b43Firmware_5_1_138
+        b43Firmware_6_30_163_46
+        xow_dongle-firmware
+      ] ++ optionals pkgs.stdenv.hostPlatform.isx86 [
+        facetimehd-calibration
+        facetimehd-firmware
+      ];
+    })
+    (mkIf cfg.wirelessRegulatoryDatabase {
+      hardware.firmware = [ pkgs.wireless-regdb ];
+    })
+  ];
+}
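A minimal usage sketch; the redistributable set is usually sufficient, and the commented line shows the unfree variant guarded by the assertion above.

{
  hardware.enableRedistributableFirmware = true;
  # hardware.enableAllFirmware = true;  # requires nixpkgs.config.allowUnfree = true
}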
diff --git a/nixpkgs/nixos/modules/hardware/bladeRF.nix b/nixpkgs/nixos/modules/hardware/bladeRF.nix
new file mode 100644
index 000000000000..52a1f52024c8
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/bladeRF.nix
@@ -0,0 +1,28 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.bladeRF;
+
+in
+
+{
+  options.hardware.bladeRF = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables udev rules for BladeRF devices. By default grants access
+        to users in the "bladerf" group. You may want to install the
+        libbladeRF package.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.libbladeRF ];
+    users.groups.bladerf = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/brillo.nix b/nixpkgs/nixos/modules/hardware/brillo.nix
new file mode 100644
index 000000000000..612061718fad
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/brillo.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.brillo;
+in
+{
+  options = {
+    hardware.brillo = {
+      enable = mkEnableOption (lib.mdDoc ''
+        brillo in userspace.
+        This will allow brightness control for users in the video group
+      '');
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.brillo ];
+    environment.systemPackages = [ pkgs.brillo ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/ckb-next.nix b/nixpkgs/nixos/modules/hardware/ckb-next.nix
new file mode 100644
index 000000000000..79977939eec8
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/ckb-next.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.ckb-next;
+
+in
+  {
+    imports = [
+      (mkRenamedOptionModule [ "hardware" "ckb" "enable" ] [ "hardware" "ckb-next" "enable" ])
+      (mkRenamedOptionModule [ "hardware" "ckb" "package" ] [ "hardware" "ckb-next" "package" ])
+    ];
+
+    options.hardware.ckb-next = {
+      enable = mkEnableOption (lib.mdDoc "the Corsair keyboard/mouse driver");
+
+      gid = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 100;
+        description = lib.mdDoc ''
+          Limit access to the ckb daemon to a particular group.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ckb-next;
+        defaultText = literalExpression "pkgs.ckb-next";
+        description = lib.mdDoc ''
+          The package implementing the Corsair keyboard/mouse driver.
+        '';
+      };
+    };
+
+    config = mkIf cfg.enable {
+      environment.systemPackages = [ cfg.package ];
+
+      systemd.services.ckb-next = {
+        description = "Corsair Keyboards and Mice Daemon";
+        wantedBy = ["multi-user.target"];
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/ckb-next-daemon ${optionalString (cfg.gid != null) "--gid=${builtins.toString cfg.gid}"}";
+          Restart = "on-failure";
+        };
+      };
+    };
+
+    meta = {
+      maintainers = with lib.maintainers; [ ];
+    };
+  }
diff --git a/nixpkgs/nixos/modules/hardware/corectrl.nix b/nixpkgs/nixos/modules/hardware/corectrl.nix
new file mode 100644
index 000000000000..8ef61a158d5c
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/corectrl.nix
@@ -0,0 +1,62 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.corectrl;
+in
+{
+  options.programs.corectrl = {
+    enable = mkEnableOption (lib.mdDoc ''
+      CoreCtrl, a tool to overclock AMD graphics cards and processors.
+      Add your user to the corectrl group to run corectrl without needing to enter your password
+    '');
+
+    gpuOverclock = {
+      enable = mkEnableOption (lib.mdDoc ''
+        GPU overclocking
+      '');
+      ppfeaturemask = mkOption {
+        type = types.str;
+        default = "0xfffd7fff";
+        example = "0xffffffff";
+        description = lib.mdDoc ''
+          Sets the `amdgpu.ppfeaturemask` kernel option.
+          In particular, it is used here to set the overdrive bit.
+          Default is `0xfffd7fff` as it is less likely to cause flicker issues.
+          Setting it to `0xffffffff` enables all features.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable (lib.mkMerge [
+    {
+      environment.systemPackages = [ pkgs.corectrl ];
+
+      services.dbus.packages = [ pkgs.corectrl ];
+
+      users.groups.corectrl = { };
+
+      security.polkit.extraConfig = ''
+        polkit.addRule(function(action, subject) {
+            if ((action.id == "org.corectrl.helper.init" ||
+                 action.id == "org.corectrl.helperkiller.init") &&
+                subject.local == true &&
+                subject.active == true &&
+                subject.isInGroup("corectrl")) {
+                    return polkit.Result.YES;
+            }
+        });
+      '';
+    }
+
+    (lib.mkIf cfg.gpuOverclock.enable {
+      # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/drivers/gpu/drm/amd/include/amd_shared.h#n169
+      # The overdrive bit
+      boot.kernelParams = [ "amdgpu.ppfeaturemask=${cfg.gpuOverclock.ppfeaturemask}" ];
+    })
+  ]);
+
+  meta.maintainers = with lib.maintainers; [ artturin ];
+}
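A minimal usage sketch; "alice" is a hypothetical user added to the corectrl group so the polkit rule above applies, and the ppfeaturemask value is the example from the option.

{
  programs.corectrl = {
    enable = true;
    gpuOverclock.enable = true;
    gpuOverclock.ppfeaturemask = "0xffffffff";  # enable all features, may cause flicker
  };
  users.users.alice.extraGroups = [ "corectrl" ];
}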
diff --git a/nixpkgs/nixos/modules/hardware/cpu/amd-microcode.nix b/nixpkgs/nixos/modules/hardware/cpu/amd-microcode.nix
new file mode 100644
index 000000000000..3f52cb1fca3e
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/cpu/amd-microcode.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    hardware.cpu.amd.updateMicrocode = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Update the CPU microcode for AMD processors.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.hardware.cpu.amd.updateMicrocode {
+    # Microcode updates must be the first item prepended in the initrd
+    boot.initrd.prepend = mkOrder 1 [ "${pkgs.microcodeAmd}/amd-ucode.img" ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/cpu/amd-sev.nix b/nixpkgs/nixos/modules/hardware/cpu/amd-sev.nix
new file mode 100644
index 000000000000..08e1de496383
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/cpu/amd-sev.nix
@@ -0,0 +1,80 @@
+{ config, options, lib, ... }:
+with lib;
+let
+  cfgSev = config.hardware.cpu.amd.sev;
+  cfgSevGuest = config.hardware.cpu.amd.sevGuest;
+
+  optionsFor = device: group: {
+    enable = mkEnableOption (lib.mdDoc "access to the AMD ${device} device");
+    user = mkOption {
+      description = lib.mdDoc "Owner to assign to the ${device} device.";
+      type = types.str;
+      default = "root";
+    };
+    group = mkOption {
+      description = lib.mdDoc "Group to assign to the ${device} device.";
+      type = types.str;
+      default = group;
+    };
+    mode = mkOption {
+      description = lib.mdDoc "Mode to set for the ${device} device.";
+      type = types.str;
+      default = "0660";
+    };
+  };
+in
+with lib; {
+  options.hardware.cpu.amd.sev = optionsFor "SEV" "sev";
+
+  options.hardware.cpu.amd.sevGuest = optionsFor "SEV guest" "sev-guest";
+
+  config = mkMerge [
+    # /dev/sev
+    (mkIf cfgSev.enable {
+      assertions = [
+        {
+          assertion = hasAttr cfgSev.user config.users.users;
+          message = "Given user does not exist";
+        }
+        {
+          assertion = (cfgSev.group == options.hardware.cpu.amd.sev.group.default) || (hasAttr cfgSev.group config.users.groups);
+          message = "Given group does not exist";
+        }
+      ];
+
+      boot.extraModprobeConfig = ''
+        options kvm_amd sev=1
+      '';
+
+      users.groups = optionalAttrs (cfgSev.group == options.hardware.cpu.amd.sev.group.default) {
+        "${cfgSev.group}" = { };
+      };
+
+      services.udev.extraRules = with cfgSev; ''
+        KERNEL=="sev", OWNER="${user}", GROUP="${group}", MODE="${mode}"
+      '';
+    })
+
+    # /dev/sev-guest
+    (mkIf cfgSevGuest.enable {
+      assertions = [
+        {
+          assertion = hasAttr cfgSevGuest.user config.users.users;
+          message = "Given user does not exist";
+        }
+        {
+          assertion = (cfgSevGuest.group == options.hardware.cpu.amd.sevGuest.group.default) || (hasAttr cfgSevGuest.group config.users.groups);
+          message = "Given group does not exist";
+        }
+      ];
+
+      users.groups = optionalAttrs (cfgSevGuest.group == options.hardware.cpu.amd.sevGuest.group.default) {
+        "${cfgSevGuest.group}" = { };
+      };
+
+      services.udev.extraRules = with cfgSevGuest; ''
+        KERNEL=="sev-guest", OWNER="${user}", GROUP="${group}", MODE="${mode}"
+      '';
+    })
+  ];
+}
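A minimal usage sketch; with the defaults kept, the devices end up owned by root with the sev and sev-guest groups at mode 0660, as set by the udev rules above.

{
  hardware.cpu.amd.sev.enable = true;       # host side, /dev/sev
  hardware.cpu.amd.sevGuest.enable = true;  # guest side, /dev/sev-guest
}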
diff --git a/nixpkgs/nixos/modules/hardware/cpu/intel-microcode.nix b/nixpkgs/nixos/modules/hardware/cpu/intel-microcode.nix
new file mode 100644
index 000000000000..d30ebfefeeac
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/cpu/intel-microcode.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    hardware.cpu.intel.updateMicrocode = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Update the CPU microcode for Intel processors.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.hardware.cpu.intel.updateMicrocode {
+    # Microcode updates must be the first item prepended in the initrd
+    boot.initrd.prepend = mkOrder 1 [ "${pkgs.microcodeIntel}/intel-ucode.img" ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/cpu/intel-sgx.nix b/nixpkgs/nixos/modules/hardware/cpu/intel-sgx.nix
new file mode 100644
index 000000000000..38a484cb126e
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/cpu/intel-sgx.nix
@@ -0,0 +1,69 @@
+{ config, lib, ... }:
+with lib;
+let
+  cfg = config.hardware.cpu.intel.sgx;
+  defaultPrvGroup = "sgx_prv";
+in
+{
+  options.hardware.cpu.intel.sgx.enableDcapCompat = mkOption {
+    description = lib.mdDoc ''
+      Whether to enable backward compatibility for SGX software built for the
+      out-of-tree Intel SGX DCAP driver.
+
+      Creates symbolic links for the SGX devices `/dev/sgx_enclave`
+      and `/dev/sgx_provision` to make them available as
+      `/dev/sgx/enclave` and `/dev/sgx/provision`,
+      respectively.
+    '';
+    type = types.bool;
+    default = true;
+  };
+
+  options.hardware.cpu.intel.sgx.provision = {
+    enable = mkEnableOption (lib.mdDoc "access to the Intel SGX provisioning device");
+    user = mkOption {
+      description = lib.mdDoc "Owner to assign to the SGX provisioning device.";
+      type = types.str;
+      default = "root";
+    };
+    group = mkOption {
+      description = lib.mdDoc "Group to assign to the SGX provisioning device.";
+      type = types.str;
+      default = defaultPrvGroup;
+    };
+    mode = mkOption {
+      description = lib.mdDoc "Mode to set for the SGX provisioning device.";
+      type = types.str;
+      default = "0660";
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.provision.enable {
+      assertions = [
+        {
+          assertion = hasAttr cfg.provision.user config.users.users;
+          message = "Given user does not exist";
+        }
+        {
+          assertion = (cfg.provision.group == defaultPrvGroup) || (hasAttr cfg.provision.group config.users.groups);
+          message = "Given group does not exist";
+        }
+      ];
+
+      users.groups = optionalAttrs (cfg.provision.group == defaultPrvGroup) {
+        "${cfg.provision.group}" = { };
+      };
+
+      services.udev.extraRules = with cfg.provision; ''
+        SUBSYSTEM=="misc", KERNEL=="sgx_provision", OWNER="${user}", GROUP="${group}", MODE="${mode}"
+      '';
+    })
+    (mkIf cfg.enableDcapCompat {
+      services.udev.extraRules = ''
+        SUBSYSTEM=="misc", KERNEL=="sgx_enclave",   SYMLINK+="sgx/enclave"
+        SUBSYSTEM=="misc", KERNEL=="sgx_provision", SYMLINK+="sgx/provision"
+      '';
+    })
+  ];
+}
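A minimal usage sketch; the DCAP compatibility symlinks are on by default, so only provisioning access needs to be enabled explicitly.

{
  hardware.cpu.intel.sgx.provision.enable = true;
  # enableDcapCompat defaults to true and adds the /dev/sgx/* symlinks
}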
diff --git a/nixpkgs/nixos/modules/hardware/cpu/x86-msr.nix b/nixpkgs/nixos/modules/hardware/cpu/x86-msr.nix
new file mode 100644
index 000000000000..554bec1b7db1
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/cpu/x86-msr.nix
@@ -0,0 +1,91 @@
+{ lib
+, config
+, options
+, ...
+}:
+let
+  inherit (builtins) hasAttr;
+  inherit (lib) mkIf mdDoc;
+  cfg = config.hardware.cpu.x86.msr;
+  opt = options.hardware.cpu.x86.msr;
+  defaultGroup = "msr";
+  isDefaultGroup = cfg.group == defaultGroup;
+  set = "to set for devices of the `msr` kernel subsystem.";
+
+  # Generates `foo=bar` parameters to pass to the kernel.
+  # If `module = baz` is passed, generates `baz.foo=bar`.
+  # Adds double quotes on demand to handle `foo="bar baz"`.
+  kernelParam = { module ? null }: name: value:
+    assert lib.asserts.assertMsg (!lib.strings.hasInfix "=" name) "kernel parameter cannot have '=' in name";
+    let
+      key = (if module == null then "" else module + ".") + name;
+      valueString = lib.generators.mkValueStringDefault {} value;
+      quotedValueString = if lib.strings.hasInfix " " valueString
+        then lib.strings.escape ["\""] valueString
+        else valueString;
+    in "${key}=${quotedValueString}";
+  msrKernelParam = kernelParam { module = "msr"; };
+in
+{
+  options.hardware.cpu.x86.msr = with lib.options; with lib.types; {
+    enable = mkEnableOption (mdDoc "the `msr` (Model-Specific Registers) kernel module and configure `udev` rules for its devices (usually `/dev/cpu/*/msr`)");
+    owner = mkOption {
+      type = str;
+      default = "root";
+      example = "nobody";
+      description = mdDoc "Owner ${set}";
+    };
+    group = mkOption {
+      type = str;
+      default = defaultGroup;
+      example = "nobody";
+      description = mdDoc "Group ${set}";
+    };
+    mode = mkOption {
+      type = str;
+      default = "0640";
+      example = "0660";
+      description = mdDoc "Mode ${set}";
+    };
+    settings = mkOption {
+      type = submodule {
+        freeformType = attrsOf (oneOf [ bool int str ]);
+        options.allow-writes = mkOption {
+          type = nullOr (enum ["on" "off"]);
+          default = null;
+          description = "Whether to allow writes to MSRs (`\"on\"`) or not (`\"off\"`).";
+        };
+      };
+      default = {};
+      description = "Parameters for the `msr` kernel module.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = hasAttr cfg.owner config.users.users;
+        message = "Owner '${cfg.owner}' set in `${opt.owner}` is not configured via `${options.users.users}.\"${cfg.owner}\"`.";
+      }
+      {
+        assertion = isDefaultGroup || (hasAttr cfg.group config.users.groups);
+        message = "Group '${cfg.group}' set in `${opt.group}` is not configured via `${options.users.groups}.\"${cfg.group}\"`.";
+      }
+    ];
+
+    boot = {
+      kernelModules = [ "msr" ];
+      kernelParams = lib.attrsets.mapAttrsToList msrKernelParam (lib.attrsets.filterAttrs (_: value: value != null) cfg.settings);
+    };
+
+    users.groups.${cfg.group} = mkIf isDefaultGroup { };
+
+    services.udev.extraRules = ''
+      SUBSYSTEM=="msr", OWNER="${cfg.owner}", GROUP="${cfg.group}", MODE="${cfg.mode}"
+    '';
+  };
+
+  meta = with lib; {
+    maintainers = with maintainers; [ lorenzleutgeb ];
+  };
+}
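A minimal usage sketch; with these settings the module loads `msr` and adds `msr.allow-writes=on` to the kernel command line via the kernelParam helper above.

{
  hardware.cpu.x86.msr = {
    enable = true;
    settings.allow-writes = "on";  # rendered as msr.allow-writes=on
  };
}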
diff --git a/nixpkgs/nixos/modules/hardware/decklink.nix b/nixpkgs/nixos/modules/hardware/decklink.nix
new file mode 100644
index 000000000000..d179e1d7634f
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/decklink.nix
@@ -0,0 +1,16 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.decklink;
+  kernelPackages = config.boot.kernelPackages;
+in
+{
+  options.hardware.decklink.enable = lib.mkEnableOption "hardware support for the Blackmagic Design Decklink audio/video interfaces";
+
+  config = lib.mkIf cfg.enable {
+    boot.kernelModules = [ "blackmagic" "blackmagic-io" "snd_blackmagic-io" ];
+    boot.extraModulePackages = [ kernelPackages.decklink ];
+    systemd.packages = [ pkgs.blackmagic-desktop-video ];
+    systemd.services.DesktopVideoHelper.wantedBy = [ "multi-user.target" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/device-tree.nix b/nixpkgs/nixos/modules/hardware/device-tree.nix
new file mode 100644
index 000000000000..6ab13c0eb709
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/device-tree.nix
@@ -0,0 +1,226 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.deviceTree;
+
+  overlayType = types.submodule {
+    options = {
+      name = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Name of this overlay
+        '';
+      };
+
+      filter = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "*rpi*.dtb";
+        description = lib.mdDoc ''
+          Only apply to .dtb files matching glob expression.
+        '';
+      };
+
+      dtsFile = mkOption {
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          Path to .dts overlay file, overlay is applied to
+          each .dtb file matching "compatible" of the overlay.
+        '';
+        default = null;
+        example = literalExpression "./dts/overlays.dts";
+      };
+
+      dtsText = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Literal DTS contents; the overlay is applied to
+          each .dtb file matching the "compatible" string of the overlay.
+        '';
+        example = ''
+          /dts-v1/;
+          /plugin/;
+          / {
+                  compatible = "raspberrypi";
+          };
+          &{/soc} {
+                  pps {
+                          compatible = "pps-gpio";
+                          status = "okay";
+                  };
+          };
+        '';
+      };
+
+      dtboFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to .dtbo compiled overlay file.
+        '';
+      };
+    };
+  };
+
+  filterDTBs = src: if cfg.filter == null
+    then src
+    else
+      pkgs.runCommand "dtbs-filtered" {} ''
+        mkdir -p $out
+        cd ${src}
+        find . -type f -name '${cfg.filter}' -print0 \
+          | xargs -0 cp -v --no-preserve=mode --target-directory $out --parents
+      '';
+
+  filteredDTBs = filterDTBs cfg.dtbSource;
+
+  # Fill in `dtboFile` for each overlay if not set already.
+  # Existence of one of these is guarded by assertion below
+  withDTBOs = xs: flip map xs (o: o // { dtboFile =
+    let
+      includePaths = ["${getDev cfg.kernelPackage}/lib/modules/${cfg.kernelPackage.modDirVersion}/source/scripts/dtc/include-prefixes"] ++ cfg.dtboBuildExtraIncludePaths;
+      extraPreprocessorFlags = cfg.dtboBuildExtraPreprocessorFlags;
+    in
+    if o.dtboFile == null then
+      let
+        dtsFile = if o.dtsFile == null then (pkgs.writeText "dts" o.dtsText) else o.dtsFile;
+      in
+      pkgs.deviceTree.compileDTS {
+        name = "${o.name}-dtbo";
+        inherit includePaths extraPreprocessorFlags dtsFile;
+      }
+    else o.dtboFile; } );
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "hardware" "deviceTree" "base" ] "Use hardware.deviceTree.kernelPackage instead")
+  ];
+
+  options = {
+      hardware.deviceTree = {
+        enable = mkOption {
+          default = pkgs.stdenv.hostPlatform.linux-kernel.DTB or false;
+          type = types.bool;
+          description = lib.mdDoc ''
+            Build device tree files. These are used to describe the
+            non-discoverable hardware of a system.
+          '';
+        };
+
+        kernelPackage = mkOption {
+          default = config.boot.kernelPackages.kernel;
+          defaultText = literalExpression "config.boot.kernelPackages.kernel";
+          example = literalExpression "pkgs.linux_latest";
+          type = types.path;
+          description = lib.mdDoc ''
+            Kernel package providing the device tree include directory. Also used as the default source of .dtb files to apply overlays to.
+          '';
+        };
+
+        dtboBuildExtraPreprocessorFlags = mkOption {
+          default = [];
+          example = literalExpression "[ \"-DMY_DTB_DEFINE\" ]";
+          type = types.listOf types.str;
+          description = lib.mdDoc ''
+            Additional flags to pass to the preprocessor during dtbo compilations
+          '';
+        };
+
+        dtboBuildExtraIncludePaths = mkOption {
+          default = [];
+          example = literalExpression ''
+            [
+              ./my_custom_include_dir_1
+              ./custom_include_dir_2
+            ]
+          '';
+          type = types.listOf types.path;
+          description = lib.mdDoc ''
+            Additional include paths that will be passed to the preprocessor when creating the final .dts to compile into .dtbo
+          '';
+        };
+
+        dtbSource = mkOption {
+          default = "${cfg.kernelPackage}/dtbs";
+          defaultText = literalExpression "\${cfg.kernelPackage}/dtbs";
+          type = types.path;
+          description = lib.mdDoc ''
+            Path to dtb directory that overlays and other processing will be applied to. Uses
+            device trees bundled with the Linux kernel by default.
+          '';
+        };
+
+        name = mkOption {
+          default = null;
+          example = "some-dtb.dtb";
+          type = types.nullOr types.str;
+          description = lib.mdDoc ''
+            The name of an explicit dtb to be loaded, relative to the dtb base.
+            Useful in extlinux scenarios if the bootloader doesn't pick the
+            right .dtb file from FDTDIR.
+          '';
+        };
+
+        filter = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          example = "*rpi*.dtb";
+          description = lib.mdDoc ''
+            Only include .dtb files matching glob expression.
+          '';
+        };
+
+        overlays = mkOption {
+          default = [];
+          example = literalExpression ''
+            [
+              { name = "pps"; dtsFile = ./dts/pps.dts; }
+              { name = "spi";
+                dtsText = "...";
+              }
+              { name = "precompiled"; dtboFile = ./dtbos/example.dtbo; }
+            ]
+          '';
+          type = types.listOf (types.coercedTo types.path (path: {
+            name = baseNameOf path;
+            filter = null;
+            dtboFile = path;
+          }) overlayType);
+          description = lib.mdDoc ''
+            List of overlays to apply to base device-tree (.dtb) files.
+          '';
+        };
+
+        package = mkOption {
+          default = null;
+          type = types.nullOr types.path;
+          internal = true;
+          description = lib.mdDoc ''
+            A path containing the result of applying `overlays` to `kernelPackage`.
+          '';
+        };
+      };
+  };
+
+  config = mkIf (cfg.enable) {
+
+    assertions = let
+      invalidOverlay = o: (o.dtsFile == null) && (o.dtsText == null) && (o.dtboFile == null);
+    in lib.singleton {
+      assertion = lib.all (o: !invalidOverlay o) cfg.overlays;
+      message = ''
+        deviceTree overlay needs one of dtsFile, dtsText or dtboFile set.
+        Offending overlay(s):
+        ${toString (map (o: o.name) (builtins.filter invalidOverlay cfg.overlays))}
+      '';
+    };
+
+    hardware.deviceTree.package = if (cfg.overlays != [])
+      then pkgs.deviceTree.applyOverlays filteredDTBs (withDTBOs cfg.overlays)
+      else filteredDTBs;
+  };
+}
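A minimal usage sketch combining the enable, filter and overlays options; the glob and overlay entry reuse the examples from the options above, and the .dts path is a placeholder for a file in the local configuration tree.

{
  hardware.deviceTree = {
    enable = true;
    filter = "*rpi*.dtb";                         # only process matching dtbs
    overlays = [
      { name = "pps"; dtsFile = ./dts/pps.dts; }  # compiled to a .dtbo and applied
    ];
  };
}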
diff --git a/nixpkgs/nixos/modules/hardware/digitalbitbox.nix b/nixpkgs/nixos/modules/hardware/digitalbitbox.nix
new file mode 100644
index 000000000000..74e46bd34ace
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/digitalbitbox.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.digitalbitbox;
+in
+
+{
+  options.hardware.digitalbitbox = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables udev rules for Digital Bitbox devices.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.digitalbitbox;
+      defaultText = literalExpression "pkgs.digitalbitbox";
+      description = lib.mdDoc "The Digital Bitbox package to use. This can be used to install a package with udev rules that differ from the defaults.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/flipperzero.nix b/nixpkgs/nixos/modules/hardware/flipperzero.nix
new file mode 100644
index 000000000000..82f9b76fa3a7
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/flipperzero.nix
@@ -0,0 +1,18 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.flipperzero;
+
+in
+
+{
+  options.hardware.flipperzero.enable = mkEnableOption (mdDoc "udev rules and software for Flipper Zero devices");
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.qFlipper ];
+    services.udev.packages = [ pkgs.qFlipper ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/flirc.nix b/nixpkgs/nixos/modules/hardware/flirc.nix
new file mode 100644
index 000000000000..2fe40db947e4
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/flirc.nix
@@ -0,0 +1,12 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.hardware.flirc;
+in
+{
+  options.hardware.flirc.enable = lib.mkEnableOption (lib.mdDoc "software to configure a Flirc USB device");
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.flirc ];
+    services.udev.packages = [ pkgs.flirc ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/gkraken.nix b/nixpkgs/nixos/modules/hardware/gkraken.nix
new file mode 100644
index 000000000000..f427fec0a7cc
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/gkraken.nix
@@ -0,0 +1,18 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.gkraken;
+in
+{
+  options.hardware.gkraken = {
+    enable = mkEnableOption (lib.mdDoc "gkraken's udev rules for NZXT AIO liquid coolers");
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = with pkgs; [
+      gkraken
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/glasgow.nix b/nixpkgs/nixos/modules/hardware/glasgow.nix
new file mode 100644
index 000000000000..f8ebb772c47b
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/glasgow.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.glasgow;
+
+in
+{
+  options.hardware.glasgow = {
+    enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables Glasgow udev rules and ensures the 'plugdev' group exists.
+        This is a prerequisite to using Glasgow without being root.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.udev.packages = [ pkgs.glasgow ];
+    users.groups.plugdev = { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/gpgsmartcards.nix b/nixpkgs/nixos/modules/hardware/gpgsmartcards.nix
new file mode 100644
index 000000000000..68e1e5f74e2e
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/gpgsmartcards.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  # gnupg's manual describes how to setup ccid udev rules:
+  #   https://www.gnupg.org/howtos/card-howto/en/ch02s03.html
+  # gnupg folks advised me (https://dev.gnupg.org/T5409) to look at debian's rules:
+  # https://salsa.debian.org/debian/gnupg2/-/blob/debian/main/debian/scdaemon.udev
+
+  # the latest rev of the entire debian gnupg2 repo as of 2021-04-28
+  # the scdaemon.udev file was last committed on 2021-01-05 (7817a03):
+  scdaemonUdevRev = "01898735a015541e3ffb43c7245ac1e612f40836";
+
+  scdaemonRules = pkgs.fetchurl {
+    url = "https://salsa.debian.org/debian/gnupg2/-/raw/${scdaemonUdevRev}/debian/scdaemon.udev";
+    sha256 = "08v0vp6950bz7galvc92zdss89y9vcwbinmbfcdldy8x72w6rqr3";
+  };
+
+  # per debian's udev deb hook (https://man7.org/linux/man-pages/man1/dh_installudev.1.html)
+  destination = "60-scdaemon.rules";
+
+  scdaemonUdevRulesPkg = pkgs.runCommand "scdaemon-udev-rules" {} ''
+    loc="$out/lib/udev/rules.d/"
+    mkdir -p "''${loc}"
+    cp "${scdaemonRules}" "''${loc}/${destination}"
+  '';
+
+  cfg = config.hardware.gpgSmartcards;
+in {
+  options.hardware.gpgSmartcards = {
+    enable = mkEnableOption (lib.mdDoc "udev rules for gnupg smart cards");
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ scdaemonUdevRulesPkg ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/hackrf.nix b/nixpkgs/nixos/modules/hardware/hackrf.nix
new file mode 100644
index 000000000000..38ef7fa6d3d4
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/hackrf.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.hackrf;
+
+in
+{
+  options.hardware.hackrf = {
+    enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables HackRF udev rules and ensures the 'plugdev' group exists.
+        This is a prerequisite to using HackRF devices without being root, since HackRF USB descriptors will be owned by plugdev through udev.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.udev.packages = [ pkgs.hackrf ];
+    users.groups.plugdev = { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/i2c.nix b/nixpkgs/nixos/modules/hardware/i2c.nix
new file mode 100644
index 000000000000..bd4c4ebe21bd
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/i2c.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.i2c;
+in
+
+{
+  options.hardware.i2c = {
+    enable = mkEnableOption (lib.mdDoc ''
+      i2c device support. By default, access is granted to users in the "i2c"
+      group (created if it does not exist) and to any user with a seat, i.e.
+      logged in on the computer locally
+    '');
+
+    group = mkOption {
+      type = types.str;
+      default = "i2c";
+      description = lib.mdDoc ''
+        Grant access to i2c devices (/dev/i2c-*) to users in this group.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    boot.kernelModules = [ "i2c-dev" ];
+
+    users.groups = mkIf (cfg.group == "i2c") {
+      i2c = { };
+    };
+
+    services.udev.packages = lib.singleton (pkgs.writeTextFile
+      { name = "i2c-udev-rules";
+        text = ''
+          # allow members of group ${cfg.group} and users with a seat to use i2c devices
+          ACTION=="add", KERNEL=="i2c-[0-9]*", TAG+="uaccess", GROUP="${cfg.group}", MODE="660"
+        '';
+        destination = "/etc/udev/rules.d/70-i2c.rules";
+      });
+
+  };
+
+  meta.maintainers = [ maintainers.rnhmjoj ];
+
+}
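A minimal usage sketch; "alice" is a hypothetical user added to the default "i2c" group so access also works outside a local seat.

{
  hardware.i2c.enable = true;
  users.users.alice.extraGroups = [ "i2c" ];
}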
diff --git a/nixpkgs/nixos/modules/hardware/infiniband.nix b/nixpkgs/nixos/modules/hardware/infiniband.nix
new file mode 100644
index 000000000000..962883fa7972
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/infiniband.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.infiniband;
+  opensm-services = {
+    "opensm@" = {
+      enable = true;
+      description = "Starts OpenSM Infiniband fabric Subnet Managers";
+      before = [ "network.target"];
+      unitConfig = {
+        ConditionPathExists = "/sys/class/infiniband_mad/abi_version";
+      };
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.opensm}/bin/opensm --guid %I --log_file /var/log/opensm.%I.log";
+      };
+    };
+  } // (builtins.listToAttrs (map (guid: {
+    name = "opensm@${guid}";
+    value = {
+      enable = true;
+      wantedBy = [ "machines.target" ];
+      overrideStrategy = "asDropin";
+    };
+  } ) cfg.guids));
+
+in
+
+{
+  options.hardware.infiniband = {
+    enable = mkEnableOption "Infiniband support";
+    guids = mkOption {
+      type = with types; listOf str;
+      default = [];
+      example = [ "0xe8ebd30000eee2e1" ];
+      description = lib.mdDoc ''
+        A list of InfiniBand port GUIDs on the system. These are discoverable using `ibstat -p`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.initrd.kernelModules = [
+      "mlx5_core" "mlx5_ib" "ib_cm"
+      "rdma_cm" "rdma_ucm" "rpcrdma"
+      "ib_ipoib" "ib_isert" "ib_umad" "ib_uverbs"
+    ];
+    # rdma-core exposes ibstat, mstflint exposes mstconfig (which can be needed for
+    # setting link configurations), and qperf is needed to verify link speeds
+    environment.systemPackages = with pkgs; [
+      rdma-core mstflint qperf
+    ];
+    systemd.services = opensm-services;
+  };
+}
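A minimal usage sketch; the GUID is the example value from the option and would normally be taken from `ibstat -p` on the machine in question. Each listed GUID starts its own opensm@<guid> instance.

{
  hardware.infiniband = {
    enable = true;
    guids = [ "0xe8ebd30000eee2e1" ];
  };
}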
diff --git a/nixpkgs/nixos/modules/hardware/keyboard/qmk.nix b/nixpkgs/nixos/modules/hardware/keyboard/qmk.nix
new file mode 100644
index 000000000000..df3bcaeccd2e
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/keyboard/qmk.nix
@@ -0,0 +1,16 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.keyboard.qmk;
+  inherit (lib) mdDoc mkEnableOption mkIf;
+
+in
+{
+  options.hardware.keyboard.qmk = {
+    enable = mkEnableOption (mdDoc "non-root access to the firmware of QMK keyboards");
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.qmk-udev-rules ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/keyboard/teck.nix b/nixpkgs/nixos/modules/hardware/keyboard/teck.nix
new file mode 100644
index 000000000000..8376c6b9c50b
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/keyboard/teck.nix
@@ -0,0 +1,16 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.keyboard.teck;
+  inherit (lib) mdDoc mkEnableOption mkIf;
+
+in
+{
+  options.hardware.keyboard.teck = {
+    enable = mkEnableOption (mdDoc "non-root access to the firmware of TECK keyboards");
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.teck-udev-rules ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/keyboard/uhk.nix b/nixpkgs/nixos/modules/hardware/keyboard/uhk.nix
new file mode 100644
index 000000000000..ff984fa5daa6
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/keyboard/uhk.nix
@@ -0,0 +1,22 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.keyboard.uhk;
+  inherit (lib) mdDoc mkEnableOption mkIf;
+
+in
+{
+  options.hardware.keyboard.uhk = {
+    enable = mkEnableOption (mdDoc ''
+      non-root access to the firmware of UHK keyboards.
+      You need it when you want to flash new firmware onto the keyboard.
+      Access to the keyboard is granted to users in the "input" group.
+      You may want to install the uhk-agent package
+    '');
+
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.uhk-udev-rules ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/keyboard/zsa.nix b/nixpkgs/nixos/modules/hardware/keyboard/zsa.nix
new file mode 100644
index 000000000000..191fb12cca4f
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/keyboard/zsa.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.keyboard.zsa;
+  inherit (lib) mkEnableOption mkIf mdDoc;
+
+in
+{
+  options.hardware.keyboard.zsa = {
+    enable = mkEnableOption (mdDoc ''
+      udev rules for keyboards from ZSA like the ErgoDox EZ, Planck EZ and Moonlander Mark I.
+      You need it when you want to flash a new configuration on the keyboard
+      or use their live training in the browser.
+      You may want to install the wally-cli package
+    '');
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.zsa-udev-rules ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/ksm.nix b/nixpkgs/nixos/modules/hardware/ksm.nix
new file mode 100644
index 000000000000..82d94e6ab57c
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/ksm.nix
@@ -0,0 +1,38 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.ksm;
+
+in {
+  imports = [
+    (mkRenamedOptionModule [ "hardware" "enableKSM" ] [ "hardware" "ksm" "enable" ])
+  ];
+
+  options.hardware.ksm = {
+    enable = mkEnableOption (lib.mdDoc "Kernel Same-Page Merging");
+    sleep = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        How many milliseconds ksmd should sleep between scans.
+        Setting it to `null` uses the kernel's default time.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.enable-ksm = {
+      description = "Enable Kernel Same-Page Merging";
+      wantedBy = [ "multi-user.target" ];
+      script =
+        ''
+          echo 1 > /sys/kernel/mm/ksm/run
+        '' + optionalString (cfg.sleep != null)
+        ''
+          echo ${toString cfg.sleep} > /sys/kernel/mm/ksm/sleep_millisecs
+        '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/ledger.nix b/nixpkgs/nixos/modules/hardware/ledger.nix
new file mode 100644
index 000000000000..fcce4f61a870
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/ledger.nix
@@ -0,0 +1,14 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.ledger;
+
+in {
+  options.hardware.ledger.enable = mkEnableOption (lib.mdDoc "udev rules for Ledger devices");
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.ledger-udev-rules ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/logitech.nix b/nixpkgs/nixos/modules/hardware/logitech.nix
new file mode 100644
index 000000000000..9b06eb8a8b01
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/logitech.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.logitech;
+
+  vendor = "046d";
+
+  daemon = "g15daemon";
+
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "hardware" "logitech" "enable" ] [ "hardware" "logitech" "wireless" "enable" ])
+    (mkRenamedOptionModule [ "hardware" "logitech" "enableGraphical" ] [ "hardware" "logitech" "wireless" "enableGraphical" ])
+  ];
+
+  options.hardware.logitech = {
+
+    lcd = {
+      enable = mkEnableOption (lib.mdDoc "Logitech LCD Devices");
+
+      startWhenNeeded = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Only run the service when a supported device is actually plugged in.
+        '';
+      };
+
+      devices = mkOption {
+        type = types.listOf types.str;
+        default = [ "0a07" "c222" "c225" "c227" "c251" ];
+        description = lib.mdDoc ''
+          List of USB device ids supported by g15daemon.
+
+          You most likely do not need to change this.
+        '';
+      };
+    };
+
+    wireless = {
+      enable = mkEnableOption (lib.mdDoc "Logitech Wireless Devices");
+
+      enableGraphical = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable graphical support applications.";
+      };
+    };
+  };
+
+  config = lib.mkIf (cfg.wireless.enable || cfg.lcd.enable) {
+    environment.systemPackages = []
+      ++ lib.optional cfg.wireless.enable pkgs.ltunify
+      ++ lib.optional cfg.wireless.enableGraphical pkgs.solaar;
+
+    services.udev = {
+      # ltunify and solaar both provide udev rules, but the most up-to-date ones have been
+      # split out into a dedicated derivation
+
+      packages = []
+      ++ lib.optional cfg.wireless.enable pkgs.logitech-udev-rules
+      ++ lib.optional cfg.lcd.enable pkgs.g15daemon;
+
+      extraRules = ''
+        # nixos: hardware.logitech.lcd
+      '' + lib.concatMapStringsSep "\n" (
+        dev:
+          ''ACTION=="add", SUBSYSTEMS=="usb", ATTRS{idVendor}=="${vendor}", ATTRS{idProduct}=="${dev}", TAG+="systemd", ENV{SYSTEMD_WANTS}+="${daemon}.service"''
+      ) cfg.lcd.devices;
+    };
+
+    systemd.services."${daemon}" = lib.mkIf cfg.lcd.enable {
+      description = "Logitech LCD Support Daemon";
+      documentation = [ "man:g15daemon(1)" ];
+      wantedBy = lib.mkIf (! cfg.lcd.startWhenNeeded) [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.g15daemon}/bin/g15daemon";
+        # we patch it to write to /run/g15daemon/g15daemon.pid instead of
+        # /run/g15daemon.pid so systemd will do the cleanup for us.
+        PIDFile = "/run/${daemon}/g15daemon.pid";
+        PrivateTmp = true;
+        PrivateNetwork = true;
+        ProtectHome = "tmpfs";
+        ProtectSystem = "full"; # strict doesn't work
+        RuntimeDirectory = daemon;
+        Restart = "on-failure";
+      };
+    };
+  };
+}
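A minimal usage sketch; wireless support installs ltunify (plus solaar when graphical support is enabled), and the LCD option starts g15daemon on demand for the listed device ids.

{
  hardware.logitech = {
    wireless.enable = true;
    wireless.enableGraphical = true;  # pulls in solaar
    lcd.enable = true;                # g15daemon, started when a device is plugged in
  };
}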
diff --git a/nixpkgs/nixos/modules/hardware/mcelog.nix b/nixpkgs/nixos/modules/hardware/mcelog.nix
new file mode 100644
index 000000000000..be8fc8cd1925
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/mcelog.nix
@@ -0,0 +1,35 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta.maintainers = with maintainers; [ grahamc ];
+  options = {
+
+    hardware.mcelog = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the Machine Check Exception logger.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf config.hardware.mcelog.enable {
+    systemd = {
+      packages = [ pkgs.mcelog ];
+
+      services.mcelog = {
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ProtectHome = true;
+          PrivateNetwork = true;
+          PrivateTmp = true;
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/network/ath-user-regd.nix b/nixpkgs/nixos/modules/hardware/network/ath-user-regd.nix
new file mode 100644
index 000000000000..a7f023d26ce7
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/network/ath-user-regd.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  kernelVersion = config.boot.kernelPackages.kernel.version;
+  linuxKernelMinVersion = "5.8";
+  kernelPatch = pkgs.kernelPatches.ath_regd_optional // {
+    extraConfig = ''
+      ATH_USER_REGD y
+    '';
+  };
+in
+{
+  options.networking.wireless.athUserRegulatoryDomain = mkOption {
+    default = false;
+    type = types.bool;
+    description = lib.mdDoc ''
+      If enabled, sets the ATH_USER_REGD kernel config switch to true to
+      disable the enforcement of EEPROM regulatory restrictions for ath
+      drivers. Requires at least Linux ${linuxKernelMinVersion}.
+    '';
+  };
+
+  config = mkIf config.networking.wireless.athUserRegulatoryDomain {
+    assertions = singleton {
+      assertion = lessThan 0 (builtins.compareVersions kernelVersion linuxKernelMinVersion);
+      message = "ATH_USER_REGD patch for kernels older than ${linuxKernelMinVersion} not ported yet!";
+    };
+    boot.kernelPatches = [ kernelPatch ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/network/b43.nix b/nixpkgs/nixos/modules/hardware/network/b43.nix
new file mode 100644
index 000000000000..7f045f7b70f9
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/network/b43.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let kernelVersion = config.boot.kernelPackages.kernel.version; in
+
+{
+
+  ###### interface
+
+  options = {
+
+    networking.enableB43Firmware = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Turn on this option if you want firmware for the NICs supported by the b43 module.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.networking.enableB43Firmware {
+    hardware.firmware = [ pkgs.b43Firmware_5_1_138 ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/network/broadcom-43xx.nix b/nixpkgs/nixos/modules/hardware/network/broadcom-43xx.nix
new file mode 100644
index 000000000000..c92b7a0509d0
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/network/broadcom-43xx.nix
@@ -0,0 +1,3 @@
+{
+  hardware.enableRedistributableFirmware = true;
+}
diff --git a/nixpkgs/nixos/modules/hardware/network/intel-2200bg.nix b/nixpkgs/nixos/modules/hardware/network/intel-2200bg.nix
new file mode 100644
index 000000000000..e1ec8134129e
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/network/intel-2200bg.nix
@@ -0,0 +1,30 @@
+{ config, pkgs, lib, ... }:
+
+{
+
+  ###### interface
+
+  options = {
+
+    networking.enableIntel2200BGFirmware = lib.mkOption {
+      default = false;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Turn on this option if you want firmware for the Intel
+        PRO/Wireless 2200BG to be loaded automatically.  This is
+        required if you want to use this device.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = lib.mkIf config.networking.enableIntel2200BGFirmware {
+
+    hardware.firmware = [ pkgs.intel2200BGFirmware ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/network/smc-2632w/default.nix b/nixpkgs/nixos/modules/hardware/network/smc-2632w/default.nix
new file mode 100644
index 000000000000..b00286464f34
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/network/smc-2632w/default.nix
@@ -0,0 +1,9 @@
+{lib, ...}:
+
+{
+  hardware = {
+    pcmcia = {
+      firmware = [ (lib.cleanSource ./firmware) ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/network/smc-2632w/firmware/cis/SMC2632W-v1.02.cis b/nixpkgs/nixos/modules/hardware/network/smc-2632w/firmware/cis/SMC2632W-v1.02.cis
new file mode 100644
index 000000000000..5f13088c3735
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/network/smc-2632w/firmware/cis/SMC2632W-v1.02.cis
@@ -0,0 +1,8 @@
+  vers_1 5.0, "SMC", "SMC2632W", "Version 01.02", ""
+  manfid 0x0156, 0x0002
+  funcid network_adapter
+  cftable_entry 0x01 [default]
+    Vcc Vmin 3000mV Vmax 3300mV Iavg 300mA Ipeak 300mA
+    Idown 10mA
+    io 0x0000-0x003f [lines=6] [16bit]
+    irq mask 0xffff [level] [pulse]
diff --git a/nixpkgs/nixos/modules/hardware/network/zydas-zd1211.nix b/nixpkgs/nixos/modules/hardware/network/zydas-zd1211.nix
new file mode 100644
index 000000000000..5dd7f30ed82b
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/network/zydas-zd1211.nix
@@ -0,0 +1,5 @@
+{pkgs, ...}:
+
+{
+  hardware.firmware = [ pkgs.zd1211fw ];
+}
diff --git a/nixpkgs/nixos/modules/hardware/new-lg4ff.nix b/nixpkgs/nixos/modules/hardware/new-lg4ff.nix
new file mode 100644
index 000000000000..fac376eb7a75
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/new-lg4ff.nix
@@ -0,0 +1,29 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.new-lg4ff;
+  kernelPackages = config.boot.kernelPackages;
+in {
+  options.hardware.new-lg4ff = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables improved Linux module drivers for Logitech driving wheels.
+        This will replace the existing in-kernel hid-logitech modules.
+        Works most notably on the Logitech G25, G27, G29 and Driving Force (GT).
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    boot = {
+      extraModulePackages = [ kernelPackages.new-lg4ff ];
+      kernelModules = [ "hid-logitech-new" ];
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ matthiasbenaets ];
+}
diff --git a/nixpkgs/nixos/modules/hardware/nitrokey.nix b/nixpkgs/nixos/modules/hardware/nitrokey.nix
new file mode 100644
index 000000000000..e2e88a8eade4
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/nitrokey.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.nitrokey;
+
+in
+
+{
+  options.hardware.nitrokey = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables udev rules for Nitrokey devices. By default grants access
+        to users in the "nitrokey" group. You may want to install the
+        nitrokey-app package, depending on your device and needs.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.libnitrokey ];
+  };
+}
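
An illustrative usage sketch; the user `alice` is a placeholder, and the group declaration only makes sure the group referenced by the udev rules exists:

```
{
  hardware.nitrokey.enable = true;

  users.groups.nitrokey = { };
  users.users.alice = {
    isNormalUser = true;
    extraGroups = [ "nitrokey" ];
  };
}
```
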
diff --git a/nixpkgs/nixos/modules/hardware/onlykey/default.nix b/nixpkgs/nixos/modules/hardware/onlykey/default.nix
new file mode 100644
index 000000000000..59e159dce482
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/onlykey/default.nix
@@ -0,0 +1,33 @@
+{ config, lib, ... }:
+
+with lib;
+
+{
+
+  ####### interface
+
+  options = {
+
+    hardware.onlykey = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable OnlyKey device (https://crp.to/p/) support.
+        '';
+      };
+    };
+
+  };
+
+  ## As per OnlyKey's documentation (https://docs.google.com/document/d/1Go_Rs218fKUx-j_JKhddbSVTqY6P0vQO831t2MKCJC8),
+  ## it is important to add a udev rule for OnlyKey for it to work on Linux.
+
+  ####### implementation
+
+  config = mkIf config.hardware.onlykey.enable {
+    services.udev.extraRules = builtins.readFile ./onlykey.udev;
+  };
+
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/onlykey/onlykey.udev b/nixpkgs/nixos/modules/hardware/onlykey/onlykey.udev
new file mode 100644
index 000000000000..9c8873aafc9e
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/onlykey/onlykey.udev
@@ -0,0 +1,18 @@
+# UDEV Rules for OnlyKey, https://docs.crp.to/linux.html
+ATTRS{idVendor}=="1d50", ATTRS{idProduct}=="60fc", ENV{ID_MM_DEVICE_IGNORE}="1"
+ATTRS{idVendor}=="1d50", ATTRS{idProduct}=="60fc", ENV{MTP_NO_PROBE}="1"
+SUBSYSTEMS=="usb", ATTRS{idVendor}=="1d50", ATTRS{idProduct}=="60fc", MODE:="0666"
+KERNEL=="ttyACM*", ATTRS{idVendor}=="1d50", ATTRS{idProduct}=="60fc", MODE:="0666"
+
+
+# The udev rules were updated upstream without an explanation as you can
+# see in [this comment][commit]. Assuming that they have changed the
+# idVendor/idProduct, I've kept the old values.
+# TODO: Contact them upstream.
+#
+# [commit]: https://github.com/trustcrypto/trustcrypto.github.io/commit/0bcf928adaea559e75efa02ebd1040f0a15f611d
+#
+ATTRS{idVendor}=="16c0", ATTRS{idProduct}=="04[789B]?", ENV{ID_MM_DEVICE_IGNORE}="1"
+ATTRS{idVendor}=="16c0", ATTRS{idProduct}=="04[789A]?", ENV{MTP_NO_PROBE}="1"
+SUBSYSTEMS=="usb", ATTRS{idVendor}=="16c0", ATTRS{idProduct}=="04[789ABCD]?", GROUP="plugdev"
+KERNEL=="ttyACM*", ATTRS{idVendor}=="16c0", ATTRS{idProduct}=="04[789B]?", GROUP="plugdev"
diff --git a/nixpkgs/nixos/modules/hardware/opengl.nix b/nixpkgs/nixos/modules/hardware/opengl.nix
new file mode 100644
index 000000000000..0ff018ddc47d
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/opengl.nix
@@ -0,0 +1,161 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.opengl;
+
+  kernelPackages = config.boot.kernelPackages;
+
+  videoDrivers = config.services.xserver.videoDrivers;
+
+  package = pkgs.buildEnv {
+    name = "opengl-drivers";
+    paths = [ cfg.package ] ++ cfg.extraPackages;
+  };
+
+  package32 = pkgs.buildEnv {
+    name = "opengl-drivers-32bit";
+    paths = [ cfg.package32 ] ++ cfg.extraPackages32;
+  };
+
+in
+
+{
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "xserver" "vaapiDrivers" ] [ "hardware" "opengl" "extraPackages" ])
+    (mkRemovedOptionModule [ "hardware" "opengl" "s3tcSupport" ] "S3TC support is now always enabled in Mesa.")
+  ];
+
+  options = {
+
+    hardware.opengl = {
+      enable = mkOption {
+        description = lib.mdDoc ''
+          Whether to enable OpenGL drivers. This is needed to enable
+          OpenGL support in X11 systems, as well as for Wayland compositors
+          like sway and Weston. It is enabled by default
+          by the corresponding modules, so you usually do not have to
+          set it yourself unless there is no module for your Wayland
+          compositor of choice. See services.xserver.enable and
+          programs.sway.enable.
+        '';
+        type = types.bool;
+        default = false;
+      };
+
+      driSupport = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable accelerated OpenGL rendering through the
+          Direct Rendering Interface (DRI).
+        '';
+      };
+
+      driSupport32Bit = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          On 64-bit systems, whether to support Direct Rendering for
+          32-bit applications (such as Wine). This is currently only
+          supported for the `nvidia` and `Mesa` drivers.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        internal = true;
+        description = lib.mdDoc ''
+          The package that provides the OpenGL implementation.
+        '';
+      };
+
+      package32 = mkOption {
+        type = types.package;
+        internal = true;
+        description = lib.mdDoc ''
+          The package that provides the 32-bit OpenGL implementation on
+          64-bit systems. Used when {option}`driSupport32Bit` is
+          set.
+        '';
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "with pkgs; [ intel-media-driver intel-ocl intel-vaapi-driver ]";
+        description = lib.mdDoc ''
+          Additional packages to add to OpenGL drivers.
+          This can be used to add OpenCL drivers, VA-API/VDPAU drivers etc.
+
+          ::: {.note}
+          intel-media-driver supports hardware Broadwell (2014) or newer. Older hardware should use the mostly unmaintained intel-vaapi-driver.
+          :::
+        '';
+      };
+
+      extraPackages32 = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "with pkgs.pkgsi686Linux; [ intel-media-driver intel-vaapi-driver ]";
+        description = lib.mdDoc ''
+          Additional packages to add to 32-bit OpenGL drivers on 64-bit systems.
+          Used when {option}`driSupport32Bit` is set. This can be used to add OpenCL drivers, VA-API/VDPAU drivers etc.
+
+          ::: {.note}
+          intel-media-driver supports hardware Broadwell (2014) or newer. Older hardware should use the mostly unmaintained intel-vaapi-driver.
+          :::
+        '';
+      };
+
+      setLdLibraryPath = mkOption {
+        type = types.bool;
+        internal = true;
+        default = false;
+        description = lib.mdDoc ''
+          Whether the `LD_LIBRARY_PATH` environment variable
+          should be set to the locations of driver libraries. Drivers which
+          rely on overriding libraries should set this to true. Drivers which
+          support `libglvnd` and other dispatch libraries
+          instead of overriding libraries should not set this.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.driSupport32Bit -> pkgs.stdenv.isx86_64;
+        message = "Option driSupport32Bit only makes sense on a 64-bit system.";
+      }
+      { assertion = cfg.driSupport32Bit -> (config.boot.kernelPackages.kernel.features.ia32Emulation or false);
+        message = "Option driSupport32Bit requires a kernel that supports 32bit emulation";
+      }
+    ];
+
+    systemd.tmpfiles.rules = [
+      "L+ /run/opengl-driver - - - - ${package}"
+      (
+        if pkgs.stdenv.isi686 then
+          "L+ /run/opengl-driver-32 - - - - opengl-driver"
+        else if cfg.driSupport32Bit then
+          "L+ /run/opengl-driver-32 - - - - ${package32}"
+        else
+          "r /run/opengl-driver-32"
+      )
+    ];
+
+    environment.sessionVariables.LD_LIBRARY_PATH = mkIf cfg.setLdLibraryPath
+      ([ "/run/opengl-driver/lib" ] ++ optional cfg.driSupport32Bit "/run/opengl-driver-32/lib");
+
+    hardware.opengl.package = mkDefault pkgs.mesa.drivers;
+    hardware.opengl.package32 = mkDefault pkgs.pkgsi686Linux.mesa.drivers;
+
+    boot.extraModulePackages = optional (elem "virtualbox" videoDrivers) kernelPackages.virtualboxGuestAdditions;
+  };
+}
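
An illustrative configuration sketch combining the options above (the extra driver package shown is the module's own example, not a recommendation):

```
{ pkgs, ... }:
{
  hardware.opengl = {
    enable = true;
    driSupport = true;
    # Only valid on x86_64 with a kernel providing IA32 emulation.
    driSupport32Bit = true;
    extraPackages = with pkgs; [ intel-media-driver ];
  };
}
```
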
diff --git a/nixpkgs/nixos/modules/hardware/openrazer.nix b/nixpkgs/nixos/modules/hardware/openrazer.nix
new file mode 100644
index 000000000000..abbafaee8950
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/openrazer.nix
@@ -0,0 +1,146 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.openrazer;
+  kernelPackages = config.boot.kernelPackages;
+
+  toPyBoolStr = b: if b then "True" else "False";
+
+  daemonExe = "${pkgs.openrazer-daemon}/bin/openrazer-daemon --config ${daemonConfFile}";
+
+  daemonConfFile = pkgs.writeTextFile {
+    name = "razer.conf";
+    text = ''
+      [General]
+      verbose_logging = ${toPyBoolStr cfg.verboseLogging}
+
+      [Startup]
+      sync_effects_enabled = ${toPyBoolStr cfg.syncEffectsEnabled}
+      devices_off_on_screensaver = ${toPyBoolStr cfg.devicesOffOnScreensaver}
+      mouse_battery_notifier = ${toPyBoolStr cfg.mouseBatteryNotifier}
+
+      [Statistics]
+      key_statistics = ${toPyBoolStr cfg.keyStatistics}
+    '';
+  };
+
+  dbusServiceFile = pkgs.writeTextFile rec {
+    name = "org.razer.service";
+    destination = "/share/dbus-1/services/${name}";
+    text = ''
+      [D-BUS Service]
+      Name=org.razer
+      Exec=${daemonExe}
+      SystemdService=openrazer-daemon.service
+    '';
+  };
+
+  drivers = [
+    "razerkbd"
+    "razermouse"
+    "razerfirefly"
+    "razerkraken"
+    "razermug"
+    "razercore"
+  ];
+in
+{
+  options = {
+    hardware.openrazer = {
+      enable = mkEnableOption (lib.mdDoc ''
+        OpenRazer drivers and userspace daemon
+      '');
+
+      verboseLogging = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable verbose logging. Logs debug messages.
+        '';
+      };
+
+      syncEffectsEnabled = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Set the sync effects flag to true so any assignment of
+          effects will work across devices.
+        '';
+      };
+
+      devicesOffOnScreensaver = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Turn off the devices when the system's screensaver kicks in.
+        '';
+      };
+
+      mouseBatteryNotifier = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Mouse battery notifier.
+        '';
+      };
+
+      keyStatistics = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Collects the number of keypresses per hour per key, used to
+          generate a heatmap.
+        '';
+      };
+
+      users = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Usernames to be added to the "openrazer" group, so that they
+          can start and interact with the OpenRazer userspace daemon.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.extraModulePackages = [ kernelPackages.openrazer ];
+    boot.kernelModules = drivers;
+
+    # Makes the man pages available so you can successfully run
+    # > systemctl --user help openrazer-daemon
+    environment.systemPackages = [ pkgs.python3Packages.openrazer-daemon.man ];
+
+    services.udev.packages = [ kernelPackages.openrazer ];
+    services.dbus.packages = [ dbusServiceFile ];
+
+    # A user must be a member of the openrazer group in order to start
+    # the openrazer-daemon. Therefore we make sure that the group
+    # exists.
+    users.groups.openrazer = {
+      members = cfg.users;
+    };
+
+    systemd.user.services.openrazer-daemon = {
+      description = "Daemon to manage razer devices in userspace";
+      unitConfig.Documentation = "man:openrazer-daemon(8)";
+      # Requires a graphical session so the daemon knows when the screensaver
+      # starts. See the 'devicesOffOnScreensaver' option.
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      serviceConfig = {
+        Type = "dbus";
+        BusName = "org.razer";
+        ExecStart = "${daemonExe} --foreground";
+        Restart = "always";
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ roelvandijk ];
+  };
+}
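
A hypothetical usage sketch; `alice` is a placeholder username that will be added to the `openrazer` group so it can talk to the daemon:

```
{
  hardware.openrazer = {
    enable = true;
    users = [ "alice" ];
    devicesOffOnScreensaver = false;
  };
}
```
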
diff --git a/nixpkgs/nixos/modules/hardware/opentabletdriver.nix b/nixpkgs/nixos/modules/hardware/opentabletdriver.nix
new file mode 100644
index 000000000000..e3f418abce4f
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/opentabletdriver.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.opentabletdriver;
+in
+{
+  meta.maintainers = with lib.maintainers; [ thiagokokada ];
+
+  options = {
+    hardware.opentabletdriver = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable OpenTabletDriver udev rules, user service and blacklist kernel
+          modules known to conflict with OpenTabletDriver.
+        '';
+      };
+
+      blacklistedKernelModules = mkOption {
+        type = types.listOf types.str;
+        default = [ "hid-uclogic" "wacom" ];
+        description = lib.mdDoc ''
+          Blacklist of kernel modules known to conflict with OpenTabletDriver.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.opentabletdriver;
+        defaultText = literalExpression "pkgs.opentabletdriver";
+        description = lib.mdDoc ''
+          OpenTabletDriver derivation to use.
+        '';
+      };
+
+      daemon = {
+        enable = mkOption {
+          default = true;
+          type = types.bool;
+          description = lib.mdDoc ''
+            Whether to start OpenTabletDriver daemon as a systemd user service.
+          '';
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    services.udev.packages = [ cfg.package ];
+
+    boot.blacklistedKernelModules = cfg.blacklistedKernelModules;
+
+    systemd.user.services.opentabletdriver = with pkgs; mkIf cfg.daemon.enable {
+      description = "Open source, cross-platform, user-mode tablet driver";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cfg.package}/bin/otd-daemon";
+        Restart = "on-failure";
+      };
+    };
+  };
+}
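
An illustrative sketch of a typical configuration; the values shown are the module defaults, spelled out for clarity:

```
{
  hardware.opentabletdriver = {
    enable = true;
    daemon.enable = true;
    blacklistedKernelModules = [ "hid-uclogic" "wacom" ];
  };
}
```
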
diff --git a/nixpkgs/nixos/modules/hardware/pcmcia.nix b/nixpkgs/nixos/modules/hardware/pcmcia.nix
new file mode 100644
index 000000000000..f7a5565d773e
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/pcmcia.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  pcmciaUtils = pkgs.pcmciaUtils.passthru.function {
+    inherit (config.hardware.pcmcia) firmware config;
+  };
+
+in
+
+
+{
+  ###### interface
+
+  options = {
+
+    hardware.pcmcia = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable this option to support PCMCIA cards.
+        '';
+      };
+
+      firmware = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          List of firmware used to handle specific PCMCIA cards.
+        '';
+      };
+
+      config = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          Path to the configuration file which maps the memory, IRQs
+          and ports used by the PCMCIA hardware.
+        '';
+      };
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf config.hardware.pcmcia.enable {
+
+    boot.kernelModules = [ "pcmcia" ];
+
+    services.udev.packages = [ pcmciaUtils ];
+
+    environment.systemPackages = [ pcmciaUtils ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/printers.nix b/nixpkgs/nixos/modules/hardware/printers.nix
new file mode 100644
index 000000000000..846ff6f3fb4f
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/printers.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.hardware.printers;
+  ppdOptionsString = options: optionalString (options != {})
+    (concatStringsSep " "
+      (mapAttrsToList (name: value: "-o '${name}'='${value}'") options)
+    );
+  ensurePrinter = p: ''
+    ${pkgs.cups}/bin/lpadmin -p '${p.name}' -E \
+      ${optionalString (p.location != null) "-L '${p.location}'"} \
+      ${optionalString (p.description != null) "-D '${p.description}'"} \
+      -v '${p.deviceUri}' \
+      -m '${p.model}' \
+      ${ppdOptionsString p.ppdOptions}
+  '';
+  ensureDefaultPrinter = name: ''
+    ${pkgs.cups}/bin/lpadmin -d '${name}'
+  '';
+
+  # "graph but not # or /" can't be implemented as regex alone due to missing lookahead support
+  noInvalidChars = str: all (c: c != "#" && c != "/") (stringToCharacters str);
+  printerName = (types.addCheck (types.strMatching "[[:graph:]]+") noInvalidChars)
+    // { description = "printable string without spaces, # and /"; };
+
+
+in {
+  options = {
+    hardware.printers = {
+      ensureDefaultPrinter = mkOption {
+        type = types.nullOr printerName;
+        default = null;
+        description = lib.mdDoc ''
+          Ensures the named printer is the default CUPS printer / printer queue.
+        '';
+      };
+      ensurePrinters = mkOption {
+        description = lib.mdDoc ''
+          Will regularly ensure that the given CUPS printers are configured as declared here.
+          If a printer's options are manually changed afterwards, they will be overwritten eventually.
+          This option will never delete any printer, even if removed from this list.
+          You can check existing printers with {command}`lpstat -s`
+          and remove printers with {command}`lpadmin -x <printer-name>`.
+          Printers not listed here can still be manually configured.
+        '';
+        default = [];
+        type = types.listOf (types.submodule {
+          options = {
+            name = mkOption {
+              type = printerName;
+              example = "BrotherHL_Workroom";
+              description = lib.mdDoc ''
+                Name of the printer / printer queue.
+                May contain any printable characters except "/", "#", and space.
+              '';
+            };
+            location = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "Workroom";
+              description = lib.mdDoc ''
+                Optional human-readable location.
+              '';
+            };
+            description = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "Brother HL-5140";
+              description = lib.mdDoc ''
+                Optional human-readable description.
+              '';
+            };
+            deviceUri = mkOption {
+              type = types.str;
+              example = literalExpression ''
+                "ipp://printserver.local/printers/BrotherHL_Workroom"
+                "usb://HP/DESKJET%20940C?serial=CN16E6C364BH"
+              '';
+              description = lib.mdDoc ''
+                How to reach the printer.
+                {command}`lpinfo -v` shows a list of supported device URIs and schemes.
+              '';
+            };
+            model = mkOption {
+              type = types.str;
+              example = literalExpression ''
+                "gutenprint.''${lib.versions.majorMinor (lib.getVersion pkgs.gutenprint)}://brother-hl-5140/expert"
+              '';
+              description = lib.mdDoc ''
+                Location of the ppd driver file for the printer.
+                {command}`lpinfo -m` shows a list of supported models.
+              '';
+            };
+            ppdOptions = mkOption {
+              type = types.attrsOf types.str;
+              example = {
+                PageSize = "A4";
+                Duplex = "DuplexNoTumble";
+              };
+              default = {};
+              description = lib.mdDoc ''
+                Sets PPD options for the printer.
+                {command}`lpoptions [-p printername] -l` shows supported PPD options for the given printer.
+              '';
+            };
+          };
+        });
+      };
+    };
+  };
+
+  config = mkIf (cfg.ensurePrinters != [] && config.services.printing.enable) {
+    systemd.services.ensure-printers = {
+      description = "Ensure NixOS-configured CUPS printers";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "cups.service" ];
+      after = [ "cups.service" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+
+      script = concatStringsSep "\n" [
+        (concatMapStrings ensurePrinter cfg.ensurePrinters)
+        (optionalString (cfg.ensureDefaultPrinter != null)
+          (ensureDefaultPrinter cfg.ensureDefaultPrinter))
+        # Note: if cupsd is "stateless" the service can't be stopped,
+        # otherwise the configuration will be wiped on the next start.
+        (optionalString (with config.services.printing; startWhenNeeded && !stateless)
+          "systemctl stop cups.service")
+      ];
+    };
+  };
+}
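
An illustrative declaration built from the examples in the option descriptions above; the model string is a placeholder to be replaced with a value from `lpinfo -m`:

```
{
  # The module only takes effect when CUPS itself is enabled.
  services.printing.enable = true;

  hardware.printers = {
    ensureDefaultPrinter = "BrotherHL_Workroom";
    ensurePrinters = [
      {
        name = "BrotherHL_Workroom";
        location = "Workroom";
        description = "Brother HL-5140";
        deviceUri = "ipp://printserver.local/printers/BrotherHL_Workroom";
        model = "drv:///sample.drv/generic.ppd";
        ppdOptions = {
          PageSize = "A4";
          Duplex = "DuplexNoTumble";
        };
      }
    ];
  };
}
```
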
diff --git a/nixpkgs/nixos/modules/hardware/raid/hpsa.nix b/nixpkgs/nixos/modules/hardware/raid/hpsa.nix
new file mode 100644
index 000000000000..2934cd19a8c1
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/raid/hpsa.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  hpssacli = pkgs.stdenv.mkDerivation rec {
+    pname = "hpssacli";
+    version = "2.40-13.0";
+
+    src = pkgs.fetchurl {
+      urls = [
+        "https://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/${pname}-${version}_amd64.deb"
+        "http://apt.netangels.net/pool/main/h/hpssacli/${pname}-${version}_amd64.deb"
+      ];
+      sha256 = "11w7fwk93lmfw0yya4jpjwdmgjimqxx6412sqa166g1pz4jil4sw";
+    };
+
+    nativeBuildInputs = [ pkgs.dpkg ];
+
+    unpackPhase = "dpkg -x $src ./";
+
+    installPhase = ''
+      mkdir -p $out/bin $out/share/doc $out/share/man
+      mv opt/hp/hpssacli/bld/{hpssascripting,hprmstr,hpssacli} $out/bin/
+      mv opt/hp/hpssacli/bld/*.{license,txt}                   $out/share/doc/
+      mv usr/man                                               $out/share/
+
+      for file in $out/bin/*; do
+        chmod +w $file
+        patchelf --set-interpreter "$(cat $NIX_CC/nix-support/dynamic-linker)" \
+                 --set-rpath ${lib.makeLibraryPath [ pkgs.stdenv.cc.cc ]} \
+                 $file
+      done
+    '';
+
+    dontStrip = true;
+
+    meta = with lib; {
+      description = "HP Smart Array CLI";
+      homepage = "https://downloads.linux.hpe.com/SDR/downloads/MCP/Ubuntu/pool/non-free/";
+      license = licenses.unfreeRedistributable;
+      platforms = [ "x86_64-linux" ];
+      maintainers = with maintainers; [ ];
+    };
+  };
+in {
+  ###### interface
+
+  options = {
+    hardware.raid.HPSmartArray = {
+      enable = mkEnableOption (lib.mdDoc "HP Smart Array kernel modules and CLI utility");
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.hardware.raid.HPSmartArray.enable {
+
+    boot.initrd.kernelModules = [ "sg" ]; /* hpssacli wants it */
+    boot.initrd.availableKernelModules = [ "hpsa" ];
+
+    environment.systemPackages = [ hpssacli ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/rtl-sdr.nix b/nixpkgs/nixos/modules/hardware/rtl-sdr.nix
new file mode 100644
index 000000000000..7f462005f157
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/rtl-sdr.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.rtl-sdr;
+
+in {
+  options.hardware.rtl-sdr = {
+    enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables rtl-sdr udev rules, ensures 'plugdev' group exists, and blacklists DVB kernel modules.
+        This is a prerequisite to using devices supported by rtl-sdr without being root, since rtl-sdr USB descriptors will be owned by plugdev through udev.
+       '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    boot.blacklistedKernelModules = [ "dvb_usb_rtl28xxu" "e4000" "rtl2832" ];
+    services.udev.packages = [ pkgs.rtl-sdr ];
+    users.groups.plugdev = {};
+  };
+}
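
A hypothetical usage sketch; `alice` is a placeholder user gaining device access through the `plugdev` group created by the module:

```
{
  hardware.rtl-sdr.enable = true;

  users.users.alice = {
    isNormalUser = true;
    extraGroups = [ "plugdev" ];
  };
}
```
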
diff --git a/nixpkgs/nixos/modules/hardware/saleae-logic.nix b/nixpkgs/nixos/modules/hardware/saleae-logic.nix
new file mode 100644
index 000000000000..f144814a06b7
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/saleae-logic.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.hardware.saleae-logic;
+in
+{
+  options.hardware.saleae-logic = {
+    enable = lib.mkEnableOption (lib.mdDoc "udev rules for Saleae Logic devices");
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.saleae-logic-2;
+      defaultText = lib.literalExpression "pkgs.saleae-logic-2";
+      description = lib.mdDoc ''
+        Saleae Logic package to use.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.udev.packages = [ cfg.package ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ chivay ];
+}
diff --git a/nixpkgs/nixos/modules/hardware/sata.nix b/nixpkgs/nixos/modules/hardware/sata.nix
new file mode 100644
index 000000000000..5330ba9268b5
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/sata.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) mkEnableOption mkIf mkOption types;
+
+  cfg = config.hardware.sata.timeout;
+
+  buildRule = d:
+    lib.concatStringsSep ", " [
+      ''ACTION=="add"''
+      ''SUBSYSTEM=="block"''
+      ''ENV{ID_${lib.toUpper d.idBy}}=="${d.name}"''
+      ''TAG+="systemd"''
+      ''ENV{SYSTEMD_WANTS}="${unitName d}"''
+    ];
+
+  devicePath = device:
+    "/dev/disk/by-${device.idBy}/${device.name}";
+
+  unitName = device:
+    "sata-timeout-${lib.strings.sanitizeDerivationName device.name}";
+
+  startScript =
+    pkgs.writeShellScript "sata-timeout.sh" ''
+      set -eEuo pipefail
+
+      device="$1"
+
+      ${pkgs.smartmontools}/bin/smartctl \
+        -l scterc,${toString cfg.deciSeconds},${toString cfg.deciSeconds} \
+        --quietmode errorsonly \
+        "$device"
+    '';
+
+in
+{
+  meta.maintainers = with lib.maintainers; [ peterhoeg ];
+
+  options.hardware.sata.timeout = {
+    enable = mkEnableOption (lib.mdDoc "SATA drive timeouts");
+
+    deciSeconds = mkOption {
+      example = 70;
+      type = types.int;
+      description = lib.mdDoc ''
+        Set SCT Error Recovery Control timeout in deciseconds for use in RAID configurations.
+
+        Values are as follows:
+           0 = disable SCT ERT
+          70 = default in consumer drives (7 seconds)
+
+        Maximum is disk dependent but probably 60 seconds.
+      '';
+    };
+
+    drives = mkOption {
+      description = lib.mdDoc "List of drives for which to configure the timeout.";
+      type = types.listOf
+        (types.submodule {
+          options = {
+            name = mkOption {
+              description = lib.mdDoc "Drive name without the full path.";
+              type = types.str;
+            };
+
+            idBy = mkOption {
+              description = lib.mdDoc "The method to identify the drive.";
+              type = types.enum [ "path" "wwn" ];
+              default = "path";
+            };
+          };
+        });
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.extraRules = lib.concatMapStringsSep "\n" buildRule cfg.drives;
+
+    systemd.services = lib.listToAttrs (map
+      (e:
+        lib.nameValuePair (unitName e) {
+          description = "SATA timeout for ${e.name}";
+          wantedBy = [ "sata-timeout.target" ];
+          serviceConfig = {
+            Type = "oneshot";
+            ExecStart = "${startScript} '${devicePath e}'";
+            PrivateTmp = true;
+            PrivateNetwork = true;
+            ProtectHome = "tmpfs";
+            ProtectSystem = "strict";
+          };
+        }
+      )
+      cfg.drives);
+
+    systemd.targets.sata-timeout = {
+      description = "SATA timeout";
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
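
An illustrative sketch; the drive name is a placeholder for an entry under /dev/disk/by-path/:

```
{
  hardware.sata.timeout = {
    enable = true;
    deciSeconds = 70; # 7 seconds, the usual consumer-drive default
    drives = [
      {
        name = "pci-0000:00:17.0-ata-1";
        idBy = "path";
      }
    ];
  };
}
```
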
diff --git a/nixpkgs/nixos/modules/hardware/sensor/hddtemp.nix b/nixpkgs/nixos/modules/hardware/sensor/hddtemp.nix
new file mode 100644
index 000000000000..1a3d211b858b
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/sensor/hddtemp.nix
@@ -0,0 +1,81 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) mkIf mkOption types;
+
+  cfg = config.hardware.sensor.hddtemp;
+
+  wrapper = pkgs.writeShellScript "hddtemp-wrapper" ''
+    set -eEuo pipefail
+
+    file=/var/lib/hddtemp/hddtemp.db
+
+    drives=(${toString (map (e: ''$(realpath ${lib.escapeShellArg e}) '') cfg.drives)})
+
+    cp ${pkgs.hddtemp}/share/hddtemp/hddtemp.db $file
+    ${lib.concatMapStringsSep "\n" (e: "echo ${lib.escapeShellArg e} >> $file") cfg.dbEntries}
+
+    exec ${pkgs.hddtemp}/bin/hddtemp ${lib.escapeShellArgs cfg.extraArgs} \
+      --daemon \
+      --unit=${cfg.unit} \
+      --file=$file \
+      ''${drives[@]}
+  '';
+
+in
+{
+  meta.maintainers = with lib.maintainers; [ peterhoeg ];
+
+  ###### interface
+
+  options = {
+    hardware.sensor.hddtemp = {
+      enable = mkOption {
+        description = lib.mdDoc ''
+          Enable this option to support HDD/SSD temperature sensors.
+        '';
+        type = types.bool;
+        default = false;
+      };
+
+      drives = mkOption {
+        description = lib.mdDoc "List of drives to monitor. If you pass /dev/disk/by-path/* entries the symlinks will be resolved as hddtemp doesn't like names with colons.";
+        type = types.listOf types.str;
+      };
+
+      unit = mkOption {
+        description = lib.mdDoc "Celsius or Fahrenheit";
+        type = types.enum [ "C" "F" ];
+        default = "C";
+      };
+
+      dbEntries = mkOption {
+        description = lib.mdDoc "Additional DB entries";
+        type = types.listOf types.str;
+        default = [ ];
+      };
+
+      extraArgs = mkOption {
+        description = lib.mdDoc "Additional arguments passed to the daemon.";
+        type = types.listOf types.str;
+        default = [ ];
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.hddtemp = {
+      description = "HDD/SSD temperature";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = wrapper;
+        StateDirectory = "hddtemp";
+        PrivateTmp = true;
+        ProtectHome = "tmpfs";
+        ProtectSystem = "strict";
+      };
+    };
+  };
+}
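
An illustrative sketch; the drive path is a placeholder, and by-path symlinks are resolved by the wrapper as described above:

```
{
  hardware.sensor.hddtemp = {
    enable = true;
    unit = "C";
    drives = [ "/dev/disk/by-path/pci-0000:00:17.0-ata-1" ];
  };
}
```
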
diff --git a/nixpkgs/nixos/modules/hardware/sensor/iio.nix b/nixpkgs/nixos/modules/hardware/sensor/iio.nix
new file mode 100644
index 000000000000..6f7b1dc1f7f8
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/sensor/iio.nix
@@ -0,0 +1,35 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+
+  options = {
+    hardware.sensor.iio = {
+      enable = mkOption {
+        description = lib.mdDoc ''
+          Enable this option to support IIO sensors with iio-sensor-proxy.
+
+          IIO sensors are used for orientation and ambient light
+          sensors on some mobile devices.
+        '';
+        type = types.bool;
+        default = false;
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.hardware.sensor.iio.enable {
+
+    boot.initrd.availableKernelModules = [ "hid-sensor-hub" ];
+
+    environment.systemPackages = with pkgs; [ iio-sensor-proxy ];
+
+    services.dbus.packages = with pkgs; [ iio-sensor-proxy ];
+    services.udev.packages = with pkgs; [ iio-sensor-proxy ];
+    systemd.packages = with pkgs; [ iio-sensor-proxy ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/steam-hardware.nix b/nixpkgs/nixos/modules/hardware/steam-hardware.nix
new file mode 100644
index 000000000000..07edf6870390
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/steam-hardware.nix
@@ -0,0 +1,32 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.steam-hardware;
+
+in
+
+{
+  options.hardware.steam-hardware = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable udev rules for Steam hardware such as the Steam Controller, other supported controllers and the HTC Vive";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [
+      pkgs.steamPackages.steam
+    ];
+
+    # The uinput module needs to be loaded in order to trigger the udev rules
+    # defined in the steam package for setting permissions on /dev/uinput.
+    #
+    # If the udev rules are not triggered, some controllers won't work with
+    # steam.
+    boot.kernelModules = [ "uinput" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/system-76.nix b/nixpkgs/nixos/modules/hardware/system-76.nix
new file mode 100644
index 000000000000..3fb2c10a6e3b
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/system-76.nix
@@ -0,0 +1,89 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  inherit (lib) literalExpression mkOption mkEnableOption types mkIf mkMerge optional versionOlder;
+  cfg = config.hardware.system76;
+  opt = options.hardware.system76;
+
+  kpkgs = config.boot.kernelPackages;
+  modules = [ "system76" "system76-io" ] ++ (optional (versionOlder kpkgs.kernel.version "5.5") "system76-acpi");
+  modulePackages = map (m: kpkgs.${m}) modules;
+  moduleConfig = mkIf cfg.kernel-modules.enable {
+    boot.extraModulePackages = modulePackages;
+
+    boot.kernelModules = modules;
+
+    services.udev.packages = modulePackages;
+  };
+
+  firmware-pkg = pkgs.system76-firmware;
+  firmwareConfig = mkIf cfg.firmware-daemon.enable {
+    # Make system76-firmware-cli usable by root from the command line.
+    environment.systemPackages = [ firmware-pkg ];
+
+    services.dbus.packages = [ firmware-pkg ];
+
+    systemd.services.system76-firmware-daemon = {
+      description = "The System76 Firmware Daemon";
+
+      serviceConfig = {
+        ExecStart = "${firmware-pkg}/bin/system76-firmware-daemon";
+
+        Restart = "on-failure";
+      };
+
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+
+  power-pkg = config.boot.kernelPackages.system76-power;
+  powerConfig = mkIf cfg.power-daemon.enable {
+    # Make system76-power usable by root from the command line.
+    environment.systemPackages = [ power-pkg ];
+
+    services.dbus.packages = [ power-pkg ];
+
+    systemd.services.system76-power = {
+      description = "System76 Power Daemon";
+      serviceConfig = {
+        ExecStart = "${power-pkg}/bin/system76-power daemon";
+        Restart = "on-failure";
+        Type = "dbus";
+        BusName = "com.system76.PowerDaemon";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+in {
+  options = {
+    hardware.system76 = {
+      enableAll = mkEnableOption (lib.mdDoc "all recommended configuration for system76 systems");
+
+      firmware-daemon.enable = mkOption {
+        default = cfg.enableAll;
+        defaultText = literalExpression "config.${opt.enableAll}";
+        example = true;
+        description = lib.mdDoc "Whether to enable the system76 firmware daemon";
+        type = types.bool;
+      };
+
+      kernel-modules.enable = mkOption {
+        default = cfg.enableAll;
+        defaultText = literalExpression "config.${opt.enableAll}";
+        example = true;
+        description = lib.mdDoc "Whether to make the system76 out-of-tree kernel modules available";
+        type = types.bool;
+      };
+
+      power-daemon.enable = mkOption {
+        default = cfg.enableAll;
+        defaultText = literalExpression "config.${opt.enableAll}";
+        example = true;
+        description = lib.mdDoc "Whether to enable the system76 power daemon";
+        type = types.bool;
+      };
+    };
+  };
+
+  config = mkMerge [ moduleConfig firmwareConfig powerConfig ];
+}
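
A minimal usage sketch; `enableAll` turns on the kernel modules, firmware daemon and power daemon, each of which can also be toggled individually:

```
{
  hardware.system76.enableAll = true;
}
```
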
diff --git a/nixpkgs/nixos/modules/hardware/tuxedo-keyboard.nix b/nixpkgs/nixos/modules/hardware/tuxedo-keyboard.nix
new file mode 100644
index 000000000000..fd8b48a5e9ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/tuxedo-keyboard.nix
@@ -0,0 +1,35 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.tuxedo-keyboard;
+  tuxedo-keyboard = config.boot.kernelPackages.tuxedo-keyboard;
+in
+  {
+    options.hardware.tuxedo-keyboard = {
+      enable = mkEnableOption (lib.mdDoc ''
+          the tuxedo-keyboard driver.
+
+          To configure the driver, pass the options to the {option}`boot.kernelParams` configuration.
+          There are several parameters you can change. It's best to check the source code description to see which parameters are supported.
+          You can find all the supported parameters at: <https://github.com/tuxedocomputers/tuxedo-keyboard#kernelparam>
+
+          In order to use the `custom` lighting with the maximum brightness and a color of `0xff0a0a`, one would set {option}`boot.kernelParams` like this:
+
+          ```
+          boot.kernelParams = [
+           "tuxedo_keyboard.mode=0"
+           "tuxedo_keyboard.brightness=255"
+           "tuxedo_keyboard.color_left=0xff0a0a"
+          ];
+          ```
+      '');
+    };
+
+    config = mkIf cfg.enable
+    {
+      boot.kernelModules = ["tuxedo_keyboard"];
+      boot.extraModulePackages = [ tuxedo-keyboard ];
+    };
+  }
diff --git a/nixpkgs/nixos/modules/hardware/ubertooth.nix b/nixpkgs/nixos/modules/hardware/ubertooth.nix
new file mode 100644
index 000000000000..e2db2068d900
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/ubertooth.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.ubertooth;
+
+  ubertoothPkg = pkgs.ubertooth.override {
+    udevGroup = cfg.group;
+  };
+in {
+  options.hardware.ubertooth = {
+    enable = mkEnableOption (lib.mdDoc "Ubertooth software and its udev rules");
+
+    group = mkOption {
+      type = types.str;
+      default = "ubertooth";
+      example = "wheel";
+      description = lib.mdDoc "Group for Ubertooth's udev rules.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ ubertoothPkg ];
+
+    services.udev.packages = [ ubertoothPkg ];
+    users.groups.${cfg.group} = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/uinput.nix b/nixpkgs/nixos/modules/hardware/uinput.nix
new file mode 100644
index 000000000000..15fa66b8d83c
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/uinput.nix
@@ -0,0 +1,19 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.hardware.uinput;
+in {
+  options.hardware.uinput = {
+    enable = lib.mkEnableOption (lib.mdDoc "uinput support");
+  };
+
+  config = lib.mkIf cfg.enable {
+    boot.kernelModules = [ "uinput" ];
+
+    users.groups.uinput = {};
+
+    services.udev.extraRules = ''
+      SUBSYSTEM=="misc", KERNEL=="uinput", MODE="0660", GROUP="uinput", OPTIONS+="static_node=uinput"
+    '';
+  };
+}
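
A hypothetical usage sketch; `alice` is a placeholder user gaining access to /dev/uinput via the `uinput` group created by the module:

```
{
  hardware.uinput.enable = true;

  users.users.alice = {
    isNormalUser = true;
    extraGroups = [ "uinput" ];
  };
}
```
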
diff --git a/nixpkgs/nixos/modules/hardware/usb-modeswitch.nix b/nixpkgs/nixos/modules/hardware/usb-modeswitch.nix
new file mode 100644
index 000000000000..773891b0032f
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/usb-modeswitch.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+
+  options = {
+
+    hardware.usb-modeswitch = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable this option to support certain USB WLAN and WWAN adapters.
+
+          These network adapters initially present themselves as flash drives containing their drivers.
+          This option enables automatic switching to networking mode.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  imports = [
+    (mkRenamedOptionModule ["hardware" "usbWwan" ] ["hardware" "usb-modeswitch" ])
+  ];
+
+  config = mkIf config.hardware.usb-modeswitch.enable {
+    # Attaches device specific handlers.
+    services.udev.packages = with pkgs; [ usb-modeswitch-data ];
+
+    # Triggered by udev, usb-modeswitch creates systemd services via a
+    # template unit in the usb-modeswitch package.
+    systemd.packages = with pkgs; [ usb-modeswitch ];
+
+    # The systemd service requires the usb-modeswitch-data. The
+    # usb-modeswitch package intends to discover this via the
+    # filesystem at /usr/share/usb_modeswitch, and merge it with user
+    # configuration in /etc/usb_modeswitch.d. Configuring the correct
+    # path in the package is difficult, as it would cause a cyclic
+    # dependency.
+    environment.etc."usb_modeswitch.d".source = "${pkgs.usb-modeswitch-data}/share/usb_modeswitch";
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/usb-storage.nix b/nixpkgs/nixos/modules/hardware/usb-storage.nix
new file mode 100644
index 000000000000..9c1b7a125fd1
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/usb-storage.nix
@@ -0,0 +1,20 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+{
+  options.hardware.usbStorage.manageStartStop = mkOption {
+    type = types.bool;
+    default = true;
+    description = lib.mdDoc ''
+      Enable this option to gracefully spin down external storage during shutdown.
+      If you suspect improper head parking after poweroff, install `smartmontools` and check
+      whether the `Power-Off_Retract_Count` SMART attribute increments.
+    '';
+  };
+
+  config = mkIf config.hardware.usbStorage.manageStartStop {
+    services.udev.extraRules = ''
+      ACTION=="add|change", SUBSYSTEM=="scsi_disk", DRIVERS=="usb-storage", ATTR{manage_start_stop}="1"
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/video/amdgpu-pro.nix b/nixpkgs/nixos/modules/hardware/video/amdgpu-pro.nix
new file mode 100644
index 000000000000..605aa6ef8b88
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/amdgpu-pro.nix
@@ -0,0 +1,68 @@
+# This module provides the proprietary AMDGPU-PRO drivers.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  drivers = config.services.xserver.videoDrivers;
+
+  enabled = elem "amdgpu-pro" drivers;
+
+  package = config.boot.kernelPackages.amdgpu-pro;
+  package32 = pkgs.pkgsi686Linux.linuxPackages.amdgpu-pro.override { kernel = null; };
+
+  opengl = config.hardware.opengl;
+
+in
+
+{
+
+  config = mkIf enabled {
+    services.xserver.drivers = singleton
+      { name = "amdgpu"; modules = [ package ]; display = true; };
+
+    hardware.opengl.package = package;
+    hardware.opengl.package32 = package32;
+    hardware.opengl.setLdLibraryPath = true;
+
+    boot.extraModulePackages = [ package.kmod ];
+
+    boot.kernelPackages = pkgs.linuxKernel.packagesFor
+      (pkgs.linuxKernel.kernels.linux_5_10.override {
+        structuredExtraConfig = {
+          DEVICE_PRIVATE = kernel.yes;
+          KALLSYMS_ALL = kernel.yes;
+        };
+      });
+
+    hardware.firmware = [ package.fw ];
+
+    system.activationScripts.setup-amdgpu-pro = ''
+      ln -sfn ${package}/opt/amdgpu{,-pro} /run
+    '';
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "DEVICE_PRIVATE")
+      (isYes "KALLSYMS_ALL")
+    ];
+
+    boot.initrd.extraUdevRulesCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      cp -v ${package}/etc/udev/rules.d/*.rules $out/
+    '';
+    boot.initrd.services.udev.packages = [ package ];
+
+    environment.systemPackages =
+      [ package.vulkan ] ++
+      # this isn't really DRI, but we'll reuse this option for now
+      optional config.hardware.opengl.driSupport32Bit package32.vulkan;
+
+    environment.etc = {
+      "modprobe.d/blacklist-radeon.conf".source = package + "/etc/modprobe.d/blacklist-radeon.conf";
+      amd.source = package + "/etc/amd";
+    };
+
+  };
+
+}
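
An illustrative activation sketch; listing the driver in videoDrivers is what triggers this module, which in turn pins boot.kernelPackages to the Linux 5.10 build configured above:

```
{
  services.xserver.videoDrivers = [ "amdgpu-pro" ];
}
```
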
diff --git a/nixpkgs/nixos/modules/hardware/video/bumblebee.nix b/nixpkgs/nixos/modules/hardware/video/bumblebee.nix
new file mode 100644
index 000000000000..75f71d499e66
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/bumblebee.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.bumblebee;
+
+  kernel = config.boot.kernelPackages;
+
+  useNvidia = cfg.driver == "nvidia";
+
+  bumblebee = pkgs.bumblebee.override {
+    inherit useNvidia;
+    useDisplayDevice = cfg.connectDisplay;
+  };
+
+  useBbswitch = cfg.pmMethod == "bbswitch" || cfg.pmMethod == "auto" && useNvidia;
+
+  primus = pkgs.primus.override {
+    inherit useNvidia;
+  };
+
+in
+
+{
+
+  options = {
+    hardware.bumblebee = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable the bumblebee daemon to manage Optimus hybrid video cards.
+          This should power off the secondary GPU until its use is requested
+          by running an application with optirun.
+        '';
+      };
+
+      group = mkOption {
+        default = "wheel";
+        example = "video";
+        type = types.str;
+        description = lib.mdDoc "Group for bumblebee socket";
+      };
+
+      connectDisplay = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Set to true if you intend to connect your discrete card to a
+          monitor. This option will set up your Nvidia card for EDID
+          discovery and to turn on the monitor signal.
+
+          Only the nvidia driver is supported so far.
+        '';
+      };
+
+      driver = mkOption {
+        default = "nvidia";
+        type = types.enum [ "nvidia" "nouveau" ];
+        description = lib.mdDoc ''
+          Set driver used by bumblebeed. Supported are nouveau and nvidia.
+        '';
+      };
+
+      pmMethod = mkOption {
+        default = "auto";
+        type = types.enum [ "auto" "bbswitch" "switcheroo" "none" ];
+        description = lib.mdDoc ''
+          Set preferred power management method for unused card.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.blacklistedKernelModules = [ "nvidia-drm" "nvidia" "nouveau" ];
+    boot.kernelModules = optional useBbswitch "bbswitch";
+    boot.extraModulePackages = optional useBbswitch kernel.bbswitch ++ optional useNvidia kernel.nvidia_x11.bin;
+
+    environment.systemPackages = [ bumblebee primus ];
+
+    systemd.services.bumblebeed = {
+      description = "Bumblebee Hybrid Graphics Switcher";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "display-manager.service" ];
+      serviceConfig = {
+        ExecStart = "${bumblebee}/bin/bumblebeed --use-syslog -g ${cfg.group} --driver ${cfg.driver} --pm-method ${cfg.pmMethod}";
+      };
+    };
+  };
+}
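
An illustrative configuration sketch; the group value is the module's own example, and `auto` selects bbswitch when the nvidia driver is in use:

```
{
  hardware.bumblebee = {
    enable = true;
    driver = "nvidia";
    pmMethod = "auto";
    group = "video";
  };
}
```
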
diff --git a/nixpkgs/nixos/modules/hardware/video/capture/mwprocapture.nix b/nixpkgs/nixos/modules/hardware/video/capture/mwprocapture.nix
new file mode 100644
index 000000000000..ddd3f3ec7f32
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/capture/mwprocapture.nix
@@ -0,0 +1,56 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.mwProCapture;
+
+  kernelPackages = config.boot.kernelPackages;
+
+in
+
+{
+
+  options.hardware.mwProCapture.enable = mkEnableOption (lib.mdDoc "Magewell Pro Capture family kernel module");
+
+  config = mkIf cfg.enable {
+
+    boot.kernelModules = [ "ProCapture" ];
+
+    environment.systemPackages = [ kernelPackages.mwprocapture ];
+
+    boot.extraModulePackages = [ kernelPackages.mwprocapture ];
+
+    boot.extraModprobeConfig = ''
+      # Set the png picture to be displayed when no input signal is detected.
+      options ProCapture nosignal_file=${kernelPackages.mwprocapture}/res/NoSignal.png
+
+      # Set the png picture to be displayed when an unsupported input signal is detected.
+      options ProCapture unsupported_file=${kernelPackages.mwprocapture}/res/Unsupported.png
+
+      # Set the png picture to be displayed when a locking input signal is detected.
+      options ProCapture locking_file=${kernelPackages.mwprocapture}/res/Locking.png
+
+      # Message signaled interrupts switch
+      #options ProCapture disable_msi=0
+
+      # Set the debug level
+      #options ProCapture debug_level=0
+
+      # Force init switch eeprom
+      #options ProCapture init_switch_eeprom=0
+
+      # Min frame interval for VIDIOC_ENUM_FRAMEINTERVALS (default: 166666(100ns))
+      #options ProCapture enum_frameinterval_min=166666
+
+      # VIDIOC_ENUM_FRAMESIZES type (1: DISCRETE; 2: STEPWISE; otherwise: CONTINUOUS )
+      #options ProCapture enum_framesizes_type=0
+
+      # Parameters for internal usage
+      #options ProCapture internal_params=""
+    '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/video/displaylink.nix b/nixpkgs/nixos/modules/hardware/video/displaylink.nix
new file mode 100644
index 000000000000..ce5fbeeae536
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/displaylink.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  enabled = elem "displaylink" config.services.xserver.videoDrivers;
+
+  evdi = config.boot.kernelPackages.evdi;
+
+  displaylink = pkgs.displaylink.override {
+    inherit evdi;
+  };
+
+in
+
+{
+
+  config = mkIf enabled {
+
+    boot.extraModulePackages = [ evdi ];
+    boot.kernelModules = [ "evdi" ];
+
+    environment.etc."X11/xorg.conf.d/40-displaylink.conf".text = ''
+      Section "OutputClass"
+        Identifier  "DisplayLink"
+        MatchDriver "evdi"
+        Driver      "modesetting"
+        Option      "TearFree" "true"
+        Option      "AccelMethod" "none"
+      EndSection
+    '';
+
+    # make the device available
+    services.xserver.displayManager.sessionCommands = ''
+      ${lib.getBin pkgs.xorg.xrandr}/bin/xrandr --setprovideroutputsource 1 0
+    '';
+
+    # Those are taken from displaylink-installer.sh and from Arch Linux AUR package.
+
+    services.udev.packages = [ displaylink ];
+
+    powerManagement.powerDownCommands = ''
+      #flush any bytes in pipe
+      while read -n 1 -t 1 SUSPEND_RESULT < /tmp/PmMessagesPort_out; do : ; done;
+
+      #suspend DisplayLinkManager
+      echo "S" > /tmp/PmMessagesPort_in
+
+      #wait until suspend of DisplayLinkManager finish
+      if [ -f /tmp/PmMessagesPort_out ]; then
+        #wait until suspend of DisplayLinkManager finish
+        read -n 1 -t 10 SUSPEND_RESULT < /tmp/PmMessagesPort_out
+      fi
+    '';
+
+    powerManagement.resumeCommands = ''
+      #resume DisplayLinkManager
+      echo "R" > /tmp/PmMessagesPort_in
+    '';
+
+    systemd.services.dlm = {
+      description = "DisplayLink Manager Service";
+      after = [ "display-manager.service" ];
+      conflicts = [ "getty@tty7.service" ];
+
+      serviceConfig = {
+        ExecStart = "${displaylink}/bin/DisplayLinkManager";
+        Restart = "always";
+        RestartSec = 5;
+        LogsDirectory = "displaylink";
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/video/nvidia.nix b/nixpkgs/nixos/modules/hardware/video/nvidia.nix
new file mode 100644
index 000000000000..c36775dd24bb
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/nvidia.nix
@@ -0,0 +1,598 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}: let
+  nvidiaEnabled = (lib.elem "nvidia" config.services.xserver.videoDrivers);
+  nvidia_x11 =
+    if nvidiaEnabled || cfg.datacenter.enable
+    then cfg.package
+    else null;
+
+  cfg = config.hardware.nvidia;
+
+  pCfg = cfg.prime;
+  syncCfg = pCfg.sync;
+  offloadCfg = pCfg.offload;
+  reverseSyncCfg = pCfg.reverseSync;
+  primeEnabled = syncCfg.enable || reverseSyncCfg.enable || offloadCfg.enable;
+  busIDType = lib.types.strMatching "([[:print:]]+[\:\@][0-9]{1,3}\:[0-9]{1,2}\:[0-9])?";
+  ibtSupport = cfg.open || (nvidia_x11.ibtSupport or false);
+  settingsFormat = pkgs.formats.keyValue {};
+in {
+  options = {
+    hardware.nvidia = {
+      datacenter.enable = lib.mkEnableOption (lib.mdDoc ''
+        Data Center drivers for NVIDIA cards on an NVLink topology
+      '');
+      datacenter.settings = lib.mkOption {
+        type = settingsFormat.type;
+        default = {
+          LOG_LEVEL=4;
+          LOG_FILE_NAME="/var/log/fabricmanager.log";
+          LOG_APPEND_TO_LOG=1;
+          LOG_FILE_MAX_SIZE=1024;
+          LOG_USE_SYSLOG=0;
+          DAEMONIZE=1;
+          BIND_INTERFACE_IP="127.0.0.1";
+          STARTING_TCP_PORT=16000;
+          FABRIC_MODE=0;
+          FABRIC_MODE_RESTART=0;
+          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
+          FM_CMD_BIND_INTERFACE="127.0.0.1";
+          FM_CMD_PORT_NUMBER=6666;
+          FM_STAY_RESIDENT_ON_FAILURES=0;
+          ACCESS_LINK_FAILURE_MODE=0;
+          TRUNK_LINK_FAILURE_MODE=0;
+          NVSWITCH_FAILURE_MODE=0;
+          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
+          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+        };
+        defaultText = lib.literalExpression ''
+        {
+          LOG_LEVEL=4;
+          LOG_FILE_NAME="/var/log/fabricmanager.log";
+          LOG_APPEND_TO_LOG=1;
+          LOG_FILE_MAX_SIZE=1024;
+          LOG_USE_SYSLOG=0;
+          DAEMONIZE=1;
+          BIND_INTERFACE_IP="127.0.0.1";
+          STARTING_TCP_PORT=16000;
+          FABRIC_MODE=0;
+          FABRIC_MODE_RESTART=0;
+          STATE_FILE_NAME="/var/tmp/fabricmanager.state";
+          FM_CMD_BIND_INTERFACE="127.0.0.1";
+          FM_CMD_PORT_NUMBER=6666;
+          FM_STAY_RESIDENT_ON_FAILURES=0;
+          ACCESS_LINK_FAILURE_MODE=0;
+          TRUNK_LINK_FAILURE_MODE=0;
+          NVSWITCH_FAILURE_MODE=0;
+          ABORT_CUDA_JOBS_ON_FM_EXIT=1;
+          TOPOLOGY_FILE_PATH=nvidia_x11.fabricmanager + "/share/nvidia-fabricmanager/nvidia/nvswitch";
+        }
+        '';
+        description = lib.mdDoc ''
+          Additional configuration options for fabricmanager.
+        '';
+      };
+
+      powerManagement.enable = lib.mkEnableOption (lib.mdDoc ''
+        experimental power management through systemd. For more information, see
+        the NVIDIA docs, Chapter 21: Configuring Power Management Support
+      '');
+
+      powerManagement.finegrained = lib.mkEnableOption (lib.mdDoc ''
+        experimental power management of PRIME offload. For more information, see
+        the NVIDIA docs, Chapter 22: PCI-Express Runtime D3 (RTD3) Power Management
+      '');
+
+      dynamicBoost.enable = lib.mkEnableOption (lib.mdDoc ''
+        Dynamic Boost, which balances power between the CPU and the GPU for improved
+        performance on supported laptops using the nvidia-powerd daemon. For more
+        information, see the NVIDIA docs, Chapter 23: Dynamic Boost on Linux
+      '');
+
+      modesetting.enable = lib.mkEnableOption (lib.mdDoc ''
+        kernel modesetting when using the NVIDIA proprietary driver.
+
+        Enabling this fixes screen tearing when using Optimus via PRIME (see
+        {option}`hardware.nvidia.prime.sync.enable`). This is not enabled
+        by default because it is not officially supported by NVIDIA and would not
+        work with SLI
+      '');
+
+      prime.nvidiaBusId = lib.mkOption {
+        type = busIDType;
+        default = "";
+        example = "PCI:1:0:0";
+        description = lib.mdDoc ''
+          Bus ID of the NVIDIA GPU. You can find it using lspci; for example if lspci
+          shows the NVIDIA GPU at "01:00.0", set this option to "PCI:1:0:0".
+        '';
+      };
+
+      prime.intelBusId = lib.mkOption {
+        type = busIDType;
+        default = "";
+        example = "PCI:0:2:0";
+        description = lib.mdDoc ''
+          Bus ID of the Intel GPU. You can find it using lspci; for example if lspci
+          shows the Intel GPU at "00:02.0", set this option to "PCI:0:2:0".
+        '';
+      };
+
+      prime.amdgpuBusId = lib.mkOption {
+        type = busIDType;
+        default = "";
+        example = "PCI:4:0:0";
+        description = lib.mdDoc ''
+          Bus ID of the AMD APU. You can find it using lspci; for example if lspci
+          shows the AMD APU at "04:00.0", set this option to "PCI:4:0:0".
+        '';
+      };
+
+      prime.sync.enable = lib.mkEnableOption (lib.mdDoc ''
+        NVIDIA Optimus support using the NVIDIA proprietary driver via PRIME.
+        If enabled, the NVIDIA GPU will be always on and used for all rendering,
+        while enabling output to displays attached only to the integrated Intel/AMD
+        GPU without a multiplexer.
+
+        Note that this option only has any effect if the "nvidia" driver is specified
+        in {option}`services.xserver.videoDrivers`, and it should preferably
+        be the only driver there.
+
+        If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
+        be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
+        {option}`hardware.nvidia.prime.intelBusId` or
+        {option}`hardware.nvidia.prime.amdgpuBusId`).
+
+        If you enable this, you may want to also enable kernel modesetting for the
+        NVIDIA driver ({option}`hardware.nvidia.modesetting.enable`) in order
+        to prevent tearing.
+
+        Note that this configuration only works with display managers that support
+        the {option}`services.xserver.displayManager.setupCommands` option
+      '');
+
+      prime.allowExternalGpu = lib.mkEnableOption (lib.mdDoc ''
+        configuring X to allow external NVIDIA GPUs when using PRIME [reverse] sync Optimus
+      '');
+
+      prime.offload.enable = lib.mkEnableOption (lib.mdDoc ''
+        render offload support using the NVIDIA proprietary driver via PRIME.
+
+        If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
+        be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
+        {option}`hardware.nvidia.prime.intelBusId` or
+        {option}`hardware.nvidia.prime.amdgpuBusId`)
+      '');
+
+      prime.offload.enableOffloadCmd = lib.mkEnableOption (lib.mdDoc ''
+        adding a `nvidia-offload` convenience script to {option}`environment.systemPackages`
+        for offloading programs to an NVIDIA device. For this to work,
+        {option}`hardware.nvidia.prime.offload.enable` or {option}`hardware.nvidia.prime.reverseSync.enable`
+        must also be enabled.
+
+        Example usage: `nvidia-offload sauerbraten_client`
+      '');
+
+      prime.reverseSync.enable = lib.mkEnableOption (lib.mdDoc ''
+        NVIDIA Optimus support using the NVIDIA proprietary driver via reverse
+        PRIME. If enabled, the Intel/AMD GPU will be used for all rendering, while
+        enabling output to displays attached only to the NVIDIA GPU without a
+        multiplexer.
+
+        Warning: This feature is relatively new and, depending on your system, might
+        work poorly; this is especially true of AMD support.
+        See: https://forums.developer.nvidia.com/t/the-all-new-outputsink-feature-aka-reverse-prime/129828
+
+        Note that this option only has any effect if the "nvidia" driver is specified
+        in {option}`services.xserver.videoDrivers`, and it should preferably
+        be the only driver there.
+
+        If this is enabled, then the bus IDs of the NVIDIA and Intel/AMD GPUs have to
+        be specified ({option}`hardware.nvidia.prime.nvidiaBusId` and
+        {option}`hardware.nvidia.prime.intelBusId` or
+        {option}`hardware.nvidia.prime.amdgpuBusId`).
+
+        If you enable this, you may want to also enable kernel modesetting for the
+        NVIDIA driver ({option}`hardware.nvidia.modesetting.enable`) in order
+        to prevent tearing.
+
+        Note that this configuration only works with display managers that support
+        the {option}`services.xserver.displayManager.setupCommands` option
+      '');
+
+      nvidiaSettings =
+        (lib.mkEnableOption (lib.mdDoc ''
+          nvidia-settings, NVIDIA's GUI configuration tool
+        ''))
+        // {default = true;};
+
+      nvidiaPersistenced = lib.mkEnableOption (lib.mdDoc ''
+        nvidia-persistenced, a daemon for headless NVIDIA GPU operation.
+        It ensures all GPUs stay awake even in headless mode
+      '');
+
+      forceFullCompositionPipeline = lib.mkEnableOption (lib.mdDoc ''
+        forcing the full composition pipeline.
+        This sometimes fixes screen tearing issues.
+        This has been reported to reduce the performance of some OpenGL applications and may produce issues in WebGL.
+        It also drastically increases the time the driver needs to clock down after load
+      '');
+
+      package = lib.mkOption {
+        default = config.boot.kernelPackages.nvidiaPackages."${if cfg.datacenter.enable then "dc" else "stable"}";
+        defaultText = lib.literalExpression ''
+          config.boot.kernelPackages.nvidiaPackages."\$\{if cfg.datacenter.enable then "dc" else "stable"}"
+        '';
+        example = lib.mdDoc "config.boot.kernelPackages.nvidiaPackages.legacy_470";
+        description = lib.mdDoc ''
+          The NVIDIA driver package to use.
+        '';
+      };
+
+      open = lib.mkEnableOption (lib.mdDoc ''
+        the open source NVIDIA kernel module
+      '');
+    };
+  };
+
+  config = let
+    igpuDriver =
+      if pCfg.intelBusId != ""
+      then "modesetting"
+      else "amdgpu";
+    igpuBusId =
+      if pCfg.intelBusId != ""
+      then pCfg.intelBusId
+      else pCfg.amdgpuBusId;
+  in
+    lib.mkIf (nvidia_x11 != null) (lib.mkMerge [
+      # Common
+      ({
+        assertions = [
+          {
+            assertion = !(nvidiaEnabled && cfg.datacenter.enable);
+            message = "You cannot configure both X11 and Data Center drivers at the same time.";
+          }
+        ];
+        boot = {
+          blacklistedKernelModules = ["nouveau" "nvidiafb"];
+          kernelModules = [ "nvidia-uvm" ];
+        };
+        systemd.tmpfiles.rules =
+          lib.optional config.virtualisation.docker.enableNvidia
+            "L+ /run/nvidia-docker/bin - - - - ${nvidia_x11.bin}/origBin";
+        services.udev.extraRules =
+        ''
+          # Create /dev/nvidia-uvm when the nvidia-uvm module is loaded.
+          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidiactl c 195 255'"
+          KERNEL=="nvidia", RUN+="${pkgs.runtimeShell} -c 'for i in $$(cat /proc/driver/nvidia/gpus/*/information | grep Minor | cut -d \  -f 4); do mknod -m 666 /dev/nvidia$${i} c 195 $${i}; done'"
+          KERNEL=="nvidia_modeset", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-modeset c 195 254'"
+          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 0'"
+          KERNEL=="nvidia_uvm", RUN+="${pkgs.runtimeShell} -c 'mknod -m 666 /dev/nvidia-uvm-tools c $$(grep nvidia-uvm /proc/devices | cut -d \  -f 1) 1'"
+        '';
+        hardware.opengl = {
+          extraPackages = [
+            nvidia_x11.out
+          ];
+          extraPackages32 = [
+            nvidia_x11.lib32
+          ];
+        };
+        environment.systemPackages = [
+          nvidia_x11.bin
+        ];
+      })
+      # X11
+      (lib.mkIf nvidiaEnabled {
+        assertions = [
+        {
+          assertion = primeEnabled -> pCfg.intelBusId == "" || pCfg.amdgpuBusId == "";
+          message = "You cannot configure both an Intel iGPU and an AMD APU. Pick the one corresponding to your processor.";
+        }
+
+        {
+          assertion = offloadCfg.enableOffloadCmd -> offloadCfg.enable || reverseSyncCfg.enable;
+          message = "Offload command requires offloading or reverse prime sync to be enabled.";
+        }
+
+        {
+          assertion = primeEnabled -> pCfg.nvidiaBusId != "" && (pCfg.intelBusId != "" || pCfg.amdgpuBusId != "");
+          message = "When NVIDIA PRIME is enabled, the GPU bus IDs must be configured.";
+        }
+
+        {
+          assertion = offloadCfg.enable -> lib.versionAtLeast nvidia_x11.version "435.21";
+          message = "NVIDIA PRIME render offload is currently only supported on versions >= 435.21.";
+        }
+
+        {
+          assertion = (reverseSyncCfg.enable && pCfg.amdgpuBusId != "") -> lib.versionAtLeast nvidia_x11.version "470.0";
+          message = "NVIDIA PRIME render offload for AMD APUs is currently only supported on versions >= 470 beta.";
+        }
+
+        {
+          assertion = !(syncCfg.enable && offloadCfg.enable);
+          message = "PRIME Sync and Offload cannot be both enabled";
+        }
+
+        {
+          assertion = !(syncCfg.enable && reverseSyncCfg.enable);
+          message = "PRIME Sync and PRIME Reverse Sync cannot be both enabled";
+        }
+
+        {
+          assertion = !(syncCfg.enable && cfg.powerManagement.finegrained);
+          message = "Sync precludes powering down the NVIDIA GPU.";
+        }
+
+        {
+          assertion = cfg.powerManagement.finegrained -> offloadCfg.enable;
+          message = "Fine-grained power management requires offload to be enabled.";
+        }
+
+        {
+          assertion = cfg.powerManagement.enable -> lib.versionAtLeast nvidia_x11.version "430.09";
+          message = "Required files for driver based power management only exist on versions >= 430.09.";
+        }
+
+        {
+          assertion = cfg.open -> (cfg.package ? open && cfg.package ? firmware);
+          message = "This version of NVIDIA driver does not provide a corresponding opensource kernel driver";
+        }
+
+        {
+          assertion = cfg.dynamicBoost.enable -> lib.versionAtLeast nvidia_x11.version "510.39.01";
+          message = "NVIDIA's Dynamic Boost feature only exists on versions >= 510.39.01";
+        }];
+
+        # If Optimus/PRIME is enabled, we:
+        # - Specify the configured NVIDIA GPU bus ID in the Device section for the
+        #   "nvidia" driver.
+        # - Add the AllowEmptyInitialConfiguration option to the Screen section for the
+        #   "nvidia" driver, in order to allow the X server to start without any outputs.
+        # - Add a separate Device section for the Intel GPU, using the "modesetting"
+        #   driver and with the configured BusID.
+        # - OR add a separate Device section for the AMD APU, using the "amdgpu"
+        #   driver and with the configured BusID.
+        # - Reference that Device section from the ServerLayout section as an inactive
+        #   device.
+        # - Configure the display manager to run specific `xrandr` commands which will
+        #   configure/enable displays connected to the Intel iGPU / AMD APU.
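+        #
+        # For illustration only (bus IDs are machine-specific; see the option
+        # descriptions above), a typical PRIME offload user configuration sets:
+        #   hardware.nvidia.prime = {
+        #     offload.enable = true;
+        #     intelBusId = "PCI:0:2:0";
+        #     nvidiaBusId = "PCI:1:0:0";
+        #   };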
+
+        # reverse sync implies offloading
+        hardware.nvidia.prime.offload.enable = lib.mkDefault reverseSyncCfg.enable;
+
+        services.xserver.drivers =
+          lib.optional primeEnabled {
+            name = igpuDriver;
+            display = offloadCfg.enable;
+            modules = lib.optional (igpuDriver == "amdgpu") pkgs.xorg.xf86videoamdgpu;
+            deviceSection =
+              ''
+                BusID "${igpuBusId}"
+              ''
+              + lib.optionalString (syncCfg.enable && igpuDriver != "amdgpu") ''
+                Option "AccelMethod" "none"
+              '';
+          }
+          ++ lib.singleton {
+            name = "nvidia";
+            modules = [nvidia_x11.bin];
+            display = !offloadCfg.enable;
+            deviceSection =
+              lib.optionalString primeEnabled
+              ''
+                BusID "${pCfg.nvidiaBusId}"
+              ''
+              + lib.optionalString pCfg.allowExternalGpu ''
+                Option "AllowExternalGpus"
+              '';
+            screenSection =
+              ''
+                Option "RandRRotation" "on"
+              ''
+              + lib.optionalString syncCfg.enable ''
+                Option "AllowEmptyInitialConfiguration"
+              ''
+              + lib.optionalString cfg.forceFullCompositionPipeline ''
+                Option         "metamodes" "nvidia-auto-select +0+0 {ForceFullCompositionPipeline=On}"
+                Option         "AllowIndirectGLXProtocol" "off"
+                Option         "TripleBuffer" "on"
+              '';
+          };
+
+        services.xserver.serverLayoutSection =
+          lib.optionalString syncCfg.enable ''
+            Inactive "Device-${igpuDriver}[0]"
+          ''
+          + lib.optionalString reverseSyncCfg.enable ''
+            Inactive "Device-nvidia[0]"
+          ''
+          + lib.optionalString offloadCfg.enable ''
+            Option "AllowNVIDIAGPUScreens"
+          '';
+
+        services.xserver.displayManager.setupCommands = let
+          gpuProviderName =
+            if igpuDriver == "amdgpu"
+            then
+              # find the name of the provider if amdgpu
+              "`${lib.getExe pkgs.xorg.xrandr} --listproviders | ${lib.getExe pkgs.gnugrep} -i AMD | ${lib.getExe pkgs.gnused} -n 's/^.*name://p'`"
+            else igpuDriver;
+          providerCmdParams =
+            if syncCfg.enable
+            then "\"${gpuProviderName}\" NVIDIA-0"
+            else "NVIDIA-G0 \"${gpuProviderName}\"";
+        in
+          lib.optionalString (syncCfg.enable || reverseSyncCfg.enable) ''
+            # Added by nvidia configuration module for Optimus/PRIME.
+            ${lib.getExe pkgs.xorg.xrandr} --setprovideroutputsource ${providerCmdParams}
+            ${lib.getExe pkgs.xorg.xrandr} --auto
+          '';
+
+        environment.etc = {
+          "nvidia/nvidia-application-profiles-rc" = lib.mkIf nvidia_x11.useProfiles {source = "${nvidia_x11.bin}/share/nvidia/nvidia-application-profiles-rc";};
+
+          # 'nvidia_x11' installs its files to /run/opengl-driver/...
+          "egl/egl_external_platform.d".source = "/run/opengl-driver/share/egl/egl_external_platform.d/";
+        };
+
+        hardware.opengl = {
+          extraPackages = [
+            pkgs.nvidia-vaapi-driver
+          ];
+          extraPackages32 = [
+            pkgs.pkgsi686Linux.nvidia-vaapi-driver
+          ];
+        };
+        environment.systemPackages =
+          lib.optional cfg.nvidiaSettings nvidia_x11.settings
+          ++ lib.optional cfg.nvidiaPersistenced nvidia_x11.persistenced
+          ++ lib.optional offloadCfg.enableOffloadCmd
+          (pkgs.writeShellScriptBin "nvidia-offload" ''
+            export __NV_PRIME_RENDER_OFFLOAD=1
+            export __NV_PRIME_RENDER_OFFLOAD_PROVIDER=NVIDIA-G0
+            export __GLX_VENDOR_LIBRARY_NAME=nvidia
+            export __VK_LAYER_NV_optimus=NVIDIA_only
+            exec "$@"
+          '');
+
+        systemd.packages = lib.optional cfg.powerManagement.enable nvidia_x11.out;
+
+        systemd.services = let
+          nvidiaService = state: {
+            description = "NVIDIA system ${state} actions";
+            path = [pkgs.kbd];
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = "${nvidia_x11.out}/bin/nvidia-sleep.sh '${state}'";
+            };
+            before = ["systemd-${state}.service"];
+            requiredBy = ["systemd-${state}.service"];
+          };
+        in
+          lib.mkMerge [
+            (lib.mkIf cfg.powerManagement.enable {
+              nvidia-suspend = nvidiaService "suspend";
+              nvidia-hibernate = nvidiaService "hibernate";
+              nvidia-resume =
+                (nvidiaService "resume")
+                // {
+                  before = [];
+                  after = ["systemd-suspend.service" "systemd-hibernate.service"];
+                  requiredBy = ["systemd-suspend.service" "systemd-hibernate.service"];
+                };
+            })
+            (lib.mkIf cfg.nvidiaPersistenced {
+              "nvidia-persistenced" = {
+                description = "NVIDIA Persistence Daemon";
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "forking";
+                  Restart = "always";
+                  PIDFile = "/var/run/nvidia-persistenced/nvidia-persistenced.pid";
+                  ExecStart = "${lib.getExe nvidia_x11.persistenced} --verbose";
+                  ExecStopPost = "${pkgs.coreutils}/bin/rm -rf /var/run/nvidia-persistenced";
+                };
+              };
+            })
+            (lib.mkIf cfg.dynamicBoost.enable {
+              "nvidia-powerd" = {
+                description = "nvidia-powerd service";
+                path = [
+                  pkgs.util-linux # nvidia-powerd wants lscpu
+                ];
+                wantedBy = ["multi-user.target"];
+                serviceConfig = {
+                  Type = "dbus";
+                  BusName = "nvidia.powerd.server";
+                  ExecStart = "${nvidia_x11.bin}/bin/nvidia-powerd";
+                };
+              };
+            })
+          ];
+        services.acpid.enable = true;
+
+        services.dbus.packages = lib.optional cfg.dynamicBoost.enable nvidia_x11.bin;
+
+        hardware.firmware = lib.optional cfg.open nvidia_x11.firmware;
+
+        systemd.tmpfiles.rules =
+          lib.optional (nvidia_x11.persistenced != null && config.virtualisation.docker.enableNvidia)
+          "L+ /run/nvidia-docker/extras/bin/nvidia-persistenced - - - - ${nvidia_x11.persistenced}/origBin/nvidia-persistenced";
+
+        boot = {
+          extraModulePackages =
+            if cfg.open
+            then [nvidia_x11.open]
+            else [nvidia_x11.bin];
+          # nvidia-uvm is required by CUDA applications.
+          kernelModules =
+            lib.optionals config.services.xserver.enable ["nvidia" "nvidia_modeset" "nvidia_drm"];
+
+          # If requested enable modesetting via kernel parameter.
+          kernelParams =
+            lib.optional (offloadCfg.enable || cfg.modesetting.enable) "nvidia-drm.modeset=1"
+            ++ lib.optional cfg.powerManagement.enable "nvidia.NVreg_PreserveVideoMemoryAllocations=1"
+            ++ lib.optional cfg.open "nvidia.NVreg_OpenRmEnableUnsupportedGpus=1"
+            ++ lib.optional (config.boot.kernelPackages.kernel.kernelAtLeast "6.2" && !ibtSupport) "ibt=off";
+
+          # enable finegrained power management
+          extraModprobeConfig = lib.optionalString cfg.powerManagement.finegrained ''
+            options nvidia "NVreg_DynamicPowerManagement=0x02"
+          '';
+        };
+        services.udev.extraRules =
+          lib.optionalString cfg.powerManagement.finegrained (
+          lib.optionalString (lib.versionOlder config.boot.kernelPackages.kernel.version "5.5") ''
+            # Remove NVIDIA USB xHCI Host Controller devices, if present
+            ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c0330", ATTR{remove}="1"
+
+            # Remove NVIDIA USB Type-C UCSI devices, if present
+            ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x0c8000", ATTR{remove}="1"
+
+            # Remove NVIDIA Audio devices, if present
+            ACTION=="add", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x040300", ATTR{remove}="1"
+          ''
+          + ''
+            # Enable runtime PM for NVIDIA VGA/3D controller devices on driver bind
+            ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="auto"
+            ACTION=="bind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="auto"
+
+            # Disable runtime PM for NVIDIA VGA/3D controller devices on driver unbind
+            ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030000", TEST=="power/control", ATTR{power/control}="on"
+            ACTION=="unbind", SUBSYSTEM=="pci", ATTR{vendor}=="0x10de", ATTR{class}=="0x030200", TEST=="power/control", ATTR{power/control}="on"
+          ''
+        );
+      })
+      # Data Center
+      (lib.mkIf (cfg.datacenter.enable) {
+        boot.extraModulePackages = [
+          nvidia_x11.bin
+        ];
+        systemd.services.nvidia-fabricmanager = {
+          enable = true;
+          description = "Start NVIDIA NVLink Management";
+          wantedBy = [ "multi-user.target" ];
+          unitConfig.After = [ "network-online.target" ];
+          unitConfig.Requires = [ "network-online.target" ];
+          serviceConfig = {
+            Type = "forking";
+            TimeoutStartSec = 240;
+            ExecStart = let
+              nv-fab-conf = settingsFormat.generate "fabricmanager.conf" cfg.datacenter.settings;
+              in
+                nvidia_x11.fabricmanager + "/bin/nv-fabricmanager -c " + nv-fab-conf;
+            LimitCORE="infinity";
+          };
+        };
+        environment.systemPackages =
+          lib.optional cfg.datacenter.enable nvidia_x11.fabricmanager;
+      })
+    ]);
+}
diff --git a/nixpkgs/nixos/modules/hardware/video/radeon.nix b/nixpkgs/nixos/modules/hardware/video/radeon.nix
new file mode 100644
index 000000000000..c92b7a0509d0
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/radeon.nix
@@ -0,0 +1,3 @@
+{
+  hardware.enableRedistributableFirmware = true;
+}
diff --git a/nixpkgs/nixos/modules/hardware/video/switcheroo-control.nix b/nixpkgs/nixos/modules/hardware/video/switcheroo-control.nix
new file mode 100644
index 000000000000..982388f8e5f4
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/switcheroo-control.nix
@@ -0,0 +1,18 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  pkg = [ pkgs.switcheroo-control ];
+  cfg = config.services.switcherooControl;
+in {
+  options.services.switcherooControl = {
+    enable = mkEnableOption (lib.mdDoc "switcheroo-control, a D-Bus service to check the availability of dual-GPU");
+  };
+
+  config = mkIf cfg.enable {
+    services.dbus.packages = pkg;
+    environment.systemPackages = pkg;
+    systemd.packages = pkg;
+    systemd.targets.multi-user.wants = [ "switcheroo-control.service" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/video/uvcvideo/default.nix b/nixpkgs/nixos/modules/hardware/video/uvcvideo/default.nix
new file mode 100644
index 000000000000..6cfb8cc6ad29
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/uvcvideo/default.nix
@@ -0,0 +1,64 @@
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.uvcvideo;
+
+  uvcdynctrl-udev-rules = packages: pkgs.callPackage ./uvcdynctrl-udev-rules.nix {
+    drivers = packages;
+    udevDebug = false;
+  };
+
+in
+
+{
+
+  options = {
+    services.uvcvideo.dynctrl = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable {command}`uvcvideo` dynamic controls.
+
+          Note that enabling this brings the {command}`uvcdynctrl` tool
+          into your environment and registers all dynamic controls from the
+          specified {command}`packages` with the {command}`uvcvideo` driver.
+        '';
+      };
+
+      packages = mkOption {
+        type = types.listOf types.path;
+        example = literalExpression "[ pkgs.tiscamera ]";
+        description = lib.mdDoc ''
+          List of packages containing {command}`uvcvideo` dynamic controls
+          rules. All files found in
+          {file}`«pkg»/share/uvcdynctrl/data`
+          will be included.
+
+          Note that these will serve as input to the {command}`libwebcam`
+          package which through its own {command}`udev` rule will register
+          the dynamic controls from specified packages to the {command}`uvcvideo`
+          driver.
+        '';
+        apply = map getBin;
+      };
+    };
+  };
+
+  config = mkIf cfg.dynctrl.enable {
+
+    services.udev.packages = [
+      (uvcdynctrl-udev-rules cfg.dynctrl.packages)
+    ];
+
+    environment.systemPackages = [
+      pkgs.libwebcam
+    ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix b/nixpkgs/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
new file mode 100644
index 000000000000..8dadbd53b989
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/uvcvideo/uvcdynctrl-udev-rules.nix
@@ -0,0 +1,47 @@
+{ buildEnv
+, libwebcam
+, makeWrapper
+, runCommand
+, drivers ? []
+, udevDebug ? false
+}:
+
+let
+  version = "0.0.0";
+
+  dataPath = buildEnv {
+    name = "uvcdynctrl-with-drivers-data-path";
+    paths = drivers ++ [ libwebcam ];
+    pathsToLink = [ "/share/uvcdynctrl/data" ];
+    ignoreCollisions = false;
+  };
+
+  dataDir = "${dataPath}/share/uvcdynctrl/data";
+  udevDebugVarValue = if udevDebug then "1" else "0";
+in
+
+runCommand "uvcdynctrl-udev-rules-${version}"
+{
+  inherit dataPath;
+  nativeBuildInputs = [
+    makeWrapper
+  ];
+  buildInputs = [
+    libwebcam
+  ];
+  dontPatchELF = true;
+  dontStrip = true;
+  preferLocalBuild = true;
+}
+''
+  mkdir -p "$out/lib/udev"
+  makeWrapper "${libwebcam}/lib/udev/uvcdynctrl" "$out/lib/udev/uvcdynctrl" \
+    --set NIX_UVCDYNCTRL_DATA_DIR "${dataDir}" \
+    --set NIX_UVCDYNCTRL_UDEV_DEBUG "${udevDebugVarValue}"
+
+  mkdir -p "$out/lib/udev/rules.d"
+  cat "${libwebcam}/lib/udev/rules.d/80-uvcdynctrl.rules" | \
+    sed -r "s#RUN\+\=\"([^\"]+)\"#RUN\+\=\"$out/lib/udev/uvcdynctrl\"#g" > \
+    "$out/lib/udev/rules.d/80-uvcdynctrl.rules"
+''
+
diff --git a/nixpkgs/nixos/modules/hardware/video/webcam/facetimehd.nix b/nixpkgs/nixos/modules/hardware/video/webcam/facetimehd.nix
new file mode 100644
index 000000000000..a0ec9c98a54c
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/webcam/facetimehd.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.facetimehd;
+
+  kernelPackages = config.boot.kernelPackages;
+
+in
+
+{
+
+  options.hardware.facetimehd.enable = mkEnableOption (lib.mdDoc "the facetimehd kernel module");
+
+  options.hardware.facetimehd.withCalibration = mkOption {
+    default = false;
+    example = true;
+    type = types.bool;
+    description = lib.mdDoc ''
+      Whether to include sensor calibration files for facetimehd.
+      This makes colors look much better but is experimental, see
+      <https://github.com/patjak/facetimehd/wiki/Extracting-the-sensor-calibration-files>
+      for details.
+    '';
+  };
+
+  config = mkIf cfg.enable {
+
+    boot.kernelModules = [ "facetimehd" ];
+
+    boot.blacklistedKernelModules = [ "bdc_pci" ];
+
+    boot.extraModulePackages = [ kernelPackages.facetimehd ];
+
+    hardware.firmware = [ pkgs.facetimehd-firmware ]
+      ++ optional cfg.withCalibration pkgs.facetimehd-calibration;
+
+    # unload module during suspend/hibernate as it crashes the whole system
+    powerManagement.powerDownCommands = ''
+      ${pkgs.kmod}/bin/lsmod | ${pkgs.gnugrep}/bin/grep -q "^facetimehd" && ${pkgs.kmod}/bin/rmmod -f -v facetimehd
+    '';
+
+    # and load it back on resume
+    powerManagement.resumeCommands = ''
+      ${pkgs.kmod}/bin/modprobe -v facetimehd
+    '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/video/webcam/ipu6.nix b/nixpkgs/nixos/modules/hardware/video/webcam/ipu6.nix
new file mode 100644
index 000000000000..fce78cda34c7
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/video/webcam/ipu6.nix
@@ -0,0 +1,57 @@
+{ config, lib, pkgs, ... }:
+let
+
+  inherit (lib) mkDefault mkEnableOption mkIf mkOption optional types;
+
+  cfg = config.hardware.ipu6;
+
+in
+{
+
+  options.hardware.ipu6 = {
+
+    enable = mkEnableOption (lib.mdDoc "support for Intel IPU6/MIPI cameras");
+
+    platform = mkOption {
+      type = types.enum [ "ipu6" "ipu6ep" ];
+      description = lib.mdDoc ''
+        Choose the version for your hardware platform.
+
+        Use `ipu6` for Tiger Lake and `ipu6ep` for Alder Lake respectively.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    boot.extraModulePackages = with config.boot.kernelPackages; [
+      ipu6-drivers
+    ];
+
+    hardware.firmware = with pkgs; [ ]
+      ++ optional (cfg.platform == "ipu6") ipu6-camera-bin
+      ++ optional (cfg.platform == "ipu6ep") ipu6ep-camera-bin;
+
+    services.udev.extraRules = ''
+      SUBSYSTEM=="intel-ipu6-psys", MODE="0660", GROUP="video"
+    '';
+
+    services.v4l2-relayd.instances.ipu6 = {
+      enable = mkDefault true;
+
+      cardLabel = mkDefault "Intel MIPI Camera";
+
+      extraPackages = with pkgs.gst_all_1; [ ]
+        ++ optional (cfg.platform == "ipu6") icamerasrc-ipu6
+        ++ optional (cfg.platform == "ipu6ep") icamerasrc-ipu6ep;
+
+      input = {
+        pipeline = "icamerasrc";
+        format = mkIf (cfg.platform == "ipu6ep") (mkDefault "NV12");
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/hardware/wooting.nix b/nixpkgs/nixos/modules/hardware/wooting.nix
new file mode 100644
index 000000000000..78bbcb61aca7
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/wooting.nix
@@ -0,0 +1,12 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+{
+  options.hardware.wooting.enable = mkEnableOption (lib.mdDoc ''support for Wooting keyboards.
+    Note that users must be in the "input" group for udev rules to apply'');
+
+  config = mkIf config.hardware.wooting.enable {
+    environment.systemPackages = [ pkgs.wootility ];
+    services.udev.packages = [ pkgs.wooting-udev-rules ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/xone.nix b/nixpkgs/nixos/modules/hardware/xone.nix
new file mode 100644
index 000000000000..211d3fce8679
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/xone.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.hardware.xone;
+in
+{
+  options.hardware.xone = {
+    enable = mkEnableOption (lib.mdDoc "the xone driver for Xbox One and Xbobx Series X|S accessories");
+  };
+
+  config = mkIf cfg.enable {
+    boot = {
+      blacklistedKernelModules = [ "xpad" "mt76x2u" ];
+      extraModulePackages = with config.boot.kernelPackages; [ xone ];
+    };
+    hardware.firmware = [ pkgs.xow_dongle-firmware ];
+  };
+
+  meta = {
+    maintainers = with maintainers; [ rhysmdnz ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/hardware/xpadneo.nix b/nixpkgs/nixos/modules/hardware/xpadneo.nix
new file mode 100644
index 000000000000..a66e81d8b15b
--- /dev/null
+++ b/nixpkgs/nixos/modules/hardware/xpadneo.nix
@@ -0,0 +1,30 @@
+{ config, lib, ... }:
+
+with lib;
+let
+  cfg = config.hardware.xpadneo;
+in
+{
+  options.hardware.xpadneo = {
+    enable = mkEnableOption (lib.mdDoc "the xpadneo driver for Xbox One wireless controllers");
+  };
+
+  config = mkIf cfg.enable {
+    boot = {
+      # Must disable Enhanced Retransmission Mode to support bluetooth pairing
+      # https://wiki.archlinux.org/index.php/Gamepad#Connect_Xbox_Wireless_Controller_with_Bluetooth
+      extraModprobeConfig =
+        mkIf
+          (config.hardware.bluetooth.enable &&
+            (lib.versionOlder config.boot.kernelPackages.kernel.version "5.12"))
+          "options bluetooth disable_ertm=1";
+
+      extraModulePackages = with config.boot.kernelPackages; [ xpadneo ];
+      kernelModules = [ "hid_xpadneo" ];
+    };
+  };
+
+  meta = {
+    maintainers = with maintainers; [ kira-bruneau ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/i18n/input-method/default.md b/nixpkgs/nixos/modules/i18n/input-method/default.md
new file mode 100644
index 000000000000..42cb8a8d7b6a
--- /dev/null
+++ b/nixpkgs/nixos/modules/i18n/input-method/default.md
@@ -0,0 +1,160 @@
+# Input Methods {#module-services-input-methods}
+
+Input methods are an operating system component that allows any data, such as
+keyboard strokes or mouse movements, to be received as input. In this way
+users can enter characters and symbols not found on their input devices.
+Using an input method is obligatory for any language that has more graphemes
+than there are keys on the keyboard.
+
+The following input methods are available in NixOS:
+
+  - IBus: The intelligent input bus.
+  - Fcitx5: The next generation of fcitx, addons (including engines, dictionaries, skins) can be added using `i18n.inputMethod.fcitx5.addons`.
+  - Nabi: A Korean input method based on XIM.
+  - Uim: The universal input method, a library with an XIM bridge.
+  - Hime: An extremely easy-to-use input method framework.
+  - Kime: Korean IME
+
+## IBus {#module-services-input-methods-ibus}
+
+IBus is an Intelligent Input Bus. It provides a full-featured and user-friendly
+input method user interface.
+
+The following snippet can be used to configure IBus:
+
+```
+i18n.inputMethod = {
+  enabled = "ibus";
+  ibus.engines = with pkgs.ibus-engines; [ anthy hangul mozc ];
+};
+```
+
+`i18n.inputMethod.ibus.engines` is optional and can be used
+to add extra IBus engines.
+
+Available extra IBus engines are:
+
+  - Anthy (`ibus-engines.anthy`): Anthy is a system for
+    Japanese input method. It converts Hiragana text to Kana Kanji mixed text.
+  - Hangul (`ibus-engines.hangul`): Korean input method.
+  - m17n (`ibus-engines.m17n`): m17n is an input method that
+    uses input methods and corresponding icons in the m17n database.
+  - mozc (`ibus-engines.mozc`): A Japanese input method from
+    Google.
+  - Table (`ibus-engines.table`): An input method that loads
+    tables of input methods.
+  - table-others (`ibus-engines.table-others`): Various
+    table-based input methods. To use this, and any other table-based input
+    methods, it must appear in the list of engines along with
+    `table`. For example:
+
+    ```
+    ibus.engines = with pkgs.ibus-engines; [ table table-others ];
+    ```
+
+To use any input method, the package must be added in the configuration, as
+shown above, and also (after running `nixos-rebuild`) the
+input method must be added from IBus' preference dialog.
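+
+The default IBus panel can also be replaced via `i18n.inputMethod.ibus.panel`.
+For example (one possible setup, using the kimpanel shipped with plasma-desktop;
+any compatible panel binary works):
+
+```
+i18n.inputMethod = {
+  enabled = "ibus";
+  ibus.panel = "${pkgs.plasma5Packages.plasma-desktop}/lib/libexec/kimpanel-ibus-panel";
+};
+```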
+
+### Troubleshooting {#module-services-input-methods-troubleshooting}
+
+If IBus works in some applications but not others, a likely cause of this
+is that IBus is depending on a different version of `glib`
+to what the applications are depending on. This can be checked by running
+`nix-store -q --requisites <path> | grep glib`,
+where `<path>` is the path of either IBus or an
+application in the Nix store. The `glib` packages must
+match exactly. If they do not, uninstalling and reinstalling the
+application is a likely fix.
+
+## Fcitx5 {#module-services-input-methods-fcitx}
+
+Fcitx5 is an input method framework with extension support. It has three
+built-in input method engines: Pinyin, QuWei and table-based input methods.
+
+The following snippet can be used to configure Fcitx5:
+
+```
+i18n.inputMethod = {
+  enabled = "fcitx5";
+  fcitx5.addons = with pkgs; [ fcitx5-mozc fcitx5-hangul fcitx5-m17n ];
+};
+```
+
+`i18n.inputMethod.fcitx5.addons` is optional and can be
+used to add extra Fcitx5 addons.
+
+Available extra Fcitx5 addons are:
+
+  - Anthy (`fcitx5-anthy`): Anthy is a system for
+    Japanese input method. It converts Hiragana text to Kana Kanji mixed text.
+  - Chewing (`fcitx5-chewing`): Chewing is an
+    intelligent Zhuyin input method. It is one of the most popular input
+    methods among Traditional Chinese Unix users.
+  - Hangul (`fcitx5-hangul`): Korean input method.
+  - Unikey (`fcitx5-unikey`): Vietnamese input method.
+  - m17n (`fcitx5-m17n`): m17n is an input method that
+    uses input methods and corresponding icons in the m17n database.
+  - mozc (`fcitx5-mozc`): A Japanese input method from
+    Google.
+  - table-others (`fcitx5-table-other`): Various
+    table-based input methods.
+  - chinese-addons (`fcitx5-chinese-addons`): Various Chinese input methods.
+  - rime (`fcitx5-rime`): RIME support for fcitx5.
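+
+Quick phrases can also be defined declaratively via
+`i18n.inputMethod.fcitx5.quickPhrase`; for example (a small sketch reusing the
+option's example values):
+
+```
+i18n.inputMethod.fcitx5.quickPhrase = {
+  smile = "(・∀・)";
+  angry = "( ̄ー ̄)";
+};
+```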
+
+## Nabi {#module-services-input-methods-nabi}
+
+Nabi is an easy-to-use Korean X input method. It allows you to enter
+phonetic Korean characters (hangul) and pictographic Korean characters
+(hanja).
+
+The following snippet can be used to configure Nabi:
+
+```
+i18n.inputMethod = {
+  enabled = "nabi";
+};
+```
+
+## Uim {#module-services-input-methods-uim}
+
+Uim (short for "universal input method") is a multilingual input method
+framework. Applications can use it through so-called bridges.
+
+The following snippet can be used to configure uim:
+
+```
+i18n.inputMethod = {
+  enabled = "uim";
+};
+```
+
+Note: The [](#opt-i18n.inputMethod.uim.toolbar) option can be
+used to choose the uim toolbar.
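+
+For example, to use the systray variant of the toolbar (any of the supported
+toolbar values works):
+
+```
+i18n.inputMethod = {
+  enabled = "uim";
+  uim.toolbar = "gtk-systray";
+};
+```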
+
+## Hime {#module-services-input-methods-hime}
+
+Hime is an extremely easy-to-use input method framework. It is lightweight,
+stable, powerful and supports many commonly used input methods, including
+Cangjie, Zhuyin, Dayi, Rank, Shrimp, Greek, Korean Pinyin, Latin Alphabet,
+etc...
+
+The following snippet can be used to configure Hime:
+
+```
+i18n.inputMethod = {
+  enabled = "hime";
+};
+```
+
+## Kime {#module-services-input-methods-kime}
+
+Kime is a Korean IME. It is written in Rust and aims to provide simple, safe and fast Korean typing.
+
+The following snippet can be used to configure Kime:
+
+```
+i18n.inputMethod = {
+  enabled = "kime";
+};
+```
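+
+The kime module also exposes a few options; for example (a sketch using the
+`i18n.inputMethod.kime` options, with the values shown in their examples):
+
+```
+i18n.inputMethod = {
+  enabled = "kime";
+  kime.daemonModules = [ "Xim" "Indicator" ];
+  kime.iconColor = "White";
+};
+```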
diff --git a/nixpkgs/nixos/modules/i18n/input-method/default.nix b/nixpkgs/nixos/modules/i18n/input-method/default.nix
new file mode 100644
index 000000000000..d967d4335c70
--- /dev/null
+++ b/nixpkgs/nixos/modules/i18n/input-method/default.nix
@@ -0,0 +1,71 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.i18n.inputMethod;
+
+  gtk2_cache = pkgs.runCommand "gtk2-immodule.cache"
+    { preferLocalBuild = true;
+      allowSubstitutes = false;
+      buildInputs = [ pkgs.gtk2 cfg.package ];
+    }
+    ''
+      mkdir -p $out/etc/gtk-2.0/
+      GTK_PATH=${cfg.package}/lib/gtk-2.0/ gtk-query-immodules-2.0 > $out/etc/gtk-2.0/immodules.cache
+    '';
+
+  gtk3_cache = pkgs.runCommand "gtk3-immodule.cache"
+    { preferLocalBuild = true;
+      allowSubstitutes = false;
+      buildInputs = [ pkgs.gtk3 cfg.package ];
+    }
+    ''
+      mkdir -p $out/etc/gtk-3.0/
+      GTK_PATH=${cfg.package}/lib/gtk-3.0/ gtk-query-immodules-3.0 > $out/etc/gtk-3.0/immodules.cache
+    '';
+
+in
+{
+  options.i18n = {
+    inputMethod = {
+      enabled = mkOption {
+        type    = types.nullOr (types.enum [ "ibus" "fcitx5" "nabi" "uim" "hime" "kime" ]);
+        default = null;
+        example = "fcitx5";
+        description = lib.mdDoc ''
+          Select the enabled input method. Input methods are software components used to enter symbols that are not available on standard input devices.
+
+          Input methods are especially useful for entering Chinese, Japanese and Korean characters.
+
+          Currently the following input methods are available in NixOS:
+
+          - ibus: The intelligent input bus, extra input engines can be added using `i18n.inputMethod.ibus.engines`.
+          - fcitx5: The next generation of fcitx, addons (including engines, dictionaries, skins) can be added using `i18n.inputMethod.fcitx5.addons`.
+          - nabi: A Korean input method based on XIM. Nabi doesn't support Qt 5.
+          - uim: The universal input method, a library with an XIM bridge. uim mainly supports Chinese, Japanese and Korean.
+          - hime: An extremely easy-to-use input method framework.
+          - kime: Korean IME.
+        '';
+      };
+
+      package = mkOption {
+        internal = true;
+        type     = types.nullOr types.path;
+        default  = null;
+        description = lib.mdDoc ''
+          The input method package.
+        '';
+      };
+    };
+  };
+
+  config = mkIf (cfg.enabled != null) {
+    environment.systemPackages = [ cfg.package gtk2_cache gtk3_cache ];
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ ericsagnes ];
+    doc = ./default.md;
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/i18n/input-method/fcitx5.nix b/nixpkgs/nixos/modules/i18n/input-method/fcitx5.nix
new file mode 100644
index 000000000000..3d52c08888ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/i18n/input-method/fcitx5.nix
@@ -0,0 +1,129 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  im = config.i18n.inputMethod;
+  cfg = im.fcitx5;
+  fcitx5Package = pkgs.fcitx5-with-addons.override { inherit (cfg) addons; };
+  settingsFormat = pkgs.formats.ini { };
+in
+{
+  options = {
+    i18n.inputMethod.fcitx5 = {
+      addons = mkOption {
+        type = with types; listOf package;
+        default = [ ];
+        example = literalExpression "with pkgs; [ fcitx5-rime ]";
+        description = lib.mdDoc ''
+          Enabled Fcitx5 addons.
+        '';
+      };
+      quickPhrase = mkOption {
+        type = with types; attrsOf str;
+        default = { };
+        example = literalExpression ''
+          {
+            smile = "(・∀・)";
+            angry = "( ̄ー ̄)";
+          }
+        '';
+        description = lib.mdDoc "Quick phrases.";
+      };
+      quickPhraseFiles = mkOption {
+        type = with types; attrsOf path;
+        default = { };
+        example = literalExpression ''
+          {
+            words = ./words.mb;
+            numbers = ./numbers.mb;
+          }
+        '';
+        description = lib.mdDoc "Quick phrase files.";
+      };
+      settings = {
+        globalOptions = lib.mkOption {
+          type = lib.types.submodule {
+            freeformType = settingsFormat.type;
+          };
+          default = { };
+          description = lib.mdDoc ''
+            The global options in the `config` file, in ini format.
+          '';
+        };
+        inputMethod = lib.mkOption {
+          type = lib.types.submodule {
+            freeformType = settingsFormat.type;
+          };
+          default = { };
+          description = lib.mdDoc ''
+            The input method configuration in the `profile` file, in ini format.
+          '';
+        };
+        addons = lib.mkOption {
+          type = with lib.types; (attrsOf anything);
+          default = { };
+          description = lib.mdDoc ''
+            The addon configurations in the `conf` folder, in ini format with global sections.
+            Each item is written to the corresponding file.
+          '';
+          example = literalExpression "{ pinyin.globalSection.EmojiEnabled = \"True\"; }";
+        };
+      };
+      ignoreUserConfig = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Ignore the user configuration files. **Warning**: When this is enabled, the
+          user config files are completely ignored and the user dictionary cannot be
+          saved or loaded.
+        '';
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "i18n" "inputMethod" "fcitx5" "enableRimeData" ] ''
+      RIME data is now included in `fcitx5-rime` by default, and can be customized using `fcitx5-rime.override { rimeDataPkgs = ...; }`
+    '')
+  ];
+
+  config = mkIf (im.enabled == "fcitx5") {
+    i18n.inputMethod.package = fcitx5Package;
+
+    i18n.inputMethod.fcitx5.addons = lib.optionals (cfg.quickPhrase != { }) [
+      (pkgs.writeTextDir "share/fcitx5/data/QuickPhrase.mb"
+        (lib.concatStringsSep "\n"
+          (lib.mapAttrsToList (name: value: "${name} ${value}") cfg.quickPhrase)))
+    ] ++ lib.optionals (cfg.quickPhraseFiles != { }) [
+      (pkgs.linkFarm "quickPhraseFiles" (lib.mapAttrs'
+        (name: value: lib.nameValuePair ("share/fcitx5/data/quickphrase.d/${name}.mb") value)
+        cfg.quickPhraseFiles))
+    ];
+    environment.etc =
+      let
+        optionalFile = p: f: v: lib.optionalAttrs (v != { }) {
+          "xdg/fcitx5/${p}".text = f v;
+        };
+      in
+      lib.attrsets.mergeAttrsList [
+        (optionalFile "config" (lib.generators.toINI { }) cfg.settings.globalOptions)
+        (optionalFile "profile" (lib.generators.toINI { }) cfg.settings.inputMethod)
+        (lib.concatMapAttrs
+          (name: value: optionalFile
+            "conf/${name}.conf"
+            (lib.generators.toINIWithGlobalSection { })
+            value)
+          cfg.settings.addons)
+      ];
+
+    environment.variables = {
+      GTK_IM_MODULE = "fcitx";
+      QT_IM_MODULE = "fcitx";
+      XMODIFIERS = "@im=fcitx";
+      QT_PLUGIN_PATH = [ "${fcitx5Package}/${pkgs.qt6.qtbase.qtPluginPrefix}" ];
+    } // lib.optionalAttrs cfg.ignoreUserConfig {
+      SKIP_FCITX_USER_PATH = "1";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/i18n/input-method/hime.nix b/nixpkgs/nixos/modules/i18n/input-method/hime.nix
new file mode 100644
index 000000000000..8482130db3e3
--- /dev/null
+++ b/nixpkgs/nixos/modules/i18n/input-method/hime.nix
@@ -0,0 +1,14 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+{
+  config = mkIf (config.i18n.inputMethod.enabled == "hime") {
+    i18n.inputMethod.package = pkgs.hime;
+    environment.variables = {
+      GTK_IM_MODULE = "hime";
+      QT_IM_MODULE  = "hime";
+      XMODIFIERS    = "@im=hime";
+    };
+    services.xserver.displayManager.sessionCommands = "${pkgs.hime}/bin/hime &";
+  };
+}
diff --git a/nixpkgs/nixos/modules/i18n/input-method/ibus.nix b/nixpkgs/nixos/modules/i18n/input-method/ibus.nix
new file mode 100644
index 000000000000..2a35afad2ac7
--- /dev/null
+++ b/nixpkgs/nixos/modules/i18n/input-method/ibus.nix
@@ -0,0 +1,85 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.i18n.inputMethod.ibus;
+  ibusPackage = pkgs.ibus-with-plugins.override { plugins = cfg.engines; };
+  ibusEngine = types.package // {
+    name  = "ibus-engine";
+    check = x: (lib.types.package.check x) && (attrByPath ["meta" "isIbusEngine"] false x);
+  };
+
+  impanel = optionalString (cfg.panel != null) "--panel=${cfg.panel}";
+
+  ibusAutostart = pkgs.writeTextFile {
+    name = "autostart-ibus-daemon";
+    destination = "/etc/xdg/autostart/ibus-daemon.desktop";
+    text = ''
+      [Desktop Entry]
+      Name=IBus
+      Type=Application
+      Exec=${ibusPackage}/bin/ibus-daemon --daemonize --xim ${impanel}
+      # GNOME will launch ibus using systemd
+      NotShowIn=GNOME;
+    '';
+  };
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "programs" "ibus" "plugins" ] [ "i18n" "inputMethod" "ibus" "engines" ])
+  ];
+
+  options = {
+    i18n.inputMethod.ibus = {
+      engines = mkOption {
+        type    = with types; listOf ibusEngine;
+        default = [];
+        example = literalExpression "with pkgs.ibus-engines; [ mozc hangul ]";
+        description =
+          let
+            enginesDrv = filterAttrs (const isDerivation) pkgs.ibus-engines;
+            engines = concatStringsSep ", "
+              (map (name: "`${name}`") (attrNames enginesDrv));
+          in
+            lib.mdDoc "Enabled IBus engines. Available engines are: ${engines}.";
+      };
+      panel = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = literalExpression ''"''${pkgs.plasma5Packages.plasma-desktop}/lib/libexec/kimpanel-ibus-panel"'';
+        description = lib.mdDoc "Replace the IBus panel with another panel.";
+      };
+    };
+  };
+
+  config = mkIf (config.i18n.inputMethod.enabled == "ibus") {
+    i18n.inputMethod.package = ibusPackage;
+
+    environment.systemPackages = [
+      ibusAutostart
+    ];
+
+    # Without dconf enabled it is impossible to use IBus
+    programs.dconf.enable = true;
+
+    programs.dconf.packages = [ ibusPackage ];
+
+    services.dbus.packages = [
+      ibusPackage
+    ];
+
+    environment.variables = {
+      GTK_IM_MODULE = "ibus";
+      QT_IM_MODULE = "ibus";
+      XMODIFIERS = "@im=ibus";
+    };
+
+    xdg.portal.extraPortals = mkIf config.xdg.portal.enable [
+      ibusPackage
+    ];
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/i18n/input-method/kime.nix b/nixpkgs/nixos/modules/i18n/input-method/kime.nix
new file mode 100644
index 000000000000..e82996926b28
--- /dev/null
+++ b/nixpkgs/nixos/modules/i18n/input-method/kime.nix
@@ -0,0 +1,53 @@
+{ config, pkgs, lib, generators, ... }:
+let imcfg = config.i18n.inputMethod;
+in {
+  imports = [
+    (lib.mkRemovedOptionModule [ "i18n" "inputMethod" "kime" "config" ] "Use i18n.inputMethod.kime.* instead")
+  ];
+
+  options.i18n.inputMethod.kime = {
+    daemonModules = lib.mkOption {
+      type = lib.types.listOf (lib.types.enum [ "Xim" "Wayland" "Indicator" ]);
+      default = [ "Xim" "Wayland" "Indicator" ];
+      example = [ "Xim" "Indicator" ];
+      description = lib.mdDoc ''
+        List of enabled daemon modules
+      '';
+    };
+    iconColor = lib.mkOption {
+      type = lib.types.enum [ "Black" "White" ];
+      default = "Black";
+      example = "White";
+      description = lib.mdDoc ''
+        Color of the indicator icon
+      '';
+    };
+    extraConfig = lib.mkOption {
+      type = lib.types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra kime configuration. Refer to <https://github.com/Riey/kime/blob/v${pkgs.kime.version}/docs/CONFIGURATION.md> for details on supported values.
+      '';
+    };
+  };
+
+  config = lib.mkIf (imcfg.enabled == "kime") {
+    i18n.inputMethod.package = pkgs.kime;
+
+    environment.variables = {
+      GTK_IM_MODULE = "kime";
+      QT_IM_MODULE  = "kime";
+      XMODIFIERS    = "@im=kime";
+    };
+
+    environment.etc."xdg/kime/config.yaml".text = ''
+      daemon:
+        modules: [${lib.concatStringsSep "," imcfg.kime.daemonModules}]
+      indicator:
+        icon_color: ${imcfg.kime.iconColor}
+    '' + imcfg.kime.extraConfig;
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/i18n/input-method/nabi.nix b/nixpkgs/nixos/modules/i18n/input-method/nabi.nix
new file mode 100644
index 000000000000..87620ae4e7b2
--- /dev/null
+++ b/nixpkgs/nixos/modules/i18n/input-method/nabi.nix
@@ -0,0 +1,16 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+{
+  config = mkIf (config.i18n.inputMethod.enabled == "nabi") {
+    i18n.inputMethod.package = pkgs.nabi;
+
+    environment.variables = {
+      GTK_IM_MODULE = "nabi";
+      QT_IM_MODULE  = "nabi";
+      XMODIFIERS    = "@im=nabi";
+    };
+
+    services.xserver.displayManager.sessionCommands = "${pkgs.nabi}/bin/nabi &";
+  };
+}
diff --git a/nixpkgs/nixos/modules/i18n/input-method/uim.nix b/nixpkgs/nixos/modules/i18n/input-method/uim.nix
new file mode 100644
index 000000000000..7225783b2a6f
--- /dev/null
+++ b/nixpkgs/nixos/modules/i18n/input-method/uim.nix
@@ -0,0 +1,37 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.i18n.inputMethod.uim;
+in
+{
+  options = {
+
+    i18n.inputMethod.uim = {
+      toolbar = mkOption {
+        type    = types.enum [ "gtk" "gtk3" "gtk-systray" "gtk3-systray" "qt5" ];
+        default = "gtk";
+        example = "gtk-systray";
+        description = lib.mdDoc ''
+          selected UIM toolbar.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf (config.i18n.inputMethod.enabled == "uim") {
+    i18n.inputMethod.package = pkgs.uim;
+
+    environment.variables = {
+      GTK_IM_MODULE = "uim";
+      QT_IM_MODULE  = "uim";
+      XMODIFIERS    = "@im=uim";
+    };
+    services.xserver.displayManager.sessionCommands = ''
+      ${pkgs.uim}/bin/uim-xim &
+      ${pkgs.uim}/bin/uim-toolbar-${cfg.toolbar} &
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/image/amend-repart-definitions.py b/nixpkgs/nixos/modules/image/amend-repart-definitions.py
new file mode 100644
index 000000000000..fa9b1544ae85
--- /dev/null
+++ b/nixpkgs/nixos/modules/image/amend-repart-definitions.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+
+"""Amend systemd-repart definiton files.
+
+In order to avoid Import-From-Derivation (IFD) when building images with
+systemd-repart, the definition files created by Nix need to be amended with the
+store paths from the closure.
+
+This is achieved by adding CopyFiles= instructions to the definition files.
+
+The arbitrary files configured via `contents` are also added to the definition
+files using the same mechanism.
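+
+For illustration, a closure entry /nix/store/<hash>-foo results in a line like
+    CopyFiles=/nix/store/<hash>-foo:/<hash>-foo
+when stripNixStorePrefix is set, and CopyFiles=/nix/store/<hash>-foo otherwise.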
+"""
+
+import json
+import sys
+import shutil
+from pathlib import Path
+
+
+def add_contents_to_definition(
+    definition: Path, contents: dict[str, dict[str, str]] | None
+) -> None:
+    """Add CopyFiles= instructions to a definition for all files in contents."""
+    if not contents:
+        return
+
+    copy_files_lines: list[str] = []
+    for target, options in contents.items():
+        source = options["source"]
+
+        copy_files_lines.append(f"CopyFiles={source}:{target}\n")
+
+    with open(definition, "a") as f:
+        f.writelines(copy_files_lines)
+
+
+def add_closure_to_definition(
+    definition: Path, closure: Path | None, strip_nix_store_prefix: bool | None
+) -> None:
+    """Add CopyFiles= instructions to a definition for all paths in the closure.
+
+    If strip_nix_store_prefix is True, `/nix/store` is stripped from the target path.
+    """
+    if not closure:
+        return
+
+    copy_files_lines: list[str] = []
+    with open(closure, "r") as f:
+        for line in f:
+            if not isinstance(line, str):
+                continue
+
+            source = Path(line.strip())
+            target = str(source.relative_to("/nix/store/"))
+            target = f":/{target}" if strip_nix_store_prefix else ""
+
+            copy_files_lines.append(f"CopyFiles={source}{target}\n")
+
+    with open(definition, "a") as f:
+        f.writelines(copy_files_lines)
+
+
+def main() -> None:
+    """Amend the provided repart definitions by adding CopyFiles= instructions.
+
+    For each file specified in the `contents` field of a partition in the
+    partition config file, a `CopyFiles=` instruction is added to the
+    corresponding definition file.
+
+    The same is done for every store path of the `closure` field.
+
+    Print the path to a directory that contains the amended repart
+    definitions to stdout.
+    """
+    if len(sys.argv) < 2 or not sys.argv[1]:
+        print("No partition config file was supplied.")
+        sys.exit(1)
+    partition_config_file = sys.argv[1]
+
+    if len(sys.argv) < 3 or not sys.argv[2]:
+        print("No repart definitions were supplied.")
+        sys.exit(1)
+    repart_definitions = sys.argv[2]
+
+    with open(partition_config_file, "rb") as f:
+        partition_config = json.load(f)
+
+    if not partition_config:
+        print("Partition config is empty.")
+        sys.exit(1)
+
+    target_dir = Path("amended-repart.d")
+    target_dir.mkdir()
+    shutil.copytree(repart_definitions, target_dir, dirs_exist_ok=True)
+
+    for name, config in partition_config.items():
+        definition = target_dir.joinpath(f"{name}.conf")
+        definition.chmod(0o644)
+
+        contents = config.get("contents")
+        add_contents_to_definition(definition, contents)
+
+        closure = config.get("closure")
+        strip_nix_store_prefix = config.get("stripNixStorePrefix")
+        add_closure_to_definition(definition, closure, strip_nix_store_prefix)
+
+    print(target_dir.absolute())
+
+
+if __name__ == "__main__":
+    main()
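For orientation (not part of the patch): the script reads a JSON rendering of the partition attrset produced by repart.nix below. A hedged sketch of that data in Nix form, with placeholder store paths, and the kind of line the script appends:

    # Hypothetical partition config before builtins.toJSON; store paths are placeholders.
    {
      "20-root" = {
        storePaths = [ "/nix/store/...-nixos-system" ];
        closure = "/nix/store/...-closure-info/store-paths";
        stripNixStorePrefix = false;
        repartConfig = { Type = "root"; Format = "ext4"; };
      };
    }
    # For each path listed in the closure file, the script appends a line like
    #   CopyFiles=/nix/store/...-some-path
    # to the matching 20-root.conf definition, avoiding import-from-derivation.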
diff --git a/nixpkgs/nixos/modules/image/repart.nix b/nixpkgs/nixos/modules/image/repart.nix
new file mode 100644
index 000000000000..41e6110885b8
--- /dev/null
+++ b/nixpkgs/nixos/modules/image/repart.nix
@@ -0,0 +1,214 @@
+# This module exposes options to build a disk image with a GUID Partition Table
+# (GPT). It uses systemd-repart to build the image.
+
+{ config, pkgs, lib, utils, ... }:
+
+let
+  cfg = config.image.repart;
+
+  partitionOptions = {
+    options = {
+      storePaths = lib.mkOption {
+        type = with lib.types; listOf path;
+        default = [ ];
+        description = lib.mdDoc "The store paths to include in the partition.";
+      };
+
+      stripNixStorePrefix = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to strip `/nix/store/` from the store paths. This is useful
+          when you want to build a partition that only contains store paths and
+          is mounted under `/nix/store`.
+        '';
+      };
+
+      contents = lib.mkOption {
+        type = with lib.types; attrsOf (submodule {
+          options = {
+            source = lib.mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path of the source file.";
+            };
+          };
+        });
+        default = { };
+        example = lib.literalExpression ''
+          {
+            "/EFI/BOOT/BOOTX64.EFI".source =
+              "''${pkgs.systemd}/lib/systemd/boot/efi/systemd-bootx64.efi";
+
+            "/loader/entries/nixos.conf".source = systemdBootEntry;
+          }
+        '';
+        description = lib.mdDoc "The contents to end up in the filesystem image.";
+      };
+
+      repartConfig = lib.mkOption {
+        type = with lib.types; attrsOf (oneOf [ str int bool ]);
+        example = {
+          Type = "home";
+          SizeMinBytes = "512M";
+          SizeMaxBytes = "2G";
+        };
+        description = lib.mdDoc ''
+          Specify the repart options for a partition as structured settings.
+          See <https://www.freedesktop.org/software/systemd/man/repart.d.html>
+          for all available options.
+        '';
+      };
+    };
+  };
+in
+{
+  options.image.repart = {
+
+    name = lib.mkOption {
+      type = lib.types.str;
+      description = lib.mdDoc "The name of the image.";
+    };
+
+    seed = lib.mkOption {
+      type = with lib.types; nullOr str;
+      # Generated with `uuidgen`. Random but fixed to improve reproducibility.
+      default = "0867da16-f251-457d-a9e8-c31f9a3c220b";
+      description = lib.mdDoc ''
+        A UUID to use as a seed. You can set this to `null` to explicitly
+        randomize the partition UUIDs.
+      '';
+    };
+
+    split = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables generation of split artifacts from partitions. If enabled, for
+        each partition with SplitName= set, a separate output file containing
+        just the contents of that partition is generated.
+      '';
+    };
+
+    package = lib.mkPackageOption pkgs "systemd-repart" {
+      default = "systemd";
+      example = "pkgs.systemdMinimal.override { withCryptsetup = true; }";
+    };
+
+    partitions = lib.mkOption {
+      type = with lib.types; attrsOf (submodule partitionOptions);
+      default = { };
+      example = lib.literalExpression ''
+        {
+          "10-esp" = {
+            contents = {
+              "/EFI/BOOT/BOOTX64.EFI".source =
+                "''${pkgs.systemd}/lib/systemd/boot/efi/systemd-bootx64.efi";
+            };
+            repartConfig = {
+              Type = "esp";
+              Format = "fat";
+            };
+          };
+          "20-root" = {
+            storePaths = [ config.system.build.toplevel ];
+            repartConfig = {
+              Type = "root";
+              Format = "ext4";
+              Minimize = "guess";
+            };
+          };
+        };
+      '';
+      description = lib.mdDoc ''
+        Specify partitions as an attribute set, mapping each partition name
+        to its configuration.
+      '';
+    };
+
+  };
+
+  config = {
+
+    system.build.image =
+      let
+        fileSystemToolMapping = with pkgs; {
+          "vfat" = [ dosfstools mtools ];
+          "ext4" = [ e2fsprogs.bin ];
+          "squashfs" = [ squashfsTools ];
+          "erofs" = [ erofs-utils ];
+          "btrfs" = [ btrfs-progs ];
+          "xfs" = [ xfsprogs ];
+        };
+
+        fileSystems = lib.filter
+          (f: f != null)
+          (lib.mapAttrsToList (_n: v: v.repartConfig.Format or null) cfg.partitions);
+
+        fileSystemTools = builtins.concatMap (f: fileSystemToolMapping."${f}") fileSystems;
+
+
+        makeClosure = paths: pkgs.closureInfo { rootPaths = paths; };
+
+        # Add the closure of the provided Nix store paths to cfg.partitions so
+        # that amend-repart-definitions.py can read it.
+        addClosure = _name: partitionConfig: partitionConfig // (
+          lib.optionalAttrs
+            (partitionConfig.storePaths or [ ] != [ ])
+            { closure = "${makeClosure partitionConfig.storePaths}/store-paths"; }
+        );
+
+
+        finalPartitions = lib.mapAttrs addClosure cfg.partitions;
+
+
+        amendRepartDefinitions = pkgs.runCommand "amend-repart-definitions.py"
+          {
+            nativeBuildInputs = with pkgs; [ black ruff mypy ];
+            buildInputs = [ pkgs.python3 ];
+          } ''
+          install ${./amend-repart-definitions.py} $out
+          patchShebangs --host $out
+
+          black --check --diff $out
+          ruff --line-length 88 $out
+          mypy --strict $out
+        '';
+
+        format = pkgs.formats.ini { };
+
+        definitionsDirectory = utils.systemdUtils.lib.definitions
+          "repart.d"
+          format
+          (lib.mapAttrs (_n: v: { Partition = v.repartConfig; }) finalPartitions);
+
+        partitions = pkgs.writeText "partitions.json" (builtins.toJSON finalPartitions);
+      in
+      pkgs.runCommand cfg.name
+        {
+          nativeBuildInputs = [
+            cfg.package
+            pkgs.fakeroot
+            pkgs.util-linux
+          ] ++ fileSystemTools;
+        } ''
+        amendedRepartDefinitions=$(${amendRepartDefinitions} ${partitions} ${definitionsDirectory})
+
+        mkdir -p $out
+        cd $out
+
+        unshare --map-root-user fakeroot systemd-repart \
+          --dry-run=no \
+          --empty=create \
+          --size=auto \
+          --seed="${cfg.seed}" \
+          --definitions="$amendedRepartDefinitions" \
+          --split="${lib.boolToString cfg.split}" \
+          --json=pretty \
+          image.raw \
+          | tee repart-output.json
+      '';
+
+    meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  };
+}
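A hedged sketch of how the new options fit together in a configuration (the image name is illustrative); `config.system.build.image` is then the derivation to build, and the resulting disk image ends up as `image.raw` in its output:

    { config, ... }:
    {
      image.repart = {
        name = "nixos-appliance";  # illustrative name
        partitions."20-root" = {
          storePaths = [ config.system.build.toplevel ];
          repartConfig = {
            Type = "root";
            Format = "ext4";
            Minimize = "guess";
          };
        };
      };
    }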
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/channel.nix b/nixpkgs/nixos/modules/installer/cd-dvd/channel.nix
new file mode 100644
index 000000000000..bc70dc985fe0
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/channel.nix
@@ -0,0 +1,64 @@
+# Provide an initial copy of the NixOS channel so that the user
+# doesn't need to run "nix-channel --update" first.
+
+{ config, lib, pkgs, ... }:
+
+let
+  # This is copied into the installer image, so it's important that it is filtered
+  # to avoid including a large .git directory.
+  # We also want the source name to be normalised to "source" to avoid depending on the
+  # location of nixpkgs.
+  # In the future we might want to expose the ISO image from the flake and use
+  # `self.outPath` directly instead.
+  nixpkgs = lib.cleanSource pkgs.path;
+
+  # We need a copy of the Nix expressions for Nixpkgs and NixOS on the
+  # CD.  These are installed into the "nixos" channel of the root
+  # user, as expected by nixos-rebuild/nixos-install. FIXME: merge
+  # with make-channel.nix.
+  channelSources = pkgs.runCommand "nixos-${config.system.nixos.version}"
+    { preferLocalBuild = true; }
+    ''
+      mkdir -p $out
+      cp -prd ${nixpkgs.outPath} $out/nixos
+      chmod -R u+w $out/nixos
+      if [ ! -e $out/nixos/nixpkgs ]; then
+        ln -s . $out/nixos/nixpkgs
+      fi
+      ${lib.optionalString (config.system.nixos.revision != null) ''
+        echo -n ${config.system.nixos.revision} > $out/nixos/.git-revision
+      ''}
+      echo -n ${config.system.nixos.versionSuffix} > $out/nixos/.version-suffix
+      echo ${config.system.nixos.versionSuffix} | sed -e s/pre// > $out/nixos/svn-revision
+    '';
+in
+
+{
+  options.system.installer.channel.enable = (lib.mkEnableOption "bundling NixOS/Nixpkgs channel in the installer") // { default = true; };
+  config = lib.mkIf config.system.installer.channel.enable {
+    # Pin the nixpkgs flake in the installer to our cleaned up nixpkgs source.
+    # FIXME: this might be surprising and is really only needed for offline installations,
+    # see discussion in https://github.com/NixOS/nixpkgs/pull/204178#issuecomment-1336289021
+    nix.registry.nixpkgs.to = {
+      type = "path";
+      path = "${channelSources}/nixos";
+    };
+
+    # Provide the NixOS/Nixpkgs sources in /etc/nixos.  This is required
+    # for nixos-install.
+    boot.postBootCommands = lib.mkAfter
+      ''
+        if ! [ -e /var/lib/nixos/did-channel-init ]; then
+          echo "unpacking the NixOS/Nixpkgs sources..."
+          mkdir -p /nix/var/nix/profiles/per-user/root
+          ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/per-user/root/channels \
+            -i ${channelSources} --quiet --option build-use-substitutes false \
+            ${lib.optionalString config.boot.initrd.systemd.enable "--option sandbox false"} # There's an issue with pivot_root
+          mkdir -m 0700 -p /root/.nix-defexpr
+          ln -s /nix/var/nix/profiles/per-user/root/channels /root/.nix-defexpr/channels
+          mkdir -m 0755 -p /var/lib/nixos
+          touch /var/lib/nixos/did-channel-init
+        fi
+      '';
+  };
+}
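Since `system.installer.channel.enable` defaults to true, an installer image that does not need the bundled offline sources can opt out; a minimal sketch:

    {
      system.installer.channel.enable = false;
    }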
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-base.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-base.nix
new file mode 100644
index 000000000000..3f92b779d60a
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-base.nix
@@ -0,0 +1,50 @@
+# This module contains the basic configuration for building a NixOS
+# installation CD.
+
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+{
+  imports =
+    [ ./iso-image.nix
+
+      # Profiles of this basic installation CD.
+      ../../profiles/all-hardware.nix
+      ../../profiles/base.nix
+      ../../profiles/installation-device.nix
+    ];
+
+  # Adds terminus_font for people with HiDPI displays
+  console.packages = options.console.packages.default ++ [ pkgs.terminus_font ];
+
+  # ISO naming.
+  isoImage.isoName = "${config.isoImage.isoBaseName}-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.iso";
+
+  # EFI booting
+  isoImage.makeEfiBootable = true;
+
+  # USB booting
+  isoImage.makeUsbBootable = true;
+
+  # Add Memtest86+ to the CD.
+  boot.loader.grub.memtest86.enable = true;
+
+  # Installation media cannot rely on a file system layout defined by the
+  # host config, since a fresh machine has not been formatted yet.
+  swapDevices = mkImageMediaOverride [ ];
+  fileSystems = mkImageMediaOverride config.lib.isoFileSystems;
+
+  boot.postBootCommands = ''
+    for o in $(</proc/cmdline); do
+      case "$o" in
+        live.nixos.passwd=*)
+          set -- $(IFS==; echo $o)
+          echo "nixos:$2" | ${pkgs.shadow}/bin/chpasswd
+          ;;
+      esac
+    done
+  '';
+
+  system.stateVersion = lib.mkDefault lib.trivial.release;
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix
new file mode 100644
index 000000000000..4a00c52916f6
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-base.nix
@@ -0,0 +1,69 @@
+# This module contains the basic configuration for building a graphical NixOS
+# installation CD.
+
+{ lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [ ./installation-cd-base.nix ];
+
+  # Whitelist wheel users to do anything
+  # This is useful for things like pkexec
+  #
+  # WARNING: this is dangerous for systems
+  # outside the installation-cd and shouldn't
+  # be used anywhere else.
+  security.polkit.extraConfig = ''
+    polkit.addRule(function(action, subject) {
+      if (subject.isInGroup("wheel")) {
+        return polkit.Result.YES;
+      }
+    });
+  '';
+
+  services.xserver.enable = true;
+
+  # Provide networkmanager for easy wireless configuration.
+  networking.networkmanager.enable = true;
+  networking.wireless.enable = mkImageMediaOverride false;
+
+  # KDE complains if power management is disabled (to be precise, if
+  # there is no power management backend such as upower).
+  powerManagement.enable = true;
+
+  # Enable sound in graphical iso's.
+  hardware.pulseaudio.enable = true;
+
+  # VM guest additions to improve host-guest interaction
+  services.spice-vdagentd.enable = true;
+  services.qemuGuest.enable = true;
+  virtualisation.vmware.guest.enable = pkgs.stdenv.hostPlatform.isx86;
+  virtualisation.hypervGuest.enable = true;
+  services.xe-guest-utilities.enable = pkgs.stdenv.hostPlatform.isx86;
+  # The VirtualBox guest additions rely on an out-of-tree kernel module
+  # which lags behind kernel releases, potentially causing broken builds.
+  virtualisation.virtualbox.guest.enable = false;
+
+  # Enable plymouth
+  boot.plymouth.enable = true;
+
+  environment.defaultPackages = with pkgs; [
+    # Include gparted for partitioning disks.
+    gparted
+
+    # Include some editors.
+    vim
+    nano
+
+    # Include some version control tools.
+    git
+    rsync
+
+    # Firefox for reading the manual.
+    firefox
+
+    glxinfo
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares-gnome.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares-gnome.nix
new file mode 100644
index 000000000000..d1a4c27432c2
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares-gnome.nix
@@ -0,0 +1,54 @@
+# This module defines a NixOS installation CD that contains GNOME.
+
+{ pkgs, ... }:
+
+{
+  imports = [ ./installation-cd-graphical-calamares.nix ];
+
+  isoImage.edition = "gnome";
+
+  services.xserver.desktopManager.gnome = {
+    # Add Firefox and other tools useful for installation to the launcher
+    favoriteAppsOverride = ''
+      [org.gnome.shell]
+      favorite-apps=[ 'firefox.desktop', 'nixos-manual.desktop', 'org.gnome.Console.desktop', 'org.gnome.Nautilus.desktop', 'gparted.desktop', 'io.calamares.calamares.desktop' ]
+    '';
+
+    # Override GNOME defaults to disable GNOME tour and disable suspend
+    extraGSettingsOverrides = ''
+      [org.gnome.shell]
+      welcome-dialog-last-shown-version='9999999999'
+      [org.gnome.desktop.session]
+      idle-delay=0
+      [org.gnome.settings-daemon.plugins.power]
+      sleep-inactive-ac-type='nothing'
+      sleep-inactive-battery-type='nothing'
+    '';
+
+    extraGSettingsOverridePackages = [ pkgs.gnome.gnome-settings-daemon ];
+
+    enable = true;
+  };
+
+  # Fix scaling for calamares on wayland
+  environment.variables = {
+    QT_QPA_PLATFORM = "$([[ $XDG_SESSION_TYPE = \"wayland\" ]] && echo \"wayland\")";
+  };
+
+  services.xserver.displayManager = {
+    gdm = {
+      enable = true;
+      # autoSuspend makes the machine automatically suspend after inactivity.
+      # Someone could try to ssh into the machine and run into issues
+      # because it has been suspended due to inactivity.
+      # See:
+      # * https://github.com/NixOS/nixpkgs/pull/63790
+      # * https://gitlab.gnome.org/GNOME/gnome-control-center/issues/22
+      autoSuspend = false;
+    };
+    autoLogin = {
+      enable = true;
+      user = "nixos";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares-plasma5.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares-plasma5.nix
new file mode 100644
index 000000000000..a4c46d58c85a
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares-plasma5.nix
@@ -0,0 +1,49 @@
+# This module defines a NixOS installation CD that contains X11 and
+# Plasma 5.
+
+{ pkgs, ... }:
+
+{
+  imports = [ ./installation-cd-graphical-calamares.nix ];
+
+  isoImage.edition = "plasma5";
+
+  services.xserver = {
+    desktopManager.plasma5 = {
+      enable = true;
+    };
+
+    # Automatically login as nixos.
+    displayManager = {
+      sddm.enable = true;
+      autoLogin = {
+        enable = true;
+        user = "nixos";
+      };
+    };
+  };
+
+  environment.systemPackages = with pkgs; [
+    # Graphical text editor
+    kate
+  ];
+
+  system.activationScripts.installerDesktop = let
+
+    # Comes from documentation.nix when xserver and nixos.enable are true.
+    manualDesktopFile = "/run/current-system/sw/share/applications/nixos-manual.desktop";
+
+    homeDir = "/home/nixos/";
+    desktopDir = homeDir + "Desktop/";
+
+  in ''
+    mkdir -p ${desktopDir}
+    chown nixos ${homeDir} ${desktopDir}
+
+    ln -sfT ${manualDesktopFile} ${desktopDir + "nixos-manual.desktop"}
+    ln -sfT ${pkgs.gparted}/share/applications/gparted.desktop ${desktopDir + "gparted.desktop"}
+    ln -sfT ${pkgs.konsole}/share/applications/org.kde.konsole.desktop ${desktopDir + "org.kde.konsole.desktop"}
+    ln -sfT ${pkgs.calamares-nixos}/share/applications/io.calamares.calamares.desktop ${desktopDir + "io.calamares.calamares.desktop"}
+  '';
+
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares.nix
new file mode 100644
index 000000000000..3f3571d25382
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-calamares.nix
@@ -0,0 +1,23 @@
+# This module adds the calamares installer to the basic graphical NixOS
+# installation CD.
+
+{ pkgs, ... }:
+let
+  calamares-nixos-autostart = pkgs.makeAutostartItem { name = "io.calamares.calamares"; package = pkgs.calamares-nixos; };
+in
+{
+  imports = [ ./installation-cd-graphical-base.nix ];
+
+  environment.systemPackages = with pkgs; [
+    # Calamares for graphical installation
+    libsForQt5.kpmcore
+    calamares-nixos
+    calamares-nixos-autostart
+    calamares-nixos-extensions
+    # Get list of locales
+    glibcLocales
+  ];
+
+  # Support choosing from any locale
+  i18n.supportedLocales = [ "all" ];
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
new file mode 100644
index 000000000000..573b31b439c2
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-gnome.nix
@@ -0,0 +1,36 @@
+# This module defines a NixOS installation CD that contains GNOME.
+
+{ ... }:
+
+{
+  imports = [ ./installation-cd-graphical-base.nix ];
+
+  isoImage.edition = "gnome";
+
+  services.xserver.desktopManager.gnome = {
+    # Add Firefox and other tools useful for installation to the launcher
+    favoriteAppsOverride = ''
+      [org.gnome.shell]
+      favorite-apps=[ 'firefox.desktop', 'nixos-manual.desktop', 'org.gnome.Terminal.desktop', 'org.gnome.Nautilus.desktop', 'gparted.desktop' ]
+    '';
+    enable = true;
+  };
+
+  services.xserver.displayManager = {
+    gdm = {
+      enable = true;
+      # autoSuspend makes the machine automatically suspend after inactivity.
+      # Someone could try to ssh into the machine and run into issues
+      # because it has been suspended due to inactivity.
+      # See:
+      # * https://github.com/NixOS/nixpkgs/pull/63790
+      # * https://gitlab.gnome.org/GNOME/gnome-control-center/issues/22
+      autoSuspend = false;
+    };
+    autoLogin = {
+      enable = true;
+      user = "nixos";
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5-new-kernel.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5-new-kernel.nix
new file mode 100644
index 000000000000..d98325a99ac2
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5-new-kernel.nix
@@ -0,0 +1,7 @@
+{ pkgs, ... }:
+
+{
+  imports = [ ./installation-cd-graphical-plasma5.nix ];
+
+  boot.kernelPackages = pkgs.linuxPackages_latest;
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5.nix
new file mode 100644
index 000000000000..5c7617c9f8c1
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-graphical-plasma5.nix
@@ -0,0 +1,48 @@
+# This module defines a NixOS installation CD that contains X11 and
+# Plasma 5.
+
+{ pkgs, ... }:
+
+{
+  imports = [ ./installation-cd-graphical-base.nix ];
+
+  isoImage.edition = "plasma5";
+
+  services.xserver = {
+    desktopManager.plasma5 = {
+      enable = true;
+    };
+
+    # Automatically login as nixos.
+    displayManager = {
+      sddm.enable = true;
+      autoLogin = {
+        enable = true;
+        user = "nixos";
+      };
+    };
+  };
+
+  environment.systemPackages = with pkgs; [
+    # Graphical text editor
+    kate
+  ];
+
+  system.activationScripts.installerDesktop = let
+
+    # Comes from documentation.nix when xserver and nixos.enable are true.
+    manualDesktopFile = "/run/current-system/sw/share/applications/nixos-manual.desktop";
+
+    homeDir = "/home/nixos/";
+    desktopDir = homeDir + "Desktop/";
+
+  in ''
+    mkdir -p ${desktopDir}
+    chown nixos ${homeDir} ${desktopDir}
+
+    ln -sfT ${manualDesktopFile} ${desktopDir + "nixos-manual.desktop"}
+    ln -sfT ${pkgs.gparted}/share/applications/gparted.desktop ${desktopDir + "gparted.desktop"}
+    ln -sfT ${pkgs.konsole}/share/applications/org.kde.konsole.desktop ${desktopDir + "org.kde.konsole.desktop"}
+  '';
+
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix
new file mode 100644
index 000000000000..9d09cdbe0206
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix
@@ -0,0 +1,15 @@
+{ pkgs, ... }:
+
+{
+  imports = [ ./installation-cd-minimal-new-kernel.nix ];
+
+  # Makes `availableOn` fail for zfs, see <nixos/modules/profiles/base.nix>.
+  # This is a workaround since we cannot remove the `"zfs"` string from `supportedFilesystems`.
+  # The proper fix would be to make `supportedFilesystems` an attrset of booleans,
+  # which we could then `lib.mkForce` to false.
+  nixpkgs.overlays = [(final: super: {
+    zfs = super.zfs.overrideAttrs(_: {
+      meta.platforms = [];
+    });
+  })];
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel.nix
new file mode 100644
index 000000000000..3911a2b01b1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal-new-kernel.nix
@@ -0,0 +1,7 @@
+{ pkgs, ... }:
+
+{
+  imports = [ ./installation-cd-minimal.nix ];
+
+  boot.kernelPackages = pkgs.linuxPackages_latest;
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix
new file mode 100644
index 000000000000..29afdd471091
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix
@@ -0,0 +1,24 @@
+# This module defines a small NixOS installation CD.  It does not
+# contain any graphical stuff.
+
+{ lib, ... }:
+
+{
+  imports = [
+    ../../profiles/minimal.nix
+    ./installation-cd-base.nix
+  ];
+
+  # Causes a lot of uncached builds for a negligible decrease in size.
+  environment.noXlibs = lib.mkOverride 500 false;
+
+  documentation.man.enable = lib.mkOverride 500 true;
+
+  # Although we don't really need HTML documentation in the minimal installer,
+  # not including it may cause annoying cache misses in the case of the NixOS manual.
+  documentation.doc.enable = lib.mkOverride 500 true;
+
+  fonts.fontconfig.enable = lib.mkForce false;
+
+  isoImage.edition = lib.mkForce "minimal";
+}
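For reference, a minimal sketch of a custom ISO configuration built on top of this module (the import path and extra package are illustrative); the ISO derivation itself is exposed as `config.system.build.isoImage` by iso-image.nix below:

    { pkgs, ... }:
    {
      imports = [
        <nixpkgs/nixos/modules/installer/cd-dvd/installation-cd-minimal.nix>
      ];
      # Extra packages available on the live system.
      environment.systemPackages = [ pkgs.git ];
    }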
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/iso-image.nix b/nixpkgs/nixos/modules/installer/cd-dvd/iso-image.nix
new file mode 100644
index 000000000000..0b5135c088ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/iso-image.nix
@@ -0,0 +1,906 @@
+# This module creates a bootable ISO image containing the given NixOS
+# configuration.  The derivation for the ISO image will be placed in
+# config.system.build.isoImage.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  /**
+   * Given a list of `options`, concatenates the result of mapping each option
+   * to a menuentry for use in GRUB.
+   *
+   *  * defaults: {name, image, params, initrd}
+   *  * options: [ option... ]
+   *  * option: {name, params, class}
+   */
+  menuBuilderGrub2 =
+  defaults: options: lib.concatStrings
+    (
+      map
+      (option: ''
+        menuentry '${defaults.name} ${
+        # Name appended to menuentry defaults to params if no specific name given.
+        option.name or (optionalString (option ? params) "(${option.params})")
+        }' ${optionalString (option ? class) " --class ${option.class}"} {
+          # Fallback to UEFI console for boot, efifb sometimes has difficulties.
+          terminal_output console
+
+          linux ${defaults.image} \''${isoboot} ${defaults.params} ${
+            option.params or ""
+          }
+          initrd ${defaults.initrd}
+        }
+      '')
+      options
+    )
+  ;
+
+  /**
+   * Builds the default options.
+   */
+  buildMenuGrub2 = buildMenuAdditionalParamsGrub2 "";
+
+  targetArch =
+    if config.boot.loader.grub.forcei686 then
+      "ia32"
+    else
+      pkgs.stdenv.hostPlatform.efiArch;
+
+  /**
+   * Given params to add to `params`, build a set of default options.
+   * Use this one when creating a variant (e.g. hidpi)
+   */
+  buildMenuAdditionalParamsGrub2 = additional:
+  let
+    finalCfg = {
+      name = "${config.isoImage.prependToMenuLabel}${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel}";
+      params = "init=${config.system.build.toplevel}/init ${additional} ${toString config.boot.kernelParams}";
+      image = "/boot/${config.system.boot.loader.kernelFile}";
+      initrd = "/boot/initrd";
+    };
+
+  in
+    menuBuilderGrub2
+    finalCfg
+    [
+      { class = "installer"; }
+      { class = "nomodeset"; params = "nomodeset"; }
+      { class = "copytoram"; params = "copytoram"; }
+      { class = "debug";     params = "debug"; }
+    ]
+  ;
+
+  # Timeout in syslinux is in units of 1/10 of a second.
+  # null means max timeout (35996, just under 1h in 1/10 seconds)
+  # 0 means disable timeout
+  syslinuxTimeout = if config.boot.loader.timeout == null then
+      35996
+    else
+      config.boot.loader.timeout * 10;
+
+  # Timeout in grub is in seconds.
+  # null means no timeout (wait indefinitely)
+  # 0 means boot the default entry immediately
+  grubEfiTimeout = if config.boot.loader.timeout == null then
+      -1
+    else
+      config.boot.loader.timeout;
+
+  # The configuration file for syslinux.
+
+  # Notes on syslinux configuration and UNetbootin compatibility:
+  #   * Do not use '/syslinux/syslinux.cfg' as the path for this
+  #     configuration. UNetbootin will not parse the file and use it as-is.
+  #     This results in a broken configuration if the partition label does
+  #     not match the specified config.isoImage.volumeID. For this reason
+  #     we're using '/isolinux/isolinux.cfg'.
+  #   * Use APPEND instead of adding command-line arguments directly after
+  #     the LINUX entries.
+  #   * COM32 entries (chainload, reboot, poweroff) are not recognized. They
+  #     result in incorrect boot entries.
+
+  baseIsolinuxCfg = ''
+    SERIAL 0 115200
+    TIMEOUT ${builtins.toString syslinuxTimeout}
+    UI vesamenu.c32
+    MENU BACKGROUND /isolinux/background.png
+
+    ${config.isoImage.syslinuxTheme}
+
+    DEFAULT boot
+
+    LABEL boot
+    MENU LABEL ${config.isoImage.prependToMenuLabel}${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel}
+    LINUX /boot/${config.system.boot.loader.kernelFile}
+    APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
+    INITRD /boot/${config.system.boot.loader.initrdFile}
+
+    # A variant to boot with 'nomodeset'
+    LABEL boot-nomodeset
+    MENU LABEL ${config.isoImage.prependToMenuLabel}${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (nomodeset)
+    LINUX /boot/${config.system.boot.loader.kernelFile}
+    APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} nomodeset
+    INITRD /boot/${config.system.boot.loader.initrdFile}
+
+    # A variant to boot with 'copytoram'
+    LABEL boot-copytoram
+    MENU LABEL ${config.isoImage.prependToMenuLabel}${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (copytoram)
+    LINUX /boot/${config.system.boot.loader.kernelFile}
+    APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} copytoram
+    INITRD /boot/${config.system.boot.loader.initrdFile}
+
+    # A variant to boot with verbose logging to the console
+    LABEL boot-debug
+    MENU LABEL ${config.isoImage.prependToMenuLabel}${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (debug)
+    LINUX /boot/${config.system.boot.loader.kernelFile}
+    APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} loglevel=7
+    INITRD /boot/${config.system.boot.loader.initrdFile}
+
+    # A variant to boot with a serial console enabled
+    LABEL boot-serial
+    MENU LABEL ${config.isoImage.prependToMenuLabel}${config.system.nixos.distroName} ${config.system.nixos.label}${config.isoImage.appendToMenuLabel} (serial console=ttyS0,115200n8)
+    LINUX /boot/${config.system.boot.loader.kernelFile}
+    APPEND init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams} console=ttyS0,115200n8
+    INITRD /boot/${config.system.boot.loader.initrdFile}
+  '';
+
+  isolinuxMemtest86Entry = ''
+    LABEL memtest
+    MENU LABEL Memtest86+
+    LINUX /boot/memtest.bin
+    APPEND ${toString config.boot.loader.grub.memtest86.params}
+  '';
+
+  isolinuxCfg = concatStringsSep "\n"
+    ([ baseIsolinuxCfg ] ++ optional config.boot.loader.grub.memtest86.enable isolinuxMemtest86Entry);
+
+  refindBinary = if targetArch == "x64" || targetArch == "aa64" then "refind_${targetArch}.efi" else null;
+
+  # Setup instructions for rEFInd.
+  refind =
+    if refindBinary != null then
+      ''
+      # Adds rEFInd to the ISO.
+      cp -v ${pkgs.refind}/share/refind/${refindBinary} $out/EFI/boot/
+      ''
+    else
+      "# No refind for ${targetArch}"
+  ;
+
+  grubPkgs = if config.boot.loader.grub.forcei686 then pkgs.pkgsi686Linux else pkgs;
+
+  grubMenuCfg = ''
+    #
+    # Menu configuration
+    #
+
+    # Search using a "marker file"
+    search --set=root --file /EFI/nixos-installer-image
+
+    insmod gfxterm
+    insmod png
+    set gfxpayload=keep
+    set gfxmode=${concatStringsSep "," [
+      # GRUB will use the first valid mode listed here.
+      # `auto` will sometimes choose the smallest valid mode it detects.
+      # So instead we'll list a lot of possibly valid modes :/
+      #"3840x2160"
+      #"2560x1440"
+      "1920x1200"
+      "1920x1080"
+      "1366x768"
+      "1280x800"
+      "1280x720"
+      "1200x1920"
+      "1024x768"
+      "800x1280"
+      "800x600"
+      "auto"
+    ]}
+
+    if [ "\$textmode" == "false" ]; then
+      terminal_output gfxterm
+      terminal_input  console
+    else
+      terminal_output console
+      terminal_input  console
+      # Sets colors for console term.
+      set menu_color_normal=cyan/blue
+      set menu_color_highlight=white/blue
+    fi
+
+    ${ # When there is a theme configured, use it, otherwise use the background image.
+    if config.isoImage.grubTheme != null then ''
+      # Sets theme.
+      set theme=(\$root)/EFI/boot/grub-theme/theme.txt
+      # Load theme fonts
+      $(find ${config.isoImage.grubTheme} -iname '*.pf2' -printf "loadfont (\$root)/EFI/boot/grub-theme/%P\n")
+    '' else ''
+      if background_image (\$root)/EFI/boot/efi-background.png; then
+        # Black background means transparent background when there
+        # is a background image set... This seems undocumented :(
+        set color_normal=black/black
+        set color_highlight=white/blue
+      else
+        # Falls back again to proper colors.
+        set menu_color_normal=cyan/blue
+        set menu_color_highlight=white/blue
+      fi
+    ''}
+  '';
+
+  # The EFI boot image.
+  # Notes about grub:
+  #  * Yes, the grubMenuCfg has to be repeated in all submenus. Otherwise you
+  #    will get white-on-black console-like text on sub-menus. *sigh*
+  efiDir = pkgs.runCommand "efi-directory" {
+    nativeBuildInputs = [ pkgs.buildPackages.grub2_efi ];
+    strictDeps = true;
+  } ''
+    mkdir -p $out/EFI/boot/
+
+    # Add a marker so GRUB can find the filesystem.
+    touch $out/EFI/nixos-installer-image
+
+    # ALWAYS required modules.
+    MODULES=(
+      # Basic modules for filesystems and partition schemes
+      "fat"
+      "iso9660"
+      "part_gpt"
+      "part_msdos"
+
+      # Basic stuff
+      "normal"
+      "boot"
+      "linux"
+      "configfile"
+      "loopback"
+      "chain"
+      "halt"
+
+      # Allows rebooting into firmware setup interface
+      "efifwsetup"
+
+      # EFI Graphics Output Protocol
+      "efi_gop"
+
+      # User commands
+      "ls"
+
+      # System commands
+      "search"
+      "search_label"
+      "search_fs_uuid"
+      "search_fs_file"
+      "echo"
+
+      # We're not using it anymore, but we'll leave it in so it can be used
+      # by the user, with the console using "C".
+      "serial"
+
+      # Graphical mode stuff
+      "gfxmenu"
+      "gfxterm"
+      "gfxterm_background"
+      "gfxterm_menu"
+      "test"
+      "loadenv"
+      "all_video"
+      "videoinfo"
+
+      # File types for graphical mode
+      "png"
+    )
+
+    echo "Building GRUB with modules:"
+    for mod in ''${MODULES[@]}; do
+      echo " - $mod"
+    done
+
+    # Modules that may or may not be available per-platform.
+    echo "Adding additional modules:"
+    for mod in efi_uga; do
+      if [ -f ${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget}/$mod.mod ]; then
+        echo " - $mod"
+        MODULES+=("$mod")
+      fi
+    done
+
+    # Make our own EFI program; we can't rely on "grub-install" since it seems to
+    # probe for devices, even with --skip-fs-probe.
+    grub-mkimage \
+      --directory=${grubPkgs.grub2_efi}/lib/grub/${grubPkgs.grub2_efi.grubTarget} \
+      -o $out/EFI/boot/boot${targetArch}.efi \
+      -p /EFI/boot \
+      -O ${grubPkgs.grub2_efi.grubTarget} \
+      ''${MODULES[@]}
+    cp ${grubPkgs.grub2_efi}/share/grub/unicode.pf2 $out/EFI/boot/
+
+    cat <<EOF > $out/EFI/boot/grub.cfg
+
+    set textmode=${boolToString (config.isoImage.forceTextMode)}
+    set timeout=${toString grubEfiTimeout}
+
+    clear
+    # This message will only be viewable on the default (UEFI) console.
+    echo ""
+    echo "Loading graphical boot menu..."
+    echo ""
+    echo "Press 't' to use the text boot menu on this console..."
+    echo ""
+
+    ${grubMenuCfg}
+
+    hiddenentry 'Text mode' --hotkey 't' {
+      loadfont (\$root)/EFI/boot/unicode.pf2
+      set textmode=true
+      terminal_output console
+    }
+    hiddenentry 'GUI mode' --hotkey 'g' {
+      $(find ${config.isoImage.grubTheme} -iname '*.pf2' -printf "loadfont (\$root)/EFI/boot/grub-theme/%P\n")
+      set textmode=false
+      terminal_output gfxterm
+    }
+
+
+    # If the parameter iso_path is set, append the findiso parameter to the kernel
+    # line. We need this to allow the nixos iso to be booted from grub directly.
+    if [ \''${iso_path} ] ; then
+      set isoboot="findiso=\''${iso_path}"
+    fi
+
+    #
+    # Menu entries
+    #
+
+    ${buildMenuGrub2}
+    submenu "HiDPI, Quirks and Accessibility" --class hidpi --class submenu {
+      ${grubMenuCfg}
+      submenu "Suggests resolution @720p" --class hidpi-720p {
+        ${grubMenuCfg}
+        ${buildMenuAdditionalParamsGrub2 "video=1280x720@60"}
+      }
+      submenu "Suggests resolution @1080p" --class hidpi-1080p {
+        ${grubMenuCfg}
+        ${buildMenuAdditionalParamsGrub2 "video=1920x1080@60"}
+      }
+
+      # If we boot into a graphical environment where X is started automatically
+      # and keeps crashing, the media becomes unusable. Allow the user
+      # to disable this.
+      submenu "Disable display-manager" --class quirk-disable-displaymanager {
+        ${grubMenuCfg}
+        ${buildMenuAdditionalParamsGrub2 "systemd.mask=display-manager.service"}
+      }
+
+      # Some laptops and convertibles have the panel installed in an
+      # inconvenient way, rotated away from the keyboard.
+      # These entries make it easier to use the installer.
+      submenu "" {return}
+      submenu "Rotate framebuffer Clockwise" --class rotate-90cw {
+        ${grubMenuCfg}
+        ${buildMenuAdditionalParamsGrub2 "fbcon=rotate:1"}
+      }
+      submenu "Rotate framebuffer Upside-Down" --class rotate-180 {
+        ${grubMenuCfg}
+        ${buildMenuAdditionalParamsGrub2 "fbcon=rotate:2"}
+      }
+      submenu "Rotate framebuffer Counter-Clockwise" --class rotate-90ccw {
+        ${grubMenuCfg}
+        ${buildMenuAdditionalParamsGrub2 "fbcon=rotate:3"}
+      }
+
+      # As a proof of concept, mainly. (Not sure it has accessibility merits.)
+      submenu "" {return}
+      submenu "Use black on white" --class accessibility-blakconwhite {
+        ${grubMenuCfg}
+        ${buildMenuAdditionalParamsGrub2 "vt.default_red=0xFF,0xBC,0x4F,0xB4,0x56,0xBC,0x4F,0x00,0xA1,0xCF,0x84,0xCA,0x8D,0xB4,0x84,0x68 vt.default_grn=0xFF,0x55,0xBA,0xBA,0x4D,0x4D,0xB3,0x00,0xA0,0x8F,0xB3,0xCA,0x88,0x93,0xA4,0x68 vt.default_blu=0xFF,0x58,0x5F,0x58,0xC5,0xBD,0xC5,0x00,0xA8,0xBB,0xAB,0x97,0xBD,0xC7,0xC5,0x68"}
+      }
+
+      # Serial access is a must!
+      submenu "" {return}
+      submenu "Serial console=ttyS0,115200n8" --class serial {
+        ${grubMenuCfg}
+        ${buildMenuAdditionalParamsGrub2 "console=ttyS0,115200n8"}
+      }
+    }
+
+    ${lib.optionalString (refindBinary != null) ''
+    # GRUB apparently cannot do "chainloader" operations on "CD".
+    if [ "\$root" != "cd0" ]; then
+      menuentry 'rEFInd' --class refind {
+        # Force root to be the FAT partition
+        # Otherwise it breaks rEFInd's boot
+        search --set=root --no-floppy --fs-uuid 1234-5678
+        chainloader (\$root)/EFI/boot/${refindBinary}
+      }
+    fi
+    ''}
+    menuentry 'Firmware Setup' --class settings {
+      fwsetup
+      clear
+      echo ""
+      echo "If you see this message, your EFI system doesn't support this feature."
+      echo ""
+    }
+    menuentry 'Shutdown' --class shutdown {
+      halt
+    }
+    EOF
+
+    grub-script-check $out/EFI/boot/grub.cfg
+
+    ${refind}
+  '';
+
+  efiImg = pkgs.runCommand "efi-image_eltorito" {
+    nativeBuildInputs = [ pkgs.buildPackages.mtools pkgs.buildPackages.libfaketime pkgs.buildPackages.dosfstools ];
+    strictDeps = true;
+  }
+    # Be careful about determinism: du --apparent-size,
+    #   dates (cp -p, touch, mcopy -m, faketime for label), IDs (mkfs.vfat -i)
+    ''
+      mkdir ./contents && cd ./contents
+      mkdir -p ./EFI/boot
+      cp -rp "${efiDir}"/EFI/boot/{grub.cfg,*.efi} ./EFI/boot
+
+      # Rewrite dates for everything in the FS
+      find . -exec touch --date=2000-01-01 {} +
+
+      # Round up to the nearest multiple of 1MB, for more deterministic du output
+      usage_size=$(( $(du -s --block-size=1M --apparent-size . | tr -cd '[:digit:]') * 1024 * 1024 ))
+      # Make the image 110% as big as the files need to make up for FAT overhead
+      image_size=$(( ($usage_size * 110) / 100 ))
+      # Make the image fit blocks of 1M
+      block_size=$((1024*1024))
+      image_size=$(( ($image_size / $block_size + 1) * $block_size ))
+      echo "Usage size: $usage_size"
+      echo "Image size: $image_size"
+      truncate --size=$image_size "$out"
+      mkfs.vfat --invariant -i 12345678 -n EFIBOOT "$out"
+
+      # Force a fixed order in mcopy for better determinism, and avoid file globbing
+      for d in $(find EFI -type d | sort); do
+        faketime "2000-01-01 00:00:00" mmd -i "$out" "::/$d"
+      done
+
+      for f in $(find EFI -type f | sort); do
+        mcopy -pvm -i "$out" "$f" "::/$f"
+      done
+
+      # Verify the FAT partition.
+      fsck.vfat -vn "$out"
+    ''; # */
+
+in
+
+{
+  options = {
+
+    isoImage.isoName = mkOption {
+      default = "${config.isoImage.isoBaseName}.iso";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Name of the generated ISO image file.
+      '';
+    };
+
+    isoImage.isoBaseName = mkOption {
+      default = config.system.nixos.distroId;
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Prefix of the name of the generated ISO image file.
+      '';
+    };
+
+    isoImage.compressImage = mkOption {
+      default = false;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Whether the ISO image should be compressed using
+        {command}`zstd`.
+      '';
+    };
+
+    isoImage.squashfsCompression = mkOption {
+      default = with pkgs.stdenv.hostPlatform; "xz -Xdict-size 100% "
+                + lib.optionalString isx86 "-Xbcj x86"
+                # Untested but should also reduce size for these platforms
+                + lib.optionalString isAarch "-Xbcj arm"
+                + lib.optionalString (isPower && is32bit && isBigEndian) "-Xbcj powerpc"
+                + lib.optionalString (isSparc) "-Xbcj sparc";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Compression settings to use for the squashfs nix store.
+      '';
+      example = "zstd -Xcompression-level 6";
+    };
+
+    isoImage.edition = mkOption {
+      default = "";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Specifies which edition string to use in the volume ID of the generated
+        ISO image.
+      '';
+    };
+
+    isoImage.volumeID = mkOption {
+      # nixos-$EDITION-$RELEASE-$ARCH
+      default = "nixos${optionalString (config.isoImage.edition != "") "-${config.isoImage.edition}"}-${config.system.nixos.release}-${pkgs.stdenv.hostPlatform.uname.processor}";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Specifies the label or volume ID of the generated ISO image.
+        Note that the label is used by stage 1 of the boot process to
+        mount the CD, so it should be reasonably distinctive.
+      '';
+    };
+
+    isoImage.contents = mkOption {
+      example = literalExpression ''
+        [ { source = pkgs.memtest86 + "/memtest.bin";
+            target = "boot/memtest.bin";
+          }
+        ]
+      '';
+      description = lib.mdDoc ''
+        This option lists files to be copied to fixed locations in the
+        generated ISO image.
+      '';
+    };
+
+    isoImage.storeContents = mkOption {
+      example = literalExpression "[ pkgs.stdenv ]";
+      description = lib.mdDoc ''
+        This option lists additional derivations to be included in the
+        Nix store in the generated ISO image.
+      '';
+    };
+
+    isoImage.includeSystemBuildDependencies = mkOption {
+      default = false;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Set this option to include all the needed sources etc. in the
+        image. It significantly increases image size. Use this when you
+        want to keep all the sources needed to build your system, or when
+        you are going to install the system on a computer with a slow or
+        non-existent network connection.
+      '';
+    };
+
+    isoImage.makeBiosBootable = mkOption {
+      # Before this option was introduced, images were BIOS-bootable if the
+      # hostPlatform was x86-based. This option is enabled by default for
+      # backwards compatibility.
+      #
+      # Also note that syslinux package currently cannot be cross-compiled from
+      # non-x86 platforms, so the default is false on non-x86 build platforms.
+      default = pkgs.stdenv.buildPlatform.isx86 && pkgs.stdenv.hostPlatform.isx86;
+      defaultText = lib.literalMD ''
+        `true` if both build and host platforms are x86-based architectures,
+        e.g. i686 and x86_64.
+      '';
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Whether the ISO image should be a BIOS-bootable disk.
+      '';
+    };
+
+    isoImage.makeEfiBootable = mkOption {
+      default = false;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Whether the ISO image should be an EFI-bootable volume.
+      '';
+    };
+
+    isoImage.makeUsbBootable = mkOption {
+      default = false;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Whether the ISO image should be bootable from CD as well as USB.
+      '';
+    };
+
+    isoImage.efiSplashImage = mkOption {
+      default = pkgs.fetchurl {
+          url = "https://raw.githubusercontent.com/NixOS/nixos-artwork/a9e05d7deb38a8e005a2b52575a3f59a63a4dba0/bootloader/efi-background.png";
+          sha256 = "18lfwmp8yq923322nlb9gxrh5qikj1wsk6g5qvdh31c4h5b1538x";
+        };
+      description = lib.mdDoc ''
+        The splash image to use in the EFI bootloader.
+      '';
+    };
+
+    isoImage.splashImage = mkOption {
+      default = pkgs.fetchurl {
+          url = "https://raw.githubusercontent.com/NixOS/nixos-artwork/a9e05d7deb38a8e005a2b52575a3f59a63a4dba0/bootloader/isolinux/bios-boot.png";
+          sha256 = "1wp822zrhbg4fgfbwkr7cbkr4labx477209agzc0hr6k62fr6rxd";
+        };
+      description = lib.mdDoc ''
+        The splash image to use in the legacy-boot bootloader.
+      '';
+    };
+
+    isoImage.grubTheme = mkOption {
+      default = pkgs.nixos-grub2-theme;
+      type = types.nullOr (types.either types.path types.package);
+      description = lib.mdDoc ''
+        The grub2 theme used for UEFI boot.
+      '';
+    };
+
+    isoImage.syslinuxTheme = mkOption {
+      default = ''
+        MENU TITLE ${config.system.nixos.distroName}
+        MENU RESOLUTION 800 600
+        MENU CLEAR
+        MENU ROWS 6
+        MENU CMDLINEROW -4
+        MENU TIMEOUTROW -3
+        MENU TABMSGROW  -2
+        MENU HELPMSGROW -1
+        MENU HELPMSGENDROW -1
+        MENU MARGIN 0
+
+        #                                FG:AARRGGBB  BG:AARRGGBB   shadow
+        MENU COLOR BORDER       30;44      #00000000    #00000000   none
+        MENU COLOR SCREEN       37;40      #FF000000    #00E2E8FF   none
+        MENU COLOR TABMSG       31;40      #80000000    #00000000   none
+        MENU COLOR TIMEOUT      1;37;40    #FF000000    #00000000   none
+        MENU COLOR TIMEOUT_MSG  37;40      #FF000000    #00000000   none
+        MENU COLOR CMDMARK      1;36;40    #FF000000    #00000000   none
+        MENU COLOR CMDLINE      37;40      #FF000000    #00000000   none
+        MENU COLOR TITLE        1;36;44    #00000000    #00000000   none
+        MENU COLOR UNSEL        37;44      #FF000000    #00000000   none
+        MENU COLOR SEL          7;37;40    #FFFFFFFF    #FF5277C3   std
+      '';
+      type = types.str;
+      description = lib.mdDoc ''
+        The syslinux theme used for BIOS boot.
+      '';
+    };
+
+    isoImage.prependToMenuLabel = mkOption {
+      default = "";
+      type = types.str;
+      example = "Install ";
+      description = lib.mdDoc ''
+        The string to prepend before the menu label for the NixOS system.
+        This will be directly prepended (without whitespace) to the NixOS version
+        string. For example, if it is set to `XXX`:
+
+        `XXXNixOS 99.99-pre666`
+      '';
+    };
+
+    isoImage.appendToMenuLabel = mkOption {
+      default = " Installer";
+      type = types.str;
+      example = " Live System";
+      description = lib.mdDoc ''
+        The string to append after the menu label for the NixOS system.
+        This will be directly appended (without whitespace) to the NixOS version
+        string. For example, if it is set to `XXX`:
+
+        `NixOS 99.99-pre666XXX`
+      '';
+    };
+
+    isoImage.forceTextMode = mkOption {
+      default = false;
+      type = types.bool;
+      example = true;
+      description = lib.mdDoc ''
+        Whether to use text mode instead of graphical GRUB.
+        A value of `true` means graphical mode will not be attempted.
+
+        This is useful for verifying that graphics mode is not the root cause of a problem with the ISO image.
+
+        If text mode is only needed occasionally (e.g. for serial use) you can use the `t` key, after being prompted, to use text mode for the current boot.
+      '';
+    };
+
+  };
+
+  # store them in lib so we can mkImageMediaOverride the
+  # entire file system layout in installation media (only)
+  config.lib.isoFileSystems = {
+    "/" = mkImageMediaOverride
+      {
+        fsType = "tmpfs";
+        options = [ "mode=0755" ];
+      };
+
+    # Note that /dev/root is a symlink to the actual root device
+    # specified on the kernel command line, created in the stage 1
+    # init script.
+    "/iso" = mkImageMediaOverride
+      { device = "/dev/root";
+        neededForBoot = true;
+        noCheck = true;
+      };
+
+    # In stage 1, mount a tmpfs on top of /nix/store (the squashfs
+    # image) to make this a live CD.
+    "/nix/.ro-store" = mkImageMediaOverride
+      { fsType = "squashfs";
+        device = "/iso/nix-store.squashfs";
+        options = [ "loop" ];
+        neededForBoot = true;
+      };
+
+    "/nix/.rw-store" = mkImageMediaOverride
+      { fsType = "tmpfs";
+        options = [ "mode=0755" ];
+        neededForBoot = true;
+      };
+
+    "/nix/store" = mkImageMediaOverride
+      { fsType = "overlay";
+        device = "overlay";
+        options = [
+          "lowerdir=/nix/.ro-store"
+          "upperdir=/nix/.rw-store/store"
+          "workdir=/nix/.rw-store/work"
+        ];
+        depends = [
+          "/nix/.ro-store"
+          "/nix/.rw-store/store"
+          "/nix/.rw-store/work"
+        ];
+      };
+  };
+
+  config = {
+    assertions = [
+      {
+        # Syslinux (and isolinux) only supports x86-based architectures.
+        assertion = config.isoImage.makeBiosBootable -> pkgs.stdenv.hostPlatform.isx86;
+        message = "BIOS boot is only supported on x86-based architectures.";
+      }
+      {
+        assertion = !(stringLength config.isoImage.volumeID > 32);
+        # https://wiki.osdev.org/ISO_9660#The_Primary_Volume_Descriptor
+        # Volume Identifier can only be 32 bytes
+        message = let
+          length = stringLength config.isoImage.volumeID;
+          howmany = toString length;
+          toomany = toString (length - 32);
+        in
+        "isoImage.volumeID ${config.isoImage.volumeID} is ${howmany} characters. That is ${toomany} characters longer than the limit of 32.";
+      }
+    ];
+
+    # Don't build the GRUB menu builder script, since we don't need it
+    # here and it causes a cyclic dependency.
+    boot.loader.grub.enable = false;
+
+    environment.systemPackages =  [ grubPkgs.grub2 grubPkgs.grub2_efi ]
+      ++ optional (config.isoImage.makeBiosBootable) pkgs.syslinux
+    ;
+
+    # In stage 1 of the boot, mount the CD as the root FS by label so
+    # that we don't need to know its device.  We pass the label of the
+    # root filesystem on the kernel command line, rather than in
+    # `fileSystems' below.  This allows CD-to-USB converters such as
+    # UNetbootin to rewrite the kernel command line to pass the label or
+    # UUID of the USB stick.  It would be nicer to write
+    # `root=/dev/disk/by-label/...' here, but UNetbootin doesn't
+    # recognise that.
+    boot.kernelParams =
+      [ "root=LABEL=${config.isoImage.volumeID}"
+        "boot.shell_on_fail"
+      ];
+
+    fileSystems = config.lib.isoFileSystems;
+
+    boot.initrd.availableKernelModules = [ "squashfs" "iso9660" "uas" "overlay" ];
+
+    boot.initrd.kernelModules = [ "loop" "overlay" ];
+
+    # Closures to be copied to the Nix store on the CD, namely the init
+    # script and the top-level system configuration directory.
+    isoImage.storeContents =
+      [ config.system.build.toplevel ] ++
+      optional config.isoImage.includeSystemBuildDependencies
+        config.system.build.toplevel.drvPath;
+
+    # Create the squashfs image that contains the Nix store.
+    system.build.squashfsStore = pkgs.callPackage ../../../lib/make-squashfs.nix {
+      storeContents = config.isoImage.storeContents;
+      comp = config.isoImage.squashfsCompression;
+    };
+
+    # Individual files to be included on the CD, outside of the Nix
+    # store on the CD.
+    isoImage.contents =
+      [
+        { source = config.boot.kernelPackages.kernel + "/" + config.system.boot.loader.kernelFile;
+          target = "/boot/" + config.system.boot.loader.kernelFile;
+        }
+        { source = config.system.build.initialRamdisk + "/" + config.system.boot.loader.initrdFile;
+          target = "/boot/" + config.system.boot.loader.initrdFile;
+        }
+        { source = config.system.build.squashfsStore;
+          target = "/nix-store.squashfs";
+        }
+        { source = pkgs.writeText "version" config.system.nixos.label;
+          target = "/version.txt";
+        }
+      ] ++ optionals (config.isoImage.makeBiosBootable) [
+        { source = config.isoImage.splashImage;
+          target = "/isolinux/background.png";
+        }
+        { source = pkgs.substituteAll  {
+            name = "isolinux.cfg";
+            src = pkgs.writeText "isolinux.cfg-in" isolinuxCfg;
+            bootRoot = "/boot";
+          };
+          target = "/isolinux/isolinux.cfg";
+        }
+        { source = "${pkgs.syslinux}/share/syslinux";
+          target = "/isolinux";
+        }
+      ] ++ optionals config.isoImage.makeEfiBootable [
+        { source = efiImg;
+          target = "/boot/efi.img";
+        }
+        { source = "${efiDir}/EFI";
+          target = "/EFI";
+        }
+        { source = (pkgs.writeTextDir "grub/loopback.cfg" "source /EFI/boot/grub.cfg") + "/grub";
+          target = "/boot/grub";
+        }
+        { source = config.isoImage.efiSplashImage;
+          target = "/EFI/boot/efi-background.png";
+        }
+      ] ++ optionals (config.boot.loader.grub.memtest86.enable && config.isoImage.makeBiosBootable) [
+        { source = "${pkgs.memtest86plus}/memtest.bin";
+          target = "/boot/memtest.bin";
+        }
+      ] ++ optionals (config.isoImage.grubTheme != null) [
+        { source = config.isoImage.grubTheme;
+          target = "/EFI/boot/grub-theme";
+        }
+      ];
+
+    boot.loader.timeout = 10;
+
+    # Create the ISO image.
+    system.build.isoImage = pkgs.callPackage ../../../lib/make-iso9660-image.nix ({
+      inherit (config.isoImage) isoName compressImage volumeID contents;
+      bootable = config.isoImage.makeBiosBootable;
+      bootImage = "/isolinux/isolinux.bin";
+      syslinux = if config.isoImage.makeBiosBootable then pkgs.syslinux else null;
+    } // optionalAttrs (config.isoImage.makeUsbBootable && config.isoImage.makeBiosBootable) {
+      usbBootable = true;
+      isohybridMbrImage = "${pkgs.syslinux}/share/syslinux/isohdpfx.bin";
+    } // optionalAttrs config.isoImage.makeEfiBootable {
+      efiBootable = true;
+      efiBootImage = "boot/efi.img";
+    });
+
+    boot.postBootCommands =
+      ''
+        # After booting, register the contents of the Nix store on the
+        # CD in the Nix database in the tmpfs.
+        ${config.nix.package.out}/bin/nix-store --load-db < /nix/store/nix-path-registration
+
+        # nixos-rebuild also requires a "system" profile and an
+        # /etc/NIXOS tag.
+        touch /etc/NIXOS
+        ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
+      '';
+
+    # Add vfat support to the initrd to enable people to copy the
+    # contents of the CD to a bootable USB stick.
+    boot.initrd.supportedFilesystems = [ "vfat" ];
+
+  };
+
+}
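Note: the `system.build.isoImage` attribute defined above can be built directly with nix-build. A minimal sketch from a nixpkgs checkout, assuming a hypothetical `my-iso.nix` that imports this module together with a system configuration:

    $ nix-build nixos -I nixos-config=./my-iso.nix -A config.system.build.isoImage
    $ ls result/iso/
    nixos-*.iso

With the default options the image is BIOS-bootable via isolinux and EFI-bootable via the embedded efi.img; the output name follows `config.isoImage.isoName`.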
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64-new-kernel.nix b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64-new-kernel.nix
new file mode 100644
index 000000000000..a669d61571fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64-new-kernel.nix
@@ -0,0 +1,14 @@
+{ config, ... }:
+{
+  imports = [
+    ../sd-card/sd-image-aarch64-new-kernel-installer.nix
+  ];
+  config = {
+    warnings = [
+      ''
+      .../cd-dvd/sd-image-aarch64-new-kernel.nix is deprecated and will eventually be removed.
+      Please switch to .../sd-card/sd-image-aarch64-new-kernel-installer.nix, instead.
+      ''
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64.nix b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64.nix
new file mode 100644
index 000000000000..76c1509b8f7e
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-aarch64.nix
@@ -0,0 +1,14 @@
+{ config, ... }:
+{
+  imports = [
+    ../sd-card/sd-image-aarch64-installer.nix
+  ];
+  config = {
+    warnings = [
+      ''
+      .../cd-dvd/sd-image-aarch64.nix is deprecated and will eventually be removed.
+      Please switch to .../sd-card/sd-image-aarch64-installer.nix, instead.
+      ''
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-armv7l-multiplatform.nix b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-armv7l-multiplatform.nix
new file mode 100644
index 000000000000..6ee0eb9e9b8d
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-armv7l-multiplatform.nix
@@ -0,0 +1,14 @@
+{ config, ... }:
+{
+  imports = [
+    ../sd-card/sd-image-armv7l-multiplatform-installer.nix
+  ];
+  config = {
+    warnings = [
+      ''
+      .../cd-dvd/sd-image-armv7l-multiplatform.nix is deprecated and will eventually be removed.
+      Please switch to .../sd-card/sd-image-armv7l-multiplatform-installer.nix, instead.
+      ''
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix
new file mode 100644
index 000000000000..747440ba9c61
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image-raspberrypi.nix
@@ -0,0 +1,14 @@
+{ config, ... }:
+{
+  imports = [
+    ../sd-card/sd-image-raspberrypi-installer.nix
+  ];
+  config = {
+    warnings = [
+      ''
+      .../cd-dvd/sd-image-raspberrypi.nix is deprecated and will eventually be removed.
+      Please switch to .../sd-card/sd-image-raspberrypi-installer.nix, instead.
+      ''
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/cd-dvd/sd-image.nix b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image.nix
new file mode 100644
index 000000000000..e2d6dcb3fe3a
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/cd-dvd/sd-image.nix
@@ -0,0 +1,14 @@
+{ config, ... }:
+{
+  imports = [
+    ../sd-card/sd-image.nix
+  ];
+  config = {
+    warnings = [
+      ''
+      .../cd-dvd/sd-image.nix is deprecated and will eventually be removed.
+      Please switch to .../sd-card/sd-image.nix, instead.
+      ''
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/netboot/netboot-base.nix b/nixpkgs/nixos/modules/installer/netboot/netboot-base.nix
new file mode 100644
index 000000000000..7e66a49c7391
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/netboot/netboot-base.nix
@@ -0,0 +1,17 @@
+# This module contains the basic configuration for building netboot
+# images
+
+{ lib, ... }:
+
+with lib;
+
+{
+  imports =
+    [ ./netboot.nix
+
+      # Profiles of this basic netboot media
+      ../../profiles/all-hardware.nix
+      ../../profiles/base.nix
+      ../../profiles/installation-device.nix
+    ];
+}
diff --git a/nixpkgs/nixos/modules/installer/netboot/netboot-minimal.nix b/nixpkgs/nixos/modules/installer/netboot/netboot-minimal.nix
new file mode 100644
index 000000000000..5ca255acf35f
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/netboot/netboot-minimal.nix
@@ -0,0 +1,15 @@
+# This module defines a small netboot environment.
+
+{ lib, ... }:
+
+{
+  imports = [
+    ./netboot-base.nix
+    ../../profiles/minimal.nix
+  ];
+
+  documentation.man.enable = lib.mkOverride 500 true;
+  hardware.enableRedistributableFirmware = lib.mkOverride 70 false;
+  system.extraDependencies = lib.mkOverride 70 [];
+  networking.wireless.enable = lib.mkOverride 500 false;
+}
diff --git a/nixpkgs/nixos/modules/installer/netboot/netboot.nix b/nixpkgs/nixos/modules/installer/netboot/netboot.nix
new file mode 100644
index 000000000000..a50f22cbe471
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/netboot/netboot.nix
@@ -0,0 +1,164 @@
+# This module creates netboot media containing the given NixOS
+# configuration.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  options = {
+
+    netboot.squashfsCompression = mkOption {
+      default = with pkgs.stdenv.hostPlatform; "xz -Xdict-size 100% "
+                + lib.optionalString isx86 "-Xbcj x86"
+                # Untested but should also reduce size for these platforms
+                + lib.optionalString isAarch "-Xbcj arm"
+                + lib.optionalString (isPower && is32bit && isBigEndian) "-Xbcj powerpc"
+                + lib.optionalString (isSparc) "-Xbcj sparc";
+      description = lib.mdDoc ''
+        Compression settings to use for the squashfs nix store.
+      '';
+      example = "zstd -Xcompression-level 6";
+      type = types.str;
+    };
+
+    netboot.storeContents = mkOption {
+      example = literalExpression "[ pkgs.stdenv ]";
+      description = lib.mdDoc ''
+        This option lists additional derivations to be included in the
+        Nix store in the generated netboot image.
+      '';
+    };
+
+  };
+
+  config = {
+    # Don't build the GRUB menu builder script, since we don't need it
+    # here and it causes a cyclic dependency.
+    boot.loader.grub.enable = false;
+
+    # !!! Hack - attributes expected by other modules.
+    environment.systemPackages = [ pkgs.grub2_efi ]
+      ++ (lib.optionals (pkgs.stdenv.hostPlatform.system != "aarch64-linux") [pkgs.grub2 pkgs.syslinux]);
+
+    fileSystems."/" = mkImageMediaOverride
+      { fsType = "tmpfs";
+        options = [ "mode=0755" ];
+      };
+
+    # In stage 1, mount a tmpfs on top of /nix/store (the squashfs
+    # image) to make this a live CD.
+    fileSystems."/nix/.ro-store" = mkImageMediaOverride
+      { fsType = "squashfs";
+        device = "../nix-store.squashfs";
+        options = [ "loop" ];
+        neededForBoot = true;
+      };
+
+    fileSystems."/nix/.rw-store" = mkImageMediaOverride
+      { fsType = "tmpfs";
+        options = [ "mode=0755" ];
+        neededForBoot = true;
+      };
+
+    fileSystems."/nix/store" = mkImageMediaOverride
+      { fsType = "overlay";
+        device = "overlay";
+        options = [
+          "lowerdir=/nix/.ro-store"
+          "upperdir=/nix/.rw-store/store"
+          "workdir=/nix/.rw-store/work"
+        ];
+
+        depends = [
+          "/nix/.ro-store"
+          "/nix/.rw-store/store"
+          "/nix/.rw-store/work"
+        ];
+      };
+
+    boot.initrd.availableKernelModules = [ "squashfs" "overlay" ];
+
+    boot.initrd.kernelModules = [ "loop" "overlay" ];
+
+    # Closures to be copied to the Nix store, namely the init
+    # script and the top-level system configuration directory.
+    netboot.storeContents =
+      [ config.system.build.toplevel ];
+
+    # Create the squashfs image that contains the Nix store.
+    system.build.squashfsStore = pkgs.callPackage ../../../lib/make-squashfs.nix {
+      storeContents = config.netboot.storeContents;
+      comp = config.netboot.squashfsCompression;
+    };
+
+
+    # Create the initrd
+    system.build.netbootRamdisk = pkgs.makeInitrdNG {
+      inherit (config.boot.initrd) compressor;
+      prepend = [ "${config.system.build.initialRamdisk}/initrd" ];
+
+      contents =
+        [ { object = config.system.build.squashfsStore;
+            symlink = "/nix-store.squashfs";
+          }
+        ];
+    };
+
+    system.build.netbootIpxeScript = pkgs.writeTextDir "netboot.ipxe" ''
+      #!ipxe
+      # Use the cmdline variable to allow the user to specify custom kernel params
+      # when chainloading this script from other iPXE scripts like netboot.xyz
+      kernel ${pkgs.stdenv.hostPlatform.linux-kernel.target} init=${config.system.build.toplevel}/init initrd=initrd ${toString config.boot.kernelParams} ''${cmdline}
+      initrd initrd
+      boot
+    '';
+
+    # A script invoking kexec on ./bzImage and ./initrd.gz.
+    # Usually used through system.build.kexecTree, but exposed here for composability.
+    system.build.kexecScript = pkgs.writeScript "kexec-boot" ''
+      #!/usr/bin/env bash
+      if ! kexec -v >/dev/null 2>&1; then
+        echo "kexec not found: please install kexec-tools" 2>&1
+        exit 1
+      fi
+      SCRIPT_DIR=$( cd -- "$( dirname -- "''${BASH_SOURCE[0]}" )" &> /dev/null && pwd )
+      kexec --load ''${SCRIPT_DIR}/bzImage \
+        --initrd=''${SCRIPT_DIR}/initrd.gz \
+        --command-line "init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}"
+      kexec -e
+    '';
+
+    # A tree containing initrd.gz, bzImage and a kexec-boot script.
+    system.build.kexecTree = pkgs.linkFarm "kexec-tree" [
+      {
+        name = "initrd.gz";
+        path = "${config.system.build.netbootRamdisk}/initrd";
+      }
+      {
+        name = "bzImage";
+        path = "${config.system.build.kernel}/${config.system.boot.loader.kernelFile}";
+      }
+      {
+        name = "kexec-boot";
+        path = config.system.build.kexecScript;
+      }
+    ];
+
+    boot.loader.timeout = 10;
+
+    boot.postBootCommands =
+      ''
+        # After booting, register the contents of the Nix store
+        # in the Nix database in the tmpfs.
+        ${config.nix.package}/bin/nix-store --load-db < /nix/store/nix-path-registration
+
+        # nixos-rebuild also requires a "system" profile and an
+        # /etc/NIXOS tag.
+        touch /etc/NIXOS
+        ${config.nix.package}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
+      '';
+
+  };
+
+}
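Note: a usage sketch for the build attributes defined above, assuming a hypothetical `my-netboot.nix` that imports this module plus a system configuration:

    $ nix-build nixos -I nixos-config=./my-netboot.nix -A config.system.build.kexecTree
    $ ls result/
    bzImage  initrd.gz  kexec-boot
    $ sudo ./result/kexec-boot   # kexec into the image from a running Linux host (needs kexec-tools)

For network booting, `config.system.build.netbootIpxeScript` produces the matching netboot.ipxe, which chainloads the same kernel and initrd with the configured kernel parameters.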
diff --git a/nixpkgs/nixos/modules/installer/scan/detected.nix b/nixpkgs/nixos/modules/installer/scan/detected.nix
new file mode 100644
index 000000000000..5c5fba56f517
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/scan/detected.nix
@@ -0,0 +1,12 @@
+# List all devices which are detected by nixos-generate-config.
+# Common devices are enabled by default.
+{ lib, ... }:
+
+with lib;
+
+{
+  config = mkDefault {
+    # Common firmware, i.e. for wifi cards
+    hardware.enableRedistributableFirmware = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/scan/not-detected.nix b/nixpkgs/nixos/modules/installer/scan/not-detected.nix
new file mode 100644
index 000000000000..baa068c08dbf
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/scan/not-detected.nix
@@ -0,0 +1,6 @@
+# Enables non-free firmware on devices not recognized by `nixos-generate-config`.
+{ lib, ... }:
+
+{
+  hardware.enableRedistributableFirmware = lib.mkDefault true;
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-installer.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-installer.nix
new file mode 100644
index 000000000000..2a6b6abdf913
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-installer.nix
@@ -0,0 +1,10 @@
+{
+  imports = [
+    ../../profiles/installation-device.nix
+    ./sd-image-aarch64.nix
+  ];
+
+  # the installation media is also the installation target,
+  # so we don't want to provide the installation configuration.nix.
+  installer.cloneConfig = false;
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-installer.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-installer.nix
new file mode 100644
index 000000000000..1b6b55ff2918
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-installer.nix
@@ -0,0 +1,10 @@
+{
+  imports = [
+    ../../profiles/installation-device.nix
+    ./sd-image-aarch64-new-kernel.nix
+  ];
+
+  # the installation media is also the installation target,
+  # so we don't want to provide the installation configuration.nix.
+  installer.cloneConfig = false;
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix
new file mode 100644
index 000000000000..0e5055960294
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix
@@ -0,0 +1,15 @@
+{ pkgs, ... }:
+
+{
+  imports = [ ./sd-image-aarch64-new-kernel-installer.nix ];
+
+  # Makes `availableOn` fail for zfs, see <nixos/modules/profiles/base.nix>.
+  # This is a workaround since we cannot remove the `"zfs"` string from `supportedFilesystems`.
+  # The proper fix would be to make `supportedFilesystems` an attrset of booleans,
+  # which we could then set to false with `lib.mkForce`.
+  nixpkgs.overlays = [(final: super: {
+    zfs = super.zfs.overrideAttrs(_: {
+      meta.platforms = [];
+    });
+  })];
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel.nix
new file mode 100644
index 000000000000..2882fbcc7305
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64-new-kernel.nix
@@ -0,0 +1,7 @@
+{ pkgs, ... }:
+
+{
+  imports = [ ./sd-image-aarch64.nix ];
+
+  boot.kernelPackages = pkgs.linuxPackages_latest;
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64.nix
new file mode 100644
index 000000000000..cf01005fdc8a
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-aarch64.nix
@@ -0,0 +1,83 @@
+# To build, use:
+# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-aarch64.nix -A config.system.build.sdImage
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../profiles/base.nix
+    ./sd-image.nix
+  ];
+
+  boot.loader.grub.enable = false;
+  boot.loader.generic-extlinux-compatible.enable = true;
+
+  boot.consoleLogLevel = lib.mkDefault 7;
+
+  # The serial ports listed here are:
+  # - ttyS0: for Tegra (Jetson TX1)
+  # - ttyAMA0: for QEMU's -machine virt
+  boot.kernelParams = ["console=ttyS0,115200n8" "console=ttyAMA0,115200n8" "console=tty0"];
+
+  sdImage = {
+    populateFirmwareCommands = let
+      configTxt = pkgs.writeText "config.txt" ''
+        [pi3]
+        kernel=u-boot-rpi3.bin
+
+        [pi02]
+        kernel=u-boot-rpi3.bin
+
+        [pi4]
+        kernel=u-boot-rpi4.bin
+        enable_gic=1
+        armstub=armstub8-gic.bin
+
+        # Otherwise the resolution will be weird in most cases, compared to
+        # what the pi3 firmware does by default.
+        disable_overscan=1
+
+        # Supported in newer board revisions
+        arm_boost=1
+
+        [cm4]
+        # Enable host mode on the 2711 built-in XHCI USB controller.
+        # This line should be removed if the legacy DWC2 controller is required
+        # (e.g. for USB device mode) or if USB support is not required.
+        otg_mode=1
+
+        [all]
+        # Boot in 64-bit mode.
+        arm_64bit=1
+
+        # U-Boot needs this to work, regardless of whether UART is actually used or not.
+        # Look in arch/arm/mach-bcm283x/Kconfig in the U-Boot tree to see if this is still
+        # a requirement in the future.
+        enable_uart=1
+
+        # Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
+        # when attempting to show low-voltage or overtemperature warnings.
+        avoid_warnings=1
+      '';
+      in ''
+        (cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
+
+        # Add the config
+        cp ${configTxt} firmware/config.txt
+
+        # Add pi3 specific files
+        cp ${pkgs.ubootRaspberryPi3_64bit}/u-boot.bin firmware/u-boot-rpi3.bin
+
+        # Add pi4 specific files
+        cp ${pkgs.ubootRaspberryPi4_64bit}/u-boot.bin firmware/u-boot-rpi4.bin
+        cp ${pkgs.raspberrypi-armstubs}/armstub8-gic.bin firmware/armstub8-gic.bin
+        cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-4-b.dtb firmware/
+        cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-400.dtb firmware/
+        cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-cm4.dtb firmware/
+        cp ${pkgs.raspberrypifw}/share/raspberrypi/boot/bcm2711-rpi-cm4s.dtb firmware/
+      '';
+    populateRootCommands = ''
+      mkdir -p ./files/boot
+      ${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
+    '';
+  };
+}
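Note: building and flashing this image, as a sketch, assuming the default `sdImage.compressImage = true` and a hypothetical target device /dev/sdX:

    $ nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-aarch64.nix -A config.system.build.sdImage
    $ zstd -d result/sd-image/nixos-sd-image-*.img.zst -o nixos-sd.img
    $ sudo dd if=nixos-sd.img of=/dev/sdX bs=4M conv=fsync status=progress

On first boot the root partition is grown to fill the card (see `sdImage.expandOnBoot` in sd-image.nix below).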
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform-installer.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform-installer.nix
new file mode 100644
index 000000000000..fbe04377d50d
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform-installer.nix
@@ -0,0 +1,10 @@
+{
+  imports = [
+    ../../profiles/installation-device.nix
+    ./sd-image-armv7l-multiplatform.nix
+  ];
+
+  # the installation media is also the installation target,
+  # so we don't want to provide the installation configuration.nix.
+  installer.cloneConfig = false;
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix
new file mode 100644
index 000000000000..23ed92851296
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix
@@ -0,0 +1,52 @@
+# To build, use:
+# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-armv7l-multiplatform.nix -A config.system.build.sdImage
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../profiles/base.nix
+    ./sd-image.nix
+  ];
+
+  boot.loader.grub.enable = false;
+  boot.loader.generic-extlinux-compatible.enable = true;
+
+  boot.consoleLogLevel = lib.mkDefault 7;
+  boot.kernelPackages = pkgs.linuxPackages_latest;
+  # The serial ports listed here are:
+  # - ttyS0: for Tegra (Jetson TK1)
+  # - ttymxc0: for i.MX6 (Wandboard)
+  # - ttyAMA0: for Allwinner (pcDuino3 Nano) and QEMU's -machine virt
+  # - ttyO0: for OMAP (BeagleBone Black)
+  # - ttySAC2: for Exynos (ODROID-XU3)
+  boot.kernelParams = ["console=ttyS0,115200n8" "console=ttymxc0,115200n8" "console=ttyAMA0,115200n8" "console=ttyO0,115200n8" "console=ttySAC2,115200n8" "console=tty0"];
+
+  sdImage = {
+    populateFirmwareCommands = let
+      configTxt = pkgs.writeText "config.txt" ''
+        # Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
+        # when attempting to show low-voltage or overtemperature warnings.
+        avoid_warnings=1
+
+        [pi2]
+        kernel=u-boot-rpi2.bin
+
+        [pi3]
+        kernel=u-boot-rpi3.bin
+
+        # U-Boot used to need this to work, regardless of whether UART is actually used or not.
+        # TODO: check when/if this can be removed.
+        enable_uart=1
+      '';
+      in ''
+        (cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
+        cp ${pkgs.ubootRaspberryPi2}/u-boot.bin firmware/u-boot-rpi2.bin
+        cp ${pkgs.ubootRaspberryPi3_32bit}/u-boot.bin firmware/u-boot-rpi3.bin
+        cp ${configTxt} firmware/config.txt
+      '';
+    populateRootCommands = ''
+      mkdir -p ./files/boot
+      ${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-powerpc64le.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-powerpc64le.nix
new file mode 100644
index 000000000000..143c678e43fb
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-powerpc64le.nix
@@ -0,0 +1,49 @@
+# To build, use:
+# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-powerpc64le.nix -A config.system.build.sdImage
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../profiles/base.nix
+    ../../profiles/installation-device.nix
+    ./sd-image.nix
+  ];
+
+  boot.loader = {
+    # powerpc64le-linux typically uses petitboot
+    grub.enable = false;
+    generic-extlinux-compatible = {
+      # petitboot does not support all of the extlinux extensions to
+      # syslinux, but its parser is very forgiving; it essentially ignores
+      # whatever it doesn't understand.  See below for a filename adjustment.
+      enable = true;
+    };
+  };
+
+  boot.consoleLogLevel = lib.mkDefault 7;
+  boot.kernelParams = [ "console=hvc0" ];
+
+  sdImage = {
+    populateFirmwareCommands = "";
+    populateRootCommands = ''
+      mkdir -p ./files/boot
+      ${config.boot.loader.generic-extlinux-compatible.populateCmd} \
+        -c ${config.system.build.toplevel} \
+        -d ./files/boot
+    ''
+    # https://github.com/open-power/petitboot/blob/master/discover/syslinux-parser.c
+    # petitboot will look in these paths (plus all-caps versions of them):
+    #  /boot/syslinux/syslinux.cfg
+    #  /syslinux/syslinux.cfg
+    #  /syslinux.cfg
+    + ''
+      mv ./files/boot/extlinux ./files/boot/syslinux
+      mv ./files/boot/syslinux/extlinux.conf ./files/boot/syslinux/syslinux.cfg
+    ''
+    # petitboot does not support relative paths for LINUX or INITRD; it prepends
+    # a `/` when parsing these fields
+    + ''
+      sed -i 's_^\(\W\W*\(INITRD\|initrd\|LINUX\|linux\)\W\)\.\./_\1/boot/_' ./files/boot/syslinux/syslinux.cfg
+    '';
+  };
+}
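Note: to make the petitboot adjustment above concrete, `populateCmd` writes an extlinux.conf whose LINUX/INITRD entries are relative to /boot/extlinux, and the sed expression turns them into the absolute paths petitboot expects. A hypothetical before/after of one entry in the renamed syslinux.cfg:

      # as generated by populateCmd
      INITRD ../nixos/<initrd-file>
      # after the sed rewrite for petitboot
      INITRD /boot/nixos/<initrd-file>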
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi-installer.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi-installer.nix
new file mode 100644
index 000000000000..72ec7485b528
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi-installer.nix
@@ -0,0 +1,10 @@
+{
+  imports = [
+    ../../profiles/installation-device.nix
+    ./sd-image-raspberrypi.nix
+  ];
+
+  # the installation media is also the installation target,
+  # so we don't want to provide the installation configuration.nix.
+  installer.cloneConfig = false;
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi.nix
new file mode 100644
index 000000000000..103d6787a03c
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-raspberrypi.nix
@@ -0,0 +1,41 @@
+# To build, use:
+# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-raspberrypi.nix -A config.system.build.sdImage
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../profiles/base.nix
+    ./sd-image.nix
+  ];
+
+  boot.loader.grub.enable = false;
+  boot.loader.generic-extlinux-compatible.enable = true;
+
+  boot.consoleLogLevel = lib.mkDefault 7;
+  boot.kernelPackages = pkgs.linuxKernel.packages.linux_rpi1;
+
+  sdImage = {
+    populateFirmwareCommands = let
+      configTxt = pkgs.writeText "config.txt" ''
+        # Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
+        # when attempting to show low-voltage or overtemperature warnings.
+        avoid_warnings=1
+
+        [pi0]
+        kernel=u-boot-rpi0.bin
+
+        [pi1]
+        kernel=u-boot-rpi1.bin
+      '';
+      in ''
+        (cd ${pkgs.raspberrypifw}/share/raspberrypi/boot && cp bootcode.bin fixup*.dat start*.elf $NIX_BUILD_TOP/firmware/)
+        cp ${pkgs.ubootRaspberryPiZero}/u-boot.bin firmware/u-boot-rpi0.bin
+        cp ${pkgs.ubootRaspberryPi}/u-boot.bin firmware/u-boot-rpi1.bin
+        cp ${configTxt} firmware/config.txt
+      '';
+    populateRootCommands = ''
+      mkdir -p ./files/boot
+      ${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-riscv64-qemu-installer.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-riscv64-qemu-installer.nix
new file mode 100644
index 000000000000..90c1b8413adc
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-riscv64-qemu-installer.nix
@@ -0,0 +1,10 @@
+{
+  imports = [
+    ../../profiles/installation-device.nix
+    ./sd-image-riscv64-qemu.nix
+  ];
+
+  # the installation media is also the installation target,
+  # so we don't want to provide the installation configuration.nix.
+  installer.cloneConfig = false;
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-riscv64-qemu.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-riscv64-qemu.nix
new file mode 100644
index 000000000000..a3e30768da45
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-riscv64-qemu.nix
@@ -0,0 +1,32 @@
+# To build, use:
+# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-riscv64-qemu.nix -A config.system.build.sdImage
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../profiles/base.nix
+    ./sd-image.nix
+  ];
+
+  boot.loader = {
+    grub.enable = false;
+    generic-extlinux-compatible = {
+      enable = true;
+
+      # Don't even specify FDTDIR - We do not have the correct DT
+      # The DTB is generated by QEMU at runtime
+      useGenerationDeviceTree = false;
+    };
+  };
+
+  boot.consoleLogLevel = lib.mkDefault 7;
+  boot.kernelParams = [ "console=tty0" "console=ttyS0,115200n8" ];
+
+  sdImage = {
+    populateFirmwareCommands = "";
+    populateRootCommands = ''
+      mkdir -p ./files/boot
+      ${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image-x86_64.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image-x86_64.nix
new file mode 100644
index 000000000000..b44c0a4eeca5
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image-x86_64.nix
@@ -0,0 +1,27 @@
+# To build, use:
+# nix-build nixos -I nixos-config=nixos/modules/installer/sd-card/sd-image-x86_64.nix -A config.system.build.sdImage
+
+# This image is primarily used in NixOS tests (boot.nix) to test `boot.loader.generic-extlinux-compatible`.
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ../../profiles/base.nix
+    ./sd-image.nix
+  ];
+
+  boot.loader = {
+    grub.enable = false;
+    generic-extlinux-compatible.enable = true;
+  };
+
+  boot.consoleLogLevel = lib.mkDefault 7;
+
+  sdImage = {
+    populateFirmwareCommands = "";
+    populateRootCommands = ''
+      mkdir -p ./files/boot
+      ${config.boot.loader.generic-extlinux-compatible.populateCmd} -c ${config.system.build.toplevel} -d ./files/boot
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/installer/sd-card/sd-image.nix b/nixpkgs/nixos/modules/installer/sd-card/sd-image.nix
new file mode 100644
index 000000000000..ad9b803b1d1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/sd-card/sd-image.nix
@@ -0,0 +1,285 @@
+# This module creates a bootable SD card image containing the given NixOS
+# configuration. The generated image is MBR partitioned, with a FAT
+# /boot/firmware partition and an ext4 root partition. The generated image
+# is sized to fit its contents, and a boot script automatically resizes
+# the root partition to fit the device on the first boot.
+#
+# The firmware partition is built with the expectation that it holds the
+# Raspberry Pi firmware and bootloader; for other board families it can be
+# removed and replaced with a firmware build for the target SoC.
+#
+# The derivation for the SD image will be placed in
+# config.system.build.sdImage
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  rootfsImage = pkgs.callPackage ../../../lib/make-ext4-fs.nix ({
+    inherit (config.sdImage) storePaths;
+    compressImage = config.sdImage.compressImage;
+    populateImageCommands = config.sdImage.populateRootCommands;
+    volumeLabel = "NIXOS_SD";
+  } // optionalAttrs (config.sdImage.rootPartitionUUID != null) {
+    uuid = config.sdImage.rootPartitionUUID;
+  });
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "sdImage" "bootPartitionID" ] "The FAT partition for SD image now only holds the Raspberry Pi firmware files. Use firmwarePartitionID to configure that partition's ID.")
+    (mkRemovedOptionModule [ "sdImage" "bootSize" ] "The boot files for SD image have been moved to the main ext4 partition. The FAT partition now only holds the Raspberry Pi firmware files. Changing its size may not be required.")
+    ../../profiles/all-hardware.nix
+  ];
+
+  options.sdImage = {
+    imageName = mkOption {
+      default = "${config.sdImage.imageBaseName}-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.img";
+      description = lib.mdDoc ''
+        Name of the generated image file.
+      '';
+    };
+
+    imageBaseName = mkOption {
+      default = "nixos-sd-image";
+      description = lib.mdDoc ''
+        Prefix of the name of the generated image file.
+      '';
+    };
+
+    storePaths = mkOption {
+      type = with types; listOf package;
+      example = literalExpression "[ pkgs.stdenv ]";
+      description = lib.mdDoc ''
+        Derivations to be included in the Nix store in the generated SD image.
+      '';
+    };
+
+    firmwarePartitionOffset = mkOption {
+      type = types.int;
+      default = 8;
+      description = lib.mdDoc ''
+        Gap in front of the /boot/firmware partition, in mebibytes (1024×1024
+        bytes).
+        Can be increased to make more space for boards that require dd-ing a
+        u-boot SPL before the actual partitions.
+
+        Unless you are building your own images pre-configured with an
+        installed U-Boot, you can instead opt to delete the existing `FIRMWARE`
+        partition, which is used **only** for the Raspberry Pi family of
+        hardware.
+      '';
+    };
+
+    firmwarePartitionID = mkOption {
+      type = types.str;
+      default = "0x2178694e";
+      description = lib.mdDoc ''
+        Volume ID for the /boot/firmware partition on the SD card. This value
+        must be a 32-bit hexadecimal number.
+      '';
+    };
+
+    firmwarePartitionName = mkOption {
+      type = types.str;
+      default = "FIRMWARE";
+      description = lib.mdDoc ''
+        Name of the filesystem which holds the boot firmware.
+      '';
+    };
+
+    rootPartitionUUID = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "14e19a7b-0ae0-484d-9d54-43bd6fdc20c7";
+      description = lib.mdDoc ''
+        UUID for the filesystem on the main NixOS partition on the SD card.
+      '';
+    };
+
+    firmwareSize = mkOption {
+      type = types.int;
+      # As of 2019-08-18 the Raspberry Pi firmware + u-boot takes ~18MiB
+      default = 30;
+      description = lib.mdDoc ''
+        Size of the /boot/firmware partition, in mebibytes.
+      '';
+    };
+
+    populateFirmwareCommands = mkOption {
+      example = literalExpression "'' cp \${pkgs.myBootLoader}/u-boot.bin firmware/ ''";
+      description = lib.mdDoc ''
+        Shell commands to populate the ./firmware directory.
+        All files in that directory are copied to the
+        /boot/firmware partition on the SD image.
+      '';
+    };
+
+    populateRootCommands = mkOption {
+      example = literalExpression "''\${config.boot.loader.generic-extlinux-compatible.populateCmd} -c \${config.system.build.toplevel} -d ./files/boot''";
+      description = lib.mdDoc ''
+        Shell commands to populate the ./files directory.
+        All files in that directory are copied to the
+        root (/) partition on the SD image. Use this to
+        populate the ./files/boot (/boot) directory.
+      '';
+    };
+
+    postBuildCommands = mkOption {
+      example = literalExpression "'' dd if=\${pkgs.myBootLoader}/SPL of=$img bs=1024 seek=1 conv=notrunc ''";
+      default = "";
+      description = lib.mdDoc ''
+        Shell commands to run after the image is built.
+        Can be used for boards that require dd-ing a u-boot SPL before the actual partitions.
+      '';
+    };
+
+    compressImage = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether the SD image should be compressed using
+        {command}`zstd`.
+      '';
+    };
+
+    expandOnBoot = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to configure the SD image to expand its root partition on the first boot.
+      '';
+    };
+  };
+
+  config = {
+    fileSystems = {
+      "/boot/firmware" = {
+        device = "/dev/disk/by-label/${config.sdImage.firmwarePartitionName}";
+        fsType = "vfat";
+        # Alternatively, this could be removed from the configuration.
+        # The filesystem is not needed at runtime, it could be treated
+        # as an opaque blob instead of a discrete FAT32 filesystem.
+        options = [ "nofail" "noauto" ];
+      };
+      "/" = {
+        device = "/dev/disk/by-label/NIXOS_SD";
+        fsType = "ext4";
+      };
+    };
+
+    sdImage.storePaths = [ config.system.build.toplevel ];
+
+    system.build.sdImage = pkgs.callPackage ({ stdenv, dosfstools, e2fsprogs,
+    mtools, libfaketime, util-linux, zstd }: stdenv.mkDerivation {
+      name = config.sdImage.imageName;
+
+      nativeBuildInputs = [ dosfstools e2fsprogs libfaketime mtools util-linux ]
+      ++ lib.optional config.sdImage.compressImage zstd;
+
+      inherit (config.sdImage) imageName compressImage;
+
+      buildCommand = ''
+        mkdir -p $out/nix-support $out/sd-image
+        export img=$out/sd-image/${config.sdImage.imageName}
+
+        echo "${pkgs.stdenv.buildPlatform.system}" > $out/nix-support/system
+        if test -n "$compressImage"; then
+          echo "file sd-image $img.zst" >> $out/nix-support/hydra-build-products
+        else
+          echo "file sd-image $img" >> $out/nix-support/hydra-build-products
+        fi
+
+        root_fs=${rootfsImage}
+        ${lib.optionalString config.sdImage.compressImage ''
+        root_fs=./root-fs.img
+        echo "Decompressing rootfs image"
+        zstd -d --no-progress "${rootfsImage}" -o $root_fs
+        ''}
+
+        # Gap in front of the first partition, in MiB
+        gap=${toString config.sdImage.firmwarePartitionOffset}
+
+        # Create the image file sized to fit /boot/firmware and /, plus slack for the gap.
+        rootSizeBlocks=$(du -B 512 --apparent-size $root_fs | awk '{ print $1 }')
+        firmwareSizeBlocks=$((${toString config.sdImage.firmwareSize} * 1024 * 1024 / 512))
+        imageSize=$((rootSizeBlocks * 512 + firmwareSizeBlocks * 512 + gap * 1024 * 1024))
+        truncate -s $imageSize $img
+
+        # type=b is 'W95 FAT32', type=83 is 'Linux'.
+        # The "bootable" partition is where u-boot will look file for the bootloader
+        # information (dtbs, extlinux.conf file).
+        sfdisk $img <<EOF
+            label: dos
+            label-id: ${config.sdImage.firmwarePartitionID}
+
+            start=''${gap}M, size=$firmwareSizeBlocks, type=b
+            start=$((gap + ${toString config.sdImage.firmwareSize}))M, type=83, bootable
+        EOF
+
+        # Copy the rootfs into the SD image
+        eval $(partx $img -o START,SECTORS --nr 2 --pairs)
+        dd conv=notrunc if=$root_fs of=$img seek=$START count=$SECTORS
+
+        # Create a FAT32 /boot/firmware partition of suitable size into firmware_part.img
+        eval $(partx $img -o START,SECTORS --nr 1 --pairs)
+        truncate -s $((SECTORS * 512)) firmware_part.img
+
+        mkfs.vfat --invariant -i ${config.sdImage.firmwarePartitionID} -n ${config.sdImage.firmwarePartitionName} firmware_part.img
+
+        # Populate the files intended for /boot/firmware
+        mkdir firmware
+        ${config.sdImage.populateFirmwareCommands}
+
+        find firmware -exec touch --date=2000-01-01 {} +
+        # Copy the populated /boot/firmware into the SD image
+        cd firmware
+        # Force a fixed order in mcopy for better determinism, and avoid file globbing
+        for d in $(find . -mindepth 1 -type d | sort); do
+          faketime "2000-01-01 00:00:00" mmd -i ../firmware_part.img "::/$d"
+        done
+        for f in $(find . -type f | sort); do
+          mcopy -pvm -i ../firmware_part.img "$f" "::/$f"
+        done
+        cd ..
+
+        # Verify the FAT partition before copying it.
+        fsck.vfat -vn firmware_part.img
+        dd conv=notrunc if=firmware_part.img of=$img seek=$START count=$SECTORS
+
+        ${config.sdImage.postBuildCommands}
+
+        if test -n "$compressImage"; then
+            zstd -T$NIX_BUILD_CORES --rm $img
+        fi
+      '';
+    }) {};
+
+    boot.postBootCommands = lib.mkIf config.sdImage.expandOnBoot ''
+      # On the first boot do some maintenance tasks
+      if [ -f /nix-path-registration ]; then
+        set -euo pipefail
+        set -x
+        # Figure out device names for the boot device and root filesystem.
+        rootPart=$(${pkgs.util-linux}/bin/findmnt -n -o SOURCE /)
+        bootDevice=$(lsblk -npo PKNAME $rootPart)
+        partNum=$(lsblk -npo MAJ:MIN $rootPart | ${pkgs.gawk}/bin/awk -F: '{print $2}')
+
+        # Resize the root partition and the filesystem to fit the disk
+        echo ",+," | sfdisk -N$partNum --no-reread $bootDevice
+        ${pkgs.parted}/bin/partprobe
+        ${pkgs.e2fsprogs}/bin/resize2fs $rootPart
+
+        # Register the contents of the initial Nix store
+        ${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration
+
+        # nixos-rebuild also requires a "system" profile and an /etc/NIXOS tag.
+        touch /etc/NIXOS
+        ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
+
+        # Prevents this from running on later boots.
+        rm -f /nix-path-registration
+      fi
+    '';
+  };
+}
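Note: a quick way to sanity-check a built image against the layout produced by the sfdisk script above (a sketch; assumes `sdImage.compressImage = false`, otherwise decompress the .img.zst first):

    $ sfdisk -l result/sd-image/nixos-sd-image-*.img

With the default options, partition 1 (type b, W95 FAT32) starts at sector 16384 (the 8 MiB `firmwarePartitionOffset`) and spans 30 MiB for the FIRMWARE filesystem, while partition 2 (type 83, marked bootable) carries the NIXOS_SD ext4 root.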
diff --git a/nixpkgs/nixos/modules/installer/tools/get-version-suffix b/nixpkgs/nixos/modules/installer/tools/get-version-suffix
new file mode 100644
index 000000000000..8d72905cdcb4
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/get-version-suffix
@@ -0,0 +1,23 @@
+getVersion() {
+    local dir="$1"
+    rev=
+    gitDir="$dir/.git"
+    if [ -e "$gitDir" ]; then
+        if [ -z "$(type -P git)" ]; then
+            echo "warning: Git not found; cannot figure out revision of $dir" >&2
+            return
+        fi
+        cd "$dir"
+        rev=$(git --git-dir="$gitDir" rev-parse --short HEAD)
+        if git --git-dir="$gitDir" describe --always --dirty | grep -q dirty; then
+            rev+=M
+        fi
+    fi
+}
+
+if nixpkgs=$(nix-instantiate --find-file nixpkgs "$@"); then
+    getVersion $nixpkgs
+    if [ -n "$rev" ]; then
+        echo ".git.$rev"
+    fi
+fi
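Note: a usage sketch for this helper; the extra arguments are passed straight to nix-instantiate --find-file, and the printed suffix (hypothetical output shown) ends in M when the checkout is dirty:

    $ bash get-version-suffix -I nixpkgs=/path/to/a/nixpkgs/git/checkout
    .git.0f2b74c

Nothing is printed when the located nixpkgs is not a Git checkout.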
diff --git a/nixpkgs/nixos/modules/installer/tools/manpages/nixos-build-vms.8 b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-build-vms.8
new file mode 100644
index 000000000000..6a8f2c42eddf
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-build-vms.8
@@ -0,0 +1,105 @@
+.Dd January 1, 1980
+.Dt nixos-build-vms 8
+.Os
+.Sh NAME
+.Nm nixos-build-vms
+.Nd build a network of virtual machines from a network of NixOS configurations
+.
+.
+.
+.Sh SYNOPSIS
+.Nm nixos-build-vms
+.Op Fl -show-trace
+.Op Fl -no-out-link
+.Op Fl -help
+.Op Fl -option Ar name value
+.Pa network.nix
+.
+.
+.
+.Sh DESCRIPTION
+.
+This command builds a network of QEMU\-KVM virtual machines of a Nix expression
+specifying a network of NixOS machines. The virtual network can be started by
+executing the
+.Pa bin/nixos-run-vms
+shell script that is generated by this command. By default, a
+.Pa result
+symlink is produced that points to the generated virtual network.
+.
+.Pp
+A network Nix expression has the following structure:
+.Bd -literal -offset indent
+{
+  test1 = {pkgs, config, ...}:
+    {
+      services.openssh.enable = true;
+      nixpkgs.localSystem.system = "i686-linux";
+      deployment.targetHost = "test1.example.net";
+
+      # Other NixOS options
+    };
+
+  test2 = {pkgs, config, ...}:
+    {
+      services.openssh.enable = true;
+      services.httpd.enable = true;
+      environment.systemPackages = [ pkgs.lynx ];
+      nixpkgs.localSystem.system = "x86_64-linux";
+      deployment.targetHost = "test2.example.net";
+
+      # Other NixOS options
+    };
+}
+.Ed
+.
+.Pp
+Each attribute in the expression represents a machine in the network
+.Ns (e.g.
+.Va test1
+and
+.Va test2 Ns
+) referring to a function defining a NixOS configuration. In each NixOS
+configuration, two attributes have a special meaning. The
+.Va deployment.targetHost
+specifies the address (domain name or IP address) of the system which is used by
+.Ic ssh
+to perform remote deployment operations. The
+.Va nixpkgs.localSystem.system
+attribute can be used to specify an architecture for the target machine, such as
+.Ql i686-linux
+which builds a 32-bit NixOS configuration. Omitting this property will build the
+configuration for the same architecture as the host system.
+.
+.
+.
+.Sh OPTIONS
+.Bl -tag -width indent
+.It Fl -show-trace
+Shows a trace of the output.
+.
+.It Fl -no-out-link
+Do not create a
+.Pa result
+symlink.
+.
+.It Fl h , -help
+Shows the usage of this command to the user.
+.
+.It Fl -option Ar name Va value
+Set the Nix configuration option
+.Va name
+to
+.Va value Ns
+\&. This overrides settings in the Nix configuration file (see
+.Xr nix.conf 5 Ns
+).
+.El
+.
+.
+.
+.Sh AUTHORS
+.An -nosplit
+.An Eelco Dolstra
+and
+.An the Nixpkgs/NixOS contributors
diff --git a/nixpkgs/nixos/modules/installer/tools/manpages/nixos-enter.8 b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-enter.8
new file mode 100644
index 000000000000..646f92199d62
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-enter.8
@@ -0,0 +1,72 @@
+.Dd January 1, 1980
+.Dt nixos-enter 8
+.Os
+.Sh NAME
+.Nm nixos-enter
+.Nd run a command in a NixOS chroot environment
+.
+.
+.
+.Sh SYNOPSIS
+.Nm nixos-enter
+.Op Fl -root Ar root
+.Op Fl -system Ar system
+.Op Fl -command | c Ar shell-command
+.Op Fl -silent
+.Op Fl -help
+.Op Fl - Ar arguments ...
+.
+.
+.
+.Sh DESCRIPTION
+This command runs a command in a NixOS chroot environment, that is, in a filesystem hierarchy previously prepared using
+.Xr nixos-install 8 .
+.
+.
+.
+.Sh OPTIONS
+.Bl -tag -width indent
+.It Fl -root Ar root
+The path to the NixOS system you want to enter. It defaults to
+.Pa /mnt Ns
+\&.
+.It Fl -system Ar system
+The NixOS system configuration to use. It defaults to
+.Pa /nix/var/nix/profiles/system Ns
+\&. You can enter a previous NixOS configuration by specifying a path such as
+.Pa /nix/var/nix/profiles/system-106-link Ns
+\&.
+.
+.It Fl -command Ar shell-command , Fl c Ar shell-command
+The bash command to execute.
+.
+.It Fl -silent
+Suppresses all output from the activation script of the target system.
+.
+.It Fl -
+Interpret the remaining arguments as the program name and arguments to be invoked.
+The program is not executed in a shell.
+.El
+.
+.
+.
+.Sh EXAMPLES
+.Bl -tag -width indent
+.It Ic nixos-enter --root /mnt
+Start an interactive shell in the NixOS installation in
+.Pa /mnt Ns .
+.
+.It Ic nixos-enter -c 'ls -l /; cat /proc/mounts'
+Run a shell command.
+.
+.It Ic nixos-enter -- cat /proc/mounts
+Run a non-shell command.
+.El
+.
+.
+.
+.Sh AUTHORS
+.An -nosplit
+.An Eelco Dolstra
+and
+.An the Nixpkgs/NixOS contributors
diff --git a/nixpkgs/nixos/modules/installer/tools/manpages/nixos-generate-config.8 b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-generate-config.8
new file mode 100644
index 000000000000..1b95599e156a
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-generate-config.8
@@ -0,0 +1,165 @@
+.Dd January 1, 1980
+.Dt nixos-generate-config 8
+.Os
+.Sh NAME
+.Nm nixos-generate-config
+.Nd generate NixOS configuration modules
+.
+.
+.
+.Sh SYNOPSIS
+.Nm nixos-generate-config
+.Op Fl -force
+.Op Fl -root Ar root
+.Op Fl -dir Ar dir
+.
+.
+.
+.Sh DESCRIPTION
+This command writes two NixOS configuration modules:
+.Bl -tag -width indent
+.It Pa /etc/nixos/hardware-configuration.nix
+This module sets NixOS configuration options based on your current hardware
+configuration. In particular, it sets the
+.Va fileSystem
+option to reflect all currently mounted file systems, the
+.Va swapDevices
+option to reflect active swap devices, and the
+.Va boot.initrd.*
+options to ensure that the initial ramdisk contains any kernel modules necessary
+for mounting the root file system.
+.Pp
+If this file already exists, it is overwritten. Thus, you should not modify it
+manually. Rather, you should include it from your
+.Pa /etc/nixos/configuration.nix Ns
+, and re-run
+.Nm
+to update it whenever your hardware configuration changes.
+.
+.It Pa /etc/nixos/configuration.nix
+This is the main NixOS system configuration module. If it already exists, it’s
+left unchanged. Otherwise,
+.Nm
+will write a template for you to customise.
+.El
+.
+.
+.
+.Sh OPTIONS
+.Bl -tag -width indent
+.It Fl -root Ar root
+If this option is given, treat the directory
+.Ar root
+as the root of the file system. This means that configuration files will be written to
+.Ql Ar root Ns /etc/nixos Ns
+, and that any file systems outside of
+.Ar root
+are ignored for the purpose of generating the
+.Va fileSystems
+option.
+.
+.It Fl -dir Ar dir
+If this option is given, write the configuration files to the directory
+.Ar dir
+instead of
+.Pa /etc/nixos Ns
+\&.
+.
+.It Fl -force
+Overwrite
+.Pa /etc/nixos/configuration.nix
+if it already exists.
+.
+.It Fl -no-filesystems
+Omit everything concerning file systems and swap devices from the hardware configuration.
+.
+.It Fl -show-hardware-config
+Don't generate
+.Pa configuration.nix
+or
+.Pa hardware-configuration.nix
+and print the hardware configuration to stdout only.
+.El
+.
+.
+.
+.Sh EXAMPLES
+This command is typically used during NixOS installation to write initial
+configuration modules. For example, if you created and mounted the target file
+systems on
+.Pa /mnt
+and
+.Pa /mnt/boot Ns
+, you would run:
+.Bd -literal -offset indent
+$ nixos-generate-config --root /mnt
+.Ed
+.
+.Pp
+The resulting file
+.Pa /mnt/etc/nixos/hardware-configuration.nix
+might look like this:
+.Bd -literal -offset indent
+# Do not modify this file!  It was generated by 'nixos-generate-config'
+# and may be overwritten by future invocations.  Please make changes
+# to /etc/nixos/configuration.nix instead.
+{ config, pkgs, ... }:
+
+{
+  imports =
+    [ <nixos/modules/installer/scan/not-detected.nix>
+    ];
+
+  boot.initrd.availableKernelModules = [ "ehci_hcd" "ahci" ];
+  boot.kernelModules = [ "kvm-intel" ];
+  boot.extraModulePackages = [ ];
+
+  fileSystems."/" =
+    { device = "/dev/disk/by-label/nixos";
+      fsType = "ext3";
+      options = [ "rw" "data=ordered" "relatime" ];
+    };
+
+  fileSystems."/boot" =
+    { device = "/dev/sda1";
+      fsType = "ext3";
+      options = [ "rw" "errors=continue" "user_xattr" "acl" "barrier=1" "data=writeback" "relatime" ];
+    };
+
+  swapDevices =
+    [ { device = "/dev/sda2"; }
+    ];
+
+  nix.maxJobs = 8;
+}
+.Ed
+.
+.Pp
+It will also create a basic
+.Pa /mnt/etc/nixos/configuration.nix Ns
+, which you should edit to customise the logical configuration of your system. \
+This file includes the result of the hardware scan as follows:
+.Bd -literal -offset indent
+imports = [ ./hardware-configuration.nix ];
+.Ed
+.
+.Pp
+After installation, if your hardware configuration changes, you can run:
+.Bd -literal -offset indent
+$ nixos-generate-config
+.Ed
+.
+.Pp
+to update
+.Pa /etc/nixos/hardware-configuration.nix Ns
+\&. Your
+.Pa /etc/nixos/configuration.nix
+will
+.Em not
+be overwritten.
+.
+.Sh AUTHORS
+.An -nosplit
+.An Eelco Dolstra
+and
+.An the Nixpkgs/NixOS contributors
diff --git a/nixpkgs/nixos/modules/installer/tools/manpages/nixos-install.8 b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-install.8
new file mode 100644
index 000000000000..c6c8ed15224d
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-install.8
@@ -0,0 +1,191 @@
+.Dd January 1, 1980
+.Dt nixos-install 8
+.Os
+.Sh NAME
+.Nm nixos-install
+.Nd install bootloader and NixOS
+.
+.
+.
+.Sh SYNOPSIS
+.Nm nixos-install
+.Op Fl -verbose | v
+.Op Fl I Ar path
+.Op Fl -root Ar root
+.Op Fl -system Ar path
+.Op Fl -flake Ar flake-uri
+.Op Fl -impure
+.Op Fl -channel Ar channel
+.Op Fl -no-channel-copy
+.Op Fl -no-root-password | -no-root-passwd
+.Op Fl -no-bootloader
+.Op Fl -max-jobs | j Ar number
+.Op Fl -cores Ar number
+.Op Fl -option Ar name value
+.Op Fl -show-trace
+.Op Fl -keep-going
+.Op Fl -help
+.
+.
+.
+.Sh DESCRIPTION
+This command installs NixOS in the file system mounted on
+.Pa /mnt Ns
+, based on the NixOS configuration specified in
+.Pa /mnt/etc/nixos/configuration.nix Ns
+\&. It performs the following steps:
+.
+.Bl -enum
+.It
+It copies Nix and its dependencies to
+.Pa /mnt/nix/store Ns
+\&.
+.
+.It
+It runs Nix in
+.Pa /mnt
+to build the NixOS configuration specified in
+.Pa /mnt/etc/nixos/configuration.nix Ns
+\&.
+.
+.It
+It installs the current channel
+.Dq nixos
+in the target channel profile (unless
+.Fl -no-channel-copy
+is specified).
+.
+.It
+It installs the GRUB boot loader on the device specified in the option
+.Va boot.loader.grub.device
+(unless
+.Fl -no-bootloader
+is specified), and generates a GRUB configuration file that boots into the NixOS
+configuration just installed.
+.
+.It
+It prompts you for a password for the root account (unless
+.Fl -no-root-password
+is specified).
+.El
+.
+.Pp
+This command is idempotent: if it is interrupted or fails due to a temporary
+problem (e.g. a network issue), you can safely re-run it.
+.
+.
+.
+.Sh OPTIONS
+.Bl -tag -width indent
+.It Fl -verbose , v
+Increases the level of verbosity of diagnostic messages printed on standard
+error. For each Nix operation, the information printed on standard output is
+well-defined; any diagnostic information is printed on standard error, never on
+standard output.
+.Pp
+Please note that this option may be specified repeatedly.
+.
+.It Fl -root Ar root
+Defaults to
+.Pa /mnt Ns
+\&. If this option is given, treat the directory
+.Ar root
+as the root of the NixOS installation.
+.
+.It Fl -system Ar path
+If this option is provided,
+.Nm
+will install the specified closure rather than attempt to build one from
+.Pa /mnt/etc/nixos/configuration.nix Ns
+\&.
+.Pp
+The closure must be an appropriately configured NixOS system, with boot loader
+and partition configuration that fits the target host. Such a closure is
+typically obtained with a command such as
+.Ic nix-build -I nixos-config=./configuration.nix '<nixpkgs/nixos>' -A system --no-out-link Ns
+\&.
+.
+.It Fl -flake Ar flake-uri Ns # Ns Ar name
+Build the NixOS system from the specified flake. The flake must contain an
+output named
+.Ql nixosConfigurations. Ns Ar name Ns
+\&.
+.
+.It Fl -channel Ar channel
+If this option is provided, do not copy the current
+.Dq nixos
+channel to the target host. Instead, use the specified derivation.
+.
+.It Fl I Ar path
+Add a path to the Nix expression search path. This option may be given multiple
+times. See the
+.Ev NIX_PATH
+environment variable for information on the semantics of the Nix search path. Paths added through
+.Fl I
+take precedence over
+.Ev NIX_PATH Ns
+\&.
+.
+.It Fl -max-jobs , j Ar number
+Sets the maximum number of build jobs that Nix will perform in parallel to the
+specified number. The default is 1. A higher value is useful on SMP systems or
+to exploit I/O latency.
+.
+.It Fl -cores Ar N
+Sets the value of the
+.Ev NIX_BUILD_CORES
+environment variable in the invocation of builders. Builders can use this
+variable at their discretion to control the maximum amount of parallelism. For
+instance, in Nixpkgs, if the derivation attribute
+.Va enableParallelBuilding
+is set to true, the builder passes the
+.Fl j Ns Va N
+flag to GNU Make. The value 0 means that the builder should use all available CPU cores in the system.
+.
+.It Fl -option Ar name value
+Set the Nix configuration option
+.Ar name
+to
+.Ar value Ns
+\&.
+.
+.It Fl -show-trace
+Causes Nix to print out a stack trace in case of Nix expression evaluation errors.
+.
+.It Fl -keep-going
+Causes Nix to continue building derivations as far as possible in the face of failed builds.
+.
+.It Fl -help
+Synonym for
+.Ic man nixos-install Ns
+\&.
+.El
+.
+.
+.
+.Sh EXAMPLES
+A typical NixOS installation is done by creating and mounting a file system on
+.Pa /mnt Ns
+, generating a NixOS configuration in
+.Pa /mnt/etc/nixos/configuration.nix Ns
+, and running
+.Nm Ns
+\&. For instance, if we want to install NixOS on an ext4 file system created in
+.Pa /dev/sda1 Ns
+:
+.Bd -literal -offset indent
+$ mkfs.ext4 /dev/sda1
+$ mount /dev/sda1 /mnt
+$ nixos-generate-config --root /mnt
+$ # edit /mnt/etc/nixos/configuration.nix
+$ nixos-install
+$ reboot
+.Ed
+.
+.
+.
+.Sh AUTHORS
+.An -nosplit
+.An Eelco Dolstra
+and
+.An the Nixpkgs/NixOS contributors
diff --git a/nixpkgs/nixos/modules/installer/tools/manpages/nixos-version.8 b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-version.8
new file mode 100644
index 000000000000..f661611599fb
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/manpages/nixos-version.8
@@ -0,0 +1,86 @@
+.Dd January 1, 1980
+.Dt nixos-version 8
+.Os
+.Sh NAME
+.Nm nixos-version
+.Nd show the NixOS version
+.
+.
+.
+.Sh SYNOPSIS
+.Nm nixos-version
+.Op Fl -hash
+.Op Fl -revision
+.Op Fl -configuration-revision
+.Op Fl -json
+.
+.
+.
+.Sh DESCRIPTION
+This command shows the version of the currently active NixOS configuration. For example:
+.Bd -literal -offset indent
+$ nixos-version
+16.03.1011.6317da4 (Emu)
+.Ed
+.
+.Pp
+The version consists of the following elements:
+.Bl -tag -width indent
+.It Ql 16.03
+The NixOS release, indicating the year and month in which it was released
+(e.g. March 2016).
+.It Ql 1011
+The number of commits in the Nixpkgs Git repository between the start of the
+release branch and the commit from which this version was built. This ensures
+that NixOS versions are monotonically increasing. It is
+.Ql git
+when the current NixOS configuration was built from a checkout of the Nixpkgs
+Git repository rather than from a NixOS channel.
+.It Ql 6317da4
+The first 7 characters of the commit in the Nixpkgs Git repository from which
+this version was built.
+.It Ql Emu
+The code name of the NixOS release. The first letter of the code name indicates
+that this is the N'th stable NixOS release; for example, Emu is the fifth
+release.
+.El
+.
+.
+.
+.Sh OPTIONS
+.Bl -tag -width indent
+.It Fl -hash , -revision
+Show the full SHA1 hash of the Git commit from which this configuration was
+built, e.g.
+.Bd -literal -offset indent
+$ nixos-version --hash
+6317da40006f6bc2480c6781999c52d88dde2acf
+.Ed
+.
+.It Fl -configuration-revision
+Show the configuration revision if available. This could be the full SHA1 hash
+of the Git commit of the system flake, if you add
+.Bd -literal -offset indent
+{ system.configurationRevision = self.rev or "dirty"; }
+.Ed
+.Pp
+to the
+.Ql modules
+list of your flake.nix system configuration, e.g.
+.Bd -literal -offset indent
+$ nixos-version --configuration-revision
+aa314ebd1592f6cdd53cb5bba8bcae97d9323de8
+.Ed
+.
+.It Fl -json
+Print a JSON representation of the versions of NixOS and the top-level
+configuration flake.
+.El
+.
+.
+.
+.Sh AUTHORS
+.An -nosplit
+.An Eelco Dolstra
+and
+.An the Nixpkgs/NixOS contributors
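To show where the system.configurationRevision snippet from the option description above is meant to go, here is a minimal flake.nix sketch. The host name `mymachine` and the nixpkgs input are assumptions; only the `system.configurationRevision` line is taken from the manpage text.

    {
      inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";

      outputs = { self, nixpkgs }: {
        nixosConfigurations.mymachine = nixpkgs.lib.nixosSystem {
          system = "x86_64-linux";
          modules = [
            ./configuration.nix
            # Record the flake's own Git revision so that
            # `nixos-version --configuration-revision` can report it.
            { system.configurationRevision = self.rev or "dirty"; }
          ];
        };
      };
    }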
diff --git a/nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix b/nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix
new file mode 100644
index 000000000000..e4241e965403
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/nix-fallback-paths.nix
@@ -0,0 +1,7 @@
+{
+  x86_64-linux = "/nix/store/azvn85cras6xv4z5j85fiy406f24r1q0-nix-2.18.1";
+  i686-linux = "/nix/store/9bnwy7f9h0kzdzmcnjjsjg0aak5waj40-nix-2.18.1";
+  aarch64-linux = "/nix/store/hh65xwqm9s040s3cgn9vzcmrxj0sf5ij-nix-2.18.1";
+  x86_64-darwin = "/nix/store/6zi5fqzn9n17wrk8r41rhdw4j7jqqsi3-nix-2.18.1";
+  aarch64-darwin = "/nix/store/0pbq6wzr2f1jgpn5212knyxpwmkjgjah-nix-2.18.1";
+}
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix b/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix
new file mode 100644
index 000000000000..21a257378a63
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix
@@ -0,0 +1,31 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, networkExpr
+}:
+
+let
+  nodes = builtins.mapAttrs (vm: module: {
+    _file = "${networkExpr}@node-${vm}";
+    imports = [ module ];
+  }) (import networkExpr);
+
+  pkgs = import ../../../../.. { inherit system config; };
+
+  testing = import ../../../../lib/testing-python.nix {
+    inherit system pkgs;
+  };
+
+  interactiveDriver = (testing.makeTest { inherit nodes; name = "network"; testScript = "start_all(); join_all();"; }).test.driverInteractive;
+in
+
+
+pkgs.runCommand "nixos-build-vms" { nativeBuildInputs = [ pkgs.makeWrapper ]; } ''
+  mkdir -p $out/bin
+  ln -s ${interactiveDriver}/bin/nixos-test-driver $out/bin/nixos-test-driver
+  ln -s ${interactiveDriver}/bin/nixos-test-driver $out/bin/nixos-run-vms
+  wrapProgram $out/bin/nixos-test-driver \
+    --add-flags "--interactive"
+  wrapProgram $out/bin/nixos-run-vms \
+     --set testScript "${pkgs.writeText "start-all" "start_all(); join_all();"}" \
+     --add-flags "--no-interactive"
+''
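The networkExpr file imported above is expected to evaluate to an attribute set mapping VM names to NixOS modules; the nodes attribute set then wraps each of those modules into a machine definition. A minimal sketch of such a file, with machine names and services chosen purely for illustration:

    # network.nix (hypothetical): the network expression passed to nixos-build-vms
    {
      server = { config, pkgs, ... }: {
        services.openssh.enable = true;
        networking.firewall.allowedTCPPorts = [ 22 ];
      };

      client = { config, pkgs, ... }: {
        environment.systemPackages = [ pkgs.curl ];
      };
    }

Building this network yields a nixos-run-vms script that starts both virtual machines, plus a nixos-test-driver for interactive use, matching the two wrapped programs above.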
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh b/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh
new file mode 100644
index 000000000000..490ede04e6bb
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-build-vms/nixos-build-vms.sh
@@ -0,0 +1,53 @@
+#! @runtimeShell@ -e
+# shellcheck shell=bash
+
+# Shows the usage of this command to the user
+
+showUsage() {
+    exec man nixos-build-vms
+    exit 1
+}
+
+# Parse valid argument options
+
+nixBuildArgs=()
+networkExpr=
+
+while [ $# -gt 0 ]; do
+    case "$1" in
+      --no-out-link)
+        nixBuildArgs+=("--no-out-link")
+        ;;
+      --show-trace)
+        nixBuildArgs+=("--show-trace")
+        ;;
+      -h|--help)
+        showUsage
+        exit 0
+        ;;
+      --option)
+        shift
+        nixBuildArgs+=("--option" "$1" "$2"); shift
+        ;;
+      *)
+        if [ -n "$networkExpr" ]; then
+          echo "Network expression already set!"
+          showUsage
+          exit 1
+        fi
+        networkExpr="$(readlink -f "$1")"
+        ;;
+    esac
+
+    shift
+done
+
+if [ -z "$networkExpr" ]
+then
+    echo "ERROR: A network expression must be specified!" >&2
+    exit 1
+fi
+
+# Build a network of VMs
+nix-build '<nixpkgs/nixos/modules/installer/tools/nixos-build-vms/build-vms.nix>' \
+    --argstr networkExpr "$networkExpr" "${nixBuildArgs[@]}"
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-enter.sh b/nixpkgs/nixos/modules/installer/tools/nixos-enter.sh
new file mode 100755
index 000000000000..9141cc285702
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-enter.sh
@@ -0,0 +1,110 @@
+#! @runtimeShell@
+# shellcheck shell=bash
+
+set -e
+
+# Re-exec ourselves in a private mount namespace so that our bind
+# mounts get cleaned up automatically.
+if [ -z "$NIXOS_ENTER_REEXEC" ]; then
+    export NIXOS_ENTER_REEXEC=1
+    if [ "$(id -u)" != 0 ]; then
+        extraFlags="-r"
+    fi
+    exec unshare --fork --mount --uts --mount-proc --pid $extraFlags -- "$0" "$@"
+else
+    mount --make-rprivate /
+fi
+
+mountPoint=/mnt
+system=/nix/var/nix/profiles/system
+command=("$system/sw/bin/bash" "--login")
+silent=0
+
+while [ "$#" -gt 0 ]; do
+    i="$1"; shift 1
+    case "$i" in
+        --root)
+            mountPoint="$1"; shift 1
+            ;;
+        --system)
+            system="$1"; shift 1
+            ;;
+        --help)
+            exec man nixos-enter
+            exit 1
+            ;;
+        --command|-c)
+            command=("$system/sw/bin/bash" "-c" "$1")
+            shift 1
+            ;;
+        --silent)
+            silent=1
+            ;;
+        --)
+            command=("$@")
+            break
+            ;;
+        *)
+            echo "$0: unknown option \`$i'"
+            exit 1
+            ;;
+    esac
+done
+
+if [[ ! -e $mountPoint/etc/NIXOS ]]; then
+    echo "$0: '$mountPoint' is not a NixOS installation" >&2
+    exit 126
+fi
+
+mkdir -p "$mountPoint/dev" "$mountPoint/sys"
+chmod 0755 "$mountPoint/dev" "$mountPoint/sys"
+mount --rbind /dev "$mountPoint/dev"
+mount --rbind /sys "$mountPoint/sys"
+
+# modified from https://github.com/archlinux/arch-install-scripts/blob/bb04ab435a5a89cd5e5ee821783477bc80db797f/arch-chroot.in#L26-L52
+chroot_add_resolv_conf() {
+    local chrootDir="$1" resolvConf="$1/etc/resolv.conf"
+
+    [[ -e /etc/resolv.conf ]] || return 0
+
+    # Handle resolv.conf as a symlink to somewhere else.
+    if [[ -L "$resolvConf" ]]; then
+      # readlink(1) should always give us *something* since we know at this point
+      # it's a symlink. For simplicity, ignore the case of nested symlinks.
+      # We also ignore the possibility of `../`s escaping the root.
+      resolvConf="$(readlink "$resolvConf")"
+      if [[ "$resolvConf" = /* ]]; then
+        resolvConf="$chrootDir$resolvConf"
+      else
+        resolvConf="$chrootDir/etc/$resolvConf"
+      fi
+    fi
+
+    # ensure file exists to bind mount over
+    if [[ ! -f "$resolvConf" ]]; then
+      install -Dm644 /dev/null "$resolvConf" || return 1
+    fi
+
+    mount --bind /etc/resolv.conf "$resolvConf"
+}
+
+chroot_add_resolv_conf "$mountPoint" || echo "$0: failed to set up resolv.conf" >&2
+
+(
+    # If silent, write both stdout and stderr of activation script to /dev/null
+    # otherwise, write both streams to stderr of this process
+    if [ "$silent" -eq 1 ]; then
+        exec 2>/dev/null
+    fi
+
+    # Run the activation script. Set $LOCALE_ARCHIVE to suppress some Perl locale warnings.
+    LOCALE_ARCHIVE="$system/sw/lib/locale/locale-archive" IN_NIXOS_ENTER=1 chroot "$mountPoint" "$system/activate" 1>&2 || true
+
+    # Create /tmp. This is needed for nix-build and the NixOS activation script to work.
+    # Hide the unhelpful "failed to replace specifiers" errors caused by missing /etc/machine-id.
+    chroot "$mountPoint" "$system/sw/bin/systemd-tmpfiles" --create --remove -E 2> /dev/null || true
+)
+
+unset TMPDIR
+
+exec chroot "$mountPoint" "${command[@]}"
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl b/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl
new file mode 100644
index 000000000000..2f9edba4f0c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-generate-config.pl
@@ -0,0 +1,701 @@
+#! @perl@
+
+use strict;
+use Cwd 'abs_path';
+use File::Spec;
+use File::Path;
+use File::Basename;
+use File::Slurp;
+use File::stat;
+
+umask(0022);
+
+sub uniq {
+    my %seen;
+    my @res = ();
+    foreach my $s (@_) {
+        if (!defined $seen{$s}) {
+            $seen{$s} = 1;
+            push @res, $s;
+        }
+    }
+    return @res;
+}
+
+sub runCommand {
+    my ($cmd) = @_;
+    open FILE, "$cmd 2>&1 |" or die "Failed to execute: $cmd\n";
+    my @ret = <FILE>;
+    close FILE;
+    return ($?, @ret);
+}
+
+# Process the command line.
+my $outDir = "/etc/nixos";
+my $rootDir = ""; # = /
+my $force = 0;
+my $noFilesystems = 0;
+my $showHardwareConfig = 0;
+
+for (my $n = 0; $n < scalar @ARGV; $n++) {
+    my $arg = $ARGV[$n];
+    if ($arg eq "--help") {
+        exec "man nixos-generate-config" or die;
+    }
+    elsif ($arg eq "--dir") {
+        $n++;
+        $outDir = $ARGV[$n];
+        die "$0: ‘--dir’ requires an argument\n" unless defined $outDir;
+    }
+    elsif ($arg eq "--root") {
+        $n++;
+        $rootDir = $ARGV[$n];
+        die "$0: ‘--root’ requires an argument\n" unless defined $rootDir;
+        die "$0: no need to specify `/` with `--root`, it is the default\n" if $rootDir eq "/";
+        $rootDir =~ s/\/*$//; # remove trailing slashes
+        $rootDir = File::Spec->rel2abs($rootDir); # resolve absolute path
+    }
+    elsif ($arg eq "--force") {
+        $force = 1;
+    }
+    elsif ($arg eq "--no-filesystems") {
+        $noFilesystems = 1;
+    }
+    elsif ($arg eq "--show-hardware-config") {
+        $showHardwareConfig = 1;
+    }
+    else {
+        die "$0: unrecognized argument ‘$arg’\n";
+    }
+}
+
+
+my @attrs = ();
+my @kernelModules = ();
+my @initrdKernelModules = ();
+my @initrdAvailableKernelModules = ();
+my @modulePackages = ();
+my @imports;
+
+
+sub debug {
+    return unless defined $ENV{"DEBUG"};
+    print STDERR @_;
+}
+
+
+# nixpkgs.system
+push @attrs, "nixpkgs.hostPlatform = lib.mkDefault \"@hostPlatformSystem@\";";
+
+
+my $cpuinfo = read_file "/proc/cpuinfo";
+
+
+sub hasCPUFeature {
+    my $feature = shift;
+    return $cpuinfo =~ /^flags\s*:.* $feature( |$)/m;
+}
+
+
+sub cpuManufacturer {
+    my $id = shift;
+    return $cpuinfo =~ /^vendor_id\s*:.* $id$/m;
+}
+
+# Virtualization support?
+push @kernelModules, "kvm-intel" if hasCPUFeature "vmx";
+push @kernelModules, "kvm-amd" if hasCPUFeature "svm";
+
+
+# Look at the PCI devices and add necessary modules.  Note that most
+# modules are auto-detected so we don't need to list them here.
+# However, some are needed in the initrd to boot the system.
+
+my $videoDriver;
+
+sub pciCheck {
+    my $path = shift;
+    my $vendor = read_file "$path/vendor"; chomp $vendor;
+    my $device = read_file "$path/device"; chomp $device;
+    my $class = read_file "$path/class"; chomp $class;
+
+    my $module;
+    if (-e "$path/driver/module") {
+        $module = basename `readlink -f $path/driver/module`;
+        chomp $module;
+    }
+
+    debug "$path: $vendor $device $class";
+    debug " $module" if defined $module;
+    debug "\n";
+
+    if (defined $module) {
+        # See the bottom of https://pciids.sourceforge.net/pci.ids for
+        # device classes.
+        if (# Mass-storage controller.  Definitely important.
+            $class =~ /^0x01/ ||
+
+            # Firewire controller.  A disk might be attached.
+            $class =~ /^0x0c00/ ||
+
+            # USB controller.  Needed if we want to use the
+            # keyboard when things go wrong in the initrd.
+            $class =~ /^0x0c03/
+            )
+        {
+            push @initrdAvailableKernelModules, $module;
+        }
+    }
+
+    # broadcom STA driver (wl.ko)
+    # list taken from http://www.broadcom.com/docs/linux_sta/README.txt
+    if ($vendor eq "0x14e4" &&
+        ($device eq "0x4311" || $device eq "0x4312" || $device eq "0x4313" ||
+         $device eq "0x4315" || $device eq "0x4327" || $device eq "0x4328" ||
+         $device eq "0x4329" || $device eq "0x432a" || $device eq "0x432b" ||
+         $device eq "0x432c" || $device eq "0x432d" || $device eq "0x4353" ||
+         $device eq "0x4357" || $device eq "0x4358" || $device eq "0x4359" ||
+         $device eq "0x4331" || $device eq "0x43a0" || $device eq "0x43b1"
+        ) )
+     {
+        push @modulePackages, "config.boot.kernelPackages.broadcom_sta";
+        push @kernelModules, "wl";
+     }
+
+    # broadcom FullMac driver
+    # list taken from
+    # https://wireless.wiki.kernel.org/en/users/Drivers/brcm80211#brcmfmac
+    if ($vendor eq "0x14e4" &&
+        ($device eq "0x43a3" || $device eq "0x43df" || $device eq "0x43ec" ||
+         $device eq "0x43d3" || $device eq "0x43d9" || $device eq "0x43e9" ||
+         $device eq "0x43ba" || $device eq "0x43bb" || $device eq "0x43bc" ||
+         $device eq "0xaa52" || $device eq "0x43ca" || $device eq "0x43cb" ||
+         $device eq "0x43cc" || $device eq "0x43c3" || $device eq "0x43c4" ||
+         $device eq "0x43c5"
+        ) )
+    {
+        # we need e.g. brcmfmac43602-pcie.bin
+        push @imports, "(modulesPath + \"/hardware/network/broadcom-43xx.nix\")";
+    }
+
+    # In case this is a virtio scsi device, we need to explicitly make this available.
+    if ($vendor eq "0x1af4" && ($device eq "0x1004" || $device eq "0x1048") ) {
+        push @initrdAvailableKernelModules, "virtio_scsi";
+    }
+
+    # Can't rely on $module here, since the module may not be loaded
+    # due to missing firmware.  Ideally we would check modules.pcimap
+    # here.
+    push @attrs, "networking.enableIntel2200BGFirmware = true;" if
+        $vendor eq "0x8086" &&
+        ($device eq "0x1043" || $device eq "0x104f" || $device eq "0x4220" ||
+         $device eq "0x4221" || $device eq "0x4223" || $device eq "0x4224");
+
+    push @attrs, "networking.enableIntel3945ABGFirmware = true;" if
+        $vendor eq "0x8086" &&
+        ($device eq "0x4229" || $device eq "0x4230" ||
+         $device eq "0x4222" || $device eq "0x4227");
+
+    # Assume that all NVIDIA cards are supported by the NVIDIA driver.
+    # There may be exceptions (e.g. old cards).
+    # FIXME: do we want to enable an unfree driver here?
+    #$videoDriver = "nvidia" if $vendor eq "0x10de" && $class =~ /^0x03/;
+}
+
+foreach my $path (glob "/sys/bus/pci/devices/*") {
+    pciCheck $path;
+}
+
+# Idem for USB devices.
+
+sub usbCheck {
+    my $path = shift;
+    my $class = read_file "$path/bInterfaceClass"; chomp $class;
+    my $subclass = read_file "$path/bInterfaceSubClass"; chomp $subclass;
+    my $protocol = read_file "$path/bInterfaceProtocol"; chomp $protocol;
+
+    my $module;
+    if (-e "$path/driver/module") {
+        $module = basename `readlink -f $path/driver/module`;
+        chomp $module;
+    }
+
+    debug "$path: $class $subclass $protocol";
+    debug " $module" if defined $module;
+    debug "\n";
+
+    if (defined $module) {
+        if (# Mass-storage controller.  Definitely important.
+            $class eq "08" ||
+
+            # Keyboard.  Needed if we want to use the
+            # keyboard when things go wrong in the initrd.
+            ($class eq "03" && $protocol eq "01")
+            )
+        {
+            push @initrdAvailableKernelModules, $module;
+        }
+    }
+}
+
+foreach my $path (glob "/sys/bus/usb/devices/*") {
+    if (-e "$path/bInterfaceClass") {
+        usbCheck $path;
+    }
+}
+
+
+# Add the modules for all block and MMC devices.
+foreach my $path (glob "/sys/class/{block,mmc_host}/*") {
+    my $module;
+    if (-e "$path/device/driver/module") {
+        $module = basename `readlink -f $path/device/driver/module`;
+        chomp $module;
+        push @initrdAvailableKernelModules, $module;
+    }
+}
+
+# Add bcache module, if needed.
+my @bcacheDevices = glob("/dev/bcache*");
+@bcacheDevices = grep { $_ !~ m#/dev/bcachefs# } @bcacheDevices; # exclude bcachefs devices
+if (scalar @bcacheDevices > 0) {
+    push @initrdAvailableKernelModules, "bcache";
+}
+
+# Prevent unbootable systems if LVM snapshots are present at boot time.
+if (`lsblk -o TYPE` =~ "lvm") {
+    push @initrdKernelModules, "dm-snapshot";
+}
+
+my $virt = `@detectvirt@`;
+chomp $virt;
+
+
+# Check if we're a VirtualBox guest.  If so, enable the guest
+# additions.
+if ($virt eq "oracle") {
+    push @attrs, "virtualisation.virtualbox.guest.enable = true;"
+}
+
+# Check if we're a Parallels guest. If so, enable the guest additions.
+# It is blocked by https://github.com/systemd/systemd/pull/23859
+if ($virt eq "parallels") {
+    push @attrs, "hardware.parallels.enable = true;";
+    push @attrs, "nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ \"prl-tools\" ];";
+}
+
+# Likewise for QEMU.
+if ($virt eq "qemu" || $virt eq "kvm" || $virt eq "bochs") {
+    push @imports, "(modulesPath + \"/profiles/qemu-guest.nix\")";
+}
+
+# Also for Hyper-V.
+if ($virt eq "microsoft") {
+    push @attrs, "virtualisation.hypervGuest.enable = true;"
+}
+
+
+# Pull in NixOS configuration for containers.
+if ($virt eq "systemd-nspawn") {
+    push @attrs, "boot.isContainer = true;";
+}
+
+
+# Check if we're on bare metal, not in a VM/container.
+if ($virt eq "none") {
+    # Provide firmware for devices that are not detected by this script.
+    push @imports, "(modulesPath + \"/installer/scan/not-detected.nix\")";
+
+    # Update the microcode.
+    push @attrs, "hardware.cpu.amd.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "AuthenticAMD";
+    push @attrs, "hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;" if cpuManufacturer "GenuineIntel";
+}
+
+# For a device name like /dev/sda1, find a more stable path like
+# /dev/disk/by-uuid/X or /dev/disk/by-label/Y.
+sub findStableDevPath {
+    my ($dev) = @_;
+    return $dev if substr($dev, 0, 1) ne "/";
+    return $dev unless -e $dev;
+
+    my $st = stat($dev) or return $dev;
+
+    foreach my $dev2 (glob("/dev/stratis/*/*"), glob("/dev/disk/by-uuid/*"), glob("/dev/mapper/*"), glob("/dev/disk/by-label/*")) {
+        my $st2 = stat($dev2) or next;
+        return $dev2 if $st->rdev == $st2->rdev;
+    }
+
+    return $dev;
+}
+
+push @attrs, "services.xserver.videoDrivers = [ \"$videoDriver\" ];" if $videoDriver;
+
+# Generate the swapDevices option from the currently activated swap
+# devices.
+my @swaps = read_file("/proc/swaps", err_mode => 'carp');
+my @swapDevices;
+if (@swaps) {
+    shift @swaps;
+    foreach my $swap (@swaps) {
+        my @fields = split ' ', $swap;
+        my $swapFilename = $fields[0];
+        my $swapType = $fields[1];
+        next unless -e $swapFilename;
+        my $dev = findStableDevPath $swapFilename;
+        if ($swapType =~ "partition") {
+            # zram devices are more likely created by configuration.nix, so
+            # ignore them here
+            next if ($swapFilename =~ /^\/dev\/zram/);
+            push @swapDevices, "{ device = \"$dev\"; }";
+        } elsif ($swapType =~ "file") {
+            # swap *files* are more likely specified in configuration.nix, so
+            # ignore them here.
+        } else {
+            die "Unsupported swap type: $swapType\n";
+        }
+    }
+}
+
+
+# Generate the fileSystems option from the currently mounted
+# filesystems.
+sub in {
+    my ($d1, $d2) = @_;
+    return $d1 eq $d2 || substr($d1, 0, length($d2) + 1) eq "$d2/";
+}
+
+my $fileSystems;
+my %fsByDev;
+my $useSwraid = 0;
+foreach my $fs (read_file("/proc/self/mountinfo")) {
+    chomp $fs;
+    my @fields = split / /, $fs;
+    my $mountPoint = $fields[4];
+    $mountPoint =~ s/\\040/ /g; # account for mount points with spaces in the name (\040 is the escape character)
+    $mountPoint =~ s/\\011/\t/g; # account for mount points with tabs in the name (\011 is the escape character)
+    next unless -d $mountPoint;
+    my @mountOptions = split /,/, $fields[5];
+
+    next if !in($mountPoint, $rootDir);
+    $mountPoint = substr($mountPoint, length($rootDir)); # strip the root directory (e.g. /mnt)
+    $mountPoint = "/" if $mountPoint eq "";
+
+    # Skip special filesystems.
+    next if in($mountPoint, "/proc") || in($mountPoint, "/dev") || in($mountPoint, "/sys") || in($mountPoint, "/run") || $mountPoint eq "/var/lib/nfs/rpc_pipefs";
+
+    # Skip the optional fields.
+    my $n = 6; $n++ while $fields[$n] ne "-"; $n++;
+    my $fsType = $fields[$n];
+    my $device = $fields[$n + 1];
+    my @superOptions = split /,/, $fields[$n + 2];
+    $device =~ s/\\040/ /g; # account for devices with spaces in the name (\040 is the escape character)
+    $device =~ s/\\011/\t/g; # account for mount points with tabs in the name (\011 is the escape character)
+
+    # Skip the read-only bind-mount on /nix/store.
+    next if $mountPoint eq "/nix/store" && (grep { $_ eq "rw" } @superOptions) && (grep { $_ eq "ro" } @mountOptions);
+
+    # Maybe this is a bind-mount of a filesystem we saw earlier?
+    if (defined $fsByDev{$fields[2]}) {
+        # Make sure this isn't a btrfs subvolume.
+        my $msg = `@btrfs@ subvol show $rootDir$mountPoint`;
+        if ($? != 0 || $msg =~ /ERROR:/s) {
+            my $path = $fields[3]; $path = "" if $path eq "/";
+            my $base = $fsByDev{$fields[2]};
+            $base = "" if $base eq "/";
+            $fileSystems .= <<EOF;
+  fileSystems.\"$mountPoint\" =
+    { device = \"$base$path\";
+      fsType = \"none\";
+      options = \[ \"bind\" \];
+    };
+
+EOF
+            next;
+        }
+    }
+    $fsByDev{$fields[2]} = $mountPoint;
+
+    # We don't know how to handle FUSE filesystems.
+    if ($fsType eq "fuseblk" || $fsType eq "fuse") {
+        print STDERR "warning: don't know how to emit ‘fileSystem’ option for FUSE filesystem ‘$mountPoint’\n";
+        next;
+    }
+
+    # Is this a mount of a loopback device?
+    my @extraOptions;
+    if ($device =~ /\/dev\/loop(\d+)/) {
+        my $loopnr = $1;
+        my $backer = read_file "/sys/block/loop$loopnr/loop/backing_file";
+        if (defined $backer) {
+            chomp $backer;
+            $device = $backer;
+            push @extraOptions, "loop";
+        }
+    }
+
+    # Is this a btrfs filesystem?
+    if ($fsType eq "btrfs") {
+        my ($status, @info) = runCommand("@btrfs@ subvol show $rootDir$mountPoint");
+        if ($status != 0 || join("", @info) =~ /ERROR:/) {
+            die "Failed to retrieve subvolume info for $mountPoint\n";
+        }
+        my @ids = join("\n", @info) =~ m/^(?!\/\n).*Subvolume ID:[ \t\n]*([0-9]+)/s;
+        if ($#ids > 0) {
+            die "Btrfs subvol name for $mountPoint listed multiple times in mount\n"
+        } elsif ($#ids == 0) {
+            my @paths = join("", @info) =~ m/^([^\n]*)/;
+            if ($#paths > 0) {
+                die "Btrfs returned multiple paths for a single subvolume id, mountpoint $mountPoint\n";
+            } elsif ($#paths != 0) {
+                die "Btrfs did not return a path for the subvolume at $mountPoint\n";
+            }
+            push @extraOptions, "subvol=$paths[0]";
+        }
+    }
+
+    # is this a stratis fs?
+    my $stableDevPath = findStableDevPath $device;
+    my $stratisPool;
+    if ($stableDevPath =~ qr#/dev/stratis/(.*)/.*#) {
+        my $poolName = $1;
+        my ($header, @lines) = split "\n", qx/stratis pool list/;
+        my $uuidIndex = index $header, 'UUID';
+        my ($line) = grep /^$poolName /, @lines;
+        $stratisPool = substr $line, $uuidIndex - 32, 36;
+    }
+
+    # Don't emit tmpfs entry for /tmp, because it most likely comes from the
+    # boot.tmp.useTmpfs option in configuration.nix (managed declaratively).
+    next if ($mountPoint eq "/tmp" && $fsType eq "tmpfs");
+
+    # This should work for single and multi-device systems.
+    # still needs subvolume support
+    if ($fsType eq "bcachefs") {
+        my ($status, @info) = runCommand("bcachefs fs usage $rootDir$mountPoint");
+        my $UUID = $info[0];
+
+        if ($status == 0 && $UUID =~ /^Filesystem:[ \t\n]*([0-9a-z-]+)/) {
+            $stableDevPath = "UUID=$1";
+        } else {
+            print STDERR "warning: can't find bcachefs mount UUID, falling back to device path\n";
+        }
+    }
+
+    # Emit the filesystem.
+    $fileSystems .= <<EOF;
+  fileSystems.\"$mountPoint\" =
+    { device = \"$stableDevPath\";
+      fsType = \"$fsType\";
+EOF
+
+    if (scalar @extraOptions > 0) {
+        $fileSystems .= <<EOF;
+      options = \[ ${\join " ", map { "\"" . $_ . "\"" } uniq(@extraOptions)} \];
+EOF
+    }
+
+    if ($stratisPool) {
+        $fileSystems .= <<EOF;
+      stratis.poolUuid = "$stratisPool";
+EOF
+    }
+
+    $fileSystems .= <<EOF;
+    };
+
+EOF
+
+    # If this filesystem is on a LUKS device, then add a
+    # boot.initrd.luks.devices entry.
+    if (-e $device) {
+        my $deviceName = basename(abs_path($device));
+        my $dmUuid = read_file("/sys/class/block/$deviceName/dm/uuid",  err_mode => 'quiet');
+        if ($dmUuid =~ /^CRYPT-LUKS/)
+        {
+            my @slaves = glob("/sys/class/block/$deviceName/slaves/*");
+            if (scalar @slaves == 1) {
+                my $slave = "/dev/" . basename($slaves[0]);
+                if (-e $slave) {
+                    my $dmName = read_file("/sys/class/block/$deviceName/dm/name");
+                    chomp $dmName;
+                    # Ensure to add an entry only once
+                    my $luksDevice = "  boot.initrd.luks.devices.\"$dmName\".device";
+                    if ($fileSystems !~ /^\Q$luksDevice\E/m) {
+                        $fileSystems .= "$luksDevice = \"${\(findStableDevPath $slave)}\";\n\n";
+                    }
+                }
+            }
+        }
+        if (-e "/sys/class/block/$deviceName/md/uuid") {
+            $useSwraid = 1;
+        }
+    }
+}
+if ($useSwraid) {
+    push @attrs, "boot.swraid.enable = true;\n\n";
+}
+
+
+# Generate the hardware configuration file.
+
+sub toNixStringList {
+    my $res = "";
+    foreach my $s (@_) {
+        $res .= " \"$s\"";
+    }
+    return $res;
+}
+sub toNixList {
+    my $res = "";
+    foreach my $s (@_) {
+        $res .= " $s";
+    }
+    return $res;
+}
+
+sub multiLineList {
+    my $indent = shift;
+    return " [ ]" if !@_;
+    my $res = "\n${indent}[ ";
+    my $first = 1;
+    foreach my $s (@_) {
+        $res .= "$indent  " if !$first;
+        $first = 0;
+        $res .= "$s\n";
+    }
+    $res .= "$indent]";
+    return $res;
+}
+
+my $initrdAvailableKernelModules = toNixStringList(uniq @initrdAvailableKernelModules);
+my $initrdKernelModules = toNixStringList(uniq @initrdKernelModules);
+my $kernelModules = toNixStringList(uniq @kernelModules);
+my $modulePackages = toNixList(uniq @modulePackages);
+
+my $fsAndSwap = "";
+if (!$noFilesystems) {
+    $fsAndSwap = "\n$fileSystems  ";
+    $fsAndSwap .= "swapDevices =" . multiLineList("    ", @swapDevices) . ";\n";
+}
+
+my $networkingDhcpConfig = generateNetworkingDhcpConfig();
+
+my $hwConfig = <<EOF;
+# Do not modify this file!  It was generated by ‘nixos-generate-config’
+# and may be overwritten by future invocations.  Please make changes
+# to /etc/nixos/configuration.nix instead.
+{ config, lib, pkgs, modulesPath, ... }:
+
+{
+  imports =${\multiLineList("    ", @imports)};
+
+  boot.initrd.availableKernelModules = [$initrdAvailableKernelModules ];
+  boot.initrd.kernelModules = [$initrdKernelModules ];
+  boot.kernelModules = [$kernelModules ];
+  boot.extraModulePackages = [$modulePackages ];
+$fsAndSwap
+$networkingDhcpConfig
+${\join "", (map { "  $_\n" } (uniq @attrs))}}
+EOF
+
+sub generateNetworkingDhcpConfig {
+    # FIXME disable networking.useDHCP by default when switching to networkd.
+    my $config = <<EOF;
+  # Enables DHCP on each ethernet and wireless interface. In case of scripted networking
+  # (the default) this is the recommended approach. When using systemd-networkd it's
+  # still possible to use this option, but it's recommended to use it in conjunction
+  # with explicit per-interface declarations with `networking.interfaces.<interface>.useDHCP`.
+  networking.useDHCP = lib.mkDefault true;
+EOF
+
+    foreach my $path (glob "/sys/class/net/*") {
+        my $dev = basename($path);
+        if ($dev ne "lo") {
+            $config .= "  # networking.interfaces.$dev.useDHCP = lib.mkDefault true;\n";
+        }
+    }
+
+    return $config;
+}
+
+sub generateXserverConfig {
+    my $xserverEnabled = "@xserverEnabled@";
+
+    my $config = "";
+    if ($xserverEnabled eq "1") {
+        $config = <<EOF;
+  # Enable the X11 windowing system.
+  services.xserver.enable = true;
+EOF
+    } else {
+        $config = <<EOF;
+  # Enable the X11 windowing system.
+  # services.xserver.enable = true;
+EOF
+    }
+    return $config;
+}
+
+if ($showHardwareConfig) {
+    print STDOUT $hwConfig;
+} else {
+    if ($outDir eq "/etc/nixos") {
+        $outDir = "$rootDir$outDir";
+    } else {
+        $outDir = File::Spec->rel2abs($outDir);
+        $outDir =~ s/\/*$//; # remove trailing slashes
+    }
+
+    my $fn = "$outDir/hardware-configuration.nix";
+    print STDERR "writing $fn...\n";
+    mkpath($outDir, 0, 0755);
+    write_file($fn, $hwConfig);
+
+    # Generate a basic configuration.nix, unless one already exists.
+    $fn = "$outDir/configuration.nix";
+    if ($force || ! -e $fn) {
+        print STDERR "writing $fn...\n";
+
+        my $bootLoaderConfig = "";
+        if (-e "/sys/firmware/efi/efivars") {
+            $bootLoaderConfig = <<EOF;
+  # Use the systemd-boot EFI boot loader.
+  boot.loader.systemd-boot.enable = true;
+  boot.loader.efi.canTouchEfiVariables = true;
+EOF
+        } elsif (-e "/boot/extlinux") {
+            $bootLoaderConfig = <<EOF;
+  # Use the extlinux boot loader. (NixOS wants to enable GRUB by default)
+  boot.loader.grub.enable = false;
+  # Enables the generation of /boot/extlinux/extlinux.conf
+  boot.loader.generic-extlinux-compatible.enable = true;
+EOF
+        } elsif ($virt ne "systemd-nspawn") {
+            $bootLoaderConfig = <<EOF;
+  # Use the GRUB 2 boot loader.
+  boot.loader.grub.enable = true;
+  # boot.loader.grub.efiSupport = true;
+  # boot.loader.grub.efiInstallAsRemovable = true;
+  # boot.loader.efi.efiSysMountPoint = "/boot/efi";
+  # Define on which hard drive you want to install Grub.
+  # boot.loader.grub.device = "/dev/sda"; # or "nodev" for efi only
+EOF
+        }
+
+        my $networkingDhcpConfig = generateNetworkingDhcpConfig();
+
+        my $xserverConfig = generateXserverConfig();
+
+        (my $desktopConfiguration = <<EOF)=~s/^/  /gm;
+@desktopConfiguration@
+EOF
+
+        write_file($fn, <<EOF);
+@configuration@
+EOF
+        print STDERR "For more hardware-specific settings, see https://github.com/NixOS/nixos-hardware.\n"
+    } else {
+        print STDERR "warning: not overwriting existing $fn\n";
+    }
+}
+
+# workaround for a bug in substituteAll
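For orientation, the $hwConfig template above typically expands to something like the following hardware-configuration.nix on a simple single-disk machine. This is a hypothetical example: the kernel modules, UUID and platform are invented, and only the overall shape follows the template.

    # Do not modify this file!  It was generated by ‘nixos-generate-config’
    # and may be overwritten by future invocations.  Please make changes
    # to /etc/nixos/configuration.nix instead.
    { config, lib, pkgs, modulesPath, ... }:

    {
      imports =
        [ (modulesPath + "/installer/scan/not-detected.nix")
        ];

      boot.initrd.availableKernelModules = [ "ahci" "xhci_pci" "usb_storage" "sd_mod" ];
      boot.initrd.kernelModules = [ ];
      boot.kernelModules = [ "kvm-intel" ];
      boot.extraModulePackages = [ ];

      fileSystems."/" =
        { device = "/dev/disk/by-uuid/aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeee";  # invented UUID
          fsType = "ext4";
        };

      swapDevices = [ ];

      networking.useDHCP = lib.mkDefault true;

      nixpkgs.hostPlatform = lib.mkDefault "x86_64-linux";
      hardware.cpu.intel.updateMicrocode = lib.mkDefault config.hardware.enableRedistributableFirmware;
    }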
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-install.sh b/nixpkgs/nixos/modules/installer/tools/nixos-install.sh
new file mode 100755
index 000000000000..4e42875c0365
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-install.sh
@@ -0,0 +1,231 @@
+#! @runtimeShell@
+# shellcheck shell=bash
+
+set -e
+shopt -s nullglob
+
+export PATH=@path@:$PATH
+
+# Ensure a consistent umask.
+umask 0022
+
+# Parse the command line for the -I flag
+extraBuildFlags=()
+flakeFlags=()
+
+mountPoint=/mnt
+channelPath=
+system=
+verbosity=()
+
+while [ "$#" -gt 0 ]; do
+    i="$1"; shift 1
+    case "$i" in
+        --max-jobs|-j|--cores|-I|--substituters)
+            j="$1"; shift 1
+            extraBuildFlags+=("$i" "$j")
+            ;;
+        --option)
+            j="$1"; shift 1
+            k="$1"; shift 1
+            extraBuildFlags+=("$i" "$j" "$k")
+            ;;
+        --root)
+            mountPoint="$1"; shift 1
+            ;;
+        --system|--closure)
+            system="$1"; shift 1
+            ;;
+        --flake)
+          flake="$1"
+          flakeFlags=(--experimental-features 'nix-command flakes')
+          shift 1
+          ;;
+        --recreate-lock-file|--no-update-lock-file|--no-write-lock-file|--no-registries|--commit-lock-file)
+          lockFlags+=("$i")
+          ;;
+        --update-input)
+          j="$1"; shift 1
+          lockFlags+=("$i" "$j")
+          ;;
+        --override-input)
+          j="$1"; shift 1
+          k="$1"; shift 1
+          lockFlags+=("$i" "$j" "$k")
+          ;;
+        --channel)
+            channelPath="$1"; shift 1
+            ;;
+        --no-channel-copy)
+            noChannelCopy=1
+            ;;
+        --no-root-password|--no-root-passwd)
+            noRootPasswd=1
+            ;;
+        --no-bootloader)
+            noBootLoader=1
+            ;;
+        --show-trace|--impure|--keep-going)
+            extraBuildFlags+=("$i")
+            ;;
+        --help)
+            exec man nixos-install
+            exit 1
+            ;;
+        --debug)
+            set -x
+            ;;
+        -v*|--verbose)
+            verbosity+=("$i")
+            ;;
+        *)
+            echo "$0: unknown option \`$i'"
+            exit 1
+            ;;
+    esac
+done
+
+if ! test -e "$mountPoint"; then
+    echo "mount point $mountPoint doesn't exist"
+    exit 1
+fi
+
+# Verify permissions are okay-enough
+checkPath="$(realpath "$mountPoint")"
+while [[ "$checkPath" != "/" ]]; do
+    mode="$(stat -c '%a' "$checkPath")"
+    if [[ "${mode: -1}" -lt "5" ]]; then
+        echo "path $checkPath should have permissions 755, but had permissions $mode. Consider running 'chmod o+rx $checkPath'."
+        exit 1
+    fi
+    checkPath="$(dirname "$checkPath")"
+done
+
+# Get the path of the NixOS configuration file.
+if [[ -z $NIXOS_CONFIG ]]; then
+    NIXOS_CONFIG=$mountPoint/etc/nixos/configuration.nix
+fi
+
+if [[ ${NIXOS_CONFIG:0:1} != / ]]; then
+    echo "$0: \$NIXOS_CONFIG is not an absolute path"
+    exit 1
+fi
+
+if [[ -n $flake ]]; then
+    if [[ $flake =~ ^(.*)\#([^\#\"]*)$ ]]; then
+       flake="${BASH_REMATCH[1]}"
+       flakeAttr="${BASH_REMATCH[2]}"
+    fi
+    if [[ -z "$flakeAttr" ]]; then
+        echo "Please specify the name of the NixOS configuration to be installed, as a URI fragment in the flake-uri."
+        echo "For example, to use the output nixosConfigurations.foo from the flake.nix, append \"#foo\" to the flake-uri."
+        exit 1
+    fi
+    flakeAttr="nixosConfigurations.\"$flakeAttr\""
+fi
+
+# Resolve the flake.
+if [[ -n $flake ]]; then
+    flake=$(nix "${flakeFlags[@]}" flake metadata --json "${extraBuildFlags[@]}" "${lockFlags[@]}" -- "$flake" | jq -r .url)
+fi
+
+if [[ ! -e $NIXOS_CONFIG && -z $system && -z $flake ]]; then
+    echo "configuration file $NIXOS_CONFIG doesn't exist"
+    exit 1
+fi
+
+# A place to drop temporary stuff.
+tmpdir="$(mktemp -d -p "$mountPoint")"
+trap 'rm -rf $tmpdir' EXIT
+
+# store temporary files on target filesystem by default
+export TMPDIR=${TMPDIR:-$tmpdir}
+
+sub="auto?trusted=1"
+
+# Copy the NixOS/Nixpkgs sources to the target as the initial contents
+# of the NixOS channel.
+if [[ -z $noChannelCopy ]]; then
+    if [[ -z $channelPath ]]; then
+        channelPath="$(nix-env -p /nix/var/nix/profiles/per-user/root/channels -q nixos --no-name --out-path 2>/dev/null || echo -n "")"
+    fi
+    if [[ -n $channelPath ]]; then
+        echo "copying channel..."
+        mkdir -p "$mountPoint"/nix/var/nix/profiles/per-user/root
+        nix-env --store "$mountPoint" "${extraBuildFlags[@]}" --extra-substituters "$sub" \
+                -p "$mountPoint"/nix/var/nix/profiles/per-user/root/channels --set "$channelPath" --quiet \
+                "${verbosity[@]}"
+        install -m 0700 -d "$mountPoint"/root/.nix-defexpr
+        ln -sfn /nix/var/nix/profiles/per-user/root/channels "$mountPoint"/root/.nix-defexpr/channels
+    fi
+fi
+
+# Build the system configuration in the target filesystem.
+if [[ -z $system ]]; then
+    outLink="$tmpdir/system"
+    if [[ -z $flake ]]; then
+        echo "building the configuration in $NIXOS_CONFIG..."
+        nix-build --out-link "$outLink" --store "$mountPoint" "${extraBuildFlags[@]}" \
+            --extra-substituters "$sub" \
+            '<nixpkgs/nixos>' -A system -I "nixos-config=$NIXOS_CONFIG" "${verbosity[@]}"
+    else
+        echo "building the flake in $flake..."
+        nix "${flakeFlags[@]}" build "$flake#$flakeAttr.config.system.build.toplevel" \
+            --store "$mountPoint" --extra-substituters "$sub" "${verbosity[@]}" \
+            "${extraBuildFlags[@]}" "${lockFlags[@]}" --out-link "$outLink"
+    fi
+    system=$(readlink -f "$outLink")
+fi
+
+# Set the system profile to point to the configuration. TODO: combine
+# this with the previous step once we have a nix-env replacement with
+# a progress bar.
+nix-env --store "$mountPoint" "${extraBuildFlags[@]}" \
+        --extra-substituters "$sub" \
+        -p "$mountPoint"/nix/var/nix/profiles/system --set "$system" "${verbosity[@]}"
+
+# Mark the target as a NixOS installation, otherwise switch-to-configuration will chicken out.
+mkdir -m 0755 -p "$mountPoint/etc"
+touch "$mountPoint/etc/NIXOS"
+
+# Switch to the new system configuration.  This will install Grub with
+# a menu default pointing at the kernel/initrd/etc of the new
+# configuration.
+if [[ -z $noBootLoader ]]; then
+    echo "installing the boot loader..."
+    # Grub needs an mtab.
+    ln -sfn /proc/mounts "$mountPoint"/etc/mtab
+    export mountPoint
+    NIXOS_INSTALL_BOOTLOADER=1 nixos-enter --root "$mountPoint" -c "$(cat <<'EOF'
+      # Create a bind mount for each of the mount points inside the target file
+      # system. This preserves the validity of their absolute paths after changing
+      # the root with `nixos-enter`.
+      # Without this the bootloader installation may fail due to options that
+      # contain paths referenced during evaluation, like initrd.secrets.
+      # when not root, re-execute the script in an unshared namespace
+      mount --rbind --mkdir / "$mountPoint"
+      mount --make-rslave "$mountPoint"
+      /run/current-system/bin/switch-to-configuration boot
+      umount -R "$mountPoint" && (rmdir "$mountPoint" 2>/dev/null || true)
+EOF
+)"
+fi
+
+# Ask the user to set a root password, but only if the passwd command
+# exists (i.e. when mutable user accounts are enabled).
+if [[ -z $noRootPasswd ]] && [ -t 0 ]; then
+    if nixos-enter --root "$mountPoint" -c 'test -e /nix/var/nix/profiles/system/sw/bin/passwd'; then
+        set +e
+        nixos-enter --root "$mountPoint" -c 'echo "setting root password..." && /nix/var/nix/profiles/system/sw/bin/passwd'
+        exit_code=$?
+        set -e
+
+        if [[ $exit_code != 0 ]]; then
+            echo "Setting a root password failed with the above printed error."
+            echo "You can set the root password manually by executing \`nixos-enter --root ${mountPoint@Q}\` and then running \`passwd\` in the shell of the new system."
+            exit $exit_code
+        fi
+    fi
+fi
+
+echo "installation finished!"
diff --git a/nixpkgs/nixos/modules/installer/tools/nixos-version.sh b/nixpkgs/nixos/modules/installer/tools/nixos-version.sh
new file mode 100644
index 000000000000..39e34a3718cb
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/nixos-version.sh
@@ -0,0 +1,31 @@
+#! @runtimeShell@
+# shellcheck shell=bash
+
+case "$1" in
+  -h|--help)
+    exec man nixos-version
+    exit 1
+    ;;
+  --hash|--revision)
+    if ! [[ @revision@ =~ ^[0-9a-f]+$ ]]; then
+      echo "$0: Nixpkgs commit hash is unknown" >&2
+      exit 1
+    fi
+    echo "@revision@"
+    ;;
+  --configuration-revision)
+    if [[ "@configurationRevision@" =~ "@" ]]; then
+      echo "$0: configuration revision is unknown" >&2
+      exit 1
+    fi
+    echo "@configurationRevision@"
+    ;;
+  --json)
+    cat <<EOF
+@json@
+EOF
+    ;;
+  *)
+    echo "@version@ (@codeName@)"
+    ;;
+esac
diff --git a/nixpkgs/nixos/modules/installer/tools/tools.nix b/nixpkgs/nixos/modules/installer/tools/tools.nix
new file mode 100644
index 000000000000..15e10128ac9a
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/tools/tools.nix
@@ -0,0 +1,266 @@
+# This module generates nixos-install, nixos-rebuild,
+# nixos-generate-config, etc.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  makeProg = args: pkgs.substituteAll (args // {
+    dir = "bin";
+    isExecutable = true;
+    nativeBuildInputs = [
+      pkgs.installShellFiles
+    ];
+    postInstall = ''
+      installManPage ${args.manPage}
+    '';
+  });
+
+  nixos-build-vms = makeProg {
+    name = "nixos-build-vms";
+    src = ./nixos-build-vms/nixos-build-vms.sh;
+    inherit (pkgs) runtimeShell;
+    manPage = ./manpages/nixos-build-vms.8;
+  };
+
+  nixos-install = makeProg {
+    name = "nixos-install";
+    src = ./nixos-install.sh;
+    inherit (pkgs) runtimeShell;
+    nix = config.nix.package.out;
+    path = makeBinPath [
+      pkgs.jq
+      nixos-enter
+      pkgs.util-linuxMinimal
+    ];
+    manPage = ./manpages/nixos-install.8;
+  };
+
+  nixos-rebuild = pkgs.nixos-rebuild.override { nix = config.nix.package.out; };
+
+  nixos-generate-config = makeProg {
+    name = "nixos-generate-config";
+    src = ./nixos-generate-config.pl;
+    perl = "${pkgs.perl.withPackages (p: [ p.FileSlurp ])}/bin/perl";
+    hostPlatformSystem = pkgs.stdenv.hostPlatform.system;
+    detectvirt = "${config.systemd.package}/bin/systemd-detect-virt";
+    btrfs = "${pkgs.btrfs-progs}/bin/btrfs";
+    inherit (config.system.nixos-generate-config) configuration desktopConfiguration;
+    xserverEnabled = config.services.xserver.enable;
+    manPage = ./manpages/nixos-generate-config.8;
+  };
+
+  inherit (pkgs) nixos-option;
+
+  nixos-version = makeProg {
+    name = "nixos-version";
+    src = ./nixos-version.sh;
+    inherit (pkgs) runtimeShell;
+    inherit (config.system.nixos) version codeName revision;
+    inherit (config.system) configurationRevision;
+    json = builtins.toJSON ({
+      nixosVersion = config.system.nixos.version;
+    } // optionalAttrs (config.system.nixos.revision != null) {
+      nixpkgsRevision = config.system.nixos.revision;
+    } // optionalAttrs (config.system.configurationRevision != null) {
+      configurationRevision = config.system.configurationRevision;
+    });
+    manPage = ./manpages/nixos-version.8;
+  };
+
+  nixos-enter = makeProg {
+    name = "nixos-enter";
+    src = ./nixos-enter.sh;
+    inherit (pkgs) runtimeShell;
+    path = makeBinPath [
+      pkgs.util-linuxMinimal
+    ];
+    manPage = ./manpages/nixos-enter.8;
+  };
+
+in
+
+{
+
+  options.system.nixos-generate-config = {
+    configuration = mkOption {
+      internal = true;
+      type = types.str;
+      description = lib.mdDoc ''
+        The NixOS module that `nixos-generate-config`
+        saves to `/etc/nixos/configuration.nix`.
+
+        This is an internal option. No backward compatibility is guaranteed.
+        Use at your own risk!
+
+        Note that this string gets spliced into a Perl script. The perl
+        variable `$bootLoaderConfig` can be used to
+        splice in the boot loader configuration.
+      '';
+    };
+
+    desktopConfiguration = mkOption {
+      internal = true;
+      type = types.listOf types.lines;
+      default = [];
+      description = lib.mdDoc ''
+        Text to preseed the desktop configuration that `nixos-generate-config`
+        saves to `/etc/nixos/configuration.nix`.
+
+        This is an internal option. No backward compatibility is guaranteed.
+        Use at your own risk!
+
+        Note that this string gets spliced into a Perl script. The perl
+        variable `$bootLoaderConfig` can be used to
+        splice in the boot loader configuration.
+      '';
+    };
+  };
+
+  options.system.disableInstallerTools = mkOption {
+    internal = true;
+    type = types.bool;
+    default = false;
+    description = lib.mdDoc ''
+      Disable nixos-rebuild, nixos-generate-config, nixos-install
+      and other NixOS tools. This is useful for shrinking embedded,
+      read-only systems which are not expected to rebuild or
+      reconfigure themselves. Use at your own risk!
+    '';
+  };
+
+  config = lib.mkIf (config.nix.enable && !config.system.disableInstallerTools) {
+
+    system.nixos-generate-config.configuration = mkDefault ''
+      # Edit this configuration file to define what should be installed on
+      # your system. Help is available in the configuration.nix(5) man page, on
+      # https://search.nixos.org/options and in the NixOS manual (`nixos-help`).
+
+      { config, lib, pkgs, ... }:
+
+      {
+        imports =
+          [ # Include the results of the hardware scan.
+            ./hardware-configuration.nix
+          ];
+
+      $bootLoaderConfig
+        # networking.hostName = "nixos"; # Define your hostname.
+        # Pick only one of the below networking options.
+        # networking.wireless.enable = true;  # Enables wireless support via wpa_supplicant.
+        # networking.networkmanager.enable = true;  # Easiest to use and most distros use this by default.
+
+        # Set your time zone.
+        # time.timeZone = "Europe/Amsterdam";
+
+        # Configure network proxy if necessary
+        # networking.proxy.default = "http://user:password\@proxy:port/";
+        # networking.proxy.noProxy = "127.0.0.1,localhost,internal.domain";
+
+        # Select internationalisation properties.
+        # i18n.defaultLocale = "en_US.UTF-8";
+        # console = {
+        #   font = "Lat2-Terminus16";
+        #   keyMap = "us";
+        #   useXkbConfig = true; # use xkb.options in tty.
+        # };
+
+      $xserverConfig
+
+      $desktopConfiguration
+        # Configure keymap in X11
+        # services.xserver.xkb.layout = "us";
+        # services.xserver.xkb.options = "eurosign:e,caps:escape";
+
+        # Enable CUPS to print documents.
+        # services.printing.enable = true;
+
+        # Enable sound.
+        # sound.enable = true;
+        # hardware.pulseaudio.enable = true;
+
+        # Enable touchpad support (enabled default in most desktopManager).
+        # services.xserver.libinput.enable = true;
+
+        # Define a user account. Don't forget to set a password with ‘passwd’.
+        # users.users.alice = {
+        #   isNormalUser = true;
+        #   extraGroups = [ "wheel" ]; # Enable ‘sudo’ for the user.
+        #   packages = with pkgs; [
+        #     firefox
+        #     tree
+        #   ];
+        # };
+
+        # List packages installed in system profile. To search, run:
+        # \$ nix search wget
+        # environment.systemPackages = with pkgs; [
+        #   vim # Do not forget to add an editor to edit configuration.nix! The Nano editor is also installed by default.
+        #   wget
+        # ];
+
+        # Some programs need SUID wrappers, can be configured further or are
+        # started in user sessions.
+        # programs.mtr.enable = true;
+        # programs.gnupg.agent = {
+        #   enable = true;
+        #   enableSSHSupport = true;
+        # };
+
+        # List services that you want to enable:
+
+        # Enable the OpenSSH daemon.
+        # services.openssh.enable = true;
+
+        # Open ports in the firewall.
+        # networking.firewall.allowedTCPPorts = [ ... ];
+        # networking.firewall.allowedUDPPorts = [ ... ];
+        # Or disable the firewall altogether.
+        # networking.firewall.enable = false;
+
+        # Copy the NixOS configuration file and link it from the resulting system
+        # (/run/current-system/configuration.nix). This is useful in case you
+        # accidentally delete configuration.nix.
+        # system.copySystemConfiguration = true;
+
+        # This option defines the first version of NixOS you have installed on this particular machine,
+        # and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
+        #
+        # Most users should NEVER change this value after the initial install, for any reason,
+        # even if you've upgraded your system to a new NixOS release.
+        #
+        # This value does NOT affect the Nixpkgs version your packages and OS are pulled from,
+        # so changing it will NOT upgrade your system.
+        #
+        # This value being lower than the current NixOS release does NOT mean your system is
+        # out of date, out of support, or vulnerable.
+        #
+        # Do NOT change this value unless you have manually inspected all the changes it would make to your configuration,
+        # and migrated your data accordingly.
+        #
+        # For more information, see `man configuration.nix` or https://nixos.org/manual/nixos/stable/options#opt-system.stateVersion .
+        system.stateVersion = "${config.system.nixos.release}"; # Did you read the comment?
+
+      }
+    '';
+
+    environment.systemPackages =
+      [ nixos-build-vms
+        nixos-install
+        nixos-rebuild
+        nixos-generate-config
+        nixos-option
+        nixos-version
+        nixos-enter
+      ];
+
+    documentation.man.man-db.skipPackages = [ nixos-version ];
+
+    system.build = {
+      inherit nixos-install nixos-generate-config nixos-option nixos-rebuild nixos-enter;
+    };
+
+  };
+
+}
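The internal desktopConfiguration option above is intended to be filled in by desktop modules, so the configuration.nix that nixos-generate-config writes already carries a matching desktop snippet. A hedged sketch of such a module; the GNOME option names are an assumption for illustration, not taken from this patch:

    # Hypothetical module preseeding the generated configuration.nix
    { ... }:

    {
      system.nixos-generate-config.desktopConfiguration = [''
        # Enable the GNOME Desktop Environment.
        services.xserver.displayManager.gdm.enable = true;
        services.xserver.desktopManager.gnome.enable = true;
      ''];
    }

These strings end up in the $desktopConfiguration Perl variable that nixos-generate-config.pl splices into the configuration template.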
diff --git a/nixpkgs/nixos/modules/installer/virtualbox-demo.nix b/nixpkgs/nixos/modules/installer/virtualbox-demo.nix
new file mode 100644
index 000000000000..01931b2acfca
--- /dev/null
+++ b/nixpkgs/nixos/modules/installer/virtualbox-demo.nix
@@ -0,0 +1,61 @@
+{ lib, ... }:
+
+with lib;
+
+{
+  imports =
+    [ ../virtualisation/virtualbox-image.nix
+      ../installer/cd-dvd/channel.nix
+      ../profiles/demo.nix
+      ../profiles/clone-config.nix
+    ];
+
+  # FIXME: UUID detection is currently broken
+  boot.loader.grub.fsIdentifier = "provided";
+
+  # Allow mounting of shared folders.
+  users.users.demo.extraGroups = [ "vboxsf" ];
+
+  # Add some more video drivers to give X11 a shot at working in
+  # VMware and QEMU.
+  services.xserver.videoDrivers = mkOverride 40 [ "virtualbox" "vmware" "cirrus" "vesa" "modesetting" ];
+
+  powerManagement.enable = false;
+  system.stateVersion = lib.mkDefault lib.trivial.release;
+
+  installer.cloneConfigExtra = ''
+  # Let demo build as a trusted user.
+  # nix.settings.trusted-users = [ "demo" ];
+
+  # Mount a VirtualBox shared folder.
+  # This is configurable in the VirtualBox menu at
+  # Machine / Settings / Shared Folders.
+  # fileSystems."/mnt" = {
+  #   fsType = "vboxsf";
+  #   device = "nameofdevicetomount";
+  #   options = [ "rw" ];
+  # };
+
+  # By default, the NixOS VirtualBox demo image includes SDDM and Plasma.
+  # If you prefer another desktop manager or display manager, you may want
+  # to disable the default.
+  # services.xserver.desktopManager.plasma5.enable = lib.mkForce false;
+  # services.xserver.displayManager.sddm.enable = lib.mkForce false;
+
+  # Enable GDM/GNOME by uncommenting above two lines and two lines below.
+  # services.xserver.displayManager.gdm.enable = true;
+  # services.xserver.desktopManager.gnome.enable = true;
+
+  # Set your time zone.
+  # time.timeZone = "Europe/Amsterdam";
+
+  # List packages installed in system profile. To search, run:
+  # \$ nix search wget
+  # environment.systemPackages = with pkgs; [
+  #   wget vim
+  # ];
+
+  # Enable the OpenSSH daemon.
+  # services.openssh.enable = true;
+  '';
+}
diff --git a/nixpkgs/nixos/modules/misc/assertions.nix b/nixpkgs/nixos/modules/misc/assertions.nix
new file mode 100644
index 000000000000..364bb02be82d
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/assertions.nix
@@ -0,0 +1,34 @@
+{ lib, ... }:
+
+with lib;
+
+{
+
+  options = {
+
+    assertions = mkOption {
+      type = types.listOf types.unspecified;
+      internal = true;
+      default = [];
+      example = [ { assertion = false; message = "you can't enable this for that reason"; } ];
+      description = lib.mdDoc ''
+        This option allows modules to express conditions that must
+        hold for the evaluation of the system configuration to
+        succeed, along with associated error messages for the user.
+      '';
+    };
+
+    warnings = mkOption {
+      internal = true;
+      default = [];
+      type = types.listOf types.str;
+      example = [ "The `foo' service is deprecated and will go away soon!" ];
+      description = lib.mdDoc ''
+        This option allows modules to show warnings to users during
+        the evaluation of the system configuration.
+      '';
+    };
+
+  };
+  # impl of assertions is in <nixpkgs/nixos/modules/system/activation/top-level.nix>
+}
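A hedged sketch of how another module typically contributes to these two options; the services.foo options are hypothetical and only serve to show the shape of an assertion and a warning:

    { config, lib, ... }:

    let
      cfg = config.services.foo;   # hypothetical service options
    in
    {
      config = lib.mkIf cfg.enable {
        # Abort evaluation with a clear message instead of building a broken system.
        assertions = [
          { assertion = cfg.user != null;
            message = "services.foo requires services.foo.user to be set.";
          }
        ];
        # Non-fatal notice shown during evaluation.
        warnings = lib.optional cfg.useLegacyProtocol
          "services.foo: the legacy protocol is deprecated and will be removed.";
      };
    }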
diff --git a/nixpkgs/nixos/modules/misc/crashdump.nix b/nixpkgs/nixos/modules/misc/crashdump.nix
new file mode 100644
index 000000000000..4ae18984ee5f
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/crashdump.nix
@@ -0,0 +1,76 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  crashdump = config.boot.crashDump;
+
+  kernelParams = concatStringsSep " " crashdump.kernelParams;
+
+in
+###### interface
+{
+  options = {
+    boot = {
+      crashDump = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            If enabled, NixOS will set up a crash kernel that is booted
+            via kexec when the running kernel crashes, and drop the user
+            into systemd rescue mode so that the dump of the crashed
+            kernel can be saved from /proc/vmcore.
+            It also activates the NMI watchdog.
+          '';
+        };
+        reservedMemory = mkOption {
+          default = "128M";
+          type = types.str;
+          description = lib.mdDoc ''
+            The amount of memory reserved for the crashdump kernel.
+            If you choose too high a value, dmesg will mention
+            "crashkernel reservation failed".
+          '';
+        };
+        kernelParams = mkOption {
+          type = types.listOf types.str;
+          default = [ "1" "boot.shell_on_fail" ];
+          description = lib.mdDoc ''
+            Parameters that will be passed to the kernel kexec-ed on crash.
+          '';
+        };
+      };
+    };
+  };
+
+###### implementation
+
+  config = mkIf crashdump.enable {
+    boot = {
+      postBootCommands = ''
+        echo "loading crashdump kernel...";
+        ${pkgs.kexec-tools}/sbin/kexec -p /run/current-system/kernel \
+        --initrd=/run/current-system/initrd \
+        --reset-vga --console-vga \
+        --command-line="init=$(readlink -f /run/current-system/init) irqpoll maxcpus=1 reset_devices ${kernelParams}"
+      '';
+      kernelParams = [
+       "crashkernel=${crashdump.reservedMemory}"
+       "nmi_watchdog=panic"
+       "softlockup_panic=1"
+      ];
+      kernelPatches = [ {
+        name = "crashdump-config";
+        patch = null;
+        extraConfig = ''
+                CRASH_DUMP y
+                DEBUG_INFO y
+                PROC_VMCORE y
+                LOCKUP_DETECTOR y
+                HARDLOCKUP_DETECTOR y
+              '';
+        } ];
+    };
+  };
+}
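A minimal sketch of enabling this module from a system configuration; the reserved memory size is illustrative and depends on the machine:

    { config, lib, pkgs, ... }:

    {
      boot.crashDump = {
        enable = true;
        # Illustrative value: too small and the crash kernel fails to load,
        # too large and dmesg reports "crashkernel reservation failed".
        reservedMemory = "256M";
      };
    }

After a crash the system kexecs into the reserved kernel, where the dump can be copied from /proc/vmcore before rebooting.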
diff --git a/nixpkgs/nixos/modules/misc/documentation.nix b/nixpkgs/nixos/modules/misc/documentation.nix
new file mode 100644
index 000000000000..1111a4cf3c7a
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/documentation.nix
@@ -0,0 +1,363 @@
+{ config, options, lib, pkgs, utils, modules, baseModules, extraModules, modulesPath, specialArgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.documentation;
+  allOpts = options;
+
+  canCacheDocs = m:
+    let
+      f = import m;
+      instance = f (mapAttrs (n: _: abort "evaluating ${n} for `meta` failed") (functionArgs f));
+    in
+      cfg.nixos.options.splitBuild
+        && builtins.isPath m
+        && isFunction f
+        && instance ? options
+        && instance.meta.buildDocsInSandbox or true;
+
+  docModules =
+    let
+      p = partition canCacheDocs (baseModules ++ cfg.nixos.extraModules);
+    in
+      {
+        lazy = p.right;
+        eager = p.wrong ++ optionals cfg.nixos.includeAllModules (extraModules ++ modules);
+      };
+
+  manual = import ../../doc/manual rec {
+    inherit pkgs config;
+    version = config.system.nixos.release;
+    revision = "release-${version}";
+    extraSources = cfg.nixos.extraModuleSources;
+    options =
+      let
+        scrubbedEval = evalModules {
+          modules = [ {
+            _module.check = false;
+          } ] ++ docModules.eager;
+          class = "nixos";
+          specialArgs = specialArgs // {
+            pkgs = scrubDerivations "pkgs" pkgs;
+            # allow access to arbitrary options for eager modules, eg for getting
+            # option types from lazy modules
+            options = allOpts;
+            inherit modulesPath utils;
+          };
+        };
+        scrubDerivations = namePrefix: pkgSet: mapAttrs
+          (name: value:
+            let
+              wholeName = "${namePrefix}.${name}";
+              guard = lib.warn "Attempt to evaluate package ${wholeName} in option documentation; this is not supported and will eventually be an error. Use `mkPackageOption{,MD}` or `literalExpression` instead.";
+            in if isAttrs value then
+              scrubDerivations wholeName value
+              // optionalAttrs (isDerivation value) {
+                outPath = guard "\${${wholeName}}";
+                drvPath = guard drvPath;
+              }
+            else value
+          )
+          pkgSet;
+      in scrubbedEval.options;
+
+    baseOptionsJSON =
+      let
+        filter =
+          builtins.filterSource
+            (n: t:
+              cleanSourceFilter n t
+              && (t == "directory" -> baseNameOf n != "tests")
+              && (t == "file" -> hasSuffix ".nix" n)
+            );
+      in
+        pkgs.runCommand "lazy-options.json" {
+          libPath = filter (pkgs.path + "/lib");
+          pkgsLibPath = filter (pkgs.path + "/pkgs/pkgs-lib");
+          nixosPath = filter (pkgs.path + "/nixos");
+          modules = map (p: ''"${removePrefix "${modulesPath}/" (toString p)}"'') docModules.lazy;
+        } ''
+          export NIX_STORE_DIR=$TMPDIR/store
+          export NIX_STATE_DIR=$TMPDIR/state
+          ${pkgs.buildPackages.nix}/bin/nix-instantiate \
+            --show-trace \
+            --eval --json --strict \
+            --argstr libPath "$libPath" \
+            --argstr pkgsLibPath "$pkgsLibPath" \
+            --argstr nixosPath "$nixosPath" \
+            --arg modules "[ $modules ]" \
+            --argstr stateVersion "${options.system.stateVersion.default}" \
+            --argstr release "${config.system.nixos.release}" \
+            $nixosPath/lib/eval-cacheable-options.nix > $out \
+            || {
+              echo -en "\e[1;31m"
+              echo 'Cacheable portion of option doc build failed.'
+              echo 'Usually this means that an option attribute that ends up in documentation (eg' \
+                '`default` or `description`) depends on the restricted module arguments' \
+                '`config` or `pkgs`.'
+              echo
+              echo 'Rebuild your configuration with `--show-trace` to find the offending' \
+                'location. Remove the references to restricted arguments (eg by escaping' \
+                'their antiquotations or adding a `defaultText`) or disable the sandboxed' \
+                'build for the failing module by setting `meta.buildDocsInSandbox = false`.'
+              echo -en "\e[0m"
+              exit 1
+            } >&2
+        '';
+
+    inherit (cfg.nixos.options) warningsAreErrors;
+  };
+
+
+  nixos-help = let
+    helpScript = pkgs.writeShellScriptBin "nixos-help" ''
+      # Finds first executable browser in a colon-separated list.
+      # (see how xdg-open defines BROWSER)
+      browser="$(
+        IFS=: ; for b in $BROWSER; do
+          [ -n "$(type -P "$b" || true)" ] && echo "$b" && break
+        done
+      )"
+      if [ -z "$browser" ]; then
+        browser="$(type -P xdg-open || true)"
+        if [ -z "$browser" ]; then
+          browser="${pkgs.w3m-nographics}/bin/w3m"
+        fi
+      fi
+      exec "$browser" ${manual.manualHTMLIndex}
+    '';
+
+    desktopItem = pkgs.makeDesktopItem {
+      name = "nixos-manual";
+      desktopName = "NixOS Manual";
+      genericName = "System Manual";
+      comment = "View NixOS documentation in a web browser";
+      icon = "nix-snowflake";
+      exec = "nixos-help";
+      categories = ["System"];
+    };
+
+    in pkgs.symlinkJoin {
+      name = "nixos-help";
+      paths = [
+        helpScript
+        desktopItem
+      ];
+    };
+
+in
+
+{
+  imports = [
+    ./man-db.nix
+    ./mandoc.nix
+    ./assertions.nix
+    ./meta.nix
+    ../config/system-path.nix
+    ../system/etc/etc.nix
+    (mkRenamedOptionModule [ "programs" "info" "enable" ] [ "documentation" "info" "enable" ])
+    (mkRenamedOptionModule [ "programs" "man"  "enable" ] [ "documentation" "man"  "enable" ])
+    (mkRenamedOptionModule [ "services" "nixosManual" "enable" ] [ "documentation" "nixos" "enable" ])
+    (mkRemovedOptionModule
+      [ "documentation" "nixos" "options" "allowDocBook" ]
+      "DocBook option documentation is no longer supported")
+  ];
+
+  options = {
+
+    documentation = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to install documentation of packages from
+          {option}`environment.systemPackages` into the generated system path.
+
+          See "Multiple-output packages" chapter in the nixpkgs manual for more info.
+        '';
+        # which is at ../../../doc/multiple-output.chapter.md
+      };
+
+      man.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to install manual pages.
+          This also includes `man` outputs.
+        '';
+      };
+
+      man.extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = mdDoc ''
+          Lines to append to {manpage}`manpath(5)`.
+        '';
+      };
+
+      man.generateCaches = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Whether to generate the manual page index caches.
+          This allows searching for a page or
+          keyword using utilities like {manpage}`apropos(1)`
+          and the `-k` option of
+          {manpage}`man(1)`.
+        '';
+      };
+
+      info.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to install info pages and the {command}`info` command.
+          This also includes "info" outputs.
+        '';
+      };
+
+      doc.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to install documentation distributed in packages' `/share/doc`.
+          Usually plain text and/or HTML.
+          This also includes "doc" outputs.
+        '';
+      };
+
+      dev.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Whether to install documentation targeted at developers.
+          * This includes man pages targeted at developers if {option}`documentation.man.enable` is
+            set (this also includes "devman" outputs).
+          * This includes info pages targeted at developers if {option}`documentation.info.enable`
+            is set (this also includes "devinfo" outputs).
+          * This includes other pages targeted at developers if {option}`documentation.doc.enable`
+            is set (this also includes "devdoc" outputs).
+        '';
+      };
+
+      nixos.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to install NixOS's own documentation.
+
+          - This includes man pages like
+            {manpage}`configuration.nix(5)` if {option}`documentation.man.enable` is
+            set.
+          - This includes the HTML manual and the {command}`nixos-help` command if
+            {option}`documentation.doc.enable` is set.
+        '';
+      };
+
+      nixos.extraModules = mkOption {
+        type = types.listOf types.raw;
+        default = [];
+        description = lib.mdDoc ''
+          Modules for which to show options even when not imported.
+        '';
+      };
+
+      nixos.options.splitBuild = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to split the option docs build into a cacheable and an uncacheable part.
+          Splitting the build can substantially decrease the amount of time needed to build
+          the manual, but some user modules may be incompatible with this splitting.
+        '';
+      };
+
+      nixos.options.warningsAreErrors = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Treat warnings emitted during the option documentation build (eg for missing option
+          descriptions) as errors.
+        '';
+      };
+
+      nixos.includeAllModules = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether the generated NixOS documentation should include documentation for all
+          the options from all the NixOS modules included in the current
+          `configuration.nix`. Disabling this makes the manual
+          generator ignore options defined outside of `baseModules`.
+        '';
+      };
+
+      nixos.extraModuleSources = mkOption {
+        type = types.listOf (types.either types.path types.str);
+        default = [ ];
+        description = lib.mdDoc ''
+          Which extra NixOS module paths the generated NixOS documentation should strip
+          from options.
+        '';
+        example = literalExpression ''
+          # e.g. with options from modules in ''${pkgs.customModules}/nix:
+          [ pkgs.customModules ]
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+      assertions = [
+        {
+          assertion = !(cfg.man.man-db.enable && cfg.man.mandoc.enable);
+          message = ''
+            man-db and mandoc can't be used as the default man page viewer at the same time!
+          '';
+        }
+      ];
+    }
+
+    # The actual implementation for this lives in man-db.nix or mandoc.nix,
+    # depending on which backend is active.
+    (mkIf cfg.man.enable {
+      environment.pathsToLink = [ "/share/man" ];
+      environment.extraOutputsToInstall = [ "man" ] ++ optional cfg.dev.enable "devman";
+    })
+
+    (mkIf cfg.info.enable {
+      environment.systemPackages = [ pkgs.texinfoInteractive ];
+      environment.pathsToLink = [ "/share/info" ];
+      environment.extraOutputsToInstall = [ "info" ] ++ optional cfg.dev.enable "devinfo";
+      environment.extraSetup = ''
+        if [ -w $out/share/info ]; then
+          shopt -s nullglob
+          for i in $out/share/info/*.info $out/share/info/*.info.gz; do
+              ${pkgs.buildPackages.texinfo}/bin/install-info $i $out/share/info/dir
+          done
+        fi
+      '';
+    })
+
+    (mkIf cfg.doc.enable {
+      environment.pathsToLink = [ "/share/doc" ];
+      environment.extraOutputsToInstall = [ "doc" ] ++ optional cfg.dev.enable "devdoc";
+    })
+
+    (mkIf cfg.nixos.enable {
+      system.build.manual = manual;
+
+      environment.systemPackages = []
+        ++ optional cfg.man.enable manual.nixos-configuration-reference-manpage
+        ++ optionals cfg.doc.enable [ manual.manualHTML nixos-help ];
+    })
+
+  ]);
+
+}
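As an illustration of the options declared above, a minimal configuration sketch (all option names are taken from this module; the chosen values are only examples):

```nix
# Hypothetical configuration fragment exercising the documentation options.
{
  documentation = {
    enable = true;
    man.generateCaches = true;        # build the apropos(1)/man -k index caches
    dev.enable = true;                # also install devman/devinfo/devdoc outputs
    nixos.includeAllModules = true;   # document options from all imported modules
  };
}
```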
diff --git a/nixpkgs/nixos/modules/misc/documentation/test-dummy.chapter.xml b/nixpkgs/nixos/modules/misc/documentation/test-dummy.chapter.xml
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/documentation/test-dummy.chapter.xml
diff --git a/nixpkgs/nixos/modules/misc/documentation/test.nix b/nixpkgs/nixos/modules/misc/documentation/test.nix
new file mode 100644
index 000000000000..dd1588abdb43
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/documentation/test.nix
@@ -0,0 +1,49 @@
+{ nixosLib, pkgsModule, runCommand }:
+
+let
+  sys = nixosLib.evalModules rec {
+    modules = [
+      pkgsModule
+      ../documentation.nix
+      ../version.nix
+
+      ({ lib, someArg, ... }: {
+        # Make sure imports from specialArgs are respected
+        imports = [ someArg.myModule ];
+
+        # TODO test this
+        meta.doc = ./test-dummy.chapter.xml;
+      })
+
+      {
+        _module.args = {
+          baseModules = [
+            ../documentation.nix
+            ../version.nix
+          ];
+          extraModules = [ ];
+          inherit modules;
+        };
+        documentation.nixos.includeAllModules = true;
+      }
+    ];
+    specialArgs.someArg.myModule = { lib, ... }: {
+      options.foobar = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc "The foobar option was added via specialArgs";
+        default = "qux";
+      };
+    };
+  };
+
+in
+runCommand "documentation-check"
+{
+  inherit (sys.config.system.build.manual) optionsJSON;
+} ''
+  json="$optionsJSON/share/doc/nixos/options.json"
+  echo checking $json
+
+  grep 'The foobar option was added via specialArgs' <"$json" >/dev/null
+  touch $out
+''
diff --git a/nixpkgs/nixos/modules/misc/extra-arguments.nix b/nixpkgs/nixos/modules/misc/extra-arguments.nix
new file mode 100644
index 000000000000..48891b440498
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/extra-arguments.nix
@@ -0,0 +1,7 @@
+{ lib, config, pkgs, ... }:
+
+{
+  _module.args = {
+    utils = import ../../lib/utils.nix { inherit lib config pkgs; };
+  };
+}
diff --git a/nixpkgs/nixos/modules/misc/ids.nix b/nixpkgs/nixos/modules/misc/ids.nix
new file mode 100644
index 000000000000..18928a6bf21b
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/ids.nix
@@ -0,0 +1,702 @@
+# This module defines the global list of uids and gids.  We keep a
+# central list to prevent id collisions.
+
+# IMPORTANT!
+# We only add static uids and gids for services where it is not feasible
+# to change uids/gids on service start, for example a service with a lot of
+# files. Please also check if the service is applicable for systemd's
+# DynamicUser option and does not need a uid/gid allocation at all.
+# Systemd can also change ownership of service directories using the
+# RuntimeDirectory/StateDirectory options.
+
+{ lib, ... }:
+
+let
+  inherit (lib) types;
+in
+{
+  options = {
+
+    ids.uids = lib.mkOption {
+      internal = true;
+      description = lib.mdDoc ''
+        The user IDs used in NixOS.
+      '';
+      type = types.attrsOf types.int;
+    };
+
+    ids.gids = lib.mkOption {
+      internal = true;
+      description = lib.mdDoc ''
+        The group IDs used in NixOS.
+      '';
+      type = types.attrsOf types.int;
+    };
+
+  };
+
+
+  config = {
+
+    ids.uids = {
+      root = 0;
+      #wheel = 1; # unused
+      #kmem = 2; # unused
+      #tty = 3; # unused
+      messagebus = 4; # D-Bus
+      haldaemon = 5;
+      #disk = 6; # unused
+      #vsftpd = 7; # dynamically allocated as of 2021-09-14
+      ftp = 8;
+      # bitlbee = 9; # removed 2021-10-05 #139765
+      #avahi = 10; # removed 2019-05-22
+      nagios = 11;
+      atd = 12;
+      postfix = 13;
+      #postdrop = 14; # unused
+      dovecot = 15;
+      tomcat = 16;
+      #audio = 17; # unused
+      #floppy = 18; # unused
+      uucp = 19;
+      #lp = 20; # unused
+      #proc = 21; # unused
+      pulseaudio = 22; # must match `pulseaudio' GID
+      gpsd = 23;
+      #cdrom = 24; # unused
+      #tape = 25; # unused
+      #video = 26; # unused
+      #dialout = 27; # unused
+      polkituser = 28;
+      #utmp = 29; # unused
+      # ddclient = 30; # converted to DynamicUser = true
+      davfs2 = 31;
+      disnix = 33;
+      osgi = 34;
+      tor = 35;
+      cups = 36;
+      foldingathome = 37;
+      sabnzbd = 38;
+      #kdm = 39; # dropped in 17.03
+      #ghostone = 40; # dropped in 18.03
+      git = 41;
+      #fourstore = 42; # dropped in 20.03
+      #fourstorehttp = 43; # dropped in 20.03
+      #virtuoso = 44;  dropped module
+      #rtkit = 45; # dynamically allocated 2021-09-03
+      dovecot2 = 46;
+      dovenull2 = 47;
+      # prayer = 49; # dropped in 23.11
+      mpd = 50;
+      clamav = 51;
+      #fprot = 52; # unused
+      # bind = 53; #dynamically allocated as of 2021-09-03
+      wwwrun = 54;
+      #adm = 55; # unused
+      spamd = 56;
+      #networkmanager = 57; # unused
+      nslcd = 58;
+      scanner = 59;
+      nginx = 60;
+      chrony = 61;
+      #systemd-journal = 62; # unused
+      smtpd = 63;
+      smtpq = 64;
+      supybot = 65;
+      iodined = 66;
+      #libvirtd = 67; # unused
+      graphite = 68;
+      #statsd = 69; # removed 2018-11-14
+      transmission = 70;
+      postgres = 71;
+      #vboxusers = 72; # unused
+      #vboxsf = 73; # unused
+      smbguest = 74;  # unused
+      varnish = 75;
+      datadog = 76;
+      lighttpd = 77;
+      lightdm = 78;
+      freenet = 79;
+      ircd = 80;
+      bacula = 81;
+      #almir = 82; # removed 2018-03-25, the almir package was removed in 30291227f2411abaca097773eedb49b8f259e297 during 2017-08
+      deluge = 83;
+      mysql = 84;
+      rabbitmq = 85;
+      activemq = 86;
+      gnunet = 87;
+      oidentd = 88;
+      quassel = 89;
+      amule = 90;
+      minidlna = 91;
+      elasticsearch = 92;
+      tcpcryptd = 93; # tcpcryptd uses a hard-coded uid. We patch it in Nixpkgs to match this choice.
+      firebird = 95;
+      #keys = 96; # unused
+      #haproxy = 97; # dynamically allocated as of 2020-03-11
+      #mongodb = 98; #dynamically allocated as of 2021-09-03
+      #openldap = 99; # dynamically allocated as of PR#94610
+      #users = 100; # unused
+      # cgminer = 101; #dynamically allocated as of 2021-09-17
+      munin = 102;
+      #logcheck = 103; #dynamically allocated as of 2021-09-17
+      #nix-ssh = 104; #dynamically allocated as of 2021-09-03
+      dictd = 105;
+      couchdb = 106;
+      #searx = 107; # dynamically allocated as of 2020-10-27
+      #kippo = 108; # removed 2021-10-07, the kippo package was removed in 1b213f321cdbfcf868b96fd9959c24207ce1b66a during 2021-04
+      jenkins = 109;
+      systemd-journal-gateway = 110;
+      #notbit = 111; # unused
+      aerospike = 111;
+      #ngircd = 112; #dynamically allocated as of 2021-09-03
+      #btsync = 113; # unused
+      #minecraft = 114; #dynamically allocated as of 2021-09-03
+      vault = 115;
+      # rippled = 116; #dynamically allocated as of 2021-09-18
+      murmur = 117;
+      foundationdb = 118;
+      newrelic = 119;
+      starbound = 120;
+      hydra = 122;
+      spiped = 123;
+      teamspeak = 124;
+      influxdb = 125;
+      nsd = 126;
+      gitolite = 127;
+      znc = 128;
+      polipo = 129;
+      mopidy = 130;
+      #docker = 131; # unused
+      gdm = 132;
+      #dhcpd = 133; # dynamically allocated as of 2021-09-03
+      siproxd = 134;
+      mlmmj = 135;
+      #neo4j = 136;# dynamically allocated as of 2021-09-03
+      riemann = 137;
+      riemanndash = 138;
+      #radvd = 139;# dynamically allocated as of 2021-09-03
+      #zookeeper = 140;# dynamically allocated as of 2021-09-03
+      #dnsmasq = 141;# dynamically allocated as of 2021-09-03
+      #uhub = 142; # unused
+      yandexdisk = 143;
+      mxisd = 144; # was once collectd
+      #consul = 145;# dynamically allocated as of 2021-09-03
+      #mailpile = 146; # removed 2022-01-12
+      redmine = 147;
+      #seeks = 148; # removed 2020-06-21
+      prosody = 149;
+      i2pd = 150;
+      systemd-coredump = 151;
+      systemd-network = 152;
+      systemd-resolve = 153;
+      systemd-timesync = 154;
+      liquidsoap = 155;
+      #etcd = 156;# dynamically allocated as of 2021-09-03
+      hbase = 158;
+      opentsdb = 159;
+      scollector = 160;
+      bosun = 161;
+      kubernetes = 162;
+      peerflix = 163;
+      #chronos = 164; # removed 2020-08-15
+      gitlab = 165;
+      # tox-bootstrapd = 166; removed 2021-09-15
+      cadvisor = 167;
+      nylon = 168;
+      #apache-kafka = 169;# dynamically allocated as of 2021-09-03
+      #panamax = 170; # unused
+      exim = 172;
+      #fleet = 173; # unused
+      #input = 174; # unused
+      sddm = 175;
+      #tss = 176; # dynamically allocated as of 2021-09-17
+      #memcached = 177; removed 2018-01-03
+      #ntp = 179; # dynamically allocated as of 2021-09-17
+      zabbix = 180;
+      #redis = 181; removed 2018-01-03
+      #unifi = 183; dynamically allocated as of 2021-09-17
+      uptimed = 184;
+      #zope2 = 185; # dynamically allocated as of 2021-09-18
+      #ripple-data-api = 186; dynamically allocated as of 2021-09-17
+      mediatomb = 187;
+      #rdnssd = 188; #dynamically allocated as of 2021-09-18
+      ihaskell = 189;
+      i2p = 190;
+      lambdabot = 191;
+      asterisk = 192;
+      plex = 193;
+      plexpy = 195;
+      grafana = 196;
+      skydns = 197;
+      # ripple-rest = 198; # unused, removed 2017-08-12
+      # nix-serve = 199; # unused, removed 2020-12-12
+      #tvheadend = 200; # dynamically allocated as of 2021-09-18
+      uwsgi = 201;
+      # gitit = 202; # unused, module was removed 2023-04-03
+      riemanntools = 203;
+      subsonic = 204;
+      # riak = 205; # unused, removed 2022-07-22
+      #shout = 206; # dynamically allocated as of 2021-09-18
+      gateone = 207;
+      namecoin = 208;
+      #lxd = 210; # unused
+      #kibana = 211;# dynamically allocated as of 2021-09-03
+      xtreemfs = 212;
+      calibre-server = 213;
+      #heapster = 214; #dynamically allocated as of 2021-09-17
+      bepasty = 215;
+      # pumpio = 216; # unused, removed 2018-02-24
+      nm-openvpn = 217;
+      # mathics = 218; # unused, removed 2020-08-15
+      ejabberd = 219;
+      postsrsd = 220;
+      opendkim = 221;
+      dspam = 222;
+      # gale = 223; removed 2021-06-10
+      matrix-synapse = 224;
+      rspamd = 225;
+      # rmilter = 226; # unused, removed 2019-08-22
+      cfdyndns = 227;
+      # gammu-smsd = 228; #dynamically allocated as of 2021-09-17
+      pdnsd = 229;
+      octoprint = 230;
+      avahi-autoipd = 231;
+      # nntp-proxy = 232; #dynamically allocated as of 2021-09-17
+      mjpg-streamer = 233;
+      #radicale = 234;# dynamically allocated as of 2021-09-03
+      hydra-queue-runner = 235;
+      hydra-www = 236;
+      syncthing = 237;
+      caddy = 239;
+      taskd = 240;
+      # factorio = 241; # DynamicUser = true
+      # emby = 242; # unusued, removed 2019-05-01
+      #graylog = 243;# dynamically allocated as of 2021-09-03
+      sniproxy = 244;
+      nzbget = 245;
+      mosquitto = 246;
+      #toxvpn = 247; # dynamically allocated as of 2021-09-18
+      # squeezelite = 248; # DynamicUser = true
+      turnserver = 249;
+      #smokeping = 250;# dynamically allocated as of 2021-09-03
+      gocd-agent = 251;
+      gocd-server = 252;
+      terraria = 253;
+      mattermost = 254;
+      prometheus = 255;
+      telegraf = 256;
+      gitlab-runner = 257;
+      postgrey = 258;
+      hound = 259;
+      leaps = 260;
+      ipfs  = 261;
+      # stanchion = 262; # unused, removed 2020-10-14
+      # riak-cs = 263; # unused, removed 2020-10-14
+      infinoted = 264;
+      sickbeard = 265;
+      headphones = 266;
+      # couchpotato = 267; # unused, removed 2022-01-01
+      gogs = 268;
+      #pdns-recursor = 269; # dynamically allocated as of 2020-20-18
+      #kresd = 270; # switched to "knot-resolver" with dynamic ID
+      rpc = 271;
+      #geoip = 272; # new module uses DynamicUser
+      fcron = 273;
+      sonarr = 274;
+      radarr = 275;
+      jackett = 276;
+      aria2 = 277;
+      clickhouse = 278;
+      rslsync = 279;
+      minio = 280;
+      kanboard = 281;
+      # pykms = 282; # DynamicUser = true
+      kodi = 283;
+      restya-board = 284;
+      mighttpd2 = 285;
+      hass = 286;
+      #monero = 287; # dynamically allocated as of 2021-05-08
+      ceph = 288;
+      duplicati = 289;
+      monetdb = 290;
+      restic = 291;
+      openvpn = 292;
+      # meguca = 293; # removed 2020-08-21
+      yarn = 294;
+      hdfs = 295;
+      mapred = 296;
+      hadoop = 297;
+      hydron = 298;
+      cfssl = 299;
+      cassandra = 300;
+      qemu-libvirtd = 301;
+      # kvm = 302; # unused
+      # render = 303; # unused
+      # zeronet = 304; # removed 2019-01-03
+      lirc = 305;
+      lidarr = 306;
+      slurm = 307;
+      kapacitor = 308;
+      # solr = 309; removed 2023-03-16
+      alerta = 310;
+      minetest = 311;
+      rss2email = 312;
+      cockroachdb = 313;
+      zoneminder = 314;
+      paperless = 315;
+      #mailman = 316;  # removed 2019-08-30
+      zigbee2mqtt = 317;
+      # shadow = 318; # unused
+      hqplayer = 319;
+      moonraker = 320;
+      distcc = 321;
+      webdav = 322;
+      pipewire = 323;
+      rstudio-server = 324;
+      localtimed = 325;
+      automatic-timezoned = 326;
+
+      # When adding a uid, make sure it doesn't match an existing gid. And don't use uids above 399!
+
+      nixbld = 30000; # start of range of uids
+      nobody = 65534;
+    };
+
+    ids.gids = {
+      root = 0;
+      wheel = 1;
+      kmem = 2;
+      tty = 3;
+      messagebus = 4; # D-Bus
+      haldaemon = 5;
+      disk = 6;
+      #vsftpd = 7; # dynamically allocated as of 2021-09-14
+      ftp = 8;
+      # bitlbee = 9; # removed 2021-10-05 #139765
+      #avahi = 10; # removed 2019-05-22
+      #nagios = 11; # unused
+      atd = 12;
+      postfix = 13;
+      postdrop = 14;
+      dovecot = 15;
+      tomcat = 16;
+      audio = 17;
+      floppy = 18;
+      uucp = 19;
+      lp = 20;
+      proc = 21;
+      pulseaudio = 22; # must match `pulseaudio' UID
+      gpsd = 23;
+      cdrom = 24;
+      tape = 25;
+      video = 26;
+      dialout = 27;
+      #polkituser = 28; # currently unused, polkitd doesn't need a group
+      utmp = 29;
+      # ddclient = 30; # converted to DynamicUser = true
+      davfs2 = 31;
+      disnix = 33;
+      osgi = 34;
+      tor = 35;
+      #cups = 36; # unused
+      #foldingathome = 37; # unused
+      #sabnzbd = 38; # unused
+      #kdm = 39; # unused, even before 17.03
+      #ghostone = 40; # dropped in 18.03
+      git = 41;
+      fourstore = 42;
+      fourstorehttp = 43;
+      virtuoso = 44;
+      #rtkit = 45; # unused
+      dovecot2 = 46;
+      dovenull2 = 47;
+      # prayer = 49; # dropped in 23.11
+      mpd = 50;
+      clamav = 51;
+      #fprot = 52; # unused
+      #bind = 53; # unused
+      wwwrun = 54;
+      adm = 55;
+      spamd = 56;
+      networkmanager = 57;
+      nslcd = 58;
+      scanner = 59;
+      nginx = 60;
+      chrony = 61;
+      systemd-journal = 62;
+      smtpd = 63;
+      smtpq = 64;
+      supybot = 65;
+      iodined = 66;
+      libvirtd = 67;
+      graphite = 68;
+      #statsd = 69; # removed 2018-11-14
+      transmission = 70;
+      postgres = 71;
+      vboxusers = 72;
+      vboxsf = 73;
+      smbguest = 74;  # unused
+      varnish = 75;
+      datadog = 76;
+      lighttpd = 77;
+      lightdm = 78;
+      freenet = 79;
+      ircd = 80;
+      bacula = 81;
+      #almir = 82; # removed 2018-03-25, the almir package was removed in 30291227f2411abaca097773eedb49b8f259e297 during 2017-08
+      deluge = 83;
+      mysql = 84;
+      rabbitmq = 85;
+      activemq = 86;
+      gnunet = 87;
+      oidentd = 88;
+      quassel = 89;
+      amule = 90;
+      minidlna = 91;
+      elasticsearch = 92;
+      #tcpcryptd = 93; # unused
+      firebird = 95;
+      keys = 96;
+      #haproxy = 97; # dynamically allocated as of 2020-03-11
+      #mongodb = 98; # unused
+      #openldap = 99; # dynamically allocated as of PR#94610
+      munin = 102;
+      #logcheck = 103; # unused
+      #nix-ssh = 104; # unused
+      dictd = 105;
+      couchdb = 106;
+      #searx = 107; # dynamically allocated as of 2020-10-27
+      #kippo = 108; # removed 2021-10-07, the kippo package was removed in 1b213f321cdbfcf868b96fd9959c24207ce1b66a during 2021-04
+      jenkins = 109;
+      systemd-journal-gateway = 110;
+      #notbit = 111; # unused
+      aerospike = 111;
+      #ngircd = 112; # unused
+      #btsync = 113; # unused
+      #minecraft = 114; # unused
+      vault = 115;
+      #ripped = 116; # unused
+      murmur = 117;
+      foundationdb = 118;
+      newrelic = 119;
+      starbound = 120;
+      hydra = 122;
+      spiped = 123;
+      teamspeak = 124;
+      influxdb = 125;
+      nsd = 126;
+      gitolite = 127;
+      znc = 128;
+      polipo = 129;
+      mopidy = 130;
+      docker = 131;
+      gdm = 132;
+      #dhcpcd = 133; # unused
+      siproxd = 134;
+      mlmmj = 135;
+      #neo4j = 136; # unused
+      riemann = 137;
+      riemanndash = 138;
+      #radvd = 139; # unused
+      #zookeeper = 140; # unused
+      #dnsmasq = 141; # unused
+      uhub = 142;
+      #yandexdisk = 143; # unused
+      mxisd = 144; # was once collectd
+      #consul = 145; # unused
+      #mailpile = 146; # removed 2022-01-12
+      redmine = 147;
+      #seeks = 148; # removed 2020-06-21
+      prosody = 149;
+      i2pd = 150;
+      systemd-network = 152;
+      systemd-resolve = 153;
+      systemd-timesync = 154;
+      liquidsoap = 155;
+      #etcd = 156; # unused
+      hbase = 158;
+      opentsdb = 159;
+      scollector = 160;
+      bosun = 161;
+      kubernetes = 162;
+      #peerflix = 163; # unused
+      #chronos = 164; # unused
+      gitlab = 165;
+      nylon = 168;
+      #panamax = 170; # unused
+      exim = 172;
+      #fleet = 173; # unused
+      input = 174;
+      sddm = 175;
+      #tss = 176; #dynamically allocated as of 2021-09-20
+      #memcached = 177; # unused, removed 2018-01-03
+      #ntp = 179; # unused
+      zabbix = 180;
+      #redis = 181; # unused, removed 2018-01-03
+      #unifi = 183; # unused
+      #uptimed = 184; # unused
+      #zope2 = 185; # unused
+      #ripple-data-api = 186; #unused
+      mediatomb = 187;
+      #rdnssd = 188; # unused
+      ihaskell = 189;
+      i2p = 190;
+      lambdabot = 191;
+      asterisk = 192;
+      plex = 193;
+      sabnzbd = 194;
+      #grafana = 196; #unused
+      #skydns = 197; #unused
+      # ripple-rest = 198; # unused, removed 2017-08-12
+      #nix-serve = 199; #unused
+      #tvheadend = 200; #unused
+      uwsgi = 201;
+      gitit = 202;
+      riemanntools = 203;
+      subsonic = 204;
+      # riak = 205;#unused, removed 2022-06-22
+      #shout = 206; #unused
+      gateone = 207;
+      namecoin = 208;
+      #lxd = 210; # unused
+      #kibana = 211;
+      xtreemfs = 212;
+      calibre-server = 213;
+      bepasty = 215;
+      # pumpio = 216; # unused, removed 2018-02-24
+      nm-openvpn = 217;
+      mathics = 218;
+      ejabberd = 219;
+      postsrsd = 220;
+      opendkim = 221;
+      dspam = 222;
+      # gale = 223; removed 2021-06-10
+      matrix-synapse = 224;
+      rspamd = 225;
+      # rmilter = 226; # unused, removed 2019-08-22
+      cfdyndns = 227;
+      pdnsd = 229;
+      octoprint = 230;
+      #radicale = 234;# dynamically allocated as of 2021-09-03
+      syncthing = 237;
+      caddy = 239;
+      taskd = 240;
+      # factorio = 241; # unused
+      # emby = 242; # unused, removed 2019-05-01
+      sniproxy = 244;
+      nzbget = 245;
+      mosquitto = 246;
+      #toxvpn = 247; # unused
+      #squeezelite = 248; #unused
+      turnserver = 249;
+      #smokeping = 250;# dynamically allocated as of 2021-09-03
+      gocd-agent = 251;
+      gocd-server = 252;
+      terraria = 253;
+      mattermost = 254;
+      prometheus = 255;
+      #telegraf = 256; # unused
+      gitlab-runner = 257;
+      postgrey = 258;
+      hound = 259;
+      leaps = 260;
+      ipfs = 261;
+      # stanchion = 262; # unused, removed 2020-10-14
+      # riak-cs = 263; # unused, removed 2020-10-14
+      infinoted = 264;
+      sickbeard = 265;
+      headphones = 266;
+      # couchpotato = 267; # unused, removed 2022-01-01
+      gogs = 268;
+      #kresd = 270; # switched to "knot-resolver" with dynamic ID
+      #rpc = 271; # unused
+      #geoip = 272; # unused
+      fcron = 273;
+      sonarr = 274;
+      radarr = 275;
+      jackett = 276;
+      aria2 = 277;
+      clickhouse = 278;
+      rslsync = 279;
+      minio = 280;
+      kanboard = 281;
+      # pykms = 282; # DynamicUser = true
+      kodi = 283;
+      restya-board = 284;
+      mighttpd2 = 285;
+      hass = 286;
+      # monero = 287; # dynamically allocated as of 2021-05-08
+      ceph = 288;
+      duplicati = 289;
+      monetdb = 290;
+      restic = 291;
+      openvpn = 292;
+      # meguca = 293; # removed 2020-08-21
+      yarn = 294;
+      hdfs = 295;
+      mapred = 296;
+      hadoop = 297;
+      hydron = 298;
+      cfssl = 299;
+      cassandra = 300;
+      qemu-libvirtd = 301;
+      kvm = 302; # default udev rules from systemd requires these
+      render = 303; # default udev rules from systemd requires these
+      sgx = 304; # default udev rules from systemd requires these
+      lirc = 305;
+      lidarr = 306;
+      slurm = 307;
+      kapacitor = 308;
+      # solr = 309; removed 2023-03-16
+      alerta = 310;
+      minetest = 311;
+      rss2email = 312;
+      cockroachdb = 313;
+      zoneminder = 314;
+      paperless = 315;
+      #mailman = 316;  # removed 2019-08-30
+      zigbee2mqtt = 317;
+      shadow = 318;
+      hqplayer = 319;
+      moonraker = 320;
+      distcc = 321;
+      webdav = 322;
+      pipewire = 323;
+      rstudio-server = 324;
+      localtimed = 325;
+      automatic-timezoned = 326;
+
+      # When adding a gid, make sure it doesn't match an existing
+      # uid. Users and groups with the same name should have equal
+      # uids and gids. Also, don't use gids above 399!
+
+      # For exceptional cases where you really need a gid above 399, leave a
+      # comment stating why.
+      #
+      # Also, avoid the following GID ranges:
+      #
+      #  1000 - 29999: user accounts (see ../config/update-users-groups.pl)
+      # 30000 - 31000: nixbld users (the upper limit is arbitrarily chosen)
+      # 61184 - 65519: systemd DynamicUser (see systemd.exec(5))
+      #         65535: the error return sentinel value when uid_t was 16 bits
+      #
+      # 100000 - 6653600: subgid allocated for user namespaces
+      #                   (see ../config/update-users-groups.pl)
+      #       4294967294: unauthenticated user in some NFS implementations
+      #       4294967295: error return sentinel value
+      #
+      # References:
+      # https://www.debian.org/doc/debian-policy/ch-opersys.html#uid-and-gid-classes
+
+      onepassword = 31001; # 1Password requires that its GID be larger than 1000
+      onepassword-cli = 31002; # 1Password requires that its GID be larger than 1000
+
+      users = 100;
+      nixbld = 30000;
+      nogroup = 65534;
+    };
+
+  };
+
+}
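For context, a sketch of how a service module typically consumes one of the static allocations above; "gitolite" is just one of the listed entries, and the exact wiring in the real gitolite module may differ:

```nix
# Hypothetical module fragment using a static uid/gid from ids.nix.
{ config, ... }:
{
  users.users.gitolite = {
    uid = config.ids.uids.gitolite;   # 127, reserved above
    group = "gitolite";
    isSystemUser = true;
  };
  users.groups.gitolite.gid = config.ids.gids.gitolite;
}
```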
diff --git a/nixpkgs/nixos/modules/misc/label.nix b/nixpkgs/nixos/modules/misc/label.nix
new file mode 100644
index 000000000000..44ee812249ce
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/label.nix
@@ -0,0 +1,76 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.system.nixos;
+in
+
+{
+
+  options.system = {
+
+    nixos.label = mkOption {
+      type = types.strMatching "[a-zA-Z0-9:_\\.-]*";
+      description = lib.mdDoc ''
+        NixOS version name to be used in the names of generated
+        outputs and boot labels.
+
+        If you ever wanted to influence the labels in your GRUB menu,
+        this is the option for you.
+
+        It can only contain letters, numbers and the following symbols:
+        `:`, `_`, `.` and `-`.
+
+        The default is {option}`system.nixos.tags`, joined by
+        "-", followed by "-" and the {env}`NIXOS_LABEL_VERSION`
+        environment variable (which defaults to the value of
+        {option}`system.nixos.version`).
+
+        Can be overridden by setting {env}`NIXOS_LABEL`.
+
+        Useful for not losing track of configurations built from different
+        NixOS branches/revisions, e.g.:
+
+        ```
+        #!/bin/sh
+        today=`date +%Y%m%d`
+        branch=`(cd nixpkgs ; git branch 2>/dev/null | sed -n '/^\* / { s|^\* ||; p; }')`
+        revision=`(cd nixpkgs ; git rev-parse HEAD)`
+        export NIXOS_LABEL_VERSION="$today.$branch-''${revision:0:7}"
+        nixos-rebuild switch
+        ```
+      '';
+    };
+
+    nixos.tags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "with-xen" ];
+      description = lib.mdDoc ''
+        Strings to prefix to the default
+        {option}`system.nixos.label`.
+
+        Useful for not losing track of configurations built with
+        different options, e.g.:
+
+        ```
+        {
+          system.nixos.tags = [ "with-xen" ];
+          virtualisation.xen.enable = true;
+        }
+        ```
+      '';
+    };
+
+  };
+
+  config = {
+    # This is set here rather than up there so that changing it would
+    # not rebuild the manual
+    system.nixos.label = mkDefault (maybeEnv "NIXOS_LABEL"
+                                             (concatStringsSep "-" ((sort (x: y: x < y) cfg.tags)
+                                              ++ [ (maybeEnv "NIXOS_LABEL_VERSION" cfg.version) ])));
+  };
+
+}
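A small sketch combining the two options above; the label value is only an illustrative string matching the allowed pattern:

```nix
# Hypothetical configuration fragment tagging generations for the boot menu.
{
  system.nixos.tags = [ "with-xen" ];
  # or override the label entirely (must match [a-zA-Z0-9:_\.-]*):
  # system.nixos.label = "staging-20230601";
}
```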
diff --git a/nixpkgs/nixos/modules/misc/lib.nix b/nixpkgs/nixos/modules/misc/lib.nix
new file mode 100644
index 000000000000..f97e9209e2f1
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/lib.nix
@@ -0,0 +1,15 @@
+{ lib, ... }:
+
+{
+  options = {
+    lib = lib.mkOption {
+      default = {};
+
+      type = lib.types.attrsOf lib.types.attrs;
+
+      description = lib.mdDoc ''
+        This option allows modules to define helper functions, constants, etc.
+      '';
+    };
+  };
+}
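A sketch of how this option is meant to be used, with hypothetical names: one module publishes helpers under `lib`, and any other module can read them back through `config.lib`:

```nix
# Hypothetical module fragment defining and consuming a config.lib helper.
{ config, ... }:
{
  config.lib.myHelpers = {
    mkListen = host: port: "${host}:${toString port}";
  };
  # elsewhere: config.lib.myHelpers.mkListen "127.0.0.1" 8080
}
```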
diff --git a/nixpkgs/nixos/modules/misc/locate.nix b/nixpkgs/nixos/modules/misc/locate.nix
new file mode 100644
index 000000000000..3c76d17086b5
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/locate.nix
@@ -0,0 +1,311 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.locate;
+  isMLocate = hasPrefix "mlocate" cfg.package.name;
+  isPLocate = hasPrefix "plocate" cfg.package.name;
+  isMorPLocate = isMLocate || isPLocate;
+  isFindutils = hasPrefix "findutils" cfg.package.name;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "locate" "period" ] [ "services" "locate" "interval" ])
+    (mkRenamedOptionModule [ "services" "locate" "locate" ] [ "services" "locate" "package" ])
+    (mkRemovedOptionModule [ "services" "locate" "includeStore" ] "Use services.locate.prunePaths")
+  ];
+
+  options.services.locate = with types; {
+    enable = mkOption {
+      type = bool;
+      default = false;
+      description = lib.mdDoc ''
+        If enabled, NixOS will periodically update the database of
+        files used by the {command}`locate` command.
+      '';
+    };
+
+    package = mkOption {
+      type = package;
+      default = pkgs.findutils.locate;
+      defaultText = literalExpression "pkgs.findutils.locate";
+      example = literalExpression "pkgs.mlocate";
+      description = lib.mdDoc ''
+        The locate implementation to use
+      '';
+    };
+
+    interval = mkOption {
+      type = str;
+      default = "02:15";
+      example = "hourly";
+      description = lib.mdDoc ''
+        Update the locate database at this interval. By default,
+        the database is updated daily at 2:15 AM.
+
+        The format is described in
+        {manpage}`systemd.time(7)`.
+
+        To disable automatic updates, set to `"never"`
+        and run {command}`updatedb` manually.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = listOf str;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra flags to pass to {command}`updatedb`.
+      '';
+    };
+
+    output = mkOption {
+      type = path;
+      default = "/var/cache/locatedb";
+      description = lib.mdDoc ''
+        The database file to build.
+      '';
+    };
+
+    localuser = mkOption {
+      type = nullOr str;
+      default = "nobody";
+      description = lib.mdDoc ''
+        The user to search non-network directories as, using
+        {command}`su`.
+      '';
+    };
+
+    pruneFS = mkOption {
+      type = listOf str;
+      default = [
+        "afs"
+        "anon_inodefs"
+        "auto"
+        "autofs"
+        "bdev"
+        "binfmt"
+        "binfmt_misc"
+        "ceph"
+        "cgroup"
+        "cgroup2"
+        "cifs"
+        "coda"
+        "configfs"
+        "cramfs"
+        "cpuset"
+        "curlftpfs"
+        "debugfs"
+        "devfs"
+        "devpts"
+        "devtmpfs"
+        "ecryptfs"
+        "eventpollfs"
+        "exofs"
+        "futexfs"
+        "ftpfs"
+        "fuse"
+        "fusectl"
+        "fusesmb"
+        "fuse.ceph"
+        "fuse.glusterfs"
+        "fuse.gvfsd-fuse"
+        "fuse.mfs"
+        "fuse.rclone"
+        "fuse.rozofs"
+        "fuse.sshfs"
+        "gfs"
+        "gfs2"
+        "hostfs"
+        "hugetlbfs"
+        "inotifyfs"
+        "iso9660"
+        "jffs2"
+        "lustre"
+        "lustre_lite"
+        "misc"
+        "mfs"
+        "mqueue"
+        "ncpfs"
+        "nfs"
+        "NFS"
+        "nfs4"
+        "nfsd"
+        "nnpfs"
+        "ocfs"
+        "ocfs2"
+        "pipefs"
+        "proc"
+        "ramfs"
+        "rpc_pipefs"
+        "securityfs"
+        "selinuxfs"
+        "sfs"
+        "shfs"
+        "smbfs"
+        "sockfs"
+        "spufs"
+        "sshfs"
+        "subfs"
+        "supermount"
+        "sysfs"
+        "tmpfs"
+        "tracefs"
+        "ubifs"
+        "udev"
+        "udf"
+        "usbfs"
+        "vboxsf"
+        "vperfctrfs"
+      ];
+      description = lib.mdDoc ''
+        Which filesystem types to exclude from indexing
+      '';
+    };
+
+    prunePaths = mkOption {
+      type = listOf path;
+      default = [
+        "/tmp"
+        "/var/tmp"
+        "/var/cache"
+        "/var/lock"
+        "/var/run"
+        "/var/spool"
+        "/nix/store"
+        "/nix/var/log/nix"
+      ];
+      description = lib.mdDoc ''
+        Which paths to exclude from indexing
+      '';
+    };
+
+    pruneNames = mkOption {
+      type = listOf str;
+      default = lib.optionals (!isFindutils) [ ".bzr" ".cache" ".git" ".hg" ".svn" ];
+      defaultText = literalMD ''
+        `[ ".bzr" ".cache" ".git" ".hg" ".svn" ]`, if
+        supported by the locate implementation (i.e. mlocate or plocate).
+      '';
+      description = lib.mdDoc ''
+        Directory components which should exclude paths containing them from indexing
+      '';
+    };
+
+    pruneBindMounts = mkOption {
+      type = bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to exclude bind mounts from indexing
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    users.groups = mkMerge [
+      (mkIf isMLocate { mlocate = { }; })
+      (mkIf isPLocate { plocate = { }; })
+    ];
+
+    security.wrappers =
+      let
+        common = {
+          owner = "root";
+          permissions = "u+rx,g+x,o+x";
+          setgid = true;
+          setuid = false;
+        };
+        mlocate = mkIf isMLocate {
+          group = "mlocate";
+          source = "${cfg.package}/bin/locate";
+        };
+        plocate = mkIf isPLocate {
+          group = "plocate";
+          source = "${cfg.package}/bin/plocate";
+        };
+      in
+      mkIf isMorPLocate {
+        locate = mkMerge [ common mlocate plocate ];
+        plocate = mkIf isPLocate (mkMerge [ common plocate ]);
+      };
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.variables.LOCATE_PATH = cfg.output;
+
+    environment.etc = {
+      # write /etc/updatedb.conf for manual calls to `updatedb`
+      "updatedb.conf" = {
+        text = ''
+          PRUNEFS="${lib.concatStringsSep " " cfg.pruneFS}"
+          PRUNENAMES="${lib.concatStringsSep " " cfg.pruneNames}"
+          PRUNEPATHS="${lib.concatStringsSep " " cfg.prunePaths}"
+          PRUNE_BIND_MOUNTS="${if cfg.pruneBindMounts then "yes" else "no"}"
+        '';
+      };
+    };
+
+    warnings = optional (isMorPLocate && cfg.localuser != null)
+      "mlocate and plocate do not support the services.locate.localuser option. updatedb will run as root. Silence this warning by setting services.locate.localuser = null."
+    ++ optional (isFindutils && cfg.pruneNames != [ ])
+      "findutils locate does not support pruning by directory component"
+    ++ optional (isFindutils && cfg.pruneBindMounts)
+      "findutils locate does not support skipping bind mounts";
+
+    systemd.services.update-locatedb = {
+      description = "Update Locate Database";
+      path = mkIf (!isMorPLocate) [ pkgs.su ];
+
+      # mlocate's updatedb takes flags via a configuration file or
+      # on the command line, but not by environment variable.
+      script =
+        if isMorPLocate then
+          let
+            toFlags = x:
+              optional (cfg.${x} != [ ])
+                "--${lib.toLower x} '${concatStringsSep " " cfg.${x}}'";
+            args = concatLists (map toFlags [ "pruneFS" "pruneNames" "prunePaths" ]);
+          in
+          ''
+            exec ${cfg.package}/bin/updatedb \
+              --output ${toString cfg.output} ${concatStringsSep " " args} \
+              --prune-bind-mounts ${if cfg.pruneBindMounts then "yes" else "no"} \
+              ${concatStringsSep " " cfg.extraFlags}
+          ''
+        else ''
+          exec ${cfg.package}/bin/updatedb \
+            ${optionalString (cfg.localuser != null && !isMorPLocate) "--localuser=${cfg.localuser}"} \
+            --output=${toString cfg.output} ${concatStringsSep " " cfg.extraFlags}
+        '';
+      environment = optionalAttrs (!isMorPLocate) {
+        PRUNEFS = concatStringsSep " " cfg.pruneFS;
+        PRUNEPATHS = concatStringsSep " " cfg.prunePaths;
+        PRUNENAMES = concatStringsSep " " cfg.pruneNames;
+        PRUNE_BIND_MOUNTS = if cfg.pruneBindMounts then "yes" else "no";
+      };
+      serviceConfig.Nice = 19;
+      serviceConfig.IOSchedulingClass = "idle";
+      serviceConfig.PrivateTmp = "yes";
+      serviceConfig.PrivateNetwork = "yes";
+      serviceConfig.NoNewPrivileges = "yes";
+      serviceConfig.ReadOnlyPaths = "/";
+      # Use dirOf cfg.output because mlocate creates temporary files next to
+      # the actual database. We could specify and create them as well,
+      # but that would make this quite brittle when they change something.
+      # NOTE: If /var/cache does not exist, this leads to the misleading error message:
+      # update-locatedb.service: Failed at step NAMESPACE spawning …/update-locatedb-start: No such file or directory
+      serviceConfig.ReadWritePaths = dirOf cfg.output;
+    };
+
+    systemd.timers.update-locatedb = mkIf (cfg.interval != "never") {
+      description = "Update timer for locate database";
+      partOf = [ "update-locatedb.service" ];
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.interval;
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ SuperSandro2000 ];
+}
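A usage sketch for the module above, assuming a standard configuration.nix; it switches to plocate and nulls `localuser` because, per the warning emitted by this module, mlocate/plocate run updatedb as root:

```nix
# Hypothetical configuration fragment enabling a nightly plocate database.
{ pkgs, ... }:
{
  services.locate = {
    enable = true;
    package = pkgs.plocate;
    interval = "daily";
    localuser = null;   # silences the mlocate/plocate warning
  };
}
```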
diff --git a/nixpkgs/nixos/modules/misc/man-db.nix b/nixpkgs/nixos/modules/misc/man-db.nix
new file mode 100644
index 000000000000..75f822c3448f
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/man-db.nix
@@ -0,0 +1,87 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.documentation.man.man-db;
+in
+
+{
+  options = {
+    documentation.man.man-db = {
+      enable = lib.mkEnableOption (lib.mdDoc "man-db as the default man page viewer") // {
+        default = config.documentation.man.enable;
+        defaultText = lib.literalExpression "config.documentation.man.enable";
+        example = false;
+      };
+
+      skipPackages = lib.mkOption {
+        type = lib.types.listOf lib.types.package;
+        default = [];
+        internal = true;
+        description = lib.mdDoc ''
+          Packages to *not* include in the man-db.
+          This can be useful to avoid unnecessary rebuilds due to packages that change frequently, like nixos-version.
+        '';
+      };
+
+      manualPages = lib.mkOption {
+        type = lib.types.path;
+        default = pkgs.buildEnv {
+          name = "man-paths";
+          paths = lib.subtractLists cfg.skipPackages config.environment.systemPackages;
+          pathsToLink = [ "/share/man" ];
+          extraOutputsToInstall = [ "man" ]
+            ++ lib.optionals config.documentation.dev.enable [ "devman" ];
+          ignoreCollisions = true;
+        };
+        defaultText = lib.literalMD "all man pages in {option}`config.environment.systemPackages`";
+        description = lib.mdDoc ''
+          The manual pages to generate caches for if {option}`documentation.man.generateCaches`
+          is enabled. Must be a path to a directory with man pages under
+          `/share/man`; see the source for an example.
+          Advanced users can make this a content-addressed derivation to save a few rebuilds.
+        '';
+      };
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.man-db;
+        defaultText = lib.literalExpression "pkgs.man-db";
+        description = lib.mdDoc ''
+          The `man-db` derivation to use. Useful to override
+          configuration options used for the package.
+        '';
+      };
+    };
+  };
+
+  imports = [
+    (lib.mkRenamedOptionModule [ "documentation" "man" "manualPages" ] [ "documentation" "man" "man-db" "manualPages" ])
+  ];
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    environment.etc."man_db.conf".text =
+      let
+        manualCache = pkgs.runCommand "man-cache" {
+          nativeBuildInputs = [ cfg.package ];
+        } ''
+          echo "MANDB_MAP ${cfg.manualPages}/share/man $out" > man.conf
+          mandb -C man.conf -psc >/dev/null 2>&1
+        '';
+      in
+      ''
+        # Manual pages paths for NixOS
+        MANPATH_MAP /run/current-system/sw/bin /run/current-system/sw/share/man
+        MANPATH_MAP /run/wrappers/bin          /run/current-system/sw/share/man
+
+        ${lib.optionalString config.documentation.man.generateCaches ''
+        # Generated manual pages cache for NixOS (immutable)
+        MANDB_MAP /run/current-system/sw/share/man ${manualCache}
+        ''}
+        # Manual pages caches for NixOS
+        MANDB_MAP /run/current-system/sw/share/man /var/cache/man/nixos
+
+        ${config.documentation.man.extraConfig}
+      '';
+  };
+}
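A minimal sketch of driving the options above from a configuration; the values are only illustrative:

```nix
# Hypothetical configuration fragment keeping man-db as the viewer
# and pre-building its index cache.
{ pkgs, ... }:
{
  documentation.man.enable = true;
  documentation.man.generateCaches = true;        # consumes the man-cache built above
  documentation.man.man-db.package = pkgs.man-db;
}
```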
diff --git a/nixpkgs/nixos/modules/misc/mandoc.nix b/nixpkgs/nixos/modules/misc/mandoc.nix
new file mode 100644
index 000000000000..9bcef5b1a09b
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/mandoc.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+let
+  makewhatis = "${lib.getBin cfg.package}/bin/makewhatis";
+
+  cfg = config.documentation.man.mandoc;
+
+in {
+  meta.maintainers = [ lib.maintainers.sternenseemann ];
+
+  options = {
+    documentation.man.mandoc = {
+      enable = lib.mkEnableOption (lib.mdDoc "mandoc as the default man page viewer");
+
+      manPath = lib.mkOption {
+        type = with lib.types; listOf str;
+        default = [ "share/man" ];
+        example = lib.literalExpression "[ \"share/man\" \"share/man/fr\" ]";
+        description = lib.mdDoc ''
+          Change the manpath, i. e. the directories where
+          {manpage}`man(1)`
+          looks for section-specific directories of man pages.
+          You only need to change this setting if you want extra man pages
+          (e. g. in non-English languages). All values must be strings that
+          are valid paths relative to the target prefix (without including it).
+          The first value given takes priority.
+        '';
+      };
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.mandoc;
+        defaultText = lib.literalExpression "pkgs.mandoc";
+        description = lib.mdDoc ''
+          The `mandoc` derivation to use. Useful to override
+          configuration options used for the package.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      systemPackages = [ cfg.package ];
+
+      # tell mandoc about man pages
+      etc."man.conf".text = lib.concatMapStrings (path: ''
+        manpath /run/current-system/sw/${path}
+      '') cfg.manPath;
+
+      # create mandoc.db for whatis(1), apropos(1) and man(1) -k
+      # TODO(@sternenseemman): fix symlinked directories not getting indexed,
+      # see: https://inbox.vuxu.org/mandoc-tech/20210906171231.GF83680@athene.usta.de/T/#e85f773c1781e3fef85562b2794f9cad7b2909a3c
+      extraSetup = lib.mkIf config.documentation.man.generateCaches ''
+        ${makewhatis} -T utf8 ${
+          lib.concatMapStringsSep " " (path:
+            "$out/" + lib.escapeShellArg path
+          ) cfg.manPath
+        }
+      '';
+    };
+  };
+}
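A usage sketch for the mandoc backend; the assertion in documentation.nix requires the man-db backend to be disabled explicitly, and the French manpath entry is only an example:

```nix
# Hypothetical configuration fragment switching the man page viewer to mandoc.
{
  documentation.man.man-db.enable = false;   # only one default viewer may be enabled
  documentation.man.mandoc = {
    enable = true;
    manPath = [ "share/man" "share/man/fr" ];
  };
}
```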
diff --git a/nixpkgs/nixos/modules/misc/meta.nix b/nixpkgs/nixos/modules/misc/meta.nix
new file mode 100644
index 000000000000..95f2765aff1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/meta.nix
@@ -0,0 +1,76 @@
+{ lib, ... }:
+
+with lib;
+
+let
+  maintainer = mkOptionType {
+    name = "maintainer";
+    check = email: elem email (attrValues lib.maintainers);
+    merge = loc: defs: listToAttrs (singleton (nameValuePair (last defs).file (last defs).value));
+  };
+
+  listOfMaintainers = types.listOf maintainer // {
+    # Returns list of
+    #   { "module-file" = [
+    #        "maintainer1 <first@nixos.org>"
+    #        "maintainer2 <second@nixos.org>" ];
+    #   }
+    merge = loc: defs:
+      zipAttrs
+        (flatten (imap1 (n: def: imap1 (m: def':
+          maintainer.merge (loc ++ ["[${toString n}-${toString m}]"])
+            [{ inherit (def) file; value = def'; }]) def.value) defs));
+  };
+
+  docFile = types.path // {
+    # Returns tuples of
+    #   { file = "module location"; value = <path/to/doc.xml>; }
+    merge = loc: defs: defs;
+  };
+in
+
+{
+  options = {
+    meta = {
+
+      maintainers = mkOption {
+        type = listOfMaintainers;
+        internal = true;
+        default = [];
+        example = literalExpression ''[ lib.maintainers.all ]'';
+        description = lib.mdDoc ''
+          List of maintainers of each module.  This option should be defined at
+          most once per module.
+        '';
+      };
+
+      doc = mkOption {
+        type = docFile;
+        internal = true;
+        example = "./meta.chapter.md";
+        description = lib.mdDoc ''
+          Documentation prologue for the set of options of each module.  This
+          option should be defined at most once per module.
+        '';
+      };
+
+      buildDocsInSandbox = mkOption {
+        type = types.bool // {
+          merge = loc: defs: defs;
+        };
+        internal = true;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to include this module in the split options doc build.
+          Disable if the module references `config`, `pkgs` or other module
+          arguments that cannot be evaluated as constants.
+
+          This option should be defined at most once per module.
+        '';
+      };
+
+    };
+  };
+
+  meta.maintainers = singleton lib.maintainers.pierron;
+}
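For context, a sketch of how a module would declare its metadata using the options above; the maintainer and the sandbox setting are only illustrative:

```nix
# Hypothetical module fragment declaring meta information.
{ lib, ... }:
{
  meta = {
    maintainers = [ lib.maintainers.pierron ];  # illustrative maintainer
    buildDocsInSandbox = false;                 # only if option docs need `pkgs`/`config`
  };
}
```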
diff --git a/nixpkgs/nixos/modules/misc/nixops-autoluks.nix b/nixpkgs/nixos/modules/misc/nixops-autoluks.nix
new file mode 100644
index 000000000000..e6817633119d
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/nixops-autoluks.nix
@@ -0,0 +1,43 @@
+{ config, options, lib, ... }:
+let
+  path = [ "deployment" "autoLuks" ];
+  hasAutoLuksConfig = lib.hasAttrByPath path config && (lib.attrByPath path {} config) != {};
+
+  inherit (config.nixops) enableDeprecatedAutoLuks;
+in {
+  options.nixops.enableDeprecatedAutoLuks = lib.mkEnableOption (lib.mdDoc "the deprecated NixOps AutoLuks module");
+
+  config = {
+    assertions = [
+      {
+        assertion = if hasAutoLuksConfig then hasAutoLuksConfig && enableDeprecatedAutoLuks else true;
+        message = ''
+          ⚠️  !!! WARNING !!! ⚠️
+
+            NixOps autoLuks is deprecated. The feature was never widely used and the maintenance effort outgrew its benefit.
+            If you still want to use the module:
+              a) Please raise your voice in the issue tracking usage of the module:
+                 https://github.com/NixOS/nixpkgs/issues/62211
+              b) make sure you set the `_netdev` option for each of the file
+                 systems referring to block devices provided by the autoLuks module.
+
+                 ⚠️ If you do not set the option, your system will not boot anymore! ⚠️
+
+                  {
+                    fileSystems."/secret" = { options = [ "_netdev" ]; };
+                  }
+
+              b) set the option >nixops.enableDeprecatedAutoLuks = true< to remove this error.
+
+
+            For more details read through the following resources:
+              - https://github.com/NixOS/nixops/pull/1156
+              - https://github.com/NixOS/nixpkgs/issues/47550
+              - https://github.com/NixOS/nixpkgs/issues/62211
+              - https://github.com/NixOS/nixpkgs/pull/61321
+        '';
+      }
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/misc/nixpkgs.nix b/nixpkgs/nixos/modules/misc/nixpkgs.nix
new file mode 100644
index 000000000000..da321a923449
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/nixpkgs.nix
@@ -0,0 +1,397 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.nixpkgs;
+  opt = options.nixpkgs;
+
+  isConfig = x:
+    builtins.isAttrs x || lib.isFunction x;
+
+  optCall = f: x:
+    if lib.isFunction f
+    then f x
+    else f;
+
+  mergeConfig = lhs_: rhs_:
+    let
+      lhs = optCall lhs_ { inherit pkgs; };
+      rhs = optCall rhs_ { inherit pkgs; };
+    in
+    recursiveUpdate lhs rhs //
+    optionalAttrs (lhs ? packageOverrides) {
+      packageOverrides = pkgs:
+        optCall lhs.packageOverrides pkgs //
+        optCall (attrByPath [ "packageOverrides" ] { } rhs) pkgs;
+    } //
+    optionalAttrs (lhs ? perlPackageOverrides) {
+      perlPackageOverrides = pkgs:
+        optCall lhs.perlPackageOverrides pkgs //
+        optCall (attrByPath [ "perlPackageOverrides" ] { } rhs) pkgs;
+    };
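+
+  # Illustrative sketch (not part of the module logic): merging
+  #   { allowUnfree = true; packageOverrides = pkgs: { foo = pkgs.hello; }; }
+  # with
+  #   { allowBroken = true; packageOverrides = pkgs: { bar = pkgs.hello; }; }
+  # yields an attrset with allowUnfree and allowBroken set, plus a single
+  # packageOverrides function that applies both override sets, the right-hand
+  # side winning on attribute collisions.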
+
+  configType = mkOptionType {
+    name = "nixpkgs-config";
+    description = "nixpkgs config";
+    check = x:
+      let traceXIfNot = c:
+            if c x then true
+            else lib.traceSeqN 1 x false;
+      in traceXIfNot isConfig;
+    merge = args: foldr (def: mergeConfig def.value) {};
+  };
+
+  overlayType = mkOptionType {
+    name = "nixpkgs-overlay";
+    description = "nixpkgs overlay";
+    check = lib.isFunction;
+    merge = lib.mergeOneOption;
+  };
+
+  pkgsType = types.pkgs // {
+    # This type is used in this module only, so elaborate the description a bit
+    # for the purpose of documentation.
+    description = "An evaluation of Nixpkgs; the top level attribute set of packages";
+  };
+
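+  # Note: a lower numeric priority denotes a stronger definition, so
+  # `highestPrio < (mkOptionDefault {}).priority` holds exactly when the
+  # option was defined by something other than its own option default.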
+  hasBuildPlatform = opt.buildPlatform.highestPrio < (mkOptionDefault {}).priority;
+  hasHostPlatform = opt.hostPlatform.isDefined;
+  hasPlatform = hasHostPlatform || hasBuildPlatform;
+
+  # Context for messages
+  hostPlatformLine = optionalString hasHostPlatform (showOptionWithDefLocs opt.hostPlatform);
+  buildPlatformLine = optionalString hasBuildPlatform (showOptionWithDefLocs opt.buildPlatform);
+
+  legacyOptionsDefined =
+    optional (opt.localSystem.highestPrio < (mkDefault {}).priority) opt.system
+    ++ optional (opt.localSystem.highestPrio < (mkOptionDefault {}).priority) opt.localSystem
+    ++ optional (opt.crossSystem.highestPrio < (mkOptionDefault {}).priority) opt.crossSystem
+    ;
+
+  defaultPkgs =
+    if opt.hostPlatform.isDefined
+    then
+      let isCross = cfg.buildPlatform != cfg.hostPlatform;
+          systemArgs =
+            if isCross
+            then {
+              localSystem = cfg.buildPlatform;
+              crossSystem = cfg.hostPlatform;
+            }
+            else {
+              localSystem = cfg.hostPlatform;
+            };
+      in
+      import ../../.. ({
+        inherit (cfg) config overlays;
+      } // systemArgs)
+    else
+      import ../../.. {
+        inherit (cfg) config overlays localSystem crossSystem;
+      };
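+
+  # In other words: when `nixpkgs.hostPlatform` is defined, Nixpkgs is invoked
+  # with localSystem/crossSystem derived from buildPlatform/hostPlatform
+  # (crossSystem only when they differ); otherwise the legacy
+  # localSystem/crossSystem options are passed through unchanged.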
+
+  finalPkgs = if opt.pkgs.isDefined then cfg.pkgs.appendOverlays cfg.overlays else defaultPkgs;
+
+in
+
+{
+  imports = [
+    ./assertions.nix
+    ./meta.nix
+    (mkRemovedOptionModule [ "nixpkgs" "initialSystem" ] "The NixOS options `nesting.clone` and `nesting.children` have been deleted, and replaced with named specialisation. Therefore `nixpkgs.initialSystem` has no effect anymore.")
+  ];
+
+  options.nixpkgs = {
+
+    pkgs = mkOption {
+      defaultText = literalExpression ''
+        import "''${nixos}/.." {
+          inherit (cfg) config overlays localSystem crossSystem;
+        }
+      '';
+      type = pkgsType;
+      example = literalExpression "import <nixpkgs> {}";
+      description = lib.mdDoc ''
+        If set, the pkgs argument to all NixOS modules is the value of
+        this option, extended with `nixpkgs.overlays`, if
+        that is also set. Either `nixpkgs.crossSystem` or
+        `nixpkgs.localSystem` will be used in an assertion
+        to check that the NixOS and Nixpkgs architectures match. Any
+        other options in `nixpkgs.*`, notably `config`,
+        will be ignored.
+
+        If unset, the pkgs argument to all NixOS modules is determined
+        as shown in the default value for this option.
+
+        The default value imports the Nixpkgs source files
+        relative to the location of this NixOS module, because
+        NixOS and Nixpkgs are distributed together for consistency,
+        so the `nixos` in the default value is in fact a
+        relative path. The `config`, `overlays`,
+        `localSystem`, and `crossSystem` come
+        from this option's siblings.
+
+        This option can be used by applications like NixOps to increase
+        the performance of evaluation, or to create packages that depend
+        on a container that should be built with the exact same evaluation
+        of Nixpkgs, for example. Applications like this should set
+        their default value using `lib.mkDefault`, so
+        user-provided configuration can override it without using
+        `lib`.
+
+        Note that using a distinct version of Nixpkgs with NixOS may
+        be an unexpected source of problems. Use this option with care.
+      '';
+    };
+
+    config = mkOption {
+      default = {};
+      example = literalExpression
+        ''
+          { allowBroken = true; allowUnfree = true; }
+        '';
+      type = configType;
+      description = lib.mdDoc ''
+        The configuration of the Nix Packages collection.  (For
+        details, see the Nixpkgs documentation.)  It allows you to set
+        package configuration options.
+
+        Ignored when `nixpkgs.pkgs` is set.
+      '';
+    };
+
+    overlays = mkOption {
+      default = [];
+      example = literalExpression
+        ''
+          [
+            (self: super: {
+              openssh = super.openssh.override {
+                hpnSupport = true;
+                kerberos = self.libkrb5;
+              };
+            })
+          ]
+        '';
+      type = types.listOf overlayType;
+      description = lib.mdDoc ''
+        List of overlays to apply to Nixpkgs.
+        This option allows modifying the Nixpkgs package set accessed through the `pkgs` module argument.
+
+        For details, see the [Overlays chapter in the Nixpkgs manual](https://nixos.org/manual/nixpkgs/stable/#chap-overlays).
+
+        If the {option}`nixpkgs.pkgs` option is set, overlays specified using `nixpkgs.overlays` will be applied after the overlays that were already included in `nixpkgs.pkgs`.
+      '';
+    };
+
+    hostPlatform = mkOption {
+      type = types.either types.str types.attrs; # TODO utilize lib.systems.parsedPlatform
+      example = { system = "aarch64-linux"; };
+      # Make sure that the final value has all fields for sake of other modules
+      # referring to this. TODO make `lib.systems` itself use the module system.
+      apply = lib.systems.elaborate;
+      defaultText = literalExpression
+        ''(import "''${nixos}/../lib").lib.systems.examples.aarch64-multiplatform'';
+      description = lib.mdDoc ''
+        Specifies the platform where the NixOS configuration will run.
+
+        To cross-compile, set also `nixpkgs.buildPlatform`.
+
+        Ignored when `nixpkgs.pkgs` is set.
+      '';
+    };
+
+    buildPlatform = mkOption {
+      type = types.either types.str types.attrs; # TODO utilize lib.systems.parsedPlatform
+      default = cfg.hostPlatform;
+      example = { system = "x86_64-linux"; };
+      # Make sure that the final value has all fields for sake of other modules
+      # referring to this.
+      apply = lib.systems.elaborate;
+      defaultText = literalExpression
+        ''config.nixpkgs.hostPlatform'';
+      description = lib.mdDoc ''
+        Specifies the platform on which NixOS should be built.
+        By default, NixOS is built on the system where it runs, but you can
+        change where it's built. Setting this option will cause NixOS to be
+        cross-compiled.
+
+        For instance, if you're doing distributed multi-platform deployment,
+        or if you're building machines, you can set this to match your
+        development system and/or build farm.
+
+        Ignored when `nixpkgs.pkgs` is set.
+      '';
+    };
+
+    localSystem = mkOption {
+      type = types.attrs; # TODO utilize lib.systems.parsedPlatform
+      default = { inherit (cfg) system; };
+      example = { system = "aarch64-linux"; };
+      # Make sure that the final value has all fields for sake of other modules
+      # referring to this. TODO make `lib.systems` itself use the module system.
+      apply = lib.systems.elaborate;
+      defaultText = literalExpression
+        ''(import "''${nixos}/../lib").lib.systems.examples.aarch64-multiplatform'';
+      description = lib.mdDoc ''
+        Systems with a recently generated `hardware-configuration.nix`
+        do not need to specify this option, unless cross-compiling, in which case
+        you should set *only* {option}`nixpkgs.buildPlatform`.
+
+        If this is somehow not feasible, you may fall back to removing the
+        {option}`nixpkgs.hostPlatform` line from the generated config and
+        using the old options.
+
+        Specifies the platform on which NixOS should be built. When
+        `nixpkgs.crossSystem` is unset, it also specifies
+        the platform *for* which NixOS should be
+        built.  If this option is unset, it defaults to the platform
+        type of the machine where evaluation happens. Specifying this
+        option is useful when doing distributed multi-platform
+        deployment, or when building virtual machines. See its
+        description in the Nixpkgs manual for more details.
+
+        Ignored when `nixpkgs.pkgs` or `hostPlatform` is set.
+      '';
+    };
+
+    # TODO deprecate. "crossSystem" is a nonsense identifier, because "cross"
+    #      is a relation between at least 2 systems in the context of a
+    #      specific build step, not a single system.
+    crossSystem = mkOption {
+      type = types.nullOr types.attrs; # TODO utilize lib.systems.parsedPlatform
+      default = null;
+      example = { system = "aarch64-linux"; };
+      description = lib.mdDoc ''
+        Systems with a recently generated `hardware-configuration.nix`
+        may instead specify *only* {option}`nixpkgs.buildPlatform`,
+        or fall back to removing the {option}`nixpkgs.hostPlatform` line from the generated config.
+
+        Specifies the platform for which NixOS should be
+        built. Specify this only if it is different from
+        `nixpkgs.localSystem`, the platform
+        *on* which NixOS should be built. In other
+        words, specify this to cross-compile NixOS. Otherwise it
+        should be set as null, the default. See its description in the
+        Nixpkgs manual for more details.
+
+        Ignored when `nixpkgs.pkgs` or `hostPlatform` is set.
+      '';
+    };
+
+    system = mkOption {
+      type = types.str;
+      example = "i686-linux";
+      default =
+        if opt.hostPlatform.isDefined
+        then
+          throw ''
+            Neither ${opt.system} nor any other option in nixpkgs.* is meant
+            to be read by modules and configurations.
+            Use pkgs.stdenv.hostPlatform instead.
+          ''
+        else
+          throw ''
+            Neither ${opt.hostPlatform} nor the legacy option ${opt.system} has been set.
+            You can set ${opt.hostPlatform} in hardware-configuration.nix by re-running
+            a recent version of nixos-generate-config.
+            The option ${opt.system} is still fully supported for NixOS 22.05 interoperability,
+            but will be deprecated in the future, so we recommend setting ${opt.hostPlatform}.
+          '';
+      defaultText = lib.literalMD ''
+        Traditionally `builtins.currentSystem`, but unset when invoking NixOS through `lib.nixosSystem`.
+      '';
+      description = lib.mdDoc ''
+        This option does not need to be specified for NixOS configurations
+        with a recently generated `hardware-configuration.nix`.
+
+        Specifies the Nix platform type on which NixOS should be built.
+        It is better to specify `nixpkgs.localSystem` instead.
+        ```
+        {
+          nixpkgs.system = ..;
+        }
+        ```
+        is the same as
+        ```
+        {
+          nixpkgs.localSystem.system = ..;
+        }
+        ```
+        See `nixpkgs.localSystem` for more information.
+
+        Ignored when `nixpkgs.pkgs`, `nixpkgs.localSystem` or `nixpkgs.hostPlatform` is set.
+      '';
+    };
+  };
+
+  config = {
+    _module.args = {
+      pkgs =
+        # We explicitly set the default override priority, so that we do not need
+        # to evaluate finalPkgs in case an override is placed on `_module.args.pkgs`.
+        # After all, to determine a definition priority, we need to evaluate `._type`,
+        # which is somewhat costly for Nixpkgs. With an explicit priority, we only
+        # evaluate the wrapper to find out that the priority is lower, and then we
+        # don't need to evaluate `finalPkgs`.
+        lib.mkOverride lib.modules.defaultOverridePriority
+          finalPkgs.__splicedPackages;
+    };
+
+    assertions = let
+      # Whether `pkgs` was constructed by this module. This is false when any of
+      # nixpkgs.pkgs or _module.args.pkgs is set.
+      constructedByMe =
+        # We set it with default priority and it can not be merged, so if the
+        # pkgs module argument has that priority, it's from us.
+        (lib.modules.mergeAttrDefinitionsWithPrio options._module.args).pkgs.highestPrio
+          == lib.modules.defaultOverridePriority
+        # However, if nixpkgs.pkgs is set, we only forward it; we do not construct it.
+          && !opt.pkgs.isDefined;
+    in [
+      (
+        let
+          nixosExpectedSystem =
+            if config.nixpkgs.crossSystem != null
+            then config.nixpkgs.crossSystem.system or (lib.systems.parse.doubleFromSystem (lib.systems.parse.mkSystemFromString config.nixpkgs.crossSystem.config))
+            else config.nixpkgs.localSystem.system or (lib.systems.parse.doubleFromSystem (lib.systems.parse.mkSystemFromString config.nixpkgs.localSystem.config));
+          nixosOption =
+            if config.nixpkgs.crossSystem != null
+            then "nixpkgs.crossSystem"
+            else "nixpkgs.localSystem";
+          pkgsSystem = finalPkgs.stdenv.targetPlatform.system;
+        in {
+          assertion = constructedByMe -> !hasPlatform -> nixosExpectedSystem == pkgsSystem;
+          message = "The NixOS nixpkgs.pkgs option was set to a Nixpkgs invocation that compiles to target system ${pkgsSystem} but NixOS was configured for system ${nixosExpectedSystem} via NixOS option ${nixosOption}. The NixOS system settings must match the Nixpkgs target system.";
+        }
+      )
+      {
+        assertion = constructedByMe -> hasPlatform -> legacyOptionsDefined == [];
+        message = ''
+          Your system configures nixpkgs with the platform parameter${optionalString hasBuildPlatform "s"}:
+          ${hostPlatformLine
+          }${buildPlatformLine
+          }
+          However, it also defines the legacy options:
+          ${concatMapStrings showOptionWithDefLocs legacyOptionsDefined}
+          For a future-proof system configuration, we recommend removing
+          the legacy definitions.
+        '';
+      }
+      {
+        assertion = opt.pkgs.isDefined -> cfg.config == {};
+        message = ''
+          Your system configures nixpkgs with an externally created instance.
+          `nixpkgs.config` options should be passed when creating the instance instead.
+
+          Current value:
+          ${lib.generators.toPretty { multiline = true; } opt.config}
+        '';
+      }
+    ];
+  };
+
+  # needs a full nixpkgs path to import nixpkgs
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/misc/nixpkgs/read-only.nix b/nixpkgs/nixos/modules/misc/nixpkgs/read-only.nix
new file mode 100644
index 000000000000..2a783216a9d5
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/nixpkgs/read-only.nix
@@ -0,0 +1,74 @@
+# A replacement for the traditional nixpkgs module, such that none of the modules
+# can add their own configuration. This ensures that the Nixpkgs configuration is
+# exactly as the user intends.
+# This may also be used as a performance optimization when evaluating multiple
+# configurations at once, with a shared `pkgs`.
+
+# This is a separate module, because merging this logic into the nixpkgs module
+# is too burdensome, considering that it is already burdened with legacy.
+# Moving this logic into a module does not lose any composition benefits, because
+# its purpose is not something that composes anyway.
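+#
+# Minimal usage sketch (illustrative; assumes a pre-evaluated package set
+# `myPkgs` and the standard `modulesPath` module argument):
+#
+#   { modulesPath, ... }: {
+#     imports = [ "${modulesPath}/misc/nixpkgs/read-only.nix" ];
+#     nixpkgs.pkgs = myPkgs;
+#   }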
+
+{ lib, config, ... }:
+
+let
+  cfg = config.nixpkgs;
+  inherit (lib) mkOption types;
+
+in
+{
+  disabledModules = [
+    ../nixpkgs.nix
+  ];
+  options = {
+    nixpkgs = {
+      pkgs = mkOption {
+        type = lib.types.pkgs;
+        description = lib.mdDoc ''The pkgs module argument.'';
+      };
+      config = mkOption {
+        internal = true;
+        type = types.unique { message = "nixpkgs.config is set to read-only"; } types.anything;
+        description = lib.mdDoc ''
+          The Nixpkgs `config` that `pkgs` was initialized with.
+        '';
+      };
+      overlays = mkOption {
+        internal = true;
+        type = types.unique { message = "nixpkgs.overlays is set to read-only"; } types.anything;
+        description = lib.mdDoc ''
+          The Nixpkgs overlays that `pkgs` was initialized with.
+        '';
+      };
+      hostPlatform = mkOption {
+        internal = true;
+        readOnly = true;
+        description = lib.mdDoc ''
+          The platform of the machine that is running the NixOS configuration.
+        '';
+      };
+      buildPlatform = mkOption {
+        internal = true;
+        readOnly = true;
+        description = lib.mdDoc ''
+          The platform of the machine that built the NixOS configuration.
+        '';
+      };
+      # NOTE: do not add the legacy options such as localSystem here. Let's keep
+      #       this module simple and let module authors upgrade their code instead.
+    };
+  };
+  config = {
+    _module.args.pkgs =
+      # find mistaken definitions
+      builtins.seq cfg.config
+      builtins.seq cfg.overlays
+      builtins.seq cfg.hostPlatform
+      builtins.seq cfg.buildPlatform
+      cfg.pkgs;
+    nixpkgs.config = cfg.pkgs.config;
+    nixpkgs.overlays = cfg.pkgs.overlays;
+    nixpkgs.hostPlatform = cfg.pkgs.stdenv.hostPlatform;
+    nixpkgs.buildPlatform = cfg.pkgs.stdenv.buildPlatform;
+  };
+}
diff --git a/nixpkgs/nixos/modules/misc/nixpkgs/test.nix b/nixpkgs/nixos/modules/misc/nixpkgs/test.nix
new file mode 100644
index 000000000000..0536cfc9624a
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/nixpkgs/test.nix
@@ -0,0 +1,128 @@
+# [nixpkgs]$ nix-build -A nixosTests.nixpkgs --show-trace
+
+{ evalMinimalConfig, pkgs, lib, stdenv }:
+let
+  eval = mod: evalMinimalConfig {
+    imports = [ ../nixpkgs.nix mod ];
+  };
+  withHost = eval {
+    nixpkgs.hostPlatform = "aarch64-linux";
+  };
+  withHostAndBuild = eval {
+    nixpkgs.hostPlatform = "aarch64-linux";
+    nixpkgs.buildPlatform = "aarch64-darwin";
+  };
+  ambiguous = {
+    _file = "ambiguous.nix";
+    nixpkgs.hostPlatform = "aarch64-linux";
+    nixpkgs.buildPlatform = "aarch64-darwin";
+    nixpkgs.system = "x86_64-linux";
+    nixpkgs.localSystem.system = "x86_64-darwin";
+    nixpkgs.crossSystem.system = "i686-linux";
+    imports = [
+      { _file = "repeat.nix";
+        nixpkgs.hostPlatform = "aarch64-linux";
+      }
+    ];
+  };
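+  # Evaluate ../nixpkgs.nix together with the given module, without enforcing
+  # assertions, and return the messages of all assertions that failed.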
+  getErrors = module:
+    let
+      uncheckedEval = lib.evalModules { modules = [ ../nixpkgs.nix module ]; };
+    in map (ass: ass.message) (lib.filter (ass: !ass.assertion) uncheckedEval.config.assertions);
+
+  readOnlyUndefined = evalMinimalConfig {
+    imports = [ ./read-only.nix ];
+  };
+
+  readOnlyBad = evalMinimalConfig {
+    imports = [ ./read-only.nix ];
+    nixpkgs.pkgs = { };
+  };
+
+  readOnly = evalMinimalConfig {
+    imports = [ ./read-only.nix ];
+    nixpkgs.pkgs = pkgs;
+  };
+
+  readOnlyBadConfig = evalMinimalConfig {
+    imports = [ ./read-only.nix ];
+    nixpkgs.pkgs = pkgs;
+    nixpkgs.config.allowUnfree = true; # do in pkgs instead!
+  };
+
+  readOnlyBadOverlays = evalMinimalConfig {
+    imports = [ ./read-only.nix ];
+    nixpkgs.pkgs = pkgs;
+    nixpkgs.overlays = [ (_: _: {}) ]; # do in pkgs instead!
+  };
+
+  readOnlyBadHostPlatform = evalMinimalConfig {
+    imports = [ ./read-only.nix ];
+    nixpkgs.pkgs = pkgs;
+    nixpkgs.hostPlatform = "foo-linux"; # do in pkgs instead!
+  };
+
+  readOnlyBadBuildPlatform = evalMinimalConfig {
+    imports = [ ./read-only.nix ];
+    nixpkgs.pkgs = pkgs;
+    nixpkgs.buildPlatform = "foo-linux"; # do in pkgs instead!
+  };
+
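+  # `throws x` is true when forcing x fails with an error that
+  # `builtins.tryEval` can catch, such as one raised by `throw` or a failed
+  # `assert`.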
+  throws = x: ! (builtins.tryEval x).success;
+
+in
+lib.recurseIntoAttrs {
+  invokeNixpkgsSimple =
+    (eval {
+      nixpkgs.system = stdenv.hostPlatform.system;
+    })._module.args.pkgs.hello;
+  assertions =
+    assert withHost._module.args.pkgs.stdenv.hostPlatform.system == "aarch64-linux";
+    assert withHost._module.args.pkgs.stdenv.buildPlatform.system == "aarch64-linux";
+    assert withHostAndBuild._module.args.pkgs.stdenv.hostPlatform.system == "aarch64-linux";
+    assert withHostAndBuild._module.args.pkgs.stdenv.buildPlatform.system == "aarch64-darwin";
+    assert builtins.trace (lib.head (getErrors ambiguous))
+      getErrors ambiguous ==
+        [''
+          Your system configures nixpkgs with the platform parameters:
+          nixpkgs.hostPlatform, with values defined in:
+            - repeat.nix
+            - ambiguous.nix
+          nixpkgs.buildPlatform, with values defined in:
+            - ambiguous.nix
+
+          However, it also defines the legacy options:
+          nixpkgs.system, with values defined in:
+            - ambiguous.nix
+          nixpkgs.localSystem, with values defined in:
+            - ambiguous.nix
+          nixpkgs.crossSystem, with values defined in:
+            - ambiguous.nix
+
+          For a future-proof system configuration, we recommend removing
+          the legacy definitions.
+        ''];
+    assert getErrors {
+        nixpkgs.localSystem = pkgs.stdenv.hostPlatform;
+        nixpkgs.hostPlatform = pkgs.stdenv.hostPlatform;
+        nixpkgs.pkgs = pkgs;
+      } == [];
+
+
+    # Tests for the read-only.nix module
+    assert readOnly._module.args.pkgs.stdenv.hostPlatform.system == pkgs.stdenv.hostPlatform.system;
+    assert throws readOnlyBad._module.args.pkgs.stdenv;
+    assert throws readOnlyUndefined._module.args.pkgs.stdenv;
+    assert throws readOnlyBadConfig._module.args.pkgs.stdenv;
+    assert throws readOnlyBadOverlays._module.args.pkgs.stdenv;
+    assert throws readOnlyBadHostPlatform._module.args.pkgs.stdenv;
+    assert throws readOnlyBadBuildPlatform._module.args.pkgs.stdenv;
+    # read-only.nix does not provide legacy options, for the sake of simplicity
+    # If you're bothered by this, upgrade your configs to use the new *Platform
+    # options.
+    assert !readOnly.options.nixpkgs?system;
+    assert !readOnly.options.nixpkgs?localSystem;
+    assert !readOnly.options.nixpkgs?crossSystem;
+
+    pkgs.emptyFile;
+}
diff --git a/nixpkgs/nixos/modules/misc/passthru.nix b/nixpkgs/nixos/modules/misc/passthru.nix
new file mode 100644
index 000000000000..beb9d7829037
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/passthru.nix
@@ -0,0 +1,16 @@
+# This module allows you to export arbitrary values from the configuration.
+# Use case: exporting the kernel source expression for easier configuration.
+
+{ lib, ... }:
+
+{
+  options = {
+    passthru = lib.mkOption {
+      visible = false;
+      description = lib.mdDoc ''
+        This attribute set will be exported as a system attribute.
+        You can put whatever you want here.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/misc/version.nix b/nixpkgs/nixos/modules/misc/version.nix
new file mode 100644
index 000000000000..45dbf45b3ae7
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/version.nix
@@ -0,0 +1,188 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.system.nixos;
+  opt = options.system.nixos;
+
+  inherit (lib)
+    concatStringsSep mapAttrsToList toLower
+    literalExpression mkRenamedOptionModule mkDefault mkOption trivial types;
+
+  needsEscaping = s: null != builtins.match "[a-zA-Z0-9]+" s;
+  escapeIfNecessary = s: if needsEscaping s then s else ''"${lib.escape [ "\$" "\"" "\\" "\`" ] s}"'';
+  attrsToText = attrs:
+    concatStringsSep "\n" (
+      mapAttrsToList (n: v: ''${n}=${escapeIfNecessary (toString v)}'') attrs
+    ) + "\n";
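+  # For example (illustrative):
+  #   attrsToText { NAME = "NixOS"; PRETTY_NAME = "NixOS 23.11 (Tapir)"; }
+  # produces the os-release style text
+  #   NAME=NixOS
+  #   PRETTY_NAME="NixOS 23.11 (Tapir)"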
+
+  osReleaseContents = {
+    NAME = "${cfg.distroName}";
+    ID = "${cfg.distroId}";
+    VERSION = "${cfg.release} (${cfg.codeName})";
+    VERSION_CODENAME = toLower cfg.codeName;
+    VERSION_ID = cfg.release;
+    BUILD_ID = cfg.version;
+    PRETTY_NAME = "${cfg.distroName} ${cfg.release} (${cfg.codeName})";
+    LOGO = "nix-snowflake";
+    HOME_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/";
+    DOCUMENTATION_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/learn.html";
+    SUPPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://nixos.org/community.html";
+    BUG_REPORT_URL = lib.optionalString (cfg.distroId == "nixos") "https://github.com/NixOS/nixpkgs/issues";
+  } // lib.optionalAttrs (cfg.variant_id != null) {
+    VARIANT_ID = cfg.variant_id;
+  };
+
+  initrdReleaseContents = (removeAttrs osReleaseContents [ "BUILD_ID" ]) // {
+    PRETTY_NAME = "${osReleaseContents.PRETTY_NAME} (Initrd)";
+  };
+  initrdRelease = pkgs.writeText "initrd-release" (attrsToText initrdReleaseContents);
+
+in
+{
+  imports = [
+    ./label.nix
+    (mkRenamedOptionModule [ "system" "nixosVersion" ] [ "system" "nixos" "version" ])
+    (mkRenamedOptionModule [ "system" "nixosVersionSuffix" ] [ "system" "nixos" "versionSuffix" ])
+    (mkRenamedOptionModule [ "system" "nixosRevision" ] [ "system" "nixos" "revision" ])
+    (mkRenamedOptionModule [ "system" "nixosLabel" ] [ "system" "nixos" "label" ])
+  ];
+
+  options.boot.initrd.osRelease = mkOption {
+    internal = true;
+    readOnly = true;
+    default = initrdRelease;
+  };
+
+  options.system = {
+
+    nixos.version = mkOption {
+      internal = true;
+      type = types.str;
+      description = lib.mdDoc "The full NixOS version (e.g. `16.03.1160.f2d4ee1`).";
+    };
+
+    nixos.release = mkOption {
+      readOnly = true;
+      type = types.str;
+      default = trivial.release;
+      description = lib.mdDoc "The NixOS release (e.g. `16.03`).";
+    };
+
+    nixos.versionSuffix = mkOption {
+      internal = true;
+      type = types.str;
+      default = trivial.versionSuffix;
+      description = lib.mdDoc "The NixOS version suffix (e.g. `1160.f2d4ee1`).";
+    };
+
+    nixos.revision = mkOption {
+      internal = true;
+      type = types.nullOr types.str;
+      default = trivial.revisionWithDefault null;
+      description = lib.mdDoc "The Git revision from which this NixOS configuration was built.";
+    };
+
+    nixos.codeName = mkOption {
+      readOnly = true;
+      type = types.str;
+      default = trivial.codeName;
+      description = lib.mdDoc "The NixOS release code name (e.g. `Emu`).";
+    };
+
+    nixos.distroId = mkOption {
+      internal = true;
+      type = types.str;
+      default = "nixos";
+      description = lib.mdDoc "The id of the operating system";
+    };
+
+    nixos.distroName = mkOption {
+      internal = true;
+      type = types.str;
+      default = "NixOS";
+      description = lib.mdDoc "The name of the operating system";
+    };
+
+    nixos.variant_id = mkOption {
+      type = types.nullOr (types.strMatching "^[a-z0-9._-]+$");
+      default = null;
+      description = lib.mdDoc "A lower-case string identifying a specific variant or edition of the operating system";
+      example = "installer";
+    };
+
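+    # Illustrative: a system first installed from the 23.11 release would
+    # typically carry `system.stateVersion = "23.11";` in its
+    # configuration.nix, as written by nixos-generate-config at install time.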
+    stateVersion = mkOption {
+      type = types.str;
+      # TODO Remove this and drop the default of the option so people are forced to set it.
+      # Doing this also means fixing the comment in nixos/modules/testing/test-instrumentation.nix
+      apply = v:
+        lib.warnIf (options.system.stateVersion.highestPrio == (lib.mkOptionDefault { }).priority)
+          "system.stateVersion is not set, defaulting to ${v}. Read why this matters on https://nixos.org/manual/nixos/stable/options.html#opt-system.stateVersion."
+          v;
+      default = cfg.release;
+      defaultText = literalExpression "config.${opt.release}";
+      description = lib.mdDoc ''
+        This option defines the first version of NixOS you have installed on this particular machine,
+        and is used to maintain compatibility with application data (e.g. databases) created on older NixOS versions.
+
+        For example, if NixOS version XX.YY ships with AwesomeDB version N by default, and is then
+        upgraded to version XX.YY+1, which ships AwesomeDB version N+1, the existing databases
+        may no longer be compatible, causing applications to fail, or even leading to data loss.
+
+        The `stateVersion` mechanism avoids this situation by making the default version of such packages
+        conditional on the first version of NixOS you've installed (encoded in `stateVersion`), instead of
+        simply always using the latest one.
+
+        Note that this generally only affects applications that can't upgrade their data automatically;
+        applications and services that support automatic migrations will stay on the latest versions
+        when you upgrade.
+
+        Most users should **never** change this value after the initial install, for any reason,
+        even if you've upgraded your system to a new NixOS release.
+
+        This value does **not** affect the Nixpkgs version your packages and OS are pulled from,
+        so changing it will **not** upgrade your system.
+
+        This value being lower than the current NixOS release does **not** mean your system is
+        out of date, out of support, or vulnerable.
+
+        Do **not** change this value unless you have manually inspected all the changes it would
+        make to your configuration, and migrated your data accordingly.
+      '';
+    };
+
+    configurationRevision = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "The Git revision of the top-level flake from which this configuration was built.";
+    };
+
+  };
+
+  config = {
+
+    system.nixos = {
+      # These defaults are set here rather than in the option declarations
+      # above so that changing them does not trigger a rebuild of the manual.
+      version = mkDefault (cfg.release + cfg.versionSuffix);
+    };
+
+    # Generate /etc/os-release.  See
+    # https://www.freedesktop.org/software/systemd/man/os-release.html for the
+    # format.
+    environment.etc = {
+      "lsb-release".text = attrsToText {
+        LSB_VERSION = "${cfg.release} (${cfg.codeName})";
+        DISTRIB_ID = "${cfg.distroId}";
+        DISTRIB_RELEASE = cfg.release;
+        DISTRIB_CODENAME = toLower cfg.codeName;
+        DISTRIB_DESCRIPTION = "${cfg.distroName} ${cfg.release} (${cfg.codeName})";
+      };
+
+      "os-release".text = attrsToText osReleaseContents;
+    };
+
+  };
+
+  # uses version info from nixpkgs, which requires a full nixpkgs path
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/misc/wordlist.nix b/nixpkgs/nixos/modules/misc/wordlist.nix
new file mode 100644
index 000000000000..f01fcb6f5a91
--- /dev/null
+++ b/nixpkgs/nixos/modules/misc/wordlist.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  concatAndSort = name: files: pkgs.runCommand name {} ''
+    awk 1 ${lib.escapeShellArgs files} | sed '{ /^\s*$/d; s/^\s\+//; s/\s\+$// }' | sort | uniq > $out
+  '';
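+  # Illustrative: concatAndSort "wordlist-WORDLIST" [ ./a.txt ./b.txt ] builds
+  # a store file containing the lines of both files, trimmed of surrounding
+  # whitespace, sorted, and de-duplicated (blank lines dropped).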
+in
+{
+  options = {
+    environment.wordlist = {
+      enable = mkEnableOption (lib.mdDoc "environment variables for lists of words");
+
+      lists = mkOption {
+        type = types.attrsOf (types.nonEmptyListOf types.path);
+
+        default = {
+          WORDLIST = [ "${pkgs.scowl}/share/dict/words.txt" ];
+        };
+
+        defaultText = literalExpression ''
+          {
+            WORDLIST = [ "''${pkgs.scowl}/share/dict/words.txt" ];
+          }
+        '';
+
+        description = lib.mdDoc ''
+          A set with the key names being the environment variable you'd like to
+          set and the values being a list of paths to text documents containing
+          lists of words. The various files will be merged, sorted, duplicates
+          removed, and extraneous spacing removed.
+
+          If you have a handful of words that you want to add to an already
+          existing wordlist, you may find `builtins.toFile` useful for this
+          task.
+        '';
+
+        example = literalExpression ''
+          {
+            WORDLIST = [ "''${pkgs.scowl}/share/dict/words.txt" ];
+            AUGMENTED_WORDLIST = [
+              "''${pkgs.scowl}/share/dict/words.txt"
+              "''${pkgs.scowl}/share/dict/words.variants.txt"
+              (builtins.toFile "extra-words" '''
+                desynchronization
+                oobleck''')
+            ];
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.environment.wordlist.enable {
+    environment.variables =
+      lib.mapAttrs
+        (name: value: "${concatAndSort "wordlist-${name}" value}")
+        config.environment.wordlist.lists;
+  };
+}
diff --git a/nixpkgs/nixos/modules/module-list.nix b/nixpkgs/nixos/modules/module-list.nix
new file mode 100644
index 000000000000..9f79c70125fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/module-list.nix
@@ -0,0 +1,1540 @@
+[
+  ./config/appstream.nix
+  ./config/console.nix
+  ./config/debug-info.nix
+  ./config/fanout.nix
+  ./config/fonts/fontconfig.nix
+  ./config/fonts/fontdir.nix
+  ./config/fonts/ghostscript.nix
+  ./config/fonts/packages.nix
+  ./config/gtk/gtk-icon-cache.nix
+  ./config/i18n.nix
+  ./config/iproute2.nix
+  ./config/krb5/default.nix
+  ./config/ldap.nix
+  ./config/locale.nix
+  ./config/malloc.nix
+  ./config/mysql.nix
+  ./config/networking.nix
+  ./config/nix.nix
+  ./config/nix-channel.nix
+  ./config/nix-flakes.nix
+  ./config/nix-remote-build.nix
+  ./config/no-x-libs.nix
+  ./config/nsswitch.nix
+  ./config/power-management.nix
+  ./config/pulseaudio.nix
+  ./config/qt.nix
+  ./config/resolvconf.nix
+  ./config/shells-environment.nix
+  ./config/stevenblack.nix
+  ./config/swap.nix
+  ./config/sysctl.nix
+  ./config/system-environment.nix
+  ./config/system-path.nix
+  ./config/terminfo.nix
+  ./config/unix-odbc-drivers.nix
+  ./config/users-groups.nix
+  ./config/vte.nix
+  ./config/xdg/autostart.nix
+  ./config/xdg/icons.nix
+  ./config/xdg/menus.nix
+  ./config/xdg/mime.nix
+  ./config/xdg/portal.nix
+  ./config/xdg/portals/lxqt.nix
+  ./config/xdg/portals/wlr.nix
+  ./config/xdg/sounds.nix
+  ./config/zram.nix
+  ./hardware/acpilight.nix
+  ./hardware/all-firmware.nix
+  ./hardware/bladeRF.nix
+  ./hardware/brillo.nix
+  ./hardware/ckb-next.nix
+  ./hardware/corectrl.nix
+  ./hardware/cpu/amd-microcode.nix
+  ./hardware/cpu/amd-sev.nix
+  ./hardware/cpu/intel-microcode.nix
+  ./hardware/cpu/intel-sgx.nix
+  ./hardware/cpu/x86-msr.nix
+  ./hardware/decklink.nix
+  ./hardware/device-tree.nix
+  ./hardware/digitalbitbox.nix
+  ./hardware/flipperzero.nix
+  ./hardware/flirc.nix
+  ./hardware/gkraken.nix
+  ./hardware/glasgow.nix
+  ./hardware/gpgsmartcards.nix
+  ./hardware/hackrf.nix
+  ./hardware/i2c.nix
+  ./hardware/infiniband.nix
+  ./hardware/keyboard/qmk.nix
+  ./hardware/keyboard/teck.nix
+  ./hardware/keyboard/uhk.nix
+  ./hardware/keyboard/zsa.nix
+  ./hardware/ksm.nix
+  ./hardware/ledger.nix
+  ./hardware/logitech.nix
+  ./hardware/mcelog.nix
+  ./hardware/network/ath-user-regd.nix
+  ./hardware/network/b43.nix
+  ./hardware/network/intel-2200bg.nix
+  ./hardware/new-lg4ff.nix
+  ./hardware/nitrokey.nix
+  ./hardware/onlykey/default.nix
+  ./hardware/opengl.nix
+  ./hardware/openrazer.nix
+  ./hardware/opentabletdriver.nix
+  ./hardware/pcmcia.nix
+  ./hardware/printers.nix
+  ./hardware/raid/hpsa.nix
+  ./hardware/rtl-sdr.nix
+  ./hardware/saleae-logic.nix
+  ./hardware/sata.nix
+  ./hardware/sensor/hddtemp.nix
+  ./hardware/sensor/iio.nix
+  ./hardware/steam-hardware.nix
+  ./hardware/system-76.nix
+  ./hardware/tuxedo-keyboard.nix
+  ./hardware/ubertooth.nix
+  ./hardware/uinput.nix
+  ./hardware/usb-modeswitch.nix
+  ./hardware/usb-storage.nix
+  ./hardware/video/amdgpu-pro.nix
+  ./hardware/video/bumblebee.nix
+  ./hardware/video/capture/mwprocapture.nix
+  ./hardware/video/displaylink.nix
+  ./hardware/video/nvidia.nix
+  ./hardware/video/switcheroo-control.nix
+  ./hardware/video/uvcvideo/default.nix
+  ./hardware/video/webcam/facetimehd.nix
+  ./hardware/video/webcam/ipu6.nix
+  ./hardware/wooting.nix
+  ./hardware/xone.nix
+  ./hardware/xpadneo.nix
+  ./i18n/input-method/default.nix
+  ./i18n/input-method/fcitx5.nix
+  ./i18n/input-method/hime.nix
+  ./i18n/input-method/ibus.nix
+  ./i18n/input-method/kime.nix
+  ./i18n/input-method/nabi.nix
+  ./i18n/input-method/uim.nix
+  ./installer/tools/tools.nix
+  ./misc/assertions.nix
+  ./misc/crashdump.nix
+  ./misc/documentation.nix
+  ./misc/extra-arguments.nix
+  ./misc/ids.nix
+  ./misc/label.nix
+  ./misc/lib.nix
+  ./misc/locate.nix
+  ./misc/man-db.nix
+  ./misc/mandoc.nix
+  ./misc/meta.nix
+  ./misc/nixops-autoluks.nix
+  ./misc/nixpkgs.nix
+  ./misc/passthru.nix
+  ./misc/version.nix
+  ./misc/wordlist.nix
+  ./programs/_1password-gui.nix
+  ./programs/_1password.nix
+  ./programs/adb.nix
+  ./programs/appgate-sdp.nix
+  ./programs/atop.nix
+  ./programs/ausweisapp.nix
+  ./programs/autojump.nix
+  ./programs/bandwhich.nix
+  ./programs/bash-my-aws.nix
+  ./programs/bash/bash-completion.nix
+  ./programs/bash/bash.nix
+  ./programs/bash/blesh.nix
+  ./programs/bash/ls-colors.nix
+  ./programs/bash/undistract-me.nix
+  ./programs/bcc.nix
+  ./programs/browserpass.nix
+  ./programs/calls.nix
+  ./programs/captive-browser.nix
+  ./programs/ccache.nix
+  ./programs/cdemu.nix
+  ./programs/cfs-zen-tweaks.nix
+  ./programs/chromium.nix
+  ./programs/clash-verge.nix
+  ./programs/cnping.nix
+  ./programs/command-not-found/command-not-found.nix
+  ./programs/criu.nix
+  ./programs/darling.nix
+  ./programs/dconf.nix
+  ./programs/digitalbitbox/default.nix
+  ./programs/direnv.nix
+  ./programs/dmrconfig.nix
+  ./programs/droidcam.nix
+  ./programs/ecryptfs.nix
+  ./programs/environment.nix
+  ./programs/evince.nix
+  ./programs/extra-container.nix
+  ./programs/feedbackd.nix
+  ./programs/file-roller.nix
+  ./programs/firefox.nix
+  ./programs/firejail.nix
+  ./programs/fish.nix
+  ./programs/flashrom.nix
+  ./programs/flexoptix-app.nix
+  ./programs/freetds.nix
+  ./programs/fuse.nix
+  ./programs/fzf.nix
+  ./programs/gamemode.nix
+  ./programs/gamescope.nix
+  ./programs/geary.nix
+  ./programs/git.nix
+  ./programs/gnome-disks.nix
+  ./programs/gnome-terminal.nix
+  ./programs/gnupg.nix
+  ./programs/gpaste.nix
+  ./programs/gphoto2.nix
+  ./programs/haguichi.nix
+  ./programs/hamster.nix
+  ./programs/htop.nix
+  ./programs/hyprland.nix
+  ./programs/iay.nix
+  ./programs/iftop.nix
+  ./programs/i3lock.nix
+  ./programs/iotop.nix
+  ./programs/java.nix
+  ./programs/k3b.nix
+  ./programs/k40-whisperer.nix
+  ./programs/kbdlight.nix
+  ./programs/kclock.nix
+  ./programs/kdeconnect.nix
+  ./programs/less.nix
+  ./programs/liboping.nix
+  ./programs/light.nix
+  ./programs/mdevctl.nix
+  ./programs/mepo.nix
+  ./programs/mininet.nix
+  ./programs/minipro.nix
+  ./programs/miriway.nix
+  ./programs/mosh.nix
+  ./programs/msmtp.nix
+  ./programs/mtr.nix
+  ./programs/nano.nix
+  ./programs/nbd.nix
+  ./programs/neovim.nix
+  ./programs/nethoscope.nix
+  ./programs/nexttrace.nix
+  ./programs/nix-index.nix
+  ./programs/nix-ld.nix
+  ./programs/nm-applet.nix
+  ./programs/nncp.nix
+  ./programs/noisetorch.nix
+  ./programs/npm.nix
+  ./programs/ns-usbloader.nix
+  ./programs/oblogout.nix
+  ./programs/oddjobd.nix
+  ./programs/openvpn3.nix
+  ./programs/pantheon-tweaks.nix
+  ./programs/partition-manager.nix
+  ./programs/plotinus.nix
+  ./programs/projecteur.nix
+  ./programs/proxychains.nix
+  ./programs/qdmr.nix
+  ./programs/qt5ct.nix
+  ./programs/regreet.nix
+  ./programs/rog-control-center.nix
+  ./programs/rust-motd.nix
+  ./programs/screen.nix
+  ./programs/seahorse.nix
+  ./programs/sedutil.nix
+  ./programs/shadow.nix
+  ./programs/sharing.nix
+  ./programs/singularity.nix
+  ./programs/skim.nix
+  ./programs/slock.nix
+  ./programs/sniffnet.nix
+  ./programs/spacefm.nix
+  ./programs/ssh.nix
+  ./programs/starship.nix
+  ./programs/steam.nix
+  ./programs/streamdeck-ui.nix
+  ./programs/sysdig.nix
+  ./programs/system-config-printer.nix
+  ./programs/systemtap.nix
+  ./programs/thefuck.nix
+  ./programs/thunar.nix
+  ./programs/tmux.nix
+  ./programs/traceroute.nix
+  ./programs/trippy.nix
+  ./programs/tsm-client.nix
+  ./programs/turbovnc.nix
+  ./programs/udevil.nix
+  ./programs/usbtop.nix
+  ./programs/vim.nix
+  ./programs/virt-manager.nix
+  ./programs/wavemon.nix
+  ./programs/wayland/cardboard.nix
+  ./programs/wayland/river.nix
+  ./programs/wayland/sway.nix
+  ./programs/wayland/waybar.nix
+  ./programs/wayland/wayfire.nix
+  ./programs/weylus.nix
+  ./programs/wireshark.nix
+  ./programs/xastir.nix
+  ./programs/wshowkeys.nix
+  ./programs/xfconf.nix
+  ./programs/xfs_quota.nix
+  ./programs/xonsh.nix
+  ./programs/xss-lock.nix
+  ./programs/xwayland.nix
+  ./programs/yabar.nix
+  ./programs/yazi.nix
+  ./programs/yubikey-touch-detector.nix
+  ./programs/zmap.nix
+  ./programs/zsh/oh-my-zsh.nix
+  ./programs/zsh/zsh-autoenv.nix
+  ./programs/zsh/zsh-autosuggestions.nix
+  ./programs/zsh/zsh-syntax-highlighting.nix
+  ./programs/zsh/zsh.nix
+  ./rename.nix
+  ./security/acme
+  ./security/apparmor.nix
+  ./security/audit.nix
+  ./security/auditd.nix
+  ./security/ca.nix
+  ./security/chromium-suid-sandbox.nix
+  ./security/dhparams.nix
+  ./security/doas.nix
+  ./security/duosec.nix
+  ./security/google_oslogin.nix
+  ./security/ipa.nix
+  ./security/lock-kernel-modules.nix
+  ./security/misc.nix
+  ./security/oath.nix
+  ./security/pam.nix
+  ./security/pam_mount.nix
+  ./security/pam_usb.nix
+  ./security/please.nix
+  ./security/polkit.nix
+  ./security/rngd.nix
+  ./security/rtkit.nix
+  ./security/sudo.nix
+  ./security/sudo-rs.nix
+  ./security/systemd-confinement.nix
+  ./security/tpm2.nix
+  ./security/wrappers/default.nix
+  ./services/admin/meshcentral.nix
+  ./services/admin/oxidized.nix
+  ./services/admin/pgadmin.nix
+  ./services/admin/salt/master.nix
+  ./services/admin/salt/minion.nix
+  ./services/amqp/activemq/default.nix
+  ./services/amqp/rabbitmq.nix
+  ./services/audio/alsa.nix
+  ./services/audio/botamusique.nix
+  ./services/audio/castopod.nix
+  ./services/audio/gmediarender.nix
+  ./services/audio/gonic.nix
+  ./services/audio/goxlr-utility.nix
+  ./services/audio/hqplayerd.nix
+  ./services/audio/icecast.nix
+  ./services/audio/jack.nix
+  ./services/audio/jmusicbot.nix
+  ./services/audio/liquidsoap.nix
+  ./services/audio/mopidy.nix
+  ./services/audio/mpd.nix
+  ./services/audio/mpdscribble.nix
+  ./services/audio/navidrome.nix
+  ./services/audio/networkaudiod.nix
+  ./services/audio/roon-bridge.nix
+  ./services/audio/roon-server.nix
+  ./services/audio/slimserver.nix
+  ./services/audio/snapserver.nix
+  ./services/audio/spotifyd.nix
+  ./services/audio/squeezelite.nix
+  ./services/audio/tts.nix
+  ./services/audio/wyoming/faster-whisper.nix
+  ./services/audio/wyoming/openwakeword.nix
+  ./services/audio/wyoming/piper.nix
+  ./services/audio/ympd.nix
+  ./services/backup/automysqlbackup.nix
+  ./services/backup/bacula.nix
+  ./services/backup/borgbackup.nix
+  ./services/backup/borgmatic.nix
+  ./services/backup/btrbk.nix
+  ./services/backup/duplicati.nix
+  ./services/backup/duplicity.nix
+  ./services/backup/mysql-backup.nix
+  ./services/backup/postgresql-backup.nix
+  ./services/backup/postgresql-wal-receiver.nix
+  ./services/backup/restic-rest-server.nix
+  ./services/backup/restic.nix
+  ./services/backup/rsnapshot.nix
+  ./services/backup/sanoid.nix
+  ./services/backup/syncoid.nix
+  ./services/backup/tarsnap.nix
+  ./services/backup/tsm.nix
+  ./services/backup/zfs-replication.nix
+  ./services/backup/znapzend.nix
+  ./services/backup/zrepl.nix
+  ./services/blockchain/ethereum/erigon.nix
+  ./services/blockchain/ethereum/geth.nix
+  ./services/blockchain/ethereum/lighthouse.nix
+  ./services/cluster/corosync/default.nix
+  ./services/cluster/hadoop/default.nix
+  ./services/cluster/k3s/default.nix
+  ./services/cluster/kubernetes/addon-manager.nix
+  ./services/cluster/kubernetes/addons/dns.nix
+  ./services/cluster/kubernetes/apiserver.nix
+  ./services/cluster/kubernetes/controller-manager.nix
+  ./services/cluster/kubernetes/default.nix
+  ./services/cluster/kubernetes/flannel.nix
+  ./services/cluster/kubernetes/kubelet.nix
+  ./services/cluster/kubernetes/pki.nix
+  ./services/cluster/kubernetes/proxy.nix
+  ./services/cluster/kubernetes/scheduler.nix
+  ./services/cluster/pacemaker/default.nix
+  ./services/cluster/patroni/default.nix
+  ./services/cluster/spark/default.nix
+  ./services/computing/boinc/client.nix
+  ./services/computing/foldingathome/client.nix
+  ./services/computing/slurm/slurm.nix
+  ./services/computing/torque/mom.nix
+  ./services/computing/torque/server.nix
+  ./services/continuous-integration/buildbot/master.nix
+  ./services/continuous-integration/buildbot/worker.nix
+  ./services/continuous-integration/buildkite-agents.nix
+  ./services/continuous-integration/gitea-actions-runner.nix
+  ./services/continuous-integration/github-runner.nix
+  ./services/continuous-integration/github-runners.nix
+  ./services/continuous-integration/gitlab-runner.nix
+  ./services/continuous-integration/gocd-agent/default.nix
+  ./services/continuous-integration/gocd-server/default.nix
+  ./services/continuous-integration/hercules-ci-agent/default.nix
+  ./services/continuous-integration/hydra/default.nix
+  ./services/continuous-integration/jenkins/default.nix
+  ./services/continuous-integration/jenkins/job-builder.nix
+  ./services/continuous-integration/jenkins/slave.nix
+  ./services/continuous-integration/woodpecker/agents.nix
+  ./services/continuous-integration/woodpecker/server.nix
+  ./services/databases/aerospike.nix
+  ./services/databases/cassandra.nix
+  ./services/databases/clickhouse.nix
+  ./services/databases/cockroachdb.nix
+  ./services/databases/couchdb.nix
+  ./services/databases/dgraph.nix
+  ./services/databases/dragonflydb.nix
+  ./services/databases/ferretdb.nix
+  ./services/databases/firebird.nix
+  ./services/databases/foundationdb.nix
+  ./services/databases/hbase-standalone.nix
+  ./services/databases/influxdb.nix
+  ./services/databases/influxdb2.nix
+  ./services/databases/lldap.nix
+  ./services/databases/memcached.nix
+  ./services/databases/monetdb.nix
+  ./services/databases/mongodb.nix
+  ./services/databases/mysql.nix
+  ./services/databases/neo4j.nix
+  ./services/databases/openldap.nix
+  ./services/databases/opentsdb.nix
+  ./services/databases/pgbouncer.nix
+  ./services/databases/pgmanage.nix
+  ./services/databases/postgresql.nix
+  ./services/databases/redis.nix
+  ./services/databases/surrealdb.nix
+  ./services/databases/victoriametrics.nix
+  ./services/desktops/accountsservice.nix
+  ./services/desktops/bamf.nix
+  ./services/desktops/blueman.nix
+  ./services/desktops/cpupower-gui.nix
+  ./services/desktops/deepin/dde-api.nix
+  ./services/desktops/deepin/app-services.nix
+  ./services/desktops/deepin/dde-daemon.nix
+  ./services/desktops/dleyna-renderer.nix
+  ./services/desktops/dleyna-server.nix
+  ./services/desktops/espanso.nix
+  ./services/desktops/flatpak.nix
+  ./services/desktops/geoclue2.nix
+  ./services/desktops/gnome/at-spi2-core.nix
+  ./services/desktops/gnome/evolution-data-server.nix
+  ./services/desktops/gnome/glib-networking.nix
+  ./services/desktops/gnome/gnome-browser-connector.nix
+  ./services/desktops/gnome/gnome-initial-setup.nix
+  ./services/desktops/gnome/gnome-keyring.nix
+  ./services/desktops/gnome/gnome-online-accounts.nix
+  ./services/desktops/gnome/gnome-online-miners.nix
+  ./services/desktops/gnome/gnome-remote-desktop.nix
+  ./services/desktops/gnome/gnome-settings-daemon.nix
+  ./services/desktops/gnome/gnome-user-share.nix
+  ./services/desktops/gnome/rygel.nix
+  ./services/desktops/gnome/sushi.nix
+  ./services/desktops/gnome/tracker-miners.nix
+  ./services/desktops/gnome/tracker.nix
+  ./services/desktops/gsignond.nix
+  ./services/desktops/gvfs.nix
+  ./services/desktops/malcontent.nix
+  ./services/desktops/neard.nix
+  ./services/desktops/pipewire/pipewire.nix
+  ./services/desktops/pipewire/wireplumber.nix
+  ./services/desktops/profile-sync-daemon.nix
+  ./services/desktops/system-config-printer.nix
+  ./services/desktops/system76-scheduler.nix
+  ./services/desktops/telepathy.nix
+  ./services/desktops/tumbler.nix
+  ./services/desktops/zeitgeist.nix
+  ./services/development/blackfire.nix
+  ./services/development/bloop.nix
+  ./services/development/distccd.nix
+  ./services/development/gemstash.nix
+  ./services/development/hoogle.nix
+  ./services/development/jupyter/default.nix
+  ./services/development/jupyterhub/default.nix
+  ./services/development/livebook.nix
+  ./services/development/lorri.nix
+  ./services/development/rstudio-server/default.nix
+  ./services/development/zammad.nix
+  ./services/display-managers/greetd.nix
+  ./services/editors/emacs.nix
+  ./services/editors/haste.nix
+  ./services/editors/infinoted.nix
+  ./services/finance/odoo.nix
+  ./services/games/asf.nix
+  ./services/games/crossfire-server.nix
+  ./services/games/deliantra-server.nix
+  ./services/games/factorio.nix
+  ./services/games/freeciv.nix
+  ./services/games/mchprs.nix
+  ./services/games/minecraft-server.nix
+  ./services/games/minetest-server.nix
+  ./services/games/openarena.nix
+  ./services/games/quake3-server.nix
+  ./services/games/teeworlds.nix
+  ./services/games/terraria.nix
+  ./services/games/xonotic.nix
+  ./services/hardware/acpid.nix
+  ./services/hardware/actkbd.nix
+  ./services/hardware/argonone.nix
+  ./services/hardware/asusd.nix
+  ./services/hardware/auto-cpufreq.nix
+  ./services/hardware/bluetooth.nix
+  ./services/hardware/bolt.nix
+  ./services/hardware/brltty.nix
+  ./services/hardware/evscript.nix
+  ./services/hardware/ddccontrol.nix
+  ./services/hardware/fancontrol.nix
+  ./services/hardware/freefall.nix
+  ./services/hardware/fwupd.nix
+  ./services/hardware/hddfancontrol.nix
+  ./services/hardware/illum.nix
+  ./services/hardware/interception-tools.nix
+  ./services/hardware/iptsd.nix
+  ./services/hardware/irqbalance.nix
+  ./services/hardware/joycond.nix
+  ./services/hardware/kanata.nix
+  ./services/hardware/lcd.nix
+  ./services/hardware/lirc.nix
+  ./services/hardware/nvidia-optimus.nix
+  ./services/hardware/openrgb.nix
+  ./services/hardware/pcscd.nix
+  ./services/hardware/pommed.nix
+  ./services/hardware/power-profiles-daemon.nix
+  ./services/hardware/rasdaemon.nix
+  ./services/hardware/ratbagd.nix
+  ./services/hardware/sane.nix
+  ./services/hardware/sane_extra_backends/brscan4.nix
+  ./services/hardware/sane_extra_backends/brscan5.nix
+  ./services/hardware/sane_extra_backends/dsseries.nix
+  ./services/hardware/spacenavd.nix
+  ./services/hardware/supergfxd.nix
+  ./services/hardware/tcsd.nix
+  ./services/hardware/thermald.nix
+  ./services/hardware/thinkfan.nix
+  ./services/hardware/throttled.nix
+  ./services/hardware/tlp.nix
+  ./services/hardware/trezord.nix
+  ./services/hardware/triggerhappy.nix
+  ./services/hardware/tuxedo-rs.nix
+  ./services/hardware/udev.nix
+  ./services/hardware/udisks2.nix
+  ./services/hardware/undervolt.nix
+  ./services/hardware/upower.nix
+  ./services/hardware/usbmuxd.nix
+  ./services/hardware/usbrelayd.nix
+  ./services/hardware/vdr.nix
+  ./services/hardware/keyd.nix
+  ./services/home-automation/ebusd.nix
+  ./services/home-automation/esphome.nix
+  ./services/home-automation/evcc.nix
+  ./services/home-automation/home-assistant.nix
+  ./services/home-automation/homeassistant-satellite.nix
+  ./services/home-automation/zigbee2mqtt.nix
+  ./services/home-automation/zwave-js.nix
+  ./services/logging/SystemdJournal2Gelf.nix
+  ./services/logging/awstats.nix
+  ./services/logging/filebeat.nix
+  ./services/logging/fluentd.nix
+  ./services/logging/graylog.nix
+  ./services/logging/heartbeat.nix
+  ./services/logging/journalbeat.nix
+  ./services/logging/journaldriver.nix
+  ./services/logging/journalwatch.nix
+  ./services/logging/klogd.nix
+  ./services/logging/logcheck.nix
+  ./services/logging/logrotate.nix
+  ./services/logging/logstash.nix
+  ./services/logging/promtail.nix
+  ./services/logging/rsyslogd.nix
+  ./services/logging/syslog-ng.nix
+  ./services/logging/syslogd.nix
+  ./services/logging/vector.nix
+  ./services/logging/ulogd.nix
+  ./services/mail/clamsmtp.nix
+  ./services/mail/davmail.nix
+  ./services/mail/dkimproxy-out.nix
+  ./services/mail/dovecot.nix
+  ./services/mail/dspam.nix
+  ./services/mail/exim.nix
+  ./services/mail/goeland.nix
+  ./services/mail/listmonk.nix
+  ./services/mail/maddy.nix
+  ./services/mail/mail.nix
+  ./services/mail/mailcatcher.nix
+  ./services/mail/mailhog.nix
+  ./services/mail/mailman.nix
+  ./services/mail/mlmmj.nix
+  ./services/mail/nullmailer.nix
+  ./services/mail/offlineimap.nix
+  ./services/mail/opendkim.nix
+  ./services/mail/opensmtpd.nix
+  ./services/mail/pfix-srsd.nix
+  ./services/mail/postfix.nix
+  ./services/mail/postfixadmin.nix
+  ./services/mail/postgrey.nix
+  ./services/mail/postsrsd.nix
+  ./services/mail/public-inbox.nix
+  ./services/mail/roundcube.nix
+  ./services/mail/rspamd.nix
+  ./services/mail/rss2email.nix
+  ./services/mail/schleuder.nix
+  ./services/mail/spamassassin.nix
+  ./services/mail/stalwart-mail.nix
+  ./services/mail/sympa.nix
+  ./services/mail/zeyple.nix
+  ./services/matrix/appservice-discord.nix
+  ./services/matrix/appservice-irc.nix
+  ./services/matrix/conduit.nix
+  ./services/matrix/dendrite.nix
+  ./services/matrix/mautrix-facebook.nix
+  ./services/matrix/mautrix-telegram.nix
+  ./services/matrix/mautrix-whatsapp.nix
+  ./services/matrix/mjolnir.nix
+  ./services/matrix/mx-puppet-discord.nix
+  ./services/matrix/pantalaimon.nix
+  ./services/matrix/matrix-sliding-sync.nix
+  ./services/matrix/synapse.nix
+  ./services/misc/airsonic.nix
+  ./services/misc/amazon-ssm-agent.nix
+  ./services/misc/ananicy.nix
+  ./services/misc/ankisyncd.nix
+  ./services/misc/apache-kafka.nix
+  ./services/misc/atuin.nix
+  ./services/misc/autofs.nix
+  ./services/misc/autorandr.nix
+  ./services/misc/autosuspend.nix
+  ./services/misc/bazarr.nix
+  ./services/misc/bcg.nix
+  ./services/misc/beanstalkd.nix
+  ./services/misc/bees.nix
+  ./services/misc/bepasty.nix
+  ./services/misc/calibre-server.nix
+  ./services/misc/canto-daemon.nix
+  ./services/misc/cfdyndns.nix
+  ./services/misc/cgminer.nix
+  ./services/misc/clipcat.nix
+  ./services/misc/clipmenu.nix
+  ./services/misc/confd.nix
+  ./services/misc/cpuminer-cryptonight.nix
+  ./services/misc/devmon.nix
+  ./services/misc/dictd.nix
+  ./services/misc/disnix.nix
+  ./services/misc/docker-registry.nix
+  ./services/misc/domoticz.nix
+  ./services/misc/duckling.nix
+  ./services/misc/dwm-status.nix
+  ./services/misc/dysnomia.nix
+  ./services/misc/errbot.nix
+  ./services/misc/etcd.nix
+  ./services/misc/etebase-server.nix
+  ./services/misc/etesync-dav.nix
+  ./services/misc/evdevremapkeys.nix
+  ./services/misc/felix.nix
+  ./services/misc/forgejo.nix
+  ./services/misc/freeswitch.nix
+  ./services/misc/fstrim.nix
+  ./services/misc/gammu-smsd.nix
+  ./services/misc/geoipupdate.nix
+  ./services/misc/gitea.nix
+  ./services/misc/gitlab.nix
+  ./services/misc/gitolite.nix
+  ./services/misc/gitweb.nix
+  ./services/misc/gogs.nix
+  ./services/misc/gollum.nix
+  ./services/misc/gpsd.nix
+  ./services/misc/greenclip.nix
+  ./services/misc/headphones.nix
+  ./services/misc/heisenbridge.nix
+  ./services/misc/homepage-dashboard.nix
+  ./services/misc/ihaskell.nix
+  ./services/misc/input-remapper.nix
+  ./services/misc/irkerd.nix
+  ./services/misc/jackett.nix
+  ./services/misc/jellyfin.nix
+  ./services/misc/jellyseerr.nix
+  ./services/misc/klipper.nix
+  ./services/misc/languagetool.nix
+  ./services/misc/leaps.nix
+  ./services/misc/libreddit.nix
+  ./services/misc/lidarr.nix
+  ./services/misc/lifecycled.nix
+  ./services/misc/logkeys.nix
+  ./services/misc/mame.nix
+  ./services/misc/mbpfan.nix
+  ./services/misc/mediatomb.nix
+  ./services/misc/metabase.nix
+  ./services/misc/moonraker.nix
+  ./services/misc/mqtt2influxdb.nix
+  ./services/misc/n8n.nix
+  ./services/misc/nitter.nix
+  ./services/misc/nix-gc.nix
+  ./services/misc/nix-optimise.nix
+  ./services/misc/nix-ssh-serve.nix
+  ./services/misc/novacomd.nix
+  ./services/misc/ntfy-sh.nix
+  ./services/misc/nzbget.nix
+  ./services/misc/nzbhydra2.nix
+  ./services/misc/octoprint.nix
+  ./services/misc/ombi.nix
+  ./services/misc/osrm.nix
+  ./services/misc/owncast.nix
+  ./services/misc/packagekit.nix
+  ./services/misc/paperless.nix
+  ./services/misc/parsoid.nix
+  ./services/misc/persistent-evdev.nix
+  ./services/misc/pinnwand.nix
+  ./services/misc/plex.nix
+  ./services/misc/plikd.nix
+  ./services/misc/podgrab.nix
+  ./services/misc/polaris.nix
+  ./services/misc/portunus.nix
+  ./services/misc/prowlarr.nix
+  ./services/misc/pufferpanel.nix
+  ./services/misc/pykms.nix
+  ./services/misc/radarr.nix
+  ./services/misc/readarr.nix
+  ./services/misc/redmine.nix
+  ./services/misc/ripple-data-api.nix
+  ./services/misc/rippled.nix
+  ./services/misc/rmfakecloud.nix
+  ./services/misc/rkvm.nix
+  ./services/misc/rshim.nix
+  ./services/misc/safeeyes.nix
+  ./services/misc/sdrplay.nix
+  ./services/misc/serviio.nix
+  ./services/misc/sickbeard.nix
+  ./services/misc/signald.nix
+  ./services/misc/siproxd.nix
+  ./services/misc/snapper.nix
+  ./services/misc/soft-serve.nix
+  ./services/misc/sonarr.nix
+  ./services/misc/sourcehut
+  ./services/misc/spice-autorandr.nix
+  ./services/misc/spice-vdagentd.nix
+  ./services/misc/spice-webdavd.nix
+  ./services/misc/sssd.nix
+  ./services/misc/subsonic.nix
+  ./services/misc/sundtek.nix
+  ./services/misc/svnserve.nix
+  ./services/misc/synergy.nix
+  ./services/misc/sysprof.nix
+  ./services/misc/tandoor-recipes.nix
+  ./services/misc/taskserver
+  ./services/misc/tautulli.nix
+  ./services/misc/tiddlywiki.nix
+  ./services/misc/tp-auto-kbbl.nix
+  ./services/misc/tzupdate.nix
+  ./services/misc/uhub.nix
+  ./services/misc/weechat.nix
+  ./services/misc/xmr-stak.nix
+  ./services/misc/xmrig.nix
+  ./services/misc/zoneminder.nix
+  ./services/misc/zookeeper.nix
+  ./services/monitoring/alerta.nix
+  ./services/monitoring/apcupsd.nix
+  ./services/monitoring/arbtt.nix
+  ./services/monitoring/below.nix
+  ./services/monitoring/bosun.nix
+  ./services/monitoring/cadvisor.nix
+  ./services/monitoring/certspotter.nix
+  ./services/monitoring/cockpit.nix
+  ./services/monitoring/collectd.nix
+  ./services/monitoring/das_watchdog.nix
+  ./services/monitoring/datadog-agent.nix
+  ./services/monitoring/do-agent.nix
+  ./services/monitoring/fusion-inventory.nix
+  ./services/monitoring/goss.nix
+  ./services/monitoring/grafana-agent.nix
+  ./services/monitoring/grafana-image-renderer.nix
+  ./services/monitoring/grafana-reporter.nix
+  ./services/monitoring/grafana.nix
+  ./services/monitoring/graphite.nix
+  ./services/monitoring/hdaps.nix
+  ./services/monitoring/heapster.nix
+  ./services/monitoring/incron.nix
+  ./services/monitoring/kapacitor.nix
+  ./services/monitoring/karma.nix
+  ./services/monitoring/kthxbye.nix
+  ./services/monitoring/librenms.nix
+  ./services/monitoring/loki.nix
+  ./services/monitoring/longview.nix
+  ./services/monitoring/mackerel-agent.nix
+  ./services/monitoring/metricbeat.nix
+  ./services/monitoring/mimir.nix
+  ./services/monitoring/monit.nix
+  ./services/monitoring/munin.nix
+  ./services/monitoring/nagios.nix
+  ./services/monitoring/netdata.nix
+  ./services/monitoring/ocsinventory-agent.nix
+  ./services/monitoring/opentelemetry-collector.nix
+  ./services/monitoring/osquery.nix
+  ./services/monitoring/parsedmarc.nix
+  ./services/monitoring/prometheus/alertmanager-irc-relay.nix
+  ./services/monitoring/prometheus/alertmanager.nix
+  ./services/monitoring/prometheus/default.nix
+  ./services/monitoring/prometheus/exporters.nix
+  ./services/monitoring/prometheus/pushgateway.nix
+  ./services/monitoring/prometheus/sachet.nix
+  ./services/monitoring/prometheus/xmpp-alerts.nix
+  ./services/monitoring/riemann-dash.nix
+  ./services/monitoring/riemann-tools.nix
+  ./services/monitoring/riemann.nix
+  ./services/monitoring/scollector.nix
+  ./services/monitoring/smartd.nix
+  ./services/monitoring/statsd.nix
+  ./services/monitoring/sysstat.nix
+  ./services/monitoring/teamviewer.nix
+  ./services/monitoring/telegraf.nix
+  ./services/monitoring/thanos.nix
+  ./services/monitoring/tremor-rs.nix
+  ./services/monitoring/tuptime.nix
+  ./services/monitoring/unpoller.nix
+  ./services/monitoring/ups.nix
+  ./services/monitoring/uptime-kuma.nix
+  ./services/monitoring/uptime.nix
+  ./services/monitoring/vmagent.nix
+  ./services/monitoring/vmalert.nix
+  ./services/monitoring/vnstat.nix
+  ./services/monitoring/zabbix-agent.nix
+  ./services/monitoring/zabbix-proxy.nix
+  ./services/monitoring/zabbix-server.nix
+  ./services/network-filesystems/cachefilesd.nix
+  ./services/network-filesystems/ceph.nix
+  ./services/network-filesystems/davfs2.nix
+  ./services/network-filesystems/diod.nix
+  ./services/network-filesystems/drbd.nix
+  ./services/network-filesystems/eris-server.nix
+  ./services/network-filesystems/glusterfs.nix
+  ./services/network-filesystems/kbfs.nix
+  ./services/network-filesystems/kubo.nix
+  ./services/network-filesystems/litestream/default.nix
+  ./services/network-filesystems/moosefs.nix
+  ./services/network-filesystems/netatalk.nix
+  ./services/network-filesystems/nfsd.nix
+  ./services/network-filesystems/openafs/client.nix
+  ./services/network-filesystems/openafs/server.nix
+  ./services/network-filesystems/orangefs/client.nix
+  ./services/network-filesystems/orangefs/server.nix
+  ./services/network-filesystems/rsyncd.nix
+  ./services/network-filesystems/samba-wsdd.nix
+  ./services/network-filesystems/samba.nix
+  ./services/network-filesystems/tahoe.nix
+  ./services/network-filesystems/u9fs.nix
+  ./services/network-filesystems/webdav-server-rs.nix
+  ./services/network-filesystems/webdav.nix
+  ./services/network-filesystems/xtreemfs.nix
+  ./services/network-filesystems/yandex-disk.nix
+  ./services/networking/3proxy.nix
+  ./services/networking/acme-dns.nix
+  ./services/networking/adguardhome.nix
+  ./services/networking/alice-lg.nix
+  ./services/networking/amuled.nix
+  ./services/networking/antennas.nix
+  ./services/networking/aria2.nix
+  ./services/networking/asterisk.nix
+  ./services/networking/atftpd.nix
+  ./services/networking/autossh.nix
+  ./services/networking/avahi-daemon.nix
+  ./services/networking/babeld.nix
+  ./services/networking/bee-clef.nix
+  ./services/networking/bee.nix
+  ./services/networking/biboumi.nix
+  ./services/networking/bind.nix
+  ./services/networking/bird-lg.nix
+  ./services/networking/bird.nix
+  ./services/networking/birdwatcher.nix
+  ./services/networking/bitcoind.nix
+  ./services/networking/bitlbee.nix
+  ./services/networking/blockbook-frontend.nix
+  ./services/networking/blocky.nix
+  ./services/networking/cgit.nix
+  ./services/networking/charybdis.nix
+  ./services/networking/chisel-server.nix
+  ./services/networking/cjdns.nix
+  ./services/networking/cloudflare-dyndns.nix
+  ./services/networking/cloudflared.nix
+  ./services/networking/cntlm.nix
+  ./services/networking/connman.nix
+  ./services/networking/consul.nix
+  ./services/networking/coredns.nix
+  ./services/networking/corerad.nix
+  ./services/networking/coturn.nix
+  ./services/networking/create_ap.nix
+  ./services/networking/croc.nix
+  ./services/networking/dae.nix
+  ./services/networking/dante.nix
+  ./services/networking/deconz.nix
+  ./services/networking/ddclient.nix
+  ./services/networking/dhcpcd.nix
+  ./services/networking/dnscache.nix
+  ./services/networking/dnscrypt-proxy2.nix
+  ./services/networking/dnscrypt-wrapper.nix
+  ./services/networking/dnsdist.nix
+  ./services/networking/dnsmasq.nix
+  ./services/networking/doh-proxy-rust.nix
+  ./services/networking/ejabberd.nix
+  ./services/networking/envoy.nix
+  ./services/networking/epmd.nix
+  ./services/networking/ergo.nix
+  ./services/networking/ergochat.nix
+  ./services/networking/eternal-terminal.nix
+  ./services/networking/expressvpn.nix
+  ./services/networking/fakeroute.nix
+  ./services/networking/fastnetmon-advanced.nix
+  ./services/networking/ferm.nix
+  ./services/networking/firefox-syncserver.nix
+  ./services/networking/fireqos.nix
+  ./services/networking/firewall.nix
+  ./services/networking/firewall-iptables.nix
+  ./services/networking/firewall-nftables.nix
+  ./services/networking/flannel.nix
+  ./services/networking/freenet.nix
+  ./services/networking/freeradius.nix
+  ./services/networking/frp.nix
+  ./services/networking/frr.nix
+  ./services/networking/gateone.nix
+  ./services/networking/gdomap.nix
+  ./services/networking/ghostunnel.nix
+  ./services/networking/git-daemon.nix
+  ./services/networking/globalprotect-vpn.nix
+  ./services/networking/gnunet.nix
+  ./services/networking/go-autoconfig.nix
+  ./services/networking/go-neb.nix
+  ./services/networking/go-shadowsocks2.nix
+  ./services/networking/gobgpd.nix
+  ./services/networking/gvpe.nix
+  ./services/networking/hans.nix
+  ./services/networking/harmonia.nix
+  ./services/networking/haproxy.nix
+  ./services/networking/headscale.nix
+  ./services/networking/hostapd.nix
+  ./services/networking/htpdate.nix
+  ./services/networking/https-dns-proxy.nix
+  ./services/networking/hylafax/default.nix
+  ./services/networking/i2p.nix
+  ./services/networking/i2pd.nix
+  ./services/networking/icecream/daemon.nix
+  ./services/networking/icecream/scheduler.nix
+  ./services/networking/imaginary.nix
+  ./services/networking/inspircd.nix
+  ./services/networking/iodine.nix
+  ./services/networking/iperf3.nix
+  ./services/networking/ircd-hybrid/default.nix
+  ./services/networking/iscsi/initiator.nix
+  ./services/networking/iscsi/root-initiator.nix
+  ./services/networking/iscsi/target.nix
+  ./services/networking/ivpn.nix
+  ./services/networking/iwd.nix
+  ./services/networking/jibri/default.nix
+  ./services/networking/jicofo.nix
+  ./services/networking/jitsi-videobridge.nix
+  ./services/networking/jool.nix
+  ./services/networking/kea.nix
+  ./services/networking/keepalived/default.nix
+  ./services/networking/keybase.nix
+  ./services/networking/knot.nix
+  ./services/networking/kresd.nix
+  ./services/networking/lambdabot.nix
+  ./services/networking/legit.nix
+  ./services/networking/libreswan.nix
+  ./services/networking/lldpd.nix
+  ./services/networking/logmein-hamachi.nix
+  ./services/networking/lokinet.nix
+  ./services/networking/lxd-image-server.nix
+  ./services/networking/magic-wormhole-mailbox-server.nix
+  ./services/networking/matterbridge.nix
+  ./services/networking/minidlna.nix
+  ./services/networking/miniupnpd.nix
+  ./services/networking/miredo.nix
+  ./services/networking/mjpg-streamer.nix
+  ./services/networking/mmsd.nix
+  ./services/networking/monero.nix
+  ./services/networking/morty.nix
+  ./services/networking/mosquitto.nix
+  ./services/networking/mozillavpn.nix
+  ./services/networking/mstpd.nix
+  ./services/networking/mtprotoproxy.nix
+  ./services/networking/mtr-exporter.nix
+  ./services/networking/mullvad-vpn.nix
+  ./services/networking/multipath.nix
+  ./services/networking/murmur.nix
+  ./services/networking/mxisd.nix
+  ./services/networking/namecoind.nix
+  ./services/networking/nar-serve.nix
+  ./services/networking/nat.nix
+  ./services/networking/nat-iptables.nix
+  ./services/networking/nat-nftables.nix
+  ./services/networking/nats.nix
+  ./services/networking/nbd.nix
+  ./services/networking/ncdns.nix
+  ./services/networking/ndppd.nix
+  ./services/networking/nebula.nix
+  ./services/networking/netbird.nix
+  ./services/networking/netclient.nix
+  ./services/networking/networkd-dispatcher.nix
+  ./services/networking/networkmanager.nix
+  ./services/networking/nextdns.nix
+  ./services/networking/nftables.nix
+  ./services/networking/nghttpx/default.nix
+  ./services/networking/ngircd.nix
+  ./services/networking/nix-serve.nix
+  ./services/networking/nix-store-gcs-proxy.nix
+  ./services/networking/nixops-dns.nix
+  ./services/networking/nncp.nix
+  ./services/networking/nntp-proxy.nix
+  ./services/networking/nomad.nix
+  ./services/networking/nsd.nix
+  ./services/networking/ntopng.nix
+  ./services/networking/ntp/chrony.nix
+  ./services/networking/ntp/ntpd.nix
+  ./services/networking/ntp/openntpd.nix
+  ./services/networking/nullidentdmod.nix
+  ./services/networking/nylon.nix
+  ./services/networking/ocserv.nix
+  ./services/networking/ofono.nix
+  ./services/networking/oidentd.nix
+  ./services/networking/onedrive.nix
+  ./services/networking/openconnect.nix
+  ./services/networking/openvpn.nix
+  ./services/networking/ostinato.nix
+  ./services/networking/owamp.nix
+  ./services/networking/pdns-recursor.nix
+  ./services/networking/pdnsd.nix
+  ./services/networking/peroxide.nix
+  ./services/networking/picosnitch.nix
+  ./services/networking/pixiecore.nix
+  ./services/networking/pleroma.nix
+  ./services/networking/polipo.nix
+  ./services/networking/powerdns.nix
+  ./services/networking/pppd.nix
+  ./services/networking/pptpd.nix
+  ./services/networking/privoxy.nix
+  ./services/networking/prosody.nix
+  ./services/networking/quassel.nix
+  ./services/networking/quicktun.nix
+  ./services/networking/quorum.nix
+  ./services/networking/r53-ddns.nix
+  ./services/networking/radicale.nix
+  ./services/networking/radvd.nix
+  ./services/networking/rdnssd.nix
+  ./services/networking/redsocks.nix
+  ./services/networking/resilio.nix
+  ./services/networking/robustirc-bridge.nix
+  ./services/networking/rosenpass.nix
+  ./services/networking/routedns.nix
+  ./services/networking/rpcbind.nix
+  ./services/networking/rxe.nix
+  ./services/networking/sabnzbd.nix
+  ./services/networking/seafile.nix
+  ./services/networking/searx.nix
+  ./services/networking/shadowsocks.nix
+  ./services/networking/shairport-sync.nix
+  ./services/networking/shellhub-agent.nix
+  ./services/networking/shorewall.nix
+  ./services/networking/shorewall6.nix
+  ./services/networking/shout.nix
+  ./services/networking/sing-box.nix
+  ./services/networking/sitespeed-io.nix
+  ./services/networking/skydns.nix
+  ./services/networking/smartdns.nix
+  ./services/networking/smokeping.nix
+  ./services/networking/sniproxy.nix
+  ./services/networking/snowflake-proxy.nix
+  ./services/networking/softether.nix
+  ./services/networking/soju.nix
+  ./services/networking/solanum.nix
+  ./services/networking/spacecookie.nix
+  ./services/networking/spiped.nix
+  ./services/networking/squid.nix
+  ./services/networking/ssh/lshd.nix
+  ./services/networking/ssh/sshd.nix
+  ./services/networking/sslh.nix
+  ./services/networking/strongswan-swanctl/module.nix
+  ./services/networking/strongswan.nix
+  ./services/networking/stubby.nix
+  ./services/networking/stunnel.nix
+  ./services/networking/supplicant.nix
+  ./services/networking/supybot.nix
+  ./services/networking/syncplay.nix
+  ./services/networking/syncthing-relay.nix
+  ./services/networking/syncthing.nix
+  ./services/networking/tailscale.nix
+  ./services/networking/tayga.nix
+  ./services/networking/tcpcrypt.nix
+  ./services/networking/teamspeak3.nix
+  ./services/networking/teleport.nix
+  ./services/networking/tetrd.nix
+  ./services/networking/tftpd.nix
+  ./services/networking/thelounge.nix
+  ./services/networking/tinc.nix
+  ./services/networking/tinydns.nix
+  ./services/networking/tinyproxy.nix
+  ./services/networking/tmate-ssh-server.nix
+  ./services/networking/tox-bootstrapd.nix
+  ./services/networking/tox-node.nix
+  ./services/networking/toxvpn.nix
+  ./services/networking/trickster.nix
+  ./services/networking/trust-dns.nix
+  ./services/networking/tvheadend.nix
+  ./services/networking/twingate.nix
+  ./services/networking/ucarp.nix
+  ./services/networking/unbound.nix
+  ./services/networking/unifi.nix
+  ./services/networking/uptermd.nix
+  ./services/networking/v2ray.nix
+  ./services/networking/v2raya.nix
+  ./services/networking/vdirsyncer.nix
+  ./services/networking/vsftpd.nix
+  ./services/networking/wasabibackend.nix
+  ./services/networking/websockify.nix
+  ./services/networking/wg-netmanager.nix
+  ./services/networking/webhook.nix
+  ./services/networking/wg-quick.nix
+  ./services/networking/wgautomesh.nix
+  ./services/networking/wireguard.nix
+  ./services/networking/wpa_supplicant.nix
+  ./services/networking/wstunnel.nix
+  ./services/networking/x2goserver.nix
+  ./services/networking/xandikos.nix
+  ./services/networking/xinetd.nix
+  ./services/networking/xl2tpd.nix
+  ./services/networking/xray.nix
+  ./services/networking/xrdp.nix
+  ./services/networking/yggdrasil.nix
+  ./services/networking/zerobin.nix
+  ./services/networking/zeronet.nix
+  ./services/networking/zerotierone.nix
+  ./services/networking/znc/default.nix
+  ./services/printing/cupsd.nix
+  ./services/printing/ipp-usb.nix
+  ./services/printing/cups-pdf.nix
+  ./services/scheduling/atd.nix
+  ./services/scheduling/cron.nix
+  ./services/scheduling/fcron.nix
+  ./services/search/elasticsearch-curator.nix
+  ./services/search/elasticsearch.nix
+  ./services/search/hound.nix
+  ./services/search/meilisearch.nix
+  ./services/search/opensearch.nix
+  ./services/search/qdrant.nix
+  ./services/search/typesense.nix
+  ./services/security/aesmd.nix
+  ./services/security/authelia.nix
+  ./services/security/certmgr.nix
+  ./services/security/cfssl.nix
+  ./services/security/clamav.nix
+  ./services/security/endlessh-go.nix
+  ./services/security/endlessh.nix
+  ./services/security/esdm.nix
+  ./services/security/fail2ban.nix
+  ./services/security/fprintd.nix
+  ./services/security/haka.nix
+  ./services/security/haveged.nix
+  ./services/security/hockeypuck.nix
+  ./services/security/hologram-agent.nix
+  ./services/security/hologram-server.nix
+  ./services/security/infnoise.nix
+  ./services/security/jitterentropy-rngd.nix
+  ./services/security/kanidm.nix
+  ./services/security/munge.nix
+  ./services/security/nginx-sso.nix
+  ./services/security/oauth2_proxy.nix
+  ./services/security/oauth2_proxy_nginx.nix
+  ./services/security/opensnitch.nix
+  ./services/security/pass-secret-service.nix
+  ./services/security/physlock.nix
+  ./services/security/shibboleth-sp.nix
+  ./services/security/sks.nix
+  ./services/security/sshguard.nix
+  ./services/security/sslmate-agent.nix
+  ./services/security/step-ca.nix
+  ./services/security/tang.nix
+  ./services/security/tor.nix
+  ./services/security/torify.nix
+  ./services/security/torsocks.nix
+  ./services/security/usbguard.nix
+  ./services/security/vault.nix
+  ./services/security/vault-agent.nix
+  ./services/security/vaultwarden/default.nix
+  ./services/security/yubikey-agent.nix
+  ./services/system/automatic-timezoned.nix
+  ./services/system/bpftune.nix
+  ./services/system/cachix-agent/default.nix
+  ./services/system/cachix-watch-store.nix
+  ./services/system/cloud-init.nix
+  ./services/system/dbus.nix
+  ./services/system/earlyoom.nix
+  ./services/system/kerberos/default.nix
+  ./services/system/localtimed.nix
+  ./services/system/nix-daemon.nix
+  ./services/system/nscd.nix
+  ./services/system/saslauthd.nix
+  ./services/system/self-deploy.nix
+  ./services/system/systembus-notify.nix
+  ./services/system/uptimed.nix
+  ./services/system/zram-generator.nix
+  ./services/torrent/deluge.nix
+  ./services/torrent/flexget.nix
+  ./services/torrent/magnetico.nix
+  ./services/torrent/opentracker.nix
+  ./services/torrent/peerflix.nix
+  ./services/torrent/rtorrent.nix
+  ./services/torrent/transmission.nix
+  ./services/tracing/tempo.nix
+  ./services/ttys/getty.nix
+  ./services/ttys/gpm.nix
+  ./services/ttys/kmscon.nix
+  ./services/video/epgstation/default.nix
+  ./services/video/go2rtc/default.nix
+  ./services/video/frigate.nix
+  ./services/video/mirakurun.nix
+  ./services/video/replay-sorcery.nix
+  ./services/video/mediamtx.nix
+  ./services/video/unifi-video.nix
+  ./services/video/v4l2-relayd.nix
+  ./services/wayland/cage.nix
+  ./services/web-apps/akkoma.nix
+  ./services/web-apps/alps.nix
+  ./services/web-apps/anuko-time-tracker.nix
+  ./services/web-apps/atlassian/confluence.nix
+  ./services/web-apps/atlassian/crowd.nix
+  ./services/web-apps/atlassian/jira.nix
+  ./services/web-apps/audiobookshelf.nix
+  ./services/web-apps/bookstack.nix
+  ./services/web-apps/c2fmzq-server.nix
+  ./services/web-apps/calibre-web.nix
+  ./services/web-apps/coder.nix
+  ./services/web-apps/changedetection-io.nix
+  ./services/web-apps/chatgpt-retrieval-plugin.nix
+  ./services/web-apps/cloudlog.nix
+  ./services/web-apps/convos.nix
+  ./services/web-apps/dex.nix
+  ./services/web-apps/discourse.nix
+  ./services/web-apps/documize.nix
+  ./services/web-apps/dokuwiki.nix
+  ./services/web-apps/dolibarr.nix
+  ./services/web-apps/engelsystem.nix
+  ./services/web-apps/ethercalc.nix
+  ./services/web-apps/fluidd.nix
+  ./services/web-apps/freshrss.nix
+  ./services/web-apps/galene.nix
+  ./services/web-apps/gerrit.nix
+  ./services/web-apps/gotify-server.nix
+  ./services/web-apps/gotosocial.nix
+  ./services/web-apps/grocy.nix
+  ./services/web-apps/pixelfed.nix
+  ./services/web-apps/guacamole-client.nix
+  ./services/web-apps/guacamole-server.nix
+  ./services/web-apps/healthchecks.nix
+  ./services/web-apps/hedgedoc.nix
+  ./services/web-apps/hledger-web.nix
+  ./services/web-apps/honk.nix
+  ./services/web-apps/icingaweb2/icingaweb2.nix
+  ./services/web-apps/icingaweb2/module-monitoring.nix
+  ./services/web-apps/invidious.nix
+  ./services/web-apps/invoiceplane.nix
+  ./services/web-apps/isso.nix
+  ./services/web-apps/jirafeau.nix
+  ./services/web-apps/jitsi-meet.nix
+  ./services/web-apps/kasmweb/default.nix
+  ./services/web-apps/kavita.nix
+  ./services/web-apps/keycloak.nix
+  ./services/web-apps/komga.nix
+  ./services/web-apps/lanraragi.nix
+  ./services/web-apps/lemmy.nix
+  ./services/web-apps/limesurvey.nix
+  ./services/web-apps/mainsail.nix
+  ./services/web-apps/mastodon.nix
+  ./services/web-apps/matomo.nix
+  ./services/web-apps/mattermost.nix
+  ./services/web-apps/mediawiki.nix
+  ./services/web-apps/meme-bingo-web.nix
+  ./services/web-apps/microbin.nix
+  ./services/web-apps/miniflux.nix
+  ./services/web-apps/monica.nix
+  ./services/web-apps/moodle.nix
+  ./services/web-apps/netbox.nix
+  ./services/web-apps/nextcloud.nix
+  ./services/web-apps/nextcloud-notify_push.nix
+  ./services/web-apps/nexus.nix
+  ./services/web-apps/nifi.nix
+  ./services/web-apps/node-red.nix
+  ./services/web-apps/onlyoffice.nix
+  ./services/web-apps/openvscode-server.nix
+  ./services/web-apps/mobilizon.nix
+  ./services/web-apps/openwebrx.nix
+  ./services/web-apps/outline.nix
+  ./services/web-apps/peering-manager.nix
+  ./services/web-apps/peertube.nix
+  ./services/web-apps/pgpkeyserver-lite.nix
+  ./services/web-apps/phylactery.nix
+  ./services/web-apps/photoprism.nix
+  ./services/web-apps/pict-rs.nix
+  ./services/web-apps/plantuml-server.nix
+  ./services/web-apps/plausible.nix
+  ./services/web-apps/powerdns-admin.nix
+  ./services/web-apps/prosody-filer.nix
+  ./services/web-apps/restya-board.nix
+  ./services/web-apps/rimgo.nix
+  ./services/web-apps/sftpgo.nix
+  ./services/web-apps/rss-bridge.nix
+  ./services/web-apps/selfoss.nix
+  ./services/web-apps/shiori.nix
+  ./services/web-apps/slskd.nix
+  ./services/web-apps/snipe-it.nix
+  ./services/web-apps/sogo.nix
+  ./services/web-apps/trilium.nix
+  ./services/web-apps/tt-rss.nix
+  ./services/web-apps/vikunja.nix
+  ./services/web-apps/whitebophir.nix
+  ./services/web-apps/wiki-js.nix
+  ./services/web-apps/wordpress.nix
+  ./services/web-apps/writefreely.nix
+  ./services/web-apps/youtrack.nix
+  ./services/web-apps/zabbix.nix
+  ./services/web-apps/zitadel.nix
+  ./services/web-servers/agate.nix
+  ./services/web-servers/apache-httpd/default.nix
+  ./services/web-servers/caddy/default.nix
+  ./services/web-servers/darkhttpd.nix
+  ./services/web-servers/fcgiwrap.nix
+  ./services/web-servers/garage.nix
+  ./services/web-servers/hitch/default.nix
+  ./services/web-servers/hydron.nix
+  ./services/web-servers/jboss/default.nix
+  ./services/web-servers/keter
+  ./services/web-servers/lighttpd/cgit.nix
+  ./services/web-servers/lighttpd/collectd.nix
+  ./services/web-servers/lighttpd/default.nix
+  ./services/web-servers/lighttpd/gitweb.nix
+  ./services/web-servers/merecat.nix
+  ./services/web-servers/mighttpd2.nix
+  ./services/web-servers/minio.nix
+  ./services/web-servers/molly-brown.nix
+  ./services/web-servers/nginx/default.nix
+  ./services/web-servers/nginx/gitweb.nix
+  ./services/web-servers/phpfpm/default.nix
+  ./services/web-servers/pomerium.nix
+  ./services/web-servers/rustus.nix
+  ./services/web-servers/stargazer.nix
+  ./services/web-servers/static-web-server.nix
+  ./services/web-servers/tomcat.nix
+  ./services/web-servers/traefik.nix
+  ./services/web-servers/trafficserver/default.nix
+  ./services/web-servers/ttyd.nix
+  ./services/web-servers/unit/default.nix
+  ./services/web-servers/uwsgi.nix
+  ./services/web-servers/varnish/default.nix
+  ./services/web-servers/zope2.nix
+  ./services/x11/clight.nix
+  ./services/x11/colord.nix
+  ./services/x11/desktop-managers/default.nix
+  ./services/x11/display-managers/default.nix
+  ./services/x11/display-managers/gdm.nix
+  ./services/x11/display-managers/lightdm.nix
+  ./services/x11/display-managers/sddm.nix
+  ./services/x11/display-managers/slim.nix
+  ./services/x11/display-managers/startx.nix
+  ./services/x11/display-managers/sx.nix
+  ./services/x11/display-managers/xpra.nix
+  ./services/x11/extra-layouts.nix
+  ./services/x11/fractalart.nix
+  ./services/x11/gdk-pixbuf.nix
+  ./services/x11/hardware/cmt.nix
+  ./services/x11/hardware/digimend.nix
+  ./services/x11/hardware/libinput.nix
+  ./services/x11/hardware/synaptics.nix
+  ./services/x11/hardware/wacom.nix
+  ./services/x11/imwheel.nix
+  ./services/x11/picom.nix
+  ./services/x11/redshift.nix
+  ./services/x11/touchegg.nix
+  ./services/x11/unclutter-xfixes.nix
+  ./services/x11/unclutter.nix
+  ./services/x11/urserver.nix
+  ./services/x11/urxvtd.nix
+  ./services/x11/window-managers/awesome.nix
+  ./services/x11/window-managers/bspwm.nix
+  ./services/x11/window-managers/clfswm.nix
+  ./services/x11/window-managers/default.nix
+  ./services/x11/window-managers/fluxbox.nix
+  ./services/x11/window-managers/icewm.nix
+  ./services/x11/window-managers/katriawm.nix
+  ./services/x11/window-managers/metacity.nix
+  ./services/x11/window-managers/nimdow.nix
+  ./services/x11/window-managers/none.nix
+  ./services/x11/window-managers/twm.nix
+  ./services/x11/window-managers/windowlab.nix
+  ./services/x11/window-managers/wmii.nix
+  ./services/x11/window-managers/xmonad.nix
+  ./services/x11/xautolock.nix
+  ./services/x11/xbanish.nix
+  ./services/x11/xfs.nix
+  ./services/x11/xserver.nix
+  ./system/activation/activatable-system.nix
+  ./system/activation/activation-script.nix
+  ./system/activation/specialisation.nix
+  ./system/activation/switchable-system.nix
+  ./system/activation/bootspec.nix
+  ./system/activation/top-level.nix
+  ./system/boot/binfmt.nix
+  ./system/boot/emergency-mode.nix
+  ./system/boot/grow-partition.nix
+  ./system/boot/initrd-network.nix
+  ./system/boot/initrd-openvpn.nix
+  ./system/boot/initrd-ssh.nix
+  ./system/boot/kernel.nix
+  ./system/boot/kexec.nix
+  ./system/boot/loader/efi.nix
+  ./system/boot/loader/generations-dir/generations-dir.nix
+  ./system/boot/loader/generic-extlinux-compatible
+  ./system/boot/loader/grub/grub.nix
+  ./system/boot/loader/grub/ipxe.nix
+  ./system/boot/loader/grub/memtest.nix
+  ./system/boot/loader/external/external.nix
+  ./system/boot/loader/init-script/init-script.nix
+  ./system/boot/loader/loader.nix
+  ./system/boot/loader/raspberrypi/raspberrypi.nix
+  ./system/boot/loader/systemd-boot/systemd-boot.nix
+  ./system/boot/luksroot.nix
+  ./system/boot/stratisroot.nix
+  ./system/boot/modprobe.nix
+  ./system/boot/networkd.nix
+  ./system/boot/plymouth.nix
+  ./system/boot/resolved.nix
+  ./system/boot/shutdown.nix
+  ./system/boot/stage-1.nix
+  ./system/boot/stage-2.nix
+  ./system/boot/systemd.nix
+  ./system/boot/systemd/coredump.nix
+  ./system/boot/systemd/initrd-secrets.nix
+  ./system/boot/systemd/initrd.nix
+  ./system/boot/systemd/journald.nix
+  ./system/boot/systemd/logind.nix
+  ./system/boot/systemd/nspawn.nix
+  ./system/boot/systemd/oomd.nix
+  ./system/boot/systemd/repart.nix
+  ./system/boot/systemd/shutdown.nix
+  ./system/boot/systemd/sysupdate.nix
+  ./system/boot/systemd/tmpfiles.nix
+  ./system/boot/systemd/user.nix
+  ./system/boot/systemd/userdbd.nix
+  ./system/boot/systemd/homed.nix
+  ./system/boot/timesyncd.nix
+  ./system/boot/tmp.nix
+  ./system/boot/uvesafb.nix
+  ./system/etc/etc-activation.nix
+  ./tasks/auto-upgrade.nix
+  ./tasks/bcache.nix
+  ./tasks/cpu-freq.nix
+  ./tasks/encrypted-devices.nix
+  ./tasks/filesystems.nix
+  ./tasks/filesystems/apfs.nix
+  ./tasks/filesystems/bcachefs.nix
+  ./tasks/filesystems/btrfs.nix
+  ./tasks/filesystems/cifs.nix
+  ./tasks/filesystems/ecryptfs.nix
+  ./tasks/filesystems/envfs.nix
+  ./tasks/filesystems/erofs.nix
+  ./tasks/filesystems/exfat.nix
+  ./tasks/filesystems/ext.nix
+  ./tasks/filesystems/f2fs.nix
+  ./tasks/filesystems/jfs.nix
+  ./tasks/filesystems/nfs.nix
+  ./tasks/filesystems/ntfs.nix
+  ./tasks/filesystems/reiserfs.nix
+  ./tasks/filesystems/squashfs.nix
+  ./tasks/filesystems/unionfs-fuse.nix
+  ./tasks/filesystems/vboxsf.nix
+  ./tasks/filesystems/vfat.nix
+  ./tasks/filesystems/xfs.nix
+  ./tasks/filesystems/zfs.nix
+  ./tasks/lvm.nix
+  ./tasks/network-interfaces-scripted.nix
+  ./tasks/network-interfaces-systemd.nix
+  ./tasks/network-interfaces.nix
+  ./tasks/powertop.nix
+  ./tasks/scsi-link-power-management.nix
+  ./tasks/snapraid.nix
+  ./tasks/stratis.nix
+  ./tasks/swraid.nix
+  ./tasks/trackpoint.nix
+  ./testing/service-runner.nix
+  ./virtualisation/amazon-options.nix
+  ./virtualisation/anbox.nix
+  ./virtualisation/appvm.nix
+  ./virtualisation/build-vm.nix
+  ./virtualisation/container-config.nix
+  ./virtualisation/containerd.nix
+  ./virtualisation/containers.nix
+  ./virtualisation/cri-o.nix
+  ./virtualisation/docker-rootless.nix
+  ./virtualisation/docker.nix
+  ./virtualisation/ecs-agent.nix
+  ./virtualisation/hyperv-guest.nix
+  ./virtualisation/incus.nix
+  ./virtualisation/kvmgt.nix
+  ./virtualisation/libvirtd.nix
+  ./virtualisation/lxc.nix
+  ./virtualisation/lxcfs.nix
+  ./virtualisation/lxd.nix
+  ./virtualisation/lxd-agent.nix
+  ./virtualisation/multipass.nix
+  ./virtualisation/nixos-containers.nix
+  ./virtualisation/oci-containers.nix
+  ./virtualisation/openstack-options.nix
+  ./virtualisation/oci-options.nix
+  ./virtualisation/openvswitch.nix
+  ./virtualisation/parallels-guest.nix
+  ./virtualisation/podman/default.nix
+  ./virtualisation/qemu-guest-agent.nix
+  ./virtualisation/rosetta.nix
+  ./virtualisation/spice-usb-redirection.nix
+  ./virtualisation/virtualbox-guest.nix
+  ./virtualisation/virtualbox-host.nix
+  ./virtualisation/vmware-guest.nix
+  ./virtualisation/vmware-host.nix
+  ./virtualisation/waydroid.nix
+  ./virtualisation/xe-guest-utilities.nix
+  ./virtualisation/xen-dom0.nix
+  { documentation.nixos.extraModules = [
+    ./virtualisation/qemu-vm.nix
+    ./image/repart.nix
+    ];
+  }
+]
diff --git a/nixpkgs/nixos/modules/profiles/all-hardware.nix b/nixpkgs/nixos/modules/profiles/all-hardware.nix
new file mode 100644
index 000000000000..4857ea4dbeae
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/all-hardware.nix
@@ -0,0 +1,126 @@
+# This module enables all hardware supported by NixOS: i.e., all
+# firmware is included, and all devices from which one may boot are
+# enabled in the initrd.  Its primary use is in the NixOS installation
+# CDs.
+
+{ pkgs, lib, ... }:
+let
+  platform = pkgs.stdenv.hostPlatform;
+in
+{
+
+  # The initrd has to contain any module that might be necessary for
+  # supporting the most important parts of HW like drives.
+  boot.initrd.availableKernelModules =
+    [ # SATA/PATA support.
+      "ahci"
+
+      "ata_piix"
+
+      "sata_inic162x" "sata_nv" "sata_promise" "sata_qstor"
+      "sata_sil" "sata_sil24" "sata_sis" "sata_svw" "sata_sx4"
+      "sata_uli" "sata_via" "sata_vsc"
+
+      "pata_ali" "pata_amd" "pata_artop" "pata_atiixp" "pata_efar"
+      "pata_hpt366" "pata_hpt37x" "pata_hpt3x2n" "pata_hpt3x3"
+      "pata_it8213" "pata_it821x" "pata_jmicron" "pata_marvell"
+      "pata_mpiix" "pata_netcell" "pata_ns87410" "pata_oldpiix"
+      "pata_pcmcia" "pata_pdc2027x" "pata_qdi" "pata_rz1000"
+      "pata_serverworks" "pata_sil680" "pata_sis"
+      "pata_sl82c105" "pata_triflex" "pata_via"
+      "pata_winbond"
+
+      # SCSI support (incomplete).
+      "3w-9xxx" "3w-xxxx" "aic79xx" "aic7xxx" "arcmsr" "hpsa"
+
+      # USB support, especially for booting from USB CD-ROM
+      # drives.
+      "uas"
+
+      # SD cards.
+      "sdhci_pci"
+
+      # NVMe drives
+      "nvme"
+
+      # Firewire support.  Not tested.
+      "ohci1394" "sbp2"
+
+      # Virtio (QEMU, KVM etc.) support.
+      "virtio_net" "virtio_pci" "virtio_mmio" "virtio_blk" "virtio_scsi" "virtio_balloon" "virtio_console"
+
+      # VMware support.
+      "mptspi" "vmxnet3" "vsock"
+    ] ++ lib.optional platform.isx86 "vmw_balloon"
+    ++ lib.optionals (pkgs.stdenv.isi686 || pkgs.stdenv.isx86_64) [
+      "vmw_vmci" "vmwgfx" "vmw_vsock_vmci_transport"
+
+      # Hyper-V support.
+      "hv_storvsc"
+    ] ++ lib.optionals pkgs.stdenv.hostPlatform.isAarch [
+      # Most of the following falls into two categories:
+      #  - early KMS / early display
+      #  - early storage (e.g. USB) support
+
+      # Allows using framebuffer configured by the initial boot firmware
+      "simplefb"
+
+      # Allwinner support
+
+      # Required for early KMS
+      "sun4i-drm"
+      "sun8i-mixer" # Audio, but required for kms
+
+      # PWM for the backlight
+      "pwm-sun4i"
+
+      # Broadcom
+
+      "vc4"
+    ] ++ lib.optionals pkgs.stdenv.isAarch64 [
+      # Most of the following falls into two categories:
+      #  - early KMS / early display
+      #  - early storage (e.g. USB) support
+
+      # Broadcom
+
+      "pcie-brcmstb"
+
+      # Rockchip
+      "dw-hdmi"
+      "dw-mipi-dsi"
+      "rockchipdrm"
+      "rockchip-rga"
+      "phy-rockchip-pcie"
+      "pcie-rockchip-host"
+
+      # Misc. uncategorized hardware
+
+      # Used for some platform's integrated displays
+      "panel-simple"
+      "pwm-bl"
+
+      # Power supply drivers, some platforms need them for USB
+      "axp20x-ac-power"
+      "axp20x-battery"
+      "pinctrl-axp209"
+      "mp8859"
+
+      # USB drivers
+      "xhci-pci-renesas"
+
+      # Reset controllers
+      "reset-raspberrypi" # Triggers USB chip firmware load.
+
+      # Misc "weak" dependencies
+      "analogix-dp"
+      "analogix-anx6345" # For DP or eDP (e.g. integrated display)
+    ];
+
+  # Include lots of firmware.
+  hardware.enableRedistributableFirmware = true;
+
+  imports =
+    [ ../hardware/network/zydas-zd1211.nix ];
+
+}
diff --git a/nixpkgs/nixos/modules/profiles/base.nix b/nixpkgs/nixos/modules/profiles/base.nix
new file mode 100644
index 000000000000..9f32f85a61ec
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/base.nix
@@ -0,0 +1,58 @@
+# This module defines the software packages included in the "minimal"
+# installation CD. It might be useful elsewhere.
+
+{ config, lib, pkgs, ... }:
+
+{
+  # Include some utilities that are useful for installing or repairing
+  # the system.
+  environment.systemPackages = [
+    pkgs.w3m-nographics # needed for the manual anyway
+    pkgs.testdisk # useful for repairing boot problems
+    pkgs.ms-sys # for writing Microsoft boot sectors / MBRs
+    pkgs.efibootmgr
+    pkgs.efivar
+    pkgs.parted
+    pkgs.gptfdisk
+    pkgs.ddrescue
+    pkgs.ccrypt
+    pkgs.cryptsetup # needed for dm-crypt volumes
+
+    # Some text editors.
+    (pkgs.vim.customize {
+      name = "vim";
+      vimrcConfig.packages.default = {
+        start = [ pkgs.vimPlugins.vim-nix ];
+      };
+      vimrcConfig.customRC = "syntax on";
+    })
+
+    # Some networking tools.
+    pkgs.fuse
+    pkgs.fuse3
+    pkgs.sshfs-fuse
+    pkgs.socat
+    pkgs.screen
+    pkgs.tcpdump
+
+    # Hardware-related tools.
+    pkgs.sdparm
+    pkgs.hdparm
+    pkgs.smartmontools # for diagnosing hard disks
+    pkgs.pciutils
+    pkgs.usbutils
+    pkgs.nvme-cli
+
+    # Some compression/archiver tools.
+    pkgs.unzip
+    pkgs.zip
+  ];
+
+  # Include support for various filesystems and tools to create / manipulate them.
+  boot.supportedFilesystems =
+    [ "btrfs" "cifs" "f2fs" "jfs" "ntfs" "reiserfs" "vfat" "xfs" ] ++
+    lib.optional (lib.meta.availableOn pkgs.stdenv.hostPlatform config.boot.zfs.package) "zfs";
+
+  # Configure host id for ZFS to work
+  networking.hostId = lib.mkDefault "8425e349";
+}
diff --git a/nixpkgs/nixos/modules/profiles/clone-config.nix b/nixpkgs/nixos/modules/profiles/clone-config.nix
new file mode 100644
index 000000000000..ba65a250d25a
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/clone-config.nix
@@ -0,0 +1,109 @@
+{ config, lib, pkgs, modules, ... }:
+
+with lib;
+
+let
+
+  # Location of the repository on the hard drive
+  nixosPath = toString ../..;
+
+  # Check if the path is from the NixOS repository
+  isNixOSFile = path:
+    let s = toString path; in
+      removePrefix nixosPath s != s;
+
+  # Copy modules given as extra configuration files.  Unfortunately, we
+  # cannot serialize attribute sets given in the list of modules (which is
+  # why you should use files).
+  moduleFiles =
+    # FIXME: use typeOf (Nix 1.6.1).
+    filter (x: !isAttrs x && !lib.isFunction x) modules;
+
+  # Partition the module files into NixOS and non-NixOS files.  NixOS
+  # files may change if the repository is updated.
+  partitionedModuleFiles =
+    let p = partition isNixOSFile moduleFiles; in
+    { nixos = p.right; others = p.wrong; };
+
+  # Paths transformed to be valid on the installation device, so that the
+  # device configuration can be rebuilt there.
+  relocatedModuleFiles =
+    let
+      relocateNixOS = path:
+        "<nixpkgs/nixos" + removePrefix nixosPath (toString path) + ">";
+    in
+      { nixos = map relocateNixOS partitionedModuleFiles.nixos;
+        others = []; # TODO: copy the modules to the install-device repository.
+      };
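+
+  # Worked example (illustrative paths only, not taken from a real system):
+  # a module file located at ${nixosPath}/modules/profiles/base.nix is
+  # relocated to <nixpkgs/nixos/modules/profiles/base.nix>, so the cloned
+  # configuration resolves it through the installed system's nixpkgs channel.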
+
+  # A dummy /etc/nixos/configuration.nix in the booted CD that
+  # rebuilds the CD's configuration (and allows the configuration to
+  # be modified, of course, providing a true live CD).  The problem is
+  # that we don't really know how the CD was built - the Nix expression
+  # language doesn't allow us to query the expression being evaluated.
+  # So we'll just hope for the best.
+  configClone = pkgs.writeText "configuration.nix"
+    ''
+      { config, pkgs, ... }:
+
+      {
+        imports = [ ${toString config.installer.cloneConfigIncludes} ];
+
+        ${config.installer.cloneConfigExtra}
+      }
+    '';
+
+in
+
+{
+
+  options = {
+
+    installer.cloneConfig = mkOption {
+      default = true;
+      description = lib.mdDoc ''
+        Try to clone the installation-device configuration by re-using its
+        profile from the list of imported modules.
+      '';
+    };
+
+    installer.cloneConfigIncludes = mkOption {
+      default = [];
+      example = [ "./nixos/modules/hardware/network/rt73.nix" ];
+      description = lib.mdDoc ''
+        List of modules used to rebuild this installation-device profile.
+      '';
+    };
+
+    installer.cloneConfigExtra = mkOption {
+      default = "";
+      description = lib.mdDoc ''
+        Extra text to include in the cloned configuration.nix provided by this
+        installer.
+      '';
+    };
+  };
+
+  config = {
+
+    installer.cloneConfigIncludes =
+      relocatedModuleFiles.nixos ++ relocatedModuleFiles.others;
+
+    boot.postBootCommands =
+      ''
+        # Provide a mount point for nixos-install.
+        mkdir -p /mnt
+
+        ${optionalString config.installer.cloneConfig ''
+          # Provide a configuration for the CD/DVD itself, to allow users
+          # to run nixos-rebuild to change the configuration of the
+          # running system on the CD/DVD.
+          if ! [ -e /etc/nixos/configuration.nix ]; then
+            cp ${configClone} /etc/nixos/configuration.nix
+          fi
+       ''}
+      '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/profiles/demo.nix b/nixpkgs/nixos/modules/profiles/demo.nix
new file mode 100644
index 000000000000..4e8c74deedba
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/demo.nix
@@ -0,0 +1,21 @@
+{ ... }:
+
+{
+  imports = [ ./graphical.nix ];
+
+  users.users.demo =
+    { isNormalUser = true;
+      description = "Demo user account";
+      extraGroups = [ "wheel" ];
+      password = "demo";
+      uid = 1000;
+    };
+
+  services.xserver.displayManager = {
+    autoLogin = {
+      enable = true;
+      user = "demo";
+    };
+    sddm.autoLogin.relogin = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/profiles/docker-container.nix b/nixpkgs/nixos/modules/profiles/docker-container.nix
new file mode 100644
index 000000000000..5365e49711dc
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/docker-container.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (pkgs) writeScript;
+
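+  # Wraps store paths in the { object; symlink; } attribute sets expected by
+  # make-system-tarball's storeContents argument ("none" means no symlink is
+  # created for the path).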
+  pkgs2storeContents = map (x: { object = x; symlink = "none"; });
+in
+
+{
+  # Docker image config.
+  imports = [
+    ../installer/cd-dvd/channel.nix
+    ./minimal.nix
+    ./clone-config.nix
+  ];
+
+  # Create the tarball
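+  # (exposed as config.system.build.tarball; container managers such as LXC
+  # or Docker import the result as the container's root filesystem)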
+  system.build.tarball = pkgs.callPackage ../../lib/make-system-tarball.nix {
+    contents = [
+      {
+        source = "${config.system.build.toplevel}/.";
+        target = "./";
+      }
+    ];
+    extraArgs = "--owner=0";
+
+    # Add init script to image
+    storeContents = pkgs2storeContents [
+      config.system.build.toplevel
+      pkgs.stdenv
+    ];
+
+    # Some container managers like lxc need these
+    extraCommands =
+      let script = writeScript "extra-commands.sh" ''
+            rm etc
+            mkdir -p proc sys dev etc
+          '';
+      in script;
+  };
+
+  boot.isContainer = true;
+  boot.postBootCommands =
+    ''
+      # After booting, register the contents of the Nix store in the Nix
+      # database.
+      if [ -f /nix-path-registration ]; then
+        ${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration &&
+        rm /nix-path-registration
+      fi
+
+      # nixos-rebuild also requires a "system" profile
+      ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
+    '';
+
+  # Install new init script
+  system.activationScripts.installInitScript = ''
+    ln -fs $systemConfig/init /init
+  '';
+}
diff --git a/nixpkgs/nixos/modules/profiles/graphical.nix b/nixpkgs/nixos/modules/profiles/graphical.nix
new file mode 100644
index 000000000000..d80456cede56
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/graphical.nix
@@ -0,0 +1,20 @@
+# This module defines a NixOS configuration with the Plasma 5 desktop.
+# It's used by the graphical installation CD.
+
+{ pkgs, ... }:
+
+{
+  services.xserver = {
+    enable = true;
+    displayManager.sddm.enable = true;
+    desktopManager.plasma5 = {
+      enable = true;
+    };
+    libinput.enable = true; # for touchpad support on many laptops
+  };
+
+  # Enable sound in VirtualBox appliances.
+  hardware.pulseaudio.enable = true;
+
+  environment.systemPackages = [ pkgs.glxinfo pkgs.firefox ];
+}
diff --git a/nixpkgs/nixos/modules/profiles/hardened.nix b/nixpkgs/nixos/modules/profiles/hardened.nix
new file mode 100644
index 000000000000..856ee480fc0b
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/hardened.nix
@@ -0,0 +1,118 @@
+# A profile with most (vanilla) hardening options enabled by default,
+# potentially at the cost of stability, features and performance.
+#
+# This profile enables options that are known to affect system
+# stability. If you experience any stability issues when using the
+# profile, try disabling it. If you report an issue and use this
+# profile, always mention that you do.
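+#
+# A minimal usage sketch (assuming the usual modulesPath module argument;
+# individual settings can still be overridden after importing the profile):
+#
+#   { modulesPath, ... }: {
+#     imports = [ "${modulesPath}/profiles/hardened.nix" ];
+#     # e.g. relax a single hardening knob declared below:
+#     security.lockKernelModules = false;
+#   }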
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta = {
+    maintainers = [ maintainers.joachifm maintainers.emily ];
+  };
+
+  boot.kernelPackages = mkDefault pkgs.linuxPackages_hardened;
+
+  nix.settings.allowed-users = mkDefault [ "@users" ];
+
+  environment.memoryAllocator.provider = mkDefault "scudo";
+  environment.variables.SCUDO_OPTIONS = mkDefault "ZeroContents=1";
+
+  security.lockKernelModules = mkDefault true;
+
+  security.protectKernelImage = mkDefault true;
+
+  security.allowSimultaneousMultithreading = mkDefault false;
+
+  security.forcePageTableIsolation = mkDefault true;
+
+  # This is required by podman to run containers in rootless mode.
+  security.unprivilegedUsernsClone = mkDefault config.virtualisation.containers.enable;
+
+  security.virtualisation.flushL1DataCache = mkDefault "always";
+
+  security.apparmor.enable = mkDefault true;
+  security.apparmor.killUnconfinedConfinables = mkDefault true;
+
+  boot.kernelParams = [
+    # Slab/slub sanity checks, redzoning, and poisoning
+    "slub_debug=FZP"
+
+    # Overwrite freed memory
+    "page_poison=1"
+
+    # Enable page allocator randomization
+    "page_alloc.shuffle=1"
+  ];
+
+  boot.blacklistedKernelModules = [
+    # Obscure network protocols
+    "ax25"
+    "netrom"
+    "rose"
+
+    # Old or rare or insufficiently audited filesystems
+    "adfs"
+    "affs"
+    "bfs"
+    "befs"
+    "cramfs"
+    "efs"
+    "erofs"
+    "exofs"
+    "freevxfs"
+    "f2fs"
+    "hfs"
+    "hpfs"
+    "jfs"
+    "minix"
+    "nilfs2"
+    "ntfs"
+    "omfs"
+    "qnx4"
+    "qnx6"
+    "sysv"
+    "ufs"
+  ];
+
+  # Restrict ptrace() usage to processes with a pre-defined relationship
+  # (e.g., parent/child)
+  boot.kernel.sysctl."kernel.yama.ptrace_scope" = mkOverride 500 1;
+
+  # Hide kptrs even for processes with CAP_SYSLOG
+  boot.kernel.sysctl."kernel.kptr_restrict" = mkOverride 500 2;
+
+  # Disable bpf() JIT (to eliminate spray attacks)
+  boot.kernel.sysctl."net.core.bpf_jit_enable" = mkDefault false;
+
+  # Disable ftrace debugging
+  boot.kernel.sysctl."kernel.ftrace_enabled" = mkDefault false;
+
+  # Enable strict reverse path filtering (that is, do not attempt to route
+  # packets that "obviously" do not belong to the iface's network; dropped
+  # packets are logged as martians).
+  boot.kernel.sysctl."net.ipv4.conf.all.log_martians" = mkDefault true;
+  boot.kernel.sysctl."net.ipv4.conf.all.rp_filter" = mkDefault "1";
+  boot.kernel.sysctl."net.ipv4.conf.default.log_martians" = mkDefault true;
+  boot.kernel.sysctl."net.ipv4.conf.default.rp_filter" = mkDefault "1";
+
+  # Ignore broadcast ICMP (mitigate SMURF)
+  boot.kernel.sysctl."net.ipv4.icmp_echo_ignore_broadcasts" = mkDefault true;
+
+  # Ignore incoming ICMP redirects (note: default is needed to ensure that the
+  # setting is applied to interfaces added after the sysctls are set)
+  boot.kernel.sysctl."net.ipv4.conf.all.accept_redirects" = mkDefault false;
+  boot.kernel.sysctl."net.ipv4.conf.all.secure_redirects" = mkDefault false;
+  boot.kernel.sysctl."net.ipv4.conf.default.accept_redirects" = mkDefault false;
+  boot.kernel.sysctl."net.ipv4.conf.default.secure_redirects" = mkDefault false;
+  boot.kernel.sysctl."net.ipv6.conf.all.accept_redirects" = mkDefault false;
+  boot.kernel.sysctl."net.ipv6.conf.default.accept_redirects" = mkDefault false;
+
+  # Ignore outgoing ICMP redirects (this is ipv4 only)
+  boot.kernel.sysctl."net.ipv4.conf.all.send_redirects" = mkDefault false;
+  boot.kernel.sysctl."net.ipv4.conf.default.send_redirects" = mkDefault false;
+}
diff --git a/nixpkgs/nixos/modules/profiles/headless.nix b/nixpkgs/nixos/modules/profiles/headless.nix
new file mode 100644
index 000000000000..eb29f3d65106
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/headless.nix
@@ -0,0 +1,23 @@
+# Common configuration for headless machines (e.g., Amazon EC2
+# instances).
+
+{ lib, ... }:
+
+with lib;
+
+{
+  # Don't start a tty on the serial consoles.
+  systemd.services."serial-getty@ttyS0".enable = lib.mkDefault false;
+  systemd.services."serial-getty@hvc0".enable = false;
+  systemd.services."getty@tty1".enable = false;
+  systemd.services."autovt@".enable = false;
+
+  # Since we can't manually respond to a panic, just reboot.
+  boot.kernelParams = [ "panic=1" "boot.panic_on_fail" "vga=0x317" "nomodeset" ];
+
+  # Don't allow emergency mode, because we don't have a console.
+  systemd.enableEmergencyMode = false;
+
+  # Being headless, we don't need a GRUB splash image.
+  boot.loader.grub.splashImage = null;
+}
diff --git a/nixpkgs/nixos/modules/profiles/image-based-appliance.nix b/nixpkgs/nixos/modules/profiles/image-based-appliance.nix
new file mode 100644
index 000000000000..7e8b6f696d54
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/image-based-appliance.nix
@@ -0,0 +1,26 @@
+# This profile sets up a system for image-based appliance usage. An appliance is
+# installed as an image, cannot be rebuilt, has no Nix available, and is
+# generally not meant for interactive use. Updates to such an appliance are
+# handled by updating whole partition images via a tool like systemd-sysupdate.
+
+{ lib, modulesPath, ... }:
+
+{
+
+  # Appliances are always "minimal".
+  imports = [
+    "${modulesPath}/profiles/minimal.nix"
+  ];
+
+  # The system cannot be rebuilt.
+  nix.enable = false;
+  system.switch.enable = false;
+
+  # The system is static.
+  users.mutableUsers = false;
+
+  # The system avoids interpreters as much as possible to reduce its attack
+  # surface.
+  boot.initrd.systemd.enable = lib.mkDefault true;
+  networking.useNetworkd = lib.mkDefault true;
+}
diff --git a/nixpkgs/nixos/modules/profiles/installation-device.nix b/nixpkgs/nixos/modules/profiles/installation-device.nix
new file mode 100644
index 000000000000..52750cd472da
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/installation-device.nix
@@ -0,0 +1,125 @@
+# Provide a basic configuration for installation devices like CDs.
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  imports =
+    [ # Enable devices which are usually scanned, because we don't know the
+      # target system.
+      ../installer/scan/detected.nix
+      ../installer/scan/not-detected.nix
+
+      # Allow "nixos-rebuild" to work properly by providing
+      # /etc/nixos/configuration.nix.
+      ./clone-config.nix
+
+      # Include a copy of Nixpkgs so that nixos-install works out of
+      # the box.
+      ../installer/cd-dvd/channel.nix
+    ];
+
+  config = {
+    system.nixos.variant_id = lib.mkDefault "installer";
+
+    # Enable in installer, even if the minimal profile disables it.
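+    # (mkImageMediaOverride is a priority level meant for installation media:
+    # it is stronger than mkDefault, so these settings win over profiles such
+    # as minimal.nix.)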
+    documentation.enable = mkImageMediaOverride true;
+
+    # Show the manual.
+    documentation.nixos.enable = mkImageMediaOverride true;
+
+    # Use a less privileged "nixos" user.
+    users.users.nixos = {
+      isNormalUser = true;
+      extraGroups = [ "wheel" "networkmanager" "video" ];
+      # Allow the graphical user to log in without a password.
+      initialHashedPassword = "";
+    };
+
+    # Allow the user to log in as root without a password.
+    users.users.root.initialHashedPassword = "";
+
+    # Allow passwordless sudo from nixos user
+    security.sudo = {
+      enable = mkDefault true;
+      wheelNeedsPassword = mkImageMediaOverride false;
+    };
+
+    # Automatically log in at the virtual consoles.
+    services.getty.autologinUser = "nixos";
+
+    # Some more help text.
+    services.getty.helpLine = ''
+      The "nixos" and "root" accounts have empty passwords.
+
+      To log in over ssh you must set a password for either "nixos" or "root"
+      with `passwd` (prefix with `sudo` for "root"), or add your public key to
+      /home/nixos/.ssh/authorized_keys or /root/.ssh/authorized_keys.
+
+      If you need a wireless connection, type
+      `sudo systemctl start wpa_supplicant` and configure a
+      network using `wpa_cli`. See the NixOS manual for details.
+    '' + optionalString config.services.xserver.enable ''
+
+      Type `sudo systemctl start display-manager` to
+      start the graphical user interface.
+    '';
+
+    # We run sshd by default. Login is only possible after adding a
+    # password via "passwd" or by adding an SSH key to ~/.ssh/authorized_keys.
+    # The latter is particularly useful if keys are added manually to the
+    # installation device for headless systems (e.g. ARM boards) by mounting
+    # the storage in a different system.
+    services.openssh = {
+      enable = true;
+      settings.PermitRootLogin = "yes";
+    };
+
+    # Enable wpa_supplicant, but don't start it by default.
+    networking.wireless.enable = mkDefault true;
+    networking.wireless.userControlled.enable = true;
+    systemd.services.wpa_supplicant.wantedBy = mkOverride 50 [];
+
+    # Tell the Nix evaluator to garbage collect more aggressively.
+    # This is desirable in memory-constrained environments that don't
+    # (yet) have swap set up.
+    environment.variables.GC_INITIAL_HEAP_SIZE = "1M";
+
+    # Make the installer more likely to succeed in low-memory
+    # environments.  The kernel's overcommit heuristics bite us
+    # fairly often, preventing processes such as nix-worker or
+    # download-using-manifests.pl from forking even if there is
+    # plenty of free memory.
+    boot.kernel.sysctl."vm.overcommit_memory" = "1";
+
+    # To speed up installation a little bit, include the complete
+    # stdenv in the Nix store on the CD.
+    system.extraDependencies = with pkgs;
+      [
+        stdenv
+        stdenvNoCC # for runCommand
+        busybox
+        jq # for closureInfo
+        # For boot.initrd.systemd
+        makeInitrdNGTool
+      ];
+
+    boot.swraid.enable = true;
+
+    # Show all debug messages from the kernel but don't log refused packets
+    # because we have the firewall enabled. This makes installs from the
+    # console less cumbersome if the machine has a public IP.
+    networking.firewall.logRefusedConnections = mkDefault false;
+
+    # Prevent installation media from evacuating persistent storage, as their
+    # var directory is not persistent and it would thus result in deletion of
+    # those entries.
+    environment.etc."systemd/pstore.conf".text = ''
+      [PStore]
+      Unlink=no
+    '';
+
+    # Allow nix-copy to the live system.
+    nix.settings.trusted-users = [ "root" "nixos" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/profiles/keys/ssh_host_ed25519_key b/nixpkgs/nixos/modules/profiles/keys/ssh_host_ed25519_key
new file mode 100644
index 000000000000..b18489795369
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/keys/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACCQVnMW/wZWqrdWrjrRPhfEFFq1KLYguagSflLhFnVQmwAAAJASuMMnErjD
+JwAAAAtzc2gtZWQyNTUxOQAAACCQVnMW/wZWqrdWrjrRPhfEFFq1KLYguagSflLhFnVQmw
+AAAEDIN2VWFyggtoSPXcAFy8dtG1uAig8sCuyE21eMDt2GgJBWcxb/Blaqt1auOtE+F8QU
+WrUotiC5qBJ+UuEWdVCbAAAACnJvb3RAbml4b3MBAgM=
+-----END OPENSSH PRIVATE KEY-----
diff --git a/nixpkgs/nixos/modules/profiles/keys/ssh_host_ed25519_key.pub b/nixpkgs/nixos/modules/profiles/keys/ssh_host_ed25519_key.pub
new file mode 100644
index 000000000000..2c45826715fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/keys/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJBWcxb/Blaqt1auOtE+F8QUWrUotiC5qBJ+UuEWdVCb root@nixos
diff --git a/nixpkgs/nixos/modules/profiles/macos-builder.nix b/nixpkgs/nixos/modules/profiles/macos-builder.nix
new file mode 100644
index 000000000000..d48afed18f7e
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/macos-builder.nix
@@ -0,0 +1,250 @@
+{ config, lib, options, ... }:
+
+let
+  keysDirectory = "/var/keys";
+
+  user = "builder";
+
+  keyType = "ed25519";
+
+  cfg = config.virtualisation.darwin-builder;
+
+in
+
+{
+  imports = [
+    ../virtualisation/qemu-vm.nix
+
+    # Avoid a dependency on stateVersion
+    {
+      disabledModules = [
+        ../virtualisation/nixos-containers.nix
+        ../services/x11/desktop-managers/xterm.nix
+      ];
+      # swraid's default depends on stateVersion
+      config.boot.swraid.enable = false;
+      options.boot.isContainer = lib.mkOption { default = false; internal = true; };
+    }
+  ];
+
+  options.virtualisation.darwin-builder = with lib; {
+    diskSize = mkOption {
+      default = 20 * 1024;
+      type = types.int;
+      example = 30720;
+      description = "The maximum disk space allocated to the runner in MB";
+    };
+    memorySize = mkOption {
+      default = 3 * 1024;
+      type = types.int;
+      example = 8192;
+      description = "The runner's memory in MB";
+    };
+    min-free = mkOption {
+      default = 1024 * 1024 * 1024;
+      type = types.int;
+      example = 1073741824;
+      description = ''
+        The threshold (in bytes) of free disk space left at which to
+        start garbage collection on the runner
+      '';
+    };
+    max-free = mkOption {
+      default = 3 * 1024 * 1024 * 1024;
+      type = types.int;
+      example = 3221225472;
+      description = ''
+        The threshold (in bytes) of free disk space left at which to
+        stop garbage collection on the runner
+      '';
+    };
+    workingDirectory = mkOption {
+       default = ".";
+       type = types.str;
+       example = "/var/lib/darwin-builder";
+       description = ''
+         The working directory in which to run the script. When running
+         as part of a flake, this must be set to a non-read-only filesystem.
+       '';
+    };
+    hostPort = mkOption {
+      default = 31022;
+      type = types.int;
+      example = 22;
+      description = ''
+        The port on the host's localhost interface that forwards TCP traffic to the guest's SSH port.
+      '';
+    };
+  };
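+
+  # A hedged usage note: once the builder VM is running, the host can reach it
+  # over the forwarded port with the credentials installed by the
+  # install-credentials script, e.g. (with the default hostPort):
+  #
+  #   ssh builder@localhost -p 31022 -i /etc/nix/builder_ed25519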
+
+  config = {
+    # The builder is not intended to be used interactively
+    documentation.enable = false;
+
+    environment.etc = {
+      "ssh/ssh_host_ed25519_key" = {
+        mode = "0600";
+
+        source = ./keys/ssh_host_ed25519_key;
+      };
+
+      "ssh/ssh_host_ed25519_key.pub" = {
+        mode = "0644";
+
+        source = ./keys/ssh_host_ed25519_key.pub;
+      };
+    };
+
+    # DNS fails for QEMU user networking (SLiRP) on macOS.  See:
+    #
+    # https://github.com/utmapp/UTM/issues/2353
+    #
+    # This works around that by using a public DNS server other than the DNS
+    # server that QEMU provides (normally 10.0.2.3)
+    networking.nameservers = [ "8.8.8.8" ];
+
+    nix.settings = {
+      auto-optimise-store = true;
+
+      min-free = cfg.min-free;
+
+      max-free = cfg.max-free;
+
+      trusted-users = [ "root" user ];
+    };
+
+    services = {
+      getty.autologinUser = user;
+
+      openssh = {
+        enable = true;
+
+        authorizedKeysFiles = [ "${keysDirectory}/%u_${keyType}.pub" ];
+      };
+    };
+
+    system.build.macos-builder-installer =
+      let
+        privateKey = "/etc/nix/${user}_${keyType}";
+
+        publicKey = "${privateKey}.pub";
+
+        # This installCredentials script is written so that it's as easy as
+        # possible for a user to audit before confirming the `sudo`
+        installCredentials = hostPkgs.writeShellScript "install-credentials" ''
+          KEYS="''${1}"
+          INSTALL=${hostPkgs.coreutils}/bin/install
+          "''${INSTALL}" -g nixbld -m 600 "''${KEYS}/${user}_${keyType}" ${privateKey}
+          "''${INSTALL}" -g nixbld -m 644 "''${KEYS}/${user}_${keyType}.pub" ${publicKey}
+        '';
+
+        hostPkgs = config.virtualisation.host.pkgs;
+
+        script = hostPkgs.writeShellScriptBin "create-builder" (
+          # When running non-interactively as part of a DarwinConfiguration, the
+          # working directory must be set to a writable directory.
+        (if cfg.workingDirectory != "." then ''
+          ${hostPkgs.coreutils}/bin/mkdir --parent "${cfg.workingDirectory}"
+          cd "${cfg.workingDirectory}"
+        '' else "") + ''
+          KEYS="''${KEYS:-./keys}"
+          ${hostPkgs.coreutils}/bin/mkdir --parent "''${KEYS}"
+          PRIVATE_KEY="''${KEYS}/${user}_${keyType}"
+          PUBLIC_KEY="''${PRIVATE_KEY}.pub"
+          if [ ! -e "''${PRIVATE_KEY}" ] || [ ! -e "''${PUBLIC_KEY}" ]; then
+              ${hostPkgs.coreutils}/bin/rm --force -- "''${PRIVATE_KEY}" "''${PUBLIC_KEY}"
+              ${hostPkgs.openssh}/bin/ssh-keygen -q -f "''${PRIVATE_KEY}" -t ${keyType} -N "" -C 'builder@localhost'
+          fi
+          if ! ${hostPkgs.diffutils}/bin/cmp "''${PUBLIC_KEY}" ${publicKey}; then
+            (set -x; sudo --reset-timestamp ${installCredentials} "''${KEYS}")
+          fi
+          KEYS="$(${hostPkgs.nix}/bin/nix-store --add "$KEYS")" ${lib.getExe config.system.build.vm}
+        '');
+
+      in
+      script.overrideAttrs (old: {
+        pos = __curPos; # sets meta.position to point here; see script binding above for package definition
+        meta = (old.meta or { }) // {
+          platforms = lib.platforms.darwin;
+        };
+        passthru = (old.passthru or { }) // {
+          # Let users in the repl inspect the config
+          nixosConfig = config;
+          nixosOptions = options;
+        };
+      });
+
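+    # A hedged usage note: on the macOS host, this installer is typically run
+    # via the flake attribute mentioned in the stateVersion message below, e.g.
+    # `nix run nixpkgs#darwin.linux-builder`.
+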
+    system = {
+      # To prevent gratuitous rebuilds on each change to Nixpkgs
+      nixos.revision = null;
+
+      stateVersion = lib.mkDefault (throw ''
+        The macOS linux builder should not need a stateVersion to be set, but a module
+        has accessed stateVersion nonetheless.
+        Please inspect the trace of the following command to figure out which module
+        has a dependency on stateVersion.
+
+          nix-instantiate --attr darwin.linux-builder --show-trace
+      '');
+    };
+
+    users.users."${user}" = {
+      isNormalUser = true;
+    };
+
+    security.polkit.enable = true;
+
+    security.polkit.extraConfig = ''
+      polkit.addRule(function(action, subject) {
+        if (action.id === "org.freedesktop.login1.power-off" && subject.user === "${user}") {
+          return "yes";
+        } else {
+          return "no";
+        }
+      })
+    '';
+
+    virtualisation = {
+      diskSize = cfg.diskSize;
+
+      memorySize = cfg.memorySize;
+
+      forwardPorts = [
+        { from = "host"; guest.port = 22; host.port = cfg.hostPort; }
+      ];
+
+      # Disable graphics for the builder since users will likely want to run it
+      # non-interactively in the background.
+      graphics = false;
+
+      sharedDirectories.keys = {
+        source = "\"$KEYS\"";
+        target = keysDirectory;
+      };
+
+      # If we don't enable this option then the host will fail to delegate builds
+      # to the guest, because:
+      #
+      # - The host will lock the path to build
+      # - The host will delegate the build to the guest
+      # - The guest will attempt to lock the same path and fail because
+      #   the lockfile on the host is visible on the guest
+      #
+      # Snapshotting the host's /nix/store as an image isolates the guest VM's
+      # /nix/store from the host's /nix/store, preventing this problem.
+      useNixStoreImage = true;
+
+      # Obviously the /nix/store needs to be writable on the guest in order for it
+      # to perform builds.
+      writableStore = true;
+
+      # This ensures that anything built on the guest isn't lost when the guest is
+      # restarted.
+      writableStoreUseTmpfs = false;
+
+      # Pass certificates from host to the guest otherwise when custom CA certificates
+      # are required we can't use the cached builder.
+      useHostCerts = true;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/profiles/minimal.nix b/nixpkgs/nixos/modules/profiles/minimal.nix
new file mode 100644
index 000000000000..75f355b4a002
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/minimal.nix
@@ -0,0 +1,40 @@
+# This module defines a small NixOS configuration.  It does not
+# contain any graphical stuff.
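+#
+# A hedged usage sketch: this profile can be pulled into an existing
+# configuration through the module system, e.g.
+#
+#   imports = [ <nixpkgs/nixos/modules/profiles/minimal.nix> ];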
+
+{ config, lib, ... }:
+
+with lib;
+
+{
+  environment.noXlibs = mkDefault true;
+
+  documentation.enable = mkDefault false;
+
+  documentation.doc.enable = mkDefault false;
+
+  documentation.info.enable = mkDefault false;
+
+  documentation.man.enable = mkDefault false;
+
+  documentation.nixos.enable = mkDefault false;
+
+  # Perl is a default package.
+  environment.defaultPackages = mkDefault [ ];
+
+  # The lessopen package pulls in Perl.
+  programs.less.lessopen = mkDefault null;
+
+  # This pulls in nixos-containers which depends on Perl.
+  boot.enableContainers = mkDefault false;
+
+  programs.command-not-found.enable = mkDefault false;
+
+  services.logrotate.enable = mkDefault false;
+
+  services.udisks2.enable = mkDefault false;
+
+  xdg.autostart.enable = mkDefault false;
+  xdg.icons.enable = mkDefault false;
+  xdg.mime.enable = mkDefault false;
+  xdg.sounds.enable = mkDefault false;
+}
diff --git a/nixpkgs/nixos/modules/profiles/qemu-guest.nix b/nixpkgs/nixos/modules/profiles/qemu-guest.nix
new file mode 100644
index 000000000000..8b3df97ae0db
--- /dev/null
+++ b/nixpkgs/nixos/modules/profiles/qemu-guest.nix
@@ -0,0 +1,17 @@
+# Common configuration for virtual machines running under QEMU (using
+# virtio).
+
+{ config, lib, ... }:
+
+{
+  boot.initrd.availableKernelModules = [ "virtio_net" "virtio_pci" "virtio_mmio" "virtio_blk" "virtio_scsi" "9p" "9pnet_virtio" ];
+  boot.initrd.kernelModules = [ "virtio_balloon" "virtio_console" "virtio_rng" ];
+
+  boot.initrd.postDeviceCommands = lib.mkIf (!config.boot.initrd.systemd.enable)
+    ''
+      # Set the system time from the hardware clock to work around a
+      # bug in qemu-kvm > 1.5.2 (where the VM clock is initialised
+      # to the *boot time* of the host).
+      hwclock -s
+    '';
+}
diff --git a/nixpkgs/nixos/modules/programs/_1password-gui.nix b/nixpkgs/nixos/modules/programs/_1password-gui.nix
new file mode 100644
index 000000000000..27c0d34a2eed
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/_1password-gui.nix
@@ -0,0 +1,65 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs._1password-gui;
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "programs" "_1password-gui" "gid" ] ''
+      A preallocated GID will be used instead.
+    '')
+  ];
+
+  options = {
+    programs._1password-gui = {
+      enable = mkEnableOption (lib.mdDoc "the 1Password GUI application");
+
+      polkitPolicyOwners = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = literalExpression ''["user1" "user2" "user3"]'';
+        description = lib.mdDoc ''
+          A list of users who should be able to integrate 1Password with polkit-based authentication mechanisms.
+        '';
+      };
+
+      package = mkPackageOptionMD pkgs "1Password GUI" {
+        default = [ "_1password-gui" ];
+      };
+    };
+  };
+
+  config =
+    let
+      package = cfg.package.override {
+        polkitPolicyOwners = cfg.polkitPolicyOwners;
+      };
+    in
+    mkIf cfg.enable {
+      environment.systemPackages = [ package ];
+      users.groups.onepassword.gid = config.ids.gids.onepassword;
+
+      security.wrappers = {
+        "1Password-BrowserSupport" = {
+          source = "${package}/share/1password/1Password-BrowserSupport";
+          owner = "root";
+          group = "onepassword";
+          setuid = false;
+          setgid = true;
+        };
+
+        "1Password-KeyringHelper" = {
+          source = "${package}/share/1password/1Password-KeyringHelper";
+          owner = "root";
+          group = "onepassword";
+          setuid = true;
+          setgid = true;
+        };
+      };
+
+    };
+}
diff --git a/nixpkgs/nixos/modules/programs/_1password.nix b/nixpkgs/nixos/modules/programs/_1password.nix
new file mode 100644
index 000000000000..8537484c7e67
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/_1password.nix
@@ -0,0 +1,41 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs._1password;
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "programs" "_1password" "gid" ] ''
+      A preallocated GID will be used instead.
+    '')
+  ];
+
+  options = {
+    programs._1password = {
+      enable = mkEnableOption (lib.mdDoc "the 1Password CLI tool");
+
+      package = mkPackageOptionMD pkgs "1Password CLI" {
+        default = [ "_1password" ];
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    users.groups.onepassword-cli.gid = config.ids.gids.onepassword-cli;
+
+    security.wrappers = {
+      "op" = {
+        source = "${cfg.package}/bin/op";
+        owner = "root";
+        group = "onepassword-cli";
+        setuid = false;
+        setgid = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/adb.nix b/nixpkgs/nixos/modules/programs/adb.nix
new file mode 100644
index 000000000000..e5b0abd9fcfe
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/adb.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta.maintainers = [ maintainers.mic92 ];
+
+  ###### interface
+  options = {
+    programs.adb = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to configure the system to use the Android Debug Bridge (adb).
+          To grant a user access, add them to the adbusers group:
+          `users.users.alice.extraGroups = ["adbusers"];`
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf config.programs.adb.enable {
+    services.udev.packages = [ pkgs.android-udev-rules ];
+    environment.systemPackages = [ pkgs.android-tools ];
+    users.groups.adbusers = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/appgate-sdp.nix b/nixpkgs/nixos/modules/programs/appgate-sdp.nix
new file mode 100644
index 000000000000..bdd538dc2f1f
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/appgate-sdp.nix
@@ -0,0 +1,25 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options = {
+    programs.appgate-sdp = {
+      enable = mkEnableOption (lib.mdDoc "AppGate SDP VPN client");
+    };
+  };
+
+  config = mkIf config.programs.appgate-sdp.enable {
+    boot.kernelModules = [ "tun" ];
+    environment.systemPackages = [ pkgs.appgate-sdp ];
+    services.dbus.packages = [ pkgs.appgate-sdp ];
+    systemd = {
+      packages = [ pkgs.appgate-sdp ];
+      # https://github.com/NixOS/nixpkgs/issues/81138
+      services.appgatedriver.wantedBy = [ "multi-user.target" ];
+      services.appgate-dumb-resolver.path = [ pkgs.e2fsprogs ];
+      services.appgate-resolver.path = [ pkgs.procps pkgs.e2fsprogs ];
+      services.appgatedriver.path = [ pkgs.e2fsprogs ];
+    };
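+
+  # A hedged usage sketch (package names are illustrative): enable the host
+  # side and route selected packages through ccacheStdenv via packageNames:
+  #
+  #   programs.ccache.enable = true;
+  #   programs.ccache.packageNames = [ "ffmpeg" ];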
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/atop.nix b/nixpkgs/nixos/modules/programs/atop.nix
new file mode 100644
index 000000000000..a5f4d990bdbe
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/atop.nix
@@ -0,0 +1,178 @@
+# Global configuration for atop.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.atop;
+
+in
+{
+  ###### interface
+
+  options = {
+
+    programs.atop = rec {
+
+      enable = mkEnableOption (lib.mdDoc "Atop");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.atop;
+        defaultText = literalExpression "pkgs.atop";
+        description = lib.mdDoc ''
+          Which package to use for Atop.
+        '';
+      };
+
+      netatop = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to install and enable the netatop kernel module.
+            Note: this sets the kernel taint flag "O" for loading out-of-tree modules.
+          '';
+        };
+        package = mkOption {
+          type = types.package;
+          default = config.boot.kernelPackages.netatop;
+          defaultText = literalExpression "config.boot.kernelPackages.netatop";
+          description = lib.mdDoc ''
+            Which package to use for netatop.
+          '';
+        };
+      };
+
+      atopgpu.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to install and enable the atopgpud daemon to get information about
+          NVIDIA gpus.
+        '';
+      };
+
+      setuidWrapper.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to install a setuid wrapper for Atop. This is required to use some of
+          the features as a non-root user (e.g. IPC information, netatop, atopgpu).
+          Atop tries to drop the root privileges shortly after starting.
+        '';
+      };
+
+      atopService.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the atop service responsible for storing statistics for
+          long-term analysis.
+        '';
+      };
+      atopRotateTimer.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the atop-rotate timer, which restarts the atop service
+          daily to make sure the data files are rotated.
+        '';
+      };
+      atopacctService.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the atopacct service which manages process accounting.
+          This allows Atop to gather data about processes that disappeared in between
+          two refresh intervals.
+        '';
+      };
+      settings = mkOption {
+        type = types.attrs;
+        default = { };
+        example = {
+          flags = "a1f";
+          interval = 5;
+        };
+        description = lib.mdDoc ''
+          Parameters to be written to {file}`/etc/atoprc`.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable (
+    let
+      atop =
+        if cfg.atopgpu.enable then
+          (cfg.package.override { withAtopgpu = true; })
+        else
+          cfg.package;
+    in
+    {
+      environment.etc = mkIf (cfg.settings != { }) {
+        atoprc.text = concatStrings
+          (mapAttrsToList
+            (n: v: ''
+              ${n} ${toString v}
+            '')
+            cfg.settings);
+      };
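+      # For illustration: the example settings above, { flags = "a1f"; interval = 5; },
+      # render /etc/atoprc as the two lines "flags a1f" and "interval 5".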
+      environment.systemPackages = [ atop (lib.mkIf cfg.netatop.enable cfg.netatop.package) ];
+      boot.extraModulePackages = [ (lib.mkIf cfg.netatop.enable cfg.netatop.package) ];
+      systemd =
+        let
+          mkSystemd = type: name: restartTriggers: {
+            ${name} = {
+              inherit restartTriggers;
+              wantedBy = [ (if type == "services" then "multi-user.target" else if type == "timers" then "timers.target" else null) ];
+            };
+          };
+          mkService = mkSystemd "services";
+          mkTimer = mkSystemd "timers";
+        in
+        {
+          packages = [ atop (lib.mkIf cfg.netatop.enable cfg.netatop.package) ];
+          services = lib.mkMerge [
+            (lib.mkIf cfg.atopService.enable (lib.recursiveUpdate
+              (mkService "atop" [ atop ])
+              {
+                # always convert logs to newer version first
+                # XXX might trigger TimeoutStart but restarting atop.service will
+                # convert the remaining logs and eventually start
+                atop.preStart = ''
+                  set -e -u
+                  shopt -s nullglob
+                  for logfile in "$LOGPATH"/atop_*
+                  do
+                    ${atop}/bin/atopconvert "$logfile" "$logfile".new
+                    # only replace old file if version was upgraded to avoid
+                    # false positives for atop-rotate.service
+                    if ! ${pkgs.diffutils}/bin/cmp -s "$logfile" "$logfile".new
+                    then
+                      ${pkgs.coreutils}/bin/mv -v -f "$logfile".new "$logfile"
+                    else
+                      ${pkgs.coreutils}/bin/rm -f "$logfile".new
+                    fi
+                  done
+                '';
+              }))
+            (lib.mkIf cfg.atopacctService.enable (mkService "atopacct" [ atop ]))
+            (lib.mkIf cfg.netatop.enable (mkService "netatop" [ cfg.netatop.package ]))
+            (lib.mkIf cfg.atopgpu.enable (mkService "atopgpu" [ atop ]))
+          ];
+          timers = lib.mkIf cfg.atopRotateTimer.enable (mkTimer "atop-rotate" [ atop ]);
+        };
+
+      security.wrappers = lib.mkIf cfg.setuidWrapper.enable {
+        atop = {
+          setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${atop}/bin/atop";
+        };
+      };
+    }
+  );
+}
diff --git a/nixpkgs/nixos/modules/programs/ausweisapp.nix b/nixpkgs/nixos/modules/programs/ausweisapp.nix
new file mode 100644
index 000000000000..91870df20246
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/ausweisapp.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg  = config.programs.ausweisapp;
+in
+{
+  options.programs.ausweisapp = {
+    enable = mkEnableOption (lib.mdDoc "AusweisApp");
+
+    openFirewall = mkOption {
+      description = lib.mdDoc ''
+        Whether to open the required firewall ports for the Smartphone as Card Reader (SaC) functionality of AusweisApp.
+      '';
+      default = false;
+      type = lib.types.bool;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ ausweisapp ];
+    networking.firewall.allowedUDPPorts = lib.optionals cfg.openFirewall [ 24727 ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/autojump.nix b/nixpkgs/nixos/modules/programs/autojump.nix
new file mode 100644
index 000000000000..dde6870d9890
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/autojump.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.autojump;
+  prg = config.programs;
+in
+{
+  options = {
+    programs.autojump = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable autojump.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.pathsToLink = [ "/share/autojump" ];
+    environment.systemPackages = [ pkgs.autojump ];
+
+    programs.bash.interactiveShellInit = "source ${pkgs.autojump}/share/autojump/autojump.bash";
+    programs.zsh.interactiveShellInit = mkIf prg.zsh.enable "source ${pkgs.autojump}/share/autojump/autojump.zsh";
+    programs.fish.interactiveShellInit = mkIf prg.fish.enable "source ${pkgs.autojump}/share/autojump/autojump.fish";
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/bandwhich.nix b/nixpkgs/nixos/modules/programs/bandwhich.nix
new file mode 100644
index 000000000000..aa6a0dfb6ffd
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bandwhich.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.bandwhich;
+in {
+  meta.maintainers = with maintainers; [ Br1ght0ne ];
+
+  options = {
+    programs.bandwhich = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to add bandwhich to the global environment and configure a
+          setcap wrapper for it.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ bandwhich ];
+    security.wrappers.bandwhich = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_sys_ptrace,cap_dac_read_search,cap_net_raw,cap_net_admin+ep";
+      source = "${pkgs.bandwhich}/bin/bandwhich";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/bash-my-aws.nix b/nixpkgs/nixos/modules/programs/bash-my-aws.nix
new file mode 100644
index 000000000000..10f16cae651b
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bash-my-aws.nix
@@ -0,0 +1,25 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  prg = config.programs;
+  cfg = prg.bash-my-aws;
+
+  initScript = ''
+    eval $(${pkgs.bash-my-aws}/bin/bma-init)
+  '';
+in
+  {
+    options = {
+      programs.bash-my-aws = {
+        enable = mkEnableOption (lib.mdDoc "bash-my-aws");
+      };
+    };
+
+    config = mkIf cfg.enable {
+      environment.systemPackages = with pkgs; [ bash-my-aws ];
+
+      programs.bash.interactiveShellInit = initScript;
+    };
+  }
diff --git a/nixpkgs/nixos/modules/programs/bash/bash-completion.nix b/nixpkgs/nixos/modules/programs/bash/bash-completion.nix
new file mode 100644
index 000000000000..96fbe0126d66
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bash/bash-completion.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  enable = config.programs.bash.enableCompletion;
+in
+{
+  options = {
+    programs.bash.enableCompletion = mkEnableOption (lib.mdDoc "Bash completion for all interactive bash shells") // {
+      default = true;
+    };
+  };
+
+  config = mkIf enable {
+    programs.bash.promptPluginInit = ''
+      # Check whether we're running a version of Bash that has support for
+      # programmable completion. If we do, enable all modules installed in
+      # the system and user profile in obsolete /etc/bash_completion.d/
+      # directories. Bash loads completions in all
+      # $XDG_DATA_DIRS/bash-completion/completions/
+      # on demand, so they do not need to be sourced here.
+      if shopt -q progcomp &>/dev/null; then
+        . "${pkgs.bash-completion}/etc/profile.d/bash_completion.sh"
+        nullglobStatus=$(shopt -p nullglob)
+        shopt -s nullglob
+        for p in $NIX_PROFILES; do
+          for m in "$p/etc/bash_completion.d/"*; do
+            . "$m"
+          done
+        done
+        eval "$nullglobStatus"
+        unset nullglobStatus p m
+      fi
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/bash/bash.nix b/nixpkgs/nixos/modules/programs/bash/bash.nix
new file mode 100644
index 000000000000..7d3322ea5e50
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bash/bash.nix
@@ -0,0 +1,217 @@
+# This module defines global configuration for the Bash shell, in
+# particular /etc/bashrc and /etc/profile.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfge = config.environment;
+
+  cfg = config.programs.bash;
+
+  bashAliases = concatStringsSep "\n" (
+    mapAttrsFlatten (k: v: "alias -- ${k}=${escapeShellArg v}")
+      (filterAttrs (k: v: v != null) cfg.shellAliases)
+  );
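+  # For illustration: { ll = "ls -l"; } renders as `alias -- ll='ls -l'`.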
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "programs" "bash" "enable" ] "")
+  ];
+
+  options = {
+
+    programs.bash = {
+
+      /*
+      enable = mkOption {
+        default = true;
+        description = lib.mdDoc ''
+          Whether to configure Bash as an interactive shell.
+          Note that this tries to make Bash the default
+          {option}`users.defaultUserShell`,
+          which in turn means that you might need to explicitly
+          set this variable if you have another shell configured
+          with NixOS.
+        '';
+        type = types.bool;
+      };
+      */
+
+      shellAliases = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Set of aliases for the bash shell, which overrides {option}`environment.shellAliases`.
+          See {option}`environment.shellAliases` for an option format description.
+        '';
+        type = with types; attrsOf (nullOr (either str path));
+      };
+
+      shellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during bash shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      loginShellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during login bash shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      interactiveShellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during interactive bash shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      promptInit = mkOption {
+        default = ''
+          # Provide a nice prompt if the terminal supports it.
+          if [ "$TERM" != "dumb" ] || [ -n "$INSIDE_EMACS" ]; then
+            PROMPT_COLOR="1;31m"
+            ((UID)) && PROMPT_COLOR="1;32m"
+            if [ -n "$INSIDE_EMACS" ]; then
+              # Emacs term mode doesn't support xterm title escape sequence (\e]0;)
+              PS1="\n\[\033[$PROMPT_COLOR\][\u@\h:\w]\\$\[\033[0m\] "
+            else
+              PS1="\n\[\033[$PROMPT_COLOR\][\[\e]0;\u@\h: \w\a\]\u@\h:\w]\\$\[\033[0m\] "
+            fi
+            if test "$TERM" = "xterm"; then
+              PS1="\[\033]2;\h:\u:\w\007\]$PS1"
+            fi
+          fi
+        '';
+        description = lib.mdDoc ''
+          Shell script code used to initialise the bash prompt.
+        '';
+        type = types.lines;
+      };
+
+      promptPluginInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code used to initialise bash prompt plugins.
+        '';
+        type = types.lines;
+        internal = true;
+      };
+
+    };
+
+  };
+
+  config = /* mkIf cfg.enable */ {
+
+    programs.bash = {
+
+      shellAliases = mapAttrs (name: mkDefault) cfge.shellAliases;
+
+      shellInit = ''
+        if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]; then
+            . ${config.system.build.setEnvironment}
+        fi
+
+        ${cfge.shellInit}
+      '';
+
+      loginShellInit = cfge.loginShellInit;
+
+      interactiveShellInit = ''
+        # Check the window size after every command.
+        shopt -s checkwinsize
+
+        # Disable hashing (i.e. caching) of command lookups.
+        set +h
+
+        ${cfg.promptInit}
+        ${cfg.promptPluginInit}
+        ${bashAliases}
+
+        ${cfge.interactiveShellInit}
+      '';
+
+    };
+
+    environment.etc.profile.text =
+      ''
+        # /etc/profile: DO NOT EDIT -- this file has been generated automatically.
+        # This file is read for login shells.
+
+        # Only execute this file once per shell.
+        if [ -n "$__ETC_PROFILE_SOURCED" ]; then return; fi
+        __ETC_PROFILE_SOURCED=1
+
+        # Prevent this file from being sourced by interactive non-login child shells.
+        export __ETC_PROFILE_DONE=1
+
+        ${cfg.shellInit}
+        ${cfg.loginShellInit}
+
+        # Read system-wide modifications.
+        if test -f /etc/profile.local; then
+            . /etc/profile.local
+        fi
+
+        if [ -n "''${BASH_VERSION:-}" ]; then
+            . /etc/bashrc
+        fi
+      '';
+
+    environment.etc.bashrc.text =
+      ''
+        # /etc/bashrc: DO NOT EDIT -- this file has been generated automatically.
+
+        # Only execute this file once per shell.
+        if [ -n "$__ETC_BASHRC_SOURCED" ] || [ -n "$NOSYSBASHRC" ]; then return; fi
+        __ETC_BASHRC_SOURCED=1
+
+        # If the profile was not loaded in a parent process, source
+        # it.  But otherwise don't do it because we don't want to
+        # clobber overridden values of $PATH, etc.
+        if [ -z "$__ETC_PROFILE_DONE" ]; then
+            . /etc/profile
+        fi
+
+        # We are not always an interactive shell.
+        if [ -n "$PS1" ]; then
+            ${cfg.interactiveShellInit}
+        fi
+
+        # Read system-wide modifications.
+        if test -f /etc/bashrc.local; then
+            . /etc/bashrc.local
+        fi
+      '';
+
+    # Configuration for readline in bash. We use "option default"
+    # priority to allow user override using both .text and .source.
+    environment.etc.inputrc.source = mkOptionDefault ./inputrc;
+
+    users.defaultUserShell = mkDefault pkgs.bashInteractive;
+
+    environment.pathsToLink = optionals cfg.enableCompletion [
+      "/etc/bash_completion.d"
+      "/share/bash-completion"
+    ];
+
+    environment.shells =
+      [ "/run/current-system/sw/bin/bash"
+        "/run/current-system/sw/bin/sh"
+        "${pkgs.bashInteractive}/bin/bash"
+        "${pkgs.bashInteractive}/bin/sh"
+      ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/bash/blesh.nix b/nixpkgs/nixos/modules/programs/bash/blesh.nix
new file mode 100644
index 000000000000..8fa51bef7744
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bash/blesh.nix
@@ -0,0 +1,16 @@
+{ lib, config, pkgs, ... }:
+with lib;
+let
+  cfg = config.programs.bash.blesh;
+in {
+  options = {
+    programs.bash.blesh.enable = mkEnableOption (mdDoc "blesh");
+  };
+
+  config = mkIf cfg.enable {
+    programs.bash.interactiveShellInit = mkBefore ''
+      source ${pkgs.blesh}/share/blesh/ble.sh
+    '';
+  };
+  meta.maintainers = with maintainers; [ laalsaas ];
+}
diff --git a/nixpkgs/nixos/modules/programs/bash/inputrc b/nixpkgs/nixos/modules/programs/bash/inputrc
new file mode 100644
index 000000000000..f339eb649ed8
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bash/inputrc
@@ -0,0 +1,37 @@
+# inputrc borrowed from CentOS (RHEL).
+
+set bell-style none
+
+set meta-flag on
+set input-meta on
+set convert-meta off
+set output-meta on
+set colored-stats on
+
+#set mark-symlinked-directories on
+
+$if mode=emacs
+
+# for linux console and RH/Debian xterm
+"\e[1~": beginning-of-line
+"\e[4~": end-of-line
+"\e[5~": beginning-of-history
+"\e[6~": end-of-history
+"\e[3~": delete-char
+"\e[2~": quoted-insert
+"\e[5C": forward-word
+"\e[5D": backward-word
+"\e[1;5C": forward-word
+"\e[1;5D": backward-word
+
+# for rxvt
+"\e[8~": end-of-line
+
+# for non RH/Debian xterm, can't hurt for RH/Debian xterm
+"\eOH": beginning-of-line
+"\eOF": end-of-line
+
+# for freebsd console
+"\e[H": beginning-of-line
+"\e[F": end-of-line
+$endif
diff --git a/nixpkgs/nixos/modules/programs/bash/ls-colors.nix b/nixpkgs/nixos/modules/programs/bash/ls-colors.nix
new file mode 100644
index 000000000000..6a5253a3cca2
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bash/ls-colors.nix
@@ -0,0 +1,20 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  enable = config.programs.bash.enableLsColors;
+in
+{
+  options = {
+    programs.bash.enableLsColors = mkEnableOption (lib.mdDoc "extra colors in directory listings") // {
+      default = true;
+    };
+  };
+
+  config = mkIf enable {
+    programs.bash.promptPluginInit = ''
+      eval "$(${pkgs.coreutils}/bin/dircolors -b)"
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/bash/undistract-me.nix b/nixpkgs/nixos/modules/programs/bash/undistract-me.nix
new file mode 100644
index 000000000000..587b649377df
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bash/undistract-me.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.bash.undistractMe;
+in
+{
+  options = {
+    programs.bash.undistractMe = {
+      enable = mkEnableOption (lib.mdDoc "notifications when long-running terminal commands complete");
+
+      playSound = mkEnableOption (lib.mdDoc "notification sounds when long-running terminal commands complete");
+
+      timeout = mkOption {
+        default = 10;
+        description = lib.mdDoc ''
+          Number of seconds a command must run before it is considered long-running.
+        '';
+        type = types.int;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    programs.bash.promptPluginInit = ''
+      export LONG_RUNNING_COMMAND_TIMEOUT=${toString cfg.timeout}
+      export UDM_PLAY_SOUND=${if cfg.playSound then "1" else "0"}
+      . "${pkgs.undistract-me}/etc/profile.d/undistract-me.sh"
+    '';
+  };
+
+  meta = {
+    maintainers = with maintainers; [ kira-bruneau ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/bcc.nix b/nixpkgs/nixos/modules/programs/bcc.nix
new file mode 100644
index 000000000000..ff29d56bedb9
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/bcc.nix
@@ -0,0 +1,9 @@
+{ config, pkgs, lib, ... }:
+{
+  options.programs.bcc.enable = lib.mkEnableOption (lib.mdDoc "bcc");
+
+  config = lib.mkIf config.programs.bcc.enable {
+    environment.systemPackages = [ pkgs.bcc ];
+    boot.extraModulePackages = [ pkgs.bcc ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/browserpass.nix b/nixpkgs/nixos/modules/programs/browserpass.nix
new file mode 100644
index 000000000000..a9670a37e618
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/browserpass.nix
@@ -0,0 +1,32 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  options.programs.browserpass.enable = mkEnableOption (lib.mdDoc "Browserpass native messaging host");
+
+  config = mkIf config.programs.browserpass.enable {
+    environment.etc = let
+      appId = "com.github.browserpass.native.json";
+      source = part: "${pkgs.browserpass}/lib/browserpass/${part}/${appId}";
+    in {
+      # chromium
+      "chromium/native-messaging-hosts/${appId}".source = source "hosts/chromium";
+      "chromium/policies/managed/${appId}".source = source "policies/chromium";
+
+      # chrome
+      "opt/chrome/native-messaging-hosts/${appId}".source = source "hosts/chromium";
+      "opt/chrome/policies/managed/${appId}".source = source "policies/chromium";
+
+      # vivaldi
+      "opt/vivaldi/native-messaging-hosts/${appId}".source = source "hosts/chromium";
+      "opt/vivaldi/policies/managed/${appId}".source = source "policies/chromium";
+
+      # brave
+      "opt/brave/native-messaging-hosts/${appId}".source = source "hosts/chromium";
+      "opt/brave/policies/managed/${appId}".source = source "policies/chromium";
+    };
+    programs.firefox.nativeMessagingHosts.packages = [ pkgs.browserpass ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/calls.nix b/nixpkgs/nixos/modules/programs/calls.nix
new file mode 100644
index 000000000000..3d757bc1fc32
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/calls.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.calls;
+in {
+  options = {
+    programs.calls = {
+      enable = mkEnableOption (lib.mdDoc ''
+        GNOME calls: a phone dialer and call handler
+      '');
+    };
+  };
+
+  config = mkIf cfg.enable {
+    programs.dconf.enable = true;
+
+    environment.systemPackages = [
+      pkgs.calls
+    ];
+
+    services.dbus.packages = [
+      pkgs.callaudiod
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/captive-browser.nix b/nixpkgs/nixos/modules/programs/captive-browser.nix
new file mode 100644
index 000000000000..032c0e71f1f4
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/captive-browser.nix
@@ -0,0 +1,156 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.captive-browser;
+
+  inherit (lib)
+    concatStringsSep escapeShellArgs optionalString
+    literalExpression mkEnableOption mkIf mkOption mkOptionDefault types;
+
+  requiresSetcapWrapper = config.boot.kernelPackages.kernelOlder "5.7" && cfg.bindInterface;
+
+  browserDefault = chromium: concatStringsSep " " [
+    ''env XDG_CONFIG_HOME="$PREV_CONFIG_HOME"''
+    ''${chromium}/bin/chromium''
+    ''--user-data-dir=''${XDG_DATA_HOME:-$HOME/.local/share}/chromium-captive''
+    ''--proxy-server="socks5://$PROXY"''
+    ''--host-resolver-rules="MAP * ~NOTFOUND , EXCLUDE localhost"''
+    ''--no-first-run''
+    ''--new-window''
+    ''--incognito''
+    ''-no-default-browser-check''
+    ''http://cache.nixos.org/''
+  ];
+
+  desktopItem = pkgs.makeDesktopItem {
+    name = "captive-browser";
+    desktopName = "Captive Portal Browser";
+    exec = "captive-browser";
+    icon = "nix-snowflake";
+    categories = [ "Network" ];
+  };
+
+  captive-browser-configured = pkgs.writeShellScriptBin "captive-browser" ''
+    export PREV_CONFIG_HOME="$XDG_CONFIG_HOME"
+    export XDG_CONFIG_HOME=${pkgs.writeTextDir "captive-browser.toml" ''
+      browser = """${cfg.browser}"""
+      dhcp-dns = """${cfg.dhcp-dns}"""
+      socks5-addr = """${cfg.socks5-addr}"""
+      ${optionalString cfg.bindInterface ''
+        bind-device = """${cfg.interface}"""
+      ''}
+    ''}
+    exec ${cfg.package}/bin/captive-browser
+  '';
+in
+{
+  ###### interface
+
+  options = {
+    programs.captive-browser = {
+      enable = mkEnableOption (lib.mdDoc "captive browser");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.captive-browser;
+        defaultText = literalExpression "pkgs.captive-browser";
+        description = lib.mdDoc "Which package to use for captive-browser";
+      };
+
+      interface = mkOption {
+        type = types.str;
+        description = lib.mdDoc "your public network interface (wlp3s0, wlan0, eth0, ...)";
+      };
+
+      # the options below are the same as in "captive-browser.toml"
+      browser = mkOption {
+        type = types.str;
+        default = browserDefault pkgs.chromium;
+        defaultText = literalExpression (browserDefault "\${pkgs.chromium}");
+        description = lib.mdDoc ''
+          The shell (/bin/sh) command executed once the proxy starts.
+          When the browser exits, the proxy exits. An extra environment variable, PROXY, is available.
+
+          Here, we use a separate Chrome instance in Incognito mode, so that
+          it can run (and be waited for) alongside the default one, and that
+          it maintains no state across runs. To configure this browser, open a
+          normal window in it; the settings will be preserved.
+
+          @volth: chromium needs to open a plain HTTP website (not HTTPS, nor a redirect to HTTPS!).
+                  Upstream uses http://example.com, but I have seen captive portals whose DNS server resolves "example.com" to 127.0.0.1.
+        '';
+      };
+
+      dhcp-dns = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The shell (/bin/sh) command executed to obtain the DHCP
+          DNS server address. The first match of an IPv4 regex is used.
+          IPv4 only, because let's be real, it's a captive portal.
+        '';
+      };
+
+      socks5-addr = mkOption {
+        type = types.str;
+        default = "localhost:1666";
+        description = lib.mdDoc "the listen address for the SOCKS5 proxy server";
+      };
+
+      bindInterface = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Binds `captive-browser` to the network interface declared in
+          `cfg.interface`. This can be used to avoid collisions
+          with private subnets.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [
+      (pkgs.runCommand "captive-browser-desktop-item" { } ''
+        install -Dm444 -t $out/share/applications ${desktopItem}/share/applications/*.desktop
+      '')
+      captive-browser-configured
+    ];
+
+    programs.captive-browser.dhcp-dns =
+      let
+        iface = prefixes:
+          optionalString cfg.bindInterface (escapeShellArgs (prefixes ++ [ cfg.interface ]));
+      in
+      mkOptionDefault (
+        if config.networking.networkmanager.enable then
+          "${pkgs.networkmanager}/bin/nmcli dev show ${iface []} | ${pkgs.gnugrep}/bin/fgrep IP4.DNS"
+        else if config.networking.dhcpcd.enable then
+          "${pkgs.dhcpcd}/bin/dhcpcd ${iface ["-U"]} | ${pkgs.gnugrep}/bin/fgrep domain_name_servers"
+        else if config.networking.useNetworkd then
+          "${cfg.package}/bin/systemd-networkd-dns ${iface []}"
+        else
+          "${config.security.wrapperDir}/udhcpc --quit --now -f ${iface ["-i"]} -O dns --script ${
+          pkgs.writeShellScript "udhcp-script" ''
+            if [ "$1" = bound ]; then
+              echo "$dns"
+            fi
+          ''}"
+      );
+
+    security.wrappers.udhcpc = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_raw+p";
+      source = "${pkgs.busybox}/bin/udhcpc";
+    };
+
+    security.wrappers.captive-browser = mkIf requiresSetcapWrapper {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_raw+p";
+      source = "${captive-browser-configured}/bin/captive-browser";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/ccache.nix b/nixpkgs/nixos/modules/programs/ccache.nix
new file mode 100644
index 000000000000..567c853e8c7d
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/ccache.nix
@@ -0,0 +1,85 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.programs.ccache;
+in {
+  options.programs.ccache = {
+    # host configuration
+    enable = mkEnableOption (lib.mdDoc "CCache");
+    cacheDir = mkOption {
+      type = types.path;
+      description = lib.mdDoc "CCache directory";
+      default = "/var/cache/ccache";
+    };
+    # target configuration
+    packageNames = mkOption {
+      type = types.listOf types.str;
+      description = lib.mdDoc "Nix top-level packages to be compiled using CCache";
+      default = [];
+      example = [ "wxGTK32" "ffmpeg" "libav_all" ];
+    };
+  };
+
+  config = mkMerge [
+    # host configuration
+    (mkIf cfg.enable {
+      systemd.tmpfiles.rules = [ "d ${cfg.cacheDir} 0770 root nixbld -" ];
+
+      # "nix-ccache --show-stats" and "nix-ccache --clear"
+      security.wrappers.nix-ccache = {
+        owner = "root";
+        group = "nixbld";
+        setuid = false;
+        setgid = true;
+        source = pkgs.writeScript "nix-ccache.pl" ''
+          #!${pkgs.perl}/bin/perl
+
+          %ENV=( CCACHE_DIR => '${cfg.cacheDir}' );
+          sub untaint {
+            my $v = shift;
+            return '-C' if $v eq '-C' || $v eq '--clear';
+            return '-V' if $v eq '-V' || $v eq '--version';
+            return '-s' if $v eq '-s' || $v eq '--show-stats';
+            return '-z' if $v eq '-z' || $v eq '--zero-stats';
+            exec('${pkgs.ccache}/bin/ccache', '-h');
+          }
+          exec('${pkgs.ccache}/bin/ccache', map { untaint $_ } @ARGV);
+        '';
+      };
+    })
+
+    # target configuration
+    (mkIf (cfg.packageNames != []) {
+      nixpkgs.overlays = [
+        (self: super: genAttrs cfg.packageNames (pn: super.${pn}.override { stdenv = builtins.trace "with ccache: ${pn}" self.ccacheStdenv; }))
+
+        (self: super: {
+          ccacheWrapper = super.ccacheWrapper.override {
+            extraConfig = ''
+              export CCACHE_COMPRESS=1
+              export CCACHE_DIR="${cfg.cacheDir}"
+              export CCACHE_UMASK=007
+              if [ ! -d "$CCACHE_DIR" ]; then
+                echo "====="
+                echo "Directory '$CCACHE_DIR' does not exist"
+                echo "Please create it with:"
+                echo "  sudo mkdir -m0770 '$CCACHE_DIR'"
+                echo "  sudo chown root:nixbld '$CCACHE_DIR'"
+                echo "====="
+                exit 1
+              fi
+              if [ ! -w "$CCACHE_DIR" ]; then
+                echo "====="
+                echo "Directory '$CCACHE_DIR' is not accessible for user $(whoami)"
+                echo "Please verify its access permissions"
+                echo "====="
+                exit 1
+              fi
+            '';
+          };
+        })
+      ];
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/programs/cdemu.nix b/nixpkgs/nixos/modules/programs/cdemu.nix
new file mode 100644
index 000000000000..7eba4d29d83b
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/cdemu.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.cdemu;
+in {
+
+  options = {
+    programs.cdemu = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable {command}`cdemu` for members of
+          {option}`programs.cdemu.group`.
+        '';
+      };
+      group = mkOption {
+        type = types.str;
+        default = "cdrom";
+        description = lib.mdDoc ''
+          Group that users must be in to use {command}`cdemu`.
+        '';
+      };
+      gui = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to install the {command}`cdemu` GUI (gCDEmu).
+        '';
+      };
+      image-analyzer = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to install the image analyzer.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    boot = {
+      extraModulePackages = [ config.boot.kernelPackages.vhba ];
+      kernelModules = [ "vhba" ];
+    };
+
+    services = {
+      udev.extraRules = ''
+        KERNEL=="vhba_ctl", MODE="0660", OWNER="root", GROUP="${cfg.group}"
+      '';
+      dbus.packages = [ pkgs.cdemu-daemon ];
+    };
+
+    users.groups.${config.programs.cdemu.group} = {};
+
+    # Systemd User service
+    # manually adapted from example in source package:
+    # https://sourceforge.net/p/cdemu/code/ci/master/tree/cdemu-daemon/service-example/cdemu-daemon.service
+    systemd.user.services.cdemu-daemon.description = "CDEmu daemon";
+    systemd.user.services.cdemu-daemon.serviceConfig = {
+      Type = "dbus";
+      BusName = "net.sf.cdemu.CDEmuDaemon";
+      ExecStart = "${pkgs.cdemu-daemon}/bin/cdemu-daemon --config-file \"%h/.config/cdemu-daemon\"";
+      Restart = "no";
+    };
+
+    environment.systemPackages =
+      [ pkgs.cdemu-daemon pkgs.cdemu-client ]
+      ++ optional cfg.gui pkgs.gcdemu
+      ++ optional cfg.image-analyzer pkgs.image-analyzer;
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/cfs-zen-tweaks.nix b/nixpkgs/nixos/modules/programs/cfs-zen-tweaks.nix
new file mode 100644
index 000000000000..fc05bcd11ecb
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/cfs-zen-tweaks.nix
@@ -0,0 +1,34 @@
+# CFS Zen Tweaks
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.cfs-zen-tweaks;
+
+in
+
+{
+
+  meta = {
+    maintainers = with maintainers; [ mkg20001 ];
+  };
+
+  options = {
+    programs.cfs-zen-tweaks.enable = mkEnableOption (lib.mdDoc "CFS Zen Tweaks");
+  };
+
+  config = mkIf cfg.enable {
+    systemd.packages = [ pkgs.cfs-zen-tweaks ];
+
+    systemd.services.set-cfs-tweaks.wantedBy = [
+      "multi-user.target"
+      "suspend.target"
+      "hibernate.target"
+      "hybrid-sleep.target"
+      "suspend-then-hibernate.target"
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/chromium.nix b/nixpkgs/nixos/modules/programs/chromium.nix
new file mode 100644
index 000000000000..4024f337dfcd
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/chromium.nix
@@ -0,0 +1,115 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.chromium;
+
+  defaultProfile = filterAttrs (k: v: v != null) {
+    HomepageLocation = cfg.homepageLocation;
+    DefaultSearchProviderEnabled = cfg.defaultSearchProviderEnabled;
+    DefaultSearchProviderSearchURL = cfg.defaultSearchProviderSearchURL;
+    DefaultSearchProviderSuggestURL = cfg.defaultSearchProviderSuggestURL;
+    ExtensionInstallForcelist = cfg.extensions;
+  };
+in
+
+{
+  ###### interface
+
+  options = {
+    programs.chromium = {
+      enable = mkEnableOption (lib.mdDoc "{command}`chromium` policies");
+
+      extensions = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of chromium extensions to install.
+          To find an extension's id, look at the URL of its page on the
+          [chrome web store](https://chrome.google.com/webstore/category/extensions).
+          To install a chromium extension not included in the chrome web
+          store, append to the extension id a semicolon ";" followed by a URL
+          pointing to an Update Manifest XML file. See
+          [ExtensionInstallForcelist](https://cloud.google.com/docs/chrome-enterprise/policies/?policy=ExtensionInstallForcelist)
+          for additional details.
+        '';
+        default = [];
+        example = literalExpression ''
+          [
+            "chlffgpmiacpedhhbkiomidkjlcfhogd" # pushbullet
+            "mbniclmhobmnbdlbpiphghaielnnpgdp" # lightshot
+            "gcbommkclmclpchllfjekcdonpmejbdp" # https everywhere
+            "cjpalhdlnbpafiamejdnhcphjbkeiagm" # ublock origin
+          ]
+        '';
+      };
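+
+      # A hedged note: an entry for an extension outside the web store combines
+      # the id with an update-manifest URL, e.g. (both values hypothetical):
+      #   "aaaaaaaaaabbbbbbbbbbcccccccccc;https://example.org/updates.xml"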
+
+      homepageLocation = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Chromium default homepage";
+        default = null;
+        example = "https://nixos.org";
+      };
+
+      defaultSearchProviderEnabled = mkOption {
+        type = types.nullOr types.bool;
+        description = lib.mdDoc "Enable the default search provider.";
+        default = null;
+        example = true;
+      };
+
+      defaultSearchProviderSearchURL = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Chromium default search provider url.";
+        default = null;
+        example =
+          "https://encrypted.google.com/search?q={searchTerms}&{google:RLZ}{google:originalQueryForSuggestion}{google:assistedQueryStats}{google:searchFieldtrialParameter}{google:searchClient}{google:sourceId}{google:instantExtendedEnabledParameter}ie={inputEncoding}";
+      };
+
+      defaultSearchProviderSuggestURL = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Chromium default search provider url for suggestions.";
+        default = null;
+        example =
+          "https://encrypted.google.com/complete/search?output=chrome&q={searchTerms}";
+      };
+
+      extraOpts = mkOption {
+        type = types.attrs;
+        description = lib.mdDoc ''
+          Extra chromium policy options. A list of available policies
+          can be found in the Chrome Enterprise documentation:
+          <https://cloud.google.com/docs/chrome-enterprise/policies/>
+          Make sure the selected policy is supported on Linux and your browser version.
+        '';
+        default = {};
+        example = literalExpression ''
+          {
+            "BrowserSignin" = 0;
+            "SyncDisabled" = true;
+            "PasswordManagerEnabled" = false;
+            "SpellcheckEnabled" = true;
+            "SpellcheckLanguage" = [
+                                     "de"
+                                     "en-US"
+                                   ];
+          }
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+    # for chromium
+    environment.etc."chromium/policies/managed/default.json".text = builtins.toJSON defaultProfile;
+    environment.etc."chromium/policies/managed/extra.json".text = builtins.toJSON cfg.extraOpts;
+    # for google-chrome https://www.chromium.org/administrators/linux-quick-start
+    environment.etc."opt/chrome/policies/managed/default.json".text = builtins.toJSON defaultProfile;
+    environment.etc."opt/chrome/policies/managed/extra.json".text = builtins.toJSON cfg.extraOpts;
+    # for brave
+    environment.etc."brave/policies/managed/default.json".text = builtins.toJSON defaultProfile;
+    environment.etc."brave/policies/managed/extra.json".text = builtins.toJSON cfg.extraOpts;
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/clash-verge.nix b/nixpkgs/nixos/modules/programs/clash-verge.nix
new file mode 100644
index 000000000000..57a1c0377edb
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/clash-verge.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+{
+  options.programs.clash-verge = {
+    enable = lib.mkEnableOption (lib.mdDoc "Clash Verge");
+    autoStart = lib.mkEnableOption (lib.mdDoc "Clash Verge auto launch");
+    tunMode = lib.mkEnableOption (lib.mdDoc "Clash Verge TUN mode");
+  };
+
+  config =
+    let
+      cfg = config.programs.clash-verge;
+    in
+    lib.mkIf cfg.enable {
+
+      environment.systemPackages = [
+        pkgs.clash-verge
+        (lib.mkIf cfg.autoStart (pkgs.makeAutostartItem {
+          name = "clash-verge";
+          package = pkgs.clash-verge;
+        }))
+      ];
+
+      security.wrappers.clash-verge = lib.mkIf cfg.tunMode {
+        owner = "root";
+        group = "root";
+        capabilities = "cap_net_bind_service,cap_net_admin=+ep";
+        source = "${lib.getExe pkgs.clash-verge}";
+      };
+    };
+
+  meta.maintainers = with lib.maintainers; [ zendo ];
+}
diff --git a/nixpkgs/nixos/modules/programs/cnping.nix b/nixpkgs/nixos/modules/programs/cnping.nix
new file mode 100644
index 000000000000..143267fc9a42
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/cnping.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.cnping;
+in
+{
+  options = {
+    programs.cnping = {
+      enable = mkEnableOption (lib.mdDoc "a setcap wrapper for cnping");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers.cnping = {
+      source = "${pkgs.cnping}/bin/cnping";
+      capabilities = "cap_net_raw+ep";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/command-not-found/command-not-found.nix b/nixpkgs/nixos/modules/programs/command-not-found/command-not-found.nix
new file mode 100644
index 000000000000..b5c7626bd207
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/command-not-found/command-not-found.nix
@@ -0,0 +1,95 @@
+# This module provides suggestions of packages to install if the user
+# tries to run a missing command in Bash.  This is implemented using a
+# SQLite database that maps program names to Nix package names (e.g.,
+# "pdflatex" is mapped to "tetex").
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.command-not-found;
+  commandNotFound = pkgs.substituteAll {
+    name = "command-not-found";
+    dir = "bin";
+    src = ./command-not-found.pl;
+    isExecutable = true;
+    inherit (cfg) dbPath;
+    perl = pkgs.perl.withPackages (p: [ p.DBDSQLite p.StringShellQuote ]);
+  };
+
+in
+
+{
+  options.programs.command-not-found = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether interactive shells should show which Nix package (if
+        any) provides a missing command.
+      '';
+    };
+
+    dbPath = mkOption {
+      default = "/nix/var/nix/profiles/per-user/root/channels/nixos/programs.sqlite" ;
+      description = lib.mdDoc ''
+        Absolute path to programs.sqlite.
+
+        By default this file will be provided by your channel
+        (nixexprs.tar.xz).
+      '';
+      type = types.path;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    programs.bash.interactiveShellInit =
+      ''
+        # This function is called whenever a command is not found.
+        command_not_found_handle() {
+          local p='${commandNotFound}/bin/command-not-found'
+          if [ -x "$p" ] && [ -f '${cfg.dbPath}' ]; then
+            # Run the helper program.
+            "$p" "$@"
+            # Retry the command if we just installed it.
+            if [ $? = 126 ]; then
+              "$@"
+            else
+              return 127
+            fi
+          else
+            echo "$1: command not found" >&2
+            return 127
+          fi
+        }
+      '';
+
+    programs.zsh.interactiveShellInit =
+      ''
+        # This function is called whenever a command is not found.
+        command_not_found_handler() {
+          local p='${commandNotFound}/bin/command-not-found'
+          if [ -x "$p" ] && [ -f '${cfg.dbPath}' ]; then
+            # Run the helper program.
+            "$p" "$@"
+
+            # Retry the command if we just installed it.
+            if [ $? = 126 ]; then
+              "$@"
+            else
+              return 127
+            fi
+          else
+            # Indicate that there was an error so ZSH falls back to its default handler
+            echo "$1: command not found" >&2
+            return 127
+          fi
+        }
+      '';
+
+    environment.systemPackages = [ commandNotFound ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/command-not-found/command-not-found.pl b/nixpkgs/nixos/modules/programs/command-not-found/command-not-found.pl
new file mode 100644
index 000000000000..72e246c81ae9
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/command-not-found/command-not-found.pl
@@ -0,0 +1,77 @@
+#! @perl@/bin/perl -w
+
+use strict;
+use DBI;
+use DBD::SQLite;
+use String::ShellQuote;
+use Config;
+
+my $program = $ARGV[0];
+
+my $dbPath = "@dbPath@";
+
+my $dbh = DBI->connect("dbi:SQLite:dbname=$dbPath", "", "")
+    or die "cannot open database `$dbPath'";
+$dbh->{RaiseError} = 0;
+$dbh->{PrintError} = 0;
+
+my $system = $ENV{"NIX_SYSTEM"} // $Config{myarchname};
+
+my $res = $dbh->selectall_arrayref(
+    "select package from Programs where system = ? and name = ?",
+    { Slice => {} }, $system, $program);
+
+my $len = !defined $res ? 0 : scalar @$res;
+
+if ($len == 0) {
+    print STDERR "$program: command not found\n";
+} elsif ($len == 1) {
+    my $package = @$res[0]->{package};
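+    # NIX_AUTO_RUN: instead of only suggesting the package, run the missing
+    # program straight away via nix-shell; NIX_AUTO_RUN_INTERACTIVE asks for
+    # confirmation first.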
+    if ($ENV{"NIX_AUTO_RUN"} // "") {
+        if ($ENV{"NIX_AUTO_RUN_INTERACTIVE"} // "") {
+            while (1) {
+                print STDERR "'$program' from package '$package' will be run, confirm? [yn]: ";
+                chomp(my $confirm = <STDIN>);
+                if (lc $confirm eq "n") {
+                    exit 0;
+                } elsif (lc $confirm eq "y") {
+                    last;
+                }
+            }
+        }
+        exec("nix-shell", "-p", $package, "--run", shell_quote("exec", @ARGV));
+    } else {
+        print STDERR <<EOF;
+The program '$program' is not in your PATH. You can make it available in an
+ephemeral shell by typing:
+  nix-shell -p $package
+EOF
+    }
+} else {
+    if ($ENV{"NIX_AUTO_RUN"} // "") {
+        print STDERR "Select a package that provides '$program':\n";
+        for my $i (0 .. $len - 1) {
+            print STDERR "  [", $i + 1, "]: @$res[$i]->{package}\n";
+        }
+        my $choice = 0;
+        while (1) { # exec will break this loop
+            no warnings "numeric";
+            print STDERR "Your choice [1-${len}]: ";
+            # non-numeric input converts to 0, so 0 is treated as invalid
+            # and valid choices start from 1
+            $choice = <STDIN> + 0;
+            if (1 <= $choice && $choice <= $len) {
+                exec("nix-shell", "-p", @$res[$choice - 1]->{package},
+                    "--run", shell_quote("exec", @ARGV));
+            }
+        }
+    } else {
+        print STDERR <<EOF;
+The program '$program' is not in your PATH. It is provided by several packages.
+You can make it available in an ephemeral shell by typing one of the following:
+EOF
+        print STDERR "  nix-shell -p $_->{package}\n" foreach @$res;
+    }
+}
+
+exit 127;
diff --git a/nixpkgs/nixos/modules/programs/criu.nix b/nixpkgs/nixos/modules/programs/criu.nix
new file mode 100644
index 000000000000..9f03b0c6431a
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/criu.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.criu;
+in {
+
+  options = {
+    programs.criu = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Install {command}`criu` along with necessary kernel options.
+        '';
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "CHECKPOINT_RESTORE")
+    ];
+    boot.kernel.features.criu = true;
+    environment.systemPackages = [ pkgs.criu ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/darling.nix b/nixpkgs/nixos/modules/programs/darling.nix
new file mode 100644
index 000000000000..c4e1c73b5c29
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/darling.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.darling;
+in {
+  options = {
+    programs.darling = {
+      enable = lib.mkEnableOption (lib.mdDoc "Darling, a Darwin/macOS compatibility layer for Linux");
+      package = lib.mkPackageOptionMD pkgs "darling" {};
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    security.wrappers.darling = {
+      source = lib.getExe cfg.package;
+      owner = "root";
+      group = "root";
+      setuid = true;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/dconf.nix b/nixpkgs/nixos/modules/programs/dconf.nix
new file mode 100644
index 000000000000..cf53658c4fad
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/dconf.nix
@@ -0,0 +1,229 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.dconf;
+
+  # Compile keyfiles to dconf DB
+  compileDconfDb = dir: pkgs.runCommand "dconf-db"
+    {
+      nativeBuildInputs = [ (lib.getBin pkgs.dconf) ];
+    } "dconf compile $out ${dir}";
+
+  # Check if dconf keyfiles are valid
+  checkDconfKeyfiles = dir: pkgs.runCommand "check-dconf-keyfiles"
+    {
+      nativeBuildInputs = [ (lib.getBin pkgs.dconf) ];
+    } ''
+    if [[ -f ${dir} ]]; then
+      echo "dconf keyfiles should be a directory but a file is provided: ${dir}"
+      exit 1
+    fi
+
+    dconf compile db ${dir} || (
+      echo "The dconf keyfiles are invalid: ${dir}"
+      exit 1
+    )
+    cp -R ${dir} $out
+  '';
+
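+  # Derive a lock entry ("/<section>/<key>") for every key in the settings attrset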
+  mkAllLocks = settings: lib.flatten (
+    lib.mapAttrsToList (k: v: lib.mapAttrsToList (k': _: "/${k}/${k'}") v) settings);
+
+  # Generate dconf DB from dconfDatabase and keyfiles
+  mkDconfDb = val: compileDconfDb (pkgs.symlinkJoin {
+    name = "nixos-generated-dconf-keyfiles";
+    paths = [
+      (pkgs.writeTextDir "nixos-generated-dconf-keyfiles" (lib.generators.toDconfINI val.settings))
+      (pkgs.writeTextDir "locks/nixos-generated-dconf-locks" (lib.concatStringsSep "\n"
+        (if val.lockAll then mkAllLocks val.settings else val.locks)
+      ))
+    ] ++ (map checkDconfKeyfiles val.keyfiles);
+  });
+
+  # Check if a dconf DB file is valid. The dconf CLI doesn't return 1 when it can't
+  # open the database file, so we have to check whether the output is empty.
+  checkDconfDb = file: pkgs.runCommand "check-dconf-db"
+    {
+      nativeBuildInputs = [ (lib.getBin pkgs.dconf) ];
+    } ''
+    if [[ -d ${file} ]]; then
+      echo "dconf DB should be a file but a directory is provided: ${file}"
+      exit 1
+    fi
+
+    echo "file-db:${file}" > profile
+    DCONF_PROFILE=$(pwd)/profile dconf dump / > output 2> error
+    if [[ ! -s output ]] && [[ -s error ]]; then
+      cat error
+      echo "The dconf DB file is invalid: ${file}"
+      exit 1
+    fi
+
+    cp ${file} $out
+  '';
+
+  # Generate dconf profile
+  mkDconfProfile = name: value:
+    if lib.isDerivation value || lib.isPath value then
+      pkgs.runCommand "dconf-profile" { } ''
+        if [[ -d ${value} ]]; then
+          echo "Dconf profile should be a file but a directory is provided."
+          exit 1
+        fi
+        mkdir -p $out/etc/dconf/profile/
+        cp ${value} $out/etc/dconf/profile/${name}
+      ''
+    else
+      pkgs.writeTextDir "etc/dconf/profile/${name}" (
+        lib.concatMapStrings (x: "${x}\n") ((
+          lib.optional value.enableUserDb "user-db:user"
+        ) ++ (
+          map
+            (value:
+              let
+                db = if lib.isAttrs value && !lib.isDerivation value then mkDconfDb value else checkDconfDb value;
+              in
+              "file-db:${db}")
+            value.databases
+        ))
+      );
+
+  dconfDatabase = with lib.types; submodule {
+    options = {
+      keyfiles = lib.mkOption {
+        type = listOf (oneOf [ path package ]);
+        default = [ ];
+        description = lib.mdDoc "A list of dconf keyfile directories.";
+      };
+      settings = lib.mkOption {
+        type = attrs;
+        default = { };
+        description = lib.mdDoc "An attrset used to generate dconf keyfile.";
+        example = lib.literalExpression ''
+          with lib.gvariant;
+          {
+            "com/raggesilver/BlackBox" = {
+              scrollback-lines = mkUint32 10000;
+              theme-dark = "Tommorow Night";
+            };
+          }
+        '';
+      };
+      locks = lib.mkOption {
+        type = with lib.types; listOf str;
+        default = [ ];
+        description = lib.mdDoc ''
+          A list of dconf keys to be locked down. This doesn't take effect if `lockAll`
+          is set.
+        '';
+        example = lib.literalExpression ''
+          [ "/org/gnome/desktop/background/picture-uri" ]
+        '';
+      };
+      lockAll = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Lockdown all dconf keys in `settings`.";
+      };
+    };
+  };
+
+  dconfProfile = with lib.types; submodule {
+    options = {
+      enableUserDb = lib.mkOption {
+        type = bool;
+        default = true;
+        description = lib.mdDoc "Add `user-db:user` at the beginning of the profile.";
+      };
+
+      databases = lib.mkOption {
+        type = with lib.types; listOf (oneOf [
+          path
+          package
+          dconfDatabase
+        ]);
+        default = [ ];
+        description = lib.mdDoc ''
+          List of data sources for the profile. An element can be an attrset,
+          or the path of an already compiled database. Each element is converted
+          to a file-db.
+
+          A key is searched from top to bottom and the first result takes
+          priority. If a lock for a particular key is installed then the value from
+          the last database in the profile where the key is locked will be used.
+          This can be used to enforce mandatory settings.
+        '';
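+        # Illustrative only; the key and value below are assumptions, not defaults.
+        example = lib.literalExpression ''
+          [
+            {
+              settings."org/gnome/desktop/interface".gtk-theme = "Adwaita";
+              locks = [ "/org/gnome/desktop/interface/gtk-theme" ];
+            }
+          ]
+        '';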
+      };
+    };
+  };
+
+in
+{
+  options = {
+    programs.dconf = {
+      enable = lib.mkEnableOption (lib.mdDoc "dconf");
+
+      profiles = lib.mkOption {
+        type = with lib.types; attrsOf (oneOf [
+          path
+          package
+          dconfProfile
+        ]);
+        default = { };
+        description = lib.mdDoc ''
+          Attrset of dconf profiles. By default the `user` profile is used which
+          ends up in `/etc/dconf/profile/user`.
+        '';
+        example = lib.literalExpression ''
+          {
+            # A "user" profile with a database
+            user.databases = [
+              {
+                settings = { };
+              }
+            ];
+            # A "bar" profile from a package
+            bar = pkgs.bar-dconf-profile;
+            # A "foo" profile from a path
+            foo = ''${./foo}
+          };
+        '';
+      };
+
+      packages = lib.mkOption {
+        type = lib.types.listOf lib.types.package;
+        default = [ ];
+        description = lib.mdDoc "A list of packages which provide dconf profiles and databases in {file}`/etc/dconf`.";
+      };
+    };
+  };
+
+  config = lib.mkIf (cfg.profiles != { } || cfg.enable) {
+    programs.dconf.packages = lib.mapAttrsToList mkDconfProfile cfg.profiles;
+
+    environment.etc.dconf = lib.mkIf (cfg.packages != [ ]) {
+      source = pkgs.symlinkJoin {
+        name = "dconf-system-config";
+        paths = map (x: "${x}/etc/dconf") cfg.packages;
+        nativeBuildInputs = [ (lib.getBin pkgs.dconf) ];
+        postBuild = ''
+          if test -d $out/db; then
+            dconf update $out/db
+          fi
+        '';
+      };
+    };
+
+    services.dbus.packages = [ pkgs.dconf ];
+
+    systemd.packages = [ pkgs.dconf ];
+
+    # For dconf executable
+    environment.systemPackages = [ pkgs.dconf ];
+
+    environment.sessionVariables = lib.mkIf cfg.enable {
+      # Needed for unwrapped applications
+      GIO_EXTRA_MODULES = [ "${pkgs.dconf.lib}/lib/gio/modules" ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/digitalbitbox/default.md b/nixpkgs/nixos/modules/programs/digitalbitbox/default.md
new file mode 100644
index 000000000000..9bca14e97ffe
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/digitalbitbox/default.md
@@ -0,0 +1,47 @@
+# Digital Bitbox {#module-programs-digitalbitbox}
+
+Digital Bitbox is a hardware wallet and second-factor authenticator.
+
+The `digitalbitbox` programs module may be installed by setting
+`programs.digitalbitbox.enable` to `true` in a manner similar to
+```
+programs.digitalbitbox.enable = true;
+```
+and bundles the `digitalbitbox` package (see [](#sec-digitalbitbox-package)),
+which contains the `dbb-app` and `dbb-cli` binaries, along with the hardware
+module (see [](#sec-digitalbitbox-hardware-module)) which sets up the necessary
+udev rules to access the device.
+
+Enabling the digitalbitbox module is the easiest way to get a
+Digital Bitbox device working on your system.
+
+For more information, see <https://digitalbitbox.com/start_linux>.
+
+## Package {#sec-digitalbitbox-package}
+
+The binaries, `dbb-app` (a GUI tool) and `dbb-cli` (a CLI tool), are available
+through the `digitalbitbox` package which could be installed as follows:
+```
+environment.systemPackages = [
+  pkgs.digitalbitbox
+];
+```
+
+## Hardware {#sec-digitalbitbox-hardware-module}
+
+The digitalbitbox hardware package enables the udev rules for Digital Bitbox
+devices and may be installed as follows:
+```
+hardware.digitalbitbox.enable = true;
+```
+
+To alter the udev rules, one may provide different values for the
+`udevRule51` and `udevRule52` attributes by overriding the package as follows:
+```
+programs.digitalbitbox = {
+  enable = true;
+  package = pkgs.digitalbitbox.override {
+    udevRule51 = "something else";
+  };
+};
+```
diff --git a/nixpkgs/nixos/modules/programs/digitalbitbox/default.nix b/nixpkgs/nixos/modules/programs/digitalbitbox/default.nix
new file mode 100644
index 000000000000..5ee6cdafe63a
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/digitalbitbox/default.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.digitalbitbox;
+in
+
+{
+  options.programs.digitalbitbox = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Installs the Digital Bitbox application and enables the complementary hardware module.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.digitalbitbox;
+      defaultText = literalExpression "pkgs.digitalbitbox";
+      description = lib.mdDoc "The Digital Bitbox package to use. This can be used to install a package with udev rules that differ from the defaults.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    hardware.digitalbitbox = {
+      enable = true;
+      package = cfg.package;
+    };
+  };
+
+  meta = {
+    doc = ./default.md;
+    maintainers = with lib.maintainers; [ vidbina ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/direnv.nix b/nixpkgs/nixos/modules/programs/direnv.nix
new file mode 100644
index 000000000000..2566fa7699bb
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/direnv.nix
@@ -0,0 +1,137 @@
+{
+  lib,
+  config,
+  pkgs,
+  ...
+}: let
+  cfg = config.programs.direnv;
+in {
+  options.programs.direnv = {
+
+    enable = lib.mkEnableOption (lib.mdDoc ''
+      direnv integration. Takes care of both installation and
+      setting up the sourcing of the shell. Additionally enables nix-direnv
+      integration. Note that you need to log out and back in for this change to apply
+    '');
+
+    package = lib.mkPackageOptionMD pkgs "direnv" {};
+
+    direnvrcExtra = lib.mkOption {
+      type = lib.types.lines;
+      default = "";
+      example = ''
+        export FOO="foo"
+        echo "loaded direnv!"
+      '';
+      description = lib.mdDoc ''
+        Extra lines to append to the sourced direnvrc
+      '';
+    };
+
+    silent = lib.mkEnableOption (lib.mdDoc ''
+      the hiding of direnv logging
+    '');
+
+    loadInNixShell =
+      lib.mkEnableOption (lib.mdDoc ''
+        loading direnv in `nix-shell`, `nix shell` or `nix develop`
+      '')
+      // {
+        default = true;
+      };
+
+    nix-direnv = {
+      enable =
+        (lib.mkEnableOption (lib.mdDoc ''
+          a faster, persistent implementation of use_nix and use_flake, to replace the built-in one
+        ''))
+        // {
+          default = true;
+        };
+
+      package = lib.mkPackageOptionMD pkgs "nix-direnv" {};
+    };
+  };
+
+  imports = [
+    (lib.mkRemovedOptionModule ["programs" "direnv" "persistDerivations"] "persistDerivations was removed as it is no longer necessary")
+  ];
+
+  config = lib.mkIf cfg.enable {
+
+    programs = {
+      zsh.interactiveShellInit = ''
+        if ${lib.boolToString cfg.loadInNixShell} || printenv PATH | grep -vqc '/nix/store'; then
+         eval "$(${lib.getExe cfg.package} hook zsh)"
+        fi
+      '';
+
+      #$NIX_GCROOT for "nix develop" https://github.com/NixOS/nix/blob/6db66ebfc55769edd0c6bc70fcbd76246d4d26e0/src/nix/develop.cc#L530
+      #$IN_NIX_SHELL for "nix-shell"
+      bash.interactiveShellInit = ''
+        if ${lib.boolToString cfg.loadInNixShell} || [ -z "$IN_NIX_SHELL$NIX_GCROOT$(printenv PATH | grep '/nix/store')" ] ; then
+         eval "$(${lib.getExe cfg.package} hook bash)"
+        fi
+      '';
+
+      fish.interactiveShellInit = ''
+        if ${lib.boolToString cfg.loadInNixShell};
+        or printenv PATH | grep -vqc '/nix/store';
+         ${lib.getExe cfg.package} hook fish | source
+        end
+      '';
+    };
+
+    environment = {
+      systemPackages =
+        if cfg.loadInNixShell then [cfg.package]
+        else [
+          #direnv has a fish library which sources direnv for some reason
+          (cfg.package.overrideAttrs (old: {
+            installPhase =
+              (old.installPhase or "")
+              + ''
+                rm -rf $out/share/fish
+              '';
+          }))
+        ];
+
+      variables = {
+        DIRENV_CONFIG = "/etc/direnv";
+        DIRENV_LOG_FORMAT = lib.mkIf cfg.silent "";
+      };
+
+      etc = {
+        "direnv/direnvrc".text = ''
+          ${lib.optionalString cfg.nix-direnv.enable ''
+            #Load nix-direnv
+            source ${cfg.nix-direnv.package}/share/nix-direnv/direnvrc
+          ''}
+
+           #Load direnvrcExtra
+           ${cfg.direnvrcExtra}
+
+           #Load user-configuration if present (~/.direnvrc or ~/.config/direnv/direnvrc)
+           direnv_config_dir_home="''${DIRENV_CONFIG_HOME:-''${XDG_CONFIG_HOME:-$HOME/.config}/direnv}"
+           if [[ -f $direnv_config_dir_home/direnvrc ]]; then
+             source "$direnv_config_dir_home/direnvrc" >&2
+           elif [[ -f $HOME/.direnvrc ]]; then
+             source "$HOME/.direnvrc" >&2
+           fi
+
+           unset direnv_config_dir_home
+        '';
+
+        "direnv/lib/zz-user.sh".text = ''
+          direnv_config_dir_home="''${DIRENV_CONFIG_HOME:-''${XDG_CONFIG_HOME:-$HOME/.config}/direnv}"
+
+          for lib in "$direnv_config_dir_home/lib/"*.sh; do
+            source "$lib"
+          done
+
+          unset direnv_config_dir_home
+        '';
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/dmrconfig.nix b/nixpkgs/nixos/modules/programs/dmrconfig.nix
new file mode 100644
index 000000000000..20a0dc9556da
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/dmrconfig.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.dmrconfig;
+
+in {
+  meta.maintainers = with maintainers; [ ];
+
+  ###### interface
+  options = {
+    programs.dmrconfig = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to configure the system to enable use of dmrconfig. This
+          enables the required udev rules and installs the program.
+        '';
+        relatedPackages = [ "dmrconfig" ];
+      };
+
+      package = mkOption {
+        default = pkgs.dmrconfig;
+        type = types.package;
+        defaultText = literalExpression "pkgs.dmrconfig";
+        description = lib.mdDoc "dmrconfig derivation to use";
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    services.udev.packages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/droidcam.nix b/nixpkgs/nixos/modules/programs/droidcam.nix
new file mode 100644
index 000000000000..c9b4457d1d18
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/droidcam.nix
@@ -0,0 +1,16 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+{
+  options.programs.droidcam = {
+    enable = mkEnableOption (lib.mdDoc "DroidCam client");
+  };
+
+  config = lib.mkIf config.programs.droidcam.enable {
+    environment.systemPackages = [ pkgs.droidcam ];
+
+    boot.extraModulePackages = [ config.boot.kernelPackages.v4l2loopback ];
+    boot.kernelModules = [ "v4l2loopback" "snd-aloop" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/ecryptfs.nix b/nixpkgs/nixos/modules/programs/ecryptfs.nix
new file mode 100644
index 000000000000..63c1a3ad4419
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/ecryptfs.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.ecryptfs;
+
+in {
+  options.programs.ecryptfs = {
+    enable = mkEnableOption (lib.mdDoc "ecryptfs setuid mount wrappers");
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers = {
+
+      "mount.ecryptfs_private" = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${lib.getBin pkgs.ecryptfs}/bin/mount.ecryptfs_private";
+      };
+      "umount.ecryptfs_private" = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${lib.getBin pkgs.ecryptfs}/bin/umount.ecryptfs_private";
+      };
+
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/environment.nix b/nixpkgs/nixos/modules/programs/environment.nix
new file mode 100644
index 000000000000..6cf9257d035a
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/environment.nix
@@ -0,0 +1,61 @@
+# This module defines a standard configuration for NixOS global environment.
+
+# Most of the stuff here should probably be moved elsewhere sometime.
+
+{ config, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.environment;
+
+in
+
+{
+
+  config = {
+
+    environment.variables =
+      { NIXPKGS_CONFIG = "/etc/nix/nixpkgs-config.nix";
+        # note: many programs exec() this directly, so default options for less must not
+        # be specified here; do so in the default value of programs.less.envVariables instead
+        PAGER = mkDefault "less";
+        EDITOR = mkDefault "nano";
+      };
+
+    # since we set PAGER to this above, make sure it's installed
+    programs.less.enable = true;
+
+    environment.profiles = mkAfter
+      [ "/nix/var/nix/profiles/default"
+        "/run/current-system/sw"
+      ];
+
+    environment.sessionVariables =
+      {
+        XDG_CONFIG_DIRS = [ "/etc/xdg" ]; # needs to be before profile-relative paths to allow changes through environment.etc
+      };
+
+    # TODO: move most of these elsewhere
+    environment.profileRelativeSessionVariables =
+      { PATH = [ "/bin" ];
+        INFOPATH = [ "/info" "/share/info" ];
+        QTWEBKIT_PLUGIN_PATH = [ "/lib/mozilla/plugins/" ];
+        GTK_PATH = [ "/lib/gtk-2.0" "/lib/gtk-3.0" "/lib/gtk-4.0" ];
+        XDG_CONFIG_DIRS = [ "/etc/xdg" ];
+        XDG_DATA_DIRS = [ "/share" ];
+        LIBEXEC_PATH = [ "/lib/libexec" ];
+      };
+
+    environment.pathsToLink = [ "/lib/gtk-2.0" "/lib/gtk-3.0" "/lib/gtk-4.0" ];
+
+    environment.extraInit =
+      ''
+         export NIX_USER_PROFILE_DIR="/nix/var/nix/profiles/per-user/$USER"
+         export NIX_PROFILES="${concatStringsSep " " (reverseList cfg.profiles)}"
+      '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/evince.nix b/nixpkgs/nixos/modules/programs/evince.nix
new file mode 100644
index 000000000000..9ed5ea0feb04
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/evince.nix
@@ -0,0 +1,51 @@
+# Evince.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let cfg = config.programs.evince;
+
+in {
+
+  # Added 2019-08-09
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "evince" "enable" ]
+      [ "programs" "evince" "enable" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    programs.evince = {
+
+      enable = mkEnableOption
+        (lib.mdDoc "Evince, the GNOME document viewer");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.evince;
+        defaultText = literalExpression "pkgs.evince";
+        description = lib.mdDoc "Evince derivation to use.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.programs.evince.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    services.dbus.packages = [ cfg.package ];
+
+    systemd.packages = [ cfg.package ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/extra-container.nix b/nixpkgs/nixos/modules/programs/extra-container.nix
new file mode 100644
index 000000000000..5e717c4d8223
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/extra-container.nix
@@ -0,0 +1,17 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.programs.extra-container;
+in {
+  options = {
+    programs.extra-container.enable = mkEnableOption (lib.mdDoc ''
+      extra-container, a tool for running declarative NixOS containers
+      without host system rebuilds
+    '');
+  };
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.extra-container ];
+    boot.extraSystemdUnitPaths = [ "/etc/systemd-mutable/system" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/feedbackd.nix b/nixpkgs/nixos/modules/programs/feedbackd.nix
new file mode 100644
index 000000000000..e3fde947a3df
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/feedbackd.nix
@@ -0,0 +1,33 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.feedbackd;
+in {
+  options = {
+    programs.feedbackd = {
+      enable = mkEnableOption (lib.mdDoc ''
+        the feedbackd D-Bus service and udev rules.
+
+        Your user needs to be in the `feedbackd` group to trigger effects
+      '');
+      package = mkOption {
+        description = lib.mdDoc ''
+          Which feedbackd package to use.
+        '';
+        type = types.package;
+        default = pkgs.feedbackd;
+        defaultText = literalExpression "pkgs.feedbackd";
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    services.dbus.packages = [ cfg.package ];
+    services.udev.packages = [ cfg.package ];
+
+    users.groups.feedbackd = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/file-roller.nix b/nixpkgs/nixos/modules/programs/file-roller.nix
new file mode 100644
index 000000000000..ca0c4d1b2a2a
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/file-roller.nix
@@ -0,0 +1,48 @@
+# File Roller.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let cfg = config.programs.file-roller;
+
+in {
+
+  # Added 2019-08-09
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "file-roller" "enable" ]
+      [ "programs" "file-roller" "enable" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    programs.file-roller = {
+
+      enable = mkEnableOption (lib.mdDoc "File Roller, an archive manager for GNOME");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.gnome.file-roller;
+        defaultText = literalExpression "pkgs.gnome.file-roller";
+        description = lib.mdDoc "File Roller derivation to use.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    services.dbus.packages = [ cfg.package ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/firefox.nix b/nixpkgs/nixos/modules/programs/firefox.nix
new file mode 100644
index 000000000000..1edf935d1649
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/firefox.nix
@@ -0,0 +1,303 @@
+{ pkgs, config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.firefox;
+
+  policyFormat = pkgs.formats.json { };
+
+  organisationInfo = ''
+    When this option is in use, Firefox will inform you that "your browser
+    is managed by your organisation". That message appears because NixOS
+    installs what you have declared here such that it cannot be overridden
+    through the user interface. It does not mean that someone else has been
+    given control of your browser, unless of course they also control your
+    NixOS configuration.
+  '';
+
+  # deprecated per-native-messaging-host options
+  nmhOptions = {
+    browserpass = {
+      name = "Browserpass";
+      package = pkgs.browserpass;
+    };
+    bukubrow = {
+      name = "Bukubrow";
+      package = pkgs.bukubrow;
+    };
+    euwebid = {
+      name = "Web eID";
+      package = pkgs.web-eid-app;
+    };
+    ff2mpv = {
+      name = "ff2mpv";
+      package = pkgs.ff2mpv;
+    };
+    fxCast = {
+      name = "fx_cast";
+      package = pkgs.fx-cast-bridge;
+    };
+    gsconnect = {
+      name = "GSConnect";
+      package = pkgs.gnomeExtensions.gsconnect;
+    };
+    jabref = {
+      name = "JabRef";
+      package = pkgs.jabref;
+    };
+    passff = {
+      name = "PassFF";
+      package = pkgs.passff-host;
+    };
+    tridactyl = {
+      name = "Tridactyl";
+      package = pkgs.tridactyl-native;
+    };
+    ugetIntegrator = {
+      name = "Uget Integrator";
+      package = pkgs.uget-integrator;
+    };
+  };
+in
+{
+  options.programs.firefox = {
+    enable = mkEnableOption (mdDoc "the Firefox web browser");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.firefox;
+      description = mdDoc "Firefox package to use.";
+      defaultText = literalExpression "pkgs.firefox";
+      relatedPackages = [
+        "firefox"
+        "firefox-beta-bin"
+        "firefox-bin"
+        "firefox-devedition-bin"
+        "firefox-esr"
+      ];
+    };
+
+    wrapperConfig = mkOption {
+      type = types.attrs;
+      default = {};
+      description = mdDoc "Arguments to pass to Firefox wrapper";
+    };
+
+    policies = mkOption {
+      type = policyFormat.type;
+      default = { };
+      description = mdDoc ''
+        Group policies to install.
+
+        See [Mozilla's documentation](https://mozilla.github.io/policy-templates/)
+        for a list of available options.
+
+        This can be used to install extensions declaratively! Check out the
+        documentation of the `ExtensionSettings` policy for details.
+
+        ${organisationInfo}
+      '';
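+      # A minimal sketch; the policy names and the extension ID/URL below are
+      # illustrative assumptions, see the Mozilla policy templates for details.
+      example = literalExpression ''
+        {
+          DisableTelemetry = true;
+          ExtensionSettings = {
+            "uBlock0@raymondhill.net" = {
+              installation_mode = "normal_installed";
+              install_url = "https://addons.mozilla.org/firefox/downloads/latest/ublock-origin/latest.xpi";
+            };
+          };
+        }
+      '';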
+    };
+
+    preferences = mkOption {
+      type = with types; attrsOf (oneOf [ bool int str ]);
+      default = { };
+      description = mdDoc ''
+        Preferences to set from `about:config`.
+
+        Some of these may be configured more ergonomically
+        using policies.
+
+        ${organisationInfo}
+      '';
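+      # Illustrative preference names only.
+      example = literalExpression ''
+        {
+          "browser.startup.homepage" = "https://nixos.org";
+          "general.smoothScroll" = false;
+        }
+      '';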
+    };
+
+    preferencesStatus = mkOption {
+      type = types.enum [ "default" "locked" "user" "clear" ];
+      default = "locked";
+      description = mdDoc ''
+        The status of `firefox.preferences`.
+
+        `status` can assume the following values:
+        - `"default"`: Preferences appear as default.
+        - `"locked"`: Preferences appear as default and can't be changed.
+        - `"user"`: Preferences appear as changed.
+        - `"clear"`: Value has no effect. Resets to factory defaults on each startup.
+      '';
+    };
+
+    languagePacks = mkOption {
+      # Available languages can be found in https://releases.mozilla.org/pub/firefox/releases/${cfg.package.version}/linux-x86_64/xpi/
+      type = types.listOf (types.enum ([
+        "ach"
+        "af"
+        "an"
+        "ar"
+        "ast"
+        "az"
+        "be"
+        "bg"
+        "bn"
+        "br"
+        "bs"
+        "ca-valencia"
+        "ca"
+        "cak"
+        "cs"
+        "cy"
+        "da"
+        "de"
+        "dsb"
+        "el"
+        "en-CA"
+        "en-GB"
+        "en-US"
+        "eo"
+        "es-AR"
+        "es-CL"
+        "es-ES"
+        "es-MX"
+        "et"
+        "eu"
+        "fa"
+        "ff"
+        "fi"
+        "fr"
+        "fy-NL"
+        "ga-IE"
+        "gd"
+        "gl"
+        "gn"
+        "gu-IN"
+        "he"
+        "hi-IN"
+        "hr"
+        "hsb"
+        "hu"
+        "hy-AM"
+        "ia"
+        "id"
+        "is"
+        "it"
+        "ja"
+        "ka"
+        "kab"
+        "kk"
+        "km"
+        "kn"
+        "ko"
+        "lij"
+        "lt"
+        "lv"
+        "mk"
+        "mr"
+        "ms"
+        "my"
+        "nb-NO"
+        "ne-NP"
+        "nl"
+        "nn-NO"
+        "oc"
+        "pa-IN"
+        "pl"
+        "pt-BR"
+        "pt-PT"
+        "rm"
+        "ro"
+        "ru"
+        "sco"
+        "si"
+        "sk"
+        "sl"
+        "son"
+        "sq"
+        "sr"
+        "sv-SE"
+        "szl"
+        "ta"
+        "te"
+        "th"
+        "tl"
+        "tr"
+        "trs"
+        "uk"
+        "ur"
+        "uz"
+        "vi"
+        "xh"
+        "zh-CN"
+        "zh-TW"
+      ]));
+      default = [ ];
+      description = mdDoc ''
+        The language packs to install.
+      '';
+    };
+
+    autoConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = mdDoc ''
+        AutoConfig files can be used to set and lock preferences that are not covered
+        by the policies.json for Mac and Linux. This method can be used to automatically
+        change user preferences or prevent the end user from modifying specific
+        preferences by locking them. More information can be found at https://support.mozilla.org/en-US/kb/customizing-firefox-using-autoconfig.
+      '';
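+      # A minimal sketch; the preference names below are illustrative assumptions,
+      # see the linked Mozilla article for the full AutoConfig syntax.
+      example = ''
+        defaultPref("browser.aboutConfig.showWarning", false);
+        lockPref("app.update.auto", false);
+      '';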
+    };
+
+    nativeMessagingHosts = ({
+      packages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = mdDoc ''
+          Additional packages containing native messaging hosts that should be made available to Firefox extensions.
+        '';
+      };
+    }) // (mapAttrs (k: v: mkEnableOption (mdDoc "${v.name} support")) nmhOptions);
+  };
+
+  config = let
+    forEachEnabledNmh = fn: flatten (mapAttrsToList (k: v: lib.optional cfg.nativeMessagingHosts.${k} (fn k v)) nmhOptions);
+  in mkIf cfg.enable {
+    warnings = forEachEnabledNmh (k: v:
+      "The `programs.firefox.nativeMessagingHosts.${k}` option is deprecated, " +
+      "please add `${v.package.pname}` to `programs.firefox.nativeMessagingHosts.packages` instead."
+    );
+    programs.firefox.nativeMessagingHosts.packages = forEachEnabledNmh (_: v: v.package);
+
+    environment.systemPackages = [
+      (cfg.package.override (old: {
+        extraPrefsFiles = old.extraPrefsFiles or [] ++ [(pkgs.writeText "firefox-autoconfig.js" cfg.autoConfig)];
+        nativeMessagingHosts = old.nativeMessagingHosts or [] ++ cfg.nativeMessagingHosts.packages;
+        cfg = (old.cfg or {}) // cfg.wrapperConfig;
+      }))
+    ];
+
+    environment.etc =
+      let
+        policiesJSON = policyFormat.generate "firefox-policies.json" { inherit (cfg) policies; };
+      in
+      mkIf (cfg.policies != { }) {
+        "firefox/policies/policies.json".source = "${policiesJSON}";
+      };
+
+    # Preferences are converted into a policy
+    programs.firefox.policies = {
+      Preferences = (mapAttrs
+        (_: value: { Value = value; Status = cfg.preferencesStatus; })
+        cfg.preferences);
+      ExtensionSettings = listToAttrs (map
+        (lang: nameValuePair
+          "langpack-${lang}@firefox.mozilla.org"
+          {
+            installation_mode = "normal_installed";
+            install_url = "https://releases.mozilla.org/pub/firefox/releases/${cfg.package.version}/linux-x86_64/xpi/${lang}.xpi";
+          }
+        )
+        cfg.languagePacks);
+    };
+  };
+
+  meta.maintainers = with maintainers; [ danth ];
+}
diff --git a/nixpkgs/nixos/modules/programs/firejail.nix b/nixpkgs/nixos/modules/programs/firejail.nix
new file mode 100644
index 000000000000..6f79c13d94b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/firejail.nix
@@ -0,0 +1,104 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.firejail;
+
+  wrappedBins = pkgs.runCommand "firejail-wrapped-binaries"
+    { preferLocalBuild = true;
+      allowSubstitutes = false;
+      # take precedence over non-firejailed versions
+      meta.priority = -1;
+    }
+    ''
+      mkdir -p $out/bin
+      mkdir -p $out/share/applications
+      ${lib.concatStringsSep "\n" (lib.mapAttrsToList (command: value:
+      let
+        opts = if builtins.isAttrs value
+        then value
+        else { executable = value; desktop = null; profile = null; extraArgs = []; };
+        args = lib.escapeShellArgs (
+          opts.extraArgs
+          ++ (optional (opts.profile != null) "--profile=${toString opts.profile}")
+        );
+      in
+      ''
+        cat <<_EOF >$out/bin/${command}
+        #! ${pkgs.runtimeShell} -e
+        exec /run/wrappers/bin/firejail ${args} -- ${toString opts.executable} "\$@"
+        _EOF
+        chmod 0755 $out/bin/${command}
+
+        ${lib.optionalString (opts.desktop != null) ''
+          substitute ${opts.desktop} $out/share/applications/$(basename ${opts.desktop}) \
+            --replace ${opts.executable} $out/bin/${command}
+        ''}
+      '') cfg.wrappedBinaries)}
+    '';
+
+in {
+  options.programs.firejail = {
+    enable = mkEnableOption (lib.mdDoc "firejail");
+
+    wrappedBinaries = mkOption {
+      type = types.attrsOf (types.either types.path (types.submodule {
+        options = {
+          executable = mkOption {
+            type = types.path;
+            description = lib.mdDoc "Executable to run sandboxed";
+            example = literalExpression ''"''${lib.getBin pkgs.firefox}/bin/firefox"'';
+          };
+          desktop = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = lib.mkDoc ".desktop file to modify. Only necessary if it uses the absolute path to the executable.";
+            example = literalExpression ''"''${pkgs.firefox}/share/applications/firefox.desktop"'';
+          };
+          profile = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = lib.mdDoc "Profile to use";
+            example = literalExpression ''"''${pkgs.firejail}/etc/firejail/firefox.profile"'';
+          };
+          extraArgs = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            description = lib.mdDoc "Extra arguments to pass to firejail";
+            example = [ "--private=~/.firejail_home" ];
+          };
+        };
+      }));
+      default = {};
+      example = literalExpression ''
+        {
+          firefox = {
+            executable = "''${lib.getBin pkgs.firefox}/bin/firefox";
+            profile = "''${pkgs.firejail}/etc/firejail/firefox.profile";
+          };
+          mpv = {
+            executable = "''${lib.getBin pkgs.mpv}/bin/mpv";
+            profile = "''${pkgs.firejail}/etc/firejail/mpv.profile";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Wrap the binaries in firejail and place them in the global path.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers.firejail =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${lib.getBin pkgs.firejail}/bin/firejail";
+      };
+
+    environment.systemPackages = [ pkgs.firejail ] ++ [ wrappedBins ];
+  };
+
+  meta.maintainers = with maintainers; [ peterhoeg ];
+}
diff --git a/nixpkgs/nixos/modules/programs/fish.nix b/nixpkgs/nixos/modules/programs/fish.nix
new file mode 100644
index 000000000000..a4c20560bc9b
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/fish.nix
@@ -0,0 +1,317 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfge = config.environment;
+
+  cfg = config.programs.fish;
+
+  fishAbbrs = concatStringsSep "\n" (
+    mapAttrsFlatten (k: v: "abbr -ag ${k} ${escapeShellArg v}")
+      cfg.shellAbbrs
+  );
+
+  fishAliases = concatStringsSep "\n" (
+    mapAttrsFlatten (k: v: "alias ${k} ${escapeShellArg v}")
+      (filterAttrs (k: v: v != null) cfg.shellAliases)
+  );
+
+  envShellInit = pkgs.writeText "shellInit" cfge.shellInit;
+
+  envLoginShellInit = pkgs.writeText "loginShellInit" cfge.loginShellInit;
+
+  envInteractiveShellInit = pkgs.writeText "interactiveShellInit" cfge.interactiveShellInit;
+
+  sourceEnv = file:
+  if cfg.useBabelfish then
+    "source /etc/fish/${file}.fish"
+  else
+    ''
+      set fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d $fish_function_path
+      fenv source /etc/fish/foreign-env/${file} > /dev/null
+      set -e fish_function_path[1]
+    '';
+
+  babelfishTranslate = path: name:
+    pkgs.runCommandLocal "${name}.fish" {
+      nativeBuildInputs = [ pkgs.babelfish ];
+    } "babelfish < ${path} > $out;";
+
+in
+
+{
+
+  options = {
+
+    programs.fish = {
+
+      enable = mkOption {
+        default = false;
+        description = lib.mdDoc ''
+          Whether to configure fish as an interactive shell.
+        '';
+        type = types.bool;
+      };
+
+      useBabelfish = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If enabled, the configured environment will be translated to native fish using [babelfish](https://github.com/bouk/babelfish).
+          Otherwise, [foreign-env](https://github.com/oh-my-fish/plugin-foreign-env) will be used.
+        '';
+      };
+
+      vendor.config.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether fish should source configuration snippets provided by other packages.
+        '';
+      };
+
+      vendor.completions.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether fish should use completion files provided by other packages.
+        '';
+      };
+
+      vendor.functions.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether fish should autoload fish functions provided by other packages.
+        '';
+      };
+
+      shellAbbrs = mkOption {
+        default = {};
+        example = {
+          gco = "git checkout";
+          npu = "nix-prefetch-url";
+        };
+        description = lib.mdDoc ''
+          Set of fish abbreviations.
+        '';
+        type = with types; attrsOf str;
+      };
+
+      shellAliases = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Set of aliases for fish shell, which overrides {option}`environment.shellAliases`.
+          See {option}`environment.shellAliases` for an option format description.
+        '';
+        type = with types; attrsOf (nullOr (either str path));
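+        # Illustrative aliases only.
+        example = { ll = "ls -l"; g = "git"; };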
+      };
+
+      shellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during fish shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      loginShellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during fish login shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      interactiveShellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during interactive fish shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      promptInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code used to initialise fish prompt.
+        '';
+        type = types.lines;
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    programs.fish.shellAliases = mapAttrs (name: mkDefault) cfge.shellAliases;
+
+    # Required for man completions
+    documentation.man.generateCaches = lib.mkDefault true;
+
+    environment = mkMerge [
+      (mkIf cfg.useBabelfish
+      {
+        etc."fish/setEnvironment.fish".source = babelfishTranslate config.system.build.setEnvironment "setEnvironment";
+        etc."fish/shellInit.fish".source = babelfishTranslate envShellInit "shellInit";
+        etc."fish/loginShellInit.fish".source = babelfishTranslate envLoginShellInit "loginShellInit";
+        etc."fish/interactiveShellInit.fish".source = babelfishTranslate envInteractiveShellInit "interactiveShellInit";
+     })
+
+      (mkIf (!cfg.useBabelfish)
+      {
+        etc."fish/foreign-env/shellInit".source = envShellInit;
+        etc."fish/foreign-env/loginShellInit".source = envLoginShellInit;
+        etc."fish/foreign-env/interactiveShellInit".source = envInteractiveShellInit;
+      })
+
+      {
+        etc."fish/nixos-env-preinit.fish".text =
+        if cfg.useBabelfish
+        then ''
+          # source the NixOS environment config
+          if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]
+            source /etc/fish/setEnvironment.fish
+          end
+        ''
+        else ''
+          # This happens before $__fish_datadir/config.fish sets fish_function_path, so it is currently
+          # unset. We set it and then completely erase it, leaving its configuration to $__fish_datadir/config.fish
+          set fish_function_path ${pkgs.fishPlugins.foreign-env}/share/fish/vendor_functions.d $__fish_datadir/functions
+
+          # source the NixOS environment config
+          if [ -z "$__NIXOS_SET_ENVIRONMENT_DONE" ]
+            fenv source ${config.system.build.setEnvironment}
+          end
+
+          # clear fish_function_path so that it will be correctly set when we return to $__fish_datadir/config.fish
+          set -e fish_function_path
+        '';
+      }
+
+      {
+        etc."fish/config.fish".text = ''
+        # /etc/fish/config.fish: DO NOT EDIT -- this file has been generated automatically.
+
+        # if we haven't sourced the general config, do it
+        if not set -q __fish_nixos_general_config_sourced
+          ${sourceEnv "shellInit"}
+
+          ${cfg.shellInit}
+
+          # and leave a note so we don't source this config section again from
+          # this very shell (children will source the general config anew)
+          set -g __fish_nixos_general_config_sourced 1
+        end
+
+        # if we haven't sourced the login config, do it
+        status is-login; and not set -q __fish_nixos_login_config_sourced
+        and begin
+          ${sourceEnv "loginShellInit"}
+
+          ${cfg.loginShellInit}
+
+          # and leave a note so we don't source this config section again from
+          # this very shell (children will source the general config anew)
+          set -g __fish_nixos_login_config_sourced 1
+        end
+
+        # if we haven't sourced the interactive config, do it
+        status is-interactive; and not set -q __fish_nixos_interactive_config_sourced
+        and begin
+          ${fishAbbrs}
+          ${fishAliases}
+
+          ${sourceEnv "interactiveShellInit"}
+
+          ${cfg.promptInit}
+          ${cfg.interactiveShellInit}
+
+          # and leave a note so we don't source this config section again from
+          # this very shell (children will source the general config anew,
+          # allowing configuration changes in, e.g., aliases, to propagate)
+          set -g __fish_nixos_interactive_config_sourced 1
+        end
+      '';
+      }
+
+      {
+        etc."fish/generated_completions".source =
+        let
+          patchedGenerator = pkgs.stdenv.mkDerivation {
+            name = "fish_patched-completion-generator";
+            srcs = [
+              "${pkgs.fish}/share/fish/tools/create_manpage_completions.py"
+              "${pkgs.fish}/share/fish/tools/deroff.py"
+            ];
+            unpackCmd = "cp $curSrc $(basename $curSrc)";
+            sourceRoot = ".";
+            patches = [ ./fish_completion-generator.patch ]; # to prevent collisions of identical completion files
+            dontBuild = true;
+            installPhase = ''
+              mkdir -p $out
+              cp * $out/
+            '';
+            preferLocalBuild = true;
+            allowSubstitutes = false;
+          };
+          generateCompletions = package: pkgs.runCommandLocal
+            ( with lib.strings; let
+                storeLength = stringLength storeDir + 34; # Nix's StorePath::HashLen + 2 for the separating slash and dash
+                pathName = substring storeLength (stringLength package - storeLength) package;
+              in (package.name or pathName) + "_fish-completions")
+            ( { inherit package; } //
+              optionalAttrs (package ? meta.priority) { meta.priority = package.meta.priority; })
+            ''
+              mkdir -p $out
+              if [ -d $package/share/man ]; then
+                find $package/share/man -type f | xargs ${pkgs.python3.pythonOnBuildForHost.interpreter} ${patchedGenerator}/create_manpage_completions.py --directory $out >/dev/null
+              fi
+            '';
+        in
+          pkgs.buildEnv {
+            name = "system_fish-completions";
+            ignoreCollisions = true;
+            paths = map generateCompletions config.environment.systemPackages;
+          };
+      }
+
+      # include programs that bring their own completions
+      {
+        pathsToLink = []
+        ++ optional cfg.vendor.config.enable "/share/fish/vendor_conf.d"
+        ++ optional cfg.vendor.completions.enable "/share/fish/vendor_completions.d"
+        ++ optional cfg.vendor.functions.enable "/share/fish/vendor_functions.d";
+      }
+
+      { systemPackages = [ pkgs.fish ]; }
+
+      {
+        shells = [
+          "/run/current-system/sw/bin/fish"
+          "${pkgs.fish}/bin/fish"
+        ];
+      }
+    ];
+
+    programs.fish.interactiveShellInit = ''
+      # add completions generated by NixOS to $fish_complete_path
+      begin
+        # joins with null byte to accommodate all characters in paths, then respectively gets all paths before (exclusive) / after (inclusive) the first one including "generated_completions",
+        # splits by null byte, and then removes all empty lines produced by using 'string'
+        set -l prev (string join0 $fish_complete_path | string match --regex "^.*?(?=\x00[^\x00]*generated_completions.*)" | string split0 | string match -er ".")
+        set -l post (string join0 $fish_complete_path | string match --regex "[^\x00]*generated_completions.*" | string split0 | string match -er ".")
+        set fish_complete_path $prev "/etc/fish/generated_completions" $post
+      end
+      # prevent fish from generating completions on first run
+      if not test -d $__fish_user_data_dir/generated_completions
+        ${pkgs.coreutils}/bin/mkdir $__fish_user_data_dir/generated_completions
+      end
+    '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/fish_completion-generator.patch b/nixpkgs/nixos/modules/programs/fish_completion-generator.patch
new file mode 100644
index 000000000000..fa207e484c99
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/fish_completion-generator.patch
@@ -0,0 +1,14 @@
+--- a/create_manpage_completions.py
++++ b/create_manpage_completions.py
+@@ -879,10 +879,6 @@ def parse_manpage_at_path(manpage_path, output_directory):
+                 )
+                 return False
+ 
+-        # Output the magic word Autogenerated so we can tell if we can overwrite this
+-        built_command_output.insert(
+-            0, "# " + CMDNAME + "\n# Autogenerated from man page " + manpage_path
+-        )
+         # built_command_output.insert(2, "# using " + parser.__class__.__name__) # XXX MISATTRIBUTES THE CULPABLE PARSER! Was really using Type2 but reporting TypeDeroffManParser
+ 
+         for line in built_command_output:
+
diff --git a/nixpkgs/nixos/modules/programs/flashrom.nix b/nixpkgs/nixos/modules/programs/flashrom.nix
new file mode 100644
index 000000000000..9f8faff14e47
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/flashrom.nix
@@ -0,0 +1,26 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.flashrom;
+in
+{
+  options.programs.flashrom = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Installs flashrom and configures udev rules for programmers
+        used by flashrom. Grants access to users in the "flashrom"
+        group.
+      '';
+    };
+    package = mkPackageOptionMD pkgs "flashrom" { };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ cfg.package ];
+    environment.systemPackages = [ cfg.package ];
+  };
+}
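A minimal usage sketch for this module (the user name is a placeholder, and the group is created explicitly here since the module above does not define it):

    { ... }:
    {
      programs.flashrom.enable = true;
      # Hypothetical user; membership in "flashrom" matches the access granted by the udev rules above.
      users.groups.flashrom = { };
      users.users.alice.extraGroups = [ "flashrom" ];
    }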
diff --git a/nixpkgs/nixos/modules/programs/flexoptix-app.nix b/nixpkgs/nixos/modules/programs/flexoptix-app.nix
new file mode 100644
index 000000000000..2524e7ba4d58
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/flexoptix-app.nix
@@ -0,0 +1,25 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.flexoptix-app;
+in {
+  options = {
+    programs.flexoptix-app = {
+      enable = mkEnableOption (lib.mdDoc "FLEXOPTIX app + udev rules");
+
+      package = mkOption {
+        description = lib.mdDoc "FLEXOPTIX app package to use";
+        type = types.package;
+        default = pkgs.flexoptix-app;
+        defaultText = literalExpression "pkgs.flexoptix-app";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    services.udev.packages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/freetds.nix b/nixpkgs/nixos/modules/programs/freetds.nix
new file mode 100644
index 000000000000..98274fa9b562
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/freetds.nix
@@ -0,0 +1,61 @@
+# Global configuration for freetds environment.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.environment.freetds;
+
+in
+{
+  ###### interface
+
+  options = {
+
+    environment.freetds = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = literalExpression ''
+        { MYDATABASE = '''
+            host = 10.0.2.100
+            port = 1433
+            tds version = 7.2
+          ''';
+        }
+      '';
+      description =
+        lib.mdDoc ''
+        Configure freetds database entries. Each attribute denotes
+        a section within freetds.conf, and the value (a string) is the
+        configuration content for that section. When at least one entry is
+        configured, the global environment variables FREETDSCONF, FREETDS
+        and SYBASE are set so that programs using freetds can find the
+        library and its configuration.
+        '';
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf (length (attrNames cfg) > 0) {
+
+    environment.variables.FREETDSCONF = "/etc/freetds.conf";
+    environment.variables.FREETDS = "/etc/freetds.conf";
+    environment.variables.SYBASE = "${pkgs.freetds}";
+
+    environment.etc."freetds.conf" = { text =
+      (concatStrings (mapAttrsToList (name: value:
+        ''
+        [${name}]
+        ${value}
+        ''
+      ) cfg));
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/fuse.nix b/nixpkgs/nixos/modules/programs/fuse.nix
new file mode 100644
index 000000000000..b82d37a051e7
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/fuse.nix
@@ -0,0 +1,37 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.fuse;
+in {
+  meta.maintainers = with maintainers; [ primeos ];
+
+  options.programs.fuse = {
+    mountMax = mkOption {
+      # In the C code it's an "int" (i.e. signed and at least 16 bit), but
+      # negative numbers obviously make no sense:
+      type = types.ints.between 0 32767; # 2^15 - 1
+      default = 1000;
+      description = lib.mdDoc ''
+        Set the maximum number of FUSE mounts allowed to non-root users.
+      '';
+    };
+
+    userAllowOther = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Allow non-root users to specify the allow_other or allow_root mount
+        options, see mount.fuse3(8).
+      '';
+    };
+  };
+
+  config =  {
+    environment.etc."fuse.conf".text = ''
+      ${optionalString (!cfg.userAllowOther) "#"}user_allow_other
+      mount_max = ${toString cfg.mountMax}
+    '';
+  };
+}
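A sketch of how the two options above end up in /etc/fuse.conf (the values are illustrative):

    { ... }:
    {
      programs.fuse = {
        # Uncomments user_allow_other in the generated /etc/fuse.conf.
        userAllowOther = true;
        # Raises the per-user mount limit from the default of 1000.
        mountMax = 2000;
      };
    }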
diff --git a/nixpkgs/nixos/modules/programs/fzf.nix b/nixpkgs/nixos/modules/programs/fzf.nix
new file mode 100644
index 000000000000..7c4f338e29b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/fzf.nix
@@ -0,0 +1,32 @@
+{ pkgs, config, lib, ... }:
+with lib;
+let
+  cfg = config.programs.fzf;
+in
+{
+  options = {
+    programs.fzf = {
+      fuzzyCompletion = mkEnableOption (mdDoc "fuzzy completion with fzf");
+      keybindings = mkEnableOption (mdDoc "fzf keybindings");
+    };
+  };
+  config = {
+    environment.systemPackages = optional (cfg.keybindings || cfg.fuzzyCompletion) pkgs.fzf;
+
+    programs.bash.interactiveShellInit = optionalString cfg.fuzzyCompletion ''
+      source ${pkgs.fzf}/share/fzf/completion.bash
+    '' + optionalString cfg.keybindings ''
+      source ${pkgs.fzf}/share/fzf/key-bindings.bash
+    '';
+
+    programs.zsh.interactiveShellInit = optionalString (!config.programs.zsh.ohMyZsh.enable)
+      (optionalString cfg.fuzzyCompletion ''
+        source ${pkgs.fzf}/share/fzf/completion.zsh
+      '' + optionalString cfg.keybindings ''
+        source ${pkgs.fzf}/share/fzf/key-bindings.zsh
+      '');
+
+    programs.zsh.ohMyZsh.plugins = lib.mkIf (cfg.keybindings || cfg.fuzzyCompletion) [ "fzf" ];
+  };
+  meta.maintainers = with maintainers; [ laalsaas ];
+}
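A minimal sketch enabling both fzf integrations defined above; this sources the completion and key-binding scripts into bash and zsh as wired in the module:

    { ... }:
    {
      programs.fzf = {
        fuzzyCompletion = true;
        keybindings = true;
      };
    }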
diff --git a/nixpkgs/nixos/modules/programs/gamemode.nix b/nixpkgs/nixos/modules/programs/gamemode.nix
new file mode 100644
index 000000000000..c43e2c2296f5
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/gamemode.nix
@@ -0,0 +1,98 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.gamemode;
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "gamemode.ini" cfg.settings;
+in
+{
+  options = {
+    programs.gamemode = {
+      enable = mkEnableOption (lib.mdDoc "GameMode to optimise system performance on demand");
+
+      enableRenice = mkEnableOption (lib.mdDoc "CAP_SYS_NICE on gamemoded to support lowering process niceness") // {
+        default = true;
+      };
+
+      settings = mkOption {
+        type = settingsFormat.type;
+        default = {};
+        description = lib.mdDoc ''
+          System-wide configuration for GameMode (/etc/gamemode.ini).
+          See gamemoded(8) man page for available settings.
+        '';
+        example = literalExpression ''
+          {
+            general = {
+              renice = 10;
+            };
+
+            # Warning: GPU optimisations have the potential to damage hardware
+            gpu = {
+              apply_gpu_optimisations = "accept-responsibility";
+              gpu_device = 0;
+              amd_performance_level = "high";
+            };
+
+            custom = {
+              start = "''${pkgs.libnotify}/bin/notify-send 'GameMode started'";
+              end = "''${pkgs.libnotify}/bin/notify-send 'GameMode ended'";
+            };
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment = {
+      systemPackages = [ pkgs.gamemode ];
+      etc."gamemode.ini".source = configFile;
+    };
+
+    security = {
+      polkit.enable = true;
+      wrappers = mkIf cfg.enableRenice {
+        gamemoded = {
+          owner = "root";
+          group = "root";
+          source = "${pkgs.gamemode}/bin/gamemoded";
+          capabilities = "cap_sys_nice+ep";
+        };
+      };
+    };
+
+    systemd = {
+      packages = [ pkgs.gamemode ];
+      user.services.gamemoded = {
+        # The upstream service already defines this, but doesn't get applied.
+        # See https://github.com/NixOS/nixpkgs/issues/81138
+        wantedBy = [ "default.target" ];
+
+        # Use pkexec from the security wrappers to allow users to
+        # run libexec/cpugovctl & libexec/gpuclockctl as root with
+        # the actions defined in share/polkit-1/actions.
+        #
+        # This uses a link farm to make sure other wrapped executables
+        # aren't included in PATH.
+        environment.PATH = mkForce (pkgs.linkFarm "pkexec" [
+          {
+            name = "pkexec";
+            path = "${config.security.wrapperDir}/pkexec";
+          }
+        ]);
+
+        serviceConfig.ExecStart = mkIf cfg.enableRenice [
+          "" # Tell systemd to clear the existing ExecStart list, to prevent appending to it.
+          "${config.security.wrapperDir}/gamemoded"
+        ];
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with maintainers; [ kira-bruneau ];
+  };
+}
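A minimal sketch of enabling GameMode with a system-wide /etc/gamemode.ini (the renice value is illustrative):

    { ... }:
    {
      programs.gamemode = {
        enable = true;
        # enableRenice defaults to true, giving gamemoded CAP_SYS_NICE via the wrapper above.
        settings.general.renice = 10;
      };
    }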
diff --git a/nixpkgs/nixos/modules/programs/gamescope.nix b/nixpkgs/nixos/modules/programs/gamescope.nix
new file mode 100644
index 000000000000..a31295e736df
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/gamescope.nix
@@ -0,0 +1,85 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+with lib; let
+  cfg = config.programs.gamescope;
+
+  gamescope =
+    let
+      wrapperArgs =
+        optional (cfg.args != [ ])
+          ''--add-flags "${toString cfg.args}"''
+        ++ builtins.attrValues (mapAttrs (var: val: "--set-default ${var} ${val}") cfg.env);
+    in
+    pkgs.runCommand "gamescope" { nativeBuildInputs = [ pkgs.makeBinaryWrapper ]; } ''
+      mkdir -p $out/bin
+      makeWrapper ${cfg.package}/bin/gamescope $out/bin/gamescope --inherit-argv0 \
+        ${toString wrapperArgs}
+    '';
+in
+{
+  options.programs.gamescope = {
+    enable = mkEnableOption (mdDoc "gamescope");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.gamescope;
+      defaultText = literalExpression "pkgs.gamescope";
+      description = mdDoc ''
+        The GameScope package to use.
+      '';
+    };
+
+    capSysNice = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Add cap_sys_nice capability to the GameScope
+        binary so that it may renice itself.
+      '';
+    };
+
+    args = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--rt" "--prefer-vk-device 8086:9bc4" ];
+      description = mdDoc ''
+        Arguments passed to GameScope on startup.
+      '';
+    };
+
+    env = mkOption {
+      type = types.attrsOf types.str;
+      default = { };
+      example = literalExpression ''
+        # for Prime render offload on Nvidia laptops.
+        # Also requires `hardware.nvidia.prime.offload.enable`.
+        {
+          __NV_PRIME_RENDER_OFFLOAD = "1";
+          __VK_LAYER_NV_optimus = "NVIDIA_only";
+          __GLX_VENDOR_LIBRARY_NAME = "nvidia";
+        }
+      '';
+      description = mdDoc ''
+        Default environment variables available to the GameScope process, overridable at runtime.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers = mkIf cfg.capSysNice {
+      gamescope = {
+        owner = "root";
+        group = "root";
+        source = "${gamescope}/bin/gamescope";
+        capabilities = "cap_sys_nice+pie";
+      };
+    };
+
+    environment.systemPackages = mkIf (!cfg.capSysNice) [ gamescope ];
+  };
+
+  meta.maintainers = with maintainers; [ nrdxp ];
+}
diff --git a/nixpkgs/nixos/modules/programs/geary.nix b/nixpkgs/nixos/modules/programs/geary.nix
new file mode 100644
index 000000000000..d9454a2247fd
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/geary.nix
@@ -0,0 +1,24 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.geary;
+
+in {
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  options = {
+    programs.geary.enable = mkEnableOption (lib.mdDoc "Geary, a Mail client for GNOME 3");
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.gnome.geary ];
+    programs.dconf.enable = true;
+    services.gnome.gnome-keyring.enable = true;
+    services.gnome.gnome-online-accounts.enable = true;
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/programs/git.nix b/nixpkgs/nixos/modules/programs/git.nix
new file mode 100644
index 000000000000..4e271a8c134b
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/git.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.git;
+in
+
+{
+  options = {
+    programs.git = {
+      enable = mkEnableOption (lib.mdDoc "git");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.git;
+        defaultText = literalExpression "pkgs.git";
+        example = literalExpression "pkgs.gitFull";
+        description = lib.mdDoc "The git package to use";
+      };
+
+      config = mkOption {
+        type =
+          with types;
+          let
+            gitini = attrsOf (attrsOf anything);
+          in
+          either gitini (listOf gitini) // {
+            merge = loc: defs:
+              let
+                config = foldl'
+                  (acc: { value, ... }@x: acc // (if isList value then {
+                    ordered = acc.ordered ++ value;
+                  } else {
+                    unordered = acc.unordered ++ [ x ];
+                  }))
+                  {
+                    ordered = [ ];
+                    unordered = [ ];
+                  }
+                  defs;
+              in
+              [ (gitini.merge loc config.unordered) ] ++ config.ordered;
+          };
+        default = [ ];
+        example = {
+          init.defaultBranch = "main";
+          url."https://github.com/".insteadOf = [ "gh:" "github:" ];
+        };
+        description = lib.mdDoc ''
+          Configuration to write to /etc/gitconfig. A list can also be
+          specified to keep the configuration in order. For example, setting
+          `config` to `[ { foo.x = 42; } { bar.y = 42; }]` will put the `foo`
+          section before the `bar` section unlike the default alphabetical
+          order, which can be helpful for sections such as `include` and
+          `includeIf`. See the CONFIGURATION FILE section of git-config(1) for
+          more information.
+        '';
+      };
+
+      lfs = {
+        enable = mkEnableOption (lib.mdDoc "git-lfs");
+
+        package = mkOption {
+          type = types.package;
+          default = pkgs.git-lfs;
+          defaultText = literalExpression "pkgs.git-lfs";
+          description = lib.mdDoc "The git-lfs package to use";
+        };
+      };
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      environment.systemPackages = [ cfg.package ];
+      environment.etc.gitconfig = mkIf (cfg.config != [ ]) {
+        text = concatMapStringsSep "\n" generators.toGitINI cfg.config;
+      };
+    })
+    (mkIf (cfg.enable && cfg.lfs.enable) {
+      environment.systemPackages = [ cfg.lfs.package ];
+      programs.git.config = {
+        filter.lfs = {
+          clean = "git-lfs clean -- %f";
+          smudge = "git-lfs smudge -- %f";
+          process = "git-lfs filter-process";
+          required = true;
+        };
+      };
+    })
+  ];
+
+  meta.maintainers = with maintainers; [ figsoda ];
+}
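A sketch combining the ordered-list form of `config` with LFS, as permitted by the merge function above (section contents are illustrative):

    { ... }:
    {
      programs.git = {
        enable = true;
        lfs.enable = true;
        # A list keeps these sections in this order in /etc/gitconfig.
        config = [
          { init.defaultBranch = "main"; }
          { url."https://github.com/".insteadOf = [ "gh:" "github:" ]; }
        ];
      };
    }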
diff --git a/nixpkgs/nixos/modules/programs/gnome-disks.nix b/nixpkgs/nixos/modules/programs/gnome-disks.nix
new file mode 100644
index 000000000000..dcb20bd6037c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/gnome-disks.nix
@@ -0,0 +1,50 @@
+# GNOME Disks.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2019-08-09
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-disks" "enable" ]
+      [ "programs" "gnome-disks" "enable" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    programs.gnome-disks = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable GNOME Disks daemon, a program designed to
+          be a UDisks2 graphical front-end.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.programs.gnome-disks.enable {
+
+    environment.systemPackages = [ pkgs.gnome.gnome-disk-utility ];
+
+    services.dbus.packages = [ pkgs.gnome.gnome-disk-utility ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/gnome-terminal.nix b/nixpkgs/nixos/modules/programs/gnome-terminal.nix
new file mode 100644
index 000000000000..a8d82e0b018c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/gnome-terminal.nix
@@ -0,0 +1,38 @@
+# GNOME Terminal.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.gnome-terminal;
+
+in
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2019-08-19
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-terminal-server" "enable" ]
+      [ "programs" "gnome-terminal" "enable" ])
+  ];
+
+  options = {
+    programs.gnome-terminal.enable = mkEnableOption (lib.mdDoc "GNOME Terminal");
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.gnome.gnome-terminal ];
+    services.dbus.packages = [ pkgs.gnome.gnome-terminal ];
+    systemd.packages = [ pkgs.gnome.gnome-terminal ];
+
+    programs.bash.vteIntegration = true;
+    programs.zsh.vteIntegration = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/gnupg.nix b/nixpkgs/nixos/modules/programs/gnupg.nix
new file mode 100644
index 000000000000..aa1a536247ce
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/gnupg.nix
@@ -0,0 +1,247 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.gnupg;
+
+  agentSettingsFormat = pkgs.formats.keyValue {
+    mkKeyValue = lib.generators.mkKeyValueDefault { } " ";
+  };
+
+  xserverCfg = config.services.xserver;
+
+  defaultPinentryFlavor =
+    if xserverCfg.desktopManager.lxqt.enable
+    || xserverCfg.desktopManager.plasma5.enable
+    || xserverCfg.desktopManager.deepin.enable then
+      "qt"
+    else if xserverCfg.desktopManager.xfce.enable then
+      "gtk2"
+    else if xserverCfg.enable || config.programs.sway.enable then
+      "gnome3"
+    else
+      "curses";
+
+in
+
+{
+
+  options.programs.gnupg = {
+    package = mkOption {
+      type = types.package;
+      default = pkgs.gnupg;
+      defaultText = literalExpression "pkgs.gnupg";
+      description = lib.mdDoc ''
+        The gpg package that should be used.
+      '';
+    };
+
+    agent.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables GnuPG agent with socket-activation for every user session.
+      '';
+    };
+
+    agent.enableSSHSupport = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable SSH agent support in GnuPG agent. Also sets SSH_AUTH_SOCK
+        environment variable correctly. This will disable socket-activation
+        and thus always start a GnuPG agent per user session.
+      '';
+    };
+
+    agent.enableExtraSocket = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable extra socket for GnuPG agent.
+      '';
+    };
+
+    agent.enableBrowserSocket = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable browser socket for GnuPG agent.
+      '';
+    };
+
+    agent.pinentryFlavor = mkOption {
+      type = types.nullOr (types.enum pkgs.pinentry.flavors);
+      example = "gnome3";
+      default = defaultPinentryFlavor;
+      defaultText = literalMD ''matching the configured desktop environment'';
+      description = lib.mdDoc ''
+        Which pinentry interface to use. If not null, the path to the
+        pinentry binary will be set in /etc/gnupg/gpg-agent.conf.
+        If not set at all, it'll pick an appropriate flavor depending on the
+        system configuration (qt flavor for lxqt and plasma5, gtk2 for xfce
+        4.12, gnome3 on all other systems with X enabled, ncurses otherwise).
+      '';
+    };
+
+    agent.settings = mkOption {
+      type = agentSettingsFormat.type;
+      default = { };
+      example = {
+        default-cache-ttl = 600;
+      };
+      description = lib.mdDoc ''
+        Configuration for /etc/gnupg/gpg-agent.conf.
+        See {manpage}`gpg-agent(1)` for supported options.
+      '';
+    };
+
+    dirmngr.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables GnuPG network certificate management daemon with socket-activation for every user session.
+      '';
+    };
+  };
+
+  config = mkIf cfg.agent.enable {
+    programs.gnupg.agent.settings = {
+      pinentry-program = lib.mkIf (cfg.agent.pinentryFlavor != null)
+        "${pkgs.pinentry.${cfg.agent.pinentryFlavor}}/bin/pinentry";
+    };
+
+    environment.etc."gnupg/gpg-agent.conf".source =
+      agentSettingsFormat.generate "gpg-agent.conf" cfg.agent.settings;
+
+    # This overrides the systemd user unit shipped with the gnupg package
+    systemd.user.services.gpg-agent = {
+      unitConfig = {
+        Description = "GnuPG cryptographic agent and passphrase cache";
+        Documentation = "man:gpg-agent(1)";
+        Requires = [ "sockets.target" ];
+      };
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/gpg-agent --supervised";
+        ExecReload = "${cfg.package}/bin/gpgconf --reload gpg-agent";
+      };
+    };
+
+    systemd.user.sockets.gpg-agent = {
+      unitConfig = {
+        Description = "GnuPG cryptographic agent and passphrase cache";
+        Documentation = "man:gpg-agent(1)";
+      };
+      socketConfig = {
+        ListenStream = "%t/gnupg/S.gpg-agent";
+        FileDescriptorName = "std";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
+      wantedBy = [ "sockets.target" ];
+    };
+
+    systemd.user.sockets.gpg-agent-ssh = mkIf cfg.agent.enableSSHSupport {
+      unitConfig = {
+        Description = "GnuPG cryptographic agent (ssh-agent emulation)";
+        Documentation = "man:gpg-agent(1) man:ssh-add(1) man:ssh-agent(1) man:ssh(1)";
+      };
+      socketConfig = {
+        ListenStream = "%t/gnupg/S.gpg-agent.ssh";
+        FileDescriptorName = "ssh";
+        Service = "gpg-agent.service";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
+      wantedBy = [ "sockets.target" ];
+    };
+
+    systemd.user.sockets.gpg-agent-extra = mkIf cfg.agent.enableExtraSocket {
+      unitConfig = {
+        Description = "GnuPG cryptographic agent and passphrase cache (restricted)";
+        Documentation = "man:gpg-agent(1)";
+      };
+      socketConfig = {
+        ListenStream = "%t/gnupg/S.gpg-agent.extra";
+        FileDescriptorName = "extra";
+        Service = "gpg-agent.service";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
+      wantedBy = [ "sockets.target" ];
+    };
+
+    systemd.user.sockets.gpg-agent-browser = mkIf cfg.agent.enableBrowserSocket {
+      unitConfig = {
+        Description = "GnuPG cryptographic agent and passphrase cache (access for web browsers)";
+        Documentation = "man:gpg-agent(1)";
+      };
+      socketConfig = {
+        ListenStream = "%t/gnupg/S.gpg-agent.browser";
+        FileDescriptorName = "browser";
+        Service = "gpg-agent.service";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
+      wantedBy = [ "sockets.target" ];
+    };
+
+    systemd.user.services.dirmngr = mkIf cfg.dirmngr.enable {
+      unitConfig = {
+        Description = "GnuPG network certificate management daemon";
+        Documentation = "man:dirmngr(8)";
+        Requires = "dirmngr.socket";
+      };
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/dirmngr --supervised";
+        ExecReload = "${cfg.package}/bin/gpgconf --reload dirmngr";
+      };
+    };
+
+    systemd.user.sockets.dirmngr = mkIf cfg.dirmngr.enable {
+      unitConfig = {
+        Description = "GnuPG network certificate management daemon";
+        Documentation = "man:dirmngr(8)";
+      };
+      socketConfig = {
+        ListenStream = "%t/gnupg/S.dirmngr";
+        SocketMode = "0600";
+        DirectoryMode = "0700";
+      };
+      wantedBy = [ "sockets.target" ];
+    };
+
+    services.dbus.packages = mkIf (cfg.agent.pinentryFlavor == "gnome3") [ pkgs.gcr ];
+
+    environment.systemPackages = with pkgs; [ cfg.package ];
+
+    environment.interactiveShellInit = ''
+      # Bind gpg-agent to this TTY if gpg commands are used.
+      export GPG_TTY=$(tty)
+    '';
+
+    programs.ssh.extraConfig = optionalString cfg.agent.enableSSHSupport ''
+      # The SSH agent protocol doesn't have support for changing TTYs; however we
+      # can simulate this with the `exec` feature of openssh (see ssh_config(5))
+      # that hooks a command to the shell currently running the ssh program.
+      Match host * exec "${pkgs.runtimeShell} -c '${cfg.package}/bin/gpg-connect-agent --quiet updatestartuptty /bye >/dev/null 2>&1'"
+    '';
+
+    environment.extraInit = mkIf cfg.agent.enableSSHSupport ''
+      if [ -z "$SSH_AUTH_SOCK" ]; then
+        export SSH_AUTH_SOCK=$(${cfg.package}/bin/gpgconf --list-dirs agent-ssh-socket)
+      fi
+    '';
+
+    assertions = [
+      { assertion = cfg.agent.enableSSHSupport -> !config.programs.ssh.startAgent;
+        message = "You can't use ssh-agent and GnuPG agent with SSH support enabled at the same time!";
+      }
+    ];
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
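A minimal sketch of a typical agent setup with SSH support (the pinentry flavor is illustrative; note the assertion above forbids combining this with programs.ssh.startAgent):

    { ... }:
    {
      programs.gnupg.agent = {
        enable = true;
        enableSSHSupport = true;   # also exports SSH_AUTH_SOCK, see environment.extraInit above
        pinentryFlavor = "curses"; # any flavor from pkgs.pinentry.flavors
      };
    }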
diff --git a/nixpkgs/nixos/modules/programs/gpaste.nix b/nixpkgs/nixos/modules/programs/gpaste.nix
new file mode 100644
index 000000000000..074b4d59a365
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/gpaste.nix
@@ -0,0 +1,36 @@
+# GPaste.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  # Added 2019-08-09
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gpaste" "enable" ]
+      [ "programs" "gpaste" "enable" ])
+  ];
+
+  ###### interface
+  options = {
+     programs.gpaste = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable GPaste, a clipboard manager.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf config.programs.gpaste.enable {
+    environment.systemPackages = [ pkgs.gnome.gpaste ];
+    services.dbus.packages = [ pkgs.gnome.gpaste ];
+    systemd.packages = [ pkgs.gnome.gpaste ];
+    # gnome-control-center crashes in Keyboard Shortcuts pane without the GSettings schemas.
+    services.xserver.desktopManager.gnome.sessionPath = [ pkgs.gnome.gpaste ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/gphoto2.nix b/nixpkgs/nixos/modules/programs/gphoto2.nix
new file mode 100644
index 000000000000..f31b1863963d
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/gphoto2.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta.maintainers = [ maintainers.league ];
+
+  ###### interface
+  options = {
+    programs.gphoto2 = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to configure the system to use gphoto2.
+          To grant a user access to digital cameras, the user must
+          be a member of the camera group:
+          `users.users.alice.extraGroups = ["camera"];`
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf config.programs.gphoto2.enable {
+    services.udev.packages = [ pkgs.libgphoto2 ];
+    environment.systemPackages = [ pkgs.gphoto2 ];
+    users.groups.camera = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/haguichi.nix b/nixpkgs/nixos/modules/programs/haguichi.nix
new file mode 100644
index 000000000000..699327c28c61
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/haguichi.nix
@@ -0,0 +1,15 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+{
+  options.programs.haguichi = {
+    enable = mkEnableOption (lib.mdDoc "Haguichi, a Linux GUI frontend to the proprietary LogMeIn Hamachi");
+  };
+
+  config = mkIf config.programs.haguichi.enable {
+    environment.systemPackages = with pkgs; [ haguichi ];
+
+    services.logmein-hamachi.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/hamster.nix b/nixpkgs/nixos/modules/programs/hamster.nix
new file mode 100644
index 000000000000..f50438cc1704
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/hamster.nix
@@ -0,0 +1,15 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta.maintainers = pkgs.hamster.meta.maintainers;
+
+  options.programs.hamster.enable =
+    mkEnableOption (lib.mdDoc "hamster, a time tracking program");
+
+  config = lib.mkIf config.programs.hamster.enable {
+    environment.systemPackages = [ pkgs.hamster ];
+    services.dbus.packages = [ pkgs.hamster ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/htop.nix b/nixpkgs/nixos/modules/programs/htop.nix
new file mode 100644
index 000000000000..777ea709836e
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/htop.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.htop;
+
+  fmt = value:
+    if isList value then concatStringsSep " " (map fmt value) else
+    if isString value then value else
+    if isBool value then if value then "1" else "0" else
+    if isInt value then toString value else
+    throw "Unrecognized type ${typeOf value} in htop settings";
+
+in
+
+{
+
+  options.programs.htop = {
+    package = mkOption {
+      type = types.package;
+      default = pkgs.htop;
+      defaultText = lib.literalExpression "pkgs.htop";
+      description = lib.mdDoc ''
+        The htop package that should be used.
+      '';
+    };
+
+    enable = mkEnableOption (lib.mdDoc "htop process monitor");
+
+    settings = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool (listOf (oneOf [ str int bool ])) ]);
+      default = {};
+      example = {
+        hide_kernel_threads = true;
+        hide_userland_threads = true;
+      };
+      description = lib.mdDoc ''
+        Extra global default configuration for htop
+        which is read on first startup only.
+        Htop subsequently uses ~/.config/htop/htoprc
+        as configuration source.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [
+      cfg.package
+    ];
+
+    environment.etc."htoprc".text = ''
+      # Global htop configuration
+      # To change set: programs.htop.settings.KEY = VALUE;
+    '' + concatStringsSep "\n" (mapAttrsToList (key: value: "${key}=${fmt value}") cfg.settings);
+  };
+
+}
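A minimal sketch of seeding the global /etc/htoprc via `settings` (the keys shown are ordinary htoprc keys, taken from the example above):

    { ... }:
    {
      programs.htop = {
        enable = true;
        settings = {
          hide_kernel_threads = true;
          hide_userland_threads = true;
        };
      };
    }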
diff --git a/nixpkgs/nixos/modules/programs/hyprland.nix b/nixpkgs/nixos/modules/programs/hyprland.nix
new file mode 100644
index 000000000000..638dfb98e8ab
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/hyprland.nix
@@ -0,0 +1,80 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+with lib; let
+  cfg = config.programs.hyprland;
+
+  finalPortalPackage = cfg.portalPackage.override {
+    hyprland = cfg.finalPackage;
+  };
+in
+{
+  options.programs.hyprland = {
+    enable = mkEnableOption null // {
+      description = mdDoc ''
+        Hyprland, the dynamic tiling Wayland compositor that doesn't sacrifice on its looks.
+
+        You can manually launch Hyprland by executing {command}`Hyprland` on a TTY.
+
+        A configuration file will be generated in {file}`~/.config/hypr/hyprland.conf`.
+        See <https://wiki.hyprland.org> for more information.
+      '';
+    };
+
+    package = mkPackageOptionMD pkgs "hyprland" { };
+
+    finalPackage = mkOption {
+      type = types.package;
+      readOnly = true;
+      default = cfg.package.override {
+        enableXWayland = cfg.xwayland.enable;
+        enableNvidiaPatches = cfg.enableNvidiaPatches;
+      };
+      defaultText = literalExpression
+        "`programs.hyprland.package` with applied configuration";
+      description = mdDoc ''
+        The Hyprland package after applying configuration.
+      '';
+    };
+
+    portalPackage = mkPackageOptionMD pkgs "xdg-desktop-portal-hyprland" { };
+
+    xwayland.enable = mkEnableOption (mdDoc "XWayland") // { default = true; };
+
+    enableNvidiaPatches = mkEnableOption (mdDoc "patching wlroots for better Nvidia support");
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.finalPackage ];
+
+    fonts.enableDefaultPackages = mkDefault true;
+    hardware.opengl.enable = mkDefault true;
+
+    programs = {
+      dconf.enable = mkDefault true;
+      xwayland.enable = mkDefault cfg.xwayland.enable;
+    };
+
+    security.polkit.enable = true;
+
+    services.xserver.displayManager.sessionPackages = [ cfg.finalPackage ];
+
+    xdg.portal = {
+      enable = mkDefault true;
+      extraPortals = [ finalPortalPackage ];
+    };
+  };
+
+  imports = with lib; [
+    (mkRemovedOptionModule
+      [ "programs" "hyprland" "xwayland" "hidpi" ]
+      "XWayland patches are deprecated. Refer to https://wiki.hyprland.org/Configuring/XWayland"
+    )
+    (mkRenamedOptionModule
+      [ "programs" "hyprland" "nvidiaPatches" ]
+      [ "programs" "hyprland" "enableNvidiaPatches" ]
+    )
+  ];
+}
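A minimal sketch of enabling Hyprland as defined above; XWayland is on by default and the Nvidia patches are opt-in:

    { ... }:
    {
      programs.hyprland = {
        enable = true;
        # Illustrative: only relevant on Nvidia systems, per the option above.
        enableNvidiaPatches = false;
      };
    }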
diff --git a/nixpkgs/nixos/modules/programs/i3lock.nix b/nixpkgs/nixos/modules/programs/i3lock.nix
new file mode 100644
index 000000000000..466ae59c9277
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/i3lock.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.i3lock;
+
+in {
+
+  ###### interface
+
+  options = {
+    programs.i3lock = {
+      enable = mkEnableOption (mdDoc "i3lock");
+      package = mkOption {
+        type        = types.package;
+        default     = pkgs.i3lock;
+        defaultText = literalExpression "pkgs.i3lock";
+        example     = literalExpression ''
+          pkgs.i3lock-color
+        '';
+        description = mdDoc ''
+          Specify which package to use for the i3lock program.
+          The chosen package must provide an i3lock file or link in its out directory for the u2fSupport option to work correctly.
+        '';
+      };
+      u2fSupport = mkOption {
+        type        = types.bool;
+        default     = false;
+        example     = true;
+        description = mdDoc ''
+          Whether to enable U2F support in the i3lock program.
+          U2F enables authentication using a hardware device, such as a security key.
+          When U2F support is enabled, the setuid bit is set on the i3lock binary and the pam u2fAuth service is enabled.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    security.wrappers.i3lock = mkIf cfg.u2fSupport {
+      setuid = true;
+      owner = "root";
+      group = "root";
+      source = "${cfg.package.out}/bin/i3lock";
+    };
+
+    security.pam.services.i3lock.u2fAuth = cfg.u2fSupport;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/iay.nix b/nixpkgs/nixos/modules/programs/iay.nix
new file mode 100644
index 000000000000..9164f5cb6486
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/iay.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.iay;
+  inherit (lib) mkEnableOption mkIf mkOption mkPackageOptionMD optionalString types;
+in {
+  options.programs.iay = {
+    enable = mkEnableOption (lib.mdDoc "iay");
+    package = mkPackageOptionMD pkgs "iay" {};
+
+    minimalPrompt = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Use minimal one-liner prompt.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    programs.bash.promptInit = ''
+      if [[ $TERM != "dumb" && (-z $INSIDE_EMACS || $INSIDE_EMACS == "vterm") ]]; then
+        PS1='$(iay ${optionalString cfg.minimalPrompt "-m"})'
+      fi
+    '';
+
+    programs.zsh.promptInit = ''
+      if [[ $TERM != "dumb" && (-z $INSIDE_EMACS || $INSIDE_EMACS == "vterm") ]]; then
+        autoload -Uz add-zsh-hook
+        _iay_prompt() {
+          PROMPT="$(iay -z ${optionalString cfg.minimalPrompt "-m"})"
+        }
+        add-zsh-hook precmd _iay_prompt
+      fi
+    '';
+  };
+
+  meta.maintainers = pkgs.iay.meta.maintainers;
+}
diff --git a/nixpkgs/nixos/modules/programs/iftop.nix b/nixpkgs/nixos/modules/programs/iftop.nix
new file mode 100644
index 000000000000..1db018858b65
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/iftop.nix
@@ -0,0 +1,20 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.iftop;
+in {
+  options = {
+    programs.iftop.enable = mkEnableOption (lib.mdDoc "iftop + setcap wrapper");
+  };
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.iftop ];
+    security.wrappers.iftop = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_raw+p";
+      source = "${pkgs.iftop}/bin/iftop";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/iotop.nix b/nixpkgs/nixos/modules/programs/iotop.nix
new file mode 100644
index 000000000000..0eb60b989eb3
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/iotop.nix
@@ -0,0 +1,19 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.iotop;
+in {
+  options = {
+    programs.iotop.enable = mkEnableOption (lib.mdDoc "iotop + setcap wrapper");
+  };
+  config = mkIf cfg.enable {
+    security.wrappers.iotop = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_admin+p";
+      source = "${pkgs.iotop}/bin/iotop";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/java.nix b/nixpkgs/nixos/modules/programs/java.nix
new file mode 100644
index 000000000000..c5f83858d06a
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/java.nix
@@ -0,0 +1,79 @@
+# This module provides JAVA_HOME, with a different way to install java
+# system-wide.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.java;
+in
+{
+
+  options = {
+
+    programs.java = {
+
+      enable = mkEnableOption (lib.mdDoc "java") // {
+        description = lib.mdDoc ''
+          Install and set up the Java development kit.
+
+          ::: {.note}
+          This adds JAVA_HOME to the global environment, by sourcing the
+          jdk's setup-hook on shell init. It is equivalent to starting a shell
+          through 'nix-shell -p jdk', or roughly the following system-wide
+          configuration:
+
+              environment.variables.JAVA_HOME = ''${pkgs.jdk.home}/lib/openjdk;
+              environment.systemPackages = [ pkgs.jdk ];
+          :::
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.jdk;
+        defaultText = literalExpression "pkgs.jdk";
+        description = lib.mdDoc ''
+          Java package to install. Typical values are pkgs.jdk or pkgs.jre.
+        '';
+        type = types.package;
+      };
+
+      binfmt = mkEnableOption (lib.mdDoc "binfmt to execute java jar's and classes");
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    boot.binfmt.registrations = mkIf cfg.binfmt {
+      java-class = {
+        recognitionType = "extension";
+        magicOrExtension = "class";
+        interpreter = pkgs.writeShellScript "java-class-wrapper" ''
+          test -e ${cfg.package}/nix-support/setup-hook && source ${cfg.package}/nix-support/setup-hook
+          classpath=$(dirname "$1")
+          class=$(basename "''${1%%.class}")
+          $JAVA_HOME/bin/java -classpath "$classpath" "$class" "''${@:2}"
+        '';
+      };
+      java-jar = {
+        recognitionType = "extension";
+        magicOrExtension = "jar";
+        interpreter = pkgs.writeShellScript "java-jar-wrapper" ''
+          test -e ${cfg.package}/nix-support/setup-hook && source ${cfg.package}/nix-support/setup-hook
+          $JAVA_HOME/bin/java -jar "$@"
+        '';
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.shellInit = ''
+      test -e ${cfg.package}/nix-support/setup-hook && source ${cfg.package}/nix-support/setup-hook
+    '';
+
+  };
+
+}
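A minimal sketch enabling the JDK with binfmt registrations, so .jar and .class files can be executed directly (the package choice is illustrative):

    { pkgs, ... }:
    {
      programs.java = {
        enable = true;
        package = pkgs.jdk;  # or pkgs.jre, per the option description above
        binfmt = true;
      };
    }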
diff --git a/nixpkgs/nixos/modules/programs/k3b.nix b/nixpkgs/nixos/modules/programs/k3b.nix
new file mode 100644
index 000000000000..5d19e4f1cc4f
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/k3b.nix
@@ -0,0 +1,52 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  # interface
+  options.programs.k3b = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable k3b, the KDE disk burning application.
+
+        In addition to installing `k3b`, enabling this will
+        add `setuid` wrappers in `/run/wrappers/bin`
+        for both `cdrdao` and `cdrecord`. On first
+        run you must manually set the paths of `cdrdao` and
+        `cdrecord` to the corresponding paths under
+        `/run/wrappers/bin` in the "Setup External Programs" menu.
+      '';
+    };
+  };
+
+  # implementation
+  config = mkIf config.programs.k3b.enable {
+
+    environment.systemPackages = with pkgs; [
+      k3b
+      dvdplusrwtools
+      cdrdao
+      cdrtools
+    ];
+
+    security.wrappers = {
+      cdrdao = {
+        setuid = true;
+        owner = "root";
+        group = "cdrom";
+        permissions = "u+wrx,g+x";
+        source = "${pkgs.cdrdao}/bin/cdrdao";
+      };
+      cdrecord = {
+        setuid = true;
+        owner = "root";
+        group = "cdrom";
+        permissions = "u+wrx,g+x";
+        source = "${pkgs.cdrtools}/bin/cdrecord";
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/k40-whisperer.nix b/nixpkgs/nixos/modules/programs/k40-whisperer.nix
new file mode 100644
index 000000000000..27a79caa4b53
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/k40-whisperer.nix
@@ -0,0 +1,40 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.k40-whisperer;
+  pkg = cfg.package.override {
+    udevGroup = cfg.group;
+  };
+in
+{
+  options.programs.k40-whisperer = {
+    enable = mkEnableOption (lib.mdDoc "K40-Whisperer");
+
+    group = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Group assigned to the device when connected.
+      '';
+      default = "k40";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.k40-whisperer;
+      defaultText = literalExpression "pkgs.k40-whisperer";
+      example = literalExpression "pkgs.k40-whisperer";
+      description = lib.mdDoc ''
+        K40 Whisperer package to use.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.${cfg.group} = {};
+
+    environment.systemPackages = [ pkg ];
+    services.udev.packages = [ pkg ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/kbdlight.nix b/nixpkgs/nixos/modules/programs/kbdlight.nix
new file mode 100644
index 000000000000..6c3c79ddb4aa
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/kbdlight.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.kbdlight;
+
+in
+{
+  options.programs.kbdlight.enable = mkEnableOption (lib.mdDoc "kbdlight");
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.kbdlight ];
+    security.wrappers.kbdlight =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${pkgs.kbdlight.out}/bin/kbdlight";
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/kclock.nix b/nixpkgs/nixos/modules/programs/kclock.nix
new file mode 100644
index 000000000000..63d6fb1e2d7f
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/kclock.nix
@@ -0,0 +1,13 @@
+{ lib, pkgs, config, ... }:
+with lib;
+let
+  cfg = config.programs.kclock;
+  kclockPkg = pkgs.libsForQt5.kclock;
+in {
+  options.programs.kclock = { enable = mkEnableOption (lib.mdDoc "KClock"); };
+
+  config = mkIf cfg.enable {
+    services.dbus.packages = [ kclockPkg ];
+    environment.systemPackages = [ kclockPkg ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/kdeconnect.nix b/nixpkgs/nixos/modules/programs/kdeconnect.nix
new file mode 100644
index 000000000000..4ba156f2db8d
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/kdeconnect.nix
@@ -0,0 +1,35 @@
+{ config, pkgs, lib, ... }:
+with lib;
+{
+  options.programs.kdeconnect = {
+    enable = mkEnableOption (lib.mdDoc ''
+      kdeconnect.
+
+      Note that it will open TCP and UDP ports
+      1714 to 1764, which are needed for it to function properly.
+      You can set the {option}`package` option to
+      `gnomeExtensions.gsconnect` to use an alternative
+      implementation if you use GNOME.
+    '');
+    package = mkOption {
+      default = pkgs.plasma5Packages.kdeconnect-kde;
+      defaultText = literalExpression "pkgs.plasma5Packages.kdeconnect-kde";
+      type = types.package;
+      example = literalExpression "pkgs.gnomeExtensions.gsconnect";
+      description = lib.mdDoc ''
+        The package providing the implementation for kdeconnect.
+      '';
+    };
+  };
+  config =
+    let
+      cfg = config.programs.kdeconnect;
+    in
+      mkIf cfg.enable {
+        environment.systemPackages = [ cfg.package ];
+        networking.firewall = rec {
+          allowedTCPPortRanges = [ { from = 1714; to = 1764; } ];
+          allowedUDPPortRanges = allowedTCPPortRanges;
+        };
+      };
+}
diff --git a/nixpkgs/nixos/modules/programs/less.nix b/nixpkgs/nixos/modules/programs/less.nix
new file mode 100644
index 000000000000..81c68307aee1
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/less.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.less;
+
+  configText = if (cfg.configFile != null) then (builtins.readFile cfg.configFile) else ''
+    #command
+    ${concatStringsSep "\n"
+      (mapAttrsToList (command: action: "${command} ${action}") cfg.commands)
+    }
+    ${optionalString cfg.clearDefaultCommands "#stop"}
+
+    #line-edit
+    ${concatStringsSep "\n"
+      (mapAttrsToList (command: action: "${command} ${action}") cfg.lineEditingKeys)
+    }
+
+    #env
+    ${concatStringsSep "\n"
+      (mapAttrsToList (variable: values: "${variable}=${values}") cfg.envVariables)
+    }
+  '';
+
+  lessKey = pkgs.writeText "lessconfig" configText;
+
+in
+
+{
+  options = {
+
+    programs.less = {
+
+      # note that environment.nix sets PAGER=less, and
+      # therefore also enables this module
+      enable = mkEnableOption (lib.mdDoc "less");
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = literalExpression ''"''${pkgs.my-configs}/lesskey"'';
+        description = lib.mdDoc ''
+          Path to lesskey configuration file.
+
+          {option}`configFile` takes precedence over {option}`commands`,
+          {option}`clearDefaultCommands`, {option}`lineEditingKeys`, and
+          {option}`envVariables`.
+        '';
+      };
+
+      commands = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = {
+          h = "noaction 5\\e(";
+          l = "noaction 5\\e)";
+        };
+        description = lib.mdDoc "Defines new command keys.";
+      };
+
+      clearDefaultCommands = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Clear all default commands.
+          You should remember to set the quit key.
+          Otherwise you will not be able to leave less without killing it.
+        '';
+      };
+
+      lineEditingKeys = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = {
+          e = "abort";
+        };
+        description = lib.mdDoc "Defines new line-editing keys.";
+      };
+
+      envVariables = mkOption {
+        type = types.attrsOf types.str;
+        default = {
+          LESS = "-R";
+        };
+        example = {
+          LESS = "--quit-if-one-screen";
+        };
+        description = lib.mdDoc "Defines environment variables.";
+      };
+
+      lessopen = mkOption {
+        type = types.nullOr types.str;
+        default = "|${pkgs.lesspipe}/bin/lesspipe.sh %s";
+        defaultText = literalExpression ''"|''${pkgs.lesspipe}/bin/lesspipe.sh %s"'';
+        description = lib.mdDoc ''
+          Before less opens a file, it first gives your input preprocessor a chance to modify the way the contents of the file are displayed.
+        '';
+      };
+
+      lessclose = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          When less closes a file opened in such a way, it will call another program, called the input postprocessor,
+          which may perform any desired clean-up action (such as deleting the replacement file created by LESSOPEN).
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.less ];
+
+    environment.variables = {
+      LESSKEYIN_SYSTEM = toString lessKey;
+    } // optionalAttrs (cfg.lessopen != null) {
+      LESSOPEN = cfg.lessopen;
+    } // optionalAttrs (cfg.lessclose != null) {
+      LESSCLOSE = cfg.lessclose;
+    };
+
+    warnings = optional (
+      cfg.clearDefaultCommands && (all (x: x != "quit") (attrValues cfg.commands))
+    ) ''
+      config.programs.less.clearDefaultCommands clears all default commands of less but there is no alternative binding for exiting.
+      Consider adding a binding for 'quit'.
+    '';
+  };
+
+  meta.maintainers = with maintainers; [ johnazoidberg ];
+
+}
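A minimal sketch of the lesskey-based options above (the key bindings and LESS flags are illustrative):

    { ... }:
    {
      programs.less = {
        enable = true;
        envVariables.LESS = "-R --quit-if-one-screen";
        commands = {
          h = "noaction 5\\e(";
          l = "noaction 5\\e)";
        };
      };
    }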
diff --git a/nixpkgs/nixos/modules/programs/liboping.nix b/nixpkgs/nixos/modules/programs/liboping.nix
new file mode 100644
index 000000000000..39e75ba90c9d
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/liboping.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.liboping;
+in {
+  options.programs.liboping = {
+    enable = mkEnableOption (lib.mdDoc "liboping");
+  };
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ liboping ];
+    security.wrappers = mkMerge (map (
+      exec: {
+        "${exec}" = {
+          owner = "root";
+          group = "root";
+          capabilities = "cap_net_raw+p";
+          source = "${pkgs.liboping}/bin/${exec}";
+        };
+      }
+    ) [ "oping" "noping" ]);
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/light.nix b/nixpkgs/nixos/modules/programs/light.nix
new file mode 100644
index 000000000000..57cc925be465
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/light.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.light;
+
+in
+{
+  options = {
+    programs.light = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to install the Light backlight control command
+          and udev rules granting access to members of the "video" group.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.light ];
+    services.udev.packages = [ pkgs.light ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/mdevctl.nix b/nixpkgs/nixos/modules/programs/mdevctl.nix
new file mode 100644
index 000000000000..2b7285233350
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/mdevctl.nix
@@ -0,0 +1,18 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.programs.mdevctl;
+in {
+  options.programs.mdevctl = {
+    enable = mkEnableOption (lib.mdDoc "Mediated Device Management");
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ mdevctl ];
+
+    environment.etc."mdevctl.d/scripts.d/notifiers/.keep".text = "";
+    environment.etc."mdevctl.d/scripts.d/callouts/.keep".text = "";
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/mepo.nix b/nixpkgs/nixos/modules/programs/mepo.nix
new file mode 100644
index 000000000000..4b1706a2a0e5
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/mepo.nix
@@ -0,0 +1,46 @@
+{ pkgs, config, lib, ...}:
+with lib;
+let
+  cfg = config.programs.mepo;
+in
+{
+  options.programs.mepo = {
+    enable = mkEnableOption (mdDoc "Mepo");
+
+    locationBackends = {
+      gpsd = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Whether to enable location detection via gpsd.
+          This may require additional configuration of gpsd, see [here](#opt-services.gpsd.enable)
+        '';
+      };
+
+      geoclue = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc "Whether to enable location detection via geoclue";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [
+      mepo
+    ] ++ lib.optional cfg.locationBackends.geoclue geoclue2-with-demo-agent
+    ++ lib.optional cfg.locationBackends.gpsd gpsd;
+
+    services.geoclue2 = mkIf cfg.locationBackends.geoclue {
+      enable = true;
+      appConfig.where-am-i = {
+        isAllowed = true;
+        isSystem = false;
+      };
+    };
+
+    services.gpsd.enable = cfg.locationBackends.gpsd;
+  };
+
+  meta.maintainers = with maintainers; [ laalsaas ];
+}
diff --git a/nixpkgs/nixos/modules/programs/mininet.nix b/nixpkgs/nixos/modules/programs/mininet.nix
new file mode 100644
index 000000000000..02272729d233
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/mininet.nix
@@ -0,0 +1,39 @@
+# Global configuration for mininet
+# kernel must have NETNS/VETH/SCHED
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg  = config.programs.mininet;
+
+  generatedPath = with pkgs; makeSearchPath "bin"  [
+    iperf ethtool iproute2 socat
+  ];
+
+  pyEnv = pkgs.python.withPackages(ps: [ ps.mininet-python ]);
+
+  mnexecWrapped = pkgs.runCommand "mnexec-wrapper"
+    { nativeBuildInputs = [ pkgs.makeWrapper pkgs.pythonPackages.wrapPython ]; }
+    ''
+      makeWrapper ${pkgs.mininet}/bin/mnexec \
+        $out/bin/mnexec \
+        --prefix PATH : "${generatedPath}"
+
+      ln -s ${pyEnv}/bin/mn $out/bin/mn
+
+      # mn errors out without a telnet binary
+      # pkgs.inetutils brings an undesired ifconfig into PATH see #43105
+      ln -s ${pkgs.inetutils}/bin/telnet $out/bin/telnet
+    '';
+in
+{
+  options.programs.mininet.enable = mkEnableOption (lib.mdDoc "Mininet");
+
+  config = mkIf cfg.enable {
+
+    virtualisation.vswitch.enable = true;
+
+    environment.systemPackages = [ mnexecWrapped ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/minipro.nix b/nixpkgs/nixos/modules/programs/minipro.nix
new file mode 100644
index 000000000000..a947f83f2ee0
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/minipro.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.minipro;
+in
+{
+  options = {
+    programs.minipro = {
+      enable = lib.mkEnableOption (lib.mdDoc "minipro") // {
+        description = lib.mdDoc ''
+          Installs minipro and its udev rules.
+          Users of the `plugdev` group can interact with connected MiniPRO chip programmers.
+        '';
+      };
+
+      package = lib.mkPackageOptionMD pkgs "minipro" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    users.groups.plugdev = { };
+    environment.systemPackages = [ cfg.package ];
+    services.udev.packages = [ cfg.package ];
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ infinidoge ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/miriway.nix b/nixpkgs/nixos/modules/programs/miriway.nix
new file mode 100644
index 000000000000..e8a10770b6a3
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/miriway.nix
@@ -0,0 +1,78 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.programs.miriway;
+in {
+  options.programs.miriway = {
+    enable = lib.mkEnableOption (lib.mdDoc ''
+      Miriway, a Mir-based Wayland compositor. You can manually launch Miriway by
+      executing "exec miriway" on a TTY, or launch it from a display manager. Copy
+      /etc/xdg/xdg-miriway/miriway-shell.config to ~/.config/miriway-shell.config
+      to modify the system-wide configuration on a per-user basis. See <https://github.com/Miriway/Miriway>
+      and "miriway --help" for more information.'');
+
+    config = lib.mkOption {
+      type = lib.types.lines;
+      default = ''
+        x11-window-title=Miriway (Mir-on-X)
+        idle-timeout=600
+        ctrl-alt=t:miriway-terminal # Default "terminal emulator finder"
+
+        shell-component=dbus-update-activation-environment --systemd DISPLAY WAYLAND_DISPLAY
+
+        meta=Left:@dock-left
+        meta=Right:@dock-right
+        meta=Space:@toggle-maximized
+        meta=Home:@workspace-begin
+        meta=End:@workspace-end
+        meta=Page_Up:@workspace-up
+        meta=Page_Down:@workspace-down
+        ctrl-alt=BackSpace:@exit
+      '';
+      example = ''
+        idle-timeout=300
+        ctrl-alt=t:weston-terminal
+        add-wayland-extensions=all
+
+        shell-components=dbus-update-activation-environment --systemd DISPLAY WAYLAND_DISPLAY
+
+        shell-component=waybar
+        shell-component=wbg Pictures/wallpaper
+
+        shell-meta=a:synapse
+
+        meta=Left:@dock-left
+        meta=Right:@dock-right
+        meta=Space:@toggle-maximized
+        meta=Home:@workspace-begin
+        meta=End:@workspace-end
+        meta=Page_Up:@workspace-up
+        meta=Page_Down:@workspace-down
+        ctrl-alt=BackSpace:@exit
+      '';
+      description = lib.mdDoc ''
+        Miriway's config. This will be installed system-wide.
+        The default will install the miriway package's barebones example config.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      systemPackages = [ pkgs.miriway ];
+      etc = {
+        "xdg/xdg-miriway/miriway-shell.config".text = cfg.config;
+      };
+    };
+
+    hardware.opengl.enable = lib.mkDefault true;
+    fonts.enableDefaultPackages = lib.mkDefault true;
+    programs.dconf.enable = lib.mkDefault true;
+    programs.xwayland.enable = lib.mkDefault true;
+
+    # To make the Miriway session available if a display manager like SDDM is enabled:
+    services.xserver.displayManager.sessionPackages = [ pkgs.miriway ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ OPNA2608 ];
+}
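A minimal usage sketch for the module above, assuming it is imported into a NixOS configuration; the keybindings are illustrative values for the `lines`-typed `config` option.

```nix
{
  programs.miriway = {
    enable = true;
    # Replaces the default /etc/xdg/xdg-miriway/miriway-shell.config installed by this module.
    config = ''
      idle-timeout=300
      ctrl-alt=t:miriway-terminal
      meta=Left:@dock-left
      meta=Right:@dock-right
    '';
  };
}
```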
diff --git a/nixpkgs/nixos/modules/programs/mosh.nix b/nixpkgs/nixos/modules/programs/mosh.nix
new file mode 100644
index 000000000000..9e56e1731d7c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/mosh.nix
@@ -0,0 +1,43 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg  = config.programs.mosh;
+
+in
+{
+  options.programs.mosh = {
+    enable = mkOption {
+      description = lib.mdDoc ''
+        Whether to enable mosh. Note, this will open ports in your firewall!
+      '';
+      default = false;
+      type = lib.types.bool;
+    };
+    withUtempter = mkOption {
+      description = lib.mdDoc ''
+        Whether to enable libutempter for mosh.
+        This is required so that mosh can write to /var/run/utmp (which can be queried with `who` to display currently connected user sessions).
+        Note, this will add a setgid wrapper for the group utmp!
+      '';
+      default = true;
+      type = lib.types.bool;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ mosh ];
+    networking.firewall.allowedUDPPortRanges = [ { from = 60000; to = 61000; } ];
+    security.wrappers = mkIf cfg.withUtempter {
+      utempter = {
+        source = "${pkgs.libutempter}/lib/utempter/utempter";
+        owner = "root";
+        group = "utmp";
+        setuid = false;
+        setgid = true;
+      };
+    };
+  };
+}
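A minimal sketch of enabling the module above in a NixOS configuration; the option names are exactly those declared in the module.

```nix
{
  # Installs mosh, opens UDP ports 60000-61000 in the firewall, and (by
  # default) adds the setgid utempter wrapper so sessions appear in `who`.
  programs.mosh = {
    enable = true;
    withUtempter = true; # default
  };
}
```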
diff --git a/nixpkgs/nixos/modules/programs/msmtp.nix b/nixpkgs/nixos/modules/programs/msmtp.nix
new file mode 100644
index 000000000000..a9aed027bdb7
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/msmtp.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.msmtp;
+
+in {
+  meta.maintainers = with maintainers; [ pacien ];
+
+  options = {
+    programs.msmtp = {
+      enable = mkEnableOption (lib.mdDoc "msmtp - an SMTP client");
+
+      setSendmail = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to set the system sendmail to msmtp's.
+        '';
+      };
+
+      defaults = mkOption {
+        type = types.attrs;
+        default = {};
+        example = {
+          aliases = "/etc/aliases";
+          port = 587;
+          tls = true;
+        };
+        description = lib.mdDoc ''
+          Default values applied to all accounts.
+          See msmtp(1) for the available options.
+        '';
+      };
+
+      accounts = mkOption {
+        type = with types; attrsOf attrs;
+        default = {};
+        example = {
+          "default" = {
+            host = "smtp.example";
+            auth = true;
+            user = "someone";
+            passwordeval = "cat /secrets/password.txt";
+          };
+        };
+        description = lib.mdDoc ''
+          Named accounts and their respective configurations.
+          The special name "default" allows a default account to be defined.
+          See msmtp(1) for the available options.
+
+          Use `programs.msmtp.extraConfig` instead of this attribute set-based
+          option if ordered account inheritance is needed.
+
+          It is advised to use the `passwordeval` setting to read the password
+          from a secret file to avoid having it written in the world-readable
+          nix store. The password file must end with a newline (`\n`).
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to add to the msmtp configuration verbatim.
+          See msmtp(1) for the syntax and available options.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.msmtp ];
+
+    services.mail.sendmailSetuidWrapper = mkIf cfg.setSendmail {
+      program = "sendmail";
+      source = "${pkgs.msmtp}/bin/sendmail";
+      setuid = false;
+      setgid = false;
+      owner = "root";
+      group = "root";
+    };
+
+    environment.etc."msmtprc".text = let
+      mkValueString = v:
+        if v == true then "on"
+        else if v == false then "off"
+        else generators.mkValueStringDefault {} v;
+      mkKeyValueString = k: v: "${k} ${mkValueString v}";
+      mkInnerSectionString =
+        attrs: concatStringsSep "\n" (mapAttrsToList mkKeyValueString attrs);
+      mkAccountString = name: attrs: ''
+        account ${name}
+        ${mkInnerSectionString attrs}
+      '';
+    in ''
+      defaults
+      ${mkInnerSectionString cfg.defaults}
+
+      ${concatStringsSep "\n" (mapAttrsToList mkAccountString cfg.accounts)}
+
+      ${cfg.extraConfig}
+    '';
+  };
+}
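A usage sketch for the module above; the host, user, and secret path are hypothetical and only illustrate the `defaults`/`accounts` options.

```nix
{
  programs.msmtp = {
    enable = true;          # setSendmail defaults to true, so msmtp also becomes the system sendmail
    defaults = {
      tls = true;           # rendered as "tls on" by the generator above
      port = 587;
    };
    accounts.default = {
      host = "mail.example.org";               # hypothetical server
      auth = true;
      user = "alice";                          # hypothetical user
      passwordeval = "cat /run/secrets/smtp";  # hypothetical secret path, kept out of the store
    };
  };
}
```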
diff --git a/nixpkgs/nixos/modules/programs/mtr.nix b/nixpkgs/nixos/modules/programs/mtr.nix
new file mode 100644
index 000000000000..173f24729417
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/mtr.nix
@@ -0,0 +1,41 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.mtr;
+
+in {
+  options = {
+    programs.mtr = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to add mtr to the global environment and configure a
+          setcap wrapper for it.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.mtr;
+        defaultText = literalExpression "pkgs.mtr";
+        description = lib.mdDoc ''
+          The package to use.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ cfg.package ];
+
+    security.wrappers.mtr-packet = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_raw+p";
+      source = "${cfg.package}/bin/mtr-packet";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/nano.nix b/nixpkgs/nixos/modules/programs/nano.nix
new file mode 100644
index 000000000000..88404f3557c6
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/nano.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.nano;
+in
+
+{
+  options = {
+    programs.nano = {
+      enable = lib.mkEnableOption (lib.mdDoc "nano") // {
+        default = true;
+      };
+
+      package = lib.mkPackageOptionMD pkgs "nano" { };
+
+      nanorc = lib.mkOption {
+        type = lib.types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          The system-wide nano configuration.
+          See {manpage}`nanorc(5)`.
+        '';
+        example = ''
+          set nowrap
+          set tabstospaces
+          set tabsize 2
+        '';
+      };
+
+      syntaxHighlight = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable syntax highlight for various languages.";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      etc.nanorc.text = (lib.optionalString cfg.syntaxHighlight ''
+        # load syntax highlighting files
+        include "${cfg.package}/share/nano/*.nanorc"
+        include "${cfg.package}/share/nano/extra/*.nanorc"
+      '') + cfg.nanorc;
+      systemPackages = [ cfg.package ];
+    };
+  };
+}
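A short sketch of the options declared above, assuming a standard NixOS configuration.

```nix
{
  programs.nano = {
    enable = true;           # already the default
    syntaxHighlight = true;  # prepends the bundled *.nanorc includes shown above
    nanorc = ''
      set nowrap
      set tabsize 2
    '';
  };
}
```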
diff --git a/nixpkgs/nixos/modules/programs/nbd.nix b/nixpkgs/nixos/modules/programs/nbd.nix
new file mode 100644
index 000000000000..a44403021e35
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/nbd.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.nbd;
+in
+{
+  options = {
+    programs.nbd = {
+      enable = mkEnableOption (lib.mdDoc "Network Block Device (nbd) support");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ nbd ];
+    boot.kernelModules = [ "nbd" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/neovim.nix b/nixpkgs/nixos/modules/programs/neovim.nix
new file mode 100644
index 000000000000..1b53b9b5d919
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/neovim.nix
@@ -0,0 +1,176 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.neovim;
+in
+{
+  options.programs.neovim = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc ''
+        Whether to enable Neovim.
+
+        When enabled through this option, Neovim is wrapped to use a
+        configuration managed by this module. The configuration file in the
+        user's home directory at {file}`~/.config/nvim/init.vim` is no longer
+        loaded by default.
+      '';
+    };
+
+    defaultEditor = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        When enabled, installs neovim and configures neovim to be the default editor
+        using the EDITOR environment variable.
+      '';
+    };
+
+    viAlias = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Symlink {command}`vi` to {command}`nvim` binary.
+      '';
+    };
+
+    vimAlias = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Symlink {command}`vim` to {command}`nvim` binary.
+      '';
+    };
+
+    withRuby = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Enable Ruby provider.";
+    };
+
+    withPython3 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Enable Python 3 provider.";
+    };
+
+    withNodeJs = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable Node provider.";
+    };
+
+    configure = mkOption {
+      type = types.attrs;
+      default = { };
+      example = literalExpression ''
+        {
+          customRC = '''
+            " here your custom configuration goes!
+          ''';
+          packages.myVimPackage = with pkgs.vimPlugins; {
+            # loaded on launch
+            start = [ fugitive ];
+            # manually loadable by calling `:packadd $plugin-name`
+            opt = [ ];
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Generate your init file from your list of plugins and custom commands.
+        Neovim will then be wrapped to load {command}`nvim -u /nix/store/«hash»-vimrc`
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.neovim-unwrapped;
+      defaultText = literalExpression "pkgs.neovim-unwrapped";
+      description = lib.mdDoc "The package to use for the neovim binary.";
+    };
+
+    finalPackage = mkOption {
+      type = types.package;
+      visible = false;
+      readOnly = true;
+      description = lib.mdDoc "Resulting customized neovim package.";
+    };
+
+    runtime = mkOption {
+      default = { };
+      example = literalExpression ''
+        { "ftplugin/c.vim".text = "setlocal omnifunc=v:lua.vim.lsp.omnifunc"; }
+      '';
+      description = lib.mdDoc ''
+        Set of files that have to be linked in {file}`runtime`.
+      '';
+
+      type = with types; attrsOf (submodule (
+        { name, config, ... }:
+        {
+          options = {
+
+            enable = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Whether this runtime directory should be generated.  This
+                option allows specific runtime files to be disabled.
+              '';
+            };
+
+            target = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                Name of symlink.  Defaults to the attribute
+                name.
+              '';
+            };
+
+            text = mkOption {
+              default = null;
+              type = types.nullOr types.lines;
+              description = lib.mdDoc "Text of the file.";
+            };
+
+            source = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = lib.mdDoc "Path of the source file.";
+            };
+
+          };
+
+          config.target = mkDefault name;
+        }
+      ));
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [
+      cfg.finalPackage
+    ];
+    environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "nvim");
+
+    environment.etc = listToAttrs (attrValues (mapAttrs
+      (name: value: {
+        name = "xdg/nvim/${name}";
+        value = removeAttrs
+          (value // {
+            target = "xdg/nvim/${value.target}";
+          })
+          (optionals (isNull value.source) [ "source" ]);
+      })
+      cfg.runtime));
+
+    programs.neovim.finalPackage = pkgs.wrapNeovim cfg.package {
+      inherit (cfg) viAlias vimAlias withPython3 withNodeJs withRuby configure;
+    };
+  };
+}
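A usage sketch for the module above; the plugin and runtime file mirror the option examples and are illustrative only.

```nix
{ pkgs, ... }:
{
  programs.neovim = {
    enable = true;
    defaultEditor = true;   # sets EDITOR = "nvim" at override priority 900
    viAlias = true;
    vimAlias = true;
    configure = {
      customRC = ''
        set number
      '';
      packages.myVimPackage = with pkgs.vimPlugins; {
        start = [ fugitive ]; # loaded on launch
      };
    };
    # Linked to /etc/xdg/nvim/ftplugin/c.vim by the runtime handling above.
    runtime."ftplugin/c.vim".text = "setlocal omnifunc=v:lua.vim.lsp.omnifunc";
  };
}
```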
diff --git a/nixpkgs/nixos/modules/programs/nethoscope.nix b/nixpkgs/nixos/modules/programs/nethoscope.nix
new file mode 100644
index 000000000000..d8ece61c90a2
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/nethoscope.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.nethoscope;
+in
+{
+  meta.maintainers = with maintainers; [ _0x4A6F ];
+
+  options = {
+    programs.nethoscope = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to add nethoscope to the global environment and configure a
+          setcap wrapper for it.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ nethoscope ];
+    security.wrappers.nethoscope = {
+      source = "${pkgs.nethoscope}/bin/nethoscope";
+      capabilities = "cap_net_raw,cap_net_admin=eip";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/nexttrace.nix b/nixpkgs/nixos/modules/programs/nexttrace.nix
new file mode 100644
index 000000000000..091d4f17f9f6
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/nexttrace.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.nexttrace;
+
+in
+{
+  options = {
+    programs.nexttrace = {
+      enable = lib.mkEnableOption (lib.mdDoc "Nexttrace to the global environment and configure a setcap wrapper for it");
+      package = lib.mkPackageOptionMD pkgs "nexttrace" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    security.wrappers.nexttrace = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_raw,cap_net_admin+eip";
+      source = "${cfg.package}/bin/nexttrace";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/nix-index.nix b/nixpkgs/nixos/modules/programs/nix-index.nix
new file mode 100644
index 000000000000..a494b9d8c2c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/nix-index.nix
@@ -0,0 +1,62 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.programs.nix-index;
+in {
+  options.programs.nix-index = with lib; {
+    enable = mkEnableOption (lib.mdDoc "nix-index, a file database for nixpkgs");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.nix-index;
+      defaultText = literalExpression "pkgs.nix-index";
+      description = lib.mdDoc "Package providing the `nix-index` tool.";
+    };
+
+    enableBashIntegration = mkEnableOption (lib.mdDoc "Bash integration") // {
+      default = true;
+    };
+
+    enableZshIntegration = mkEnableOption (lib.mdDoc "Zsh integration") // {
+      default = true;
+    };
+
+    enableFishIntegration = mkEnableOption (lib.mdDoc "Fish integration") // {
+      default = true;
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = let
+      checkOpt = name: {
+        assertion = cfg.${name} -> !config.programs.command-not-found.enable;
+        message = ''
+          The 'programs.command-not-found.enable' option is mutually exclusive
+          with the 'programs.nix-index.${name}' option.
+        '';
+      };
+    in [ (checkOpt "enableBashIntegration") (checkOpt "enableZshIntegration") ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    programs.bash.interactiveShellInit = lib.mkIf cfg.enableBashIntegration ''
+      source ${cfg.package}/etc/profile.d/command-not-found.sh
+    '';
+
+    programs.zsh.interactiveShellInit = lib.mkIf cfg.enableZshIntegration ''
+      source ${cfg.package}/etc/profile.d/command-not-found.sh
+    '';
+
+    # See https://github.com/bennofs/nix-index/issues/126
+    programs.fish.interactiveShellInit = let
+      wrapper = pkgs.writeScript "command-not-found" ''
+        #!${pkgs.bash}/bin/bash
+        source ${cfg.package}/etc/profile.d/command-not-found.sh
+        command_not_found_handle "$@"
+      '';
+    in lib.mkIf cfg.enableFishIntegration ''
+      function __fish_command_not_found_handler --on-event fish_command_not_found
+          ${wrapper} $argv
+      end
+    '';
+  };
+}
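A sketch of enabling the module above; per the assertions, `programs.command-not-found` has to be off when the shell integrations are used.

```nix
{
  programs.command-not-found.enable = false; # mutually exclusive with the integrations below
  programs.nix-index = {
    enable = true;
    enableBashIntegration = true; # default
    enableZshIntegration = true;  # default
    enableFishIntegration = true; # default
  };
}
```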
diff --git a/nixpkgs/nixos/modules/programs/nix-ld.nix b/nixpkgs/nixos/modules/programs/nix-ld.nix
new file mode 100644
index 000000000000..d54b3917f89a
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/nix-ld.nix
@@ -0,0 +1,61 @@
+{ pkgs, lib, config, ... }:
+let
+  cfg = config.programs.nix-ld;
+
+  nix-ld-libraries = pkgs.buildEnv {
+    name = "lb-library-path";
+    pathsToLink = [ "/lib" ];
+    paths = map lib.getLib cfg.libraries;
+    # TODO make glibc here configurable?
+    postBuild = ''
+      ln -s ${pkgs.stdenv.cc.bintools.dynamicLinker} $out/share/nix-ld/lib/ld.so
+    '';
+    extraPrefix = "/share/nix-ld";
+    ignoreCollisions = true;
+  };
+
+  # We currently take all libraries from systemd and nix as the default.
+  # Is there a better list?
+  baseLibraries = with pkgs; [
+    zlib
+    zstd
+    stdenv.cc.cc
+    curl
+    openssl
+    attr
+    libssh
+    bzip2
+    libxml2
+    acl
+    libsodium
+    util-linux
+    xz
+    systemd
+  ];
+in
+{
+  meta.maintainers = [ lib.maintainers.mic92 ];
+  options.programs.nix-ld = {
+    enable = lib.mkEnableOption (lib.mdDoc ''nix-ld, Documentation: <https://github.com/Mic92/nix-ld>'');
+    package = lib.mkPackageOptionMD pkgs "nix-ld" { };
+    libraries = lib.mkOption {
+      type = lib.types.listOf lib.types.package;
+      description = lib.mdDoc "Libraries that automatically become available to all programs. The default set includes common libraries.";
+      default = baseLibraries;
+      defaultText = lib.literalExpression "baseLibraries derived from systemd and nix dependencies.";
+    };
+  };
+
+  config = lib.mkIf config.programs.nix-ld.enable {
+    systemd.tmpfiles.packages = [ cfg.package ];
+
+    environment.systemPackages = [ nix-ld-libraries ];
+
+    environment.pathsToLink = [ "/share/nix-ld" ];
+
+    environment.variables = {
+      NIX_LD = "/run/current-system/sw/share/nix-ld/lib/ld.so";
+      NIX_LD_LIBRARY_PATH = "/run/current-system/sw/share/nix-ld/lib";
+    };
+  };
+}
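A sketch of the `libraries` option declared above; note that defining it replaces the `baseLibraries` default rather than extending it.

```nix
{ pkgs, ... }:
{
  programs.nix-ld = {
    enable = true;
    # Overrides the baseLibraries default shown above; list what your foreign binaries need.
    libraries = with pkgs; [ zlib openssl stdenv.cc.cc ];
  };
}
```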
diff --git a/nixpkgs/nixos/modules/programs/nm-applet.nix b/nixpkgs/nixos/modules/programs/nm-applet.nix
new file mode 100644
index 000000000000..4b09b1884d7e
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/nm-applet.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+{
+  meta = {
+    maintainers = lib.teams.freedesktop.members;
+  };
+
+  options.programs.nm-applet = {
+    enable = lib.mkEnableOption (lib.mdDoc "nm-applet");
+
+    indicator = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to use indicator instead of status icon.
+        It is needed for Appindicator environments, like Enlightenment.
+      '';
+    };
+  };
+
+  config = lib.mkIf config.programs.nm-applet.enable {
+    systemd.user.services.nm-applet = {
+      description = "Network manager applet";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = "${pkgs.networkmanagerapplet}/bin/nm-applet ${lib.optionalString config.programs.nm-applet.indicator "--indicator"}";
+    };
+
+    services.dbus.packages = [ pkgs.gcr ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/nncp.nix b/nixpkgs/nixos/modules/programs/nncp.nix
new file mode 100644
index 000000000000..98fea84ab740
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/nncp.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  nncpCfgFile = "/run/nncp.hjson";
+  programCfg = config.programs.nncp;
+  settingsFormat = pkgs.formats.json { };
+  jsonCfgFile = settingsFormat.generate "nncp.json" programCfg.settings;
+  pkg = programCfg.package;
+in {
+  options.programs.nncp = {
+
+    enable =
+      mkEnableOption (lib.mdDoc "NNCP (Node to Node copy) utilities and configuration");
+
+    group = mkOption {
+      type = types.str;
+      default = "uucp";
+      description = lib.mdDoc ''
+        The group under which NNCP files shall be owned.
+        Any member of this group may access the secret keys
+        of this NNCP node.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.nncp;
+      defaultText = literalExpression "pkgs.nncp";
+      description = lib.mdDoc "The NNCP package to use system-wide.";
+    };
+
+    secrets = mkOption {
+      type = with types; listOf str;
+      example = [ "/run/keys/nncp.hjson" ];
+      description = lib.mdDoc ''
+        A list of paths to NNCP configuration files that should not be
+        in the Nix store. These files are layered on top of the values at
+        [](#opt-programs.nncp.settings).
+      '';
+    };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      description = lib.mdDoc ''
+        NNCP configuration, see
+        <http://www.nncpgo.org/Configuration.html>.
+        At runtime these settings will be overlaid by the contents of
+        [](#opt-programs.nncp.secrets) into the file
+        `${nncpCfgFile}`. Node keypairs go in
+        `secrets`, do not specify them in
+        `settings` as they will be leaked into
+        `/nix/store`!
+      '';
+      default = { };
+    };
+
+  };
+
+  config = mkIf programCfg.enable {
+
+    environment = {
+      systemPackages = [ pkg ];
+      etc."nncp.hjson".source = nncpCfgFile;
+    };
+
+    programs.nncp.settings = {
+      spool = mkDefault "/var/spool/nncp";
+      log = mkDefault "/var/spool/nncp/log";
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${programCfg.settings.spool} 0770 root ${programCfg.group}"
+      "f ${programCfg.settings.log} 0770 root ${programCfg.group}"
+    ];
+
+    systemd.services.nncp-config = {
+      path = [ pkg ];
+      description = "Generate NNCP configuration";
+      wantedBy = [ "basic.target" ];
+      serviceConfig.Type = "oneshot";
+      script = ''
+        umask u=rw
+        nncpCfgDir=$(mktemp --directory nncp.XXX)
+        for f in ${jsonCfgFile} ${toString config.programs.nncp.secrets}; do
+          tmpdir=$(mktemp --directory nncp.XXX)
+          nncp-cfgdir -cfg $f -dump $tmpdir
+          find $tmpdir -size 1c -delete
+          cp -a $tmpdir/* $nncpCfgDir/
+          rm -rf $tmpdir
+        done
+        nncp-cfgdir -load $nncpCfgDir > ${nncpCfgFile}
+        rm -rf $nncpCfgDir
+        chgrp ${programCfg.group} ${nncpCfgFile}
+        chmod g+r ${nncpCfgFile}
+      '';
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ehmry ];
+}
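A sketch using only the options declared above; the secret path is hypothetical, mirroring the option's own example.

```nix
{
  programs.nncp = {
    enable = true;
    group = "uucp"; # default
    # Keypairs stay out of the store and are merged into /run/nncp.hjson at runtime.
    secrets = [ "/run/keys/nncp.hjson" ]; # hypothetical path
    # Non-secret settings may go in `settings`; spool and log already default to /var/spool/nncp.
  };
}
```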
diff --git a/nixpkgs/nixos/modules/programs/noisetorch.nix b/nixpkgs/nixos/modules/programs/noisetorch.nix
new file mode 100644
index 000000000000..c022b01d79af
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/noisetorch.nix
@@ -0,0 +1,30 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let cfg = config.programs.noisetorch;
+in
+{
+  options.programs.noisetorch = {
+    enable = mkEnableOption (lib.mdDoc "noisetorch + setcap wrapper");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.noisetorch;
+      defaultText = literalExpression "pkgs.noisetorch";
+      description = lib.mdDoc ''
+        The noisetorch package to use.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers.noisetorch = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_sys_resource=+ep";
+      source = "${cfg.package}/bin/noisetorch";
+    };
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/npm.nix b/nixpkgs/nixos/modules/programs/npm.nix
new file mode 100644
index 000000000000..c41fea326149
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/npm.nix
@@ -0,0 +1,54 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.npm;
+in
+
+{
+  ###### interface
+
+  options = {
+    programs.npm = {
+      enable = mkEnableOption (lib.mdDoc "{command}`npm` global config");
+
+      package = mkOption {
+        type = types.package;
+        description = lib.mdDoc "The npm package version / flavor to use";
+        default = pkgs.nodePackages.npm;
+        defaultText = literalExpression "pkgs.nodePackages.npm";
+        example = literalExpression "pkgs.nodePackages_13_x.npm";
+      };
+
+      npmrc = mkOption {
+        type = lib.types.lines;
+        description = lib.mdDoc ''
+          The system-wide npm configuration.
+          See <https://docs.npmjs.com/misc/config>.
+        '';
+        default = ''
+          prefix = ''${HOME}/.npm
+        '';
+        example = ''
+          prefix = ''${HOME}/.npm
+          https-proxy=proxy.example.com
+          init-license=MIT
+          init-author-url=https://www.npmjs.com/
+          color=true
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+    environment.etc.npmrc.text = cfg.npmrc;
+
+    environment.variables.NPM_CONFIG_GLOBALCONFIG = "/etc/npmrc";
+
+    environment.systemPackages = [ cfg.package ];
+  };
+
+}
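A short sketch of the `npmrc` option above; note the `''${HOME}` escape, which yields a literal `${HOME}` in the generated /etc/npmrc.

```nix
{
  programs.npm = {
    enable = true;
    npmrc = ''
      prefix = ''${HOME}/.npm
      color = true
    '';
  };
}
```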
diff --git a/nixpkgs/nixos/modules/programs/ns-usbloader.nix b/nixpkgs/nixos/modules/programs/ns-usbloader.nix
new file mode 100644
index 000000000000..8d0b698d6b4c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/ns-usbloader.nix
@@ -0,0 +1,18 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.programs.ns-usbloader;
+in
+{
+  options = {
+    programs.ns-usbloader = {
+      enable = lib.mkEnableOption (lib.mdDoc "ns-usbloader application with udev rules applied");
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.ns-usbloader ];
+    services.udev.packages = [ pkgs.ns-usbloader ];
+  };
+
+  meta.maintainers = pkgs.ns-usbloader.meta.maintainers;
+}
diff --git a/nixpkgs/nixos/modules/programs/oblogout.nix b/nixpkgs/nixos/modules/programs/oblogout.nix
new file mode 100644
index 000000000000..a039b0623b52
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/oblogout.nix
@@ -0,0 +1,11 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  imports = [
+    (mkRemovedOptionModule [ "programs" "oblogout" ] "programs.oblogout has been removed from NixOS. This is because the oblogout repository has been archived upstream.")
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/programs/oddjobd.nix b/nixpkgs/nixos/modules/programs/oddjobd.nix
new file mode 100644
index 000000000000..b0920d007c9e
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/oddjobd.nix
@@ -0,0 +1,33 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.programs.oddjobd;
+in
+{
+  options.programs.oddjobd = {
+    enable = lib.mkEnableOption "oddjob";
+    package = lib.mkPackageOption pkgs "oddjob" {};
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      { assertion = false;
+        message = "The oddjob service was found to be broken without NixOS test or maintainer. Please take ownership of this service.";
+      }
+    ];
+    systemd.packages = [ cfg.package ];
+
+    systemd.services.oddjobd = {
+      wantedBy = [ "multi-user.target"];
+      after = [ "network.target"];
+      description = "DBUS Odd-job Daemon";
+      enable = true;
+      documentation = [ "man:oddjobd(8)" "man:oddjobd.conf(5)" ];
+      serviceConfig = {
+        Type = "dbus";
+        BusName = "org.freedesktop.oddjob";
+        ExecStart = "${lib.getBin cfg.package}/bin/oddjobd";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/openvpn3.nix b/nixpkgs/nixos/modules/programs/openvpn3.nix
new file mode 100644
index 000000000000..37a1bfeb0c3e
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/openvpn3.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.openvpn3;
+in
+{
+  options.programs.openvpn3 = {
+    enable = mkEnableOption (lib.mdDoc "the openvpn3 client");
+    package = mkOption {
+      type = types.package;
+      default = pkgs.openvpn3.override {
+        enableSystemdResolved = config.services.resolved.enable;
+      };
+      defaultText = literalExpression ''pkgs.openvpn3.override {
+        enableSystemdResolved = config.services.resolved.enable;
+      }'';
+      description = lib.mdDoc ''
+        Which package to use for `openvpn3`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.dbus.packages = [
+      cfg.package
+    ];
+
+    users.users.openvpn = {
+      isSystemUser = true;
+      uid = config.ids.uids.openvpn;
+      group = "openvpn";
+    };
+
+    users.groups.openvpn = {
+      gid = config.ids.gids.openvpn;
+    };
+
+    environment.systemPackages = [
+      cfg.package
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/pantheon-tweaks.nix b/nixpkgs/nixos/modules/programs/pantheon-tweaks.nix
new file mode 100644
index 000000000000..82f93619db15
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/pantheon-tweaks.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta = {
+    maintainers = teams.pantheon.members;
+  };
+
+  ###### interface
+  options = {
+    programs.pantheon-tweaks.enable = mkEnableOption (lib.mdDoc "Pantheon Tweaks, an unofficial system settings panel for Pantheon");
+  };
+
+  ###### implementation
+  config = mkIf config.programs.pantheon-tweaks.enable {
+    services.xserver.desktopManager.pantheon.extraSwitchboardPlugs = [ pkgs.pantheon-tweaks ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/partition-manager.nix b/nixpkgs/nixos/modules/programs/partition-manager.nix
new file mode 100644
index 000000000000..c18598b7c25d
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/partition-manager.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta.maintainers = [ maintainers.oxalica ];
+
+  ###### interface
+  options = {
+    programs.partition-manager.enable = mkEnableOption (lib.mdDoc "KDE Partition Manager");
+  };
+
+  ###### implementation
+  config = mkIf config.programs.partition-manager.enable {
+    services.dbus.packages = [ pkgs.libsForQt5.kpmcore ];
+    # `kpmcore` need to be installed to pull in polkit actions.
+    environment.systemPackages = [ pkgs.libsForQt5.kpmcore pkgs.partition-manager ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/plotinus.md b/nixpkgs/nixos/modules/programs/plotinus.md
new file mode 100644
index 000000000000..fac3bbad1e08
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/plotinus.md
@@ -0,0 +1,17 @@
+# Plotinus {#module-program-plotinus}
+
+*Source:* {file}`modules/programs/plotinus.nix`
+
+*Upstream documentation:* <https://github.com/p-e-w/plotinus>
+
+Plotinus is a searchable command palette in every modern GTK application.
+
+When in a GTK 3 application and Plotinus is enabled, you can press
+`Ctrl+Shift+P` to open the command palette. The command
+palette provides a searchable list of all menu items in the application.
+
+To enable Plotinus, add the following to your
+{file}`configuration.nix`:
+```
+programs.plotinus.enable = true;
+```
diff --git a/nixpkgs/nixos/modules/programs/plotinus.nix b/nixpkgs/nixos/modules/programs/plotinus.nix
new file mode 100644
index 000000000000..c2b6884d6490
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/plotinus.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.plotinus;
+in
+{
+  meta = {
+    maintainers = pkgs.plotinus.meta.maintainers;
+    doc = ./plotinus.md;
+  };
+
+  ###### interface
+
+  options = {
+    programs.plotinus = {
+      enable = mkOption {
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Plotinus GTK 3 plugin. Plotinus provides a
+          popup (triggered by Ctrl-Shift-P) to search the menus of a
+          compatible application.
+        '';
+        type = types.bool;
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.sessionVariables.XDG_DATA_DIRS = [ "${pkgs.plotinus}/share/gsettings-schemas/${pkgs.plotinus.name}" ];
+    environment.variables.GTK3_MODULES = [ "${pkgs.plotinus}/lib/libplotinus.so" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/projecteur.nix b/nixpkgs/nixos/modules/programs/projecteur.nix
new file mode 100644
index 000000000000..9fcd357d3b23
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/projecteur.nix
@@ -0,0 +1,20 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.projecteur;
+in
+{
+  options.programs.projecteur = {
+    enable = lib.mkEnableOption (lib.mdDoc "projecteur");
+    package = lib.mkPackageOptionMD pkgs "projecteur" { };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    services.udev.packages = [ cfg.package ];
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ benneti drupol ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/proxychains.nix b/nixpkgs/nixos/modules/programs/proxychains.nix
new file mode 100644
index 000000000000..9bdd5d405668
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/proxychains.nix
@@ -0,0 +1,169 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+
+  cfg = config.programs.proxychains;
+
+  configFile = ''
+    ${cfg.chain.type}_chain
+    ${optionalString (cfg.chain.type == "random")
+    "chain_len = ${builtins.toString cfg.chain.length}"}
+    ${optionalString cfg.proxyDNS "proxy_dns"}
+    ${optionalString cfg.quietMode "quiet_mode"}
+    remote_dns_subnet ${builtins.toString cfg.remoteDNSSubnet}
+    tcp_read_time_out ${builtins.toString cfg.tcpReadTimeOut}
+    tcp_connect_time_out ${builtins.toString cfg.tcpConnectTimeOut}
+    localnet ${cfg.localnet}
+    [ProxyList]
+    ${builtins.concatStringsSep "\n"
+      (lib.mapAttrsToList (k: v: "${v.type} ${v.host} ${builtins.toString v.port}")
+        (lib.filterAttrs (k: v: v.enable) cfg.proxies))}
+  '';
+
+  proxyOptions = {
+    options = {
+      enable = mkEnableOption (lib.mdDoc "this proxy");
+
+      type = mkOption {
+        type = types.enum [ "http" "socks4" "socks5" ];
+        description = lib.mdDoc "Proxy type.";
+      };
+
+      host = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Proxy host or IP address.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        description = lib.mdDoc "Proxy port";
+      };
+    };
+  };
+
+in {
+
+  ###### interface
+
+  options = {
+
+    programs.proxychains = {
+
+      enable = mkEnableOption (lib.mdDoc "installing proxychains configuration");
+
+      package = mkPackageOptionMD pkgs "proxychains" {
+        example = "pkgs.proxychains-ng";
+      };
+
+      chain = {
+        type = mkOption {
+          type = types.enum [ "dynamic" "strict" "random" ];
+          default = "strict";
+          description = lib.mdDoc ''
+            `dynamic` - Each connection will be done via chained proxies
+            all proxies chained in the order as they appear in the list
+            at least one proxy must be online to play in chain
+            (dead proxies are skipped)
+            otherwise `EINTR` is returned to the app.
+
+            `strict` - Each connection will be done via chained proxies
+            all proxies chained in the order as they appear in the list
+            all proxies must be online to play in chain
+            otherwise `EINTR` is returned to the app.
+
+            `random` - Each connection will be done via random proxy
+            (or proxy chain, see {option}`programs.proxychains.chain.length`) from the list.
+          '';
+        };
+        length = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          description = lib.mdDoc ''
+            Chain length for random chain.
+          '';
+        };
+      };
+
+      proxyDNS = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Proxy DNS requests - no leak for DNS data.";
+      };
+
+      quietMode = mkEnableOption (lib.mdDoc "Quiet mode (no output from the library)");
+
+      remoteDNSSubnet = mkOption {
+        type = types.enum [ 10 127 224 ];
+        default = 224;
+        description = lib.mdDoc ''
+          Set the class A subnet number to use for the internal remote DNS mapping, uses the reserved 224.x.x.x range by default.
+        '';
+      };
+
+      tcpReadTimeOut = mkOption {
+        type = types.int;
+        default = 15000;
+        description = lib.mdDoc "Connection read time-out in milliseconds.";
+      };
+
+      tcpConnectTimeOut = mkOption {
+        type = types.int;
+        default = 8000;
+        description = lib.mdDoc "Connection time-out in milliseconds.";
+      };
+
+      localnet = mkOption {
+        type = types.str;
+        default = "127.0.0.0/255.0.0.0";
+        description = lib.mdDoc "By default enable localnet for loopback address ranges.";
+      };
+
+      proxies = mkOption {
+        type = types.attrsOf (types.submodule proxyOptions);
+        description = lib.mdDoc ''
+          Proxies to be used by proxychains.
+        '';
+
+        example = literalExpression ''
+          { myproxy =
+            { type = "socks4";
+              host = "127.0.0.1";
+              port = 1337;
+            };
+          }
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  meta.maintainers = with maintainers; [ sorki ];
+
+  config = mkIf cfg.enable {
+
+    assertions = singleton {
+      assertion = cfg.chain.type == "random" || cfg.chain.length == null;
+      message = ''
+        Option `programs.proxychains.chain.length`
+        only makes sense with `programs.proxychains.chain.type` = "random".
+      '';
+    };
+
+    programs.proxychains.proxies = mkIf config.services.tor.client.enable
+      {
+        torproxy = mkDefault {
+          enable = true;
+          type = "socks4";
+          host = "127.0.0.1";
+          port = 9050;
+        };
+      };
+
+    environment.etc."proxychains.conf".text = configFile;
+    environment.systemPackages = [ cfg.package ];
+  };
+
+}
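A sketch of the options above; the SOCKS5 proxy is hypothetical. With `services.tor.client.enable`, the module already injects a `torproxy` entry by default.

```nix
{
  programs.proxychains = {
    enable = true;
    chain.type = "strict";  # default; "random" additionally requires chain.length
    proxies.myproxy = {     # hypothetical local SOCKS5 proxy
      enable = true;
      type = "socks5";
      host = "127.0.0.1";
      port = 1080;
    };
  };
}
```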
diff --git a/nixpkgs/nixos/modules/programs/qdmr.nix b/nixpkgs/nixos/modules/programs/qdmr.nix
new file mode 100644
index 000000000000..1bb81317bda8
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/qdmr.nix
@@ -0,0 +1,25 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+
+let
+  cfg = config.programs.qdmr;
+in {
+  meta.maintainers = [ lib.maintainers.janik ];
+
+  options = {
+    programs.qdmr = {
+      enable = lib.mkEnableOption (lib.mdDoc "QDMR - a GUI application and command line tool for programming DMR radios");
+      package = lib.mkPackageOptionMD pkgs "qdmr" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    services.udev.packages = [ cfg.package ];
+    users.groups.dialout = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/qt5ct.nix b/nixpkgs/nixos/modules/programs/qt5ct.nix
new file mode 100644
index 000000000000..3ff47b355915
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/qt5ct.nix
@@ -0,0 +1,9 @@
+{ lib, ... }:
+
+with lib;
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "programs" "qt5ct" "enable" ] "Use qt5.platformTheme = \"qt5ct\" instead.")
+  ];
+}
diff --git a/nixpkgs/nixos/modules/programs/regreet.nix b/nixpkgs/nixos/modules/programs/regreet.nix
new file mode 100644
index 000000000000..0fd9cf232981
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/regreet.nix
@@ -0,0 +1,88 @@
+{ lib
+, pkgs
+, config
+, ...
+}:
+let
+  cfg = config.programs.regreet;
+  settingsFormat = pkgs.formats.toml { };
+in
+{
+  options.programs.regreet = {
+    enable = lib.mkEnableOption null // {
+      description = lib.mdDoc ''
+        Enable ReGreet, a clean and customizable greeter for greetd.
+
+        To use ReGreet, {option}`services.greetd` has to be enabled and
+        {option}`services.greetd.settings.default_session` should contain the
+        appropriate configuration to launch
+        {option}`config.programs.regreet.package`. For examples, see the
+        [ReGreet Readme](https://github.com/rharish101/ReGreet#set-as-default-session).
+
+        A minimal configuration that launches ReGreet in {command}`cage` is
+        enabled by this module by default.
+      '';
+    };
+
+    package = lib.mkPackageOptionMD pkgs [ "greetd" "regreet" ] { };
+
+    settings = lib.mkOption {
+      type = lib.types.either lib.types.path settingsFormat.type;
+      default = { };
+      description = lib.mdDoc ''
+        ReGreet configuration file. Refer
+        <https://github.com/rharish101/ReGreet/blob/main/regreet.sample.toml>
+        for options.
+      '';
+    };
+
+    cageArgs = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
+      default = [ "-s" ];
+      example = lib.literalExpression
+        ''
+          [ "-s" "-m" "last" ]
+        '';
+      description = lib.mdDoc ''
+        Additional arguments to be passed to
+        [cage](https://github.com/cage-kiosk/cage).
+      '';
+    };
+
+    extraCss = lib.mkOption {
+      type = lib.types.either lib.types.path lib.types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra CSS rules to apply on top of the GTK theme. Refer to
+        [GTK CSS Properties](https://docs.gtk.org/gtk4/css-properties.html) for
+        modifiable properties.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.greetd = {
+      enable = lib.mkDefault true;
+      settings.default_session.command = lib.mkDefault "${pkgs.dbus}/bin/dbus-run-session ${lib.getExe pkgs.cage} ${lib.escapeShellArgs cfg.cageArgs} -- ${lib.getExe cfg.package}";
+    };
+
+    environment.etc = {
+      "greetd/regreet.css" =
+        if lib.isPath cfg.extraCss
+        then {source = cfg.extraCss;}
+        else {text = cfg.extraCss;};
+
+      "greetd/regreet.toml".source =
+        if lib.isPath cfg.settings
+        then cfg.settings
+        else settingsFormat.generate "regreet.toml" cfg.settings;
+    };
+
+    systemd.tmpfiles.rules = let
+      group = config.users.users.${config.services.greetd.settings.default_session.user}.group;
+    in [
+      "d /var/log/regreet 0755 greeter ${group} - -"
+      "d /var/cache/regreet 0755 greeter ${group} - -"
+    ];
+  };
+}
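A sketch of the module above; the TOML keys under `settings` are assumptions taken from the upstream sample config linked in the option description.

```nix
{
  programs.regreet = {
    enable = true;                    # greetd is enabled automatically via mkDefault
    cageArgs = [ "-s" "-m" "last" ];
    settings = {
      # Assumed key; check regreet.sample.toml upstream.
      background.path = "/etc/backgrounds/login.png";
    };
    extraCss = ''
      window { font-size: 1.1em; }
    '';
  };
}
```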
diff --git a/nixpkgs/nixos/modules/programs/rog-control-center.nix b/nixpkgs/nixos/modules/programs/rog-control-center.nix
new file mode 100644
index 000000000000..4aef5143ac7f
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/rog-control-center.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.rog-control-center;
+in
+{
+  options = {
+    programs.rog-control-center = {
+      enable = lib.mkEnableOption (lib.mdDoc "the rog-control-center application");
+
+      autoStart = lib.mkOption {
+        default = false;
+        type = lib.types.bool;
+        description = lib.mdDoc "Whether rog-control-center should be started automatically.";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [
+      pkgs.asusctl
+      (lib.mkIf cfg.autoStart (pkgs.makeAutostartItem { name = "rog-control-center"; package = pkgs.asusctl; }))
+    ];
+
+    services.asusd.enable = true;
+  };
+
+  meta.maintainers = pkgs.asusctl.meta.maintainers;
+}
diff --git a/nixpkgs/nixos/modules/programs/rust-motd.nix b/nixpkgs/nixos/modules/programs/rust-motd.nix
new file mode 100644
index 000000000000..4c9b1018596b
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/rust-motd.nix
@@ -0,0 +1,149 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.rust-motd;
+  format = pkgs.formats.toml { };
+
+  # Order the sections in the TOML according to the order of sections
+  # in `cfg.order`.
+  motdConf = pkgs.runCommand "motd.conf"
+    {
+      __structuredAttrs = true;
+      inherit (cfg) order settings;
+      nativeBuildInputs = [ pkgs.remarshal pkgs.jq ];
+    }
+    ''
+      cat "$NIX_ATTRS_JSON_FILE" \
+        | jq '.settings as $settings
+              | .order
+              | map({ key: ., value: $settings."\(.)" })
+              | from_entries' -r \
+        | json2toml /dev/stdin "$out"
+    '';
+in {
+  options.programs.rust-motd = {
+    enable = mkEnableOption (lib.mdDoc "rust-motd");
+    enableMotdInSSHD = mkOption {
+      default = true;
+      type = types.bool;
+      description = mdDoc ''
+        Whether to let `openssh` print the
+        result when entering a new `ssh`-session.
+        By default either nothing or a static file defined via
+        [](#opt-users.motd) is printed. Because of that,
+        the latter option is incompatible with this module.
+      '';
+    };
+    refreshInterval = mkOption {
+      default = "*:0/5";
+      type = types.str;
+      description = mdDoc ''
+        Interval in which the {manpage}`motd(5)` file is refreshed.
+        For possible formats, please refer to {manpage}`systemd.time(7)`.
+      '';
+    };
+    order = mkOption {
+      type = types.listOf types.str;
+      default = attrNames cfg.settings;
+      defaultText = literalExpression "attrNames cfg.settings";
+      description = mdDoc ''
+        The order of the sections in [](#opt-programs.rust-motd.settings).
+        By default they are ordered alphabetically.
+
+        Context: since attribute sets in Nix are always
+        ordered alphabetically internally this means that
+
+        ```nix
+        {
+          uptime = { /* ... */ };
+          banner = { /* ... */ };
+        }
+        ```
+
+        will still have `banner` displayed before `uptime`.
+
+        To work around that, this option can be used to define the order of all keys,
+        i.e.
+
+        ```nix
+        {
+          order = [
+            "uptime"
+            "banner"
+          ];
+        }
+        ```
+
+        makes sure that `uptime` is placed before `banner` in the motd.
+      '';
+    };
+    settings = mkOption {
+      type = types.attrsOf format.type;
+      description = mdDoc ''
+        Settings on what to generate. Please read the
+        [upstream documentation](https://github.com/rust-motd/rust-motd/blob/main/README.md#configuration)
+        for further information.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = config.users.motd == null;
+        message = ''
+          `programs.rust-motd` is incompatible with `users.motd`!
+        '';
+      }
+      { assertion = sort (a: b: a < b) cfg.order == attrNames cfg.settings;
+        message = ''
+          Please ensure that every section from `programs.rust-motd.settings` is present in
+          `programs.rust-motd.order`.
+        '';
+      }
+    ];
+    systemd.services.rust-motd = {
+      path = with pkgs; [ bash ];
+      documentation = [ "https://github.com/rust-motd/rust-motd/blob/v${pkgs.rust-motd.version}/README.md" ];
+      description = "motd generator";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.writeShellScript "update-motd" ''
+          ${pkgs.rust-motd}/bin/rust-motd ${motdConf} > motd
+        ''}";
+        CapabilityBoundingSet = [ "" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "full";
+        StateDirectory = "rust-motd";
+        RestrictAddressFamilies = [ "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        WorkingDirectory = "/var/lib/rust-motd";
+      };
+    };
+    systemd.timers.rust-motd = {
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.refreshInterval;
+    };
+    security.pam.services.sshd.text = mkIf cfg.enableMotdInSSHD (mkDefault (mkAfter ''
+      session optional ${pkgs.pam}/lib/security/pam_motd.so motd=/var/lib/rust-motd/motd
+    ''));
+    services.openssh.extraConfig = mkIf (cfg.settings ? last_login && cfg.settings.last_login != {}) ''
+      PrintLastLog no
+    '';
+  };
+  meta.maintainers = with maintainers; [ ma27 ];
+}
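A sketch of `order` and `settings` as declared above; the section fields are assumptions based on the upstream README and are only illustrative. The assertion requires `order` to list exactly the keys of `settings`.

```nix
{
  programs.rust-motd = {
    enable = true;
    refreshInterval = "*:0/5";     # default; systemd calendar syntax
    # Explicit ordering; otherwise sections are emitted alphabetically.
    order = [ "banner" "uptime" ];
    settings = {
      # Field names assumed from the upstream README; treat as illustrative.
      banner = { color = "red"; command = "hostname"; };
      uptime = { prefix = "Up"; };
    };
  };
}
```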
diff --git a/nixpkgs/nixos/modules/programs/screen.nix b/nixpkgs/nixos/modules/programs/screen.nix
new file mode 100644
index 000000000000..68de9e52d7be
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/screen.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkOption mkIf types;
+  cfg = config.programs.screen;
+in
+
+{
+  ###### interface
+
+  options = {
+    programs.screen = {
+
+      screenrc = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          The contents of /etc/screenrc file.
+        '';
+        type = types.lines;
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf (cfg.screenrc != "") {
+    environment.etc.screenrc.text = cfg.screenrc;
+
+    environment.systemPackages = [ pkgs.screen ];
+    security.pam.services.screen = {};
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/seahorse.nix b/nixpkgs/nixos/modules/programs/seahorse.nix
new file mode 100644
index 000000000000..5e179c1446ed
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/seahorse.nix
@@ -0,0 +1,46 @@
+# Seahorse.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+ # Added 2019-08-27
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "seahorse" "enable" ]
+      [ "programs" "seahorse" "enable" ])
+  ];
+
+
+  ###### interface
+
+  options = {
+
+    programs.seahorse = {
+
+      enable = mkEnableOption (lib.mdDoc "Seahorse, a GNOME application for managing encryption keys and passwords in the GNOME Keyring");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.programs.seahorse.enable {
+
+    programs.ssh.askPassword = mkDefault "${pkgs.gnome.seahorse}/libexec/seahorse/ssh-askpass";
+
+    environment.systemPackages = [
+      pkgs.gnome.seahorse
+    ];
+
+    services.dbus.packages = [
+      pkgs.gnome.seahorse
+    ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/sedutil.nix b/nixpkgs/nixos/modules/programs/sedutil.nix
new file mode 100644
index 000000000000..d5e20a8815d4
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/sedutil.nix
@@ -0,0 +1,18 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.sedutil;
+
+in {
+  options.programs.sedutil.enable = mkEnableOption (lib.mdDoc "sedutil");
+
+  config = mkIf cfg.enable {
+    boot.kernelParams = [
+      "libata.allow_tpm=1"
+    ];
+
+    environment.systemPackages = with pkgs; [ sedutil ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/shadow.nix b/nixpkgs/nixos/modules/programs/shadow.nix
new file mode 100644
index 000000000000..00895db03fc3
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/shadow.nix
@@ -0,0 +1,239 @@
+# Configuration for the pwdutils suite of tools: passwd, useradd, etc.
+{ config, lib, utils, pkgs, ... }:
+with lib;
+let
+  cfg = config.security.loginDefs;
+in
+{
+  options = with types; {
+    security.loginDefs = {
+      package = mkPackageOptionMD pkgs "shadow" { };
+
+      chfnRestrict = mkOption {
+        description = mdDoc ''
+          Use chfn SUID to allow non-root users to change their account GECOS information.
+        '';
+        type = nullOr str;
+        default = null;
+      };
+
+      settings = mkOption {
+        description = mdDoc ''
+          Config options for the /etc/login.defs file, that defines
+          the site-specific configuration for the shadow password suite.
+          See login.defs(5) man page for available options.
+        '';
+        type = submodule {
+          freeformType = (pkgs.formats.keyValue { }).type;
+          /* There are three different sources for user/group id ranges, each of which gets
+             used by different programs:
+             - The login.defs file, used by the useradd, groupadd and newusers commands
+             - The update-users-groups.pl file, used by NixOS in the activation phase to
+               decide on which ids to use for declaratively defined users without a static
+               id
+             - Systemd compile time options -Dsystem-uid-max= and -Dsystem-gid-max=, used
+               by systemd for features like ConditionUser=@system and systemd-sysusers
+              */
+          options = {
+            DEFAULT_HOME = mkOption {
+              description = mdDoc "Indicate if login is allowed if we can't cd to the home directory.";
+              default = "yes";
+              type = enum [ "yes" "no" ];
+            };
+
+            ENCRYPT_METHOD = mkOption {
+              description = mdDoc "This defines the system default encryption algorithm for encrypting passwords.";
+              # The default crypt() method, keep in sync with the PAM default
+              default = "YESCRYPT";
+              type = enum [ "YESCRYPT" "SHA512" "SHA256" "MD5" "DES"];
+            };
+
+            SYS_UID_MIN = mkOption {
+              description = mdDoc "Range of user IDs used for the creation of system users by useradd or newusers.";
+              default = 400;
+              type = int;
+            };
+
+            SYS_UID_MAX = mkOption {
+              description = mdDoc "Range of user IDs used for the creation of system users by useradd or newusers.";
+              default = 999;
+              type = int;
+            };
+
+            UID_MIN = mkOption {
+              description = mdDoc "Range of user IDs used for the creation of regular users by useradd or newusers.";
+              default = 1000;
+              type = int;
+            };
+
+            UID_MAX = mkOption {
+              description = mdDoc "Range of user IDs used for the creation of regular users by useradd or newusers.";
+              default = 29999;
+              type = int;
+            };
+
+            SYS_GID_MIN = mkOption {
+              description = mdDoc "Range of group IDs used for the creation of system groups by useradd, groupadd, or newusers";
+              default = 400;
+              type = int;
+            };
+
+            SYS_GID_MAX = mkOption {
+              description = mdDoc "Range of group IDs used for the creation of system groups by useradd, groupadd, or newusers";
+              default = 999;
+              type = int;
+            };
+
+            GID_MIN = mkOption {
+              description = mdDoc "Range of group IDs used for the creation of regular groups by useradd, groupadd, or newusers.";
+              default = 1000;
+              type = int;
+            };
+
+            GID_MAX = mkOption {
+              description = mdDoc "Range of group IDs used for the creation of regular groups by useradd, groupadd, or newusers.";
+              default = 29999;
+              type = int;
+            };
+
+            TTYGROUP = mkOption {
+              description = mdDoc ''
+                The terminal permissions: the login tty will be owned by the TTYGROUP group,
+                and the permissions will be set to TTYPERM.'';
+              default = "tty";
+              type = str;
+            };
+
+            TTYPERM = mkOption {
+              description = mdDoc ''
+                The terminal permissions: the login tty will be owned by the TTYGROUP group,
+                and the permissions will be set to TTYPERM.'';
+              default = "0620";
+              type = str;
+            };
+
+            # Ensure privacy for newly created home directories.
+            UMASK = mkOption {
+              description = mdDoc "The file mode creation mask is initialized to this value.";
+              default = "077";
+              type = str;
+            };
+          };
+        };
+        default = { };
+      };
+    };
+
+    users.defaultUserShell = mkOption {
+      description = mdDoc ''
+        This option defines the default shell assigned to user
+        accounts. This can be either a full system path or a shell package.
+
+        This must not be a store path, since the path is
+        used outside the store (in particular in /etc/passwd).
+      '';
+      example = literalExpression "pkgs.zsh";
+      type = either path shellPackage;
+    };
+  };
+
+  ###### implementation
+
+  config = {
+    assertions = [
+      {
+        assertion = cfg.settings.SYS_UID_MIN <= cfg.settings.SYS_UID_MAX;
+        message = "SYS_UID_MIN must be less than or equal to SYS_UID_MAX";
+      }
+      {
+        assertion = cfg.settings.UID_MIN <= cfg.settings.UID_MAX;
+        message = "UID_MIN must be less than or equal to UID_MAX";
+      }
+      {
+        assertion = cfg.settings.SYS_GID_MIN <= cfg.settings.SYS_GID_MAX;
+        message = "SYS_GID_MIN must be less than or equal to SYS_GID_MAX";
+      }
+      {
+        assertion = cfg.settings.GID_MIN <= cfg.settings.GID_MAX;
+        message = "GID_MIN must be less than or equal to GID_MAX";
+      }
+    ];
+
+    security.loginDefs.settings.CHFN_RESTRICT =
+      mkIf (cfg.chfnRestrict != null) cfg.chfnRestrict;
+
+    environment.systemPackages = optional config.users.mutableUsers cfg.package
+      ++ optional (types.shellPackage.check config.users.defaultUserShell) config.users.defaultUserShell
+      ++ optional (cfg.chfnRestrict != null) pkgs.util-linux;
+
+    environment.etc =
+      # Create custom toKeyValue generator
+      # see https://man7.org/linux/man-pages/man5/login.defs.5.html for config specification
+      let
+        toKeyValue = generators.toKeyValue {
+          mkKeyValue = generators.mkKeyValueDefault { } " ";
+        };
+      in
+      {
+        # /etc/login.defs: global configuration for pwdutils.
+        # You cannot log in without it!
+        "login.defs".source = pkgs.writeText "login.defs" (toKeyValue cfg.settings);
+
+        # /etc/default/useradd: configuration for useradd.
+        "default/useradd".source = pkgs.writeText "useradd" ''
+          GROUP=100
+          HOME=/home
+          SHELL=${utils.toShellPath config.users.defaultUserShell}
+        '';
+      };
+
+    security.pam.services = {
+      chsh = { rootOK = true; };
+      chfn = { rootOK = true; };
+      su = {
+        rootOK = true;
+        forwardXAuth = true;
+        logFailures = true;
+      };
+      passwd = { };
+      # Note: useradd, groupadd etc. aren't setuid root, so it
+      # doesn't really matter what the PAM config says as long as it
+      # lets root in.
+      useradd.rootOK = true;
+      usermod.rootOK = true;
+      userdel.rootOK = true;
+      groupadd.rootOK = true;
+      groupmod.rootOK = true;
+      groupmems.rootOK = true;
+      groupdel.rootOK = true;
+      login = {
+        startSession = true;
+        allowNullPassword = true;
+        showMotd = true;
+        updateWtmp = true;
+      };
+      chpasswd = { rootOK = true; };
+    };
+
+    security.wrappers =
+      let
+        mkSetuidRoot = source: {
+          setuid = true;
+          owner = "root";
+          group = "root";
+          inherit source;
+        };
+      in
+      {
+        su = mkSetuidRoot "${cfg.package.su}/bin/su";
+        sg = mkSetuidRoot "${cfg.package.out}/bin/sg";
+        newgrp = mkSetuidRoot "${cfg.package.out}/bin/newgrp";
+        newuidmap = mkSetuidRoot "${cfg.package.out}/bin/newuidmap";
+        newgidmap = mkSetuidRoot "${cfg.package.out}/bin/newgidmap";
+      }
+      // optionalAttrs config.users.mutableUsers {
+        chsh = mkSetuidRoot "${cfg.package.out}/bin/chsh";
+        passwd = mkSetuidRoot "${cfg.package.out}/bin/passwd";
+      };
+  };
+}
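
For illustration only (not part of the module code above), a NixOS configuration using the freeform `settings` option might widen the regular UID/GID ranges and tighten the default umask; each attribute maps directly onto a key in /etc/login.defs:

    { ... }:
    {
      security.loginDefs.settings = {
        UID_MAX = 59999;
        GID_MAX = 59999;
        UMASK = "027";
      };
    }
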
diff --git a/nixpkgs/nixos/modules/programs/sharing.nix b/nixpkgs/nixos/modules/programs/sharing.nix
new file mode 100644
index 000000000000..9ab51859dc51
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/sharing.nix
@@ -0,0 +1,19 @@
+{ config, pkgs, lib, ... }:
+with lib;
+{
+  options.programs.sharing = {
+    enable = mkEnableOption (lib.mdDoc ''
+      sharing, a CLI tool for sharing files.
+
+      Note that it will open TCP port 7478 in the firewall, which is needed for it to function properly.
+    '');
+  };
+  config =
+    let
+      cfg = config.programs.sharing;
+    in
+      mkIf cfg.enable {
+        environment.systemPackages = [ pkgs.sharing ];
+        networking.firewall.allowedTCPPorts = [ 7478 ];
+      };
+}
diff --git a/nixpkgs/nixos/modules/programs/singularity.nix b/nixpkgs/nixos/modules/programs/singularity.nix
new file mode 100644
index 000000000000..05fdb4842c54
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/singularity.nix
@@ -0,0 +1,92 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.programs.singularity;
+in
+{
+
+  options.programs.singularity = {
+    enable = mkEnableOption (mdDoc "singularity") // {
+      description = mdDoc ''
+        Whether to install Singularity/Apptainer with system-level overrides such as SUID support.
+      '';
+    };
+    package = mkOption {
+      type = types.package;
+      default = pkgs.singularity;
+      defaultText = literalExpression "pkgs.singularity";
+      example = literalExpression "pkgs.apptainer";
+      description = mdDoc ''
+        Singularity/Apptainer package to override and install.
+      '';
+    };
+    packageOverriden = mkOption {
+      type = types.nullOr types.package;
+      default = null;
+      description = mdDoc ''
+        This option provides access to the overridden result of `programs.singularity.package`.
+
+        For example, the following configuration makes all the Nixpkgs packages use the overridden `singularity`:
+        ```Nix
+        { config, lib, pkgs, ... }:
+        {
+          nixpkgs.overlays = [
+            (final: prev: {
+              _singularity-orig = prev.singularity;
+              singularity = config.programs.singularity.packageOverriden;
+            })
+          ];
+          programs.singularity.enable = true;
+          programs.singularity.package = pkgs._singularity-orig;
+        }
+        ```
+
+        Use `lib.mkForce` to forcefully specify the overridden package.
+      '';
+    };
+    enableFakeroot = mkOption {
+      type = types.bool;
+      default = true;
+      example = false;
+      description = mdDoc ''
+        Whether to enable the `--fakeroot` support of Singularity/Apptainer.
+      '';
+    };
+    enableSuid = mkOption {
+      type = types.bool;
+      default = true;
+      example = false;
+      description = mdDoc ''
+        Whether to enable the SUID support of Singularity/Apptainer.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    programs.singularity.packageOverriden = (cfg.package.override (
+      optionalAttrs cfg.enableFakeroot {
+        newuidmapPath = "/run/wrappers/bin/newuidmap";
+        newgidmapPath = "/run/wrappers/bin/newgidmap";
+      } // optionalAttrs cfg.enableSuid {
+        enableSuid = true;
+        starterSuidPath = "/run/wrappers/bin/${cfg.package.projectName}-suid";
+      }
+    ));
+    environment.systemPackages = [ cfg.packageOverriden ];
+    security.wrappers."${cfg.packageOverriden.projectName}-suid" = mkIf cfg.enableSuid {
+      setuid = true;
+      owner = "root";
+      group = "root";
+      source = "${cfg.packageOverriden}/libexec/${cfg.packageOverriden.projectName}/bin/starter-suid.orig";
+    };
+    systemd.tmpfiles.rules = [
+      "d /var/lib/${cfg.packageOverriden.projectName}/mnt/session 0770 root root -"
+      "d /var/lib/${cfg.packageOverriden.projectName}/mnt/final 0770 root root -"
+      "d /var/lib/${cfg.packageOverriden.projectName}/mnt/overlay 0770 root root -"
+      "d /var/lib/${cfg.packageOverriden.projectName}/mnt/container 0770 root root -"
+      "d /var/lib/${cfg.packageOverriden.projectName}/mnt/source 0770 root root -"
+    ];
+  };
+
+}
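
A minimal usage sketch, assuming `pkgs.apptainer` is the desired flavour: the module overrides the chosen package with the wrapper paths configured above and exposes the wrapped result via `packageOverriden`:

    { pkgs, ... }:
    {
      programs.singularity = {
        enable = true;
        package = pkgs.apptainer;
        enableSuid = true;
      };
    }
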
diff --git a/nixpkgs/nixos/modules/programs/skim.nix b/nixpkgs/nixos/modules/programs/skim.nix
new file mode 100644
index 000000000000..8dadf322606e
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/skim.nix
@@ -0,0 +1,34 @@
+{ pkgs, config, lib, ... }:
+let
+  inherit (lib) mdDoc mkEnableOption mkPackageOptionMD optional optionalString;
+  cfg = config.programs.skim;
+in
+{
+  options = {
+    programs.skim = {
+      fuzzyCompletion = mkEnableOption (mdDoc "fuzzy completion with skim");
+      keybindings = mkEnableOption (mdDoc "skim keybindings");
+      package = mkPackageOptionMD pkgs "skim" {};
+    };
+  };
+
+  config = {
+    environment.systemPackages = optional (cfg.keybindings || cfg.fuzzyCompletion) cfg.package;
+
+    programs.bash.interactiveShellInit = optionalString cfg.fuzzyCompletion ''
+      source ${cfg.package}/share/skim/completion.bash
+    '' + optionalString cfg.keybindings ''
+      source ${cfg.package}/share/skim/key-bindings.bash
+    '';
+
+    programs.zsh.interactiveShellInit = optionalString cfg.fuzzyCompletion ''
+      source ${cfg.package}/share/skim/completion.zsh
+    '' + optionalString cfg.keybindings ''
+      source ${cfg.package}/share/skim/key-bindings.zsh
+    '';
+
+    programs.fish.interactiveShellInit = optionalString cfg.keybindings ''
+      source ${cfg.package}/share/skim/key-bindings.fish && skim_key_bindings
+    '';
+  };
+}
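
For illustration, enabling both toggles sources the completion snippets into bash and zsh and the key bindings into all three shells, as wired above:

    { ... }:
    {
      programs.skim = {
        fuzzyCompletion = true;
        keybindings = true;
      };
    }
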
diff --git a/nixpkgs/nixos/modules/programs/slock.nix b/nixpkgs/nixos/modules/programs/slock.nix
new file mode 100644
index 000000000000..3db9866d9f1c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/slock.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.slock;
+
+in
+{
+  options = {
+    programs.slock = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to install slock screen locker with setuid wrapper.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.slock ];
+    security.wrappers.slock =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${pkgs.slock.out}/bin/slock";
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/sniffnet.nix b/nixpkgs/nixos/modules/programs/sniffnet.nix
new file mode 100644
index 000000000000..98e9f628a9bc
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/sniffnet.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.sniffnet;
+in
+
+{
+  options = {
+    programs.sniffnet = {
+      enable = lib.mkEnableOption (lib.mdDoc "sniffnet");
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    security.wrappers.sniffnet = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_raw,cap_net_admin=eip";
+      source = "${pkgs.sniffnet}/bin/sniffnet";
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ figsoda ];
+}
diff --git a/nixpkgs/nixos/modules/programs/spacefm.nix b/nixpkgs/nixos/modules/programs/spacefm.nix
new file mode 100644
index 000000000000..b4ba9dcdea56
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/spacefm.nix
@@ -0,0 +1,55 @@
+# Global configuration for spacefm.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.spacefm;
+
+in
+{
+  ###### interface
+
+  options = {
+
+    programs.spacefm = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to install SpaceFM and create {file}`/etc/spacefm/spacefm.conf`.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.attrs;
+        default = {
+          tmp_dir = "/tmp";
+          terminal_su = "${pkgs.sudo}/bin/sudo";
+        };
+        defaultText = literalExpression ''
+          {
+            tmp_dir = "/tmp";
+            terminal_su = "''${pkgs.sudo}/bin/sudo";
+          }
+        '';
+        description = lib.mdDoc ''
+          The system-wide spacefm configuration.
+          Parameters to be written to {file}`/etc/spacefm/spacefm.conf`.
+          Refer to the [relevant entry](https://ignorantguru.github.io/spacefm/spacefm-manual-en.html#programfiles-etc) in the SpaceFM manual.
+        '';
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.spaceFM ];
+
+    environment.etc."spacefm/spacefm.conf".text =
+      concatStrings (mapAttrsToList (n: v: "${n}=${toString v}\n") cfg.settings);
+  };
+}
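
As a sketch of how the freeform `settings` attribute ends up as key=value lines in /etc/spacefm/spacefm.conf (the values below simply restate the defaults with a different tmp_dir):

    { pkgs, ... }:
    {
      programs.spacefm = {
        enable = true;
        settings = {
          tmp_dir = "/var/tmp";
          terminal_su = "${pkgs.sudo}/bin/sudo";
        };
      };
    }
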
diff --git a/nixpkgs/nixos/modules/programs/ssh.nix b/nixpkgs/nixos/modules/programs/ssh.nix
new file mode 100644
index 000000000000..7c85d1e7c3d5
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/ssh.nix
@@ -0,0 +1,357 @@
+# Global configuration for the SSH client.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg  = config.programs.ssh;
+
+  askPassword = cfg.askPassword;
+
+  askPasswordWrapper = pkgs.writeScript "ssh-askpass-wrapper"
+    ''
+      #! ${pkgs.runtimeShell} -e
+      export DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^DISPLAY=\(.*\)/\1/; t; d')"
+      export WAYLAND_DISPLAY="$(systemctl --user show-environment | ${pkgs.gnused}/bin/sed 's/^WAYLAND_DISPLAY=\(.*\)/\1/; t; d')"
+      exec ${askPassword} "$@"
+    '';
+
+  knownHosts = attrValues cfg.knownHosts;
+
+  knownHostsText = (flip (concatMapStringsSep "\n") knownHosts
+    (h: assert h.hostNames != [];
+      optionalString h.certAuthority "@cert-authority " + concatStringsSep "," h.hostNames + " "
+      + (if h.publicKey != null then h.publicKey else readFile h.publicKeyFile)
+    )) + "\n";
+
+  knownHostsFiles = [ "/etc/ssh/ssh_known_hosts" ]
+    ++ map pkgs.copyPathToStore cfg.knownHostsFiles;
+
+in
+{
+  ###### interface
+
+  options = {
+
+    programs.ssh = {
+
+      enableAskPassword = mkOption {
+        type = types.bool;
+        default = config.services.xserver.enable;
+        defaultText = literalExpression "config.services.xserver.enable";
+        description = lib.mdDoc "Whether to configure SSH_ASKPASS in the environment.";
+      };
+
+      askPassword = mkOption {
+        type = types.str;
+        default = "${pkgs.x11_ssh_askpass}/libexec/x11-ssh-askpass";
+        defaultText = literalExpression ''"''${pkgs.x11_ssh_askpass}/libexec/x11-ssh-askpass"'';
+        description = lib.mdDoc "Program used by SSH to ask for passwords.";
+      };
+
+      forwardX11 = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to request X11 forwarding on outgoing connections by default.
+          This is useful for running graphical programs on the remote machine and having them display on your local X11 server.
+          Historically, this value has depended on the value used by the local sshd daemon, but there really isn't a relation between the two.
+          Note: there are some security risks to forwarding an X11 connection.
+          NixOS's X server is built with the SECURITY extension, which prevents some obvious attacks.
+          To enable or disable forwarding on a per-connection basis, see the -X and -x options to ssh.
+          The -Y option to ssh enables trusted forwarding, which bypasses the SECURITY extension.
+        '';
+      };
+
+      setXAuthLocation = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to set the path to {command}`xauth` for X11-forwarded connections.
+          This causes a dependency on X11 packages.
+        '';
+      };
+
+      pubkeyAcceptedKeyTypes = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "ssh-ed25519" "ssh-rsa" ];
+        description = lib.mdDoc ''
+          Specifies the key types that will be used for public key authentication.
+        '';
+      };
+
+      hostKeyAlgorithms = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "ssh-ed25519" "ssh-rsa" ];
+        description = lib.mdDoc ''
+          Specifies the host key algorithms that the client wants to use in order of preference.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration text prepended to {file}`ssh_config`. Other generated
+          options will be added after a `Host *` pattern.
+          See {manpage}`ssh_config(5)`
+          for help.
+        '';
+      };
+
+      startAgent = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to start the OpenSSH agent when you log in.  The OpenSSH agent
+          remembers private keys for you so that you don't have to type in
+          passphrases every time you make an SSH connection.  Use
+          {command}`ssh-add` to add a key to the agent.
+        '';
+      };
+
+      agentTimeout = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "1h";
+        description = lib.mdDoc ''
+          How long to keep the private keys in memory. Use null to keep them forever.
+        '';
+      };
+
+      agentPKCS11Whitelist = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = literalExpression ''"''${pkgs.opensc}/lib/opensc-pkcs11.so"'';
+        description = lib.mdDoc ''
+          A pattern-list of acceptable paths for PKCS#11 shared libraries
+          that may be used with the -s option to ssh-add.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.openssh;
+        defaultText = literalExpression "pkgs.openssh";
+        description = lib.mdDoc ''
+          The package used for the openssh client and daemon.
+        '';
+      };
+
+      knownHosts = mkOption {
+        default = {};
+        type = types.attrsOf (types.submodule ({ name, config, options, ... }: {
+          options = {
+            certAuthority = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                This public key is an SSH certificate authority, rather than an
+                individual host's key.
+              '';
+            };
+            hostNames = mkOption {
+              type = types.listOf types.str;
+              default = [ name ] ++ config.extraHostNames;
+              defaultText = literalExpression "[ ${name} ] ++ config.${options.extraHostNames}";
+              description = lib.mdDoc ''
+                A list of host names and/or IP numbers used for accessing
+                the host's ssh service. This list includes the name of the
+                containing `knownHosts` attribute by default
+                for convenience. If you wish to configure multiple host keys
+                for the same host use multiple `knownHosts`
+                entries with different attribute names and the same
+                `hostNames` list.
+              '';
+            };
+            extraHostNames = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = lib.mdDoc ''
+                A list of additional host names and/or IP numbers used for
+                accessing the host's ssh service. This list is ignored if
+                `hostNames` is set explicitly.
+              '';
+            };
+            publicKey = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              example = "ecdsa-sha2-nistp521 AAAAE2VjZHN...UEPg==";
+              description = lib.mdDoc ''
+                The public key data for the host. You can fetch a public key
+                from a running SSH server with the {command}`ssh-keyscan`
+                command. The public key should not include any host names, only
+                the key type and the key itself.
+              '';
+            };
+            publicKeyFile = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = lib.mdDoc ''
+                The path to the public key file for the host. The public
+                key file is read at build time and saved in the Nix store.
+                You can fetch a public key file from a running SSH server
+                with the {command}`ssh-keyscan` command. The content
+                of the file should follow the same format as described for
+                the `publicKey` option. Only a single key
+                is supported. If a host has multiple keys, use
+                {option}`programs.ssh.knownHostsFiles` instead.
+              '';
+            };
+          };
+        }));
+        description = lib.mdDoc ''
+          The set of system-wide known SSH hosts. To make simple setups more
+          convenient the name of an attribute in this set is used as a host name
+          for the entry. This behaviour can be disabled by setting
+          `hostNames` explicitly. You can use
+          `extraHostNames` to add additional host names without
+          disabling this default.
+        '';
+        example = literalExpression ''
+          {
+            myhost = {
+              extraHostNames = [ "myhost.mydomain.com" "10.10.1.4" ];
+              publicKeyFile = ./pubkeys/myhost_ssh_host_dsa_key.pub;
+            };
+            "myhost2.net".publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAILIRuJ8p1Fi+m6WkHV0KWnRfpM1WxoW8XAS+XvsSKsTK";
+            "myhost2.net/dsa" = {
+              hostNames = [ "myhost2.net" ];
+              publicKeyFile = ./pubkeys/myhost2_ssh_host_dsa_key.pub;
+            };
+          }
+        '';
+      };
+
+      knownHostsFiles = mkOption {
+        default = [];
+        type = with types; listOf path;
+        description = lib.mdDoc ''
+          Files containing SSH host keys to set as global known hosts.
+          `/etc/ssh/ssh_known_hosts` (which is
+          generated by {option}`programs.ssh.knownHosts`) is
+          always included.
+        '';
+        example = literalExpression ''
+          [
+            ./known_hosts
+            (writeText "github.keys" '''
+              github.com ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCj7ndNxQowgcQnjshcLrqPEiiphnt+VTTvDP6mHBL9j1aNUkY4Ue1gvwnGLVlOhGeYrnZaMgRK6+PKCUXaDbC7qtbW8gIkhL7aGCsOr/C56SJMy/BCZfxd1nWzAOxSDPgVsmerOBYfNqltV9/hWCqBywINIR+5dIg6JTJ72pcEpEjcYgXkE2YEFXV1JHnsKgbLWNlhScqb2UmyRkQyytRLtL+38TGxkxCflmO+5Z8CSSNY7GidjMIZ7Q4zMjA2n1nGrlTDkzwDCsw+wqFPGQA179cnfGWOWRVruj16z6XyvxvjJwbz0wQZ75XK5tKSb7FNyeIEs4TT4jk+S4dhPeAUC5y+bDYirYgM4GC7uEnztnZyaVWQ7B381AK4Qdrwt51ZqExKbQpTUNn+EjqoTwvqNj4kqx5QUCI0ThS/YkOxJCXmPUWZbhjpCg56i+2aB6CmK2JGhn57K5mj0MNdBXA4/WnwH6XoPWJzK5Nyu2zB3nAZp+S5hpQs+p1vN1/wsjk=
+              github.com ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
+              github.com ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOMqqnkVzrm0SdG6UOoqKLsabgH5C9okWi0dh2l9GKJl
+            ''')
+          ]
+        '';
+      };
+
+      kexAlgorithms = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        example = [ "curve25519-sha256@libssh.org" "diffie-hellman-group-exchange-sha256" ];
+        description = lib.mdDoc ''
+          Specifies the available KEX (Key Exchange) algorithms.
+        '';
+      };
+
+      ciphers = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        example = [ "chacha20-poly1305@openssh.com" "aes256-gcm@openssh.com" ];
+        description = lib.mdDoc ''
+          Specifies the ciphers allowed and their order of preference.
+        '';
+      };
+
+      macs = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        example = [ "hmac-sha2-512-etm@openssh.com" "hmac-sha1" ];
+        description = lib.mdDoc ''
+          Specifies the MAC (message authentication code) algorithms in order of preference. The MAC algorithm is used
+          for data integrity protection.
+        '';
+      };
+    };
+
+  };
+
+  config = {
+
+    programs.ssh.setXAuthLocation =
+      mkDefault (config.services.xserver.enable || config.programs.ssh.forwardX11 || config.services.openssh.settings.X11Forwarding);
+
+    assertions =
+      [ { assertion = cfg.forwardX11 -> cfg.setXAuthLocation;
+          message = "cannot enable X11 forwarding without setting XAuth location";
+        }
+      ] ++ flip mapAttrsToList cfg.knownHosts (name: data: {
+        assertion = (data.publicKey == null && data.publicKeyFile != null) ||
+                    (data.publicKey != null && data.publicKeyFile == null);
+        message = "knownHost ${name} must contain either a publicKey or publicKeyFile";
+      });
+
+    # SSH configuration. Slight duplication of the sshd_config
+    # generation in the sshd service.
+    environment.etc."ssh/ssh_config".text =
+      ''
+        # Custom options from `extraConfig`, to override generated options
+        ${cfg.extraConfig}
+
+        # Generated options from other settings
+        Host *
+        AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
+        GlobalKnownHostsFile ${concatStringsSep " " knownHostsFiles}
+
+        ${optionalString cfg.setXAuthLocation ''
+          XAuthLocation ${pkgs.xorg.xauth}/bin/xauth
+        ''}
+
+        ForwardX11 ${if cfg.forwardX11 then "yes" else "no"}
+
+        ${optionalString (cfg.pubkeyAcceptedKeyTypes != []) "PubkeyAcceptedKeyTypes ${concatStringsSep "," cfg.pubkeyAcceptedKeyTypes}"}
+        ${optionalString (cfg.hostKeyAlgorithms != []) "HostKeyAlgorithms ${concatStringsSep "," cfg.hostKeyAlgorithms}"}
+        ${optionalString (cfg.kexAlgorithms != null) "KexAlgorithms ${concatStringsSep "," cfg.kexAlgorithms}"}
+        ${optionalString (cfg.ciphers != null) "Ciphers ${concatStringsSep "," cfg.ciphers}"}
+        ${optionalString (cfg.macs != null) "MACs ${concatStringsSep "," cfg.macs}"}
+      '';
+
+    environment.etc."ssh/ssh_known_hosts".text = knownHostsText;
+
+    # FIXME: this should really be socket-activated for über-awesomeness.
+    systemd.user.services.ssh-agent = mkIf cfg.startAgent
+      { description = "SSH Agent";
+        wantedBy = [ "default.target" ];
+        unitConfig.ConditionUser = "!@system";
+        serviceConfig =
+          { ExecStartPre = "${pkgs.coreutils}/bin/rm -f %t/ssh-agent";
+            ExecStart =
+                "${cfg.package}/bin/ssh-agent " +
+                optionalString (cfg.agentTimeout != null) ("-t ${cfg.agentTimeout} ") +
+                optionalString (cfg.agentPKCS11Whitelist != null) ("-P ${cfg.agentPKCS11Whitelist} ") +
+                "-a %t/ssh-agent";
+            StandardOutput = "null";
+            Type = "forking";
+            Restart = "on-failure";
+            SuccessExitStatus = "0 2";
+          };
+        # Allow ssh-agent to ask for confirmation. This requires the
+        # unit to know about the user's $DISPLAY (via ‘systemctl
+        # import-environment’).
+        environment.SSH_ASKPASS = optionalString cfg.enableAskPassword askPasswordWrapper;
+        environment.DISPLAY = "fake"; # required to make ssh-agent start $SSH_ASKPASS
+      };
+
+    environment.extraInit = optionalString cfg.startAgent
+      ''
+        if [ -z "$SSH_AUTH_SOCK" -a -n "$XDG_RUNTIME_DIR" ]; then
+          export SSH_AUTH_SOCK="$XDG_RUNTIME_DIR/ssh-agent"
+        fi
+      '';
+
+    environment.variables.SSH_ASKPASS = optionalString cfg.enableAskPassword askPassword;
+
+  };
+}
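
An illustrative (not normative) client configuration tying several of these options together; the host name and key file path below are placeholders:

    { ... }:
    {
      programs.ssh = {
        startAgent = true;
        agentTimeout = "1h";
        ciphers = [ "chacha20-poly1305@openssh.com" "aes256-gcm@openssh.com" ];
        # hypothetical host entry; the public key file is read at build time
        knownHosts."git.example.org".publicKeyFile = ./keys/git.example.org.pub;
      };
    }
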
diff --git a/nixpkgs/nixos/modules/programs/starship.nix b/nixpkgs/nixos/modules/programs/starship.nix
new file mode 100644
index 000000000000..9dca39da5edc
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/starship.nix
@@ -0,0 +1,68 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.starship;
+
+  settingsFormat = pkgs.formats.toml { };
+
+  settingsFile = settingsFormat.generate "starship.toml" cfg.settings;
+
+  initOption =
+    if cfg.interactiveOnly then
+      "promptInit"
+    else
+      "shellInit";
+
+in
+{
+  options.programs.starship = {
+    enable = mkEnableOption (lib.mdDoc "the Starship shell prompt");
+
+    interactiveOnly = mkOption {
+      default = true;
+      example = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable starship only when the shell is interactive.
+        Some plugins require this to be set to false to function correctly.
+      '';
+    };
+
+    settings = mkOption {
+      inherit (settingsFormat) type;
+      default = { };
+      description = lib.mdDoc ''
+        Configuration included in `starship.toml`.
+
+        See https://starship.rs/config/#prompt for documentation.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    programs.bash.${initOption} = ''
+      if [[ $TERM != "dumb" ]]; then
+        export STARSHIP_CONFIG=${settingsFile}
+        eval "$(${pkgs.starship}/bin/starship init bash)"
+      fi
+    '';
+
+    programs.fish.${initOption} = ''
+      if test "$TERM" != "dumb"
+        set -x STARSHIP_CONFIG ${settingsFile}
+        eval (${pkgs.starship}/bin/starship init fish)
+      end
+    '';
+
+    programs.zsh.${initOption} = ''
+      if [[ $TERM != "dumb" ]]; then
+        export STARSHIP_CONFIG=${settingsFile}
+        eval "$(${pkgs.starship}/bin/starship init zsh)"
+      fi
+    '';
+  };
+
+  meta.maintainers = pkgs.starship.meta.maintainers;
+}
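
A small illustrative snippet; the `settings` attribute is serialized to starship.toml via `pkgs.formats.toml`, so any keys documented upstream (the ones below are common examples) can be used:

    { ... }:
    {
      programs.starship = {
        enable = true;
        settings = {
          add_newline = false;
          format = "$directory$git_branch$character";
        };
      };
    }
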
diff --git a/nixpkgs/nixos/modules/programs/steam.nix b/nixpkgs/nixos/modules/programs/steam.nix
new file mode 100644
index 000000000000..29c449c16946
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/steam.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.steam;
+  gamescopeCfg = config.programs.gamescope;
+
+  steam-gamescope = let
+    exports = builtins.attrValues (builtins.mapAttrs (n: v: "export ${n}=${v}") cfg.gamescopeSession.env);
+  in
+    pkgs.writeShellScriptBin "steam-gamescope" ''
+      ${builtins.concatStringsSep "\n" exports}
+      gamescope --steam ${toString cfg.gamescopeSession.args} -- steam -tenfoot -pipewire-dmabuf
+    '';
+
+  gamescopeSessionFile =
+    (pkgs.writeTextDir "share/wayland-sessions/steam.desktop" ''
+      [Desktop Entry]
+      Name=Steam
+      Comment=A digital distribution platform
+      Exec=${steam-gamescope}/bin/steam-gamescope
+      Type=Application
+    '').overrideAttrs (_: { passthru.providedSessions = [ "steam" ]; });
+in {
+  options.programs.steam = {
+    enable = mkEnableOption (lib.mdDoc "steam");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.steam;
+      defaultText = literalExpression "pkgs.steam";
+      example = literalExpression ''
+        pkgs.steam-small.override {
+          extraEnv = {
+            MANGOHUD = true;
+            OBS_VKCAPTURE = true;
+            RADV_TEX_ANISO = 16;
+          };
+          extraLibraries = p: with p; [
+            atk
+          ];
+        }
+      '';
+      apply = steam: steam.override (prev: {
+        extraLibraries = pkgs: let
+          prevLibs = if prev ? extraLibraries then prev.extraLibraries pkgs else [ ];
+          additionalLibs = with config.hardware.opengl;
+            if pkgs.stdenv.hostPlatform.is64bit
+            then [ package ] ++ extraPackages
+            else [ package32 ] ++ extraPackages32;
+        in prevLibs ++ additionalLibs;
+      } // optionalAttrs (cfg.gamescopeSession.enable && gamescopeCfg.capSysNice)
+      {
+        buildFHSEnv = pkgs.buildFHSEnv.override {
+          # use the setuid wrapped bubblewrap
+          bubblewrap = "${config.security.wrapperDir}/..";
+        };
+      });
+      description = lib.mdDoc ''
+        The Steam package to use. Additional libraries are added from the system
+        configuration to ensure graphics work properly.
+
+        Use this option to customise the Steam package rather than adding your
+        custom Steam to {option}`environment.systemPackages` yourself.
+      '';
+    };
+
+    remotePlay.openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open ports in the firewall for Steam Remote Play.
+      '';
+    };
+
+    dedicatedServer.openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open ports in the firewall for Source Dedicated Server.
+      '';
+    };
+
+    gamescopeSession = mkOption {
+      description = mdDoc "Run a GameScope driven Steam session from your display-manager";
+      default = {};
+      type = types.submodule {
+        options = {
+          enable = mkEnableOption (mdDoc "GameScope Session");
+          args = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            description = mdDoc ''
+              Arguments to be passed to GameScope for the session.
+            '';
+          };
+
+          env = mkOption {
+            type = types.attrsOf types.str;
+            default = { };
+            description = mdDoc ''
+              Environment variables to be passed to GameScope for the session.
+            '';
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    hardware.opengl = { # this fixes the "glXChooseVisual failed" bug, context: https://github.com/NixOS/nixpkgs/issues/47932
+      enable = true;
+      driSupport = true;
+      driSupport32Bit = true;
+    };
+
+    security.wrappers = mkIf (cfg.gamescopeSession.enable && gamescopeCfg.capSysNice) {
+      # needed or steam fails
+      bwrap = {
+        owner = "root";
+        group = "root";
+        source = "${pkgs.bubblewrap}/bin/bwrap";
+        setuid = true;
+      };
+    };
+
+    programs.gamescope.enable = mkDefault cfg.gamescopeSession.enable;
+    services.xserver.displayManager.sessionPackages = mkIf cfg.gamescopeSession.enable [ gamescopeSessionFile ];
+
+    # optionally enable 32bit pulseaudio support if pulseaudio is enabled
+    hardware.pulseaudio.support32Bit = config.hardware.pulseaudio.enable;
+
+    hardware.steam-hardware.enable = true;
+
+    environment.systemPackages = [
+      cfg.package
+      cfg.package.run
+    ] ++ lib.optional cfg.gamescopeSession.enable steam-gamescope;
+
+    networking.firewall = lib.mkMerge [
+      (mkIf cfg.remotePlay.openFirewall {
+        allowedTCPPorts = [ 27036 ];
+        allowedUDPPortRanges = [ { from = 27031; to = 27036; } ];
+      })
+
+      (mkIf cfg.dedicatedServer.openFirewall {
+        allowedTCPPorts = [ 27015 ]; # SRCDS Rcon port
+        allowedUDPPorts = [ 27015 ]; # Gameplay traffic
+      })
+    ];
+  };
+
+  meta.maintainers = with maintainers; [ mkg20001 ];
+}
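
An illustrative configuration that opens the Remote Play ports and adds the GameScope session to the display manager; the gamescope argument shown is only a placeholder:

    { ... }:
    {
      programs.steam = {
        enable = true;
        remotePlay.openFirewall = true;
        gamescopeSession = {
          enable = true;
          args = [ "--fullscreen" ];
        };
      };
    }
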
diff --git a/nixpkgs/nixos/modules/programs/streamdeck-ui.nix b/nixpkgs/nixos/modules/programs/streamdeck-ui.nix
new file mode 100644
index 000000000000..220f0a35f162
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/streamdeck-ui.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.streamdeck-ui;
+in
+{
+  options.programs.streamdeck-ui = {
+    enable = mkEnableOption (lib.mdDoc "streamdeck-ui");
+
+    autoStart = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc "Whether streamdeck-ui should be started automatically.";
+    };
+
+    package = mkPackageOptionMD pkgs "streamdeck-ui" {
+      default = [ "streamdeck-ui" ];
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [
+      cfg.package
+      (mkIf cfg.autoStart (makeAutostartItem { name = "streamdeck-ui-noui"; package = cfg.package; }))
+    ];
+
+    services.udev.packages = [ cfg.package ];
+  };
+
+  meta.maintainers = with maintainers; [ majiir ];
+}
diff --git a/nixpkgs/nixos/modules/programs/sysdig.nix b/nixpkgs/nixos/modules/programs/sysdig.nix
new file mode 100644
index 000000000000..ccb1e1d4c5f1
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/sysdig.nix
@@ -0,0 +1,14 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.sysdig;
+in {
+  options.programs.sysdig.enable = mkEnableOption (lib.mdDoc "sysdig");
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.sysdig ];
+    boot.extraModulePackages = [ config.boot.kernelPackages.sysdig ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/system-config-printer.nix b/nixpkgs/nixos/modules/programs/system-config-printer.nix
new file mode 100644
index 000000000000..7c7eea580545
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/system-config-printer.nix
@@ -0,0 +1,32 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    programs.system-config-printer = {
+
+      enable = mkEnableOption (lib.mdDoc "system-config-printer, a Graphical user interface for CUPS administration");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.programs.system-config-printer.enable {
+
+    environment.systemPackages = [
+      pkgs.system-config-printer
+    ];
+
+    services.system-config-printer.enable = true;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/systemtap.nix b/nixpkgs/nixos/modules/programs/systemtap.nix
new file mode 100644
index 000000000000..cbb9ec164c6c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/systemtap.nix
@@ -0,0 +1,29 @@
+{ config, lib, ... }:
+
+with lib;
+
+let cfg = config.programs.systemtap;
+in {
+
+  options = {
+    programs.systemtap = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Install {command}`systemtap` along with necessary kernel options.
+        '';
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "DEBUG")
+    ];
+    boot.kernel.features.debug = true;
+    environment.systemPackages = [
+      config.boot.kernelPackages.systemtap
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/thefuck.nix b/nixpkgs/nixos/modules/programs/thefuck.nix
new file mode 100644
index 000000000000..e057d1ca657d
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/thefuck.nix
@@ -0,0 +1,40 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  prg = config.programs;
+  cfg = prg.thefuck;
+
+  bashAndZshInitScript = ''
+    eval $(${pkgs.thefuck}/bin/thefuck --alias ${cfg.alias})
+  '';
+  fishInitScript = ''
+    ${pkgs.thefuck}/bin/thefuck --alias ${cfg.alias} | source
+  '';
+in
+  {
+    options = {
+      programs.thefuck = {
+        enable = mkEnableOption (lib.mdDoc "thefuck");
+
+        alias = mkOption {
+          default = "fuck";
+          type = types.str;
+
+          description = lib.mdDoc ''
+            `thefuck` needs an alias to be configured.
+            The default value is `fuck`, but you can use anything else as well.
+          '';
+        };
+      };
+    };
+
+    config = mkIf cfg.enable {
+      environment.systemPackages = with pkgs; [ thefuck ];
+
+      programs.bash.interactiveShellInit = bashAndZshInitScript;
+      programs.zsh.interactiveShellInit = mkIf prg.zsh.enable bashAndZshInitScript;
+      programs.fish.interactiveShellInit = mkIf prg.fish.enable fishInitScript;
+    };
+  }
diff --git a/nixpkgs/nixos/modules/programs/thunar.nix b/nixpkgs/nixos/modules/programs/thunar.nix
new file mode 100644
index 000000000000..cb85b3886c13
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/thunar.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.thunar;
+
+in {
+  meta = {
+    maintainers = teams.xfce.members;
+  };
+
+  options = {
+    programs.thunar = {
+      enable = mkEnableOption (lib.mdDoc "Thunar, the Xfce file manager");
+
+      plugins = mkOption {
+        default = [];
+        type = types.listOf types.package;
+        description = lib.mdDoc "List of thunar plugins to install.";
+        example = literalExpression "with pkgs.xfce; [ thunar-archive-plugin thunar-volman ]";
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable (
+    let package = pkgs.xfce.thunar.override { thunarPlugins = cfg.plugins; };
+
+    in {
+      environment.systemPackages = [
+        package
+      ];
+
+      services.dbus.packages = [
+        package
+      ];
+
+      systemd.packages = [
+        package
+      ];
+
+      programs.xfconf.enable = true;
+    }
+  );
+}
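
For illustration, enabling Thunar with the plugins from the option's own example wires the overridden package into systemPackages, D-Bus and systemd as shown above:

    { pkgs, ... }:
    {
      programs.thunar = {
        enable = true;
        plugins = with pkgs.xfce; [ thunar-archive-plugin thunar-volman ];
      };
    }
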
diff --git a/nixpkgs/nixos/modules/programs/tmux.nix b/nixpkgs/nixos/modules/programs/tmux.nix
new file mode 100644
index 000000000000..0d1c7c9cdf0f
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/tmux.nix
@@ -0,0 +1,233 @@
+{ config, pkgs, lib, ... }:
+
+let
+  inherit (lib) mkOption mkIf types optionalString;
+
+  cfg = config.programs.tmux;
+
+  defaultKeyMode  = "emacs";
+  defaultResize   = 5;
+  defaultShortcut = "b";
+  defaultTerminal = "screen";
+
+  boolToStr = value: if value then "on" else "off";
+
+  tmuxConf = ''
+    set  -g default-terminal "${cfg.terminal}"
+    set  -g base-index      ${toString cfg.baseIndex}
+    setw -g pane-base-index ${toString cfg.baseIndex}
+
+    ${optionalString cfg.newSession "new-session"}
+
+    ${optionalString cfg.reverseSplit ''
+    bind v split-window -h
+    bind s split-window -v
+    ''}
+
+    set -g status-keys ${cfg.keyMode}
+    set -g mode-keys   ${cfg.keyMode}
+
+    ${optionalString (cfg.keyMode == "vi" && cfg.customPaneNavigationAndResize) ''
+    bind h select-pane -L
+    bind j select-pane -D
+    bind k select-pane -U
+    bind l select-pane -R
+
+    bind -r H resize-pane -L ${toString cfg.resizeAmount}
+    bind -r J resize-pane -D ${toString cfg.resizeAmount}
+    bind -r K resize-pane -U ${toString cfg.resizeAmount}
+    bind -r L resize-pane -R ${toString cfg.resizeAmount}
+    ''}
+
+    ${optionalString (cfg.shortcut != defaultShortcut) ''
+    # rebind main key: C-${cfg.shortcut}
+    unbind C-${defaultShortcut}
+    set -g prefix C-${cfg.shortcut}
+    bind ${cfg.shortcut} send-prefix
+    bind C-${cfg.shortcut} last-window
+    ''}
+
+    setw -g aggressive-resize ${boolToStr cfg.aggressiveResize}
+    setw -g clock-mode-style  ${if cfg.clock24 then "24" else "12"}
+    set  -s escape-time       ${toString cfg.escapeTime}
+    set  -g history-limit     ${toString cfg.historyLimit}
+
+    ${cfg.extraConfigBeforePlugins}
+
+    ${lib.optionalString (cfg.plugins != []) ''
+    # Run plugins
+    ${lib.concatMapStringsSep "\n" (x: "run-shell ${x.rtp}") cfg.plugins}
+
+    ''}
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+  ###### interface
+
+  options = {
+    programs.tmux = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whenever to configure {command}`tmux` system-wide.";
+        relatedPackages = [ "tmux" ];
+      };
+
+      aggressiveResize = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Resize the window to the size of the smallest session for which it is the current window.
+        '';
+      };
+
+      baseIndex = mkOption {
+        default = 0;
+        example = 1;
+        type = types.int;
+        description = lib.mdDoc "Base index for windows and panes.";
+      };
+
+      clock24 = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Use 24 hour clock.";
+      };
+
+      customPaneNavigationAndResize = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Override the hjkl and HJKL bindings for pane navigation and resizing in VI mode.";
+      };
+
+      escapeTime = mkOption {
+        default = 500;
+        example = 0;
+        type = types.int;
+        description = lib.mdDoc "Time in milliseconds for which tmux waits after an escape is input.";
+      };
+
+      extraConfigBeforePlugins = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Additional contents of /etc/tmux.conf, to be run before sourcing plugins.
+        '';
+        type = types.lines;
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Additional contents of /etc/tmux.conf, to be run after sourcing plugins.
+        '';
+        type = types.lines;
+      };
+
+      historyLimit = mkOption {
+        default = 2000;
+        example = 5000;
+        type = types.int;
+        description = lib.mdDoc "Maximum number of lines held in window history.";
+      };
+
+      keyMode = mkOption {
+        default = defaultKeyMode;
+        example = "vi";
+        type = types.enum [ "emacs" "vi" ];
+        description = lib.mdDoc "VI or Emacs style shortcuts.";
+      };
+
+      newSession = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Automatically spawn a session if trying to attach and none are running.";
+      };
+
+      reverseSplit = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Reverse the window split shortcuts.";
+      };
+
+      resizeAmount = mkOption {
+        default = defaultResize;
+        example = 10;
+        type = types.int;
+        description = lib.mdDoc "Number of lines/columns when resizing.";
+      };
+
+      shortcut = mkOption {
+        default = defaultShortcut;
+        example = "a";
+        type = types.str;
+        description = lib.mdDoc "Ctrl following by this key is used as the main shortcut.";
+      };
+
+      terminal = mkOption {
+        default = defaultTerminal;
+        example = "screen-256color";
+        type = types.str;
+        description = lib.mdDoc ''
+          Set the $TERM variable. Use tmux-direct if italics or 24bit true color
+          support is needed.
+        '';
+      };
+
+      secureSocket = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Store tmux socket under /run, which is more secure than /tmp, but as a
+          downside it doesn't survive user logout.
+        '';
+      };
+
+      plugins = mkOption {
+        default = [];
+        type = types.listOf types.package;
+        description = lib.mdDoc "List of plugins to install.";
+        example = lib.literalExpression "[ pkgs.tmuxPlugins.nord ]";
+      };
+
+      withUtempter = mkOption {
+        description = lib.mdDoc ''
+          Whether to enable libutempter for tmux.
+          This is required so that tmux can write to /var/run/utmp (which can be queried with `who` to display currently connected user sessions).
+          Note that this will add a setgid wrapper for the group utmp!
+        '';
+        default = true;
+        type = types.bool;
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment = {
+      etc."tmux.conf".text = tmuxConf;
+
+      systemPackages = [ pkgs.tmux ] ++ cfg.plugins;
+
+      variables = {
+        TMUX_TMPDIR = lib.optional cfg.secureSocket ''''${XDG_RUNTIME_DIR:-"/run/user/$(id -u)"}'';
+      };
+    };
+    security.wrappers = mkIf cfg.withUtempter {
+      utempter = {
+        source = "${pkgs.libutempter}/lib/utempter/utempter";
+        owner = "root";
+        group = "utmp";
+        setuid = false;
+        setgid = true;
+      };
+    };
+  };
+
+  imports = [
+    (lib.mkRenamedOptionModule [ "programs" "tmux" "extraTmuxConf" ] [ "programs" "tmux" "extraConfig" ])
+  ];
+}
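
An illustrative system-wide setup exercising several of the options above; the resulting /etc/tmux.conf is generated from the `tmuxConf` template at the top of the module:

    { pkgs, ... }:
    {
      programs.tmux = {
        enable = true;
        keyMode = "vi";
        customPaneNavigationAndResize = true;
        clock24 = true;
        historyLimit = 10000;
        plugins = [ pkgs.tmuxPlugins.nord ];
      };
    }
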
diff --git a/nixpkgs/nixos/modules/programs/traceroute.nix b/nixpkgs/nixos/modules/programs/traceroute.nix
new file mode 100644
index 000000000000..df5f10b87d5f
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/traceroute.nix
@@ -0,0 +1,28 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.traceroute;
+in {
+  options = {
+    programs.traceroute = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to configure a setcap wrapper for traceroute.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers.traceroute = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_raw+p";
+      source = "${pkgs.traceroute}/bin/traceroute";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/trippy.nix b/nixpkgs/nixos/modules/programs/trippy.nix
new file mode 100644
index 000000000000..6e31aea43e75
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/trippy.nix
@@ -0,0 +1,24 @@
+{ lib, config, pkgs, ... }:
+
+let
+  cfg = config.programs.trippy;
+in
+
+{
+  options = {
+    programs.trippy = {
+      enable = lib.mkEnableOption (lib.mdDoc "trippy");
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    security.wrappers.trip = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_raw+p";
+      source = lib.getExe pkgs.trippy;
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ figsoda ];
+}
diff --git a/nixpkgs/nixos/modules/programs/tsm-client.nix b/nixpkgs/nixos/modules/programs/tsm-client.nix
new file mode 100644
index 000000000000..41560544c2c7
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/tsm-client.nix
@@ -0,0 +1,287 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  inherit (builtins) length map;
+  inherit (lib.attrsets) attrNames filterAttrs hasAttr mapAttrs mapAttrsToList optionalAttrs;
+  inherit (lib.modules) mkDefault mkIf;
+  inherit (lib.options) literalExpression mkEnableOption mkOption;
+  inherit (lib.strings) concatLines optionalString toLower;
+  inherit (lib.types) addCheck attrsOf lines nonEmptyStr nullOr package path port str strMatching submodule;
+
+  # Checks if given list of strings contains unique
+  # elements when compared without considering case.
+  # Type: checkIUnique :: [string] -> bool
+  # Example: checkIUnique ["foo" "Foo"] => false
+  checkIUnique = lst:
+    let
+      lenUniq = l: length (lib.lists.unique l);
+    in
+      lenUniq lst == lenUniq (map toLower lst);
+
+  # TSM rejects servername strings longer than 64 chars.
+  servernameType = strMatching ".{1,64}";
+
+  serverOptions = { name, config, ... }: {
+    options.name = mkOption {
+      type = servernameType;
+      example = "mainTsmServer";
+      description = lib.mdDoc ''
+        Local name of the IBM TSM server,
+        must be uncapitalized and no longer than 64 chars.
+        The value will be used for the
+        `server`
+        directive in {file}`dsm.sys`.
+      '';
+    };
+    options.server = mkOption {
+      type = nonEmptyStr;
+      example = "tsmserver.company.com";
+      description = lib.mdDoc ''
+        Host/domain name or IP address of the IBM TSM server.
+        The value will be used for the
+        `tcpserveraddress`
+        directive in {file}`dsm.sys`.
+      '';
+    };
+    options.port = mkOption {
+      type = addCheck port (p: p<=32767);
+      default = 1500;  # official default
+      description = lib.mdDoc ''
+        TCP port of the IBM TSM server.
+        The value will be used for the
+        `tcpport`
+        directive in {file}`dsm.sys`.
+        TSM does not support ports above 32767.
+      '';
+    };
+    options.node = mkOption {
+      type = nonEmptyStr;
+      example = "MY-TSM-NODE";
+      description = lib.mdDoc ''
+        Target node name on the IBM TSM server.
+        The value will be used for the
+        `nodename`
+        directive in {file}`dsm.sys`.
+      '';
+    };
+    options.genPasswd = mkEnableOption (lib.mdDoc ''
+      automatic client password generation.
+      This option influences the
+      `passwordaccess`
+      directive in {file}`dsm.sys`.
+      The password will be stored in the directory
+      given by the option {option}`passwdDir`.
+      *Caution*:
+      If this option is enabled and the server forces
+      a password renewal (e.g. on first connection),
+      a random password will be generated and stored
+      in the directory given by {option}`passwdDir`.
+    '');
+    options.passwdDir = mkOption {
+      type = path;
+      example = "/home/alice/tsm-password";
+      description = lib.mdDoc ''
+        Directory that holds the TSM
+        node's password information.
+        The value will be used for the
+        `passworddir`
+        directive in {file}`dsm.sys`.
+      '';
+    };
+    options.includeExclude = mkOption {
+      type = lines;
+      default = "";
+      example = ''
+        exclude.dir     /nix/store
+        include.encrypt /home/.../*
+      '';
+      description = lib.mdDoc ''
+        `include.*` and
+        `exclude.*` directives to be
+        used when sending files to the IBM TSM server.
+        The lines will be written into a file that the
+        `inclexcl`
+        directive in {file}`dsm.sys` points to.
+      '';
+    };
+    options.extraConfig = mkOption {
+      # TSM option keys are case insensitive;
+      # we have to ensure there are no keys that
+      # differ only by upper and lower case.
+      type = addCheck
+        (attrsOf (nullOr str))
+        (attrs: checkIUnique (attrNames attrs));
+      default = {};
+      example.compression = "yes";
+      example.passwordaccess = null;
+      description = lib.mdDoc ''
+        Additional key-value pairs for the server stanza.
+        Values must be strings, or `null`
+        for the key not to be used in the stanza
+        (e.g. to overrule values generated by other options).
+      '';
+    };
+    options.text = mkOption {
+      type = lines;
+      example = literalExpression
+        ''lib.modules.mkAfter "compression no"'';
+      description = lib.mdDoc ''
+        Additional text lines for the server stanza.
+        This option can be used if certain configuration keys
+        must be used multiple times or ordered in a certain way
+        as the {option}`extraConfig` option can't
+        control the order of lines in the resulting stanza.
+        Note that the `server`
+        line at the beginning of the stanza is
+        not part of this option's value.
+      '';
+    };
+    options.stanza = mkOption {
+      type = str;
+      internal = true;
+      visible = false;
+      description = lib.mdDoc "Server stanza text generated from the options.";
+    };
+    config.name = mkDefault name;
+    # Client system-options file directives are explained here:
+    # https://www.ibm.com/docs/en/spectrum-protect/8.1.13?topic=commands-processing-options
+    config.extraConfig =
+      mapAttrs (lib.trivial.const mkDefault) (
+        {
+          commmethod = "v6tcpip";  # uses v4 or v6, based on dns lookup result
+          tcpserveraddress = config.server;
+          tcpport = builtins.toString config.port;
+          nodename = config.node;
+          passwordaccess = if config.genPasswd then "generate" else "prompt";
+          passworddir = ''"${config.passwdDir}"'';
+        } // optionalAttrs (config.includeExclude!="") {
+          inclexcl = ''"${pkgs.writeText "inclexcl.dsm.sys" config.includeExclude}"'';
+        }
+      );
+    config.text =
+      let
+        attrset = filterAttrs (k: v: v!=null) config.extraConfig;
+        mkLine = k: v: k + optionalString (v!="") "  ${v}";
+        lines = mapAttrsToList mkLine attrset;
+      in
+        concatLines lines;
+    config.stanza = ''
+      server  ${config.name}
+      ${config.text}
+    '';
+  };
+
+  options.programs.tsmClient = {
+    enable = mkEnableOption (lib.mdDoc ''
+      IBM Spectrum Protect (Tivoli Storage Manager, TSM)
+      client command line applications with a
+      client system-options file "dsm.sys"
+    '');
+    servers = mkOption {
+      type = attrsOf (submodule [ serverOptions ]);
+      default = {};
+      example.mainTsmServer = {
+        server = "tsmserver.company.com";
+        node = "MY-TSM-NODE";
+        extraConfig.compression = "yes";
+      };
+      description = lib.mdDoc ''
+        Server definitions ("stanzas")
+        for the client system-options file.
+      '';
+    };
+    defaultServername = mkOption {
+      type = nullOr servernameType;
+      default = null;
+      example = "mainTsmServer";
+      description = lib.mdDoc ''
+        If multiple server stanzas are declared with
+        {option}`programs.tsmClient.servers`,
+        this option may be used to name a default
+        server stanza that IBM TSM uses in the absence of
+        a user-defined {file}`dsm.opt` file.
+        This option translates to a
+        `defaultserver` configuration line.
+      '';
+    };
+    dsmSysText = mkOption {
+      type = lines;
+      readOnly = true;
+      description = lib.mdDoc ''
+        This configuration key contains the effective text
+        of the client system-options file "dsm.sys".
+        It should not be changed, but may be
+        used to feed the configuration into other
+        TSM-dependent packages used on the system.
+      '';
+    };
+    package = mkOption {
+      type = package;
+      default = pkgs.tsm-client;
+      defaultText = literalExpression "pkgs.tsm-client";
+      example = literalExpression "pkgs.tsm-client-withGui";
+      description = lib.mdDoc ''
+        The TSM client derivation to be
+        added to the system environment.
+        It will be used with `.override`
+        to add paths to the client system-options file.
+      '';
+    };
+    wrappedPackage = mkOption {
+      type = package;
+      readOnly = true;
+      description = lib.mdDoc ''
+        The TSM client derivation, wrapped with the path
+        to the client system-options file "dsm.sys".
+        This option provides the effective derivation
+        for other modules that want to call TSM executables.
+      '';
+    };
+  };
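+
+  # A minimal configuration sketch combining the options above (server and
+  # node names are taken from the option examples; adjust for your setup):
+  #
+  #   programs.tsmClient = {
+  #     enable = true;
+  #     defaultServername = "mainTsmServer";
+  #     servers.mainTsmServer = {
+  #       server = "tsmserver.company.com";
+  #       node = "MY-TSM-NODE";
+  #       genPasswd = true;
+  #       extraConfig.compression = "yes";
+  #     };
+  #   };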
+
+  cfg = config.programs.tsmClient;
+
+  assertions = [
+    {
+      assertion = checkIUnique (mapAttrsToList (k: v: v.name) cfg.servers);
+      message = ''
+        TSM server names contain a duplicate
+        (note that case doesn't matter!)
+      '';
+    }
+    {
+      assertion = (cfg.defaultServername!=null)->(hasAttr cfg.defaultServername cfg.servers);
+      message = "TSM defaultServername not found in list of servers";
+    }
+  ];
+
+  dsmSysText = ''
+    ****  IBM Spectrum Protect (Tivoli Storage Manager)
+    ****  client system-options file "dsm.sys".
+    ****  Do not edit!
+    ****  This file is generated by NixOS configuration.
+
+    ${optionalString (cfg.defaultServername!=null) "defaultserver  ${cfg.defaultServername}"}
+
+    ${concatLines (mapAttrsToList (k: v: v.stanza) cfg.servers)}
+  '';
+
+in
+
+{
+
+  inherit options;
+
+  config = mkIf cfg.enable {
+    inherit assertions;
+    programs.tsmClient.dsmSysText = dsmSysText;
+    programs.tsmClient.wrappedPackage = cfg.package.override rec {
+      dsmSysCli = pkgs.writeText "dsm.sys" cfg.dsmSysText;
+      dsmSysApi = dsmSysCli;
+    };
+    environment.systemPackages = [ cfg.wrappedPackage ];
+  };
+
+  meta.maintainers = [ lib.maintainers.yarny ];
+
+}
diff --git a/nixpkgs/nixos/modules/programs/turbovnc.nix b/nixpkgs/nixos/modules/programs/turbovnc.nix
new file mode 100644
index 000000000000..511b6badc041
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/turbovnc.nix
@@ -0,0 +1,54 @@
+# Global configuration for TurboVNC.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.turbovnc;
+in
+{
+  options = {
+
+    programs.turbovnc = {
+
+      ensureHeadlessSoftwareOpenGL = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to set up NixOS such that TurboVNC's built-in software OpenGL
+          implementation works.
+
+          This will enable {option}`hardware.opengl.enable` so that OpenGL
+          programs can find Mesa's llvmpipe drivers.
+
+          Setting this option to `false` does not mean that software
+          OpenGL won't work; it may still work depending on your system
+          configuration.
+
+          This option is also intended to generate warnings if you are using some
+          configuration that's incompatible with using headless software OpenGL
+          in TurboVNC.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.ensureHeadlessSoftwareOpenGL {
+
+    # TurboVNC has builtin support for Mesa llvmpipe's `swrast`
+    # software rendering to implement GLX (OpenGL on Xorg).
+    # However, just building TurboVNC with support for that is not enough
+    # (it only takes care of the X server side part of OpenGL);
+    # the individual applications (e.g. `glxgears`) also need to directly load
+    # the OpenGL libs.
+    # Thus, this creates `/run/opengl-driver` populated by Mesa so that the applications
+    # can find the llvmpipe `swrast.so` software rendering DRI lib via `libglvnd`.
+    # This comment exists to explain why `hardware.opengl` is involved,
+    # even though 100% software rendering is used.
+    hardware.opengl.enable = true;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/udevil.nix b/nixpkgs/nixos/modules/programs/udevil.nix
new file mode 100644
index 000000000000..b0f00b4b541b
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/udevil.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.udevil;
+
+in {
+  options.programs.udevil.enable = mkEnableOption (lib.mdDoc "udevil");
+
+  config = mkIf cfg.enable {
+    security.wrappers.udevil =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${lib.getBin pkgs.udevil}/bin/udevil";
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/usbtop.nix b/nixpkgs/nixos/modules/programs/usbtop.nix
new file mode 100644
index 000000000000..e262ae3745be
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/usbtop.nix
@@ -0,0 +1,21 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.usbtop;
+in {
+  options = {
+    programs.usbtop.enable = mkEnableOption (lib.mdDoc "usbtop and required kernel module");
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [
+      usbtop
+    ];
+
+    boot.kernelModules = [
+      "usbmon"
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/vim.nix b/nixpkgs/nixos/modules/programs/vim.nix
new file mode 100644
index 000000000000..b12a45166d56
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/vim.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.vim;
+in {
+  options.programs.vim = {
+    defaultEditor = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        When enabled, installs vim and configures vim to be the default editor
+        using the EDITOR environment variable.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.vim;
+      defaultText = literalExpression "pkgs.vim";
+      example = literalExpression "pkgs.vim-full";
+      description = lib.mdDoc ''
+        vim package to use.
+      '';
+    };
+  };
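+
+  # Example (a sketch combining the options above):
+  #
+  #   programs.vim = {
+  #     defaultEditor = true;
+  #     package = pkgs.vim-full;
+  #   };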
+
+  config = mkIf cfg.defaultEditor {
+    environment.systemPackages = [ cfg.package ];
+    environment.variables = { EDITOR = mkOverride 900 "vim"; };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/virt-manager.nix b/nixpkgs/nixos/modules/programs/virt-manager.nix
new file mode 100644
index 000000000000..095db7586a03
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/virt-manager.nix
@@ -0,0 +1,16 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.virt-manager;
+in {
+  options.programs.virt-manager = {
+    enable = lib.mkEnableOption "virt-manager, an UI for managing virtual machines in libvirt";
+
+    package = lib.mkPackageOption pkgs "virt-manager" {};
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    programs.dconf.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/virtualbox.nix b/nixpkgs/nixos/modules/programs/virtualbox.nix
new file mode 100644
index 000000000000..be96cf23b396
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/virtualbox.nix
@@ -0,0 +1,8 @@
+let
+  msg = "Importing <nixpkgs/nixos/modules/programs/virtualbox.nix> is "
+      + "deprecated, please use `virtualisation.virtualbox.host.enable = true' "
+      + "instead.";
+in {
+  config.warnings = [ msg ];
+  config.virtualisation.virtualbox.host.enable = true;
+}
diff --git a/nixpkgs/nixos/modules/programs/wavemon.nix b/nixpkgs/nixos/modules/programs/wavemon.nix
new file mode 100644
index 000000000000..4dbf2748913e
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wavemon.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.wavemon;
+in {
+  options = {
+    programs.wavemon = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to add wavemon to the global environment and configure a
+          setcap wrapper for it.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ wavemon ];
+    security.wrappers.wavemon = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_admin+ep";
+      source = "${pkgs.wavemon}/bin/wavemon";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/wayland/cardboard.nix b/nixpkgs/nixos/modules/programs/wayland/cardboard.nix
new file mode 100644
index 000000000000..262c698c74ba
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wayland/cardboard.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.cardboard;
+in
+{
+  meta.maintainers = with lib.maintainers; [ AndersonTorres ];
+
+  options.programs.cardboard = {
+    enable = lib.mkEnableOption (lib.mdDoc "cardboard");
+
+    package = lib.mkPackageOptionMD pkgs "cardboard" { };
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [
+    {
+      environment.systemPackages = [ cfg.package ];
+
+      # To make a cardboard session available for certain DMs like SDDM
+      services.xserver.displayManager.sessionPackages = [ cfg.package ];
+    }
+    (import ./wayland-session.nix { inherit lib pkgs; })
+  ]);
+}
diff --git a/nixpkgs/nixos/modules/programs/wayland/river.nix b/nixpkgs/nixos/modules/programs/wayland/river.nix
new file mode 100644
index 000000000000..71232a7d2618
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wayland/river.nix
@@ -0,0 +1,59 @@
+{
+  config,
+  pkgs,
+  lib,
+  ...
+}:
+with lib; let
+  cfg = config.programs.river;
+in {
+  options.programs.river = {
+    enable = mkEnableOption (lib.mdDoc "river, a dynamic tiling Wayland compositor");
+
+    package = mkOption {
+      type = with types; nullOr package;
+      default = pkgs.river;
+      defaultText = literalExpression "pkgs.river";
+      description = lib.mdDoc ''
+        River package to use.
+        Set to `null` to not add any River package to your path.
+        This should be done if you want to use the Home Manager River module to install River.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = with pkgs; [
+        swaylock
+        foot
+        dmenu
+      ];
+      defaultText = literalExpression ''
+        with pkgs; [ swaylock foot dmenu ];
+      '';
+      example = literalExpression ''
+        with pkgs; [
+          termite rofi light
+        ]
+      '';
+      description = lib.mdDoc ''
+        Extra packages to be installed system wide. See
+        [Common X11 apps used on i3 with Wayland alternatives](https://github.com/swaywm/sway/wiki/i3-Migration-Guide#common-x11-apps-used-on-i3-with-wayland-alternatives)
+        for a list of useful software.
+      '';
+    };
+  };
+
+  config =
+    mkIf cfg.enable (mkMerge [
+      {
+        environment.systemPackages = optional (cfg.package != null) cfg.package ++ cfg.extraPackages;
+
+        # To make a river session available if a display manager like SDDM is enabled:
+        services.xserver.displayManager.sessionPackages = optionals (cfg.package != null) [ cfg.package ];
+      }
+      (import ./wayland-session.nix { inherit lib pkgs; })
+    ]);
+
+  meta.maintainers = with lib.maintainers; [ GaetanLepage ];
+}
diff --git a/nixpkgs/nixos/modules/programs/wayland/sway.nix b/nixpkgs/nixos/modules/programs/wayland/sway.nix
new file mode 100644
index 000000000000..698d9c2b46c4
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wayland/sway.nix
@@ -0,0 +1,158 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.sway;
+
+  wrapperOptions = types.submodule {
+    options =
+      let
+        mkWrapperFeature  = default: description: mkOption {
+          type = types.bool;
+          inherit default;
+          example = !default;
+          description = lib.mdDoc "Whether to make use of the ${description}";
+        };
+      in {
+        base = mkWrapperFeature true ''
+          base wrapper to execute extra session commands and prepend
+          dbus-run-session to the sway command.
+        '';
+        gtk = mkWrapperFeature false ''
+          wrapGAppsHook wrapper to execute sway with required environment
+          variables for GTK applications.
+        '';
+    };
+  };
+
+  defaultSwayPackage = pkgs.sway.override {
+    extraSessionCommands = cfg.extraSessionCommands;
+    extraOptions = cfg.extraOptions;
+    withBaseWrapper = cfg.wrapperFeatures.base;
+    withGtkWrapper = cfg.wrapperFeatures.gtk;
+    isNixOS = true;
+  };
+in {
+  options.programs.sway = {
+    enable = mkEnableOption (lib.mdDoc ''
+      Sway, the i3-compatible tiling Wayland compositor. You can manually launch
+      Sway by executing "exec sway" on a TTY. Copy /etc/sway/config to
+      ~/.config/sway/config to modify the default configuration. See
+      <https://github.com/swaywm/sway/wiki> and
+      "man 5 sway" for more information'');
+
+    package = mkOption {
+      type = with types; nullOr package;
+      default = defaultSwayPackage;
+      defaultText = literalExpression "pkgs.sway";
+      description = lib.mdDoc ''
+        Sway package to use. Setting a custom package bypasses the options
+        'wrapperFeatures', 'extraSessionCommands', and 'extraOptions'.
+        Set to `null` to not add any Sway package to your
+        path. This should be done if you want to use the Home Manager Sway
+        module to install Sway.
+      '';
+    };
+
+    wrapperFeatures = mkOption {
+      type = wrapperOptions;
+      default = { };
+      example = { gtk = true; };
+      description = lib.mdDoc ''
+        Attribute set of features to enable in the wrapper.
+      '';
+    };
+
+    extraSessionCommands = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        # SDL:
+        export SDL_VIDEODRIVER=wayland
+        # QT (needs qt5.qtwayland in systemPackages):
+        export QT_QPA_PLATFORM=wayland-egl
+        export QT_WAYLAND_DISABLE_WINDOWDECORATION="1"
+        # Fix for some Java AWT applications (e.g. Android Studio),
+        # use this if they aren't displayed properly:
+        export _JAVA_AWT_WM_NONREPARENTING=1
+      '';
+      description = lib.mdDoc ''
+        Shell commands executed just before Sway is started. See
+        <https://github.com/swaywm/sway/wiki/Running-programs-natively-under-wayland>
+        and <https://github.com/swaywm/wlroots/blob/master/docs/env_vars.md>
+        for some useful environment variables.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [
+        "--verbose"
+        "--debug"
+        "--unsupported-gpu"
+      ];
+      description = lib.mdDoc ''
+        Command line arguments passed to launch Sway. Please DO NOT report
+        issues if you use an unsupported GPU (proprietary drivers).
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = with pkgs; [
+        swaylock swayidle foot dmenu
+      ];
+      defaultText = literalExpression ''
+        with pkgs; [ swaylock swayidle foot dmenu ];
+      '';
+      example = literalExpression ''
+        with pkgs; [
+          i3status i3status-rust
+          termite rofi light
+        ]
+      '';
+      description = lib.mdDoc ''
+        Extra packages to be installed system wide. See
+        <https://github.com/swaywm/sway/wiki/Useful-add-ons-for-sway> and
+        <https://github.com/swaywm/sway/wiki/i3-Migration-Guide#common-x11-apps-used-on-i3-with-wayland-alternatives>
+        for a list of useful software.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable
+    (mkMerge [
+      {
+        assertions = [
+          {
+            assertion = cfg.extraSessionCommands != "" -> cfg.wrapperFeatures.base;
+            message = ''
+              The extraSessionCommands for Sway will not be run if
+              wrapperFeatures.base is disabled.
+            '';
+          }
+        ];
+        environment = {
+          systemPackages = optional (cfg.package != null) cfg.package ++ cfg.extraPackages;
+          # Needed for the default wallpaper:
+          pathsToLink = optionals (cfg.package != null) [ "/share/backgrounds/sway" ];
+          etc = {
+            "sway/config.d/nixos.conf".source = pkgs.writeText "nixos.conf" ''
+              # Import the most important environment variables into the D-Bus and systemd
+              # user environments (e.g. required for screen sharing and Pinentry prompts):
+              exec dbus-update-activation-environment --systemd DISPLAY WAYLAND_DISPLAY SWAYSOCK XDG_CURRENT_DESKTOP
+            '';
+          } // optionalAttrs (cfg.package != null) {
+            "sway/config".source = mkOptionDefault "${cfg.package}/etc/sway/config";
+          };
+        };
+        # To make a Sway session available if a display manager like SDDM is enabled:
+        services.xserver.displayManager.sessionPackages = optionals (cfg.package != null) [ cfg.package ];
+      }
+      (import ./wayland-session.nix { inherit lib pkgs; })
+    ]);
+
+  meta.maintainers = with lib.maintainers; [ primeos colemickens ];
+}
diff --git a/nixpkgs/nixos/modules/programs/wayland/waybar.nix b/nixpkgs/nixos/modules/programs/wayland/waybar.nix
new file mode 100644
index 000000000000..2c49ae140813
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wayland/waybar.nix
@@ -0,0 +1,25 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.waybar;
+in
+{
+  options.programs.waybar = {
+    enable = mkEnableOption (lib.mdDoc "waybar");
+    package = mkPackageOptionMD pkgs "waybar" { };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    systemd.user.services.waybar = {
+      description = "Waybar as systemd service";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      script = "${cfg.package}/bin/waybar";
+    };
+  };
+
+  meta.maintainers = [ maintainers.FlorianFranzen ];
+}
diff --git a/nixpkgs/nixos/modules/programs/wayland/wayfire.nix b/nixpkgs/nixos/modules/programs/wayland/wayfire.nix
new file mode 100644
index 000000000000..9ea2010cf59c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wayland/wayfire.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, ...}:
+let
+  cfg = config.programs.wayfire;
+in
+{
+  meta.maintainers = with lib.maintainers; [ rewine ];
+
+  options.programs.wayfire = {
+    enable = lib.mkEnableOption (lib.mdDoc "Wayfire, a wayland compositor based on wlroots");
+
+    package = lib.mkPackageOptionMD pkgs "wayfire" { };
+
+    plugins = lib.mkOption {
+      type = lib.types.listOf lib.types.package;
+      default = with pkgs.wayfirePlugins; [ wcm wf-shell ];
+      defaultText = lib.literalExpression "with pkgs.wayfirePlugins; [ wcm wf-shell ]";
+      example = lib.literalExpression ''
+        with pkgs.wayfirePlugins; [
+          wcm
+          wf-shell
+          wayfire-plugins-extra
+        ];
+      '';
+      description = lib.mdDoc ''
+        Additional plugins to use with the wayfire window manager.
+      '';
+    };
+  };
+
+  config = let
+    finalPackage = pkgs.wayfire-with-plugins.override {
+      wayfire = cfg.package;
+      plugins = cfg.plugins;
+    };
+  in
+  lib.mkIf cfg.enable {
+    environment.systemPackages = [
+      finalPackage
+    ];
+
+    services.xserver.displayManager.sessionPackages = [ finalPackage ];
+
+    xdg.portal = {
+      enable = lib.mkDefault true;
+      wlr.enable = lib.mkDefault true;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/wayland/wayland-session.nix b/nixpkgs/nixos/modules/programs/wayland/wayland-session.nix
new file mode 100644
index 000000000000..da117ceae0ad
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wayland/wayland-session.nix
@@ -0,0 +1,23 @@
+{ lib, pkgs, ... }: with lib; {
+    security = {
+      polkit.enable = true;
+      pam.services.swaylock = {};
+    };
+
+    hardware.opengl.enable = mkDefault true;
+    fonts.enableDefaultPackages = mkDefault true;
+
+    programs = {
+      dconf.enable = mkDefault true;
+      xwayland.enable = mkDefault true;
+    };
+
+    xdg.portal = {
+      enable = mkDefault true;
+
+      extraPortals = [
+        # For screen sharing
+        pkgs.xdg-desktop-portal-wlr
+      ];
+    };
+}
diff --git a/nixpkgs/nixos/modules/programs/weylus.nix b/nixpkgs/nixos/modules/programs/weylus.nix
new file mode 100644
index 000000000000..a5775f3b981c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/weylus.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.weylus;
+in
+{
+  options.programs.weylus = with types; {
+    enable = mkEnableOption (lib.mdDoc "weylus");
+
+    openFirewall = mkOption {
+      type = bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open ports needed for the functionality of the program.
+      '';
+    };
+
+    users = mkOption {
+      type = listOf str;
+      default = [ ];
+      description = lib.mdDoc ''
+        Users that should be able to use stylus and multi-touch support must be added to this list.
+        These users can synthesize input events system-wide, even when another user is logged in; untrusted users should not be added.
+      '';
+    };
+
+    package = mkOption {
+      type = package;
+      default = pkgs.weylus;
+      defaultText = lib.literalExpression "pkgs.weylus";
+      description = lib.mdDoc "Weylus package to install.";
+    };
+  };
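+
+  # Example sketch (the user name "alice" is hypothetical): allow one user to
+  # use stylus/multi-touch input and open the required firewall ports:
+  #
+  #   programs.weylus = {
+  #     enable = true;
+  #     openFirewall = true;
+  #     users = [ "alice" ];
+  #   };
+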
+  config = mkIf cfg.enable {
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 1701 9001 ];
+    };
+
+    hardware.uinput.enable = true;
+
+    users.groups.uinput.members = cfg.users;
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/wireshark.nix b/nixpkgs/nixos/modules/programs/wireshark.nix
new file mode 100644
index 000000000000..834b0ba35695
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wireshark.nix
@@ -0,0 +1,42 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.wireshark;
+  wireshark = cfg.package;
+in {
+  options = {
+    programs.wireshark = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to add Wireshark to the global environment and configure a
+          setcap wrapper for 'dumpcap' for users in the 'wireshark' group.
+        '';
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.wireshark-cli;
+        defaultText = literalExpression "pkgs.wireshark-cli";
+        description = lib.mdDoc ''
+          Which Wireshark package to install in the global environment.
+        '';
+      };
+    };
+  };
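+
+  # Users must be members of the "wireshark" group to use the dumpcap wrapper
+  # defined below, e.g. (user name hypothetical):
+  #
+  #   users.users.alice.extraGroups = [ "wireshark" ];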
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ wireshark ];
+    users.groups.wireshark = {};
+
+    security.wrappers.dumpcap = {
+      source = "${wireshark}/bin/dumpcap";
+      capabilities = "cap_net_raw,cap_net_admin+eip";
+      owner = "root";
+      group = "wireshark";
+      permissions = "u+rx,g+x";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/wshowkeys.nix b/nixpkgs/nixos/modules/programs/wshowkeys.nix
new file mode 100644
index 000000000000..ebb5c5509442
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/wshowkeys.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.wshowkeys;
+in {
+  meta.maintainers = with maintainers; [ primeos ];
+
+  options = {
+    programs.wshowkeys = {
+      enable = mkEnableOption (lib.mdDoc ''
+        wshowkeys (displays keypresses on screen on supported Wayland
+        compositors). It requires root permissions to read input events, but
+        these permissions are dropped after startup'');
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers.wshowkeys =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${pkgs.wshowkeys}/bin/wshowkeys";
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/xastir.nix b/nixpkgs/nixos/modules/programs/xastir.nix
new file mode 100644
index 000000000000..6d5fc59aac50
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/xastir.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.xastir;
+in {
+  meta.maintainers = with maintainers; [ melling ];
+
+  options.programs.xastir = {
+    enable = mkEnableOption (mdDoc "Xastir Graphical APRS client");
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ xastir ];
+    security.wrappers.xastir = {
+      source = "${pkgs.xastir}/bin/xastir";
+      capabilities = "cap_net_raw+p";
+      owner = "root";
+      group = "root";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/xfconf.nix b/nixpkgs/nixos/modules/programs/xfconf.nix
new file mode 100644
index 000000000000..b0f45339335d
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/xfconf.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.programs.xfconf;
+
+in {
+  meta = {
+    maintainers = teams.xfce.members;
+  };
+
+  options = {
+    programs.xfconf = {
+      enable = mkEnableOption (lib.mdDoc "Xfconf, the Xfce configuration storage system");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [
+      pkgs.xfce.xfconf
+    ];
+
+    services.dbus.packages = [
+      pkgs.xfce.xfconf
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/xfs_quota.nix b/nixpkgs/nixos/modules/programs/xfs_quota.nix
new file mode 100644
index 000000000000..0fc2958b3f38
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/xfs_quota.nix
@@ -0,0 +1,110 @@
+# Configuration for the xfs_quota command
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.xfs_quota;
+
+  limitOptions = opts: concatStringsSep " " [
+    (optionalString (opts.sizeSoftLimit != null) "bsoft=${opts.sizeSoftLimit}")
+    (optionalString (opts.sizeHardLimit != null) "bhard=${opts.sizeHardLimit}")
+  ];
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    programs.xfs_quota = {
+      projects = mkOption {
+        default = {};
+        type = types.attrsOf (types.submodule {
+          options = {
+            id = mkOption {
+              type = types.int;
+              description = lib.mdDoc "Project ID.";
+            };
+
+            fileSystem = mkOption {
+              type = types.str;
+              description = lib.mdDoc "XFS filesystem hosting the xfs_quota project.";
+              default = "/";
+            };
+
+            path = mkOption {
+              type = types.str;
+              description = lib.mdDoc "Project directory.";
+            };
+
+            sizeSoftLimit = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "30g";
+              description = lib.mdDoc "Soft limit of the project size";
+            };
+
+            sizeHardLimit = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "50g";
+              description = lib.mdDoc "Hard limit of the project size.";
+            };
+          };
+        });
+
+        description = lib.mdDoc "Setup of xfs_quota projects. Make sure the filesystem is mounted with the pquota option.";
+
+        example = {
+          projname = {
+            id = 50;
+            path = "/xfsprojects/projname";
+            sizeHardLimit = "50g";
+          };
+        };
+      };
+    };
+
+  };
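+
+  # Note: project quotas only work if the hosting filesystem is mounted with
+  # the "pquota" option (see the description above). A sketch, with a
+  # hypothetical device name:
+  #
+  #   fileSystems."/xfsprojects" = {
+  #     device = "/dev/disk/by-label/xfsdata";
+  #     fsType = "xfs";
+  #     options = [ "pquota" ];
+  #   };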
+
+
+  ###### implementation
+
+  config = mkIf (cfg.projects != {}) {
+
+    environment.etc.projects.source = pkgs.writeText "etc-project"
+      (concatStringsSep "\n" (mapAttrsToList
+        (name: opts: "${toString opts.id}:${opts.path}") cfg.projects));
+
+    environment.etc.projid.source = pkgs.writeText "etc-projid"
+      (concatStringsSep "\n" (mapAttrsToList
+        (name: opts: "${name}:${toString opts.id}") cfg.projects));
+
+    systemd.services = mapAttrs' (name: opts:
+      nameValuePair "xfs_quota-${name}" {
+        description = "Setup xfs_quota for project ${name}";
+        script = ''
+          ${pkgs.xfsprogs.bin}/bin/xfs_quota -x -c 'project -s ${name}' ${opts.fileSystem}
+          ${pkgs.xfsprogs.bin}/bin/xfs_quota -x -c 'limit -p ${limitOptions opts} ${name}' ${opts.fileSystem}
+        '';
+
+        wantedBy = [ "multi-user.target" ];
+        after = [ ((replaceStrings [ "/" ] [ "-" ] opts.fileSystem) + ".mount") ];
+
+        restartTriggers = [ config.environment.etc.projects.source ];
+
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+      }
+    ) cfg.projects;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/xonsh.nix b/nixpkgs/nixos/modules/programs/xonsh.nix
new file mode 100644
index 000000000000..167c953f5ffd
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/xonsh.nix
@@ -0,0 +1,85 @@
+# This module defines global configuration for xonsh.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.xonsh;
+
+in
+
+{
+
+  options = {
+
+    programs.xonsh = {
+
+      enable = mkOption {
+        default = false;
+        description = lib.mdDoc ''
+          Whether to configure xonsh as an interactive shell.
+        '';
+        type = types.bool;
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.xonsh;
+        defaultText = literalExpression "pkgs.xonsh";
+        example = literalExpression "pkgs.xonsh.override { extraPackages = ps: [ ps.requests ]; }";
+        description = lib.mdDoc ''
+          xonsh package to use.
+        '';
+      };
+
+      config = mkOption {
+        default = "";
+        description = lib.mdDoc "Control file to customize your shell behavior.";
+        type = types.lines;
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.etc."xonsh/xonshrc".text = ''
+      # /etc/xonsh/xonshrc: DO NOT EDIT -- this file has been generated automatically.
+
+
+      if not ''${...}.get('__NIXOS_SET_ENVIRONMENT_DONE'):
+          # The NixOS environment and thereby also $PATH
+          # haven't been fully set up at this point. But
+          # `source-bash` below requires `bash` to be on $PATH,
+          # so add an entry with bash's location:
+          $PATH.add('${pkgs.bash}/bin')
+
+          # Stash xonsh's ls alias, so that we don't get a collision
+          # with Bash's ls alias from environment.shellAliases:
+          _ls_alias = aliases.pop('ls', None)
+
+          # Source the NixOS environment config.
+          source-bash "${config.system.build.setEnvironment}"
+
+          # Restore xonsh's ls alias, overriding that from Bash (if any).
+          if _ls_alias is not None:
+              aliases['ls'] = _ls_alias
+          del _ls_alias
+
+
+      ${cfg.config}
+    '';
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.shells =
+      [ "/run/current-system/sw/bin/xonsh"
+        "${cfg.package}/bin/xonsh"
+      ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/programs/xss-lock.nix b/nixpkgs/nixos/modules/programs/xss-lock.nix
new file mode 100644
index 000000000000..87b3957ab834
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/xss-lock.nix
@@ -0,0 +1,45 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.xss-lock;
+in
+{
+  options.programs.xss-lock = {
+    enable = mkEnableOption (lib.mdDoc "xss-lock");
+
+    lockerCommand = mkOption {
+      default = "${pkgs.i3lock}/bin/i3lock";
+      defaultText = literalExpression ''"''${pkgs.i3lock}/bin/i3lock"'';
+      example = literalExpression ''"''${pkgs.i3lock-fancy}/bin/i3lock-fancy"'';
+      type = types.separatedString " ";
+      description = lib.mdDoc "Locker to be used with xsslock";
+    };
+
+    extraOptions = mkOption {
+      default = [ ];
+      example = [ "--ignore-sleep" ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        Additional command-line arguments to pass to
+        {command}`xss-lock`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.xss-lock = {
+      description = "XSS Lock Daemon";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = with lib;
+        strings.concatStringsSep " " ([
+            "${pkgs.xss-lock}/bin/xss-lock" "--session \${XDG_SESSION_ID}"
+          ] ++ (map escapeShellArg cfg.extraOptions) ++ [
+            "--"
+            cfg.lockerCommand
+        ]);
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/xwayland.nix b/nixpkgs/nixos/modules/programs/xwayland.nix
new file mode 100644
index 000000000000..8d13e4c22b5b
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/xwayland.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.xwayland;
+
+in
+
+{
+  options.programs.xwayland = {
+
+    enable = mkEnableOption (lib.mdDoc "Xwayland (an X server for interfacing X11 apps with the Wayland protocol)");
+
+    defaultFontPath = mkOption {
+      type = types.str;
+      default = optionalString config.fonts.fontDir.enable
+        "/run/current-system/sw/share/X11/fonts";
+      defaultText = literalExpression ''
+        optionalString config.fonts.fontDir.enable "/run/current-system/sw/share/X11/fonts"
+      '';
+      description = lib.mdDoc ''
+        Default font path. Setting this option causes Xwayland to be rebuilt.
+      '';
+    };
+
+    package = mkOption {
+      type = types.path;
+      default = pkgs.xwayland.override (oldArgs: {
+        inherit (cfg) defaultFontPath;
+      });
+      defaultText = literalExpression ''
+        pkgs.xwayland.override (oldArgs: {
+          inherit (config.programs.xwayland) defaultFontPath;
+        })
+      '';
+      description = lib.mdDoc "The Xwayland package to use.";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    # Needed by some applications for fonts and default settings
+    environment.pathsToLink = [ "/share/X11" ];
+
+    environment.systemPackages = [ cfg.package ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/yabar.nix b/nixpkgs/nixos/modules/programs/yabar.nix
new file mode 100644
index 000000000000..58ffe555715d
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/yabar.nix
@@ -0,0 +1,163 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.yabar;
+
+  mapExtra = v: lib.concatStringsSep "\n" (mapAttrsToList (
+    key: val: "${key} = ${if (isString val) then "\"${val}\"" else "${builtins.toString val}"};"
+  ) v);
+
+  listKeys = r: concatStringsSep "," (map (n: "\"${n}\"") (attrNames r));
+
+  configFile = let
+    bars = mapAttrsToList (
+      name: cfg: ''
+        ${name}: {
+          font: "${cfg.font}";
+          position: "${cfg.position}";
+
+          ${mapExtra cfg.extra}
+
+          block-list: [${listKeys cfg.indicators}]
+
+          ${concatStringsSep "\n" (mapAttrsToList (
+            name: cfg: ''
+              ${name}: {
+                exec: "${cfg.exec}";
+                align: "${cfg.align}";
+                ${mapExtra cfg.extra}
+              };
+            ''
+          ) cfg.indicators)}
+        };
+      ''
+    ) cfg.bars;
+  in pkgs.writeText "yabar.conf" ''
+    bar-list = [${listKeys cfg.bars}];
+    ${concatStringsSep "\n" bars}
+  '';
+in
+  {
+    options.programs.yabar = {
+      enable = mkEnableOption (lib.mdDoc "yabar");
+
+      package = mkOption {
+        default = pkgs.yabar-unstable;
+        defaultText = literalExpression "pkgs.yabar-unstable";
+        example = literalExpression "pkgs.yabar";
+        type = types.package;
+
+        # `yabar-stable` segfaults under certain conditions.
+        apply = x: if x == pkgs.yabar-unstable then x else flip warn x ''
+          It's not recommended to use `yabar' with `programs.yabar'; the (old) stable release
+          tends to segfault under certain circumstances:
+
+          * https://github.com/geommer/yabar/issues/86
+          * https://github.com/geommer/yabar/issues/68
+          * https://github.com/geommer/yabar/issues/143
+
+          Most of them don't occur on master anymore; until a new release is published, it's recommended
+          to use `yabar-unstable'.
+        '';
+
+        description = lib.mdDoc ''
+          The package which contains the `yabar` binary.
+
+          Nixpkgs has provided the `yabar` and `yabar-unstable`
+          derivations since 18.03, so it's possible to choose.
+        '';
+      };
+
+      bars = mkOption {
+        default = {};
+        type = types.attrsOf(types.submodule {
+          options = {
+            font = mkOption {
+              default = "sans bold 9";
+              example = "Droid Sans, FontAwesome Bold 9";
+              type = types.str;
+
+              description = lib.mdDoc ''
+                The font that will be used to draw the status bar.
+              '';
+            };
+
+            position = mkOption {
+              default = "top";
+              example = "bottom";
+              type = types.enum [ "top" "bottom" ];
+
+              description = lib.mdDoc ''
+                The position where the bar will be rendered.
+              '';
+            };
+
+            extra = mkOption {
+              default = {};
+              type = types.attrsOf types.str;
+
+              description = lib.mdDoc ''
+                An attribute set which contains further attributes of a bar.
+              '';
+            };
+
+            indicators = mkOption {
+              default = {};
+              type = types.attrsOf(types.submodule {
+                options.exec = mkOption {
+                  example = "YABAR_DATE";
+                  type = types.str;
+                  description = lib.mdDoc ''
+                     The type of the indicator to be executed.
+                  '';
+                };
+
+                options.align = mkOption {
+                  default = "left";
+                  example = "right";
+                  type = types.enum [ "left" "center" "right" ];
+
+                  description = lib.mdDoc ''
+                    Whether to align the indicator at the left or right of the bar.
+                  '';
+                };
+
+                options.extra = mkOption {
+                  default = {};
+                  type = types.attrsOf (types.either types.str types.int);
+
+                  description = lib.mdDoc ''
+                    An attribute set which contains further attributes of an indicator.
+                  '';
+                };
+              });
+
+              description = lib.mdDoc ''
+                Indicators that should be rendered by yabar.
+              '';
+            };
+          };
+        });
+
+        description = lib.mdDoc ''
+          Attribute set of bars that should be rendered by yabar.
+        '';
+      };
+    };
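+
+    # A sketch of a bar definition (bar and indicator names are hypothetical;
+    # "YABAR_DATE" is the example internal indicator from the option above):
+    #
+    #   programs.yabar.bars.top = {
+    #     position = "top";
+    #     indicators.date = {
+    #       exec = "YABAR_DATE";
+    #       align = "right";
+    #     };
+    #   };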
+
+    config = mkIf cfg.enable {
+      systemd.user.services.yabar = {
+        description = "yabar service";
+        wantedBy = [ "graphical-session.target" ];
+        partOf = [ "graphical-session.target" ];
+
+        script = ''
+          ${cfg.package}/bin/yabar -c ${configFile}
+        '';
+
+        serviceConfig.Restart = "always";
+      };
+    };
+  }
diff --git a/nixpkgs/nixos/modules/programs/yazi.nix b/nixpkgs/nixos/modules/programs/yazi.nix
new file mode 100644
index 000000000000..973f5c0122c2
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/yazi.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.programs.yazi;
+
+  settingsFormat = pkgs.formats.toml { };
+
+  names = [ "yazi" "theme" "keymap" ];
+in
+{
+  options.programs.yazi = {
+    enable = lib.mkEnableOption (lib.mdDoc "yazi terminal file manager");
+
+    package = lib.mkPackageOptionMD pkgs "yazi" { };
+
+    settings = lib.mkOption {
+      type = with lib.types; submodule {
+        options = lib.listToAttrs (map
+          (name: lib.nameValuePair name (lib.mkOption {
+            inherit (settingsFormat) type;
+            default = { };
+            description = lib.mdDoc ''
+              Configuration included in `${name}.toml`.
+
+              See https://github.com/sxyazi/yazi/blob/v${cfg.package.version}/config/docs/${name}.md for documentation.
+            '';
+          }))
+          names);
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration included in `$YAZI_CONFIG_HOME`.
+      '';
+    };
+  };
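+
+  # Sketch of the resulting layout: every non-empty entry among "yazi",
+  # "theme" and "keymap" in `settings` is rendered to /etc/yazi/<name>.toml,
+  # which yazi picks up via $YAZI_CONFIG_HOME. For instance (the key used
+  # here is a placeholder, not a documented yazi setting):
+  #
+  #   programs.yazi.settings.yazi.manager = { };   # -> /etc/yazi/yazi.toml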
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      systemPackages = [ cfg.package ];
+      variables.YAZI_CONFIG_HOME = "/etc/yazi/";
+      etc = lib.attrsets.mergeAttrsList (map
+        (name: lib.optionalAttrs (cfg.settings.${name} != { }) {
+          "yazi/${name}.toml".source = settingsFormat.generate "${name}.toml" cfg.settings.${name};
+        })
+        names);
+    };
+  };
+  meta = {
+    maintainers = with lib.maintainers; [ linsui ];
+    # The version of the package is used in the doc.
+    buildDocsInSandbox = false;
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/yubikey-touch-detector.nix b/nixpkgs/nixos/modules/programs/yubikey-touch-detector.nix
new file mode 100644
index 000000000000..9a0d107f73c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/yubikey-touch-detector.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+let cfg = config.programs.yubikey-touch-detector;
+in {
+  options = {
+    programs.yubikey-touch-detector = {
+      enable = lib.mkEnableOption "yubikey-touch-detector";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.packages = [ pkgs.yubikey-touch-detector ];
+
+    systemd.user.services.yubikey-touch-detector = {
+      path = [ pkgs.gnupg ];
+      wantedBy = [ "graphical-session.target" ];
+    };
+    systemd.user.sockets.yubikey-touch-detector = {
+      wantedBy = [ "sockets.target" ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/zmap.nix b/nixpkgs/nixos/modules/programs/zmap.nix
new file mode 100644
index 000000000000..056f78883061
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/zmap.nix
@@ -0,0 +1,18 @@
+{ pkgs, config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.zmap;
+in {
+  options.programs.zmap = {
+    enable = mkEnableOption (lib.mdDoc "ZMap");
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.zmap ];
+
+    environment.etc."zmap/blacklist.conf".source = "${pkgs.zmap}/etc/zmap/blacklist.conf";
+    environment.etc."zmap/zmap.conf".source = "${pkgs.zmap}/etc/zmap.conf";
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md b/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md
new file mode 100644
index 000000000000..6a310006edbf
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.md
@@ -0,0 +1,109 @@
+# Oh my ZSH {#module-programs-zsh-ohmyzsh}
+
+[`oh-my-zsh`](https://ohmyz.sh/) is a framework to manage your [ZSH](https://www.zsh.org/)
+configuration, including completion scripts for several CLI tools and custom
+prompt themes.
+
+## Basic usage {#module-programs-oh-my-zsh-usage}
+
+The module uses the `oh-my-zsh` package with all available
+features. The initial setup using Nix expressions is fairly similar to the
+configuration format of `oh-my-zsh`.
+```
+{
+  programs.zsh.ohMyZsh = {
+    enable = true;
+    plugins = [ "git" "python" "man" ];
+    theme = "agnoster";
+  };
+}
+```
+For a detailed explanation of these arguments please refer to the
+[`oh-my-zsh` docs](https://github.com/robbyrussell/oh-my-zsh/wiki).
+
+The expression generates the needed configuration and writes it into your
+`/etc/zshrc`.
+
+## Custom additions {#module-programs-oh-my-zsh-additions}
+
+Sometimes third-party or custom scripts such as a modified theme may be
+needed. `oh-my-zsh` provides the
+[`ZSH_CUSTOM`](https://github.com/robbyrussell/oh-my-zsh/wiki/Customization#overriding-internals)
+environment variable for this which points to a directory with additional
+scripts.
+
+The module can do this as well:
+```
+{
+  programs.zsh.ohMyZsh.custom = "~/path/to/custom/scripts";
+}
+```
+
+## Custom environments {#module-programs-oh-my-zsh-environments}
+
+There are several extensions for `oh-my-zsh` packaged in
+`nixpkgs`. One of them is
+[nix-zsh-completions](https://github.com/spwhitt/nix-zsh-completions)
+which bundles completion scripts and a plugin for `oh-my-zsh`.
+
+Rather than using a single mutable path for `ZSH_CUSTOM`,
+it's also possible to generate this path from a list of Nix packages:
+```
+{ pkgs, ... }:
+{
+  programs.zsh.ohMyZsh.customPkgs = [
+    pkgs.nix-zsh-completions
+    # and even more...
+  ];
+}
+```
+Internally a single store path will be created using
+`buildEnv`. Please refer to the docs of
+[`buildEnv`](https://nixos.org/nixpkgs/manual/#sec-building-environment)
+for further reference.
+
+*Please keep in mind that this is not compatible with
+`programs.zsh.ohMyZsh.custom` as it requires an immutable
+store path while `custom` shall remain mutable! An
+evaluation failure will be thrown if both `custom` and
+`customPkgs` are set.*
+
+## Package your own customizations {#module-programs-oh-my-zsh-packaging-customizations}
+
+If third-party customizations (e.g. new themes) are supposed to be added to
+`oh-my-zsh` there are several pitfalls to keep in mind:
+
+  - To comply with the default structure of `ZSH` the entire
+    output needs to be written to `$out/share/zsh`.
+
+  - Completion scripts are supposed to be stored at
+    `$out/share/zsh/site-functions`. This directory is part of the
+    [`fpath`](https://zsh.sourceforge.io/Doc/Release/Functions.html)
+    and the package should be compatible with pure `ZSH`
+    setups. The module will automatically link the contents of
+    `site-functions` to the completions directory in the proper
+    store path.
+
+  - The `plugins` directory needs the structure
+    `pluginname/pluginname.plugin.zsh` as found in the
+    [upstream repo](https://github.com/robbyrussell/oh-my-zsh/tree/91b771914bc7c43dd7c7a43b586c5de2c225ceb7/plugins).
+
+A derivation for `oh-my-zsh` may look like this:
+```
+{ stdenv, fetchFromGitHub }:
+
+stdenv.mkDerivation rec {
+  name = "exemplary-zsh-customization-${version}";
+  version = "1.0.0";
+  src = fetchFromGitHub {
+    # path to the upstream repository
+  };
+
+  dontBuild = true;
+  installPhase = ''
+    mkdir -p $out/share/zsh/site-functions
+    cp -r {themes,plugins} $out/share/zsh
+    cp -r completions/* $out/share/zsh/site-functions
+  '';
+}
+```
diff --git a/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.nix b/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.nix
new file mode 100644
index 000000000000..83eee1c88b3c
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/zsh/oh-my-zsh.nix
@@ -0,0 +1,146 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.programs.zsh.ohMyZsh;
+
+  mkLinkFarmEntry = name: dir:
+    let
+      env = pkgs.buildEnv {
+        name = "zsh-${name}-env";
+        paths = cfg.customPkgs;
+        pathsToLink = "/share/zsh/${dir}";
+      };
+    in
+      { inherit name; path = "${env}/share/zsh/${dir}"; };
+
+  mkLinkFarmEntry' = name: mkLinkFarmEntry name name;
+
+  custom =
+    if cfg.custom != null then cfg.custom
+    else if length cfg.customPkgs == 0 then null
+    else pkgs.linkFarm "oh-my-zsh-custom" [
+      (mkLinkFarmEntry' "themes")
+      (mkLinkFarmEntry "completions" "site-functions")
+      (mkLinkFarmEntry' "plugins")
+    ];
+
+in
+  {
+    imports = [
+      (mkRenamedOptionModule [ "programs" "zsh" "oh-my-zsh" "enable" ] [ "programs" "zsh" "ohMyZsh" "enable" ])
+      (mkRenamedOptionModule [ "programs" "zsh" "oh-my-zsh" "theme" ] [ "programs" "zsh" "ohMyZsh" "theme" ])
+      (mkRenamedOptionModule [ "programs" "zsh" "oh-my-zsh" "custom" ] [ "programs" "zsh" "ohMyZsh" "custom" ])
+      (mkRenamedOptionModule [ "programs" "zsh" "oh-my-zsh" "plugins" ] [ "programs" "zsh" "ohMyZsh" "plugins" ])
+    ];
+
+    options = {
+      programs.zsh.ohMyZsh = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable oh-my-zsh.
+          '';
+        };
+
+        package = mkOption {
+          default = pkgs.oh-my-zsh;
+          defaultText = literalExpression "pkgs.oh-my-zsh";
+          description = lib.mdDoc ''
+            Package to install for `oh-my-zsh` usage.
+          '';
+
+          type = types.package;
+        };
+
+        plugins = mkOption {
+          default = [];
+          type = types.listOf(types.str);
+          description = lib.mdDoc ''
+            List of oh-my-zsh plugins.
+          '';
+        };
+
+        custom = mkOption {
+          default = null;
+          type = with types; nullOr str;
+          description = lib.mdDoc ''
+            Path to a custom directory used as `ZSH_CUSTOM` to override parts
+            of oh-my-zsh. (Can't be used along with `customPkgs`.)
+          '';
+        };
+
+        customPkgs = mkOption {
+          default = [];
+          type = types.listOf types.package;
+          description = lib.mdDoc ''
+            List of custom packages that should be loaded into `oh-my-zsh`.
+          '';
+        };
+
+        theme = mkOption {
+          default = "";
+          type = types.str;
+          description = lib.mdDoc ''
+            Name of the theme to be used by oh-my-zsh.
+          '';
+        };
+
+        cacheDir = mkOption {
+          default = "$HOME/.cache/oh-my-zsh";
+          type = types.str;
+          description = lib.mdDoc ''
+            Cache directory to be used by `oh-my-zsh`.
+            Without this option it would default to the read-only Nix store.
+          '';
+        };
+      };
+    };
+
+    config = mkIf cfg.enable {
+
+      # Prevent zsh from overwriting oh-my-zsh's prompt
+      programs.zsh.promptInit = mkDefault "";
+
+      environment.systemPackages = [ cfg.package ];
+
+      programs.zsh.interactiveShellInit = ''
+        # oh-my-zsh configuration generated by NixOS
+        export ZSH=${cfg.package}/share/oh-my-zsh
+
+        ${optionalString (length(cfg.plugins) > 0)
+          "plugins=(${concatStringsSep " " cfg.plugins})"
+        }
+
+        ${optionalString (custom != null)
+          "ZSH_CUSTOM=\"${custom}\""
+        }
+
+        ${optionalString (stringLength(cfg.theme) > 0)
+          "ZSH_THEME=\"${cfg.theme}\""
+        }
+
+        ${optionalString (cfg.cacheDir != null) ''
+          if [[ ! -d "${cfg.cacheDir}" ]]; then
+            mkdir -p "${cfg.cacheDir}"
+          fi
+          ZSH_CACHE_DIR=${cfg.cacheDir}
+        ''}
+
+        source $ZSH/oh-my-zsh.sh
+      '';
+
+      assertions = [
+        {
+          assertion = cfg.custom != null -> cfg.customPkgs == [];
+          message = "If `cfg.custom` is set for `ZSH_CUSTOM`, `customPkgs` can't be used!";
+        }
+      ];
+
+    };
+
+    meta.doc = ./oh-my-zsh.md;
+  }
diff --git a/nixpkgs/nixos/modules/programs/zsh/zinputrc b/nixpkgs/nixos/modules/programs/zsh/zinputrc
new file mode 100644
index 000000000000..6121f3e21f16
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/zsh/zinputrc
@@ -0,0 +1,42 @@
+# Stolen from ArchWiki
+
+# create a zkbd compatible hash;
+# to add other keys to this hash, see: man 5 terminfo
+typeset -A key
+
+key[Home]=${terminfo[khome]}
+
+key[End]=${terminfo[kend]}
+key[Insert]=${terminfo[kich1]}
+key[Delete]=${terminfo[kdch1]}
+key[Up]=${terminfo[kcuu1]}
+key[Down]=${terminfo[kcud1]}
+key[Left]=${terminfo[kcub1]}
+key[Right]=${terminfo[kcuf1]}
+key[PageUp]=${terminfo[kpp]}
+key[PageDown]=${terminfo[knp]}
+
+# setup key accordingly
+[[ -n "${key[Home]}"     ]]  && bindkey  "${key[Home]}"     beginning-of-line
+[[ -n "${key[End]}"      ]]  && bindkey  "${key[End]}"      end-of-line
+[[ -n "${key[Insert]}"   ]]  && bindkey  "${key[Insert]}"   overwrite-mode
+[[ -n "${key[Delete]}"   ]]  && bindkey  "${key[Delete]}"   delete-char
+[[ -n "${key[Up]}"       ]]  && bindkey  "${key[Up]}"       up-line-or-history
+[[ -n "${key[Down]}"     ]]  && bindkey  "${key[Down]}"     down-line-or-history
+[[ -n "${key[Left]}"     ]]  && bindkey  "${key[Left]}"     backward-char
+[[ -n "${key[Right]}"    ]]  && bindkey  "${key[Right]}"    forward-char
+[[ -n "${key[PageUp]}"   ]]  && bindkey  "${key[PageUp]}"   beginning-of-buffer-or-history
+[[ -n "${key[PageDown]}" ]]  && bindkey  "${key[PageDown]}" end-of-buffer-or-history
+
+# Finally, make sure the terminal is in application mode, when zle is
+# active. Only then are the values from $terminfo valid.
+if (( ${+terminfo[smkx]} )) && (( ${+terminfo[rmkx]} )); then
+    function zle-line-init () {
+        printf '%s' "${terminfo[smkx]}"
+    }
+    function zle-line-finish () {
+        printf '%s' "${terminfo[rmkx]}"
+    }
+    zle -N zle-line-init
+    zle -N zle-line-finish
+fi
diff --git a/nixpkgs/nixos/modules/programs/zsh/zsh-autoenv.nix b/nixpkgs/nixos/modules/programs/zsh/zsh-autoenv.nix
new file mode 100644
index 000000000000..be93c96b2bc8
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/zsh/zsh-autoenv.nix
@@ -0,0 +1,28 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.zsh.zsh-autoenv;
+in {
+  options = {
+    programs.zsh.zsh-autoenv = {
+      enable = mkEnableOption (lib.mdDoc "zsh-autoenv");
+      package = mkOption {
+        default = pkgs.zsh-autoenv;
+        defaultText = literalExpression "pkgs.zsh-autoenv";
+        description = lib.mdDoc ''
+          Package to install for `zsh-autoenv` usage.
+        '';
+
+        type = types.package;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    programs.zsh.interactiveShellInit = ''
+      source ${cfg.package}/share/zsh-autoenv/autoenv.zsh
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/zsh/zsh-autosuggestions.nix b/nixpkgs/nixos/modules/programs/zsh/zsh-autosuggestions.nix
new file mode 100644
index 000000000000..d3a9c372e89b
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/zsh/zsh-autosuggestions.nix
@@ -0,0 +1,73 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.zsh.autosuggestions;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "programs" "zsh" "enableAutosuggestions" ] [ "programs" "zsh" "autosuggestions" "enable" ])
+  ];
+
+  options.programs.zsh.autosuggestions = {
+
+    enable = mkEnableOption (lib.mdDoc "zsh-autosuggestions");
+
+    highlightStyle = mkOption {
+      type = types.str;
+      default = "fg=8"; # https://github.com/zsh-users/zsh-autosuggestions/tree/v0.4.3#suggestion-highlight-style
+      description = lib.mdDoc "Highlight style for suggestions ({fore,back}ground color)";
+      example = "fg=cyan";
+    };
+
+    strategy = mkOption {
+      type = types.listOf (types.enum [ "history" "completion" "match_prev_cmd" ]);
+      default = [ "history" ];
+      description = lib.mdDoc ''
+        `ZSH_AUTOSUGGEST_STRATEGY` is an array that specifies how suggestions should be generated.
+        The strategies in the array are tried successively until a suggestion is found.
+        There are currently three built-in strategies to choose from:
+
+        - `history`: Chooses the most recent match from history.
+        - `completion`: Chooses a suggestion based on what tab-completion would suggest. (requires `zpty` module)
+        - `match_prev_cmd`: Like `history`, but chooses the most recent match whose preceding history item matches
+            the most recently executed command. Note that this strategy won't work as expected with ZSH options that
+            don't preserve the history order such as `HIST_IGNORE_ALL_DUPS` or `HIST_EXPIRE_DUPS_FIRST`.
+      '';
+    };
+
+    async = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Whether to fetch suggestions asynchronously";
+      example = false;
+    };
+
+    extraConfig = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      description = lib.mdDoc "Attribute set with additional configuration values";
+      example = literalExpression ''
+        {
+          "ZSH_AUTOSUGGEST_BUFFER_MAX_SIZE" = "20";
+        }
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    programs.zsh.interactiveShellInit = ''
+      source ${pkgs.zsh-autosuggestions}/share/zsh-autosuggestions/zsh-autosuggestions.zsh
+
+      export ZSH_AUTOSUGGEST_HIGHLIGHT_STYLE="${cfg.highlightStyle}"
+      export ZSH_AUTOSUGGEST_STRATEGY=(${concatStringsSep " " cfg.strategy})
+      ${optionalString (!cfg.async) "unset ZSH_AUTOSUGGEST_USE_ASYNC"}
+
+      ${concatStringsSep "\n" (mapAttrsToList (key: value: ''export ${key}="${value}"'') cfg.extraConfig)}
+    '';
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/zsh/zsh-syntax-highlighting.nix b/nixpkgs/nixos/modules/programs/zsh/zsh-syntax-highlighting.nix
new file mode 100644
index 000000000000..cec4be1cb01e
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/zsh/zsh-syntax-highlighting.nix
@@ -0,0 +1,108 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.programs.zsh.syntaxHighlighting;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "programs" "zsh" "enableSyntaxHighlighting" ] [ "programs" "zsh" "syntaxHighlighting" "enable" ])
+    (mkRenamedOptionModule [ "programs" "zsh" "syntax-highlighting" "enable" ] [ "programs" "zsh" "syntaxHighlighting" "enable" ])
+    (mkRenamedOptionModule [ "programs" "zsh" "syntax-highlighting" "highlighters" ] [ "programs" "zsh" "syntaxHighlighting" "highlighters" ])
+    (mkRenamedOptionModule [ "programs" "zsh" "syntax-highlighting" "patterns" ] [ "programs" "zsh" "syntaxHighlighting" "patterns" ])
+  ];
+
+  options = {
+    programs.zsh.syntaxHighlighting = {
+      enable = mkEnableOption (lib.mdDoc "zsh-syntax-highlighting");
+
+      highlighters = mkOption {
+        default = [ "main" ];
+
+        # https://github.com/zsh-users/zsh-syntax-highlighting/blob/master/docs/highlighters.md
+        type = types.listOf(types.enum([
+          "main"
+          "brackets"
+          "pattern"
+          "cursor"
+          "regexp"
+          "root"
+          "line"
+        ]));
+
+        description = lib.mdDoc ''
+          Specifies the highlighters to be used by zsh-syntax-highlighting.
+
+          The available highlighters are documented here:
+          https://github.com/zsh-users/zsh-syntax-highlighting/blob/master/docs/highlighters.md
+        '';
+      };
+
+      patterns = mkOption {
+        default = {};
+        type = types.attrsOf types.str;
+
+        example = literalExpression ''
+          {
+            "rm -rf *" = "fg=white,bold,bg=red";
+          }
+        '';
+
+        description = lib.mdDoc ''
+          Specifies custom patterns to be highlighted by zsh-syntax-highlighting.
+
+          Please refer to the docs for more information about the usage:
+          https://github.com/zsh-users/zsh-syntax-highlighting/blob/master/docs/highlighters/pattern.md
+        '';
+      };
+      styles = mkOption {
+        default = {};
+        type = types.attrsOf types.str;
+
+        example = literalExpression ''
+          {
+            "alias" = "fg=magenta,bold";
+          }
+        '';
+
+        description = lib.mdDoc ''
+          Specifies custom styles to be used by zsh-syntax-highlighting.
+
+          Please refer to the docs for more information about the usage:
+          https://github.com/zsh-users/zsh-syntax-highlighting/blob/master/docs/highlighters/main.md
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ zsh-syntax-highlighting ];
+
+    assertions = [
+      {
+        assertion = length(attrNames cfg.patterns) > 0 -> elem "pattern" cfg.highlighters;
+        message = ''
+          When highlighting patterns, "pattern" needs to be included in the list of highlighters.
+        '';
+      }
+    ];
+
+    programs.zsh.interactiveShellInit = with pkgs;
+      lib.mkAfter (lib.concatStringsSep "\n" ([
+        "source ${zsh-syntax-highlighting}/share/zsh-syntax-highlighting/zsh-syntax-highlighting.zsh"
+      ] ++ optional (length(cfg.highlighters) > 0)
+        "ZSH_HIGHLIGHT_HIGHLIGHTERS=(${concatStringsSep " " cfg.highlighters})"
+        ++ optionals (length(attrNames cfg.patterns) > 0)
+          (mapAttrsToList (
+            pattern: design:
+            "ZSH_HIGHLIGHT_PATTERNS+=('${pattern}' '${design}')"
+          ) cfg.patterns)
+        ++ optionals (length(attrNames cfg.styles) > 0)
+          (mapAttrsToList (
+            styles: design:
+            "ZSH_HIGHLIGHT_STYLES[${styles}]='${design}'"
+          ) cfg.styles)
+      ));
+  };
+}
diff --git a/nixpkgs/nixos/modules/programs/zsh/zsh.nix b/nixpkgs/nixos/modules/programs/zsh/zsh.nix
new file mode 100644
index 000000000000..cad639f299c8
--- /dev/null
+++ b/nixpkgs/nixos/modules/programs/zsh/zsh.nix
@@ -0,0 +1,319 @@
+# This module defines global configuration for zsh (the Z shell).
+
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfge = config.environment;
+
+  cfg = config.programs.zsh;
+  opt = options.programs.zsh;
+
+  zshAliases = concatStringsSep "\n" (
+    mapAttrsFlatten (k: v: "alias -- ${k}=${escapeShellArg v}")
+      (filterAttrs (k: v: v != null) cfg.shellAliases)
+  );
+
+  zshStartupNotes = ''
+    # Note that generated /etc/zprofile and /etc/zshrc files do a lot of
+    # non-standard setup to make zsh usable with no configuration by default.
+    #
+    # This means that unless you explicitly and meticulously override everything
+    # generated, interactions between your ~/.zshrc and these files are likely
+    # to be rather surprising.
+    #
+    # Note, however, that you can disable loading of the generated /etc/zprofile
+    # and /etc/zshrc (you can't disable loading of /etc/zshenv, but it is
+    # designed not to set anything surprising) by setting the `no_global_rcs`
+    # option in ~/.zshenv:
+    #
+    #   echo setopt no_global_rcs >> ~/.zshenv
+    #
+    # See "STARTUP/SHUTDOWN FILES" section of zsh(1) for more info.
+  '';
+
+in
+
+{
+
+  options = {
+
+    programs.zsh = {
+
+      enable = mkOption {
+        default = false;
+        description = lib.mdDoc ''
+          Whether to configure zsh as an interactive shell. To enable zsh for
+          a particular user, use the {option}`users.users.<name?>.shell`
+          option for that user. To enable zsh system-wide use the
+          {option}`users.defaultUserShell` option.
+        '';
+        type = types.bool;
+      };
+
+      shellAliases = mkOption {
+        default = { };
+        description = lib.mdDoc ''
+          Set of aliases for zsh shell, which overrides {option}`environment.shellAliases`.
+          See {option}`environment.shellAliases` for an option format description.
+        '';
+        type = with types; attrsOf (nullOr (either str path));
+      };
+
+      shellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during zsh shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      loginShellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during zsh login shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      interactiveShellInit = mkOption {
+        default = "";
+        description = lib.mdDoc ''
+          Shell script code called during interactive zsh shell initialisation.
+        '';
+        type = types.lines;
+      };
+
+      promptInit = mkOption {
+        default = ''
+          # Note that to manually override this in ~/.zshrc you should run `prompt off`
+          # before setting your PS1, PROMPT, etc. Otherwise this is likely to interact with
+          # your ~/.zshrc configuration in unexpected ways, as the default prompt sets
+          # a lot of different prompt variables.
+          autoload -U promptinit && promptinit && prompt suse && setopt prompt_sp
+        '';
+        description = lib.mdDoc ''
+          Shell script code used to initialise the zsh prompt.
+        '';
+        type = types.lines;
+      };
+
+      histSize = mkOption {
+        default = 2000;
+        description = lib.mdDoc ''
+          Change history size.
+        '';
+        type = types.int;
+      };
+
+      histFile = mkOption {
+        default = "$HOME/.zsh_history";
+        description = lib.mdDoc ''
+          Change history file.
+        '';
+        type = types.str;
+      };
+
+      setOptions = mkOption {
+        type = types.listOf types.str;
+        default = [
+          "HIST_IGNORE_DUPS"
+          "SHARE_HISTORY"
+          "HIST_FCNTL_LOCK"
+        ];
+        example = [ "EXTENDED_HISTORY" "RM_STAR_WAIT" ];
+        description = lib.mdDoc ''
+          Configure zsh options. See
+          {manpage}`zshoptions(1)`.
+        '';
+      };
+
+      enableCompletion = mkOption {
+        default = true;
+        description = lib.mdDoc ''
+          Enable zsh completion for all interactive zsh shells.
+        '';
+        type = types.bool;
+      };
+
+      enableBashCompletion = mkOption {
+        default = false;
+        description = lib.mdDoc ''
+          Enable compatibility with bash's programmable completion system.
+        '';
+        type = types.bool;
+      };
+
+      enableGlobalCompInit = mkOption {
+        default = cfg.enableCompletion;
+        defaultText = literalExpression "config.${opt.enableCompletion}";
+        description = lib.mdDoc ''
+          Enable execution of compinit call for all interactive zsh shells.
+
+          This option can be disabled if the user wants to extend their
+          `fpath` and run a custom `compinit`
+          call in their local config instead.
+        '';
+        type = types.bool;
+      };
+
+      enableLsColors = mkOption {
+        default = true;
+        description = lib.mdDoc ''
+          Enable extra colors in directory listings (used by `ls` and `tree`).
+        '';
+        type = types.bool;
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    programs.zsh.shellAliases = mapAttrs (name: mkDefault) cfge.shellAliases;
+
+    environment.etc.zshenv.text =
+      ''
+        # /etc/zshenv: DO NOT EDIT -- this file has been generated automatically.
+        # This file is read for all shells.
+
+        # Only execute this file once per shell.
+        if [ -n "''${__ETC_ZSHENV_SOURCED-}" ]; then return; fi
+        __ETC_ZSHENV_SOURCED=1
+
+        if [ -z "''${__NIXOS_SET_ENVIRONMENT_DONE-}" ]; then
+            . ${config.system.build.setEnvironment}
+        fi
+
+        HELPDIR="${pkgs.zsh}/share/zsh/$ZSH_VERSION/help"
+
+        # Tell zsh how to find installed completions.
+        for p in ''${(z)NIX_PROFILES}; do
+            fpath=($p/share/zsh/site-functions $p/share/zsh/$ZSH_VERSION/functions $p/share/zsh/vendor-completions $fpath)
+        done
+
+        # Setup custom shell init stuff.
+        ${cfge.shellInit}
+
+        ${cfg.shellInit}
+
+        # Read system-wide modifications.
+        if test -f /etc/zshenv.local; then
+            . /etc/zshenv.local
+        fi
+      '';
+
+    environment.etc.zprofile.text =
+      ''
+        # /etc/zprofile: DO NOT EDIT -- this file has been generated automatically.
+        # This file is read for login shells.
+        #
+        ${zshStartupNotes}
+
+        # Only execute this file once per shell.
+        if [ -n "''${__ETC_ZPROFILE_SOURCED-}" ]; then return; fi
+        __ETC_ZPROFILE_SOURCED=1
+
+        # Setup custom login shell init stuff.
+        ${cfge.loginShellInit}
+
+        ${cfg.loginShellInit}
+
+        # Read system-wide modifications.
+        if test -f /etc/zprofile.local; then
+            . /etc/zprofile.local
+        fi
+      '';
+
+    environment.etc.zshrc.text =
+      ''
+        # /etc/zshrc: DO NOT EDIT -- this file has been generated automatically.
+        # This file is read for interactive shells.
+        #
+        ${zshStartupNotes}
+
+        # Only execute this file once per shell.
+        if [ -n "$__ETC_ZSHRC_SOURCED" -o -n "$NOSYSZSHRC" ]; then return; fi
+        __ETC_ZSHRC_SOURCED=1
+
+        ${optionalString (cfg.setOptions != []) ''
+          # Set zsh options.
+          setopt ${concatStringsSep " " cfg.setOptions}
+        ''}
+
+        # Alternative method of determining short and full hostname.
+        HOST=${config.networking.fqdnOrHostName}
+
+        # Setup command line history.
+        # Don't export these, otherwise other shells (bash) will try to use same HISTFILE.
+        SAVEHIST=${toString cfg.histSize}
+        HISTSIZE=${toString cfg.histSize}
+        HISTFILE=${cfg.histFile}
+
+        # Configure sane keyboard defaults.
+        . /etc/zinputrc
+
+        ${optionalString cfg.enableGlobalCompInit ''
+          # Enable autocompletion.
+          autoload -U compinit && compinit
+        ''}
+
+        ${optionalString cfg.enableBashCompletion ''
+          # Enable compatibility with bash's completion system.
+          autoload -U bashcompinit && bashcompinit
+        ''}
+
+        # Setup custom interactive shell init stuff.
+        ${cfge.interactiveShellInit}
+
+        ${cfg.interactiveShellInit}
+
+        ${optionalString cfg.enableLsColors ''
+          # Extra colors for directory listings.
+          eval "$(${pkgs.coreutils}/bin/dircolors -b)"
+        ''}
+
+        # Setup aliases.
+        ${zshAliases}
+
+        # Setup prompt.
+        ${cfg.promptInit}
+
+        # Disable some features to support TRAMP.
+        if [ "$TERM" = dumb ]; then
+            unsetopt zle prompt_cr prompt_subst
+            unset RPS1 RPROMPT
+            PS1='$ '
+            PROMPT='$ '
+        fi
+
+        # Read system-wide modifications.
+        if test -f /etc/zshrc.local; then
+            . /etc/zshrc.local
+        fi
+      '';
+
+    # Bug in nix flakes:
+    # If we use `.source` here the path is garbage collected even though we point to it with a symlink
+    # see https://github.com/NixOS/nixpkgs/issues/132732
+    environment.etc.zinputrc.text = builtins.readFile ./zinputrc;
+
+    environment.systemPackages = [ pkgs.zsh ]
+      ++ optional cfg.enableCompletion pkgs.nix-zsh-completions;
+
+    environment.pathsToLink = optional cfg.enableCompletion "/share/zsh";
+
+    #users.defaultUserShell = mkDefault "/run/current-system/sw/bin/zsh";
+
+    environment.shells =
+      [
+        "/run/current-system/sw/bin/zsh"
+        "${pkgs.zsh}/bin/zsh"
+      ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/rename.nix b/nixpkgs/nixos/modules/rename.nix
new file mode 100644
index 000000000000..3fab863adb7f
--- /dev/null
+++ b/nixpkgs/nixos/modules/rename.nix
@@ -0,0 +1,133 @@
+{ lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    mkAliasOptionModuleMD
+    mkRemovedOptionModule;
+in
+{
+  imports = [
+    /*
+    This file defines some renaming/removing options for backwards compatibility
+
+    It should ONLY be used when the relevant module can't define these imports
+    itself, such as when the module was removed completely.
+    See https://github.com/NixOS/nixpkgs/pull/61570 for explanation
+    */
+
+    # This alias module can't be where _module.check is defined because it would
+    # be added to submodules as well there
+    (mkAliasOptionModuleMD [ "environment" "checkConfigurationOptions" ] [ "_module" "check" ])
+
+    # Completely removed modules
+    (mkRemovedOptionModule [ "environment" "blcr" "enable" ] "The BLCR module has been removed")
+    (mkRemovedOptionModule [ "fonts" "fontconfig" "penultimate" ] "The corresponding package has removed from nixpkgs.")
+    (mkRemovedOptionModule [ "hardware" "brightnessctl" ] ''
+      The brightnessctl module was removed because newer versions of
+      brightnessctl don't require the udev rules anymore (they can use the
+      systemd-logind API). Instead of using the module you can now
+      simply add the brightnessctl package to environment.systemPackages.
+    '')
+    (mkRemovedOptionModule [ "hardware" "u2f" ] ''
+      The U2F module was removed, as all it did was add the
+      udev rules from libu2f-host to the system. Udev gained native support
+      for handling FIDO security tokens, so this isn't necessary anymore.
+    '')
+    (mkRemovedOptionModule [ "hardware" "xow" ] ''
+      The xow package was removed from nixpkgs. Upstream has deprecated
+      the project and users are urged to switch to xone.
+    '')
+    (mkRemovedOptionModule [ "networking" "vpnc" ] "Use environment.etc.\"vpnc/service.conf\" instead.")
+    (mkRemovedOptionModule [ "networking" "wicd" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "programs" "gnome-documents" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "programs" "tilp2" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "programs" "way-cooler" ] ("way-cooler is abandoned by its author: " +
+      "https://way-cooler.org/blog/2020/01/09/way-cooler-post-mortem.html"))
+    (mkRemovedOptionModule [ "security" "hideProcessInformation" ] ''
+        The hidepid module was removed, since the underlying machinery
+        is broken when using cgroups-v2.
+    '')
+    (mkRemovedOptionModule [ "services" "baget" "enable" ] "The baget module was removed due to the upstream package being unmaintained.")
+    (mkRemovedOptionModule [ "services" "beegfs" ] "The BeeGFS module has been removed")
+    (mkRemovedOptionModule [ "services" "beegfsEnable" ] "The BeeGFS module has been removed")
+    (mkRemovedOptionModule [ "services" "cgmanager" "enable"] "cgmanager was deprecated by lxc and therefore removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "chronos" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "couchpotato" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "dd-agent" ] "dd-agent was removed from nixpkgs in favor of the newer datadog-agent.")
+    (mkRemovedOptionModule [ "services" "dnscrypt-proxy" ] "Use services.dnscrypt-proxy2 instead")
+    (mkRemovedOptionModule [ "services" "exhibitor" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "firefox" "syncserver" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "flashpolicyd" ] "The flashpolicyd module has been removed. Adobe Flash Player is deprecated.")
+    (mkRemovedOptionModule [ "services" "fourStore" ] "The fourStore module has been removed")
+    (mkRemovedOptionModule [ "services" "fourStoreEndpoint" ] "The fourStoreEndpoint module has been removed")
+    (mkRemovedOptionModule [ "services" "fprot" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "frab" ] "The frab module has been removed")
+    (mkRemovedOptionModule [ "services" "ihatemoney" ] "The ihatemoney module has been removed for lack of downstream maintainer")
+    (mkRemovedOptionModule [ "services" "kippo" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "mailpile" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "marathon" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "mathics" ] "The Mathics module has been removed")
+    (mkRemovedOptionModule [ "services" "meguca" ] "Use meguca has been removed from nixpkgs")
+    (mkRemovedOptionModule [ "services" "mesos" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "moinmoin" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "mwlib" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "pantheon" "files" ] ''
+      This module was removed, please add pkgs.pantheon.elementary-files to environment.systemPackages directly.
+    '')
+    (mkRemovedOptionModule [ "services" "prey" ] ''
+      prey-bash-client is deprecated upstream
+    '')
+    (mkRemovedOptionModule [ "services" "quagga" ] "the corresponding package has been removed from nixpkgs")
+    (mkRemovedOptionModule [ "services" "railcar" ] "the corresponding package has been removed from nixpkgs")
+    (mkRemovedOptionModule [ "services" "seeks" ] "")
+    (mkRemovedOptionModule [ "services" "ssmtp" ] ''
+      The ssmtp package and the corresponding module have been removed due to
+      the program being unmaintained. The options `programs.msmtp.*` can be
+      used instead.
+    '')
+    (mkRemovedOptionModule [ "services" "venus" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "wakeonlan"] "This module was removed in favor of enabling it with networking.interfaces.<name>.wakeOnLan")
+    (mkRemovedOptionModule [ "services" "winstone" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "xserver" "displayManager" "auto" ] ''
+      The services.xserver.displayManager.auto module has been removed
+      because it was only intended for use in internal NixOS tests, and gave the
+      false impression of it being a special display manager when it's actually
+      LightDM. Please use the services.xserver.displayManager.autoLogin options
+      instead, or any other display manager in NixOS as they all support auto-login.
+    '')
+    (mkRemovedOptionModule [ "services" "xserver" "multitouch" ] ''
+      services.xserver.multitouch (which uses xf86_input_mtrack) has been removed
+      as the underlying package isn't being maintained. Working alternatives are
+      libinput and synaptics.
+    '')
+    (mkRemovedOptionModule [ "virtualisation" "rkt" ] "The rkt module has been removed, it was archived by upstream")
+    (mkRemovedOptionModule [ "services" "racoon" ] ''
+      The racoon module has been removed, because the software project was abandoned upstream.
+    '')
+    (mkRemovedOptionModule [ "services" "shellinabox" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "gogoclient" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "virtuoso" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "openfire" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "riak" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "cryptpad" ] "The corresponding package was removed from nixpkgs.")
+    (mkRemovedOptionModule [ "services" "rtsp-simple-server" ] "Package has been completely rebranded by upstream as mediamtx, and thus the service and the package were renamed in NixOS as well.")
+    (mkRemovedOptionModule [ "services" "prayer" ] "The corresponding package was removed from nixpkgs.")
+
+    (mkRemovedOptionModule [ "i18n" "inputMethod" "fcitx" ] "The fcitx module has been removed. Please use fcitx5 instead")
+    (mkRemovedOptionModule [ "services" "dhcpd4" ] ''
+      The dhcpd4 module has been removed because ISC DHCP reached its end of life.
+      See https://www.isc.org/blogs/isc-dhcp-eol/ for details.
+      Please switch to a different implementation like kea or dnsmasq.
+    '')
+    (mkRemovedOptionModule [ "services" "dhcpd6" ] ''
+      The dhcpd6 module has been removed because ISC DHCP reached its end of life.
+      See https://www.isc.org/blogs/isc-dhcp-eol/ for details.
+      Please switch to a different implementation like kea or dnsmasq.
+    '')
+    (mkRemovedOptionModule [ "services" "tedicross" ] ''
+      The corresponding package was broken and removed from nixpkgs.
+    '')
+
+    # Do NOT add any option renames here, see top of the file
+  ];
+}
diff --git a/nixpkgs/nixos/modules/security/acme/default.md b/nixpkgs/nixos/modules/security/acme/default.md
new file mode 100644
index 000000000000..31548ad181a7
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/acme/default.md
@@ -0,0 +1,354 @@
+# SSL/TLS Certificates with ACME {#module-security-acme}
+
+NixOS supports automatic domain validation & certificate retrieval and
+renewal using the ACME protocol. Any provider can be used, but by default
+NixOS uses Let's Encrypt. The alternative ACME client
+[lego](https://go-acme.github.io/lego/) is used under
+the hood.
+
+Automatic cert validation and configuration for Apache and Nginx virtual
+hosts is included in NixOS; however, if you would like to generate a wildcard
+cert or you are not using a web server, you will have to configure DNS-based
+validation.
+
+## Prerequisites {#module-security-acme-prerequisites}
+
+To use the ACME module, you must accept the provider's terms of service
+by setting [](#opt-security.acme.acceptTerms)
+to `true`. The Let's Encrypt ToS can be found
+[here](https://letsencrypt.org/repository/).
+
+You must also set an email address to be used when creating accounts with
+Let's Encrypt. You can set this for all certs with
+[](#opt-security.acme.defaults.email)
+and/or on a per-cert basis with
+[](#opt-security.acme.certs._name_.email).
+This address is only used for registration and renewal reminders,
+and cannot be used to administer the certificates in any way.
+
+Alternatively, you can use a different ACME server by changing the
+[](#opt-security.acme.defaults.server) option
+to a provider of your choosing, or just change the server for one cert with
+[](#opt-security.acme.certs._name_.server).
+
+You will need an HTTP server or DNS server for verification. For HTTP,
+the server must have a webroot defined that can serve
+{file}`.well-known/acme-challenge`. This directory must be
+writeable by the user that will run the ACME client. For DNS, you must
+set up credentials with your provider/server for use with lego.
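+
+As a minimal sketch, the prerequisites above translate into a configuration
+like the following (the email address is a placeholder, and the commented-out
+`server` line shows how a different ACME endpoint, such as the Let's Encrypt
+staging environment, could be selected):
+
+```
+security.acme.acceptTerms = true;
+security.acme.defaults.email = "admin+acme@example.com";
+# security.acme.defaults.server = "https://acme-staging-v02.api.letsencrypt.org/directory";
+```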
+
+## Using ACME certificates in Nginx {#module-security-acme-nginx}
+
+NixOS supports fetching ACME certificates for you by setting
+`enableACME = true;` in a virtualHost config. We first create self-signed
+placeholder certificates in place of the real ACME certs. The placeholder
+certs are overwritten when the ACME certs arrive. For
+`foo.example.com` the config would look like this:
+
+```
+security.acme.acceptTerms = true;
+security.acme.defaults.email = "admin+acme@example.com";
+services.nginx = {
+  enable = true;
+  virtualHosts = {
+    "foo.example.com" = {
+      forceSSL = true;
+      enableACME = true;
+      # All serverAliases will be added as extra domain names on the certificate.
+      serverAliases = [ "bar.example.com" ];
+      locations."/" = {
+        root = "/var/www";
+      };
+    };
+
+    # We can also add a different vhost and reuse the same certificate
+    # but we have to append extraDomainNames manually beforehand:
+    # security.acme.certs."foo.example.com".extraDomainNames = [ "baz.example.com" ];
+    "baz.example.com" = {
+      forceSSL = true;
+      useACMEHost = "foo.example.com";
+      locations."/" = {
+        root = "/var/www";
+      };
+    };
+  };
+}
+```
+
+## Using ACME certificates in Apache/httpd {#module-security-acme-httpd}
+
+Using ACME certificates with Apache virtual hosts is identical
+to using them with Nginx. The attribute names are all the same, just replace
+"nginx" with "httpd" where appropriate.
+
+## Manual configuration of HTTP-01 validation {#module-security-acme-configuring}
+
+First off you will need to set up a virtual host to serve the challenges.
+This example uses a vhost called `certs.example.com`, with
+the intent that you will generate certs for all your vhosts and redirect
+everyone to HTTPS.
+
+```
+security.acme.acceptTerms = true;
+security.acme.defaults.email = "admin+acme@example.com";
+
+# /var/lib/acme/.challenges must be writable by the ACME user
+# and readable by the Nginx user. The easiest way to achieve
+# this is to add the Nginx user to the ACME group.
+users.users.nginx.extraGroups = [ "acme" ];
+
+services.nginx = {
+  enable = true;
+  virtualHosts = {
+    "acmechallenge.example.com" = {
+      # Catchall vhost, will redirect users to HTTPS for all vhosts
+      serverAliases = [ "*.example.com" ];
+      locations."/.well-known/acme-challenge" = {
+        root = "/var/lib/acme/.challenges";
+      };
+      locations."/" = {
+        return = "301 https://$host$request_uri";
+      };
+    };
+  };
+}
+# Alternative config for Apache
+users.users.wwwrun.extraGroups = [ "acme" ];
+services.httpd = {
+  enable = true;
+  virtualHosts = {
+    "acmechallenge.example.com" = {
+      # Catchall vhost, will redirect users to HTTPS for all vhosts
+      serverAliases = [ "*.example.com" ];
+      # /var/lib/acme/.challenges must be writable by the ACME user and readable by the Apache user.
+      # By default, this is the case.
+      documentRoot = "/var/lib/acme/.challenges";
+      extraConfig = ''
+        RewriteEngine On
+        RewriteCond %{HTTPS} off
+        RewriteCond %{REQUEST_URI} !^/\.well-known/acme-challenge [NC]
+        RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI} [R=301]
+      '';
+    };
+  };
+}
+```
+
+Now you need to configure ACME to generate a certificate.
+
+```
+security.acme.certs."foo.example.com" = {
+  webroot = "/var/lib/acme/.challenges";
+  email = "foo@example.com";
+  # Ensure that the web server you use can read the generated certs
+  # Take a look at the group option for the web server you choose.
+  group = "nginx";
+  # Since we have a wildcard vhost to handle port 80,
+  # we can generate certs for anything!
+  # Just make sure your DNS resolves them.
+  extraDomainNames = [ "mail.example.com" ];
+};
+```
+
+The private key {file}`key.pem` and certificate
+{file}`fullchain.pem` will be put into
+{file}`/var/lib/acme/foo.example.com`.
+
+Refer to [](#ch-options) for all available configuration
+options for the [security.acme](#opt-security.acme.certs)
+module.
+
+## Configuring ACME for DNS validation {#module-security-acme-config-dns}
+
+This is useful if you want to generate a wildcard certificate, since
+ACME servers will only hand out wildcard certs over DNS validation.
+There are a number of supported DNS providers and servers you can utilise;
+see the [lego docs](https://go-acme.github.io/lego/dns/)
+for provider/server specific configuration values. For the sake of these
+docs, we will provide a fully self-hosted example using bind.
+
+```
+services.bind = {
+  enable = true;
+  extraConfig = ''
+    include "/var/lib/secrets/dnskeys.conf";
+  '';
+  zones = [
+    rec {
+      name = "example.com";
+      file = "/var/db/bind/${name}";
+      master = true;
+      extraConfig = "allow-update { key rfc2136key.example.com.; };";
+    }
+  ];
+}
+
+# Now we can configure ACME
+security.acme.acceptTerms = true;
+security.acme.defaults.email = "admin+acme@example.com";
+security.acme.certs."example.com" = {
+  domain = "*.example.com";
+  dnsProvider = "rfc2136";
+  environmentFile = "/var/lib/secrets/certs.secret";
+  # We don't need to wait for propagation since this is a local DNS server
+  dnsPropagationCheck = false;
+};
+```
+
+The {file}`dnskeys.conf` and {file}`certs.secret`
+must be kept secure and thus you should not keep their contents in your
+Nix config. Instead, generate them one time with a systemd service:
+
+```
+systemd.services.dns-rfc2136-conf = {
+  requiredBy = ["acme-example.com.service" "bind.service"];
+  before = ["acme-example.com.service" "bind.service"];
+  unitConfig = {
+    ConditionPathExists = "!/var/lib/secrets/dnskeys.conf";
+  };
+  serviceConfig = {
+    Type = "oneshot";
+    UMask = 0077;
+  };
+  path = [ pkgs.bind ];
+  script = ''
+    mkdir -p /var/lib/secrets
+    chmod 755 /var/lib/secrets
+    tsig-keygen rfc2136key.example.com > /var/lib/secrets/dnskeys.conf
+    chown named:root /var/lib/secrets/dnskeys.conf
+    chmod 400 /var/lib/secrets/dnskeys.conf
+
+    # extract secret value from the dnskeys.conf
+    while read x y; do if [ "$x" = "secret" ]; then secret="''${y:1:''${#y}-3}"; fi; done < /var/lib/secrets/dnskeys.conf
+
+    cat > /var/lib/secrets/certs.secret << EOF
+    RFC2136_NAMESERVER='127.0.0.1:53'
+    RFC2136_TSIG_ALGORITHM='hmac-sha256.'
+    RFC2136_TSIG_KEY='rfc2136key.example.com'
+    RFC2136_TSIG_SECRET='$secret'
+    EOF
+    chmod 400 /var/lib/secrets/certs.secret
+  '';
+};
+```
+
+Now you're all set to generate certs! You should monitor the first invocation
+by running `systemctl start acme-example.com.service &
+journalctl -fu acme-example.com.service` and watching its log output.
+
+## Using DNS validation with web server virtual hosts {#module-security-acme-config-dns-with-vhosts}
+
+It is possible to use DNS-01 validation with all certificates,
+including those automatically configured via the Nginx/Apache
+[`enableACME`](#opt-services.nginx.virtualHosts._name_.enableACME)
+option. This configuration pattern is fully
+supported and part of the module's test suite for Nginx + Apache.
+
+You must follow the guide above on configuring DNS-01 validation
+first, however instead of setting the options for one certificate
+(e.g. [](#opt-security.acme.certs._name_.dnsProvider))
+you will set them as defaults
+(e.g. [](#opt-security.acme.defaults.dnsProvider)).
+
+```
+# Configure ACME appropriately
+security.acme.acceptTerms = true;
+security.acme.defaults.email = "admin+acme@example.com";
+security.acme.defaults = {
+  dnsProvider = "rfc2136";
+  environmentFile = "/var/lib/secrets/certs.secret";
+  # We don't need to wait for propagation since this is a local DNS server
+  dnsPropagationCheck = false;
+};
+
+# For each virtual host you would like to use DNS-01 validation with,
+# set acmeRoot = null
+services.nginx = {
+  enable = true;
+  virtualHosts = {
+    "foo.example.com" = {
+      enableACME = true;
+      acmeRoot = null;
+    };
+  };
+}
+```
+
+And that's it! Next time your configuration is rebuilt, or when
+you add a new virtualHost, it will be DNS-01 validated.
+
+## Using ACME with services demanding root owned certificates {#module-security-acme-root-owned}
+
+Some services refuse to start if the configured certificate files
+are not owned by root. PostgreSQL and OpenSMTPD are examples of these.
+There is no way to change the user the ACME module uses (it will always be
+`acme`); however, you can use systemd's
+`LoadCredential` feature to resolve this elegantly.
+Below is an example configuration for OpenSMTPD, but this pattern
+can be applied to any service.
+
+```
+# Configure ACME however you like (DNS or HTTP validation), adding
+# the following configuration for the relevant certificate.
+# Note: You cannot use `systemctl reload` here as that would mean
+# the LoadCredential configuration below would be skipped and
+# the service would continue to use old certificates.
+security.acme.certs."mail.example.com".postRun = ''
+  systemctl restart opensmtpd
+'';
+
+# Now you must augment OpenSMTPD's systemd service to load
+# the certificate files.
+systemd.services.opensmtpd.requires = ["acme-finished-mail.example.com.target"];
+systemd.services.opensmtpd.serviceConfig.LoadCredential = let
+  certDir = config.security.acme.certs."mail.example.com".directory;
+in [
+  "cert.pem:${certDir}/cert.pem"
+  "key.pem:${certDir}/key.pem"
+];
+
+# Finally, configure OpenSMTPD to use these certs.
+services.opensmtpd = let
+  credsDir = "/run/credentials/opensmtpd.service";
+in {
+  enable = true;
+  setSendmail = false;
+  serverConfiguration = ''
+    pki mail.example.com cert "${credsDir}/cert.pem"
+    pki mail.example.com key "${credsDir}/key.pem"
+    listen on localhost tls pki mail.example.com
+    action act1 relay host smtp://127.0.0.1:10027
+    match for local action act1
+  '';
+};
+```
+
+## Regenerating certificates {#module-security-acme-regenerate}
+
+Should you need to regenerate a particular certificate in a hurry, such
+as when a vulnerability is found in Let's Encrypt, there is now a convenient
+mechanism for doing so. Running
+`systemctl clean --what=state acme-example.com.service`
+will remove all certificate files and the account data for the given domain,
+allowing you to then `systemctl start acme-example.com.service`
+to generate fresh ones.
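+
+For the `example.com` certificate used throughout this section, the full
+sequence looks like this:
+
+```
+# Remove all certificate files and account data for this domain
+systemctl clean --what=state acme-example.com.service
+# Request fresh certificates
+systemctl start acme-example.com.service
+```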
+
+## Fixing JWS Verification error {#module-security-acme-fix-jws}
+
+It is possible that your account credentials file may become corrupt and need
+to be regenerated. In this scenario lego will produce the error `JWS verification error`.
+The solution is to simply delete the associated accounts file and
+re-run the affected service(s).
+
+```
+# Find the accounts folder for the certificate
+systemctl cat acme-example.com.service | grep -Po 'accounts/[^:]*'
+export accountdir="$(!!)"
+# Move this folder to some place else
+mv /var/lib/acme/.lego/$accountdir{,.bak}
+# Recreate the folder using systemd-tmpfiles
+systemd-tmpfiles --create
+# Get a new account and reissue certificates
+# Note: Do this for all certs that share the same account email address
+systemctl start acme-example.com.service
+```
diff --git a/nixpkgs/nixos/modules/security/acme/default.nix b/nixpkgs/nixos/modules/security/acme/default.nix
new file mode 100644
index 000000000000..7cc302969fb6
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/acme/default.nix
@@ -0,0 +1,1035 @@
+{ config, lib, pkgs, options, ... }:
+with lib;
+let
+
+
+  cfg = config.security.acme;
+  opt = options.security.acme;
+  user = if cfg.useRoot then "root" else "acme";
+
+  # Used to calculate timer accuracy for coalescing
+  numCerts = length (builtins.attrNames cfg.certs);
+  _24hSecs = 60 * 60 * 24;
+
+  # Used to make unique paths for each cert/account config set
+  mkHash = with builtins; val: substring 0 20 (hashString "sha256" val);
+  mkAccountHash = acmeServer: data: mkHash "${toString acmeServer} ${data.keyType} ${data.email}";
+  accountDirRoot = "/var/lib/acme/.lego/accounts/";
+
+  lockdir = "/run/acme/";
+  concurrencyLockfiles = map (n: "${toString n}.lock") (lib.range 1 cfg.maxConcurrentRenewals);
+  # Assign elements of `baseList` to each element of `needAssignmentList`, until the latter is exhausted.
+  # returns: [{fst = "element of baseList"; snd = "element of needAssignmentList"}]
+  roundRobinAssign = baseList: needAssignmentList:
+    if baseList == [] then []
+    else _rrCycler baseList baseList needAssignmentList;
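+  # e.g. roundRobinAssign [ "a" "b" ] [ 1 2 3 ]
+  #   => [ { fst = "a"; snd = 1; } { fst = "b"; snd = 2; } { fst = "a"; snd = 3; } ]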
+  _rrCycler = with builtins; origBaseList: workingBaseList: needAssignmentList:
+    if (workingBaseList == [] || needAssignmentList == [])
+    then []
+    else
+      [{ fst = head workingBaseList; snd = head needAssignmentList;}] ++
+      _rrCycler origBaseList (if (tail workingBaseList == []) then origBaseList else tail workingBaseList) (tail needAssignmentList);
+  attrsToList = mapAttrsToList (attrname: attrval: {name = attrname; value = attrval;});
+  # for an AttrSet `funcsAttrs` having functions as values, apply single arguments from
+  # `argsList` to them in a round-robin manner.
+  # Returns an attribute set with the applied functions as values.
+  roundRobinApplyAttrs = funcsAttrs: argsList: lib.listToAttrs (map (x: {inherit (x.snd) name; value = x.snd.value x.fst;}) (roundRobinAssign argsList (attrsToList funcsAttrs)));
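+  # e.g. roundRobinApplyAttrs { a = f; b = g; c = h; } [ "1.lock" "2.lock" ]
+  #   => { a = f "1.lock"; b = g "2.lock"; c = h "1.lock"; }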
+  wrapInFlock = lockfilePath: script:
+    # explainer: https://stackoverflow.com/a/60896531
+    ''
+      exec {LOCKFD}> ${lockfilePath}
+      echo "Waiting to acquire lock ${lockfilePath}"
+      ${pkgs.flock}/bin/flock ''${LOCKFD} || exit 1
+      echo "Acquired lock ${lockfilePath}"
+    ''
+    + script + "\n"
+    + ''echo "Releasing lock ${lockfilePath}"  # only released after process exit'';
+
+
+  # There are many services required to make cert renewals work.
+  # They all follow a common structure:
+  #   - They inherit this commonServiceConfig
+  #   - They all run as the acme user
+  #   - They all use BindPaths and StateDirectory where possible
+  #     to set up a sort of build environment in /tmp
+  # The Group can vary depending on what the user has specified in
+  # security.acme.certs.<cert>.group on some of the services.
+  commonServiceConfig = {
+    Type = "oneshot";
+    User = user;
+    Group = mkDefault "acme";
+    UMask = "0022";
+    StateDirectoryMode = "750";
+    ProtectSystem = "strict";
+    ReadWritePaths = [
+      "/var/lib/acme"
+      lockdir
+    ];
+    PrivateTmp = true;
+
+    WorkingDirectory = "/tmp";
+
+    CapabilityBoundingSet = [ "" ];
+    DevicePolicy = "closed";
+    LockPersonality = true;
+    MemoryDenyWriteExecute = true;
+    NoNewPrivileges = true;
+    PrivateDevices = true;
+    ProtectClock = true;
+    ProtectHome = true;
+    ProtectHostname = true;
+    ProtectControlGroups = true;
+    ProtectKernelLogs = true;
+    ProtectKernelModules = true;
+    ProtectKernelTunables = true;
+    ProtectProc = "invisible";
+    ProcSubset = "pid";
+    RemoveIPC = true;
+    RestrictAddressFamilies = [
+      "AF_INET"
+      "AF_INET6"
+    ];
+    RestrictNamespaces = true;
+    RestrictRealtime = true;
+    RestrictSUIDSGID = true;
+    SystemCallArchitectures = "native";
+    SystemCallFilter = [
+      # 1. allow a reasonable set of syscalls
+      "@system-service @resources"
+      # 2. and deny unreasonable ones
+      "~@privileged"
+      # 3. then allow the required subset within denied groups
+      "@chown"
+    ];
+  };
+
+  # In order to avoid race conditions creating the CA for selfsigned certs,
+  # we have a separate service which will create the necessary files.
+  selfsignCAService = {
+    description = "Generate self-signed certificate authority";
+
+    path = with pkgs; [ minica ];
+
+    unitConfig = {
+      ConditionPathExists = "!/var/lib/acme/.minica/key.pem";
+      StartLimitIntervalSec = 0;
+    };
+
+    serviceConfig = commonServiceConfig // {
+      StateDirectory = "acme/.minica";
+      BindPaths = "/var/lib/acme/.minica:/tmp/ca";
+      UMask = "0077";
+    };
+
+    # Working directory will be /tmp
+    script = ''
+      minica \
+        --ca-key ca/key.pem \
+        --ca-cert ca/cert.pem \
+        --domains selfsigned.local
+    '';
+  };
+
+  # Ensures that directories which are shared across all certs
+  # exist and have the correct user and group, since group
+  # is configurable on a per-cert basis.
+  userMigrationService = let
+    script = with builtins; ''
+      chown -R ${user} .lego/accounts
+    '' + (concatStringsSep "\n" (mapAttrsToList (cert: data: ''
+      for fixpath in ${escapeShellArg cert} .lego/${escapeShellArg cert}; do
+        if [ -d "$fixpath" ]; then
+          chmod -R u=rwX,g=rX,o= "$fixpath"
+          chown -R ${user}:${data.group} "$fixpath"
+        fi
+      done
+    '') certConfigs));
+  in {
+    description = "Fix owner and group of all ACME certificates";
+
+    serviceConfig = commonServiceConfig // {
+      # We don't want this to run every time a renewal happens
+      RemainAfterExit = true;
+
+      # StateDirectory entries are a cleaner, service-level mechanism
+      # for dealing with persistent service data
+      StateDirectory = [ "acme" "acme/.lego" "acme/.lego/accounts" ];
+      StateDirectoryMode = 755;
+      WorkingDirectory = "/var/lib/acme";
+
+      # Run the start script as root
+      ExecStart = "+" + (pkgs.writeShellScript "acme-fixperms" script);
+    };
+  };
+  lockfilePrepareService = {
+    description = "Manage lock files for acme services";
+
+    # ensure all required lock files exist, but none more
+    script = ''
+      GLOBIGNORE="${concatStringsSep ":" concurrencyLockfiles}"
+      rm -f *
+      unset GLOBIGNORE
+
+      xargs touch <<< "${toString concurrencyLockfiles}"
+    '';
+
+    serviceConfig = commonServiceConfig // {
+      # We don't want this to run every time a renewal happens
+      RemainAfterExit = true;
+      WorkingDirectory = lockdir;
+    };
+  };
+
+
+  certToConfig = cert: data: let
+    acmeServer = data.server;
+    useDns = data.dnsProvider != null;
+    useDnsOrS3 = useDns || data.s3Bucket != null;
+    destPath = "/var/lib/acme/${cert}";
+    selfsignedDeps = optionals (cfg.preliminarySelfsigned) [ "acme-selfsigned-${cert}.service" ];
+
+    # Minica and lego have a "feature" which replaces * with _. We need
+    # to make this substitution to reference the output files from both programs.
+    # End users never see this since we rename the certs.
+    keyName = builtins.replaceStrings ["*"] ["_"] data.domain;
+
+    # FIXME when mkChangedOptionModule supports submodules, change to that.
+    # This is a workaround
+    extraDomains = data.extraDomainNames ++ (
+      optionals
+      (data.extraDomains != "_mkMergedOptionModule")
+      (builtins.attrNames data.extraDomains)
+    );
+
+    # Create hashes for cert data directories based on configuration
+    # Flags are separated to avoid collisions
+    hashData = with builtins; ''
+      ${concatStringsSep " " data.extraLegoFlags} -
+      ${concatStringsSep " " data.extraLegoRunFlags} -
+      ${concatStringsSep " " data.extraLegoRenewFlags} -
+      ${toString acmeServer} ${toString data.dnsProvider}
+      ${toString data.ocspMustStaple} ${data.keyType}
+    '';
+    certDir = mkHash hashData;
+    # TODO remove domainHash usage entirely. Waiting on go-acme/lego#1532
+    domainHash = mkHash "${concatStringsSep " " extraDomains} ${data.domain}";
+    accountHash = (mkAccountHash acmeServer data);
+    accountDir = accountDirRoot + accountHash;
+
+    protocolOpts = if useDns then (
+      [ "--dns" data.dnsProvider ]
+      ++ optionals (!data.dnsPropagationCheck) [ "--dns.disable-cp" ]
+      ++ optionals (data.dnsResolver != null) [ "--dns.resolvers" data.dnsResolver ]
+    ) else if data.s3Bucket != null then [ "--http" "--http.s3-bucket" data.s3Bucket ]
+    else if data.listenHTTP != null then [ "--http" "--http.port" data.listenHTTP ]
+    else [ "--http" "--http.webroot" data.webroot ];
+
+    commonOpts = [
+      "--accept-tos" # Checking the option is covered by the assertions
+      "--path" "."
+      "-d" data.domain
+      "--email" data.email
+      "--key-type" data.keyType
+    ] ++ protocolOpts
+      ++ optionals (acmeServer != null) [ "--server" acmeServer ]
+      ++ concatMap (name: [ "-d" name ]) extraDomains
+      ++ data.extraLegoFlags;
+
+    # Although --must-staple is common to both modes, it is not declared as a
+    # mode-agnostic argument in lego and thus must come after the mode.
+    runOpts = escapeShellArgs (
+      commonOpts
+      ++ [ "run" ]
+      ++ optionals data.ocspMustStaple [ "--must-staple" ]
+      ++ data.extraLegoRunFlags
+    );
+    renewOpts = escapeShellArgs (
+      commonOpts
+      ++ [ "renew" "--no-random-sleep" ]
+      ++ optionals data.ocspMustStaple [ "--must-staple" ]
+      ++ data.extraLegoRenewFlags
+    );
+
+    # We need to collect all the ACME webroots to grant them write
+    # access in the systemd service.
+    webroots =
+      lib.remove null
+        (lib.unique
+            (builtins.map
+            (certAttrs: certAttrs.webroot)
+            (lib.attrValues config.security.acme.certs)));
+  in {
+    inherit accountHash cert selfsignedDeps;
+
+    group = data.group;
+
+    renewTimer = {
+      description = "Renew ACME Certificate for ${cert}";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = data.renewInterval;
+        Unit = "acme-${cert}.service";
+        Persistent = "yes";
+
+        # Allow systemd to pick a convenient time within the day
+        # to run the check.
+        # This allows the coalescing of multiple timer jobs.
+        # We divide by the number of certificates so that if you
+        # have many certificates, the renewals are distributed over
+        # the course of the day to avoid rate limits.
+        AccuracySec = "${toString (_24hSecs / numCerts)}s";
+        # Skew randomly within the day, per https://letsencrypt.org/docs/integration-guide/.
+        RandomizedDelaySec = "24h";
+        FixedRandomDelay = true;
+      };
+    };
+
+    selfsignService = lockfileName: {
+      description = "Generate self-signed certificate for ${cert}";
+      after = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ] ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
+      requires = [ "acme-selfsigned-ca.service" "acme-fixperms.service" ] ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
+
+      path = with pkgs; [ minica ];
+
+      unitConfig = {
+        ConditionPathExists = "!/var/lib/acme/${cert}/key.pem";
+        StartLimitIntervalSec = 0;
+      };
+
+      serviceConfig = commonServiceConfig // {
+        Group = data.group;
+        UMask = "0027";
+
+        StateDirectory = "acme/${cert}";
+
+        BindPaths = [
+          "/var/lib/acme/.minica:/tmp/ca"
+          "/var/lib/acme/${cert}:/tmp/${keyName}"
+        ];
+      };
+
+      # Working directory will be /tmp
+      # minica will output to a folder sharing the name of the first domain
+      # in the list, which will be ${data.domain}
+      script = (if (lockfileName == null) then lib.id else wrapInFlock "${lockdir}${lockfileName}") ''
+        minica \
+          --ca-key ca/key.pem \
+          --ca-cert ca/cert.pem \
+          --domains ${escapeShellArg (builtins.concatStringsSep "," ([ data.domain ] ++ extraDomains))}
+
+        # Create files to match directory layout for real certificates
+        cd '${keyName}'
+        cp ../ca/cert.pem chain.pem
+        cat cert.pem chain.pem > fullchain.pem
+        cat key.pem fullchain.pem > full.pem
+
+        # Group might change between runs, re-apply it
+        chown '${user}:${data.group}' *
+
+        # Default permissions make the files unreadable by group + anon
+        # Need to be readable by group
+        chmod 640 *
+      '';
+    };
+
+    renewService = lockfileName: {
+      description = "Renew ACME certificate for ${cert}";
+      after = [ "network.target" "network-online.target" "acme-fixperms.service" "nss-lookup.target" ] ++ selfsignedDeps ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
+      wants = [ "network-online.target" "acme-fixperms.service" ] ++ selfsignedDeps ++ optional (cfg.maxConcurrentRenewals > 0) "acme-lockfiles.service";
+
+      # https://github.com/NixOS/nixpkgs/pull/81371#issuecomment-605526099
+      wantedBy = optionals (!config.boot.isContainer) [ "multi-user.target" ];
+
+      path = with pkgs; [ lego coreutils diffutils openssl ];
+
+      serviceConfig = commonServiceConfig // {
+        Group = data.group;
+
+        # Let's Encrypt's Failed Validation limit allows 5 failures per account, per hostname, per hour.
+        # This avoids eating them all up if something is misconfigured upon the first try.
+        RestartSec = 15 * 60;
+
+        # Keep in mind that these directories will be deleted if the user runs
+        # systemctl clean --what=state
+        # acme/.lego/${cert} is listed for this reason.
+        StateDirectory = [
+          "acme/${cert}"
+          "acme/.lego/${cert}"
+          "acme/.lego/${cert}/${certDir}"
+          "acme/.lego/accounts/${accountHash}"
+        ];
+
+        ReadWritePaths = commonServiceConfig.ReadWritePaths ++ webroots;
+
+        # Needs to be space separated, but can't use a multiline string because that'll include newlines
+        BindPaths = [
+          "${accountDir}:/tmp/accounts"
+          "/var/lib/acme/${cert}:/tmp/out"
+          "/var/lib/acme/.lego/${cert}/${certDir}:/tmp/certificates"
+        ];
+
+        EnvironmentFile = mkIf useDnsOrS3 data.environmentFile;
+
+        Environment = mkIf useDnsOrS3
+          (mapAttrsToList (k: v: ''"${k}=%d/${k}"'') data.credentialFiles);
+
+        LoadCredential = mkIf useDnsOrS3
+          (mapAttrsToList (k: v: "${k}:${v}") data.credentialFiles);
+
+        # Run as root (Prefixed with +)
+        ExecStartPost = "+" + (pkgs.writeShellScript "acme-postrun" ''
+          cd /var/lib/acme/${escapeShellArg cert}
+          if [ -e renewed ]; then
+            rm renewed
+            ${data.postRun}
+            ${optionalString (data.reloadServices != [])
+                "systemctl --no-block try-reload-or-restart ${escapeShellArgs data.reloadServices}"
+            }
+          fi
+        '');
+      } // optionalAttrs (data.listenHTTP != null && toInt (last (splitString ":" data.listenHTTP)) < 1024) {
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+      };
+
+      # Working directory will be /tmp
+      script = (if (lockfileName == null) then lib.id else wrapInFlock "${lockdir}${lockfileName}") ''
+        ${optionalString data.enableDebugLogs "set -x"}
+        set -euo pipefail
+
+        # This reimplements the expiration date check, but without querying
+        # the acme server first. By doing this offline, we avoid errors
+        # when the network or DNS are unavailable, which can happen during
+        # nixos-rebuild switch.
+        is_expiration_skippable() {
+          pem=$1
+
+          # This function relies on set -e to exit early if any of the
+          # conditions or programs fail.
+
+          [[ -e $pem ]]
+
+          expiration_line="$(
+            set -euxo pipefail
+            openssl x509 -noout -enddate <$pem \
+                  | grep notAfter \
+                  | sed -e 's/^notAfter=//'
+          )"
+          [[ -n "$expiration_line" ]]
+
+          expiration_date="$(date -d "$expiration_line" +%s)"
+          now="$(date +%s)"
+          expiration_s=$[expiration_date - now]
+          expiration_days=$[expiration_s / (3600 * 24)]   # rounds down
+
+          [[ $expiration_days -gt ${toString data.validMinDays} ]]
+        }
+
+        ${optionalString (data.webroot != null) ''
+          # Ensure the webroot exists. Fixing group is required in case configuration was changed between runs.
+          # Lego will fail if the webroot does not exist at all.
+          (
+            mkdir -p '${data.webroot}/.well-known/acme-challenge' \
+            && chgrp '${data.group}' ${data.webroot}/.well-known/acme-challenge
+          ) || (
+            echo 'Please ensure ${data.webroot}/.well-known/acme-challenge exists and is writable by acme:${data.group}' \
+            && exit 1
+          )
+        ''}
+
+        echo '${domainHash}' > domainhash.txt
+
+        # Check if we can renew.
+        # We can only renew if the list of domains has not changed.
+        # We also need an account key. Avoids #190493
+        if cmp -s domainhash.txt certificates/domainhash.txt && [ -e 'certificates/${keyName}.key' -a -e 'certificates/${keyName}.crt' -a -n "$(find accounts -name '${data.email}.key')" ]; then
+
+          # Even if a cert is not expired, it may be revoked by the CA.
+          # Try to renew, and silently fail if the cert is not expired.
+          # Avoids #85794 and resolves #129838
+          if ! lego ${renewOpts} --days ${toString data.validMinDays}; then
+            if is_expiration_skippable out/full.pem; then
+              echo 1>&2 "nixos-acme: Ignoring failed renewal because expiration isn't within the coming ${toString data.validMinDays} days"
+            else
+              # High number to avoid Systemd reserved codes.
+              exit 11
+            fi
+          fi
+
+        # Otherwise do a full run
+        elif ! lego ${runOpts}; then
+          # Produce a nice error for those doing their first nixos-rebuild with these certs
+          echo Failed to fetch certificates. \
+            This may mean your DNS records are set up incorrectly. \
+            ${optionalString (cfg.preliminarySelfsigned) "Self-signed certs are in place and dependent services will still start."}
+          # Exit 10 so that users can potentially amend SuccessExitStatus to ignore this error.
+          # High number to avoid Systemd reserved codes.
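+          # For instance (illustrative snippet, not part of this module), a user could set
+          #   systemd.services."acme-example.org".serviceConfig.SuccessExitStatus = [ 10 ];
+          # in their NixOS configuration to treat this exit code as success.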
+          exit 10
+        fi
+
+        mv domainhash.txt certificates/
+
+        # Group might change between runs, re-apply it
+        chown '${user}:${data.group}' certificates/*
+
+        # Copy all certs to the "real" certs directory
+        if ! cmp -s 'certificates/${keyName}.crt' out/fullchain.pem; then
+          touch out/renewed
+          echo Installing new certificate
+          cp -vp 'certificates/${keyName}.crt' out/fullchain.pem
+          cp -vp 'certificates/${keyName}.key' out/key.pem
+          cp -vp 'certificates/${keyName}.issuer.crt' out/chain.pem
+          ln -sf fullchain.pem out/cert.pem
+          cat out/key.pem out/fullchain.pem > out/full.pem
+        fi
+
+        # By default group will have no access to the cert files.
+        # This chmod will fix that.
+        chmod 640 out/*
+      '';
+    };
+  };
+
+  certConfigs = mapAttrs certToConfig cfg.certs;
+
+  # These options can be specified within
+  # security.acme.defaults or security.acme.certs.<name>
+  inheritableModule = isDefaults: { config, ... }: let
+    defaultAndText = name: default: {
+      # When ! isDefaults then this is the option declaration for the
+      # security.acme.certs.<name> path, which has the extra inheritDefaults
+      # option, which, when disabled, means that the default can't be inherited.
+      default = if isDefaults || ! config.inheritDefaults then default else cfg.defaults.${name};
+      # The docs, however, don't need to depend on inheritDefaults; they should
+      # stay constant. Notably, it wouldn't matter much anyway, because to get
+      # the option information, a submodule with name `<name>` is evaluated
+      # without any definitions.
+      defaultText = if isDefaults then default else literalExpression "config.security.acme.defaults.${name}";
+    };
+  in {
+    imports = [
+      (mkRenamedOptionModule [ "credentialsFile" ] [ "environmentFile" ])
+    ];
+
+    options = {
+      validMinDays = mkOption {
+        type = types.int;
+        inherit (defaultAndText "validMinDays" 30) default defaultText;
+        description = lib.mdDoc "Minimum remaining validity before renewal in days.";
+      };
+
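+      # Accepts systemd calendar expressions, e.g. "daily" (the default), "weekly",
+      # or "*-*-* 00/6:00:00" to check every six hours.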
+      renewInterval = mkOption {
+        type = types.str;
+        inherit (defaultAndText "renewInterval" "daily") default defaultText;
+        description = lib.mdDoc ''
+          Systemd calendar expression when to check for renewal. See
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+
+      enableDebugLogs = mkEnableOption (lib.mdDoc "debug logging for this certificate") // {
+        inherit (defaultAndText "enableDebugLogs" true) default defaultText;
+      };
+
+      webroot = mkOption {
+        type = types.nullOr types.str;
+        inherit (defaultAndText "webroot" null) default defaultText;
+        example = "/var/lib/acme/acme-challenge";
+        description = lib.mdDoc ''
+          Where the webroot of the HTTP vhost is located.
+          {file}`.well-known/acme-challenge/` directory
+          will be created below the webroot if it doesn't exist.
+          `http://example.org/.well-known/acme-challenge/` must also
+          be available (notice unencrypted HTTP).
+        '';
+      };
+
+      server = mkOption {
+        type = types.nullOr types.str;
+        inherit (defaultAndText "server" null) default defaultText;
+        description = lib.mdDoc ''
+          ACME Directory Resource URI. Defaults to Let's Encrypt's
+          production endpoint,
+          <https://acme-v02.api.letsencrypt.org/directory>, if unset.
+        '';
+      };
+
+      email = mkOption {
+        type = types.nullOr types.str;
+        inherit (defaultAndText "email" null) default defaultText;
+        description = lib.mdDoc ''
+          Email address for account creation and correspondence from the CA.
+          It is recommended to use the same email for all certs to avoid account
+          creation limits.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        inherit (defaultAndText "group" "acme") default defaultText;
+        description = lib.mdDoc "Group running the ACME client.";
+      };
+
+      reloadServices = mkOption {
+        type = types.listOf types.str;
+        inherit (defaultAndText "reloadServices" []) default defaultText;
+        description = lib.mdDoc ''
+          The list of systemd services to call `systemctl try-reload-or-restart`
+          on.
+        '';
+      };
+
+      postRun = mkOption {
+        type = types.lines;
+        inherit (defaultAndText "postRun" "") default defaultText;
+        example = "cp full.pem backup.pem";
+        description = lib.mdDoc ''
+          Commands to run after new certificates go live. Note that
+          these commands run as the root user.
+
+          Executed in the same directory with the new certificate.
+        '';
+      };
+
+      keyType = mkOption {
+        type = types.str;
+        inherit (defaultAndText "keyType" "ec256") default defaultText;
+        description = lib.mdDoc ''
+          Key type to use for private keys.
+          For an up-to-date list of supported values, check the --key-type option
+          at <https://go-acme.github.io/lego/usage/cli/options/>.
+        '';
+      };
+
+      dnsProvider = mkOption {
+        type = types.nullOr types.str;
+        inherit (defaultAndText "dnsProvider" null) default defaultText;
+        example = "route53";
+        description = lib.mdDoc ''
+          DNS Challenge provider. For a list of supported providers, see the "code"
+          field of the DNS providers listed at <https://go-acme.github.io/lego/dns/>.
+        '';
+      };
+
+      dnsResolver = mkOption {
+        type = types.nullOr types.str;
+        inherit (defaultAndText "dnsResolver" null) default defaultText;
+        example = "1.1.1.1:53";
+        description = lib.mdDoc ''
+          Set the resolver to use for performing recursive DNS queries. Supported:
+          host:port. The default is to use the system resolvers, or Google's DNS
+          resolvers if the system's cannot be determined.
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        inherit (defaultAndText "environmentFile" null) default defaultText;
+        description = lib.mdDoc ''
+          Path to an EnvironmentFile for the cert's service containing any required and
+          optional environment variables for your selected dnsProvider.
+          To find out what values you need to set, consult the documentation at
+          <https://go-acme.github.io/lego/dns/> for the corresponding dnsProvider.
+        '';
+        example = "/var/src/secrets/example.org-route53-api-token";
+      };
+
+      credentialFiles = mkOption {
+        type = types.attrsOf (types.path);
+        inherit (defaultAndText "credentialFiles" {}) default defaultText;
+        description = lib.mdDoc ''
+          Environment variables suffixed by "_FILE" to set for the cert's service
+          for your selected dnsProvider.
+          To find out what values you need to set, consult the documentation at
+          <https://go-acme.github.io/lego/dns/> for the corresponding dnsProvider.
+          This allows credential files to be passed securely to lego by leveraging
+          systemd credentials.
+        '';
+        example = literalExpression ''
+          {
+            "RFC2136_TSIG_SECRET_FILE" = "/run/secrets/tsig-secret-example.org";
+          }
+        '';
+      };
+
+      dnsPropagationCheck = mkOption {
+        type = types.bool;
+        inherit (defaultAndText "dnsPropagationCheck" true) default defaultText;
+        description = lib.mdDoc ''
+          Toggles the lego DNS propagation check, which is used alongside the DNS-01
+          challenge to ensure the required DNS entries are available.
+        '';
+      };
+
+      ocspMustStaple = mkOption {
+        type = types.bool;
+        inherit (defaultAndText "ocspMustStaple" false) default defaultText;
+        description = lib.mdDoc ''
+          Turns on the OCSP Must-Staple TLS extension.
+          Make sure you know what you're doing! See:
+
+          - <https://blog.apnic.net/2019/01/15/is-the-web-ready-for-ocsp-must-staple/>
+          - <https://blog.hboeck.de/archives/886-The-Problem-with-OCSP-Stapling-and-Must-Staple-and-why-Certificate-Revocation-is-still-broken.html>
+        '';
+      };
+
+      extraLegoFlags = mkOption {
+        type = types.listOf types.str;
+        inherit (defaultAndText "extraLegoFlags" []) default defaultText;
+        description = lib.mdDoc ''
+          Additional global flags to pass to all lego commands.
+        '';
+      };
+
+      extraLegoRenewFlags = mkOption {
+        type = types.listOf types.str;
+        inherit (defaultAndText "extraLegoRenewFlags" []) default defaultText;
+        description = lib.mdDoc ''
+          Additional flags to pass to lego renew.
+        '';
+      };
+
+      extraLegoRunFlags = mkOption {
+        type = types.listOf types.str;
+        inherit (defaultAndText "extraLegoRunFlags" []) default defaultText;
+        description = lib.mdDoc ''
+          Additional flags to pass to lego run.
+        '';
+      };
+    };
+  };
+
+  certOpts = { name, config, ... }: {
+    options = {
+      # user option has been removed
+      user = mkOption {
+        visible = false;
+        default = "_mkRemovedOptionModule";
+      };
+
+      # allowKeysForGroup option has been removed
+      allowKeysForGroup = mkOption {
+        visible = false;
+        default = "_mkRemovedOptionModule";
+      };
+
+      # extraDomains was replaced with extraDomainNames
+      extraDomains = mkOption {
+        visible = false;
+        default = "_mkMergedOptionModule";
+      };
+
+      directory = mkOption {
+        type = types.str;
+        readOnly = true;
+        default = "/var/lib/acme/${name}";
+        description = lib.mdDoc "Directory where certificate and other state is stored.";
+      };
+
+      domain = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "Domain to fetch certificate for (defaults to the entry name).";
+      };
+
+      extraDomainNames = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = literalExpression ''
+          [
+            "example.org"
+            "mydomain.org"
+          ]
+        '';
+        description = lib.mdDoc ''
+          A list of extra domain names, which are included in the one certificate to be issued.
+        '';
+      };
+
+      # This setting must be different for each configured certificate, otherwise
+      # two or more renewals may fail to bind to the address. Hence, it is not in
+      # the inheritableOpts.
+      listenHTTP = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = ":1360";
+        description = lib.mdDoc ''
+          Interface and port to listen on to solve HTTP challenges
+          in the form [INTERFACE]:PORT.
+          If you use a port other than 80, you must proxy port 80 to this port.
+        '';
+      };
+
+      s3Bucket = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "acme";
+        description = lib.mdDoc ''
+          S3 bucket name to use for HTTP-01 based challenges. Challenges will be written to the S3 bucket.
+        '';
+      };
+
+      inheritDefaults = mkOption {
+        default = true;
+        example = true;
+        description = lib.mdDoc "Whether to inherit values set in `security.acme.defaults` or not.";
+        type = lib.types.bool;
+      };
+    };
+  };
+
+in {
+
+  options = {
+    security.acme = {
+      preliminarySelfsigned = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether a preliminary self-signed certificate should be generated before
+          doing ACME requests. This can be useful when certificates are required in
+          a webserver, but ACME needs the webserver to make its requests.
+
+          With a preliminary self-signed certificate in place, the webserver can be
+          started and can later reload the correct ACME certificates.
+        '';
+      };
+
+      acceptTerms = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Accept the CA's terms of service. The default provider is Let's Encrypt,
+          you can find their ToS at <https://letsencrypt.org/repository/>.
+        '';
+      };
+
+      useRoot = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to use the root user when generating certs. This is not recommended
+          for security and compatibility reasons. If a service requires root-owned certificates,
+          consider following the guide on "Using ACME with services demanding root
+          owned certificates" in the NixOS manual, and only using this as a fallback
+          or for testing.
+        '';
+      };
+
+      defaults = mkOption {
+        type = types.submodule (inheritableModule true);
+        description = lib.mdDoc ''
+          Default values inheritable by all configured certs. You can
+          use this to define options shared by all your certs. These defaults
+          can also be ignored on a per-cert basis using the
+          {option}`security.acme.certs.''${cert}.inheritDefaults` option.
+        '';
+      };
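+      # Illustrative usage (values are examples only):
+      #   security.acme.defaults = {
+      #     email = "admin@example.org";
+      #     dnsProvider = "route53";
+      #     environmentFile = "/var/src/secrets/route53-credentials";
+      #   };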
+
+      certs = mkOption {
+        default = { };
+        type = with types; attrsOf (submodule [ (inheritableModule false) certOpts ]);
+        description = lib.mdDoc ''
+          Attribute set of certificates to get signed and renewed. Creates
+          `acme-''${cert}.{service,timer}` systemd units for
+          each certificate defined here. Other services can add dependencies
+          to those units if they rely on the certificates being present,
+          or trigger restarts of the service if certificates get renewed.
+        '';
+        example = literalExpression ''
+          {
+            "example.com" = {
+              webroot = "/var/lib/acme/acme-challenge/";
+              email = "foo@example.com";
+              extraDomainNames = [ "www.example.com" "foo.example.com" ];
+            };
+            "bar.example.com" = {
+              webroot = "/var/lib/acme/acme-challenge/";
+              email = "bar@example.com";
+            };
+          }
+        '';
+      };
+      maxConcurrentRenewals = mkOption {
+        default = 5;
+        type = types.int;
+        description = lib.mdDoc ''
+          Maximum number of concurrent certificate generation or renewal jobs. All other
+          jobs will queue and wait for running jobs to finish. Reduces the system load
+          caused by certificate generation.
+
+          Set to `0` to allow an unlimited number of concurrent job runs.
+        '';
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "security" "acme" "production" ] ''
+      Use security.acme.server to define your staging ACME server URL instead.
+
+      To use the let's encrypt staging server, use security.acme.server =
+      "https://acme-staging-v02.api.letsencrypt.org/directory".
+    '')
+    (mkRemovedOptionModule [ "security" "acme" "directory" ] "ACME Directory is now hardcoded to /var/lib/acme and its permissions are managed by systemd. See https://github.com/NixOS/nixpkgs/issues/53852 for more info.")
+    (mkRemovedOptionModule [ "security" "acme" "preDelay" ] "This option has been removed. If you want to make sure that something executes before certificates are provisioned, add a RequiredBy=acme-\${cert}.service to the service you want to execute before the cert renewal")
+    (mkRemovedOptionModule [ "security" "acme" "activationDelay" ] "This option has been removed. If you want to make sure that something executes before certificates are provisioned, add a RequiredBy=acme-\${cert}.service to the service you want to execute before the cert renewal")
+    (mkChangedOptionModule [ "security" "acme" "validMin" ] [ "security" "acme" "defaults" "validMinDays" ] (config: config.security.acme.validMin / (24 * 3600)))
+    (mkChangedOptionModule [ "security" "acme" "validMinDays" ] [ "security" "acme" "defaults" "validMinDays" ] (config: config.security.acme.validMinDays))
+    (mkChangedOptionModule [ "security" "acme" "renewInterval" ] [ "security" "acme" "defaults" "renewInterval" ] (config: config.security.acme.renewInterval))
+    (mkChangedOptionModule [ "security" "acme" "email" ] [ "security" "acme" "defaults" "email" ] (config: config.security.acme.email))
+    (mkChangedOptionModule [ "security" "acme" "server" ] [ "security" "acme" "defaults" "server" ] (config: config.security.acme.server))
+    (mkChangedOptionModule [ "security" "acme" "enableDebugLogs" ] [ "security" "acme" "defaults" "enableDebugLogs" ] (config: config.security.acme.enableDebugLogs))
+  ];
+
+  config = mkMerge [
+    (mkIf (cfg.certs != { }) {
+
+      # FIXME Most of these custom warnings and filters for security.acme.certs.* are required
+      # because using mkRemovedOptionModule/mkChangedOptionModule with attrsets isn't possible.
+      warnings = filter (w: w != "") (mapAttrsToList (cert: data: optionalString (data.extraDomains != "_mkMergedOptionModule") ''
+        The option definition `security.acme.certs.${cert}.extraDomains` has changed
+        to `security.acme.certs.${cert}.extraDomainNames` and is now a list of strings.
+        Setting a custom webroot for extra domains is not possible, instead use separate certs.
+      '') cfg.certs);
+
+      assertions = let
+        certs = attrValues cfg.certs;
+      in [
+        {
+          assertion = cfg.defaults.email != null || all (certOpts: certOpts.email != null) certs;
+          message = ''
+            You must define `security.acme.certs.<name>.email` or
+            `security.acme.defaults.email` to register with the CA. Note that using
+            many different addresses for certs may trigger account rate limits.
+          '';
+        }
+        {
+          assertion = cfg.acceptTerms;
+          message = ''
+            You must accept the CA's terms of service before using
+            the ACME module by setting `security.acme.acceptTerms`
+            to `true`. For Let's Encrypt's ToS see https://letsencrypt.org/repository/
+          '';
+        }
+      ] ++ (builtins.concatLists (mapAttrsToList (cert: data: [
+        {
+          assertion = data.user == "_mkRemovedOptionModule";
+          message = ''
+            The option definition `security.acme.certs.${cert}.user' no longer has any effect; please remove it.
+            Certificate user is now hard coded to the "acme" user. If you would
+            like another user to have access, consider adding them to the
+            "acme" group or changing security.acme.certs.${cert}.group.
+          '';
+        }
+        {
+          assertion = data.allowKeysForGroup == "_mkRemovedOptionModule";
+          message = ''
+            The option definition `security.acme.certs.${cert}.allowKeysForGroup' no longer has any effect; please remove it.
+            All certs are readable by the configured group. If this is undesired,
+            consider changing security.acme.certs.${cert}.group to an unused group.
+          '';
+        }
+        # * in the cert value breaks building of systemd services, and makes
+        # referencing them as a user quite weird too. Best practice is to use
+        # the domain option.
+        {
+          assertion = ! hasInfix "*" cert;
+          message = ''
+            The cert name `security.acme.certs.${cert}` cannot contain a * character.
+            Instead, give the attribute a name without the wildcard and set
+            `security.acme.certs.<name>.domain` to the wildcard domain.
+          '';
+        }
+        (let exclusiveAttrs = {
+          inherit (data) dnsProvider webroot listenHTTP s3Bucket;
+        }; in {
+          assertion = lib.length (lib.filter (x: x != null) (builtins.attrValues exclusiveAttrs)) == 1;
+          message = ''
+            Exactly one of the options
+            `security.acme.certs.${cert}.dnsProvider`,
+            `security.acme.certs.${cert}.webroot`,
+            `security.acme.certs.${cert}.listenHTTP` and
+            `security.acme.certs.${cert}.s3Bucket`
+            is required.
+            Current values: ${(lib.generators.toPretty {} exclusiveAttrs)}.
+          '';
+        })
+        {
+          assertion = all (hasSuffix "_FILE") (attrNames data.credentialFiles);
+          message = ''
+            Option `security.acme.certs.${cert}.credentialFiles` can only be
+            used for variables suffixed by "_FILE".
+          '';
+        }
+      ]) cfg.certs));
+
+      users.users.acme = {
+        home = "/var/lib/acme";
+        group = "acme";
+        isSystemUser = true;
+      };
+
+      users.groups.acme = {};
+
+      # For lock files we still use tmpfiles, as they are better placed in /run.
+      systemd.tmpfiles.rules = [
+        "d ${lockdir} 0700 ${user} - - -"
+        "Z ${lockdir} 0700 ${user} - - -"
+      ];
+
+      systemd.services = let
+        renewServiceFunctions = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewService) certConfigs;
+        renewServices =  if cfg.maxConcurrentRenewals > 0
+          then roundRobinApplyAttrs renewServiceFunctions concurrencyLockfiles
+          else mapAttrs (_: f: f null) renewServiceFunctions;
+        selfsignServiceFunctions = mapAttrs' (cert: conf: nameValuePair "acme-selfsigned-${cert}" conf.selfsignService) certConfigs;
+        selfsignServices = if cfg.maxConcurrentRenewals > 0
+          then roundRobinApplyAttrs selfsignServiceFunctions concurrencyLockfiles
+          else mapAttrs (_: f: f null) selfsignServiceFunctions;
+        in
+        { "acme-fixperms" = userMigrationService; }
+        // (optionalAttrs (cfg.maxConcurrentRenewals > 0) {"acme-lockfiles" = lockfilePrepareService; })
+        // renewServices
+        // (optionalAttrs (cfg.preliminarySelfsigned) ({
+        "acme-selfsigned-ca" = selfsignCAService;
+      } // selfsignServices));
+
+      systemd.timers = mapAttrs' (cert: conf: nameValuePair "acme-${cert}" conf.renewTimer) certConfigs;
+
+      systemd.targets = let
+        # Create some targets which can be depended on to be "active" after cert renewals
+        finishedTargets = mapAttrs' (cert: conf: nameValuePair "acme-finished-${cert}" {
+          wantedBy = [ "default.target" ];
+          requires = [ "acme-${cert}.service" ];
+          after = [ "acme-${cert}.service" ];
+        }) certConfigs;
+
+        # Create targets to limit the number of simultaneous account creations
+        # How it works:
+        # - Pick a "leader" cert service, which will be in charge of creating the account,
+        #   and run first (requires + after)
+        # - Make all other cert services sharing the same account wait for the leader to
+        #   finish before starting (requiredBy + before).
+        # Using a target here is fine - account creation is a one-time event. Even if
+        # systemctl clean --what=state is used to delete the account, so long as the user
+        # then runs one of the cert services, there won't be any issues.
+        accountTargets = mapAttrs' (hash: confs: let
+          leader = "acme-${(builtins.head confs).cert}.service";
+          dependantServices = map (conf: "acme-${conf.cert}.service") (builtins.tail confs);
+        in nameValuePair "acme-account-${hash}" {
+          requiredBy = dependantServices;
+          before = dependantServices;
+          requires = [ leader ];
+          after = [ leader ];
+        }) (groupBy (conf: conf.accountHash) (attrValues certConfigs));
+      in finishedTargets // accountTargets;
+    })
+  ];
+
+  meta = {
+    maintainers = lib.teams.acme.members;
+    doc = ./default.md;
+  };
+}
diff --git a/nixpkgs/nixos/modules/security/acme/mk-cert-ownership-assertion.nix b/nixpkgs/nixos/modules/security/acme/mk-cert-ownership-assertion.nix
new file mode 100644
index 000000000000..b80d89aeb9fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/acme/mk-cert-ownership-assertion.nix
@@ -0,0 +1,4 @@
+{ cert, group, groups, user }: {
+  assertion = cert.group == group || builtins.any (u: u == user) groups.${cert.group}.members;
+  message = "Group for certificate ${cert.domain} must be ${group}, or user ${user} must be a member of group ${cert.group}";
+}
diff --git a/nixpkgs/nixos/modules/security/apparmor.nix b/nixpkgs/nixos/modules/security/apparmor.nix
new file mode 100644
index 000000000000..24b48338ed77
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/apparmor.nix
@@ -0,0 +1,216 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  inherit (builtins) attrNames head map match readFile;
+  inherit (lib) types;
+  inherit (config.environment) etc;
+  cfg = config.security.apparmor;
+  mkDisableOption = name: mkEnableOption (lib.mdDoc name) // {
+    default = true;
+    example = false;
+  };
+  enabledPolicies = filterAttrs (n: p: p.enable) cfg.policies;
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "security" "apparmor" "confineSUIDApplications" ] "Please use the new options: `security.apparmor.policies.<policy>.enable'.")
+    (mkRemovedOptionModule [ "security" "apparmor" "profiles" ] "Please use the new option: `security.apparmor.policies'.")
+    apparmor/includes.nix
+    apparmor/profiles.nix
+  ];
+
+  options = {
+    security.apparmor = {
+      enable = mkEnableOption (lib.mdDoc ''
+        the AppArmor Mandatory Access Control system.
+
+        If you're enabling this module on a running system,
+        note that a reboot will be required to activate AppArmor in the kernel.
+
+        Also, beware that by default this module favors stability over security:
+        it does not try to kill running processes that are unconfined but have
+        become confinable, even though such killing is needed because AppArmor
+        can only confine new or already-confined processes of an executable.
+        This killing would, for instance, be necessary when upgrading to a NixOS
+        revision that introduces an AppArmor profile for the executable of a
+        running process for the first time.
+
+        Enable [](#opt-security.apparmor.killUnconfinedConfinables)
+        if you want this service to do such killing
+        by sending a `SIGTERM` to those running processes'');
+      policies = mkOption {
+        description = lib.mdDoc ''
+          AppArmor policies.
+        '';
+        type = types.attrsOf (types.submodule ({ name, config, ... }: {
+          options = {
+            enable = mkDisableOption "loading of the profile into the kernel";
+            enforce = mkDisableOption "enforcing of the policy (as opposed to merely complaining in the logs)";
+            profile = mkOption {
+              description = lib.mdDoc "The policy of the profile.";
+              type = types.lines;
+              apply = pkgs.writeText name;
+            };
+          };
+        }));
+        default = {};
+      };
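+      # Illustrative policy definition (the profile body is a rough sketch, not a
+      # tested profile):
+      #   security.apparmor.policies."bin.ping".profile = ''
+      #     ${pkgs.iputils}/bin/ping {
+      #       include <abstractions/base>
+      #       include <abstractions/nameservice>
+      #       capability net_raw,
+      #       network inet raw,
+      #     }
+      #   '';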
+      includes = mkOption {
+        type = types.attrsOf types.lines;
+        default = {};
+        description = lib.mdDoc ''
+          Attribute set mapping include file names to their contents; these are made
+          available to AppArmor when resolving `include` directives.
+        '';
+        apply = mapAttrs pkgs.writeText;
+      };
+      packages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = lib.mdDoc "List of packages to be added to AppArmor's include path";
+      };
+      enableCache = mkEnableOption (lib.mdDoc ''
+        caching of AppArmor policies
+        in `/var/cache/apparmor/`.
+
+        Beware that AppArmor policies almost always contain Nix store paths,
+        so each change of these paths produces a new cached version,
+        which accumulates in the cache'');
+      killUnconfinedConfinables = mkEnableOption (lib.mdDoc ''
+        killing of processes which have an AppArmor profile enabled
+        (in [](#opt-security.apparmor.policies))
+        but are not confined (because AppArmor can only confine new processes).
+
+        This only sends a graceful `SIGTERM` signal to the processes,
+        not a `SIGKILL`.
+
+        Beware that due to a current limitation of AppArmor,
+        only profiles with exact paths (and no name) can enable such kills'');
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = map (policy:
+      { assertion = match ".*/.*" policy == null;
+        message = "`security.apparmor.policies.\"${policy}\"' must not contain a slash.";
+        # Because, for instance, aa-remove-unknown uses profiles_names_list() in rc.apparmor.functions
+        # which does not recurse into sub-directories.
+      }
+    ) (attrNames cfg.policies);
+
+    environment.systemPackages = [
+      pkgs.apparmor-utils
+      pkgs.apparmor-bin-utils
+    ];
+    environment.etc."apparmor.d".source = pkgs.linkFarm "apparmor.d" (
+      # It's important to put only enabledPolicies here and not all cfg.policies
+      # because aa-remove-unknown reads profiles from all /etc/apparmor.d/*
+      mapAttrsToList (name: p: { inherit name; path = p.profile; }) enabledPolicies ++
+      mapAttrsToList (name: path: { inherit name path; }) cfg.includes
+    );
+    environment.etc."apparmor/parser.conf".text = ''
+        ${if cfg.enableCache then "write-cache" else "skip-cache"}
+        cache-loc /var/cache/apparmor
+        Include /etc/apparmor.d
+      '' +
+      concatMapStrings (p: "Include ${p}/etc/apparmor.d\n") cfg.packages;
+    # For aa-logprof
+    environment.etc."apparmor/apparmor.conf".text = ''
+    '';
+    # For aa-logprof
+    environment.etc."apparmor/severity.db".source = pkgs.apparmor-utils + "/etc/apparmor/severity.db";
+    environment.etc."apparmor/logprof.conf".source = pkgs.runCommand "logprof.conf" {
+      header = ''
+        [settings]
+          # /etc/apparmor.d/ is read-only on NixOS
+          profiledir = /var/cache/apparmor/logprof
+          inactive_profiledir = /etc/apparmor.d/disable
+          # Use: journalctl -b --since today --grep audit: | aa-logprof
+          logfiles = /dev/stdin
+
+          parser = ${pkgs.apparmor-parser}/bin/apparmor_parser
+          ldd = ${pkgs.glibc.bin}/bin/ldd
+          logger = ${pkgs.util-linux}/bin/logger
+
+          # customize how file ownership permissions are presented
+          # 0 - off
+          # 1 - default of what ever mode the log reported
+          # 2 - force the new permissions to be user
+          # 3 - force all perms on the rule to be user
+          default_owner_prompt = 1
+
+          custom_includes = /etc/apparmor.d ${concatMapStringsSep " " (p: "${p}/etc/apparmor.d") cfg.packages}
+
+        [qualifiers]
+          ${pkgs.runtimeShell} = icnu
+          ${pkgs.bashInteractive}/bin/sh = icnu
+          ${pkgs.bashInteractive}/bin/bash = icnu
+          ${config.users.defaultUserShell} = icnu
+      '';
+      footer = "${pkgs.apparmor-utils}/etc/apparmor/logprof.conf";
+      passAsFile = [ "header" ];
+    } ''
+      cp $headerPath $out
+      sed '1,/\[qualifiers\]/d' $footer >> $out
+    '';
+
+    boot.kernelParams = [ "apparmor=1" "security=apparmor" ];
+
+    systemd.services.apparmor = {
+      after = [
+        "local-fs.target"
+        "systemd-journald-audit.socket"
+      ];
+      before = [ "sysinit.target" ];
+      wantedBy = [ "multi-user.target" ];
+      unitConfig = {
+        Description="Load AppArmor policies";
+        DefaultDependencies = "no";
+        ConditionSecurity = "apparmor";
+      };
+      # Reloading instead of restarting makes it possible to load new AppArmor profiles
+      # without necessarily restarting all services which have Requires=apparmor.service
+      reloadIfChanged = true;
+      restartTriggers = [
+        etc."apparmor/parser.conf".source
+        etc."apparmor.d".source
+      ];
+      serviceConfig = let
+        killUnconfinedConfinables = pkgs.writeShellScript "apparmor-kill" ''
+          set -eu
+          ${pkgs.apparmor-bin-utils}/bin/aa-status --json |
+          ${pkgs.jq}/bin/jq --raw-output '.processes | .[] | .[] | select (.status == "unconfined") | .pid' |
+          xargs --verbose --no-run-if-empty --delimiter='\n' \
+          kill
+        '';
+        commonOpts = p: "--verbose --show-cache ${optionalString (!p.enforce) "--complain "}${p.profile}";
+        in {
+        Type = "oneshot";
+        RemainAfterExit = "yes";
+        ExecStartPre = "${pkgs.apparmor-utils}/bin/aa-teardown";
+        ExecStart = mapAttrsToList (n: p: "${pkgs.apparmor-parser}/bin/apparmor_parser --add ${commonOpts p}") enabledPolicies;
+        ExecStartPost = optional cfg.killUnconfinedConfinables killUnconfinedConfinables;
+        ExecReload =
+          # Add or replace into the kernel profiles in enabledPolicies
+          # (because AppArmor can do that without stopping the processes already confined).
+          mapAttrsToList (n: p: "${pkgs.apparmor-parser}/bin/apparmor_parser --replace ${commonOpts p}") enabledPolicies ++
+          # Remove from the kernel any profile whose name is not
+          # one of the names within the content of the profiles in enabledPolicies
+          # (indirectly read from /etc/apparmor.d/*, without recursing into sub-directory).
+          # Note that this does not remove profiles dynamically generated by libvirt.
+          [ "${pkgs.apparmor-utils}/bin/aa-remove-unknown" ] ++
+          # Optionally kill the processes which are unconfined but now have a profile loaded
+          # (because AppArmor can only start to confine new processes).
+          optional cfg.killUnconfinedConfinables killUnconfinedConfinables;
+        ExecStop = "${pkgs.apparmor-utils}/bin/aa-teardown";
+        CacheDirectory = [ "apparmor" "apparmor/logprof" ];
+        CacheDirectoryMode = "0700";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ julm ];
+}
diff --git a/nixpkgs/nixos/modules/security/apparmor/includes.nix b/nixpkgs/nixos/modules/security/apparmor/includes.nix
new file mode 100644
index 000000000000..88051de484c5
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/apparmor/includes.nix
@@ -0,0 +1,322 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (builtins) attrNames hasAttr isAttrs;
+  inherit (lib) getLib;
+  inherit (config.environment) etc;
+  # Utility to generate an AppArmor rule
+  # only when the given path exists in config.environment.etc
+  etcRule = arg:
+    let go = { path ? null, mode ? "r", trail ? "" }:
+      lib.optionalString (hasAttr path etc)
+        "${mode} ${config.environment.etc.${path}.source}${trail},";
+    in if isAttrs arg
+    then go arg
+    else go { path = arg; };
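+  # For example, etcRule "hosts" produces a rule of the form
+  #   "r ${config.environment.etc."hosts".source},"
+  # when /etc/hosts is managed via environment.etc, and an empty string otherwise.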
+in
+{
+# FIXME: most of the etcRule calls below have been
+# written systematically by converting from apparmor-profiles's profiles
+# without testing or a deep understanding of their uses,
+# and thus may need more rules or could do with fewer;
+# this remains to be determined case by case,
+# and some may even be completely useless.
+config.security.apparmor.includes = {
+  # This one is included by <tunables/global>
+  # which is usually included before any profile.
+  "abstractions/tunables/alias" = ''
+    alias /bin -> /run/current-system/sw/bin,
+    alias /lib/modules -> /run/current-system/kernel/lib/modules,
+    alias /sbin -> /run/current-system/sw/sbin,
+    alias /usr -> /run/current-system/sw,
+  '';
+  "abstractions/audio" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/audio"
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      "asound.conf"
+      "esound/esd.conf"
+      "libao.conf"
+      { path = "pulse";  trail = "/"; }
+      { path = "pulse";  trail = "/**"; }
+      { path = "sound";  trail = "/"; }
+      { path = "sound";  trail = "/**"; }
+      { path = "alsa/conf.d";  trail = "/"; }
+      { path = "alsa/conf.d";  trail = "/*"; }
+      "openal/alsoft.conf"
+      "wildmidi/wildmidi.conf"
+    ];
+  "abstractions/authentication" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/authentication"
+    # Defined in security.pam
+    include <abstractions/pam>
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      "nologin"
+      "securetty"
+      { path = "security";  trail = "/*"; }
+      "shadow"
+      "gshadow"
+      "pwdb.conf"
+      "default/passwd"
+      "login.defs"
+    ];
+  "abstractions/base" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/base"
+    r ${pkgs.stdenv.cc.libc}/share/locale/**,
+    r ${pkgs.stdenv.cc.libc}/share/locale.alias,
+    r ${config.i18n.glibcLocales}/lib/locale/locale-archive,
+    ${etcRule "localtime"}
+    r ${pkgs.tzdata}/share/zoneinfo/**,
+    r ${pkgs.stdenv.cc.libc}/share/i18n/**,
+  '';
+  "abstractions/bash" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/bash"
+
+    # bash inspects filesystems at startup
+    # and /etc/mtab is linked to /proc/mounts
+    r @{PROC}/mounts,
+
+    # system-wide bash configuration
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      "profile.dos"
+      "profile"
+      "profile.d"
+      { path = "profile.d";  trail = "/*"; }
+      "bashrc"
+      "bash.bashrc"
+      "bash.bashrc.local"
+      "bash_completion"
+      "bash_completion.d"
+      { path = "bash_completion.d";  trail = "/*"; }
+      # bash relies on system-wide readline configuration
+      "inputrc"
+      # run out of /etc/bash.bashrc
+      "DIR_COLORS"
+    ];
+  "abstractions/consoles" = ''
+     include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/consoles"
+  '';
+  "abstractions/cups-client" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/cups-client"
+    ${etcRule "cups/cups-client.conf"}
+  '';
+  "abstractions/dbus-session-strict" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/dbus-session-strict"
+    ${etcRule "machine-id"}
+  '';
+  "abstractions/dconf" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/dconf"
+    ${etcRule { path = "dconf";  trail = "/**"; }}
+  '';
+  "abstractions/dri-common" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/dri-common"
+    ${etcRule "drirc"}
+  '';
+  # The config.fonts.fontconfig NixOS module adds many files to /etc/fonts/
+  # by symlinking them, but without exporting them outside of its NixOS module;
+  # they are therefore added here to this "abstractions/fonts".
+  "abstractions/fonts" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/fonts"
+    ${etcRule { path = "fonts";  trail = "/**"; }}
+  '';
+  "abstractions/gnome" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/gnome"
+    include <abstractions/fonts>
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      { path = "gnome";  trail = "/gtkrc*"; }
+      { path = "gtk";  trail = "/*"; }
+      { path = "gtk-2.0";  trail = "/*"; }
+      { path = "gtk-3.0";  trail = "/*"; }
+      "orbitrc"
+      { path = "pango";  trail = "/*"; }
+      { path = "/etc/gnome-vfs-2.0";  trail = "/modules/"; }
+      { path = "/etc/gnome-vfs-2.0";  trail = "/modules/*"; }
+      "papersize"
+      { path = "cups";  trail = "/lpoptions"; }
+      { path = "gnome";  trail = "/defaults.list"; }
+      { path = "xdg";  trail = "/{,*-}mimeapps.list"; }
+      "xdg/mimeapps.list"
+    ];
+  "abstractions/kde" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/kde"
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      { path = "qt3";  trail = "/kstylerc"; }
+      { path = "qt3";  trail = "/qt_plugins_3.3rc"; }
+      { path = "qt3";  trail = "/qtrc"; }
+      "kderc"
+      { path = "kde3";  trail = "/*"; }
+      "kde4rc"
+      { path = "xdg";  trail = "/kdeglobals"; }
+      { path = "xdg";  trail = "/Trolltech.conf"; }
+    ];
+  "abstractions/kerberosclient" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/kerberosclient"
+    '' + lib.concatMapStringsSep "\n" etcRule [
+    { path = "krb5.keytab"; mode="rk"; }
+    "krb5.conf"
+    "krb5.conf.d"
+    { path = "krb5.conf.d";  trail = "/*"; }
+
+    # config files found via strings on libs
+    "krb.conf"
+    "krb.realms"
+    "srvtab"
+    ];
+  "abstractions/ldapclient" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/ldapclient"
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      "ldap.conf"
+      "ldap.secret"
+      { path = "openldap";  trail = "/*"; }
+      { path = "openldap";  trail = "/cacerts/*"; }
+      { path = "sasl2";  trail = "/*"; }
+    ];
+  "abstractions/likewise" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/likewise"
+  '';
+  "abstractions/mdns" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/mdns"
+    ${etcRule "nss_mdns.conf"}
+  '';
+  "abstractions/nameservice" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/nameservice"
+
+    # Many programs wish to perform nameservice-like operations, such as
+    # looking up users by name or id, groups by name or id, hosts by name
+    # or IP, etc. These operations may be performed through files, dns,
+    # NIS, NIS+, LDAP, hesiod, wins, etc. Allow them all here.
+    mr ${getLib pkgs.nss}/lib/libnss_*.so*,
+    mr ${getLib pkgs.nss}/lib64/libnss_*.so*,
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      "group"
+      "host.conf"
+      "hosts"
+      "nsswitch.conf"
+      "gai.conf"
+      "passwd"
+      "protocols"
+
+      # libtirpc (used for NIS/YP login) needs this
+      "netconfig"
+
+      "resolv.conf"
+
+      { path = "samba";  trail = "/lmhosts"; }
+      "services"
+
+      "default/nss"
+
+      # libnl-3-200 via libnss-gw-name
+      { path = "libnl";  trail = "/classid"; }
+      { path = "libnl-3";  trail = "/classid"; }
+    ];
+  "abstractions/nis" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/nis"
+  '';
+  "abstractions/nss-systemd" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/nss-systemd"
+  '';
+  "abstractions/nvidia" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/nvidia"
+    ${etcRule "vdpau_wrapper.cfg"}
+  '';
+  "abstractions/opencl-common" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/opencl-common"
+    ${etcRule { path = "OpenCL";  trail = "/**"; }}
+  '';
+  "abstractions/opencl-mesa" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/opencl-mesa"
+    ${etcRule "default/drirc"}
+  '';
+  "abstractions/openssl" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/openssl"
+    ${etcRule { path = "ssl";  trail = "/openssl.cnf"; }}
+  '';
+  "abstractions/p11-kit" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/p11-kit"
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      { path = "pkcs11";  trail = "/"; }
+      { path = "pkcs11";  trail = "/pkcs11.conf"; }
+      { path = "pkcs11";  trail = "/modules/"; }
+      { path = "pkcs11";  trail = "/modules/*"; }
+    ];
+  "abstractions/perl" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/perl"
+    ${etcRule { path = "perl";  trail = "/**"; }}
+  '';
+  "abstractions/php" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/php"
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      { path = "php";  trail = "/**/"; }
+      { path = "php5";  trail = "/**/"; }
+      { path = "php7";  trail = "/**/"; }
+      { path = "php";  trail = "/**.ini"; }
+      { path = "php5";  trail = "/**.ini"; }
+      { path = "php7";  trail = "/**.ini"; }
+    ];
+  "abstractions/postfix-common" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/postfix-common"
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      "mailname"
+      { path = "postfix";  trail = "/*.cf"; }
+      "postfix/main.cf"
+      "postfix/master.cf"
+    ];
+  "abstractions/python" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/python"
+  '';
+  "abstractions/qt5" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/qt5"
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      { path = "xdg";  trail = "/QtProject/qtlogging.ini"; }
+      { path = "xdg/QtProject";  trail = "/qtlogging.ini"; }
+      "xdg/QtProject/qtlogging.ini"
+    ];
+  "abstractions/samba" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/samba"
+    ${etcRule { path = "samba";  trail = "/*"; }}
+  '';
+  "abstractions/ssl_certs" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/ssl_certs"
+
+    # For the NixOS module: security.acme
+    r /var/lib/acme/*/cert.pem,
+    r /var/lib/acme/*/chain.pem,
+    r /var/lib/acme/*/fullchain.pem,
+
+    r /etc/pki/tls/certs/,
+
+    '' + lib.concatMapStringsSep "\n" etcRule [
+      "ssl/certs/ca-certificates.crt"
+      "ssl/certs/ca-bundle.crt"
+      "pki/tls/certs/ca-bundle.crt"
+
+      { path = "ssl/trust";  trail = "/"; }
+      { path = "ssl/trust";  trail = "/*"; }
+      { path = "ssl/trust/anchors";  trail = "/"; }
+      { path = "ssl/trust/anchors";  trail = "/**"; }
+      { path = "pki/trust";  trail = "/"; }
+      { path = "pki/trust";  trail = "/*"; }
+      { path = "pki/trust/anchors";  trail = "/"; }
+      { path = "pki/trust/anchors";  trail = "/**"; }
+    ];
+  "abstractions/ssl_keys" = ''
+    # security.acme NixOS module
+    r /var/lib/acme/*/full.pem,
+    r /var/lib/acme/*/key.pem,
+  '';
+  "abstractions/vulkan" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/vulkan"
+    ${etcRule { path = "vulkan/icd.d";  trail = "/"; }}
+    ${etcRule { path = "vulkan/icd.d";  trail = "/*.json"; }}
+  '';
+  "abstractions/winbind" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/winbind"
+    ${etcRule { path = "samba";  trail = "/smb.conf"; }}
+    ${etcRule { path = "samba";  trail = "/dhcp.conf"; }}
+  '';
+  "abstractions/X" = ''
+    include "${pkgs.apparmor-profiles}/etc/apparmor.d/abstractions/X"
+    ${etcRule { path = "X11/cursors";  trail = "/"; }}
+    ${etcRule { path = "X11/cursors";  trail = "/**"; }}
+  '';
+};
+}
diff --git a/nixpkgs/nixos/modules/security/apparmor/profiles.nix b/nixpkgs/nixos/modules/security/apparmor/profiles.nix
new file mode 100644
index 000000000000..0bf90a008655
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/apparmor/profiles.nix
@@ -0,0 +1,5 @@
+{ config, lib, pkgs, ... }:
+let apparmor = config.security.apparmor; in
+{
+config.security.apparmor.packages = [ pkgs.apparmor-profiles ];
+}
diff --git a/nixpkgs/nixos/modules/security/audit.nix b/nixpkgs/nixos/modules/security/audit.nix
new file mode 100644
index 000000000000..afc7dd13039d
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/audit.nix
@@ -0,0 +1,123 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.security.audit;
+  enabled = cfg.enable == "lock" || cfg.enable;
+
+  failureModes = {
+    silent = 0;
+    printk = 1;
+    panic  = 2;
+  };
+
+  disableScript = pkgs.writeScript "audit-disable" ''
+    #!${pkgs.runtimeShell} -eu
+    # Explicitly disable everything, as otherwise journald might start it.
+    auditctl -D
+    auditctl -e 0 -a task,never
+  '';
+
+  # TODO: it seems like people like their rules to be somewhat secret, yet they will not be if
+  # put in the store like this. At the same time, it doesn't feel like a huge deal, and working
+  # around it is a pain, so I'm leaving it like this for now.
+  startScript = pkgs.writeScript "audit-start" ''
+    #!${pkgs.runtimeShell} -eu
+    # Clear out any rules we may start with
+    auditctl -D
+
+    # Put the rules in a temporary file owned and only readable by root
+    rulesfile="$(mktemp)"
+    ${concatMapStrings (x: "echo '${x}' >> $rulesfile\n") cfg.rules}
+
+    # Apply the requested rules
+    auditctl -R "$rulesfile"
+
+    # Enable and configure auditing
+    auditctl \
+      -e ${if cfg.enable == "lock" then "2" else "1"} \
+      -b ${toString cfg.backlogLimit} \
+      -f ${toString failureModes.${cfg.failureMode}} \
+      -r ${toString cfg.rateLimit}
+  '';
+
+  stopScript = pkgs.writeScript "audit-stop" ''
+    #!${pkgs.runtimeShell} -eu
+    # Clear the rules
+    auditctl -D
+
+    # Disable auditing
+    auditctl -e 0
+  '';
+in {
+  options = {
+    security.audit = {
+      enable = mkOption {
+        type        = types.enum [ false true "lock" ];
+        default     = false;
+        description = lib.mdDoc ''
+          Whether to enable the Linux audit system. The special `lock` value can be used to
+          enable auditing and prevent disabling it until a restart. Be careful about locking
+          this, as it will prevent you from changing your audit configuration until you
+          restart. If possible, test your configuration using build-vm beforehand.
+        '';
+      };
+
+      failureMode = mkOption {
+        type        = types.enum [ "silent" "printk" "panic" ];
+        default     = "printk";
+        description = lib.mdDoc "How to handle critical errors in the auditing system";
+      };
+
+      backlogLimit = mkOption {
+        type        = types.int;
+        default     = 64; # Apparently the kernel default
+        description = lib.mdDoc ''
+          The maximum number of outstanding audit buffers allowed; exceeding this is
+          considered a failure and handled in a manner specified by failureMode.
+        '';
+      };
+
+      rateLimit = mkOption {
+        type        = types.int;
+        default     = 0;
+        description = lib.mdDoc ''
+          The maximum messages per second permitted before triggering a failure as
+          specified by failureMode. Setting it to zero disables the limit.
+        '';
+      };
+
+      rules = mkOption {
+        type        = types.listOf types.str; # (types.either types.str (types.submodule rule));
+        default     = [];
+        example     = [ "-a exit,always -F arch=b64 -S execve" ];
+        description = lib.mdDoc ''
+          The ordered audit rules, with each string appearing as one line of the audit.rules file.
+        '';
+      };
+    };
+  };
+
+  config = {
+    systemd.services.audit = {
+      description = "Kernel Auditing";
+      wantedBy = [ "basic.target" ];
+
+      unitConfig = {
+        ConditionVirtualization = "!container";
+        ConditionSecurity = [ "audit" ];
+      };
+
+      path = [ pkgs.audit ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "@${if enabled then startScript else disableScript} audit-start";
+        ExecStop  = "@${stopScript} audit-stop";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/security/auditd.nix b/nixpkgs/nixos/modules/security/auditd.nix
new file mode 100644
index 000000000000..db4b2701ee2e
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/auditd.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  options.security.auditd.enable = mkEnableOption (lib.mdDoc "the Linux Audit daemon");
+
+  config = mkIf config.security.auditd.enable {
+    boot.kernelParams = [ "audit=1" ];
+
+    environment.systemPackages = [ pkgs.audit ];
+
+    systemd.services.auditd = {
+      description = "Linux Audit daemon";
+      wantedBy = [ "basic.target" ];
+
+      unitConfig = {
+        ConditionVirtualization = "!container";
+        ConditionSecurity = [ "audit" ];
+        DefaultDependencies = false;
+      };
+
+      path = [ pkgs.audit ];
+
+      serviceConfig = {
+        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /var/log/audit";
+        ExecStart = "${pkgs.audit}/bin/auditd -l -n -s nochange";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/security/ca.nix b/nixpkgs/nixos/modules/security/ca.nix
new file mode 100644
index 000000000000..3cd56bff04d1
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/ca.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.security.pki;
+
+  cacertPackage = pkgs.cacert.override {
+    blacklist = cfg.caCertificateBlacklist;
+    extraCertificateFiles = cfg.certificateFiles;
+    extraCertificateStrings = cfg.certificates;
+  };
+  caBundle = "${cacertPackage}/etc/ssl/certs/ca-bundle.crt";
+
+in
+
+{
+
+  options = {
+    security.pki.installCACerts = mkEnableOption "Add CA certificates to system" // {
+      default = true;
+      internal = true;
+    };
+
+    security.pki.certificateFiles = mkOption {
+      type = types.listOf types.path;
+      default = [];
+      example = literalExpression ''[ "''${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt" ]'';
+      description = lib.mdDoc ''
+        A list of files containing trusted root certificates in PEM
+        format. These are concatenated to form
+        {file}`/etc/ssl/certs/ca-certificates.crt`, which is
+        used by many programs that use OpenSSL, such as
+        {command}`curl` and {command}`git`.
+      '';
+    };
+
+    security.pki.certificates = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = literalExpression ''
+        [ '''
+            NixOS.org
+            =========
+            -----BEGIN CERTIFICATE-----
+            MIIGUDCCBTigAwIBAgIDD8KWMA0GCSqGSIb3DQEBBQUAMIGMMQswCQYDVQQGEwJJ
+            TDEWMBQGA1UEChMNU3RhcnRDb20gTHRkLjErMCkGA1UECxMiU2VjdXJlIERpZ2l0
+            ...
+            -----END CERTIFICATE-----
+          '''
+        ]
+      '';
+      description = lib.mdDoc ''
+        A list of trusted root certificates in PEM format.
+      '';
+    };
+
+    security.pki.caCertificateBlacklist = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [
+        "WoSign" "WoSign China"
+        "CA WoSign ECC Root"
+        "Certification Authority of WoSign G2"
+      ];
+      description = lib.mdDoc ''
+        A list of blacklisted CA certificate names that won't be imported from
+        the Mozilla Trust Store into
+        {file}`/etc/ssl/certs/ca-certificates.crt`. Use the
+        names from that file.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.installCACerts {
+
+    # NixOS canonical location + Debian/Ubuntu/Arch/Gentoo compatibility.
+    environment.etc."ssl/certs/ca-certificates.crt".source = caBundle;
+
+    # Old NixOS compatibility.
+    environment.etc."ssl/certs/ca-bundle.crt".source = caBundle;
+
+    # CentOS/Fedora compatibility.
+    environment.etc."pki/tls/certs/ca-bundle.crt".source = caBundle;
+
+    # P11-Kit trust source.
+    environment.etc."ssl/trust-source".source = "${cacertPackage.p11kit}/etc/ssl/trust-source";
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/security/chromium-suid-sandbox.nix b/nixpkgs/nixos/modules/security/chromium-suid-sandbox.nix
new file mode 100644
index 000000000000..cab4b9f8d3ab
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/chromium-suid-sandbox.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg     = config.security.chromiumSuidSandbox;
+  sandbox = pkgs.chromium.sandbox;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "programs" "unity3d" "enable" ] [ "security" "chromiumSuidSandbox" "enable" ])
+  ];
+
+  options.security.chromiumSuidSandbox.enable = mkOption {
+    type = types.bool;
+    default = false;
+    description = lib.mdDoc ''
+      Whether to install the Chromium SUID sandbox, which is an executable that
+      Chromium may use in order to achieve sandboxing.
+
+      If you get the error "The SUID sandbox helper binary was found, but is not
+      configured correctly.", turning this on might help.
+
+      Also, if the URL chrome://sandbox tells you that "You are not adequately
+      sandboxed!", turning this on might resolve the issue.
+    '';
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ sandbox ];
+    security.wrappers.${sandbox.passthru.sandboxExecutableName} =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${sandbox}/bin/${sandbox.passthru.sandboxExecutableName}";
+      };
+  };
+}
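
For illustration, using the module above is a single switch; the wrapper name is taken from the chromium package's sandboxExecutableName passthru, so nothing else needs to be configured:

  {
    security.chromiumSuidSandbox.enable = true;
  }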
diff --git a/nixpkgs/nixos/modules/security/dhparams.nix b/nixpkgs/nixos/modules/security/dhparams.nix
new file mode 100644
index 000000000000..9fed7e012b1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/dhparams.nix
@@ -0,0 +1,185 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  inherit (lib) literalExpression mkOption types;
+  cfg = config.security.dhparams;
+  opt = options.security.dhparams;
+
+  bitType = types.addCheck types.int (b: b >= 16) // {
+    name = "bits";
+    description = "integer of at least 16 bits";
+  };
+
+  paramsSubmodule = { name, config, ... }: {
+    options.bits = mkOption {
+      type = bitType;
+      default = cfg.defaultBitSize;
+      defaultText = literalExpression "config.${opt.defaultBitSize}";
+      description = lib.mdDoc ''
+        The bit size for the prime that is used during a Diffie-Hellman
+        key exchange.
+      '';
+    };
+
+    options.path = mkOption {
+      type = types.path;
+      readOnly = true;
+      description = lib.mdDoc ''
+        The resulting path of the generated Diffie-Hellman parameters
+        file for other services to reference. This could be either a
+        store path or a file inside the directory specified by
+        {option}`security.dhparams.path`.
+      '';
+    };
+
+    config.path = let
+      generated = pkgs.runCommand "dhparams-${name}.pem" {
+        nativeBuildInputs = [ pkgs.openssl ];
+      } "openssl dhparam -out \"$out\" ${toString config.bits}";
+    in if cfg.stateful then "${cfg.path}/${name}.pem" else generated;
+  };
+
+in {
+  options = {
+    security.dhparams = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to generate new DH params and clean up old DH params.
+        '';
+      };
+
+      params = mkOption {
+        type = with types; let
+          coerce = bits: { inherit bits; };
+        in attrsOf (coercedTo int coerce (submodule paramsSubmodule));
+        default = {};
+        example = lib.literalExpression "{ nginx.bits = 3072; }";
+        description = lib.mdDoc ''
+          Diffie-Hellman parameters to generate.
+
+          The value is the size (in bits) of the DH params to generate. The
+          generated DH params path can be found in
+          `config.security.dhparams.params.«name».path`.
+
+          ::: {.note}
+          The name of the DH params is taken as being the name of
+          the service it serves and the params will be generated before the
+          said service is started.
+          :::
+
+          ::: {.warning}
+          If you are removing all dhparams from this list, you
+          have to keep {option}`security.dhparams.enable` enabled for at
+          least one activation in order for them to be cleaned up. This also
+          means that if you roll back to a version without any dhparams, the
+          existing ones won't be cleaned up. Of course this only applies if
+          {option}`security.dhparams.stateful` is
+          `true`.
+          :::
+
+          ::: {.note}
+          **For module implementers:** It's recommended
+          to not set a specific bit size here, so that users can easily
+          override this by setting
+          {option}`security.dhparams.defaultBitSize`.
+          :::
+        '';
+      };
+
+      stateful = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether generation of Diffie-Hellman parameters should be stateful or
+          not. If this is enabled, PEM-encoded files for Diffie-Hellman
+          parameters are placed in the directory specified by
+          {option}`security.dhparams.path`. Otherwise the files are
+          created within the Nix store.
+
+          ::: {.note}
+          If this is `false` the resulting store
+          path will be non-deterministic and will be rebuilt every time the
+          `openssl` package changes.
+          :::
+        '';
+      };
+
+      defaultBitSize = mkOption {
+        type = bitType;
+        default = 2048;
+        description = lib.mdDoc ''
+          This allows overriding the default bit size for all of the
+          Diffie-Hellman parameters set in
+          {option}`security.dhparams.params`.
+        '';
+      };
+
+      path = mkOption {
+        type = types.str;
+        default = "/var/lib/dhparams";
+        description = lib.mdDoc ''
+          Path to the directory in which Diffie-Hellman parameters will be
+          stored. This is only relevant if
+          {option}`security.dhparams.stateful` is
+          `true`.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf (cfg.enable && cfg.stateful) {
+    systemd.services = {
+      dhparams-init = {
+        description = "Clean Up Old Diffie-Hellman Parameters";
+
+        # Clean up even when no DH params are set
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.Type = "oneshot";
+
+        script = ''
+          if [ ! -d ${cfg.path} ]; then
+            mkdir -p ${cfg.path}
+          fi
+
+          # Remove old dhparams
+          for file in ${cfg.path}/*; do
+            if [ ! -f "$file" ]; then
+              continue
+            fi
+            ${lib.concatStrings (lib.mapAttrsToList (name: { bits, path, ... }: ''
+              if [ "$file" = ${lib.escapeShellArg path} ] && \
+                 ${pkgs.openssl}/bin/openssl dhparam -in "$file" -text \
+                 | head -n 1 | grep "(${toString bits} bit)" > /dev/null; then
+                continue
+              fi
+            '') cfg.params)}
+            rm "$file"
+          done
+
+          # TODO: Ideally this would be removing the *former* cfg.path, though
+          # this does not seem really important as changes to it are quite
+          # unlikely
+          rmdir --ignore-fail-on-non-empty ${cfg.path}
+        '';
+      };
+    } // lib.mapAttrs' (name: { bits, path, ... }: lib.nameValuePair "dhparams-gen-${name}" {
+      description = "Generate Diffie-Hellman Parameters for ${name}";
+      after = [ "dhparams-init.service" ];
+      before = [ "${name}.service" ];
+      wantedBy = [ "multi-user.target" ];
+      unitConfig.ConditionPathExists = "!${path}";
+      serviceConfig.Type = "oneshot";
+      script = ''
+        mkdir -p ${lib.escapeShellArg cfg.path}
+        ${pkgs.openssl}/bin/openssl dhparam -out ${lib.escapeShellArg path} \
+          ${toString bits}
+      '';
+    }) cfg.params;
+  };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
+}
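
A usage sketch for the module above; the parameter set name "nginx" is illustrative, and the bare integer relies on the coercedTo int form of the params type:

  { config, ... }:
  {
    security.dhparams.enable = true;

    # A bare integer is coerced to { bits = <n>; }.
    security.dhparams.params.nginx = 3072;

    # Other configuration can then reference the generated file via
    #   config.security.dhparams.params.nginx.path
    # which is a file below security.dhparams.path when stateful (the default),
    # or a store path otherwise.
  }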
diff --git a/nixpkgs/nixos/modules/security/doas.nix b/nixpkgs/nixos/modules/security/doas.nix
new file mode 100644
index 000000000000..115ca33efb5c
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/doas.nix
@@ -0,0 +1,292 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.security.doas;
+
+  inherit (pkgs) doas;
+
+  mkUsrString = user: toString user;
+
+  mkGrpString = group: ":${toString group}";
+
+  mkOpts = rule: concatStringsSep " " [
+    (optionalString rule.noPass "nopass")
+    (optionalString rule.noLog "nolog")
+    (optionalString rule.persist "persist")
+    (optionalString rule.keepEnv "keepenv")
+    "setenv { SSH_AUTH_SOCK TERMINFO TERMINFO_DIRS ${concatStringsSep " " rule.setEnv} }"
+  ];
+
+  mkArgs = rule:
+    if (rule.args == null) then ""
+    else if (length rule.args == 0) then "args"
+    else "args ${concatStringsSep " " rule.args}";
+
+  mkRule = rule:
+    let
+      opts = mkOpts rule;
+
+      as = optionalString (rule.runAs != null) "as ${rule.runAs}";
+
+      cmd = optionalString (rule.cmd != null) "cmd ${rule.cmd}";
+
+      args = mkArgs rule;
+    in
+    optionals (length cfg.extraRules > 0) [
+      (
+        optionalString (length rule.users > 0)
+          (map (usr: "permit ${opts} ${mkUsrString usr} ${as} ${cmd} ${args}") rule.users)
+      )
+      (
+        optionalString (length rule.groups > 0)
+          (map (grp: "permit ${opts} ${mkGrpString grp} ${as} ${cmd} ${args}") rule.groups)
+      )
+    ];
+in
+{
+
+  ###### interface
+
+  options.security.doas = {
+
+    enable = mkOption {
+      type = with types; bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the {command}`doas` command, which allows
+        non-root users to execute commands as root.
+      '';
+    };
+
+    wheelNeedsPassword = mkOption {
+      type = with types; bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether users of the `wheel` group must provide a password to
+        run commands as super user via {command}`doas`.
+      '';
+    };
+
+    extraRules = mkOption {
+      default = [];
+      description = lib.mdDoc ''
+        Define specific rules to be set in the
+        {file}`/etc/doas.conf` file. More specific rules should
+        come after more general ones in order to yield the expected behavior.
+        You can use `mkBefore` and/or `mkAfter` to ensure
+        this is the case when configuration options are merged. Be aware that
+        this option cannot be used to override the behaviour allowing
+        passwordless operation for root.
+      '';
+      example = literalExpression ''
+        [
+          # Allow execution of any command by any user in group doas, requiring
+          # a password and keeping any previously-defined environment variables.
+          { groups = [ "doas" ]; noPass = false; keepEnv = true; }
+
+          # Allow execution of "/home/root/secret.sh" by user `backup` OR user
+          # `database` OR any member of the group with GID `1006`, without a
+          # password.
+          { users = [ "backup" "database" ]; groups = [ 1006 ];
+            cmd = "/home/root/secret.sh"; noPass = true; }
+
+          # Allow any member of group `bar` to run `/home/baz/cmd1.sh` as user
+          # `foo` with argument `hello-doas`.
+          { groups = [ "bar" ]; runAs = "foo";
+            cmd = "/home/baz/cmd1.sh"; args = [ "hello-doas" ]; }
+
+          # Allow any member of group `bar` to run `/home/baz/cmd2.sh` as user
+          # `foo` with no arguments.
+          { groups = [ "bar" ]; runAs = "foo";
+            cmd = "/home/baz/cmd2.sh"; args = [ ]; }
+
+          # Allow user `abusers` to execute "nano" and unset the value of
+          # SSH_AUTH_SOCK, override the value of ALPHA to 1, and inherit the
+          # value of BETA from the current environment.
+          { users = [ "abusers" ]; cmd = "nano";
+            setEnv = [ "-SSH_AUTH_SOCK" "ALPHA=1" "BETA" ]; }
+        ]
+      '';
+      type = with types; listOf (
+        submodule {
+          options = {
+
+            noPass = mkOption {
+              type = with types; bool;
+              default = false;
+              description = lib.mdDoc ''
+                If `true`, the user is not required to enter a
+                password.
+              '';
+            };
+
+            noLog = mkOption {
+              type = with types; bool;
+              default = false;
+              description = lib.mdDoc ''
+                If `true`, successful executions will not be logged
+                to
+                {manpage}`syslogd(8)`.
+              '';
+            };
+
+            persist = mkOption {
+              type = with types; bool;
+              default = false;
+              description = lib.mdDoc ''
+                If `true`, do not ask for a password again for some
+                time after the user successfully authenticates.
+              '';
+            };
+
+            keepEnv = mkOption {
+              type = with types; bool;
+              default = false;
+              description = lib.mdDoc ''
+                If `true`, environment variables other than those
+                listed in
+                {manpage}`doas(1)`
+                are kept when creating the environment for the new process.
+              '';
+            };
+
+            setEnv = mkOption {
+              type = with types; listOf str;
+              default = [];
+              description = lib.mdDoc ''
+                Keep or set the specified variables. Variables may also be
+                removed with a leading '-' or set using
+                `variable=value`. If the first character of
+                `value` is a '$', the value to be set is taken from
+                the existing environment variable of the indicated name. This
+                option is processed after the default environment has been
+                created.
+
+                NOTE: All rules have `setenv { SSH_AUTH_SOCK }` by
+                default. To prevent `SSH_AUTH_SOCK` from being
+                inherited, add `"-SSH_AUTH_SOCK"` anywhere in this
+                list.
+              '';
+            };
+
+            users = mkOption {
+              type = with types; listOf (either str int);
+              default = [];
+              description = lib.mdDoc "The usernames / UIDs this rule should apply for.";
+            };
+
+            groups = mkOption {
+              type = with types; listOf (either str int);
+              default = [];
+              description = lib.mdDoc "The groups / GIDs this rule should apply for.";
+            };
+
+            runAs = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                Which user or group the specified command is allowed to run as.
+                When set to `null` (the default), all users are
+                allowed.
+
+                A user can be specified using just the username:
+                `"foo"`. It is also possible to only allow running as
+                a specific group with `":bar"`.
+              '';
+            };
+
+            cmd = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                The command the user is allowed to run. When set to
+                `null` (the default), all commands are allowed.
+
+                NOTE: It is best practice to specify absolute paths. If a
+                relative path is specified, only a restricted PATH will be
+                searched.
+              '';
+            };
+
+            args = mkOption {
+              type = with types; nullOr (listOf str);
+              default = null;
+              description = lib.mdDoc ''
+                Arguments that must be provided to the command. When set to
+                `[]`, the command must be run without any arguments.
+              '';
+            };
+          };
+        }
+      );
+    };
+
+    extraConfig = mkOption {
+      type = with types; lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra configuration text appended to {file}`doas.conf`. Be aware that
+        this option cannot be used to override the behaviour allowing
+        passwordless operation for root.
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    security.doas.extraRules = mkOrder 600 [
+      {
+        groups = [ "wheel" ];
+        noPass = !cfg.wheelNeedsPassword;
+      }
+    ];
+
+    security.wrappers.doas =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${doas}/bin/doas";
+      };
+
+    environment.systemPackages = [
+      doas
+    ];
+
+    security.pam.services.doas = {
+      allowNullPassword = true;
+      sshAgentAuth = true;
+    };
+
+    environment.etc."doas.conf" = {
+      source = pkgs.runCommand "doas-conf"
+        {
+          src = pkgs.writeText "doas-conf-in" ''
+            # To modify this file, set the NixOS options
+            # `security.doas.extraRules` or `security.doas.extraConfig`. To
+            # completely replace the contents of this file, use
+            # `environment.etc."doas.conf"`.
+
+            # extraRules
+            ${concatStringsSep "\n" (lists.flatten (map mkRule cfg.extraRules))}
+
+            # extraConfig
+            ${cfg.extraConfig}
+
+            # "root" is allowed to do anything.
+            permit nopass keepenv root
+          '';
+          preferLocalBuild = true;
+        }
+        # Make sure that the doas.conf file is syntactically valid.
+        "${pkgs.buildPackages.doas}/bin/doas -C $src && cp $src $out";
+      mode = "0440";
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ cole-h ];
+}
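
A usage sketch combining the wheel default with one extra rule; the username and command path are placeholders:

  {
    security.doas.enable = true;
    security.doas.wheelNeedsPassword = false;

    security.doas.extraRules = [
      # Let the "backup" user run one specific command without a password.
      { users = [ "backup" ]; cmd = "/run/current-system/sw/bin/rsync"; noPass = true; }
    ];
  }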
diff --git a/nixpkgs/nixos/modules/security/duosec.nix b/nixpkgs/nixos/modules/security/duosec.nix
new file mode 100644
index 000000000000..2a855a77e3a3
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/duosec.nix
@@ -0,0 +1,249 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.security.duosec;
+
+  boolToStr = b: if b then "yes" else "no";
+
+  configFilePam = ''
+    [duo]
+    ikey=${cfg.integrationKey}
+    host=${cfg.host}
+    ${optionalString (cfg.groups != "") ("groups="+cfg.groups)}
+    failmode=${cfg.failmode}
+    pushinfo=${boolToStr cfg.pushinfo}
+    autopush=${boolToStr cfg.autopush}
+    prompts=${toString cfg.prompts}
+    fallback_local_ip=${boolToStr cfg.fallbackLocalIP}
+  '';
+
+  configFileLogin = configFilePam + ''
+    motd=${boolToStr cfg.motd}
+    accept_env_factor=${boolToStr cfg.acceptEnvFactor}
+  '';
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "security" "duosec" "group" ] [ "security" "duosec" "groups" ])
+    (mkRenamedOptionModule [ "security" "duosec" "ikey" ] [ "security" "duosec" "integrationKey" ])
+    (mkRemovedOptionModule [ "security" "duosec" "skey" ] "The insecure security.duosec.skey option has been replaced by a new security.duosec.secretKeyFile option. Use this new option to store a secure copy of your key instead.")
+  ];
+
+  options = {
+    security.duosec = {
+      ssh.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "If enabled, protect SSH logins with Duo Security.";
+      };
+
+      pam.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "If enabled, protect logins with Duo Security using PAM support.";
+      };
+
+      integrationKey = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Integration key.";
+      };
+
+      secretKeyFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          A file containing your secret key. The security of your Duo application is tied to the security of your secret key.
+        '';
+        example = "/run/keys/duo-skey";
+      };
+
+      host = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Duo API hostname.";
+      };
+
+      groups = mkOption {
+        type = types.str;
+        default = "";
+        example = "users,!wheel,!*admin guests";
+        description = lib.mdDoc ''
+          If specified, Duo authentication is required only for users
+          whose primary group or supplementary group list matches one
+          of the space-separated pattern lists. Refer to
+          <https://duo.com/docs/duounix> for details.
+        '';
+      };
+
+      failmode = mkOption {
+        type = types.enum [ "safe" "secure" ];
+        default = "safe";
+        description = lib.mdDoc ''
+          On service or configuration errors that prevent Duo
+          authentication, fail "safe" (allow access) or "secure" (deny
+          access). The default is "safe".
+        '';
+      };
+
+      pushinfo = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Include information such as the command to be executed in
+          the Duo Push message.
+        '';
+      };
+
+      autopush = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If `true`, Duo Unix will automatically send
+          a push login request to the user’s phone, falling back on a
+          phone call if push is unavailable. If
+          `false`, the user will be prompted to
+          choose an authentication method. When configured with
+          `autopush = true`, we recommend setting
+          `prompts = 1`.
+        '';
+      };
+
+      motd = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Print the contents of `/etc/motd` to screen
+          after a successful login.
+        '';
+      };
+
+      prompts = mkOption {
+        type = types.enum [ 1 2 3 ];
+        default = 3;
+        description = lib.mdDoc ''
+          If a user fails to authenticate with a second factor, Duo
+          Unix will prompt the user to authenticate again. This option
+          sets the maximum number of prompts that Duo Unix will
+          display before denying access. Must be 1, 2, or 3. Default
+          is 3.
+
+          For example, when `prompts = 1`, the user
+          will have to successfully authenticate on the first prompt,
+          whereas with `prompts = 2`, a user who
+          enters incorrect information at the initial prompt
+          will be prompted to authenticate again.
+
+          When configured with `autopush = true`, we
+          recommend setting `prompts = 1`.
+        '';
+      };
+
+      acceptEnvFactor = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Look for factor selection or passcode in the
+          `$DUO_PASSCODE` environment variable before
+          prompting the user for input.
+
+          When `$DUO_PASSCODE` is non-empty, it will override
+          autopush. The SSH client will need `SendEnv DUO_PASSCODE` in
+          its configuration, and the SSH server will similarly need
+          `AcceptEnv DUO_PASSCODE`.
+        '';
+      };
+
+      fallbackLocalIP = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Duo Unix reports the IP address of the authorizing user, for
+          the purposes of authorization and whitelisting. If Duo Unix
+          cannot detect the IP address of the client, setting
+          `fallbackLocalIP = yes` will cause Duo Unix
+          to send the IP address of the server it is running on.
+
+          If you are using IP whitelisting, enabling this option could
+          cause unauthorized logins if the local IP is listed in the
+          whitelist.
+        '';
+      };
+
+      allowTcpForwarding = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          By default, enabling Duo Security will disable SSH TCP
+          forwarding. By enabling this option, you potentially
+          undermine some of the SSH-based login security. Note that this
+          is not needed if you use PAM.
+        '';
+      };
+    };
+  };
+
+  config = mkIf (cfg.ssh.enable || cfg.pam.enable) {
+    environment.systemPackages = [ pkgs.duo-unix ];
+
+    security.wrappers.login_duo =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${pkgs.duo-unix.out}/bin/login_duo";
+      };
+
+    systemd.services.login-duo = lib.mkIf cfg.ssh.enable {
+      wantedBy = [ "sysinit.target" ];
+      before = [ "sysinit.target" ];
+      unitConfig.DefaultDependencies = false;
+      script = ''
+        if test -f "${cfg.secretKeyFile}"; then
+          mkdir -m 0755 -p /etc/duo
+
+          umask 0077
+          conf="$(mktemp)"
+          {
+            cat ${pkgs.writeText "login_duo.conf" configFileLogin}
+            printf 'skey = %s\n' "$(cat ${cfg.secretKeyFile})"
+          } >"$conf"
+
+          chown sshd "$conf"
+          mv -fT "$conf" /etc/duo/login_duo.conf
+        fi
+      '';
+    };
+
+    systemd.services.pam-duo = lib.mkIf cfg.pam.enable {
+      wantedBy = [ "sysinit.target" ];
+      before = [ "sysinit.target" ];
+      unitConfig.DefaultDependencies = false;
+      script = ''
+        if test -f "${cfg.secretKeyFile}"; then
+          mkdir -m 0755 -p /etc/duo
+
+          umask 0077
+          conf="$(mktemp)"
+          {
+            cat ${pkgs.writeText "login_duo.conf" configFilePam}
+            printf 'skey = %s\n' "$(cat ${cfg.secretKeyFile})"
+          } >"$conf"
+
+          mv -fT "$conf" /etc/duo/pam_duo.conf
+        fi
+      '';
+    };
+
+    /* If PAM *and* SSH are enabled, then don't do anything special.
+    If PAM isn't used, set the default SSH-only options. */
+    services.openssh.extraConfig = mkIf (cfg.ssh.enable || cfg.pam.enable) (
+    if cfg.pam.enable then "UseDNS no" else ''
+      # Duo Security configuration
+      ForceCommand ${config.security.wrapperDir}/login_duo
+      PermitTunnel no
+      ${optionalString (!cfg.allowTcpForwarding) ''
+        AllowTcpForwarding no
+      ''}
+    '');
+  };
+}
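
A sketch of protecting SSH logins with the module above; the integration key, API host and key file are placeholders, and the secret key deliberately lives outside the Nix store:

  {
    security.duosec = {
      ssh.enable = true;
      integrationKey = "DIXXXXXXXXXXXXXXXXXX";   # placeholder
      host = "api-XXXXXXXX.duosecurity.com";     # placeholder
      secretKeyFile = "/run/keys/duo-skey";      # readable by root only, not in the store
      failmode = "secure";
    };
  }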
diff --git a/nixpkgs/nixos/modules/security/google_oslogin.nix b/nixpkgs/nixos/modules/security/google_oslogin.nix
new file mode 100644
index 000000000000..95975943ff80
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/google_oslogin.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.security.googleOsLogin;
+  package = pkgs.google-guest-oslogin;
+
+in
+
+{
+
+  options = {
+
+    security.googleOsLogin.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable Google OS Login.
+
+        The OS Login package enables the following components:
+
+        - an AuthorizedKeysCommand to query valid SSH keys from the user's
+          OS Login profile during the ssh authentication phase,
+        - an NSS module to provide user and group information,
+        - a PAM module for the sshd service, providing authorization and
+          authentication support, allowing the system to use data stored in
+          Google Cloud IAM permissions to control both the ability to log into
+          an instance and the ability to perform operations as root (sudo).
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    security.pam.services.sshd = {
+      makeHomeDir = true;
+      googleOsLoginAccountVerification = true;
+      googleOsLoginAuthentication = true;
+    };
+
+    security.sudo.extraConfig = ''
+      #includedir /run/google-sudoers.d
+    '';
+    security.sudo-rs.extraConfig = ''
+      #includedir /run/google-sudoers.d
+    '';
+
+    systemd.tmpfiles.rules = [
+      "d /run/google-sudoers.d 750 root root -"
+      "d /var/google-users.d 750 root root -"
+    ];
+
+    systemd.packages = [ package ];
+    systemd.timers.google-oslogin-cache.wantedBy = [ "timers.target" ];
+
+    # enable the nss module, so user lookups etc. work
+    system.nssModules = [ package ];
+    system.nssDatabases.passwd = [ "cache_oslogin" "oslogin" ];
+    system.nssDatabases.group = [ "cache_oslogin" "oslogin" ];
+
+    # Ugly: sshd refuses to start if a store path is given because /nix/store is group-writable.
+    # So indirect by a symlink.
+    environment.etc."ssh/authorized_keys_command_google_oslogin" = {
+      mode = "0755";
+      text = ''
+        #!/bin/sh
+        exec ${package}/bin/google_authorized_keys "$@"
+      '';
+    };
+    services.openssh.authorizedKeysCommand = "/etc/ssh/authorized_keys_command_google_oslogin %u";
+    services.openssh.authorizedKeysCommandUser = "nobody";
+  };
+
+}
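
Using the module above is a single switch; the PAM, NSS and sshd wiring shown in its config section is applied automatically:

  {
    security.googleOsLogin.enable = true;
  }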
diff --git a/nixpkgs/nixos/modules/security/ipa.nix b/nixpkgs/nixos/modules/security/ipa.nix
new file mode 100644
index 000000000000..69a670cd5e4a
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/ipa.nix
@@ -0,0 +1,258 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+with lib; let
+  cfg = config.security.ipa;
+  pyBool = x:
+    if x
+    then "True"
+    else "False";
+
+  ldapConf = pkgs.writeText "ldap.conf" ''
+    # Turning this off breaks GSSAPI used with krb5 when rdns = false
+    SASL_NOCANON    on
+
+    URI ldaps://${cfg.server}
+    BASE ${cfg.basedn}
+    TLS_CACERT /etc/ipa/ca.crt
+  '';
+  nssDb =
+    pkgs.runCommand "ipa-nssdb"
+    {
+      nativeBuildInputs = [pkgs.nss.tools];
+    } ''
+      mkdir -p $out
+      certutil -d $out -N --empty-password
+      certutil -d $out -A --empty-password -n "${cfg.realm} IPA CA" -t CT,C,C -i ${cfg.certificate}
+    '';
+in {
+  options = {
+    security.ipa = {
+      enable = mkEnableOption (lib.mdDoc "FreeIPA domain integration");
+
+      certificate = mkOption {
+        type = types.package;
+        description = lib.mdDoc ''
+          IPA server CA certificate.
+
+          Use `nix-prefetch-url http://$server/ipa/config/ca.crt` to
+          obtain the file and the hash.
+        '';
+        example = literalExpression ''
+          pkgs.fetchurl {
+            url = "http://ipa.example.com/ipa/config/ca.crt";
+            sha256 = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+          };
+        '';
+      };
+
+      domain = mkOption {
+        type = types.str;
+        example = "example.com";
+        description = lib.mdDoc "Domain of the IPA server.";
+      };
+
+      realm = mkOption {
+        type = types.str;
+        example = "EXAMPLE.COM";
+        description = lib.mdDoc "Kerberos realm.";
+      };
+
+      server = mkOption {
+        type = types.str;
+        example = "ipa.example.com";
+        description = lib.mdDoc "IPA Server hostname.";
+      };
+
+      basedn = mkOption {
+        type = types.str;
+        example = "dc=example,dc=com";
+        description = lib.mdDoc "Base DN to use when performing LDAP operations.";
+      };
+
+      offlinePasswords = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to store offline passwords when the server is down.";
+      };
+
+      cacheCredentials = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to cache credentials.";
+      };
+
+      ifpAllowedUids = mkOption {
+        type = types.listOf types.str;
+        default = ["root"];
+        description = lib.mdDoc "A list of users allowed to access the ifp dbus interface.";
+      };
+
+      dyndns = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to enable FreeIPA automatic hostname updates.";
+        };
+
+        interface = mkOption {
+          type = types.str;
+          example = "eth0";
+          default = "*";
+          description = lib.mdDoc "Network interface to perform hostname updates through.";
+        };
+      };
+
+      chromiumSupport = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to whitelist the FreeIPA domain in Chromium.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !config.krb5.enable;
+        message = "krb5 must be disabled through `krb5.enable` for FreeIPA integration to work.";
+      }
+      {
+        assertion = !config.users.ldap.enable;
+        message = "ldap must be disabled through `users.ldap.enable` for FreeIPA integration to work.";
+      }
+    ];
+
+    environment.systemPackages = with pkgs; [krb5Full freeipa];
+
+    environment.etc = {
+      "ipa/default.conf".text = ''
+        [global]
+        basedn = ${cfg.basedn}
+        realm = ${cfg.realm}
+        domain = ${cfg.domain}
+        server = ${cfg.server}
+        host = ${config.networking.hostName}
+        xmlrpc_uri = https://${cfg.server}/ipa/xml
+        enable_ra = True
+      '';
+
+      "ipa/nssdb".source = nssDb;
+
+      "krb5.conf".text = ''
+        [libdefaults]
+         default_realm = ${cfg.realm}
+         dns_lookup_realm = false
+         dns_lookup_kdc = true
+         rdns = false
+         ticket_lifetime = 24h
+         forwardable = true
+         udp_preference_limit = 0
+
+        [realms]
+         ${cfg.realm} = {
+          kdc = ${cfg.server}:88
+          master_kdc = ${cfg.server}:88
+          admin_server = ${cfg.server}:749
+          default_domain = ${cfg.domain}
+          pkinit_anchors = FILE:/etc/ipa/ca.crt
+        }
+
+        [domain_realm]
+         .${cfg.domain} = ${cfg.realm}
+         ${cfg.domain} = ${cfg.realm}
+         ${cfg.server} = ${cfg.realm}
+
+        [dbmodules]
+          ${cfg.realm} = {
+            db_library = ${pkgs.freeipa}/lib/krb5/plugins/kdb/ipadb.so
+          }
+      '';
+
+      "openldap/ldap.conf".source = ldapConf;
+    };
+
+    environment.etc."chromium/policies/managed/freeipa.json" = mkIf cfg.chromiumSupport {
+      text = ''
+        { "AuthServerWhitelist": "*.${cfg.domain}" }
+      '';
+    };
+
+    system.activationScripts.ipa = stringAfter ["etc"] ''
+      # libcurl requires a hard copy of the certificate
+      if ! ${pkgs.diffutils}/bin/diff ${cfg.certificate} /etc/ipa/ca.crt > /dev/null 2>&1; then
+        rm -f /etc/ipa/ca.crt
+        cp ${cfg.certificate} /etc/ipa/ca.crt
+      fi
+
+      if [ ! -f /etc/krb5.keytab ]; then
+        cat <<EOF
+
+          In order to complete FreeIPA integration, please join the domain by completing the following steps:
+          1. Authenticate as an IPA user authorized to join new hosts, e.g. kinit admin@${cfg.realm}
+          2. Join the domain and obtain the keytab file: ipa-join
+          3. Install the keytab file: sudo install -m 600 krb5.keytab /etc/
+          4. Restart sssd systemd service: sudo systemctl restart sssd
+
+      EOF
+      fi
+    '';
+
+    services.sssd.config = ''
+      [domain/${cfg.domain}]
+      id_provider = ipa
+      auth_provider = ipa
+      access_provider = ipa
+      chpass_provider = ipa
+
+      ipa_domain = ${cfg.domain}
+      ipa_server = _srv_, ${cfg.server}
+      ipa_hostname = ${config.networking.hostName}.${cfg.domain}
+
+      cache_credentials = ${pyBool cfg.cacheCredentials}
+      krb5_store_password_if_offline = ${pyBool cfg.offlinePasswords}
+      ${optionalString ((toLower cfg.domain) != (toLower cfg.realm))
+        "krb5_realm = ${cfg.realm}"}
+
+      dyndns_update = ${pyBool cfg.dyndns.enable}
+      dyndns_iface = ${cfg.dyndns.interface}
+
+      ldap_tls_cacert = /etc/ipa/ca.crt
+      ldap_user_extra_attrs = mail:mail, sn:sn, givenname:givenname, telephoneNumber:telephoneNumber, lock:nsaccountlock
+
+      [sssd]
+      debug_level = 65510
+      services = nss, sudo, pam, ssh, ifp
+      domains = ${cfg.domain}
+
+      [nss]
+      homedir_substring = /home
+
+      [pam]
+      pam_pwd_expiration_warning = 3
+      pam_verbosity = 3
+
+      [sudo]
+      debug_level = 65510
+
+      [autofs]
+
+      [ssh]
+
+      [pac]
+
+      [ifp]
+      user_attributes = +mail, +telephoneNumber, +givenname, +sn, +lock
+      allowed_uids = ${concatStringsSep ", " cfg.ifpAllowedUids}
+    '';
+
+    services.ntp.servers = singleton cfg.server;
+    services.sssd.enable = true;
+    services.ntp.enable = true;
+
+    security.pki.certificateFiles = singleton cfg.certificate;
+  };
+}
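
A usage sketch for the module above with placeholder domain values; the certificate hash must be replaced with the value reported by nix-prefetch-url:

  { pkgs, ... }:
  {
    security.ipa = {
      enable = true;
      domain = "example.com";
      realm = "EXAMPLE.COM";
      server = "ipa.example.com";
      basedn = "dc=example,dc=com";
      certificate = pkgs.fetchurl {
        url = "http://ipa.example.com/ipa/config/ca.crt";
        sha256 = "0000000000000000000000000000000000000000000000000000";  # placeholder
      };
    };
  }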
diff --git a/nixpkgs/nixos/modules/security/lock-kernel-modules.nix b/nixpkgs/nixos/modules/security/lock-kernel-modules.nix
new file mode 100644
index 000000000000..333b64801426
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/lock-kernel-modules.nix
@@ -0,0 +1,57 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  meta = {
+    maintainers = [ maintainers.joachifm ];
+  };
+
+  options = {
+    security.lockKernelModules = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Disable kernel module loading once the system is fully initialised.
+        Module loading is disabled until the next reboot. Problems caused
+        by delayed module loading can be fixed by adding the module(s) in
+        question to {option}`boot.kernelModules`.
+      '';
+    };
+  };
+
+  config = mkIf config.security.lockKernelModules {
+    boot.kernelModules = concatMap (x:
+      optionals (x.device != null) (
+        if x.fsType == "vfat"
+        then [ "vfat" "nls-cp437" "nls-iso8859-1" ]
+        else [ x.fsType ])
+      ) config.system.build.fileSystems;
+
+    systemd.services.disable-kernel-module-loading = {
+      description = "Disable kernel module loading";
+
+      wants = [ "systemd-udevd.service" ];
+      wantedBy = [ config.systemd.defaultUnit ];
+
+      after =
+        [ "firewall.service"
+          "systemd-modules-load.service"
+           config.systemd.defaultUnit
+        ];
+
+      unitConfig.ConditionPathIsReadWrite = "/proc/sys/kernel";
+
+      serviceConfig =
+        { Type = "oneshot";
+          RemainAfterExit = true;
+          TimeoutSec = 180;
+        };
+
+      script = ''
+        ${pkgs.udev}/bin/udevadm settle
+        echo -n 1 >/proc/sys/kernel/modules_disabled
+      '';
+    };
+  };
+}
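
A sketch of how this option interacts with modules that are normally loaded on demand: once the unit has run, loading is locked, so anything needed later must be listed eagerly (the module name below is illustrative):

  {
    security.lockKernelModules = true;

    # Pre-load modules that would otherwise be auto-loaded after boot.
    boot.kernelModules = [ "veth" ];
  }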
diff --git a/nixpkgs/nixos/modules/security/misc.nix b/nixpkgs/nixos/modules/security/misc.nix
new file mode 100644
index 000000000000..cd48eade7784
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/misc.nix
@@ -0,0 +1,140 @@
+{ config, lib, ... }:
+
+with lib;
+
+{
+  meta = {
+    maintainers = [ maintainers.joachifm ];
+  };
+
+  imports = [
+    (lib.mkRenamedOptionModule [ "security" "virtualization" "flushL1DataCache" ] [ "security" "virtualisation" "flushL1DataCache" ])
+  ];
+
+  options = {
+    security.allowUserNamespaces = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to allow creation of user namespaces.
+
+        The motivation for disabling user namespaces is the potential
+        presence of code paths where the kernel's permission checking
+        logic fails to account for namespacing, instead permitting a
+        namespaced process to act outside the namespace with the same
+        privileges as it would have inside it.  This is particularly
+        damaging in the common case of running as root within the namespace.
+
+        When user namespace creation is disallowed, attempting to create a
+        user namespace fails with "no space left on device" (ENOSPC).
+        root may re-enable user namespace creation at runtime.
+      '';
+    };
+
+    security.unprivilegedUsernsClone = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        When disabled, unprivileged users will not be able to create new namespaces.
+        By default unprivileged user namespaces are disabled.
+        This option only has an effect when the hardened profile is used.
+      '';
+    };
+
+    security.protectKernelImage = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to prevent replacing the running kernel image.
+      '';
+    };
+
+    security.allowSimultaneousMultithreading = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to allow SMT/hyperthreading.  Disabling SMT means that only
+        physical CPU cores will be usable at runtime, potentially at
+        significant performance cost.
+
+        The primary motivation for disabling SMT is to mitigate the risk of
+        leaking data between threads running on the same CPU core (due to
+        e.g., shared caches).  This attack vector is unproven.
+
+        Disabling SMT is a supplement to the L1 data cache flushing mitigation
+        (see [](#opt-security.virtualisation.flushL1DataCache))
+        versus malicious VM guests (SMT could "bring back" previously flushed
+        data).
+      '';
+    };
+
+    security.forcePageTableIsolation = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to force-enable the Page Table Isolation (PTI) Linux kernel
+        feature even on CPU models that claim to be safe from Meltdown.
+
+        This hardening feature is most beneficial to systems that run untrusted
+        workloads that rely on address space isolation for security.
+      '';
+    };
+
+    security.virtualisation.flushL1DataCache = mkOption {
+      type = types.nullOr (types.enum [ "never" "cond" "always" ]);
+      default = null;
+      description = lib.mdDoc ''
+        Whether the hypervisor should flush the L1 data cache before
+        entering guests.
+        See also [](#opt-security.allowSimultaneousMultithreading).
+
+        - `null`: uses the kernel default
+        - `"never"`: disables L1 data cache flushing entirely.
+          May be appropriate if all guests are trusted.
+        - `"cond"`: flushes L1 data cache only for pre-determined
+          code paths.  May leak information about the host address space
+          layout.
+        - `"always"`: flushes L1 data cache every time the hypervisor
+          enters the guest.  May incur significant performance cost.
+      '';
+    };
+  };
+
+  config = mkMerge [
+    (mkIf (!config.security.allowUserNamespaces) {
+      # Setting the number of allowed user namespaces to 0 effectively disables
+      # the feature at runtime.  Note that root may raise the limit again
+      # at any time.
+      boot.kernel.sysctl."user.max_user_namespaces" = 0;
+
+      assertions = [
+        { assertion = config.nix.settings.sandbox -> config.security.allowUserNamespaces;
+          message = "`nix.settings.sandbox = true` conflicts with `!security.allowUserNamespaces`.";
+        }
+      ];
+    })
+
+    (mkIf config.security.unprivilegedUsernsClone {
+      boot.kernel.sysctl."kernel.unprivileged_userns_clone" = mkDefault true;
+    })
+
+    (mkIf config.security.protectKernelImage {
+      # Disable hibernation (allows replacing the running kernel)
+      boot.kernelParams = [ "nohibernate" ];
+      # Prevent replacing the running kernel image w/o reboot
+      boot.kernel.sysctl."kernel.kexec_load_disabled" = mkDefault true;
+    })
+
+    (mkIf (!config.security.allowSimultaneousMultithreading) {
+      boot.kernelParams = [ "nosmt" ];
+    })
+
+    (mkIf config.security.forcePageTableIsolation {
+      boot.kernelParams = [ "pti=on" ];
+    })
+
+    (mkIf (config.security.virtualisation.flushL1DataCache != null) {
+      boot.kernelParams = [ "kvm-intel.vmentry_l1d_flush=${config.security.virtualisation.flushL1DataCache}" ];
+    })
+  ];
+}
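
A hardening sketch combining several of the switches above; note the assertion in the module, which makes disabling user namespaces incompatible with nix.settings.sandbox = true:

  {
    security.protectKernelImage = true;
    security.forcePageTableIsolation = true;
    security.allowSimultaneousMultithreading = false;
    security.virtualisation.flushL1DataCache = "cond";

    # Only valid together with nix.settings.sandbox = false (see the assertion).
    # security.allowUserNamespaces = false;
  }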
diff --git a/nixpkgs/nixos/modules/security/oath.nix b/nixpkgs/nixos/modules/security/oath.nix
new file mode 100644
index 000000000000..334286653846
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/oath.nix
@@ -0,0 +1,50 @@
+# This module provides configuration for the OATH PAM modules.
+
+{ lib, ... }:
+
+with lib;
+
+{
+  options = {
+
+    security.pam.oath = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the OATH (one-time password) PAM module.
+        '';
+      };
+
+      digits = mkOption {
+        type = types.enum [ 6 7 8 ];
+        default = 6;
+        description = lib.mdDoc ''
+          Specify the length of the one-time password in number of
+          digits.
+        '';
+      };
+
+      window = mkOption {
+        type = types.int;
+        default = 5;
+        description = lib.mdDoc ''
+          Specify the number of one-time passwords to check in order
+          to accommodate situations where the system and the
+          client are slightly out of sync (iteration for HOTP or time
+          steps for TOTP).
+        '';
+      };
+
+      usersFile = mkOption {
+        type = types.path;
+        default = "/etc/users.oath";
+        description = lib.mdDoc ''
+          Set the path to the file where the users' credentials are
+          stored. This file must not be world readable!
+        '';
+      };
+    };
+
+  };
+}
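
A usage sketch for the OATH options above; the users file path is the module default and must not be world readable:

  {
    security.pam.oath = {
      enable = true;
      digits = 6;
      window = 10;
      usersFile = "/etc/users.oath";
    };
  }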
diff --git a/nixpkgs/nixos/modules/security/pam.nix b/nixpkgs/nixos/modules/security/pam.nix
new file mode 100644
index 000000000000..b7e1ea526535
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/pam.nix
@@ -0,0 +1,1540 @@
+# This module provides configuration for the PAM (Pluggable
+# Authentication Modules) system.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  mkRulesTypeOption = type: mkOption {
+    # These options are experimental and subject to breaking changes without notice.
+    description = lib.mdDoc ''
+      PAM `${type}` rules for this service.
+
+      Attribute keys are the name of each rule.
+    '';
+    type = types.attrsOf (types.submodule ({ name, config, ... }: {
+      options = {
+        name = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            Name of this rule.
+          '';
+          internal = true;
+          readOnly = true;
+        };
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether this rule is added to the PAM service config file.
+          '';
+        };
+        order = mkOption {
+          type = types.int;
+          description = lib.mdDoc ''
+            Order of this rule in the service file. Rules are arranged in ascending order of this value.
+
+            ::: {.warning}
+            The `order` values for the built-in rules are subject to change. If you assign a constant value to this option, a system update could silently reorder your rule. You could be locked out of your system, or your system could be left wide open. When using this option, set it to a relative offset from another rule's `order` value:
+
+            ```nix
+            {
+              security.pam.services.login.rules.auth.foo.order =
+                config.security.pam.services.login.rules.auth.unix.order + 10;
+            }
+            ```
+            :::
+          '';
+        };
+        control = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            Indicates the behavior of the PAM-API should the module fail to succeed in its authentication task. See `control` in {manpage}`pam.conf(5)` for details.
+          '';
+        };
+        modulePath = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            Either the full filename of the PAM to be used by the application (it begins with a '/'), or a relative pathname from the default module location. See `module-path` in {manpage}`pam.conf(5)` for details.
+          '';
+        };
+        args = mkOption {
+          type = types.listOf types.str;
+          description = lib.mdDoc ''
+            Tokens that can be used to modify the specific behavior of the given PAM. Such arguments will be documented for each individual module. See `module-arguments` in {manpage}`pam.conf(5)` for details.
+
+            Escaping rules for spaces and square brackets are automatically applied.
+
+            {option}`settings` are automatically added as {option}`args`. It's recommended to use the {option}`settings` option whenever possible so that arguments can be overridden.
+          '';
+        };
+        settings = mkOption {
+          type = with types; attrsOf (nullOr (oneOf [ bool str int pathInStore ]));
+          default = {};
+          description = lib.mdDoc ''
+            Settings to add as `module-arguments`.
+
+            Boolean values render just the key if true, and nothing if false. Null values are ignored. All other values are rendered as key-value pairs.
+          '';
+        };
+      };
+      config = {
+        inherit name;
+        # Formats an attrset of settings as args for use as `module-arguments`.
+        args = concatLists (flip mapAttrsToList config.settings (name: value:
+          if isBool value
+          then optional value name
+          else optional (value != null) "${name}=${toString value}"
+        ));
+      };
+    }));
+  };
+
+  parentConfig = config;
+
+  pamOpts = { config, name, ... }: let cfg = config; in let config = parentConfig; in {
+
+    options = {
+
+      name = mkOption {
+        example = "sshd";
+        type = types.str;
+        description = lib.mdDoc "Name of the PAM service.";
+      };
+
+      rules = mkOption {
+        # This option is experimental and subject to breaking changes without notice.
+        visible = false;
+
+        description = lib.mdDoc ''
+          PAM rules for this service.
+
+          ::: {.warning}
+          This option and its suboptions are experimental and subject to breaking changes without notice.
+
+          If you use this option in your system configuration, you will need to manually monitor this module for any changes. Otherwise, failure to adjust your configuration properly could lead to you being locked out of your system, or worse, your system could be left wide open to attackers.
+
+          If you share configuration examples that use this option, you MUST include this warning so that users are informed.
+
+          You may freely use this option within `nixpkgs`, and future changes will account for those use sites.
+          :::
+        '';
+        type = types.submodule {
+          options = genAttrs [ "account" "auth" "password" "session" ] mkRulesTypeOption;
+        };
+      };
+
+      unixAuth = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether users can log in with passwords defined in
+          {file}`/etc/shadow`.
+        '';
+      };
+
+      rootOK = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, root doesn't need to authenticate (e.g. for the
+          {command}`useradd` service).
+        '';
+      };
+
+      p11Auth = mkOption {
+        default = config.security.pam.p11.enable;
+        defaultText = literalExpression "config.security.pam.p11.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, keys listed in
+          {file}`~/.ssh/authorized_keys` and
+          {file}`~/.eid/authorized_certificates`
+          can be used to log in with the associated PKCS#11 tokens.
+        '';
+      };
+
+      u2fAuth = mkOption {
+        default = config.security.pam.u2f.enable;
+        defaultText = literalExpression "config.security.pam.u2f.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, users listed in
+          {file}`$XDG_CONFIG_HOME/Yubico/u2f_keys` (or
+          {file}`$HOME/.config/Yubico/u2f_keys` if XDG variable is
+          not set) are able to log in with the associated U2F key. Path can be
+          changed using {option}`security.pam.u2f.authFile` option.
+        '';
+      };
+
+      usshAuth = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, users with an SSH certificate containing an authorized principal
+          in their SSH agent are able to log in. Specific options are controlled
+          using the {option}`security.pam.ussh` options.
+
+          Note that {option}`security.pam.ussh.enable` must also be
+          set for this option to take effect.
+        '';
+      };
+
+      yubicoAuth = mkOption {
+        default = config.security.pam.yubico.enable;
+        defaultText = literalExpression "config.security.pam.yubico.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, users listed in
+          {file}`~/.yubico/authorized_yubikeys`
+          are able to log in with the associated Yubikey tokens.
+        '';
+      };
+
+      googleAuthenticator = {
+        enable = mkOption {
+          default = false;
+          type = types.bool;
+          description = lib.mdDoc ''
+            If set, users with enabled Google Authenticator (created
+            {file}`~/.google_authenticator`) will be required
+            to provide Google Authenticator token to log in.
+          '';
+        };
+      };
+
+      usbAuth = mkOption {
+        default = config.security.pam.usb.enable;
+        defaultText = literalExpression "config.security.pam.usb.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, users listed in
+          {file}`/etc/pamusb.conf` are able to log in
+          with the associated USB key.
+        '';
+      };
+
+      otpwAuth = mkOption {
+        default = config.security.pam.enableOTPW;
+        defaultText = literalExpression "config.security.pam.enableOTPW";
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, the OTPW system will be used (if
+          {file}`~/.otpw` exists).
+        '';
+      };
+
+      googleOsLoginAccountVerification = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, will use the Google OS Login PAM modules
+          (`pam_oslogin_login`,
+          `pam_oslogin_admin`) to verify possible OS Login
+          users and set sudoers configuration accordingly.
+          This only makes sense to enable for the `sshd` PAM
+          service.
+        '';
+      };
+
+      googleOsLoginAuthentication = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, will use the `pam_oslogin_login`'s user
+          authentication methods to authenticate users using 2FA.
+          This only makes sense to enable for the `sshd` PAM
+          service.
+        '';
+      };
+
+      mysqlAuth = mkOption {
+        default = config.users.mysql.enable;
+        defaultText = literalExpression "config.users.mysql.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, the `pam_mysql` module will be used to
+          authenticate users against a MySQL/MariaDB database.
+        '';
+      };
+
+      fprintAuth = mkOption {
+        default = config.services.fprintd.enable;
+        defaultText = literalExpression "config.services.fprintd.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, the fingerprint reader will be used (if one exists and
+          your fingerprints are enrolled).
+        '';
+      };
+
+      oathAuth = mkOption {
+        default = config.security.pam.oath.enable;
+        defaultText = literalExpression "config.security.pam.oath.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, the OATH Toolkit will be used.
+        '';
+      };
+
+      sshAgentAuth = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, the calling user's SSH agent is used to authenticate
+          against the keys in the calling user's
+          {file}`~/.ssh/authorized_keys`.  This is useful
+          for {command}`sudo` on password-less remote systems.
+        '';
+      };
+
+      duoSecurity = {
+        enable = mkOption {
+          default = false;
+          type = types.bool;
+          description = lib.mdDoc ''
+            If set, use the Duo Security pam module
+            `pam_duo` for authentication.  Requires
+            configuration of {option}`security.duosec` options.
+          '';
+        };
+      };
+
+      startSession = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, the service will register a new session with
+          systemd's login manager.  For local sessions, this will give
+          the user access to audio devices and CD-ROM drives.  In the
+          default PolicyKit configuration, it also allows the user to
+          reboot the system.
+        '';
+      };
+
+      setEnvironment = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether the service should set the environment variables
+          listed in {option}`environment.sessionVariables`
+          using `pam_env.so`.
+        '';
+      };
+
+      setLoginUid = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Set the login uid of the process
+          ({file}`/proc/self/loginuid`) for auditing
+          purposes.  The login uid is only set by ‘entry points’ like
+          {command}`login` and {command}`sshd`, not by
+          commands like {command}`sudo`.
+        '';
+      };
+
+      ttyAudit = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable or disable TTY auditing for the specified users.
+          '';
+        };
+
+        enablePattern = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            For each user matching one of the comma-separated
+            glob patterns, enable TTY auditing.
+          '';
+        };
+
+        disablePattern = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            For each user matching one of comma-separated
+            glob patterns, disable TTY auditing
+          '';
+        };
+
+        openOnly = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Set the TTY audit flag when opening the session,
+            but do not restore it when closing the session.
+            Using this option is necessary for some services
+            that don't fork() to run the authenticated session,
+            such as sudo.
+          '';
+        };
+      };
+
+      forwardXAuth = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether X authentication keys should be passed from the
+          calling user to the target user (e.g. for
+          {command}`su`).
+        '';
+      };
+
+      pamMount = mkOption {
+        default = config.security.pam.mount.enable;
+        defaultText = literalExpression "config.security.pam.mount.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable the PAM mount (pam_mount) system to mount filesystems on user login.
+        '';
+      };
+
+      allowNullPassword = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to allow logging into accounts that have no password
+          set (i.e., have an empty password field in
+          {file}`/etc/passwd` or
+          {file}`/etc/group`).  This does not enable
+          logging into disabled accounts (i.e., those that have the password
+          field set to `!`).  Note that regardless of
+          what the pam_unix documentation says, accounts with hashed
+          empty passwords are always allowed to log in.
+        '';
+      };
+
+      nodelay = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether the delay after typing a wrong password should be disabled.
+        '';
+      };
+
+      requireWheel = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to permit root access only to members of group wheel.
+        '';
+      };
+
+      limits = mkOption {
+        default = [];
+        type = limitsType;
+        description = lib.mdDoc ''
+          List of attribute sets describing resource limits.  Defaults to the
+          value of {option}`security.pam.loginLimits`.
+          The meaning of the values is explained in {manpage}`limits.conf(5)`.
+        '';
+      };
+
+      showMotd = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to show the message of the day.";
+      };
+
+      makeHomeDir = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to try to create home directories for users
+          with `$HOME`s pointing to nonexistent
+          locations on session login.
+        '';
+      };
+
+      updateWtmp = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to update {file}`/var/log/wtmp`.";
+      };
+
+      logFailures = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to log authentication failures in {file}`/var/log/faillog`.";
+      };
+
+      enableAppArmor = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable support for attaching AppArmor profiles at the
+          user/group level, e.g., as part of a role based access
+          control scheme.
+        '';
+      };
+
+      enableKwallet = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If enabled, pam_kwallet5 will attempt to automatically unlock the
+          user's default KDE wallet upon login. If the user has no wallet named
+          "kdewallet", or the login password does not match their wallet
+          password, KDE will prompt separately after login.
+        '';
+      };
+
+      sssdStrictAccess = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to enforce SSSD access control.";
+      };
+
+      enableGnomeKeyring = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If enabled, pam_gnome_keyring will attempt to automatically unlock the
+          user's default GNOME keyring upon login. If the user's login password does
+          not match their keyring password, GNOME Keyring will prompt separately
+          after login.
+        '';
+      };
+
+      failDelay = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            If enabled, this will replace the `FAIL_DELAY` setting from `login.defs`,
+            allowing the delay on failure to be set per application.
+          '';
+        };
+
+        delay = mkOption {
+          default = 3000000;
+          type = types.int;
+          example = 1000000;
+          description = lib.mdDoc "The delay time (in microseconds) on failure.";
+        };
+      };
+
+      gnupg = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            If enabled, pam_gnupg will attempt to automatically unlock the
+            user's GPG keys with the login password via
+            {command}`gpg-agent`. The keygrips of all keys to be
+            unlocked should be written to {file}`~/.pam-gnupg`,
+            and can be queried with {command}`gpg -K --with-keygrip`.
+            Presetting passphrases must be enabled by adding
+            `allow-preset-passphrase` in
+            {file}`~/.gnupg/gpg-agent.conf`.
+          '';
+        };
+
+        noAutostart = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Don't start {command}`gpg-agent` if it is not running.
+            Useful in conjunction with starting {command}`gpg-agent` as
+            a systemd user service.
+          '';
+        };
+
+        storeOnly = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Don't send the password immediately after login, but store it for the
+            PAM `session` phase.
+          '';
+        };
+      };
+
+      zfs = mkOption {
+        default = config.security.pam.zfs.enable;
+        defaultText = literalExpression "config.security.pam.zfs.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable unlocking and mounting of encrypted ZFS home dataset at login.
+        '';
+      };
+
+      text = mkOption {
+        type = types.nullOr types.lines;
+        description = lib.mdDoc "Contents of the PAM service file.";
+      };
+
+    };
+
+    # The resulting /etc/pam.d/* file contents are verified in
+    # nixos/tests/pam/pam-file-contents.nix. Please update tests there when
+    # changing the derivation.
+    config = {
+      name = mkDefault name;
+      setLoginUid = mkDefault cfg.startSession;
+      limits = mkDefault config.security.pam.loginLimits;
+
+      text = let
+        ensureUniqueOrder = type: rules:
+          let
+            checkPair = a: b: assert assertMsg (a.order != b.order) "security.pam.services.${name}.rules.${type}: rules '${a.name}' and '${b.name}' cannot have the same order value (${toString a.order})"; b;
+            checked = zipListsWith checkPair rules (drop 1 rules);
+          in take 1 rules ++ checked;
+        # Formats a string for use in `module-arguments`. See `man pam.conf`.
+        formatModuleArgument = token:
+          if hasInfix " " token
+          then "[${replaceStrings ["]"] ["\\]"] token}]"
+          else token;
+        formatRules = type: pipe cfg.rules.${type} [
+          attrValues
+          (filter (rule: rule.enable))
+          (sort (a: b: a.order < b.order))
+          (ensureUniqueOrder type)
+          (map (rule: concatStringsSep " " (
+            [ type rule.control rule.modulePath ]
+            ++ map formatModuleArgument rule.args
+            ++ [ "# ${rule.name} (order ${toString rule.order})" ]
+          )))
+          (concatStringsSep "\n")
+        ];
+      in mkDefault ''
+        # Account management.
+        ${formatRules "account"}
+
+        # Authentication management.
+        ${formatRules "auth"}
+
+        # Password management.
+        ${formatRules "password"}
+
+        # Session management.
+        ${formatRules "session"}
+      '';
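+      # Illustrative sketch (derived from formatRules above): each enabled rule renders
+      # as "<type> <control> <module-path> <args...> # <name> (order <n>)"; for example,
+      # with the default auto-assigned ordering the unix account rule becomes:
+      #   account required pam_unix.so # unix (order 10900)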
+
+      # !!! TODO: move the LDAP stuff to the LDAP module, and the
+      # Samba stuff to the Samba module.  This requires that the PAM
+      # module provides the right hooks.
+      rules = let
+        autoOrderRules = flip pipe [
+          (imap1 (index: rule: rule // { order = mkDefault (10000 + index * 100); } ))
+          (map (rule: nameValuePair rule.name (removeAttrs rule [ "name" ])))
+          listToAttrs
+        ];
+      in {
+        account = autoOrderRules [
+          { name = "ldap"; enable = use_ldap; control = "sufficient"; modulePath = "${pam_ldap}/lib/security/pam_ldap.so"; }
+          { name = "mysql"; enable = cfg.mysqlAuth; control = "sufficient"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
+            config_file = "/etc/security/pam_mysql.conf";
+          }; }
+          { name = "kanidm"; enable = config.services.kanidm.enablePam; control = "sufficient"; modulePath = "${pkgs.kanidm}/lib/pam_kanidm.so"; settings = {
+            ignore_unknown_user = true;
+          }; }
+          { name = "sss"; enable = config.services.sssd.enable; control = if cfg.sssdStrictAccess then "[default=bad success=ok user_unknown=ignore]" else "sufficient"; modulePath = "${pkgs.sssd}/lib/security/pam_sss.so"; }
+          { name = "krb5"; enable = config.security.pam.krb5.enable; control = "sufficient"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; }
+          { name = "oslogin_login"; enable = cfg.googleOsLoginAccountVerification; control = "[success=ok ignore=ignore default=die]"; modulePath = "${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so"; }
+          { name = "oslogin_admin"; enable = cfg.googleOsLoginAccountVerification; control = "[success=ok default=ignore]"; modulePath = "${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_admin.so"; }
+          { name = "systemd_home"; enable = config.services.homed.enable; control = "sufficient"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
+          # The required pam_unix.so module has to come after all the sufficient modules
+          # because otherwise, the account lookup will fail if the user does not exist
+          # locally, for example with MySQL- or LDAP-auth.
+          { name = "unix"; control = "required"; modulePath = "pam_unix.so"; }
+        ];
+
+        auth = autoOrderRules ([
+          { name = "oslogin_login"; enable = cfg.googleOsLoginAuthentication; control = "[success=done perm_denied=die default=ignore]"; modulePath = "${pkgs.google-guest-oslogin}/lib/security/pam_oslogin_login.so"; }
+          { name = "rootok"; enable = cfg.rootOK; control = "sufficient"; modulePath = "pam_rootok.so"; }
+          { name = "wheel"; enable = cfg.requireWheel; control = "required"; modulePath = "pam_wheel.so"; settings = {
+            use_uid = true;
+          }; }
+          { name = "faillock"; enable = cfg.logFailures; control = "required"; modulePath = "pam_faillock.so"; }
+          { name = "mysql"; enable = cfg.mysqlAuth; control = "sufficient"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
+            config_file = "/etc/security/pam_mysql.conf";
+          }; }
+          { name = "ssh_agent_auth"; enable = config.security.pam.enableSSHAgentAuth && cfg.sshAgentAuth; control = "sufficient"; modulePath = "${pkgs.pam_ssh_agent_auth}/libexec/pam_ssh_agent_auth.so"; settings = {
+            file = lib.concatStringsSep ":" config.services.openssh.authorizedKeysFiles;
+          }; }
+          (let p11 = config.security.pam.p11; in { name = "p11"; enable = cfg.p11Auth; control = p11.control; modulePath = "${pkgs.pam_p11}/lib/security/pam_p11.so"; args = [
+            "${pkgs.opensc}/lib/opensc-pkcs11.so"
+          ]; })
+          (let u2f = config.security.pam.u2f; in { name = "u2f"; enable = cfg.u2fAuth; control = u2f.control; modulePath = "${pkgs.pam_u2f}/lib/security/pam_u2f.so"; settings = {
+            inherit (u2f) debug interactive cue origin;
+            authfile = u2f.authFile;
+            appid = u2f.appId;
+          }; })
+          { name = "usb"; enable = cfg.usbAuth; control = "sufficient"; modulePath = "${pkgs.pam_usb}/lib/security/pam_usb.so"; }
+          (let ussh = config.security.pam.ussh; in { name = "ussh"; enable = config.security.pam.ussh.enable && cfg.usshAuth; control = ussh.control; modulePath = "${pkgs.pam_ussh}/lib/security/pam_ussh.so"; settings = {
+            ca_file = ussh.caFile;
+            authorized_principals = ussh.authorizedPrincipals;
+            authorized_principals_file = ussh.authorizedPrincipalsFile;
+            inherit (ussh) group;
+          }; })
+          (let oath = config.security.pam.oath; in { name = "oath"; enable = cfg.oathAuth; control = "requisite"; modulePath = "${pkgs.oath-toolkit}/lib/security/pam_oath.so"; settings = {
+            inherit (oath) window digits;
+            usersfile = oath.usersFile;
+          }; })
+          (let yubi = config.security.pam.yubico; in { name = "yubico"; enable = cfg.yubicoAuth; control = yubi.control; modulePath = "${pkgs.yubico-pam}/lib/security/pam_yubico.so"; settings = {
+            inherit (yubi) mode debug;
+            chalresp_path = yubi.challengeResponsePath;
+            id = mkIf (yubi.mode == "client") yubi.id;
+          }; })
+          (let dp9ik = config.security.pam.dp9ik; in { name = "p9"; enable = dp9ik.enable; control = dp9ik.control; modulePath = "${pkgs.pam_dp9ik}/lib/security/pam_p9.so"; args = [
+            dp9ik.authserver
+          ]; })
+          { name = "fprintd"; enable = cfg.fprintAuth; control = "sufficient"; modulePath = "${pkgs.fprintd}/lib/security/pam_fprintd.so"; }
+        ] ++
+          # Modules in this block require having the password set in PAM_AUTHTOK.
+          # pam_unix is marked as 'sufficient' on NixOS which means nothing will run
+          # after it succeeds. Certain modules need to run after pam_unix
+          # prompts the user for password so we run it once with 'optional' at an
+          # earlier point and it will run again with 'sufficient' further down.
+          # We use try_first_pass the second time to avoid prompting for the password twice.
+          #
+          # The same principle applies to systemd-homed
+          (optionals ((cfg.unixAuth || config.services.homed.enable) &&
+            (config.security.pam.enableEcryptfs
+              || config.security.pam.enableFscrypt
+              || cfg.pamMount
+              || cfg.enableKwallet
+              || cfg.enableGnomeKeyring
+              || cfg.googleAuthenticator.enable
+              || cfg.gnupg.enable
+              || cfg.failDelay.enable
+              || cfg.duoSecurity.enable
+              || cfg.zfs))
+            [
+              { name = "systemd_home-early"; enable = config.services.homed.enable; control = "optional"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
+              { name = "unix-early"; enable = cfg.unixAuth; control = "optional"; modulePath = "pam_unix.so"; settings = {
+                nullok = cfg.allowNullPassword;
+                inherit (cfg) nodelay;
+                likeauth = true;
+              }; }
+              { name = "ecryptfs"; enable = config.security.pam.enableEcryptfs; control = "optional"; modulePath = "${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"; settings = {
+                unwrap = true;
+              }; }
+              { name = "fscrypt"; enable = config.security.pam.enableFscrypt; control = "optional"; modulePath = "${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so"; }
+              { name = "zfs_key"; enable = cfg.zfs; control = "optional"; modulePath = "${config.boot.zfs.package}/lib/security/pam_zfs_key.so"; settings = {
+                inherit (config.security.pam.zfs) homes;
+              }; }
+              { name = "mount"; enable = cfg.pamMount; control = "optional"; modulePath = "${pkgs.pam_mount}/lib/security/pam_mount.so"; settings = {
+                disable_interactive = true;
+              }; }
+              { name = "kwallet5"; enable = cfg.enableKwallet; control = "optional"; modulePath = "${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so"; settings = {
+                kwalletd = "${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5";
+              }; }
+              { name = "gnome_keyring"; enable = cfg.enableGnomeKeyring; control = "optional"; modulePath = "${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so"; }
+              { name = "gnupg"; enable = cfg.gnupg.enable; control = "optional"; modulePath = "${pkgs.pam_gnupg}/lib/security/pam_gnupg.so"; settings = {
+                store-only = cfg.gnupg.storeOnly;
+              }; }
+              { name = "faildelay"; enable = cfg.failDelay.enable; control = "optional"; modulePath = "${pkgs.pam}/lib/security/pam_faildelay.so"; settings = {
+                inherit (cfg.failDelay) delay;
+              }; }
+              { name = "google_authenticator"; enable = cfg.googleAuthenticator.enable; control = "required"; modulePath = "${pkgs.google-authenticator}/lib/security/pam_google_authenticator.so"; settings = {
+                no_increment_hotp = true;
+              }; }
+              { name = "duo"; enable = cfg.duoSecurity.enable; control = "required"; modulePath = "${pkgs.duo-unix}/lib/security/pam_duo.so"; }
+            ]) ++ [
+          { name = "systemd_home"; enable = config.services.homed.enable; control = "sufficient"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
+          { name = "unix"; enable = cfg.unixAuth; control = "sufficient"; modulePath = "pam_unix.so"; settings = {
+            nullok = cfg.allowNullPassword;
+            inherit (cfg) nodelay;
+            likeauth = true;
+            try_first_pass = true;
+          }; }
+          { name = "otpw"; enable = cfg.otpwAuth; control = "sufficient"; modulePath = "${pkgs.otpw}/lib/security/pam_otpw.so"; }
+          { name = "ldap"; enable = use_ldap; control = "sufficient"; modulePath = "${pam_ldap}/lib/security/pam_ldap.so"; settings = {
+            use_first_pass = true;
+          }; }
+          { name = "kanidm"; enable = config.services.kanidm.enablePam; control = "sufficient"; modulePath = "${pkgs.kanidm}/lib/pam_kanidm.so"; settings = {
+            ignore_unknown_user = true;
+            use_first_pass = true;
+          }; }
+          { name = "sss"; enable = config.services.sssd.enable; control = "sufficient"; modulePath = "${pkgs.sssd}/lib/security/pam_sss.so"; settings = {
+            use_first_pass = true;
+          }; }
+          { name = "krb5"; enable = config.security.pam.krb5.enable; control = "[default=ignore success=1 service_err=reset]"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; settings = {
+            use_first_pass = true;
+          }; }
+          { name = "ccreds-validate"; enable = config.security.pam.krb5.enable; control = "[default=die success=done]"; modulePath = "${pam_ccreds}/lib/security/pam_ccreds.so"; settings = {
+            action = "validate";
+            use_first_pass = true;
+          }; }
+          { name = "ccreds-store"; enable = config.security.pam.krb5.enable; control = "sufficient"; modulePath = "${pam_ccreds}/lib/security/pam_ccreds.so"; settings = {
+            action = "store";
+            use_first_pass = true;
+          }; }
+          { name = "deny"; control = "required"; modulePath = "pam_deny.so"; }
+        ]);
+
+        password = autoOrderRules [
+          { name = "systemd_home"; enable = config.services.homed.enable; control = "sufficient"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
+          { name = "unix"; control = "sufficient"; modulePath = "pam_unix.so"; settings = {
+            nullok = true;
+            yescrypt = true;
+          }; }
+          { name = "ecryptfs"; enable = config.security.pam.enableEcryptfs; control = "optional"; modulePath = "${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"; }
+          { name = "fscrypt"; enable = config.security.pam.enableFscrypt; control = "optional"; modulePath = "${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so"; }
+          { name = "zfs_key"; enable = cfg.zfs; control = "optional"; modulePath = "${config.boot.zfs.package}/lib/security/pam_zfs_key.so"; settings = {
+            inherit (config.security.pam.zfs) homes;
+          }; }
+          { name = "mount"; enable = cfg.pamMount; control = "optional"; modulePath = "${pkgs.pam_mount}/lib/security/pam_mount.so"; }
+          { name = "ldap"; enable = use_ldap; control = "sufficient"; modulePath = "${pam_ldap}/lib/security/pam_ldap.so"; }
+          { name = "mysql"; enable = cfg.mysqlAuth; control = "sufficient"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
+            config_file = "/etc/security/pam_mysql.conf";
+          }; }
+          { name = "kanidm"; enable = config.services.kanidm.enablePam; control = "sufficient"; modulePath = "${pkgs.kanidm}/lib/pam_kanidm.so"; }
+          { name = "sss"; enable = config.services.sssd.enable; control = "sufficient"; modulePath = "${pkgs.sssd}/lib/security/pam_sss.so"; }
+          { name = "krb5"; enable = config.security.pam.krb5.enable; control = "sufficient"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; settings = {
+            use_first_pass = true;
+          }; }
+          { name = "gnome_keyring"; enable = cfg.enableGnomeKeyring; control = "optional"; modulePath = "${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so"; settings = {
+            use_authtok = true;
+          }; }
+        ];
+
+        session = autoOrderRules [
+          { name = "env"; enable = cfg.setEnvironment; control = "required"; modulePath = "pam_env.so"; settings = {
+            conffile = "/etc/pam/environment";
+            readenv = 0;
+          }; }
+          { name = "unix"; control = "required"; modulePath = "pam_unix.so"; }
+          { name = "loginuid"; enable = cfg.setLoginUid; control = if config.boot.isContainer then "optional" else "required"; modulePath = "pam_loginuid.so"; }
+          { name = "tty_audit"; enable = cfg.ttyAudit.enable; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_tty_audit.so"; settings = {
+            open_only = cfg.ttyAudit.openOnly;
+            enable = cfg.ttyAudit.enablePattern;
+            disable = cfg.ttyAudit.disablePattern;
+          }; }
+          { name = "systemd_home"; enable = config.services.homed.enable; control = "required"; modulePath = "${config.systemd.package}/lib/security/pam_systemd_home.so"; }
+          { name = "mkhomedir"; enable = cfg.makeHomeDir; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_mkhomedir.so"; settings = {
+            silent = true;
+            skel = config.security.pam.makeHomeDir.skelDirectory;
+            inherit (config.security.pam.makeHomeDir) umask;
+          }; }
+          { name = "lastlog"; enable = cfg.updateWtmp; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_lastlog.so"; settings = {
+            silent = true;
+          }; }
+          { name = "ecryptfs"; enable = config.security.pam.enableEcryptfs; control = "optional"; modulePath = "${pkgs.ecryptfs}/lib/security/pam_ecryptfs.so"; }
+          # Work around https://github.com/systemd/systemd/issues/8598
+          # Skips the pam_fscrypt module for systemd-user sessions which do not have a password
+          # anyways.
+          # See also https://github.com/google/fscrypt/issues/95
+          { name = "fscrypt-skip-systemd"; enable = config.security.pam.enableFscrypt; control = "[success=1 default=ignore]"; modulePath = "pam_succeed_if.so"; args = [
+            "service" "=" "systemd-user"
+          ]; }
+          { name = "fscrypt"; enable = config.security.pam.enableFscrypt; control = "optional"; modulePath = "${pkgs.fscrypt-experimental}/lib/security/pam_fscrypt.so"; }
+          { name = "zfs_key-skip-systemd"; enable = cfg.zfs; control = "[success=1 default=ignore]"; modulePath = "pam_succeed_if.so"; args = [
+            "service" "=" "systemd-user"
+          ]; }
+          { name = "zfs_key"; enable = cfg.zfs; control = "optional"; modulePath = "${config.boot.zfs.package}/lib/security/pam_zfs_key.so"; settings = {
+            inherit (config.security.pam.zfs) homes;
+            nounmount = config.security.pam.zfs.noUnmount;
+          }; }
+          { name = "mount"; enable = cfg.pamMount; control = "optional"; modulePath = "${pkgs.pam_mount}/lib/security/pam_mount.so"; settings = {
+            disable_interactive = true;
+          }; }
+          { name = "ldap"; enable = use_ldap; control = "optional"; modulePath = "${pam_ldap}/lib/security/pam_ldap.so"; }
+          { name = "mysql"; enable = cfg.mysqlAuth; control = "optional"; modulePath = "${pkgs.pam_mysql}/lib/security/pam_mysql.so"; settings = {
+            config_file = "/etc/security/pam_mysql.conf";
+          }; }
+          { name = "kanidm"; enable = config.services.kanidm.enablePam; control = "optional"; modulePath = "${pkgs.kanidm}/lib/pam_kanidm.so"; }
+          { name = "sss"; enable = config.services.sssd.enable; control = "optional"; modulePath = "${pkgs.sssd}/lib/security/pam_sss.so"; }
+          { name = "krb5"; enable = config.security.pam.krb5.enable; control = "optional"; modulePath = "${pam_krb5}/lib/security/pam_krb5.so"; }
+          { name = "otpw"; enable = cfg.otpwAuth; control = "optional"; modulePath = "${pkgs.otpw}/lib/security/pam_otpw.so"; }
+          { name = "systemd"; enable = cfg.startSession; control = "optional"; modulePath = "${config.systemd.package}/lib/security/pam_systemd.so"; }
+          { name = "xauth"; enable = cfg.forwardXAuth; control = "optional"; modulePath = "pam_xauth.so"; settings = {
+            xauthpath = "${pkgs.xorg.xauth}/bin/xauth";
+            systemuser = 99;
+          }; }
+          { name = "limits"; enable = cfg.limits != []; control = "required"; modulePath = "${pkgs.pam}/lib/security/pam_limits.so"; settings = {
+            conf = "${makeLimitsConf cfg.limits}";
+          }; }
+          { name = "motd"; enable = cfg.showMotd && (config.users.motd != null || config.users.motdFile != null); control = "optional"; modulePath = "${pkgs.pam}/lib/security/pam_motd.so"; settings = {
+            inherit motd;
+          }; }
+          { name = "apparmor"; enable = cfg.enableAppArmor && config.security.apparmor.enable; control = "optional"; modulePath = "${pkgs.apparmor-pam}/lib/security/pam_apparmor.so"; settings = {
+            order = "user,group,default";
+            debug = true;
+          }; }
+          { name = "kwallet5"; enable = cfg.enableKwallet; control = "optional"; modulePath = "${pkgs.plasma5Packages.kwallet-pam}/lib/security/pam_kwallet5.so"; settings = {
+            kwalletd = "${pkgs.plasma5Packages.kwallet.bin}/bin/kwalletd5";
+          }; }
+          { name = "gnome_keyring"; enable = cfg.enableGnomeKeyring; control = "optional"; modulePath = "${pkgs.gnome.gnome-keyring}/lib/security/pam_gnome_keyring.so"; settings = {
+            auto_start = true;
+          }; }
+          { name = "gnupg"; enable = cfg.gnupg.enable; control = "optional"; modulePath = "${pkgs.pam_gnupg}/lib/security/pam_gnupg.so"; settings = {
+            no-autostart = cfg.gnupg.noAutostart;
+          }; }
+          { name = "cgfs"; enable = config.virtualisation.lxc.lxcfs.enable; control = "optional"; modulePath = "${pkgs.lxc}/lib/security/pam_cgfs.so"; args = [
+            "-c" "all"
+          ]; }
+        ];
+      };
+    };
+
+  };
+
+
+  inherit (pkgs) pam_krb5 pam_ccreds;
+
+  use_ldap = (config.users.ldap.enable && config.users.ldap.loginPam);
+  pam_ldap = if config.users.ldap.daemon.enable then pkgs.nss_pam_ldapd else pkgs.pam_ldap;
+
+  # Create a limits.conf(5) file.
+  makeLimitsConf = limits:
+    pkgs.writeText "limits.conf"
+       (concatMapStrings ({ domain, type, item, value }:
+         "${domain} ${type} ${item} ${toString value}\n")
+         limits);
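+  # For example (sketch), the limits shown in the security.pam.loginLimits example
+  # below would render as lines like:
+  #   ftp hard nproc 0
+  #   @student - maxlogins 4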
+
+  limitsType = with lib.types; listOf (submodule ({ ... }: {
+    options = {
+      domain = mkOption {
+        description = lib.mdDoc "Username, groupname, or wildcard this limit applies to";
+        example = "@wheel";
+        type = str;
+      };
+
+      type = mkOption {
+        description = lib.mdDoc "Type of this limit";
+        type = enum [ "-" "hard" "soft" ];
+        default = "-";
+      };
+
+      item = mkOption {
+        description = lib.mdDoc "Item this limit applies to";
+        type = enum [
+          "core"
+          "data"
+          "fsize"
+          "memlock"
+          "nofile"
+          "rss"
+          "stack"
+          "cpu"
+          "nproc"
+          "as"
+          "maxlogins"
+          "maxsyslogins"
+          "priority"
+          "locks"
+          "sigpending"
+          "msgqueue"
+          "nice"
+          "rtprio"
+        ];
+      };
+
+      value = mkOption {
+        description = lib.mdDoc "Value of this limit";
+        type = oneOf [ str int ];
+      };
+    };
+  }));
+
+  motd = if config.users.motdFile == null
+         then pkgs.writeText "motd" config.users.motd
+         else config.users.motdFile;
+
+  makePAMService = name: service:
+    { name = "pam.d/${name}";
+      value.source = pkgs.writeText "${name}.pam" service.text;
+    };
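+  # E.g. (illustration): a service named "sshd" ends up as environment.etc."pam.d/sshd",
+  # sourcing a generated "sshd.pam" file in the Nix store.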
+
+in
+
+{
+
+  meta.maintainers = [ maintainers.majiir ];
+
+  imports = [
+    (mkRenamedOptionModule [ "security" "pam" "enableU2F" ] [ "security" "pam" "u2f" "enable" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    security.pam.loginLimits = mkOption {
+      default = [];
+      type = limitsType;
+      example =
+        [ { domain = "ftp";
+            type   = "hard";
+            item   = "nproc";
+            value  = "0";
+          }
+          { domain = "@student";
+            type   = "-";
+            item   = "maxlogins";
+            value  = "4";
+          }
+       ];
+
+      description = lib.mdDoc ''
+        Define resource limits that should apply to users or groups.
+        Each item in the list should be an attribute set with a
+        {var}`domain`, {var}`type`,
+        {var}`item`, and {var}`value`
+        attribute.  The syntax and semantics of these attributes
+        must be those described in {manpage}`limits.conf(5)`.
+
+        Note that these limits do not apply to systemd services,
+        whose limits can be changed via {option}`systemd.extraConfig`
+        instead.
+      '';
+    };
+
+    security.pam.services = mkOption {
+      default = {};
+      type = with types; attrsOf (submodule pamOpts);
+      description =
+        lib.mdDoc ''
+          This option defines the PAM services.  A service typically
+          corresponds to a program that uses PAM,
+          e.g. {command}`login` or {command}`passwd`.
+          Each attribute of this set defines a PAM service, with the attribute name
+          defining the name of the service.
+        '';
+    };
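+    # Illustrative usage (an assumption, not prescribed by this module): per-service
+    # settings are set under the service's attribute, e.g.
+    #   security.pam.services.sudo.sshAgentAuth = true;
+    #   security.pam.services.login.enableGnomeKeyring = true;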
+
+    security.pam.makeHomeDir.skelDirectory = mkOption {
+      type = types.str;
+      default = "/var/empty";
+      example = "/etc/skel";
+      description = lib.mdDoc ''
+        Path to skeleton directory whose contents are copied to home
+        directories newly created by `pam_mkhomedir`.
+      '';
+    };
+
+    security.pam.makeHomeDir.umask = mkOption {
+      type = types.str;
+      default = "0077";
+      example = "0022";
+      description = lib.mdDoc ''
+        The user file mode creation mask to use on home directories
+        newly created by `pam_mkhomedir`.
+      '';
+    };
+
+    security.pam.enableSSHAgentAuth = mkOption {
+      type = types.bool;
+      default = false;
+      description =
+        lib.mdDoc ''
+          Enable sudo logins if the user's SSH agent provides a key
+          present in {file}`~/.ssh/authorized_keys`.
+          This allows machines to exclusively use SSH keys instead of
+          passwords.
+        '';
+    };
+
+    security.pam.enableOTPW = mkEnableOption (lib.mdDoc "the OTPW (one-time password) PAM module");
+
+    security.pam.dp9ik = {
+      enable = mkEnableOption (
+        lib.mdDoc ''
+          the dp9ik PAM module provided by tlsclient.
+
+          If set, users can be authenticated against the 9front
+          authentication server given in {option}`security.pam.dp9ik.authserver`.
+        ''
+      );
+      control = mkOption {
+        default = "sufficient";
+        type = types.str;
+        description = lib.mdDoc ''
+          This option sets the pam "control" used for this module.
+        '';
+      };
+      authserver = mkOption {
+        default = null;
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          This controls the hostname for the 9front authentication server
+          that users will be authenticated against.
+        '';
+      };
+    };
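+    # Illustrative usage (hostname is a placeholder): authenticate against a 9front
+    # authentication server:
+    #   security.pam.dp9ik = { enable = true; authserver = "auth.example.org"; };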
+
+    security.pam.krb5 = {
+      enable = mkOption {
+        default = config.krb5.enable;
+        defaultText = literalExpression "config.krb5.enable";
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enables Kerberos PAM modules (`pam-krb5`,
+          `pam-ccreds`).
+
+          If set, users can authenticate with their Kerberos password.
+          This requires a valid Kerberos configuration
+          (`config.krb5.enable` should be set to
+          `true`).
+
+          Note that the Kerberos PAM modules are not necessary when using SSS
+          to handle Kerberos authentication.
+        '';
+      };
+    };
+
+    security.pam.p11 = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enables P11 PAM (`pam_p11`) module.
+
+          If set, users can log in with SSH keys and PKCS#11 tokens.
+
+          More information can be found [here](https://github.com/OpenSC/pam_p11).
+        '';
+      };
+
+      control = mkOption {
+        default = "sufficient";
+        type = types.enum [ "required" "requisite" "sufficient" "optional" ];
+        description = lib.mdDoc ''
+          This option sets pam "control".
+          If you want to have multi factor authentication, use "required".
+          If you want to use the PKCS#11 device instead of the regular password,
+          use "sufficient".
+
+          Read
+          {manpage}`pam.conf(5)`
+          for better understanding of this option.
+        '';
+      };
+    };
+
+    security.pam.u2f = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enables U2F PAM (`pam-u2f`) module.
+
+          If set, users listed in
+          {file}`$XDG_CONFIG_HOME/Yubico/u2f_keys` (or
+          {file}`$HOME/.config/Yubico/u2f_keys` if the XDG variable is
+          not set) are able to log in with the associated U2F key. The path can
+          be changed with the {option}`security.pam.u2f.authFile` option.
+
+          File format is:
+          `username:first_keyHandle,first_public_key: second_keyHandle,second_public_key`
+          This file can be generated using {command}`pamu2fcfg` command.
+
+          More information can be found [here](https://developers.yubico.com/pam-u2f/).
+        '';
+      };
+
+      authFile = mkOption {
+        default = null;
+        type = with types; nullOr path;
+        description = lib.mdDoc ''
+          By default, the `pam-u2f` module reads the keys from
+          {file}`$XDG_CONFIG_HOME/Yubico/u2f_keys` (or
+          {file}`$HOME/.config/Yubico/u2f_keys` if the XDG variable is
+          not set).
+
+          If you want to change the authentication file location or centralize
+          the database (for example, use {file}`/etc/u2f-mappings`), you can set
+          this option.
+
+          File format is:
+          `username:first_keyHandle,first_public_key: second_keyHandle,second_public_key`
+          This file can be generated using {command}`pamu2fcfg` command.
+
+          More information can be found [here](https://developers.yubico.com/pam-u2f/).
+        '';
+      };
+
+      appId = mkOption {
+        default = null;
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          By default, the `pam-u2f` module sets the application
+          ID to `pam://$HOSTNAME`.
+
+          When using {command}`pamu2fcfg`, you can specify your
+          application ID with the `-i` flag.
+
+          More information can be found [here](https://developers.yubico.com/pam-u2f/Manuals/pam_u2f.8.html).
+        '';
+      };
+
+      origin = mkOption {
+        default = null;
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          By default, the `pam-u2f` module sets the origin
+          to `pam://$HOSTNAME`.
+          Setting the origin to a host-independent value will allow you to
+          reuse credentials across machines.
+
+          When using {command}`pamu2fcfg`, you can specify your
+          origin with the `-o` flag.
+
+          More information can be found [here](https://developers.yubico.com/pam-u2f/Manuals/pam_u2f.8.html).
+        '';
+      };
+
+      control = mkOption {
+        default = "sufficient";
+        type = types.enum [ "required" "requisite" "sufficient" "optional" ];
+        description = lib.mdDoc ''
+          This option sets pam "control".
+          If you want to have multi factor authentication, use "required".
+          If you want to use the U2F device instead of the regular password, use "sufficient".
+
+          Read
+          {manpage}`pam.conf(5)`
+          for better understanding of this option.
+        '';
+      };
+
+      debug = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Debug output to stderr.
+        '';
+      };
+
+      interactive = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Set this to display a prompt and wait before testing for the presence of a U2F device.
+          Recommended if your device doesn’t have a tactile trigger.
+        '';
+      };
+
+      cue = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          By default, the `pam-u2f` module does not inform the user
+          that they need to use the U2F device; it just waits without a prompt.
+
+          If you set this option to `true`, the
+          `cue` option is added to the `pam-u2f`
+          module and a reminder message will be displayed.
+        '';
+      };
+    };
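+    # Illustrative usage (paths are examples): a centralized key database generated
+    # with pamu2fcfg, plus a visible touch reminder:
+    #   security.pam.u2f = { enable = true; authFile = "/etc/u2f-mappings"; cue = true; };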
+
+    security.pam.ussh = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enables Uber's USSH PAM (`pam-ussh`) module.
+
+          This is similar to `pam-ssh-agent`, except that
+          the presence of a CA-signed SSH key with a valid principal is checked
+          instead.
+
+          Note that this module must be enabled both globally (using this option)
+          and at the per-PAM-service level (using `usshAuth`).
+
+          More information can be found [here](https://github.com/uber/pam-ussh).
+        '';
+      };
+
+      caFile = mkOption {
+        default = null;
+        type = with types; nullOr path;
+        description = lib.mdDoc ''
+          By default `pam-ussh` reads the trusted user CA keys
+          from {file}`/etc/ssh/trusted_user_ca`.
+
+          This should be set to the same value as your `TrustedUserCAKeys`
+          option for sshd.
+        '';
+      };
+
+      authorizedPrincipals = mkOption {
+        default = null;
+        type = with types; nullOr commas;
+        description = lib.mdDoc ''
+          Comma-separated list of authorized principals to permit; if the user
+          presents a certificate with one of these principals, then they will be
+          authorized.
+
+          Note that `pam-ussh` also requires that the certificate
+          contain a principal matching the user's username. The principals from
+          this list are in addition to those principals.
+
+          Mutually exclusive with `authorizedPrincipalsFile`.
+        '';
+      };
+
+      authorizedPrincipalsFile = mkOption {
+        default = null;
+        type = with types; nullOr path;
+        description = lib.mdDoc ''
+          Path to a list of principals; if the user presents a certificate with
+          one of these principals, then they will be authorized.
+
+          Note that `pam-ussh` also requires that the certificate
+          contain a principal matching the user's username. The principals from
+          this file are in addition to those principals.
+
+          Mutually exclusive with `authorizedPrincipals`.
+        '';
+      };
+
+      group = mkOption {
+        default = null;
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          If set, then the authenticating user must be a member of this group
+          to use this module.
+        '';
+      };
+
+      control = mkOption {
+        default = "sufficient";
+        type = types.enum [ "required" "requisite" "sufficient" "optional" ];
+        description = lib.mdDoc ''
+          This option sets pam "control".
+          If you want to have multi factor authentication, use "required".
+          If you want to use the SSH certificate instead of the regular password,
+          use "sufficient".
+
+          Read
+          {manpage}`pam.conf(5)`
+          for better understanding of this option.
+        '';
+      };
+    };
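+    # Illustrative usage (values are examples): restrict to the "wheel" group and point
+    # at the same CA file as sshd's TrustedUserCAKeys:
+    #   security.pam.ussh = { enable = true; caFile = "/etc/ssh/trusted_user_ca"; group = "wheel"; };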
+
+    security.pam.yubico = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enables Yubico PAM (`yubico-pam`) module.
+
+          If set, users listed in
+          {file}`~/.yubico/authorized_yubikeys`
+          are able to log in with the associated YubiKey tokens.
+
+          The file must have only one line:
+          `username:yubikey_token_id1:yubikey_token_id2`
+
+          More information can be found [here](https://developers.yubico.com/yubico-pam/).
+        '';
+      };
+      control = mkOption {
+        default = "sufficient";
+        type = types.enum [ "required" "requisite" "sufficient" "optional" ];
+        description = lib.mdDoc ''
+          This option sets pam "control".
+          If you want to have multi factor authentication, use "required".
+          If you want to use the YubiKey instead of the regular password, use "sufficient".
+
+          Read
+          {manpage}`pam.conf(5)`
+          for better understanding of this option.
+        '';
+      };
+      id = mkOption {
+        example = "42";
+        type = types.str;
+        description = lib.mdDoc "Client ID for the YubiKey validation service.";
+      };
+
+      debug = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Debug output to stderr.
+        '';
+      };
+      mode = mkOption {
+        default = "client";
+        type = types.enum [ "client" "challenge-response" ];
+        description = lib.mdDoc ''
+          Mode of operation.
+
+          Use "client" for online validation with a YubiKey validation service such as
+          the YubiCloud.
+
+          Use "challenge-response" for offline validation using YubiKeys with HMAC-SHA-1
+          Challenge-Response configurations. See {manpage}`ykpamcfg(1)` for further
+          details on how to configure offline Challenge-Response validation.
+
+          More information can be found [here](https://developers.yubico.com/yubico-pam/Authentication_Using_Challenge-Response.html).
+        '';
+      };
+      challengeResponsePath = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          If not null, set the path used by the Yubico PAM module where the expected challenge response is stored.
+
+          More information can be found [here](https://developers.yubico.com/yubico-pam/Authentication_Using_Challenge-Response.html).
+        '';
+      };
+    };
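+    # Illustrative usage (path is an example): offline HMAC-SHA-1 challenge-response
+    # validation with the challenge files kept outside users' home directories:
+    #   security.pam.yubico = { enable = true; mode = "challenge-response"; challengeResponsePath = "/var/yubico"; };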
+
+    security.pam.zfs = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable unlocking and mounting of encrypted ZFS home dataset at login.
+        '';
+      };
+
+      homes = mkOption {
+        example = "rpool/home";
+        default = "rpool/home";
+        type = types.str;
+        description = lib.mdDoc ''
+          Prefix of home datasets. This value will be concatenated with
+          `"/" + <username>` in order to determine the home dataset to unlock.
+        '';
+      };
+
+      noUnmount = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Do not unmount home dataset on logout.
+        '';
+      };
+    };
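+    # Illustrative usage: with the default `homes` prefix above, a login for user
+    # "alice" (name is an example) would try to unlock and mount "rpool/home/alice":
+    #   security.pam.zfs.enable = true;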
+
+    security.pam.enableEcryptfs = mkEnableOption (lib.mdDoc "eCryptfs PAM module (mounting ecryptfs home directory on login)");
+    security.pam.enableFscrypt = mkEnableOption (lib.mdDoc ''
+      fscrypt to automatically unlock directories with the user's login password.
+
+      This also enables a service at security.pam.services.fscrypt which is used by
+      fscrypt to verify the user's password when setting up a new protector. If you
+      use something other than pam_unix to verify user passwords, please remember to
+      adjust this PAM service.
+    '');
+
+    users.motd = mkOption {
+      default = null;
+      example = "Today is Sweetmorn, the 4th day of The Aftermath in the YOLD 3178.";
+      type = types.nullOr types.lines;
+      description = lib.mdDoc "Message of the day shown to users when they log in.";
+    };
+
+    users.motdFile = mkOption {
+      default = null;
+      example = "/etc/motd";
+      type = types.nullOr types.path;
+      description = lib.mdDoc "A file containing the message of the day shown to users when they log in.";
+    };
+  };
+
+
+  ###### implementation
+
+  config = {
+    assertions = [
+      {
+        assertion = config.users.motd == null || config.users.motdFile == null;
+        message = ''
+          Only one of users.motd and users.motdFile can be set.
+        '';
+      }
+      {
+        assertion = config.security.pam.zfs.enable -> (config.boot.zfs.enabled || config.boot.zfs.enableUnstable);
+        message = ''
+          `security.pam.zfs.enable` requires enabling ZFS (`boot.zfs.enabled` or `boot.zfs.enableUnstable`).
+        '';
+      }
+    ];
+
+    environment.systemPackages =
+      # Include the PAM modules in the system path mostly for the manpages.
+      [ pkgs.pam ]
+      ++ optional config.users.ldap.enable pam_ldap
+      ++ optional config.services.kanidm.enablePam pkgs.kanidm
+      ++ optional config.services.sssd.enable pkgs.sssd
+      ++ optionals config.security.pam.krb5.enable [pam_krb5 pam_ccreds]
+      ++ optionals config.security.pam.enableOTPW [ pkgs.otpw ]
+      ++ optionals config.security.pam.oath.enable [ pkgs.oath-toolkit ]
+      ++ optionals config.security.pam.p11.enable [ pkgs.pam_p11 ]
+      ++ optionals config.security.pam.enableFscrypt [ pkgs.fscrypt-experimental ]
+      ++ optionals config.security.pam.u2f.enable [ pkgs.pam_u2f ];
+
+    boot.supportedFilesystems = optionals config.security.pam.enableEcryptfs [ "ecryptfs" ];
+
+    security.wrappers = {
+      unix_chkpwd = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${pkgs.pam}/bin/unix_chkpwd";
+      };
+    };
+
+    environment.etc = mapAttrs' makePAMService config.security.pam.services;
+
+    security.pam.services =
+      { other.text =
+          ''
+            auth     required pam_warn.so
+            auth     required pam_deny.so
+            account  required pam_warn.so
+            account  required pam_deny.so
+            password required pam_warn.so
+            password required pam_deny.so
+            session  required pam_warn.so
+            session  required pam_deny.so
+          '';
+
+        # Most of these should be moved to specific modules.
+        i3lock = {};
+        i3lock-color = {};
+        vlock = {};
+        xlock = {};
+        xscreensaver = {};
+
+        runuser = { rootOK = true; unixAuth = false; setEnvironment = false; };
+
+        /* FIXME: should runuser -l start a systemd session? Currently
+           it complains "Cannot create session: Already running in a
+           session". */
+        runuser-l = { rootOK = true; unixAuth = false; };
+      } // optionalAttrs (config.security.pam.enableFscrypt) {
+        # Allow fscrypt to verify login passphrase
+        fscrypt = {};
+      };
+
+    security.apparmor.includes."abstractions/pam" =
+      lib.concatMapStrings
+        (name: "r ${config.environment.etc."pam.d/${name}".source},\n")
+        (attrNames config.security.pam.services) +
+      ''
+      mr ${getLib pkgs.pam}/lib/security/pam_filter/*,
+      mr ${getLib pkgs.pam}/lib/security/pam_*.so,
+      r ${getLib pkgs.pam}/lib/security/,
+      '' +
+      (with lib; pipe config.security.pam.services [
+        attrValues
+        (catAttrs "rules")
+        (concatMap attrValues)
+        (concatMap attrValues)
+        (filter (rule: rule.enable))
+        (catAttrs "modulePath")
+        (filter (hasPrefix "/"))
+        unique
+        (map (module: "mr ${module},"))
+        concatLines
+      ]);
+
+    security.sudo.extraConfig = optionalString config.security.pam.enableSSHAgentAuth ''
+      # Keep SSH_AUTH_SOCK so that pam_ssh_agent_auth.so can do its magic.
+      Defaults env_keep+=SSH_AUTH_SOCK
+    '';
+    };
+}
diff --git a/nixpkgs/nixos/modules/security/pam_mount.nix b/nixpkgs/nixos/modules/security/pam_mount.nix
new file mode 100644
index 000000000000..26f906f2a76a
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/pam_mount.nix
@@ -0,0 +1,185 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.security.pam.mount;
+
+  oflRequired = cfg.logoutHup || cfg.logoutTerm || cfg.logoutKill;
+
+  fake_ofl = pkgs.writeShellScriptBin "fake_ofl" ''
+    SIGNAL=$1
+    MNTPT=$2
+    ${pkgs.lsof}/bin/lsof | ${pkgs.gnugrep}/bin/grep $MNTPT | ${pkgs.gawk}/bin/awk '{print $2}' | ${pkgs.findutils}/bin/xargs ${pkgs.util-linux}/bin/kill -$SIGNAL
+  '';
+
+  anyPamMount = any (attrByPath ["pamMount"] false) (attrValues config.security.pam.services);
+in
+
+{
+  options = {
+
+    security.pam.mount = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the PAM mount (pam_mount) system to mount filesystems on user login.
+        '';
+      };
+
+      extraVolumes = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          List of volume definitions for pam_mount.
+          For more information, visit <https://pam-mount.sourceforge.net/pam_mount.conf.5.html>.
+        '';
+      };
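+      # Illustrative volume entry (values are examples), matching the <volume .../>
+      # elements emitted into pam_mount.conf.xml below:
+      #   security.pam.mount.extraVolumes = [
+      #     ''<volume user="alice" path="/dev/sdb1" mountpoint="/home/alice" />''
+      #   ];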
+
+      additionalSearchPaths = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.bindfs ]";
+        description = lib.mdDoc ''
+          Additional programs to include in the search path of pam_mount.
+          Useful for example if you want to use some FUSE filesystems like bindfs.
+        '';
+      };
+
+      cryptMountOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = literalExpression ''
+          [ "allow_discard" ]
+        '';
+        description = lib.mdDoc ''
+          Global mount options that apply to every crypt volume.
+          You can define volume-specific options in the volume definitions.
+        '';
+      };
+
+      fuseMountOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = literalExpression ''
+          [ "nodev" "nosuid" "force-user=%(USER)" "gid=%(USERGID)" "perms=0700" "chmod-deny" "chown-deny" "chgrp-deny" ]
+        '';
+        description = lib.mdDoc ''
+          Global mount options that apply to every FUSE volume.
+          You can define volume-specific options in the volume definitions.
+        '';
+      };
+
+      debugLevel = mkOption {
+        type = types.int;
+        default = 0;
+        example = 1;
+        description = lib.mdDoc ''
+          Sets the debug level. 0 disables debugging, 1 enables pam_mount tracing,
+          and 2 additionally enables tracing in mount.crypt. The default is 0.
+          For more information, visit <https://pam-mount.sourceforge.net/pam_mount.conf.5.html>.
+        '';
+      };
+
+      logoutWait = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Number of microseconds to wait before killing remaining processes after
+          the final logout.
+          For more information, visit <https://pam-mount.sourceforge.net/pam_mount.conf.5.html>.
+        '';
+      };
+
+      logoutHup = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Kill remaining processes after logout by sending a SIGHUP.
+        '';
+      };
+
+      logoutTerm = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Kill remaining processes after logout by sending a SIGTERM.
+        '';
+      };
+
+      logoutKill = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Kill remaining processes after logout by sending a SIGKILL.
+        '';
+      };
+
+      createMountPoints = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Create mountpoints for volumes if they do not exist.
+        '';
+      };
+
+      removeCreatedMountPoints = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Remove mountpoints created by pam_mount after logout. This
+          only affects mountpoints that have been created by pam_mount
+          in the same session.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf (cfg.enable || anyPamMount) {
+
+    environment.systemPackages = [ pkgs.pam_mount ];
+    environment.etc."security/pam_mount.conf.xml" = {
+      source =
+        let
+          extraUserVolumes = filterAttrs (n: u: u.cryptHomeLuks != null || u.pamMount != {}) config.users.users;
+          mkAttr = k: v: ''${k}="${v}"'';
+          userVolumeEntry = user: let
+            attrs = {
+              user = user.name;
+              path = user.cryptHomeLuks;
+              mountpoint = user.home;
+            } // user.pamMount;
+          in
+            "<volume ${concatStringsSep " " (mapAttrsToList mkAttr attrs)} />\n";
+        in
+         pkgs.writeText "pam_mount.conf.xml" ''
+          <?xml version="1.0" encoding="utf-8" ?>
+          <!DOCTYPE pam_mount SYSTEM "pam_mount.conf.xml.dtd">
+          <!-- auto-generated from NixOS: modules/config/users-groups.nix -->
+          <pam_mount>
+          <debug enable="${toString cfg.debugLevel}" />
+          <!-- if activated, requires ofl from hxtools to be present -->
+          <logout wait="${toString cfg.logoutWait}" hup="${if cfg.logoutHup then "yes" else "no"}" term="${if cfg.logoutTerm then "yes" else "no"}" kill="${if cfg.logoutKill then "yes" else "no"}" />
+          <!-- set PATH variable for pam_mount module -->
+          <path>${makeBinPath ([ pkgs.util-linux ] ++ cfg.additionalSearchPaths)}</path>
+          <!-- create mount point if not present -->
+          <mkmountpoint enable="${if cfg.createMountPoints then "1" else "0"}" remove="${if cfg.removeCreatedMountPoints then "true" else "false"}" />
+          <!-- specify the binaries to be called -->
+          <!-- the comma in front of the options is necessary for empty options -->
+          <fusemount>${pkgs.fuse}/bin/mount.fuse %(VOLUME) %(MNTPT) -o ,${concatStringsSep "," (cfg.fuseMountOptions ++ [ "%(OPTIONS)" ])}</fusemount>
+          <fuseumount>${pkgs.fuse}/bin/fusermount -u %(MNTPT)</fuseumount>
+          <!-- the comma in front of the options is necessary for empty options -->
+          <cryptmount>${pkgs.pam_mount}/bin/mount.crypt -o ,${concatStringsSep "," (cfg.cryptMountOptions ++ [ "%(OPTIONS)" ])} %(VOLUME) %(MNTPT)</cryptmount>
+          <cryptumount>${pkgs.pam_mount}/bin/umount.crypt %(MNTPT)</cryptumount>
+          <pmvarrun>${pkgs.pam_mount}/bin/pmvarrun -u %(USER) -o %(OPERATION)</pmvarrun>
+          ${optionalString oflRequired "<ofl>${fake_ofl}/bin/fake_ofl %(SIGNAL) %(MNTPT)</ofl>"}
+          ${concatStrings (map userVolumeEntry (attrValues extraUserVolumes))}
+          ${concatStringsSep "\n" cfg.extraVolumes}
+          </pam_mount>
+          '';
+    };
+
+  };
+}
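A minimal usage sketch for the pam_mount module above. It assumes the module's option prefix is `security.pam.mount` (defined earlier in this file) and relies on `users.users.<name>.cryptHomeLuks`, which the generated XML references; the user name and device path are hypothetical:

    { ... }:
    {
      security.pam.mount = {
        enable = true;
        debugLevel = 1;                  # trace pam_mount during login
        logoutWait = 1000000;            # wait one second before signalling leftover processes
        logoutTerm = true;               # ...then send them SIGTERM
        removeCreatedMountPoints = true; # clean up mountpoints created at login
      };

      # pam_mount emits a <volume user="alice" .../> entry for this user.
      users.users.alice.cryptHomeLuks = "/dev/disk/by-uuid/0000-0000";
    }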
diff --git a/nixpkgs/nixos/modules/security/pam_usb.nix b/nixpkgs/nixos/modules/security/pam_usb.nix
new file mode 100644
index 000000000000..4275c26c6bda
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/pam_usb.nix
@@ -0,0 +1,51 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.security.pam.usb;
+
+  anyUsbAuth = any (attrByPath ["usbAuth"] false) (attrValues config.security.pam.services);
+
+in
+
+{
+  options = {
+
+    security.pam.usb = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable USB login for all login systems that support it.  For
+          more information, visit <https://github.com/aluzzardi/pam_usb/wiki/Getting-Started#setting-up-devices-and-users>.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf (cfg.enable || anyUsbAuth) {
+
+    # Make sure pmount and pumount are setuid wrapped.
+    security.wrappers = {
+      pmount =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.pmount.out}/bin/pmount";
+        };
+      pumount =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.pmount.out}/bin/pumount";
+        };
+    };
+
+    environment.systemPackages = [ pkgs.pmount ];
+
+  };
+}
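A sketch of enabling the pam_usb module above; pairing a USB device with a user is done separately, as described in the Getting-Started wiki page linked in the option description:

    {
      # Adds pam_usb to every PAM service that supports it.
      security.pam.usb.enable = true;
    }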
diff --git a/nixpkgs/nixos/modules/security/please.nix b/nixpkgs/nixos/modules/security/please.nix
new file mode 100644
index 000000000000..88bb9cba2bfc
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/please.nix
@@ -0,0 +1,122 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.security.please;
+  ini = pkgs.formats.ini { };
+in
+{
+  options.security.please = {
+    enable = mkEnableOption (mdDoc ''
+      please, a sudo clone which allows a user to execute a command or edit a
+      file as another user
+    '');
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.please;
+      defaultText = literalExpression "pkgs.please";
+      description = mdDoc ''
+        Which package to use for {command}`please`.
+      '';
+    };
+
+    wheelNeedsPassword = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether users of the `wheel` group must provide a password to run
+        commands or edit files with {command}`please` and
+        {command}`pleaseedit` respectively.
+      '';
+    };
+
+    settings = mkOption {
+      type = ini.type;
+      default = { };
+      example = {
+        jim_run_any_as_root = {
+          name = "jim";
+          type = "run";
+          target = "root";
+          rule = ".*";
+          require_pass = false;
+        };
+        jim_edit_etc_hosts_as_root = {
+          name = "jim";
+          type = "edit";
+          target = "root";
+          rule = "/etc/hosts";
+          editmode = 644;
+          require_pass = true;
+        };
+      };
+      description = mdDoc ''
+        Please configuration. Refer to
+        <https://github.com/edneville/please/blob/master/please.ini.md> for
+        details.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.wrappers =
+      let
+        owner = "root";
+        group = "root";
+        setuid = true;
+      in
+      {
+        please = {
+          source = "${cfg.package}/bin/please";
+          inherit owner group setuid;
+        };
+        pleaseedit = {
+          source = "${cfg.package}/bin/pleaseedit";
+          inherit owner group setuid;
+        };
+      };
+
+    security.please.settings = rec {
+      # The "wheel" group is allowed to do anything by default but this can be
+      # overridden.
+      wheel_run_as_any = {
+        type = "run";
+        group = true;
+        name = "wheel";
+        target = ".*";
+        rule = ".*";
+        require_pass = cfg.wheelNeedsPassword;
+      };
+      wheel_edit_as_any = wheel_run_as_any // { type = "edit"; };
+      wheel_list_as_any = wheel_run_as_any // { type = "list"; };
+    };
+
+    environment = {
+      systemPackages = [ cfg.package ];
+
+      etc."please.ini".source = ini.generate "please.ini"
+        (cfg.settings // (rec {
+          # The "root" user is allowed to do anything by default and this cannot
+          # be overridden.
+          root_run_as_any = {
+            type = "run";
+            name = "root";
+            target = ".*";
+            rule = ".*";
+            require_pass = false;
+          };
+          root_edit_as_any = root_run_as_any // { type = "edit"; };
+          root_list_as_any = root_run_as_any // { type = "list"; };
+        }));
+    };
+
+    security.pam.services.please = {
+      sshAgentAuth = true;
+      usshAuth = true;
+    };
+
+    meta.maintainers = with maintainers; [ azahi ];
+  };
+}
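A configuration sketch for the please module above; the rule name, user, and file path are hypothetical:

    {
      security.please = {
        enable = true;
        wheelNeedsPassword = false;   # wheel members may run please without a password
        settings.alice_edit_hosts = {
          name = "alice";
          type = "edit";
          target = "root";
          rule = "/etc/hosts";
          require_pass = true;
        };
      };
    }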
diff --git a/nixpkgs/nixos/modules/security/polkit.nix b/nixpkgs/nixos/modules/security/polkit.nix
new file mode 100644
index 000000000000..327f49c0b637
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/polkit.nix
@@ -0,0 +1,119 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.security.polkit;
+
+in
+
+{
+
+  options = {
+
+    security.polkit.enable = mkEnableOption (lib.mdDoc "polkit");
+
+    security.polkit.debug = mkEnableOption (lib.mdDoc "debug logs from polkit. This is required in order to see log messages from rule definitions");
+
+    security.polkit.extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example =
+        ''
+          /* Log authorization checks. */
+          polkit.addRule(function(action, subject) {
+            // Make sure to set { security.polkit.debug = true; } in configuration.nix
+            polkit.log("user " +  subject.user + " is attempting action " + action.id + " from PID " + subject.pid);
+          });
+
+          /* Allow any local user to do anything (dangerous!). */
+          polkit.addRule(function(action, subject) {
+            if (subject.local) return "yes";
+          });
+        '';
+      description = lib.mdDoc
+        ''
+          Any polkit rules to be added to the configuration (in JavaScript ;-). See:
+          <https://www.freedesktop.org/software/polkit/docs/latest/polkit.8.html#polkit-rules>
+        '';
+    };
+
+    security.polkit.adminIdentities = mkOption {
+      type = types.listOf types.str;
+      default = [ "unix-group:wheel" ];
+      example = [ "unix-user:alice" "unix-group:admin" ];
+      description = lib.mdDoc
+        ''
+          Specifies which users are considered “administrators”, for those
+          actions that require the user to authenticate as an
+          administrator (i.e. have an `auth_admin`
+          value).  By default, this is all users in the `wheel` group.
+        '';
+    };
+
+  };
+
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.polkit.bin pkgs.polkit.out ];
+
+    systemd.packages = [ pkgs.polkit.out ];
+
+    systemd.services.polkit.serviceConfig.ExecStart = [
+      ""
+      "${pkgs.polkit.out}/lib/polkit-1/polkitd ${optionalString (!cfg.debug) "--no-debug"}"
+    ];
+
+    systemd.services.polkit.restartTriggers = [ config.system.path ];
+    systemd.services.polkit.stopIfChanged = false;
+
+    # The polkit daemon reads action/rule files
+    environment.pathsToLink = [ "/share/polkit-1" ];
+
+    # PolKit rules for NixOS.
+    environment.etc."polkit-1/rules.d/10-nixos.rules".text =
+      ''
+        polkit.addAdminRule(function(action, subject) {
+          return [${concatStringsSep ", " (map (i: "\"${i}\"") cfg.adminIdentities)}];
+        });
+
+        ${cfg.extraConfig}
+      ''; #TODO: validation on compilation (at least against typos)
+
+    services.dbus.packages = [ pkgs.polkit.out ];
+
+    security.pam.services.polkit-1 = {};
+
+    security.wrappers = {
+      pkexec =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.polkit.bin}/bin/pkexec";
+        };
+      polkit-agent-helper-1 =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.polkit.out}/lib/polkit-1/polkit-agent-helper-1";
+        };
+    };
+
+    systemd.tmpfiles.rules = [
+      # Probably no longer needed; clean up
+      "R /var/lib/polkit-1"
+      "R /var/lib/PolicyKit"
+    ];
+
+    users.users.polkituser = {
+      description = "PolKit daemon";
+      uid = config.ids.uids.polkituser;
+      group = "polkituser";
+    };
+
+    users.groups.polkituser = {};
+  };
+
+}
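A sketch of how the polkit module above might be used with a custom rule; the action id and group below are an example policy, not something this module sets up:

    {
      security.polkit = {
        enable = true;
        debug = true;   # required to see polkit.log() output from rules
        extraConfig = ''
          // Let wheel members manage systemd units without extra authentication.
          polkit.addRule(function(action, subject) {
            if (action.id == "org.freedesktop.systemd1.manage-units" &&
                subject.isInGroup("wheel")) {
              return polkit.Result.YES;
            }
          });
        '';
      };
    }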
diff --git a/nixpkgs/nixos/modules/security/rngd.nix b/nixpkgs/nixos/modules/security/rngd.nix
new file mode 100644
index 000000000000..8cca1c26d683
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/rngd.nix
@@ -0,0 +1,16 @@
+{ lib, ... }:
+let
+  removed = k: lib.mkRemovedOptionModule [ "security" "rngd" k ];
+in
+{
+  imports = [
+    (removed "enable" ''
+       rngd is not necessary for any device that the kernel recognises
+       as a hardware RNG, as it will automatically run the krngd task
+       to periodically collect random data from the device and mix it
+       into the kernel's RNG.
+    '')
+    (removed "debug"
+      "The rngd module was removed, so its debug option does nothing.")
+  ];
+}
diff --git a/nixpkgs/nixos/modules/security/rtkit.nix b/nixpkgs/nixos/modules/security/rtkit.nix
new file mode 100644
index 000000000000..0f58b4dce84a
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/rtkit.nix
@@ -0,0 +1,47 @@
+# A module for ‘rtkit’, a DBus system service that hands out realtime
+# scheduling priority to processes that ask for it.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  options = {
+
+    security.rtkit.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the RealtimeKit system service, which hands
+        out realtime scheduling priority to user processes on
+        demand. For example, the PulseAudio server uses this to
+        acquire realtime priority.
+      '';
+    };
+
+  };
+
+
+  config = mkIf config.security.rtkit.enable {
+
+    security.polkit.enable = true;
+
+    # To make polkit pickup rtkit policies
+    environment.systemPackages = [ pkgs.rtkit ];
+
+    systemd.packages = [ pkgs.rtkit ];
+
+    services.dbus.packages = [ pkgs.rtkit ];
+
+    users.users.rtkit =
+      {
+        isSystemUser = true;
+        group = "rtkit";
+        description = "RealtimeKit daemon";
+      };
+    users.groups.rtkit = {};
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/security/sudo-rs.nix b/nixpkgs/nixos/modules/security/sudo-rs.nix
new file mode 100644
index 000000000000..6b8f09a8d3d0
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/sudo-rs.nix
@@ -0,0 +1,296 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) sudo sudo-rs;
+
+  cfg = config.security.sudo-rs;
+
+  enableSSHAgentAuth =
+    with config.security;
+    pam.enableSSHAgentAuth && pam.sudo.sshAgentAuth;
+
+  usingMillersSudo = cfg.package.pname == sudo.pname;
+  usingSudoRs = cfg.package.pname == sudo-rs.pname;
+
+  toUserString = user: if (isInt user) then "#${toString user}" else "${user}";
+  toGroupString = group: if (isInt group) then "%#${toString group}" else "%${group}";
+
+  toCommandOptionsString = options:
+    "${concatStringsSep ":" options}${optionalString (length options != 0) ":"} ";
+
+  toCommandsString = commands:
+    concatStringsSep ", " (
+      map (command:
+        if (isString command) then
+          command
+        else
+          "${toCommandOptionsString command.options}${command.command}"
+      ) commands
+    );
+
+in
+
+{
+
+  ###### interface
+
+  options.security.sudo-rs = {
+
+    defaultOptions = mkOption {
+      type = with types; listOf str;
+      default = optional usingMillersSudo "SETENV";
+      defaultText = literalMD ''
+        `[ "SETENV" ]` if using the default `sudo` implementation
+      '';
+      description = mdDoc ''
+        Options used for the default rules, granting `root` and the
+        `wheel` group permission to run any command as any user.
+      '';
+    };
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Whether to enable the {command}`sudo` command, which
+        allows non-root users to execute commands as root.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.sudo-rs;
+      defaultText = literalExpression "pkgs.sudo-rs";
+      description = mdDoc ''
+        Which package to use for `sudo`.
+      '';
+    };
+
+    wheelNeedsPassword = mkOption {
+      type = types.bool;
+      default = true;
+      description = mdDoc ''
+        Whether users of the `wheel` group must
+        provide a password to run commands as super user via {command}`sudo`.
+      '';
+      };
+
+    execWheelOnly = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Only allow members of the `wheel` group to execute sudo by
+        setting the executable's permissions accordingly.
+        This prevents users that are not members of `wheel` from
+        exploiting vulnerabilities in sudo such as CVE-2021-3156.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.lines;
+      # Note: if syntax errors are detected in this file, the NixOS
+      # configuration will fail to build.
+      description = mdDoc ''
+        This string contains the contents of the
+        {file}`sudoers` file.
+      '';
+    };
+
+    extraRules = mkOption {
+      description = mdDoc ''
+        Define specific rules to be in the {file}`sudoers` file.
+        More specific rules should come after more general ones in order to
+        yield the expected behavior. You can use mkBefore/mkAfter to ensure
+        this is the case when configuration options are merged.
+      '';
+      default = [];
+      example = literalExpression ''
+        [
+          # Allow execution of any command by all users in group sudo,
+          # requiring a password.
+          { groups = [ "sudo" ]; commands = [ "ALL" ]; }
+
+          # Allow execution of "/home/root/secret.sh" by user `backup`, `database`
+          # and the group with GID `1006` without a password.
+          { users = [ "backup" "database" ]; groups = [ 1006 ];
+            commands = [ { command = "/home/root/secret.sh"; options = [ "SETENV" "NOPASSWD" ]; } ]; }
+
+          # Allow all users of group `bar` to run two executables as user `foo`
+          # with arguments being pre-set.
+          { groups = [ "bar" ]; runAs = "foo";
+            commands =
+              [ "/home/baz/cmd1.sh hello-sudo"
+                  { command = '''/home/baz/cmd2.sh ""'''; options = [ "SETENV" ]; } ]; }
+        ]
+      '';
+      type = with types; listOf (submodule {
+        options = {
+          users = mkOption {
+            type = with types; listOf (either str int);
+            description = mdDoc ''
+              The usernames / UIDs this rule should apply for.
+            '';
+            default = [];
+          };
+
+          groups = mkOption {
+            type = with types; listOf (either str int);
+            description = mdDoc ''
+              The groups / GIDs this rule should apply for.
+            '';
+            default = [];
+          };
+
+          host = mkOption {
+            type = types.str;
+            default = "ALL";
+            description = mdDoc ''
+              For which host this rule should apply.
+            '';
+          };
+
+          runAs = mkOption {
+            type = with types; str;
+            default = "ALL:ALL";
+            description = mdDoc ''
+              Under which user/group the specified command is allowed to run.
+
+              A user can be specified using just the username: `"foo"`.
+              It is also possible to specify a user/group combination using `"foo:bar"`
+              or to only allow running as a specific group with `":bar"`.
+            '';
+          };
+
+          commands = mkOption {
+            description = mdDoc ''
+              The commands for which the rule should apply.
+            '';
+            type = with types; listOf (either str (submodule {
+
+              options = {
+                command = mkOption {
+                  type = with types; str;
+                  description = mdDoc ''
+                    A command being either just a path to a binary to allow any arguments,
+                    the full command with arguments pre-set or with `""` used as the argument,
+                    not allowing arguments to the command at all.
+                  '';
+                };
+
+                options = mkOption {
+                  type = with types; listOf (enum [ "NOPASSWD" "PASSWD" "NOEXEC" "EXEC" "SETENV" "NOSETENV" "LOG_INPUT" "NOLOG_INPUT" "LOG_OUTPUT" "NOLOG_OUTPUT" ]);
+                  description = mdDoc ''
+                    Options for running the command. Refer to the [sudo manual](https://www.sudo.ws/man/1.7.10/sudoers.man.html).
+                  '';
+                  default = [];
+                };
+              };
+
+            }));
+          };
+        };
+      });
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = mdDoc ''
+        Extra configuration text appended to {file}`sudoers`.
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    security.sudo-rs.extraRules =
+      let
+        defaultRule = { users ? [], groups ? [], opts ? [] }: [ {
+          inherit users groups;
+          commands = [ {
+            command = "ALL";
+            options = opts ++ cfg.defaultOptions;
+          } ];
+        } ];
+      in mkMerge [
+        # This is ordered before users' `mkBefore` rules,
+        # so as not to introduce unexpected changes.
+        (mkOrder 400 (defaultRule { users = [ "root" ]; }))
+
+        # This is ordered to show before (most) other rules, but
+        # late-enough for a user to `mkBefore` it.
+        (mkOrder 600 (defaultRule {
+          groups = [ "wheel" ];
+          opts = (optional (!cfg.wheelNeedsPassword) "NOPASSWD");
+        }))
+      ];
+
+    security.sudo-rs.configFile = concatStringsSep "\n" (filter (s: s != "") [
+      ''
+        # Don't edit this file. Set the NixOS options ‘security.sudo-rs.configFile’
+        # or ‘security.sudo-rs.extraRules’ instead.
+      ''
+      (optionalString enableSSHAgentAuth ''
+        # Keep SSH_AUTH_SOCK so that pam_ssh_agent_auth.so can do its magic.
+        Defaults env_keep+=SSH_AUTH_SOCK
+      '')
+      (concatStringsSep "\n" (
+        lists.flatten (
+          map (
+            rule: optionals (length rule.commands != 0) [
+              (map (user: "${toUserString user}	${rule.host}=(${rule.runAs})	${toCommandsString rule.commands}") rule.users)
+              (map (group: "${toGroupString group}	${rule.host}=(${rule.runAs})	${toCommandsString rule.commands}") rule.groups)
+            ]
+          ) cfg.extraRules
+        )
+      ) + "\n")
+      (optionalString (cfg.extraConfig != "") ''
+        # extraConfig
+        ${cfg.extraConfig}
+      '')
+    ]);
+
+    security.wrappers = let
+      owner = "root";
+      group = if cfg.execWheelOnly then "wheel" else "root";
+      setuid = true;
+      permissions = if cfg.execWheelOnly then "u+rx,g+x" else "u+rx,g+x,o+x";
+    in {
+      sudo = {
+        source = "${cfg.package.out}/bin/sudo";
+        inherit owner group setuid permissions;
+      };
+      # sudo-rs does not yet ship a sudoedit (as of v0.2.0)
+      sudoedit = mkIf usingMillersSudo {
+        source = "${cfg.package.out}/bin/sudoedit";
+        inherit owner group setuid permissions;
+      };
+    };
+
+    environment.systemPackages = [ sudo ];
+
+    security.pam.services.sudo = { sshAgentAuth = true; usshAuth = true; };
+    security.pam.services.sudo-i = mkIf usingSudoRs
+      { sshAgentAuth = true; usshAuth = true; };
+
+    environment.etc.sudoers =
+      { source =
+          pkgs.runCommand "sudoers"
+          {
+            src = pkgs.writeText "sudoers-in" cfg.configFile;
+            preferLocalBuild = true;
+          }
+          "${pkgs.buildPackages."${cfg.package.pname}"}/bin/visudo -f $src -c && cp $src $out";
+        mode = "0440";
+      };
+
+  };
+
+  meta.maintainers = [ lib.maintainers.nicoo ];
+
+}
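A sketch of the sudo-rs module above in use; the user, service name, and command path are hypothetical:

    {
      security.sudo-rs = {
        enable = true;
        execWheelOnly = true;     # only wheel members may execute the sudo binary at all
        extraRules = [
          { users = [ "deploy" ];
            commands = [
              { command = "/run/current-system/sw/bin/systemctl restart myapp.service";
                options = [ "NOPASSWD" ];
              }
            ];
          }
        ];
      };
    }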
diff --git a/nixpkgs/nixos/modules/security/sudo.nix b/nixpkgs/nixos/modules/security/sudo.nix
new file mode 100644
index 000000000000..3dd5d2e525d9
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/sudo.nix
@@ -0,0 +1,280 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.security.sudo;
+
+  inherit (config.security.pam) enableSSHAgentAuth;
+
+  toUserString = user: if (isInt user) then "#${toString user}" else "${user}";
+  toGroupString = group: if (isInt group) then "%#${toString group}" else "%${group}";
+
+  toCommandOptionsString = options:
+    "${concatStringsSep ":" options}${optionalString (length options != 0) ":"} ";
+
+  toCommandsString = commands:
+    concatStringsSep ", " (
+      map (command:
+        if (isString command) then
+          command
+        else
+          "${toCommandOptionsString command.options}${command.command}"
+      ) commands
+    );
+
+in
+
+{
+
+  ###### interface
+
+  options.security.sudo = {
+
+    defaultOptions = mkOption {
+      type = with types; listOf str;
+      default = [ "SETENV" ];
+      description = mdDoc ''
+        Options used for the default rules, granting `root` and the
+        `wheel` group permission to run any command as any user.
+      '';
+    };
+
+    enable = mkOption {
+      type = types.bool;
+      default = true;
+      description =
+        lib.mdDoc ''
+          Whether to enable the {command}`sudo` command, which
+          allows non-root users to execute commands as root.
+        '';
+    };
+
+    package = mkPackageOption pkgs "sudo" { };
+
+    wheelNeedsPassword = mkOption {
+      type = types.bool;
+      default = true;
+      description = mdDoc ''
+        Whether users of the `wheel` group must
+        provide a password to run commands as super user via {command}`sudo`.
+      '';
+      };
+
+    execWheelOnly = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Only allow members of the `wheel` group to execute sudo by
+        setting the executable's permissions accordingly.
+        This prevents users that are not members of `wheel` from
+        exploiting vulnerabilities in sudo such as CVE-2021-3156.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.lines;
+      # Note: if syntax errors are detected in this file, the NixOS
+      # configuration will fail to build.
+      description = mdDoc ''
+        This string contains the contents of the
+        {file}`sudoers` file.
+      '';
+    };
+
+    extraRules = mkOption {
+      description = mdDoc ''
+        Define specific rules to be in the {file}`sudoers` file.
+        More specific rules should come after more general ones in order to
+        yield the expected behavior. You can use mkBefore/mkAfter to ensure
+        this is the case when configuration options are merged.
+      '';
+      default = [];
+      example = literalExpression ''
+        [
+          # Allow execution of any command by all users in group sudo,
+          # requiring a password.
+          { groups = [ "sudo" ]; commands = [ "ALL" ]; }
+
+          # Allow execution of "/home/root/secret.sh" by user `backup`, `database`
+          # and the group with GID `1006` without a password.
+          { users = [ "backup" "database" ]; groups = [ 1006 ];
+            commands = [ { command = "/home/root/secret.sh"; options = [ "SETENV" "NOPASSWD" ]; } ]; }
+
+          # Allow all users of group `bar` to run two executables as user `foo`
+          # with arguments being pre-set.
+          { groups = [ "bar" ]; runAs = "foo";
+            commands =
+              [ "/home/baz/cmd1.sh hello-sudo"
+                  { command = '''/home/baz/cmd2.sh ""'''; options = [ "SETENV" ]; } ]; }
+        ]
+      '';
+      type = with types; listOf (submodule {
+        options = {
+          users = mkOption {
+            type = with types; listOf (either str int);
+            description = mdDoc ''
+              The usernames / UIDs this rule should apply for.
+            '';
+            default = [];
+          };
+
+          groups = mkOption {
+            type = with types; listOf (either str int);
+            description = mdDoc ''
+              The groups / GIDs this rule should apply for.
+            '';
+            default = [];
+          };
+
+          host = mkOption {
+            type = types.str;
+            default = "ALL";
+            description = mdDoc ''
+              For which host this rule should apply.
+            '';
+          };
+
+          runAs = mkOption {
+            type = with types; str;
+            default = "ALL:ALL";
+            description = mdDoc ''
+              Under which user/group the specified command is allowed to run.
+
+              A user can be specified using just the username: `"foo"`.
+              It is also possible to specify a user/group combination using `"foo:bar"`
+              or to only allow running as a specific group with `":bar"`.
+            '';
+          };
+
+          commands = mkOption {
+            description = mdDoc ''
+              The commands for which the rule should apply.
+            '';
+            type = with types; listOf (either str (submodule {
+
+              options = {
+                command = mkOption {
+                  type = with types; str;
+                  description = mdDoc ''
+                    A command being either just a path to a binary to allow any arguments,
+                    the full command with arguments pre-set or with `""` used as the argument,
+                    not allowing arguments to the command at all.
+                  '';
+                };
+
+                options = mkOption {
+                  type = with types; listOf (enum [ "NOPASSWD" "PASSWD" "NOEXEC" "EXEC" "SETENV" "NOSETENV" "LOG_INPUT" "NOLOG_INPUT" "LOG_OUTPUT" "NOLOG_OUTPUT" ]);
+                  description = mdDoc ''
+                    Options for running the command. Refer to the [sudo manual](https://www.sudo.ws/man/1.7.10/sudoers.man.html).
+                  '';
+                  default = [];
+                };
+              };
+
+            }));
+          };
+        };
+      });
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = mdDoc ''
+        Extra configuration text appended to {file}`sudoers`.
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = cfg.package.pname != "sudo-rs";
+      message = ''
+        NixOS' `sudo` module does not support `sudo-rs`; see `security.sudo-rs` instead.
+      '';
+    } ];
+
+    security.sudo.extraRules =
+      let
+        defaultRule = { users ? [], groups ? [], opts ? [] }: [ {
+          inherit users groups;
+          commands = [ {
+            command = "ALL";
+            options = opts ++ cfg.defaultOptions;
+          } ];
+        } ];
+      in mkMerge [
+        # This is ordered before users' `mkBefore` rules,
+        # so as not to introduce unexpected changes.
+        (mkOrder 400 (defaultRule { users = [ "root" ]; }))
+
+        # This is ordered to show before (most) other rules, but
+        # late-enough for a user to `mkBefore` it.
+        (mkOrder 600 (defaultRule {
+          groups = [ "wheel" ];
+          opts = (optional (!cfg.wheelNeedsPassword) "NOPASSWD");
+        }))
+      ];
+
+    security.sudo.configFile = concatStringsSep "\n" (filter (s: s != "") [
+      ''
+        # Don't edit this file. Set the NixOS options ‘security.sudo.configFile’
+        # or ‘security.sudo.extraRules’ instead.
+      ''
+      (pipe cfg.extraRules [
+        (filter (rule: length rule.commands != 0))
+        (map (rule: [
+          (map (user: "${toUserString user}     ${rule.host}=(${rule.runAs})    ${toCommandsString rule.commands}") rule.users)
+          (map (group: "${toGroupString group}  ${rule.host}=(${rule.runAs})    ${toCommandsString rule.commands}") rule.groups)
+        ]))
+        flatten
+        (concatStringsSep "\n")
+      ])
+      "\n"
+      (optionalString (cfg.extraConfig != "") ''
+        # extraConfig
+        ${cfg.extraConfig}
+      '')
+    ]);
+
+    security.wrappers = let
+      owner = "root";
+      group = if cfg.execWheelOnly then "wheel" else "root";
+      setuid = true;
+      permissions = if cfg.execWheelOnly then "u+rx,g+x" else "u+rx,g+x,o+x";
+    in {
+      sudo = {
+        source = "${cfg.package.out}/bin/sudo";
+        inherit owner group setuid permissions;
+      };
+      sudoedit = {
+        source = "${cfg.package.out}/bin/sudoedit";
+        inherit owner group setuid permissions;
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    security.pam.services.sudo = { sshAgentAuth = true; usshAuth = true; };
+
+    environment.etc.sudoers =
+      { source =
+          pkgs.runCommand "sudoers"
+          {
+            src = pkgs.writeText "sudoers-in" cfg.configFile;
+            preferLocalBuild = true;
+          }
+          # Make sure that the sudoers file is syntactically valid.
+          # (currently disabled - NIXOS-66)
+          "${pkgs.buildPackages.sudo}/sbin/visudo -f $src -c && cp $src $out";
+        mode = "0440";
+      };
+
+  };
+
+}
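A sketch of the sudo module above in use, combining the default wheel rule with one extra group rule; the group and script path are hypothetical:

    {
      security.sudo = {
        enable = true;
        wheelNeedsPassword = true;
        extraRules = [
          { groups = [ "backup" ];
            commands = [
              { command = "/etc/backup/run.sh";
                options = [ "NOPASSWD" ];
              }
            ];
          }
        ];
      };
    }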
diff --git a/nixpkgs/nixos/modules/security/systemd-confinement.nix b/nixpkgs/nixos/modules/security/systemd-confinement.nix
new file mode 100644
index 000000000000..cdf6c22ef1b6
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/systemd-confinement.nix
@@ -0,0 +1,197 @@
+{ config, pkgs, lib, utils, ... }:
+
+let
+  toplevelConfig = config;
+  inherit (lib) types;
+  inherit (utils.systemdUtils.lib) mkPathSafeName;
+in {
+  options.systemd.services = lib.mkOption {
+    type = types.attrsOf (types.submodule ({ name, config, ... }: {
+      options.confinement.enable = lib.mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If set, all the required runtime store paths for this service are
+          bind-mounted into a `tmpfs`-based
+          {manpage}`chroot(2)`.
+        '';
+      };
+
+      options.confinement.fullUnit = lib.mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to include the full closure of the systemd unit file into the
+          chroot, instead of just the dependencies for the executables.
+
+          ::: {.warning}
+          While it may be tempting to just enable this option to
+          make things work quickly, please be aware that this might add paths
+          to the closure of the chroot that you didn't anticipate. It's better
+          to use {option}`confinement.packages` to **explicitly** add additional store paths to the
+          chroot.
+          :::
+        '';
+      };
+
+      options.confinement.packages = lib.mkOption {
+        type = types.listOf (types.either types.str types.package);
+        default = [];
+        description = let
+          mkScOption = optName: "{option}`serviceConfig.${optName}`";
+        in lib.mdDoc ''
+          Additional packages or strings with context to add to the closure of
+          the chroot. By default, this includes all the packages from the
+          ${lib.concatMapStringsSep ", " mkScOption [
+            "ExecReload" "ExecStartPost" "ExecStartPre" "ExecStop"
+            "ExecStopPost"
+          ]} and ${mkScOption "ExecStart"} options. If you want to have all the
+          dependencies of this systemd unit, you can use
+          {option}`confinement.fullUnit`.
+
+          ::: {.note}
+          The store paths listed in {option}`path` are
+          **not** included in the closure, nor are
+          paths from other options except those listed
+          above.
+          :::
+        '';
+      };
+
+      options.confinement.binSh = lib.mkOption {
+        type = types.nullOr types.path;
+        default = toplevelConfig.environment.binsh;
+        defaultText = lib.literalExpression "config.environment.binsh";
+        example = lib.literalExpression ''"''${pkgs.dash}/bin/dash"'';
+        description = lib.mdDoc ''
+          The program to make available as {file}`/bin/sh` inside
+          the chroot. If this is set to `null`, no
+          {file}`/bin/sh` is provided at all.
+
+          This is useful for some applications, which for example use the
+          {manpage}`system(3)` library function to execute commands.
+        '';
+      };
+
+      options.confinement.mode = lib.mkOption {
+        type = types.enum [ "full-apivfs" "chroot-only" ];
+        default = "full-apivfs";
+        description = lib.mdDoc ''
+          The value `full-apivfs` (the default) sets up
+          private {file}`/dev`, {file}`/proc`,
+          {file}`/sys` and {file}`/tmp` file systems in a separate user
+          namespace.
+
+          If this is set to `chroot-only`, only the file
+          system name space is set up along with the call to
+          {manpage}`chroot(2)`.
+
+          ::: {.note}
+          This doesn't cover network namespaces and is solely for
+          file system level isolation.
+          :::
+        '';
+      };
+
+      config = let
+        inherit (config.confinement) binSh fullUnit;
+        wantsAPIVFS = lib.mkDefault (config.confinement.mode == "full-apivfs");
+      in lib.mkIf config.confinement.enable {
+        serviceConfig = {
+          RootDirectory = "/var/empty";
+          TemporaryFileSystem = "/";
+          PrivateMounts = lib.mkDefault true;
+
+          # https://github.com/NixOS/nixpkgs/issues/14645 is a future attempt
+          # to change some of these to default to true.
+          #
+          # If we run in chroot-only mode, having something like PrivateDevices
+          # set to true by default will mount /dev within the chroot, whereas
+          # with "chroot-only" it's expected that there are no /dev, /proc and
+          # /sys file systems available.
+          #
+          # However, if this suddenly becomes true, the attack surface will
+          # increase, so let's explicitly set these options to true/false
+          # depending on the mode.
+          MountAPIVFS = wantsAPIVFS;
+          PrivateDevices = wantsAPIVFS;
+          PrivateTmp = wantsAPIVFS;
+          PrivateUsers = wantsAPIVFS;
+          ProtectControlGroups = wantsAPIVFS;
+          ProtectKernelModules = wantsAPIVFS;
+          ProtectKernelTunables = wantsAPIVFS;
+        };
+        confinement.packages = let
+          execOpts = [
+            "ExecReload" "ExecStart" "ExecStartPost" "ExecStartPre" "ExecStop"
+            "ExecStopPost"
+          ];
+          execPkgs = lib.concatMap (opt: let
+            isSet = config.serviceConfig ? ${opt};
+          in lib.flatten (lib.optional isSet config.serviceConfig.${opt})) execOpts;
+          unitAttrs = toplevelConfig.systemd.units."${name}.service";
+          allPkgs = lib.singleton (builtins.toJSON unitAttrs);
+          unitPkgs = if fullUnit then allPkgs else execPkgs;
+        in unitPkgs ++ lib.optional (binSh != null) binSh;
+      };
+    }));
+  };
+
+  config.assertions = lib.concatLists (lib.mapAttrsToList (name: cfg: let
+    whatOpt = optName: "The 'serviceConfig' option '${optName}' for"
+                    + " service '${name}' is enabled in conjunction with"
+                    + " 'confinement.enable'";
+  in lib.optionals cfg.confinement.enable [
+    { assertion = !cfg.serviceConfig.RootDirectoryStartOnly or false;
+      message = "${whatOpt "RootDirectoryStartOnly"}, but right now systemd"
+              + " doesn't support restricting bind-mounts to 'ExecStart'."
+              + " Please either define a separate service or find a way to run"
+              + " commands other than ExecStart within the chroot.";
+    }
+    { assertion = !cfg.serviceConfig.DynamicUser or false;
+      message = "${whatOpt "DynamicUser"}. Please create a dedicated user via"
+              + " the 'users.users' option instead as this combination is"
+              + " currently not supported.";
+    }
+    { assertion = cfg.serviceConfig ? ProtectSystem -> cfg.serviceConfig.ProtectSystem == false;
+      message = "${whatOpt "ProtectSystem"}. ProtectSystem is not compatible"
+              + " with service confinement as it fails to remount /usr within"
+              + " our chroot. Please disable the option.";
+    }
+  ]) config.systemd.services);
+
+  config.systemd.packages = lib.concatLists (lib.mapAttrsToList (name: cfg: let
+    rootPaths = let
+      contents = lib.concatStringsSep "\n" cfg.confinement.packages;
+    in pkgs.writeText "${mkPathSafeName name}-string-contexts.txt" contents;
+
+    chrootPaths = pkgs.runCommand "${mkPathSafeName name}-chroot-paths" {
+      closureInfo = pkgs.closureInfo { inherit rootPaths; };
+      serviceName = "${name}.service";
+      excludedPath = rootPaths;
+    } ''
+      mkdir -p "$out/lib/systemd/system/$serviceName.d"
+      serviceFile="$out/lib/systemd/system/$serviceName.d/confinement.conf"
+
+      echo '[Service]' > "$serviceFile"
+
+      # /bin/sh is special here, because the option value could contain a
+      # symlink and we need to properly resolve it.
+      ${lib.optionalString (cfg.confinement.binSh != null) ''
+        binsh=${lib.escapeShellArg cfg.confinement.binSh}
+        realprog="$(readlink -e "$binsh")"
+        echo "BindReadOnlyPaths=$realprog:/bin/sh" >> "$serviceFile"
+      ''}
+
+      while read storePath; do
+        if [ -L "$storePath" ]; then
+          # Currently, systemd can't cope with symlinks in Bind(ReadOnly)Paths,
+          # so let's just bind-mount the target to that location.
+          echo "BindReadOnlyPaths=$(readlink -e "$storePath"):$storePath"
+        elif [ "$storePath" != "$excludedPath" ]; then
+          echo "BindReadOnlyPaths=$storePath"
+        fi
+      done < "$closureInfo/store-paths" >> "$serviceFile"
+    '';
+  in lib.optional cfg.confinement.enable chrootPaths) config.systemd.services);
+}
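A sketch of confining a service with the module above; the service name is hypothetical and pkgs.hello stands in for a real daemon:

    { pkgs, ... }:
    {
      systemd.services.my-daemon = {
        wantedBy = [ "multi-user.target" ];
        serviceConfig.ExecStart = "${pkgs.hello}/bin/hello";  # stand-in for a real daemon
        confinement = {
          enable = true;
          mode = "chroot-only";            # file-system isolation only, no private /dev, /proc, /sys
          packages = [ pkgs.coreutils ];   # extra store paths bind-mounted into the chroot
        };
      };
    }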
diff --git a/nixpkgs/nixos/modules/security/tpm2.nix b/nixpkgs/nixos/modules/security/tpm2.nix
new file mode 100644
index 000000000000..708c3a69d174
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/tpm2.nix
@@ -0,0 +1,174 @@
+{ lib, pkgs, config, ... }:
+let
+  cfg = config.security.tpm2;
+
+  # This snippet is taken from tpm2-tss/dist/tpm-udev.rules, but modified to allow custom user/groups
+  # The idea is that the tssUser is allowed to access the TPM and kernel TPM resource manager, while
+  # the tssGroup is only allowed to access the kernel resource manager
+  # Therefore, if either of the two are null, the respective part isn't generated
+  udevRules = tssUser: tssGroup: ''
+    ${lib.optionalString (tssUser != null) ''KERNEL=="tpm[0-9]*", MODE="0660", OWNER="${tssUser}"''}
+    ${lib.optionalString (tssUser != null || tssGroup != null)
+      ''KERNEL=="tpmrm[0-9]*", MODE="0660"''
+      + lib.optionalString (tssUser != null) '', OWNER="${tssUser}"''
+      + lib.optionalString (tssGroup != null) '', GROUP="${tssGroup}"''
+     }
+  '';
+
+in {
+  options.security.tpm2 = {
+    enable = lib.mkEnableOption (lib.mdDoc "Trusted Platform Module 2 support");
+
+    tssUser = lib.mkOption {
+      description = lib.mdDoc ''
+        Name of the TPM device owner and service user; only used if
+        applyUdevRules is set.
+      '';
+      type = lib.types.nullOr lib.types.str;
+      default = if cfg.abrmd.enable then "tss" else "root";
+      defaultText = lib.literalExpression ''if config.security.tpm2.abrmd.enable then "tss" else "root"'';
+    };
+
+    tssGroup = lib.mkOption {
+      description = lib.mdDoc ''
+        Group of the TPM kernel resource manager (tpmrm) device; only used if
+        applyUdevRules is set.
+      '';
+      type = lib.types.nullOr lib.types.str;
+      default = "tss";
+    };
+
+    applyUdevRules = lib.mkOption {
+      description = lib.mdDoc ''
+        Whether to make the /dev/tpm[0-9] devices accessible to the tssUser, and
+        the /dev/tpmrm[0-9] devices to the tssGroup, respectively.
+      '';
+      type = lib.types.bool;
+      default = true;
+    };
+
+    abrmd = {
+      enable = lib.mkEnableOption (lib.mdDoc ''
+        Trusted Platform 2 userspace resource manager daemon
+      '');
+
+      package = lib.mkOption {
+        description = lib.mdDoc "tpm2-abrmd package to use";
+        type = lib.types.package;
+        default = pkgs.tpm2-abrmd;
+        defaultText = lib.literalExpression "pkgs.tpm2-abrmd";
+      };
+    };
+
+    pkcs11 = {
+      enable = lib.mkEnableOption (lib.mdDoc ''
+        TPM2 PKCS#11 tool and shared library in system path
+        (`/run/current-system/sw/lib/libtpm2_pkcs11.so`)
+      '');
+
+      package = lib.mkOption {
+        description = lib.mdDoc "tpm2-pkcs11 package to use";
+        type = lib.types.package;
+        default = pkgs.tpm2-pkcs11;
+        defaultText = lib.literalExpression "pkgs.tpm2-pkcs11";
+      };
+    };
+
+    tctiEnvironment = {
+      enable = lib.mkOption {
+        description = lib.mdDoc ''
+          Set common TCTI environment variables to the specified value.
+          The variables are
+          - `TPM2TOOLS_TCTI`
+          - `TPM2_PKCS11_TCTI`
+        '';
+        type = lib.types.bool;
+        default = false;
+      };
+
+      interface = lib.mkOption {
+        description = lib.mdDoc ''
+          The name of the TPM command transmission interface (TCTI) library to
+          use.
+        '';
+        type = lib.types.enum [ "tabrmd" "device" ];
+        default = "device";
+      };
+
+      deviceConf = lib.mkOption {
+        description = lib.mdDoc ''
+          Configuration part of the device TCTI, e.g. the path to the TPM device.
+          Applies if interface is set to "device".
+          The format is specified in the
+          [tpm2-tools
+          repository](https://github.com/tpm2-software/tpm2-tools/blob/master/man/common/tcti.md#tcti-options).
+        '';
+        type = lib.types.str;
+        default = "/dev/tpmrm0";
+      };
+
+      tabrmdConf = lib.mkOption {
+        description = lib.mdDoc ''
+          Configuration part of the tabrmd TCTI, like the D-Bus bus name.
+          Applies if interface is set to "tabrmd".
+          The format is specified in the
+          [tpm2-tools
+          repository](https://github.com/tpm2-software/tpm2-tools/blob/master/man/common/tcti.md#tcti-options).
+        '';
+        type = lib.types.str;
+        default = "bus_name=com.intel.tss2.Tabrmd";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [
+    {
+      # PKCS11 tools and library
+      environment.systemPackages = lib.mkIf cfg.pkcs11.enable [
+        (lib.getBin cfg.pkcs11.package)
+        (lib.getLib cfg.pkcs11.package)
+      ];
+
+      services.udev.extraRules = lib.mkIf cfg.applyUdevRules
+        (udevRules cfg.tssUser cfg.tssGroup);
+
+      # Create the tss user and group only if the default value is used
+      users.users.${cfg.tssUser} = lib.mkIf (cfg.tssUser == "tss") {
+        isSystemUser = true;
+        group = "tss";
+      };
+      users.groups.${cfg.tssGroup} = lib.mkIf (cfg.tssGroup == "tss") {};
+
+      environment.variables = lib.mkIf cfg.tctiEnvironment.enable (
+        lib.attrsets.genAttrs [
+          "TPM2TOOLS_TCTI"
+          "TPM2_PKCS11_TCTI"
+        ] (_: ''${cfg.tctiEnvironment.interface}:${
+          if cfg.tctiEnvironment.interface == "tabrmd" then
+            cfg.tctiEnvironment.tabrmdConf
+          else
+            cfg.tctiEnvironment.deviceConf
+        }'')
+      );
+    }
+
+    (lib.mkIf cfg.abrmd.enable {
+      systemd.services."tpm2-abrmd" = {
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          Type = "dbus";
+          Restart = "always";
+          RestartSec = 30;
+          BusName = "com.intel.tss2.Tabrmd";
+          ExecStart = "${cfg.abrmd.package}/bin/tpm2-abrmd";
+          User = "tss";
+          Group = "tss";
+        };
+      };
+
+      services.dbus.packages = lib.singleton cfg.abrmd.package;
+    })
+  ]);
+
+  meta.maintainers = with lib.maintainers; [ lschuermann ];
+}
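A sketch of the tpm2 module above with the userspace resource manager and the TCTI environment variables enabled:

    {
      security.tpm2 = {
        enable = true;
        abrmd.enable = true;          # run tpm2-abrmd as a D-Bus service
        pkcs11.enable = true;         # expose libtpm2_pkcs11.so in the system path
        tctiEnvironment = {
          enable = true;
          interface = "tabrmd";       # point TPM2TOOLS_TCTI / TPM2_PKCS11_TCTI at abrmd
        };
      };
    }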
diff --git a/nixpkgs/nixos/modules/security/wrappers/default.nix b/nixpkgs/nixos/modules/security/wrappers/default.nix
new file mode 100644
index 000000000000..250f9775be14
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/wrappers/default.nix
@@ -0,0 +1,339 @@
+{ config, lib, pkgs, ... }:
+let
+
+  inherit (config.security) wrapperDir wrappers;
+
+  parentWrapperDir = dirOf wrapperDir;
+
+  # This is security-sensitive code, and glibc vulns happen from time to time.
+  # musl is security-focused and generally more minimal, so it's a better choice here.
+  # The dynamic linker is still a fairly complex piece of code, and the wrappers are
+  # quite small, so linking it statically is more appropriate.
+  securityWrapper = sourceProg : pkgs.pkgsStatic.callPackage ./wrapper.nix {
+    inherit sourceProg;
+
+    # glibc definitions of insecure environment variables
+    #
+    # We extract the single header file we need into its own derivation,
+    # so that we don't have to pull full glibc sources to build wrappers.
+    #
+    # They're taken from pkgs.glibc so that we don't have to keep as close
+    # an eye on glibc changes. Not every relevant variable is in this header,
+    # so we maintain a slightly stricter list in wrapper.c itself as well.
+    unsecvars = lib.overrideDerivation (pkgs.srcOnly pkgs.glibc)
+      ({ name, ... }: {
+        name = "${name}-unsecvars";
+        installPhase = ''
+          mkdir $out
+          cp sysdeps/generic/unsecvars.h $out
+        '';
+      });
+  };
+
+  fileModeType =
+    let
+      # taken from the chmod(1) man page
+      symbolic = "[ugoa]*([-+=]([rwxXst]*|[ugo]))+|[-+=][0-7]+";
+      numeric = "[-+=]?[0-7]{0,4}";
+      mode = "((${symbolic})(,${symbolic})*)|(${numeric})";
+    in
+     lib.types.strMatching mode
+     // { description = "file mode string"; };
+
+  wrapperType = lib.types.submodule ({ name, config, ... }: {
+    options.source = lib.mkOption
+      { type = lib.types.path;
+        description = lib.mdDoc "The absolute path to the program to be wrapped.";
+      };
+    options.program = lib.mkOption
+      { type = with lib.types; nullOr str;
+        default = name;
+        description = lib.mdDoc ''
+          The name of the wrapper program. Defaults to the attribute name.
+        '';
+      };
+    options.owner = lib.mkOption
+      { type = lib.types.str;
+        description = lib.mdDoc "The owner of the wrapper program.";
+      };
+    options.group = lib.mkOption
+      { type = lib.types.str;
+        description = lib.mdDoc "The group of the wrapper program.";
+      };
+    options.permissions = lib.mkOption
+      { type = fileModeType;
+        default  = "u+rx,g+x,o+x";
+        example = "a+rx";
+        description = lib.mdDoc ''
+          The permissions of the wrapper program. The format is that of a
+          symbolic or numeric file mode understood by {command}`chmod`.
+        '';
+      };
+    options.capabilities = lib.mkOption
+      { type = lib.types.commas;
+        default = "";
+        description = lib.mdDoc ''
+          A comma-separated list of capability clauses to be given to the
+          wrapper program. The format for capability clauses is described in the
+          “TEXTUAL REPRESENTATION” section of the {manpage}`cap_from_text(3)`
+          manual page. For a list of capabilities supported by the system, check
+          the {manpage}`capabilities(7)` manual page.
+
+          ::: {.note}
+          `cap_setpcap`, which is required for the wrapper
+          program to be able to raise caps into the Ambient set, is NOT itself
+          raised to the Ambient set, so that the real program cannot modify its
+          own capabilities. This may be too restrictive for cases in which the
+          real program needs cap_setpcap, but it at least errs on the side of
+          security paranoia rather than being too relaxed.
+          :::
+        '';
+      };
+    options.setuid = lib.mkOption
+      { type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to add the setuid bit the wrapper program.";
+      };
+    options.setgid = lib.mkOption
+      { type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to add the setgid bit the wrapper program.";
+      };
+  });
+
+  ###### Activation script for the setcap wrappers
+  mkSetcapProgram =
+    { program
+    , capabilities
+    , source
+    , owner
+    , group
+    , permissions
+    , ...
+    }:
+    ''
+      cp ${securityWrapper source}/bin/security-wrapper "$wrapperDir/${program}"
+
+      # Prevent races
+      chmod 0000 "$wrapperDir/${program}"
+      chown ${owner}:${group} "$wrapperDir/${program}"
+
+      # Set desired capabilities on the file plus cap_setpcap so
+      # the wrapper program can elevate the capabilities set on
+      # its file into the Ambient set.
+      ${pkgs.libcap.out}/bin/setcap "cap_setpcap,${capabilities}" "$wrapperDir/${program}"
+
+      # Set the executable bit
+      chmod ${permissions} "$wrapperDir/${program}"
+    '';
+
+  ###### Activation script for the setuid wrappers
+  mkSetuidProgram =
+    { program
+    , source
+    , owner
+    , group
+    , setuid
+    , setgid
+    , permissions
+    , ...
+    }:
+    ''
+      cp ${securityWrapper source}/bin/security-wrapper "$wrapperDir/${program}"
+
+      # Prevent races
+      chmod 0000 "$wrapperDir/${program}"
+      chown ${owner}:${group} "$wrapperDir/${program}"
+
+      chmod "u${if setuid then "+" else "-"}s,g${if setgid then "+" else "-"}s,${permissions}" "$wrapperDir/${program}"
+    '';
+
+  mkWrappedPrograms =
+    builtins.map
+      (opts:
+        if opts.capabilities != ""
+        then mkSetcapProgram opts
+        else mkSetuidProgram opts
+      ) (lib.attrValues wrappers);
+in
+{
+  imports = [
+    (lib.mkRemovedOptionModule [ "security" "setuidOwners" ] "Use security.wrappers instead")
+    (lib.mkRemovedOptionModule [ "security" "setuidPrograms" ] "Use security.wrappers instead")
+  ];
+
+  ###### interface
+
+  options = {
+    security.wrappers = lib.mkOption {
+      type = lib.types.attrsOf wrapperType;
+      default = {};
+      example = lib.literalExpression
+        ''
+          {
+            # a setuid root program
+            doas =
+              { setuid = true;
+                owner = "root";
+                group = "root";
+                source = "''${pkgs.doas}/bin/doas";
+              };
+
+            # a setgid program
+            locate =
+              { setgid = true;
+                owner = "root";
+                group = "mlocate";
+                source = "''${pkgs.locate}/bin/locate";
+              };
+
+            # a program with the CAP_NET_RAW capability
+            ping =
+              { owner = "root";
+                group = "root";
+                capabilities = "cap_net_raw+ep";
+                source = "''${pkgs.iputils.out}/bin/ping";
+              };
+          }
+        '';
+      description = lib.mdDoc ''
+        This option effectively allows adding setuid/setgid bits, capabilities,
+        changing file ownership and permissions of a program without directly
+        modifying it. This works by creating a wrapper program under the
+        {option}`security.wrapperDir` directory, which is then added to
+        the shell `PATH`.
+      '';
+    };
+
+    security.wrapperDirSize = lib.mkOption {
+      default = "50%";
+      example = "10G";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Size limit for the /run/wrappers tmpfs. Look at mount(8), tmpfs size option,
+        for the accepted syntax. WARNING: don't set to less than 64MB.
+      '';
+    };
+
+    security.wrapperDir = lib.mkOption {
+      type        = lib.types.path;
+      default     = "/run/wrappers/bin";
+      internal    = true;
+      description = lib.mdDoc ''
+        This option defines the path to the wrapper programs. It
+        should not be overridden.
+      '';
+    };
+  };
+
+  ###### implementation
+  config = {
+
+    assertions = lib.mapAttrsToList
+      (name: opts:
+        { assertion = opts.setuid || opts.setgid -> opts.capabilities == "";
+          message = ''
+            The security.wrappers.${name} wrapper is not valid:
+                setuid/setgid and capabilities are mutually exclusive.
+          '';
+        }
+      ) wrappers;
+
+    security.wrappers =
+      let
+        mkSetuidRoot = source:
+          { setuid = true;
+            owner = "root";
+            group = "root";
+            inherit source;
+          };
+      in
+      { # These are mount related wrappers that require the +s permission.
+        fusermount  = mkSetuidRoot "${pkgs.fuse}/bin/fusermount";
+        fusermount3 = mkSetuidRoot "${pkgs.fuse3}/bin/fusermount3";
+        mount  = mkSetuidRoot "${lib.getBin pkgs.util-linux}/bin/mount";
+        umount = mkSetuidRoot "${lib.getBin pkgs.util-linux}/bin/umount";
+      };
+
+    boot.specialFileSystems.${parentWrapperDir} = {
+      fsType = "tmpfs";
+      options = [ "nodev" "mode=755" "size=${config.security.wrapperDirSize}" ];
+    };
+
+    # Make sure our wrapperDir exports to the PATH env variable when
+    # initializing the shell
+    environment.extraInit = ''
+      # Wrappers override other bin directories.
+      export PATH="${wrapperDir}:$PATH"
+    '';
+
+    security.apparmor.includes = lib.mapAttrs' (wrapName: wrap: lib.nameValuePair
+     "nixos/security.wrappers/${wrapName}" ''
+      include "${pkgs.apparmorRulesFromClosure { name="security.wrappers.${wrapName}"; } [
+        (securityWrapper wrap.source)
+      ]}"
+      mrpx ${wrap.source},
+    '') wrappers;
+
+    systemd.services.suid-sgid-wrappers = {
+      description = "Create SUID/SGID Wrappers";
+      wantedBy = [ "sysinit.target" ];
+      before = [ "sysinit.target" ];
+      unitConfig.DefaultDependencies = false;
+      unitConfig.RequiresMountsFor = [ "/nix/store" "/run/wrappers" ];
+      serviceConfig.Type = "oneshot";
+      script = ''
+        chmod 755 "${parentWrapperDir}"
+
+        # We want to place the tmpdirs for the wrappers in the parent dir.
+        wrapperDir=$(mktemp --directory --tmpdir="${parentWrapperDir}" wrappers.XXXXXXXXXX)
+        chmod a+rx "$wrapperDir"
+
+        ${lib.concatStringsSep "\n" mkWrappedPrograms}
+
+        if [ -L ${wrapperDir} ]; then
+          # Atomically replace the symlink
+          # See https://axialcorps.com/2013/07/03/atomically-replacing-files-and-directories/
+          old=$(readlink -f ${wrapperDir})
+          if [ -e "${wrapperDir}-tmp" ]; then
+            rm --force --recursive "${wrapperDir}-tmp"
+          fi
+          ln --symbolic --force --no-dereference "$wrapperDir" "${wrapperDir}-tmp"
+          mv --no-target-directory "${wrapperDir}-tmp" "${wrapperDir}"
+          rm --force --recursive "$old"
+        else
+          # For initial setup
+          ln --symbolic "$wrapperDir" "${wrapperDir}"
+        fi
+      '';
+    };
+
+    ###### wrappers consistency checks
+    system.checks = lib.singleton (pkgs.runCommandLocal
+      "ensure-all-wrappers-paths-exist" { }
+      ''
+        # make sure we produce output
+        mkdir -p $out
+
+        echo -n "Checking that Nix store paths of all wrapped programs exist... "
+
+        declare -A wrappers
+        ${lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v:
+          "wrappers['${n}']='${v.source}'") wrappers)}
+
+        for name in "''${!wrappers[@]}"; do
+          path="''${wrappers[$name]}"
+          if [[ "$path" =~ /nix/store ]] && [ ! -e "$path" ]; then
+            test -t 1 && echo -ne '\033[1;31m'
+            echo "FAIL"
+            echo "The path $path does not exist!"
+            echo 'Please, check the value of `security.wrappers."'$name'".source`.'
+            test -t 1 && echo -ne '\033[0m'
+            exit 1
+          fi
+        done
+
+        echo "OK"
+      '');
+  };
+}
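A minimal configuration sketch using the options above; the `ping` wrapper mirrors the option example, and the tmpfs size is illustrative only:

```nix
{ pkgs, ... }:
{
  # Grant ping CAP_NET_RAW through a wrapper instead of making it setuid.
  security.wrappers.ping = {
    owner = "root";
    group = "root";
    capabilities = "cap_net_raw+ep";
    source = "${pkgs.iputils.out}/bin/ping";
  };

  # Optional: enlarge the tmpfs that backs /run/wrappers (illustrative value).
  security.wrapperDirSize = "128M";
}
```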
diff --git a/nixpkgs/nixos/modules/security/wrappers/wrapper.c b/nixpkgs/nixos/modules/security/wrappers/wrapper.c
new file mode 100644
index 000000000000..3277e7ef6f79
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/wrappers/wrapper.c
@@ -0,0 +1,211 @@
+#define _GNU_SOURCE
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdnoreturn.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/xattr.h>
+#include <fcntl.h>
+#include <dirent.h>
+#include <errno.h>
+#include <linux/capability.h>
+#include <sys/prctl.h>
+#include <limits.h>
+#include <stdint.h>
+#include <syscall.h>
+#include <byteswap.h>
+
+// imported from glibc
+#include "unsecvars.h"
+
+#ifndef SOURCE_PROG
+#error SOURCE_PROG should be defined via preprocessor commandline
+#endif
+
+// aborts when false, printing the failed expression
+#define ASSERT(expr) ((expr) ? (void) 0 : assert_failure(#expr))
+
+extern char **environ;
+
+// Wrapper debug variable name
+static char *wrapper_debug = "WRAPPER_DEBUG";
+
+#define CAP_SETPCAP 8
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define LE32_TO_H(x) bswap_32(x)
+#else
+#define LE32_TO_H(x) (x)
+#endif
+
+static noreturn void assert_failure(const char *assertion) {
+    fprintf(stderr, "Assertion `%s` in NixOS's wrapper.c failed.\n", assertion);
+    fflush(stderr);
+    abort();
+}
+
+int get_last_cap(unsigned *last_cap) {
+    FILE* file = fopen("/proc/sys/kernel/cap_last_cap", "r");
+    if (file == NULL) {
+        int saved_errno = errno;
+        fprintf(stderr, "failed to open /proc/sys/kernel/cap_last_cap: %s\n", strerror(errno));
+        return -saved_errno;
+    }
+    int res = fscanf(file, "%u", last_cap);
+    if (res == EOF) {
+        int saved_errno = errno;
+        fprintf(stderr, "could not read number from /proc/sys/kernel/cap_last_cap: %s\n", strerror(errno));
+        return -saved_errno;
+    }
+    fclose(file);
+    return 0;
+}
+
+// Given the path to this program, fetch its configured capability set
+// (as set by `setcap ... /path/to/file`) and raise those capabilities
+// into the Ambient set.
+static int make_caps_ambient(const char *self_path) {
+    struct vfs_ns_cap_data data = {};
+    int r = getxattr(self_path, "security.capability", &data, sizeof(data));
+
+    if (r < 0) {
+        if (errno == ENODATA) {
+            // no capabilities set
+            return 0;
+        }
+        fprintf(stderr, "cannot get capabilities for %s: %s", self_path, strerror(errno));
+        return 1;
+    }
+
+    size_t size;
+    uint32_t version = LE32_TO_H(data.magic_etc) & VFS_CAP_REVISION_MASK;
+    switch (version) {
+        case VFS_CAP_REVISION_1:
+            size = VFS_CAP_U32_1;
+            break;
+        case VFS_CAP_REVISION_2:
+        case VFS_CAP_REVISION_3:
+            size = VFS_CAP_U32_3;
+            break;
+        default:
+            fprintf(stderr, "BUG! Unsupported capability version 0x%x on %s. Report to NixOS bugtracker\n", version, self_path);
+            return 1;
+    }
+
+    const struct __user_cap_header_struct header = {
+      .version = _LINUX_CAPABILITY_VERSION_3,
+      .pid = getpid(),
+    };
+    struct __user_cap_data_struct user_data[2] = {};
+
+    for (size_t i = 0; i < size; i++) {
+        // merge inheritable & permitted into one
+        user_data[i].permitted = user_data[i].inheritable =
+            LE32_TO_H(data.data[i].inheritable) | LE32_TO_H(data.data[i].permitted);
+    }
+
+    if (syscall(SYS_capset, &header, &user_data) < 0) {
+        fprintf(stderr, "failed to inherit capabilities: %s", strerror(errno));
+        return 1;
+    }
+    unsigned last_cap;
+    r = get_last_cap(&last_cap);
+    if (r < 0) {
+        return 1;
+    }
+    uint64_t set = user_data[0].permitted | (uint64_t)user_data[1].permitted << 32;
+    for (unsigned cap = 0; cap < last_cap; cap++) {
+        if (!(set & (1ULL << cap))) {
+            continue;
+        }
+
+        // Check for the cap_setpcap capability, we set this on the
+        // wrapper so it can elevate the capabilities to the Ambient
+        // set but we do not want to propagate it down into the
+        // wrapped program.
+        //
+        // TODO: what if propagating it is the behavior you actually want?
+        // For now we prefer a strict policy over a loose one.
+        if (cap == CAP_SETPCAP) {
+            if(getenv(wrapper_debug)) {
+                fprintf(stderr, "cap_setpcap in set, skipping it\n");
+            }
+            continue;
+        }
+        if (prctl(PR_CAP_AMBIENT, PR_CAP_AMBIENT_RAISE, (unsigned long) cap, 0, 0)) {
+            fprintf(stderr, "cannot raise the capability %d into the ambient set: %s\n", cap, strerror(errno));
+            return 1;
+        }
+        if (getenv(wrapper_debug)) {
+            fprintf(stderr, "raised %d into the ambient capability set\n", cap);
+        }
+    }
+
+    return 0;
+}
+
+// These are environment variable aliases for glibc tunables.
+// This list shouldn't grow further, since this is a legacy mechanism.
+// Any future tunables are expected to only be accessible through GLIBC_TUNABLES.
+//
+// They are not included in the glibc-provided UNSECURE_ENVVARS list,
+// since any SUID executable ignores them. This wrapper also serves
+// executables that are merely granted ambient capabilities, rather than
+// being SUID, and hence don't run in secure mode. We'd like to defend
+// those in depth as well, so we clear these explicitly.
+//
+// Except for MALLOC_CHECK_ (which is marked SXID_ERASE), these are all
+// marked SXID_IGNORE (ignored in secure mode), so even the glibc version
+// of this wrapper would leave them intact.
+#define UNSECURE_ENVVARS_TUNABLES \
+    "MALLOC_CHECK_\0" \
+    "MALLOC_TOP_PAD_\0" \
+    "MALLOC_PERTURB_\0" \
+    "MALLOC_MMAP_THRESHOLD_\0" \
+    "MALLOC_TRIM_THRESHOLD_\0" \
+    "MALLOC_MMAP_MAX_\0" \
+    "MALLOC_ARENA_MAX\0" \
+    "MALLOC_ARENA_TEST\0"
+
+int main(int argc, char **argv) {
+    ASSERT(argc >= 1);
+
+    int debug = getenv(wrapper_debug) != NULL;
+
+    // Drop insecure environment variables explicitly
+    //
+    // glibc does this automatically in SUID binaries, but we'd like to cover this:
+    //
+    //  a) before it gets to glibc
+    //  b) in binaries that are only granted ambient capabilities by the wrapper,
+    //     but don't run with an altered effective UID/GID, nor directly gain
+    //     capabilities themselves, and thus don't run in secure mode.
+    //
+    // We're using musl, which doesn't drop environment variables in secure mode,
+    // and we'd also like glibc-specific variables to be covered.
+    //
+    // If we don't explicitly unset them, it's quite easy to just set LD_PRELOAD,
+    // have it passed through to the wrapped program, and gain privileges.
+    for (char *unsec = UNSECURE_ENVVARS_TUNABLES UNSECURE_ENVVARS; *unsec; unsec = strchr(unsec, 0) + 1) {
+        if (debug) {
+            fprintf(stderr, "unsetting %s\n", unsec);
+        }
+        unsetenv(unsec);
+    }
+
+    // Read the capabilities set on the wrapper and raise them into
+    // the ambient set so the program we're wrapping receives the
+    // capabilities too!
+    if (make_caps_ambient("/proc/self/exe") != 0) {
+        return 1;
+    }
+
+    execve(SOURCE_PROG, argv, environ);
+
+    fprintf(stderr, "%s: cannot run `%s': %s\n",
+        argv[0], SOURCE_PROG, strerror(errno));
+
+    return 1;
+}
diff --git a/nixpkgs/nixos/modules/security/wrappers/wrapper.nix b/nixpkgs/nixos/modules/security/wrappers/wrapper.nix
new file mode 100644
index 000000000000..27d46c630af5
--- /dev/null
+++ b/nixpkgs/nixos/modules/security/wrappers/wrapper.nix
@@ -0,0 +1,20 @@
+{ stdenv, unsecvars, linuxHeaders, sourceProg, debug ? false }:
+# For testing:
+# $ nix-build -E 'with import <nixpkgs> {}; pkgs.callPackage ./wrapper.nix { parentWrapperDir = "/run/wrappers"; debug = true; }'
+stdenv.mkDerivation {
+  name = "security-wrapper";
+  buildInputs = [ linuxHeaders ];
+  dontUnpack = true;
+  CFLAGS = [
+    ''-DSOURCE_PROG="${sourceProg}"''
+  ] ++ (if debug then [
+    "-Werror" "-Og" "-g"
+  ] else [
+    "-Wall" "-O2"
+  ]);
+  dontStrip = debug;
+  installPhase = ''
+    mkdir -p $out/bin
+    $CC $CFLAGS ${./wrapper.c} -I${unsecvars} -o $out/bin/security-wrapper
+  '';
+}
diff --git a/nixpkgs/nixos/modules/services/admin/meshcentral.nix b/nixpkgs/nixos/modules/services/admin/meshcentral.nix
new file mode 100644
index 000000000000..22f31e952622
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/admin/meshcentral.nix
@@ -0,0 +1,51 @@
+{ config, pkgs, lib, ... }:
+let
+  cfg = config.services.meshcentral;
+  configFormat = pkgs.formats.json {};
+  configFile = configFormat.generate "meshcentral-config.json" cfg.settings;
+in with lib; {
+  options.services.meshcentral = with types; {
+    enable = mkEnableOption (lib.mdDoc "MeshCentral computer management server");
+    package = mkOption {
+      description = lib.mdDoc "MeshCentral package to use. Replacing this may be necessary to add dependencies for extra functionality.";
+      type = types.package;
+      default = pkgs.meshcentral;
+      defaultText = literalExpression "pkgs.meshcentral";
+    };
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Settings for MeshCentral. Refer to upstream documentation for details:
+
+        - [JSON Schema definition](https://github.com/Ylianst/MeshCentral/blob/master/meshcentral-config-schema.json)
+        - [simple sample configuration](https://github.com/Ylianst/MeshCentral/blob/master/sample-config.json)
+        - [complex sample configuration](https://github.com/Ylianst/MeshCentral/blob/master/sample-config-advanced.json)
+        - [Old homepage with documentation link](https://www.meshcommander.com/meshcentral2)
+      '';
+      type = types.submodule {
+        freeformType = configFormat.type;
+      };
+      example = {
+        settings = {
+          WANonly = true;
+          Cert = "meshcentral.example.com";
+          TlsOffload = "10.0.0.2,fd42::2";
+          Port = 4430;
+        };
+        domains."".certUrl = "https://meshcentral.example.com/";
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    services.meshcentral.settings.settings.autoBackup.backupPath = lib.mkDefault "/var/lib/meshcentral/backups";
+    systemd.services.meshcentral = {
+      wantedBy = ["multi-user.target"];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/meshcentral --datapath /var/lib/meshcentral --configfile ${configFile}";
+        DynamicUser = true;
+        StateDirectory = "meshcentral";
+        CacheDirectory = "meshcentral";
+      };
+    };
+  };
+  meta.maintainers = [ maintainers.lheckemann ];
+}
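A usage sketch for the module above, assuming `meshcentral.example.com` is a placeholder domain; the freeform `settings` keys follow the upstream JSON schema linked in the option description:

```nix
services.meshcentral = {
  enable = true;
  settings = {
    # Keys are passed through to MeshCentral's config.json.
    settings = {
      Cert = "meshcentral.example.com";
      Port = 4430;
      WANonly = true;
    };
    domains."".certUrl = "https://meshcentral.example.com/";
  };
};
```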
diff --git a/nixpkgs/nixos/modules/services/admin/oxidized.nix b/nixpkgs/nixos/modules/services/admin/oxidized.nix
new file mode 100644
index 000000000000..56f33031498a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/admin/oxidized.nix
@@ -0,0 +1,118 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.oxidized;
+in
+{
+  options.services.oxidized = {
+    enable = mkEnableOption (lib.mdDoc "the oxidized configuration backup service");
+
+    user = mkOption {
+      type = types.str;
+      default = "oxidized";
+      description = lib.mdDoc ''
+        User under which the oxidized service runs.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "oxidized";
+      description = lib.mdDoc ''
+        Group under which the oxidized service runs.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/oxidized";
+      description = lib.mdDoc "State directory for the oxidized service.";
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      example = literalExpression ''
+        pkgs.writeText "oxidized-config.yml" '''
+          ---
+          debug: true
+          use_syslog: true
+          input:
+            default: ssh
+            ssh:
+              secure: true
+          interval: 3600
+          model_map:
+            dell: powerconnect
+            hp: procurve
+          source:
+            default: csv
+            csv:
+              delimiter: !ruby/regexp /:/
+              file: "/var/lib/oxidized/.config/oxidized/router.db"
+              map:
+                name: 0
+                model: 1
+                username: 2
+                password: 3
+          pid: "/var/lib/oxidized/.config/oxidized/pid"
+          rest: 127.0.0.1:8888
+          retries: 3
+          # ... additional config
+        ''';
+      '';
+      description = lib.mdDoc ''
+        Path to the oxidized configuration file.
+      '';
+    };
+
+    routerDB = mkOption {
+      type = types.path;
+      example = literalExpression ''
+        pkgs.writeText "oxidized-router.db" '''
+          hostname-sw1:powerconnect:username1:password2
+          hostname-sw2:procurve:username2:password2
+          # ... additional hosts
+        '''
+      '';
+      description = lib.mdDoc ''
+        Path to the file/database which contains the targets for oxidized.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.${cfg.group} = { };
+    users.users.${cfg.user} = {
+      description = "Oxidized service user";
+      group = cfg.group;
+      home = cfg.dataDir;
+      createHome = true;
+      isSystemUser = true;
+    };
+
+    systemd.services.oxidized = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        mkdir -p ${cfg.dataDir}/.config/oxidized
+        ln -f -s ${cfg.routerDB} ${cfg.dataDir}/.config/oxidized/router.db
+        ln -f -s ${cfg.configFile} ${cfg.dataDir}/.config/oxidized/config
+      '';
+
+      serviceConfig = {
+        ExecStart = "${pkgs.oxidized}/bin/oxidized";
+        User = cfg.user;
+        Group = cfg.group;
+        UMask = "0077";
+        NoNewPrivileges = true;
+        Restart  = "always";
+        WorkingDirectory = cfg.dataDir;
+        KillSignal = "SIGKILL";
+        PIDFile = "${cfg.dataDir}/.config/oxidized/pid";
+      };
+    };
+  };
+}
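A sketch of how the two required path options might be wired up, assuming `pkgs` is in scope; the device entry and credentials are placeholders:

```nix
services.oxidized = {
  enable = true;
  configFile = pkgs.writeText "oxidized-config.yml" ''
    ---
    interval: 3600
    source:
      default: csv
      csv:
        delimiter: !ruby/regexp /:/
        file: "/var/lib/oxidized/.config/oxidized/router.db"
        map:
          name: 0
          model: 1
          username: 2
          password: 3
  '';
  routerDB = pkgs.writeText "oxidized-router.db" ''
    sw1.example.net:procurve:admin:changeme
  '';
};
```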
diff --git a/nixpkgs/nixos/modules/services/admin/pgadmin.nix b/nixpkgs/nixos/modules/services/admin/pgadmin.nix
new file mode 100644
index 000000000000..390c80d1a2d4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/admin/pgadmin.nix
@@ -0,0 +1,187 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  pkg = pkgs.pgadmin4;
+  cfg = config.services.pgadmin;
+
+  _base = with types; [ int bool str ];
+  base = with types; oneOf ([ (listOf (oneOf _base)) (attrsOf (oneOf _base)) ] ++ _base);
+
+  formatAttrset = attr:
+    "{${concatStringsSep "\n" (mapAttrsToList (key: value: "${builtins.toJSON key}: ${formatPyValue value},") attr)}}";
+
+  formatPyValue = value:
+    if builtins.isString value then builtins.toJSON value
+    else if value ? _expr then value._expr
+    else if builtins.isInt value then toString value
+    else if builtins.isBool value then (if value then "True" else "False")
+    else if builtins.isAttrs value then (formatAttrset value)
+    else if builtins.isList value then "[${concatStringsSep "\n" (map (v: "${formatPyValue v},") value)}]"
+    else throw "Unrecognized type";
+
+  formatPy = attrs:
+    concatStringsSep "\n" (mapAttrsToList (key: value: "${key} = ${formatPyValue value}") attrs);
+
+  pyType = with types; attrsOf (oneOf [ (attrsOf base) (listOf base) base ]);
+in
+{
+  options.services.pgadmin = {
+    enable = mkEnableOption (lib.mdDoc "PostgreSQL Admin 4");
+
+    port = mkOption {
+      description = lib.mdDoc "Port for pgadmin4 to run on";
+      type = types.port;
+      default = 5050;
+    };
+
+    initialEmail = mkOption {
+      description = lib.mdDoc "Initial email for the pgAdmin account";
+      type = types.str;
+    };
+
+    initialPasswordFile = mkOption {
+      description = lib.mdDoc ''
+        Initial password file for the pgAdmin account.
+        NOTE: This should be a string, not a store path, to prevent the password from being world-readable.
+      '';
+      type = types.path;
+    };
+
+    emailServer = {
+      enable = mkOption {
+        description = lib.mdDoc ''
+          Enable the SMTP email server. This is necessary if you want to use password recovery or change your own password.
+        '';
+        type = types.bool;
+        default = false;
+      };
+      address = mkOption {
+        description = lib.mdDoc "SMTP server for email delivery";
+        type = types.str;
+        default = "localhost";
+      };
+      port = mkOption {
+        description = lib.mdDoc "SMTP server port for email delivery";
+        type = types.port;
+        default = 25;
+      };
+      useSSL = mkOption {
+        description = lib.mdDoc "SMTP server should use SSL";
+        type = types.bool;
+        default = false;
+      };
+      useTLS = mkOption {
+        description = lib.mdDoc "SMTP server should use TLS";
+        type = types.bool;
+        default = false;
+      };
+      username = mkOption {
+        description = lib.mdDoc "SMTP server username for email delivery";
+        type = types.nullOr types.str;
+        default = null;
+      };
+      sender = mkOption {
+        description = lib.mdDoc ''
+          SMTP server sender email for email delivery. Some servers require this to be a valid email address from that server
+        '';
+        type = types.str;
+        example = "noreply@example.com";
+      };
+      passwordFile = mkOption {
+        description = lib.mdDoc ''
+          Password for SMTP email account.
+          NOTE: This should be a string, not a store path, to prevent the password from being world-readable.
+        '';
+        type = types.path;
+      };
+    };
+
+    openFirewall = mkEnableOption (lib.mdDoc "firewall passthrough for pgadmin4");
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Settings for pgadmin4.
+        [Documentation](https://www.pgadmin.org/docs/pgadmin4/development/config_py.html)
+      '';
+      type = pyType;
+      default = { };
+    };
+  };
+
+  config = mkIf (cfg.enable) {
+    networking.firewall.allowedTCPPorts = mkIf (cfg.openFirewall) [ cfg.port ];
+
+    services.pgadmin.settings = {
+      DEFAULT_SERVER_PORT = cfg.port;
+      SERVER_MODE = true;
+    } // (optionalAttrs cfg.openFirewall {
+      DEFAULT_SERVER = mkDefault "::";
+    }) // (optionalAttrs cfg.emailServer.enable {
+      MAIL_SERVER = cfg.emailServer.address;
+      MAIL_PORT = cfg.emailServer.port;
+      MAIL_USE_SSL = cfg.emailServer.useSSL;
+      MAIL_USE_TLS = cfg.emailServer.useTLS;
+      MAIL_USERNAME = cfg.emailServer.username;
+      SECURITY_EMAIL_SENDER = cfg.emailServer.sender;
+    });
+
+    systemd.services.pgadmin = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      requires = [ "network.target" ];
+      # We only want (rather than require) postgresql, so a potential startup race is tolerated:
+      # if postgres doesn't start, pgadmin will still start normally.
+      wants = [ "postgresql.service" ];
+
+      path = [ config.services.postgresql.package pkgs.coreutils pkgs.bash ];
+
+      preStart = ''
+        # NOTE: this is idempotent (aka running it twice has no effect)
+        (
+          # Email address:
+          echo ${escapeShellArg cfg.initialEmail}
+
+          # file might not contain newline. echo hack fixes that.
+          PW=$(cat ${escapeShellArg cfg.initialPasswordFile})
+
+          # Password:
+          echo "$PW"
+          # Retype password:
+          echo "$PW"
+        ) | ${pkg}/bin/pgadmin4-setup
+      '';
+
+      restartTriggers = [
+        "/etc/pgadmin/config_system.py"
+      ];
+
+      serviceConfig = {
+        User = "pgadmin";
+        DynamicUser = true;
+        LogsDirectory = "pgadmin";
+        StateDirectory = "pgadmin";
+        ExecStart = "${pkg}/bin/pgadmin4";
+      };
+    };
+
+    users.users.pgadmin = {
+      isSystemUser = true;
+      group = "pgadmin";
+    };
+
+    users.groups.pgadmin = { };
+
+    environment.etc."pgadmin/config_system.py" = {
+      text = lib.optionalString cfg.emailServer.enable ''
+        with open("${cfg.emailServer.passwordFile}") as f:
+          pw = f.read()
+        MAIL_PASSWORD = pw
+      '' + formatPy cfg.settings;
+      mode = "0600";
+      user = "pgadmin";
+      group = "pgadmin";
+    };
+  };
+}
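A sketch of a local-only pgAdmin instance; the email and password path are placeholders, and the password file must live outside the Nix store, as the option notes:

```nix
services.pgadmin = {
  enable = true;
  initialEmail = "admin@example.com";
  # Provisioned outside the store, e.g. by a secrets manager.
  initialPasswordFile = "/run/keys/pgadmin-password";
  # Defaults to 5050; shown here only for illustration.
  port = 5050;
};
```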
diff --git a/nixpkgs/nixos/modules/services/admin/salt/master.nix b/nixpkgs/nixos/modules/services/admin/salt/master.nix
new file mode 100644
index 000000000000..4346022970e1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/admin/salt/master.nix
@@ -0,0 +1,63 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg  = config.services.salt.master;
+
+  fullConfig = lib.recursiveUpdate {
+    # Provide defaults for some directories to allow an immutable config dir
+
+    # Default is equivalent to /etc/salt/master.d/*.conf
+    default_include = "/var/lib/salt/master.d/*.conf";
+    # Default is in /etc/salt/pki/master
+    pki_dir = "/var/lib/salt/pki/master";
+  } cfg.configuration;
+
+in
+
+{
+  options = {
+    services.salt.master = {
+      enable = mkEnableOption (lib.mdDoc "Salt master service");
+      configuration = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc "Salt master configuration as Nix attribute set.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment = {
+      # Set this up in /etc/salt/master so `salt`, `salt-key`, etc. work.
+      # The alternatives are
+      # - passing --config-dir to all salt commands, not just the master unit,
+      # - setting a global environment variable.
+      etc."salt/master".source = pkgs.writeText "master" (
+        builtins.toJSON fullConfig
+      );
+      systemPackages = with pkgs; [ salt ];
+    };
+    systemd.services.salt-master = {
+      description = "Salt Master";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = with pkgs; [
+        util-linux  # for dmesg
+      ];
+      serviceConfig = {
+        ExecStart = "${pkgs.salt}/bin/salt-master";
+        LimitNOFILE = 16384;
+        Type = "notify";
+        NotifyAccess = "all";
+      };
+      restartTriggers = [
+        config.environment.etc."salt/master".source
+      ];
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ Flakebi ];
+}
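A usage sketch; the `interface` key and the publish/return ports 4505/4506 are upstream Salt conventions given as assumptions, not something this module sets for you:

```nix
services.salt.master = {
  enable = true;
  configuration = {
    # Passed through verbatim to /etc/salt/master (upstream Salt option).
    interface = "0.0.0.0";
  };
};
# Salt's default publish/return ports; open them if minions connect remotely.
networking.firewall.allowedTCPPorts = [ 4505 4506 ];
```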
diff --git a/nixpkgs/nixos/modules/services/admin/salt/minion.nix b/nixpkgs/nixos/modules/services/admin/salt/minion.nix
new file mode 100644
index 000000000000..3ae02a4cc5d5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/admin/salt/minion.nix
@@ -0,0 +1,67 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg  = config.services.salt.minion;
+
+  fullConfig = lib.recursiveUpdate {
+    # Provide defaults for some directories to allow an immutable config dir
+    # NOTE: the config dir being immutable prevents `minion_id` caching
+
+    # Default is equivalent to /etc/salt/minion.d/*.conf
+    default_include = "/var/lib/salt/minion.d/*.conf";
+    # Default is in /etc/salt/pki/minion
+    pki_dir = "/var/lib/salt/pki/minion";
+  } cfg.configuration;
+
+in
+
+{
+  options = {
+    services.salt.minion = {
+      enable = mkEnableOption (lib.mdDoc "Salt minion service");
+      configuration = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc ''
+          Salt minion configuration as Nix attribute set.
+          See <https://docs.saltstack.com/en/latest/ref/configuration/minion.html>
+          for details.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment = {
+      # Set this up in /etc/salt/minion so `salt-call`, etc. work.
+      # The alternatives are
+      # - passing --config-dir to all salt commands, not just the minion unit,
+      # - setting a global environment variable.
+      etc."salt/minion".source = pkgs.writeText "minion" (
+        builtins.toJSON fullConfig
+      );
+      systemPackages = with pkgs; [ salt ];
+    };
+    systemd.services.salt-minion = {
+      description = "Salt Minion";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = with pkgs; [
+        util-linux
+      ];
+      serviceConfig = {
+        ExecStart = "${pkgs.salt}/bin/salt-minion";
+        LimitNOFILE = 8192;
+        Type = "notify";
+        NotifyAccess = "all";
+      };
+      restartTriggers = [
+        config.environment.etc."salt/minion".source
+      ];
+    };
+  };
+}
+
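A matching minion sketch; the master hostname and minion id are placeholders, and `master`/`id` are upstream Salt minion keys passed through verbatim:

```nix
services.salt.minion = {
  enable = true;
  configuration = {
    master = "salt.example.com";
    id = "web01";
  };
};
```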
diff --git a/nixpkgs/nixos/modules/services/amqp/activemq/ActiveMQBroker.java b/nixpkgs/nixos/modules/services/amqp/activemq/ActiveMQBroker.java
new file mode 100644
index 000000000000..c0f5d16ea11a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/amqp/activemq/ActiveMQBroker.java
@@ -0,0 +1,19 @@
+import org.apache.activemq.broker.BrokerService;
+import org.apache.activemq.broker.BrokerFactory;
+import java.net.URI;
+
+public class ActiveMQBroker {
+
+  public static void main(String[] args) throws Throwable {
+    URI uri = new URI((args.length > 0) ? args[0] : "xbean:activemq.xml");
+    BrokerService broker = BrokerFactory.createBroker(uri);
+    broker.start();
+    if (broker.waitUntilStarted()) {
+      broker.waitUntilStopped();
+    } else {
+      System.out.println("Failed starting broker");
+      System.exit(-1);
+    };
+  }
+
+}
diff --git a/nixpkgs/nixos/modules/services/amqp/activemq/default.nix b/nixpkgs/nixos/modules/services/amqp/activemq/default.nix
new file mode 100644
index 000000000000..b1f9b7a3bb1f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/amqp/activemq/default.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.services.activemq;
+
+  activemqBroker = runCommand "activemq-broker"
+    {
+      nativeBuildInputs = [ jdk ];
+    } ''
+    mkdir -p $out/lib
+    source ${activemq}/lib/classpath.env
+    export CLASSPATH
+    ln -s "${./ActiveMQBroker.java}" ActiveMQBroker.java
+    javac -d $out/lib ActiveMQBroker.java
+  '';
+
+in
+{
+
+  options = {
+    services.activemq = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the Apache ActiveMQ message broker service.
+        '';
+      };
+      configurationDir = mkOption {
+        default = "${activemq}/conf";
+        defaultText = literalExpression ''"''${pkgs.activemq}/conf"'';
+        type = types.str;
+        description = lib.mdDoc ''
+          The base directory for ActiveMQ's configuration.
+          By default, this directory is searched for a file named activemq.xml,
+          which should contain the configuration for the broker service.
+        '';
+      };
+      configurationURI = mkOption {
+        type = types.str;
+        default = "xbean:activemq.xml";
+        description = lib.mdDoc ''
+          The URI that is passed along to the BrokerFactory to
+          set up the configuration of the ActiveMQ broker service.
+          You should not need to change this. For custom configuration,
+          set the `configurationDir` instead, and create
+          an activemq.xml configuration file in it.
+        '';
+      };
+      baseDir = mkOption {
+        type = types.str;
+        default = "/var/activemq";
+        description = lib.mdDoc ''
+          The base directory where ActiveMQ stores its persistent data and logs.
+          This will be overridden if you set "activemq.base" and "activemq.data"
+          in the `javaProperties` option. You can also override
+          this in activemq.xml.
+        '';
+      };
+      javaProperties = mkOption {
+        type = types.attrs;
+        default = { };
+        example = literalExpression ''
+          {
+            "java.net.preferIPv4Stack" = "true";
+          }
+        '';
+        apply = attrs: {
+          "activemq.base" = "${cfg.baseDir}";
+          "activemq.data" = "${cfg.baseDir}/data";
+          "activemq.conf" = "${cfg.configurationDir}";
+          "activemq.home" = "${activemq}";
+        } // attrs;
+        description = lib.mdDoc ''
+          Specifies Java properties that are sent to the ActiveMQ
+          broker service with the "-D" option. You can set properties
+          here to change the behaviour and configuration of the broker.
+          All essential properties that are not set here are automatically
+          given reasonable defaults.
+        '';
+      };
+      extraJavaOptions = mkOption {
+        type = types.separatedString " ";
+        default = "";
+        example = "-Xmx2G -Xms2G -XX:MaxPermSize=512M";
+        description = lib.mdDoc ''
+          Add extra options here that you want to be sent to the
+          Java runtime when the broker service is started.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.activemq = {
+      description = "ActiveMQ server user";
+      group = "activemq";
+      uid = config.ids.uids.activemq;
+    };
+
+    users.groups.activemq.gid = config.ids.gids.activemq;
+
+    systemd.services.activemq_init = {
+      wantedBy = [ "activemq.service" ];
+      partOf = [ "activemq.service" ];
+      before = [ "activemq.service" ];
+      serviceConfig.Type = "oneshot";
+      script = ''
+        mkdir -p "${cfg.javaProperties."activemq.data"}"
+        chown -R activemq "${cfg.javaProperties."activemq.data"}"
+      '';
+    };
+
+    systemd.services.activemq = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ jre ];
+      serviceConfig.User = "activemq";
+      script = ''
+        source ${activemq}/lib/classpath.env
+        export CLASSPATH=${activemqBroker}/lib:${cfg.configurationDir}:$CLASSPATH
+        exec java \
+          ${concatStringsSep " \\\n" (mapAttrsToList (name: value: "-D${name}=${value}") cfg.javaProperties)} \
+          ${cfg.extraJavaOptions} ActiveMQBroker "${cfg.configurationURI}"
+      '';
+    };
+
+  };
+
+}
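A usage sketch for the broker; the heap size and the IPv4 property mirror the option examples above and are illustrative:

```nix
services.activemq = {
  enable = true;
  # Forwarded to the JVM as -D options; merged with the module's defaults.
  javaProperties = {
    "java.net.preferIPv4Stack" = "true";
  };
  extraJavaOptions = "-Xmx1G";
};
```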
diff --git a/nixpkgs/nixos/modules/services/amqp/rabbitmq.nix b/nixpkgs/nixos/modules/services/amqp/rabbitmq.nix
new file mode 100644
index 000000000000..11dabf0b51c8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/amqp/rabbitmq.nix
@@ -0,0 +1,228 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rabbitmq;
+
+  inherit (builtins) concatStringsSep;
+
+  config_file_content = lib.generators.toKeyValue { } cfg.configItems;
+  config_file = pkgs.writeText "rabbitmq.conf" config_file_content;
+
+  advanced_config_file = pkgs.writeText "advanced.config" cfg.config;
+
+in
+{
+  ###### interface
+  options = {
+    services.rabbitmq = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the RabbitMQ server, an Advanced Message
+          Queuing Protocol (AMQP) broker.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.rabbitmq-server;
+        type = types.package;
+        defaultText = literalExpression "pkgs.rabbitmq-server";
+        description = lib.mdDoc ''
+          Which rabbitmq package to use.
+        '';
+      };
+
+      listenAddress = mkOption {
+        default = "127.0.0.1";
+        example = "";
+        description = lib.mdDoc ''
+          IP address on which RabbitMQ will listen for AMQP
+          connections.  Set to the empty string to listen on all
+          interfaces.  Note that RabbitMQ creates a user named
+          `guest` with password
+          `guest` by default, so you should delete
+          this user if you intend to allow external access.
+
+          Together with the 'port' setting, this is mostly an alias for
+          configItems."listeners.tcp.1" and is kept for backwards
+          compatibility with previous versions of this module.
+        '';
+        type = types.str;
+      };
+
+      port = mkOption {
+        default = 5672;
+        description = lib.mdDoc ''
+          Port on which RabbitMQ will listen for AMQP connections.
+        '';
+        type = types.port;
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/rabbitmq";
+        description = lib.mdDoc ''
+          Data directory for rabbitmq.
+        '';
+      };
+
+      cookie = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          The Erlang cookie is a string of arbitrary length which must
+          be the same on all nodes that are to communicate with each other.
+          Leave empty to generate it automatically.
+        '';
+      };
+
+      configItems = mkOption {
+        default = { };
+        type = types.attrsOf types.str;
+        example = literalExpression ''
+          {
+            "auth_backends.1.authn" = "rabbit_auth_backend_ldap";
+            "auth_backends.1.authz" = "rabbit_auth_backend_internal";
+          }
+        '';
+        description = lib.mdDoc ''
+          Configuration options in RabbitMQ's new config file format,
+          which is a simple key-value format that cannot express nested
+          data structures. This is known as the `rabbitmq.conf` file,
+          although outside NixOS that filename may have Erlang syntax, particularly
+          prior to RabbitMQ 3.7.0.
+
+          If you do need to express nested data structures, you can use
+          the `config` option. Configuration from `config`
+          will be merged into these options by RabbitMQ at runtime to
+          form the final configuration.
+
+          See https://www.rabbitmq.com/configure.html#config-items
+          For the distinct formats, see https://www.rabbitmq.com/configure.html#config-file-formats
+        '';
+      };
+
+      config = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          Verbatim advanced configuration file contents using the Erlang syntax.
+          This is also known as the `advanced.config` file or the old config format.
+
+          `configItems` is preferred whenever possible. However, nested
+          data structures can only be expressed properly using the `config` option.
+
+          The contents of this option will be merged into the `configItems`
+          by RabbitMQ at runtime to form the final configuration.
+
+          See the second table on https://www.rabbitmq.com/configure.html#config-items
+          For the distinct formats, see https://www.rabbitmq.com/configure.html#config-file-formats
+        '';
+      };
+
+      plugins = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        description = lib.mdDoc "The names of plugins to enable";
+      };
+
+      pluginDirs = mkOption {
+        default = [ ];
+        type = types.listOf types.path;
+        description = lib.mdDoc "The list of directories containing external plugins";
+      };
+
+      managementPlugin = {
+        enable = mkEnableOption (lib.mdDoc "the management plugin");
+        port = mkOption {
+          default = 15672;
+          type = types.port;
+          description = lib.mdDoc ''
+            Port on which to run the management plugin.
+          '';
+        };
+      };
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf cfg.enable {
+
+    # This is needed so we will have 'rabbitmqctl' in our PATH
+    environment.systemPackages = [ cfg.package ];
+
+    services.epmd.enable = true;
+
+    users.users.rabbitmq = {
+      description = "RabbitMQ server user";
+      home = "${cfg.dataDir}";
+      createHome = true;
+      group = "rabbitmq";
+      uid = config.ids.uids.rabbitmq;
+    };
+
+    users.groups.rabbitmq.gid = config.ids.gids.rabbitmq;
+
+    services.rabbitmq.configItems = {
+      "listeners.tcp.1" = mkDefault "${cfg.listenAddress}:${toString cfg.port}";
+    } // optionalAttrs cfg.managementPlugin.enable {
+      "management.tcp.port" = toString cfg.managementPlugin.port;
+      "management.tcp.ip" = cfg.listenAddress;
+    };
+
+    services.rabbitmq.plugins = optional cfg.managementPlugin.enable "rabbitmq_management";
+
+    systemd.services.rabbitmq = {
+      description = "RabbitMQ Server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "epmd.socket" ];
+      wants = [ "network.target" "epmd.socket" ];
+
+      path = [
+        cfg.package
+        pkgs.coreutils # mkdir/chown/chmod for preStart
+      ];
+
+      environment = {
+        RABBITMQ_MNESIA_BASE = "${cfg.dataDir}/mnesia";
+        RABBITMQ_LOGS = "-";
+        SYS_PREFIX = "";
+        RABBITMQ_CONFIG_FILE = config_file;
+        RABBITMQ_PLUGINS_DIR = concatStringsSep ":" cfg.pluginDirs;
+        RABBITMQ_ENABLED_PLUGINS_FILE = pkgs.writeText "enabled_plugins" ''
+          [ ${concatStringsSep "," cfg.plugins} ].
+        '';
+      } // optionalAttrs (cfg.config != "") { RABBITMQ_ADVANCED_CONFIG_FILE = advanced_config_file; };
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/sbin/rabbitmq-server";
+        ExecStop = "${cfg.package}/sbin/rabbitmqctl shutdown";
+        User = "rabbitmq";
+        Group = "rabbitmq";
+        LogsDirectory = "rabbitmq";
+        WorkingDirectory = cfg.dataDir;
+        Type = "notify";
+        NotifyAccess = "all";
+        UMask = "0027";
+        LimitNOFILE = "100000";
+        Restart = "on-failure";
+        RestartSec = "10";
+        TimeoutStartSec = "3600";
+      };
+
+      preStart = ''
+        ${optionalString (cfg.cookie != "") ''
+            echo -n ${cfg.cookie} > ${cfg.dataDir}/.erlang.cookie
+            chmod 600 ${cfg.dataDir}/.erlang.cookie
+        ''}
+      '';
+    };
+
+  };
+
+}
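A usage sketch; the empty `listenAddress` exposes the broker on all interfaces (note the default `guest` user warning above), and the memory watermark key is an upstream rabbitmq.conf setting given as an assumption, not something this module defines:

```nix
services.rabbitmq = {
  enable = true;
  listenAddress = "";          # listen on all interfaces
  managementPlugin.enable = true;
  configItems = {
    # Upstream rabbitmq.conf key, shown as an example of configItems.
    "vm_memory_high_watermark.relative" = "0.6";
  };
};
networking.firewall.allowedTCPPorts = [ 5672 15672 ];
```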
diff --git a/nixpkgs/nixos/modules/services/audio/alsa.nix b/nixpkgs/nixos/modules/services/audio/alsa.nix
new file mode 100644
index 000000000000..155780199fd6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/alsa.nix
@@ -0,0 +1,133 @@
+# ALSA sound support.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) alsa-utils;
+
+  pulseaudioEnabled = config.hardware.pulseaudio.enable;
+
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "sound" "enableMediaKeys" ] [ "sound" "mediaKeys" "enable" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    sound = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable ALSA sound.
+        '';
+      };
+
+      enableOSSEmulation = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable ALSA OSS emulation (with certain cards sound mixing may not work!).
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          defaults.pcm.!card 3
+        '';
+        description = lib.mdDoc ''
+          Set additional configuration for system-wide ALSA.
+        '';
+      };
+
+      mediaKeys = {
+
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to enable volume and capture control with keyboard media keys.
+
+            You want to leave this disabled if you run a desktop environment
+            like KDE, GNOME, Xfce, etc., as those handle such things themselves.
+            You might want to enable this if you run a minimalistic desktop
+            environment or work from bare Linux ttys/framebuffers.
+
+            Enabling this will turn on {option}`services.actkbd`.
+          '';
+        };
+
+        volumeStep = mkOption {
+          type = types.str;
+          default = "1";
+          example = "1%";
+          description = lib.mdDoc ''
+            The value by which to increment/decrement volume on media keys.
+
+            See amixer(1) for allowed values.
+          '';
+        };
+
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.sound.enable {
+
+    environment.systemPackages = [ alsa-utils ];
+
+    environment.etc = mkIf (!pulseaudioEnabled && config.sound.extraConfig != "")
+      { "asound.conf".text = config.sound.extraConfig; };
+
+    # ALSA provides a udev rule for restoring volume settings.
+    services.udev.packages = [ alsa-utils ];
+
+    boot.kernelModules = optional config.sound.enableOSSEmulation "snd_pcm_oss";
+
+    systemd.services.alsa-store =
+      { description = "Store Sound Card State";
+        wantedBy = [ "multi-user.target" ];
+        unitConfig.RequiresMountsFor = "/var/lib/alsa";
+        unitConfig.ConditionVirtualization = "!systemd-nspawn";
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          ExecStart = "${pkgs.coreutils}/bin/mkdir -p /var/lib/alsa";
+          ExecStop = "${alsa-utils}/sbin/alsactl store --ignore";
+        };
+      };
+
+    services.actkbd = mkIf config.sound.mediaKeys.enable {
+      enable = true;
+      bindings = [
+        # "Mute" media key
+        { keys = [ 113 ]; events = [ "key" ];       command = "${alsa-utils}/bin/amixer -q set Master toggle"; }
+
+        # "Lower Volume" media key
+        { keys = [ 114 ]; events = [ "key" "rep" ]; command = "${alsa-utils}/bin/amixer -q set Master ${config.sound.mediaKeys.volumeStep}- unmute"; }
+
+        # "Raise Volume" media key
+        { keys = [ 115 ]; events = [ "key" "rep" ]; command = "${alsa-utils}/bin/amixer -q set Master ${config.sound.mediaKeys.volumeStep}+ unmute"; }
+
+        # "Mic Mute" media key
+        { keys = [ 190 ]; events = [ "key" ];       command = "${alsa-utils}/bin/amixer -q set Capture toggle"; }
+      ];
+    };
+
+  };
+
+}
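A sketch for a bare-console setup using the media-key bindings described above; the 5% step is illustrative:

```nix
sound = {
  enable = true;
  mediaKeys = {
    enable = true;       # turns on services.actkbd under the hood
    volumeStep = "5%";
  };
};
```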
diff --git a/nixpkgs/nixos/modules/services/audio/botamusique.nix b/nixpkgs/nixos/modules/services/audio/botamusique.nix
new file mode 100644
index 000000000000..5d3f7db12bc9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/botamusique.nix
@@ -0,0 +1,114 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.botamusique;
+
+  format = pkgs.formats.ini {};
+  configFile = format.generate "botamusique.ini" cfg.settings;
+in
+{
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  options.services.botamusique = {
+    enable = mkEnableOption (lib.mdDoc "botamusique, a bot to play audio streams on mumble");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.botamusique;
+      defaultText = literalExpression "pkgs.botamusique";
+      description = lib.mdDoc "The botamusique package to use.";
+    };
+
+    settings = mkOption {
+      type = with types; submodule {
+        freeformType = format.type;
+        options = {
+          server.host = mkOption {
+            type = types.str;
+            default = "localhost";
+            example = "mumble.example.com";
+            description = lib.mdDoc "Hostname of the mumble server to connect to.";
+          };
+
+          server.port = mkOption {
+            type = types.port;
+            default = 64738;
+            description = lib.mdDoc "Port of the mumble server to connect to.";
+          };
+
+          bot.username = mkOption {
+            type = types.str;
+            default = "botamusique";
+            description = lib.mdDoc "Name the bot should appear with.";
+          };
+
+          bot.comment = mkOption {
+            type = types.str;
+            default = "Hi, I'm here to play radio, local music or youtube/soundcloud music. Have fun!";
+            description = lib.mdDoc "Comment displayed for the bot.";
+          };
+        };
+      };
+      default = {};
+      description = lib.mdDoc ''
+        Your {file}`configuration.ini` as a Nix attribute set. Look up
+        possible options in the [configuration.example.ini](https://github.com/azlux/botamusique/blob/master/configuration.example.ini).
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.botamusique = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      unitConfig.Documentation = "https://github.com/azlux/botamusique/wiki";
+
+      environment.HOME = "/var/lib/botamusique";
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/botamusique --config ${configFile}";
+        Restart = "always"; # the bot exits when the server connection is lost
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DynamicUser = true;
+        IPAddressDeny = [
+          "link-local"
+          "multicast"
+        ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        ProcSubset = "pid";
+        PrivateDevices = true;
+        PrivateUsers = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+        ];
+        StateDirectory = "botamusique";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service @resources"
+          "~@privileged"
+        ];
+        UMask = "0077";
+        WorkingDirectory = "/var/lib/botamusique";
+      };
+    };
+  };
+}
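A usage sketch; the Mumble hostname and bot name are placeholders, and any other keys from the upstream configuration.example.ini can be added to the freeform `settings`:

```nix
services.botamusique = {
  enable = true;
  settings = {
    server.host = "mumble.example.com";
    bot.username = "musicbot";
  };
};
```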
diff --git a/nixpkgs/nixos/modules/services/audio/castopod.md b/nixpkgs/nixos/modules/services/audio/castopod.md
new file mode 100644
index 000000000000..ee8590737a7c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/castopod.md
@@ -0,0 +1,22 @@
+# Castopod {#module-services-castopod}
+
+Castopod is an open-source hosting platform made for podcasters who want to engage and interact with their audience.
+
+## Quickstart {#module-services-castopod-quickstart}
+
+Use the following configuration to start a public instance of Castopod on `castopod.example.com` domain:
+
+```nix
+networking.firewall.allowedTCPPorts = [ 80 443 ];
+services.castopod = {
+  enable = true;
+  database.createLocally = true;
+  nginx.virtualHost = {
+    serverName = "castopod.example.com";
+    enableACME = true;
+    forceSSL = true;
+  };
+};
+```
+
+Go to `https://castopod.example.com/cp-install` to create superadmin account after applying the above configuration.
diff --git a/nixpkgs/nixos/modules/services/audio/castopod.nix b/nixpkgs/nixos/modules/services/audio/castopod.nix
new file mode 100644
index 000000000000..b782b5489147
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/castopod.nix
@@ -0,0 +1,287 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.castopod;
+  fpm = config.services.phpfpm.pools.castopod;
+
+  user = "castopod";
+  stateDirectory = "/var/lib/castopod";
+
+  # https://docs.castopod.org/getting-started/install.html#requirements
+  phpPackage = pkgs.php.withExtensions ({ enabled, all }: with all; [
+    intl
+    curl
+    mbstring
+    gd
+    exif
+    mysqlnd
+  ] ++ enabled);
+in
+{
+  meta.doc = ./castopod.md;
+  meta.maintainers = with lib.maintainers; [ alexoundos misuzu ];
+
+  options.services = {
+    castopod = {
+      enable = lib.mkEnableOption (lib.mdDoc "Castopod");
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.castopod;
+        defaultText = lib.literalMD "pkgs.castopod";
+        description = lib.mdDoc "Which Castopod package to use.";
+      };
+      database = {
+        createLocally = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Create the database and database user locally.
+          '';
+        };
+        hostname = lib.mkOption {
+          type = lib.types.str;
+          default = "localhost";
+          description = lib.mdDoc "Database hostname.";
+        };
+        name = lib.mkOption {
+          type = lib.types.str;
+          default = "castopod";
+          description = lib.mdDoc "Database name.";
+        };
+        user = lib.mkOption {
+          type = lib.types.str;
+          default = user;
+          description = lib.mdDoc "Database user.";
+        };
+        passwordFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/run/keys/castopod-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            [](#opt-services.castopod.database.user).
+          '';
+        };
+      };
+      settings = lib.mkOption {
+        type = with lib.types; attrsOf (oneOf [ str int bool ]);
+        default = { };
+        example = {
+          "email.protocol" = "smtp";
+          "email.SMTPHost" = "localhost";
+          "email.SMTPUser" = "myuser";
+          "email.fromEmail" = "castopod@example.com";
+        };
+        description = lib.mdDoc ''
+          Environment variables used for Castopod.
+          See [](https://code.castopod.org/adaures/castopod/-/blob/main/.env.example)
+          for available environment variables.
+        '';
+      };
+      environmentFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/keys/castopod-env";
+        description = lib.mdDoc ''
+          Environment file to inject e.g. secrets into the configuration.
+          See [](https://code.castopod.org/adaures/castopod/-/blob/main/.env.example)
+          for available environment variables.
+        '';
+      };
+      configureNginx = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc "Configure nginx as a reverse proxy for CastoPod.";
+      };
+      localDomain = lib.mkOption {
+        type = lib.types.str;
+        example = "castopod.example.org";
+        description = lib.mdDoc "The domain serving your CastoPod instance.";
+      };
+      poolSettings = lib.mkOption {
+        type = with lib.types; attrsOf (oneOf [ str int bool ]);
+        default = {
+          "pm" = "dynamic";
+          "pm.max_children" = "32";
+          "pm.start_servers" = "2";
+          "pm.min_spare_servers" = "2";
+          "pm.max_spare_servers" = "4";
+          "pm.max_requests" = "500";
+        };
+        description = lib.mdDoc ''
+          Options for Castopod's PHP pool. See the documentation on `php-fpm.conf` for details on configuration directives.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.castopod.settings =
+      let
+        sslEnabled = with config.services.nginx.virtualHosts.${cfg.localDomain}; addSSL || forceSSL || onlySSL || enableACME || useACMEHost != null;
+        baseURL = "http${lib.optionalString sslEnabled "s"}://${cfg.localDomain}";
+      in
+      lib.mapAttrs (name: lib.mkDefault) {
+        "app.forceGlobalSecureRequests" = sslEnabled;
+        "app.baseURL" = baseURL;
+
+        "media.baseURL" = "/";
+        "media.root" = "media";
+        "media.storage" = stateDirectory;
+
+        "admin.gateway" = "admin";
+        "auth.gateway" = "auth";
+
+        "database.default.hostname" = cfg.database.hostname;
+        "database.default.database" = cfg.database.name;
+        "database.default.username" = cfg.database.user;
+        "database.default.DBPrefix" = "cp_";
+
+        "cache.handler" = "file";
+      };
+
+    services.phpfpm.pools.castopod = {
+      inherit user;
+      group = config.services.nginx.group;
+      phpPackage = phpPackage;
+      phpOptions = ''
+        # https://code.castopod.org/adaures/castopod/-/blob/main/docker/production/app/uploads.ini
+        file_uploads = On
+        memory_limit = 512M
+        upload_max_filesize = 500M
+        post_max_size = 512M
+        max_execution_time = 300
+        max_input_time = 300
+      '';
+      settings = {
+        "listen.owner" = config.services.nginx.user;
+        "listen.group" = config.services.nginx.group;
+      } // cfg.poolSettings;
+    };
+
+    systemd.services.castopod-setup = {
+      after = lib.optional config.services.mysql.enable "mysql.service";
+      requires = lib.optional config.services.mysql.enable "mysql.service";
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.openssl phpPackage ];
+      script =
+        let
+          envFile = "${stateDirectory}/.env";
+          media = "${cfg.settings."media.storage"}/${cfg.settings."media.root"}";
+        in
+        ''
+          mkdir -p ${stateDirectory}/writable/{cache,logs,session,temp,uploads}
+
+          if [ ! -d ${lib.escapeShellArg media} ]; then
+            cp --no-preserve=mode,ownership -r ${cfg.package}/share/castopod/public/media ${lib.escapeShellArg media}
+          fi
+
+          if [ ! -f ${stateDirectory}/salt ]; then
+            openssl rand -base64 33 > ${stateDirectory}/salt
+          fi
+
+          cat <<'EOF' > ${envFile}
+          ${lib.generators.toKeyValue { } cfg.settings}
+          EOF
+
+          echo "analytics.salt=$(cat ${stateDirectory}/salt)" >> ${envFile}
+
+          ${if (cfg.database.passwordFile != null) then ''
+            echo "database.default.password=$(cat ${lib.escapeShellArg cfg.database.passwordFile})" >> ${envFile}
+          '' else ''
+            echo "database.default.password=" >> ${envFile}
+          ''}
+
+          ${lib.optionalString (cfg.environmentFile != null) ''
+            cat ${lib.escapeShellArg cfg.environmentFile} >> ${envFile}
+          ''}
+
+          php spark castopod:database-update
+        '';
+      serviceConfig = {
+        StateDirectory = "castopod";
+        WorkingDirectory = "${cfg.package}/share/castopod";
+        Type = "oneshot";
+        RemainAfterExit = true;
+        User = user;
+        Group = config.services.nginx.group;
+      };
+    };
+
+    systemd.services.castopod-scheduled = {
+      after = [ "castopod-setup.service" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ phpPackage ];
+      script = ''
+        php public/index.php scheduled-activities
+        php public/index.php scheduled-websub-publish
+        php public/index.php scheduled-video-clips
+      '';
+      serviceConfig = {
+        StateDirectory = "castopod";
+        WorkingDirectory = "${cfg.package}/share/castopod";
+        Type = "oneshot";
+        User = user;
+        Group = config.services.nginx.group;
+      };
+    };
+
+    systemd.timers.castopod-scheduled = {
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = "*-*-* *:*:00";
+        Unit = "castopod-scheduled.service";
+      };
+    };
+
+    services.mysql = lib.mkIf cfg.database.createLocally {
+      enable = true;
+      package = lib.mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [{
+        name = cfg.database.user;
+        ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+      }];
+    };
+
+    services.nginx = lib.mkIf cfg.configureNginx {
+      enable = true;
+      virtualHosts."${cfg.localDomain}" = {
+        root = lib.mkForce "${cfg.package}/share/castopod/public";
+
+        extraConfig = ''
+          try_files $uri $uri/ /index.php?$args;
+          index index.php index.html;
+        '';
+
+        locations."^~ /${cfg.settings."media.root"}/" = {
+          root = cfg.settings."media.storage";
+          extraConfig = ''
+            add_header Access-Control-Allow-Origin "*";
+            expires max;
+            access_log off;
+          '';
+        };
+
+        locations."~ \.php$" = {
+          fastcgiParams = {
+            SERVER_NAME = "$host";
+          };
+          extraConfig = ''
+            fastcgi_intercept_errors on;
+            fastcgi_index index.php;
+            fastcgi_pass unix:${fpm.socket};
+            try_files $uri =404;
+            fastcgi_read_timeout 3600;
+            fastcgi_send_timeout 3600;
+          '';
+        };
+      };
+    };
+
+    users.users.${user} = lib.mapAttrs (name: lib.mkDefault) {
+      description = "Castopod user";
+      isSystemUser = true;
+      group = config.services.nginx.group;
+    };
+  };
+}
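
An illustrative sketch of how the castopod module above might be used from configuration.nix; the enable option is assumed from the module's mkIf pattern and the domain is a placeholder.

    { ... }:
    {
      services.castopod = {
        enable = true;                   # assumed enable option, declared earlier in the module
        localDomain = "podcasts.example.com";
        database.createLocally = true;   # provisions the MariaDB database shown above
        configureNginx = true;           # sets up the nginx virtual host and PHP-FPM socket shown above
      };
    }
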
diff --git a/nixpkgs/nixos/modules/services/audio/gmediarender.nix b/nixpkgs/nixos/modules/services/audio/gmediarender.nix
new file mode 100644
index 000000000000..2f23232d19cf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/gmediarender.nix
@@ -0,0 +1,116 @@
+{ pkgs, lib, config, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gmediarender;
+in
+{
+  options.services.gmediarender = {
+    enable = mkEnableOption (mdDoc "the gmediarender DLNA renderer");
+
+    audioDevice = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = mdDoc ''
+        The audio device to use.
+      '';
+    };
+
+    audioSink = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = mdDoc ''
+        The audio sink to use.
+      '';
+    };
+
+    friendlyName = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = mdDoc ''
+        A "friendly name" for identifying the endpoint.
+      '';
+    };
+
+    initialVolume = mkOption {
+      type = types.nullOr types.int;
+      default = 0;
+      description = mdDoc ''
+        A default volume attenuation (in dB) for the endpoint.
+      '';
+    };
+
+    package = mkPackageOptionMD pkgs "gmediarender" {
+      default = "gmrender-resurrect";
+    };
+
+    port = mkOption {
+      type = types.nullOr types.port;
+      default = null;
+      description = mdDoc "Port that will be used to accept client connections.";
+    };
+
+    uuid = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = mdDoc ''
+        A UUID for uniquely identifying the endpoint.  If you have
+        multiple renderers on your network, you MUST set this.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd = {
+      services.gmediarender = {
+        after = [ "network-online.target" ];
+        wantedBy = [ "multi-user.target" ];
+        description = "gmediarender server daemon";
+        environment = {
+          XDG_CACHE_HOME = "%t/gmediarender";
+        };
+        serviceConfig = {
+          DynamicUser = true;
+          User = "gmediarender";
+          Group = "gmediarender";
+          SupplementaryGroups = [ "audio" ];
+          ExecStart =
+            "${cfg.package}/bin/gmediarender " +
+            optionalString (cfg.audioDevice != null) ("--gstout-audiodevice=${utils.escapeSystemdExecArg cfg.audioDevice} ") +
+            optionalString (cfg.audioSink != null) ("--gstout-audiosink=${utils.escapeSystemdExecArg cfg.audioSink} ") +
+            optionalString (cfg.friendlyName != null) ("--friendly-name=${utils.escapeSystemdExecArg cfg.friendlyName} ") +
+            optionalString (cfg.initialVolume != 0) ("--initial-volume=${toString cfg.initialVolume} ") +
+            optionalString (cfg.port != null) ("--port=${toString cfg.port} ") +
+            optionalString (cfg.uuid != null) ("--uuid=${utils.escapeSystemdExecArg cfg.uuid} ");
+          Restart = "always";
+          RuntimeDirectory = "gmediarender";
+
+          # Security options:
+          CapabilityBoundingSet = "";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          # PrivateDevices = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProcSubset = "pid";
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@privileged" ];
+          UMask = "066";
+        };
+      };
+    };
+  };
+}
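
An illustrative sketch using the gmediarender options declared above; the friendly name and UUID are placeholders.

    { ... }:
    {
      services.gmediarender = {
        enable = true;
        friendlyName = "Living room speaker";
        # a fixed UUID is required as soon as more than one renderer is on the network
        uuid = "c0ffee00-0000-4000-8000-000000000001";
        initialVolume = -10;   # attenuation in dB, per the option description
      };
    }
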
diff --git a/nixpkgs/nixos/modules/services/audio/gonic.nix b/nixpkgs/nixos/modules/services/audio/gonic.nix
new file mode 100644
index 000000000000..66daeb60b503
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/gonic.nix
@@ -0,0 +1,90 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gonic;
+  settingsFormat = pkgs.formats.keyValue {
+    mkKeyValue = lib.generators.mkKeyValueDefault { } " ";
+    listsAsDuplicateKeys = true;
+  };
+in
+{
+  options = {
+    services.gonic = {
+
+      enable = mkEnableOption (lib.mdDoc "Gonic music server");
+
+      settings = mkOption rec {
+        type = settingsFormat.type;
+        apply = recursiveUpdate default;
+        default = {
+          listen-addr = "127.0.0.1:4747";
+          cache-path = "/var/cache/gonic";
+          tls-cert = null;
+          tls-key = null;
+        };
+        example = {
+          music-path = [ "/mnt/music" ];
+          podcast-path = "/mnt/podcasts";
+        };
+        description = lib.mdDoc ''
+          Configuration for Gonic, see <https://github.com/sentriz/gonic#configuration-options> for supported values.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.gonic = {
+      description = "Gonic Media Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart =
+          let
+            # these values are null by default but should not appear in the final config
+            filteredSettings = filterAttrs (n: v: !((n == "tls-cert" || n == "tls-key") && v == null)) cfg.settings;
+          in
+          "${pkgs.gonic}/bin/gonic -config-path ${settingsFormat.generate "gonic" filteredSettings}";
+        DynamicUser = true;
+        StateDirectory = "gonic";
+        CacheDirectory = "gonic";
+        WorkingDirectory = "/var/lib/gonic";
+        RuntimeDirectory = "gonic";
+        RootDirectory = "/run/gonic";
+        ReadWritePaths = "";
+        BindReadOnlyPaths = [
+          # gonic can access scrobbling services
+          "-/etc/resolv.conf"
+          "-/etc/ssl/certs/ca-certificates.crt"
+          builtins.storeDir
+          cfg.settings.podcast-path
+        ] ++ cfg.settings.music-path
+        ++ lib.optional (cfg.settings.tls-cert != null) cfg.settings.tls-cert
+        ++ lib.optional (cfg.settings.tls-key != null) cfg.settings.tls-key;
+        CapabilityBoundingSet = "";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        RestrictRealtime = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        UMask = "0066";
+        ProtectHostname = true;
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.autrimpo ];
+}
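
An illustrative sketch based on the gonic settings example above; the paths are placeholders, music-path must be a list, and podcast-path is referenced by BindReadOnlyPaths, so it should be set.

    { ... }:
    {
      services.gonic = {
        enable = true;
        settings = {
          music-path = [ "/mnt/music" ];
          podcast-path = "/var/lib/gonic/podcasts";
        };
      };
    }
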
diff --git a/nixpkgs/nixos/modules/services/audio/goxlr-utility.nix b/nixpkgs/nixos/modules/services/audio/goxlr-utility.nix
new file mode 100644
index 000000000000..b719de875c7f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/goxlr-utility.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.goxlr-utility;
+in
+
+with lib;
+{
+
+  options = {
+    services.goxlr-utility = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable goxlr-utility for controlling your TC-Helicon GoXLR or GoXLR Mini.
+        '';
+      };
+      package = mkPackageOptionMD pkgs "goxlr-utility" { };
+      autoStart.xdg = mkOption {
+        default = true;
+        type = with types; bool;
+        description = lib.mdDoc ''
+          Start the daemon automatically using XDG autostart.
+          Sets `xdg.autostart.enable = true` if not already enabled.
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.goxlr-utility.enable
+    {
+      services.udev.packages = [ cfg.package ];
+
+      xdg.autostart.enable = mkIf cfg.autoStart.xdg true;
+      environment.systemPackages = mkIf cfg.autoStart.xdg
+        [
+          cfg.package
+          (pkgs.makeAutostartItem
+            {
+              name = "goxlr-utility";
+              package = cfg.package;
+            })
+        ];
+    };
+
+  meta.maintainers = with maintainers; [ errnoh ];
+}
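
An illustrative sketch enabling the goxlr-utility module above; autoStart.xdg already defaults to true and is only spelled out for clarity.

    { ... }:
    {
      services.goxlr-utility.enable = true;
      services.goxlr-utility.autoStart.xdg = true;   # adds the XDG autostart item shown above
    }
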
diff --git a/nixpkgs/nixos/modules/services/audio/hqplayerd.nix b/nixpkgs/nixos/modules/services/audio/hqplayerd.nix
new file mode 100644
index 000000000000..d54400b18e30
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/hqplayerd.nix
@@ -0,0 +1,139 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.hqplayerd;
+  pkg = pkgs.hqplayerd;
+  # XXX: This is hard-coded in the distributed binary, don't try to change it.
+  stateDir = "/var/lib/hqplayer";
+  configDir = "/etc/hqplayer";
+in
+{
+  options = {
+    services.hqplayerd = {
+      enable = mkEnableOption (lib.mdDoc "HQPlayer Embedded");
+
+      auth = {
+        username = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Username used for HQPlayer's WebUI.
+
+            Without this you will need to manually create the credentials after
+            first start by going to http://your.ip:8088/auth
+          '';
+        };
+
+        password = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Password used for HQPlayer's WebUI.
+
+            Without this you will need to manually create the credentials after
+            first start by going to http://your.ip/8088/auth
+          '';
+        };
+      };
+
+      licenseFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to the HQPlayer license key file.
+
+          Without this, the service will run in trial mode and restart every 30
+          minutes.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Opens ports needed for the WebUI and controller API.
+        '';
+      };
+
+      config = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          HQPlayer daemon configuration, written to /etc/hqplayer/hqplayerd.xml.
+
+          Refer to share/doc/hqplayerd/readme.txt in the hqplayerd derivation for possible values.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.auth.username != null -> cfg.auth.password != null)
+                 && (cfg.auth.password != null -> cfg.auth.username != null);
+        message = "You must set either both services.hqplayer.auth.username and password, or neither.";
+      }
+    ];
+
+    environment = {
+      etc = {
+        "hqplayer/hqplayerd.xml" = mkIf (cfg.config != null) { source = pkgs.writeText "hqplayerd.xml" cfg.config; };
+        "hqplayer/hqplayerd4-key.xml" = mkIf (cfg.licenseFile != null) { source = cfg.licenseFile; };
+      };
+      systemPackages = [ pkg ];
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 8088 4321 ];
+    };
+
+    systemd = {
+      tmpfiles.rules = [
+        "d ${configDir}      0755 hqplayer hqplayer - -"
+        "d ${stateDir}       0755 hqplayer hqplayer - -"
+        "d ${stateDir}/home  0755 hqplayer hqplayer - -"
+      ];
+
+      packages = [ pkg ];
+
+      services.hqplayerd = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "systemd-tmpfiles-setup.service" ];
+
+        environment.HOME = "${stateDir}/home";
+
+        unitConfig.ConditionPathExists = [ configDir stateDir ];
+
+        restartTriggers = optionals (cfg.config != null) [ config.environment.etc."hqplayer/hqplayerd.xml".source ];
+
+        preStart = ''
+          cp -r "${pkg}/var/lib/hqplayer/web" "${stateDir}"
+          chmod -R u+wX "${stateDir}/web"
+
+          if [ ! -f "${configDir}/hqplayerd.xml" ]; then
+            echo "creating initial config file"
+            install -m 0644 "${pkg}/etc/hqplayer/hqplayerd.xml" "${configDir}/hqplayerd.xml"
+          fi
+        '' + optionalString (cfg.auth.username != null && cfg.auth.password != null) ''
+          ${pkg}/bin/hqplayerd -s ${cfg.auth.username} ${cfg.auth.password}
+        '';
+      };
+    };
+
+    users.groups = {
+      hqplayer.gid = config.ids.gids.hqplayer;
+    };
+
+    users.users = {
+      hqplayer = {
+        description = "hqplayer daemon user";
+        extraGroups = [ "audio" "video" ];
+        group = "hqplayer";
+        uid = config.ids.uids.hqplayer;
+      };
+    };
+  };
+}
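
An illustrative sketch of the hqplayerd WebUI credentials and firewall options above; the license path is a hypothetical placeholder, and the assertion requires username and password to be set together.

    { ... }:
    {
      services.hqplayerd = {
        enable = true;
        openFirewall = true;                                # opens 8088 (WebUI) and 4321
        auth.username = "admin";
        auth.password = "change-me";                        # ends up in the Nix store via the unit's preStart
        licenseFile = "/var/lib/secrets/hqplayerd4-key.xml"; # hypothetical path to an existing key file
      };
    }
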
diff --git a/nixpkgs/nixos/modules/services/audio/icecast.nix b/nixpkgs/nixos/modules/services/audio/icecast.nix
new file mode 100644
index 000000000000..63049bd93ab9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/icecast.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.icecast;
+  configFile = pkgs.writeText "icecast.xml" ''
+    <icecast>
+      <hostname>${cfg.hostname}</hostname>
+
+      <authentication>
+        <admin-user>${cfg.admin.user}</admin-user>
+        <admin-password>${cfg.admin.password}</admin-password>
+      </authentication>
+
+      <paths>
+        <logdir>${cfg.logDir}</logdir>
+        <adminroot>${pkgs.icecast}/share/icecast/admin</adminroot>
+        <webroot>${pkgs.icecast}/share/icecast/web</webroot>
+        <alias source="/" dest="/status.xsl"/>
+      </paths>
+
+      <listen-socket>
+        <port>${toString cfg.listen.port}</port>
+        <bind-address>${cfg.listen.address}</bind-address>
+      </listen-socket>
+
+      <security>
+        <chroot>0</chroot>
+        <changeowner>
+            <user>${cfg.user}</user>
+            <group>${cfg.group}</group>
+        </changeowner>
+      </security>
+
+      ${cfg.extraConf}
+    </icecast>
+  '';
+in {
+
+  ###### interface
+
+  options = {
+
+    services.icecast = {
+
+      enable = mkEnableOption (lib.mdDoc "Icecast server");
+
+      hostname = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc "DNS name or IP address that will be used for the stream directory lookups or possibly the playlist generation if a Host header is not provided.";
+        default = config.networking.domain;
+        defaultText = literalExpression "config.networking.domain";
+      };
+
+      admin = {
+        user = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Username used for all administration functions.";
+          default = "admin";
+        };
+
+        password = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Password used for all administration functions.";
+        };
+      };
+
+      logDir = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Base directory used for logging.";
+        default = "/var/log/icecast";
+      };
+
+      listen = {
+        port = mkOption {
+          type = types.port;
+          description = lib.mdDoc "TCP port that will be used to accept client connections.";
+          default = 8000;
+        };
+
+        address = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Address Icecast will listen on.";
+          default = "::";
+        };
+      };
+
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc "User privileges for the server.";
+        default = "nobody";
+      };
+
+      group = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Group privileges for the server.";
+        default = "nogroup";
+      };
+
+      extraConf = mkOption {
+        type = types.lines;
+        description = lib.mdDoc "icecast.xml content.";
+        default = "";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.icecast = {
+      after = [ "network.target" ];
+      description = "Icecast Network Audio Streaming Server";
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = "mkdir -p ${cfg.logDir} && chown ${cfg.user}:${cfg.group} ${cfg.logDir}";
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.icecast}/bin/icecast -c ${configFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+    };
+
+  };
+
+}
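
An illustrative sketch of the icecast options above; the hostname is a placeholder, and note that the admin password ends up world-readable in the generated icecast.xml in the Nix store.

    { ... }:
    {
      services.icecast = {
        enable = true;
        hostname = "radio.example.com";
        admin.password = "hackme";
        listen.port = 8000;      # default, shown for clarity
      };
    }
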
diff --git a/nixpkgs/nixos/modules/services/audio/jack.nix b/nixpkgs/nixos/modules/services/audio/jack.nix
new file mode 100644
index 000000000000..b51f2a78c983
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/jack.nix
@@ -0,0 +1,294 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jack;
+
+  pcmPlugin = cfg.jackd.enable && cfg.alsa.enable;
+  loopback = cfg.jackd.enable && cfg.loopback.enable;
+
+  enable32BitAlsaPlugins = cfg.alsa.support32Bit && pkgs.stdenv.isx86_64 && pkgs.pkgsi686Linux.alsa-lib != null;
+
+  umaskNeeded = versionOlder cfg.jackd.package.version "1.9.12";
+  bridgeNeeded = versionAtLeast cfg.jackd.package.version "1.9.12";
+in {
+  options = {
+    services.jack = {
+      jackd = {
+        enable = mkEnableOption (lib.mdDoc ''
+          JACK Audio Connection Kit. You need to add yourself to the "jackaudio" group to use it.
+        '');
+
+        package = mkOption {
+          # until jack1 promiscuous mode is fixed
+          internal = true;
+          type = types.package;
+          default = pkgs.jack2;
+          defaultText = literalExpression "pkgs.jack2";
+          example = literalExpression "pkgs.jack1";
+          description = lib.mdDoc ''
+            The JACK package to use.
+          '';
+        };
+
+        extraOptions = mkOption {
+          type = types.listOf types.str;
+          default = [
+            "-dalsa"
+          ];
+          example = literalExpression ''
+            [ "-dalsa" "--device" "hw:1" ];
+          '';
+          description = lib.mdDoc ''
+            Specifies startup command line arguments to pass to JACK server.
+          '';
+        };
+
+        session = mkOption {
+          type = types.lines;
+          description = lib.mdDoc ''
+            Commands to run after JACK is started.
+          '';
+        };
+
+      };
+
+      alsa = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Route audio to/from generic ALSA-using applications using ALSA JACK PCM plugin.
+          '';
+        };
+
+        support32Bit = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to support sound for 32-bit ALSA applications on 64-bit system.
+          '';
+        };
+      };
+
+      loopback = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Create an ALSA loopback device instead of using the PCM plugin. This has broader
+            application support (things like Steam will work), but may need fine-tuning
+            for specific hardware.
+          '';
+        };
+
+        index = mkOption {
+          type = types.int;
+          default = 10;
+          description = lib.mdDoc ''
+            Index of an ALSA loopback device.
+          '';
+        };
+
+        config = mkOption {
+          type = types.lines;
+          description = lib.mdDoc ''
+            ALSA config for loopback device.
+          '';
+        };
+
+        dmixConfig = mkOption {
+          type = types.lines;
+          default = "";
+          example = ''
+            period_size 2048
+            periods 2
+          '';
+          description = lib.mdDoc ''
+            For music production software that doesn't support JACK natively, put
+            buffer/period adjustments here to decrease dmix device latency.
+          '';
+        };
+
+        session = mkOption {
+          type = types.lines;
+          description = lib.mdDoc ''
+            Additional commands to run to setup loopback device.
+          '';
+        };
+      };
+
+    };
+
+  };
+
+  config = mkMerge [
+
+    (mkIf pcmPlugin {
+      sound.extraConfig = ''
+        pcm_type.jack {
+          libs.native = ${pkgs.alsa-plugins}/lib/alsa-lib/libasound_module_pcm_jack.so ;
+          ${lib.optionalString enable32BitAlsaPlugins
+          "libs.32Bit = ${pkgs.pkgsi686Linux.alsa-plugins}/lib/alsa-lib/libasound_module_pcm_jack.so ;"}
+        }
+        pcm.!default {
+          @func getenv
+          vars [ PCM ]
+          default "plug:jack"
+        }
+      '';
+    })
+
+    (mkIf loopback {
+      boot.kernelModules = [ "snd-aloop" ];
+      boot.kernelParams = [ "snd-aloop.index=${toString cfg.loopback.index}" ];
+      sound.extraConfig = cfg.loopback.config;
+    })
+
+    (mkIf cfg.jackd.enable {
+      services.jack.jackd.session = ''
+        ${lib.optionalString bridgeNeeded "${pkgs.a2jmidid}/bin/a2jmidid -e &"}
+      '';
+      # https://alsa.opensrc.org/Jack_and_Loopback_device_as_Alsa-to-Jack_bridge#id06
+      services.jack.loopback.config = ''
+        pcm.loophw00 {
+          type hw
+          card ${toString cfg.loopback.index}
+          device 0
+          subdevice 0
+        }
+        pcm.amix {
+          type dmix
+          ipc_key 219345
+          slave {
+            pcm loophw00
+            ${cfg.loopback.dmixConfig}
+          }
+        }
+        pcm.asoftvol {
+          type softvol
+          slave.pcm "amix"
+          control { name Master }
+        }
+        pcm.cloop {
+          type hw
+          card ${toString cfg.loopback.index}
+          device 1
+          subdevice 0
+          format S32_LE
+        }
+        pcm.loophw01 {
+          type hw
+          card ${toString cfg.loopback.index}
+          device 0
+          subdevice 1
+        }
+        pcm.ploop {
+          type hw
+          card ${toString cfg.loopback.index}
+          device 1
+          subdevice 1
+          format S32_LE
+        }
+        pcm.aduplex {
+          type asym
+          playback.pcm "asoftvol"
+          capture.pcm "loophw01"
+        }
+        pcm.!default {
+          type plug
+          slave.pcm aduplex
+        }
+      '';
+      services.jack.loopback.session = ''
+        alsa_in -j cloop -dcloop &
+        alsa_out -j ploop -dploop &
+        while [ "$(jack_lsp cloop)" == "" ] || [ "$(jack_lsp ploop)" == "" ]; do sleep 1; done
+        jack_connect cloop:capture_1 system:playback_1
+        jack_connect cloop:capture_2 system:playback_2
+        jack_connect system:capture_1 ploop:playback_1
+        jack_connect system:capture_2 ploop:playback_2
+      '';
+
+      assertions = [
+        {
+          assertion = !(cfg.alsa.enable && cfg.loopback.enable);
+          message = "For JACK both alsa and loopback options shouldn't be used at the same time.";
+        }
+      ];
+
+      users.users.jackaudio = {
+        group = "jackaudio";
+        extraGroups = [ "audio" ];
+        description = "JACK Audio system service user";
+        isSystemUser = true;
+      };
+      # https://jackaudio.org/faq/linux_rt_config.html
+      security.pam.loginLimits = [
+        { domain = "@jackaudio"; type = "-"; item = "rtprio"; value = "99"; }
+        { domain = "@jackaudio"; type = "-"; item = "memlock"; value = "unlimited"; }
+      ];
+      users.groups.jackaudio = {};
+
+      environment = {
+        systemPackages = [ cfg.jackd.package ];
+        etc."alsa/conf.d/50-jack.conf".source = "${pkgs.alsa-plugins}/etc/alsa/conf.d/50-jack.conf";
+        variables.JACK_PROMISCUOUS_SERVER = "jackaudio";
+      };
+
+      services.udev.extraRules = ''
+        ACTION=="add", SUBSYSTEM=="sound", ATTRS{id}!="Loopback", TAG+="systemd", ENV{SYSTEMD_WANTS}="jack.service"
+      '';
+
+      systemd.services.jack = {
+        description = "JACK Audio Connection Kit";
+        serviceConfig = {
+          User = "jackaudio";
+          SupplementaryGroups = lib.optional
+            (config.hardware.pulseaudio.enable
+            && !config.hardware.pulseaudio.systemWide) "users";
+          ExecStart = "${cfg.jackd.package}/bin/jackd ${lib.escapeShellArgs cfg.jackd.extraOptions}";
+          LimitRTPRIO = 99;
+          LimitMEMLOCK = "infinity";
+        } // optionalAttrs umaskNeeded {
+          UMask = "007";
+        };
+        path = [ cfg.jackd.package ];
+        environment = {
+          JACK_PROMISCUOUS_SERVER = "jackaudio";
+          JACK_NO_AUDIO_RESERVATION = "1";
+        };
+        restartIfChanged = false;
+      };
+      systemd.services.jack-session = {
+        description = "JACK session";
+        script = ''
+          jack_wait -w
+          ${cfg.jackd.session}
+          ${lib.optionalString cfg.loopback.enable cfg.loopback.session}
+        '';
+        serviceConfig = {
+          RemainAfterExit = true;
+          User = "jackaudio";
+          StateDirectory = "jack";
+          LimitRTPRIO = 99;
+          LimitMEMLOCK = "infinity";
+        };
+        path = [ cfg.jackd.package ];
+        environment = {
+          JACK_PROMISCUOUS_SERVER = "jackaudio";
+          HOME = "/var/lib/jack";
+        };
+        wantedBy = [ "jack.service" ];
+        partOf = [ "jack.service" ];
+        after = [ "jack.service" ];
+        restartIfChanged = false;
+      };
+    })
+
+  ];
+
+  meta.maintainers = [ ];
+}
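
An illustrative sketch of the loopback bridge described above; "alice" is a hypothetical user that, per the jackd option text, must be in the jackaudio group.

    { ... }:
    {
      services.jack = {
        jackd.enable = true;
        # use the snd-aloop bridge instead of the ALSA PCM plugin (the two are mutually exclusive)
        alsa.enable = false;
        loopback.enable = true;
      };
      users.users.alice.extraGroups = [ "jackaudio" ];
    }
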
diff --git a/nixpkgs/nixos/modules/services/audio/jmusicbot.nix b/nixpkgs/nixos/modules/services/audio/jmusicbot.nix
new file mode 100644
index 000000000000..348c7b25682e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/jmusicbot.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.jmusicbot;
+in
+{
+  options = {
+    services.jmusicbot = {
+      enable = mkEnableOption (lib.mdDoc "jmusicbot, a Discord music bot that's easy to set up and run yourself");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.jmusicbot;
+        defaultText = literalExpression "pkgs.jmusicbot";
+        description = lib.mdDoc "JMusicBot package to use";
+      };
+
+      stateDir = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          The directory where config.txt and serversettings.json are saved.
+          If left as the default value this directory will automatically be created before JMusicBot starts, otherwise the sysadmin is responsible for ensuring the directory exists with appropriate ownership and permissions.
+          Regardless of this option's value, config.txt must be placed into this directory manually.
+        '';
+        default = "/var/lib/jmusicbot/";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.jmusicbot = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      description = "Discord music bot that's easy to set up and run yourself!";
+      serviceConfig = mkMerge [{
+        ExecStart = "${cfg.package}/bin/JMusicBot";
+        WorkingDirectory = cfg.stateDir;
+        Restart = "always";
+        RestartSec = 20;
+        DynamicUser = true;
+      }
+        (mkIf (cfg.stateDir == "/var/lib/jmusicbot/") { StateDirectory = "jmusicbot"; })];
+    };
+  };
+
+  meta.maintainers = with maintainers; [ ];
+}
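
An illustrative sketch enabling the bot with its default state directory; config.txt still has to be placed there by hand, as the option description notes.

    { ... }:
    {
      services.jmusicbot.enable = true;
      # stateDir defaults to /var/lib/jmusicbot/, which the service creates automatically
    }
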
diff --git a/nixpkgs/nixos/modules/services/audio/liquidsoap.nix b/nixpkgs/nixos/modules/services/audio/liquidsoap.nix
new file mode 100644
index 000000000000..9e61a7979619
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/liquidsoap.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  streams = builtins.attrNames config.services.liquidsoap.streams;
+
+  streamService =
+    name:
+    let stream = builtins.getAttr name config.services.liquidsoap.streams; in
+    { inherit name;
+      value = {
+        after = [ "network-online.target" "sound.target" ];
+        description = "${name} liquidsoap stream";
+        wantedBy = [ "multi-user.target" ];
+        path = [ pkgs.wget ];
+        serviceConfig = {
+          ExecStart = "${pkgs.liquidsoap}/bin/liquidsoap ${stream}";
+          User = "liquidsoap";
+          LogsDirectory = "liquidsoap";
+          Restart = "always";
+        };
+      };
+    };
+in
+{
+
+  ##### interface
+
+  options = {
+
+    services.liquidsoap.streams = mkOption {
+
+      description =
+        lib.mdDoc ''
+          Set of Liquidsoap streams to start,
+          one systemd service per stream.
+        '';
+
+      default = {};
+
+      example = literalExpression ''
+        {
+          myStream1 = "/etc/liquidsoap/myStream1.liq";
+          myStream2 = ./myStream2.liq;
+          myStream3 = "out(playlist(\"/srv/music/\"))";
+        }
+      '';
+
+      type = types.attrsOf (types.either types.path types.str);
+    };
+
+  };
+  ##### implementation
+
+  config = mkIf (builtins.length streams != 0) {
+
+    users.users.liquidsoap = {
+      uid = config.ids.uids.liquidsoap;
+      group = "liquidsoap";
+      extraGroups = [ "audio" ];
+      description = "Liquidsoap streaming user";
+      home = "/var/lib/liquidsoap";
+      createHome = true;
+    };
+
+    users.groups.liquidsoap.gid = config.ids.gids.liquidsoap;
+
+    systemd.services = builtins.listToAttrs ( map streamService streams );
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/audio/mopidy.nix b/nixpkgs/nixos/modules/services/audio/mopidy.nix
new file mode 100644
index 000000000000..40e8679f53d7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/mopidy.nix
@@ -0,0 +1,108 @@
+{ config, lib, pkgs, ... }:
+
+with pkgs;
+with lib;
+
+let
+  uid = config.ids.uids.mopidy;
+  gid = config.ids.gids.mopidy;
+  cfg = config.services.mopidy;
+
+  mopidyConf = writeText "mopidy.conf" cfg.configuration;
+
+  mopidyEnv = buildEnv {
+    name = "mopidy-with-extensions-${mopidy.version}";
+    paths = closePropagation cfg.extensionPackages;
+    pathsToLink = [ "/${mopidyPackages.python.sitePackages}" ];
+    nativeBuildInputs = [ makeWrapper ];
+    postBuild = ''
+      makeWrapper ${mopidy}/bin/mopidy $out/bin/mopidy \
+        --prefix PYTHONPATH : $out/${mopidyPackages.python.sitePackages}
+    '';
+  };
+in {
+
+  options = {
+
+    services.mopidy = {
+
+      enable = mkEnableOption (lib.mdDoc "Mopidy, a music player daemon");
+
+      dataDir = mkOption {
+        default = "/var/lib/mopidy";
+        type = types.str;
+        description = lib.mdDoc ''
+          The directory where Mopidy stores its state.
+        '';
+      };
+
+      extensionPackages = mkOption {
+        default = [];
+        type = types.listOf types.package;
+        example = literalExpression "[ pkgs.mopidy-spotify ]";
+        description = lib.mdDoc ''
+          Mopidy extensions that should be loaded by the service.
+        '';
+      };
+
+      configuration = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          The configuration that Mopidy should use.
+        '';
+      };
+
+      extraConfigFiles = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Extra config files read by Mopidy when the service starts.
+          Later files in the list override earlier configuration.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - mopidy mopidy - -"
+    ];
+
+    systemd.services.mopidy = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "sound.target" ];
+      description = "mopidy music player daemon";
+      serviceConfig = {
+        ExecStart = "${mopidyEnv}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)}";
+        User = "mopidy";
+      };
+    };
+
+    systemd.services.mopidy-scan = {
+      description = "mopidy local files scanner";
+      serviceConfig = {
+        ExecStart = "${mopidyEnv}/bin/mopidy --config ${concatStringsSep ":" ([mopidyConf] ++ cfg.extraConfigFiles)} local scan";
+        User = "mopidy";
+        Type = "oneshot";
+      };
+    };
+
+    users.users.mopidy = {
+      inherit uid;
+      group = "mopidy";
+      extraGroups = [ "audio" ];
+      description = "Mopidy daemon user";
+      home = cfg.dataDir;
+    };
+
+    users.groups.mopidy.gid = gid;
+
+  };
+
+}
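
An illustrative sketch combining a Mopidy extension with inline configuration; pkgs.mopidy-mpd is an assumed extension attribute, and the [file] section is standard Mopidy-File configuration.

    { pkgs, ... }:
    {
      services.mopidy = {
        enable = true;
        extensionPackages = [ pkgs.mopidy-mpd ];   # assumed attribute; any Mopidy extension package fits here
        configuration = ''
          [file]
          media_dirs = /mnt/music
        '';
      };
    }
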
diff --git a/nixpkgs/nixos/modules/services/audio/mpd.nix b/nixpkgs/nixos/modules/services/audio/mpd.nix
new file mode 100644
index 000000000000..3c853973c872
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/mpd.nix
@@ -0,0 +1,266 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  name = "mpd";
+
+  uid = config.ids.uids.mpd;
+  gid = config.ids.gids.mpd;
+  cfg = config.services.mpd;
+
+  credentialsPlaceholder = (creds:
+    let
+      placeholders = (imap0
+        (i: c: ''password "{{password-${toString i}}}@${concatStringsSep "," c.permissions}"'')
+        creds);
+    in
+      concatStringsSep "\n" placeholders);
+
+  mpdConf = pkgs.writeText "mpd.conf" ''
+    # This file was automatically generated by NixOS. Edit mpd's configuration
+    # via NixOS' configuration.nix, as this file will be rewritten upon mpd's
+    # restart.
+
+    music_directory     "${cfg.musicDirectory}"
+    playlist_directory  "${cfg.playlistDirectory}"
+    ${lib.optionalString (cfg.dbFile != null) ''
+      db_file             "${cfg.dbFile}"
+    ''}
+    state_file          "${cfg.dataDir}/state"
+    sticker_file        "${cfg.dataDir}/sticker.sql"
+
+    ${optionalString (cfg.network.listenAddress != "any") ''bind_to_address "${cfg.network.listenAddress}"''}
+    ${optionalString (cfg.network.port != 6600)  ''port "${toString cfg.network.port}"''}
+    ${optionalString (cfg.fluidsynth) ''
+      decoder {
+              plugin "fluidsynth"
+              soundfont "${pkgs.soundfont-fluid}/share/soundfonts/FluidR3_GM2-2.sf2"
+      }
+    ''}
+
+    ${optionalString (cfg.credentials != []) (credentialsPlaceholder cfg.credentials)}
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.mpd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable MPD, the music player daemon.
+        '';
+      };
+
+      startWhenNeeded = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If set, {command}`mpd` is socket-activated; that
+          is, instead of having it permanently running as a daemon,
+          systemd will start it on the first incoming connection.
+        '';
+      };
+
+      musicDirectory = mkOption {
+        type = with types; either path (strMatching "(http|https|nfs|smb)://.+");
+        default = "${cfg.dataDir}/music";
+        defaultText = literalExpression ''"''${dataDir}/music"'';
+        description = lib.mdDoc ''
+          The directory or NFS/SMB network share where MPD reads music from. If left
+          as the default value this directory will automatically be created before
+          the MPD server starts, otherwise the sysadmin is responsible for ensuring
+          the directory exists with appropriate ownership and permissions.
+        '';
+      };
+
+      playlistDirectory = mkOption {
+        type = types.path;
+        default = "${cfg.dataDir}/playlists";
+        defaultText = literalExpression ''"''${dataDir}/playlists"'';
+        description = lib.mdDoc ''
+          The directory where MPD stores playlists. If left as the default value
+          this directory will automatically be created before the MPD server starts,
+          otherwise the sysadmin is responsible for ensuring the directory exists
+          with appropriate ownership and permissions.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra directives added to the end of MPD's configuration file,
+          mpd.conf. Basic configuration like file location and uid/gid
+          is added automatically to the beginning of the file. For available
+          options see {manpage}`mpd.conf(5)`.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/${name}";
+        description = lib.mdDoc ''
+          The directory where MPD stores its state, tag cache, playlists etc. If
+          left as the default value this directory will automatically be created
+          before the MPD server starts, otherwise the sysadmin is responsible for
+          ensuring the directory exists with appropriate ownership and permissions.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "User account under which MPD runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "Group account under which MPD runs.";
+      };
+
+      network = {
+
+        listenAddress = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          example = "any";
+          description = lib.mdDoc ''
+            The address for the daemon to listen on.
+            Use `any` to listen on all addresses.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 6600;
+          description = lib.mdDoc ''
+            The TCP port the daemon should listen on.
+          '';
+        };
+
+      };
+
+      dbFile = mkOption {
+        type = types.nullOr types.str;
+        default = "${cfg.dataDir}/tag_cache";
+        defaultText = literalExpression ''"''${dataDir}/tag_cache"'';
+        description = lib.mdDoc ''
+          The path to MPD's database. If set to `null` the
+          parameter is omitted from the configuration.
+        '';
+      };
+
+      credentials = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            passwordFile = mkOption {
+              type = types.path;
+              description = lib.mdDoc ''
+                Path to file containing the password.
+              '';
+            };
+            permissions = let
+              perms = ["read" "add" "control" "admin"];
+            in mkOption {
+              type = types.listOf (types.enum perms);
+              default = [ "read" ];
+              description = lib.mdDoc ''
+                List of permissions that are granted with this password.
+                Permissions can be "${concatStringsSep "\", \"" perms}".
+              '';
+            };
+          };
+        });
+        description = lib.mdDoc ''
+          Credentials and permissions for accessing the mpd server.
+        '';
+        default = [];
+        example = [
+          {passwordFile = "/var/lib/secrets/mpd_readonly_password"; permissions = [ "read" ];}
+          {passwordFile = "/var/lib/secrets/mpd_admin_password"; permissions = ["read" "add" "control" "admin"];}
+        ];
+      };
+
+      fluidsynth = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If set, add fluidsynth soundfont and configure the plugin.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    # install mpd units
+    systemd.packages = [ pkgs.mpd ];
+
+    systemd.sockets.mpd = mkIf cfg.startWhenNeeded {
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [
+        ""  # Note: this is needed to override the upstream unit
+        (if pkgs.lib.hasPrefix "/" cfg.network.listenAddress
+          then cfg.network.listenAddress
+          else "${optionalString (cfg.network.listenAddress != "any") "${cfg.network.listenAddress}:"}${toString cfg.network.port}")
+      ];
+    };
+
+    systemd.services.mpd = {
+      wantedBy = optional (!cfg.startWhenNeeded) "multi-user.target";
+
+      preStart =
+        ''
+          set -euo pipefail
+          install -m 600 ${mpdConf} /run/mpd/mpd.conf
+        '' + optionalString (cfg.credentials != [])
+        (concatStringsSep "\n"
+          (imap0
+            (i: c: ''${pkgs.replace-secret}/bin/replace-secret '{{password-${toString i}}}' '${c.passwordFile}' /run/mpd/mpd.conf'')
+            cfg.credentials));
+
+      serviceConfig =
+        {
+          User = "${cfg.user}";
+          # Note: the first "" overrides the ExecStart from the upstream unit
+          ExecStart = [ "" "${pkgs.mpd}/bin/mpd --systemd /run/mpd/mpd.conf" ];
+          RuntimeDirectory = "mpd";
+          StateDirectory = []
+            ++ optionals (cfg.dataDir == "/var/lib/${name}") [ name ]
+            ++ optionals (cfg.playlistDirectory == "/var/lib/${name}/playlists") [ name "${name}/playlists" ]
+            ++ optionals (cfg.musicDirectory == "/var/lib/${name}/music")        [ name "${name}/music" ];
+        };
+    };
+
+    users.users = optionalAttrs (cfg.user == name) {
+      ${name} = {
+        inherit uid;
+        group = cfg.group;
+        extraGroups = [ "audio" ];
+        description = "Music Player Daemon user";
+        home = "${cfg.dataDir}";
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == name) {
+      ${name}.gid = gid;
+    };
+  };
+
+}
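
An illustrative sketch of MPD with socket activation and a credential file; the secret path and the PulseAudio output are placeholders, not module defaults.

    { ... }:
    {
      services.mpd = {
        enable = true;
        startWhenNeeded = true;                    # socket-activated via systemd.sockets.mpd above
        musicDirectory = "/mnt/music";
        network.listenAddress = "any";
        credentials = [
          { passwordFile = "/var/lib/secrets/mpd_admin_password";
            permissions = [ "read" "add" "control" "admin" ];
          }
        ];
        extraConfig = ''
          audio_output {
            type "pulse"
            name "PulseAudio output"
          }
        '';
      };
    }
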
diff --git a/nixpkgs/nixos/modules/services/audio/mpdscribble.nix b/nixpkgs/nixos/modules/services/audio/mpdscribble.nix
new file mode 100644
index 000000000000..132d9ad32588
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/mpdscribble.nix
@@ -0,0 +1,213 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mpdscribble;
+  mpdCfg = config.services.mpd;
+  mpdOpt = options.services.mpd;
+
+  endpointUrls = {
+    "last.fm" = "http://post.audioscrobbler.com";
+    "libre.fm" = "http://turtle.libre.fm";
+    "jamendo" = "http://postaudioscrobbler.jamendo.com";
+    "listenbrainz" = "http://proxy.listenbrainz.org";
+  };
+
+  mkSection = secname: secCfg: ''
+    [${secname}]
+    url      = ${secCfg.url}
+    username = ${secCfg.username}
+    password = {{${secname}_PASSWORD}}
+    journal  = /var/lib/mpdscribble/${secname}.journal
+  '';
+
+  endpoints = concatStringsSep "\n" (mapAttrsToList mkSection cfg.endpoints);
+  cfgTemplate = pkgs.writeText "mpdscribble.conf" ''
+    ## This file was automatically generated by NixOS and will be overwritten.
+    ## Do not edit. Edit your NixOS configuration instead.
+
+    ## mpdscribble - an audioscrobbler for the Music Player Daemon.
+    ## http://mpd.wikia.com/wiki/Client:mpdscribble
+
+    # HTTP proxy URL.
+    ${optionalString (cfg.proxy != null) "proxy = ${cfg.proxy}"}
+
+    # The location of the mpdscribble log file.  The special value
+    # "syslog" makes mpdscribble use the local syslog daemon.  On most
+    # systems, log messages will appear in /var/log/daemon.log then.
+    # "-" means log to stderr (the current terminal).
+    log = -
+
+    # How verbose mpdscribble's logging should be.  Default is 1.
+    verbose = ${toString cfg.verbose}
+
+    # How often should mpdscribble save the journal file? [seconds]
+    journal_interval = ${toString cfg.journalInterval}
+
+    # The host running MPD, possibly protected by a password
+    # ([PASSWORD@]HOSTNAME).
+    host = ${(optionalString (cfg.passwordFile != null) "{{MPD_PASSWORD}}@") + cfg.host}
+
+    # The port that the MPD listens on and mpdscribble should try to
+    # connect to.
+    port = ${toString cfg.port}
+
+    ${endpoints}
+  '';
+
+  cfgFile = "/run/mpdscribble/mpdscribble.conf";
+
+  replaceSecret = secretFile: placeholder: targetFile:
+    optionalString (secretFile != null) ''
+      ${pkgs.replace-secret}/bin/replace-secret '${placeholder}' '${secretFile}' '${targetFile}' '';
+
+  preStart = pkgs.writeShellScript "mpdscribble-pre-start" ''
+    cp -f "${cfgTemplate}" "${cfgFile}"
+    ${replaceSecret cfg.passwordFile "{{MPD_PASSWORD}}" cfgFile}
+    ${concatStringsSep "\n" (mapAttrsToList (secname: cfg:
+      replaceSecret cfg.passwordFile "{{${secname}_PASSWORD}}" cfgFile)
+      cfg.endpoints)}
+  '';
+
+  localMpd = (cfg.host == "localhost" || cfg.host == "127.0.0.1");
+
+in {
+  ###### interface
+
+  options.services.mpdscribble = {
+
+    enable = mkEnableOption (lib.mdDoc "mpdscribble");
+
+    proxy = mkOption {
+      default = null;
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        HTTP proxy URL.
+      '';
+    };
+
+    verbose = mkOption {
+      default = 1;
+      type = types.int;
+      description = lib.mdDoc ''
+        Log level for the mpdscribble daemon.
+      '';
+    };
+
+    journalInterval = mkOption {
+      default = 600;
+      example = 60;
+      type = types.int;
+      description = lib.mdDoc ''
+        How often should mpdscribble save the journal file? [seconds]
+      '';
+    };
+
+    host = mkOption {
+      default = (if mpdCfg.network.listenAddress != "any" then
+        mpdCfg.network.listenAddress
+      else
+        "localhost");
+      defaultText = literalExpression ''
+        if config.${mpdOpt.network.listenAddress} != "any"
+        then config.${mpdOpt.network.listenAddress}
+        else "localhost"
+      '';
+      type = types.str;
+      description = lib.mdDoc ''
+        Host on which mpdscribble should look for the mpd daemon.
+      '';
+    };
+
+    passwordFile = mkOption {
+      default = if localMpd then
+        (findFirst
+          (c: any (x: x == "read") c.permissions)
+          { passwordFile = null; }
+          mpdCfg.credentials).passwordFile
+      else
+        null;
+      defaultText = literalMD ''
+        The first password file with read access configured for MPD when using a local instance,
+        otherwise `null`.
+      '';
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        File containing the password for the mpd daemon.
+        If there is a local mpd configured using {option}`services.mpd.credentials`
+        the default is automatically set to a matching passwordFile of the local mpd.
+      '';
+    };
+
+    port = mkOption {
+      default = mpdCfg.network.port;
+      defaultText = literalExpression "config.${mpdOpt.network.port}";
+      type = types.port;
+      description = lib.mdDoc ''
+        Port on which mpdscribble should look for the mpd daemon.
+      '';
+    };
+
+    endpoints = mkOption {
+      type = (let
+        endpoint = { name, ... }: {
+          options = {
+            url = mkOption {
+              type = types.str;
+              default = endpointUrls.${name} or "";
+              description =
+                lib.mdDoc "The url endpoint where the scrobble API is listening.";
+            };
+            username = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                Username for the scrobble service.
+              '';
+            };
+            passwordFile = mkOption {
+              type = types.nullOr types.str;
+              description =
+                lib.mdDoc "File containing the password, either as MD5SUM or cleartext.";
+            };
+          };
+        };
+      in types.attrsOf (types.submodule endpoint));
+      default = { };
+      example = {
+        "last.fm" = {
+          username = "foo";
+          passwordFile = "/run/secrets/lastfm_password";
+        };
+      };
+      description = lib.mdDoc ''
+        Endpoints to scrobble to.
+        If the endpoint is one of "${
+          concatStringsSep "\", \"" (attrNames endpointUrls)
+        }" the url is set automatically.
+      '';
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.mpdscribble = {
+      after = [ "network.target" ] ++ (optional localMpd "mpd.service");
+      description = "mpdscribble mpd scrobble client";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "mpdscribble";
+        RuntimeDirectory = "mpdscribble";
+        RuntimeDirectoryMode = "700";
+        # TODO use LoadCredential= instead of running preStart with full privileges?
+        ExecStartPre = "+${preStart}";
+        ExecStart =
+          "${pkgs.mpdscribble}/bin/mpdscribble --no-daemon --conf ${cfgFile}";
+      };
+    };
+  };
+
+}
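
An illustrative sketch matching the endpoints example above; with a local services.mpd instance, host, port and passwordFile are picked up from the defaults shown earlier.

    { ... }:
    {
      services.mpdscribble = {
        enable = true;
        endpoints."last.fm" = {
          username = "alice";                              # hypothetical account name
          passwordFile = "/run/secrets/lastfm_password";   # MD5 hash or cleartext, per the option description
        };
      };
    }
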
diff --git a/nixpkgs/nixos/modules/services/audio/navidrome.nix b/nixpkgs/nixos/modules/services/audio/navidrome.nix
new file mode 100644
index 000000000000..77a0e74af9ca
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/navidrome.nix
@@ -0,0 +1,83 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.navidrome;
+  settingsFormat = pkgs.formats.json {};
+in {
+  options = {
+    services.navidrome = {
+
+      enable = mkEnableOption (lib.mdDoc "Navidrome music server");
+
+      package = mkPackageOptionMD pkgs "navidrome" { };
+
+      settings = mkOption rec {
+        type = settingsFormat.type;
+        apply = recursiveUpdate default;
+        default = {
+          Address = "127.0.0.1";
+          Port = 4533;
+        };
+        example = {
+          MusicFolder = "/mnt/music";
+        };
+        description = lib.mdDoc ''
+          Configuration for Navidrome, see <https://www.navidrome.org/docs/usage/configuration-options/> for supported values.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to open the TCP port in the firewall";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [cfg.settings.Port];
+
+    systemd.services.navidrome = {
+      description = "Navidrome Media Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/navidrome --configfile ${settingsFormat.generate "navidrome.json" cfg.settings}
+        '';
+        DynamicUser = true;
+        StateDirectory = "navidrome";
+        WorkingDirectory = "/var/lib/navidrome";
+        RuntimeDirectory = "navidrome";
+        RootDirectory = "/run/navidrome";
+        ReadWritePaths = "";
+        BindReadOnlyPaths = [
+          # navidrome uses online services to download additional album metadata / covers
+          "${config.environment.etc."ssl/certs/ca-certificates.crt".source}:/etc/ssl/certs/ca-certificates.crt"
+          builtins.storeDir
+          "/etc"
+        ] ++ lib.optional (cfg.settings ? MusicFolder) cfg.settings.MusicFolder;
+        CapabilityBoundingSet = "";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        RestrictRealtime = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        UMask = "0066";
+        ProtectHostname = true;
+      };
+    };
+  };
+}
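
An illustrative sketch exposing Navidrome on all interfaces; MusicFolder is bind-mounted read-only into the service's root directory, as shown above.

    { ... }:
    {
      services.navidrome = {
        enable = true;
        openFirewall = true;              # opens settings.Port (4533 by default)
        settings = {
          Address = "0.0.0.0";
          MusicFolder = "/mnt/music";
        };
      };
    }
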
diff --git a/nixpkgs/nixos/modules/services/audio/networkaudiod.nix b/nixpkgs/nixos/modules/services/audio/networkaudiod.nix
new file mode 100644
index 000000000000..11486429e667
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/networkaudiod.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  name = "networkaudiod";
+  cfg = config.services.networkaudiod;
+in {
+  options = {
+    services.networkaudiod = {
+      enable = mkEnableOption (lib.mdDoc "Networkaudiod (NAA)");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.packages = [ pkgs.networkaudiod ];
+    systemd.services.networkaudiod.wantedBy = [ "multi-user.target" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/audio/roon-bridge.nix b/nixpkgs/nixos/modules/services/audio/roon-bridge.nix
new file mode 100644
index 000000000000..027b0332fd1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/roon-bridge.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  name = "roon-bridge";
+  cfg = config.services.roon-bridge;
+in {
+  options = {
+    services.roon-bridge = {
+      enable = mkEnableOption (lib.mdDoc "Roon Bridge");
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the bridge.
+        '';
+      };
+      user = mkOption {
+        type = types.str;
+        default = "roon-bridge";
+        description = lib.mdDoc ''
+          User to run the Roon bridge as.
+        '';
+      };
+      group = mkOption {
+        type = types.str;
+        default = "roon-bridge";
+        description = lib.mdDoc ''
+          Group to run the Roon Bridge as.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.roon-bridge = {
+      after = [ "network.target" ];
+      description = "Roon Bridge";
+      wantedBy = [ "multi-user.target" ];
+
+      environment.ROON_DATAROOT = "/var/lib/${name}";
+
+      serviceConfig = {
+        ExecStart = "${pkgs.roon-bridge}/bin/RoonBridge";
+        LimitNOFILE = 8192;
+        User = cfg.user;
+        Group = cfg.group;
+        StateDirectory = name;
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPortRanges = [{ from = 9100; to = 9200; }];
+      allowedUDPPorts = [ 9003 ];
+      extraCommands = optionalString (!config.networking.nftables.enable) ''
+        iptables -A INPUT -s 224.0.0.0/4 -j ACCEPT
+        iptables -A INPUT -d 224.0.0.0/4 -j ACCEPT
+        iptables -A INPUT -s 240.0.0.0/5 -j ACCEPT
+        iptables -A INPUT -m pkttype --pkt-type multicast -j ACCEPT
+        iptables -A INPUT -m pkttype --pkt-type broadcast -j ACCEPT
+      '';
+      extraInputRules = optionalString config.networking.nftables.enable ''
+        ip saddr { 224.0.0.0/4, 240.0.0.0/5 } accept
+        ip daddr 224.0.0.0/4 accept
+        pkttype { multicast, broadcast } accept
+      '';
+    };
+
+
+    users.groups.${cfg.group} = {};
+    users.users.${cfg.user} =
+      optionalAttrs (cfg.user == "roon-bridge") {
+        isSystemUser = true;
+        description = "Roon Bridge user";
+        group = cfg.group;
+        extraGroups = [ "audio" ];
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/audio/roon-server.nix b/nixpkgs/nixos/modules/services/audio/roon-server.nix
new file mode 100644
index 000000000000..8691c08b0d36
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/roon-server.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  name = "roon-server";
+  cfg = config.services.roon-server;
+in {
+  options = {
+    services.roon-server = {
+      enable = mkEnableOption (lib.mdDoc "Roon Server");
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the server.
+        '';
+      };
+      user = mkOption {
+        type = types.str;
+        default = "roon-server";
+        description = lib.mdDoc ''
+          User to run the Roon Server as.
+        '';
+      };
+      group = mkOption {
+        type = types.str;
+        default = "roon-server";
+        description = lib.mdDoc ''
+          Group to run the Roon Server as.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.roon-server = {
+      after = [ "network.target" ];
+      description = "Roon Server";
+      wantedBy = [ "multi-user.target" ];
+
+      environment.ROON_DATAROOT = "/var/lib/${name}";
+      environment.ROON_ID_DIR = "/var/lib/${name}";
+
+      serviceConfig = {
+        ExecStart = "${pkgs.roon-server}/bin/RoonServer";
+        LimitNOFILE = 8192;
+        User = cfg.user;
+        Group = cfg.group;
+        StateDirectory = name;
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPortRanges = [
+        { from = 9100; to = 9200; }
+        { from = 9330; to = 9339; }
+        { from = 30000; to = 30010; }
+      ];
+      allowedUDPPorts = [ 9003 ];
+      extraCommands = optionalString (!config.networking.nftables.enable) ''
+        ## IGMP / Broadcast ##
+        iptables -A INPUT -s 224.0.0.0/4 -j ACCEPT
+        iptables -A INPUT -d 224.0.0.0/4 -j ACCEPT
+        iptables -A INPUT -s 240.0.0.0/5 -j ACCEPT
+        iptables -A INPUT -m pkttype --pkt-type multicast -j ACCEPT
+        iptables -A INPUT -m pkttype --pkt-type broadcast -j ACCEPT
+      '';
+      extraInputRules = optionalString config.networking.nftables.enable ''
+        ip saddr { 224.0.0.0/4, 240.0.0.0/5 } accept
+        ip daddr 224.0.0.0/4 accept
+        pkttype { multicast, broadcast } accept
+      '';
+    };
+
+
+    users.groups.${cfg.group} = {};
+    users.users.${cfg.user} =
+      optionalAttrs (cfg.user == "roon-server") {
+        isSystemUser = true;
+        description = "Roon Server user";
+        group = cfg.group;
+        extraGroups = [ "audio" ];
+      };
+  };
+}
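The same pattern applies to the server module above; note that the optionalAttrs guard only creates the account when the default user name is kept, so an overridden user must be defined elsewhere:

  {
    services.roon-server = {
      enable = true;
      openFirewall = true;
    };
  }
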
diff --git a/nixpkgs/nixos/modules/services/audio/slimserver.nix b/nixpkgs/nixos/modules/services/audio/slimserver.nix
new file mode 100644
index 000000000000..cdd9d551c501
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/slimserver.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.slimserver;
+
+in {
+  options = {
+
+    services.slimserver = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable slimserver.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.slimserver;
+        defaultText = literalExpression "pkgs.slimserver";
+        description = lib.mdDoc "Slimserver package to use.";
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/slimserver";
+        description = lib.mdDoc ''
+          The directory where slimserver stores its state, tag cache,
+          playlists etc.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - slimserver slimserver - -"
+    ];
+
+    systemd.services.slimserver = {
+      after = [ "network.target" ];
+      description = "Slim Server for Logitech Squeezebox Players";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = "slimserver";
+        # Issue 40589: Disable broken image/video support (audio still works!)
+        ExecStart = "${lib.getExe cfg.package} --logdir ${cfg.dataDir}/logs --prefsdir ${cfg.dataDir}/prefs --cachedir ${cfg.dataDir}/cache --noimage --novideo";
+      };
+    };
+
+    users = {
+      users.slimserver = {
+        description = "Slimserver daemon user";
+        home = cfg.dataDir;
+        group = "slimserver";
+        isSystemUser = true;
+      };
+      groups.slimserver = {};
+    };
+  };
+
+}
+
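A usage sketch for the slimserver module above; dataDir defaults to /var/lib/slimserver and is created by the tmpfiles rule, so only enable is strictly required:

  {
    services.slimserver = {
      enable = true;
      # dataDir = "/var/lib/slimserver";  # default, shown for reference
    };
  }
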
diff --git a/nixpkgs/nixos/modules/services/audio/snapserver.nix b/nixpkgs/nixos/modules/services/audio/snapserver.nix
new file mode 100644
index 000000000000..dbab741bf6fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/snapserver.nix
@@ -0,0 +1,316 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  name = "snapserver";
+
+  cfg = config.services.snapserver;
+
+  # Using types.nullOr to inherit upstream defaults.
+  sampleFormat = mkOption {
+    type = with types; nullOr str;
+    default = null;
+    description = lib.mdDoc ''
+      Default sample format.
+    '';
+    example = "48000:16:2";
+  };
+
+  codec = mkOption {
+    type = with types; nullOr str;
+    default = null;
+    description = lib.mdDoc ''
+      Default audio compression method.
+    '';
+    example = "flac";
+  };
+
+  streamToOption = name: opt:
+    let
+      os = val:
+        optionalString (val != null) "${val}";
+      os' = prefix: val:
+        optionalString (val != null) (prefix + "${val}");
+      flatten = key: value:
+        "&${key}=${value}";
+    in
+      "--stream.stream=\"${opt.type}://" + os opt.location + "?" + os' "name=" name
+        + concatStrings (mapAttrsToList flatten opt.query) + "\"";
+
+  optionalNull = val: ret:
+    optional (val != null) ret;
+
+  optionString = concatStringsSep " " (mapAttrsToList streamToOption cfg.streams
+    # global options
+    ++ [ "--stream.bind_to_address=${cfg.listenAddress}" ]
+    ++ [ "--stream.port=${toString cfg.port}" ]
+    ++ optionalNull cfg.sampleFormat "--stream.sampleformat=${cfg.sampleFormat}"
+    ++ optionalNull cfg.codec "--stream.codec=${cfg.codec}"
+    ++ optionalNull cfg.streamBuffer "--stream.stream_buffer=${toString cfg.streamBuffer}"
+    ++ optionalNull cfg.buffer "--stream.buffer=${toString cfg.buffer}"
+    ++ optional cfg.sendToMuted "--stream.send_to_muted"
+    # tcp json rpc
+    ++ [ "--tcp.enabled=${toString cfg.tcp.enable}" ]
+    ++ optionals cfg.tcp.enable [
+      "--tcp.bind_to_address=${cfg.tcp.listenAddress}"
+      "--tcp.port=${toString cfg.tcp.port}" ]
+     # http json rpc
+    ++ [ "--http.enabled=${toString cfg.http.enable}" ]
+    ++ optionals cfg.http.enable [
+      "--http.bind_to_address=${cfg.http.listenAddress}"
+      "--http.port=${toString cfg.http.port}"
+    ] ++ optional (cfg.http.docRoot != null) "--http.doc_root=\"${toString cfg.http.docRoot}\"");
+
+in {
+  imports = [
+    (mkRenamedOptionModule [ "services" "snapserver" "controlPort" ] [ "services" "snapserver" "tcp" "port" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.snapserver = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable snapserver.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "::";
+        example = "0.0.0.0";
+        description = lib.mdDoc ''
+          The address where snapclients can connect.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 1704;
+        description = lib.mdDoc ''
+          The port that snapclients can connect to.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to automatically open the specified ports in the firewall.
+        '';
+      };
+
+      inherit sampleFormat;
+      inherit codec;
+
+      streamBuffer = mkOption {
+        type = with types; nullOr int;
+        default = null;
+        description = lib.mdDoc ''
+          Stream read (input) buffer in ms.
+        '';
+        example = 20;
+      };
+
+      buffer = mkOption {
+        type = with types; nullOr int;
+        default = null;
+        description = lib.mdDoc ''
+          Network buffer in ms.
+        '';
+        example = 1000;
+      };
+
+      sendToMuted = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Send audio to muted clients.
+        '';
+      };
+
+      tcp.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the JSON-RPC via TCP.
+        '';
+      };
+
+      tcp.listenAddress = mkOption {
+        type = types.str;
+        default = "::";
+        example = "0.0.0.0";
+        description = lib.mdDoc ''
+          The address the TCP JSON-RPC interface listens on.
+        '';
+      };
+
+      tcp.port = mkOption {
+        type = types.port;
+        default = 1705;
+        description = lib.mdDoc ''
+          The port the TCP JSON-RPC interface listens on.
+        '';
+      };
+
+      http.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the JSON-RPC via HTTP.
+        '';
+      };
+
+      http.listenAddress = mkOption {
+        type = types.str;
+        default = "::";
+        example = "0.0.0.0";
+        description = lib.mdDoc ''
+          The address the HTTP JSON-RPC interface listens on.
+        '';
+      };
+
+      http.port = mkOption {
+        type = types.port;
+        default = 1780;
+        description = lib.mdDoc ''
+          The port the HTTP JSON-RPC interface listens on.
+        '';
+      };
+
+      http.docRoot = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to serve from the HTTP server's root.
+        '';
+      };
+
+      streams = mkOption {
+        type = with types; attrsOf (submodule {
+          options = {
+            location = mkOption {
+              type = types.oneOf [ types.path types.str ];
+              description = lib.mdDoc ''
+                For type `pipe` or `file`, the path to the pipe or file.
+                For type `librespot`, `airplay` or `process`, the path to the corresponding binary.
+                For type `tcp`, the `host:port` address to connect to or listen on.
+                For type `meta`, a list of stream names in the form `/one/two/...`. Don't forget the leading slash.
+                For type `alsa`, use an empty string.
+              '';
+              example = literalExpression ''
+                "/path/to/pipe"
+                "/path/to/librespot"
+                "192.168.1.2:4444"
+                "/MyTCP/Spotify/MyPipe"
+              '';
+            };
+            type = mkOption {
+              type = types.enum [ "pipe" "librespot" "airplay" "file" "process" "tcp" "alsa" "spotify" "meta" ];
+              default = "pipe";
+              description = lib.mdDoc ''
+                The type of input stream.
+              '';
+            };
+            query = mkOption {
+              type = attrsOf str;
+              default = {};
+              description = lib.mdDoc ''
+                Key-value pairs that convey additional parameters about a stream.
+              '';
+              example = literalExpression ''
+                # for type == "pipe":
+                {
+                  mode = "create";
+                };
+                # for type == "process":
+                {
+                  params = "--param1 --param2";
+                  logStderr = "true";
+                };
+                # for type == "tcp":
+                {
+                  mode = "client";
+                }
+                # for type == "alsa":
+                {
+                  device = "hw:0,0";
+                }
+              '';
+            };
+            inherit sampleFormat;
+            inherit codec;
+          };
+        });
+        default = { default = {}; };
+        description = lib.mdDoc ''
+          The input stream definitions, keyed by stream name.
+        '';
+        example = literalExpression ''
+          {
+            mpd = {
+              type = "pipe";
+              location = "/run/snapserver/mpd";
+              sampleFormat = "48000:16:2";
+              codec = "pcm";
+            };
+          };
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    warnings =
+      # https://github.com/badaix/snapcast/blob/98ac8b2fb7305084376607b59173ce4097c620d8/server/streamreader/stream_manager.cpp#L85
+      filter (w: w != "") (mapAttrsToList (k: v: optionalString (v.type == "spotify") ''
+        services.snapserver.streams.${k}.type = "spotify" is deprecated, use services.snapserver.streams.${k}.type = "librespot" instead.
+      '') cfg.streams);
+
+    systemd.services.snapserver = {
+      after = [ "network.target" ];
+      description = "Snapserver";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "mpd.service" "mopidy.service" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${pkgs.snapcast}/bin/snapserver --daemon ${optionString}";
+        Type = "forking";
+        LimitRTPRIO = 50;
+        LimitRTTIME = "infinity";
+        NoNewPrivileges = true;
+        PIDFile = "/run/${name}/pid";
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX AF_NETLINK";
+        RestrictNamespaces = true;
+        RuntimeDirectory = name;
+        StateDirectory = name;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts =
+      optionals cfg.openFirewall [ cfg.port ]
+      ++ optional (cfg.openFirewall && cfg.tcp.enable) cfg.tcp.port
+      ++ optional (cfg.openFirewall && cfg.http.enable) cfg.http.port;
+  };
+
+  meta = {
+    maintainers = with maintainers; [ tobim ];
+  };
+
+}
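A usage sketch for the snapserver module above, mirroring the streams example from the option description; the streamToOption helper renders the mpd entry as roughly --stream.stream="pipe:///run/snapserver/mpd?name=mpd" on the snapserver command line:

  {
    services.snapserver = {
      enable = true;
      codec = "flac";
      openFirewall = true;
      streams.mpd = {
        type = "pipe";
        location = "/run/snapserver/mpd";
      };
    };
  }
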
diff --git a/nixpkgs/nixos/modules/services/audio/spotifyd.nix b/nixpkgs/nixos/modules/services/audio/spotifyd.nix
new file mode 100644
index 000000000000..975be5a87cba
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/spotifyd.nix
@@ -0,0 +1,68 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.spotifyd;
+  toml = pkgs.formats.toml {};
+  warnConfig =
+    if cfg.config != ""
+    then lib.trace "Using the stringly typed .config attribute is discouraged. Use the TOML typed .settings attribute instead."
+    else id;
+  spotifydConf =
+    if cfg.settings != {}
+    then toml.generate "spotify.conf" cfg.settings
+    else warnConfig (pkgs.writeText "spotifyd.conf" cfg.config);
+in
+{
+  options = {
+    services.spotifyd = {
+      enable = mkEnableOption (lib.mdDoc "spotifyd, a Spotify playing daemon");
+
+      config = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          (Deprecated) Configuration for Spotifyd. For syntax and directives, see
+          <https://github.com/Spotifyd/spotifyd#Configuration>.
+        '';
+      };
+
+      settings = mkOption {
+        default = {};
+        type = toml.type;
+        example = { global.bitrate = 320; };
+        description = lib.mdDoc ''
+          Configuration for Spotifyd. For syntax and directives, see
+          <https://github.com/Spotifyd/spotifyd#Configuration>.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.config == "" || cfg.settings == {};
+        message = "At most one of the .config attribute and the .settings attribute may be set";
+      }
+    ];
+
+    systemd.services.spotifyd = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "sound.target" ];
+      description = "spotifyd, a Spotify playing daemon";
+      environment.SHELL = "/bin/sh";
+      serviceConfig = {
+        ExecStart = "${pkgs.spotifyd}/bin/spotifyd --no-daemon --cache-path /var/cache/spotifyd --config-path ${spotifydConf}";
+        Restart = "always";
+        RestartSec = 12;
+        DynamicUser = true;
+        CacheDirectory = "spotifyd";
+        SupplementaryGroups = ["audio"];
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.anderslundstedt ];
+}
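A usage sketch for the spotifyd module above, using the TOML-typed settings attribute; the assertion above forbids combining it with the deprecated stringly typed config attribute:

  {
    services.spotifyd = {
      enable = true;
      settings.global.bitrate = 320;
    };
  }
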
diff --git a/nixpkgs/nixos/modules/services/audio/squeezelite.nix b/nixpkgs/nixos/modules/services/audio/squeezelite.nix
new file mode 100644
index 000000000000..30dc12552f00
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/squeezelite.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption optionalString types;
+
+  dataDir = "/var/lib/squeezelite";
+  cfg = config.services.squeezelite;
+  pkg = if cfg.pulseAudio then pkgs.squeezelite-pulse else pkgs.squeezelite;
+  bin = "${pkg}/bin/${pkg.pname}";
+
+in
+{
+
+  ###### interface
+
+  options.services.squeezelite = {
+    enable = mkEnableOption (lib.mdDoc "Squeezelite, a software Squeezebox emulator");
+
+    pulseAudio = mkEnableOption (lib.mdDoc "pulseaudio support");
+
+    extraArguments = mkOption {
+      default = "";
+      type = types.str;
+      description = lib.mdDoc ''
+        Additional command line arguments to pass to Squeezelite.
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.squeezelite = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "sound.target" ];
+      description = "Software Squeezebox emulator";
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${bin} -N ${dataDir}/player-name ${cfg.extraArguments}";
+        StateDirectory = builtins.baseNameOf dataDir;
+        SupplementaryGroups = "audio";
+      };
+    };
+  };
+}
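A usage sketch for the squeezelite module above; setting pulseAudio switches the package from squeezelite to squeezelite-pulse, as selected in the let binding:

  {
    services.squeezelite = {
      enable = true;
      pulseAudio = true;
    };
  }
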
diff --git a/nixpkgs/nixos/modules/services/audio/tts.nix b/nixpkgs/nixos/modules/services/audio/tts.nix
new file mode 100644
index 000000000000..0d93224ec030
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/tts.nix
@@ -0,0 +1,152 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.tts;
+in
+
+{
+  options.services.tts = let
+    inherit (lib) literalExpression mkOption mdDoc mkEnableOption types;
+  in  {
+    servers = mkOption {
+      type = types.attrsOf (types.submodule (
+        { ... }: {
+          options = {
+            enable = mkEnableOption (mdDoc "Coqui TTS server");
+
+            port = mkOption {
+              type = types.port;
+              example = 5000;
+              description = mdDoc ''
+                Port to bind the TTS server to.
+              '';
+            };
+
+            model = mkOption {
+              type = types.nullOr types.str;
+              default = "tts_models/en/ljspeech/tacotron2-DDC";
+              example = null;
+              description = mdDoc ''
+                Name of the model to download and use for speech synthesis.
+
+                Check `tts-server --list_models` for possible values.
+
+                Set to `null` to use a custom model.
+              '';
+            };
+
+            useCuda = mkOption {
+              type = types.bool;
+              default = false;
+              example = true;
+              description = mdDoc ''
+                Whether to offload computation onto a CUDA compatible GPU.
+              '';
+            };
+
+            extraArgs = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = mdDoc ''
+                Extra arguments to pass to the server commandline.
+              '';
+            };
+          };
+        }
+      ));
+      default = {};
+      example = literalExpression ''
+        {
+          english = {
+            port = 5300;
+            model = "tts_models/en/ljspeech/tacotron2-DDC";
+          };
+          german = {
+            port = 5301;
+            model = "tts_models/de/thorsten/tacotron2-DDC";
+          };
+          dutch = {
+            port = 5302;
+            model = "tts_models/nl/mai/tacotron2-DDC";
+          };
+        }
+      '';
+      description = mdDoc ''
+        TTS server instances.
+      '';
+    };
+  };
+
+  config = let
+    inherit (lib) mkIf mapAttrs' nameValuePair optionalString concatMapStringsSep escapeShellArgs;
+  in mkIf (cfg.servers != {}) {
+    systemd.services = mapAttrs' (server: options:
+      nameValuePair "tts-${server}" {
+        description = "Coqui TTS server instance ${server}";
+        after = [
+          "network-online.target"
+        ];
+        wantedBy = [
+          "multi-user.target"
+        ];
+        path = with pkgs; [
+          espeak-ng
+        ];
+        environment.HOME = "/var/lib/tts";
+        serviceConfig = {
+          DynamicUser = true;
+          User = "tts";
+          StateDirectory = "tts";
+          ExecStart = "${pkgs.tts}/bin/tts-server --port ${toString options.port}"
+            + optionalString (options.model != null) " --model_name ${options.model}"
+            + optionalString (options.useCuda) " --use_cuda"
+            # separate extra arguments with a space and escape them for the shell
+            + optionalString (options.extraArgs != []) " ${escapeShellArgs options.extraArgs}";
+          CapabilityBoundingSet = "";
+          DeviceAllow = if options.useCuda then [
+            # https://docs.nvidia.com/dgx/pdf/dgx-os-5-user-guide.pdf
+            "/dev/nvidia1"
+            "/dev/nvidia2"
+            "/dev/nvidia3"
+            "/dev/nvidia4"
+            "/dev/nvidia-caps/nvidia-cap1"
+            "/dev/nvidia-caps/nvidia-cap2"
+            "/dev/nvidiactl"
+            "/dev/nvidia-modeset"
+            "/dev/nvidia-uvm"
+            "/dev/nvidia-uvm-tools"
+          ] else "";
+          DevicePolicy = "closed";
+          LockPersonality = true;
+          # jit via numba->llvmpipe
+          MemoryDenyWriteExecute = false;
+          PrivateDevices = true;
+          PrivateUsers = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectControlGroups = true;
+          ProtectProc = "invisible";
+          ProcSubset = "pid";
+          RestrictAddressFamilies = [
+            "AF_UNIX"
+            "AF_INET"
+            "AF_INET6"
+          ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [
+            "@system-service"
+            "~@privileged"
+          ];
+          UMask = "0077";
+        };
+      }) cfg.servers;
+  };
+}
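A usage sketch for the Coqui TTS module above, taken from the english entry of the servers example; the instance name becomes the systemd unit tts-english.service:

  {
    services.tts.servers.english = {
      enable = true;
      port = 5300;
      model = "tts_models/en/ljspeech/tacotron2-DDC";
    };
  }
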
diff --git a/nixpkgs/nixos/modules/services/audio/wyoming/faster-whisper.nix b/nixpkgs/nixos/modules/services/audio/wyoming/faster-whisper.nix
new file mode 100644
index 000000000000..2d56acdc1b4c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/wyoming/faster-whisper.nix
@@ -0,0 +1,190 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.wyoming.faster-whisper;
+
+  inherit (lib)
+    escapeShellArgs
+    mkOption
+    mdDoc
+    mkEnableOption
+    mkPackageOptionMD
+    types
+    ;
+
+  inherit (builtins)
+    toString
+    ;
+
+in
+
+{
+  options.services.wyoming.faster-whisper = with types; {
+    package = mkPackageOptionMD pkgs "wyoming-faster-whisper" { };
+
+    servers = mkOption {
+      default = {};
+      description = mdDoc ''
+        Attribute set of faster-whisper instances to spawn.
+      '';
+      type = types.attrsOf (types.submodule (
+        { ... }: {
+          options = {
+            enable = mkEnableOption (mdDoc "Wyoming faster-whisper server");
+
+            model = mkOption {
+              # Intersection between available and referenced models here:
+              # https://github.com/rhasspy/models/releases/tag/v1.0
+              # https://github.com/rhasspy/rhasspy3/blob/wyoming-v1/programs/asr/faster-whisper/server/wyoming_faster_whisper/download.py#L17-L27
+              type = enum [
+                "tiny"
+                "tiny-int8"
+                "base"
+                "base-int8"
+                "small"
+                "small-int8"
+                "medium-int8"
+              ];
+              default = "tiny-int8";
+              example = "medium-int8";
+              description = mdDoc ''
+                Name of the voice model to use.
+              '';
+            };
+
+            uri = mkOption {
+              type = strMatching "^(tcp|unix)://.*$";
+              example = "tcp://0.0.0.0:10300";
+              description = mdDoc ''
+                URI to bind the wyoming server to.
+              '';
+            };
+
+            device = mkOption {
+              # https://opennmt.net/CTranslate2/python/ctranslate2.models.Whisper.html#
+              type = types.enum [
+                "cpu"
+                "cuda"
+                "auto"
+              ];
+              default = "cpu";
+              description = mdDoc ''
+                Determines the platform faster-whisper is run on. CPU works everywhere, CUDA requires a compatible NVIDIA GPU.
+              '';
+            };
+
+            language = mkOption {
+              type = enum [
+                # https://github.com/home-assistant/addons/blob/master/whisper/config.yaml#L20
+                "auto" "af" "am" "ar" "as" "az" "ba" "be" "bg" "bn" "bo" "br" "bs" "ca" "cs" "cy" "da" "de" "el" "en" "es" "et" "eu" "fa" "fi" "fo" "fr" "gl" "gu" "ha" "haw" "he" "hi" "hr" "ht" "hu" "hy" "id" "is" "it" "ja" "jw" "ka" "kk" "km" "kn" "ko" "la" "lb" "ln" "lo" "lt" "lv" "mg" "mi" "mk" "ml" "mn" "mr" "ms" "mt" "my" "ne" "nl" "nn" "no" "oc" "pa" "pl" "ps" "pt" "ro" "ru" "sa" "sd" "si" "sk" "sl" "sn" "so" "sq" "sr" "su" "sv" "sw" "ta" "te" "tg" "th" "tk" "tl" "tr" "tt" "uk" "ur" "uz" "vi" "yi" "yo" "zh"
+              ];
+              example = "en";
+              description = mdDoc ''
+                The language used to parse words and sentences.
+              '';
+            };
+
+            beamSize = mkOption {
+              type = ints.unsigned;
+              default = 1;
+              example = 5;
+              description = mdDoc ''
+                The number of beams to use in beam search.
+              '';
+              apply = toString;
+            };
+
+            extraArgs = mkOption {
+              type = listOf str;
+              default = [ ];
+              description = mdDoc ''
+                Extra arguments to pass to the server commandline.
+              '';
+              apply = escapeShellArgs;
+            };
+          };
+        }
+      ));
+    };
+  };
+
+  config = let
+    inherit (lib)
+      mapAttrs'
+      mkIf
+      nameValuePair
+    ;
+  in mkIf (cfg.servers != {}) {
+    systemd.services = mapAttrs' (server: options:
+      nameValuePair "wyoming-faster-whisper-${server}" {
+        description = "Wyoming faster-whisper server instance ${server}";
+        after = [
+          "network-online.target"
+        ];
+        wantedBy = [
+          "multi-user.target"
+        ];
+        serviceConfig = {
+          DynamicUser = true;
+          User = "wyoming-faster-whisper";
+          StateDirectory = "wyoming/faster-whisper";
+          # https://github.com/home-assistant/addons/blob/master/whisper/rootfs/etc/s6-overlay/s6-rc.d/whisper/run
+          ExecStart = ''
+            ${cfg.package}/bin/wyoming-faster-whisper \
+              --data-dir $STATE_DIRECTORY \
+              --download-dir $STATE_DIRECTORY \
+              --uri ${options.uri} \
+              --device ${options.device} \
+              --model ${options.model} \
+              --language ${options.language} \
+              --beam-size ${options.beamSize} ${options.extraArgs}
+          '';
+          CapabilityBoundingSet = "";
+          DeviceAllow = if builtins.elem options.device [ "cuda" "auto" ] then [
+            # https://docs.nvidia.com/dgx/pdf/dgx-os-5-user-guide.pdf
+            # CUDA not working? Check DeviceAllow and PrivateDevices first!
+            "/dev/nvidia0"
+            "/dev/nvidia1"
+            "/dev/nvidia2"
+            "/dev/nvidia3"
+            "/dev/nvidia4"
+            "/dev/nvidia-caps/nvidia-cap1"
+            "/dev/nvidia-caps/nvidia-cap2"
+            "/dev/nvidiactl"
+            "/dev/nvidia-modeset"
+            "/dev/nvidia-uvm"
+            "/dev/nvidia-uvm-tools"
+          ] else "";
+          DevicePolicy = "closed";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          PrivateUsers = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectControlGroups = true;
+          ProtectProc = "invisible";
+          ProcSubset = "pid";
+          RestrictAddressFamilies = [
+            "AF_INET"
+            "AF_INET6"
+            "AF_UNIX"
+          ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [
+            "@system-service"
+            "~@privileged"
+          ];
+          UMask = "0077";
+        };
+      }) cfg.servers;
+  };
+}
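A usage sketch for the faster-whisper module above; the instance name living-room is illustrative and yields the unit wyoming-faster-whisper-living-room.service:

  {
    services.wyoming.faster-whisper.servers.living-room = {
      enable = true;
      model = "tiny-int8";
      language = "en";
      device = "cpu";
      uri = "tcp://0.0.0.0:10300";
    };
  }
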
diff --git a/nixpkgs/nixos/modules/services/audio/wyoming/openwakeword.nix b/nixpkgs/nixos/modules/services/audio/wyoming/openwakeword.nix
new file mode 100644
index 000000000000..987818246bde
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/wyoming/openwakeword.nix
@@ -0,0 +1,163 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.wyoming.openwakeword;
+
+  inherit (lib)
+    concatStringsSep
+    concatMapStringsSep
+    escapeShellArgs
+    mkOption
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkPackageOptionMD
+    mkRemovedOptionModule
+    types
+    ;
+
+  inherit (builtins)
+    toString
+    ;
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "wyoming" "openwakeword" "models" ] "Configuring models has been removed, they are now dynamically discovered and loaded at runtime")
+  ];
+
+  meta.buildDocsInSandbox = false;
+
+  options.services.wyoming.openwakeword = with types; {
+    enable = mkEnableOption (mdDoc "Wyoming openWakeWord server");
+
+    package = mkPackageOptionMD pkgs "wyoming-openwakeword" { };
+
+    uri = mkOption {
+      type = strMatching "^(tcp|unix)://.*$";
+      default = "tcp://0.0.0.0:10400";
+      example = "tcp://192.0.2.1:5000";
+      description = mdDoc ''
+        URI to bind the wyoming server to.
+      '';
+    };
+
+    customModelsDirectories = mkOption {
+      type = listOf types.path;
+      default = [];
+      description = lib.mdDoc ''
+        Paths to directories with custom wake word models (*.tflite model files).
+      '';
+    };
+
+    preloadModels = mkOption {
+      type = listOf str;
+      default = [
+        "ok_nabu"
+      ];
+      example = [
+        # wyoming_openwakeword/models/*.tflite
+        "alexa"
+        "hey_jarvis"
+        "hey_mycroft"
+        "hey_rhasspy"
+        "ok_nabu"
+      ];
+      description = mdDoc ''
+        List of wake word models to preload after startup.
+      '';
+    };
+
+    threshold = mkOption {
+      type = float;
+      default = 0.5;
+      description = mdDoc ''
+        Activation threshold (0-1), where higher means fewer activations.
+
+        See trigger level for the relationship between activations and
+        wake word detections.
+      '';
+      apply = toString;
+    };
+
+    triggerLevel = mkOption {
+      type = int;
+      default = 1;
+      description = mdDoc ''
+        Number of activations before a detection is registered.
+
+        A higher trigger level means fewer detections.
+      '';
+      apply = toString;
+    };
+
+    extraArgs = mkOption {
+      type = listOf str;
+      default = [ ];
+      description = mdDoc ''
+        Extra arguments to pass to the server commandline.
+      '';
+      apply = escapeShellArgs;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services."wyoming-openwakeword" = {
+      description = "Wyoming openWakeWord server";
+      after = [
+        "network-online.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+      serviceConfig = {
+        DynamicUser = true;
+        User = "wyoming-openwakeword";
+        # https://github.com/home-assistant/addons/blob/master/openwakeword/rootfs/etc/s6-overlay/s6-rc.d/openwakeword/run
+        ExecStart = concatStringsSep " " [
+          "${cfg.package}/bin/wyoming-openwakeword"
+          "--uri ${cfg.uri}"
+          (concatMapStringsSep " " (model: "--preload-model ${model}") cfg.preloadModels)
+          (concatMapStringsSep " " (dir: "--custom-model-dir ${toString dir}") cfg.customModelsDirectories)
+          "--threshold ${cfg.threshold}"
+          "--trigger-level ${cfg.triggerLevel}"
+          "${cfg.extraArgs}"
+        ];
+        CapabilityBoundingSet = "";
+        DeviceAllow = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectProc = "invisible";
+        ProcSubset = "all"; # reads /proc/cpuinfo
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_UNIX"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RuntimeDirectory = "wyoming-openwakeword";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        UMask = "0077";
+      };
+    };
+  };
+}
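A usage sketch for the openWakeWord module above; the preloaded model names come from the bundled models listed in the option example:

  {
    services.wyoming.openwakeword = {
      enable = true;
      preloadModels = [ "ok_nabu" "hey_jarvis" ];
      threshold = 0.5;
      triggerLevel = 1;
    };
  }
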
diff --git a/nixpkgs/nixos/modules/services/audio/wyoming/piper.nix b/nixpkgs/nixos/modules/services/audio/wyoming/piper.nix
new file mode 100644
index 000000000000..ed50bd9f48e9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/wyoming/piper.nix
@@ -0,0 +1,174 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.wyoming.piper;
+
+  inherit (lib)
+    escapeShellArgs
+    mkOption
+    mdDoc
+    mkEnableOption
+    mkPackageOptionMD
+    types
+    ;
+
+  inherit (builtins)
+    toString
+    ;
+
+in
+
+{
+  meta.buildDocsInSandbox = false;
+
+  options.services.wyoming.piper = with types; {
+    package = mkPackageOptionMD pkgs "wyoming-piper" { };
+
+    servers = mkOption {
+      default = {};
+      description = mdDoc ''
+        Attribute set of piper instances to spawn.
+      '';
+      type = types.attrsOf (types.submodule (
+        { ... }: {
+          options = {
+            enable = mkEnableOption (mdDoc "Wyoming Piper server");
+
+            piper = mkPackageOptionMD pkgs "piper-tts" { };
+
+            voice = mkOption {
+              type = str;
+              example = "en-us-ryan-medium";
+              description = mdDoc ''
+                Name of the voice model to use. See the following website for samples:
+                https://rhasspy.github.io/piper-samples/
+              '';
+            };
+
+            uri = mkOption {
+              type = strMatching "^(tcp|unix)://.*$";
+              example = "tcp://0.0.0.0:10200";
+              description = mdDoc ''
+                URI to bind the wyoming server to.
+              '';
+            };
+
+            speaker = mkOption {
+              type = ints.unsigned;
+              default = 0;
+              description = mdDoc ''
+                ID of a specific speaker in a multi-speaker model.
+              '';
+              apply = toString;
+            };
+
+            noiseScale = mkOption {
+              type = float;
+              default = 0.667;
+              description = mdDoc ''
+                Generator noise value.
+              '';
+              apply = toString;
+            };
+
+            noiseWidth = mkOption {
+              type = float;
+              default = 0.333;
+              description = mdDoc ''
+                Phoneme width noise value.
+              '';
+              apply = toString;
+            };
+
+            lengthScale = mkOption {
+              type = float;
+              default = 1.0;
+              description = mdDoc ''
+                Phoneme length value.
+              '';
+              apply = toString;
+            };
+
+            extraArgs = mkOption {
+              type = listOf str;
+              default = [ ];
+              description = mdDoc ''
+                Extra arguments to pass to the server commandline.
+              '';
+              apply = escapeShellArgs;
+            };
+          };
+        }
+      ));
+    };
+  };
+
+  config = let
+    inherit (lib)
+      mapAttrs'
+      mkIf
+      nameValuePair
+    ;
+  in mkIf (cfg.servers != {}) {
+    systemd.services = mapAttrs' (server: options:
+      nameValuePair "wyoming-piper-${server}" {
+        description = "Wyoming Piper server instance ${server}";
+        after = [
+          "network-online.target"
+        ];
+        wantedBy = [
+          "multi-user.target"
+        ];
+        serviceConfig = {
+          DynamicUser = true;
+          User = "wyoming-piper";
+          StateDirectory = "wyoming/piper";
+          # https://github.com/home-assistant/addons/blob/master/piper/rootfs/etc/s6-overlay/s6-rc.d/piper/run
+          ExecStart = ''
+            ${cfg.package}/bin/wyoming-piper \
+              --data-dir $STATE_DIRECTORY \
+              --download-dir $STATE_DIRECTORY \
+              --uri ${options.uri} \
+              --piper ${options.piper}/bin/piper \
+              --voice ${options.voice} \
+              --speaker ${options.speaker} \
+              --length-scale ${options.lengthScale} \
+              --noise-scale ${options.noiseScale} \
+              --noise-w ${options.noiseWidth} ${options.extraArgs}
+          '';
+          CapabilityBoundingSet = "";
+          DeviceAllow = "";
+          DevicePolicy = "closed";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          PrivateDevices = true;
+          PrivateUsers = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectControlGroups = true;
+          ProtectProc = "invisible";
+          ProcSubset = "pid";
+          RestrictAddressFamilies = [
+            "AF_INET"
+            "AF_INET6"
+            "AF_UNIX"
+          ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [
+            "@system-service"
+            "~@privileged"
+          ];
+          UMask = "0077";
+        };
+      }) cfg.servers;
+  };
+}
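A usage sketch for the Piper module above; the voice name follows the samples page linked in the option description, and the instance name default is illustrative:

  {
    services.wyoming.piper.servers.default = {
      enable = true;
      voice = "en-us-ryan-medium";
      uri = "tcp://0.0.0.0:10200";
    };
  }
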
diff --git a/nixpkgs/nixos/modules/services/audio/ympd.nix b/nixpkgs/nixos/modules/services/audio/ympd.nix
new file mode 100644
index 000000000000..b74cc3f9c0b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/audio/ympd.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ympd;
+in {
+
+  ###### interface
+
+  options = {
+
+    services.ympd = {
+
+      enable = mkEnableOption (lib.mdDoc "ympd, the MPD Web GUI");
+
+      webPort = mkOption {
+        type = types.either types.str types.port; # string for backwards compat
+        default = "8080";
+        description = lib.mdDoc "The port where ympd's web interface will be available.";
+        example = "ssl://8080:/path/to/ssl-private-key.pem";
+      };
+
+      mpd = {
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc "The host where MPD is listening.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = config.services.mpd.network.port;
+          defaultText = literalExpression "config.services.mpd.network.port";
+          description = lib.mdDoc "The port where MPD is listening.";
+          example = 6600;
+        };
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.ympd = {
+      description = "Standalone MPD Web GUI written in C";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.ympd}/bin/ympd \
+            --host ${cfg.mpd.host} \
+            --port ${toString cfg.mpd.port} \
+            --webport ${toString cfg.webPort}
+        '';
+
+        DynamicUser = true;
+        NoNewPrivileges = true;
+
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        ProtectHome = "tmpfs";
+
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateIPC = true;
+
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+
+        SystemCallFilter = [
+          "@system-service"
+          "~@process"
+          "~@setuid"
+        ];
+      };
+    };
+
+  };
+
+}
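A usage sketch for the ympd module above; webPort accepts either a plain port number or a string (kept for backwards compatibility and for the ssl:// form shown in the example):

  {
    services.ympd = {
      enable = true;
      webPort = 8080;
      mpd.host = "localhost";
    };
  }
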
diff --git a/nixpkgs/nixos/modules/services/backup/automysqlbackup.nix b/nixpkgs/nixos/modules/services/backup/automysqlbackup.nix
new file mode 100644
index 000000000000..27bbff813b10
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/automysqlbackup.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  inherit (lib) concatMapStringsSep concatStringsSep isInt isList literalExpression;
+  inherit (lib) mapAttrs mapAttrsToList mkDefault mkEnableOption mkIf mkOption mkRenamedOptionModule optional types;
+
+  cfg = config.services.automysqlbackup;
+  pkg = pkgs.automysqlbackup;
+  user = "automysqlbackup";
+  group = "automysqlbackup";
+
+  toStr = val:
+    if isList val then "( ${concatMapStringsSep " " (val: "'${val}'") val} )"
+    else if isInt val then toString val
+    else if true == val then "'yes'"
+    else if false == val then "'no'"
+    else "'${toString val}'";
+
+  configFile = pkgs.writeText "automysqlbackup.conf" ''
+    #version=${pkg.version}
+    # DON'T REMOVE THE PREVIOUS VERSION LINE!
+    #
+    ${concatStringsSep "\n" (mapAttrsToList (name: value: "CONFIG_${name}=${toStr value}") cfg.config)}
+  '';
+
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "automysqlbackup" "config" ] [ "services" "automysqlbackup" "settings" ])
+  ];
+
+  # interface
+  options = {
+    services.automysqlbackup = {
+
+      enable = mkEnableOption (lib.mdDoc "AutoMySQLBackup");
+
+      calendar = mkOption {
+        type = types.str;
+        default = "01:15:00";
+        description = lib.mdDoc ''
+          Configures when to run the backup service systemd unit (DayOfWeek Year-Month-Day Hour:Minute:Second).
+        '';
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
+        default = {};
+        description = lib.mdDoc ''
+          automysqlbackup configuration. Refer to
+          {file}`''${pkgs.automysqlbackup}/etc/automysqlbackup.conf`
+          for details on supported values.
+        '';
+        example = literalExpression ''
+          {
+            db_names = [ "nextcloud" "matomo" ];
+            table_exclude = [ "nextcloud.oc_users" "nextcloud.oc_whats_new" ];
+            mailcontent = "log";
+            mail_address = "admin@example.org";
+          }
+        '';
+      };
+
+    };
+  };
+
+  # implementation
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = !config.services.mysqlBackup.enable;
+        message = "Please choose one of services.mysqlBackup or services.automysqlbackup.";
+      }
+    ];
+
+    services.automysqlbackup.config = mapAttrs (name: mkDefault) {
+      mysql_dump_username = user;
+      mysql_dump_host = "localhost";
+      mysql_dump_socket = "/run/mysqld/mysqld.sock";
+      backup_dir = "/var/backup/mysql";
+      db_exclude = [ "information_schema" "performance_schema" ];
+      mailcontent = "stdout";
+      mysql_dump_single_transaction = true;
+    };
+
+    systemd.timers.automysqlbackup = {
+      description = "automysqlbackup timer";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = cfg.calendar;
+        AccuracySec = "5m";
+      };
+    };
+
+    systemd.services.automysqlbackup = {
+      description = "automysqlbackup service";
+      serviceConfig = {
+        User = user;
+        Group = group;
+        ExecStart = "${pkg}/bin/automysqlbackup ${configFile}";
+      };
+    };
+
+    environment.systemPackages = [ pkg ];
+
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
+    users.groups.${group} = { };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.config.backup_dir}' 0750 ${user} ${group} - -"
+    ];
+
+    services.mysql.ensureUsers = optional (config.services.mysql.enable && cfg.config.mysql_dump_host == "localhost") {
+      name = user;
+      ensurePermissions = {
+        "*.*" = "SELECT, SHOW VIEW, TRIGGER, LOCK TABLES, EVENT";
+
+        # https://forums.mysql.com/read.php?10,668311,668315#msg-668315
+        "function sys.extract_table_from_file_name" = "execute";
+        "function sys.format_path" = "execute";
+        "function sys.format_statement" = "execute";
+        "function sys.extract_schema_from_file_name" = "execute";
+        "function sys.ps_thread_account" = "execute";
+        "function sys.format_time" = "execute";
+        "function sys.format_bytes" = "execute";
+      };
+    };
+
+  };
+}
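A usage sketch for the automysqlbackup module above, based on the settings example; per the assertion it must not be enabled together with services.mysqlBackup:

  {
    services.automysqlbackup = {
      enable = true;
      calendar = "01:15:00";
      settings = {
        db_names = [ "nextcloud" "matomo" ];
        mailcontent = "log";
        mail_address = "admin@example.org";
      };
    };
  }
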
diff --git a/nixpkgs/nixos/modules/services/backup/bacula.nix b/nixpkgs/nixos/modules/services/backup/bacula.nix
new file mode 100644
index 000000000000..5a75a46e5259
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/bacula.nix
@@ -0,0 +1,578 @@
+{ config, lib, pkgs, ... }:
+
+
+# TODO: test configuration when building nixexpr (use -t parameter)
+# TODO: support sqlite3 (is it deprecated?) and mysql
+
+with lib;
+
+let
+  libDir = "/var/lib/bacula";
+
+  fd_cfg = config.services.bacula-fd;
+  fd_conf = pkgs.writeText "bacula-fd.conf"
+    ''
+      Client {
+        Name = "${fd_cfg.name}";
+        FDPort = ${toString fd_cfg.port};
+        WorkingDirectory = ${libDir};
+        Pid Directory = /run;
+        ${fd_cfg.extraClientConfig}
+      }
+
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
+      Director {
+        Name = "${name}";
+        Password = ${value.password};
+        Monitor = ${value.monitor};
+      }
+      '') fd_cfg.director)}
+
+      Messages {
+        Name = Standard;
+        syslog = all, !skipped, !restored
+        ${fd_cfg.extraMessagesConfig}
+      }
+    '';
+
+  sd_cfg = config.services.bacula-sd;
+  sd_conf = pkgs.writeText "bacula-sd.conf"
+    ''
+      Storage {
+        Name = "${sd_cfg.name}";
+        SDPort = ${toString sd_cfg.port};
+        WorkingDirectory = ${libDir};
+        Pid Directory = /run;
+        ${sd_cfg.extraStorageConfig}
+      }
+
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
+      Autochanger {
+        Name = "${name}";
+        Device = ${concatStringsSep ", " (map (a: "\"${a}\"") value.devices)};
+        Changer Device =  ${value.changerDevice};
+        Changer Command = ${value.changerCommand};
+        ${value.extraAutochangerConfig}
+      }
+      '') sd_cfg.autochanger)}
+
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
+      Device {
+        Name = "${name}";
+        Archive Device = ${value.archiveDevice};
+        Media Type = ${value.mediaType};
+        ${value.extraDeviceConfig}
+      }
+      '') sd_cfg.device)}
+
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: ''
+      Director {
+        Name = "${name}";
+        Password = ${value.password};
+        Monitor = ${value.monitor};
+      }
+      '') sd_cfg.director)}
+
+      Messages {
+        Name = Standard;
+        syslog = all, !skipped, !restored
+        ${sd_cfg.extraMessagesConfig}
+      }
+    '';
+
+  dir_cfg = config.services.bacula-dir;
+  dir_conf = pkgs.writeText "bacula-dir.conf"
+    ''
+    Director {
+      Name = "${dir_cfg.name}";
+      Password = ${dir_cfg.password};
+      DirPort = ${toString dir_cfg.port};
+      Working Directory = ${libDir};
+      Pid Directory = /run/;
+      QueryFile = ${pkgs.bacula}/etc/query.sql;
+      ${dir_cfg.extraDirectorConfig}
+    }
+
+    Catalog {
+      Name = PostgreSQL;
+      dbname = bacula;
+      user = bacula;
+    }
+
+    Messages {
+      Name = Standard;
+      syslog = all, !skipped, !restored
+      ${dir_cfg.extraMessagesConfig}
+    }
+
+    ${dir_cfg.extraConfig}
+    '';
+
+  directorOptions = {...}:
+  {
+    options = {
+      password = mkOption {
+        type = types.str;
+        # TODO: required?
+        description = lib.mdDoc ''
+          Specifies the password that must be supplied for the default Bacula
+          Console to be authorized. The same password must appear in the
+          Director resource of the Console configuration file. For added
+          security, the password is never passed across the network but instead
+          a challenge response hash code created with the password. This
+          directive is required. If you have either /dev/random or bc on your
+          machine, Bacula will generate a random password during the
+          configuration process, otherwise it will be left blank and you must
+          manually supply it.
+
+          The password is plain text. It is not generated through any special
+          process but as noted above, it is better to use random text for
+          security reasons.
+        '';
+      };
+
+      monitor = mkOption {
+        type = types.enum [ "no" "yes" ];
+        default = "no";
+        example = "yes";
+        description = lib.mdDoc ''
+          If Monitor is set to `no`, this director will have
+          full access to this Storage daemon. If Monitor is set to
+          `yes`, this director will only be able to fetch the
+          current status of this Storage daemon.
+
+          Please note that if this director is being used by a Monitor, we
+          highly recommend to set this directive to yes to avoid serious
+          security problems.
+        '';
+      };
+    };
+  };
+
+  autochangerOptions = {...}:
+  {
+    options = {
+      changerDevice = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The specified name-string must be the generic SCSI device name of the
+          autochanger that corresponds to the normal read/write Archive Device
+          specified in the Device resource. This generic SCSI device name
+          should be specified if you have an autochanger or if you have a
+          standard tape drive and want to use the Alert Command (see below).
+          For example, on Linux systems, for an Archive Device name of
+          `/dev/nst0`, you would specify
+          `/dev/sg0` for the Changer Device name.  Depending
+          on your exact configuration, and the number of autochangers or the
+          type of autochanger, what you specify here can vary. This directive
+          is optional. See the Using Autochangers chapter of the Bacula manual
+          for more details on using this and the following autochanger
+          directives.
+          '';
+      };
+
+      changerCommand = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The name-string specifies an external program to be called that will
+          automatically change volumes as required by Bacula. Normally, this
+          directive will be specified only in the AutoChanger resource, which
+          is then used for all devices. However, you may also specify the
+          different Changer Command in each Device resource. Most frequently,
+          you will specify the Bacula supplied mtx-changer script as follows:
+
+          `"/path/mtx-changer %c %o %S %a %d"`
+
+          and you will install the mtx on your system (found in the depkgs
+          release). An example of this command is in the default bacula-sd.conf
+          file. For more details on the substitution characters that may be
+          specified to configure your autochanger, please see the
+          Autochangers chapter of the Bacula manual. For FreeBSD
+          users, you might want to see one of the several chio scripts in
+          examples/autochangers.
+          '';
+        default = "/etc/bacula/mtx-changer %c %o %S %a %d";
+      };
+
+      devices = mkOption {
+        description = lib.mdDoc "The names of the Device resources that belong to this autochanger.";
+        type = types.listOf types.str;
+      };
+
+      extraAutochangerConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration to be passed in Autochanger directive.
+        '';
+        example = ''
+
+        '';
+      };
+    };
+  };
+
+
+  deviceOptions = {...}:
+  {
+    options = {
+      archiveDevice = mkOption {
+        # TODO: required?
+        type = types.str;
+        description = lib.mdDoc ''
+          The specified name-string gives the system file name of the storage
+          device managed by this storage daemon. This will usually be the
+          device file name of a removable storage device (tape drive), for
+          example `/dev/nst0` or
+          `/dev/rmt/0mbn`. For a DVD-writer, it will be for
+          example `/dev/hdc`. It may also be a directory name
+          if you are archiving to disk storage. In this case, you must supply
+          the full absolute path to the directory. When specifying a tape
+          device, it is preferable that the "non-rewind" variant of the device
+          file name be given.
+        '';
+      };
+
+      mediaType = mkOption {
+        # TODO: required?
+        type = types.str;
+        description = lib.mdDoc ''
+          The specified name-string names the type of media supported by this
+          device, for example, `DLT7000`. Media type names are
+          arbitrary in that you set them to anything you want, but they must be
+          known to the volume database to keep track of which storage daemons
+          can read which volumes. In general, each different storage type
+          should have a unique Media Type associated with it. The same
+          name-string must appear in the appropriate Storage resource
+          definition in the Director's configuration file.
+
+          Even though the names you assign are arbitrary (i.e. you choose the
+          name you want), you should take care in specifying them because the
+          Media Type is used to determine which storage device Bacula will
+          select during restore. Thus you should probably use the same Media
+          Type specification for all drives where the Media can be freely
+          interchanged. This is not generally an issue if you have a single
+          Storage daemon, but it is with multiple Storage daemons, especially
+          if they have incompatible media.
+
+          For example, if you specify a Media Type of `DDS-4`
+          then during the restore, Bacula will be able to choose any Storage
+          Daemon that handles `DDS-4`. If you have an
+          autochanger, you might want to name the Media Type in a way that is
+          unique to the autochanger, unless you wish to possibly use the
+          Volumes in other drives. You should also ensure to have unique Media
+          Type names if the Media is not compatible between drives. This
+          specification is required for all devices.
+
+          In addition, if you are using disk storage, each Device resource will
+          generally have a different mount point or directory. In order for
+          Bacula to select the correct Device resource, each one must have a
+          unique Media Type.
+        '';
+      };
+
+      extraDeviceConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration to be passed in the Device directive.
+        '';
+        example = ''
+          LabelMedia = yes
+          Random Access = no
+          AutomaticMount = no
+          RemovableMedia = no
+          MaximumOpenWait = 60
+          AlwaysOpen = no
+        '';
+      };
+    };
+  };
+
+in {
+  options = {
+    services.bacula-fd = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Bacula File Daemon.
+        '';
+      };
+
+      name = mkOption {
+        default = "${config.networking.hostName}-fd";
+        defaultText = literalExpression ''"''${config.networking.hostName}-fd"'';
+        type = types.str;
+        description = lib.mdDoc ''
+          The client name that must be used by the Director when connecting.
+          Generally, it is a good idea to use a name related to the machine so
+          that error messages can be easily identified if you have multiple
+          Clients. This directive is required.
+        '';
+      };
+
+      port = mkOption {
+        default = 9102;
+        type = types.port;
+        description = lib.mdDoc ''
+          This specifies the port number on which the Client listens for
+          Director connections. It must agree with the FDPort specified in
+          the Client resource of the Director's configuration file.
+        '';
+      };
+
+      director = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          This option defines Director resources in the Bacula File Daemon.
+        '';
+        type = with types; attrsOf (submodule directorOptions);
+      };
+
+      extraClientConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration to be passed in the Client directive.
+        '';
+        example = ''
+          Maximum Concurrent Jobs = 20;
+          Heartbeat Interval = 30;
+        '';
+      };
+
+      extraMessagesConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration to be passed in the Messages directive.
+        '';
+        example = ''
+          console = all
+        '';
+      };
+    };
+
+    services.bacula-sd = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Bacula Storage Daemon.
+        '';
+      };
+
+      name = mkOption {
+        default = "${config.networking.hostName}-sd";
+        defaultText = literalExpression ''"''${config.networking.hostName}-sd"'';
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the Name of the Storage daemon.
+        '';
+      };
+
+      port = mkOption {
+        default = 9103;
+        type = types.port;
+        description = lib.mdDoc ''
+          Specifies port number on which the Storage daemon listens for
+          Director connections.
+        '';
+      };
+
+      director = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          This option defines Director resources in Bacula Storage Daemon.
+        '';
+        type = with types; attrsOf (submodule directorOptions);
+      };
+
+      device = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          This option defines Device resources in Bacula Storage Daemon.
+        '';
+        type = with types; attrsOf (submodule deviceOptions);
+      };
+
+      autochanger = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          This option defines Autochanger resources in Bacula Storage Daemon.
+        '';
+        type = with types; attrsOf (submodule autochangerOptions);
+      };
+
+      extraStorageConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration to be passed in the Storage directive.
+        '';
+        example = ''
+          Maximum Concurrent Jobs = 20;
+          Heartbeat Interval = 30;
+        '';
+      };
+
+      extraMessagesConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration to be passed in the Messages directive.
+        '';
+        example = ''
+          console = all
+        '';
+      };
+
+    };
+
+    services.bacula-dir = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Bacula Director Daemon.
+        '';
+      };
+
+      name = mkOption {
+        default = "${config.networking.hostName}-dir";
+        defaultText = literalExpression ''"''${config.networking.hostName}-dir"'';
+        type = types.str;
+        description = lib.mdDoc ''
+          The director name used by the system administrator. This directive is
+          required.
+        '';
+      };
+
+      port = mkOption {
+        default = 9101;
+        type = types.port;
+        description = lib.mdDoc ''
+          Specify the port (a positive integer) on which the Director daemon
+          will listen for Bacula Console connections. This same port number
+          must be specified in the Director resource of the Console
+          configuration file. The default is 9101, so normally this directive
+          need not be specified. This directive should not be used if you
+          specify the DirAddresses (N.B. plural) directive.
+        '';
+      };
+
+      password = mkOption {
+        # TODO: required?
+        type = types.str;
+        description = lib.mdDoc ''
+           Specifies the password that must be supplied for a Director.
+        '';
+      };
+
+      extraMessagesConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration to be passed in the Messages directive.
+        '';
+        example = ''
+          console = all
+        '';
+      };
+
+      extraDirectorConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration to be passed in the Director directive.
+        '';
+        example = ''
+          Maximum Concurrent Jobs = 20;
+          Heartbeat Interval = 30;
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration for Bacula Director Daemon.
+        '';
+        example = ''
+          TODO
+        '';
+      };
+    };
+  };
+
+  config = mkIf (fd_cfg.enable || sd_cfg.enable || dir_cfg.enable) {
+    systemd.services.bacula-fd = mkIf fd_cfg.enable {
+      after = [ "network.target" ];
+      description = "Bacula File Daemon";
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.bacula ];
+      serviceConfig = {
+        ExecStart = "${pkgs.bacula}/sbin/bacula-fd -f -u root -g bacula -c ${fd_conf}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LogsDirectory = "bacula";
+        StateDirectory = "bacula";
+      };
+    };
+
+    systemd.services.bacula-sd = mkIf sd_cfg.enable {
+      after = [ "network.target" ];
+      description = "Bacula Storage Daemon";
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.bacula ];
+      serviceConfig = {
+        ExecStart = "${pkgs.bacula}/sbin/bacula-sd -f -u bacula -g bacula -c ${sd_conf}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LogsDirectory = "bacula";
+        StateDirectory = "bacula";
+      };
+    };
+
+    services.postgresql.enable = lib.mkIf dir_cfg.enable true;
+
+    systemd.services.bacula-dir = mkIf dir_cfg.enable {
+      after = [ "network.target" "postgresql.service" ];
+      description = "Bacula Director Daemon";
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.bacula ];
+      serviceConfig = {
+        ExecStart = "${pkgs.bacula}/sbin/bacula-dir -f -u bacula -g bacula -c ${dir_conf}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LogsDirectory = "bacula";
+        StateDirectory = "bacula";
+      };
+      preStart = ''
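+        # On the first start, create the Bacula catalog role and populate the
+        # PostgreSQL schema; later starts only apply schema updates.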
+        if ! test -e "${libDir}/db-created"; then
+            ${pkgs.postgresql}/bin/createuser --no-superuser --no-createdb --no-createrole bacula
+            #${pkgs.postgresql}/bin/createdb --owner bacula bacula
+
+            # populate DB
+            ${pkgs.bacula}/etc/create_bacula_database postgresql
+            ${pkgs.bacula}/etc/make_bacula_tables postgresql
+            ${pkgs.bacula}/etc/grant_bacula_privileges postgresql
+            touch "${libDir}/db-created"
+        else
+            ${pkgs.bacula}/etc/update_bacula_tables postgresql || true
+        fi
+      '';
+    };
+
+    environment.systemPackages = [ pkgs.bacula ];
+
+    users.users.bacula = {
+      group = "bacula";
+      uid = config.ids.uids.bacula;
+      home = "${libDir}";
+      createHome = true;
+      description = "Bacula Daemons user";
+      shell = "${pkgs.bash}/bin/bash";
+    };
+
+    users.groups.bacula.gid = config.ids.gids.bacula;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/backup/borgbackup.md b/nixpkgs/nixos/modules/services/backup/borgbackup.md
new file mode 100644
index 000000000000..39141f6ec858
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/borgbackup.md
@@ -0,0 +1,163 @@
+# BorgBackup {#module-borgbase}
+
+*Source:* {file}`modules/services/backup/borgbackup.nix`
+
+*Upstream documentation:* <https://borgbackup.readthedocs.io/>
+
+[BorgBackup](https://www.borgbackup.org/) (short: Borg)
+is a deduplicating backup program. Optionally, it supports compression and
+authenticated encryption.
+
+The main goal of Borg is to provide an efficient and secure way to back up
+data. The data deduplication technique used makes Borg suitable for daily
+backups since only changes are stored. The authenticated encryption technique
+makes it suitable for backups to not fully trusted targets.
+
+## Configuring {#module-services-backup-borgbackup-configuring}
+
+A complete list of options for the BorgBackup module may be found
+[here](#opt-services.borgbackup.jobs).
+
+## Basic usage for a local backup {#opt-services-backup-borgbackup-local-directory}
+
+A very basic configuration for backing up to a locally accessible directory is:
+```
+{
+  services.borgbackup.jobs = {
+    rootBackup = {
+      paths = "/";
+      exclude = [ "/nix" "/path/to/local/repo" ];
+      repo = "/path/to/local/repo";
+      doInit = true;
+      encryption = {
+        mode = "repokey";
+        passphrase = "secret";
+      };
+      compression = "auto,lzma";
+      startAt = "weekly";
+    };
+  };
+}
+```
+
+::: {.warning}
+If you do not want the passphrase to be stored in the world-readable
+Nix store, use `passCommand` instead. You can find an example below.
+:::
+
+## Create a borg backup server {#opt-services-backup-create-server}
+
+You should use a different SSH key for each repository you write to,
+because the specified keys are restricted to running borg serve and can only
+access this single repository. You will need the public key printed by the
+commands below.
+
+```ShellSession
+# sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_my_borg_repo
+# cat /run/keys/id_ed25519_my_borg_repo
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos
+```
+
+Add the following snippet to your NixOS configuration:
+```
+{
+  services.borgbackup.repos = {
+    my_borg_repo = {
+      authorizedKeys = [
+        "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAID78zmOyA+5uPG4Ot0hfAy+sLDPU1L4AiIoRYEIVbbQ/ root@nixos"
+      ] ;
+      path = "/var/lib/my_borg_repo" ;
+    };
+  };
+}
+```
+
+## Backup to the borg repository server {#opt-services-backup-borgbackup-remote-server}
+
+The following NixOS snippet creates an hourly backup to the repository
+server (on the host nixos) created in the section above. We assume
+that you have stored a secret passphrase in the file
+{file}`/run/keys/borgbackup_passphrase`, which should only be
+accessible by root.
+
+```
+{
+  services.borgbackup.jobs = {
+    backupToLocalServer = {
+      paths = [ "/etc/nixos" ];
+      doInit = true;
+      repo =  "borg@nixos:." ;
+      encryption = {
+        mode = "repokey-blake2";
+        passCommand = "cat /run/keys/borgbackup_passphrase";
+      };
+      environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_my_borg_repo"; };
+      compression = "auto,lzma";
+      startAt = "hourly";
+    };
+  };
+}
+```
+
+The following few commands (run as root) let you test your backup.
+```
+> nixos-rebuild switch
+...restarting the following units: polkit.service
+> systemctl restart borgbackup-job-backupToLocalServer
+> sleep 10
+> systemctl restart borgbackup-job-backupToLocalServer
+> export BORG_PASSPHRASE=topSecret
+> borg list --rsh='ssh -i /run/keys/id_ed25519_my_borg_repo' borg@nixos:.
+nixos-backupToLocalServer-2020-03-30T21:46:17 Mon, 2020-03-30 21:46:19 [84feb97710954931ca384182f5f3cb90665f35cef214760abd7350fb064786ac]
+nixos-backupToLocalServer-2020-03-30T21:46:30 Mon, 2020-03-30 21:46:32 [e77321694ecd160ca2228611747c6ad1be177d6e0d894538898de7a2621b6e68]
+```
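+
+To check that the data can actually be restored, extract a single path from one
+of the archives listed above into the current directory (a sketch; the archive
+name is taken from the `borg list` output above):
+
+```ShellSession
+> borg extract --rsh='ssh -i /run/keys/id_ed25519_my_borg_repo' \
+    borg@nixos:.::nixos-backupToLocalServer-2020-03-30T21:46:30 etc/nixos
+```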
+
+## Backup to a hosting service {#opt-services-backup-borgbackup-borgbase}
+
+Several companies offer [(paid) hosting services](https://www.borgbackup.org/support/commercial.html)
+for Borg repositories.
+
+To back up your home directory to borgbase you have to:
+
+  - Generate an SSH key without a passphrase to access the remote server, e.g.
+
+        sudo ssh-keygen -N '' -t ed25519 -f /run/keys/id_ed25519_borgbase
+
+  - Create the repository on the server by following the instructions for your
+    hosting server.
+  - Initialize the repository on the server, e.g.
+
+        sudo borg init --encryption=repokey-blake2  \
+            --rsh "ssh -i /run/keys/id_ed25519_borgbase" \
+            zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo
+
+  - Add it to your NixOS configuration, e.g.
+
+        {
+          services.borgbackup.jobs = {
+            my_Remote_Backup = {
+              paths = [ "/" ];
+              exclude = [ "/nix" "'**/.cache'" ];
+              repo = "zzz2aaaaa@zzz2aaaaa.repo.borgbase.com:repo";
+              encryption = {
+                mode = "repokey-blake2";
+                passCommand = "cat /run/keys/borgbackup_passphrase";
+              };
+              environment = { BORG_RSH = "ssh -i /run/keys/id_ed25519_borgbase"; };
+              compression = "auto,lzma";
+              startAt = "daily";
+            };
+          };
+        }
+
+## Vorta backup client for the desktop {#opt-services-backup-borgbackup-vorta}
+
+Vorta is a backup client for macOS and Linux desktops. It integrates the
+mighty BorgBackup with your desktop environment to protect your data from
+disk failure, ransomware and theft.
+
+It can be installed in NixOS e.g. by adding `pkgs.vorta`
+to [](#opt-environment.systemPackages).
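+
+For example, a minimal sketch:
+```
+{
+  environment.systemPackages = [ pkgs.vorta ];
+}
+```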
+
+Details about using Vorta can be found under
+[https://vorta.borgbase.com](https://vorta.borgbase.com/usage).
diff --git a/nixpkgs/nixos/modules/services/backup/borgbackup.nix b/nixpkgs/nixos/modules/services/backup/borgbackup.nix
new file mode 100644
index 000000000000..28887f8e2ad5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/borgbackup.nix
@@ -0,0 +1,777 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  isLocalPath = x:
+    builtins.substring 0 1 x == "/"      # absolute path
+    || builtins.substring 0 1 x == "."   # relative path
+    || builtins.match ".*:.*" x == null; # not machine:path
+
+  mkExcludeFile = cfg:
+    # Write each exclude pattern to a new line
+    pkgs.writeText "excludefile" (concatMapStrings (s: s + "\n") cfg.exclude);
+
+  mkPatternsFile = cfg:
+    # Write each pattern to a new line
+    pkgs.writeText "patternsfile" (concatMapStrings (s: s + "\n") cfg.patterns);
+
+  mkKeepArgs = cfg:
+    # If cfg.prune.keep e.g. has a yearly attribute,
+    # its content is passed on as --keep-yearly
+    concatStringsSep " "
+      (mapAttrsToList (x: y: "--keep-${x}=${toString y}") cfg.prune.keep);
+
+  mkBackupScript = name: cfg: pkgs.writeShellScript "${name}-script" (''
+    set -e
+    on_exit()
+    {
+      exitStatus=$?
+      ${cfg.postHook}
+      exit $exitStatus
+    }
+    trap on_exit EXIT
+
+    archiveName="${optionalString (cfg.archiveBaseName != null) (cfg.archiveBaseName + "-")}$(date ${cfg.dateFormat})"
+    archiveSuffix="${optionalString cfg.appendFailedSuffix ".failed"}"
+    ${cfg.preHook}
+  '' + optionalString cfg.doInit ''
+    # Run borg init if the repo doesn't exist yet
+    if ! borg list $extraArgs > /dev/null; then
+      borg init $extraArgs \
+        --encryption ${cfg.encryption.mode} \
+        $extraInitArgs
+      ${cfg.postInit}
+    fi
+  '' + ''
+    (
+      set -o pipefail
+      ${optionalString (cfg.dumpCommand != null) ''${escapeShellArg cfg.dumpCommand} | \''}
+      borg create $extraArgs \
+        --compression ${cfg.compression} \
+        --exclude-from ${mkExcludeFile cfg} \
+        --patterns-from ${mkPatternsFile cfg} \
+        $extraCreateArgs \
+        "::$archiveName$archiveSuffix" \
+        ${if cfg.paths == null then "-" else escapeShellArgs cfg.paths}
+    )
+  '' + optionalString cfg.appendFailedSuffix ''
+    borg rename $extraArgs \
+      "::$archiveName$archiveSuffix" "$archiveName"
+  '' + ''
+    ${cfg.postCreate}
+  '' + optionalString (cfg.prune.keep != { }) ''
+    borg prune $extraArgs \
+      ${mkKeepArgs cfg} \
+      ${optionalString (cfg.prune.prefix != null) "--glob-archives ${escapeShellArg "${cfg.prune.prefix}*"}"} \
+      $extraPruneArgs
+    borg compact $extraArgs $extraCompactArgs
+    ${cfg.postPrune}
+  '');
+
+  mkPassEnv = cfg: with cfg.encryption;
+    if passCommand != null then
+      { BORG_PASSCOMMAND = passCommand; }
+    else if passphrase != null then
+      { BORG_PASSPHRASE = passphrase; }
+    else { };
+
+  mkBackupService = name: cfg:
+    let
+      userHome = config.users.users.${cfg.user}.home;
+      backupJobName = "borgbackup-job-${name}";
+      backupScript = mkBackupScript backupJobName cfg;
+    in nameValuePair backupJobName {
+      description = "BorgBackup job ${name}";
+      path =  [
+        config.services.borgbackup.package pkgs.openssh
+      ];
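+      # When inhibitsSleep is set, run the backup under systemd-inhibit so the
+      # machine does not suspend while the job is running.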
+      script = "exec " + optionalString cfg.inhibitsSleep ''\
+        ${pkgs.systemd}/bin/systemd-inhibit \
+            --who="borgbackup" \
+            --what="sleep" \
+            --why="Scheduled backup" \
+        '' + backupScript;
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        # Only run when no other process is using CPU or disk
+        CPUSchedulingPolicy = "idle";
+        IOSchedulingClass = "idle";
+        ProtectSystem = "strict";
+        ReadWritePaths =
+          [ "${userHome}/.config/borg" "${userHome}/.cache/borg" ]
+          ++ cfg.readWritePaths
+          # Borg needs write access to repo if it is not remote
+          ++ optional (isLocalPath cfg.repo) cfg.repo;
+        PrivateTmp = cfg.privateTmp;
+      };
+      environment = {
+        BORG_REPO = cfg.repo;
+        inherit (cfg) extraArgs extraInitArgs extraCreateArgs extraPruneArgs extraCompactArgs;
+      } // (mkPassEnv cfg) // cfg.environment;
+    };
+
+  mkBackupTimers = name: cfg:
+    nameValuePair "borgbackup-job-${name}" {
+      description = "BorgBackup job ${name} timer";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        Persistent = cfg.persistentTimer;
+        OnCalendar = cfg.startAt;
+      };
+      # if this is a remote backup, wait for the network to be online
+      after = optional (cfg.persistentTimer && !isLocalPath cfg.repo) "network-online.target";
+    };
+
+  # utility function around makeWrapper
+  mkWrapperDrv = {
+      original, name, set ? {}
+    }:
+    pkgs.runCommand "${name}-wrapper" {
+      nativeBuildInputs = [ pkgs.makeWrapper ];
+    } (with lib; ''
+      makeWrapper "${original}" "$out/bin/${name}" \
+        ${concatStringsSep " \\\n " (mapAttrsToList (name: value: ''--set ${name} "${value}"'') set)}
+    '');
+
+  mkBorgWrapper = name: cfg: mkWrapperDrv {
+    original = getExe config.services.borgbackup.package;
+    name = "borg-job-${name}";
+    set = { BORG_REPO = cfg.repo; } // (mkPassEnv cfg) // cfg.environment;
+  };
+
+  # Paths listed in ReadWritePaths must exist before service is started
+  mkActivationScript = name: cfg:
+    let
+      install = "install -o ${cfg.user} -g ${cfg.group}";
+    in
+      nameValuePair "borgbackup-job-${name}" (stringAfter [ "users" ] (''
+        # Ensure that the home directory already exists
+        # We can't assert createHome == true because that's not the case for root
+        cd "${config.users.users.${cfg.user}.home}"
+        # Create each directory separately to prevent root owned parent dirs
+        ${install} -d .config .config/borg
+        ${install} -d .cache .cache/borg
+      '' + optionalString (isLocalPath cfg.repo && !cfg.removableDevice) ''
+        ${install} -d ${escapeShellArg cfg.repo}
+      ''));
+
+  mkPassAssertion = name: cfg: {
+    assertion = with cfg.encryption;
+      mode != "none" -> passCommand != null || passphrase != null;
+    message =
+      "passCommand or passphrase has to be specified because"
+      + '' borgbackup.jobs.${name}.encryption != "none"'';
+  };
+
+  mkRepoService = name: cfg:
+    nameValuePair "borgbackup-repo-${name}" {
+      description = "Create BorgBackup repository ${name} directory";
+      script = ''
+        mkdir -p ${escapeShellArg cfg.path}
+        chown ${cfg.user}:${cfg.group} ${escapeShellArg cfg.path}
+      '';
+      serviceConfig = {
+        # The service's only task is to ensure that the specified path exists
+        Type = "oneshot";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+  mkAuthorizedKey = cfg: appendOnly: key:
+    let
+      # Because of the following line, clients do not need to specify an absolute repo path
+      cdCommand = "cd ${escapeShellArg cfg.path}";
+      restrictedArg = "--restrict-to-${if cfg.allowSubRepos then "path" else "repository"} .";
+      appendOnlyArg = optionalString appendOnly "--append-only";
+      quotaArg = optionalString (cfg.quota != null) "--storage-quota ${cfg.quota}";
+      serveCommand = "borg serve ${restrictedArg} ${appendOnlyArg} ${quotaArg}";
+    in
+      ''command="${cdCommand} && ${serveCommand}",restrict ${key}'';
+
+  mkUsersConfig = name: cfg: {
+    users.${cfg.user} = {
+      openssh.authorizedKeys.keys =
+        (map (mkAuthorizedKey cfg false) cfg.authorizedKeys
+        ++ map (mkAuthorizedKey cfg true) cfg.authorizedKeysAppendOnly);
+      useDefaultShell = true;
+      group = cfg.group;
+      isSystemUser = true;
+    };
+    groups.${cfg.group} = { };
+  };
+
+  mkKeysAssertion = name: cfg: {
+    assertion = cfg.authorizedKeys != [ ] || cfg.authorizedKeysAppendOnly != [ ];
+    message =
+      "borgbackup.repos.${name} does not make sense"
+      + " without at least one public key";
+  };
+
+  mkSourceAssertions = name: cfg: {
+    assertion = count isNull [ cfg.dumpCommand cfg.paths ] == 1;
+    message = ''
+      Exactly one of borgbackup.jobs.${name}.paths or borgbackup.jobs.${name}.dumpCommand
+      must be set.
+    '';
+  };
+
+  mkRemovableDeviceAssertions = name: cfg: {
+    assertion = !(isLocalPath cfg.repo) -> !cfg.removableDevice;
+    message = ''
+      borgbackup.repos.${name}: repo isn't a local path, thus it can't be a removable device!
+    '';
+  };
+
+in {
+  meta.maintainers = with maintainers; [ dotlambda ];
+  meta.doc = ./borgbackup.md;
+
+  ###### interface
+
+  options.services.borgbackup.package = mkPackageOptionMD pkgs "borgbackup" { };
+
+  options.services.borgbackup.jobs = mkOption {
+    description = lib.mdDoc ''
+      Deduplicating backups using BorgBackup.
+      Adding a job will cause a borg-job-NAME wrapper to be added
+      to your system path, so that you can perform maintenance easily.
+      See also the chapter about BorgBackup in the NixOS manual.
+    '';
+    default = { };
+    example = literalExpression ''
+      { # for a local backup
+        rootBackup = {
+          paths = "/";
+          exclude = [ "/nix" ];
+          repo = "/path/to/local/repo";
+          encryption = {
+            mode = "repokey";
+            passphrase = "secret";
+          };
+          compression = "auto,lzma";
+          startAt = "weekly";
+        };
+      }
+      { # Root backing up each day to a remote backup server. We assume that you have
+        #   * created a passwordless key: ssh-keygen -N "" -t ed25519 -f /path/to/ssh_key
+        #     best practices are: use -t ed25519, /path/to = /run/keys
+        #   * the passphrase is in the file /run/keys/borgbackup_passphrase
+        #   * you have initialized the repository manually
+        remoteBackup = {
+          paths = [ "/etc" "/home" ];
+          exclude = [ "/nix" "'**/.cache'" ];
+          doInit = false;
+          repo = "user3@arep.repo.borgbase.com:repo";
+          encryption = {
+            mode = "repokey-blake2";
+            passCommand = "cat /path/to/passphrase";
+          };
+          environment = { BORG_RSH = "ssh -i /path/to/ssh_key"; };
+          compression = "auto,lzma";
+          startAt = "daily";
+        };
+      }
+    '';
+    type = types.attrsOf (types.submodule (let globalConfig = config; in
+      { name, config, ... }: {
+        options = {
+
+          paths = mkOption {
+            type = with types; nullOr (coercedTo str lib.singleton (listOf str));
+            default = null;
+            description = lib.mdDoc ''
+              Path(s) to back up.
+              Mutually exclusive with {option}`dumpCommand`.
+            '';
+            example = "/home/user";
+          };
+
+          dumpCommand = mkOption {
+            type = with types; nullOr path;
+            default = null;
+            description = lib.mdDoc ''
+              Backup the stdout of this program instead of filesystem paths.
+              Mutually exclusive with {option}`paths`.
+            '';
+            example = "/path/to/createZFSsend.sh";
+          };
+
+          repo = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Remote or local repository to back up to.";
+            example = "user@machine:/path/to/repo";
+          };
+
+          removableDevice = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Whether the repo (which must be local) is a removable device.";
+          };
+
+          archiveBaseName = mkOption {
+            type = types.nullOr (types.strMatching "[^/{}]+");
+            default = "${globalConfig.networking.hostName}-${name}";
+            defaultText = literalExpression ''"''${config.networking.hostName}-<name>"'';
+            description = lib.mdDoc ''
+              How to name the created archives. A timestamp, whose format is
+              determined by {option}`dateFormat`, will be appended. The full
+              name can be modified at runtime (`$archiveName`).
+              Placeholders like `{hostname}` must not be used.
+              Use `null` for no base name.
+            '';
+          };
+
+          dateFormat = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Arguments passed to {command}`date`
+              to create a timestamp suffix for the archive name.
+            '';
+            default = "+%Y-%m-%dT%H:%M:%S";
+            example = "-u +%s";
+          };
+
+          startAt = mkOption {
+            type = with types; either str (listOf str);
+            default = "daily";
+            description = lib.mdDoc ''
+              When or how often the backup should run.
+              Must be in the format described in
+              {manpage}`systemd.time(7)`.
+              If you do not want the backup to start
+              automatically, use `[ ]`.
+              It will generate a systemd service borgbackup-job-NAME.
+              You may trigger it manually via systemctl restart borgbackup-job-NAME.
+            '';
+          };
+
+          persistentTimer = mkOption {
+            default = false;
+            type = types.bool;
+            example = true;
+            description = lib.mdDoc ''
+              Set the `persistentTimer` option for the
+              {manpage}`systemd.timer(5)`
+              which triggers the backup immediately if the last trigger
+              was missed (e.g. if the system was powered down).
+            '';
+          };
+
+          inhibitsSleep = mkOption {
+            default = false;
+            type = types.bool;
+            example = true;
+            description = lib.mdDoc ''
+              Prevents the system from sleeping while backing up.
+            '';
+          };
+
+          user = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              The user {command}`borg` is run as.
+              User or group need read permission
+              for the specified {option}`paths`.
+            '';
+            default = "root";
+          };
+
+          group = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              The group borg is run as. User or group needs read permission
+              for the specified {option}`paths`.
+            '';
+            default = "root";
+          };
+
+          encryption.mode = mkOption {
+            type = types.enum [
+              "repokey" "keyfile"
+              "repokey-blake2" "keyfile-blake2"
+              "authenticated" "authenticated-blake2"
+              "none"
+            ];
+            description = lib.mdDoc ''
+              Encryption mode to use. Setting a mode
+              other than `"none"` requires
+              you to specify a {option}`passCommand`
+              or a {option}`passphrase`.
+            '';
+            example = "repokey-blake2";
+          };
+
+          encryption.passCommand = mkOption {
+            type = with types; nullOr str;
+            description = lib.mdDoc ''
+              A command which prints the passphrase to stdout.
+              Mutually exclusive with {option}`passphrase`.
+            '';
+            default = null;
+            example = "cat /path/to/passphrase_file";
+          };
+
+          encryption.passphrase = mkOption {
+            type = with types; nullOr str;
+            description = lib.mdDoc ''
+              The passphrase the backups are encrypted with.
+              Mutually exclusive with {option}`passCommand`.
+              If you do not want the passphrase to be stored in the
+              world-readable Nix store, use {option}`passCommand`.
+            '';
+            default = null;
+          };
+
+          compression = mkOption {
+            # "auto" is optional,
+            # compression mode must be given,
+            # compression level is optional
+            type = types.strMatching "none|(auto,)?(lz4|zstd|zlib|lzma)(,[[:digit:]]{1,2})?";
+            description = lib.mdDoc ''
+              Compression method to use. Refer to
+              {command}`borg help compression`
+              for all available options.
+            '';
+            default = "lz4";
+            example = "auto,lzma";
+          };
+
+          exclude = mkOption {
+            type = with types; listOf str;
+            description = lib.mdDoc ''
+              Exclude paths matching any of the given patterns. See
+              {command}`borg help patterns` for pattern syntax.
+            '';
+            default = [ ];
+            example = [
+              "/home/*/.cache"
+              "/nix"
+            ];
+          };
+
+          patterns = mkOption {
+            type = with types; listOf str;
+            description = lib.mdDoc ''
+              Include/exclude paths matching the given patterns. The first
+              matching pattern is used, so if an include pattern (prefix `+`)
+              matches before an exclude pattern (prefix `-`), the file is
+              backed up. See [{command}`borg help patterns`](https://borgbackup.readthedocs.io/en/stable/usage/help.html#borg-patterns) for pattern syntax.
+            '';
+            default = [ ];
+            example = [
+              "+ /home/susan"
+              "- /home/*"
+            ];
+          };
+
+          readWritePaths = mkOption {
+            type = with types; listOf path;
+            description = lib.mdDoc ''
+              By default, borg cannot write anywhere on the system but
+              `$HOME/.config/borg` and `$HOME/.cache/borg`.
+              If, for example, your preHook script needs to dump files
+              somewhere, put those directories here.
+            '';
+            default = [ ];
+            example = [
+              "/var/backup/mysqldump"
+            ];
+          };
+
+          privateTmp = mkOption {
+            type = types.bool;
+            description = lib.mdDoc ''
+              Set the `PrivateTmp` option for
+              the systemd-service. Set to false if you need sockets
+              or other files from global /tmp.
+            '';
+            default = true;
+          };
+
+          doInit = mkOption {
+            type = types.bool;
+            description = lib.mdDoc ''
+              Run {command}`borg init` if the
+              specified {option}`repo` does not exist.
+              You should set this to `false`
+              if the repository is located on an external drive
+              that might not always be mounted.
+            '';
+            default = true;
+          };
+
+          appendFailedSuffix = mkOption {
+            type = types.bool;
+            description = lib.mdDoc ''
+              Append a `.failed` suffix
+              to the archive name, which is only removed if
+              {command}`borg create` has a zero exit status.
+            '';
+            default = true;
+          };
+
+          prune.keep = mkOption {
+            # Specifying e.g. `prune.keep.yearly = -1`
+            # means there is no limit of yearly archives to keep
+            # The regex is for use with e.g. --keep-within 1y
+            type = with types; attrsOf (either int (strMatching "[[:digit:]]+[Hdwmy]"));
+            description = lib.mdDoc ''
+              Prune a repository by deleting all archives not matching any of the
+              specified retention options. See {command}`borg help prune`
+              for the available options.
+            '';
+            default = { };
+            example = literalExpression ''
+              {
+                within = "1d"; # Keep all archives from the last day
+                daily = 7;
+                weekly = 4;
+                monthly = -1;  # Keep at least one archive for each month
+              }
+            '';
+          };
+
+          prune.prefix = mkOption {
+            type = types.nullOr (types.str);
+            description = lib.mdDoc ''
+              Only consider archive names starting with this prefix for pruning.
+              By default, only archives created by this job are considered.
+              Use `""` or `null` to consider all archives.
+            '';
+            default = config.archiveBaseName;
+            defaultText = literalExpression "archiveBaseName";
+          };
+
+          environment = mkOption {
+            type = with types; attrsOf str;
+            description = lib.mdDoc ''
+              Environment variables passed to the backup script.
+              You can for example specify which SSH key to use.
+            '';
+            default = { };
+            example = { BORG_RSH = "ssh -i /path/to/key"; };
+          };
+
+          preHook = mkOption {
+            type = types.lines;
+            description = lib.mdDoc ''
+              Shell commands to run before the backup.
+              This can for example be used to mount file systems.
+            '';
+            default = "";
+            example = ''
+              # To add excluded paths at runtime
+              extraCreateArgs="$extraCreateArgs --exclude /some/path"
+            '';
+          };
+
+          postInit = mkOption {
+            type = types.lines;
+            description = lib.mdDoc ''
+              Shell commands to run after {command}`borg init`.
+            '';
+            default = "";
+          };
+
+          postCreate = mkOption {
+            type = types.lines;
+            description = lib.mdDoc ''
+              Shell commands to run after {command}`borg create`. The name
+              of the created archive is stored in `$archiveName`.
+            '';
+            default = "";
+          };
+
+          postPrune = mkOption {
+            type = types.lines;
+            description = lib.mdDoc ''
+              Shell commands to run after {command}`borg prune`.
+            '';
+            default = "";
+          };
+
+          postHook = mkOption {
+            type = types.lines;
+            description = lib.mdDoc ''
+              Shell commands to run just before exit. They are executed
+              even if a previous command exits with a non-zero exit code.
+              The latter is available as `$exitStatus`.
+            '';
+            default = "";
+          };
+
+          extraArgs = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Additional arguments for all {command}`borg` calls the
+              service has. Handle with care.
+            '';
+            default = "";
+            example = "--remote-path=/path/to/borg";
+          };
+
+          extraInitArgs = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Additional arguments for {command}`borg init`.
+              Can also be set at runtime using `$extraInitArgs`.
+            '';
+            default = "";
+            example = "--append-only";
+          };
+
+          extraCreateArgs = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Additional arguments for {command}`borg create`.
+              Can also be set at runtime using `$extraCreateArgs`.
+            '';
+            default = "";
+            example = "--stats --checkpoint-interval 600";
+          };
+
+          extraPruneArgs = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Additional arguments for {command}`borg prune`.
+              Can also be set at runtime using `$extraPruneArgs`.
+            '';
+            default = "";
+            example = "--save-space";
+          };
+
+          extraCompactArgs = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Additional arguments for {command}`borg compact`.
+              Can also be set at runtime using `$extraCompactArgs`.
+            '';
+            default = "";
+            example = "--cleanup-commits";
+          };
+        };
+      }
+    ));
+  };
+
+  options.services.borgbackup.repos = mkOption {
+    description = lib.mdDoc ''
+      Serve BorgBackup repositories to given public SSH keys,
+      restricting their access to the repository only.
+      See also the chapter about BorgBackup in the NixOS manual.
+      Also, clients do not need to specify the absolute path when accessing the repository,
+      i.e. `user@machine:.` is enough. (Note colon and dot.)
+    '';
+    default = { };
+    type = types.attrsOf (types.submodule (
+      { ... }: {
+        options = {
+          path = mkOption {
+            type = types.path;
+            description = lib.mdDoc ''
+              Where to store the backups. Note that the directory
+              is created automatically, with correct permissions.
+            '';
+            default = "/var/lib/borgbackup";
+          };
+
+          user = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              The user {command}`borg serve` is run as.
+              User or group needs write permission
+              for the specified {option}`path`.
+            '';
+            default = "borg";
+          };
+
+          group = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              The group {command}`borg serve` is run as.
+              User or group needs write permission
+              for the specified {option}`path`.
+            '';
+            default = "borg";
+          };
+
+          authorizedKeys = mkOption {
+            type = with types; listOf str;
+            description = lib.mdDoc ''
+              Public SSH keys that are given full write access to this repository.
+              You should use a different SSH key for each repository you write to, because
+              the specified keys are restricted to running {command}`borg serve`
+              and can only access this single repository.
+            '';
+            default = [ ];
+          };
+
+          authorizedKeysAppendOnly = mkOption {
+            type = with types; listOf str;
+            description = lib.mdDoc ''
+              Public SSH keys that can only be used to append new data (archives) to the repository.
+              Note that archives can still be marked as deleted and are subsequently removed from disk
+              upon accessing the repo with full write access, e.g. when pruning.
+            '';
+            default = [ ];
+          };
+
+          allowSubRepos = mkOption {
+            type = types.bool;
+            description = lib.mdDoc ''
+              Allow clients to create repositories in subdirectories of the
+              specified {option}`path`. These can be accessed using
+              `user@machine:path/to/subrepo`. Note that a
+              {option}`quota` applies to repositories independently.
+              Therefore, if this is enabled, clients can create multiple
+              repositories and upload an arbitrary amount of data.
+            '';
+            default = false;
+          };
+
+          quota = mkOption {
+            # See the definition of parse_file_size() in src/borg/helpers/parseformat.py
+            type = with types; nullOr (strMatching "[[:digit:].]+[KMGTP]?");
+            description = lib.mdDoc ''
+              Storage quota for the repository. This quota is ensured for all
+              sub-repositories if {option}`allowSubRepos` is enabled
+              but not for the overall storage space used.
+            '';
+            default = null;
+            example = "100G";
+          };
+
+        };
+      }
+    ));
+  };
+
+  ###### implementation
+
+  config = mkIf (with config.services.borgbackup; jobs != { } || repos != { })
+    (with config.services.borgbackup; {
+      assertions =
+        mapAttrsToList mkPassAssertion jobs
+        ++ mapAttrsToList mkKeysAssertion repos
+        ++ mapAttrsToList mkSourceAssertions jobs
+        ++ mapAttrsToList mkRemovableDeviceAssertions jobs;
+
+      system.activationScripts = mapAttrs' mkActivationScript jobs;
+
+      systemd.services =
+        # A job named "foo" is mapped to systemd.services.borgbackup-job-foo
+        mapAttrs' mkBackupService jobs
+        # A repo named "foo" is mapped to systemd.services.borgbackup-repo-foo
+        // mapAttrs' mkRepoService repos;
+
+      # A job named "foo" is mapped to systemd.timers.borgbackup-job-foo
+      # only generate the timer if interval (startAt) is set
+      systemd.timers = mapAttrs' mkBackupTimers (filterAttrs (_: cfg: cfg.startAt != []) jobs);
+
+      users = mkMerge (mapAttrsToList mkUsersConfig repos);
+
+      environment.systemPackages =
+        [ config.services.borgbackup.package ] ++ (mapAttrsToList mkBorgWrapper jobs);
+    });
+}
diff --git a/nixpkgs/nixos/modules/services/backup/borgmatic.nix b/nixpkgs/nixos/modules/services/backup/borgmatic.nix
new file mode 100644
index 000000000000..b27dd2817120
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/borgmatic.nix
@@ -0,0 +1,104 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.borgmatic;
+  settingsFormat = pkgs.formats.yaml { };
+
+  repository = with types; submodule {
+    options = {
+      path = mkOption {
+        type = str;
+        description = mdDoc ''
+          Path to the repository
+        '';
+      };
+      label = mkOption {
+        type = str;
+        description = mdDoc ''
+          Label for the repository
+        '';
+      };
+    };
+  };
+  cfgType = with types; submodule {
+    freeformType = settingsFormat.type;
+    options = {
+      source_directories = mkOption {
+        type = nullOr (listOf str);
+        default = null;
+        description = mdDoc ''
+          List of source directories and files to back up. Globs and tildes are
+          expanded. Do not backslash spaces in path names.
+        '';
+        example = [ "/home" "/etc" "/var/log/syslog*" "/home/user/path with spaces" ];
+      };
+      repositories = mkOption {
+        type = nullOr (listOf repository);
+        default = null;
+        description = mdDoc ''
+          A required list of local or remote repositories with paths and
+          optional labels (which can be used with the --repository flag to
+          select a repository). Tildes are expanded. Multiple repositories are
+          backed up to in sequence. Borg placeholders can be used. See the
+          output of "borg help placeholders" for details. See ssh_command for
+          SSH options like identity file or port. If systemd service is used,
+          then add local repository paths in the systemd service file to the
+          ReadWritePaths list.
+        '';
+        example = [
+          { path="ssh://user@backupserver/./sourcehostname.borg"; label="backupserver"; }
+          { path="/mnt/backup"; label="local"; }
+        ];
+      };
+    };
+  };
+
+  cfgfile = settingsFormat.generate "config.yaml" cfg.settings;
+in
+{
+  options.services.borgmatic = {
+    enable = mkEnableOption (mdDoc "borgmatic");
+
+    settings = mkOption {
+      description = mdDoc ''
+        See https://torsion.org/borgmatic/docs/reference/configuration/
+      '';
+      default = null;
+      type = types.nullOr cfgType;
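+      # Illustrative example (assumed paths); any upstream borgmatic option is
+      # accepted via the freeform settings type.
+      example = literalExpression ''
+        {
+          source_directories = [ "/home" "/etc" ];
+          repositories = [
+            { path = "/mnt/backup"; label = "local"; }
+          ];
+        }
+      '';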
+    };
+
+    configurations = mkOption {
+      description = mdDoc ''
+        Set of borgmatic configurations, see https://torsion.org/borgmatic/docs/reference/configuration/
+      '';
+      default = { };
+      type = types.attrsOf cfgType;
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    warnings = []
+      ++ optional (cfg.settings != null && cfg.settings ? location)
+        "`services.borgmatic.settings.location` is deprecated, please move your options out of sections to the global scope"
+      ++ optional (catAttrs "location" (attrValues cfg.configurations) != [])
+        "`services.borgmatic.configurations.<name>.location` is deprecated, please move your options out of sections to the global scope"
+    ;
+
+    environment.systemPackages = [ pkgs.borgmatic ];
+
+    environment.etc = (optionalAttrs (cfg.settings != null) { "borgmatic/config.yaml".source = cfgfile; }) //
+      mapAttrs'
+        (name: value: nameValuePair
+          "borgmatic.d/${name}.yaml"
+          { source = settingsFormat.generate "${name}.yaml" value; })
+        cfg.configurations;
+
+    systemd.packages = [ pkgs.borgmatic ];
+
+    # Workaround: https://github.com/NixOS/nixpkgs/issues/81138
+    systemd.timers.borgmatic.wantedBy = [ "timers.target" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/backup/btrbk.nix b/nixpkgs/nixos/modules/services/backup/btrbk.nix
new file mode 100644
index 000000000000..9b7f1566eb1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/btrbk.nix
@@ -0,0 +1,273 @@
+{ config, pkgs, lib, ... }:
+let
+  inherit (lib)
+    concatLists
+    concatMap
+    concatMapStringsSep
+    concatStringsSep
+    filterAttrs
+    isAttrs
+    literalExpression
+    mapAttrs'
+    mapAttrsToList
+    mkIf
+    mkOption
+    optionalString
+    sort
+    types
+    ;
+
+  # The priority of an option or section.
+  # The configuration format is order-sensitive. Pairs are added as children of
+  # the last section if possible; otherwise, they start a new section.
+  # We sort them in topological order:
+  # 1. Leaf pairs.
+  # 2. Sections that may contain (1).
+  # 3. Sections that may contain (1) or (2).
+  # 4. Etc.
+  prioOf = { name, value }:
+    if !isAttrs value then 0 # Leaf options.
+    else {
+      target = 1; # Contains: options.
+      subvolume = 2; # Contains: options, target.
+      volume = 3; # Contains: options, target, subvolume.
+    }.${name} or (throw "Unknow section '${name}'");
+
+  genConfig' = set: concatStringsSep "\n" (genConfig set);
+  genConfig = set:
+    let
+      pairs = mapAttrsToList (name: value: { inherit name value; }) set;
+      sortedPairs = sort (a: b: prioOf a < prioOf b) pairs;
+    in
+      concatMap genPair sortedPairs;
+  genSection = sec: secName: value:
+    [ "${sec} ${secName}" ] ++ map (x: " " + x) (genConfig value);
+  genPair = { name, value }:
+    if !isAttrs value
+    then [ "${name} ${value}" ]
+    else concatLists (mapAttrsToList (genSection name) value);
+
+  sudo_doas =
+    if config.security.sudo.enable then "sudo"
+    else if config.security.doas.enable then "doas"
+    else throw "The btrbk nixos module needs either sudo or doas enabled in the configuration";
+
+  addDefaults = settings: { backend = "btrfs-progs-${sudo_doas}"; } // settings;
+
+  mkConfigFile = name: settings: pkgs.writeTextFile {
+    name = "btrbk-${name}.conf";
+    text = genConfig' (addDefaults settings);
+    checkPhase = ''
+      set +e
+      ${pkgs.btrbk}/bin/btrbk -c $out dryrun
+      # According to btrbk(1), exit status 2 means parse error
+      # for CLI options or the config file.
+      if [[ $? == 2 ]]; then
+        echo "Btrbk configuration is invalid:"
+        cat $out
+        exit 1
+      fi
+      set -e
+    '';
+  };
+
+  cfg = config.services.btrbk;
+  sshEnabled = cfg.sshAccess != [ ];
+  serviceEnabled = cfg.instances != { };
+in
+{
+  meta.maintainers = with lib.maintainers; [ oxalica ];
+
+  options = {
+    services.btrbk = {
+      extraPackages = mkOption {
+        description = lib.mdDoc "Extra packages for btrbk, like compression utilities for `stream_compress`";
+        type = types.listOf types.package;
+        default = [ ];
+        example = literalExpression "[ pkgs.xz ]";
+      };
+      niceness = mkOption {
+        description = lib.mdDoc "Niceness for local instances of btrbk. Also applies to remote ones connecting via ssh when positive.";
+        type = types.ints.between (-20) 19;
+        default = 10;
+      };
+      ioSchedulingClass = mkOption {
+        description = lib.mdDoc "IO scheduling class for btrbk (see ionice(1) for a quick description). Applies to local instances, and remote ones connecting by ssh if set to idle.";
+        type = types.enum [ "idle" "best-effort" "realtime" ];
+        default = "best-effort";
+      };
+      instances = mkOption {
+        description = lib.mdDoc "Set of btrbk instances. The instance named `btrbk` is the default one.";
+        type = with types;
+          attrsOf (
+            submodule {
+              options = {
+                onCalendar = mkOption {
+                  type = types.nullOr types.str;
+                  default = "daily";
+                  description = lib.mdDoc ''
+                    How often this btrbk instance is started. See systemd.time(7) for more information about the format.
+                    Setting it to null disables the timer, thus this instance can only be started manually.
+                  '';
+                };
+                settings = mkOption {
+                  type = let t = types.attrsOf (types.either types.str (t // { description = "instances of this type recursively"; })); in t;
+                  default = { };
+                  example = {
+                    snapshot_preserve_min = "2d";
+                    snapshot_preserve = "14d";
+                    volume = {
+                      "/mnt/btr_pool" = {
+                        target = "/mnt/btr_backup/mylaptop";
+                        subvolume = {
+                          "rootfs" = { };
+                          "home" = { snapshot_create = "always"; };
+                        };
+                      };
+                    };
+                  };
+                  description = lib.mdDoc "configuration options for btrbk. Nested attrsets translate to subsections.";
+                };
+              };
+            }
+          );
+        default = { };
+      };
+      sshAccess = mkOption {
+        description = lib.mdDoc "SSH keys that should be able to make or push snapshots on this system remotely with btrbk";
+        type = with types; listOf (
+          submodule {
+            options = {
+              key = mkOption {
+                type = str;
+                description = lib.mdDoc "SSH public key allowed to login as user `btrbk` to run remote backups.";
+              };
+              roles = mkOption {
+                type = listOf (enum [ "info" "source" "target" "delete" "snapshot" "send" "receive" ]);
+                example = [ "source" "info" "send" ];
+                description = lib.mdDoc "What actions can be performed with this SSH key. See ssh_filter_btrbk(1) for details";
+              };
+            };
+          }
+        );
+        default = [ ];
+      };
+    };
+
+  };
+  config = mkIf (sshEnabled || serviceEnabled) {
+    environment.systemPackages = [ pkgs.btrbk ] ++ cfg.extraPackages;
+    security.sudo = mkIf (sudo_doas == "sudo") {
+      extraRules = [
+        {
+            users = [ "btrbk" ];
+            commands = [
+            { command = "${pkgs.btrfs-progs}/bin/btrfs"; options = [ "NOPASSWD" ]; }
+            { command = "${pkgs.coreutils}/bin/mkdir"; options = [ "NOPASSWD" ]; }
+            { command = "${pkgs.coreutils}/bin/readlink"; options = [ "NOPASSWD" ]; }
+            # for ssh, these are not the same as the ones hard-coded in ${pkgs.btrbk}
+            { command = "/run/current-system/sw/bin/btrfs"; options = [ "NOPASSWD" ]; }
+            { command = "/run/current-system/sw/bin/mkdir"; options = [ "NOPASSWD" ]; }
+            { command = "/run/current-system/sw/bin/readlink"; options = [ "NOPASSWD" ]; }
+            ];
+        }
+      ];
+    };
+    security.doas = mkIf (sudo_doas == "doas") {
+      extraRules = let
+        doasCmdNoPass = cmd: { users = [ "btrbk" ]; cmd = cmd; noPass = true; };
+      in
+        [
+            (doasCmdNoPass "${pkgs.btrfs-progs}/bin/btrfs")
+            (doasCmdNoPass "${pkgs.coreutils}/bin/mkdir")
+            (doasCmdNoPass "${pkgs.coreutils}/bin/readlink")
+            # for ssh, these are not the same as the ones hard-coded in ${pkgs.btrbk}
+            (doasCmdNoPass "/run/current-system/sw/bin/btrfs")
+            (doasCmdNoPass "/run/current-system/sw/bin/mkdir")
+            (doasCmdNoPass "/run/current-system/sw/bin/readlink")
+
+            # doas matches command, not binary
+            (doasCmdNoPass "btrfs")
+            (doasCmdNoPass "mkdir")
+            (doasCmdNoPass "readlink")
+        ];
+    };
+    users.users.btrbk = {
+      isSystemUser = true;
+      # ssh needs a home directory
+      home = "/var/lib/btrbk";
+      createHome = true;
+      shell = "${pkgs.bash}/bin/bash";
+      group = "btrbk";
+      openssh.authorizedKeys.keys = map
+        (
+          v:
+          let
+            options = concatMapStringsSep " " (x: "--" + x) v.roles;
+            ioniceClass = {
+              "idle" = 3;
+              "best-effort" = 2;
+              "realtime" = 1;
+            }.${cfg.ioSchedulingClass};
+            sudo_doas_flag = "--${sudo_doas}";
+          in
+          ''command="${pkgs.util-linux}/bin/ionice -t -c ${toString ioniceClass} ${optionalString (cfg.niceness >= 1) "${pkgs.coreutils}/bin/nice -n ${toString cfg.niceness}"} ${pkgs.btrbk}/share/btrbk/scripts/ssh_filter_btrbk.sh ${sudo_doas_flag} ${options}" ${v.key}''
+        )
+        cfg.sshAccess;
+    };
+    users.groups.btrbk = { };
+    systemd.tmpfiles.rules = [
+      "d /var/lib/btrbk 0750 btrbk btrbk"
+      "d /var/lib/btrbk/.ssh 0700 btrbk btrbk"
+      "f /var/lib/btrbk/.ssh/config 0700 btrbk btrbk - StrictHostKeyChecking=accept-new"
+    ];
+    environment.etc = mapAttrs'
+      (
+        name: instance: {
+          name = "btrbk/${name}.conf";
+          value.source = mkConfigFile name instance.settings;
+        }
+      )
+      cfg.instances;
+    systemd.services = mapAttrs'
+      (
+        name: _: {
+          name = "btrbk-${name}";
+          value = {
+            description = "Takes BTRFS snapshots and maintains retention policies.";
+            unitConfig.Documentation = "man:btrbk(1)";
+            path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+            serviceConfig = {
+              User = "btrbk";
+              Group = "btrbk";
+              Type = "oneshot";
+              ExecStart = "${pkgs.btrbk}/bin/btrbk -c /etc/btrbk/${name}.conf run";
+              Nice = cfg.niceness;
+              IOSchedulingClass = cfg.ioSchedulingClass;
+              StateDirectory = "btrbk";
+            };
+          };
+        }
+      )
+      cfg.instances;
+
+    systemd.timers = mapAttrs'
+      (
+        name: instance: {
+          name = "btrbk-${name}";
+          value = {
+            description = "Timer to take BTRFS snapshots and maintain retention policies.";
+            wantedBy = [ "timers.target" ];
+            timerConfig = {
+              OnCalendar = instance.onCalendar;
+              AccuracySec = "10min";
+              Persistent = true;
+            };
+          };
+        }
+      )
+      (filterAttrs (name: instance: instance.onCalendar != null)
+        cfg.instances);
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/backup/duplicati.nix b/nixpkgs/nixos/modules/services/backup/duplicati.nix
new file mode 100644
index 000000000000..9b422635e7f0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/duplicati.nix
@@ -0,0 +1,87 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.duplicati;
+in
+{
+  options = {
+    services.duplicati = {
+      enable = mkEnableOption (lib.mdDoc "Duplicati");
+
+      package = mkPackageOptionMD pkgs "duplicati" { };
+
+      port = mkOption {
+        default = 8200;
+        type = types.port;
+        description = lib.mdDoc ''
+          Port on which the web interface is served.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/duplicati";
+        description = lib.mdDoc ''
+          The directory where Duplicati stores its data files.
+
+          ::: {.note}
+          If left as the default value this directory will automatically be created
+          before the Duplicati server starts, otherwise you are responsible for ensuring
+          the directory exists with appropriate ownership and permissions.
+          :::
+        '';
+      };
+
+      interface = mkOption {
+        default = "127.0.0.1";
+        type = types.str;
+        description = lib.mdDoc ''
+          Listening interface for the web UI.
+          Set it to "any" to listen on all available interfaces.
+        '';
+      };
+
+      user = mkOption {
+        default = "duplicati";
+        type = types.str;
+        description = lib.mdDoc ''
+          Duplicati runs as its own user. It will only be able to back up world-readable files.
+          Run as root with special care.
+        '';
+      };
+    };
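+    # A minimal sketch of a configuration (values are assumptions, not
+    # recommendations), serving the web UI on localhost only:
+    #
+    #   services.duplicati = {
+    #     enable = true;
+    #     interface = "127.0.0.1";
+    #     port = 8200;
+    #   };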
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.duplicati = {
+      description = "Duplicati backup";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = mkMerge [
+        {
+          User = cfg.user;
+          Group = "duplicati";
+          ExecStart = "${cfg.package}/bin/duplicati-server --webservice-interface=${cfg.interface} --webservice-port=${toString cfg.port} --server-datafolder=${cfg.dataDir}";
+          Restart = "on-failure";
+        }
+        (mkIf (cfg.dataDir == "/var/lib/duplicati") {
+          StateDirectory = "duplicati";
+        })
+      ];
+    };
+
+    users.users = lib.optionalAttrs (cfg.user == "duplicati") {
+      duplicati = {
+        uid = config.ids.uids.duplicati;
+        home = cfg.dataDir;
+        group = "duplicati";
+      };
+    };
+    users.groups.duplicati.gid = config.ids.gids.duplicati;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/backup/duplicity.nix b/nixpkgs/nixos/modules/services/backup/duplicity.nix
new file mode 100644
index 000000000000..05ec997ab66b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/duplicity.nix
@@ -0,0 +1,190 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.duplicity;
+
+  stateDirectory = "/var/lib/duplicity";
+
+  localTarget =
+    if hasPrefix "file://" cfg.targetUrl
+    then removePrefix "file://" cfg.targetUrl else null;
+
+in
+{
+  options.services.duplicity = {
+    enable = mkEnableOption (lib.mdDoc "backups with duplicity");
+
+    root = mkOption {
+      type = types.path;
+      default = "/";
+      description = lib.mdDoc ''
+        Root directory to back up.
+      '';
+    };
+
+    include = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "/home" ];
+      description = lib.mdDoc ''
+        List of paths to include in the backups. See the FILE SELECTION
+        section in {manpage}`duplicity(1)` for details on the syntax.
+      '';
+    };
+
+    exclude = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      description = lib.mdDoc ''
+        List of paths to exclude from backups. See the FILE SELECTION section in
+        {manpage}`duplicity(1)` for details on the syntax.
+      '';
+    };
+
+    targetUrl = mkOption {
+      type = types.str;
+      example = "s3://host:port/prefix";
+      description = lib.mdDoc ''
+        Target URL to back up to. See the URL FORMAT section in
+        {manpage}`duplicity(1)` for supported URLs.
+      '';
+    };
+
+    secretFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path of a file containing secrets (gpg passphrase, access key...) in
+        the format of EnvironmentFile as described by
+        {manpage}`systemd.exec(5)`. For example:
+        ```
+        PASSPHRASE=«...»
+        AWS_ACCESS_KEY_ID=«...»
+        AWS_SECRET_ACCESS_KEY=«...»
+        ```
+      '';
+    };
+
+    frequency = mkOption {
+      type = types.nullOr types.str;
+      default = "daily";
+      description = lib.mdDoc ''
+        Run duplicity with the given frequency (see
+        {manpage}`systemd.time(7)` for the format).
+        If null, do not run automatically.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--backend-retry-delay" "100" ];
+      description = lib.mdDoc ''
+        Extra command-line flags passed to duplicity. See
+        {manpage}`duplicity(1)`.
+      '';
+    };
+
+    fullIfOlderThan = mkOption {
+      type = types.str;
+      default = "never";
+      example = "1M";
+      description = lib.mdDoc ''
+        If `"never"` (the default) always do incremental
+        backups (the first backup will be a full backup, of course).  If
+        `"always"` always do full backups.  Otherwise, this
+        must be a string representing a duration. Full backups will be made
+        when the latest full backup is older than this duration. If this is not
+        the case, an incremental backup is performed.
+      '';
+    };
+
+    cleanup = {
+      maxAge = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "6M";
+        description = lib.mdDoc ''
+          If non-null, delete all backup sets older than the given time.  Old backup sets
+          will not be deleted if backup sets newer than that time depend on them.
+        '';
+      };
+      maxFull = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 2;
+        description = lib.mdDoc ''
+          If non-null, delete all backup sets that are older than the count-th last full
+          backup (in other words, keep the last count full backups and
+          associated incremental sets).
+        '';
+      };
+      maxIncr = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 1;
+        description = lib.mdDoc ''
+          If non-null, delete incremental sets of all backup sets that are
+          older than the count-th last full backup (in other words, keep only
+          old full backups and not their increments).
+        '';
+      };
+    };
+  };
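+  # A hedged sketch of one possible configuration (target URL and secret path are
+  # placeholders): back up /home to S3, forcing a full backup every month and
+  # pruning sets older than six months:
+  #
+  #   services.duplicity = {
+  #     enable = true;
+  #     include = [ "/home" ];
+  #     targetUrl = "s3://s3.example.com/bucket/prefix";
+  #     secretFile = "/run/keys/duplicity-env";
+  #     fullIfOlderThan = "1M";
+  #     cleanup.maxAge = "6M";
+  #   };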
+
+  config = mkIf cfg.enable {
+    systemd = {
+      services.duplicity = {
+        description = "backup files with duplicity";
+
+        environment.HOME = stateDirectory;
+
+        script =
+          let
+            target = escapeShellArg cfg.targetUrl;
+            extra = escapeShellArgs ([ "--archive-dir" stateDirectory ] ++ cfg.extraFlags);
+            dup = "${pkgs.duplicity}/bin/duplicity";
+          in
+          ''
+            set -x
+            ${dup} cleanup ${target} --force ${extra}
+            ${lib.optionalString (cfg.cleanup.maxAge != null) "${dup} remove-older-than ${lib.escapeShellArg cfg.cleanup.maxAge} ${target} --force ${extra}"}
+            ${lib.optionalString (cfg.cleanup.maxFull != null) "${dup} remove-all-but-n-full ${toString cfg.cleanup.maxFull} ${target} --force ${extra}"}
+            ${lib.optionalString (cfg.cleanup.maxIncr != null) "${dup} remove-all-inc-of-but-n-full ${toString cfg.cleanup.maxIncr} ${target} --force ${extra}"}
+            exec ${dup} ${if cfg.fullIfOlderThan == "always" then "full" else "incr"} ${lib.escapeShellArgs (
+              [ cfg.root cfg.targetUrl ]
+              ++ concatMap (p: [ "--include" p ]) cfg.include
+              ++ concatMap (p: [ "--exclude" p ]) cfg.exclude
+              ++ (lib.optionals (cfg.fullIfOlderThan != "never" && cfg.fullIfOlderThan != "always") [ "--full-if-older-than" cfg.fullIfOlderThan ])
+              )} ${extra}
+          '';
+        serviceConfig = {
+          PrivateTmp = true;
+          ProtectSystem = "strict";
+          ProtectHome = "read-only";
+          StateDirectory = baseNameOf stateDirectory;
+        } // optionalAttrs (localTarget != null) {
+          ReadWritePaths = localTarget;
+        } // optionalAttrs (cfg.secretFile != null) {
+          EnvironmentFile = cfg.secretFile;
+        };
+      } // optionalAttrs (cfg.frequency != null) {
+        startAt = cfg.frequency;
+      };
+
+      tmpfiles.rules = optional (localTarget != null) "d ${localTarget} 0700 root root -";
+    };
+
+    assertions = singleton {
+      # Duplicity will fail if the last file selection option is an include. It
+      # is not always possible to detect but this simple case can be caught.
+      assertion = cfg.include != [ ] -> cfg.exclude != [ ] || cfg.extraFlags != [ ];
+      message = ''
+        Duplicity will fail if you only specify included paths ("Because the
+        default is to include all files, the expression is redundant. Exiting
+        because this probably isn't what you meant.")
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/backup/mysql-backup.nix b/nixpkgs/nixos/modules/services/backup/mysql-backup.nix
new file mode 100644
index 000000000000..9fbc599cd41a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/mysql-backup.nix
@@ -0,0 +1,130 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) mariadb gzip;
+
+  cfg = config.services.mysqlBackup;
+  defaultUser = "mysqlbackup";
+
+  backupScript = ''
+    set -o pipefail
+    failed=""
+    ${concatMapStringsSep "\n" backupDatabaseScript cfg.databases}
+    if [ -n "$failed" ]; then
+      echo "Backup of database(s) failed:$failed"
+      exit 1
+    fi
+  '';
+  backupDatabaseScript = db: ''
+    dest="${cfg.location}/${db}.gz"
+    if ${mariadb}/bin/mysqldump ${optionalString cfg.singleTransaction "--single-transaction"} ${db} | ${gzip}/bin/gzip -c > $dest.tmp; then
+      mv $dest.tmp $dest
+      echo "Backed up to $dest"
+    else
+      echo "Failed to back up to $dest"
+      rm -f $dest.tmp
+      failed="$failed ${db}"
+    fi
+  '';
+
+in
+
+{
+  options = {
+
+    services.mysqlBackup = {
+
+      enable = mkEnableOption (lib.mdDoc "MySQL backups");
+
+      calendar = mkOption {
+        type = types.str;
+        default = "01:15:00";
+        description = lib.mdDoc ''
+          Configures when to run the backup service systemd unit (DayOfWeek Year-Month-Day Hour:Minute:Second).
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = defaultUser;
+        description = lib.mdDoc ''
+          User to be used to perform the backup.
+        '';
+      };
+
+      databases = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of database names to dump.
+        '';
+      };
+
+      location = mkOption {
+        type = types.path;
+        default = "/var/backup/mysql";
+        description = lib.mdDoc ''
+          Location to put the gzipped MySQL database dumps.
+        '';
+      };
+
+      singleTransaction = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to create the database dump in a single transaction.
+        '';
+      };
+    };
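+    # A minimal sketch (database names are placeholders): nightly gzipped dumps of
+    # two databases into the default location:
+    #
+    #   services.mysqlBackup = {
+    #     enable = true;
+    #     databases = [ "nextcloud" "wiki" ];
+    #     singleTransaction = true;
+    #   };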
+
+  };
+
+  config = mkIf cfg.enable {
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} = {
+        isSystemUser = true;
+        createHome = false;
+        home = cfg.location;
+        group = "nogroup";
+      };
+    };
+
+    services.mysql.ensureUsers = [{
+      name = cfg.user;
+      ensurePermissions = with lib;
+        let
+          privs = "SELECT, SHOW VIEW, TRIGGER, LOCK TABLES";
+          grant = db: nameValuePair "${db}.*" privs;
+        in
+          listToAttrs (map grant cfg.databases);
+    }];
+
+    systemd = {
+      timers.mysql-backup = {
+        description = "Mysql backup timer";
+        wantedBy = [ "timers.target" ];
+        timerConfig = {
+          OnCalendar = cfg.calendar;
+          AccuracySec = "5m";
+          Unit = "mysql-backup.service";
+        };
+      };
+      services.mysql-backup = {
+        description = "MySQL backup service";
+        enable = true;
+        serviceConfig = {
+          Type = "oneshot";
+          User = cfg.user;
+        };
+        script = backupScript;
+      };
+      tmpfiles.rules = [
+        "d ${cfg.location} 0700 ${cfg.user} - - -"
+      ];
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/backup/postgresql-backup.nix b/nixpkgs/nixos/modules/services/backup/postgresql-backup.nix
new file mode 100644
index 000000000000..d3c6f3104fc5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/postgresql-backup.nix
@@ -0,0 +1,181 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.postgresqlBackup;
+
+  postgresqlBackupService = db: dumpCmd:
+    let
+      compressSuffixes = {
+        "none" = "";
+        "gzip" = ".gz";
+        "zstd" = ".zstd";
+      };
+      compressSuffix = getAttr cfg.compression compressSuffixes;
+
+      compressCmd = getAttr cfg.compression {
+        "none" = "cat";
+        "gzip" = "${pkgs.gzip}/bin/gzip -c -${toString cfg.compressionLevel}";
+        "zstd" = "${pkgs.zstd}/bin/zstd -c -${toString cfg.compressionLevel}";
+      };
+
+      mkSqlPath = prefix: suffix: "${cfg.location}/${db}${prefix}.sql${suffix}";
+      curFile = mkSqlPath "" compressSuffix;
+      prevFile = mkSqlPath ".prev" compressSuffix;
+      prevFiles = map (mkSqlPath ".prev") (attrValues compressSuffixes);
+      inProgressFile = mkSqlPath ".in-progress" compressSuffix;
+    in {
+      enable = true;
+
+      description = "Backup of ${db} database(s)";
+
+      requires = [ "postgresql.service" ];
+
+      path = [ pkgs.coreutils config.services.postgresql.package ];
+
+      script = ''
+        set -e -o pipefail
+
+        umask 0077 # ensure backup is only readable by postgres user
+
+        if [ -e ${curFile} ]; then
+          rm -f ${toString prevFiles}
+          mv ${curFile} ${prevFile}
+        fi
+
+        ${dumpCmd} \
+          | ${compressCmd} \
+          > ${inProgressFile}
+
+        mv ${inProgressFile} ${curFile}
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "postgres";
+      };
+
+      startAt = cfg.startAt;
+    };
+
+in {
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "postgresqlBackup" "period" ] ''
+       A systemd timer is now used instead of cron.
+       The starting time can be configured via <literal>services.postgresqlBackup.startAt</literal>.
+    '')
+  ];
+
+  options = {
+    services.postgresqlBackup = {
+      enable = mkEnableOption (lib.mdDoc "PostgreSQL dumps");
+
+      startAt = mkOption {
+        default = "*-*-* 01:15:00";
+        type = with types; either (listOf str) str;
+        description = lib.mdDoc ''
+          This option defines when the databases should be dumped
+          (see `systemd.time` for the format).
+          The default is to dump at 01:15 every night.
+        '';
+      };
+
+      backupAll = mkOption {
+        default = cfg.databases == [];
+        defaultText = literalExpression "services.postgresqlBackup.databases == []";
+        type = lib.types.bool;
+        description = lib.mdDoc ''
+          Back up all databases using pg_dumpall.
+          This option is mutually exclusive with
+          `services.postgresqlBackup.databases`.
+          The resulting backup dump will be named all.sql.gz.
+          This option is the default if no databases are specified.
+        '';
+      };
+
+      databases = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of database names to dump.
+        '';
+      };
+
+      location = mkOption {
+        default = "/var/backup/postgresql";
+        type = types.path;
+        description = lib.mdDoc ''
+          Path of directory where the PostgreSQL database dumps will be placed.
+        '';
+      };
+
+      pgdumpOptions = mkOption {
+        type = types.separatedString " ";
+        default = "-C";
+        description = lib.mdDoc ''
+          Command line options for pg_dump. This option is not used
+          if `config.services.postgresqlBackup.backupAll` is enabled.
+          Note that `config.services.postgresqlBackup.backupAll` is also active
+          when no databases are specified.
+        '';
+      };
+
+      compression = mkOption {
+        type = types.enum ["none" "gzip" "zstd"];
+        default = "gzip";
+        description = lib.mdDoc ''
+          The type of compression to use on the generated database dump.
+        '';
+      };
+
+      compressionLevel = mkOption {
+        type = types.ints.between 1 19;
+        default = 6;
+        description = lib.mdDoc ''
+          The compression level used when compression is enabled.
+          gzip accepts levels 1 to 9. zstd accepts levels 1 to 19.
+        '';
+      };
+    };
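+    # Two hedged sketches (database names are placeholders): dump everything with
+    # pg_dumpall, or dump selected databases with zstd compression:
+    #
+    #   services.postgresqlBackup.enable = true;
+    #
+    #   services.postgresqlBackup = {
+    #     enable = true;
+    #     databases = [ "gitea" "matrix-synapse" ];
+    #     compression = "zstd";
+    #     compressionLevel = 9;
+    #   };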
+
+  };
+
+  config = mkMerge [
+    {
+      assertions = [
+        {
+          assertion = cfg.backupAll -> cfg.databases == [];
+          message = "config.services.postgresqlBackup.backupAll cannot be used together with config.services.postgresqlBackup.databases";
+        }
+        {
+          assertion = cfg.compression == "none" ||
+            (cfg.compression == "gzip" && cfg.compressionLevel >= 1 && cfg.compressionLevel <= 9) ||
+            (cfg.compression == "zstd" && cfg.compressionLevel >= 1 && cfg.compressionLevel <= 19);
+          message = "config.services.postgresqlBackup.compressionLevel must be set between 1 and 9 for gzip and 1 and 19 for zstd";
+        }
+      ];
+    }
+    (mkIf cfg.enable {
+      systemd.tmpfiles.rules = [
+        "d '${cfg.location}' 0700 postgres - - -"
+      ];
+    })
+    (mkIf (cfg.enable && cfg.backupAll) {
+      systemd.services.postgresqlBackup =
+        postgresqlBackupService "all" "pg_dumpall";
+    })
+    (mkIf (cfg.enable && !cfg.backupAll) {
+      systemd.services = listToAttrs (map (db:
+        let
+          cmd = "pg_dump ${cfg.pgdumpOptions} ${db}";
+        in {
+          name = "postgresqlBackup-${db}";
+          value = postgresqlBackupService db cmd;
+        }) cfg.databases);
+    })
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/backup/postgresql-wal-receiver.nix b/nixpkgs/nixos/modules/services/backup/postgresql-wal-receiver.nix
new file mode 100644
index 000000000000..773dc0ba447d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/postgresql-wal-receiver.nix
@@ -0,0 +1,204 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  receiverSubmodule = {
+    options = {
+      postgresqlPackage = mkOption {
+        type = types.package;
+        example = literalExpression "pkgs.postgresql_15";
+        description = lib.mdDoc ''
+          PostgreSQL package to use.
+        '';
+      };
+
+      directory = mkOption {
+        type = types.path;
+        example = literalExpression "/mnt/pg_wal/main/";
+        description = lib.mdDoc ''
+          Directory to write the output to.
+        '';
+      };
+
+      statusInterval = mkOption {
+        type = types.int;
+        default = 10;
+        description = lib.mdDoc ''
+          Specifies the number of seconds between status packets sent back to the server.
+          This allows for easier monitoring of the progress from the server.
+          A value of zero disables the periodic status updates completely,
+          although an update will still be sent when requested by the server, to avoid a timeout disconnect.
+        '';
+      };
+
+      slot = mkOption {
+        type = types.str;
+        default = "";
+        example = "some_slot_name";
+        description = lib.mdDoc ''
+          Require {command}`pg_receivewal` to use an existing replication slot (see
+          [Section 26.2.6 of the PostgreSQL manual](https://www.postgresql.org/docs/current/warm-standby.html#STREAMING-REPLICATION-SLOTS)).
+          When this option is used, {command}`pg_receivewal` will report a flush position to the server,
+          indicating when each segment has been synchronized to disk so that the server can remove that segment if it is not otherwise needed.
+
+          When the replication client of {command}`pg_receivewal` is configured on the server as a synchronous standby,
+          then using a replication slot will report the flush position to the server, but only when a WAL file is closed.
+          Therefore, that configuration will cause transactions on the primary to wait for a long time and effectively not work satisfactorily.
+          The option {option}`synchronous` must be specified in addition to make this work correctly.
+        '';
+      };
+
+      synchronous = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Flush the WAL data to disk immediately after it has been received.
+          Also send a status packet back to the server immediately after flushing, regardless of {option}`statusInterval`.
+
+          This option should be specified if the replication client of {command}`pg_receivewal` is configured on the server as a synchronous standby,
+          to ensure that timely feedback is sent to the server.
+        '';
+      };
+
+      compress = mkOption {
+        type = types.ints.between 0 9;
+        default = 0;
+        description = lib.mdDoc ''
+          Enables gzip compression of write-ahead logs, and specifies the compression level
+          (`0` through `9`, `0` being no compression and `9` being best compression).
+          The suffix `.gz` will automatically be added to all filenames.
+
+          This option requires PostgreSQL >= 10.
+        '';
+      };
+
+      connection = mkOption {
+        type = types.str;
+        example = "postgresql://user@somehost";
+        description = lib.mdDoc ''
+          Specifies parameters used to connect to the server, as a connection string.
+          See [Section 34.1.1 of the PostgreSQL manual](https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING) for more information.
+
+          Because {command}`pg_receivewal` doesn't connect to any particular database in the cluster,
+          the database name in the connection string will be ignored.
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = with types; listOf str;
+        default = [ ];
+        example = literalExpression ''
+          [
+            "--no-sync"
+          ]
+        '';
+        description = lib.mdDoc ''
+          A list of extra arguments to pass to the {command}`pg_receivewal` command.
+        '';
+      };
+
+      environment = mkOption {
+        type = with types; attrsOf str;
+        default = { };
+        example = literalExpression ''
+          {
+            PGPASSFILE = "/private/passfile";
+            PGSSLMODE = "require";
+          }
+        '';
+        description = lib.mdDoc ''
+          Environment variables passed to the service.
+          Usable parameters are listed in [Section 34.14 of the PostgreSQL manual](https://www.postgresql.org/docs/current/libpq-envars.html).
+        '';
+      };
+    };
+  };
+
+in {
+  options = {
+    services.postgresqlWalReceiver = {
+      receivers = mkOption {
+        type = with types; attrsOf (submodule receiverSubmodule);
+        default = { };
+        example = literalExpression ''
+          {
+            main = {
+              postgresqlPackage = pkgs.postgresql_15;
+              directory = /mnt/pg_wal/main/;
+              slot = "main_wal_receiver";
+              connection = "postgresql://user@somehost";
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          PostgreSQL WAL receivers.
+          Stream write-ahead logs from a PostgreSQL server using {command}`pg_receivewal` (formerly {command}`pg_receivexlog`).
+          See [the man page](https://www.postgresql.org/docs/current/app-pgreceivewal.html) for more information.
+        '';
+      };
+    };
+  };
+
+  config = let
+    receivers = config.services.postgresqlWalReceiver.receivers;
+  in mkIf (receivers != { }) {
+    users = {
+      users.postgres = {
+        uid = config.ids.uids.postgres;
+        group = "postgres";
+        description = "PostgreSQL server user";
+      };
+
+      groups.postgres = {
+        gid = config.ids.gids.postgres;
+      };
+    };
+
+    assertions = concatLists (attrsets.mapAttrsToList (name: config: [
+      {
+        assertion = config.compress > 0 -> versionAtLeast config.postgresqlPackage.version "10";
+        message = "Invalid configuration for WAL receiver \"${name}\": compress requires PostgreSQL version >= 10.";
+      }
+    ]) receivers);
+
+    systemd.tmpfiles.rules = mapAttrsToList (name: config: ''
+      d ${escapeShellArg config.directory} 0750 postgres postgres - -
+    '') receivers;
+
+    systemd.services = with attrsets; mapAttrs' (name: config: nameValuePair "postgresql-wal-receiver-${name}" {
+      description = "PostgreSQL WAL receiver (${name})";
+      wantedBy = [ "multi-user.target" ];
+      startLimitIntervalSec = 0; # retry forever, useful in case of network disruption
+
+      serviceConfig = {
+        User = "postgres";
+        Group = "postgres";
+        KillSignal = "SIGINT";
+        Restart = "always";
+        RestartSec = 60;
+      };
+
+      inherit (config) environment;
+
+      script = let
+        receiverCommand = postgresqlPackage:
+         if (versionAtLeast postgresqlPackage.version "10")
+           then "${postgresqlPackage}/bin/pg_receivewal"
+           else "${postgresqlPackage}/bin/pg_receivexlog";
+      in ''
+        ${receiverCommand config.postgresqlPackage} \
+          --no-password \
+          --directory=${escapeShellArg config.directory} \
+          --status-interval=${toString config.statusInterval} \
+          --dbname=${escapeShellArg config.connection} \
+          ${optionalString (config.compress > 0) "--compress=${toString config.compress}"} \
+          ${optionalString (config.slot != "") "--slot=${escapeShellArg config.slot}"} \
+          ${optionalString config.synchronous "--synchronous"} \
+          ${concatStringsSep " " config.extraArgs}
+      '';
+    }) receivers;
+  };
+
+  meta.maintainers = with maintainers; [ pacien ];
+}
diff --git a/nixpkgs/nixos/modules/services/backup/restic-rest-server.nix b/nixpkgs/nixos/modules/services/backup/restic-rest-server.nix
new file mode 100644
index 000000000000..37a6150c99d3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/restic-rest-server.nix
@@ -0,0 +1,111 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.restic.server;
+in
+{
+  meta.maintainers = [ maintainers.bachp ];
+
+  options.services.restic.server = {
+    enable = mkEnableOption (lib.mdDoc "Restic REST Server");
+
+    listenAddress = mkOption {
+      default = ":8000";
+      example = "127.0.0.1:8080";
+      type = types.str;
+      description = lib.mdDoc "Listen on a specific IP address and port.";
+    };
+
+    dataDir = mkOption {
+      default = "/var/lib/restic";
+      type = types.path;
+      description = lib.mdDoc "The directory for storing the restic repository.";
+    };
+
+    appendOnly = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enable append only mode.
+        This mode allows creation of new backups but prevents deletion and modification of existing backups.
+        This can be useful when backing up systems that are at risk of being compromised.
+      '';
+    };
+
+    privateRepos = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enable private repos.
+        Grants access only when a subdirectory with the same name as the user is specified in the repository URL.
+      '';
+    };
+
+    prometheus = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc "Enable Prometheus metrics at /metrics.";
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra command-line options to pass to the Restic REST server.
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.restic-rest-server;
+      defaultText = literalExpression "pkgs.restic-rest-server";
+      type = types.package;
+      description = lib.mdDoc "Restic REST server package to use.";
+    };
+  };
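+  # A minimal sketch of an append-only server bound to localhost (the address is
+  # an assumption, not a recommendation):
+  #
+  #   services.restic.server = {
+  #     enable = true;
+  #     listenAddress = "127.0.0.1:8000";
+  #     appendOnly = true;
+  #   };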
+
+  config = mkIf cfg.enable {
+    systemd.services.restic-rest-server = {
+      description = "Restic REST Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/rest-server \
+          --listen ${cfg.listenAddress} \
+          --path ${cfg.dataDir} \
+          ${optionalString cfg.appendOnly "--append-only"} \
+          ${optionalString cfg.privateRepos "--private-repos"} \
+          ${optionalString cfg.prometheus "--prometheus"} \
+          ${escapeShellArgs cfg.extraFlags} \
+        '';
+        Type = "simple";
+        User = "restic";
+        Group = "restic";
+
+        # Security hardening
+        ReadWritePaths = [ cfg.dataDir ];
+        PrivateTmp = true;
+        ProtectSystem = "strict";
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        PrivateDevices = true;
+      };
+    };
+
+    systemd.tmpfiles.rules = mkIf cfg.privateRepos [
+        "f ${cfg.dataDir}/.htpasswd 0700 restic restic -"
+    ];
+
+    users.users.restic = {
+      group = "restic";
+      home = cfg.dataDir;
+      createHome = true;
+      uid = config.ids.uids.restic;
+    };
+
+    users.groups.restic.gid = config.ids.uids.restic;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/backup/restic.nix b/nixpkgs/nixos/modules/services/backup/restic.nix
new file mode 100644
index 000000000000..87595f39796d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/restic.nix
@@ -0,0 +1,402 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  # Type for a valid systemd unit option. Needed for correctly passing "timerConfig" to "systemd.timers"
+  inherit (utils.systemdUtils.unitOptions) unitOption;
+in
+{
+  options.services.restic.backups = mkOption {
+    description = lib.mdDoc ''
+      Periodic backups to create with Restic.
+    '';
+    type = types.attrsOf (types.submodule ({ config, name, ... }: {
+      options = {
+        passwordFile = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            Read the repository password from a file.
+          '';
+          example = "/etc/nixos/restic-password";
+        };
+
+        environmentFile = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            File containing the credentials to access the repository, in the
+            format of an EnvironmentFile as described by {manpage}`systemd.exec(5)`.
+          '';
+        };
+
+        rcloneOptions = mkOption {
+          type = with types; nullOr (attrsOf (oneOf [ str bool ]));
+          default = null;
+          description = lib.mdDoc ''
+            Options to pass to rclone to control its behavior.
+            See <https://rclone.org/docs/#options> for
+            available options. When specifying option names, strip the
+            leading `--`. To set a flag such as
+            `--drive-use-trash`, which does not take a value,
+            set the value to the Boolean `true`.
+          '';
+          example = {
+            bwlimit = "10M";
+            drive-use-trash = "true";
+          };
+        };
+
+        rcloneConfig = mkOption {
+          type = with types; nullOr (attrsOf (oneOf [ str bool ]));
+          default = null;
+          description = lib.mdDoc ''
+            Configuration for the rclone remote being used for backup.
+            See the remote's specific options under rclone's docs at
+            <https://rclone.org/docs/>. When specifying
+            option names, use the "config" name specified in the docs.
+            For example, to set `--b2-hard-delete` for a B2
+            remote, use `hard_delete = true` in the
+            attribute set.
+            Warning: Secrets set in here will be world-readable in the Nix
+            store! Consider using the `rcloneConfigFile`
+            option instead to specify secret values separately. Note that
+            options set here will override those set in the config file.
+          '';
+          example = {
+            type = "b2";
+            account = "xxx";
+            key = "xxx";
+            hard_delete = true;
+          };
+        };
+
+        rcloneConfigFile = mkOption {
+          type = with types; nullOr path;
+          default = null;
+          description = lib.mdDoc ''
+            Path to the file containing rclone configuration. This file
+            must contain configuration for the remote specified in this backup
+            set and also must be readable by root. Options set in
+            `rcloneConfig` will override those set in this
+            file.
+          '';
+        };
+
+        repository = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            Repository to back up to.
+          '';
+          example = "sftp:backup@192.168.1.100:/backups/${name}";
+        };
+
+        repositoryFile = mkOption {
+          type = with types; nullOr path;
+          default = null;
+          description = lib.mdDoc ''
+            Path to the file containing the repository location to backup to.
+          '';
+        };
+
+        paths = mkOption {
+          # This is nullable for legacy reasons only. We should consider making it a pure listOf
+          # after some time has passed since this comment was added.
+          type = types.nullOr (types.listOf types.str);
+          default = [ ];
+          description = lib.mdDoc ''
+            Which paths to back up, in addition to ones specified via
+            `dynamicFilesFrom`.  If null or an empty array and
+            `dynamicFilesFrom` is also null, no backup command will be run.
+            This can be used to create a prune-only job.
+          '';
+          example = [
+            "/var/lib/postgresql"
+            "/home/user/backup"
+          ];
+        };
+
+        exclude = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          description = lib.mdDoc ''
+            Patterns to exclude when backing up. See
+            <https://restic.readthedocs.io/en/latest/040_backup.html#excluding-files> for
+            details on syntax.
+          '';
+          example = [
+            "/var/cache"
+            "/home/*/.cache"
+            ".git"
+          ];
+        };
+
+        timerConfig = mkOption {
+          type = types.nullOr (types.attrsOf unitOption);
+          default = {
+            OnCalendar = "daily";
+            Persistent = true;
+          };
+          description = lib.mdDoc ''
+            When to run the backup. See {manpage}`systemd.timer(5)` for
+            details. If null no timer is created and the backup will only
+            run when explicitly started.
+          '';
+          example = {
+            OnCalendar = "00:05";
+            RandomizedDelaySec = "5h";
+            Persistent = true;
+          };
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "root";
+          description = lib.mdDoc ''
+            The user the backup should run as.
+          '';
+          example = "postgresql";
+        };
+
+        extraBackupArgs = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          description = lib.mdDoc ''
+            Extra arguments passed to restic backup.
+          '';
+          example = [
+            "--exclude-file=/etc/nixos/restic-ignore"
+          ];
+        };
+
+        extraOptions = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          description = lib.mdDoc ''
+            Extra extended options to be passed to the restic --option flag.
+          '';
+          example = [
+            "sftp.command='ssh backup@192.168.1.100 -i /home/user/.ssh/id_rsa -s sftp'"
+          ];
+        };
+
+        initialize = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Create the repository if it doesn't exist.
+          '';
+        };
+
+        pruneOpts = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          description = lib.mdDoc ''
+            A list of options (--keep-\* et al.) for 'restic forget
+            --prune', to automatically prune old snapshots.  The
+            'forget' command is run *after* the 'backup' command, so
+            keep that in mind when constructing the --keep-\* options.
+          '';
+          example = [
+            "--keep-daily 7"
+            "--keep-weekly 5"
+            "--keep-monthly 12"
+            "--keep-yearly 75"
+          ];
+        };
+
+        checkOpts = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          description = lib.mdDoc ''
+            A list of options for 'restic check', which is run after
+            pruning.
+          '';
+          example = [
+            "--with-cache"
+          ];
+        };
+
+        dynamicFilesFrom = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            A script that produces a list of files to back up.  The
+            results of this command are given to the '--files-from'
+            option. The result is merged with paths specified via `paths`.
+          '';
+          example = "find /home/matt/git -type d -name .git";
+        };
+
+        backupPrepareCommand = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            A script that must run before starting the backup process.
+          '';
+        };
+
+        backupCleanupCommand = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            A script that must run after finishing the backup process.
+          '';
+        };
+
+        package = mkOption {
+          type = types.package;
+          default = pkgs.restic;
+          defaultText = literalExpression "pkgs.restic";
+          description = lib.mdDoc ''
+            Restic package to use.
+          '';
+        };
+
+        createWrapper = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          description = ''
+            Whether to generate and add a script to the system path that has the same environment variables set
+            as the systemd service. This can be used to e.g. mount snapshots or perform other operations without
+            having to manually specify most options.
+          '';
+        };
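+        # With createWrapper enabled, a script named "restic-<name>" is placed on
+        # the system path; a hedged usage sketch for a backup named "localbackup":
+        #
+        #   restic-localbackup snapshots
+        #   restic-localbackup mount /mnt/restic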
+      };
+    }));
+    default = { };
+    example = {
+      localbackup = {
+        paths = [ "/home" ];
+        exclude = [ "/home/*/.cache" ];
+        repository = "/mnt/backup-hdd";
+        passwordFile = "/etc/nixos/secrets/restic-password";
+        initialize = true;
+      };
+      remotebackup = {
+        paths = [ "/home" ];
+        repository = "sftp:backup@host:/backups/home";
+        passwordFile = "/etc/nixos/secrets/restic-password";
+        extraOptions = [
+          "sftp.command='ssh backup@host -i /etc/nixos/secrets/backup-private-key -s sftp'"
+        ];
+        timerConfig = {
+          OnCalendar = "00:05";
+          RandomizedDelaySec = "5h";
+        };
+      };
+    };
+  };
+
+  config = {
+    assertions = mapAttrsToList (n: v: {
+      assertion = (v.repository == null) != (v.repositoryFile == null);
+      message = "services.restic.backups.${n}: exactly one of repository or repositoryFile should be set";
+    }) config.services.restic.backups;
+    systemd.services =
+      mapAttrs'
+        (name: backup:
+          let
+            extraOptions = concatMapStrings (arg: " -o ${arg}") backup.extraOptions;
+            resticCmd = "${backup.package}/bin/restic${extraOptions}";
+            excludeFlags = optional (backup.exclude != []) "--exclude-file=${pkgs.writeText "exclude-patterns" (concatStringsSep "\n" backup.exclude)}";
+            filesFromTmpFile = "/run/restic-backups-${name}/includes";
+            doBackup = (backup.dynamicFilesFrom != null) || (backup.paths != null && backup.paths != []);
+            pruneCmd = optionals (builtins.length backup.pruneOpts > 0) [
+              (resticCmd + " forget --prune " + (concatStringsSep " " backup.pruneOpts))
+              (resticCmd + " check " + (concatStringsSep " " backup.checkOpts))
+            ];
+            # Helper functions for rclone remotes
+            rcloneRemoteName = builtins.elemAt (splitString ":" backup.repository) 1;
+            rcloneAttrToOpt = v: "RCLONE_" + toUpper (builtins.replaceStrings [ "-" ] [ "_" ] v);
+            rcloneAttrToConf = v: "RCLONE_CONFIG_" + toUpper (rcloneRemoteName + "_" + v);
+            toRcloneVal = v: if lib.isBool v then lib.boolToString v else v;
+          in
+          nameValuePair "restic-backups-${name}" ({
+            environment = {
+              # not %C, because that wouldn't work in the wrapper script
+              RESTIC_CACHE_DIR = "/var/cache/restic-backups-${name}";
+              RESTIC_PASSWORD_FILE = backup.passwordFile;
+              RESTIC_REPOSITORY = backup.repository;
+              RESTIC_REPOSITORY_FILE = backup.repositoryFile;
+            } // optionalAttrs (backup.rcloneOptions != null) (mapAttrs'
+              (name: value:
+                nameValuePair (rcloneAttrToOpt name) (toRcloneVal value)
+              )
+              backup.rcloneOptions) // optionalAttrs (backup.rcloneConfigFile != null) {
+              RCLONE_CONFIG = backup.rcloneConfigFile;
+            } // optionalAttrs (backup.rcloneConfig != null) (mapAttrs'
+              (name: value:
+                nameValuePair (rcloneAttrToConf name) (toRcloneVal value)
+              )
+              backup.rcloneConfig);
+            path = [ config.programs.ssh.package ];
+            restartIfChanged = false;
+            wants = [ "network-online.target" ];
+            after = [ "network-online.target" ];
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = (optionals doBackup [ "${resticCmd} backup ${concatStringsSep " " (backup.extraBackupArgs ++ excludeFlags)} --files-from=${filesFromTmpFile}" ])
+                ++ pruneCmd;
+              User = backup.user;
+              RuntimeDirectory = "restic-backups-${name}";
+              CacheDirectory = "restic-backups-${name}";
+              CacheDirectoryMode = "0700";
+              PrivateTmp = true;
+            } // optionalAttrs (backup.environmentFile != null) {
+              EnvironmentFile = backup.environmentFile;
+            };
+          } // optionalAttrs (backup.initialize || doBackup || backup.backupPrepareCommand != null) {
+            preStart = ''
+              ${optionalString (backup.backupPrepareCommand != null) ''
+                ${pkgs.writeScript "backupPrepareCommand" backup.backupPrepareCommand}
+              ''}
+              ${optionalString (backup.initialize) ''
+                ${resticCmd} snapshots || ${resticCmd} init
+              ''}
+              ${optionalString (backup.paths != null && backup.paths != []) ''
+                cat ${pkgs.writeText "staticPaths" (concatStringsSep "\n" backup.paths)} >> ${filesFromTmpFile}
+              ''}
+              ${optionalString (backup.dynamicFilesFrom != null) ''
+                ${pkgs.writeScript "dynamicFilesFromScript" backup.dynamicFilesFrom} >> ${filesFromTmpFile}
+              ''}
+            '';
+          } // optionalAttrs (doBackup || backup.backupCleanupCommand != null) {
+            postStop = ''
+              ${optionalString (backup.backupCleanupCommand != null) ''
+                ${pkgs.writeScript "backupCleanupCommand" backup.backupCleanupCommand}
+              ''}
+              ${optionalString doBackup ''
+                rm ${filesFromTmpFile}
+              ''}
+            '';
+          })
+        )
+        config.services.restic.backups;
+    systemd.timers =
+      mapAttrs'
+        (name: backup: nameValuePair "restic-backups-${name}" {
+          wantedBy = [ "timers.target" ];
+          timerConfig = backup.timerConfig;
+        })
+        (filterAttrs (_: backup: backup.timerConfig != null) config.services.restic.backups);
+
+    # generate wrapper scripts, as described in the createWrapper option
+    environment.systemPackages = lib.mapAttrsToList (name: backup: let
+      extraOptions = lib.concatMapStrings (arg: " -o ${arg}") backup.extraOptions;
+      resticCmd = "${backup.package}/bin/restic${extraOptions}";
+    in pkgs.writeShellScriptBin "restic-${name}" ''
+      set -a  # automatically export variables
+      ${lib.optionalString (backup.environmentFile != null) "source ${backup.environmentFile}"}
+      # set same environment variables as the systemd service
+      ${lib.pipe config.systemd.services."restic-backups-${name}".environment [
+        (lib.filterAttrs (_: v: v != null))
+        (lib.mapAttrsToList (n: v: "${n}=${v}"))
+        (lib.concatStringsSep "\n")
+      ]}
+
+      exec ${resticCmd} "$@"  # quote to preserve arguments containing spaces
+    '') (lib.filterAttrs (_: v: v.createWrapper) config.services.restic.backups);
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/backup/rsnapshot.nix b/nixpkgs/nixos/modules/services/backup/rsnapshot.nix
new file mode 100644
index 000000000000..0b9bb60af0ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/rsnapshot.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rsnapshot;
+  cfgfile = pkgs.writeText "rsnapshot.conf" ''
+    config_version	1.2
+    cmd_cp	${pkgs.coreutils}/bin/cp
+    cmd_rm	${pkgs.coreutils}/bin/rm
+    cmd_rsync	${pkgs.rsync}/bin/rsync
+    cmd_ssh	${pkgs.openssh}/bin/ssh
+    cmd_logger	${pkgs.inetutils}/bin/logger
+    cmd_du	${pkgs.coreutils}/bin/du
+    cmd_rsnapshot_diff	${pkgs.rsnapshot}/bin/rsnapshot-diff
+    lockfile	/run/rsnapshot.pid
+    link_dest	1
+
+    ${cfg.extraConfig}
+  '';
+in
+{
+  options = {
+    services.rsnapshot = {
+      enable = mkEnableOption (lib.mdDoc "rsnapshot backups");
+      enableManualRsnapshot = mkOption {
+        description = lib.mdDoc "Whether to enable manual usage of the rsnapshot command with this module.";
+        default = true;
+        type = types.bool;
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        example = ''
+          retain	hourly	24
+          retain	daily	365
+          backup	/home/	localhost/
+        '';
+        type = types.lines;
+        description = lib.mdDoc ''
+          rsnapshot configuration options in addition to the defaults from
+          rsnapshot and this module.
+
+          Note that tabs are required to separate option arguments, and
+          directory names require trailing slashes.
+
+          The "extra" in the option name might be a little misleading right
+          now, as it is required to get a functional configuration.
+        '';
+      };
+
+      cronIntervals = mkOption {
+        default = {};
+        example = { hourly = "0 * * * *"; daily = "50 21 * * *"; };
+        type = types.attrsOf types.str;
+        description = lib.mdDoc ''
+          Periodicity at which intervals should be run by cron.
+          Note that the intervals also have to exist in configuration
+          as retain options.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+      services.cron.systemCronJobs =
+        mapAttrsToList (interval: time: "${time} root ${pkgs.rsnapshot}/bin/rsnapshot -c ${cfgfile} ${interval}") cfg.cronIntervals;
+    }
+    (mkIf cfg.enableManualRsnapshot {
+      environment.systemPackages = [ pkgs.rsnapshot ];
+      environment.etc."rsnapshot.conf".source = cfgfile;
+    })
+  ]);
+}
diff --git a/nixpkgs/nixos/modules/services/backup/sanoid.nix b/nixpkgs/nixos/modules/services/backup/sanoid.nix
new file mode 100644
index 000000000000..aae77cee07d0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/sanoid.nix
@@ -0,0 +1,205 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sanoid;
+
+  datasetSettingsType = with types;
+    (attrsOf (nullOr (oneOf [ str int bool (listOf str) ]))) // {
+      description = "dataset/template options";
+    };
+
+  commonOptions = {
+    hourly = mkOption {
+      description = lib.mdDoc "Number of hourly snapshots.";
+      type = with types; nullOr ints.unsigned;
+      default = null;
+    };
+
+    daily = mkOption {
+      description = lib.mdDoc "Number of daily snapshots.";
+      type = with types; nullOr ints.unsigned;
+      default = null;
+    };
+
+    monthly = mkOption {
+      description = lib.mdDoc "Number of monthly snapshots.";
+      type = with types; nullOr ints.unsigned;
+      default = null;
+    };
+
+    yearly = mkOption {
+      description = lib.mdDoc "Number of yearly snapshots.";
+      type = with types; nullOr ints.unsigned;
+      default = null;
+    };
+
+    autoprune = mkOption {
+      description = lib.mdDoc "Whether to automatically prune old snapshots.";
+      type = with types; nullOr bool;
+      default = null;
+    };
+
+    autosnap = mkOption {
+      description = lib.mdDoc "Whether to automatically take snapshots.";
+      type = with types; nullOr bool;
+      default = null;
+    };
+  };
+
+  datasetOptions = rec {
+    use_template = mkOption {
+      description = lib.mdDoc "Names of the templates to use for this dataset.";
+      type = types.listOf (types.str // {
+        check = (types.enum (attrNames cfg.templates)).check;
+        description = "configured template name";
+      });
+      default = [ ];
+    };
+    useTemplate = use_template;
+
+    recursive = mkOption {
+      description = lib.mdDoc ''
+        Whether to recursively snapshot dataset children.
+        You can also set this to `"zfs"` to handle datasets
+        recursively in an atomic way without the possibility to
+        override settings for child datasets.
+      '';
+      type = with types; oneOf [ bool (enum [ "zfs" ]) ];
+      default = false;
+    };
+
+    process_children_only = mkOption {
+      description = lib.mdDoc "Whether to only snapshot child datasets if recursing.";
+      type = types.bool;
+      default = false;
+    };
+    processChildrenOnly = process_children_only;
+  };
+
+  # Extract unique dataset names
+  datasets = unique (attrNames cfg.datasets);
+
+  # Function to build "zfs allow" and "zfs unallow" commands for the
+  # filesystems we've delegated permissions to.
+  buildAllowCommand = zfsAction: permissions: dataset: lib.escapeShellArgs [
+    # Here we explicitly use the booted system to guarantee the stable API needed by ZFS
+    "-+/run/booted-system/sw/bin/zfs"
+    zfsAction
+    "sanoid"
+    (concatStringsSep "," permissions)
+    dataset
+  ];
+
+  configFile =
+    let
+      mkValueString = v:
+        if builtins.isList v then concatStringsSep "," v
+        else generators.mkValueStringDefault { } v;
+
+      mkKeyValue = k: v:
+        if v == null then ""
+        else if k == "processChildrenOnly" then ""
+        else if k == "useTemplate" then ""
+        else generators.mkKeyValueDefault { inherit mkValueString; } "=" k v;
+    in
+    generators.toINI { inherit mkKeyValue; } cfg.settings;
+
+in
+{
+
+  # Interface
+
+  options.services.sanoid = {
+    enable = mkEnableOption (lib.mdDoc "Sanoid ZFS snapshotting service");
+
+    package = lib.mkPackageOptionMD pkgs "sanoid" {};
+
+    interval = mkOption {
+      type = types.str;
+      default = "hourly";
+      example = "daily";
+      description = lib.mdDoc ''
+        Run sanoid at this interval. The default is to run hourly.
+
+        The format is described in
+        {manpage}`systemd.time(7)`.
+      '';
+    };
+
+    datasets = mkOption {
+      type = types.attrsOf (types.submodule ({ config, options, ... }: {
+        freeformType = datasetSettingsType;
+        options = commonOptions // datasetOptions;
+        config.use_template = modules.mkAliasAndWrapDefsWithPriority id (options.useTemplate or { });
+        config.process_children_only = modules.mkAliasAndWrapDefsWithPriority id (options.processChildrenOnly or { });
+      }));
+      default = { };
+      description = lib.mdDoc "Datasets to snapshot.";
+    };
+
+    templates = mkOption {
+      type = types.attrsOf (types.submodule {
+        freeformType = datasetSettingsType;
+        options = commonOptions;
+      });
+      default = { };
+      description = lib.mdDoc "Templates for datasets.";
+    };
+
+    settings = mkOption {
+      type = types.attrsOf datasetSettingsType;
+      description = lib.mdDoc ''
+        Free-form settings written directly to the config file. See
+        <https://github.com/jimsalterjrs/sanoid/blob/master/sanoid.defaults.conf>
+        for allowed values.
+      '';
+    };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--verbose" "--readonly" "--debug" ];
+      description = lib.mdDoc ''
+        Extra arguments to pass to sanoid. See
+        <https://github.com/jimsalterjrs/sanoid/#sanoid-command-line-options>
+        for allowed options.
+      '';
+    };
+  };
+
+  # Implementation
+
+  config = mkIf cfg.enable {
+    services.sanoid.settings = mkMerge [
+      (mapAttrs' (d: v: nameValuePair ("template_" + d) v) cfg.templates)
+      (mapAttrs (d: v: v) cfg.datasets)
+    ];
+
+    systemd.services.sanoid = {
+      description = "Sanoid snapshot service";
+      serviceConfig = {
+        ExecStartPre = (map (buildAllowCommand "allow" [ "snapshot" "mount" "destroy" ]) datasets);
+        ExecStopPost = (map (buildAllowCommand "unallow" [ "snapshot" "mount" "destroy" ]) datasets);
+        ExecStart = lib.escapeShellArgs ([
+          "${cfg.package}/bin/sanoid"
+          "--cron"
+          "--configdir"
+          (pkgs.writeTextDir "sanoid.conf" configFile)
+        ] ++ cfg.extraArgs);
+        User = "sanoid";
+        Group = "sanoid";
+        DynamicUser = true;
+        RuntimeDirectory = "sanoid";
+        CacheDirectory = "sanoid";
+      };
+      # Prevents missing snapshots during DST changes
+      environment.TZ = "UTC";
+      after = [ "zfs.target" ];
+      startAt = cfg.interval;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ lopsided98 ];
+}
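As a usage sketch (editorial note, not part of the diff): the sanoid module above could be wired into a configuration roughly as follows. The pool and dataset names and the retention values are illustrative assumptions; the `hourly`, `daily`, `autosnap` and `autoprune` keys are freeform settings passed straight through to `sanoid.conf` as documented upstream.

```nix
{
  services.sanoid = {
    enable = true;
    interval = "hourly";

    # Rendered as a [template_production] section in the generated sanoid.conf.
    templates.production = {
      hourly = 36;
      daily = 30;
      autosnap = true;
      autoprune = true;
    };

    datasets."tank/home" = {
      useTemplate = [ "production" ];
      recursive = true;   # or "zfs" for atomic recursive snapshots
    };
  };
}
```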
diff --git a/nixpkgs/nixos/modules/services/backup/syncoid.nix b/nixpkgs/nixos/modules/services/backup/syncoid.nix
new file mode 100644
index 000000000000..1a1df38617b5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/syncoid.nix
@@ -0,0 +1,424 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.syncoid;
+
+  # Extract local dataset names (i.e. no datasets containing "@")
+  localDatasetName = d: optionals (d != null) (
+    let m = builtins.match "([^/@]+[^@]*)" d; in
+    optionals (m != null) m
+  );
+
+  # Escape as required by: https://www.freedesktop.org/software/systemd/man/systemd.unit.html
+  escapeUnitName = name:
+    lib.concatMapStrings (s: if lib.isList s then "-" else s)
+      (builtins.split "[^a-zA-Z0-9_.\\-]+" name);
+
+  # Function to build "zfs allow" commands for the filesystems we've delegated
+  # permissions to. It also checks if the target dataset exists before
+  # delegating permissions, if it doesn't exist we delegate it to the parent
+  # dataset (if it exists). This should solve the case of provisioning new
+  # datasets.
+  buildAllowCommand = permissions: dataset: (
+    "-+${pkgs.writeShellScript "zfs-allow-${dataset}" ''
+      # Here we explicitly use the booted system to guarantee the stable API needed by ZFS
+
+      # Run a ZFS list on the dataset to check if it exists
+      if ${lib.escapeShellArgs [
+        "/run/booted-system/sw/bin/zfs"
+        "list"
+        dataset
+      ]} 2> /dev/null; then
+        ${lib.escapeShellArgs [
+          "/run/booted-system/sw/bin/zfs"
+          "allow"
+          cfg.user
+          (concatStringsSep "," permissions)
+          dataset
+        ]}
+      ${lib.optionalString ((builtins.dirOf dataset) != ".") ''
+        else
+          ${lib.escapeShellArgs [
+            "/run/booted-system/sw/bin/zfs"
+            "allow"
+            cfg.user
+            (concatStringsSep "," permissions)
+            # Remove the last part of the path
+            (builtins.dirOf dataset)
+          ]}
+      ''}
+      fi
+    ''}"
+  );
+
+  # Function to build "zfs unallow" commands for the filesystems we've
+  # delegated permissions to. Here we unallow both the target but also
+  # on the parent dataset because at this stage we have no way of
+  # knowing if the allow command did execute on the parent dataset or
+  # not in the pre-hook. We can't run the same if in the post hook
+  # since the dataset should have been created at this point.
+  buildUnallowCommand = permissions: dataset: (
+    "-+${pkgs.writeShellScript "zfs-unallow-${dataset}" ''
+      # Here we explicitly use the booted system to guarantee the stable API needed by ZFS
+      ${lib.escapeShellArgs [
+        "/run/booted-system/sw/bin/zfs"
+        "unallow"
+        cfg.user
+        (concatStringsSep "," permissions)
+        dataset
+      ]}
+      ${lib.optionalString ((builtins.dirOf dataset) != ".") (lib.escapeShellArgs [
+        "/run/booted-system/sw/bin/zfs"
+        "unallow"
+        cfg.user
+        (concatStringsSep "," permissions)
+        # Remove the last part of the path
+        (builtins.dirOf dataset)
+      ])}
+    ''}"
+  );
+in
+{
+
+  # Interface
+
+  options.services.syncoid = {
+    enable = mkEnableOption (lib.mdDoc "Syncoid ZFS synchronization service");
+
+    package = lib.mkPackageOptionMD pkgs "sanoid" {};
+
+    interval = mkOption {
+      type = types.str;
+      default = "hourly";
+      example = "*-*-* *:15:00";
+      description = lib.mdDoc ''
+        Run syncoid at this interval. The default is to run hourly.
+
+        The format is described in
+        {manpage}`systemd.time(7)`.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "syncoid";
+      example = "backup";
+      description = lib.mdDoc ''
+        The user for the service. ZFS privilege delegation will be
+        automatically configured for any local pools used by syncoid if this
+        option is set to a user other than root. The user will be given the
+        "hold" and "send" privileges on any pool that has datasets being sent
+        and the "create", "mount", "receive", and "rollback" privileges on
+        any pool that has datasets being received.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "syncoid";
+      example = "backup";
+      description = lib.mdDoc "The group for the service.";
+    };
+
+    sshKey = mkOption {
+      type = types.nullOr types.path;
+      # Prevent key from being copied to store
+      apply = mapNullable toString;
+      default = null;
+      description = lib.mdDoc ''
+        SSH private key file to use to login to the remote system. Can be
+        overridden in individual commands.
+      '';
+    };
+
+    localSourceAllow = mkOption {
+      type = types.listOf types.str;
+      # Permissions snapshot and destroy are in case --no-sync-snap is not used
+      default = [ "bookmark" "hold" "send" "snapshot" "destroy" ];
+      description = lib.mdDoc ''
+        Permissions granted for the {option}`services.syncoid.user` user
+        for local source datasets. See
+        <https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html>
+        for available permissions.
+      '';
+    };
+
+    localTargetAllow = mkOption {
+      type = types.listOf types.str;
+      default = [ "change-key" "compression" "create" "mount" "mountpoint" "receive" "rollback" ];
+      example = [ "create" "mount" "receive" "rollback" ];
+      description = lib.mdDoc ''
+        Permissions granted for the {option}`services.syncoid.user` user
+        for local target datasets. See
+        <https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html>
+        for available permissions.
+        Make sure to include the `change-key` permission if you send raw encrypted datasets,
+        the `compression` permission if you send raw compressed datasets, and so on.
+        For remote target datasets you'll have to set the remote user's permissions yourself.
+      '';
+    };
+
+    commonArgs = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--no-sync-snap" ];
+      description = lib.mdDoc ''
+        Arguments to add to every syncoid command, unless disabled for that
+        command. See
+        <https://github.com/jimsalterjrs/sanoid/#syncoid-command-line-options>
+        for available options.
+      '';
+    };
+
+    service = mkOption {
+      type = types.attrs;
+      default = { };
+      description = lib.mdDoc ''
+        Systemd configuration common to all syncoid services.
+      '';
+    };
+
+    commands = mkOption {
+      type = types.attrsOf (types.submodule ({ name, ... }: {
+        options = {
+          source = mkOption {
+            type = types.str;
+            example = "pool/dataset";
+            description = lib.mdDoc ''
+              Source ZFS dataset. Can be either local or remote. Defaults to
+              the attribute name.
+            '';
+          };
+
+          target = mkOption {
+            type = types.str;
+            example = "user@server:pool/dataset";
+            description = lib.mdDoc ''
+              Target ZFS dataset. Can be either local
+              («pool/dataset») or remote
+              («user@server:pool/dataset»).
+            '';
+          };
+
+          recursive = mkEnableOption (lib.mdDoc ''the transfer of child datasets'');
+
+          sshKey = mkOption {
+            type = types.nullOr types.path;
+            # Prevent key from being copied to store
+            apply = mapNullable toString;
+            description = lib.mdDoc ''
+              SSH private key file to use to login to the remote system.
+              Defaults to {option}`services.syncoid.sshKey` option.
+            '';
+          };
+
+          localSourceAllow = mkOption {
+            type = types.listOf types.str;
+            description = lib.mdDoc ''
+              Permissions granted for the {option}`services.syncoid.user` user
+              for local source datasets. See
+              <https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html>
+              for available permissions.
+              Defaults to {option}`services.syncoid.localSourceAllow` option.
+            '';
+          };
+
+          localTargetAllow = mkOption {
+            type = types.listOf types.str;
+            description = lib.mdDoc ''
+              Permissions granted for the {option}`services.syncoid.user` user
+              for local target datasets. See
+              <https://openzfs.github.io/openzfs-docs/man/8/zfs-allow.8.html>
+              for available permissions.
+              Make sure to include the `change-key` permission if you send raw encrypted datasets,
+              the `compression` permission if you send raw compressed datasets, and so on.
+              For remote target datasets you'll have to set the remote user's permissions yourself.
+            '';
+          };
+
+          sendOptions = mkOption {
+            type = types.separatedString " ";
+            default = "";
+            example = "Lc e";
+            description = lib.mdDoc ''
+              Advanced options to pass to zfs send. Options are specified
+              without their leading dashes and separated by spaces.
+            '';
+          };
+
+          recvOptions = mkOption {
+            type = types.separatedString " ";
+            default = "";
+            example = "ux recordsize o compression=lz4";
+            description = lib.mdDoc ''
+              Advanced options to pass to zfs recv. Options are specified
+              without their leading dashes and separated by spaces.
+            '';
+          };
+
+          useCommonArgs = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc ''
+              Whether to add the configured common arguments to this command.
+            '';
+          };
+
+          service = mkOption {
+            type = types.attrs;
+            default = { };
+            description = lib.mdDoc ''
+              Systemd configuration specific to this syncoid service.
+            '';
+          };
+
+          extraArgs = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "--sshport 2222" ];
+            description = lib.mdDoc "Extra syncoid arguments for this command.";
+          };
+        };
+        config = {
+          source = mkDefault name;
+          sshKey = mkDefault cfg.sshKey;
+          localSourceAllow = mkDefault cfg.localSourceAllow;
+          localTargetAllow = mkDefault cfg.localTargetAllow;
+        };
+      }));
+      default = { };
+      example = literalExpression ''
+        {
+          "pool/test".target = "root@target:pool/test";
+        }
+      '';
+      description = lib.mdDoc "Syncoid commands to run.";
+    };
+  };
+
+  # Implementation
+
+  config = mkIf cfg.enable {
+    users = {
+      users = mkIf (cfg.user == "syncoid") {
+        syncoid = {
+          group = cfg.group;
+          isSystemUser = true;
+          # For syncoid to be able to create /var/lib/syncoid/.ssh/
+          # and to use custom ssh_config or known_hosts.
+          home = "/var/lib/syncoid";
+          createHome = false;
+        };
+      };
+      groups = mkIf (cfg.group == "syncoid") {
+        syncoid = { };
+      };
+    };
+
+    systemd.services = mapAttrs'
+      (name: c:
+        nameValuePair "syncoid-${escapeUnitName name}" (mkMerge [
+          {
+            description = "Syncoid ZFS synchronization from ${c.source} to ${c.target}";
+            after = [ "zfs.target" ];
+            startAt = cfg.interval;
+            # syncoid may need zpool to get feature@extensible_dataset
+            path = [ "/run/booted-system/sw/bin/" ];
+            serviceConfig = {
+              ExecStartPre =
+                (map (buildAllowCommand c.localSourceAllow) (localDatasetName c.source)) ++
+                (map (buildAllowCommand c.localTargetAllow) (localDatasetName c.target));
+              ExecStopPost =
+                (map (buildUnallowCommand c.localSourceAllow) (localDatasetName c.source)) ++
+                (map (buildUnallowCommand c.localTargetAllow) (localDatasetName c.target));
+              ExecStart = lib.escapeShellArgs ([ "${cfg.package}/bin/syncoid" ]
+                ++ optionals c.useCommonArgs cfg.commonArgs
+                ++ optional c.recursive "-r"
+                ++ optionals (c.sshKey != null) [ "--sshkey" c.sshKey ]
+                ++ c.extraArgs
+                ++ [
+                "--sendoptions"
+                c.sendOptions
+                "--recvoptions"
+                c.recvOptions
+                "--no-privilege-elevation"
+                c.source
+                c.target
+              ]);
+              User = cfg.user;
+              Group = cfg.group;
+              StateDirectory = [ "syncoid" ];
+              StateDirectoryMode = "700";
+              # Prevent SSH control sockets of different syncoid services from interfering
+              PrivateTmp = true;
+              # Permissive access to /proc because syncoid
+              # calls ps(1) to detect ongoing `zfs receive`.
+              ProcSubset = "all";
+              ProtectProc = "default";
+
+              # The following options are only for optimizing:
+              # systemd-analyze security | grep syncoid-'*'
+              AmbientCapabilities = "";
+              CapabilityBoundingSet = "";
+              DeviceAllow = [ "/dev/zfs" ];
+              LockPersonality = true;
+              MemoryDenyWriteExecute = true;
+              NoNewPrivileges = true;
+              PrivateDevices = true;
+              PrivateMounts = true;
+              PrivateNetwork = mkDefault false;
+              PrivateUsers = false; # Enabling this breaks on zfs-2.2.0
+              ProtectClock = true;
+              ProtectControlGroups = true;
+              ProtectHome = true;
+              ProtectHostname = true;
+              ProtectKernelLogs = true;
+              ProtectKernelModules = true;
+              ProtectKernelTunables = true;
+              ProtectSystem = "strict";
+              RemoveIPC = true;
+              RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+              RestrictNamespaces = true;
+              RestrictRealtime = true;
+              RestrictSUIDSGID = true;
+              RootDirectory = "/run/syncoid/${escapeUnitName name}";
+              RootDirectoryStartOnly = true;
+              BindPaths = [ "/dev/zfs" ];
+              BindReadOnlyPaths = [ builtins.storeDir "/etc" "/run" "/bin/sh" ];
+              # Avoid useless mounting of RootDirectory= in the own RootDirectory= of ExecStart='s mount namespace.
+              InaccessiblePaths = [ "-+/run/syncoid/${escapeUnitName name}" ];
+              MountAPIVFS = true;
+              # Create RootDirectory= in the host's mount namespace.
+              RuntimeDirectory = [ "syncoid/${escapeUnitName name}" ];
+              RuntimeDirectoryMode = "700";
+              SystemCallFilter = [
+                "@system-service"
+                # Groups in @system-service which do not contain a syscall listed by:
+                # perf stat -x, 2>perf.log -e 'syscalls:sys_enter_*' syncoid …
+                # awk >perf.syscalls -F "," '$1 > 0 {sub("syscalls:sys_enter_","",$3); print $3}' perf.log
+                # systemd-analyze syscall-filter | grep -v -e '#' | sed -e ':loop; /^[^ ]/N; s/\n //; t loop' | grep $(printf ' -e \\<%s\\>' $(cat perf.syscalls)) | cut -f 1 -d ' '
+                "~@aio"
+                "~@chown"
+                "~@keyring"
+                "~@memlock"
+                "~@privileged"
+                "~@resources"
+                "~@setuid"
+                "~@timer"
+              ];
+              SystemCallArchitectures = "native";
+              # This is for BindPaths= and BindReadOnlyPaths=
+              # to allow traversal of directories they create in RootDirectory=.
+              UMask = "0066";
+            };
+          }
+          cfg.service
+          c.service
+        ]))
+      cfg.commands;
+  };
+
+  meta.maintainers = with maintainers; [ julm lopsided98 ];
+}
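A minimal sketch of how the syncoid module above might be used; the remote host, pool names and key path are assumptions for illustration. The source defaults to the attribute name, and ZFS permission delegation is handled by the `ExecStartPre`/`ExecStopPost` hooks shown above.

```nix
{
  services.syncoid = {
    enable = true;
    interval = "hourly";
    commonArgs = [ "--no-sync-snap" ];

    commands."tank/home" = {
      # source defaults to the attribute name, i.e. "tank/home"
      target = "backup@remote.example.com:backuppool/home";
      recursive = true;
      sshKey = "/var/lib/syncoid/.ssh/id_ed25519";
    };
  };
}
```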
diff --git a/nixpkgs/nixos/modules/services/backup/tarsnap.nix b/nixpkgs/nixos/modules/services/backup/tarsnap.nix
new file mode 100644
index 000000000000..9e1db23ca22a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/tarsnap.nix
@@ -0,0 +1,409 @@
+{ config, lib, options, pkgs, utils, ... }:
+
+with lib;
+
+let
+  gcfg = config.services.tarsnap;
+  opt = options.services.tarsnap;
+
+  configFile = name: cfg: ''
+    keyfile ${cfg.keyfile}
+    ${optionalString (cfg.cachedir != null) "cachedir ${cfg.cachedir}"}
+    ${optionalString cfg.nodump "nodump"}
+    ${optionalString cfg.printStats "print-stats"}
+    ${optionalString cfg.printStats "humanize-numbers"}
+    ${optionalString (cfg.checkpointBytes != null) ("checkpoint-bytes "+cfg.checkpointBytes)}
+    ${optionalString cfg.aggressiveNetworking "aggressive-networking"}
+    ${concatStringsSep "\n" (map (v: "exclude ${v}") cfg.excludes)}
+    ${concatStringsSep "\n" (map (v: "include ${v}") cfg.includes)}
+    ${optionalString cfg.lowmem "lowmem"}
+    ${optionalString cfg.verylowmem "verylowmem"}
+    ${optionalString (cfg.maxbw != null) "maxbw ${toString cfg.maxbw}"}
+    ${optionalString (cfg.maxbwRateUp != null) "maxbw-rate-up ${toString cfg.maxbwRateUp}"}
+    ${optionalString (cfg.maxbwRateDown != null) "maxbw-rate-down ${toString cfg.maxbwRateDown}"}
+  '';
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "tarsnap" "cachedir" ] "Use services.tarsnap.archives.<name>.cachedir")
+  ];
+
+  options = {
+    services.tarsnap = {
+      enable = mkEnableOption (lib.mdDoc "periodic tarsnap backups");
+
+      package = mkPackageOption pkgs "tarsnap" { };
+
+      keyfile = mkOption {
+        type = types.str;
+        default = "/root/tarsnap.key";
+        description = lib.mdDoc ''
+          The keyfile which associates this machine with your tarsnap
+          account.
+          Create the keyfile with {command}`tarsnap-keygen`.
+
+          Note that each individual archive (specified below) may also have its
+          own individual keyfile specified. Tarsnap does not allow multiple
+          concurrent backups with the same cache directory and key (starting a
+          new backup will cause another one to fail). If you have multiple
+          archives specified, you should either spread out your backups to be
+          far apart, or specify a separate key for each archive. By default
+          every archive defaults to using
+          `"/root/tarsnap.key"`.
+
+          It's recommended for backups that you generate a key for every archive
+          using `tarsnap-keygen(1)`, and then generate a
+          write-only tarsnap key using `tarsnap-keymgmt(1)`,
+          and keep your master key(s) for a particular machine off-site.
+
+          The keyfile name should be given as a string and not a path, to
+          avoid the key being copied into the Nix store.
+        '';
+      };
+
+      archives = mkOption {
+        type = types.attrsOf (types.submodule ({ config, options, ... }:
+          {
+            options = {
+              keyfile = mkOption {
+                type = types.str;
+                default = gcfg.keyfile;
+                defaultText = literalExpression "config.${opt.keyfile}";
+                description = lib.mdDoc ''
+                  Set a specific keyfile for this archive. This defaults to
+                  `"/root/tarsnap.key"` if left unspecified.
+
+                  Use this option if you want to run multiple backups
+                  concurrently - each archive must have a unique key. You can
+                  generate a write-only key derived from your master key (which
+                  is recommended) using `tarsnap-keymgmt(1)`.
+
+                  Note: every archive must have an individual master key. You
+                  must generate multiple keys with
+                  `tarsnap-keygen(1)`, and then generate write-only
+                  keys from those.
+
+                  The keyfile name should be given as a string and not a path, to
+                  avoid the key being copied into the Nix store.
+                '';
+              };
+
+              cachedir = mkOption {
+                type = types.nullOr types.path;
+                default = "/var/cache/tarsnap/${utils.escapeSystemdPath config.keyfile}";
+                defaultText = literalExpression ''
+                  "/var/cache/tarsnap/''${utils.escapeSystemdPath config.${options.keyfile}}"
+                '';
+                description = lib.mdDoc ''
+                  The cache allows tarsnap to identify previously stored data
+                  blocks, reducing archival time and bandwidth usage.
+
+                  Should the cache become desynchronized or corrupted, tarsnap
+                  will refuse to run until you manually rebuild the cache with
+                  {command}`tarsnap --fsck`.
+
+                  Set to `null` to disable caching.
+                '';
+              };
+
+              nodump = mkOption {
+                type = types.bool;
+                default = true;
+                description = lib.mdDoc ''
+                  Exclude files with the `nodump` flag.
+                '';
+              };
+
+              printStats = mkOption {
+                type = types.bool;
+                default = true;
+                description = lib.mdDoc ''
+                  Print global archive statistics upon completion.
+                  The output is available via
+                  {command}`systemctl status tarsnap-archive-name`.
+                '';
+              };
+
+              checkpointBytes = mkOption {
+                type = types.nullOr types.str;
+                default = "1GB";
+                description = lib.mdDoc ''
+                  Create a checkpoint every `checkpointBytes`
+                  of uploaded data (optionally specified using an SI prefix).
+
+                  1GB is the minimum value. A higher value is recommended,
+                  as checkpointing is expensive.
+
+                  Set to `null` to disable checkpointing.
+                '';
+              };
+
+              period = mkOption {
+                type = types.str;
+                default = "01:15";
+                example = "hourly";
+                description = lib.mdDoc ''
+                  Create archive at this interval.
+
+                  The format is described in
+                  {manpage}`systemd.time(7)`.
+                '';
+              };
+
+              aggressiveNetworking = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Upload data over multiple TCP connections, potentially
+                  increasing tarsnap's bandwidth utilisation at the cost
+                  of slowing down all other network traffic. Not
+                  recommended unless TCP congestion is the dominant
+                  limiting factor.
+                '';
+              };
+
+              directories = mkOption {
+                type = types.listOf types.path;
+                default = [];
+                description = lib.mdDoc "List of filesystem paths to archive.";
+              };
+
+              excludes = mkOption {
+                type = types.listOf types.str;
+                default = [];
+                description = lib.mdDoc ''
+                  Exclude files and directories matching these patterns.
+                '';
+              };
+
+              includes = mkOption {
+                type = types.listOf types.str;
+                default = [];
+                description = lib.mdDoc ''
+                  Include only files and directories matching these
+                  patterns (the empty list includes everything).
+
+                  Exclusions have precedence over inclusions.
+                '';
+              };
+
+              lowmem = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Reduce memory consumption by not caching small files.
+                  Possibly beneficial if the average file size is smaller
+                  than 1 MB and the number of files is lower than the
+                  total amount of RAM in KB.
+                '';
+              };
+
+              verylowmem = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Reduce memory consumption by a factor of 2 beyond what
+                  `lowmem` does, at the cost of significantly
+                  slowing down the archiving process.
+                '';
+              };
+
+              maxbw = mkOption {
+                type = types.nullOr types.int;
+                default = null;
+                description = lib.mdDoc ''
+                  Abort archival if upstream bandwidth usage in bytes
+                  exceeds this threshold.
+                '';
+              };
+
+              maxbwRateUp = mkOption {
+                type = types.nullOr types.int;
+                default = null;
+                example = literalExpression "25 * 1000";
+                description = lib.mdDoc ''
+                  Upload bandwidth rate limit in bytes.
+                '';
+              };
+
+              maxbwRateDown = mkOption {
+                type = types.nullOr types.int;
+                default = null;
+                example = literalExpression "50 * 1000";
+                description = lib.mdDoc ''
+                  Download bandwidth rate limit in bytes.
+                '';
+              };
+
+              verbose = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Whether to produce verbose logging output.
+                '';
+              };
+              explicitSymlinks = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Whether to follow symlinks specified as archives.
+                '';
+              };
+              followSymlinks = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Whether to follow all symlinks in archive trees.
+                '';
+              };
+            };
+          }
+        ));
+
+        default = {};
+
+        example = literalExpression ''
+          {
+            nixos =
+              { directories = [ "/home" "/root/ssl" ];
+              };
+
+            gamedata =
+              { directories = [ "/var/lib/minecraft" ];
+                period      = "*:30";
+              };
+          }
+        '';
+
+        description = lib.mdDoc ''
+          Tarsnap archive configurations. Each attribute names an archive
+          to be created at a given time interval, according to the options
+          associated with it. When uploading to the tarsnap server,
+          archive names are suffixed by a 1 second resolution timestamp,
+          with the format `%Y%m%d%H%M%S`.
+
+          For each member of the set, a timer is created which triggers the
+          instanced `tarsnap-archive-name` service unit. You may use
+          {command}`systemctl start tarsnap-archive-name` to
+          manually trigger creation of `archive-name` at
+          any time.
+        '';
+      };
+    };
+  };
+
+  config = mkIf gcfg.enable {
+    assertions =
+      (mapAttrsToList (name: cfg:
+        { assertion = cfg.directories != [];
+          message = "Must specify paths for tarsnap to back up";
+        }) gcfg.archives) ++
+      (mapAttrsToList (name: cfg:
+        { assertion = !(cfg.lowmem && cfg.verylowmem);
+          message = "You cannot set both lowmem and verylowmem";
+        }) gcfg.archives);
+
+    systemd.services =
+      (mapAttrs' (name: cfg: nameValuePair "tarsnap-${name}" {
+        description = "Tarsnap archive '${name}'";
+        requires    = [ "network-online.target" ];
+        after       = [ "network-online.target" ];
+
+        path = with pkgs; [ iputils gcfg.package util-linux ];
+
+        # In order for the persistent tarsnap timer to work reliably, we have to
+        # make sure that the tarsnap server is reachable after systemd starts up
+        # the service - therefore we sleep in a loop until we can ping the
+        # endpoint.
+        preStart = ''
+          while ! ping -4 -q -c 1 v1-0-0-server.tarsnap.com &> /dev/null; do sleep 3; done
+        '';
+
+        script = let
+          tarsnap = ''${lib.getExe gcfg.package} --configfile "/etc/tarsnap/${name}.conf"'';
+          run = ''${tarsnap} -c -f "${name}-$(date +"%Y%m%d%H%M%S")" \
+                        ${optionalString cfg.verbose "-v"} \
+                        ${optionalString cfg.explicitSymlinks "-H"} \
+                        ${optionalString cfg.followSymlinks "-L"} \
+                        ${concatStringsSep " " cfg.directories}'';
+          cachedir = escapeShellArg cfg.cachedir;
+          in if (cfg.cachedir != null) then ''
+            mkdir -p ${cachedir}
+            chmod 0700 ${cachedir}
+
+            ( flock 9
+              if [ ! -e ${cachedir}/firstrun ]; then
+                ( flock 10
+                  flock -u 9
+                  ${tarsnap} --fsck
+                  flock 9
+                ) 10>${cachedir}/firstrun
+              fi
+            ) 9>${cachedir}/lockf
+
+             exec flock ${cachedir}/firstrun ${run}
+          '' else "exec ${run}";
+
+        serviceConfig = {
+          Type = "oneshot";
+          IOSchedulingClass = "idle";
+          NoNewPrivileges = "true";
+          CapabilityBoundingSet = [ "CAP_DAC_READ_SEARCH" ];
+          PermissionsStartOnly = "true";
+        };
+      }) gcfg.archives) //
+
+      (mapAttrs' (name: cfg: nameValuePair "tarsnap-restore-${name}"{
+        description = "Tarsnap restore '${name}'";
+        requires    = [ "network-online.target" ];
+
+        path = with pkgs; [ iputils gcfg.package util-linux ];
+
+        script = let
+          tarsnap = ''${lib.getExe gcfg.package} --configfile "/etc/tarsnap/${name}.conf"'';
+          lastArchive = "$(${tarsnap} --list-archives | sort | tail -1)";
+          run = ''${tarsnap} -x -f "${lastArchive}" ${optionalString cfg.verbose "-v"}'';
+          cachedir = escapeShellArg cfg.cachedir;
+
+        in if (cfg.cachedir != null) then ''
+          mkdir -p ${cachedir}
+          chmod 0700 ${cachedir}
+
+          ( flock 9
+            if [ ! -e ${cachedir}/firstrun ]; then
+              ( flock 10
+                flock -u 9
+                ${tarsnap} --fsck
+                flock 9
+              ) 10>${cachedir}/firstrun
+            fi
+          ) 9>${cachedir}/lockf
+
+           exec flock ${cachedir}/firstrun ${run}
+        '' else "exec ${run}";
+
+        serviceConfig = {
+          Type = "oneshot";
+          IOSchedulingClass = "idle";
+          NoNewPrivileges = "true";
+          CapabilityBoundingSet = [ "CAP_DAC_READ_SEARCH" ];
+          PermissionsStartOnly = "true";
+        };
+      }) gcfg.archives);
+
+    # Note: the timer must be Persistent=true, so that systemd will start it even
+    # if e.g. your laptop was asleep while the latest interval occurred.
+    systemd.timers = mapAttrs' (name: cfg: nameValuePair "tarsnap-${name}"
+      { timerConfig.OnCalendar = cfg.period;
+        timerConfig.Persistent = "true";
+        wantedBy = [ "timers.target" ];
+      }) gcfg.archives;
+
+    environment.etc =
+      mapAttrs' (name: cfg: nameValuePair "tarsnap/${name}.conf"
+        { text = configFile name cfg;
+        }) gcfg.archives;
+
+    environment.systemPackages = [ gcfg.package ];
+  };
+}
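A usage sketch for the tarsnap module; the archive name, paths and schedule are illustrative assumptions. The keyfile falls back to `/root/tarsnap.key` as described in the option above.

```nix
{
  services.tarsnap = {
    enable = true;

    archives.nixos = {
      directories = [ "/home" "/etc/nixos" ];
      excludes = [ "/home/*/.cache" ];
      period = "daily";   # systemd.time(7) calendar expression
    };
  };
}
```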
diff --git a/nixpkgs/nixos/modules/services/backup/tsm.nix b/nixpkgs/nixos/modules/services/backup/tsm.nix
new file mode 100644
index 000000000000..c4de0b16d47d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/tsm.nix
@@ -0,0 +1,125 @@
+{ config, lib, ... }:
+
+let
+
+  inherit (lib.attrsets) hasAttr;
+  inherit (lib.modules) mkDefault mkIf;
+  inherit (lib.options) mkEnableOption mkOption;
+  inherit (lib.types) nonEmptyStr nullOr;
+
+  options.services.tsmBackup = {
+    enable = mkEnableOption (lib.mdDoc ''
+      automatic backups with the
+      IBM Spectrum Protect (Tivoli Storage Manager, TSM) client.
+      This also enables
+      {option}`programs.tsmClient.enable`
+    '');
+    command = mkOption {
+      type = nonEmptyStr;
+      default = "backup";
+      example = "incr";
+      description = lib.mdDoc ''
+        The actual command passed to the
+        `dsmc` executable to start the backup.
+      '';
+    };
+    servername = mkOption {
+      type = nonEmptyStr;
+      example = "mainTsmServer";
+      description = lib.mdDoc ''
+        Create a systemd system service
+        `tsm-backup.service` that starts
+        a backup based on the given servername's stanza.
+        Note that this server's
+        {option}`passwdDir` will default to
+        {file}`/var/lib/tsm-backup/password`
+        (but may be overridden);
+        also, the service will use
+        {file}`/var/lib/tsm-backup` as
+        `HOME` when calling
+        `dsmc`.
+      '';
+    };
+    autoTime = mkOption {
+      type = nullOr nonEmptyStr;
+      default = null;
+      example = "12:00";
+      description = lib.mdDoc ''
+        The backup service will be invoked
+        automatically at the given date/time,
+        which must be in the format described in
+        {manpage}`systemd.time(5)`.
+        The default `null`
+        disables automatic backups.
+      '';
+    };
+  };
+
+  cfg = config.services.tsmBackup;
+  cfgPrg = config.programs.tsmClient;
+
+  assertions = [
+    {
+      assertion = hasAttr cfg.servername cfgPrg.servers;
+      message = "TSM service servername not found in list of servers";
+    }
+    {
+      assertion = cfgPrg.servers.${cfg.servername}.genPasswd;
+      message = "TSM service requires automatic password generation";
+    }
+  ];
+
+in
+
+{
+
+  inherit options;
+
+  config = mkIf cfg.enable {
+    inherit assertions;
+    programs.tsmClient.enable = true;
+    programs.tsmClient.servers.${cfg.servername}.passwdDir =
+      mkDefault "/var/lib/tsm-backup/password";
+    systemd.services.tsm-backup = {
+      description = "IBM Spectrum Protect (Tivoli Storage Manager) Backup";
+      # DSM_LOG needs a trailing slash to have it treated as a directory.
+      # `/var/log` would be littered with TSM log files otherwise.
+      environment.DSM_LOG = "/var/log/tsm-backup/";
+      # TSM needs a HOME dir to store certificates.
+      environment.HOME = "/var/lib/tsm-backup";
+      serviceConfig = {
+        # for exit status description see
+        # https://www.ibm.com/docs/en/spectrum-protect/8.1.13?topic=clients-client-return-codes
+        SuccessExitStatus = "4 8";
+        # The `-se` option must come after the command.
+        # The `-optfile` option suppresses a `dsm.opt`-not-found warning.
+        ExecStart =
+          "${cfgPrg.wrappedPackage}/bin/dsmc ${cfg.command} -se='${cfg.servername}' -optfile=/dev/null";
+        LogsDirectory = "tsm-backup";
+        StateDirectory = "tsm-backup";
+        StateDirectoryMode = "0750";
+        # systemd sandboxing
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        #PrivateTmp = true;  # would break backup of {/var,}/tmp
+        #PrivateUsers = true;  # would block backup of /home/*
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = "read-only";
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "noaccess";
+        ProtectSystem = "strict";
+        RestrictNamespaces = true;
+        RestrictSUIDSGID = true;
+      };
+      startAt = mkIf (cfg.autoTime!=null) cfg.autoTime;
+    };
+  };
+
+  meta.maintainers = [ lib.maintainers.yarny ];
+
+}
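A usage sketch for the TSM backup module; the server name and schedule are illustrative. The assertions above require a matching `programs.tsmClient.servers.<name>` entry with `genPasswd` enabled; that client module is outside this diff, so its options are not shown here.

```nix
{
  services.tsmBackup = {
    enable = true;                 # also enables programs.tsmClient
    servername = "mainTsmServer";  # must exist in programs.tsmClient.servers
    command = "incr";
    autoTime = "12:00";
  };
}
```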
diff --git a/nixpkgs/nixos/modules/services/backup/zfs-replication.nix b/nixpkgs/nixos/modules/services/backup/zfs-replication.nix
new file mode 100644
index 000000000000..8e7059e5b59d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/zfs-replication.nix
@@ -0,0 +1,90 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.zfs.autoReplication;
+  recursive = optionalString cfg.recursive " --recursive";
+  followDelete = optionalString cfg.followDelete " --follow-delete";
+in {
+  options = {
+    services.zfs.autoReplication = {
+      enable = mkEnableOption (lib.mdDoc "ZFS snapshot replication");
+
+      followDelete = mkOption {
+        description = lib.mdDoc "Remove remote snapshots that don't have a local correspondent.";
+        default = true;
+        type = types.bool;
+      };
+
+      host = mkOption {
+        description = lib.mdDoc "Remote host where snapshots should be sent. `lz4` is expected to be installed on this host.";
+        example = "example.com";
+        type = types.str;
+      };
+
+      identityFilePath = mkOption {
+        description = lib.mdDoc "Path to SSH key used to login to host.";
+        example = "/home/username/.ssh/id_rsa";
+        type = types.path;
+      };
+
+      localFilesystem = mkOption {
+        description = lib.mdDoc "Local ZFS filesystem from which snapshots should be sent.  Defaults to the attribute name.";
+        example = "pool/file/path";
+        type = types.str;
+      };
+
+      remoteFilesystem = mkOption {
+        description = lib.mdDoc "Remote ZFS filesystem where snapshots should be sent.";
+        example = "pool/file/path";
+        type = types.str;
+      };
+
+      recursive = mkOption {
+        description = lib.mdDoc "Recursively discover snapshots to send.";
+        default = true;
+        type = types.bool;
+      };
+
+      username = mkOption {
+        description = lib.mdDoc "Username used by SSH to login to remote host.";
+        example = "username";
+        type = types.str;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [
+      pkgs.lz4
+    ];
+
+    systemd.services.zfs-replication = {
+      after = [
+        "zfs-snapshot-daily.service"
+        "zfs-snapshot-frequent.service"
+        "zfs-snapshot-hourly.service"
+        "zfs-snapshot-monthly.service"
+        "zfs-snapshot-weekly.service"
+      ];
+      description = "ZFS Snapshot Replication";
+      documentation = [
+        "https://github.com/alunduil/zfs-replicate"
+      ];
+      restartIfChanged = false;
+      serviceConfig.ExecStart = "${pkgs.zfs-replicate}/bin/zfs-replicate${recursive} -l ${escapeShellArg cfg.username} -i ${escapeShellArg cfg.identityFilePath}${followDelete} ${escapeShellArg cfg.host} ${escapeShellArg cfg.remoteFilesystem} ${escapeShellArg cfg.localFilesystem}";
+      wantedBy = [
+        "zfs-snapshot-daily.service"
+        "zfs-snapshot-frequent.service"
+        "zfs-snapshot-hourly.service"
+        "zfs-snapshot-monthly.service"
+        "zfs-snapshot-weekly.service"
+      ];
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ alunduil ];
+  };
+}
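A usage sketch for the ZFS replication module; host, user, key path and dataset names are illustrative assumptions.

```nix
{
  services.zfs.autoReplication = {
    enable = true;
    host = "backup.example.com";
    username = "zfssend";
    identityFilePath = "/var/lib/zfs-replication/id_ed25519";
    localFilesystem = "tank/data";
    remoteFilesystem = "backuppool/data";
    recursive = true;
    followDelete = true;
  };
}
```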
diff --git a/nixpkgs/nixos/modules/services/backup/znapzend.nix b/nixpkgs/nixos/modules/services/backup/znapzend.nix
new file mode 100644
index 000000000000..2ebe8ad2f69a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/znapzend.nix
@@ -0,0 +1,469 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+with types;
+
+let
+
+  planDescription = ''
+      The znapzend backup plan to use for the source.
+
+      The plan specifies how often to backup and for how long to keep the
+      backups. It consists of a series of retention periods to interval
+      associations:
+
+      ```
+        retA=>intA,retB=>intB,...
+      ```
+
+      Both intervals and retention periods are expressed in standard units
+      of time or multiples of them. You can use both the full name or a
+      shortcut according to the following listing:
+
+      ```
+        second|sec|s, minute|min, hour|h, day|d, week|w, month|mon|m, year|y
+      ```
+
+      See {manpage}`znapzendzetup(1)` for more info.
+  '';
+  planExample = "1h=>10min,1d=>1h,1w=>1d,1m=>1w,1y=>1m";
+
+  # A type for a string of the form number{b|k|M|G}
+  mbufferSizeType = str // {
+    check = x: str.check x && builtins.isList (builtins.match "^[0-9]+[bkMG]$" x);
+    description = "string of the form number{b|k|M|G}";
+  };
+
+  enabledFeatures = concatLists (mapAttrsToList (name: enabled: optional enabled name) cfg.features);
+
+  # Type for a string that must contain certain other strings (the list parameter).
+  # Note that these would need regex escaping.
+  stringContainingStrings = list: let
+    matching = s: map (str: builtins.match ".*${str}.*" s) list;
+  in str // {
+    check = x: str.check x && all isList (matching x);
+    description = "string containing all of the characters ${concatStringsSep ", " list}";
+  };
+
+  timestampType = stringContainingStrings [ "%Y" "%m" "%d" "%H" "%M" "%S" ];
+
+  destType = srcConfig: submodule ({ name, ... }: {
+    options = {
+
+      label = mkOption {
+        type = str;
+        description = lib.mdDoc "Label for this destination. Defaults to the attribute name.";
+      };
+
+      plan = mkOption {
+        type = str;
+        description = lib.mdDoc planDescription;
+        example = planExample;
+      };
+
+      dataset = mkOption {
+        type = str;
+        description = lib.mdDoc "Dataset name to send snapshots to.";
+        example = "tank/main";
+      };
+
+      host = mkOption {
+        type = nullOr str;
+        description = lib.mdDoc ''
+          Host to use for the destination dataset. Can be prefixed with
+          `user@` to specify the ssh user.
+        '';
+        default = null;
+        example = "john@example.com";
+      };
+
+      presend = mkOption {
+        type = nullOr str;
+        description = lib.mdDoc ''
+          Command to run before sending the snapshot to the destination.
+          Intended to run a remote script via {command}`ssh` on the
+          destination, e.g. to bring up a backup disk or server or to put a
+          zpool online/offline. See also {option}`postsend`.
+        '';
+        default = null;
+        example = "ssh root@bserv zpool import -Nf tank";
+      };
+
+      postsend = mkOption {
+        type = nullOr str;
+        description = lib.mdDoc ''
+          Command to run after sending the snapshot to the destination.
+          Intended to run a remote script via {command}`ssh` on the
+          destination, e.g. to bring up a backup disk or server or to put a
+          zpool online/offline. See also {option}`presend`.
+        '';
+        default = null;
+        example = "ssh root@bserv zpool export tank";
+      };
+    };
+
+    config = {
+      label = mkDefault name;
+      plan = mkDefault srcConfig.plan;
+    };
+  });
+
+
+
+  srcType = submodule ({ name, config, ... }: {
+    options = {
+
+      enable = mkOption {
+        type = bool;
+        description = lib.mdDoc "Whether to enable this source.";
+        default = true;
+      };
+
+      recursive = mkOption {
+        type = bool;
+        description = lib.mdDoc "Whether to do recursive snapshots.";
+        default = false;
+      };
+
+      mbuffer = {
+        enable = mkOption {
+          type = bool;
+          description = lib.mdDoc "Whether to use {command}`mbuffer`.";
+          default = false;
+        };
+
+        port = mkOption {
+          type = nullOr ints.u16;
+          description = lib.mdDoc ''
+              Port to use for {command}`mbuffer`.
+
+              If this is null, it will run {command}`mbuffer` through
+              ssh.
+
+              If this is not null, it will run {command}`mbuffer`
+              directly through TCP, which is not encrypted but faster. In that
+              case the given port needs to be open on the destination host.
+          '';
+          default = null;
+        };
+
+        size = mkOption {
+          type = mbufferSizeType;
+          description = lib.mdDoc ''
+            The size for {command}`mbuffer`.
+            Supports the units b, k, M, G.
+          '';
+          default = "1G";
+          example = "128M";
+        };
+      };
+
+      presnap = mkOption {
+        type = nullOr str;
+        description = lib.mdDoc ''
+          Command to run before snapshots are taken on the source dataset,
+          e.g. for database locking/flushing. See also
+          {option}`postsnap`.
+        '';
+        default = null;
+        example = literalExpression ''
+          '''''${pkgs.mariadb}/bin/mysql -e "set autocommit=0;flush tables with read lock;\\! ''${pkgs.coreutils}/bin/sleep 600" &  ''${pkgs.coreutils}/bin/echo $! > /tmp/mariadblock.pid ; sleep 10'''
+        '';
+      };
+
+      postsnap = mkOption {
+        type = nullOr str;
+        description = lib.mdDoc ''
+          Command to run after snapshots are taken on the source dataset,
+          e.g. for database unlocking. See also {option}`presnap`.
+        '';
+        default = null;
+        example = literalExpression ''
+          "''${pkgs.coreutils}/bin/kill `''${pkgs.coreutils}/bin/cat /tmp/mariadblock.pid`;''${pkgs.coreutils}/bin/rm /tmp/mariadblock.pid"
+        '';
+      };
+
+      timestampFormat = mkOption {
+        type = timestampType;
+        description = lib.mdDoc ''
+          The timestamp format to use for constructing snapshot names.
+          The syntax is `strftime`-like. The string must
+          consist of the mandatory `%Y %m %d %H %M %S`.
+          Optionally `- _ . :` characters as well as any
+          alphanumeric character are allowed. If suffixed by a
+          `Z`, times will be in UTC.
+        '';
+        default = "%Y-%m-%d-%H%M%S";
+        example = "znapzend-%m.%d.%Y-%H%M%SZ";
+      };
+
+      sendDelay = mkOption {
+        type = int;
+        description = lib.mdDoc ''
+          Specify delay (in seconds) before sending snaps to the destination.
+          May be useful if you want to control sending time.
+        '';
+        default = 0;
+        example = 60;
+      };
+
+      plan = mkOption {
+        type = str;
+        description = lib.mdDoc planDescription;
+        example = planExample;
+      };
+
+      dataset = mkOption {
+        type = str;
+        description = lib.mdDoc "The dataset to use for this source.";
+        example = "tank/home";
+      };
+
+      destinations = mkOption {
+        type = attrsOf (destType config);
+        description = lib.mdDoc "Additional destinations.";
+        default = {};
+        example = literalExpression ''
+          {
+            local = {
+              dataset = "btank/backup";
+              presend = "zpool import -N btank";
+              postsend = "zpool export btank";
+            };
+            remote = {
+              host = "john@example.com";
+              dataset = "tank/john";
+            };
+          };
+        '';
+      };
+    };
+
+    config = {
+      dataset = mkDefault name;
+    };
+
+  });
+
+  ### Generating the configuration from here
+
+  cfg = config.services.znapzend;
+
+  onOff = b: if b then "on" else "off";
+  nullOff = b: if b == null then "off" else toString b;
+  stripSlashes = replaceStrings [ "/" ] [ "." ];
+
+  attrsToFile = config: concatStringsSep "\n" (builtins.attrValues (
+    mapAttrs (n: v: "${n}=${v}") config));
+
+  mkDestAttrs = dst: with dst;
+    mapAttrs' (n: v: nameValuePair "dst_${label}${n}" v) ({
+      "" = optionalString (host != null) "${host}:" + dataset;
+      _plan = plan;
+    } // optionalAttrs (presend != null) {
+      _precmd = presend;
+    } // optionalAttrs (postsend != null) {
+      _pstcmd = postsend;
+    });
+
+  mkSrcAttrs = srcCfg: with srcCfg; {
+    enabled = onOff enable;
+    # mbuffer is not referenced by its full path to accommodate non-NixOS systems or differing mbuffer versions between source and target
+    mbuffer = with mbuffer; if enable then "mbuffer"
+        + optionalString (port != null) ":${toString port}" else "off";
+    mbuffer_size = mbuffer.size;
+    post_znap_cmd = nullOff postsnap;
+    pre_znap_cmd = nullOff presnap;
+    recursive = onOff recursive;
+    src = dataset;
+    src_plan = plan;
+    tsformat = timestampFormat;
+    zend_delay = toString sendDelay;
+  } // foldr (a: b: a // b) {} (
+    map mkDestAttrs (builtins.attrValues destinations)
+  );
+
+  files = mapAttrs' (n: srcCfg: let
+    fileText = attrsToFile (mkSrcAttrs srcCfg);
+  in {
+    name = srcCfg.dataset;
+    value = pkgs.writeText (stripSlashes srcCfg.dataset) fileText;
+  }) cfg.zetup;
+
+in
+{
+  options = {
+    services.znapzend = {
+      enable = mkEnableOption (lib.mdDoc "ZnapZend ZFS backup daemon");
+
+      logLevel = mkOption {
+        default = "debug";
+        example = "warning";
+        type = enum ["debug" "info" "warning" "err" "alert"];
+        description = lib.mdDoc ''
+          The log level when logging to file. Any of debug, info, warning, err,
+          alert. Default in daemonized form is debug.
+        '';
+      };
+
+      logTo = mkOption {
+        type = str;
+        default = "syslog::daemon";
+        example = "/var/log/znapzend.log";
+        description = lib.mdDoc ''
+          Where to log to (syslog::\<facility\> or \<filepath\>).
+        '';
+      };
+
+      noDestroy = mkOption {
+        type = bool;
+        default = false;
+        description = lib.mdDoc "Does all changes to the filesystem except destroy.";
+      };
+
+      autoCreation = mkOption {
+        type = bool;
+        default = false;
+        description = lib.mdDoc "Automatically create the destination dataset if it does not exist.";
+      };
+
+      zetup = mkOption {
+        type = attrsOf srcType;
+        description = lib.mdDoc "Znapzend configuration.";
+        default = {};
+        example = literalExpression ''
+          {
+            "tank/home" = {
+              # Make snapshots of tank/home every hour, keep those for 1 day,
+              # keep every days snapshot for 1 month, etc.
+              plan = "1d=>1h,1m=>1d,1y=>1m";
+              recursive = true;
+              # Send all those snapshots to john@example.com:rtank/john as well
+              destinations.remote = {
+                host = "john@example.com";
+                dataset = "rtank/john";
+              };
+            };
+          };
+        '';
+      };
+
+      pure = mkOption {
+        type = bool;
+        description = lib.mdDoc ''
+          Do not persist any stateful znapzend setups. If this option is
+          enabled, your previously set znapzend setups will be cleared and only
+          the ones defined with this module will be applied.
+        '';
+        default = false;
+      };
+
+      features.oracleMode = mkEnableOption (lib.mdDoc ''
+        destroying snapshots one by one instead of using one long argument list.
+        If source and destination are out of sync for a long time, you may have
+        so many snapshots to destroy that the argument list gets too long and the
+        command fails
+      '');
+      features.recvu = mkEnableOption (lib.mdDoc ''
+        recvu feature which uses `-u` on the receiving end to keep the destination
+        filesystem unmounted
+      '');
+      features.compressed = mkEnableOption (lib.mdDoc ''
+        compressed feature which adds the options `-Lce` to
+        the {command}`zfs send` command. When this is enabled, make
+        sure that both the sending and receiving pool have the same relevant
+        features enabled. Using `-c` will skip unnecessary
+        decompress-compress stages, `-L` is for large block
+        support and `-e` is for embedded data support. See
+        {manpage}`znapzend(1)`
+        and {manpage}`zfs(8)`
+        for more info
+      '');
+      features.sendRaw = mkEnableOption (lib.mdDoc ''
+        sendRaw feature which adds the options `-w` to the
+        {command}`zfs send` command. For encrypted source datasets this
+        instructs zfs not to decrypt before sending which results in a remote
+        backup that can't be read without the encryption key/passphrase, useful
+        when the remote isn't fully trusted or not physically secure. This
+        option must be used consistently, raw incrementals cannot be based on
+        non-raw snapshots and vice versa
+      '');
+      features.skipIntermediates = mkEnableOption (lib.mdDoc ''
+        the skipIntermediates feature to send a single increment
+        between latest common snapshot and the newly made one. It may skip
+        several source snaps if the destination was offline for some time, and
+        it should skip snapshots not managed by znapzend. Normally for online
+        destinations, the new snapshot is sent as soon as it is created on the
+        source, so there are no automatic increments to skip
+      '');
+      features.lowmemRecurse = mkEnableOption (lib.mdDoc ''
+        use lowmemRecurse on systems where you have too many datasets, so a
+        recursive listing of attributes to find backup plans exhausts the
+        memory available to {command}`znapzend`: instead, go the slower
+        way to first list all impacted dataset names, and then query their
+        configs one by one
+      '');
+      features.zfsGetType = mkEnableOption (lib.mdDoc ''
+        using zfsGetType if your {command}`zfs get` supports a
+        `-t` argument for filtering by dataset type at all AND
+        lists properties for snapshots by default when recursing, so that there
+        is too much data to process while searching for backup plans.
+        If these two conditions apply to your system, the time needed for a
+        `--recursive` search for backup plans can literally
+        differ by hundreds of times (depending on the amount of snapshots in
+        that dataset tree... and a decent backup plan will ensure you have a lot
+        of those), so you would benefit from requesting this feature
+      '');
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.znapzend ];
+
+    systemd.services = {
+      znapzend = {
+        description = "ZnapZend - ZFS Backup System";
+        wantedBy    = [ "zfs.target" ];
+        after       = [ "zfs.target" ];
+
+        path = with pkgs; [ zfs mbuffer openssh ];
+
+        preStart = optionalString cfg.pure ''
+          echo Resetting znapzend zetups
+          ${pkgs.znapzend}/bin/znapzendzetup list \
+            | grep -oP '(?<=\*\*\* backup plan: ).*(?= \*\*\*)' \
+            | xargs -I{} ${pkgs.znapzend}/bin/znapzendzetup delete "{}"
+        '' + concatStringsSep "\n" (mapAttrsToList (dataset: config: ''
+          echo Importing znapzend zetup ${config} for dataset ${dataset}
+          ${pkgs.znapzend}/bin/znapzendzetup import --write ${dataset} ${config} &
+        '') files) + ''
+          wait
+        '';
+
+        serviceConfig = {
+          # znapzendzetup --import apparently tries to connect to the backup
+          # host 3 times with a timeout of 30 seconds, leading to a startup
+          # delay of >90s when the host is down, which is just above the default
+          # service timeout of 90 seconds. Increase the timeout so it doesn't
+          # make the service fail in that case.
+          TimeoutStartSec = 180;
+          # Needs to have write access to ZFS
+          User = "root";
+          ExecStart = let
+            args = concatStringsSep " " [
+              "--logto=${cfg.logTo}"
+              "--loglevel=${cfg.logLevel}"
+              (optionalString cfg.noDestroy "--nodestroy")
+              (optionalString cfg.autoCreation "--autoCreation")
+              (optionalString (enabledFeatures != [])
+                "--features=${concatStringsSep "," enabledFeatures}")
+            ]; in "${pkgs.znapzend}/bin/znapzend ${args}";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          Restart = "on-failure";
+        };
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ infinisil SlothOfAnarchy ];
+}
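# Usage sketch (illustrative, not part of this patch): with the module above, a
# host would enable the daemon and pick features roughly as follows; the backup
# plans themselves are imported via znapzendzetup in the service's preStart.
{
  services.znapzend = {
    enable = true;
    noDestroy = true;            # adds --nodestroy to the daemon invocation
    features.zfsGetType = true;  # faster recursive discovery of backup plans
    features.sendRaw = true;     # send encrypted datasets without decrypting them
  };
}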
diff --git a/nixpkgs/nixos/modules/services/backup/zrepl.nix b/nixpkgs/nixos/modules/services/backup/zrepl.nix
new file mode 100644
index 000000000000..1d3afa3eda05
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/backup/zrepl.nix
@@ -0,0 +1,63 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.zrepl;
+  format = pkgs.formats.yaml { };
+  configFile = format.generate "zrepl.yml" cfg.settings;
+in
+{
+  meta.maintainers = with maintainers; [ cole-h ];
+
+  options = {
+    services.zrepl = {
+      enable = mkEnableOption (lib.mdDoc "zrepl");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.zrepl;
+        defaultText = literalExpression "pkgs.zrepl";
+        description = lib.mdDoc "Which package to use for zrepl";
+      };
+
+      settings = mkOption {
+        default = { };
+        description = lib.mdDoc ''
+          Configuration for zrepl. See <https://zrepl.github.io/configuration.html>
+          for more information.
+        '';
+        type = types.submodule {
+          freeformType = format.type;
+        };
+      };
+    };
+  };
+
+  ### Implementation ###
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    # zrepl looks for its config in this location by default. This
+    # allows the use of e.g. `zrepl signal wakeup <job>` without having
+    # to specify the storepath of the config.
+    environment.etc."zrepl/zrepl.yml".source = configFile;
+
+    systemd.packages = [ cfg.package ];
+
+    # Note that pkgs.zrepl copies and adapts the upstream systemd unit, and
+    # the fields defined here only override certain fields from that unit.
+    systemd.services.zrepl = {
+      requires = [ "local-fs.target" ];
+      wantedBy = [ "zfs.target" ];
+      after = [ "zfs.target" ];
+
+      path = [ config.boot.zfs.package ];
+      restartTriggers = [ configFile ];
+
+      serviceConfig = {
+        Restart = "on-failure";
+      };
+    };
+  };
+}
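# Usage sketch (illustrative): `settings` is rendered verbatim to
# /etc/zrepl/zrepl.yml, so a minimal snapshot-only job following the upstream
# schema might look like this; the job name, filesystem and intervals are
# placeholders.
{
  services.zrepl = {
    enable = true;
    settings.jobs = [
      {
        name = "snap_tank";
        type = "snap";
        filesystems."tank/data<" = true;
        snapshotting = {
          type = "periodic";
          interval = "10m";
          prefix = "zrepl_";
        };
        pruning.keep = [
          { type = "last_n"; count = 24; }
        ];
      }
    ];
  };
}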
diff --git a/nixpkgs/nixos/modules/services/blockchain/ethereum/erigon.nix b/nixpkgs/nixos/modules/services/blockchain/ethereum/erigon.nix
new file mode 100644
index 000000000000..945a373d1274
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/blockchain/ethereum/erigon.nix
@@ -0,0 +1,122 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+
+  cfg = config.services.erigon;
+
+  settingsFormat = pkgs.formats.toml { };
+  configFile = settingsFormat.generate "config.toml" cfg.settings;
+in {
+
+  options = {
+    services.erigon = {
+      enable = mkEnableOption (lib.mdDoc "Ethereum implementation on the efficiency frontier");
+
+      package = mkPackageOptionMD pkgs "erigon" { };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "Additional arguments passed to Erigon";
+        default = [ ];
+      };
+
+      secretJwtPath = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to the JWT secret used for HTTP API authentication.
+        '';
+        default = "";
+        example = "config.age.secrets.ERIGON_JWT.path";
+      };
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Configuration for Erigon
+          Refer to <https://github.com/ledgerwatch/erigon#usage> for details on supported values.
+        '';
+
+        type = settingsFormat.type;
+
+        example = {
+          datadir = "/var/lib/erigon";
+          chain = "mainnet";
+          http = true;
+          "http.port" = 8545;
+          "http.api" = ["eth" "debug" "net" "trace" "web3" "erigon"];
+          ws = true;
+          port = 30303;
+          "authrpc.port" = 8551;
+          "torrent.port" = 42069;
+          "private.api.addr" = "localhost:9090";
+          "log.console.verbosity" = 3; # info
+        };
+
+        defaultText = literalExpression ''
+          {
+            datadir = "/var/lib/erigon";
+            chain = "mainnet";
+            http = true;
+            "http.port" = 8545;
+            "http.api" = ["eth" "debug" "net" "trace" "web3" "erigon"];
+            ws = true;
+            port = 30303;
+            "authrpc.port" = 8551;
+            "torrent.port" = 42069;
+            "private.api.addr" = "localhost:9090";
+            "log.console.verbosity" = 3; # info
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Default values are the same as in the binary, they are just written here for convenience.
+    services.erigon.settings = {
+      datadir = mkDefault "/var/lib/erigon";
+      chain = mkDefault "mainnet";
+      http = mkDefault true;
+      "http.port" = mkDefault 8545;
+      "http.api" = mkDefault ["eth" "debug" "net" "trace" "web3" "erigon"];
+      ws = mkDefault true;
+      port = mkDefault 30303;
+      "authrpc.port" = mkDefault 8551;
+      "torrent.port" = mkDefault 42069;
+      "private.api.addr" = mkDefault "localhost:9090";
+      "log.console.verbosity" = mkDefault 3; # info
+    };
+
+    systemd.services.erigon = {
+      description = "Erigon ethereum implemenntation";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        LoadCredential = "ERIGON_JWT:${cfg.secretJwtPath}";
+        ExecStart = "${cfg.package}/bin/erigon --config ${configFile} --authrpc.jwtsecret=%d/ERIGON_JWT ${lib.escapeShellArgs cfg.extraArgs}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        StateDirectory = "erigon";
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectClock = true;
+        ProtectProc = "noaccess";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        RestrictSUIDSGID = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+      };
+    };
+  };
+}
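# Usage sketch (illustrative): the freeform `settings` are merged with the
# defaults written above and rendered to config.toml; the JWT path below is a
# placeholder and must point at the secret shared with your consensus client.
{
  services.erigon = {
    enable = true;
    secretJwtPath = "/run/keys/erigon-jwt";    # hypothetical path
    settings = {
      chain = "sepolia";                       # override the mainnet default
      "http.api" = [ "eth" "net" "web3" ];
    };
  };
}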
diff --git a/nixpkgs/nixos/modules/services/blockchain/ethereum/geth.nix b/nixpkgs/nixos/modules/services/blockchain/ethereum/geth.nix
new file mode 100644
index 000000000000..d12516ca2f24
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/blockchain/ethereum/geth.nix
@@ -0,0 +1,213 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  eachGeth = config.services.geth;
+
+  gethOpts = { config, lib, name, ...}: {
+
+    options = {
+
+      enable = lib.mkEnableOption (lib.mdDoc "Go Ethereum Node");
+
+      port = mkOption {
+        type = types.port;
+        default = 30303;
+        description = lib.mdDoc "Port number Go Ethereum will be listening on, both TCP and UDP.";
+      };
+
+      http = {
+        enable = lib.mkEnableOption (lib.mdDoc "Go Ethereum HTTP API");
+        address = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Listen address of Go Ethereum HTTP API.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8545;
+          description = lib.mdDoc "Port number of Go Ethereum HTTP API.";
+        };
+
+        apis = mkOption {
+          type = types.nullOr (types.listOf types.str);
+          default = null;
+          description = lib.mdDoc "APIs to enable over WebSocket";
+          example = ["net" "eth"];
+        };
+      };
+
+      websocket = {
+        enable = lib.mkEnableOption (lib.mdDoc "Go Ethereum WebSocket API");
+        address = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Listen address of Go Ethereum WebSocket API.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8546;
+          description = lib.mdDoc "Port number of Go Ethereum WebSocket API.";
+        };
+
+        apis = mkOption {
+          type = types.nullOr (types.listOf types.str);
+          default = null;
+          description = lib.mdDoc "APIs to enable over WebSocket";
+          example = ["net" "eth"];
+        };
+      };
+
+      authrpc = {
+        enable = lib.mkEnableOption (lib.mdDoc "Go Ethereum Auth RPC API");
+        address = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Listen address of Go Ethereum Auth RPC API.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8551;
+          description = lib.mdDoc "Port number of Go Ethereum Auth RPC API.";
+        };
+
+        vhosts = mkOption {
+          type = types.nullOr (types.listOf types.str);
+          default = ["localhost"];
+          description = lib.mdDoc "List of virtual hostnames from which to accept requests.";
+          example = ["localhost" "geth.example.org"];
+        };
+
+        jwtsecret = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "Path to a JWT secret for authenticated RPC endpoint.";
+          example = "/var/run/geth/jwtsecret";
+        };
+      };
+
+      metrics = {
+        enable = lib.mkEnableOption (lib.mdDoc "Go Ethereum prometheus metrics");
+        address = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Listen address of Go Ethereum metrics service.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 6060;
+          description = lib.mdDoc "Port number of Go Ethereum metrics service.";
+        };
+      };
+
+      network = mkOption {
+        type = types.nullOr (types.enum [ "goerli" "rinkeby" "yolov2" "ropsten" ]);
+        default = null;
+        description = lib.mdDoc "The network to connect to. Mainnet (null) is the default ethereum network.";
+      };
+
+      syncmode = mkOption {
+        type = types.enum [ "snap" "fast" "full" "light" ];
+        default = "snap";
+        description = lib.mdDoc "Blockchain sync mode.";
+      };
+
+      gcmode = mkOption {
+        type = types.enum [ "full" "archive" ];
+        default = "full";
+        description = lib.mdDoc "Blockchain garbage collection mode.";
+      };
+
+      maxpeers = mkOption {
+        type = types.int;
+        default = 50;
+        description = lib.mdDoc "Maximum peers to connect to.";
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "Additional arguments passed to Go Ethereum.";
+        default = [];
+      };
+
+      package = mkOption {
+        default = pkgs.go-ethereum.geth;
+        defaultText = literalExpression "pkgs.go-ethereum.geth";
+        type = types.package;
+        description = lib.mdDoc "Package to use as Go Ethereum node.";
+      };
+    };
+  };
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.geth = mkOption {
+      type = types.attrsOf (types.submodule gethOpts);
+      default = {};
+      description = lib.mdDoc "Specification of one or more geth instances.";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf (eachGeth != {}) {
+
+    environment.systemPackages = flatten (mapAttrsToList (gethName: cfg: [
+      cfg.package
+    ]) eachGeth);
+
+    systemd.services = mapAttrs' (gethName: cfg: let
+      stateDir = "goethereum/${gethName}/${if (cfg.network == null) then "mainnet" else cfg.network}";
+      dataDir = "/var/lib/${stateDir}";
+    in (
+      nameValuePair "geth-${gethName}" (mkIf cfg.enable {
+      description = "Go Ethereum node (${gethName})";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        Restart = "always";
+        StateDirectory = stateDir;
+
+        # Hardening measures
+        PrivateTmp = "true";
+        ProtectSystem = "full";
+        NoNewPrivileges = "true";
+        PrivateDevices = "true";
+        MemoryDenyWriteExecute = "true";
+      };
+
+      script = ''
+        ${cfg.package}/bin/geth \
+          --nousb \
+          --ipcdisable \
+          ${optionalString (cfg.network != null) ''--${cfg.network}''} \
+          --syncmode ${cfg.syncmode} \
+          --gcmode ${cfg.gcmode} \
+          --port ${toString cfg.port} \
+          --maxpeers ${toString cfg.maxpeers} \
+          ${optionalString cfg.http.enable ''--http --http.addr ${cfg.http.address} --http.port ${toString cfg.http.port}''} \
+          ${optionalString (cfg.http.apis != null) ''--http.api ${lib.concatStringsSep "," cfg.http.apis}''} \
+          ${optionalString cfg.websocket.enable ''--ws --ws.addr ${cfg.websocket.address} --ws.port ${toString cfg.websocket.port}''} \
+          ${optionalString (cfg.websocket.apis != null) ''--ws.api ${lib.concatStringsSep "," cfg.websocket.apis}''} \
+          ${optionalString cfg.metrics.enable ''--metrics --metrics.addr ${cfg.metrics.address} --metrics.port ${toString cfg.metrics.port}''} \
+          --authrpc.addr ${cfg.authrpc.address} --authrpc.port ${toString cfg.authrpc.port} --authrpc.vhosts ${lib.concatStringsSep "," cfg.authrpc.vhosts} \
+          ${if (cfg.authrpc.jwtsecret != "") then ''--authrpc.jwtsecret ${cfg.authrpc.jwtsecret}'' else ''--authrpc.jwtsecret ${dataDir}/geth/jwtsecret''} \
+          ${lib.escapeShellArgs cfg.extraArgs} \
+          --datadir ${dataDir}
+      '';
+    }))) eachGeth;
+
+  };
+
+}
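# Usage sketch (illustrative): `services.geth` is an attribute set of
# instances, so the name "mainnet" below is arbitrary; the module generates one
# geth-mainnet.service unit from it.
{
  services.geth.mainnet = {
    enable = true;
    syncmode = "snap";
    maxpeers = 40;
    http = {
      enable = true;
      apis = [ "eth" "net" "web3" ];
    };
    metrics.enable = true;
  };
}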
diff --git a/nixpkgs/nixos/modules/services/blockchain/ethereum/lighthouse.nix b/nixpkgs/nixos/modules/services/blockchain/ethereum/lighthouse.nix
new file mode 100644
index 000000000000..863e737d908a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/blockchain/ethereum/lighthouse.nix
@@ -0,0 +1,315 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+
+  cfg = config.services.lighthouse;
+in {
+
+  options = {
+    services.lighthouse = {
+      beacon = mkOption {
+        description = lib.mdDoc "Beacon node";
+        default = {};
+        type = types.submodule {
+          options = {
+            enable = lib.mkEnableOption (lib.mdDoc "Lightouse Beacon node");
+
+            dataDir = mkOption {
+              type = types.str;
+              default = "/var/lib/lighthouse-beacon";
+              description = lib.mdDoc ''
+                Directory where data will be stored. Each chain will be stored under its own subdirectory.
+              '';
+            };
+
+            address = mkOption {
+              type = types.str;
+              default = "0.0.0.0";
+              description = lib.mdDoc ''
+                Listen address of Beacon node.
+              '';
+            };
+
+            port = mkOption {
+              type = types.port;
+              default = 9000;
+              description = lib.mdDoc ''
+                Port number the Beacon node will be listening on.
+              '';
+            };
+
+            openFirewall = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Open the port in the firewall
+              '';
+            };
+
+            disableDepositContractSync = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Explicitly disables syncing of deposit logs from the execution node.
+                This overrides any previous option that depends on it.
+                Useful if you intend to run a non-validating beacon node.
+              '';
+            };
+
+            execution = {
+              address = mkOption {
+                type = types.str;
+                default = "127.0.0.1";
+                description = lib.mdDoc ''
+                  Listen address for the execution layer.
+                '';
+              };
+
+              port = mkOption {
+                type = types.port;
+                default = 8551;
+                description = lib.mdDoc ''
+                  Port number the Beacon node will be listening on for the execution layer.
+                '';
+              };
+
+              jwtPath = mkOption {
+                type = types.str;
+                default = "";
+                description = lib.mdDoc ''
+                  Path to the JWT secret required to connect to the execution layer.
+                '';
+              };
+            };
+
+            http = {
+              enable = lib.mkEnableOption (lib.mdDoc "Beacon node http api");
+              port = mkOption {
+                type = types.port;
+                default = 5052;
+                description = lib.mdDoc ''
+                  Port number of Beacon node RPC service.
+                '';
+              };
+
+              address = mkOption {
+                type = types.str;
+                default = "127.0.0.1";
+                description = lib.mdDoc ''
+                  Listen address of Beacon node RPC service.
+                '';
+              };
+            };
+
+            metrics = {
+              enable = lib.mkEnableOption (lib.mdDoc "Beacon node prometheus metrics");
+              address = mkOption {
+                type = types.str;
+                default = "127.0.0.1";
+                description = lib.mdDoc ''
+                  Listen address of Beacon node metrics service.
+                '';
+              };
+
+              port = mkOption {
+                type = types.port;
+                default = 5054;
+                description = lib.mdDoc ''
+                  Port number of Beacon node metrics service.
+                '';
+              };
+            };
+
+            extraArgs = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                Additional arguments passed to the lighthouse beacon command.
+              '';
+              default = "";
+              example = "";
+            };
+          };
+        };
+      };
+
+      validator = mkOption {
+        description = lib.mdDoc "Validator node";
+        default = {};
+        type = types.submodule {
+          options = {
+            enable = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc "Enable Lightouse Validator node.";
+            };
+
+            dataDir = mkOption {
+              type = types.str;
+              default = "/var/lib/lighthouse-validator";
+              description = lib.mdDoc ''
+                Directory where data will be stored. Each chain will be stored under its own subdirectory.
+              '';
+            };
+
+            beaconNodes = mkOption {
+              type = types.listOf types.str;
+              default = ["http://localhost:5052"];
+              description = lib.mdDoc ''
+                Beacon nodes to connect to.
+              '';
+            };
+
+            metrics = {
+              enable = lib.mkEnableOption (lib.mdDoc "Validator node prometheus metrics");
+              address = mkOption {
+                type = types.str;
+                default = "127.0.0.1";
+                description = lib.mdDoc ''
+                  Listen address of Validator node metrics service.
+                '';
+              };
+
+              port = mkOption {
+                type = types.port;
+                default = 5056;
+                description = lib.mdDoc ''
+                  Port number of Validator node metrics service.
+                '';
+              };
+            };
+
+            extraArgs = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                Additional arguments passed to the lighthouse validator command.
+              '';
+              default = "";
+              example = "";
+            };
+          };
+        };
+      };
+
+      network = mkOption {
+        type = types.enum [ "mainnet" "prater" "goerli" "gnosis" "kiln" "ropsten" "sepolia" ];
+        default = "mainnet";
+        description = lib.mdDoc ''
+          The network to connect to. Mainnet is the default Ethereum network.
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Additional arguments passed to every lighthouse command.
+        '';
+        default = "";
+        example = "";
+      };
+    };
+  };
+
+  config = mkIf (cfg.beacon.enable || cfg.validator.enable) {
+
+    environment.systemPackages = [ pkgs.lighthouse ] ;
+
+    networking.firewall = mkIf cfg.beacon.enable {
+      allowedTCPPorts = mkIf cfg.beacon.openFirewall [ cfg.beacon.port ];
+      allowedUDPPorts = mkIf cfg.beacon.openFirewall [ cfg.beacon.port ];
+    };
+
+
+    systemd.services.lighthouse-beacon = mkIf cfg.beacon.enable {
+      description = "Lighthouse beacon node (connect to P2P nodes and verify blocks)";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      script = ''
+        # make sure the chain data directory is created on first run
+        mkdir -p ${cfg.beacon.dataDir}/${cfg.network}
+
+        ${pkgs.lighthouse}/bin/lighthouse beacon_node \
+          --disable-upnp \
+          ${lib.optionalString cfg.beacon.disableDepositContractSync "--disable-deposit-contract-sync"} \
+          --port ${toString cfg.beacon.port} \
+          --listen-address ${cfg.beacon.address} \
+          --network ${cfg.network} \
+          --datadir ${cfg.beacon.dataDir}/${cfg.network} \
+          --execution-endpoint http://${cfg.beacon.execution.address}:${toString cfg.beacon.execution.port} \
+          --execution-jwt ''${CREDENTIALS_DIRECTORY}/LIGHTHOUSE_JWT \
+          ${lib.optionalString cfg.beacon.http.enable '' --http --http-address ${cfg.beacon.http.address} --http-port ${toString cfg.beacon.http.port}''} \
+          ${lib.optionalString cfg.beacon.metrics.enable '' --metrics --metrics-address ${cfg.beacon.metrics.address} --metrics-port ${toString cfg.beacon.metrics.port}''} \
+          ${cfg.extraArgs} ${cfg.beacon.extraArgs}
+      '';
+      serviceConfig = {
+        LoadCredential = "LIGHTHOUSE_JWT:${cfg.beacon.execution.jwtPath}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        StateDirectory = "lighthouse-beacon";
+        ReadWritePaths = [ cfg.beacon.dataDir ];
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectClock = true;
+        ProtectProc = "noaccess";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        RestrictSUIDSGID = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+      };
+    };
+
+    systemd.services.lighthouse-validator = mkIf cfg.validator.enable {
+      description = "Lighthouse validtor node (manages validators, using data obtained from the beacon node via a HTTP API)";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      script = ''
+        # make sure the chain data directory is created on first run
+        mkdir -p ${cfg.validator.dataDir}/${cfg.network}
+
+        ${pkgs.lighthouse}/bin/lighthouse validator_client \
+          --network ${cfg.network} \
+          --beacon-nodes ${lib.concatStringsSep "," cfg.validator.beaconNodes} \
+          --datadir ${cfg.validator.dataDir}/${cfg.network} \
+          ${optionalString cfg.validator.metrics.enable ''--metrics --metrics-address ${cfg.validator.metrics.address} --metrics-port ${toString cfg.validator.metrics.port}''} \
+          ${cfg.extraArgs} ${cfg.validator.extraArgs}
+      '';
+
+      serviceConfig = {
+        Restart = "on-failure";
+        StateDirectory = "lighthouse-validator";
+        ReadWritePaths = [ cfg.validator.dataDir ];
+        CapabilityBoundingSet = "";
+        DynamicUser = true;
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectClock = true;
+        ProtectProc = "noaccess";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        RestrictSUIDSGID = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+      };
+    };
+  };
+}
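# Usage sketch (illustrative): a beacon node plus validator on mainnet; the JWT
# path is a placeholder and must match the secret configured on the execution
# client (for example the geth or erigon modules above).
{
  services.lighthouse = {
    network = "mainnet";
    beacon = {
      enable = true;
      openFirewall = true;                            # open the P2P port
      execution.jwtPath = "/run/keys/lighthouse-jwt"; # hypothetical path
      http.enable = true;                             # used by the validator below
    };
    validator = {
      enable = true;
      beaconNodes = [ "http://localhost:5052" ];
    };
  };
}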
diff --git a/nixpkgs/nixos/modules/services/cluster/corosync/default.nix b/nixpkgs/nixos/modules/services/cluster/corosync/default.nix
new file mode 100644
index 000000000000..7ef17c46b81e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/corosync/default.nix
@@ -0,0 +1,112 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.corosync;
+in
+{
+  # interface
+  options.services.corosync = {
+    enable = mkEnableOption (lib.mdDoc "corosync");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.corosync;
+      defaultText = literalExpression "pkgs.corosync";
+      description = lib.mdDoc "Package that should be used for corosync.";
+    };
+
+    clusterName = mkOption {
+      type = types.str;
+      default = "nixcluster";
+      description = lib.mdDoc "Name of the corosync cluster.";
+    };
+
+    extraOptions = mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = lib.mdDoc "Additional options with which to start corosync.";
+    };
+
+    nodelist = mkOption {
+      description = lib.mdDoc "Corosync nodelist: all cluster members.";
+      default = [];
+      type = with types; listOf (submodule {
+        options = {
+          nodeid = mkOption {
+            type = int;
+            description = lib.mdDoc "Node ID number";
+          };
+          name = mkOption {
+            type = str;
+            description = lib.mdDoc "Node name";
+          };
+          ring_addrs = mkOption {
+            type = listOf str;
+            description = lib.mdDoc "List of addresses, one for each ring.";
+          };
+        };
+      });
+    };
+  };
+
+  # implementation
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc."corosync/corosync.conf".text = ''
+      totem {
+        version: 2
+        secauth: on
+        cluster_name: ${cfg.clusterName}
+        transport: knet
+      }
+
+      nodelist {
+        ${concatMapStrings ({ nodeid, name, ring_addrs }: ''
+          node {
+            nodeid: ${toString nodeid}
+            name: ${name}
+            ${concatStrings (imap0 (i: addr: ''
+              ring${toString i}_addr: ${addr}
+            '') ring_addrs)}
+          }
+        '') cfg.nodelist}
+      }
+
+      quorum {
+        # only corosync_votequorum is supported
+        provider: corosync_votequorum
+        wait_for_all: 0
+        ${optionalString (builtins.length cfg.nodelist < 3) ''
+          two_node: 1
+        ''}
+      }
+
+      logging {
+        to_syslog: yes
+      }
+    '';
+
+    environment.etc."corosync/uidgid.d/root".text = ''
+      # allow pacemaker connection by root
+      uidgid {
+        uid: 0
+        gid: 0
+      }
+    '';
+
+    systemd.packages = [ cfg.package ];
+    systemd.services.corosync = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        StateDirectory = "corosync";
+        StateDirectoryMode = "0700";
+      };
+    };
+
+    environment.etc."sysconfig/corosync".text = lib.optionalString (cfg.extraOptions != []) ''
+      COROSYNC_OPTIONS="${lib.escapeShellArgs cfg.extraOptions}"
+    '';
+  };
+}
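# Usage sketch (illustrative): a two-node cluster; node names, IDs and ring
# addresses are placeholders. With fewer than three nodes the generated
# corosync.conf above sets two_node automatically.
{
  services.corosync = {
    enable = true;
    clusterName = "demo";
    nodelist = [
      { nodeid = 1; name = "alpha"; ring_addrs = [ "192.168.1.10" ]; }
      { nodeid = 2; name = "beta";  ring_addrs = [ "192.168.1.11" ]; }
    ];
  };
}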
diff --git a/nixpkgs/nixos/modules/services/cluster/hadoop/conf.nix b/nixpkgs/nixos/modules/services/cluster/hadoop/conf.nix
new file mode 100644
index 000000000000..388eaafcc362
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/hadoop/conf.nix
@@ -0,0 +1,45 @@
+{ cfg, pkgs, lib }:
+let
+  propertyXml = name: value: lib.optionalString (value != null) ''
+    <property>
+      <name>${name}</name>
+      <value>${builtins.toString value}</value>
+    </property>
+  '';
+  siteXml = fileName: properties: pkgs.writeTextDir fileName ''
+    <?xml version="1.0" encoding="UTF-8" standalone="no"?>
+    <!-- generated by NixOS -->
+    <configuration>
+      ${builtins.concatStringsSep "\n" (pkgs.lib.mapAttrsToList propertyXml properties)}
+    </configuration>
+  '';
+  cfgLine = name: value: ''
+    ${name}=${builtins.toString value}
+  '';
+  cfgFile = fileName: properties: pkgs.writeTextDir fileName ''
+    # generated by NixOS
+    ${builtins.concatStringsSep "" (pkgs.lib.mapAttrsToList cfgLine properties)}
+  '';
+  userFunctions = ''
+    hadoop_verify_logdir() {
+      echo Skipping verification of log directory
+    }
+  '';
+  hadoopEnv = ''
+    export HADOOP_LOG_DIR=/tmp/hadoop/$USER
+  '';
+in
+pkgs.runCommand "hadoop-conf" {} (with cfg; ''
+  mkdir -p $out/
+  cp ${siteXml "core-site.xml" (coreSite // coreSiteInternal)}/* $out/
+  cp ${siteXml "hdfs-site.xml" (hdfsSiteDefault // hdfsSite // hdfsSiteInternal)}/* $out/
+  cp ${siteXml "hbase-site.xml" (hbaseSiteDefault // hbaseSite // hbaseSiteInternal)}/* $out/
+  cp ${siteXml "mapred-site.xml" (mapredSiteDefault // mapredSite)}/* $out/
+  cp ${siteXml "yarn-site.xml" (yarnSiteDefault // yarnSite // yarnSiteInternal)}/* $out/
+  cp ${siteXml "httpfs-site.xml" httpfsSite}/* $out/
+  cp ${cfgFile "container-executor.cfg" containerExecutorCfg}/* $out/
+  cp ${pkgs.writeTextDir "hadoop-user-functions.sh" userFunctions}/* $out/
+  cp ${pkgs.writeTextDir "hadoop-env.sh" hadoopEnv}/* $out/
+  cp ${log4jProperties} $out/log4j.properties
+  ${lib.concatMapStringsSep "\n" (dir: "cp -f -r ${dir}/* $out/") extraConfDirs}
+'')
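# For illustration: the helpers above turn attrsets into Hadoop config files,
# e.g. propertyXml "fs.defaultFS" "hdfs://localhost" renders
#
#   <property>
#     <name>fs.defaultFS</name>
#     <value>hdfs://localhost</value>
#   </property>
#
# and cfgLine "min.user.id" 1000 renders the line "min.user.id=1000" used in
# container-executor.cfg.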
diff --git a/nixpkgs/nixos/modules/services/cluster/hadoop/default.nix b/nixpkgs/nixos/modules/services/cluster/hadoop/default.nix
new file mode 100644
index 000000000000..ff6b4d5588b1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/hadoop/default.nix
@@ -0,0 +1,223 @@
+{ config, lib, options, pkgs, ...}:
+let
+  cfg = config.services.hadoop;
+  opt = options.services.hadoop;
+in
+with lib;
+{
+  imports = [ ./yarn.nix ./hdfs.nix ./hbase.nix ];
+
+  options.services.hadoop = {
+    coreSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      example = literalExpression ''
+        {
+          "fs.defaultFS" = "hdfs://localhost";
+        }
+      '';
+      description = lib.mdDoc ''
+        Hadoop core-site.xml definition
+        <https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/core-default.xml>
+      '';
+    };
+    coreSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = lib.mdDoc ''
+        Internal option to add configs to core-site.xml based on module options
+      '';
+    };
+
+    hdfsSiteDefault = mkOption {
+      default = {
+        "dfs.namenode.rpc-bind-host" = "0.0.0.0";
+        "dfs.namenode.http-address" = "0.0.0.0:9870";
+        "dfs.namenode.servicerpc-bind-host" = "0.0.0.0";
+        "dfs.namenode.http-bind-host" = "0.0.0.0";
+      };
+      type = types.attrsOf types.anything;
+      description = lib.mdDoc ''
+        Default options for hdfs-site.xml
+      '';
+    };
+    hdfsSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      example = literalExpression ''
+        {
+          "dfs.nameservices" = "namenode1";
+        }
+      '';
+      description = lib.mdDoc ''
+        Additional options and overrides for hdfs-site.xml
+        <https://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-hdfs/hdfs-default.xml>
+      '';
+    };
+    hdfsSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = lib.mdDoc ''
+        Internal option to add configs to hdfs-site.xml based on module options
+      '';
+    };
+
+    mapredSiteDefault = mkOption {
+      default = {
+        "mapreduce.framework.name" = "yarn";
+        "yarn.app.mapreduce.am.env" = "HADOOP_MAPRED_HOME=${cfg.package}";
+        "mapreduce.map.env" = "HADOOP_MAPRED_HOME=${cfg.package}";
+        "mapreduce.reduce.env" = "HADOOP_MAPRED_HOME=${cfg.package}";
+      };
+      defaultText = literalExpression ''
+        {
+          "mapreduce.framework.name" = "yarn";
+          "yarn.app.mapreduce.am.env" = "HADOOP_MAPRED_HOME=''${config.${opt.package}}";
+          "mapreduce.map.env" = "HADOOP_MAPRED_HOME=''${config.${opt.package}}";
+          "mapreduce.reduce.env" = "HADOOP_MAPRED_HOME=''${config.${opt.package}}";
+        }
+      '';
+      type = types.attrsOf types.anything;
+      description = lib.mdDoc ''
+        Default options for mapred-site.xml
+      '';
+    };
+    mapredSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      example = literalExpression ''
+        {
+          "mapreduce.map.java.opts" = "-Xmx900m -XX:+UseParallelGC";
+        }
+      '';
+      description = lib.mdDoc ''
+        Additional options and overrides for mapred-site.xml
+        <https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/mapred-default.xml>
+      '';
+    };
+
+    yarnSiteDefault = mkOption {
+      default = {
+        "yarn.nodemanager.admin-env" = "PATH=$PATH";
+        "yarn.nodemanager.aux-services" = "mapreduce_shuffle";
+        "yarn.nodemanager.aux-services.mapreduce_shuffle.class" = "org.apache.hadoop.mapred.ShuffleHandler";
+        "yarn.nodemanager.bind-host" = "0.0.0.0";
+        "yarn.nodemanager.container-executor.class" = "org.apache.hadoop.yarn.server.nodemanager.LinuxContainerExecutor";
+        "yarn.nodemanager.env-whitelist" = "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_HOME,LANG,TZ";
+        "yarn.nodemanager.linux-container-executor.group" = "hadoop";
+        "yarn.nodemanager.linux-container-executor.path" = "/run/wrappers/yarn-nodemanager/bin/container-executor";
+        "yarn.nodemanager.log-dirs" = "/var/log/hadoop/yarn/nodemanager";
+        "yarn.resourcemanager.bind-host" = "0.0.0.0";
+        "yarn.resourcemanager.scheduler.class" = "org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler";
+      };
+      type = types.attrsOf types.anything;
+      description = lib.mdDoc ''
+        Default options for yarn-site.xml
+      '';
+    };
+    yarnSite = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      example = literalExpression ''
+        {
+          "yarn.resourcemanager.hostname" = "''${config.networking.hostName}";
+        }
+      '';
+      description = lib.mdDoc ''
+        Additional options and overrides for yarn-site.xml
+        <https://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-common/yarn-default.xml>
+      '';
+    };
+    yarnSiteInternal = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      internal = true;
+      description = lib.mdDoc ''
+        Internal option to add configs to yarn-site.xml based on module options
+      '';
+    };
+
+    httpfsSite = mkOption {
+      default = { };
+      type = types.attrsOf types.anything;
+      example = literalExpression ''
+        {
+          "hadoop.http.max.threads" = 500;
+        }
+      '';
+      description = lib.mdDoc ''
+        Hadoop httpfs-site.xml definition
+        <https://hadoop.apache.org/docs/current/hadoop-hdfs-httpfs/httpfs-default.html>
+      '';
+    };
+
+    log4jProperties = mkOption {
+      default = "${cfg.package}/etc/hadoop/log4j.properties";
+      defaultText = literalExpression ''
+        "''${config.${opt.package}}/etc/hadoop/log4j.properties"
+      '';
+      type = types.path;
+      example = literalExpression ''
+        "''${pkgs.hadoop}/etc/hadoop/log4j.properties";
+      '';
+      description = lib.mdDoc "log4j.properties file added to HADOOP_CONF_DIR";
+    };
+
+    containerExecutorCfg = mkOption {
+      default = {
+        # must be the same as yarn.nodemanager.linux-container-executor.group in yarnSite
+        "yarn.nodemanager.linux-container-executor.group"="hadoop";
+        "min.user.id"=1000;
+        "feature.terminal.enabled"=1;
+        "feature.mount-cgroup.enabled" = 1;
+      };
+      type = types.attrsOf types.anything;
+      example = literalExpression ''
+        options.services.hadoop.containerExecutorCfg.default // {
+          "feature.terminal.enabled" = 0;
+        }
+      '';
+      description = lib.mdDoc ''
+        Yarn container-executor.cfg definition
+        <https://hadoop.apache.org/docs/r2.7.2/hadoop-yarn/hadoop-yarn-site/SecureContainer.html>
+      '';
+    };
+
+    extraConfDirs = mkOption {
+      default = [];
+      type = types.listOf types.path;
+      example = literalExpression ''
+        [
+          ./extraHDFSConfs
+          ./extraYARNConfs
+        ]
+      '';
+      description = lib.mdDoc "Directories containing additional config files to be added to HADOOP_CONF_DIR";
+    };
+
+    gatewayRole.enable = mkEnableOption (lib.mdDoc "gateway role for deploying hadoop configs");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.hadoop;
+      defaultText = literalExpression "pkgs.hadoop";
+      description = lib.mdDoc "";
+    };
+  };
+
+
+  config = mkIf cfg.gatewayRole.enable {
+    users.groups.hadoop = {
+      gid = config.ids.gids.hadoop;
+    };
+    environment = {
+      systemPackages = [ cfg.package ];
+      etc."hadoop-conf".source = let
+        hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
+      in "${hadoopConf}";
+      variables.HADOOP_CONF_DIR = "/etc/hadoop-conf/";
+    };
+  };
+}
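# Usage sketch (illustrative): declare shared Hadoop site settings once and
# enable the gateway role to get the rendered config under /etc/hadoop-conf and
# the CLI tools in the system path; property names follow the upstream
# *-site.xml documentation linked above.
{
  services.hadoop = {
    gatewayRole.enable = true;
    coreSite."fs.defaultFS" = "hdfs://localhost";
    hdfsSite."dfs.replication" = 1;
  };
}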
diff --git a/nixpkgs/nixos/modules/services/cluster/hadoop/hbase.nix b/nixpkgs/nixos/modules/services/cluster/hadoop/hbase.nix
new file mode 100644
index 000000000000..a39da2a84eca
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/hadoop/hbase.nix
@@ -0,0 +1,218 @@
+{ config, lib, pkgs, ...}:
+
+with lib;
+let
+  cfg = config.services.hadoop;
+  hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
+  mkIfNotNull = x: mkIf (x != null) x;
+  # generic hbase role options
+  hbaseRoleOption = name: extraOpts: {
+    enable = mkEnableOption (mdDoc "HBase ${name}");
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc "Open firewall ports for HBase ${name}.";
+    };
+
+    restartIfChanged = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc "Restart ${name} con config change.";
+    };
+
+    extraFlags = mkOption {
+      type = with types; listOf str;
+      default = [];
+      example = literalExpression ''[ "--backup" ]'';
+      description = mdDoc "Extra flags for the ${name} service.";
+    };
+
+    environment = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      example = literalExpression ''
+        {
+          HBASE_MASTER_OPTS = "-Dcom.sun.management.jmxremote.ssl=true";
+        }
+      '';
+      description = mdDoc "Environment variables passed to ${name}.";
+    };
+  } // extraOpts;
+  # generic hbase role configs
+  hbaseRoleConfig = name: ports: (mkIf cfg.hbase."${name}".enable {
+    services.hadoop.gatewayRole = {
+      enable = true;
+      enableHbaseCli = mkDefault true;
+    };
+
+    systemd.services."hbase-${toLower name}" = {
+      description = "HBase ${name}";
+      wantedBy = [ "multi-user.target" ];
+      path = with cfg; [ hbase.package ] ++ optional
+        (with cfg.hbase.master; enable && initHDFS) package;
+      preStart = mkIf (with cfg.hbase.master; enable && initHDFS)
+        (concatStringsSep "\n" (
+          map (x: "HADOOP_USER_NAME=hdfs hdfs --config /etc/hadoop-conf ${x}")[
+            "dfsadmin -safemode wait"
+            "dfs -mkdir -p ${cfg.hbase.rootdir}"
+            "dfs -chown hbase ${cfg.hbase.rootdir}"
+          ]
+        ));
+
+      inherit (cfg.hbase."${name}") environment;
+      script = concatStringsSep " " (
+        [
+          "hbase --config /etc/hadoop-conf/"
+          "${toLower name} start"
+        ]
+        ++ cfg.hbase."${name}".extraFlags
+        ++ map (x: "--${toLower x} ${toString cfg.hbase.${name}.${x}}")
+          (filter (x: hasAttr x cfg.hbase.${name}) ["port" "infoPort"])
+      );
+
+      serviceConfig = {
+        User = "hbase";
+        SyslogIdentifier = "hbase-${toLower name}";
+        Restart = "always";
+      };
+    };
+
+    services.hadoop.hbaseSiteInternal."hbase.rootdir" = cfg.hbase.rootdir;
+
+    networking = {
+      firewall.allowedTCPPorts = mkIf cfg.hbase."${name}".openFirewall ports;
+      hosts = mkIf (with cfg.hbase.regionServer; enable && overrideHosts) {
+        "127.0.0.2" = mkForce [ ];
+        "::1" = mkForce [ ];
+      };
+    };
+
+  });
+in
+{
+  options.services.hadoop = {
+
+    gatewayRole.enableHbaseCli = mkEnableOption (mdDoc "HBase CLI tools");
+
+    hbaseSiteDefault = mkOption {
+      default = {
+        "hbase.regionserver.ipc.address" = "0.0.0.0";
+        "hbase.master.ipc.address" = "0.0.0.0";
+        "hbase.master.info.bindAddress" = "0.0.0.0";
+        "hbase.regionserver.info.bindAddress" = "0.0.0.0";
+
+        "hbase.cluster.distributed" = "true";
+      };
+      type = types.attrsOf types.anything;
+      description = mdDoc ''
+        Default options for hbase-site.xml
+      '';
+    };
+    hbaseSite = mkOption {
+      default = {};
+      type = with types; attrsOf anything;
+      example = literalExpression ''
+        {
+          "hbase.hregion.max.filesize" = 20*1024*1024*1024;
+          "hbase.table.normalization.enabled" = "true";
+        }
+      '';
+      description = mdDoc ''
+        Additional options and overrides for hbase-site.xml
+        <https://github.com/apache/hbase/blob/rel/2.4.11/hbase-common/src/main/resources/hbase-default.xml>
+      '';
+    };
+    hbaseSiteInternal = mkOption {
+      default = {};
+      type = with types; attrsOf anything;
+      internal = true;
+      description = mdDoc ''
+        Internal option to add configs to hbase-site.xml based on module options
+      '';
+    };
+
+    hbase = {
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.hbase;
+        defaultText = literalExpression "pkgs.hbase";
+        description = mdDoc "HBase package";
+      };
+
+      rootdir = mkOption {
+        description = mdDoc ''
+          This option will set "hbase.rootdir" in hbase-site.xml and determine
+          the directory shared by region servers and into which HBase persists.
+          The URL should be 'fully-qualified' to include the filesystem scheme.
+          If a core-site.xml is provided, the FS scheme defaults to the value
+          of "fs.defaultFS".
+
+          Filesystems other than HDFS (like S3, QFS, Swift) are also supported.
+        '';
+        type = types.str;
+        example = "hdfs://nameservice1/hbase";
+        default = "/hbase";
+      };
+      zookeeperQuorum = mkOption {
+        description = mdDoc ''
+          This option will set "hbase.zookeeper.quorum" in hbase-site.xml.
+          Comma separated list of servers in the ZooKeeper ensemble.
+        '';
+        type = with types; nullOr commas;
+        example = "zk1.internal,zk2.internal,zk3.internal";
+        default = null;
+      };
+    } // (let
+      ports = port: infoPort: {
+        port = mkOption {
+          type = types.int;
+          default = port;
+          description = mdDoc "RPC port";
+        };
+        infoPort = mkOption {
+          type = types.int;
+          default = infoPort;
+          description = mdDoc "web UI port";
+        };
+      };
+    in mapAttrs hbaseRoleOption {
+      master.initHDFS = mkEnableOption (mdDoc "initialization of the hbase directory on HDFS");
+      regionServer.overrideHosts = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc ''
+          Remove /etc/hosts entries for "127.0.0.2" and "::1" defined in nixos/modules/config/networking.nix.
+          Region servers must be able to resolve their hostnames to their IP addresses, through PTR records
+          or /etc/hosts entries.
+        '';
+      };
+      thrift = ports 9090 9095;
+      rest = ports 8080 8085;
+    });
+  };
+
+  config = mkMerge ([
+
+    (mkIf cfg.gatewayRole.enable {
+
+      environment.systemPackages = mkIf cfg.gatewayRole.enableHbaseCli [ cfg.hbase.package ];
+
+      services.hadoop.hbaseSiteInternal = with cfg.hbase; {
+        "hbase.zookeeper.quorum" = mkIfNotNull zookeeperQuorum;
+      };
+
+      users.users.hbase = {
+        description = "Hadoop HBase user";
+        group = "hadoop";
+        isSystemUser = true;
+      };
+    })
+  ] ++ (mapAttrsToList hbaseRoleConfig {
+    master = [ 16000 16010 ];
+    regionServer = [ 16020 16030 ];
+    thrift = with cfg.hbase.thrift; [ port infoPort ];
+    rest = with cfg.hbase.rest; [ port infoPort ];
+  }));
+}
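# Usage sketch (illustrative): an HBase master (which also initialises the HDFS
# rootdir) plus a region server; the namenode hostname and ZooKeeper quorum are
# placeholders, and a reachable HDFS cluster is assumed.
{
  services.hadoop = {
    coreSite."fs.defaultFS" = "hdfs://namenode.example:8020";
    hbase = {
      zookeeperQuorum = "zk1.example,zk2.example,zk3.example";
      master = {
        enable = true;
        initHDFS = true;       # create and chown hbase.rootdir on HDFS first
        openFirewall = true;
      };
      regionServer.enable = true;
    };
  };
}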
diff --git a/nixpkgs/nixos/modules/services/cluster/hadoop/hdfs.nix b/nixpkgs/nixos/modules/services/cluster/hadoop/hdfs.nix
new file mode 100644
index 000000000000..4a49bd0ddd43
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/hadoop/hdfs.nix
@@ -0,0 +1,204 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.hadoop;
+
+  # Config files for hadoop services
+  hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
+
+  # Generator for HDFS service options
+  hadoopServiceOption = { serviceName, firewallOption ? true, extraOpts ? null }: {
+    enable = mkEnableOption (lib.mdDoc serviceName);
+    restartIfChanged = mkOption {
+      type = types.bool;
+      description = lib.mdDoc ''
+        Automatically restart the service on config change.
+        This can be set to false to defer restarts on clusters running critical applications.
+        Please consider the security implications of inadvertently running an older version,
+        and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+      '';
+      default = false;
+    };
+    extraFlags = mkOption{
+      type = with types; listOf str;
+      default = [];
+      description = lib.mdDoc "Extra command line flags to pass to ${serviceName}";
+      example = [
+        "-Dcom.sun.management.jmxremote"
+        "-Dcom.sun.management.jmxremote.port=8010"
+      ];
+    };
+    extraEnv = mkOption{
+      type = with types; attrsOf str;
+      default = {};
+      description = lib.mdDoc "Extra environment variables for ${serviceName}";
+    };
+  } // (optionalAttrs firewallOption {
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Open firewall ports for ${serviceName}.";
+    };
+  }) // (optionalAttrs (extraOpts != null) extraOpts);
+
+  # Generator for HDFS service configs
+  hadoopServiceConfig =
+    { name
+    , serviceOptions ? cfg.hdfs."${toLower name}"
+    , description ? "Hadoop HDFS ${name}"
+    , User ? "hdfs"
+    , allowedTCPPorts ? [ ]
+    , preStart ? ""
+    , environment ? { }
+    , extraConfig ? { }
+    }: (
+
+      mkIf serviceOptions.enable ( mkMerge [{
+        systemd.services."hdfs-${toLower name}" = {
+          inherit description preStart;
+          environment = environment // serviceOptions.extraEnv;
+          wantedBy = [ "multi-user.target" ];
+          inherit (serviceOptions) restartIfChanged;
+          serviceConfig = {
+            inherit User;
+            SyslogIdentifier = "hdfs-${toLower name}";
+            ExecStart = "${cfg.package}/bin/hdfs --config ${hadoopConf} ${toLower name} ${escapeShellArgs serviceOptions.extraFlags}";
+            Restart = "always";
+          };
+        };
+
+        services.hadoop.gatewayRole.enable = true;
+
+        networking.firewall.allowedTCPPorts = mkIf
+          ((builtins.hasAttr "openFirewall" serviceOptions) && serviceOptions.openFirewall)
+          allowedTCPPorts;
+      } extraConfig])
+    );
+
+in
+{
+  options.services.hadoop.hdfs = {
+
+    namenode = hadoopServiceOption { serviceName = "HDFS NameNode"; } // {
+      formatOnInit = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Format HDFS namenode on first start. This is useful for quickly spinning up
+          ephemeral HDFS clusters with a single namenode.
+          For HA clusters, initialization involves multiple steps across multiple nodes.
+          Follow this guide to initialize an HA cluster manually:
+          <https://hadoop.apache.org/docs/stable/hadoop-project-dist/hadoop-hdfs/HDFSHighAvailabilityWithQJM.html>
+        '';
+      };
+    };
+
+    datanode = hadoopServiceOption { serviceName = "HDFS DataNode"; } // {
+      dataDirs = mkOption {
+        default = null;
+        description = lib.mdDoc "Tier and path definitions for datanode storage.";
+        type = with types; nullOr (listOf (submodule {
+          options = {
+            type = mkOption {
+              type = enum [ "SSD" "DISK" "ARCHIVE" "RAM_DISK" ];
+              description = lib.mdDoc ''
+                Storage types ([SSD]/[DISK]/[ARCHIVE]/[RAM_DISK]) for HDFS storage policies.
+              '';
+            };
+            path = mkOption {
+              type = path;
+              example = [ "/var/lib/hadoop/hdfs/dn" ];
+              description = lib.mdDoc "Determines where on the local filesystem a data node should store its blocks.";
+            };
+          };
+        }));
+      };
+    };
+
+    journalnode = hadoopServiceOption { serviceName = "HDFS JournalNode"; };
+
+    zkfc = hadoopServiceOption {
+      serviceName = "HDFS ZooKeeper failover controller";
+      firewallOption = false;
+    };
+
+    httpfs = hadoopServiceOption { serviceName = "HDFS HTTPFS"; } // {
+      tempPath = mkOption {
+        type = types.path;
+        default = "/tmp/hadoop/httpfs";
+        description = lib.mdDoc "HTTPFS_TEMP path used by HTTPFS";
+      };
+    };
+
+  };
+
+  config = mkMerge [
+    (hadoopServiceConfig {
+      name = "NameNode";
+      allowedTCPPorts = [
+        9870 # namenode.http-address
+        8020 # namenode.rpc-address
+        8022 # namenode.servicerpc-address
+        8019 # dfs.ha.zkfc.port
+      ];
+      preStart = (mkIf cfg.hdfs.namenode.formatOnInit
+        "${cfg.package}/bin/hdfs --config ${hadoopConf} namenode -format -nonInteractive || true"
+      );
+    })
+
+    (hadoopServiceConfig {
+      name = "DataNode";
+      # port numbers for datanode changed between hadoop 2 and 3
+      allowedTCPPorts = if versionAtLeast cfg.package.version "3" then [
+        9864 # datanode.http.address
+        9866 # datanode.address
+        9867 # datanode.ipc.address
+      ] else [
+        50075 # datanode.http.address
+        50010 # datanode.address
+        50020 # datanode.ipc.address
+      ];
+      extraConfig.services.hadoop.hdfsSiteInternal."dfs.datanode.data.dir" = mkIf (cfg.hdfs.datanode.dataDirs!= null)
+        (concatMapStringsSep "," (x: "["+x.type+"]file://"+x.path) cfg.hdfs.datanode.dataDirs);
+    })
+
+    (hadoopServiceConfig {
+      name = "JournalNode";
+      allowedTCPPorts = [
+        8480 # dfs.journalnode.http-address
+        8485 # dfs.journalnode.rpc-address
+      ];
+    })
+
+    (hadoopServiceConfig {
+      name = "zkfc";
+      description = "Hadoop HDFS ZooKeeper failover controller";
+    })
+
+    (hadoopServiceConfig {
+      name = "HTTPFS";
+      environment.HTTPFS_TEMP = cfg.hdfs.httpfs.tempPath;
+      preStart = "mkdir -p $HTTPFS_TEMP";
+      User = "httpfs";
+      allowedTCPPorts = [
+        14000 # httpfs.http.port
+      ];
+    })
+
+    (mkIf cfg.gatewayRole.enable {
+      users.users.hdfs = {
+        description = "Hadoop HDFS user";
+        group = "hadoop";
+        uid = config.ids.uids.hdfs;
+      };
+    })
+    (mkIf cfg.hdfs.httpfs.enable {
+      users.users.httpfs = {
+        description = "Hadoop HTTPFS user";
+        group = "hadoop";
+        isSystemUser = true;
+      };
+    })
+
+  ];
+}
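# Usage sketch (illustrative): a single-node HDFS setup; formatOnInit is only
# suitable for ephemeral clusters, and the data directory path is a placeholder.
{
  services.hadoop = {
    coreSite."fs.defaultFS" = "hdfs://localhost";
    hdfs = {
      namenode = {
        enable = true;
        formatOnInit = true;
        openFirewall = true;
      };
      datanode = {
        enable = true;
        dataDirs = [
          { type = "DISK"; path = "/var/lib/hadoop/hdfs/dn"; }
        ];
      };
    };
  };
}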
diff --git a/nixpkgs/nixos/modules/services/cluster/hadoop/yarn.nix b/nixpkgs/nixos/modules/services/cluster/hadoop/yarn.nix
new file mode 100644
index 000000000000..a49aafbd1dca
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/hadoop/yarn.nix
@@ -0,0 +1,200 @@
+{ config, lib, pkgs, ...}:
+with lib;
+let
+  cfg = config.services.hadoop;
+  hadoopConf = "${import ./conf.nix { inherit cfg pkgs lib; }}/";
+  restartIfChanged  = mkOption {
+    type = types.bool;
+    description = lib.mdDoc ''
+      Automatically restart the service on config change.
+      This can be set to false to defer restarts on clusters running critical applications.
+      Please consider the security implications of inadvertently running an older version,
+      and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+    '';
+    default = false;
+  };
+  extraFlags = mkOption{
+    type = with types; listOf str;
+    default = [];
+    description = lib.mdDoc "Extra command line flags to pass to the service";
+    example = [
+      "-Dcom.sun.management.jmxremote"
+      "-Dcom.sun.management.jmxremote.port=8010"
+    ];
+  };
+  extraEnv = mkOption{
+    type = with types; attrsOf str;
+    default = {};
+    description = lib.mdDoc "Extra environment variables";
+  };
+in
+{
+  options.services.hadoop.yarn = {
+    resourcemanager = {
+      enable = mkEnableOption (lib.mdDoc "Hadoop YARN ResourceManager");
+      inherit restartIfChanged extraFlags extraEnv;
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open firewall ports for resourcemanager
+        '';
+      };
+    };
+    nodemanager = {
+      enable = mkEnableOption (lib.mdDoc "Hadoop YARN NodeManager");
+      inherit restartIfChanged extraFlags extraEnv;
+
+      resource = {
+        cpuVCores = mkOption {
+          description = lib.mdDoc "Number of vcores that can be allocated for containers.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        maximumAllocationVCores = mkOption {
+          description = lib.mdDoc "The maximum virtual CPU cores any container can be allocated.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        memoryMB = mkOption {
+          description = lib.mdDoc "Amount of physical memory, in MB, that can be allocated for containers.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+        maximumAllocationMB = mkOption {
+          description = lib.mdDoc "The maximum physical memory any container can be allocated.";
+          type = with types; nullOr ints.positive;
+          default = null;
+        };
+      };
+
+      useCGroups = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Use cgroups to enforce resource limits on containers.
+        '';
+      };
+
+      localDir = mkOption {
+        description = lib.mdDoc "List of directories to store localized files in.";
+        type = with types; nullOr (listOf path);
+        example = [ "/var/lib/hadoop/yarn/nm" ];
+        default = null;
+      };
+
+      addBinBash = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Add /bin/bash. This is needed by the linux container executor's launch script.
+        '';
+      };
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open firewall ports for nodemanager.
+          Because containers can listen on any ephemeral port, TCP ports 1024–65535 will be opened.
+        '';
+      };
+    };
+  };
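+
+  # A hypothetical usage sketch (not part of this commit; values are made up):
+  # a single host running both YARN daemons with explicit container limits
+  # could be configured roughly as
+  #
+  #   services.hadoop.yarn = {
+  #     resourcemanager = { enable = true; openFirewall = true; };
+  #     nodemanager = {
+  #       enable = true;
+  #       resource = { cpuVCores = 4; memoryMB = 8192; };
+  #       localDir = [ "/var/lib/hadoop/yarn/nm" ];
+  #     };
+  #   };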
+
+  config = mkMerge [
+    (mkIf cfg.gatewayRole.enable {
+      users.users.yarn = {
+        description = "Hadoop YARN user";
+        group = "hadoop";
+        uid = config.ids.uids.yarn;
+      };
+    })
+
+    (mkIf cfg.yarn.resourcemanager.enable {
+      systemd.services.yarn-resourcemanager = {
+        description = "Hadoop YARN ResourceManager";
+        wantedBy = [ "multi-user.target" ];
+        inherit (cfg.yarn.resourcemanager) restartIfChanged;
+        environment = cfg.yarn.resourcemanager.extraEnv;
+
+        serviceConfig = {
+          User = "yarn";
+          SyslogIdentifier = "yarn-resourcemanager";
+          ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
+                      " resourcemanager ${escapeShellArgs cfg.yarn.resourcemanager.extraFlags}";
+          Restart = "always";
+        };
+      };
+
+      services.hadoop.gatewayRole.enable = true;
+
+      networking.firewall.allowedTCPPorts = (mkIf cfg.yarn.resourcemanager.openFirewall [
+        8088 # resourcemanager.webapp.address
+        8030 # resourcemanager.scheduler.address
+        8031 # resourcemanager.resource-tracker.address
+        8032 # resourcemanager.address
+        8033 # resourcemanager.admin.address
+      ]);
+    })
+
+    (mkIf cfg.yarn.nodemanager.enable {
+      # Needed because yarn hardcodes /bin/bash in container start scripts
+      # These scripts can't be patched, they are generated at runtime
+      systemd.tmpfiles.rules = [
+        (mkIf cfg.yarn.nodemanager.addBinBash "L /bin/bash - - - - /run/current-system/sw/bin/bash")
+      ];
+
+      systemd.services.yarn-nodemanager = {
+        description = "Hadoop YARN NodeManager";
+        wantedBy = [ "multi-user.target" ];
+        inherit (cfg.yarn.nodemanager) restartIfChanged;
+        environment = cfg.yarn.nodemanager.extraEnv;
+
+        preStart = ''
+          # create log dir
+          mkdir -p /var/log/hadoop/yarn/nodemanager
+          chown yarn:hadoop /var/log/hadoop/yarn/nodemanager
+
+          # set up setuid container executor binary
+          umount /run/wrappers/yarn-nodemanager/cgroup/cpu || true
+          rm -rf /run/wrappers/yarn-nodemanager/ || true
+          mkdir -p /run/wrappers/yarn-nodemanager/{bin,etc/hadoop,cgroup/cpu}
+          cp ${cfg.package}/bin/container-executor /run/wrappers/yarn-nodemanager/bin/
+          chgrp hadoop /run/wrappers/yarn-nodemanager/bin/container-executor
+          chmod 6050 /run/wrappers/yarn-nodemanager/bin/container-executor
+          cp ${hadoopConf}/container-executor.cfg /run/wrappers/yarn-nodemanager/etc/hadoop/
+        '';
+
+        serviceConfig = {
+          User = "yarn";
+          SyslogIdentifier = "yarn-nodemanager";
+          PermissionsStartOnly = true;
+          ExecStart = "${cfg.package}/bin/yarn --config ${hadoopConf} " +
+                      " nodemanager ${escapeShellArgs cfg.yarn.nodemanager.extraFlags}";
+          Restart = "always";
+        };
+      };
+
+      services.hadoop.gatewayRole.enable = true;
+
+      services.hadoop.yarnSiteInternal = with cfg.yarn.nodemanager; mkMerge [ ({
+        "yarn.nodemanager.local-dirs" = mkIf (localDir!= null) (concatStringsSep "," localDir);
+        "yarn.scheduler.maximum-allocation-vcores" = resource.maximumAllocationVCores;
+        "yarn.scheduler.maximum-allocation-mb" = resource.maximumAllocationMB;
+        "yarn.nodemanager.resource.cpu-vcores" = resource.cpuVCores;
+        "yarn.nodemanager.resource.memory-mb" = resource.memoryMB;
+      }) (mkIf useCGroups {
+        "yarn.nodemanager.linux-container-executor.cgroups.hierarchy" = "/hadoop-yarn";
+        "yarn.nodemanager.linux-container-executor.resources-handler.class" = "org.apache.hadoop.yarn.server.nodemanager.util.CgroupsLCEResourcesHandler";
+        "yarn.nodemanager.linux-container-executor.cgroups.mount" = "true";
+        "yarn.nodemanager.linux-container-executor.cgroups.mount-path" = "/run/wrappers/yarn-nodemanager/cgroup";
+      })];
+
+      networking.firewall.allowedTCPPortRanges = [
+        (mkIf (cfg.yarn.nodemanager.openFirewall) {from = 1024; to = 65535;})
+      ];
+    })
+
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/k3s/default.nix b/nixpkgs/nixos/modules/services/cluster/k3s/default.nix
new file mode 100644
index 000000000000..72b2f992a339
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/k3s/default.nix
@@ -0,0 +1,181 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.k3s;
+  removeOption = config: instruction:
+    lib.mkRemovedOptionModule ([ "services" "k3s" ] ++ config) instruction;
+in
+{
+  imports = [
+    (removeOption [ "docker" ] "k3s docker option is no longer supported.")
+  ];
+
+  # interface
+  options.services.k3s = {
+    enable = mkEnableOption (lib.mdDoc "k3s");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.k3s;
+      defaultText = literalExpression "pkgs.k3s";
+      description = lib.mdDoc "Package that should be used for k3s";
+    };
+
+    role = mkOption {
+      description = lib.mdDoc ''
+        Whether k3s should run as a server or agent.
+
+        If it's a server:
+
+        - By default it also runs workloads as an agent.
+        - Starts by default as a standalone server using an embedded sqlite datastore.
+        - Configure `clusterInit = true` to switch over to embedded etcd datastore and enable HA mode.
+        - Configure `serverAddr` to join an already-initialized HA cluster.
+
+        If it's an agent:
+
+        - `serverAddr` is required.
+      '';
+      default = "server";
+      type = types.enum [ "server" "agent" ];
+    };
+
+    serverAddr = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The k3s server to connect to.
+
+        Servers and agents need to be able to communicate with each other. Read
+        [the networking docs](https://rancher.com/docs/k3s/latest/en/installation/installation-requirements/#networking)
+        to learn how to configure the firewall.
+      '';
+      example = "https://10.0.0.10:6443";
+      default = "";
+    };
+
+    clusterInit = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Initialize HA cluster using an embedded etcd datastore.
+
+        If this option is `false` and `role` is `server`, k3s starts as a
+        standalone server backed by the default embedded sqlite datastore.
+
+        On a server that was using the default embedded sqlite backend,
+        enabling this option will migrate it to an embedded etcd DB.
+
+        If an HA cluster using the embedded etcd datastore was already initialized,
+        this option has no effect.
+
+        This option only makes sense in a server that is not connecting to another server.
+
+        If you are configuring an HA cluster with an embedded etcd,
+        the 1st server must have `clusterInit = true`
+        and other servers must connect to it using `serverAddr`.
+      '';
+    };
+
+    token = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The k3s token to use when connecting to a server.
+
+        WARNING: This option will store your token unencrypted and world-readable in the Nix store.
+        If this is undesired, use the tokenFile option instead.
+      '';
+      default = "";
+    };
+
+    tokenFile = mkOption {
+      type = types.nullOr types.path;
+      description = lib.mdDoc "File path containing k3s token to use when connecting to the server.";
+      default = null;
+    };
+
+    extraFlags = mkOption {
+      description = lib.mdDoc "Extra flags to pass to the k3s command.";
+      type = types.str;
+      default = "";
+      example = "--no-deploy traefik --cluster-cidr 10.24.0.0/16";
+    };
+
+    disableAgent = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Only run the server. This option only makes sense for a server.";
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      description = lib.mdDoc ''
+        File path containing environment variables for configuring the k3s service in the format of an EnvironmentFile. See systemd.exec(5).
+      '';
+      default = null;
+    };
+
+    configPath = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc "File path containing the k3s YAML config. This is useful when the config is generated (for example on boot).";
+    };
+  };
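+
+  # A hypothetical sketch (not part of this commit; addresses and paths are
+  # placeholders): the first server of an HA cluster sets clusterInit, while
+  # agents point at an existing server and supply the shared token, e.g.
+  #
+  #   # on the first server
+  #   services.k3s = { enable = true; role = "server"; clusterInit = true; };
+  #
+  #   # on an agent
+  #   services.k3s = {
+  #     enable = true;
+  #     role = "agent";
+  #     serverAddr = "https://10.0.0.10:6443";
+  #     tokenFile = "/run/secrets/k3s-token";
+  #   };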
+
+  # implementation
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.role == "agent" -> (cfg.configPath != null || cfg.serverAddr != "");
+        message = "serverAddr or configPath (with 'server' key) should be set if role is 'agent'";
+      }
+      {
+        assertion = cfg.role == "agent" -> cfg.configPath != null || cfg.tokenFile != null || cfg.token != "";
+        message = "token or tokenFile or configPath (with 'token' or 'token-file' keys) should be set if role is 'agent'";
+      }
+      {
+        assertion = cfg.role == "agent" -> !cfg.disableAgent;
+        message = "disableAgent must be false if role is 'agent'";
+      }
+      {
+        assertion = cfg.role == "agent" -> !cfg.clusterInit;
+        message = "clusterInit must be false if role is 'agent'";
+      }
+    ];
+
+    environment.systemPackages = [ config.services.k3s.package ];
+
+    systemd.services.k3s = {
+      description = "k3s service";
+      after = [ "firewall.service" "network-online.target" ];
+      wants = [ "firewall.service" "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = optional config.boot.zfs.enabled config.boot.zfs.package;
+      serviceConfig = {
+        # See: https://github.com/rancher/k3s/blob/dddbd16305284ae4bd14c0aade892412310d7edc/install.sh#L197
+        Type = if cfg.role == "agent" then "exec" else "notify";
+        KillMode = "process";
+        Delegate = "yes";
+        Restart = "always";
+        RestartSec = "5s";
+        LimitNOFILE = 1048576;
+        LimitNPROC = "infinity";
+        LimitCORE = "infinity";
+        TasksMax = "infinity";
+        EnvironmentFile = cfg.environmentFile;
+        ExecStart = concatStringsSep " \\\n " (
+          [
+            "${cfg.package}/bin/k3s ${cfg.role}"
+          ]
+          ++ (optional cfg.clusterInit "--cluster-init")
+          ++ (optional cfg.disableAgent "--disable-agent")
+          ++ (optional (cfg.serverAddr != "") "--server ${cfg.serverAddr}")
+          ++ (optional (cfg.token != "") "--token ${cfg.token}")
+          ++ (optional (cfg.tokenFile != null) "--token-file ${cfg.tokenFile}")
+          ++ (optional (cfg.configPath != null) "--config ${cfg.configPath}")
+          ++ [ cfg.extraFlags ]
+        );
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/addon-manager.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/addon-manager.nix
new file mode 100644
index 000000000000..dc851688fbec
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/addon-manager.nix
@@ -0,0 +1,171 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.addonManager;
+
+  isRBACEnabled = elem "RBAC" top.apiserver.authorizationMode;
+
+  addons = pkgs.runCommand "kubernetes-addons" { } ''
+    mkdir -p $out
+    # since we are mounting the addons to the addon manager, they need to be copied
+    ${concatMapStringsSep ";" (a: "cp -v ${a}/* $out/") (mapAttrsToList (name: addon:
+      pkgs.writeTextDir "${name}.json" (builtins.toJSON addon)
+    ) (cfg.addons))}
+  '';
+in
+{
+  ###### interface
+  options.services.kubernetes.addonManager = with lib.types; {
+
+    bootstrapAddons = mkOption {
+      description = lib.mdDoc ''
+        Bootstrap addons are like regular addons, but they are applied with cluster-admin rights.
+        They are applied at addon-manager startup only.
+      '';
+      default = { };
+      type = attrsOf attrs;
+      example = literalExpression ''
+        {
+          "my-service" = {
+            "apiVersion" = "v1";
+            "kind" = "Service";
+            "metadata" = {
+              "name" = "my-service";
+              "namespace" = "default";
+            };
+            "spec" = { ... };
+          };
+        }
+      '';
+    };
+
+    addons = mkOption {
+      description = lib.mdDoc "Kubernetes addons (any kind of Kubernetes resource can be an addon).";
+      default = { };
+      type = attrsOf (either attrs (listOf attrs));
+      example = literalExpression ''
+        {
+          "my-service" = {
+            "apiVersion" = "v1";
+            "kind" = "Service";
+            "metadata" = {
+              "name" = "my-service";
+              "namespace" = "default";
+            };
+            "spec" = { ... };
+          };
+        }
+        // import <nixpkgs/nixos/modules/services/cluster/kubernetes/dns.nix> { cfg = config.services.kubernetes; };
+      '';
+    };
+
+    enable = mkEnableOption (lib.mdDoc "Kubernetes addon manager");
+  };
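+
+  # A hypothetical sketch (not part of this commit): from a user configuration,
+  # any Kubernetes manifest can be declared as an addon attribute, e.g. a
+  # ConfigMap:
+  #
+  #   services.kubernetes.addonManager.addons.my-config = {
+  #     apiVersion = "v1";
+  #     kind = "ConfigMap";
+  #     metadata = { name = "my-config"; namespace = "default"; };
+  #     data.hello = "world";
+  #   };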
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.etc."kubernetes/addons".source = "${addons}/";
+
+    systemd.services.kube-addon-manager = {
+      description = "Kubernetes addon manager";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      environment.ADDON_PATH = "/etc/kubernetes/addons/";
+      path = [ pkgs.gawk ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = "${top.package}/bin/kube-addons";
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+        Restart = "on-failure";
+        RestartSec = 10;
+      };
+      unitConfig = {
+        StartLimitIntervalSec = 0;
+      };
+    };
+
+    services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled
+    (let
+      name = "system:kube-addon-manager";
+      namespace = "kube-system";
+    in
+    {
+
+      kube-addon-manager-r = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "Role";
+        metadata = {
+          inherit name namespace;
+        };
+        rules = [{
+          apiGroups = ["*"];
+          resources = ["*"];
+          verbs = ["*"];
+        }];
+      };
+
+      kube-addon-manager-rb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "RoleBinding";
+        metadata = {
+          inherit name namespace;
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "Role";
+          inherit name;
+        };
+        subjects = [{
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "User";
+          inherit name;
+        }];
+      };
+
+      kube-addon-manager-cluster-lister-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRole";
+        metadata = {
+          name = "${name}:cluster-lister";
+        };
+        rules = [{
+          apiGroups = ["*"];
+          resources = ["*"];
+          verbs = ["list"];
+        }];
+      };
+
+      kube-addon-manager-cluster-lister-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRoleBinding";
+        metadata = {
+          name = "${name}:cluster-lister";
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "${name}:cluster-lister";
+        };
+        subjects = [{
+          kind = "User";
+          inherit name;
+        }];
+      };
+    });
+
+    services.kubernetes.pki.certs = {
+      addonManager = top.lib.mkCert {
+        name = "kube-addon-manager";
+        CN = "system:kube-addon-manager";
+        action = "systemctl restart kube-addon-manager.service";
+      };
+    };
+  };
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dns.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dns.nix
new file mode 100644
index 000000000000..1c00329e6ccf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/addons/dns.nix
@@ -0,0 +1,373 @@
+{ config, options, pkgs, lib, ... }:
+
+with lib;
+
+let
+  version = "1.10.1";
+  cfg = config.services.kubernetes.addons.dns;
+  ports = {
+    dns = 10053;
+    health = 10054;
+    metrics = 10055;
+  };
+in {
+  options.services.kubernetes.addons.dns = {
+    enable = mkEnableOption (lib.mdDoc "kubernetes dns addon");
+
+    clusterIp = mkOption {
+      description = lib.mdDoc "Dns addon clusterIP";
+
+      # this default is also what kubernetes users
+      default = (
+        concatStringsSep "." (
+          take 3 (splitString "." config.services.kubernetes.apiserver.serviceClusterIpRange
+        ))
+      ) + ".254";
+      defaultText = literalMD ''
+        The `x.y.z.254` IP of
+        `config.${options.services.kubernetes.apiserver.serviceClusterIpRange}`.
+      '';
+      type = types.str;
+    };
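+
+    # Worked example of the default above (illustration only): with the
+    # apiserver default serviceClusterIpRange = "10.0.0.0/24", the first three
+    # octets are kept and ".254" is appended, so clusterIp becomes "10.0.0.254".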
+
+    clusterDomain = mkOption {
+      description = lib.mdDoc "Dns cluster domain";
+      default = "cluster.local";
+      type = types.str;
+    };
+
+    replicas = mkOption {
+      description = lib.mdDoc "Number of DNS pod replicas to deploy in the cluster.";
+      default = 2;
+      type = types.int;
+    };
+
+    reconcileMode = mkOption {
+      description = lib.mdDoc ''
+        Controls the addon manager reconciliation mode for the DNS addon.
+
+        Setting reconcile mode to EnsureExists makes it possible to tailor DNS behavior by editing the coredns ConfigMap.
+
+        See: <https://github.com/kubernetes/kubernetes/blob/master/cluster/addons/addon-manager/README.md>.
+      '';
+      default = "Reconcile";
+      type = types.enum [ "Reconcile" "EnsureExists" ];
+    };
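+
+    # A hypothetical sketch (not part of this commit): hand the CoreDNS
+    # ConfigMap over to the cluster administrator by switching to EnsureExists:
+    #
+    #   services.kubernetes.addons.dns = {
+    #     enable = true;
+    #     reconcileMode = "EnsureExists";
+    #   };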
+
+    coredns = mkOption {
+      description = lib.mdDoc "Docker image to seed for the CoreDNS container.";
+      type = types.attrs;
+      default = {
+        imageName = "coredns/coredns";
+        imageDigest = "sha256:a0ead06651cf580044aeb0a0feba63591858fb2e43ade8c9dea45a6a89ae7e5e";
+        finalImageTag = version;
+        sha256 = "0wg696920smmal7552a2zdhfncndn5kfammfa8bk8l7dz9bhk0y1";
+      };
+    };
+
+    corefile = mkOption {
+      description = lib.mdDoc ''
+        Custom coredns corefile configuration.
+
+        See: <https://coredns.io/manual/toc/#configuration>.
+      '';
+      type = types.str;
+      default = ''
+        .:${toString ports.dns} {
+          errors
+          health :${toString ports.health}
+          kubernetes ${cfg.clusterDomain} in-addr.arpa ip6.arpa {
+            pods insecure
+            fallthrough in-addr.arpa ip6.arpa
+          }
+          prometheus :${toString ports.metrics}
+          forward . /etc/resolv.conf
+          cache 30
+          loop
+          reload
+          loadbalance
+        }'';
+      defaultText = literalExpression ''
+        '''
+          .:${toString ports.dns} {
+            errors
+            health :${toString ports.health}
+            kubernetes ''${config.services.kubernetes.addons.dns.clusterDomain} in-addr.arpa ip6.arpa {
+              pods insecure
+              fallthrough in-addr.arpa ip6.arpa
+            }
+            prometheus :${toString ports.metrics}
+            forward . /etc/resolv.conf
+            cache 30
+            loop
+            reload
+            loadbalance
+          }
+        '''
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.kubernetes.kubelet.seedDockerImages =
+      singleton (pkgs.dockerTools.pullImage cfg.coredns);
+
+    services.kubernetes.addonManager.bootstrapAddons = {
+      coredns-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRole";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            k8s-app = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/bootstrapping" = "rbac-defaults";
+          };
+          name = "system:coredns";
+        };
+        rules = [
+          {
+            apiGroups = [ "" ];
+            resources = [ "endpoints" "services" "pods" "namespaces" ];
+            verbs = [ "list" "watch" ];
+          }
+          {
+            apiGroups = [ "" ];
+            resources = [ "nodes" ];
+            verbs = [ "get" ];
+          }
+          {
+            apiGroups = [ "discovery.k8s.io" ];
+            resources = [ "endpointslices" ];
+            verbs = [ "list" "watch" ];
+          }
+        ];
+      };
+
+      coredns-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRoleBinding";
+        metadata = {
+          annotations = {
+            "rbac.authorization.kubernetes.io/autoupdate" = "true";
+          };
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            k8s-app = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/bootstrapping" = "rbac-defaults";
+          };
+          name = "system:coredns";
+        };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "system:coredns";
+        };
+        subjects = [
+          {
+            kind = "ServiceAccount";
+            name = "coredns";
+            namespace = "kube-system";
+          }
+        ];
+      };
+    };
+
+    services.kubernetes.addonManager.addons = {
+      coredns-sa = {
+        apiVersion = "v1";
+        kind = "ServiceAccount";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            k8s-app = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+      };
+
+      coredns-cm = {
+        apiVersion = "v1";
+        kind = "ConfigMap";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
+            k8s-app = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+        data = {
+          Corefile = cfg.corefile;
+        };
+      };
+
+      coredns-deploy = {
+        apiVersion = "apps/v1";
+        kind = "Deployment";
+        metadata = {
+          labels = {
+            "addonmanager.kubernetes.io/mode" = cfg.reconcileMode;
+            k8s-app = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/name" = "CoreDNS";
+          };
+          name = "coredns";
+          namespace = "kube-system";
+        };
+        spec = {
+          replicas = cfg.replicas;
+          selector = {
+            matchLabels = { k8s-app = "kube-dns"; };
+          };
+          strategy = {
+            rollingUpdate = { maxUnavailable = 1; };
+            type = "RollingUpdate";
+          };
+          template = {
+            metadata = {
+              labels = {
+                k8s-app = "kube-dns";
+              };
+            };
+            spec = {
+              containers = [
+                {
+                  args = [ "-conf" "/etc/coredns/Corefile" ];
+                  image = with cfg.coredns; "${imageName}:${finalImageTag}";
+                  imagePullPolicy = "Never";
+                  livenessProbe = {
+                    failureThreshold = 5;
+                    httpGet = {
+                      path = "/health";
+                      port = ports.health;
+                      scheme = "HTTP";
+                    };
+                    initialDelaySeconds = 60;
+                    successThreshold = 1;
+                    timeoutSeconds = 5;
+                  };
+                  name = "coredns";
+                  ports = [
+                    {
+                      containerPort = ports.dns;
+                      name = "dns";
+                      protocol = "UDP";
+                    }
+                    {
+                      containerPort = ports.dns;
+                      name = "dns-tcp";
+                      protocol = "TCP";
+                    }
+                    {
+                      containerPort = ports.metrics;
+                      name = "metrics";
+                      protocol = "TCP";
+                    }
+                  ];
+                  resources = {
+                    limits = {
+                      memory = "170Mi";
+                    };
+                    requests = {
+                      cpu = "100m";
+                      memory = "70Mi";
+                    };
+                  };
+                  securityContext = {
+                    allowPrivilegeEscalation = false;
+                    capabilities = {
+                      drop = [ "all" ];
+                    };
+                    readOnlyRootFilesystem = true;
+                  };
+                  volumeMounts = [
+                    {
+                      mountPath = "/etc/coredns";
+                      name = "config-volume";
+                      readOnly = true;
+                    }
+                  ];
+                }
+              ];
+              dnsPolicy = "Default";
+              nodeSelector = {
+                "beta.kubernetes.io/os" = "linux";
+              };
+              serviceAccountName = "coredns";
+              tolerations = [
+                {
+                  effect = "NoSchedule";
+                  key = "node-role.kubernetes.io/master";
+                }
+                {
+                  key = "CriticalAddonsOnly";
+                  operator = "Exists";
+                }
+              ];
+              volumes = [
+                {
+                  configMap = {
+                    items = [
+                      {
+                        key = "Corefile";
+                        path = "Corefile";
+                      }
+                    ];
+                    name = "coredns";
+                  };
+                  name = "config-volume";
+                }
+              ];
+            };
+          };
+        };
+      };
+
+      coredns-svc = {
+        apiVersion = "v1";
+        kind = "Service";
+        metadata = {
+          annotations = {
+            "prometheus.io/port" = toString ports.metrics;
+            "prometheus.io/scrape" = "true";
+          };
+          labels = {
+            "addonmanager.kubernetes.io/mode" = "Reconcile";
+            k8s-app = "kube-dns";
+            "kubernetes.io/cluster-service" = "true";
+            "kubernetes.io/name" = "CoreDNS";
+          };
+          name = "kube-dns";
+          namespace = "kube-system";
+        };
+        spec = {
+          clusterIP = cfg.clusterIp;
+          ports = [
+            {
+              name = "dns";
+              port = 53;
+              targetPort = ports.dns;
+              protocol = "UDP";
+            }
+            {
+              name = "dns-tcp";
+              port = 53;
+              targetPort = ports.dns;
+              protocol = "TCP";
+            }
+          ];
+          selector = { k8s-app = "kube-dns"; };
+        };
+      };
+    };
+
+    services.kubernetes.kubelet.clusterDns = mkDefault cfg.clusterIp;
+  };
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/apiserver.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/apiserver.nix
new file mode 100644
index 000000000000..d5ec1e5e6d26
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/apiserver.nix
@@ -0,0 +1,487 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  otop = options.services.kubernetes;
+  cfg = top.apiserver;
+
+  isRBACEnabled = elem "RBAC" cfg.authorizationMode;
+
+  apiserverServiceIP = (concatStringsSep "." (
+    take 3 (splitString "." cfg.serviceClusterIpRange
+  )) + ".1");
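+  # With the default serviceClusterIpRange of "10.0.0.0/24" this evaluates to
+  # "10.0.0.1", the in-cluster service IP of the apiserver; it is added to the
+  # certificate SANs further below.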
+in
+{
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "admissionControl" ] [ "services" "kubernetes" "apiserver" "enableAdmissionPlugins" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "apiserver" "address" ] ["services" "kubernetes" "apiserver" "bindAddress"])
+    (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "insecureBindAddress" ] "")
+    (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "insecurePort" ] "")
+    (mkRemovedOptionModule [ "services" "kubernetes" "apiserver" "publicAddress" ] "")
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "servers" ] [ "services" "kubernetes" "apiserver" "etcd" "servers" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "keyFile" ] [ "services" "kubernetes" "apiserver" "etcd" "keyFile" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "certFile" ] [ "services" "kubernetes" "apiserver" "etcd" "certFile" ])
+    (mkRenamedOptionModule [ "services" "kubernetes" "etcd" "caFile" ] [ "services" "kubernetes" "apiserver" "etcd" "caFile" ])
+  ];
+
+  ###### interface
+  options.services.kubernetes.apiserver = with lib.types; {
+
+    advertiseAddress = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver IP address on which to advertise the apiserver
+        to members of the cluster. This address must be reachable by the rest
+        of the cluster.
+      '';
+      default = null;
+      type = nullOr str;
+    };
+
+    allowPrivileged = mkOption {
+      description = lib.mdDoc "Whether to allow privileged containers on Kubernetes.";
+      default = false;
+      type = bool;
+    };
+
+    authorizationMode = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver authorization mode (AlwaysAllow/AlwaysDeny/ABAC/Webhook/RBAC/Node). See
+        <https://kubernetes.io/docs/reference/access-authn-authz/authorization/>
+      '';
+      default = ["RBAC" "Node"]; # Enabling RBAC by default, although kubernetes default is AllowAllow
+      type = listOf (enum ["AlwaysAllow" "AlwaysDeny" "ABAC" "Webhook" "RBAC" "Node"]);
+    };
+
+    authorizationPolicy = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver authorization policy file. See
+        <https://kubernetes.io/docs/reference/access-authn-authz/authorization/>
+      '';
+      default = [];
+      type = listOf attrs;
+    };
+
+    basicAuthFile = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver basic authentication file. See
+        <https://kubernetes.io/docs/reference/access-authn-authz/authentication>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    bindAddress = mkOption {
+      description = lib.mdDoc ''
+        The IP address on which to listen for the --secure-port port.
+        The associated interface(s) must be reachable by the rest
+        of the cluster, and by CLI/web clients.
+      '';
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    clientCaFile = mkOption {
+      description = lib.mdDoc "Kubernetes apiserver CA file for client auth.";
+      default = top.caFile;
+      defaultText = literalExpression "config.${otop.caFile}";
+      type = nullOr path;
+    };
+
+    disableAdmissionPlugins = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes admission control plugins to disable. See
+        <https://kubernetes.io/docs/admin/admission-controllers/>
+      '';
+      default = [];
+      type = listOf str;
+    };
+
+    enable = mkEnableOption (lib.mdDoc "Kubernetes apiserver");
+
+    enableAdmissionPlugins = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes admission control plugins to enable. See
+        <https://kubernetes.io/docs/admin/admission-controllers/>
+      '';
+      default = [
+        "NamespaceLifecycle" "LimitRanger" "ServiceAccount"
+        "ResourceQuota" "DefaultStorageClass" "DefaultTolerationSeconds"
+        "NodeRestriction"
+      ];
+      example = [
+        "NamespaceLifecycle" "NamespaceExists" "LimitRanger"
+        "SecurityContextDeny" "ServiceAccount" "ResourceQuota"
+        "PodSecurityPolicy" "NodeRestriction" "DefaultStorageClass"
+      ];
+      type = listOf str;
+    };
+
+    etcd = {
+      servers = mkOption {
+        description = lib.mdDoc "List of etcd servers.";
+        default = ["http://127.0.0.1:2379"];
+        type = types.listOf types.str;
+      };
+
+      keyFile = mkOption {
+        description = lib.mdDoc "Etcd key file.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      certFile = mkOption {
+        description = lib.mdDoc "Etcd cert file.";
+        default = null;
+        type = types.nullOr types.path;
+      };
+
+      caFile = mkOption {
+        description = lib.mdDoc "Etcd ca file.";
+        default = top.caFile;
+        defaultText = literalExpression "config.${otop.caFile}";
+        type = types.nullOr types.path;
+      };
+    };
+
+    extraOpts = mkOption {
+      description = lib.mdDoc "Kubernetes apiserver extra command line options.";
+      default = "";
+      type = separatedString " ";
+    };
+
+    extraSANs = mkOption {
+      description = lib.mdDoc "Extra x509 Subject Alternative Names to be added to the kubernetes apiserver tls cert.";
+      default = [];
+      type = listOf str;
+    };
+
+    featureGates = mkOption {
+      description = lib.mdDoc "List set of feature gates";
+      default = top.featureGates;
+      defaultText = literalExpression "config.${otop.featureGates}";
+      type = listOf str;
+    };
+
+    kubeletClientCaFile = mkOption {
+      description = lib.mdDoc "Path to a cert file for connecting to kubelet.";
+      default = top.caFile;
+      defaultText = literalExpression "config.${otop.caFile}";
+      type = nullOr path;
+    };
+
+    kubeletClientCertFile = mkOption {
+      description = lib.mdDoc "Client certificate to use for connections to kubelet.";
+      default = null;
+      type = nullOr path;
+    };
+
+    kubeletClientKeyFile = mkOption {
+      description = lib.mdDoc "Key to use for connections to kubelet.";
+      default = null;
+      type = nullOr path;
+    };
+
+    preferredAddressTypes = mkOption {
+      description = lib.mdDoc "List of the preferred NodeAddressTypes to use for kubelet connections.";
+      type = nullOr str;
+      default = null;
+    };
+
+    proxyClientCertFile = mkOption {
+      description = lib.mdDoc "Client certificate to use for connections to proxy.";
+      default = null;
+      type = nullOr path;
+    };
+
+    proxyClientKeyFile = mkOption {
+      description = lib.mdDoc "Key to use for connections to proxy.";
+      default = null;
+      type = nullOr path;
+    };
+
+    runtimeConfig = mkOption {
+      description = lib.mdDoc ''
+        API runtime configuration. See
+        <https://kubernetes.io/docs/tasks/administer-cluster/cluster-management/>
+      '';
+      default = "authentication.k8s.io/v1beta1=true";
+      example = "api/all=false,api/v1=true";
+      type = str;
+    };
+
+    storageBackend = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver storage backend.
+      '';
+      default = "etcd3";
+      type = enum ["etcd2" "etcd3"];
+    };
+
+    securePort = mkOption {
+      description = lib.mdDoc "Kubernetes apiserver secure port.";
+      default = 6443;
+      type = int;
+    };
+
+    apiAudiences = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver API audiences. ServiceAccount tokens used against the API must be bound to at least one of these audiences.
+      '';
+      default = "api,https://kubernetes.default.svc";
+      type = str;
+    };
+
+    serviceAccountIssuer = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver ServiceAccount issuer.
+      '';
+      default = "https://kubernetes.default.svc";
+      type = str;
+    };
+
+    serviceAccountSigningKeyFile = mkOption {
+      description = lib.mdDoc ''
+        Path to the file that contains the current private key of the service
+        account token issuer. The issuer will sign issued ID tokens with this
+        private key.
+      '';
+      type = path;
+    };
+
+    serviceAccountKeyFile = mkOption {
+      description = lib.mdDoc ''
+        File containing PEM-encoded x509 RSA or ECDSA private or public keys,
+        used to verify ServiceAccount tokens. The specified file can contain
+        multiple keys, and the flag can be specified multiple times with
+        different files. If unspecified, --tls-private-key-file is used.
+        Must be specified when --service-account-signing-key is provided
+      '';
+      type = path;
+    };
+
+    serviceClusterIpRange = mkOption {
+      description = lib.mdDoc ''
+        A CIDR notation IP range from which to assign service cluster IPs.
+        This must not overlap with any IP ranges assigned to nodes for pods.
+      '';
+      default = "10.0.0.0/24";
+      type = str;
+    };
+
+    tlsCertFile = mkOption {
+      description = lib.mdDoc "Kubernetes apiserver certificate file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = lib.mdDoc "Kubernetes apiserver private key file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tokenAuthFile = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver token authentication file. See
+        <https://kubernetes.io/docs/reference/access-authn-authz/authentication>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    verbosity = mkOption {
+      description = lib.mdDoc ''
+        Optional glog verbosity level for logging statements. See
+        <https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+    webhookConfig = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes apiserver Webhook config file. It uses the kubeconfig file format.
+        See <https://kubernetes.io/docs/reference/access-authn-authz/webhook/>
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+  };
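+
+  # A hypothetical minimal sketch (not part of this commit; paths are
+  # placeholders): serviceAccountSigningKeyFile and serviceAccountKeyFile have
+  # no default, so they must be provided when enabling the apiserver directly:
+  #
+  #   services.kubernetes.apiserver = {
+  #     enable = true;
+  #     serviceAccountSigningKeyFile = "/var/lib/kubernetes/secrets/service-account-key.pem";
+  #     serviceAccountKeyFile = "/var/lib/kubernetes/secrets/service-account-key.pem";
+  #   };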
+
+
+  ###### implementation
+  config = mkMerge [
+
+    (mkIf cfg.enable {
+        systemd.services.kube-apiserver = {
+          description = "Kubernetes APIServer Service";
+          wantedBy = [ "kubernetes.target" ];
+          after = [ "network.target" ];
+          serviceConfig = {
+            Slice = "kubernetes.slice";
+            ExecStart = ''${top.package}/bin/kube-apiserver \
+              --allow-privileged=${boolToString cfg.allowPrivileged} \
+              --authorization-mode=${concatStringsSep "," cfg.authorizationMode} \
+                ${optionalString (elem "ABAC" cfg.authorizationMode)
+                  "--authorization-policy-file=${
+                    pkgs.writeText "kube-auth-policy.jsonl"
+                    (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.authorizationPolicy)
+                  }"
+                } \
+                ${optionalString (elem "Webhook" cfg.authorizationMode)
+                  "--authorization-webhook-config-file=${cfg.webhookConfig}"
+                } \
+              --bind-address=${cfg.bindAddress} \
+              ${optionalString (cfg.advertiseAddress != null)
+                "--advertise-address=${cfg.advertiseAddress}"} \
+              ${optionalString (cfg.clientCaFile != null)
+                "--client-ca-file=${cfg.clientCaFile}"} \
+              --disable-admission-plugins=${concatStringsSep "," cfg.disableAdmissionPlugins} \
+              --enable-admission-plugins=${concatStringsSep "," cfg.enableAdmissionPlugins} \
+              --etcd-servers=${concatStringsSep "," cfg.etcd.servers} \
+              ${optionalString (cfg.etcd.caFile != null)
+                "--etcd-cafile=${cfg.etcd.caFile}"} \
+              ${optionalString (cfg.etcd.certFile != null)
+                "--etcd-certfile=${cfg.etcd.certFile}"} \
+              ${optionalString (cfg.etcd.keyFile != null)
+                "--etcd-keyfile=${cfg.etcd.keyFile}"} \
+              ${optionalString (cfg.featureGates != [])
+                "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+              ${optionalString (cfg.basicAuthFile != null)
+                "--basic-auth-file=${cfg.basicAuthFile}"} \
+              ${optionalString (cfg.kubeletClientCaFile != null)
+                "--kubelet-certificate-authority=${cfg.kubeletClientCaFile}"} \
+              ${optionalString (cfg.kubeletClientCertFile != null)
+                "--kubelet-client-certificate=${cfg.kubeletClientCertFile}"} \
+              ${optionalString (cfg.kubeletClientKeyFile != null)
+                "--kubelet-client-key=${cfg.kubeletClientKeyFile}"} \
+              ${optionalString (cfg.preferredAddressTypes != null)
+                "--kubelet-preferred-address-types=${cfg.preferredAddressTypes}"} \
+              ${optionalString (cfg.proxyClientCertFile != null)
+                "--proxy-client-cert-file=${cfg.proxyClientCertFile}"} \
+              ${optionalString (cfg.proxyClientKeyFile != null)
+                "--proxy-client-key-file=${cfg.proxyClientKeyFile}"} \
+              ${optionalString (cfg.runtimeConfig != "")
+                "--runtime-config=${cfg.runtimeConfig}"} \
+              --secure-port=${toString cfg.securePort} \
+              --api-audiences=${toString cfg.apiAudiences} \
+              --service-account-issuer=${toString cfg.serviceAccountIssuer} \
+              --service-account-signing-key-file=${cfg.serviceAccountSigningKeyFile} \
+              --service-account-key-file=${cfg.serviceAccountKeyFile} \
+              --service-cluster-ip-range=${cfg.serviceClusterIpRange} \
+              --storage-backend=${cfg.storageBackend} \
+              ${optionalString (cfg.tlsCertFile != null)
+                "--tls-cert-file=${cfg.tlsCertFile}"} \
+              ${optionalString (cfg.tlsKeyFile != null)
+                "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+              ${optionalString (cfg.tokenAuthFile != null)
+                "--token-auth-file=${cfg.tokenAuthFile}"} \
+              ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+              ${cfg.extraOpts}
+            '';
+            WorkingDirectory = top.dataDir;
+            User = "kubernetes";
+            Group = "kubernetes";
+            AmbientCapabilities = "cap_net_bind_service";
+            Restart = "on-failure";
+            RestartSec = 5;
+          };
+
+          unitConfig = {
+            StartLimitIntervalSec = 0;
+          };
+        };
+
+        services.etcd = {
+          clientCertAuth = mkDefault true;
+          peerClientCertAuth = mkDefault true;
+          listenClientUrls = mkDefault ["https://0.0.0.0:2379"];
+          listenPeerUrls = mkDefault ["https://0.0.0.0:2380"];
+          advertiseClientUrls = mkDefault ["https://${top.masterAddress}:2379"];
+          initialCluster = mkDefault ["${top.masterAddress}=https://${top.masterAddress}:2380"];
+          name = mkDefault top.masterAddress;
+          initialAdvertisePeerUrls = mkDefault ["https://${top.masterAddress}:2380"];
+        };
+
+        services.kubernetes.addonManager.bootstrapAddons = mkIf isRBACEnabled {
+
+          apiserver-kubelet-api-admin-crb = {
+            apiVersion = "rbac.authorization.k8s.io/v1";
+            kind = "ClusterRoleBinding";
+            metadata = {
+              name = "system:kube-apiserver:kubelet-api-admin";
+            };
+            roleRef = {
+              apiGroup = "rbac.authorization.k8s.io";
+              kind = "ClusterRole";
+              name = "system:kubelet-api-admin";
+            };
+            subjects = [{
+              kind = "User";
+              name = "system:kube-apiserver";
+            }];
+          };
+
+        };
+
+      services.kubernetes.pki.certs = with top.lib; {
+        apiServer = mkCert {
+          name = "kube-apiserver";
+          CN = "kubernetes";
+          hosts = [
+                    "kubernetes.default.svc"
+                    "kubernetes.default.svc.${top.addons.dns.clusterDomain}"
+                    cfg.advertiseAddress
+                    top.masterAddress
+                    apiserverServiceIP
+                    "127.0.0.1"
+                  ] ++ cfg.extraSANs;
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverProxyClient = mkCert {
+          name = "kube-apiserver-proxy-client";
+          CN = "front-proxy-client";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverKubeletClient = mkCert {
+          name = "kube-apiserver-kubelet-client";
+          CN = "system:kube-apiserver";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        apiserverEtcdClient = mkCert {
+          name = "kube-apiserver-etcd-client";
+          CN = "etcd-client";
+          action = "systemctl restart kube-apiserver.service";
+        };
+        clusterAdmin = mkCert {
+          name = "cluster-admin";
+          CN = "cluster-admin";
+          fields = {
+            O = "system:masters";
+          };
+          privateKeyOwner = "root";
+        };
+        etcd = mkCert {
+          name = "etcd";
+          CN = top.masterAddress;
+          hosts = [
+                    "etcd.local"
+                    "etcd.${top.addons.dns.clusterDomain}"
+                    top.masterAddress
+                    cfg.advertiseAddress
+                  ];
+          privateKeyOwner = "etcd";
+          action = "systemctl restart etcd.service";
+        };
+      };
+
+    })
+
+  ];
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/controller-manager.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/controller-manager.nix
new file mode 100644
index 000000000000..18c82fc23593
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/controller-manager.nix
@@ -0,0 +1,169 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  otop = options.services.kubernetes;
+  cfg = top.controllerManager;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "kubernetes" "controllerManager" "address" ] ["services" "kubernetes" "controllerManager" "bindAddress"])
+    (mkRemovedOptionModule [ "services" "kubernetes" "controllerManager" "insecurePort" ] "")
+  ];
+
+  ###### interface
+  options.services.kubernetes.controllerManager = with lib.types; {
+
+    allocateNodeCIDRs = mkOption {
+      description = lib.mdDoc "Whether to automatically allocate CIDR ranges for cluster nodes.";
+      default = true;
+      type = bool;
+    };
+
+    bindAddress = mkOption {
+      description = lib.mdDoc "Kubernetes controller manager listening address.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    clusterCidr = mkOption {
+      description = lib.mdDoc "Kubernetes CIDR Range for Pods in cluster.";
+      default = top.clusterCidr;
+      defaultText = literalExpression "config.${otop.clusterCidr}";
+      type = str;
+    };
+
+    enable = mkEnableOption (lib.mdDoc "Kubernetes controller manager");
+
+    extraOpts = mkOption {
+      description = lib.mdDoc "Kubernetes controller manager extra command line options.";
+      default = "";
+      type = separatedString " ";
+    };
+
+    featureGates = mkOption {
+      description = lib.mdDoc "List set of feature gates";
+      default = top.featureGates;
+      defaultText = literalExpression "config.${otop.featureGates}";
+      type = listOf str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes controller manager";
+
+    leaderElect = mkOption {
+      description = lib.mdDoc "Whether to start leader election before executing main loop.";
+      type = bool;
+      default = true;
+    };
+
+    rootCaFile = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes controller manager certificate authority file included in
+        service account's token secret.
+      '';
+      default = top.caFile;
+      defaultText = literalExpression "config.${otop.caFile}";
+      type = nullOr path;
+    };
+
+    securePort = mkOption {
+      description = lib.mdDoc "Kubernetes controller manager secure listening port.";
+      default = 10252;
+      type = int;
+    };
+
+    serviceAccountKeyFile = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes controller manager PEM-encoded private RSA key file used to
+        sign service account tokens.
+      '';
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsCertFile = mkOption {
+      description = lib.mdDoc "Kubernetes controller-manager certificate file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = lib.mdDoc "Kubernetes controller-manager private key file.";
+      default = null;
+      type = nullOr path;
+    };
+
+    verbosity = mkOption {
+      description = lib.mdDoc ''
+        Optional glog verbosity level for logging statements. See
+        <https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-controller-manager = {
+      description = "Kubernetes Controller Manager Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      serviceConfig = {
+        RestartSec = "30s";
+        Restart = "on-failure";
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-controller-manager \
+          --allocate-node-cidrs=${boolToString cfg.allocateNodeCIDRs} \
+          --bind-address=${cfg.bindAddress} \
+          ${optionalString (cfg.clusterCidr!=null)
+            "--cluster-cidr=${cfg.clusterCidr}"} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-controller-manager" cfg.kubeconfig} \
+          --leader-elect=${boolToString cfg.leaderElect} \
+          ${optionalString (cfg.rootCaFile!=null)
+            "--root-ca-file=${cfg.rootCaFile}"} \
+          --secure-port=${toString cfg.securePort} \
+          ${optionalString (cfg.serviceAccountKeyFile!=null)
+            "--service-account-private-key-file=${cfg.serviceAccountKeyFile}"} \
+          ${optionalString (cfg.tlsCertFile!=null)
+            "--tls-cert-file=${cfg.tlsCertFile}"} \
+          ${optionalString (cfg.tlsKeyFile!=null)
+            "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+          ${optionalString (elem "RBAC" top.apiserver.authorizationMode)
+            "--use-service-account-credentials"} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+      };
+      unitConfig = {
+        StartLimitIntervalSec = 0;
+      };
+      path = top.path;
+    };
+
+    services.kubernetes.pki.certs = with top.lib; {
+      controllerManager = mkCert {
+        name = "kube-controller-manager";
+        CN = "kube-controller-manager";
+        action = "systemctl restart kube-controller-manager.service";
+      };
+      controllerManagerClient = mkCert {
+        name = "kube-controller-manager-client";
+        CN = "system:kube-controller-manager";
+        action = "systemctl restart kube-controller-manager.service";
+      };
+    };
+
+    services.kubernetes.controllerManager.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/default.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/default.nix
new file mode 100644
index 000000000000..f5374fc71942
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/default.nix
@@ -0,0 +1,315 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.kubernetes;
+  opt = options.services.kubernetes;
+
+  defaultContainerdSettings = {
+    version = 2;
+    root = "/var/lib/containerd";
+    state = "/run/containerd";
+    oom_score = 0;
+
+    grpc = {
+      address = "/run/containerd/containerd.sock";
+    };
+
+    plugins."io.containerd.grpc.v1.cri" = {
+      sandbox_image = "pause:latest";
+
+      cni = {
+        bin_dir = "/opt/cni/bin";
+        max_conf_num = 0;
+      };
+
+      containerd.runtimes.runc = {
+        runtime_type = "io.containerd.runc.v2";
+        options.SystemdCgroup = true;
+      };
+    };
+  };
+
+  mkKubeConfig = name: conf: pkgs.writeText "${name}-kubeconfig" (builtins.toJSON {
+    apiVersion = "v1";
+    kind = "Config";
+    clusters = [{
+      name = "local";
+      cluster.certificate-authority = conf.caFile or cfg.caFile;
+      cluster.server = conf.server;
+    }];
+    users = [{
+      inherit name;
+      user = {
+        client-certificate = conf.certFile;
+        client-key = conf.keyFile;
+      };
+    }];
+    contexts = [{
+      context = {
+        cluster = "local";
+        user = name;
+      };
+      name = "local";
+    }];
+    current-context = "local";
+  });
+
+  caCert = secret "ca";
+
+  etcdEndpoints = ["https://${cfg.masterAddress}:2379"];
+
+  mkCert = { name, CN, hosts ? [], fields ? {}, action ? "",
+             privateKeyOwner ? "kubernetes" }: rec {
+    inherit name caCert CN hosts fields action;
+    cert = secret name;
+    key = secret "${name}-key";
+    privateKeyOptions = {
+      owner = privateKeyOwner;
+      group = "nogroup";
+      mode = "0600";
+      path = key;
+    };
+  };
+
+  secret = name: "${cfg.secretsPath}/${name}.pem";
+
+  mkKubeConfigOptions = prefix: {
+    server = mkOption {
+      description = lib.mdDoc "${prefix} kube-apiserver server address.";
+      type = types.str;
+    };
+
+    caFile = mkOption {
+      description = lib.mdDoc "${prefix} certificate authority file used to connect to kube-apiserver.";
+      type = types.nullOr types.path;
+      default = cfg.caFile;
+      defaultText = literalExpression "config.${opt.caFile}";
+    };
+
+    certFile = mkOption {
+      description = lib.mdDoc "${prefix} client certificate file used to connect to kube-apiserver.";
+      type = types.nullOr types.path;
+      default = null;
+    };
+
+    keyFile = mkOption {
+      description = lib.mdDoc "${prefix} client key file used to connect to kube-apiserver.";
+      type = types.nullOr types.path;
+      default = null;
+    };
+  };
+in {
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "kubernetes" "addons" "dashboard" ] "Removed due to it being an outdated version")
+    (mkRemovedOptionModule [ "services" "kubernetes" "verbose" ] "")
+  ];
+
+  ###### interface
+
+  options.services.kubernetes = {
+    roles = mkOption {
+      description = lib.mdDoc ''
+        Kubernetes role that this machine should take.
+
+        Master role will enable etcd, apiserver, scheduler, controller manager,
+        addon manager, flannel and proxy services.
+        Node role will enable flannel, docker, kubelet and proxy services.
+      '';
+      default = [];
+      type = types.listOf (types.enum ["master" "node"]);
+    };
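+    # Illustrative use (hostname is hypothetical): a single machine acting as
+    # both master and node could set
+    #   services.kubernetes = {
+    #     roles = [ "master" "node" ];
+    #     masterAddress = "kube.example.local";
+    #   };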
+
+    package = mkOption {
+      description = lib.mdDoc "Kubernetes package to use.";
+      type = types.package;
+      default = pkgs.kubernetes;
+      defaultText = literalExpression "pkgs.kubernetes";
+    };
+
+    kubeconfig = mkKubeConfigOptions "Default kubeconfig";
+
+    apiserverAddress = mkOption {
+      description = lib.mdDoc ''
+        Clusterwide accessible address for the kubernetes apiserver,
+        including protocol and optional port.
+      '';
+      example = "https://kubernetes-apiserver.example.com:6443";
+      type = types.str;
+    };
+
+    caFile = mkOption {
+      description = lib.mdDoc "Default kubernetes certificate authority";
+      type = types.nullOr types.path;
+      default = null;
+    };
+
+    dataDir = mkOption {
+      description = lib.mdDoc "Kubernetes root directory for managing kubelet files.";
+      default = "/var/lib/kubernetes";
+      type = types.path;
+    };
+
+    easyCerts = mkOption {
+      description = lib.mdDoc "Automatically setup x509 certificates and keys for the entire cluster.";
+      default = false;
+      type = types.bool;
+    };
+
+    featureGates = mkOption {
+      description = lib.mdDoc "List set of feature gates.";
+      default = [];
+      type = types.listOf types.str;
+    };
+
+    masterAddress = mkOption {
+      description = lib.mdDoc "Clusterwide available network address or hostname for the kubernetes master server.";
+      example = "master.example.com";
+      type = types.str;
+    };
+
+    path = mkOption {
+      description = lib.mdDoc "Packages added to the services' PATH environment variable. Both the bin and sbin subdirectories of each package are added.";
+      type = types.listOf types.package;
+      default = [];
+    };
+
+    clusterCidr = mkOption {
+      description = lib.mdDoc "Kubernetes controller manager and proxy CIDR Range for Pods in cluster.";
+      default = "10.1.0.0/16";
+      type = types.nullOr types.str;
+    };
+
+    lib = mkOption {
+      description = lib.mdDoc "Common functions for the kubernetes modules.";
+      default = {
+        inherit mkCert;
+        inherit mkKubeConfig;
+        inherit mkKubeConfigOptions;
+      };
+      type = types.attrs;
+    };
+
+    secretsPath = mkOption {
+      description = lib.mdDoc "Default location for kubernetes secrets. Not a store location.";
+      type = types.path;
+      default = cfg.dataDir + "/secrets";
+      defaultText = literalExpression ''
+        config.${opt.dataDir} + "/secrets"
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkMerge [
+
+    (mkIf cfg.easyCerts {
+      services.kubernetes.pki.enable = mkDefault true;
+      services.kubernetes.caFile = caCert;
+    })
+
+    (mkIf (elem "master" cfg.roles) {
+      services.kubernetes.apiserver.enable = mkDefault true;
+      services.kubernetes.scheduler.enable = mkDefault true;
+      services.kubernetes.controllerManager.enable = mkDefault true;
+      services.kubernetes.addonManager.enable = mkDefault true;
+      services.kubernetes.proxy.enable = mkDefault true;
+      services.etcd.enable = true; # Cannot mkDefault because of flannel default options
+      services.kubernetes.kubelet = {
+        enable = mkDefault true;
+        taints = mkIf (!(elem "node" cfg.roles)) {
+          master = {
+            key = "node-role.kubernetes.io/master";
+            value = "true";
+            effect = "NoSchedule";
+          };
+        };
+      };
+    })
+
+
+    (mkIf (all (el: el == "master") cfg.roles) {
+      # if this node is only a master make it unschedulable by default
+      services.kubernetes.kubelet.unschedulable = mkDefault true;
+    })
+
+    (mkIf (elem "node" cfg.roles) {
+      services.kubernetes.kubelet.enable = mkDefault true;
+      services.kubernetes.proxy.enable = mkDefault true;
+    })
+
+    # Using "services.kubernetes.roles" will automatically enable easyCerts and flannel
+    (mkIf (cfg.roles != []) {
+      services.kubernetes.flannel.enable = mkDefault true;
+      services.flannel.etcd.endpoints = mkDefault etcdEndpoints;
+      services.kubernetes.easyCerts = mkDefault true;
+    })
+
+    (mkIf cfg.apiserver.enable {
+      services.kubernetes.pki.etcClusterAdminKubeconfig = mkDefault "kubernetes/cluster-admin.kubeconfig";
+      services.kubernetes.apiserver.etcd.servers = mkDefault etcdEndpoints;
+    })
+
+    (mkIf cfg.kubelet.enable {
+      virtualisation.containerd = {
+        enable = mkDefault true;
+        settings = mapAttrsRecursive (name: mkDefault) defaultContainerdSettings;
+      };
+    })
+
+    (mkIf (cfg.apiserver.enable || cfg.controllerManager.enable) {
+      services.kubernetes.pki.certs = {
+        serviceAccount = mkCert {
+          name = "service-account";
+          CN = "system:service-account-signer";
+          action = ''
+            systemctl reload \
+              kube-apiserver.service \
+              kube-controller-manager.service
+          '';
+        };
+      };
+    })
+
+    (mkIf (
+        cfg.apiserver.enable ||
+        cfg.scheduler.enable ||
+        cfg.controllerManager.enable ||
+        cfg.kubelet.enable ||
+        cfg.proxy.enable ||
+        cfg.addonManager.enable
+    ) {
+      systemd.targets.kubernetes = {
+        description = "Kubernetes";
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      systemd.tmpfiles.rules = [
+        "d /opt/cni/bin 0755 root root -"
+        "d /run/kubernetes 0755 kubernetes kubernetes -"
+        "d /var/lib/kubernetes 0755 kubernetes kubernetes -"
+      ];
+
+      users.users.kubernetes = {
+        uid = config.ids.uids.kubernetes;
+        description = "Kubernetes user";
+        group = "kubernetes";
+        home = cfg.dataDir;
+        createHome = true;
+      };
+      users.groups.kubernetes.gid = config.ids.gids.kubernetes;
+
+      # dns addon is enabled by default
+      services.kubernetes.addons.dns.enable = mkDefault true;
+
+      services.kubernetes.apiserverAddress = mkDefault ("https://${if cfg.apiserver.advertiseAddress != null
+                          then cfg.apiserver.advertiseAddress
+                          else "${cfg.masterAddress}:${toString cfg.apiserver.securePort}"}");
+    })
+  ];
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/flannel.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/flannel.nix
new file mode 100644
index 000000000000..11c5adc6a885
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/flannel.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.flannel;
+
+  # we want flannel to use kubernetes itself as configuration backend, not direct etcd
+  storageBackend = "kubernetes";
+in
+{
+  ###### interface
+  options.services.kubernetes.flannel = {
+    enable = mkEnableOption (lib.mdDoc "flannel networking");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.flannel = {
+
+      enable = mkDefault true;
+      network = mkDefault top.clusterCidr;
+      inherit storageBackend;
+      nodeName = config.services.kubernetes.kubelet.hostname;
+    };
+
+    services.kubernetes.kubelet = {
+      cni.config = mkDefault [{
+        name = "mynet";
+        type = "flannel";
+        cniVersion = "0.3.1";
+        delegate = {
+          isDefaultGateway = true;
+          bridge = "mynet";
+        };
+      }];
+    };
+
+    networking = {
+      firewall.allowedUDPPorts = [
+        8285  # flannel udp
+        8472  # flannel vxlan
+      ];
+      dhcpcd.denyInterfaces = [ "mynet*" "flannel*" ];
+    };
+
+    services.kubernetes.pki.certs = {
+      flannelClient = top.lib.mkCert {
+        name = "flannel-client";
+        CN = "flannel-client";
+        action = "systemctl restart flannel.service";
+      };
+    };
+
+    # give flannel some kubernetes rbac permissions if applicable
+    services.kubernetes.addonManager.bootstrapAddons = mkIf ((storageBackend == "kubernetes") && (elem "RBAC" top.apiserver.authorizationMode)) {
+
+      flannel-cr = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRole";
+        metadata = { name = "flannel"; };
+        rules = [{
+          apiGroups = [ "" ];
+          resources = [ "pods" ];
+          verbs = [ "get" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes" ];
+          verbs = [ "list" "watch" ];
+        }
+        {
+          apiGroups = [ "" ];
+          resources = [ "nodes/status" ];
+          verbs = [ "patch" ];
+        }];
+      };
+
+      flannel-crb = {
+        apiVersion = "rbac.authorization.k8s.io/v1";
+        kind = "ClusterRoleBinding";
+        metadata = { name = "flannel"; };
+        roleRef = {
+          apiGroup = "rbac.authorization.k8s.io";
+          kind = "ClusterRole";
+          name = "flannel";
+        };
+        subjects = [{
+          kind = "User";
+          name = "flannel-client";
+        }];
+      };
+
+    };
+  };
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix
new file mode 100644
index 000000000000..fd2dce7ee6a2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/kubelet.nix
@@ -0,0 +1,387 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  otop = options.services.kubernetes;
+  cfg = top.kubelet;
+
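+  # Build the CNI configuration directory: either use cfg.cni.configDir verbatim
+  # or write each entry of cfg.cni.config as <10+i>-<type>.conf so plugins are
+  # loaded in list order (setting both is an error).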
+  cniConfig =
+    if cfg.cni.config != [] && cfg.cni.configDir != null then
+      throw "Verbatim CNI-config and CNI configDir cannot both be set."
+    else if cfg.cni.configDir != null then
+      cfg.cni.configDir
+    else
+      (pkgs.buildEnv {
+        name = "kubernetes-cni-config";
+        paths = imap (i: entry:
+          pkgs.writeTextDir "${toString (10+i)}-${entry.type}.conf" (builtins.toJSON entry)
+        ) cfg.cni.config;
+      });
+
+  infraContainer = pkgs.dockerTools.buildImage {
+    name = "pause";
+    tag = "latest";
+    copyToRoot = pkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ top.package.pause ];
+    };
+    config.Cmd = ["/bin/pause"];
+  };
+
+  kubeconfig = top.lib.mkKubeConfig "kubelet" cfg.kubeconfig;
+
+  manifestPath = "kubernetes/manifests";
+
+  taintOptions = with lib.types; { name, ... }: {
+    options = {
+      key = mkOption {
+        description = lib.mdDoc "Key of taint.";
+        default = name;
+        defaultText = literalMD "Name of this submodule.";
+        type = str;
+      };
+      value = mkOption {
+        description = lib.mdDoc "Value of taint.";
+        type = str;
+      };
+      effect = mkOption {
+        description = lib.mdDoc "Effect of taint.";
+        example = "NoSchedule";
+        type = enum ["NoSchedule" "PreferNoSchedule" "NoExecute"];
+      };
+    };
+  };
+
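+  # Render cfg.taints as the comma-separated "key=value:effect" list expected by
+  # kubelet's --register-with-taints flag, e.g. "unschedulable=true:NoSchedule".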
+  taints = concatMapStringsSep "," (v: "${v.key}=${v.value}:${v.effect}") (mapAttrsToList (n: v: v) cfg.taints);
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "applyManifests" ] "")
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "cadvisorPort" ] "")
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "allowPrivileged" ] "")
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "networkPlugin" ] "")
+    (mkRemovedOptionModule [ "services" "kubernetes" "kubelet" "containerRuntime" ] "")
+  ];
+
+  ###### interface
+  options.services.kubernetes.kubelet = with lib.types; {
+
+    address = mkOption {
+      description = lib.mdDoc "Kubernetes kubelet info server listening address.";
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    clusterDns = mkOption {
+      description = lib.mdDoc "Use alternative DNS.";
+      default = "10.1.0.1";
+      type = str;
+    };
+
+    clusterDomain = mkOption {
+      description = lib.mdDoc "Use alternative domain.";
+      default = config.services.kubernetes.addons.dns.clusterDomain;
+      defaultText = literalExpression "config.${options.services.kubernetes.addons.dns.clusterDomain}";
+      type = str;
+    };
+
+    clientCaFile = mkOption {
+      description = lib.mdDoc "Kubernetes apiserver CA file for client authentication.";
+      default = top.caFile;
+      defaultText = literalExpression "config.${otop.caFile}";
+      type = nullOr path;
+    };
+
+    cni = {
+      packages = mkOption {
+        description = lib.mdDoc "List of network plugin packages to install.";
+        type = listOf package;
+        default = [];
+      };
+
+      config = mkOption {
+        description = lib.mdDoc "Kubernetes CNI configuration.";
+        type = listOf attrs;
+        default = [];
+        example = literalExpression ''
+          [{
+            "cniVersion": "0.3.1",
+            "name": "mynet",
+            "type": "bridge",
+            "bridge": "cni0",
+            "isGateway": true,
+            "ipMasq": true,
+            "ipam": {
+                "type": "host-local",
+                "subnet": "10.22.0.0/16",
+                "routes": [
+                    { "dst": "0.0.0.0/0" }
+                ]
+            }
+          } {
+            "cniVersion": "0.3.1",
+            "type": "loopback"
+          }]
+        '';
+      };
+
+      configDir = mkOption {
+        description = lib.mdDoc "Path to Kubernetes CNI configuration directory.";
+        type = nullOr path;
+        default = null;
+      };
+    };
+
+    containerRuntimeEndpoint = mkOption {
+      description = lib.mdDoc "Endpoint at which to find the container runtime api interface/socket";
+      type = str;
+      default = "unix:///run/containerd/containerd.sock";
+    };
+
+    enable = mkEnableOption (lib.mdDoc "Kubernetes kubelet");
+
+    extraOpts = mkOption {
+      description = lib.mdDoc "Kubernetes kubelet extra command line options.";
+      default = "";
+      type = separatedString " ";
+    };
+
+    featureGates = mkOption {
+      description = lib.mdDoc "List set of feature gates";
+      default = top.featureGates;
+      defaultText = literalExpression "config.${otop.featureGates}";
+      type = listOf str;
+    };
+
+    healthz = {
+      bind = mkOption {
+        description = lib.mdDoc "Kubernetes kubelet healthz listening address.";
+        default = "127.0.0.1";
+        type = str;
+      };
+
+      port = mkOption {
+        description = lib.mdDoc "Kubernetes kubelet healthz port.";
+        default = 10248;
+        type = port;
+      };
+    };
+
+    hostname = mkOption {
+      description = lib.mdDoc "Kubernetes kubelet hostname override.";
+      defaultText = literalExpression "config.networking.fqdnOrHostName";
+      type = str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubelet";
+
+    manifests = mkOption {
+      description = lib.mdDoc "List of manifests to bootstrap with kubelet (only pods can be created as manifest entry)";
+      type = attrsOf attrs;
+      default = {};
+    };
+
+    nodeIp = mkOption {
+      description = lib.mdDoc "IP address of the node. If set, kubelet will use this IP address for the node.";
+      default = null;
+      type = nullOr str;
+    };
+
+    registerNode = mkOption {
+      description = lib.mdDoc "Whether to auto register kubelet with API server.";
+      default = true;
+      type = bool;
+    };
+
+    port = mkOption {
+      description = lib.mdDoc "Kubernetes kubelet info server listening port.";
+      default = 10250;
+      type = port;
+    };
+
+    seedDockerImages = mkOption {
+      description = lib.mdDoc "List of docker images to preload on system";
+      default = [];
+      type = listOf package;
+    };
+
+    taints = mkOption {
+      description = lib.mdDoc "Node taints (https://kubernetes.io/docs/concepts/configuration/assign-pod-node/).";
+      default = {};
+      type = attrsOf (submodule [ taintOptions ]);
+    };
+
+    tlsCertFile = mkOption {
+      description = lib.mdDoc "File containing x509 Certificate for HTTPS.";
+      default = null;
+      type = nullOr path;
+    };
+
+    tlsKeyFile = mkOption {
+      description = lib.mdDoc "File containing x509 private key matching tlsCertFile.";
+      default = null;
+      type = nullOr path;
+    };
+
+    unschedulable = mkOption {
+      description = lib.mdDoc "Whether to set node taint to unschedulable=true as it is the case of node that has only master role.";
+      default = false;
+      type = bool;
+    };
+
+    verbosity = mkOption {
+      description = lib.mdDoc ''
+        Optional glog verbosity level for logging statements. See
+        <https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkMerge [
+    (mkIf cfg.enable {
+
+      environment.etc."cni/net.d".source = cniConfig;
+
+      services.kubernetes.kubelet.seedDockerImages = [infraContainer];
+
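+      # Needed so bridged pod traffic is seen by iptables (kube-proxy rules)
+      # and can be forwarded between pods and the outside world.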
+      boot.kernel.sysctl = {
+        "net.bridge.bridge-nf-call-iptables"  = 1;
+        "net.ipv4.ip_forward"                 = 1;
+        "net.bridge.bridge-nf-call-ip6tables" = 1;
+      };
+
+      systemd.services.kubelet = {
+        description = "Kubernetes Kubelet Service";
+        wantedBy = [ "kubernetes.target" ];
+        after = [ "containerd.service" "network.target" "kube-apiserver.service" ];
+        path = with pkgs; [
+          gitMinimal
+          openssh
+          util-linux
+          iproute2
+          ethtool
+          thin-provisioning-tools
+          iptables
+          socat
+        ] ++ lib.optional config.boot.zfs.enabled config.boot.zfs.package ++ top.path;
+        preStart = ''
+          ${concatMapStrings (img: ''
+            echo "Seeding container image: ${img}"
+            ${if (lib.hasSuffix "gz" img) then
+              ''${pkgs.gzip}/bin/zcat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import --all-platforms -''
+            else
+              ''${pkgs.coreutils}/bin/cat "${img}" | ${pkgs.containerd}/bin/ctr -n k8s.io image import --all-platforms -''
+            }
+          '') cfg.seedDockerImages}
+
+          rm /opt/cni/bin/* || true
+          ${concatMapStrings (package: ''
+            echo "Linking cni package: ${package}"
+            ln -fs ${package}/bin/* /opt/cni/bin
+          '') cfg.cni.packages}
+        '';
+        serviceConfig = {
+          Slice = "kubernetes.slice";
+          CPUAccounting = true;
+          MemoryAccounting = true;
+          Restart = "on-failure";
+          RestartSec = "1000ms";
+          ExecStart = ''${top.package}/bin/kubelet \
+            --address=${cfg.address} \
+            --authentication-token-webhook \
+            --authentication-token-webhook-cache-ttl="10s" \
+            --authorization-mode=Webhook \
+            ${optionalString (cfg.clientCaFile != null)
+              "--client-ca-file=${cfg.clientCaFile}"} \
+            ${optionalString (cfg.clusterDns != "")
+              "--cluster-dns=${cfg.clusterDns}"} \
+            ${optionalString (cfg.clusterDomain != "")
+              "--cluster-domain=${cfg.clusterDomain}"} \
+            ${optionalString (cfg.featureGates != [])
+              "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+            --hairpin-mode=hairpin-veth \
+            --healthz-bind-address=${cfg.healthz.bind} \
+            --healthz-port=${toString cfg.healthz.port} \
+            --hostname-override=${cfg.hostname} \
+            --kubeconfig=${kubeconfig} \
+            ${optionalString (cfg.nodeIp != null)
+              "--node-ip=${cfg.nodeIp}"} \
+            --pod-infra-container-image=pause \
+            ${optionalString (cfg.manifests != {})
+              "--pod-manifest-path=/etc/${manifestPath}"} \
+            --port=${toString cfg.port} \
+            --register-node=${boolToString cfg.registerNode} \
+            ${optionalString (taints != "")
+              "--register-with-taints=${taints}"} \
+            --root-dir=${top.dataDir} \
+            ${optionalString (cfg.tlsCertFile != null)
+              "--tls-cert-file=${cfg.tlsCertFile}"} \
+            ${optionalString (cfg.tlsKeyFile != null)
+              "--tls-private-key-file=${cfg.tlsKeyFile}"} \
+            ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+            --container-runtime-endpoint=${cfg.containerRuntimeEndpoint} \
+            --cgroup-driver=systemd \
+            ${cfg.extraOpts}
+          '';
+          WorkingDirectory = top.dataDir;
+        };
+        unitConfig = {
+          StartLimitIntervalSec = 0;
+        };
+      };
+
+      # Always include cni plugins
+      services.kubernetes.kubelet.cni.packages = [pkgs.cni-plugins pkgs.cni-plugin-flannel];
+
+      boot.kernelModules = ["br_netfilter" "overlay"];
+
+      services.kubernetes.kubelet.hostname =
+        mkDefault config.networking.fqdnOrHostName;
+
+      services.kubernetes.pki.certs = with top.lib; {
+        kubelet = mkCert {
+          name = "kubelet";
+          CN = top.kubelet.hostname;
+          action = "systemctl restart kubelet.service";
+
+        };
+        kubeletClient = mkCert {
+          name = "kubelet-client";
+          CN = "system:node:${top.kubelet.hostname}";
+          fields = {
+            O = "system:nodes";
+          };
+          action = "systemctl restart kubelet.service";
+        };
+      };
+
+      services.kubernetes.kubelet.kubeconfig.server = mkDefault top.apiserverAddress;
+    })
+
+    (mkIf (cfg.enable && cfg.manifests != {}) {
+      environment.etc = mapAttrs' (name: manifest:
+        nameValuePair "${manifestPath}/${name}.json" {
+          text = builtins.toJSON manifest;
+          mode = "0755";
+        }
+      ) cfg.manifests;
+    })
+
+    (mkIf (cfg.unschedulable && cfg.enable) {
+      services.kubernetes.kubelet.taints.unschedulable = {
+        value = "true";
+        effect = "NoSchedule";
+      };
+    })
+
+  ];
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix
new file mode 100644
index 000000000000..38682701ea15
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/pki.nix
@@ -0,0 +1,406 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  cfg = top.pki;
+
+  csrCA = pkgs.writeText "kube-pki-cacert-csr.json" (builtins.toJSON {
+    key = {
+        algo = "rsa";
+        size = 2048;
+    };
+    names = singleton cfg.caSpec;
+  });
+
+  csrCfssl = pkgs.writeText "kube-pki-cfssl-csr.json" (builtins.toJSON {
+    key = {
+        algo = "rsa";
+        size = 2048;
+    };
+    CN = top.masterAddress;
+    hosts = [top.masterAddress] ++ cfg.cfsslAPIExtraSANs;
+  });
+
+  cfsslAPITokenBaseName = "apitoken.secret";
+  cfsslAPITokenPath = "${config.services.cfssl.dataDir}/${cfsslAPITokenBaseName}";
+  certmgrAPITokenPath = "${top.secretsPath}/${cfsslAPITokenBaseName}";
+  cfsslAPITokenLength = 32;
+
+  clusterAdminKubeconfig = with cfg.certs.clusterAdmin;
+    top.lib.mkKubeConfig "cluster-admin" {
+        server = top.apiserverAddress;
+        certFile = cert;
+        keyFile = key;
+    };
+
+  remote = with config.services; "https://${kubernetes.masterAddress}:${toString cfssl.port}";
+in
+{
+  ###### interface
+  options.services.kubernetes.pki = with lib.types; {
+
+    enable = mkEnableOption (lib.mdDoc "easyCert issuer service");
+
+    certs = mkOption {
+      description = lib.mdDoc "List of certificate specs to feed to cert generator.";
+      default = {};
+      type = attrs;
+    };
+
+    genCfsslCACert = mkOption {
+      description = lib.mdDoc ''
+        Whether to automatically generate cfssl CA certificate and key,
+        if they don't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    genCfsslAPICerts = mkOption {
+      description = lib.mdDoc ''
+        Whether to automatically generate cfssl API webserver TLS cert and key,
+        if they don't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    cfsslAPIExtraSANs = mkOption {
+      description = lib.mdDoc ''
+        Extra x509 Subject Alternative Names to be added to the cfssl API webserver TLS cert.
+      '';
+      default = [];
+      example = [ "subdomain.example.com" ];
+      type = listOf str;
+    };
+
+    genCfsslAPIToken = mkOption {
+      description = lib.mdDoc ''
+        Whether to automatically generate the cfssl API-token secret,
+        if it doesn't exist.
+      '';
+      default = true;
+      type = bool;
+    };
+
+    pkiTrustOnBootstrap = mkOption {
+      description = lib.mdDoc "Whether to always trust remote cfssl server upon initial PKI bootstrap.";
+      default = true;
+      type = bool;
+    };
+
+    caCertPathPrefix = mkOption {
+      description = lib.mdDoc ''
+        Path prefix for the CA certificate to be used for cfssl signing.
+        Suffixes ".pem" and "-key.pem" will be automatically appended for
+        the public and private keys respectively.
+      '';
+      default = "${config.services.cfssl.dataDir}/ca";
+      defaultText = literalExpression ''"''${config.services.cfssl.dataDir}/ca"'';
+      type = str;
+    };
+
+    caSpec = mkOption {
+      description = lib.mdDoc "Certificate specification for the auto-generated CAcert.";
+      default = {
+        CN = "kubernetes-cluster-ca";
+        O = "NixOS";
+        OU = "services.kubernetes.pki.caSpec";
+        L = "auto-generated";
+      };
+      type = attrs;
+    };
+
+    etcClusterAdminKubeconfig = mkOption {
+      description = lib.mdDoc ''
+        Symlink a kubeconfig with cluster-admin privileges to environment path
+        (/etc/\<path\>).
+      '';
+      default = null;
+      type = nullOr str;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable
+  (let
+    cfsslCertPathPrefix = "${config.services.cfssl.dataDir}/cfssl";
+    cfsslCert = "${cfsslCertPathPrefix}.pem";
+    cfsslKey = "${cfsslCertPathPrefix}-key.pem";
+  in
+  {
+
+    services.cfssl = mkIf (top.apiserver.enable) {
+      enable = true;
+      address = "0.0.0.0";
+      tlsCert = cfsslCert;
+      tlsKey = cfsslKey;
+      configFile = toString (pkgs.writeText "cfssl-config.json" (builtins.toJSON {
+        signing = {
+          profiles = {
+            default = {
+              usages = ["digital signature"];
+              auth_key = "default";
+              expiry = "720h";
+            };
+          };
+        };
+        auth_keys = {
+          default = {
+            type = "standard";
+            key = "file:${cfsslAPITokenPath}";
+          };
+        };
+      }));
+    };
+
+    systemd.services.cfssl.preStart = with pkgs; with config.services.cfssl; mkIf (top.apiserver.enable)
+    (concatStringsSep "\n" [
+      "set -e"
+      (optionalString cfg.genCfsslCACert ''
+        if [ ! -f "${cfg.caCertPathPrefix}.pem" ]; then
+          ${cfssl}/bin/cfssl genkey -initca ${csrCA} | \
+            ${cfssl}/bin/cfssljson -bare ${cfg.caCertPathPrefix}
+        fi
+      '')
+      (optionalString cfg.genCfsslAPICerts ''
+        if [ ! -f "${dataDir}/cfssl.pem" ]; then
+          ${cfssl}/bin/cfssl gencert -ca "${cfg.caCertPathPrefix}.pem" -ca-key "${cfg.caCertPathPrefix}-key.pem" ${csrCfssl} | \
+            ${cfssl}/bin/cfssljson -bare ${cfsslCertPathPrefix}
+        fi
+      '')
+      (optionalString cfg.genCfsslAPIToken ''
+        if [ ! -f "${cfsslAPITokenPath}" ]; then
+          head -c ${toString (cfsslAPITokenLength / 2)} /dev/urandom | od -An -t x | tr -d ' ' >"${cfsslAPITokenPath}"
+        fi
+        chown cfssl "${cfsslAPITokenPath}" && chmod 400 "${cfsslAPITokenPath}"
+      '')]);
+
+    systemd.services.kube-certmgr-bootstrap = {
+      description = "Kubernetes certmgr bootstrapper";
+      wantedBy = [ "certmgr.service" ];
+      after = [ "cfssl.target" ];
+      script = concatStringsSep "\n" [''
+        set -e
+
+        # If there's a cfssl (cert issuer) running locally, then don't rely on user to
+        # manually paste it in place. Just symlink.
+        # otherwise, create the target file, ready for users to insert the token
+
+        mkdir -p "$(dirname "${certmgrAPITokenPath}")"
+        if [ -f "${cfsslAPITokenPath}" ]; then
+          ln -fs "${cfsslAPITokenPath}" "${certmgrAPITokenPath}"
+        else
+          touch "${certmgrAPITokenPath}" && chmod 600 "${certmgrAPITokenPath}"
+        fi
+      ''
+      (optionalString (cfg.pkiTrustOnBootstrap) ''
+        if [ ! -f "${top.caFile}" ] || [ $(cat "${top.caFile}" | wc -c) -lt 1 ]; then
+          ${pkgs.curl}/bin/curl --fail-early -f -kd '{}' ${remote}/api/v1/cfssl/info | \
+            ${pkgs.cfssl}/bin/cfssljson -stdout >${top.caFile}
+        fi
+      '')
+      ];
+      serviceConfig = {
+        RestartSec = "10s";
+        Restart = "on-failure";
+      };
+    };
+
+    services.certmgr = {
+      enable = true;
+      package = pkgs.certmgr-selfsigned;
+      svcManager = "command";
+      specs =
+        let
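+          # Translate each services.kubernetes.pki.certs entry into a certmgr
+          # spec: the certificate is requested from the remote cfssl API using
+          # the shared API token and written to the paths from the cert spec.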
+          mkSpec = _: cert: {
+            inherit (cert) action;
+            authority = {
+              inherit remote;
+              file.path = cert.caCert;
+              root_ca = cert.caCert;
+              profile = "default";
+              auth_key_file = certmgrAPITokenPath;
+            };
+            certificate = {
+              path = cert.cert;
+            };
+            private_key = cert.privateKeyOptions;
+            request = {
+              hosts = [cert.CN] ++ cert.hosts;
+              inherit (cert) CN;
+              key = {
+                algo = "rsa";
+                size = 2048;
+              };
+              names = [ cert.fields ];
+            };
+          };
+        in
+          mapAttrs mkSpec cfg.certs;
+      };
+
+      #TODO: Get rid of kube-addon-manager in the future for the following reasons
+      # - it is basically just a shell script wrapped around kubectl
+      # - it assumes that it is clusterAdmin or can gain clusterAdmin rights through serviceAccount
+      # - it is designed to be used with k8s system components only
+      # - it would be better with a more Nix-oriented way of managing addons
+      systemd.services.kube-addon-manager = mkIf top.addonManager.enable (mkMerge [{
+        environment.KUBECONFIG = with cfg.certs.addonManager;
+          top.lib.mkKubeConfig "addon-manager" {
+            server = top.apiserverAddress;
+            certFile = cert;
+            keyFile = key;
+          };
+        }
+
+        (optionalAttrs (top.addonManager.bootstrapAddons != {}) {
+          serviceConfig.PermissionsStartOnly = true;
+          preStart = with pkgs;
+          let
+            files = mapAttrsToList (n: v: writeText "${n}.json" (builtins.toJSON v))
+              top.addonManager.bootstrapAddons;
+          in
+          ''
+            export KUBECONFIG=${clusterAdminKubeconfig}
+            ${top.package}/bin/kubectl apply -f ${concatStringsSep " \\\n -f " files}
+          '';
+        })]);
+
+      environment.etc.${cfg.etcClusterAdminKubeconfig}.source = mkIf (cfg.etcClusterAdminKubeconfig != null)
+        clusterAdminKubeconfig;
+
+      environment.systemPackages = mkIf (top.kubelet.enable || top.proxy.enable) [
+      (pkgs.writeScriptBin "nixos-kubernetes-node-join" ''
+        set -e
+        exec 1>&2
+
+        if [ $# -gt 0 ]; then
+          echo "Usage: $(basename $0)"
+          echo ""
+          echo "No args. Apitoken must be provided on stdin."
+          echo "To get the apitoken, execute: 'sudo cat ${certmgrAPITokenPath}' on the master node."
+          exit 1
+        fi
+
+        if [ $(id -u) != 0 ]; then
+          echo "Run as root please."
+          exit 1
+        fi
+
+        read -r token
+        if [ ''${#token} != ${toString cfsslAPITokenLength} ]; then
+          echo "Token must be of length ${toString cfsslAPITokenLength}."
+          exit 1
+        fi
+
+        echo $token > ${certmgrAPITokenPath}
+        chmod 600 ${certmgrAPITokenPath}
+
+        echo "Restarting certmgr..." >&1
+        systemctl restart certmgr
+
+        echo "Waiting for certs to appear..." >&1
+
+        ${optionalString top.kubelet.enable ''
+          while [ ! -f ${cfg.certs.kubelet.cert} ]; do sleep 1; done
+          echo "Restarting kubelet..." >&1
+          systemctl restart kubelet
+        ''}
+
+        ${optionalString top.proxy.enable ''
+          while [ ! -f ${cfg.certs.kubeProxyClient.cert} ]; do sleep 1; done
+          echo "Restarting kube-proxy..." >&1
+          systemctl restart kube-proxy
+        ''}
+
+        ${optionalString top.flannel.enable ''
+          while [ ! -f ${cfg.certs.flannelClient.cert} ]; do sleep 1; done
+          echo "Restarting flannel..." >&1
+          systemctl restart flannel
+        ''}
+
+        echo "Node joined successfully"
+      '')];
+
+      # isolate etcd on loopback at the master node
+      # easyCerts doesn't support multimaster clusters anyway atm.
+      services.etcd = with cfg.certs.etcd; {
+        listenClientUrls = ["https://127.0.0.1:2379"];
+        listenPeerUrls = ["https://127.0.0.1:2380"];
+        advertiseClientUrls = ["https://etcd.local:2379"];
+        initialCluster = ["${top.masterAddress}=https://etcd.local:2380"];
+        initialAdvertisePeerUrls = ["https://etcd.local:2380"];
+        certFile = mkDefault cert;
+        keyFile = mkDefault key;
+        trustedCaFile = mkDefault caCert;
+      };
+      networking.extraHosts = mkIf (config.services.etcd.enable) ''
+        127.0.0.1 etcd.${top.addons.dns.clusterDomain} etcd.local
+      '';
+
+      services.flannel = with cfg.certs.flannelClient; {
+        kubeconfig = top.lib.mkKubeConfig "flannel" {
+          server = top.apiserverAddress;
+          certFile = cert;
+          keyFile = key;
+        };
+      };
+
+      services.kubernetes = {
+
+        apiserver = mkIf top.apiserver.enable (with cfg.certs.apiServer; {
+          etcd = with cfg.certs.apiserverEtcdClient; {
+            servers = ["https://etcd.local:2379"];
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+            caFile = mkDefault caCert;
+          };
+          clientCaFile = mkDefault caCert;
+          tlsCertFile = mkDefault cert;
+          tlsKeyFile = mkDefault key;
+          serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.cert;
+          serviceAccountSigningKeyFile = mkDefault cfg.certs.serviceAccount.key;
+          kubeletClientCaFile = mkDefault caCert;
+          kubeletClientCertFile = mkDefault cfg.certs.apiserverKubeletClient.cert;
+          kubeletClientKeyFile = mkDefault cfg.certs.apiserverKubeletClient.key;
+          proxyClientCertFile = mkDefault cfg.certs.apiserverProxyClient.cert;
+          proxyClientKeyFile = mkDefault cfg.certs.apiserverProxyClient.key;
+        });
+        controllerManager = mkIf top.controllerManager.enable {
+          serviceAccountKeyFile = mkDefault cfg.certs.serviceAccount.key;
+          rootCaFile = cfg.certs.controllerManagerClient.caCert;
+          kubeconfig = with cfg.certs.controllerManagerClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        scheduler = mkIf top.scheduler.enable {
+          kubeconfig = with cfg.certs.schedulerClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        kubelet = mkIf top.kubelet.enable {
+          clientCaFile = mkDefault cfg.certs.kubelet.caCert;
+          tlsCertFile = mkDefault cfg.certs.kubelet.cert;
+          tlsKeyFile = mkDefault cfg.certs.kubelet.key;
+          kubeconfig = with cfg.certs.kubeletClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+        proxy = mkIf top.proxy.enable {
+          kubeconfig = with cfg.certs.kubeProxyClient; {
+            certFile = mkDefault cert;
+            keyFile = mkDefault key;
+          };
+        };
+      };
+    });
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/proxy.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/proxy.nix
new file mode 100644
index 000000000000..015784f7e311
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/proxy.nix
@@ -0,0 +1,102 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  otop = options.services.kubernetes;
+  cfg = top.proxy;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "kubernetes" "proxy" "address" ] ["services" "kubernetes" "proxy" "bindAddress"])
+  ];
+
+  ###### interface
+  options.services.kubernetes.proxy = with lib.types; {
+
+    bindAddress = mkOption {
+      description = lib.mdDoc "Kubernetes proxy listening address.";
+      default = "0.0.0.0";
+      type = str;
+    };
+
+    enable = mkEnableOption (lib.mdDoc "Kubernetes proxy");
+
+    extraOpts = mkOption {
+      description = lib.mdDoc "Kubernetes proxy extra command line options.";
+      default = "";
+      type = separatedString " ";
+    };
+
+    featureGates = mkOption {
+      description = lib.mdDoc "List set of feature gates";
+      default = top.featureGates;
+      defaultText = literalExpression "config.${otop.featureGates}";
+      type = listOf str;
+    };
+
+    hostname = mkOption {
+      description = lib.mdDoc "Kubernetes proxy hostname override.";
+      default = config.networking.hostName;
+      defaultText = literalExpression "config.networking.hostName";
+      type = str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes proxy";
+
+    verbosity = mkOption {
+      description = lib.mdDoc ''
+        Optional glog verbosity level for logging statements. See
+        <https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-proxy = {
+      description = "Kubernetes Proxy Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      path = with pkgs; [ iptables conntrack-tools ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-proxy \
+          --bind-address=${cfg.bindAddress} \
+          ${optionalString (top.clusterCidr!=null)
+            "--cluster-cidr=${top.clusterCidr}"} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --hostname-override=${cfg.hostname} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-proxy" cfg.kubeconfig} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        Restart = "on-failure";
+        RestartSec = 5;
+      };
+      unitConfig = {
+        StartLimitIntervalSec = 0;
+      };
+    };
+
+    services.kubernetes.proxy.hostname = with config.networking; mkDefault hostName;
+
+    services.kubernetes.pki.certs = {
+      kubeProxyClient = top.lib.mkCert {
+        name = "kube-proxy-client";
+        CN = "system:kube-proxy";
+        action = "systemctl restart kube-proxy.service";
+      };
+    };
+
+    services.kubernetes.proxy.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/kubernetes/scheduler.nix b/nixpkgs/nixos/modules/services/cluster/kubernetes/scheduler.nix
new file mode 100644
index 000000000000..f31a92f36840
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/kubernetes/scheduler.nix
@@ -0,0 +1,101 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  top = config.services.kubernetes;
+  otop = options.services.kubernetes;
+  cfg = top.scheduler;
+in
+{
+  ###### interface
+  options.services.kubernetes.scheduler = with lib.types; {
+
+    address = mkOption {
+      description = lib.mdDoc "Kubernetes scheduler listening address.";
+      default = "127.0.0.1";
+      type = str;
+    };
+
+    enable = mkEnableOption (lib.mdDoc "Kubernetes scheduler");
+
+    extraOpts = mkOption {
+      description = lib.mdDoc "Kubernetes scheduler extra command line options.";
+      default = "";
+      type = separatedString " ";
+    };
+
+    featureGates = mkOption {
+      description = lib.mdDoc "List set of feature gates";
+      default = top.featureGates;
+      defaultText = literalExpression "config.${otop.featureGates}";
+      type = listOf str;
+    };
+
+    kubeconfig = top.lib.mkKubeConfigOptions "Kubernetes scheduler";
+
+    leaderElect = mkOption {
+      description = lib.mdDoc "Whether to start leader election before executing main loop.";
+      type = bool;
+      default = true;
+    };
+
+    port = mkOption {
+      description = lib.mdDoc "Kubernetes scheduler listening port.";
+      default = 10251;
+      type = port;
+    };
+
+    verbosity = mkOption {
+      description = lib.mdDoc ''
+        Optional glog verbosity level for logging statements. See
+        <https://github.com/kubernetes/community/blob/master/contributors/devel/logging.md>
+      '';
+      default = null;
+      type = nullOr int;
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.kube-scheduler = {
+      description = "Kubernetes Scheduler Service";
+      wantedBy = [ "kubernetes.target" ];
+      after = [ "kube-apiserver.service" ];
+      serviceConfig = {
+        Slice = "kubernetes.slice";
+        ExecStart = ''${top.package}/bin/kube-scheduler \
+          --bind-address=${cfg.address} \
+          ${optionalString (cfg.featureGates != [])
+            "--feature-gates=${concatMapStringsSep "," (feature: "${feature}=true") cfg.featureGates}"} \
+          --kubeconfig=${top.lib.mkKubeConfig "kube-scheduler" cfg.kubeconfig} \
+          --leader-elect=${boolToString cfg.leaderElect} \
+          --secure-port=${toString cfg.port} \
+          ${optionalString (cfg.verbosity != null) "--v=${toString cfg.verbosity}"} \
+          ${cfg.extraOpts}
+        '';
+        WorkingDirectory = top.dataDir;
+        User = "kubernetes";
+        Group = "kubernetes";
+        Restart = "on-failure";
+        RestartSec = 5;
+      };
+      unitConfig = {
+        StartLimitIntervalSec = 0;
+      };
+    };
+
+    services.kubernetes.pki.certs = {
+      schedulerClient = top.lib.mkCert {
+        name = "kube-scheduler-client";
+        CN = "system:kube-scheduler";
+        action = "systemctl restart kube-scheduler.service";
+      };
+    };
+
+    services.kubernetes.scheduler.kubeconfig.server = mkDefault top.apiserverAddress;
+  };
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/pacemaker/default.nix b/nixpkgs/nixos/modules/services/cluster/pacemaker/default.nix
new file mode 100644
index 000000000000..0f37f4b754fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/pacemaker/default.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.pacemaker;
+in
+{
+  # interface
+  options.services.pacemaker = {
+    enable = mkEnableOption (lib.mdDoc "pacemaker");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.pacemaker;
+      defaultText = literalExpression "pkgs.pacemaker";
+      description = lib.mdDoc "Package that should be used for pacemaker.";
+    };
+  };
+
+  # implementation
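+  # Note: pacemaker only runs on top of a corosync cluster; a matching
+  # services.corosync configuration (cluster name and node list) must be
+  # provided, which the assertion below enforces.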
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = config.services.corosync.enable;
+      message = ''
+        Enabling services.pacemaker requires a services.corosync configuration.
+      '';
+    } ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    # required by pacemaker
+    users.users.hacluster = {
+      isSystemUser = true;
+      group = "pacemaker";
+      home = "/var/lib/pacemaker";
+    };
+    users.groups.pacemaker = {};
+
+    systemd.tmpfiles.rules = [
+      "d /var/log/pacemaker 0700 hacluster pacemaker -"
+    ];
+
+    systemd.packages = [ cfg.package ];
+    systemd.services.pacemaker = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        StateDirectory = "pacemaker";
+        StateDirectoryMode = "0700";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/patroni/default.nix b/nixpkgs/nixos/modules/services/cluster/patroni/default.nix
new file mode 100644
index 000000000000..5ab016a9f59f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/patroni/default.nix
@@ -0,0 +1,265 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.patroni;
+  defaultUser = "patroni";
+  defaultGroup = "patroni";
+  format = pkgs.formats.yaml { };
+
+  configFileName = "patroni-${cfg.scope}-${cfg.name}.yaml";
+  configFile = format.generate configFileName cfg.settings;
+in
+{
+  options.services.patroni = {
+
+    enable = mkEnableOption (lib.mdDoc "Patroni");
+
+    postgresqlPackage = mkOption {
+      type = types.package;
+      example = literalExpression "pkgs.postgresql_14";
+      description = mdDoc ''
+        PostgreSQL package to use.
+        Plugins can be enabled like this `pkgs.postgresql_14.withPackages (p: [ p.pg_safeupdate p.postgis ])`.
+      '';
+    };
+
+    postgresqlDataDir = mkOption {
+      type = types.path;
+      defaultText = literalExpression ''"/var/lib/postgresql/''${config.services.patroni.postgresqlPackage.psqlSchema}"'';
+      example = "/var/lib/postgresql/14";
+      default = "/var/lib/postgresql/${cfg.postgresqlPackage.psqlSchema}";
+      description = mdDoc ''
+        The data directory for PostgreSQL. If left as the default value
+        this directory will automatically be created before the PostgreSQL server starts, otherwise
+        the sysadmin is responsible for ensuring the directory exists with appropriate ownership
+        and permissions.
+      '';
+    };
+
+    postgresqlPort = mkOption {
+      type = types.port;
+      default = 5432;
+      description = mdDoc ''
+        The port on which PostgreSQL listens.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      example = "postgres";
+      description = mdDoc ''
+        The user for the service. If left as the default value this user will automatically be created,
+        otherwise the sysadmin is responsible for ensuring the user exists.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = defaultGroup;
+      example = "postgres";
+      description = mdDoc ''
+        The group for the service. If left as the default value this group will automatically be created,
+        otherwise the sysadmin is responsible for ensuring the group exists.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/patroni";
+      description = mdDoc ''
+        Folder where Patroni data will be written, used by Raft as well if enabled.
+      '';
+    };
+
+    scope = mkOption {
+      type = types.str;
+      example = "cluster1";
+      description = mdDoc ''
+        Cluster name.
+      '';
+    };
+
+    name = mkOption {
+      type = types.str;
+      example = "node1";
+      description = mdDoc ''
+        The name of the host. Must be unique for the cluster.
+      '';
+    };
+
+    namespace = mkOption {
+      type = types.str;
+      default = "/service";
+      description = mdDoc ''
+        Path within the configuration store where Patroni will keep information about the cluster.
+      '';
+    };
+
+    nodeIp = mkOption {
+      type = types.str;
+      example = "192.168.1.1";
+      description = mdDoc ''
+        IP address of this node.
+      '';
+    };
+
+    otherNodesIps = mkOption {
+      type = types.listOf types.str;
+      example = [ "192.168.1.2" "192.168.1.3" ];
+      description = mdDoc ''
+        IP addresses of the other nodes.
+      '';
+    };
+
+    restApiPort = mkOption {
+      type = types.port;
+      default = 8008;
+      description = mdDoc ''
+        The port on which Patroni's REST API listens.
+      '';
+    };
+
+    raft = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        This will configure Patroni to use its own RAFT implementation instead of using a dedicated DCS.
+      '';
+    };
+
+    raftPort = mkOption {
+      type = types.port;
+      default = 5010;
+      description = mdDoc ''
+        The port on which RAFT listens.
+      '';
+    };
+
+    softwareWatchdog = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        This will configure Patroni to use the software watchdog built into the Linux kernel
+        as described in the [documentation](https://patroni.readthedocs.io/en/latest/watchdog.html#setting-up-software-watchdog-on-linux).
+      '';
+    };
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      description = mdDoc ''
+        The primary patroni configuration. See the [documentation](https://patroni.readthedocs.io/en/latest/SETTINGS.html)
+        for possible values.
+        Secrets should be passed in by using the `environmentFiles` option.
+      '';
+    };
+
+    environmentFiles = mkOption {
+      type = with types; attrsOf (nullOr (oneOf [ str path package ]));
+      default = { };
+      example = {
+        PATRONI_REPLICATION_PASSWORD = "/secret/file";
+        PATRONI_SUPERUSER_PASSWORD = "/secret/file";
+      };
+      description = mdDoc "Environment variables made available to Patroni as files content, useful for providing secrets from files.";
+    };
+  };
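+  # Illustrative three-node setup (addresses and secret paths are hypothetical):
+  #   services.patroni = {
+  #     enable = true;
+  #     postgresqlPackage = pkgs.postgresql_14;
+  #     scope = "cluster1";
+  #     name = "node1";
+  #     nodeIp = "192.168.1.1";
+  #     otherNodesIps = [ "192.168.1.2" "192.168.1.3" ];
+  #     raft = true;
+  #     environmentFiles.PATRONI_REPLICATION_PASSWORD = "/run/keys/patroni-replication";
+  #     environmentFiles.PATRONI_SUPERUSER_PASSWORD = "/run/keys/patroni-superuser";
+  #   };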
+
+  config = mkIf cfg.enable {
+
+    services.patroni.settings = {
+      scope = cfg.scope;
+      name = cfg.name;
+      namespace = cfg.namespace;
+
+      restapi = {
+        listen = "${cfg.nodeIp}:${toString cfg.restApiPort}";
+        connect_address = "${cfg.nodeIp}:${toString cfg.restApiPort}";
+      };
+
+      raft = mkIf cfg.raft {
+        data_dir = "${cfg.dataDir}/raft";
+        self_addr = "${cfg.nodeIp}:${toString cfg.raftPort}";
+        partner_addrs = map (ip: "${ip}:${toString cfg.raftPort}") cfg.otherNodesIps;
+      };
+
+      postgresql = {
+        listen = "${cfg.nodeIp}:${toString cfg.postgresqlPort}";
+        connect_address = "${cfg.nodeIp}:${toString cfg.postgresqlPort}";
+        data_dir = cfg.postgresqlDataDir;
+        bin_dir = "${cfg.postgresqlPackage}/bin";
+        pgpass = "${cfg.dataDir}/pgpass";
+      };
+
+      watchdog = mkIf cfg.softwareWatchdog {
+        mode = "required";
+        device = "/dev/watchdog";
+        safety_margin = 5;
+      };
+    };
+
+
+    users = {
+      users = mkIf (cfg.user == defaultUser) {
+        patroni = {
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      };
+      groups = mkIf (cfg.group == defaultGroup) {
+        patroni = { };
+      };
+    };
+
+    systemd.services = {
+      patroni = {
+        description = "Runners to orchestrate a high-availability PostgreSQL";
+
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+
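+        # Each environmentFiles entry is exported as an environment variable whose
+        # value is read from the referenced file at start time, so secrets do not
+        # have to be embedded in the generated YAML configuration.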
+        script = ''
+          ${concatStringsSep "\n" (attrValues (mapAttrs (name: path: ''export ${name}="$(< ${escapeShellArg path})"'') cfg.environmentFiles))}
+          exec ${pkgs.patroni}/bin/patroni ${configFile}
+        '';
+
+        serviceConfig = mkMerge [
+          {
+            User = cfg.user;
+            Group = cfg.group;
+            Type = "simple";
+            Restart = "on-failure";
+            TimeoutSec = 30;
+            ExecReload = "${pkgs.coreutils}/bin/kill -s HUP $MAINPID";
+            KillMode = "process";
+          }
+          (mkIf (cfg.postgresqlDataDir == "/var/lib/postgresql/${cfg.postgresqlPackage.psqlSchema}" && cfg.dataDir == "/var/lib/patroni") {
+            StateDirectory = "patroni patroni/raft postgresql postgresql/${cfg.postgresqlPackage.psqlSchema}";
+            StateDirectoryMode = "0750";
+          })
+        ];
+      };
+    };
+
+    boot.kernelModules = mkIf cfg.softwareWatchdog [ "softdog" ];
+
+    services.udev.extraRules = mkIf cfg.softwareWatchdog ''
+      KERNEL=="watchdog", OWNER="${cfg.user}", GROUP="${cfg.group}", MODE="0600"
+    '';
+
+    environment.systemPackages = [
+      pkgs.patroni
+      cfg.postgresqlPackage
+      (mkIf cfg.raft pkgs.python310Packages.pysyncobj)
+    ];
+
+    environment.etc."${configFileName}".source = configFile;
+
+    environment.sessionVariables = {
+      PATRONICTL_CONFIG_FILE = "/etc/${configFileName}";
+    };
+  };
+
+  meta.maintainers = [ maintainers.phfroidmont ];
+}
diff --git a/nixpkgs/nixos/modules/services/cluster/spark/default.nix b/nixpkgs/nixos/modules/services/cluster/spark/default.nix
new file mode 100644
index 000000000000..bf39c5537332
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/cluster/spark/default.nix
@@ -0,0 +1,162 @@
+{config, pkgs, lib, ...}:
+let
+  cfg = config.services.spark;
+in
+with lib;
+{
+  options = {
+    services.spark = {
+      master = {
+        enable = mkEnableOption (lib.mdDoc "Spark master service");
+        bind = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Address the spark master binds to.";
+          default = "127.0.0.1";
+          example = "0.0.0.0";
+        };
+        restartIfChanged  = mkOption {
+          type = types.bool;
+          description = lib.mdDoc ''
+            Automatically restart master service on config change.
+            This can be set to false to defer restarts on clusters running critical applications.
+            Please consider the security implications of inadvertently running an older version,
+            and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+          '';
+          default = true;
+        };
+        extraEnvironment = mkOption {
+          type = types.attrsOf types.str;
+          description = lib.mdDoc "Extra environment variables to pass to spark master. See spark-standalone documentation.";
+          default = {};
+          example = {
+            SPARK_MASTER_WEBUI_PORT = "8181";
+            SPARK_MASTER_OPTS = "-Dspark.deploy.defaultCores=5";
+          };
+        };
+      };
+      worker = {
+        enable = mkEnableOption (lib.mdDoc "Spark worker service");
+        workDir = mkOption {
+          type = types.path;
+          description = lib.mdDoc "Spark worker work dir.";
+          default = "/var/lib/spark";
+        };
+        master = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Address of the spark master.";
+          default = "127.0.0.1:7077";
+        };
+        restartIfChanged  = mkOption {
+          type = types.bool;
+          description = lib.mdDoc ''
+            Automatically restart worker service on config change.
+            This can be set to false to defer restarts on clusters running critical applications.
+            Please consider the security implications of inadvertently running an older version,
+            and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+          '';
+          default = true;
+        };
+        extraEnvironment = mkOption {
+          type = types.attrsOf types.str;
+          description = lib.mdDoc "Extra environment variables to pass to spark worker.";
+          default = {};
+          example = {
+            SPARK_WORKER_CORES = "5";
+            SPARK_WORKER_MEMORY = "2g";
+          };
+        };
+      };
+      confDir = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Spark configuration directory. Spark will use the configuration files (spark-defaults.conf, spark-env.sh, log4j.properties, etc) from this directory.";
+        default = "${cfg.package}/lib/${cfg.package.untarDir}/conf";
+        defaultText = literalExpression ''"''${package}/lib/''${package.untarDir}/conf"'';
+      };
+      logDir = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Spark log directory.";
+        default = "/var/log/spark";
+      };
+      package = mkOption {
+        type = types.package;
+        description = lib.mdDoc "Spark package.";
+        default = pkgs.spark;
+        defaultText = literalExpression "pkgs.spark";
+        example = literalExpression ''pkgs.spark.overrideAttrs (super: rec {
+          pname = "spark";
+          version = "2.4.4";
+
+          src = pkgs.fetchzip {
+            url    = "mirror://apache/spark/"''${pname}-''${version}/''${pname}-''${version}-bin-without-hadoop.tgz";
+            sha256 = "1a9w5k0207fysgpxx6db3a00fs5hdc2ncx99x4ccy2s0v5ndc66g";
+          };
+        })'';
+      };
+    };
+  };
+  config = lib.mkIf (cfg.worker.enable || cfg.master.enable) {
+    environment.systemPackages = [ cfg.package ];
+    systemd = {
+      services = {
+        spark-master = lib.mkIf cfg.master.enable {
+          path = with pkgs; [ procps openssh nettools ];
+          description = "spark master service.";
+          after = [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+          restartIfChanged = cfg.master.restartIfChanged;
+          environment = cfg.master.extraEnvironment // {
+            SPARK_MASTER_HOST = cfg.master.bind;
+            SPARK_CONF_DIR = cfg.confDir;
+            SPARK_LOG_DIR = cfg.logDir;
+          };
+          serviceConfig = {
+            Type = "forking";
+            User = "spark";
+            Group = "spark";
+            WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
+            ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-master.sh";
+            ExecStop  = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-master.sh";
+            TimeoutSec = 300;
+            StartLimitBurst = 10;
+            Restart = "always";
+          };
+        };
+        spark-worker = lib.mkIf cfg.worker.enable {
+          path = with pkgs; [ procps openssh nettools rsync ];
+          description = "spark master service.";
+          after = [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+          restartIfChanged = cfg.worker.restartIfChanged;
+          environment = cfg.worker.extraEnvironment // {
+            SPARK_MASTER = cfg.worker.master;
+            SPARK_CONF_DIR = cfg.confDir;
+            SPARK_LOG_DIR = cfg.logDir;
+            SPARK_WORKER_DIR = cfg.worker.workDir;
+          };
+          serviceConfig = {
+            Type = "forking";
+            User = "spark";
+            WorkingDirectory = "${cfg.package}/lib/${cfg.package.untarDir}";
+            ExecStart = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/start-worker.sh spark://${cfg.worker.master}";
+            ExecStop  = "${cfg.package}/lib/${cfg.package.untarDir}/sbin/stop-worker.sh";
+            TimeoutSec = 300;
+            StartLimitBurst = 10;
+            Restart = "always";
+          };
+        };
+      };
+      tmpfiles.rules = [
+        "d '${cfg.worker.workDir}' - spark spark - -"
+        "d '${cfg.logDir}' - spark spark - -"
+      ];
+    };
+    users = {
+      users.spark = {
+        description = "spark user.";
+        group = "spark";
+        isSystemUser = true;
+      };
+      groups.spark = { };
+    };
+  };
+}
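
A minimal usage sketch for the module above; the option names are real, but the bind address, web UI port, and master address are illustrative values only:

{ ... }:
{
  services.spark = {
    master = {
      enable = true;
      bind = "0.0.0.0";                                   # listen on all interfaces (example value)
      extraEnvironment.SPARK_MASTER_WEBUI_PORT = "8181";  # values are strings, per the option type
    };
    worker = {
      enable = true;
      master = "127.0.0.1:7077";                          # the module default, spelled out
    };
  };
}
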
diff --git a/nixpkgs/nixos/modules/services/computing/boinc/client.nix b/nixpkgs/nixos/modules/services/computing/boinc/client.nix
new file mode 100644
index 000000000000..ff16795c8208
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/computing/boinc/client.nix
@@ -0,0 +1,119 @@
+{config, lib, pkgs, ...}:
+
+with lib;
+
+let
+  cfg = config.services.boinc;
+  allowRemoteGuiRpcFlag = optionalString cfg.allowRemoteGuiRpc "--allow_remote_gui_rpc";
+
+  fhsEnv = pkgs.buildFHSEnv {
+    name = "boinc-fhs-env";
+    targetPkgs = pkgs': [ cfg.package ] ++ cfg.extraEnvPackages;
+    runScript = "/bin/boinc_client";
+  };
+  fhsEnvExecutable = "${fhsEnv}/bin/${fhsEnv.name}";
+
+in
+  {
+    options.services.boinc = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the BOINC distributed computing client. If this
+          option is set to true, the boinc_client daemon will be run as a
+          background service. The boinccmd command can be used to control the
+          daemon.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.boinc;
+        defaultText = literalExpression "pkgs.boinc";
+        example = literalExpression "pkgs.boinc-headless";
+        description = lib.mdDoc ''
+          Which BOINC package to use.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/boinc";
+        description = lib.mdDoc ''
+          The directory in which to store BOINC's configuration and data files.
+        '';
+      };
+
+      allowRemoteGuiRpc = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If set to true, any remote host can connect to and control this BOINC
+          client (subject to password authentication). If instead set to false,
+          only the hosts listed in {var}`dataDir`/remote_hosts.cfg will be allowed to
+          connect.
+
+          See also: <https://boinc.berkeley.edu/wiki/Controlling_BOINC_remotely#Remote_access>
+        '';
+      };
+
+      extraEnvPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.virtualbox ]";
+        description = lib.mdDoc ''
+          Additional packages to make available in the environment in which
+          BOINC will run. Common choices are:
+
+          - {var}`pkgs.virtualbox`:
+            The VirtualBox virtual machine framework. Required by some BOINC
+            projects, such as ATLAS@home.
+          - {var}`pkgs.ocl-icd`:
+            OpenCL infrastructure library. Required by BOINC projects that
+            use OpenCL, in addition to a device-specific OpenCL driver.
+          - {var}`pkgs.linuxPackages.nvidia_x11`:
+            Provides CUDA libraries. Required by BOINC projects that use
+            CUDA. Note that this requires an NVIDIA graphics device to be
+            present on the system.
+
+            Also provides OpenCL drivers for NVIDIA GPUs;
+            {var}`pkgs.ocl-icd` is also needed in this case.
+        '';
+      };
+    };
+
+    config = mkIf cfg.enable {
+      environment.systemPackages = [cfg.package];
+
+      users.users.boinc = {
+        group = "boinc";
+        createHome = false;
+        description = "BOINC Client";
+        home = cfg.dataDir;
+        isSystemUser = true;
+      };
+      users.groups.boinc = {};
+
+      systemd.tmpfiles.rules = [
+        "d '${cfg.dataDir}' - boinc boinc - -"
+      ];
+
+      systemd.services.boinc = {
+        description = "BOINC Client";
+        after = ["network.target"];
+        wantedBy = ["multi-user.target"];
+        script = ''
+          ${fhsEnvExecutable} --dir ${cfg.dataDir} ${allowRemoteGuiRpcFlag}
+        '';
+        serviceConfig = {
+          User = "boinc";
+          Nice = 10;
+        };
+      };
+    };
+
+    meta = {
+      maintainers = with lib.maintainers; [kierdavis];
+    };
+  }
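
A usage sketch for the BOINC module; the headless package and the OpenCL library are illustrative choices taken from the option examples above, not requirements:

{ pkgs, ... }:
{
  services.boinc = {
    enable = true;
    package = pkgs.boinc-headless;        # headless client variant
    extraEnvPackages = [ pkgs.ocl-icd ];  # only needed for projects that use OpenCL
    allowRemoteGuiRpc = false;            # restrict control to hosts listed in remote_hosts.cfg
  };
}
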
diff --git a/nixpkgs/nixos/modules/services/computing/foldingathome/client.nix b/nixpkgs/nixos/modules/services/computing/foldingathome/client.nix
new file mode 100644
index 000000000000..1229e5ac987e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/computing/foldingathome/client.nix
@@ -0,0 +1,91 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.foldingathome;
+
+  args =
+    ["--team" "${toString cfg.team}"]
+    ++ lib.optionals (cfg.user != null) ["--user" cfg.user]
+    ++ cfg.extraArgs
+    ;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "foldingAtHome" ] [ "services" "foldingathome" ])
+    (mkRenamedOptionModule [ "services" "foldingathome" "nickname" ] [ "services" "foldingathome" "user" ])
+    (mkRemovedOptionModule [ "services" "foldingathome" "config" ] ''
+      Use <literal>services.foldingathome.extraArgs</literal> instead.
+    '')
+  ];
+  options.services.foldingathome = {
+    enable = mkEnableOption (lib.mdDoc "Folding@home client");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.fahclient;
+      defaultText = literalExpression "pkgs.fahclient";
+      description = lib.mdDoc ''
+        Which Folding@home client to use.
+      '';
+    };
+
+    user = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The user associated with the reported computation results. This will
+        be used in the ranking statistics.
+      '';
+    };
+
+    team = mkOption {
+      type = types.int;
+      default = 236565;
+      description = lib.mdDoc ''
+        The team ID associated with the reported computation results. This
+        will be used in the ranking statistics.
+
+        By default, the NixOS Folding@home team ID is used.
+      '';
+    };
+
+    daemonNiceLevel = mkOption {
+      type = types.ints.between (-20) 19;
+      default = 0;
+      description = lib.mdDoc ''
+        Daemon process priority for FAHClient.
+        0 is the default Unix process priority, 19 is the lowest.
+      '';
+    };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra startup options for the FAHClient. Run
+        `FAHClient --help` to find all the available options.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.foldingathome = {
+      description = "Folding@home client";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        exec ${cfg.package}/bin/FAHClient ${lib.escapeShellArgs args}
+      '';
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "foldingathome";
+        Nice = cfg.daemonNiceLevel;
+        WorkingDirectory = "%S/foldingathome";
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ zimbatm ];
+  };
+}
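
A usage sketch for the Folding@home module; the donor name is hypothetical and the team ID shown is simply the module default:

{ ... }:
{
  services.foldingathome = {
    enable = true;
    user = "example-donor";   # hypothetical name used in the ranking statistics
    team = 236565;            # module default (NixOS team), shown for clarity
    daemonNiceLevel = 19;     # illustrative: run the client at the lowest priority
  };
}
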
diff --git a/nixpkgs/nixos/modules/services/computing/slurm/slurm.nix b/nixpkgs/nixos/modules/services/computing/slurm/slurm.nix
new file mode 100644
index 000000000000..1cbe7b893f83
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/computing/slurm/slurm.nix
@@ -0,0 +1,442 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.slurm;
+  opt = options.services.slurm;
+  # configuration file can be generated by https://slurm.schedmd.com/configurator.html
+
+  defaultUser = "slurm";
+
+  configFile = pkgs.writeTextDir "slurm.conf"
+    ''
+      ClusterName=${cfg.clusterName}
+      StateSaveLocation=${cfg.stateSaveLocation}
+      SlurmUser=${cfg.user}
+      ${optionalString (cfg.controlMachine != null) "controlMachine=${cfg.controlMachine}"}
+      ${optionalString (cfg.controlAddr != null) "controlAddr=${cfg.controlAddr}"}
+      ${toString (map (x: "NodeName=${x}\n") cfg.nodeName)}
+      ${toString (map (x: "PartitionName=${x}\n") cfg.partitionName)}
+      PlugStackConfig=${plugStackConfig}/plugstack.conf
+      ProctrackType=${cfg.procTrackType}
+      ${cfg.extraConfig}
+    '';
+
+  plugStackConfig = pkgs.writeTextDir "plugstack.conf"
+    ''
+      ${optionalString cfg.enableSrunX11 "optional ${pkgs.slurm-spank-x11}/lib/x11.so"}
+      ${cfg.extraPlugstackConfig}
+    '';
+
+  cgroupConfig = pkgs.writeTextDir "cgroup.conf"
+   ''
+     ${cfg.extraCgroupConfig}
+   '';
+
+  slurmdbdConf = pkgs.writeText "slurmdbd.conf"
+   ''
+     DbdHost=${cfg.dbdserver.dbdHost}
+     SlurmUser=${cfg.user}
+     StorageType=accounting_storage/mysql
+     StorageUser=${cfg.dbdserver.storageUser}
+     ${cfg.dbdserver.extraConfig}
+   '';
+
+  # slurm expects some additional config files to be
+  # in the same directory as slurm.conf
+  etcSlurm = pkgs.symlinkJoin {
+    name = "etc-slurm";
+    paths = [ configFile cgroupConfig plugStackConfig ] ++ cfg.extraConfigPaths;
+  };
+in
+
+{
+
+  ###### interface
+
+  meta.maintainers = [ maintainers.markuskowa ];
+
+  options = {
+
+    services.slurm = {
+
+      server = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to enable the slurm control daemon.
+            Note that the standard authentication method is "munge".
+            The "munge" service needs to be provided with a password file in order for
+            slurm to work properly (see `services.munge.password`).
+          '';
+        };
+      };
+
+      dbdserver = {
+        enable = mkEnableOption (lib.mdDoc "SlurmDBD service");
+
+        dbdHost = mkOption {
+          type = types.str;
+          default = config.networking.hostName;
+          defaultText = literalExpression "config.networking.hostName";
+          description = lib.mdDoc ''
+            Hostname of the machine where `slurmdbd`
+            is running (i.e. name returned by `hostname -s`).
+          '';
+        };
+
+        storageUser = mkOption {
+          type = types.str;
+          default = cfg.user;
+          defaultText = literalExpression "config.${opt.user}";
+          description = lib.mdDoc ''
+            Database user name.
+          '';
+        };
+
+        storagePassFile = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            Path to file with database password. The content of this will be used to
+            create the password for the `StoragePass` option.
+          '';
+        };
+
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc ''
+            Extra configuration for `slurmdbd.conf` See also:
+            {manpage}`slurmdbd.conf(8)`.
+          '';
+        };
+      };
+
+      client = {
+        enable = mkEnableOption (lib.mdDoc "slurm client daemon");
+      };
+
+      enableStools = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to provide a slurm.conf file.
+          Enable this option if you do not run a slurm daemon on this host
+          (i.e. `server.enable` and `client.enable` are `false`)
+          but you still want to run slurm commands from this host.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.slurm.override { enableX11 = ! cfg.enableSrunX11; };
+        defaultText = literalExpression "pkgs.slurm";
+        example = literalExpression "pkgs.slurm-full";
+        description = lib.mdDoc ''
+          The package to use for slurm binaries.
+        '';
+      };
+
+      controlMachine = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = null;
+        description = lib.mdDoc ''
+          The short hostname of the machine where SLURM control functions are
+          executed (i.e. the name returned by the command "hostname -s", use "tux001"
+          rather than "tux001.my.com").
+        '';
+      };
+
+      controlAddr = mkOption {
+        type = types.nullOr types.str;
+        default = cfg.controlMachine;
+        defaultText = literalExpression "config.${opt.controlMachine}";
+        example = null;
+        description = lib.mdDoc ''
+          Name by which the ControlMachine should be referred to when
+          establishing a communications path.
+        '';
+      };
+
+      clusterName = mkOption {
+        type = types.str;
+        default = "default";
+        example = "myCluster";
+        description = lib.mdDoc ''
+          Necessary to distinguish accounting records in a multi-cluster environment.
+        '';
+      };
+
+      nodeName = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = literalExpression ''[ "linux[1-32] CPUs=1 State=UNKNOWN" ];'';
+        description = lib.mdDoc ''
+          Name that SLURM uses to refer to a node (or base partition for BlueGene
+          systems). Typically this would be the string that "/bin/hostname -s"
+          returns. Note that the node's parameters have to be written after the name.
+        '';
+      };
+
+      partitionName = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = literalExpression ''[ "debug Nodes=linux[1-32] Default=YES MaxTime=INFINITE State=UP" ];'';
+        description = lib.mdDoc ''
+          Name by which the partition may be referenced. Note that the
+          partition's parameters have to be written after the name.
+        '';
+      };
+
+      enableSrunX11 = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If enabled, srun will accept the option "--x11" to allow for X11 forwarding
+          from within an interactive session or a batch job. This activates the
+          slurm-spank-x11 module. Note that this option also enables
+          {option}`services.openssh.forwardX11` on the client.
+
+          This option requires slurm to be compiled without native X11 support.
+          The default behavior is to re-compile the slurm package with native X11
+          support disabled if this option is set to true.
+
+          To use the native X11 support add `PrologFlags=X11` in {option}`extraConfig`.
+          Note that this method will only work with RSA SSH host keys.
+        '';
+      };
+
+      procTrackType = mkOption {
+        type = types.str;
+        default = "proctrack/linuxproc";
+        description = lib.mdDoc ''
+          Plugin to be used for process tracking on a job step basis.
+          The slurmd daemon uses this mechanism to identify all processes
+          which are children of processes it spawns for a user job step.
+        '';
+      };
+
+      stateSaveLocation = mkOption {
+        type = types.str;
+        default = "/var/spool/slurmctld";
+        description = lib.mdDoc ''
+          Directory into which the Slurm controller, slurmctld, saves its state.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = defaultUser;
+        description = lib.mdDoc ''
+          Set this option when you want to run the slurmctld daemon
+          as something else than the default slurm user "slurm".
+          Note that the UID of this user needs to be the same
+          on all nodes.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration options that will be added verbatim at
+          the end of the slurm configuration file.
+        '';
+      };
+
+      extraPlugstackConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration that will be added to the end of `plugstack.conf`.
+        '';
+      };
+
+      extraCgroupConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra configuration for `cgroup.conf`. This file is
+          used when `procTrackType=proctrack/cgroup`.
+        '';
+      };
+
+      extraConfigPaths = mkOption {
+        type = with types; listOf path;
+        default = [];
+        description = lib.mdDoc ''
+          Slurm expects config files for plugins in the same path
+          as `slurm.conf`. Add extra nix store
+          paths that should be merged into same directory as
+          `slurm.conf`.
+        '';
+      };
+
+      etcSlurm = mkOption {
+        type = types.path;
+        internal = true;
+        default = etcSlurm;
+        defaultText = literalMD ''
+          Directory created from generated config files and
+          `config.${opt.extraConfigPaths}`.
+        '';
+        description = lib.mdDoc ''
+          Path to directory with slurm config files. This option is set by default from the
+          Slurm module and is meant to make the Slurm config file available to other modules.
+        '';
+      };
+
+    };
+
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "slurm" "dbdserver" "storagePass" ] ''
+      This option has been removed so that the database password is not exposed via the nix store.
+      Use services.slurm.dbdserver.storagePassFile to provide the database password.
+    '')
+    (mkRemovedOptionModule [ "services" "slurm" "dbdserver" "configFile" ] ''
+      This option has been removed. Use services.slurm.dbdserver.storagePassFile
+      and services.slurm.dbdserver.extraConfig instead.
+    '')
+  ];
+
+  ###### implementation
+
+  config =
+    let
+      wrappedSlurm = pkgs.stdenv.mkDerivation {
+        name = "wrappedSlurm";
+
+        builder = pkgs.writeText "builder.sh" ''
+          source $stdenv/setup
+          mkdir -p $out/bin
+          find  ${getBin cfg.package}/bin -type f -executable | while read EXE
+          do
+            exename="$(basename $EXE)"
+            wrappername="$out/bin/$exename"
+            cat > "$wrappername" <<EOT
+          #!/bin/sh
+          if [ -z "\$SLURM_CONF" ]
+          then
+            SLURM_CONF="${cfg.etcSlurm}/slurm.conf" "$EXE" "\$@"
+          else
+            "$EXE" "\$@"
+          fi
+          EOT
+            chmod +x "$wrappername"
+          done
+
+          mkdir -p $out/share
+          ln -s ${getBin cfg.package}/share/man $out/share/man
+        '';
+      };
+
+  in mkIf ( cfg.enableStools ||
+            cfg.client.enable ||
+            cfg.server.enable ||
+            cfg.dbdserver.enable ) {
+
+    environment.systemPackages = [ wrappedSlurm ];
+
+    services.munge.enable = mkDefault true;
+
+    # use a static uid as default to ensure it is the same on all nodes
+    users.users.slurm = mkIf (cfg.user == defaultUser) {
+      name = defaultUser;
+      group = "slurm";
+      uid = config.ids.uids.slurm;
+    };
+
+    users.groups.slurm.gid = config.ids.uids.slurm;
+
+    systemd.services.slurmd = mkIf (cfg.client.enable) {
+      path = with pkgs; [ wrappedSlurm coreutils ]
+        ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
+
+      wantedBy = [ "multi-user.target" ];
+      after = [
+        "systemd-tmpfiles-clean.service"
+        "munge.service"
+        "network-online.target"
+        "remote-fs.target"
+      ];
+      wants = [ "network-online.target" ];
+
+      serviceConfig = {
+        Type = "forking";
+        KillMode = "process";
+        ExecStart = "${wrappedSlurm}/bin/slurmd";
+        PIDFile = "/run/slurmd.pid";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LimitMEMLOCK = "infinity";
+        Delegate = "Yes";
+      };
+    };
+
+    systemd.tmpfiles.rules = mkIf cfg.client.enable [
+      "d /var/spool/slurmd 755 root root -"
+    ];
+
+    services.openssh.settings.X11Forwarding = mkIf cfg.client.enable (mkDefault true);
+
+    systemd.services.slurmctld = mkIf (cfg.server.enable) {
+      path = with pkgs; [ wrappedSlurm munge coreutils ]
+        ++ lib.optional cfg.enableSrunX11 slurm-spank-x11;
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "munged.service" ];
+      requires = [ "munged.service" ];
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${wrappedSlurm}/bin/slurmctld";
+        PIDFile = "/run/slurmctld.pid";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+
+      preStart = ''
+        mkdir -p ${cfg.stateSaveLocation}
+        chown -R ${cfg.user}:slurm ${cfg.stateSaveLocation}
+      '';
+    };
+
+    systemd.services.slurmdbd = let
+      # slurm strips the last component off the path
+      configPath = "$RUNTIME_DIRECTORY/slurmdbd.conf";
+    in mkIf (cfg.dbdserver.enable) {
+      path = with pkgs; [ wrappedSlurm munge coreutils ];
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "munged.service" "mysql.service" ];
+      requires = [ "munged.service" "mysql.service" ];
+
+      preStart = ''
+        install -m 600 -o ${cfg.user} -T ${slurmdbdConf} ${configPath}
+        ${optionalString (cfg.dbdserver.storagePassFile != null) ''
+          echo "StoragePass=$(cat ${cfg.dbdserver.storagePassFile})" \
+            >> ${configPath}
+        ''}
+      '';
+
+      script = ''
+        export SLURM_CONF=${configPath}
+        exec ${cfg.package}/bin/slurmdbd -D
+      '';
+
+      serviceConfig = {
+        RuntimeDirectory = "slurmdbd";
+        Type = "simple";
+        PIDFile = "/run/slurmdbd.pid";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+    };
+
+  };
+
+}
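
A sketch of a single machine acting as both Slurm controller and compute node, using the options defined above; the hostname, node line, and partition line are illustrative, and the munge key still has to be provisioned out of band:

{ ... }:
{
  services.slurm = {
    server.enable = true;
    client.enable = true;
    clusterName = "example";
    controlMachine = "node-0";                      # hypothetical short hostname
    nodeName = [ "node-0 CPUs=4 State=UNKNOWN" ];
    partitionName = [ "debug Nodes=node-0 Default=YES MaxTime=INFINITE State=UP" ];
  };
}
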
diff --git a/nixpkgs/nixos/modules/services/computing/torque/mom.nix b/nixpkgs/nixos/modules/services/computing/torque/mom.nix
new file mode 100644
index 000000000000..5dd41429bf81
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/computing/torque/mom.nix
@@ -0,0 +1,63 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.torque.mom;
+  torque = pkgs.torque;
+
+  momConfig = pkgs.writeText "torque-mom-config" ''
+    $pbsserver ${cfg.serverNode}
+    $logevent 225
+  '';
+
+in
+{
+  options = {
+
+    services.torque.mom = {
+      enable = mkEnableOption (lib.mdDoc "torque computing node");
+
+      serverNode = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Hostname running pbs server.";
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.torque ];
+
+    systemd.services.torque-mom-init = {
+      path = with pkgs; [ torque util-linux procps inetutils ];
+
+      script = ''
+        pbs_mkdirs -v aux
+        pbs_mkdirs -v mom
+        hostname > /var/spool/torque/server_name
+        cp -v ${momConfig} /var/spool/torque/mom_priv/config
+      '';
+
+      serviceConfig.Type = "oneshot";
+      unitConfig.ConditionPathExists = "!/var/spool/torque";
+    };
+
+    systemd.services.torque-mom = {
+      path = [ torque ];
+
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "torque-mom-init.service" ];
+      after = [ "torque-mom-init.service" "network.target" ];
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${torque}/bin/pbs_mom";
+        PIDFile = "/var/spool/torque/mom_priv/mom.lock";
+      };
+    };
+
+  };
+}
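
A usage sketch for a compute node; the server hostname is a placeholder:

{ ... }:
{
  services.torque.mom = {
    enable = true;
    serverNode = "pbs-server.example.org";  # hypothetical host running pbs_server
  };
}
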
diff --git a/nixpkgs/nixos/modules/services/computing/torque/server.nix b/nixpkgs/nixos/modules/services/computing/torque/server.nix
new file mode 100644
index 000000000000..02f20fb37c10
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/computing/torque/server.nix
@@ -0,0 +1,96 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.torque.server;
+  torque = pkgs.torque;
+in
+{
+  options = {
+
+    services.torque.server = {
+
+      enable = mkEnableOption (lib.mdDoc "torque server");
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.torque ];
+
+    systemd.services.torque-server-init = {
+      path = with pkgs; [ torque util-linux procps inetutils ];
+
+      script = ''
+        tmpsetup=$(mktemp -t torque-XXXX)
+        cp -p ${torque}/bin/torque.setup $tmpsetup
+        sed -i $tmpsetup -e 's/pbs_server -t create/pbs_server -f -t create/'
+
+        pbs_mkdirs -v aux
+        pbs_mkdirs -v server
+        hostname > /var/spool/torque/server_name
+        cp -prv ${torque}/var/spool/torque/* /var/spool/torque/
+        $tmpsetup root
+
+        sleep 1
+        rm -f $tmpsetup
+        kill $(pgrep pbs_server) 2>/dev/null
+        kill $(pgrep trqauthd) 2>/dev/null
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+
+      unitConfig = {
+        ConditionPathExists = "!/var/spool/torque";
+      };
+    };
+
+    systemd.services.trqauthd = {
+      path = [ torque ];
+
+      requires = [ "torque-server-init.service" ];
+      after = [ "torque-server-init.service" ];
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${torque}/bin/trqauthd";
+      };
+    };
+
+    systemd.services.torque-server = {
+      path = [ torque ];
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "torque-scheduler.service" "trqauthd.service" ];
+      before = [ "trqauthd.service" ];
+      requires = [ "torque-server-init.service" ];
+      after = [ "torque-server-init.service" "network.target" ];
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${torque}/bin/pbs_server";
+        ExecStop = "${torque}/bin/qterm";
+        PIDFile = "/var/spool/torque/server_priv/server.lock";
+      };
+    };
+
+    systemd.services.torque-scheduler = {
+      path = [ torque ];
+
+      requires = [ "torque-server-init.service" ];
+      after = [ "torque-server-init.service" ];
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${torque}/bin/pbs_sched";
+        PIDFile = "/var/spool/torque/sched_priv/sched.lock";
+      };
+    };
+
+  };
+}
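
On the head node the module is a single switch; the init unit seeds /var/spool/torque on first start, after which pbs_server, pbs_sched and trqauthd run as separate units:

{ ... }:
{
  services.torque.server.enable = true;
}
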
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix b/nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix
new file mode 100644
index 000000000000..9a89745055f0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/buildbot/master.nix
@@ -0,0 +1,313 @@
+# NixOS module for Buildbot continuous integration server.
+
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.buildbot-master;
+  opt = options.services.buildbot-master;
+
+  package = pkgs.python3.pkgs.toPythonModule cfg.package;
+  python = package.pythonModule;
+
+  escapeStr = escape [ "'" ];
+
+  defaultMasterCfg = pkgs.writeText "master.cfg" ''
+    from buildbot.plugins import *
+    ${cfg.extraImports}
+    factory = util.BuildFactory()
+    c = BuildmasterConfig = dict(
+     workers       = [${concatStringsSep "," cfg.workers}],
+     protocols     = { 'pb': {'port': ${toString cfg.pbPort} } },
+     title         = '${escapeStr cfg.title}',
+     titleURL      = '${escapeStr cfg.titleUrl}',
+     buildbotURL   = '${escapeStr cfg.buildbotUrl}',
+     db            = dict(db_url='${escapeStr cfg.dbUrl}'),
+     www           = dict(port=${toString cfg.port}),
+     change_source = [ ${concatStringsSep "," cfg.changeSource} ],
+     schedulers    = [ ${concatStringsSep "," cfg.schedulers} ],
+     builders      = [ ${concatStringsSep "," cfg.builders} ],
+     services      = [ ${concatStringsSep "," cfg.reporters} ],
+     configurators = [ ${concatStringsSep "," cfg.configurators} ],
+    )
+    for step in [ ${concatStringsSep "," cfg.factorySteps} ]:
+      factory.addStep(step)
+
+    ${cfg.extraConfig}
+  '';
+
+  tacFile = pkgs.writeText "buildbot-master.tac" ''
+    import os
+
+    from twisted.application import service
+    from buildbot.master import BuildMaster
+
+    basedir = '${cfg.buildbotDir}'
+
+    configfile = '${cfg.masterCfg}'
+
+    # Default umask for server
+    umask = None
+
+    # note: this line is matched against to check that this is a buildmaster
+    # directory; do not edit it.
+    application = service.Application('buildmaster')
+
+    m = BuildMaster(basedir, configfile, umask)
+    m.setServiceParent(application)
+  '';
+
+in {
+  options = {
+    services.buildbot-master = {
+
+      factorySteps = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "Factory Steps";
+        default = [];
+        example = [
+          "steps.Git(repourl='https://github.com/buildbot/pyflakes.git', mode='incremental')"
+          "steps.ShellCommand(command=['trial', 'pyflakes'])"
+        ];
+      };
+
+      changeSource = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "List of Change Sources.";
+        default = [];
+        example = [
+          "changes.GitPoller('https://github.com/buildbot/pyflakes.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
+        ];
+      };
+
+      configurators = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "Configurator Steps, see https://docs.buildbot.net/latest/manual/configuration/configurators.html";
+        default = [];
+        example = [
+          "util.JanitorConfigurator(logHorizon=timedelta(weeks=4), hour=12, dayOfWeek=6)"
+        ];
+      };
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the Buildbot continuous integration server.";
+      };
+
+      extraConfig = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Extra configuration to append to master.cfg";
+        default = "c['buildbotNetUsageData'] = None";
+      };
+
+      extraImports = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Extra python imports to prepend to master.cfg";
+        default = "";
+        example = "from buildbot.process.project import Project";
+      };
+
+      masterCfg = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Optionally pass master.cfg path. Other options in this configuration will be ignored.";
+        default = defaultMasterCfg;
+        defaultText = literalMD ''generated configuration file'';
+        example = "/etc/nixos/buildbot/master.cfg";
+      };
+
+      schedulers = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "List of Schedulers.";
+        default = [
+          "schedulers.SingleBranchScheduler(name='all', change_filter=util.ChangeFilter(branch='master'), treeStableTimer=None, builderNames=['runtests'])"
+          "schedulers.ForceScheduler(name='force',builderNames=['runtests'])"
+        ];
+      };
+
+      builders = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "List of Builders.";
+        default = [
+          "util.BuilderConfig(name='runtests',workernames=['example-worker'],factory=factory)"
+        ];
+      };
+
+      workers = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "List of Workers.";
+        default = [ "worker.Worker('example-worker', 'pass')" ];
+      };
+
+      reporters = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc "List of reporter objects used to present build status to various users.";
+      };
+
+      user = mkOption {
+        default = "buildbot";
+        type = types.str;
+        description = lib.mdDoc "User the buildbot server should execute under.";
+      };
+
+      group = mkOption {
+        default = "buildbot";
+        type = types.str;
+        description = lib.mdDoc "Primary group of buildbot user.";
+      };
+
+      extraGroups = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "List of extra groups that the buildbot user should be a part of.";
+      };
+
+      home = mkOption {
+        default = "/home/buildbot";
+        type = types.path;
+        description = lib.mdDoc "Buildbot home directory.";
+      };
+
+      buildbotDir = mkOption {
+        default = "${cfg.home}/master";
+        defaultText = literalExpression ''"''${config.${opt.home}}/master"'';
+        type = types.path;
+        description = lib.mdDoc "Specifies the Buildbot directory.";
+      };
+
+      pbPort = mkOption {
+        default = 9989;
+        type = types.either types.str types.int;
+        example = "'tcp:9990:interface=127.0.0.1'";
+        description = lib.mdDoc ''
+          The buildmaster will listen on a TCP port of your choosing
+          for connections from workers.
+          It can also use this port for connections from remote Change Sources,
+          status clients, and debug tools.
+          This port should be visible to the outside world, and you’ll need to tell
+          your worker admins about your choice.
+          If put in (single) quotes, this can also be used as a connection string,
+          as defined in the [ConnectionStrings guide](https://twistedmatrix.com/documents/current/core/howto/endpoints.html).
+        '';
+      };
+
+      listenAddress = mkOption {
+        default = "0.0.0.0";
+        type = types.str;
+        description = lib.mdDoc "Specifies the bind address on which the buildbot HTTP interface listens.";
+      };
+
+      buildbotUrl = mkOption {
+        default = "http://localhost:8010/";
+        type = types.str;
+        description = lib.mdDoc "Specifies the Buildbot URL.";
+      };
+
+      title = mkOption {
+        default = "Buildbot";
+        type = types.str;
+        description = lib.mdDoc "Specifies the Buildbot Title.";
+      };
+
+      titleUrl = mkOption {
+        default = "Buildbot";
+        type = types.str;
+        description = lib.mdDoc "Specifies the Buildbot TitleURL.";
+      };
+
+      dbUrl = mkOption {
+        default = "sqlite:///state.sqlite";
+        type = types.str;
+        description = lib.mdDoc "Specifies the database connection string.";
+      };
+
+      port = mkOption {
+        default = 8010;
+        type = types.port;
+        description = lib.mdDoc "Specifies port number on which the buildbot HTTP interface listens.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.buildbot-full;
+        defaultText = literalExpression "pkgs.buildbot-full";
+        description = lib.mdDoc "Package to use for buildbot.";
+        example = literalExpression "pkgs.buildbot";
+      };
+
+      packages = mkOption {
+        default = [ pkgs.git ];
+        defaultText = literalExpression "[ pkgs.git ]";
+        type = types.listOf types.package;
+        description = lib.mdDoc "Packages to add to PATH for the buildbot process.";
+      };
+
+      pythonPackages = mkOption {
+        type = types.functionTo (types.listOf types.package);
+        default = pythonPackages: with pythonPackages; [ ];
+        defaultText = literalExpression "pythonPackages: with pythonPackages; [ ]";
+        description = lib.mdDoc "Packages to add the to the PYTHONPATH of the buildbot process.";
+        example = literalExpression "pythonPackages: with pythonPackages; [ requests ]";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups = optionalAttrs (cfg.group == "buildbot") {
+      buildbot = { };
+    };
+
+    users.users = optionalAttrs (cfg.user == "buildbot") {
+      buildbot = {
+        description = "Buildbot User.";
+        isNormalUser = true;
+        createHome = true;
+        inherit (cfg) home group extraGroups;
+        useDefaultShell = true;
+      };
+    };
+
+    systemd.services.buildbot-master = {
+      description = "Buildbot Continuous Integration Server.";
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = cfg.packages ++ cfg.pythonPackages python.pkgs;
+      environment.PYTHONPATH = "${python.withPackages (self: cfg.pythonPackages self ++ [ package ])}/${python.sitePackages}";
+
+      preStart = ''
+        mkdir -vp "${cfg.buildbotDir}"
+        # Link the tac file so buildbot command line tools recognize the directory
+        ln -sf "${tacFile}" "${cfg.buildbotDir}/buildbot.tac"
+        ${cfg.package}/bin/buildbot create-master --db "${cfg.dbUrl}" "${cfg.buildbotDir}"
+        rm -f buildbot.tac.new master.cfg.sample
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.home;
+        # NOTE: call twistd directly with stdout logging for systemd
+        ExecStart = "${python.pkgs.twisted}/bin/twistd -o --nodaemon --pidfile= --logfile - --python ${cfg.buildbotDir}/buildbot.tac";
+        # To reload on upgrade, set the following in your configuration:
+        # systemd.services.buildbot-master.reloadIfChanged = true;
+        ExecReload = [
+          "${pkgs.coreutils}/bin/ln -sf ${tacFile} ${cfg.buildbotDir}/buildbot.tac"
+          "${pkgs.coreutils}/bin/kill -HUP $MAINPID"
+        ];
+      };
+    };
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "buildbot-master" "bpPort" ] [ "services" "buildbot-master" "pbPort" ])
+    (mkRemovedOptionModule [ "services" "buildbot-master" "status" ] ''
+      Since Buildbot 0.9.0, status targets are deprecated and ignored.
+      Review your configuration and migrate to reporters (available at services.buildbot-master.reporters).
+    '')
+  ];
+
+  meta.maintainers = with lib.maintainers; [ mic92 lopsided98 ];
+}
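
A usage sketch for the master; the URL and title are placeholders, and the default worker, scheduler and builder definitions from the options above are left untouched:

{ pkgs, ... }:
{
  services.buildbot-master = {
    enable = true;
    buildbotUrl = "http://ci.example.org:8010/";  # hypothetical externally reachable URL
    title = "Example CI";
    packages = [ pkgs.git pkgs.nix ];             # extra tools on the build PATH (illustrative)
  };
}
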
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix b/nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix
new file mode 100644
index 000000000000..7e78b8935f81
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/buildbot/worker.nix
@@ -0,0 +1,199 @@
+# NixOS module for Buildbot Worker.
+
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.buildbot-worker;
+  opt = options.services.buildbot-worker;
+
+  package = pkgs.python3.pkgs.toPythonModule cfg.package;
+  python = package.pythonModule;
+
+  tacFile = pkgs.writeText "aur-buildbot-worker.tac" ''
+    import os
+    from io import open
+
+    from buildbot_worker.bot import Worker
+    from twisted.application import service
+
+    basedir = '${cfg.buildbotDir}'
+
+    # note: this line is matched against to check that this is a worker
+    # directory; do not edit it.
+    application = service.Application('buildbot-worker')
+
+    master_url_split = '${cfg.masterUrl}'.split(':')
+    buildmaster_host = master_url_split[0]
+    port = int(master_url_split[1])
+    workername = '${cfg.workerUser}'
+
+    with open('${cfg.workerPassFile}', 'r', encoding='utf-8') as passwd_file:
+        passwd = passwd_file.read().strip('\r\n')
+    keepalive = ${toString cfg.keepalive}
+    umask = None
+    maxdelay = 300
+    numcpus = None
+    allow_shutdown = None
+
+    s = Worker(buildmaster_host, port, workername, passwd, basedir,
+               keepalive, umask=umask, maxdelay=maxdelay,
+               numcpus=numcpus, allow_shutdown=allow_shutdown)
+    s.setServiceParent(application)
+  '';
+
+in {
+  options = {
+    services.buildbot-worker = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the Buildbot Worker.";
+      };
+
+      user = mkOption {
+        default = "bbworker";
+        type = types.str;
+        description = lib.mdDoc "User the buildbot Worker should execute under.";
+      };
+
+      group = mkOption {
+        default = "bbworker";
+        type = types.str;
+        description = lib.mdDoc "Primary group of buildbot Worker user.";
+      };
+
+      extraGroups = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "List of extra groups that the Buildbot Worker user should be a part of.";
+      };
+
+      home = mkOption {
+        default = "/home/bbworker";
+        type = types.path;
+        description = lib.mdDoc "Buildbot home directory.";
+      };
+
+      buildbotDir = mkOption {
+        default = "${cfg.home}/worker";
+        defaultText = literalExpression ''"''${config.${opt.home}}/worker"'';
+        type = types.path;
+        description = lib.mdDoc "Specifies the Buildbot directory.";
+      };
+
+      workerUser = mkOption {
+        default = "example-worker";
+        type = types.str;
+        description = lib.mdDoc "Specifies the Buildbot Worker user.";
+      };
+
+      workerPass = mkOption {
+        default = "pass";
+        type = types.str;
+        description = lib.mdDoc "Specifies the Buildbot Worker password.";
+      };
+
+      workerPassFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc "File used to store the Buildbot Worker password";
+      };
+
+      hostMessage = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Description of this worker";
+      };
+
+      adminMessage = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Name of the administrator of this worker";
+      };
+
+      masterUrl = mkOption {
+        default = "localhost:9989";
+        type = types.str;
+        description = lib.mdDoc "Specifies the Buildbot Worker connection string.";
+      };
+
+      keepalive = mkOption {
+        default = 600;
+        type = types.int;
+        description = lib.mdDoc ''
+          This is a number that indicates how frequently keepalive messages should be sent
+          from the worker to the buildmaster, expressed in seconds.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.buildbot-worker;
+        defaultText = literalExpression "pkgs.python3Packages.buildbot-worker";
+        description = lib.mdDoc "Package to use for buildbot worker.";
+        example = literalExpression "pkgs.python2Packages.buildbot-worker";
+      };
+
+      packages = mkOption {
+        default = with pkgs; [ git ];
+        defaultText = literalExpression "[ pkgs.git ]";
+        type = types.listOf types.package;
+        description = lib.mdDoc "Packages to add to PATH for the buildbot process.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.buildbot-worker.workerPassFile = mkDefault (pkgs.writeText "buildbot-worker-password" cfg.workerPass);
+
+    users.groups = optionalAttrs (cfg.group == "bbworker") {
+      bbworker = { };
+    };
+
+    users.users = optionalAttrs (cfg.user == "bbworker") {
+      bbworker = {
+        description = "Buildbot Worker User.";
+        isNormalUser = true;
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+      };
+    };
+
+    systemd.services.buildbot-worker = {
+      description = "Buildbot Worker.";
+      after = [ "network.target" "buildbot-master.service" ];
+      wantedBy = [ "multi-user.target" ];
+      path = cfg.packages;
+      environment.PYTHONPATH = "${python.withPackages (p: [ package ])}/${python.sitePackages}";
+
+      preStart = ''
+        mkdir -vp "${cfg.buildbotDir}/info"
+        ${optionalString (cfg.hostMessage != null) ''
+          ln -sf "${pkgs.writeText "buildbot-worker-host" cfg.hostMessage}" "${cfg.buildbotDir}/info/host"
+        ''}
+        ${optionalString (cfg.adminMessage != null) ''
+          ln -sf "${pkgs.writeText "buildbot-worker-admin" cfg.adminMessage}" "${cfg.buildbotDir}/info/admin"
+        ''}
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.home;
+
+        # NOTE: call twistd directly with stdout logging for systemd
+        ExecStart = "${python.pkgs.twisted}/bin/twistd --nodaemon --pidfile= --logfile - --python ${tacFile}";
+      };
+
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ];
+
+}
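
A usage sketch for a worker pointed at the master configured above; the password file path is hypothetical and must contain the password registered on the master (the default master configuration expects 'example-worker' / 'pass'):

{ ... }:
{
  services.buildbot-worker = {
    enable = true;
    masterUrl = "localhost:9989";                           # module default, shown explicitly
    workerUser = "example-worker";                          # must match a worker.Worker() entry on the master
    workerPassFile = "/var/keys/buildbot-worker-password";  # hypothetical secret kept outside the Nix store
  };
}
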
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/buildkite-agents.nix b/nixpkgs/nixos/modules/services/continuous-integration/buildkite-agents.nix
new file mode 100644
index 000000000000..a35ca4168074
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/buildkite-agents.nix
@@ -0,0 +1,219 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.buildkite-agents;
+
+  hooksDir = hooks:
+    let
+      mkHookEntry = name: text: ''
+        ln --symbolic ${pkgs.writeShellApplication { inherit name text; }}/bin/${name} $out/${name}
+      '';
+    in
+    pkgs.runCommandLocal "buildkite-agent-hooks" { } ''
+      mkdir $out
+      ${lib.concatStringsSep "\n" (lib.mapAttrsToList mkHookEntry hooks)}
+    '';
+
+  buildkiteOptions = { name ? "", config, ... }: {
+    options = {
+      enable = lib.mkOption {
+        default = true;
+        type = lib.types.bool;
+        description = lib.mdDoc "Whether to enable this buildkite agent";
+      };
+
+      package = lib.mkOption {
+        default = pkgs.buildkite-agent;
+        defaultText = lib.literalExpression "pkgs.buildkite-agent";
+        description = lib.mdDoc "Which buildkite-agent derivation to use";
+        type = lib.types.package;
+      };
+
+      dataDir = lib.mkOption {
+        default = "/var/lib/buildkite-agent-${name}";
+        description = lib.mdDoc "The workdir for the agent";
+        type = lib.types.str;
+      };
+
+      runtimePackages = lib.mkOption {
+        default = [ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ];
+        defaultText = lib.literalExpression "[ pkgs.bash pkgs.gnutar pkgs.gzip pkgs.git pkgs.nix ]";
+        description = lib.mdDoc "Add programs to the buildkite-agent environment";
+        type = lib.types.listOf lib.types.package;
+      };
+
+      tokenPath = lib.mkOption {
+        type = lib.types.path;
+        description = lib.mdDoc ''
+          The token from your Buildkite "Agents" page.
+
+          A run-time path to the token file, which is supposed to be provisioned
+          outside of Nix store.
+        '';
+      };
+
+      name = lib.mkOption {
+        type = lib.types.str;
+        default = "%hostname-${name}-%n";
+        description = lib.mdDoc ''
+          The name of the agent as seen in the buildkite dashboard.
+        '';
+      };
+
+      tags = lib.mkOption {
+        type = lib.types.attrsOf (lib.types.either lib.types.str (lib.types.listOf lib.types.str));
+        default = { };
+        example = { queue = "default"; docker = "true"; ruby2 = "true"; };
+        description = lib.mdDoc ''
+          Tags for the agent.
+        '';
+      };
+
+      extraConfig = lib.mkOption {
+        type = lib.types.lines;
+        default = "";
+        example = "debug=true";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to the configuration file.
+        '';
+      };
+
+      privateSshKeyPath = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        ## maximum care is taken so that secrets (ssh keys and the CI token)
+        ## don't end up in the Nix store.
+        apply = final: if final == null then null else toString final;
+
+        description = lib.mdDoc ''
+          OpenSSH private key
+
+          A run-time path to the key file, which is supposed to be provisioned
+          outside of Nix store.
+        '';
+      };
+
+      hooks = lib.mkOption {
+        type = lib.types.attrsOf lib.types.lines;
+        default = { };
+        example = lib.literalExpression ''
+          {
+            environment = '''
+              export SECRET_VAR=`head -1 /run/keys/secret`
+            ''';
+          }'';
+        description = lib.mdDoc ''
+          "Agent" hooks to install.
+          See <https://buildkite.com/docs/agent/v3/hooks> for possible options.
+        '';
+      };
+
+      hooksPath = lib.mkOption {
+        type = lib.types.path;
+        default = hooksDir config.hooks;
+        defaultText = lib.literalMD "generated from {option}`services.buildkite-agents.<name>.hooks`";
+        description = lib.mdDoc ''
+          Path to the directory storing the hooks.
+          Consider using {option}`services.buildkite-agents.<name>.hooks.<name>`
+          instead.
+        '';
+      };
+
+      shell = lib.mkOption {
+        type = lib.types.str;
+        default = "${pkgs.bash}/bin/bash -e -c";
+        defaultText = lib.literalExpression ''"''${pkgs.bash}/bin/bash -e -c"'';
+        description = lib.mdDoc ''
+          Command that buildkite-agent 3 will execute when it spawns a shell.
+        '';
+      };
+    };
+  };
+  enabledAgents = lib.filterAttrs (n: v: v.enable) cfg;
+  mapAgents = function: lib.mkMerge (lib.mapAttrsToList function enabledAgents);
+in
+{
+  options.services.buildkite-agents = lib.mkOption {
+    type = lib.types.attrsOf (lib.types.submodule buildkiteOptions);
+    default = { };
+    description = lib.mdDoc ''
+      Attribute set of buildkite agents.
+      The attribute key is combined with the hostname and a unique integer to
+      create the final agent name. This can be overridden by setting the `name`
+      attribute.
+    '';
+  };
+
+  config.users.users = mapAgents (name: cfg: {
+    "buildkite-agent-${name}" = {
+      name = "buildkite-agent-${name}";
+      home = cfg.dataDir;
+      createHome = true;
+      description = "Buildkite agent user";
+      extraGroups = [ "keys" ];
+      isSystemUser = true;
+      group = "buildkite-agent-${name}";
+    };
+  });
+  config.users.groups = mapAgents (name: cfg: {
+    "buildkite-agent-${name}" = { };
+  });
+
+  config.systemd.services = mapAgents (name: cfg: {
+    "buildkite-agent-${name}" = {
+      description = "Buildkite Agent";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = cfg.runtimePackages ++ [ cfg.package pkgs.coreutils ];
+      environment = config.networking.proxy.envVars // {
+        HOME = cfg.dataDir;
+        NIX_REMOTE = "daemon";
+      };
+
+      ## NB: maximum care is taken so that secrets (ssh keys and the CI token)
+      ##     don't end up in the Nix store.
+      preStart =
+        let
+          sshDir = "${cfg.dataDir}/.ssh";
+          tagStr = name: value:
+            if lib.isList value
+            then lib.concatStringsSep "," (builtins.map (v: "${name}=${v}") value)
+            else "${name}=${value}";
+          tagsStr = lib.concatStringsSep "," (lib.mapAttrsToList tagStr cfg.tags);
+        in
+        lib.optionalString (cfg.privateSshKeyPath != null) ''
+          mkdir -m 0700 -p "${sshDir}"
+          install -m600 "${toString cfg.privateSshKeyPath}" "${sshDir}/id_rsa"
+        '' + ''
+          cat > "${cfg.dataDir}/buildkite-agent.cfg" <<EOF
+          token="$(cat ${toString cfg.tokenPath})"
+          name="${cfg.name}"
+          shell="${cfg.shell}"
+          tags="${tagsStr}"
+          build-path="${cfg.dataDir}/builds"
+          hooks-path="${cfg.hooksPath}"
+          ${cfg.extraConfig}
+          EOF
+        '';
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/buildkite-agent start --config ${cfg.dataDir}/buildkite-agent.cfg";
+        User = "buildkite-agent-${name}";
+        RestartSec = 5;
+        Restart = "on-failure";
+        TimeoutSec = 10;
+        # set a long timeout to give buildkite-agent a chance to finish current builds
+        TimeoutStopSec = "2 min";
+        KillMode = "mixed";
+      };
+    };
+  });
+
+  config.assertions = mapAgents (name: cfg: [{
+    assertion = cfg.hooksPath != hooksDir cfg.hooks -> cfg.hooks == { };
+    message = ''
+      Options `services.buildkite-agents.${name}.hooksPath' and
+      `services.buildkite-agents.${name}.hooks.<name>' are mutually exclusive.
+    '';
+  }]);
+}
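
A usage sketch defining one agent instance named "builder" (the instance name is arbitrary); the token path is a hypothetical secret provisioned outside the Nix store, and the runtime packages shown are just the module defaults:

{ pkgs, ... }:
{
  services.buildkite-agents.builder = {
    tokenPath = "/run/keys/buildkite-agent-token";
    tags = { queue = "default"; nix = "true"; };                # illustrative tag set
    runtimePackages = with pkgs; [ bash gnutar gzip git nix ];  # module default, spelled out
    hooks.environment = ''
      # illustrative hook body
      export EXAMPLE_VAR=1
    '';
  };
}
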
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/gitea-actions-runner.nix b/nixpkgs/nixos/modules/services/continuous-integration/gitea-actions-runner.nix
new file mode 100644
index 000000000000..d8d25898e294
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/gitea-actions-runner.nix
@@ -0,0 +1,257 @@
+{ config
+, lib
+, pkgs
+, utils
+, ...
+}:
+
+let
+  inherit (lib)
+    any
+    attrValues
+    concatStringsSep
+    escapeShellArg
+    hasInfix
+    hasSuffix
+    optionalAttrs
+    optionals
+    literalExpression
+    mapAttrs'
+    mkEnableOption
+    mkOption
+    mkPackageOptionMD
+    mkIf
+    nameValuePair
+    types
+  ;
+
+  inherit (utils)
+    escapeSystemdPath
+  ;
+
+  cfg = config.services.gitea-actions-runner;
+
+  settingsFormat = pkgs.formats.yaml { };
+
+  # Check whether any runner instance label requires a container runtime
+  # Empty label strings result in the upstream defined defaultLabels, which require docker
+  # https://gitea.com/gitea/act_runner/src/tag/v0.1.5/internal/app/cmd/register.go#L93-L98
+  hasDockerScheme = instance:
+    instance.labels == [] || any (label: hasInfix ":docker:" label) instance.labels;
+  wantsContainerRuntime = any hasDockerScheme (attrValues cfg.instances);
+
+  hasHostScheme = instance: any (label: hasSuffix ":host" label) instance.labels;
+
+  # provide shorthands for whether container runtimes are enabled
+  hasDocker = config.virtualisation.docker.enable;
+  hasPodman = config.virtualisation.podman.enable;
+
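+  # true when exactly one of `token` and `tokenFile` is set for the given instance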
+  tokenXorTokenFile = instance:
+    (instance.token == null && instance.tokenFile != null) ||
+    (instance.token != null && instance.tokenFile == null);
+in
+{
+  meta.maintainers = with lib.maintainers; [
+    hexa
+  ];
+
+  options.services.gitea-actions-runner = with types; {
+    package = mkPackageOptionMD pkgs "gitea-actions-runner" { };
+
+    instances = mkOption {
+      default = {};
+      description = lib.mdDoc ''
+        Gitea Actions Runner instances.
+      '';
+      type = attrsOf (submodule {
+        options = {
+          enable = mkEnableOption (lib.mdDoc "Gitea Actions Runner instance");
+
+          name = mkOption {
+            type = str;
+            example = literalExpression "config.networking.hostName";
+            description = lib.mdDoc ''
+              The name under which the runner instance registers itself with the Gitea/Forgejo instance.
+            '';
+          };
+
+          url = mkOption {
+            type = str;
+            example = "https://forge.example.com";
+            description = lib.mdDoc ''
+              Base URL of your Gitea/Forgejo instance.
+            '';
+          };
+
+          token = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc ''
+              Plain token to register at the configured Gitea/Forgejo instance.
+            '';
+          };
+
+          tokenFile = mkOption {
+            type = nullOr (either str path);
+            default = null;
+            description = lib.mdDoc ''
+              Path to an environment file, containing the `TOKEN` environment
+              variable, that holds a token to register at the configured
+              Gitea/Forgejo instance.
+            '';
+          };
+
+          labels = mkOption {
+            type = listOf str;
+            example = literalExpression ''
+              [
+                # provide a debian base with nodejs for actions
+                "debian-latest:docker://node:18-bullseye"
+                # fake the ubuntu name, because node provides no ubuntu builds
+                "ubuntu-latest:docker://node:18-bullseye"
+                # provide native execution on the host
+                #"native:host"
+              ]
+            '';
+            description = lib.mdDoc ''
+              Labels used to map jobs to their runtime environment. Changing these
+              labels currently requires a new registration token.
+
+              Many common actions require bash, git and nodejs, as well as a filesystem
+              that follows the filesystem hierarchy standard.
+            '';
+          };
+          settings = mkOption {
+            description = lib.mdDoc ''
+              Configuration for `act_runner daemon`.
+              See <https://gitea.com/gitea/act_runner/src/branch/main/internal/pkg/config/config.example.yaml> for an example configuration.
+            '';
+
+            type = types.submodule {
+              freeformType = settingsFormat.type;
+            };
+
+            default = { };
+          };
+
+          hostPackages = mkOption {
+            type = listOf package;
+            default = with pkgs; [
+              bash
+              coreutils
+              curl
+              gawk
+              gitMinimal
+              gnused
+              nodejs
+              wget
+            ];
+            defaultText = literalExpression ''
+              with pkgs; [
+                bash
+                coreutils
+                curl
+                gawk
+                gitMinimal
+                gnused
+                nodejs
+                wget
+              ]
+            '';
+            description = lib.mdDoc ''
+              List of packages that are available to actions when the runner is configured
+              with a host execution label.
+            '';
+          };
+        };
+      });
+    };
+  };
+
+  config = mkIf (cfg.instances != {}) {
+    assertions = [ {
+      assertion = lib.all tokenXorTokenFile (attrValues cfg.instances);
+      message = "Instances of gitea-actions-runner require exactly one of `token` or `tokenFile` to be set.";
+    } {
+      assertion = wantsContainerRuntime -> hasDocker || hasPodman;
+      message = "Label configuration on gitea-actions-runner instance requires either docker or podman.";
+    } ];
+
+    systemd.services = let
+      mkRunnerService = name: instance: let
+        wantsContainerRuntime = hasDockerScheme instance;
+        wantsHost = hasHostScheme instance;
+        wantsDocker = wantsContainerRuntime && config.virtualisation.docker.enable;
+        wantsPodman = wantsContainerRuntime && config.virtualisation.podman.enable;
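+        # render the freeform settings into the YAML config file consumed by `act_runner daemon`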
+        configFile = settingsFormat.generate "config.yaml" instance.settings;
+      in
+        nameValuePair "gitea-runner-${escapeSystemdPath name}" {
+          inherit (instance) enable;
+          description = "Gitea Actions Runner";
+          after = [
+            "network-online.target"
+          ] ++ optionals (wantsDocker) [
+            "docker.service"
+          ] ++ optionals (wantsPodman) [
+            "podman.service"
+          ];
+          wantedBy = [
+            "multi-user.target"
+          ];
+          environment = optionalAttrs (instance.token != null) {
+            TOKEN = instance.token;
+          } // optionalAttrs (wantsPodman) {
+            DOCKER_HOST = "unix:///run/podman/podman.sock";
+          };
+          path = with pkgs; [
+            coreutils
+          ] ++ lib.optionals wantsHost instance.hostPackages;
+          serviceConfig = {
+            DynamicUser = true;
+            User = "gitea-runner";
+            StateDirectory = "gitea-runner";
+            WorkingDirectory = "-/var/lib/gitea-runner/${name}";
+
+            # gitea-runner might fail when gitea is restarted during upgrade.
+            Restart = "on-failure";
+            RestartSec = 2;
+
+            ExecStartPre = [(pkgs.writeShellScript "gitea-register-runner-${name}" ''
+              export INSTANCE_DIR="$STATE_DIRECTORY/${name}"
+              mkdir -vp "$INSTANCE_DIR"
+              cd "$INSTANCE_DIR"
+
+              # force reregistration on changed labels
+              export LABELS_FILE="$INSTANCE_DIR/.labels"
+              export LABELS_WANTED="$(echo ${escapeShellArg (concatStringsSep "\n" instance.labels)} | sort)"
+              export LABELS_CURRENT="$(cat "$LABELS_FILE" 2>/dev/null || echo 0)"
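+              # `echo 0` provides a sentinel value when the labels file does not exist yet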
+
+              if [ ! -e "$INSTANCE_DIR/.runner" ] || [ "$LABELS_WANTED" != "$LABELS_CURRENT" ]; then
+                # remove existing registration file, so that changing the labels forces a re-registration
+                rm -v "$INSTANCE_DIR/.runner" || true
+
+                # perform the registration
+                ${cfg.package}/bin/act_runner register --no-interactive \
+                  --instance ${escapeShellArg instance.url} \
+                  --token "$TOKEN" \
+                  --name ${escapeShellArg instance.name} \
+                  --labels ${escapeShellArg (concatStringsSep "," instance.labels)}
+
+                # and write back the configured labels
+                echo "$LABELS_WANTED" > "$LABELS_FILE"
+              fi
+
+            '')];
+            ExecStart = "${cfg.package}/bin/act_runner daemon --config ${configFile}";
+            SupplementaryGroups = optionals (wantsDocker) [
+              "docker"
+            ] ++ optionals (wantsPodman) [
+              "podman"
+            ];
+          } // optionalAttrs (instance.tokenFile != null) {
+            EnvironmentFile = instance.tokenFile;
+          };
+        };
+    in mapAttrs' mkRunnerService cfg.instances;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/github-runner.nix b/nixpkgs/nixos/modules/services/continuous-integration/github-runner.nix
new file mode 100644
index 000000000000..27cfee92c75a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/github-runner.nix
@@ -0,0 +1,25 @@
+{ config
+, pkgs
+, lib
+, ...
+}@args:
+
+with lib;
+
+let
+  cfg = config.services.github-runner;
+in
+
+{
+  options.services.github-runner = import ./github-runner/options.nix (args // {
+    # Users don't need to specify options.services.github-runner.name; it will default
+    # to the hostname.
+    includeNameDefault = true;
+  });
+
+  config = mkIf cfg.enable {
+    services.github-runners.${cfg.name} = cfg;
+  };
+
+  meta.maintainers = with maintainers; [ veehaitch newam thomasjm ];
+}
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/github-runner/options.nix b/nixpkgs/nixos/modules/services/continuous-integration/github-runner/options.nix
new file mode 100644
index 000000000000..f2887c7711b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/github-runner/options.nix
@@ -0,0 +1,219 @@
+{ config
+, lib
+, pkgs
+, includeNameDefault
+, ...
+}:
+
+with lib;
+
+{
+  enable = mkOption {
+    default = false;
+    example = true;
+    description = lib.mdDoc ''
+      Whether to enable GitHub Actions runner.
+
+      Note: GitHub recommends using self-hosted runners with private repositories only. Learn more here:
+      [About self-hosted runners](https://docs.github.com/en/actions/hosting-your-own-runners/about-self-hosted-runners).
+    '';
+    type = lib.types.bool;
+  };
+
+  url = mkOption {
+    type = types.str;
+    description = lib.mdDoc ''
+      Repository to add the runner to.
+
+      Changing this option triggers a new runner registration.
+
+      IMPORTANT: If your token is org-wide (not per repository), you need to
+      provide a GitHub org URL rather than a single repository, i.e.
+      `https://github.com/nixos` instead of
+      `https://github.com/nixos/nixpkgs`.
+      Otherwise, you will get a `404 NotFound`
+      from `POST https://api.github.com/actions/runner-registration`
+      in the configure script.
+    '';
+    example = "https://github.com/nixos/nixpkgs";
+  };
+
+  tokenFile = mkOption {
+    type = types.path;
+    description = lib.mdDoc ''
+      The full path to a file which contains one of the following:
+
+      * a fine-grained personal access token (PAT),
+      * a classic PAT, or
+      * a runner registration token.
+
+      Changing this option or the `tokenFile`’s content triggers a new runner registration.
+
+      We suggest using fine-grained PATs. A runner registration token is valid
+      for only one hour after creation, so the next time the runner configuration changes
+      you will get hard-to-debug HTTP 404 errors in the configure step.
+
+      The file should contain exactly one line with the token without any newline.
+      (Use `echo -n '…token…' > …token file…` to make sure no newlines sneak in.)
+
+      If the file contains a PAT, the service creates a new registration token
+      on startup as needed.
+      If a registration token is given, it can be used to re-register a runner of the same
+      name but is time-limited as noted above.
+
+      For fine-grained PATs:
+
+      Give it "Read and Write access to organization/repository self hosted runners",
+      depending on whether it is organization wide or per-repository. You might have to
+      experiment a little, fine-grained PATs are a `beta` Github feature and still subject
+      to change; nonetheless they are the best option at the moment.
+
+      For classic PATs:
+
+      Make sure the PAT has a scope of `admin:org` for organization-wide registrations
+      or a scope of `repo` for a single repository.
+
+      For runner registration tokens:
+
+      Nothing special needs to be done, but re-registration will fail once the token
+      expires after one hour, so these are not recommended.
+    '';
+    example = "/run/secrets/github-runner/nixos.token";
+  };
+
+  name = let
+    # Same pattern as for `networking.hostName`
+    baseType = types.strMatching "^$|^[[:alnum:]]([[:alnum:]_-]{0,61}[[:alnum:]])?$";
+  in mkOption {
+    type = if includeNameDefault then baseType else types.nullOr baseType;
+    description = lib.mdDoc ''
+      Name of the runner to configure. Defaults to the hostname.
+
+      Changing this option triggers a new runner registration.
+    '';
+    example = "nixos";
+  } // (if includeNameDefault then {
+    default = config.networking.hostName;
+    defaultText = literalExpression "config.networking.hostName";
+  } else {
+    default = null;
+  });
+
+  runnerGroup = mkOption {
+    type = types.nullOr types.str;
+    description = lib.mdDoc ''
+      Name of the runner group to add this runner to (defaults to the default runner group).
+
+      Changing this option triggers a new runner registration.
+    '';
+    default = null;
+  };
+
+  extraLabels = mkOption {
+    type = types.listOf types.str;
+    description = lib.mdDoc ''
+      Extra labels in addition to the default (`["self-hosted", "Linux", "X64"]`).
+
+      Changing this option triggers a new runner registration.
+    '';
+    example = literalExpression ''[ "nixos" ]'';
+    default = [ ];
+  };
+
+  replace = mkOption {
+    type = types.bool;
+    description = lib.mdDoc ''
+      Replace any existing runner with the same name.
+
+      Without this flag, registering a new runner with the same name fails.
+    '';
+    default = false;
+  };
+
+  extraPackages = mkOption {
+    type = types.listOf types.package;
+    description = lib.mdDoc ''
+      Extra packages to add to `PATH` of the service to make them available to workflows.
+    '';
+    default = [ ];
+  };
+
+  extraEnvironment = mkOption {
+    type = types.attrs;
+    description = lib.mdDoc ''
+      Extra environment variables to set for the runner, as an attrset.
+    '';
+    example = {
+      GIT_CONFIG = "/path/to/git/config";
+    };
+    default = {};
+  };
+
+  serviceOverrides = mkOption {
+    type = types.attrs;
+    description = lib.mdDoc ''
+      Modify the systemd service. Can be used to, e.g., adjust the sandboxing options.
+    '';
+    example = {
+      ProtectHome = false;
+      RestrictAddressFamilies = [ "AF_PACKET" ];
+    };
+    default = {};
+  };
+
+  package = mkOption {
+    type = types.package;
+    description = lib.mdDoc ''
+      Which github-runner derivation to use.
+    '';
+    default = pkgs.github-runner;
+    defaultText = literalExpression "pkgs.github-runner";
+  };
+
+  ephemeral = mkOption {
+    type = types.bool;
+    description = lib.mdDoc ''
+      If enabled, causes the following behavior:
+
+      - Passes the `--ephemeral` flag to the runner configuration script
+      - De-registers and stops the runner with GitHub after it has processed one job
+      - On stop, systemd wipes the runtime directory (this always happens, even without using the ephemeral option)
+      - Restarts the service after its successful exit
+      - On start, wipes the state directory and configures a new runner
+
+      You should only enable this option if `tokenFile` points to a file which contains a
+      personal access token (PAT). If you're using this option with a registration token, restarting the
+      service will fail as soon as the registration token has expired.
+    '';
+    default = false;
+  };
+
+  user = mkOption {
+    type = types.nullOr types.str;
+    description = lib.mdDoc ''
+      User under which to run the service. If null, a systemd dynamic user is used.
+    '';
+    default = null;
+    defaultText = literalExpression "username";
+  };
+
+  workDir = mkOption {
+    type = with types; nullOr str;
+    description = lib.mdDoc ''
+      Working directory, available as `$GITHUB_WORKSPACE` during workflow runs
+      and used as a default for [repository checkouts](https://github.com/actions/checkout).
+      The service cleans this directory on every service start.
+
+      A value of `null` will default to the systemd `RuntimeDirectory`.
+    '';
+    default = null;
+  };
+
+  nodeRuntimes = mkOption {
+    type = with types; nonEmptyListOf (enum [ "node16" "node20" ]);
+    default = [ "node20" ];
+    description = mdDoc ''
+      List of Node.js runtimes the runner should support.
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/github-runner/service.nix b/nixpkgs/nixos/modules/services/continuous-integration/github-runner/service.nix
new file mode 100644
index 000000000000..535df7f68e07
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/github-runner/service.nix
@@ -0,0 +1,268 @@
+{ config
+, lib
+, pkgs
+
+, cfg ? config.services.github-runner
+, svcName
+
+, systemdDir ? "${svcName}/${cfg.name}"
+  # %t: Runtime directory root (usually /run); see systemd.unit(5)
+, runtimeDir ? "%t/${systemdDir}"
+  # %S: State directory root (usually /var/lib); see systemd.unit(5)
+, stateDir ? "%S/${systemdDir}"
+  # %L: Log directory root (usually /var/log); see systemd.unit(5)
+, logsDir ? "%L/${systemdDir}"
+  # Name of file stored in service state directory
+, currentConfigTokenFilename ? ".current-token"
+
+, ...
+}:
+
+with lib;
+
+let
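+  # Fall back to the systemd runtime directory when no explicit workDir is configured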
+  workDir = if cfg.workDir == null then runtimeDir else cfg.workDir;
+  package = cfg.package.override { inherit (cfg) nodeRuntimes; };
+in
+{
+  description = "GitHub Actions runner";
+
+  wantedBy = [ "multi-user.target" ];
+  wants = [ "network-online.target" ];
+  after = [ "network.target" "network-online.target" ];
+
+  environment = {
+    HOME = workDir;
+    RUNNER_ROOT = stateDir;
+  } // cfg.extraEnvironment;
+
+  path = (with pkgs; [
+    bash
+    coreutils
+    git
+    gnutar
+    gzip
+  ]) ++ [
+    config.nix.package
+  ] ++ cfg.extraPackages;
+
+  serviceConfig = mkMerge [
+    {
+      ExecStart = "${package}/bin/Runner.Listener run --startuptype service";
+
+      # Does the following, sequentially:
+      # - If the module configuration or the token has changed, purge the state directory,
+      #   and create the current and the new token file with the contents of the configured
+      #   token. While both files have the same content, only the latter is accessible by
+      #   the service user.
+      # - Configure the runner using the new token file. When finished, delete it.
+      # - Set up the directory structure by creating the necessary symlinks.
+      ExecStartPre =
+        let
+          # Wrapper script which expects the full path of the state, working and logs
+          # directory as arguments. Overrides the respective systemd variables to provide
+          # unambiguous directory names. This becomes relevant, for example, if the
+          # caller overrides any of the StateDirectory=, RuntimeDirectory= or LogDirectory=
+          # to contain more than one directory. This causes systemd to set the respective
+          # environment variables with the path of all of the given directories, separated
+          # by a colon.
+          writeScript = name: lines: pkgs.writeShellScript "${svcName}-${name}.sh" ''
+            set -euo pipefail
+
+            STATE_DIRECTORY="$1"
+            WORK_DIRECTORY="$2"
+            LOGS_DIRECTORY="$3"
+
+            ${lines}
+          '';
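+          # Subset of options that affect registration, serialized to JSON so it can be
+          # diffed against the configuration of the previous run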
+          runnerRegistrationConfig = getAttrs [ "name" "tokenFile" "url" "runnerGroup" "extraLabels" "ephemeral" "workDir" ] cfg;
+          newConfigPath = builtins.toFile "${svcName}-config.json" (builtins.toJSON runnerRegistrationConfig);
+          currentConfigPath = "$STATE_DIRECTORY/.nixos-current-config.json";
+          newConfigTokenPath = "$STATE_DIRECTORY/.new-token";
+          currentConfigTokenPath = "$STATE_DIRECTORY/${currentConfigTokenFilename}";
+
+          runnerCredFiles = [
+            ".credentials"
+            ".credentials_rsaparams"
+            ".runner"
+          ];
+          unconfigureRunner = writeScript "unconfigure" ''
+            copy_tokens() {
+              # Copy the configured token file to the state dir and allow the service user to read the file
+              install --mode=666 ${escapeShellArg cfg.tokenFile} "${newConfigTokenPath}"
+              # Also copy current file to allow for a diff on the next start
+              install --mode=600 ${escapeShellArg cfg.tokenFile} "${currentConfigTokenPath}"
+            }
+            clean_state() {
+              find "$STATE_DIRECTORY/" -mindepth 1 -delete
+              copy_tokens
+            }
+            diff_config() {
+              changed=0
+              # Check for module config changes
+              [[ -f "${currentConfigPath}" ]] \
+                && ${pkgs.diffutils}/bin/diff -q '${newConfigPath}' "${currentConfigPath}" >/dev/null 2>&1 \
+                || changed=1
+              # Also check the content of the token file
+              [[ -f "${currentConfigTokenPath}" ]] \
+                && ${pkgs.diffutils}/bin/diff -q "${currentConfigTokenPath}" ${escapeShellArg cfg.tokenFile} >/dev/null 2>&1 \
+                || changed=1
+              # If the config has changed, remove old state and copy tokens
+              if [[ "$changed" -eq 1 ]]; then
+                echo "Config has changed, removing old runner state."
+                echo "The old runner will still appear in the GitHub Actions UI." \
+                     "You have to remove it manually."
+                clean_state
+              fi
+            }
+            if [[ "${optionalString cfg.ephemeral "1"}" ]]; then
+              # In ephemeral mode, we always want to start with a clean state
+              clean_state
+            elif [[ "$(ls -A "$STATE_DIRECTORY")" ]]; then
+              # There are state files from a previous run; diff them to decide if we need a new registration
+              diff_config
+            else
+              # The state directory is entirely empty which indicates a first start
+              copy_tokens
+            fi
+            # Always clean workDir
+            find -H "$WORK_DIRECTORY" -mindepth 1 -delete
+          '';
+          configureRunner = writeScript "configure" ''
+            if [[ -e "${newConfigTokenPath}" ]]; then
+              echo "Configuring GitHub Actions Runner"
+              args=(
+                --unattended
+                --disableupdate
+                --work "$WORK_DIRECTORY"
+                --url ${escapeShellArg cfg.url}
+                --labels ${escapeShellArg (concatStringsSep "," cfg.extraLabels)}
+                --name ${escapeShellArg cfg.name}
+                ${optionalString cfg.replace "--replace"}
+                ${optionalString (cfg.runnerGroup != null) "--runnergroup ${escapeShellArg cfg.runnerGroup}"}
+                ${optionalString cfg.ephemeral "--ephemeral"}
+              )
+              # If the token file contains a PAT (i.e., it starts with "ghp_" or "github_pat_"), we have to use the --pat option;
+              # if it is not a PAT, we assume it contains a registration token and use the --token option.
+              token=$(<"${newConfigTokenPath}")
+              if [[ "$token" =~ ^ghp_* ]] || [[ "$token" =~ ^github_pat_* ]]; then
+                args+=(--pat "$token")
+              else
+                args+=(--token "$token")
+              fi
+              ${package}/bin/Runner.Listener configure "''${args[@]}"
+              # Move the automatically created _diag dir to the logs dir
+              mkdir -p  "$STATE_DIRECTORY/_diag"
+              cp    -r  "$STATE_DIRECTORY/_diag/." "$LOGS_DIRECTORY/"
+              rm    -rf "$STATE_DIRECTORY/_diag/"
+              # Cleanup token from config
+              rm "${newConfigTokenPath}"
+              # Symlink to new config
+              ln -s '${newConfigPath}' "${currentConfigPath}"
+            fi
+          '';
+          setupWorkDir = writeScript "setup-work-dirs" ''
+            # Link _diag dir
+            ln -s "$LOGS_DIRECTORY" "$WORK_DIRECTORY/_diag"
+
+            # Link the runner credentials to the work dir
+            ln -s "$STATE_DIRECTORY"/{${lib.concatStringsSep "," runnerCredFiles}} "$WORK_DIRECTORY/"
+          '';
+        in
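+        # Each script is invoked with the state, work and logs directories as $1..$3
+        # (see the writeScript wrapper above)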
+        map (x: "${x} ${escapeShellArgs [ stateDir workDir logsDir ]}") [
+          "+${unconfigureRunner}" # runs as root
+          configureRunner
+          setupWorkDir
+        ];
+
+      # If running in ephemeral mode, restart the service on-exit (i.e., successful de-registration of the runner)
+      # to trigger a fresh registration.
+      Restart = if cfg.ephemeral then "on-success" else "no";
+      # If the runner exits with `ReturnCode.RetryableError = 2`, always restart the service:
+      # https://github.com/actions/runner/blob/40ed7f8/src/Runner.Common/Constants.cs#L146
+      RestartForceExitStatus = [ 2 ];
+
+      # Contains _diag
+      LogsDirectory = [ systemdDir ];
+      # Default RUNNER_ROOT which contains ephemeral Runner data
+      RuntimeDirectory = [ systemdDir ];
+      # Home of persistent runner data, e.g., credentials
+      StateDirectory = [ systemdDir ];
+      StateDirectoryMode = "0700";
+      WorkingDirectory = workDir;
+
+      InaccessiblePaths = [
+        # Token file path given in the configuration, if visible to the service
+        "-${cfg.tokenFile}"
+        # Token file in the state directory
+        "${stateDir}/${currentConfigTokenFilename}"
+      ];
+
+      KillSignal = "SIGINT";
+
+      # Hardening (may overlap with DynamicUser=)
+      # The following options are set only to improve the score reported by:
+      # systemd-analyze security github-runner
+      AmbientCapabilities = mkBefore [ "" ];
+      CapabilityBoundingSet = mkBefore [ "" ];
+      # ProtectClock= adds DeviceAllow=char-rtc r
+      DeviceAllow = mkBefore [ "" ];
+      NoNewPrivileges = mkDefault true;
+      PrivateDevices = mkDefault true;
+      PrivateMounts = mkDefault true;
+      PrivateTmp = mkDefault true;
+      PrivateUsers = mkDefault true;
+      ProtectClock = mkDefault true;
+      ProtectControlGroups = mkDefault true;
+      ProtectHome = mkDefault true;
+      ProtectHostname = mkDefault true;
+      ProtectKernelLogs = mkDefault true;
+      ProtectKernelModules = mkDefault true;
+      ProtectKernelTunables = mkDefault true;
+      ProtectSystem = mkDefault "strict";
+      RemoveIPC = mkDefault true;
+      RestrictNamespaces = mkDefault true;
+      RestrictRealtime = mkDefault true;
+      RestrictSUIDSGID = mkDefault true;
+      UMask = mkDefault "0066";
+      ProtectProc = mkDefault "invisible";
+      SystemCallFilter = mkBefore [
+        "~@clock"
+        "~@cpu-emulation"
+        "~@module"
+        "~@mount"
+        "~@obsolete"
+        "~@raw-io"
+        "~@reboot"
+        "~capset"
+        "~setdomainname"
+        "~sethostname"
+      ];
+      RestrictAddressFamilies = mkBefore [ "AF_INET" "AF_INET6" "AF_UNIX" "AF_NETLINK" ];
+
+      BindPaths = lib.optionals (cfg.workDir != null) [ cfg.workDir ];
+
+      # Needs network access
+      PrivateNetwork = mkDefault false;
+      # Cannot be true due to Node
+      MemoryDenyWriteExecute = mkDefault false;
+
+      # The more restrictive "pid" option makes `nix` commands in CI emit
+      # "GC Warning: Couldn't read /proc/stat"
+      # You may want to set this to "pid" if not using `nix` commands
+      ProcSubset = mkDefault "all";
+      # Coverage programs for compiled code such as `cargo-tarpaulin` disable
+      # ASLR (address space layout randomization) which requires the
+      # `personality` syscall
+      # You may want to set this to `true` if not using coverage tooling on
+      # compiled code
+      LockPersonality = mkDefault false;
+
+      # Note that this has some interactions with the User setting, so you may
+      # want to consult the systemd docs if using both.
+      DynamicUser = mkDefault true;
+    }
+    (mkIf (cfg.user != null) { User = cfg.user; })
+    cfg.serviceOverrides
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/github-runners.nix b/nixpkgs/nixos/modules/services/continuous-integration/github-runners.nix
new file mode 100644
index 000000000000..66ace9580eca
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/github-runners.nix
@@ -0,0 +1,58 @@
+{ config
+, pkgs
+, lib
+, ...
+}@args:
+
+with lib;
+
+let
+  cfg = config.services.github-runners;
+
+in
+
+{
+  options.services.github-runners = mkOption {
+    default = {};
+    type = with types; attrsOf (submodule { options = import ./github-runner/options.nix (args // {
+      # services.github-runners.${name}.name doesn't have a default; it falls back to ${name} below.
+      includeNameDefault = false;
+    }); });
+    example = {
+      runner1 = {
+        enable = true;
+        url = "https://github.com/owner/repo";
+        name = "runner1";
+        tokenFile = "/secrets/token1";
+      };
+
+      runner2 = {
+        enable = true;
+        url = "https://github.com/owner/repo";
+        name = "runner2";
+        tokenFile = "/secrets/token2";
+      };
+    };
+    description = lib.mdDoc ''
+      Multiple GitHub runners, each keyed by an arbitrary attribute name.
+    '';
+  };
+
+  config = {
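+    # Create one systemd service per configured runner; when no explicit `name` is set,
+    # the attribute name is used instead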
+    systemd.services = flip mapAttrs' cfg (n: v:
+      let
+        svcName = "github-runner-${n}";
+      in
+        nameValuePair svcName
+        (import ./github-runner/service.nix (args // {
+          inherit svcName;
+          cfg = v // {
+            name = if v.name != null then v.name else n;
+          };
+          systemdDir = "github-runner/${n}";
+        }))
+    );
+  };
+
+  meta.maintainers = with maintainers; [ veehaitch newam ];
+}
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/gitlab-runner.nix b/nixpkgs/nixos/modules/services/continuous-integration/gitlab-runner.nix
new file mode 100644
index 000000000000..10a2fe8a44dd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/gitlab-runner.nix
@@ -0,0 +1,616 @@
+{ config, lib, pkgs, ... }:
+with builtins;
+with lib;
+let
+  cfg = config.services.gitlab-runner;
+  hasDocker = config.virtualisation.docker.enable;
+
+  /* The whole logic of this module is to diff the hashes of the desired vs. existing runners.
+  The hash is recorded in the runner's name because we can't do better yet.
+  See https://gitlab.com/gitlab-org/gitlab-runner/-/issues/29350 for more details.
+  */
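+  # e.g. "a1b2c3d4e5f6 my shared runner" when a description is set, otherwise
+  # "nix_myhost_a1b2c3d4e5f6" (hash and host name here are illustrative)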
+  genRunnerName = name: service: let
+      hash = substring 0 12 (hashString "md5" (unsafeDiscardStringContext (toJSON service)));
+    in if service ? description && service.description != null
+    then "${hash} ${service.description}"
+    else "${name}_${config.networking.hostName}_${hash}";
+
+  hashedServices = mapAttrs'
+    (name: service: nameValuePair (genRunnerName name service) service) cfg.services;
+  configPath = ''"$HOME"/.gitlab-runner/config.toml'';
+  configureScript = pkgs.writeShellApplication {
+    name = "gitlab-runner-configure";
+    runtimeInputs = with pkgs; [
+        bash
+        gawk
+        jq
+        moreutils
+        remarshal
+        util-linux
+        cfg.package
+        perl
+        python3
+    ];
+    text = if (cfg.configFile != null) then ''
+      cp ${cfg.configFile} ${configPath}
+      # make config file readable by service
+      chown -R --reference="$HOME" "$(dirname ${configPath})"
+    '' else ''
+      export CONFIG_FILE=${configPath}
+
+      mkdir -p "$(dirname ${configPath})"
+      touch ${configPath}
+
+      # update global options
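+      # keep only the existing `runners` section of the config, merge `cfg.settings`
+      # on top of it, and write the result back as TOML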
+      remarshal --if toml --of json ${configPath} \
+        | jq -cM 'with_entries(select([.key] | inside(["runners"])))' \
+        | jq -scM '.[0] + .[1]' - <(echo ${escapeShellArg (toJSON cfg.settings)}) \
+        | remarshal --if json --of toml \
+        | sponge ${configPath}
+
+      # remove no longer existing services
+      gitlab-runner verify --delete
+
+      ${toShellVar "NEEDED_SERVICES" (lib.mapAttrs (name: value: 1) hashedServices)}
+
+      declare -A REGISTERED_SERVICES
+
+      while IFS="," read -r name token;
+      do
+        REGISTERED_SERVICES["$name"]="$token"
+      done < <(gitlab-runner --log-format json list 2>&1 | grep Token  | jq -r '.msg +"," + .Token')
+
+      echo "NEEDED_SERVICES: " "''${!NEEDED_SERVICES[@]}"
+      echo "REGISTERED_SERVICES:" "''${!REGISTERED_SERVICES[@]}"
+
+      # difference between current and desired state
+      declare -A NEW_SERVICES
+      for name in "''${!NEEDED_SERVICES[@]}"; do
+        if [ ! -v 'REGISTERED_SERVICES[$name]' ]; then
+          NEW_SERVICES[$name]=1
+        fi
+      done
+
+      declare -A OLD_SERVICES
+      # shellcheck disable=SC2034
+      for name in "''${!REGISTERED_SERVICES[@]}"; do
+        if [ ! -v 'NEEDED_SERVICES[$name]' ]; then
+          OLD_SERVICES[$name]=1
+        fi
+      done
+
+      # register new services
+      ${concatStringsSep "\n" (mapAttrsToList (name: service: ''
+        # only register services that appear in NEW_SERVICES
+        if [ -v 'NEW_SERVICES["${name}"]' ] ; then
+          bash -c ${escapeShellArg (concatStringsSep " \\\n " ([
+            "set -a && source ${service.registrationConfigFile} &&"
+            "gitlab-runner register"
+            "--non-interactive"
+            "--name '${name}'"
+            "--executor ${service.executor}"
+            "--limit ${toString service.limit}"
+            "--request-concurrency ${toString service.requestConcurrency}"
+            "--maximum-timeout ${toString service.maximumTimeout}"
+          ] ++ service.registrationFlags
+            ++ optional (service.buildsDir != null)
+            "--builds-dir ${service.buildsDir}"
+            ++ optional (service.cloneUrl != null)
+            "--clone-url ${service.cloneUrl}"
+            ++ optional (service.preCloneScript != null)
+            "--pre-clone-script ${service.preCloneScript}"
+            ++ optional (service.preBuildScript != null)
+            "--pre-build-script ${service.preBuildScript}"
+            ++ optional (service.postBuildScript != null)
+            "--post-build-script ${service.postBuildScript}"
+            ++ optional (service.tagList != [ ])
+            "--tag-list ${concatStringsSep "," service.tagList}"
+            ++ optional service.runUntagged
+            "--run-untagged"
+            ++ optional service.protected
+            "--access-level ref_protected"
+            ++ optional service.debugTraceDisabled
+            "--debug-trace-disabled"
+            ++ map (e: "--env ${escapeShellArg e}") (mapAttrsToList (name: value: "${name}=${value}") service.environmentVariables)
+            ++ optionals (hasPrefix "docker" service.executor) (
+              assert (
+                assertMsg (service.dockerImage != null)
+                  "dockerImage option is required for ${service.executor} executor (${name})");
+              [ "--docker-image ${service.dockerImage}" ]
+              ++ optional service.dockerDisableCache
+              "--docker-disable-cache"
+              ++ optional service.dockerPrivileged
+              "--docker-privileged"
+              ++ map (v: "--docker-volumes ${escapeShellArg v}") service.dockerVolumes
+              ++ map (v: "--docker-extra-hosts ${escapeShellArg v}") service.dockerExtraHosts
+              ++ map (v: "--docker-allowed-images ${escapeShellArg v}") service.dockerAllowedImages
+              ++ map (v: "--docker-allowed-services ${escapeShellArg v}") service.dockerAllowedServices
+            )
+          ))} && sleep 1 || exit 1
+        fi
+      '') hashedServices)}
+
+      # check key is in array https://stackoverflow.com/questions/30353951/how-to-check-if-dictionary-contains-a-key-in-bash
+
+      echo "NEW_SERVICES: ''${NEW_SERVICES[*]}"
+      echo "OLD_SERVICES: ''${OLD_SERVICES[*]}"
+      # unregister old services
+      for NAME in "''${!OLD_SERVICES[@]}"
+      do
+        [ -n "$NAME" ] && gitlab-runner unregister \
+          --name "$NAME" && sleep 1
+      done
+
+      # make config file readable by service
+      chown -R --reference="$HOME" "$(dirname ${configPath})"
+    '';
+  };
+  startScript = pkgs.writeShellScriptBin "gitlab-runner-start" ''
+    export CONFIG_FILE=${configPath}
+    exec gitlab-runner run --working-directory "$HOME"
+  '';
+in {
+  options.services.gitlab-runner = {
+    enable = mkEnableOption (lib.mdDoc "Gitlab Runner");
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Configuration file for gitlab-runner.
+
+        {option}`configFile` takes precedence over {option}`services`.
+        {option}`checkInterval` and {option}`concurrent` will be ignored too.
+
+        This option is deprecated, please use {option}`services` instead.
+        You can use {option}`registrationConfigFile` and
+        {option}`registrationFlags`
+        for settings not covered by this module.
+      '';
+    };
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = (pkgs.formats.json { }).type;
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Global gitlab-runner configuration. See
+        <https://docs.gitlab.com/runner/configuration/advanced-configuration.html#the-global-section>
+        for supported values.
+      '';
+    };
+    gracefulTermination = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Finish all remaining jobs before stopping.
+        If not set, gitlab-runner will stop immediately without waiting
+        for jobs to finish, which will lead to failed builds.
+      '';
+    };
+    gracefulTimeout = mkOption {
+      type = types.str;
+      default = "infinity";
+      example = "5min 20s";
+      description = lib.mdDoc ''
+        Time to wait until a graceful shutdown is turned into a forceful one.
+      '';
+    };
+    package = mkOption {
+      type = types.package;
+      default = pkgs.gitlab-runner;
+      defaultText = literalExpression "pkgs.gitlab-runner";
+      example = literalExpression "pkgs.gitlab-runner_1_11";
+      description = lib.mdDoc "Gitlab Runner package to use.";
+    };
+    extraPackages = mkOption {
+      type = types.listOf types.package;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra packages to add to PATH for the gitlab-runner process.
+      '';
+    };
+    services = mkOption {
+      description = lib.mdDoc "GitLab Runner services.";
+      default = { };
+      example = literalExpression ''
+        {
+          # runner for building in docker via host's nix-daemon
+          # nix store will be readable in runner, might be insecure
+          nix = {
+            # File should contain at least these two variables:
+            # `CI_SERVER_URL`
+            # `REGISTRATION_TOKEN`
+            registrationConfigFile = "/run/secrets/gitlab-runner-registration";
+            dockerImage = "alpine";
+            dockerVolumes = [
+              "/nix/store:/nix/store:ro"
+              "/nix/var/nix/db:/nix/var/nix/db:ro"
+              "/nix/var/nix/daemon-socket:/nix/var/nix/daemon-socket:ro"
+            ];
+            dockerDisableCache = true;
+            preBuildScript = pkgs.writeScript "setup-container" '''
+              mkdir -p -m 0755 /nix/var/log/nix/drvs
+              mkdir -p -m 0755 /nix/var/nix/gcroots
+              mkdir -p -m 0755 /nix/var/nix/profiles
+              mkdir -p -m 0755 /nix/var/nix/temproots
+              mkdir -p -m 0755 /nix/var/nix/userpool
+              mkdir -p -m 1777 /nix/var/nix/gcroots/per-user
+              mkdir -p -m 1777 /nix/var/nix/profiles/per-user
+              mkdir -p -m 0755 /nix/var/nix/profiles/per-user/root
+              mkdir -p -m 0700 "$HOME/.nix-defexpr"
+
+              . ''${pkgs.nix}/etc/profile.d/nix.sh
+
+              ''${pkgs.nix}/bin/nix-env -i ''${concatStringsSep " " (with pkgs; [ nix cacert git openssh ])}
+
+              ''${pkgs.nix}/bin/nix-channel --add https://nixos.org/channels/nixpkgs-unstable
+              ''${pkgs.nix}/bin/nix-channel --update nixpkgs
+            ''';
+            environmentVariables = {
+              ENV = "/etc/profile";
+              USER = "root";
+              NIX_REMOTE = "daemon";
+              PATH = "/nix/var/nix/profiles/default/bin:/nix/var/nix/profiles/default/sbin:/bin:/sbin:/usr/bin:/usr/sbin";
+              NIX_SSL_CERT_FILE = "/nix/var/nix/profiles/default/etc/ssl/certs/ca-bundle.crt";
+            };
+            tagList = [ "nix" ];
+          };
+          # runner for building docker images
+          docker-images = {
+            # File should contain at least these two variables:
+            # `CI_SERVER_URL`
+            # `REGISTRATION_TOKEN`
+            registrationConfigFile = "/run/secrets/gitlab-runner-registration";
+            dockerImage = "docker:stable";
+            dockerVolumes = [
+              "/var/run/docker.sock:/var/run/docker.sock"
+            ];
+            tagList = [ "docker-images" ];
+          };
+          # runner for executing stuff on host system (very insecure!)
+          # make sure to add required packages (including git!)
+          # to `environment.systemPackages`
+          shell = {
+            # File should contain at least these two variables:
+            # `CI_SERVER_URL`
+            # `REGISTRATION_TOKEN`
+            registrationConfigFile = "/run/secrets/gitlab-runner-registration";
+            executor = "shell";
+            tagList = [ "shell" ];
+          };
+          # runner for everything else
+          default = {
+            # File should contain at least these two variables:
+            # `CI_SERVER_URL`
+            # `REGISTRATION_TOKEN`
+            registrationConfigFile = "/run/secrets/gitlab-runner-registration";
+            dockerImage = "debian:stable";
+          };
+        }
+      '';
+      type = types.attrsOf (types.submodule {
+        options = {
+          registrationConfigFile = mkOption {
+            type = types.path;
+            description = lib.mdDoc ''
+              Absolute path to a file with environment variables
+              used for gitlab-runner registration.
+              A list of all supported environment variables can be found in
+              `gitlab-runner register --help`.
+
+              The ones you probably want to set are
+
+              `CI_SERVER_URL=<CI server URL>`
+
+              `REGISTRATION_TOKEN=<registration secret>`
+
+              WARNING: make sure to use a quoted absolute path,
+              or it is going to be copied to the Nix store.
+            '';
+          };
+          registrationFlags = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "--docker-helper-image my/gitlab-runner-helper" ];
+            description = lib.mdDoc ''
+              Extra command-line flags passed to
+              `gitlab-runner register`.
+              Execute `gitlab-runner register --help`
+              for a list of supported flags.
+            '';
+          };
+          environmentVariables = mkOption {
+            type = types.attrsOf types.str;
+            default = { };
+            example = { NAME = "value"; };
+            description = lib.mdDoc ''
+              Custom environment variables injected to build environment.
+              For secrets you can use {option}`registrationConfigFile`
+              with `RUNNER_ENV` variable set.
+            '';
+          };
+          description = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc ''
+              Name/description of the runner.
+            '';
+          };
+          executor = mkOption {
+            type = types.str;
+            default = "docker";
+            description = lib.mdDoc ''
+              Select the executor, e.g. shell, docker, etc.
+              See [runner documentation](https://docs.gitlab.com/runner/executors/README.html) for more information.
+            '';
+          };
+          buildsDir = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            example = "/var/lib/gitlab-runner/builds";
+            description = lib.mdDoc ''
+              Absolute path to a directory where builds will be stored
+              in the context of the selected executor (locally, Docker, SSH).
+            '';
+          };
+          cloneUrl = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "http://gitlab.example.local";
+            description = lib.mdDoc ''
+              Overwrite the URL of the GitLab instance. Used if the runner can't connect to GitLab at the URL that GitLab advertises for itself.
+            '';
+          };
+          dockerImage = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc ''
+              Docker image to be used.
+            '';
+          };
+          dockerVolumes = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "/var/run/docker.sock:/var/run/docker.sock" ];
+            description = lib.mdDoc ''
+              Bind-mount a volume and create it
+              if it doesn't exist prior to mounting.
+            '';
+          };
+          dockerDisableCache = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Disable all container caching.
+            '';
+          };
+          dockerPrivileged = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Give extended privileges to container.
+            '';
+          };
+          dockerExtraHosts = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "other-host:127.0.0.1" ];
+            description = lib.mdDoc ''
+              Add a custom host-to-IP mapping.
+            '';
+          };
+          dockerAllowedImages = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "ruby:*" "python:*" "php:*" "my.registry.tld:5000/*:*" ];
+            description = lib.mdDoc ''
+              Whitelist allowed images.
+            '';
+          };
+          dockerAllowedServices = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "postgres:9" "redis:*" "mysql:*" ];
+            description = lib.mdDoc ''
+              Whitelist allowed services.
+            '';
+          };
+          preCloneScript = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = lib.mdDoc ''
+              Runner-specific command script executed before code is pulled.
+            '';
+          };
+          preBuildScript = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = lib.mdDoc ''
+              Runner-specific command script executed after code is pulled,
+              just before build executes.
+            '';
+          };
+          postBuildScript = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = lib.mdDoc ''
+              Runner-specific command script executed after code is pulled
+              and just after build executes.
+            '';
+          };
+          tagList = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            description = lib.mdDoc ''
+              Tag list.
+            '';
+          };
+          runUntagged = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Register to run untagged builds; defaults to
+              `true` when {option}`tagList` is empty.
+            '';
+          };
+          limit = mkOption {
+            type = types.int;
+            default = 0;
+            description = lib.mdDoc ''
+              Limit how many jobs can be handled concurrently by this service.
+              0 (default) simply means don't limit.
+            '';
+          };
+          requestConcurrency = mkOption {
+            type = types.int;
+            default = 0;
+            description = lib.mdDoc ''
+              Limit number of concurrent requests for new jobs from GitLab.
+            '';
+          };
+          maximumTimeout = mkOption {
+            type = types.int;
+            default = 0;
+            description = lib.mdDoc ''
+              The maximum timeout (in seconds) that will be set for a
+              job when using this runner. 0 (default) simply means don't limit.
+            '';
+          };
+          protected = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              When set to true, the runner will only run on pipelines
+              triggered on protected branches.
+            '';
+          };
+          debugTraceDisabled = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              When set to true, the runner will disable the possibility of
+              using the `CI_DEBUG_TRACE` feature.
+            '';
+          };
+        };
+      });
+    };
+    clear-docker-cache = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to periodically prune gitlab runner's Docker resources. If
+          enabled, a systemd timer will run {command}`clear-docker-cache` as
+          specified by the `dates` option.
+        '';
+      };
+
+      flags = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "prune" ];
+        description = lib.mdDoc ''
+          Any additional flags passed to {command}`clear-docker-cache`.
+        '';
+      };
+
+      dates = mkOption {
+        default = "weekly";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specification (in the format described by
+          {manpage}`systemd.time(7)`) of the time at
+          which the prune will occur.
+        '';
+      };
+
+      package = mkOption {
+        default = config.virtualisation.docker.package;
+        defaultText = literalExpression "config.virtualisation.docker.package";
+        example = literalExpression "pkgs.docker";
+        description = lib.mdDoc "Docker package to use for clearing up docker cache.";
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    warnings = mapAttrsToList
+      (n: v: "services.gitlab-runner.services.${n}.`registrationConfigFile` points to a file in Nix Store. You should use quoted absolute path to prevent this.")
+      (filterAttrs (n: v: isStorePath v.registrationConfigFile) cfg.services);
+
+    environment.systemPackages = [ cfg.package ];
+    systemd.services.gitlab-runner = {
+      description = "Gitlab Runner";
+      documentation = [ "https://docs.gitlab.com/runner/" ];
+      after = [ "network.target" ]
+        ++ optional hasDocker "docker.service";
+      requires = optional hasDocker "docker.service";
+      wantedBy = [ "multi-user.target" ];
+      environment = config.networking.proxy.envVars // {
+        HOME = "/var/lib/gitlab-runner";
+      };
+      path = with pkgs; [
+        bash
+        gawk
+        jq
+        moreutils
+        remarshal
+        util-linux
+        cfg.package
+      ] ++ cfg.extraPackages;
+      reloadIfChanged = true;
+      serviceConfig = {
+        # Set `DynamicUser` under `systemd.services.gitlab-runner.serviceConfig`
+        # to `lib.mkForce false` in your configuration to run this service as root.
+        # You can also set `User` and `Group` options to run this service as desired user.
+        # Make sure to restart service or changes won't apply.
+        DynamicUser = true;
+        StateDirectory = "gitlab-runner";
+        SupplementaryGroups = optional hasDocker "docker";
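+        # The `!` prefix runs the configure script with full privileges (as root),
+        # while the main process runs as the dynamic user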
+        ExecStartPre = "!${configureScript}/bin/gitlab-runner-configure";
+        ExecStart = "${startScript}/bin/gitlab-runner-start";
+        ExecReload = "!${configureScript}/bin/gitlab-runner-configure";
+      } // optionalAttrs cfg.gracefulTermination {
+        TimeoutStopSec = "${cfg.gracefulTimeout}";
+        KillSignal = "SIGQUIT";
+        KillMode = "process";
+      };
+    };
+    # Enable periodic clear-docker-cache script
+    systemd.services.gitlab-runner-clear-docker-cache = mkIf (cfg.clear-docker-cache.enable && (any (s: s.executor == "docker") (attrValues cfg.services))) {
+      description = "Prune gitlab-runner docker resources";
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+
+      serviceConfig.Type = "oneshot";
+
+      path = [ cfg.clear-docker-cache.package pkgs.gawk ];
+
+      script = ''
+        ${pkgs.gitlab-runner}/bin/clear-docker-cache ${toString cfg.clear-docker-cache.flags}
+      '';
+
+      startAt = cfg.clear-docker-cache.dates;
+    };
+    # Enable docker if `docker` executor is used in any service
+    virtualisation.docker.enable = mkIf (
+      any (s: s.executor == "docker") (attrValues cfg.services)
+    ) (mkDefault true);
+  };
+  imports = [
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "packages" ] [ "services" "gitlab-runner" "extraPackages" ] )
+    (mkRemovedOptionModule [ "services" "gitlab-runner" "configOptions" ] "Use services.gitlab-runner.services option instead" )
+    (mkRemovedOptionModule [ "services" "gitlab-runner" "workDir" ] "You should move contents of workDir (if any) to /var/lib/gitlab-runner" )
+
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "checkInterval" ] [ "services" "gitlab-runner" "settings" "check_interval" ] )
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "concurrent" ] [ "services" "gitlab-runner" "settings" "concurrent" ] )
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "sentryDSN" ] [ "services" "gitlab-runner" "settings" "sentry_dsn" ] )
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "prometheusListenAddress" ] [ "services" "gitlab-runner" "settings" "listen_address" ] )
+
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "sessionServer" "listenAddress" ] [ "services" "gitlab-runner" "settings" "session_server" "listen_address" ] )
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "sessionServer" "advertiseAddress" ] [ "services" "gitlab-runner" "settings" "session_server" "advertise_address" ] )
+    (mkRenamedOptionModule [ "services" "gitlab-runner" "sessionServer" "sessionTimeout" ] [ "services" "gitlab-runner" "settings" "session_server" "session_timeout" ] )
+  ];
+
+  meta.maintainers = teams.gitlab.members;
+}
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/gocd-agent/default.nix b/nixpkgs/nixos/modules/services/continuous-integration/gocd-agent/default.nix
new file mode 100644
index 000000000000..c0d752443a16
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/gocd-agent/default.nix
@@ -0,0 +1,218 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gocd-agent;
+  opt = options.services.gocd-agent;
+in {
+  options = {
+    services.gocd-agent = {
+      enable = mkEnableOption (lib.mdDoc "gocd-agent");
+
+      user = mkOption {
+        default = "gocd-agent";
+        type = types.str;
+        description = lib.mdDoc ''
+          User the Go.CD agent should execute under.
+        '';
+      };
+
+      group = mkOption {
+        default = "gocd-agent";
+        type = types.str;
+        description = lib.mdDoc ''
+          If the default user "gocd-agent" is configured, then this is the primary
+          group of that user.
+        '';
+      };
+
+      extraGroups = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "wheel" "docker" ];
+        description = lib.mdDoc ''
+          List of extra groups that the "gocd-agent" user should be a part of.
+        '';
+      };
+
+      packages = mkOption {
+        default = [ pkgs.stdenv pkgs.jre pkgs.git config.programs.ssh.package pkgs.nix ];
+        defaultText = literalExpression "[ pkgs.stdenv pkgs.jre pkgs.git config.programs.ssh.package pkgs.nix ]";
+        type = types.listOf types.package;
+        description = lib.mdDoc ''
+          Packages to add to PATH for the Go.CD agent process.
+        '';
+      };
+
+      agentConfig = mkOption {
+        default = "";
+        type = types.str;
+        example = ''
+          agent.auto.register.resources=ant,java
+          agent.auto.register.environments=QA,Performance
+          agent.auto.register.hostname=Agent01
+        '';
+        description = lib.mdDoc ''
+          Agent registration configuration.
+        '';
+      };
+
+      goServer = mkOption {
+        default = "https://127.0.0.1:8154/go";
+        type = types.str;
+        description = lib.mdDoc ''
+          URL of the Go.CD server to attach the Go.CD agent to.
+        '';
+      };
+
+      workDir = mkOption {
+        default = "/var/lib/go-agent";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the working directory in which the Go.CD agent java archive resides.
+        '';
+      };
+
+      initialJavaHeapSize = mkOption {
+        default = "128m";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the initial java heap memory size for the Go.CD agent java process.
+        '';
+      };
+
+      maxJavaHeapMemory = mkOption {
+        default = "256m";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the maximum java heap memory size for the Go.CD agent java process.
+        '';
+      };
+
+      startupOptions = mkOption {
+        type = types.listOf types.str;
+        default = [
+          "-Xms${cfg.initialJavaHeapSize}"
+          "-Xmx${cfg.maxJavaHeapMemory}"
+          "-Djava.io.tmpdir=/tmp"
+          "-Dcruise.console.publish.interval=10"
+          "-Djava.security.egd=file:/dev/./urandom"
+        ];
+        defaultText = literalExpression ''
+          [
+            "-Xms''${config.${opt.initialJavaHeapSize}}"
+            "-Xmx''${config.${opt.maxJavaHeapMemory}}"
+            "-Djava.io.tmpdir=/tmp"
+            "-Dcruise.console.publish.interval=10"
+            "-Djava.security.egd=file:/dev/./urandom"
+          ]
+        '';
+        description = lib.mdDoc ''
+          Specifies startup command line arguments to pass to the Go.CD agent
+          java process.
+        '';
+      };
+
+      extraOptions = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        example = [
+          "-X debug"
+          "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5006"
+          "-verbose:gc"
+          "-Xloggc:go-agent-gc.log"
+          "-XX:+PrintGCTimeStamps"
+          "-XX:+PrintTenuringDistribution"
+          "-XX:+PrintGCDetails"
+          "-XX:+PrintGC"
+        ];
+        description = lib.mdDoc ''
+          Specifies additional command line arguments to pass to the Go.CD agent
+          java process. The example contains debug and GC logging arguments.
+        '';
+      };
+
+      environment = mkOption {
+        default = { };
+        type = with types; attrsOf str;
+        description = lib.mdDoc ''
+          Additional environment variables to be passed to the Go.CD agent process.
+          As a base environment, the Go.CD agent receives NIX_PATH from
+          {option}`environment.sessionVariables`; NIX_REMOTE is set to
+          "daemon".
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups = optionalAttrs (cfg.group == "gocd-agent") {
+      gocd-agent.gid = config.ids.gids.gocd-agent;
+    };
+
+    users.users = optionalAttrs (cfg.user == "gocd-agent") {
+      gocd-agent = {
+        description = "gocd-agent user";
+        createHome = true;
+        home = cfg.workDir;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+        uid = config.ids.uids.gocd-agent;
+      };
+    };
+
+    systemd.services.gocd-agent = {
+      description = "GoCD Agent";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment =
+        let
+          selectedSessionVars =
+            lib.filterAttrs (n: v: builtins.elem n [ "NIX_PATH" ])
+              config.environment.sessionVariables;
+        in
+          selectedSessionVars //
+            {
+              NIX_REMOTE = "daemon";
+              AGENT_WORK_DIR = cfg.workDir;
+              AGENT_STARTUP_ARGS = ''${concatStringsSep " "  cfg.startupOptions}'';
+              LOG_DIR = cfg.workDir;
+              LOG_FILE = "${cfg.workDir}/go-agent-start.log";
+            } //
+            cfg.environment;
+
+      path = cfg.packages;
+
+      script = ''
+        MPATH="''${PATH}";
+        source /etc/profile
+        export PATH="''${MPATH}:''${PATH}";
+
+        if ! test -f ~/.nixpkgs/config.nix; then
+          mkdir -p ~/.nixpkgs/
+          echo "{ allowUnfree = true; }" > ~/.nixpkgs/config.nix
+        fi
+
+        mkdir -p config
+        rm -f config/autoregister.properties
+        ln -s "${pkgs.writeText "autoregister.properties" cfg.agentConfig}" config/autoregister.properties
+
+        ${pkgs.git}/bin/git config --global --add http.sslCAinfo /etc/ssl/certs/ca-certificates.crt
+        ${pkgs.jre}/bin/java ${concatStringsSep " " cfg.startupOptions} \
+                        ${concatStringsSep " " cfg.extraOptions} \
+                              -jar ${pkgs.gocd-agent}/go-agent/agent-bootstrapper.jar \
+                              -serverUrl ${cfg.goServer}
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        WorkingDirectory = cfg.workDir;
+        RestartSec = 30;
+        Restart = "on-failure";
+      };
+    };
+  };
+}
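
A minimal sketch of a gocd-agent configuration using only the options defined above; the server URL and hostname are placeholders:

    services.gocd-agent = {
      enable = true;
      # Placeholder GoCD server URL; the agent registers itself against it.
      goServer = "https://ci.example.com:8154/go";
      extraGroups = [ "docker" ];
      agentConfig = ''
        agent.auto.register.resources=nix
        agent.auto.register.hostname=agent01
      '';
    };
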
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/gocd-server/default.nix b/nixpkgs/nixos/modules/services/continuous-integration/gocd-server/default.nix
new file mode 100644
index 000000000000..bf7fd529bfca
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/gocd-server/default.nix
@@ -0,0 +1,216 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gocd-server;
+  opt = options.services.gocd-server;
+in {
+  options = {
+    services.gocd-server = {
+      enable = mkEnableOption (lib.mdDoc "gocd-server");
+
+      user = mkOption {
+        default = "gocd-server";
+        type = types.str;
+        description = lib.mdDoc ''
+          User the Go.CD server should execute under.
+        '';
+      };
+
+      group = mkOption {
+        default = "gocd-server";
+        type = types.str;
+        description = lib.mdDoc ''
+          If the default user "gocd-server" is configured then this is the primary group of that user.
+        '';
+      };
+
+      extraGroups = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        example = [ "wheel" "docker" ];
+        description = lib.mdDoc ''
+          List of extra groups that the "gocd-server" user should be a part of.
+        '';
+      };
+
+      listenAddress = mkOption {
+        default = "0.0.0.0";
+        example = "localhost";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the bind address on which the Go.CD server HTTP interface listens.
+        '';
+      };
+
+      port = mkOption {
+        default = 8153;
+        type = types.port;
+        description = lib.mdDoc ''
+          Specifies port number on which the Go.CD server HTTP interface listens.
+        '';
+      };
+
+      sslPort = mkOption {
+        default = 8154;
+        type = types.int;
+        description = lib.mdDoc ''
+          Specifies port number on which the Go.CD server HTTPS interface listens.
+        '';
+      };
+
+      workDir = mkOption {
+        default = "/var/lib/go-server";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the working directory in which the Go.CD server java archive resides.
+        '';
+      };
+
+      packages = mkOption {
+        default = [ pkgs.stdenv pkgs.jre pkgs.git config.programs.ssh.package pkgs.nix ];
+        defaultText = literalExpression "[ pkgs.stdenv pkgs.jre pkgs.git config.programs.ssh.package pkgs.nix ]";
+        type = types.listOf types.package;
+        description = lib.mdDoc ''
+          Packages to add to PATH for the Go.CD server's process.
+        '';
+      };
+
+      initialJavaHeapSize = mkOption {
+        default = "512m";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the initial java heap memory size for the Go.CD server's java process.
+        '';
+      };
+
+      maxJavaHeapMemory = mkOption {
+        default = "1024m";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the maximum java heap memory size for the Go.CD server's java process.
+        '';
+      };
+
+      startupOptions = mkOption {
+        type = types.listOf types.str;
+        default = [
+          "-Xms${cfg.initialJavaHeapSize}"
+          "-Xmx${cfg.maxJavaHeapMemory}"
+          "-Dcruise.listen.host=${cfg.listenAddress}"
+          "-Duser.language=en"
+          "-Djruby.rack.request.size.threshold.bytes=30000000"
+          "-Duser.country=US"
+          "-Dcruise.config.dir=${cfg.workDir}/conf"
+          "-Dcruise.config.file=${cfg.workDir}/conf/cruise-config.xml"
+          "-Dcruise.server.port=${toString cfg.port}"
+          "-Dcruise.server.ssl.port=${toString cfg.sslPort}"
+          "--add-opens=java.base/java.lang=ALL-UNNAMED"
+          "--add-opens=java.base/java.util=ALL-UNNAMED"
+        ];
+        defaultText = literalExpression ''
+          [
+            "-Xms''${config.${opt.initialJavaHeapSize}}"
+            "-Xmx''${config.${opt.maxJavaHeapMemory}}"
+            "-Dcruise.listen.host=''${config.${opt.listenAddress}}"
+            "-Duser.language=en"
+            "-Djruby.rack.request.size.threshold.bytes=30000000"
+            "-Duser.country=US"
+            "-Dcruise.config.dir=''${config.${opt.workDir}}/conf"
+            "-Dcruise.config.file=''${config.${opt.workDir}}/conf/cruise-config.xml"
+            "-Dcruise.server.port=''${toString config.${opt.port}}"
+            "-Dcruise.server.ssl.port=''${toString config.${opt.sslPort}}"
+            "--add-opens=java.base/java.lang=ALL-UNNAMED"
+            "--add-opens=java.base/java.util=ALL-UNNAMED"
+          ]
+        '';
+
+        description = lib.mdDoc ''
+          Specifies startup command line arguments to pass to the Go.CD server
+          java process.
+        '';
+      };
+
+      extraOptions = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        example = [
+          "-X debug"
+          "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address=5005"
+          "-verbose:gc"
+          "-Xloggc:go-server-gc.log"
+          "-XX:+PrintGCTimeStamps"
+          "-XX:+PrintTenuringDistribution"
+          "-XX:+PrintGCDetails"
+          "-XX:+PrintGC"
+        ];
+        description = lib.mdDoc ''
+          Specifies additional command line arguments to pass to the Go.CD server's
+          java process. The example contains debug and GC logging arguments.
+        '';
+      };
+
+      environment = mkOption {
+        default = { };
+        type = with types; attrsOf str;
+        description = lib.mdDoc ''
+          Additional environment variables to be passed to the gocd-server process.
+          As a base environment, gocd-server receives NIX_PATH from
+          {option}`environment.sessionVariables`; NIX_REMOTE is set to
+          "daemon".
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups = optionalAttrs (cfg.group == "gocd-server") {
+      gocd-server.gid = config.ids.gids.gocd-server;
+    };
+
+    users.users = optionalAttrs (cfg.user == "gocd-server") {
+      gocd-server = {
+        description = "gocd-server user";
+        createHome = true;
+        home = cfg.workDir;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+        uid = config.ids.uids.gocd-server;
+      };
+    };
+
+    systemd.services.gocd-server = {
+      description = "GoCD Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment =
+        let
+          selectedSessionVars =
+            lib.filterAttrs (n: v: builtins.elem n [ "NIX_PATH" ])
+              config.environment.sessionVariables;
+        in
+          selectedSessionVars //
+            { NIX_REMOTE = "daemon";
+            } //
+            cfg.environment;
+
+      path = cfg.packages;
+
+      script = ''
+        ${pkgs.git}/bin/git config --global --add http.sslCAinfo /etc/ssl/certs/ca-certificates.crt
+        ${pkgs.jre}/bin/java -server ${concatStringsSep " " cfg.startupOptions} \
+                               ${concatStringsSep " " cfg.extraOptions}  \
+                              -jar ${pkgs.gocd-server}/go-server/lib/go.jar
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.workDir;
+      };
+    };
+  };
+}
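
A minimal sketch of a matching gocd-server configuration, again using only options from this module; the heap size is an illustrative value:

    services.gocd-server = {
      enable = true;
      listenAddress = "localhost";  # keep the HTTP interface off public interfaces
      port = 8153;
      sslPort = 8154;
      maxJavaHeapMemory = "2048m";  # illustrative; the default is 1024m
    };
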
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/common.nix b/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/common.nix
new file mode 100644
index 000000000000..ea9b5ffbf43c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/common.nix
@@ -0,0 +1,118 @@
+/*
+
+  This file is for options that NixOS and nix-darwin have in common.
+
+  Platform-specific code is in the respective default.nix files.
+
+*/
+
+{ config, lib, options, pkgs, ... }:
+let
+  inherit (lib)
+    filterAttrs
+    literalExpression
+    mkIf
+    mkOption
+    mkRemovedOptionModule
+    mkRenamedOptionModule
+    types
+
+    ;
+
+  cfg = config.services.hercules-ci-agent;
+
+  inherit (import ./settings.nix { inherit pkgs lib; }) format settingsModule;
+
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "hercules-ci-agent" "extraOptions" ] [ "services" "hercules-ci-agent" "settings" ])
+    (mkRenamedOptionModule [ "services" "hercules-ci-agent" "baseDirectory" ] [ "services" "hercules-ci-agent" "settings" "baseDirectory" ])
+    (mkRenamedOptionModule [ "services" "hercules-ci-agent" "concurrentTasks" ] [ "services" "hercules-ci-agent" "settings" "concurrentTasks" ])
+    (mkRemovedOptionModule [ "services" "hercules-ci-agent" "patchNix" ] "Nix versions packaged in this version of Nixpkgs don't need a patched nix-daemon to work correctly in Hercules CI Agent clusters.")
+  ];
+
+  options.services.hercules-ci-agent = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable to run Hercules CI Agent as a system service.
+
+        [Hercules CI](https://hercules-ci.com) is a
+        continuous integration service that is centered around Nix.
+
+        Support is available at [help@hercules-ci.com](mailto:help@hercules-ci.com).
+      '';
+    };
+    package = mkOption {
+      description = lib.mdDoc ''
+        Package containing the bin/hercules-ci-agent executable.
+      '';
+      type = types.package;
+      default = pkgs.hercules-ci-agent;
+      defaultText = literalExpression "pkgs.hercules-ci-agent";
+    };
+    settings = mkOption {
+      description = lib.mdDoc ''
+        These settings are written to the `agent.toml` file.
+
+        Not all settings are listed as options; unlisted settings can be set nonetheless.
+
+        For the exhaustive list of settings, see <https://docs.hercules-ci.com/hercules-ci/reference/agent-config/>.
+      '';
+      type = types.submoduleWith { modules = [ settingsModule ]; };
+    };
+
+    /*
+      Internal and/or computed values.
+
+      These are written as options instead of let binding to allow sharing with
+      default.nix on both NixOS and nix-darwin.
+    */
+    tomlFile = mkOption {
+      type = types.path;
+      internal = true;
+      defaultText = lib.literalMD "generated `hercules-ci-agent.toml`";
+      description = lib.mdDoc ''
+        The fully assembled config file.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Make sure that nix.extraOptions does not override trusted-users
+    assertions = [
+      {
+        assertion =
+          (cfg.settings.nixUserIsTrusted or false) ->
+          builtins.match ".*(^|\n)[ \t]*trusted-users[ \t]*=.*" config.nix.extraOptions == null;
+        message = ''
+          hercules-ci-agent: Please do not set `trusted-users` in `nix.extraOptions`.
+
+          The hercules-ci-agent module by default relies on `nix.settings.trusted-users`
+          to be effectful, but a line like `trusted-users = ...` in `nix.extraOptions`
+          will override the value set in `nix.settings.trusted-users`.
+
+          Instead of setting `trusted-users` in the `nix.extraOptions` string, you should
+          set an option with additive semantics, such as
+           - the NixOS option `nix.settings.trusted-users`, or
+           - the Nix option in the `extraOptions` string, `extra-trusted-users`
+        '';
+      }
+    ];
+    nix.extraOptions = ''
+      # A store path that was missing at first may well have finished building,
+      # even shortly after the previous lookup. This *also* applies to the daemon.
+      narinfo-cache-negative-ttl = 0
+    '';
+    services.hercules-ci-agent = {
+      tomlFile =
+        format.generate "hercules-ci-agent.toml" cfg.settings;
+      settings.config._module.args = {
+        packageOption = options.services.hercules-ci-agent.package;
+        inherit pkgs;
+      };
+    };
+  };
+}
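
The assertion above rejects a literal `trusted-users =` line in nix.extraOptions. A sketch of the two additive alternatives it points to, with a placeholder user name:

    # Preferred: the additive NixOS option.
    nix.settings.trusted-users = [ "alice" ];

    # Or, inside the extraOptions string, the additive Nix setting.
    nix.extraOptions = ''
      extra-trusted-users = alice
    '';
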
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/default.nix b/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/default.nix
new file mode 100644
index 000000000000..ad26b5316dde
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/default.nix
@@ -0,0 +1,110 @@
+/*
+
+  This file is for NixOS-specific options and configs.
+
+  Code that is shared with nix-darwin goes in common.nix.
+
+*/
+
+{ pkgs, config, lib, ... }:
+let
+  inherit (lib) mkIf mkDefault;
+
+  cfg = config.services.hercules-ci-agent;
+
+  command = "${cfg.package}/bin/hercules-ci-agent --config ${cfg.tomlFile}";
+  testCommand = "${command} --test-configuration";
+
+in
+{
+  imports = [
+    ./common.nix
+    (lib.mkRenamedOptionModule [ "services" "hercules-ci-agent" "user" ] [ "systemd" "services" "hercules-ci-agent" "serviceConfig" "User" ])
+  ];
+
+  config = mkIf cfg.enable {
+    systemd.services.hercules-ci-agent = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      path = [ config.nix.package ];
+      startLimitBurst = 30 * 1000000; # practically infinite
+      serviceConfig = {
+        User = "hercules-ci-agent";
+        ExecStart = command;
+        ExecStartPre = testCommand;
+        Restart = "on-failure";
+        RestartSec = 120;
+
+        # If a worker goes OOM, don't kill the main process. It needs to
+        # report the failure and it's unlikely to be part of the problem.
+        OOMPolicy = "continue";
+
+        # Work around excessive stack use by libstdc++ regex
+        # https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86164
+        # A 256 MiB stack allows between 400 KiB and 1.5 MiB file to be matched by ".*".
+        LimitSTACK = 256 * 1024 * 1024;
+      };
+    };
+
+    # Changes in the secrets do not affect the unit in any way that would cause
+    # a restart, which is currently necessary to reload the secrets.
+    systemd.paths.hercules-ci-agent-restart-files = {
+      wantedBy = [ "hercules-ci-agent.service" ];
+      pathConfig = {
+        Unit = "hercules-ci-agent-restarter.service";
+        PathChanged = [ cfg.settings.clusterJoinTokenPath cfg.settings.binaryCachesPath ];
+      };
+    };
+    systemd.services.hercules-ci-agent-restarter = {
+      serviceConfig.Type = "oneshot";
+      script = ''
+        # Wait a bit, with the effect of bundling up file changes into a single
+        # run of this script and hopefully a single restart.
+        sleep 10
+        if systemctl is-active --quiet hercules-ci-agent.service; then
+          if ${testCommand}; then
+            systemctl restart hercules-ci-agent.service
+          else
+            echo 1>&2 "WARNING: Not restarting agent because config is not valid at this time."
+          fi
+        else
+          echo 1>&2 "Not restarting hercules-ci-agent despite config file update, because it is not already active."
+        fi
+      '';
+    };
+
+    # Trusted user allows simplified configuration and better performance
+    # when operating in a cluster.
+    nix.settings.trusted-users = [ config.systemd.services.hercules-ci-agent.serviceConfig.User ];
+    services.hercules-ci-agent = {
+      settings = {
+        nixUserIsTrusted = true;
+        labels =
+          let
+            mkIfNotNull = x: mkIf (x != null) x;
+          in
+          {
+            nixos.configurationRevision = mkIfNotNull config.system.configurationRevision;
+            nixos.release = config.system.nixos.release;
+            nixos.label = mkIfNotNull config.system.nixos.label;
+            nixos.codeName = config.system.nixos.codeName;
+            nixos.tags = config.system.nixos.tags;
+            nixos.systemName = mkIfNotNull config.system.name;
+          };
+      };
+    };
+
+    users.users.hercules-ci-agent = {
+      home = cfg.settings.baseDirectory;
+      createHome = true;
+      group = "hercules-ci-agent";
+      description = "Hercules CI Agent system user";
+      isSystemUser = true;
+    };
+
+    users.groups.hercules-ci-agent = { };
+  };
+
+  meta.maintainers = [ lib.maintainers.roberth ];
+}
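
A minimal sketch of enabling the agent on NixOS using options from common.nix and settings.nix; the secret paths shown are the module defaults and must be provisioned outside the Nix store (for example with agenix or NixOps `deployment.keys`, as the settings descriptions note):

    services.hercules-ci-agent = {
      enable = true;
      settings = {
        concurrentTasks = 4;  # illustrative; "auto" (CPU core count) is the default
        clusterJoinTokenPath = "/var/lib/hercules-ci-agent/secrets/cluster-join-token.key";
        binaryCachesPath = "/var/lib/hercules-ci-agent/secrets/binary-caches.json";
      };
    };
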
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/settings.nix b/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/settings.nix
new file mode 100644
index 000000000000..8eb902313ee8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/hercules-ci-agent/settings.nix
@@ -0,0 +1,153 @@
+# Not a module
+{ pkgs, lib }:
+let
+  inherit (lib)
+    types
+    literalExpression
+    mkOption
+    ;
+
+  format = pkgs.formats.toml { };
+
+  settingsModule = { config, packageOption, pkgs, ... }: {
+    freeformType = format.type;
+    options = {
+      apiBaseUrl = mkOption {
+        description = lib.mdDoc ''
+          API base URL that the agent will connect to.
+
+          When using Hercules CI Enterprise, set this to the URL where your
+          Hercules CI server is reachable.
+        '';
+        type = types.str;
+        default = "https://hercules-ci.com";
+      };
+      baseDirectory = mkOption {
+        type = types.path;
+        default = "/var/lib/hercules-ci-agent";
+        description = lib.mdDoc ''
+          State directory (secrets, work directory, etc) for agent
+        '';
+      };
+      concurrentTasks = mkOption {
+        description = lib.mdDoc ''
+          Number of tasks to perform simultaneously.
+
+          A task is a single derivation build, an evaluation or an effect run.
+          At minimum, you need 2 concurrent tasks for `x86_64-linux`
+          in your cluster, to allow for import from derivation.
+
+          `concurrentTasks` can be around the CPU core count or lower if memory is
+          the bottleneck.
+
+          The optimal value depends on the resource consumption characteristics of your workload,
+          including memory usage and in-task parallelism. This is typically determined empirically.
+
+          When scaling, it is generally better to have a double-size machine than two machines,
+          because each split of resources causes inefficiencies, particularly with regard
+          to build latency due to extra downloads.
+        '';
+        type = types.either types.ints.positive (types.enum [ "auto" ]);
+        default = "auto";
+        defaultText = lib.literalMD ''
+          `"auto"`, meaning equal to the number of CPU cores.
+        '';
+      };
+      labels = mkOption {
+        description = lib.mdDoc ''
+          A key-value map of user data.
+
+          This data will be available to organization members in the dashboard and API.
+
+          The values can be of any TOML type that corresponds to a JSON type, but arrays
+          cannot contain tables/objects due to limitations of the TOML library. Values
+          involving arrays of non-primitive types may not be representable currently.
+        '';
+        type = format.type;
+        defaultText = literalExpression ''
+          {
+            agent.source = "..."; # One of "nixpkgs", "flake", "override"
+            lib.version = "...";
+            pkgs.version = "...";
+          }
+        '';
+      };
+      workDirectory = mkOption {
+        description = lib.mdDoc ''
+          The directory in which temporary subdirectories are created for task state. This includes sources for Nix evaluation.
+        '';
+        type = types.path;
+        default = config.baseDirectory + "/work";
+        defaultText = literalExpression ''baseDirectory + "/work"'';
+      };
+      staticSecretsDirectory = mkOption {
+        description = lib.mdDoc ''
+          This is the default directory to look for statically configured secrets like `cluster-join-token.key`.
+
+          See also `clusterJoinTokenPath` and `binaryCachesPath` for fine-grained configuration.
+        '';
+        type = types.path;
+        default = config.baseDirectory + "/secrets";
+        defaultText = literalExpression ''baseDirectory + "/secrets"'';
+      };
+      clusterJoinTokenPath = mkOption {
+        description = lib.mdDoc ''
+          Location of the cluster-join-token.key file.
+
+          You can retrieve the contents of the file when creating a new agent via
+          <https://hercules-ci.com/dashboard>.
+
+          As this value is confidential, it should not be in the store, but
+          installed using other means, such as agenix, NixOps
+          `deployment.keys`, or manual installation.
+
+          The contents of the file are used for authentication between the agent and the API.
+        '';
+        type = types.path;
+        default = config.staticSecretsDirectory + "/cluster-join-token.key";
+        defaultText = literalExpression ''staticSecretsDirectory + "/cluster-join-token.key"'';
+      };
+      binaryCachesPath = mkOption {
+        description = lib.mdDoc ''
+          Path to a JSON file containing binary cache secret keys.
+
+          As these values are confidential, they should not be in the store, but
+          copied over using other means, such as agenix, NixOps
+          `deployment.keys`, or manual installation.
+
+          The format is described on <https://docs.hercules-ci.com/hercules-ci-agent/binary-caches-json/>.
+        '';
+        type = types.path;
+        default = config.staticSecretsDirectory + "/binary-caches.json";
+        defaultText = literalExpression ''staticSecretsDirectory + "/binary-caches.json"'';
+      };
+      secretsJsonPath = mkOption {
+        description = lib.mdDoc ''
+          Path to a JSON file containing secrets for effects.
+
+          As these values are confidential, they should not be in the store, but
+          copied over using other means, such as agenix, NixOps
+          `deployment.keys`, or manual installation.
+
+          The format is described on <https://docs.hercules-ci.com/hercules-ci-agent/secrets-json/>.
+        '';
+        type = types.path;
+        default = config.staticSecretsDirectory + "/secrets.json";
+        defaultText = literalExpression ''staticSecretsDirectory + "/secrets.json"'';
+      };
+    };
+    config = {
+      labels = {
+        agent.source =
+          if packageOption.highestPrio == (lib.modules.mkOptionDefault { }).priority
+          then "nixpkgs"
+          else lib.mkOptionDefault "override";
+        pkgs.version = pkgs.lib.version;
+        lib.version = lib.version;
+      };
+    };
+  };
+in
+{
+  inherit format settingsModule;
+}
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix b/nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix
new file mode 100644
index 000000000000..83078706fcae
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/hydra/default.nix
@@ -0,0 +1,506 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.hydra;
+
+  baseDir = "/var/lib/hydra";
+
+  hydraConf = pkgs.writeScript "hydra.conf" cfg.extraConfig;
+
+  hydraEnv =
+    { HYDRA_DBI = cfg.dbi;
+      HYDRA_CONFIG = "${baseDir}/hydra.conf";
+      HYDRA_DATA = "${baseDir}";
+    };
+
+  env =
+    { NIX_REMOTE = "daemon";
+      SSL_CERT_FILE = "/etc/ssl/certs/ca-certificates.crt"; # Remove in 16.03
+      PGPASSFILE = "${baseDir}/pgpass";
+      NIX_REMOTE_SYSTEMS = concatStringsSep ":" cfg.buildMachinesFiles;
+    } // optionalAttrs (cfg.smtpHost != null) {
+      EMAIL_SENDER_TRANSPORT = "SMTP";
+      EMAIL_SENDER_TRANSPORT_host = cfg.smtpHost;
+    } // hydraEnv // cfg.extraEnv;
+
+  serverEnv = env //
+    { HYDRA_TRACKER = cfg.tracker;
+      XDG_CACHE_HOME = "${baseDir}/www/.cache";
+      COLUMNS = "80";
+      PGPASSFILE = "${baseDir}/pgpass-www"; # grrr
+    } // (optionalAttrs cfg.debugServer { DBIC_TRACE = "1"; });
+
+  localDB = "dbi:Pg:dbname=hydra;user=hydra;";
+
+  haveLocalDB = cfg.dbi == localDB;
+
+  hydra-package =
+  let
+    makeWrapperArgs = concatStringsSep " " (mapAttrsToList (key: value: "--set \"${key}\" \"${value}\"") hydraEnv);
+  in pkgs.buildEnv rec {
+    name = "hydra-env";
+    nativeBuildInputs = [ pkgs.makeWrapper ];
+    paths = [ cfg.package ];
+
+    postBuild = ''
+      if [ -L "$out/bin" ]; then
+          unlink "$out/bin"
+      fi
+      mkdir -p "$out/bin"
+
+      for path in ${concatStringsSep " " paths}; do
+        if [ -d "$path/bin" ]; then
+          cd "$path/bin"
+          for prg in *; do
+            if [ -f "$prg" ]; then
+              rm -f "$out/bin/$prg"
+              if [ -x "$prg" ]; then
+                makeWrapper "$path/bin/$prg" "$out/bin/$prg" ${makeWrapperArgs}
+              fi
+            fi
+          done
+        fi
+      done
+   '';
+  };
+
+in
+
+{
+  ###### interface
+  options = {
+
+    services.hydra = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run Hydra services.
+        '';
+      };
+
+      dbi = mkOption {
+        type = types.str;
+        default = localDB;
+        example = "dbi:Pg:dbname=hydra;host=postgres.example.org;user=foo;";
+        description = lib.mdDoc ''
+          The DBI string for Hydra database connection.
+
+          NOTE: Attempts to set `application_name` will be overridden by
+          `hydra-TYPE` (where TYPE is e.g. `evaluator`, `queue-runner`,
+          etc.) in all hydra services to more easily distinguish where
+          queries are coming from.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.hydra_unstable;
+        defaultText = literalExpression "pkgs.hydra_unstable";
+        description = lib.mdDoc "The Hydra package.";
+      };
+
+      hydraURL = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The base URL for the Hydra webserver instance. Used for links in emails.
+        '';
+      };
+
+      listenHost = mkOption {
+        type = types.str;
+        default = "*";
+        example = "localhost";
+        description = lib.mdDoc ''
+          The hostname or address to listen on or `*` to listen
+          on all interfaces.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3000;
+        description = lib.mdDoc ''
+          TCP port the web server should listen to.
+        '';
+      };
+
+      minimumDiskFree = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Minimum free disk space (GiB) below which the queue runner is stopped.
+        '';
+      };
+
+      minimumDiskFreeEvaluator = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Minimum free disk space (GiB) below which the evaluator is stopped.
+        '';
+      };
+
+      notificationSender = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Sender email address used for email notifications.
+        '';
+      };
+
+      smtpHost = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "localhost";
+        description = lib.mdDoc ''
+          Hostname of the SMTP server to use to send email.
+        '';
+      };
+
+      tracker = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Piece of HTML that is included on all pages.
+        '';
+      };
+
+      logo = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to a file containing the logo of your Hydra instance.
+        '';
+      };
+
+      debugServer = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to run the server in debug mode.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        description = lib.mdDoc "Extra lines for the Hydra configuration.";
+      };
+
+      extraEnv = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = lib.mdDoc "Extra environment variables for Hydra.";
+      };
+
+      gcRootsDir = mkOption {
+        type = types.path;
+        default = "/nix/var/nix/gcroots/hydra";
+        description = lib.mdDoc "Directory that holds Hydra garbage collector roots.";
+      };
+
+      buildMachinesFiles = mkOption {
+        type = types.listOf types.path;
+        default = optional (config.nix.buildMachines != []) "/etc/nix/machines";
+        defaultText = literalExpression ''optional (config.nix.buildMachines != []) "/etc/nix/machines"'';
+        example = [ "/etc/nix/machines" "/var/lib/hydra/provisioner/machines" ];
+        description = lib.mdDoc "List of files containing build machines.";
+      };
+
+      useSubstitutes = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to use binary caches for downloading store paths. Note that
+          binary substitutions trigger (a potentially large number of) additional
+          HTTP requests that slow down the queue monitor thread significantly.
+          Also, this Hydra instance will serve those downloaded store paths to
+          its users with its own signature attached as if it had built them
+          itself, so don't enable this feature unless your active binary caches
+          are absolutely trustworthy.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.groups.hydra = {
+      gid = config.ids.gids.hydra;
+    };
+
+    users.users.hydra =
+      { description = "Hydra";
+        group = "hydra";
+        # We don't enable `createHome` here because the creation of the home directory is handled by the hydra-init service below.
+        home = baseDir;
+        useDefaultShell = true;
+        uid = config.ids.uids.hydra;
+      };
+
+    users.users.hydra-queue-runner =
+      { description = "Hydra queue runner";
+        group = "hydra";
+        useDefaultShell = true;
+        home = "${baseDir}/queue-runner"; # really only to keep SSH happy
+        uid = config.ids.uids.hydra-queue-runner;
+      };
+
+    users.users.hydra-www =
+      { description = "Hydra web server";
+        group = "hydra";
+        useDefaultShell = true;
+        uid = config.ids.uids.hydra-www;
+      };
+
+    services.hydra.extraConfig =
+      ''
+        using_frontend_proxy = 1
+        base_uri = ${cfg.hydraURL}
+        notification_sender = ${cfg.notificationSender}
+        max_servers = 25
+        ${optionalString (cfg.logo != null) ''
+          hydra_logo = ${cfg.logo}
+        ''}
+        gc_roots_dir = ${cfg.gcRootsDir}
+        use-substitutes = ${if cfg.useSubstitutes then "1" else "0"}
+      '';
+
+    environment.systemPackages = [ hydra-package ];
+
+    environment.variables = hydraEnv;
+
+    nix.settings = mkMerge [
+      {
+        keep-outputs = true;
+        keep-derivations = true;
+        trusted-users = [ "hydra-queue-runner" ];
+      }
+
+      (mkIf (versionOlder (getVersion config.nix.package.out) "2.4pre")
+        {
+          # The default (`true') slows Nix down a lot since the build farm
+          # has so many GC roots.
+          gc-check-reachability = false;
+        }
+      )
+    ];
+
+    systemd.services.hydra-init =
+      { wantedBy = [ "multi-user.target" ];
+        requires = optional haveLocalDB "postgresql.service";
+        after = optional haveLocalDB "postgresql.service";
+        environment = env // {
+          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-init";
+        };
+        path = [ pkgs.util-linux ];
+        preStart = ''
+          mkdir -p ${baseDir}
+          chown hydra:hydra ${baseDir}
+          chmod 0750 ${baseDir}
+
+          ln -sf ${hydraConf} ${baseDir}/hydra.conf
+
+          mkdir -m 0700 -p ${baseDir}/www
+          chown hydra-www:hydra ${baseDir}/www
+
+          mkdir -m 0700 -p ${baseDir}/queue-runner
+          mkdir -m 0750 -p ${baseDir}/build-logs
+          mkdir -m 0750 -p ${baseDir}/runcommand-logs
+          chown hydra-queue-runner.hydra \
+            ${baseDir}/queue-runner \
+            ${baseDir}/build-logs \
+            ${baseDir}/runcommand-logs
+
+          ${optionalString haveLocalDB ''
+            if ! [ -e ${baseDir}/.db-created ]; then
+              runuser -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createuser hydra
+              runuser -u ${config.services.postgresql.superUser} ${config.services.postgresql.package}/bin/createdb -- -O hydra hydra
+              touch ${baseDir}/.db-created
+            fi
+            echo "create extension if not exists pg_trgm" | runuser -u ${config.services.postgresql.superUser} -- ${config.services.postgresql.package}/bin/psql hydra
+          ''}
+
+          if [ ! -e ${cfg.gcRootsDir} ]; then
+
+            # Move legacy roots directory.
+            if [ -e /nix/var/nix/gcroots/per-user/hydra/hydra-roots ]; then
+              mv /nix/var/nix/gcroots/per-user/hydra/hydra-roots ${cfg.gcRootsDir}
+            fi
+
+            mkdir -p ${cfg.gcRootsDir}
+          fi
+
+          # Move legacy hydra-www roots.
+          if [ -e /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots ]; then
+            find /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots/ -type f \
+              | xargs -r mv -f -t ${cfg.gcRootsDir}/
+            rmdir /nix/var/nix/gcroots/per-user/hydra-www/hydra-roots
+          fi
+
+          chown hydra:hydra ${cfg.gcRootsDir}
+          chmod 2775 ${cfg.gcRootsDir}
+        '';
+        serviceConfig.ExecStart = "${hydra-package}/bin/hydra-init";
+        serviceConfig.PermissionsStartOnly = true;
+        serviceConfig.User = "hydra";
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+    systemd.services.hydra-server =
+      { wantedBy = [ "multi-user.target" ];
+        requires = [ "hydra-init.service" ];
+        after = [ "hydra-init.service" ];
+        environment = serverEnv // {
+          HYDRA_DBI = "${serverEnv.HYDRA_DBI};application_name=hydra-server";
+        };
+        restartTriggers = [ hydraConf ];
+        serviceConfig =
+          { ExecStart =
+              "@${hydra-package}/bin/hydra-server hydra-server -f -h '${cfg.listenHost}' "
+              + "-p ${toString cfg.port} --max_spare_servers 5 --max_servers 25 "
+              + "--max_requests 100 ${optionalString cfg.debugServer "-d"}";
+            User = "hydra-www";
+            PermissionsStartOnly = true;
+            Restart = "always";
+          };
+      };
+
+    systemd.services.hydra-queue-runner =
+      { wantedBy = [ "multi-user.target" ];
+        requires = [ "hydra-init.service" ];
+        after = [ "hydra-init.service" "network.target" ];
+        path = [ hydra-package pkgs.nettools pkgs.openssh pkgs.bzip2 config.nix.package ];
+        restartTriggers = [ hydraConf ];
+        environment = env // {
+          PGPASSFILE = "${baseDir}/pgpass-queue-runner"; # grrr
+          IN_SYSTEMD = "1"; # to get log severity levels
+          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-queue-runner";
+        };
+        serviceConfig =
+          { ExecStart = "@${hydra-package}/bin/hydra-queue-runner hydra-queue-runner -v";
+            ExecStopPost = "${hydra-package}/bin/hydra-queue-runner --unlock";
+            User = "hydra-queue-runner";
+            Restart = "always";
+
+            # Ensure we can get core dumps.
+            LimitCORE = "infinity";
+            WorkingDirectory = "${baseDir}/queue-runner";
+          };
+      };
+
+    systemd.services.hydra-evaluator =
+      { wantedBy = [ "multi-user.target" ];
+        requires = [ "hydra-init.service" ];
+        after = [ "hydra-init.service" "network.target" "network-online.target" ];
+        path = with pkgs; [ hydra-package nettools jq ];
+        restartTriggers = [ hydraConf ];
+        environment = env // {
+          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-evaluator";
+        };
+        serviceConfig =
+          { ExecStart = "@${hydra-package}/bin/hydra-evaluator hydra-evaluator";
+            User = "hydra";
+            Restart = "always";
+            WorkingDirectory = baseDir;
+          };
+      };
+
+    systemd.services.hydra-update-gc-roots =
+      { requires = [ "hydra-init.service" ];
+        after = [ "hydra-init.service" ];
+        environment = env // {
+          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-update-gc-roots";
+        };
+        serviceConfig =
+          { ExecStart = "@${hydra-package}/bin/hydra-update-gc-roots hydra-update-gc-roots";
+            User = "hydra";
+          };
+        startAt = "2,14:15";
+      };
+
+    systemd.services.hydra-send-stats =
+      { wantedBy = [ "multi-user.target" ];
+        after = [ "hydra-init.service" ];
+        environment = env // {
+          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-send-stats";
+        };
+        serviceConfig =
+          { ExecStart = "@${hydra-package}/bin/hydra-send-stats hydra-send-stats";
+            User = "hydra";
+          };
+      };
+
+    systemd.services.hydra-notify =
+      { wantedBy = [ "multi-user.target" ];
+        requires = [ "hydra-init.service" ];
+        after = [ "hydra-init.service" ];
+        restartTriggers = [ hydraConf ];
+        environment = env // {
+          PGPASSFILE = "${baseDir}/pgpass-queue-runner";
+          HYDRA_DBI = "${env.HYDRA_DBI};application_name=hydra-notify";
+        };
+        serviceConfig =
+          { ExecStart = "@${hydra-package}/bin/hydra-notify hydra-notify";
+            # FIXME: run this under a less privileged user?
+            User = "hydra-queue-runner";
+            Restart = "always";
+            RestartSec = 5;
+          };
+      };
+
+    # If there is less than a certain amount of free disk space, stop
+    # the queue/evaluator to prevent builds from failing or aborting.
+    systemd.services.hydra-check-space =
+      { script =
+          ''
+            if [ $(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store))) -lt $((${toString cfg.minimumDiskFree} * 1024**3)) ]; then
+                echo "stopping Hydra queue runner due to lack of free space..."
+                systemctl stop hydra-queue-runner
+            fi
+            if [ $(($(stat -f -c '%a' /nix/store) * $(stat -f -c '%S' /nix/store))) -lt $((${toString cfg.minimumDiskFreeEvaluator} * 1024**3)) ]; then
+                echo "stopping Hydra evaluator due to lack of free space..."
+                systemctl stop hydra-evaluator
+            fi
+          '';
+        startAt = "*:0/5";
+      };
+
+    # Periodically compress build logs. The queue runner compresses
+    # logs automatically after a step finishes, but this doesn't work
+    # if the queue runner is stopped prematurely.
+    systemd.services.hydra-compress-logs =
+      { path = [ pkgs.bzip2 ];
+        script =
+          ''
+            find /var/lib/hydra/build-logs -type f -name "*.drv" -mtime +3 -size +0c | xargs -r bzip2 -v -f
+          '';
+        startAt = "Sun 01:45";
+      };
+
+    services.postgresql.enable = mkIf haveLocalDB true;
+
+    services.postgresql.identMap = optionalString haveLocalDB
+      ''
+        hydra-users hydra hydra
+        hydra-users hydra-queue-runner hydra
+        hydra-users hydra-www hydra
+        hydra-users root hydra
+        # The postgres user is used to create the pg_trgm extension for the hydra database
+        hydra-users postgres postgres
+      '';
+
+    services.postgresql.authentication = optionalString haveLocalDB
+      ''
+        local hydra all ident map=hydra-users
+      '';
+
+  };
+
+}
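
A minimal sketch of a Hydra configuration; hydraURL and notificationSender have no defaults, and the values below are placeholders. With dbi left at its default, the local PostgreSQL database is enabled and initialised by the hydra-init service above.

    services.hydra = {
      enable = true;
      hydraURL = "https://hydra.example.com";    # placeholder external URL
      notificationSender = "hydra@example.com";  # placeholder sender address
      listenHost = "localhost";
      port = 3000;
      useSubstitutes = true;  # see the caveats in the option description
    };
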
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/jenkins/default.nix b/nixpkgs/nixos/modules/services/continuous-integration/jenkins/default.nix
new file mode 100644
index 000000000000..e4d54b0cb0f4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/jenkins/default.nix
@@ -0,0 +1,247 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.jenkins;
+  jenkinsUrl = "http://${cfg.listenAddress}:${toString cfg.port}${cfg.prefix}";
+in {
+  options = {
+    services.jenkins = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the jenkins continuous integration server.
+        '';
+      };
+
+      user = mkOption {
+        default = "jenkins";
+        type = types.str;
+        description = lib.mdDoc ''
+          User the jenkins server should execute under.
+        '';
+      };
+
+      group = mkOption {
+        default = "jenkins";
+        type = types.str;
+        description = lib.mdDoc ''
+          If the default user "jenkins" is configured then this is the primary
+          group of that user.
+        '';
+      };
+
+      extraGroups = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "wheel" "dialout" ];
+        description = lib.mdDoc ''
+          List of extra groups that the "jenkins" user should be a part of.
+        '';
+      };
+
+      home = mkOption {
+        default = "/var/lib/jenkins";
+        type = types.path;
+        description = lib.mdDoc ''
+          The path to use as JENKINS_HOME. If the default user "jenkins" is configured then
+          this is the home of the "jenkins" user.
+        '';
+      };
+
+      listenAddress = mkOption {
+        default = "0.0.0.0";
+        example = "localhost";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies the bind address on which the jenkins HTTP interface listens.
+          The default is the wildcard address.
+        '';
+      };
+
+      port = mkOption {
+        default = 8080;
+        type = types.port;
+        description = lib.mdDoc ''
+          Specifies port number on which the jenkins HTTP interface listens.
+          The default is 8080.
+        '';
+      };
+
+      prefix = mkOption {
+        default = "";
+        example = "/jenkins";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specifies a URL prefix to use with jenkins.
+          If the example /jenkins is given, the jenkins server will be
+          accessible using localhost:8080/jenkins.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.jenkins;
+        defaultText = literalExpression "pkgs.jenkins";
+        type = types.package;
+        description = lib.mdDoc "Jenkins package to use.";
+      };
+
+      packages = mkOption {
+        default = [ pkgs.stdenv pkgs.git pkgs.jdk17 config.programs.ssh.package pkgs.nix ];
+        defaultText = literalExpression "[ pkgs.stdenv pkgs.git pkgs.jdk17 config.programs.ssh.package pkgs.nix ]";
+        type = types.listOf types.package;
+        description = lib.mdDoc ''
+          Packages to add to PATH for the jenkins process.
+        '';
+      };
+
+      environment = mkOption {
+        default = { };
+        type = with types; attrsOf str;
+        description = lib.mdDoc ''
+          Additional environment variables to be passed to the jenkins process.
+          As a base environment, jenkins receives NIX_PATH from
+          {option}`environment.sessionVariables`; NIX_REMOTE is set to
+          "daemon" and JENKINS_HOME is set to the value of
+          {option}`services.jenkins.home`.
+          This option has precedence and can be used to override those
+          mentioned variables.
+        '';
+      };
+
+      plugins = mkOption {
+        default = null;
+        type = types.nullOr (types.attrsOf types.package);
+        description = lib.mdDoc ''
+          A set of plugins to activate. Note that this will completely
+          remove and replace any previously installed plugins. If you
+          have manually-installed plugins that you want to keep while
+          using this module, set this option to
+          `null`. You can generate this set with a
+          tool such as `jenkinsPlugins2nix`.
+        '';
+        example = literalExpression ''
+          import path/to/jenkinsPlugins2nix-generated-plugins.nix { inherit (pkgs) fetchurl stdenv; }
+        '';
+      };
+
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "--debug=9" ];
+        description = lib.mdDoc ''
+          Additional command line arguments to pass to Jenkins.
+        '';
+      };
+
+      extraJavaOptions = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "-Xmx80m" ];
+        description = lib.mdDoc ''
+          Additional command line arguments to pass to the Java run time (as opposed to Jenkins).
+        '';
+      };
+
+      withCLI = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to make the CLI available.
+
+          More info about the CLI is available at
+          [https://www.jenkins.io/doc/book/managing/cli](https://www.jenkins.io/doc/book/managing/cli).
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment = {
+      # server references the dejavu fonts
+      systemPackages = [
+        pkgs.dejavu_fonts
+      ] ++ optional cfg.withCLI cfg.package;
+
+      variables = {}
+        // optionalAttrs cfg.withCLI {
+          # Make it more convenient to use the `jenkins-cli`.
+          JENKINS_URL = jenkinsUrl;
+        };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "jenkins") {
+      jenkins.gid = config.ids.gids.jenkins;
+    };
+
+    users.users = optionalAttrs (cfg.user == "jenkins") {
+      jenkins = {
+        description = "jenkins user";
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        useDefaultShell = true;
+        uid = config.ids.uids.jenkins;
+      };
+    };
+
+    systemd.services.jenkins = {
+      description = "Jenkins Continuous Integration Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment =
+        let
+          selectedSessionVars =
+            lib.filterAttrs (n: v: builtins.elem n [ "NIX_PATH" ])
+              config.environment.sessionVariables;
+        in
+          selectedSessionVars //
+          { JENKINS_HOME = cfg.home;
+            NIX_REMOTE = "daemon";
+          } //
+          cfg.environment;
+
+      path = cfg.packages;
+
+      # Force .war (re)extraction, or else we might run stale Jenkins.
+
+      preStart =
+        let replacePlugins =
+              optionalString (cfg.plugins != null) (
+                let pluginCmds = lib.attrsets.mapAttrsToList
+                      (n: v: "cp ${v} ${cfg.home}/plugins/${n}.jpi")
+                      cfg.plugins;
+                in ''
+                  rm -r ${cfg.home}/plugins || true
+                  mkdir -p ${cfg.home}/plugins
+                  ${lib.strings.concatStringsSep "\n" pluginCmds}
+                '');
+        in ''
+          rm -rf ${cfg.home}/war
+          ${replacePlugins}
+        '';
+
+      # For reference: https://wiki.jenkins.io/display/JENKINS/JenkinsLinuxStartupScript
+      script = ''
+        ${pkgs.jdk17}/bin/java ${concatStringsSep " " cfg.extraJavaOptions} -jar ${cfg.package}/webapps/jenkins.war --httpListenAddress=${cfg.listenAddress} \
+                                                  --httpPort=${toString cfg.port} \
+                                                  --prefix=${cfg.prefix} \
+                                                  -Djava.awt.headless=true \
+                                                  ${concatStringsSep " " cfg.extraOptions}
+      '';
+
+      postStart = ''
+        until [[ $(${pkgs.curl.bin}/bin/curl -L -s --head -w '\n%{http_code}' ${jenkinsUrl} | tail -n1) =~ ^(200|403)$ ]]; do
+          sleep 1
+        done
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+      };
+    };
+  };
+}
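
A minimal sketch of a Jenkins configuration using options from this module; the values are illustrative, and plugins = null keeps any manually installed plugins as described above:

    services.jenkins = {
      enable = true;
      listenAddress = "localhost";
      port = 8080;
      prefix = "/jenkins";
      withCLI = true;                    # exposes jenkins-cli and sets JENKINS_URL
      extraJavaOptions = [ "-Xmx2g" ];   # illustrative heap limit
      plugins = null;                    # keep manually installed plugins
    };
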
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/jenkins/job-builder.nix b/nixpkgs/nixos/modules/services/continuous-integration/jenkins/job-builder.nix
new file mode 100644
index 000000000000..a8e3effd1f72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/jenkins/job-builder.nix
@@ -0,0 +1,248 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  jenkinsCfg = config.services.jenkins;
+  cfg = config.services.jenkins.jobBuilder;
+
+in {
+  options = {
+    services.jenkins.jobBuilder = {
+      enable = mkEnableOption (mdDoc ''
+        the Jenkins Job Builder (JJB) service. It
+        allows defining jobs for Jenkins in a declarative manner.
+
+        Jobs managed through the Jenkins WebUI (or by other means) are left
+        unchanged.
+
+        Note that it really is declarative configuration; if you remove a
+        previously defined job, the corresponding job directory will be
+        deleted.
+
+        Please see the Jenkins Job Builder documentation for more info:
+        <https://jenkins-job-builder.readthedocs.io/>
+      '');
+
+      accessUser = mkOption {
+        default = "admin";
+        type = types.str;
+        description = lib.mdDoc ''
+          User id in Jenkins used to reload config.
+        '';
+      };
+
+      accessToken = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          User token in Jenkins used to reload config.
+          WARNING: This token will be world-readable in the Nix store. To keep
+          it secret, use the {option}`accessTokenFile` option instead.
+        '';
+      };
+
+      accessTokenFile = mkOption {
+        default = "${config.services.jenkins.home}/secrets/initialAdminPassword";
+        defaultText = literalExpression ''"''${config.services.jenkins.home}/secrets/initialAdminPassword"'';
+        type = types.str;
+        example = "/run/keys/jenkins-job-builder-access-token";
+        description = lib.mdDoc ''
+          File containing the API token for the {option}`accessUser`
+          user.
+        '';
+      };
+
+      yamlJobs = mkOption {
+        default = "";
+        type = types.lines;
+        example = ''
+          - job:
+              name: jenkins-job-test-1
+              builders:
+                - shell: echo 'Hello world!'
+        '';
+        description = lib.mdDoc ''
+          Job descriptions for Jenkins Job Builder in YAML format.
+        '';
+      };
+
+      jsonJobs = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        example = literalExpression ''
+          [
+            '''
+              [ { "job":
+                  { "name": "jenkins-job-test-2",
+                    "builders": [ "shell": "echo 'Hello world!'" ]
+                  }
+                }
+              ]
+            '''
+          ]
+        '';
+        description = lib.mdDoc ''
+          Job descriptions for Jenkins Job Builder in JSON format.
+        '';
+      };
+
+      nixJobs = mkOption {
+        default = [ ];
+        type = types.listOf types.attrs;
+        example = literalExpression ''
+          [ { job =
+              { name = "jenkins-job-test-3";
+                builders = [
+                  { shell = "echo 'Hello world!'"; }
+                ];
+              };
+            }
+          ]
+        '';
+        description = lib.mdDoc ''
+          Job descriptions for Jenkins Job Builder in Nix format.
+
+          This is a trivial wrapper around jsonJobs, using builtins.toJSON
+          behind the scenes.
+        '';
+      };
+    };
+  };
+
+  config = mkIf (jenkinsCfg.enable && cfg.enable) {
+    assertions = [
+      { assertion =
+          if cfg.accessUser != ""
+          then (cfg.accessToken != "" && cfg.accessTokenFile == "") ||
+               (cfg.accessToken == "" && cfg.accessTokenFile != "")
+          else true;
+        message = ''
+          Exactly one of the accessToken and accessTokenFile options must be
+          a non-empty string (not both, not neither). Current values:
+            services.jenkins.jobBuilder.accessToken = "${cfg.accessToken}"
+            services.jenkins.jobBuilder.accessTokenFile = "${cfg.accessTokenFile}"
+        '';
+      }
+    ];
+
+    systemd.services.jenkins-job-builder = {
+      description = "Jenkins Job Builder Service";
+      # JJB can run either before or after jenkins. We chose after, so we can
+      # always use curl to notify (running) jenkins to reload its config.
+      after = [ "jenkins.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      path = with pkgs; [ jenkins-job-builder curl ];
+
+      # Q: Why manipulate files directly instead of using "jenkins-jobs upload [...]"?
+      # A: Because this module is for administering a local jenkins install,
+      #    and using local file copy allows us to not worry about
+      #    authentication.
+      script =
+        let
+          yamlJobsFile = builtins.toFile "jobs.yaml" cfg.yamlJobs;
+          jsonJobsFiles =
+            map (x: (builtins.toFile "jobs.json" x))
+              (cfg.jsonJobs ++ [(builtins.toJSON cfg.nixJobs)]);
+          jobBuilderOutputDir = "/run/jenkins-job-builder/output";
+          # Stamp file is placed in $JENKINS_HOME/jobs/$JOB_NAME/ to indicate
+          # ownership. Enables tracking and removal of stale jobs.
+          ownerStamp = ".config-xml-managed-by-nixos-jenkins-job-builder";
+          reloadScript = ''
+            echo "Asking Jenkins to reload config"
+            curl_opts="--silent --fail --show-error"
+            access_token_file=${if cfg.accessTokenFile != ""
+                           then cfg.accessTokenFile
+                           else "$RUNTIME_DIRECTORY/jenkins_access_token.txt"}
+            if [ "${cfg.accessToken}" != "" ]; then
+               (umask 0077; printf "${cfg.accessToken}" >"$access_token_file")
+            fi
+            jenkins_url="http://${jenkinsCfg.listenAddress}:${toString jenkinsCfg.port}${jenkinsCfg.prefix}"
+            auth_file="$RUNTIME_DIRECTORY/jenkins_auth_file.txt"
+            trap 'rm -f "$auth_file"' EXIT
+            (umask 0077; printf "${cfg.accessUser}:@password_placeholder@" >"$auth_file")
+            "${pkgs.replace-secret}/bin/replace-secret" "@password_placeholder@" "$access_token_file" "$auth_file"
+
+            if ! "${pkgs.jenkins}/bin/jenkins-cli" -s "$jenkins_url" -auth "@$auth_file" reload-configuration; then
+                echo "error: failed to reload configuration"
+                exit 1
+            fi
+          '';
+        in
+          ''
+            joinByString()
+            {
+                local separator="$1"
+                shift
+                local first="$1"
+                shift
+                printf "%s" "$first" "''${@/#/$separator}"
+            }
+
+            # Map a relative directory path in the output from
+            # jenkins-job-builder (jobname) to the layout expected by jenkins:
+            # each directory level gets prepended "jobs/".
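+            # E.g. "dir1/dir2" becomes "jobs/dir1/jobs/dir2".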
+            getJenkinsJobDir()
+            {
+                IFS='/' read -ra input_dirs <<< "$1"
+                printf "jobs/"
+                joinByString "/jobs/" "''${input_dirs[@]}"
+            }
+
+            # The inverse of getJenkinsJobDir (remove the "jobs/" prefixes)
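+            # E.g. "jobs/dir1/jobs/dir2" becomes "dir1/dir2".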
+            getJobname()
+            {
+                IFS='/' read -ra input_dirs <<< "$1"
+                local i=0
+                local nelem=''${#input_dirs[@]}
+                for e in "''${input_dirs[@]}"; do
+                    if [ $((i % 2)) -eq 1 ]; then
+                        printf "%s" "$e"
+                        if [ $i -lt $(( nelem - 1 )) ]; then
+                            printf "/"
+                        fi
+                    fi
+                    i=$((i + 1))
+                done
+            }
+
+            rm -rf ${jobBuilderOutputDir}
+            cur_decl_jobs=/run/jenkins-job-builder/declarative-jobs
+            rm -f "$cur_decl_jobs"
+
+            # Create / update jobs
+            mkdir -p ${jobBuilderOutputDir}
+            for inputFile in ${yamlJobsFile} ${concatStringsSep " " jsonJobsFiles}; do
+                HOME="${jenkinsCfg.home}" "${pkgs.jenkins-job-builder}/bin/jenkins-jobs" --ignore-cache test --config-xml -o "${jobBuilderOutputDir}" "$inputFile"
+            done
+
+            find "${jobBuilderOutputDir}" -type f -name config.xml | while read -r f; do echo "$(dirname "$f")"; done | sort | while read -r dir; do
+                jobname="$(realpath --relative-to="${jobBuilderOutputDir}" "$dir")"
+                jenkinsjobname=$(getJenkinsJobDir "$jobname")
+                jenkinsjobdir="${jenkinsCfg.home}/$jenkinsjobname"
+                echo "Creating / updating job \"$jobname\""
+                mkdir -p "$jenkinsjobdir"
+                touch "$jenkinsjobdir/${ownerStamp}"
+                cp "$dir"/config.xml "$jenkinsjobdir/config.xml"
+                echo "$jenkinsjobname" >> "$cur_decl_jobs"
+            done
+
+            # Remove stale jobs
+            find "${jenkinsCfg.home}" -type f -name "${ownerStamp}" | while read -r f; do echo "$(dirname "$f")"; done | sort --reverse | while read -r dir; do
+                jenkinsjobname="$(realpath --relative-to="${jenkinsCfg.home}" "$dir")"
+                grep --quiet --line-regexp "$jenkinsjobname" "$cur_decl_jobs" 2>/dev/null && continue
+                jobname=$(getJobname "$jenkinsjobname")
+                echo "Deleting stale job \"$jobname\""
+                jobdir="${jenkinsCfg.home}/$jenkinsjobname"
+                rm -rf "$jobdir"
+            done
+          '' + (optionalString (cfg.accessUser != "") reloadScript);
+      serviceConfig = {
+        Type = "oneshot";
+        User = jenkinsCfg.user;
+        RuntimeDirectory = "jenkins-job-builder";
+      };
+    };
+  };
+}
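
Usage sketch (illustrative only; the job name and token path below are
placeholders, not taken from the patch above):

    { ... }:
    {
      services.jenkins.enable = true;
      services.jenkins.jobBuilder = {
        enable = true;
        accessUser = "admin";
        # placeholder path; keeps the token out of the world-readable Nix store
        accessTokenFile = "/run/keys/jenkins-job-builder-access-token";
        nixJobs = [
          {
            job = {
              name = "hello-world";  # placeholder job name
              builders = [ { shell = "echo 'Hello world!'"; } ];
            };
          }
        ];
      };
    }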
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix b/nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix
new file mode 100644
index 000000000000..9b86917ab380
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/jenkins/slave.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.jenkinsSlave;
+  masterCfg = config.services.jenkins;
+in {
+  options = {
+    services.jenkinsSlave = {
+      # todo:
+      # * assure the profile of the jenkins user has a JRE and any specified packages. This would
+      # enable ssh slaves.
+      # * Optionally configure the node as a jenkins ad-hoc slave. This would imply configuration
+      # properties for the master node.
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If true the system will be configured to work as a jenkins slave.
+          If the system is also configured to work as a jenkins master then this has no effect.
+          In progress: Currently only assures the jenkins user is configured.
+        '';
+      };
+
+      user = mkOption {
+        default = "jenkins";
+        type = types.str;
+        description = lib.mdDoc ''
+          User the jenkins slave agent should execute under.
+        '';
+      };
+
+      group = mkOption {
+        default = "jenkins";
+        type = types.str;
+        description = lib.mdDoc ''
+          If the default slave agent user "jenkins" is configured then this is
+          the primary group of that user.
+        '';
+      };
+
+      home = mkOption {
+        default = "/var/lib/jenkins";
+        type = types.path;
+        description = lib.mdDoc ''
+          The path to use as JENKINS_HOME. If the default user "jenkins" is configured then
+          this is the home of the "jenkins" user.
+        '';
+      };
+
+      javaPackage = mkOption {
+        default = pkgs.jdk;
+        defaultText = literalExpression "pkgs.jdk";
+        description = lib.mdDoc ''
+          Java package to install.
+        '';
+        type = types.package;
+      };
+    };
+  };
+
+  config = mkIf (cfg.enable && !masterCfg.enable) {
+    users.groups = optionalAttrs (cfg.group == "jenkins") {
+      jenkins.gid = config.ids.gids.jenkins;
+    };
+
+    users.users = optionalAttrs (cfg.user == "jenkins") {
+      jenkins = {
+        description = "jenkins user";
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        useDefaultShell = true;
+        uid = config.ids.uids.jenkins;
+      };
+    };
+
+    programs.java = {
+      enable = true;
+      package = cfg.javaPackage;
+    };
+  };
+}
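
Usage sketch (illustrative; assumes the default "jenkins" user, group and home,
and that a pkgs.jdk17 attribute exists in the pinned nixpkgs):

    { pkgs, ... }:
    {
      services.jenkinsSlave = {
        enable = true;
        # optional override of the default pkgs.jdk
        javaPackage = pkgs.jdk17;
      };
    }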
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/woodpecker/agents.nix b/nixpkgs/nixos/modules/services/continuous-integration/woodpecker/agents.nix
new file mode 100644
index 000000000000..3b883c72ff07
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/woodpecker/agents.nix
@@ -0,0 +1,167 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.woodpecker-agents;
+
+  agentModule = lib.types.submodule {
+    options = {
+      enable = lib.mkEnableOption (lib.mdDoc "this Woodpecker-Agent. Agents execute tasks generated by a server; every installation needs one server and at least one agent");
+
+      package = lib.mkPackageOptionMD pkgs "woodpecker-agent" { };
+
+      environment = lib.mkOption {
+        default = { };
+        type = lib.types.attrsOf lib.types.str;
+        example = lib.literalExpression ''
+          {
+            WOODPECKER_SERVER = "localhost:9000";
+            WOODPECKER_BACKEND = "docker";
+            DOCKER_HOST = "unix:///run/podman/podman.sock";
+          }
+        '';
+        description = lib.mdDoc "woodpecker-agent config environment variables; for other options, read the [documentation](https://woodpecker-ci.org/docs/administration/agent-config)";
+      };
+
+      extraGroups = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [ ];
+        example = [ "podman" ];
+        description = lib.mdDoc ''
+          Additional groups for the systemd service.
+        '';
+      };
+
+      path = lib.mkOption {
+        type = lib.types.listOf lib.types.package;
+        default = [ ];
+        example = lib.literalExpression "[ pkgs.git ]";
+        description = lib.mdDoc ''
+          Additional packages that should be added to the agent's `PATH`.
+          Mostly useful for the `local` backend.
+        '';
+      };
+
+      environmentFile = lib.mkOption {
+        type = lib.types.listOf lib.types.path;
+        default = [ ];
+        example = [ "/var/secrets/woodpecker-agent.env" ];
+        description = lib.mdDoc ''
+          Files to load environment variables from.
+          This is helpful for specifying secrets.
+          Example content of environmentFile:
+          ```
+          WOODPECKER_AGENT_SECRET=your-shared-secret-goes-here
+          ```
+        '';
+      };
+    };
+  };
+
+  mkAgentService = name: agentCfg: {
+    name = "woodpecker-agent-${name}";
+    value = {
+      description = "Woodpecker-Agent Service - ${name}";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        SupplementaryGroups = agentCfg.extraGroups;
+        EnvironmentFile = agentCfg.environmentFile;
+        ExecStart = lib.getExe agentCfg.package;
+        Restart = "on-failure";
+        RestartSec = 15;
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
+        BindReadOnlyPaths = [
+          "-/etc/resolv.conf"
+          "-/etc/nsswitch.conf"
+          "-/etc/ssl/certs"
+          "-/etc/static/ssl/certs"
+          "-/etc/hosts"
+          "-/etc/localtime"
+        ];
+      };
+      inherit (agentCfg) environment path;
+    };
+  };
+in
+{
+  meta.maintainers = with lib.maintainers; [ janik ambroisie ];
+
+  options = {
+    services.woodpecker-agents = {
+      agents = lib.mkOption {
+        default = { };
+        type = lib.types.attrsOf agentModule;
+        example = lib.literalExpression ''
+          {
+            podman = {
+              environment = {
+                WOODPECKER_SERVER = "localhost:9000";
+                WOODPECKER_BACKEND = "docker";
+                DOCKER_HOST = "unix:///run/podman/podman.sock";
+              };
+
+              extraGroups = [ "podman" ];
+
+              environmentFile = [ "/run/secrets/woodpecker/agent-secret.txt" ];
+            };
+
+            exec = {
+              environment = {
+                WOODPECKER_SERVER = "localhost:9000";
+                WOODPECKER_BACKEND = "local";
+              };
+
+              environmentFile = [ "/run/secrets/woodpecker/agent-secret.txt" ];
+
+              path = [
+                # Needed to clone repos
+                git
+                git-lfs
+                woodpecker-plugin-git
+                # Used by the runner as the default shell
+                bash
+                # Most likely to be used in pipeline definitions
+                coreutils
+              ];
+            };
+          }
+        '';
+        description = lib.mdDoc "woodpecker-agents configurations";
+      };
+    };
+  };
+
+  config = {
+    systemd.services =
+      let
+        mkServices = lib.mapAttrs' mkAgentService;
+        enabledAgents = lib.filterAttrs (_: agent: agent.enable) cfg.agents;
+      in
+      mkServices enabledAgents;
+  };
+}
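
Usage sketch (illustrative; server address and secret path are placeholders).
Note that each attribute under `agents` must set `enable = true`, since only
enabled agents are turned into systemd units:

    { pkgs, ... }:
    {
      services.woodpecker-agents.agents.exec = {
        enable = true;
        environment = {
          WOODPECKER_SERVER = "localhost:9000";   # placeholder
          WOODPECKER_BACKEND = "local";
        };
        environmentFile = [ "/run/secrets/woodpecker/agent-secret.txt" ];  # placeholder
        # tools the local backend typically needs on PATH
        path = [ pkgs.git pkgs.bash pkgs.coreutils ];
      };
    }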
diff --git a/nixpkgs/nixos/modules/services/continuous-integration/woodpecker/server.nix b/nixpkgs/nixos/modules/services/continuous-integration/woodpecker/server.nix
new file mode 100644
index 000000000000..38b42f7288c0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/continuous-integration/woodpecker/server.nix
@@ -0,0 +1,98 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.woodpecker-server;
+in
+{
+  meta.maintainers = with lib.maintainers; [ janik ambroisie ];
+
+
+  options = {
+    services.woodpecker-server = {
+      enable = lib.mkEnableOption (lib.mdDoc "the Woodpecker-Server, a CI/CD application for automatic builds, deployments and tests");
+      package = lib.mkPackageOptionMD pkgs "woodpecker-server" { };
+      environment = lib.mkOption {
+        default = { };
+        type = lib.types.attrsOf lib.types.str;
+        example = lib.literalExpression
+          ''
+            {
+              WOODPECKER_HOST = "https://woodpecker.example.com";
+              WOODPECKER_OPEN = "true";
+              WOODPECKER_GITEA = "true";
+              WOODPECKER_GITEA_CLIENT = "ffffffff-ffff-ffff-ffff-ffffffffffff";
+              WOODPECKER_GITEA_URL = "https://git.example.com";
+            }
+          '';
+        description = lib.mdDoc "woodpecker-server config environment variables; for other options, read the [documentation](https://woodpecker-ci.org/docs/administration/server-config)";
+      };
+      environmentFile = lib.mkOption {
+        type = with lib.types; coercedTo path (f: [ f ]) (listOf path);
+        default = [ ];
+        example = [ "/root/woodpecker-server.env" ];
+        description = lib.mdDoc ''
+          File to load environment variables
+          from. This is helpful for specifying secrets.
+          Example content of environmentFile:
+          ```
+          WOODPECKER_AGENT_SECRET=your-shared-secret-goes-here
+          WOODPECKER_GITEA_SECRET=gto_**************************************
+          ```
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services = {
+      woodpecker-server = {
+        description = "Woodpecker-Server Service";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network-online.target" ];
+        wants = [ "network-online.target" ];
+        serviceConfig = {
+          DynamicUser = true;
+          WorkingDirectory = "%S/woodpecker-server";
+          StateDirectory = "woodpecker-server";
+          StateDirectoryMode = "0700";
+          UMask = "0007";
+          ConfigurationDirectory = "woodpecker-server";
+          EnvironmentFile = cfg.environmentFile;
+          ExecStart = "${cfg.package}/bin/woodpecker-server";
+          Restart = "on-failure";
+          RestartSec = 15;
+          CapabilityBoundingSet = "";
+          # Security
+          NoNewPrivileges = true;
+          # Sandboxing
+          ProtectSystem = "strict";
+          ProtectHome = true;
+          PrivateTmp = true;
+          PrivateDevices = true;
+          PrivateUsers = true;
+          ProtectHostname = true;
+          ProtectClock = true;
+          ProtectKernelTunables = true;
+          ProtectKernelModules = true;
+          ProtectKernelLogs = true;
+          ProtectControlGroups = true;
+          RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          PrivateMounts = true;
+          # System Call Filtering
+          SystemCallArchitectures = "native";
+          SystemCallFilter = "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
+        };
+        inherit (cfg) environment;
+      };
+    };
+  };
+}
+
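
Usage sketch (illustrative; the host and environment-file path are placeholders,
and secrets such as WOODPECKER_AGENT_SECRET are expected to live in that file):

    { ... }:
    {
      services.woodpecker-server = {
        enable = true;
        environment = {
          WOODPECKER_HOST = "https://woodpecker.example.com";  # placeholder
          WOODPECKER_OPEN = "true";
        };
        environmentFile = [ "/var/lib/secrets/woodpecker-server.env" ];  # placeholder
      };
    }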
diff --git a/nixpkgs/nixos/modules/services/databases/aerospike.nix b/nixpkgs/nixos/modules/services/databases/aerospike.nix
new file mode 100644
index 000000000000..21df4cd0577b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/aerospike.nix
@@ -0,0 +1,156 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.aerospike;
+
+  aerospikeConf = pkgs.writeText "aerospike.conf" ''
+    # This stanza must come first.
+    service {
+      user aerospike
+      group aerospike
+      paxos-single-replica-limit 1 # Number of nodes where the replica count is automatically reduced to 1.
+      proto-fd-max 15000
+      work-directory ${cfg.workDir}
+    }
+    logging {
+      console {
+        context any info
+      }
+    }
+    mod-lua {
+      system-path ${cfg.package}/share/udf/lua
+      user-path ${cfg.workDir}/udf/lua
+    }
+    network {
+      ${cfg.networkConfig}
+    }
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.aerospike = {
+      enable = mkEnableOption (lib.mdDoc "Aerospike server");
+
+      package = mkOption {
+        default = pkgs.aerospike;
+        defaultText = literalExpression "pkgs.aerospike";
+        type = types.package;
+        description = lib.mdDoc "Which Aerospike derivation to use";
+      };
+
+      workDir = mkOption {
+        type = types.str;
+        default = "/var/lib/aerospike";
+        description = lib.mdDoc "Location where Aerospike stores its files";
+      };
+
+      networkConfig = mkOption {
+        type = types.lines;
+        default = ''
+          service {
+            address any
+            port 3000
+          }
+
+          heartbeat {
+            address any
+            mode mesh
+            port 3002
+            interval 150
+            timeout 10
+          }
+
+          fabric {
+            address any
+            port 3001
+          }
+
+          info {
+            address any
+            port 3003
+          }
+        '';
+        description = lib.mdDoc "network section of configuration file";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          namespace test {
+            replication-factor 2
+            memory-size 4G
+            default-ttl 30d
+            storage-engine memory
+          }
+        '';
+        description = lib.mdDoc "Extra configuration";
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.aerospike.enable {
+
+    users.users.aerospike = {
+      name = "aerospike";
+      group = "aerospike";
+      uid = config.ids.uids.aerospike;
+      description = "Aerospike server user";
+    };
+    users.groups.aerospike.gid = config.ids.gids.aerospike;
+
+    systemd.services.aerospike = rec {
+      description = "Aerospike server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/asd --fgdaemon --config-file ${aerospikeConf}";
+        User = "aerospike";
+        Group = "aerospike";
+        LimitNOFILE = 100000;
+        PermissionsStartOnly = true;
+      };
+
+      preStart = ''
+        if [ $(echo "$(${pkgs.procps}/bin/sysctl -n kernel.shmall) < 4294967296" | ${pkgs.bc}/bin/bc) == "1"  ]; then
+          echo "kernel.shmall too low, setting to 4G pages"
+          ${pkgs.procps}/bin/sysctl -w kernel.shmall=4294967296
+        fi
+        if [ $(echo "$(${pkgs.procps}/bin/sysctl -n kernel.shmmax) < 1073741824" | ${pkgs.bc}/bin/bc) == "1"  ]; then
+          echo "kernel.shmmax too low, setting to 1GB"
+          ${pkgs.procps}/bin/sysctl -w kernel.shmmax=1073741824
+        fi
+        if [ $(echo "$(cat /proc/sys/net/core/rmem_max) < 15728640" | ${pkgs.bc}/bin/bc) == "1" ]; then
+          echo "increasing socket buffer limit (/proc/sys/net/core/rmem_max): $(cat /proc/sys/net/core/rmem_max) -> 15728640"
+          echo 15728640 > /proc/sys/net/core/rmem_max
+        fi
+        if [ $(echo "$(cat /proc/sys/net/core/wmem_max) <  5242880" | ${pkgs.bc}/bin/bc) == "1"  ]; then
+          echo "increasing socket buffer limit (/proc/sys/net/core/wmem_max): $(cat /proc/sys/net/core/wmem_max) -> 5242880"
+          echo  5242880 > /proc/sys/net/core/wmem_max
+        fi
+        install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}"
+        install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/smd"
+        install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/udf"
+        install -d -m0700 -o ${serviceConfig.User} -g ${serviceConfig.Group} "${cfg.workDir}/udf/lua"
+      '';
+    };
+
+  };
+
+}
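
Usage sketch (illustrative; the namespace block simply reuses the option's own
example and is not required):

    { ... }:
    {
      services.aerospike = {
        enable = true;
        extraConfig = ''
          namespace test {
            replication-factor 2
            memory-size 4G
            default-ttl 30d
            storage-engine memory
          }
        '';
      };
    }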
diff --git a/nixpkgs/nixos/modules/services/databases/cassandra.nix b/nixpkgs/nixos/modules/services/databases/cassandra.nix
new file mode 100644
index 000000000000..cd816ffaf0dd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/cassandra.nix
@@ -0,0 +1,585 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    concatStringsSep
+    flip
+    literalMD
+    literalExpression
+    optionalAttrs
+    optionals
+    recursiveUpdate
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    types
+    versionAtLeast
+    ;
+
+  cfg = config.services.cassandra;
+
+  atLeast3 = versionAtLeast cfg.package.version "3";
+  atLeast3_11 = versionAtLeast cfg.package.version "3.11";
+  atLeast4 = versionAtLeast cfg.package.version "4";
+
+  defaultUser = "cassandra";
+
+  cassandraConfig = flip recursiveUpdate cfg.extraConfig (
+    {
+      commitlog_sync = "batch";
+      commitlog_sync_batch_window_in_ms = 2;
+      start_native_transport = cfg.allowClients;
+      cluster_name = cfg.clusterName;
+      partitioner = "org.apache.cassandra.dht.Murmur3Partitioner";
+      endpoint_snitch = "SimpleSnitch";
+      data_file_directories = [ "${cfg.homeDir}/data" ];
+      commitlog_directory = "${cfg.homeDir}/commitlog";
+      saved_caches_directory = "${cfg.homeDir}/saved_caches";
+    } // optionalAttrs (cfg.seedAddresses != [ ]) {
+      seed_provider = [
+        {
+          class_name = "org.apache.cassandra.locator.SimpleSeedProvider";
+          parameters = [{ seeds = concatStringsSep "," cfg.seedAddresses; }];
+        }
+      ];
+    } // optionalAttrs atLeast3 {
+      hints_directory = "${cfg.homeDir}/hints";
+    }
+  );
+
+  cassandraConfigWithAddresses = cassandraConfig // (
+    if cfg.listenAddress == null
+    then { listen_interface = cfg.listenInterface; }
+    else { listen_address = cfg.listenAddress; }
+  ) // (
+    if cfg.rpcAddress == null
+    then { rpc_interface = cfg.rpcInterface; }
+    else { rpc_address = cfg.rpcAddress; }
+  );
+
+  cassandraEtc = pkgs.stdenv.mkDerivation {
+    name = "cassandra-etc";
+
+    cassandraYaml = builtins.toJSON cassandraConfigWithAddresses;
+    cassandraEnvPkg = "${cfg.package}/conf/cassandra-env.sh";
+    cassandraLogbackConfig = pkgs.writeText "logback.xml" cfg.logbackConfig;
+
+    passAsFile = [ "extraEnvSh" ];
+    inherit (cfg) extraEnvSh package;
+
+    buildCommand = ''
+      mkdir -p "$out"
+
+      echo "$cassandraYaml" > "$out/cassandra.yaml"
+      ln -s "$cassandraLogbackConfig" "$out/logback.xml"
+
+      ( cat "$cassandraEnvPkg"
+        echo "# lines from services.cassandra.extraEnvSh: "
+        cat "$extraEnvShPath"
+      ) > "$out/cassandra-env.sh"
+
+      # Delete default JMX Port, otherwise we can't set it using env variable
+      sed -i '/JMX_PORT="7199"/d' "$out/cassandra-env.sh"
+
+      # Delete default password file
+      sed -i '/-Dcom.sun.management.jmxremote.password.file=\/etc\/cassandra\/jmxremote.password/d' "$out/cassandra-env.sh"
+
+      ${lib.optionalString atLeast4 ''
+        cp $package/conf/jvm*.options $out/
+      ''}
+    '';
+  };
+
+  defaultJmxRolesFile =
+    builtins.foldl'
+      (left: right: left + right) ""
+      (map (role: "${role.username} ${role.password}") cfg.jmxRoles);
+
+  fullJvmOptions =
+    cfg.jvmOpts
+    ++ optionals (cfg.jmxRoles != [ ]) [
+      "-Dcom.sun.management.jmxremote.authenticate=true"
+      "-Dcom.sun.management.jmxremote.password.file=${cfg.jmxRolesFile}"
+    ] ++ optionals cfg.remoteJmx [
+      "-Djava.rmi.server.hostname=${cfg.rpcAddress}"
+    ] ++ optionals atLeast4 [
+      # Historically, we don't use a log dir, whereas the upstream scripts do
+      # expect this. We override those by providing our own -Xlog:gc flag.
+      "-Xlog:gc=warning,heap*=warning,age*=warning,safepoint=warning,promotion*=warning"
+    ];
+
+  commonEnv = {
+    # Sufficient for cassandra 2.x, 3.x
+    CASSANDRA_CONF = "${cassandraEtc}";
+
+    # Required since cassandra 4
+    CASSANDRA_LOGBACK_CONF = "${cassandraEtc}/logback.xml";
+  };
+
+in
+{
+  options.services.cassandra = {
+
+    enable = mkEnableOption (lib.mdDoc ''
+      Apache Cassandra – Scalable and highly available database
+    '');
+
+    clusterName = mkOption {
+      type = types.str;
+      default = "Test Cluster";
+      description = mdDoc ''
+        The name of the cluster.
+        This setting prevents nodes in one logical cluster from joining
+        another. All nodes in a cluster must have the same value.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = mdDoc "Run Apache Cassandra under this user.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = mdDoc "Run Apache Cassandra under this group.";
+    };
+
+    homeDir = mkOption {
+      type = types.path;
+      default = "/var/lib/cassandra";
+      description = mdDoc ''
+        Home directory for Apache Cassandra.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.cassandra;
+      defaultText = literalExpression "pkgs.cassandra";
+      example = literalExpression "pkgs.cassandra_3_11";
+      description = mdDoc ''
+        The Apache Cassandra package to use.
+      '';
+    };
+
+    jvmOpts = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      description = mdDoc ''
+        Populate the `JVM_OPTS` environment variable.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.nullOr types.str;
+      default = "127.0.0.1";
+      example = null;
+      description = mdDoc ''
+        Address or interface to bind to and tell other Cassandra nodes
+        to connect to. You _must_ change this if you want multiple
+        nodes to be able to communicate!
+
+        Set {option}`listenAddress` OR {option}`listenInterface`, not both.
+
+        Leaving it blank leaves it up to
+        `InetAddress.getLocalHost()`. This will always do the "Right
+        Thing" _if_ the node is properly configured (hostname, name
+        resolution, etc), and the Right Thing is to use the address
+        associated with the hostname (it might not be).
+
+        Setting {option}`listenAddress` to `0.0.0.0` is always wrong.
+      '';
+    };
+
+    listenInterface = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "eth1";
+      description = mdDoc ''
+        Set `listenAddress` OR `listenInterface`, not both. Interfaces
+        must correspond to a single address, IP aliasing is not
+        supported.
+      '';
+    };
+
+    rpcAddress = mkOption {
+      type = types.nullOr types.str;
+      default = "127.0.0.1";
+      example = null;
+      description = mdDoc ''
+        The address or interface to bind the native transport server to.
+
+        Set {option}`rpcAddress` OR {option}`rpcInterface`, not both.
+
+        Leaving {option}`rpcAddress` blank has the same effect as on
+        {option}`listenAddress` (i.e. it will be based on the configured hostname
+        of the node).
+
+        Note that unlike {option}`listenAddress`, you can specify `"0.0.0.0"`, but you
+        must also set `extraConfig.broadcast_rpc_address` to a value other
+        than `"0.0.0.0"`.
+
+        For security reasons, you should not expose this port to the
+        internet. Firewall it if needed.
+      '';
+    };
+
+    rpcInterface = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "eth1";
+      description = mdDoc ''
+        Set {option}`rpcAddress` OR {option}`rpcInterface`, not both. Interfaces must
+        correspond to a single address, IP aliasing is not supported.
+      '';
+    };
+
+    logbackConfig = mkOption {
+      type = types.lines;
+      default = ''
+        <configuration scan="false">
+          <appender name="STDOUT" class="ch.qos.logback.core.ConsoleAppender">
+            <encoder>
+              <pattern>%-5level %date{HH:mm:ss,SSS} %msg%n</pattern>
+            </encoder>
+          </appender>
+
+          <root level="INFO">
+            <appender-ref ref="STDOUT" />
+          </root>
+
+          <logger name="com.thinkaurelius.thrift" level="ERROR"/>
+        </configuration>
+      '';
+      description = mdDoc ''
+        XML logback configuration for cassandra
+      '';
+    };
+
+    seedAddresses = mkOption {
+      type = types.listOf types.str;
+      default = [ "127.0.0.1" ];
+      description = mdDoc ''
+        The addresses of hosts designated as contact points in the cluster. A
+        joining node contacts one of the nodes in the seeds list to learn the
+        topology of the ring.
+        Set to `[ "127.0.0.1" ]` for a single node cluster.
+      '';
+    };
+
+    allowClients = mkOption {
+      type = types.bool;
+      default = true;
+      description = mdDoc ''
+        Enables or disables the native transport server (CQL binary protocol).
+        This server uses the same address as the {option}`rpcAddress`,
+        but the port it uses is not `rpc_port` but
+        `native_transport_port`. See the official Cassandra
+        docs for more information on these variables and set them using
+        {option}`extraConfig`.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.attrs;
+      default = { };
+      example =
+        {
+          commitlog_sync_batch_window_in_ms = 3;
+        };
+      description = mdDoc ''
+        Extra options to be merged into {file}`cassandra.yaml` as nix attribute set.
+      '';
+    };
+
+    extraEnvSh = mkOption {
+      type = types.lines;
+      default = "";
+      example = literalExpression ''"CLASSPATH=$CLASSPATH:''${extraJar}"'';
+      description = mdDoc ''
+        Extra shell lines to be appended onto {file}`cassandra-env.sh`.
+      '';
+    };
+
+    fullRepairInterval = mkOption {
+      type = types.nullOr types.str;
+      default = "3w";
+      example = null;
+      description = mdDoc ''
+        Set how often full repairs are run, i.e.
+        {command}`nodetool repair --full` is executed. See
+        <https://cassandra.apache.org/doc/latest/operating/repair.html>
+        for more information.
+
+        Set to `null` to disable full repairs.
+      '';
+    };
+
+    fullRepairOptions = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--partitioner-range" ];
+      description = mdDoc ''
+        Options passed through to the full repair command.
+      '';
+    };
+
+    incrementalRepairInterval = mkOption {
+      type = types.nullOr types.str;
+      default = "3d";
+      example = null;
+      description = mdDoc ''
+        Set how often incremental repairs are run, i.e.
+        {command}`nodetool repair` is executed. See
+        <https://cassandra.apache.org/doc/latest/operating/repair.html>
+        for more information.
+
+        Set to `null` to disable incremental repairs.
+      '';
+    };
+
+    incrementalRepairOptions = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--partitioner-range" ];
+      description = mdDoc ''
+        Options passed through to the incremental repair command.
+      '';
+    };
+
+    maxHeapSize = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "4G";
+      description = mdDoc ''
+        Must be left blank or set together with {option}`heapNewSize`.
+        If left blank a sensible value for the available amount of RAM and CPU
+        cores is calculated.
+
+        Override to set the amount of memory to allocate to the JVM at
+        start-up. For production use you may wish to adjust this for your
+        environment. `MAX_HEAP_SIZE` is the total amount of memory dedicated
+        to the Java heap. `HEAP_NEWSIZE` refers to the size of the young
+        generation.
+
+        The main trade-off for the young generation is that the larger it
+        is, the longer GC pause times will be. The shorter it is, the more
+        expensive GC will be (usually).
+      '';
+    };
+
+    heapNewSize = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "800M";
+      description = mdDoc ''
+        Must be left blank or set together with {option}`maxHeapSize`.
+        If left blank a sensible value for the available amount of RAM and CPU
+        cores is calculated.
+
+        Override to set the amount of memory to allocate to the JVM at
+        start-up. For production use you may wish to adjust this for your
+        environment. `HEAP_NEWSIZE` refers to the size of the young
+        generation.
+
+        The main trade-off for the young generation is that the larger it
+        is, the longer GC pause times will be. The shorter it is, the more
+        expensive GC will be (usually).
+
+        The example `HEAP_NEWSIZE` assumes a modern 8-core+ machine for decent pause
+        times. If in doubt, and if you do not particularly want to tweak, go with
+        100 MB per physical CPU core.
+      '';
+    };
+
+    mallocArenaMax = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      example = 4;
+      description = mdDoc ''
+        Set this to control the amount of arenas per-thread in glibc.
+      '';
+    };
+
+    remoteJmx = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Cassandra ships with JMX accessible *only* from localhost.
+        To enable remote JMX connections set to true.
+
+        Be sure to also enable authentication and/or TLS.
+        See: <https://wiki.apache.org/cassandra/JmxSecurity>
+      '';
+    };
+
+    jmxPort = mkOption {
+      type = types.int;
+      default = 7199;
+      description = mdDoc ''
+        Specifies the default port over which Cassandra will be available for
+        JMX connections.
+        For security reasons, you should not expose this port to the internet.
+        Firewall it if needed.
+      '';
+    };
+
+    jmxRoles = mkOption {
+      default = [ ];
+      description = mdDoc ''
+        Roles that are allowed to access JMX (e.g. {command}`nodetool`).
+
+        BEWARE: The passwords will be stored world readable in the Nix store.
+        It's recommended to use your own protected file via
+        {option}`jmxRolesFile` instead.
+
+        Doesn't work in versions older than 3.11 because they refuse a
+        world-readable password file.
+      '';
+      type = types.listOf (types.submodule {
+        options = {
+          username = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Username for JMX";
+          };
+          password = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Password for JMX";
+          };
+        };
+      });
+    };
+
+    jmxRolesFile = mkOption {
+      type = types.nullOr types.path;
+      default =
+        if atLeast3_11
+        then pkgs.writeText "jmx-roles-file" defaultJmxRolesFile
+        else null;
+      defaultText = literalMD ''generated configuration file if version is at least 3.11, otherwise `null`'';
+      example = "/var/lib/cassandra/jmx.password";
+      description = lib.mdDoc ''
+        Specify your own jmx roles file.
+
+        Make sure the permissions forbid "others" from reading the file if
+        you're using Cassandra below version 3.11.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.listenAddress == null) != (cfg.listenInterface == null);
+        message = "You have to set either listenAddress or listenInterface";
+      }
+      {
+        assertion = (cfg.rpcAddress == null) != (cfg.rpcInterface == null);
+        message = "You have to set either rpcAddress or rpcInterface";
+      }
+      {
+        assertion = (cfg.maxHeapSize == null) == (cfg.heapNewSize == null);
+        message = "If you set either of maxHeapSize or heapNewSize you have to set both";
+      }
+      {
+        assertion = cfg.remoteJmx -> cfg.jmxRolesFile != null;
+        message = ''
+          If you want JMX available remotely you need to set a password using
+          jmxRoles, or jmxRolesFile if using Cassandra older than v3.11.
+        '';
+      }
+    ];
+    users = mkIf (cfg.user == defaultUser) {
+      users.${defaultUser} = {
+        group = cfg.group;
+        home = cfg.homeDir;
+        createHome = true;
+        uid = config.ids.uids.cassandra;
+        description = "Cassandra service user";
+      };
+      groups.${defaultUser}.gid = config.ids.gids.cassandra;
+    };
+
+    systemd.services.cassandra = {
+      description = "Apache Cassandra service";
+      after = [ "network.target" ];
+      environment = commonEnv // {
+        JVM_OPTS = builtins.concatStringsSep " " fullJvmOptions;
+        MAX_HEAP_SIZE = toString cfg.maxHeapSize;
+        HEAP_NEWSIZE = toString cfg.heapNewSize;
+        MALLOC_ARENA_MAX = toString cfg.mallocArenaMax;
+        LOCAL_JMX = if cfg.remoteJmx then "no" else "yes";
+        JMX_PORT = toString cfg.jmxPort;
+      };
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/cassandra -f";
+        SuccessExitStatus = 143;
+      };
+    };
+
+    systemd.services.cassandra-full-repair = {
+      description = "Perform a full repair on this Cassandra node";
+      after = [ "cassandra.service" ];
+      requires = [ "cassandra.service" ];
+      environment = commonEnv;
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart =
+          concatStringsSep " "
+            ([
+              "${cfg.package}/bin/nodetool"
+              "repair"
+              "--full"
+            ] ++ cfg.fullRepairOptions);
+      };
+    };
+
+    systemd.timers.cassandra-full-repair =
+      mkIf (cfg.fullRepairInterval != null) {
+        description = "Schedule full repairs on Cassandra";
+        wantedBy = [ "timers.target" ];
+        timerConfig = {
+          OnBootSec = cfg.fullRepairInterval;
+          OnUnitActiveSec = cfg.fullRepairInterval;
+          Persistent = true;
+        };
+      };
+
+    systemd.services.cassandra-incremental-repair = {
+      description = "Perform an incremental repair on this cassandra node.";
+      after = [ "cassandra.service" ];
+      requires = [ "cassandra.service" ];
+      environment = commonEnv;
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart =
+          concatStringsSep " "
+            ([
+              "${cfg.package}/bin/nodetool"
+              "repair"
+            ] ++ cfg.incrementalRepairOptions);
+      };
+    };
+
+    systemd.timers.cassandra-incremental-repair =
+      mkIf (cfg.incrementalRepairInterval != null) {
+        description = "Schedule incremental repairs on Cassandra";
+        wantedBy = [ "timers.target" ];
+        timerConfig = {
+          OnBootSec = cfg.incrementalRepairInterval;
+          OnUnitActiveSec = cfg.incrementalRepairInterval;
+          Persistent = true;
+        };
+      };
+  };
+
+  meta.maintainers = with lib.maintainers; [ roberth ];
+}
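
Usage sketch for a single-node setup (illustrative; the values shown simply
restate the module defaults, and the cluster name is a placeholder):

    { ... }:
    {
      services.cassandra = {
        enable = true;
        clusterName = "Test Cluster";   # placeholder
        listenAddress = "127.0.0.1";
        rpcAddress = "127.0.0.1";
        seedAddresses = [ "127.0.0.1" ];
      };
    }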
diff --git a/nixpkgs/nixos/modules/services/databases/clickhouse.nix b/nixpkgs/nixos/modules/services/databases/clickhouse.nix
new file mode 100644
index 000000000000..dca352ef72fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/clickhouse.nix
@@ -0,0 +1,85 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.clickhouse;
+in
+with lib;
+{
+
+  ###### interface
+
+  options = {
+
+    services.clickhouse = {
+
+      enable = mkEnableOption (lib.mdDoc "ClickHouse database server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.clickhouse;
+        defaultText = lib.literalExpression "pkgs.clickhouse";
+        description = lib.mdDoc ''
+          ClickHouse package to use.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.clickhouse = {
+      name = "clickhouse";
+      uid = config.ids.uids.clickhouse;
+      group = "clickhouse";
+      description = "ClickHouse server user";
+    };
+
+    users.groups.clickhouse.gid = config.ids.gids.clickhouse;
+
+    systemd.services.clickhouse = {
+      description = "ClickHouse server";
+
+      wantedBy = [ "multi-user.target" ];
+
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        Type = "notify";
+        User = "clickhouse";
+        Group = "clickhouse";
+        ConfigurationDirectory = "clickhouse-server";
+        AmbientCapabilities = "CAP_SYS_NICE";
+        StateDirectory = "clickhouse";
+        LogsDirectory = "clickhouse";
+        ExecStart = "${cfg.package}/bin/clickhouse-server --config-file=/etc/clickhouse-server/config.xml";
+        TimeoutStartSec = "infinity";
+      };
+
+      environment = {
+        # Switching off watchdog is very important for sd_notify to work correctly.
+        CLICKHOUSE_WATCHDOG_ENABLE = "0";
+      };
+    };
+
+    environment.etc = {
+      "clickhouse-server/config.xml" = {
+        source = "${cfg.package}/etc/clickhouse-server/config.xml";
+      };
+
+      "clickhouse-server/users.xml" = {
+        source = "${cfg.package}/etc/clickhouse-server/users.xml";
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    # startup requires a `/etc/localtime`, which only exists if `time.timeZone != null`
+    time.timeZone = mkDefault "UTC";
+
+  };
+
+}
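
Usage sketch: since the module ships the package's stock config.xml and
users.xml via environment.etc, the smallest useful configuration is just:

    { ... }:
    {
      services.clickhouse.enable = true;
    }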
diff --git a/nixpkgs/nixos/modules/services/databases/cockroachdb.nix b/nixpkgs/nixos/modules/services/databases/cockroachdb.nix
new file mode 100644
index 000000000000..ff77d30588fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/cockroachdb.nix
@@ -0,0 +1,225 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cockroachdb;
+  crdb = cfg.package;
+
+  startupCommand = utils.escapeSystemdExecArgs
+    ([
+      # Basic startup
+      "${crdb}/bin/cockroach"
+      "start"
+      "--logtostderr"
+      "--store=/var/lib/cockroachdb"
+
+      # WebUI settings
+      "--http-addr=${cfg.http.address}:${toString cfg.http.port}"
+
+      # Cluster listen address
+      "--listen-addr=${cfg.listen.address}:${toString cfg.listen.port}"
+
+      # Cache and memory settings.
+      "--cache=${cfg.cache}"
+      "--max-sql-memory=${cfg.maxSqlMemory}"
+
+      # Certificate/security settings.
+      (if cfg.insecure then "--insecure" else "--certs-dir=${cfg.certsDir}")
+    ]
+    ++ lib.optional (cfg.join != null) "--join=${cfg.join}"
+    ++ lib.optional (cfg.locality != null) "--locality=${cfg.locality}"
+    ++ cfg.extraArgs);
+
+  addressOption = descr: defaultPort: {
+    address = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Address to bind to for ${descr}";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = defaultPort;
+      description = lib.mdDoc "Port to bind to for ${descr}";
+    };
+  };
+in
+
+{
+  options = {
+    services.cockroachdb = {
+      enable = mkEnableOption (lib.mdDoc "CockroachDB Server");
+
+      listen = addressOption "intra-cluster communication" 26257;
+
+      http = addressOption "http-based Admin UI" 8080;
+
+      locality = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          An ordered, comma-separated list of key-value pairs that describe the
+          topography of the machine. Topography might include country,
+          datacenter or rack designations. Data is automatically replicated to
+          maximize diversities of each tier. The order of tiers is used to
+          determine the priority of the diversity, so the more inclusive
+          localities like country should come before less inclusive localities
+          like datacenter.  The tiers and order must be the same on all nodes.
+          Including more tiers is better than including fewer. For example:
+
+          ```
+              country=us,region=us-west,datacenter=us-west-1b,rack=12
+              country=ca,region=ca-east,datacenter=ca-east-2,rack=4
+
+              planet=earth,province=manitoba,colo=secondary,power=3
+          ```
+        '';
+      };
+
+      join = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "The addresses for connecting the node to a cluster.";
+      };
+
+      insecure = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Run in insecure mode.";
+      };
+
+      certsDir = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "The path to the certificate directory.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "cockroachdb";
+        description = lib.mdDoc "User account under which CockroachDB runs";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "cockroachdb";
+        description = lib.mdDoc "Group under which CockroachDB runs";
+      };
+
+      openPorts = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open firewall ports for cluster communication by default";
+      };
+
+      cache = mkOption {
+        type = types.str;
+        default = "25%";
+        description = lib.mdDoc ''
+          The total size for caches.
+
+          This can be a percentage (expressed with a percent sign or as a
+          decimal fraction) or any bytes-based unit. For example,
+          `"25%"` and `"0.25"` both represent 25% of the available system
+          memory. The values `"1000000000"` and `"1GB"` both represent
+          1 gigabyte of memory.
+        '';
+      };
+
+      maxSqlMemory = mkOption {
+        type = types.str;
+        default = "25%";
+        description = lib.mdDoc ''
+          The maximum in-memory storage capacity available to store temporary
+          data for SQL queries.
+
+          This can be a percentage (expressed with a percent sign or as a
+          decimal fraction) or any bytes-based unit. For example,
+          `"25%"` and `"0.25"` both represent 25% of the available system
+          memory. The values `"1000000000"` and `"1GB"` both represent
+          1 gigabyte of memory.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.cockroachdb;
+        defaultText = literalExpression "pkgs.cockroachdb";
+        description = lib.mdDoc ''
+          The CockroachDB derivation to use for running the service.
+
+          This would primarily be useful to enable Enterprise Edition features
+          in your own custom CockroachDB build (Nixpkgs CockroachDB binaries
+          only contain open source features and open source code).
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--advertise-addr" "[fe80::f6f2:::]" ];
+        description = lib.mdDoc ''
+          Extra CLI arguments passed to {command}`cockroach start`.
+          For the full list of supported arguments, check <https://www.cockroachlabs.com/docs/stable/cockroach-start.html#flags>
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.cockroachdb.enable {
+    assertions = [
+      { assertion = !cfg.insecure -> cfg.certsDir != null;
+        message = "CockroachDB must have a set of SSL certificates (.certsDir), or run in Insecure Mode (.insecure = true)";
+      }
+    ];
+
+    environment.systemPackages = [ crdb ];
+
+    users.users = optionalAttrs (cfg.user == "cockroachdb") {
+      cockroachdb = {
+        description = "CockroachDB Server User";
+        uid         = config.ids.uids.cockroachdb;
+        group       = cfg.group;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "cockroachdb") {
+      cockroachdb.gid = config.ids.gids.cockroachdb;
+    };
+
+    networking.firewall.allowedTCPPorts = lib.optionals cfg.openPorts
+      [ cfg.http.port cfg.listen.port ];
+
+    systemd.services.cockroachdb =
+      { description   = "CockroachDB Server";
+        documentation = [ "man:cockroach(1)" "https://www.cockroachlabs.com" ];
+
+        after    = [ "network.target" "time-sync.target" ];
+        requires = [ "time-sync.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        unitConfig.RequiresMountsFor = "/var/lib/cockroachdb";
+
+        serviceConfig =
+          { ExecStart = startupCommand;
+            Type = "notify";
+            User = cfg.user;
+            StateDirectory = "cockroachdb";
+            StateDirectoryMode = "0700";
+
+            Restart = "always";
+
+            # A conservative-ish timeout is alright here, because for Type=notify
+            # cockroach will send systemd pings during startup to keep it alive
+            TimeoutStopSec = 60;
+            RestartSec = 10;
+          };
+      };
+  };
+
+  meta.maintainers = with lib.maintainers; [ thoughtpolice ];
+}
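
Usage sketch for a single insecure node on loopback (illustrative, for local
testing only; the assertion above requires either insecure = true or a
certsDir):

    { ... }:
    {
      services.cockroachdb = {
        enable = true;
        insecure = true;                # no TLS; local testing only
        listen.address = "127.0.0.1";
        http.address = "127.0.0.1";
      };
    }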
diff --git a/nixpkgs/nixos/modules/services/databases/couchdb.nix b/nixpkgs/nixos/modules/services/databases/couchdb.nix
new file mode 100644
index 000000000000..bfecfbb3664f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/couchdb.nix
@@ -0,0 +1,225 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.couchdb;
+  opt = options.services.couchdb;
+  configFile = pkgs.writeText "couchdb.ini" (
+    ''
+      [couchdb]
+      database_dir = ${cfg.databaseDir}
+      uri_file = ${cfg.uriFile}
+      view_index_dir = ${cfg.viewIndexDir}
+    '' + (optionalString (cfg.adminPass != null) ''
+      [admins]
+      ${cfg.adminUser} = ${cfg.adminPass}
+    '' + ''
+      [chttpd]
+    '') +
+    ''
+      port = ${toString cfg.port}
+      bind_address = ${cfg.bindAddress}
+
+      [log]
+      file = ${cfg.logFile}
+    '');
+  executable = "${cfg.package}/bin/couchdb";
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.couchdb = {
+
+      enable = mkEnableOption (lib.mdDoc "CouchDB Server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.couchdb3;
+        defaultText = literalExpression "pkgs.couchdb3";
+        description = lib.mdDoc ''
+          CouchDB package to use.
+        '';
+      };
+
+      adminUser = mkOption {
+        type = types.str;
+        default = "admin";
+        description = lib.mdDoc ''
+          Couchdb (i.e. fauxton) account with permission for all dbs and
+          tasks.
+        '';
+      };
+
+      adminPass = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Password for the couchdb (i.e. fauxton) admin account. Note that it
+          ends up world readable in the Nix store via the generated
+          configuration file.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "couchdb";
+        description = lib.mdDoc ''
+          User account under which couchdb runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "couchdb";
+        description = lib.mdDoc ''
+          Group account under which couchdb runs.
+        '';
+      };
+
+      # couchdb options: https://docs.couchdb.org/en/latest/config/index.html
+
+      databaseDir = mkOption {
+        type = types.path;
+        default = "/var/lib/couchdb";
+        description = lib.mdDoc ''
+          Specifies location of CouchDB database files (*.couch named). This
+          location should be writable and readable for the user the CouchDB
+          service runs as (couchdb by default).
+        '';
+      };
+
+      uriFile = mkOption {
+        type = types.path;
+        default = "/run/couchdb/couchdb.uri";
+        description = lib.mdDoc ''
+          This file contains the full URI that can be used to access this
+          instance of CouchDB. It is used to help discover the port CouchDB is
+          running on (if it was set to 0, i.e. automatically assigned to any
+          free one). This file should be writable and readable for the user that
+          runs the CouchDB service (couchdb by default).
+        '';
+      };
+
+      viewIndexDir = mkOption {
+        type = types.path;
+        default = "/var/lib/couchdb";
+        description = lib.mdDoc ''
+          Specifies location of CouchDB view index files. This location should
+          be writable and readable for the user that runs the CouchDB service
+          (couchdb by default).
+        '';
+      };
+
+      bindAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          Defines the IP address by which CouchDB will be accessible.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5984;
+        description = lib.mdDoc ''
+          Defines the port number to listen on.
+        '';
+      };
+
+      logFile = mkOption {
+        type = types.path;
+        default = "/var/log/couchdb.log";
+        description = lib.mdDoc ''
+          Specifies the location of file for logging output.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration. Overrides any other configuration.
+        '';
+      };
+
+      argsFile = mkOption {
+        type = types.path;
+        default = "${cfg.package}/etc/vm.args";
+        defaultText = literalExpression ''"config.${opt.package}/etc/vm.args"'';
+        description = lib.mdDoc ''
+          vm.args configuration. Overrides Couchdb's Erlang VM parameters file.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Configuration file for persisting runtime changes. File
+          needs to be readable and writable by the couchdb user/group.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.couchdb.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    services.couchdb.configFile = mkDefault "/var/lib/couchdb/local.ini";
+
+    systemd.tmpfiles.rules = [
+      "d '${dirOf cfg.uriFile}' - ${cfg.user} ${cfg.group} - -"
+      "f '${cfg.logFile}' - ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.databaseDir}' -  ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.viewIndexDir}' -  ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.couchdb = {
+      description = "CouchDB Server";
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
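+        # make sure the mutable config file exists, and generate a random
+        # Erlang distribution cookie on first start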
+        touch ${cfg.configFile}
+        if ! test -e ${cfg.databaseDir}/.erlang.cookie; then
+          touch ${cfg.databaseDir}/.erlang.cookie
+          chmod 600 ${cfg.databaseDir}/.erlang.cookie
+          dd if=/dev/random bs=16 count=1 | base64 > ${cfg.databaseDir}/.erlang.cookie
+        fi
+      '';
+
+      environment = {
+        # we are actually specifying 5 configuration files:
+        # 1. the preinstalled default.ini
+        # 2. the module configuration
+        # 3. the extraConfig from the module options
+        # 4. the locally writable config file, which couchdb itself writes to
+        ERL_FLAGS = ''-couch_ini ${cfg.package}/etc/default.ini ${configFile} ${pkgs.writeText "couchdb-extra.ini" cfg.extraConfig} ${cfg.configFile}'';
+        # 5. the vm.args file
+        COUCHDB_ARGS_FILE = ''${cfg.argsFile}'';
+        HOME = ''${cfg.databaseDir}'';
+      };
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = executable;
+      };
+    };
+
+    users.users.couchdb = {
+      description = "CouchDB Server user";
+      group = "couchdb";
+      uid = config.ids.uids.couchdb;
+    };
+
+    users.groups.couchdb.gid = config.ids.gids.couchdb;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/dgraph.nix b/nixpkgs/nixos/modules/services/databases/dgraph.nix
new file mode 100644
index 000000000000..7f005a9971a6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/dgraph.nix
@@ -0,0 +1,148 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dgraph;
+  settingsFormat = pkgs.formats.json {};
+  configFile = settingsFormat.generate "config.json" cfg.settings;
+  dgraphWithNode = pkgs.runCommand "dgraph" {
+    nativeBuildInputs = [ pkgs.makeWrapper ];
+  }
+  ''
+    mkdir -p $out/bin
+    makeWrapper ${cfg.package}/bin/dgraph $out/bin/dgraph \
+      --prefix PATH : "${lib.makeBinPath [ pkgs.nodejs ]}"
+  '';
+  securityOptions = {
+      NoNewPrivileges = true;
+
+      AmbientCapabilities = "";
+      CapabilityBoundingSet = "";
+
+      DeviceAllow = "";
+
+      LockPersonality = true;
+
+      PrivateTmp = true;
+      PrivateDevices = true;
+      PrivateUsers = true;
+
+      ProtectClock = true;
+      ProtectControlGroups = true;
+      ProtectHostname = true;
+      ProtectKernelLogs = true;
+      ProtectKernelModules = true;
+      ProtectKernelTunables = true;
+
+      RemoveIPC = true;
+
+      RestrictNamespaces = true;
+      RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+      RestrictRealtime = true;
+      RestrictSUIDSGID = true;
+
+      SystemCallArchitectures = "native";
+      SystemCallErrorNumber = "EPERM";
+      SystemCallFilter = [
+        "@system-service"
+        "~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
+      ];
+  };
+in
+{
+  options = {
+    services.dgraph = {
+      enable = mkEnableOption (lib.mdDoc "Dgraph native GraphQL database with a graph backend");
+
+      package = lib.mkPackageOptionMD pkgs "dgraph" { };
+
+      settings = mkOption {
+        type = settingsFormat.type;
+        default = {};
+        description = lib.mdDoc ''
+          Contents of the dgraph config. For more details see <https://dgraph.io/docs/deploy/config>.
+        '';
+      };
+
+      alpha = {
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            The host on which dgraph alpha will run.
+          '';
+        };
+        port = mkOption {
+          type = types.port;
+          default = 7080;
+          description = lib.mdDoc ''
+            The port on which to run dgraph alpha.
+          '';
+        };
+
+      };
+
+      zero = {
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            The host on which dgraph zero will run.
+          '';
+        };
+        port = mkOption {
+          type = types.port;
+          default = 5080;
+          description = lib.mdDoc ''
+            The port on which to run dgraph zero.
+          '';
+        };
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.dgraph.settings = {
+      badger.compression = mkDefault "zstd:3";
+    };
+
+    systemd.services.dgraph-zero = {
+      description = "Dgraph native GraphQL database with a graph backend. Zero controls node clustering";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        StateDirectory = "dgraph-zero";
+        WorkingDirectory = "/var/lib/dgraph-zero";
+        DynamicUser = true;
+        ExecStart = "${cfg.package}/bin/dgraph zero --my ${cfg.zero.host}:${toString cfg.zero.port}";
+        Restart = "on-failure";
+      } // securityOptions;
+    };
+
+    systemd.services.dgraph-alpha = {
+      description = "Dgraph native GraphQL database with a graph backend. Alpha serves data";
+      after = [ "network.target" "dgraph-zero.service" ];
+      requires = [ "dgraph-zero.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        StateDirectory = "dgraph-alpha";
+        WorkingDirectory = "/var/lib/dgraph-alpha";
+        DynamicUser = true;
+        ExecStart = "${dgraphWithNode}/bin/dgraph alpha --config ${configFile} --my ${cfg.alpha.host}:${toString cfg.alpha.port} --zero ${cfg.zero.host}:${toString cfg.zero.port}";
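+        # Ask alpha to shut down cleanly via its GraphQL admin endpoint
+        # (alpha's default HTTP port, 8080)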
+        ExecStop = ''
+          ${pkgs.curl}/bin/curl --data "mutation { shutdown { response { message code } } }" \
+              --header 'Content-Type: application/graphql' \
+              -X POST \
+              http://localhost:8080/admin
+        '';
+        Restart = "on-failure";
+      } // securityOptions;
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ happysalada ];
+}
diff --git a/nixpkgs/nixos/modules/services/databases/dragonflydb.nix b/nixpkgs/nixos/modules/services/databases/dragonflydb.nix
new file mode 100644
index 000000000000..46a0c188c3ae
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/dragonflydb.nix
@@ -0,0 +1,152 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dragonflydb;
+  dragonflydb = pkgs.dragonflydb;
+
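+  # Every attribute below is rendered as a "--name value" command-line flag in
+  # ExecStart; options left at null are omitted entirely.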
+  settings =
+    {
+      port = cfg.port;
+      dir = "/var/lib/dragonflydb";
+      keys_output_limit = cfg.keysOutputLimit;
+    } //
+    (lib.optionalAttrs (cfg.bind != null) { bind = cfg.bind; }) //
+    (lib.optionalAttrs (cfg.requirePass != null) { requirepass = cfg.requirePass; }) //
+    (lib.optionalAttrs (cfg.maxMemory != null) { maxmemory = cfg.maxMemory; }) //
+    (lib.optionalAttrs (cfg.memcachePort != null) { memcache_port = cfg.memcachePort; }) //
+    (lib.optionalAttrs (cfg.dbNum != null) { dbnum = cfg.dbNum; }) //
+    (lib.optionalAttrs (cfg.cacheMode != null) { cache_mode = cfg.cacheMode; });
+in
+{
+
+  ###### interface
+
+  options = {
+    services.dragonflydb = {
+      enable = mkEnableOption (lib.mdDoc "DragonflyDB");
+
+      user = mkOption {
+        type = types.str;
+        default = "dragonfly";
+        description = lib.mdDoc "The user to run DragonflyDB as";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 6379;
+        description = lib.mdDoc "The TCP port to accept connections.";
+      };
+
+      bind = mkOption {
+        type = with types; nullOr str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          The IP interface to bind to.
+          `null` means "all interfaces".
+        '';
+      };
+
+      requirePass = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc "Password for database";
+        example = "letmein!";
+      };
+
+      maxMemory = mkOption {
+        type = with types; nullOr ints.unsigned;
+        default = null;
+        description = lib.mdDoc ''
+          The maximum amount of memory to use for storage (in bytes).
+          `null` means this will be automatically set.
+        '';
+      };
+
+      memcachePort = mkOption {
+        type = with types; nullOr port;
+        default = null;
+        description = lib.mdDoc ''
+          Enable a memcached-compatible API on this port.
+          `null` means disabled.
+        '';
+      };
+
+      keysOutputLimit = mkOption {
+        type = types.ints.unsigned;
+        default = 8192;
+        description = lib.mdDoc ''
+          Maximum number of keys returned by the `keys` command.
+          `keys` is a dangerous command; its result is truncated to avoid
+          excessive memory use when fetching too many keys.
+        '';
+      };
+
+      dbNum = mkOption {
+        type = with types; nullOr ints.unsigned;
+        default = null;
+        description = lib.mdDoc "Maximum number of supported databases for `select`";
+      };
+
+      cacheMode = mkOption {
+        type = with types; nullOr bool;
+        default = null;
+        description = lib.mdDoc ''
+          When this mode is on, Dragonfly evicts the items least likely to be
+          accessed again, but only when it is near the maxmemory limit.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.dragonflydb.enable {
+
+    users.users = optionalAttrs (cfg.user == "dragonfly") {
+      dragonfly.description = "DragonflyDB server user";
+      dragonfly.isSystemUser = true;
+      dragonfly.group = "dragonfly";
+    };
+    users.groups = optionalAttrs (cfg.user == "dragonfly") { dragonfly = { }; };
+
+    environment.systemPackages = [ dragonflydb ];
+
+    systemd.services.dragonflydb = {
+      description = "DragonflyDB server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = "${dragonflydb}/bin/dragonfly --alsologtostderr ${builtins.concatStringsSep " " (attrsets.mapAttrsToList (n: v: "--${n} ${strings.escapeShellArg v}") settings)}";
+
+        User = cfg.user;
+
+        # Filesystem access
+        ReadWritePaths = [ settings.dir ];
+        StateDirectory = "dragonflydb";
+        StateDirectoryMode = "0700";
+        # Process Properties
+        LimitMEMLOCK = "infinity";
+        # Caps
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        LockPersonality = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictRealtime = true;
+        PrivateMounts = true;
+        MemoryDenyWriteExecute = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/ferretdb.nix b/nixpkgs/nixos/modules/services/databases/ferretdb.nix
new file mode 100644
index 000000000000..ab55e22bf214
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/ferretdb.nix
@@ -0,0 +1,79 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ferretdb;
+in
+{
+
+  meta.maintainers = with lib.maintainers; [ julienmalka camillemndn ];
+
+  options = {
+    services.ferretdb = {
+      enable = mkEnableOption "FerretDB, an Open Source MongoDB alternative";
+
+      package = mkOption {
+        type = types.package;
+        example = literalExpression "pkgs.ferretdb";
+        default = pkgs.ferretdb;
+        defaultText = "pkgs.ferretdb";
+        description = "FerretDB package to use.";
+      };
+
+      settings = lib.mkOption {
+        type =
+          lib.types.submodule { freeformType = with lib.types; attrsOf str; };
+        example = {
+          FERRETDB_LOG_LEVEL = "warn";
+          FERRETDB_MODE = "normal";
+        };
+        description = ''
+          Additional configuration for FerretDB, see
+          <https://docs.ferretdb.io/configuration/flags/>
+          for supported values.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable
+    {
+
+      services.ferretdb.settings = {
+        FERRETDB_HANDLER = lib.mkDefault "sqlite";
+        FERRETDB_SQLITE_URL = lib.mkDefault "file:/var/lib/ferretdb/";
+      };
+
+      systemd.services.ferretdb = {
+        description = "FerretDB";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        environment = cfg.settings;
+        serviceConfig = {
+          Type = "simple";
+          StateDirectory = "ferretdb";
+          WorkingDirectory = "/var/lib/ferretdb";
+          ExecStart = "${cfg.package}/bin/ferretdb";
+          Restart = "on-failure";
+          ProtectHome = true;
+          ProtectSystem = "strict";
+          PrivateTmp = true;
+          PrivateDevices = true;
+          ProtectHostname = true;
+          ProtectClock = true;
+          ProtectKernelTunables = true;
+          ProtectKernelModules = true;
+          ProtectKernelLogs = true;
+          ProtectControlGroups = true;
+          NoNewPrivileges = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          RemoveIPC = true;
+          PrivateMounts = true;
+          DynamicUser = true;
+        };
+      };
+    };
+}
+
diff --git a/nixpkgs/nixos/modules/services/databases/firebird.nix b/nixpkgs/nixos/modules/services/databases/firebird.nix
new file mode 100644
index 000000000000..3927c81d953d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/firebird.nix
@@ -0,0 +1,168 @@
+{ config, lib, pkgs, ... }:
+
+# TODO: This file may need additional review, e.g. which configurations to
+# expose to the user.
+#
+# I only used it to access some simple databases.
+
+# test:
+# isql, then type the following commands:
+# CREATE DATABASE '/var/db/firebird/data/test.fdb' USER 'SYSDBA' PASSWORD 'masterkey';
+# CONNECT '/var/db/firebird/data/test.fdb' USER 'SYSDBA' PASSWORD 'masterkey';
+# CREATE TABLE test ( text varchar(100) );
+# DROP DATABASE;
+#
+# Be careful, virtuoso-opensource also provides a different isql command !
+
+# There are at least two ways to run firebird. superserver has been chosen
+# however there are no strong reasons to prefer this or the other one AFAIK
+# Eg superserver is said to be most efficiently using resources according to
+# https://www.firebirdsql.org/manual/qsg25-classic-or-super.html
+
+with lib;
+
+let
+
+  cfg = config.services.firebird;
+
+  firebird = cfg.package;
+
+  dataDir = "${cfg.baseDir}/data";
+  systemDir = "${cfg.baseDir}/system";
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.firebird = {
+
+      enable = mkEnableOption (lib.mdDoc "the Firebird super server");
+
+      package = mkOption {
+        default = pkgs.firebird;
+        defaultText = literalExpression "pkgs.firebird";
+        type = types.package;
+        example = literalExpression "pkgs.firebird_3";
+        description = lib.mdDoc ''
+          Which Firebird package to install, e.g. `pkgs.firebird_3`.
+          For SuperServer, use an override: `pkgs.firebird_3.override { superServer = true; };`
+        '';
+      };
+
+      port = mkOption {
+        default = 3050;
+        type = types.port;
+        description = lib.mdDoc ''
+          Port Firebird uses.
+        '';
+      };
+
+      user = mkOption {
+        default = "firebird";
+        type = types.str;
+        description = lib.mdDoc ''
+          User account under which firebird runs.
+        '';
+      };
+
+      baseDir = mkOption {
+        default = "/var/lib/firebird";
+        type = types.str;
+        description = lib.mdDoc ''
+          Location containing data/ and system/ directories.
+          data/ stores the databases, system/ stores the password database security2.fdb.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.firebird.enable {
+
+    environment.systemPackages = [cfg.package];
+
+    systemd.tmpfiles.rules = [
+      "d '${dataDir}' 0700 ${cfg.user} - - -"
+      "d '${systemDir}' 0700 ${cfg.user} - - -"
+    ];
+
+    systemd.services.firebird =
+      { description = "Firebird Super-Server";
+
+        wantedBy = [ "multi-user.target" ];
+
+        # TODO: moving security2.fdb into the data directory works, maybe there
+        # is a better way
+        preStart =
+          ''
+            if ! test -e "${systemDir}/security2.fdb"; then
+                cp ${firebird}/security2.fdb "${systemDir}"
+            fi
+
+            if ! test -e "${systemDir}/security3.fdb"; then
+                cp ${firebird}/security3.fdb "${systemDir}"
+            fi
+
+            if ! test -e "${systemDir}/security4.fdb"; then
+                cp ${firebird}/security4.fdb "${systemDir}"
+            fi
+
+            chmod -R 700         "${dataDir}" "${systemDir}" /var/log/firebird
+          '';
+
+        serviceConfig.User = cfg.user;
+        serviceConfig.LogsDirectory = "firebird";
+        serviceConfig.LogsDirectoryMode = "0700";
+        serviceConfig.ExecStart = "${firebird}/bin/fbserver -d";
+
+        # TODO think about shutdown
+      };
+
+    environment.etc."firebird/firebird.msg".source = "${firebird}/firebird.msg";
+
+    # think about this again - and eventually make it an option
+    environment.etc."firebird/firebird.conf".text = ''
+      # RootDirectory = Restrict ${dataDir}
+      DatabaseAccess = Restrict ${dataDir}
+      ExternalFileAccess = Restrict ${dataDir}
+      # what is this? is None allowed?
+      UdfAccess = None
+      # "Native" =  traditional interbase/firebird, "mixed" is windows only
+      Authentication = Native
+
+      # defaults to -1 on non Win32
+      #MaxUnflushedWrites = 100
+      #MaxUnflushedWriteTime = 100
+
+      # show trace if trouble occurs (does this require debug build?)
+      # BugcheckAbort = 0
+      # ConnectionTimeout = 180
+
+      #RemoteServiceName = gds_db
+      RemoteServicePort = ${toString cfg.port}
+
+      # randomly choose port for server Event Notification
+      #RemoteAuxPort = 0
+      # restrict connections to a network card:
+      #RemoteBindAddress =
+      # there are some additional settings which should be reviewed
+    '';
+
+    users.users.firebird = {
+      description = "Firebird server user";
+      group = "firebird";
+      uid = config.ids.uids.firebird;
+    };
+
+    users.groups.firebird.gid = config.ids.gids.firebird;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/foundationdb.md b/nixpkgs/nixos/modules/services/databases/foundationdb.md
new file mode 100644
index 000000000000..0815c139152f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/foundationdb.md
@@ -0,0 +1,309 @@
+# FoundationDB {#module-services-foundationdb}
+
+*Source:* {file}`modules/services/databases/foundationdb.nix`
+
+*Upstream documentation:* <https://apple.github.io/foundationdb/>
+
+*Maintainer:* Austin Seipp
+
+*Available version(s):* 7.1.x
+
+FoundationDB (or "FDB") is an open source, distributed, transactional
+key-value store.
+
+## Configuring and basic setup {#module-services-foundationdb-configuring}
+
+To enable FoundationDB, add the following to your
+{file}`configuration.nix`:
+```
+services.foundationdb.enable = true;
+services.foundationdb.package = pkgs.foundationdb71; # FoundationDB 7.1.x
+```
+
+The {option}`services.foundationdb.package` option is required, and
+must always be specified. Because FoundationDB network protocols and
+on-disk storage formats may change between (major) versions, and upgrades
+must be explicitly handled by the user, you must always specify the package
+yourself so that the NixOS module will use the proper version. Note
+that minor, bugfix releases are always compatible.
+
+After running {command}`nixos-rebuild`, you can verify whether
+FoundationDB is running by executing {command}`fdbcli` (which is
+added to {option}`environment.systemPackages`):
+```ShellSession
+$ sudo -u foundationdb fdbcli
+Using cluster file `/etc/foundationdb/fdb.cluster'.
+
+The database is available.
+
+Welcome to the fdbcli. For help, type `help'.
+fdb> status
+
+Using cluster file `/etc/foundationdb/fdb.cluster'.
+
+Configuration:
+  Redundancy mode        - single
+  Storage engine         - memory
+  Coordinators           - 1
+
+Cluster:
+  FoundationDB processes - 1
+  Machines               - 1
+  Memory availability    - 5.4 GB per process on machine with least available
+  Fault Tolerance        - 0 machines
+  Server time            - 04/20/18 15:21:14
+
+...
+
+fdb>
+```
+
+You can also write programs using the available client libraries. For
+example, the following Python program can be run in order to grab the
+cluster status, as a quick example. (This example uses
+{command}`nix-shell` shebang support to automatically supply the
+necessary Python modules).
+```ShellSession
+a@link> cat fdb-status.py
+#! /usr/bin/env nix-shell
+#! nix-shell -i python -p python pythonPackages.foundationdb71
+
+import fdb
+import json
+
+def main():
+    fdb.api_version(520)
+    db = fdb.open()
+
+    @fdb.transactional
+    def get_status(tr):
+        return str(tr['\xff\xff/status/json'])
+
+    obj = json.loads(get_status(db))
+    print('FoundationDB available: %s' % obj['client']['database_status']['available'])
+
+if __name__ == "__main__":
+    main()
+a@link> chmod +x fdb-status.py
+a@link> ./fdb-status.py
+FoundationDB available: True
+a@link>
+```
+
+FoundationDB is run under the {command}`foundationdb` user and group
+by default, but this may be changed in the NixOS configuration. The systemd
+unit {command}`foundationdb.service` controls the
+{command}`fdbmonitor` process.
+
+By default, the NixOS module for FoundationDB creates a single SSD-storage
+based database for development and basic usage. This storage engine is
+designed for SSDs and will perform poorly on HDDs; however it can handle far
+more data than the alternative "memory" engine and is a better default
+choice for most deployments. (Note that you can change the storage backend
+on-the-fly for a given FoundationDB cluster using
+{command}`fdbcli`.)
+
+Furthermore, only 1 server process and 1 backup agent are started in the
+default configuration. See below for more on scaling to increase this.
+
+FoundationDB stores all data for all server processes under
+{file}`/var/lib/foundationdb`. You can override this using
+{option}`services.foundationdb.dataDir`, e.g.
+```
+services.foundationdb.dataDir = "/data/fdb";
+```
+
+Similarly, logs are stored under {file}`/var/log/foundationdb`
+by default, and there is a corresponding
+{option}`services.foundationdb.logDir` as well.
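+
+For example (the path here is only illustrative):
+```
+services.foundationdb.logDir = "/data/fdb-logs";
+```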
+
+## Scaling processes and backup agents {#module-services-foundationdb-scaling}
+
+Scaling the number of server processes is quite easy; simply specify
+{option}`services.foundationdb.serverProcesses` to be the number of
+FoundationDB worker processes that should be started on the machine.
+
+FoundationDB worker processes typically require at least 4GB of RAM per
+process for good performance, so this option defaults to 1 since the amount
+of available RAM is unknown. You're advised to abide by this guideline and
+pick a number of processes such that each one has 4GB or more.
+
+A similar option exists in order to scale backup agent processes,
+{option}`services.foundationdb.backupProcesses`. Backup agents are
+not as performance/RAM sensitive, so feel free to experiment with the number
+of available backup processes.
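+
+For example, on a machine with plenty of spare RAM you might run four worker
+processes and two backup agents (a sketch; pick counts to match your hardware):
+
+```
+services.foundationdb.serverProcesses = 4;
+services.foundationdb.backupProcesses = 2;
+```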
+
+## Clustering {#module-services-foundationdb-clustering}
+
+FoundationDB on NixOS works similarly to other Linux systems, so this
+section will be brief. Please refer to the full FoundationDB documentation
+for more on clustering.
+
+FoundationDB organizes clusters using a set of
+*coordinators*, which are just specially-designated
+worker processes. By default, every installation of FoundationDB on NixOS
+will start as its own individual cluster, with a single coordinator: the
+first worker process on {command}`localhost`.
+
+Coordinators are specified globally using the
+{command}`/etc/foundationdb/fdb.cluster` file, which all servers and
+client applications will use to find and join coordinators. Note that this
+file *can not* be managed by NixOS so easily:
+FoundationDB is designed so that it will rewrite the file at runtime for all
+clients and nodes when cluster coordinators change, with clients
+transparently handling this without intervention. It is fundamentally a
+mutable file, and you should not try to manage it in any way in NixOS.
+
+When dealing with a cluster, there are two main things you want to do:
+
+  - Add a node to the cluster for storage/compute.
+  - Promote an ordinary worker to a coordinator.
+
+A node must already be a member of the cluster in order to properly be
+promoted to a coordinator, so you must always add it first if you wish to
+promote it.
+
+To add a machine to a FoundationDB cluster:
+
+  - Choose one of the servers to start as the initial coordinator.
+  - Copy the {command}`/etc/foundationdb/fdb.cluster` file from this
+    server to all the other servers. Restart FoundationDB on all of these
+    other servers, so they join the cluster.
+  - All of these servers are now connected and working together in the
+    cluster, under the chosen coordinator.
+
+At this point, you can add as many nodes as you want by just repeating the
+above steps. By default there will still be a single coordinator: you can
+use {command}`fdbcli` to change this and add new coordinators.
+
+As a convenience, FoundationDB can automatically assign coordinators based
+on the redundancy mode you wish to achieve for the cluster. Once all the
+nodes have been joined, simply set the replication policy, and then issue
+the {command}`coordinators auto` command.
+
+For example, assuming we have 3 nodes available, we can enable double
+redundancy mode, then auto-select coordinators. For double redundancy, 3
+coordinators is ideal: therefore FoundationDB will make
+*every* node a coordinator automatically:
+
+```ShellSession
+fdbcli> configure double ssd
+fdbcli> coordinators auto
+```
+
+This will transparently update all the servers within seconds, and
+appropriately rewrite the {command}`fdb.cluster` file, as well as
+informing all client processes to do the same.
+
+## Client connectivity {#module-services-foundationdb-connectivity}
+
+By default, all clients must use the current {command}`fdb.cluster`
+file to access a given FoundationDB cluster. This file is located by default
+in {command}`/etc/foundationdb/fdb.cluster` on all machines with the
+FoundationDB service enabled, so you may copy the active one from your
+cluster to a new node in order to connect, if it is not part of the cluster.
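+
+For instance, from a machine that is already part of the cluster (a sketch;
+`new-client` is a placeholder hostname):
+
+```ShellSession
+$ scp /etc/foundationdb/fdb.cluster new-client:/etc/foundationdb/fdb.cluster
+```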
+
+## Client authorization and TLS {#module-services-foundationdb-authorization}
+
+By default, any user who can connect to a FoundationDB process with the
+correct cluster configuration can access anything. FoundationDB uses a
+pluggable design to transport security, and out of the box it supports a
+LibreSSL-based plugin for TLS support. This plugin not only does in-flight
+encryption, but also performs client authorization based on the given
+endpoint's certificate chain. For example, a FoundationDB server may be
+configured to only accept client connections over TLS, where the client TLS
+certificate is from organization *Acme Co* in the
+*Research and Development* unit.
+
+Configuring TLS with FoundationDB is done using the
+{option}`services.foundationdb.tls` options in order to control the
+peer verification string, as well as the certificate and its private key.
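+
+For example (a sketch; the certificate and key paths are placeholders for
+files you provision yourself, outside the Nix store):
+
+```
+services.foundationdb.tls = {
+  certificate = "/etc/foundationdb/fdb.pem";
+  key = "/etc/foundationdb/fdb.key";
+  # allowedPeers keeps its default of "Check.Valid=1,Check.Unexpired=1" unless overridden
+};
+```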
+
+Note that the certificate and its private key must be accessible to the
+FoundationDB user account that the server runs under. These files are also
+NOT managed by NixOS, as putting them into the store may reveal private
+information.
+
+After you have a key and certificate file in place, it is not enough to
+simply set the NixOS module options -- you must also configure the
+{command}`fdb.cluster` file to specify that a given set of
+coordinators use TLS. This is as simple as adding the suffix
+{command}`:tls` to your cluster coordinator configuration, after the
+port number. For example, assuming you have a coordinator on localhost with
+the default configuration, simply specifying:
+
+```
+XXXXXX:XXXXXX@127.0.0.1:4500:tls
+```
+
+will configure all clients and server processes to use TLS from now on.
+
+## Backups and Disaster Recovery {#module-services-foundationdb-disaster-recovery}
+
+The usual rules for doing FoundationDB backups apply on NixOS as written in
+the FoundationDB manual. However, one important difference is the security
+profile for NixOS: by default, the {command}`foundationdb` systemd
+unit uses *Linux namespaces* to restrict write access to
+the system, except for the log directory, data directory, and the
+{command}`/etc/foundationdb/` directory. This is enforced by default
+and cannot be disabled.
+
+However, a side effect of this is that the {command}`fdbbackup`
+command doesn't work properly for local filesystem backups: FoundationDB
+uses a server process alongside the database processes to perform backups
+and copy the backups to the filesystem. As a result, this process is put
+under the restricted namespaces above: the backup process can only write to
+a limited number of paths.
+
+In order to allow flexible backup locations on local disks, the FoundationDB
+NixOS module supports a
+{option}`services.foundationdb.extraReadWritePaths` option. This
+option takes a list of paths, and adds them to the systemd unit, allowing
+the processes inside the service to write (and read) the specified
+directories.
+
+For example, to create backups in {command}`/opt/fdb-backups`, first
+set up the paths in the module options:
+
+```
+services.foundationdb.extraReadWritePaths = [ "/opt/fdb-backups" ];
+```
+
+Restart the FoundationDB service, and it will now be able to write to this
+directory. Note: the path *must* exist before the unit is
+restarted; the unit will still start if it is missing (the path is marked as
+optional), but systemd will not include it in the private FoundationDB
+namespace (and it will not add it dynamically at runtime).
+
+You can now perform a backup:
+
+```ShellSession
+$ sudo -u foundationdb fdbbackup start  -t default -d file:///opt/fdb-backups
+$ sudo -u foundationdb fdbbackup status -t default
+```
+
+## Known limitations {#module-services-foundationdb-limitations}
+
+The FoundationDB setup for NixOS should currently be considered beta.
+FoundationDB is not new software, but the NixOS compilation and integration
+has only undergone fairly basic testing of all the available functionality.
+
+  - There is no way to specify individual parameters for individual
+    {command}`fdbserver` processes. Currently, all server processes
+    inherit all the global {command}`fdbmonitor` settings.
+  - Ruby bindings are not currently installed.
+  - Go bindings are not currently installed.
+
+## Options {#module-services-foundationdb-options}
+
+NixOS's FoundationDB module allows you to configure all of the most relevant
+configuration options for {command}`fdbmonitor`, matching it quite
+closely. A complete list of options for the FoundationDB module may be found
+[here](#opt-services.foundationdb.enable). You should
+also read the FoundationDB documentation.
+
+## Full documentation {#module-services-foundationdb-full-docs}
+
+FoundationDB is a complex piece of software, and requires careful
+administration to properly use. Full documentation for administration can be
+found here: <https://apple.github.io/foundationdb/>.
diff --git a/nixpkgs/nixos/modules/services/databases/foundationdb.nix b/nixpkgs/nixos/modules/services/databases/foundationdb.nix
new file mode 100644
index 000000000000..48e9898a68c2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/foundationdb.nix
@@ -0,0 +1,429 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.foundationdb;
+  pkg = cfg.package;
+
+  # used for initial cluster configuration
+  initialIpAddr = if (cfg.publicAddress != "auto") then cfg.publicAddress else "127.0.0.1";
+
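+  # Render one [fdbserver.PORT] / [backup_agent.N] section per requested
+  # process; fdbmonitor starts one process for each section.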
+  fdbServers = n:
+    concatStringsSep "\n" (map (x: "[fdbserver.${toString (x+cfg.listenPortStart)}]") (range 0 (n - 1)));
+
+  backupAgents = n:
+    concatStringsSep "\n" (map (x: "[backup_agent.${toString x}]") (range 1 n));
+
+  configFile = pkgs.writeText "foundationdb.conf" ''
+    [general]
+    cluster_file  = /etc/foundationdb/fdb.cluster
+
+    [fdbmonitor]
+    restart_delay = ${toString cfg.restartDelay}
+    user          = ${cfg.user}
+    group         = ${cfg.group}
+
+    [fdbserver]
+    command        = ${pkg}/bin/fdbserver
+    public_address = ${cfg.publicAddress}:$ID
+    listen_address = ${cfg.listenAddress}
+    datadir        = ${cfg.dataDir}/$ID
+    logdir         = ${cfg.logDir}
+    logsize        = ${cfg.logSize}
+    maxlogssize    = ${cfg.maxLogSize}
+    ${optionalString (cfg.class != null) "class = ${cfg.class}"}
+    memory         = ${cfg.memory}
+    storage_memory = ${cfg.storageMemory}
+
+    ${optionalString (lib.versionAtLeast cfg.package.version "6.1") ''
+    trace_format   = ${cfg.traceFormat}
+    ''}
+
+    ${optionalString (cfg.tls != null) ''
+      tls_plugin           = ${pkg}/libexec/plugins/FDBLibTLS.so
+      tls_certificate_file = ${cfg.tls.certificate}
+      tls_key_file         = ${cfg.tls.key}
+      tls_verify_peers     = ${cfg.tls.allowedPeers}
+    ''}
+
+    ${optionalString (cfg.locality.machineId    != null) "locality_machineid=${cfg.locality.machineId}"}
+    ${optionalString (cfg.locality.zoneId       != null) "locality_zoneid=${cfg.locality.zoneId}"}
+    ${optionalString (cfg.locality.datacenterId != null) "locality_dcid=${cfg.locality.datacenterId}"}
+    ${optionalString (cfg.locality.dataHall     != null) "locality_data_hall=${cfg.locality.dataHall}"}
+
+    ${fdbServers cfg.serverProcesses}
+
+    [backup_agent]
+    command = ${pkg}/libexec/backup_agent
+    ${backupAgents cfg.backupProcesses}
+  '';
+in
+{
+  options.services.foundationdb = {
+
+    enable = mkEnableOption (lib.mdDoc "FoundationDB Server");
+
+    package = mkOption {
+      type        = types.package;
+      description = lib.mdDoc ''
+        The FoundationDB package to use for this server. This must be specified by the user
+        in order to ensure migrations and upgrades are controlled appropriately.
+      '';
+    };
+
+    publicAddress = mkOption {
+      type        = types.str;
+      default     = "auto";
+      description = lib.mdDoc "Publicly visible IP address of the process. Port is determined by process ID";
+    };
+
+    listenAddress = mkOption {
+      type        = types.str;
+      default     = "public";
+      description = lib.mdDoc "Publicly visible IP address of the process. Port is determined by process ID";
+    };
+
+    listenPortStart = mkOption {
+      type          = types.int;
+      default       = 4500;
+      description   = lib.mdDoc ''
+        Starting port number for database listening sockets. Every FDB process binds to a
+        subsequent port, so this number reflects the start of the overall range, e.g. having
+        8 server processes will use all ports between 4500 and 4507.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type        = types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        Open the firewall ports corresponding to FoundationDB processes and coordinators
+        using {option}`config.networking.firewall.*`.
+      '';
+    };
+
+    dataDir = mkOption {
+      type        = types.path;
+      default     = "/var/lib/foundationdb";
+      description = lib.mdDoc "Data directory. All cluster data will be put under here.";
+    };
+
+    logDir = mkOption {
+      type        = types.path;
+      default     = "/var/log/foundationdb";
+      description = lib.mdDoc "Log directory.";
+    };
+
+    user = mkOption {
+      type        = types.str;
+      default     = "foundationdb";
+      description = lib.mdDoc "User account under which FoundationDB runs.";
+    };
+
+    group = mkOption {
+      type        = types.str;
+      default     = "foundationdb";
+      description = lib.mdDoc "Group account under which FoundationDB runs.";
+    };
+
+    class = mkOption {
+      type        = types.nullOr (types.enum [ "storage" "transaction" "stateless" ]);
+      default     = null;
+      description = lib.mdDoc "Process class";
+    };
+
+    restartDelay = mkOption {
+      type = types.int;
+      default = 10;
+      description = lib.mdDoc "Number of seconds to wait before restarting servers.";
+    };
+
+    logSize = mkOption {
+      type        = types.str;
+      default     = "10MiB";
+      description = lib.mdDoc ''
+        Roll over to a new log file after the current log file
+        reaches the specified size.
+      '';
+    };
+
+    maxLogSize = mkOption {
+      type        = types.str;
+      default     = "100MiB";
+      description = lib.mdDoc ''
+        Delete the oldest log file when the total size of all log
+        files exceeds the specified size. If set to 0, old log files
+        will not be deleted.
+      '';
+    };
+
+    serverProcesses = mkOption {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc "Number of fdbserver processes to run.";
+    };
+
+    backupProcesses = mkOption {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc "Number of backup_agent processes to run for snapshots.";
+    };
+
+    memory = mkOption {
+      type        = types.str;
+      default     = "8GiB";
+      description = lib.mdDoc ''
+        Maximum memory used by the process. The default value is
+        `8GiB`. When specified without a unit,
+        `MiB` is assumed. This parameter does not
+        change the memory allocation of the program. Rather, it sets
+        a hard limit beyond which the process will kill itself and
+        be restarted. The default value of `8GiB`
+        is double the intended memory usage in the default
+        configuration (providing an emergency buffer to deal with
+        memory leaks or similar problems). It is not recommended to
+        decrease the value of this parameter below its default
+        value. It may be increased if you wish to allocate a very
+        large amount of storage engine memory or cache. In
+        particular, when the `storageMemory`
+        parameter is increased, the `memory`
+        parameter should be increased by an equal amount.
+      '';
+    };
+
+    storageMemory = mkOption {
+      type        = types.str;
+      default     = "1GiB";
+      description = lib.mdDoc ''
+        Maximum memory used for data storage. The default value is
+        `1GiB`. When specified without a unit,
+        `MB` is assumed. Clusters using the memory
+        storage engine will be restricted to using this amount of
+        memory per process for purposes of data storage. Memory
+        overhead associated with storing the data is counted against
+        this total. If you increase the
+        `storageMemory`, you should also increase
+        the `memory` parameter by the same amount.
+      '';
+    };
+
+    tls = mkOption {
+      default = null;
+      description = lib.mdDoc ''
+        FoundationDB Transport Security Layer (TLS) settings.
+      '';
+
+      type = types.nullOr (types.submodule ({
+        options = {
+          certificate = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Path to the TLS certificate file. This certificate will
+              be offered to, and may be verified by, clients.
+            '';
+          };
+
+          key = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Private key file for the certificate.";
+          };
+
+          allowedPeers = mkOption {
+            type = types.str;
+            default = "Check.Valid=1,Check.Unexpired=1";
+            description = lib.mdDoc ''
+              "Peer verification string". This may be used to adjust which TLS
+              client certificates a server will accept, as a form of user
+              authorization; for example, it may only accept TLS clients who
+              offer a certificate abiding by some locality or organization name.
+
+              For more information, please see the FoundationDB documentation.
+            '';
+          };
+        };
+      }));
+    };
+
+    locality = mkOption {
+      default = {
+        machineId    = null;
+        zoneId       = null;
+        datacenterId = null;
+        dataHall     = null;
+      };
+
+      description = lib.mdDoc ''
+        FoundationDB locality settings.
+      '';
+
+      type = types.submodule ({
+        options = {
+          machineId = mkOption {
+            default = null;
+            type = types.nullOr types.str;
+            description = lib.mdDoc ''
+              Machine identifier key. All processes on a machine should share a
+              unique id. By default, processes on a machine determine a unique id to share.
+              This does not generally need to be set.
+            '';
+          };
+
+          zoneId = mkOption {
+            default = null;
+            type = types.nullOr types.str;
+            description = lib.mdDoc ''
+              Zone identifier key. Processes that share a zone id are
+              considered non-unique for the purposes of data replication.
+              If unset, defaults to machine id.
+            '';
+          };
+
+          datacenterId = mkOption {
+            default = null;
+            type = types.nullOr types.str;
+            description = lib.mdDoc ''
+              Data center identifier key. All processes physically located in a
+              data center should share the id. If you are depending on data
+              center based replication this must be set on all processes.
+            '';
+          };
+
+          dataHall = mkOption {
+            default = null;
+            type = types.nullOr types.str;
+            description = lib.mdDoc ''
+              Data hall identifier key. All processes physically located in a
+              data hall should share the id. If you are depending on data
+              hall based replication this must be set on all processes.
+            '';
+          };
+        };
+      });
+    };
+
+    extraReadWritePaths = mkOption {
+      default = [ ];
+      type = types.listOf types.path;
+      description = lib.mdDoc ''
+        An extra set of filesystem paths that FoundationDB can read to
+        and write from. By default, FoundationDB runs under a heavily
+        namespaced systemd environment without write access to most of
+        the filesystem outside of its data and log directories. By
+        adding paths to this list, the set of writeable paths will be
+        expanded. This is useful for allowing e.g. backups to local files,
+        which must be performed on behalf of the foundationdb service.
+      '';
+    };
+
+    pidfile = mkOption {
+      type        = types.path;
+      default     = "/run/foundationdb.pid";
+      description = lib.mdDoc "Path to pidfile for fdbmonitor.";
+    };
+
+    traceFormat = mkOption {
+      type = types.enum [ "xml" "json" ];
+      default = "xml";
+      description = lib.mdDoc "Trace logging format.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = lib.versionOlder cfg.package.version "6.1" -> cfg.traceFormat == "xml";
+        message = ''
+          Versions of FoundationDB before 6.1 do not support configurable trace formats (only XML is supported).
+          This option has no effect for version '' + cfg.package.version + '', and enabling it is an error.
+        '';
+      }
+    ];
+
+    environment.systemPackages = [ pkg ];
+
+    users.users = optionalAttrs (cfg.user == "foundationdb") {
+      foundationdb = {
+        description = "FoundationDB User";
+        uid         = config.ids.uids.foundationdb;
+        group       = cfg.group;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "foundationdb") {
+      foundationdb.gid = config.ids.gids.foundationdb;
+    };
+
+    networking.firewall.allowedTCPPortRanges = mkIf cfg.openFirewall
+      [ { from = cfg.listenPortStart;
+          to = (cfg.listenPortStart + cfg.serverProcesses) - 1;
+        }
+      ];
+
+    systemd.tmpfiles.rules = [
+      "d /etc/foundationdb 0755 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.dataDir}' 0770 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.logDir}' 0770 ${cfg.user} ${cfg.group} - -"
+      "F '${cfg.pidfile}' - ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.foundationdb = {
+      description             = "FoundationDB Service";
+
+      after                   = [ "network.target" ];
+      wantedBy                = [ "multi-user.target" ];
+      unitConfig =
+        { RequiresMountsFor = "${cfg.dataDir} ${cfg.logDir}";
+        };
+
+      serviceConfig =
+        let rwpaths = [ cfg.dataDir cfg.logDir cfg.pidfile "/etc/foundationdb" ]
+                   ++ cfg.extraReadWritePaths;
+        in
+        { Type       = "simple";
+          Restart    = "always";
+          RestartSec = 5;
+          User       = cfg.user;
+          Group      = cfg.group;
+          PIDFile    = "${cfg.pidfile}";
+
+          PermissionsStartOnly = true;  # setup needs root perms
+          TimeoutSec           = 120;   # give reasonable time to shut down
+
+          # Security options
+          NoNewPrivileges       = true;
+          ProtectHome           = true;
+          ProtectSystem         = "strict";
+          ProtectKernelTunables = true;
+          ProtectControlGroups  = true;
+          PrivateTmp            = true;
+          PrivateDevices        = true;
+          ReadWritePaths        = lib.concatStringsSep " " (map (x: "-" + x) rwpaths);
+        };
+
+      path = [ pkg pkgs.coreutils ];
+
+      preStart = ''
+        if [ ! -f /etc/foundationdb/fdb.cluster ]; then
+            cf=/etc/foundationdb/fdb.cluster
+            desc=$(tr -dc A-Za-z0-9 </dev/urandom 2>/dev/null | head -c8)
+            rand=$(tr -dc A-Za-z0-9 </dev/urandom 2>/dev/null | head -c8)
+            echo ''${desc}:''${rand}@${initialIpAddr}:${builtins.toString cfg.listenPortStart} > $cf
+            chmod 0664 $cf
+            touch "${cfg.dataDir}/.first_startup"
+        fi
+      '';
+
+      script = "exec fdbmonitor --lockfile ${cfg.pidfile} --conffile ${configFile}";
+
+      postStart = ''
+        if [ -e "${cfg.dataDir}/.first_startup" ]; then
+          fdbcli --exec "configure new single ssd"
+          rm -f "${cfg.dataDir}/.first_startup";
+        fi
+      '';
+    };
+  };
+
+  meta.doc         = ./foundationdb.md;
+  meta.maintainers = with lib.maintainers; [ thoughtpolice ];
+}
diff --git a/nixpkgs/nixos/modules/services/databases/hbase-standalone.nix b/nixpkgs/nixos/modules/services/databases/hbase-standalone.nix
new file mode 100644
index 000000000000..1ee73ec8d1ff
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/hbase-standalone.nix
@@ -0,0 +1,148 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.hbase-standalone;
+  opt = options.services.hbase-standalone;
+
+  buildProperty = configAttr:
+    (builtins.concatStringsSep "\n"
+      (lib.mapAttrsToList
+        (name: value: ''
+          <property>
+            <name>${name}</name>
+            <value>${builtins.toString value}</value>
+          </property>
+        '')
+        configAttr));
+
+  configFile = pkgs.writeText "hbase-site.xml"
+    ''<configuration>
+        ${buildProperty (opt.settings.default // cfg.settings)}
+      </configuration>
+    '';
+
+  configDir = pkgs.runCommand "hbase-config-dir" { preferLocalBuild = true; } ''
+    mkdir -p $out
+    cp ${cfg.package}/conf/* $out/
+    rm $out/hbase-site.xml
+    ln -s ${configFile} $out/hbase-site.xml
+  '' ;
+
+in {
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "hbase" ] [ "services" "hbase-standalone" ])
+  ];
+
+  ###### interface
+
+  options = {
+    services.hbase-standalone = {
+
+      enable = mkEnableOption (lib.mdDoc ''
+        HBase master in standalone mode with embedded regionserver and zookeeper.
+        Do not use this configuration for production nor for evaluating HBase performance.
+      '');
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.hbase;
+        defaultText = literalExpression "pkgs.hbase";
+        description = lib.mdDoc ''
+          HBase package to use.
+        '';
+      };
+
+
+      user = mkOption {
+        type = types.str;
+        default = "hbase";
+        description = lib.mdDoc ''
+          User account under which HBase runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "hbase";
+        description = lib.mdDoc ''
+          Group account under which HBase runs.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/hbase";
+        description = lib.mdDoc ''
+          Specifies location of HBase database files. This location should be
+          writable and readable for the user the HBase service runs as
+          (hbase by default).
+        '';
+      };
+
+      logDir = mkOption {
+        type = types.path;
+        default = "/var/log/hbase";
+        description = lib.mdDoc ''
+          Specifies the location of HBase log files.
+        '';
+      };
+
+      settings = mkOption {
+        type = with lib.types; attrsOf (oneOf [ str int bool ]);
+        default = {
+          "hbase.rootdir" = "file://${cfg.dataDir}/hbase";
+          "hbase.zookeeper.property.dataDir" = "${cfg.dataDir}/zookeeper";
+        };
+        defaultText = literalExpression ''
+          {
+            "hbase.rootdir" = "file://''${config.${opt.dataDir}}/hbase";
+            "hbase.zookeeper.property.dataDir" = "''${config.${opt.dataDir}}/zookeeper";
+          }
+        '';
+        description = lib.mdDoc ''
+          Configuration settings in hbase-site.xml; see <https://github.com/apache/hbase/blob/master/hbase-server/src/test/resources/hbase-site.xml> for details.
+        '';
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.logDir}' - ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.hbase = {
+      description = "HBase Server";
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        # JRE 15 removed the `UseConcMarkSweepGC` option, which HBase needs, so stay on JRE 8.
+        JAVA_HOME = "${pkgs.jre8}";
+        HBASE_LOG_DIR = cfg.logDir;
+      };
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/hbase --config ${configDir} master start";
+      };
+    };
+
+    users.users.hbase = {
+      description = "HBase Server user";
+      group = "hbase";
+      uid = config.ids.uids.hbase;
+    };
+
+    users.groups.hbase.gid = config.ids.gids.hbase;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/influxdb.nix b/nixpkgs/nixos/modules/services/databases/influxdb.nix
new file mode 100644
index 000000000000..b3361d2014ca
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/influxdb.nix
@@ -0,0 +1,195 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.influxdb;
+
+  configOptions = recursiveUpdate {
+    meta = {
+      bind-address = ":8088";
+      commit-timeout = "50ms";
+      dir = "${cfg.dataDir}/meta";
+      election-timeout = "1s";
+      heartbeat-timeout = "1s";
+      hostname = "localhost";
+      leader-lease-timeout = "500ms";
+      retention-autocreate = true;
+    };
+
+    data = {
+      dir = "${cfg.dataDir}/data";
+      wal-dir = "${cfg.dataDir}/wal";
+      max-wal-size = 104857600;
+      wal-enable-logging = true;
+      wal-flush-interval = "10m";
+      wal-partition-flush-delay = "2s";
+    };
+
+    cluster = {
+      shard-writer-timeout = "5s";
+      write-timeout = "5s";
+    };
+
+    retention = {
+      enabled = true;
+      check-interval = "30m";
+    };
+
+    http = {
+      enabled = true;
+      auth-enabled = false;
+      bind-address = ":8086";
+      https-enabled = false;
+      log-enabled = true;
+      pprof-enabled = false;
+      write-tracing = false;
+    };
+
+    monitor = {
+      store-enabled = false;
+      store-database = "_internal";
+      store-interval = "10s";
+    };
+
+    admin = {
+      enabled = true;
+      bind-address = ":8083";
+      https-enabled = false;
+    };
+
+    graphite = [{
+      enabled = false;
+    }];
+
+    udp = [{
+      enabled = false;
+    }];
+
+    collectd = [{
+      enabled = false;
+      typesdb = "${pkgs.collectd-data}/share/collectd/types.db";
+      database = "collectd_db";
+      bind-address = ":25826";
+    }];
+
+    opentsdb = [{
+      enabled = false;
+    }];
+
+    continuous_queries = {
+      enabled = true;
+      log-enabled = true;
+      recompute-previous-n = 2;
+      recompute-no-older-than = "10m";
+      compute-runs-per-interval = 10;
+      compute-no-more-than = "2m";
+    };
+
+    hinted-handoff = {
+      enabled = true;
+      dir = "${cfg.dataDir}/hh";
+      max-size = 1073741824;
+      max-age = "168h";
+      retry-rate-limit = 0;
+      retry-interval = "1s";
+    };
+  } cfg.extraConfig;
+
+  configFile = pkgs.runCommandLocal "config.toml" { } ''
+    ${pkgs.buildPackages.remarshal}/bin/remarshal -if json -of toml \
+      < ${pkgs.writeText "config.json" (builtins.toJSON configOptions)} \
+      > $out
+  '';
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.influxdb = {
+
+      enable = mkOption {
+        default = false;
+        description = lib.mdDoc "Whether to enable the influxdb server";
+        type = types.bool;
+      };
+
+      package = mkOption {
+        default = pkgs.influxdb;
+        defaultText = literalExpression "pkgs.influxdb";
+        description = lib.mdDoc "Which influxdb derivation to use";
+        type = types.package;
+      };
+
+      user = mkOption {
+        default = "influxdb";
+        description = lib.mdDoc "User account under which influxdb runs";
+        type = types.str;
+      };
+
+      group = mkOption {
+        default = "influxdb";
+        description = lib.mdDoc "Group under which influxdb runs";
+        type = types.str;
+      };
+
+      dataDir = mkOption {
+        default = "/var/db/influxdb";
+        description = lib.mdDoc "Data directory for influxd data files.";
+        type = types.path;
+      };
+
+      extraConfig = mkOption {
+        default = {};
+        description = lib.mdDoc "Extra configuration options for influxdb";
+        type = types.attrs;
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.influxdb.enable {
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0770 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.influxdb = {
+      description = "InfluxDB Server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = ''${cfg.package}/bin/influxd -config "${configFile}"'';
+        User = cfg.user;
+        Group = cfg.group;
+      };
+      postStart =
+        let
+          scheme = if configOptions.http.https-enabled then "-k https" else "http";
+          bindAddr = (ba: if hasPrefix ":" ba then "127.0.0.1${ba}" else "${ba}")(toString configOptions.http.bind-address);
+        in
+        mkBefore ''
+          until ${pkgs.curl.bin}/bin/curl -s -o /dev/null ${scheme}://${bindAddr}/ping; do
+            sleep 1;
+          done
+        '';
+    };
+
+    users.users = optionalAttrs (cfg.user == "influxdb") {
+      influxdb = {
+        uid = config.ids.uids.influxdb;
+        group = "influxdb";
+        description = "Influxdb daemon user";
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "influxdb") {
+      influxdb.gid = config.ids.gids.influxdb;
+    };
+  };
+
+}
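
For reference only (not part of the patch above), a host configuration might enable this module roughly as follows; the extraConfig attribute names mirror the upstream TOML sections that recursiveUpdate merges over the defaults, and the values shown are illustrative:

  services.influxdb = {
    enable = true;
    dataDir = "/var/db/influxdb";
    extraConfig = {
      http.auth-enabled = true;          # overrides the module default of false
      retention.check-interval = "1h";   # illustrative value
    };
  };
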
diff --git a/nixpkgs/nixos/modules/services/databases/influxdb2.nix b/nixpkgs/nixos/modules/services/databases/influxdb2.nix
new file mode 100644
index 000000000000..3740cd01b5dc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/influxdb2.nix
@@ -0,0 +1,497 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit
+    (lib)
+    any
+    attrNames
+    attrValues
+    count
+    escapeShellArg
+    filterAttrs
+    flatten
+    flip
+    getExe
+    hasAttr
+    hasInfix
+    listToAttrs
+    literalExpression
+    mapAttrsToList
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    nameValuePair
+    optional
+    subtractLists
+    types
+    unique
+    ;
+
+  format = pkgs.formats.json { };
+  cfg = config.services.influxdb2;
+  configFile = format.generate "config.json" cfg.settings;
+
+  validPermissions = [
+    "authorizations"
+    "buckets"
+    "dashboards"
+    "orgs"
+    "tasks"
+    "telegrafs"
+    "users"
+    "variables"
+    "secrets"
+    "labels"
+    "views"
+    "documents"
+    "notificationRules"
+    "notificationEndpoints"
+    "checks"
+    "dbrp"
+    "annotations"
+    "sources"
+    "scrapers"
+    "notebooks"
+    "remotes"
+    "replications"
+  ];
+
+  # Determines whether at least one active api token is defined
+  anyAuthDefined =
+    flip any (attrValues cfg.provision.organizations)
+    (o: o.present && flip any (attrValues o.auths)
+    (a: a.present && a.tokenFile != null));
+
+  provisionState = pkgs.writeText "provision_state.json" (builtins.toJSON {
+    inherit (cfg.provision) organizations users;
+  });
+
+  provisioningScript = pkgs.writeShellScript "post-start-provision" ''
+    set -euo pipefail
+    export INFLUX_HOST="http://"${escapeShellArg (
+      if ! hasAttr "http-bind-address" cfg.settings
+        || hasInfix "0.0.0.0" cfg.settings.http-bind-address
+      then "localhost:8086"
+      else cfg.settings.http-bind-address
+    )}
+
+    # Wait for the influxdb server to come online
+    count=0
+    while ! influx ping &>/dev/null; do
+      if [ "$count" -eq 300 ]; then
+        echo "Tried for 30 seconds, giving up..."
+        exit 1
+      fi
+
+      if ! kill -0 "$MAINPID"; then
+        echo "Main server died, giving up..."
+        exit 1
+      fi
+
+      sleep 0.1
+      count=$((count + 1))
+    done
+
+    # Do the initial database setup. Pass /dev/null as configs-path to
+    # avoid saving the token as the active config.
+    if test -e "$STATE_DIRECTORY/.first_startup"; then
+      influx setup \
+        --configs-path /dev/null \
+        --org ${escapeShellArg cfg.provision.initialSetup.organization} \
+        --bucket ${escapeShellArg cfg.provision.initialSetup.bucket} \
+        --username ${escapeShellArg cfg.provision.initialSetup.username} \
+        --password "$(< "$CREDENTIALS_DIRECTORY/admin-password")" \
+        --token "$(< "$CREDENTIALS_DIRECTORY/admin-token")" \
+        --retention ${toString cfg.provision.initialSetup.retention}s \
+        --force >/dev/null
+
+      rm -f "$STATE_DIRECTORY/.first_startup"
+    fi
+
+    provision_result=$(${getExe pkgs.influxdb2-provision} ${provisionState} "$INFLUX_HOST" "$(< "$CREDENTIALS_DIRECTORY/admin-token")")
+    if [[ "$(jq '[.auths[] | select(.action == "created")] | length' <<< "$provision_result")" -gt 0 ]]; then
+      echo "Created at least one new token, queueing service restart so we can manipulate secrets"
+      touch "$STATE_DIRECTORY/.needs_restart"
+    fi
+  '';
+
+  restarterScript = pkgs.writeShellScript "post-start-restarter" ''
+    set -euo pipefail
+    if test -e "$STATE_DIRECTORY/.needs_restart"; then
+      rm -f "$STATE_DIRECTORY/.needs_restart"
+      /run/current-system/systemd/bin/systemctl restart influxdb2
+    fi
+  '';
+
+  organizationSubmodule = types.submodule (organizationSubmod: let
+    org = organizationSubmod.config._module.args.name;
+  in {
+    options = {
+      present = mkOption {
+        description = mdDoc "Whether to ensure that this organization is present or absent.";
+        type = types.bool;
+        default = true;
+      };
+
+      description = mkOption {
+        description = mdDoc "Optional description for the organization.";
+        default = null;
+        type = types.nullOr types.str;
+      };
+
+      buckets = mkOption {
+        description = mdDoc "Buckets to provision in this organization.";
+        default = {};
+        type = types.attrsOf (types.submodule (bucketSubmod: let
+          bucket = bucketSubmod.config._module.args.name;
+        in {
+          options = {
+            present = mkOption {
+              description = mdDoc "Whether to ensure that this bucket is present or absent.";
+              type = types.bool;
+              default = true;
+            };
+
+            description = mkOption {
+              description = mdDoc "Optional description for the bucket.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            retention = mkOption {
+              type = types.ints.unsigned;
+              default = 0;
+              description = mdDoc "The duration in seconds for which the bucket will retain data (0 is infinite).";
+            };
+          };
+        }));
+      };
+
+      auths = mkOption {
+        description = mdDoc "API tokens to provision for the user in this organization.";
+        default = {};
+        type = types.attrsOf (types.submodule (authSubmod: let
+          auth = authSubmod.config._module.args.name;
+        in {
+          options = {
+            id = mkOption {
+              description = mdDoc "A unique identifier for this authentication token. Since influx doesn't store names for tokens, this will be hashed and appended to the description to identify the token.";
+              readOnly = true;
+              default = builtins.substring 0 32 (builtins.hashString "sha256" "${org}:${auth}");
+              defaultText = "<a hash derived from org and name>";
+              type = types.str;
+            };
+
+            present = mkOption {
+              description = mdDoc "Whether to ensure that this user is present or absent.";
+              type = types.bool;
+              default = true;
+            };
+
+            description = mkOption {
+              description = ''
+                Optional description for the API token.
+                Note that the actual token will always be created with a description regardless
+                of whether this is given. The name plus a unique suffix is always appended so the
+                token can later be identified and tracked as already created.
+              '';
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            tokenFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc "The token value. If not given, influx will automatically generate one.";
+            };
+
+            operator = mkOption {
+              description = mdDoc "Grants all permissions in all organizations.";
+              default = false;
+              type = types.bool;
+            };
+
+            allAccess = mkOption {
+              description = mdDoc "Grants all permissions in the associated organization.";
+              default = false;
+              type = types.bool;
+            };
+
+            readPermissions = mkOption {
+              description = mdDoc ''
+                The read permissions to include for this token. Access is usually granted only
+                for resources in the associated organization.
+
+                Available permissions are `authorizations`, `buckets`, `dashboards`,
+                `orgs`, `tasks`, `telegrafs`, `users`, `variables`, `secrets`, `labels`, `views`,
+                `documents`, `notificationRules`, `notificationEndpoints`, `checks`, `dbrp`,
+                `annotations`, `sources`, `scrapers`, `notebooks`, `remotes`, `replications`.
+
+                Refer to `influx auth create --help` for a full list with descriptions.
+
+                `buckets` grants read access to all associated buckets. Use `readBuckets` to define
+                more granular access permissions.
+              '';
+              default = [];
+              type = types.listOf (types.enum validPermissions);
+            };
+
+            writePermissions = mkOption {
+              description = mdDoc ''
+                The write permissions to include for this token. Access is usually granted only
+                for resources in the associated organization.
+
+                Available permissions are `authorizations`, `buckets`, `dashboards`,
+                `orgs`, `tasks`, `telegrafs`, `users`, `variables`, `secrets`, `labels`, `views`,
+                `documents`, `notificationRules`, `notificationEndpoints`, `checks`, `dbrp`,
+                `annotations`, `sources`, `scrapers`, `notebooks`, `remotes`, `replications`.
+
+                Refer to `influx auth create --help` for a full list with descriptions.
+
+                `buckets` grants write access to all associated buckets. Use `writeBuckets` to define
+                more granular access permissions.
+              '';
+              default = [];
+              type = types.listOf (types.enum validPermissions);
+            };
+
+            readBuckets = mkOption {
+              description = mdDoc "The organization's buckets which should be allowed to be read";
+              default = [];
+              type = types.listOf types.str;
+            };
+
+            writeBuckets = mkOption {
+              description = mdDoc "The organization's buckets which should be allowed to be written";
+              default = [];
+              type = types.listOf types.str;
+            };
+          };
+        }));
+      };
+    };
+  });
+in
+{
+  options = {
+    services.influxdb2 = {
+      enable = mkEnableOption (mdDoc "the influxdb2 server");
+
+      package = mkOption {
+        default = pkgs.influxdb2-server;
+        defaultText = literalExpression "pkgs.influxdb2-server";
+        description = mdDoc "influxdb2 derivation to use.";
+        type = types.package;
+      };
+
+      settings = mkOption {
+        default = { };
+        description = mdDoc ''configuration options for influxdb2, see <https://docs.influxdata.com/influxdb/v2.0/reference/config-options> for details.'';
+        type = format.type;
+      };
+
+      provision = {
+        enable = mkEnableOption "initial database setup and provisioning";
+
+        initialSetup = {
+          organization = mkOption {
+            type = types.str;
+            example = "main";
+            description = mdDoc "Primary organization name";
+          };
+
+          bucket = mkOption {
+            type = types.str;
+            example = "example";
+            description = mdDoc "Primary bucket name";
+          };
+
+          username = mkOption {
+            type = types.str;
+            default = "admin";
+            description = mdDoc "Primary username";
+          };
+
+          retention = mkOption {
+            type = types.ints.unsigned;
+            default = 0;
+            description = mdDoc "The duration in seconds for which the bucket will retain data (0 is infinite).";
+          };
+
+          passwordFile = mkOption {
+            type = types.path;
+            description = mdDoc "Password for primary user. Don't use a file from the nix store!";
+          };
+
+          tokenFile = mkOption {
+            type = types.path;
+            description = mdDoc "API Token to set for the admin user. Don't use a file from the nix store!";
+          };
+        };
+
+        organizations = mkOption {
+          description = mdDoc "Organizations to provision.";
+          example = literalExpression ''
+            {
+              myorg = {
+                description = "My organization";
+                buckets.mybucket = {
+                  description = "My bucket";
+                  retention = 31536000; # 1 year
+                };
+                auths.mytoken = {
+                  readBuckets = ["mybucket"];
+                  tokenFile = "/run/secrets/mytoken";
+                };
+              };
+            }
+          '';
+          default = {};
+          type = types.attrsOf organizationSubmodule;
+        };
+
+        users = mkOption {
+          description = mdDoc "Users to provision.";
+          default = {};
+          example = literalExpression ''
+            {
+              # admin = {}; /* The initialSetup.username will automatically be added. */
+              myuser.passwordFile = "/run/secrets/myuser_password";
+            }
+          '';
+          type = types.attrsOf (types.submodule (userSubmod: let
+            user = userSubmod.config._module.args.name;
+            org = userSubmod.config.org;
+          in {
+            options = {
+              present = mkOption {
+                description = mdDoc "Whether to ensure that this user is present or absent.";
+                type = types.bool;
+                default = true;
+              };
+
+              passwordFile = mkOption {
+                description = mdDoc "Password for the user. If unset, the user will not be able to log in until a password is set by an operator! Don't use a file from the nix store!";
+                default = null;
+                type = types.nullOr types.path;
+              };
+            };
+          }));
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions =
+      [
+        {
+          assertion = !(hasAttr "bolt-path" cfg.settings) && !(hasAttr "engine-path" cfg.settings);
+          message = "services.influxdb2.config: bolt-path and engine-path should not be set as they are managed by systemd";
+        }
+      ]
+      ++ flatten (flip mapAttrsToList cfg.provision.organizations (orgName: org:
+        flip mapAttrsToList org.auths (authName: auth:
+          [
+            {
+              assertion = 1 == count (x: x) [
+                auth.operator
+                auth.allAccess
+                (auth.readPermissions != []
+                  || auth.writePermissions != []
+                  || auth.readBuckets != []
+                  || auth.writeBuckets != [])
+              ];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: The `operator` and `allAccess` options are mutually exclusive with each other and the granular permission settings.";
+            }
+            (let unknownBuckets = subtractLists (attrNames org.buckets) auth.readBuckets; in {
+              assertion = unknownBuckets == [];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: Refers to invalid buckets in readBuckets: ${toString unknownBuckets}";
+            })
+            (let unknownBuckets = subtractLists (attrNames org.buckets) auth.writeBuckets; in {
+              assertion = unknownBuckets == [];
+              message = "influxdb2: provision.organizations.${orgName}.auths.${authName}: Refers to invalid buckets in writeBuckets: ${toString unknownBuckets}";
+            })
+          ]
+        )
+      ));
+
+    services.influxdb2.provision = mkIf cfg.provision.enable {
+      organizations.${cfg.provision.initialSetup.organization} = {
+        buckets.${cfg.provision.initialSetup.bucket} = {
+          inherit (cfg.provision.initialSetup) retention;
+        };
+      };
+      users.${cfg.provision.initialSetup.username} = {
+        inherit (cfg.provision.initialSetup) passwordFile;
+      };
+    };
+
+    systemd.services.influxdb2 = {
+      description = "InfluxDB is an open-source, distributed, time series database";
+      documentation = [ "https://docs.influxdata.com/influxdb/" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment = {
+        INFLUXD_CONFIG_PATH = configFile;
+        ZONEINFO = "${pkgs.tzdata}/share/zoneinfo";
+      };
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/influxd --bolt-path \${STATE_DIRECTORY}/influxd.bolt --engine-path \${STATE_DIRECTORY}/engine";
+        StateDirectory = "influxdb2";
+        User = "influxdb2";
+        Group = "influxdb2";
+        CapabilityBoundingSet = "";
+        SystemCallFilter = "@system-service";
+        LimitNOFILE = 65536;
+        KillMode = "control-group";
+        Restart = "on-failure";
+        LoadCredential = mkIf cfg.provision.enable [
+          "admin-password:${cfg.provision.initialSetup.passwordFile}"
+          "admin-token:${cfg.provision.initialSetup.tokenFile}"
+        ];
+
+        ExecStartPost = mkIf cfg.provision.enable (
+          [provisioningScript] ++
+          # Only the restarter runs with elevated privileges
+          optional anyAuthDefined "+${restarterScript}"
+        );
+      };
+
+      path = [
+        pkgs.influxdb2-cli
+        pkgs.jq
+      ];
+
+      # Mark if this is the first startup so postStart can do the initial setup.
+      # Also extract any token secret mappings and apply them if this isn't the first start.
+      preStart = let
+        tokenPaths = listToAttrs (flatten
+          # For all organizations
+          (flip mapAttrsToList cfg.provision.organizations
+            # For each contained token that has a token file
+            (_: org: flip mapAttrsToList (filterAttrs (_: x: x.tokenFile != null) org.auths)
+              # Collect id -> tokenFile for the mapping
+              (_: auth: nameValuePair auth.id auth.tokenFile))));
+        tokenMappings = pkgs.writeText "token_mappings.json" (builtins.toJSON tokenPaths);
+      in mkIf cfg.provision.enable ''
+        if ! test -e "$STATE_DIRECTORY/influxd.bolt"; then
+          touch "$STATE_DIRECTORY/.first_startup"
+        else
+          # Manipulate provisioned api tokens if necessary
+          ${getExe pkgs.influxdb2-token-manipulator} "$STATE_DIRECTORY/influxd.bolt" ${tokenMappings}
+        fi
+      '';
+    };
+
+    users.extraUsers.influxdb2 = {
+      isSystemUser = true;
+      group = "influxdb2";
+    };
+
+    users.extraGroups.influxdb2 = {};
+  };
+
+  meta.maintainers = with lib.maintainers; [ nickcao oddlama ];
+}
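
As a usage sketch only (not part of this diff), the provisioning options above could be wired up like this; the secret file paths are placeholders and would normally come from a secrets manager:

  services.influxdb2 = {
    enable = true;
    provision = {
      enable = true;
      initialSetup = {
        organization = "main";
        bucket = "default";
        # Placeholder paths; keep real credentials out of the Nix store.
        passwordFile = "/run/secrets/influxdb2-admin-password";
        tokenFile = "/run/secrets/influxdb2-admin-token";
      };
    };
  };
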
diff --git a/nixpkgs/nixos/modules/services/databases/lldap.nix b/nixpkgs/nixos/modules/services/databases/lldap.nix
new file mode 100644
index 000000000000..960792d0805f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/lldap.nix
@@ -0,0 +1,121 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+  cfg = config.services.lldap;
+  format = pkgs.formats.toml { };
+in
+{
+  options.services.lldap = with lib; {
+    enable = mkEnableOption (mdDoc "lldap");
+
+    package = mkPackageOptionMD pkgs "lldap" { };
+
+    environment = mkOption {
+      type = with types; attrsOf str;
+      default = { };
+      example = {
+        LLDAP_JWT_SECRET_FILE = "/run/lldap/jwt_secret";
+        LLDAP_LDAP_USER_PASS_FILE = "/run/lldap/user_password";
+      };
+      description = lib.mdDoc ''
+        Environment variables passed to the service.
+        Any config option name prefixed with `LLDAP_` takes priority over the one in the configuration file.
+      '';
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Environment file as defined in {manpage}`systemd.exec(5)` passed to the service.
+      '';
+    };
+
+    settings = mkOption {
+      description = mdDoc ''
+        Free-form settings written directly to the `lldap_config.toml` file.
+        Refer to <https://github.com/lldap/lldap/blob/main/lldap_config.docker_template.toml> for supported values.
+      '';
+
+      default = { };
+
+      type = types.submodule {
+        freeformType = format.type;
+        options = {
+          ldap_host = mkOption {
+            type = types.str;
+            description = mdDoc "The host address that the LDAP server will be bound to.";
+            default = "::";
+          };
+
+          ldap_port = mkOption {
+            type = types.port;
+            description = mdDoc "The port on which to have the LDAP server.";
+            default = 3890;
+          };
+
+          http_host = mkOption {
+            type = types.str;
+            description = mdDoc "The host address that the HTTP server will be bound to.";
+            default = "::";
+          };
+
+          http_port = mkOption {
+            type = types.port;
+            description = mdDoc "The port on which to have the HTTP server, for user login and administration.";
+            default = 17170;
+          };
+
+          http_url = mkOption {
+            type = types.str;
+            description = mdDoc "The public URL of the server, for password reset links.";
+            default = "http://localhost";
+          };
+
+          ldap_base_dn = mkOption {
+            type = types.str;
+            description = mdDoc "Base DN for LDAP.";
+            example = "dc=example,dc=com";
+          };
+
+          ldap_user_dn = mkOption {
+            type = types.str;
+            description = mdDoc "Admin username";
+            default = "admin";
+          };
+
+          ldap_user_email = mkOption {
+            type = types.str;
+            description = mdDoc "Admin email.";
+            default = "admin@example.com";
+          };
+
+          database_url = mkOption {
+            type = types.str;
+            description = mdDoc "Database URL.";
+            default = "sqlite://./users.db?mode=rwc";
+            example = "postgres://postgres-user:password@postgres-server/my-database";
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.lldap = {
+      description = "Lightweight LDAP server (lldap)";
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${lib.getExe cfg.package} run --config-file ${format.generate "lldap_config.toml" cfg.settings}";
+        StateDirectory = "lldap";
+        WorkingDirectory = "%S/lldap";
+        User = "lldap";
+        Group = "lldap";
+        DynamicUser = true;
+        EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
+      };
+      inherit (cfg) environment;
+    };
+  };
+}
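
A minimal sketch of how the lldap module above might be used (illustrative, not part of the patch); the JWT secret path is a placeholder:

  services.lldap = {
    enable = true;
    settings = {
      ldap_base_dn = "dc=example,dc=com";
      http_url = "https://lldap.example.com";   # used for password reset links
    };
    # Placeholder path; LLDAP_-prefixed variables override the TOML settings.
    environment.LLDAP_JWT_SECRET_FILE = "/run/lldap/jwt_secret";
  };
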
diff --git a/nixpkgs/nixos/modules/services/databases/memcached.nix b/nixpkgs/nixos/modules/services/databases/memcached.nix
new file mode 100644
index 000000000000..542c80ab2e67
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/memcached.nix
@@ -0,0 +1,118 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.memcached;
+
+  memcached = pkgs.memcached;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.memcached = {
+      enable = mkEnableOption (lib.mdDoc "Memcached");
+
+      user = mkOption {
+        type = types.str;
+        default = "memcached";
+        description = lib.mdDoc "The user to run Memcached as";
+      };
+
+      listen = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "The IP address to bind to.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 11211;
+        description = lib.mdDoc "The port to bind to.";
+      };
+
+      enableUnixSocket = mkEnableOption (lib.mdDoc "unix socket at /run/memcached/memcached.sock");
+
+      maxMemory = mkOption {
+        type = types.ints.unsigned;
+        default = 64;
+        description = lib.mdDoc "The maximum amount of memory to use for storage, in megabytes.";
+      };
+
+      maxConnections = mkOption {
+        type = types.ints.unsigned;
+        default = 1024;
+        description = lib.mdDoc "The maximum number of simultaneous connections.";
+      };
+
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "A list of extra options that will be added as a suffix when running memcached.";
+      };
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.memcached.enable {
+
+    users.users = optionalAttrs (cfg.user == "memcached") {
+      memcached.description = "Memcached server user";
+      memcached.isSystemUser = true;
+      memcached.group = "memcached";
+    };
+    users.groups = optionalAttrs (cfg.user == "memcached") { memcached = {}; };
+
+    environment.systemPackages = [ memcached ];
+
+    systemd.services.memcached = {
+      description = "Memcached server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart =
+        let
+          networking = if cfg.enableUnixSocket
+          then "-s /run/memcached/memcached.sock"
+          else "-l ${cfg.listen} -p ${toString cfg.port}";
+        in "${memcached}/bin/memcached ${networking} -m ${toString cfg.maxMemory} -c ${toString cfg.maxConnections} ${concatStringsSep " " cfg.extraOptions}";
+
+        User = cfg.user;
+
+        # Filesystem access
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RuntimeDirectory = "memcached";
+        # Caps
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        # Misc.
+        LockPersonality = true;
+        RestrictRealtime = true;
+        PrivateMounts = true;
+        MemoryDenyWriteExecute = true;
+      };
+    };
+  };
+  imports = [
+    (mkRemovedOptionModule ["services" "memcached" "socket"] ''
+      This option was replaced by a fixed unix socket path at /run/memcached/memcached.sock enabled using services.memcached.enableUnixSocket.
+    '')
+  ];
+
+}
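
For illustration only, the memcached module above could be enabled like so; the numbers are arbitrary examples:

  services.memcached = {
    enable = true;
    listen = "127.0.0.1";
    port = 11211;
    maxMemory = 128;        # megabytes
    maxConnections = 512;
    # Alternatively, enableUnixSocket = true; serves /run/memcached/memcached.sock instead.
  };
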
diff --git a/nixpkgs/nixos/modules/services/databases/monetdb.nix b/nixpkgs/nixos/modules/services/databases/monetdb.nix
new file mode 100644
index 000000000000..5573b530a913
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/monetdb.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.monetdb;
+
+in {
+  meta.maintainers = with maintainers; [ StillerHarpo primeos ];
+
+  ###### interface
+  options = {
+    services.monetdb = {
+
+      enable = mkEnableOption (lib.mdDoc "the MonetDB database server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.monetdb;
+        defaultText = literalExpression "pkgs.monetdb";
+        description = lib.mdDoc "MonetDB package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "monetdb";
+        description = lib.mdDoc "User account under which MonetDB runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "monetdb";
+        description = lib.mdDoc "Group under which MonetDB runs.";
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/monetdb";
+        description = lib.mdDoc "Data directory for the dbfarm.";
+      };
+
+      port = mkOption {
+        type = types.ints.u16;
+        default = 50000;
+        description = lib.mdDoc "Port to listen on.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        example = "0.0.0.0";
+        description = lib.mdDoc "Address to listen on.";
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+
+    users.users.monetdb = mkIf (cfg.user == "monetdb") {
+      uid = config.ids.uids.monetdb;
+      group = cfg.group;
+      description = "MonetDB user";
+      home = cfg.dataDir;
+      createHome = true;
+    };
+
+    users.groups.monetdb = mkIf (cfg.group == "monetdb") {
+      gid = config.ids.gids.monetdb;
+      members = [ cfg.user ];
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.monetdb = {
+      description = "MonetDB database server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ cfg.package ];
+      unitConfig.RequiresMountsFor = "${cfg.dataDir}";
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/monetdbd start -n ${cfg.dataDir}";
+        ExecStop = "${cfg.package}/bin/monetdbd stop ${cfg.dataDir}";
+      };
+      preStart = ''
+        if [ ! -e ${cfg.dataDir}/.merovingian_properties ]; then
+          # Create the dbfarm (as cfg.user)
+          ${cfg.package}/bin/monetdbd create ${cfg.dataDir}
+        fi
+
+        # Update the properties
+        ${cfg.package}/bin/monetdbd set port=${toString cfg.port} ${cfg.dataDir}
+        ${cfg.package}/bin/monetdbd set listenaddr=${cfg.listenAddress} ${cfg.dataDir}
+      '';
+    };
+
+  };
+}
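
A usage sketch for the MonetDB module above (values are illustrative):

  services.monetdb = {
    enable = true;
    listenAddress = "127.0.0.1";
    port = 50000;
    dataDir = "/var/lib/monetdb";   # default dbfarm location
  };
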
diff --git a/nixpkgs/nixos/modules/services/databases/mongodb.nix b/nixpkgs/nixos/modules/services/databases/mongodb.nix
new file mode 100644
index 000000000000..8f3be1492e9e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/mongodb.nix
@@ -0,0 +1,197 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.mongodb;
+
+  mongodb = cfg.package;
+
+  mongoCnf = cfg: pkgs.writeText "mongodb.conf"
+  ''
+    net.bindIp: ${cfg.bind_ip}
+    ${optionalString cfg.quiet "systemLog.quiet: true"}
+    systemLog.destination: syslog
+    storage.dbPath: ${cfg.dbpath}
+    ${optionalString cfg.enableAuth "security.authorization: enabled"}
+    ${optionalString (cfg.replSetName != "") "replication.replSetName: ${cfg.replSetName}"}
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.mongodb = {
+
+      enable = mkEnableOption (lib.mdDoc "the MongoDB server");
+
+      package = mkOption {
+        default = pkgs.mongodb;
+        defaultText = literalExpression "pkgs.mongodb";
+        type = types.package;
+        description = lib.mdDoc ''
+          Which MongoDB derivation to use.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "mongodb";
+        description = lib.mdDoc "User account under which MongoDB runs";
+      };
+
+      bind_ip = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "IP to bind to";
+      };
+
+      quiet = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "quieter output";
+      };
+
+      enableAuth = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable client authentication. Creates a default superuser with username root!";
+      };
+
+      initialRootPassword = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "Password for the root user if auth is enabled.";
+      };
+
+      dbpath = mkOption {
+        type = types.str;
+        default = "/var/db/mongodb";
+        description = lib.mdDoc "Location where MongoDB stores its files";
+      };
+
+      pidFile = mkOption {
+        type = types.str;
+        default = "/run/mongodb.pid";
+        description = lib.mdDoc "Location of MongoDB pid file";
+      };
+
+      replSetName = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          If this instance is part of a replica set, set its name here.
+          Otherwise, leave empty to run as single node.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          storage.journal.enabled: false
+        '';
+        description = lib.mdDoc "MongoDB extra configuration in YAML format";
+      };
+
+      initialScript = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          A file containing MongoDB statements to execute on first startup.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.mongodb.enable {
+    assertions = [
+      { assertion = !cfg.enableAuth || cfg.initialRootPassword != null;
+        message = "`enableAuth` requires `initialRootPassword` to be set.";
+      }
+    ];
+
+    users.users.mongodb = mkIf (cfg.user == "mongodb")
+      { name = "mongodb";
+        isSystemUser = true;
+        group = "mongodb";
+        description = "MongoDB server user";
+      };
+    users.groups.mongodb = mkIf (cfg.user == "mongodb") {};
+
+    environment.systemPackages = [ mongodb ];
+
+    systemd.services.mongodb =
+      { description = "MongoDB server";
+
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+
+        serviceConfig = {
+          ExecStart = "${mongodb}/bin/mongod --config ${mongoCnf cfg} --fork --pidfilepath ${cfg.pidFile}";
+          User = cfg.user;
+          PIDFile = cfg.pidFile;
+          Type = "forking";
+          TimeoutStartSec = 120; # initial creation of the journal can take some time
+          PermissionsStartOnly = true;
+        };
+
+        preStart = let
+          cfg_ = cfg // { enableAuth = false; bind_ip = "127.0.0.1"; };
+        in ''
+          rm ${cfg.dbpath}/mongod.lock || true
+          if ! test -e ${cfg.dbpath}; then
+              install -d -m0700 -o ${cfg.user} ${cfg.dbpath}
+              # See postStart!
+              touch ${cfg.dbpath}/.first_startup
+          fi
+          if ! test -e ${cfg.pidFile}; then
+              install -D -o ${cfg.user} /dev/null ${cfg.pidFile}
+          fi '' + lib.optionalString cfg.enableAuth ''
+
+          if ! test -e "${cfg.dbpath}/.auth_setup_complete"; then
+            systemd-run --unit=mongodb-for-setup --uid=${cfg.user} ${mongodb}/bin/mongod --config ${mongoCnf cfg_}
+            # wait for mongodb
+            while ! ${mongodb}/bin/mongo --eval "db.version()" > /dev/null 2>&1; do sleep 0.1; done
+
+          ${mongodb}/bin/mongo <<EOF
+            use admin
+            db.createUser(
+              {
+                user: "root",
+                pwd: "${cfg.initialRootPassword}",
+                roles: [
+                  { role: "userAdminAnyDatabase", db: "admin" },
+                  { role: "dbAdminAnyDatabase", db: "admin" },
+                  { role: "readWriteAnyDatabase", db: "admin" }
+                ]
+              }
+            )
+          EOF
+            touch "${cfg.dbpath}/.auth_setup_complete"
+            systemctl stop mongodb-for-setup
+          fi
+        '';
+        postStart = ''
+            if test -e "${cfg.dbpath}/.first_startup"; then
+              ${optionalString (cfg.initialScript != null) ''
+                ${mongodb}/bin/mongo ${optionalString (cfg.enableAuth) "-u root -p ${cfg.initialRootPassword}"} admin "${cfg.initialScript}"
+              ''}
+              rm -f "${cfg.dbpath}/.first_startup"
+            fi
+        '';
+      };
+
+  };
+
+}
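
As an illustrative sketch (not part of the patch), enabling authentication with this module looks roughly like the following; the assertion above requires initialRootPassword whenever enableAuth is set, and a literal password like this ends up in the world-readable Nix store, so treat it as a placeholder:

  services.mongodb = {
    enable = true;
    enableAuth = true;
    initialRootPassword = "change-me";   # placeholder; see the caveat above
    replSetName = "";                    # empty: run as a single node
  };
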
diff --git a/nixpkgs/nixos/modules/services/databases/mysql.nix b/nixpkgs/nixos/modules/services/databases/mysql.nix
new file mode 100644
index 000000000000..128bb0862175
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/mysql.nix
@@ -0,0 +1,521 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.mysql;
+
+  isMariaDB = lib.getName cfg.package == lib.getName pkgs.mariadb;
+
+  mysqldOptions =
+    "--user=${cfg.user} --datadir=${cfg.dataDir} --basedir=${cfg.package}";
+
+  format = pkgs.formats.ini { listsAsDuplicateKeys = true; };
+  configFile = format.generate "my.cnf" cfg.settings;
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "mysql" "pidDir" ] "Don't wait for pidfiles, describe dependencies through systemd.")
+    (mkRemovedOptionModule [ "services" "mysql" "rootPassword" ] "Use socket authentication or set the password outside of the nix store.")
+    (mkRemovedOptionModule [ "services" "mysql" "extraOptions" ] "Use services.mysql.settings.mysqld instead.")
+    (mkRemovedOptionModule [ "services" "mysql" "bind" ] "Use services.mysql.settings.mysqld.bind-address instead.")
+    (mkRemovedOptionModule [ "services" "mysql" "port" ] "Use services.mysql.settings.mysqld.port instead.")
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.mysql = {
+
+      enable = mkEnableOption (lib.mdDoc "MySQL server");
+
+      package = mkOption {
+        type = types.package;
+        example = literalExpression "pkgs.mariadb";
+        description = lib.mdDoc ''
+          Which MySQL derivation to use. MariaDB packages are supported too.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "mysql";
+        description = lib.mdDoc ''
+          User account under which MySQL runs.
+
+          ::: {.note}
+          If left as the default value this user will automatically be created
+          on system activation, otherwise you are responsible for
+          ensuring the user exists before the MySQL service starts.
+          :::
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "mysql";
+        description = lib.mdDoc ''
+          Group account under which MySQL runs.
+
+          ::: {.note}
+          If left as the default value this group will automatically be created
+          on system activation, otherwise you are responsible for
+          ensuring the group exists before the MySQL service starts.
+          :::
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        example = "/var/lib/mysql";
+        description = lib.mdDoc ''
+          The data directory for MySQL.
+
+          ::: {.note}
+          If left as the default value of `/var/lib/mysql` this directory will automatically be created before the MySQL
+          server starts, otherwise you are responsible for ensuring the directory exists with appropriate ownership and permissions.
+          :::
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = configFile;
+        defaultText = ''
+          A configuration file automatically generated by NixOS.
+        '';
+        description = lib.mdDoc ''
+          Override the configuration file used by MySQL. By default,
+          NixOS generates one automatically from {option}`services.mysql.settings`.
+        '';
+        example = literalExpression ''
+          pkgs.writeText "my.cnf" '''
+            [mysqld]
+            datadir = /var/lib/mysql
+            bind-address = 127.0.0.1
+            port = 3336
+
+            !includedir /etc/mysql/conf.d/
+          ''';
+        '';
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = {};
+        description = lib.mdDoc ''
+          MySQL configuration. Refer to
+          <https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html>,
+          <https://dev.mysql.com/doc/refman/8.0/en/server-system-variables.html>,
+          and <https://mariadb.com/kb/en/server-system-variables/>
+          for details on supported values.
+
+          ::: {.note}
+          MySQL configuration options such as `--quick` should be treated as
+          boolean options and given values such as `true`, `false`,
+          `1`, or `0`. See the provided example below.
+          :::
+        '';
+        example = literalExpression ''
+          {
+            mysqld = {
+              key_buffer_size = "6G";
+              table_cache = 1600;
+              log-error = "/var/log/mysql_err.log";
+              plugin-load-add = [ "server_audit" "ed25519=auth_ed25519" ];
+            };
+            mysqldump = {
+              quick = true;
+              max_allowed_packet = "16M";
+            };
+          }
+        '';
+      };
+
+      initialDatabases = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            name = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                The name of the database to create.
+              '';
+            };
+            schema = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = lib.mdDoc ''
+                The initial schema of the database; if null (the default),
+                an empty database is created.
+              '';
+            };
+          };
+        });
+        default = [];
+        description = lib.mdDoc ''
+          List of database names and their initial schemas that should be used to create databases on the first startup
+          of MySQL. The schema attribute is optional: If not specified, an empty database is created.
+        '';
+        example = literalExpression ''
+          [
+            { name = "foodatabase"; schema = ./foodatabase.sql; }
+            { name = "bardatabase"; }
+          ]
+        '';
+      };
+
+      initialScript = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "A file containing SQL statements to be executed on the first startup. Can be used for granting certain permissions on the database.";
+      };
+
+      ensureDatabases = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Ensures that the specified databases exist.
+          This option will never delete existing databases, especially not when the value of this
+          option is changed. This means that databases created through this option, or by
+          any other means, have to be removed manually.
+        '';
+        example = [
+          "nextcloud"
+          "matomo"
+        ];
+      };
+
+      ensureUsers = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            name = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                Name of the user to ensure.
+              '';
+            };
+            ensurePermissions = mkOption {
+              type = types.attrsOf types.str;
+              default = {};
+              description = lib.mdDoc ''
+                Permissions to ensure for the user, specified as attribute set.
+                The attribute names specify the database and tables to grant the permissions for,
+                separated by a dot. You may use wildcards here.
+                The attribute values specify the permissions to grant.
+                You may specify one or multiple comma-separated SQL privileges here.
+
+                For more information on how to specify the target
+                and on which privileges exist, see the
+                [GRANT syntax](https://mariadb.com/kb/en/library/grant/).
+                The attributes are used as `GRANT ''${attrValue} ON ''${attrName}`.
+              '';
+              example = literalExpression ''
+                {
+                  "database.*" = "ALL PRIVILEGES";
+                  "*.*" = "SELECT, LOCK TABLES";
+                }
+              '';
+            };
+          };
+        });
+        default = [];
+        description = lib.mdDoc ''
+          Ensures that the specified users exist and have at least the ensured permissions.
+          The MySQL users will be identified using Unix socket authentication. This authenticates only the Unix
+          user with the same name, and it does so without the need for a password.
+          This option will never delete existing users or remove permissions, especially not when the value of this
+          option is changed. This means that users created and permissions assigned through this option, or by
+          any other means, have to be removed manually.
+        '';
+        example = literalExpression ''
+          [
+            {
+              name = "nextcloud";
+              ensurePermissions = {
+                "nextcloud.*" = "ALL PRIVILEGES";
+              };
+            }
+            {
+              name = "backup";
+              ensurePermissions = {
+                "*.*" = "SELECT, LOCK TABLES";
+              };
+            }
+          ]
+        '';
+      };
+
+      replication = {
+        role = mkOption {
+          type = types.enum [ "master" "slave" "none" ];
+          default = "none";
+          description = lib.mdDoc "Role of the MySQL server instance.";
+        };
+
+        serverId = mkOption {
+          type = types.int;
+          default = 1;
+          description = lib.mdDoc "Id of the MySQL server instance. This number must be unique for each instance.";
+        };
+
+        masterHost = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Hostname of the MySQL master server.";
+        };
+
+        slaveHost = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Hostname of the MySQL slave server.";
+        };
+
+        masterUser = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Username of the MySQL replication user.";
+        };
+
+        masterPassword = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Password of the MySQL replication user.";
+        };
+
+        masterPort = mkOption {
+          type = types.port;
+          default = 3306;
+          description = lib.mdDoc "Port number on which the MySQL master server runs.";
+        };
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.mysql.dataDir =
+      mkDefault (if versionAtLeast config.system.stateVersion "17.09" then "/var/lib/mysql"
+                 else "/var/mysql");
+
+    services.mysql.settings.mysqld = mkMerge [
+      {
+        datadir = cfg.dataDir;
+        port = mkDefault 3306;
+      }
+      (mkIf (cfg.replication.role == "master" || cfg.replication.role == "slave") {
+        log-bin = "mysql-bin-${toString cfg.replication.serverId}";
+        log-bin-index = "mysql-bin-${toString cfg.replication.serverId}.index";
+        relay-log = "mysql-relay-bin";
+        server-id = cfg.replication.serverId;
+        binlog-ignore-db = [ "information_schema" "performance_schema" "mysql" ];
+      })
+      (mkIf (!isMariaDB) {
+        plugin-load-add = "auth_socket.so";
+      })
+    ];
+
+    users.users = optionalAttrs (cfg.user == "mysql") {
+      mysql = {
+        description = "MySQL server user";
+        group = cfg.group;
+        uid = config.ids.uids.mysql;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "mysql") {
+      mysql.gid = config.ids.gids.mysql;
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc."my.cnf".source = cfg.configFile;
+
+    systemd.services.mysql = {
+      description = "MySQL Server";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ cfg.configFile ];
+
+      unitConfig.RequiresMountsFor = cfg.dataDir;
+
+      path = [
+        # Needed for the mysql_install_db command in the preStart script
+        # which calls the hostname command.
+        pkgs.nettools
+      ];
+
+      preStart = if isMariaDB then ''
+        if ! test -e ${cfg.dataDir}/mysql; then
+          ${cfg.package}/bin/mysql_install_db --defaults-file=/etc/my.cnf ${mysqldOptions}
+          touch ${cfg.dataDir}/mysql_init
+        fi
+      '' else ''
+        if ! test -e ${cfg.dataDir}/mysql; then
+          ${cfg.package}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} --initialize-insecure
+          touch ${cfg.dataDir}/mysql_init
+        fi
+      '';
+
+      script = ''
+        # https://mariadb.com/kb/en/getting-started-with-mariadb-galera-cluster/#systemd-and-galera-recovery
+        if test -n "''${_WSREP_START_POSITION}"; then
+          if test -e "${cfg.package}/bin/galera_recovery"; then
+            VAR=$(cd ${cfg.package}/bin/..; ${cfg.package}/bin/galera_recovery); [[ $? -eq 0 ]] && export _WSREP_START_POSITION=$VAR || exit 1
+          fi
+        fi
+
+        # The last two environment variables are used for starting Galera clusters
+        exec ${cfg.package}/bin/mysqld --defaults-file=/etc/my.cnf ${mysqldOptions} $_WSREP_NEW_CLUSTER $_WSREP_START_POSITION
+      '';
+
+      postStart = let
+        # The super user account to use on *first* run of MySQL server
+        superUser = if isMariaDB then cfg.user else "root";
+      in ''
+        ${optionalString (!isMariaDB) ''
+          # Wait until the MySQL server is available for use
+          count=0
+          while [ ! -e /run/mysqld/mysqld.sock ]
+          do
+              if [ $count -eq 30 ]
+              then
+                  echo "Tried 30 times, giving up..."
+                  exit 1
+              fi
+
+              echo "MySQL daemon not yet started. Waiting for 1 second..."
+              count=$((count + 1))
+              sleep 1
+          done
+        ''}
+
+        if [ -f ${cfg.dataDir}/mysql_init ]
+        then
+            # While MariaDB comes with a 'mysql' super user account since 10.4.x, MySQL does not
+            # Since we don't want to run this service as 'root' we need to ensure the account exists on first run
+            ( echo "CREATE USER IF NOT EXISTS '${cfg.user}'@'localhost' IDENTIFIED WITH ${if isMariaDB then "unix_socket" else "auth_socket"};"
+              echo "GRANT ALL PRIVILEGES ON *.* TO '${cfg.user}'@'localhost' WITH GRANT OPTION;"
+            ) | ${cfg.package}/bin/mysql -u ${superUser} -N
+
+            ${concatMapStrings (database: ''
+              # Create initial databases
+              if ! test -e "${cfg.dataDir}/${database.name}"; then
+                  echo "Creating initial database: ${database.name}"
+                  ( echo 'create database `${database.name}`;'
+
+                    ${optionalString (database.schema != null) ''
+                    echo 'use `${database.name}`;'
+
+                    # TODO: this silently falls through if database.schema does not exist,
+                    # we should catch this somehow and exit, but can't do it here because we're in a subshell.
+                    if [ -f "${database.schema}" ]
+                    then
+                        cat ${database.schema}
+                    elif [ -d "${database.schema}" ]
+                    then
+                        cat ${database.schema}/mysql-databases/*.sql
+                    fi
+                    ''}
+                  ) | ${cfg.package}/bin/mysql -u ${superUser} -N
+              fi
+            '') cfg.initialDatabases}
+
+            ${optionalString (cfg.replication.role == "master")
+              ''
+                # Set up the replication master
+
+                ( echo "use mysql;"
+                  echo "CREATE USER '${cfg.replication.masterUser}'@'${cfg.replication.slaveHost}' IDENTIFIED WITH mysql_native_password;"
+                  echo "SET PASSWORD FOR '${cfg.replication.masterUser}'@'${cfg.replication.slaveHost}' = PASSWORD('${cfg.replication.masterPassword}');"
+                  echo "GRANT REPLICATION SLAVE ON *.* TO '${cfg.replication.masterUser}'@'${cfg.replication.slaveHost}';"
+                ) | ${cfg.package}/bin/mysql -u ${superUser} -N
+              ''}
+
+            ${optionalString (cfg.replication.role == "slave")
+              ''
+                # Set up the replication slave
+
+                ( echo "stop slave;"
+                  echo "change master to master_host='${cfg.replication.masterHost}', master_user='${cfg.replication.masterUser}', master_password='${cfg.replication.masterPassword}';"
+                  echo "start slave;"
+                ) | ${cfg.package}/bin/mysql -u ${superUser} -N
+              ''}
+
+            ${optionalString (cfg.initialScript != null)
+              ''
+                # Execute initial script
+                # using toString to avoid copying the file to nix store if given as path instead of string,
+                # as it might contain credentials
+                cat ${toString cfg.initialScript} | ${cfg.package}/bin/mysql -u ${superUser} -N
+              ''}
+
+            rm ${cfg.dataDir}/mysql_init
+        fi
+
+        ${optionalString (cfg.ensureDatabases != []) ''
+          (
+          ${concatMapStrings (database: ''
+            echo "CREATE DATABASE IF NOT EXISTS \`${database}\`;"
+          '') cfg.ensureDatabases}
+          ) | ${cfg.package}/bin/mysql -N
+        ''}
+
+        ${concatMapStrings (user:
+          ''
+            ( echo "CREATE USER IF NOT EXISTS '${user.name}'@'localhost' IDENTIFIED WITH ${if isMariaDB then "unix_socket" else "auth_socket"};"
+              ${concatStringsSep "\n" (mapAttrsToList (database: permission: ''
+                echo "GRANT ${permission} ON ${database} TO '${user.name}'@'localhost';"
+              '') user.ensurePermissions)}
+            ) | ${cfg.package}/bin/mysql -N
+          '') cfg.ensureUsers}
+      '';
+
+      serviceConfig = mkMerge [
+        {
+          Type = if isMariaDB then "notify" else "simple";
+          Restart = "on-abort";
+          RestartSec = "5s";
+
+          # User and group
+          User = cfg.user;
+          Group = cfg.group;
+          # Runtime directory and mode
+          RuntimeDirectory = "mysqld";
+          RuntimeDirectoryMode = "0755";
+          # Access write directories
+          ReadWritePaths = [ cfg.dataDir ];
+          # Capabilities
+          CapabilityBoundingSet = "";
+          # Security
+          NoNewPrivileges = true;
+          # Sandboxing
+          ProtectSystem = "strict";
+          ProtectHome = true;
+          PrivateTmp = true;
+          PrivateDevices = true;
+          ProtectHostname = true;
+          ProtectKernelTunables = true;
+          ProtectKernelModules = true;
+          ProtectControlGroups = true;
+          RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          PrivateMounts = true;
+          # System Call Filtering
+          SystemCallArchitectures = "native";
+        }
+        (mkIf (cfg.dataDir == "/var/lib/mysql") {
+          StateDirectory = "mysql";
+          StateDirectoryMode = "0700";
+        })
+      ];
+    };
+  };
+}
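
For reference only, a typical consumer of the module above might look like this; the database and user names are illustrative:

  services.mysql = {
    enable = true;
    package = pkgs.mariadb;
    ensureDatabases = [ "nextcloud" ];
    ensureUsers = [{
      name = "nextcloud";
      ensurePermissions."nextcloud.*" = "ALL PRIVILEGES";
    }];
    settings.mysqld.bind-address = "127.0.0.1";
  };
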
diff --git a/nixpkgs/nixos/modules/services/databases/neo4j.nix b/nixpkgs/nixos/modules/services/databases/neo4j.nix
new file mode 100644
index 000000000000..090502424028
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/neo4j.nix
@@ -0,0 +1,641 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.neo4j;
+  opt = options.services.neo4j;
+  certDirOpt = options.services.neo4j.directories.certificates;
+  isDefaultPathOption = opt: isOption opt && opt.type == types.path && opt.highestPrio >= 1500;
+
+  sslPolicies = mapAttrsToList (
+    name: conf: ''
+      dbms.ssl.policy.${name}.allow_key_generation=${boolToString conf.allowKeyGeneration}
+      dbms.ssl.policy.${name}.base_directory=${conf.baseDirectory}
+      ${optionalString (conf.ciphers != null) ''
+        dbms.ssl.policy.${name}.ciphers=${concatStringsSep "," conf.ciphers}
+      ''}
+      dbms.ssl.policy.${name}.client_auth=${conf.clientAuth}
+      ${if length (splitString "/" conf.privateKey) > 1 then
+        "dbms.ssl.policy.${name}.private_key=${conf.privateKey}"
+      else
+        "dbms.ssl.policy.${name}.private_key=${conf.baseDirectory}/${conf.privateKey}"
+      }
+      ${if length (splitString "/" conf.publicCertificate) > 1 then
+        "dbms.ssl.policy.${name}.public_certificate=${conf.publicCertificate}"
+      else
+        "dbms.ssl.policy.${name}.public_certificate=${conf.baseDirectory}/${conf.publicCertificate}"
+      }
+      dbms.ssl.policy.${name}.revoked_dir=${conf.revokedDir}
+      dbms.ssl.policy.${name}.tls_versions=${concatStringsSep "," conf.tlsVersions}
+      dbms.ssl.policy.${name}.trust_all=${boolToString conf.trustAll}
+      dbms.ssl.policy.${name}.trusted_dir=${conf.trustedDir}
+    ''
+  ) cfg.ssl.policies;
+
+  serverConfig = pkgs.writeText "neo4j.conf" ''
+    # General
+    dbms.allow_upgrade=${boolToString cfg.allowUpgrade}
+    dbms.default_listen_address=${cfg.defaultListenAddress}
+    dbms.databases.default_to_read_only=${boolToString cfg.readOnly}
+    ${optionalString (cfg.workerCount > 0) ''
+      dbms.threads.worker_count=${toString cfg.workerCount}
+    ''}
+
+    # Directories (readonly)
+    dbms.directories.certificates=${cfg.directories.certificates}
+    dbms.directories.plugins=${cfg.directories.plugins}
+    dbms.directories.lib=${cfg.package}/share/neo4j/lib
+    ${optionalString (cfg.constrainLoadCsv) ''
+      dbms.directories.import=${cfg.directories.imports}
+    ''}
+
+    # Directories (read and write)
+    dbms.directories.data=${cfg.directories.data}
+    dbms.directories.logs=${cfg.directories.home}/logs
+    dbms.directories.run=${cfg.directories.home}/run
+
+    # HTTP Connector
+    ${optionalString (cfg.http.enable) ''
+      dbms.connector.http.enabled=${boolToString cfg.http.enable}
+      dbms.connector.http.listen_address=${cfg.http.listenAddress}
+      dbms.connector.http.advertised_address=${cfg.http.listenAddress}
+    ''}
+
+    # HTTPS Connector
+    dbms.connector.https.enabled=${boolToString cfg.https.enable}
+    dbms.connector.https.listen_address=${cfg.https.listenAddress}
+    dbms.connector.https.advertised_address=${cfg.https.listenAddress}
+
+    # BOLT Connector
+    dbms.connector.bolt.enabled=${boolToString cfg.bolt.enable}
+    dbms.connector.bolt.listen_address=${cfg.bolt.listenAddress}
+    dbms.connector.bolt.advertised_address=${cfg.bolt.listenAddress}
+    dbms.connector.bolt.tls_level=${cfg.bolt.tlsLevel}
+
+    # SSL Policies
+    ${concatStringsSep "\n" sslPolicies}
+
+    # Default retention policy from neo4j.conf
+    dbms.tx_log.rotation.retention_policy=1 days
+
+    # Default JVM parameters from neo4j.conf
+    dbms.jvm.additional=-XX:+UseG1GC
+    dbms.jvm.additional=-XX:-OmitStackTraceInFastThrow
+    dbms.jvm.additional=-XX:+AlwaysPreTouch
+    dbms.jvm.additional=-XX:+UnlockExperimentalVMOptions
+    dbms.jvm.additional=-XX:+TrustFinalNonStaticFields
+    dbms.jvm.additional=-XX:+DisableExplicitGC
+    dbms.jvm.additional=-Djdk.tls.ephemeralDHKeySize=2048
+    dbms.jvm.additional=-Djdk.tls.rejectClientInitiatedRenegotiation=true
+    dbms.jvm.additional=-Dunsupported.dbms.udc.source=tarball
+
+    #dbms.memory.heap.initial_size=12000m
+    #dbms.memory.heap.max_size=12000m
+    #dbms.memory.pagecache.size=4g
+    #dbms.tx_state.max_off_heap_memory=8000m
+
+    # Extra Configuration
+    ${cfg.extraServerConfig}
+  '';
+
+in {
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "neo4j" "host" ] [ "services" "neo4j" "defaultListenAddress" ])
+    (mkRenamedOptionModule [ "services" "neo4j" "listenAddress" ] [ "services" "neo4j" "defaultListenAddress" ])
+    (mkRenamedOptionModule [ "services" "neo4j" "enableBolt" ] [ "services" "neo4j" "bolt" "enable" ])
+    (mkRenamedOptionModule [ "services" "neo4j" "enableHttps" ] [ "services" "neo4j" "https" "enable" ])
+    (mkRenamedOptionModule [ "services" "neo4j" "certDir" ] [ "services" "neo4j" "directories" "certificates" ])
+    (mkRenamedOptionModule [ "services" "neo4j" "dataDir" ] [ "services" "neo4j" "directories" "home" ])
+    (mkRemovedOptionModule [ "services" "neo4j" "port" ] "Use services.neo4j.http.listenAddress instead.")
+    (mkRemovedOptionModule [ "services" "neo4j" "boltPort" ] "Use services.neo4j.bolt.listenAddress instead.")
+    (mkRemovedOptionModule [ "services" "neo4j" "httpsPort" ] "Use services.neo4j.https.listenAddress instead.")
+    (mkRemovedOptionModule [ "services" "neo4j" "shell" "enabled" ] "shell.enabled was removed upstream")
+    (mkRemovedOptionModule [ "services" "neo4j" "udc" "enabled" ] "udc.enabled was removed upstream")
+  ];
+
+  ###### interface
+
+  options.services.neo4j = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable Neo4j Community Edition.
+      '';
+    };
+
+    allowUpgrade = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Allow upgrade of Neo4j database files from an older version.
+      '';
+    };
+
+    constrainLoadCsv = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Sets the root directory for file URLs used with the Cypher
+        `LOAD CSV` clause to be that defined by
+        {option}`directories.imports`. It restricts
+        access to only those files within that directory and its
+        subdirectories.
+
+        Setting this option to `false` introduces
+        possible security problems.
+      '';
+    };
+
+    defaultListenAddress = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        Default network interface to listen for incoming connections. To
+        listen for connections on all interfaces, use "0.0.0.0".
+
+        Specifies the default IP address and the address part of the
+        connector-specific {option}`listenAddress` options. To bind a
+        specific connector to a specific network interface, specify the
+        entire {option}`listenAddress` option for that connector.
+      '';
+    };
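+
+    # A minimal usage sketch (illustrative, not part of the module): enable Neo4j
+    # and expose the connectors on all interfaces instead of the 127.0.0.1 default.
+    #
+    #   services.neo4j = {
+    #     enable = true;
+    #     defaultListenAddress = "0.0.0.0";
+    #   };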
+
+    extraServerConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra configuration for Neo4j Community server. Refer to the
+        [complete reference](https://neo4j.com/docs/operations-manual/current/reference/configuration-settings/)
+        of Neo4j configuration settings.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.neo4j;
+      defaultText = literalExpression "pkgs.neo4j";
+      description = lib.mdDoc ''
+        Neo4j package to use.
+      '';
+    };
+
+    readOnly = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Only allow read operations from this Neo4j instance.
+      '';
+    };
+
+    workerCount = mkOption {
+      type = types.ints.between 0 44738;
+      default = 0;
+      description = lib.mdDoc ''
+        Number of Neo4j worker threads, where the default of
+        `0` indicates a worker count equal to the number of
+        available processors.
+      '';
+    };
+
+    bolt = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable the BOLT connector for Neo4j. Setting this option to
+          `false` will stop Neo4j from listening for incoming
+          connections on the BOLT port (7687 by default).
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":7687";
+        description = lib.mdDoc ''
+          Neo4j listen address for BOLT traffic. The listen address is
+          expressed in the format `<ip-address>:<port-number>`.
+        '';
+      };
+
+      sslPolicy = mkOption {
+        type = types.str;
+        default = "legacy";
+        description = lib.mdDoc ''
+          Neo4j SSL policy for BOLT traffic.
+
+          The legacy policy is a special policy which is not defined in
+          the policy configuration section, but rather derives from
+          {option}`directories.certificates` and
+          associated files (by default: {file}`neo4j.key` and
+          {file}`neo4j.cert`). Its use will be deprecated.
+
+          Note: This connector must be configured to support/require
+          SSL/TLS for the legacy policy to actually be utilized. See
+          {option}`bolt.tlsLevel`.
+        '';
+      };
+
+      tlsLevel = mkOption {
+        type = types.enum [ "REQUIRED" "OPTIONAL" "DISABLED" ];
+        default = "OPTIONAL";
+        description = lib.mdDoc ''
+          SSL/TLS requirement level for BOLT traffic.
+        '';
+      };
+    };
+
+    directories = {
+      certificates = mkOption {
+        type = types.path;
+        default = "${cfg.directories.home}/certificates";
+        defaultText = literalExpression ''"''${config.${opt.directories.home}}/certificates"'';
+        description = lib.mdDoc ''
+          Directory for storing certificates to be used by Neo4j for
+          TLS connections.
+
+          When setting this directory to something other than its default,
+          ensure the directory's existence, and that read/write permissions are
+          given to the Neo4j daemon user `neo4j`.
+
+          Note that changing this directory from its default will prevent
+          the directory structure required for each SSL policy from being
+          automatically generated. A policy's directory structure as defined by
+          its {option}`baseDirectory`, {option}`revokedDir` and
+          {option}`trustedDir` must then be set up manually. The
+          existence of these directories is mandatory, as well as the presence
+          of the certificate file and the private key. Ensure the correct
+          permissions are set on these directories and files.
+        '';
+      };
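+
+      # Sketch (illustrative; the path is a placeholder): pointing the certificate
+      # directory somewhere you manage yourself. That directory must then contain
+      # the legacy neo4j.key/neo4j.cert files or the per-policy layout described above.
+      #
+      #   services.neo4j.directories.certificates = "/var/lib/neo4j-certs";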
+
+      data = mkOption {
+        type = types.path;
+        default = "${cfg.directories.home}/data";
+        defaultText = literalExpression ''"''${config.${opt.directories.home}}/data"'';
+        description = lib.mdDoc ''
+          Path of the data directory. You must not configure more than one
+          Neo4j installation to use the same data directory.
+
+          When setting this directory to something other than its default,
+          ensure the directory's existence, and that read/write permissions are
+          given to the Neo4j daemon user `neo4j`.
+        '';
+      };
+
+      home = mkOption {
+        type = types.path;
+        default = "/var/lib/neo4j";
+        description = lib.mdDoc ''
+          Path of the Neo4j home directory. Other default directories are
+          subdirectories of this path. This directory will be created if
+          non-existent, and its ownership will be changed with {command}`chown` to
+          the Neo4j daemon user `neo4j`.
+        '';
+      };
+
+      imports = mkOption {
+        type = types.path;
+        default = "${cfg.directories.home}/import";
+        defaultText = literalExpression ''"''${config.${opt.directories.home}}/import"'';
+        description = lib.mdDoc ''
+          The root directory for file URLs used with the Cypher
+          `LOAD CSV` clause. Only meaningful when
+          {option}`constrainLoadCsv` is set to
+          `true`.
+
+          When setting this directory to something other than its default,
+          ensure the directory's existence, and that read permission is
+          given to the Neo4j daemon user `neo4j`.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.path;
+        default = "${cfg.directories.home}/plugins";
+        defaultText = literalExpression ''"''${config.${opt.directories.home}}/plugins"'';
+        description = lib.mdDoc ''
+          Path of the database plugin directory. Compiled Java JAR files that
+          contain database procedures will be loaded if they are placed in
+          this directory.
+
+          When setting this directory to something other than its default,
+          ensure the directory's existence, and that read permission is
+          given to the Neo4j daemon user `neo4j`.
+        '';
+      };
+    };
+
+    http = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable the HTTP connector for Neo4j. Setting this option to
+          `false` will stop Neo4j from listening for incoming
+          connections on the HTTP port (7474 by default).
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":7474";
+        description = lib.mdDoc ''
+          Neo4j listen address for HTTP traffic. The listen address is
+          expressed in the format `<ip-address>:<port-number>`.
+        '';
+      };
+    };
+
+    https = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable the HTTPS connector for Neo4j. Setting this option to
+          `false` will stop Neo4j from listening for incoming
+          connections on the HTTPS port (7473 by default).
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":7473";
+        description = lib.mdDoc ''
+          Neo4j listen address for HTTPS traffic. The listen address is
+          expressed in the format `<ip-address>:<port-number>`.
+        '';
+      };
+
+      sslPolicy = mkOption {
+        type = types.str;
+        default = "legacy";
+        description = lib.mdDoc ''
+          Neo4j SSL policy for HTTPS traffic.
+
+          The legacy policy is a special policy which is not defined in the
+          policy configuration section, but rather derives from
+          {option}`directories.certificates` and
+          associated files (by default: {file}`neo4j.key` and
+          {file}`neo4j.cert`). Its use will be deprecated.
+        '';
+      };
+    };
+
+    shell = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable a remote shell server which Neo4j Shell clients can log in to.
+          Only applicable to {command}`neo4j-shell`.
+        '';
+      };
+    };
+
+    ssl.policies = mkOption {
+      type = with types; attrsOf (submodule ({ name, config, options, ... }: {
+        options = {
+
+          allowKeyGeneration = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Allows the generation of a private key and associated self-signed
+              certificate. Only performed when neither object can be found for
+              this policy. It is recommended to turn this off again after keys
+              have been generated.
+
+              The public certificate is required to be duplicated to the
+              directory holding trusted certificates as defined by the
+              {option}`trustedDir` option.
+
+              Keys should in general be generated and distributed offline by a
+              trusted certificate authority and not by utilizing this mode.
+            '';
+          };
+
+          baseDirectory = mkOption {
+            type = types.path;
+            default = "${cfg.directories.certificates}/${name}";
+            defaultText = literalExpression ''"''${config.${opt.directories.certificates}}/''${name}"'';
+            description = lib.mdDoc ''
+              The mandatory base directory for cryptographic objects of this
+              policy. This path is only automatically generated when this
+              option as well as {option}`directories.certificates` are
+              left at their default. Ensure read/write permissions are given
+              to the Neo4j daemon user `neo4j`.
+
+              It is also possible to override each individual
+              configuration with absolute paths. See the
+              {option}`privateKey` and {option}`publicCertificate`
+              policy options.
+            '';
+          };
+
+          ciphers = mkOption {
+            type = types.nullOr (types.listOf types.str);
+            default = null;
+            description = lib.mdDoc ''
+              Restrict the allowed ciphers of this policy to those defined
+              here. The default ciphers are those of the JVM platform.
+            '';
+          };
+
+          clientAuth = mkOption {
+            type = types.enum [ "NONE" "OPTIONAL" "REQUIRE" ];
+            default = "REQUIRE";
+            description = lib.mdDoc ''
+              The client authentication stance for this policy.
+            '';
+          };
+
+          privateKey = mkOption {
+            type = types.str;
+            default = "private.key";
+            description = lib.mdDoc ''
+              The name of the private PKCS #8 key file for this policy to be found
+              in the {option}`baseDirectory`, or the absolute path to
+              the key file. It is mandatory that a key can be found or generated.
+            '';
+          };
+
+          publicCertificate = mkOption {
+            type = types.str;
+            default = "public.crt";
+            description = lib.mdDoc ''
+              The name of the public X.509 certificate (chain) file in PEM format
+              for this policy to be found in the {option}`baseDirectory`,
+              or the absolute path to the certificate file. It is mandatory
+              that a certificate can be found or generated.
+
+              The public certificate is required to be duplicated to the
+              directory holding trusted certificates as defined by the
+              {option}`trustedDir` option.
+            '';
+          };
+
+          revokedDir = mkOption {
+            type = types.path;
+            default = "${config.baseDirectory}/revoked";
+            defaultText = literalExpression ''"''${config.${options.baseDirectory}}/revoked"'';
+            description = lib.mdDoc ''
+              Path to directory of CRLs (Certificate Revocation Lists) in
+              PEM format. Must be an absolute path. This directory is mandatory
+              and must be created manually when: setting this option to
+              something other than its default; setting
+              either this policy's {option}`baseDirectory` or
+              {option}`directories.certificates` to something other than
+              their default. Ensure read/write permissions are given to the
+              Neo4j daemon user `neo4j`.
+            '';
+          };
+
+          tlsVersions = mkOption {
+            type = types.listOf types.str;
+            default = [ "TLSv1.2" ];
+            description = lib.mdDoc ''
+              Restrict the TLS protocol versions of this policy to those
+              defined here.
+            '';
+          };
+
+          trustAll = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Makes this policy trust all remote parties. Enabling this is not
+              recommended and the policy's trusted directory will be ignored.
+              Use of this mode is discouraged. It would offer encryption but
+              no security.
+            '';
+          };
+
+          trustedDir = mkOption {
+            type = types.path;
+            default = "${config.baseDirectory}/trusted";
+            defaultText = literalExpression ''"''${config.${options.baseDirectory}}/trusted"'';
+            description = lib.mdDoc ''
+              Path to directory of X.509 certificates in PEM format for
+              trusted parties. Must be an absolute path. This directory is
+              mandatory and must be created manually when: setting this option
+              to something other than its default; setting
+              either this policy's {option}`baseDirectory` or
+              {option}`directories.certificates` to something other than
+              their default. Ensure read/write permissions are given to the
+              Neo4j daemon user `neo4j`.
+
+              The public certificate as defined by
+              {option}`publicCertificate` is required to be duplicated
+              to this directory.
+            '';
+          };
+
+          directoriesToCreate = mkOption {
+            type = types.listOf types.path;
+            internal = true;
+            readOnly = true;
+            description = lib.mdDoc ''
+              Directories of this policy that will be created automatically
+              when the certificates directory is left at its default value.
+              This includes all options of type path that are left at their
+              default value.
+            '';
+          };
+
+        };
+
+        config.directoriesToCreate = optionals
+          (certDirOpt.highestPrio >= 1500 && options.baseDirectory.highestPrio >= 1500)
+          (map (opt: opt.value) (filter isDefaultPathOption (attrValues options)));
+
+      }));
+      default = {};
+      description = lib.mdDoc ''
+        Defines the SSL policies for use with Neo4j connectors. Each attribute
+        of this set defines a policy, with the attribute name defining the name
+        of the policy and its namespace. Refer to the operations manual section
+        on Neo4j's
+        [SSL Framework](https://neo4j.com/docs/operations-manual/current/security/ssl-framework/)
+        for further details.
+      '';
+    };
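+
+    # A hedged example (illustrative only): a policy named "default" whose key and
+    # certificate are expected under the policy's base directory; the file names
+    # below are the option defaults, not files this module creates.
+    #
+    #   services.neo4j.ssl.policies.default = {
+    #     clientAuth = "NONE";
+    #     tlsVersions = [ "TLSv1.2" ];
+    #     privateKey = "private.key";
+    #     publicCertificate = "public.crt";
+    #   };
+    #   services.neo4j.https.sslPolicy = "default";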
+
+  };
+
+  ###### implementation
+
+  config =
+    let
+      # Assertion helpers
+      policyNameList = attrNames cfg.ssl.policies;
+      validPolicyNameList = [ "legacy" ] ++ policyNameList;
+      validPolicyNameString = concatStringsSep ", " validPolicyNameList;
+
+      # Capture various directories left at their default so they can be created.
+      defaultDirectoriesToCreate = map (opt: opt.value) (filter isDefaultPathOption (attrValues options.services.neo4j.directories));
+      policyDirectoriesToCreate = concatMap (pol: pol.directoriesToCreate) (attrValues cfg.ssl.policies);
+    in
+
+    mkIf cfg.enable {
+      assertions = [
+        { assertion = !elem "legacy" policyNameList;
+          message = "The policy 'legacy' is special to Neo4j, and its name is reserved."; }
+        { assertion = elem cfg.bolt.sslPolicy validPolicyNameList;
+          message = "Invalid policy assigned: `services.neo4j.bolt.sslPolicy = \"${cfg.bolt.sslPolicy}\"`, defined policies are: ${validPolicyNameString}"; }
+        { assertion = elem cfg.https.sslPolicy validPolicyNameList;
+          message = "Invalid policy assigned: `services.neo4j.https.sslPolicy = \"${cfg.https.sslPolicy}\"`, defined policies are: ${validPolicyNameString}"; }
+      ];
+
+      systemd.services.neo4j = {
+        description = "Neo4j Daemon";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = {
+          NEO4J_HOME = "${cfg.directories.home}";
+          NEO4J_CONF = "${cfg.directories.home}/conf";
+        };
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/neo4j console";
+          User = "neo4j";
+          PermissionsStartOnly = true;
+          LimitNOFILE = 40000;
+        };
+
+        preStart = ''
+          # Directories Setup
+          #   Always ensure home exists with nested conf, logs directories.
+          mkdir -m 0700 -p ${cfg.directories.home}/{conf,logs}
+
+          #   Create other sub-directories and policy directories that have been left at their default.
+          ${concatMapStringsSep "\n" (
+            dir: ''
+              mkdir -m 0700 -p ${dir}
+          '') (defaultDirectoriesToCreate ++ policyDirectoriesToCreate)}
+
+          # Place the configuration where Neo4j can find it.
+          ln -fs ${serverConfig} ${cfg.directories.home}/conf/neo4j.conf
+
+          # Ensure neo4j user ownership
+          chown -R neo4j ${cfg.directories.home}
+        '';
+      };
+
+      environment.systemPackages = [ cfg.package ];
+
+      users.users.neo4j = {
+        isSystemUser = true;
+        group = "neo4j";
+        description = "Neo4j daemon user";
+        home = cfg.directories.home;
+      };
+      users.groups.neo4j = {};
+    };
+
+  meta = {
+    maintainers = with lib.maintainers; [ patternspandemic jonringer ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/openldap.nix b/nixpkgs/nixos/modules/services/databases/openldap.nix
new file mode 100644
index 000000000000..cba3442023cb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/openldap.nix
@@ -0,0 +1,342 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.openldap;
+  openldap = cfg.package;
+  configDir = if cfg.configDir != null then cfg.configDir else "/etc/openldap/slapd.d";
+
+  ldapValueType = let
+    # Can't do types.either with multiple non-overlapping submodules, so define our own
+    singleLdapValueType = lib.mkOptionType rec {
+      name = "LDAP";
+      # TODO: It would be nice to define a { secret = ...; } option, using
+      # systemd's LoadCredentials for secrets. That would remove the last
+      # barrier to using DynamicUser for openldap. This is blocked on
+      # systemd/systemd#19604
+      description = ''
+        LDAP value - either a string, or an attrset containing
+        `path` or `base64` for included
+        values or base-64 encoded values respectively.
+      '';
+      check = x: lib.isString x || (lib.isAttrs x && (x ? path || x ? base64));
+      merge = lib.mergeEqualOption;
+    };
+    # We don't coerce to lists of single values, as some values must be unique
+  in types.either singleLdapValueType (types.listOf singleLdapValueType);
+
+  ldapAttrsType =
+    let
+      options = {
+        attrs = mkOption {
+          type = types.attrsOf ldapValueType;
+          default = {};
+          description = lib.mdDoc "Attributes of the parent entry.";
+        };
+        children = mkOption {
+          # Hide the child attributes, to avoid infinite recursion in e.g. documentation
+          # Actual Nix evaluation is lazy, so this is not an issue there
+          type = let
+            hiddenOptions = lib.mapAttrs (name: attr: attr // { visible = false; }) options;
+          in types.attrsOf (types.submodule { options = hiddenOptions; });
+          default = {};
+          description = lib.mdDoc "Child entries of the current entry, with recursively the same structure.";
+          example = lib.literalExpression ''
+            {
+              "cn=schema" = {
+                # The attribute used in the DN must be defined
+                attrs = { cn = "schema"; };
+                children = {
+                  # This entry's DN is expanded to "cn=foo,cn=schema"
+                  "cn=foo" = { ... };
+                };
+                # These includes are inserted after "cn=schema", but before "cn=foo,cn=schema"
+                includes = [ ... ];
+              };
+            }
+          '';
+        };
+        includes = mkOption {
+          type = types.listOf types.path;
+          default = [];
+          description = lib.mdDoc ''
+            LDIF files to include after the parent's attributes but before its children.
+          '';
+        };
+      };
+    in types.submodule { inherit options; };
+
+  valueToLdif = attr: values: let
+    listValues = if lib.isList values then values else lib.singleton values;
+  in map (value:
+    if lib.isAttrs value then
+      if lib.hasAttr "path" value
+      then "${attr}:< file://${value.path}"
+      else "${attr}:: ${value.base64}"
+    else "${attr}: ${lib.replaceStrings [ "\n" ] [ "\n " ] value}"
+  ) listValues;
+
+  attrsToLdif = dn: { attrs, children, includes, ... }: [''
+    dn: ${dn}
+    ${lib.concatStringsSep "\n" (lib.flatten (lib.mapAttrsToList valueToLdif attrs))}
+  ''] ++ (map (path: "include: file://${path}\n") includes) ++ (
+    lib.flatten (lib.mapAttrsToList (name: value: attrsToLdif "${name},${dn}" value) children)
+  );
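+  # For illustration (an assumed input, not evaluated anywhere): attrsToLdif turns
+  #   attrsToLdif "cn=config" { attrs.cn = "config"; children = {}; includes = []; }
+  # into (roughly) a single LDIF entry:
+  #   dn: cn=config
+  #   cn: config
+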
+in {
+  options = {
+    services.openldap = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the ldap server.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.openldap;
+        defaultText = literalExpression "pkgs.openldap";
+        description = lib.mdDoc ''
+          OpenLDAP package to use.
+
+          This can be used to, for example, set an OpenLDAP package
+          with custom overrides to enable modules or other
+          functionality.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "openldap";
+        description = lib.mdDoc "User account under which slapd runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "openldap";
+        description = lib.mdDoc "Group account under which slapd runs.";
+      };
+
+      urlList = mkOption {
+        type = types.listOf types.str;
+        default = [ "ldap:///" ];
+        description = lib.mdDoc "URL list slapd should listen on.";
+        example = [ "ldaps:///" ];
+      };
+
+      settings = mkOption {
+        type = ldapAttrsType;
+        description = lib.mdDoc "Configuration for OpenLDAP, in OLC format";
+        example = lib.literalExpression ''
+          {
+            attrs.olcLogLevel = [ "stats" ];
+            children = {
+              "cn=schema".includes = [
+                 "''${pkgs.openldap}/etc/schema/core.ldif"
+                 "''${pkgs.openldap}/etc/schema/cosine.ldif"
+                 "''${pkgs.openldap}/etc/schema/inetorgperson.ldif"
+              ];
+              "olcDatabase={-1}frontend" = {
+                attrs = {
+                  objectClass = "olcDatabaseConfig";
+                  olcDatabase = "{-1}frontend";
+                  olcAccess = [ "{0}to * by dn.exact=uidNumber=0+gidNumber=0,cn=peercred,cn=external,cn=auth manage stop by * none stop" ];
+                };
+              };
+              "olcDatabase={0}config" = {
+                attrs = {
+                  objectClass = "olcDatabaseConfig";
+                  olcDatabase = "{0}config";
+                  olcAccess = [ "{0}to * by * none break" ];
+                };
+              };
+              "olcDatabase={1}mdb" = {
+                attrs = {
+                  objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
+                  olcDatabase = "{1}mdb";
+                  olcDbDirectory = "/var/lib/openldap/ldap";
+                  olcDbIndex = [
+                    "objectClass eq"
+                    "cn pres,eq"
+                    "uid pres,eq"
+                    "sn pres,eq,subany"
+                  ];
+                  olcSuffix = "dc=example,dc=com";
+                  olcAccess = [ "{0}to * by * read break" ];
+                };
+              };
+            };
+          };
+        '';
+      };
+
+      # This option overrides settings
+      configDir = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Use this config directory instead of generating one from the
+          `settings` option. Overrides all NixOS settings.
+        '';
+        example = "/var/lib/openldap/slapd.d";
+      };
+
+      mutableConfig = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to allow writable on-line configuration. If
+          `true`, the NixOS settings will only be used to
+          initialize the OpenLDAP configuration if it does not exist, and are
+          subsequently ignored.
+        '';
+      };
+
+      declarativeContents = mkOption {
+        type = with types; attrsOf lines;
+        default = {};
+        description = lib.mdDoc ''
+          Declarative contents for the LDAP database, in LDIF format by suffix.
+
+          All data will be erased when starting the LDAP server. Modifications
+          to the database are not prevented; they are just dropped on the next
+          restart of the service. Performance-wise the database and indexes are
+          rebuilt on each server startup, so this will slow down server startup,
+          especially with large databases.
+
+          Note that the root of the DB must be defined in
+          `services.openldap.settings` and the
+          `olcDbDirectory` must begin with
+          `"/var/lib/openldap"`.
+        '';
+        example = lib.literalExpression ''
+          {
+            "dc=example,dc=org" = '''
+              dn: dc=example,dc=org
+              objectClass: domain
+              dc: example
+
+              dn: ou=users,dc=example,dc=org
+              objectClass: organizationalUnit
+              ou: users
+
+              # ...
+            ''';
+          }
+        '';
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ kwohlfahrt ];
+
+  config = let
+    dbSettings = mapAttrs' (name: { attrs, ... }: nameValuePair attrs.olcSuffix attrs)
+      (filterAttrs (name: { attrs, ... }: (hasPrefix "olcDatabase=" name) && attrs ? olcSuffix) cfg.settings.children);
+    settingsFile = pkgs.writeText "config.ldif" (lib.concatStringsSep "\n" (attrsToLdif "cn=config" cfg.settings));
+    writeConfig = pkgs.writeShellScript "openldap-config" ''
+      set -euo pipefail
+
+      ${lib.optionalString (!cfg.mutableConfig) ''
+        chmod -R u+w ${configDir}
+        rm -rf ${configDir}/*
+      ''}
+      if [ ! -e "${configDir}/cn=config.ldif" ]; then
+        ${openldap}/bin/slapadd -F ${configDir} -bcn=config -l ${settingsFile}
+      fi
+      chmod -R ${if cfg.mutableConfig then "u+rw" else "u+r-w"} ${configDir}
+    '';
+
+    contentsFiles = mapAttrs (dn: ldif: pkgs.writeText "${dn}.ldif" ldif) cfg.declarativeContents;
+    writeContents = pkgs.writeShellScript "openldap-load" ''
+      set -euo pipefail
+
+      rm -rf $2/*
+      ${openldap}/bin/slapadd -F ${configDir} -b $1 -l $3
+    '';
+  in mkIf cfg.enable {
+    assertions = [{
+      assertion = (cfg.declarativeContents != {}) -> cfg.configDir == null;
+      message = ''
+        Declarative DB contents (${toString (attrNames cfg.declarativeContents)}) are not
+        supported with user-managed configuration.
+      '';
+    }] ++ (map (dn: {
+      assertion = (getAttr dn dbSettings) ? "olcDbDirectory";
+      # olcDbDirectory is necessary to prepopulate database using `slapadd`.
+      message = ''
+        Declarative DB ${dn} does not exist in `services.openldap.settings`, or does not have
+        `olcDbDirectory` configured.
+      '';
+    }) (attrNames cfg.declarativeContents)) ++ (mapAttrsToList (dn: { olcDbDirectory ? null, ... }: {
+      # For forward compatibility with `DynamicUser`, and to avoid accidentally clobbering
+      # directories with `declarativeContents`.
+      assertion = (olcDbDirectory != null) ->
+      ((hasPrefix "/var/lib/openldap/" olcDbDirectory) && (olcDbDirectory != "/var/lib/openldap/"));
+      message = ''
+        Database ${dn} has `olcDbDirectory` (${olcDbDirectory}) that is not a subdirectory of
+        `/var/lib/openldap/`.
+      '';
+    }) dbSettings);
+    environment.systemPackages = [ openldap ];
+
+    # Literal attributes must always be set
+    services.openldap.settings = {
+      attrs = {
+        objectClass = "olcGlobal";
+        cn = "config";
+      };
+      children."cn=schema".attrs = {
+        cn = "schema";
+        objectClass = "olcSchemaConfig";
+      };
+    };
+
+    systemd.services.openldap = {
+      description = "OpenLDAP Server Daemon";
+      documentation = [
+        "man:slapd"
+        "man:slapd-config"
+        "man:slapd-mdb"
+      ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStartPre = [
+          "!${pkgs.coreutils}/bin/mkdir -p ${configDir}"
+          "+${pkgs.coreutils}/bin/chown $USER ${configDir}"
+        ] ++ (lib.optional (cfg.configDir == null) writeConfig)
+        ++ (mapAttrsToList (dn: content: lib.escapeShellArgs [
+          writeContents dn (getAttr dn dbSettings).olcDbDirectory content
+        ]) contentsFiles)
+        ++ [ "${openldap}/bin/slaptest -u -F ${configDir}" ];
+        ExecStart = lib.escapeShellArgs ([
+          "${openldap}/libexec/slapd" "-d" "0" "-F" configDir "-h" (lib.concatStringsSep " " cfg.urlList)
+        ]);
+        Type = "notify";
+        # Fixes an error where openldap attempts to notify from a thread
+        # outside the main process:
+        #   Got notification message from PID 6378, but reception only permitted for main PID 6377
+        NotifyAccess = "all";
+        RuntimeDirectory = "openldap";
+        StateDirectory = ["openldap"]
+          ++ (map ({olcDbDirectory, ... }: removePrefix "/var/lib/" olcDbDirectory) (attrValues dbSettings));
+        StateDirectoryMode = "700";
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+      };
+    };
+
+    users.users = lib.optionalAttrs (cfg.user == "openldap") {
+      openldap = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = lib.optionalAttrs (cfg.group == "openldap") {
+      openldap = {};
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/opentsdb.nix b/nixpkgs/nixos/modules/services/databases/opentsdb.nix
new file mode 100644
index 000000000000..288b716fce03
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/opentsdb.nix
@@ -0,0 +1,102 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.opentsdb;
+
+  configFile = pkgs.writeText "opentsdb.conf" cfg.config;
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.opentsdb = {
+
+      enable = mkEnableOption (lib.mdDoc "OpenTSDB");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.opentsdb;
+        defaultText = literalExpression "pkgs.opentsdb";
+        description = lib.mdDoc ''
+          OpenTSDB package to use.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "opentsdb";
+        description = lib.mdDoc ''
+          User account under which OpenTSDB runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "opentsdb";
+        description = lib.mdDoc ''
+          Group account under which OpenTSDB runs.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 4242;
+        description = lib.mdDoc ''
+          Which port OpenTSDB listens on.
+        '';
+      };
+
+      config = mkOption {
+        type = types.lines;
+        default = ''
+          tsd.core.auto_create_metrics = true
+          tsd.http.request.enable_chunked  = true
+        '';
+        description = lib.mdDoc ''
+          The contents of OpenTSDB's configuration file.
+        '';
+      };
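+
+      # A minimal usage sketch (illustrative; the port and settings are arbitrary):
+      #
+      #   services.opentsdb = {
+      #     enable = true;
+      #     port = 14242;
+      #     config = ''
+      #       tsd.core.auto_create_metrics = true
+      #     '';
+      #   };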
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.opentsdb.enable {
+
+    systemd.services.opentsdb = {
+      description = "OpenTSDB Server";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "hbase.service" ];
+
+      environment.JAVA_HOME = "${pkgs.jre}";
+      path = [ pkgs.gnuplot ];
+
+      preStart =
+        ''
+        COMPRESSION=NONE HBASE_HOME=${config.services.hbase.package} ${cfg.package}/share/opentsdb/tools/create_table.sh
+        '';
+
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/tsdb tsd --staticroot=${cfg.package}/share/opentsdb/static --cachedir=/tmp/opentsdb --port=${toString cfg.port} --config=${configFile}";
+      };
+    };
+
+    users.users.opentsdb = {
+      description = "OpenTSDB Server user";
+      group = "opentsdb";
+      uid = config.ids.uids.opentsdb;
+    };
+
+    users.groups.opentsdb.gid = config.ids.gids.opentsdb;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/pgbouncer.nix b/nixpkgs/nixos/modules/services/databases/pgbouncer.nix
new file mode 100644
index 000000000000..1aec03c114d1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/pgbouncer.nix
@@ -0,0 +1,632 @@
+{ lib, pkgs, config, ... } :
+
+with lib;
+
+let
+  cfg = config.services.pgbouncer;
+
+  confFile = pkgs.writeTextFile {
+    name = "pgbouncer.ini";
+    text =  ''
+      [databases]
+      ${concatStringsSep "\n"
+      (mapAttrsToList (dbname : settings : "${dbname} = ${settings}") cfg.databases)}
+
+      [users]
+      ${concatStringsSep "\n"
+      (mapAttrsToList (username : settings : "${username} = ${settings}") cfg.users)}
+
+      [peers]
+      ${concatStringsSep "\n"
+      (mapAttrsToList (peerid : settings : "${peerid} = ${settings}") cfg.peers)}
+
+      [pgbouncer]
+      # general
+      ${optionalString (cfg.ignoreStartupParameters != null) "ignore_startup_parameters = ${cfg.ignoreStartupParameters}"}
+      listen_port = ${toString cfg.listenPort}
+      ${optionalString (cfg.listenAddress != null) "listen_addr = ${cfg.listenAddress}"}
+      pool_mode = ${cfg.poolMode}
+      max_client_conn = ${toString cfg.maxClientConn}
+      default_pool_size = ${toString cfg.defaultPoolSize}
+      max_user_connections = ${toString cfg.maxUserConnections}
+      max_db_connections = ${toString cfg.maxDbConnections}
+
+      #auth
+      auth_type = ${cfg.authType}
+      ${optionalString (cfg.authHbaFile != null) "auth_hba_file = ${cfg.authHbaFile}"}
+      ${optionalString (cfg.authFile != null) "auth_file = ${cfg.authFile}"}
+      ${optionalString (cfg.authUser != null) "auth_user = ${cfg.authUser}"}
+      ${optionalString (cfg.authQuery != null) "auth_query = ${cfg.authQuery}"}
+      ${optionalString (cfg.authDbname != null) "auth_dbname = ${cfg.authDbname}"}
+
+      # TLS
+      ${optionalString (cfg.tls.client != null) ''
+      client_tls_sslmode = ${cfg.tls.client.sslmode}
+      client_tls_key_file = ${cfg.tls.client.keyFile}
+      client_tls_cert_file = ${cfg.tls.client.certFile}
+      client_tls_ca_file = ${cfg.tls.client.caFile}
+      ''}
+      ${optionalString (cfg.tls.server != null) ''
+      server_tls_sslmode = ${cfg.tls.server.sslmode}
+      server_tls_key_file = ${cfg.tls.server.keyFile}
+      server_tls_cert_file = ${cfg.tls.server.certFile}
+      server_tls_ca_file = ${cfg.tls.server.caFile}
+      ''}
+
+      # log
+      ${optionalString (cfg.logFile != null) "logfile = ${cfg.homeDir}/${cfg.logFile}"}
+      ${optionalString (cfg.syslog != null) ''
+      syslog = ${if cfg.syslog.enable then "1" else "0"}
+      syslog_ident = ${cfg.syslog.syslogIdent}
+      syslog_facility = ${cfg.syslog.syslogFacility}
+      ''}
+      ${optionalString (cfg.verbose != null) "verbose = ${toString cfg.verbose}"}
+
+      # console access
+      ${optionalString (cfg.adminUsers != null) "admin_users = ${cfg.adminUsers}"}
+      ${optionalString (cfg.statsUsers != null) "stats_users = ${cfg.statsUsers}"}
+
+      # linux
+      pidfile = /run/pgbouncer/pgbouncer.pid
+
+      # extra
+      ${cfg.extraConfig}
+    '';
+  };
+
+in {
+
+  options.services.pgbouncer = {
+
+    # NixOS settings
+
+    enable = mkEnableOption (lib.mdDoc "PostgreSQL connection pooler");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.pgbouncer;
+      defaultText = literalExpression "pkgs.pgbouncer";
+      description = lib.mdDoc ''
+        The pgbouncer package to use.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to automatically open the specified TCP port in the firewall.
+      '';
+    };
+
+    # Generic settings
+
+    logFile = mkOption {
+      type = types.nullOr types.str;
+      default = "pgbouncer.log";
+      description = lib.mdDoc ''
+        Specifies the log file.
+        Either this or syslog has to be specified.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.nullOr types.commas;
+      example = "*";
+      default = null;
+      description = lib.mdDoc ''
+        Specifies a list (comma-separated) of addresses where to listen for TCP connections.
+        You may also use * meaning “listen on all addresses”.
+        When not set, only Unix socket connections are accepted.
+
+        Addresses can be specified numerically (IPv4/IPv6) or by name.
+      '';
+    };
+
+    listenPort = mkOption {
+      type = types.port;
+      default = 6432;
+      description = lib.mdDoc ''
+        Which port to listen on. Applies to both TCP and Unix sockets.
+      '';
+    };
+
+    poolMode = mkOption {
+      type = types.enum [ "session" "transaction" "statement" ];
+      default = "session";
+      description = lib.mdDoc ''
+        Specifies when a server connection can be reused by other clients.
+
+        session
+            Server is released back to pool after client disconnects. Default.
+        transaction
+            Server is released back to pool after transaction finishes.
+        statement
+            Server is released back to pool after query finishes.
+            Transactions spanning multiple statements are disallowed in this mode.
+      '';
+    };
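+
+    # For example (illustrative): transaction pooling is a common choice for web
+    # applications that open many short-lived connections.
+    #
+    #   services.pgbouncer.poolMode = "transaction";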
+
+    maxClientConn = mkOption {
+      type = types.int;
+      default = 100;
+      description = lib.mdDoc ''
+        Maximum number of client connections allowed.
+
+        When this setting is increased, the file descriptor limits in the operating system
+        might also have to be increased. Note that the number of file descriptors potentially
+        used is more than maxClientConn. If each user connects to the server under their own user name,
+        the theoretical maximum used is:
+        maxClientConn + (max pool_size * total databases * total users)
+
+        If a database user is specified in the connection string (all users connect under the same user name),
+        the theoretical maximum is:
+        maxClientConn + (max pool_size * total databases)
+
+        The theoretical maximum should never be reached, unless somebody deliberately crafts a special load for it.
+        Still, it means you should set the number of file descriptors to a safely high number.
+      '';
+    };
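+
+    # Worked example (illustrative numbers): with maxClientConn = 100,
+    # defaultPoolSize = 20, 2 databases and 3 users connecting under their own
+    # user names, the theoretical file-descriptor maximum is
+    # 100 + (20 * 2 * 3) = 220.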
+
+    defaultPoolSize = mkOption {
+      type = types.int;
+      default = 20;
+      description = lib.mdDoc ''
+        How many server connections to allow per user/database pair.
+        Can be overridden in the per-database configuration.
+      '';
+    };
+
+    maxDbConnections = mkOption {
+      type = types.int;
+      default = 0;
+      description = lib.mdDoc ''
+        Do not allow more than this many server connections per database (regardless of user).
+        This considers the PgBouncer database that the client has connected to,
+        not the PostgreSQL database of the outgoing connection.
+
+        This can also be set per database in the [databases] section.
+
+        Note that when you hit the limit, closing a client connection to one pool will
+        not immediately allow a server connection to be established for another pool,
+        because the server connection for the first pool is still open.
+        Once the server connection closes (due to idle timeout),
+        a new server connection will immediately be opened for the waiting pool.
+
+        0 = unlimited
+      '';
+    };
+
+    maxUserConnections = mkOption {
+      type = types.int;
+      default = 0;
+      description = lib.mdDoc ''
+        Do not allow more than this many server connections per user (regardless of database).
+        This considers the PgBouncer user that is associated with a pool,
+        which is either the user specified for the server connection
+        or in absence of that the user the client has connected as.
+
+        This can also be set per user in the [users] section.
+
+        Note that when you hit the limit, closing a client connection to one pool
+        will not immediately allow a server connection to be established for another pool,
+        because the server connection for the first pool is still open.
+        Once the server connection closes (due to idle timeout), a new server connection
+        will immediately be opened for the waiting pool.
+
+        0 = unlimited
+      '';
+    };
+
+    ignoreStartupParameters = mkOption {
+      type = types.nullOr types.commas;
+      example = "extra_float_digits";
+      default = null;
+      description = lib.mdDoc ''
+        By default, PgBouncer allows only parameters it can keep track of in startup packets:
+        client_encoding, datestyle, timezone and standard_conforming_strings.
+
+        All other parameters will raise an error.
+        To allow other parameters, they can be specified here, so that PgBouncer knows that
+        they are handled by the admin and it can ignore them.
+
+        If you need to specify multiple values, use a comma-separated list.
+
+        IMPORTANT: When using prometheus-pgbouncer-exporter, you need to add
+        extra_float_digits here:
+        <https://github.com/prometheus-community/pgbouncer_exporter#pgbouncer-configuration>
+      '';
+    };
+
+    # Section [databases]
+    databases = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = {
+        exampledb = "host=/run/postgresql/ port=5432 auth_user=exampleuser dbname=exampledb sslmode=require";
+        bardb = "host=localhost dbname=bazdb";
+        foodb  = "host=host1.example.com port=5432";
+      };
+      description = lib.mdDoc ''
+        Detailed information about PostgreSQL database definitions:
+        <https://www.pgbouncer.org/config.html#section-databases>
+      '';
+    };
+
+    # Section [users]
+    users = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = {
+        user1 = "pool_mode=session";
+      };
+      description = lib.mdDoc ''
+        Optional.
+
+        Detailed information about PostgreSQL user definitions:
+        <https://www.pgbouncer.org/config.html#section-users>
+      '';
+    };
+
+    # Section [peers]
+    peers = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = {
+        "1" = "host=host1.example.com";
+        "2" = "host=/tmp/pgbouncer-2 port=5555";
+      };
+      description = lib.mdDoc ''
+        Optional.
+
+        Detailed information about peer definitions:
+        <https://www.pgbouncer.org/config.html#section-peers>
+      '';
+    };
+
+    # Authentication settings
+    authType = mkOption {
+      type = types.enum [ "cert" "md5" "scram-sha-256" "plain" "trust" "any" "hba" "pam" ];
+      default = "md5";
+      description = lib.mdDoc ''
+        How to authenticate users.
+
+        cert
+            Client must connect over TLS connection with a valid client certificate.
+            The user name is then taken from the CommonName field from the certificate.
+        md5
+            Use MD5-based password check. This is the default authentication method.
+            authFile may contain both MD5-encrypted and plain-text passwords.
+            If md5 is configured and a user has a SCRAM secret, then SCRAM authentication is used automatically instead.
+        scram-sha-256
+            Use password check with SCRAM-SHA-256. authFile has to contain SCRAM secrets or plain-text passwords.
+        plain
+            The clear-text password is sent over the wire. Deprecated.
+        trust
+            No authentication is done. The user name must still exist in authFile.
+        any
+            Like the trust method, but the user name given is ignored.
+            Requires that all databases are configured to log in as a specific user.
+            Additionally, the console database allows any user to log in as admin.
+        hba
+            The actual authentication type is loaded from authHbaFile.
+            This allows different authentication methods for different access paths,
+            for example: connections over Unix socket use the peer auth method, connections over TCP must use TLS.
+        pam
+            PAM is used to authenticate users, authFile is ignored.
+            This method is not compatible with databases using the authUser option.
+            The service name reported to PAM is “pgbouncer”. pam is not supported in the HBA configuration file.
+      '';
+    };
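+
+    # A hedged sketch (illustrative; the file path is a placeholder you must
+    # provide yourself): SCRAM authentication with a userlist file.
+    #
+    #   services.pgbouncer = {
+    #     enable = true;
+    #     authType = "scram-sha-256";
+    #     authFile = "/secrets/pgbouncer_authfile";
+    #     databases.exampledb = "host=/run/postgresql port=5432 dbname=exampledb";
+    #   };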
+
+    authHbaFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/secrets/pgbouncer_hba";
+      description = lib.mdDoc ''
+        HBA configuration file to use when authType is hba.
+
+        See HBA file format details:
+        <https://www.pgbouncer.org/config.html#hba-file-format>
+      '';
+    };
+
+    authFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/secrets/pgbouncer_authfile";
+      description = lib.mdDoc ''
+        The name of the file to load user names and passwords from.
+
+        See section Authentication file format details:
+        <https://www.pgbouncer.org/config.html#authentication-file-format>
+
+        Most authentication types require that either authFile or authUser be set;
+        otherwise there would be no users defined.
+      '';
+    };
+
+    authUser = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "pgbouncer";
+      description = lib.mdDoc ''
+        If authUser is set, then any user not specified in authFile will be queried
+        through the authQuery query from pg_shadow in the database, using authUser.
+        The password of authUser will be taken from authFile.
+        (If the authUser does not require a password then it does not need to be defined in authFile.)
+
+        Direct access to pg_shadow requires admin rights.
+        It's preferable to use a non-superuser that calls a SECURITY DEFINER function instead.
+      '';
+    };
+
+    authQuery = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "SELECT usename, passwd FROM pg_shadow WHERE usename=$1";
+      description = lib.mdDoc ''
+        Query to load user's password from database.
+
+        Direct access to pg_shadow requires admin rights.
+        It's preferable to use a non-superuser that calls a SECURITY DEFINER function instead.
+
+        Note that the query is run inside the target database.
+        So if a function is used, it needs to be installed into each database.
+      '';
+    };
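+
+    # Sketch (illustrative; assumes a SECURITY DEFINER lookup function named
+    # user_lookup already exists in each target database):
+    #
+    #   services.pgbouncer.authUser = "pgbouncer";
+    #   services.pgbouncer.authQuery = "SELECT usename, passwd FROM user_lookup($1)";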
+
+    authDbname = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "authdb";
+      description = lib.mdDoc ''
+        Database name in the [databases] section to be used for authentication purposes.
+        This option can be either global or overridden in the connection string if this parameter is specified.
+      '';
+    };
+
+    # TLS settings
+    tls.client = mkOption {
+      type = types.nullOr (types.submodule {
+        options = {
+          sslmode = mkOption {
+            type = types.enum [ "disable" "allow" "prefer" "require" "verify-ca" "verify-full" ];
+            default = "disable";
+            description = lib.mdDoc ''
+              TLS mode to use for connections from clients.
+              TLS connections are disabled by default.
+
+              When enabled, tls.client.keyFile and tls.client.certFile
+              must be also configured to set up the key and certificate
+              PgBouncer uses to accept client connections.
+
+              disable
+                  Plain TCP. If client requests TLS, it's ignored. Default.
+              allow
+                  If client requests TLS, it is used. If not, plain TCP is used.
+                  If the client presents a client certificate, it is not validated.
+              prefer
+                  Same as allow.
+              require
+                  Client must use TLS. If not, the client connection is rejected.
+                  If the client presents a client certificate, it is not validated.
+              verify-ca
+                  Client must use TLS with valid client certificate.
+              verify-full
+                  Same as verify-ca.
+            '';
+          };
+          certFile = mkOption {
+            type = types.path;
+            example = "/secrets/pgbouncer.key";
+            description = lib.mdDoc "Path to certificate for private key. Clients can validate it";
+          };
+          keyFile = mkOption {
+            type = types.path;
+            example = "/secrets/pgbouncer.crt";
+            description = lib.mdDoc "Path to private key for PgBouncer to accept client connections";
+          };
+          caFile = mkOption {
+            type = types.path;
+            example = "/secrets/pgbouncer.crt";
+            description = lib.mdDoc "Path to root certificate file to validate client certificates";
+          };
+        };
+      });
+      default = null;
+      description = lib.mdDoc ''
+        <https://www.pgbouncer.org/config.html#tls-settings>
+      '';
+    };
+
+    tls.server = mkOption {
+      type = types.nullOr (types.submodule {
+        options = {
+          sslmode = mkOption {
+            type = types.enum [ "disable" "allow" "prefer" "require" "verify-ca" "verify-full" ];
+            default = "disable";
+            description = lib.mdDoc ''
+              TLS mode to use for connections to PostgreSQL servers.
+              TLS connections are disabled by default.
+
+              disable
+                  Plain TCP. TLS is not even requested from the server. Default.
+              allow
+                  FIXME: if server rejects plain, try TLS?
+              prefer
+                  TLS connection is always requested first from PostgreSQL.
+                  If refused, the connection will be established over plain TCP.
+                  Server certificate is not validated.
+              require
+                  Connection must go over TLS. If server rejects it, plain TCP is not attempted.
+                  Server certificate is not validated.
+              verify-ca
+                  Connection must go over TLS and server certificate must be valid according to tls.server.caFile.
+                  Server host name is not checked against certificate.
+              verify-full
+                  Connection must go over TLS and server certificate must be valid according to tls.server.caFile.
+                  Server host name must match certificate information.
+            '';
+          };
+          certFile = mkOption {
+            type = types.path;
+            example = "/secrets/pgbouncer_server.key";
+            description = lib.mdDoc "Certificate for private key. PostgreSQL server can validate it.";
+          };
+          keyFile = mkOption {
+            type = types.path;
+            example = "/secrets/pgbouncer_server.crt";
+            description = lib.mdDoc "Private key for PgBouncer to authenticate against PostgreSQL server.";
+          };
+          caFile = mkOption {
+            type = types.path;
+            example = "/secrets/pgbouncer_server.crt";
+            description = lib.mdDoc "Root certificate file to validate PostgreSQL server certificates.";
+          };
+        };
+      });
+      default = null;
+      description = lib.mdDoc ''
+        <https://www.pgbouncer.org/config.html#tls-settings>
+      '';
+    };
+
+    # Log settings
+    syslog = mkOption {
+      type = types.nullOr (types.submodule {
+        options = {
+          enable = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Toggles syslog on/off.
+            '';
+          };
+          syslogIdent = mkOption {
+            type = types.str;
+            default = "pgbouncer";
+            description = lib.mdDoc ''
+              Under what name to send logs to syslog.
+            '';
+          };
+          syslogFacility = mkOption {
+            type = types.enum [ "auth" "authpriv" "daemon" "user" "local0" "local1" "local2" "local3" "local4" "local5" "local6" "local7" ];
+            default = "daemon";
+            description = lib.mdDoc ''
+              Under what facility to send logs to syslog.
+            '';
+          };
+        };
+      });
+      default = null;
+      description = lib.mdDoc ''
+        <https://www.pgbouncer.org/config.html#log-settings>
+      '';
+    };
+
+    verbose = lib.mkOption {
+      type = lib.types.int;
+      default = 0;
+      description = lib.mdDoc ''
+        Increase verbosity. Mirrors the "-v" switch on the command line.
+      '';
+    };
+
+    # Console access control
+    adminUsers = mkOption {
+      type = types.nullOr types.commas;
+      default = null;
+      description = lib.mdDoc ''
+        Comma-separated list of database users that are allowed to connect and run all commands on the console.
+        Ignored when authType is any, in which case any user name is allowed in as admin.
+      '';
+    };
+
+    statsUsers = mkOption {
+      type = types.nullOr types.commas;
+      default = null;
+      description = lib.mdDoc ''
+        Comma-separated list of database users that are allowed to connect and run read-only queries on the console.
+        That means all SHOW commands except SHOW FDS.
+      '';
+    };
+
+    # Linux settings
+    openFilesLimit = lib.mkOption {
+      type = lib.types.int;
+      default = 65536;
+      description = lib.mdDoc ''
+        Maximum number of open files.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "pgbouncer";
+      description = lib.mdDoc ''
+        The user pgbouncer is run as.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "pgbouncer";
+      description = lib.mdDoc ''
+        The group pgbouncer is run as.
+      '';
+    };
+
+    homeDir = mkOption {
+      type = types.path;
+      default = "/var/lib/pgbouncer";
+      description = lib.mdDoc ''
+        Specifies the home directory.
+      '';
+    };
+
+    # Extra settings
+    extraConfig = mkOption {
+      type = types.lines;
+      description = lib.mdDoc ''
+        Any additional text to be appended to config.ini.
+        See <https://www.pgbouncer.org/config.html>.
+      '';
+      default = "";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.${cfg.group} = { };
+    users.users.${cfg.user} = {
+      description = "PgBouncer service user";
+      group = cfg.group;
+      home = cfg.homeDir;
+      createHome = true;
+      isSystemUser = true;
+    };
+
+    systemd.services.pgbouncer = {
+      description = "PgBouncer - PostgreSQL connection pooler";
+      wants    = [ "postgresql.service" ];
+      after    = [ "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "forking";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${pkgs.pgbouncer}/bin/pgbouncer -d ${confFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
+        RuntimeDirectory = "pgbouncer";
+        PIDFile = "/run/pgbouncer/pgbouncer.pid";
+        LimitNOFILE = cfg.openFilesLimit;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
+
+  };
+
+  meta.maintainers = [ maintainers._1000101 ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/databases/pgmanage.nix b/nixpkgs/nixos/modules/services/databases/pgmanage.nix
new file mode 100644
index 000000000000..a0933a5ffc45
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/pgmanage.nix
@@ -0,0 +1,207 @@
+{ lib, pkgs, config, ... } :
+
+with lib;
+
+let
+  cfg = config.services.pgmanage;
+
+  confFile = pkgs.writeTextFile {
+    name = "pgmanage.conf";
+    text =  ''
+      connection_file = ${pgmanageConnectionsFile}
+
+      allow_custom_connections = ${builtins.toJSON cfg.allowCustomConnections}
+
+      pgmanage_port = ${toString cfg.port}
+
+      super_only = ${builtins.toJSON cfg.superOnly}
+
+      ${optionalString (cfg.loginGroup != null) "login_group = ${cfg.loginGroup}"}
+
+      login_timeout = ${toString cfg.loginTimeout}
+
+      web_root = ${cfg.package}/etc/pgmanage/web_root
+
+      sql_root = ${cfg.sqlRoot}
+
+      ${optionalString (cfg.tls != null) ''
+      tls_cert = ${cfg.tls.cert}
+      tls_key = ${cfg.tls.key}
+      ''}
+
+      log_level = ${cfg.logLevel}
+    '';
+  };
+
+  pgmanageConnectionsFile = pkgs.writeTextFile {
+    name = "pgmanage-connections.conf";
+    text = concatStringsSep "\n"
+      (mapAttrsToList (name : conn : "${name}: ${conn}") cfg.connections);
+  };
+
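+  # Name of the system user and group that the pgmanage service runs as.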
+  pgmanage = "pgmanage";
+
+in {
+
+  options.services.pgmanage = {
+    enable = mkEnableOption (lib.mdDoc "PostgreSQL Administration for the web");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.pgmanage;
+      defaultText = literalExpression "pkgs.pgmanage";
+      description = lib.mdDoc ''
+        The pgmanage package to use.
+      '';
+    };
+
+    connections = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = {
+        nuc-server  = "hostaddr=192.168.0.100 port=5432 dbname=postgres";
+        mini-server = "hostaddr=127.0.0.1 port=5432 dbname=postgres sslmode=require";
+      };
+      description = lib.mdDoc ''
+        pgmanage requires that at least one PostgreSQL server be defined.
+
+        Detailed information about PostgreSQL connection strings is available at:
+        <https://www.postgresql.org/docs/current/libpq-connect.html>
+
+        Note that you should not specify your user name or password. That
+        information will be entered on the login screen. If you specify a
+        username or password, it will be removed by pgmanage before attempting to
+        connect to a database.
+      '';
+    };
+
+    allowCustomConnections = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        This tells pgmanage whether or not to allow anyone to use a custom
+        connection from the login screen.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc ''
+        This tells pgmanage what port to listen on for browser requests.
+      '';
+    };
+
+    localOnly = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        This tells pgmanage whether or not to set the listening socket to local
+        addresses only.
+      '';
+    };
+
+    superOnly = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        This tells pgmanage whether or not to only allow super users to
+        log in. The recommended value is true and will restrict users who are not
+        super users from logging in to any PostgreSQL instance through
+        pgmanage. Note that a connection will be made to PostgreSQL in order to
+        test if the user is a superuser.
+      '';
+    };
+
+    loginGroup = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        This tells pgmanage to only allow users in a certain PostgreSQL group to
+        log in to pgmanage. Note that a connection will be made to PostgreSQL in
+        order to test if the user is a member of the login group.
+      '';
+    };
+
+    loginTimeout = mkOption {
+      type = types.int;
+      default = 3600;
+      description = lib.mdDoc ''
+        Number of seconds of inactivity before user is automatically logged
+        out.
+      '';
+    };
+
+    sqlRoot = mkOption {
+      type = types.str;
+      default = "/var/lib/pgmanage";
+      description = lib.mdDoc ''
+        This tells pgmanage where to put the SQL file history. All tabs are saved
+        to this location so that if you get disconnected from pgmanage you
+        don't lose your work.
+      '';
+    };
+
+    tls = mkOption {
+      type = types.nullOr (types.submodule {
+        options = {
+          cert = mkOption {
+            type = types.str;
+            description = lib.mdDoc "TLS certificate";
+          };
+          key = mkOption {
+            type = types.str;
+            description = lib.mdDoc "TLS key";
+          };
+        };
+      });
+      default = null;
+      description = lib.mdDoc ''
+        These options tell pgmanage where the TLS Certificate and Key files
+        reside. If you use these options then you'll only be able to access
+        pgmanage through a secure TLS connection. These options are only
+        necessary if you wish to connect directly to pgmanage using a secure TLS
+        connection. As an alternative, you can set up pgmanage in a reverse proxy
+        configuration. This allows your web server to terminate the secure
+        connection and pass on the request to pgmanage. You can find help to set
+        up this configuration in:
+        <https://github.com/pgManage/pgManage/blob/master/INSTALL_NGINX.md>
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["error" "warn" "notice" "info"];
+      default = "error";
+      description = lib.mdDoc ''
+        Verbosity of logs.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.pgmanage = {
+      description = "pgmanage - PostgreSQL Administration for the web";
+      wants    = [ "postgresql.service" ];
+      after    = [ "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User         = pgmanage;
+        Group        = pgmanage;
+        ExecStart    = "${cfg.package}/sbin/pgmanage -c ${confFile}" +
+                       optionalString cfg.localOnly " --local-only=true";
+      };
+    };
+    users = {
+      users.${pgmanage} = {
+        name  = pgmanage;
+        group = pgmanage;
+        home  = cfg.sqlRoot;
+        createHome = true;
+        isSystemUser = true;
+      };
+      groups.${pgmanage} = {
+        name = pgmanage;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/postgresql.md b/nixpkgs/nixos/modules/services/databases/postgresql.md
new file mode 100644
index 000000000000..e5e0b7efec29
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/postgresql.md
@@ -0,0 +1,329 @@
+# PostgreSQL {#module-postgresql}
+
+<!-- FIXME: render nicely -->
+<!-- FIXME: source can be added automatically -->
+
+*Source:* {file}`modules/services/databases/postgresql.nix`
+
+*Upstream documentation:* <https://www.postgresql.org/docs/>
+
+<!-- FIXME: more stuff, like maintainer? -->
+
+PostgreSQL is an advanced, free relational database.
+<!-- MORE -->
+
+## Configuring {#module-services-postgres-configuring}
+
+To enable PostgreSQL, add the following to your {file}`configuration.nix`:
+```
+services.postgresql.enable = true;
+services.postgresql.package = pkgs.postgresql_15;
+```
+Note that you are required to specify the desired version of PostgreSQL (e.g. `pkgs.postgresql_15`). Since upgrading your PostgreSQL version requires a database dump and reload (see below), NixOS cannot provide a default value for [](#opt-services.postgresql.package) such as the most recent release of PostgreSQL.
+
+<!--
+After running {command}`nixos-rebuild`, you can verify
+whether PostgreSQL works by running {command}`psql`:
+
+```ShellSession
+$ psql
+psql (9.2.9)
+Type "help" for help.
+
+alice=>
+```
+-->
+
+By default, PostgreSQL stores its databases in {file}`/var/lib/postgresql/$psqlSchema`. You can override this using [](#opt-services.postgresql.dataDir), e.g.
+```
+services.postgresql.dataDir = "/data/postgresql";
+```
+
+## Initializing {#module-services-postgres-initializing}
+
+As of NixOS 23.11,
+`services.postgresql.ensureUsers.*.ensurePermissions` has been
+deprecated, after a change to default permissions in PostgreSQL 15
+invalidated most of its previous use cases:
+
+- In psql < 15, `ALL PRIVILEGES` used to include `CREATE TABLE`, whereas
+  in psql >= 15 that is a separate permission
+- psql >= 15 instead gives only the database owner create permissions
+- Even on psql < 15 (or databases migrated to >= 15), it is
+  recommended to manually assign permissions along these lines
+  - https://www.postgresql.org/docs/release/15.0/
+  - https://www.postgresql.org/docs/15/ddl-schemas.html#DDL-SCHEMAS-PRIV
+
+### Assigning ownership {#module-services-postgres-initializing-ownership}
+
+Usually, the database owner should be a database user of the same
+name. This can be done with
+`services.postgresql.ensureUsers.*.ensureDBOwnership = true;`.
+
+If the database user name equals the connecting system user name,
+postgres by default will accept a passwordless connection via Unix
+domain socket. This makes it possible to run many postgres-backed
+services without creating any database secrets at all.
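+
+For example, a minimal sketch of this pattern (the name `service1` is purely
+illustrative) that creates a database together with a same-named user owning it:
+
+```
+services.postgresql = {
+  enable = true;
+  ensureDatabases = [ "service1" ];
+  ensureUsers = [
+    {
+      name = "service1";
+      ensureDBOwnership = true;
+    }
+  ];
+};
+```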
+
+### Assigning extra permissions {#module-services-postgres-initializing-extra-permissions}
+
+For many cases, it will be enough to have the database user be the
+owner. Until `services.postgresql.ensureUsers.*.ensurePermissions` has
+been re-thought, if more users need access to the database, please use
+one of the following approaches:
+
+**WARNING:** `services.postgresql.initialScript` is not recommended
+as a replacement for `ensurePermissions`, as it is *only run on the first
+start of PostgreSQL*.
+
+**NOTE:** all of these methods may be obsoleted when `ensure*` is
+reworked, but it is expected that they will stay viable for running
+database migrations.
+
+**NOTE:** please make sure that any added migrations are idempotent (re-runnable).
+
+#### as superuser {#module-services-postgres-initializing-extra-permissions-superuser}
+
+**Advantage:** compatible with postgres < 15, because it's run
+as the database superuser `postgres`.
+
+##### in database `postStart` {#module-services-postgres-initializing-extra-permissions-superuser-post-start}
+
+**Disadvantage:** need to take care of ordering yourself. In this
+example, `mkAfter` ensures that permissions are assigned after any
+databases from `ensureDatabases` and `extraUser1` from `ensureUsers`
+are already created.
+
+```nix
+    systemd.services.postgresql.postStart = lib.mkAfter ''
+      $PSQL service1 -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+      $PSQL service1 -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+      # ....
+    '';
+```
+
+##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-superuser-oneshot}
+
+```nix
+    systemd.services."migrate-service1-db1" = {
+      serviceConfig.Type = "oneshot";
+      requiredBy = "service1.service";
+      before = "service1.service";
+      after = "postgresql.service";
+      serviceConfig.User = "postgres";
+      environment.PSQL = "psql --port=${toString services.postgresql.port}";
+      path = [ postgresql ];
+      script = ''
+        $PSQL service1 -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL service1 -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
+#### as service user {#module-services-postgres-initializing-extra-permissions-service-user}
+
+**Advantage:** re-uses systemd's dependency ordering;
+
+**Disadvantage:** relies on the service user having grant permission. To be combined with `ensureDBOwnership`.
+
+##### in service `preStart` {#module-services-postgres-initializing-extra-permissions-service-user-pre-start}
+
+```nix
+    systemd.services."service1" = {
+      environment.PSQL = "psql --port=${toString config.services.postgresql.port}";
+      path = [ config.services.postgresql.package ];
+      preStart = ''
+        $PSQL -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
+##### in intermediate oneshot service {#module-services-postgres-initializing-extra-permissions-service-user-oneshot}
+
+```nix
+    systemd.services."migrate-service1-db1" = {
+      serviceConfig.Type = "oneshot";
+      requiredBy = "service1.service";
+      before = "service1.service";
+      after = "postgresql.service";
+      serviceConfig.User = "service1";
+      environment.PSQL = "psql --port=${toString services.postgresql.port}";
+      path = [ postgresql ];
+      script = ''
+        $PSQL -c 'GRANT SELECT ON ALL TABLES IN SCHEMA public TO "extraUser1"'
+        $PSQL -c 'GRANT SELECT ON ALL SEQUENCES IN SCHEMA public TO "extraUser1"'
+        # ....
+      '';
+    };
+```
+
+## Upgrading {#module-services-postgres-upgrading}
+
+::: {.note}
+The steps below demonstrate how to upgrade from an older version to `pkgs.postgresql_13`.
+These instructions are also applicable to other versions.
+:::
+
+Major PostgreSQL upgrades require downtime and a few imperative steps, because
+the on-disk state of the databases changes between major versions. Because of that,
+NixOS places the state into {file}`/var/lib/postgresql/<version>`, where each `version`
+can be obtained like this:
+```
+$ nix-instantiate --eval -A postgresql_13.psqlSchema
+"13"
+```
+For an upgrade, a script like this can be used to simplify the process:
+```
+{ config, pkgs, ... }:
+{
+  environment.systemPackages = [
+    (let
+      # XXX specify the postgresql package you'd like to upgrade to.
+      # Do not forget to list the extensions you need.
+      newPostgres = pkgs.postgresql_13.withPackages (pp: [
+        # pp.plv8
+      ]);
+    in pkgs.writeScriptBin "upgrade-pg-cluster" ''
+      set -eux
+      # XXX it's perhaps advisable to stop all services that depend on postgresql
+      systemctl stop postgresql
+
+      export NEWDATA="/var/lib/postgresql/${newPostgres.psqlSchema}"
+
+      export NEWBIN="${newPostgres}/bin"
+
+      export OLDDATA="${config.services.postgresql.dataDir}"
+      export OLDBIN="${config.services.postgresql.package}/bin"
+
+      install -d -m 0700 -o postgres -g postgres "$NEWDATA"
+      cd "$NEWDATA"
+      sudo -u postgres $NEWBIN/initdb -D "$NEWDATA"
+
+      sudo -u postgres $NEWBIN/pg_upgrade \
+        --old-datadir "$OLDDATA" --new-datadir "$NEWDATA" \
+        --old-bindir $OLDBIN --new-bindir $NEWBIN \
+        "$@"
+    '')
+  ];
+}
+```
+
+The upgrade process is:
+
+  1. Rebuild the NixOS configuration with the snippet above added to your {file}`configuration.nix`. Alternatively, add it to a separate file and reference it in the `imports` list.
+  2. Log in as root (`sudo su -`)
+  3. Run `upgrade-pg-cluster`. It will stop the old PostgreSQL cluster, initialize a new one and migrate the old data to it. You may supply arguments like `--jobs 4` and `--link` to speed up the migration process. See <https://www.postgresql.org/docs/current/pgupgrade.html> for details.
+  4. Change the PostgreSQL package in your NixOS configuration to the one you upgraded to via [](#opt-services.postgresql.package). Rebuild NixOS. This should start the new PostgreSQL using the upgraded data directory, as well as all services you stopped during the upgrade.
+  5. After the upgrade it's advisable to analyze the new cluster.
+
+       - For PostgreSQL ≥ 14, use the `vacuumdb` command printed by the upgrade script.
+       - For PostgreSQL < 14, run (as `su -l postgres` in the [](#opt-services.postgresql.dataDir), in this example {file}`/var/lib/postgresql/13`):
+
+         ```
+         $ ./analyze_new_cluster.sh
+         ```
+
+     ::: {.warning}
+     The next step removes the old state-directory!
+     :::
+
+     ```
+     $ ./delete_old_cluster.sh
+     ```
+
+## Options {#module-services-postgres-options}
+
+A complete list of options for the PostgreSQL module may be found [here](#opt-services.postgresql.enable).
+
+## Plugins {#module-services-postgres-plugins}
+
+The plugin collection for each PostgreSQL version can be accessed with `.pkgs`. For example, for the `pkgs.postgresql_15` package, its plugin collection is accessed as `pkgs.postgresql_15.pkgs`:
+```ShellSession
+$ nix repl '<nixpkgs>'
+
+Loading '<nixpkgs>'...
+Added 10574 variables.
+
+nix-repl> postgresql_15.pkgs.<TAB><TAB>
+postgresql_15.pkgs.cstore_fdw        postgresql_15.pkgs.pg_repack
+postgresql_15.pkgs.pg_auto_failover  postgresql_15.pkgs.pg_safeupdate
+postgresql_15.pkgs.pg_bigm           postgresql_15.pkgs.pg_similarity
+postgresql_15.pkgs.pg_cron           postgresql_15.pkgs.pg_topn
+postgresql_15.pkgs.pg_hll            postgresql_15.pkgs.pgjwt
+postgresql_15.pkgs.pg_partman        postgresql_15.pkgs.pgroonga
+...
+```
+
+To add plugins via NixOS configuration, set `services.postgresql.extraPlugins`:
+```
+services.postgresql.package = pkgs.postgresql_12;
+services.postgresql.extraPlugins = with pkgs.postgresql_12.pkgs; [
+  pg_repack
+  postgis
+];
+```
+
+You can build a custom PostgreSQL-with-plugins package (to be used outside of NixOS) using the `.withPackages` function. For example, creating a custom PostgreSQL package in an overlay can look like this:
+```
+self: super: {
+  postgresql_custom = self.postgresql_12.withPackages (ps: [
+    ps.pg_repack
+    ps.postgis
+  ]);
+}
+```
+
+Here's a recipe on how to override a particular plugin through an overlay:
+```
+self: super: {
+  postgresql_15 = super.postgresql_15.override { this = self.postgresql_15; } // {
+    pkgs = super.postgresql_15.pkgs // {
+      pg_repack = super.postgresql_15.pkgs.pg_repack.overrideAttrs (_: {
+        name = "pg_repack-v20181024";
+        src = self.fetchzip {
+          url = "https://github.com/reorg/pg_repack/archive/923fa2f3c709a506e111cc963034bf2fd127aa00.tar.gz";
+          sha256 = "17k6hq9xaax87yz79j773qyigm4fwk8z4zh5cyp6z0sxnwfqxxw5";
+        };
+      });
+    };
+  };
+}
+```
+
+## JIT (Just-In-Time compilation) {#module-services-postgres-jit}
+
+[JIT](https://www.postgresql.org/docs/current/jit-reason.html)-support in the PostgreSQL package
+is disabled by default because of the ~300MiB closure-size increase from the LLVM dependency. It
+can be optionally enabled in PostgreSQL with the following config option:
+
+```nix
+{
+  services.postgresql.enableJIT = true;
+}
+```
+
+This makes sure that the [`jit`](https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-JIT)-setting
+is set to `on` and a PostgreSQL package with JIT enabled is used. Further tweaking of the JIT compiler, e.g. setting a different
+query cost threshold via [`jit_above_cost`](https://www.postgresql.org/docs/current/runtime-config-query.html#GUC-JIT-ABOVE-COST)
+can be done manually via [`services.postgresql.settings`](#opt-services.postgresql.settings).
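+
+For example, a small sketch (the threshold value is purely illustrative) that enables JIT
+and raises the planner cost above which queries are JIT-compiled:
+
+```nix
+{
+  services.postgresql = {
+    enableJIT = true;
+    # Only JIT-compile queries whose estimated cost exceeds this value.
+    settings.jit_above_cost = 500000;
+  };
+}
+```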
+
+The attribute-names of JIT-enabled PostgreSQL packages are suffixed with `_jit`, i.e. for each `pkgs.postgresql`
+(and `pkgs.postgresql_<major>`) in `nixpkgs` there's also a `pkgs.postgresql_jit` (and `pkgs.postgresql_<major>_jit`).
+Alternatively, a JIT-enabled variant can be derived from a given `postgresql` package via `postgresql.withJIT`.
+This is also useful if it's not clear which attribute from `nixpkgs` was originally used (e.g. when working with
+[`config.services.postgresql.package`](#opt-services.postgresql.package) or if the package was modified via an
+overlay) since all modifications are propagated to `withJIT`. I.e.
+
+```nix
+with import <nixpkgs> {
+  overlays = [
+    (self: super: {
+      postgresql = super.postgresql.overrideAttrs (_: { pname = "foobar"; });
+    })
+  ];
+};
+postgresql.withJIT.pname
+```
+
+evaluates to `"foobar"`.
diff --git a/nixpkgs/nixos/modules/services/databases/postgresql.nix b/nixpkgs/nixos/modules/services/databases/postgresql.nix
new file mode 100644
index 000000000000..a9067d5974a9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/postgresql.nix
@@ -0,0 +1,650 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.postgresql;
+
+  postgresql =
+    let
+      # ensure that
+      #   services.postgresql = {
+      #     enableJIT = true;
+      #     package = pkgs.postgresql_<major>;
+      #   };
+      # works.
+      base = if cfg.enableJIT && !cfg.package.jitSupport then cfg.package.withJIT else cfg.package;
+    in
+    if cfg.extraPlugins == []
+      then base
+      else base.withPackages (_: cfg.extraPlugins);
+
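+  # Render a settings value in postgresql.conf syntax: booleans become yes/no,
+  # strings are single-quoted with embedded quotes doubled, everything else via toString.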
+  toStr = value:
+    if true == value then "yes"
+    else if false == value then "no"
+    else if isString value then "'${lib.replaceStrings ["'"] ["''"] value}'"
+    else toString value;
+
+  # The main PostgreSQL configuration file.
+  configFile = pkgs.writeTextDir "postgresql.conf" (concatStringsSep "\n" (mapAttrsToList (n: v: "${n} = ${toStr v}") cfg.settings));
+
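+  # Build-time sanity check: ask the postgres binary to parse the generated configuration file.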
+  configFileCheck = pkgs.runCommand "postgresql-configfile-check" {} ''
+    ${cfg.package}/bin/postgres -D${configFile} -C config_file >/dev/null
+    touch $out
+  '';
+
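+  # Group access to the data directory (StateDirectoryMode 0750 below) is only supported from PostgreSQL 11 on.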
+  groupAccessAvailable = versionAtLeast postgresql.version "11.0";
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "postgresql" "extraConfig" ] "Use services.postgresql.settings instead.")
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.postgresql = {
+
+      enable = mkEnableOption (lib.mdDoc "PostgreSQL Server");
+
+      enableJIT = mkEnableOption (lib.mdDoc "JIT support");
+
+      package = mkOption {
+        type = types.package;
+        example = literalExpression "pkgs.postgresql_15";
+        description = lib.mdDoc ''
+          PostgreSQL package to use.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5432;
+        description = lib.mdDoc ''
+          The port on which PostgreSQL listens.
+        '';
+      };
+
+      checkConfig = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Check the syntax of the configuration file at compile time";
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        defaultText = literalExpression ''"/var/lib/postgresql/''${config.services.postgresql.package.psqlSchema}"'';
+        example = "/var/lib/postgresql/15";
+        description = lib.mdDoc ''
+          The data directory for PostgreSQL. If left as the default value,
+          this directory will automatically be created before the PostgreSQL server starts; otherwise,
+          the sysadmin is responsible for ensuring the directory exists with appropriate ownership
+          and permissions.
+        '';
+      };
+
+      authentication = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Defines how users authenticate themselves to the server. See the
+          [PostgreSQL documentation for pg_hba.conf](https://www.postgresql.org/docs/current/auth-pg-hba-conf.html)
+          for details on the expected format of this option. By default,
+          peer based authentication will be used for users connecting
+          via the Unix socket, and md5 password authentication will be
+          used for users connecting via TCP. Any added rules will be
+          inserted above the default rules. If you'd like to replace the
+          default rules entirely, you can use `lib.mkForce` in your
+          module.
+        '';
+      };
+
+      identMap = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          map-name-0 system-username-0 database-username-0
+          map-name-1 system-username-1 database-username-1
+        '';
+        description = lib.mdDoc ''
+          Defines the mapping from system users to database users.
+
+          See the [auth doc](https://postgresql.org/docs/current/auth-username-maps.html).
+        '';
+      };
+
+      initdbArgs = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "--data-checksums" "--allow-group-access" ];
+        description = lib.mdDoc ''
+          Additional arguments passed to `initdb` during data dir
+          initialisation.
+        '';
+      };
+
+      initialScript = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = literalExpression ''
+          pkgs.writeText "init-sql-script" '''
+            alter user postgres with password 'myPassword';
+          ''';'';
+
+        description = lib.mdDoc ''
+          A file containing SQL statements to execute on first startup.
+        '';
+      };
+
+      ensureDatabases = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Ensures that the specified databases exist.
+          This option will never delete existing databases, especially not when the value of this
+          option is changed. This means that databases created once through this option or
+          otherwise have to be removed manually.
+        '';
+        example = [
+          "gitea"
+          "nextcloud"
+        ];
+      };
+
+      ensureUsers = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            name = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                Name of the user to ensure.
+              '';
+            };
+
+            ensurePermissions = mkOption {
+              type = types.attrsOf types.str;
+              default = {};
+              visible = false; # This option has been deprecated.
+              description = lib.mdDoc ''
+                This option is DEPRECATED and should not be used in nixpkgs anymore,
+                use `ensureDBOwnership` instead. It can also break with newer
+                versions of PostgreSQL (≥ 15).
+
+                Permissions to ensure for the user, specified as an attribute set.
+                The attribute names specify the database and tables to grant the permissions for.
+                The attribute values specify the permissions to grant. You may specify one or
+                multiple comma-separated SQL privileges here.
+
+                For more information on how to specify the target
+                and on which privileges exist, see the
+                [GRANT syntax](https://www.postgresql.org/docs/current/sql-grant.html).
+                The attributes are used as `GRANT ''${attrValue} ON ''${attrName}`.
+              '';
+              example = literalExpression ''
+                {
+                  "DATABASE \"nextcloud\"" = "ALL PRIVILEGES";
+                  "ALL TABLES IN SCHEMA public" = "ALL PRIVILEGES";
+                }
+              '';
+            };
+
+            ensureDBOwnership = mkOption {
+              type = types.bool;
+              default = false;
+              description = mdDoc ''
+                Grants the user ownership to a database with the same name.
+                This database must be defined manually in
+                [](#opt-services.postgresql.ensureDatabases).
+              '';
+            };
+
+            ensureClauses = mkOption {
+              description = lib.mdDoc ''
+                An attrset of clauses to grant to the user. Under the hood this uses the
+                [ALTER USER syntax](https://www.postgresql.org/docs/current/sql-alteruser.html) for each attrName where
+                the attrValue is true in the attrSet:
+                `ALTER USER user.name WITH attrName`
+              '';
+              example = literalExpression ''
+                {
+                  superuser = true;
+                  createrole = true;
+                  createdb = true;
+                }
+              '';
+              default = {};
+              defaultText = lib.literalMD ''
+                The default, `null`, means that the user created will have the default permissions assigned by PostgreSQL. Subsequent server starts will not set or unset the clause, so imperative changes are preserved.
+              '';
+              type = types.submodule {
+                options = let
+                  defaultText = lib.literalMD ''
+                    `null`: do not set. For newly created roles, use PostgreSQL's default. For existing roles, do not touch this clause.
+                  '';
+                in {
+                  superuser = mkOption {
+                    type = types.nullOr types.bool;
+                    description = lib.mdDoc ''
+                      Grants the user, created by the ensureUser attr, superuser permissions. From the postgres docs:
+
+                      A database superuser bypasses all permission checks,
+                      except the right to log in. This is a dangerous privilege
+                      and should not be used carelessly; it is best to do most
+                      of your work as a role that is not a superuser. To create
+                      a new database superuser, use CREATE ROLE name SUPERUSER.
+                      You must do this as a role that is already a superuser.
+
+                      More information on postgres roles can be found [here](https://www.postgresql.org/docs/current/role-attributes.html)
+                    '';
+                    default = null;
+                    inherit defaultText;
+                  };
+                  createrole = mkOption {
+                    type = types.nullOr types.bool;
+                    description = lib.mdDoc ''
+                      Grants the user, created by the ensureUser attr, createrole permissions. From the postgres docs:
+
+                      A role must be explicitly given permission to create more
+                      roles (except for superusers, since those bypass all
+                      permission checks). To create such a role, use CREATE
+                      ROLE name CREATEROLE. A role with CREATEROLE privilege
+                      can alter and drop other roles, too, as well as grant or
+                      revoke membership in them. However, to create, alter,
+                      drop, or change membership of a superuser role, superuser
+                      status is required; CREATEROLE is insufficient for that.
+
+                      More information on postgres roles can be found [here](https://www.postgresql.org/docs/current/role-attributes.html)
+                    '';
+                    default = null;
+                    inherit defaultText;
+                  };
+                  createdb = mkOption {
+                    type = types.nullOr types.bool;
+                    description = lib.mdDoc ''
+                      Grants the user, created by the ensureUser attr, createdb permissions. From the postgres docs:
+
+                      A role must be explicitly given permission to create
+                      databases (except for superusers, since those bypass all
+                      permission checks). To create such a role, use CREATE
+                      ROLE name CREATEDB.
+
+                      More information on postgres roles can be found [here](https://www.postgresql.org/docs/current/role-attributes.html)
+                    '';
+                    default = null;
+                    inherit defaultText;
+                  };
+                  "inherit" = mkOption {
+                    type = types.nullOr types.bool;
+                    description = lib.mdDoc ''
+                      Grants the user created inherit permissions. From the postgres docs:
+
+                      A role is given permission to inherit the privileges of
+                      roles it is a member of, by default. However, to create a
+                      role without the permission, use CREATE ROLE name
+                      NOINHERIT.
+
+                      More information on postgres roles can be found [here](https://www.postgresql.org/docs/current/role-attributes.html)
+                    '';
+                    default = null;
+                    inherit defaultText;
+                  };
+                  login = mkOption {
+                    type = types.nullOr types.bool;
+                    description = lib.mdDoc ''
+                      Grants the user, created by the ensureUser attr, login permissions. From the postgres docs:
+
+                      Only roles that have the LOGIN attribute can be used as
+                      the initial role name for a database connection. A role
+                      with the LOGIN attribute can be considered the same as a
+                      "database user". To create a role with login privilege,
+                      use either:
+
+                      CREATE ROLE name LOGIN; CREATE USER name;
+
+                      (CREATE USER is equivalent to CREATE ROLE except that
+                      CREATE USER includes LOGIN by default, while CREATE ROLE
+                      does not.)
+
+                      More information on postgres roles can be found [here](https://www.postgresql.org/docs/current/role-attributes.html)
+                    '';
+                    default = null;
+                    inherit defaultText;
+                  };
+                  replication = mkOption {
+                    type = types.nullOr types.bool;
+                    description = lib.mdDoc ''
+                      Grants the user, created by the ensureUser attr, replication permissions. From the postgres docs:
+
+                      A role must explicitly be given permission to initiate
+                      streaming replication (except for superusers, since those
+                      bypass all permission checks). A role used for streaming
+                      replication must have LOGIN permission as well. To create
+                      such a role, use CREATE ROLE name REPLICATION LOGIN.
+
+                      More information on postgres roles can be found [here](https://www.postgresql.org/docs/current/role-attributes.html)
+                    '';
+                    default = null;
+                    inherit defaultText;
+                  };
+                  bypassrls = mkOption {
+                    type = types.nullOr types.bool;
+                    description = lib.mdDoc ''
+                      Grants the user, created by the ensureUser attr, bypassrls permissions. From the postgres docs:
+
+                      A role must be explicitly given permission to bypass
+                      every row-level security (RLS) policy (except for
+                      superusers, since those bypass all permission checks). To
+                      create such a role, use CREATE ROLE name BYPASSRLS as a
+                      superuser.
+
+                      More information on postgres roles can be found [here](https://www.postgresql.org/docs/current/role-attributes.html)
+                    '';
+                    default = null;
+                    inherit defaultText;
+                  };
+                };
+              };
+            };
+          };
+        });
+        default = [];
+        description = lib.mdDoc ''
+          Ensures that the specified users exist.
+          The PostgreSQL users will be identified using peer authentication. This authenticates only the Unix user
+          with the same name, without requiring a password.
+          This option will never delete existing users or remove DB ownership of databases
+          once granted with `ensureDBOwnership = true;`. This means that this must be
+          cleaned up manually after changing the configuration here.
+        '';
+        example = literalExpression ''
+          [
+            {
+              name = "nextcloud";
+            }
+            {
+              name = "superuser";
+              ensureDBOwnership = true;
+            }
+          ]
+        '';
+      };
+
+      enableTCPIP = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether PostgreSQL should listen on all network interfaces.
+          If disabled, the database can only be accessed via its Unix
+          domain socket or via TCP connections to localhost.
+        '';
+      };
+
+      logLinePrefix = mkOption {
+        type = types.str;
+        default = "[%p] ";
+        example = "%m [%p] ";
+        description = lib.mdDoc ''
+          A printf-style string that is output at the beginning of each log line.
+          Upstream default is `'%m [%p] '`, i.e. it includes the timestamp. We do
+          not include the timestamp, because the journal has it anyway.
+        '';
+      };
+
+      extraPlugins = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        example = literalExpression "with pkgs.postgresql_15.pkgs; [ postgis pg_repack ]";
+        description = lib.mdDoc ''
+          List of PostgreSQL plugins. The PostgreSQL version of each plugin should
+          match the version of `services.postgresql.package`.
+        '';
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ bool float int str ]);
+        default = {};
+        description = lib.mdDoc ''
+          PostgreSQL configuration. Refer to
+          <https://www.postgresql.org/docs/15/config-setting.html#CONFIG-SETTING-CONFIGURATION-FILE>
+          for an overview of `postgresql.conf`.
+
+          ::: {.note}
+          String values will automatically be enclosed in single quotes. Single quotes will be
+          escaped with two single quotes as described by the upstream documentation linked above.
+          :::
+        '';
+        example = literalExpression ''
+          {
+            log_connections = true;
+            log_statement = "all";
+            logging_collector = true;
+            log_disconnections = true;
+            log_destination = lib.mkForce "syslog";
+          }
+        '';
+      };
+
+      recoveryConfig = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Contents of the {file}`recovery.conf` file.
+        '';
+      };
+
+      superUser = mkOption {
+        type = types.str;
+        default = "postgres";
+        internal = true;
+        readOnly = true;
+        description = lib.mdDoc ''
+          PostgreSQL superuser account to use for various operations. Internal since changing
+          this value would lead to breakage while setting up databases.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = map ({ name, ensureDBOwnership, ... }: {
+      assertion = ensureDBOwnership -> builtins.elem name cfg.ensureDatabases;
+      message = ''
+        For each database user defined with `services.postgresql.ensureUsers` and
+        `ensureDBOwnership = true;`, a database with the same name must be defined
+        in `services.postgresql.ensureDatabases`.
+
+        Offender: ${name} has not been found among databases.
+      '';
+    }) cfg.ensureUsers;
+    # `ensurePermissions` is now deprecated, let's avoid it.
+    warnings = lib.optional (any ({ ensurePermissions, ... }: ensurePermissions != {}) cfg.ensureUsers) "
+      `services.postgresql.ensureUsers.*.ensurePermissions` is used in your expressions;
+      this option is known to be broken with newer PostgreSQL versions.
+      Consider migrating to `services.postgresql.ensureUsers.*.ensureDBOwnership`, or
+      consult the release notes or manual for more migration guidelines.
+
+      This option will be removed in NixOS 24.05 unless it sees significant
+      maintenance improvements.
+    ";
+
+    services.postgresql.settings =
+      {
+        hba_file = "${pkgs.writeText "pg_hba.conf" cfg.authentication}";
+        ident_file = "${pkgs.writeText "pg_ident.conf" cfg.identMap}";
+        log_destination = "stderr";
+        log_line_prefix = cfg.logLinePrefix;
+        listen_addresses = if cfg.enableTCPIP then "*" else "localhost";
+        port = cfg.port;
+        jit = mkDefault (if cfg.enableJIT then "on" else "off");
+      };
+
+    services.postgresql.package = let
+        mkThrow = ver: throw "postgresql_${ver} was removed, please upgrade your postgresql version.";
+        base = if versionAtLeast config.system.stateVersion "23.11" then pkgs.postgresql_15
+            else if versionAtLeast config.system.stateVersion "22.05" then pkgs.postgresql_14
+            else if versionAtLeast config.system.stateVersion "21.11" then pkgs.postgresql_13
+            else if versionAtLeast config.system.stateVersion "20.03" then mkThrow "11"
+            else if versionAtLeast config.system.stateVersion "17.09" then mkThrow "9_6"
+            else mkThrow "9_5";
+    in
+      # Note: when changing the default, make it conditional on
+      # ‘system.stateVersion’ to maintain compatibility with existing
+      # systems!
+      mkDefault (if cfg.enableJIT then base.withJIT else base);
+
+    services.postgresql.dataDir = mkDefault "/var/lib/postgresql/${cfg.package.psqlSchema}";
+
+    services.postgresql.authentication = mkMerge [
+      (mkBefore "# Generated file; do not edit!")
+      (mkAfter
+      ''
+        # default value of services.postgresql.authentication
+        local all all              peer
+        host  all all 127.0.0.1/32 md5
+        host  all all ::1/128      md5
+      '')
+    ];
+
+    users.users.postgres =
+      { name = "postgres";
+        uid = config.ids.uids.postgres;
+        group = "postgres";
+        description = "PostgreSQL server user";
+        home = "${cfg.dataDir}";
+        useDefaultShell = true;
+      };
+
+    users.groups.postgres.gid = config.ids.gids.postgres;
+
+    environment.systemPackages = [ postgresql ];
+
+    environment.pathsToLink = [
+     "/share/postgresql"
+    ];
+
+    system.checks = lib.optional (cfg.checkConfig && pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) configFileCheck;
+
+    systemd.services.postgresql =
+      { description = "PostgreSQL Server";
+
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+
+        environment.PGDATA = cfg.dataDir;
+
+        path = [ postgresql ];
+
+        preStart =
+          ''
+            if ! test -e ${cfg.dataDir}/PG_VERSION; then
+              # Cleanup the data directory.
+              rm -f ${cfg.dataDir}/*.conf
+
+              # Initialise the database.
+              initdb -U ${cfg.superUser} ${concatStringsSep " " cfg.initdbArgs}
+
+              # See postStart!
+              touch "${cfg.dataDir}/.first_startup"
+            fi
+
+            ln -sfn "${configFile}/postgresql.conf" "${cfg.dataDir}/postgresql.conf"
+            ${optionalString (cfg.recoveryConfig != null) ''
+              ln -sfn "${pkgs.writeText "recovery.conf" cfg.recoveryConfig}" \
+                "${cfg.dataDir}/recovery.conf"
+            ''}
+          '';
+
+        # Wait for PostgreSQL to be ready to accept connections.
+        postStart =
+          ''
+            PSQL="psql --port=${toString cfg.port}"
+
+            while ! $PSQL -d postgres -c "" 2> /dev/null; do
+                if ! kill -0 "$MAINPID"; then exit 1; fi
+                sleep 0.1
+            done
+
+            if test -e "${cfg.dataDir}/.first_startup"; then
+              ${optionalString (cfg.initialScript != null) ''
+                $PSQL -f "${cfg.initialScript}" -d postgres
+              ''}
+              rm -f "${cfg.dataDir}/.first_startup"
+            fi
+          '' + optionalString (cfg.ensureDatabases != []) ''
+            ${concatMapStrings (database: ''
+              $PSQL -tAc "SELECT 1 FROM pg_database WHERE datname = '${database}'" | grep -q 1 || $PSQL -tAc 'CREATE DATABASE "${database}"'
+            '') cfg.ensureDatabases}
+          '' + ''
+            ${
+              concatMapStrings
+              (user:
+              let
+                  userPermissions = concatStringsSep "\n"
+                    (mapAttrsToList
+                      (database: permission: ''$PSQL -tAc 'GRANT ${permission} ON ${database} TO "${user.name}"' '')
+                      user.ensurePermissions
+                    );
+                  dbOwnershipStmt = optionalString
+                    user.ensureDBOwnership
+                    ''$PSQL -tAc 'ALTER DATABASE "${user.name}" OWNER TO "${user.name}";' '';
+
+                  filteredClauses = filterAttrs (name: value: value != null) user.ensureClauses;
+
+                  clauseSqlStatements = attrValues (mapAttrs (n: v: if v then n else "no${n}") filteredClauses);
+
+                  userClauses = ''$PSQL -tAc 'ALTER ROLE "${user.name}" ${concatStringsSep " " clauseSqlStatements}' '';
+                in ''
+                  $PSQL -tAc "SELECT 1 FROM pg_roles WHERE rolname='${user.name}'" | grep -q 1 || $PSQL -tAc 'CREATE USER "${user.name}"'
+                  ${userPermissions}
+                  ${userClauses}
+
+                  ${dbOwnershipStmt}
+                ''
+              )
+              cfg.ensureUsers
+            }
+          '';
+
+        serviceConfig = mkMerge [
+          { ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+            User = "postgres";
+            Group = "postgres";
+            RuntimeDirectory = "postgresql";
+            Type = if versionAtLeast cfg.package.version "9.6"
+                   then "notify"
+                   else "simple";
+
+            # Shut down Postgres using SIGINT ("Fast Shutdown mode").  See
+            # https://www.postgresql.org/docs/current/server-shutdown.html
+            KillSignal = "SIGINT";
+            KillMode = "mixed";
+
+            # Give Postgres a decent amount of time to clean up after
+            # receiving systemd's SIGINT.
+            TimeoutSec = 120;
+
+            ExecStart = "${postgresql}/bin/postgres";
+          }
+          (mkIf (cfg.dataDir == "/var/lib/postgresql/${cfg.package.psqlSchema}") {
+            StateDirectory = "postgresql postgresql/${cfg.package.psqlSchema}";
+            StateDirectoryMode = if groupAccessAvailable then "0750" else "0700";
+          })
+        ];
+
+        unitConfig.RequiresMountsFor = "${cfg.dataDir}";
+      };
+
+  };
+
+  meta.doc = ./postgresql.md;
+  meta.maintainers = with lib.maintainers; [ thoughtpolice danbst ];
+}
diff --git a/nixpkgs/nixos/modules/services/databases/redis.nix b/nixpkgs/nixos/modules/services/databases/redis.nix
new file mode 100644
index 000000000000..315a0282cd73
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/redis.nix
@@ -0,0 +1,412 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.redis;
+
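+  # Render Redis configuration values: booleans become yes/no, everything else uses the generic renderer.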
+  mkValueString = value:
+    if value == true then "yes"
+    else if value == false then "no"
+    else generators.mkValueStringDefault { } value;
+
+  redisConfig = settings: pkgs.writeText "redis.conf" (generators.toKeyValue {
+    listsAsDuplicateKeys = true;
+    mkKeyValue = generators.mkKeyValueDefault { inherit mkValueString; } " ";
+  } settings);
+
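+  # "redis" for the default server (name == ""), "redis-<name>" for any other server.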
+  redisName = name: "redis" + optionalString (name != "") ("-"+name);
+  enabledServers = filterAttrs (name: conf: conf.enable) config.services.redis.servers;
+
+in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "redis" "user" ] "The redis module now is hardcoded to the redis user.")
+    (mkRemovedOptionModule [ "services" "redis" "dbpath" ] "The redis module now uses /var/lib/redis as data directory.")
+    (mkRemovedOptionModule [ "services" "redis" "dbFilename" ] "The redis module now uses /var/lib/redis/dump.rdb as database dump location.")
+    (mkRemovedOptionModule [ "services" "redis" "appendOnlyFilename" ] "This option was never used.")
+    (mkRemovedOptionModule [ "services" "redis" "pidFile" ] "This option was removed.")
+    (mkRemovedOptionModule [ "services" "redis" "extraConfig" ] "Use services.redis.servers.*.settings instead.")
+    (mkRenamedOptionModule [ "services" "redis" "enable"] [ "services" "redis" "servers" "" "enable" ])
+    (mkRenamedOptionModule [ "services" "redis" "port"] [ "services" "redis" "servers" "" "port" ])
+    (mkRenamedOptionModule [ "services" "redis" "openFirewall"] [ "services" "redis" "servers" "" "openFirewall" ])
+    (mkRenamedOptionModule [ "services" "redis" "bind"] [ "services" "redis" "servers" "" "bind" ])
+    (mkRenamedOptionModule [ "services" "redis" "unixSocket"] [ "services" "redis" "servers" "" "unixSocket" ])
+    (mkRenamedOptionModule [ "services" "redis" "unixSocketPerm"] [ "services" "redis" "servers" "" "unixSocketPerm" ])
+    (mkRenamedOptionModule [ "services" "redis" "logLevel"] [ "services" "redis" "servers" "" "logLevel" ])
+    (mkRenamedOptionModule [ "services" "redis" "logfile"] [ "services" "redis" "servers" "" "logfile" ])
+    (mkRenamedOptionModule [ "services" "redis" "syslog"] [ "services" "redis" "servers" "" "syslog" ])
+    (mkRenamedOptionModule [ "services" "redis" "databases"] [ "services" "redis" "servers" "" "databases" ])
+    (mkRenamedOptionModule [ "services" "redis" "maxclients"] [ "services" "redis" "servers" "" "maxclients" ])
+    (mkRenamedOptionModule [ "services" "redis" "save"] [ "services" "redis" "servers" "" "save" ])
+    (mkRenamedOptionModule [ "services" "redis" "slaveOf"] [ "services" "redis" "servers" "" "slaveOf" ])
+    (mkRenamedOptionModule [ "services" "redis" "masterAuth"] [ "services" "redis" "servers" "" "masterAuth" ])
+    (mkRenamedOptionModule [ "services" "redis" "requirePass"] [ "services" "redis" "servers" "" "requirePass" ])
+    (mkRenamedOptionModule [ "services" "redis" "requirePassFile"] [ "services" "redis" "servers" "" "requirePassFile" ])
+    (mkRenamedOptionModule [ "services" "redis" "appendOnly"] [ "services" "redis" "servers" "" "appendOnly" ])
+    (mkRenamedOptionModule [ "services" "redis" "appendFsync"] [ "services" "redis" "servers" "" "appendFsync" ])
+    (mkRenamedOptionModule [ "services" "redis" "slowLogLogSlowerThan"] [ "services" "redis" "servers" "" "slowLogLogSlowerThan" ])
+    (mkRenamedOptionModule [ "services" "redis" "slowLogMaxLen"] [ "services" "redis" "servers" "" "slowLogMaxLen" ])
+    (mkRenamedOptionModule [ "services" "redis" "settings"] [ "services" "redis" "servers" "" "settings" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.redis = {
+      package = mkOption {
+        type = types.package;
+        default = pkgs.redis;
+        defaultText = literalExpression "pkgs.redis";
+        description = lib.mdDoc "Which Redis derivation to use.";
+      };
+
+      vmOverCommit = mkEnableOption (lib.mdDoc ''
+        setting of vm.overcommit_memory to 1
+        (Suggested for Background Saving: <https://redis.io/docs/get-started/faq/>)
+      '');
+
+      servers = mkOption {
+        type = with types; attrsOf (submodule ({ config, name, ... }: {
+          options = {
+            enable = mkEnableOption (lib.mdDoc ''
+              Redis server.
+
+              Note that the NixOS module for Redis disables kernel support
+              for Transparent Huge Pages (THP),
+              because this feature causes major performance problems for Redis
+              (see <https://redis.io/topics/latency>).
+            '');
+
+            user = mkOption {
+              type = types.str;
+              default = redisName name;
+              defaultText = literalExpression ''
+                if name == "" then "redis" else "redis-''${name}"
+              '';
+              description = lib.mdDoc "The username and group name for redis-server.";
+            };
+
+            port = mkOption {
+              type = types.port;
+              default = if name == "" then 6379 else 0;
+              defaultText = literalExpression ''if name == "" then 6379 else 0'';
+              description = lib.mdDoc ''
+                The TCP port to accept connections.
+                If port 0 is specified, Redis will not listen on a TCP socket.
+              '';
+            };
+
+            openFirewall = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether to open ports in the firewall for the server.
+              '';
+            };
+
+            extraParams = mkOption {
+              type = with types; listOf str;
+              default = [];
+              description = lib.mdDoc "Extra parameters to append to the redis-server invocation.";
+              example = [ "--sentinel" ];
+            };
+
+            bind = mkOption {
+              type = with types; nullOr str;
+              default = "127.0.0.1";
+              description = lib.mdDoc ''
+                The IP interface to bind to.
+                `null` means "all interfaces".
+              '';
+              example = "192.0.2.1";
+            };
+
+            unixSocket = mkOption {
+              type = with types; nullOr path;
+              default = "/run/${redisName name}/redis.sock";
+              defaultText = literalExpression ''
+                if name == "" then "/run/redis/redis.sock" else "/run/redis-''${name}/redis.sock"
+              '';
+              description = lib.mdDoc "The path to the socket to bind to.";
+            };
+
+            unixSocketPerm = mkOption {
+              type = types.int;
+              default = 660;
+              description = lib.mdDoc "Permissions for the Unix socket.";
+              example = 600;
+            };
+
+            logLevel = mkOption {
+              type = types.str;
+              default = "notice"; # debug, verbose, notice, warning
+              example = "debug";
+              description = lib.mdDoc "Specify the server verbosity level, options: debug, verbose, notice, warning.";
+            };
+
+            logfile = mkOption {
+              type = types.str;
+              default = "/dev/null";
+              description = lib.mdDoc "Specify the log file name. Also 'stdout' can be used to force Redis to log to standard output.";
+              example = "/var/log/redis.log";
+            };
+
+            syslog = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc "Enable logging to the system logger.";
+            };
+
+            databases = mkOption {
+              type = types.int;
+              default = 16;
+              description = lib.mdDoc "Set the number of databases.";
+            };
+
+            maxclients = mkOption {
+              type = types.int;
+              default = 10000;
+              description = lib.mdDoc "Set the max number of connected clients at the same time.";
+            };
+
+            save = mkOption {
+              type = with types; listOf (listOf int);
+              default = [ [900 1] [300 10] [60 10000] ];
+              description = mdDoc ''
+                The schedule on which data is persisted to disk, represented as a list of lists where the first element is the number of seconds and the second the number of changes.
+
+                If set to the empty list (`[]`) then RDB persistence will be disabled (useful if you are using AOF or don't want any persistence).
+              '';
+            };
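+            # Illustrative reading of the default above:
+            #   [ [900 1] [300 10] [60 10000] ]
+            # means: snapshot after 900 s if at least 1 key changed, after
+            # 300 s if at least 10 changed, and after 60 s if at least
+            # 10000 changed.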
+
+            slaveOf = mkOption {
+              type = with types; nullOr (submodule ({ ... }: {
+                options = {
+                  ip = mkOption {
+                    type = str;
+                    description = lib.mdDoc "IP of the Redis master";
+                    example = "192.168.1.100";
+                  };
+
+                  port = mkOption {
+                    type = port;
+                    description = lib.mdDoc "port of the Redis master";
+                    default = 6379;
+                  };
+                };
+              }));
+
+              default = null;
+              description = lib.mdDoc "IP address and port of the Redis master that this instance is a slave of.";
+              example = { ip = "192.168.1.100"; port = 6379; };
+            };
+
+            masterAuth = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''If the master is password protected (using the requirePass option),
+              it is possible to tell the slave to authenticate before starting the replication synchronization
+              process; otherwise the master will refuse the slave's request.
+              (STORED AS PLAIN TEXT, WORLD-READABLE IN THE NIX STORE)'';
+            };
+
+            requirePass = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                Password for database (STORED PLAIN TEXT, WORLD-READABLE IN NIX STORE).
+                Use requirePassFile to store it outside of the nix store in a dedicated file.
+              '';
+              example = "letmein!";
+            };
+
+            requirePassFile = mkOption {
+              type = with types; nullOr path;
+              default = null;
+              description = lib.mdDoc "File with password for the database.";
+              example = "/run/keys/redis-password";
+            };
+
+            appendOnly = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc "By default data is only periodically persisted to disk; enable this option to use an append-only file for improved persistence.";
+            };
+
+            appendFsync = mkOption {
+              type = types.str;
+              default = "everysec"; # no, always, everysec
+              description = lib.mdDoc "How often to fsync the append-only log, options: no, always, everysec.";
+            };
+
+            slowLogLogSlowerThan = mkOption {
+              type = types.int;
+              default = 10000;
+              description = lib.mdDoc "Log queries whose execution takes longer than X microseconds.";
+              example = 1000;
+            };
+
+            slowLogMaxLen = mkOption {
+              type = types.int;
+              default = 128;
+              description = lib.mdDoc "Maximum number of items to keep in slow log.";
+            };
+
+            settings = mkOption {
+              # TODO: this should be converted to freeformType
+              type = with types; attrsOf (oneOf [ bool int str (listOf str) ]);
+              default = {};
+              description = lib.mdDoc ''
+                Redis configuration. Refer to
+                <https://redis.io/topics/config>
+                for details on supported values.
+              '';
+              example = literalExpression ''
+                {
+                  loadmodule = [ "/path/to/my_module.so" "/path/to/other_module.so" ];
+                }
+              '';
+            };
+          };
+          config.settings = mkMerge [
+            {
+              inherit (config) port logfile databases maxclients appendOnly;
+              daemonize = false;
+              supervised = "systemd";
+              loglevel = config.logLevel;
+              syslog-enabled = config.syslog;
+              save = if config.save == []
+                then ''""'' # Disable saving with `save = ""`
+                else map
+                  (d: "${toString (builtins.elemAt d 0)} ${toString (builtins.elemAt d 1)}")
+                  config.save;
+              dbfilename = "dump.rdb";
+              dir = "/var/lib/${redisName name}";
+              appendfsync = config.appendFsync;
+              slowlog-log-slower-than = config.slowLogLogSlowerThan;
+              slowlog-max-len = config.slowLogMaxLen;
+            }
+            (mkIf (config.bind != null) { inherit (config) bind; })
+            (mkIf (config.unixSocket != null) {
+              unixsocket = config.unixSocket;
+              unixsocketperm = toString config.unixSocketPerm;
+            })
+            (mkIf (config.slaveOf != null) { slaveof = "${config.slaveOf.ip} ${toString config.slaveOf.port}"; })
+            (mkIf (config.masterAuth != null) { masterauth = config.masterAuth; })
+            (mkIf (config.requirePass != null) { requirepass = config.requirePass; })
+          ];
+        }));
+        description = lib.mdDoc "Configuration of multiple `redis-server` instances.";
+        default = {};
+      };
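+      # A minimal usage sketch (the instance name, port and key path are only
+      # examples):
+      #
+      #   services.redis.servers."cache" = {
+      #     enable = true;
+      #     port = 6380;
+      #     requirePassFile = "/run/keys/redis-cache-password";
+      #   };
+      #
+      # This defines a `redis-cache` systemd service listening on port 6380.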
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf (enabledServers != {}) {
+
+    assertions = attrValues (mapAttrs (name: conf: {
+      assertion = conf.requirePass != null -> conf.requirePassFile == null;
+      message = ''
+        You can only set one services.redis.servers.${name}.requirePass
+        or services.redis.servers.${name}.requirePassFile
+      '';
+    }) enabledServers);
+
+    boot.kernel.sysctl = mkMerge [
+      { "vm.nr_hugepages" = "0"; }
+      ( mkIf cfg.vmOverCommit { "vm.overcommit_memory" = "1"; } )
+    ];
+
+    networking.firewall.allowedTCPPorts = concatMap (conf:
+      optional conf.openFirewall conf.port
+    ) (attrValues enabledServers);
+
+    environment.systemPackages = [ cfg.package ];
+
+    users.users = mapAttrs' (name: conf: nameValuePair (redisName name) {
+      description = "System user for the redis-server instance ${name}";
+      isSystemUser = true;
+      group = redisName name;
+    }) enabledServers;
+    users.groups = mapAttrs' (name: conf: nameValuePair (redisName name) {
+    }) enabledServers;
+
+    systemd.services = mapAttrs' (name: conf: nameValuePair (redisName name) {
+      description = "Redis Server - ${redisName name}";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/redis-server /var/lib/${redisName name}/redis.conf ${escapeShellArgs conf.extraParams}";
+        ExecStartPre = "+"+pkgs.writeShellScript "${redisName name}-prep-conf" (let
+          redisConfVar = "/var/lib/${redisName name}/redis.conf";
+          redisConfRun = "/run/${redisName name}/nixos.conf";
+          redisConfStore = redisConfig conf.settings;
+        in ''
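+          # A sketch of the layering performed below:
+          # /var/lib/<name>/redis.conf merely includes /run/<name>/nixos.conf,
+          # which in turn includes the generated config from the Nix store and,
+          # when requirePassFile is set, has the requirepass directive appended
+          # at start time so the secret never lands in the store.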
+          touch "${redisConfVar}" "${redisConfRun}"
+          chown '${conf.user}' "${redisConfVar}" "${redisConfRun}"
+          chmod 0600 "${redisConfVar}" "${redisConfRun}"
+          if [ ! -s ${redisConfVar} ]; then
+            echo 'include "${redisConfRun}"' > "${redisConfVar}"
+          fi
+          echo 'include "${redisConfStore}"' > "${redisConfRun}"
+          ${optionalString (conf.requirePassFile != null) ''
+            {
+              echo -n "requirepass "
+              cat ${escapeShellArg conf.requirePassFile}
+            } >> "${redisConfRun}"
+          ''}
+        '');
+        Type = "notify";
+        # User and group
+        User = conf.user;
+        Group = conf.user;
+        # Runtime directory and mode
+        RuntimeDirectory = redisName name;
+        RuntimeDirectoryMode = "0750";
+        # State directory and mode
+        StateDirectory = redisName name;
+        StateDirectoryMode = "0700";
+        # Access write directories
+        UMask = "0077";
+        # Capabilities
+        CapabilityBoundingSet = "";
+        # Security
+        NoNewPrivileges = true;
+        # Process Properties
+        LimitNOFILE = mkDefault "${toString (conf.maxclients + 32)}";
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies =
+          optionals (conf.port != 0) ["AF_INET" "AF_INET6"] ++
+          optional (conf.unixSocket != null) "AF_UNIX";
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "~@cpu-emulation @debug @keyring @memlock @mount @obsolete @privileged @resources @setuid";
+      };
+    }) enabledServers;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/rethinkdb.nix b/nixpkgs/nixos/modules/services/databases/rethinkdb.nix
new file mode 100644
index 000000000000..f5391b48e89c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/rethinkdb.nix
@@ -0,0 +1,108 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rethinkdb;
+  rethinkdb = cfg.package;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.rethinkdb = {
+
+      enable = mkEnableOption (lib.mdDoc "RethinkDB server");
+
+      #package = mkOption {
+      #  default = pkgs.rethinkdb;
+      #  description = "Which RethinkDB derivation to use.";
+      #};
+
+      user = mkOption {
+        default = "rethinkdb";
+        description = lib.mdDoc "User account under which RethinkDB runs.";
+      };
+
+      group = mkOption {
+        default = "rethinkdb";
+        description = lib.mdDoc "Group to which the rethinkdb user belongs.";
+      };
+
+      dbpath = mkOption {
+        default = "/var/db/rethinkdb";
+        description = lib.mdDoc "Location where RethinkDB stores its data, 1 data directory per instance.";
+      };
+
+      pidpath = mkOption {
+        default = "/run/rethinkdb";
+        description = lib.mdDoc "Directory where each instance's pid file is placed.";
+      };
+
+      #cfgpath = mkOption {
+      #  default = "/etc/rethinkdb/instances.d";
+      #  description = "Location where RethinkDB stores its config files, 1 config file per instance.";
+      #};
+
+      # TODO: currently not used by our implementation.
+      #instances = mkOption {
+      #  type = types.attrsOf types.str;
+      #  default = {};
+      #  description = "List of named RethinkDB instances in our cluster.";
+      #};
+
+    };
+
+  };
+
+  ###### implementation
+  config = mkIf config.services.rethinkdb.enable {
+
+    environment.systemPackages = [ rethinkdb ];
+
+    systemd.services.rethinkdb = {
+      description = "RethinkDB server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        # TODO: abstract away 'default', which is a per-instance directory name
+        #       allowing end user of this nix module to provide multiple instances,
+        #       and associated directory per instance
+        ExecStart = "${rethinkdb}/bin/rethinkdb -d ${cfg.dbpath}/default";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        User = cfg.user;
+        Group = cfg.group;
+        PIDFile = "${cfg.pidpath}/default.pid";
+        PermissionsStartOnly = true;
+      };
+
+      preStart = ''
+        if ! test -e ${cfg.dbpath}; then
+            install -d -m0755 -o ${cfg.user} -g ${cfg.group} ${cfg.dbpath}
+            install -d -m0755 -o ${cfg.user} -g ${cfg.group} ${cfg.dbpath}/default
+            chown -R ${cfg.user}:${cfg.group} ${cfg.dbpath}
+        fi
+        if ! test -e "${cfg.pidpath}/default.pid"; then
+            install -D -o ${cfg.user} -g ${cfg.group} /dev/null "${cfg.pidpath}/default.pid"
+        fi
+      '';
+    };
+
+    users.users.rethinkdb = mkIf (cfg.user == "rethinkdb")
+      { name = "rethinkdb";
+        description = "RethinkDB server user";
+        isSystemUser = true;
+      };
+
+    users.groups = optionalAttrs (cfg.group == "rethinkdb") (singleton
+      { name = "rethinkdb";
+      });
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/databases/surrealdb.nix b/nixpkgs/nixos/modules/services/databases/surrealdb.nix
new file mode 100644
index 000000000000..e1a1faed1f8f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/surrealdb.nix
@@ -0,0 +1,98 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+
+  cfg = config.services.surrealdb;
+in {
+
+  options = {
+    services.surrealdb = {
+      enable = mkEnableOption (lib.mdDoc "SurrealDB, a scalable, distributed, collaborative, document-graph database, for the realtime web");
+
+      package = mkOption {
+        default = pkgs.surrealdb;
+        defaultText = literalExpression "pkgs.surrealdb";
+        type = types.package;
+        description = lib.mdDoc ''
+          Which surrealdb derivation to use.
+        '';
+      };
+
+      dbPath = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The path that surrealdb will write data to. Use "memory" for a
+          non-persistent in-memory store.
+          Can be one of "memory", "file://:path", "tikv://:addr".
+        '';
+        default = "file:///var/lib/surrealdb/";
+        example = "memory";
+      };
+
+      host = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The host address that surrealdb will listen on.
+        '';
+        default = "127.0.0.1";
+        example = "127.0.0.1";
+      };
+
+      port = mkOption {
+        type = types.port;
+        description = lib.mdDoc ''
+          The port that surrealdb will listen on.
+        '';
+        default = 8000;
+        example = 8000;
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--allow-all" "--auth" "--user root" "--pass root" ];
+        description = lib.mdDoc ''
+          Specify a list of additional command line flags,
+          which get escaped and are then passed to surrealdb.
+        '';
+      };
+    };
+  };
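+  # A minimal usage sketch (the values shown are only examples):
+  #
+  #   services.surrealdb = {
+  #     enable = true;
+  #     port = 8080;
+  #   };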
+
+  config = mkIf cfg.enable {
+
+    # Used to connect to the running service
+    environment.systemPackages = [ cfg.package ] ;
+
+    systemd.services.surrealdb = {
+      description = "A scalable, distributed, collaborative, document-graph database for the realtime web";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/surreal start --bind ${cfg.host}:${toString cfg.port} ${escapeShellArgs cfg.extraFlags} -- ${cfg.dbPath}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        StateDirectory = "surrealdb";
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectClock = true;
+        ProtectProc = "noaccess";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        RestrictSUIDSGID = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/databases/victoriametrics.nix b/nixpkgs/nixos/modules/services/databases/victoriametrics.nix
new file mode 100644
index 000000000000..638066a42dbd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/databases/victoriametrics.nix
@@ -0,0 +1,78 @@
+{ config, pkgs, lib, ... }:
+let cfg = config.services.victoriametrics; in
+{
+  options.services.victoriametrics = with lib; {
+    enable = mkEnableOption (lib.mdDoc "victoriametrics");
+    package = mkOption {
+      type = types.package;
+      default = pkgs.victoriametrics;
+      defaultText = literalExpression "pkgs.victoriametrics";
+      description = lib.mdDoc ''
+        The VictoriaMetrics distribution to use.
+      '';
+    };
+    listenAddress = mkOption {
+      default = ":8428";
+      type = types.str;
+      description = lib.mdDoc ''
+        The listen address for the HTTP interface.
+      '';
+    };
+    retentionPeriod = mkOption {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc ''
+        Retention period in months.
+      '';
+    };
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra options to pass to VictoriaMetrics. See the README:
+        <https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/README.md>
+        or {command}`victoriametrics -help` for more
+        information.
+      '';
+    };
+  };
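+  # A minimal usage sketch (the values shown are only examples):
+  #
+  #   services.victoriametrics = {
+  #     enable = true;
+  #     listenAddress = "127.0.0.1:8428";
+  #     retentionPeriod = 12; # months
+  #   };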
+  config = lib.mkIf cfg.enable {
+    systemd.services.victoriametrics = {
+      description = "VictoriaMetrics time series database";
+      after = [ "network.target" ];
+      startLimitBurst = 5;
+      serviceConfig = {
+        Restart = "on-failure";
+        RestartSec = 1;
+        StateDirectory = "victoriametrics";
+        DynamicUser = true;
+        ExecStart = ''
+          ${cfg.package}/bin/victoria-metrics \
+              -storageDataPath=/var/lib/victoriametrics \
+              -httpListenAddr ${cfg.listenAddress} \
+              -retentionPeriod ${toString cfg.retentionPeriod} \
+              ${lib.escapeShellArgs cfg.extraOptions}
+        '';
+        # victoriametrics 1.59 with ~7GB of data seems to eventually panic when merging files and then
+        # begins restart-looping forever. Set LimitNOFILE= to a large number to work around this issue.
+        #
+        # panic: FATAL: unrecoverable error when merging small parts in the partition "/var/lib/victoriametrics/data/small/2021_08":
+        # cannot open source part for merging: cannot open values file in stream mode:
+        # cannot open file "/var/lib/victoriametrics/data/small/2021_08/[...]/values.bin":
+        # open /var/lib/victoriametrics/data/small/2021_08/[...]/values.bin: too many open files
+        LimitNOFILE = 1048576;
+      };
+      wantedBy = [ "multi-user.target" ];
+
+      postStart =
+        let
+          bindAddr = (lib.optionalString (lib.hasPrefix ":" cfg.listenAddress) "127.0.0.1") + cfg.listenAddress;
+        in
+        lib.mkBefore ''
+          until ${lib.getBin pkgs.curl}/bin/curl -s -o /dev/null http://${bindAddr}/ping; do
+            sleep 1;
+          done
+        '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/accountsservice.nix b/nixpkgs/nixos/modules/services/desktops/accountsservice.nix
new file mode 100644
index 000000000000..af62850acdc1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/accountsservice.nix
@@ -0,0 +1,58 @@
+# AccountsService daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.freedesktop.members;
+  };
+
+  ###### interface
+
+  options = {
+
+    services.accounts-daemon = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable AccountsService, a DBus service for accessing
+          the list of user accounts and information attached to those accounts.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.accounts-daemon.enable {
+
+    environment.systemPackages = [ pkgs.accountsservice ];
+
+    # Accounts daemon looks for dbus interfaces in $XDG_DATA_DIRS/accountsservice
+    environment.pathsToLink = [ "/share/accountsservice" ];
+
+    services.dbus.packages = [ pkgs.accountsservice ];
+
+    systemd.packages = [ pkgs.accountsservice ];
+
+    systemd.services.accounts-daemon = recursiveUpdate {
+
+      wantedBy = [ "graphical.target" ];
+
+      # Accounts daemon looks for dbus interfaces in $XDG_DATA_DIRS/accountsservice
+      environment.XDG_DATA_DIRS = "${config.system.path}/share";
+
+    } (optionalAttrs (!config.users.mutableUsers) {
+      environment.NIXOS_USERS_PURE = "true";
+    });
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/bamf.nix b/nixpkgs/nixos/modules/services/desktops/bamf.nix
new file mode 100644
index 000000000000..3e40a7055348
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/bamf.nix
@@ -0,0 +1,27 @@
+# Bamf
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta = with lib; {
+    maintainers = with maintainers; [ ] ++ teams.pantheon.members;
+  };
+
+  ###### interface
+
+  options = {
+    services.bamf = {
+      enable = mkEnableOption (lib.mdDoc "bamf");
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.bamf.enable {
+    services.dbus.packages = [ pkgs.bamf ];
+
+    systemd.packages = [ pkgs.bamf ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/blueman.nix b/nixpkgs/nixos/modules/services/desktops/blueman.nix
new file mode 100644
index 000000000000..fad2f21bce5b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/blueman.nix
@@ -0,0 +1,25 @@
+# blueman service
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.blueman;
+in {
+  ###### interface
+  options = {
+    services.blueman = {
+      enable = mkEnableOption (lib.mdDoc "blueman");
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.blueman ];
+
+    services.dbus.packages = [ pkgs.blueman ];
+
+    systemd.packages = [ pkgs.blueman ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/cpupower-gui.nix b/nixpkgs/nixos/modules/services/desktops/cpupower-gui.nix
new file mode 100644
index 000000000000..47071aebce8d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/cpupower-gui.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cpupower-gui;
+in {
+  options = {
+    services.cpupower-gui = {
+      enable = mkOption {
+        type = lib.types.bool;
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Enables the D-Bus and systemd services needed by cpupower-gui.
+          These services are responsible for retrieving and modifying CPU
+          power-saving settings.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.cpupower-gui ];
+    services.dbus.packages = [ pkgs.cpupower-gui ];
+    systemd.user = {
+      services.cpupower-gui-user = {
+        description = "Apply cpupower-gui config at user login";
+        wantedBy = [ "graphical-session.target" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkgs.cpupower-gui}/bin/cpupower-gui config";
+        };
+      };
+    };
+    systemd.services = {
+      cpupower-gui = {
+        description = "Apply cpupower-gui config at boot";
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkgs.cpupower-gui}/bin/cpupower-gui config";
+        };
+      };
+      cpupower-gui-helper = {
+        description = "cpupower-gui system helper";
+        aliases = [ "dbus-org.rnd2.cpupower_gui.helper.service" ];
+        serviceConfig = {
+          Type = "dbus";
+          BusName = "org.rnd2.cpupower_gui.helper";
+          ExecStart = "${pkgs.cpupower-gui}/lib/cpupower-gui/cpupower-gui-helper";
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/deepin/app-services.nix b/nixpkgs/nixos/modules/services/desktops/deepin/app-services.nix
new file mode 100644
index 000000000000..4592bc7bb340
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/deepin/app-services.nix
@@ -0,0 +1,36 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.deepin.members;
+  };
+
+  ###### interface
+
+  options = {
+
+    services.deepin.app-services = {
+
+      enable = mkEnableOption (lib.mdDoc "service collection of DDE applications, including dconfig-center");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.deepin.app-services.enable {
+
+    environment.systemPackages = [ pkgs.deepin.dde-app-services ];
+
+    services.dbus.packages = [ pkgs.deepin.dde-app-services ];
+
+    environment.pathsToLink = [ "/share/dsg" ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/deepin/dde-api.nix b/nixpkgs/nixos/modules/services/desktops/deepin/dde-api.nix
new file mode 100644
index 000000000000..459876febf21
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/deepin/dde-api.nix
@@ -0,0 +1,50 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.deepin.members;
+  };
+
+  ###### interface
+
+  options = {
+
+    services.deepin.dde-api = {
+
+      enable = mkEnableOption (lib.mdDoc ''
+        some D-Bus interfaces that are used for screen zone detection,
+        thumbnail generation, and sound playback in the Deepin Desktop Environment
+      '');
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.deepin.dde-api.enable {
+
+    environment.systemPackages = [ pkgs.deepin.dde-api ];
+
+    services.dbus.packages = [ pkgs.deepin.dde-api ];
+
+    systemd.packages = [ pkgs.deepin.dde-api ];
+
+    environment.pathsToLink = [ "/lib/deepin-api" ];
+
+    users.groups.deepin-sound-player = { };
+    users.users.deepin-sound-player = {
+      description = "Deepin sound player";
+      home = "/var/lib/deepin-sound-player";
+      createHome = true;
+      group = "deepin-sound-player";
+      isSystemUser = true;
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/deepin/dde-daemon.nix b/nixpkgs/nixos/modules/services/desktops/deepin/dde-daemon.nix
new file mode 100644
index 000000000000..356d323bcbdf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/deepin/dde-daemon.nix
@@ -0,0 +1,40 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.deepin.members;
+  };
+
+  ###### interface
+
+  options = {
+
+    services.deepin.dde-daemon = {
+
+      enable = mkEnableOption (lib.mdDoc "daemon for handling the deepin session settings");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.deepin.dde-daemon.enable {
+
+    environment.systemPackages = [ pkgs.deepin.dde-daemon ];
+
+    services.dbus.packages = [ pkgs.deepin.dde-daemon ];
+
+    services.udev.packages = [ pkgs.deepin.dde-daemon ];
+
+    systemd.packages = [ pkgs.deepin.dde-daemon ];
+
+    environment.pathsToLink = [ "/lib/deepin-daemon" ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/dleyna-renderer.nix b/nixpkgs/nixos/modules/services/desktops/dleyna-renderer.nix
new file mode 100644
index 000000000000..daf65180b36f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/dleyna-renderer.nix
@@ -0,0 +1,28 @@
+# dleyna-renderer service.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+  options = {
+    services.dleyna-renderer = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the dleyna-renderer service, a DBus service
+          for handling DLNA renderers.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf config.services.dleyna-renderer.enable {
+    environment.systemPackages = [ pkgs.dleyna-renderer ];
+
+    services.dbus.packages = [ pkgs.dleyna-renderer ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/dleyna-server.nix b/nixpkgs/nixos/modules/services/desktops/dleyna-server.nix
new file mode 100644
index 000000000000..9cbcd2a9cdae
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/dleyna-server.nix
@@ -0,0 +1,28 @@
+# dleyna-server service.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+  options = {
+    services.dleyna-server = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the dleyna-server service, a DBus service
+          for handling DLNA servers.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf config.services.dleyna-server.enable {
+    environment.systemPackages = [ pkgs.dleyna-server ];
+
+    services.dbus.packages = [ pkgs.dleyna-server ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/espanso.nix b/nixpkgs/nixos/modules/services/desktops/espanso.nix
new file mode 100644
index 000000000000..cbc48034795e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/espanso.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let cfg = config.services.espanso;
+in {
+  meta = { maintainers = with lib.maintainers; [ numkem ]; };
+
+  options = {
+    services.espanso = { enable = options.mkEnableOption (lib.mdDoc "Espanso"); };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.espanso = {
+      description = "Espanso daemon";
+      serviceConfig = {
+        ExecStart = "${pkgs.espanso}/bin/espanso daemon";
+        Restart = "on-failure";
+      };
+      wantedBy = [ "default.target" ];
+    };
+
+    environment.systemPackages = [ pkgs.espanso ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/flatpak.md b/nixpkgs/nixos/modules/services/desktops/flatpak.md
new file mode 100644
index 000000000000..65b1554d79b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/flatpak.md
@@ -0,0 +1,39 @@
+# Flatpak {#module-services-flatpak}
+
+*Source:* {file}`modules/services/desktops/flatpak.nix`
+
+*Upstream documentation:* <https://github.com/flatpak/flatpak/wiki>
+
+Flatpak is a system for building, distributing, and running sandboxed desktop
+applications on Linux.
+
+To enable Flatpak, add the following to your {file}`configuration.nix`:
+```
+  services.flatpak.enable = true;
+```
+
+For the sandboxed apps to work correctly, desktop integration portals need to
+be installed. If you run GNOME, this will be handled automatically for you;
+in other cases, you will need to add something like the following to your
+{file}`configuration.nix`:
+```
+  xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
+```
+
+Then, you will need to add a repository, for example,
+[Flathub](https://github.com/flatpak/flatpak/wiki),
+either using the following commands:
+```ShellSession
+$ flatpak remote-add --if-not-exists flathub https://flathub.org/repo/flathub.flatpakrepo
+$ flatpak update
+```
+or by opening the
+[repository file](https://flathub.org/repo/flathub.flatpakrepo) in GNOME Software.
+
+Finally, you can search and install programs:
+```ShellSession
+$ flatpak search bustle
+$ flatpak install flathub org.freedesktop.Bustle
+$ flatpak run org.freedesktop.Bustle
+```
+Again, GNOME Software offers a graphical interface for these tasks.
diff --git a/nixpkgs/nixos/modules/services/desktops/flatpak.nix b/nixpkgs/nixos/modules/services/desktops/flatpak.nix
new file mode 100644
index 000000000000..d99faf381e01
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/flatpak.nix
@@ -0,0 +1,56 @@
+# flatpak service.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.flatpak;
+in {
+  meta = {
+    doc = ./flatpak.md;
+    maintainers = pkgs.flatpak.meta.maintainers;
+  };
+
+  ###### interface
+  options = {
+    services.flatpak = {
+      enable = mkEnableOption (lib.mdDoc "flatpak");
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = (config.xdg.portal.enable == true);
+        message = "To use Flatpak you must enable XDG Desktop Portals with xdg.portal.enable.";
+      }
+    ];
+
+    environment.systemPackages = [ pkgs.flatpak ];
+
+    security.polkit.enable = true;
+
+    services.dbus.packages = [ pkgs.flatpak ];
+
+    systemd.packages = [ pkgs.flatpak ];
+
+    environment.profiles = [
+      "$HOME/.local/share/flatpak/exports"
+      "/var/lib/flatpak/exports"
+    ];
+
+    # It has been possible since https://github.com/flatpak/flatpak/releases/tag/1.3.2
+    # to build a SELinux policy module.
+
+    # TODO: use sysusers.d
+    users.users.flatpak = {
+      description = "Flatpak system helper";
+      group = "flatpak";
+      isSystemUser = true;
+    };
+
+    users.groups.flatpak = { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/geoclue2.nix b/nixpkgs/nixos/modules/services/desktops/geoclue2.nix
new file mode 100644
index 000000000000..b04f46c26a56
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/geoclue2.nix
@@ -0,0 +1,272 @@
+# GeoClue 2 daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  # the demo agent isn't built by default, but we need it here
+  package = pkgs.geoclue2.override { withDemoAgent = config.services.geoclue2.enableDemoAgent; };
+
+  cfg = config.services.geoclue2;
+
+  defaultWhitelist = [ "gnome-shell" "io.elementary.desktop.agent-geoclue2" ];
+
+  appConfigModule = types.submodule ({ name, ... }: {
+    options = {
+      desktopID = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Desktop ID of the application.";
+      };
+
+      isAllowed = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether the application will be allowed access to location information.
+        '';
+      };
+
+      isSystem = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether the application is a system component or not.
+        '';
+      };
+
+      users = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          List of UIDs of all users for which this application is allowed location
+          info access. Defaults to an empty list, which allows access for all users.
+        '';
+      };
+    };
+
+    config.desktopID = mkDefault name;
+  });
+
+  appConfigToINICompatible = _: { desktopID, isAllowed, isSystem, users, ... }: {
+    name = desktopID;
+    value = {
+      allowed = isAllowed;
+      system = isSystem;
+      users = concatStringsSep ";" users;
+    };
+  };
+
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.geoclue2 = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the GeoClue 2 daemon, a DBus service
+          that provides location information to applications.
+        '';
+      };
+
+      enableDemoAgent = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to use the GeoClue demo agent. This should be
+          overridden by desktop environments that provide their own
+          agent.
+        '';
+      };
+
+      enableNmea = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to fetch location from NMEA sources on the local network.
+        '';
+      };
+
+      enable3G = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable 3G source.
+        '';
+      };
+
+      enableCDMA = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable CDMA source.
+        '';
+      };
+
+      enableModemGPS = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable Modem-GPS source.
+        '';
+      };
+
+      enableWifi = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable WiFi source.
+        '';
+      };
+
+      geoProviderUrl = mkOption {
+        type = types.str;
+        default = "https://location.services.mozilla.com/v1/geolocate?key=geoclue";
+        example = "https://www.googleapis.com/geolocation/v1/geolocate?key=YOUR_KEY";
+        description = lib.mdDoc ''
+          The URL of the Wi-Fi geolocation service.
+        '';
+      };
+
+      submitData = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to submit data to a GeoLocation Service.
+        '';
+      };
+
+      submissionUrl = mkOption {
+        type = types.str;
+        default = "https://location.services.mozilla.com/v1/submit?key=geoclue";
+        description = lib.mdDoc ''
+          The URL used to submit data to the geolocation service.
+        '';
+      };
+
+      submissionNick = mkOption {
+        type = types.str;
+        default = "geoclue";
+        description = lib.mdDoc ''
+          A nickname to submit network data with.
+          Must be 2-32 characters long.
+        '';
+      };
+
+      appConfig = mkOption {
+        type = types.attrsOf appConfigModule;
+        default = {};
+        example = literalExpression ''
+          "com.github.app" = {
+            isAllowed = true;
+            isSystem = true;
+            users = [ "300" ];
+          };
+        '';
+        description = lib.mdDoc ''
+          Specify extra settings per application.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ package ];
+
+    services.dbus.packages = [ package ];
+
+    systemd.packages = [ package ];
+
+    # we cannot use DynamicUser as we need the geoclue user to exist for the
+    # dbus policy to work
+    users = {
+      users.geoclue = {
+        isSystemUser = true;
+        home = "/var/lib/geoclue";
+        group = "geoclue";
+        description = "Geoinformation service";
+      };
+
+      groups.geoclue = {};
+    };
+
+    systemd.services.geoclue = {
+      after = lib.optionals cfg.enableWifi [ "network-online.target" ];
+      # restart geoclue service when the configuration changes
+      restartTriggers = [
+        config.environment.etc."geoclue/geoclue.conf".source
+      ];
+      serviceConfig.StateDirectory = "geoclue";
+    };
+
+    # this needs to run as a user service, since it's associated with the
+    # user who is making the requests
+    systemd.user.services = mkIf cfg.enableDemoAgent {
+      geoclue-agent = {
+        description = "Geoclue agent";
+        # this should really be `partOf = [ "geoclue.service" ]`, but
+        # we can't be part of a system service, and the agent should
+        # be okay with the main service coming and going
+        wantedBy = [ "default.target" ];
+        after = lib.optionals cfg.enableWifi [ "network-online.target" ];
+        unitConfig.ConditionUser = "!@system";
+        serviceConfig = {
+          Type = "exec";
+          ExecStart = "${package}/libexec/geoclue-2.0/demos/agent";
+          Restart = "on-failure";
+          PrivateTmp = true;
+        };
+      };
+    };
+
+    services.geoclue2.appConfig.epiphany = {
+      isAllowed = true;
+      isSystem = false;
+    };
+
+    services.geoclue2.appConfig.firefox = {
+      isAllowed = true;
+      isSystem = false;
+    };
+
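+    # For illustration: with the firefox entry above, the generated
+    # geoclue.conf contains roughly
+    #   [firefox]
+    #   allowed=true
+    #   system=false
+    #   users=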
+    environment.etc."geoclue/geoclue.conf".text =
+      generators.toINI {} ({
+        agent = {
+          whitelist = concatStringsSep ";"
+            (optional cfg.enableDemoAgent "geoclue-demo-agent" ++ defaultWhitelist);
+        };
+        network-nmea = {
+          enable = cfg.enableNmea;
+        };
+        "3g" = {
+          enable = cfg.enable3G;
+        };
+        cdma = {
+          enable = cfg.enableCDMA;
+        };
+        modem-gps = {
+          enable = cfg.enableModemGPS;
+        };
+        wifi = {
+          enable = cfg.enableWifi;
+          url = cfg.geoProviderUrl;
+          submit-data = boolToString cfg.submitData;
+          submission-url = cfg.submissionUrl;
+          submission-nick = cfg.submissionNick;
+        };
+      } // mapAttrs' appConfigToINICompatible cfg.appConfig);
+  };
+
+  meta = with lib; {
+    maintainers = with maintainers; [ ] ++ teams.pantheon.members;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/at-spi2-core.nix b/nixpkgs/nixos/modules/services/desktops/gnome/at-spi2-core.nix
new file mode 100644
index 000000000000..d0320c1e6307
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/at-spi2-core.nix
@@ -0,0 +1,60 @@
+# at-spi2-core daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  ###### interface
+
+  # Added 2021-05-07
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "at-spi2-core" "enable" ]
+      [ "services" "gnome" "at-spi2-core" "enable" ]
+    )
+  ];
+
+  options = {
+
+    services.gnome.at-spi2-core = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable at-spi2-core, a service for the Assistive Technologies
+          available on the GNOME platform.
+
+          Enable this if you get the error or warning
+          `The name org.a11y.Bus was not provided by any .service files`.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge [
+    (mkIf config.services.gnome.at-spi2-core.enable {
+      environment.systemPackages = [ pkgs.at-spi2-core ];
+      services.dbus.packages = [ pkgs.at-spi2-core ];
+      systemd.packages = [ pkgs.at-spi2-core ];
+    })
+
+    (mkIf (!config.services.gnome.at-spi2-core.enable) {
+      environment.sessionVariables = {
+        NO_AT_BRIDGE = "1";
+        GTK_A11Y = "none";
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/evolution-data-server.nix b/nixpkgs/nixos/modules/services/desktops/gnome/evolution-data-server.nix
new file mode 100644
index 000000000000..a8db7dce8fdf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/evolution-data-server.nix
@@ -0,0 +1,71 @@
+# Evolution Data Server daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2021-05-07
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "evolution-data-server" "enable" ]
+      [ "services" "gnome" "evolution-data-server" "enable" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "evolution-data-server" "plugins" ]
+      [ "services" "gnome" "evolution-data-server" "plugins" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.evolution-data-server = {
+      enable = mkEnableOption (lib.mdDoc "Evolution Data Server, a collection of services for storing address books and calendars");
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        description = lib.mdDoc "Plugins for Evolution Data Server.";
+      };
+    };
+    programs.evolution = {
+      enable = mkEnableOption (lib.mdDoc "Evolution, a personal information management application that provides integrated mail, calendaring, and address book functionality");
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        example = literalExpression "[ pkgs.evolution-ews ]";
+        description = lib.mdDoc "Plugins for Evolution.";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config =
+    let
+      bundle = pkgs.evolutionWithPlugins.override { inherit (config.services.gnome.evolution-data-server) plugins; };
+    in
+    mkMerge [
+      (mkIf config.services.gnome.evolution-data-server.enable {
+        environment.systemPackages = [ bundle ];
+
+        services.dbus.packages = [ bundle ];
+
+        systemd.packages = [ bundle ];
+      })
+      (mkIf config.programs.evolution.enable {
+        services.gnome.evolution-data-server = {
+          enable = true;
+          plugins = [ pkgs.evolution ] ++ config.programs.evolution.plugins;
+        };
+        services.gnome.gnome-keyring.enable = true;
+      })
+    ];
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/glib-networking.nix b/nixpkgs/nixos/modules/services/desktops/gnome/glib-networking.nix
new file mode 100644
index 000000000000..6b54f46f0cf5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/glib-networking.nix
@@ -0,0 +1,45 @@
+# GLib Networking
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2021-05-07
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "glib-networking" "enable" ]
+      [ "services" "gnome" "glib-networking" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.glib-networking = {
+
+      enable = mkEnableOption (lib.mdDoc "network extensions for GLib");
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.gnome.glib-networking.enable {
+
+    services.dbus.packages = [ pkgs.glib-networking ];
+
+    systemd.packages = [ pkgs.glib-networking ];
+
+    environment.sessionVariables.GIO_EXTRA_MODULES = [ "${pkgs.glib-networking.out}/lib/gio/modules" ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/gnome-browser-connector.nix b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-browser-connector.nix
new file mode 100644
index 000000000000..4f680eabbe15
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-browser-connector.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mdDoc mkEnableOption mkIf mkRenamedOptionModule teams;
+in
+
+{
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  imports = [
+    # Added 2021-05-07
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "chrome-gnome-shell" "enable" ]
+      [ "services" "gnome" "gnome-browser-connector" "enable" ]
+    )
+    # Added 2022-07-25
+    (mkRenamedOptionModule
+      [ "services" "gnome" "chrome-gnome-shell" "enable" ]
+      [ "services" "gnome" "gnome-browser-connector" "enable" ]
+    )
+  ];
+
+  options = {
+    services.gnome.gnome-browser-connector.enable = mkEnableOption (mdDoc ''
+      native host connector for the GNOME Shell browser extension, a DBus service
+      that allows installing GNOME Shell extensions from a web browser
+    '');
+  };
+
+  config = mkIf config.services.gnome.gnome-browser-connector.enable {
+    environment.etc = {
+      "chromium/native-messaging-hosts/org.gnome.browser_connector.json".source = "${pkgs.gnome-browser-connector}/etc/chromium/native-messaging-hosts/org.gnome.browser_connector.json";
+      "opt/chrome/native-messaging-hosts/org.gnome.browser_connector.json".source = "${pkgs.gnome-browser-connector}/etc/opt/chrome/native-messaging-hosts/org.gnome.browser_connector.json";
+      # Legacy paths.
+      "chromium/native-messaging-hosts/org.gnome.chrome_gnome_shell.json".source = "${pkgs.gnome-browser-connector}/etc/chromium/native-messaging-hosts/org.gnome.chrome_gnome_shell.json";
+      "opt/chrome/native-messaging-hosts/org.gnome.chrome_gnome_shell.json".source = "${pkgs.gnome-browser-connector}/etc/opt/chrome/native-messaging-hosts/org.gnome.chrome_gnome_shell.json";
+    };
+
+    environment.systemPackages = [ pkgs.gnome-browser-connector ];
+
+    services.dbus.packages = [ pkgs.gnome-browser-connector ];
+
+    programs.firefox.nativeMessagingHosts.packages = [ pkgs.gnome-browser-connector ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/gnome-initial-setup.nix b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-initial-setup.nix
new file mode 100644
index 000000000000..f24e6f1eb155
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-initial-setup.nix
@@ -0,0 +1,98 @@
+# GNOME Initial Setup.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  # GNOME Initial Setup only runs if the gnome-initial-setup-done file
+  # does not exist in XDG_CONFIG_HOME. Because of this, every existing
+  # user would otherwise go through initial setup again, simply because
+  # they have never run it before.
+  #
+  # To prevent this we create the file if the system's stateVersion is
+  # older than 20.03 (the release in which this module was added).
+
+  script = pkgs.writeScript "create-gis-stamp-files" ''
+    #!${pkgs.runtimeShell}
+    setup_done=$HOME/.config/gnome-initial-setup-done
+
+    echo "Creating g-i-s stamp file $setup_done ..."
+    cat - > $setup_done <<- EOF
+    yes
+    EOF
+  '';
+
+  createGisStampFilesAutostart = pkgs.writeTextFile rec {
+    name = "create-g-i-s-stamp-files";
+    destination = "/etc/xdg/autostart/${name}.desktop";
+    text = ''
+      [Desktop Entry]
+      Type=Application
+      Name=Create GNOME Initial Setup stamp files
+      Exec=${script}
+      StartupNotify=false
+      NoDisplay=true
+      OnlyShowIn=GNOME;
+      AutostartCondition=unless-exists gnome-initial-setup-done
+      X-GNOME-Autostart-Phase=EarlyInitialization
+    '';
+  };
+
+in
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2021-05-07
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-initial-setup" "enable" ]
+      [ "services" "gnome" "gnome-initial-setup" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.gnome-initial-setup = {
+
+      enable = mkEnableOption (lib.mdDoc "GNOME Initial Setup, a simple, easy, and safe way to prepare a new system");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnome.gnome-initial-setup.enable {
+
+    environment.systemPackages = [
+      pkgs.gnome.gnome-initial-setup
+    ]
+    ++ optional (versionOlder config.system.stateVersion "20.03") createGisStampFilesAutostart
+    ;
+
+    systemd.packages = [
+      pkgs.gnome.gnome-initial-setup
+    ];
+
+    systemd.user.targets."gnome-session".wants = [
+      "gnome-initial-setup-copy-worker.service"
+      "gnome-initial-setup-first-login.service"
+      "gnome-welcome-tour.service"
+    ];
+
+    systemd.user.targets."gnome-session@gnome-initial-setup".wants = [
+      "gnome-initial-setup.service"
+    ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/gnome-keyring.nix b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-keyring.nix
new file mode 100644
index 000000000000..6c7e713b32d5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-keyring.nix
@@ -0,0 +1,63 @@
+# GNOME Keyring daemon.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2021-05-07
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-keyring" "enable" ]
+      [ "services" "gnome" "gnome-keyring" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.gnome-keyring = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable GNOME Keyring daemon, a service designed to
+          take care of the user's security credentials,
+          such as user names and passwords.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnome.gnome-keyring.enable {
+
+    environment.systemPackages = [ pkgs.gnome.gnome-keyring ];
+
+    services.dbus.packages = [ pkgs.gnome.gnome-keyring pkgs.gcr ];
+
+    xdg.portal.extraPortals = [ pkgs.gnome.gnome-keyring ];
+
+    security.pam.services.login.enableGnomeKeyring = true;
+
+    security.wrappers.gnome-keyring-daemon = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_ipc_lock=ep";
+      source = "${pkgs.gnome.gnome-keyring}/bin/gnome-keyring-daemon";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/gnome-online-accounts.nix b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-online-accounts.nix
new file mode 100644
index 000000000000..ed5e000cae3e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-online-accounts.nix
@@ -0,0 +1,51 @@
+# GNOME Online Accounts daemon.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2021-05-07
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-online-accounts" "enable" ]
+      [ "services" "gnome" "gnome-online-accounts" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.gnome-online-accounts = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable GNOME Online Accounts daemon, a service that provides
+          a single sign-on framework for the GNOME desktop.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnome.gnome-online-accounts.enable {
+
+    environment.systemPackages = [ pkgs.gnome-online-accounts ];
+
+    services.dbus.packages = [ pkgs.gnome-online-accounts ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/gnome-online-miners.nix b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-online-miners.nix
new file mode 100644
index 000000000000..7cf1bfa1b046
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-online-miners.nix
@@ -0,0 +1,51 @@
+# GNOME Online Miners daemon.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2021-05-07
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-online-miners" "enable" ]
+      [ "services" "gnome" "gnome-online-miners" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.gnome-online-miners = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable GNOME Online Miners, a service that
+          crawls through your online content.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnome.gnome-online-miners.enable {
+
+    environment.systemPackages = [ pkgs.gnome.gnome-online-miners ];
+
+    services.dbus.packages = [ pkgs.gnome.gnome-online-miners ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/gnome-remote-desktop.nix b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-remote-desktop.nix
new file mode 100644
index 000000000000..0a5b67eb2722
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-remote-desktop.nix
@@ -0,0 +1,32 @@
+# Remote desktop daemon using Pipewire.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2021-05-07
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-remote-desktop" "enable" ]
+      [ "services" "gnome" "gnome-remote-desktop" "enable" ]
+    )
+  ];
+
+  ###### interface
+  options = {
+    services.gnome.gnome-remote-desktop = {
+      enable = mkEnableOption (lib.mdDoc "Remote Desktop support using Pipewire");
+    };
+  };
+
+  ###### implementation
+  config = mkIf config.services.gnome.gnome-remote-desktop.enable {
+    services.pipewire.enable = true;
+
+    systemd.packages = [ pkgs.gnome.gnome-remote-desktop ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/gnome-settings-daemon.nix b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-settings-daemon.nix
new file mode 100644
index 000000000000..ca739b06a5a5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-settings-daemon.nix
@@ -0,0 +1,70 @@
+# GNOME Settings Daemon
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.gnome.gnome-settings-daemon;
+
+in
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  imports = [
+    (mkRemovedOptionModule
+      ["services" "gnome3" "gnome-settings-daemon" "package"]
+      "")
+
+    # Added 2021-05-07
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-settings-daemon" "enable" ]
+      [ "services" "gnome" "gnome-settings-daemon" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.gnome-settings-daemon = {
+
+      enable = mkEnableOption (lib.mdDoc "GNOME Settings Daemon");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [
+      pkgs.gnome.gnome-settings-daemon
+    ];
+
+    services.udev.packages = [
+      pkgs.gnome.gnome-settings-daemon
+    ];
+
+    systemd.packages = [
+      pkgs.gnome.gnome-settings-daemon
+    ];
+
+    systemd.user.targets."gnome-session-x11-services".wants = [
+      "org.gnome.SettingsDaemon.XSettings.service"
+    ];
+
+    systemd.user.targets."gnome-session-x11-services-ready".wants = [
+      "org.gnome.SettingsDaemon.XSettings.service"
+    ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/gnome-user-share.nix b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-user-share.nix
new file mode 100644
index 000000000000..0c88d13b343d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/gnome-user-share.nix
@@ -0,0 +1,48 @@
+# GNOME User Share daemon.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  imports = [
+    # Added 2021-05-07
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gnome-user-share" "enable" ]
+      [ "services" "gnome" "gnome-user-share" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.gnome-user-share = {
+
+      enable = mkEnableOption (lib.mdDoc "GNOME User Share, a user-level file sharing service for GNOME");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnome.gnome-user-share.enable {
+
+    environment.systemPackages = [
+      pkgs.gnome.gnome-user-share
+    ];
+
+    systemd.packages = [
+      pkgs.gnome.gnome-user-share
+    ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/rygel.nix b/nixpkgs/nixos/modules/services/desktops/gnome/rygel.nix
new file mode 100644
index 000000000000..9c0faaa4885b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/rygel.nix
@@ -0,0 +1,44 @@
+# rygel service.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  imports = [
+    # Added 2021-05-07
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "rygel" "enable" ]
+      [ "services" "gnome" "rygel" "enable" ]
+    )
+  ];
+
+  ###### interface
+  options = {
+    services.gnome.rygel = {
+      enable = mkOption {
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Rygel UPnP Mediaserver.
+
+          You will also need to allow UPnP connections in the firewall; see the following [comment](https://github.com/NixOS/nixpkgs/pull/45045#issuecomment-416030795).
+        '';
+        type = types.bool;
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf config.services.gnome.rygel.enable {
+    environment.systemPackages = [ pkgs.gnome.rygel ];
+
+    services.dbus.packages = [ pkgs.gnome.rygel ];
+
+    systemd.packages = [ pkgs.gnome.rygel ];
+
+    environment.etc."rygel.conf".source = "${pkgs.gnome.rygel}/etc/rygel.conf";
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/sushi.nix b/nixpkgs/nixos/modules/services/desktops/gnome/sushi.nix
new file mode 100644
index 000000000000..446851f434d8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/sushi.nix
@@ -0,0 +1,50 @@
+# GNOME Sushi daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  imports = [
+    # Added 2021-05-07
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "sushi" "enable" ]
+      [ "services" "gnome" "sushi" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.sushi = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Sushi, a quick previewer for nautilus.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnome.sushi.enable {
+
+    environment.systemPackages = [ pkgs.gnome.sushi ];
+
+    services.dbus.packages = [ pkgs.gnome.sushi ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/tracker-miners.nix b/nixpkgs/nixos/modules/services/desktops/gnome/tracker-miners.nix
new file mode 100644
index 000000000000..a3c58f374208
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/tracker-miners.nix
@@ -0,0 +1,54 @@
+# Tracker Miners daemons.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  imports = [
+    # Added 2021-05-07
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "tracker-miners" "enable" ]
+      [ "services" "gnome" "tracker-miners" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.tracker-miners = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Tracker miners, indexing services for the Tracker
+          search engine and metadata storage system.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.gnome.tracker-miners.enable {
+
+    environment.systemPackages = [ pkgs.tracker-miners ];
+
+    services.dbus.packages = [ pkgs.tracker-miners ];
+
+    systemd.packages = [ pkgs.tracker-miners ];
+
+    services.gnome.tracker.subcommandPackages = [ pkgs.tracker-miners ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gnome/tracker.nix b/nixpkgs/nixos/modules/services/desktops/gnome/tracker.nix
new file mode 100644
index 000000000000..e6404c84a26f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gnome/tracker.nix
@@ -0,0 +1,76 @@
+# Tracker daemon.
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gnome.tracker;
+in
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  imports = [
+    # Added 2021-05-07
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "tracker" "enable" ]
+      [ "services" "gnome" "tracker" "enable" ]
+    )
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gnome.tracker = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Tracker services, a search engine,
+          search tool and metadata storage system.
+        '';
+      };
+
+      subcommandPackages = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        internal = true;
+        description = lib.mdDoc ''
+          List of packages containing tracker3 subcommands.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.tracker ];
+
+    services.dbus.packages = [ pkgs.tracker ];
+
+    systemd.packages = [ pkgs.tracker ];
+
+    environment.variables = {
+      TRACKER_CLI_SUBCOMMANDS_DIR =
+        let
+          subcommandPackagesTree = pkgs.symlinkJoin {
+            name = "tracker-with-subcommands-${pkgs.tracker.version}";
+            paths = [ pkgs.tracker ] ++ cfg.subcommandPackages;
+          };
+        in
+        "${subcommandPackagesTree}/libexec/tracker3";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gsignond.nix b/nixpkgs/nixos/modules/services/desktops/gsignond.nix
new file mode 100644
index 000000000000..cf80fd75452b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gsignond.nix
@@ -0,0 +1,45 @@
+# Accounts-SSO gSignOn daemon
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  package = pkgs.gsignond.override { plugins = config.services.gsignond.plugins; };
+in
+{
+
+  meta.maintainers = teams.pantheon.members;
+
+  ###### interface
+
+  options = {
+
+    services.gsignond = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable gSignOn daemon, a DBus service
+          which performs user authentication on behalf of its clients.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = lib.mdDoc ''
+          What plugins to use with the gSignOn daemon.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf config.services.gsignond.enable {
+    environment.etc."gsignond.conf".source = "${package}/etc/gsignond.conf";
+    services.dbus.packages = [ package ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/gvfs.nix b/nixpkgs/nixos/modules/services/desktops/gvfs.nix
new file mode 100644
index 000000000000..7e15b433fcc2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/gvfs.nix
@@ -0,0 +1,66 @@
+# GVfs
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.gvfs;
+
+in
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  # Added 2019-08-19
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "gvfs" "enable" ]
+      [ "services" "gvfs" "enable" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.gvfs = {
+
+      enable = mkEnableOption (lib.mdDoc "GVfs, a userspace virtual filesystem");
+
+      # gvfs can be built with multiple configurations
+      package = mkOption {
+        type = types.package;
+        default = pkgs.gnome.gvfs;
+        defaultText = literalExpression "pkgs.gnome.gvfs";
+        description = lib.mdDoc "Which GVfs package to use.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    services.dbus.packages = [ cfg.package ];
+
+    systemd.packages = [ cfg.package ];
+
+    services.udev.packages = [ pkgs.libmtp.out ];
+
+    services.udisks2.enable = true;
+
+    # Needed for unwrapped applications
+    environment.sessionVariables.GIO_EXTRA_MODULES = [ "${cfg.package}/lib/gio/modules" ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/malcontent.nix b/nixpkgs/nixos/modules/services/desktops/malcontent.nix
new file mode 100644
index 000000000000..27b4577f4c2a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/malcontent.nix
@@ -0,0 +1,40 @@
+# Malcontent daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.malcontent = {
+
+      enable = mkEnableOption (lib.mdDoc "Malcontent, parental control support for applications");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.malcontent.enable {
+
+    environment.systemPackages = with pkgs; [
+      malcontent
+      malcontent-ui
+    ];
+
+    services.dbus.packages = [
+      # D-Bus services are in `out`, not the default `bin` output that would be picked up by `makeDbusConf`.
+      pkgs.malcontent.out
+    ];
+
+    services.accounts-daemon.enable = true;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/neard.nix b/nixpkgs/nixos/modules/services/desktops/neard.nix
new file mode 100644
index 000000000000..9130b8d3d216
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/neard.nix
@@ -0,0 +1,23 @@
+# neard service.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+  options = {
+    services.neard = {
+      enable = mkEnableOption (lib.mdDoc "neard, NFC daemon");
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf config.services.neard.enable {
+    environment.systemPackages = [ pkgs.neard ];
+
+    services.dbus.packages = [ pkgs.neard ];
+
+    systemd.packages = [ pkgs.neard ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix b/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix
new file mode 100644
index 000000000000..07ca2727cf48
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/pipewire/pipewire.nix
@@ -0,0 +1,183 @@
+# pipewire service.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.pipewire;
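+  # 32-bit ALSA plugins are only enabled when requested, on x86_64, and when
+  # an i686 PipeWire build is available.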
+  enable32BitAlsaPlugins = cfg.alsa.support32Bit
+                           && pkgs.stdenv.isx86_64
+                           && pkgs.pkgsi686Linux.pipewire != null;
+
+  # The package doesn't output to $out/lib/pipewire directly so that the
+  # overlays can use the outputs to replace the originals in FHS environments.
+  #
+  # This doesn't work in general because of missing development information.
+  jack-libs = pkgs.runCommand "jack-libs" {} ''
+    mkdir -p "$out/lib"
+    ln -s "${cfg.package.jack}/lib" "$out/lib/pipewire"
+  '';
+in {
+  meta.maintainers = teams.freedesktop.members ++ [ lib.maintainers.k900 ];
+
+  ###### interface
+  options = {
+    services.pipewire = {
+      enable = mkEnableOption (lib.mdDoc "pipewire service");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.pipewire;
+        defaultText = literalExpression "pkgs.pipewire";
+        description = lib.mdDoc ''
+          The pipewire derivation to use.
+        '';
+      };
+
+      socketActivation = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Automatically run pipewire when connections are made to the pipewire socket.
+        '';
+      };
+
+      audio = {
+        enable = lib.mkOption {
+          type = lib.types.bool;
+          # this is for backwards compatibility
+          default = cfg.alsa.enable || cfg.jack.enable || cfg.pulse.enable;
+          defaultText = lib.literalExpression "config.services.pipewire.alsa.enable || config.services.pipewire.jack.enable || config.services.pipewire.pulse.enable";
+          description = lib.mdDoc "Whether to use PipeWire as the primary sound server";
+        };
+      };
+
+      alsa = {
+        enable = mkEnableOption (lib.mdDoc "ALSA support");
+        support32Bit = mkEnableOption (lib.mdDoc "32-bit ALSA support on 64-bit systems");
+      };
+
+      jack = {
+        enable = mkEnableOption (lib.mdDoc "JACK audio emulation");
+      };
+
+      pulse = {
+        enable = mkEnableOption (lib.mdDoc "PulseAudio server emulation");
+      };
+
+      systemWide = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If true, a system-wide PipeWire service and socket is enabled,
+          allowing all users in the "pipewire" group to use it simultaneously.
+          If false, user units are used instead, restricting access to
+          only one user.
+
+          Enabling system-wide PipeWire is, however, not recommended; it is
+          disabled by default, as advised by upstream in
+          https://github.com/PipeWire/pipewire/blob/master/NEWS
+        '';
+      };
+    };
+  };
+
+  imports = [
+    (lib.mkRemovedOptionModule ["services" "pipewire" "config"] ''
+      Overriding default Pipewire configuration through NixOS options never worked correctly and is no longer supported.
+      Please create drop-in files in /etc/pipewire/pipewire.conf.d/ to make the desired setting changes instead.
+    '')
+
+    (lib.mkRemovedOptionModule ["services" "pipewire" "media-session"] ''
+      pipewire-media-session is no longer supported upstream and has been removed.
+      Please switch to `services.pipewire.wireplumber` instead.
+    '')
+  ];
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.audio.enable -> !config.hardware.pulseaudio.enable;
+        message = "Using PipeWire as the sound server conflicts with PulseAudio. This option requires `hardware.pulseaudio.enable` to be set to false";
+      }
+      {
+        assertion = cfg.jack.enable -> !config.services.jack.jackd.enable;
+        message = "PipeWire based JACK emulation doesn't use the JACK service. This option requires `services.jack.jackd.enable` to be set to false";
+      }
+      {
+        # JACK intentionally not checked, as PW-on-JACK setups are a thing that some people may want
+        assertion = (cfg.alsa.enable || cfg.pulse.enable) -> cfg.audio.enable;
+        message = "Using PipeWire's ALSA/PulseAudio compatibility layers requires running PipeWire as the sound server. Set `services.pipewire.audio.enable` to true.";
+      }
+    ];
+
+    environment.systemPackages = [ cfg.package ]
+                                 ++ lib.optional cfg.jack.enable jack-libs;
+
+    systemd.packages = [ cfg.package ];
+
+    # PipeWire depends on D-Bus but doesn't list it. Without this, booting
+    # into a terminal results in the service crashing with an error.
+    systemd.services.pipewire.bindsTo = [ "dbus.service" ];
+    systemd.user.services.pipewire.bindsTo = [ "dbus.service" ];
+
+    # Enable either system or user units.  Note that for pipewire-pulse there
+    # are only user units, which work in both cases.
+    systemd.sockets.pipewire.enable = cfg.systemWide;
+    systemd.services.pipewire.enable = cfg.systemWide;
+    systemd.user.sockets.pipewire.enable = !cfg.systemWide;
+    systemd.user.services.pipewire.enable = !cfg.systemWide;
+
+    # Mask pw-pulse if it's not wanted
+    systemd.user.services.pipewire-pulse.enable = cfg.pulse.enable;
+    systemd.user.sockets.pipewire-pulse.enable = cfg.pulse.enable;
+
+    systemd.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
+    systemd.user.sockets.pipewire-pulse.wantedBy = lib.mkIf cfg.socketActivation [ "sockets.target" ];
+
+    services.udev.packages = [ cfg.package ];
+
+    # If any paths are updated here they must also be updated in the package test.
+    environment.etc."alsa/conf.d/49-pipewire-modules.conf" = mkIf cfg.alsa.enable {
+      text = ''
+        pcm_type.pipewire {
+          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;
+          ${optionalString enable32BitAlsaPlugins
+            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_pcm_pipewire.so ;"}
+        }
+        ctl_type.pipewire {
+          libs.native = ${cfg.package}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;
+          ${optionalString enable32BitAlsaPlugins
+            "libs.32Bit = ${pkgs.pkgsi686Linux.pipewire}/lib/alsa-lib/libasound_module_ctl_pipewire.so ;"}
+        }
+      '';
+    };
+    environment.etc."alsa/conf.d/50-pipewire.conf" = mkIf cfg.alsa.enable {
+      source = "${cfg.package}/share/alsa/alsa.conf.d/50-pipewire.conf";
+    };
+    environment.etc."alsa/conf.d/99-pipewire-default.conf" = mkIf cfg.alsa.enable {
+      source = "${cfg.package}/share/alsa/alsa.conf.d/99-pipewire-default.conf";
+    };
+
+    environment.sessionVariables.LD_LIBRARY_PATH =
+      lib.mkIf cfg.jack.enable [ "${cfg.package.jack}/lib" ];
+
+    users = lib.mkIf cfg.systemWide {
+      users.pipewire = {
+        uid = config.ids.uids.pipewire;
+        group = "pipewire";
+        extraGroups = [
+          "audio"
+          "video"
+        ] ++ lib.optional config.security.rtkit.enable "rtkit";
+        description = "Pipewire system service user";
+        isSystemUser = true;
+        home = "/var/lib/pipewire";
+        createHome = true;
+      };
+      groups.pipewire.gid = config.ids.gids.pipewire;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix b/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix
new file mode 100644
index 000000000000..95a7ece26c5d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/pipewire/wireplumber.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ... }:
+
+let
+  pwCfg = config.services.pipewire;
+  cfg = pwCfg.wireplumber;
+  pwUsedForAudio = pwCfg.audio.enable;
+in
+{
+  meta.maintainers = [ lib.maintainers.k900 ];
+
+  options = {
+    services.pipewire.wireplumber = {
+      enable = lib.mkOption {
+        type = lib.types.bool;
+        default = config.services.pipewire.enable;
+        defaultText = lib.literalExpression "config.services.pipewire.enable";
+        description = lib.mdDoc "Whether to enable Wireplumber, a modular session / policy manager for PipeWire";
+      };
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.wireplumber;
+        defaultText = lib.literalExpression "pkgs.wireplumber";
+        description = lib.mdDoc "The wireplumber derivation to use.";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !config.hardware.bluetooth.hsphfpd.enable;
+        message = "Using Wireplumber conflicts with hsphfpd, as it provides the same functionality. `hardware.bluetooth.hsphfpd.enable` needs be set to false";
+      }
+    ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc."wireplumber/main.lua.d/80-nixos.lua" = lib.mkIf (!pwUsedForAudio) {
+      text = ''
+        -- Pipewire is not used for audio, so prevent it from grabbing audio devices
+        alsa_monitor.enable = function() end
+      '';
+    };
+    environment.etc."wireplumber/main.lua.d/80-systemwide.lua" = lib.mkIf config.services.pipewire.systemWide {
+      text = ''
+        -- When running system-wide, these settings need to be disabled (they
+        -- use functions that aren't available on the system dbus).
+        alsa_monitor.properties["alsa.reserve"] = false
+        default_access.properties["enable-flatpak-portal"] = false
+      '';
+    };
+    environment.etc."wireplumber/bluetooth.lua.d/80-systemwide.lua" = lib.mkIf config.services.pipewire.systemWide {
+      text = ''
+        -- When running system-wide, logind-integration needs to be disabled.
+        bluez_monitor.properties["with-logind"] = false
+      '';
+    };
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.services.wireplumber.enable = config.services.pipewire.systemWide;
+    systemd.user.services.wireplumber.enable = !config.services.pipewire.systemWide;
+
+    systemd.services.wireplumber.wantedBy = [ "pipewire.service" ];
+    systemd.user.services.wireplumber.wantedBy = [ "pipewire.service" ];
+
+    systemd.services.wireplumber.environment = lib.mkIf config.services.pipewire.systemWide {
+      # Force wireplumber to use system dbus.
+      DBUS_SESSION_BUS_ADDRESS = "unix:path=/run/dbus/system_bus_socket";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/profile-sync-daemon.nix b/nixpkgs/nixos/modules/services/desktops/profile-sync-daemon.nix
new file mode 100644
index 000000000000..e307c6735004
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/profile-sync-daemon.nix
@@ -0,0 +1,77 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.psd;
+in {
+  options.services.psd = with types; {
+    enable = mkOption {
+      type = bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the Profile Sync daemon.
+      '';
+    };
+    resyncTimer = mkOption {
+      type = str;
+      default = "1h";
+      example = "1h 30min";
+      description = lib.mdDoc ''
+        The amount of time to wait before syncing browser profiles back to the
+        disk.
+
+        Takes a systemd.unit time span. The time unit defaults to seconds if
+        omitted.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd = {
+      user = {
+        services = {
+          psd = {
+            enable = true;
+            description = "Profile Sync daemon";
+            wants = [ "psd-resync.service" ];
+            wantedBy = [ "default.target" ];
+            path = with pkgs; [ rsync kmod gawk nettools util-linux profile-sync-daemon ];
+            unitConfig = {
+              RequiresMountsFor = [ "/home/" ];
+            };
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = "yes";
+              ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon sync";
+              ExecStop = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon unsync";
+            };
+          };
+
+          psd-resync = {
+            enable = true;
+            description = "Timed profile resync";
+            after = [ "psd.service" ];
+            wants = [ "psd-resync.timer" ];
+            partOf = [ "psd.service" ];
+            wantedBy = [ "default.target" ];
+            path = with pkgs; [ rsync kmod gawk nettools util-linux profile-sync-daemon ];
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = "${pkgs.profile-sync-daemon}/bin/profile-sync-daemon resync";
+            };
+          };
+        };
+
+        timers.psd-resync = {
+          description = "Timer for profile sync daemon - ${cfg.resyncTimer}";
+          partOf = [ "psd-resync.service" "psd.service" ];
+
+          timerConfig = {
+            OnUnitActiveSec = "${cfg.resyncTimer}";
+          };
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/system-config-printer.nix b/nixpkgs/nixos/modules/services/desktops/system-config-printer.nix
new file mode 100644
index 000000000000..caebfabf146c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/system-config-printer.nix
@@ -0,0 +1,42 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.system-config-printer = {
+
+      enable = mkEnableOption (lib.mdDoc "system-config-printer, a service for CUPS administration used by printing interfaces");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.system-config-printer.enable {
+
+    services.dbus.packages = [
+      pkgs.system-config-printer
+    ];
+
+    systemd.packages = [
+      pkgs.system-config-printer
+    ];
+
+    services.udev.packages = [
+      pkgs.system-config-printer
+    ];
+
+    # for $out/bin/install-printer-driver
+    # TODO: Enable once #177946 is resolved
+    # services.packagekit.enable = true;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/system76-scheduler.nix b/nixpkgs/nixos/modules/services/desktops/system76-scheduler.nix
new file mode 100644
index 000000000000..267b528cc5dd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/system76-scheduler.nix
@@ -0,0 +1,296 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.system76-scheduler;
+
+  inherit (builtins) concatStringsSep map toString attrNames;
+  inherit (lib) boolToString types mkOption literalExpression mdDoc optional mkIf mkMerge;
+  inherit (types) nullOr listOf bool int ints float str enum;
+
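+  # Generate an attribute set of mkOption declarations from `optionSpecs`,
+  # filling in each option's default from `defaults` when the spec itself
+  # doesn't provide one.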
+  withDefaults = optionSpecs: defaults:
+    lib.genAttrs (attrNames optionSpecs) (name:
+      mkOption (optionSpecs.${name} // {
+        default = optionSpecs.${name}.default or defaults.${name} or null;
+      }));
+
+  latencyProfile = withDefaults {
+    latency = {
+      type = int;
+      description = mdDoc "`sched_latency_ns`.";
+    };
+    nr-latency = {
+      type = int;
+      description = mdDoc "`sched_nr_latency`.";
+    };
+    wakeup-granularity = {
+      type = float;
+      description = mdDoc "`sched_wakeup_granularity_ns`.";
+    };
+    bandwidth-size = {
+      type = int;
+      description = mdDoc "`sched_cfs_bandwidth_slice_us`.";
+    };
+    preempt = {
+      type = enum [ "none" "voluntary" "full" ];
+      description = mdDoc "Preemption mode.";
+    };
+  };
+  schedulerProfile = withDefaults {
+    nice = {
+      type = nullOr (ints.between (-20) 19);
+      description = mdDoc "Niceness.";
+    };
+    class = {
+      type = nullOr (enum [ "idle" "batch" "other" "rr" "fifo" ]);
+      example = literalExpression "\"batch\"";
+      description = mdDoc "CPU scheduler class.";
+    };
+    prio = {
+      type = nullOr (ints.between 1 99);
+      example = literalExpression "49";
+      description = mdDoc "CPU scheduler priority.";
+    };
+    ioClass = {
+      type = nullOr (enum [ "idle" "best-effort" "realtime" ]);
+      example = literalExpression "\"best-effort\"";
+      description = mdDoc "IO scheduler class.";
+    };
+    ioPrio = {
+      type = nullOr (ints.between 0 7);
+      example = literalExpression "4";
+      description = mdDoc "IO scheduler priority.";
+    };
+    matchers = {
+      type = nullOr (listOf str);
+      default = [];
+      example = literalExpression ''
+        [
+          "include cgroup=\"/user.slice/*.service\" parent=\"systemd\""
+          "emacs"
+        ]
+      '';
+      description = mdDoc "Process matchers.";
+    };
+  };
+
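+  # Render one CFS profile from `settings.cfsProfiles.<name>` as a single
+  # "<name> latency=... nr-latency=... wakeup-granularity=... bandwidth-size=... preempt=..." line.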
+  cfsProfileToString = name: let
+    p = cfg.settings.cfsProfiles.${name};
+  in
+    "${name} latency=${toString p.latency} nr-latency=${toString p.nr-latency} wakeup-granularity=${toString p.wakeup-granularity} bandwidth-size=${toString p.bandwidth-size} preempt=\"${p.preempt}\"";
+
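+  # Render a scheduler class/priority pair: a bare quoted class when no
+  # priority is given, otherwise "(class)priority".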
+  prioToString = class: prio: if prio == null then "\"${class}\"" else "(${class})${toString prio}";
+
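+  # Render one process-scheduler assignment: the profile name followed by the
+  # nice/sched/io fields that are set, plus an optional matcher block.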
+  schedulerProfileToString = name: a: indent:
+    concatStringsSep " "
+      (["${indent}${name}"]
+       ++ (optional (a.nice != null) "nice=${toString a.nice}")
+       ++ (optional (a.class != null) "sched=${prioToString a.class a.prio}")
+       ++ (optional (a.ioClass != null) "io=${prioToString a.ioClass a.ioPrio}")
+       ++ (optional ((builtins.length a.matchers) != 0) ("{\n${concatStringsSep "\n" (map (m: "  ${indent}${m}") a.matchers)}\n${indent}}")));
+
+in {
+  options = {
+    services.system76-scheduler = {
+      enable = lib.mkEnableOption (lib.mdDoc "system76-scheduler");
+
+      package = mkOption {
+        type = types.package;
+        default = config.boot.kernelPackages.system76-scheduler;
+        defaultText = literalExpression "config.boot.kernelPackages.system76-scheduler";
+        description = mdDoc "Which System76-Scheduler package to use.";
+      };
+
+      useStockConfig = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Use the (reasonable and featureful) stock configuration.
+
+          When this option is `true`, `services.system76-scheduler.settings`
+          are ignored.
+        '';
+      };
+
+      settings = {
+        cfsProfiles = {
+          enable = mkOption {
+            type = bool;
+            default = true;
+            description = mdDoc "Tweak CFS latency parameters when going on/off battery";
+          };
+
+          default = latencyProfile {
+            latency = 6;
+            nr-latency = 8;
+            wakeup-granularity = 1.0;
+            bandwidth-size = 5;
+            preempt = "voluntary";
+          };
+          responsive = latencyProfile {
+            latency = 4;
+            nr-latency = 10;
+            wakeup-granularity = 0.5;
+            bandwidth-size = 3;
+            preempt = "full";
+          };
+        };
+
+        processScheduler = {
+          enable = mkOption {
+            type = bool;
+            default = true;
+            description = mdDoc "Tweak scheduling of individual processes in real time.";
+          };
+
+          useExecsnoop = mkOption {
+            type = bool;
+            default = true;
+            description = mdDoc "Use execsnoop (otherwise poll the precess list periodically).";
+          };
+
+          refreshInterval = mkOption {
+            type = int;
+            default = 60;
+            description = mdDoc "Process list poll interval, in seconds";
+          };
+
+          foregroundBoost = {
+            enable = mkOption {
+              type = bool;
+              default = true;
+              description = mdDoc ''
+                Boost foreground process priorities.
+
+                (And de-boost background ones.)  Note that this option needs cooperation
+                from the desktop environment to work.  On GNOME the client side is
+                implemented by the "System76 Scheduler" shell extension.
+              '';
+            };
+            foreground = schedulerProfile {
+              nice = 0;
+              ioClass = "best-effort";
+              ioPrio = 0;
+            };
+            background = schedulerProfile {
+              nice = 6;
+              ioClass = "idle";
+            };
+          };
+
+          pipewireBoost = {
+            enable = mkOption {
+              type = bool;
+              default = true;
+              description = mdDoc "Boost Pipewire client priorities.";
+            };
+            profile = schedulerProfile {
+              nice = -6;
+              ioClass = "best-effort";
+              ioPrio = 0;
+            };
+          };
+        };
+      };
+
+      assignments = mkOption {
+        type = types.attrsOf (types.submodule {
+          options = schedulerProfile { };
+        });
+        default = {};
+        example = literalExpression ''
+          {
+            nix-builds = {
+              nice = 15;
+              class = "batch";
+              ioClass = "idle";
+              matchers = [
+                "nix-daemon"
+              ];
+            };
+          }
+        '';
+        description = mdDoc "Process profile assignments.";
+      };
+
+      exceptions = mkOption {
+        type = types.listOf str;
+        default = [];
+        example = literalExpression ''
+          [
+            "include descends=\"schedtool\""
+            "schedtool"
+          ]
+        '';
+        description = mdDoc "Processes that are left alone.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    services.dbus.packages = [ cfg.package ];
+
+    systemd.services.system76-scheduler = {
+      description = "Manage process priorities and CFS scheduler latencies for improved responsiveness on the desktop";
+      wantedBy = [ "multi-user.target" ];
+      path = [
+        # execsnoop needs those to extract kernel headers:
+        pkgs.kmod
+        pkgs.gnutar
+        pkgs.xz
+      ];
+      serviceConfig = {
+        Type = "dbus";
+        BusName = "com.system76.Scheduler";
+        ExecStart = "${cfg.package}/bin/system76-scheduler daemon";
+        ExecReload = "${cfg.package}/bin/system76-scheduler daemon reload";
+      };
+    };
+
+    environment.etc = mkMerge [
+      (mkIf cfg.useStockConfig {
+        # No custom settings: just use stock configuration with a fix for Pipewire
+        "system76-scheduler/config.kdl".source = "${cfg.package}/data/config.kdl";
+        "system76-scheduler/process-scheduler/00-dist.kdl".source = "${cfg.package}/data/pop_os.kdl";
+        "system76-scheduler/process-scheduler/01-fix-pipewire-paths.kdl".source = ../../../../pkgs/os-specific/linux/system76-scheduler/01-fix-pipewire-paths.kdl;
+      })
+
+      (let
+        settings = cfg.settings;
+        cfsp = settings.cfsProfiles;
+        ps = settings.processScheduler;
+      in mkIf (!cfg.useStockConfig) {
+        "system76-scheduler/config.kdl".text = ''
+          version "2.0"
+          autogroup-enabled false
+          cfs-profiles enable=${boolToString cfsp.enable} {
+            ${cfsProfileToString "default"}
+            ${cfsProfileToString "responsive"}
+          }
+          process-scheduler enable=${boolToString ps.enable} {
+            execsnoop ${boolToString ps.useExecsnoop}
+            refresh-rate ${toString ps.refreshInterval}
+            assignments {
+              ${if ps.foregroundBoost.enable then (schedulerProfileToString "foreground" ps.foregroundBoost.foreground "    ") else ""}
+              ${if ps.foregroundBoost.enable then (schedulerProfileToString "background" ps.foregroundBoost.background "    ") else ""}
+              ${if ps.pipewireBoost.enable then (schedulerProfileToString "pipewire" ps.pipewireBoost.profile "    ") else ""}
+            }
+          }
+        '';
+      })
+
+      {
+        "system76-scheduler/process-scheduler/02-config.kdl".text =
+          "exceptions {\n${concatStringsSep "\n" (map (e: "  ${e}") cfg.exceptions)}\n}\n"
+          + "assignments {\n"
+          + (concatStringsSep "\n" (map (name: schedulerProfileToString name cfg.assignments.${name} "  ")
+            (attrNames cfg.assignments)))
+          + "\n}\n";
+      }
+    ];
+  };
+
+  meta = {
+    maintainers = [ lib.maintainers.cmm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/telepathy.nix b/nixpkgs/nixos/modules/services/desktops/telepathy.nix
new file mode 100644
index 000000000000..cdc6eb26de7e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/telepathy.nix
@@ -0,0 +1,48 @@
+# Telepathy daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  ###### interface
+
+  options = {
+
+    services.telepathy = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Telepathy service, a communications framework
+          that enables real-time communication via pluggable protocol backends.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.telepathy.enable {
+
+    environment.systemPackages = [ pkgs.telepathy-mission-control ];
+
+    services.dbus.packages = [ pkgs.telepathy-mission-control ];
+
+    # Enable runtime optional telepathy in gnome-shell
+    services.xserver.desktopManager.gnome.sessionPath = with pkgs; [
+      telepathy-glib
+      telepathy-logger
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/tumbler.nix b/nixpkgs/nixos/modules/services/desktops/tumbler.nix
new file mode 100644
index 000000000000..203071ec660d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/tumbler.nix
@@ -0,0 +1,52 @@
+# Tumbler
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.tumbler;
+
+in
+
+{
+
+  imports = [
+    (mkRemovedOptionModule
+      [ "services" "tumbler" "package" ]
+      "")
+  ];
+
+  meta = with lib; {
+    maintainers = with maintainers; [ ] ++ teams.pantheon.members;
+  };
+
+  ###### interface
+
+  options = {
+
+    services.tumbler = {
+
+      enable = mkEnableOption (lib.mdDoc "Tumbler, A D-Bus thumbnailer service");
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = with pkgs.xfce; [
+      tumbler
+    ];
+
+    services.dbus.packages = with pkgs.xfce; [
+      tumbler
+    ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/desktops/zeitgeist.nix b/nixpkgs/nixos/modules/services/desktops/zeitgeist.nix
new file mode 100644
index 000000000000..0eb2a4c9c371
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/desktops/zeitgeist.nix
@@ -0,0 +1,31 @@
+# Zeitgeist
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  meta = with lib; {
+    maintainers = with maintainers; [ ] ++ teams.pantheon.members;
+  };
+
+  ###### interface
+
+  options = {
+    services.zeitgeist = {
+      enable = mkEnableOption (lib.mdDoc "zeitgeist");
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.zeitgeist.enable {
+
+    environment.systemPackages = [ pkgs.zeitgeist ];
+
+    services.dbus.packages = [ pkgs.zeitgeist ];
+
+    systemd.packages = [ pkgs.zeitgeist ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/development/blackfire.md b/nixpkgs/nixos/modules/services/development/blackfire.md
new file mode 100644
index 000000000000..e2e7e4780c79
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/blackfire.md
@@ -0,0 +1,39 @@
+# Blackfire profiler {#module-services-blackfire}
+
+*Source:* {file}`modules/services/development/blackfire.nix`
+
+*Upstream documentation:* <https://blackfire.io/docs/introduction>
+
+[Blackfire](https://blackfire.io) is a proprietary tool for profiling applications. There are several languages supported by the product but currently only PHP support is packaged in Nixpkgs. The back-end consists of a module that is loaded into the language runtime (called *probe*) and a service (*agent*) that the probe connects to and that sends the profiles to the server.
+
+To use it, you will need to enable the agent and the probe on your server. The exact method will depend on the way you use PHP, but here is an example NixOS configuration for PHP-FPM:
+```
+let
+  php = pkgs.php.withExtensions ({ enabled, all }: enabled ++ (with all; [
+    blackfire
+  ]));
+in {
+  # Enable the probe extension for PHP-FPM.
+  services.phpfpm = {
+    phpPackage = php;
+  };
+
+  # Enable and configure the agent.
+  services.blackfire-agent = {
+    enable = true;
+    settings = {
+      # You will need to get credentials at https://blackfire.io/my/settings/credentials
+      # You can also use other options described in https://blackfire.io/docs/up-and-running/configuration/agent
+      server-id = "XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX";
+      server-token = "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX";
+    };
+  };
+
+  # Make the agent run on start-up.
+  # (WantedBy= from the upstream unit not respected: https://github.com/NixOS/nixpkgs/issues/81138)
+  # Alternately, you can start it manually with `systemctl start blackfire-agent`.
+  systemd.services.blackfire-agent.wantedBy = [ "phpfpm-foo.service" ];
+}
+```
+
+On your developer machine, you will also want to install [the client](https://blackfire.io/docs/up-and-running/installation#install-a-profiling-client) (see `blackfire` package) or the browser extension to actually trigger the profiling.
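+
+For example, a minimal sketch of the corresponding developer-machine configuration (assuming you only want the CLI client from the `blackfire` package; see the upstream client documentation for how to actually trigger a profile):
+```
+{ pkgs, ... }:
+{
+  # Install the Blackfire CLI client for triggering profiles.
+  environment.systemPackages = [ pkgs.blackfire ];
+}
+```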
diff --git a/nixpkgs/nixos/modules/services/development/blackfire.nix b/nixpkgs/nixos/modules/services/development/blackfire.nix
new file mode 100644
index 000000000000..3c98d7a281c6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/blackfire.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.blackfire-agent;
+
+  agentConfigFile = lib.generators.toINI {} {
+    blackfire =  cfg.settings;
+  };
+
+  agentSock = "blackfire/agent.sock";
+in {
+  meta = {
+    maintainers = pkgs.blackfire.meta.maintainers;
+    doc = ./blackfire.md;
+  };
+
+  options = {
+    services.blackfire-agent = {
+      enable = lib.mkEnableOption (lib.mdDoc "Blackfire profiler agent");
+      settings = lib.mkOption {
+        description = lib.mdDoc ''
+          See https://blackfire.io/docs/up-and-running/configuration/agent
+        '';
+        type = lib.types.submodule {
+          freeformType = with lib.types; attrsOf str;
+
+          options = {
+            server-id = lib.mkOption {
+              type = lib.types.str;
+              description = lib.mdDoc ''
+                Sets the server id used to authenticate with Blackfire
+
+                You can find your personal server-id at https://blackfire.io/my/settings/credentials
+              '';
+            };
+
+            server-token = lib.mkOption {
+              type = lib.types.str;
+              description = lib.mdDoc ''
+                Sets the server token used to authenticate with Blackfire
+
+                You can find your personal server-token at https://blackfire.io/my/settings/credentials
+              '';
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.etc."blackfire/agent".text = agentConfigFile;
+
+    services.blackfire-agent.settings.socket = "unix:///run/${agentSock}";
+
+    systemd.packages = [
+      pkgs.blackfire
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/development/bloop.nix b/nixpkgs/nixos/modules/services/development/bloop.nix
new file mode 100644
index 000000000000..27da76a74432
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/bloop.nix
@@ -0,0 +1,54 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.bloop;
+
+in {
+
+  options.services.bloop = {
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [
+        "-J-Xmx2G"
+        "-J-XX:MaxInlineLevel=20"
+        "-J-XX:+UseParallelGC"
+      ];
+      description = lib.mdDoc ''
+        Specifies additional command line arguments to pass to the Bloop
+        Java process.
+      '';
+    };
+
+    install = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to install a user service for the Bloop server.
+
+        The service must be manually started for each user with
+        "systemctl --user start bloop".
+      '';
+    };
+  };
+
+  config = mkIf (cfg.install) {
+    systemd.user.services.bloop = {
+      description = "Bloop Scala build server";
+
+      environment = {
+        PATH = mkForce "${makeBinPath [ config.programs.java.package ]}";
+      };
+      serviceConfig = {
+        Type        = "simple";
+        ExecStart   = "${pkgs.bloop}/bin/bloop server";
+        Restart     = "always";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.bloop ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/development/distccd.nix b/nixpkgs/nixos/modules/services/development/distccd.nix
new file mode 100644
index 000000000000..a3c909eb1959
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/distccd.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.distccd;
+in
+{
+  options = {
+    services.distccd = {
+      enable = mkEnableOption (lib.mdDoc "distccd");
+
+      allowedClients = mkOption {
+        type = types.listOf types.str;
+        default = [ "127.0.0.1" ];
+        example = [ "127.0.0.1" "192.168.0.0/24" "10.0.0.0/24" ];
+        description = lib.mdDoc ''
+          Client IPs, in CIDR notation, which are allowed to connect to distccd.
+
+          Anyone who can connect to the distccd server can run arbitrary
+          commands on that system as the distcc user, therefore you should use
+          this judiciously.
+        '';
+      };
+
+      jobTimeout = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Maximum duration, in seconds, of a single compilation request.
+        '';
+      };
+
+      logLevel = mkOption {
+        type = types.nullOr (types.enum [ "critical" "error" "warning" "notice" "info" "debug" ]);
+        default = "warning";
+        description = lib.mdDoc ''
+          Set the minimum severity of error that will be included in the log
+          file. Useful if you only want to see error messages rather than an
+          entry for each connection.
+        '';
+      };
+
+      maxJobs = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Maximum number of tasks distccd should execute at any time.
+        '';
+      };
+
+
+      nice = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Niceness of the compilation tasks.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Opens the specified TCP port for distcc.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.distcc;
+        defaultText = literalExpression "pkgs.distcc";
+        description = lib.mdDoc ''
+          The distcc package to use.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3632;
+        description = lib.mdDoc ''
+          The TCP port which distccd will listen on.
+        '';
+      };
+
+      stats = {
+        enable = mkEnableOption (lib.mdDoc "statistics reporting via HTTP server");
+        port = mkOption {
+          type = types.port;
+          default = 3633;
+          description = lib.mdDoc ''
+            The TCP port which the distccd statistics HTTP server will listen
+            on.
+          '';
+        };
+      };
+
+      zeroconf = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to register via mDNS/DNS-SD.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ]
+        ++ optionals cfg.stats.enable [ cfg.stats.port ];
+    };
+
+    systemd.services.distccd = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      description = "Distributed C, C++ and Objective-C compiler";
+      documentation = [ "man:distccd(1)" ];
+
+      serviceConfig = {
+        User = "distcc";
+        Group = "distcc";
+        # FIXME: I'd love to get rid of `--enable-tcp-insecure` here, but I'm
+        # not sure how I'm supposed to get distccd to "accept" running a binary
+        # (the compiler) that's outside of /usr/lib.
+        ExecStart = pkgs.writeShellScript "start-distccd" ''
+          export PATH="${pkgs.distccMasquerade}/bin"
+          ${cfg.package}/bin/distccd \
+            --no-detach \
+            --daemon \
+            --enable-tcp-insecure \
+            --port ${toString cfg.port} \
+            ${optionalString (cfg.jobTimeout != null) "--job-lifetime ${toString cfg.jobTimeout}"} \
+            ${optionalString (cfg.logLevel != null) "--log-level ${cfg.logLevel}"} \
+            ${optionalString (cfg.maxJobs != null) "--jobs ${toString cfg.maxJobs}"} \
+            ${optionalString (cfg.nice != null) "--nice ${toString cfg.nice}"} \
+            ${optionalString cfg.stats.enable "--stats"} \
+            ${optionalString cfg.stats.enable "--stats-port ${toString cfg.stats.port}"} \
+            ${optionalString cfg.zeroconf "--zeroconf"} \
+            ${concatMapStrings (c: "--allow ${c} ") cfg.allowedClients}
+        '';
+      };
+    };
+
+    users = {
+      groups.distcc.gid = config.ids.gids.distcc;
+      users.distcc = {
+        description = "distccd user";
+        group = "distcc";
+        uid = config.ids.uids.distcc;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/development/gemstash.nix b/nixpkgs/nixos/modules/services/development/gemstash.nix
new file mode 100644
index 000000000000..eb7ccb98bde8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/gemstash.nix
@@ -0,0 +1,103 @@
+{ lib, pkgs, config, ... }:
+with lib;
+
+let
+  settingsFormat = pkgs.formats.yaml { };
+
+  # gemstash uses a yaml config where the keys are ruby symbols,
+  # which means they start with ':'. This would be annoying to use
+  # on the nix side, so we rewrite plain names instead.
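+  # For example, { base_path = "/var/lib/gemstash"; } becomes
+  # { ":base_path" = "/var/lib/gemstash"; } before being written out as yaml.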
+  prefixColon = s: listToAttrs (map
+    (attrName: {
+      name = ":${attrName}";
+      value =
+        if isAttrs s.${attrName}
+        then prefixColon s."${attrName}"
+        else s."${attrName}";
+    })
+    (attrNames s));
+
+  # parse the port number out of the tcp://ip:port bind setting string
+  parseBindPort = bind: strings.toInt (last (strings.splitString ":" bind));
+
+  cfg = config.services.gemstash;
+in
+{
+  options.services.gemstash = {
+    enable = mkEnableOption (lib.mdDoc "gemstash service");
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open the firewall for the port in {option}`services.gemstash.bind`.
+      '';
+    };
+
+    settings = mkOption {
+      default = {};
+      description = lib.mdDoc ''
+        Configuration for Gemstash. The details can be found in the
+        [gemstash documentation](https://github.com/rubygems/gemstash/blob/master/man/gemstash-configuration.5.md).
+        Each key set here is automatically prefixed with ":" to match the gemstash expectations.
+      '';
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+        options = {
+          base_path = mkOption {
+            type = types.path;
+            default = "/var/lib/gemstash";
+            description = lib.mdDoc "Path to store the gem files and the sqlite database. If left unchanged, the directory will be created.";
+          };
+          bind = mkOption {
+            type = types.str;
+            default = "tcp://0.0.0.0:9292";
+            description = lib.mdDoc "Host and port combination for the server to listen on.";
+          };
+          db_adapter = mkOption {
+            type = types.nullOr (types.enum [ "sqlite3" "postgres" "mysql" "mysql2" ]);
+            default = null;
+            description = lib.mdDoc "Which database type to use. For choices other than sqlite3, the dbUrl has to be specified as well.";
+          };
+          db_url = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc "The database to connect to when using postgres, mysql, or mysql2.";
+          };
+        };
+      };
+    };
+  };
+
+  config =
+    mkIf cfg.enable {
+      users = {
+        users.gemstash = {
+          group = "gemstash";
+          isSystemUser = true;
+        };
+        groups.gemstash = { };
+      };
+
+      networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ (parseBindPort cfg.settings.bind) ];
+
+      systemd.services.gemstash = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = mkMerge [
+          {
+            ExecStart = "${pkgs.gemstash}/bin/gemstash start --no-daemonize --config-file ${settingsFormat.generate "gemstash.yaml" (prefixColon cfg.settings)}";
+            NoNewPrivileges = true;
+            User = "gemstash";
+            Group = "gemstash";
+            PrivateTmp = true;
+            RestrictSUIDSGID = true;
+            LockPersonality = true;
+          }
+          (mkIf (cfg.settings.base_path == "/var/lib/gemstash") {
+            StateDirectory = "gemstash";
+          })
+        ];
+      };
+    };
+}
diff --git a/nixpkgs/nixos/modules/services/development/hoogle.nix b/nixpkgs/nixos/modules/services/development/hoogle.nix
new file mode 100644
index 000000000000..88dd01fd8aab
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/hoogle.nix
@@ -0,0 +1,81 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.hoogle;
+
+  hoogleEnv = pkgs.buildEnv {
+    name = "hoogle";
+    paths = [ (cfg.haskellPackages.ghcWithHoogle cfg.packages) ];
+  };
+
+in {
+
+  options.services.hoogle = {
+    enable = mkEnableOption (lib.mdDoc "Haskell documentation server");
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc ''
+        Port number Hoogle will be listening on.
+      '';
+    };
+
+    packages = mkOption {
+      type = types.functionTo (types.listOf types.package);
+      default = hp: [];
+      defaultText = literalExpression "hp: []";
+      example = literalExpression "hp: with hp; [ text lens ]";
+      description = lib.mdDoc ''
+        The Haskell packages to generate documentation for.
+
+        The option value is a function that takes the package set specified in
+        the {var}`haskellPackages` option as its sole parameter and
+        returns a list of packages.
+      '';
+    };
+
+    haskellPackages = mkOption {
+      description = lib.mdDoc "Which haskell package set to use.";
+      type = types.attrs;
+      default = pkgs.haskellPackages;
+      defaultText = literalExpression "pkgs.haskellPackages";
+    };
+
+    home = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Url for hoogle logo";
+      default = "https://hoogle.haskell.org";
+    };
+
+    host = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Set the host to bind on.";
+      default = "127.0.0.1";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.hoogle = {
+      description = "Haskell documentation server";
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Restart = "always";
+        ExecStart = ''${hoogleEnv}/bin/hoogle server --local --port ${toString cfg.port} --home ${cfg.home} --host ${cfg.host}'';
+
+        DynamicUser = true;
+
+        ProtectHome = true;
+
+        RuntimeDirectory = "hoogle";
+        WorkingDirectory = "%t/hoogle";
+      };
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/development/jupyter/default.nix b/nixpkgs/nixos/modules/services/development/jupyter/default.nix
new file mode 100644
index 000000000000..9f7910844468
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/jupyter/default.nix
@@ -0,0 +1,206 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.jupyter;
+
+  package = cfg.package;
+
+  kernels = (pkgs.jupyter-kernel.create  {
+    definitions = if cfg.kernels != null
+      then cfg.kernels
+      else  pkgs.jupyter-kernel.default;
+  });
+
+  notebookConfig = pkgs.writeText "jupyter_config.py" ''
+    ${cfg.notebookConfig}
+
+    c.NotebookApp.password = ${cfg.password}
+  '';
+
+in {
+  meta.maintainers = with maintainers; [ aborsu ];
+
+  options.services.jupyter = {
+    enable = mkEnableOption (lib.mdDoc "Jupyter development server");
+
+    ip = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc ''
+        IP address Jupyter will be listening on.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      # NOTE: We don't use top-level jupyter because we don't
+      # want to pass in JUPYTER_PATH but use .environment instead,
+      # saving a rebuild.
+      default = pkgs.python3.pkgs.notebook;
+      defaultText = literalExpression "pkgs.python3.pkgs.notebook";
+      description = lib.mdDoc ''
+        Jupyter package to use.
+      '';
+    };
+
+    command = mkOption {
+      type = types.str;
+      default = "jupyter-notebook";
+      example = "jupyter-lab";
+      description = lib.mdDoc ''
+        Which command the service runs. Note that not all jupyter packages
+        have all commands, e.g. jupyter-lab isn't present in the default package.
+       '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8888;
+      description = lib.mdDoc ''
+        Port number Jupyter will be listening on.
+      '';
+    };
+
+    notebookDir = mkOption {
+      type = types.str;
+      default = "~/";
+      description = lib.mdDoc ''
+        Root directory for notebooks.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "jupyter";
+      description = lib.mdDoc ''
+        Name of the user used to run the jupyter service.
+        For security reasons, jupyter should not be run as root.
+        If left at the default (jupyter), the service will create a jupyter user with appropriate settings.
+      '';
+      example = "aborsu";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "jupyter";
+      description = lib.mdDoc ''
+        Name of the group used to run the jupyter service.
+        Use this if you want to create a group of users that are able to view the notebook directory's content.
+      '';
+      example = "users";
+    };
+
+    password = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Password to use with notebook.
+        Can be generated using:
+          In [1]: from notebook.auth import passwd
+          In [2]: passwd('test')
+          Out[2]: 'sha1:1b961dc713fb:88483270a63e57d18d43cf337e629539de1436ba'
+          NOTE: you need to keep the single quote inside the nix string.
+        Or you can use a python oneliner:
+          "open('/path/secret_file', 'r', encoding='utf8').read().strip()"
+        It will be interpreted at the end of the notebookConfig.
+      '';
+      example = "'sha1:1b961dc713fb:88483270a63e57d18d43cf337e629539de1436ba'";
+    };
+
+    notebookConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Raw jupyter config.
+      '';
+    };
+
+    kernels = mkOption {
+      type = types.nullOr (types.attrsOf(types.submodule (import ./kernel-options.nix {
+        inherit lib pkgs;
+      })));
+
+      default = null;
+      example = literalExpression ''
+        {
+          python3 = let
+            env = (pkgs.python3.withPackages (pythonPackages: with pythonPackages; [
+                    ipykernel
+                    pandas
+                    scikit-learn
+                  ]));
+          in {
+            displayName = "Python 3 for machine learning";
+            argv = [
+              "''${env.interpreter}"
+              "-m"
+              "ipykernel_launcher"
+              "-f"
+              "{connection_file}"
+            ];
+            language = "python";
+            logo32 = "''${env.sitePackages}/ipykernel/resources/logo-32x32.png";
+            logo64 = "''${env.sitePackages}/ipykernel/resources/logo-64x64.png";
+            extraPaths = {
+              "cool.txt" = pkgs.writeText "cool" "cool content";
+            };
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Declarative kernel config.
+
+        Kernels can be declared in any language that has the required
+        dependencies to communicate with a jupyter server.
+        In Python's case, this means that the ipykernel package must always be
+        included in the list of packages of the targeted environment.
+      '';
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.enable  {
+      systemd.services.jupyter = {
+        description = "Jupyter development server";
+
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        # TODO: Patch notebook so we can explicitly pass in a shell
+        path = [ pkgs.bash ]; # needed for sh in cell magic to work
+
+        environment = {
+          JUPYTER_PATH = toString kernels;
+        };
+
+        serviceConfig = {
+          Restart = "always";
+          ExecStart = ''${package}/bin/${cfg.command} \
+            --no-browser \
+            --ip=${cfg.ip} \
+            --port=${toString cfg.port} --port-retries 0 \
+            --notebook-dir=${cfg.notebookDir} \
+            --NotebookApp.config_file=${notebookConfig}
+          '';
+          User = cfg.user;
+          Group = cfg.group;
+          WorkingDirectory = "~";
+        };
+      };
+    })
+    (mkIf (cfg.enable && (cfg.group == "jupyter")) {
+      users.groups.jupyter = {};
+    })
+    (mkIf (cfg.enable && (cfg.user == "jupyter")) {
+      users.extraUsers.jupyter = {
+        extraGroups = [ cfg.group ];
+        home = "/var/lib/jupyter";
+        createHome = true;
+        isSystemUser = true;
+        useDefaultShell = true; # needed so that the user can start a terminal.
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/development/jupyter/kernel-options.nix b/nixpkgs/nixos/modules/services/development/jupyter/kernel-options.nix
new file mode 100644
index 000000000000..6e406152de47
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/jupyter/kernel-options.nix
@@ -0,0 +1,80 @@
+# Options that can be used for creating a jupyter kernel.
+{ lib, pkgs }:
+
+with lib;
+
+{
+  freeformType = (pkgs.formats.json { }).type;
+
+  options = {
+
+    displayName = mkOption {
+      type = types.str;
+      default = "";
+      example = literalExpression ''
+        "Python 3"
+        "Python 3 for Data Science"
+      '';
+      description = lib.mdDoc ''
+        Name that will be shown to the user.
+      '';
+    };
+
+    argv = mkOption {
+      type = types.listOf types.str;
+      example = [
+        "{customEnv.interpreter}"
+        "-m"
+        "ipykernel_launcher"
+        "-f"
+        "{connection_file}"
+      ];
+      description = lib.mdDoc ''
+        Command and arguments to start the kernel.
+      '';
+    };
+
+    language = mkOption {
+      type = types.str;
+      example = "python";
+      description = lib.mdDoc ''
+        Language of the environment. Typically the name of the binary.
+      '';
+    };
+
+    env = mkOption {
+      type = types.attrsOf types.str;
+      default = { };
+      example = { OMP_NUM_THREADS = "1"; };
+      description = lib.mdDoc ''
+        Environment variables to set for the kernel.
+      '';
+    };
+
+    logo32 = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = literalExpression ''"''${env.sitePackages}/ipykernel/resources/logo-32x32.png"'';
+      description = lib.mdDoc ''
+        Path to 32x32 logo png.
+      '';
+    };
+    logo64 = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = literalExpression ''"''${env.sitePackages}/ipykernel/resources/logo-64x64.png"'';
+      description = lib.mdDoc ''
+        Path to 64x64 logo png.
+      '';
+    };
+
+    extraPaths = mkOption {
+      type = types.attrsOf types.path;
+      default = { };
+      example = literalExpression ''"{ examples = ''${env.sitePack}/IRkernel/kernelspec/kernel.js"; }'';
+      description = lib.mdDoc ''
+        Extra paths to link in the kernel directory.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/development/jupyterhub/default.nix b/nixpkgs/nixos/modules/services/development/jupyterhub/default.nix
new file mode 100644
index 000000000000..cebc35a50476
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/jupyterhub/default.nix
@@ -0,0 +1,202 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.jupyterhub;
+
+  kernels = (pkgs.jupyter-kernel.create  {
+    definitions = if cfg.kernels != null
+      then cfg.kernels
+      else  pkgs.jupyter-kernel.default;
+  });
+
+  jupyterhubConfig = pkgs.writeText "jupyterhub_config.py" ''
+    c.JupyterHub.bind_url = "http://${cfg.host}:${toString cfg.port}"
+
+    c.JupyterHub.authenticator_class = "${cfg.authentication}"
+    c.JupyterHub.spawner_class = "${cfg.spawner}"
+
+    c.SystemdSpawner.default_url = '/lab'
+    c.SystemdSpawner.cmd = "${cfg.jupyterlabEnv}/bin/jupyterhub-singleuser"
+    c.SystemdSpawner.environment = {
+      'JUPYTER_PATH': '${kernels}'
+    }
+
+    ${cfg.extraConfig}
+  '';
+in {
+  meta.maintainers = with maintainers; [ costrouc ];
+
+  options.services.jupyterhub = {
+    enable = mkEnableOption (lib.mdDoc "Jupyterhub development server");
+
+    authentication = mkOption {
+      type = types.str;
+      default = "jupyterhub.auth.PAMAuthenticator";
+      description = lib.mdDoc ''
+        Jupyterhub authentication to use
+
+        There are many authenticators available including: oauth, pam,
+        ldap, kerberos, etc.
+      '';
+    };
+
+    spawner = mkOption {
+      type = types.str;
+      default = "systemdspawner.SystemdSpawner";
+      description = lib.mdDoc ''
+        Jupyterhub spawner to use
+
+        There are many spawners available including: local process,
+        systemd, docker, kubernetes, yarn, batch, etc.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra contents appended to the jupyterhub configuration
+
+        Jupyterhub configuration is a normal Python file using
+        [Traitlets](https://jupyterhub.readthedocs.io/en/stable/getting-started/config-basics.html).
+        The base configuration of this module provides sane defaults,
+        but you can override anything since this is a Python file.
+      '';
+      example = ''
+        c.SystemdSpawner.mem_limit = '8G'
+        c.SystemdSpawner.cpu_limit = 2.0
+      '';
+    };
+
+    jupyterhubEnv = mkOption {
+      type = types.package;
+      default = pkgs.python3.withPackages (p: with p; [
+        jupyterhub
+        jupyterhub-systemdspawner
+      ]);
+      defaultText = literalExpression ''
+        pkgs.python3.withPackages (p: with p; [
+          jupyterhub
+          jupyterhub-systemdspawner
+        ])
+      '';
+      description = lib.mdDoc ''
+        Python environment to run jupyterhub
+
+        Customizing will affect the packages available in the hub and
+        proxy. This will allow packages to be available for the
+        extraConfig that you may need. This will not normally need to
+        be changed.
+      '';
+    };
+
+    jupyterlabEnv = mkOption {
+      type = types.package;
+      default = pkgs.python3.withPackages (p: with p; [
+        jupyterhub
+        jupyterlab
+      ]);
+      defaultText = literalExpression ''
+        pkgs.python3.withPackages (p: with p; [
+          jupyterhub
+          jupyterlab
+        ])
+      '';
+      description = lib.mdDoc ''
+        Python environment to run jupyterlab
+
+        Customizing will affect the packages available in the
+        jupyterlab server and the default kernel provided. This is the
+        way to customize the jupyterlab extensions and jupyter
+        notebook extensions. This will not normally need to
+        be changed.
+      '';
+    };
+
+    kernels = mkOption {
+      type = types.nullOr (types.attrsOf(types.submodule (import ../jupyter/kernel-options.nix {
+        inherit lib pkgs;
+      })));
+
+      default = null;
+      example = literalExpression ''
+        {
+          python3 = let
+            env = (pkgs.python3.withPackages (pythonPackages: with pythonPackages; [
+                    ipykernel
+                    pandas
+                    scikit-learn
+                  ]));
+          in {
+            displayName = "Python 3 for machine learning";
+            argv = [
+              "''${env.interpreter}"
+              "-m"
+              "ipykernel_launcher"
+              "-f"
+              "{connection_file}"
+            ];
+            language = "python";
+            logo32 = "''${env}/''${env.sitePackages}/ipykernel/resources/logo-32x32.png";
+            logo64 = "''${env}/''${env.sitePackages}/ipykernel/resources/logo-64x64.png";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Declarative kernel config
+
+        Kernels can be declared in any language that has the required
+        dependencies to communicate with a jupyter server.
+        In Python's case, this means that the ipykernel package must always be
+        included in the list of packages of the targeted environment.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8000;
+      description = lib.mdDoc ''
+        Port number Jupyterhub will be listening on
+      '';
+    };
+
+    host = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc ''
+        Bind IP JupyterHub will be listening on
+      '';
+    };
+
+    stateDirectory = mkOption {
+      type = types.str;
+      default = "jupyterhub";
+      description = lib.mdDoc ''
+        Directory for jupyterhub state (token + database)
+      '';
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.enable  {
+      systemd.services.jupyterhub = {
+        description = "Jupyterhub development server";
+
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          Restart = "always";
+          ExecStart = "${cfg.jupyterhubEnv}/bin/jupyterhub --config ${jupyterhubConfig}";
+          User = "root";
+          StateDirectory = cfg.stateDirectory;
+          WorkingDirectory = "/var/lib/${cfg.stateDirectory}";
+        };
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/development/livebook.md b/nixpkgs/nixos/modules/services/development/livebook.md
new file mode 100644
index 000000000000..73ddc57f6179
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/livebook.md
@@ -0,0 +1,39 @@
+# Livebook {#module-services-livebook}
+
+[Livebook](https://livebook.dev/) is a web application for writing
+interactive and collaborative code notebooks.
+
+## Basic Usage {#module-services-livebook-basic-usage}
+
+Enabling the `livebook` service creates a user
+[`systemd`](https://www.freedesktop.org/wiki/Software/systemd/) unit
+which runs the server.
+
+```
+{ ... }:
+
+{
+  services.livebook = {
+    enableUserService = true;
+    port = 20123;
+    # See note below about security
+    environmentFile = pkgs.writeText "livebook.env" ''
+      LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+    '';
+  };
+}
+```
+
+::: {.note}
+
+The Livebook server has the ability to run any command as the user it
+is running under, so securing access to it with a password is highly
+recommended.
+
+Putting the password in the Nix configuration like above is an easy
+way to get started but it is not recommended in the real world because
+the `livebook.env` file will be added to the world-readable Nix store.
+A better approach would be to put the password in some secure
+user-readable location and set `environmentFile = /home/user/secure/livebook.env`.
+
+:::
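+
+A minimal sketch of that approach (the path is only a placeholder; note
+the quoted string, which keeps the secret file itself out of the Nix store):
+
+```
+{
+  services.livebook = {
+    enableUserService = true;
+    environmentFile = "/home/user/secure/livebook.env";
+  };
+}
+```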
diff --git a/nixpkgs/nixos/modules/services/development/livebook.nix b/nixpkgs/nixos/modules/services/development/livebook.nix
new file mode 100644
index 000000000000..3991a4125ec3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/livebook.nix
@@ -0,0 +1,90 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.livebook;
+in
+{
+  options.services.livebook = {
+    # Since livebook doesn't have a granular permission system (a user
+    # either has access to all the data or none at all), the decision
+    # was made to run this as a user service.  If that changes in the
+    # future, this can be changed to a system service.
+    enableUserService = mkEnableOption "a user service for Livebook";
+
+    environmentFile = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Environment file as defined in {manpage}`systemd.exec(5)` passed to the service.
+
+        This must contain at least `LIVEBOOK_PASSWORD` or
+        `LIVEBOOK_TOKEN_ENABLED=false`.  See `livebook server --help`
+        for other options.'';
+    };
+
+    erlang_node_short_name = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "livebook";
+      description = "A short name for the distributed node.";
+    };
+
+    erlang_node_name = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "livebook@127.0.0.1";
+      description = "The name for the app distributed node.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = "The port to start the web application on.";
+    };
+
+    address = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        The address to start the web application on.  Must be a valid IPv4 or
+        IPv6 address.
+      '';
+    };
+
+    options = mkOption {
+      type = with types; attrsOf str;
+      default = { };
+      description = lib.mdDoc ''
+        Additional options to pass as command-line arguments to the server.
+      '';
+      example = literalExpression ''
+        {
+          cookie = "a value shared by all nodes in this cluster";
+        }
+      '';
+    };
+  };
+
+  config = mkIf cfg.enableUserService {
+    systemd.user.services.livebook = {
+      serviceConfig = {
+        Restart = "always";
+        EnvironmentFile = cfg.environmentFile;
+        ExecStart =
+          let
+            args = lib.cli.toGNUCommandLineShell { } ({
+              inherit (cfg) port;
+              ip = cfg.address;
+              name = cfg.erlang_node_name;
+              sname = cfg.erlang_node_short_name;
+            } // cfg.options);
+          in
+          "${pkgs.livebook}/bin/livebook server ${args}";
+      };
+      path = [ pkgs.bash ];
+      wantedBy = [ "default.target" ];
+    };
+  };
+
+  meta.doc = ./livebook.md;
+}
diff --git a/nixpkgs/nixos/modules/services/development/lorri.nix b/nixpkgs/nixos/modules/services/development/lorri.nix
new file mode 100644
index 000000000000..74f56f5890fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/lorri.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.lorri;
+  socketPath = "lorri/daemon.socket";
+in {
+  options = {
+    services.lorri = {
+      enable = lib.mkOption {
+        default = false;
+        type = lib.types.bool;
+        description = lib.mdDoc ''
+          Enables the daemon for `lorri`, a nix-shell replacement for project
+          development. The socket-activated daemon starts on the first request
+          issued by the `lorri` command.
+        '';
+      };
+      package = lib.mkOption {
+        default = pkgs.lorri;
+        type = lib.types.package;
+        description = lib.mdDoc ''
+          The lorri package to use.
+        '';
+        defaultText = lib.literalExpression "pkgs.lorri";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.user.sockets.lorri = {
+      description = "Socket for Lorri Daemon";
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream = "%t/${socketPath}";
+        RuntimeDirectory = "lorri";
+      };
+    };
+
+    systemd.user.services.lorri = {
+      description = "Lorri Daemon";
+      requires = [ "lorri.socket" ];
+      after = [ "lorri.socket" ];
+      path = with pkgs; [ config.nix.package git gnutar gzip ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/lorri daemon";
+        PrivateTmp = true;
+        ProtectSystem = "strict";
+        ProtectHome = "read-only";
+        Restart = "on-failure";
+      };
+    };
+
+    environment.systemPackages = [ cfg.package pkgs.direnv ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/development/rstudio-server/default.nix b/nixpkgs/nixos/modules/services/development/rstudio-server/default.nix
new file mode 100644
index 000000000000..bf4c7727bf74
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/rstudio-server/default.nix
@@ -0,0 +1,107 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.rstudio-server;
+
+  rserver-conf = builtins.toFile "rserver.conf" ''
+    server-working-dir=${cfg.serverWorkingDir}
+    www-address=${cfg.listenAddr}
+    ${cfg.rserverExtraConfig}
+  '';
+
+  rsession-conf = builtins.toFile "rsession.conf" ''
+    ${cfg.rsessionExtraConfig}
+  '';
+
+in
+{
+  meta.maintainers = with maintainers; [ jbedo cfhammill ];
+
+  options.services.rstudio-server = {
+    enable = mkEnableOption (lib.mdDoc "RStudio server");
+
+    serverWorkingDir = mkOption {
+      type = types.str;
+      default = "/var/lib/rstudio-server";
+      description = lib.mdDoc ''
+        Default working directory for server (server-working-dir in rserver.conf).
+      '';
+    };
+
+    listenAddr = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        Address to listen on (www-address in rserver.conf).
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.rstudio-server;
+      defaultText = literalExpression "pkgs.rstudio-server";
+      example = literalExpression "pkgs.rstudioServerWrapper.override { packages = [ pkgs.rPackages.ggplot2 ]; }";
+      description = lib.mdDoc ''
+        Rstudio server package to use. Can be set to rstudioServerWrapper to provide packages.
+      '';
+    };
+
+    rserverExtraConfig = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        Extra contents for rserver.conf.
+      '';
+    };
+
+    rsessionExtraConfig = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        Extra contents for rsession.conf.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable
+    {
+      systemd.services.rstudio-server = {
+        description = "Rstudio server";
+
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        restartTriggers = [ rserver-conf rsession-conf ];
+
+        serviceConfig = {
+          Restart = "on-failure";
+          Type = "forking";
+          ExecStart = "${cfg.package}/bin/rserver";
+          StateDirectory = "rstudio-server";
+          RuntimeDirectory = "rstudio-server";
+        };
+      };
+
+      environment.etc = {
+        "rstudio/rserver.conf".source = rserver-conf;
+        "rstudio/rsession.conf".source = rsession-conf;
+        "pam.d/rstudio".source = "/etc/pam.d/login";
+      };
+      environment.systemPackages = [ cfg.package ];
+
+      users = {
+        users.rstudio-server = {
+          uid = config.ids.uids.rstudio-server;
+          description = "rstudio-server";
+          group = "rstudio-server";
+        };
+        groups.rstudio-server = {
+          gid = config.ids.gids.rstudio-server;
+        };
+      };
+
+    };
+}
diff --git a/nixpkgs/nixos/modules/services/development/zammad.nix b/nixpkgs/nixos/modules/services/development/zammad.nix
new file mode 100644
index 000000000000..d24ed24ef395
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/development/zammad.nix
@@ -0,0 +1,323 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.zammad;
+  settingsFormat = pkgs.formats.yaml { };
+  filterNull = filterAttrs (_: v: v != null);
+  serviceConfig = {
+    Type = "simple";
+    Restart = "always";
+
+    User = "zammad";
+    Group = "zammad";
+    PrivateTmp = true;
+    StateDirectory = "zammad";
+    WorkingDirectory = cfg.dataDir;
+  };
+  environment = {
+    RAILS_ENV = "production";
+    NODE_ENV = "production";
+    RAILS_SERVE_STATIC_FILES = "true";
+    RAILS_LOG_TO_STDOUT = "true";
+  };
+  databaseConfig = settingsFormat.generate "database.yml" cfg.database.settings;
+in
+{
+
+  options = {
+    services.zammad = {
+      enable = mkEnableOption (lib.mdDoc "Zammad, a web-based, open source user support/ticketing solution");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.zammad;
+        defaultText = literalExpression "pkgs.zammad";
+        description = lib.mdDoc "Zammad package to use.";
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/zammad";
+        description = lib.mdDoc ''
+          Path to the folder that will contain the Zammad working directory.
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        example = "192.168.23.42";
+        description = lib.mdDoc "Host address.";
+      };
+
+      openPorts = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to open firewall ports for Zammad";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3000;
+        description = lib.mdDoc "Web service port.";
+      };
+
+      websocketPort = mkOption {
+        type = types.port;
+        default = 6042;
+        description = lib.mdDoc "Websocket service port.";
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "PostgreSQL" "MySQL" ];
+          default = "PostgreSQL";
+          example = "MySQL";
+          description = lib.mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.nullOr types.str;
+          default = {
+            PostgreSQL = "/run/postgresql";
+            MySQL = "localhost";
+          }.${cfg.database.type};
+          defaultText = literalExpression ''
+            {
+              PostgreSQL = "/run/postgresql";
+              MySQL = "localhost";
+            }.''${config.services.zammad.database.type};
+          '';
+          description = lib.mdDoc ''
+            Database host address.
+          '';
+        };
+
+        port = mkOption {
+          type = types.nullOr types.port;
+          default = null;
+          description = lib.mdDoc "Database port. Use `null` for default port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "zammad";
+          description = lib.mdDoc ''
+            Database name.
+          '';
+        };
+
+        user = mkOption {
+          type = types.nullOr types.str;
+          default = "zammad";
+          description = lib.mdDoc "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/zammad-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password for {option}`services.zammad.database.user`.
+          '';
+        };
+
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to create a local database automatically.";
+        };
+
+        settings = mkOption {
+          type = settingsFormat.type;
+          default = { };
+          example = literalExpression ''
+            {
+            }
+          '';
+          description = lib.mdDoc ''
+            The {file}`database.yml` configuration file as a key-value set.
+            See \<TODO\>
+            for a list of configuration parameters.
+          '';
+        };
+      };
+
+      secretKeyBaseFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/secret_key_base";
+        description = lib.mdDoc ''
+          The path to a file containing the
+          `secret_key_base` secret.
+
+          Zammad uses `secret_key_base` to encrypt
+          the cookie store, which contains session data, and to digest
+          user auth tokens.
+
+          Needs to be a 64 byte long string of hexadecimal
+          characters. You can generate one by running
+
+          ```
+          openssl rand -hex 64 >/path/to/secret_key_base_file
+          ```
+
+          This should be a string, not a nix path, since nix paths are
+          copied into the world-readable nix store.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.zammad.database.settings = {
+      production = mapAttrs (_: v: mkDefault v) (filterNull {
+        adapter = {
+          PostgreSQL = "postgresql";
+          MySQL = "mysql2";
+        }.${cfg.database.type};
+        database = cfg.database.name;
+        pool = 50;
+        timeout = 5000;
+        encoding = "utf8";
+        username = cfg.database.user;
+        host = cfg.database.host;
+        port = cfg.database.port;
+      });
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openPorts [
+      config.services.zammad.port
+      config.services.zammad.websocketPort
+    ];
+
+    users.users.zammad = {
+      isSystemUser = true;
+      home = cfg.dataDir;
+      group = "zammad";
+    };
+
+    users.groups.zammad = { };
+
+    assertions = [
+      {
+        assertion = cfg.database.createLocally -> cfg.database.user == "zammad" && cfg.database.name == "zammad";
+        message = "services.zammad.database.user must be set to \"zammad\" if services.zammad.database.createLocally is set to true";
+      }
+      {
+        assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.zammad.database.createLocally is set to true";
+      }
+    ];
+
+    services.mysql = optionalAttrs (cfg.database.createLocally && cfg.database.type == "MySQL") {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        {
+          name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.postgresql = optionalAttrs (cfg.database.createLocally && cfg.database.type == "PostgreSQL") {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        {
+          name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    systemd.services.zammad-web = {
+      inherit environment;
+      serviceConfig = serviceConfig // {
+        # loading all the gems takes time
+        TimeoutStartSec = 1200;
+      };
+      after = [
+        "network.target"
+        "postgresql.service"
+      ];
+      requires = [
+        "postgresql.service"
+      ];
+      description = "Zammad web";
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        # Blindly copy the whole project here.
+        chmod -R +w .
+        rm -rf ./public/assets/
+        rm -rf ./tmp/*
+        rm -rf ./log/*
+        cp -r --no-preserve=owner ${cfg.package}/* .
+        chmod -R +w .
+        # config file
+        cp ${databaseConfig} ./config/database.yml
+        chmod -R +w .
+        ${optionalString (cfg.database.passwordFile != null) ''
+        {
+          echo -n "  password: "
+          cat ${cfg.database.passwordFile}
+        } >> ./config/database.yml
+        ''}
+        ${optionalString (cfg.secretKeyBaseFile != null) ''
+        {
+          echo "production: "
+          echo -n "  secret_key_base: "
+          cat ${cfg.secretKeyBaseFile}
+        } > ./config/secrets.yml
+        ''}
+
+        if [ `${config.services.postgresql.package}/bin/psql \
+                  --host ${cfg.database.host} \
+                  ${optionalString
+                    (cfg.database.port != null)
+                    "--port ${toString cfg.database.port}"} \
+                  --username ${cfg.database.user} \
+                  --dbname ${cfg.database.name} \
+                  --command "SELECT COUNT(*) FROM pg_class c \
+                            JOIN pg_namespace s ON s.oid = c.relnamespace \
+                            WHERE s.nspname NOT IN ('pg_catalog', 'pg_toast', 'information_schema') \
+                              AND s.nspname NOT LIKE 'pg_temp%';" | sed -n 3p` -eq 0 ]; then
+          echo "Initialize database"
+          ./bin/rake --no-system db:migrate
+          ./bin/rake --no-system db:seed
+        else
+          echo "Migrate database"
+          ./bin/rake --no-system db:migrate
+        fi
+        echo "Done"
+      '';
+      script = "./script/rails server -b ${cfg.host} -p ${toString cfg.port}";
+    };
+
+    systemd.services.zammad-websocket = {
+      inherit serviceConfig environment;
+      after = [ "zammad-web.service" ];
+      requires = [ "zammad-web.service" ];
+      description = "Zammad websocket";
+      wantedBy = [ "multi-user.target" ];
+      script = "./script/websocket-server.rb -b ${cfg.host} -p ${toString cfg.websocketPort} start";
+    };
+
+    systemd.services.zammad-scheduler = {
+      inherit environment;
+      serviceConfig = serviceConfig // { Type = "forking"; };
+      after = [ "zammad-web.service" ];
+      requires = [ "zammad-web.service" ];
+      description = "Zammad scheduler";
+      wantedBy = [ "multi-user.target" ];
+      script = "./script/scheduler.rb start";
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ garbas taeer ];
+}
diff --git a/nixpkgs/nixos/modules/services/display-managers/greetd.nix b/nixpkgs/nixos/modules/services/display-managers/greetd.nix
new file mode 100644
index 000000000000..89cb81f3a78f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/display-managers/greetd.nix
@@ -0,0 +1,114 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  cfg = config.services.greetd;
+  tty = "tty${toString cfg.vt}";
+  settingsFormat = pkgs.formats.toml {};
+in
+{
+  options.services.greetd = {
+    enable = mkEnableOption (lib.mdDoc "greetd");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.greetd.greetd;
+      defaultText = literalExpression "pkgs.greetd.greetd";
+      description = lib.mdDoc "The greetd package that should be used.";
+    };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      example = literalExpression ''
+        {
+          default_session = {
+            command = "''${pkgs.greetd.greetd}/bin/agreety --cmd sway";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        greetd configuration ([documentation](https://man.sr.ht/~kennylevinsen/greetd/))
+        as a Nix attribute set.
+      '';
+    };
+
+    vt = mkOption  {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc ''
+        The virtual console (tty) that greetd should use. This option also disables getty on that tty.
+      '';
+    };
+
+    restart = mkOption {
+      type = types.bool;
+      default = !(cfg.settings ? initial_session);
+      defaultText = literalExpression "!(config.services.greetd.settings ? initial_session)";
+      description = lib.mdDoc ''
+        Whether to restart greetd when it terminates (e.g. on failure).
+        This is usually desirable so a user can always log in, but should be disabled when using 'settings.initial_session' (autologin),
+        because every greetd restart will trigger the autologin again.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+
+    services.greetd.settings.terminal.vt = mkDefault cfg.vt;
+    services.greetd.settings.default_session.user = mkDefault "greeter";
+
+    security.pam.services.greetd = {
+      allowNullPassword = true;
+      startSession = true;
+      enableGnomeKeyring = mkDefault config.services.gnome.gnome-keyring.enable;
+    };
+
+    # This prevents nixos-rebuild from killing greetd by activating getty again
+    systemd.services."autovt@${tty}".enable = false;
+
+    systemd.services.greetd = {
+      unitConfig = {
+        Wants = [
+          "systemd-user-sessions.service"
+        ];
+        After = [
+          "systemd-user-sessions.service"
+          "plymouth-quit-wait.service"
+          "getty@${tty}.service"
+        ];
+        Conflicts = [
+          "getty@${tty}.service"
+        ];
+      };
+
+      serviceConfig = {
+        ExecStart = "${pkgs.greetd.greetd}/bin/greetd --config ${settingsFormat.generate "greetd.toml" cfg.settings}";
+
+        Restart = mkIf cfg.restart "always";
+
+        # Defaults from greetd upstream configuration
+        IgnoreSIGPIPE = false;
+        SendSIGHUP = true;
+        TimeoutStopSec = "30s";
+        KeyringMode = "shared";
+
+        Type = "idle";
+      };
+
+      # Don't kill a user session when using nixos-rebuild
+      restartIfChanged = false;
+
+      wantedBy = [ "graphical.target" ];
+    };
+
+    systemd.defaultUnit = "graphical.target";
+
+    users.users.greeter = {
+      isSystemUser = true;
+      group = "greeter";
+    };
+
+    users.groups.greeter = {};
+  };
+
+  meta.maintainers = with maintainers; [ queezle ];
+}
diff --git a/nixpkgs/nixos/modules/services/editors/emacs.md b/nixpkgs/nixos/modules/services/editors/emacs.md
new file mode 100644
index 000000000000..9db1bd594175
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/editors/emacs.md
@@ -0,0 +1,406 @@
+# Emacs {#module-services-emacs}
+
+<!--
+    Documentation contributors:
+      Damien Cassou @DamienCassou
+      Thomas Tuegel @ttuegel
+      Rodney Lorrimar @rvl
+      Adam Hoese @adisbladis
+  -->
+
+[Emacs](https://www.gnu.org/software/emacs/) is an
+extensible, customizable, self-documenting real-time display editor — and
+more. At its core is an interpreter for Emacs Lisp, a dialect of the Lisp
+programming language with extensions to support text editing.
+
+Emacs runs within a graphical desktop environment using the X Window System,
+but works equally well on a text terminal. Under
+macOS, a "Mac port" edition is available, which
+uses Apple's native GUI frameworks.
+
+Nixpkgs provides a superior environment for
+running Emacs. It's simple to create custom builds
+by overriding the default packages. Chaotic collections of Emacs Lisp code
+and extensions can be brought under control using declarative package
+management. NixOS even provides a
+{command}`systemd` user service for automatically starting the Emacs
+daemon.
+
+## Installing Emacs {#module-services-emacs-installing}
+
+Emacs can be installed in the normal way for Nix (see
+[](#sec-package-management)). In addition, a NixOS
+*service* can be enabled.
+
+### The Different Releases of Emacs {#module-services-emacs-releases}
+
+Nixpkgs defines several basic Emacs packages.
+The following are attributes belonging to the {var}`pkgs` set:
+
+  {var}`emacs`
+  : The latest stable version of Emacs using the [GTK 2](http://www.gtk.org)
+    widget toolkit.
+
+  {var}`emacs-nox`
+  : Emacs built without any dependency on X11 libraries.
+
+  {var}`emacsMacport`
+  : Emacs with the "Mac port" patches, providing a more native look and
+    feel under macOS.
+
+If those aren't suitable, then the following imitation Emacs editors are
+also available in Nixpkgs:
+[Zile](https://www.gnu.org/software/zile/),
+[mg](http://homepage.boetes.org/software/mg/),
+[Yi](http://yi-editor.github.io/),
+[jmacs](https://joe-editor.sourceforge.io/).
+
+### Adding Packages to Emacs {#module-services-emacs-adding-packages}
+
+Emacs includes an entire ecosystem of functionality beyond text editing,
+including a project planner, mail and news reader, debugger interface,
+calendar, and more.
+
+Most extensions are obtained with the Emacs packaging system
+({file}`package.el`) from
+[Emacs Lisp Package Archive (ELPA)](https://elpa.gnu.org/),
+[MELPA](https://melpa.org/),
+[MELPA Stable](https://stable.melpa.org/), and
+[Org ELPA](http://orgmode.org/elpa.html). Nixpkgs is
+regularly updated to mirror all these archives.
+
+Under NixOS, you can continue to use
+`package-list-packages` and
+`package-install` to install packages. You can also
+declare the set of Emacs packages you need using the derivations from
+Nixpkgs. The rest of this section discusses declarative installation of
+Emacs packages through nixpkgs.
+
+The first step to declare the list of packages you want in your Emacs
+installation is to create a dedicated derivation. This can be done in a
+dedicated {file}`emacs.nix` file such as:
+
+::: {.example #ex-emacsNix}
+### Nix expression to build Emacs with packages (`emacs.nix`)
+
+```nix
+/*
+This is a nix expression to build Emacs and some Emacs packages I like
+from source on any distribution where Nix is installed. This will install
+all the dependencies from the nixpkgs repository and build the binary files
+without interfering with the host distribution.
+
+To build the project, type the following from the current directory:
+
+$ nix-build emacs.nix
+
+To run the newly compiled executable:
+
+$ ./result/bin/emacs
+*/
+
+# The first non-comment line in this file indicates that
+# the whole file represents a function.
+{ pkgs ? import <nixpkgs> {} }:
+
+let
+  # The let expression below defines a myEmacs binding pointing to the
+  # current stable version of Emacs. This binding is here to separate
+  # the choice of the Emacs binary from the specification of the
+  # required packages.
+  myEmacs = pkgs.emacs;
+  # This generates an emacsWithPackages function. It takes a single
+  # argument: a function from a package set to a list of packages
+  # (the packages that will be available in Emacs).
+  emacsWithPackages = (pkgs.emacsPackagesFor myEmacs).emacsWithPackages;
+in
+  # The rest of the file specifies the list of packages to install. In the
+  # example, two packages (magit and zerodark-theme) are taken from
+  # MELPA stable.
+  emacsWithPackages (epkgs: (with epkgs.melpaStablePackages; [
+    magit          # ; Integrate git <C-x g>
+    zerodark-theme # ; Nicolas' theme
+  ])
+  # Two packages (undo-tree and zoom-frm) are taken from MELPA.
+  ++ (with epkgs.melpaPackages; [
+    undo-tree      # ; <C-x u> to show the undo tree
+    zoom-frm       # ; increase/decrease font size for all buffers <C-x C-+>
+  ])
+  # Three packages are taken from GNU ELPA.
+  ++ (with epkgs.elpaPackages; [
+    auctex         # ; LaTeX mode
+    beacon         # ; highlight my cursor when scrolling
+    nameless       # ; hide current package name everywhere in elisp code
+  ])
+  # notmuch is taken from a nixpkgs derivation which contains an Emacs mode.
+  ++ [
+    pkgs.notmuch   # From main packages set
+  ])
+```
+:::
+
+The result of this configuration will be an {command}`emacs`
+command which launches Emacs with all of your chosen packages in the
+{var}`load-path`.
+
+You can check that it works by executing this in a terminal:
+```ShellSession
+$ nix-build emacs.nix
+$ ./result/bin/emacs -q
+```
+and then typing `M-x package-initialize`. Check that you
+can use all the packages you want in this Emacs instance. For example, try
+switching to the zerodark theme through `M-x load-theme <RET> zerodark <RET> y`.
+
+::: {.tip}
+A few popular extensions worth checking out are: auctex, company,
+edit-server, flycheck, helm, iedit, magit, multiple-cursors, projectile,
+and yasnippet.
+:::
+
+The list of available packages in the various ELPA repositories can be seen
+with the following commands:
+::: {.example #module-services-emacs-querying-packages}
+### Querying Emacs packages
+
+```
+nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.elpaPackages
+nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.melpaPackages
+nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.melpaStablePackages
+nix-env -f "<nixpkgs>" -qaP -A emacs.pkgs.orgPackages
+```
+:::
+
+If you are on NixOS, you can install this particular Emacs for all users by
+adding it to the list of system packages (see
+[](#sec-declarative-package-mgmt)). Simply modify your file
+{file}`configuration.nix` to make it contain:
+::: {.example #module-services-emacs-configuration-nix}
+### Custom Emacs in `configuration.nix`
+
+```
+{
+  environment.systemPackages = [
+    # [...]
+    (import /path/to/emacs.nix { inherit pkgs; })
+  ];
+}
+```
+:::
+
+In this case, the next {command}`nixos-rebuild switch` will take
+care of adding your {command}`emacs` to the {var}`PATH`
+environment variable (see [](#sec-changing-config)).
+
+<!-- fixme: i think the following is better done with config.nix
+https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides
+-->
+
+If you are not on NixOS or want to install this particular Emacs only for
+yourself, you can do so by adding it to your
+{file}`~/.config/nixpkgs/config.nix` (see
+[Nixpkgs manual](https://nixos.org/nixpkgs/manual/#sec-modify-via-packageOverrides)):
+::: {.example #module-services-emacs-config-nix}
+### Custom Emacs in `~/.config/nixpkgs/config.nix`
+
+```
+{
+  packageOverrides = super: let self = super.pkgs; in {
+    myemacs = import /path/to/emacs.nix { pkgs = self; };
+  };
+}
+```
+:::
+
+In this case, the next `nix-env -f '<nixpkgs>' -iA
+myemacs` will take care of adding your emacs to the
+{var}`PATH` environment variable.
+
+### Advanced Emacs Configuration {#module-services-emacs-advanced}
+
+If you want, you can tweak the Emacs package itself from your
+{file}`emacs.nix`. For example, if you want to have a
+GTK 3-based Emacs instead of the default GTK 2-based binary and remove the
+automatically generated {file}`emacs.desktop` (useful if you
+only use {command}`emacsclient`), you can change your file
+{file}`emacs.nix` in this way:
+
+::: {.example #ex-emacsGtk3Nix}
+### Custom Emacs build
+
+```
+{ pkgs ? import <nixpkgs> {} }:
+let
+  myEmacs = (pkgs.emacs.override {
+    # Use gtk3 instead of the default gtk2
+    withGTK3 = true;
+    withGTK2 = false;
+  }).overrideAttrs (attrs: {
+    # I don't want emacs.desktop file because I only use
+    # emacsclient.
+    postInstall = (attrs.postInstall or "") + ''
+      rm $out/share/applications/emacs.desktop
+    '';
+  });
+in [...]
+```
+:::
+
+After building this file as shown in [](#ex-emacsNix), you
+will get a GTK 3-based Emacs binary pre-loaded with your favorite packages.
+
+## Running Emacs as a Service {#module-services-emacs-running}
+
+NixOS provides an optional
+{command}`systemd` service which launches
+[Emacs daemon](https://www.gnu.org/software/emacs/manual/html_node/emacs/Emacs-Server.html)
+with the user's login session.
+
+*Source:* {file}`modules/services/editors/emacs.nix`
+
+### Enabling the Service {#module-services-emacs-enabling}
+
+To install and enable the {command}`systemd` user service for Emacs
+daemon, add the following to your {file}`configuration.nix`:
+```
+services.emacs.enable = true;
+services.emacs.package = import /home/cassou/.emacs.d { pkgs = pkgs; };
+```
+
+The {var}`services.emacs.package` option allows a custom
+derivation to be used, for example, one created by
+`emacsWithPackages`.
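+
+A minimal sketch, reusing the `emacsWithPackages` helper from
+[](#ex-emacsNix) directly in {file}`configuration.nix`:
+```
+services.emacs.package =
+  (pkgs.emacsPackagesFor pkgs.emacs).emacsWithPackages
+    (epkgs: [ epkgs.magit ]);
+```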
+
+Ensure that the Emacs server is enabled for your user's Emacs
+configuration, either by customizing the {var}`server-mode`
+variable, or by adding `(server-start)` to
+{file}`~/.emacs.d/init.el`.
+
+To start the daemon, execute the following:
+```ShellSession
+$ nixos-rebuild switch  # to activate the new configuration.nix
+$ systemctl --user daemon-reload        # to force systemd reload
+$ systemctl --user start emacs.service  # to start the Emacs daemon
+```
+The server should now be ready to serve Emacs clients.
+
+### Starting the client {#module-services-emacs-starting-client}
+
+Ensure that the Emacs server is enabled, either by customizing the
+{var}`server-mode` variable, or by adding
+`(server-start)` to {file}`~/.emacs`.
+
+To connect to the Emacs daemon, run one of the following:
+```
+emacsclient FILENAME
+emacsclient --create-frame  # opens a new frame (window)
+emacsclient --create-frame --tty  # opens a new frame on the current terminal
+```
+
+### Configuring the {var}`EDITOR` variable {#module-services-emacs-editor-variable}
+
+<!--<title>{command}`emacsclient` as the Default Editor</title>-->
+
+If [](#opt-services.emacs.defaultEditor) is
+`true`, the {var}`EDITOR` variable will be set
+to a wrapper script which launches {command}`emacsclient`.
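+
+For example:
+```
+services.emacs.enable = true;
+services.emacs.defaultEditor = true;
+```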
+
+Any setting of {var}`EDITOR` in the shell config files will
+override {var}`services.emacs.defaultEditor`. To make sure
+{var}`EDITOR` refers to the Emacs wrapper script, remove any
+existing {var}`EDITOR` assignment from
+{file}`.profile`, {file}`.bashrc`,
+{file}`.zshenv` or any other shell config file.
+
+If you have formed certain bad habits when editing files, these can be
+corrected with a shell alias to the wrapper script:
+```
+alias vi=$EDITOR
+```
+
+### Per-User Enabling of the Service {#module-services-emacs-per-user}
+
+In general, {command}`systemd` user services are globally enabled
+by symlinks in {file}`/etc/systemd/user`. If the
+Emacs daemon is not wanted for all users, it is possible to install the
+service without globally enabling it:
+```
+services.emacs.enable = false;
+services.emacs.install = true;
+```
+
+To enable the {command}`systemd` user service for just the
+currently logged-in user, run:
+```
+systemctl --user enable emacs
+```
+This will add the symlink
+{file}`~/.config/systemd/user/emacs.service`.
+
+## Configuring Emacs {#module-services-emacs-configuring}
+
+If you want to only use extension packages from Nixpkgs, you can add
+`(setq package-archives nil)` to your init file.
+
+After the declarative Emacs package configuration has been tested,
+previously downloaded packages can be cleaned up by removing
+{file}`~/.emacs.d/elpa` (do make a backup first, in case you
+forgot a package).
+
+<!--
+      todo: is it worth documenting customizations for
+      server-switch-hook, server-done-hook?
+  -->
+
+### A Major Mode for Nix Expressions {#module-services-emacs-major-mode}
+
+Of interest may be {var}`melpaPackages.nix-mode`, which
+provides syntax highlighting for the Nix language. This is particularly
+convenient if you regularly edit Nix files.
+
+### Accessing man pages {#module-services-emacs-man-pages}
+
+You can use `woman` to get completion of all available
+man pages. For example, type `M-x woman <RET> nixos-rebuild <RET>`.
+
+### Editing DocBook 5 XML Documents {#sec-emacs-docbook-xml}
+
+Emacs includes
+[nXML](https://www.gnu.org/software/emacs/manual/html_node/nxml-mode/Introduction.html),
+a major-mode for validating and editing XML documents. When editing DocBook
+5.0 documents, such as [this one](#book-nixos-manual),
+nXML needs to be configured with the relevant schema, which is not
+included.
+
+To install the DocBook 5.0 schemas, either add
+{var}`pkgs.docbook5` to [](#opt-environment.systemPackages)
+([NixOS](#sec-declarative-package-mgmt)), or run
+`nix-env -f '<nixpkgs>' -iA docbook5`
+([Nix](#sec-ad-hoc-packages)).
+
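+For the declarative route, this amounts to a line such as the
+following in {file}`configuration.nix`:
+
+```
+environment.systemPackages = with pkgs; [ docbook5 ];
+```
+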
+Then customize the variable {var}`rng-schema-locating-files` to
+include {file}`~/.emacs.d/schemas.xml` and put the following
+text into that file:
+::: {.example #ex-emacs-docbook-xml}
+### nXML Schema Configuration (`~/.emacs.d/schemas.xml`)
+
+```xml
+<?xml version="1.0"?>
+<!--
+  To let emacs find this file, evaluate:
+  (add-to-list 'rng-schema-locating-files "~/.emacs.d/schemas.xml")
+-->
+<locatingRules xmlns="http://thaiopensource.com/ns/locating-rules/1.0">
+  <!--
+    Use this variation if pkgs.docbook5 is added to environment.systemPackages
+  -->
+  <namespace ns="http://docbook.org/ns/docbook"
+             uri="/run/current-system/sw/share/xml/docbook-5.0/rng/docbookxi.rnc"/>
+  <!--
+    Use this variation if installing schema with "nix-env -iA pkgs.docbook5".
+  <namespace ns="http://docbook.org/ns/docbook"
+             uri="../.nix-profile/share/xml/docbook-5.0/rng/docbookxi.rnc"/>
+  -->
+</locatingRules>
+```
+:::
diff --git a/nixpkgs/nixos/modules/services/editors/emacs.nix b/nixpkgs/nixos/modules/services/editors/emacs.nix
new file mode 100644
index 000000000000..fad4f39ff210
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/editors/emacs.nix
@@ -0,0 +1,118 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.emacs;
+
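+  # Wrapper installed as "emacseditor": opens files via emacsclient and falls back to a plain Emacs (--alternate-editor) when no daemon is running.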
+  editorScript = pkgs.writeScriptBin "emacseditor" ''
+    #!${pkgs.runtimeShell}
+    if [ -z "$1" ]; then
+      exec ${cfg.package}/bin/emacsclient --create-frame --alternate-editor ${cfg.package}/bin/emacs
+    else
+      exec ${cfg.package}/bin/emacsclient --alternate-editor ${cfg.package}/bin/emacs "$@"
+    fi
+  '';
+
+  desktopApplicationFile = pkgs.writeTextFile {
+    name = "emacsclient.desktop";
+    destination = "/share/applications/emacsclient.desktop";
+    text = ''
+      [Desktop Entry]
+      Name=Emacsclient
+      GenericName=Text Editor
+      Comment=Edit text
+      MimeType=text/english;text/plain;text/x-makefile;text/x-c++hdr;text/x-c++src;text/x-chdr;text/x-csrc;text/x-java;text/x-moc;text/x-pascal;text/x-tcl;text/x-tex;application/x-shellscript;text/x-c;text/x-c++;
+      Exec=emacseditor %F
+      Icon=emacs
+      Type=Application
+      Terminal=false
+      Categories=Development;TextEditor;
+      StartupWMClass=Emacs
+      Keywords=Text;Editor;
+    '';
+  };
+
+in
+{
+
+  options.services.emacs = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable a user service for the Emacs daemon. Use `emacsclient` to connect to the
+        daemon. If `true`, {var}`services.emacs.install` is
+        considered `true`, whatever its value.
+      '';
+    };
+
+    install = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to install a user service for the Emacs daemon. Once
+        the service is started, use emacsclient to connect to the
+        daemon.
+
+        The service must be manually started for each user with
+        "systemctl --user start emacs" or globally through
+        {var}`services.emacs.enable`.
+      '';
+    };
+
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.emacs;
+      defaultText = literalExpression "pkgs.emacs";
+      description = lib.mdDoc ''
+        emacs derivation to use.
+      '';
+    };
+
+    defaultEditor = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        When enabled, configures emacsclient to be the default editor
+        using the EDITOR environment variable.
+      '';
+    };
+
+    startWithGraphical = mkOption {
+      type = types.bool;
+      default = config.services.xserver.enable;
+      defaultText = literalExpression "config.services.xserver.enable";
+      description = lib.mdDoc ''
+        Start emacs with the graphical session instead of any session. Without this, emacs clients will not be able to create frames in the graphical session.
+      '';
+    };
+  };
+
+  config = mkIf (cfg.enable || cfg.install) {
+    systemd.user.services.emacs = {
+      description = "Emacs: the extensible, self-documenting text editor";
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.bash}/bin/bash -c 'source ${config.system.build.setEnvironment}; exec ${cfg.package}/bin/emacs --daemon'";
+        ExecStop = "${cfg.package}/bin/emacsclient --eval (kill-emacs)";
+        Restart = "always";
+      };
+
+      unitConfig = optionalAttrs cfg.startWithGraphical {
+        After = "graphical-session.target";
+      };
+    } // optionalAttrs cfg.enable {
+      wantedBy = if cfg.startWithGraphical then [ "graphical-session.target" ] else [ "default.target" ];
+    };
+
+    environment.systemPackages = [ cfg.package editorScript desktopApplicationFile ];
+
+    environment.variables.EDITOR = mkIf cfg.defaultEditor (mkOverride 900 "emacseditor");
+  };
+
+  meta.doc = ./emacs.md;
+}
diff --git a/nixpkgs/nixos/modules/services/editors/haste.nix b/nixpkgs/nixos/modules/services/editors/haste.nix
new file mode 100644
index 000000000000..a46415d43634
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/editors/haste.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  pkg = pkgs.haste-server;
+  cfg = config.services.haste-server;
+
+  format = pkgs.formats.json {};
+in
+{
+  options.services.haste-server = {
+    enable = mkEnableOption (lib.mdDoc "haste-server");
+    openFirewall = mkEnableOption (lib.mdDoc "firewall passthrough for haste-server");
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Configuration for haste-server.
+        For documentation see [project readme](https://github.com/toptal/haste-server#settings)
+      '';
+      type = format.type;
+    };
+  };
+
+  config = mkIf (cfg.enable) {
+    networking.firewall.allowedTCPPorts = mkIf (cfg.openFirewall) [ cfg.settings.port ];
+
+    services.haste-server = {
+      settings = {
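+        # Opinionated defaults; every value below uses mkDefault and can be overridden through services.haste-server.settings.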
+        host = mkDefault "::";
+        port = mkDefault 7777;
+
+        keyLength = mkDefault 10;
+        maxLength = mkDefault 400000;
+
+        staticMaxAge = mkDefault 86400;
+        recompressStaticAssets = mkDefault false;
+
+        logging = mkDefault [
+          {
+            level = "verbose";
+            type = "Console";
+            colorize = true;
+          }
+        ];
+
+        keyGenerator = mkDefault {
+          type = "phonetic";
+        };
+
+        rateLimits = {
+          categories = {
+            normal = {
+              totalRequests = mkDefault 500;
+              every = mkDefault 60000;
+            };
+          };
+        };
+
+        storage = mkDefault {
+          type = "file";
+        };
+
+        documents = {
+          about = mkDefault "${pkg}/share/haste-server/about.md";
+        };
+      };
+    };
+
+    systemd.services.haste-server = {
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "network.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        User = "haste-server";
+        DynamicUser = true;
+        StateDirectory = "haste-server";
+        WorkingDirectory = "/var/lib/haste-server";
+        ExecStart = "${pkg}/bin/haste-server ${format.generate "config.json" cfg.settings}";
+      };
+
+      path = with pkgs; [ pkg coreutils ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/editors/infinoted.nix b/nixpkgs/nixos/modules/services/editors/infinoted.nix
new file mode 100644
index 000000000000..de0989994019
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/editors/infinoted.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.infinoted;
+in {
+  options.services.infinoted = {
+    enable = mkEnableOption (lib.mdDoc "infinoted");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.libinfinity;
+      defaultText = literalExpression "pkgs.libinfinity";
+      description = lib.mdDoc ''
+        Package providing infinoted
+      '';
+    };
+
+    keyFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Private key to use for TLS
+      '';
+    };
+
+    certificateFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Server certificate to use for TLS
+      '';
+    };
+
+    certificateChain = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Chain of CA-certificates to which our `certificateFile` is relative.
+        Optional for TLS.
+      '';
+    };
+
+    securityPolicy = mkOption {
+      type = types.enum ["no-tls" "allow-tls" "require-tls"];
+      default = "require-tls";
+      description = lib.mdDoc ''
+        How strictly to enforce clients connection with TLS.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 6523;
+      description = lib.mdDoc ''
+        Port to listen on
+      '';
+    };
+
+    rootDirectory = mkOption {
+      type = types.path;
+      default = "/var/lib/infinoted/documents/";
+      description = lib.mdDoc ''
+        Root of the directory structure to serve
+      '';
+    };
+
+    plugins = mkOption {
+      type = types.listOf types.str;
+      default = [ "note-text" "note-chat" "logging" "autosave" ];
+      description = lib.mdDoc ''
+        Plugins to enable
+      '';
+    };
+
+    passwordFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        File to read server-wide password from
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = ''
+        [autosave]
+        interval=10
+      '';
+      description = lib.mdDoc ''
+        Additional configuration to append to infinoted.conf
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "infinoted";
+      description = lib.mdDoc ''
+        What to call the dedicated user under which infinoted is run
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "infinoted";
+      description = lib.mdDoc ''
+        What to call the primary group of the dedicated user under which infinoted is run
+      '';
+    };
+  };
+
+  config = mkIf (cfg.enable) {
+    users.users = optionalAttrs (cfg.user == "infinoted")
+      { infinoted = {
+          description = "Infinoted user";
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      };
+    users.groups = optionalAttrs (cfg.group == "infinoted")
+      { infinoted = { };
+      };
+
+    systemd.services.infinoted =
+      { description = "Gobby Dedicated Server";
+
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+
+        serviceConfig = {
+          Type = "simple";
+          Restart = "always";
+          ExecStart = "${cfg.package.infinoted} --config-file=/var/lib/infinoted/infinoted.conf";
+          User = cfg.user;
+          Group = cfg.group;
+          PermissionsStartOnly = true;
+        };
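+        # Assemble infinoted.conf at runtime (mode 0600) so that the password read from passwordFile does not end up in the Nix store.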
+        preStart = ''
+          mkdir -p /var/lib/infinoted
+          install -o ${cfg.user} -g ${cfg.group} -m 0600 /dev/null /var/lib/infinoted/infinoted.conf
+          cat >>/var/lib/infinoted/infinoted.conf <<EOF
+          [infinoted]
+          ${optionalString (cfg.keyFile != null) "key-file=${cfg.keyFile}"}
+          ${optionalString (cfg.certificateFile != null) "certificate-file=${cfg.certificateFile}"}
+          ${optionalString (cfg.certificateChain != null) "certificate-chain=${cfg.certificateChain}"}
+          port=${toString cfg.port}
+          security-policy=${cfg.securityPolicy}
+          root-directory=${cfg.rootDirectory}
+          plugins=${concatStringsSep ";" cfg.plugins}
+          ${optionalString (cfg.passwordFile != null) "password=$(head -n 1 ${cfg.passwordFile})"}
+
+          ${cfg.extraConfig}
+          EOF
+
+          install -o ${cfg.user} -g ${cfg.group} -m 0750 -d ${cfg.rootDirectory}
+        '';
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/finance/odoo.nix b/nixpkgs/nixos/modules/services/finance/odoo.nix
new file mode 100644
index 000000000000..b8574ed09af9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/finance/odoo.nix
@@ -0,0 +1,128 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.odoo;
+  format = pkgs.formats.ini {};
+in
+{
+  options = {
+    services.odoo = {
+      enable = mkEnableOption (lib.mdDoc "odoo");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.odoo;
+        defaultText = literalExpression "pkgs.odoo";
+        description = lib.mdDoc "Odoo package to use.";
+      };
+
+      addons = mkOption {
+        type = with types; listOf package;
+        default = [];
+        example = literalExpression "[ pkgs.odoo_enterprise ]";
+        description = lib.mdDoc "Odoo addons.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = {};
+        description = lib.mdDoc ''
+          Odoo configuration settings. For more details see <https://www.odoo.com/documentation/15.0/administration/install/deploy.html>
+        '';
+        example = literalExpression ''
+          options = {
+            db_user = "odoo";
+            db_password="odoo";
+          };
+        '';
+      };
+
+      domain = mkOption {
+        type = with types; nullOr str;
+        description = lib.mdDoc "Domain to host Odoo with nginx";
+        default = null;
+      };
+    };
+  };
+
+  config = mkIf (cfg.enable) (let
+    cfgFile = format.generate "odoo.cfg" cfg.settings;
+  in {
+    services.nginx = mkIf (cfg.domain != null) {
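+      # Odoo serves the web client on port 8069 and longpolling/chat on 8072 by default; both are proxied here.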
+      upstreams = {
+        odoo.servers = {
+          "127.0.0.1:8069" = {};
+        };
+
+        odoochat.servers = {
+          "127.0.0.1:8072" = {};
+        };
+      };
+
+      virtualHosts."${cfg.domain}" = {
+        extraConfig = ''
+          proxy_read_timeout 720s;
+          proxy_connect_timeout 720s;
+          proxy_send_timeout 720s;
+
+          proxy_set_header X-Forwarded-Host $host;
+          proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+          proxy_set_header X-Forwarded-Proto $scheme;
+          proxy_set_header X-Real-IP $remote_addr;
+        '';
+
+        locations = {
+          "/longpolling" = {
+            proxyPass = "http://odoochat";
+          };
+
+          "/" = {
+            proxyPass = "http://odoo";
+            extraConfig = ''
+              proxy_redirect off;
+            '';
+          };
+        };
+      };
+    };
+
+    services.odoo.settings.options = {
+      proxy_mode = cfg.domain != null;
+    };
+
+    users.users.odoo = {
+      isSystemUser = true;
+      group = "odoo";
+    };
+    users.groups.odoo = {};
+
+    systemd.services.odoo = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "postgresql.service" ];
+
+      # pg_dump
+      path = [ config.services.postgresql.package ];
+
+      requires = [ "postgresql.service" ];
+      script = "HOME=$STATE_DIRECTORY ${cfg.package}/bin/odoo ${optionalString (cfg.addons != []) "--addons-path=${concatMapStringsSep "," escapeShellArg cfg.addons}"} -c ${cfgFile}";
+
+      serviceConfig = {
+        DynamicUser = true;
+        User = "odoo";
+        StateDirectory = "odoo";
+      };
+    };
+
+    services.postgresql = {
+      enable = true;
+
+      ensureDatabases = [ "odoo" ];
+      ensureUsers = [{
+        name = "odoo";
+        ensureDBOwnership = true;
+      }];
+    };
+  });
+}
diff --git a/nixpkgs/nixos/modules/services/games/asf.nix b/nixpkgs/nixos/modules/services/games/asf.nix
new file mode 100644
index 000000000000..432de6336ce2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/asf.nix
@@ -0,0 +1,271 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.archisteamfarm;
+
+  format = pkgs.formats.json { };
+
+  asf-config = format.generate "ASF.json" (cfg.settings // {
+    # We disable auto-updates because ASF cannot update itself anyway
+    # and NixOS takes care of restarting the service.
+    # In theory this is not needed, as it is already the default for default builds.
+    UpdateChannel = 0;
+    Headless = true;
+  } // lib.optionalAttrs (cfg.ipcPasswordFile != null) {
+    IPCPassword = "#ipcPassword#";
+  });
+
+  ipc-config = format.generate "IPC.config" cfg.ipcSettings;
+
+  mkBot = n: c:
+    format.generate "${n}.json" (c.settings // {
+      SteamLogin = if c.username == "" then n else c.username;
+      SteamPassword = c.passwordFile;
+      # sets the password format to file (https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Security#file)
+      PasswordFormat = 4;
+      Enabled = c.enabled;
+    });
+in
+{
+  options.services.archisteamfarm = {
+    enable = mkOption {
+      type = types.bool;
+      description = lib.mdDoc ''
+        If enabled, starts the ArchiSteamFarm service.
+        For configuring the SteamGuard token you will need to use the web UI, which is enabled by default on 127.0.0.1:1242.
+        You cannot configure ASF in any way outside of Nix, since all the config files get wiped on restart and replaced with the ones set programmatically by Nix.
+      '';
+      default = false;
+    };
+
+    web-ui = mkOption {
+      type = types.submodule {
+        options = {
+          enable = mkEnableOption "" // {
+            description = lib.mdDoc "Whether to start the web-ui. This is the preferred way of configuring things such as the steam guard token.";
+          };
+
+          package = mkOption {
+            type = types.package;
+            default = pkgs.ArchiSteamFarm.ui;
+            defaultText = lib.literalExpression "pkgs.ArchiSteamFarm.ui";
+            description =
+              lib.mdDoc "Web-UI package to use. Contents must be in lib/dist.";
+          };
+        };
+      };
+      default = {
+        enable = true;
+      };
+      example = {
+        enable = false;
+      };
+      description = lib.mdDoc "The Web-UI hosted on 127.0.0.1:1242.";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.ArchiSteamFarm;
+      defaultText = lib.literalExpression "pkgs.ArchiSteamFarm";
+      description =
+        lib.mdDoc "Package to use. Should always be the latest version, for security reasons, since this module uses very new features and to not get out of sync with the Steam API.";
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/asf";
+      description = lib.mdDoc ''
+        The ASF home directory used to store all data.
+        If left as the default value, this directory will automatically be created before the ASF server starts; otherwise the sysadmin is responsible for ensuring the directory exists with appropriate ownership and permissions.'';
+    };
+
+    settings = mkOption {
+      type = format.type;
+      description = lib.mdDoc ''
+        The ASF.json file, all the options are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#global-config).
+        Note that `AutoRestart` and `UpdateChannel` are always set to `false` and `0` respectively, because NixOS takes care of updating everything.
+        `Headless` is also always set to `true` because there is no way to provide inputs via a systemd service.
+        You should try to keep ASF up to date since upstream does not provide support for anything but the latest version and you're exposing yourself to all kinds of issues - as is outlined [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#updateperiod).
+      '';
+      example = {
+        Statistics = false;
+      };
+      default = { };
+    };
+
+    ipcPasswordFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `asf` user/group.";
+    };
+
+    ipcSettings = mkOption {
+      type = format.type;
+      description = lib.mdDoc ''
+        Settings to write to IPC.config.
+        All options can be found [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/IPC#custom-configuration).
+      '';
+      example = {
+        Kestrel = {
+          Endpoints = {
+            HTTP = {
+              Url = "http://*:1242";
+            };
+          };
+        };
+      };
+      default = { };
+    };
+
+    bots = mkOption {
+      type = types.attrsOf (types.submodule {
+        options = {
+          username = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Name of the user to log in. Default is attribute name.";
+            default = "";
+          };
+          passwordFile = mkOption {
+            type = types.path;
+            description = lib.mdDoc "Path to a file containing the password. The file must be readable by the `asf` user/group.";
+          };
+          enabled = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc "Whether to enable the bot on startup.";
+          };
+          settings = mkOption {
+            type = types.attrs;
+            description = lib.mdDoc ''
+              Additional settings that are documented [here](https://github.com/JustArchiNET/ArchiSteamFarm/wiki/Configuration#bot-config).
+            '';
+            default = { };
+          };
+        };
+      });
+      description = lib.mdDoc ''
+        Bots name and configuration.
+      '';
+      example = {
+        exampleBot = {
+          username = "alice";
+          passwordFile = "/var/lib/asf/secrets/password";
+          settings = { SteamParentalCode = "1234"; };
+        };
+      };
+      default = { };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users = {
+      users.asf = {
+        home = cfg.dataDir;
+        isSystemUser = true;
+        group = "asf";
+        description = "Archis-Steam-Farm service user";
+      };
+      groups.asf = { };
+    };
+
+    systemd.services = {
+      asf = {
+        description = "Archis-Steam-Farm Service";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = mkMerge [
+          (mkIf (cfg.dataDir == "/var/lib/asf") {
+            StateDirectory = "asf";
+            StateDirectoryMode = "700";
+          })
+          {
+            User = "asf";
+            Group = "asf";
+            WorkingDirectory = cfg.dataDir;
+            Type = "simple";
+            ExecStart = "${lib.getExe cfg.package} --no-restart --process-required --service --system-required --path ${cfg.dataDir}";
+            Restart = "always";
+
+            # copied from the default systemd service at
+            # https://github.com/JustArchiNET/ArchiSteamFarm/blob/main/ArchiSteamFarm/overlay/variant-base/linux/ArchiSteamFarm%40.service
+            CapabilityBoundingSet = "";
+            DevicePolicy = "closed";
+            LockPersonality = true;
+            NoNewPrivileges = true;
+            PrivateDevices = true;
+            PrivateIPC = true;
+            PrivateMounts = true;
+            PrivateTmp = true; # instead of rw /tmp
+            PrivateUsers = true;
+            ProcSubset = "pid";
+            ProtectClock = true;
+            ProtectControlGroups = true;
+            ProtectHome = true;
+            ProtectHostname = true;
+            ProtectKernelLogs = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            ProtectProc = "invisible";
+            ProtectSystem = "strict";
+            RemoveIPC = true;
+            RestrictAddressFamilies = "AF_INET AF_INET6 AF_NETLINK AF_UNIX";
+            RestrictNamespaces = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            SystemCallArchitectures = "native";
+            UMask = "0077";
+
+            # we luckily already have systemd v247+
+            SecureBits = "noroot-locked";
+            SystemCallFilter = [ "@system-service" "~@privileged" ];
+          }
+        ];
+
+        preStart =
+          let
+            createBotsScript = pkgs.runCommandLocal "ASF-bots" { } ''
+              mkdir -p $out
+              # clean potential removed bots
+              rm -rf $out/*.json
+              for i in ${strings.concatStringsSep " " (lists.map (x: "${getName x},${x}") (attrsets.mapAttrsToList mkBot cfg.bots))}; do IFS=",";
+                set -- $i
+                ln -fs $2 $out/$1
+              done
+            '';
+            replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+          in
+          ''
+            mkdir -p config
+
+            cp --no-preserve=mode ${asf-config} config/ASF.json
+
+            ${optionalString (cfg.ipcPasswordFile != null) ''
+              ${replaceSecretBin} '#ipcPassword#' '${cfg.ipcPasswordFile}' config/ASF.json
+            ''}
+
+            ${optionalString (cfg.ipcSettings != {}) ''
+              ln -fs ${ipc-config} config/IPC.config
+            ''}
+
+            ${optionalString (cfg.bots != {}) ''
+              ln -fs ${createBotsScript}/* config/
+            ''}
+
+            rm -f www
+            ${optionalString cfg.web-ui.enable ''
+              ln -s ${cfg.web-ui.package}/ www
+            ''}
+          '';
+      };
+    };
+  };
+
+  meta = {
+    buildDocsInSandbox = false;
+    maintainers = with maintainers; [ lom SuperSandro2000 ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/games/crossfire-server.nix b/nixpkgs/nixos/modules/services/games/crossfire-server.nix
new file mode 100644
index 000000000000..0849667e61c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/crossfire-server.nix
@@ -0,0 +1,179 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.crossfire-server;
+  serverPort = 13327;
+in {
+  options.services.crossfire-server = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If enabled, the Crossfire game server will be started at boot.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.crossfire-server;
+      defaultText = literalExpression "pkgs.crossfire-server";
+      description = lib.mdDoc ''
+        The package to use for the Crossfire server (and map/arch data, if you
+        don't change dataDir).
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "${cfg.package}/share/crossfire";
+      defaultText = literalExpression ''"''${config.services.crossfire-server.package}/share/crossfire"'';
+      description = lib.mdDoc ''
+        Where to load readonly data from -- maps, archetypes, treasure tables,
+        and the like. If you plan to edit the data on the live server (rather
+        than overlaying the crossfire-maps and crossfire-arch packages and
+        nixos-rebuilding), point this somewhere read-write and copy the data
+        there before starting the server.
+      '';
+    };
+
+    stateDir = mkOption {
+      type = types.str;
+      default = "/var/lib/crossfire";
+      description = lib.mdDoc ''
+        Where to store runtime data (save files, persistent items, etc).
+
+        If left at the default, this will be automatically created on server
+        startup if it does not already exist. If changed, it is the admin's
+        responsibility to make sure that the directory exists and is writeable
+        by the `crossfire` user.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open ports in the firewall for the server.
+      '';
+    };
+
+    configFiles = mkOption {
+      type = types.attrsOf types.str;
+      description = lib.mdDoc ''
+        Text to append to the corresponding configuration files. Note that the
+        files given in the example are *not* the complete set of files available
+        to customize; look in /etc/crossfire after enabling the server to see
+        the available files, and read the comments in each file for detailed
+        documentation on the format and what settings are available.
+
+        Note that the motd, rules, and news files, if configured here, will
+        overwrite the example files that come with the server, rather than being
+        appended to them as the other configuration files are.
+      '';
+      example = literalExpression ''
+        {
+          dm_file = '''
+            admin:secret_password:localhost
+            alice:xyzzy:*
+          ''';
+          ban_file = '''
+            # Bob is a jerk
+            bob@*
+            # So is everyone on 192.168.86.255/24
+            *@192.168.86.
+          ''';
+          metaserver2 = '''
+            metaserver2_notification on
+            localhostname crossfire.example.net
+          ''';
+          motd = "Welcome to CrossFire!";
+          news = "No news yet.";
+          rules = "Don't be a jerk.";
+          settings = '''
+            # be nicer to newbies and harsher to experienced players
+            balanced_stat_loss true
+            # don't let players pick up and use admin-created items
+            real_wiz false
+          ''';
+        }
+      '';
+      default = {};
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.crossfire = {
+      description     = "Crossfire server daemon user";
+      home            = cfg.stateDir;
+      createHome      = false;
+      isSystemUser    = true;
+      group           = "crossfire";
+    };
+    users.groups.crossfire = {};
+
+    # Merge the cfg.configFiles setting with the default files shipped with
+    # Crossfire.
+    # For most files this consists of reading ${crossfire}/etc/crossfire/${name}
+    # and appending the user setting to it; the motd, news, and rules are handled
+    # specially, with user-provided values completely replacing the original.
+    environment.etc = lib.attrsets.mapAttrs'
+      (name: value: lib.attrsets.nameValuePair "crossfire/${name}" {
+        mode = "0644";
+        text =
+          (optionalString (!elem name ["motd" "news" "rules"])
+            (fileContents "${cfg.package}/etc/crossfire/${name}"))
+          + "\n${value}";
+      }) ({
+        ban_file = "";
+        dm_file = "";
+        exp_table = "";
+        forbid = "";
+        metaserver2 = "";
+        motd = fileContents "${cfg.package}/etc/crossfire/motd";
+        news = fileContents "${cfg.package}/etc/crossfire/news";
+        rules = fileContents "${cfg.package}/etc/crossfire/rules";
+        settings = "";
+        stat_bonus = "";
+      } // cfg.configFiles);
+
+    systemd.services.crossfire-server = {
+      description   = "Crossfire Server Daemon";
+      wantedBy      = [ "multi-user.target" ];
+      after         = [ "network.target" ];
+
+      serviceConfig = mkMerge [
+        {
+          ExecStart = "${cfg.package}/bin/crossfire-server -conf /etc/crossfire -local '${cfg.stateDir}' -data '${cfg.dataDir}'";
+          Restart = "always";
+          User = "crossfire";
+          Group = "crossfire";
+          WorkingDirectory = cfg.stateDir;
+        }
+        (mkIf (cfg.stateDir == "/var/lib/crossfire") {
+          StateDirectory = "crossfire";
+        })
+      ];
+
+      # The crossfire server needs access to a bunch of files at runtime that
+      # are not created automatically at server startup; they're meant to be
+      # installed in $PREFIX/var/crossfire by `make install`. And those files
+      # need to be writeable, so we can't just point at the ones in the nix
+      # store. Instead we take the approach of copying them out of the store
+      # on first run. If `bookarch` already exists, we assume the rest of the
+      # files do as well, and copy nothing -- otherwise we risk overwriting
+      # server state information every time the server is upgraded.
+      preStart = ''
+        if [ ! -e "${cfg.stateDir}"/bookarch ]; then
+          ${pkgs.rsync}/bin/rsync -a --chmod=u=rwX,go=rX \
+            "${cfg.package}/var/crossfire/" "${cfg.stateDir}/"
+        fi
+      '';
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ serverPort ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/games/deliantra-server.nix b/nixpkgs/nixos/modules/services/games/deliantra-server.nix
new file mode 100644
index 000000000000..f39044eda7c7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/deliantra-server.nix
@@ -0,0 +1,172 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.deliantra-server;
+  serverPort = 13327;
+in {
+  options.services.deliantra-server = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If enabled, the Deliantra game server will be started at boot.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.deliantra-server;
+      defaultText = literalExpression "pkgs.deliantra-server";
+      description = lib.mdDoc ''
+        The package to use for the Deliantra server (and map/arch data, if you
+        don't change dataDir).
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "${pkgs.deliantra-data}";
+      defaultText = literalExpression ''"''${pkgs.deliantra-data}"'';
+      description = lib.mdDoc ''
+        Where to store readonly data (maps, archetypes, sprites, etc).
+        Note that if you plan to use the live map editor (rather than editing
+        the maps offline and then nixos-rebuilding), THIS MUST BE WRITEABLE --
+        copy the deliantra-data someplace writeable (say,
+        /var/lib/deliantra/data) and update this option accordingly.
+      '';
+    };
+
+    stateDir = mkOption {
+      type = types.str;
+      default = "/var/lib/deliantra";
+      description = lib.mdDoc ''
+        Where to store runtime data (save files, persistent items, etc).
+
+        If left at the default, this will be automatically created on server
+        startup if it does not already exist. If changed, it is the admin's
+        responsibility to make sure that the directory exists and is writeable
+        by the `deliantra` user.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open ports in the firewall for the server.
+      '';
+    };
+
+    configFiles = mkOption {
+      type = types.attrsOf types.str;
+      description = lib.mdDoc ''
+        Contents of the server configuration files. These will be appended to
+        the example configurations the server comes with and overwrite any
+        default settings defined therein.
+
+        The example here is not comprehensive. See the files in
+        /etc/deliantra-server after enabling this module for full documentation.
+      '';
+      example = literalExpression ''
+        {
+          dm_file = '''
+            admin:secret_password:localhost
+            alice:xyzzy:*
+          ''';
+          motd = "Welcome to Deliantra!";
+          settings = '''
+            # Settings for game mechanics.
+            stat_loss_on_death true
+            armor_max_enchant 7
+          ''';
+          config = '''
+            # Settings for the server daemon.
+            hiscore_url https://deliantra.example.net/scores/
+            max_map_reset 86400
+          ''';
+        }
+      '';
+      default = {
+        motd = "";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.deliantra = {
+      description     = "Deliantra server daemon user";
+      home            = cfg.stateDir;
+      createHome      = false;
+      isSystemUser    = true;
+      group           = "deliantra";
+    };
+    users.groups.deliantra = {};
+
+    # Merge the cfg.configFiles setting with the default files shipped with
+    # Deliantra.
+    # For most files this consists of reading
+    # ${deliantra}/etc/deliantra-server/${name} and appending the user setting
+    # to it.
+    environment.etc = lib.attrsets.mapAttrs'
+      (name: value: lib.attrsets.nameValuePair "deliantra-server/${name}" {
+        mode = "0644";
+        text =
+          # Deliantra doesn't come with a motd file, but respects it if present
+          # in /etc.
+          (optionalString (name != "motd")
+            (fileContents "${cfg.package}/etc/deliantra-server/${name}"))
+          + "\n${value}";
+      }) ({
+        motd = "";
+        settings = "";
+        config = "";
+        dm_file = "";
+      } // cfg.configFiles);
+
+    systemd.services.deliantra-server = {
+      description   = "Deliantra Server Daemon";
+      wantedBy      = [ "multi-user.target" ];
+      after         = [ "network.target" ];
+
+      environment = {
+        DELIANTRA_DATADIR="${cfg.dataDir}";
+        DELIANTRA_LOCALDIR="${cfg.stateDir}";
+        DELIANTRA_CONFDIR="/etc/deliantra-server";
+      };
+
+      serviceConfig = mkMerge [
+        {
+          ExecStart = "${cfg.package}/bin/deliantra-server";
+          Restart = "always";
+          User = "deliantra";
+          Group = "deliantra";
+          WorkingDirectory = cfg.stateDir;
+        }
+        (mkIf (cfg.stateDir == "/var/lib/deliantra") {
+          StateDirectory = "deliantra";
+        })
+      ];
+
+      # The deliantra server needs access to a bunch of files at runtime that
+      # are not created automatically at server startup; they're meant to be
+      # installed in $PREFIX/var/deliantra-server by `make install`. And those
+      # files need to be writeable, so we can't just point at the ones in the
+      # nix store. Instead we take the approach of copying them out of the store
+      # on first run. If `bookarch` already exists, we assume the rest of the
+      # files do as well, and copy nothing -- otherwise we risk overwriting
+      # server state information every time the server is upgraded.
+      preStart = ''
+        if [ ! -e "${cfg.stateDir}"/bookarch ]; then
+          ${pkgs.rsync}/bin/rsync -a --chmod=u=rwX,go=rX \
+            "${cfg.package}/var/deliantra-server/" "${cfg.stateDir}/"
+        fi
+      '';
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ serverPort ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/games/factorio.nix b/nixpkgs/nixos/modules/services/games/factorio.nix
new file mode 100644
index 000000000000..b349ffa2375f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/factorio.nix
@@ -0,0 +1,299 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.factorio;
+  name = "Factorio";
+  stateDir = "/var/lib/${cfg.stateDirName}";
+  mkSavePath = name: "${stateDir}/saves/${name}.zip";
+  configFile = pkgs.writeText "factorio.conf" ''
+    use-system-read-write-data-directories=true
+    [path]
+    read-data=${cfg.package}/share/factorio/data
+    write-data=${stateDir}
+  '';
+  serverSettings = {
+    name = cfg.game-name;
+    description = cfg.description;
+    visibility = {
+      public = cfg.public;
+      lan = cfg.lan;
+    };
+    username = cfg.username;
+    password = cfg.password;
+    token = cfg.token;
+    game_password = cfg.game-password;
+    require_user_verification = cfg.requireUserVerification;
+    max_upload_in_kilobytes_per_second = 0;
+    minimum_latency_in_ticks = 0;
+    ignore_player_limit_for_returning_players = false;
+    allow_commands = "admins-only";
+    autosave_interval = cfg.autosave-interval;
+    autosave_slots = 5;
+    afk_autokick_interval = 0;
+    auto_pause = true;
+    only_admins_can_pause_the_game = true;
+    autosave_only_on_server = true;
+    non_blocking_saving = cfg.nonBlockingSaving;
+  } // cfg.extraSettings;
+  serverSettingsFile = pkgs.writeText "server-settings.json" (builtins.toJSON (filterAttrsRecursive (n: v: v != null) serverSettings));
+  serverAdminsFile = pkgs.writeText "server-adminlist.json" (builtins.toJSON cfg.admins);
+  modDir = pkgs.factorio-utils.mkModDirDrv cfg.mods cfg.mods-dat;
+in
+{
+  options = {
+    services.factorio = {
+      enable = mkEnableOption (lib.mdDoc name);
+      port = mkOption {
+        type = types.port;
+        default = 34197;
+        description = lib.mdDoc ''
+          The port to which the service should bind.
+        '';
+      };
+
+      bind = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc ''
+          The address to which the service should bind.
+        '';
+      };
+
+      admins = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "username" ];
+        description = lib.mdDoc ''
+          List of player names which will be admin.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to automatically open the specified UDP port in the firewall.
+        '';
+      };
+      saveName = mkOption {
+        type = types.str;
+        default = "default";
+        description = lib.mdDoc ''
+          The name of the savegame that will be used by the server.
+
+          When not present in /var/lib/''${config.services.factorio.stateDirName}/saves,
+          a new map with default settings will be generated before starting the service.
+        '';
+      };
+      loadLatestSave = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Load the latest savegame on startup. This overrides saveName, in that the latest
+          save will always be used even if a saved game of the given name exists. It still
+          controls the 'canonical' name of the savegame.
+
+          Set this to true to have the server automatically reload a recent autosave after
+          a crash or desync.
+        '';
+      };
+      # TODO Add more individual settings as nixos-options?
+      # TODO XXX The server tries to copy a newly created config file over the old one
+      #   on shutdown, but fails, because it's in the nix store. When is this needed?
+      #   Can an admin set options in-game and expect to have them persisted?
+      configFile = mkOption {
+        type = types.path;
+        default = configFile;
+        defaultText = literalExpression "configFile";
+        description = lib.mdDoc ''
+          The server's configuration file.
+
+          The default file generated by this module contains lines essential to
+          the server's operation. Use its contents as a basis for any
+          customizations.
+        '';
+      };
+      stateDirName = mkOption {
+        type = types.str;
+        default = "factorio";
+        description = lib.mdDoc ''
+          Name of the directory under /var/lib holding the server's data.
+
+          The configuration and map will be stored here.
+        '';
+      };
+      mods = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = lib.mdDoc ''
+          Mods the server should install and activate.
+
+          The derivations in this list must "build" the mod by simply copying
+          the .zip, named correctly, into the output directory. Eventually,
+          there will be a way to pull in the most up-to-date list of
+          derivations via nixos-channel. Until then, this is for experts only.
+        '';
+      };
+      mods-dat = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Mods settings can be changed by specifying a dat file, in the [mod
+          settings file
+          format](https://wiki.factorio.com/Mod_settings_file_format).
+        '';
+      };
+      game-name = mkOption {
+        type = types.nullOr types.str;
+        default = "Factorio Game";
+        description = lib.mdDoc ''
+          Name of the game as it will appear in the game listing.
+        '';
+      };
+      description = mkOption {
+        type = types.nullOr types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Description of the game that will appear in the listing.
+        '';
+      };
+      extraSettings = mkOption {
+        type = types.attrs;
+        default = {};
+        example = { admins = [ "username" ];};
+        description = lib.mdDoc ''
+          Extra game configuration that will go into server-settings.json
+        '';
+      };
+      public = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Game will be published on the official Factorio matching server.
+        '';
+      };
+      lan = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Game will be broadcast on LAN.
+        '';
+      };
+      username = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Your factorio.com login credentials. Required for games with visibility public.
+        '';
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.factorio-headless;
+        defaultText = literalExpression "pkgs.factorio-headless";
+        example = literalExpression "pkgs.factorio-headless-experimental";
+        description = lib.mdDoc ''
+          Factorio version to use. This defaults to the stable channel.
+        '';
+      };
+      password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Your factorio.com login credentials. Required for games with visibility public.
+        '';
+      };
+      token = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Authentication token. May be used instead of 'password' above.
+        '';
+      };
+      game-password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Game password.
+        '';
+      };
+      requireUserVerification = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          When set to true, the server will only allow clients that have a valid factorio.com account.
+        '';
+      };
+      autosave-interval = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 10;
+        description = lib.mdDoc ''
+          Autosave interval in minutes.
+        '';
+      };
+      nonBlockingSaving = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Highly experimental feature, enable only at your own risk of losing your saves.
+          On UNIX systems, server will fork itself to create an autosave.
+          Autosaving on connected Windows clients will be disabled regardless of autosave_only_on_server option.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.factorio = {
+      description   = "Factorio headless server";
+      wantedBy      = [ "multi-user.target" ];
+      after         = [ "network.target" ];
+
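+      # If the configured save does not exist yet, create a new map with default settings before the server starts.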
+      preStart = toString [
+        "test -e ${stateDir}/saves/${cfg.saveName}.zip"
+        "||"
+        "${cfg.package}/bin/factorio"
+          "--config=${cfg.configFile}"
+          "--create=${mkSavePath cfg.saveName}"
+          (optionalString (cfg.mods != []) "--mod-directory=${modDir}")
+      ];
+
+      serviceConfig = {
+        Restart = "always";
+        KillSignal = "SIGINT";
+        DynamicUser = true;
+        StateDirectory = cfg.stateDirName;
+        UMask = "0007";
+        ExecStart = toString [
+          "${cfg.package}/bin/factorio"
+          "--config=${cfg.configFile}"
+          "--port=${toString cfg.port}"
+          "--bind=${cfg.bind}"
+          (optionalString (!cfg.loadLatestSave) "--start-server=${mkSavePath cfg.saveName}")
+          "--server-settings=${serverSettingsFile}"
+          (optionalString cfg.loadLatestSave "--start-server-load-latest")
+          (optionalString (cfg.mods != []) "--mod-directory=${modDir}")
+          (optionalString (cfg.admins != []) "--server-adminlist=${serverAdminsFile}")
+        ];
+
+        # Sandboxing
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        MemoryDenyWriteExecute = true;
+      };
+    };
+
+    networking.firewall.allowedUDPPorts = optional cfg.openFirewall cfg.port;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/games/freeciv.nix b/nixpkgs/nixos/modules/services/games/freeciv.nix
new file mode 100644
index 000000000000..bba27ae4cb5f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/freeciv.nix
@@ -0,0 +1,187 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.freeciv;
+  inherit (config.users) groups;
+  rootDir = "/run/freeciv";
+  argsFormat = {
+    type = with lib.types; let
+      valueType = nullOr (oneOf [
+        bool int float str
+        (listOf valueType)
+      ]) // {
+        description = "freeciv-server params";
+      };
+    in valueType;
+    generate = name: value:
+      let mkParam = k: v:
+            if v == null then []
+            else if isBool v then optional v ("--"+k)
+            else [("--"+k) v];
+          mkParams = k: v: map (mkParam k) (if isList v then v else [v]);
+      in escapeShellArgs (concatLists (concatLists (mapAttrsToList mkParams value)));
+  };
+in
+{
+  options = {
+    services.freeciv = {
+      enable = mkEnableOption (lib.mdDoc ''freeciv'');
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Parameters of freeciv-server.
+        '';
+        default = {};
+        type = types.submodule {
+          freeformType = argsFormat.type;
+          options.Announce = mkOption {
+            type = types.enum ["IPv4" "IPv6" "none"];
+            default = "none";
+            description = lib.mdDoc "Announce game in LAN using given protocol.";
+          };
+          options.auth = mkEnableOption (lib.mdDoc "server authentication");
+          options.Database = mkOption {
+            type = types.nullOr types.str;
+            apply = pkgs.writeText "auth.conf";
+            default = ''
+              [fcdb]
+                backend="sqlite"
+                database="/var/lib/freeciv/auth.sqlite"
+            '';
+            description = lib.mdDoc "Enable database connection with given configuration.";
+          };
+          options.debug = mkOption {
+            type = types.ints.between 0 3;
+            default = 0;
+            description = lib.mdDoc "Set debug log level.";
+          };
+          options.exit-on-end = mkEnableOption (lib.mdDoc "exit instead of restarting when a game ends");
+          options.Guests = mkEnableOption (lib.mdDoc "guests to login if auth is enabled");
+          options.Newusers = mkEnableOption (lib.mdDoc "new users to login if auth is enabled");
+          options.port = mkOption {
+            type = types.port;
+            default = 5556;
+            description = lib.mdDoc "Listen for clients on given port";
+          };
+          options.quitidle = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            description = lib.mdDoc "Quit if no players for given time in seconds.";
+          };
+          options.read = mkOption {
+            type = types.lines;
+            apply = v: pkgs.writeTextDir "read.serv" v + "/read";
+            default = ''
+              /fcdb lua sqlite_createdb()
+            '';
+            description = lib.mdDoc "Startup script.";
+          };
+          options.saves = mkOption {
+            type = types.nullOr types.str;
+            default = "/var/lib/freeciv/saves/";
+            description = lib.mdDoc ''
+              Save games to the given directory;
+              a sub-directory named after the starting date of the service
+              will be inserted to preserve older saves.
+            '';
+          };
+        };
+      };
+      openFirewall = mkEnableOption (lib.mdDoc "opening the firewall for the port listening for clients");
+    };
+  };
+  config = mkIf cfg.enable {
+    users.groups.freeciv = {};
+    # Use with:
+    #   journalctl -u freeciv.service -f -o cat &
+    #   cat >/run/freeciv.stdin
+    #   load saves/2020-11-14_05-22-27/freeciv-T0005-Y-3750-interrupted.sav.bz2
+    systemd.sockets.freeciv = {
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenFIFO = "/run/freeciv.stdin";
+        SocketGroup = groups.freeciv.name;
+        SocketMode = "660";
+        RemoveOnStop = true;
+      };
+    };
+    systemd.services.freeciv = {
+      description = "Freeciv Service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment.HOME = "/var/lib/freeciv";
+      serviceConfig = {
+        Restart = "on-failure";
+        RestartSec = "5s";
+        StandardInput = "fd:freeciv.socket";
+        StandardOutput = "journal";
+        StandardError = "journal";
+        ExecStart = pkgs.writeShellScript "freeciv-server" (''
+          set -eux
+          savedir=$(date +%Y-%m-%d_%H-%M-%S)
+          '' + "${pkgs.freeciv}/bin/freeciv-server"
+          + " " + optionalString (cfg.settings.saves != null)
+            (concatStringsSep " " [ "--saves" "${escapeShellArg cfg.settings.saves}/$savedir" ])
+          + " " + argsFormat.generate "freeciv-server" (cfg.settings // { saves = null; }));
+        DynamicUser = true;
+        # Create rootDir in the host's mount namespace.
+        RuntimeDirectory = [(baseNameOf rootDir)];
+        RuntimeDirectoryMode = "755";
+        StateDirectory = [ "freeciv" ];
+        WorkingDirectory = "/var/lib/freeciv";
+        # Avoid mounting rootDir in the own rootDir of ExecStart='s mount namespace.
+        InaccessiblePaths = ["-+${rootDir}"];
+        # This is for BindPaths= and BindReadOnlyPaths=
+        # to allow traversal of directories they create in RootDirectory=.
+        UMask = "0066";
+        RootDirectory = rootDir;
+        RootDirectoryStartOnly = true;
+        MountAPIVFS = true;
+        BindReadOnlyPaths = [
+          builtins.storeDir
+          "/etc"
+          "/run"
+        ];
+        # The following options are only for optimizing:
+        # systemd-analyze security freeciv
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateNetwork = mkDefault false;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallFilter = [
+          "@system-service"
+          # Groups in @system-service which do not contain a syscall listed by:
+          # perf stat -x, 2>perf.log -e 'syscalls:sys_enter_*' freeciv-server
+          # in tests, and seem likely not necessary for freeciv-server.
+          "~@aio" "~@chown" "~@ipc" "~@keyring" "~@memlock"
+          "~@resources" "~@setuid" "~@sync" "~@timer"
+        ];
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+      };
+    };
+    networking.firewall = mkIf cfg.openFirewall
+      { allowedTCPPorts = [ cfg.settings.port ]; };
+  };
+  meta.maintainers = with lib.maintainers; [ julm ];
+}
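
A minimal sketch of enabling the freeciv module defined above from a NixOS configuration; the quitidle value is an illustrative assumption:

    services.freeciv = {
      enable = true;
      openFirewall = true;        # expose settings.port (5556 by default) to clients
      settings = {
        port = 5556;              # module default, shown for clarity
        quitidle = 3600;          # assumed: quit after an hour without players
      };
    };
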
diff --git a/nixpkgs/nixos/modules/services/games/mchprs.nix b/nixpkgs/nixos/modules/services/games/mchprs.nix
new file mode 100644
index 000000000000..a65001b0b3e2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/mchprs.nix
@@ -0,0 +1,341 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mchprs;
+  settingsFormat = pkgs.formats.toml { };
+
+  whitelistFile = pkgs.writeText "whitelist.json"
+    (builtins.toJSON
+      (mapAttrsToList (n: v: { name = n; uuid = v; }) cfg.whitelist.list));
+
+  configToml =
+    (removeAttrs cfg.settings [ "address" "port" ]) //
+    {
+      bind_address = cfg.settings.address + ":" + toString cfg.settings.port;
+      whitelist = cfg.whitelist.enable;
+    };
+
+  configTomlFile = settingsFormat.generate "Config.toml" configToml;
+in
+{
+  options = {
+    services.mchprs = {
+      enable = mkEnableOption "MCHPRS";
+
+      declarativeSettings = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Whether to use a declarative configuration for MCHPRS.
+        '';
+      };
+
+      declarativeWhitelist = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Whether to use a declarative whitelist.
+          The option {option}`services.mchprs.whitelist.list`
+          will be applied if and only if this is set to `true`.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/mchprs";
+        description = mdDoc ''
+          Directory to store MCHPRS database and other state/data files.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Whether to open ports in the firewall for the server.
+          Only has effect when
+          {option}`services.mchprs.declarativeSettings` is `true`.
+        '';
+      };
+
+      maxRuntime = mkOption {
+        type = types.str;
+        default = "infinity";
+        example = "7d";
+        description = mdDoc ''
+          Automatically restart the server after
+          {option}`services.mchprs.maxRuntime`.
+          The time span format is described here:
+          https://www.freedesktop.org/software/systemd/man/systemd.time.html#Parsing%20Time%20Spans.
+          If set to `infinity` (the default), the server is not restarted automatically.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.mchprs;
+        defaultText = literalExpression "pkgs.mchprs";
+        description = mdDoc "Version of MCHPRS to run.";
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = settingsFormat.type;
+
+          options = {
+            port = mkOption {
+              type = types.port;
+              default = 25565;
+              description = mdDoc ''
+                Port for the server.
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            address = mkOption {
+              type = types.str;
+              default = "0.0.0.0";
+              description = mdDoc ''
+                Address for the server.
+                Enclose IPv6 addresses in square brackets.
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            motd = mkOption {
+              type = types.str;
+              default = "Minecraft High Performance Redstone Server";
+              description = mdDoc ''
+                Message of the day.
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            chat_format = mkOption {
+              type = types.str;
+              default = "<{username}> {message}";
+              description = mdDoc ''
+                How to format chat messages, interpolating `username`
+                and `message` with curly braces.
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            max_players = mkOption {
+              type = types.ints.positive;
+              default = 99999;
+              description = mdDoc ''
+                Maximum number of simultaneous players.
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            view_distance = mkOption {
+              type = types.ints.positive;
+              default = 8;
+              description = mdDoc ''
+                Maximum distance (in chunks) between players and loaded chunks.
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            bungeecord = mkOption {
+              type = types.bool;
+              default = false;
+              description = mdDoc ''
+                Enable compatibility with
+                [BungeeCord](https://github.com/SpigotMC/BungeeCord).
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            schemati = mkOption {
+              type = types.bool;
+              default = false;
+              description = mdDoc ''
+                Mimic the verification and directory layout used by the
+                Open Redstone Engineers
+                [Schemati plugin](https://github.com/OpenRedstoneEngineers/Schemati).
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            block_in_hitbox = mkOption {
+              type = types.bool;
+              default = true;
+              description = mdDoc ''
+                Allow placing blocks inside of players
+                (hitbox logic is simplified).
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+
+            auto_redpiler = mkOption {
+              type = types.bool;
+              default = true;
+              description = mdDoc ''
+                Use redpiler automatically.
+                Only has effect when
+                {option}`services.mchprs.declarativeSettings` is `true`.
+              '';
+            };
+          };
+        };
+        default = { };
+
+        description = mdDoc ''
+          Configuration for MCHPRS via `Config.toml`.
+          See https://github.com/MCHPR/MCHPRS/blob/master/README.md for documentation.
+        '';
+      };
+
+      whitelist = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = mdDoc ''
+            Whether or not the whitelist (in `whitelist.json`) should be enabled.
+            Only has effect when {option}`services.mchprs.declarativeSettings` is `true`.
+          '';
+        };
+
+        list = mkOption {
+          type =
+            let
+              minecraftUUID = types.strMatching
+                "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" // {
+                description = "Minecraft UUID";
+              };
+            in
+            types.attrsOf minecraftUUID;
+          default = { };
+          example = literalExpression ''
+            {
+              username1 = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx";
+              username2 = "yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy";
+            };
+          '';
+          description = mdDoc ''
+            Whitelisted players, only has an effect when
+            {option}`services.mchprs.declarativeWhitelist` is
+            `true` and the whitelist is enabled
+            via {option}`services.mchprs.whitelist.enable`.
+            This is a mapping from Minecraft usernames to UUIDs.
+            You can use <https://mcuuid.net/> to get a
+            Minecraft UUID for a username.
+          '';
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.mchprs = {
+      description = "MCHPRS service user";
+      home = cfg.dataDir;
+      createHome = true;
+      isSystemUser = true;
+      group = "mchprs";
+    };
+    users.groups.mchprs = { };
+
+    systemd.services.mchprs = {
+      description = "MCHPRS Service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = "${lib.getExe cfg.package}";
+        Restart = "always";
+        RuntimeMaxSec = cfg.maxRuntime;
+        User = "mchprs";
+        WorkingDirectory = cfg.dataDir;
+
+        StandardOutput = "journal";
+        StandardError = "journal";
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        UMask = "0077";
+      };
+
+      preStart =
+        (if cfg.declarativeSettings then ''
+          if [ -e .declarativeSettings ]; then
+
+            # Settings were declarative before, no need to back up anything
+            cp -f ${configTomlFile} Config.toml
+
+          else
+
+            # Declarative settings for the first time, backup stateful files
+            cp -b --suffix=.stateful ${configTomlFile} Config.toml
+
+            echo "Autogenerated file that implies that this server configuration is managed declaratively by NixOS" \
+              > .declarativeSettings
+
+          fi
+        '' else ''
+          if [ -e .declarativeSettings ]; then
+            rm .declarativeSettings
+          fi
+        '') + (if cfg.declarativeWhitelist then ''
+          if [ -e .declarativeWhitelist ]; then
+
+            # Whitelist was declarative before, no need to back up anything
+            ln -sf ${whitelistFile} whitelist.json
+
+          else
+
+            # Declarative whitelist for the first time, backup stateful files
+            ln -sb --suffix=.stateful ${whitelistFile} whitelist.json
+
+            echo "Autogenerated file that implies that this server's whitelist is managed declaratively by NixOS" \
+              > .declarativeWhitelist
+
+          fi
+        '' else ''
+          if [ -e .declarativeWhitelist ]; then
+            rm .declarativeWhitelist
+          fi
+        '');
+    };
+
+    networking.firewall = mkIf (cfg.declarativeSettings && cfg.openFirewall) {
+      allowedUDPPorts = [ cfg.settings.port ];
+      allowedTCPPorts = [ cfg.settings.port ];
+    };
+  };
+
+  meta.maintainers = with maintainers; [ gdd ];
+}
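
A minimal sketch of a declaratively configured MCHPRS server using the options above; the motd and whitelist UUID are placeholder assumptions:

    services.mchprs = {
      enable = true;
      declarativeSettings = true;
      openFirewall = true;
      settings = {
        port = 25565;                       # module default, shown for clarity
        motd = "Redstone playground";       # illustrative
      };
      declarativeWhitelist = true;
      whitelist = {
        enable = true;
        list.somePlayer = "00000000-0000-0000-0000-000000000000";  # placeholder UUID
      };
    };
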
diff --git a/nixpkgs/nixos/modules/services/games/minecraft-server.nix b/nixpkgs/nixos/modules/services/games/minecraft-server.nix
new file mode 100644
index 000000000000..77f92ab97db7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/minecraft-server.nix
@@ -0,0 +1,285 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.minecraft-server;
+
+  # We don't allow eula=false anyways
+  eulaFile = builtins.toFile "eula.txt" ''
+    # eula.txt managed by NixOS Configuration
+    eula=true
+  '';
+
+  whitelistFile = pkgs.writeText "whitelist.json"
+    (builtins.toJSON
+      (mapAttrsToList (n: v: { name = n; uuid = v; }) cfg.whitelist));
+
+  cfgToString = v: if builtins.isBool v then boolToString v else toString v;
+
+  serverPropertiesFile = pkgs.writeText "server.properties" (''
+    # server.properties managed by NixOS configuration
+  '' + concatStringsSep "\n" (mapAttrsToList
+    (n: v: "${n}=${cfgToString v}") cfg.serverProperties));
+
+  stopScript = pkgs.writeShellScript "minecraft-server-stop" ''
+    echo stop > ${config.systemd.sockets.minecraft-server.socketConfig.ListenFIFO}
+
+    # Wait for the PID of the minecraft server to disappear before
+    # returning, so systemd doesn't attempt to SIGKILL it.
+    while kill -0 "$1" 2> /dev/null; do
+      sleep 1s
+    done
+  '';
+
+  # To be able to open the firewall, we need to read out port values in the
+  # server properties, but fall back to the defaults when those don't exist.
+  # These defaults are from https://minecraft.gamepedia.com/Server.properties#Java_Edition_3
+  defaultServerPort = 25565;
+
+  serverPort = cfg.serverProperties.server-port or defaultServerPort;
+
+  rconPort = if cfg.serverProperties.enable-rcon or false
+    then cfg.serverProperties."rcon.port" or 25575
+    else null;
+
+  queryPort = if cfg.serverProperties.enable-query or false
+    then cfg.serverProperties."query.port" or 25565
+    else null;
+
+in {
+  options = {
+    services.minecraft-server = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If enabled, start a Minecraft Server. The server
+          data will be loaded from and saved to
+          {option}`services.minecraft-server.dataDir`.
+        '';
+      };
+
+      declarative = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to use a declarative Minecraft server configuration.
+          Only if set to `true`, the options
+          {option}`services.minecraft-server.whitelist` and
+          {option}`services.minecraft-server.serverProperties` will be
+          applied.
+        '';
+      };
+
+      eula = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether you agree to
+          [Mojang's EULA](https://account.mojang.com/documents/minecraft_eula).
+          This option must be set to `true` to run the Minecraft server.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/minecraft";
+        description = lib.mdDoc ''
+          Directory to store Minecraft database and other state/data files.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open ports in the firewall for the server.
+        '';
+      };
+
+      whitelist = mkOption {
+        type = let
+          minecraftUUID = types.strMatching
+            "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}" // {
+              description = "Minecraft UUID";
+            };
+          in types.attrsOf minecraftUUID;
+        default = {};
+        description = lib.mdDoc ''
+          Whitelisted players, only has an effect when
+          {option}`services.minecraft-server.declarative` is
+          `true` and the whitelist is enabled
+          via {option}`services.minecraft-server.serverProperties` by
+          setting `white-list` to `true`.
+          This is a mapping from Minecraft usernames to UUIDs.
+          You can use <https://mcuuid.net/> to get a
+          Minecraft UUID for a username.
+        '';
+        example = literalExpression ''
+          {
+            username1 = "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx";
+            username2 = "yyyyyyyy-yyyy-yyyy-yyyy-yyyyyyyyyyyy";
+          };
+        '';
+      };
+
+      serverProperties = mkOption {
+        type = with types; attrsOf (oneOf [ bool int str ]);
+        default = {};
+        example = literalExpression ''
+          {
+            server-port = 43000;
+            difficulty = 3;
+            gamemode = 1;
+            max-players = 5;
+            motd = "NixOS Minecraft server!";
+            white-list = true;
+            enable-rcon = true;
+            "rcon.password" = "hunter2";
+          }
+        '';
+        description = lib.mdDoc ''
+          Minecraft server properties for the server.properties file. Only has
+          an effect when {option}`services.minecraft-server.declarative`
+          is set to `true`. See
+          <https://minecraft.gamepedia.com/Server.properties#Java_Edition_3>
+          for documentation on these values.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.minecraft-server;
+        defaultText = literalExpression "pkgs.minecraft-server";
+        example = literalExpression "pkgs.minecraft-server_1_12_2";
+        description = lib.mdDoc "Version of minecraft-server to run.";
+      };
+
+      jvmOpts = mkOption {
+        type = types.separatedString " ";
+        default = "-Xmx2048M -Xms2048M";
+        # Example options from https://minecraft.gamepedia.com/Tutorials/Server_startup_script
+        example = "-Xms4092M -Xmx4092M -XX:+UseG1GC -XX:+CMSIncrementalPacing "
+          + "-XX:+CMSClassUnloadingEnabled -XX:ParallelGCThreads=2 "
+          + "-XX:MinHeapFreeRatio=5 -XX:MaxHeapFreeRatio=10";
+        description = lib.mdDoc "JVM options for the Minecraft server.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.minecraft = {
+      description     = "Minecraft server service user";
+      home            = cfg.dataDir;
+      createHome      = true;
+      isSystemUser    = true;
+      group           = "minecraft";
+    };
+    users.groups.minecraft = {};
+
+    systemd.sockets.minecraft-server = {
+      bindsTo = [ "minecraft-server.service" ];
+      socketConfig = {
+        ListenFIFO = "/run/minecraft-server.stdin";
+        SocketMode = "0660";
+        SocketUser = "minecraft";
+        SocketGroup = "minecraft";
+        RemoveOnStop = true;
+        FlushPending = true;
+      };
+    };
+
+    systemd.services.minecraft-server = {
+      description   = "Minecraft Server Service";
+      wantedBy      = [ "multi-user.target" ];
+      requires      = [ "minecraft-server.socket" ];
+      after         = [ "network.target" "minecraft-server.socket" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/minecraft-server ${cfg.jvmOpts}";
+        ExecStop = "${stopScript} $MAINPID";
+        Restart = "always";
+        User = "minecraft";
+        WorkingDirectory = cfg.dataDir;
+
+        StandardInput = "socket";
+        StandardOutput = "journal";
+        StandardError = "journal";
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        UMask = "0077";
+      };
+
+      preStart = ''
+        ln -sf ${eulaFile} eula.txt
+      '' + (if cfg.declarative then ''
+
+        if [ -e .declarative ]; then
+
+          # Was declarative before, no need to back up anything
+          ln -sf ${whitelistFile} whitelist.json
+          cp -f ${serverPropertiesFile} server.properties
+
+        else
+
+          # Declarative for the first time, backup stateful files
+          ln -sb --suffix=.stateful ${whitelistFile} whitelist.json
+          cp -b --suffix=.stateful ${serverPropertiesFile} server.properties
+
+          # server.properties must have write permissions, because every time
+          # the server starts, it first parses the file and then regenerates it.
+          chmod +w server.properties
+          echo "Autogenerated file that signifies that this server configuration is managed declaratively by NixOS" \
+            > .declarative
+
+        fi
+      '' else ''
+        if [ -e .declarative ]; then
+          rm .declarative
+        fi
+      '');
+    };
+
+    networking.firewall = mkIf cfg.openFirewall (if cfg.declarative then {
+      allowedUDPPorts = [ serverPort ];
+      allowedTCPPorts = [ serverPort ]
+        ++ optional (queryPort != null) queryPort
+        ++ optional (rconPort != null) rconPort;
+    } else {
+      allowedUDPPorts = [ defaultServerPort ];
+      allowedTCPPorts = [ defaultServerPort ];
+    });
+
+    assertions = [
+      { assertion = cfg.eula;
+        message = "You must agree to Mojangs EULA to run minecraft-server."
+          + " Read https://account.mojang.com/documents/minecraft_eula and"
+          + " set `services.minecraft-server.eula` to `true` if you agree.";
+      }
+    ];
+
+  };
+}
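
A minimal sketch of a declarative Minecraft server using the options above; the motd and whitelist entry are placeholder assumptions:

    services.minecraft-server = {
      enable = true;
      eula = true;                # the assertion above requires agreeing to Mojang's EULA
      declarative = true;
      openFirewall = true;
      serverProperties = {
        server-port = 25565;
        motd = "NixOS Minecraft server";     # illustrative
        white-list = true;
      };
      whitelist.somePlayer = "00000000-0000-0000-0000-000000000000";  # placeholder UUID
    };
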
diff --git a/nixpkgs/nixos/modules/services/games/minetest-server.nix b/nixpkgs/nixos/modules/services/games/minetest-server.nix
new file mode 100644
index 000000000000..8dc360153497
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/minetest-server.nix
@@ -0,0 +1,162 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  CONTAINS_NEWLINE_RE = ".*\n.*";
+  # The following values are reserved as complete option values:
+  # { - start of a group.
+  # """ - start of a multi-line string.
+  RESERVED_VALUE_RE = "[[:space:]]*(\"\"\"|\\{)[[:space:]]*";
+  NEEDS_MULTILINE_RE = "${CONTAINS_NEWLINE_RE}|${RESERVED_VALUE_RE}";
+
+  # There is no way to encode """ on its own line in a Minetest config.
+  UNESCAPABLE_RE = ".*\n\"\"\"\n.*";
+
+  toConfMultiline = name: value:
+    assert lib.assertMsg
+      ((builtins.match UNESCAPABLE_RE value) == null)
+      ''""" can't be on its own line in a minetest config.'';
+    "${name} = \"\"\"\n${value}\n\"\"\"\n";
+
+  toConf = values:
+    lib.concatStrings
+      (lib.mapAttrsToList
+        (name: value: {
+          bool = "${name} = ${toString value}\n";
+          int = "${name} = ${toString value}\n";
+          null = "";
+          set = "${name} = {\n${toConf value}}\n";
+          string =
+            if (builtins.match NEEDS_MULTILINE_RE value) != null
+            then toConfMultiline name value
+            else "${name} = ${value}\n";
+        }.${builtins.typeOf value})
+        values);
+
+  cfg   = config.services.minetest-server;
+  flag  = val: name: lib.optionals (val != null) ["--${name}" "${toString val}"];
+
+  flags = [
+    "--server"
+  ]
+    ++ (
+      if cfg.configPath != null
+      then ["--config" cfg.configPath]
+      else ["--config" (builtins.toFile "minetest.conf" (toConf cfg.config))])
+    ++ (flag cfg.gameId "gameid")
+    ++ (flag cfg.world "world")
+    ++ (flag cfg.logPath "logfile")
+    ++ (flag cfg.port "port")
+    ++ cfg.extraArgs;
+in
+{
+  options = {
+    services.minetest-server = {
+      enable = mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc "If enabled, starts a Minetest Server.";
+      };
+
+      gameId = mkOption {
+        type        = types.nullOr types.str;
+        default     = null;
+        description = lib.mdDoc ''
+          Id of the game to use. To list available games run
+          `minetestserver --gameid list`.
+
+          If only one game exists, this option can be null.
+        '';
+      };
+
+      world = mkOption {
+        type        = types.nullOr types.path;
+        default     = null;
+        description = lib.mdDoc ''
+          Name of the world to use. To list available worlds run
+          `minetestserver --world list`.
+
+          If only one world exists, this option can be null.
+        '';
+      };
+
+      configPath = mkOption {
+        type        = types.nullOr types.path;
+        default     = null;
+        description = lib.mdDoc ''
+          Path to the config to use.
+
+          If set to null, the config of the running user will be used:
+          `~/.minetest/minetest.conf`.
+        '';
+      };
+
+      config = mkOption {
+        type = types.attrsOf types.anything;
+        default = {};
+        description = lib.mdDoc ''
+          Settings to add to the minetest config file.
+
+          This option is ignored if `configPath` is set.
+        '';
+      };
+
+      logPath = mkOption {
+        type        = types.nullOr types.path;
+        default     = null;
+        description = lib.mdDoc ''
+          Path to logfile for logging.
+
+          If set to null, logging will be output to stdout which means
+          all output will be caught by systemd.
+        '';
+      };
+
+      port = mkOption {
+        type        = types.nullOr types.int;
+        default     = null;
+        description = lib.mdDoc ''
+          Port number to bind to.
+
+          If set to null, the default 30000 will be used.
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Additional command line flags to pass to the minetest executable.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.minetest = {
+      description     = "Minetest Server Service user";
+      home            = "/var/lib/minetest";
+      createHome      = true;
+      uid             = config.ids.uids.minetest;
+      group           = "minetest";
+    };
+    users.groups.minetest.gid = config.ids.gids.minetest;
+
+    systemd.services.minetest-server = {
+      description   = "Minetest Server Service";
+      wantedBy      = [ "multi-user.target" ];
+      after         = [ "network.target" ];
+
+      serviceConfig.Restart = "always";
+      serviceConfig.User    = "minetest";
+      serviceConfig.Group   = "minetest";
+
+      script = ''
+        cd /var/lib/minetest
+
+        exec ${pkgs.minetest}/bin/minetest ${lib.escapeShellArgs flags}
+      '';
+    };
+  };
+}
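
A minimal sketch of enabling the Minetest server defined above; the config key is an assumed minetest.conf setting:

    services.minetest-server = {
      enable = true;
      port = 30000;               # explicit, matches the documented default
      config = {
        max_users = 16;           # assumed minetest.conf key, illustrative value
      };
    };
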
diff --git a/nixpkgs/nixos/modules/services/games/openarena.nix b/nixpkgs/nixos/modules/services/games/openarena.nix
new file mode 100644
index 000000000000..8f6d4986903f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/openarena.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) concatStringsSep mkEnableOption mkIf mkOption types;
+  cfg = config.services.openarena;
+in
+{
+  options = {
+    services.openarena = {
+      enable = mkEnableOption (lib.mdDoc "OpenArena");
+      package = lib.mkPackageOptionMD pkgs "openarena" { };
+
+      openPorts = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to open firewall ports for OpenArena";
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Extra flags to pass to {command}`oa_ded`";
+        example = [
+          "+set dedicated 2"
+          "+set sv_hostname 'My NixOS OpenArena Server'"
+          # Load a map. Mandatory for clients to be able to connect.
+          "+map oa_dm1"
+        ];
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall = mkIf cfg.openPorts {
+      allowedUDPPorts = [ 27960 ];
+    };
+
+    systemd.services.openarena = {
+      description = "OpenArena";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "openarena";
+        ExecStart = "${cfg.package}/bin/oa_ded +set fs_basepath ${cfg.package}/share/openarena +set fs_homepath /var/lib/openarena ${concatStringsSep " " cfg.extraFlags}";
+        Restart = "on-failure";
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+      };
+    };
+  };
+}
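
A minimal sketch of the OpenArena module above; the hostname is illustrative and a map must be loaded for clients to connect:

    services.openarena = {
      enable = true;
      openPorts = true;
      extraFlags = [
        "+set dedicated 2"
        "+set sv_hostname 'NixOS OpenArena'"   # illustrative
        "+map oa_dm1"
      ];
    };
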
diff --git a/nixpkgs/nixos/modules/services/games/quake3-server.nix b/nixpkgs/nixos/modules/services/games/quake3-server.nix
new file mode 100644
index 000000000000..e51830c12e78
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/quake3-server.nix
@@ -0,0 +1,116 @@
+{ config, pkgs, lib, ... }:
+
+let
+  inherit (lib) literalMD mkEnableOption mkIf mkOption types;
+  cfg = config.services.quake3-server;
+
+  configFile = pkgs.writeText "q3ds-extra.cfg" ''
+    set net_port ${builtins.toString cfg.port}
+
+    ${cfg.extraConfig}
+  '';
+
+  defaultBaseq3 = pkgs.requireFile rec {
+    name = "baseq3";
+    hashMode = "recursive";
+    sha256 = "5dd8ee09eabd45e80450f31d7a8b69b846f59738726929298d8a813ce5725ed3";
+    message = ''
+      Unfortunately, we cannot download ${name} automatically.
+      Please purchase a legitimate copy of Quake 3 and change into the installation directory.
+
+      You can either add all relevant files to the nix-store like this:
+      mkdir /tmp/baseq3
+      cp baseq3/pak*.pk3 /tmp/baseq3
+      nix-store --add-fixed sha256 --recursive /tmp/baseq3
+
+      Alternatively you can set services.quake3-server.baseq3 to a path and copy the baseq3 directory into
+      $services.quake3-server.baseq3/.q3a/
+    '';
+  };
+
+  home = pkgs.runCommand "quake3-home" {} ''
+      mkdir -p $out/.q3a/baseq3
+
+      for file in ${cfg.baseq3}/*; do
+        ln -s $file $out/.q3a/baseq3/$(basename $file)
+      done
+
+      ln -s ${configFile} $out/.q3a/baseq3/nix.cfg
+  '';
+in {
+  options = {
+    services.quake3-server = {
+      enable = mkEnableOption (lib.mdDoc "Quake 3 dedicated server");
+      package = lib.mkPackageOptionMD pkgs "ioquake3" { };
+
+      port = mkOption {
+        type = types.port;
+        default = 27960;
+        description = lib.mdDoc ''
+          UDP Port the server should listen on.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open the firewall.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          seta rconPassword "superSecret"      // sets RCON password for remote console
+          seta sv_hostname "My Quake 3 server"      // name that appears in server list
+        '';
+        description = lib.mdDoc ''
+          Extra configuration options. Note that options changed via RCON will not be persisted. To list all possible
+          options, use "cvarlist 1" via RCON.
+        '';
+      };
+
+      baseq3 = mkOption {
+        type = types.either types.package types.path;
+        default = defaultBaseq3;
+        defaultText = literalMD "Manually downloaded Quake 3 installation directory.";
+        example = "/var/lib/q3ds";
+        description = lib.mdDoc ''
+          Path to the baseq3 files (pak*.pk3). If this is on the nix store (type = package) all .pk3 files should be saved
+          in the top-level directory. If this is on another filesystem (e.g. /var/lib/baseq3) the .pk3 files are searched for in
+          $baseq3/.q3a/baseq3/
+        '';
+      };
+    };
+  };
+
+  config = let
+    baseq3InStore = builtins.typeOf cfg.baseq3 == "set";
+  in mkIf cfg.enable {
+    networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+    systemd.services.q3ds = {
+      description = "Quake 3 dedicated server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+
+      environment.HOME = if baseq3InStore then home else cfg.baseq3;
+
+      serviceConfig = with lib; {
+        Restart = "always";
+        DynamicUser = true;
+        WorkingDirectory = home;
+
+        # It is possible to alter configuration files via RCON. To ensure reproducibility we have to prevent this
+        ReadOnlyPaths = if baseq3InStore then home else cfg.baseq3;
+        ExecStartPre = optionalString (!baseq3InStore) "+${pkgs.coreutils}/bin/cp ${configFile} ${cfg.baseq3}/.q3a/baseq3/nix.cfg";
+
+        ExecStart = "${cfg.package}/bin/ioq3ded +exec nix.cfg";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ f4814n ];
+}
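
A minimal sketch of the Quake 3 module above, assuming the pak files have been copied to a stateful directory rather than the Nix store:

    services.quake3-server = {
      enable = true;
      openFirewall = true;
      port = 27960;                           # module default, shown for clarity
      baseq3 = "/var/lib/q3ds";               # expects pak*.pk3 under $baseq3/.q3a/baseq3/
      extraConfig = ''
        seta sv_hostname "NixOS Quake 3"      // illustrative
      '';
    };
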
diff --git a/nixpkgs/nixos/modules/services/games/teeworlds.nix b/nixpkgs/nixos/modules/services/games/teeworlds.nix
new file mode 100644
index 000000000000..ffef440330c4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/teeworlds.nix
@@ -0,0 +1,119 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.teeworlds;
+  register = cfg.register;
+
+  teeworldsConf = pkgs.writeText "teeworlds.cfg" ''
+    sv_port ${toString cfg.port}
+    sv_register ${if cfg.register then "1" else "0"}
+    ${optionalString (cfg.name != null) "sv_name ${cfg.name}"}
+    ${optionalString (cfg.motd != null) "sv_motd ${cfg.motd}"}
+    ${optionalString (cfg.password != null) "password ${cfg.password}"}
+    ${optionalString (cfg.rconPassword != null) "sv_rcon_password ${cfg.rconPassword}"}
+    ${concatStringsSep "\n" cfg.extraOptions}
+  '';
+
+in
+{
+  options = {
+    services.teeworlds = {
+      enable = mkEnableOption (lib.mdDoc "Teeworlds Server");
+
+      openPorts = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to open firewall ports for Teeworlds";
+      };
+
+      name = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Name of the server. Defaults to 'unnamed server'.
+        '';
+      };
+
+      register = mkOption {
+        type = types.bool;
+        example = true;
+        default = false;
+        description = lib.mdDoc ''
+          Whether the server registers as a public server in the global server list. This is disabled by default for privacy reasons.
+        '';
+      };
+
+      motd = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Set the server message of the day text.
+        '';
+      };
+
+      password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Password to connect to the server.
+        '';
+      };
+
+      rconPassword = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Password to access the remote console. If not set, a randomly generated one is displayed in the server log.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8303;
+        description = lib.mdDoc ''
+          Port the server will listen on.
+        '';
+      };
+
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Extra configuration lines for the {file}`teeworlds.cfg`. See [Teeworlds Documentation](https://www.teeworlds.com/?page=docs&wiki=server_settings).
+        '';
+        example = [ "sv_map dm1" "sv_gametype dm" ];
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall = mkIf cfg.openPorts {
+      allowedUDPPorts = [ cfg.port ];
+    };
+
+    systemd.services.teeworlds = {
+      description = "Teeworlds Server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${pkgs.teeworlds}/bin/teeworlds_srv -f ${teeworldsConf}";
+
+        # Hardening
+        CapabilityBoundingSet = false;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHome = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        SystemCallArchitectures = "native";
+      };
+    };
+  };
+}
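
A minimal sketch of the Teeworlds module above; the server name and extra options are illustrative:

    services.teeworlds = {
      enable = true;
      openPorts = true;
      name = "NixOS Teeworlds";               # illustrative
      extraOptions = [ "sv_map dm1" "sv_gametype dm" ];
    };
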
diff --git a/nixpkgs/nixos/modules/services/games/terraria.nix b/nixpkgs/nixos/modules/services/games/terraria.nix
new file mode 100644
index 000000000000..ccdd779165b8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/terraria.nix
@@ -0,0 +1,169 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg   = config.services.terraria;
+  opt   = options.services.terraria;
+  worldSizeMap = { small = 1; medium = 2; large = 3; };
+  valFlag = name: val: optionalString (val != null) "-${name} \"${escape ["\\" "\""] (toString val)}\"";
+  boolFlag = name: val: optionalString val "-${name}";
+  flags = [
+    (valFlag "port" cfg.port)
+    (valFlag "maxPlayers" cfg.maxPlayers)
+    (valFlag "password" cfg.password)
+    (valFlag "motd" cfg.messageOfTheDay)
+    (valFlag "world" cfg.worldPath)
+    (valFlag "autocreate" (builtins.getAttr cfg.autoCreatedWorldSize worldSizeMap))
+    (valFlag "banlist" cfg.banListPath)
+    (boolFlag "secure" cfg.secure)
+    (boolFlag "noupnp" cfg.noUPnP)
+  ];
+  stopScript = pkgs.writeScript "terraria-stop" ''
+    #!${pkgs.runtimeShell}
+
+    if ! [ -d "/proc/$1" ]; then
+      exit 0
+    fi
+
+    ${getBin pkgs.tmux}/bin/tmux -S ${cfg.dataDir}/terraria.sock send-keys Enter exit Enter
+    ${getBin pkgs.coreutils}/bin/tail --pid="$1" -f /dev/null
+  '';
+in
+{
+  options = {
+    services.terraria = {
+      enable = mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc ''
+          If enabled, starts a Terraria server. The server can be connected to via `tmux -S ''${config.${opt.dataDir}}/terraria.sock attach`
+          for administration by users who are a part of the `terraria` group (use `C-b d` shortcut to detach again).
+        '';
+      };
+
+      port = mkOption {
+        type        = types.port;
+        default     = 7777;
+        description = lib.mdDoc ''
+          Specifies the port to listen on.
+        '';
+      };
+
+      maxPlayers = mkOption {
+        type        = types.ints.u8;
+        default     = 255;
+        description = lib.mdDoc ''
+          Sets the max number of players (between 1 and 255).
+        '';
+      };
+
+      password = mkOption {
+        type        = types.nullOr types.str;
+        default     = null;
+        description = lib.mdDoc ''
+          Sets the server password. Leave `null` for no password.
+        '';
+      };
+
+      messageOfTheDay = mkOption {
+        type        = types.nullOr types.str;
+        default     = null;
+        description = lib.mdDoc ''
+          Set the server message of the day text.
+        '';
+      };
+
+      worldPath = mkOption {
+        type        = types.nullOr types.path;
+        default     = null;
+        description = lib.mdDoc ''
+          The path to the world file (`.wld`) which should be loaded.
+          If no world exists at this path, one will be created with the size
+          specified by `autoCreatedWorldSize`.
+        '';
+      };
+
+      autoCreatedWorldSize = mkOption {
+        type        = types.enum [ "small" "medium" "large" ];
+        default     = "medium";
+        description = lib.mdDoc ''
+          Specifies the size of the auto-created world if `worldPath` does not
+          point to an existing world.
+        '';
+      };
+
+      banListPath = mkOption {
+        type        = types.nullOr types.path;
+        default     = null;
+        description = lib.mdDoc ''
+          The path to the ban list.
+        '';
+      };
+
+      secure = mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc "Adds additional cheat protection to the server.";
+      };
+
+      noUPnP = mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc "Disables automatic Universal Plug and Play.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to open ports in the firewall";
+      };
+
+      dataDir = mkOption {
+        type        = types.str;
+        default     = "/var/lib/terraria";
+        example     = "/srv/terraria";
+        description = lib.mdDoc "Path to variable state data directory for terraria.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.terraria = {
+      description = "Terraria server service user";
+      group       = "terraria";
+      home        = cfg.dataDir;
+      createHome  = true;
+      uid         = config.ids.uids.terraria;
+    };
+
+    users.groups.terraria = {
+      gid = config.ids.gids.terraria;
+    };
+
+    systemd.services.terraria = {
+      description   = "Terraria Server Service";
+      wantedBy      = [ "multi-user.target" ];
+      after         = [ "network.target" ];
+
+      serviceConfig = {
+        User    = "terraria";
+        Type = "forking";
+        GuessMainPID = true;
+        ExecStart = "${getBin pkgs.tmux}/bin/tmux -S ${cfg.dataDir}/terraria.sock new -d ${pkgs.terraria-server}/bin/TerrariaServer ${concatStringsSep " " flags}";
+        ExecStop = "${stopScript} $MAINPID";
+      };
+
+      postStart = ''
+        ${pkgs.coreutils}/bin/chmod 660 ${cfg.dataDir}/terraria.sock
+        ${pkgs.coreutils}/bin/chgrp terraria ${cfg.dataDir}/terraria.sock
+      '';
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+      allowedUDPPorts = [ cfg.port ];
+    };
+
+  };
+}
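
A minimal sketch of the Terraria module above; the world path is an assumed location under the default dataDir:

    services.terraria = {
      enable = true;
      openFirewall = true;
      maxPlayers = 16;
      autoCreatedWorldSize = "medium";
      worldPath = "/var/lib/terraria/my-world.wld";   # created on first start if missing
    };
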
diff --git a/nixpkgs/nixos/modules/services/games/xonotic.nix b/nixpkgs/nixos/modules/services/games/xonotic.nix
new file mode 100644
index 000000000000..c84347ddc981
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/games/xonotic.nix
@@ -0,0 +1,198 @@
+{ config
+, pkgs
+, lib
+, ...
+}:
+
+let
+  cfg = config.services.xonotic;
+
+  serverCfg = pkgs.writeText "xonotic-server.cfg" (
+    toString cfg.prependConfig
+      + "\n"
+      + builtins.concatStringsSep "\n" (
+        lib.mapAttrsToList (key: option:
+          let
+            escape = s: lib.escape [ "\"" ] s;
+            quote = s: "\"${s}\"";
+
+            toValue = x: quote (escape (toString x));
+
+            value = (if lib.isList option then
+              builtins.concatStringsSep
+                " "
+                (builtins.map (x: toValue x) option)
+            else
+              toValue option
+            );
+          in
+          "${key} ${value}"
+        ) cfg.settings
+      )
+      + "\n"
+      + toString cfg.appendConfig
+  );
+in
+
+{
+  options.services.xonotic = {
+    enable = lib.mkEnableOption (lib.mdDoc "Xonotic dedicated server");
+
+    package = lib.mkPackageOption pkgs "xonotic-dedicated" {};
+
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open the firewall for TCP and UDP on the specified port.
+      '';
+    };
+
+    dataDir = lib.mkOption {
+      type = lib.types.path;
+      readOnly = true;
+      default = "/var/lib/xonotic";
+      description = lib.mdDoc ''
+        Data directory.
+      '';
+    };
+
+    settings = lib.mkOption {
+      description = lib.mdDoc ''
+        Generates the `server.cfg` file. Refer to [upstream's example][0] for
+        details.
+
+        [0]: https://gitlab.com/xonotic/xonotic/-/blob/master/server/server.cfg
+      '';
+      default = {};
+      type = lib.types.submodule {
+        freeformType = with lib.types; let
+          scalars = oneOf [ singleLineStr int float ];
+        in
+        attrsOf (oneOf [ scalars (nonEmptyListOf scalars) ]);
+
+        options.sv_public = lib.mkOption {
+          type = lib.types.int;
+          default = 0;
+          example = [ (-1) 1 ];
+          description = lib.mdDoc ''
+            Controls whether the server will be publicly listed.
+          '';
+        };
+
+        options.hostname = lib.mkOption {
+          type = lib.types.singleLineStr;
+          default = "Xonotic $g_xonoticversion Server";
+          description = lib.mdDoc ''
+            The name that will appear in the server list. `$g_xonoticversion`
+            gets replaced with the current version.
+          '';
+        };
+
+        options.sv_motd = lib.mkOption {
+          type = lib.types.singleLineStr;
+          default = "";
+          description = lib.mdDoc ''
+            Text displayed when players join the server.
+          '';
+        };
+
+        options.sv_termsofservice_url = lib.mkOption {
+          type = lib.types.singleLineStr;
+          default = "";
+          description = lib.mdDoc ''
+            URL for the Terms of Service for playing on your server.
+          '';
+        };
+
+        options.maxplayers = lib.mkOption {
+          type = lib.types.int;
+          default = 16;
+          description = lib.mdDoc ''
+            Number of player slots on the server, including spectators.
+          '';
+        };
+
+        options.net_address = lib.mkOption {
+          type = lib.types.singleLineStr;
+          default = "0.0.0.0";
+          description = lib.mdDoc ''
+            The address Xonotic will listen on.
+          '';
+        };
+
+        options.port = lib.mkOption {
+          type = lib.types.port;
+          default = 26000;
+          description = lib.mdDoc ''
+            The port Xonotic will listen on.
+          '';
+        };
+      };
+    };
+
+    # Still useful even though we're using RFC 42 settings because *some* keys
+    # can be repeated.
+    appendConfig = lib.mkOption {
+      type = with lib.types; nullOr lines;
+      default = null;
+      description = lib.mdDoc ''
+        Literal text to insert at the end of `server.cfg`.
+      '';
+    };
+
+    # Certain changes need to happen at the beginning of the file.
+    prependConfig = lib.mkOption {
+      type = with lib.types; nullOr lines;
+      default = null;
+      description = lib.mdDoc ''
+        Literal text to insert at the start of `server.cfg`.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.xonotic = {
+      description = "Xonotic server";
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        # Required or else it tries to write the lock file into the nix store
+        HOME = cfg.dataDir;
+      };
+
+      serviceConfig = {
+        DynamicUser = true;
+        User = "xonotic";
+        StateDirectory = "xonotic";
+        ExecStart = "${cfg.package}/bin/xonotic-dedicated";
+
+        # Symlink the configuration from the nix store to where Xonotic actually
+        # looks for it
+        ExecStartPre = [
+          "${pkgs.coreutils}/bin/mkdir -p ${cfg.dataDir}/.xonotic/data"
+          ''
+            ${pkgs.coreutils}/bin/ln -sf ${serverCfg} \
+              ${cfg.dataDir}/.xonotic/data/server.cfg
+          ''
+        ];
+
+        # Cargo-culted from search results about writing Xonotic systemd units
+        ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
+
+        Restart = "on-failure";
+        RestartSec = 10;
+        StartLimitBurst = 5;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [
+      cfg.settings.port
+    ];
+    networking.firewall.allowedUDPPorts = lib.mkIf cfg.openFirewall [
+      cfg.settings.port
+    ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ CobaltCause ];
+}
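
A minimal sketch of the Xonotic module above; hostname and player count are illustrative:

    services.xonotic = {
      enable = true;
      openFirewall = true;
      settings = {
        hostname = "NixOS Xonotic";           # illustrative
        sv_public = 1;                        # announce on the public server list
        maxplayers = 16;
        port = 26000;                         # module default, shown for clarity
      };
    };
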
diff --git a/nixpkgs/nixos/modules/services/hardware/acpid.nix b/nixpkgs/nixos/modules/services/hardware/acpid.nix
new file mode 100644
index 000000000000..821f4ef205fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/acpid.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.acpid;
+
+  canonicalHandlers = {
+    powerEvent = {
+      event = "button/power.*";
+      action = cfg.powerEventCommands;
+    };
+
+    lidEvent = {
+      event = "button/lid.*";
+      action = cfg.lidEventCommands;
+    };
+
+    acEvent = {
+      event = "ac_adapter.*";
+      action = cfg.acEventCommands;
+    };
+  };
+
+  acpiConfDir = pkgs.runCommand "acpi-events" { preferLocalBuild = true; }
+    ''
+      mkdir -p $out
+      ${
+        # Generate a configuration file for each event. (You can't have
+        # multiple events in one config file...)
+        let f = name: handler:
+          ''
+            fn=$out/${name}
+            echo "event=${handler.event}" > $fn
+            echo "action=${pkgs.writeShellScriptBin "${name}.sh" handler.action }/bin/${name}.sh '%e'" >> $fn
+          '';
+        in concatStringsSep "\n" (mapAttrsToList f (canonicalHandlers // cfg.handlers))
+      }
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.acpid = {
+
+      enable = mkEnableOption (lib.mdDoc "the ACPI daemon");
+
+      logEvents = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Log all event activity.";
+      };
+
+      handlers = mkOption {
+        type = types.attrsOf (types.submodule {
+          options = {
+            event = mkOption {
+              type = types.str;
+              example = literalExpression ''"button/power.*" "button/lid.*" "ac_adapter.*" "button/mute.*" "button/volumedown.*" "cd/play.*" "cd/next.*"'';
+              description = lib.mdDoc "Event type.";
+            };
+
+            action = mkOption {
+              type = types.lines;
+              description = lib.mdDoc "Shell commands to execute when the event is triggered.";
+            };
+          };
+        });
+
+        description = lib.mdDoc ''
+          Event handlers.
+
+          ::: {.note}
+          A handler can be a single command.
+          :::
+        '';
+        default = {};
+        example = {
+          ac-power = {
+            event = "ac_adapter/*";
+            action = ''
+              vals=($1)  # space separated string to array of multiple values
+              case ''${vals[3]} in
+                  00000000)
+                      echo unplugged >> /tmp/acpi.log
+                      ;;
+                  00000001)
+                      echo plugged in >> /tmp/acpi.log
+                      ;;
+                  *)
+                      echo unknown >> /tmp/acpi.log
+                      ;;
+              esac
+            '';
+          };
+        };
+      };
+
+      powerEventCommands = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Shell commands to execute on a button/power.* event.";
+      };
+
+      lidEventCommands = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Shell commands to execute on a button/lid.* event.";
+      };
+
+      acEventCommands = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Shell commands to execute on an ac_adapter.* event.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.acpid = {
+      description = "ACPI Daemon";
+      documentation = [ "man:acpid(8)" ];
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = escapeShellArgs
+          ([ "${pkgs.acpid}/bin/acpid"
+             "--foreground"
+             "--netlink"
+             "--confdir" "${acpiConfDir}"
+           ] ++ optional cfg.logEvents "--logevents"
+          );
+      };
+      unitConfig = {
+        ConditionVirtualization = "!systemd-nspawn";
+        ConditionPathExists = [ "/proc/acpi" ];
+      };
+
+    };
+
+  };
+
+}
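
A minimal sketch of the acpid module above, wiring a custom handler; the event pattern and log path are illustrative:

    services.acpid = {
      enable = true;
      logEvents = true;
      handlers.mute = {
        event = "button/mute.*";
        action = ''
          echo "mute pressed: $1" >> /tmp/acpi.log   # illustrative action
        '';
      };
    };
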
diff --git a/nixpkgs/nixos/modules/services/hardware/actkbd.nix b/nixpkgs/nixos/modules/services/hardware/actkbd.nix
new file mode 100644
index 000000000000..1718d179bf5e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/actkbd.nix
@@ -0,0 +1,133 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.actkbd;
+
+  configFile = pkgs.writeText "actkbd.conf" ''
+    ${concatMapStringsSep "\n"
+      ({ keys, events, attributes, command, ... }:
+        ''${concatMapStringsSep "+" toString keys}:${concatStringsSep "," events}:${concatStringsSep "," attributes}:${command}''
+      )
+      cfg.bindings}
+    ${cfg.extraConfig}
+  '';
+
+  bindingCfg = { ... }: {
+    options = {
+
+      keys = mkOption {
+        type = types.listOf types.int;
+        description = lib.mdDoc "List of keycodes to match.";
+      };
+
+      events = mkOption {
+        type = types.listOf (types.enum ["key" "rep" "rel"]);
+        default = [ "key" ];
+        description = lib.mdDoc "List of events to match.";
+      };
+
+      attributes = mkOption {
+        type = types.listOf types.str;
+        default = [ "exec" ];
+        description = lib.mdDoc "List of attributes.";
+      };
+
+      command = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "What to run.";
+      };
+
+    };
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.actkbd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the {command}`actkbd` key mapping daemon.
+
+          Turning this on will start an {command}`actkbd`
+          instance for every evdev input that has at least one key
+          (which is okay even for systems with tiny memory footprint,
+          since actkbd normally uses \<100 bytes of memory per
+          instance).
+
+          This allows binding keys globally without the need for e.g.
+          X11.
+        '';
+      };
+
+      bindings = mkOption {
+        type = types.listOf (types.submodule bindingCfg);
+        default = [];
+        example = lib.literalExpression ''
+          [ { keys = [ 113 ]; events = [ "key" ]; command = "''${pkgs.alsa-utils}/bin/amixer -q set Master toggle"; }
+          ]
+        '';
+        description = lib.mdDoc ''
+          Key bindings for {command}`actkbd`.
+
+          See {command}`actkbd` {file}`README` for documentation.
+
+          The example shows a piece of what {option}`sound.mediaKeys.enable` does when enabled.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Literal contents to append to the end of actkbd configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "actkbd-udev-rules";
+      destination = "/etc/udev/rules.d/61-actkbd.rules";
+      text = ''
+        ACTION=="add", SUBSYSTEM=="input", KERNEL=="event[0-9]*", ENV{ID_INPUT_KEY}=="1", TAG+="systemd", ENV{SYSTEMD_WANTS}+="actkbd@$env{DEVNAME}.service"
+      '';
+    });
+
+    systemd.services."actkbd@" = {
+      enable = true;
+      restartIfChanged = true;
+      unitConfig = {
+        Description = "actkbd on %I";
+        ConditionPathExists = "%I";
+      };
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.actkbd}/bin/actkbd -D -c ${configFile} -d %I";
+      };
+    };
+
+    # For testing
+    environment.systemPackages = [ pkgs.actkbd ];
+
+  };
+
+}
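As a quick reference, a minimal configuration sketch using the options defined above; it mirrors the bindings example from the module, so the keycode (113, commonly the mute key) and the command are only illustrative:

    { pkgs, ... }: {
      services.actkbd = {
        enable = true;
        bindings = [
          # Keycode 113 is commonly the mute key; the command is illustrative.
          { keys = [ 113 ]; events = [ "key" ]; command = "${pkgs.alsa-utils}/bin/amixer -q set Master toggle"; }
        ];
      };
    }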
diff --git a/nixpkgs/nixos/modules/services/hardware/argonone.nix b/nixpkgs/nixos/modules/services/hardware/argonone.nix
new file mode 100644
index 000000000000..e67c2625062e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/argonone.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.hardware.argonone;
+in
+{
+  options.services.hardware.argonone = {
+    enable = lib.mkEnableOption (lib.mdDoc "the driver for Argon One Raspberry Pi case fan and power button");
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.argononed;
+      defaultText = lib.literalExpression "pkgs.argononed";
+      description = lib.mdDoc ''
+        The package implementing the Argon One driver
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    hardware.i2c.enable = true;
+    hardware.deviceTree.overlays = [
+      {
+        name = "argononed";
+        dtboFile = "${cfg.package}/boot/overlays/argonone.dtbo";
+      }
+      {
+        name = "i2c1-okay-overlay";
+        dtsText = ''
+          /dts-v1/;
+          /plugin/;
+          / {
+            compatible = "brcm,bcm2711";
+            fragment@0 {
+              target = <&i2c1>;
+              __overlay__ {
+                status = "okay";
+              };
+            };
+          };
+        '';
+      }
+    ];
+    environment.systemPackages = [ cfg.package ];
+    systemd.services.argononed = {
+      description = "Argon One Raspberry Pi case Daemon Service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${cfg.package}/bin/argononed";
+        PIDFile = "/run/argononed.pid";
+        Restart = "on-failure";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ misterio77 ];
+
+}
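A minimal sketch of enabling the module above from a NixOS configuration; the default package (pkgs.argononed) is used, so the enable switch is all that is needed:

    {
      # Enables the fan/power-button daemon plus the i2c and device tree settings above.
      services.hardware.argonone.enable = true;
    }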
diff --git a/nixpkgs/nixos/modules/services/hardware/asusd.nix b/nixpkgs/nixos/modules/services/hardware/asusd.nix
new file mode 100644
index 000000000000..ebbdea26c051
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/asusd.nix
@@ -0,0 +1,104 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.asusd;
+in
+{
+  options = {
+    services.asusd = {
+      enable = lib.mkEnableOption (lib.mdDoc "the asusd service for ASUS ROG laptops");
+
+      enableUserService = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Activate the asusd-user service.
+        '';
+      };
+
+      animeConfig = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The content of /etc/asusd/anime.ron.
+          See https://asus-linux.org/asusctl/#anime-control.
+        '';
+      };
+
+      asusdConfig = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The content of /etc/asusd/asusd.ron.
+          See https://asus-linux.org/asusctl/.
+        '';
+      };
+
+      auraConfig = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The content of /etc/asusd/aura.ron.
+          See https://asus-linux.org/asusctl/#led-keyboard-control.
+        '';
+      };
+
+      profileConfig = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The content of /etc/asusd/profile.ron.
+          See https://asus-linux.org/asusctl/#profiles.
+        '';
+      };
+
+      fanCurvesConfig = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The content of /etc/asusd/fan_curves.ron.
+          See https://asus-linux.org/asusctl/#fan-curves.
+        '';
+      };
+
+      userLedModesConfig = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The content of /etc/asusd/asusd-user-ledmodes.ron.
+          See https://asus-linux.org/asusctl/#led-keyboard-control.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.asusctl ];
+
+    environment.etc =
+      let
+        maybeConfig = name: cfg: lib.mkIf (cfg != null) {
+          source = pkgs.writeText name cfg;
+          mode = "0644";
+        };
+      in
+      {
+        "asusd/anime.ron" = maybeConfig "anime.ron" cfg.animeConfig;
+        "asusd/asusd.ron" = maybeConfig "asusd.ron" cfg.asusdConfig;
+        "asusd/aura.ron" = maybeConfig "aura.ron" cfg.auraConfig;
+        "asusd/profile.conf" = maybeConfig "profile.ron" cfg.profileConfig;
+        "asusd/fan_curves.ron" = maybeConfig "fan_curves.ron" cfg.fanCurvesConfig;
+        "asusd/asusd_user_ledmodes.ron" = maybeConfig "asusd_user_ledmodes.ron" cfg.userLedModesConfig;
+      };
+
+    services.dbus.enable = true;
+    systemd.packages = [ pkgs.asusctl ];
+    services.dbus.packages = [ pkgs.asusctl ];
+    services.udev.packages = [ pkgs.asusctl ];
+    services.supergfxd.enable = lib.mkDefault true;
+
+    systemd.user.services.asusd-user.enable = cfg.enableUserService;
+  };
+
+  meta.maintainers = pkgs.asusctl.meta.maintainers;
+}
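A minimal sketch using the options above; the .ron contents are deliberately left unset so the asusd defaults apply:

    {
      services.asusd = {
        enable = true;
        # Also start the per-user asusd-user service shipped by pkgs.asusctl.
        enableUserService = true;
      };
    }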
diff --git a/nixpkgs/nixos/modules/services/hardware/auto-cpufreq.nix b/nixpkgs/nixos/modules/services/hardware/auto-cpufreq.nix
new file mode 100644
index 000000000000..9c69ba8920f3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/auto-cpufreq.nix
@@ -0,0 +1,51 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.auto-cpufreq;
+  cfgFilename = "auto-cpufreq.conf";
+  cfgFile = format.generate cfgFilename cfg.settings;
+
+  format = pkgs.formats.ini {};
+in {
+  options = {
+    services.auto-cpufreq = {
+      enable = mkEnableOption (lib.mdDoc "auto-cpufreq daemon");
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Configuration for `auto-cpufreq`.
+
+          The available options can be found in [the example configuration file](https://github.com/AdnanHodzic/auto-cpufreq/blob/v${pkgs.auto-cpufreq.version}/auto-cpufreq.conf-example).
+          '';
+
+        default = {};
+        type = types.submodule { freeformType = format.type; };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.auto-cpufreq ];
+
+    systemd = {
+      packages = [ pkgs.auto-cpufreq ];
+      services.auto-cpufreq = {
+        # Workaround for https://github.com/NixOS/nixpkgs/issues/81138
+        wantedBy = [ "multi-user.target" ];
+        path = with pkgs; [ bash coreutils ];
+
+        serviceConfig.WorkingDirectory = "";
+        serviceConfig.ExecStart = [
+          ""
+          "${lib.getExe pkgs.auto-cpufreq} --daemon --config ${cfgFile}"
+        ];
+      };
+    };
+  };
+
+  # uses attributes of the linked package
+  meta = {
+    buildDocsInSandbox = false;
+    maintainers = with lib.maintainers; [ nicoo ];
+  };
+}
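A configuration sketch with freeform INI settings; the section and key names below follow the upstream example configuration linked in the option description and should be treated as illustrative rather than authoritative:

    {
      services.auto-cpufreq = {
        enable = true;
        settings = {
          # Illustrative values; consult the upstream auto-cpufreq.conf-example.
          charger = { governor = "performance"; turbo = "auto"; };
          battery = { governor = "powersave"; turbo = "auto"; };
        };
      };
    }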
diff --git a/nixpkgs/nixos/modules/services/hardware/bluetooth.nix b/nixpkgs/nixos/modules/services/hardware/bluetooth.nix
new file mode 100644
index 000000000000..2a58be51bb02
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/bluetooth.nix
@@ -0,0 +1,162 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.hardware.bluetooth;
+  package = cfg.package;
+
+  inherit (lib)
+    mkDefault mkEnableOption mkIf mkOption
+    mkRenamedOptionModule mkRemovedOptionModule
+    concatStringsSep escapeShellArgs literalExpression
+    optional optionals optionalAttrs recursiveUpdate types;
+
+  cfgFmt = pkgs.formats.ini { };
+
+  defaults = {
+    General.ControllerMode = "dual";
+    Policy.AutoEnable = cfg.powerOnBoot;
+  };
+
+  hasDisabledPlugins = builtins.length cfg.disabledPlugins > 0;
+
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "hardware" "bluetooth" "config" ] [ "hardware" "bluetooth" "settings" ])
+    (mkRemovedOptionModule [ "hardware" "bluetooth" "extraConfig" ] ''
+      Use hardware.bluetooth.settings instead.
+
+      This is part of the general move to use structured settings instead of raw
+      text for config as introduced by RFC0042:
+      https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md
+    '')
+  ];
+
+  ###### interface
+
+  options = {
+
+    hardware.bluetooth = {
+      enable = mkEnableOption (lib.mdDoc "support for Bluetooth");
+
+      hsphfpd.enable = mkEnableOption (lib.mdDoc "support for hsphfpd[-prototype] implementation");
+
+      powerOnBoot = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to power up the default Bluetooth controller on boot.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.bluez;
+        defaultText = literalExpression "pkgs.bluez";
+        description = lib.mdDoc ''
+          Which BlueZ package to use.
+        '';
+      };
+
+      disabledPlugins = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc "Built-in plugins to disable";
+      };
+
+      settings = mkOption {
+        type = cfgFmt.type;
+        default = { };
+        example = {
+          General = {
+            ControllerMode = "bredr";
+          };
+        };
+        description = lib.mdDoc "Set configuration for system-wide bluetooth (/etc/bluetooth/main.conf).";
+      };
+
+      input = mkOption {
+        type = cfgFmt.type;
+        default = { };
+        example = {
+          General = {
+            IdleTimeout = 30;
+            ClassicBondedOnly = true;
+          };
+        };
+        description = lib.mdDoc "Set configuration for the input service (/etc/bluetooth/input.conf).";
+      };
+
+      network = mkOption {
+        type = cfgFmt.type;
+        default = { };
+        example = {
+          General = {
+            DisableSecurity = true;
+          };
+        };
+        description = lib.mdDoc "Set configuration for the network service (/etc/bluetooth/network.conf).";
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ package ]
+      ++ optional cfg.hsphfpd.enable pkgs.hsphfpd;
+
+    environment.etc."bluetooth/input.conf".source =
+      cfgFmt.generate "input.conf" cfg.input;
+    environment.etc."bluetooth/network.conf".source =
+      cfgFmt.generate "network.conf" cfg.network;
+    environment.etc."bluetooth/main.conf".source =
+      cfgFmt.generate "main.conf" (recursiveUpdate defaults cfg.settings);
+    services.udev.packages = [ package ];
+    services.dbus.packages = [ package ]
+      ++ optional cfg.hsphfpd.enable pkgs.hsphfpd;
+    systemd.packages = [ package ];
+
+    systemd.services = {
+      bluetooth =
+        let
+          # `man bluetoothd` will refer to main.conf in the nix store but bluez
+          # will in fact load the configuration file at /etc/bluetooth/main.conf
+          # so force it here to avoid any ambiguity and things suddenly breaking
+          # if/when the bluez derivation is changed.
+          args = [ "-f" "/etc/bluetooth/main.conf" ]
+            ++ optional hasDisabledPlugins
+            "--noplugin=${concatStringsSep "," cfg.disabledPlugins}";
+        in
+        {
+          wantedBy = [ "bluetooth.target" ];
+          aliases = [ "dbus-org.bluez.service" ];
+          serviceConfig.ExecStart = [
+            ""
+            "${package}/libexec/bluetooth/bluetoothd ${escapeShellArgs args}"
+          ];
+          # restarting can leave people without a mouse/keyboard
+          unitConfig.X-RestartIfChanged = false;
+        };
+    }
+    // (optionalAttrs cfg.hsphfpd.enable {
+      hsphfpd = {
+        after = [ "bluetooth.service" ];
+        requires = [ "bluetooth.service" ];
+        wantedBy = [ "bluetooth.target" ];
+
+        description = "A prototype implementation used for connecting HSP/HFP Bluetooth devices";
+        serviceConfig.ExecStart = "${pkgs.hsphfpd}/bin/hsphfpd.pl";
+      };
+    });
+
+    systemd.user.services = {
+      obex.aliases = [ "dbus-org.bluez.obex.service" ];
+    }
+    // optionalAttrs cfg.hsphfpd.enable {
+      telephony_client = {
+        wantedBy = [ "default.target" ];
+
+        description = "telephony_client for hsphfpd";
+        serviceConfig.ExecStart = "${pkgs.hsphfpd}/bin/telephony_client.pl";
+      };
+    };
+  };
+}
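A short sketch of the options above; the values mirror the option examples and defaults rather than any recommended configuration:

    {
      hardware.bluetooth = {
        enable = true;
        # Policy.AutoEnable in the generated main.conf follows this value.
        powerOnBoot = false;
        settings.General.ControllerMode = "bredr";
      };
    }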
diff --git a/nixpkgs/nixos/modules/services/hardware/bolt.nix b/nixpkgs/nixos/modules/services/hardware/bolt.nix
new file mode 100644
index 000000000000..6990a9ea63b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/bolt.nix
@@ -0,0 +1,34 @@
+# Thunderbolt 3 device manager
+
+{ config, lib, pkgs, ...}:
+
+with lib;
+
+{
+  options = {
+
+    services.hardware.bolt = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Bolt, a userspace daemon to enable
+          security levels for Thunderbolt 3 on GNU/Linux.
+
+          Bolt is used by GNOME 3 to handle Thunderbolt settings.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf config.services.hardware.bolt.enable {
+
+    environment.systemPackages = [ pkgs.bolt ];
+    services.udev.packages = [ pkgs.bolt ];
+    systemd.packages = [ pkgs.bolt ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/brltty.nix b/nixpkgs/nixos/modules/services/hardware/brltty.nix
new file mode 100644
index 000000000000..3133804f485f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/brltty.nix
@@ -0,0 +1,57 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.brltty;
+
+  targets = [
+    "default.target" "multi-user.target"
+    "rescue.target" "emergency.target"
+  ];
+
+  genApiKey = pkgs.writers.writeDash "generate-brlapi-key" ''
+    if ! test -f /etc/brlapi.key; then
+      echo -n generating brlapi key...
+      ${pkgs.brltty}/bin/brltty-genkey -f /etc/brlapi.key
+      echo done
+    fi
+  '';
+
+in {
+
+  options = {
+
+    services.brltty.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Whether to enable the BRLTTY daemon.";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    users.users.brltty = {
+      description = "BRLTTY daemon user";
+      group = "brltty";
+    };
+    users.groups = {
+      brltty = { };
+      brlapi = { };
+    };
+
+    systemd.services."brltty@".serviceConfig =
+      { ExecStartPre = "!${genApiKey}"; };
+
+    # Install all upstream-provided files
+    systemd.packages = [ pkgs.brltty ];
+    systemd.tmpfiles.packages = [ pkgs.brltty ];
+    services.udev.packages = [ pkgs.brltty ];
+    environment.systemPackages = [ pkgs.brltty ];
+
+    # Add missing WantedBys (see issue #81138)
+    systemd.paths.brltty.wantedBy = targets;
+    systemd.paths."brltty@".wantedBy = targets;
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/ddccontrol.nix b/nixpkgs/nixos/modules/services/hardware/ddccontrol.nix
new file mode 100644
index 000000000000..0f1e8bf0d26c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/ddccontrol.nix
@@ -0,0 +1,39 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.ddccontrol;
+in
+
+{
+  ###### interface
+
+  options = {
+    services.ddccontrol = {
+      enable = lib.mkEnableOption (lib.mdDoc "ddccontrol for controlling displays");
+    };
+  };
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+    # Load the i2c-dev module
+    boot.kernelModules = [ "i2c_dev" ];
+
+    # Give users access to the "gddccontrol" tool
+    environment.systemPackages = [
+      pkgs.ddccontrol
+    ];
+
+    services.dbus.packages = [
+      pkgs.ddccontrol
+    ];
+
+    systemd.packages = [
+      pkgs.ddccontrol
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/evscript.nix b/nixpkgs/nixos/modules/services/hardware/evscript.nix
new file mode 100644
index 000000000000..6722887afb4f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/evscript.nix
@@ -0,0 +1,51 @@
+{ pkgs, lib, config, ... }:
+
+let
+  cfg = config.services.evscript;
+
+in
+{
+  options = with lib; {
+    services.evscript = {
+      enable = mkEnableOption (mdDoc "the evscript service");
+
+      package = mkOption {
+        description = mdDoc "evscript package to use for the evscript service";
+        type = types.package;
+        default = pkgs.evscript;
+        defaultText = literalExpression "pkgs.evscript";
+      };
+
+      devices = mkOption {
+        description = mdDoc "evdev devices for evscript to listen to";
+        type = types.listOf types.path;
+        example = [ "/dev/input/by-path/pci-0000:00:1d.0-usb-0:1.1:1.0-event-kbd" ];
+      };
+
+      script = mkOption {
+        description = mdDoc "Dyon script for evscript service to run";
+        type = types.path;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    boot.kernelModules = [ "uinput" ];
+
+    services.udev.extraRules = ''
+      KERNEL=="uinput", MODE="0660", GROUP="input"
+    '';
+
+    systemd.services.evscript = {
+      after = [ "systemd-udevd" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.DynamicUser = true;
+      serviceConfig.SupplementaryGroups = [ "input" ];
+      script = ''
+        ${cfg.package}/bin/evscript \
+            ${lib.concatMapStringsSep " " (d: "-d ${d}") cfg.devices} \
+            -f ${cfg.script}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/fancontrol.nix b/nixpkgs/nixos/modules/services/hardware/fancontrol.nix
new file mode 100644
index 000000000000..993c37b2364f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/fancontrol.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.fancontrol;
+  configFile = pkgs.writeText "fancontrol.conf" cfg.config;
+
+in
+{
+  options.hardware.fancontrol = {
+    enable = mkEnableOption (lib.mdDoc "software fan control (requires fancontrol.config)");
+
+    config = mkOption {
+      type = types.lines;
+      description = lib.mdDoc "Required fancontrol configuration file content. See {manpage}`pwmconfig(8)` from the lm_sensors package.";
+      example = ''
+        # Configuration file generated by pwmconfig
+        INTERVAL=10
+        DEVPATH=hwmon3=devices/virtual/thermal/thermal_zone2 hwmon4=devices/platform/f71882fg.656
+        DEVNAME=hwmon3=soc_dts1 hwmon4=f71869a
+        FCTEMPS=hwmon4/device/pwm1=hwmon3/temp1_input
+        FCFANS=hwmon4/device/pwm1=hwmon4/device/fan1_input
+        MINTEMP=hwmon4/device/pwm1=35
+        MAXTEMP=hwmon4/device/pwm1=65
+        MINSTART=hwmon4/device/pwm1=150
+        MINSTOP=hwmon4/device/pwm1=0
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.fancontrol = {
+      documentation = [ "man:fancontrol(8)" ];
+      description = "software fan control";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "lm_sensors.service" ];
+
+      serviceConfig = {
+        Restart = "on-failure";
+        ExecStart = "${pkgs.lm_sensors}/sbin/fancontrol ${configFile}";
+      };
+    };
+
+    # On some systems, the fancontrol service does not resume properly after sleep because the pwm status of the fans
+    # is not reset properly. Restarting the service fixes this, in accordance with https://github.com/lm-sensors/lm-sensors/issues/172.
+    powerManagement.resumeCommands = ''
+      systemctl restart fancontrol.service
+    '';
+
+  };
+
+  meta.maintainers = [ maintainers.evils ];
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/freefall.nix b/nixpkgs/nixos/modules/services/hardware/freefall.nix
new file mode 100644
index 000000000000..7b794264ff35
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/freefall.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.freefall;
+
+in {
+
+  options.services.freefall = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to protect HP/Dell laptop hard drives (not SSDs) in free fall.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.freefall;
+      defaultText = literalExpression "pkgs.freefall";
+      description = lib.mdDoc ''
+        freefall derivation to use.
+      '';
+    };
+
+    devices = mkOption {
+      type = types.listOf types.str;
+      default = [ "/dev/sda" ];
+      description = lib.mdDoc ''
+        Device paths to all internal spinning hard drives.
+      '';
+    };
+
+  };
+
+  config = let
+
+    mkService = dev:
+      assert dev != "";
+      let dev' = utils.escapeSystemdPath dev; in
+      nameValuePair "freefall-${dev'}" {
+        description = "Free-fall protection for ${dev}";
+        after = [ "${dev'}.device" ];
+        wantedBy = [ "${dev'}.device" ];
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/freefall ${dev}";
+          Restart = "on-failure";
+          Type = "forking";
+        };
+      };
+
+  in mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services = builtins.listToAttrs (map mkService cfg.devices);
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/fwupd.nix b/nixpkgs/nixos/modules/services/hardware/fwupd.nix
new file mode 100644
index 000000000000..7b6c336bd221
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/fwupd.nix
@@ -0,0 +1,210 @@
+# fwupd daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.fwupd;
+
+  format = pkgs.formats.ini {
+    listToValue = l: lib.concatStringsSep ";" (map (s: generators.mkValueStringDefault {} s) l);
+    mkKeyValue = generators.mkKeyValueDefault {} "=";
+  };
+
+  customEtc = {
+    "fwupd/fwupd.conf" = {
+      source = format.generate "fwupd.conf" {
+        fwupd = cfg.daemonSettings;
+        uefi_capsule = cfg.uefiCapsuleSettings;
+      };
+      # fwupd tries to chmod the file if it doesn't have the right permissions
+      mode = "0640";
+    };
+  };
+
+  originalEtc =
+    let
+      mkEtcFile = n: nameValuePair n { source = "${cfg.package}/etc/${n}"; };
+    in listToAttrs (map mkEtcFile cfg.package.filesInstalledToEtc);
+  extraTrustedKeys =
+    let
+      mkName = p: "pki/fwupd/${baseNameOf (toString p)}";
+      mkEtcFile = p: nameValuePair (mkName p) { source = p; };
+    in listToAttrs (map mkEtcFile cfg.extraTrustedKeys);
+
+  enableRemote = base: remote: {
+    "fwupd/remotes.d/${remote}.conf" = {
+      source = pkgs.runCommand "${remote}-enabled.conf" {} ''
+        sed "s,^Enabled=false,Enabled=true," \
+        "${base}/etc/fwupd/remotes.d/${remote}.conf" > "$out"
+      '';
+    };
+  };
+  remotes = (foldl'
+    (configFiles: remote: configFiles // (enableRemote cfg.package remote))
+    {}
+    cfg.extraRemotes
+  ) // (
+    # We cannot include the file in $out and rely on filesInstalledToEtc
+    # to install it because it would create a cyclic dependency between
+    # the outputs. We also need to enable the remote,
+    # which should not be done by default.
+    lib.optionalAttrs cfg.enableTestRemote (enableRemote cfg.package.installedTests "fwupd-tests")
+  );
+
+in {
+
+  ###### interface
+  options = {
+    services.fwupd = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable fwupd, a DBus service that allows
+          applications to update firmware.
+        '';
+      };
+
+      extraTrustedKeys = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        example = literalExpression "[ /etc/nixos/fwupd/myfirmware.pem ]";
+        description = lib.mdDoc ''
+          Installing a public key allows firmware signed with a matching private key to be recognized as trusted, which may require less authentication to install than for untrusted files. By default trusted firmware can be upgraded (but not downgraded) without the user or administrator password. Only very few keys are installed by default.
+        '';
+      };
+
+      extraRemotes = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "lvfs-testing" ];
+        description = lib.mdDoc ''
+          Enables extra remotes in fwupd. See `/etc/fwupd/remotes.d`.
+        '';
+      };
+
+      enableTestRemote = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable test remote. This is used by
+          [installed tests](https://github.com/fwupd/fwupd/blob/master/data/installed-tests/README.md).
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.fwupd;
+        defaultText = literalExpression "pkgs.fwupd";
+        description = lib.mdDoc ''
+          Which fwupd package to use.
+        '';
+      };
+
+      daemonSettings = mkOption {
+        type = types.submodule {
+          freeformType = format.type.nestedTypes.elemType;
+          options = {
+            DisabledDevices = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "2082b5e0-7a64-478a-b1b2-e3404fab6dad" ];
+              description = lib.mdDoc ''
+                List of device GUIDs to be disabled.
+              '';
+            };
+
+            DisabledPlugins = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "udev" ];
+              description = lib.mdDoc ''
+                List of plugins to be disabled.
+              '';
+            };
+
+            EspLocation = mkOption {
+              type = types.path;
+              default = config.boot.loader.efi.efiSysMountPoint;
+              defaultText = lib.literalExpression "config.boot.loader.efi.efiSysMountPoint";
+              description = lib.mdDoc ''
+                The EFI system partition (ESP) path used if UDisks is not available
+                or if this partition is not mounted at /boot/efi, /boot, or /efi
+              '';
+            };
+          };
+        };
+        default = {};
+        description = lib.mdDoc ''
+          Configurations for the fwupd daemon.
+        '';
+      };
+
+      uefiCapsuleSettings = mkOption {
+        type = types.submodule {
+          freeformType = format.type.nestedTypes.elemType;
+        };
+        default = {};
+        description = lib.mdDoc ''
+          UEFI capsule configurations for the fwupd daemon.
+        '';
+      };
+    };
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "fwupd" "blacklistDevices"] [ "services" "fwupd" "daemonSettings" "DisabledDevices" ])
+    (mkRenamedOptionModule [ "services" "fwupd" "blacklistPlugins"] [ "services" "fwupd" "daemonSettings" "DisabledPlugins" ])
+    (mkRenamedOptionModule [ "services" "fwupd" "disabledDevices" ] [ "services" "fwupd" "daemonSettings" "DisabledDevices" ])
+    (mkRenamedOptionModule [ "services" "fwupd" "disabledPlugins" ] [ "services" "fwupd" "daemonSettings" "DisabledPlugins" ])
+  ];
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    # Disable test related plug-ins implicitly so that users do not have to care about them.
+    services.fwupd.daemonSettings = {
+      DisabledPlugins = cfg.package.defaultDisabledPlugins;
+      EspLocation = config.boot.loader.efi.efiSysMountPoint;
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    # customEtc overrides some files from the package
+    environment.etc = originalEtc // customEtc // extraTrustedKeys // remotes;
+
+    services.dbus.packages = [ cfg.package ];
+
+    services.udev.packages = [ cfg.package ];
+
+    # required to update the firmware of disks
+    services.udisks2.enable = true;
+
+    systemd = {
+      packages = [ cfg.package ];
+
+      # fwupd-refresh expects a user that we do not create, so just run with DynamicUser
+      # instead and ensure we take ownership of /var/lib/fwupd
+      services.fwupd-refresh.serviceConfig = {
+        StateDirectory = "fwupd";
+        # Better for debugging, upstream sets stderr to null for some reason..
+        StandardError = "inherit";
+      };
+
+      timers.fwupd-refresh.wantedBy = [ "timers.target" ];
+    };
+
+    users.users.fwupd-refresh = {
+      isSystemUser = true;
+      group = "fwupd-refresh";
+    };
+    users.groups.fwupd-refresh = {};
+
+    security.polkit.enable = true;
+  };
+
+  meta = {
+    maintainers = pkgs.fwupd.meta.maintainers;
+  };
+}
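A minimal sketch of enabling fwupd with an additional remote; the remote name is the one used in the extraRemotes example above:

    {
      services.fwupd = {
        enable = true;
        # Opt in to the "lvfs-testing" remote in /etc/fwupd/remotes.d.
        extraRemotes = [ "lvfs-testing" ];
      };
    }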
diff --git a/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix b/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix
new file mode 100644
index 000000000000..f472b5774cbf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/hddfancontrol.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.hddfancontrol;
+  types = lib.types;
+in
+
+{
+  options = {
+
+    services.hddfancontrol.enable = lib.mkEnableOption (lib.mdDoc "hddfancontrol daemon");
+
+    services.hddfancontrol.disks = lib.mkOption {
+      type = with types; listOf path;
+      default = [];
+      description = lib.mdDoc ''
+        Drive(s) to get temperature from
+      '';
+      example = ["/dev/sda"];
+    };
+
+    services.hddfancontrol.pwmPaths = lib.mkOption {
+      type = with types; listOf path;
+      default = [];
+      description = lib.mdDoc ''
+        PWM filepath(s) to control fan speed (under /sys)
+      '';
+      example = ["/sys/class/hwmon/hwmon2/pwm1"];
+    };
+
+    services.hddfancontrol.smartctl = lib.mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Probe temperature using smartctl instead of hddtemp or hdparm
+      '';
+    };
+
+    services.hddfancontrol.extraArgs = lib.mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra commandline arguments for hddfancontrol
+      '';
+      example = ["--pwm-start-value=32"
+                 "--pwm-stop-value=0"
+                 "--spin-down-time=900"];
+    };
+  };
+
+  config = lib.mkIf cfg.enable (
+    let args = lib.concatLists [
+      ["-d"] cfg.disks
+      ["-p"] cfg.pwmPaths
+      (lib.optional cfg.smartctl "--smartctl")
+      cfg.extraArgs
+    ]; in {
+      systemd.packages = [pkgs.hddfancontrol];
+
+      systemd.services.hddfancontrol = {
+        wantedBy = [ "multi-user.target" ];
+        environment.HDDFANCONTROL_ARGS = lib.escapeShellArgs args;
+      };
+    }
+  );
+}
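A sketch assembled from the option examples above; the disk and hwmon paths are illustrative and depend on the machine:

    {
      services.hddfancontrol = {
        enable = true;
        disks = [ "/dev/sda" ];
        pwmPaths = [ "/sys/class/hwmon/hwmon2/pwm1" ];
        extraArgs = [ "--pwm-start-value=32" "--pwm-stop-value=0" ];
      };
    }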
diff --git a/nixpkgs/nixos/modules/services/hardware/illum.nix b/nixpkgs/nixos/modules/services/hardware/illum.nix
new file mode 100644
index 000000000000..46172fb7b53a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/illum.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.illum;
+in {
+
+  options = {
+
+    services.illum = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable illum, a daemon for controlling screen brightness with brightness buttons.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.illum = {
+      description = "Backlight Adjustment Service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.ExecStart = "${pkgs.illum}/bin/illum-d";
+      serviceConfig.Restart = "on-failure";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/interception-tools.nix b/nixpkgs/nixos/modules/services/hardware/interception-tools.nix
new file mode 100644
index 000000000000..4f86bd470ea7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/interception-tools.nix
@@ -0,0 +1,62 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.interception-tools;
+in {
+  options.services.interception-tools = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Whether to enable the interception tools service.";
+    };
+
+    plugins = mkOption {
+      type = types.listOf types.package;
+      default = [ pkgs.interception-tools-plugins.caps2esc ];
+      defaultText = literalExpression "[ pkgs.interception-tools-plugins.caps2esc ]";
+      description = lib.mdDoc ''
+        A list of interception tools plugins that will be made available to use
+        inside the udevmon configuration.
+      '';
+    };
+
+    udevmonConfig = mkOption {
+      type = types.either types.str types.path;
+      default = ''
+        - JOB: "intercept -g $DEVNODE | caps2esc | uinput -d $DEVNODE"
+          DEVICE:
+            EVENTS:
+              EV_KEY: [KEY_CAPSLOCK, KEY_ESC]
+      '';
+      example = ''
+        - JOB: "intercept -g $DEVNODE | y2z | x2y | uinput -d $DEVNODE"
+          DEVICE:
+            EVENTS:
+              EV_KEY: [KEY_X, KEY_Y]
+      '';
+      description = lib.mdDoc ''
+        String of udevmon YAML configuration, or path to a udevmon YAML
+        configuration file.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.interception-tools = {
+      description = "Interception tools";
+      path = [ pkgs.bash pkgs.interception-tools ] ++ cfg.plugins;
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.interception-tools}/bin/udevmon -c \
+          ${if builtins.typeOf cfg.udevmonConfig == "path"
+          then cfg.udevmonConfig
+          else pkgs.writeText "udevmon.yaml" cfg.udevmonConfig}
+        '';
+        Nice = -20;
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
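A sketch that simply restates the module defaults explicitly, which may help when adapting the udevmon job to another plugin:

    { pkgs, ... }: {
      services.interception-tools = {
        enable = true;
        # caps2esc is already the default plugin; listing it only makes the intent explicit.
        plugins = [ pkgs.interception-tools-plugins.caps2esc ];
        udevmonConfig = ''
          - JOB: "intercept -g $DEVNODE | caps2esc | uinput -d $DEVNODE"
            DEVICE:
              EVENTS:
                EV_KEY: [KEY_CAPSLOCK, KEY_ESC]
        '';
      };
    }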
diff --git a/nixpkgs/nixos/modules/services/hardware/iptsd.nix b/nixpkgs/nixos/modules/services/hardware/iptsd.nix
new file mode 100644
index 000000000000..8af0a6d6bbe1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/iptsd.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.iptsd;
+  format = pkgs.formats.ini { };
+  configFile = format.generate "iptsd.conf" cfg.config;
+in {
+  options.services.iptsd = {
+    enable = lib.mkEnableOption (lib.mdDoc "the userspace daemon for Intel Precise Touch & Stylus");
+
+    config = lib.mkOption {
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for IPTSD. See the
+        [reference configuration](https://github.com/linux-surface/iptsd/blob/master/etc/iptsd.conf)
+        for available options and defaults.
+      '';
+      type = lib.types.submodule {
+        freeformType = format.type;
+        options = {
+          Touch = {
+            DisableOnPalm = lib.mkOption {
+              default = false;
+              description = lib.mdDoc "Ignore all touch inputs if a palm was registered on the display.";
+              type = lib.types.bool;
+            };
+            DisableOnStylus = lib.mkOption {
+              default = false;
+              description = lib.mdDoc "Ignore all touch inputs if a stylus is in proximity.";
+              type = lib.types.bool;
+            };
+          };
+          Stylus = {
+            Disable = lib.mkOption {
+              default = false;
+              description = lib.mdDoc "Disables the stylus. No stylus data will be processed.";
+              type = lib.types.bool;
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.packages = [ pkgs.iptsd ];
+    environment.etc."iptsd.conf".source = configFile;
+    systemd.services."iptsd@".restartTriggers = [ configFile ];
+    services.udev.packages = [ pkgs.iptsd ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ dotlambda ];
+}
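A small sketch of the structured config above; only the typed options are set, anything else would fall through the freeform INI type:

    {
      services.iptsd = {
        enable = true;
        # Ignore touch input while the stylus is in proximity.
        config.Touch.DisableOnStylus = true;
      };
    }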
diff --git a/nixpkgs/nixos/modules/services/hardware/irqbalance.nix b/nixpkgs/nixos/modules/services/hardware/irqbalance.nix
new file mode 100644
index 000000000000..8ba0a73d895d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/irqbalance.nix
@@ -0,0 +1,24 @@
+#
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.irqbalance;
+
+in
+{
+  options.services.irqbalance.enable = mkEnableOption (lib.mdDoc "irqbalance daemon");
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.irqbalance ];
+
+    systemd.services.irqbalance.wantedBy = ["multi-user.target"];
+
+    systemd.packages = [ pkgs.irqbalance ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/joycond.nix b/nixpkgs/nixos/modules/services/hardware/joycond.nix
new file mode 100644
index 000000000000..df3239cb2a7d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/joycond.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.joycond;
+in
+
+with lib;
+
+{
+  options.services.joycond = {
+    enable = mkEnableOption (lib.mdDoc "support for Nintendo Pro Controllers and Joycons");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.joycond;
+      defaultText = lib.literalExpression "pkgs.joycond";
+      description = lib.mdDoc ''
+        The joycond package to use.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    services.udev.packages = [ cfg.package ];
+
+    systemd.packages = [ cfg.package ];
+
+    # Workaround for https://github.com/NixOS/nixpkgs/issues/81138
+    systemd.services.joycond.wantedBy = [ "multi-user.target" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/kanata.nix b/nixpkgs/nixos/modules/services/hardware/kanata.nix
new file mode 100644
index 000000000000..aac20c6c760e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/kanata.nix
@@ -0,0 +1,185 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.kanata;
+
+  keyboard = {
+    options = {
+      devices = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "/dev/input/by-id/usb-0000_0000-event-kbd" ];
+        description = mdDoc ''
+          Paths to keyboard devices.
+
+          An empty list, the default value, lets kanata detect which
+          input devices are keyboards and intercept them all.
+        '';
+      };
+      config = mkOption {
+        type = types.lines;
+        example = ''
+          (defsrc
+            grv  1    2    3    4    5    6    7    8    9    0    -    =    bspc
+            tab  q    w    e    r    t    y    u    i    o    p    [    ]    \
+            caps a    s    d    f    g    h    j    k    l    ;    '    ret
+            lsft z    x    c    v    b    n    m    ,    .    /    rsft
+            lctl lmet lalt           spc            ralt rmet rctl)
+
+          (deflayer qwerty
+            grv  1    2    3    4    5    6    7    8    9    0    -    =    bspc
+            tab  q    w    e    r    t    y    u    i    o    p    [    ]    \
+            @cap a    s    d    f    g    h    j    k    l    ;    '    ret
+            lsft z    x    c    v    b    n    m    ,    .    /    rsft
+            lctl lmet lalt           spc            ralt rmet rctl)
+
+          (defalias
+            ;; tap within 100ms for capslk, hold more than 100ms for lctl
+            cap (tap-hold 100 100 caps lctl))
+        '';
+        description = mdDoc ''
+          Configuration other than `defcfg`.
+
+          See [example config files](https://github.com/jtroo/kanata)
+          for more information.
+        '';
+      };
+      extraDefCfg = mkOption {
+        type = types.lines;
+        default = "";
+        example = "danger-enable-cmd yes";
+        description = mdDoc ''
+          Configuration of `defcfg` other than `linux-dev` (generated
+          from the devices option) and
+          `linux-continue-if-no-devs-found` (hardcoded to be yes).
+
+          See [example config files](https://github.com/jtroo/kanata)
+          for more information.
+        '';
+      };
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = mdDoc "Extra command line arguments passed to kanata.";
+      };
+      port = mkOption {
+        type = types.nullOr types.port;
+        default = null;
+        example = 6666;
+        description = mdDoc ''
+          Port to run the TCP server on. `null` will not run the server.
+        '';
+      };
+    };
+  };
+
+  mkName = name: "kanata-${name}";
+
+  mkDevices = devices:
+    optionalString ((length devices) > 0) "linux-dev ${concatStringsSep ":" devices}";
+
+  mkConfig = name: keyboard: pkgs.writeText "${mkName name}-config.kdb" ''
+    (defcfg
+      ${keyboard.extraDefCfg}
+      ${mkDevices keyboard.devices}
+      linux-continue-if-no-devs-found yes)
+
+    ${keyboard.config}
+  '';
+
+  mkService = name: keyboard: nameValuePair (mkName name) {
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      Type = "notify";
+      ExecStart = ''
+        ${getExe cfg.package} \
+          --cfg ${mkConfig name keyboard} \
+          --symlink-path ''${RUNTIME_DIRECTORY}/${name} \
+          ${optionalString (keyboard.port != null) "--port ${toString keyboard.port}"} \
+          ${utils.escapeSystemdExecArgs keyboard.extraArgs}
+      '';
+
+      DynamicUser = true;
+      RuntimeDirectory = mkName name;
+      SupplementaryGroups = with config.users.groups; [
+        input.name
+        uinput.name
+      ];
+
+      # hardening
+      DeviceAllow = [
+        "/dev/uinput rw"
+        "char-input r"
+      ];
+      CapabilityBoundingSet = [ "" ];
+      DevicePolicy = "closed";
+      IPAddressAllow = optional (keyboard.port != null) "localhost";
+      IPAddressDeny = [ "any" ];
+      LockPersonality = true;
+      MemoryDenyWriteExecute = true;
+      PrivateNetwork = keyboard.port == null;
+      PrivateUsers = true;
+      ProcSubset = "pid";
+      ProtectClock = true;
+      ProtectControlGroups = true;
+      ProtectHome = true;
+      ProtectHostname = true;
+      ProtectKernelLogs = true;
+      ProtectKernelModules = true;
+      ProtectKernelTunables = true;
+      ProtectProc = "invisible";
+      RestrictAddressFamilies = [ "AF_UNIX" ] ++ optional (keyboard.port != null) "AF_INET";
+      RestrictNamespaces = true;
+      RestrictRealtime = true;
+      SystemCallArchitectures = [ "native" ];
+      SystemCallFilter = [
+        "@system-service"
+        "~@privileged"
+        "~@resources"
+      ];
+      UMask = "0077";
+    };
+  };
+in
+{
+  options.services.kanata = {
+    enable = mkEnableOption (mdDoc "kanata");
+    package = mkOption {
+      type = types.package;
+      default = pkgs.kanata;
+      defaultText = literalExpression "pkgs.kanata";
+      example = literalExpression "pkgs.kanata-with-cmd";
+      description = mdDoc ''
+        The kanata package to use.
+
+        ::: {.note}
+        If `danger-enable-cmd` is enabled in any of the keyboards, the
+        `kanata-with-cmd` package should be used.
+        :::
+      '';
+    };
+    keyboards = mkOption {
+      type = types.attrsOf (types.submodule keyboard);
+      default = { };
+      description = mdDoc "Keyboard configurations.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings =
+      let
+        keyboardsWithEmptyDevices = filterAttrs (name: keyboard: keyboard.devices == [ ]) cfg.keyboards;
+        existEmptyDevices = length (attrNames keyboardsWithEmptyDevices) > 0;
+        moreThanOneKeyboard = length (attrNames cfg.keyboards) > 1;
+      in
+      optional (existEmptyDevices && moreThanOneKeyboard) "One device can only be intercepted by one kanata instance.  Setting services.kanata.keyboards.${head (attrNames keyboardsWithEmptyDevices)}.devices = [ ] and using more than one services.kanata.keyboards may cause a race condition.";
+
+    hardware.uinput.enable = true;
+
+    systemd.services = mapAttrs' mkService cfg.keyboards;
+  };
+
+  meta.maintainers = with maintainers; [ linj ];
+}
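A minimal sketch of a single keyboard entry; the name "internal" is arbitrary, the empty devices list lets kanata intercept every detected keyboard, and the two-line config only remaps Caps Lock to Escape:

    {
      services.kanata = {
        enable = true;
        keyboards.internal = {
          devices = [ ];  # empty list: intercept all detected keyboards
          config = ''
            (defsrc caps)
            (deflayer base esc)
          '';
        };
      };
    }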
diff --git a/nixpkgs/nixos/modules/services/hardware/keyd.nix b/nixpkgs/nixos/modules/services/hardware/keyd.nix
new file mode 100644
index 000000000000..724e9b956847
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/keyd.nix
@@ -0,0 +1,182 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.keyd;
+
+  keyboardOptions = { ... }: {
+    options = {
+      ids = mkOption {
+        type = types.listOf types.str;
+        default = [ "*" ];
+        example = [ "*" "-0123:0456" ];
+        description = lib.mdDoc ''
+          Device identifiers, as shown by {manpage}`keyd(1)`.
+        '';
+      };
+
+      settings = mkOption {
+        type = (pkgs.formats.ini { }).type;
+        default = { };
+        example = {
+          main = {
+            capslock = "overload(control, esc)";
+            rightalt = "layer(rightalt)";
+          };
+
+          rightalt = {
+            j = "down";
+            k = "up";
+            h = "left";
+            l = "right";
+          };
+        };
+        description = lib.mdDoc ''
+          Configuration, except `ids` section, that is written to {file}`/etc/keyd/<keyboard>.conf`.
+          Appropriate names can be used to write non-alpha keys, for example "equal" instead of the "=" sign (see <https://github.com/NixOS/nixpkgs/issues/236622>).
+          See <https://github.com/rvaiya/keyd> for how to configure.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          [control+shift]
+          h = left
+        '';
+        description = lib.mdDoc ''
+          Extra configuration that is appended to the end of the file.
+          **Do not** write `ids` section here, use a separate option for it.
+          You can use this option to define compound layers, which must always be defined after the layers they are composed of.
+        '';
+      };
+    };
+  };
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "keyd" "ids" ]
+      ''Use keyboards.<filename>.ids instead. If you don't need a multi-file configuration, just add keyboards.default before the ids. See https://github.com/NixOS/nixpkgs/pull/243271.'')
+    (mkRemovedOptionModule [ "services" "keyd" "settings" ]
+      ''Use keyboards.<filename>.settings instead. If you don't need a multi-file configuration, just add keyboards.default before the settings. See https://github.com/NixOS/nixpkgs/pull/243271.'')
+  ];
+
+  options.services.keyd = {
+    enable = mkEnableOption (lib.mdDoc "keyd, a key remapping daemon");
+
+    keyboards = mkOption {
+      type = types.attrsOf (types.submodule keyboardOptions);
+      default = { };
+      example = literalExpression ''
+        {
+          default = {
+            ids = [ "*" ];
+            settings = {
+              main = {
+                capslock = "overload(control, esc)";
+              };
+            };
+          };
+          externalKeyboard = {
+            ids = [ "1ea7:0907" ];
+            settings = {
+              main = {
+                esc = "capslock";
+              };
+            };
+          };
+        }
+      '';
+      description = mdDoc ''
+        Configuration for one or more device IDs. Corresponding files in the /etc/keyd/ directory are created according to the name of the keys (like `default` or `externalKeyboard`).
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Creates separate files in the `/etc/keyd/` directory for each key in the dictionary
+    environment.etc = mapAttrs'
+      (name: options:
+        nameValuePair "keyd/${name}.conf" {
+          text = ''
+            [ids]
+            ${concatStringsSep "\n" options.ids}
+
+            ${generators.toINI {} options.settings}
+            ${options.extraConfig}
+          '';
+        })
+      cfg.keyboards;
+
+    hardware.uinput.enable = lib.mkDefault true;
+
+    systemd.services.keyd = {
+      description = "Keyd remapping daemon";
+      documentation = [ "man:keyd(1)" ];
+
+      wantedBy = [ "multi-user.target" ];
+
+      restartTriggers = mapAttrsToList
+        (name: options:
+          config.environment.etc."keyd/${name}.conf".source
+        )
+        cfg.keyboards;
+
+      # this is configurable in 2.4.2, later versions seem to remove this option.
+      # post-2.4.2 may need to set makeFlags in the derivation:
+      #
+      #     makeFlags = [ "SOCKET_PATH=/run/keyd/keyd.socket" ];
+      environment.KEYD_SOCKET = "/run/keyd/keyd.sock";
+
+      serviceConfig = {
+        ExecStart = "${pkgs.keyd}/bin/keyd";
+        Restart = "always";
+
+        # TODO investigate why it doesn't work properly with DynamicUser
+        # See issue: https://github.com/NixOS/nixpkgs/issues/226346
+        # DynamicUser = true;
+        SupplementaryGroups = [
+          config.users.groups.input.name
+          config.users.groups.uinput.name
+        ];
+
+        RuntimeDirectory = "keyd";
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        DeviceAllow = [
+          "char-input rw"
+          "/dev/uinput rw"
+        ];
+        ProtectClock = true;
+        PrivateNetwork = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        PrivateUsers = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        RestrictNamespaces = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        LockPersonality = true;
+        ProtectProc = "invisible";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+          "~@resources"
+        ];
+        RestrictAddressFamilies = [ "AF_UNIX" ];
+        RestrictSUIDSGID = true;
+        IPAddressDeny = [ "any" ];
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProcSubset = "pid";
+        UMask = "0077";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/lcd.nix b/nixpkgs/nixos/modules/services/hardware/lcd.nix
new file mode 100644
index 000000000000..8d682d137f44
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/lcd.nix
@@ -0,0 +1,168 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.hardware.lcd;
+  pkg = lib.getBin pkgs.lcdproc;
+
+  serverCfg = pkgs.writeText "lcdd.conf" ''
+    [server]
+    DriverPath=${pkg}/lib/lcdproc/
+    ReportToSyslog=false
+    Bind=${cfg.serverHost}
+    Port=${toString cfg.serverPort}
+    ${cfg.server.extraConfig}
+  '';
+
+  clientCfg = pkgs.writeText "lcdproc.conf" ''
+    [lcdproc]
+    Server=${cfg.serverHost}
+    Port=${toString cfg.serverPort}
+    ReportToSyslog=false
+    ${cfg.client.extraConfig}
+  '';
+
+  serviceCfg = {
+    DynamicUser = true;
+    Restart = "on-failure";
+    Slice = "lcd.slice";
+  };
+
+in with lib; {
+
+  meta.maintainers = with maintainers; [ peterhoeg ];
+
+  options = with types; {
+    services.hardware.lcd = {
+      serverHost = mkOption {
+        type = str;
+        default = "localhost";
+        description = lib.mdDoc "Host on which LCDd is listening.";
+      };
+
+      serverPort = mkOption {
+        type = int;
+        default = 13666;
+        description = lib.mdDoc "Port on which LCDd is listening.";
+      };
+
+      server = {
+        enable = mkOption {
+          type = bool;
+          default = false;
+          description = lib.mdDoc "Enable the LCD panel server (LCDd)";
+        };
+
+        openPorts = mkOption {
+          type = bool;
+          default = false;
+          description = lib.mdDoc "Open the ports in the firewall";
+        };
+
+        usbPermissions = mkOption {
+          type = bool;
+          default = false;
+          description = lib.mdDoc ''
+            Set group-write permissions on a USB device.
+
+            A USB connected LCD panel will most likely require having its
+            permissions modified for lcdd to write to it. Enabling this option
+            sets group-write permissions on the device identified by
+            {option}`services.hardware.lcd.usbVid` and
+            {option}`services.hardware.lcd.usbPid`. In order to find the
+            values, you can run the {command}`lsusb` command. Example
+            output:
+
+            ```
+            Bus 005 Device 002: ID 0403:c630 Future Technology Devices International, Ltd lcd2usb interface
+            ```
+
+            In this case the vendor id is 0403 and the product id is c630.
+          '';
+        };
+
+        usbVid = mkOption {
+          type = str;
+          default = "";
+          description = lib.mdDoc "The vendor ID of the USB device to claim.";
+        };
+
+        usbPid = mkOption {
+          type = str;
+          default = "";
+          description = lib.mdDoc "The product ID of the USB device to claim.";
+        };
+
+        usbGroup = mkOption {
+          type = str;
+          default = "dialout";
+          description = lib.mdDoc "The group to use for settings permissions. This group must exist or you will have to create it.";
+        };
+
+        extraConfig = mkOption {
+          type = lines;
+          default = "";
+          description = lib.mdDoc "Additional configuration added verbatim to the server config.";
+        };
+      };
+
+      client = {
+        enable = mkOption {
+          type = bool;
+          default = false;
+          description = lib.mdDoc "Enable the LCD panel client (LCDproc)";
+        };
+
+        extraConfig = mkOption {
+          type = lines;
+          default = "";
+          description = lib.mdDoc "Additional configuration added verbatim to the client config.";
+        };
+
+        restartForever = mkOption {
+          type = bool;
+          default = true;
+          description = lib.mdDoc "Try restarting the client forever.";
+        };
+      };
+    };
+  };
+
+  config = mkIf (cfg.server.enable || cfg.client.enable) {
+    networking.firewall.allowedTCPPorts = mkIf (cfg.server.enable && cfg.server.openPorts) [ cfg.serverPort ];
+
+    services.udev.extraRules = mkIf (cfg.server.enable && cfg.server.usbPermissions) ''
+      ACTION=="add", SUBSYSTEMS=="usb", ATTRS{idVendor}=="${cfg.server.usbVid}", ATTRS{idProduct}=="${cfg.server.usbPid}", MODE="660", GROUP="${cfg.server.usbGroup}"
+    '';
+
+    systemd.services = {
+      lcdd = mkIf cfg.server.enable {
+        description = "LCDproc - server";
+        wantedBy = [ "lcd.target" ];
+        serviceConfig = serviceCfg // {
+          ExecStart = "${pkg}/bin/LCDd -f -c ${serverCfg}";
+          SupplementaryGroups = cfg.server.usbGroup;
+        };
+      };
+
+      lcdproc = mkIf cfg.client.enable {
+        description = "LCDproc - client";
+        after = [ "lcdd.service" ];
+        wantedBy = [ "lcd.target" ];
+        # Allow restarting for eternity
+        startLimitIntervalSec = lib.mkIf cfg.client.restartForever 0;
+        serviceConfig = serviceCfg // {
+          ExecStart = "${pkg}/bin/lcdproc -f -c ${clientCfg}";
+          # If the server is being restarted at the same time, the client will
+          # fail as it cannot connect, so space it out a bit.
+          RestartSec = "5";
+        };
+      };
+    };
+
+    systemd.targets.lcd = {
+      description = "LCD client/server";
+      after = [ "lcdd.service" "lcdproc.service" ];
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
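A sketch combining server and client on one machine; the USB IDs are taken from the lsusb example in the usbPermissions description and are illustrative:

    {
      services.hardware.lcd = {
        server = {
          enable = true;
          usbPermissions = true;
          usbVid = "0403";
          usbPid = "c630";
        };
        client.enable = true;
      };
    }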
diff --git a/nixpkgs/nixos/modules/services/hardware/lirc.nix b/nixpkgs/nixos/modules/services/hardware/lirc.nix
new file mode 100644
index 000000000000..5b1a8d10c729
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/lirc.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.lirc;
+in {
+
+  ###### interface
+
+  options = {
+    services.lirc = {
+
+      enable = mkEnableOption (lib.mdDoc "LIRC daemon");
+
+      options = mkOption {
+        type = types.lines;
+        example = ''
+          [lircd]
+          nodaemon = False
+        '';
+        description = lib.mdDoc "LIRC default options described in man:lircd(8) ({file}`lirc_options.conf`)";
+      };
+
+      configs = mkOption {
+        type = types.listOf types.lines;
+        description = lib.mdDoc "Configurations for lircd to load, see man:lircd.conf(5) for details ({file}`lircd.conf`)";
+      };
+
+      extraArguments = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Extra arguments to lircd.";
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    # Note: the LIRC executables raise a warning if lirc_options.conf does not exist
+    environment.etc."lirc/lirc_options.conf".text = cfg.options;
+
+    passthru.lirc.socket = "/run/lirc/lircd";
+
+    environment.systemPackages = [ pkgs.lirc ];
+
+    systemd.sockets.lircd = {
+      description = "LIRC daemon socket";
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream = config.passthru.lirc.socket;
+        SocketUser = "lirc";
+        SocketMode = "0660";
+      };
+    };
+
+    systemd.services.lircd = let
+      configFile = pkgs.writeText "lircd.conf" (builtins.concatStringsSep "\n" cfg.configs);
+    in {
+      description = "LIRC daemon service";
+      after = [ "network.target" ];
+
+      unitConfig.Documentation = [ "man:lircd(8)" ];
+
+      serviceConfig = {
+        RuntimeDirectory = ["lirc" "lirc/lock"];
+
+        # The service runtime directory and the socket share the same folder.
+        # The following hacks are necessary to get everything right:
+
+        # 1. prevent socket deletion during stop and restart
+        RuntimeDirectoryPreserve = true;
+
+        # 2. fix runtime folder ownership, which is wrong when socket
+        #    activation creates the folder
+        PermissionsStartOnly = true;
+        ExecStartPre = [
+          "${pkgs.coreutils}/bin/chown lirc /run/lirc/"
+        ];
+
+        ExecStart = ''
+          ${pkgs.lirc}/bin/lircd --nodaemon \
+            ${escapeShellArgs cfg.extraArguments} \
+            ${configFile}
+        '';
+        User = "lirc";
+      };
+    };
+
+    users.users.lirc = {
+      uid = config.ids.uids.lirc;
+      group = "lirc";
+      description = "LIRC user for lircd";
+    };
+
+    users.groups.lirc.gid = config.ids.gids.lirc;
+  };
+}
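+
+# Usage sketch (hypothetical host configuration, for illustration only); the
+# remote definition file ./my-remote.lircd.conf is a placeholder.
+#
+#   services.lirc = {
+#     enable = true;
+#     options = ''
+#       [lircd]
+#       nodaemon = False
+#     '';
+#     configs = [ (builtins.readFile ./my-remote.lircd.conf) ];
+#   };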
diff --git a/nixpkgs/nixos/modules/services/hardware/nvidia-optimus.nix b/nixpkgs/nixos/modules/services/hardware/nvidia-optimus.nix
new file mode 100644
index 000000000000..5b5273ed7823
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/nvidia-optimus.nix
@@ -0,0 +1,43 @@
+{ config, lib, ... }:
+
+let kernel = config.boot.kernelPackages; in
+
+{
+
+  ###### interface
+
+  options = {
+
+    hardware.nvidiaOptimus.disable = lib.mkOption {
+      default = false;
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Completely disable the NVIDIA graphics card and use the
+        integrated graphics processor instead.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = lib.mkIf config.hardware.nvidiaOptimus.disable {
+    boot.blacklistedKernelModules = ["nouveau" "nvidia" "nvidiafb" "nvidia-drm"];
+    boot.kernelModules = [ "bbswitch" ];
+    boot.extraModulePackages = [ kernel.bbswitch ];
+
+    systemd.services.bbswitch = {
+      description = "Disable NVIDIA Card";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "${kernel.bbswitch}/bin/discrete_vga_poweroff";
+        ExecStop = "${kernel.bbswitch}/bin/discrete_vga_poweron";
+      };
+      path = [ kernel.bbswitch ];
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/openrgb.nix b/nixpkgs/nixos/modules/services/hardware/openrgb.nix
new file mode 100644
index 000000000000..13b1d07e53b7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/openrgb.nix
@@ -0,0 +1,60 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.hardware.openrgb;
+in {
+  options.services.hardware.openrgb = {
+    enable = mkEnableOption (lib.mdDoc "OpenRGB server");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.openrgb;
+      defaultText = literalMD "pkgs.openrgb";
+      description = lib.mdDoc "Set version of openrgb package to use.";
+    };
+
+    motherboard = mkOption {
+      type = types.nullOr (types.enum [ "amd" "intel" ]);
+      default = if config.hardware.cpu.intel.updateMicrocode then "intel"
+        else if config.hardware.cpu.amd.updateMicrocode then "amd"
+        else null;
+      defaultText = literalMD ''
+        if config.hardware.cpu.intel.updateMicrocode then "intel"
+        else if config.hardware.cpu.amd.updateMicrocode then "amd"
+        else null;
+      '';
+      description = lib.mdDoc "CPU family of motherboard. Allows for addition motherboard i2c support.";
+    };
+
+    server.port = mkOption {
+      type = types.port;
+      default = 6742;
+      description = lib.mdDoc "Set server port of openrgb.";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    services.udev.packages = [ cfg.package ];
+
+    boot.kernelModules = [ "i2c-dev" ]
+     ++ lib.optionals (cfg.motherboard == "amd") [ "i2c-piix4" ]
+     ++ lib.optionals (cfg.motherboard == "intel") [ "i2c-i801" ];
+
+    systemd.services.openrgb = {
+      description = "OpenRGB server daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        StateDirectory = "OpenRGB";
+        WorkingDirectory = "/var/lib/OpenRGB";
+        ExecStart = "${cfg.package}/bin/openrgb --server --server-port ${toString cfg.server.port}";
+        Restart = "always";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ jonringer ];
+}
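+
+# Usage sketch (hypothetical host configuration, for illustration only): run
+# the OpenRGB server on the default port with i2c support for an AMD board.
+#
+#   services.hardware.openrgb = {
+#     enable = true;
+#     motherboard = "amd";
+#   };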
diff --git a/nixpkgs/nixos/modules/services/hardware/pcscd.nix b/nixpkgs/nixos/modules/services/hardware/pcscd.nix
new file mode 100644
index 000000000000..a9e4998efe37
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/pcscd.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfgFile = pkgs.writeText "reader.conf" config.services.pcscd.readerConfig;
+
+  package = if config.security.polkit.enable
+              then pkgs.pcscliteWithPolkit
+              else pkgs.pcsclite;
+
+  pluginEnv = pkgs.buildEnv {
+    name = "pcscd-plugins";
+    paths = map (p: "${p}/pcsc/drivers") config.services.pcscd.plugins;
+  };
+
+in
+{
+
+  ###### interface
+
+  options.services.pcscd = {
+    enable = mkEnableOption (lib.mdDoc "PCSC-Lite daemon");
+
+    plugins = mkOption {
+      type = types.listOf types.package;
+      defaultText = literalExpression "[ pkgs.ccid ]";
+      example = literalExpression "[ pkgs.pcsc-cyberjack ]";
+      description = lib.mdDoc "Plugin packages to be used for PCSC-Lite.";
+    };
+
+    readerConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        FRIENDLYNAME      "Some serial reader"
+        DEVICENAME        /dev/ttyS0
+        LIBPATH           /path/to/serial_reader.so
+        CHANNELID         1
+      '';
+      description = lib.mdDoc ''
+        Configuration for devices that aren't hotpluggable.
+
+        See {manpage}`reader.conf(5)` for valid options.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.pcscd.enable {
+
+    environment.etc."reader.conf".source = cfgFile;
+
+    environment.systemPackages = [ package ];
+    systemd.packages = [ (getBin package) ];
+
+    services.pcscd.plugins = [ pkgs.ccid ];
+
+    systemd.sockets.pcscd.wantedBy = [ "sockets.target" ];
+
+    systemd.services.pcscd = {
+      environment.PCSCLITE_HP_DROPDIR = pluginEnv;
+      restartTriggers = [ "/etc/reader.conf" ];
+
+      # If the cfgFile is empty and not specified (in which case the default
+      # /etc/reader.conf is assumed), pcscd will happily start going through the
+      # entire confdir (/etc in our case) looking for a config file and try to
+      # parse everything it finds. Doesn't take a lot of imagination to see how
+      # well that works. It really shouldn't do that to begin with, but to work
+      # around it, we force the path to the cfgFile.
+      #
+      # https://github.com/NixOS/nixpkgs/issues/121088
+      serviceConfig.ExecStart = [ "" "${getBin package}/bin/pcscd -f -x -c ${cfgFile}" ];
+    };
+  };
+}
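+
+# Usage sketch (hypothetical host configuration, for illustration only): the
+# CCID driver is added by default, extra reader drivers go into `plugins`.
+#
+#   services.pcscd = {
+#     enable = true;
+#     plugins = [ pkgs.pcsc-cyberjack ];
+#   };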
diff --git a/nixpkgs/nixos/modules/services/hardware/pommed.nix b/nixpkgs/nixos/modules/services/hardware/pommed.nix
new file mode 100644
index 000000000000..a71004c1767c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/pommed.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.hardware.pommed;
+    defaultConf = "${pkgs.pommed_light}/etc/pommed.conf.mactel";
+in {
+
+  options = {
+
+    services.hardware.pommed = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to use the pommed tool to handle Apple laptop
+          keyboard hotkeys.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          The path to the {file}`pommed.conf` file. Leave
+          this at null to use the default config file
+          ({file}`/etc/pommed.conf.mactel`). See the
+          files {file}`/etc/pommed.conf.mactel` and
+          {file}`/etc/pommed.conf.pmac` for examples to
+          build on.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.polkit pkgs.pommed_light ];
+
+    environment.etc."pommed.conf".source =
+      if cfg.configFile == null then defaultConf else cfg.configFile;
+
+    systemd.services.pommed = {
+      description = "Pommed Apple Hotkeys Daemon";
+      wantedBy = [ "multi-user.target" ];
+      script = "${pkgs.pommed_light}/bin/pommed -f";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/power-profiles-daemon.nix b/nixpkgs/nixos/modules/services/hardware/power-profiles-daemon.nix
new file mode 100644
index 000000000000..101da01b4a71
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/power-profiles-daemon.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.power-profiles-daemon;
+  package = pkgs.power-profiles-daemon;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.power-profiles-daemon = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable power-profiles-daemon, a DBus daemon that allows
+          changing system behavior based upon user-selected power profiles.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = !config.services.tlp.enable;
+        message = ''
+          You have set services.power-profiles-daemon.enable = true;
+          which conflicts with services.tlp.enable = true;
+        '';
+      }
+    ];
+
+    environment.systemPackages = [ package ];
+
+    services.dbus.packages = [ package ];
+
+    services.udev.packages = [ package ];
+
+    systemd.packages = [ package ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/rasdaemon.nix b/nixpkgs/nixos/modules/services/hardware/rasdaemon.nix
new file mode 100644
index 000000000000..a1334684b7d5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/rasdaemon.nix
@@ -0,0 +1,170 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.hardware.rasdaemon;
+
+in
+{
+  options.hardware.rasdaemon = {
+
+    enable = mkEnableOption (lib.mdDoc "RAS logging daemon");
+
+    record = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "record events via sqlite3, required for ras-mc-ctl";
+    };
+
+    mainboard = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "Custom mainboard description, see {manpage}`ras-mc-ctl(8)` for more details.";
+      example = ''
+        vendor = ASRock
+        model = B450M Pro4
+
+        # it should default to such values from
+        # /sys/class/dmi/id/board_[vendor|name]
+        # alternatively one can supply a script
+        # that returns the same format as above
+
+        script = <path to script>
+      '';
+    };
+
+    # TODO, accept `rasdaemon.labels = " ";` or `rasdaemon.labels = { dell = " "; asrock = " "; };'
+
+    labels = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "Additional memory module label descriptions to be placed in /etc/ras/dimm_labels.d/labels";
+      example = ''
+        # vendor and model may be shown by 'ras-mc-ctl --mainboard'
+        vendor: ASRock
+          product: To Be Filled By O.E.M.
+          model: B450M Pro4
+            # these labels are names for the motherboard slots
+            # the numbers may be shown by `ras-mc-ctl --error-count`
+            # they are mc:csrow:channel
+            DDR4_A1: 0.2.0;  DDR4_B1: 0.2.1;
+            DDR4_A2: 0.3.0;  DDR4_B2: 0.3.1;
+      '';
+    };
+
+    config = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        rasdaemon configuration, currently only used for CE PFA.
+        For details, read the comments in rasdaemon.outPath/etc/sysconfig/rasdaemon
+      '';
+      example = ''
+        # defaults from included config
+        PAGE_CE_REFRESH_CYCLE="24h"
+        PAGE_CE_THRESHOLD="50"
+        PAGE_CE_ACTION="soft"
+      '';
+    };
+
+    extraModules = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc "extra kernel modules to load";
+      example = [ "i7core_edac" ];
+    };
+
+    testing = mkEnableOption (lib.mdDoc "error injection infrastructure");
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.etc = {
+      "ras/mainboard" = {
+        enable = cfg.mainboard != "";
+        text = cfg.mainboard;
+      };
+    # TODO, handle multiple cfg.labels.brand = " ";
+      "ras/dimm_labels.d/labels" = {
+        enable = cfg.labels != "";
+        text = cfg.labels;
+      };
+      "sysconfig/rasdaemon" = {
+        enable = cfg.config != "";
+        text = cfg.config;
+      };
+    };
+    environment.systemPackages = [ pkgs.rasdaemon ]
+      ++ optionals (cfg.testing) (with pkgs.error-inject; [
+        edac-inject
+        mce-inject
+        aer-inject
+      ]);
+
+    boot.initrd.kernelModules = cfg.extraModules
+      ++ optionals (cfg.testing) [
+        # edac_core and amd64_edac should get loaded automatically
+        # i7core_edac may not be, and may not be required, but should load successfully
+        "edac_core"
+        "amd64_edac"
+        "i7core_edac"
+        "mce-inject"
+        "aer-inject"
+      ];
+
+    boot.kernelPatches = optionals (cfg.testing) [{
+      name = "rasdaemon-tests";
+      patch = null;
+      extraConfig = ''
+        EDAC_DEBUG y
+        X86_MCE_INJECT y
+
+        PCIEPORTBUS y
+        PCIEAER y
+        PCIEAER_INJECT y
+      '';
+    }];
+
+    # I tried to set up a group for this,
+    # but rasdaemon appears to need higher permissions:
+    # `rasdaemon: Can't locate a mounted debugfs`
+
+    # most of this is taken from src/misc/ in the rasdaemon sources
+    systemd.services = {
+      rasdaemon = {
+        description = "the RAS logging daemon";
+        documentation = [ "man:rasdaemon(1)" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          StateDirectory = optionalString (cfg.record) "rasdaemon";
+
+          ExecStart = "${pkgs.rasdaemon}/bin/rasdaemon --foreground"
+            + optionalString (cfg.record) " --record";
+          ExecStop = "${pkgs.rasdaemon}/bin/rasdaemon --disable";
+          Restart = "on-abort";
+
+          # src/misc/rasdaemon.service.in shows this:
+          # ExecStartPost = ${pkgs.rasdaemon}/bin/rasdaemon --enable
+          # but that results in unpredictable existence of the database
+          # and everything seems to be enabled without this...
+        };
+      };
+      ras-mc-ctl = mkIf (cfg.labels != "") {
+        description = "register DIMM labels on startup";
+        documentation = [ "man:ras-mc-ctl(8)" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkgs.rasdaemon}/bin/ras-mc-ctl --register-labels";
+          RemainAfterExit = true;
+        };
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.evils ];
+
+}
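+
+# Usage sketch (hypothetical host configuration, for illustration only):
+# record RAS events (the default) and load the EDAC module from the
+# `extraModules` example.
+#
+#   hardware.rasdaemon = {
+#     enable = true;
+#     extraModules = [ "i7core_edac" ];
+#   };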
diff --git a/nixpkgs/nixos/modules/services/hardware/ratbagd.nix b/nixpkgs/nixos/modules/services/hardware/ratbagd.nix
new file mode 100644
index 000000000000..c939d5e40a24
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/ratbagd.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ratbagd;
+in
+{
+  ###### interface
+
+  options = {
+    services.ratbagd = {
+      enable = mkEnableOption (lib.mdDoc "ratbagd for configuring gaming mice");
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    # Give users access to the "ratbagctl" tool
+    environment.systemPackages = [ pkgs.libratbag ];
+
+    services.dbus.packages = [ pkgs.libratbag ];
+
+    systemd.packages = [ pkgs.libratbag ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/sane.nix b/nixpkgs/nixos/modules/services/hardware/sane.nix
new file mode 100644
index 000000000000..2cac2e8e8bb4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/sane.nix
@@ -0,0 +1,211 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  pkg = pkgs.sane-backends.override {
+    scanSnapDriversUnfree = config.hardware.sane.drivers.scanSnap.enable;
+    scanSnapDriversPackage = config.hardware.sane.drivers.scanSnap.package;
+  };
+
+  sanedConf = pkgs.writeTextFile {
+    name = "saned.conf";
+    destination = "/etc/sane.d/saned.conf";
+    text = ''
+      localhost
+      ${config.services.saned.extraConfig}
+    '';
+  };
+
+  netConf = pkgs.writeTextFile {
+    name = "net.conf";
+    destination = "/etc/sane.d/net.conf";
+    text = ''
+      ${lib.optionalString config.services.saned.enable "localhost"}
+      ${config.hardware.sane.netConf}
+    '';
+  };
+
+  env = {
+    SANE_CONFIG_DIR = "/etc/sane-config";
+    LD_LIBRARY_PATH = [ "/etc/sane-libs" ];
+  };
+
+  backends = [ pkg netConf ] ++ optional config.services.saned.enable sanedConf ++ config.hardware.sane.extraBackends;
+  saneConfig = pkgs.mkSaneConfig { paths = backends; inherit (config.hardware.sane) disabledDefaultBackends; };
+
+  enabled = config.hardware.sane.enable || config.services.saned.enable;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    hardware.sane.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable support for SANE scanners.
+
+        ::: {.note}
+        Users in the "scanner" group will gain access to the scanner, or the "lp" group if it's also a printer.
+        :::
+      '';
+    };
+
+    hardware.sane.snapshot = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Use a development snapshot of SANE scanner drivers.";
+    };
+
+    hardware.sane.extraBackends = mkOption {
+      type = types.listOf types.path;
+      default = [];
+      description = lib.mdDoc ''
+        Packages providing extra SANE backends to enable.
+
+        ::: {.note}
+        The example contains the package for HP scanners, and the package for
+        Apple AirScan and Microsoft WSD support (supports many
+        vendors/devices).
+        :::
+      '';
+      example = literalExpression "[ pkgs.hplipWithPlugin pkgs.sane-airscan ]";
+    };
+
+    hardware.sane.disabledDefaultBackends = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "v4l" ];
+      description = lib.mdDoc ''
+        Names of backends which are enabled by default but should be disabled.
+        See `$SANE_CONFIG_DIR/dll.conf` for the list of possible names.
+      '';
+    };
+
+    hardware.sane.configDir = mkOption {
+      type = types.str;
+      internal = true;
+      description = lib.mdDoc "The value of SANE_CONFIG_DIR.";
+    };
+
+    hardware.sane.netConf = mkOption {
+      type = types.lines;
+      default = "";
+      example = "192.168.0.16";
+      description = lib.mdDoc ''
+        Network hosts that should be probed for remote scanners.
+      '';
+    };
+
+    hardware.sane.drivers.scanSnap.enable = mkOption {
+      type = types.bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc ''
+        Whether to enable drivers for the Fujitsu ScanSnap scanners.
+
+        The driver files are unfree and extracted from the Windows driver image.
+      '';
+    };
+
+    hardware.sane.drivers.scanSnap.package = mkOption {
+      type = types.package;
+      default = pkgs.sane-drivers.epjitsu;
+      defaultText = literalExpression "pkgs.sane-drivers.epjitsu";
+      description = lib.mdDoc ''
+        Epjitsu driver package to use. Useful if you want to extract the driver files yourself.
+
+        The process is described in the `/etc/sane.d/epjitsu.conf` file in
+        the `sane-backends` package.
+      '';
+    };
+
+    hardware.sane.openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open ports needed for discovery of scanners on the local network, e.g.
+        needed for Canon scanners (BJNP protocol).
+      '';
+    };
+
+    services.saned.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable saned network daemon for remote connection to scanners.
+
+        saned runs as the `scanner` user; to allow
+        access to hardware that is not accessible to the `scanner` group,
+        add the needed groups to this user.
+      '';
+    };
+
+    services.saned.extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = "192.168.0.0/24";
+      description = lib.mdDoc ''
+        Extra saned configuration lines.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge [
+    (mkIf enabled {
+      hardware.sane.configDir = mkDefault "${saneConfig}/etc/sane.d";
+
+      environment.systemPackages = backends;
+      environment.sessionVariables = env;
+      environment.etc."sane-config".source = config.hardware.sane.configDir;
+      environment.etc."sane-libs".source = "${saneConfig}/lib/sane";
+      services.udev.packages = backends;
+
+      users.groups.scanner.gid = config.ids.gids.scanner;
+      networking.firewall.allowedUDPPorts = mkIf config.hardware.sane.openFirewall [ 8612 ];
+    })
+
+    (mkIf config.services.saned.enable {
+      networking.firewall.connectionTrackingModules = [ "sane" ];
+
+      systemd.services."saned@" = {
+        description = "Scanner Service";
+        environment = mapAttrs (name: val: toString val) env;
+        serviceConfig = {
+          User = "scanner";
+          Group = "scanner";
+          ExecStart = "${pkg}/bin/saned";
+        };
+      };
+
+      systemd.sockets.saned = {
+        description = "saned incoming socket";
+        wantedBy = [ "sockets.target" ];
+        listenStreams = [ "0.0.0.0:6566" "[::]:6566" ];
+        socketConfig = {
+          # saned needs to distinguish between IPv4 and IPv6 to open matching data sockets.
+          BindIPv6Only = "ipv6-only";
+          Accept = true;
+          MaxConnections = 64;
+        };
+      };
+
+      users.users.scanner = {
+        uid = config.ids.uids.scanner;
+        group = "scanner";
+        extraGroups = [ "lp" ] ++ optionals config.services.avahi.enable [ "avahi" ];
+      };
+    })
+  ];
+
+}
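+
+# Usage sketch (hypothetical host configuration, for illustration only):
+# local scanning via the airscan backend plus saned exported to the LAN from
+# the option examples; the user name "alice" is a placeholder.
+#
+#   hardware.sane = {
+#     enable = true;
+#     extraBackends = [ pkgs.sane-airscan ];
+#   };
+#   services.saned = {
+#     enable = true;
+#     extraConfig = "192.168.0.0/24";
+#   };
+#   users.users.alice.extraGroups = [ "scanner" "lp" ];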
diff --git a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan4.nix b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan4.nix
new file mode 100644
index 000000000000..e737a4ce20de
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan4.nix
@@ -0,0 +1,112 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.sane.brscan4;
+
+  netDeviceList = attrValues cfg.netDevices;
+
+  etcFiles = pkgs.callPackage ./brscan4_etc_files.nix { netDevices = netDeviceList; };
+
+  netDeviceOpts = { name, ... }: {
+
+    options = {
+
+      name = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The friendly name you give to the network device. If undefined,
+          the name of the attribute will be used.
+        '';
+
+        example = "office1";
+      };
+
+      model = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The model of the network device.
+        '';
+
+        example = "MFC-7860DW";
+      };
+
+      ip = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          The ip address of the device. If undefined, you will have to
+          provide a nodename.
+        '';
+
+        example = "192.168.1.2";
+      };
+
+      nodename = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          The node name of the device. If undefined, you will have to
+          provide an ip.
+        '';
+
+        example = "BRW0080927AFBCE";
+      };
+
+    };
+
+
+    config =
+      { name = mkDefault name;
+      };
+  };
+
+in
+
+{
+  options = {
+
+    hardware.sane.brscan4.enable =
+      mkEnableOption (lib.mdDoc "Brother's brscan4 scan backend") // {
+      description = lib.mdDoc ''
+        When enabled, will automatically register the "brscan4" sane
+        backend and bring configuration files to their expected location.
+      '';
+    };
+
+    hardware.sane.brscan4.netDevices = mkOption {
+      default = {};
+      example =
+        { office1 = { model = "MFC-7860DW"; ip = "192.168.1.2"; };
+          office2 = { model = "MFC-7860DW"; nodename = "BRW0080927AFBCE"; };
+        };
+      type = with types; attrsOf (submodule netDeviceOpts);
+      description = lib.mdDoc ''
+        The list of network devices that will be registered against the brscan4
+        sane backend.
+      '';
+    };
+  };
+
+  config = mkIf (config.hardware.sane.enable && cfg.enable) {
+
+    hardware.sane.extraBackends = [
+      pkgs.brscan4
+    ];
+
+    environment.etc."opt/brother/scanner/brscan4" =
+      { source = "${etcFiles}/etc/opt/brother/scanner/brscan4"; };
+
+    assertions = [
+      { assertion = all (x: !(null != x.ip && null != x.nodename)) netDeviceList;
+        message = ''
+          When describing a network device as part of the attribute list
+          `hardware.sane.brscan4.netDevices`, only one of its `ip` or `nodename`
+          attributes should be specified, not both!
+        '';
+      }
+    ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan4_etc_files.nix b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan4_etc_files.nix
new file mode 100644
index 000000000000..f76ab701c5b9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan4_etc_files.nix
@@ -0,0 +1,69 @@
+{ stdenv, lib, brscan4, netDevices ? [] }:
+
+/*
+
+Testing
+-------
+
+No net devices:
+
+~~~
+nix-shell -E 'with import <nixpkgs> { }; brscan4-etc-files'
+~~~
+
+Two net devices:
+
+~~~
+nix-shell -E 'with import <nixpkgs> { }; brscan4-etc-files.override{netDevices=[{name="a"; model="MFC-7860DW"; nodename="BRW0080927AFBCE";} {name="b"; model="MFC-7860DW"; ip="192.168.1.2";}];}'
+~~~
+
+*/
+
+let
+
+  addNetDev = nd: ''
+    brsaneconfig4 -a \
+    name="${nd.name}" \
+    model="${nd.model}" \
+    ${if (lib.hasAttr "nodename" nd && nd.nodename != null) then
+      ''nodename="${nd.nodename}"'' else
+      ''ip="${nd.ip}"''}'';
+  addAllNetDev = xs: lib.concatStringsSep "\n" (map addNetDev xs);
+in
+
+stdenv.mkDerivation {
+
+  pname = "brscan4-etc-files";
+  version = "0.4.3-3";
+  src = "${brscan4}/opt/brother/scanner/brscan4";
+
+  nativeBuildInputs = [ brscan4 ];
+
+  dontConfigure = true;
+
+  buildPhase = ''
+    TARGET_DIR="$out/etc/opt/brother/scanner/brscan4"
+    mkdir -p "$TARGET_DIR"
+    cp -rp "./models4" "$TARGET_DIR"
+    cp -rp "./Brsane4.ini" "$TARGET_DIR"
+    cp -rp "./brsanenetdevice4.cfg" "$TARGET_DIR"
+
+    export BRSANENETDEVICE4_CFG_FILENAME="$TARGET_DIR/brsanenetdevice4.cfg"
+
+    printf '${addAllNetDev netDevices}\n'
+
+    ${addAllNetDev netDevices}
+  '';
+
+  dontInstall = true;
+  dontStrip = true;
+  dontPatchELF = true;
+
+  meta = with lib; {
+    description = "Brother brscan4 sane backend driver etc files";
+    homepage = "http://www.brother.com";
+    platforms = platforms.linux;
+    license = licenses.unfree;
+    maintainers = with maintainers; [ jraygauthier ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix
new file mode 100644
index 000000000000..d29e0f542f55
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5.nix
@@ -0,0 +1,110 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.sane.brscan5;
+
+  netDeviceList = attrValues cfg.netDevices;
+
+  etcFiles = pkgs.callPackage ./brscan5_etc_files.nix { netDevices = netDeviceList; };
+
+  netDeviceOpts = { name, ... }: {
+
+    options = {
+
+      name = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The friendly name you give to the network device. If undefined,
+          the name of the attribute will be used.
+        '';
+
+        example = "office1";
+      };
+
+      model = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The model of the network device.
+        '';
+
+        example = "ADS-1200";
+      };
+
+      ip = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          The ip address of the device. If undefined, you will have to
+          provide a nodename.
+        '';
+
+        example = "192.168.1.2";
+      };
+
+      nodename = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          The node name of the device. If undefined, you will have to
+          provide an ip.
+        '';
+
+        example = "BRW0080927AFBCE";
+      };
+
+    };
+
+
+    config =
+      { name = mkDefault name;
+      };
+  };
+
+in
+
+{
+  options = {
+
+    hardware.sane.brscan5.enable =
+      mkEnableOption (lib.mdDoc "the Brother brscan5 sane backend");
+
+    hardware.sane.brscan5.netDevices = mkOption {
+      default = {};
+      example =
+        { office1 = { model = "MFC-7860DW"; ip = "192.168.1.2"; };
+          office2 = { model = "MFC-7860DW"; nodename = "BRW0080927AFBCE"; };
+        };
+      type = with types; attrsOf (submodule netDeviceOpts);
+      description = lib.mdDoc ''
+        The list of network devices that will be registered against the brscan5
+        sane backend.
+      '';
+    };
+  };
+
+  config = mkIf (config.hardware.sane.enable && cfg.enable) {
+
+    hardware.sane.extraBackends = [
+      pkgs.brscan5
+    ];
+
+    environment.etc."opt/brother/scanner/brscan5" =
+      { source = "${etcFiles}/etc/opt/brother/scanner/brscan5"; };
+    environment.etc."opt/brother/scanner/models" =
+      { source = "${etcFiles}/etc/opt/brother/scanner/brscan5/models"; };
+    environment.etc."sane.d/dll.d/brother5.conf".source = "${pkgs.brscan5}/etc/sane.d/dll.d/brother.conf";
+
+    assertions = [
+      { assertion = all (x: !(null != x.ip && null != x.nodename)) netDeviceList;
+        message = ''
+          When describing a network device as part of the attribute list
+          `hardware.sane.brscan5.netDevices`, only one of its `ip` or `nodename`
+          attributes should be specified, not both!
+        '';
+      }
+    ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5_etc_files.nix b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5_etc_files.nix
new file mode 100644
index 000000000000..432f0316a4fa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/brscan5_etc_files.nix
@@ -0,0 +1,77 @@
+{ stdenv, lib, brscan5, netDevices ? [] }:
+
+/*
+
+Testing
+-------
+From nixpkgs repo
+
+No net devices:
+
+~~~
+nix-build -E 'let pkgs = import ./. {};
+                  brscan5-etc-files = pkgs.callPackage (import ./nixos/modules/services/hardware/sane_extra_backends/brscan5_etc_files.nix) {};
+              in brscan5-etc-files'
+~~~
+
+Two net devices:
+
+~~~
+nix-build -E 'let pkgs = import ./. {};
+                  brscan5-etc-files = pkgs.callPackage (import ./nixos/modules/services/hardware/sane_extra_backends/brscan5_etc_files.nix) {};
+              in brscan5-etc-files.override {
+                   netDevices = [
+                     {name="a"; model="ADS-1200"; nodename="BRW0080927AFBCE";}
+                     {name="b"; model="ADS-1200"; ip="192.168.1.2";}
+                   ];
+              }'
+~~~
+
+*/
+
+let
+
+  addNetDev = nd: ''
+    brsaneconfig5 -a \
+    name="${nd.name}" \
+    model="${nd.model}" \
+    ${if (lib.hasAttr "nodename" nd && nd.nodename != null) then
+      ''nodename="${nd.nodename}"'' else
+      ''ip="${nd.ip}"''}'';
+  addAllNetDev = xs: lib.concatStringsSep "\n" (map addNetDev xs);
+in
+
+stdenv.mkDerivation {
+
+  name = "brscan5-etc-files";
+  version = "1.2.6-0";
+  src = "${brscan5}/opt/brother/scanner/brscan5";
+
+  nativeBuildInputs = [ brscan5 ];
+
+  dontConfigure = true;
+
+  buildPhase = ''
+    TARGET_DIR="$out/etc/opt/brother/scanner/brscan5"
+    mkdir -p "$TARGET_DIR"
+    cp -rp "./models" "$TARGET_DIR"
+    cp -rp "./brscan5.ini" "$TARGET_DIR"
+    cp -rp "./brsanenetdevice.cfg" "$TARGET_DIR"
+
+    export NIX_REDIRECTS="/etc/opt/brother/scanner/brscan5/=$TARGET_DIR/"
+
+    printf '${addAllNetDev netDevices}\n'
+
+    ${addAllNetDev netDevices}
+  '';
+
+  dontInstall = true;
+
+  meta = with lib; {
+    description = "Brother brscan5 sane backend driver etc files";
+    homepage = "https://www.brother.com";
+    platforms = platforms.linux;
+    license = licenses.unfree;
+    maintainers = with maintainers; [ mattchrist ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/dsseries.nix b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/dsseries.nix
new file mode 100644
index 000000000000..5b05694abc01
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/sane_extra_backends/dsseries.nix
@@ -0,0 +1,26 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  options = {
+
+    hardware.sane.dsseries.enable =
+      mkEnableOption (lib.mdDoc "Brother DSSeries scan backend") // {
+      description = lib.mdDoc ''
+        When enabled, will automatically register the "dsseries" SANE backend.
+
+        This supports the Brother DSmobile scanner series, including the
+        DS-620, DS-720D, DS-820W, and DS-920DW scanners.
+      '';
+    };
+  };
+
+  config = mkIf (config.hardware.sane.enable && config.hardware.sane.dsseries.enable) {
+
+    hardware.sane.extraBackends = [ pkgs.dsseries ];
+    services.udev.packages = [ pkgs.dsseries ];
+    boot.kernelModules = [ "sg" ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/spacenavd.nix b/nixpkgs/nixos/modules/services/hardware/spacenavd.nix
new file mode 100644
index 000000000000..36f132439377
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/spacenavd.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.hardware.spacenavd;
+
+in {
+
+  options = {
+    hardware.spacenavd = {
+      enable = mkEnableOption (lib.mdDoc "spacenavd to support 3DConnexion devices");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.spacenavd = {
+      description = "Daemon for the Spacenavigator 6DOF mice by 3Dconnexion";
+      wantedBy = [ "graphical.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.spacenavd}/bin/spacenavd -d -l syslog";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/supergfxd.nix b/nixpkgs/nixos/modules/services/hardware/supergfxd.nix
new file mode 100644
index 000000000000..f7af993d7238
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/supergfxd.nix
@@ -0,0 +1,42 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.supergfxd;
+  json = pkgs.formats.json { };
+in
+{
+  options = {
+    services.supergfxd = {
+      enable = lib.mkEnableOption (lib.mdDoc "the supergfxd service");
+
+      settings = lib.mkOption {
+        type = lib.types.nullOr json.type;
+        default = null;
+        description = lib.mdDoc ''
+          The content of /etc/supergfxd.conf.
+          See https://gitlab.com/asus-linux/supergfxctl/#config-options-etcsupergfxdconf.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.supergfxctl ];
+
+    environment.etc."supergfxd.conf" = lib.mkIf (cfg.settings != null) {
+      source = json.generate "supergfxd.conf" cfg.settings;
+      mode = "0644";
+    };
+
+    services.dbus.enable = true;
+
+    systemd.packages = [ pkgs.supergfxctl ];
+    systemd.services.supergfxd.wantedBy = [ "multi-user.target" ];
+    systemd.services.supergfxd.path = [ pkgs.kmod pkgs.pciutils ];
+
+    services.dbus.packages = [ pkgs.supergfxctl ];
+    services.udev.packages = [ pkgs.supergfxctl ];
+  };
+
+  meta.maintainers = pkgs.supergfxctl.meta.maintainers;
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/tcsd.nix b/nixpkgs/nixos/modules/services/hardware/tcsd.nix
new file mode 100644
index 000000000000..f22924d410d5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/tcsd.nix
@@ -0,0 +1,162 @@
+# tcsd daemon.
+
+{ config, options, pkgs, lib, ... }:
+
+with lib;
+let
+
+  cfg = config.services.tcsd;
+  opt = options.services.tcsd;
+
+  tcsdConf = pkgs.writeText "tcsd.conf" ''
+    port = 30003
+    num_threads = 10
+    system_ps_file = ${cfg.stateDir}/system.data
+    # This is the log of each individual measurement done by the system.
+    # By re-calculating the PCR registers based on this information, even
+    # finer details about the measured environment can be inferred than
+    # what is available directly from the PCR registers.
+    firmware_log_file = /sys/kernel/security/tpm0/binary_bios_measurements
+    kernel_log_file = /sys/kernel/security/ima/binary_runtime_measurements
+    firmware_pcrs = ${cfg.firmwarePCRs}
+    kernel_pcrs = ${cfg.kernelPCRs}
+    platform_cred = ${cfg.platformCred}
+    conformance_cred = ${cfg.conformanceCred}
+    endorsement_cred = ${cfg.endorsementCred}
+    #remote_ops = create_key,random
+    #host_platform_class = server_12
+    #all_platform_classes = pc_11,pc_12,mobile_12
+  '';
+
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.tcsd = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable tcsd, a Trusted Computing management service
+          that provides the TCG Software Stack (TSS). The tcsd daemon is
+          the only portal to the Trusted Platform Module (TPM), a hardware
+          chip on the motherboard.
+        '';
+      };
+
+      user = mkOption {
+        default = "tss";
+        type = types.str;
+        description = lib.mdDoc "User account under which tcsd runs.";
+      };
+
+      group = mkOption {
+        default = "tss";
+        type = types.str;
+        description = lib.mdDoc "Group account under which tcsd runs.";
+      };
+
+      stateDir = mkOption {
+        default = "/var/lib/tpm";
+        type = types.path;
+        description = lib.mdDoc ''
+          The location of the system persistent storage file.
+          The system persistent storage file holds keys and data across
+          restarts of the TCSD and system reboots.
+        '';
+      };
+
+      firmwarePCRs = mkOption {
+        default = "0,1,2,3,4,5,6,7";
+        type = types.str;
+        description = lib.mdDoc "PCR indices used in the TPM for firmware measurements.";
+      };
+
+      kernelPCRs = mkOption {
+        default = "8,9,10,11,12";
+        type = types.str;
+        description = lib.mdDoc "PCR indices used in the TPM for kernel measurements.";
+      };
+
+      platformCred = mkOption {
+        default = "${cfg.stateDir}/platform.cert";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/platform.cert"'';
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to the platform credential for your TPM. Your TPM
+          manufacturer may have provided you with a set of credentials
+          (certificates) that should be used when creating identities
+          using your TPM. When a user of your TPM makes an identity,
+          this credential will be encrypted as part of that process.
+          See the 1.1b TPM Main specification section 9.3 for information
+          on this process. '';
+      };
+
+      conformanceCred = mkOption {
+        default = "${cfg.stateDir}/conformance.cert";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/conformance.cert"'';
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to the conformance credential for your TPM.
+          See also the platformCred option'';
+      };
+
+      endorsementCred = mkOption {
+        default = "${cfg.stateDir}/endorsement.cert";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/endorsement.cert"'';
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to the endorsement credential for your TPM.
+          See also the platformCred option'';
+      };
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.trousers ];
+
+    services.udev.extraRules = ''
+      # Give tcsd ownership of all TPM devices
+      KERNEL=="tpm[0-9]*", MODE="0660", OWNER="${cfg.user}", GROUP="${cfg.group}"
+      # Tag TPM devices to create a .device unit for tcsd to depend on
+      ACTION=="add", KERNEL=="tpm[0-9]*", TAG+="systemd"
+    '';
+
+    systemd.tmpfiles.rules = [
+      # Initialise the state directory
+      "d ${cfg.stateDir} 0770 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.tcsd = {
+      description = "Manager for Trusted Computing resources";
+      documentation = [ "man:tcsd(8)" ];
+
+      requires = [ "dev-tpm0.device" ];
+      after = [ "dev-tpm0.device" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${pkgs.trousers}/sbin/tcsd -f -c ${tcsdConf}";
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "tss") {
+      tss = {
+        group = "tss";
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "tss") { tss = {}; };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/thermald.nix b/nixpkgs/nixos/modules/services/hardware/thermald.nix
new file mode 100644
index 000000000000..6b694ede5885
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/thermald.nix
@@ -0,0 +1,57 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.thermald;
+in
+{
+  ###### interface
+  options = {
+    services.thermald = {
+      enable = mkEnableOption (lib.mdDoc "thermald, the temperature management daemon");
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable debug logging.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "the thermald manual configuration file.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.thermald;
+        defaultText = literalExpression "pkgs.thermald";
+        description = lib.mdDoc "Which thermald package to use.";
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.dbus.packages = [ cfg.package ];
+
+    systemd.services.thermald = {
+      description = "Thermal Daemon Service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        PrivateNetwork = true;
+        ExecStart = ''
+          ${cfg.package}/sbin/thermald \
+            --no-daemon \
+            ${optionalString cfg.debug "--loglevel=debug"} \
+            ${optionalString (cfg.configFile != null) "--config-file ${cfg.configFile}"} \
+            --dbus-enable \
+            --adaptive
+        '';
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/thinkfan.nix b/nixpkgs/nixos/modules/services/hardware/thinkfan.nix
new file mode 100644
index 000000000000..8fa7b456f20e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/thinkfan.nix
@@ -0,0 +1,230 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.thinkfan;
+  settingsFormat = pkgs.formats.yaml { };
+  configFile = settingsFormat.generate "thinkfan.yaml" cfg.settings;
+  thinkfan = pkgs.thinkfan.override { inherit (cfg) smartSupport; };
+
+  # fan-speed and temperature levels
+  levelType = with types;
+    let
+      tuple = ts: mkOptionType {
+        name = "tuple";
+        merge = mergeOneOption;
+        check = xs: all id (zipListsWith (t: x: t.check x) ts xs);
+        description = "tuple of" + concatMapStrings (t: " (${t.description})") ts;
+      };
+      level = ints.unsigned;
+      special = enum [ "level auto" "level full-speed" "level disengaged" ];
+    in
+      tuple [ (either level special) level level ];
+
+  # sensor or fan config
+  sensorType = name: types.submodule {
+    freeformType = types.attrsOf settingsFormat.type;
+    options = {
+      type = mkOption {
+        type = types.enum [ "hwmon" "atasmart" "tpacpi" "nvml" ];
+        description = lib.mdDoc ''
+          The ${name} type, can be
+          `hwmon` for standard ${name}s,
+
+          `atasmart` to read the temperature via
+          S.M.A.R.T (requires smartSupport to be enabled),
+
+          `tpacpi` for the legacy thinkpad_acpi driver, or
+
+          `nvml` for the (proprietary) nVidia driver.
+        '';
+      };
+      query = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The query string used to match one or more ${name}s: can be
+          a fullpath to the temperature file (single ${name}) or a fullpath
+          to a driver directory (multiple ${name}s).
+
+          ::: {.note}
+          When multiple ${name}s match, the query can be restricted using the
+          {option}`name` or {option}`indices` options.
+          :::
+        '';
+      };
+      indices = mkOption {
+        type = with types; nullOr (listOf ints.unsigned);
+        default = null;
+        description = lib.mdDoc ''
+          A list of ${name}s to pick in case multiple ${name}s match the query.
+
+          ::: {.note}
+          Indices start from 0.
+          :::
+        '';
+      };
+    } // optionalAttrs (name == "sensor") {
+      correction = mkOption {
+        type = with types; nullOr (listOf int);
+        default = null;
+        description = lib.mdDoc ''
+          A list of values to be added to the temperature of each sensor,
+          can be used to equalize small discrepancies in temperature ratings.
+        '';
+      };
+    };
+  };
+
+  # removes NixOS special and unused attributes
+  sensorToConf = { type, query, ... }@args:
+    (filterAttrs (k: v: v != null && !(elem k ["type" "query"])) args)
+    // { "${type}" = query; };
+
+  syntaxNote = name: ''
+    ::: {.note}
+    This section slightly departs from the thinkfan.conf syntax.
+    The type and path must be specified like this:
+    ```
+      type = "tpacpi";
+      query = "/proc/acpi/ibm/${name}";
+    ```
+    instead of a single declaration like:
+    ```
+      - tpacpi: /proc/acpi/ibm/${name}
+    ```
+    :::
+  '';
+
+in {
+
+  options = {
+
+    services.thinkfan = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable thinkfan, a fan control program.
+
+          ::: {.note}
+          This module targets IBM/Lenovo ThinkPads by default; for
+          other hardware you will have to configure it more carefully.
+          :::
+        '';
+        relatedPackages = [ "thinkfan" ];
+      };
+
+      smartSupport = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to build thinkfan with S.M.A.R.T. support to read temperatures
+          directly from hard disks.
+        '';
+      };
+
+      sensors = mkOption {
+        type = types.listOf (sensorType "sensor");
+        default = [
+          { type = "tpacpi";
+            query = "/proc/acpi/ibm/thermal";
+          }
+        ];
+        description = lib.mdDoc ''
+          List of temperature sensors thinkfan will monitor.
+
+          ${syntaxNote "thermal"}
+        '';
+      };
+
+      fans = mkOption {
+        type = types.listOf (sensorType "fan");
+        default = [
+          { type = "tpacpi";
+            query = "/proc/acpi/ibm/fan";
+          }
+        ];
+        description = lib.mdDoc ''
+          List of fans thinkfan will control.
+
+          ${syntaxNote "fan"}
+        '';
+      };
+
+      levels = mkOption {
+        type = types.listOf levelType;
+        default = [
+          [0  0   55]
+          [1  48  60]
+          [2  50  61]
+          [3  52  63]
+          [6  56  65]
+          [7  60  85]
+          ["level auto" 80 32767]
+        ];
+        description = lib.mdDoc ''
+          [LEVEL LOW HIGH]
+
+          LEVEL is the fan level to use: it can be an integer (0-7 with thinkpad_acpi),
+          "level auto" (to keep the default firmware behavior), "level full-speed" or
+          "level disengaged" (to run the fan as fast as possible).
+          LOW is the temperature at which to step down to the previous level.
+          HIGH is the temperature at which to step up to the next level.
+          All numbers are integers.
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "-b" "0" ];
+        description = lib.mdDoc ''
+          A list of extra command line arguments to pass to thinkfan.
+          Check the thinkfan(1) manpage for available arguments.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.attrsOf settingsFormat.type;
+        default = { };
+        description = lib.mdDoc ''
+          Thinkfan settings. Use this option to configure thinkfan
+          settings not exposed in a NixOS option or to bypass one.
+          Before changing this, read the `thinkfan.conf(5)`
+          manpage and take a look at the example config file at
+          <https://github.com/vmatare/thinkfan/blob/master/examples/thinkfan.yaml>
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ thinkfan ];
+
+    services.thinkfan.settings = mapAttrs (k: v: mkDefault v) {
+      sensors = map sensorToConf cfg.sensors;
+      fans    = map sensorToConf cfg.fans;
+      levels  = cfg.levels;
+    };
+
+    systemd.packages = [ thinkfan ];
+
+    systemd.services = {
+      thinkfan.environment.THINKFAN_ARGS = escapeShellArgs ([ "-c" configFile ] ++ cfg.extraArgs);
+
+      # must be added manually, see issue #81138
+      thinkfan.wantedBy = [ "multi-user.target" ];
+      thinkfan-wakeup.wantedBy = [ "sleep.target" ];
+      thinkfan-sleep.wantedBy = [ "sleep.target" ];
+    };
+
+    boot.extraModprobeConfig = "options thinkpad_acpi experimental=1 fan_control=1";
+
+  };
+}
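+
+# Usage sketch (hypothetical host configuration, for illustration only): a
+# custom [LEVEL LOW HIGH] table as described in the `levels` option.
+#
+#   services.thinkfan = {
+#     enable = true;
+#     levels = [
+#       [ 0            0   60 ]
+#       [ 3            55  70 ]
+#       [ 7            65  85 ]
+#       [ "level auto" 80  32767 ]
+#     ];
+#   };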
diff --git a/nixpkgs/nixos/modules/services/hardware/throttled.nix b/nixpkgs/nixos/modules/services/hardware/throttled.nix
new file mode 100644
index 000000000000..0f1f00348ee8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/throttled.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.throttled;
+in {
+  options = {
+    services.throttled = {
+      enable = mkEnableOption (lib.mdDoc "fix for Intel CPU throttling");
+
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Alternative configuration";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.packages = [ pkgs.throttled ];
+    # The upstream package has this in Install, but that's not enough, see the NixOS manual
+    systemd.services.throttled.wantedBy = [ "multi-user.target" ];
+
+    environment.etc."throttled.conf".source =
+      if cfg.extraConfig != ""
+      then pkgs.writeText "throttled.conf" cfg.extraConfig
+      else "${pkgs.throttled}/etc/throttled.conf";
+
+    hardware.cpu.x86.msr.enable = true;
+    # Kernel 5.9 spams warnings whenever userspace writes to CPU MSRs.
+    # See https://github.com/erpalma/throttled/issues/215
+    hardware.cpu.x86.msr.settings.allow-writes =
+      mkIf (versionAtLeast config.boot.kernelPackages.kernel.version "5.9") "on";
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/tlp.nix b/nixpkgs/nixos/modules/services/hardware/tlp.nix
new file mode 100644
index 000000000000..0b7f98ab6a6d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/tlp.nix
@@ -0,0 +1,124 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.tlp;
+  enableRDW = config.networking.networkmanager.enable;
+  tlp = pkgs.tlp.override { inherit enableRDW; };
+  # TODO: Use this for having proper parameters in the future
+  mkTlpConfig = tlpConfig: generators.toKeyValue {
+    mkKeyValue = generators.mkKeyValueDefault {
+      mkValueString = val:
+        if isList val then "\"" + (toString val) + "\""
+        else toString val;
+    } "=";
+  } tlpConfig;
+in
+{
+  ###### interface
+  options = {
+    services.tlp = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the TLP power management daemon.";
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ bool int float str (listOf str) ]);
+        default = {};
+        example = {
+          SATA_LINKPWR_ON_BAT = "med_power_with_dipm";
+          USB_BLACKLIST_PHONE = 1;
+        };
+        description = lib.mdDoc ''
+          Options passed to TLP. See <https://linrunner.de/tlp> for all supported options.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Verbatim additional configuration variables for TLP.
+          DEPRECATED: use services.tlp.settings instead.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    hardware.cpu.x86.msr.enable = true;
+
+    warnings = optional (cfg.extraConfig != "") ''
+      Using config.services.tlp.extraConfig is deprecated and will become unsupported in a future release. Use config.services.tlp.settings instead.
+    '';
+
+    assertions = [{
+      assertion = cfg.enable -> config.powerManagement.scsiLinkPolicy == null;
+      message = ''
+        `services.tlp.enable` and `config.powerManagement.scsiLinkPolicy` cannot both be set.
+        Set `services.tlp.settings.SATA_LINKPWR_ON_AC` and `services.tlp.settings.SATA_LINKPWR_ON_BAT` instead.
+      '';
+    }];
+
+    environment.etc = {
+      "tlp.conf".text = (mkTlpConfig cfg.settings) + cfg.extraConfig;
+    } // optionalAttrs enableRDW {
+      "NetworkManager/dispatcher.d/99tlp-rdw-nm".source =
+        "${tlp}/usr/lib/NetworkManager/dispatcher.d/99tlp-rdw-nm";
+    };
+
+    environment.systemPackages = [ tlp ];
+
+
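+    # Map the generic powerManagement options onto TLP's own keys, using
+    # mkDefault so that explicit services.tlp.settings values still take precedence.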
+    services.tlp.settings = let
+      cfg = config.powerManagement;
+      maybeDefault = val: lib.mkIf (val != null) (lib.mkDefault val);
+    in {
+      CPU_SCALING_GOVERNOR_ON_AC = maybeDefault cfg.cpuFreqGovernor;
+      CPU_SCALING_GOVERNOR_ON_BAT = maybeDefault cfg.cpuFreqGovernor;
+      CPU_SCALING_MIN_FREQ_ON_AC = maybeDefault cfg.cpufreq.min;
+      CPU_SCALING_MAX_FREQ_ON_AC = maybeDefault cfg.cpufreq.max;
+      CPU_SCALING_MIN_FREQ_ON_BAT = maybeDefault cfg.cpufreq.min;
+      CPU_SCALING_MAX_FREQ_ON_BAT = maybeDefault cfg.cpufreq.max;
+    };
+
+    services.udev.packages = [ tlp ];
+
+    systemd = {
+      # use native tlp instead because it can also differentiate between AC/BAT
+      services.cpufreq.enable = false;
+
+      packages = [ tlp ];
+      # XXX: These must always be disabled/masked according to [1].
+      #
+      # [1]: https://github.com/linrunner/TLP/blob/a9ada09e0821f275ce5f93dc80a4d81a7ff62ae4/tlp-stat.in#L319
+      sockets.systemd-rfkill.enable = false;
+      services.systemd-rfkill.enable = false;
+
+      services.tlp = {
+        # XXX: The service should reload whenever the configuration changes,
+        # otherwise newly set power options remain inactive until reboot (or
+        # a manual unit restart).
+        restartTriggers = [ config.environment.etc."tlp.conf".source ];
+        # XXX: When using systemd.packages (which we do above) the [Install]
+        # section of systemd units does not work (citation needed) so we manually
+        # enforce it here.
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      services.tlp-sleep = {
+        # XXX: When using systemd.packages (which we do above) the [Install]
+        # section of systemd units does not work (citation needed) so we manually
+        # enforce it here.
+        before = [ "sleep.target" ];
+        wantedBy = [ "sleep.target" ];
+        # XXX: `tlp suspend` requires /var/lib/tlp to exist in order to save
+        # some stuff in there. There is no way, that I know of, to do this in
+        # the package itself, so we do it here instead, making sure the unit
+        # won't fail because the save directory doesn't exist.
+        serviceConfig.StateDirectory = "tlp";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/trezord.md b/nixpkgs/nixos/modules/services/hardware/trezord.md
new file mode 100644
index 000000000000..58c244a44bc1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/trezord.md
@@ -0,0 +1,17 @@
+# Trezor {#trezor}
+
+Trezor is an open-source cryptocurrency hardware wallet and security token
+allowing secure storage of private keys.
+
+It offers advanced features such as U2F two-factor authentication, SSH login
+through
+[Trezor SSH agent](https://wiki.trezor.io/Apps:SSH_agent),
+[GPG](https://wiki.trezor.io/GPG) and a
+[password manager](https://wiki.trezor.io/Trezor_Password_Manager).
+For more information, guides and documentation, see <https://wiki.trezor.io>.
+
+To enable Trezor support, add the following to your {file}`configuration.nix`:
+
+    services.trezord.enable = true;
+
+This will add all necessary udev rules and start Trezor Bridge.
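+
+The module also exposes emulator options; a minimal sketch for developing
+against the Trezor emulator instead of physical hardware (21324 is the
+module's default port) would be:
+
+    services.trezord.enable = true;
+    services.trezord.emulator.enable = true;
+    services.trezord.emulator.port = 21324;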
diff --git a/nixpkgs/nixos/modules/services/hardware/trezord.nix b/nixpkgs/nixos/modules/services/hardware/trezord.nix
new file mode 100644
index 000000000000..b2217fc97124
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/trezord.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.trezord;
+in {
+
+  ### docs
+
+  meta = {
+    doc = ./trezord.md;
+  };
+
+  ### interface
+
+  options = {
+    services.trezord = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the Trezor Bridge daemon, for use with Trezor hardware wallets.
+        '';
+      };
+
+      emulator.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable Trezor emulator support.
+        '';
+      };
+
+      emulator.port = mkOption {
+        type = types.port;
+        default = 21324;
+        description = lib.mdDoc ''
+          Listening port for the Trezor emulator.
+        '';
+      };
+    };
+  };
+
+  ### implementation
+
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.trezor-udev-rules ];
+
+    systemd.services.trezord = {
+      description = "Trezor Bridge";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.trezord}/bin/trezord-go ${optionalString cfg.emulator.enable "-e ${builtins.toString cfg.emulator.port}"}";
+        User = "trezord";
+      };
+    };
+
+    users.users.trezord = {
+      group = "trezord";
+      description = "Trezor bridge daemon user";
+      isSystemUser = true;
+    };
+
+    users.groups.trezord = {};
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/services/hardware/triggerhappy.nix b/nixpkgs/nixos/modules/services/hardware/triggerhappy.nix
new file mode 100644
index 000000000000..54eac70643ff
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/triggerhappy.nix
@@ -0,0 +1,122 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.triggerhappy;
+
+  socket = "/run/thd.socket";
+
+  configFile = pkgs.writeText "triggerhappy.conf" ''
+    ${concatMapStringsSep "\n"
+      ({ keys, event, cmd, ... }:
+        ''${concatMapStringsSep "+" (x: "KEY_" + x) keys} ${toString { press = 1; hold = 2; release = 0; }.${event}} ${cmd}''
+      )
+      cfg.bindings}
+    ${cfg.extraConfig}
+  '';
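+  # For example, a binding { keys = [ "PLAYPAUSE" ]; event = "press"; cmd = "mpc toggle"; }
+  # renders roughly as the triggerhappy.conf line:
+  #   KEY_PLAYPAUSE 1 mpc toggle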
+
+  bindingCfg = { ... }: {
+    options = {
+
+      keys = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "List of keys to match.  Key names as defined in linux/input-event-codes.h";
+      };
+
+      event = mkOption {
+        type = types.enum ["press" "hold" "release"];
+        default = "press";
+        description = lib.mdDoc "Event to match.";
+      };
+
+      cmd = mkOption {
+        type = types.str;
+        description = lib.mdDoc "What to run.";
+      };
+
+    };
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.triggerhappy = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the {command}`triggerhappy` hotkey daemon.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "nobody";
+        example = "root";
+        description = lib.mdDoc ''
+          User account under which {command}`triggerhappy` runs.
+        '';
+      };
+
+      bindings = mkOption {
+        type = types.listOf (types.submodule bindingCfg);
+        default = [];
+        example = lib.literalExpression ''
+          [ { keys = ["PLAYPAUSE"];  cmd = "''${pkgs.mpc-cli}/bin/mpc -q toggle"; } ]
+        '';
+        description = lib.mdDoc ''
+          Key bindings for {command}`triggerhappy`.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Literal contents to append to the end of the {command}`triggerhappy` configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.sockets.triggerhappy = {
+      description = "Triggerhappy Socket";
+      wantedBy = [ "sockets.target" ];
+      socketConfig.ListenDatagram = socket;
+    };
+
+    systemd.services.triggerhappy = {
+      wantedBy = [ "multi-user.target" ];
+      description = "Global hotkey daemon";
+      serviceConfig = {
+        ExecStart = "${pkgs.triggerhappy}/bin/thd ${optionalString (cfg.user != "root") "--user ${cfg.user}"} --socket ${socket} --triggers ${configFile} --deviceglob /dev/input/event*";
+      };
+    };
+
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "triggerhappy-udev-rules";
+      destination = "/etc/udev/rules.d/61-triggerhappy.rules";
+      text = ''
+        ACTION=="add", SUBSYSTEM=="input", KERNEL=="event[0-9]*", ATTRS{name}!="triggerhappy", \
+          RUN+="${pkgs.triggerhappy}/bin/th-cmd --socket ${socket} --passfd --udev"
+      '';
+    });
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/tuxedo-rs.nix b/nixpkgs/nixos/modules/services/hardware/tuxedo-rs.nix
new file mode 100644
index 000000000000..0daccfef3a53
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/tuxedo-rs.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.hardware.tuxedo-rs;
+
+in
+{
+  options = {
+    hardware.tuxedo-rs = {
+      enable = mkEnableOption (lib.mdDoc "Rust utilities for interacting with hardware from TUXEDO Computers");
+
+      tailor-gui.enable = mkEnableOption (lib.mdDoc "tailor-gui, an alternative to TUXEDO Control Center, written in Rust");
+    };
+  };
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+      hardware.tuxedo-keyboard.enable = true;
+
+      systemd = {
+        services.tailord = {
+          enable = true;
+          description = "Tuxedo Tailor hardware control service";
+          after = [ "systemd-logind.service" ];
+          wantedBy = [ "multi-user.target" ];
+
+          serviceConfig = {
+            Type = "dbus";
+            BusName = "com.tux.Tailor";
+            ExecStart = "${pkgs.tuxedo-rs}/bin/tailord";
+            Environment = "RUST_BACKTRACE=1";
+            Restart = "on-failure";
+          };
+        };
+      };
+
+      services.dbus.packages = [ pkgs.tuxedo-rs ];
+
+      environment.systemPackages = [ pkgs.tuxedo-rs ];
+    }
+    (mkIf cfg.tailor-gui.enable {
+      environment.systemPackages = [ pkgs.tailor-gui ];
+    })
+  ]);
+
+  meta.maintainers = with maintainers; [ mrcjkb ];
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/udev.nix b/nixpkgs/nixos/modules/services/hardware/udev.nix
new file mode 100644
index 000000000000..08ca7a0d247d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/udev.nix
@@ -0,0 +1,443 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  udev = config.systemd.package;
+
+  cfg = config.services.udev;
+
+  initrdUdevRules = pkgs.runCommand "initrd-udev-rules" {} ''
+    mkdir -p $out/etc/udev/rules.d
+    for f in 60-cdrom_id 60-persistent-storage 75-net-description 80-drivers 80-net-setup-link; do
+      ln -s ${config.boot.initrd.systemd.package}/lib/udev/rules.d/$f.rules $out/etc/udev/rules.d
+    done
+  '';
+
+
+  extraUdevRules = pkgs.writeTextFile {
+    name = "extra-udev-rules";
+    text = cfg.extraRules;
+    destination = "/etc/udev/rules.d/99-local.rules";
+  };
+
+  extraHwdbFile = pkgs.writeTextFile {
+    name = "extra-hwdb-file";
+    text = cfg.extraHwdb;
+    destination = "/etc/udev/hwdb.d/99-local.hwdb";
+  };
+
+  nixosRules = ''
+    # Miscellaneous devices.
+    KERNEL=="kvm",                  MODE="0666"
+
+    # Needed for gpm.
+    SUBSYSTEM=="input", KERNEL=="mice", TAG+="systemd"
+  '';
+
+  nixosInitrdRules = ''
+    # Mark dm devices as db_persist so that they are kept active after switching root
+    SUBSYSTEM=="block", KERNEL=="dm-[0-9]*", ACTION=="add|change", OPTIONS+="db_persist"
+  '';
+
+  # Perform substitutions in all udev rules files.
+  udevRulesFor = { name, udevPackages, udevPath, udev, systemd, binPackages, initrdBin ? null }: pkgs.runCommand name
+    { preferLocalBuild = true;
+      allowSubstitutes = false;
+      packages = unique (map toString udevPackages);
+    }
+    ''
+      mkdir -p $out
+      shopt -s nullglob
+      set +o pipefail
+
+      # Set a reasonable $PATH for programs called by udev rules.
+      echo 'ENV{PATH}="${udevPath}/bin:${udevPath}/sbin"' > $out/00-path.rules
+
+      # Add the udev rules from other packages.
+      for i in $packages; do
+        echo "Adding rules for package $i"
+        for j in $i/{etc,lib}/udev/rules.d/*; do
+          echo "Copying $j to $out/$(basename $j)"
+          cat $j > $out/$(basename $j)
+        done
+      done
+
+      # Fix some paths in the standard udev rules.  Hacky.
+      for i in $out/*.rules; do
+        substituteInPlace $i \
+          --replace \"/sbin/modprobe \"${pkgs.kmod}/bin/modprobe \
+          --replace \"/sbin/mdadm \"${pkgs.mdadm}/sbin/mdadm \
+          --replace \"/sbin/blkid \"${pkgs.util-linux}/sbin/blkid \
+          --replace \"/bin/mount \"${pkgs.util-linux}/bin/mount \
+          --replace /usr/bin/readlink ${pkgs.coreutils}/bin/readlink \
+          --replace /usr/bin/basename ${pkgs.coreutils}/bin/basename 2>/dev/null
+      ${optionalString (initrdBin != null) ''
+        substituteInPlace $i --replace '/run/current-system/systemd' "${removeSuffix "/bin" initrdBin}"
+      ''}
+      done
+
+      echo -n "Checking that all programs called by relative paths in udev rules exist in ${udev}/lib/udev... "
+      import_progs=$(grep 'IMPORT{program}="[^/$]' $out/* |
+        sed -e 's/.*IMPORT{program}="\([^ "]*\)[ "].*/\1/' | uniq)
+      run_progs=$(grep -v '^[[:space:]]*#' $out/* | grep 'RUN+="[^/$]' |
+        sed -e 's/.*RUN+="\([^ "]*\)[ "].*/\1/' | uniq)
+      for i in $import_progs $run_progs; do
+        if [[ ! -x ${udev}/lib/udev/$i && ! $i =~ socket:.* ]]; then
+          echo "FAIL"
+          echo "$i is called in udev rules but not installed by udev"
+          exit 1
+        fi
+      done
+      echo "OK"
+
+      echo -n "Checking that all programs called by absolute paths in udev rules exist... "
+      import_progs=$(grep 'IMPORT{program}="\/' $out/* |
+        sed -e 's/.*IMPORT{program}="\([^ "]*\)[ "].*/\1/' | uniq)
+      run_progs=$(grep -v '^[[:space:]]*#' $out/* | grep 'RUN+="/' |
+        sed -e 's/.*RUN+="\([^ "]*\)[ "].*/\1/' | uniq)
+      for i in $import_progs $run_progs; do
+        # if the path refers to /run/current-system/systemd, replace with config.systemd.package
+        if [[ $i == /run/current-system/systemd* ]]; then
+          i="${systemd}/''${i#/run/current-system/systemd/}"
+        fi
+
+        if [[ ! -x $i ]]; then
+          echo "FAIL"
+          echo "$i is called in udev rules but is not executable or does not exist"
+          exit 1
+        fi
+      done
+      echo "OK"
+
+      filesToFixup="$(for i in "$out"/*; do
+        grep -l '\B\(/usr\)\?/s\?bin' "$i" || :
+      done)"
+
+      if [ -n "$filesToFixup" ]; then
+        echo "Consider fixing the following udev rules:"
+        echo "$filesToFixup" | while read localFile; do
+          remoteFile="origin unknown"
+          for i in ${toString binPackages}; do
+            for j in "$i"/*/udev/rules.d/*; do
+              [ -e "$out/$(basename "$j")" ] || continue
+              [ "$(basename "$j")" = "$(basename "$localFile")" ] || continue
+              remoteFile="originally from $j"
+              break 2
+            done
+          done
+          refs="$(
+            grep -o '\B\(/usr\)\?/s\?bin/[^ "]\+' "$localFile" \
+              | sed -e ':r;N;''${s/\n/ and /;br};s/\n/, /g;br'
+          )"
+          echo "$localFile ($remoteFile) contains references to $refs."
+        done
+        exit 1
+      fi
+
+      # If auto-configuration is disabled, then remove
+      # udev's 80-drivers.rules file, which contains rules for
+      # automatically calling modprobe.
+      ${optionalString (!config.boot.hardwareScan) ''
+        ln -s /dev/null $out/80-drivers.rules
+      ''}
+    '';
+
+  hwdbBin = pkgs.runCommand "hwdb.bin"
+    { preferLocalBuild = true;
+      allowSubstitutes = false;
+      packages = unique (map toString ([udev] ++ cfg.packages));
+    }
+    ''
+      mkdir -p etc/udev/hwdb.d
+      for i in $packages; do
+        echo "Adding hwdb files for package $i"
+        for j in $i/{etc,lib}/udev/hwdb.d/*; do
+          ln -s $j etc/udev/hwdb.d/$(basename $j)
+        done
+      done
+
+      echo "Generating hwdb database..."
+      # hwdb --update doesn't return error code even on errors!
+      res="$(${pkgs.buildPackages.systemd}/bin/systemd-hwdb --root=$(pwd) update 2>&1)"
+      echo "$res"
+      [ -z "$(echo "$res" | egrep '^Error')" ]
+      mv etc/udev/hwdb.bin $out
+    '';
+
+  compressFirmware = firmware: if (config.boot.kernelPackages.kernelAtLeast "5.3" && (firmware.compressFirmware or true)) then
+    pkgs.compressFirmwareXz firmware
+  else
+    id firmware;
+
+  # Udev has a 512-character limit for ENV{PATH}, so create a symlink
+  # tree to work around this.
+  udevPath = pkgs.buildEnv {
+    name = "udev-path";
+    paths = cfg.path;
+    pathsToLink = [ "/bin" "/sbin" ];
+    ignoreCollisions = true;
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    boot.hardwareScan = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to try to load kernel modules for all detected hardware.
+        Usually this does a good job of providing you with the modules
+        you need, but sometimes it can crash the system or cause other
+        nasty effects.
+      '';
+    };
+
+    services.udev = {
+      enable = mkEnableOption (lib.mdDoc "udev") // {
+        default = true;
+      };
+
+      packages = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          List of packages containing {command}`udev` rules.
+          All files found in
+          {file}`«pkg»/etc/udev/rules.d` and
+          {file}`«pkg»/lib/udev/rules.d`
+          will be included.
+        '';
+        apply = map getBin;
+      };
+
+      path = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          Packages added to the {env}`PATH` environment variable when
+          executing programs from Udev rules.
+        '';
+      };
+
+      extraRules = mkOption {
+        default = "";
+        example = ''
+          ENV{ID_VENDOR_ID}=="046d", ENV{ID_MODEL_ID}=="0825", ENV{PULSE_IGNORE}="1"
+        '';
+        type = types.lines;
+        description = lib.mdDoc ''
+          Additional {command}`udev` rules. They'll be written
+          into the file {file}`99-local.rules` and are therefore
+          read and applied after all other rules.
+        '';
+      };
+
+      extraHwdb = mkOption {
+        default = "";
+        example = ''
+          evdev:input:b0003v05AFp8277*
+            KEYBOARD_KEY_70039=leftalt
+            KEYBOARD_KEY_700e2=leftctrl
+        '';
+        type = types.lines;
+        description = lib.mdDoc ''
+          Additional {command}`hwdb` files. They'll be written
+          into the file {file}`99-local.hwdb` and are therefore
+          read after all other files.
+        '';
+      };
+
+    };
+
+    hardware.firmware = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      description = lib.mdDoc ''
+        List of packages containing firmware files.  Such files
+        will be loaded automatically if the kernel asks for them
+        (i.e., when it has detected specific hardware that requires
+        firmware to function).  If multiple packages contain firmware
+        files with the same name, the first package in the list takes
+        precedence.  Note that you must rebuild your system if you add
+        files to any of these directories.
+      '';
+      apply = list: pkgs.buildEnv {
+        name = "firmware";
+        paths = map compressFirmware list;
+        pathsToLink = [ "/lib/firmware" ];
+        ignoreCollisions = true;
+      };
+    };
+
+    networking.usePredictableInterfaceNames = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to assign [predictable names to network interfaces](https://www.freedesktop.org/wiki/Software/systemd/PredictableNetworkInterfaceNames/).
+        If enabled, interfaces
+        are assigned names that contain topology information
+        (e.g. `wlp3s0`) and thus should be stable
+        across reboots.  If disabled, names depend on the order in
+        which interfaces are discovered by the kernel, which may
+        change randomly across reboots; for instance, you may find
+        `eth0` and `eth1` flipping
+        unpredictably.
+      '';
+    };
+
+    boot.initrd.services.udev = {
+
+      packages = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          *This will only be used when systemd is used in stage 1.*
+
+          List of packages containing {command}`udev` rules that will be copied to stage 1.
+          All files found in
+          {file}`«pkg»/etc/udev/rules.d` and
+          {file}`«pkg»/lib/udev/rules.d`
+          will be included.
+        '';
+      };
+
+      binPackages = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          *This will only be used when systemd is used in stage 1.*
+
+          Packages to search for binaries that are referenced by the udev rules in stage 1.
+          This list always contains /bin of the initrd.
+        '';
+        apply = map getBin;
+      };
+
+      rules = mkOption {
+        default = "";
+        example = ''
+          SUBSYSTEM=="net", ACTION=="add", DRIVERS=="?*", ATTR{address}=="00:1D:60:B9:6D:4F", KERNEL=="eth*", NAME="my_fast_network_card"
+        '';
+        type = types.lines;
+        description = lib.mdDoc ''
+          {command}`udev` rules to include in the initrd
+          *only*. They'll be written into the file
+          {file}`99-local.rules` and are therefore read and applied
+          after the essential initrd rules.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.udev.extraRules = nixosRules;
+
+    services.udev.packages = [ extraUdevRules extraHwdbFile ];
+
+    services.udev.path = [ pkgs.coreutils pkgs.gnused pkgs.gnugrep pkgs.util-linux udev ];
+
+    boot.kernelParams = mkIf (!config.networking.usePredictableInterfaceNames) [ "net.ifnames=0" ];
+
+    boot.initrd.extraUdevRulesCommands = mkIf (!config.boot.initrd.systemd.enable && config.boot.initrd.services.udev.rules != "")
+      ''
+        cat <<'EOF' > $out/99-local.rules
+        ${config.boot.initrd.services.udev.rules}
+        EOF
+      '';
+
+    boot.initrd.services.udev.rules = nixosInitrdRules;
+
+    boot.initrd.systemd.additionalUpstreamUnits = [
+      "initrd-udevadm-cleanup-db.service"
+      "systemd-udevd-control.socket"
+      "systemd-udevd-kernel.socket"
+      "systemd-udevd.service"
+      "systemd-udev-settle.service"
+      "systemd-udev-trigger.service"
+    ];
+    boot.initrd.systemd.storePaths = [
+      "${config.boot.initrd.systemd.package}/lib/systemd/systemd-udevd"
+      "${config.boot.initrd.systemd.package}/lib/udev/ata_id"
+      "${config.boot.initrd.systemd.package}/lib/udev/cdrom_id"
+      "${config.boot.initrd.systemd.package}/lib/udev/scsi_id"
+      "${config.boot.initrd.systemd.package}/lib/udev/rules.d"
+    ] ++ map (x: "${x}/bin") config.boot.initrd.services.udev.binPackages;
+
+    # Generate the udev rules for the initrd
+    boot.initrd.systemd.contents = {
+      "/etc/udev/rules.d".source = udevRulesFor {
+        name = "initrd-udev-rules";
+        initrdBin = config.boot.initrd.systemd.contents."/bin".source;
+        udevPackages = config.boot.initrd.services.udev.packages;
+        udevPath = config.boot.initrd.systemd.contents."/bin".source;
+        udev = config.boot.initrd.systemd.package;
+        systemd = config.boot.initrd.systemd.package;
+        binPackages = config.boot.initrd.services.udev.binPackages ++ [ config.boot.initrd.systemd.contents."/bin".source ];
+      };
+    };
+    # Insert initrd rules
+    boot.initrd.services.udev.packages = [
+      initrdUdevRules
+      (mkIf (config.boot.initrd.services.udev.rules != "") (pkgs.writeTextFile {
+        name = "initrd-udev-rules";
+        destination = "/etc/udev/rules.d/99-local.rules";
+        text = config.boot.initrd.services.udev.rules;
+      }))
+    ];
+
+    environment.etc =
+      {
+        "udev/rules.d".source = udevRulesFor {
+          name = "udev-rules";
+          udevPackages = cfg.packages;
+          systemd = config.systemd.package;
+          binPackages = cfg.packages;
+          inherit udevPath udev;
+        };
+        "udev/hwdb.bin".source = hwdbBin;
+      };
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isEnabled "UNIX")
+      (isYes "INOTIFY_USER")
+      (isYes "NET")
+    ];
+
+    # We don't place this into `extraModprobeConfig` so that stage-1 ramdisk doesn't bloat.
+    environment.etc."modprobe.d/firmware.conf".text = "options firmware_class path=${config.hardware.firmware}/lib/firmware";
+
+    system.activationScripts.udevd =
+      ''
+        # The deprecated hotplug uevent helper is not used anymore
+        if [ -e /proc/sys/kernel/hotplug ]; then
+          echo "" > /proc/sys/kernel/hotplug
+        fi
+
+        # Allow the kernel to find our firmware.
+        if [ -e /sys/module/firmware_class/parameters/path ]; then
+          echo -n "${config.hardware.firmware}/lib/firmware" > /sys/module/firmware_class/parameters/path
+        fi
+      '';
+
+    systemd.services.systemd-udevd =
+      { restartTriggers = cfg.packages;
+      };
+
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "udev" "initrdRules" ] [ "boot" "initrd" "services" "udev" "rules" ])
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/udisks2.nix b/nixpkgs/nixos/modules/services/hardware/udisks2.nix
new file mode 100644
index 000000000000..5c058f1f0a6f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/udisks2.nix
@@ -0,0 +1,101 @@
+# Udisks daemon.
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  cfg = config.services.udisks2;
+  settingsFormat = pkgs.formats.ini {
+    listToValue = concatMapStringsSep "," (generators.mkValueStringDefault {});
+  };
+  configFiles = mapAttrs (name: value: (settingsFormat.generate name value)) (mapAttrs' (name: value: nameValuePair name value ) config.services.udisks2.settings);
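+  # With the default settings below, the generated "udisks2.conf" looks roughly like:
+  #   [defaults]
+  #   encryption=luks2
+  #   [udisks2]
+  #   modules=*
+  #   modules_load_preference=ondemand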
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.udisks2 = {
+
+      enable = mkEnableOption (mdDoc "udisks2, a DBus service that allows applications to query and manipulate storage devices");
+
+      mountOnMedia = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          When enabled, instructs udisks2 to mount removable drives under `/media/` directory, instead of the
+          default, ACL-controlled `/run/media/$USER/`. Since `/media/` is not mounted as tmpfs by default, it
+          requires cleanup to get rid of stale mountpoints; enabling this option will take care of this at boot.
+        '';
+      };
+
+      settings = mkOption rec {
+        type = types.attrsOf settingsFormat.type;
+        apply = recursiveUpdate default;
+        default = {
+          "udisks2.conf" = {
+            udisks2 = {
+              modules = [ "*" ];
+              modules_load_preference = "ondemand";
+            };
+            defaults = {
+              encryption = "luks2";
+            };
+          };
+        };
+        example = literalExpression ''
+          {
+            "WDC-WD10EZEX-60M2NA0-WD-WCC3F3SJ0698.conf" = {
+              ATA = {
+                StandbyTimeout = 50;
+              };
+            };
+          }
+        '';
+        description = mdDoc ''
+          Options passed to udisksd.
+          See [udisks2.conf(5)](http://manpages.ubuntu.com/manpages/latest/en/man5/udisks2.conf.5.html) and
+          the drive configuration section of [udisks(8)](http://manpages.ubuntu.com/manpages/latest/en/man8/udisks.8.html) for supported options.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.udisks2.enable {
+
+    environment.systemPackages = [ pkgs.udisks2 ];
+
+    environment.etc = (mapAttrs' (name: value: nameValuePair "udisks2/${name}" { source = value; } ) configFiles) // (
+    let
+      libblockdev = pkgs.udisks2.libblockdev;
+      majorVer = versions.major libblockdev.version;
+    in {
+      # We need to make sure /etc/libblockdev/@major_ver@/conf.d is populated to avoid
+      # warnings
+      "libblockdev/${majorVer}/conf.d/00-default.cfg".source = "${libblockdev}/etc/libblockdev/${majorVer}/conf.d/00-default.cfg";
+      "libblockdev/${majorVer}/conf.d/10-lvm-dbus.cfg".source = "${libblockdev}/etc/libblockdev/${majorVer}/conf.d/10-lvm-dbus.cfg";
+    });
+
+    security.polkit.enable = true;
+
+    services.dbus.packages = [ pkgs.udisks2 ];
+
+    systemd.tmpfiles.rules = [ "d /var/lib/udisks2 0755 root root -" ]
+      ++ optional cfg.mountOnMedia "D! /media 0755 root root -";
+
+    services.udev.packages = [ pkgs.udisks2 ];
+
+    services.udev.extraRules = optionalString cfg.mountOnMedia ''
+      ENV{ID_FS_USAGE}=="filesystem", ENV{UDISKS_FILESYSTEM_SHARED}="1"
+    '';
+
+    systemd.packages = [ pkgs.udisks2 ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/undervolt.nix b/nixpkgs/nixos/modules/services/hardware/undervolt.nix
new file mode 100644
index 000000000000..258f09bbab09
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/undervolt.nix
@@ -0,0 +1,190 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.undervolt;
+
+  mkPLimit = limit: window:
+    if (limit == null && window == null) then null
+    else assert asserts.assertMsg (limit != null && window != null) "Both power limit and window must be set";
+      "${toString limit} ${toString window}";
+  cliArgs = lib.cli.toGNUCommandLine {} {
+    inherit (cfg)
+      verbose
+      temp
+      ;
+    # `core` and `cache` are both intentionally set to `cfg.coreOffset` as according to the undervolt docs:
+    #
+    #     Core or Cache offsets have no effect. It is not possible to set different offsets for
+    #     CPU Core and Cache. The CPU will take the smaller of the two offsets, and apply that to
+    #     both CPU and Cache. A warning message will be displayed if you attempt to set different offsets.
+    core = cfg.coreOffset;
+    cache = cfg.coreOffset;
+    gpu = cfg.gpuOffset;
+    uncore = cfg.uncoreOffset;
+    analogio = cfg.analogioOffset;
+
+    temp-bat = cfg.tempBat;
+    temp-ac = cfg.tempAc;
+
+    power-limit-long = mkPLimit cfg.p1.limit cfg.p1.window;
+    power-limit-short = mkPLimit cfg.p2.limit cfg.p2.window;
+  };
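+  # toGNUCommandLine drops null values, so only configured options end up on the
+  # command line; e.g. coreOffset = -50 and temp = 97 yield roughly
+  # [ "--cache" "-50" "--core" "-50" "--temp" "97" ].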
+in
+{
+  options.services.undervolt = {
+    enable = mkEnableOption (lib.mdDoc ''
+       Undervolting service for Intel CPUs.
+
+       Warning: This service is not endorsed by Intel and may permanently damage your hardware. Use at your own risk!
+    '');
+
+    verbose = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable verbose logging.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.undervolt;
+      defaultText = literalExpression "pkgs.undervolt";
+      description = lib.mdDoc ''
+        undervolt derivation to use.
+      '';
+    };
+
+    coreOffset = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        The amount of voltage in mV to offset the CPU cores by.
+      '';
+    };
+
+    gpuOffset = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        The amount of voltage in mV to offset the GPU by.
+      '';
+    };
+
+    uncoreOffset = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        The amount of voltage in mV to offset uncore by.
+      '';
+    };
+
+    analogioOffset = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        The amount of voltage in mV to offset analogio by.
+      '';
+    };
+
+    temp = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        The temperature target in Celsius degrees.
+      '';
+    };
+
+    tempAc = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        The temperature target on AC power in Celsius degrees.
+      '';
+    };
+
+    tempBat = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        The temperature target on battery power in Celsius degrees.
+      '';
+    };
+
+    p1.limit = mkOption {
+      type = with types; nullOr int;
+      default = null;
+      description = lib.mdDoc ''
+        The P1 Power Limit in Watts.
+        Both limit and window must be set.
+      '';
+    };
+    p1.window = mkOption {
+      type = with types; nullOr (oneOf [ float int ]);
+      default = null;
+      description = lib.mdDoc ''
+        The P1 Time Window in seconds.
+        Both limit and window must be set.
+      '';
+    };
+
+    p2.limit = mkOption {
+      type = with types; nullOr int;
+      default = null;
+      description = lib.mdDoc ''
+        The P2 Power Limit in Watts.
+        Both limit and window must be set.
+      '';
+    };
+    p2.window = mkOption {
+      type = with types; nullOr (oneOf [ float int ]);
+      default = null;
+      description = lib.mdDoc ''
+        The P2 Time Window in seconds.
+        Both limit and window must be set.
+      '';
+    };
+
+    useTimer = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to set a timer that applies the undervolt settings every 30s.
+        This will cause spam in the journal but might be required for some
+        hardware under specific conditions.
+        Enable this if your undervolt settings don't hold.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    hardware.cpu.x86.msr.enable = true;
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.undervolt = {
+      description = "Intel Undervolting Service";
+
+      # Apply undervolt on boot, nixos generation switch and resume
+      wantedBy = [ "multi-user.target" "post-resume.target" ];
+      after = [ "post-resume.target" ]; # Not sure why but it won't work without this
+
+      serviceConfig = {
+        Type = "oneshot";
+        Restart = "no";
+        ExecStart = "${cfg.package}/bin/undervolt ${toString cliArgs}";
+      };
+    };
+
+    systemd.timers.undervolt = mkIf cfg.useTimer {
+      description = "Undervolt timer to ensure voltage settings are always applied";
+      partOf = [ "undervolt.service" ];
+      wantedBy = [ "multi-user.target" ];
+      timerConfig = {
+        OnBootSec = "2min";
+        OnUnitActiveSec = "30";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/upower.nix b/nixpkgs/nixos/modules/services/hardware/upower.nix
new file mode 100644
index 000000000000..aacc8a63dbeb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/upower.nix
@@ -0,0 +1,237 @@
+# Upower daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.upower;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.upower = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Upower, a DBus service that provides power
+          management support to applications.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.upower;
+        defaultText = literalExpression "pkgs.upower";
+        description = lib.mdDoc ''
+          Which upower package to use.
+        '';
+      };
+
+      enableWattsUpPro = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the Watts Up Pro device.
+
+          The Watts Up Pro contains a generic FTDI USB device without a specific
+          vendor and product ID. When we probe for WUP devices, we can cause
+          the user to get a perplexing "Device or resource busy" error when
+          attempting to use their non-WUP device.
+
+          The generic FTDI device is known to also be used on:
+
+          - Sparkfun FT232 breakout board
+          - Parallax Propeller
+        '';
+      };
+
+      noPollBatteries = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Don't poll the kernel for battery level changes.
+
+          Some hardware will send us battery level changes through
+          events, rather than us having to poll for it. This option
+          allows disabling polling for hardware that sends out events.
+        '';
+      };
+
+      ignoreLid = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to ignore the lid state.
+
+          Some laptops are broken. The lid state is either inverted, or stuck
+          on or off. We can't do much to fix these problems, but this is a way
+          for users to make the laptop panel vanish, a state that might be used
+          by a couple of user-space daemons. On Linux systems, see also
+          logind.conf(5).
+        '';
+      };
+
+      usePercentageForPolicy = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Policy for warnings and action based on battery levels
+
+          Whether a battery-percentage-based policy should be used. The default
+          is to use the percentage, which
+          should work around broken firmware. It is also more reliable than
+          the time left (for example, frantically saving all your files is going
+          to use more battery than letting the machine rest).
+        '';
+      };
+
+      percentageLow = mkOption {
+        type = types.ints.unsigned;
+        default = 10;
+        description = lib.mdDoc ''
+          When `usePercentageForPolicy` is
+          `true`, the levels at which UPower will consider the
+          battery low.
+
+          This will also be used for batteries which don't have time information
+          such as that of peripherals.
+
+          If any value (of `percentageLow`,
+          `percentageCritical` and
+          `percentageAction`) is invalid, or not in descending
+          order, the defaults will be used.
+        '';
+      };
+
+      percentageCritical = mkOption {
+        type = types.ints.unsigned;
+        default = 3;
+        description = lib.mdDoc ''
+          When `usePercentageForPolicy` is
+          `true`, the levels at which UPower will consider the
+          battery critical.
+
+          This will also be used for batteries which don't have time information
+          such as that of peripherals.
+
+          If any value (of `percentageLow`,
+          `percentageCritical` and
+          `percentageAction`) is invalid, or not in descending
+          order, the defaults will be used.
+        '';
+      };
+
+      percentageAction = mkOption {
+        type = types.ints.unsigned;
+        default = 2;
+        description = lib.mdDoc ''
+          When `usePercentageForPolicy` is
+          `true`, the levels at which UPower will take action
+          for the critical battery level.
+
+          This will also be used for batteries which don't have time information
+          such as that of peripherals.
+
+          If any value (of `percentageLow`,
+          `percentageCritical` and
+          `percentageAction`) is invalid, or not in descending
+          order, the defaults will be used.
+        '';
+      };
+
+      timeLow = mkOption {
+        type = types.ints.unsigned;
+        default = 1200;
+        description = lib.mdDoc ''
+          When `usePercentageForPolicy` is
+          `false`, the time remaining in seconds at which
+          UPower will consider the battery low.
+
+          If any value (of `timeLow`,
+          `timeCritical` and `timeAction`) is
+          invalid, or not in descending order, the defaults will be used.
+        '';
+      };
+
+      timeCritical = mkOption {
+        type = types.ints.unsigned;
+        default = 300;
+        description = lib.mdDoc ''
+          When `usePercentageForPolicy` is
+          `false`, the time remaining in seconds at which
+          UPower will consider the battery critical.
+
+          If any value (of `timeLow`,
+          `timeCritical` and `timeAction`) is
+          invalid, or not in descending order, the defaults will be used.
+        '';
+      };
+
+      timeAction = mkOption {
+        type = types.ints.unsigned;
+        default = 120;
+        description = lib.mdDoc ''
+          When `usePercentageForPolicy` is
+          `false`, the time remaining in seconds at which
+          UPower will take action for the critical battery level.
+
+          If any value (of `timeLow`,
+          `timeCritical` and `timeAction`) is
+          invalid, or not in descending order, the defaults will be used.
+        '';
+      };
+
+      criticalPowerAction = mkOption {
+        type = types.enum [ "PowerOff" "Hibernate" "HybridSleep" ];
+        default = "HybridSleep";
+        description = lib.mdDoc ''
+          The action to take when `timeAction` or
+          `percentageAction` has been reached for the batteries
+          (UPS or laptop batteries) supplying the computer
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    services.dbus.packages = [ cfg.package ];
+
+    services.udev.packages = [ cfg.package ];
+
+    systemd.packages = [ cfg.package ];
+
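+    # generators.toINI renders the settings below into /etc/UPower/UPower.conf,
+    # roughly:
+    #   [UPower]
+    #   CriticalPowerAction=HybridSleep
+    #   EnableWattsUpPro=false
+    #   ...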
+    environment.etc."UPower/UPower.conf".text = generators.toINI {} {
+      UPower = {
+        EnableWattsUpPro = cfg.enableWattsUpPro;
+        NoPollBatteries = cfg.noPollBatteries;
+        IgnoreLid = cfg.ignoreLid;
+        UsePercentageForPolicy = cfg.usePercentageForPolicy;
+        PercentageLow = cfg.percentageLow;
+        PercentageCritical = cfg.percentageCritical;
+        PercentageAction = cfg.percentageAction;
+        TimeLow = cfg.timeLow;
+        TimeCritical = cfg.timeCritical;
+        TimeAction = cfg.timeAction;
+        CriticalPowerAction = cfg.criticalPowerAction;
+      };
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/usbmuxd.nix b/nixpkgs/nixos/modules/services/hardware/usbmuxd.nix
new file mode 100644
index 000000000000..9466ea26995b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/usbmuxd.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  defaultUserGroup = "usbmux";
+  apple = "05ac";
+
+  cfg = config.services.usbmuxd;
+
+in
+
+{
+  options.services.usbmuxd = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable the usbmuxd ("USB multiplexing daemon") service. This daemon is
+        in charge of multiplexing connections over USB to an iOS device. This is
+        needed for transferring data from and to iOS devices (see ifuse). It may
+        also enable plug-and-play tethering for iPhones.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = defaultUserGroup;
+      description = lib.mdDoc ''
+        The user usbmuxd should use to run after startup.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = defaultUserGroup;
+      description = lib.mdDoc ''
+        The group usbmuxd should use to run after startup.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.usbmuxd;
+      defaultText = literalExpression "pkgs.usbmuxd";
+      description = lib.mdDoc "Which package to use for the usbmuxd daemon.";
+      relatedPackages = [ "usbmuxd" "usbmuxd2" ];
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == defaultUserGroup) {
+      ${cfg.user} = {
+        description = "usbmuxd user";
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == defaultUserGroup) {
+      ${cfg.group} = { };
+    };
+
+    # Give usbmuxd permission for Apple devices
+    services.udev.extraRules = ''
+      SUBSYSTEM=="usb", ATTR{idVendor}=="${apple}", GROUP="${cfg.group}"
+    '';
+
+    systemd.services.usbmuxd = {
+      description = "usbmuxd";
+      wantedBy = [ "multi-user.target" ];
+      unitConfig.Documentation = "man:usbmuxd(8)";
+      serviceConfig = {
+        # Trigger the udev rule manually. This doesn't require replugging the
+        # device when first enabling the option to get it to work
+        ExecStartPre = "${pkgs.udev}/bin/udevadm trigger -s usb -a idVendor=${apple}";
+        ExecStart = "${cfg.package}/bin/usbmuxd -U ${cfg.user} -v";
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/usbrelayd.nix b/nixpkgs/nixos/modules/services/hardware/usbrelayd.nix
new file mode 100644
index 000000000000..01d3a5ba8bee
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/usbrelayd.nix
@@ -0,0 +1,43 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.usbrelayd;
+in
+{
+  options.services.usbrelayd = with types; {
+    enable = mkEnableOption (lib.mdDoc "USB Relay MQTT daemon");
+
+    broker = mkOption {
+      type = str;
+      description = lib.mdDoc "Hostname or IP address of your MQTT Broker.";
+      default = "127.0.0.1";
+      example = "192.168.1.1";
+    };
+
+    clientName = mkOption {
+      type = str;
+      description = lib.mdDoc "Name, your client connects as.";
+      default = "MyUSBRelay";
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.etc."usbrelayd.conf".text = ''
+      [MQTT]
+      BROKER = ${cfg.broker}
+      CLIENTNAME = ${cfg.clientName}
+    '';
+
+    services.udev.packages = [ pkgs.usbrelayd ];
+    systemd.packages = [ pkgs.usbrelayd ];
+    users.groups.usbrelay = { };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ wentasah ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/hardware/vdr.nix b/nixpkgs/nixos/modules/services/hardware/vdr.nix
new file mode 100644
index 000000000000..de63ed893b02
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/hardware/vdr.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.vdr;
+  libDir = "/var/lib/vdr";
+in {
+
+  ###### interface
+
+  options = {
+
+    services.vdr = {
+      enable = mkEnableOption (lib.mdDoc "VDR. Please put config into ${libDir}");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.vdr;
+        defaultText = literalExpression "pkgs.vdr";
+        example = literalExpression "pkgs.wrapVdr.override { plugins = with pkgs.vdrPlugins; [ hello ]; }";
+        description = lib.mdDoc "Package to use.";
+      };
+
+      videoDir = mkOption {
+        type = types.path;
+        default = "/srv/vdr/video";
+        description = lib.mdDoc "Recording directory";
+      };
+
+      extraArguments = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Additional command line arguments to pass to VDR.";
+      };
+
+      enableLirc = mkEnableOption (lib.mdDoc "LIRC");
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+    systemd.tmpfiles.rules = [
+      "d ${cfg.videoDir} 0755 vdr vdr -"
+      "Z ${cfg.videoDir} - vdr vdr -"
+    ];
+
+    systemd.services.vdr = {
+      description = "VDR";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/vdr \
+            --video="${cfg.videoDir}" \
+            --config="${libDir}" \
+            ${escapeShellArgs cfg.extraArguments}
+        '';
+        User = "vdr";
+        CacheDirectory = "vdr";
+        StateDirectory = "vdr";
+        Restart = "on-failure";
+      };
+    };
+
+    users.users.vdr = {
+      group = "vdr";
+      home = libDir;
+      isSystemUser = true;
+    };
+
+    users.groups.vdr = {};
+  }
+
+  (mkIf cfg.enableLirc {
+    services.lirc.enable = true;
+    users.users.vdr.extraGroups = [ "lirc" ];
+    services.vdr.extraArguments = [
+      "--lirc=${config.passthru.lirc.socket}"
+    ];
+  })]);
+}
diff --git a/nixpkgs/nixos/modules/services/home-automation/ebusd.nix b/nixpkgs/nixos/modules/services/home-automation/ebusd.nix
new file mode 100644
index 000000000000..519d116e0e55
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/home-automation/ebusd.nix
@@ -0,0 +1,270 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ebusd;
+
+  package = pkgs.ebusd;
+
+  arguments = [
+    "${package}/bin/ebusd"
+    "--foreground"
+    "--updatecheck=off"
+    "--device=${cfg.device}"
+    "--port=${toString cfg.port}"
+    "--configpath=${cfg.configpath}"
+    "--scanconfig=${cfg.scanconfig}"
+    "--log=main:${cfg.logs.main}"
+    "--log=network:${cfg.logs.network}"
+    "--log=bus:${cfg.logs.bus}"
+    "--log=update:${cfg.logs.update}"
+    "--log=other:${cfg.logs.other}"
+    "--log=all:${cfg.logs.all}"
+  ] ++ lib.optionals cfg.readonly [
+    "--readonly"
+  ] ++ lib.optionals cfg.mqtt.enable [
+    "--mqtthost=${cfg.mqtt.host}"
+    "--mqttport=${toString cfg.mqtt.port}"
+    "--mqttuser=${cfg.mqtt.user}"
+    "--mqttpass=${cfg.mqtt.password}"
+  ] ++ lib.optionals cfg.mqtt.home-assistant [
+    "--mqttint=${package}/etc/ebusd/mqtt-hassio.cfg"
+    "--mqttjson"
+  ] ++ lib.optionals cfg.mqtt.retain [
+    "--mqttretain"
+  ] ++ cfg.extraArguments;
+
+  usesDev = hasPrefix "/" cfg.device;
+
+  command = concatStringsSep " " arguments;
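+  # The assembled ExecStart looks roughly like
+  #   .../bin/ebusd --foreground --updatecheck=off --device=<device> --port=8888 --configpath=https://cfg.ebusd.eu/ --scanconfig=full --log=main:info ...
+  # with --readonly and the MQTT flags appended only when the corresponding options are enabled.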
+
+in
+{
+  meta.maintainers = with maintainers; [ nathan-gs ];
+
+  options.services.ebusd = {
+    enable = mkEnableOption (lib.mdDoc "ebusd service");
+
+    device = mkOption {
+      type = types.str;
+      default = "";
+      example = "IP:PORT";
+      description = lib.mdDoc ''
+        Use DEV as eBUS device [/dev/ttyUSB0].
+        This can be either:
+        - enh:DEVICE or enh:IP:PORT for enhanced device (only adapter v3 and newer),
+        - ens:DEVICE for enhanced high speed serial device (only adapter v3 and newer with firmware since 20220731),
+        - DEVICE for serial device (normal speed, for all other serial adapters like adapter v2 as well as adapter v3 in non-enhanced mode), or
+        - [udp:]IP:PORT for network device.
+
+        See <https://github.com/john30/ebusd/wiki/2.-Run#device-options>.
+      '';
+    };
+
+    port = mkOption {
+      default = 8888;
+      type = types.port;
+      description = lib.mdDoc ''
+        The port to listen on.
+      '';
+    };
+
+    readonly = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Only read from the device, never write to it.
+      '';
+    };
+
+    configpath = mkOption {
+      type = types.str;
+      default = "https://cfg.ebusd.eu/";
+      description = lib.mdDoc ''
+        Read CSV config files from PATH (local folder or HTTPS URL) [https://cfg.ebusd.eu/]
+      '';
+    };
+
+    scanconfig = mkOption {
+      type = types.str;
+      default = "full";
+      description = lib.mdDoc ''
+        Pick CSV config files matching initial scan ("none" or empty for no initial scan message, "full" for full scan, or a single hex address to scan, default is to send a broadcast ident message).
+        If combined with --checkconfig, you can add scan message data as arguments for checking a particular scan configuration, e.g. "FF08070400/0AB5454850303003277201". For further details on this option,
+        see [Automatic configuration](https://github.com/john30/ebusd/wiki/4.7.-Automatic-configuration).
+      '';
+    };
+
+    logs = {
+      main = mkOption {
+        type = types.enum [ "error" "notice" "info" "debug"];
+        default = "info";
+        description = lib.mdDoc ''
+          Only write log for matching AREAs (main|network|bus|update|other|all) below or equal to LEVEL (error|notice|info|debug) [all:notice].
+        '';
+      };
+
+      network = mkOption {
+        type = types.enum [ "error" "notice" "info" "debug"];
+        default = "info";
+        description = lib.mdDoc ''
+          Only write log for matching AREAs (main|network|bus|update|other|all) below or equal to LEVEL (error|notice|info|debug) [all:notice].
+        '';
+      };
+
+      bus = mkOption {
+        type = types.enum [ "error" "notice" "info" "debug"];
+        default = "info";
+        description = lib.mdDoc ''
+          Only write log for matching AREAs (main|network|bus|update|other|all) below or equal to LEVEL (error|notice|info|debug) [all:notice].
+        '';
+      };
+
+      update = mkOption {
+        type = types.enum [ "error" "notice" "info" "debug"];
+        default = "info";
+        description = lib.mdDoc ''
+          Only write log for matching AREAs (main|network|bus|update|other|all) below or equal to LEVEL (error|notice|info|debug) [all:notice].
+        '';
+      };
+
+      other = mkOption {
+        type = types.enum [ "error" "notice" "info" "debug"];
+        default = "info";
+        description = lib.mdDoc ''
+          Only write log for matching AREAs (main|network|bus|update|other|all) below or equal to LEVEL (error|notice|info|debug) [all:notice].
+        '';
+      };
+
+      all = mkOption {
+        type = types.enum [ "error" "notice" "info" "debug"];
+        default = "info";
+        description = lib.mdDoc ''
+          Only write log for matching AREAs (main|network|bus|update|other|all) below or equal to LEVEL (error|notice|info|debug) [all:notice].
+        '';
+      };
+    };
+
+    mqtt = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Adds support for MQTT
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          Connect to MQTT broker on HOST.
+        '';
+      };
+
+      port = mkOption {
+        default = 1883;
+        type = types.port;
+        description = lib.mdDoc ''
+          The port on which to connect to the MQTT broker.
+        '';
+      };
+
+      home-assistant = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Adds the Home Assistant topics to MQTT, read more at [MQTT Integration](https://github.com/john30/ebusd/wiki/MQTT-integration)
+        '';
+      };
+
+      retain = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Set the retain flag on all topics instead of only selected global ones
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The MQTT user to use
+        '';
+      };
+
+      password = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The MQTT password.
+        '';
+      };
+
+    };
+
+    extraArguments = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra arguments to pass to the ebusd daemon.
+      '';
+    };
+
+  };
+
+  config = mkIf (cfg.enable) {
+
+    systemd.services.ebusd = {
+      description = "EBUSd Service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = command;
+        DynamicUser = true;
+        Restart = "on-failure";
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        DeviceAllow = lib.optionals usesDev [
+          cfg.device
+        ] ;
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = false;
+        NoNewPrivileges = true;
+        PrivateDevices = usesDev;
+        PrivateUsers = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SupplementaryGroups = [
+          "dialout"
+        ];
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service @pkey"
+          "~@privileged @resources"
+        ];
+        UMask = "0077";
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/home-automation/esphome.nix b/nixpkgs/nixos/modules/services/home-automation/esphome.nix
new file mode 100644
index 000000000000..080c8876382f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/home-automation/esphome.nix
@@ -0,0 +1,136 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    literalExpression
+    maintainers
+    mkEnableOption
+    mkIf
+    mkOption
+    mdDoc
+    types
+    ;
+
+  cfg = config.services.esphome;
+
+  stateDir = "/var/lib/esphome";
+
+  esphomeParams =
+    if cfg.enableUnixSocket
+    then "--socket /run/esphome/esphome.sock"
+    else "--address ${cfg.address} --port ${toString cfg.port}";
+in
+{
+  meta.maintainers = with maintainers; [ oddlama ];
+
+  options.services.esphome = {
+    enable = mkEnableOption (mdDoc "esphome");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.esphome;
+      defaultText = literalExpression "pkgs.esphome";
+      description = mdDoc "The package to use for the esphome command.";
+    };
+
+    enableUnixSocket = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Listen on a unix socket `/run/esphome/esphome.sock` instead of the TCP port.";
+    };
+
+    address = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = mdDoc "esphome address";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 6052;
+      description = mdDoc "esphome port";
+    };
+
+    openFirewall = mkOption {
+      default = false;
+      type = types.bool;
+      description = mdDoc "Whether to open the firewall for the specified port.";
+    };
+
+    allowedDevices = mkOption {
+      default = ["char-ttyS" "char-ttyUSB"];
+      example = ["/dev/serial/by-id/usb-Silicon_Labs_CP2102_USB_to_UART_Bridge_Controller_0001-if00-port0"];
+      description = lib.mdDoc ''
+        A list of device nodes to which {command}`esphome` has access.
+        Refer to DeviceAllow in systemd.resource-control(5) for more information.
+        Beware that if a device is referred to by an absolute path instead of a device category,
+        only devices that are already plugged in when the service starts will be allowed.
+      '';
+      type = types.listOf types.str;
+    };
+  };
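+
+  # A minimal usage sketch based on the options declared above; the serial
+  # device path is hypothetical:
+  #
+  #   services.esphome = {
+  #     enable = true;
+  #     openFirewall = true;
+  #     allowedDevices = [ "/dev/serial/by-id/usb-EXAMPLE-if00-port0" ];
+  #   };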
+
+  config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkIf (cfg.openFirewall && !cfg.enableUnixSocket) [cfg.port];
+
+    systemd.services.esphome = {
+      description = "ESPHome dashboard";
+      after = ["network.target"];
+      wantedBy = ["multi-user.target"];
+      path = [cfg.package];
+
+      # platformio fails to determine the home directory when using DynamicUser
+      environment.PLATFORMIO_CORE_DIR = "${stateDir}/.platformio";
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/esphome dashboard ${esphomeParams} ${stateDir}";
+        DynamicUser = true;
+        User = "esphome";
+        Group = "esphome";
+        WorkingDirectory = stateDir;
+        StateDirectory = "esphome";
+        StateDirectoryMode = "0750";
+        Restart = "on-failure";
+        RuntimeDirectory = mkIf cfg.enableUnixSocket "esphome";
+        RuntimeDirectoryMode = "0750";
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        DevicePolicy = "closed";
+        DeviceAllow = map (d: "${d} rw") cfg.allowedDevices;
+        SupplementaryGroups = ["dialout"];
+        #NoNewPrivileges = true; # Implied by DynamicUser
+        PrivateUsers = true;
+        #PrivateTmp = true; # Implied by DynamicUser
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProcSubset = "all"; # Using "pid" breaks bwrap
+        ProtectSystem = "strict";
+        #RemoveIPC = true; # Implied by DynamicUser
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_NETLINK"
+          "AF_UNIX"
+        ];
+        RestrictNamespaces = false; # Required by platformio for chroot
+        RestrictRealtime = true;
+        #RestrictSUIDSGID = true; # Implied by DynamicUser
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "@mount" # Required by platformio for chroot
+        ];
+        UMask = "0077";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/home-automation/evcc.nix b/nixpkgs/nixos/modules/services/home-automation/evcc.nix
new file mode 100644
index 000000000000..d0ce3fb4a1ce
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/home-automation/evcc.nix
@@ -0,0 +1,96 @@
+{ lib
+, pkgs
+, config
+, ...
+}:
+
+with lib;
+
+let
+  cfg = config.services.evcc;
+
+  format = pkgs.formats.yaml {};
+  configFile = format.generate "evcc.yml" cfg.settings;
+
+  package = pkgs.evcc;
+in
+
+{
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  options.services.evcc = with types; {
+    enable = mkEnableOption (lib.mdDoc "EVCC, the extensible EV Charge Controller with PV integration");
+
+    extraArgs = mkOption {
+      type = listOf str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra arguments to pass to the evcc executable.
+      '';
+    };
+
+    settings = mkOption {
+      type = format.type;
+      description = lib.mdDoc ''
+        evcc configuration as a Nix attribute set.
+
+        Check for possible options in the sample [evcc.dist.yaml](https://github.com/andig/evcc/blob/${package.version}/evcc.dist.yaml).
+      '';
+    };
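+
+    # A hedged sketch only; the keys below are illustrative and the
+    # authoritative schema is the evcc.dist.yaml referenced above:
+    #
+    #   services.evcc.settings = {
+    #     network.port = 7070;
+    #     interval = "10s";
+    #   };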
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.evcc = {
+      after = [
+        "network-online.target"
+        "mosquitto.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+      environment.HOME = "/var/lib/evcc";
+      path = with pkgs; [
+        getent
+      ];
+      serviceConfig = {
+        ExecStart = "${package}/bin/evcc --config ${configFile} ${escapeShellArgs cfg.extraArgs}";
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [
+          "char-ttyUSB"
+        ];
+        DevicePolicy = "closed";
+        DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_UNIX"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        StateDirectory = "evcc";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        UMask = "0077";
+        User = "evcc";
+      };
+    };
+  };
+
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/home-automation/home-assistant.nix b/nixpkgs/nixos/modules/services/home-automation/home-assistant.nix
new file mode 100644
index 000000000000..54fd3e17292f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/home-automation/home-assistant.nix
@@ -0,0 +1,695 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.home-assistant;
+  format = pkgs.formats.yaml {};
+
+  # Render config attribute sets to YAML
+  # Values that are null will be filtered from the output, so this is one way to have optional
+  # options shown in settings.
+  # We post-process the result to add support for YAML functions, like secrets or includes, see e.g.
+  # https://www.home-assistant.io/docs/configuration/secrets/
+  filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null ])) cfg.config or {};
+  configFile = pkgs.runCommandLocal "configuration.yaml" { } ''
+    cp ${format.generate "configuration.yaml" filteredConfig} $out
+    sed -i -e "s/'\!\([a-z_]\+\) \(.*\)'/\!\1 \2/;s/^\!\!/\!/;" $out
+  '';
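+  # For example, a value written as `latitude = "!secret latitude";` is emitted
+  # by the YAML generator as `latitude: '!secret latitude'`; the sed expression
+  # above strips the quotes so Home Assistant parses the !secret tag again.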
+  lovelaceConfig = if (cfg.lovelaceConfig == null) then {}
+    else (lib.recursiveUpdate customLovelaceModulesResources cfg.lovelaceConfig);
+  lovelaceConfigFile = format.generate "ui-lovelace.yaml" lovelaceConfig;
+
+  # Components advertised by the home-assistant package
+  availableComponents = cfg.package.availableComponents;
+
+  # Components that were added by overriding the package
+  explicitComponents = cfg.package.extraComponents;
+  useExplicitComponent = component: elem component explicitComponents;
+
+  # Given a component "platform", looks up whether it is used in the config
+  # as `platform = "platform";`.
+  #
+  # For example, the component mqtt.sensor is used as follows:
+  # config.sensor = [ {
+  #   platform = "mqtt";
+  #   ...
+  # } ];
+  usedPlatforms = config:
+    # don't recurse into derivations possibly creating an infinite recursion
+    if isDerivation config then
+      [ ]
+    else if isAttrs config then
+      optional (config ? platform) config.platform
+      ++ concatMap usedPlatforms (attrValues config)
+    else if isList config then
+      concatMap usedPlatforms config
+    else [ ];
+
+  useComponentPlatform = component: elem component (usedPlatforms cfg.config);
+
+  # Returns whether component is used in config, explicitly passed into package or
+  # configured in the module.
+  useComponent = component:
+    hasAttrByPath (splitString "." component) cfg.config
+    || useComponentPlatform component
+    || useExplicitComponent component
+    || builtins.elem component cfg.extraComponents;
+
+  # Final list of components passed into the package to include required dependencies
+  extraComponents = filter useComponent availableComponents;
+
+  package = (cfg.package.override (oldArgs: {
+    # Respect overrides that already exist in the passed package and
+    # concat it with values passed via the module.
+    extraComponents = oldArgs.extraComponents or [] ++ extraComponents;
+    extraPackages = ps: (oldArgs.extraPackages or (_: []) ps)
+      ++ (cfg.extraPackages ps)
+      ++ (lib.concatMap (component: component.propagatedBuildInputs or []) cfg.customComponents);
+  }));
+
+  # Create a directory that holds all lovelace modules
+  customLovelaceModulesDir = pkgs.buildEnv {
+    name = "home-assistant-custom-lovelace-modules";
+    paths = cfg.customLovelaceModules;
+  };
+
+  # Create parts of the lovelace config that reference lovelave modules as resources
+  customLovelaceModulesResources = {
+    lovelace.resources = map (card: {
+      url = "/local/nixos-lovelace-modules/${card.entrypoint or card.pname}.js?${card.version}";
+      type = "module";
+    }) cfg.customLovelaceModules;
+  };
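+  # e.g. a hypothetical card with pname "my-card", no explicit entrypoint and
+  # version "1.0.0" ends up referenced as
+  # "/local/nixos-lovelace-modules/my-card.js?1.0.0".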
+in {
+  imports = [
+    # Migrations in NixOS 22.05
+    (mkRemovedOptionModule [ "services" "home-assistant" "applyDefaultConfig" ] "The default config was migrated into services.home-assistant.config")
+    (mkRemovedOptionModule [ "services" "home-assistant" "autoExtraComponents" ] "Components are now parsed from services.home-assistant.config unconditionally")
+    (mkRenamedOptionModule [ "services" "home-assistant" "port" ] [ "services" "home-assistant" "config" "http" "server_port" ])
+  ];
+
+  meta = {
+    buildDocsInSandbox = false;
+    maintainers = teams.home-assistant.members;
+  };
+
+  options.services.home-assistant = {
+    # Running home-assistant on NixOS is considered an installation method that is unsupported by the upstream project.
+    # https://github.com/home-assistant/architecture/blob/master/adr/0012-define-supported-installation-method.md#decision
+    enable = mkEnableOption (lib.mdDoc "Home Assistant. Please note that this installation method is unsupported upstream");
+
+    configDir = mkOption {
+      default = "/var/lib/hass";
+      type = types.path;
+      description = lib.mdDoc "The config directory, where your {file}`configuration.yaml` is located.";
+    };
+
+    extraComponents = mkOption {
+      type = types.listOf (types.enum availableComponents);
+      default = [
+        # List of components required to complete the onboarding
+        "default_config"
+        "met"
+        "esphome"
+      ] ++ optionals pkgs.stdenv.hostPlatform.isAarch [
+        # Use the platform as an indicator that we might be running on a RaspberryPi and include
+        # relevant components
+        "rpi_power"
+      ];
+      example = literalExpression ''
+        [
+          "analytics"
+          "default_config"
+          "esphome"
+          "my"
+          "shopping_list"
+          "wled"
+        ]
+      '';
+      description = lib.mdDoc ''
+        List of [components](https://www.home-assistant.io/integrations/) that have their dependencies included in the package.
+
+        The component name can be found in the URL, for example `https://www.home-assistant.io/integrations/ffmpeg/` would map to `ffmpeg`.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = types.functionTo (types.listOf types.package);
+      default = _: [];
+      defaultText = literalExpression ''
+        python3Packages: with python3Packages; [];
+      '';
+      example = literalExpression ''
+        python3Packages: with python3Packages; [
+          # postgresql support
+          psycopg2
+        ];
+      '';
+      description = lib.mdDoc ''
+        List of packages to add to propagatedBuildInputs.
+
+        A popular example is `python3Packages.psycopg2`
+        for PostgreSQL support in the recorder component.
+      '';
+    };
+
+    customComponents = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression ''
+        with pkgs.home-assistant-custom-components; [
+          prometheus-sensor
+        ];
+      '';
+      description = lib.mdDoc ''
+        List of custom component packages to install.
+
+        Available components can be found below `pkgs.home-assistant-custom-components`.
+      '';
+    };
+
+    customLovelaceModules = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression ''
+        with pkgs.home-assistant-custom-lovelace-modules; [
+          mini-graph-card
+          mini-media-player
+        ];
+      '';
+      description = lib.mdDoc ''
+        List of custom lovelace card packages to load as lovelace resources.
+
+        Available cards can be found below `pkgs.home-assistant-custom-lovelace-modules`.
+
+        ::: {.note}
+        Automatic loading only works with lovelace in `yaml` mode.
+        :::
+      '';
+    };
+
+    config = mkOption {
+      type = types.nullOr (types.submodule {
+        freeformType = format.type;
+        options = {
+          # This is a partial selection of the most common options, so new users can quickly
+          # pick up how to match Home Assistant's config structure to ours. It also lets us preset
+          # config values intelligently.
+
+          homeassistant = {
+            # https://www.home-assistant.io/docs/configuration/basic/
+            name = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "Home";
+              description = lib.mdDoc ''
+                Name of the location where Home Assistant is running.
+              '';
+            };
+
+            latitude = mkOption {
+              type = types.nullOr (types.either types.float types.str);
+              default = null;
+              example = 52.3;
+              description = lib.mdDoc ''
+                Latitude of your location required to calculate the time the sun rises and sets.
+              '';
+            };
+
+            longitude = mkOption {
+              type = types.nullOr (types.either types.float types.str);
+              default = null;
+              example = 4.9;
+              description = lib.mdDoc ''
+                Longitude of your location required to calculate the time the sun rises and sets.
+              '';
+            };
+
+            unit_system = mkOption {
+              type = types.nullOr (types.enum [ "metric" "imperial" ]);
+              default = null;
+              example = "metric";
+              description = lib.mdDoc ''
+                The unit system to use. This also sets temperature_unit: Celsius for metric and Fahrenheit for imperial.
+              '';
+            };
+
+            temperature_unit = mkOption {
+              type = types.nullOr (types.enum [ "C" "F" ]);
+              default = null;
+              example = "C";
+              description = lib.mdDoc ''
+                Override temperature unit set by unit_system. `C` for Celsius, `F` for Fahrenheit.
+              '';
+            };
+
+            time_zone = mkOption {
+              type = types.nullOr types.str;
+              default = config.time.timeZone or null;
+              defaultText = literalExpression ''
+                config.time.timeZone or null
+              '';
+              example = "Europe/Amsterdam";
+              description = lib.mdDoc ''
+                Pick your time zone from the column TZ of Wikipedia’s [list of tz database time zones](https://en.wikipedia.org/wiki/List_of_tz_database_time_zones).
+              '';
+            };
+          };
+
+          http = {
+            # https://www.home-assistant.io/integrations/http/
+            server_host = mkOption {
+              type = types.either types.str (types.listOf types.str);
+              default = [
+                "0.0.0.0"
+                "::"
+              ];
+              example = "::1";
+              description = lib.mdDoc ''
+                Only listen to incoming requests on specific IP/host. The default listed assumes support for IPv4 and IPv6.
+              '';
+            };
+
+            server_port = mkOption {
+              default = 8123;
+              type = types.port;
+              description = lib.mdDoc ''
+                The port on which to listen.
+              '';
+            };
+          };
+
+          lovelace = {
+            # https://www.home-assistant.io/lovelace/dashboards/
+            mode = mkOption {
+              type = types.enum [ "yaml" "storage" ];
+              default = if cfg.lovelaceConfig != null
+                then "yaml"
+                else "storage";
+              defaultText = literalExpression ''
+                if cfg.lovelaceConfig != null
+                  then "yaml"
+                  else "storage";
+              '';
+              example = "yaml";
+              description = lib.mdDoc ''
+                In what mode should the main Lovelace panel be, `yaml` or `storage` (UI managed).
+              '';
+            };
+          };
+        };
+      });
+      example = literalExpression ''
+        {
+          homeassistant = {
+            name = "Home";
+            latitude = "!secret latitude";
+            longitude = "!secret longitude";
+            elevation = "!secret elevation";
+            unit_system = "metric";
+            time_zone = "UTC";
+          };
+          frontend = {
+            themes = "!include_dir_merge_named themes";
+          };
+          http = {};
+          feedreader.urls = [ "https://nixos.org/blogs.xml" ];
+        }
+      '';
+      description = lib.mdDoc ''
+        Your {file}`configuration.yaml` as a Nix attribute set.
+
+        YAML functions like [secrets](https://www.home-assistant.io/docs/configuration/secrets/)
+        can be passed as a string and will be unquoted automatically.
+
+        Unless this option is explicitly set to `null`,
+        we assume your {file}`configuration.yaml` is
+        managed through this module and thereby overwritten on startup.
+      '';
+    };
+
+    configWritable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to make {file}`configuration.yaml` writable.
+
+        This will allow you to edit it from Home Assistant's web interface.
+
+        This only has an effect if {option}`config` is set.
+        However, bear in mind that it will be overwritten at every start of the service.
+      '';
+    };
+
+    lovelaceConfig = mkOption {
+      default = null;
+      type = types.nullOr format.type;
+      # from https://www.home-assistant.io/lovelace/dashboards/
+      example = literalExpression ''
+        {
+          title = "My Awesome Home";
+          views = [ {
+            title = "Example";
+            cards = [ {
+              type = "markdown";
+              title = "Lovelace";
+              content = "Welcome to your **Lovelace UI**.";
+            } ];
+          } ];
+        }
+      '';
+      description = lib.mdDoc ''
+        Your {file}`ui-lovelace.yaml` as a Nix attribute set.
+        Setting this option will automatically set `lovelace.mode` to `yaml`.
+
+        Beware that setting this option will delete your previous {file}`ui-lovelace.yaml`.
+      '';
+    };
+
+    lovelaceConfigWritable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to make {file}`ui-lovelace.yaml` writable.
+
+        This will allow you to edit it from Home Assistant's web interface.
+
+        This only has an effect if {option}`lovelaceConfig` is set.
+        However, bear in mind that it will be overwritten at every start of the service.
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.home-assistant.overrideAttrs (oldAttrs: {
+        doInstallCheck = false;
+      });
+      defaultText = literalExpression ''
+        pkgs.home-assistant.overrideAttrs (oldAttrs: {
+          doInstallCheck = false;
+        })
+      '';
+      type = types.package;
+      example = literalExpression ''
+        pkgs.home-assistant.override {
+          extraPackages = python3Packages: with python3Packages; [
+            psycopg2
+          ];
+          extraComponents = [
+            "default_config"
+            "esphome"
+            "met"
+          ];
+        }
+      '';
+      description = lib.mdDoc ''
+        The Home Assistant package to use.
+      '';
+    };
+
+    openFirewall = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc "Whether to open the firewall for the specified port.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.openFirewall -> cfg.config != null;
+        message = "openFirewall can only be used with a declarative config";
+      }
+    ];
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.config.http.server_port ];
+
+    # symlink the configuration to /etc/home-assistant
+    environment.etc = lib.mkMerge [
+      (lib.mkIf (cfg.config != null && !cfg.configWritable) {
+        "home-assistant/configuration.yaml".source = configFile;
+      })
+
+      (lib.mkIf (cfg.lovelaceConfig != null && !cfg.lovelaceConfigWritable) {
+        "home-assistant/ui-lovelace.yaml".source = lovelaceConfigFile;
+      })
+    ];
+
+    systemd.services.home-assistant = {
+      description = "Home Assistant";
+      after = [
+        "network-online.target"
+
+        # prevent races with database creation
+        "mysql.service"
+        "postgresql.service"
+      ];
+      reloadTriggers = lib.optional (cfg.config != null) configFile
+      ++ lib.optional (cfg.lovelaceConfig != null) lovelaceConfigFile;
+
+      preStart = let
+        copyConfig = if cfg.configWritable then ''
+          cp --no-preserve=mode ${configFile} "${cfg.configDir}/configuration.yaml"
+        '' else ''
+          rm -f "${cfg.configDir}/configuration.yaml"
+          ln -s /etc/home-assistant/configuration.yaml "${cfg.configDir}/configuration.yaml"
+        '';
+        copyLovelaceConfig = if cfg.lovelaceConfigWritable then ''
+          cp --no-preserve=mode ${lovelaceConfigFile} "${cfg.configDir}/ui-lovelace.yaml"
+        '' else ''
+          rm -f "${cfg.configDir}/ui-lovelace.yaml"
+          ln -s /etc/home-assistant/ui-lovelace.yaml "${cfg.configDir}/ui-lovelace.yaml"
+        '';
+        copyCustomLovelaceModules = if cfg.customLovelaceModules != [] then ''
+          mkdir -p "${cfg.configDir}/www"
+          ln -fns ${customLovelaceModulesDir} "${cfg.configDir}/www/nixos-lovelace-modules"
+        '' else ''
+          rm -f "${cfg.configDir}/www/nixos-lovelace-modules"
+        '';
+        copyCustomComponents = ''
+          mkdir -p "${cfg.configDir}/custom_components"
+
+          # remove components symlinked in from below the /nix/store
+          components="$(find "${cfg.configDir}/custom_components" -maxdepth 1 -type l)"
+          # word splitting over the find output is intentional here
+          for component in $components; do
+            if [[ "$(readlink "$component")" =~ ^${escapeShellArg builtins.storeDir} ]]; then
+              rm "$component"
+            fi
+          done
+
+          # recreate symlinks for desired components
+          declare -a components=(${escapeShellArgs cfg.customComponents})
+          for component in "''${components[@]}"; do
+            path="$(dirname $(find "$component" -name "manifest.json"))"
+            ln -fns "$path" "${cfg.configDir}/custom_components/"
+          done
+        '';
+      in
+        (optionalString (cfg.config != null) copyConfig) +
+        (optionalString (cfg.lovelaceConfig != null) copyLovelaceConfig) +
+        copyCustomLovelaceModules +
+        copyCustomComponents
+      ;
+      environment.PYTHONPATH = package.pythonPath;
+      serviceConfig = let
+        # List of capabilities to equip home-assistant with, depending on configured components
+        capabilities = lib.unique ([
+          # Empty string first, so we will never accidentally have an empty capability bounding set
+          # https://github.com/NixOS/nixpkgs/issues/120617#issuecomment-830685115
+          ""
+        ] ++ lib.optionals (builtins.any useComponent componentsUsingBluetooth) [
+          # Required for interaction with hci devices and bluetooth sockets, identified by bluetooth-adapters dependency
+          # https://www.home-assistant.io/integrations/bluetooth_le_tracker/#rootless-setup-on-core-installs
+          "CAP_NET_ADMIN"
+          "CAP_NET_RAW"
+        ] ++ lib.optionals (useComponent "emulated_hue") [
+          # Alexa looks for the service on port 80
+          # https://www.home-assistant.io/integrations/emulated_hue
+          "CAP_NET_BIND_SERVICE"
+        ] ++ lib.optionals (useComponent "nmap_tracker") [
+          # https://www.home-assistant.io/integrations/nmap_tracker#linux-capabilities
+          "CAP_NET_ADMIN"
+          "CAP_NET_BIND_SERVICE"
+          "CAP_NET_RAW"
+        ]);
+        componentsUsingBluetooth = [
+          # Components that require the AF_BLUETOOTH address family
+          "august"
+          "august_ble"
+          "airthings_ble"
+          "aranet"
+          "bluemaestro"
+          "bluetooth"
+          "bluetooth_adapters"
+          "bluetooth_le_tracker"
+          "bluetooth_tracker"
+          "bthome"
+          "default_config"
+          "eq3btsmart"
+          "eufylife_ble"
+          "esphome"
+          "fjaraskupan"
+          "gardena_bluetooth"
+          "govee_ble"
+          "homekit_controller"
+          "inkbird"
+          "improv_ble"
+          "keymitt_ble"
+          "led_ble"
+          "medcom_ble"
+          "melnor"
+          "moat"
+          "mopeka"
+          "oralb"
+          "private_ble_device"
+          "qingping"
+          "rapt_ble"
+          "ruuvi_gateway"
+          "ruuvitag_ble"
+          "sensirion_ble"
+          "sensorpro"
+          "sensorpush"
+          "shelly"
+          "snooz"
+          "switchbot"
+          "thermobeacon"
+          "thermopro"
+          "tilt_ble"
+          "xiaomi_ble"
+          "yalexs_ble"
+        ];
+        componentsUsingPing = [
+          # Components that require the capset syscall for the ping wrapper
+          "ping"
+          "wake_on_lan"
+        ];
+        componentsUsingSerialDevices = [
+          # Components that require access to serial devices (/dev/tty*)
+          # List generated from home-assistant documentation:
+          #   git clone https://github.com/home-assistant/home-assistant.io/
+          #   cd source/_integrations
+          #   rg "/dev/tty" -l | cut -d'/' -f3 | cut -d'.' -f1 | sort
+          # And then extended by references found in the source code; these are
+          # mostly the ones already using config flows.
+          "acer_projector"
+          "alarmdecoder"
+          "blackbird"
+          "deconz"
+          "dsmr"
+          "edl21"
+          "elkm1"
+          "elv"
+          "enocean"
+          "firmata"
+          "flexit"
+          "gpsd"
+          "insteon"
+          "kwb"
+          "lacrosse"
+          "modbus"
+          "modem_callerid"
+          "mysensors"
+          "nad"
+          "numato"
+          "otbr"
+          "rflink"
+          "rfxtrx"
+          "scsgate"
+          "serial"
+          "serial_pm"
+          "sms"
+          "upb"
+          "usb"
+          "velbus"
+          "w800rf32"
+          "zha"
+          "zwave"
+          "zwave_js"
+        ];
+      in {
+        ExecStart = "${package}/bin/hass --config '${cfg.configDir}'";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        User = "hass";
+        Group = "hass";
+        Restart = "on-failure";
+        RestartForceExitStatus = "100";
+        SuccessExitStatus = "100";
+        KillSignal = "SIGINT";
+
+        # Hardening
+        AmbientCapabilities = capabilities;
+        CapabilityBoundingSet = capabilities;
+        DeviceAllow = (optionals (any useComponent componentsUsingSerialDevices) [
+          "char-ttyACM rw"
+          "char-ttyAMA rw"
+          "char-ttyUSB rw"
+        ]);
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        PrivateUsers = false; # prevents gaining capabilities in the host namespace
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProcSubset = "all";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
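+        # Besides the config directory, grant write access to any directories
+        # listed in the declarative config under
+        # `homeassistant.allowlist_external_dirs`, e.g. (hypothetical path)
+        #   services.home-assistant.config.homeassistant.allowlist_external_dirs = [ "/media" ];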
+        ReadWritePaths = let
+          # Allow rw access to explicitly configured paths
+          cfgPath = [ "config" "homeassistant" "allowlist_external_dirs" ];
+          value = attrByPath cfgPath [] cfg;
+          allowPaths = if isList value then value else singleton value;
+        in [ "${cfg.configDir}" ] ++ allowPaths;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_NETLINK"
+          "AF_UNIX"
+        ] ++ optionals (any useComponent componentsUsingBluetooth) [
+          "AF_BLUETOOTH"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SupplementaryGroups = optionals (any useComponent componentsUsingSerialDevices) [
+          "dialout"
+        ];
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ] ++ optionals (any useComponent componentsUsingPing) [
+          "capset"
+          "setuid"
+        ];
+        UMask = "0077";
+      };
+      path = [
+        pkgs.unixtools.ping # needed for ping
+      ];
+    };
+
+    systemd.targets.home-assistant = rec {
+      description = "Home Assistant";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "home-assistant.service" ];
+      after = wants;
+    };
+
+    users.users.hass = {
+      home = cfg.configDir;
+      createHome = true;
+      group = "hass";
+      uid = config.ids.uids.hass;
+    };
+
+    users.groups.hass.gid = config.ids.gids.hass;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/home-automation/homeassistant-satellite.nix b/nixpkgs/nixos/modules/services/home-automation/homeassistant-satellite.nix
new file mode 100644
index 000000000000..e3f0617cf01c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/home-automation/homeassistant-satellite.nix
@@ -0,0 +1,225 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.homeassistant-satellite;
+
+  inherit (lib)
+    escapeShellArg
+    escapeShellArgs
+    mkOption
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkPackageOptionMD
+    types
+    ;
+
+  inherit (builtins)
+    toString
+    ;
+
+  # override the package with the relevant vad dependencies
+  package = cfg.package.overridePythonAttrs (oldAttrs: {
+    propagatedBuildInputs = oldAttrs.propagatedBuildInputs
+      ++ lib.optional (cfg.vad == "webrtcvad") cfg.package.optional-dependencies.webrtc
+      ++ lib.optional (cfg.vad == "silero") cfg.package.optional-dependencies.silerovad
+      ++ lib.optional (cfg.pulseaudio.enable) cfg.package.optional-dependencies.pulseaudio;
+  });
+
+in
+
+{
+  meta.buildDocsInSandbox = false;
+
+  options.services.homeassistant-satellite = with types; {
+    enable = mkEnableOption (mdDoc "Home Assistant Satellite");
+
+    package = mkPackageOptionMD pkgs "homeassistant-satellite" { };
+
+    user = mkOption {
+      type = str;
+      example = "alice";
+      description = mdDoc ''
+        User to run homeassistant-satellite under.
+      '';
+    };
+
+    group = mkOption {
+      type = str;
+      default = "users";
+      description = mdDoc ''
+        Group to run homeassistant-satellite under.
+      '';
+    };
+
+    host = mkOption {
+      type = str;
+      example = "home-assistant.local";
+      description = mdDoc ''
+        Hostname on which your Home Assistant instance can be reached.
+      '';
+    };
+
+    port = mkOption {
+      type = port;
+      example = 8123;
+      description = mdDoc ''
+        Port on which your Home Assistant instance can be reached.
+      '';
+      apply = toString;
+    };
+
+    protocol = mkOption {
+      type = enum [ "http" "https" ];
+      default = "http";
+      example = "https";
+      description = mdDoc ''
+        The transport protocol used to connect to Home Assistant.
+      '';
+    };
+
+    tokenFile = mkOption {
+      type = path;
+      example = "/run/keys/hass-token";
+      description = mdDoc ''
+        Path to a file containing a long-lived access token for your Home Assistant instance.
+      '';
+      apply = escapeShellArg;
+    };
+
+    sounds = {
+      awake = mkOption {
+        type = nullOr str;
+        default = null;
+        description = mdDoc ''
+          Audio file to play when the wake word is detected.
+        '';
+      };
+
+      done = mkOption {
+        type = nullOr str;
+        default = null;
+        description = mdDoc ''
+          Audio file to play when the voice command is done.
+        '';
+      };
+    };
+
+    vad = mkOption {
+      type = enum [ "disabled" "webrtcvad" "silero" ];
+      default = "disabled";
+      example = "silero";
+      description = mdDoc ''
+        Voice activity detection model. With `disabled`, sound will be transmitted continuously.
+      '';
+    };
+
+    pulseaudio = {
+      enable = mkEnableOption "recording/playback via PulseAudio or PipeWire";
+
+      socket = mkOption {
+        type = nullOr str;
+        default = null;
+        example = "/run/user/1000/pulse/native";
+        description = mdDoc ''
+          Socket path or hostname used to connect to the PulseAudio server.
+        '';
+      };
+
+      duckingVolume = mkOption {
+        type = nullOr float;
+        default = null;
+        example = 0.4;
+        description = mdDoc ''
+          Reduce the output volume to this fraction (between 0 and 1) while recording.
+        '';
+      };
+
+      echoCancellation = mkEnableOption "acoustic echo cancellation";
+    };
+
+    extraArgs = mkOption {
+      type = listOf str;
+      default = [ ];
+      description = mdDoc ''
+        Extra arguments to append to the command line.
+      '';
+      apply = escapeShellArgs;
+    };
+  };
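+
+  # A minimal usage sketch based on the options above; user, hostname and
+  # token path are hypothetical:
+  #
+  #   services.homeassistant-satellite = {
+  #     enable = true;
+  #     user = "alice";
+  #     host = "home-assistant.local";
+  #     port = 8123;
+  #     tokenFile = "/run/keys/hass-token";
+  #   };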
+
+  config = mkIf cfg.enable {
+    systemd.services."homeassistant-satellite" = {
+      description = "Home Assistant Satellite";
+      after = [
+        "network-online.target"
+      ];
+      wants = [
+        "network-online.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+      path = with pkgs; [
+        ffmpeg-headless
+      ] ++ lib.optionals (!cfg.pulseaudio.enable) [
+        alsa-utils
+      ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        # https://github.com/rhasspy/hassio-addons/blob/master/assist_microphone/rootfs/etc/s6-overlay/s6-rc.d/assist_microphone/run
+        ExecStart = ''
+          ${package}/bin/homeassistant-satellite \
+            --host ${cfg.host} \
+            --port ${cfg.port} \
+            --protocol ${cfg.protocol} \
+            --token-file ${cfg.tokenFile} \
+            --vad ${cfg.vad} \
+            ${lib.optionalString cfg.pulseaudio.enable "--pulseaudio"}${lib.optionalString (cfg.pulseaudio.socket != null) "=${cfg.pulseaudio.socket}"} \
+            ${lib.optionalString (cfg.pulseaudio.enable && cfg.pulseaudio.duckingVolume != null) "--ducking-volume=${toString cfg.pulseaudio.duckingVolume}"} \
+            ${lib.optionalString (cfg.pulseaudio.enable && cfg.pulseaudio.echoCancellation) "--echo-cancel"} \
+            ${lib.optionalString (cfg.sounds.awake != null) "--awake-sound=${toString cfg.sounds.awake}"} \
+            ${lib.optionalString (cfg.sounds.done != null) "--done-sound=${toString cfg.sounds.done}"} \
+            ${cfg.extraArgs}
+        '';
+        CapabilityBoundingSet = "";
+        DeviceAllow = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = false; # onnxruntime/capi/onnxruntime_pybind11_state.so: cannot enable executable stack as shared object requires: Operation not permitted
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHome = false; # Would deny access to local pulse/pipewire server
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectProc = "invisible";
+        ProcSubset = "all"; # Error in cpuinfo: failed to parse processor information from /proc/cpuinfo
+        Restart = "always";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_UNIX"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SupplementaryGroups = [
+          "audio"
+        ];
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        UMask = "0077";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/home-automation/zigbee2mqtt.nix b/nixpkgs/nixos/modules/services/home-automation/zigbee2mqtt.nix
new file mode 100644
index 000000000000..6b5bd8a0d9bb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/home-automation/zigbee2mqtt.nix
@@ -0,0 +1,142 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.zigbee2mqtt;
+
+  format = pkgs.formats.yaml { };
+  configFile = format.generate "zigbee2mqtt.yaml" cfg.settings;
+
+in
+{
+  meta.maintainers = with maintainers; [ sweber hexa ];
+
+  imports = [
+    # Remove warning before the 21.11 release
+    (mkRenamedOptionModule [ "services" "zigbee2mqtt" "config" ] [ "services" "zigbee2mqtt" "settings" ])
+  ];
+
+  options.services.zigbee2mqtt = {
+    enable = mkEnableOption (lib.mdDoc "zigbee2mqtt service");
+
+    package = mkOption {
+      description = lib.mdDoc "Zigbee2mqtt package to use";
+      default = pkgs.zigbee2mqtt;
+      defaultText = literalExpression ''
+        pkgs.zigbee2mqtt
+      '';
+      type = types.package;
+    };
+
+    dataDir = mkOption {
+      description = lib.mdDoc "Zigbee2mqtt data directory";
+      default = "/var/lib/zigbee2mqtt";
+      type = types.path;
+    };
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      example = literalExpression ''
+        {
+          homeassistant = config.services.home-assistant.enable;
+          permit_join = true;
+          serial = {
+            port = "/dev/ttyACM1";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Your {file}`configuration.yaml` as a Nix attribute set.
+        Check the [documentation](https://www.zigbee2mqtt.io/information/configuration.html)
+        for possible options.
+      '';
+    };
+  };
+
+  config = mkIf (cfg.enable) {
+
+    # preset config values
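+    # (set with mkDefault so that values given by the user in
+    # `services.zigbee2mqtt.settings` override them without mkForce, e.g.
+    # services.zigbee2mqtt.settings.serial.port = "/dev/ttyACM1";)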
+    services.zigbee2mqtt.settings = {
+      homeassistant = mkDefault config.services.home-assistant.enable;
+      permit_join = mkDefault false;
+      mqtt = {
+        base_topic = mkDefault "zigbee2mqtt";
+        server = mkDefault "mqtt://localhost:1883";
+      };
+      serial.port = mkDefault "/dev/ttyACM0";
+      # reference device/group configuration, which is kept in separate files
+      # to prevent them from being overwritten by the unit's ExecStartPre script
+      devices = mkDefault "devices.yaml";
+      groups = mkDefault "groups.yaml";
+    };
+
+    systemd.services.zigbee2mqtt = {
+      description = "Zigbee2mqtt Service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment.ZIGBEE2MQTT_DATA = cfg.dataDir;
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/zigbee2mqtt";
+        User = "zigbee2mqtt";
+        Group = "zigbee2mqtt";
+        WorkingDirectory = cfg.dataDir;
+        Restart = "on-failure";
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        DeviceAllow = [
+          config.services.zigbee2mqtt.settings.serial.port
+        ];
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = false;
+        NoNewPrivileges = true;
+        PrivateDevices = false; # PrivateDevices=true would prevent access to /dev/serial, which is owned root:root with mode 0700
+        PrivateUsers = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectSystem = "strict";
+        ReadWritePaths = cfg.dataDir;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SupplementaryGroups = [
+          "dialout"
+        ];
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service @pkey"
+          "~@privileged @resources"
+        ];
+        UMask = "0077";
+      };
+      preStart = ''
+        cp --no-preserve=mode ${configFile} "${cfg.dataDir}/configuration.yaml"
+      '';
+    };
+
+    users.users.zigbee2mqtt = {
+      home = cfg.dataDir;
+      createHome = true;
+      group = "zigbee2mqtt";
+      uid = config.ids.uids.zigbee2mqtt;
+    };
+
+    users.groups.zigbee2mqtt.gid = config.ids.gids.zigbee2mqtt;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/home-automation/zwave-js.nix b/nixpkgs/nixos/modules/services/home-automation/zwave-js.nix
new file mode 100644
index 000000000000..87c9b8f1ac81
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/home-automation/zwave-js.nix
@@ -0,0 +1,152 @@
+{config, pkgs, lib, ...}:
+
+with lib;
+
+let
+  cfg = config.services.zwave-js;
+  mergedConfigFile = "/run/zwave-js/config.json";
+  settingsFormat = pkgs.formats.json {};
+in {
+  options.services.zwave-js = {
+    enable = mkEnableOption (mdDoc "the zwave-js server on boot");
+
+    package = mkPackageOptionMD pkgs "zwave-js-server" { };
+
+    port = mkOption {
+      type = types.port;
+      default = 3000;
+      description = mdDoc ''
+        Port for the server to listen on.
+      '';
+    };
+
+    serialPort = mkOption {
+      type = types.path;
+      description = mdDoc ''
+        Serial port device path for Z-Wave controller.
+      '';
+      example = "/dev/ttyUSB0";
+    };
+
+    secretsConfigFile = mkOption {
+      type = types.path;
+      description = mdDoc ''
+        JSON file containing secret keys. A dummy example:
+
+        ```
+        {
+          "securityKeys": {
+            "S0_Legacy": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
+            "S2_Unauthenticated": "BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB",
+            "S2_Authenticated": "CCCCCCCCCCCCCCCCCCCCCCCCCCCCCCCC",
+            "S2_AccessControl": "DDDDDDDDDDDDDDDDDDDDDDDDDDDDDDDD"
+          }
+        }
+        ```
+
+        See
+        <https://zwave-js.github.io/node-zwave-js/#/getting-started/security-s2>
+        for details. This file will be merged with the module-generated config
+        file (taking precedence).
+
+        Z-Wave keys can be generated with:
+
+          {command}`< /dev/urandom tr -dc A-F0-9 | head -c32 ;echo`
+
+
+        ::: {.warning}
+        A file in the Nix store should not be used, since it would be readable
+        by all users.
+        :::
+      '';
+      example = "/secrets/zwave-js-keys.json";
+    };
+
+    settings = mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          storage = {
+            cacheDir = mkOption {
+              type = types.path;
+              default = "/var/cache/zwave-js";
+              readOnly = true;
+              description = lib.mdDoc "Cache directory";
+            };
+          };
+        };
+      };
+      default = {};
+      description = mdDoc ''
+        Configuration settings for the generated config
+        file.
+      '';
+    };
+
+    extraFlags = lib.mkOption {
+      type = with lib.types; listOf str;
+      default = [ ];
+      example = [ "--mock-driver" ];
+      description = lib.mdDoc ''
+        Extra flags to pass to command
+      '';
+    };
+  };
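+
+  # A minimal usage sketch based on the options above; the device path and
+  # secrets file location are hypothetical:
+  #
+  #   services.zwave-js = {
+  #     enable = true;
+  #     serialPort = "/dev/ttyUSB0";
+  #     secretsConfigFile = "/secrets/zwave-js-keys.json";
+  #   };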
+
+  config = mkIf cfg.enable {
+    systemd.services.zwave-js = let
+      configFile = settingsFormat.generate "zwave-js-config.json" cfg.settings;
+    in {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "Z-Wave JS Server";
+      serviceConfig = {
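+        # jq -s (slurp) reads both JSON files into one array and `.[0] * .[1]`
+        # recursively merges them, so keys from the secrets file take
+        # precedence, matching the option description above.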
+        ExecStartPre = ''
+          /bin/sh -c "${pkgs.jq}/bin/jq -s '.[0] * .[1]' ${configFile} ${cfg.secretsConfigFile} > ${mergedConfigFile}"
+        '';
+        ExecStart = lib.concatStringsSep " " [
+          "${cfg.package}/bin/zwave-server"
+          "--config ${mergedConfigFile}"
+          "--port ${toString cfg.port}"
+          cfg.serialPort
+          (escapeShellArgs cfg.extraFlags)
+        ];
+        Restart = "on-failure";
+        User = "zwave-js";
+        SupplementaryGroups = [ "dialout" ];
+        CacheDirectory = "zwave-js";
+        RuntimeDirectory = "zwave-js";
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        DeviceAllow = [cfg.serialPort];
+        DevicePolicy = "closed";
+        DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = false;
+        NoNewPrivileges = true;
+        PrivateUsers = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service @pkey"
+          "~@privileged @resources"
+        ];
+        UMask = "0077";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ graham33 ];
+}
diff --git a/nixpkgs/nixos/modules/services/logging/SystemdJournal2Gelf.nix b/nixpkgs/nixos/modules/services/logging/SystemdJournal2Gelf.nix
new file mode 100644
index 000000000000..3d85c2b62c63
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/SystemdJournal2Gelf.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.SystemdJournal2Gelf;
+in
+
+{ options = {
+    services.SystemdJournal2Gelf = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable SystemdJournal2Gelf.
+        '';
+      };
+
+      graylogServer = mkOption {
+        type = types.str;
+        example = "graylog2.example.com:11201";
+        description = lib.mdDoc ''
+          Host and port of your graylog2 input. This should be a GELF
+          UDP input.
+        '';
+      };
+
+      extraOptions = mkOption {
+        type = types.separatedString " ";
+        default = "";
+        description = lib.mdDoc ''
+          Any extra flags to pass to SystemdJournal2Gelf. Note that
+          these are basically `journalctl` flags.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.systemd-journal2gelf;
+        defaultText = literalExpression "pkgs.systemd-journal2gelf";
+        description = lib.mdDoc ''
+          SystemdJournal2Gelf package to use.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.SystemdJournal2Gelf = {
+      description = "SystemdJournal2Gelf";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/SystemdJournal2Gelf ${cfg.graylogServer} --follow ${cfg.extraOptions}";
+        Restart = "on-failure";
+        RestartSec = "30";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/logging/awstats.nix b/nixpkgs/nixos/modules/services/logging/awstats.nix
new file mode 100644
index 000000000000..708775bfcf03
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/awstats.nix
@@ -0,0 +1,255 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.awstats;
+  package = pkgs.awstats;
+  configOpts = {name, config, ...}: {
+    options = {
+      type = mkOption{
+        type = types.enum [ "mail" "web" ];
+        default = "web";
+        example = "mail";
+        description = lib.mdDoc ''
+          The type of log being collected.
+        '';
+      };
+      domain = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "The domain name to collect stats for.";
+        example = "example.com";
+      };
+
+      logFile = mkOption {
+        type = types.str;
+        example = "/var/log/nginx/access.log";
+        description = lib.mdDoc ''
+          The log file to be scanned.
+
+          For mail, set this to
+          ```
+          journalctl $OLD_CURSOR -u postfix.service | ''${pkgs.perl}/bin/perl ''${pkgs.awstats.out}/share/awstats/tools/maillogconvert.pl standard |
+          ```
+        '';
+      };
+
+      logFormat = mkOption {
+        type = types.str;
+        default = "1";
+        description = lib.mdDoc ''
+          The log format being used.
+
+          For mail, set this to
+          ```
+          %time2 %email %email_r %host %host_r %method %url %code %bytesd
+          ```
+        '';
+      };
+
+      hostAliases = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "www.example.org" ];
+        description = lib.mdDoc ''
+          List of aliases the site has.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = literalExpression ''
+          {
+            "ValidHTTPCodes" = "404";
+          }
+        '';
+        description = lib.mdDoc "Extra configuration to be appended to awstats.\${name}.conf.";
+      };
+
+      webService = {
+        enable = mkEnableOption (lib.mdDoc "awstats web service");
+
+        hostname = mkOption {
+          type = types.str;
+          default = config.domain;
+          description = lib.mdDoc "The hostname the web service appears under.";
+        };
+
+        urlPrefix = mkOption {
+          type = types.str;
+          default = "/awstats";
+          description = lib.mdDoc "The URL prefix under which the awstats pages appear.";
+        };
+      };
+    };
+  };
+  webServices = filterAttrs (name: value: value.webService.enable) cfg.configs;
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "awstats" "service" "enable" ] "Please enable per domain with `services.awstats.configs.<name>.webService.enable`")
+    (mkRemovedOptionModule [ "services" "awstats" "service" "urlPrefix" ] "Please set per domain with `services.awstats.configs.<name>.webService.urlPrefix`")
+    (mkRenamedOptionModule [ "services" "awstats" "vardir" ] [ "services" "awstats" "dataDir" ])
+  ];
+
+  options.services.awstats = {
+    enable = mkEnableOption (lib.mdDoc "awstats");
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/awstats";
+      description = lib.mdDoc "The directory where awstats data will be stored.";
+    };
+
+    configs = mkOption {
+      type = types.attrsOf (types.submodule configOpts);
+      default = {};
+      example = literalExpression ''
+        {
+          "mysite" = {
+            domain = "example.com";
+            logFile = "/var/log/nginx/access.log";
+          };
+        }
+      '';
+      description = lib.mdDoc "Attribute set of domains to collect stats for.";
+    };
+
+    updateAt = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "hourly";
+      description = lib.mdDoc ''
+        Specification of the time at which awstats will get updated,
+        in the format described by {manpage}`systemd.time(7)`.
+      '';
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ package.bin ];
+
+    environment.etc = mapAttrs' (name: opts:
+    nameValuePair "awstats/awstats.${name}.conf" {
+      source = pkgs.runCommand "awstats.${name}.conf"
+      { preferLocalBuild = true; }
+      (''
+        sed \
+      ''
+      # set up mail stats
+      + optionalString (opts.type == "mail")
+      ''
+        -e 's|^\(LogType\)=.*$|\1=M|' \
+        -e 's|^\(LevelForBrowsersDetection\)=.*$|\1=0|' \
+        -e 's|^\(LevelForOSDetection\)=.*$|\1=0|' \
+        -e 's|^\(LevelForRefererAnalyze\)=.*$|\1=0|' \
+        -e 's|^\(LevelForRobotsDetection\)=.*$|\1=0|' \
+        -e 's|^\(LevelForSearchEnginesDetection\)=.*$|\1=0|' \
+        -e 's|^\(LevelForFileTypesDetection\)=.*$|\1=0|' \
+        -e 's|^\(LevelForWormsDetection\)=.*$|\1=0|' \
+        -e 's|^\(ShowMenu\)=.*$|\1=1|' \
+        -e 's|^\(ShowSummary\)=.*$|\1=HB|' \
+        -e 's|^\(ShowMonthStats\)=.*$|\1=HB|' \
+        -e 's|^\(ShowDaysOfMonthStats\)=.*$|\1=HB|' \
+        -e 's|^\(ShowDaysOfWeekStats\)=.*$|\1=HB|' \
+        -e 's|^\(ShowHoursStats\)=.*$|\1=HB|' \
+        -e 's|^\(ShowDomainsStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowHostsStats\)=.*$|\1=HB|' \
+        -e 's|^\(ShowAuthenticatedUsers\)=.*$|\1=0|' \
+        -e 's|^\(ShowRobotsStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowEMailSenders\)=.*$|\1=HBML|' \
+        -e 's|^\(ShowEMailReceivers\)=.*$|\1=HBML|' \
+        -e 's|^\(ShowSessionsStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowPagesStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowFileTypesStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowFileSizesStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowBrowsersStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowOSStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowOriginStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowKeyphrasesStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowKeywordsStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowMiscStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowHTTPErrorsStats\)=.*$|\1=0|' \
+        -e 's|^\(ShowSMTPErrorsStats\)=.*$|\1=1|' \
+      ''
+      +
+      # common options
+      ''
+        -e 's|^\(DirData\)=.*$|\1="${cfg.dataDir}/${name}"|' \
+        -e 's|^\(DirIcons\)=.*$|\1="icons"|' \
+        -e 's|^\(CreateDirDataIfNotExists\)=.*$|\1=1|' \
+        -e 's|^\(SiteDomain\)=.*$|\1="${name}"|' \
+        -e 's|^\(LogFile\)=.*$|\1="${opts.logFile}"|' \
+        -e 's|^\(LogFormat\)=.*$|\1="${opts.logFormat}"|' \
+      ''
+      +
+      # extra config
+      concatStringsSep "\n" (mapAttrsToList (n: v: ''
+        -e 's|^\(${n}\)=.*$|\1="${v}"|' \
+      '') opts.extraConfig)
+      +
+      ''
+        < '${package.out}/wwwroot/cgi-bin/awstats.model.conf' > "$out"
+      '');
+    }) cfg.configs;
+
+    # create data directory with the correct permissions
+    systemd.tmpfiles.rules =
+      [ "d '${cfg.dataDir}' 755 root root - -" ] ++
+      mapAttrsToList (name: opts: "d '${cfg.dataDir}/${name}' 755 root root - -") cfg.configs ++
+      [ "Z '${cfg.dataDir}' 755 root root - -" ];
+
+    # nginx options
+    services.nginx.virtualHosts = mapAttrs'(name: opts: {
+      name = opts.webService.hostname;
+      value = {
+        locations = {
+          "${opts.webService.urlPrefix}/css/" = {
+            alias = "${package.out}/wwwroot/css/";
+          };
+          "${opts.webService.urlPrefix}/icons/" = {
+            alias = "${package.out}/wwwroot/icon/";
+          };
+          "${opts.webService.urlPrefix}/" = {
+            alias = "${cfg.dataDir}/${name}/";
+            extraConfig = ''
+              autoindex on;
+            '';
+          };
+        };
+      };
+    }) webServices;
+
+    # update awstats
+    systemd.services = mkIf (cfg.updateAt != null) (mapAttrs' (name: opts:
+      nameValuePair "awstats-${name}-update" {
+        description = "update awstats for ${name}";
+        script = optionalString (opts.type == "mail")
+        ''
+          if [[ -f "${cfg.dataDir}/${name}-cursor" ]]; then
+            CURSOR="$(cat "${cfg.dataDir}/${name}-cursor" | tr -d '\n')"
+            if [[ -n "$CURSOR" ]]; then
+              echo "Using cursor: $CURSOR"
+              export OLD_CURSOR="--cursor $CURSOR"
+            fi
+          fi
+          NEW_CURSOR="$(journalctl $OLD_CURSOR -u postfix.service --show-cursor | tail -n 1 | tr -d '\n' | sed -e 's#^-- cursor: \(.*\)#\1#')"
+          echo "New cursor: $NEW_CURSOR"
+          ${package.bin}/bin/awstats -update -config=${name}
+          if [ -n "$NEW_CURSOR" ]; then
+            echo -n "$NEW_CURSOR" > ${cfg.dataDir}/${name}-cursor
+          fi
+        '' + ''
+          ${package.out}/share/awstats/tools/awstats_buildstaticpages.pl \
+            -config=${name} -update -dir=${cfg.dataDir}/${name} \
+            -awstatsprog=${package.bin}/bin/awstats
+        '';
+        startAt = cfg.updateAt;
+    }) cfg.configs);
+  };
+
+}
+
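A minimal configuration sketch for the awstats module above, assuming a hypothetical
nginx access log and stats hostname; all option names are taken from the module itself:

  {
    services.awstats = {
      enable = true;
      updateAt = "hourly";
      configs."mysite" = {
        domain = "example.com";                  # hypothetical domain
        logFile = "/var/log/nginx/access.log";   # hypothetical log path
        webService = {
          enable = true;
          hostname = "stats.example.com";        # hypothetical hostname
        };
      };
    };
  }

With webService.enable set, the module also configures an nginx virtual host serving the
generated static pages under urlPrefix (default /awstats).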
diff --git a/nixpkgs/nixos/modules/services/logging/filebeat.nix b/nixpkgs/nixos/modules/services/logging/filebeat.nix
new file mode 100644
index 000000000000..5b5e7fd5ae89
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/filebeat.nix
@@ -0,0 +1,252 @@
+{ config, lib, utils, pkgs, ... }:
+
+let
+  inherit (lib)
+    attrValues
+    literalExpression
+    mkEnableOption
+    mkIf
+    mkOption
+    types;
+
+  cfg = config.services.filebeat;
+
+  json = pkgs.formats.json {};
+in
+{
+  options = {
+
+    services.filebeat = {
+
+      enable = mkEnableOption (lib.mdDoc "filebeat");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.filebeat;
+        defaultText = literalExpression "pkgs.filebeat";
+        example = literalExpression "pkgs.filebeat7";
+        description = lib.mdDoc ''
+          The filebeat package to use.
+        '';
+      };
+
+      inputs = mkOption {
+        description = lib.mdDoc ''
+          Inputs specify how Filebeat locates and processes input data.
+
+          This is like `services.filebeat.settings.filebeat.inputs`,
+          but structured as an attribute set. This has the benefit
+          that multiple NixOS modules can contribute settings to a
+          single filebeat input.
+
+          An input type can be specified multiple times by choosing a
+          different `<name>` for each, but setting
+          [](#opt-services.filebeat.inputs._name_.type)
+          to the same value.
+
+          See <https://www.elastic.co/guide/en/beats/filebeat/current/configuration-filebeat-options.html>.
+        '';
+        default = {};
+        type = types.attrsOf (types.submodule ({ name, ... }: {
+          freeformType = json.type;
+          options = {
+            type = mkOption {
+              type = types.str;
+              default = name;
+              description = lib.mdDoc ''
+                The input type.
+
+                Look for the value after `type:` on
+                the individual input pages linked from
+                <https://www.elastic.co/guide/en/beats/filebeat/current/configuration-filebeat-options.html>.
+              '';
+            };
+          };
+        }));
+        example = literalExpression ''
+          {
+            journald.id = "everything";  # Only for filebeat7
+            log = {
+              enabled = true;
+              paths = [
+                "/var/log/*.log"
+              ];
+            };
+          }
+        '';
+      };
+
+      modules = mkOption {
+        description = lib.mdDoc ''
+          Filebeat modules provide a quick way to get started
+          processing common log formats. They contain default
+          configurations, Elasticsearch ingest pipeline definitions,
+          and Kibana dashboards to help you implement and deploy a log
+          monitoring solution.
+
+          This is like `services.filebeat.settings.filebeat.modules`,
+          but structured as an attribute set. This has the benefit
+          that multiple NixOS modules can contribute settings to a
+          single filebeat module.
+
+          A module can be specified multiple times by choosing a
+          different `<name>` for each, but setting
+          [](#opt-services.filebeat.modules._name_.module)
+          to the same value.
+
+          See <https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-modules.html>.
+        '';
+        default = {};
+        type = types.attrsOf (types.submodule ({ name, ... }: {
+          freeformType = json.type;
+          options = {
+            module = mkOption {
+              type = types.str;
+              default = name;
+              description = lib.mdDoc ''
+                The name of the module.
+
+                Look for the value after `module:` on
+                the individual module pages linked from
+                <https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-modules.html>.
+              '';
+            };
+          };
+        }));
+        example = literalExpression ''
+          {
+            nginx = {
+              access = {
+                enabled = true;
+                var.paths = [ "/path/to/log/nginx/access.log*" ];
+              };
+              error = {
+                enabled = true;
+                var.paths = [ "/path/to/log/nginx/error.log*" ];
+              };
+            };
+          }
+        '';
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = json.type;
+
+          options = {
+
+            output.elasticsearch.hosts = mkOption {
+              type = with types; listOf str;
+              default = [ "127.0.0.1:9200" ];
+              example = [ "myEShost:9200" ];
+              description = lib.mdDoc ''
+                The list of Elasticsearch nodes to connect to.
+
+                The events are distributed to these nodes in round
+                robin order. If one node becomes unreachable, the
+                event is automatically sent to another node. Each
+                Elasticsearch node can be defined as a URL or
+                IP:PORT. For example:
+                `http://192.15.3.2`,
+                `https://es.found.io:9230` or
+                `192.24.3.2:9300`. If no port is
+                specified, `9200` is used.
+              '';
+            };
+
+            filebeat = {
+              inputs = mkOption {
+                type = types.listOf json.type;
+                default = [];
+                internal = true;
+                description = lib.mdDoc ''
+                  Inputs specify how Filebeat locates and processes
+                  input data. Use [](#opt-services.filebeat.inputs) instead.
+
+                  See <https://www.elastic.co/guide/en/beats/filebeat/current/configuration-filebeat-options.html>.
+                '';
+              };
+              modules = mkOption {
+                type = types.listOf json.type;
+                default = [];
+                internal = true;
+                description = lib.mdDoc ''
+                  Filebeat modules provide a quick way to get started
+                  processing common log formats. They contain default
+                  configurations, Elasticsearch ingest pipeline
+                  definitions, and Kibana dashboards to help you
+                  implement and deploy a log monitoring solution.
+
+                  Use [](#opt-services.filebeat.modules) instead.
+
+                  See <https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-modules.html>.
+                '';
+              };
+            };
+          };
+        };
+        default = {};
+        example = literalExpression ''
+          {
+            output.elasticsearch = {
+              hosts = [ "myEShost:9200" ];
+              username = "filebeat_internal";
+              password = { _secret = "/var/keys/elasticsearch_password"; };
+            };
+            logging.level = "info";
+          }
+        '';
+
+        description = lib.mdDoc ''
+          Configuration for filebeat. See
+          <https://www.elastic.co/guide/en/beats/filebeat/current/filebeat-reference-yml.html>
+          for supported values.
+
+          Options containing secret data should be set to an attribute
+          set containing the attribute `_secret` - a
+          string pointing to a file containing the value the option
+          should be set to. See the example to get a better picture of
+          this: in the resulting
+          {file}`filebeat.yml` file, the
+          `output.elasticsearch.password`
+          key will be set to the contents of the
+          {file}`/var/keys/elasticsearch_password` file.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.filebeat.settings.filebeat.inputs = attrValues cfg.inputs;
+    services.filebeat.settings.filebeat.modules = attrValues cfg.modules;
+
+    systemd.services.filebeat = {
+      description = "Filebeat log shipper";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "elasticsearch.service" ];
+      after = [ "elasticsearch.service" ];
+      serviceConfig = {
+        ExecStartPre = pkgs.writeShellScript "filebeat-exec-pre" ''
+          set -euo pipefail
+
+          umask u=rwx,g=,o=
+
+          ${utils.genJqSecretsReplacementSnippet
+              cfg.settings
+              "/var/lib/filebeat/filebeat.yml"
+           }
+        '';
+        ExecStart = ''
+          ${cfg.package}/bin/filebeat -e \
+            -c "/var/lib/filebeat/filebeat.yml" \
+            --path.data "/var/lib/filebeat"
+        '';
+        Restart = "always";
+        StateDirectory = "filebeat";
+      };
+    };
+  };
+}
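A sketch of how the options above compose, assuming a hypothetical log path and the
default local Elasticsearch output:

  {
    services.filebeat = {
      enable = true;
      inputs.log = {
        enabled = true;
        paths = [ "/var/log/nginx/*.log" ];   # hypothetical path
      };
      settings.output.elasticsearch.hosts = [ "127.0.0.1:9200" ];
    };
  }

The inputs attribute set is flattened into settings.filebeat.inputs by the config section
above, so other NixOS modules can contribute additional inputs without clobbering this one.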
diff --git a/nixpkgs/nixos/modules/services/logging/fluentd.nix b/nixpkgs/nixos/modules/services/logging/fluentd.nix
new file mode 100644
index 000000000000..7764aafb2d1a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/fluentd.nix
@@ -0,0 +1,54 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.fluentd;
+
+  pluginArgs = concatStringsSep " " (map (x: "-p ${x}") cfg.plugins);
+in {
+  ###### interface
+
+  options = {
+
+    services.fluentd = {
+      enable = mkEnableOption (lib.mdDoc "fluentd");
+
+      config = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Fluentd config.";
+      };
+
+      package = mkOption {
+        type = types.path;
+        default = pkgs.fluentd;
+        defaultText = literalExpression "pkgs.fluentd";
+        description = lib.mdDoc "The fluentd package to use.";
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          A list of plugin paths to pass into fluentd. It will make plugins defined in ruby files
+          there available in your config.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.fluentd = with pkgs; {
+      description = "Fluentd Daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/fluentd -c ${pkgs.writeText "fluentd.conf" cfg.config} ${pluginArgs}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+    };
+  };
+}
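A minimal sketch, assuming fluentd's standard forward input and stdout output plugins;
the config text is written to the store with pkgs.writeText and passed to fluentd via -c,
as in the ExecStart line above:

  {
    services.fluentd = {
      enable = true;
      config = ''
        <source>
          @type forward
          port 24224
        </source>
        <match **>
          @type stdout
        </match>
      '';
    };
  }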
diff --git a/nixpkgs/nixos/modules/services/logging/graylog.nix b/nixpkgs/nixos/modules/services/logging/graylog.nix
new file mode 100644
index 000000000000..673930c4cb5c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/graylog.nix
@@ -0,0 +1,169 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.graylog;
+
+  confFile = pkgs.writeText "graylog.conf" ''
+    is_master = ${boolToString cfg.isMaster}
+    node_id_file = ${cfg.nodeIdFile}
+    password_secret = ${cfg.passwordSecret}
+    root_username = ${cfg.rootUsername}
+    root_password_sha2 = ${cfg.rootPasswordSha2}
+    elasticsearch_hosts = ${concatStringsSep "," cfg.elasticsearchHosts}
+    message_journal_dir = ${cfg.messageJournalDir}
+    mongodb_uri = ${cfg.mongodbUri}
+    plugin_dir = /var/lib/graylog/plugins
+
+    ${cfg.extraConfig}
+  '';
+
+  glPlugins = pkgs.buildEnv {
+    name = "graylog-plugins";
+    paths = cfg.plugins;
+  };
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.graylog = {
+
+      enable = mkEnableOption (lib.mdDoc "Graylog");
+
+      package = mkOption {
+        type = types.package;
+        default = if versionOlder config.system.stateVersion "23.05" then pkgs.graylog-3_3 else pkgs.graylog-5_1;
+        defaultText = literalExpression (if versionOlder config.system.stateVersion "23.05" then "pkgs.graylog-3_3" else "pkgs.graylog-5_1");
+        description = lib.mdDoc "Graylog package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "graylog";
+        description = lib.mdDoc "User account under which graylog runs";
+      };
+
+      isMaster = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether this is the master instance of your Graylog cluster";
+      };
+
+      nodeIdFile = mkOption {
+        type = types.str;
+        default = "/var/lib/graylog/server/node-id";
+        description = lib.mdDoc "Path of the file containing the graylog node-id";
+      };
+
+      passwordSecret = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          You MUST set a secret to secure/pepper the stored user passwords here. Use at least 64 characters.
+          Generate one by using for example: pwgen -N 1 -s 96
+        '';
+      };
+
+      rootUsername = mkOption {
+        type = types.str;
+        default = "admin";
+        description = lib.mdDoc "Name of the default administrator user";
+      };
+
+      rootPasswordSha2 = mkOption {
+        type = types.str;
+        example = "e3c652f0ba0b4801205814f8b6bc49672c4c74e25b497770bb89b22cdeb4e952";
+        description = lib.mdDoc ''
+          You MUST specify a hashed password for the root user (which you only need to initially set up the
+          system and in case you lose connectivity to your authentication backend).
+          This password cannot be changed using the API or via the web interface. If you need to change it,
+          modify it here.
+          Create one for example with: echo -n yourpassword | shasum -a 256
+          and use the resulting hash value as a string for this option.
+        '';
+      };
+
+      elasticsearchHosts = mkOption {
+        type = types.listOf types.str;
+        example = literalExpression ''[ "http://node1:9200" "http://user:password@node2:19200" ]'';
+        description = lib.mdDoc "List of valid URIs of the http ports of your elastic nodes. If one or more of your elasticsearch hosts require authentication, include the credentials in each node URI that requires authentication";
+      };
+
+      messageJournalDir = mkOption {
+        type = types.str;
+        default = "/var/lib/graylog/data/journal";
+        description = lib.mdDoc "The directory which will be used to store the message journal. The directory must be exclusively used by Graylog and must not contain any other files than the ones created by Graylog itself";
+      };
+
+      mongodbUri = mkOption {
+        type = types.str;
+        default = "mongodb://localhost/graylog";
+        description = lib.mdDoc "MongoDB connection string. See http://docs.mongodb.org/manual/reference/connection-string/ for details";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Any other configuration options you might want to add";
+      };
+
+      plugins = mkOption {
+        description = lib.mdDoc "Extra graylog plugins";
+        default = [ ];
+        type = types.listOf types.package;
+      };
+
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = mkIf (cfg.user == "graylog") {
+      graylog = {
+        isSystemUser = true;
+        group = "graylog";
+        description = "Graylog server daemon user";
+      };
+    };
+    users.groups = mkIf (cfg.user == "graylog") { graylog = {}; };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.messageJournalDir}' - ${cfg.user} - - -"
+    ];
+
+    systemd.services.graylog = {
+      description = "Graylog Server";
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        GRAYLOG_CONF = "${confFile}";
+      };
+      path = [ pkgs.which pkgs.procps ];
+      preStart = ''
+        rm -rf /var/lib/graylog/plugins || true
+        mkdir -p /var/lib/graylog/plugins -m 755
+
+        mkdir -p "$(dirname ${cfg.nodeIdFile})"
+        chown -R ${cfg.user} "$(dirname ${cfg.nodeIdFile})"
+
+        for declarativeplugin in `ls ${glPlugins}/bin/`; do
+          ln -sf ${glPlugins}/bin/$declarativeplugin /var/lib/graylog/plugins/$declarativeplugin
+        done
+        for includedplugin in `ls ${cfg.package}/plugin/`; do
+          ln -s ${cfg.package}/plugin/$includedplugin /var/lib/graylog/plugins/$includedplugin || true
+        done
+      '';
+      serviceConfig = {
+        User="${cfg.user}";
+        StateDirectory = "graylog";
+        ExecStart = "${cfg.package}/bin/graylogctl run";
+      };
+    };
+  };
+}
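A configuration sketch with placeholder secrets; note that passwordSecret and
rootPasswordSha2 end up verbatim in the generated graylog.conf in the Nix store, and that
the module expects MongoDB and Elasticsearch to be reachable at mongodbUri and
elasticsearchHosts:

  {
    services.graylog = {
      enable = true;
      passwordSecret = "<at-least-64-random-characters>";    # placeholder
      rootPasswordSha2 = "<sha256 of the root password>";    # placeholder
      elasticsearchHosts = [ "http://localhost:9200" ];
    };
  }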
diff --git a/nixpkgs/nixos/modules/services/logging/heartbeat.nix b/nixpkgs/nixos/modules/services/logging/heartbeat.nix
new file mode 100644
index 000000000000..a9ae11ec66e6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/heartbeat.nix
@@ -0,0 +1,84 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.heartbeat;
+
+  heartbeatYml = pkgs.writeText "heartbeat.yml" ''
+    name: ${cfg.name}
+    tags: ${builtins.toJSON cfg.tags}
+
+    ${cfg.extraConfig}
+  '';
+
+in
+{
+  options = {
+
+    services.heartbeat = {
+
+      enable = mkEnableOption (lib.mdDoc "heartbeat");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.heartbeat;
+        defaultText = literalExpression "pkgs.heartbeat";
+        example = literalExpression "pkgs.heartbeat7";
+        description = lib.mdDoc ''
+          The heartbeat package to use.
+        '';
+      };
+
+      name = mkOption {
+        type = types.str;
+        default = "heartbeat";
+        description = lib.mdDoc "Name of the beat";
+      };
+
+      tags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Tags to place on the shipped log messages";
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "/var/lib/heartbeat";
+        description = lib.mdDoc "The state directory. heartbeat's own logs and other data are stored here.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = ''
+          heartbeat.monitors:
+          - type: http
+            urls: ["http://localhost:9200"]
+            schedule: '@every 10s'
+        '';
+        description = lib.mdDoc "Any other configuration options you want to add";
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}' - nobody nogroup - -"
+    ];
+
+    systemd.services.heartbeat = with pkgs; {
+      description = "heartbeat log shipper";
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        mkdir -p "${cfg.stateDir}"/{data,logs}
+      '';
+      serviceConfig = {
+        User = "nobody";
+        AmbientCapabilities = "cap_net_raw";
+        ExecStart = "${cfg.package}/bin/heartbeat -c \"${heartbeatYml}\" -path.data \"${cfg.stateDir}/data\" -path.logs \"${cfg.stateDir}/logs\"";
+      };
+    };
+  };
+}
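A sketch mirroring the default extraConfig above, but pointing the HTTP monitor at a
hypothetical service and adding an Elasticsearch output (a standard Beats output section,
assumed here rather than taken from the module):

  {
    services.heartbeat = {
      enable = true;
      extraConfig = ''
        heartbeat.monitors:
        - type: http
          urls: ["http://localhost:8080"]   # hypothetical target
          schedule: '@every 30s'
        output.elasticsearch:
          hosts: ["127.0.0.1:9200"]
      '';
    };
  }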
diff --git a/nixpkgs/nixos/modules/services/logging/journalbeat.nix b/nixpkgs/nixos/modules/services/logging/journalbeat.nix
new file mode 100644
index 000000000000..e761380552de
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/journalbeat.nix
@@ -0,0 +1,94 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.journalbeat;
+
+  journalbeatYml = pkgs.writeText "journalbeat.yml" ''
+    name: ${cfg.name}
+    tags: ${builtins.toJSON cfg.tags}
+
+    ${cfg.extraConfig}
+  '';
+
+in
+{
+  options = {
+
+    services.journalbeat = {
+
+      enable = mkEnableOption (lib.mdDoc "journalbeat");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.journalbeat;
+        defaultText = literalExpression "pkgs.journalbeat";
+        description = lib.mdDoc ''
+          The journalbeat package to use
+        '';
+      };
+
+      name = mkOption {
+        type = types.str;
+        default = "journalbeat";
+        description = lib.mdDoc "Name of the beat";
+      };
+
+      tags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Tags to place on the shipped log messages";
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "journalbeat";
+        description = lib.mdDoc ''
+          Directory below `/var/lib/` to store journalbeat's
+          own logs and other data. This directory will be created automatically
+          using systemd's StateDirectory mechanism.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Any other configuration options you want to add";
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = !hasPrefix "/" cfg.stateDir;
+        message =
+          "The option services.journalbeat.stateDir shouldn't be an absolute directory." +
+          " It should be a directory relative to /var/lib/.";
+      }
+    ];
+
+    systemd.services.journalbeat = {
+      description = "Journalbeat log shipper";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "elasticsearch.service" ];
+      after = [ "elasticsearch.service" ];
+      preStart = ''
+        mkdir -p ${cfg.stateDir}/data
+        mkdir -p ${cfg.stateDir}/logs
+      '';
+      serviceConfig = {
+        StateDirectory = cfg.stateDir;
+        ExecStart = ''
+          ${cfg.package}/bin/journalbeat \
+            -c ${journalbeatYml} \
+            -path.data /var/lib/${cfg.stateDir}/data \
+            -path.logs /var/lib/${cfg.stateDir}/logs'';
+        Restart = "always";
+      };
+    };
+  };
+}
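A sketch that ships the journal to a local Elasticsearch instance; the extraConfig body is
appended verbatim to the generated journalbeat.yml, and the exact keys accepted there
depend on the journalbeat version, so the output section below is an assumption:

  {
    services.journalbeat = {
      enable = true;
      extraConfig = ''
        output.elasticsearch:
          hosts: ["127.0.0.1:9200"]
      '';
    };
  }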
diff --git a/nixpkgs/nixos/modules/services/logging/journaldriver.nix b/nixpkgs/nixos/modules/services/logging/journaldriver.nix
new file mode 100644
index 000000000000..59eedff90d60
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/journaldriver.nix
@@ -0,0 +1,112 @@
+# This module implements a systemd service for running journaldriver,
+# a log forwarding agent that sends logs from journald to Stackdriver
+# Logging.
+#
+# It can be enabled without extra configuration when running on GCP.
+# On machines hosted elsewhere, the other configuration options need
+# to be set.
+#
+# For further information please consult the documentation in the
+# upstream repository at: https://github.com/tazjin/journaldriver/
+
+{ config, lib, pkgs, ...}:
+
+with lib; let cfg = config.services.journaldriver;
+in {
+  options.services.journaldriver = {
+    enable = mkOption {
+      type        = types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        Whether to enable journaldriver to forward journald logs to
+        Stackdriver Logging.
+      '';
+    };
+
+    logLevel = mkOption {
+      type        = types.str;
+      default     = "info";
+      description = lib.mdDoc ''
+        Log level at which journaldriver logs its own output.
+      '';
+    };
+
+    logName = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = lib.mdDoc ''
+        Configures the name of the target log in Stackdriver Logging.
+        This option can be set to, for example, the hostname of a
+        machine to improve the user experience in the logging
+        overview.
+      '';
+    };
+
+    googleCloudProject = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = lib.mdDoc ''
+        Configures the name of the Google Cloud project to which to
+        forward journald logs.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+
+    logStream = mkOption {
+      type        = with types; nullOr str;
+      default     = null;
+      description = lib.mdDoc ''
+        Configures the name of the Stackdriver Logging log stream into
+        which to write journald entries.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+
+    applicationCredentials = mkOption {
+      type        = with types; nullOr path;
+      default     = null;
+      description = lib.mdDoc ''
+        Path to the service account private key (in JSON-format) used
+        to forward log entries to Stackdriver Logging on non-GCP
+        instances.
+
+        This option is required on non-GCP machines, but should not be
+        set on GCP instances.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.journaldriver = {
+      description = "Stackdriver Logging journal forwarder";
+      script      = "${pkgs.journaldriver}/bin/journaldriver";
+      after       = [ "network-online.target" ];
+      wantedBy    = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Restart        = "always";
+        DynamicUser    = true;
+
+        # This directive lets systemd automatically configure
+        # permissions on /var/lib/journaldriver, the directory in
+        # which journaldriver persists its cursor state.
+        StateDirectory = "journaldriver";
+
+        # This group is required for accessing journald.
+        SupplementaryGroups = "systemd-journal";
+      };
+
+      environment = {
+        RUST_LOG                       = cfg.logLevel;
+        LOG_NAME                       = cfg.logName;
+        LOG_STREAM                     = cfg.logStream;
+        GOOGLE_CLOUD_PROJECT           = cfg.googleCloudProject;
+        GOOGLE_APPLICATION_CREDENTIALS = cfg.applicationCredentials;
+      };
+    };
+  };
+}
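On GCP instances enabling the service is enough; the sketch below shows the documented
non-GCP options with placeholder values:

  {
    services.journaldriver = {
      enable = true;
      logStream = "my-machine";                       # placeholder stream name
      googleCloudProject = "my-project";              # placeholder project
      applicationCredentials = "/var/keys/gcp.json";  # placeholder key path
    };
  }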
diff --git a/nixpkgs/nixos/modules/services/logging/journalwatch.nix b/nixpkgs/nixos/modules/services/logging/journalwatch.nix
new file mode 100644
index 000000000000..55e2d600ee4f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/journalwatch.nix
@@ -0,0 +1,265 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  cfg = config.services.journalwatch;
+  user = "journalwatch";
+  # for journal access
+  group = "systemd-journal";
+  dataDir = "/var/lib/${user}";
+
+  journalwatchConfig = pkgs.writeText "config" (''
+    # (File Generated by NixOS journalwatch module.)
+    [DEFAULT]
+    mail_binary = ${cfg.mailBinary}
+    priority = ${toString cfg.priority}
+    mail_from = ${cfg.mailFrom}
+  ''
+  + optionalString (cfg.mailTo != null) ''
+    mail_to = ${cfg.mailTo}
+  ''
+  + cfg.extraConfig);
+
+  journalwatchPatterns = pkgs.writeText "patterns" ''
+    # (File Generated by NixOS journalwatch module.)
+
+    ${mkPatterns cfg.filterBlocks}
+  '';
+
+  # an empty line at the end is needed to separate the blocks
+  mkPatterns = filterBlocks: concatStringsSep "\n" (map (block: ''
+    ${block.match}
+    ${block.filters}
+
+  '') filterBlocks);
+
+  # can't use symlinkJoin directly, because when we point $XDG_CONFIG_HOME
+  # to the /nix/store path, we still need the subdirectory "journalwatch" inside that
+  # to match journalwatch's expectations
+  journalwatchConfigDir = pkgs.runCommand "journalwatch-config"
+    { preferLocalBuild = true; allowSubstitutes = false; }
+    ''
+      mkdir -p $out/journalwatch
+      ln -sf ${journalwatchConfig} $out/journalwatch/config
+      ln -sf ${journalwatchPatterns} $out/journalwatch/patterns
+    '';
+
+
+in {
+  options = {
+    services.journalwatch = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If enabled, periodically check the journal with journalwatch and report the results by mail.
+        '';
+      };
+
+      priority = mkOption {
+        type = types.int;
+        default = 6;
+        description = lib.mdDoc ''
+          Lowest priority of message to be considered.
+          A value between 7 ("debug"), and 0 ("emerg"). Defaults to 6 ("info").
+          If you don't care about anything with "info" priority, you can reduce
+          this to e.g. 5 ("notice") to considerably reduce the amount of
+          messages without needing many {option}`filterBlocks`.
+        '';
+      };
+
+      # HACK: this is a workaround for journalwatch's usage of socket.getfqdn() which always returns localhost if
+      # there's an alias for localhost on a separate line in /etc/hosts, or takes ages if it's not present and
+      # then returns something right-ish in the direction of /etc/hostname. Just bypass it completely.
+      mailFrom = mkOption {
+        type = types.str;
+        default = "journalwatch@${config.networking.hostName}";
+        defaultText = literalExpression ''"journalwatch@''${config.networking.hostName}"'';
+        description = lib.mdDoc ''
+          Mail address to send journalwatch reports from.
+        '';
+      };
+
+      mailTo = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Mail address to send journalwatch reports to.
+        '';
+      };
+
+      mailBinary = mkOption {
+        type = types.path;
+        default = "/run/wrappers/bin/sendmail";
+        description = lib.mdDoc ''
+          Sendmail-compatible binary to be used to send the messages.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to the journalwatch/config configuration file.
+          You can add any commandline argument to the config, without the '--'.
+          See `journalwatch --help` for all arguments and their description.
+          '';
+      };
+
+      filterBlocks = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+           match = mkOption {
+              type = types.str;
+              example = "SYSLOG_IDENTIFIER = systemd";
+              description = lib.mdDoc ''
+                Syntax: `field = value`
+                Specifies the log entry `field` this block should apply to.
+                If the `field` of a message matches this `value`,
+                this filter block's {option}`filters` are applied.
+                If `value` starts and ends with a slash, it is interpreted as
+                an extended Python regular expression; if not, it's an exact match.
+                The journal fields are explained in systemd.journal-fields(7).
+              '';
+            };
+
+            filters = mkOption {
+              type = types.str;
+              example = ''
+                (Stopped|Stopping|Starting|Started) .*
+                (Reached target|Stopped target) .*
+              '';
+              description = lib.mdDoc ''
+                The filters to apply on all messages which satisfy {option}`match`.
+                Any of those messages that match any specified filter will be removed from journalwatch's output.
+                Each filter is an extended Python regular expression.
+                You can specify multiple filters and separate them by newlines.
+                Lines starting with '#' are comments. Inline-comments are not permitted.
+              '';
+            };
+          };
+        });
+
+        example = [
+          # examples taken from upstream
+          {
+            match = "_SYSTEMD_UNIT = systemd-logind.service";
+            filters = ''
+              New session [a-z]?\d+ of user \w+\.
+              Removed session [a-z]?\d+\.
+            '';
+          }
+
+          {
+            match = "SYSLOG_IDENTIFIER = /(CROND|crond)/";
+            filters = ''
+              pam_unix\(crond:session\): session (opened|closed) for user \w+
+              \(\w+\) CMD .*
+            '';
+          }
+        ];
+
+        # another example from upstream.
+        # very useful on priority = 6, and required as journalwatch throws an error when no pattern is defined at all.
+        default = [
+          {
+            match = "SYSLOG_IDENTIFIER = systemd";
+            filters = ''
+              (Stopped|Stopping|Starting|Started) .*
+              (Created slice|Removed slice) user-\d*\.slice\.
+              Received SIGRTMIN\+24 from PID .*
+              (Reached target|Stopped target) .*
+              Startup finished in \d*ms\.
+            '';
+          }
+        ];
+
+
+        description = lib.mdDoc ''
+          filterBlocks can be defined to blacklist journal messages which are not errors.
+          Each block matches on a log entry field, and the filters in that block then are matched
+          against all messages with a matching log entry field.
+
+          All messages whose PRIORITY is at least 6 (INFO) are processed by journalwatch.
+          If you don't specify any filterBlocks, PRIORITY is reduced to 5 (NOTICE) by default.
+
+          All regular expressions are extended Python regular expressions, for details
+          see: http://doc.pyschools.com/html/regex.html
+        '';
+      };
+
+      interval = mkOption {
+        type = types.str;
+        default = "hourly";
+        description = lib.mdDoc ''
+          How often to run journalwatch.
+
+          The format is described in systemd.time(7).
+        '';
+      };
+      accuracy = mkOption {
+        type = types.str;
+        default = "10min";
+        description = lib.mdDoc ''
+          The time window around the interval in which the journalwatch run will be scheduled.
+
+          The format is described in systemd.time(7).
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.${user} = {
+      isSystemUser = true;
+      home = dataDir;
+      group = group;
+    };
+
+    systemd.tmpfiles.rules = [
+      # present since NixOS 19.09: remove old stateful symlink join directory,
+      # which has been replaced with the journalwatchConfigDir store path
+      "R ${dataDir}/config"
+    ];
+
+    systemd.services.journalwatch = {
+
+      environment = {
+        # journalwatch stores the last processed timestamp here.
+        # The share subdirectory is historic now that the config home lives in /nix/store,
+        # but moving this in a backwards-compatible way is much more work than is justified
+        # for cleaning that up.
+        XDG_DATA_HOME = "${dataDir}/share";
+        XDG_CONFIG_HOME = journalwatchConfigDir;
+      };
+      serviceConfig = {
+        User = user;
+        Group = group;
+        Type = "oneshot";
+        # requires a relative directory name to create beneath /var/lib
+        StateDirectory = user;
+        StateDirectoryMode = "0750";
+        ExecStart = "${pkgs.python3Packages.journalwatch}/bin/journalwatch mail";
+        # lowest CPU and IO priority, but both still in best-effort class to prevent starvation
+        Nice=19;
+        IOSchedulingPriority=7;
+      };
+    };
+
+    systemd.timers.journalwatch = {
+      description = "Periodic journalwatch run";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = cfg.interval;
+        AccuracySec = cfg.accuracy;
+        Persistent = true;
+      };
+    };
+
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ florianjacob ];
+  };
+}
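A sketch enabling hourly mail reports to a placeholder address; note that defining
filterBlocks replaces the default systemd block shown above rather than extending it:

  {
    services.journalwatch = {
      enable = true;
      mailTo = "admin@example.org";   # placeholder address
      interval = "hourly";
      filterBlocks = [
        {
          match = "_SYSTEMD_UNIT = systemd-logind.service";
          filters = ''
            New session [a-z]?\d+ of user \w+\.
            Removed session [a-z]?\d+\.
          '';
        }
      ];
    };
  }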
diff --git a/nixpkgs/nixos/modules/services/logging/klogd.nix b/nixpkgs/nixos/modules/services/logging/klogd.nix
new file mode 100644
index 000000000000..1de0e58abbb3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/klogd.nix
@@ -0,0 +1,9 @@
+{ lib, ... }:
+
+{
+  imports = [
+    (lib.mkRemovedOptionModule [ "security" "klogd" "enable" ] ''
+      Logging of kernel messages is now handled by systemd.
+    '')
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/logging/logcheck.nix b/nixpkgs/nixos/modules/services/logging/logcheck.nix
new file mode 100644
index 000000000000..8a277cea6e46
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/logcheck.nix
@@ -0,0 +1,236 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.logcheck;
+
+  defaultRules = pkgs.runCommand "logcheck-default-rules" { preferLocalBuild = true; } ''
+                   cp -prd ${pkgs.logcheck}/etc/logcheck $out
+                   chmod u+w $out
+                   rm -r $out/logcheck.*
+                 '';
+
+  rulesDir = pkgs.symlinkJoin
+    { name = "logcheck-rules-dir";
+      paths = ([ defaultRules ] ++ cfg.extraRulesDirs);
+    };
+
+  configFile = pkgs.writeText "logcheck.conf" cfg.config;
+
+  logFiles = pkgs.writeText "logcheck.logfiles" cfg.files;
+
+  flags = "-r ${rulesDir} -c ${configFile} -L ${logFiles} -${levelFlag} -m ${cfg.mailTo}";
+
+  levelFlag = getAttrFromPath [cfg.level]
+    { paranoid    = "p";
+      server      = "s";
+      workstation = "w";
+    };
+
+  cronJob = ''
+    @reboot   logcheck env PATH=/run/wrappers/bin:$PATH nice -n10 ${pkgs.logcheck}/sbin/logcheck -R ${flags}
+    2 ${cfg.timeOfDay} * * * logcheck env PATH=/run/wrappers/bin:$PATH nice -n10 ${pkgs.logcheck}/sbin/logcheck ${flags}
+  '';
+
+  writeIgnoreRule = name: {level, regex, ...}:
+    pkgs.writeTextFile
+      { inherit name;
+        destination = "/ignore.d.${level}/${name}";
+        text = ''
+          ^\w{3} [ :[:digit:]]{11} [._[:alnum:]-]+ ${regex}
+        '';
+      };
+
+  writeIgnoreCronRule = name: {level, user, regex, cmdline, ...}:
+    let escapeRegex = escape (stringToCharacters "\\[]{}()^$?*+|.");
+        cmdline_ = builtins.unsafeDiscardStringContext cmdline;
+        re = if regex != "" then regex else if cmdline_ == "" then ".*" else escapeRegex cmdline_;
+    in writeIgnoreRule "cron-${name}" {
+      inherit level;
+      regex = ''
+        (/usr/bin/)?cron\[[0-9]+\]: \(${user}\) CMD \(${re}\)$
+      '';
+    };
+
+  levelOption = mkOption {
+    default = "server";
+    type = types.enum [ "workstation" "server" "paranoid" ];
+    description = lib.mdDoc ''
+      Set the logcheck level.
+    '';
+  };
+
+  ignoreOptions = {
+    options = {
+      level = levelOption;
+
+      regex = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          Regex specifying which log lines to ignore.
+        '';
+      };
+    };
+  };
+
+  ignoreCronOptions = {
+    options = {
+      user = mkOption {
+        default = "root";
+        type = types.str;
+        description = lib.mdDoc ''
+          User that runs the cronjob.
+        '';
+      };
+
+      cmdline = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          Command line for the cron job. Will be turned into a regex for the logcheck ignore rule.
+        '';
+      };
+
+      timeArgs = mkOption {
+        default = null;
+        type = types.nullOr (types.str);
+        example = "02 06 * * *";
+        description = lib.mdDoc ''
+          "min hr dom mon dow" crontab time args, to auto-create a cronjob too.
+          Leave at null to not do this and just add a logcheck ignore rule.
+        '';
+      };
+    };
+  };
+
+in
+{
+  options = {
+    services.logcheck = {
+      enable = mkEnableOption (lib.mdDoc "logcheck cron job");
+
+      user = mkOption {
+        default = "logcheck";
+        type = types.str;
+        description = lib.mdDoc ''
+          Username for the logcheck user.
+        '';
+      };
+
+      timeOfDay = mkOption {
+        default = "*";
+        example = "6";
+        type = types.str;
+        description = lib.mdDoc ''
+          Time of day to run logcheck. The logcheck run is scheduled at xx:02 each day.
+          Leave at the default (*) to run every hour. Of course, when nothing special was logged,
+          logcheck will be silent.
+        '';
+      };
+
+      mailTo = mkOption {
+        default = "root";
+        example = "you@domain.com";
+        type = types.str;
+        description = lib.mdDoc ''
+          Email address to send reports to.
+        '';
+      };
+
+      level = mkOption {
+        default = "server";
+        type = types.str;
+        description = lib.mdDoc ''
+          Set the logcheck level. Either "workstation", "server", or "paranoid".
+        '';
+      };
+
+      config = mkOption {
+        default = "FQDN=1";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Config options that you would like in logcheck.conf.
+        '';
+      };
+
+      files = mkOption {
+        default = [ "/var/log/messages" ];
+        type = types.listOf types.path;
+        example = [ "/var/log/messages" "/var/log/mail" ];
+        description = lib.mdDoc ''
+          Which log files to check.
+        '';
+      };
+
+      extraRulesDirs = mkOption {
+        default = [];
+        example = [ "/etc/logcheck" ];
+        type = types.listOf types.path;
+        description = lib.mdDoc ''
+          Directories with extra rules.
+        '';
+      };
+
+      ignore = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          This option defines extra ignore rules.
+        '';
+        type = with types; attrsOf (submodule ignoreOptions);
+      };
+
+      ignoreCron = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          This option defines extra ignore rules for cronjobs.
+        '';
+        type = with types; attrsOf (submodule ignoreCronOptions);
+      };
+
+      extraGroups = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = [ "postdrop" "mongodb" ];
+        description = lib.mdDoc ''
+          Extra groups for the logcheck user, for example to be able to use sendmail,
+          or to access certain log files.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.logcheck.extraRulesDirs =
+        mapAttrsToList writeIgnoreRule cfg.ignore
+        ++ mapAttrsToList writeIgnoreCronRule cfg.ignoreCron;
+
+    users.users = optionalAttrs (cfg.user == "logcheck") {
+      logcheck = {
+        group = "logcheck";
+        isSystemUser = true;
+        shell = "/bin/sh";
+        description = "Logcheck user account";
+        extraGroups = cfg.extraGroups;
+      };
+    };
+    users.groups = optionalAttrs (cfg.user == "logcheck") {
+      logcheck = {};
+    };
+
+    system.activationScripts.logcheck = ''
+      mkdir -m 700 -p /var/{lib,lock}/logcheck
+      chown ${cfg.user} /var/{lib,lock}/logcheck
+    '';
+
+    services.cron.systemCronJobs =
+        let withTime = name: {timeArgs, ...}: timeArgs != null;
+            mkCron = name: {user, cmdline, timeArgs, ...}: ''
+              ${timeArgs} ${user} ${cmdline}
+            '';
+        in mapAttrsToList mkCron (filterAttrs withTime cfg.ignoreCron)
+           ++ [ cronJob ];
+  };
+}
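A sketch with a daily 06:02 run, a placeholder report address, and one extra ignore rule
(the rule name and regex are hypothetical):

  {
    services.logcheck = {
      enable = true;
      mailTo = "root@example.org";   # placeholder address
      timeOfDay = "6";
      ignore."my-daemon-heartbeat" = {
        regex = "mydaemon\\[[0-9]+\\]: heartbeat ok";   # hypothetical pattern
        level = "server";
      };
    };
  }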
diff --git a/nixpkgs/nixos/modules/services/logging/logrotate.nix b/nixpkgs/nixos/modules/services/logging/logrotate.nix
new file mode 100644
index 000000000000..ba1445f08397
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/logrotate.nix
@@ -0,0 +1,253 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.logrotate;
+
+  generateLine = n: v:
+    if builtins.elem n [ "files" "priority" "enable" "global" ] || v == null then null
+    else if builtins.elem n [ "frequency" ] then "${v}\n"
+    else if builtins.elem n [ "firstaction" "lastaction" "prerotate" "postrotate" "preremove" ]
+         then "${n}\n    ${v}\n  endscript\n"
+    else if isInt v then "${n} ${toString v}\n"
+    else if v == true then "${n}\n"
+    else if v == false then "no${n}\n"
+    else "${n} ${v}\n";
+  generateSection = indent: settings: concatStringsSep (fixedWidthString indent " " "") (
+    filter (x: x != null) (mapAttrsToList generateLine settings)
+  );
+
+  # generateSection includes a final newline hence weird closing brace
+  mkConf = settings:
+    if settings.global or false then generateSection 0 settings
+    else ''
+      ${concatMapStringsSep "\n" (files: ''"${files}"'') (toList settings.files)} {
+        ${generateSection 2 settings}}
+    '';
+
+  settings = sortProperties (attrValues (filterAttrs (_: settings: settings.enable) (
+    foldAttrs recursiveUpdate { } [
+      {
+        header = {
+          enable = true;
+          missingok = true;
+          notifempty = true;
+          frequency = "weekly";
+          rotate = 4;
+        };
+      }
+      cfg.settings
+      { header = { global = true; priority = 100; }; }
+    ]
+  )));
+  configFile = pkgs.writeTextFile {
+    name = "logrotate.conf";
+    text = concatStringsSep "\n" (
+      map mkConf settings
+    );
+    checkPhase = optionalString cfg.checkConfig ''
+      # logrotate --debug also checks that users specified in config
+      # file exist, but we only have sandboxed users here so brown these
+      # out. according to man page that means su, create and createolddir.
+      # files required to exist also won't be present, so missingok is forced.
+      user=$(${pkgs.buildPackages.coreutils}/bin/id -un)
+      group=$(${pkgs.buildPackages.coreutils}/bin/id -gn)
+      sed -e "s/\bsu\s.*/su $user $group/" \
+          -e "s/\b\(create\s\+[0-9]*\s*\|createolddir\s\+[0-9]*\s\+\).*/\1$user $group/" \
+          -e "1imissingok" -e "s/\bnomissingok\b//" \
+          $out > logrotate.conf
+      # Since this makes for very verbose builds only show real error.
+      # There is no way to control log level, but logrotate hardcodes
+      # 'error:' at common log level, so we can use grep, taking care
+      # to keep error codes
+      set -o pipefail
+      if ! ${pkgs.buildPackages.logrotate}/sbin/logrotate -s logrotate.status \
+                      --debug logrotate.conf 2>&1 \
+                  | ( ! grep "error:" ) > logrotate-error; then
+              echo "Logrotate configuration check failed."
+              echo "The failing configuration (after adjustments to pass tests in sandbox) was:"
+              printf "%s\n" "-------"
+              cat logrotate.conf
+              printf "%s\n" "-------"
+              echo "The error reported by logrotate was as follow:"
+              printf "%s\n" "-------"
+              cat logrotate-error
+              printf "%s\n" "-------"
+              echo "You can disable this check with services.logrotate.checkConfig = false,"
+              echo "but if you think it should work please report this failure along with"
+              echo "the config file being tested!"
+              false
+      fi
+    '';
+  };
+
+  mailOption =
+    optionalString (foldr (n: a: a || (n.mail or false) != false) false (attrValues cfg.settings))
+    "--mail=${pkgs.mailutils}/bin/mail";
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "logrotate" "config" ] "Modify services.logrotate.settings.header instead")
+    (mkRemovedOptionModule [ "services" "logrotate" "extraConfig" ] "Modify services.logrotate.settings.header instead")
+    (mkRemovedOptionModule [ "services" "logrotate" "paths" ] "Add attributes to services.logrotate.settings instead")
+  ];
+
+  options = {
+    services.logrotate = {
+      enable = mkEnableOption (lib.mdDoc "the logrotate systemd service") // {
+        default = foldr (n: a: a || n.enable) false (attrValues cfg.settings);
+        defaultText = literalExpression "cfg.settings != {}";
+      };
+
+      settings = mkOption {
+        default = { };
+        description = lib.mdDoc ''
+          logrotate freeform settings: each attribute here defines its own section,
+          ordered by priority, which can either define files to rotate with their settings
+          or settings common to all file sections that follow it.
+          Refer to <https://linux.die.net/man/8/logrotate> for details.
+        '';
+        example = literalExpression ''
+          {
+            # global options
+            header = {
+              dateext = true;
+            };
+            # example custom files
+            "/var/log/mylog.log" = {
+              frequency = "daily";
+              rotate = 3;
+            };
+            "multiple paths" = {
+               files = [
+                "/var/log/first*.log"
+                "/var/log/second.log"
+              ];
+            };
+          };
+          '';
+        type = types.attrsOf (types.submodule ({ name, ... }: {
+          freeformType = with types; attrsOf (nullOr (oneOf [ int bool str ]));
+
+          options = {
+            enable = mkEnableOption (lib.mdDoc "setting individual kill switch") // {
+              default = true;
+            };
+
+            global = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether this section is a global option or not: set it to make these
+                settings apply to all file sections with a higher priority value.
+              '';
+            };
+            files = mkOption {
+              type = with types; either str (listOf str);
+              default = name;
+              defaultText = ''
+                The attrset name if not specified
+              '';
+              description = lib.mdDoc ''
+                Single or list of files for which rules are defined.
+                The files are quoted with double-quotes in logrotate configuration,
+                so globs and spaces are supported.
+                Note this setting is ignored if global is true.
+              '';
+            };
+
+            frequency = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = lib.mdDoc ''
+                How often to rotate the logs. Defaults to the previously set global setting,
+                which itself defaults to weekly.
+              '';
+            };
+
+            priority = mkOption {
+              type = types.int;
+              default = 1000;
+              description = lib.mdDoc ''
+                Order of this logrotate block in relation to the others. The semantics are
+                the same as with `lib.mkOrder`. Smaller values are inserted first.
+              '';
+            };
+          };
+
+        }));
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = configFile;
+        defaultText = ''
+          A configuration file automatically generated by NixOS.
+        '';
+        description = lib.mdDoc ''
+          Override the configuration file used by logrotate. By default,
+          NixOS generates one automatically from [](#opt-services.logrotate.settings).
+        '';
+        example = literalExpression ''
+          pkgs.writeText "logrotate.conf" '''
+            missingok
+            "/var/log/*.log" {
+              rotate 4
+              weekly
+            }
+          ''';
+        '';
+      };
+
+      checkConfig = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether the config should be checked at build time.
+
+          Some options are not checkable at build time because of the build sandbox:
+          for example, the test does not know about existing files and system users are
+          not known.
+          These limitations mean we must adjust the file for tests (missingok is forced
+          and users are replaced by dummy users), so tests are complemented by a
+          logrotate-checkconf service that is enabled by default.
+          This extra check can be disabled by disabling it at the systemd level with the
+          {option}`systemd.services.logrotate-checkconf.enable` option.
+
+          Conversely there are still things that might make this check fail incorrectly
+          (e.g. a file path where we don't have access to intermediate directories):
+          in this case you can disable the failing check with this option.
+        '';
+      };
+
+      extraArgs = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [];
+        description = "Additional command line arguments to pass on logrotate invocation";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.logrotate = {
+      description = "Logrotate Service";
+      startAt = "hourly";
+
+      serviceConfig = {
+        Restart = "no";
+        User = "root";
+        ExecStart = "${pkgs.logrotate}/sbin/logrotate ${utils.escapeSystemdExecArgs cfg.extraArgs} ${mailOption} ${cfg.configFile}";
+      };
+    };
+    systemd.services.logrotate-checkconf = {
+      description = "Logrotate configuration check";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "${pkgs.logrotate}/sbin/logrotate ${utils.escapeSystemdExecArgs cfg.extraArgs} --debug ${cfg.configFile}";
+      };
+    };
+  };
+}
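A sketch of the freeform settings described above, with a global dateext option and a
hypothetical application log rotated daily; since enable defaults to true once any
settings section is enabled, no explicit enable is needed:

  {
    services.logrotate.settings = {
      header = {
        dateext = true;              # global option, applied to the sections below
      };
      "/var/log/myapp/*.log" = {     # hypothetical path (quoted, so globs are fine)
        frequency = "daily";
        rotate = 7;
        compress = true;
      };
    };
  }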
diff --git a/nixpkgs/nixos/modules/services/logging/logstash.nix b/nixpkgs/nixos/modules/services/logging/logstash.nix
new file mode 100644
index 000000000000..42d52a61639e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/logstash.nix
@@ -0,0 +1,194 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.logstash;
+  ops = lib.optionalString;
+  verbosityFlag = "--log.level " + cfg.logLevel;
+
+  logstashConf = pkgs.writeText "logstash.conf" ''
+    input {
+      ${cfg.inputConfig}
+    }
+
+    filter {
+      ${cfg.filterConfig}
+    }
+
+    output {
+      ${cfg.outputConfig}
+    }
+  '';
+
+  logstashSettingsYml = pkgs.writeText "logstash.yml" cfg.extraSettings;
+
+  logstashJvmOptionsFile = pkgs.writeText "jvm.options" cfg.extraJvmOptions;
+
+  logstashSettingsDir = pkgs.runCommand "logstash-settings" {
+      inherit logstashJvmOptionsFile;
+      inherit logstashSettingsYml;
+      preferLocalBuild = true;
+    } ''
+    mkdir -p $out
+    ln -s $logstashSettingsYml $out/logstash.yml
+    ln -s $logstashJvmOptionsFile $out/jvm.options
+  '';
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "logstash" "address" ] [ "services" "logstash" "listenAddress" ])
+    (mkRemovedOptionModule [ "services" "logstash" "enableWeb" ] "The web interface was removed from logstash")
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.logstash = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable logstash.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.logstash;
+        defaultText = literalExpression "pkgs.logstash";
+        description = lib.mdDoc "Logstash package to use.";
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        example = literalExpression "[ pkgs.logstash-contrib ]";
+        description = lib.mdDoc "The paths to find other logstash plugins in.";
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/logstash";
+        description = lib.mdDoc ''
+          A path to a directory, writable by Logstash, that it uses to store data.
+          Plugins will also have access to this path.
+        '';
+      };
+
+      logLevel = mkOption {
+        type = types.enum [ "debug" "info" "warn" "error" "fatal" ];
+        default = "warn";
+        description = lib.mdDoc "Logging verbosity level.";
+      };
+
+      filterWorkers = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc "The quantity of filter workers to run.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "Address on which to start webserver.";
+      };
+
+      port = mkOption {
+        type = types.str;
+        default = "9292";
+        description = lib.mdDoc "Port on which to start webserver.";
+      };
+
+      inputConfig = mkOption {
+        type = types.lines;
+        default = "generator { }";
+        description = lib.mdDoc "Logstash input configuration.";
+        example = literalExpression ''
+          '''
+            # Read from journal
+            pipe {
+              command => "''${config.systemd.package}/bin/journalctl -f -o json"
+              type => "syslog" codec => json {}
+            }
+          '''
+        '';
+      };
+
+      filterConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "logstash filter configuration.";
+        example = ''
+          if [type] == "syslog" {
+            # Keep only relevant systemd fields
+            # https://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html
+            prune {
+              whitelist_names => [
+                "type", "@timestamp", "@version",
+                "MESSAGE", "PRIORITY", "SYSLOG_FACILITY"
+              ]
+            }
+          }
+        '';
+      };
+
+      outputConfig = mkOption {
+        type = types.lines;
+        default = "stdout { codec => rubydebug }";
+        description = lib.mdDoc "Logstash output configuration.";
+        example = ''
+          redis { host => ["localhost"] data_type => "list" key => "logstash" codec => json }
+          elasticsearch { }
+        '';
+      };
+
+      extraSettings = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Extra Logstash settings in YAML format.";
+        example = ''
+          pipeline:
+            batch:
+              size: 125
+              delay: 5
+        '';
+      };
+
+      extraJvmOptions = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Extra JVM options, one per line (jvm.options format).";
+        example = ''
+          -Xms2g
+          -Xmx2g
+        '';
+      };
+
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.logstash = {
+      description = "Logstash Daemon";
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.bash ];
+      serviceConfig = {
+        ExecStartPre = ''${pkgs.coreutils}/bin/mkdir -p "${cfg.dataDir}" ; ${pkgs.coreutils}/bin/chmod 700 "${cfg.dataDir}"'';
+        ExecStart = concatStringsSep " " (filter (s: stringLength s != 0) [
+          "${cfg.package}/bin/logstash"
+          "-w ${toString cfg.filterWorkers}"
+          (concatMapStringsSep " " (x: "--path.plugins ${x}") cfg.plugins)
+          "${verbosityFlag}"
+          "-f ${logstashConf}"
+          "--path.settings ${logstashSettingsDir}"
+          "--path.data ${cfg.dataDir}"
+        ]);
+      };
+    };
+  };
+}
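Example usage (a minimal sketch, not part of the patch; stdin/stdout are standard Logstash plugins used here purely for illustration):

  services.logstash = {
    enable = true;
    logLevel = "info";
    inputConfig = ''
      stdin { }                      # read events from standard input
    '';
    outputConfig = ''
      stdout { codec => rubydebug }  # print events for debugging
    '';
  };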
diff --git a/nixpkgs/nixos/modules/services/logging/promtail.nix b/nixpkgs/nixos/modules/services/logging/promtail.nix
new file mode 100644
index 000000000000..9db82fd42b28
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/promtail.nix
@@ -0,0 +1,91 @@
+{ config, lib, pkgs, ... }: with lib;
+let
+  cfg = config.services.promtail;
+
+  prettyJSON = conf: pkgs.runCommandLocal "promtail-config.json" {} ''
+    echo '${builtins.toJSON conf}' | ${pkgs.buildPackages.jq}/bin/jq 'del(._module)' > $out
+  '';
+
+  allowSystemdJournal = cfg.configuration ? scrape_configs && lib.any (v: v ? journal) cfg.configuration.scrape_configs;
+
+  allowPositionsFile = !lib.hasPrefix "/var/cache/promtail" positionsFile;
+  positionsFile = cfg.configuration.positions.filename;
+in {
+  options.services.promtail = with types; {
+    enable = mkEnableOption (lib.mdDoc "the Promtail ingresser");
+
+
+    configuration = mkOption {
+      type = (pkgs.formats.json {}).type;
+      description = lib.mdDoc ''
+        Specify the configuration for Promtail in Nix.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = listOf str;
+      default = [];
+      example = [ "--server.http-listen-port=3101" ];
+      description = lib.mdDoc ''
+        Specify a list of additional command line flags,
+        which get escaped and are then passed to Promtail.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.promtail.configuration.positions.filename = mkDefault "/var/cache/promtail/positions.yaml";
+
+    systemd.services.promtail = {
+      description = "Promtail log ingress";
+      wantedBy = [ "multi-user.target" ];
+      stopIfChanged = false;
+
+      serviceConfig = {
+        Restart = "on-failure";
+        TimeoutStopSec = 10;
+
+        ExecStart = "${pkgs.promtail}/bin/promtail -config.file=${prettyJSON cfg.configuration} ${escapeShellArgs cfg.extraFlags}";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        CacheDirectory = "promtail";
+        ReadWritePaths = lib.optional allowPositionsFile (builtins.dirOf positionsFile);
+
+        User = "promtail";
+        Group = "promtail";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+
+        ProtectKernelModules = true;
+        SystemCallArchitectures = "native";
+        ProtectKernelLogs = true;
+        ProtectClock = true;
+
+        LockPersonality = true;
+        ProtectHostname = true;
+        RestrictRealtime = true;
+        MemoryDenyWriteExecute = true;
+        PrivateUsers = true;
+
+        SupplementaryGroups = lib.optional (allowSystemdJournal) "systemd-journal";
+      } // (optionalAttrs (!pkgs.stdenv.isAarch64) { # FIXME: figure out why this breaks on aarch64
+        SystemCallFilter = "@system-service";
+      });
+    };
+
+    users.groups.promtail = {};
+    users.users.promtail = {
+      description = "Promtail service user";
+      isSystemUser = true;
+      group = "promtail";
+    };
+  };
+}
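Example usage (a minimal sketch, not part of the patch; the Loki push URL, port, and labels are placeholders). Because the configuration contains a journal scrape, the module adds the systemd-journal supplementary group as shown above:

  services.promtail = {
    enable = true;
    configuration = {
      server.http_listen_port = 9080;
      clients = [ { url = "http://localhost:3100/loki/api/v1/push"; } ];
      scrape_configs = [ {
        job_name = "journal";
        journal.labels = { job = "systemd-journal"; host = "myhost"; };
      } ];
    };
  };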
diff --git a/nixpkgs/nixos/modules/services/logging/rsyslogd.nix b/nixpkgs/nixos/modules/services/logging/rsyslogd.nix
new file mode 100644
index 000000000000..207d416c1a88
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/rsyslogd.nix
@@ -0,0 +1,105 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.rsyslogd;
+
+  syslogConf = pkgs.writeText "syslog.conf" ''
+    $ModLoad imuxsock
+    $SystemLogSocketName /run/systemd/journal/syslog
+    $WorkDirectory /var/spool/rsyslog
+
+    ${cfg.defaultConfig}
+    ${cfg.extraConfig}
+  '';
+
+  defaultConf = ''
+    # "local1" is used for dhcpd messages.
+    local1.*                     -/var/log/dhcpd
+
+    mail.*                       -/var/log/mail
+
+    *.=warning;*.=err            -/var/log/warn
+    *.crit                        /var/log/warn
+
+    *.*;mail.none;local1.none    -/var/log/messages
+  '';
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.rsyslogd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable rsyslogd.  Note that systemd also logs
+          syslog messages, so you normally don't need to run rsyslogd.
+        '';
+      };
+
+      defaultConfig = mkOption {
+        type = types.lines;
+        default = defaultConf;
+        description = lib.mdDoc ''
+          The default {file}`syslog.conf` file configures a
+          fairly standard setup of log files, which can be extended by
+          means of {var}`extraConfig`.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "news.* -/var/log/news";
+        description = lib.mdDoc ''
+          Additional text appended to {file}`syslog.conf`,
+          after the contents of {var}`defaultConfig`.
+        '';
+      };
+
+      extraParams = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "-m 0" ];
+        description = lib.mdDoc ''
+          Additional parameters passed to {command}`rsyslogd`.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.rsyslog ];
+
+    systemd.services.syslog =
+      { description = "Syslog Daemon";
+
+        requires = [ "syslog.socket" ];
+
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig =
+          { ExecStart = "${pkgs.rsyslog}/sbin/rsyslogd ${toString cfg.extraParams} -f ${syslogConf} -n";
+            ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /var/spool/rsyslog";
+            # Prevent syslogd output looping back through journald.
+            StandardOutput = "null";
+          };
+      };
+
+  };
+
+}
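Example usage (a minimal sketch, not part of the patch; the extra rule is the one given as the option example above):

  services.rsyslogd = {
    enable = true;
    # appended after defaultConfig in the generated syslog.conf
    extraConfig = "news.* -/var/log/news";
  };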
diff --git a/nixpkgs/nixos/modules/services/logging/syslog-ng.nix b/nixpkgs/nixos/modules/services/logging/syslog-ng.nix
new file mode 100644
index 000000000000..48d556b9459e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/syslog-ng.nix
@@ -0,0 +1,98 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.syslog-ng;
+
+  syslogngConfig = pkgs.writeText "syslog-ng.conf" ''
+    ${cfg.configHeader}
+    ${cfg.extraConfig}
+  '';
+
+  ctrlSocket = "/run/syslog-ng/syslog-ng.ctl";
+  pidFile = "/run/syslog-ng/syslog-ng.pid";
+  persistFile = "/var/syslog-ng/syslog-ng.persist";
+
+  syslogngOptions = [
+    "--foreground"
+    "--module-path=${concatStringsSep ":" (["${cfg.package}/lib/syslog-ng"] ++ cfg.extraModulePaths)}"
+    "--cfgfile=${syslogngConfig}"
+    "--control=${ctrlSocket}"
+    "--persist-file=${persistFile}"
+    "--pidfile=${pidFile}"
+  ];
+
+in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "syslog-ng" "serviceName" ] "")
+    (mkRemovedOptionModule [ "services" "syslog-ng" "listenToJournal" ] "")
+  ];
+
+  options = {
+
+    services.syslog-ng = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the syslog-ng daemon.
+        '';
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.syslogng;
+        defaultText = literalExpression "pkgs.syslogng";
+        description = lib.mdDoc ''
+          The package providing syslog-ng binaries.
+        '';
+      };
+      extraModulePaths = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          A list of paths that should be included in syslog-ng's
+          `--module-path` option. They should usually
+          end in `/lib/syslog-ng`.
+        '';
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration added to the end of `syslog-ng.conf`.
+        '';
+      };
+      configHeader = mkOption {
+        type = types.lines;
+        default = ''
+          @version: 4.4
+          @include "scl.conf"
+        '';
+        description = lib.mdDoc ''
+          The very first lines of the configuration file. Should usually contain
+          the syslog-ng version header.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.syslog-ng = {
+      description = "syslog-ng daemon";
+      preStart = "mkdir -p /{var,run}/syslog-ng";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "multi-user.target" ]; # makes sure hostname etc is set
+      serviceConfig = {
+        Type = "notify";
+        PIDFile = pidFile;
+        StandardOutput = "null";
+        Restart = "on-failure";
+        ExecStart = "${cfg.package}/sbin/syslog-ng ${concatStringsSep " " syslogngOptions}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+    };
+  };
+
+}
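Example usage (a minimal sketch, not part of the patch; the source and destination names and the log file path are illustrative):

  services.syslog-ng = {
    enable = true;
    extraConfig = ''
      source s_local { system(); internal(); };
      destination d_all { file("/var/log/syslog-ng-all.log"); };
      log { source(s_local); destination(d_all); };
    '';
  };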
diff --git a/nixpkgs/nixos/modules/services/logging/syslogd.nix b/nixpkgs/nixos/modules/services/logging/syslogd.nix
new file mode 100644
index 000000000000..553973e255f7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/syslogd.nix
@@ -0,0 +1,130 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.syslogd;
+
+  syslogConf = pkgs.writeText "syslog.conf" ''
+    ${optionalString (cfg.tty != "") "kern.warning;*.err;authpriv.none /dev/${cfg.tty}"}
+    ${cfg.defaultConfig}
+    ${cfg.extraConfig}
+  '';
+
+  defaultConf = ''
+    # Send emergency messages to all users.
+    *.emerg                       *
+
+    # "local1" is used for dhcpd messages.
+    local1.*                     -/var/log/dhcpd
+
+    mail.*                       -/var/log/mail
+
+    *.=warning;*.=err            -/var/log/warn
+    *.crit                        /var/log/warn
+
+    *.*;mail.none;local1.none    -/var/log/messages
+  '';
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.syslogd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable syslogd.  Note that systemd also logs
+          syslog messages, so you normally don't need to run syslogd.
+        '';
+      };
+
+      tty = mkOption {
+        type = types.str;
+        default = "tty10";
+        description = lib.mdDoc ''
+          The tty device on which syslogd will print important log
+          messages. Leave this option blank to disable tty logging.
+        '';
+      };
+
+      defaultConfig = mkOption {
+        type = types.lines;
+        default = defaultConf;
+        description = lib.mdDoc ''
+          The default {file}`syslog.conf` file configures a
+          fairly standard setup of log files, which can be extended by
+          means of {var}`extraConfig`.
+        '';
+      };
+
+      enableNetworkInput = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Accept logging through UDP. Option -r of syslogd(8).
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "news.* -/var/log/news";
+        description = lib.mdDoc ''
+          Additional text appended to {file}`syslog.conf`,
+          after the contents of {var}`defaultConfig`.
+        '';
+      };
+
+      extraParams = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "-m 0" ];
+        description = lib.mdDoc ''
+          Additional parameters passed to {command}`syslogd`.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions =
+      [ { assertion = !config.services.rsyslogd.enable;
+          message = "rsyslogd conflicts with syslogd";
+        }
+      ];
+
+    environment.systemPackages = [ pkgs.sysklogd ];
+
+    services.syslogd.extraParams = optional cfg.enableNetworkInput "-r";
+
+    # FIXME: restarting syslog seems to break journal logging.
+    systemd.services.syslog =
+      { description = "Syslog Daemon";
+
+        requires = [ "syslog.socket" ];
+
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig =
+          { ExecStart = "${pkgs.sysklogd}/sbin/syslogd ${toString cfg.extraParams} -f ${syslogConf} -n";
+            # Prevent syslogd output looping back through journald.
+            StandardOutput = "null";
+          };
+      };
+
+  };
+
+}
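Example usage (a minimal sketch, not part of the patch; the extra rule is the one given as the option example above):

  services.syslogd = {
    enable = true;
    tty = "tty10";                 # console on which important messages are printed
    enableNetworkInput = false;    # do not accept remote UDP log input
    extraConfig = "news.* -/var/log/news";
  };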
diff --git a/nixpkgs/nixos/modules/services/logging/ulogd.nix b/nixpkgs/nixos/modules/services/logging/ulogd.nix
new file mode 100644
index 000000000000..05c9797bb28b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/ulogd.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.ulogd;
+  settingsFormat = pkgs.formats.ini { listsAsDuplicateKeys = true; };
+  settingsFile = settingsFormat.generate "ulogd.conf" cfg.settings;
+in {
+  options = {
+    services.ulogd = {
+      enable = mkEnableOption (lib.mdDoc "ulogd");
+
+      settings = mkOption {
+        example = {
+          global.stack = [
+            "log1:NFLOG,base1:BASE,ifi1:IFINDEX,ip2str1:IP2STR,print1:PRINTPKT,emu1:LOGEMU"
+            "log1:NFLOG,base1:BASE,pcap1:PCAP"
+          ];
+
+          log1.group = 2;
+
+          pcap1 = {
+            sync = 1;
+            file = "/var/log/ulogd.pcap";
+          };
+
+          emu1 = {
+            sync = 1;
+            file = "/var/log/ulogd_pkts.log";
+          };
+        };
+        type = settingsFormat.type;
+        default = { };
+        description = lib.mdDoc
+          "Configuration for ulogd. See {file}`/share/doc/ulogd/` in `pkgs.ulogd.doc`.";
+      };
+
+      logLevel = mkOption {
+        type = types.enum [ 1 3 5 7 8 ];
+        default = 5;
+        description = lib.mdDoc
+          "Log level (1 = debug, 3 = info, 5 = notice, 7 = error, 8 = fatal)";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.ulogd = {
+      description = "Ulogd Daemon";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-pre.target" ];
+      before = [ "network-pre.target" ];
+
+      serviceConfig = {
+        ExecStart =
+          "${pkgs.ulogd}/bin/ulogd -c ${settingsFile} --verbose --loglevel ${
+            toString cfg.logLevel
+          }";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+    };
+  };
+}
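Example usage (a minimal sketch, not part of the patch; it mirrors the option example above, and the stack, NFLOG group, and file path are illustrative):

  services.ulogd = {
    enable = true;
    logLevel = 3;
    settings = {
      global.stack = [ "log1:NFLOG,base1:BASE,ifi1:IFINDEX,ip2str1:IP2STR,print1:PRINTPKT,emu1:LOGEMU" ];
      log1.group = 0;
      emu1 = { sync = 1; file = "/var/log/ulogd_pkts.log"; };
    };
  };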
diff --git a/nixpkgs/nixos/modules/services/logging/vector.nix b/nixpkgs/nixos/modules/services/logging/vector.nix
new file mode 100644
index 000000000000..f2edeabfc06f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/logging/vector.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let cfg = config.services.vector;
+
+in
+{
+  options.services.vector = {
+    enable = mkEnableOption (lib.mdDoc "Vector");
+
+    package = mkPackageOptionMD pkgs "vector" { };
+
+    journaldAccess = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable Vector to access journald.
+      '';
+    };
+
+    settings = mkOption {
+      type = (pkgs.formats.json { }).type;
+      default = { };
+      description = lib.mdDoc ''
+        Specify the configuration for Vector in Nix.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # for cli usage
+    environment.systemPackages = [ pkgs.vector ];
+
+    systemd.services.vector = {
+      description = "Vector event and log aggregator";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      requires = [ "network-online.target" ];
+      serviceConfig =
+        let
+          format = pkgs.formats.toml { };
+          conf = format.generate "vector.toml" cfg.settings;
+          validateConfig = file:
+          pkgs.runCommand "validate-vector-conf" {
+            nativeBuildInputs = [ pkgs.vector ];
+          } ''
+              vector validate --no-environment "${file}"
+              ln -s "${file}" "$out"
+            '';
+        in
+        {
+          ExecStart = "${getExe cfg.package} --config ${validateConfig conf}";
+          DynamicUser = true;
+          Restart = "no";
+          StateDirectory = "vector";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+          # This group is required for accessing journald.
+          SupplementaryGroups = mkIf cfg.journaldAccess "systemd-journal";
+        };
+    };
+  };
+}
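Example usage (a minimal sketch, not part of the patch; the source and sink names are arbitrary identifiers). Setting journaldAccess adds the systemd-journal group as shown above:

  services.vector = {
    enable = true;
    journaldAccess = true;
    settings = {
      sources.journald.type = "journald";
      sinks.console = {
        type = "console";
        inputs = [ "journald" ];
        encoding.codec = "json";
      };
    };
  };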
diff --git a/nixpkgs/nixos/modules/services/mail/clamsmtp.nix b/nixpkgs/nixos/modules/services/mail/clamsmtp.nix
new file mode 100644
index 000000000000..a0de25962845
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/clamsmtp.nix
@@ -0,0 +1,181 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.clamsmtp;
+  clamdSocket = "/run/clamav/clamd.ctl"; # See services/security/clamav.nix
+in
+{
+  ##### interface
+  options = {
+    services.clamsmtp = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable clamsmtp.";
+      };
+
+      instances = mkOption {
+        description = lib.mdDoc "Instances of clamsmtp to run.";
+        type = types.listOf (types.submodule { options = {
+          action = mkOption {
+            type = types.enum [ "bounce" "drop" "pass" ];
+            default = "drop";
+            description =
+              lib.mdDoc ''
+                Action to take when a virus is detected.
+
+                Note that viruses often spoof sender addresses, so bouncing is
+                in most cases not a good idea.
+              '';
+          };
+
+          header = mkOption {
+            type = types.str;
+            default = "";
+            example = "X-Virus-Scanned: ClamAV using ClamSMTP";
+            description =
+              lib.mdDoc ''
+                A header to add to scanned messages. See clamsmtpd.conf(5) for
+                more details. Empty means no header.
+              '';
+          };
+
+          keepAlives = mkOption {
+            type = types.int;
+            default = 0;
+            description =
+              lib.mdDoc ''
+                Number of seconds to wait between each NOOP sent to the sending
+                server. 0 to disable.
+
+                This is meant for slow servers where the sending MTA times out
+                waiting for clamd to scan the file.
+              '';
+          };
+
+          listen = mkOption {
+            type = types.str;
+            example = "127.0.0.1:10025";
+            description =
+              lib.mdDoc ''
+                Address to wait for incoming SMTP connections on. See
+                clamsmtpd.conf(5) for more details.
+              '';
+          };
+
+          quarantine = mkOption {
+            type = types.bool;
+            default = false;
+            description =
+              lib.mdDoc ''
+                Whether to quarantine files that contain viruses by leaving them
+                in the temporary directory.
+              '';
+          };
+
+          maxConnections = mkOption {
+            type = types.int;
+            default = 64;
+            description = lib.mdDoc "Maximum number of connections to accept at once.";
+          };
+
+          outAddress = mkOption {
+            type = types.str;
+            description =
+              lib.mdDoc ''
+                Address of the SMTP server to send email to once it has been
+                scanned.
+              '';
+          };
+
+          tempDirectory = mkOption {
+            type = types.str;
+            default = "/tmp";
+            description =
+              lib.mdDoc ''
+                Temporary directory that needs to be accessible to both clamd
+                and clamsmtpd.
+              '';
+          };
+
+          timeout = mkOption {
+            type = types.int;
+            default = 180;
+            description = lib.mdDoc "Time-out for network connections.";
+          };
+
+          transparentProxy = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Enable clamsmtp's transparent proxy support.";
+          };
+
+          virusAction = mkOption {
+            type = with types; nullOr path;
+            default = null;
+            description =
+              lib.mdDoc ''
+                Command to run when a virus is found. Please see VIRUS ACTION in
+                clamsmtpd(8) for a discussion of this option and its safe use.
+              '';
+          };
+
+          xClient = mkOption {
+            type = types.bool;
+            default = false;
+            description =
+              lib.mdDoc ''
+                Send the XCLIENT command to the receiving server, for forwarding
+                client addresses and connection information if the receiving
+                server supports this feature.
+              '';
+          };
+        };});
+      };
+    };
+  };
+
+  ##### implementation
+  config = let
+    configfile = conf: pkgs.writeText "clamsmtpd.conf"
+      ''
+        Action: ${conf.action}
+        ClamAddress: ${clamdSocket}
+        Header: ${conf.header}
+        KeepAlives: ${toString conf.keepAlives}
+        Listen: ${conf.listen}
+        Quarantine: ${if conf.quarantine then "on" else "off"}
+        MaxConnections: ${toString conf.maxConnections}
+        OutAddress: ${conf.outAddress}
+        TempDirectory: ${conf.tempDirectory}
+        TimeOut: ${toString conf.timeout}
+        TransparentProxy: ${if conf.transparentProxy then "on" else "off"}
+        User: clamav
+        ${optionalString (conf.virusAction != null) "VirusAction: ${conf.virusAction}"}
+        XClient: ${if conf.xClient then "on" else "off"}
+      '';
+  in
+    mkIf cfg.enable {
+      assertions = [
+        { assertion = config.services.clamav.daemon.enable;
+          message = "clamsmtp requires clamav to be enabled";
+        }
+      ];
+
+      systemd.services = listToAttrs (imap1 (i: conf:
+        nameValuePair "clamsmtp-${toString i}" {
+          description = "ClamSMTP instance ${toString i}";
+          wantedBy = [ "multi-user.target" ];
+          script = "exec ${pkgs.clamsmtp}/bin/clamsmtpd -f ${configfile conf}";
+          after = [ "clamav-daemon.service" ];
+          requires = [ "clamav-daemon.service" ];
+          serviceConfig.Type = "forking";
+          serviceConfig.PrivateTmp = "yes";
+          unitConfig.JoinsNamespaceOf = "clamav-daemon.service";
+        }
+      ) cfg.instances);
+    };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
+}
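Example usage (a minimal sketch, not part of the patch; the ports are placeholders). The assertion above requires ClamAV to be enabled as well:

  services.clamav.daemon.enable = true;
  services.clamsmtp = {
    enable = true;
    instances = [ {
      listen = "127.0.0.1:10025";      # where the MTA hands mail to clamsmtpd
      outAddress = "127.0.0.1:10026";  # where scanned mail is re-injected
    } ];
  };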
diff --git a/nixpkgs/nixos/modules/services/mail/davmail.nix b/nixpkgs/nixos/modules/services/mail/davmail.nix
new file mode 100644
index 000000000000..9cdb435af4a1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/davmail.nix
@@ -0,0 +1,126 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.davmail;
+
+  configType = with types;
+    oneOf [ (attrsOf configType) str int bool ] // {
+      description = "davmail config type (str, int, bool or attribute set thereof)";
+    };
+
+  toStr = val: if isBool val then boolToString val else toString val;
+
+  linesForAttrs = attrs: concatMap (name: let value = attrs.${name}; in
+    if isAttrs value
+      then map (line: name + "." + line) (linesForAttrs value)
+      else [ "${name}=${toStr value}" ]
+  ) (attrNames attrs);
+
+  configFile = pkgs.writeText "davmail.properties" (concatStringsSep "\n" (linesForAttrs cfg.config));
+
+in
+
+  {
+    options.services.davmail = {
+      enable = mkEnableOption (lib.mdDoc "davmail, an MS Exchange gateway");
+
+      url = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Outlook Web Access URL to access the exchange server, i.e. the base webmail URL.";
+        example = "https://outlook.office365.com/EWS/Exchange.asmx";
+      };
+
+      config = mkOption {
+        type = configType;
+        default = {};
+        description = lib.mdDoc ''
+          Davmail configuration. Refer to
+          <http://davmail.sourceforge.net/serversetup.html>
+          and <http://davmail.sourceforge.net/advanced.html>
+          for details on supported values.
+        '';
+        example = literalExpression ''
+          {
+            davmail.allowRemote = true;
+            davmail.imapPort = 55555;
+            davmail.bindAddress = "10.0.1.2";
+            davmail.smtpSaveInSent = true;
+            davmail.folderSizeLimit = 10;
+            davmail.caldavAutoSchedule = false;
+            log4j.rootLogger = "DEBUG";
+          }
+        '';
+      };
+    };
+
+    config = mkIf cfg.enable {
+
+      services.davmail.config = {
+        davmail = mapAttrs (name: mkDefault) {
+          server = true;
+          disableUpdateCheck = true;
+          logFilePath = "/var/log/davmail/davmail.log";
+          logFileSize = "1MB";
+          mode = "auto";
+          url = cfg.url;
+          caldavPort = 1080;
+          imapPort = 1143;
+          ldapPort = 1389;
+          popPort = 1110;
+          smtpPort = 1025;
+        };
+        log4j = {
+          logger.davmail = mkDefault "WARN";
+          logger.httpclient.wire = mkDefault "WARN";
+          logger.org.apache.commons.httpclient = mkDefault "WARN";
+          rootLogger = mkDefault "WARN";
+        };
+      };
+
+      systemd.services.davmail = {
+        description = "DavMail POP/IMAP/SMTP Exchange Gateway";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          Type = "simple";
+          ExecStart = "${pkgs.davmail}/bin/davmail ${configFile}";
+          Restart = "on-failure";
+          DynamicUser = "yes";
+          LogsDirectory = "davmail";
+
+          CapabilityBoundingSet = [ "" ];
+          DeviceAllow = [ "" ];
+          LockPersonality = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectSystem = "strict";
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          RemoveIPC = true;
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = "@system-service";
+          SystemCallErrorNumber = "EPERM";
+          UMask = "0077";
+
+        };
+      };
+
+      environment.systemPackages = [ pkgs.davmail ];
+    };
+  }
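Example usage (a minimal sketch, not part of the patch; the URL is the option example above and the port override is illustrative):

  services.davmail = {
    enable = true;
    url = "https://outlook.office365.com/EWS/Exchange.asmx";
    config.davmail.imapPort = 1143;   # override one of the defaults set by the module
  };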
diff --git a/nixpkgs/nixos/modules/services/mail/dkimproxy-out.nix b/nixpkgs/nixos/modules/services/mail/dkimproxy-out.nix
new file mode 100644
index 000000000000..6f9cbc4e9d4d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/dkimproxy-out.nix
@@ -0,0 +1,120 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.dkimproxy-out;
+  keydir = "/var/lib/dkimproxy-out";
+  privkey = "${keydir}/private.key";
+  pubkey = "${keydir}/public.key";
+in
+{
+  ##### interface
+  options = {
+    services.dkimproxy-out = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            Whether to enable dkimproxy_out.
+
+            Note that a key will be auto-generated, and can be found in
+            ${keydir}.
+          '';
+      };
+
+      listen = mkOption {
+        type = types.str;
+        example = "127.0.0.1:10027";
+        description = lib.mdDoc "Address:port DKIMproxy should listen on.";
+      };
+
+      relay = mkOption {
+        type = types.str;
+        example = "127.0.0.1:10028";
+        description = lib.mdDoc "Address:port DKIMproxy should forward mail to.";
+      };
+
+      domains = mkOption {
+        type = with types; listOf str;
+        example = [ "example.org" "example.com" ];
+        description = lib.mdDoc "List of domains DKIMproxy can sign for.";
+      };
+
+      selector = mkOption {
+        type = types.str;
+        example = "selector1";
+        description =
+          lib.mdDoc ''
+            The selector to use for DKIM key identification.
+
+            For example, if 'selector1' is used here, then for each domain
+            'example.org' given in `domains`, 'selector1._domainkey.example.org'
+            should contain the TXT record indicating the public key is the one
+            in ${pubkey}: "v=DKIM1; t=s; p=[THE PUBLIC KEY]".
+          '';
+      };
+
+      keySize = mkOption {
+        type = types.int;
+        default = 2048;
+        description =
+          lib.mdDoc ''
+            Size of the RSA key to use to sign outgoing emails. Note that
+            2048 bits is the largest key size verifiers are required to support per RFC 6376.
+          '';
+      };
+
+      # TODO: allow signature for other schemes than dkim(c=relaxed/relaxed)?
+      # This being the scheme used by gmail, maybe nothing more is needed for
+      # reasonable use.
+    };
+  };
+
+  ##### implementation
+  config = let
+    configfile = pkgs.writeText "dkimproxy_out.conf"
+      ''
+        listen ${cfg.listen}
+        relay ${cfg.relay}
+
+        domain ${concatStringsSep "," cfg.domains}
+        selector ${cfg.selector}
+
+        signature dkim(c=relaxed/relaxed)
+
+        keyfile ${privkey}
+      '';
+  in
+    mkIf cfg.enable {
+      users.groups.dkimproxy-out = {};
+      users.users.dkimproxy-out = {
+        description = "DKIMproxy_out daemon";
+        group = "dkimproxy-out";
+        isSystemUser = true;
+      };
+
+      systemd.services.dkimproxy-out = {
+        description = "DKIMproxy_out";
+        wantedBy = [ "multi-user.target" ];
+        preStart = ''
+          if [ ! -d "${keydir}" ]; then
+            mkdir -p "${keydir}"
+            chmod 0700 "${keydir}"
+            ${pkgs.openssl}/bin/openssl genrsa -out "${privkey}" ${toString cfg.keySize}
+            ${pkgs.openssl}/bin/openssl rsa -in "${privkey}" -pubout -out "${pubkey}"
+            chown -R dkimproxy-out:dkimproxy-out "${keydir}"
+          fi
+        '';
+        script = ''
+          exec ${pkgs.dkimproxy}/bin/dkimproxy.out --conf_file=${configfile}
+        '';
+        serviceConfig = {
+          User = "dkimproxy-out";
+          PermissionsStartOnly = true;
+        };
+      };
+    };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
+}
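Example usage (a minimal sketch, not part of the patch; the addresses, domain, and selector are placeholders). The key pair is then generated under /var/lib/dkimproxy-out on first start, as the preStart script above shows:

  services.dkimproxy-out = {
    enable = true;
    listen = "127.0.0.1:10027";
    relay = "127.0.0.1:10028";
    domains = [ "example.org" ];
    selector = "selector1";
  };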
diff --git a/nixpkgs/nixos/modules/services/mail/dovecot.nix b/nixpkgs/nixos/modules/services/mail/dovecot.nix
new file mode 100644
index 000000000000..abbb2f32e6cc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/dovecot.nix
@@ -0,0 +1,462 @@
+{ options, config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dovecot2;
+  dovecotPkg = pkgs.dovecot;
+
+  baseDir = "/run/dovecot2";
+  stateDir = "/var/lib/dovecot";
+
+  dovecotConf = concatStrings [
+    ''
+      base_dir = ${baseDir}
+      protocols = ${concatStringsSep " " cfg.protocols}
+      sendmail_path = /run/wrappers/bin/sendmail
+      # defining mail_plugins must be done before the first protocol {} filter because of https://doc.dovecot.org/configuration_manual/config_file/config_file_syntax/#variable-expansion
+      mail_plugins = $mail_plugins ${concatStringsSep " " cfg.mailPlugins.globally.enable}
+    ''
+
+    (
+      concatStringsSep "\n" (
+        mapAttrsToList (
+          protocol: plugins: ''
+            protocol ${protocol} {
+              mail_plugins = $mail_plugins ${concatStringsSep " " plugins.enable}
+            }
+          ''
+        ) cfg.mailPlugins.perProtocol
+      )
+    )
+
+    (
+      if cfg.sslServerCert == null then ''
+        ssl = no
+        disable_plaintext_auth = no
+      '' else ''
+        ssl_cert = <${cfg.sslServerCert}
+        ssl_key = <${cfg.sslServerKey}
+        ${optionalString (cfg.sslCACert != null) ("ssl_ca = <" + cfg.sslCACert)}
+        ${optionalString cfg.enableDHE ''ssl_dh = <${config.security.dhparams.params.dovecot2.path}''}
+        disable_plaintext_auth = yes
+      ''
+    )
+
+    ''
+      default_internal_user = ${cfg.user}
+      default_internal_group = ${cfg.group}
+      ${optionalString (cfg.mailUser != null) "mail_uid = ${cfg.mailUser}"}
+      ${optionalString (cfg.mailGroup != null) "mail_gid = ${cfg.mailGroup}"}
+
+      mail_location = ${cfg.mailLocation}
+
+      maildir_copy_with_hardlinks = yes
+      pop3_uidl_format = %08Xv%08Xu
+
+      auth_mechanisms = plain login
+
+      service auth {
+        user = root
+      }
+    ''
+
+    (
+      optionalString cfg.enablePAM ''
+        userdb {
+          driver = passwd
+        }
+
+        passdb {
+          driver = pam
+          args = ${optionalString cfg.showPAMFailure "failure_show_msg=yes"} dovecot2
+        }
+      ''
+    )
+
+    (
+      optionalString (cfg.sieveScripts != {}) ''
+        plugin {
+          ${concatStringsSep "\n" (mapAttrsToList (to: from: "sieve_${to} = ${stateDir}/sieve/${to}") cfg.sieveScripts)}
+        }
+      ''
+    )
+
+    (
+      optionalString (cfg.mailboxes != {}) ''
+        namespace inbox {
+          inbox=yes
+          ${concatStringsSep "\n" (map mailboxConfig (attrValues cfg.mailboxes))}
+        }
+      ''
+    )
+
+    (
+      optionalString cfg.enableQuota ''
+        service quota-status {
+          executable = ${dovecotPkg}/libexec/dovecot/quota-status -p postfix
+          inet_listener {
+            port = ${cfg.quotaPort}
+          }
+          client_limit = 1
+        }
+
+        plugin {
+          quota_rule = *:storage=${cfg.quotaGlobalPerUser}
+          quota = count:User quota # per virtual mail user quota
+          quota_status_success = DUNNO
+          quota_status_nouser = DUNNO
+          quota_status_overquota = "552 5.2.2 Mailbox is full"
+          quota_grace = 10%%
+          quota_vsizes = yes
+        }
+      ''
+    )
+
+    cfg.extraConfig
+  ];
+
+  modulesDir = pkgs.symlinkJoin {
+    name = "dovecot-modules";
+    paths = map (pkg: "${pkg}/lib/dovecot") ([ dovecotPkg ] ++ map (module: module.override { dovecot = dovecotPkg; }) cfg.modules);
+  };
+
+  mailboxConfig = mailbox: ''
+    mailbox "${mailbox.name}" {
+      auto = ${toString mailbox.auto}
+  '' + optionalString (mailbox.autoexpunge != null) ''
+    autoexpunge = ${mailbox.autoexpunge}
+  '' + optionalString (mailbox.specialUse != null) ''
+    special_use = \${toString mailbox.specialUse}
+  '' + "}";
+
+  mailboxes = { name, ... }: {
+    options = {
+      name = mkOption {
+        type = types.strMatching ''[^"]+'';
+        example = "Spam";
+        default = name;
+        readOnly = true;
+        description = lib.mdDoc "The name of the mailbox.";
+      };
+      auto = mkOption {
+        type = types.enum [ "no" "create" "subscribe" ];
+        default = "no";
+        example = "subscribe";
+        description = lib.mdDoc "Whether to automatically create or create and subscribe to the mailbox or not.";
+      };
+      specialUse = mkOption {
+        type = types.nullOr (types.enum [ "All" "Archive" "Drafts" "Flagged" "Junk" "Sent" "Trash" ]);
+        default = null;
+        example = "Junk";
+        description = lib.mdDoc "Null if no special use flag is set. Other than that every use flag mentioned in the RFC is valid.";
+      };
+      autoexpunge = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "60d";
+        description = lib.mdDoc ''
+          Automatically remove all email from the mailbox that is older than the
+          specified time.
+        '';
+      };
+    };
+  };
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "dovecot2" "package" ] "")
+  ];
+
+  options.services.dovecot2 = {
+    enable = mkEnableOption (lib.mdDoc "the dovecot 2.x POP3/IMAP server");
+
+    enablePop3 = mkEnableOption (lib.mdDoc "starting the POP3 listener (when Dovecot is enabled)");
+
+    enableImap = mkEnableOption (lib.mdDoc "starting the IMAP listener (when Dovecot is enabled)") // { default = true; };
+
+    enableLmtp = mkEnableOption (lib.mdDoc "starting the LMTP listener (when Dovecot is enabled)");
+
+    protocols = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc "Additional listeners to start when Dovecot is enabled.";
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "dovecot2";
+      description = lib.mdDoc "Dovecot user name.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "dovecot2";
+      description = lib.mdDoc "Dovecot group name.";
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = "mail_debug = yes";
+      description = lib.mdDoc "Additional entries to put verbatim into Dovecot's config file.";
+    };
+
+    mailPlugins =
+      let
+        plugins = hint: types.submodule {
+          options = {
+            enable = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = lib.mdDoc "mail plugins to enable as a list of strings to append to the ${hint} `$mail_plugins` configuration variable";
+            };
+          };
+        };
+      in
+        mkOption {
+          type = with types; submodule {
+            options = {
+              globally = mkOption {
+                description = lib.mdDoc "Additional entries to add to the mail_plugins variable for all protocols";
+                type = plugins "top-level";
+                example = { enable = [ "virtual" ]; };
+                default = { enable = []; };
+              };
+              perProtocol = mkOption {
+                description = lib.mdDoc "Additional entries to add to the mail_plugins variable, per protocol";
+                type = attrsOf (plugins "corresponding per-protocol");
+                default = {};
+                example = { imap = [ "imap_acl" ]; };
+              };
+            };
+          };
+          description = lib.mdDoc "Additional entries to add to the mail_plugins variable, globally and per protocol";
+          example = {
+            globally.enable = [ "acl" ];
+            perProtocol.imap.enable = [ "imap_acl" ];
+          };
+          default = { globally.enable = []; perProtocol = {}; };
+        };
+
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc "Config file used for the whole dovecot configuration.";
+      apply = v: if v != null then v else pkgs.writeText "dovecot.conf" dovecotConf;
+    };
+
+    mailLocation = mkOption {
+      type = types.str;
+      default = "maildir:/var/spool/mail/%u"; /* Same as inbox, as postfix */
+      example = "maildir:~/mail:INBOX=/var/spool/mail/%u";
+      description = lib.mdDoc ''
+        Location that dovecot will use for mail folders. Dovecot mail_location option.
+      '';
+    };
+
+    mailUser = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Default user to store mail for virtual users.";
+    };
+
+    mailGroup = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Default group to store mail for virtual users.";
+    };
+
+    createMailUser = mkEnableOption (lib.mdDoc ''automatically creating the user
+      given in {option}`services.dovecot2.mailUser` and the group
+      given in {option}`services.dovecot2.mailGroup`.'') // { default = true; };
+
+    modules = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression "[ pkgs.dovecot_pigeonhole ]";
+      description = lib.mdDoc ''
+        Symlinks the contents of lib/dovecot of every given package into
+        /etc/dovecot/modules. This will make the given modules available
+        if a dovecot package with the module_dir patch applied is being used.
+      '';
+    };
+
+    sslCACert = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Path to the server's CA certificate key.";
+    };
+
+    sslServerCert = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Path to the server's public key.";
+    };
+
+    sslServerKey = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Path to the server's private key.";
+    };
+
+    enablePAM = mkEnableOption (lib.mdDoc "creating a own Dovecot PAM service and configure PAM user logins") // { default = true; };
+
+    enableDHE = mkEnableOption (lib.mdDoc "ssl_dh and generation of primes for the key exchange") // { default = true; };
+
+    sieveScripts = mkOption {
+      type = types.attrsOf types.path;
+      default = {};
+      description = lib.mdDoc "Sieve scripts to be executed. Key is a sequence, e.g. 'before2', 'after' etc.";
+    };
+
+    showPAMFailure = mkEnableOption (lib.mdDoc "showing the PAM failure message on authentication error (useful for OTPW)");
+
+    mailboxes = mkOption {
+      type = with types; coercedTo
+        (listOf unspecified)
+        (list: listToAttrs (map (entry: { name = entry.name; value = removeAttrs entry ["name"]; }) list))
+        (attrsOf (submodule mailboxes));
+      default = {};
+      example = literalExpression ''
+        {
+          Spam = { specialUse = "Junk"; auto = "create"; };
+        }
+      '';
+      description = lib.mdDoc "Configure mailboxes and auto create or subscribe them.";
+    };
+
+    enableQuota = mkEnableOption (lib.mdDoc "the dovecot quota service");
+
+    quotaPort = mkOption {
+      type = types.str;
+      default = "12340";
+      description = lib.mdDoc ''
+        The Port the dovecot quota service binds to.
+        If using postfix, add check_policy_service inet:localhost:12340 to your smtpd_recipient_restrictions in your postfix config.
+      '';
+    };
+    quotaGlobalPerUser = mkOption {
+      type = types.str;
+      default = "100G";
+      example = "10G";
+      description = lib.mdDoc "Quota limit for the user in bytes. Supports suffixes b, k, M, G, T and %.";
+    };
+
+  };
+
+
+  config = mkIf cfg.enable {
+    security.pam.services.dovecot2 = mkIf cfg.enablePAM {};
+
+    security.dhparams = mkIf (cfg.sslServerCert != null && cfg.enableDHE) {
+      enable = true;
+      params.dovecot2 = {};
+    };
+    services.dovecot2.protocols =
+      optional cfg.enableImap "imap"
+      ++ optional cfg.enablePop3 "pop3"
+      ++ optional cfg.enableLmtp "lmtp";
+
+    services.dovecot2.mailPlugins = mkIf cfg.enableQuota {
+      globally.enable = [ "quota" ];
+      perProtocol.imap.enable = [ "imap_quota" ];
+    };
+
+    users.users = {
+      dovenull =
+        {
+          uid = config.ids.uids.dovenull2;
+          description = "Dovecot user for untrusted logins";
+          group = "dovenull";
+        };
+    } // optionalAttrs (cfg.user == "dovecot2") {
+      dovecot2 =
+        {
+          uid = config.ids.uids.dovecot2;
+          description = "Dovecot user";
+          group = cfg.group;
+        };
+    } // optionalAttrs (cfg.createMailUser && cfg.mailUser != null) {
+      ${cfg.mailUser} =
+        { description = "Virtual Mail User"; isSystemUser = true; } // optionalAttrs (cfg.mailGroup != null)
+          { group = cfg.mailGroup; };
+    };
+
+    users.groups = {
+      dovenull.gid = config.ids.gids.dovenull2;
+    } // optionalAttrs (cfg.group == "dovecot2") {
+      dovecot2.gid = config.ids.gids.dovecot2;
+    } // optionalAttrs (cfg.createMailUser && cfg.mailGroup != null) {
+      ${cfg.mailGroup} = {};
+    };
+
+    environment.etc."dovecot/modules".source = modulesDir;
+    environment.etc."dovecot/dovecot.conf".source = cfg.configFile;
+
+    systemd.services.dovecot2 = {
+      description = "Dovecot IMAP/POP3 server";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ cfg.configFile modulesDir ];
+
+      startLimitIntervalSec = 60;  # 1 min
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${dovecotPkg}/sbin/dovecot -F";
+        ExecReload = "${dovecotPkg}/sbin/doveadm reload";
+        Restart = "on-failure";
+        RestartSec = "1s";
+        RuntimeDirectory = [ "dovecot2" ];
+      };
+
+      # When copying sieve scripts preserve the original time stamp
+      # (should be 0) so that the compiled sieve script is newer than
+      # the source file and Dovecot won't try to compile it.
+      preStart = ''
+        rm -rf ${stateDir}/sieve
+      '' + optionalString (cfg.sieveScripts != {}) ''
+        mkdir -p ${stateDir}/sieve
+        ${concatStringsSep "\n" (
+        mapAttrsToList (
+          to: from: ''
+            if [ -d '${from}' ]; then
+              mkdir '${stateDir}/sieve/${to}'
+              cp -p "${from}/"*.sieve '${stateDir}/sieve/${to}'
+            else
+              cp -p '${from}' '${stateDir}/sieve/${to}'
+            fi
+            ${pkgs.dovecot_pigeonhole}/bin/sievec '${stateDir}/sieve/${to}'
+          ''
+        ) cfg.sieveScripts
+      )}
+        chown -R '${cfg.mailUser}:${cfg.mailGroup}' '${stateDir}/sieve'
+      '';
+    };
+
+    environment.systemPackages = [ dovecotPkg ];
+
+    warnings = mkIf (any isList options.services.dovecot2.mailboxes.definitions) [
+      "Declaring `services.dovecot2.mailboxes' as a list is deprecated and will break eval in 21.05! See the release notes for more info for migration."
+    ];
+
+    assertions = [
+      {
+        assertion = (cfg.sslServerCert == null) == (cfg.sslServerKey == null)
+        && (cfg.sslCACert != null -> !(cfg.sslServerCert == null || cfg.sslServerKey == null));
+        message = "dovecot needs both sslServerCert and sslServerKey defined for working crypto";
+      }
+      {
+        assertion = cfg.showPAMFailure -> cfg.enablePAM;
+        message = "dovecot is configured with showPAMFailure while enablePAM is disabled";
+      }
+      {
+        assertion = cfg.sieveScripts != {} -> (cfg.mailUser != null && cfg.mailGroup != null);
+        message = "dovecot requires mailUser and mailGroup to be set when sieveScripts is set";
+      }
+    ];
+
+  };
+
+}
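Example usage (a minimal sketch, not part of the patch; the certificate paths are hypothetical placeholders, and both sslServerCert and sslServerKey are set together to satisfy the assertion above):

  services.dovecot2 = {
    enable = true;
    enablePop3 = false;
    mailLocation = "maildir:/var/spool/mail/%u";
    mailboxes = {
      Spam = { specialUse = "Junk"; auto = "create"; };
    };
    sslServerCert = "/var/lib/acme/mail.example.org/fullchain.pem";
    sslServerKey  = "/var/lib/acme/mail.example.org/key.pem";
  };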
diff --git a/nixpkgs/nixos/modules/services/mail/dspam.nix b/nixpkgs/nixos/modules/services/mail/dspam.nix
new file mode 100644
index 000000000000..4fccd452a4fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/dspam.nix
@@ -0,0 +1,150 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.dspam;
+
+  dspam = pkgs.dspam;
+
+  defaultSock = "/run/dspam/dspam.sock";
+
+  cfgfile = pkgs.writeText "dspam.conf" ''
+    Home /var/lib/dspam
+    StorageDriver ${dspam}/lib/dspam/lib${cfg.storageDriver}_drv.so
+
+    Trust root
+    Trust ${cfg.user}
+    SystemLog on
+    UserLog on
+
+    ${optionalString (cfg.domainSocket != null) ''
+      ServerDomainSocketPath "${cfg.domainSocket}"
+      ClientHost "${cfg.domainSocket}"
+    ''}
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.dspam = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the dspam spam filter.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "dspam";
+        description = lib.mdDoc "User for the dspam daemon.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "dspam";
+        description = lib.mdDoc "Group for the dspam daemon.";
+      };
+
+      storageDriver = mkOption {
+        type = types.str;
+        default = "hash";
+        description =  lib.mdDoc "Storage driver backend to use for dspam.";
+      };
+
+      domainSocket = mkOption {
+        type = types.nullOr types.path;
+        default = defaultSock;
+        description = lib.mdDoc "Path to local domain socket which is used for communication with the daemon. Set to null to disable UNIX socket.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Additional dspam configuration.";
+      };
+
+      maintenanceInterval = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "If set, maintenance script will be run at specified (in systemd.timer format) interval";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+      users.users = optionalAttrs (cfg.user == "dspam") {
+        dspam = {
+          group = cfg.group;
+          uid = config.ids.uids.dspam;
+        };
+      };
+
+      users.groups = optionalAttrs (cfg.group == "dspam") {
+        dspam.gid = config.ids.gids.dspam;
+      };
+
+      environment.systemPackages = [ dspam ];
+
+      environment.etc."dspam/dspam.conf".source = cfgfile;
+
+      systemd.services.dspam = {
+        description = "dspam spam filtering daemon";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "postgresql.service" ];
+        restartTriggers = [ cfgfile ];
+
+        serviceConfig = {
+          ExecStart = "${dspam}/bin/dspam --daemon --nofork";
+          User = cfg.user;
+          Group = cfg.group;
+          RuntimeDirectory = optional (cfg.domainSocket == defaultSock) "dspam";
+          RuntimeDirectoryMode = optional (cfg.domainSocket == defaultSock) "0750";
+          StateDirectory = "dspam";
+          StateDirectoryMode = "0750";
+          LogsDirectory = "dspam";
+          LogsDirectoryMode = "0750";
+          # DSPAM segfaults on just about every error
+          Restart = "on-abort";
+          RestartSec = "1s";
+        };
+      };
+    }
+
+    (mkIf (cfg.maintenanceInterval != null) {
+      systemd.timers.dspam-maintenance = {
+        description = "Timer for dspam maintenance script";
+        wantedBy = [ "timers.target" ];
+        timerConfig = {
+          OnCalendar = cfg.maintenanceInterval;
+          Unit = "dspam-maintenance.service";
+        };
+      };
+
+      systemd.services.dspam-maintenance = {
+        description = "dspam maintenance script";
+        restartTriggers = [ cfgfile ];
+
+        serviceConfig = {
+          ExecStart = "${dspam}/bin/dspam_maintenance --verbose";
+          Type = "oneshot";
+          User = cfg.user;
+          Group = cfg.group;
+        };
+      };
+    })
+  ]);
+}
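Example usage (a minimal sketch, not part of the patch; "daily" is an ordinary systemd calendar expression used here for illustration):

  services.dspam = {
    enable = true;
    maintenanceInterval = "daily";   # schedules the dspam-maintenance timer defined above
  };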
diff --git a/nixpkgs/nixos/modules/services/mail/exim.nix b/nixpkgs/nixos/modules/services/mail/exim.nix
new file mode 100644
index 000000000000..1d1258913b67
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/exim.nix
@@ -0,0 +1,133 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) literalExpression mkIf mkOption singleton types;
+  inherit (pkgs) coreutils;
+  cfg = config.services.exim;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.exim = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the Exim mail transfer agent.";
+      };
+
+      config = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Verbatim Exim configuration.  This should not contain exim_user,
+          exim_group, exim_path, or spool_directory.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "exim";
+        description = lib.mdDoc ''
+          User to use when no root privileges are required.
+          In particular, this applies when receiving messages and when doing
+          remote deliveries.  (Local deliveries run as various non-root users,
+          typically as the owner of a local mailbox.) Specifying this value
+          as root is not supported.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "exim";
+        description = lib.mdDoc ''
+          Group to use when no root privileges are required.
+        '';
+      };
+
+      spoolDir = mkOption {
+        type = types.path;
+        default = "/var/spool/exim";
+        description = lib.mdDoc ''
+          Location of the spool directory of exim.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.exim;
+        defaultText = literalExpression "pkgs.exim";
+        description = lib.mdDoc ''
+          The Exim derivation to use.
+          This can be used to enable features such as LDAP or PAM support.
+        '';
+      };
+
+      queueRunnerInterval = mkOption {
+        type = types.str;
+        default = "5m";
+        description = lib.mdDoc ''
+          How often to spawn a new queue runner.
+        '';
+      };
+    };
+
+  };
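+  # A hedged usage sketch (example.org is a placeholder). Note that `config`
+  # is verbatim Exim configuration and must not set exim_user, exim_group,
+  # exim_path or spool_directory, which the module provides itself:
+  #
+  #   services.exim = {
+  #     enable = true;
+  #     queueRunnerInterval = "10m";
+  #     config = ''
+  #       primary_hostname = mail.example.org
+  #     '';
+  #   };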
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment = {
+      etc."exim.conf".text = ''
+        exim_user = ${cfg.user}
+        exim_group = ${cfg.group}
+        exim_path = /run/wrappers/bin/exim
+        spool_directory = ${cfg.spoolDir}
+        ${cfg.config}
+      '';
+      systemPackages = [ cfg.package ];
+    };
+
+    users.users.${cfg.user} = {
+      description = "Exim mail transfer agent user";
+      uid = config.ids.uids.exim;
+      group = cfg.group;
+    };
+
+    users.groups.${cfg.group} = {
+      gid = config.ids.gids.exim;
+    };
+
+    security.wrappers.exim =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${cfg.package}/bin/exim";
+      };
+
+    systemd.services.exim = {
+      description = "Exim Mail Daemon";
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."exim.conf".source ];
+      serviceConfig = {
+        ExecStart   = "!${cfg.package}/bin/exim -bdf -q${cfg.queueRunnerInterval}";
+        ExecReload  = "!${coreutils}/bin/kill -HUP $MAINPID";
+        User        = cfg.user;
+      };
+      preStart = ''
+        if ! test -d ${cfg.spoolDir}; then
+          ${coreutils}/bin/mkdir -p ${cfg.spoolDir}
+          ${coreutils}/bin/chown ${cfg.user}:${cfg.group} ${cfg.spoolDir}
+        fi
+      '';
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/mail/goeland.nix b/nixpkgs/nixos/modules/services/mail/goeland.nix
new file mode 100644
index 000000000000..13092a65ed90
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/goeland.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.goeland;
+  tomlFormat = pkgs.formats.toml { };
+in
+{
+  options.services.goeland = {
+    enable = mkEnableOption (mdDoc "goeland");
+
+    settings = mkOption {
+      description = mdDoc ''
+        Configuration of goeland.
+        See the [example config file](https://github.com/slurdge/goeland/blob/master/cmd/asset/config.default.toml) for the available options.
+      '';
+      default = { };
+      type = tomlFormat.type;
+    };
+    schedule = mkOption {
+      type = types.str;
+      default = "12h";
+      example = "Mon, 00:00:00";
+      description = mdDoc "How often to run goeland, in systemd time format.";
+    };
+    stateDir = mkOption {
+      type = types.path;
+      default = "/var/lib/goeland";
+      description = mdDoc ''
+        The data directory for goeland where the database will reside if using the unseen filter.
+        If left as the default value, this directory will automatically be created before the goeland
+        service starts; otherwise you are responsible for ensuring the directory exists with
+        appropriate ownership and permissions.
+      '';
+    };
+  };
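+  # A minimal sketch of the options above; the password_file key matches the
+  # recommendation in the warning emitted below (paths are placeholders):
+  #
+  #   services.goeland = {
+  #     enable = true;
+  #     schedule = "12h";
+  #     settings.email.password_file = "/run/secrets/goeland-email-password";
+  #   };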
+
+  config = mkIf cfg.enable {
+    services.goeland.settings.database = "${cfg.stateDir}/goeland.db";
+
+    systemd.services.goeland = {
+      serviceConfig = let confFile = tomlFormat.generate "config.toml" cfg.settings; in mkMerge [
+        {
+          ExecStart = "${pkgs.goeland}/bin/goeland run -c ${confFile}";
+          User = "goeland";
+          Group = "goeland";
+        }
+        (mkIf (cfg.stateDir == "/var/lib/goeland") {
+          StateDirectory = "goeland";
+          StateDirectoryMode = "0750";
+        })
+      ];
+      startAt = cfg.schedule;
+    };
+
+    users.users.goeland = {
+      description = "goeland user";
+      group = "goeland";
+      isSystemUser = true;
+    };
+    users.groups.goeland = { };
+
+    warnings = optionals (hasAttr "password" cfg.settings.email) [
+      ''
+        It is not recommended to set the "services.goeland.settings.email.password"
+        option as it will be in cleartext in the Nix store.
+        Please use "services.goeland.settings.email.password_file" instead.
+      ''
+    ];
+  };
+
+  meta.maintainers = with maintainers; [ sweenu ];
+}
diff --git a/nixpkgs/nixos/modules/services/mail/listmonk.nix b/nixpkgs/nixos/modules/services/mail/listmonk.nix
new file mode 100644
index 000000000000..cea1bc956081
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/listmonk.nix
@@ -0,0 +1,222 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.listmonk;
+  tomlFormat = pkgs.formats.toml { };
+  cfgFile = tomlFormat.generate "listmonk.toml" cfg.settings;
+  # Escaping is done according to https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS
+  setDatabaseOption = key: value:
+    "UPDATE settings SET value = '${
+      lib.replaceStrings [ "'" ] [ "''" ] (builtins.toJSON value)
+    }' WHERE key = '${key}';";
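+  # For example, setDatabaseOption "app.notify_emails" [ "admin@example.org" ]
+  # yields (the value is JSON-encoded, single quotes doubled for SQL):
+  #   UPDATE settings SET value = '["admin@example.org"]' WHERE key = 'app.notify_emails';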
+  updateDatabaseConfigSQL = pkgs.writeText "update-database-config.sql"
+    (concatStringsSep "\n" (mapAttrsToList setDatabaseOption
+      (if (cfg.database.settings != null) then
+        cfg.database.settings
+      else
+        { })));
+  updateDatabaseConfigScript =
+    pkgs.writeShellScriptBin "update-database-config.sh" ''
+      ${if cfg.database.mutableSettings then ''
+        if [ ! -f /var/lib/listmonk/.db_settings_initialized ]; then
+          ${pkgs.postgresql}/bin/psql -d listmonk -f ${updateDatabaseConfigSQL} ;
+          touch /var/lib/listmonk/.db_settings_initialized
+        fi
+      '' else
+        "${pkgs.postgresql}/bin/psql -d listmonk -f ${updateDatabaseConfigSQL}"}
+    '';
+
+  databaseSettingsOpts = with types; {
+    freeformType =
+      oneOf [ (listOf str) (listOf (attrsOf anything)) str int bool ];
+
+    options = {
+      "app.notify_emails" = mkOption {
+        type = listOf str;
+        default = [ ];
+        description = lib.mdDoc "Administrator emails for system notifications";
+      };
+
+      "privacy.exportable" = mkOption {
+        type = listOf str;
+        default = [ "profile" "subscriptions" "campaign_views" "link_clicks" ];
+        description = lib.mdDoc
+          "List of fields which can be exported through an automatic export request";
+      };
+
+      "privacy.domain_blocklist" = mkOption {
+        type = listOf str;
+        default = [ ];
+        description = lib.mdDoc
+          "E-mail addresses with these domains are disallowed from subscribing.";
+      };
+
+      smtp = mkOption {
+        type = listOf (submodule {
+          freeformType = with types; attrsOf anything;
+
+          options = {
+            enabled = mkEnableOption (lib.mdDoc "this SMTP server for listmonk");
+            host = mkOption {
+              type = types.str;
+              description = lib.mdDoc "Hostname for the SMTP server";
+            };
+            port = mkOption {
+              type = types.port;
+              description = lib.mdDoc "Port for the SMTP server";
+            };
+            max_conns = mkOption {
+              type = types.int;
+              description = lib.mdDoc
+                "Maximum number of simultaneous connections, defaults to 1";
+              default = 1;
+            };
+            tls_type = mkOption {
+              type = types.enum [ "none" "STARTTLS" "TLS" ];
+              description =
+                lib.mdDoc "Type of TLS authentication with the SMTP server";
+            };
+          };
+        });
+
+        description = lib.mdDoc "List of outgoing SMTP servers";
+      };
+
+      # TODO: refine this type based on the smtp one.
+      "bounce.mailboxes" = mkOption {
+        type = listOf
+          (submodule { freeformType = with types; listOf (attrsOf anything); });
+        default = [ ];
+        description = lib.mdDoc "List of bounce mailboxes";
+      };
+
+      messengers = mkOption {
+        type = listOf str;
+        default = [ ];
+        description = lib.mdDoc
+          "List of messengers, see: <https://github.com/knadh/listmonk/blob/master/models/settings.go#L64-L74> for options.";
+      };
+    };
+  };
+in {
+  ###### interface
+  options = {
+    services.listmonk = {
+      enable = mkEnableOption
+        (lib.mdDoc "Listmonk, this module assumes a reverse proxy to be set");
+      database = {
+        createLocally = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc
+            "Create the PostgreSQL database and database user locally.";
+        };
+
+        settings = mkOption {
+          default = null;
+          type = with types; nullOr (submodule databaseSettingsOpts);
+          description = lib.mdDoc
+            "Dynamic settings in the PostgreSQL database, set by a SQL script, see <https://github.com/knadh/listmonk/blob/master/schema.sql#L177-L230> for details.";
+        };
+        mutableSettings = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Database settings will be reset to the values set in this module unless this is enabled.
+            Enable this if you want to persist changes you have made in the application.
+          '';
+        };
+      };
+      package = mkPackageOptionMD pkgs "listmonk" {};
+      settings = mkOption {
+        type = types.submodule { freeformType = tomlFormat.type; };
+        description = lib.mdDoc ''
+          Static settings set in the config.toml, see <https://github.com/knadh/listmonk/blob/master/config.toml.sample> for details.
+          You can set secrets using the secretFile option with environment variables following <https://listmonk.app/docs/configuration/#environment-variables>.
+        '';
+      };
+      secretFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc
+          "A file containing secrets as environment variables. See <https://listmonk.app/docs/configuration/#environment-variables> for details on supported values.";
+      };
+    };
+  };
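+  # A hedged usage sketch built from the options above (addresses and the
+  # secret path are placeholders):
+  #
+  #   services.listmonk = {
+  #     enable = true;
+  #     database.createLocally = true;
+  #     database.settings."app.notify_emails" = [ "admin@example.org" ];
+  #     database.settings.smtp = [{
+  #       enabled = true;
+  #       host = "smtp.example.org";
+  #       port = 587;
+  #       tls_type = "STARTTLS";
+  #     }];
+  #     settings."app".address = "localhost:9000";
+  #     secretFile = "/run/secrets/listmonk-env";
+  #   };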
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    # Default parameters from https://github.com/knadh/listmonk/blob/master/config.toml.sample
+    services.listmonk.settings."app".address = mkDefault "localhost:9000";
+    services.listmonk.settings."db" = mkMerge [
+      ({
+        max_open = mkDefault 25;
+        max_idle = mkDefault 25;
+        max_lifetime = mkDefault "300s";
+      })
+      (mkIf cfg.database.createLocally {
+        host = mkDefault "/run/postgresql";
+        port = mkDefault 5432;
+        user = mkDefault "listmonk";
+        database = mkDefault "listmonk";
+      })
+    ];
+
+    services.postgresql = mkIf cfg.database.createLocally {
+      enable = true;
+
+      ensureUsers = [{
+        name = "listmonk";
+        ensureDBOwnership = true;
+      }];
+
+      ensureDatabases = [ "listmonk" ];
+    };
+
+    systemd.services.listmonk = {
+      description = "Listmonk - newsletter and mailing list manager";
+      after = [ "network.target" ]
+        ++ optional cfg.database.createLocally "postgresql.service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "exec";
+        EnvironmentFile = mkIf (cfg.secretFile != null) [ cfg.secretFile ];
+        ExecStartPre = [
+          # StateDirectory cannot be used for this when DynamicUser = true is set this way:
+          # it would try to create all the directories and fail because one of them already exists.
+          # Therefore, we have to create it ourselves.
+          ''${pkgs.coreutils}/bin/mkdir -p "''${STATE_DIRECTORY}/listmonk/uploads"''
+          "${cfg.package}/bin/listmonk --config ${cfgFile} --idempotent --install --upgrade --yes"
+          "${updateDatabaseConfigScript}/bin/update-database-config.sh"
+        ];
+        ExecStart = "${cfg.package}/bin/listmonk --config ${cfgFile}";
+
+        Restart = "on-failure";
+
+        StateDirectory = [ "listmonk" ];
+
+        User = "listmonk";
+        Group = "listmonk";
+        DynamicUser = true;
+        NoNewPrivileges = true;
+        CapabilityBoundingSet = "";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        PrivateDevices = true;
+        ProtectControlGroups = true;
+        ProtectKernelTunables = true;
+        ProtectHome = true;
+        DeviceAllow = "";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        UMask = "0027";
+        MemoryDenyWriteExecute = true;
+        LockPersonality = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        ProtectKernelModules = true;
+        PrivateUsers = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/mail/maddy.nix b/nixpkgs/nixos/modules/services/mail/maddy.nix
new file mode 100644
index 000000000000..2c4d75e8391a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/maddy.nix
@@ -0,0 +1,464 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  name = "maddy";
+
+  cfg = config.services.maddy;
+
+  defaultConfig = ''
+    # Minimal configuration with TLS disabled, adapted from upstream example
+    # configuration here https://github.com/foxcpp/maddy/blob/master/maddy.conf
+    # Do not use this in production!
+
+    auth.pass_table local_authdb {
+      table sql_table {
+        driver sqlite3
+        dsn credentials.db
+        table_name passwords
+      }
+    }
+
+    storage.imapsql local_mailboxes {
+      driver sqlite3
+      dsn imapsql.db
+    }
+
+    table.chain local_rewrites {
+      optional_step regexp "(.+)\+(.+)@(.+)" "$1@$3"
+      optional_step static {
+        entry postmaster postmaster@$(primary_domain)
+      }
+      optional_step file /etc/maddy/aliases
+    }
+
+    msgpipeline local_routing {
+      destination postmaster $(local_domains) {
+        modify {
+          replace_rcpt &local_rewrites
+        }
+        deliver_to &local_mailboxes
+      }
+      default_destination {
+        reject 550 5.1.1 "User doesn't exist"
+      }
+    }
+
+    smtp tcp://0.0.0.0:25 {
+      limits {
+        all rate 20 1s
+        all concurrency 10
+      }
+      dmarc yes
+      check {
+        require_mx_record
+        dkim
+        spf
+      }
+      source $(local_domains) {
+        reject 501 5.1.8 "Use Submission for outgoing SMTP"
+      }
+      default_source {
+        destination postmaster $(local_domains) {
+          deliver_to &local_routing
+        }
+        default_destination {
+          reject 550 5.1.1 "User doesn't exist"
+        }
+      }
+    }
+
+    submission tcp://0.0.0.0:587 {
+      limits {
+        all rate 50 1s
+      }
+      auth &local_authdb
+      source $(local_domains) {
+        check {
+            authorize_sender {
+                prepare_email &local_rewrites
+                user_to_email identity
+            }
+        }
+        destination postmaster $(local_domains) {
+            deliver_to &local_routing
+        }
+        default_destination {
+            modify {
+                dkim $(primary_domain) $(local_domains) default
+            }
+            deliver_to &remote_queue
+        }
+      }
+      default_source {
+        reject 501 5.1.8 "Non-local sender domain"
+      }
+    }
+
+    target.remote outbound_delivery {
+      limits {
+        destination rate 20 1s
+        destination concurrency 10
+      }
+      mx_auth {
+        dane
+        mtasts {
+          cache fs
+          fs_dir mtasts_cache/
+        }
+        local_policy {
+            min_tls_level encrypted
+            min_mx_level none
+        }
+      }
+    }
+
+    target.queue remote_queue {
+      target &outbound_delivery
+      autogenerated_msg_domain $(primary_domain)
+      bounce {
+        destination postmaster $(local_domains) {
+          deliver_to &local_routing
+        }
+        default_destination {
+            reject 550 5.0.0 "Refusing to send DSNs to non-local addresses"
+        }
+      }
+    }
+
+    imap tcp://0.0.0.0:143 {
+      auth &local_authdb
+      storage &local_mailboxes
+    }
+  '';
+
+in {
+  options = {
+    services.maddy = {
+
+      enable = mkEnableOption (lib.mdDoc "Maddy, a free an open source mail server");
+
+      user = mkOption {
+        default = "maddy";
+        type = with types; uniq str;
+        description = lib.mdDoc ''
+          User account under which maddy runs.
+
+          ::: {.note}
+          If left as the default value, this user will automatically be created
+          on system activation; otherwise the sysadmin is responsible for
+          ensuring the user exists before the maddy service starts.
+          :::
+        '';
+      };
+
+      group = mkOption {
+        default = "maddy";
+        type = with types; uniq str;
+        description = lib.mdDoc ''
+          Group account under which maddy runs.
+
+          ::: {.note}
+          If left as the default value, this group will automatically be created
+          on system activation; otherwise the sysadmin is responsible for
+          ensuring the group exists before the maddy service starts.
+          :::
+        '';
+      };
+
+      hostname = mkOption {
+        default = "localhost";
+        type = with types; uniq str;
+        example = ''example.com'';
+        description = lib.mdDoc ''
+          Hostname to use. It should be an FQDN.
+        '';
+      };
+
+      primaryDomain = mkOption {
+        default = "localhost";
+        type = with types; uniq str;
+        example = ''mail.example.com'';
+        description = lib.mdDoc ''
+          Primary MX domain to use. It should be an FQDN.
+        '';
+      };
+
+      localDomains = mkOption {
+        type = with types; listOf str;
+        default = ["$(primary_domain)"];
+        example = [
+          "$(primary_domain)"
+          "example.com"
+          "other.example.com"
+        ];
+        description = lib.mdDoc ''
+          List of domains to treat as local.
+        '';
+      };
+
+      config = mkOption {
+        type = with types; nullOr lines;
+        default = defaultConfig;
+        description = lib.mdDoc ''
+          Server configuration, see
+          [https://maddy.email](https://maddy.email) for
+          more information. The default configuration of this module sets up a
+          minimal Maddy instance for mail transfer without TLS encryption.
+
+          ::: {.note}
+          This should not be used in a production environment.
+          :::
+        '';
+      };
+
+      tls = {
+        loader = mkOption {
+          type = with types; nullOr (enum [ "off" "file" "acme" ]);
+          default = "off";
+          description = lib.mdDoc ''
+            TLS certificates are obtained by modules called "certificate
+            loaders".
+
+            The `file` loader module reads certificates from files specified by
+            the `certificates` option.
+
+            Alternatively the `acme` module can be used to automatically obtain
+            certificates using the ACME protocol.
+
+            Module configuration is done via the `tls.extraConfig` option.
+
+            Secrets such as API keys or passwords should not be supplied in
+            plaintext. Instead the `secrets` option can be used to read secrets
+            at runtime as environment variables. Secrets can be referenced with
+            `{env:VAR}`.
+          '';
+        };
+
+        certificates = mkOption {
+          type = with types; listOf (submodule {
+            options = {
+              keyPath = mkOption {
+                type = types.path;
+                example = "/etc/ssl/mx1.example.org.key";
+                description = lib.mdDoc ''
+                  Path to the private key used for TLS.
+                '';
+              };
+              certPath = mkOption {
+                type = types.path;
+                example = "/etc/ssl/mx1.example.org.crt";
+                description = lib.mdDoc ''
+                  Path to the certificate used for TLS.
+                '';
+              };
+            };
+          });
+          default = [];
+          example = lib.literalExpression ''
+            [{
+              keyPath = "/etc/ssl/mx1.example.org.key";
+              certPath = "/etc/ssl/mx1.example.org.crt";
+            }]
+          '';
+          description = lib.mdDoc ''
+            A list of attribute sets containing paths to TLS certificates and
+            keys. Maddy will use SNI if multiple pairs are selected.
+          '';
+        };
+
+        extraConfig = mkOption {
+          type = with types; nullOr lines;
+          description = lib.mdDoc ''
+            Arguments for the specified certificate loader.
+
+            In case the `tls` loader is set, the defaults are considered secure
+            and there is no need to change anything in most cases.
+            For available options see [upstream manual](https://maddy.email/reference/tls/).
+
+            For ACME configuration, see [following page](https://maddy.email/reference/tls-acme).
+          '';
+          default = "";
+        };
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open the configured incoming and outgoing mail server ports.
+        '';
+      };
+
+      ensureAccounts = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          List of IMAP accounts which get automatically created. Note that for
+          a complete setup, user credentials for these accounts are required
+          and can be created using the `ensureCredentials` option.
+          This option does not delete accounts that are no longer listed.
+        '';
+        example = [
+          "user1@localhost"
+          "user2@localhost"
+        ];
+      };
+
+      ensureCredentials = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          User credentials which get automatically created if they don't
+          exist yet. Note that for a complete setup, corresponding mail boxes
+          have to be created using the `ensureAccounts` option.
+          This option does not delete credentials that are no longer listed.
+        '';
+        example = {
+          "user1@localhost".passwordFile = /secrets/user1-localhost;
+          "user2@localhost".passwordFile = /secrets/user2-localhost;
+        };
+        type = types.attrsOf (types.submodule {
+          options = {
+            passwordFile = mkOption {
+              type = types.path;
+              example = "/path/to/file";
+              default = null;
+              description = lib.mdDoc ''
+                Specifies the path to a file containing the
+                clear text password for the user.
+              '';
+            };
+          };
+        });
+      };
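+      # The two options above are meant to be used together: ensureAccounts
+      # creates the IMAP mailbox and ensureCredentials the matching login.
+      # A sketch with placeholder values:
+      #
+      #   ensureAccounts = [ "user1@localhost" ];
+      #   ensureCredentials."user1@localhost".passwordFile = "/run/secrets/user1-localhost";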
+
+      secrets = lib.mkOption {
+        type = with types; listOf path;
+        description = lib.mdDoc ''
+          A list of files containing the various secrets. Should be in the format
+          expected by systemd's `EnvironmentFile` directive. Secrets can be
+          referenced in the format `{env:VAR}`.
+        '';
+        default = [ ];
+      };
+
+    };
+  };
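+  # A hedged usage sketch using the `file` TLS loader; the domains and
+  # certificate paths are placeholders taken from the option examples above:
+  #
+  #   services.maddy = {
+  #     enable = true;
+  #     hostname = "mx1.example.org";
+  #     primaryDomain = "example.org";
+  #     openFirewall = true;
+  #     tls.loader = "file";
+  #     tls.certificates = [{
+  #       certPath = "/etc/ssl/mx1.example.org.crt";
+  #       keyPath = "/etc/ssl/mx1.example.org.key";
+  #     }];
+  #   };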
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.tls.loader == "file" -> cfg.tls.certificates != [];
+        message = ''
+          If Maddy is configured to use TLS, tls.certificates with attribute sets
+          of certPath and keyPath must be provided.
+          Read more about obtaining TLS certificates here:
+          https://maddy.email/tutorials/setting-up/#tls-certificates
+        '';
+      }
+      {
+        assertion = cfg.tls.loader == "acme" -> cfg.tls.extraConfig != "";
+        message = ''
+          If Maddy is configured to obtain TLS certificates using the ACME
+          loader, extra configuration options must be supplied via
+          tls.extraConfig option.
+          See upstream documentation for more details:
+          https://maddy.email/reference/tls-acme
+        '';
+      }
+    ];
+
+    systemd = {
+
+      packages = [ pkgs.maddy ];
+      services = {
+        maddy = {
+          serviceConfig = {
+            User = cfg.user;
+            Group = cfg.group;
+            StateDirectory = [ "maddy" ];
+            EnvironmentFile = cfg.secrets;
+          };
+          restartTriggers = [ config.environment.etc."maddy/maddy.conf".source ];
+          wantedBy = [ "multi-user.target" ];
+        };
+        maddy-ensure-accounts = {
+          script = ''
+            ${optionalString (cfg.ensureAccounts != []) ''
+              ${concatMapStrings (account: ''
+                if ! ${pkgs.maddy}/bin/maddyctl imap-acct list | grep "${account}"; then
+                  ${pkgs.maddy}/bin/maddyctl imap-acct create ${account}
+                fi
+              '') cfg.ensureAccounts}
+            ''}
+            ${optionalString (cfg.ensureCredentials != {}) ''
+              ${concatStringsSep "\n" (mapAttrsToList (name: cfg: ''
+                if ! ${pkgs.maddy}/bin/maddyctl creds list | grep "${name}"; then
+                  ${pkgs.maddy}/bin/maddyctl creds create --password $(cat ${escapeShellArg cfg.passwordFile}) ${name}
+                fi
+              '') cfg.ensureCredentials)}
+            ''}
+          '';
+          serviceConfig = {
+            Type = "oneshot";
+            User= "maddy";
+          };
+          after = [ "maddy.service" ];
+          wantedBy = [ "multi-user.target" ];
+        };
+
+      };
+
+    };
+
+    environment.etc."maddy/maddy.conf" = {
+      text = ''
+        $(hostname) = ${cfg.hostname}
+        $(primary_domain) = ${cfg.primaryDomain}
+        $(local_domains) = ${toString cfg.localDomains}
+        hostname ${cfg.hostname}
+
+        ${if (cfg.tls.loader == "file") then ''
+          tls file ${concatStringsSep " " (
+            map (x: x.certPath + " " + x.keyPath
+          ) cfg.tls.certificates)} ${optionalString (cfg.tls.extraConfig != "") ''
+            { ${cfg.tls.extraConfig} }
+          ''}
+        '' else if (cfg.tls.loader == "acme") then ''
+          tls {
+            loader acme {
+              ${cfg.tls.extraConfig}
+            }
+          }
+        '' else if (cfg.tls.loader == "off") then ''
+          tls off
+        '' else ""}
+
+        ${cfg.config}
+      '';
+    };
+
+    users.users = optionalAttrs (cfg.user == name) {
+      ${name} = {
+        isSystemUser = true;
+        group = cfg.group;
+        description = "Maddy mail transfer agent user";
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == name) {
+      ${cfg.group} = { };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 25 143 587 ];
+    };
+
+    environment.systemPackages = [
+      pkgs.maddy
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/mail/mail.nix b/nixpkgs/nixos/modules/services/mail/mail.nix
new file mode 100644
index 000000000000..8e1424595b51
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/mail.nix
@@ -0,0 +1,34 @@
+{ config, options, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.mail = {
+
+      sendmailSetuidWrapper = mkOption {
+        type = types.nullOr options.security.wrappers.type.nestedTypes.elemType;
+        default = null;
+        internal = true;
+        description = lib.mdDoc ''
+          Configuration for the sendmail setuid wrapper.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf (config.services.mail.sendmailSetuidWrapper != null) {
+
+    security.wrappers.sendmail = config.services.mail.sendmailSetuidWrapper;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/mail/mailcatcher.nix b/nixpkgs/nixos/modules/services/mail/mailcatcher.nix
new file mode 100644
index 000000000000..d0f4550c1926
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/mailcatcher.nix
@@ -0,0 +1,68 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.mailcatcher;
+
+  inherit (lib) mkEnableOption mkIf mkOption types optionalString;
+in
+{
+  # interface
+
+  options = {
+
+    services.mailcatcher = {
+      enable = mkEnableOption (lib.mdDoc "MailCatcher");
+
+      http.ip = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "The ip address of the http server.";
+      };
+
+      http.port = mkOption {
+        type = types.port;
+        default = 1080;
+        description = lib.mdDoc "The port address of the http server.";
+      };
+
+      http.path = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc "Prefix to all HTTP paths.";
+        example = "/mailcatcher";
+      };
+
+      smtp.ip = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "The ip address of the smtp server.";
+      };
+
+      smtp.port = mkOption {
+        type = types.port;
+        default = 1025;
+        description = lib.mdDoc "The port address of the smtp server.";
+      };
+    };
+
+  };
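+  # A minimal sketch of the options above; MailCatcher then serves its web
+  # interface on http.ip:http.port and accepts test mail on smtp.ip:smtp.port:
+  #
+  #   services.mailcatcher = {
+  #     enable = true;
+  #     http.port = 1080;
+  #     smtp.port = 1025;
+  #   };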
+
+  # implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.mailcatcher ];
+
+    systemd.services.mailcatcher = {
+      description = "MailCatcher Service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        Restart = "always";
+        ExecStart = "${pkgs.mailcatcher}/bin/mailcatcher --foreground --no-quit --http-ip ${cfg.http.ip} --http-port ${toString cfg.http.port} --smtp-ip ${cfg.smtp.ip} --smtp-port ${toString cfg.smtp.port}" + optionalString (cfg.http.path != null) " --http-path ${cfg.http.path}";
+        AmbientCapabilities = optionalString (cfg.http.port < 1024 || cfg.smtp.port < 1024) "cap_net_bind_service";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/mail/mailhog.nix b/nixpkgs/nixos/modules/services/mail/mailhog.nix
new file mode 100644
index 000000000000..7ae62de291ba
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/mailhog.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mailhog;
+
+  args = lib.concatStringsSep " " (
+    [
+      "-api-bind-addr :${toString cfg.apiPort}"
+      "-smtp-bind-addr :${toString cfg.smtpPort}"
+      "-ui-bind-addr :${toString cfg.uiPort}"
+      "-storage ${cfg.storage}"
+    ] ++ lib.optional (cfg.storage == "maildir")
+      "-maildir-path $STATE_DIRECTORY"
+    ++ cfg.extraArgs
+  );
+
+in
+{
+  ###### interface
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "mailhog" "user" ] "")
+  ];
+
+  options = {
+
+    services.mailhog = {
+      enable = mkEnableOption (lib.mdDoc "MailHog");
+
+      storage = mkOption {
+        type = types.enum [ "maildir" "memory" ];
+        default = "memory";
+        description = lib.mdDoc "Store mails on disk or in memory.";
+      };
+
+      apiPort = mkOption {
+        type = types.port;
+        default = 8025;
+        description = lib.mdDoc "Port on which the API endpoint will listen.";
+      };
+
+      smtpPort = mkOption {
+        type = types.port;
+        default = 1025;
+        description = lib.mdDoc "Port on which the SMTP endpoint will listen.";
+      };
+
+      uiPort = mkOption {
+        type = types.port;
+        default = 8025;
+        description = lib.mdDoc "Port on which the HTTP UI will listen.";
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "List of additional arguments to pass to the MailHog process.";
+      };
+    };
+  };
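+  # A minimal sketch of the options above; with "maildir" storage the mails
+  # end up under the service's StateDirectory (/var/lib/mailhog):
+  #
+  #   services.mailhog = {
+  #     enable = true;
+  #     storage = "maildir";
+  #     uiPort = 8025;
+  #     smtpPort = 1025;
+  #   };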
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.mailhog = {
+      description = "MailHog - Web and API based SMTP testing";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "exec";
+        ExecStart = "${pkgs.mailhog}/bin/MailHog ${args}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        StateDirectory = "mailhog";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/mail/mailman.md b/nixpkgs/nixos/modules/services/mail/mailman.md
new file mode 100644
index 000000000000..55b61f8a2582
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/mailman.md
@@ -0,0 +1,82 @@
+# Mailman {#module-services-mailman}
+
+[Mailman](https://www.list.org) is free
+software for managing electronic mail discussion and e-newsletter
+lists. Mailman and its web interface can be configured using the
+corresponding NixOS module. Note that this service is best used with
+an existing, securely configured Postfix setup, as it does not automatically configure this.
+
+## Basic usage with Postfix {#module-services-mailman-basic-usage}
+
+For a basic configuration with Postfix as the MTA, the following settings are suggested:
+```
+{ config, ... }: {
+  services.postfix = {
+    enable = true;
+    relayDomains = ["hash:/var/lib/mailman/data/postfix_domains"];
+    sslCert = config.security.acme.certs."lists.example.org".directory + "/full.pem";
+    sslKey = config.security.acme.certs."lists.example.org".directory + "/key.pem";
+    config = {
+      transport_maps = ["hash:/var/lib/mailman/data/postfix_lmtp"];
+      local_recipient_maps = ["hash:/var/lib/mailman/data/postfix_lmtp"];
+    };
+  };
+  services.mailman = {
+    enable = true;
+    serve.enable = true;
+    hyperkitty.enable = true;
+    webHosts = ["lists.example.org"];
+    siteOwner = "mailman@example.org";
+  };
+  services.nginx.virtualHosts."lists.example.org".enableACME = true;
+  networking.firewall.allowedTCPPorts = [ 25 80 443 ];
+}
+```
+
+DNS records will also be required:
+
+  - `AAAA` and `A` records pointing to the host in question, in order for browsers to be able to discover the address of the web server;
+  - An `MX` record pointing to a domain name at which the host is reachable, in order for other mail servers to be able to deliver emails to the mailing lists it hosts.
+
+After this has been done and appropriate DNS records have been
+set up, the Postorius mailing list manager and the Hyperkitty
+archive browser will be available at
+https://lists.example.org/. Note that this setup is not
+sufficient to deliver emails to most email providers nor to
+avoid spam -- a number of additional measures for authenticating
+incoming and outgoing mails, such as SPF, DMARC and DKIM are
+necessary, but outside the scope of the Mailman module.
+
+## Using with other MTAs {#module-services-mailman-other-mtas}
+
+Mailman also supports other MTAs, though this requires a bit more configuration. For example, to use Mailman with Exim, you can use the following settings:
+```
+{ config, ... }: {
+  services = {
+    mailman = {
+      enable = true;
+      siteOwner = "mailman@example.org";
+      enablePostfix = false;
+      settings.mta = {
+        incoming = "mailman.mta.exim4.LMTP";
+        outgoing = "mailman.mta.deliver.deliver";
+        lmtp_host = "localhost";
+        lmtp_port = "8024";
+        smtp_host = "localhost";
+        smtp_port = "25";
+        configuration = "python:mailman.config.exim4";
+      };
+    };
+    exim = {
+      enable = true;
+      # You can configure Exim in a separate file to reduce configuration.nix clutter
+      config = builtins.readFile ./exim.conf;
+    };
+  };
+}
+```
+
+The Exim config needs some special additions to work with Mailman. Currently
+NixOS can't manage the Exim config with such granularity. Please refer to
+[Mailman documentation](https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html)
+for more info on configuring Mailman for working with Exim.
diff --git a/nixpkgs/nixos/modules/services/mail/mailman.nix b/nixpkgs/nixos/modules/services/mail/mailman.nix
new file mode 100644
index 000000000000..d61826de1b5c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/mailman.nix
@@ -0,0 +1,652 @@
+{ config, pkgs, lib, ... }:          # mailman.nix
+
+with lib;
+
+let
+
+  cfg = config.services.mailman;
+
+  inherit (pkgs.mailmanPackages.buildEnvs { withHyperkitty = cfg.hyperkitty.enable; withLDAP = cfg.ldap.enable; })
+    mailmanEnv webEnv;
+
+  withPostgresql = config.services.postgresql.enable;
+
+  # This deliberately doesn't use recursiveUpdate so users can
+  # override the defaults.
+  webSettings = {
+    DEFAULT_FROM_EMAIL = cfg.siteOwner;
+    SERVER_EMAIL = cfg.siteOwner;
+    ALLOWED_HOSTS = [ "localhost" "127.0.0.1" ] ++ cfg.webHosts;
+    COMPRESS_OFFLINE = true;
+    STATIC_ROOT = "/var/lib/mailman-web-static";
+    MEDIA_ROOT = "/var/lib/mailman-web/media";
+    LOGGING = {
+      version = 1;
+      disable_existing_loggers = true;
+      handlers.console.class = "logging.StreamHandler";
+      loggers.django = {
+        handlers = [ "console" ];
+        level = "INFO";
+      };
+    };
+    HAYSTACK_CONNECTIONS.default = {
+      ENGINE = "haystack.backends.whoosh_backend.WhooshEngine";
+      PATH = "/var/lib/mailman-web/fulltext-index";
+    };
+  } // cfg.webSettings;
+
+  webSettingsJSON = pkgs.writeText "settings.json" (builtins.toJSON webSettings);
+
+  # TODO: Should this be RFC42-ised so that users can set additional options without modifying the module?
+  postfixMtaConfig = pkgs.writeText "mailman-postfix.cfg" ''
+    [postfix]
+    postmap_command: ${pkgs.postfix}/bin/postmap
+    transport_file_type: hash
+  '';
+
+  mailmanCfg = lib.generators.toINI {} (recursiveUpdate cfg.settings {
+    webservice.admin_pass = "#NIXOS_MAILMAN_REST_API_PASS_SECRET#";
+  });
+
+  mailmanCfgFile = pkgs.writeText "mailman-raw.cfg" mailmanCfg;
+
+  mailmanHyperkittyCfg = pkgs.writeText "mailman-hyperkitty.cfg" ''
+    [general]
+    # This is your HyperKitty installation, preferably on the localhost. This
+    # address will be used by Mailman to forward incoming emails to HyperKitty
+    # for archiving. It does not need to be publicly available, in fact it's
+    # better if it is not.
+    base_url: ${cfg.hyperkitty.baseUrl}
+
+    # Shared API key, must be identical to the value in HyperKitty's
+    # settings.
+    api_key: @API_KEY@
+  '';
+
+in {
+
+  ###### interface
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "mailman" "hyperkittyBaseUrl" ]
+      [ "services" "mailman" "hyperkitty" "baseUrl" ])
+
+    (mkRemovedOptionModule [ "services" "mailman" "hyperkittyApiKey" ] ''
+      The Hyperkitty API key is now generated on first run, and not
+      stored in the world-readable Nix store.  To continue using
+      Hyperkitty, you must set services.mailman.hyperkitty.enable = true.
+    '')
+    (mkRemovedOptionModule [ "services" "mailman" "package" ] ''
+      Didn't have an effect for several years.
+    '')
+  ];
+
+  options = {
+
+    services.mailman = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable Mailman on this host. Requires an active MTA on the host (e.g. Postfix).";
+      };
+
+      ldap = {
+        enable = mkEnableOption (lib.mdDoc "LDAP auth");
+        serverUri = mkOption {
+          type = types.str;
+          example = "ldaps://ldap.host";
+          description = lib.mdDoc ''
+            LDAP host to connect against.
+          '';
+        };
+        bindDn = mkOption {
+          type = types.str;
+          example = "cn=root,dc=nixos,dc=org";
+          description = lib.mdDoc ''
+            Service account to bind against.
+          '';
+        };
+        bindPasswordFile = mkOption {
+          type = types.str;
+          example = "/run/secrets/ldap-bind";
+          description = lib.mdDoc ''
+            Path to the file containing the bind password of the service account
+            defined by [](#opt-services.mailman.ldap.bindDn).
+          '';
+        };
+        superUserGroup = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          example = "cn=admin,ou=groups,dc=nixos,dc=org";
+          description = lib.mdDoc ''
+            Group a user must be a member of to gain superuser rights.
+          '';
+        };
+        userSearch = {
+          query = mkOption {
+            type = types.str;
+            example = "(&(objectClass=inetOrgPerson)(|(uid=%(user)s)(mail=%(user)s)))";
+            description = lib.mdDoc ''
+              Query to find a user in the LDAP database.
+            '';
+          };
+          ou = mkOption {
+            type = types.str;
+            example = "ou=users,dc=nixos,dc=org";
+            description = lib.mdDoc ''
+              Organizational unit to look up a user.
+            '';
+          };
+        };
+        groupSearch = {
+          type = mkOption {
+            type = types.enum [
+              "posixGroup" "groupOfNames" "memberDNGroup" "nestedMemberDNGroup" "nestedGroupOfNames"
+              "groupOfUniqueNames" "nestedGroupOfUniqueNames" "activeDirectoryGroup" "nestedActiveDirectoryGroup"
+              "organizationalRoleGroup" "nestedOrganizationalRoleGroup"
+            ];
+            default = "posixGroup";
+            apply = v: "${toUpper (substring 0 1 v)}${substring 1 (stringLength v) v}Type";
+            description = lib.mdDoc ''
+              Type of group to perform a group search against.
+            '';
+          };
+          query = mkOption {
+            type = types.str;
+            example = "(objectClass=groupOfNames)";
+            description = lib.mdDoc ''
+              Query to find a group associated to a user in the LDAP database.
+            '';
+          };
+          ou = mkOption {
+            type = types.str;
+            example = "ou=groups,dc=nixos,dc=org";
+            description = lib.mdDoc ''
+              Organizational unit to look up a group.
+            '';
+          };
+        };
+        attrMap = {
+          username = mkOption {
+            default = "uid";
+            type = types.str;
+            description = lib.mdDoc ''
+              LDAP-attribute that corresponds to the `username`-attribute in mailman.
+            '';
+          };
+          firstName = mkOption {
+            default = "givenName";
+            type = types.str;
+            description = lib.mdDoc ''
+              LDAP-attribute that corresponds to the `firstName`-attribute in mailman.
+            '';
+          };
+          lastName = mkOption {
+            default = "sn";
+            type = types.str;
+            description = lib.mdDoc ''
+              LDAP-attribute that corresponds to the `lastName`-attribute in mailman.
+            '';
+          };
+          email = mkOption {
+            default = "mail";
+            type = types.str;
+            description = lib.mdDoc ''
+              LDAP-attribute that corresponds to the `email`-attribute in mailman.
+            '';
+          };
+        };
+      };
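+      # A hedged sketch of the LDAP options above, reusing their example
+      # values; adjust the DNs and queries for your directory:
+      #
+      #   services.mailman.ldap = {
+      #     enable = true;
+      #     serverUri = "ldaps://ldap.host";
+      #     bindDn = "cn=root,dc=nixos,dc=org";
+      #     bindPasswordFile = "/run/secrets/ldap-bind";
+      #     userSearch = {
+      #       ou = "ou=users,dc=nixos,dc=org";
+      #       query = "(&(objectClass=inetOrgPerson)(|(uid=%(user)s)(mail=%(user)s)))";
+      #     };
+      #     groupSearch = {
+      #       type = "groupOfNames";
+      #       ou = "ou=groups,dc=nixos,dc=org";
+      #       query = "(objectClass=groupOfNames)";
+      #     };
+      #   };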
+
+      enablePostfix = mkOption {
+        type = types.bool;
+        default = true;
+        example = false;
+        description = lib.mdDoc ''
+          Enable Postfix integration. Requires an active Postfix installation.
+
+          If you want to use another MTA, set this option to false and configure
+          settings in services.mailman.settings.mta.
+
+          Refer to the Mailman manual for more info.
+        '';
+      };
+
+      siteOwner = mkOption {
+        type = types.str;
+        example = "postmaster@example.org";
+        description = lib.mdDoc ''
+          Certain messages that must be delivered to a human, but which can't
+          be delivered to a list owner (e.g. a bounce from a list owner), will
+          be sent to this address. It should point to a human.
+        '';
+      };
+
+      webHosts = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          The list of hostnames and/or IP addresses from which the Mailman Web
+          UI will accept requests. By default, "localhost" and "127.0.0.1" are
+          enabled. All additional names under which your web server accepts
+          requests for the UI must be listed here or incoming requests will be
+          rejected.
+        '';
+      };
+
+      webUser = mkOption {
+        type = types.str;
+        default = "mailman-web";
+        description = lib.mdDoc ''
+          User to run mailman-web as.
+        '';
+      };
+
+      webSettings = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc ''
+          Overrides for the default mailman-web Django settings.
+        '';
+      };
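+      # For example, to override one of the defaults defined at the top of
+      # this module (a sketch; any Django setting may be set here):
+      #
+      #   services.mailman.webSettings.DEFAULT_FROM_EMAIL = "lists@example.org";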
+
+      restApiPassFile = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          Path to the file containing the value for `MAILMAN_REST_API_PASS`.
+        '';
+      };
+
+      serve = {
+        enable = mkEnableOption (lib.mdDoc "automatic nginx and uwsgi setup for mailman-web");
+
+        virtualRoot = mkOption {
+          default = "/";
+          example = lib.literalExpression "/lists";
+          type = types.str;
+          description = lib.mdDoc ''
+            Path to mount the mailman-web Django application on.
+          '';
+        };
+      };
+
+      settings = mkOption {
+        description = lib.mdDoc "Settings for mailman.cfg";
+        type = types.attrsOf (types.attrsOf types.str);
+        default = {};
+      };
+
+      hyperkitty = {
+        enable = mkEnableOption (lib.mdDoc "the Hyperkitty archiver for Mailman");
+
+        baseUrl = mkOption {
+          type = types.str;
+          default = "http://localhost:18507/archives/";
+          description = lib.mdDoc ''
+            The URL at which Mailman can reach Hyperkitty's internal API,
+            preferably on localhost.
+          '';
+        };
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Extra lines for the mailman configuration file";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.mailman.settings = {
+      mailman.site_owner = lib.mkDefault cfg.siteOwner;
+      mailman.layout = "fhs";
+
+      "paths.fhs" = {
+        bin_dir = "${pkgs.mailmanPackages.mailman}/bin";
+        var_dir = "/var/lib/mailman";
+        queue_dir = "$var_dir/queue";
+        template_dir = "$var_dir/templates";
+        log_dir = "/var/log/mailman";
+        lock_dir = "/run/mailman/lock";
+        etc_dir = "/etc";
+        pid_file = "/run/mailman/master.pid";
+      };
+
+      mta.configuration = lib.mkDefault (if cfg.enablePostfix then "${postfixMtaConfig}" else throw "When Mailman Postfix integration is disabled, set `services.mailman.settings.mta.configuration` to the path of the config file required to integrate with your MTA.");
+
+      "archiver.hyperkitty" = lib.mkIf cfg.hyperkitty.enable {
+        class = "mailman_hyperkitty.Archiver";
+        enable = "yes";
+        configuration = "/var/lib/mailman/mailman-hyperkitty.cfg";
+      };
+    } // (let
+      loggerNames = ["root" "archiver" "bounce" "config" "database" "debug" "error" "fromusenet" "http" "locks" "mischief" "plugins" "runner" "smtp"];
+      loggerSectionNames = map (n: "logging.${n}") loggerNames;
+      in lib.genAttrs loggerSectionNames (name: { handler = "stderr"; })
+    );
+
+    assertions = let
+      inherit (config.services) postfix;
+
+      requirePostfixHash = optionPath: dataFile:
+        with lib;
+        let
+          expected = "hash:/var/lib/mailman/data/${dataFile}";
+          value = attrByPath optionPath [] postfix;
+        in
+          { assertion = postfix.enable -> isList value && elem expected value;
+            message = ''
+              services.postfix.${concatStringsSep "." optionPath} must contain
+              "${expected}".
+              See <https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html>.
+            '';
+          };
+    in [
+      { assertion = cfg.webHosts != [];
+        message = ''
+          services.mailman.serve.enable requires there to be at least one entry
+          in services.mailman.webHosts.
+        '';
+      }
+    ] ++ (lib.optionals cfg.enablePostfix [
+      { assertion = postfix.enable;
+        message = ''
+          Mailman's default NixOS configuration requires Postfix to be enabled.
+
+          If you want to use another MTA, set services.mailman.enablePostfix
+          to false and configure settings in services.mailman.settings.mta.
+
+          Refer to <https://mailman.readthedocs.io/en/latest/src/mailman/docs/mta.html>
+          for more info.
+        '';
+      }
+      (requirePostfixHash [ "relayDomains" ] "postfix_domains")
+      (requirePostfixHash [ "config" "transport_maps" ] "postfix_lmtp")
+      (requirePostfixHash [ "config" "local_recipient_maps" ] "postfix_lmtp")
+    ]);
+
+    users.users.mailman = {
+      description = "GNU Mailman";
+      isSystemUser = true;
+      group = "mailman";
+    };
+    users.users.mailman-web = lib.mkIf (cfg.webUser == "mailman-web") {
+      description = "GNU Mailman web interface";
+      isSystemUser = true;
+      group = "mailman";
+    };
+    users.groups.mailman = {};
+
+    environment.etc."mailman3/settings.py".text = ''
+      import os
+      from configparser import ConfigParser
+
+      # Required by mailman_web.settings, but will be overridden when
+      # settings_local.json is loaded.
+      os.environ["SECRET_KEY"] = ""
+
+      from mailman_web.settings.base import *
+      from mailman_web.settings.mailman import *
+
+      import json
+
+      with open('${webSettingsJSON}') as f:
+          globals().update(json.load(f))
+
+      with open('/var/lib/mailman-web/settings_local.json') as f:
+          globals().update(json.load(f))
+
+      with open('/etc/mailman.cfg') as f:
+          config = ConfigParser()
+          config.read_file(f)
+          MAILMAN_REST_API_PASS = config['webservice']['admin_pass']
+
+      ${optionalString (cfg.ldap.enable) ''
+        import ldap
+        from django_auth_ldap.config import LDAPSearch, ${cfg.ldap.groupSearch.type}
+        AUTH_LDAP_SERVER_URI = "${cfg.ldap.serverUri}"
+        AUTH_LDAP_BIND_DN = "${cfg.ldap.bindDn}"
+        with open("${cfg.ldap.bindPasswordFile}") as f:
+            AUTH_LDAP_BIND_PASSWORD = f.read().rstrip('\n')
+        AUTH_LDAP_USER_SEARCH = LDAPSearch("${cfg.ldap.userSearch.ou}",
+            ldap.SCOPE_SUBTREE, "${cfg.ldap.userSearch.query}")
+        AUTH_LDAP_GROUP_TYPE = ${cfg.ldap.groupSearch.type}()
+        AUTH_LDAP_GROUP_SEARCH = LDAPSearch("${cfg.ldap.groupSearch.ou}",
+            ldap.SCOPE_SUBTREE, "${cfg.ldap.groupSearch.query}")
+        AUTH_LDAP_USER_ATTR_MAP = {
+          ${concatStrings (flip mapAttrsToList cfg.ldap.attrMap (key: value: ''
+            "${key}": "${value}",
+          ''))}
+        }
+        ${optionalString (cfg.ldap.superUserGroup != null) ''
+          AUTH_LDAP_USER_FLAGS_BY_GROUP = {
+            "is_superuser": "${cfg.ldap.superUserGroup}"
+          }
+        ''}
+        AUTHENTICATION_BACKENDS = (
+            "django_auth_ldap.backend.LDAPBackend",
+            "django.contrib.auth.backends.ModelBackend"
+        )
+      ''}
+    '';
+
+    services.nginx = mkIf (cfg.serve.enable && cfg.webHosts != []) {
+      enable = mkDefault true;
+      virtualHosts = lib.genAttrs cfg.webHosts (webHost: {
+        locations = {
+          ${cfg.serve.virtualRoot}.extraConfig = "uwsgi_pass unix:/run/mailman-web.socket;";
+          "${removeSuffix "/" cfg.serve.virtualRoot}/static/".alias = webSettings.STATIC_ROOT + "/";
+        };
+      });
+    };
+
+    environment.systemPackages = [ pkgs.mailmanPackages.mailman ] ++ (with pkgs; [ mailman-web ]);
+
+    services.postfix = lib.mkIf cfg.enablePostfix {
+      recipientDelimiter = "+";         # bake recipient addresses in mail envelopes via VERP
+      config = {
+        owner_request_special = "no";   # Mailman handles -owner addresses on its own
+      };
+    };
+
+    systemd.sockets.mailman-uwsgi = lib.mkIf cfg.serve.enable {
+      wantedBy = ["sockets.target"];
+      before = ["nginx.service"];
+      socketConfig.ListenStream = "/run/mailman-web.socket";
+    };
+    systemd.services = {
+      mailman = {
+        description = "GNU Mailman Master Process";
+        before = lib.optional cfg.enablePostfix "postfix.service";
+        after = [ "network.target" ]
+          ++ lib.optional cfg.enablePostfix "postfix-setup.service"
+          ++ lib.optional withPostgresql "postgresql.service";
+        restartTriggers = [ mailmanCfgFile ];
+        requires = optional withPostgresql "postgresql.service";
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ExecStart = "${mailmanEnv}/bin/mailman start";
+          ExecStop = "${mailmanEnv}/bin/mailman stop";
+          User = "mailman";
+          Group = "mailman";
+          Type = "forking";
+          RuntimeDirectory = "mailman";
+          LogsDirectory = "mailman";
+          PIDFile = "/run/mailman/master.pid";
+          Restart = "on-failure";
+          TimeoutStartSec = 180;
+          TimeoutStopSec = 180;
+        };
+      };
+
+      mailman-settings = {
+        description = "Generate settings files (including secrets) for Mailman";
+        before = [ "mailman.service" "mailman-web-setup.service" "mailman-uwsgi.service" "hyperkitty.service" ];
+        requiredBy = [ "mailman.service" "mailman-web-setup.service" "mailman-uwsgi.service" "hyperkitty.service" ];
+        path = with pkgs; [ jq ];
+        after = optional withPostgresql "postgresql.service";
+        requires = optional withPostgresql "postgresql.service";
+        serviceConfig.Type = "oneshot";
+        script = ''
+          install -m0750 -o mailman -g mailman ${mailmanCfgFile} /etc/mailman.cfg
+          ${if cfg.restApiPassFile == null then ''
+            sed -i "s/#NIXOS_MAILMAN_REST_API_PASS_SECRET#/$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)/g" \
+              /etc/mailman.cfg
+          '' else ''
+            ${pkgs.replace-secret}/bin/replace-secret \
+              '#NIXOS_MAILMAN_REST_API_PASS_SECRET#' \
+              ${cfg.restApiPassFile} \
+              /etc/mailman.cfg
+          ''}
+
+          mailmanDir=/var/lib/mailman
+          mailmanWebDir=/var/lib/mailman-web
+
+          mailmanCfg=$mailmanDir/mailman-hyperkitty.cfg
+          mailmanWebCfg=$mailmanWebDir/settings_local.json
+
+          install -m 0775 -o mailman -g mailman -d /var/lib/mailman-web-static
+          install -m 0770 -o mailman -g mailman -d $mailmanDir
+          install -m 0770 -o ${cfg.webUser} -g mailman -d $mailmanWebDir
+
+          if [ ! -e $mailmanWebCfg ]; then
+              hyperkittyApiKey=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)
+              secretKey=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 64)
+
+              mailmanWebCfgTmp=$(mktemp)
+              jq -n '.MAILMAN_ARCHIVER_KEY=$archiver_key | .SECRET_KEY=$secret_key' \
+                  --arg archiver_key "$hyperkittyApiKey" \
+                  --arg secret_key "$secretKey" \
+                  >"$mailmanWebCfgTmp"
+              chown root:mailman "$mailmanWebCfgTmp"
+              chmod 440 "$mailmanWebCfgTmp"
+              mv -n "$mailmanWebCfgTmp" "$mailmanWebCfg"
+          fi
+
+          hyperkittyApiKey="$(jq -r .MAILMAN_ARCHIVER_KEY "$mailmanWebCfg")"
+          mailmanCfgTmp=$(mktemp)
+          sed "s/@API_KEY@/$hyperkittyApiKey/g" ${mailmanHyperkittyCfg} >"$mailmanCfgTmp"
+          chown mailman:mailman "$mailmanCfgTmp"
+          mv "$mailmanCfgTmp" "$mailmanCfg"
+        '';
+        serviceConfig = {
+          # RemainAfterExit makes restartIfChanged work for this service, so
+          # downstream services will get updated automatically when things like
+          # services.mailman.hyperkitty.baseUrl change.  Otherwise users have to
+          # restart things manually, which is confusing.
+          RemainAfterExit = "yes";
+        };
+      };
+
+      mailman-web-setup = {
+        description = "Prepare mailman-web files and database";
+        before = [ "hyperkitty.service" "mailman-uwsgi.service" ];
+        requiredBy = [ "hyperkitty.service" "mailman-uwsgi.service" ];
+        restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
+        script = ''
+          [[ -e "${webSettings.STATIC_ROOT}" ]] && find "${webSettings.STATIC_ROOT}/" -mindepth 1 -delete
+          ${webEnv}/bin/mailman-web migrate
+          ${webEnv}/bin/mailman-web collectstatic
+          ${webEnv}/bin/mailman-web compress
+        '';
+        serviceConfig = {
+          User = cfg.webUser;
+          Group = "mailman";
+          Type = "oneshot";
+          # Similar to mailman-settings.service, this makes restartTriggers work
+          # properly for this service.
+          RemainAfterExit = "yes";
+          WorkingDirectory = "/var/lib/mailman-web";
+        };
+      };
+
+      mailman-uwsgi = mkIf cfg.serve.enable (let
+        uwsgiConfig.uwsgi = {
+          type = "normal";
+          plugins = ["python3"];
+          home = webEnv;
+          http = "127.0.0.1:18507";
+        }
+        // (if cfg.serve.virtualRoot == "/"
+          then { module = "mailman_web.wsgi:application"; }
+          else {
+            mount = "${cfg.serve.virtualRoot}=mailman_web.wsgi:application";
+            manage-script-name = true;
+          });
+        uwsgiConfigFile = pkgs.writeText "uwsgi-mailman.json" (builtins.toJSON uwsgiConfig);
+      in {
+        wantedBy = ["multi-user.target"];
+        after = optional withPostgresql "postgresql.service";
+        requires = ["mailman-uwsgi.socket" "mailman-web-setup.service"]
+          ++ optional withPostgresql "postgresql.service";
+        restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
+        serviceConfig = {
+          # Since the mailman-web settings.py obstinately creates a logs
+          # dir in the cwd, change to the (writable) runtime directory before
+          # starting uwsgi.
+          ExecStart = "${pkgs.coreutils}/bin/env -C $RUNTIME_DIRECTORY ${pkgs.uwsgi.override { plugins = ["python3"]; python3 = webEnv.python; }}/bin/uwsgi --json ${uwsgiConfigFile}";
+          User = cfg.webUser;
+          Group = "mailman";
+          RuntimeDirectory = "mailman-uwsgi";
+          Restart = "on-failure";
+        };
+      });
+
+      mailman-daily = {
+        description = "Trigger daily Mailman events";
+        startAt = "daily";
+        restartTriggers = [ mailmanCfgFile ];
+        serviceConfig = {
+          ExecStart = "${mailmanEnv}/bin/mailman digests --send";
+          User = "mailman";
+          Group = "mailman";
+        };
+      };
+
+      hyperkitty = lib.mkIf cfg.hyperkitty.enable {
+        description = "GNU Hyperkitty QCluster Process";
+        after = [ "network.target" ];
+        restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
+        wantedBy = [ "mailman.service" "multi-user.target" ];
+        serviceConfig = {
+          ExecStart = "${webEnv}/bin/mailman-web qcluster";
+          User = cfg.webUser;
+          Group = "mailman";
+          WorkingDirectory = "/var/lib/mailman-web";
+          Restart = "on-failure";
+        };
+      };
+    } // flip lib.mapAttrs' {
+      "minutely" = "minutely";
+      "quarter_hourly" = "*:00/15";
+      "hourly" = "hourly";
+      "daily" = "daily";
+      "weekly" = "weekly";
+      "yearly" = "yearly";
+    } (name: startAt:
+      lib.nameValuePair "hyperkitty-${name}" (lib.mkIf cfg.hyperkitty.enable {
+        description = "Trigger ${name} Hyperkitty events";
+        inherit startAt;
+        restartTriggers = [ config.environment.etc."mailman3/settings.py".source ];
+        serviceConfig = {
+          ExecStart = "${webEnv}/bin/mailman-web runjobs ${name}";
+          User = cfg.webUser;
+          Group = "mailman";
+          WorkingDirectory = "/var/lib/mailman-web";
+        };
+      }));
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ lheckemann qyliss ];
+    doc = ./mailman.md;
+  };
+
+}
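A minimal sketch of how the nginx/uwsgi wiring above plays out, assuming serve.virtualRoot is left at "/" and the rest of the module's required options are set elsewhere; the host name below is purely illustrative:

    services.mailman.serve.enable = true;
    services.mailman.webHosts = [ "lists.example.org" ];   # example host, not a default

    # For each entry in webHosts the module above then generates roughly:
    #   services.nginx.virtualHosts."lists.example.org".locations = {
    #     "/".extraConfig  = "uwsgi_pass unix:/run/mailman-web.socket;";
    #     "/static/".alias = "<STATIC_ROOT>/";   # webSettings.STATIC_ROOT from this module
    #   };
    # and mailman-uwsgi.socket listens on /run/mailman-web.socket.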
diff --git a/nixpkgs/nixos/modules/services/mail/mlmmj.nix b/nixpkgs/nixos/modules/services/mail/mlmmj.nix
new file mode 100644
index 000000000000..3f07fabcf177
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/mlmmj.nix
@@ -0,0 +1,173 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  concatMapLines = f: l: lib.concatStringsSep "\n" (map f l);
+
+  cfg = config.services.mlmmj;
+  stateDir = "/var/lib/mlmmj";
+  spoolDir = "/var/spool/mlmmj";
+  listDir = domain: list: "${spoolDir}/${domain}/${list}";
+  listCtl = domain: list: "${listDir domain list}/control";
+  transport = domain: list: "${domain}--${list}@local.list.mlmmj mlmmj:${domain}/${list}";
+  virtual = domain: list: "${list}@${domain} ${domain}--${list}@local.list.mlmmj";
+  alias = domain: list: "${list}: \"|${pkgs.mlmmj}/bin/mlmmj-receive -L ${listDir domain list}/\"";
+  subjectPrefix = list: "[${list}]";
+  listAddress = domain: list: "${list}@${domain}";
+  customHeaders = domain: list: [
+    "List-Id: ${list}"
+    "Reply-To: ${list}@${domain}"
+    "List-Post: <mailto:${list}@${domain}>"
+    "List-Help: <mailto:${list}+help@${domain}>"
+    "List-Subscribe: <mailto:${list}+subscribe@${domain}>"
+    "List-Unsubscribe: <mailto:${list}+unsubscribe@${domain}>"
+  ];
+  footer = domain: list: "To unsubscribe send a mail to ${list}+unsubscribe@${domain}";
+  createList = d: l:
+    let ctlDir = listCtl d l; in
+    ''
+      for DIR in incoming queue queue/discarded archive text subconf unsubconf \
+                 bounce control moderation subscribers.d digesters.d requeue \
+                 nomailsubs.d
+      do
+             mkdir -p '${listDir d l}'/"$DIR"
+      done
+      ${pkgs.coreutils}/bin/mkdir -p ${ctlDir}
+      echo ${listAddress d l} > '${ctlDir}/listaddress'
+      [ ! -e ${ctlDir}/customheaders ] && \
+          echo "${lib.concatStringsSep "\n" (customHeaders d l)}" > '${ctlDir}/customheaders'
+      [ ! -e ${ctlDir}/footer ] && \
+          echo ${footer d l} > '${ctlDir}/footer'
+      [ ! -e ${ctlDir}/prefix ] && \
+          echo ${subjectPrefix l} > '${ctlDir}/prefix'
+    '';
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.mlmmj = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable mlmmj";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "mlmmj";
+        description = lib.mdDoc "mailinglist local user";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "mlmmj";
+        description = lib.mdDoc "mailinglist local group";
+      };
+
+      listDomain = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Set the mailing list domain";
+      };
+
+      mailLists = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "The collection of hosted maillists";
+      };
+
+      maintInterval = mkOption {
+        type = types.str;
+        default = "20min";
+        description = lib.mdDoc ''
+          Time interval between mlmmj-maintd runs, see
+          {manpage}`systemd.time(7)` for format information.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.${cfg.user} = {
+      description = "mlmmj user";
+      home = stateDir;
+      createHome = true;
+      uid = config.ids.uids.mlmmj;
+      group = cfg.group;
+      useDefaultShell = true;
+    };
+
+    users.groups.${cfg.group} = {
+      gid = config.ids.gids.mlmmj;
+    };
+
+    services.postfix = {
+      enable = true;
+      recipientDelimiter= "+";
+      masterConfig.mlmmj = {
+        type = "unix";
+        private = true;
+        privileged = true;
+        chroot = false;
+        wakeup = 0;
+        command = "pipe";
+        args = [
+          "flags=ORhu"
+          "user=mlmmj"
+          "argv=${pkgs.mlmmj}/bin/mlmmj-receive"
+          "-F"
+          "-L"
+          "${spoolDir}/$nexthop"
+        ];
+      };
+
+      extraAliases = concatMapLines (alias cfg.listDomain) cfg.mailLists;
+
+      extraConfig = "propagate_unmatched_extensions = virtual";
+
+      virtual = concatMapLines (virtual cfg.listDomain) cfg.mailLists;
+      transport = concatMapLines (transport cfg.listDomain) cfg.mailLists;
+    };
+
+    environment.systemPackages = [ pkgs.mlmmj ];
+
+    systemd.tmpfiles.rules = [
+      ''d "${stateDir}" -''
+      ''d "${spoolDir}/${cfg.listDomain}" -''
+      ''Z "${spoolDir}" - "${cfg.user}" "${cfg.group}" -''
+    ];
+
+    systemd.services.mlmmj-maintd = {
+      description = "mlmmj maintenance daemon";
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${pkgs.mlmmj}/bin/mlmmj-maintd -F -d ${spoolDir}/${cfg.listDomain}";
+      };
+      preStart = ''
+        ${concatMapLines (createList cfg.listDomain) cfg.mailLists}
+        ${pkgs.postfix}/bin/postmap /etc/postfix/virtual
+        ${pkgs.postfix}/bin/postmap /etc/postfix/transport
+      '';
+    };
+
+    systemd.timers.mlmmj-maintd = {
+      description = "mlmmj maintenance timer";
+      timerConfig.OnUnitActiveSec = cfg.maintInterval;
+      wantedBy = [ "timers.target" ];
+    };
+  };
+
+}
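To make the Postfix glue above concrete, here is a sketch with one illustrative domain and list (values are examples, not defaults):

    services.mlmmj = {
      enable = true;
      listDomain = "example.org";
      mailLists = [ "dev" ];
    };

    # With these values the helper functions above emit, for the "dev" list:
    #   transport map: example.org--dev@local.list.mlmmj mlmmj:example.org/dev
    #   virtual map:   dev@example.org example.org--dev@local.list.mlmmj
    #   alias:         dev: "|<mlmmj>/bin/mlmmj-receive -L /var/spool/mlmmj/example.org/dev/"
    # and mlmmj-maintd's preStart creates the list directories under
    # /var/spool/mlmmj/example.org/dev before the first maintenance run.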
diff --git a/nixpkgs/nixos/modules/services/mail/nullmailer.nix b/nixpkgs/nixos/modules/services/mail/nullmailer.nix
new file mode 100644
index 000000000000..f6befe246b12
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/nullmailer.nix
@@ -0,0 +1,246 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  options = {
+
+    services.nullmailer = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable nullmailer daemon.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "nullmailer";
+        description = lib.mdDoc ''
+          User to use to run nullmailer-send.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nullmailer";
+        description = lib.mdDoc ''
+          Group to use to run nullmailer-send.
+        '';
+      };
+
+      setSendmail = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to set the system sendmail to nullmailer's.";
+      };
+
+      remotesFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Path to the `remotes` control file. This file contains a
+          list of remote servers to which to send each message.
+
+          See `man 8 nullmailer-send` for syntax and available
+          options.
+        '';
+      };
+
+      config = {
+        adminaddr = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            If set, all mail addressed to users at either "localhost" (the literal string)
+            or the canonical host name (from the me control attribute) is remapped to this
+            address. This allows local daemons to send email to "somebody@localhost" and
+            have it go somewhere sensible instead of being bounced by your relay host.
+            To send to multiple addresses, put them all on one line separated by commas.
+          '';
+        };
+
+        allmailfrom = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            If set, its content will override the envelope sender on all messages.
+          '';
+        };
+
+        defaultdomain = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+             The content of this attribute is appended to any host name that
+             does not contain a period (except localhost), including defaulthost
+             and idhost. Defaults to the value of the me attribute, if it exists,
+             otherwise the literal name defaultdomain.
+          '';
+        };
+
+        defaulthost = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+             The content of this attribute is appended to any address that
+             is missing a host name. Defaults to the value of the me control
+             attribute, if it exists, otherwise the literal name defaulthost.
+          '';
+        };
+
+        doublebounceto = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            If the original sender was empty (the original message was a
+            delivery status or disposition notification), the double bounce
+            is sent to the address in this attribute.
+          '';
+        };
+
+        helohost = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Sets the environment variable $HELOHOST which is used by the
+            SMTP protocol module to set the parameter given to the HELO command.
+            Defaults to the value of the me configuration attribute.
+          '';
+        };
+
+        idhost = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            The content of this attribute is used when building the message-id
+            string for the message. Defaults to the canonicalized value of defaulthost.
+          '';
+        };
+
+        maxpause = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+             The maximum time to pause between successive queue runs, in seconds.
+             Defaults to 24 hours (86400).
+          '';
+        };
+
+        me = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+             The fully-qualified host name of the computer running nullmailer.
+             Defaults to the literal name me.
+          '';
+        };
+
+        pausetime = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            The minimum time to pause between successive queue runs when there
+            are messages in the queue, in seconds. Defaults to 1 minute (60).
+            Each time this timeout is reached, the timeout is doubled to a
+            maximum of maxpause. After new messages are injected, the timeout
+            is reset. If this is set to 0, nullmailer-send will exit
+            immediately after going through the queue once (one-shot mode).
+          '';
+        };
+
+        remotes = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            A list of remote servers to which to send each message. Each line
+            contains a remote host name or address followed by an optional
+            protocol string, separated by white space.
+
+            See `man 8 nullmailer-send` for syntax and available
+            options.
+
+            WARNING: This is stored world-readable in the nix store. If you need
+            to specify any secret credentials here, consider using the
+            `remotesFile` option instead.
+          '';
+        };
+
+        sendtimeout = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            The time to wait for a remote module listed above to complete sending
+            a message before killing it and trying again, in seconds.
+            Defaults to 1 hour (3600). If this is set to 0, nullmailer-send
+            will wait forever for messages to complete sending.
+          '';
+        };
+      };
+    };
+  };
+
+  config = let
+    cfg = config.services.nullmailer;
+  in mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.config.remotes == null || cfg.remotesFile == null;
+        message = "Only one of `remotesFile` or `config.remotes` may be used at a time.";
+      }
+    ];
+
+    environment = {
+      systemPackages = [ pkgs.nullmailer ];
+      etc = let
+        validAttrs = filterAttrs (name: value: value != null) cfg.config;
+      in
+        (foldl' (as: name: as // { "nullmailer/${name}".text = validAttrs.${name}; }) {} (attrNames validAttrs))
+          // optionalAttrs (cfg.remotesFile != null) { "nullmailer/remotes".source = cfg.remotesFile; };
+    };
+
+    users = {
+      users.${cfg.user} = {
+        description = "Nullmailer relay-only mta user";
+        inherit (cfg) group;
+        isSystemUser = true;
+      };
+
+      groups.${cfg.group} = { };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /var/spool/nullmailer - ${cfg.user} ${cfg.group} - -"
+      "d /var/spool/nullmailer/failed 770 ${cfg.user} ${cfg.group} - -"
+      "d /var/spool/nullmailer/queue 770 ${cfg.user} ${cfg.group} - -"
+      "d /var/spool/nullmailer/tmp 770 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.nullmailer = {
+      description = "nullmailer";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        rm -f /var/spool/nullmailer/trigger && mkfifo -m 660 /var/spool/nullmailer/trigger
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${pkgs.nullmailer}/bin/nullmailer-send";
+        Restart = "always";
+      };
+    };
+
+    services.mail.sendmailSetuidWrapper = mkIf cfg.setSendmail {
+      program = "sendmail";
+      source = "${pkgs.nullmailer}/bin/sendmail";
+      owner = cfg.user;
+      inherit (cfg) group;
+      setuid = true;
+      setgid = true;
+    };
+  };
+}
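A minimal sketch of the options above; the host names are placeholders, and note the assertion above: config.remotes and remotesFile are mutually exclusive.

    services.nullmailer = {
      enable = true;
      config.me = "host.example.org";              # example FQDN
      # Either an inline remotes list (stored world-readable in the Nix store):
      config.remotes = "relay.example.org smtp";   # "host [protocol ...]", see nullmailer-send(8)
      # ...or, if credentials are involved, a file kept outside the store:
      # remotesFile = "/run/keys/nullmailer-remotes";
    };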
diff --git a/nixpkgs/nixos/modules/services/mail/offlineimap.nix b/nixpkgs/nixos/modules/services/mail/offlineimap.nix
new file mode 100644
index 000000000000..64fa09e83612
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/offlineimap.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.offlineimap;
+in {
+
+  options.services.offlineimap = {
+    enable = mkEnableOption (lib.mdDoc "OfflineIMAP, a software to dispose your mailbox(es) as a local Maildir(s)");
+
+    install = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to install a user service for Offlineimap. Once
+        the service is started, emails will be fetched automatically.
+
+        The service must be manually started for each user with
+        "systemctl --user start offlineimap" or globally through
+        {var}`services.offlineimap.enable`.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.offlineimap;
+      defaultText = literalExpression "pkgs.offlineimap";
+      description = lib.mdDoc "Offlineimap derivation to use.";
+    };
+
+    path = mkOption {
+      type = types.listOf types.path;
+      default = [];
+      example = literalExpression "[ pkgs.pass pkgs.bash pkgs.notmuch ]";
+      description = lib.mdDoc "List of derivations to put in Offlineimap's path.";
+    };
+
+    onCalendar = mkOption {
+      type = types.str;
+      default = "*:0/3"; # every 3 minutes
+      description = lib.mdDoc "How often is offlineimap started. Default is '*:0/3' meaning every 3 minutes. See systemd.time(7) for more information about the format.";
+    };
+
+    timeoutStartSec = mkOption {
+      type = types.str;
+      default = "120sec"; # Kill if still alive after 2 minutes
+      description = lib.mdDoc "How long waiting for offlineimap before killing it. Default is '120sec' meaning every 2 minutes. See systemd.time(7) for more information about the format.";
+    };
+  };
+  config = mkIf (cfg.enable || cfg.install) {
+    systemd.user.services.offlineimap = {
+      description = "Offlineimap: a software to dispose your mailbox(es) as a local Maildir(s)";
+      serviceConfig = {
+        Type      = "oneshot";
+        ExecStart = "${cfg.package}/bin/offlineimap -u syslog -o -1";
+        TimeoutStartSec = cfg.timeoutStartSec;
+      };
+      path = cfg.path;
+    };
+    environment.systemPackages = [ cfg.package ];
+    systemd.user.timers.offlineimap = {
+      description = "offlineimap timer";
+      timerConfig               = {
+        Unit = "offlineimap.service";
+        OnCalendar = cfg.onCalendar;
+        # start immediately after computer is started:
+        Persistent = "true";
+      };
+    } // optionalAttrs cfg.enable { wantedBy = [ "default.target" ]; };
+  };
+}
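A short sketch of enabling the user timer above; the interval and extra path entries are illustrative, not defaults:

    services.offlineimap = {
      enable = true;                      # wants the timer in every user session
      onCalendar = "*:0/5";               # example: run every 5 minutes
      path = [ pkgs.pass pkgs.notmuch ];  # example tools made available to offlineimap
    };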
diff --git a/nixpkgs/nixos/modules/services/mail/opendkim.nix b/nixpkgs/nixos/modules/services/mail/opendkim.nix
new file mode 100644
index 000000000000..a377fccc7bd2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/opendkim.nix
@@ -0,0 +1,167 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.opendkim;
+
+  defaultSock = "local:/run/opendkim/opendkim.sock";
+
+  keyFile = "${cfg.keyPath}/${cfg.selector}.private";
+
+  args = [ "-f" "-l"
+           "-p" cfg.socket
+           "-d" cfg.domains
+           "-k" keyFile
+           "-s" cfg.selector
+         ] ++ optionals (cfg.configFile != null) [ "-x" cfg.configFile ];
+
+in {
+  imports = [
+    (mkRenamedOptionModule [ "services" "opendkim" "keyFile" ] [ "services" "opendkim" "keyPath" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.opendkim = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the OpenDKIM sender authentication system.";
+      };
+
+      socket = mkOption {
+        type = types.str;
+        default = defaultSock;
+        description = lib.mdDoc "Socket which is used for communication with OpenDKIM.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "opendkim";
+        description = lib.mdDoc "User for the daemon.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "opendkim";
+        description = lib.mdDoc "Group for the daemon.";
+      };
+
+      domains = mkOption {
+        type = types.str;
+        default = "csl:${config.networking.hostName}";
+        defaultText = literalExpression ''"csl:''${config.networking.hostName}"'';
+        example = "csl:example.com,mydomain.net";
+        description = lib.mdDoc ''
+          Set of local domains (see `opendkim(8)` for more information on datasets).
+          Messages from these domains are signed, not verified.
+        '';
+      };
+
+      keyPath = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          The path that opendkim should put its generated private keys into.
+          The DNS record to publish will be found in this directory under the name `<selector>.txt`.
+        '';
+        default = "/var/lib/opendkim/keys";
+      };
+
+      selector = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Selector to use when signing.";
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "Additional opendkim configuration.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == "opendkim") {
+      opendkim = {
+        group = cfg.group;
+        uid = config.ids.uids.opendkim;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "opendkim") {
+      opendkim.gid = config.ids.gids.opendkim;
+    };
+
+    environment.systemPackages = [ pkgs.opendkim ];
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.keyPath}' - ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.opendkim = {
+      description = "OpenDKIM signing and verification daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        cd "${cfg.keyPath}"
+        if ! test -f ${cfg.selector}.private; then
+          ${pkgs.opendkim}/bin/opendkim-genkey -s ${cfg.selector} -d all-domains-generic-key
+          echo "Generated OpenDKIM key! Please update your DNS settings:\n"
+          echo "-------------------------------------------------------------"
+          cat ${cfg.selector}.txt
+          echo "-------------------------------------------------------------"
+        fi
+      '';
+
+      serviceConfig = {
+        ExecStart = "${pkgs.opendkim}/bin/opendkim ${escapeShellArgs args}";
+        User = cfg.user;
+        Group = cfg.group;
+        RuntimeDirectory = optional (cfg.socket == defaultSock) "opendkim";
+        StateDirectory = "opendkim";
+        StateDirectoryMode = "0700";
+        ReadWritePaths = [ cfg.keyPath ];
+
+        AmbientCapabilities = [];
+        CapabilityBoundingSet = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6 AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged @resources" ];
+        UMask = "0077";
+      };
+    };
+
+  };
+}
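A minimal sketch of the OpenDKIM module above; the selector and domain set are placeholders:

    services.opendkim = {
      enable = true;
      selector = "mail";              # example selector
      domains = "csl:example.com";    # example domain set (see the option's example above)
    };

    # On first start the preStart script generates the key pair, so the DNS TXT
    # record to publish ends up in /var/lib/opendkim/keys/mail.txt
    # (the default keyPath plus "<selector>.txt").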
diff --git a/nixpkgs/nixos/modules/services/mail/opensmtpd.nix b/nixpkgs/nixos/modules/services/mail/opensmtpd.nix
new file mode 100644
index 000000000000..6ad3386d2d4e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/opensmtpd.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.opensmtpd;
+  conf = pkgs.writeText "smtpd.conf" cfg.serverConfiguration;
+  args = concatStringsSep " " cfg.extraServerArgs;
+
+  sendmail = pkgs.runCommand "opensmtpd-sendmail" { preferLocalBuild = true; } ''
+    mkdir -p $out/bin
+    ln -s ${cfg.package}/sbin/smtpctl $out/bin/sendmail
+  '';
+
+in {
+
+  ###### interface
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "opensmtpd" "addSendmailToSystemPath" ] [ "services" "opensmtpd" "setSendmail" ])
+  ];
+
+  options = {
+
+    services.opensmtpd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the OpenSMTPD server.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.opensmtpd;
+        defaultText = literalExpression "pkgs.opensmtpd";
+        description = lib.mdDoc "The OpenSMTPD package to use.";
+      };
+
+      setSendmail = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to set the system sendmail to OpenSMTPD's.";
+      };
+
+      extraServerArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-v" "-P mta" ];
+        description = lib.mdDoc ''
+          Extra command line arguments provided when the smtpd process
+          is started.
+        '';
+      };
+
+      serverConfiguration = mkOption {
+        type = types.lines;
+        example = ''
+          listen on lo
+          accept for any deliver to lmtp localhost:24
+        '';
+        description = lib.mdDoc ''
+          The contents of the smtpd.conf configuration file. See the
+          OpenSMTPD documentation for syntax information.
+        '';
+      };
+
+      procPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = lib.mdDoc ''
+          Packages to search for filters, tables, queues, and schedulers.
+
+          Add OpenSMTPD-extras here if you want to use the filters, etc. from
+          that package.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable rec {
+    users.groups = {
+      smtpd.gid = config.ids.gids.smtpd;
+      smtpq.gid = config.ids.gids.smtpq;
+    };
+
+    users.users = {
+      smtpd = {
+        description = "OpenSMTPD process user";
+        uid = config.ids.uids.smtpd;
+        group = "smtpd";
+      };
+      smtpq = {
+        description = "OpenSMTPD queue user";
+        uid = config.ids.uids.smtpq;
+        group = "smtpq";
+      };
+    };
+
+    security.wrappers.smtpctl = {
+      owner = "root";
+      group = "smtpq";
+      setuid = false;
+      setgid = true;
+      source = "${cfg.package}/bin/smtpctl";
+    };
+
+    services.mail.sendmailSetuidWrapper = mkIf cfg.setSendmail
+      (security.wrappers.smtpctl // { program = "sendmail"; });
+
+    systemd.tmpfiles.rules = [
+      "d /var/spool/smtpd 711 root - - -"
+      "d /var/spool/smtpd/offline 770 root smtpq - -"
+      "d /var/spool/smtpd/purge 700 smtpq root - -"
+    ];
+
+    systemd.services.opensmtpd = let
+      procEnv = pkgs.buildEnv {
+        name = "opensmtpd-procs";
+        paths = [ cfg.package ] ++ cfg.procPackages;
+        pathsToLink = [ "/libexec/opensmtpd" ];
+      };
+    in {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig.ExecStart = "${cfg.package}/sbin/smtpd -d -f ${conf} ${args}";
+      environment.OPENSMTPD_PROC_PATH = "${procEnv}/libexec/opensmtpd";
+    };
+  };
+}
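A sketch that simply combines the options above, reusing the serverConfiguration example verbatim:

    services.opensmtpd = {
      enable = true;
      serverConfiguration = ''
        listen on lo
        accept for any deliver to lmtp localhost:24
      '';
    };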
diff --git a/nixpkgs/nixos/modules/services/mail/pfix-srsd.nix b/nixpkgs/nixos/modules/services/mail/pfix-srsd.nix
new file mode 100644
index 000000000000..237f36945e4b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/pfix-srsd.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.pfix-srsd = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to run the postfix sender rewriting scheme daemon.";
+      };
+
+      domain = mkOption {
+        description = lib.mdDoc "The domain for which to enable srs";
+        type = types.str;
+        example = "example.com";
+      };
+
+      secretsFile = mkOption {
+        description = lib.mdDoc ''
+          The secret data used to encode the SRS address.
+          to generate, use a command like:
+          `for n in $(seq 5); do dd if=/dev/urandom count=1 bs=1024 status=none | sha256sum | sed 's/  -$//' | sed 's/^/          /'; done`
+        '';
+        type = types.path;
+        default = "/var/lib/pfix-srsd/secrets";
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.pfix-srsd.enable {
+    environment = {
+      systemPackages = [ pkgs.pfixtools ];
+    };
+
+    systemd.services.pfix-srsd = {
+      description = "Postfix sender rewriting scheme daemon";
+      before = [ "postfix.service" ];
+      #note that we use requires rather than wants because postfix
+      #is unable to process (almost) all mail without srsd
+      requiredBy = [ "postfix.service" ];
+      serviceConfig = {
+        Type = "forking";
+        PIDFile = "/run/pfix-srsd.pid";
+        ExecStart = "${pkgs.pfixtools}/bin/pfix-srsd -p /run/pfix-srsd.pid -I ${config.services.pfix-srsd.domain} ${config.services.pfix-srsd.secretsFile}";
+      };
+    };
+  };
+}
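A sketch of the two ways this module gets switched on: directly, or indirectly via services.postfix.useSrs, which (as wired up in postfix.nix below) sets services.pfix-srsd.enable. The domain is a placeholder and must be set either way, since it has no default; secretsFile defaults to /var/lib/pfix-srsd/secrets:

    # Direct:
    services.pfix-srsd = {
      enable = true;
      domain = "example.com";   # example value
    };

    # Or via Postfix (postfix.nix sets services.pfix-srsd.enable = config.services.postfix.useSrs):
    services.postfix.useSrs = true;
    services.pfix-srsd.domain = "example.com";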
diff --git a/nixpkgs/nixos/modules/services/mail/postfix.nix b/nixpkgs/nixos/modules/services/mail/postfix.nix
new file mode 100644
index 000000000000..23c47aaca7e2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/postfix.nix
@@ -0,0 +1,993 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.postfix;
+  user = cfg.user;
+  group = cfg.group;
+  setgidGroup = cfg.setgidGroup;
+
+  haveAliases = cfg.postmasterAlias != "" || cfg.rootAlias != ""
+                      || cfg.extraAliases != "";
+  haveCanonical = cfg.canonical != "";
+  haveTransport = cfg.transport != "";
+  haveVirtual = cfg.virtual != "";
+  haveLocalRecipients = cfg.localRecipients != null;
+
+  clientAccess =
+    optional (cfg.dnsBlacklistOverrides != "")
+      "check_client_access hash:/etc/postfix/client_access";
+
+  dnsBl =
+    optionals (cfg.dnsBlacklists != [])
+      (map (s: "reject_rbl_client " + s) cfg.dnsBlacklists);
+
+  clientRestrictions = concatStringsSep ", " (clientAccess ++ dnsBl);
+
+  mainCf = let
+    escape = replaceStrings ["$"] ["$$"];
+    mkList = items: "\n  " + concatStringsSep ",\n  " items;
+    mkVal = value:
+      if isList value then mkList value
+        else " " + (if value == true then "yes"
+        else if value == false then "no"
+        else toString value);
+    mkEntry = name: value: "${escape name} =${mkVal value}";
+  in
+    concatStringsSep "\n" (mapAttrsToList mkEntry cfg.config)
+      + "\n" + cfg.extraConfig;
+
+  masterCfOptions = { options, config, name, ... }: {
+    options = {
+      name = mkOption {
+        type = types.str;
+        default = name;
+        example = "smtp";
+        description = lib.mdDoc ''
+          The name of the service to run. Defaults to the attribute set key.
+        '';
+      };
+
+      type = mkOption {
+        type = types.enum [ "inet" "unix" "unix-dgram" "fifo" "pass" ];
+        default = "unix";
+        example = "inet";
+        description = lib.mdDoc "The type of the service";
+      };
+
+      private = mkOption {
+        type = types.bool;
+        example = false;
+        description = lib.mdDoc ''
+          Whether the service's sockets and storage directory are restricted to
+          be available only via the mail system. If `null` is
+          given it uses the postfix default `true`.
+        '';
+      };
+
+      privileged = mkOption {
+        type = types.bool;
+        example = true;
+        description = lib.mdDoc "";
+      };
+
+      chroot = mkOption {
+        type = types.bool;
+        example = true;
+        description = lib.mdDoc ''
+          Whether the service is chrooted to have only access to the
+          {option}`services.postfix.queueDir` and the closure of
+          store paths specified by the {option}`program` option.
+        '';
+      };
+
+      wakeup = mkOption {
+        type = types.int;
+        example = 60;
+        description = lib.mdDoc ''
+          Automatically wake up the service after the specified number of
+          seconds. If `0` is given, never wake the service
+          up.
+        '';
+      };
+
+      wakeupUnusedComponent = mkOption {
+        type = types.bool;
+        example = false;
+        description = lib.mdDoc ''
+          If set to `false` the component will only be woken
+          up if it is used. This is equivalent to Postfix's notion of appending a
+          question mark to the wakeup time in
+          {file}`master.cf`.
+        '';
+      };
+
+      maxproc = mkOption {
+        type = types.int;
+        example = 1;
+        description = lib.mdDoc ''
+          The maximum number of processes to spawn for this service. If the
+          value is `0` it doesn't have any limit. If
+          `null` is given it uses the postfix default of
+          `100`.
+        '';
+      };
+
+      command = mkOption {
+        type = types.str;
+        default = name;
+        example = "smtpd";
+        description = lib.mdDoc ''
+          A program name specifying a Postfix service/daemon process.
+          By default it's the attribute {option}`name`.
+        '';
+      };
+
+      args = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-o" "smtp_helo_timeout=5" ];
+        description = lib.mdDoc ''
+          Arguments to pass to the {option}`command`. There is no shell
+          processing involved and shell syntax is passed verbatim to the
+          process.
+        '';
+      };
+
+      rawEntry = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        internal = true;
+        description = lib.mdDoc ''
+          The raw configuration line for the {file}`master.cf`.
+        '';
+      };
+    };
+
+    config.rawEntry = let
+      mkBool = bool: if bool then "y" else "n";
+      mkArg = arg: "${optionalString (hasPrefix "-" arg) "\n  "}${arg}";
+
+      maybeOption = fun: option:
+        if options.${option}.isDefined then fun config.${option} else "-";
+
+      # This is special, because we have two options for this value.
+      wakeup = let
+        wakeupDefined = options.wakeup.isDefined;
+        wakeupUCDefined = options.wakeupUnusedComponent.isDefined;
+        finalValue = toString config.wakeup
+                   + optionalString (wakeupUCDefined && !config.wakeupUnusedComponent) "?";
+      in if wakeupDefined then finalValue else "-";
+
+    in [
+      config.name
+      config.type
+      (maybeOption mkBool "private")
+      (maybeOption (b: mkBool (!b)) "privileged")
+      (maybeOption mkBool "chroot")
+      wakeup
+      (maybeOption toString "maxproc")
+      (config.command + " " + concatMapStringsSep " " mkArg config.args)
+    ];
+  };
+
+  masterCfContent = let
+
+    labels = [
+      "# service" "type" "private" "unpriv" "chroot" "wakeup" "maxproc"
+      "command + args"
+    ];
+
+    labelDefaults = [
+      "# " "" "(yes)" "(yes)" "(no)" "(never)" "(100)" "" ""
+    ];
+
+    masterCf = mapAttrsToList (const (getAttr "rawEntry")) cfg.masterConfig;
+
+    # A list of the maximum width of the columns across all lines and labels
+    maxWidths = let
+      foldLine = line: acc: let
+        columnLengths = map stringLength line;
+      in zipListsWith max acc columnLengths;
+      # We need to handle the last column specially here, because it's
+      # open-ended (command + args).
+      lines = [ labels labelDefaults ] ++ (map (l: init l ++ [""]) masterCf);
+    in foldr foldLine (genList (const 0) (length labels)) lines;
+
+    # Pad a string with spaces from the right (opposite of fixedWidthString).
+    pad = width: str: let
+      padWidth = width - stringLength str;
+      padding = concatStrings (genList (const " ") padWidth);
+    in str + optionalString (padWidth > 0) padding;
+
+    # It's + 2 here, because that's the amount of spacing between columns.
+    fullWidth = foldr (width: acc: acc + width + 2) 0 maxWidths;
+
+    formatLine = line: concatStringsSep "  " (zipListsWith pad maxWidths line);
+
+    formattedLabels = let
+      sep = "# " + concatStrings (genList (const "=") (fullWidth + 5));
+      lines = [ sep (formatLine labels) (formatLine labelDefaults) sep ];
+    in concatStringsSep "\n" lines;
+
+  in formattedLabels + "\n" + concatMapStringsSep "\n" formatLine masterCf + "\n" + cfg.extraMasterConf;
+
+  headerCheckOptions = { ... }:
+  {
+    options = {
+      pattern = mkOption {
+        type = types.str;
+        default = "/^.*/";
+        example = "/^X-Mailer:/";
+        description = lib.mdDoc "A regexp pattern matching the header";
+      };
+      action = mkOption {
+        type = types.str;
+        default = "DUNNO";
+        example = "BCC mail@example.com";
+        description = lib.mdDoc "The action to be executed when the pattern is matched";
+      };
+    };
+  };
+
+  headerChecks = concatStringsSep "\n" (map (x: "${x.pattern} ${x.action}") cfg.headerChecks) + cfg.extraHeaderChecks;
+
+  aliases = let separator = optionalString (cfg.aliasMapType == "hash") ":"; in
+    optionalString (cfg.postmasterAlias != "") ''
+      postmaster${separator} ${cfg.postmasterAlias}
+    ''
+    + optionalString (cfg.rootAlias != "") ''
+      root${separator} ${cfg.rootAlias}
+    ''
+    + cfg.extraAliases
+  ;
+
+  aliasesFile = pkgs.writeText "postfix-aliases" aliases;
+  canonicalFile = pkgs.writeText "postfix-canonical" cfg.canonical;
+  virtualFile = pkgs.writeText "postfix-virtual" cfg.virtual;
+  localRecipientMapFile = pkgs.writeText "postfix-local-recipient-map" (concatMapStrings (x: x + " ACCEPT\n") cfg.localRecipients);
+  checkClientAccessFile = pkgs.writeText "postfix-check-client-access" cfg.dnsBlacklistOverrides;
+  mainCfFile = pkgs.writeText "postfix-main.cf" mainCf;
+  masterCfFile = pkgs.writeText "postfix-master.cf" masterCfContent;
+  transportFile = pkgs.writeText "postfix-transport" cfg.transport;
+  headerChecksFile = pkgs.writeText "postfix-header-checks" headerChecks;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.postfix = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to run the Postfix mail server.";
+      };
+
+      enableSmtp = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable smtp in master.cf.";
+      };
+
+      enableSubmission = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable smtp submission.";
+      };
+
+      enableSubmissions = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable smtp submission via smtps.
+
+          According to RFC 8314 this should be preferred
+          over STARTTLS for submission of messages by end user clients.
+        '';
+      };
+
+      submissionOptions = mkOption {
+        type = with types; attrsOf str;
+        default = {
+          smtpd_tls_security_level = "encrypt";
+          smtpd_sasl_auth_enable = "yes";
+          smtpd_client_restrictions = "permit_sasl_authenticated,reject";
+          milter_macro_daemon_name = "ORIGINATING";
+        };
+        example = {
+          smtpd_tls_security_level = "encrypt";
+          smtpd_sasl_auth_enable = "yes";
+          smtpd_sasl_type = "dovecot";
+          smtpd_client_restrictions = "permit_sasl_authenticated,reject";
+          milter_macro_daemon_name = "ORIGINATING";
+        };
+        description = lib.mdDoc "Options for the submission config in master.cf";
+      };
+
+      submissionsOptions = mkOption {
+        type = with types; attrsOf str;
+        default = {
+          smtpd_sasl_auth_enable = "yes";
+          smtpd_client_restrictions = "permit_sasl_authenticated,reject";
+          milter_macro_daemon_name = "ORIGINATING";
+        };
+        example = {
+          smtpd_sasl_auth_enable = "yes";
+          smtpd_sasl_type = "dovecot";
+          smtpd_client_restrictions = "permit_sasl_authenticated,reject";
+          milter_macro_daemon_name = "ORIGINATING";
+        };
+        description = lib.mdDoc ''
+          Options for the submission config via smtps in master.cf.
+
+          smtpd_tls_security_level will be set to encrypt if it is missing
+          or has one of the values "may" or "none".
+
+          smtpd_tls_wrappermode with value "yes" will be added automatically.
+        '';
+      };
+
+      setSendmail = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to set the system sendmail to postfix's.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "postfix";
+        description = lib.mdDoc "What to call the Postfix user (must be used only for postfix).";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "postfix";
+        description = lib.mdDoc "What to call the Postfix group (must be used only for postfix).";
+      };
+
+      setgidGroup = mkOption {
+        type = types.str;
+        default = "postdrop";
+        description = lib.mdDoc ''
+          What to call the Postfix setgid group (used for postdrop). This
+          group should not be used for anything else.
+        '';
+      };
+
+      networks = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        example = ["192.168.0.1/24"];
+        description = lib.mdDoc ''
+          Net masks for trusted hosts, i.e. hosts allowed to relay mail to
+          third parties. Leave empty to use the mynetworks_style configuration
+          or the default (localhost-only).
+        '';
+      };
+
+      networksStyle = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Name of the standard way of specifying trusted networks to use.
+          Leave blank if you specify networks explicitly or if you want the
+          default (localhost-only).
+        '';
+      };
+
+      hostname = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Hostname to use. Leave blank to use the machine's hostname.
+          It should be a FQDN.
+        '';
+      };
+
+      domain = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Domain to use. Leave blank to use the hostname minus its first component.
+        '';
+      };
+
+      origin = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Origin to use in outgoing e-mail. Leave blank to use the hostname.
+        '';
+      };
+
+      destination = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        example = ["localhost"];
+        description = lib.mdDoc ''
+          Complete list of domains we deliver locally. Leave blank to
+          accept the Postfix default.
+        '';
+      };
+
+      relayDomains = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        example = ["localdomain"];
+        description = lib.mdDoc ''
+          List of domains we agree to relay to. Default is empty.
+        '';
+      };
+
+      relayHost = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Mail relay for outbound mail.
+        '';
+      };
+
+      relayPort = mkOption {
+        type = types.int;
+        default = 25;
+        description = lib.mdDoc ''
+          SMTP port of the mail relay.
+        '';
+      };
+
+      lookupMX = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether the specified relay host is just a domain whose MX record should be used.
+        '';
+      };
+
+      postmasterAlias = mkOption {
+        type = types.str;
+        default = "root";
+        description = lib.mdDoc ''
+          Who should receive postmaster e-mail. Multiple values can be added by
+          separating them with commas.
+        '';
+      };
+
+      rootAlias = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Who should receive root e-mail. Blank for no redirection.
+          Multiple values can be added by separating them with commas.
+        '';
+      };
+
+      extraAliases = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional entries to put verbatim into aliases file, cf. man-page aliases(8).
+        '';
+      };
+
+      aliasMapType = mkOption {
+        type = with types; enum [ "hash" "regexp" "pcre" ];
+        default = "hash";
+        example = "regexp";
+        description = lib.mdDoc "The format the alias map should have. Use regexp if you want to use regular expressions.";
+      };
+
+      config = mkOption {
+        type = with types; attrsOf (oneOf [ bool str (listOf str) ]);
+        description = lib.mdDoc ''
+          The main.cf configuration file as key value set.
+        '';
+        example = {
+          mail_owner = "postfix";
+          smtp_tls_security_level = "may";
+        };
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to the main.cf configuration file.
+        '';
+      };
+
+      tlsTrustedAuthorities = mkOption {
+        type = types.str;
+        default = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
+        defaultText = literalExpression ''"''${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"'';
+        description = lib.mdDoc ''
+          File containing trusted certification authorities (CA) to verify certificates of mailservers contacted for mail delivery. This basically sets smtp_tls_CAfile and enables opportunistic tls. Defaults to NixOS trusted certification authorities.
+        '';
+      };
+
+      sslCert = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "SSL certificate to use.";
+      };
+
+      sslKey = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "SSL key to use.";
+      };
+
+      recipientDelimiter = mkOption {
+        type = types.str;
+        default = "";
+        example = "+";
+        description = lib.mdDoc ''
+          Delimiter for address extension: so mail to user+test can be handled by ~user/.forward+test
+        '';
+      };
+
+      canonical = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Entries for the {manpage}`canonical(5)` table.
+        '';
+      };
+
+      virtual = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Entries for the virtual alias map, cf. man-page virtual(5).
+        '';
+      };
+
+      virtualMapType = mkOption {
+        type = types.enum ["hash" "regexp" "pcre"];
+        default = "hash";
+        description = lib.mdDoc ''
+          What type of virtual alias map file to use. Use `"regexp"` for regular expressions.
+        '';
+      };
+
+      localRecipients = mkOption {
+        type = with types; nullOr (listOf str);
+        default = null;
+        description = lib.mdDoc ''
+          List of accepted local users. Specify a bare username, an
+          `"@domain.tld"` wild-card, or a complete
+          `"user@domain.tld"` address. If set, these names end
+          up in the local recipient map -- see the local(8) man-page -- and
+          effectively replace the system user database lookup that's otherwise
+          used by default.
+        '';
+      };
+
+      transport = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Entries for the transport map, cf. man-page transport(8).
+        '';
+      };
+
+      dnsBlacklists = mkOption {
+        default = [];
+        type = with types; listOf str;
+        description = lib.mdDoc "dns blacklist servers to use with smtpd_client_restrictions";
+      };
+
+      dnsBlacklistOverrides = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc "contents of check_client_access for overriding dnsBlacklists";
+      };
+
+      masterConfig = mkOption {
+        type = types.attrsOf (types.submodule masterCfOptions);
+        default = {};
+        example =
+          { submission = {
+              type = "inet";
+              args = [ "-o" "smtpd_tls_security_level=encrypt" ];
+            };
+          };
+        description = lib.mdDoc ''
+          An attribute set of service options, which correspond to the service
+          definitions usually done within the Postfix
+          {file}`master.cf` file.
+        '';
+      };
+
+      extraMasterConf = mkOption {
+        type = types.lines;
+        default = "";
+        example = "submission inet n - n - - smtpd";
+        description = lib.mdDoc "Extra lines to append to the generated master.cf file.";
+      };
+
+      enableHeaderChecks = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = lib.mdDoc "Whether to enable postfix header checks";
+      };
+
+      headerChecks = mkOption {
+        type = types.listOf (types.submodule headerCheckOptions);
+        default = [];
+        example = [ { pattern = "/^X-Spam-Flag:/"; action = "REDIRECT spam@example.com"; } ];
+        description = lib.mdDoc "Postfix header checks.";
+      };
+
+      extraHeaderChecks = mkOption {
+        type = types.lines;
+        default = "";
+        example = "/^X-Spam-Flag:/ REDIRECT spam@example.com";
+        description = lib.mdDoc "Extra lines to /etc/postfix/header_checks file.";
+      };
+
+      aliasFiles = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = lib.mdDoc "Aliases' tables to be compiled and placed into /var/lib/postfix/conf.";
+      };
+
+      mapFiles = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = lib.mdDoc "Maps to be compiled and placed into /var/lib/postfix/conf.";
+      };
+
+      useSrs = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable sender rewriting scheme";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.postfix.enable (mkMerge [
+    {
+
+      environment = {
+        etc.postfix.source = "/var/lib/postfix/conf";
+
+        # This makes it convenient to run 'postqueue'/'postdrop', for example.
+        systemPackages = [ pkgs.postfix ];
+      };
+
+      services.pfix-srsd.enable = config.services.postfix.useSrs;
+
+      services.mail.sendmailSetuidWrapper = mkIf config.services.postfix.setSendmail {
+        program = "sendmail";
+        source = "${pkgs.postfix}/bin/sendmail";
+        owner = "root";
+        group = setgidGroup;
+        setuid = false;
+        setgid = true;
+      };
+
+      security.wrappers.mailq = {
+        program = "mailq";
+        source = "${pkgs.postfix}/bin/mailq";
+        owner = "root";
+        group = setgidGroup;
+        setuid = false;
+        setgid = true;
+      };
+
+      security.wrappers.postqueue = {
+        program = "postqueue";
+        source = "${pkgs.postfix}/bin/postqueue";
+        owner = "root";
+        group = setgidGroup;
+        setuid = false;
+        setgid = true;
+      };
+
+      security.wrappers.postdrop = {
+        program = "postdrop";
+        source = "${pkgs.postfix}/bin/postdrop";
+        owner = "root";
+        group = setgidGroup;
+        setuid = false;
+        setgid = true;
+      };
+
+      users.users = optionalAttrs (user == "postfix")
+        { postfix = {
+            description = "Postfix mail server user";
+            uid = config.ids.uids.postfix;
+            group = group;
+          };
+        };
+
+      users.groups =
+        optionalAttrs (group == "postfix")
+        { ${group}.gid = config.ids.gids.postfix;
+        }
+        // optionalAttrs (setgidGroup == "postdrop")
+        { ${setgidGroup}.gid = config.ids.gids.postdrop;
+        };
+
+      systemd.services.postfix-setup =
+        { description = "Setup for Postfix mail server";
+          serviceConfig.RemainAfterExit = true;
+          serviceConfig.Type = "oneshot";
+          script = ''
+            # Backwards compatibility
+            if [ ! -d /var/lib/postfix ] && [ -d /var/postfix ]; then
+              mkdir -p /var/lib
+              mv /var/postfix /var/lib/postfix
+            fi
+
+            # All permissions are set according to the ${pkgs.postfix}/etc/postfix/postfix-files script
+            mkdir -p /var/lib/postfix /var/lib/postfix/queue/{pid,public,maildrop}
+            chmod 0755 /var/lib/postfix
+            chown root:root /var/lib/postfix
+
+            rm -rf /var/lib/postfix/conf
+            mkdir -p /var/lib/postfix/conf
+            chmod 0755 /var/lib/postfix/conf
+            ln -sf ${pkgs.postfix}/etc/postfix/postfix-files /var/lib/postfix/conf/postfix-files
+            ln -sf ${mainCfFile} /var/lib/postfix/conf/main.cf
+            ln -sf ${masterCfFile} /var/lib/postfix/conf/master.cf
+
+            ${concatStringsSep "\n" (mapAttrsToList (to: from: ''
+              ln -sf ${from} /var/lib/postfix/conf/${to}
+              ${pkgs.postfix}/bin/postalias /var/lib/postfix/conf/${to}
+            '') cfg.aliasFiles)}
+            ${concatStringsSep "\n" (mapAttrsToList (to: from: ''
+              ln -sf ${from} /var/lib/postfix/conf/${to}
+              ${pkgs.postfix}/bin/postmap /var/lib/postfix/conf/${to}
+            '') cfg.mapFiles)}
+
+            mkdir -p /var/spool/mail
+            chown root:root /var/spool/mail
+            chmod a+rwxt /var/spool/mail
+            ln -sf /var/spool/mail /var/
+
+            # Finally, delegate to Postfix to check the remaining directories in /var/lib/postfix and set their permissions
+            ${pkgs.postfix}/bin/postfix set-permissions config_directory=/var/lib/postfix/conf
+          '';
+        };
+
+      systemd.services.postfix =
+        { description = "Postfix mail server";
+
+          wantedBy = [ "multi-user.target" ];
+          after = [ "network.target" "postfix-setup.service" ];
+          requires = [ "postfix-setup.service" ];
+          path = [ pkgs.postfix ];
+
+          serviceConfig = {
+            Type = "forking";
+            Restart = "always";
+            PIDFile = "/var/lib/postfix/queue/pid/master.pid";
+            ExecStart = "${pkgs.postfix}/bin/postfix start";
+            ExecStop = "${pkgs.postfix}/bin/postfix stop";
+            ExecReload = "${pkgs.postfix}/bin/postfix reload";
+          };
+        };
+
+      services.postfix.config = (mapAttrs (_: v: mkDefault v) {
+        compatibility_level  = pkgs.postfix.version;
+        mail_owner           = cfg.user;
+        default_privs        = "nobody";
+
+        # NixOS specific locations
+        data_directory       = "/var/lib/postfix/data";
+        queue_directory      = "/var/lib/postfix/queue";
+
+        # Default location of everything in package
+        meta_directory       = "${pkgs.postfix}/etc/postfix";
+        command_directory    = "${pkgs.postfix}/bin";
+        sample_directory     = "/etc/postfix";
+        newaliases_path      = "${pkgs.postfix}/bin/newaliases";
+        mailq_path           = "${pkgs.postfix}/bin/mailq";
+        readme_directory     = false;
+        sendmail_path        = "${pkgs.postfix}/bin/sendmail";
+        daemon_directory     = "${pkgs.postfix}/libexec/postfix";
+        manpage_directory    = "${pkgs.postfix}/share/man";
+        html_directory       = "${pkgs.postfix}/share/postfix/doc/html";
+        shlib_directory      = false;
+        mail_spool_directory = "/var/spool/mail/";
+        setgid_group         = cfg.setgidGroup;
+      })
+      // optionalAttrs (cfg.relayHost != "") { relayhost = if cfg.lookupMX
+                                                           then "${cfg.relayHost}:${toString cfg.relayPort}"
+                                                           else "[${cfg.relayHost}]:${toString cfg.relayPort}"; }
+      // optionalAttrs (!config.networking.enableIPv6) { inet_protocols = mkDefault "ipv4"; }
+      // optionalAttrs (cfg.networks != null) { mynetworks = cfg.networks; }
+      // optionalAttrs (cfg.networksStyle != "") { mynetworks_style = cfg.networksStyle; }
+      // optionalAttrs (cfg.hostname != "") { myhostname = cfg.hostname; }
+      // optionalAttrs (cfg.domain != "") { mydomain = cfg.domain; }
+      // optionalAttrs (cfg.origin != "") { myorigin =  cfg.origin; }
+      // optionalAttrs (cfg.destination != null) { mydestination = cfg.destination; }
+      // optionalAttrs (cfg.relayDomains != null) { relay_domains = cfg.relayDomains; }
+      // optionalAttrs (cfg.recipientDelimiter != "") { recipient_delimiter = cfg.recipientDelimiter; }
+      // optionalAttrs haveAliases { alias_maps = [ "${cfg.aliasMapType}:/etc/postfix/aliases" ]; }
+      // optionalAttrs haveTransport { transport_maps = [ "hash:/etc/postfix/transport" ]; }
+      // optionalAttrs haveVirtual { virtual_alias_maps = [ "${cfg.virtualMapType}:/etc/postfix/virtual" ]; }
+      // optionalAttrs haveLocalRecipients { local_recipient_maps = [ "hash:/etc/postfix/local_recipients" ] ++ optional haveAliases "$alias_maps"; }
+      // optionalAttrs (cfg.dnsBlacklists != []) { smtpd_client_restrictions = clientRestrictions; }
+      // optionalAttrs cfg.useSrs {
+        sender_canonical_maps = [ "tcp:127.0.0.1:10001" ];
+        sender_canonical_classes = [ "envelope_sender" ];
+        recipient_canonical_maps = [ "tcp:127.0.0.1:10002" ];
+        recipient_canonical_classes = [ "envelope_recipient" ];
+      }
+      // optionalAttrs cfg.enableHeaderChecks { header_checks = [ "regexp:/etc/postfix/header_checks" ]; }
+      // optionalAttrs (cfg.tlsTrustedAuthorities != "") {
+        smtp_tls_CAfile = cfg.tlsTrustedAuthorities;
+        smtp_tls_security_level = mkDefault "may";
+      }
+      // optionalAttrs (cfg.sslCert != "") {
+        smtp_tls_cert_file = cfg.sslCert;
+        smtp_tls_key_file = cfg.sslKey;
+
+        smtp_tls_security_level = mkDefault "may";
+
+        smtpd_tls_cert_file = cfg.sslCert;
+        smtpd_tls_key_file = cfg.sslKey;
+
+        smtpd_tls_security_level = "may";
+      };
+
+      services.postfix.masterConfig = {
+        pickup = {
+          private = false;
+          wakeup = 60;
+          maxproc = 1;
+        };
+        cleanup = {
+          private = false;
+          maxproc = 0;
+        };
+        qmgr = {
+          private = false;
+          wakeup = 300;
+          maxproc = 1;
+        };
+        tlsmgr = {
+          wakeup = 1000;
+          wakeupUnusedComponent = false;
+          maxproc = 1;
+        };
+        rewrite = {
+          command = "trivial-rewrite";
+        };
+        bounce = {
+          maxproc = 0;
+        };
+        defer = {
+          maxproc = 0;
+          command = "bounce";
+        };
+        trace = {
+          maxproc = 0;
+          command = "bounce";
+        };
+        verify = {
+          maxproc = 1;
+        };
+        flush = {
+          private = false;
+          wakeup = 1000;
+          wakeupUnusedComponent = false;
+          maxproc = 0;
+        };
+        proxymap = {
+          command = "proxymap";
+        };
+        proxywrite = {
+          maxproc = 1;
+          command = "proxymap";
+        };
+        showq = {
+          private = false;
+        };
+        error = {};
+        retry = {
+          command = "error";
+        };
+        discard = {};
+        local = {
+          privileged = true;
+        };
+        virtual = {
+          privileged = true;
+        };
+        lmtp = {
+        };
+        anvil = {
+          maxproc = 1;
+        };
+        scache = {
+          maxproc = 1;
+        };
+      } // optionalAttrs cfg.enableSubmission {
+        submission = {
+          type = "inet";
+          private = false;
+          command = "smtpd";
+          args = let
+            mkKeyVal = opt: val: [ "-o" (opt + "=" + val) ];
+          in concatLists (mapAttrsToList mkKeyVal cfg.submissionOptions);
+        };
+      } // optionalAttrs cfg.enableSmtp {
+        smtp_inet = {
+          name = "smtp";
+          type = "inet";
+          private = false;
+          command = "smtpd";
+        };
+        smtp = {};
+        relay = {
+          command = "smtp";
+          args = [ "-o" "smtp_fallback_relay=" ];
+        };
+      } // optionalAttrs cfg.enableSubmissions {
+        submissions = {
+          type = "inet";
+          private = false;
+          command = "smtpd";
+          args = let
+            mkKeyVal = opt: val: [ "-o" (opt + "=" + val) ];
+            adjustSmtpTlsSecurityLevel = !(cfg.submissionsOptions ? smtpd_tls_security_level) ||
+                                      cfg.submissionsOptions.smtpd_tls_security_level == "none" ||
+                                      cfg.submissionsOptions.smtpd_tls_security_level == "may";
+            submissionsOptions = cfg.submissionsOptions // {
+              smtpd_tls_wrappermode = "yes";
+            } // optionalAttrs adjustSmtpTlsSecurityLevel {
+              smtpd_tls_security_level = "encrypt";
+            };
+          in concatLists (mapAttrsToList mkKeyVal submissionsOptions);
+        };
+      };
+    }
+
+    (mkIf haveAliases {
+      services.postfix.aliasFiles.aliases = aliasesFile;
+    })
+    (mkIf haveCanonical {
+      services.postfix.mapFiles.canonical = canonicalFile;
+    })
+    (mkIf haveTransport {
+      services.postfix.mapFiles.transport = transportFile;
+    })
+    (mkIf haveVirtual {
+      services.postfix.mapFiles.virtual = virtualFile;
+    })
+    (mkIf haveLocalRecipients {
+      services.postfix.mapFiles.local_recipients = localRecipientMapFile;
+    })
+    (mkIf cfg.enableHeaderChecks {
+      services.postfix.mapFiles.header_checks = headerChecksFile;
+    })
+    (mkIf (cfg.dnsBlacklists != []) {
+      services.postfix.mapFiles.client_access = checkClientAccessFile;
+    })
+  ]);
+
+  imports = [
+   (mkRemovedOptionModule [ "services" "postfix" "sslCACert" ]
+     "services.postfix.sslCACert was replaced by services.postfix.tlsTrustedAuthorities. In case you intend that your server should validate requested client certificates use services.postfix.extraConfig.")
+
+   (mkChangedOptionModule [ "services" "postfix" "useDane" ]
+     [ "services" "postfix" "config" "smtp_tls_security_level" ]
+     (config: mkIf config.services.postfix.useDane "dane"))
+  ];
+}
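
For orientation, a minimal sketch of how this module could be enabled from a system
configuration. The host names are hypothetical placeholders; `hostname`, `relayHost`
and `relayPort` are the options referenced by the implementation above, and
`headerChecks` follows the example given in its option declaration:

  services.postfix = {
    enable = true;
    hostname = "mail.example.com";      # becomes myhostname
    relayHost = "smtp.example.com";     # becomes relayhost (bracketed unless lookupMX = true)
    relayPort = 587;
    enableHeaderChecks = true;
    headerChecks = [
      { pattern = "/^X-Spam-Flag:/"; action = "REDIRECT spam@example.com"; }
    ];
  };
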
diff --git a/nixpkgs/nixos/modules/services/mail/postfixadmin.nix b/nixpkgs/nixos/modules/services/mail/postfixadmin.nix
new file mode 100644
index 000000000000..b86428770cb2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/postfixadmin.nix
@@ -0,0 +1,199 @@
+{ lib, config, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.postfixadmin;
+  fpm = config.services.phpfpm.pools.postfixadmin;
+  localDB = cfg.database.host == "localhost";
+  user = if localDB then cfg.database.username else "nginx";
+in
+{
+  options.services.postfixadmin = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable postfixadmin.
+
+        Also enables nginx virtual host management.
+        Further nginx configuration can be done by adapting `services.nginx.virtualHosts.<name>`.
+        See [](#opt-services.nginx.virtualHosts) for further information.
+      '';
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      example = "postfixadmin.example.com";
+      description = lib.mdDoc "Hostname to use for the nginx vhost";
+    };
+
+    adminEmail = mkOption {
+      type = types.str;
+      example = "postmaster@example.com";
+      description = lib.mdDoc ''
+        Defines the Site Admin's email address.
+        This address is used as the sender when creating mailboxes and
+        on the Send Email / Broadcast message pages.
+      '';
+    };
+
+    setupPasswordFile = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Password file for the admin.
+        Generate with `php -r "echo password_hash('some password here', PASSWORD_DEFAULT);"`
+      '';
+    };
+
+    database = {
+      username = mkOption {
+        type = types.str;
+        default = "postfixadmin";
+        description = lib.mdDoc ''
+          Username for the postgresql connection.
+          If `database.host` is set to `localhost`, a unix user and group of the same name will be created as well.
+        '';
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          Host of the postgresql server. If this is not set to
+          `localhost`, you have to create the
+          postgresql user and database yourself, with appropriate
+          permissions.
+        '';
+      };
+      passwordFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Password file for the postgresql connection. Must be readable by user `nginx`.";
+      };
+      dbname = mkOption {
+        type = types.str;
+        default = "postfixadmin";
+        description = lib.mdDoc "Name of the postgresql database";
+      };
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "Extra configuration for the postfixadmin instance, see postfixadmin's config.inc.php for available options.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."postfixadmin/config.local.php".text = ''
+      <?php
+
+      $CONF['setup_password'] = file_get_contents('${cfg.setupPasswordFile}');
+
+      $CONF['database_type'] = 'pgsql';
+      $CONF['database_host'] = ${if localDB then "null" else "'${cfg.database.host}'"};
+      ${optionalString localDB "$CONF['database_user'] = '${cfg.database.username}';"}
+      $CONF['database_password'] = ${if localDB then "'dummy'" else "file_get_contents('${cfg.database.passwordFile}')"};
+      $CONF['database_name'] = '${cfg.database.dbname}';
+      $CONF['configured'] = true;
+
+      ${cfg.extraConfig}
+    '';
+
+    systemd.tmpfiles.rules = [ "d /var/cache/postfixadmin/templates_c 700 ${user} ${user}" ];
+
+    services.nginx = {
+      enable = true;
+      virtualHosts = {
+        ${cfg.hostName} = {
+          forceSSL = mkDefault true;
+          enableACME = mkDefault true;
+          locations."/" = {
+            root = "${pkgs.postfixadmin}/public";
+            index = "index.php";
+            extraConfig = ''
+              location ~* \.php$ {
+                fastcgi_split_path_info ^(.+\.php)(/.+)$;
+                fastcgi_pass unix:${fpm.socket};
+                include ${config.services.nginx.package}/conf/fastcgi_params;
+                include ${pkgs.nginx}/conf/fastcgi.conf;
+              }
+            '';
+          };
+        };
+      };
+    };
+
+    services.postgresql = mkIf localDB {
+      enable = true;
+      ensureUsers = [ {
+        name = cfg.database.username;
+      } ];
+    };
+    # The postgresql module doesn't currently support concepts like
+    # objects owners and extensions; for now we tack on what's needed
+    # here.
+    systemd.services.postfixadmin-postgres = let pgsql = config.services.postgresql; in mkIf localDB {
+      after = [ "postgresql.service" ];
+      bindsTo = [ "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [
+        pgsql.package
+        pkgs.util-linux
+      ];
+      script = ''
+        set -eu
+
+        PSQL() {
+            psql --port=${toString pgsql.port} "$@"
+        }
+
+        PSQL -tAc "SELECT 1 FROM pg_database WHERE datname = '${cfg.database.dbname}'" | grep -q 1 || PSQL -tAc 'CREATE DATABASE "${cfg.database.dbname}" OWNER "${cfg.database.username}"'
+        current_owner=$(PSQL -tAc "SELECT pg_catalog.pg_get_userbyid(datdba) FROM pg_catalog.pg_database WHERE datname = '${cfg.database.dbname}'")
+        if [[ "$current_owner" != "${cfg.database.username}" ]]; then
+            PSQL -tAc 'ALTER DATABASE "${cfg.database.dbname}" OWNER TO "${cfg.database.username}"'
+            if [[ -e "${config.services.postgresql.dataDir}/.reassigning_${cfg.database.dbname}" ]]; then
+                echo "Reassigning ownership of database ${cfg.database.dbname} to user ${cfg.database.username} failed on last boot. Failing..."
+                exit 1
+            fi
+            touch "${config.services.postgresql.dataDir}/.reassigning_${cfg.database.dbname}"
+            PSQL "${cfg.database.dbname}" -tAc "REASSIGN OWNED BY \"$current_owner\" TO \"${cfg.database.username}\""
+            rm "${config.services.postgresql.dataDir}/.reassigning_${cfg.database.dbname}"
+        fi
+      '';
+
+      serviceConfig = {
+        User = pgsql.superUser;
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+
+    users.users.${user} = mkIf localDB {
+      group = user;
+      isSystemUser = true;
+      createHome = false;
+    };
+    users.groups.${user} = mkIf localDB {};
+
+    services.phpfpm.pools.postfixadmin = {
+      user = user;
+      phpPackage = pkgs.php81;
+      phpOptions = ''
+        error_log = 'stderr'
+        log_errors = on
+      '';
+      settings = mapAttrs (name: mkDefault) {
+        "listen.owner" = "nginx";
+        "listen.group" = "nginx";
+        "listen.mode" = "0660";
+        "pm" = "dynamic";
+        "pm.max_children" = 75;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 1;
+        "pm.max_spare_servers" = 20;
+        "pm.max_requests" = 500;
+        "catch_workers_output" = true;
+      };
+    };
+  };
+}
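
A minimal usage sketch, assuming a local PostgreSQL database (the default
`database.host = "localhost"`); the host name and secret paths are hypothetical
placeholders:

  services.postfixadmin = {
    enable = true;
    hostName = "postfixadmin.example.com";
    adminEmail = "postmaster@example.com";
    # Hash generated with: php -r "echo password_hash('some password here', PASSWORD_DEFAULT);"
    setupPasswordFile = "/run/secrets/postfixadmin-setup-hash";
    database.passwordFile = "/run/secrets/postfixadmin-db-password";
  };

With a local database the module provisions the PostgreSQL user and database itself
and relies on peer authentication, so the database password file is effectively
unused ('dummy' is written to config.local.php).
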
diff --git a/nixpkgs/nixos/modules/services/mail/postgrey.nix b/nixpkgs/nixos/modules/services/mail/postgrey.nix
new file mode 100644
index 000000000000..fdfa08946ddf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/postgrey.nix
@@ -0,0 +1,205 @@
+{ config, lib, pkgs, ... }:
+
+with lib; let
+
+  cfg = config.services.postgrey;
+
+  natural = with types; addCheck int (x: x >= 0);
+  natural' = with types; addCheck int (x: x > 0);
+
+  socket = with types; addCheck (either (submodule unixSocket) (submodule inetSocket)) (x: x ? path || x ? port);
+
+  inetSocket = with types; {
+    options = {
+      addr = mkOption {
+        type = nullOr str;
+        default = null;
+        example = "127.0.0.1";
+        description = lib.mdDoc "The address to bind to. Localhost if null";
+      };
+      port = mkOption {
+        type = natural';
+        default = 10030;
+        description = lib.mdDoc "Tcp port to bind to";
+      };
+    };
+  };
+
+  unixSocket = with types; {
+    options = {
+      path = mkOption {
+        type = path;
+        default = "/run/postgrey.sock";
+        description = lib.mdDoc "Path of the unix socket";
+      };
+
+      mode = mkOption {
+        type = str;
+        default = "0777";
+        description = lib.mdDoc "Mode of the unix socket";
+      };
+    };
+  };
+
+in {
+  imports = [
+    (mkMergedOptionModule [ [ "services" "postgrey" "inetAddr" ] [ "services" "postgrey" "inetPort" ] ] [ "services" "postgrey" "socket" ] (config: let
+        value = p: getAttrFromPath p config;
+        inetAddr = [ "services" "postgrey" "inetAddr" ];
+        inetPort = [ "services" "postgrey" "inetPort" ];
+      in
+        if value inetAddr == null
+        then { path = "/run/postgrey.sock"; }
+        else { addr = value inetAddr; port = value inetPort; }
+    ))
+  ];
+
+  options = {
+    services.postgrey = with types; {
+      enable = mkOption {
+        type = bool;
+        default = false;
+        description = lib.mdDoc "Whether to run the Postgrey daemon";
+      };
+      socket = mkOption {
+        type = socket;
+        default = {
+          path = "/run/postgrey.sock";
+          mode = "0777";
+        };
+        example = {
+          addr = "127.0.0.1";
+          port = 10030;
+        };
+        description = lib.mdDoc "Socket to bind to";
+      };
+      greylistText = mkOption {
+        type = str;
+        default = "Greylisted for %%s seconds";
+        description = lib.mdDoc "Response status text for greylisted messages; use %%s for seconds left until greylisting is over and %%r for mail domain of recipient";
+      };
+      greylistAction = mkOption {
+        type = str;
+        default = "DEFER_IF_PERMIT";
+        description = lib.mdDoc "Response status for greylisted messages (see access(5))";
+      };
+      greylistHeader = mkOption {
+        type = str;
+        default = "X-Greylist: delayed %%t seconds by postgrey-%%v at %%h; %%d";
+        description = lib.mdDoc "Prepend header to greylisted mails; use %%t for seconds delayed due to greylisting, %%v for the version of postgrey, %%d for the date, and %%h for the host";
+      };
+      delay = mkOption {
+        type = natural;
+        default = 300;
+        description = lib.mdDoc "Greylist for N seconds";
+      };
+      maxAge = mkOption {
+        type = natural;
+        default = 35;
+        description = lib.mdDoc "Delete entries from whitelist if they haven't been seen for N days";
+      };
+      retryWindow = mkOption {
+        type = either str natural;
+        default = 2;
+        example = "12h";
+        description = lib.mdDoc "Allow N days for the first retry. Use string with appended 'h' to specify time in hours";
+      };
+      lookupBySubnet = mkOption {
+        type = bool;
+        default = true;
+        description = lib.mdDoc "Strip the last N bits from IP addresses, determined by IPv4CIDR and IPv6CIDR";
+      };
+      IPv4CIDR = mkOption {
+        type = natural;
+        default = 24;
+        description = lib.mdDoc "Strip N bits from IPv4 addresses if lookupBySubnet is true";
+      };
+      IPv6CIDR = mkOption {
+        type = natural;
+        default = 64;
+        description = lib.mdDoc "Strip N bits from IPv6 addresses if lookupBySubnet is true";
+      };
+      privacy = mkOption {
+        type = bool;
+        default = true;
+        description = lib.mdDoc "Store data using one-way hash functions (SHA1)";
+      };
+      autoWhitelist = mkOption {
+        type = nullOr natural';
+        default = 5;
+        description = lib.mdDoc "Whitelist clients after successful delivery of N messages";
+      };
+      whitelistClients = mkOption {
+        type = listOf path;
+        default = [];
+        description = lib.mdDoc "Client address whitelist files (see postgrey(8))";
+      };
+      whitelistRecipients = mkOption {
+        type = listOf path;
+        default = [];
+        description = lib.mdDoc "Recipient address whitelist files (see postgrey(8))";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.postgrey ];
+
+    users = {
+      users = {
+        postgrey = {
+          description = "Postgrey Daemon";
+          uid = config.ids.uids.postgrey;
+          group = "postgrey";
+        };
+      };
+      groups = {
+        postgrey = {
+          gid = config.ids.gids.postgrey;
+        };
+      };
+    };
+
+    systemd.services.postgrey = let
+      bind-flag = if cfg.socket ? path then
+        "--unix=${cfg.socket.path} --socketmode=${cfg.socket.mode}"
+      else
+        ''--inet=${optionalString (cfg.socket.addr != null) (cfg.socket.addr + ":")}${toString cfg.socket.port}'';
+    in {
+      description = "Postfix Greylisting Service";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "postfix.service" ];
+      preStart = ''
+        mkdir -p /var/postgrey
+        chown postgrey:postgrey /var/postgrey
+        chmod 0770 /var/postgrey
+      '';
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = ''${pkgs.postgrey}/bin/postgrey \
+          ${bind-flag} \
+          --group=postgrey --user=postgrey \
+          --dbdir=/var/postgrey \
+          --delay=${toString cfg.delay} \
+          --max-age=${toString cfg.maxAge} \
+          --retry-window=${toString cfg.retryWindow} \
+          ${if cfg.lookupBySubnet then "--lookup-by-subnet" else "--lookup-by-host"} \
+          --ipv4cidr=${toString cfg.IPv4CIDR} --ipv6cidr=${toString cfg.IPv6CIDR} \
+          ${optionalString cfg.privacy "--privacy"} \
+          --auto-whitelist-clients=${toString (if cfg.autoWhitelist == null then 0 else cfg.autoWhitelist)} \
+          --greylist-action=${cfg.greylistAction} \
+          --greylist-text="${cfg.greylistText}" \
+          --x-greylist-header="${cfg.greylistHeader}" \
+          ${concatMapStringsSep " " (x: "--whitelist-clients=" + x) cfg.whitelistClients} \
+          ${concatMapStringsSep " " (x: "--whitelist-recipients=" + x) cfg.whitelistRecipients}
+        '';
+        Restart = "always";
+        RestartSec = 5;
+        TimeoutSec = 10;
+      };
+    };
+
+  };
+
+}
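
A minimal usage sketch; the inet socket shown matches the option's own example.
The Postfix-side `check_policy_service` entry is an assumption about how the two
services are typically wired together, not something this module configures:

  services.postgrey = {
    enable = true;
    socket = { addr = "127.0.0.1"; port = 10030; };
    delay = 120;
  };
  # Hypothetical Postfix-side wiring (not set up by this module):
  services.postfix.config.smtpd_recipient_restrictions =
    "permit_mynetworks, reject_unauth_destination, check_policy_service inet:127.0.0.1:10030";
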
diff --git a/nixpkgs/nixos/modules/services/mail/postsrsd.nix b/nixpkgs/nixos/modules/services/mail/postsrsd.nix
new file mode 100644
index 000000000000..41301c8697d7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/postsrsd.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.postsrsd;
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.postsrsd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the postsrsd SRS server for Postfix.";
+      };
+
+      secretsFile = mkOption {
+        type = types.path;
+        default = "/var/lib/postsrsd/postsrsd.secret";
+        description = lib.mdDoc "Secret keys used for signing and verification";
+      };
+
+      domain = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Domain name for rewrite";
+      };
+
+      separator = mkOption {
+        type = types.enum ["-" "=" "+"];
+        default = "=";
+        description = lib.mdDoc "First separator character in generated addresses";
+      };
+
+      # bindAddress = mkOption { # uncomment once 1.5 is released
+      #   type = types.str;
+      #   default = "127.0.0.1";
+      #   description = "Socket listen address";
+      # };
+
+      forwardPort = mkOption {
+        type = types.int;
+        default = 10001;
+        description = lib.mdDoc "Port for the forward SRS lookup";
+      };
+
+      reversePort = mkOption {
+        type = types.int;
+        default = 10002;
+        description = lib.mdDoc "Port for the reverse SRS lookup";
+      };
+
+      timeout = mkOption {
+        type = types.int;
+        default = 1800;
+        description = lib.mdDoc "Timeout for idle client connections in seconds";
+      };
+
+      excludeDomains = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Origin domains to exclude from rewriting in addition to primary domain";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "postsrsd";
+        description = lib.mdDoc "User for the daemon";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "postsrsd";
+        description = lib.mdDoc "Group for the daemon";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.postsrsd.domain = mkDefault config.networking.hostName;
+
+    users.users = optionalAttrs (cfg.user == "postsrsd") {
+      postsrsd = {
+        group = cfg.group;
+        uid = config.ids.uids.postsrsd;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "postsrsd") {
+      postsrsd.gid = config.ids.gids.postsrsd;
+    };
+
+    systemd.services.postsrsd = {
+      description = "PostSRSd SRS rewriting server";
+      after = [ "network.target" ];
+      before = [ "postfix.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ pkgs.coreutils ];
+
+      serviceConfig = {
+        ExecStart = ''${pkgs.postsrsd}/sbin/postsrsd "-s${cfg.secretsFile}" "-d${cfg.domain}" -a${cfg.separator} -f${toString cfg.forwardPort} -r${toString cfg.reversePort} -t${toString cfg.timeout} "-X${concatStringsSep "," cfg.excludeDomains}"'';
+        User = cfg.user;
+        Group = cfg.group;
+        PermissionsStartOnly = true;
+      };
+
+      preStart = ''
+        if [ ! -e "${cfg.secretsFile}" ]; then
+          echo "WARNING: secrets file not found, autogenerating!"
+          DIR="$(dirname "${cfg.secretsFile}")"
+          if [ ! -d "$DIR" ]; then
+            mkdir -p -m750 "$DIR"
+            chown "${cfg.user}:${cfg.group}" "$DIR"
+          fi
+          dd if=/dev/random bs=18 count=1 | base64 > "${cfg.secretsFile}"
+          chmod 600 "${cfg.secretsFile}"
+        fi
+        chown "${cfg.user}:${cfg.group}" "${cfg.secretsFile}"
+      '';
+    };
+
+  };
+}
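
A minimal usage sketch; the domain names are hypothetical placeholders (the domain
defaults to `networking.hostName` via the `mkDefault` above):

  services.postsrsd = {
    enable = true;
    domain = "example.com";
    excludeDomains = [ "lists.example.com" ];
  };
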
diff --git a/nixpkgs/nixos/modules/services/mail/public-inbox.nix b/nixpkgs/nixos/modules/services/mail/public-inbox.nix
new file mode 100644
index 000000000000..0464b7164149
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/public-inbox.nix
@@ -0,0 +1,596 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.public-inbox;
+  stateDir = "/var/lib/public-inbox";
+
+  gitIni = pkgs.formats.gitIni { listsAsDuplicateKeys = true; };
+  iniAtom = elemAt gitIni.type/*attrsOf*/.functor.wrapped/*attrsOf*/.functor.wrapped/*either*/.functor.wrapped 0;
+
+  useSpamAssassin = cfg.settings.publicinboxmda.spamcheck == "spamc" ||
+                    cfg.settings.publicinboxwatch.spamcheck == "spamc";
+
+  publicInboxDaemonOptions = proto: defaultPort: {
+    args = mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = lib.mdDoc "Command-line arguments to pass to {manpage}`public-inbox-${proto}d(1)`.";
+    };
+    port = mkOption {
+      type = with types; nullOr (either str port);
+      default = defaultPort;
+      description = lib.mdDoc ''
+        Listening port.
+        Beware that public-inbox uses well-known port numbers to decide whether to enable TLS or not.
+        Set to null and use `systemd.sockets.public-inbox-${proto}d.listenStreams`
+        if you need a more advanced listening setup.
+      '';
+    };
+    cert = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "/path/to/fullchain.pem";
+      description = lib.mdDoc "Path to TLS certificate to use for connections to {manpage}`public-inbox-${proto}d(1)`.";
+    };
+    key = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "/path/to/key.pem";
+      description = lib.mdDoc "Path to TLS key to use for connections to {manpage}`public-inbox-${proto}d(1)`.";
+    };
+  };
+
+  serviceConfig = srv:
+    let proto = removeSuffix "d" srv;
+        needNetwork = builtins.hasAttr proto cfg && cfg.${proto}.port == null;
+    in {
+    serviceConfig = {
+      # Enable JIT-compiled C (via Inline::C)
+      Environment = [ "PERL_INLINE_DIRECTORY=/run/public-inbox-${srv}/perl-inline" ];
+      # NonBlocking is REQUIRED to avoid a race condition
+      # if running simultaneous services.
+      NonBlocking = true;
+      #LimitNOFILE = 30000;
+      User = config.users.users."public-inbox".name;
+      Group = config.users.groups."public-inbox".name;
+      RuntimeDirectory = [
+          "public-inbox-${srv}/perl-inline"
+        ];
+      RuntimeDirectoryMode = "700";
+      # This is for BindPaths= and BindReadOnlyPaths=
+      # to allow traversal of directories they create inside RootDirectory=
+      UMask = "0066";
+      StateDirectory = ["public-inbox"];
+      StateDirectoryMode = "0750";
+      WorkingDirectory = stateDir;
+      BindReadOnlyPaths = [
+          "/etc"
+          "/run/systemd"
+          "${config.i18n.glibcLocales}"
+        ] ++
+        mapAttrsToList (name: inbox: inbox.description) cfg.inboxes ++
+        # Without confinement the whole Nix store
+        # is made available to the service
+        optionals (!config.systemd.services."public-inbox-${srv}".confinement.enable) [
+          "${pkgs.dash}/bin/dash:/bin/sh"
+          builtins.storeDir
+        ];
+      # The following options are only for optimizing:
+      # systemd-analyze security public-inbox-'*'
+      AmbientCapabilities = "";
+      CapabilityBoundingSet = "";
+      # ProtectClock= adds DeviceAllow=char-rtc r
+      DeviceAllow = "";
+      LockPersonality = true;
+      MemoryDenyWriteExecute = true;
+      NoNewPrivileges = true;
+      PrivateNetwork = mkDefault (!needNetwork);
+      ProcSubset = "pid";
+      ProtectClock = true;
+      ProtectHome = "tmpfs";
+      ProtectHostname = true;
+      ProtectKernelLogs = true;
+      ProtectProc = "invisible";
+      #ProtectSystem = "strict";
+      RemoveIPC = true;
+      RestrictAddressFamilies = [ "AF_UNIX" ] ++
+        optionals needNetwork [ "AF_INET" "AF_INET6" ];
+      RestrictNamespaces = true;
+      RestrictRealtime = true;
+      RestrictSUIDSGID = true;
+      SystemCallFilter = [
+        "@system-service"
+        "~@aio" "~@chown" "~@keyring" "~@memlock" "~@resources"
+        # Not removing @setuid and @privileged because Inline::C needs them.
+        # Not removing @timer because git upload-pack needs it.
+      ];
+      SystemCallArchitectures = "native";
+
+      # The following options are redundant when confinement is enabled
+      RootDirectory = "/var/empty";
+      TemporaryFileSystem = "/";
+      PrivateMounts = true;
+      MountAPIVFS = true;
+      PrivateDevices = true;
+      PrivateTmp = true;
+      PrivateUsers = true;
+      ProtectControlGroups = true;
+      ProtectKernelModules = true;
+      ProtectKernelTunables = true;
+    };
+    confinement = {
+      # Until we agree upon doing it directly here in NixOS
+      # https://github.com/NixOS/nixpkgs/pull/104457#issuecomment-1115768447
+      # let the user choose to enable the confinement with:
+      # systemd.services.public-inbox-httpd.confinement.enable = true;
+      # systemd.services.public-inbox-imapd.confinement.enable = true;
+      # systemd.services.public-inbox-init.confinement.enable = true;
+      # systemd.services.public-inbox-nntpd.confinement.enable = true;
+      #enable = true;
+      mode = "full-apivfs";
+      # Inline::C needs a /bin/sh, and dash is enough
+      binSh = "${pkgs.dash}/bin/dash";
+      packages = [
+          pkgs.iana-etc
+          (getLib pkgs.nss)
+          pkgs.tzdata
+        ];
+    };
+  };
+in
+
+{
+  options.services.public-inbox = {
+    enable = mkEnableOption (lib.mdDoc "the public-inbox mail archiver");
+    package = mkOption {
+      type = types.package;
+      default = pkgs.public-inbox;
+      defaultText = literalExpression "pkgs.public-inbox";
+      description = lib.mdDoc "public-inbox package to use.";
+    };
+    path = mkOption {
+      type = with types; listOf package;
+      default = [];
+      example = literalExpression "with pkgs; [ spamassassin ]";
+      description = lib.mdDoc ''
+        Additional packages to place in the path of public-inbox-mda,
+        public-inbox-watch, etc.
+      '';
+    };
+    inboxes = mkOption {
+      description = lib.mdDoc ''
+        Inboxes to configure, where attribute names are inbox names.
+      '';
+      default = {};
+      type = types.attrsOf (types.submodule ({name, ...}: {
+        freeformType = types.attrsOf iniAtom;
+        options.inboxdir = mkOption {
+          type = types.str;
+          default = "${stateDir}/inboxes/${name}";
+          description = lib.mdDoc "The absolute path to the directory which hosts the public-inbox.";
+        };
+        options.address = mkOption {
+          type = with types; listOf str;
+          example = "example-discuss@example.org";
+          description = lib.mdDoc "The email addresses of the public-inbox.";
+        };
+        options.url = mkOption {
+          type = types.nonEmptyStr;
+          example = "https://example.org/lists/example-discuss";
+          description = lib.mdDoc "URL where this inbox can be accessed over HTTP.";
+        };
+        options.description = mkOption {
+          type = types.str;
+          example = "user/dev discussion of public-inbox itself";
+          description = lib.mdDoc "User-visible description for the repository.";
+          apply = pkgs.writeText "public-inbox-description-${name}";
+        };
+        options.hide = mkOption {
+          type = with types; listOf (enum [ "www" "manifest" ]);
+          default = [];
+          example = [ "www" "manifest" ];
+          description = lib.mdDoc "Listings to hide the inbox from";
+        };
+        options.newsgroup = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc "NNTP group name for the inbox.";
+        };
+        options.watch = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = lib.mdDoc "Paths for {manpage}`public-inbox-watch(1)` to monitor for new mail.";
+          example = [ "maildir:/path/to/test.example.com.git" ];
+        };
+        options.watchheader = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          example = "List-Id:<test@example.com>";
+          description = lib.mdDoc ''
+            If specified, {manpage}`public-inbox-watch(1)` will only process
+            mail containing a matching header.
+          '';
+        };
+        options.coderepo = mkOption {
+          type = (types.listOf (types.enum (attrNames cfg.settings.coderepo))) // {
+            description = "list of coderepo names";
+          };
+          default = [];
+          description = lib.mdDoc "Nicknames of a 'coderepo' section associated with the inbox.";
+        };
+      }));
+    };
+    imap = {
+      enable = mkEnableOption (lib.mdDoc "the public-inbox IMAP server");
+    } // publicInboxDaemonOptions "imap" 993;
+    http = {
+      enable = mkEnableOption (lib.mdDoc "the public-inbox HTTP server");
+      mounts = mkOption {
+        type = with types; listOf str;
+        default = [ "/" ];
+        example = [ "/lists/archives" ];
+        description = lib.mdDoc ''
+          Root paths or URLs that public-inbox will be served on.
+          If domain parts are present, only requests to those
+          domains will be accepted.
+        '';
+      };
+      args = (publicInboxDaemonOptions "http" 80).args;
+      port = mkOption {
+        type = with types; nullOr (either str port);
+        default = 80;
+        example = "/run/public-inbox-httpd.sock";
+        description = lib.mdDoc ''
+          Listening port or systemd's ListenStream= entry
+          to be used behind a reverse proxy, e.g. in nginx:
+          `locations."/inbox".proxyPass = "http://unix:''${config.services.public-inbox.http.port}:/inbox";`
+          Set to null and use `systemd.sockets.public-inbox-httpd.listenStreams`
+          if you need a more advanced listening setup.
+        '';
+      };
+    };
+    mda = {
+      enable = mkEnableOption (lib.mdDoc "the public-inbox Mail Delivery Agent");
+      args = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc "Command-line arguments to pass to {manpage}`public-inbox-mda(1)`.";
+      };
+    };
+    postfix.enable = mkEnableOption (lib.mdDoc "the integration into Postfix");
+    nntp = {
+      enable = mkEnableOption (lib.mdDoc "the public-inbox NNTP server");
+    } // publicInboxDaemonOptions "nntp" 563;
+    spamAssassinRules = mkOption {
+      type = with types; nullOr path;
+      default = "${cfg.package.sa_config}/user/.spamassassin/user_prefs";
+      defaultText = literalExpression "\${cfg.package.sa_config}/user/.spamassassin/user_prefs";
+      description = lib.mdDoc "SpamAssassin configuration specific to public-inbox.";
+    };
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Settings for the [public-inbox config file](https://public-inbox.org/public-inbox-config.html).
+      '';
+      default = {};
+      type = types.submodule {
+        freeformType = gitIni.type;
+        options.publicinbox = mkOption {
+          default = {};
+          description = lib.mdDoc "public inboxes";
+          type = types.submodule {
+            # Support both global options like `services.public-inbox.settings.publicinbox.imapserver`
+            # and inbox specific options like `services.public-inbox.settings.publicinbox.foo.address`.
+            freeformType = with types; attrsOf (oneOf [ iniAtom (attrsOf iniAtom) ]);
+
+            options.css = mkOption {
+              type = with types; listOf str;
+              default = [];
+              description = lib.mdDoc "The local path name of a CSS file for the PSGI web interface.";
+            };
+            options.imapserver = mkOption {
+              type = with types; listOf str;
+              default = [];
+              example = [ "imap.public-inbox.org" ];
+              description = lib.mdDoc "IMAP URLs to this public-inbox instance";
+            };
+            options.nntpserver = mkOption {
+              type = with types; listOf str;
+              default = [];
+              example = [ "nntp://news.public-inbox.org" "nntps://news.public-inbox.org" ];
+              description = lib.mdDoc "NNTP URLs to this public-inbox instance";
+            };
+            options.pop3server = mkOption {
+              type = with types; listOf str;
+              default = [];
+              example = [ "pop.public-inbox.org" ];
+              description = lib.mdDoc "POP3 URLs to this public-inbox instance";
+            };
+            options.sourceinfo = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              example = ''git clone <a href="https://example.com/">https://example.com/</a>'';
+              description = lib.mdDoc "HTML info about public-inbox's source code";
+            };
+            options.wwwlisting = mkOption {
+              type = with types; enum [ "all" "404" "match=domain" ];
+              default = "404";
+              description = lib.mdDoc ''
+                Controls which lists (if any) are listed when the root
+                public-inbox URL is accessed over HTTP.
+              '';
+            };
+          };
+        };
+        options.publicinboxmda.spamcheck = mkOption {
+          type = with types; enum [ "spamc" "none" ];
+          default = "none";
+          description = lib.mdDoc ''
+            If set to spamc, {manpage}`public-inbox-watch(1)` will filter spam
+            using SpamAssassin.
+          '';
+        };
+        options.publicinboxwatch.spamcheck = mkOption {
+          type = with types; enum [ "spamc" "none" ];
+          default = "none";
+          description = lib.mdDoc ''
+            If set to spamc, {manpage}`public-inbox-watch(1)` will filter spam
+            using SpamAssassin.
+          '';
+        };
+        options.publicinboxwatch.watchspam = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          example = "maildir:/path/to/spam";
+          description = lib.mdDoc ''
+            If set, mail in this maildir will be trained as spam and
+            deleted from all watched inboxes
+          '';
+        };
+        options.coderepo = mkOption {
+          default = {};
+          description = lib.mdDoc "code repositories";
+          type = types.attrsOf (types.submodule {
+            freeformType = types.attrsOf iniAtom;
+            options.cgitUrl = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = lib.mdDoc "URL of a cgit instance";
+            };
+            options.dir = mkOption {
+              type = types.str;
+              description = lib.mdDoc "Path to a git repository";
+            };
+          });
+        };
+      };
+    };
+    openFirewall = mkEnableOption (lib.mdDoc "opening the firewall when using a port option");
+  };
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = config.services.spamassassin.enable || !useSpamAssassin;
+        message = ''
+          public-inbox is configured to use SpamAssassin, but
+          services.spamassassin.enable is false.  If you don't need
+          spam checking, set `services.public-inbox.settings.publicinboxmda.spamcheck' and
+          `services.public-inbox.settings.publicinboxwatch.spamcheck' to null.
+        '';
+      }
+      { assertion = cfg.path != [] || !useSpamAssassin;
+        message = ''
+          public-inbox is configured to use SpamAssassin, but there is
+          no spamc executable in services.public-inbox.path.  If you
+          don't need spam checking, set
+          `services.public-inbox.settings.publicinboxmda.spamcheck' and
+          `services.public-inbox.settings.publicinboxwatch.spamcheck' to null.
+        '';
+      }
+    ];
+    services.public-inbox.settings =
+      filterAttrsRecursive (n: v: v != null) {
+        publicinbox = mapAttrs (n: filterAttrs (n: v: n != "description")) cfg.inboxes;
+    };
+    users = {
+      users.public-inbox = {
+        home = stateDir;
+        group = "public-inbox";
+        isSystemUser = true;
+      };
+      groups.public-inbox = {};
+    };
+    networking.firewall = mkIf cfg.openFirewall
+      { allowedTCPPorts = mkMerge
+        (map (proto: (mkIf (cfg.${proto}.enable && types.port.check cfg.${proto}.port) [ cfg.${proto}.port ]))
+        ["imap" "http" "nntp"]);
+      };
+    services.postfix = mkIf (cfg.postfix.enable && cfg.mda.enable) {
+      # Not sure limiting to 1 is necessary, but better safe than sorry.
+      config.public-inbox_destination_recipient_limit = "1";
+
+      # Register the addresses as existing
+      virtual =
+        concatStringsSep "\n" (mapAttrsToList (_: inbox:
+          concatMapStringsSep "\n" (address:
+            "${address} ${address}"
+          ) inbox.address
+        ) cfg.inboxes);
+
+      # Deliver the addresses with the public-inbox transport
+      transport =
+        concatStringsSep "\n" (mapAttrsToList (_: inbox:
+          concatMapStringsSep "\n" (address:
+            "${address} public-inbox:${address}"
+          ) inbox.address
+        ) cfg.inboxes);
+
+      # The public-inbox transport
+      masterConfig.public-inbox = {
+        type = "unix";
+        privileged = true; # Required for user=
+        command = "pipe";
+        args = [
+          "flags=X" # Report as a final delivery
+          "user=${with config.users; users."public-inbox".name + ":" + groups."public-inbox".name}"
+          # Specifying a nexthop when using the transport
+          # (e.g. test public-inbox:test) makes it possible to
+          # receive mails with an extension (e.g. test+foo).
+          "argv=${pkgs.writeShellScript "public-inbox-transport" ''
+            export HOME="${stateDir}"
+            export ORIGINAL_RECIPIENT="''${2:-1}"
+            export PATH="${makeBinPath cfg.path}:$PATH"
+            exec ${cfg.package}/bin/public-inbox-mda ${escapeShellArgs cfg.mda.args}
+          ''} \${original_recipient} \${nexthop}"
+        ];
+      };
+    };
+    systemd.sockets = mkMerge (map (proto:
+      mkIf (cfg.${proto}.enable && cfg.${proto}.port != null)
+        { "public-inbox-${proto}d" = {
+            listenStreams = [ (toString cfg.${proto}.port) ];
+            wantedBy = [ "sockets.target" ];
+          };
+        }
+      ) [ "imap" "http" "nntp" ]);
+    systemd.services = mkMerge [
+      (mkIf cfg.imap.enable
+        { public-inbox-imapd = mkMerge [(serviceConfig "imapd") {
+          after = [ "public-inbox-init.service" "public-inbox-watch.service" ];
+          requires = [ "public-inbox-init.service" ];
+          serviceConfig = {
+            ExecStart = escapeShellArgs (
+              [ "${cfg.package}/bin/public-inbox-imapd" ] ++
+              cfg.imap.args ++
+              optionals (cfg.imap.cert != null) [ "--cert" cfg.imap.cert ] ++
+              optionals (cfg.imap.key != null) [ "--key" cfg.imap.key ]
+            );
+          };
+        }];
+      })
+      (mkIf cfg.http.enable
+        { public-inbox-httpd = mkMerge [(serviceConfig "httpd") {
+          after = [ "public-inbox-init.service" "public-inbox-watch.service" ];
+          requires = [ "public-inbox-init.service" ];
+          serviceConfig = {
+            BindPathsReadOnly =
+              map (c: c.dir) (lib.attrValues cfg.settings.coderepo);
+            ExecStart = escapeShellArgs (
+              [ "${cfg.package}/bin/public-inbox-httpd" ] ++
+              cfg.http.args ++
+              # See https://public-inbox.org/public-inbox.git/tree/examples/public-inbox.psgi
+              # for upstream's example.
+              [ (pkgs.writeText "public-inbox.psgi" ''
+                #!${cfg.package.fullperl} -w
+                use strict;
+                use warnings;
+                use Plack::Builder;
+                use PublicInbox::WWW;
+
+                my $www = PublicInbox::WWW->new;
+                $www->preload;
+
+                builder {
+                  # If reached through a reverse proxy,
+                  # make it transparent by resetting some HTTP headers
+                  # used by public-inbox to generate URIs.
+                  enable 'ReverseProxy';
+
+                  # No need to send a response body if it's an HTTP HEAD request.
+                  enable 'Head';
+
+                  # Route according to configured domains and root paths.
+                  ${concatMapStrings (path: ''
+                  mount q(${path}) => sub { $www->call(@_); };
+                  '') cfg.http.mounts}
+                }
+              '') ]
+            );
+          };
+        }];
+      })
+      (mkIf cfg.nntp.enable
+        { public-inbox-nntpd = mkMerge [(serviceConfig "nntpd") {
+          after = [ "public-inbox-init.service" "public-inbox-watch.service" ];
+          requires = [ "public-inbox-init.service" ];
+          serviceConfig = {
+            ExecStart = escapeShellArgs (
+              [ "${cfg.package}/bin/public-inbox-nntpd" ] ++
+              cfg.nntp.args ++
+              optionals (cfg.nntp.cert != null) [ "--cert" cfg.nntp.cert ] ++
+              optionals (cfg.nntp.key != null) [ "--key" cfg.nntp.key ]
+            );
+          };
+        }];
+      })
+      (mkIf (any (inbox: inbox.watch != []) (attrValues cfg.inboxes)
+        || cfg.settings.publicinboxwatch.watchspam != null)
+        { public-inbox-watch = mkMerge [(serviceConfig "watch") {
+          inherit (cfg) path;
+          wants = [ "public-inbox-init.service" ];
+          requires = [ "public-inbox-init.service" ] ++
+            optional (cfg.settings.publicinboxwatch.spamcheck == "spamc") "spamassassin.service";
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            ExecStart = "${cfg.package}/bin/public-inbox-watch";
+            ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          };
+        }];
+      })
+      ({ public-inbox-init = let
+          PI_CONFIG = gitIni.generate "public-inbox.ini"
+            (filterAttrsRecursive (n: v: v != null) cfg.settings);
+          in mkMerge [(serviceConfig "init") {
+          wantedBy = [ "multi-user.target" ];
+          restartIfChanged = true;
+          restartTriggers = [ PI_CONFIG ];
+          script = ''
+            set -ux
+            install -D -p ${PI_CONFIG} ${stateDir}/.public-inbox/config
+            '' + optionalString useSpamAssassin ''
+              install -m 0700 -o spamd -d ${stateDir}/.spamassassin
+              ${optionalString (cfg.spamAssassinRules != null) ''
+                ln -sf ${cfg.spamAssassinRules} ${stateDir}/.spamassassin/user_prefs
+              ''}
+            '' + concatStrings (mapAttrsToList (name: inbox: ''
+              if [ ! -e ${escapeShellArg inbox.inboxdir} ]; then
+                # public-inbox-init creates an inbox and adds it to a config file.
+                # It tries to atomically write the config file by creating
+                # another file in the same directory, and renaming it.
+                # This has the sad consequence that we can't use
+                # /dev/null, or it would try to create a file in /dev.
+                conf_dir="$(mktemp -d)"
+
+                PI_CONFIG=$conf_dir/conf \
+                ${cfg.package}/bin/public-inbox-init -V2 \
+                  ${escapeShellArgs ([ name inbox.inboxdir inbox.url ] ++ inbox.address)}
+
+                rm -rf $conf_dir
+              fi
+
+              ln -sf ${inbox.description} ${escapeShellArg inbox.inboxdir}/description
+
+              export GIT_DIR=${escapeShellArg inbox.inboxdir}/all.git
+              if test -d "$GIT_DIR"; then
+                # Config is inherited by each epoch repository,
+                # so just needs to be set for all.git.
+                ${pkgs.git}/bin/git config core.sharedRepository 0640
+              fi
+            '') cfg.inboxes
+            );
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+            StateDirectory = [
+              "public-inbox/.public-inbox"
+              "public-inbox/.public-inbox/emergency"
+              "public-inbox/inboxes"
+            ];
+          };
+        }];
+      })
+    ];
+    environment.systemPackages = with pkgs; [ cfg.package ];
+  };
+  meta.maintainers = with lib.maintainers; [ julm qyliss ];
+}
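
A minimal usage sketch with one inbox served over HTTP and fed by Postfix; the
address and URL are hypothetical placeholders:

  services.public-inbox = {
    enable = true;
    http = { enable = true; port = 8080; };
    mda.enable = true;
    postfix.enable = true;
    inboxes.dev = {
      address = [ "dev@example.org" ];
      url = "https://example.org/lists/dev";
      description = "development discussion";
    };
  };

The `postfix.enable` integration registers each inbox address as a virtual address
and delivers it through the dedicated `public-inbox` pipe transport defined in the
`services.postfix` block above.
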
diff --git a/nixpkgs/nixos/modules/services/mail/roundcube.nix b/nixpkgs/nixos/modules/services/mail/roundcube.nix
new file mode 100644
index 000000000000..4e29f567ed92
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/roundcube.nix
@@ -0,0 +1,275 @@
+{ lib, config, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.roundcube;
+  fpm = config.services.phpfpm.pools.roundcube;
+  localDB = cfg.database.host == "localhost";
+  user = cfg.database.username;
+  phpWithPspell = pkgs.php81.withExtensions ({ enabled, all }: [ all.pspell ] ++ enabled);
+in
+{
+  options.services.roundcube = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable roundcube.
+
+        Also enables nginx virtual host management.
+        Further nginx configuration can be done by adapting `services.nginx.virtualHosts.<name>`.
+        See [](#opt-services.nginx.virtualHosts) for further information.
+      '';
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      example = "webmail.example.com";
+      description = lib.mdDoc "Hostname to use for the nginx vhost";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.roundcube;
+      defaultText = literalExpression "pkgs.roundcube";
+
+      example = literalExpression ''
+        roundcube.withPlugins (plugins: [ plugins.persistent_login ])
+      '';
+
+      description = lib.mdDoc ''
+        The package which contains roundcube's sources. Can be overridden to create
+        an environment which contains roundcube and third-party plugins.
+      '';
+    };
+
+    database = {
+      username = mkOption {
+        type = types.str;
+        default = "roundcube";
+        description = lib.mdDoc ''
+          Username for the postgresql connection.
+          If `database.host` is set to `localhost`, a unix user and group of the same name will be created as well.
+        '';
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          Host of the postgresql server. If this is not set to
+          `localhost`, you have to create the
+          postgresql user and database yourself, with appropriate
+          permissions.
+        '';
+      };
+      password = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Password for the postgresql connection. Do not use: the password will be stored world readable in the store; use `passwordFile` instead.";
+        default = "";
+      };
+      passwordFile = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Password file for the postgresql connection.
+          Must be formatted according to the PostgreSQL .pgpass standard (see https://www.postgresql.org/docs/current/libpq-pgpass.html),
+          but must contain only one line, no comments, and be readable by user `nginx`.
+          Ignored if `database.host` is set to `localhost`, as peer authentication will be used.
+        '';
+      };
+      dbname = mkOption {
+        type = types.str;
+        default = "roundcube";
+        description = lib.mdDoc "Name of the postgresql database";
+      };
+    };
+
+    plugins = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        List of roundcube plugins to enable. Currently, only those directly shipped with Roundcube are supported.
+      '';
+    };
+
+    dicts = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression "with pkgs.aspellDicts; [ en fr de ]";
+      description = lib.mdDoc ''
+        List of aspell dictionaries for spell checking. If empty, spell checking is disabled.
+      '';
+    };
+
+    maxAttachmentSize = mkOption {
+      type = types.int;
+      default = 18;
+      description = lib.mdDoc ''
+        The maximum attachment size in MB.
+
+        Note: Since Roundcube only uses 70% of the maximum upload size configured in PHP,
+        30% is automatically added to [](#opt-services.roundcube.maxAttachmentSize).
+      '';
+      apply = configuredMaxAttachmentSize: "${toString (configuredMaxAttachmentSize * 1.3)}M";
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "Extra configuration for roundcube webmail instance";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # backward compatibility: if password is set but not passwordFile, make one.
+    services.roundcube.database.passwordFile = mkIf (!localDB && cfg.database.password != "") (mkDefault ("${pkgs.writeText "roundcube-password" cfg.database.password}"));
+    warnings = lib.optional (!localDB && cfg.database.password != "") "services.roundcube.database.password is deprecated and insecure; use services.roundcube.database.passwordFile instead";
+
+    environment.etc."roundcube/config.inc.php".text = ''
+      <?php
+
+      ${lib.optionalString (!localDB) ''
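+        // Extract the password: take the first line of the .pgpass file, split it on
+        // unescaped ':' separators, keep the last field and undo the \: and \\ escapes.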
+        $password = file('${cfg.database.passwordFile}')[0];
+        $password = preg_split('~\\\\.(*SKIP)(*FAIL)|\:~s', $password);
+        $password = end($password);
+        $password = str_replace("\\:", ":", $password);
+        $password = str_replace("\\\\", "\\", $password);
+      ''}
+
+      $config = array();
+      $config['db_dsnw'] = 'pgsql://${cfg.database.username}${lib.optionalString (!localDB) ":' . $password . '"}@${if localDB then "unix(/run/postgresql)" else cfg.database.host}/${cfg.database.dbname}';
+      $config['log_driver'] = 'syslog';
+      $config['max_message_size'] =  '${cfg.maxAttachmentSize}';
+      $config['plugins'] = [${concatMapStringsSep "," (p: "'${p}'") cfg.plugins}];
+      $config['des_key'] = file_get_contents('/var/lib/roundcube/des_key');
+      $config['mime_types'] = '${pkgs.nginx}/conf/mime.types';
+      # Roundcube uses PHP-FPM which has `PrivateTmp = true;`
+      $config['temp_dir'] = '/tmp';
+      $config['enable_spellcheck'] = ${if cfg.dicts == [] then "false" else "true"};
+      # by default, spellchecking uses a third-party cloud service
+      $config['spellcheck_engine'] = 'pspell';
+      $config['spellcheck_languages'] = array(${lib.concatMapStringsSep ", " (dict: let p = builtins.parseDrvName dict.shortName; in "'${p.name}' => '${dict.fullName}'") cfg.dicts});
+
+      ${cfg.extraConfig}
+    '';
+
+    services.nginx = {
+      enable = true;
+      virtualHosts = {
+        ${cfg.hostName} = {
+          forceSSL = mkDefault true;
+          enableACME = mkDefault true;
+          locations."/" = {
+            root = cfg.package;
+            index = "index.php";
+            extraConfig = ''
+              location ~* \.php(/|$) {
+                fastcgi_split_path_info ^(.+\.php)(/.+)$;
+                fastcgi_pass unix:${fpm.socket};
+
+                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+                fastcgi_param PATH_INFO       $fastcgi_path_info;
+
+                include ${config.services.nginx.package}/conf/fastcgi_params;
+                include ${pkgs.nginx}/conf/fastcgi.conf;
+              }
+            '';
+          };
+        };
+      };
+    };
+
+    assertions = [
+      {
+        assertion = localDB -> cfg.database.username == cfg.database.dbname;
+        message = ''
+          When setting up a DB and its owner user, the owner and the DB name must be
+          equal!
+        '';
+      }
+    ];
+
+    services.postgresql = mkIf localDB {
+      enable = true;
+      ensureDatabases = [ cfg.database.dbname ];
+      ensureUsers = [ {
+        name = cfg.database.username;
+        ensureDBOwnership = true;
+      } ];
+    };
+
+    users.users.${user} = mkIf localDB {
+      group = user;
+      isSystemUser = true;
+      createHome = false;
+    };
+    users.groups.${user} = mkIf localDB {};
+
+    services.phpfpm.pools.roundcube = {
+      user = if localDB then user else "nginx";
+      phpOptions = ''
+        error_log = 'stderr'
+        log_errors = on
+        post_max_size = ${cfg.maxAttachmentSize}
+        upload_max_filesize = ${cfg.maxAttachmentSize}
+      '';
+      settings = mapAttrs (name: mkDefault) {
+        "listen.owner" = "nginx";
+        "listen.group" = "nginx";
+        "listen.mode" = "0660";
+        "pm" = "dynamic";
+        "pm.max_children" = 75;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 1;
+        "pm.max_spare_servers" = 20;
+        "pm.max_requests" = 500;
+        "catch_workers_output" = true;
+      };
+      phpPackage = phpWithPspell;
+      phpEnv.ASPELL_CONF = "dict-dir ${pkgs.aspellWithDicts (_: cfg.dicts)}/lib/aspell";
+    };
+    systemd.services.phpfpm-roundcube.after = [ "roundcube-setup.service" ];
+
+    # Restart on config changes.
+    systemd.services.phpfpm-roundcube.restartTriggers = [
+      config.environment.etc."roundcube/config.inc.php".source
+    ];
+
+    systemd.services.roundcube-setup = mkMerge [
+      (mkIf (cfg.database.host == "localhost") {
+        requires = [ "postgresql.service" ];
+        after = [ "postgresql.service" ];
+        path = [ config.services.postgresql.package ];
+      })
+      {
+        after = [ "network-online.target" ];
+        wantedBy = [ "multi-user.target" ];
+        script = let
+          psql = "${lib.optionalString (!localDB) "PGPASSFILE=${cfg.database.passwordFile}"} ${pkgs.postgresql}/bin/psql ${lib.optionalString (!localDB) "-h ${cfg.database.host} -U ${cfg.database.username} "} ${cfg.database.dbname}";
+        in
+        ''
+          version="$(${psql} -t <<< "select value from system where name = 'roundcube-version';" || true)"
+          if ! (grep -E '[a-zA-Z0-9]' <<< "$version"); then
+            ${psql} -f ${cfg.package}/SQL/postgres.initial.sql
+          fi
+
+          if [ ! -f /var/lib/roundcube/des_key ]; then
+            base64 /dev/urandom | head -c 24 > /var/lib/roundcube/des_key;
+            # we need to log out everyone in case the des_key changed
+            # from the default when upgrading from NixOS 19.09
+            ${psql} <<< 'TRUNCATE TABLE session;'
+          fi
+
+          ${phpWithPspell}/bin/php ${cfg.package}/bin/update.sh
+        '';
+        serviceConfig = {
+          Type = "oneshot";
+          StateDirectory = "roundcube";
+          User = if localDB then user else "nginx";
+          # so that the des_key is not world readable
+          StateDirectoryMode = "0700";
+        };
+      }
+    ];
+  };
+}
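A minimal usage sketch for the module above, assuming DNS and ACME already work for the chosen hostName; with the default database.host of "localhost" the module provisions the PostgreSQL role and database itself. The plugin names and the SMTP line in extraConfig are illustrative, not prescribed by the module:

{ pkgs, ... }:
{
  services.roundcube = {
    enable = true;
    hostName = "webmail.example.org";        # served through the generated nginx vhost
    dicts = with pkgs.aspellDicts; [ en ];   # a non-empty list switches on the pspell spellchecker
    plugins = [ "archive" "zipdownload" ];   # plugins shipped with Roundcube itself
    extraConfig = ''
      $config['smtp_host'] = 'tls://mail.example.org';
    '';
  };
}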
diff --git a/nixpkgs/nixos/modules/services/mail/rspamd.nix b/nixpkgs/nixos/modules/services/mail/rspamd.nix
new file mode 100644
index 000000000000..ca88d8122179
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/rspamd.nix
@@ -0,0 +1,446 @@
+{ config, options, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.rspamd;
+  opt = options.services.rspamd;
+  postfixCfg = config.services.postfix;
+
+  bindSocketOpts = {options, config, ... }: {
+    options = {
+      socket = mkOption {
+        type = types.str;
+        example = "localhost:11333";
+        description = lib.mdDoc ''
+          Socket for this worker to listen on in a format acceptable by rspamd.
+        '';
+      };
+      mode = mkOption {
+        type = types.str;
+        default = "0644";
+        description = lib.mdDoc "Mode to set on unix socket";
+      };
+      owner = mkOption {
+        type = types.str;
+        default = "${cfg.user}";
+        description = lib.mdDoc "Owner to set on unix socket";
+      };
+      group = mkOption {
+        type = types.str;
+        default = "${cfg.group}";
+        description = lib.mdDoc "Group to set on unix socket";
+      };
+      rawEntry = mkOption {
+        type = types.str;
+        internal = true;
+      };
+    };
+    config.rawEntry = let
+      maybeOption = option:
+        optionalString options.${option}.isDefined " ${option}=${config.${option}}";
+    in
+      if (!(hasPrefix "/" config.socket)) then "${config.socket}"
+      else "${config.socket}${maybeOption "mode"}${maybeOption "owner"}${maybeOption "group"}";
+  };
+
+  traceWarning = w: x: builtins.trace "warning: ${w}" x;
+
+  workerOpts = { name, options, ... }: {
+    options = {
+      enable = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = lib.mdDoc "Whether to run the rspamd worker.";
+      };
+      name = mkOption {
+        type = types.nullOr types.str;
+        default = name;
+        description = lib.mdDoc "Name of the worker";
+      };
+      type = mkOption {
+        type = types.nullOr (types.enum [
+          "normal" "controller" "fuzzy" "rspamd_proxy" "lua" "proxy"
+        ]);
+        description = lib.mdDoc ''
+          The type of this worker. The type `proxy` is
+          deprecated, only kept for backwards compatibility, and should be
+          replaced with `rspamd_proxy`.
+        '';
+        apply = let
+            from = "services.rspamd.workers.\"${name}\".type";
+            files = options.type.files;
+            warning = "The option `${from}` defined in ${showFiles files} has enum value `proxy` which has been renamed to `rspamd_proxy`";
+          in x: if x == "proxy" then traceWarning warning "rspamd_proxy" else x;
+      };
+      bindSockets = mkOption {
+        type = types.listOf (types.either types.str (types.submodule bindSocketOpts));
+        default = [];
+        description = lib.mdDoc ''
+          List of sockets to listen on, in a format acceptable by rspamd
+        '';
+        example = [{
+          socket = "/run/rspamd.sock";
+          mode = "0666";
+          owner = "rspamd";
+        } "*:11333"];
+        apply = value: map (each: if (isString each)
+          then if (isUnixSocket each)
+            then {socket = each; owner = cfg.user; group = cfg.group; mode = "0644"; rawEntry = "${each}";}
+            else {socket = each; rawEntry = "${each}";}
+          else each) value;
+      };
+      count = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Number of worker instances to run
+        '';
+      };
+      includes = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          List of files to include in configuration
+        '';
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Additional entries to put verbatim into worker section of rspamd config file.";
+      };
+    };
+    config = mkIf (name == "normal" || name == "controller" || name == "fuzzy" || name == "rspamd_proxy") {
+      type = mkDefault name;
+      includes = mkDefault [ "$CONFDIR/worker-${if name == "rspamd_proxy" then "proxy" else name}.inc" ];
+      bindSockets =
+        let
+          unixSocket = name: {
+            mode = "0660";
+            socket = "/run/rspamd/${name}.sock";
+            owner = cfg.user;
+            group = cfg.group;
+          };
+        in mkDefault (if name == "normal" then [(unixSocket "rspamd")]
+          else if name == "controller" then [ "localhost:11334" ]
+          else if name == "rspamd_proxy" then [ (unixSocket "proxy") ]
+          else [] );
+    };
+  };
+
+  isUnixSocket = socket: hasPrefix "/" (if (isString socket) then socket else socket.socket);
+
+  mkBindSockets = enabled: socks: concatStringsSep "\n  "
+    (flatten (map (each: "bind_socket = \"${each.rawEntry}\";") socks));
+
+  rspamdConfFile = pkgs.writeText "rspamd.conf"
+    ''
+      .include "$CONFDIR/common.conf"
+
+      options {
+        pidfile = "$RUNDIR/rspamd.pid";
+        .include "$CONFDIR/options.inc"
+        .include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/options.inc"
+        .include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/options.inc"
+      }
+
+      logging {
+        type = "syslog";
+        .include "$CONFDIR/logging.inc"
+        .include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/logging.inc"
+        .include(try=true; priority=10) "$LOCAL_CONFDIR/override.d/logging.inc"
+      }
+
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: let
+          includeName = if name == "rspamd_proxy" then "proxy" else name;
+          tryOverride = boolToString (value.extraConfig == "");
+        in ''
+        worker "${value.type}" {
+          type = "${value.type}";
+          ${optionalString (value.enable != null)
+            "enabled = ${if value.enable != false then "yes" else "no"};"}
+          ${mkBindSockets value.enable value.bindSockets}
+          ${optionalString (value.count != null) "count = ${toString value.count};"}
+          ${concatStringsSep "\n  " (map (each: ".include \"${each}\"") value.includes)}
+          .include(try=true; priority=1,duplicate=merge) "$LOCAL_CONFDIR/local.d/worker-${includeName}.inc"
+          .include(try=${tryOverride}; priority=10) "$LOCAL_CONFDIR/override.d/worker-${includeName}.inc"
+        }
+      '') cfg.workers)}
+
+      ${optionalString (cfg.extraConfig != "") ''
+        .include(priority=10) "$LOCAL_CONFDIR/override.d/extra-config.inc"
+      ''}
+   '';
+
+  filterFiles = files: filterAttrs (n: v: v.enable) files;
+  rspamdDir = pkgs.linkFarm "etc-rspamd-dir" (
+    (mapAttrsToList (name: file: { name = "local.d/${name}"; path = file.source; }) (filterFiles cfg.locals)) ++
+    (mapAttrsToList (name: file: { name = "override.d/${name}"; path = file.source; }) (filterFiles cfg.overrides)) ++
+    (optional (cfg.localLuaRules != null) { name = "rspamd.local.lua"; path = cfg.localLuaRules; }) ++
+    [ { name = "rspamd.conf"; path = rspamdConfFile; } ]
+  );
+
+  configFileModule = prefix: { name, config, ... }: {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether this file ${prefix} should be generated.  This
+          option allows specific ${prefix} files to be disabled.
+        '';
+      };
+
+      text = mkOption {
+        default = null;
+        type = types.nullOr types.lines;
+        description = lib.mdDoc "Text of the file.";
+      };
+
+      source = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Path of the source file.";
+      };
+    };
+    config = {
+      source = mkIf (config.text != null) (
+        let name' = "rspamd-${prefix}-" + baseNameOf name;
+        in mkDefault (pkgs.writeText name' config.text));
+    };
+  };
+
+  configOverrides =
+    (mapAttrs' (n: v: nameValuePair "worker-${if n == "rspamd_proxy" then "proxy" else n}.inc" {
+      text = v.extraConfig;
+    })
+    (filterAttrs (n: v: v.extraConfig != "") cfg.workers))
+    // (lib.optionalAttrs (cfg.extraConfig != "") {
+      "extra-config.inc".text = cfg.extraConfig;
+    });
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.rspamd = {
+
+      enable = mkEnableOption (lib.mdDoc "rspamd, the Rapid spam filtering system");
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to run the rspamd daemon in debug mode.";
+      };
+
+      locals = mkOption {
+        type = with types; attrsOf (submodule (configFileModule "locals"));
+        default = {};
+        description = lib.mdDoc ''
+          Local configuration files, written into {file}`/etc/rspamd/local.d/{name}`.
+        '';
+        example = literalExpression ''
+          { "redis.conf".source = "/nix/store/.../etc/dir/redis.conf";
+            "arc.conf".text = "allow_envfrom_empty = true;";
+          }
+        '';
+      };
+
+      overrides = mkOption {
+        type = with types; attrsOf (submodule (configFileModule "overrides"));
+        default = {};
+        description = lib.mdDoc ''
+          Overridden configuration files, written into {file}`/etc/rspamd/override.d/{name}`.
+        '';
+        example = literalExpression ''
+          { "redis.conf".source = "/nix/store/.../etc/dir/redis.conf";
+            "arc.conf".text = "allow_envfrom_empty = true;";
+          }
+        '';
+      };
+
+      localLuaRules = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          Path of file to link to {file}`/etc/rspamd/rspamd.local.lua` for local
+          rules written in Lua
+        '';
+      };
+
+      workers = mkOption {
+        type = with types; attrsOf (submodule workerOpts);
+        description = lib.mdDoc ''
+          Attribute set of workers to start.
+        '';
+        default = {
+          normal = {};
+          controller = {};
+        };
+        example = literalExpression ''
+          {
+            normal = {
+              includes = [ "$CONFDIR/worker-normal.inc" ];
+              bindSockets = [{
+                socket = "/run/rspamd/rspamd.sock";
+                mode = "0660";
+                owner = "''${config.${opt.user}}";
+                group = "''${config.${opt.group}}";
+              }];
+            };
+            controller = {
+              includes = [ "$CONFDIR/worker-controller.inc" ];
+              bindSockets = [ "[::1]:11334" ];
+            };
+          }
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration to add at the end of the rspamd configuration
+          file.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "rspamd";
+        description = lib.mdDoc ''
+          User to use when no root privileges are required.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "rspamd";
+        description = lib.mdDoc ''
+          Group to use when no root privileges are required.
+        '';
+      };
+
+      postfix = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Add rspamd milter to postfix main.conf";
+        };
+
+        config = mkOption {
+          type = with types; attrsOf (oneOf [ bool str (listOf str) ]);
+          description = lib.mdDoc ''
+            Extra settings merged into the postfix configuration
+          '';
+          default = {
+            smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
+            non_smtpd_milters = ["unix:/run/rspamd/rspamd-milter.sock"];
+          };
+        };
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.rspamd.overrides = configOverrides;
+    services.rspamd.workers = mkIf cfg.postfix.enable {
+      controller = {};
+      rspamd_proxy = {
+        bindSockets = [ {
+          mode = "0660";
+          socket = "/run/rspamd/rspamd-milter.sock";
+          owner = cfg.user;
+          group = postfixCfg.group;
+        } ];
+        extraConfig = ''
+          upstream "local" {
+            default = yes; # Self-scan upstreams are always default
+            self_scan = yes; # Enable self-scan
+          }
+        '';
+      };
+    };
+    services.postfix.config = mkIf cfg.postfix.enable cfg.postfix.config;
+
+    systemd.services.postfix = mkIf cfg.postfix.enable {
+      serviceConfig.SupplementaryGroups = [ postfixCfg.group ];
+    };
+
+    # Allow users to run 'rspamc' and 'rspamadm'.
+    environment.systemPackages = [ pkgs.rspamd ];
+
+    users.users.${cfg.user} = {
+      description = "rspamd daemon";
+      uid = config.ids.uids.rspamd;
+      group = cfg.group;
+    };
+
+    users.groups.${cfg.group} = {
+      gid = config.ids.gids.rspamd;
+    };
+
+    environment.etc.rspamd.source = rspamdDir;
+
+    systemd.services.rspamd = {
+      description = "Rspamd Service";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      restartTriggers = [ rspamdDir ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.rspamd}/bin/rspamd ${optionalString cfg.debug "-d"} -c /etc/rspamd/rspamd.conf -f";
+        Restart = "always";
+
+        User = "${cfg.user}";
+        Group = "${cfg.group}";
+        SupplementaryGroups = mkIf cfg.postfix.enable [ postfixCfg.group ];
+
+        RuntimeDirectory = "rspamd";
+        RuntimeDirectoryMode = "0755";
+        StateDirectory = "rspamd";
+        StateDirectoryMode = "0700";
+
+        AmbientCapabilities = [];
+        CapabilityBoundingSet = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        # we need to be able to chown the rspamd-milter socket to the postfix group
+        PrivateUsers = !cfg.postfix.enable;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "@system-service";
+        UMask = "0077";
+      };
+    };
+  };
+  imports = [
+    (mkRemovedOptionModule [ "services" "rspamd" "socketActivation" ]
+       "Socket activation never worked correctly and could at this time not be fixed and so was removed")
+    (mkRenamedOptionModule [ "services" "rspamd" "bindSocket" ] [ "services" "rspamd" "workers" "normal" "bindSockets" ])
+    (mkRenamedOptionModule [ "services" "rspamd" "bindUISocket" ] [ "services" "rspamd" "workers" "controller" "bindSockets" ])
+    (mkRemovedOptionModule [ "services" "rmilter" ] "Use services.rspamd.* instead to set up milter service")
+  ];
+}
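A usage sketch for the rspamd module, restricted to the options defined above; the milter_headers snippet is an illustrative local.d fragment, not something the module requires:

{
  services.rspamd = {
    enable = true;
    postfix.enable = true;                   # wires the rspamd_proxy milter socket into Postfix
    locals."milter_headers.conf".text = ''
      use = ["authentication-results", "x-spamd-bar"];
    '';
  };
}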
diff --git a/nixpkgs/nixos/modules/services/mail/rss2email.nix b/nixpkgs/nixos/modules/services/mail/rss2email.nix
new file mode 100644
index 000000000000..bd5cfd437838
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/rss2email.nix
@@ -0,0 +1,137 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rss2email;
+in {
+
+  ###### interface
+
+  options = {
+
+    services.rss2email = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable rss2email.";
+      };
+
+      to = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Mail address to which to send emails";
+      };
+
+      interval = mkOption {
+        type = types.str;
+        default = "12h";
+        description = lib.mdDoc "How often to check the feeds, in systemd interval format";
+      };
+
+      config = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool ]);
+        default = {};
+        description = lib.mdDoc ''
+          The configuration to give rss2email.
+
+          By default, the system-wide `sendmail` is used to send the
+          email. This matches rss2email's default when running
+          `r2e new`.
+
+          This set contains key-value associations that will be set in the
+          `[DEFAULT]` block along with the
+          `to` parameter.
+
+          See `man r2e` for more information on which
+          parameters are accepted.
+        '';
+      };
+
+      feeds = mkOption {
+        description = lib.mdDoc "The feeds to watch.";
+        type = types.attrsOf (types.submodule {
+          options = {
+            url = mkOption {
+              type = types.str;
+              description = lib.mdDoc "The URL at which to fetch the feed.";
+            };
+
+            to = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                Email address to which to send feed items.
+
+                If `null`, this will not be set in the
+                configuration file, and rss2email will make it default to
+                `rss2email.to`.
+              '';
+            };
+          };
+        });
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    users.groups = {
+      rss2email.gid = config.ids.gids.rss2email;
+    };
+
+    users.users = {
+      rss2email = {
+        description = "rss2email user";
+        uid = config.ids.uids.rss2email;
+        group = "rss2email";
+      };
+    };
+
+    environment.systemPackages = with pkgs; [ rss2email ];
+
+    services.rss2email.config.to = cfg.to;
+
+    system.activationScripts.rss2email = lib.stringAfter [ "users" ] ''
+      if [ -e /var/rss2email -a ! -e /var/lib/rss2email ]; then
+          mv /var/rss2email /var/lib/rss2email
+      fi
+    '';
+
+    systemd.services.rss2email = let
+      conf = pkgs.writeText "rss2email.cfg" (lib.generators.toINI {} ({
+          DEFAULT = cfg.config;
+        } // lib.mapAttrs' (name: feed: nameValuePair "feed.${name}" (
+          { inherit (feed) url; } //
+          lib.optionalAttrs (feed.to != null) { inherit (feed) to; }
+        )) cfg.feeds
+      ));
+    in
+    {
+      preStart = ''
+        if [ ! -f /var/lib/rss2email/db.json ]; then
+          echo '{"version":2,"feeds":[]}' > /var/lib/rss2email/db.json
+        fi
+      '';
+      path = [ pkgs.system-sendmail ];
+      serviceConfig = {
+        StateDirectory = "rss2email";
+        ExecStart =
+          "${pkgs.rss2email}/bin/r2e -c ${conf} -d /var/lib/rss2email/db.json run";
+        User = "rss2email";
+      };
+    };
+
+    systemd.timers.rss2email = {
+      partOf = [ "rss2email.service" ];
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnBootSec = "0";
+      timerConfig.OnUnitActiveSec = cfg.interval;
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
+}
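A usage sketch for rss2email, using only the options declared above; the feed URL and the extra `from` key are placeholders:

{
  services.rss2email = {
    enable = true;
    to = "feeds@example.org";
    interval = "6h";
    config.from = "rss2email@example.org";    # written into the [DEFAULT] block alongside `to`
    feeds.planet-nixos.url = "https://example.org/feed.xml";
  };
}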
diff --git a/nixpkgs/nixos/modules/services/mail/schleuder.nix b/nixpkgs/nixos/modules/services/mail/schleuder.nix
new file mode 100644
index 000000000000..2991418dd804
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/schleuder.nix
@@ -0,0 +1,162 @@
+{ config, pkgs, lib, ... }:
+let
+  cfg = config.services.schleuder;
+  settingsFormat = pkgs.formats.yaml { };
+  postfixMap = entries: lib.concatStringsSep "\n" (lib.mapAttrsToList (name: value: "${name} ${value}") entries);
+  writePostfixMap = name: entries: pkgs.writeText name (postfixMap entries);
+  configScript = pkgs.writeScript "schleuder-cfg" ''
+    #!${pkgs.runtimeShell}
+    set -exuo pipefail
+    umask 0077
+    ${pkgs.yq}/bin/yq \
+      --slurpfile overrides <(${pkgs.yq}/bin/yq . <${lib.escapeShellArg cfg.extraSettingsFile}) \
+      < ${settingsFormat.generate "schleuder.yml" cfg.settings} \
+      '. * $overrides[0]' \
+      > /etc/schleuder/schleuder.yml
+    chown schleuder: /etc/schleuder/schleuder.yml
+  '';
+in
+{
+  options.services.schleuder = {
+    enable = lib.mkEnableOption (lib.mdDoc "Schleuder secure remailer");
+    enablePostfix = lib.mkEnableOption (lib.mdDoc "automatic postfix integration") // { default = true; };
+    lists = lib.mkOption {
+      description = lib.mdDoc ''
+        List of list addresses that should be handled by Schleuder.
+
+        Note that this is only handled by the postfix integration, and
+        the setup of the lists, their members and their keys has to be
+        performed separately via schleuder's API, using a tool such as
+        schleuder-cli.
+      '';
+      type = lib.types.listOf lib.types.str;
+      default = [ ];
+      example = [ "widget-team@example.com" "security@example.com" ];
+    };
+    /* maybe one day....
+      domains = lib.mkOption {
+      description = "Domains for which all mail should be handled by Schleuder.";
+      type = lib.types.listOf lib.types.str;
+      default = [];
+      example = ["securelists.example.com"];
+      };
+    */
+    settings = lib.mkOption {
+      description = lib.mdDoc ''
+        Settings for schleuder.yml.
+
+        Check the [example configuration](https://0xacab.org/schleuder/schleuder/blob/master/etc/schleuder.yml) for possible values.
+      '';
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+        options.keyserver = lib.mkOption {
+          type = lib.types.str;
+          description = lib.mdDoc ''
+            Key server from which to fetch and update keys.
+
+            Note that NixOS uses a different default from upstream, since the upstream default sks-keyservers.net is deprecated.
+          '';
+          default = "keys.openpgp.org";
+        };
+      };
+      default = { };
+    };
+    extraSettingsFile = lib.mkOption {
+      description = lib.mdDoc "YAML file to merge into the schleuder config at runtime. This can be used for secrets such as API keys.";
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+    };
+    listDefaults = lib.mkOption {
+      description = lib.mdDoc ''
+        Default settings for lists (list-defaults.yml).
+
+        Check the [example configuration](https://0xacab.org/schleuder/schleuder/-/blob/master/etc/list-defaults.yml) for possible values.
+      '';
+      type = settingsFormat.type;
+      default = { };
+    };
+  };
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !(cfg.settings.api ? valid_api_keys);
+        message = ''
+          services.schleuder.settings.api.valid_api_keys is set. Defining API keys via NixOS config results in them being copied to the world-readable Nix store. Please use the extraSettingsFile option to store API keys in a non-public location.
+        '';
+      }
+      {
+        assertion = !(lib.any (db: db ? password) (lib.attrValues cfg.settings.database or {}));
+        message = ''
+          A password is defined for at least one database in services.schleuder.settings.database. Defining passwords via NixOS config results in them being copied to the world-readable Nix store. Please use the extraSettingsFile option to store database passwords in a non-public location.
+        '';
+      }
+    ];
+    users.users.schleuder.isSystemUser = true;
+    users.users.schleuder.group = "schleuder";
+    users.groups.schleuder = {};
+    environment.systemPackages = [
+      pkgs.schleuder-cli
+    ];
+    services.postfix = lib.mkIf cfg.enablePostfix {
+      extraMasterConf = ''
+        schleuder  unix  -       n       n       -       -       pipe
+          flags=DRhu user=schleuder argv=/${pkgs.schleuder}/bin/schleuder work ''${recipient}
+      '';
+      transport = lib.mkIf (cfg.lists != [ ]) (postfixMap (lib.genAttrs cfg.lists (_: "schleuder:")));
+      extraConfig = ''
+        schleuder_destination_recipient_limit = 1
+      '';
+      # review: does this make sense?
+      localRecipients = lib.mkIf (cfg.lists != [ ]) cfg.lists;
+    };
+    systemd.services = let commonServiceConfig = {
+      # We would have liked to use DynamicUser, but since the default
+      # database is SQLite and lives in StateDirectory, and that same
+      # database needs to be readable from the postfix service, this
+      # isn't trivial to do.
+      User = "schleuder";
+      StateDirectory = "schleuder";
+      StateDirectoryMode = "0700";
+    }; in
+      {
+        schleuder-init = {
+          serviceConfig = commonServiceConfig // {
+            ExecStartPre = lib.mkIf (cfg.extraSettingsFile != null) [
+              "+${configScript}"
+            ];
+            ExecStart = [ "${pkgs.schleuder}/bin/schleuder install" ];
+            Type = "oneshot";
+          };
+        };
+        schleuder-api-daemon = {
+          after = [ "local-fs.target" "network.target" "schleuder-init.service" ];
+          wantedBy = [ "multi-user.target" ];
+          requires = [ "schleuder-init.service" ];
+          serviceConfig = commonServiceConfig // {
+            ExecStart = [ "${pkgs.schleuder}/bin/schleuder-api-daemon" ];
+          };
+        };
+        schleuder-weekly-key-maintenance = {
+          after = [ "local-fs.target" "network.target" ];
+          startAt = "weekly";
+          serviceConfig = commonServiceConfig // {
+            ExecStart = [
+              "${pkgs.schleuder}/bin/schleuder refresh_keys"
+              "${pkgs.schleuder}/bin/schleuder check_keys"
+            ];
+          };
+        };
+      };
+
+    environment.etc."schleuder/schleuder.yml" = lib.mkIf (cfg.extraSettingsFile == null) {
+      source = settingsFormat.generate "schleuder.yml" cfg.settings;
+    };
+    environment.etc."schleuder/list-defaults.yml".source = settingsFormat.generate "list-defaults.yml" cfg.listDefaults;
+
+    services.schleuder = {
+      #lists_dir = "/var/lib/schleuder.lists";
+      settings.filters_dir = lib.mkDefault "/var/lib/schleuder/filters";
+      settings.keyword_handlers_dir = lib.mkDefault "/var/lib/schleuder/keyword_handlers";
+    };
+  };
+}
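A usage sketch for schleuder with the default postfix integration; the path given for extraSettingsFile is a placeholder for a secret kept outside the Nix store and merged over schleuder.yml when schleuder-init runs:

{
  services.schleuder = {
    enable = true;                                 # enablePostfix defaults to true
    lists = [ "security@example.org" ];
    extraSettingsFile = "/var/lib/schleuder-secrets/api.yml";
  };
}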
diff --git a/nixpkgs/nixos/modules/services/mail/spamassassin.nix b/nixpkgs/nixos/modules/services/mail/spamassassin.nix
new file mode 100644
index 000000000000..072172e31451
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/spamassassin.nix
@@ -0,0 +1,194 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.spamassassin;
+  spamassassin-local-cf = pkgs.writeText "local.cf" cfg.config;
+
+in
+
+{
+  options = {
+
+    services.spamassassin = {
+      enable = mkEnableOption (lib.mdDoc "the SpamAssassin daemon");
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to run the SpamAssassin daemon in debug mode";
+      };
+
+      config = mkOption {
+        type = types.lines;
+        description = lib.mdDoc ''
+          The SpamAssassin local.cf config
+
+          If you are using this configuration:
+
+              add_header all Status _YESNO_, score=_SCORE_ required=_REQD_ tests=_TESTS_ autolearn=_AUTOLEARN_ version=_VERSION_
+
+          Then you can use this sieve filter:
+
+              require ["fileinto", "reject", "envelope"];
+
+              if header :contains "X-Spam-Flag" "YES" {
+                fileinto "spam";
+              }
+
+          Or this procmail filter:
+
+              :0:
+              * ^X-Spam-Flag: YES
+              /var/vpopmail/domains/lastlog.de/js/.maildir/.spam/new
+
+          to filter your messages based on the additional mail headers added by SpamAssassin.
+        '';
+        example = ''
+          #rewrite_header Subject [***** SPAM _SCORE_ *****]
+          required_score          5.0
+          use_bayes               1
+          bayes_auto_learn        1
+          add_header all Status _YESNO_, score=_SCORE_ required=_REQD_ tests=_TESTS_ autolearn=_AUTOLEARN_ version=_VERSION_
+        '';
+        default = "";
+      };
+
+      initPreConf = mkOption {
+        type = with types; either str path;
+        description = lib.mdDoc "The SpamAssassin init.pre config.";
+        apply = val: if builtins.isPath val then val else pkgs.writeText "init.pre" val;
+        default =
+        ''
+          #
+          # to update this list, run this command in the rules directory:
+          # grep 'loadplugin.*Mail::SpamAssassin::Plugin::.*' -o -h * | sort | uniq
+          #
+
+          #loadplugin Mail::SpamAssassin::Plugin::AccessDB
+          #loadplugin Mail::SpamAssassin::Plugin::AntiVirus
+          loadplugin Mail::SpamAssassin::Plugin::AskDNS
+          # loadplugin Mail::SpamAssassin::Plugin::ASN
+          loadplugin Mail::SpamAssassin::Plugin::AutoLearnThreshold
+          #loadplugin Mail::SpamAssassin::Plugin::AWL
+          loadplugin Mail::SpamAssassin::Plugin::Bayes
+          loadplugin Mail::SpamAssassin::Plugin::BodyEval
+          loadplugin Mail::SpamAssassin::Plugin::Check
+          #loadplugin Mail::SpamAssassin::Plugin::DCC
+          loadplugin Mail::SpamAssassin::Plugin::DKIM
+          loadplugin Mail::SpamAssassin::Plugin::DMARC
+          loadplugin Mail::SpamAssassin::Plugin::DNSEval
+          loadplugin Mail::SpamAssassin::Plugin::FreeMail
+          loadplugin Mail::SpamAssassin::Plugin::HeaderEval
+          loadplugin Mail::SpamAssassin::Plugin::HTMLEval
+          loadplugin Mail::SpamAssassin::Plugin::HTTPSMismatch
+          loadplugin Mail::SpamAssassin::Plugin::ImageInfo
+          loadplugin Mail::SpamAssassin::Plugin::MIMEEval
+          loadplugin Mail::SpamAssassin::Plugin::MIMEHeader
+          # loadplugin Mail::SpamAssassin::Plugin::PDFInfo
+          #loadplugin Mail::SpamAssassin::Plugin::PhishTag
+          loadplugin Mail::SpamAssassin::Plugin::Pyzor
+          loadplugin Mail::SpamAssassin::Plugin::Razor2
+          # loadplugin Mail::SpamAssassin::Plugin::RelayCountry
+          loadplugin Mail::SpamAssassin::Plugin::RelayEval
+          loadplugin Mail::SpamAssassin::Plugin::ReplaceTags
+          # loadplugin Mail::SpamAssassin::Plugin::Rule2XSBody
+          # loadplugin Mail::SpamAssassin::Plugin::Shortcircuit
+          loadplugin Mail::SpamAssassin::Plugin::SpamCop
+          loadplugin Mail::SpamAssassin::Plugin::SPF
+          #loadplugin Mail::SpamAssassin::Plugin::TextCat
+          # loadplugin Mail::SpamAssassin::Plugin::TxRep
+          loadplugin Mail::SpamAssassin::Plugin::URIDetail
+          loadplugin Mail::SpamAssassin::Plugin::URIDNSBL
+          loadplugin Mail::SpamAssassin::Plugin::URIEval
+          # loadplugin Mail::SpamAssassin::Plugin::URILocalBL
+          loadplugin Mail::SpamAssassin::Plugin::VBounce
+          loadplugin Mail::SpamAssassin::Plugin::WhiteListSubject
+          loadplugin Mail::SpamAssassin::Plugin::WLBLEval
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."mail/spamassassin/init.pre".source = cfg.initPreConf;
+    environment.etc."mail/spamassassin/local.cf".source = spamassassin-local-cf;
+
+    # Allow users to run 'spamc'.
+    environment.systemPackages = [ pkgs.spamassassin ];
+
+    users.users.spamd = {
+      description = "Spam Assassin Daemon";
+      uid = config.ids.uids.spamd;
+      group = "spamd";
+    };
+
+    users.groups.spamd = {
+      gid = config.ids.gids.spamd;
+    };
+
+    systemd.services.sa-update = {
+      # Needs to be able to contact the update server.
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "spamd";
+        Group = "spamd";
+        StateDirectory = "spamassassin";
+        ExecStartPost = "+${config.systemd.package}/bin/systemctl -q --no-block try-reload-or-restart spamd.service";
+      };
+
+      script = ''
+        set +e
+        ${pkgs.spamassassin}/bin/sa-update --verbose --gpghomedir=/var/lib/spamassassin/sa-update-keys/
+        rc=$?
+        set -e
+
+        if [[ $rc -gt 1 ]]; then
+          # sa-update failed.
+          exit $rc
+        fi
+
+        if [[ $rc -eq 1 ]]; then
+          # No update was available, exit successfully.
+          exit 0
+        fi
+
+        # An update was available and installed. Compile the rules.
+        ${pkgs.spamassassin}/bin/sa-compile
+      '';
+    };
+
+    systemd.timers.sa-update = {
+      description = "sa-update-service";
+      partOf      = [ "sa-update.service" ];
+      wantedBy    = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = "1:*";
+        Persistent = true;
+      };
+    };
+
+    systemd.services.spamd = {
+      description = "SpamAssassin Server";
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "sa-update.service" ];
+      after = [
+        "network.target"
+        "sa-update.service"
+      ];
+
+      serviceConfig = {
+        User = "spamd";
+        Group = "spamd";
+        ExecStart = "+${pkgs.spamassassin}/bin/spamd ${optionalString cfg.debug "-D"} --username=spamd --groupname=spamd --virtual-config-dir=%S/spamassassin/user-%u --allow-tell --pidfile=/run/spamd.pid";
+        ExecReload = "+${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        StateDirectory = "spamassassin";
+      };
+    };
+  };
+}
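A usage sketch for the SpamAssassin module, taking the local.cf content directly from the option's example above:

{
  services.spamassassin = {
    enable = true;
    config = ''
      required_score          5.0
      use_bayes               1
      bayes_auto_learn        1
      add_header all Status _YESNO_, score=_SCORE_ required=_REQD_ tests=_TESTS_ autolearn=_AUTOLEARN_ version=_VERSION_
    '';
  };
}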
diff --git a/nixpkgs/nixos/modules/services/mail/stalwart-mail.nix b/nixpkgs/nixos/modules/services/mail/stalwart-mail.nix
new file mode 100644
index 000000000000..eb87d9f6f695
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/stalwart-mail.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.stalwart-mail;
+  configFormat = pkgs.formats.toml { };
+  configFile = configFormat.generate "stalwart-mail.toml" cfg.settings;
+  dataDir = "/var/lib/stalwart-mail";
+
+in {
+  options.services.stalwart-mail = {
+    enable = mkEnableOption (mdDoc "the Stalwart all-in-one email server");
+    package = mkPackageOptionMD pkgs "stalwart-mail" { };
+
+    settings = mkOption {
+      inherit (configFormat) type;
+      default = { };
+      description = mdDoc ''
+        Configuration options for the Stalwart email server.
+        See <https://stalw.art/docs/category/configuration> for available options.
+
+        By default, the module is configured to store everything locally.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Default config: all local
+    services.stalwart-mail.settings = {
+      global.tracing.method = mkDefault "stdout";
+      global.tracing.level = mkDefault "info";
+      queue.path = mkDefault "${dataDir}/queue";
+      report.path = mkDefault "${dataDir}/reports";
+      store.db.path = mkDefault "${dataDir}/data/index.sqlite3";
+      store.blob.type = mkDefault "local";
+      store.blob.local.path = mkDefault "${dataDir}/data/blobs";
+      resolver.type = mkDefault "system";
+    };
+
+    systemd.services.stalwart-mail = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "local-fs.target" "network.target" ];
+
+      preStart = ''
+        mkdir -p ${dataDir}/{queue,reports,data/blobs}
+      '';
+
+      serviceConfig = {
+        ExecStart =
+          "${cfg.package}/bin/stalwart-mail --config=${configFile}";
+
+        # Base from template resources/systemd/stalwart-mail.service
+        Type = "simple";
+        LimitNOFILE = 65536;
+        KillMode = "process";
+        KillSignal = "SIGINT";
+        Restart = "on-failure";
+        RestartSec = 5;
+        StandardOutput = "syslog";
+        StandardError = "syslog";
+        SyslogIdentifier = "stalwart-mail";
+
+        DynamicUser = true;
+        User = "stalwart-mail";
+        StateDirectory = "stalwart-mail";
+
+        # Bind standard privileged ports
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+
+        # Hardening
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = false;  # incompatible with CAP_NET_BIND_SERVICE
+        ProcSubset = "pid";
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        UMask = "0077";
+      };
+    };
+
+    # Make admin commands available in the shell
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta = {
+    maintainers = with maintainers; [ happysalada pacien ];
+  };
+}
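A usage sketch for stalwart-mail; the module only fills in local storage defaults, so the hostname and listeners come from `settings`. The listener keys below follow the upstream documentation linked in the option description and should be treated as illustrative:

{
  services.stalwart-mail = {
    enable = true;
    settings = {
      server.hostname = "mx.example.org";
      server.listener.smtp = {
        bind = [ "[::]:25" ];
        protocol = "smtp";
      };
    };
  };
}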
diff --git a/nixpkgs/nixos/modules/services/mail/sympa.nix b/nixpkgs/nixos/modules/services/mail/sympa.nix
new file mode 100644
index 000000000000..04ae46f66eea
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/sympa.nix
@@ -0,0 +1,588 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sympa;
+  dataDir = "/var/lib/sympa";
+  user = "sympa";
+  group = "sympa";
+  pkg = pkgs.sympa;
+  fqdns = attrNames cfg.domains;
+  usingNginx = cfg.web.enable && cfg.web.server == "nginx";
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "MySQL";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "PostgreSQL";
+
+  sympaSubServices = [
+    "sympa-archive.service"
+    "sympa-bounce.service"
+    "sympa-bulk.service"
+    "sympa-task.service"
+  ];
+
+  # common for all services including wwsympa
+  commonServiceConfig = {
+    StateDirectory = "sympa";
+    ProtectHome = true;
+    ProtectSystem = "full";
+    ProtectControlGroups = true;
+  };
+
+  # wwsympa has its own service config
+  sympaServiceConfig = srv: {
+    Type = "simple";
+    Restart = "always";
+    ExecStart = "${pkg}/bin/${srv}.pl --foreground";
+    PIDFile = "/run/sympa/${srv}.pid";
+    User = user;
+    Group = group;
+
+  # avoid duplicating log messages in the journal
+    StandardError = "null";
+  } // commonServiceConfig;
+
+  configVal = value:
+    if isBool value then
+      if value then "on" else "off"
+    else toString value;
+  configGenerator = c: concatStrings (flip mapAttrsToList c (key: val: "${key}\t${configVal val}\n"));
+
+  mainConfig = pkgs.writeText "sympa.conf" (configGenerator cfg.settings);
+  robotConfig = fqdn: domain: pkgs.writeText "${fqdn}-robot.conf" (configGenerator domain.settings);
+
+  transport = pkgs.writeText "transport.sympa" (concatStringsSep "\n" (flip map fqdns (domain: ''
+    ${domain}                        error:User unknown in recipient table
+    sympa@${domain}                  sympa:sympa@${domain}
+    listmaster@${domain}             sympa:listmaster@${domain}
+    bounce@${domain}                 sympabounce:sympa@${domain}
+    abuse-feedback-report@${domain}  sympabounce:sympa@${domain}
+  '')));
+
+  virtual = pkgs.writeText "virtual.sympa" (concatStringsSep "\n" (flip map fqdns (domain: ''
+    sympa-request@${domain}  postmaster@localhost
+    sympa-owner@${domain}    postmaster@localhost
+  '')));
+
+  listAliases = pkgs.writeText "list_aliases.tt2" ''
+    #--- [% list.name %]@[% list.domain %]: list transport map created at [% date %]
+    [% list.name %]@[% list.domain %] sympa:[% list.name %]@[% list.domain %]
+    [% list.name %]-request@[% list.domain %] sympa:[% list.name %]-request@[% list.domain %]
+    [% list.name %]-editor@[% list.domain %] sympa:[% list.name %]-editor@[% list.domain %]
+    #[% list.name %]-subscribe@[% list.domain %] sympa:[% list.name %]-subscribe@[%list.domain %]
+    [% list.name %]-unsubscribe@[% list.domain %] sympa:[% list.name %]-unsubscribe@[% list.domain %]
+    [% list.name %][% return_path_suffix %]@[% list.domain %] sympabounce:[% list.name %]@[% list.domain %]
+  '';
+
+  enabledFiles = filterAttrs (n: v: v.enable) cfg.settingsFile;
+in
+{
+
+  ###### interface
+  options.services.sympa = with types; {
+
+    enable = mkEnableOption (lib.mdDoc "Sympa mailing list manager");
+
+    lang = mkOption {
+      type = str;
+      default = "en_US";
+      example = "cs";
+      description = lib.mdDoc ''
+        Default Sympa language.
+        See <https://github.com/sympa-community/sympa/tree/sympa-6.2/po/sympa>
+        for available options.
+      '';
+    };
+
+    listMasters = mkOption {
+      type = listOf str;
+      example = [ "postmaster@sympa.example.org" ];
+      description = lib.mdDoc ''
+        The list of the email addresses of the listmasters
+        (users authorized to perform global server commands).
+      '';
+    };
+
+    mainDomain = mkOption {
+      type = nullOr str;
+      default = null;
+      example = "lists.example.org";
+      description = lib.mdDoc ''
+        Main domain to be used in {file}`sympa.conf`.
+        If `null`, one of the {option}`services.sympa.domains` is chosen for you.
+      '';
+    };
+
+    domains = mkOption {
+      type = attrsOf (submodule ({ name, config, ... }: {
+        options = {
+          webHost = mkOption {
+            type = nullOr str;
+            default = null;
+            example = "archive.example.org";
+            description = lib.mdDoc ''
+              Domain part of the web interface URL (no web interface for this domain if `null`).
+              DNS record of type A (or AAAA or CNAME) has to exist with this value.
+            '';
+          };
+          webLocation = mkOption {
+            type = str;
+            default = "/";
+            example = "/sympa";
+            description = lib.mdDoc "URL path part of the web interface.";
+          };
+          settings = mkOption {
+            type = attrsOf (oneOf [ str int bool ]);
+            default = {};
+            example = {
+              default_max_list_members = 3;
+            };
+            description = lib.mdDoc ''
+              The {file}`robot.conf` configuration file as key value set.
+              See <https://sympa-community.github.io/gpldoc/man/sympa.conf.5.html>
+              for list of configuration parameters.
+            '';
+          };
+        };
+
+        config.settings = mkIf (cfg.web.enable && config.webHost != null) {
+          wwsympa_url = mkDefault "https://${config.webHost}${strings.removeSuffix "/" config.webLocation}";
+        };
+      }));
+
+      description = lib.mdDoc ''
+        Email domains handled by this instance. There have
+        to be MX records for the keys of this attribute set.
+      '';
+      example = literalExpression ''
+        {
+          "lists.example.org" = {
+            webHost = "lists.example.org";
+            webLocation = "/";
+          };
+          "sympa.example.com" = {
+            webHost = "example.com";
+            webLocation = "/sympa";
+          };
+        }
+      '';
+    };
+
+    database = {
+      type = mkOption {
+        type = enum [ "SQLite" "PostgreSQL" "MySQL" ];
+        default = "SQLite";
+        example = "MySQL";
+        description = lib.mdDoc "Database engine to use.";
+      };
+
+      host = mkOption {
+        type = nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Database host address.
+
+          For MySQL, use `localhost` to connect using Unix domain socket.
+
+          For PostgreSQL, use path to directory (e.g. {file}`/run/postgresql`)
+          to connect using Unix domain socket located in this directory.
+
+          Use `null` to fall back on Sympa default, or when using
+          {option}`services.sympa.database.createLocally`.
+        '';
+      };
+
+      port = mkOption {
+        type = nullOr port;
+        default = null;
+        description = lib.mdDoc "Database port. Use `null` for default port.";
+      };
+
+      name = mkOption {
+        type = str;
+        default = if cfg.database.type == "SQLite" then "${dataDir}/sympa.sqlite" else "sympa";
+        defaultText = literalExpression ''if database.type == "SQLite" then "${dataDir}/sympa.sqlite" else "sympa"'';
+        description = lib.mdDoc ''
+          Database name. When using SQLite this must be an absolute
+          path to the database file.
+        '';
+      };
+
+      user = mkOption {
+        type = nullOr str;
+        default = user;
+        description = lib.mdDoc "Database user. The system user name is used as a default.";
+      };
+
+      passwordFile = mkOption {
+        type = nullOr path;
+        default = null;
+        example = "/run/keys/sympa-dbpassword";
+        description = lib.mdDoc ''
+          A file containing the password for {option}`services.sympa.database.user`.
+        '';
+      };
+
+      createLocally = mkOption {
+        type = bool;
+        default = true;
+        description = lib.mdDoc "Whether to create a local database automatically.";
+      };
+    };
+
+    web = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable Sympa web interface.";
+      };
+
+      server = mkOption {
+        type = enum [ "nginx" "none" ];
+        default = "nginx";
+        description = lib.mdDoc ''
+          The webserver used for the Sympa web interface. Set it to `none` if you want to configure it yourself.
+          Further nginx configuration can be done by adapting
+          {option}`services.nginx.virtualHosts.«name»`.
+        '';
+      };
+
+      https = mkOption {
+        type = bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to use HTTPS. When nginx integration is enabled, this option forces SSL and enables ACME.
+          Please note that the Sympa web interface always uses https links even when this option is disabled.
+        '';
+      };
+
+      fcgiProcs = mkOption {
+        type = ints.positive;
+        default = 2;
+        description = lib.mdDoc "Number of FastCGI processes to fork.";
+      };
+    };
+
+    mta = {
+      type = mkOption {
+        type = enum [ "postfix" "none" ];
+        default = "postfix";
+        description = lib.mdDoc ''
+          Mail transfer agent (MTA) integration. Use `none` if you want to configure it yourself.
+
+          The `postfix` integration sets up a local Postfix instance that will pass incoming
+          messages from configured domains to Sympa. You still need to configure at least outgoing message
+          handling using e.g. {option}`services.postfix.relayHost`.
+        '';
+      };
+    };
+
+    settings = mkOption {
+      type = attrsOf (oneOf [ str int bool ]);
+      default = {};
+      example = literalExpression ''
+        {
+          default_home = "lists";
+          viewlogs_page_size = 50;
+        }
+      '';
+      description = lib.mdDoc ''
+        The {file}`sympa.conf` configuration file as key value set.
+        See <https://sympa-community.github.io/gpldoc/man/sympa.conf.5.html>
+        for list of configuration parameters.
+      '';
+    };
+
+    settingsFile = mkOption {
+      type = attrsOf (submodule ({ name, config, ... }: {
+        options = {
+          enable = mkOption {
+            type = bool;
+            default = true;
+            description = lib.mdDoc "Whether this file should be generated. This option allows specific files to be disabled.";
+          };
+          text = mkOption {
+            default = null;
+            type = nullOr lines;
+            description = lib.mdDoc "Text of the file.";
+          };
+          source = mkOption {
+            type = path;
+            description = lib.mdDoc "Path of the source file.";
+          };
+        };
+
+        config.source = mkIf (config.text != null) (mkDefault (pkgs.writeText "sympa-${baseNameOf name}" config.text));
+      }));
+      default = {};
+      example = literalExpression ''
+        {
+          "list_data/lists.example.org/help" = {
+            text = "subject This list provides help to users";
+          };
+        }
+      '';
+      description = lib.mdDoc "Set of files to be linked in {file}`${dataDir}`.";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.sympa.settings = (mapAttrs (_: v: mkDefault v) {
+      domain     = if cfg.mainDomain != null then cfg.mainDomain else head fqdns;
+      listmaster = concatStringsSep "," cfg.listMasters;
+      lang       = cfg.lang;
+
+      home        = "${dataDir}/list_data";
+      arc_path    = "${dataDir}/arc";
+      bounce_path = "${dataDir}/bounce";
+
+      sendmail = "${pkgs.system-sendmail}/bin/sendmail";
+
+      db_type = cfg.database.type;
+      db_name = cfg.database.name;
+      db_user = cfg.database.user;
+    }
+    // (optionalAttrs (cfg.database.host != null) {
+      db_host = cfg.database.host;
+    })
+    // (optionalAttrs mysqlLocal {
+      db_host = "localhost"; # use unix domain socket
+    })
+    // (optionalAttrs pgsqlLocal {
+      db_host = "/run/postgresql"; # use unix domain socket
+    })
+    // (optionalAttrs (cfg.database.port != null) {
+      db_port = cfg.database.port;
+    })
+    // (optionalAttrs (cfg.mta.type == "postfix") {
+      sendmail_aliases = "${dataDir}/sympa_transport";
+      aliases_program  = "${pkgs.postfix}/bin/postmap";
+      aliases_db_type  = "hash";
+    })
+    // (optionalAttrs cfg.web.enable {
+      static_content_path = "${dataDir}/static_content";
+      css_path            = "${dataDir}/static_content/css";
+      pictures_path       = "${dataDir}/static_content/pictures";
+      mhonarc             = "${pkgs.perlPackages.MHonArc}/bin/mhonarc";
+    }));
+
+    services.sympa.settingsFile = {
+      "virtual.sympa"        = mkDefault { source = virtual; };
+      "transport.sympa"      = mkDefault { source = transport; };
+      "etc/list_aliases.tt2" = mkDefault { source = listAliases; };
+    }
+    // (flip mapAttrs' cfg.domains (fqdn: domain:
+          nameValuePair "etc/${fqdn}/robot.conf" (mkDefault { source = robotConfig fqdn domain; })));
+
+    environment = {
+      systemPackages = [ pkg ];
+    };
+
+    users.users.${user} = {
+      description = "Sympa mailing list manager user";
+      group = group;
+      home = dataDir;
+      createHome = false;
+      isSystemUser = true;
+    };
+
+    users.groups.${group} = {};
+
+    assertions = [
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.name == cfg.database.user;
+        message = "services.sympa.database.user must be set to ${user} if services.sympa.database.createLocally is set to true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.sympa.database.createLocally is set to true";
+      }
+    ];
+
+    systemd.tmpfiles.rules = [
+      "d  ${dataDir}                   0711 ${user} ${group} - -"
+      "d  ${dataDir}/etc               0700 ${user} ${group} - -"
+      "d  ${dataDir}/spool             0700 ${user} ${group} - -"
+      "d  ${dataDir}/list_data         0700 ${user} ${group} - -"
+      "d  ${dataDir}/arc               0700 ${user} ${group} - -"
+      "d  ${dataDir}/bounce            0700 ${user} ${group} - -"
+      "f  ${dataDir}/sympa_transport   0600 ${user} ${group} - -"
+
+      # force-copy static_content so it's up to date with package
+      # set permissions for wwsympa which needs write access (...)
+      "R  ${dataDir}/static_content    -    -       -        - -"
+      "C  ${dataDir}/static_content    0711 ${user} ${group} - ${pkg}/var/lib/sympa/static_content"
+      "e  ${dataDir}/static_content/*  0711 ${user} ${group} - -"
+
+      "d  /run/sympa                   0755 ${user} ${group} - -"
+    ]
+    ++ (flip concatMap fqdns (fqdn: [
+      "d  ${dataDir}/etc/${fqdn}       0700 ${user} ${group} - -"
+      "d  ${dataDir}/list_data/${fqdn} 0700 ${user} ${group} - -"
+    ]))
+    #++ (flip mapAttrsToList enabledFiles (k: v:
+    #  "L+ ${dataDir}/${k}              -    -       -        - ${v.source}"
+    #))
+    ++ (concatLists (flip mapAttrsToList enabledFiles (k: v: [
+      # sympa doesn't handle symlinks well (e.g. fails to create locks)
+      # force-copy instead
+      "R ${dataDir}/${k}              -    -       -        - -"
+      "C ${dataDir}/${k}              0700 ${user}  ${group} - ${v.source}"
+    ])));
+
+    systemd.services.sympa = {
+      description = "Sympa mailing list manager";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      wants = sympaSubServices;
+      before = sympaSubServices;
+      serviceConfig = sympaServiceConfig "sympa_msg";
+
+      preStart = ''
+        umask 0077
+
+        cp -f ${mainConfig} ${dataDir}/etc/sympa.conf
+        ${optionalString (cfg.database.passwordFile != null) ''
+          chmod u+w ${dataDir}/etc/sympa.conf
+          echo -n "db_passwd " >> ${dataDir}/etc/sympa.conf
+          cat ${cfg.database.passwordFile} >> ${dataDir}/etc/sympa.conf
+        ''}
+
+        ${optionalString (cfg.mta.type == "postfix") ''
+          ${pkgs.postfix}/bin/postmap hash:${dataDir}/virtual.sympa
+          ${pkgs.postfix}/bin/postmap hash:${dataDir}/transport.sympa
+        ''}
+        ${pkg}/bin/sympa_newaliases.pl
+        ${pkg}/bin/sympa.pl --health_check
+      '';
+    };
+    systemd.services.sympa-archive = {
+      description = "Sympa mailing list manager (archiving)";
+      bindsTo = [ "sympa.service" ];
+      serviceConfig = sympaServiceConfig "archived";
+    };
+    systemd.services.sympa-bounce = {
+      description = "Sympa mailing list manager (bounce processing)";
+      bindsTo = [ "sympa.service" ];
+      serviceConfig = sympaServiceConfig "bounced";
+    };
+    systemd.services.sympa-bulk = {
+      description = "Sympa mailing list manager (message distribution)";
+      bindsTo = [ "sympa.service" ];
+      serviceConfig = sympaServiceConfig "bulk";
+    };
+    systemd.services.sympa-task = {
+      description = "Sympa mailing list manager (task management)";
+      bindsTo = [ "sympa.service" ];
+      serviceConfig = sympaServiceConfig "task_manager";
+    };
+
+    systemd.services.wwsympa = mkIf usingNginx {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "sympa.service" ];
+      serviceConfig = {
+        Type = "forking";
+        PIDFile = "/run/sympa/wwsympa.pid";
+        Restart = "always";
+        ExecStart = ''${pkgs.spawn_fcgi}/bin/spawn-fcgi \
+          -u ${user} \
+          -g ${group} \
+          -U nginx \
+          -M 0600 \
+          -F ${toString cfg.web.fcgiProcs} \
+          -P /run/sympa/wwsympa.pid \
+          -s /run/sympa/wwsympa.socket \
+          -- ${pkg}/lib/sympa/cgi/wwsympa.fcgi
+        '';
+
+      } // commonServiceConfig;
+    };
+
+    services.nginx.enable = mkIf usingNginx true;
+    services.nginx.virtualHosts = mkIf usingNginx (let
+      vHosts = unique (remove null (mapAttrsToList (_k: v: v.webHost) cfg.domains));
+      hostLocations = host: map (v: v.webLocation) (filter (v: v.webHost == host) (attrValues cfg.domains));
+      httpsOpts = optionalAttrs cfg.web.https { forceSSL = mkDefault true; enableACME = mkDefault true; };
+    in
+    genAttrs vHosts (host: {
+      locations = genAttrs (hostLocations host) (loc: {
+        extraConfig = ''
+          include ${config.services.nginx.package}/conf/fastcgi_params;
+
+          fastcgi_pass unix:/run/sympa/wwsympa.socket;
+        '';
+      }) // {
+        "/static-sympa/".alias = "${dataDir}/static_content/";
+      };
+    } // httpsOpts));
+
+    services.postfix = mkIf (cfg.mta.type == "postfix") {
+      enable = true;
+      recipientDelimiter = "+";
+      config = {
+        virtual_alias_maps = [ "hash:${dataDir}/virtual.sympa" ];
+        virtual_mailbox_maps = [
+          "hash:${dataDir}/transport.sympa"
+          "hash:${dataDir}/sympa_transport"
+          "hash:${dataDir}/virtual.sympa"
+        ];
+        virtual_mailbox_domains = [ "hash:${dataDir}/transport.sympa" ];
+        transport_maps = [
+          "hash:${dataDir}/transport.sympa"
+          "hash:${dataDir}/sympa_transport"
+        ];
+      };
+      masterConfig = {
+        "sympa" = {
+          type = "unix";
+          privileged = true;
+          chroot = false;
+          command = "pipe";
+          args = [
+            "flags=hqRu"
+            "user=${user}"
+            "argv=${pkg}/libexec/queue"
+            "\${nexthop}"
+          ];
+        };
+        "sympabounce" = {
+          type = "unix";
+          privileged = true;
+          chroot = false;
+          command = "pipe";
+          args = [
+            "flags=hqRu"
+            "user=${user}"
+            "argv=${pkg}/libexec/bouncequeue"
+            "\${nexthop}"
+          ];
+        };
+      };
+    };
+
+    services.mysql = optionalAttrs mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.postgresql = optionalAttrs pgsqlLocal {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ mmilata sorki ];
+}
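
Example (not part of this change): a minimal host configuration exercising the Sympa options above. The domain, listmaster address and the option names declared earlier in this file but outside this hunk (`enable`, `domains`, `listMasters`, `database.type`, `web.enable`) are assumptions for illustration only.

```nix
{
  services.sympa = {
    enable = true;                                # assumed to be declared earlier in this module
    domains."lists.example.org" = { };            # one robot; a robot.conf is generated per domain
    listMasters = [ "postmaster@example.org" ];
    database.type = "PostgreSQL";                 # with createLocally, db_host is pointed at /run/postgresql
    web.enable = true;                            # nginx vhost + wwsympa over the FastCGI socket
    mta.type = "postfix";                         # local Postfix pipes mail into the sympa queue programs
    settings.default_home = "lists";              # freeform sympa.conf key
  };
}
```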
diff --git a/nixpkgs/nixos/modules/services/mail/zeyple.nix b/nixpkgs/nixos/modules/services/mail/zeyple.nix
new file mode 100644
index 000000000000..e7f9ddd92dc2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/mail/zeyple.nix
@@ -0,0 +1,125 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.zeyple;
+  ini = pkgs.formats.ini { };
+
+  gpgHome = pkgs.runCommand "zeyple-gpg-home" { } ''
+    mkdir -p $out
+    for file in ${lib.concatStringsSep " " cfg.keys}; do
+      ${config.programs.gnupg.package}/bin/gpg --homedir="$out" --import "$file"
+    done
+
+    # Remove socket files
+    rm -f $out/S.*
+  '';
+in {
+  options.services.zeyple = {
+    enable = mkEnableOption (lib.mdDoc "Zeyple, a utility program to automatically encrypt outgoing emails with GPG");
+
+    user = mkOption {
+      type = types.str;
+      default = "zeyple";
+      description = lib.mdDoc ''
+        User to run Zeyple as.
+
+        ::: {.note}
+        If left at the default value, this user will automatically be created
+        on system activation; otherwise the sysadmin is responsible for
+        ensuring the user exists.
+        :::
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "zeyple";
+      description = lib.mdDoc ''
+        Group to use to run Zeyple.
+
+        ::: {.note}
+        If left at the default value, this group will automatically be created
+        on system activation; otherwise the sysadmin is responsible for
+        ensuring the group exists.
+        :::
+      '';
+    };
+
+    settings = mkOption {
+      type = ini.type;
+      default = { };
+      description = lib.mdDoc ''
+        Zeyple configuration. Refer to
+        <https://github.com/infertux/zeyple/blob/master/zeyple/zeyple.conf.example>
+        for details on supported values.
+      '';
+    };
+
+    keys = mkOption {
+      type = with types; listOf path;
+      description = lib.mdDoc "List of public key files that will be imported by gpg.";
+    };
+
+    rotateLogs = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Whether to enable rotation of log files.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups = optionalAttrs (cfg.group == "zeyple") { "${cfg.group}" = { }; };
+    users.users = optionalAttrs (cfg.user == "zeyple") {
+      "${cfg.user}" = {
+        isSystemUser = true;
+        group = cfg.group;
+      };
+    };
+
+    services.zeyple.settings = {
+      zeyple = mapAttrs (name: mkDefault) {
+        log_file = "/var/log/zeyple/zeyple.log";
+        force_encrypt = true;
+      };
+
+      gpg = mapAttrs (name: mkDefault) { home = "${gpgHome}"; };
+
+      relay = mapAttrs (name: mkDefault) {
+        host = "localhost";
+        port = 10026;
+      };
+    };
+
+    environment.etc."zeyple.conf".source = ini.generate "zeyple.conf" cfg.settings;
+
+    systemd.tmpfiles.rules = [ "f '${cfg.settings.zeyple.log_file}' 0600 ${cfg.user} ${cfg.group} - -" ];
+    services.logrotate = mkIf cfg.rotateLogs {
+      enable = true;
+      settings.zeyple = {
+        files = cfg.settings.zeyple.log_file;
+        frequency = "weekly";
+        rotate = 5;
+        compress = true;
+        copytruncate = true;
+      };
+    };
+
+    services.postfix.extraMasterConf = ''
+      zeyple    unix  -       n       n       -       -       pipe
+        user=${cfg.user} argv=${pkgs.zeyple}/bin/zeyple ''${recipient}
+
+      localhost:${toString cfg.settings.relay.port} inet  n       -       n       -       10      smtpd
+        -o content_filter=
+        -o receive_override_options=no_unknown_recipient_checks,no_header_body_checks,no_milters
+        -o smtpd_helo_restrictions=
+        -o smtpd_client_restrictions=
+        -o smtpd_sender_restrictions=
+        -o smtpd_recipient_restrictions=permit_mynetworks,reject
+        -o mynetworks=127.0.0.0/8,[::1]/128
+        -o smtpd_authorized_xforward_hosts=127.0.0.0/8,[::1]/128
+    '';
+
+    services.postfix.extraConfig = "content_filter = zeyple";
+  };
+}
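
Example (not part of this change): a hypothetical sketch of how the new Zeyple options fit together. The key file paths are placeholders; the relay on localhost:10026 comes from the defaults set above.

```nix
{
  services.zeyple = {
    enable = true;
    # Public keys imported into the generated GPG home at build time (placeholder paths).
    keys = [ ./keys/alice.asc ./keys/bob.asc ];
    # Freeform INI settings; this overrides the mkDefault value set by the module.
    settings.zeyple.force_encrypt = true;
    rotateLogs = true;                 # weekly logrotate of /var/log/zeyple/zeyple.log
  };
}
```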
diff --git a/nixpkgs/nixos/modules/services/matrix/appservice-discord.nix b/nixpkgs/nixos/modules/services/matrix/appservice-discord.nix
new file mode 100644
index 000000000000..6ce8718c35d8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/appservice-discord.nix
@@ -0,0 +1,162 @@
+{ config, options, pkgs, lib, ... }:
+
+with lib;
+
+let
+  dataDir = "/var/lib/matrix-appservice-discord";
+  registrationFile = "${dataDir}/discord-registration.yaml";
+  cfg = config.services.matrix-appservice-discord;
+  opt = options.services.matrix-appservice-discord;
+  # TODO: switch to configGen.json once RFC42 is implemented
+  settingsFile = pkgs.writeText "matrix-appservice-discord-settings.json" (builtins.toJSON cfg.settings);
+
+in {
+  options = {
+    services.matrix-appservice-discord = {
+      enable = mkEnableOption (lib.mdDoc "a bridge between Matrix and Discord");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.matrix-appservice-discord;
+        defaultText = literalExpression "pkgs.matrix-appservice-discord";
+        description = lib.mdDoc ''
+          Which package of matrix-appservice-discord to use.
+        '';
+      };
+
+      settings = mkOption rec {
+        # TODO: switch to types.config.json as prescribed by RFC42 once it's implemented
+        type = types.attrs;
+        apply = recursiveUpdate default;
+        default = {
+          database = {
+            filename = "${dataDir}/discord.db";
+          };
+
+          # empty values necessary for registration file generation
+          # actual values defined in environmentFile
+          auth = {
+            clientID = "";
+            botToken = "";
+          };
+        };
+        example = literalExpression ''
+          {
+            bridge = {
+              domain = "public-domain.tld";
+              homeserverUrl = "http://public-domain.tld:8008";
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          {file}`config.yaml` configuration as a Nix attribute set.
+
+          Configuration options should match those described in
+          [config.sample.yaml](https://github.com/Half-Shot/matrix-appservice-discord/blob/master/config/config.sample.yaml).
+
+          {option}`config.bridge.domain` and {option}`config.bridge.homeserverUrl`
+          should be set to match the public host name of the Matrix homeserver for webhooks and avatars to work.
+
+          Secret tokens should be specified using {option}`environmentFile`
+          instead of this world-readable attribute set.
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          File containing environment variables to be passed to the matrix-appservice-discord service,
+          in which secret tokens can be specified securely by defining values for
+          `APPSERVICE_DISCORD_AUTH_CLIENT_I_D` and
+          `APPSERVICE_DISCORD_AUTH_BOT_TOKEN`.
+        '';
+      };
+
+      url = mkOption {
+        type = types.str;
+        default = "http://localhost:${toString cfg.port}";
+        defaultText = literalExpression ''"http://localhost:''${toString config.${opt.port}}"'';
+        description = lib.mdDoc ''
+          The URL where the application service is listening for HS requests.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 9005; # from https://github.com/Half-Shot/matrix-appservice-discord/blob/master/package.json#L11
+        description = lib.mdDoc ''
+          Port number on which the bridge should listen for internal communication with the Matrix homeserver.
+        '';
+      };
+
+      localpart = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          The user_id localpart to assign to the AS.
+        '';
+      };
+
+      serviceDependencies = mkOption {
+        type = with types; listOf str;
+        default = optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit;
+        defaultText = literalExpression ''
+          optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit
+        '';
+        description = lib.mdDoc ''
+          List of Systemd services to require and wait for when starting the application service,
+          such as the Matrix homeserver if it's running on the same host.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.matrix-appservice-discord = {
+      description = "A bridge between Matrix and Discord.";
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
+      after = [ "network-online.target" ] ++ cfg.serviceDependencies;
+
+      preStart = ''
+        if [ ! -f '${registrationFile}' ]; then
+          ${cfg.package}/bin/matrix-appservice-discord \
+            --generate-registration \
+            --url=${escapeShellArg cfg.url} \
+            ${optionalString (cfg.localpart != null) "--localpart=${escapeShellArg cfg.localpart}"} \
+            --config='${settingsFile}' \
+            --file='${registrationFile}'
+        fi
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        Restart = "always";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+
+        DynamicUser = true;
+        PrivateTmp = true;
+        WorkingDirectory = "${cfg.package}/${cfg.package.passthru.nodeAppDir}";
+        StateDirectory = baseNameOf dataDir;
+        UMask = "0027";
+        EnvironmentFile = cfg.environmentFile;
+
+        ExecStart = ''
+          ${cfg.package}/bin/matrix-appservice-discord \
+            --file='${registrationFile}' \
+            --config='${settingsFile}' \
+            --port='${toString cfg.port}'
+        '';
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ pacien ];
+}
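
Example (not part of this change): a sketch of a host using the Discord bridge module. The domain, homeserver URL and environment file path are placeholders.

```nix
{
  services.matrix-appservice-discord = {
    enable = true;
    settings.bridge = {
      domain = "example.org";
      homeserverUrl = "https://matrix.example.org";
    };
    # Supplies APPSERVICE_DISCORD_AUTH_CLIENT_I_D and APPSERVICE_DISCORD_AUTH_BOT_TOKEN
    # outside the world-readable Nix store (placeholder path).
    environmentFile = "/run/secrets/matrix-appservice-discord.env";
  };
}
```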
diff --git a/nixpkgs/nixos/modules/services/matrix/appservice-irc.nix b/nixpkgs/nixos/modules/services/matrix/appservice-irc.nix
new file mode 100644
index 000000000000..d153ffc2ace8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/appservice-irc.nix
@@ -0,0 +1,236 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.matrix-appservice-irc;
+
+  pkg = pkgs.matrix-appservice-irc;
+  bin = "${pkg}/bin/matrix-appservice-irc";
+
+  jsonType = (pkgs.formats.json {}).type;
+
+  configFile = pkgs.runCommand "matrix-appservice-irc.yml" {
+    # Because this program will be run at build time, we need `nativeBuildInputs`
+    nativeBuildInputs = [ (pkgs.python3.withPackages (ps: [ ps.jsonschema ])) pkgs.remarshal ];
+    preferLocalBuild = true;
+
+    config = builtins.toJSON cfg.settings;
+    passAsFile = [ "config" ];
+  } ''
+    # The schema is given as yaml, we need to convert it to json
+    remarshal --if yaml --of json -i ${pkg}/config.schema.yml -o config.schema.json
+    python -m jsonschema config.schema.json -i $configPath
+    cp "$configPath" "$out"
+  '';
+  registrationFile = "/var/lib/matrix-appservice-irc/registration.yml";
+in {
+  options.services.matrix-appservice-irc = with types; {
+    enable = mkEnableOption (lib.mdDoc "the Matrix/IRC bridge");
+
+    port = mkOption {
+      type = port;
+      description = lib.mdDoc "The port to listen on";
+      default = 8009;
+    };
+
+    needBindingCap = mkOption {
+      type = bool;
+      description = lib.mdDoc "Whether the daemon needs to bind to ports below 1024 (e.g. for the ident service)";
+      default = false;
+    };
+
+    passwordEncryptionKeyLength = mkOption {
+      type = ints.unsigned;
+      description = lib.mdDoc "Length of the key to encrypt IRC passwords with";
+      default = 4096;
+      example = 8192;
+    };
+
+    registrationUrl = mkOption {
+      type = str;
+      description = lib.mdDoc ''
+        The URL where the application service is listening for homeserver requests,
+        from the Matrix homeserver perspective.
+      '';
+      example = "http://localhost:8009";
+    };
+
+    localpart = mkOption {
+      type = str;
+      description = lib.mdDoc "The user_id localpart to assign to the appservice";
+      default = "appservice-irc";
+    };
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Configuration for the appservice, see
+        <https://github.com/matrix-org/matrix-appservice-irc/blob/${pkgs.matrix-appservice-irc.version}/config.sample.yaml>
+        for supported values
+      '';
+      default = {};
+      type = submodule {
+        freeformType = jsonType;
+
+        options = {
+          homeserver = mkOption {
+            description = lib.mdDoc "Homeserver configuration";
+            default = {};
+            type = submodule {
+              freeformType = jsonType;
+
+              options = {
+                url = mkOption {
+                  type = str;
+                  description = lib.mdDoc "The URL to the home server for client-server API calls";
+                };
+
+                domain = mkOption {
+                  type = str;
+                  description = lib.mdDoc ''
+                    The 'domain' part for user IDs on this home server. Usually
+                    (but not always) this is the "domain name" part of the homeserver URL.
+                  '';
+                };
+              };
+            };
+          };
+
+          database = mkOption {
+            default = {};
+            description = lib.mdDoc "Configuration for the database";
+            type = submodule {
+              freeformType = jsonType;
+
+              options = {
+                engine = mkOption {
+                  type = str;
+                  description = lib.mdDoc "Which database engine to use";
+                  default = "nedb";
+                  example = "postgres";
+                };
+
+                connectionString = mkOption {
+                  type = str;
+                  description = lib.mdDoc "The database connection string";
+                  default = "nedb://var/lib/matrix-appservice-irc/data";
+                  example = "postgres://username:password@host:port/databasename";
+                };
+              };
+            };
+          };
+
+          ircService = mkOption {
+            default = {};
+            description = lib.mdDoc "IRC bridge configuration";
+            type = submodule {
+              freeformType = jsonType;
+
+              options = {
+                passwordEncryptionKeyPath = mkOption {
+                  type = str;
+                  description = lib.mdDoc ''
+                    Location of the key with which IRC passwords are encrypted
+                    for storage. Will be generated on first run if not present.
+                  '';
+                  default = "/var/lib/matrix-appservice-irc/passkey.pem";
+                };
+
+                servers = mkOption {
+                  type = submodule { freeformType = jsonType; };
+                  description = lib.mdDoc "IRC servers to connect to";
+                };
+              };
+            };
+          };
+        };
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.matrix-appservice-irc = {
+      description = "Matrix-IRC bridge";
+      before = [ "matrix-synapse.service" ]; # So the registration can be used by Synapse
+      after = lib.optionals (cfg.settings.database.engine == "postgres") [
+        "postgresql.service"
+      ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        umask 077
+        # Generate key for encrypting passwords
+        if ! [ -f "${cfg.settings.ircService.passwordEncryptionKeyPath}" ]; then
+          ${pkgs.openssl}/bin/openssl genpkey \
+              -out "${cfg.settings.ircService.passwordEncryptionKeyPath}" \
+              -outform PEM \
+              -algorithm RSA \
+              -pkeyopt "rsa_keygen_bits:${toString cfg.passwordEncryptionKeyLength}"
+        fi
+        # Generate registration file
+        if ! [ -f "${registrationFile}" ]; then
+          # The easy case: the file has not been generated yet
+          ${bin} --generate-registration --file ${registrationFile} --config ${configFile} --url ${cfg.registrationUrl} --localpart ${cfg.localpart}
+        else
+          # The tricky case: we already have a registration file. Because the NixOS configuration might have changed, we need to
+          # regenerate it. But this would give the service a new random ID and tokens, so we need to back up and restore them.
+          # 1. Backup
+          id=$(grep "^id:.*$" ${registrationFile})
+          hs_token=$(grep "^hs_token:.*$" ${registrationFile})
+          as_token=$(grep "^as_token:.*$" ${registrationFile})
+          # 2. Regenerate
+          ${bin} --generate-registration --file ${registrationFile} --config ${configFile} --url ${cfg.registrationUrl} --localpart ${cfg.localpart}
+          # 3. Restore
+          sed -i "s/^id:.*$/$id/g" ${registrationFile}
+          sed -i "s/^hs_token:.*$/$hs_token/g" ${registrationFile}
+          sed -i "s/^as_token:.*$/$as_token/g" ${registrationFile}
+        fi
+        # Allow synapse access to the registration
+        if ${pkgs.getent}/bin/getent group matrix-synapse > /dev/null; then
+          chgrp matrix-synapse ${registrationFile}
+          chmod g+r ${registrationFile}
+        fi
+      '';
+
+      serviceConfig = rec {
+        Type = "simple";
+        ExecStart = "${bin} --config ${configFile} --file ${registrationFile} --port ${toString cfg.port}";
+
+        ProtectHome = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        StateDirectory = "matrix-appservice-irc";
+        StateDirectoryMode = "755";
+
+        User = "matrix-appservice-irc";
+        Group = "matrix-appservice-irc";
+
+        CapabilityBoundingSet = [ "CAP_CHOWN" ] ++ optional (cfg.needBindingCap) "CAP_NET_BIND_SERVICE";
+        AmbientCapabilities = CapabilityBoundingSet;
+        NoNewPrivileges = true;
+
+        LockPersonality = true;
+        RestrictRealtime = true;
+        PrivateMounts = true;
+        SystemCallFilter = [
+          "@system-service @pkey"
+          "~@privileged @resources"
+        ];
+        SystemCallArchitectures = "native";
+        # AF_UNIX is required to connect to a postgres socket.
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
+      };
+    };
+
+    users.groups.matrix-appservice-irc = {};
+    users.users.matrix-appservice-irc = {
+      description = "Service user for the Matrix-IRC bridge";
+      group = "matrix-appservice-irc";
+      isSystemUser = true;
+    };
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
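
Example (not part of this change): a sketch of a minimal IRC bridge configuration. The homeserver values are placeholders, and the IRC server attributes are purely illustrative; the generated config is validated against the bundled config.schema.yml at build time, so consult the upstream config.sample.yaml for the keys each server supports.

```nix
{
  services.matrix-appservice-irc = {
    enable = true;
    registrationUrl = "http://localhost:8009";
    settings = {
      homeserver = {
        url = "http://localhost:8008";
        domain = "example.org";
      };
      # Freeform (jsonType); keys here follow the upstream sample config.
      ircService.servers."irc.libera.chat" = {
        name = "Libera Chat";
      };
    };
  };
}
```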
diff --git a/nixpkgs/nixos/modules/services/matrix/conduit.nix b/nixpkgs/nixos/modules/services/matrix/conduit.nix
new file mode 100644
index 000000000000..76af7ba22857
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/conduit.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.matrix-conduit;
+
+  format = pkgs.formats.toml {};
+  configFile = format.generate "conduit.toml" cfg.settings;
+in
+  {
+    meta.maintainers = with maintainers; [ pstn piegames ];
+    options.services.matrix-conduit = {
+      enable = mkEnableOption (lib.mdDoc "matrix-conduit");
+
+      extraEnvironment = mkOption {
+        type = types.attrsOf types.str;
+        description = lib.mdDoc "Extra Environment variables to pass to the conduit server.";
+        default = {};
+        example = { RUST_BACKTRACE="yes"; };
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.matrix-conduit;
+        defaultText = lib.literalExpression "pkgs.matrix-conduit";
+        description = lib.mdDoc ''
+          Package of the conduit matrix server to use.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = format.type;
+          options = {
+            global.server_name = mkOption {
+              type = types.str;
+              example = "example.com";
+              description = lib.mdDoc "The server_name is the name of this server. It is used as a suffix for user # and room ids.";
+            };
+            global.port = mkOption {
+              type = types.port;
+              default = 6167;
+              description = lib.mdDoc "The port Conduit will be running on. You need to set up a reverse proxy in your web server (e.g. apache or nginx), so all requests to /_matrix on port 443 and 8448 will be forwarded to the Conduit instance running on this port";
+            };
+            global.max_request_size = mkOption {
+              type = types.ints.positive;
+              default = 20000000;
+              description = lib.mdDoc "Max request size in bytes. Don't forget to also change it in the proxy.";
+            };
+            global.allow_registration = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc "Whether new users can register on this server.";
+            };
+            global.allow_encryption = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc "Whether new encrypted rooms can be created. Note: existing rooms will continue to work.";
+            };
+            global.allow_federation = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Whether this server federates with other servers.
+              '';
+            };
+            global.trusted_servers = mkOption {
+              type = types.listOf types.str;
+              default = [ "matrix.org" ];
+              description = lib.mdDoc "Servers trusted with signing server keys.";
+            };
+            global.address = mkOption {
+              type = types.str;
+              default = "::1";
+              description = lib.mdDoc "Address to listen on for connections by the reverse proxy/tls terminator.";
+            };
+            global.database_path = mkOption {
+              type = types.str;
+              default = "/var/lib/matrix-conduit/";
+              readOnly = true;
+              description = lib.mdDoc ''
+                Path to the conduit database, the directory where conduit will save its data.
+                Note that due to using the DynamicUser feature of systemd, this value should not be changed
+                and is set to be read only.
+              '';
+            };
+            global.database_backend = mkOption {
+              type = types.enum [ "sqlite" "rocksdb" ];
+              default = "sqlite";
+              example = "rocksdb";
+              description = lib.mdDoc ''
+                The database backend for the service. Switching it on an existing
+                instance will require manual migration of data.
+              '';
+            };
+            global.allow_check_for_updates = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether to allow Conduit to automatically contact
+                <https://conduit.rs> hourly to check for important Conduit news.
+
+                Disabled by default because nixpkgs handles updates.
+              '';
+            };
+          };
+        };
+        default = {};
+        description = lib.mdDoc ''
+            Generates the conduit.toml configuration file. Refer to
+            <https://gitlab.com/famedly/conduit/-/blob/master/conduit-example.toml>
+            for details on supported values.
+            Note that database_path cannot be changed because of the service's reliance on the systemd StateDirectory.
+        '';
+      };
+    };
+
+    config = mkIf cfg.enable {
+      systemd.services.conduit = {
+        description = "Conduit Matrix Server";
+        documentation = [ "https://gitlab.com/famedly/conduit/" ];
+        wantedBy = [ "multi-user.target" ];
+        environment = lib.mkMerge ([
+          { CONDUIT_CONFIG = configFile; }
+          cfg.extraEnvironment
+        ]);
+        serviceConfig = {
+          DynamicUser = true;
+          User = "conduit";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          PrivateDevices = true;
+          PrivateMounts = true;
+          PrivateUsers = true;
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [
+            "@system-service"
+            "~@privileged"
+          ];
+          StateDirectory = "matrix-conduit";
+          StateDirectoryMode = "0700";
+          ExecStart = "${cfg.package}/bin/conduit";
+          Restart = "on-failure";
+          RestartSec = 10;
+          StartLimitBurst = 5;
+          UMask = "077";
+        };
+      };
+    };
+  }
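
Example (not part of this change): a sketch of a minimal Conduit host. The server name is a placeholder; everything else keeps the defaults declared above.

```nix
{
  services.matrix-conduit = {
    enable = true;
    settings.global = {
      server_name = "example.org";   # placeholder; suffix of user and room ids
      allow_registration = false;    # matches the module default
      # database_path is read-only and fixed to the systemd StateDirectory.
    };
  };
}
```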
diff --git a/nixpkgs/nixos/modules/services/matrix/dendrite.nix b/nixpkgs/nixos/modules/services/matrix/dendrite.nix
new file mode 100644
index 000000000000..244c15fbf7a9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/dendrite.nix
@@ -0,0 +1,323 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.dendrite;
+  settingsFormat = pkgs.formats.yaml { };
+  configurationYaml = settingsFormat.generate "dendrite.yaml" cfg.settings;
+  workingDir = "/var/lib/dendrite";
+in
+{
+  options.services.dendrite = {
+    enable = lib.mkEnableOption (lib.mdDoc "matrix.org dendrite");
+    httpPort = lib.mkOption {
+      type = lib.types.nullOr lib.types.port;
+      default = 8008;
+      description = lib.mdDoc ''
+        The port to listen for HTTP requests on.
+      '';
+    };
+    httpsPort = lib.mkOption {
+      type = lib.types.nullOr lib.types.port;
+      default = null;
+      description = lib.mdDoc ''
+        The port to listen for HTTPS requests on.
+      '';
+    };
+    tlsCert = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      example = "/var/lib/dendrite/server.cert";
+      default = null;
+      description = lib.mdDoc ''
+        The path to the TLS certificate.
+
+        ```
+          nix-shell -p dendrite --command "generate-keys --tls-cert server.crt --tls-key server.key"
+        ```
+      '';
+    };
+    tlsKey = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      example = "/var/lib/dendrite/server.key";
+      default = null;
+      description = lib.mdDoc ''
+        The path to the TLS key.
+
+        ```
+          nix-shell -p dendrite --command "generate-keys --tls-cert server.crt --tls-key server.key"
+        ```
+      '';
+    };
+    environmentFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      example = "/var/lib/dendrite/registration_secret";
+      default = null;
+      description = lib.mdDoc ''
+        Environment file as defined in {manpage}`systemd.exec(5)`.
+        Secrets may be passed to the service without adding them to the world-readable
+        Nix store, by specifying placeholder variables as the option value in Nix and
+        setting these variables accordingly in the environment file. Currently only used
+        for the registration secret to allow secure registration when
+        client_api.registration_disabled is true.
+
+        ```
+          # snippet of dendrite-related config
+          services.dendrite.settings.client_api.registration_shared_secret = "$REGISTRATION_SHARED_SECRET";
+        ```
+
+        ```
+          # content of the environment file
+          REGISTRATION_SHARED_SECRET=verysecretpassword
+        ```
+
+        Note that this file needs to be available on the host on which
+        `dendrite` is running.
+      '';
+    };
+    loadCredential = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
+      default = [ ];
+      example = [ "private_key:/path/to/my_private_key" ];
+      description = lib.mdDoc ''
+        This can be used to pass secrets to the systemd service without adding them to
+        the nix store.
+        To use the example setting, see the example of
+        {option}`services.dendrite.settings.global.private_key`.
+        See the LoadCredential section of {manpage}`systemd.exec(5)` for more information.
+      '';
+    };
+    settings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+        options.global = {
+          server_name = lib.mkOption {
+            type = lib.types.str;
+            example = "example.com";
+            description = lib.mdDoc ''
+              The domain name of the server, with optional explicit port.
+              This is used by remote servers to connect to this server.
+              This is also the last part of your UserID.
+            '';
+          };
+          private_key = lib.mkOption {
+            type = lib.types.either
+              lib.types.path
+              (lib.types.strMatching "^\\$CREDENTIALS_DIRECTORY/.+");
+            example = "$CREDENTIALS_DIRECTORY/private_key";
+            description = lib.mdDoc ''
+              The path to the signing private key file, used to sign
+              requests and events.
+
+              ```
+                nix-shell -p dendrite --command "generate-keys --private-key matrix_key.pem"
+              ```
+            '';
+          };
+          trusted_third_party_id_servers = lib.mkOption {
+            type = lib.types.listOf lib.types.str;
+            example = [ "matrix.org" ];
+            default = [ "matrix.org" "vector.im" ];
+            description = lib.mdDoc ''
+              Lists of domains that the server will trust as identity
+              servers to verify third party identifiers such as phone
+              numbers and email addresses
+            '';
+          };
+        };
+        options.app_service_api.database = {
+          connection_string = lib.mkOption {
+            type = lib.types.str;
+            default = "file:federationapi.db";
+            description = lib.mdDoc ''
+              Database for the Appservice API.
+            '';
+          };
+        };
+        options.client_api = {
+          registration_disabled = lib.mkOption {
+            type = lib.types.bool;
+            default = true;
+            description = lib.mdDoc ''
+              Whether to disable user registration to the server
+              without the shared secret.
+            '';
+          };
+        };
+        options.federation_api.database = {
+          connection_string = lib.mkOption {
+            type = lib.types.str;
+            default = "file:federationapi.db";
+            description = lib.mdDoc ''
+              Database for the Federation API.
+            '';
+          };
+        };
+        options.key_server.database = {
+          connection_string = lib.mkOption {
+            type = lib.types.str;
+            default = "file:keyserver.db";
+            description = lib.mdDoc ''
+              Database for the Key Server (for end-to-end encryption).
+            '';
+          };
+        };
+        options.relay_api.database = {
+          connection_string = lib.mkOption {
+            type = lib.types.str;
+            default = "file:relayapi.db";
+            description = lib.mdDoc ''
+              Database for the Relay Server.
+            '';
+          };
+        };
+        options.media_api = {
+          database = {
+            connection_string = lib.mkOption {
+              type = lib.types.str;
+              default = "file:mediaapi.db";
+              description = lib.mdDoc ''
+                Database for the Media API.
+              '';
+            };
+          };
+          base_path = lib.mkOption {
+            type = lib.types.str;
+            default = "${workingDir}/media_store";
+            description = lib.mdDoc ''
+              Storage path for uploaded media.
+            '';
+          };
+        };
+        options.room_server.database = {
+          connection_string = lib.mkOption {
+            type = lib.types.str;
+            default = "file:roomserver.db";
+            description = lib.mdDoc ''
+              Database for the Room Server.
+            '';
+          };
+        };
+        options.sync_api.database = {
+          connection_string = lib.mkOption {
+            type = lib.types.str;
+            default = "file:syncserver.db";
+            description = lib.mdDoc ''
+              Database for the Sync API.
+            '';
+          };
+        };
+        options.sync_api.search = {
+          enable = lib.mkEnableOption (lib.mdDoc "Dendrite's full-text search engine");
+          index_path = lib.mkOption {
+            type = lib.types.str;
+            default = "${workingDir}/searchindex";
+            description = lib.mdDoc ''
+              The path the search index will be created in.
+            '';
+          };
+          language = lib.mkOption {
+            type = lib.types.str;
+            default = "en";
+            description = lib.mdDoc ''
+              The language most likely to be used on the server - used when indexing, to
+              ensure the returned results match expectations. A full list of possible languages
+              can be found at <https://github.com/blevesearch/bleve/tree/master/analysis/lang>.
+            '';
+          };
+        };
+        options.user_api = {
+          account_database = {
+            connection_string = lib.mkOption {
+              type = lib.types.str;
+              default = "file:userapi_accounts.db";
+              description = lib.mdDoc ''
+                Database for the User API, accounts.
+              '';
+            };
+          };
+          device_database = {
+            connection_string = lib.mkOption {
+              type = lib.types.str;
+              default = "file:userapi_devices.db";
+              description = lib.mdDoc ''
+                Database for the User API, devices.
+              '';
+            };
+          };
+        };
+        options.mscs = {
+          database = {
+            connection_string = lib.mkOption {
+              type = lib.types.str;
+              default = "file:mscs.db";
+              description = lib.mdDoc ''
+                Database for experimental MSCs.
+              '';
+            };
+          };
+        };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for dendrite, see:
+        <https://github.com/matrix-org/dendrite/blob/master/dendrite-config.yaml>
+        for available options with which to populate settings.
+      '';
+    };
+    openRegistration = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Allow open registration without secondary verification (reCAPTCHA).
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [{
+      assertion = cfg.httpsPort != null -> (cfg.tlsCert != null && cfg.tlsKey != null);
+      message = ''
+        If Dendrite is configured to use https, tlsCert and tlsKey must be provided.
+
+        nix-shell -p dendrite --command "generate-keys --tls-cert server.crt --tls-key server.key"
+      '';
+    }];
+
+    systemd.services.dendrite = {
+      description = "Dendrite Matrix homeserver";
+      after = [
+        "network.target"
+      ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        DynamicUser = true;
+        StateDirectory = "dendrite";
+        WorkingDirectory = workingDir;
+        RuntimeDirectory = "dendrite";
+        RuntimeDirectoryMode = "0700";
+        LimitNOFILE = 65535;
+        EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
+        LoadCredential = cfg.loadCredential;
+        ExecStartPre = [''
+          ${pkgs.envsubst}/bin/envsubst \
+            -i ${configurationYaml} \
+            -o /run/dendrite/dendrite.yaml
+        ''];
+        ExecStart = lib.strings.concatStringsSep " " ([
+          "${pkgs.dendrite}/bin/dendrite"
+          "--config /run/dendrite/dendrite.yaml"
+        ] ++ lib.optionals (cfg.httpPort != null) [
+          "--http-bind-address :${builtins.toString cfg.httpPort}"
+        ] ++ lib.optionals (cfg.httpsPort != null) [
+          "--https-bind-address :${builtins.toString cfg.httpsPort}"
+          "--tls-cert ${cfg.tlsCert}"
+          "--tls-key ${cfg.tlsKey}"
+        ] ++ lib.optionals cfg.openRegistration [
+          "--really-enable-open-registration"
+        ]);
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "on-failure";
+      };
+    };
+  };
+  meta.maintainers = lib.teams.matrix.members;
+}
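
Example (not part of this change): a sketch combining the loadCredential and environmentFile mechanisms described above, following the snippets in the option descriptions. The key path, server name and secret file location are placeholders.

```nix
{
  services.dendrite = {
    enable = true;
    # Private signing key passed via systemd credentials (placeholder path).
    loadCredential = [ "private_key:/var/keys/matrix_key.pem" ];
    # Defines REGISTRATION_SHARED_SECRET, substituted into the config by envsubst at start-up.
    environmentFile = "/var/lib/dendrite/registration_secret";
    settings = {
      global.server_name = "example.org";
      global.private_key = "$CREDENTIALS_DIRECTORY/private_key";
      client_api.registration_shared_secret = "$REGISTRATION_SHARED_SECRET";
    };
  };
}
```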
diff --git a/nixpkgs/nixos/modules/services/matrix/matrix-sliding-sync.nix b/nixpkgs/nixos/modules/services/matrix/matrix-sliding-sync.nix
new file mode 100644
index 000000000000..84bb38f35aeb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/matrix-sliding-sync.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.matrix-synapse.sliding-sync;
+in
+{
+  options.services.matrix-synapse.sliding-sync = {
+    enable = lib.mkEnableOption (lib.mdDoc "sliding sync");
+
+    package = lib.mkPackageOptionMD pkgs "matrix-sliding-sync" { };
+
+    settings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = with lib.types; attrsOf str;
+        options = {
+          SYNCV3_SERVER = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc ''
+              The destination homeserver to talk to, not including `/_matrix/`, e.g. `https://matrix.example.org`.
+            '';
+          };
+
+          SYNCV3_DB = lib.mkOption {
+            type = lib.types.str;
+            default = "postgresql:///matrix-sliding-sync?host=/run/postgresql";
+            description = lib.mdDoc ''
+              The postgres connection string.
+              Refer to <https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNSTRING>.
+            '';
+          };
+
+          SYNCV3_BINDADDR = lib.mkOption {
+            type = lib.types.str;
+            default = "127.0.0.1:8009";
+            example = "[::]:8008";
+            description = lib.mdDoc "The interface and port to listen on.";
+          };
+
+          SYNCV3_LOG_LEVEL = lib.mkOption {
+            type = lib.types.enum [ "trace" "debug" "info" "warn" "error" "fatal" ];
+            default = "info";
+            description = lib.mdDoc "The level of verbosity for messages logged.";
+          };
+        };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Freeform environment variables passed to the sliding sync proxy.
+        Refer to <https://github.com/matrix-org/sliding-sync#setup> for all supported values.
+      '';
+    };
+
+    createDatabase = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable and configure `services.postgresql` to ensure that the database user `matrix-sliding-sync`
+        and the database `matrix-sliding-sync` exist.
+      '';
+    };
+
+    environmentFile = lib.mkOption {
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Environment file as defined in {manpage}`systemd.exec(5)`.
+
+        This must contain the {env}`SYNCV3_SECRET` variable which should
+        be generated with {command}`openssl rand -hex 32`.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.postgresql = lib.optionalAttrs cfg.createDatabase {
+      enable = true;
+      ensureDatabases = [ "matrix-sliding-sync" ];
+      ensureUsers = [ {
+        name = "matrix-sliding-sync";
+        ensureDBOwnership = true;
+      } ];
+    };
+
+    systemd.services.matrix-sliding-sync = rec {
+      after =
+        lib.optional cfg.createDatabase "postgresql.service"
+        ++ lib.optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit;
+      wants = after;
+      wantedBy = [ "multi-user.target" ];
+      environment = cfg.settings;
+      serviceConfig = {
+        DynamicUser = true;
+        EnvironmentFile = cfg.environmentFile;
+        ExecStart = lib.getExe cfg.package;
+        StateDirectory = "matrix-sliding-sync";
+        WorkingDirectory = "%S/matrix-sliding-sync";
+        Restart = "on-failure";
+        RestartSec = "1s";
+      };
+    };
+  };
+}
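
Example (not part of this change): a sketch of the sliding-sync proxy pointed at a homeserver. The homeserver URL and secret file path are placeholders.

```nix
{
  services.matrix-synapse.sliding-sync = {
    enable = true;
    settings.SYNCV3_SERVER = "https://matrix.example.org";
    # Must define SYNCV3_SECRET (e.g. generated with `openssl rand -hex 32`); placeholder path.
    environmentFile = "/run/secrets/sliding-sync.env";
    # createDatabase defaults to true and provisions the matrix-sliding-sync PostgreSQL database.
  };
}
```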
diff --git a/nixpkgs/nixos/modules/services/matrix/mautrix-facebook.nix b/nixpkgs/nixos/modules/services/matrix/mautrix-facebook.nix
new file mode 100644
index 000000000000..d7cf024bb807
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/mautrix-facebook.nix
@@ -0,0 +1,200 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mautrix-facebook;
+  settingsFormat = pkgs.formats.json {};
+  settingsFile = settingsFormat.generate "mautrix-facebook-config.json" cfg.settings;
+
+  puppetRegex = concatStringsSep
+    ".*"
+    (map
+      escapeRegex
+      (splitString
+        "{userid}"
+        cfg.settings.bridge.username_template));
+in {
+  options = {
+    services.mautrix-facebook = {
+      enable = mkEnableOption (lib.mdDoc "Mautrix-Facebook, a Matrix-Facebook hybrid puppeting/relaybot bridge");
+
+      settings = mkOption rec {
+        apply = recursiveUpdate default;
+        type = settingsFormat.type;
+        default = {
+          homeserver = {
+            address = "http://localhost:8008";
+            software = "standard";
+          };
+
+          appservice = rec {
+            id = "facebook";
+            address = "http://${hostname}:${toString port}";
+            hostname = "localhost";
+            port = 29319;
+
+            database = "postgresql://";
+
+            bot_username = "facebookbot";
+          };
+
+          metrics.enabled = false;
+          manhole.enabled = false;
+
+          bridge = {
+            encryption = {
+              allow = true;
+              default = true;
+
+              verification_levels = {
+                receive = "cross-signed-tofu";
+                send = "cross-signed-tofu";
+                share = "cross-signed-tofu";
+              };
+            };
+            username_template = "facebook_{userid}";
+          };
+
+          logging = {
+            version = 1;
+            formatters.journal_fmt.format = "%(name)s: %(message)s";
+            handlers.journal = {
+              class = "systemd.journal.JournalHandler";
+              formatter = "journal_fmt";
+              SYSLOG_IDENTIFIER = "mautrix-facebook";
+            };
+            root = {
+              level = "INFO";
+              handlers = ["journal"];
+            };
+          };
+        };
+        example = literalExpression ''
+          {
+            homeserver = {
+              address = "http://localhost:8008";
+              domain = "mydomain.example";
+            };
+
+            bridge.permissions = {
+              "@admin:mydomain.example" = "admin";
+              "mydomain.example" = "user";
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          {file}`config.yaml` configuration as a Nix attribute set.
+          Configuration options should match those described in
+          [example-config.yaml](https://github.com/mautrix/facebook/blob/master/mautrix_facebook/example-config.yaml).
+
+          Secret tokens should be specified using {option}`environmentFile`
+          instead of this world-readable attribute set.
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          File containing environment variables to be passed to the mautrix-facebook service.
+
+          Any config value can be overridden via the environment: for example, setting `MAUTRIX_FACEBOOK_SOME_KEY` overrides the `some.key` config value.
+        '';
+      };
+
+      configurePostgresql = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable PostgreSQL and create a user and database for mautrix-facebook. The default `settings` reference this database; if you disable this option, you must provide a database URL.
+        '';
+      };
+
+      registrationData = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc ''
+          Output data for appservice registration. Simply make any desired changes and serialize it to JSON. Note that this data contains secrets, so think twice before putting it into the Nix store.
+
+          Currently `as_token` and `hs_token` need to be added as they are not known to this module.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.mautrix-facebook = {};
+
+    users.users.mautrix-facebook = {
+      group = "mautrix-facebook";
+      isSystemUser = true;
+    };
+
+    services.postgresql = mkIf cfg.configurePostgresql {
+      ensureDatabases = ["mautrix-facebook"];
+      ensureUsers = [{
+        name = "mautrix-facebook";
+        ensureDBOwnership = true;
+      }];
+    };
+
+    systemd.services.mautrix-facebook = rec {
+      wantedBy = [ "multi-user.target" ];
+      wants = [
+        "network-online.target"
+      ] ++ optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit
+        ++ optional cfg.configurePostgresql "postgresql.service";
+      after = wants;
+
+      serviceConfig = {
+        Type = "simple";
+        Restart = "always";
+
+        User = "mautrix-facebook";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        PrivateTmp = true;
+
+        EnvironmentFile = cfg.environmentFile;
+
+        ExecStart = ''
+          ${pkgs.mautrix-facebook}/bin/mautrix-facebook --config=${settingsFile}
+        '';
+      };
+    };
+
+    services.mautrix-facebook = {
+      registrationData = {
+        id = cfg.settings.appservice.id;
+
+        namespaces = {
+          users = [
+            {
+              exclusive = true;
+              regex = escapeRegex "@${cfg.settings.appservice.bot_username}:${cfg.settings.homeserver.domain}";
+            }
+            {
+              exclusive = true;
+              regex = "@${puppetRegex}:${escapeRegex cfg.settings.homeserver.domain}";
+            }
+          ];
+          aliases = [];
+        };
+
+        url = cfg.settings.appservice.address;
+        sender_localpart = "mautrix-facebook-sender";
+
+        rate_limited = false;
+        "de.sorunome.msc2409.push_ephemeral" = true;
+        push_ephemeral = true;
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ kevincox ];
+}
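
Example (not part of this change): a sketch of enabling the Facebook bridge. The domain, admin MXID and environment file path are placeholders.

```nix
{
  services.mautrix-facebook = {
    enable = true;
    settings = {
      homeserver.domain = "example.org";
      bridge.permissions = {
        "@admin:example.org" = "admin";
        "example.org" = "user";
      };
    };
    # Can override any config key, e.g. MAUTRIX_FACEBOOK_APPSERVICE_AS_TOKEN (placeholder path).
    environmentFile = "/run/secrets/mautrix-facebook.env";
  };
}
```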
diff --git a/nixpkgs/nixos/modules/services/matrix/mautrix-telegram.nix b/nixpkgs/nixos/modules/services/matrix/mautrix-telegram.nix
new file mode 100644
index 000000000000..168c8bf436ac
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/mautrix-telegram.nix
@@ -0,0 +1,196 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  dataDir = "/var/lib/mautrix-telegram";
+  registrationFile = "${dataDir}/telegram-registration.yaml";
+  cfg = config.services.mautrix-telegram;
+  settingsFormat = pkgs.formats.json {};
+  settingsFile =
+    settingsFormat.generate "mautrix-telegram-config.json" cfg.settings;
+
+in {
+  options = {
+    services.mautrix-telegram = {
+      enable = mkEnableOption (lib.mdDoc "Mautrix-Telegram, a Matrix-Telegram hybrid puppeting/relaybot bridge");
+
+      settings = mkOption rec {
+        apply = recursiveUpdate default;
+        inherit (settingsFormat) type;
+        default = {
+          homeserver = {
+            software = "standard";
+          };
+
+          appservice = rec {
+            database = "sqlite:///${dataDir}/mautrix-telegram.db";
+            database_opts = {};
+            hostname = "0.0.0.0";
+            port = 8080;
+            address = "http://localhost:${toString port}";
+          };
+
+          bridge = {
+            permissions."*" = "relaybot";
+            relaybot.whitelist = [ ];
+            double_puppet_server_map = {};
+            login_shared_secret_map = {};
+          };
+
+          logging = {
+            version = 1;
+
+            formatters.precise.format = "[%(levelname)s@%(name)s] %(message)s";
+
+            handlers.console = {
+              class = "logging.StreamHandler";
+              formatter = "precise";
+            };
+
+            loggers = {
+              mau.level = "INFO";
+              telethon.level = "INFO";
+
+              # prevent tokens from leaking in the logs:
+              # https://github.com/tulir/mautrix-telegram/issues/351
+              aiohttp.level = "WARNING";
+            };
+
+            # log to console/systemd instead of file
+            root = {
+              level = "INFO";
+              handlers = [ "console" ];
+            };
+          };
+        };
+        example = literalExpression ''
+          {
+            homeserver = {
+              address = "http://localhost:8008";
+              domain = "public-domain.tld";
+            };
+
+            appservice.public = {
+              prefix = "/public";
+              external = "https://public-appservice-address/public";
+            };
+
+            bridge.permissions = {
+              "example.com" = "full";
+              "@admin:example.com" = "admin";
+            };
+            telegram = {
+              connection.use_ipv6 = true;
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          {file}`config.yaml` configuration as a Nix attribute set.
+          Configuration options should match those described in
+          [example-config.yaml](https://github.com/mautrix/telegram/blob/master/mautrix_telegram/example-config.yaml).
+
+          Secret tokens should be specified using {option}`environmentFile`
+          instead of this world-readable attribute set.
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          File containing environment variables to be passed to the mautrix-telegram service,
+          in which secret tokens can be specified securely by defining values for e.g.
+          `MAUTRIX_TELEGRAM_APPSERVICE_AS_TOKEN`,
+          `MAUTRIX_TELEGRAM_APPSERVICE_HS_TOKEN`,
+          `MAUTRIX_TELEGRAM_TELEGRAM_API_ID`,
+          `MAUTRIX_TELEGRAM_TELEGRAM_API_HASH` and optionally
+          `MAUTRIX_TELEGRAM_TELEGRAM_BOT_TOKEN`.
+
+          These environment variables can also be used to set other options by
+          replacing hierarchy levels by `.`, converting the name to uppercase
+          and prepending `MAUTRIX_TELEGRAM_`.
+          For example, the first value above maps to
+          {option}`settings.appservice.as_token`.
+
+          The environment variable values can be prefixed with `json::` to have
+          them be parsed as JSON. For example, `login_shared_secret_map` can be
+          set as follows:
+          `MAUTRIX_TELEGRAM_BRIDGE_LOGIN_SHARED_SECRET_MAP=json::{"example.com":"secret"}`.
+        '';
+      };
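+
+      # Illustration only: an environment file referenced via the option above might
+      # contain, for example:
+      #   MAUTRIX_TELEGRAM_APPSERVICE_AS_TOKEN=...
+      #   MAUTRIX_TELEGRAM_APPSERVICE_HS_TOKEN=...
+      #   MAUTRIX_TELEGRAM_TELEGRAM_API_ID=...
+      #   MAUTRIX_TELEGRAM_TELEGRAM_API_HASH=...
+      #   MAUTRIX_TELEGRAM_BRIDGE_LOGIN_SHARED_SECRET_MAP=json::{"example.com":"secret"}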
+
+      serviceDependencies = mkOption {
+        type = with types; listOf str;
+        default = optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit;
+        defaultText = literalExpression ''
+          optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit
+        '';
+        description = lib.mdDoc ''
+          List of Systemd services to require and wait for when starting the application service.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.mautrix-telegram = {
+      description = "Mautrix-Telegram, a Matrix-Telegram hybrid puppeting/relaybot bridge.";
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
+      after = [ "network-online.target" ] ++ cfg.serviceDependencies;
+      path = [ pkgs.lottieconverter pkgs.ffmpeg-full ];
+
+      # mautrix-telegram tries to generate a dotfile in the home directory of
+      # the running user if using a postgresql database:
+      #
+      #  File "python3.10/site-packages/asyncpg/connect_utils.py", line 257, in _dot_postgre>
+      #    return (pathlib.Path.home() / '.postgresql' / filename).resolve()
+      #  File "python3.10/pathlib.py", line 1000, in home
+      #    return cls("~").expanduser()
+      #  File "python3.10/pathlib.py", line 1440, in expanduser
+      #    raise RuntimeError("Could not determine home directory.")
+      # RuntimeError: Could not determine home directory.
+      environment.HOME = dataDir;
+
+      preStart = ''
+        # generate the appservice's registration file if absent
+        if [ ! -f '${registrationFile}' ]; then
+          ${pkgs.mautrix-telegram}/bin/mautrix-telegram \
+            --generate-registration \
+            --config='${settingsFile}' \
+            --registration='${registrationFile}'
+        fi
+      '' + lib.optionalString (pkgs.mautrix-telegram ? alembic) ''
+        # run automatic database init and migration scripts
+        ${pkgs.mautrix-telegram.alembic}/bin/alembic -x config='${settingsFile}' upgrade head
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        Restart = "always";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+
+        DynamicUser = true;
+        PrivateTmp = true;
+        WorkingDirectory = pkgs.mautrix-telegram; # necessary for the database migration scripts to be found
+        StateDirectory = baseNameOf dataDir;
+        UMask = "0027";
+        EnvironmentFile = cfg.environmentFile;
+
+        ExecStart = ''
+          ${pkgs.mautrix-telegram}/bin/mautrix-telegram \
+            --config='${settingsFile}'
+        '';
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ pacien vskilet ];
+}
diff --git a/nixpkgs/nixos/modules/services/matrix/mautrix-whatsapp.nix b/nixpkgs/nixos/modules/services/matrix/mautrix-whatsapp.nix
new file mode 100644
index 000000000000..4b561a4b07a3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/mautrix-whatsapp.nix
@@ -0,0 +1,205 @@
+{
+  lib,
+  config,
+  pkgs,
+  ...
+}: let
+  cfg = config.services.mautrix-whatsapp;
+  dataDir = "/var/lib/mautrix-whatsapp";
+  registrationFile = "${dataDir}/whatsapp-registration.yaml";
+  settingsFile = "${dataDir}/config.json";
+  settingsFileUnsubstituted = settingsFormat.generate "mautrix-whatsapp-config-unsubstituted.json" cfg.settings;
+  settingsFormat = pkgs.formats.json {};
+  appservicePort = 29318;
+
+  mkDefaults = lib.mapAttrsRecursive (n: v: lib.mkDefault v);
+  defaultConfig = {
+    homeserver.address = "http://localhost:8448";
+    appservice = {
+      hostname = "[::]";
+      port = appservicePort;
+      database.type = "sqlite3";
+      database.uri = "${dataDir}/mautrix-whatsapp.db";
+      id = "whatsapp";
+      bot.username = "whatsappbot";
+      bot.displayname = "WhatsApp Bridge Bot";
+      as_token = "";
+      hs_token = "";
+    };
+    bridge = {
+      username_template = "whatsapp_{{.}}";
+      displayname_template = "{{if .BusinessName}}{{.BusinessName}}{{else if .PushName}}{{.PushName}}{{else}}{{.JID}}{{end}} (WA)";
+      double_puppet_server_map = {};
+      login_shared_secret_map = {};
+      command_prefix = "!wa";
+      permissions."*" = "relay";
+      relay.enabled = true;
+    };
+    logging = {
+      min_level = "info";
+      writers = lib.singleton {
+        type = "stdout";
+        format = "pretty-colored";
+        time_format = " ";
+      };
+    };
+  };
+
+in {
+  options.services.mautrix-whatsapp = {
+    enable = lib.mkEnableOption (lib.mdDoc "mautrix-whatsapp, a puppeting/relaybot bridge between Matrix and WhatsApp.");
+
+    settings = lib.mkOption {
+      type = settingsFormat.type;
+      default = defaultConfig;
+      description = lib.mdDoc ''
+        {file}`config.yaml` configuration as a Nix attribute set.
+        Configuration options should match those described in
+        [example-config.yaml](https://github.com/mautrix/whatsapp/blob/master/example-config.yaml).
+        Secret tokens should be specified using {option}`environmentFile`
+        instead of this world-readable attribute set.
+      '';
+      example = {
+        appservice = {
+          database = {
+            type = "postgres";
+            uri = "postgresql:///mautrix_whatsapp?host=/run/postgresql";
+          };
+          id = "whatsapp";
+          ephemeral_events = false;
+        };
+        bridge = {
+          history_sync = {
+            request_full_sync = true;
+          };
+          private_chat_portal_meta = true;
+          mute_bridging = true;
+          encryption = {
+            allow = true;
+            default = true;
+            require = true;
+          };
+          provisioning = {
+            shared_secret = "disable";
+          };
+          permissions = {
+            "example.com" = "user";
+          };
+        };
+      };
+    };
+    environmentFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = lib.mdDoc ''
+        File containing environment variables to be passed to the mautrix-whatsapp service,
+        in which secret tokens can be specified securely by optionally defining a value for
+        `MAUTRIX_WHATSAPP_BRIDGE_LOGIN_SHARED_SECRET`.
+      '';
+    };
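+
+    # Sketch only: because the generated config is passed through `envsubst` in the
+    # unit's preStart (see below), such a variable can be referenced from `settings`,
+    # e.g.
+    #   bridge.login_shared_secret_map."example.com" =
+    #     "$MAUTRIX_WHATSAPP_BRIDGE_LOGIN_SHARED_SECRET";
+    # with the actual value supplied through `environmentFile`.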
+
+    serviceDependencies = lib.mkOption {
+      type = with lib.types; listOf str;
+      default = lib.optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit;
+      defaultText = lib.literalExpression ''
+        optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit
+      '';
+      description = lib.mdDoc ''
+        List of Systemd services to require and wait for when starting the application service.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    users.users.mautrix-whatsapp = {
+      isSystemUser = true;
+      group = "mautrix-whatsapp";
+      home = dataDir;
+      description = "Mautrix-WhatsApp bridge user";
+    };
+
+    users.groups.mautrix-whatsapp = {};
+
+    services.mautrix-whatsapp.settings = lib.mkMerge (map mkDefaults [
+      defaultConfig
+      # Note: this is defined here to avoid the docs depending on `config`
+      { homeserver.domain = config.services.matrix-synapse.settings.server_name; }
+    ]);
+
+    systemd.services.mautrix-whatsapp = {
+      description = "Mautrix-WhatsApp Service - A WhatsApp bridge for Matrix";
+
+      wantedBy = ["multi-user.target"];
+      wants = ["network-online.target"] ++ cfg.serviceDependencies;
+      after = ["network-online.target"] ++ cfg.serviceDependencies;
+
+      preStart = ''
+        # substitute the settings file by environment variables
+        # in this case read from EnvironmentFile
+        test -f '${settingsFile}' && rm -f '${settingsFile}'
+        old_umask=$(umask)
+        umask 0177
+        ${pkgs.envsubst}/bin/envsubst \
+          -o '${settingsFile}' \
+          -i '${settingsFileUnsubstituted}'
+        umask $old_umask
+
+        # generate the appservice's registration file if absent
+        if [ ! -f '${registrationFile}' ]; then
+          ${pkgs.mautrix-whatsapp}/bin/mautrix-whatsapp \
+            --generate-registration \
+            --config='${settingsFile}' \
+            --registration='${registrationFile}'
+        fi
+        chmod 640 ${registrationFile}
+
+        umask 0177
+        ${pkgs.yq}/bin/yq -s '.[0].appservice.as_token = .[1].as_token
+          | .[0].appservice.hs_token = .[1].hs_token
+          | .[0]' '${settingsFile}' '${registrationFile}' \
+          > '${settingsFile}.tmp'
+        mv '${settingsFile}.tmp' '${settingsFile}'
+        umask $old_umask
+      '';
+
+      serviceConfig = {
+        User = "mautrix-whatsapp";
+        Group = "mautrix-whatsapp";
+        EnvironmentFile = cfg.environmentFile;
+        StateDirectory = baseNameOf dataDir;
+        WorkingDirectory = dataDir;
+        ExecStart = ''
+          ${pkgs.mautrix-whatsapp}/bin/mautrix-whatsapp \
+          --config='${settingsFile}' \
+          --registration='${registrationFile}'
+        '';
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        Restart = "on-failure";
+        RestartSec = "30s";
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = ["@system-service"];
+        Type = "simple";
+        UMask = "0027";
+      };
+      restartTriggers = [settingsFileUnsubstituted];
+    };
+  };
+  meta.maintainers = with lib.maintainers; [frederictobiasc];
+}
diff --git a/nixpkgs/nixos/modules/services/matrix/mjolnir.md b/nixpkgs/nixos/modules/services/matrix/mjolnir.md
new file mode 100644
index 000000000000..f6994eeb8fa5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/mjolnir.md
@@ -0,0 +1,110 @@
+# Mjolnir (Matrix Moderation Tool) {#module-services-mjolnir}
+
+This chapter will show you how to set up your own, self-hosted
+[Mjolnir](https://github.com/matrix-org/mjolnir) instance.
+
+As an all-in-one moderation tool, it can protect your server from
+malicious invites, spam messages, and whatever else you don't want.
+In addition to server-level protection, Mjolnir is great for communities
+wanting to protect their rooms without having to use their personal
+accounts for moderation.
+
+The bot by default includes support for bans, redactions, anti-spam,
+server ACLs, room directory changes, room alias transfers, account
+deactivation, room shutdown, and more.
+
+See the [README](https://github.com/matrix-org/mjolnir#readme)
+page and the [Moderator's guide](https://github.com/matrix-org/mjolnir/blob/main/docs/moderators.md)
+for additional instructions on how to set up and use Mjolnir.
+
+For [additional settings](#opt-services.mjolnir.settings)
+see [the default configuration](https://github.com/matrix-org/mjolnir/blob/main/config/default.yaml).
+
+## Mjolnir Setup {#module-services-mjolnir-setup}
+
+First create a new Room which will be used as a management room for Mjolnir. In
+this room, Mjolnir will log possible errors and debugging information. You'll
+need to set this Room-ID in [services.mjolnir.managementRoom](#opt-services.mjolnir.managementRoom).
+
+Next, create a new user for Mjolnir on your homeserver, if not present already.
+
+Mjolnir expects its Matrix user to be exempt from rate limiting.
+See [Synapse #6286](https://github.com/matrix-org/synapse/issues/6286)
+for an example of how to achieve this.
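+
+One workaround that has commonly been used with Synapse (shown here only as an
+illustration; check the linked issue and the current Synapse admin documentation
+for the recommended approach) is to insert an override row directly into the
+Synapse database, e.g. for the user `@mjolnir:example.org`:
+
+```
+INSERT INTO ratelimit_override VALUES ('@mjolnir:example.org', 0, 0);
+```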
+
+If you want Mjolnir to be able to deactivate users, move room aliases, shutdown rooms, etc.
+you'll need to make the Mjolnir user a Matrix server admin.
+
+Now invite the Mjolnir user to the management room.
+
+It is recommended to use [Pantalaimon](https://github.com/matrix-org/pantalaimon),
+so your management room can be encrypted. This also applies if you are looking to moderate an encrypted room.
+
+To enable the Pantalaimon E2E Proxy for mjolnir, enable
+[services.mjolnir.pantalaimon](#opt-services.mjolnir.pantalaimon.enable). This will
+autoconfigure a new Pantalaimon instance, which will connect to the homeserver
+set in [services.mjolnir.homeserverUrl](#opt-services.mjolnir.homeserverUrl) and Mjolnir itself
+will be configured to connect to the new Pantalaimon instance.
+
+```
+{
+  services.mjolnir = {
+    enable = true;
+    homeserverUrl = "https://matrix.domain.tld";
+    pantalaimon = {
+       enable = true;
+       username = "mjolnir";
+       passwordFile = "/run/secrets/mjolnir-password";
+    };
+    protectedRooms = [
+      "https://matrix.to/#/!xxx:domain.tld"
+    ];
+    managementRoom = "!yyy:domain.tld";
+  };
+}
+```
+
+### Element Matrix Services (EMS) {#module-services-mjolnir-setup-ems}
+
+If you are using a managed ["Element Matrix Services (EMS)"](https://ems.element.io/)
+server, you will need to consent to the terms and conditions. Upon startup, an error
+log entry with a URL to the consent page will be generated.
+
+## Synapse Antispam Module {#module-services-mjolnir-matrix-synapse-antispam}
+
+A Synapse module is also available to apply the same rulesets the bot
+uses across an entire homeserver.
+
+To use the Antispam Module, add `matrix-synapse-plugins.matrix-synapse-mjolnir-antispam`
+to the Synapse plugin list and enable the `mjolnir.Module` module.
+
+```
+{
+  services.matrix-synapse = {
+    plugins = with pkgs; [
+      matrix-synapse-plugins.matrix-synapse-mjolnir-antispam
+    ];
+    extraConfig = ''
+      modules:
+        - module: mjolnir.Module
+          config:
+            # Prevent servers/users in the ban lists from inviting users on this
+            # server to rooms. Default true.
+            block_invites: true
+            # Flag messages sent by servers/users in the ban lists as spam. Currently
+            # this means that spammy messages will appear as empty to users. Default
+            # false.
+            block_messages: false
+            # Remove users from the user directory search by filtering matrix IDs and
+            # display names by the entries in the user ban list. Default false.
+            block_usernames: false
+            # The room IDs of the ban lists to honour. Unlike other parts of Mjolnir,
+            # this list cannot be room aliases or permalinks. This server is expected
+            # to already be joined to the room - Mjolnir will not automatically join
+            # these rooms.
+            ban_lists:
+              - "!roomid:example.org"
+    '';
+  };
+}
+```
diff --git a/nixpkgs/nixos/modules/services/matrix/mjolnir.nix b/nixpkgs/nixos/modules/services/matrix/mjolnir.nix
new file mode 100644
index 000000000000..4e9a915c23c7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/mjolnir.nix
@@ -0,0 +1,242 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.mjolnir;
+
+  yamlConfig = {
+    inherit (cfg) dataPath managementRoom protectedRooms;
+
+    accessToken = "@ACCESS_TOKEN@"; # will be replaced in "generateConfig"
+    homeserverUrl =
+      if cfg.pantalaimon.enable then
+        "http://${cfg.pantalaimon.options.listenAddress}:${toString cfg.pantalaimon.options.listenPort}"
+      else
+        cfg.homeserverUrl;
+
+    rawHomeserverUrl = cfg.homeserverUrl;
+
+    pantalaimon = {
+      inherit (cfg.pantalaimon) username;
+
+      use = cfg.pantalaimon.enable;
+      password = "@PANTALAIMON_PASSWORD@"; # will be replaced in "generateConfig"
+    };
+  };
+
+  moduleConfigFile = pkgs.writeText "module-config.yaml" (
+    generators.toYAML { } (filterAttrs (_: v: v != null)
+      (fold recursiveUpdate { } [ yamlConfig cfg.settings ])));
+
+  # these config files will be merged one after the other to build the final config
+  configFiles = [
+    "${pkgs.mjolnir}/libexec/mjolnir/deps/mjolnir/config/default.yaml"
+    moduleConfigFile
+  ];
+
+  # this will generate the default.yaml file with all configFiles as inputs and
+  # replace all secret strings using replace-secret
+  generateConfig = pkgs.writeShellScript "mjolnir-generate-config" (
+    let
+      yqEvalStr = concatImapStringsSep " * " (pos: _: "select(fileIndex == ${toString (pos - 1)})") configFiles;
+      yqEvalArgs = concatStringsSep " " configFiles;
+    in
+    ''
+      set -euo pipefail
+
+      umask 077
+
+      # mjolnir will try to load a config from "./config/default.yaml" in the working directory
+      # -> let's place the generated config there
+      mkdir -p ${cfg.dataPath}/config
+
+      # merge all config files into one, overriding settings of the previous one with the next config
+      # e.g. "eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' filea.yaml fileb.yaml" will merge filea.yaml with fileb.yaml
+      ${pkgs.yq-go}/bin/yq eval-all -P '${yqEvalStr}' ${yqEvalArgs} > ${cfg.dataPath}/config/default.yaml
+
+      ${optionalString (cfg.accessTokenFile != null) ''
+        ${pkgs.replace-secret}/bin/replace-secret '@ACCESS_TOKEN@' '${cfg.accessTokenFile}' ${cfg.dataPath}/config/default.yaml
+      ''}
+      ${optionalString (cfg.pantalaimon.passwordFile != null) ''
+        ${pkgs.replace-secret}/bin/replace-secret '@PANTALAIMON_PASSWORD@' '${cfg.pantalaimon.passwordFile}' ${cfg.dataPath}/config/default.yaml
+      ''}
+    ''
+  );
+in
+{
+  options.services.mjolnir = {
+    enable = mkEnableOption (lib.mdDoc "Mjolnir, a moderation tool for Matrix");
+
+    homeserverUrl = mkOption {
+      type = types.str;
+      default = "https://matrix.org";
+      description = lib.mdDoc ''
+        Where the homeserver is located (client-server URL).
+
+        If `pantalaimon.enable` is `true`, this option will become the homeserver to which `pantalaimon` connects.
+        The listen address of `pantalaimon` will then become the `homeserverUrl` of `mjolnir`.
+      '';
+    };
+
+    accessTokenFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = lib.mdDoc ''
+        File containing the matrix access token for the `mjolnir` user.
+      '';
+    };
+
+    pantalaimon = mkOption {
+      description = lib.mdDoc ''
+        `pantalaimon` options (enables E2E Encryption support).
+
+        This will create a `pantalaimon` instance with the name "mjolnir".
+      '';
+      default = { };
+      type = types.submodule {
+        options = {
+          enable = mkEnableOption (lib.mdDoc ''
+            ignoring the accessToken. If true, accessToken is ignored and the username/password below will be
+            used instead. The access token of the bot will be stored in the dataPath.
+          '');
+
+          username = mkOption {
+            type = types.str;
+            description = lib.mdDoc "The username to login with.";
+          };
+
+          passwordFile = mkOption {
+            type = with types; nullOr path;
+            default = null;
+            description = lib.mdDoc ''
+              File containing the matrix password for the `mjolnir` user.
+            '';
+          };
+
+          options = mkOption {
+            type = types.submodule (import ./pantalaimon-options.nix);
+            default = { };
+            description = lib.mdDoc ''
+              passthrough additional options to the `pantalaimon` service.
+            '';
+          };
+        };
+      };
+    };
+
+    dataPath = mkOption {
+      type = types.path;
+      default = "/var/lib/mjolnir";
+      description = lib.mdDoc ''
+        The directory the bot should store various bits of information in.
+      '';
+    };
+
+    managementRoom = mkOption {
+      type = types.str;
+      default = "#moderators:example.org";
+      description = lib.mdDoc ''
+        The room ID where people can use the bot. The bot has no access controls, so
+        anyone in this room can use the bot - secure your room!
+        This should be a room alias or room ID - not a matrix.to URL.
+        Note: `mjolnir` is fairly verbose - expect a lot of messages from it.
+      '';
+    };
+
+    protectedRooms = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = literalExpression ''
+        [
+          "https://matrix.to/#/#yourroom:example.org"
+          "https://matrix.to/#/#anotherroom:example.org"
+        ]
+      '';
+      description = lib.mdDoc ''
+        A list of rooms to protect (matrix.to URLs).
+      '';
+    };
+
+    settings = mkOption {
+      default = { };
+      type = (pkgs.formats.yaml { }).type;
+      example = literalExpression ''
+        {
+          autojoinOnlyIfManager = true;
+          automaticallyRedactForReasons = [ "spam" "advertising" ];
+        }
+      '';
+      description = lib.mdDoc ''
+        Additional settings (see [mjolnir default config](https://github.com/matrix-org/mjolnir/blob/main/config/default.yaml) for available settings). These settings will override settings made by the module config.
+      '';
+    };
+  };
+
+  config = mkIf config.services.mjolnir.enable {
+    assertions = [
+      {
+        assertion = !(cfg.pantalaimon.enable && cfg.pantalaimon.passwordFile == null);
+        message = "Specify pantalaimon.passwordFile";
+      }
+      {
+        assertion = !(cfg.pantalaimon.enable && cfg.accessTokenFile != null);
+        message = "Do not specify accessTokenFile when using pantalaimon";
+      }
+      {
+        assertion = !(!cfg.pantalaimon.enable && cfg.accessTokenFile == null);
+        message = "Specify accessTokenFile when not using pantalaimon";
+      }
+    ];
+
+    services.pantalaimon-headless.instances."mjolnir" = mkIf cfg.pantalaimon.enable
+      {
+        homeserver = cfg.homeserverUrl;
+      } // cfg.pantalaimon.options;
+
+    systemd.services.mjolnir = {
+      description = "mjolnir - a moderation tool for Matrix";
+      wants = [ "network-online.target" ] ++ optionals (cfg.pantalaimon.enable) [ "pantalaimon-mjolnir.service" ];
+      after = [ "network-online.target" ] ++ optionals (cfg.pantalaimon.enable) [ "pantalaimon-mjolnir.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = ''${pkgs.mjolnir}/bin/mjolnir --mjolnir-config ./config/default.yaml'';
+        ExecStartPre = [ generateConfig ];
+        WorkingDirectory = cfg.dataPath;
+        StateDirectory = "mjolnir";
+        StateDirectoryMode = "0700";
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        User = "mjolnir";
+        Restart = "on-failure";
+
+        /* TODO: wait for #102397 to be resolved. Then load secrets from $CREDENTIALS_DIRECTORY+"/NAME"
+        DynamicUser = true;
+        LoadCredential = [] ++
+          optionals (cfg.accessTokenFile != null) [
+            "access_token:${cfg.accessTokenFile}"
+          ] ++
+          optionals (cfg.pantalaimon.passwordFile != null) [
+            "pantalaimon_password:${cfg.pantalaimon.passwordFile}"
+          ];
+        */
+      };
+    };
+
+    users = {
+      users.mjolnir = {
+        group = "mjolnir";
+        isSystemUser = true;
+      };
+      groups.mjolnir = { };
+    };
+  };
+
+  meta = {
+    doc = ./mjolnir.md;
+    maintainers = with maintainers; [ jojosch ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/matrix/mx-puppet-discord.nix b/nixpkgs/nixos/modules/services/matrix/mx-puppet-discord.nix
new file mode 100644
index 000000000000..70828804b556
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/mx-puppet-discord.nix
@@ -0,0 +1,122 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  dataDir = "/var/lib/mx-puppet-discord";
+  registrationFile = "${dataDir}/discord-registration.yaml";
+  cfg = config.services.mx-puppet-discord;
+  settingsFormat = pkgs.formats.json {};
+  settingsFile = settingsFormat.generate "mx-puppet-discord-config.json" cfg.settings;
+
+in {
+  options = {
+    services.mx-puppet-discord = {
+      enable = mkEnableOption (lib.mdDoc ''
+        mx-puppet-discord, a Discord puppeting bridge for Matrix.
+        It handles bridging private and group DMs, as well as Guilds (servers).
+      '');
+
+      settings = mkOption rec {
+        apply = recursiveUpdate default;
+        inherit (settingsFormat) type;
+        default = {
+          bridge.port = 8434;
+          presence = {
+            enabled = true;
+            interval = 500;
+          };
+          provisioning.whitelist = [ ];
+          relay.whitelist = [ ];
+
+          # variables are preceded by a colon.
+          namePatterns = {
+            user = ":name";
+            userOverride = ":displayname";
+            room = ":name";
+            group = ":name";
+          };
+
+          #defaults to sqlite but can be configured to use postgresql with
+          #connstring
+          database.filename = "${dataDir}/database.db";
+          logging = {
+            console = "info";
+            lineDateFormat = "MMM-D HH:mm:ss.SSS";
+          };
+        };
+        example = literalExpression ''
+          {
+            bridge = {
+              bindAddress = "localhost";
+              domain = "example.com";
+              homeserverUrl = "https://example.com";
+            };
+
+            provisioning.whitelist = [ "@admin:example.com" ];
+            relay.whitelist = [ "@.*:example.com" ];
+          }
+        '';
+        description = lib.mdDoc ''
+          {file}`config.yaml` configuration as a Nix attribute set.
+          Configuration options should match those described in
+          [sample.config.yaml](https://github.com/matrix-discord/mx-puppet-discord/blob/master/sample.config.yaml).
+        '';
+      };
+      serviceDependencies = mkOption {
+        type = with types; listOf str;
+        default = optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit;
+        defaultText = literalExpression ''
+          optional config.services.matrix-synapse.enable config.services.matrix-synapse.serviceUnit
+        '';
+        description = lib.mdDoc ''
+          List of Systemd services to require and wait for when starting the application service.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.mx-puppet-discord = {
+      description = "Matrix to Discord puppeting bridge";
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ] ++ cfg.serviceDependencies;
+      after = [ "network-online.target" ] ++ cfg.serviceDependencies;
+
+      preStart = ''
+        # generate the appservice's registration file if absent
+        if [ ! -f '${registrationFile}' ]; then
+          ${pkgs.mx-puppet-discord}/bin/mx-puppet-discord -r -c ${settingsFile} \
+          -f ${registrationFile}
+        fi
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        Restart = "always";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+
+        DynamicUser = true;
+        PrivateTmp = true;
+        WorkingDirectory = pkgs.mx-puppet-discord;
+        StateDirectory = baseNameOf dataDir;
+        UMask = "0027";
+
+        ExecStart = ''
+          ${pkgs.mx-puppet-discord}/bin/mx-puppet-discord \
+            -c ${settingsFile} \
+            -f ${registrationFile}
+        '';
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ govanify ];
+}
diff --git a/nixpkgs/nixos/modules/services/matrix/pantalaimon-options.nix b/nixpkgs/nixos/modules/services/matrix/pantalaimon-options.nix
new file mode 100644
index 000000000000..3945a70fc86b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/pantalaimon-options.nix
@@ -0,0 +1,70 @@
+{ config, lib, name, ... }:
+
+with lib;
+{
+  options = {
+    dataPath = mkOption {
+      type = types.path;
+      default = "/var/lib/pantalaimon-${name}";
+      description = lib.mdDoc ''
+        The directory where `pantalaimon` should store its state such as the database file.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum [ "info" "warning" "error" "debug" ];
+      default = "warning";
+      description = lib.mdDoc ''
+        Set the log level of the daemon.
+      '';
+    };
+
+    homeserver = mkOption {
+      type = types.str;
+      example = "https://matrix.org";
+      description = lib.mdDoc ''
+        The URI of the homeserver that the `pantalaimon` proxy should
+        forward requests to, without the matrix API path but including
+        the http(s) schema.
+      '';
+    };
+
+    ssl = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether or not SSL verification should be enabled for outgoing
+        connections to the homeserver.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc ''
+        The address where the daemon will listen to client connections
+        for this homeserver.
+      '';
+    };
+
+    listenPort = mkOption {
+      type = types.port;
+      default = 8009;
+      description = lib.mdDoc ''
+        The port where the daemon will listen to client connections for
+        this homeserver. Note that the listen address/port combination
+        needs to be unique between different homeservers.
+      '';
+    };
+
+    extraSettings = mkOption {
+      type = types.attrs;
+      default = { };
+      description = lib.mdDoc ''
+        Extra configuration options. See
+        [pantalaimon(5)](https://github.com/matrix-org/pantalaimon/blob/master/docs/man/pantalaimon.5.md)
+        for available options.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/matrix/pantalaimon.nix b/nixpkgs/nixos/modules/services/matrix/pantalaimon.nix
new file mode 100644
index 000000000000..591ba9a7ab55
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/pantalaimon.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.pantalaimon-headless;
+
+  iniFmt = pkgs.formats.ini { };
+
+  mkConfigFile = name: instanceConfig: iniFmt.generate "pantalaimon.conf" {
+    Default = {
+      LogLevel = instanceConfig.logLevel;
+      Notifications = false;
+    };
+
+    ${name} = (recursiveUpdate
+      {
+        Homeserver = instanceConfig.homeserver;
+        ListenAddress = instanceConfig.listenAddress;
+        ListenPort = instanceConfig.listenPort;
+        SSL = instanceConfig.ssl;
+
+        # Set some settings to prevent user interaction for headless operation
+        IgnoreVerification = true;
+        UseKeyring = false;
+      }
+      instanceConfig.extraSettings
+    );
+  };
+
+  mkPantalaimonService = name: instanceConfig:
+    nameValuePair "pantalaimon-${name}" {
+      description = "pantalaimon instance ${name} - E2EE aware proxy daemon for matrix clients";
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = ''${pkgs.pantalaimon-headless}/bin/pantalaimon --config ${mkConfigFile name instanceConfig} --data-path ${instanceConfig.dataPath}'';
+        Restart = "on-failure";
+        DynamicUser = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectSystem = "strict";
+        StateDirectory = "pantalaimon-${name}";
+      };
+    };
+in
+{
+  options.services.pantalaimon-headless.instances = mkOption {
+    default = { };
+    type = types.attrsOf (types.submodule (import ./pantalaimon-options.nix));
+    description = lib.mdDoc ''
+      Declarative instance config.
+
+      Note: to use pantalaimon interactively, e.g. for a Matrix client which does not
+      support End-to-end encryption (like `fractal`), refer to the home-manager module.
+    '';
+  };
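+
+  # A minimal sketch (hypothetical values): a single headless instance could be
+  # declared as
+  #   services.pantalaimon-headless.instances.example = {
+  #     homeserver = "https://matrix.example.org";
+  #   };
+  # which results in a systemd unit named `pantalaimon-example.service`.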
+
+  config = mkIf (config.services.pantalaimon-headless.instances != { })
+    {
+      systemd.services = mapAttrs' mkPantalaimonService config.services.pantalaimon-headless.instances;
+    };
+
+  meta = {
+    maintainers = with maintainers; [ jojosch ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/matrix/synapse.md b/nixpkgs/nixos/modules/services/matrix/synapse.md
new file mode 100644
index 000000000000..58be24204fcf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/synapse.md
@@ -0,0 +1,219 @@
+# Matrix {#module-services-matrix}
+
+[Matrix](https://matrix.org/) is an open standard for
+interoperable, decentralised, real-time communication over IP. It can be used
+to power Instant Messaging, VoIP/WebRTC signalling, Internet of Things
+communication - or anywhere you need a standard HTTP API for publishing and
+subscribing to data whilst tracking the conversation history.
+
+This chapter will show you how to set up your own, self-hosted Matrix
+homeserver using the Synapse reference homeserver, and how to serve your own
+copy of the Element web client. See the
+[Try Matrix Now!](https://matrix.org/docs/projects/try-matrix-now.html)
+overview page for links to Element Apps for Android and iOS,
+desktop clients, as well as bridges to other networks and other projects
+around Matrix.
+
+## Synapse Homeserver {#module-services-matrix-synapse}
+
+[Synapse](https://github.com/matrix-org/synapse) is
+the reference homeserver implementation of Matrix from the core development
+team at matrix.org. The following configuration example will set up a
+synapse server for the `example.org` domain, served from
+the host `myhostname.example.org`. For more information,
+please refer to the
+[installation instructions of Synapse](https://matrix-org.github.io/synapse/latest/setup/installation.html) .
+```
+{ pkgs, lib, config, ... }:
+let
+  fqdn = "${config.networking.hostName}.${config.networking.domain}";
+  baseUrl = "https://${fqdn}";
+  clientConfig."m.homeserver".base_url = baseUrl;
+  serverConfig."m.server" = "${fqdn}:443";
+  mkWellKnown = data: ''
+    default_type application/json;
+    add_header Access-Control-Allow-Origin *;
+    return 200 '${builtins.toJSON data}';
+  '';
+in {
+  networking.hostName = "myhostname";
+  networking.domain = "example.org";
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+
+  services.postgresql.enable = true;
+  services.postgresql.initialScript = pkgs.writeText "synapse-init.sql" ''
+    CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
+    CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
+      TEMPLATE template0
+      LC_COLLATE = "C"
+      LC_CTYPE = "C";
+  '';
+
+  services.nginx = {
+    enable = true;
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+    recommendedProxySettings = true;
+    virtualHosts = {
+      # If the A and AAAA DNS records on example.org do not point on the same host as the
+      # records for myhostname.example.org, you can easily move the /.well-known
+      # virtualHost section of the code to the host that is serving example.org, while
+      # the rest stays on myhostname.example.org with no other changes required.
+      # This pattern also makes it possible to seamlessly move the homeserver from
+      # myhostname.example.org to myotherhost.example.org by only changing the
+      # /.well-known redirection target.
+      "${config.networking.domain}" = {
+        enableACME = true;
+        forceSSL = true;
+        # This section is not needed if the server_name of matrix-synapse is equal to
+        # the domain (i.e. example.org from @foo:example.org) and the federation port
+        # is 8448.
+        # Further reference can be found in the docs about delegation under
+        # https://matrix-org.github.io/synapse/latest/delegate.html
+        locations."= /.well-known/matrix/server".extraConfig = mkWellKnown serverConfig;
+        # This is usually needed for homeserver discovery (from e.g. other Matrix clients).
+        # Further reference can be found in the upstream docs at
+        # https://spec.matrix.org/latest/client-server-api/#getwell-knownmatrixclient
+        locations."= /.well-known/matrix/client".extraConfig = mkWellKnown clientConfig;
+      };
+      "${fqdn}" = {
+        enableACME = true;
+        forceSSL = true;
+        # It's also possible to do a redirect here or something else, this vhost is not
+        # needed for Matrix. It's recommended though to *not put* element
+        # here, see also the section about Element.
+        locations."/".extraConfig = ''
+          return 404;
+        '';
+        # Forward all Matrix API calls to the synapse Matrix homeserver. A trailing slash
+        # *must not* be used here.
+        locations."/_matrix".proxyPass = "http://[::1]:8008";
+        # Forward requests for e.g. SSO and password-resets.
+        locations."/_synapse/client".proxyPass = "http://[::1]:8008";
+      };
+    };
+  };
+
+  services.matrix-synapse = {
+    enable = true;
+    settings.server_name = config.networking.domain;
+    # The public base URL value must match the `base_url` value set in `clientConfig` above.
+    # The default value here is based on `server_name`, so if your `server_name` is different
+    # from the value of `fqdn` above, you will likely run into some mismatched domain names
+    # in client applications.
+    settings.public_baseurl = baseUrl;
+    settings.listeners = [
+      { port = 8008;
+        bind_addresses = [ "::1" ];
+        type = "http";
+        tls = false;
+        x_forwarded = true;
+        resources = [ {
+          names = [ "client" "federation" ];
+          compress = true;
+        } ];
+      }
+    ];
+  };
+}
+```
+
+## Registering Matrix users {#module-services-matrix-register-users}
+
+If you want to run a server with public registration open to anybody, you can
+then enable `services.matrix-synapse.settings.enable_registration = true;`.
+Otherwise, you can generate a registration secret with
+{command}`pwgen -s 64 1` and set it with
+[](#opt-services.matrix-synapse.settings.registration_shared_secret).
+To create a new user or admin, run the following after you have set the secret
+and have rebuilt NixOS:
+```ShellSession
+$ nix-shell -p matrix-synapse
+$ register_new_matrix_user -k your-registration-shared-secret http://localhost:8008
+New user localpart: your-username
+Password:
+Confirm password:
+Make admin [no]:
+Success!
+```
+In the example, this would create a user with the Matrix Identifier
+`@your-username:example.org`.
+
+::: {.warning}
+When using [](#opt-services.matrix-synapse.settings.registration_shared_secret), the secret
+will end up in the world-readable store. Instead it's recommended to deploy the secret
+in an additional file like this:
+
+  - Create a file with the following contents:
+
+    ```
+    registration_shared_secret: your-very-secret-secret
+    ```
+  - Deploy the file with a secret-manager such as
+    [{option}`deployment.keys`](https://nixops.readthedocs.io/en/latest/overview.html#managing-keys)
+    from {manpage}`nixops(1)` or [sops-nix](https://github.com/Mic92/sops-nix/) to
+    e.g. {file}`/run/secrets/matrix-shared-secret` and ensure that it's readable
+    by `matrix-synapse`.
+  - Include the file like this in your configuration:
+
+    ```
+    {
+      services.matrix-synapse.extraConfigFiles = [
+        "/run/secrets/matrix-shared-secret"
+      ];
+    }
+    ```
+:::
+
+::: {.note}
+It's also possible to use alternative authentication mechanisms such as
+[LDAP (via `matrix-synapse-ldap3`)](https://github.com/matrix-org/matrix-synapse-ldap3)
+or [OpenID](https://matrix-org.github.io/synapse/latest/openid.html).
+:::
+
+## Element (formerly known as Riot) Web Client {#module-services-matrix-element-web}
+
+[Element Web](https://github.com/vector-im/riot-web/) is
+the reference web client for Matrix and developed by the core team at
+matrix.org. Element was formerly known as Riot.im, see the
+[Element introductory blog post](https://element.io/blog/welcome-to-element/)
+for more information. The following snippet can optionally be added to the configuration
+above to complete the Synapse installation with a web client served at
+`https://element.myhostname.example.org` and
+`https://element.example.org`. Alternatively, you can use the hosted
+copy at <https://app.element.io/>,
+or use other web clients or native client applications. Due to the
+`/.well-known` URLs set up above, many clients should
+fill in the required connection details automatically when you enter your
+Matrix Identifier. See
+[Try Matrix Now!](https://matrix.org/docs/projects/try-matrix-now.html)
+for a list of existing clients and their supported featureset.
+```
+{
+  services.nginx.virtualHosts."element.${fqdn}" = {
+    enableACME = true;
+    forceSSL = true;
+    serverAliases = [
+      "element.${config.networking.domain}"
+    ];
+
+    root = pkgs.element-web.override {
+      conf = {
+        default_server_config = clientConfig; # see `clientConfig` from the snippet above.
+      };
+    };
+  };
+}
+```
+
+::: {.note}
+The Element developers do not recommend running Element and your Matrix
+homeserver on the same fully-qualified domain name for security reasons. In
+the example, this means that you should not reuse the
+`myhostname.example.org` virtualHost to also serve Element,
+but instead serve it on a different subdomain, like
+`element.example.org`. See the
+[Element Important Security Notes](https://github.com/vector-im/element-web/tree/v1.10.0#important-security-notes)
+for more information on this subject.
+:::
diff --git a/nixpkgs/nixos/modules/services/matrix/synapse.nix b/nixpkgs/nixos/modules/services/matrix/synapse.nix
new file mode 100644
index 000000000000..9cc769c2d0db
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/matrix/synapse.nix
@@ -0,0 +1,1188 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.matrix-synapse;
+  format = pkgs.formats.yaml { };
+
+  # remove null values from the final configuration
+  finalSettings = lib.filterAttrsRecursive (_: v: v != null) cfg.settings;
+  configFile = format.generate "homeserver.yaml" finalSettings;
+
+  usePostgresql = cfg.settings.database.name == "psycopg2";
+  hasLocalPostgresDB = let args = cfg.settings.database.args; in
+    usePostgresql
+    && (!(args ? host) || (elem args.host [ "localhost" "127.0.0.1" "::1" ]))
+    && config.services.postgresql.enable;
+  hasWorkers = cfg.workers != { };
+
+  listenerSupportsResource = resource: listener:
+    lib.any ({ names, ... }: builtins.elem resource names) listener.resources;
+
+  clientListener = findFirst
+    (listenerSupportsResource "client")
+    null
+    (cfg.settings.listeners
+      ++ concatMap ({ worker_listeners, ... }: worker_listeners) (attrValues cfg.workers));
+
+  registerNewMatrixUser =
+    let
+      isIpv6 = hasInfix ":";
+
+      # append a fallback, so that even without any bind_addresses we still have a usable address
+      bindAddress = head (clientListener.bind_addresses ++ [ "127.0.0.1" ]);
+      listenerProtocol = if clientListener.tls
+        then "https"
+        else "http";
+    in
+    assert assertMsg (clientListener != null) "No client listener found in synapse or one of its workers";
+    pkgs.writeShellScriptBin "matrix-synapse-register_new_matrix_user" ''
+      exec ${cfg.package}/bin/register_new_matrix_user \
+        $@ \
+        ${lib.concatMapStringsSep " " (x: "-c ${x}") ([ configFile ] ++ cfg.extraConfigFiles)} \
+        "${listenerProtocol}://${
+          if (isIpv6 bindAddress) then
+            "[${bindAddress}]"
+          else
+            "${bindAddress}"
+        }:${builtins.toString clientListener.port}/"
+    '';
+
+  defaultExtras = [
+    "systemd"
+    "postgres"
+    "url-preview"
+    "user-search"
+  ];
+
+  wantedExtras = cfg.extras
+    ++ lib.optional (cfg.settings ? oidc_providers) "oidc"
+    ++ lib.optional (cfg.settings ? jwt_config) "jwt"
+    ++ lib.optional (cfg.settings ? saml2_config) "saml2"
+    ++ lib.optional (cfg.settings ? redis) "redis"
+    ++ lib.optional (cfg.settings ? sentry) "sentry"
+    ++ lib.optional (cfg.settings ? user_directory) "user-search"
+    ++ lib.optional (cfg.settings.url_preview_enabled) "url-preview"
+    ++ lib.optional (cfg.settings.database.name == "psycopg2") "postgres";
+
+  wrapped = pkgs.matrix-synapse.override {
+    extras = wantedExtras;
+    inherit (cfg) plugins;
+  };
+
+  defaultCommonLogConfig = {
+    version = 1;
+    formatters.journal_fmt.format = "%(name)s: [%(request)s] %(message)s";
+    handlers.journal = {
+      class = "systemd.journal.JournalHandler";
+      formatter = "journal_fmt";
+    };
+    root = {
+      level = "INFO";
+      handlers = [ "journal" ];
+    };
+    disable_existing_loggers = false;
+  };
+
+  defaultCommonLogConfigText = generators.toPretty { } defaultCommonLogConfig;
+
+  logConfigText = logName:
+    lib.literalMD ''
+      Path to a yaml file generated from this Nix expression:
+
+      ```
+      ${generators.toPretty { } (
+        recursiveUpdate defaultCommonLogConfig { handlers.journal.SYSLOG_IDENTIFIER = logName; }
+      )}
+      ```
+    '';
+
+  genLogConfigFile = logName: format.generate
+    "synapse-log-${logName}.yaml"
+    (cfg.log // optionalAttrs (cfg.log?handlers.journal) {
+      handlers.journal = cfg.log.handlers.journal // {
+        SYSLOG_IDENTIFIER = logName;
+      };
+    });
+in {
+
+  imports = [
+
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "trusted_third_party_id_servers" ] ''
+      The `trusted_third_party_id_servers` option has been removed in `matrix-synapse` v1.4.0
+      as the behavior is now obsolete.
+    '')
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "create_local_database" ] ''
+      Database configuration must be done manually. An example setup is demonstrated in
+      <nixpkgs/nixos/tests/matrix/synapse.nix>
+    '')
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "web_client" ] "")
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "room_invite_state_types" ] ''
+      You may add additional event types via
+      `services.matrix-synapse.room_prejoin_state.additional_event_types` and
+      disable the default events via
+      `services.matrix-synapse.room_prejoin_state.disable_default_event_types`.
+    '')
+
+    # options that don't exist in synapse anymore
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "bind_host" ] "Use listener settings instead." )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "bind_port" ] "Use listener settings instead." )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "expire_access_tokens" ] "" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "no_tls" ] "It is no longer supported by synapse." )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "tls_dh_param_path" ] "It was removed from synapse." )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "unsecure_port" ] "Use settings.listeners instead." )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "user_creation_max_duration" ] "It is no longer supported by synapse." )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "verbose" ] "Use a log config instead." )
+
+    # options that were moved into rfc42 style settings
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "app_service_config_files" ] "Use settings.app_service_config_files instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "database_args" ] "Use settings.database.args instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "database_name" ] "Use settings.database.args.database instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "database_type" ] "Use settings.database.name instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "database_user" ] "Use settings.database.args.user instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "dynamic_thumbnails" ] "Use settings.dynamic_thumbnails instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "enable_metrics" ] "Use settings.enable_metrics instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "enable_registration" ] "Use settings.enable_registration instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "extraConfig" ] "Use settings instead." )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "listeners" ] "Use settings.listeners instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "logConfig" ] "Use settings.log_config instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "max_image_pixels" ] "Use settings.max_image_pixels instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "max_upload_size" ] "Use settings.max_upload_size instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "presence" "enabled" ] "Use settings.presence.enabled instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "public_baseurl" ] "Use settings.public_baseurl instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "report_stats" ] "Use settings.report_stats instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "server_name" ] "Use settings.server_name instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "servers" ] "Use settings.trusted_key_servers instead." )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "tls_certificate_path" ] "Use settings.tls_certificate_path instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "tls_private_key_path" ] "Use settings.tls_private_key_path instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "turn_shared_secret" ] "Use settings.turn_shared_secret instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "turn_uris" ] "Use settings.turn_uris instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "turn_user_lifetime" ] "Use settings.turn_user_lifetime instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "url_preview_enabled" ] "Use settings.url_preview_enabled instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "url_preview_ip_range_blacklist" ] "Use settings.url_preview_ip_range_blacklist instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "url_preview_ip_range_whitelist" ] "Use settings.url_preview_ip_range_whitelist instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "url_preview_url_blacklist" ] "Use settings.url_preview_url_blacklist instead" )
+
+    # options that are too specific to mention them explicitly in settings
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "account_threepid_delegates" "email" ] "Use settings.account_threepid_delegates.email instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "account_threepid_delegates" "msisdn" ] "Use settings.account_threepid_delegates.msisdn instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "allow_guest_access" ] "Use settings.allow_guest_access instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "bcrypt_rounds" ] "Use settings.bcrypt_rounds instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "enable_registration_captcha" ] "Use settings.enable_registration_captcha instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "event_cache_size" ] "Use settings.event_cache_size instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "federation_rc_concurrent" ] "Use settings.rc_federation.concurrent instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "federation_rc_reject_limit" ] "Use settings.rc_federation.reject_limit instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "federation_rc_sleep_delay" ] "Use settings.rc_federation.sleep_delay instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "federation_rc_sleep_limit" ] "Use settings.rc_federation.sleep_limit instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "federation_rc_window_size" ] "Use settings.rc_federation.window_size instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "key_refresh_interval" ] "Use settings.key_refresh_interval instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "rc_messages_burst_count" ] "Use settings.rc_messages.burst_count instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "rc_messages_per_second" ] "Use settings.rc_messages.per_second instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "recaptcha_private_key" ] "Use settings.recaptcha_private_key instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "recaptcha_public_key" ] "Use settings.recaptcha_public_key instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "redaction_retention_period" ] "Use settings.redaction_retention_period instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "room_prejoin_state" "additional_event_types" ] "Use settings.room_prejoin_state.additional_event_types instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "room_prejoin_state" "disable_default_event_types" ] "Use settings.room_prejoin-state.disable_default_event_types instead" )
+
+    # Options that should be passed via extraConfigFiles, so they are not persisted into the nix store
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "macaroon_secret_key" ] "Pass this value via extraConfigFiles instead" )
+    (mkRemovedOptionModule [ "services" "matrix-synapse" "registration_shared_secret" ] "Pass this value via extraConfigFiles instead" )
+
+  ];
+
+  options = let
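+    # listenerType is parameterised over whether it describes a worker listener:
+    # worker listeners default to TLS being disabled (tls = !workerContext).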
+    listenerType = workerContext: types.submodule {
+      options = {
+        port = mkOption {
+          type = types.port;
+          example = 8448;
+          description = lib.mdDoc ''
+            The port to listen for HTTP(S) requests on.
+          '';
+        };
+
+        bind_addresses = mkOption {
+          type = types.listOf types.str;
+          default = [
+            "::1"
+            "127.0.0.1"
+          ];
+          example = literalExpression ''
+            [
+              "::"
+              "0.0.0.0"
+            ]
+          '';
+          description = lib.mdDoc ''
+            IP addresses to bind the listener to.
+          '';
+        };
+
+        type = mkOption {
+          type = types.enum [
+            "http"
+            "manhole"
+            "metrics"
+            "replication"
+          ];
+          default = "http";
+          example = "metrics";
+          description = lib.mdDoc ''
+            The type of the listener, usually http.
+          '';
+        };
+
+        tls = mkOption {
+          type = types.bool;
+          default = !workerContext;
+          example = false;
+          description = lib.mdDoc ''
+            Whether to enable TLS on the listener socket.
+          '';
+        };
+
+        x_forwarded = mkOption {
+          type = types.bool;
+          default = false;
+          example = true;
+          description = lib.mdDoc ''
+            Whether to use the X-Forwarded-For (XFF) header as the client IP
+            instead of the actual client IP.
+          '';
+        };
+
+        resources = mkOption {
+          type = types.listOf (types.submodule {
+            options = {
+              names = mkOption {
+                type = types.listOf (types.enum [
+                  "client"
+                  "consent"
+                  "federation"
+                  "health"
+                  "keys"
+                  "media"
+                  "metrics"
+                  "openid"
+                  "replication"
+                  "static"
+                ]);
+                description = lib.mdDoc ''
+                  List of resources to host on this listener.
+                '';
+                example = [
+                  "client"
+                ];
+              };
+              compress = mkOption {
+                default = false;
+                type = types.bool;
+                description = lib.mdDoc ''
+                  Whether synapse should compress HTTP responses to clients that support it.
+                  This should be disabled if running synapse behind a load balancer
+                  that can do automatic compression.
+                '';
+              };
+            };
+          });
+          description = lib.mdDoc ''
+            List of HTTP resources to serve on this listener.
+          '';
+        };
+      };
+    };
+  in {
+    services.matrix-synapse = {
+      enable = mkEnableOption (lib.mdDoc "matrix.org synapse");
+
+      serviceUnit = lib.mkOption {
+        type = lib.types.str;
+        readOnly = true;
+        description = lib.mdDoc ''
+          The systemd unit (a service or a target) for other services to depend on if they
+          need to be started after matrix-synapse.
+
+          This option is useful because the actual parent unit for all matrix-synapse
+          processes changes when workers are configured.
+        '';
+      };
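+      # For example (unit name illustrative), another service can order itself
+      # after synapse and all of its workers like this:
+      #   systemd.services.my-bridge.after = [ config.services.matrix-synapse.serviceUnit ];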
+
+      configFile = mkOption {
+        type = types.path;
+        readOnly = true;
+        description = lib.mdDoc ''
+          Path to the configuration file on the target system. Useful for configuring
+          e.g. workers that also need this file.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        readOnly = true;
+        description = lib.mdDoc ''
+          Reference to the `matrix-synapse` wrapper with all extras
+          (e.g. for `oidc` or `saml2`) added to the `PYTHONPATH` of all executables.
+
+          This option is useful to reference the "final" `matrix-synapse` package that's
+          actually used by `matrix-synapse.service`. For instance, when using
+          workers, it's possible to run
+          `''${config.services.matrix-synapse.package}/bin/synapse_worker` and
+          no additional PYTHONPATH needs to be specified for extras or plugins configured
+          via `services.matrix-synapse`.
+
+          However, this means that this option is supposed to be only declared
+          by the `services.matrix-synapse` module itself and is thus read-only.
+          In order to modify `matrix-synapse` itself, use an overlay to override
+          `pkgs.matrix-synapse-unwrapped`.
+        '';
+      };
+
+      extras = mkOption {
+        type = types.listOf (types.enum (lib.attrNames pkgs.matrix-synapse-unwrapped.optional-dependencies));
+        default = defaultExtras;
+        example = literalExpression ''
+          [
+            "cache-memory" # Provide statistics about caching memory consumption
+            "jwt"          # JSON Web Token authentication
+            "oidc"         # OpenID Connect authentication
+            "postgres"     # PostgreSQL database backend
+            "redis"        # Redis support for the replication stream between worker processes
+            "saml2"        # SAML2 authentication
+            "sentry"       # Error tracking and performance metrics
+            "systemd"      # Provide the JournalHandler used in the default log_config
+            "url-preview"  # Support for oEmbed URL previews
+            "user-search"  # Support internationalized domain names in user-search
+          ]
+        '';
+        description = lib.mdDoc ''
+          Explicitly install extras provided by matrix-synapse. Most
+          will require some additional configuration.
+
+          Extras are enabled automatically when the relevant
+          configuration sections are present.
+
+          Please note that this option is additive: when adding a new item
+          to this list, the defaults are still kept. To override the defaults as well,
+          use `lib.mkForce`.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        example = literalExpression ''
+          with config.services.matrix-synapse.package.plugins; [
+            matrix-synapse-ldap3
+            matrix-synapse-pam
+          ];
+        '';
+        description = lib.mdDoc ''
+          List of additional Matrix plugins to make available.
+        '';
+      };
+
+      withJemalloc = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to preload jemalloc to reduce memory fragmentation and overall usage.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/matrix-synapse";
+        description = lib.mdDoc ''
+          The directory where matrix-synapse stores its stateful data such as
+          certificates, media and uploads.
+        '';
+      };
+
+      log = mkOption {
+        type = types.attrsOf format.type;
+        defaultText = literalExpression defaultCommonLogConfigText;
+        description = mdDoc ''
+          Default configuration for the loggers used by `matrix-synapse` and its workers.
+          The defaults are added with the default priority, which means that
+          these will be merged with additional declarations. These additional
+          declarations also take precedence over the defaults when declared
+          with at least normal priority. For instance,
+          the log level for synapse and its workers can be changed like this:
+
+          ```nix
+          { lib, ... }: {
+            services.matrix-synapse.log.root.level = "WARNING";
+          }
+          ```
+
+          And another field can be added like this:
+
+          ```nix
+          {
+            services.matrix-synapse.log = {
+              loggers."synapse.http.matrixfederationclient".level = "DEBUG";
+            };
+          }
+          ```
+
+          Additionally, the field `handlers.journal.SYSLOG_IDENTIFIER` will be added to
+          each log config, i.e.
+          * `synapse` for `matrix-synapse.service`
+          * `synapse-<worker name>` for `matrix-synapse-worker-<worker name>.service`
+
+          This is only done if this option has a `handlers.journal` field declared.
+
+          To discard all settings declared by this option for each worker and synapse,
+          `lib.mkForce` can be used.
+
+          To discard all settings declared by this option for a single worker or synapse only,
+          [](#opt-services.matrix-synapse.workers._name_.worker_log_config) or
+          [](#opt-services.matrix-synapse.settings.log_config) can be used.
+        '';
+      };
+
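+      # Illustrative example (values assumed): any setting from homeserver.yaml
+      # can be placed under `settings`, e.g.
+      #   services.matrix-synapse.settings = {
+      #     server_name = "example.org";
+      #     enable_registration = false;
+      #   };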
+      settings = mkOption {
+        default = { };
+        description = mdDoc ''
+          The primary synapse configuration. See the
+          [sample configuration](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_config.yaml)
+          for possible values.
+
+          Secrets should be passed in by using the `extraConfigFiles` option.
+        '';
+        type = with types; submodule {
+          freeformType = format.type;
+          options = {
+            # This is a reduced set of popular options and defaults
+            # Do not add every available option here, they can be specified
+            # by the user at their own discretion. This is a freeform type!
+
+            server_name = mkOption {
+              type = types.str;
+              example = "example.com";
+              default = config.networking.hostName;
+              defaultText = literalExpression "config.networking.hostName";
+              description = lib.mdDoc ''
+                The domain name of the server, with optional explicit port.
+                This is used by remote servers to look up the server address.
+                This is also the last part of your UserID.
+
+                The server_name cannot be changed later, so it is important to configure this correctly before you start Synapse.
+              '';
+            };
+
+            enable_registration = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Enable registration for new users.
+              '';
+            };
+
+            registration_shared_secret = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = mdDoc ''
+                If set, allows registration by anyone who also has the shared
+                secret, even if registration is otherwise disabled.
+
+                Secrets should be passed in via `extraConfigFiles`!
+              '';
+            };
+
+            macaroon_secret_key = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = mdDoc ''
+                Secret key for authentication tokens. If none is specified,
+                the registration_shared_secret is used, if one is given; otherwise,
+                a secret key is derived from the signing key.
+
+                Secrets should be passed in via `extraConfigFiles`!
+              '';
+            };
+
+            enable_metrics = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Enable collection and rendering of performance metrics
+              '';
+            };
+
+            report_stats = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether or not to report anonymized homeserver usage statistics.
+              '';
+            };
+
+            signing_key_path = mkOption {
+              type = types.path;
+              default = "${cfg.dataDir}/homeserver.signing.key";
+              description = lib.mdDoc ''
+                Path to the signing key to sign messages with.
+              '';
+            };
+
+            pid_file = mkOption {
+              type = types.path;
+              default = "/run/matrix-synapse.pid";
+              readOnly = true;
+              description = lib.mdDoc ''
+                The file to store the PID in.
+              '';
+            };
+
+            log_config = mkOption {
+              type = types.path;
+              default = genLogConfigFile "synapse";
+              defaultText = logConfigText "synapse";
+              description = lib.mdDoc ''
+                The file that holds the logging configuration.
+              '';
+            };
+
+            media_store_path = mkOption {
+              type = types.path;
+              default = if lib.versionAtLeast config.system.stateVersion "22.05"
+                then "${cfg.dataDir}/media_store"
+                else "${cfg.dataDir}/media";
+              defaultText = "${cfg.dataDir}/media_store for when system.stateVersion is at least 22.05, ${cfg.dataDir}/media when lower than 22.05";
+              description = lib.mdDoc ''
+                Directory where uploaded images and attachments are stored.
+              '';
+            };
+
+            public_baseurl = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "https://example.com:8448/";
+              description = lib.mdDoc ''
+                The public-facing base URL for the client API (not including _matrix/...)
+              '';
+            };
+
+            tls_certificate_path = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "/var/lib/acme/example.com/fullchain.pem";
+              description = lib.mdDoc ''
+                PEM encoded X509 certificate for TLS.
+                You can replace the self-signed certificate that synapse
+                autogenerates on launch with your own TLS certificate and key pair
+                if you like. Any required intermediate certificates can be
+                appended after the primary certificate in hierarchical order.
+              '';
+            };
+
+            tls_private_key_path = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "/var/lib/acme/example.com/key.pem";
+              description = lib.mdDoc ''
+                PEM encoded private key for TLS. Specify null if synapse is not
+                speaking TLS directly.
+              '';
+            };
+
+            presence.enabled = mkOption {
+              type = types.bool;
+              default = true;
+              example = false;
+              description = lib.mdDoc ''
+                Whether to enable presence tracking.
+
+                Presence tracking allows users to see the state (e.g online/offline)
+                of other local and remote users.
+              '';
+            };
+
+            listeners = mkOption {
+              type = types.listOf (listenerType false);
+              default = [{
+                port = 8008;
+                bind_addresses = [ "127.0.0.1" ];
+                type = "http";
+                tls = false;
+                x_forwarded = true;
+                resources = [{
+                  names = [ "client" ];
+                  compress = true;
+                } {
+                  names = [ "federation" ];
+                  compress = false;
+                }];
+              }] ++ lib.optional hasWorkers {
+                port = 9093;
+                bind_addresses = [ "127.0.0.1" ];
+                type = "http";
+                tls = false;
+                x_forwarded = false;
+                resources = [{
+                  names = [ "replication" ];
+                  compress = false;
+                }];
+              };
+              description = lib.mdDoc ''
+                List of ports that Synapse should listen on, their purpose and their configuration.
+
+                By default, synapse will be configured for client and federation traffic on port 8008, and
+                for worker replication traffic on port 9093. See [`services.matrix-synapse.workers`](#opt-services.matrix-synapse.workers)
+                for more details.
+              '';
+            };
+
+            database.name = mkOption {
+              type = types.enum [
+                "sqlite3"
+                "psycopg2"
+              ];
+              default = if versionAtLeast config.system.stateVersion "18.03"
+                then "psycopg2"
+                else "sqlite3";
+              defaultText = literalExpression ''
+                if versionAtLeast config.system.stateVersion "18.03"
+                then "psycopg2"
+                else "sqlite3"
+              '';
+              description = lib.mdDoc ''
+                The database engine name. Can be sqlite3 or psycopg2.
+              '';
+            };
+
+            database.args.database = mkOption {
+              type = types.str;
+              default = {
+                sqlite3 = "${cfg.dataDir}/homeserver.db";
+                psycopg2 = "matrix-synapse";
+              }.${cfg.settings.database.name};
+              defaultText = literalExpression ''
+                {
+                  sqlite3 = "''${${options.services.matrix-synapse.dataDir}}/homeserver.db";
+                  psycopg2 = "matrix-synapse";
+                }.''${${options.services.matrix-synapse.settings}.database.name};
+              '';
+              description = lib.mdDoc ''
+                Name of the database when using the psycopg2 backend,
+                path to the database location when using sqlite3.
+              '';
+            };
+
+            database.args.user = mkOption {
+              type = types.nullOr types.str;
+              default = {
+                sqlite3 = null;
+                psycopg2 = "matrix-synapse";
+              }.${cfg.settings.database.name};
+              defaultText = lib.literalExpression ''
+                {
+                  sqlite3 = null;
+                  psycopg2 = "matrix-synapse";
+                }.''${cfg.settings.database.name};
+              '';
+              description = lib.mdDoc ''
+                Username to connect with psycopg2, set to null
+                when using sqlite3.
+              '';
+            };
+
+            url_preview_enabled = mkOption {
+              type = types.bool;
+              default = true;
+              example = false;
+              description = lib.mdDoc ''
+                Whether the URL preview API is enabled. If enabled, you *must* specify an
+                explicit url_preview_ip_range_blacklist of IPs that the spider is
+                denied from accessing.
+              '';
+            };
+
+            url_preview_ip_range_blacklist = mkOption {
+              type = types.listOf types.str;
+              default = [
+                "10.0.0.0/8"
+                "100.64.0.0/10"
+                "127.0.0.0/8"
+                "169.254.0.0/16"
+                "172.16.0.0/12"
+                "192.0.0.0/24"
+                "192.0.2.0/24"
+                "192.168.0.0/16"
+                "192.88.99.0/24"
+                "198.18.0.0/15"
+                "198.51.100.0/24"
+                "2001:db8::/32"
+                "203.0.113.0/24"
+                "224.0.0.0/4"
+                "::1/128"
+                "fc00::/7"
+                "fe80::/10"
+                "fec0::/10"
+                "ff00::/8"
+              ];
+              description = lib.mdDoc ''
+                List of IP address CIDR ranges that the URL preview spider is denied
+                from accessing.
+              '';
+            };
+
+            url_preview_ip_range_whitelist = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              description = lib.mdDoc ''
+                List of IP address CIDR ranges that the URL preview spider is allowed
+                to access even if they are specified in url_preview_ip_range_blacklist.
+              '';
+            };
+
+            url_preview_url_blacklist = mkOption {
+              # FIXME revert to just `listOf (attrsOf str)` after some time(tm).
+              type = types.listOf (
+                types.coercedTo
+                  types.str
+                  (const (throw ''
+                    Setting `config.services.matrix-synapse.settings.url_preview_url_blacklist`
+                    to a list of strings has never worked. Due to a bug, this was the type accepted
+                    by the module, but in practice it broke on runtime and as a result, no URL
+                    preview worked anywhere if this was set.
+
+                    See https://matrix-org.github.io/synapse/latest/usage/configuration/config_documentation.html#url_preview_url_blacklist
+                    on how to configure it properly.
+                  ''))
+                  (types.attrsOf types.str));
+              default = [ ];
+              example = literalExpression ''
+                [
+                  { scheme = "http"; } # no http previews
+                  { netloc = "www.acme.com"; path = "/foo"; } # block http(s)://www.acme.com/foo
+                ]
+              '';
+              description = lib.mdDoc ''
+                Optional list of URL matches that the URL preview spider is
+                denied from accessing.
+              '';
+            };
+
+            max_upload_size = mkOption {
+              type = types.str;
+              default = "50M";
+              example = "100M";
+              description = lib.mdDoc ''
+                The largest allowed upload size, specified as a size string such as `50M`.
+              '';
+            };
+
+            max_image_pixels = mkOption {
+              type = types.str;
+              default = "32M";
+              example = "64M";
+              description = lib.mdDoc ''
+                Maximum number of pixels that will be thumbnailed
+              '';
+            };
+
+            dynamic_thumbnails = mkOption {
+              type = types.bool;
+              default = false;
+              example = true;
+              description = lib.mdDoc ''
+                Whether to generate new thumbnails on the fly to precisely match
+                the resolution requested by the client. If true then whenever
+                a new resolution is requested by the client the server will
+                generate a new thumbnail. If false the server will pick a thumbnail
+                from a precalculated list.
+              '';
+            };
+
+            turn_uris = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              example = [
+                "turn:turn.example.com:3487?transport=udp"
+                "turn:turn.example.com:3487?transport=tcp"
+                "turns:turn.example.com:5349?transport=udp"
+                "turns:turn.example.com:5349?transport=tcp"
+              ];
+              description = lib.mdDoc ''
+                The public URIs of the TURN server to give to clients
+              '';
+            };
+            turn_shared_secret = mkOption {
+              type = types.str;
+              default = "";
+              example = literalExpression ''
+                config.services.coturn.static-auth-secret
+              '';
+              description = mdDoc ''
+                The shared secret used to compute passwords for the TURN server.
+
+                Secrets should be passed in via `extraConfigFiles`!
+              '';
+            };
+
+            trusted_key_servers = mkOption {
+              type = types.listOf (types.submodule {
+                freeformType = format.type;
+                options = {
+                  server_name = mkOption {
+                    type = types.str;
+                    example = "matrix.org";
+                    description = lib.mdDoc ''
+                      Hostname of the trusted server.
+                    '';
+                  };
+                };
+              });
+              default = [{
+                server_name = "matrix.org";
+                verify_keys = {
+                  "ed25519:auto" = "Noi6WqcDj0QmPxCNQqgezwTlBKrfqehY1u2FyWP9uYw";
+                };
+              }];
+              description = lib.mdDoc ''
+                The trusted servers to download signing keys from.
+              '';
+            };
+
+            app_service_config_files = mkOption {
+              type = types.listOf types.path;
+              default = [ ];
+              description = lib.mdDoc ''
+                A list of application service config files to use.
+              '';
+            };
+
+            redis = lib.mkOption {
+              type = types.submodule {
+                freeformType = format.type;
+                options = {
+                  enabled = lib.mkOption {
+                    type = types.bool;
+                    default = false;
+                    description = lib.mdDoc ''
+                      Whether to use redis support
+                    '';
+                  };
+                };
+              };
+              default = { };
+              description = lib.mdDoc ''
+                Redis configuration for synapse.
+
+                See the
+                [upstream documentation](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/usage/configuration/config_documentation.md#redis)
+                for available options.
+              '';
+            };
+          };
+        };
+      };
+
+      workers = lib.mkOption {
+        default = { };
+        description = lib.mdDoc ''
+          Options for configuring workers. Worker support will be enabled if at least one worker is configured here.
+
+          See the [worker documentation](https://matrix-org.github.io/synapse/latest/workers.html#worker-configuration)
+          for possible options for each worker. Worker-specific options overriding the shared homeserver configuration can be
+          specified here for each worker.
+
+          ::: {.note}
+            Worker support will add a replication listener on port 9093 to the main synapse process using the default
+            value of [`services.matrix-synapse.settings.listeners`](#opt-services.matrix-synapse.settings.listeners) and configure that
+            listener as `services.matrix-synapse.settings.instance_map.main`.
+            If you set either of those options, make sure to configure a replication listener yourself.
+
+            A redis server is required for running workers. A local one can be enabled
+            using [`services.matrix-synapse.configureRedisLocally`](#opt-services.matrix-synapse.configureRedisLocally).
+
+            Workers also require a proper reverse proxy setup to direct incoming requests to the appropriate process. See
+            the [reverse proxy documentation](https://matrix-org.github.io/synapse/latest/reverse_proxy.html) for a
+            general reverse proxying setup and
+            the [worker documentation](https://matrix-org.github.io/synapse/latest/workers.html#available-worker-applications)
+            for the available endpoints per worker application.
+          :::
+        '';
+        type = types.attrsOf (types.submodule ({name, ...}: {
+          freeformType = format.type;
+          options = {
+            worker_app = lib.mkOption {
+              type = types.enum [
+                "synapse.app.generic_worker"
+                "synapse.app.media_repository"
+              ];
+              description = "Type of this worker";
+              default = "synapse.app.generic_worker";
+            };
+            worker_listeners = lib.mkOption {
+              default = [ ];
+              type = types.listOf (listenerType true);
+              description = lib.mdDoc ''
+                List of ports that this worker should listen on, their purpose and their configuration.
+              '';
+            };
+            worker_log_config = lib.mkOption {
+              type = types.path;
+              default = genLogConfigFile "synapse-${name}";
+              defaultText = logConfigText "synapse-${name}";
+              description = lib.mdDoc ''
+                The file for log configuration.
+
+                See the [python documentation](https://docs.python.org/3/library/logging.config.html#configuration-dictionary-schema)
+                for the schema and the [upstream repository](https://github.com/matrix-org/synapse/blob/v${pkgs.matrix-synapse-unwrapped.version}/docs/sample_log_config.yaml)
+                for an example.
+              '';
+            };
+          };
+        }));
+        default = { };
+        example = lib.literalExpression ''
+          {
+            "federation_sender" = { };
+            "federation_receiver" = {
+              worker_listeners = [
+                {
+                  type = "http";
+                  port = 8009;
+                  bind_addresses = [ "127.0.0.1" ];
+                  tls = false;
+                  x_forwarded = true;
+                  resources = [{
+                    names = [ "federation" ];
+                  }];
+                }
+              ];
+            };
+          }
+        '';
+      };
+
+      extraConfigFiles = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        description = lib.mdDoc ''
+          Extra config files to include.
+
+          The configuration files will be included based on the command line
+          argument --config-path. This allows configuring secrets without
+          having to go through the Nix store, e.g. based on deployment keys if
+          NixOps is in use.
+        '';
+      };
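+      # Illustrative use (path assumed; the file itself is kept out of the Nix store):
+      #   extraConfigFiles = [ "/run/secrets/matrix-synapse-extra-config.yaml" ];
+      # where the referenced file could contain e.g. `registration_shared_secret`.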
+
+      configureRedisLocally = lib.mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to automatically configure a local redis server for matrix-synapse.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = clientListener != null;
+        message = ''
+          At least one listener which serves the `client` resource via HTTP is required
+          by synapse in `services.matrix-synapse.settings.listeners` or in one of the workers!
+        '';
+      }
+      {
+        assertion = hasWorkers -> cfg.settings.redis.enabled;
+        message = ''
+          Workers for matrix-synapse require configuring a redis instance. This can be done
+          automatically by setting `services.matrix-synapse.configureRedisLocally = true`.
+        '';
+      }
+      {
+        assertion =
+          let
+            main = cfg.settings.instance_map.main;
+            listener = lib.findFirst
+              (
+                listener:
+                  listener.port == main.port
+                  && listenerSupportsResource "replication" listener
+                  && (lib.any (bind: bind == main.host || bind == "0.0.0.0" || bind == "::") listener.bind_addresses)
+              )
+              null
+              cfg.settings.listeners;
+          in
+          hasWorkers -> (cfg.settings.instance_map ? main && listener != null);
+        message = ''
+          Workers for matrix-synapse require setting `services.matrix-synapse.settings.instance_map.main`
+          to any listener configured in `services.matrix-synapse.settings.listeners` with a `"replication"`
+          resource.
+
+          This is done by default unless you manually configure either of those settings.
+        '';
+      }
+    ];
+
+    services.matrix-synapse.settings.redis = lib.mkIf cfg.configureRedisLocally {
+      enabled = true;
+      path = config.services.redis.servers.matrix-synapse.unixSocket;
+    };
+    services.matrix-synapse.settings.instance_map.main = lib.mkIf hasWorkers (lib.mkDefault {
+      host = "127.0.0.1";
+      port = 9093;
+    });
+
+    services.matrix-synapse.serviceUnit = if hasWorkers then "matrix-synapse.target" else "matrix-synapse.service";
+    services.matrix-synapse.configFile = configFile;
+    services.matrix-synapse.package = wrapped;
+
+    # default them, so they are additive
+    services.matrix-synapse.extras = defaultExtras;
+
+    services.matrix-synapse.log = mapAttrsRecursive (const mkDefault) defaultCommonLogConfig;
+
+    users.users.matrix-synapse = {
+      group = "matrix-synapse";
+      home = cfg.dataDir;
+      createHome = true;
+      shell = "${pkgs.bash}/bin/bash";
+      uid = config.ids.uids.matrix-synapse;
+    };
+
+    users.groups.matrix-synapse = {
+      gid = config.ids.gids.matrix-synapse;
+    };
+
+    systemd.targets.matrix-synapse = lib.mkIf hasWorkers {
+      description = "Synapse Matrix parent target";
+      after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.services =
+      let
+        targetConfig =
+          if hasWorkers
+          then {
+            partOf = [ "matrix-synapse.target" ];
+            wantedBy = [ "matrix-synapse.target" ];
+            unitConfig.ReloadPropagatedFrom = "matrix-synapse.target";
+            requires = optional hasLocalPostgresDB "postgresql.service";
+          }
+          else {
+            after = [ "network-online.target" ] ++ optional hasLocalPostgresDB "postgresql.service";
+            requires = optional hasLocalPostgresDB "postgresql.service";
+            wantedBy = [ "multi-user.target" ];
+          };
+        baseServiceConfig = {
+          environment = optionalAttrs (cfg.withJemalloc) {
+            LD_PRELOAD = "${pkgs.jemalloc}/lib/libjemalloc.so";
+          };
+          serviceConfig = {
+            Type = "notify";
+            User = "matrix-synapse";
+            Group = "matrix-synapse";
+            WorkingDirectory = cfg.dataDir;
+            ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
+            Restart = "on-failure";
+            UMask = "0077";
+
+            # Security Hardening
+            # Refer to systemd.exec(5) for option descriptions.
+            CapabilityBoundingSet = [ "" ];
+            LockPersonality = true;
+            NoNewPrivileges = true;
+            PrivateDevices = true;
+            PrivateTmp = true;
+            PrivateUsers = true;
+            ProcSubset = "pid";
+            ProtectClock = true;
+            ProtectControlGroups = true;
+            ProtectHome = true;
+            ProtectHostname = true;
+            ProtectKernelLogs = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            ProtectProc = "invisible";
+            ProtectSystem = "strict";
+            ReadWritePaths = [ cfg.dataDir cfg.settings.media_store_path ];
+            RemoveIPC = true;
+            RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+            RestrictNamespaces = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            SystemCallArchitectures = "native";
+            SystemCallFilter = [ "@system-service" "~@resources" "~@privileged" ];
+          };
+        }
+        // targetConfig;
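+        # Render one systemd service per configured worker: the attribute name
+        # becomes the worker_name, and the worker's settings are written to a
+        # separate YAML file that is passed after the main config file.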
+        genWorkerService = name: workerCfg:
+          let
+            finalWorkerCfg = workerCfg // { worker_name = name; };
+            workerConfigFile = format.generate "worker-${name}.yaml" finalWorkerCfg;
+          in
+          {
+            name = "matrix-synapse-worker-${name}";
+            value = lib.mkMerge [
+              baseServiceConfig
+              {
+                description = "Synapse Matrix worker ${name}";
+                # make sure the main process starts first for potential database migrations
+                after = [ "matrix-synapse.service" ];
+                requires = [ "matrix-synapse.service" ];
+                serviceConfig = {
+                  ExecStart = ''
+                    ${cfg.package}/bin/synapse_worker \
+                      ${ concatMapStringsSep "\n  " (x: "--config-path ${x} \\") ([ configFile workerConfigFile ] ++ cfg.extraConfigFiles) }
+                      --keys-directory ${cfg.dataDir}
+                  '';
+                };
+              }
+            ];
+          };
+      in
+      {
+        matrix-synapse = lib.mkMerge [
+          baseServiceConfig
+          {
+            description = "Synapse Matrix homeserver";
+            preStart = ''
+              ${cfg.package}/bin/synapse_homeserver \
+                --config-path ${configFile} \
+                --keys-directory ${cfg.dataDir} \
+                --generate-keys
+            '';
+            serviceConfig = {
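+              # The "+" prefix runs this step with full privileges so it can fix
+              # ownership and permissions of the signing key.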
+              ExecStartPre = [
+                ("+" + (pkgs.writeShellScript "matrix-synapse-fix-permissions" ''
+                  chown matrix-synapse:matrix-synapse ${cfg.settings.signing_key_path}
+                  chmod 0600 ${cfg.settings.signing_key_path}
+                ''))
+              ];
+              ExecStart = ''
+                ${cfg.package}/bin/synapse_homeserver \
+                  ${ concatMapStringsSep "\n  " (x: "--config-path ${x} \\") ([ configFile ] ++ cfg.extraConfigFiles) }
+                  --keys-directory ${cfg.dataDir}
+              '';
+            };
+          }
+        ];
+      }
+      // (lib.mapAttrs' genWorkerService cfg.workers);
+
+    services.redis.servers.matrix-synapse = lib.mkIf cfg.configureRedisLocally {
+      enable = true;
+      user = "matrix-synapse";
+    };
+
+    environment.systemPackages = [ registerNewMatrixUser ];
+  };
+
+  meta = {
+    buildDocsInSandbox = false;
+    doc = ./synapse.md;
+    maintainers = teams.matrix.members;
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/misc/airsonic.nix b/nixpkgs/nixos/modules/services/misc/airsonic.nix
new file mode 100644
index 000000000000..b8e9dcaf4663
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/airsonic.nix
@@ -0,0 +1,179 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.airsonic;
+  opt = options.services.airsonic;
+in {
+  options = {
+
+    services.airsonic = {
+      enable = mkEnableOption (lib.mdDoc "Airsonic, the Free and Open Source media streaming server (fork of Subsonic and Libresonic)");
+
+      user = mkOption {
+        type = types.str;
+        default = "airsonic";
+        description = lib.mdDoc "User account under which airsonic runs.";
+      };
+
+      home = mkOption {
+        type = types.path;
+        default = "/var/lib/airsonic";
+        description = lib.mdDoc ''
+          The directory where Airsonic will create files.
+          Make sure it is writable.
+        '';
+      };
+
+      virtualHost = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Name of the nginx virtual host to use and set up. If null, do not set up any virtual host.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          The host name or IP address on which to bind Airsonic.
+          The default value is appropriate for first launch, when the
+          default credentials are easy to guess. It is also appropriate
+          if you intend to use the virtualhost option in the service
+          module. In other cases, you may want to change this to a
+          specific IP or 0.0.0.0 to listen on all interfaces.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 4040;
+        description = lib.mdDoc ''
+          The port on which Airsonic will listen for
+          incoming HTTP traffic. Set to 0 to disable.
+        '';
+      };
+
+      contextPath = mkOption {
+        type = types.path;
+        default = "/";
+        description = lib.mdDoc ''
+          The context path, i.e., the last part of the Airsonic
+          URL. Typically '/' or '/airsonic'. Default '/'
+        '';
+      };
+
+      maxMemory = mkOption {
+        type = types.int;
+        default = 100;
+        description = lib.mdDoc ''
+          The memory limit (max Java heap size) in megabytes.
+          Default: 100
+        '';
+      };
+
+      transcoders = mkOption {
+        type = types.listOf types.path;
+        default = [ "${pkgs.ffmpeg.bin}/bin/ffmpeg" ];
+        defaultText = literalExpression ''[ "''${pkgs.ffmpeg.bin}/bin/ffmpeg" ]'';
+        description = lib.mdDoc ''
+          List of paths to transcoder executables that should be accessible
+          from Airsonic. Symlinks will be created to each executable inside
+          ''${config.${opt.home}}/transcode.
+        '';
+      };
+
+      jre = mkOption {
+        type = types.package;
+        default = pkgs.jre8;
+        defaultText = literalExpression "pkgs.jre8";
+        description = lib.mdDoc ''
+          JRE package to use.
+
+          Airsonic only supports Java 8, while airsonic-advanced requires at least
+          Java 11.
+        '';
+      };
+
+      war = mkOption {
+        type = types.path;
+        default = "${pkgs.airsonic}/webapps/airsonic.war";
+        defaultText = literalExpression ''"''${pkgs.airsonic}/webapps/airsonic.war"'';
+        description = lib.mdDoc "Airsonic war file to use.";
+      };
+
+      jvmOptions = mkOption {
+        description = lib.mdDoc ''
+          Extra command line options for the JVM running Airsonic.
+          Useful for sending jukebox output to non-default ALSA
+          devices.
+        '';
+        default = [ ];
+        type = types.listOf types.str;
+        example = [
+          "-Djavax.sound.sampled.Clip='#CODEC [plughw:1,0]'"
+          "-Djavax.sound.sampled.Port='#Port CODEC [hw:1]'"
+          "-Djavax.sound.sampled.SourceDataLine='#CODEC [plughw:1,0]'"
+          "-Djavax.sound.sampled.TargetDataLine='#CODEC [plughw:1,0]'"
+        ];
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.airsonic = {
+      description = "Airsonic Media Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        # Install transcoders.
+        rm -rf ${cfg.home}/transcode
+        mkdir -p ${cfg.home}/transcode
+        for exe in ${toString cfg.transcoders}; do
+          ln -sf "$exe" ${cfg.home}/transcode
+        done
+      '';
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.jre}/bin/java -Xmx${toString cfg.maxMemory}m \
+          -Dairsonic.home=${cfg.home} \
+          -Dserver.address=${cfg.listenAddress} \
+          -Dserver.port=${toString cfg.port} \
+          -Dairsonic.contextPath=${cfg.contextPath} \
+          -Djava.awt.headless=true \
+          ${optionalString (cfg.virtualHost != null)
+            "-Dserver.use-forward-headers=true"} \
+          ${toString cfg.jvmOptions} \
+          -verbose:gc \
+          -jar ${cfg.war}
+        '';
+        Restart = "always";
+        User = "airsonic";
+        UMask = "0022";
+      };
+    };
+
+    services.nginx = mkIf (cfg.virtualHost != null) {
+      enable = true;
+      recommendedProxySettings = true;
+      virtualHosts.${cfg.virtualHost} = {
+        locations.${cfg.contextPath}.proxyPass = "http://${cfg.listenAddress}:${toString cfg.port}";
+      };
+    };
+
+    users.users.airsonic = {
+      description = "Airsonic service user";
+      group = "airsonic";
+      name = cfg.user;
+      home = cfg.home;
+      createHome = true;
+      isSystemUser = true;
+    };
+    users.groups.airsonic = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/amazon-ssm-agent.nix b/nixpkgs/nixos/modules/services/misc/amazon-ssm-agent.nix
new file mode 100644
index 000000000000..02e44c73d87a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/amazon-ssm-agent.nix
@@ -0,0 +1,81 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.amazon-ssm-agent;
+
+  # The SSM agent doesn't pay attention to our /etc/os-release yet, and the lsb-release tool
+  # in nixpkgs doesn't seem to work properly on NixOS, so let's just fake the two fields SSM
+  # looks for. See https://github.com/aws/amazon-ssm-agent/issues/38 for upstream fix.
+  fake-lsb-release = pkgs.writeScriptBin "lsb_release" ''
+    #!${pkgs.runtimeShell}
+
+    case "$1" in
+      -i) echo "nixos";;
+      -r) echo "${config.system.nixos.version}";;
+    esac
+  '';
+in {
+  imports = [
+    (mkRenamedOptionModule [ "services" "ssm-agent" "enable" ] [ "services" "amazon-ssm-agent" "enable" ])
+    (mkRenamedOptionModule [ "services" "ssm-agent" "package" ] [ "services" "amazon-ssm-agent" "package" ])
+  ];
+
+  options.services.amazon-ssm-agent = {
+    enable = mkEnableOption (lib.mdDoc "Amazon SSM agent");
+
+    package = mkOption {
+      type = types.path;
+      description = lib.mdDoc "The Amazon SSM agent package to use";
+      default = pkgs.amazon-ssm-agent.override { overrideEtc = false; };
+      defaultText = literalExpression "pkgs.amazon-ssm-agent.override { overrideEtc = false; }";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # See https://github.com/aws/amazon-ssm-agent/blob/mainline/packaging/linux/amazon-ssm-agent.service
+    systemd.services.amazon-ssm-agent = {
+      inherit (cfg.package.meta) description;
+      after    = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ fake-lsb-release pkgs.coreutils ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/amazon-ssm-agent";
+        KillMode = "process";
+        # We want this restarting pretty frequently. It could be our only means
+        # of accessing the instance.
+        Restart = "always";
+        RestartPreventExitStatus = 194;
+        RestartSec = "90";
+      };
+    };
+
+    # Add user that Session Manager needs, and give it sudo.
+    # This is consistent with Amazon Linux 2 images.
+    security.sudo.extraRules = [
+      {
+        users = [ "ssm-user" ];
+        commands = [
+          {
+            command = "ALL";
+            options = [ "NOPASSWD" ];
+          }
+        ];
+      }
+    ];
+    # On Amazon Linux 2 images, the ssm-user user is pretty much a
+    # normal user with its own group. We do the same.
+    users.groups.ssm-user = {};
+    users.users.ssm-user = {
+      isNormalUser = true;
+      group = "ssm-user";
+    };
+
+    environment.etc."amazon/ssm/seelog.xml".source = "${cfg.package}/etc/amazon/ssm/seelog.xml.template";
+
+    environment.etc."amazon/ssm/amazon-ssm-agent.json".source =  "${cfg.package}/etc/amazon/ssm/amazon-ssm-agent.json.template";
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/ananicy.nix b/nixpkgs/nixos/modules/services/misc/ananicy.nix
new file mode 100644
index 000000000000..bc1b28efc0ba
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/ananicy.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ananicy;
+  configFile = pkgs.writeText "ananicy.conf" (generators.toKeyValue { } cfg.settings);
+  extraRules = pkgs.writeText "extraRules" (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.extraRules);
+  extraTypes = pkgs.writeText "extraTypes" (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.extraTypes);
+  extraCgroups = pkgs.writeText "extraCgroups" (concatMapStringsSep "\n" (l: builtins.toJSON l) cfg.extraCgroups);
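+  # The systemd unit shipped by the package is named after it (ananicy vs.
+  # ananicy-cpp), so derive the unit name from the selected package.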
+  servicename = if ((lib.getName cfg.package) == (lib.getName pkgs.ananicy-cpp)) then "ananicy-cpp" else "ananicy";
+in
+{
+  options = {
+    services.ananicy = {
+      enable = mkEnableOption (lib.mdDoc "Ananicy, an auto nice daemon");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ananicy;
+        defaultText = literalExpression "pkgs.ananicy";
+        example = literalExpression "pkgs.ananicy-cpp";
+        description = lib.mdDoc ''
+          Which ananicy package to use.
+        '';
+      };
+
+      rulesProvider = mkOption {
+        type = types.package;
+        default = pkgs.ananicy;
+        defaultText = literalExpression "pkgs.ananicy";
+        example = literalExpression "pkgs.ananicy-cpp";
+        description = lib.mdDoc ''
+          Which package to copy the default rules, types and cgroups from.
+        '';
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ int bool str ]);
+        default = { };
+        example = {
+          apply_nice = false;
+        };
+        description = lib.mdDoc ''
+          See <https://github.com/Nefelim4ag/Ananicy/blob/master/ananicy.d/ananicy.conf>
+        '';
+      };
+
+      extraRules = mkOption {
+        type = with types; listOf attrs;
+        default = [ ];
+        description = lib.mdDoc ''
+          Rules to write in 'nixRules.rules'. See:
+          <https://github.com/Nefelim4ag/Ananicy#configuration>
+          <https://gitlab.com/ananicy-cpp/ananicy-cpp/#global-configuration>
+        '';
+        example = [
+          { name = "eog"; type = "Image-Viewer"; }
+          { name = "fdupes"; type = "BG_CPUIO"; }
+        ];
+      };
+      extraTypes = mkOption {
+        type = with types; listOf attrs;
+        default = [ ];
+        description = lib.mdDoc ''
+          Types to write in 'nixTypes.types'. See:
+          <https://gitlab.com/ananicy-cpp/ananicy-cpp/#types>
+        '';
+        example = [
+          { type = "my_type"; nice = 19; other_parameter = "value"; }
+          { type = "compiler"; nice = 19; sched = "batch"; ioclass = "idle"; }
+        ];
+      };
+      extraCgroups = mkOption {
+        type = with types; listOf attrs;
+        default = [ ];
+        description = lib.mdDoc ''
+          Cgroups to write in 'nixCgroups.cgroups'. See:
+          <https://gitlab.com/ananicy-cpp/ananicy-cpp/#cgroups>
+        '';
+        example = [
+          { cgroup = "cpu80"; CPUQuota = 80; }
+        ];
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment = {
+      systemPackages = [ cfg.package ];
+      etc."ananicy.d".source = pkgs.runCommandLocal "ananicyfiles" { } ''
+        mkdir -p $out
+        # ananicy-cpp does not include rules or settings on purpose
+        if [[ -d "${cfg.rulesProvider}/etc/ananicy.d/00-default" ]]; then
+          cp -r ${cfg.rulesProvider}/etc/ananicy.d/* $out
+        else
+          cp -r ${cfg.rulesProvider}/* $out
+        fi
+
+        # configured through the settings option
+        rm -f $out/ananicy.conf
+        cp ${configFile} $out/ananicy.conf
+        ${optionalString (cfg.extraRules != [ ]) "cp ${extraRules} $out/nixRules.rules"}
+        ${optionalString (cfg.extraTypes != [ ]) "cp ${extraTypes} $out/nixTypes.types"}
+        ${optionalString (cfg.extraCgroups != [ ]) "cp ${extraCgroups} $out/nixCgroups.cgroups"}
+      '';
+    };
+
+    # ananicy and ananicy-cpp have different default settings
+    services.ananicy.settings =
+      let
+        mkOD = mkOptionDefault;
+      in
+      {
+        cgroup_load = mkOD true;
+        type_load = mkOD true;
+        rule_load = mkOD true;
+        apply_nice = mkOD true;
+        apply_ioclass = mkOD true;
+        apply_ionice = mkOD true;
+        apply_sched = mkOD true;
+        apply_oom_score_adj = mkOD true;
+        apply_cgroup = mkOD true;
+      } // (if ((lib.getName cfg.package) == (lib.getName pkgs.ananicy-cpp)) then {
+        # https://gitlab.com/ananicy-cpp/ananicy-cpp/-/blob/master/src/config.cpp#L12
+        loglevel = mkOD "warn"; # default is info but its spammy
+        cgroup_realtime_workaround = mkOD config.systemd.enableUnifiedCgroupHierarchy;
+        log_applied_rule = mkOD false;
+      } else {
+        # https://github.com/Nefelim4ag/Ananicy/blob/master/ananicy.d/ananicy.conf
+        check_disks_schedulers = mkOD true;
+        check_freq = mkOD 5;
+      });
+
+    systemd = {
+      # https://gitlab.com/ananicy-cpp/ananicy-cpp/#cgroups applies to both ananicy and -cpp
+      enableUnifiedCgroupHierarchy = mkDefault false;
+      packages = [ cfg.package ];
+      services."${servicename}" = {
+        wantedBy = [ "default.target" ];
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with maintainers; [ artturin ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/ankisyncd.nix b/nixpkgs/nixos/modules/services/misc/ankisyncd.nix
new file mode 100644
index 000000000000..7be8dc7dab8f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/ankisyncd.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ankisyncd;
+
+  name = "ankisyncd";
+
+  stateDir = "/var/lib/${name}";
+
+  toml = pkgs.formats.toml {};
+
+  configFile = toml.generate "ankisyncd.conf" {
+    listen = {
+      host = cfg.host;
+      port = cfg.port;
+    };
+    paths.root_dir = stateDir;
+    # TLS (encryption.ssl_enable / cert_file / key_file) is not configured by this module.
+  };
+in
+  {
+    options.services.ankisyncd = {
+      enable = mkEnableOption (lib.mdDoc "ankisyncd");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ankisyncd;
+        defaultText = literalExpression "pkgs.ankisyncd";
+        description = lib.mdDoc "The package to use for the ankisyncd command.";
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "ankisyncd host";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 27701;
+        description = lib.mdDoc "ankisyncd port";
+      };
+
+      openFirewall = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to open the firewall for the specified port.";
+      };
+    };
+
+    config = mkIf cfg.enable {
+      networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+      systemd.services.ankisyncd = {
+        description = "ankisyncd - Anki sync server";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        path = [ cfg.package ];
+
+        serviceConfig = {
+          Type = "simple";
+          DynamicUser = true;
+          StateDirectory = name;
+          ExecStart = "${cfg.package}/bin/ankisyncd --config ${configFile}";
+          Restart = "always";
+        };
+      };
+    };
+  }
diff --git a/nixpkgs/nixos/modules/services/misc/apache-kafka.nix b/nixpkgs/nixos/modules/services/misc/apache-kafka.nix
new file mode 100644
index 000000000000..598907aaf1c6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/apache-kafka.nix
@@ -0,0 +1,151 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.apache-kafka;
+
+  serverProperties =
+    if cfg.serverProperties != null then
+      cfg.serverProperties
+    else
+      ''
+        # Generated by nixos
+        broker.id=${toString cfg.brokerId}
+        port=${toString cfg.port}
+        host.name=${cfg.hostname}
+        log.dirs=${concatStringsSep "," cfg.logDirs}
+        zookeeper.connect=${cfg.zookeeper}
+        ${toString cfg.extraProperties}
+      '';
+
+  serverConfig = pkgs.writeText "server.properties" serverProperties;
+  logConfig = pkgs.writeText "log4j.properties" cfg.log4jProperties;
+
+in {
+
+  options.services.apache-kafka = {
+    enable = mkOption {
+      description = lib.mdDoc "Whether to enable Apache Kafka.";
+      default = false;
+      type = types.bool;
+    };
+
+    brokerId = mkOption {
+      description = lib.mdDoc "Broker ID.";
+      default = -1;
+      type = types.int;
+    };
+
+    port = mkOption {
+      description = lib.mdDoc "Port number the broker should listen on.";
+      default = 9092;
+      type = types.port;
+    };
+
+    hostname = mkOption {
+      description = lib.mdDoc "Hostname the broker should bind to.";
+      default = "localhost";
+      type = types.str;
+    };
+
+    logDirs = mkOption {
+      description = lib.mdDoc "Log file directories";
+      default = [ "/tmp/kafka-logs" ];
+      type = types.listOf types.path;
+    };
+
+    zookeeper = mkOption {
+      description = lib.mdDoc "Zookeeper connection string";
+      default = "localhost:2181";
+      type = types.str;
+    };
+
+    extraProperties = mkOption {
+      description = lib.mdDoc "Extra properties for server.properties.";
+      type = types.nullOr types.lines;
+      default = null;
+    };
+
+    serverProperties = mkOption {
+      description = lib.mdDoc ''
+        Complete server.properties content. Other server.properties config
+        options will be ignored if this option is used.
+      '';
+      type = types.nullOr types.lines;
+      default = null;
+    };
+
+    log4jProperties = mkOption {
+      description = lib.mdDoc "Kafka log4j property configuration.";
+      default = ''
+        log4j.rootLogger=INFO, stdout
+
+        log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+        log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+        log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n
+      '';
+      type = types.lines;
+    };
+
+    jvmOptions = mkOption {
+      description = lib.mdDoc "Extra command line options for the JVM running Kafka.";
+      default = [];
+      type = types.listOf types.str;
+      example = [
+        "-Djava.net.preferIPv4Stack=true"
+        "-Dcom.sun.management.jmxremote"
+        "-Dcom.sun.management.jmxremote.local.only=true"
+      ];
+    };
+
+    package = mkOption {
+      description = lib.mdDoc "The kafka package to use";
+      default = pkgs.apacheKafka;
+      defaultText = literalExpression "pkgs.apacheKafka";
+      type = types.package;
+    };
+
+    jre = mkOption {
+      description = lib.mdDoc "The JRE with which to run Kafka";
+      default = cfg.package.passthru.jre;
+      defaultText = literalExpression "pkgs.apacheKafka.passthru.jre";
+      type = types.package;
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [cfg.package];
+
+    users.users.apache-kafka = {
+      isSystemUser = true;
+      group = "apache-kafka";
+      description = "Apache Kafka daemon user";
+      home = head cfg.logDirs;
+    };
+    users.groups.apache-kafka = {};
+
+    systemd.tmpfiles.rules = map (logDir: "d '${logDir}' 0700 apache-kafka - - -") cfg.logDirs;
+
+    systemd.services.apache-kafka = {
+      description = "Apache Kafka Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.jre}/bin/java \
+            -cp "${cfg.package}/libs/*" \
+            -Dlog4j.configuration=file:${logConfig} \
+            ${toString cfg.jvmOptions} \
+            kafka.Kafka \
+            ${serverConfig}
+        '';
+        User = "apache-kafka";
+        SuccessExitStatus = "0 143";
+      };
+    };
+
+  };
+}
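
A minimal sketch of a broker definition built from the options above; the host name and log directory are placeholders:

{
  services.apache-kafka = {
    enable = true;
    brokerId = 0;
    hostname = "kafka.example.org";       # placeholder host name
    logDirs = [ "/var/lib/kafka-logs" ];  # overrides the /tmp default; created via the tmpfiles rules above
    zookeeper = "localhost:2181";
    jvmOptions = [ "-Xmx1G" "-Xms1G" ];
  };
}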
diff --git a/nixpkgs/nixos/modules/services/misc/atuin.nix b/nixpkgs/nixos/modules/services/misc/atuin.nix
new file mode 100644
index 000000000000..2d6ffc510ce5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/atuin.nix
@@ -0,0 +1,143 @@
+{ config, pkgs, lib, ... }:
+let
+  inherit (lib) mkOption types mdDoc mkIf;
+  cfg = config.services.atuin;
+in
+{
+  options = {
+    services.atuin = {
+      enable = lib.mkEnableOption (mdDoc "Atuin server for shell history sync");
+
+      openRegistration = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc "Allow new user registrations with the atuin server.";
+      };
+
+      path = mkOption {
+        type = types.str;
+        default = "";
+        description = mdDoc "A path to prepend to all the routes of the server.";
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = mdDoc "The host address the atuin server should listen on.";
+      };
+
+      maxHistoryLength = mkOption {
+        type = types.int;
+        default = 8192;
+        description = mdDoc "The max length of each history item the atuin server should store.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8888;
+        description = mdDoc "The port the atuin server should listen on.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc "Open ports in the firewall for the atuin server.";
+      };
+
+      database = {
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = mdDoc "Create the database and database user locally.";
+        };
+
+        uri = mkOption {
+          type = types.str;
+          default = "postgresql:///atuin?host=/run/postgresql";
+          example = "postgresql://atuin@localhost:5432/atuin";
+          description = mdDoc "URI to the database";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.database.createLocally -> config.services.postgresql.enable;
+        message = "Postgresql must be enabled to create a local database";
+      }
+    ];
+
+    services.postgresql = mkIf cfg.database.createLocally {
+      enable = true;
+      ensureUsers = [{
+        name = "atuin";
+        ensureDBOwnership = true;
+      }];
+      ensureDatabases = [ "atuin" ];
+    };
+
+    systemd.services.atuin = {
+      description = "atuin server";
+      requires = lib.optionals cfg.database.createLocally [ "postgresql.service" ];
+      after = [ "network.target" ] ++ lib.optionals cfg.database.createLocally [ "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.atuin}/bin/atuin server start";
+        RuntimeDirectory = "atuin";
+        RuntimeDirectoryMode = "0700";
+        DynamicUser = true;
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "full";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          # Required for connecting to database sockets.
+          "AF_UNIX"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        UMask = "0077";
+      };
+
+      environment = {
+        ATUIN_HOST = cfg.host;
+        ATUIN_PORT = toString cfg.port;
+        ATUIN_MAX_HISTORY_LENGTH = toString cfg.maxHistoryLength;
+        ATUIN_OPEN_REGISTRATION = lib.boolToString cfg.openRegistration;
+        ATUIN_DB_URI = cfg.database.uri;
+        ATUIN_PATH = cfg.path;
+        ATUIN_CONFIG_DIR = "/run/atuin"; # required to start, but not used as configuration is via environment variables
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+  };
+}
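
A sketch of enabling the server with a locally provisioned database, using only the options declared above:

{
  services.atuin = {
    enable = true;
    host = "0.0.0.0";
    port = 8888;
    openRegistration = true;        # allow new accounts to register
    openFirewall = true;
    database.createLocally = true;  # default; also enables the local PostgreSQL instance
  };
}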
diff --git a/nixpkgs/nixos/modules/services/misc/autofs.nix b/nixpkgs/nixos/modules/services/misc/autofs.nix
new file mode 100644
index 000000000000..723b67e8bb6b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/autofs.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.autofs;
+
+  autoMaster = pkgs.writeText "auto.master" cfg.autoMaster;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.autofs = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Mount filesystems on demand. Unmount them automatically.
+          You may also be interested in afuse.
+        '';
+      };
+
+      autoMaster = mkOption {
+        type = types.str;
+        example = literalExpression ''
+          let
+            mapConf = pkgs.writeText "auto" '''
+             kernel    -ro,soft,intr       ftp.kernel.org:/pub/linux
+             boot      -fstype=ext2        :/dev/hda1
+             windoze   -fstype=smbfs       ://windoze/c
+             removable -fstype=ext2        :/dev/hdd
+             cd        -fstype=iso9660,ro  :/dev/hdc
+             floppy    -fstype=auto        :/dev/fd0
+             server    -rw,hard,intr       / -ro myserver.me.org:/ \
+                                           /usr myserver.me.org:/usr \
+                                           /home myserver.me.org:/home
+            ''';
+          in '''
+            /auto file:''${mapConf}
+          '''
+        '';
+        description = lib.mdDoc ''
+          Contents of `/etc/auto.master` file. See {command}`auto.master(5)` and {command}`autofs(5)`.
+        '';
+      };
+
+      timeout = mkOption {
+        type = types.int;
+        default = 600;
+        description = lib.mdDoc "Set the global minimum timeout, in seconds, until directories are unmounted";
+      };
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Pass -d and -7 to automount and write log to the system journal.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    boot.kernelModules = [ "autofs" ];
+
+    systemd.services.autofs =
+      { description = "Automounts filesystems on demand";
+        after = [ "network.target" "ypbind.service" "sssd.service" "network-online.target" ];
+        wants = [ "network-online.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        preStart = ''
+          # There should be only one autofs service managed by systemd, so this should be safe.
+          rm -f /tmp/autofs-running
+        '';
+
+        serviceConfig = {
+          Type = "forking";
+          PIDFile = "/run/autofs.pid";
+          ExecStart = "${pkgs.autofs5}/bin/automount ${optionalString cfg.debug "-d"} -p /run/autofs.pid -t ${builtins.toString cfg.timeout} ${autoMaster}";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        };
+      };
+
+  };
+
+}
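
A sketch of a minimal autoMaster value; the NFS export shown is a placeholder:

{ pkgs, ... }:
{
  services.autofs = {
    enable = true;
    timeout = 300;
    autoMaster = let
      # Placeholder map: automount an NFS export under /auto/data on first access.
      mapConf = pkgs.writeText "auto" ''
        data -rw,soft nfs.example.org:/export/data
      '';
    in ''
      /auto file:${mapConf}
    '';
  };
}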
diff --git a/nixpkgs/nixos/modules/services/misc/autorandr.nix b/nixpkgs/nixos/modules/services/misc/autorandr.nix
new file mode 100644
index 000000000000..aa96acb61306
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/autorandr.nix
@@ -0,0 +1,365 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.autorandr;
+  hookType = types.lines;
+
+  matrixOf = n: m: elemType:
+  mkOptionType rec {
+    name = "matrixOf";
+    description =
+      "${toString n}×${toString m} matrix of ${elemType.description}s";
+    check = xss:
+      let listOfSize = l: xs: isList xs && length xs == l;
+      in listOfSize n xss
+      && all (xs: listOfSize m xs && all elemType.check xs) xss;
+    merge = mergeOneOption;
+    getSubOptions = prefix: elemType.getSubOptions (prefix ++ [ "*" "*" ]);
+    getSubModules = elemType.getSubModules;
+    substSubModules = mod: matrixOf n m (elemType.substSubModules mod);
+    functor = (defaultFunctor name) // { wrapped = elemType; };
+  };
+
+  profileModule = types.submodule {
+    options = {
+      fingerprint = mkOption {
+        type = types.attrsOf types.str;
+        description = lib.mdDoc ''
+          Output name to EDID mapping.
+          Use `autorandr --fingerprint` to get current setup values.
+        '';
+        default = { };
+      };
+
+      config = mkOption {
+        type = types.attrsOf configModule;
+        description = lib.mdDoc "Per output profile configuration.";
+        default = { };
+      };
+
+      hooks = mkOption {
+        type = hooksModule;
+        description = lib.mdDoc "Profile hook scripts.";
+        default = { };
+      };
+    };
+  };
+
+  configModule = types.submodule {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        description = lib.mdDoc "Whether to enable the output.";
+        default = true;
+      };
+
+      crtc = mkOption {
+        type = types.nullOr types.ints.unsigned;
+        description = lib.mdDoc "Output video display controller.";
+        default = null;
+        example = 0;
+      };
+
+      primary = mkOption {
+        type = types.bool;
+        description = lib.mdDoc "Whether output should be marked as primary";
+        default = false;
+      };
+
+      position = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Output position";
+        default = "";
+        example = "5760x0";
+      };
+
+      mode = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Output resolution.";
+        default = "";
+        example = "3840x2160";
+      };
+
+      rate = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Output framerate.";
+        default = "";
+        example = "60.00";
+      };
+
+      gamma = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Output gamma configuration.";
+        default = "";
+        example = "1.0:0.909:0.833";
+      };
+
+      rotate = mkOption {
+        type = types.nullOr (types.enum [ "normal" "left" "right" "inverted" ]);
+        description = lib.mdDoc "Output rotate configuration.";
+        default = null;
+        example = "left";
+      };
+
+      transform = mkOption {
+        type = types.nullOr (matrixOf 3 3 types.float);
+        default = null;
+        example = literalExpression ''
+          [
+            [ 0.6 0.0 0.0 ]
+            [ 0.0 0.6 0.0 ]
+            [ 0.0 0.0 1.0 ]
+          ]
+        '';
+        description = lib.mdDoc ''
+          Refer to
+          {manpage}`xrandr(1)`
+          for the documentation of the transform matrix.
+        '';
+      };
+
+      dpi = mkOption {
+        type = types.nullOr types.ints.positive;
+        description = lib.mdDoc "Output DPI configuration.";
+        default = null;
+        example = 96;
+      };
+
+      scale = mkOption {
+        type = types.nullOr (types.submodule {
+          options = {
+            method = mkOption {
+              type = types.enum [ "factor" "pixel" ];
+              description = lib.mdDoc "Output scaling method.";
+              default = "factor";
+              example = "pixel";
+            };
+
+            x = mkOption {
+              type = types.either types.float types.ints.positive;
+              description = lib.mdDoc "Horizontal scaling factor/pixels.";
+            };
+
+            y = mkOption {
+              type = types.either types.float types.ints.positive;
+              description = lib.mdDoc "Vertical scaling factor/pixels.";
+            };
+          };
+        });
+        description = lib.mdDoc ''
+          Output scale configuration.
+
+          Either configure by pixels or a scaling factor. When using pixel method the
+          {manpage}`xrandr(1)`
+          option
+          `--scale-from`
+          will be used; when using factor method the option
+          `--scale`
+          will be used.
+
+          This option is a shortcut version of the transform option and they are mutually
+          exclusive.
+        '';
+        default = null;
+        example = literalExpression ''
+          {
+            x = 1.25;
+            y = 1.25;
+          }
+        '';
+      };
+    };
+  };
+
+  hooksModule = types.submodule {
+    options = {
+      postswitch = mkOption {
+        type = types.attrsOf hookType;
+        description = lib.mdDoc "Postswitch hook executed after mode switch.";
+        default = { };
+      };
+
+      preswitch = mkOption {
+        type = types.attrsOf hookType;
+        description = lib.mdDoc "Preswitch hook executed before mode switch.";
+        default = { };
+      };
+
+      predetect = mkOption {
+        type = types.attrsOf hookType;
+        description = lib.mdDoc ''
+          Predetect hook executed before autorandr attempts to run xrandr.
+        '';
+        default = { };
+      };
+    };
+  };
+
+  hookToFile = folder: name: hook:
+    nameValuePair "xdg/autorandr/${folder}/${name}" {
+      source = "${pkgs.writeShellScriptBin "hook" hook}/bin/hook";
+    };
+  profileToFiles = name: profile:
+    with profile;
+    mkMerge ([
+      {
+        "xdg/autorandr/${name}/setup".text = concatStringsSep "\n"
+          (mapAttrsToList fingerprintToString fingerprint);
+        "xdg/autorandr/${name}/config".text =
+          concatStringsSep "\n" (mapAttrsToList configToString profile.config);
+      }
+      (mapAttrs' (hookToFile "${name}/postswitch.d") hooks.postswitch)
+      (mapAttrs' (hookToFile "${name}/preswitch.d") hooks.preswitch)
+      (mapAttrs' (hookToFile "${name}/predetect.d") hooks.predetect)
+    ]);
+  fingerprintToString = name: edid: "${name} ${edid}";
+  configToString = name: config:
+    if config.enable then
+      concatStringsSep "\n" ([ "output ${name}" ]
+        ++ optional (config.position != "") "pos ${config.position}"
+        ++ optional (config.crtc != null) "crtc ${toString config.crtc}"
+        ++ optional config.primary "primary"
+        ++ optional (config.dpi != null) "dpi ${toString config.dpi}"
+        ++ optional (config.gamma != "") "gamma ${config.gamma}"
+        ++ optional (config.mode != "") "mode ${config.mode}"
+        ++ optional (config.rate != "") "rate ${config.rate}"
+        ++ optional (config.rotate != null) "rotate ${config.rotate}"
+        ++ optional (config.transform != null) ("transform "
+          + concatMapStringsSep "," toString (flatten config.transform))
+        ++ optional (config.scale != null)
+        ((if config.scale.method == "factor" then "scale" else "scale-from")
+          + " ${toString config.scale.x}x${toString config.scale.y}"))
+    else ''
+      output ${name}
+      off
+    '';
+
+in {
+
+  options = {
+
+    services.autorandr = {
+      enable = mkEnableOption (lib.mdDoc "handling of hotplug and sleep events by autorandr");
+
+      defaultTarget = mkOption {
+        default = "default";
+        type = types.str;
+        description = lib.mdDoc ''
+          Fallback if no monitor layout can be detected. See the docs
+          (https://github.com/phillipberndt/autorandr/blob/v1.0/README.md#how-to-use)
+          for further reference.
+        '';
+      };
+
+      ignoreLid = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Treat outputs as connected even if their lids are closed";
+      };
+
+      hooks = mkOption {
+        type = hooksModule;
+        description = lib.mdDoc "Global hook scripts";
+        default = { };
+        example = literalExpression ''
+          {
+            postswitch = {
+              "notify-i3" = "''${pkgs.i3}/bin/i3-msg restart";
+              "change-background" = readFile ./change-background.sh;
+              "change-dpi" = '''
+                case "$AUTORANDR_CURRENT_PROFILE" in
+                  default)
+                    DPI=120
+                    ;;
+                  home)
+                    DPI=192
+                    ;;
+                  work)
+                    DPI=144
+                    ;;
+                  *)
+                    echo "Unknown profle: $AUTORANDR_CURRENT_PROFILE"
+                    exit 1
+                esac
+                echo "Xft.dpi: $DPI" | ''${pkgs.xorg.xrdb}/bin/xrdb -merge
+              ''';
+            };
+          }
+        '';
+      };
+      profiles = mkOption {
+        type = types.attrsOf profileModule;
+        description = lib.mdDoc "Autorandr profiles specification.";
+        default = { };
+        example = literalExpression ''
+          {
+            "work" = {
+              fingerprint = {
+                eDP1 = "<EDID>";
+                DP1 = "<EDID>";
+              };
+              config = {
+                eDP1.enable = false;
+                DP1 = {
+                  enable = true;
+                  crtc = 0;
+                  primary = true;
+                  position = "0x0";
+                  mode = "3840x2160";
+                  gamma = "1.0:0.909:0.833";
+                  rate = "60.00";
+                  rotate = "left";
+                };
+              };
+              hooks.postswitch = readFile ./work-postswitch.sh;
+            };
+          }
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    services.udev.packages = [ pkgs.autorandr ];
+
+    environment = {
+      systemPackages = [ pkgs.autorandr ];
+      etc = mkMerge ([
+        (mapAttrs' (hookToFile "postswitch.d") cfg.hooks.postswitch)
+        (mapAttrs' (hookToFile "preswitch.d") cfg.hooks.preswitch)
+        (mapAttrs' (hookToFile "predetect.d") cfg.hooks.predetect)
+        (mkMerge (mapAttrsToList profileToFiles cfg.profiles))
+      ]);
+    };
+
+    systemd.services.autorandr = {
+      wantedBy = [ "sleep.target" ];
+      description = "Autorandr execution hook";
+      after = [ "sleep.target" ];
+
+      startLimitIntervalSec = 5;
+      startLimitBurst = 1;
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.autorandr}/bin/autorandr \
+            --batch \
+            --change \
+            --default ${cfg.defaultTarget} \
+            ${optionalString cfg.ignoreLid "--ignore-lid"}
+        '';
+        Type = "oneshot";
+        RemainAfterExit = false;
+        KillMode = "process";
+      };
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ alexnortung ];
+}
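
To illustrate the profile options above, a single-display profile might be declared like this sketch (the profile name, output name, and EDID string are placeholders; `autorandr --fingerprint` prints the real values):

{
  services.autorandr = {
    enable = true;
    defaultTarget = "mobile";        # placeholder profile used when no fingerprint matches
    profiles.mobile = {
      fingerprint.eDP-1 = "<EDID>";  # output name to EDID mapping
      config.eDP-1 = {
        primary = true;
        position = "0x0";
        mode = "1920x1080";
        rate = "60.00";
      };
    };
  };
}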
diff --git a/nixpkgs/nixos/modules/services/misc/autosuspend.nix b/nixpkgs/nixos/modules/services/misc/autosuspend.nix
new file mode 100644
index 000000000000..b3e362533a09
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/autosuspend.nix
@@ -0,0 +1,230 @@
+{ config, pkgs, lib, ... }:
+let
+  inherit (lib) mapAttrs' nameValuePair filterAttrs types mkEnableOption
+    mdDoc mkPackageOptionMD mkOption literalExpression mkIf flatten
+    maintainers attrValues;
+
+  cfg = config.services.autosuspend;
+
+  settingsFormat = pkgs.formats.ini { };
+
+  checks =
+    mapAttrs'
+      (n: v: nameValuePair "check.${n}" (filterAttrs (_: v: v != null) v))
+      cfg.checks;
+  wakeups =
+    mapAttrs'
+      (n: v: nameValuePair "wakeup.${n}" (filterAttrs (_: v: v != null) v))
+      cfg.wakeups;
+
+  # Whether the given check is enabled
+  hasCheck = class:
+    (filterAttrs
+      (n: v: v.enabled && (if v.class == null then n else v.class) == class)
+      cfg.checks)
+    != { };
+
+  # Dependencies needed by specific checks
+  dependenciesForChecks = {
+    "Smb" = pkgs.samba;
+    "XIdleTime" = [ pkgs.xprintidle pkgs.sudo ];
+  };
+
+  autosuspend-conf =
+    settingsFormat.generate "autosuspend.conf" ({ general = cfg.settings; } // checks // wakeups);
+
+  autosuspend = cfg.package;
+
+  checkType = types.submodule {
+    freeformType = settingsFormat.type.nestedTypes.elemType;
+
+    options.enabled = mkEnableOption (mdDoc "this activity check") // { default = true; };
+
+    options.class = mkOption {
+      default = null;
+      type = with types; nullOr (enum [
+        "ActiveCalendarEvent"
+        "ActiveConnection"
+        "ExternalCommand"
+        "JsonPath"
+        "Kodi"
+        "KodiIdleTime"
+        "LastLogActivity"
+        "Load"
+        "LogindSessionsIdle"
+        "Mpd"
+        "NetworkBandwidth"
+        "Ping"
+        "Processes"
+        "Smb"
+        "Users"
+        "XIdleTime"
+        "XPath"
+      ]);
+      description = mdDoc ''
+        Name of the class implementing the check.  If this option is not specified, the check's
+        name must represent a valid internal check class.
+      '';
+    };
+  };
+
+  wakeupType = types.submodule {
+    freeformType = settingsFormat.type.nestedTypes.elemType;
+
+    options.enabled = mkEnableOption (mdDoc "this wake-up check") // { default = true; };
+
+    options.class = mkOption {
+      default = null;
+      type = with types; nullOr (enum [
+        "Calendar"
+        "Command"
+        "File"
+        "Periodic"
+        "SystemdTimer"
+        "XPath"
+        "XPathDelta"
+      ]);
+      description = mdDoc ''
+        Name of the class implementing the check.  If this option is not specified, the check's
+        name must represent a valid internal check class.
+      '';
+    };
+  };
+in
+{
+  options = {
+    services.autosuspend = {
+      enable = mkEnableOption (mdDoc "the autosuspend daemon");
+
+      package = mkPackageOptionMD pkgs "autosuspend" { };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = settingsFormat.type.nestedTypes.elemType;
+
+          options = {
+            # Provide reasonable defaults for these two (required) options
+            suspend_cmd = mkOption {
+              default = "systemctl suspend";
+              type = with types; str;
+              description = mdDoc ''
+                The command to execute in case the host shall be suspended. This line can contain
+                additional command line arguments to the command to execute.
+              '';
+            };
+            wakeup_cmd = mkOption {
+              default = ''sh -c 'echo 0 > /sys/class/rtc/rtc0/wakealarm && echo {timestamp:.0f} > /sys/class/rtc/rtc0/wakealarm' '';
+              type = with types; str;
+              description = mdDoc ''
+                The command to execute for scheduling a wake up of the system. The given string is
+                processed using Python’s `str.format()` and a format argument called `timestamp`
+                encodes the UTC timestamp of the planned wake up time (float). Additionally `iso`
+                can be used to acquire the timestamp in ISO 8601 format.
+              '';
+            };
+          };
+        };
+        default = { };
+        example = literalExpression ''
+          {
+            enable = true;
+            interval = 30;
+            idle_time = 120;
+          }
+        '';
+        description = mdDoc ''
+          Configuration for autosuspend, see
+          <https://autosuspend.readthedocs.io/en/latest/configuration_file.html#general-configuration>
+          for supported values.
+        '';
+      };
+
+      checks = mkOption {
+        default = { };
+        type = with types; attrsOf checkType;
+        description = mdDoc ''
+          Checks for activity.  For more information, see:
+           - <https://autosuspend.readthedocs.io/en/latest/configuration_file.html#activity-check-configuration>
+           - <https://autosuspend.readthedocs.io/en/latest/available_checks.html>
+        '';
+        example = literalExpression ''
+          {
+            # Basic activity check configuration.
+            # The check class name is derived from the section header (Ping in this case).
+            # Remember to enable desired checks. They are disabled by default.
+            Ping = {
+              hosts = "192.168.0.7";
+            };
+
+            # This check is disabled.
+            Smb.enabled = false;
+
+            # Example for a custom check name.
+            # This will use the Users check with the custom name RemoteUsers.
+            # Custom names are necessary in case a check class is used multiple times.
+            # Custom names can also be used for clarification.
+            RemoteUsers = {
+              class = "Users";
+              name = ".*";
+              terminal = ".*";
+              host = "[0-9].*";
+            };
+
+            # Here the Users activity check is used again with different settings and a different name
+            LocalUsers = {
+              class = "Users";
+              name = ".*";
+              terminal = ".*";
+              host = "localhost";
+            };
+          }
+        '';
+      };
+
+      wakeups = mkOption {
+        default = { };
+        type = with types; attrsOf wakeupType;
+        description = mdDoc ''
+          Checks for wake up.  For more information, see:
+           - <https://autosuspend.readthedocs.io/en/latest/configuration_file.html#wake-up-check-configuration>
+           - <https://autosuspend.readthedocs.io/en/latest/available_wakeups.html>
+        '';
+        example = literalExpression ''
+          {
+            # Wake up checks reuse the same configuration mechanism as activity checks.
+            Calendar = {
+              url = "http://example.org/test.ics";
+            };
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.autosuspend = {
+      description = "A daemon to suspend your server in case of inactivity";
+      documentation = [ "https://autosuspend.readthedocs.io/en/latest/systemd_integration.html" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = flatten (attrValues (filterAttrs (n: _: hasCheck n) dependenciesForChecks));
+      serviceConfig = {
+        ExecStart = ''${autosuspend}/bin/autosuspend -l ${autosuspend}/etc/autosuspend-logging.conf -c ${autosuspend-conf} daemon'';
+      };
+    };
+
+    systemd.services.autosuspend-detect-suspend = {
+      description = "Notifies autosuspend about suspension";
+      documentation = [ "https://autosuspend.readthedocs.io/en/latest/systemd_integration.html" ];
+      wantedBy = [ "sleep.target" ];
+      after = [ "sleep.target" ];
+      serviceConfig = {
+        ExecStart = ''${autosuspend}/bin/autosuspend -l ${autosuspend}/etc/autosuspend-logging.conf -c ${autosuspend-conf} presuspend'';
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with maintainers; [ xlambein ];
+  };
+}
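
As a sketch, a configuration that suspends after 15 idle minutes unless a placeholder host keeps answering pings:

{
  services.autosuspend = {
    enable = true;
    settings = {
      interval = 30;    # seconds between evaluation rounds
      idle_time = 900;  # suspend after 15 minutes without detected activity
    };
    checks.Ping.hosts = "192.168.0.7";  # placeholder address; the check class is derived from the name
  };
}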
diff --git a/nixpkgs/nixos/modules/services/misc/bazarr.nix b/nixpkgs/nixos/modules/services/misc/bazarr.nix
new file mode 100644
index 000000000000..07c935053591
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/bazarr.nix
@@ -0,0 +1,77 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.bazarr;
+in
+{
+  options = {
+    services.bazarr = {
+      enable = mkEnableOption (lib.mdDoc "bazarr, a subtitle manager for Sonarr and Radarr");
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the bazarr web interface.";
+      };
+
+      listenPort = mkOption {
+        type = types.port;
+        default = 6767;
+        description = lib.mdDoc "Port on which the bazarr web interface should listen";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "bazarr";
+        description = lib.mdDoc "User account under which bazarr runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "bazarr";
+        description = lib.mdDoc "Group under which bazarr runs.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.bazarr = {
+      description = "bazarr";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = rec {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        StateDirectory = "bazarr";
+        SyslogIdentifier = "bazarr";
+        ExecStart = pkgs.writeShellScript "start-bazarr" ''
+          ${pkgs.bazarr}/bin/bazarr \
+            --config '/var/lib/${StateDirectory}' \
+            --port ${toString cfg.listenPort} \
+            --no-update True
+        '';
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listenPort ];
+    };
+
+    users.users = mkIf (cfg.user == "bazarr") {
+      bazarr = {
+        isSystemUser = true;
+        group = cfg.group;
+        home = "/var/lib/${config.systemd.services.bazarr.serviceConfig.StateDirectory}";
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "bazarr") {
+      bazarr = {};
+    };
+  };
+}
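
A minimal sketch using the options above:

{
  services.bazarr = {
    enable = true;
    listenPort = 6767;
    openFirewall = true;  # expose the web interface beyond localhost
  };
}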
diff --git a/nixpkgs/nixos/modules/services/misc/bcg.nix b/nixpkgs/nixos/modules/services/misc/bcg.nix
new file mode 100644
index 000000000000..214c89dbfe72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/bcg.nix
@@ -0,0 +1,175 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+
+with lib;
+
+let
+  cfg = config.services.bcg;
+  configFile = (pkgs.formats.yaml {}).generate "bcg.conf.yaml" (
+    filterAttrsRecursive (n: v: v != null) {
+      inherit (cfg) device name mqtt;
+      retain_node_messages = cfg.retainNodeMessages;
+      qos_node_messages = cfg.qosNodeMessages;
+      base_topic_prefix = cfg.baseTopicPrefix;
+      automatic_remove_kit_from_names = cfg.automaticRemoveKitFromNames;
+      automatic_rename_kit_nodes = cfg.automaticRenameKitNodes;
+      automatic_rename_generic_nodes = cfg.automaticRenameGenericNodes;
+      automatic_rename_nodes = cfg.automaticRenameNodes;
+    }
+  );
+in
+{
+  options = {
+    services.bcg = {
+      enable = mkEnableOption (mdDoc "BigClown gateway");
+      package = mkOption {
+        default = pkgs.python3Packages.bcg;
+        defaultText = literalExpression "pkgs.python3Packages.bcg";
+        description = mdDoc "Which bcg derivation to use.";
+        type = types.package;
+      };
+      environmentFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        example = [ "/run/keys/bcg.env" ];
+        description = mdDoc ''
+          Files to load as environment files. Environment variables from these files
+          will be interpolated into the config file using envsubst with this
+          syntax: `$ENVIRONMENT` or `''${VARIABLE}`.
+          This is useful to avoid putting secrets into the Nix store.
+        '';
+      };
+      verbose = mkOption {
+        type = types.enum ["CRITICAL" "ERROR" "WARNING" "INFO" "DEBUG"];
+        default = "WARNING";
+        description = mdDoc "Verbosity level.";
+      };
+      device = mkOption {
+        type = types.str;
+        description = mdDoc "Device name to configure gateway to use.";
+      };
+      name = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = mdDoc ''
+          Name for the device.
+
+          Supported variables:
+          * `{ip}` IP address
+          * `{id}` The ID of the connected usb-dongle or core-module
+
+          `null` can be used for automatic detection from gateway firmware.
+        '';
+      };
+      mqtt = {
+        host = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = mdDoc "Host where MQTT server is running.";
+        };
+        port = mkOption {
+          type = types.port;
+          default = 1883;
+          description = mdDoc "Port of MQTT server.";
+        };
+        username = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc "MQTT server access username.";
+        };
+        password = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc "MQTT server access password.";
+        };
+        cafile = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc "Certificate Authority file for MQTT server access.";
+        };
+        certfile = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc "Certificate file for MQTT server access.";
+        };
+        keyfile = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc "Key file for MQTT server access.";
+        };
+      };
+      retainNodeMessages = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc "Specify that node messages should be retaied in MQTT broker.";
+      };
+      qosNodeMessages = mkOption {
+        type = types.int;
+        default = 1;
+        description = mdDoc "Set the guarantee of MQTT message delivery.";
+      };
+      baseTopicPrefix = mkOption {
+        type = types.str;
+        default = "";
+        description = mdDoc "Topic prefix added to all MQTT messages.";
+      };
+      automaticRemoveKitFromNames = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc "Automatically remove kits.";
+      };
+      automaticRenameKitNodes = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc "Automatically rename kit's nodes.";
+      };
+      automaticRenameGenericNodes = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc "Automatically rename generic nodes.";
+      };
+      automaticRenameNodes = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc "Automatically rename all nodes.";
+      };
+      rename = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = mdDoc "Rename nodes to different name.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [
+      python3Packages.bcg
+      python3Packages.bch
+    ];
+
+    systemd.services.bcg = let
+      envConfig = cfg.environmentFiles != [];
+      finalConfig = if envConfig
+                    then "$RUNTIME_DIRECTORY/bcg.config.yaml"
+                    else configFile;
+    in {
+      description = "BigClown Gateway";
+      wantedBy = [ "multi-user.target" ];
+      wants = mkIf config.services.mosquitto.enable [ "mosquitto.service" ];
+      after = [ "network-online.target" ];
+      preStart = ''
+        umask 077
+        ${pkgs.envsubst}/bin/envsubst -i "${configFile}" -o "${finalConfig}"
+        '';
+      serviceConfig = {
+        EnvironmentFile = cfg.environmentFiles;
+        ExecStart="${cfg.package}/bin/bcg -c ${finalConfig} -v ${cfg.verbose}";
+        RuntimeDirectory = "bcg";
+      };
+    };
+  };
+}
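
A sketch of a gateway definition; the device path and environment file are placeholders:

{
  services.bcg = {
    enable = true;
    device = "/dev/ttyUSB0";                     # placeholder dongle path
    mqtt = {
      host = "127.0.0.1";
      port = 1883;
    };
    environmentFiles = [ "/run/keys/bcg.env" ];  # e.g. MQTT credentials, kept out of the Nix store
  };
}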
diff --git a/nixpkgs/nixos/modules/services/misc/beanstalkd.nix b/nixpkgs/nixos/modules/services/misc/beanstalkd.nix
new file mode 100644
index 000000000000..4262cae323b9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/beanstalkd.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.beanstalkd;
+  pkg = pkgs.beanstalkd;
+in
+
+{
+  # interface
+
+  options = {
+    services.beanstalkd = {
+      enable = mkEnableOption (lib.mdDoc "the Beanstalk work queue");
+
+      listen = {
+        port = mkOption {
+          type = types.port;
+          description = lib.mdDoc "TCP port that will be used to accept client connections.";
+          default = 11300;
+        };
+
+        address = mkOption {
+          type = types.str;
+          description = lib.mdDoc "IP address to listen on.";
+          default = "127.0.0.1";
+          example = "0.0.0.0";
+        };
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to open ports in the firewall for the server.";
+      };
+    };
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ];
+    };
+
+    environment.systemPackages = [ pkg ];
+
+    systemd.services.beanstalkd = {
+      description = "Beanstalk Work Queue";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        Restart = "always";
+        ExecStart = "${pkg}/bin/beanstalkd -l ${cfg.listen.address} -p ${toString cfg.listen.port} -b $STATE_DIRECTORY";
+        StateDirectory = "beanstalkd";
+      };
+    };
+
+  };
+}
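
A sketch exposing the queue on all interfaces:

{
  services.beanstalkd = {
    enable = true;
    listen.address = "0.0.0.0";
    listen.port = 11300;
    openFirewall = true;
  };
}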
diff --git a/nixpkgs/nixos/modules/services/misc/bees.nix b/nixpkgs/nixos/modules/services/misc/bees.nix
new file mode 100644
index 000000000000..37f90c682221
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/bees.nix
@@ -0,0 +1,129 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.beesd;
+
+  logLevels = { emerg = 0; alert = 1; crit = 2; err = 3; warning = 4; notice = 5; info = 6; debug = 7; };
+
+  fsOptions = with types; {
+    options.spec = mkOption {
+      type = str;
+      description = lib.mdDoc ''
+        Description of how to identify the filesystem to be duplicated by this
+        instance of bees. Note that deduplication crosses subvolumes; one must
+        not configure multiple instances for subvolumes of the same filesystem
+        (or block devices which are part of the same filesystem), but only for
+        completely independent btrfs filesystems.
+
+        This must be in a format usable by findmnt; that could be a key=value
+        pair, or a bare path to a mount point.
+        Using bare paths will allow systemd to start the beesd service only
+        after mounting the associated path.
+      '';
+      example = "LABEL=MyBulkDataDrive";
+    };
+    options.hashTableSizeMB = mkOption {
+      type = types.addCheck types.int (n: mod n 16 == 0);
+      default = 1024; # 1GB; default from upstream beesd script
+      description = lib.mdDoc ''
+        Hash table size in MB; must be a multiple of 16.
+
+        A larger ratio of index size to storage size means smaller blocks of
+        duplicate content are recognized.
+
+        If you have 1TB of data, a 4GB hash table (which is to say, a value of
+        4096) will permit 4KB extents (the smallest possible size) to be
+        recognized, whereas a value of 1024 -- creating a 1GB hash table --
+        will recognize only aligned duplicate blocks of 16KB.
+      '';
+    };
+    options.verbosity = mkOption {
+      type = types.enum (attrNames logLevels ++ attrValues logLevels);
+      apply = v: if isString v then logLevels.${v} else v;
+      default = "info";
+      description = lib.mdDoc "Log verbosity (syslog keyword/level).";
+    };
+    options.workDir = mkOption {
+      type = str;
+      default = ".beeshome";
+      description = lib.mdDoc ''
+        Name (relative to the root of the filesystem) of the subvolume where
+        the hash table will be stored.
+      '';
+    };
+    options.extraOptions = mkOption {
+      type = listOf str;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra command-line options passed to the daemon. See upstream bees documentation.
+      '';
+      example = literalExpression ''
+        [ "--thread-count" "4" ]
+      '';
+    };
+  };
+
+in
+{
+
+  options.services.beesd = {
+    filesystems = mkOption {
+      type = with types; attrsOf (submodule fsOptions);
+      description = lib.mdDoc "BTRFS filesystems to run block-level deduplication on.";
+      default = { };
+      example = literalExpression ''
+        {
+          root = {
+            spec = "LABEL=root";
+            hashTableSizeMB = 2048;
+            verbosity = "crit";
+            extraOptions = [ "--loadavg-target" "5.0" ];
+          };
+        }
+      '';
+    };
+  };
+  config = {
+    systemd.services = mapAttrs'
+      (name: fs: nameValuePair "beesd@${name}" {
+        description = "Block-level BTRFS deduplication for %i";
+        after = [ "sysinit.target" ];
+
+        serviceConfig =
+          let
+            configOpts = [
+              fs.spec
+              "verbosity=${toString fs.verbosity}"
+              "idxSizeMB=${toString fs.hashTableSizeMB}"
+              "workDir=${fs.workDir}"
+            ];
+            configOptsStr = escapeShellArgs configOpts;
+          in
+          {
+            # Values from https://github.com/Zygo/bees/blob/v0.6.5/scripts/beesd@.service.in
+            ExecStart = "${pkgs.bees}/bin/bees-service-wrapper run ${configOptsStr} -- --no-timestamps ${escapeShellArgs fs.extraOptions}";
+            ExecStopPost = "${pkgs.bees}/bin/bees-service-wrapper cleanup ${configOptsStr}";
+            CPUAccounting = true;
+            CPUSchedulingPolicy = "batch";
+            CPUWeight = 12;
+            IOSchedulingClass = "idle";
+            IOSchedulingPriority = 7;
+            IOWeight = 10;
+            KillMode = "control-group";
+            KillSignal = "SIGTERM";
+            MemoryAccounting = true;
+            Nice = 19;
+            Restart = "on-abnormal";
+            StartupCPUWeight = 25;
+            StartupIOWeight = 25;
+            SyslogIdentifier = "beesd"; # would otherwise be "bees-service-wrapper"
+          };
+        unitConfig.RequiresMountsFor = lib.mkIf (lib.hasPrefix "/" fs.spec) fs.spec;
+        wantedBy = [ "multi-user.target" ];
+      })
+      cfg.filesystems;
+  };
+}
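
A sketch of a single-filesystem instance, mirroring the example options above:

{
  services.beesd.filesystems.root = {
    spec = "LABEL=root";                 # anything findmnt accepts; a bare mount point also works
    hashTableSizeMB = 2048;              # must be a multiple of 16
    verbosity = "info";
    extraOptions = [ "--loadavg-target" "5.0" ];
  };
}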
diff --git a/nixpkgs/nixos/modules/services/misc/bepasty.nix b/nixpkgs/nixos/modules/services/misc/bepasty.nix
new file mode 100644
index 000000000000..70d07629493b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/bepasty.nix
@@ -0,0 +1,179 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  gunicorn = pkgs.python3Packages.gunicorn;
+  bepasty = pkgs.bepasty;
+  gevent = pkgs.python3Packages.gevent;
+  python = pkgs.python3Packages.python;
+  cfg = config.services.bepasty;
+  user = "bepasty";
+  group = "bepasty";
+  default_home = "/var/lib/bepasty";
+in
+{
+  options.services.bepasty = {
+    enable = mkEnableOption (lib.mdDoc "Bepasty servers");
+
+    servers = mkOption {
+      default = {};
+      description = lib.mdDoc ''
+        Configure one or more bepasty servers, each of which is started with
+        gunicorn.
+        '';
+      type = with types ; attrsOf (submodule ({ config, ... } : {
+
+        options = {
+
+          bind = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Bind address to be used for this server.
+              '';
+            example = "0.0.0.0:8000";
+            default = "127.0.0.1:8000";
+          };
+
+          dataDir = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Path to the directory where the pastes will be saved.
+              '';
+            default = default_home+"/data";
+          };
+
+          defaultPermissions = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Default permissions for all unauthenticated accesses.
+              '';
+            example = "read,create,delete";
+            default = "read";
+          };
+
+          extraConfig = mkOption {
+            type = types.lines;
+            description = lib.mdDoc ''
+              Extra configuration for the bepasty server, appended to the
+              generated configuration file. See
+              https://bepasty-server.readthedocs.org/en/latest/quickstart.html#configuring-bepasty
+              for all options.
+              '';
+            default = "";
+            example = ''
+              PERMISSIONS = {
+                'myadminsecret': 'admin,list,create,read,delete',
+              }
+              MAX_ALLOWED_FILE_SIZE = 5 * 1000 * 1000
+              '';
+          };
+
+          secretKey = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Server secret for safe session cookies; must be set.
+
+              Warning: this secret is stored in the WORLD-READABLE Nix store!
+
+              It's recommended to use {option}`secretKeyFile`
+              which takes precedence over {option}`secretKey`.
+              '';
+            default = "";
+          };
+
+          secretKeyFile = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc ''
+              A file that contains the server secret for safe session cookies; must be set.
+
+              {option}`secretKeyFile` takes precedence over {option}`secretKey`.
+
+              Warning: when {option}`secretKey` is non-empty {option}`secretKeyFile`
+              defaults to a file in the WORLD-READABLE Nix store containing that secret.
+              '';
+          };
+
+          workDir = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Path to the working directory (used for config and pidfile).
+              Defaults to the user's home directory.
+              '';
+            default = default_home;
+          };
+
+        };
+        config = {
+          secretKeyFile = mkDefault (
+            if config.secretKey != ""
+            then toString (pkgs.writeTextFile {
+              name = "bepasty-secret-key";
+              text = config.secretKey;
+            })
+            else null
+          );
+        };
+      }));
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ bepasty ];
+
+    # creates gunicorn systemd service for each configured server
+    systemd.services = mapAttrs' (name: server:
+      nameValuePair ("bepasty-server-${name}-gunicorn")
+        ({
+          description = "Bepasty Server ${name}";
+          wantedBy = [ "multi-user.target" ];
+          after = [ "network.target" ];
+          restartIfChanged = true;
+
+          environment = let
+            penv = python.buildEnv.override {
+              extraLibs = [ bepasty gevent ];
+            };
+          in {
+            BEPASTY_CONFIG = "${server.workDir}/bepasty-${name}.conf";
+            PYTHONPATH= "${penv}/${python.sitePackages}/";
+          };
+
+          serviceConfig = {
+            Type = "simple";
+            PrivateTmp = true;
+            ExecStartPre = assert server.secretKeyFile != null; pkgs.writeScript "bepasty-server.${name}-init" ''
+              #!/bin/sh
+              mkdir -p "${server.workDir}"
+              mkdir -p "${server.dataDir}"
+              chown ${user}:${group} "${server.workDir}" "${server.dataDir}"
+              cat > ${server.workDir}/bepasty-${name}.conf <<EOF
+              SITENAME="${name}"
+              STORAGE_FILESYSTEM_DIRECTORY="${server.dataDir}"
+              SECRET_KEY="$(cat "${server.secretKeyFile}")"
+              DEFAULT_PERMISSIONS="${server.defaultPermissions}"
+              ${server.extraConfig}
+              EOF
+            '';
+            ExecStart = ''${gunicorn}/bin/gunicorn bepasty.wsgi --name ${name} \
+              -u ${user} \
+              -g ${group} \
+              --workers 3 --log-level=info \
+              --bind=${server.bind} \
+              --pid ${server.workDir}/gunicorn-${name}.pid \
+              -k gevent
+            '';
+          };
+        })
+    ) cfg.servers;
+
+    users.users.${user} =
+      { uid = config.ids.uids.bepasty;
+        group = group;
+        home = default_home;
+      };
+
+    users.groups.${group}.gid = config.ids.gids.bepasty;
+  };
+}
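
A sketch of one server instance; the secret key path is a placeholder:

{
  services.bepasty = {
    enable = true;
    servers.main = {
      bind = "127.0.0.1:8000";
      defaultPermissions = "read";
      secretKeyFile = "/run/keys/bepasty-secret";  # placeholder path; takes precedence over secretKey
    };
  };
}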
diff --git a/nixpkgs/nixos/modules/services/misc/calibre-server.nix b/nixpkgs/nixos/modules/services/misc/calibre-server.nix
new file mode 100644
index 000000000000..e1ddae1de1f8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/calibre-server.nix
@@ -0,0 +1,146 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.calibre-server;
+
+  documentationLink = "https://manual.calibre-ebook.com";
+  generatedDocumentationLink = documentationLink + "/generated/en/calibre-server.html";
+
+  execFlags = (concatStringsSep " "
+    (mapAttrsToList (k: v: "${k} ${toString v}") (filterAttrs (name: value: value != null) {
+      "--listen-on" = cfg.host;
+      "--port" = cfg.port;
+      "--auth-mode" = cfg.auth.mode;
+      "--userdb" = cfg.auth.userDb;
+    }) ++ [(optionalString (cfg.auth.enable == true) "--enable-auth")])
+  );
+in
+
+{
+  imports = [
+    (mkChangedOptionModule [ "services" "calibre-server" "libraryDir" ] [ "services" "calibre-server" "libraries" ]
+      (config:
+        let libraryDir = getAttrFromPath [ "services" "calibre-server" "libraryDir" ] config;
+        in [ libraryDir ]
+      )
+    )
+  ];
+
+  options = {
+    services.calibre-server = {
+
+      enable = mkEnableOption (lib.mdDoc "calibre-server");
+      package = lib.mkPackageOptionMD pkgs "calibre" { };
+
+      libraries = mkOption {
+        type = types.listOf types.path;
+        default = [ "/var/lib/calibre-server" ];
+        description = lib.mdDoc ''
+          The directories of the libraries to serve. They must be readable by the user
+          under which the server runs, and each library path must be initialized before
+          service startup.
+          See the [calibredb documentation](${documentationLink}/generated/en/calibredb.html#add) for details.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "calibre-server";
+        description = lib.mdDoc "The user under which calibre-server runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "calibre-server";
+        description = lib.mdDoc "The group under which calibre-server runs.";
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        example = "::1";
+        description = lib.mdDoc ''
+          The interface on which to listen for connections.
+          See the [calibre-server documentation](${generatedDocumentationLink}#cmdoption-calibre-server-listen-on) for details.
+        '';
+      };
+
+      port = mkOption {
+        default = 8080;
+        type = types.port;
+        description = lib.mdDoc ''
+          The port on which to listen for connections.
+          See the [calibre-server documentation](${generatedDocumentationLink}#cmdoption-calibre-server-port) for details.
+        '';
+      };
+
+      auth = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Password based authentication to access the server.
+            See the [calibre-server documentation](${generatedDocumentationLink}#cmdoption-calibre-server-enable-auth) for details.
+          '';
+        };
+
+        mode = mkOption {
+          type = types.enum [ "auto" "basic" "digest" ];
+          default = "auto";
+          description = lib.mdDoc ''
+            Choose the type of authentication used.
+            Set the HTTP authentication mode used by the server.
+            See the [calibre-server documentation](${generatedDocumentationLink}#cmdoption-calibre-server-auth-mode) for details.
+          '';
+        };
+
+        userDb = mkOption {
+          default = null;
+          type = types.nullOr types.path;
+          description = lib.mdDoc ''
+            Choose users database file to use for authentication.
+            Make sure users database file is initialized before service startup.
+            See the [calibre-server documentation](${documentationLink}/server.html#managing-user-accounts-from-the-command-line-only) for details.
+          '';
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.calibre-server = {
+      description = "Calibre Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Restart = "always";
+        ExecStart = "${cfg.package}/bin/calibre-server ${lib.concatStringsSep " " cfg.libraries} ${execFlags}";
+      };
+
+    };
+
+    environment.systemPackages = [ pkgs.calibre ];
+
+    users.users = optionalAttrs (cfg.user == "calibre-server") {
+      calibre-server = {
+        home = "/var/lib/calibre-server";
+        createHome = true;
+        uid = config.ids.uids.calibre-server;
+        group = cfg.group;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "calibre-server") {
+      calibre-server = {
+        gid = config.ids.gids.calibre-server;
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ gaelreyrol ];
+}
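A minimal sketch of using the module above from a NixOS configuration; the library path, bind address, and user database location are illustrative:

  services.calibre-server = {
    enable = true;
    libraries = [ "/srv/calibre/library" ];
    host = "127.0.0.1";
    port = 8080;
    auth = {
      enable = true;
      mode = "basic";
      # must already exist and be initialized before the service starts
      userDb = "/var/lib/calibre-server/users.sqlite";
    };
  };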
diff --git a/nixpkgs/nixos/modules/services/misc/canto-daemon.nix b/nixpkgs/nixos/modules/services/misc/canto-daemon.nix
new file mode 100644
index 000000000000..8150e038bc13
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/canto-daemon.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+cfg = config.services.canto-daemon;
+
+in {
+
+##### interface
+
+  options = {
+
+    services.canto-daemon = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the canto RSS daemon.";
+      };
+    };
+
+  };
+
+##### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.user.services.canto-daemon = {
+      description = "Canto RSS Daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "default.target" ];
+      serviceConfig.ExecStart = "${pkgs.canto-daemon}/bin/canto-daemon";
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/misc/cfdyndns.nix b/nixpkgs/nixos/modules/services/misc/cfdyndns.nix
new file mode 100644
index 000000000000..dba8ac200151
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/cfdyndns.nix
@@ -0,0 +1,81 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cfdyndns;
+in
+{
+  imports = [
+    (mkRemovedOptionModule
+      [ "services" "cfdyndns" "apikey" ]
+      "Use services.cfdyndns.apikeyFile instead.")
+  ];
+
+  options = {
+    services.cfdyndns = {
+      enable = mkEnableOption (lib.mdDoc "Cloudflare Dynamic DNS Client");
+
+      email = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The email address to use to authenticate to CloudFlare.
+        '';
+      };
+
+      apiTokenFile = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The path to a file containing the API Token
+          used to authenticate with CloudFlare.
+        '';
+      };
+
+      apikeyFile = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The path to a file containing the API Key
+          used to authenticate with CloudFlare.
+        '';
+      };
+
+      records = mkOption {
+        default = [];
+        example = [ "host.tld" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The records to update in CloudFlare.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.cfdyndns = {
+      description = "CloudFlare Dynamic DNS Client";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      startAt = "*:0/5";
+      serviceConfig = {
+        Type = "simple";
+        LoadCredential = lib.optional (cfg.apiTokenFile != null) "CLOUDFLARE_APITOKEN_FILE:${cfg.apiTokenFile}";
+        DynamicUser = true;
+      };
+      environment = {
+        CLOUDFLARE_RECORDS="${concatStringsSep "," cfg.records}";
+      };
+      script = ''
+        ${optionalString (cfg.apikeyFile != null) ''
+          export CLOUDFLARE_APIKEY="$(cat ${escapeShellArg cfg.apikeyFile})"
+          export CLOUDFLARE_EMAIL="${cfg.email}"
+        ''}
+        ${optionalString (cfg.apiTokenFile != null) ''
+          export CLOUDFLARE_APITOKEN=$(${pkgs.systemd}/bin/systemd-creds cat CLOUDFLARE_APITOKEN_FILE)
+        ''}
+        ${pkgs.cfdyndns}/bin/cfdyndns
+      '';
+    };
+  };
+}
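For reference, a sketch of the options above in use; the email address, token path, and record names are placeholders. The apiTokenFile is handed to the unit via systemd's LoadCredential, as shown in the service definition above:

  services.cfdyndns = {
    enable = true;
    email = "admin@example.com";
    apiTokenFile = "/run/secrets/cloudflare-api-token";
    records = [ "home.example.com" "vpn.example.com" ];
  };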
diff --git a/nixpkgs/nixos/modules/services/misc/cgminer.nix b/nixpkgs/nixos/modules/services/misc/cgminer.nix
new file mode 100644
index 000000000000..a6fbfee73bad
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/cgminer.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cgminer;
+
+  convType = with builtins;
+    v: if isBool v then boolToString v else toString v;
+  mergedHwConfig =
+    mapAttrsToList (n: v: ''"${n}": "${(concatStringsSep "," (map convType v))}"'')
+      (foldAttrs (n: a: [n] ++ a) [] cfg.hardware);
+  mergedConfig = with builtins;
+    mapAttrsToList (n: v: ''"${n}":  ${if isBool v then convType v else ''"${convType v}"''}'')
+      cfg.config;
+
+  cgminerConfig = pkgs.writeText "cgminer.conf" ''
+  {
+  ${concatStringsSep ",\n" mergedHwConfig},
+  ${concatStringsSep ",\n" mergedConfig},
+  "pools": [
+  ${concatStringsSep ",\n"
+    (map (v: ''{"url": "${v.url}", "user": "${v.user}", "pass": "${v.pass}"}'')
+          cfg.pools)}]
+  }
+  '';
+in
+{
+  ###### interface
+  options = {
+
+    services.cgminer = {
+
+      enable = mkEnableOption (lib.mdDoc "cgminer, an ASIC/FPGA/GPU miner for bitcoin and litecoin");
+
+      package = mkOption {
+        default = pkgs.cgminer;
+        defaultText = literalExpression "pkgs.cgminer";
+        description = lib.mdDoc "Which cgminer derivation to use.";
+        type = types.package;
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "cgminer";
+        description = lib.mdDoc "User account under which cgminer runs";
+      };
+
+      pools = mkOption {
+        default = [];  # Run benchmark
+        type = types.listOf (types.attrsOf types.str);
+        description = lib.mdDoc "List of pools to mine on";
+        example = [{
+          url = "http://p2pool.org:9332";
+          user = "17EUZxTvs9uRmPsjPZSYUU3zCz9iwstudk";
+          pass = "X";
+        }];
+      };
+
+      hardware = mkOption {
+        default = []; # Run without options
+        type = types.listOf (types.attrsOf (types.either types.str types.int));
+        description = lib.mdDoc "List of config options for every GPU";
+        example = [
+        {
+          intensity = 9;
+          gpu-engine = "0-985";
+          gpu-fan = "0-85";
+          gpu-memclock = 860;
+          gpu-powertune = 20;
+          temp-cutoff = 95;
+          temp-overheat = 85;
+          temp-target = 75;
+        }
+        {
+          intensity = 9;
+          gpu-engine = "0-950";
+          gpu-fan = "0-85";
+          gpu-memclock = 825;
+          gpu-powertune = 20;
+          temp-cutoff = 95;
+          temp-overheat = 85;
+          temp-target = 75;
+        }];
+      };
+
+      config = mkOption {
+        default = {};
+        type = types.attrsOf (types.either types.bool types.int);
+        description = lib.mdDoc "Additional config";
+        example = {
+          auto-fan = true;
+          auto-gpu = true;
+          expiry = 120;
+          failover-only = true;
+          gpu-threads = 2;
+          log = 5;
+          queue = 1;
+          scan-time = 60;
+          temp-hysteresis = 3;
+        };
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.cgminer.enable {
+
+    users.users = optionalAttrs (cfg.user == "cgminer") {
+      cgminer = {
+        isSystemUser = true;
+        group = "cgminer";
+        description = "Cgminer user";
+      };
+    };
+    users.groups = optionalAttrs (cfg.user == "cgminer") {
+      cgminer = {};
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.cgminer = {
+      path = [ pkgs.cgminer ];
+
+      after = [ "network.target" "display-manager.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        LD_LIBRARY_PATH = "/run/opengl-driver/lib:/run/opengl-driver-32/lib";
+        DISPLAY = ":${toString config.services.xserver.display}";
+        GPU_MAX_ALLOC_PERCENT = "100";
+        GPU_USE_SYNC_OBJECTS = "1";
+      };
+
+      startLimitIntervalSec = 60;  # 1 min
+      serviceConfig = {
+        ExecStart = "${pkgs.cgminer}/bin/cgminer --syslog --text-only --config ${cgminerConfig}";
+        User = cfg.user;
+        RestartSec = "30s";
+        Restart = "always";
+      };
+    };
+
+  };
+
+}
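A sketch of a pool-mining setup built only from the options above; the pool URL and worker credentials are placeholders. Note that the pool attribute names must match those consumed by the generated config file (url, user, pass):

  services.cgminer = {
    enable = true;
    pools = [
      { url = "stratum+tcp://pool.example.org:3333"; user = "worker1"; pass = "x"; }
    ];
    config = {
      auto-fan = true;
      gpu-threads = 2;
    };
  };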
diff --git a/nixpkgs/nixos/modules/services/misc/clipcat.nix b/nixpkgs/nixos/modules/services/misc/clipcat.nix
new file mode 100644
index 000000000000..0129de3a9efb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/clipcat.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.clipcat;
+in {
+
+  options.services.clipcat= {
+    enable = mkEnableOption (lib.mdDoc "Clipcat clipboard daemon");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.clipcat;
+      defaultText = literalExpression "pkgs.clipcat";
+      description = lib.mdDoc "clipcat derivation to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.clipcat = {
+      enable      = true;
+      description = "clipcat daemon";
+      wantedBy = [ "graphical-session.target" ];
+      after    = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = "${cfg.package}/bin/clipcatd --no-daemon";
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/clipmenu.nix b/nixpkgs/nixos/modules/services/misc/clipmenu.nix
new file mode 100644
index 000000000000..1cc8c4c47f7e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/clipmenu.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.clipmenu;
+in {
+
+  options.services.clipmenu = {
+    enable = mkEnableOption (lib.mdDoc "clipmenu, the clipboard management daemon");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.clipmenu;
+      defaultText = literalExpression "pkgs.clipmenu";
+      description = lib.mdDoc "clipmenu derivation to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.clipmenu = {
+      enable      = true;
+      description = "Clipboard management daemon";
+      wantedBy = [ "graphical-session.target" ];
+      after    = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = "${cfg.package}/bin/clipmenud";
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/confd.nix b/nixpkgs/nixos/modules/services/misc/confd.nix
new file mode 100644
index 000000000000..17c1be57ccbc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/confd.nix
@@ -0,0 +1,90 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.confd;
+
+  confdConfig = ''
+    backend = "${cfg.backend}"
+    confdir = "${cfg.confDir}"
+    interval = ${toString cfg.interval}
+    nodes = [ ${concatMapStringsSep "," (s: ''"${s}"'') cfg.nodes}, ]
+    prefix = "${cfg.prefix}"
+    log-level = "${cfg.logLevel}"
+    watch = ${boolToString cfg.watch}
+  '';
+
+in {
+  options.services.confd = {
+    enable = mkEnableOption (lib.mdDoc "confd service");
+
+    backend = mkOption {
+      description = lib.mdDoc "Confd config storage backend to use.";
+      default = "etcd";
+      type = types.enum ["etcd" "consul" "redis" "zookeeper"];
+    };
+
+    interval = mkOption {
+      description = lib.mdDoc "Confd check interval.";
+      default = 10;
+      type = types.int;
+    };
+
+    nodes = mkOption {
+      description = lib.mdDoc "Confd list of nodes to connect to.";
+      default = [ "http://127.0.0.1:2379" ];
+      type = types.listOf types.str;
+    };
+
+    watch = mkOption {
+      description = lib.mdDoc "Confd, whether to watch etcd config for changes.";
+      default = true;
+      type = types.bool;
+    };
+
+    prefix = mkOption {
+      description = lib.mdDoc "The string to prefix to keys.";
+      default = "/";
+      type = types.path;
+    };
+
+    logLevel = mkOption {
+      description = lib.mdDoc "Confd log level.";
+      default = "info";
+      type = types.enum ["info" "debug"];
+    };
+
+    confDir = mkOption {
+      description = lib.mdDoc "The path to the confd configs.";
+      default = "/etc/confd";
+      type = types.path;
+    };
+
+    package = mkOption {
+      description = lib.mdDoc "Confd package to use.";
+      default = pkgs.confd;
+      defaultText = literalExpression "pkgs.confd";
+      type = types.package;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.confd = {
+      description = "Confd Service.";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/confd";
+      };
+    };
+
+    environment.etc = {
+      "confd/confd.toml".text = confdConfig;
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    services.etcd.enable = mkIf (cfg.backend == "etcd") (mkDefault true);
+  };
+}
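A sketch of pointing confd at a local etcd instance using the options above; the key prefix is illustrative. With the etcd backend, services.etcd is enabled by default, per the implementation above:

  services.confd = {
    enable = true;
    backend = "etcd";
    nodes = [ "http://127.0.0.1:2379" ];
    prefix = "/myapp";
    watch = true;
    logLevel = "info";
  };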
diff --git a/nixpkgs/nixos/modules/services/misc/cpuminer-cryptonight.nix b/nixpkgs/nixos/modules/services/misc/cpuminer-cryptonight.nix
new file mode 100644
index 000000000000..7b18c6b3cd20
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/cpuminer-cryptonight.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cpuminer-cryptonight;
+
+  json = builtins.toJSON (
+    cfg // {
+       enable = null;
+       threads =
+         if cfg.threads == 0 then null else toString cfg.threads;
+    }
+  );
+
+  confFile = builtins.toFile "cpuminer.json" json;
+in
+{
+
+  options = {
+
+    services.cpuminer-cryptonight = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the cpuminer cryptonight miner.
+        '';
+      };
+      url = mkOption {
+        type = types.str;
+        description = lib.mdDoc "URL of mining server";
+      };
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Username for mining server";
+      };
+      pass = mkOption {
+        type = types.str;
+        default = "x";
+        description = lib.mdDoc "Password for mining server";
+      };
+      threads = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc "Number of miner threads, defaults to available processors";
+      };
+    };
+
+  };
+
+  config = mkIf config.services.cpuminer-cryptonight.enable {
+
+    systemd.services.cpuminer-cryptonight = {
+      description = "Cryptonight cpuminer";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.cpuminer-multi}/bin/minerd --syslog --config=${confFile}";
+        User = "nobody";
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/misc/devmon.nix b/nixpkgs/nixos/modules/services/misc/devmon.nix
new file mode 100644
index 000000000000..bd0b738b7018
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/devmon.nix
@@ -0,0 +1,25 @@
+{ pkgs, config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.devmon;
+
+in {
+  options = {
+    services.devmon = {
+      enable = mkEnableOption (lib.mdDoc "devmon, an automatic device mounting daemon");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.devmon = {
+      description = "devmon automatic device mounting daemon";
+      wantedBy = [ "default.target" ];
+      path = [ pkgs.udevil pkgs.procps pkgs.udisks2 pkgs.which ];
+      serviceConfig.ExecStart = "${pkgs.udevil}/bin/devmon";
+    };
+
+    services.udisks2.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/dictd.nix b/nixpkgs/nixos/modules/services/misc/dictd.nix
new file mode 100644
index 000000000000..4b714b84f3b2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/dictd.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dictd;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.dictd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the DICT.org dictionary server.
+        '';
+      };
+
+      DBs = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs.dictdDBs; [ wiktionary wordnet ];
+        defaultText = literalExpression "with pkgs.dictdDBs; [ wiktionary wordnet ]";
+        example = literalExpression "[ pkgs.dictdDBs.nld2eng ]";
+        description = lib.mdDoc "List of databases to make available.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = let dictdb = pkgs.dictDBCollector { dictlist = map (x: {
+               name = x.name;
+               filename = x; } ) cfg.DBs; };
+  in mkIf cfg.enable {
+
+    # get the command line client on system path to make some use of the service
+    environment.systemPackages = [ pkgs.dict ];
+
+    environment.etc."dict.conf".text = ''
+      server localhost
+    '';
+
+    users.users.dictd =
+      { group = "dictd";
+        description = "DICT.org dictd server";
+        home = "${dictdb}/share/dictd";
+        uid = config.ids.uids.dictd;
+      };
+
+    users.groups.dictd.gid = config.ids.gids.dictd;
+
+    systemd.services.dictd = {
+      description = "DICT.org Dictionary Server";
+      wantedBy = [ "multi-user.target" ];
+      environment = { LOCALE_ARCHIVE = "/run/current-system/sw/lib/locale/locale-archive"; };
+      serviceConfig.Type = "forking";
+      script = "${pkgs.dict}/sbin/dictd -s -c ${dictdb}/share/dictd/dictd.conf --locale en_US.UTF-8";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/disnix.nix b/nixpkgs/nixos/modules/services/misc/disnix.nix
new file mode 100644
index 000000000000..13c57ce6b85b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/disnix.nix
@@ -0,0 +1,98 @@
+# Disnix server
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.disnix;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.disnix = {
+
+      enable = mkEnableOption (lib.mdDoc "Disnix");
+
+      enableMultiUser = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to support multi-user mode by enabling the Disnix D-Bus service";
+      };
+
+      useWebServiceInterface = mkEnableOption (lib.mdDoc "the DisnixWebService interface running on Apache Tomcat");
+
+      package = mkOption {
+        type = types.path;
+        description = lib.mdDoc "The Disnix package";
+        default = pkgs.disnix;
+        defaultText = literalExpression "pkgs.disnix";
+      };
+
+      enableProfilePath = mkEnableOption (lib.mdDoc "exposing the Disnix profiles in the system's PATH");
+
+      profiles = mkOption {
+        type = types.listOf types.str;
+        default = [ "default" ];
+        description = lib.mdDoc "Names of the Disnix profiles to expose in the system's PATH";
+      };
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    dysnomia.enable = true;
+
+    environment.systemPackages = [ pkgs.disnix ] ++ optional cfg.useWebServiceInterface pkgs.DisnixWebService;
+    environment.variables.PATH = lib.optionals cfg.enableProfilePath (map (profileName: "/nix/var/nix/profiles/disnix/${profileName}/bin" ) cfg.profiles);
+    environment.variables.DISNIX_REMOTE_CLIENT = lib.optionalString (cfg.enableMultiUser) "disnix-client";
+
+    services.dbus.enable = true;
+    services.dbus.packages = [ pkgs.disnix ];
+
+    services.tomcat.enable = cfg.useWebServiceInterface;
+    services.tomcat.extraGroups = [ "disnix" ];
+    services.tomcat.javaOpts = "${optionalString cfg.useWebServiceInterface "-Djava.library.path=${pkgs.libmatthew_java}/lib/jni"} ";
+    services.tomcat.sharedLibs = optional cfg.useWebServiceInterface "${pkgs.DisnixWebService}/share/java/DisnixConnection.jar"
+      ++ optional cfg.useWebServiceInterface "${pkgs.dbus_java}/share/java/dbus.jar";
+    services.tomcat.webapps = optional cfg.useWebServiceInterface pkgs.DisnixWebService;
+
+    users.groups.disnix.gid = config.ids.gids.disnix;
+
+    systemd.services = {
+      disnix = mkIf cfg.enableMultiUser {
+        description = "Disnix server";
+        wants = [ "dysnomia.target" ];
+        wantedBy = [ "multi-user.target" ];
+        after = [ "dbus.service" ]
+          ++ optional config.services.httpd.enable "httpd.service"
+          ++ optional config.services.mysql.enable "mysql.service"
+          ++ optional config.services.postgresql.enable "postgresql.service"
+          ++ optional config.services.tomcat.enable "tomcat.service"
+          ++ optional config.services.svnserve.enable "svnserve.service"
+          ++ optional config.services.mongodb.enable "mongodb.service"
+          ++ optional config.services.influxdb.enable "influxdb.service";
+
+        restartIfChanged = false;
+
+        path = [ config.nix.package cfg.package config.dysnomia.package "/run/current-system/sw" ];
+
+        environment = {
+          HOME = "/root";
+        }
+        // (optionalAttrs (config.environment.variables ? DYSNOMIA_CONTAINERS_PATH) { inherit (config.environment.variables) DYSNOMIA_CONTAINERS_PATH; })
+        // (optionalAttrs (config.environment.variables ? DYSNOMIA_MODULES_PATH) { inherit (config.environment.variables) DYSNOMIA_MODULES_PATH; });
+
+        serviceConfig.ExecStart = "${cfg.package}/bin/disnix-service";
+      };
+
+    };
+  };
+}
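A sketch of enabling Disnix with the Tomcat-based web service interface, based only on the options above:

  services.disnix = {
    enable = true;
    useWebServiceInterface = true;
    enableProfilePath = true;
    profiles = [ "default" ];
  };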
diff --git a/nixpkgs/nixos/modules/services/misc/docker-registry.nix b/nixpkgs/nixos/modules/services/misc/docker-registry.nix
new file mode 100644
index 000000000000..b0e910634637
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/docker-registry.nix
@@ -0,0 +1,162 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dockerRegistry;
+
+  blobCache = if cfg.enableRedisCache
+    then "redis"
+    else "inmemory";
+
+  registryConfig = {
+    version =  "0.1";
+    log.fields.service = "registry";
+    storage = {
+      cache.blobdescriptor = blobCache;
+      delete.enabled = cfg.enableDelete;
+    } // (optionalAttrs (cfg.storagePath != null) { filesystem.rootdirectory = cfg.storagePath; });
+    http = {
+      addr = "${cfg.listenAddress}:${builtins.toString cfg.port}";
+      headers.X-Content-Type-Options = ["nosniff"];
+    };
+    health.storagedriver = {
+      enabled = true;
+      interval = "10s";
+      threshold = 3;
+    };
+  };
+
+  registryConfig.redis = mkIf cfg.enableRedisCache {
+    addr = "${cfg.redisUrl}";
+    password = "${cfg.redisPassword}";
+    db = 0;
+    dialtimeout = "10ms";
+    readtimeout = "10ms";
+    writetimeout = "10ms";
+    pool = {
+      maxidle = 16;
+      maxactive = 64;
+      idletimeout = "300s";
+    };
+  };
+
+  configFile = pkgs.writeText "docker-registry-config.yml" (builtins.toJSON (recursiveUpdate registryConfig cfg.extraConfig));
+
+in {
+  options.services.dockerRegistry = {
+    enable = mkEnableOption (lib.mdDoc "Docker Registry");
+
+    package = mkOption {
+      type = types.package;
+      description = mdDoc "Which Docker registry package to use.";
+      default = pkgs.docker-distribution;
+      defaultText = literalExpression "pkgs.docker-distribution";
+      example = literalExpression "pkgs.gitlab-container-registry";
+    };
+
+    listenAddress = mkOption {
+      description = lib.mdDoc "Docker registry host or ip to bind to.";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    port = mkOption {
+      description = lib.mdDoc "Docker registry port to bind to.";
+      default = 5000;
+      type = types.port;
+    };
+
+    storagePath = mkOption {
+      type = types.nullOr types.path;
+      default = "/var/lib/docker-registry";
+      description = lib.mdDoc ''
+        Docker registry storage path for the filesystem storage backend. Set to
+        null to configure another backend via extraConfig.
+      '';
+    };
+
+    enableDelete = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable delete for manifests and blobs.";
+    };
+
+    enableRedisCache = mkEnableOption (lib.mdDoc "redis as blob cache");
+
+    redisUrl = mkOption {
+      type = types.str;
+      default = "localhost:6379";
+      description = lib.mdDoc "Set redis host and port.";
+    };
+
+    redisPassword = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc "Set redis password.";
+    };
+
+    extraConfig = mkOption {
+      description = lib.mdDoc ''
+        Extra configuration options that are merged into the registry's configuration file.
+      '';
+      default = {};
+      type = types.attrs;
+    };
+
+    enableGarbageCollect = mkEnableOption (lib.mdDoc "garbage collection");
+
+    garbageCollectDates = mkOption {
+      default = "daily";
+      type = types.str;
+      description = lib.mdDoc ''
+        Specification (in the format described by
+        {manpage}`systemd.time(7)`) of the time at
+        which garbage collection will occur.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.docker-registry = {
+      description = "Docker Container Registry";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      script = ''
+        ${cfg.package}/bin/registry serve ${configFile}
+      '';
+
+      serviceConfig = {
+        User = "docker-registry";
+        WorkingDirectory = cfg.storagePath;
+        AmbientCapabilities = mkIf (cfg.port < 1024) "cap_net_bind_service";
+      };
+    };
+
+    systemd.services.docker-registry-garbage-collect = {
+      description = "Run Garbage Collection for docker registry";
+
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+
+      serviceConfig.Type = "oneshot";
+
+      script = ''
+        ${cfg.package}/bin/registry garbage-collect ${configFile}
+        /run/current-system/systemd/bin/systemctl restart docker-registry.service
+      '';
+
+      startAt = optional cfg.enableGarbageCollect cfg.garbageCollectDates;
+    };
+
+    users.users.docker-registry =
+      (optionalAttrs (cfg.storagePath != null) {
+        createHome = true;
+        home = cfg.storagePath;
+      }) // {
+        group = "docker-registry";
+        isSystemUser = true;
+      };
+    users.groups.docker-registry = {};
+  };
+}
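A sketch of a registry with deletion and scheduled garbage collection enabled, using the options above; the listen address and schedule are illustrative:

  services.dockerRegistry = {
    enable = true;
    listenAddress = "0.0.0.0";
    port = 5000;
    enableDelete = true;
    enableGarbageCollect = true;
    garbageCollectDates = "weekly";
  };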
diff --git a/nixpkgs/nixos/modules/services/misc/domoticz.nix b/nixpkgs/nixos/modules/services/misc/domoticz.nix
new file mode 100644
index 000000000000..fd9fcf0b78eb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/domoticz.nix
@@ -0,0 +1,51 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.domoticz;
+  pkgDesc = "Domoticz home automation";
+
+in {
+
+  options = {
+
+    services.domoticz = {
+      enable = mkEnableOption (lib.mdDoc pkgDesc);
+
+      bind = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc "IP address to bind to.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = lib.mdDoc "Port to bind to for HTTP, set to 0 to disable HTTP.";
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services."domoticz" = {
+      description = pkgDesc;
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "domoticz";
+        Restart = "always";
+        ExecStart = ''
+          ${pkgs.domoticz}/bin/domoticz -noupdates -www ${toString cfg.port} -wwwbind ${cfg.bind} -sslwww 0 -userdata /var/lib/domoticz -approot ${pkgs.domoticz}/share/domoticz/ -pidfile /var/run/domoticz.pid
+        '';
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/misc/duckling.nix b/nixpkgs/nixos/modules/services/misc/duckling.nix
new file mode 100644
index 000000000000..4d06ca7fa667
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/duckling.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.duckling;
+in {
+  options = {
+    services.duckling = {
+      enable = mkEnableOption (lib.mdDoc "duckling");
+
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = lib.mdDoc ''
+          Port on which duckling will run.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.duckling = {
+      description = "Duckling server service";
+      wantedBy    = [ "multi-user.target" ];
+      after       = [ "network.target" ];
+
+      environment = {
+        PORT = builtins.toString cfg.port;
+      };
+
+      serviceConfig = {
+        ExecStart = "${pkgs.haskellPackages.duckling}/bin/duckling-example-exe --no-access-log --no-error-log";
+        Restart = "always";
+        DynamicUser = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/dwm-status.nix b/nixpkgs/nixos/modules/services/misc/dwm-status.nix
new file mode 100644
index 000000000000..de3e28c41d27
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/dwm-status.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dwm-status;
+
+  order = concatMapStringsSep "," (feature: ''"${feature}"'') cfg.order;
+
+  configFile = pkgs.writeText "dwm-status.toml" ''
+    order = [${order}]
+
+    ${cfg.extraConfig}
+  '';
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.dwm-status = {
+
+      enable = mkEnableOption (lib.mdDoc "dwm-status user service");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.dwm-status;
+        defaultText = literalExpression "pkgs.dwm-status";
+        example = literalExpression "pkgs.dwm-status.override { enableAlsaUtils = false; }";
+        description = lib.mdDoc ''
+          Which dwm-status package to use.
+        '';
+      };
+
+      order = mkOption {
+        type = types.listOf (types.enum [ "audio" "backlight" "battery" "cpu_load" "network" "time" ]);
+        description = lib.mdDoc ''
+          List of enabled features in order.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra config in TOML format.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.upower.enable = elem "battery" cfg.order;
+
+    systemd.user.services.dwm-status = {
+      description = "Highly performant and configurable DWM status service";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+
+      serviceConfig.ExecStart = "${cfg.package}/bin/dwm-status ${configFile}";
+    };
+
+  };
+
+}
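A sketch of a user-level status bar configuration based on the options above; note that order has no default and must be set:

  services.dwm-status = {
    enable = true;
    order = [ "battery" "cpu_load" "network" "time" ];
  };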
diff --git a/nixpkgs/nixos/modules/services/misc/dysnomia.nix b/nixpkgs/nixos/modules/services/misc/dysnomia.nix
new file mode 100644
index 000000000000..129345e38106
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/dysnomia.nix
@@ -0,0 +1,265 @@
+{pkgs, lib, config, ...}:
+
+with lib;
+
+let
+  cfg = config.dysnomia;
+
+  printProperties = properties:
+    concatMapStrings (propertyName:
+      let
+        property = properties.${propertyName};
+      in
+      if isList property then "${propertyName}=(${lib.concatMapStrings (elem: "\"${toString elem}\" ") (properties.${propertyName})})\n"
+      else "${propertyName}=\"${toString property}\"\n"
+    ) (builtins.attrNames properties);
+
+  properties = pkgs.stdenv.mkDerivation {
+    name = "dysnomia-properties";
+    buildCommand = ''
+      cat > $out << "EOF"
+      ${printProperties cfg.properties}
+      EOF
+    '';
+  };
+
+  containersDir = pkgs.stdenv.mkDerivation {
+    name = "dysnomia-containers";
+    buildCommand = ''
+      mkdir -p $out
+      cd $out
+
+      ${concatMapStrings (containerName:
+        let
+          containerProperties = cfg.containers.${containerName};
+        in
+        ''
+          cat > ${containerName} <<EOF
+          ${printProperties containerProperties}
+          type=${containerName}
+          EOF
+        ''
+      ) (builtins.attrNames cfg.containers)}
+    '';
+  };
+
+  linkMutableComponents = {containerName}:
+    ''
+      mkdir ${containerName}
+
+      ${concatMapStrings (componentName:
+        let
+          component = cfg.components.${containerName}.${componentName};
+        in
+        "ln -s ${component} ${containerName}/${componentName}\n"
+      ) (builtins.attrNames (cfg.components.${containerName} or {}))}
+    '';
+
+  componentsDir = pkgs.stdenv.mkDerivation {
+    name = "dysnomia-components";
+    buildCommand = ''
+      mkdir -p $out
+      cd $out
+
+      ${concatMapStrings (containerName:
+        linkMutableComponents { inherit containerName; }
+      ) (builtins.attrNames cfg.components)}
+    '';
+  };
+
+  dysnomiaFlags = {
+    enableApacheWebApplication = config.services.httpd.enable;
+    enableAxis2WebService = config.services.tomcat.axis2.enable;
+    enableDockerContainer = config.virtualisation.docker.enable;
+    enableEjabberdDump = config.services.ejabberd.enable;
+    enableMySQLDatabase = config.services.mysql.enable;
+    enablePostgreSQLDatabase = config.services.postgresql.enable;
+    enableTomcatWebApplication = config.services.tomcat.enable;
+    enableMongoDatabase = config.services.mongodb.enable;
+    enableSubversionRepository = config.services.svnserve.enable;
+    enableInfluxDatabase = config.services.influxdb.enable;
+  };
+in
+{
+  options = {
+    dysnomia = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable Dysnomia";
+      };
+
+      enableAuthentication = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to publish privacy-sensitive authentication credentials";
+      };
+
+      package = mkOption {
+        type = types.path;
+        description = lib.mdDoc "The Dysnomia package";
+      };
+
+      properties = mkOption {
+        description = lib.mdDoc "An attribute set in which each attribute represents a machine property. Optionally, these values can be shell substitutions.";
+        default = {};
+        type = types.attrs;
+      };
+
+      containers = mkOption {
+        description = lib.mdDoc "An attribute set in which each key represents a container and each value an attribute set providing its configuration properties";
+        default = {};
+        type = types.attrsOf types.attrs;
+      };
+
+      components = mkOption {
+        description = lib.mdDoc "An attribute set in which each key represents a container and each value an attribute set in which each key represents a component and each value a derivation constructing its initial state";
+        default = {};
+        type = types.attrsOf types.attrs;
+      };
+
+      extraContainerProperties = mkOption {
+        description = lib.mdDoc "An attribute set providing additional container settings in addition to the default properties";
+        default = {};
+        type = types.attrs;
+      };
+
+      extraContainerPaths = mkOption {
+        description = lib.mdDoc "A list of paths containing additional container configurations that are added to the search folders";
+        default = [];
+        type = types.listOf types.path;
+      };
+
+      extraModulePaths = mkOption {
+        description = lib.mdDoc "A list of paths containing additional modules that are added to the search folders";
+        default = [];
+        type = types.listOf types.path;
+      };
+
+      enableLegacyModules = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable Dysnomia legacy process and wrapper modules";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.etc = {
+      "dysnomia/containers" = {
+        source = containersDir;
+      };
+      "dysnomia/components" = {
+        source = componentsDir;
+      };
+      "dysnomia/properties" = {
+        source = properties;
+      };
+    };
+
+    environment.variables = {
+      DYSNOMIA_STATEDIR = "/var/state/dysnomia-nixos";
+      DYSNOMIA_CONTAINERS_PATH = "${lib.concatMapStrings (containerPath: "${containerPath}:") cfg.extraContainerPaths}/etc/dysnomia/containers";
+      DYSNOMIA_MODULES_PATH = "${lib.concatMapStrings (modulePath: "${modulePath}:") cfg.extraModulePaths}/etc/dysnomia/modules";
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    dysnomia.package = pkgs.dysnomia.override (origArgs: dysnomiaFlags // lib.optionalAttrs (cfg.enableLegacyModules) {
+      enableLegacy = builtins.trace ''
+        WARNING: Dysnomia has been configured to use the legacy 'process' and 'wrapper'
+        modules for compatibility reasons! If you rely on these modules, consider
+        migrating to better alternatives.
+
+        More information: https://raw.githubusercontent.com/svanderburg/dysnomia/f65a9a84827bcc4024d6b16527098b33b02e4054/README-legacy.md
+
+        If you have migrated already or don't rely on these Dysnomia modules, you can
+        disable legacy mode with the following NixOS configuration option:
+
+        dysnomia.enableLegacyModules = false;
+
+        In a future version of Dysnomia (and NixOS) the legacy option will go away!
+      '' true;
+    });
+
+    dysnomia.properties = {
+      hostname = config.networking.hostName;
+      inherit (pkgs.stdenv.hostPlatform) system;
+
+      supportedTypes = [
+        "echo"
+        "fileset"
+        "process"
+        "wrapper"
+
+        # These are not base modules, but they are still enabled because they work with technologies that are always enabled in NixOS
+        "systemd-unit"
+        "sysvinit-script"
+        "nixos-configuration"
+      ]
+      ++ optional (dysnomiaFlags.enableApacheWebApplication) "apache-webapplication"
+      ++ optional (dysnomiaFlags.enableAxis2WebService) "axis2-webservice"
+      ++ optional (dysnomiaFlags.enableDockerContainer) "docker-container"
+      ++ optional (dysnomiaFlags.enableEjabberdDump) "ejabberd-dump"
+      ++ optional (dysnomiaFlags.enableInfluxDatabase) "influx-database"
+      ++ optional (dysnomiaFlags.enableMySQLDatabase) "mysql-database"
+      ++ optional (dysnomiaFlags.enablePostgreSQLDatabase) "postgresql-database"
+      ++ optional (dysnomiaFlags.enableTomcatWebApplication) "tomcat-webapplication"
+      ++ optional (dysnomiaFlags.enableMongoDatabase) "mongo-database"
+      ++ optional (dysnomiaFlags.enableSubversionRepository) "subversion-repository";
+    };
+
+    dysnomia.containers = lib.recursiveUpdate ({
+      process = {};
+      wrapper = {};
+    }
+    // lib.optionalAttrs (config.services.httpd.enable) { apache-webapplication = {
+      documentRoot = config.services.httpd.virtualHosts.localhost.documentRoot;
+    }; }
+    // lib.optionalAttrs (config.services.tomcat.axis2.enable) { axis2-webservice = {}; }
+    // lib.optionalAttrs (config.services.ejabberd.enable) { ejabberd-dump = {
+      ejabberdUser = config.services.ejabberd.user;
+    }; }
+    // lib.optionalAttrs (config.services.mysql.enable) { mysql-database = {
+        mysqlPort = config.services.mysql.settings.mysqld.port;
+        mysqlSocket = "/run/mysqld/mysqld.sock";
+      } // lib.optionalAttrs cfg.enableAuthentication {
+        mysqlUsername = "root";
+      };
+    }
+    // lib.optionalAttrs (config.services.postgresql.enable) { postgresql-database = {
+      } // lib.optionalAttrs (cfg.enableAuthentication) {
+        postgresqlUsername = "postgres";
+      };
+    }
+    // lib.optionalAttrs (config.services.tomcat.enable) { tomcat-webapplication = {
+      tomcatPort = 8080;
+    }; }
+    // lib.optionalAttrs (config.services.mongodb.enable) { mongo-database = {}; }
+    // lib.optionalAttrs (config.services.influxdb.enable) {
+      influx-database = {
+        influxdbUsername = config.services.influxdb.user;
+        influxdbDataDir = "${config.services.influxdb.dataDir}/data";
+        influxdbMetaDir = "${config.services.influxdb.dataDir}/meta";
+      };
+    }
+    // lib.optionalAttrs (config.services.svnserve.enable) { subversion-repository = {
+      svnBaseDir = config.services.svnserve.svnBaseDir;
+    }; }) cfg.extraContainerProperties;
+
+    boot.extraSystemdUnitPaths = [ "/etc/systemd-mutable/system" ];
+
+    system.activationScripts.dysnomia = ''
+      mkdir -p /etc/systemd-mutable/system
+      if [ ! -f /etc/systemd-mutable/system/dysnomia.target ]
+      then
+          ( echo "[Unit]"
+            echo "Description=Services that are activated and deactivated by Dysnomia"
+            echo "After=final.target"
+          ) > /etc/systemd-mutable/system/dysnomia.target
+      fi
+    '';
+  };
+}
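A sketch of a standalone Dysnomia configuration using the options above; the machine property shown is purely illustrative:

  dysnomia = {
    enable = true;
    enableLegacyModules = false;
    properties = {
      # arbitrary machine property exposed via /etc/dysnomia/properties
      mem = "16G";
    };
  };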
diff --git a/nixpkgs/nixos/modules/services/misc/errbot.nix b/nixpkgs/nixos/modules/services/misc/errbot.nix
new file mode 100644
index 000000000000..a650bc5bbd92
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/errbot.nix
@@ -0,0 +1,104 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.errbot;
+  pluginEnv = plugins: pkgs.buildEnv {
+    name = "errbot-plugins";
+    paths = plugins;
+  };
+  mkConfigDir = instanceCfg: dataDir: pkgs.writeTextDir "config.py" ''
+    import logging
+    BACKEND = '${instanceCfg.backend}'
+    BOT_DATA_DIR = '${dataDir}'
+    BOT_EXTRA_PLUGIN_DIR = '${pluginEnv instanceCfg.plugins}'
+
+    BOT_LOG_LEVEL = logging.${instanceCfg.logLevel}
+    BOT_LOG_FILE = False
+
+    BOT_ADMINS = (${concatMapStringsSep "," (name: "'${name}'") instanceCfg.admins})
+
+    BOT_IDENTITY = ${builtins.toJSON instanceCfg.identity}
+
+    ${instanceCfg.extraConfig}
+  '';
+in {
+  options = {
+    services.errbot.instances = mkOption {
+      default = {};
+      description = lib.mdDoc "Errbot instance configs";
+      type = types.attrsOf (types.submodule {
+        options = {
+          dataDir = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = lib.mdDoc "Data directory for errbot instance.";
+          };
+
+          plugins = mkOption {
+            type = types.listOf types.package;
+            default = [];
+            description = lib.mdDoc "List of errbot plugin derivations.";
+          };
+
+          logLevel = mkOption {
+            type = types.str;
+            default = "INFO";
+            description = lib.mdDoc "Errbot log level";
+          };
+
+          admins = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            description = lib.mdDoc "List of identifiers of errbot admins.";
+          };
+
+          backend = mkOption {
+            type = types.str;
+            default = "XMPP";
+            description = lib.mdDoc "Errbot backend name.";
+          };
+
+          identity = mkOption {
+            type = types.attrs;
+            description = lib.mdDoc "Errbot identity configuration";
+          };
+
+          extraConfig = mkOption {
+            type = types.lines;
+            default = "";
+            description = lib.mdDoc "String to be appended to the config verbatim";
+          };
+        };
+      });
+    };
+  };
+
+  config = mkIf (cfg.instances != {}) {
+    users.users.errbot = {
+      group = "errbot";
+      isSystemUser = true;
+    };
+    users.groups.errbot = {};
+
+    systemd.services = mapAttrs' (name: instanceCfg: nameValuePair "errbot-${name}" (
+    let
+      dataDir = if instanceCfg.dataDir != null then instanceCfg.dataDir else
+        "/var/lib/errbot/${name}";
+    in {
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        mkdir -p ${dataDir}
+        chown -R errbot:errbot ${dataDir}
+      '';
+      serviceConfig = {
+        User = "errbot";
+        Restart = "on-failure";
+        ExecStart = "${pkgs.errbot}/bin/errbot -c ${mkConfigDir instanceCfg dataDir}/config.py";
+        PermissionsStartOnly = true;
+      };
+    })) cfg.instances;
+  };
+}
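A sketch of a single errbot instance using the options above; the identity keys are illustrative and depend on the chosen backend:

  services.errbot.instances.mybot = {
    backend = "XMPP";
    admins = [ "admin@example.com" ];
    identity = {
      username = "bot@example.com";
      password = "changeme";
    };
  };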
diff --git a/nixpkgs/nixos/modules/services/misc/etcd.nix b/nixpkgs/nixos/modules/services/misc/etcd.nix
new file mode 100644
index 000000000000..7bc7a9499113
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/etcd.nix
@@ -0,0 +1,208 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.etcd;
+  opt = options.services.etcd;
+
+in {
+
+  options.services.etcd = {
+    enable = mkOption {
+      description = lib.mdDoc "Whether to enable etcd.";
+      default = false;
+      type = types.bool;
+    };
+
+    package = mkPackageOptionMD pkgs "etcd" { };
+
+    name = mkOption {
+      description = lib.mdDoc "Etcd unique node name.";
+      default = config.networking.hostName;
+      defaultText = literalExpression "config.networking.hostName";
+      type = types.str;
+    };
+
+    advertiseClientUrls = mkOption {
+      description = lib.mdDoc "Etcd list of this member's client URLs to advertise to the rest of the cluster.";
+      default = cfg.listenClientUrls;
+      defaultText = literalExpression "config.${opt.listenClientUrls}";
+      type = types.listOf types.str;
+    };
+
+    listenClientUrls = mkOption {
+      description = lib.mdDoc "Etcd list of URLs to listen on for client traffic.";
+      default = ["http://127.0.0.1:2379"];
+      type = types.listOf types.str;
+    };
+
+    listenPeerUrls = mkOption {
+      description = lib.mdDoc "Etcd list of URLs to listen on for peer traffic.";
+      default = ["http://127.0.0.1:2380"];
+      type = types.listOf types.str;
+    };
+
+    initialAdvertisePeerUrls = mkOption {
+      description = lib.mdDoc "Etcd list of this member's peer URLs to advertise to rest of the cluster.";
+      default = cfg.listenPeerUrls;
+      defaultText = literalExpression "config.${opt.listenPeerUrls}";
+      type = types.listOf types.str;
+    };
+
+    initialCluster = mkOption {
+      description = lib.mdDoc "Etcd initial cluster configuration for bootstrapping.";
+      default = ["${cfg.name}=http://127.0.0.1:2380"];
+      defaultText = literalExpression ''["''${config.${opt.name}}=http://127.0.0.1:2380"]'';
+      type = types.listOf types.str;
+    };
+
+    initialClusterState = mkOption {
+      description = lib.mdDoc "Etcd initial cluster configuration for bootstrapping.";
+      default = "new";
+      type = types.enum ["new" "existing"];
+    };
+
+    initialClusterToken = mkOption {
+      description = lib.mdDoc "Etcd initial cluster token for etcd cluster during bootstrap.";
+      default = "etcd-cluster";
+      type = types.str;
+    };
+
+    discovery = mkOption {
+      description = lib.mdDoc "Etcd discovery url";
+      default = "";
+      type = types.str;
+    };
+
+    clientCertAuth = mkOption {
+      description = lib.mdDoc "Whether to use certs for client authentication";
+      default = false;
+      type = types.bool;
+    };
+
+    trustedCaFile = mkOption {
+      description = lib.mdDoc "Certificate authority file to use for clients";
+      default = null;
+      type = types.nullOr types.path;
+    };
+
+    certFile = mkOption {
+      description = lib.mdDoc "Cert file to use for clients";
+      default = null;
+      type = types.nullOr types.path;
+    };
+
+    keyFile = mkOption {
+      description = lib.mdDoc "Key file to use for clients";
+      default = null;
+      type = types.nullOr types.path;
+    };
+
+    peerCertFile = mkOption {
+      description = lib.mdDoc "Cert file to use for peer to peer communication";
+      default = cfg.certFile;
+      defaultText = literalExpression "config.${opt.certFile}";
+      type = types.nullOr types.path;
+    };
+
+    peerKeyFile = mkOption {
+      description = lib.mdDoc "Key file to use for peer to peer communication";
+      default = cfg.keyFile;
+      defaultText = literalExpression "config.${opt.keyFile}";
+      type = types.nullOr types.path;
+    };
+
+    peerTrustedCaFile = mkOption {
+      description = lib.mdDoc "Certificate authority file to use for peer to peer communication";
+      default = cfg.trustedCaFile;
+      defaultText = literalExpression "config.${opt.trustedCaFile}";
+      type = types.nullOr types.path;
+    };
+
+    peerClientCertAuth = mkOption {
+      description = lib.mdDoc "Whether to check all incoming peer requests from the cluster for valid client certificates signed by the supplied CA";
+      default = false;
+      type = types.bool;
+    };
+
+    extraConf = mkOption {
+      description = lib.mdDoc ''
+        Etcd extra configuration. See
+        <https://github.com/coreos/etcd/blob/master/Documentation/op-guide/configuration.md#configuration-flags>
+      '';
+      type = types.attrsOf types.str;
+      default = {};
+      example = literalExpression ''
+        {
+          "CORS" = "*";
+          "NAME" = "default-name";
+          "MAX_RESULT_BUFFER" = "1024";
+          "MAX_CLUSTER_SIZE" = "9";
+          "MAX_RETRY_ATTEMPTS" = "3";
+        }
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/etcd";
+      description = lib.mdDoc "Etcd data directory.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0700 etcd - - -"
+    ];
+
+    systemd.services.etcd = {
+      description = "etcd key-value store";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      environment = (filterAttrs (n: v: v != null) {
+        ETCD_NAME = cfg.name;
+        ETCD_DISCOVERY = cfg.discovery;
+        ETCD_DATA_DIR = cfg.dataDir;
+        ETCD_ADVERTISE_CLIENT_URLS = concatStringsSep "," cfg.advertiseClientUrls;
+        ETCD_LISTEN_CLIENT_URLS = concatStringsSep "," cfg.listenClientUrls;
+        ETCD_LISTEN_PEER_URLS = concatStringsSep "," cfg.listenPeerUrls;
+        ETCD_INITIAL_ADVERTISE_PEER_URLS = concatStringsSep "," cfg.initialAdvertisePeerUrls;
+        ETCD_PEER_CLIENT_CERT_AUTH = toString cfg.peerClientCertAuth;
+        ETCD_PEER_TRUSTED_CA_FILE = cfg.peerTrustedCaFile;
+        ETCD_PEER_CERT_FILE = cfg.peerCertFile;
+        ETCD_PEER_KEY_FILE = cfg.peerKeyFile;
+        ETCD_CLIENT_CERT_AUTH = toString cfg.clientCertAuth;
+        ETCD_TRUSTED_CA_FILE = cfg.trustedCaFile;
+        ETCD_CERT_FILE = cfg.certFile;
+        ETCD_KEY_FILE = cfg.keyFile;
+      }) // (optionalAttrs (cfg.discovery == ""){
+        ETCD_INITIAL_CLUSTER = concatStringsSep "," cfg.initialCluster;
+        ETCD_INITIAL_CLUSTER_STATE = cfg.initialClusterState;
+        ETCD_INITIAL_CLUSTER_TOKEN = cfg.initialClusterToken;
+      }) // (mapAttrs' (n: v: nameValuePair "ETCD_${n}" v) cfg.extraConf);
+
+      unitConfig = {
+        Documentation = "https://github.com/coreos/etcd";
+      };
+
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${cfg.package}/bin/etcd";
+        User = "etcd";
+        LimitNOFILE = 40000;
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    users.users.etcd = {
+      isSystemUser = true;
+      group = "etcd";
+      description = "Etcd daemon user";
+      home = cfg.dataDir;
+    };
+    users.groups.etcd = {};
+  };
+}
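A sketch of one member of a two-node cluster configured with the options above; node names and addresses are illustrative:

  services.etcd = {
    enable = true;
    name = "node1";
    listenClientUrls = [ "http://0.0.0.0:2379" ];
    advertiseClientUrls = [ "http://10.0.0.1:2379" ];
    listenPeerUrls = [ "http://0.0.0.0:2380" ];
    initialAdvertisePeerUrls = [ "http://10.0.0.1:2380" ];
    initialCluster = [
      "node1=http://10.0.0.1:2380"
      "node2=http://10.0.0.2:2380"
    ];
    initialClusterState = "new";
  };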
diff --git a/nixpkgs/nixos/modules/services/misc/etebase-server.nix b/nixpkgs/nixos/modules/services/misc/etebase-server.nix
new file mode 100644
index 000000000000..045048a1a2e3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/etebase-server.nix
@@ -0,0 +1,226 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.etebase-server;
+
+  pythonEnv = pkgs.python3.withPackages (ps: with ps;
+    [ etebase-server daphne ]);
+
+  iniFmt = pkgs.formats.ini {};
+
+  configIni = iniFmt.generate "etebase-server.ini" cfg.settings;
+
+  defaultUser = "etebase-server";
+in
+{
+  imports = [
+    (mkRemovedOptionModule
+      [ "services" "etebase-server" "customIni" ]
+      "Set the option `services.etebase-server.settings' instead.")
+    (mkRemovedOptionModule
+      [ "services" "etebase-server" "database" ]
+      "Set the option `services.etebase-server.settings.database' instead.")
+    (mkRenamedOptionModule
+      [ "services" "etebase-server" "secretFile" ]
+      [ "services" "etebase-server" "settings" "secret_file" ])
+    (mkRenamedOptionModule
+      [ "services" "etebase-server" "host" ]
+      [ "services" "etebase-server" "settings" "allowed_hosts" "allowed_host1" ])
+  ];
+
+  options = {
+    services.etebase-server = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Whether to enable the Etebase server.
+
+          Once enabled you need to create an admin user by invoking the
+          shell command `etebase-server createsuperuser` with
+          the user specified by the `user` option or a superuser.
+          Then you can log in and create accounts at your-etebase-server.com/admin.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/etebase-server";
+        description = lib.mdDoc "Directory to store the Etebase server data.";
+      };
+
+      port = mkOption {
+        type = with types; nullOr port;
+        default = 8001;
+        description = lib.mdDoc "Port to listen on.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open ports in the firewall for the server.
+        '';
+      };
+
+      unixSocket = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc "The path to the socket to bind to.";
+        example = "/run/etebase-server/etebase-server.sock";
+      };
+
+      settings = mkOption {
+        type = lib.types.submodule {
+          freeformType = iniFmt.type;
+
+          options = {
+            global = {
+              debug = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Whether to set django's DEBUG flag.
+                '';
+              };
+              secret_file = mkOption {
+                type = with types; nullOr str;
+                default = null;
+                description = lib.mdDoc ''
+                  The path to a file containing the secret
+                  used as django's SECRET_KEY.
+                '';
+              };
+              static_root = mkOption {
+                type = types.str;
+                default = "${cfg.dataDir}/static";
+                defaultText = literalExpression ''"''${config.services.etebase-server.dataDir}/static"'';
+                description = lib.mdDoc "The directory for static files.";
+              };
+              media_root = mkOption {
+                type = types.str;
+                default = "${cfg.dataDir}/media";
+                defaultText = literalExpression ''"''${config.services.etebase-server.dataDir}/media"'';
+                description = lib.mdDoc "The media directory.";
+              };
+            };
+            allowed_hosts = {
+              allowed_host1 = mkOption {
+                type = types.str;
+                default = "0.0.0.0";
+                example = "localhost";
+                description = lib.mdDoc ''
+                  The main host that is allowed access.
+                '';
+              };
+            };
+            database = {
+              engine = mkOption {
+                type = types.enum [ "django.db.backends.sqlite3" "django.db.backends.postgresql" ];
+                default = "django.db.backends.sqlite3";
+                description = lib.mdDoc "The database engine to use.";
+              };
+              name = mkOption {
+                type = types.str;
+                default = "${cfg.dataDir}/db.sqlite3";
+                defaultText = literalExpression ''"''${config.services.etebase-server.dataDir}/db.sqlite3"'';
+                description = lib.mdDoc "The database name.";
+              };
+            };
+          };
+        };
+        default = {};
+        description = lib.mdDoc ''
+          Configuration for `etebase-server`. Refer to
+          <https://github.com/etesync/server/blob/master/etebase-server.ini.example>
+          and <https://github.com/etesync/server/wiki>
+          for details on supported values.
+        '';
+        example = {
+          global = {
+            debug = true;
+            media_root = "/path/to/media";
+          };
+          allowed_hosts = {
+            allowed_host2 = "localhost";
+          };
+        };
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = defaultUser;
+        description = lib.mdDoc "User under which Etebase server runs.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = with pkgs; [
+      (runCommand "etebase-server" {
+        nativeBuildInputs = [ makeWrapper ];
+      } ''
+        makeWrapper ${pythonEnv}/bin/etebase-server \
+          $out/bin/etebase-server \
+          --chdir ${escapeShellArg cfg.dataDir} \
+          --prefix ETEBASE_EASY_CONFIG_PATH : "${configIni}"
+      '')
+    ];
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
+    ];
+
+    systemd.services.etebase-server = {
+      description = "An Etebase (EteSync 2.0) server";
+      after = [ "network.target" "systemd-tmpfiles-setup.service" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pythonEnv ];
+      serviceConfig = {
+        User = cfg.user;
+        Restart = "always";
+        WorkingDirectory = cfg.dataDir;
+      };
+      environment = {
+        ETEBASE_EASY_CONFIG_PATH = configIni;
+      };
+      preStart = ''
+        # Auto-migrate on first run or if the package has changed
+        versionFile="${cfg.dataDir}/src-version"
+        if [[ $(cat "$versionFile" 2>/dev/null) != ${pkgs.etebase-server} ]]; then
+          etebase-server migrate --no-input
+          etebase-server collectstatic --no-input --clear
+          echo ${pkgs.etebase-server} > "$versionFile"
+        fi
+      '';
+      script =
+        let
+          networking = if cfg.unixSocket != null
+          then "-u ${cfg.unixSocket}"
+          else "-b 0.0.0.0 -p ${toString cfg.port}";
+        in ''
+          cd "${pythonEnv}/lib/etebase-server";
+          daphne ${networking} \
+            etebase_server.asgi:application
+        '';
+    };
+
+    users = optionalAttrs (cfg.user == defaultUser) {
+      users.${defaultUser} = {
+        isSystemUser = true;
+        group = defaultUser;
+        home = cfg.dataDir;
+      };
+
+      groups.${defaultUser} = {};
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+  };
+}
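
A minimal sketch of how this module might be enabled from a NixOS configuration, based only on the options defined above; the hostname and secret path are placeholders, not values from this change:

```nix
{
  services.etebase-server = {
    enable = true;
    port = 8001;
    settings.global.secret_file = "/run/keys/etebase-secret";       # placeholder path
    settings.allowed_hosts.allowed_host1 = "etebase.example.org";   # placeholder host
  };
}
```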
diff --git a/nixpkgs/nixos/modules/services/misc/etesync-dav.nix b/nixpkgs/nixos/modules/services/misc/etesync-dav.nix
new file mode 100644
index 000000000000..9d99d548d95b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/etesync-dav.nix
@@ -0,0 +1,92 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.etesync-dav;
+in
+  {
+    options.services.etesync-dav = {
+      enable = mkEnableOption (lib.mdDoc "etesync-dav");
+
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "The server host address.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 37358;
+        description = lib.mdDoc "The server host port.";
+      };
+
+      apiUrl = mkOption {
+        type = types.str;
+        default = "https://api.etesync.com/";
+        description = lib.mdDoc "The url to the etesync API.";
+      };
+
+      openFirewall = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to open the firewall for the specified port.";
+      };
+
+      sslCertificate = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/etesync.crt";
+        description = lib.mdDoc ''
+          Path to server SSL certificate. It will be copied into
+          etesync-dav's data directory.
+        '';
+      };
+
+      sslCertificateKey = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/etesync.key";
+        description = lib.mdDoc ''
+          Path to server SSL certificate key.  It will be copied into
+          etesync-dav's data directory.
+        '';
+      };
+    };
+
+    config = mkIf cfg.enable {
+      networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+      systemd.services.etesync-dav = {
+        description = "etesync-dav - A CalDAV and CardDAV adapter for EteSync";
+        after = [ "network-online.target" ];
+        wantedBy = [ "multi-user.target" ];
+        path = [ pkgs.etesync-dav ];
+        environment = {
+          ETESYNC_LISTEN_ADDRESS = cfg.host;
+          ETESYNC_LISTEN_PORT = toString cfg.port;
+          ETESYNC_URL = cfg.apiUrl;
+          ETESYNC_DATA_DIR = "/var/lib/etesync-dav";
+        };
+
+        serviceConfig = {
+          Type = "simple";
+          DynamicUser = true;
+          StateDirectory = "etesync-dav";
+          ExecStart = "${pkgs.etesync-dav}/bin/etesync-dav";
+          ExecStartPre = mkIf (cfg.sslCertificate != null || cfg.sslCertificateKey != null) (
+            pkgs.writers.writeBash "etesync-dav-copy-keys" ''
+              ${optionalString (cfg.sslCertificate != null) ''
+                cp ${toString cfg.sslCertificate} $STATE_DIRECTORY/etesync.crt
+              ''}
+              ${optionalString (cfg.sslCertificateKey != null) ''
+                cp ${toString cfg.sslCertificateKey} $STATE_DIRECTORY/etesync.key
+              ''}
+            ''
+          );
+          Restart = "on-failure";
+          RestartSec = "30min 1s";
+        };
+      };
+    };
+  }
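
A minimal sketch of enabling the adapter, for orientation; all values below are simply the module defaults spelled out:

```nix
{
  services.etesync-dav = {
    enable = true;
    host = "localhost";
    port = 37358;
    apiUrl = "https://api.etesync.com/";
  };
}
```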
diff --git a/nixpkgs/nixos/modules/services/misc/evdevremapkeys.nix b/nixpkgs/nixos/modules/services/misc/evdevremapkeys.nix
new file mode 100644
index 000000000000..11ea6a5f03f2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/evdevremapkeys.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  format = pkgs.formats.yaml { };
+  cfg = config.services.evdevremapkeys;
+
+in
+{
+  options.services.evdevremapkeys = {
+    enable = mkEnableOption (lib.mdDoc ''evdevremapkeys'');
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      description = lib.mdDoc ''
+        config.yaml for evdevremapkeys
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.kernelModules = [ "uinput" ];
+    services.udev.extraRules = ''
+      KERNEL=="uinput", MODE="0660", GROUP="input"
+    '';
+    users.groups.evdevremapkeys = { };
+    users.users.evdevremapkeys = {
+      description = "evdevremapkeys service user";
+      group = "evdevremapkeys";
+      extraGroups = [ "input" ];
+      isSystemUser = true;
+    };
+    systemd.services.evdevremapkeys = {
+      description = "evdevremapkeys";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig =
+        let
+          config = format.generate "config.yaml" cfg.settings;
+        in
+        {
+          ExecStart = "${pkgs.evdevremapkeys}/bin/evdevremapkeys --config-file ${config}";
+          User = "evdevremapkeys";
+          Group = "evdevremapkeys";
+          StateDirectory = "evdevremapkeys";
+          Restart = "always";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateNetwork = true;
+          PrivateTmp = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectKernelTunables = true;
+          ProtectSystem = true;
+        };
+    };
+  };
+}
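
A sketch of a possible configuration; the `devices`/`remappings` layout follows upstream evdevremapkeys' config.yaml format as commonly documented, and the device name is a placeholder; check against the upstream README before use:

```nix
{
  services.evdevremapkeys = {
    enable = true;
    # Serialized to config.yaml by pkgs.formats.yaml; the keys below are assumptions,
    # not options defined by this module.
    settings.devices = [{
      input_name = "AT Translated Set 2 keyboard";   # placeholder device name
      output_name = "remapped-keyboard";
      remappings.KEY_CAPSLOCK = [ "KEY_LEFTCTRL" ];
    }];
  };
}
```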
diff --git a/nixpkgs/nixos/modules/services/misc/felix.nix b/nixpkgs/nixos/modules/services/misc/felix.nix
new file mode 100644
index 000000000000..306d4cf0d7cf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/felix.nix
@@ -0,0 +1,104 @@
+# Felix server
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.felix;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.felix = {
+
+      enable = mkEnableOption (lib.mdDoc "the Apache Felix OSGi service");
+
+      bundles = mkOption {
+        type = types.listOf types.package;
+        default = [ pkgs.felix_remoteshell ];
+        defaultText = literalExpression "[ pkgs.felix_remoteshell ]";
+        description = lib.mdDoc "List of bundles that should be activated on startup";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "osgi";
+        description = lib.mdDoc "User account under which Apache Felix runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "osgi";
+        description = lib.mdDoc "Group account under which Apache Felix runs.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    users.groups.osgi.gid = config.ids.gids.osgi;
+
+    users.users.osgi =
+      { uid = config.ids.uids.osgi;
+        description = "OSGi user";
+        home = "/homeless-shelter";
+      };
+
+    systemd.services.felix = {
+      description = "Felix server";
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        # Initialise felix instance on first startup
+        if [ ! -d /var/felix ]
+        then
+          # Symlink system files
+
+          mkdir -p /var/felix
+          chown ${cfg.user}:${cfg.group} /var/felix
+
+          for i in ${pkgs.felix}/*
+          do
+              if [ "$i" != "${pkgs.felix}/bundle" ]
+              then
+                  ln -sfn $i /var/felix/$(basename $i)
+              fi
+          done
+
+          # Symlink bundles
+          mkdir -p /var/felix/bundle
+          chown ${cfg.user}:${cfg.group} /var/felix/bundle
+
+          for i in ${pkgs.felix}/bundle/* ${toString cfg.bundles}
+          do
+              if [ -f $i ]
+              then
+                  ln -sfn $i /var/felix/bundle/$(basename $i)
+              elif [ -d $i ]
+              then
+                  for j in $i/bundle/*
+                  do
+                      ln -sfn $j /var/felix/bundle/$(basename $j)
+                  done
+              fi
+          done
+        fi
+      '';
+
+      script = ''
+        cd /var/felix
+        ${pkgs.su}/bin/su -s ${pkgs.bash}/bin/sh ${cfg.user} -c '${pkgs.jre}/bin/java -jar bin/felix.jar'
+      '';
+    };
+  };
+}
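
A minimal sketch of enabling the Felix service; the bundle list below just repeats the module default for illustration:

```nix
{ pkgs, ... }:
{
  services.felix = {
    enable = true;
    bundles = [ pkgs.felix_remoteshell ];   # same as the default shown above
  };
}
```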
diff --git a/nixpkgs/nixos/modules/services/misc/forgejo.md b/nixpkgs/nixos/modules/services/misc/forgejo.md
new file mode 100644
index 000000000000..3df8bc20976a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/forgejo.md
@@ -0,0 +1,79 @@
+# Forgejo {#module-forgejo}
+
+Forgejo is a soft fork of Gitea with a strong focus on community,
+self-hosting, and federation. [Codeberg](https://codeberg.org) is
+deployed from it.
+
+See [upstream docs](https://forgejo.org/docs/latest/).
+
+The method of choice for running forgejo is using [`services.forgejo`](#opt-services.forgejo.enable).
+
+::: {.warning}
+Running forgejo using `services.gitea.package = pkgs.forgejo` is no longer
+recommended.
+If you experience issues with your instance using `services.gitea`,
+**DO NOT** report them to the `services.gitea` module maintainers.
+**DO** report them to the `services.forgejo` module maintainers instead.
+:::
+
+## Migration from Gitea {#module-forgejo-migration-gitea}
+
+::: {.note}
+While not strictly necessary at this point, migrating is highly recommended.
+Both modules and projects are likely to diverge further with each release,
+which might make a later migration even more involved.
+:::
+
+### Full-Migration {#module-forgejo-migration-gitea-default}
+
+This will migrate the state directory (data), rename and chown the database and
+delete the gitea user.
+
+::: {.note}
+This will also change the git remote ssh-url user from `gitea@` to `forgejo@`,
+when using the host's openssh server (default) instead of the integrated one.
+:::
+
+Instructions for PostgreSQL (default). Adapt accordingly for other databases:
+
+```sh
+systemctl stop gitea
+mv /var/lib/gitea /var/lib/forgejo
+runuser -u postgres -- psql -c '
+  ALTER USER gitea RENAME TO forgejo;
+  ALTER DATABASE gitea RENAME TO forgejo;
+'
+nixos-rebuild switch
+systemctl stop forgejo
+chown -R forgejo:forgejo /var/lib/forgejo
+systemctl restart forgejo
+```
+
+### Alternatively, keeping the gitea user {#module-forgejo-migration-gitea-impersonate}
+
+Alternatively, the forgejo module can be set up to re-use the old storage
+locations and database, so there is no need to rename the database, copy the
+state folder, or change the user.
+Make sure to disable `services.gitea` when doing this.
+
+```nix
+services.gitea.enable = false;
+
+services.forgejo = {
+  enable = true;
+  user = "gitea";
+  group = "gitea";
+  stateDir = "/var/lib/gitea";
+  database.name = "gitea";
+  database.user = "gitea";
+};
+
+users.users.gitea = {
+  home = "/var/lib/gitea";
+  useDefaultShell = true;
+  group = "gitea";
+  isSystemUser = true;
+};
+
+users.groups.gitea = {};
+```
diff --git a/nixpkgs/nixos/modules/services/misc/forgejo.nix b/nixpkgs/nixos/modules/services/misc/forgejo.nix
new file mode 100644
index 000000000000..454febda5893
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/forgejo.nix
@@ -0,0 +1,679 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.services.forgejo;
+  opt = options.services.forgejo;
+  format = pkgs.formats.ini { };
+
+  exe = lib.getExe cfg.package;
+
+  pg = config.services.postgresql;
+  useMysql = cfg.database.type == "mysql";
+  usePostgresql = cfg.database.type == "postgres";
+  useSqlite = cfg.database.type == "sqlite3";
+
+  inherit (lib)
+    literalExpression
+    mdDoc
+    mkChangedOptionModule
+    mkDefault
+    mkEnableOption
+    mkIf
+    mkMerge
+    mkOption
+    mkPackageOptionMD
+    mkRemovedOptionModule
+    mkRenamedOptionModule
+    optionalAttrs
+    optionals
+    optionalString
+    types
+    ;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "forgejo" "appName" ] [ "services" "forgejo" "settings" "DEFAULT" "APP_NAME" ])
+    (mkRemovedOptionModule [ "services" "forgejo" "extraConfig" ] "services.forgejo.extraConfig has been removed. Please use the freeform services.forgejo.settings option instead")
+    (mkRemovedOptionModule [ "services" "forgejo" "database" "password" ] "services.forgejo.database.password has been removed. Please use services.forgejo.database.passwordFile instead")
+
+    # copied from services.gitea; remove at some point
+    (mkRenamedOptionModule [ "services" "forgejo" "cookieSecure" ] [ "services" "forgejo" "settings" "session" "COOKIE_SECURE" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "disableRegistration" ] [ "services" "forgejo" "settings" "service" "DISABLE_REGISTRATION" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "domain" ] [ "services" "forgejo" "settings" "server" "DOMAIN" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "httpAddress" ] [ "services" "forgejo" "settings" "server" "HTTP_ADDR" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "httpPort" ] [ "services" "forgejo" "settings" "server" "HTTP_PORT" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "log" "level" ] [ "services" "forgejo" "settings" "log" "LEVEL" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "log" "rootPath" ] [ "services" "forgejo" "settings" "log" "ROOT_PATH" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "rootUrl" ] [ "services" "forgejo" "settings" "server" "ROOT_URL" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "ssh" "clonePort" ] [ "services" "forgejo" "settings" "server" "SSH_PORT" ])
+    (mkRenamedOptionModule [ "services" "forgejo" "staticRootPath" ] [ "services" "forgejo" "settings" "server" "STATIC_ROOT_PATH" ])
+    (mkChangedOptionModule [ "services" "forgejo" "enableUnixSocket" ] [ "services" "forgejo" "settings" "server" "PROTOCOL" ] (
+      config: if config.services.forgejo.enableUnixSocket then "http+unix" else "http"
+    ))
+    (mkRemovedOptionModule [ "services" "forgejo" "ssh" "enable" ] "services.forgejo.ssh.enable has been migrated into freeform setting services.forgejo.settings.server.DISABLE_SSH. Keep in mind that the setting is inverted")
+  ];
+
+  options = {
+    services.forgejo = {
+      enable = mkEnableOption (mdDoc "Forgejo");
+
+      package = mkPackageOptionMD pkgs "forgejo" { };
+
+      useWizard = mkOption {
+        default = false;
+        type = types.bool;
+        description = mdDoc ''
+          Whether to use the built-in installation wizard instead of
+          declaratively managing the {file}`app.ini` config file in nix.
+        '';
+      };
+
+      stateDir = mkOption {
+        default = "/var/lib/forgejo";
+        type = types.str;
+        description = mdDoc "Forgejo data directory.";
+      };
+
+      customDir = mkOption {
+        default = "${cfg.stateDir}/custom";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/custom"'';
+        type = types.str;
+        description = mdDoc ''
+          Base directory for custom templates and other options.
+
+          If {option}`${opt.useWizard}` is disabled (default), this directory will also
+          hold secrets and the resulting {file}`app.ini` config at runtime.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "forgejo";
+        description = mdDoc "User account under which Forgejo runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "forgejo";
+        description = mdDoc "Group under which Forgejo runs.";
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "sqlite3" "mysql" "postgres" ];
+          example = "mysql";
+          default = "sqlite3";
+          description = mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = if !usePostgresql then 3306 else pg.port;
+          defaultText = literalExpression ''
+            if config.${opt.database.type} != "postgresql"
+            then 3306
+            else config.${options.services.postgresql.port}
+          '';
+          description = mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "forgejo";
+          description = mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "forgejo";
+          description = mdDoc "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/forgejo-dbpassword";
+          description = mdDoc ''
+            A file containing the password corresponding to
+            {option}`${opt.database.user}`.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = if (cfg.database.createDatabase && usePostgresql) then "/run/postgresql" else if (cfg.database.createDatabase && useMysql) then "/run/mysqld/mysqld.sock" else null;
+          defaultText = literalExpression "null";
+          example = "/run/mysqld/mysqld.sock";
+          description = mdDoc "Path to the unix socket file to use for authentication.";
+        };
+
+        path = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/data/forgejo.db";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/data/forgejo.db"'';
+          description = mdDoc "Path to the sqlite3 database file.";
+        };
+
+        createDatabase = mkOption {
+          type = types.bool;
+          default = true;
+          description = mdDoc "Whether to create a local database automatically.";
+        };
+      };
+
+      dump = {
+        enable = mkEnableOption (mdDoc "periodic dumps via the [built-in {command}`dump` command](https://forgejo.org/docs/latest/admin/command-line/#dump)");
+
+        interval = mkOption {
+          type = types.str;
+          default = "04:31";
+          example = "hourly";
+          description = mdDoc ''
+            Run a Forgejo dump at this interval. Runs by default at 04:31 every day.
+
+            The format is described in
+            {manpage}`systemd.time(7)`.
+          '';
+        };
+
+        backupDir = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/dump";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/dump"'';
+          description = mdDoc "Path to the directory where the dump archives will be stored.";
+        };
+
+        type = mkOption {
+          type = types.enum [ "zip" "tar" "tar.sz" "tar.gz" "tar.xz" "tar.bz2" "tar.br" "tar.lz4" "tar.zst" ];
+          default = "zip";
+          description = mdDoc "Archive format used to store the dump file.";
+        };
+
+        file = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = mdDoc "Filename to be used for the dump. If `null` a default name is chosen by forgejo.";
+          example = "forgejo-dump";
+        };
+      };
+
+      lfs = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = mdDoc "Enables git-lfs support.";
+        };
+
+        contentDir = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/data/lfs";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/data/lfs"'';
+          description = mdDoc "Where to store LFS files.";
+        };
+      };
+
+      repositoryRoot = mkOption {
+        type = types.str;
+        default = "${cfg.stateDir}/repositories";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/repositories"'';
+        description = mdDoc "Path to the git repositories.";
+      };
+
+      mailerPasswordFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/run/keys/forgejo-mailpw";
+        description = mdDoc "Path to a file containing the SMTP password.";
+      };
+
+      settings = mkOption {
+        default = { };
+        description = mdDoc ''
+          Free-form settings written directly to the `app.ini` config file.
+          Refer to <https://forgejo.org/docs/latest/admin/config-cheat-sheet/> for supported values.
+        '';
+        example = literalExpression ''
+          {
+            DEFAULT = {
+              RUN_MODE = "dev";
+            };
+            "cron.sync_external_users" = {
+              RUN_AT_START = true;
+              SCHEDULE = "@every 24h";
+              UPDATE_EXISTING = true;
+            };
+            mailer = {
+              ENABLED = true;
+              MAILER_TYPE = "sendmail";
+              FROM = "do-not-reply@example.org";
+              SENDMAIL_PATH = "''${pkgs.system-sendmail}/bin/sendmail";
+            };
+            other = {
+              SHOW_FOOTER_VERSION = false;
+            };
+          }
+        '';
+        type = types.submodule {
+          freeformType = format.type;
+          options = {
+            log = {
+              ROOT_PATH = mkOption {
+                default = "${cfg.stateDir}/log";
+                defaultText = literalExpression ''"''${config.${opt.stateDir}}/log"'';
+                type = types.str;
+                description = mdDoc "Root path for log files.";
+              };
+              LEVEL = mkOption {
+                default = "Info";
+                type = types.enum [ "Trace" "Debug" "Info" "Warn" "Error" "Critical" ];
+                description = mdDoc "General log level.";
+              };
+            };
+
+            server = {
+              PROTOCOL = mkOption {
+                type = types.enum [ "http" "https" "fcgi" "http+unix" "fcgi+unix" ];
+                default = "http";
+                description = mdDoc ''Listen protocol. `+unix` means "over unix", not "in addition to."'';
+              };
+
+              HTTP_ADDR = mkOption {
+                type = types.either types.str types.path;
+                default = if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/forgejo/forgejo.sock" else "0.0.0.0";
+                defaultText = literalExpression ''if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/forgejo/forgejo.sock" else "0.0.0.0"'';
+                description = mdDoc "Listen address. Must be a path when using a unix socket.";
+              };
+
+              HTTP_PORT = mkOption {
+                type = types.port;
+                default = 3000;
+                description = mdDoc "Listen port. Ignored when using a unix socket.";
+              };
+
+              DOMAIN = mkOption {
+                type = types.str;
+                default = "localhost";
+                description = mdDoc "Domain name of your server.";
+              };
+
+              ROOT_URL = mkOption {
+                type = types.str;
+                default = "http://${cfg.settings.server.DOMAIN}:${toString cfg.settings.server.HTTP_PORT}/";
+                defaultText = literalExpression ''"http://''${config.services.forgejo.settings.server.DOMAIN}:''${toString config.services.forgejo.settings.server.HTTP_PORT}/"'';
+                description = mdDoc "Full public URL of Forgejo server.";
+              };
+
+              STATIC_ROOT_PATH = mkOption {
+                type = types.either types.str types.path;
+                default = cfg.package.data;
+                defaultText = literalExpression "config.${opt.package}.data";
+                example = "/var/lib/forgejo/data";
+                description = mdDoc "Upper level of template and static files path.";
+              };
+
+              DISABLE_SSH = mkOption {
+                type = types.bool;
+                default = false;
+                description = mdDoc "Disable external SSH feature.";
+              };
+
+              SSH_PORT = mkOption {
+                type = types.port;
+                default = 22;
+                example = 2222;
+                description = mdDoc ''
+                  SSH port displayed in clone URL.
+                  This option is required when the externally visible port
+                  differs from the local listening port, i.e. if port forwarding is used.
+                '';
+              };
+            };
+
+            session = {
+              COOKIE_SECURE = mkOption {
+                type = types.bool;
+                default = false;
+                description = mdDoc ''
+                  Marks session cookies as "secure" as a hint for browsers to only send
+                  them via HTTPS. This option is recommended if Forgejo is being served over HTTPS.
+                '';
+              };
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.database.createDatabase -> useSqlite || cfg.database.user == cfg.user;
+        message = "services.forgejo.database.user must match services.forgejo.user if the database is to be automatically provisioned";
+      }
+      { assertion = cfg.database.createDatabase && usePostgresql -> cfg.database.user == cfg.database.name;
+        message = ''
+          When creating a database via NixOS, the db user and db name must be equal!
+          If you already have an existing DB+user and this assertion is new, you can safely set
+          `services.forgejo.database.createDatabase` to `false` because removal of `ensureUsers`
+          and `ensureDatabases` doesn't have any effect.
+        '';
+      }
+    ];
+
+    services.forgejo.settings = {
+      DEFAULT = {
+        RUN_MODE = mkDefault "prod";
+        RUN_USER = mkDefault cfg.user;
+        WORK_PATH = mkDefault cfg.stateDir;
+      };
+
+      database = mkMerge [
+        {
+          DB_TYPE = cfg.database.type;
+        }
+        (mkIf (useMysql || usePostgresql) {
+          HOST = if cfg.database.socket != null then cfg.database.socket else cfg.database.host + ":" + toString cfg.database.port;
+          NAME = cfg.database.name;
+          USER = cfg.database.user;
+          PASSWD = "#dbpass#";
+        })
+        (mkIf useSqlite {
+          PATH = cfg.database.path;
+        })
+        (mkIf usePostgresql {
+          SSL_MODE = "disable";
+        })
+      ];
+
+      repository = {
+        ROOT = cfg.repositoryRoot;
+      };
+
+      server = mkIf cfg.lfs.enable {
+        LFS_START_SERVER = true;
+        LFS_JWT_SECRET = "#lfsjwtsecret#";
+      };
+
+      session = {
+        COOKIE_NAME = mkDefault "session";
+      };
+
+      security = {
+        SECRET_KEY = "#secretkey#";
+        INTERNAL_TOKEN = "#internaltoken#";
+        INSTALL_LOCK = true;
+      };
+
+      mailer = mkIf (cfg.mailerPasswordFile != null) {
+        PASSWD = "#mailerpass#";
+      };
+
+      oauth2 = {
+        JWT_SECRET = "#oauth2jwtsecret#";
+      };
+
+      lfs = mkIf cfg.lfs.enable {
+        PATH = cfg.lfs.contentDir;
+      };
+    };
+
+    services.postgresql = optionalAttrs (usePostgresql && cfg.database.createDatabase) {
+      enable = mkDefault true;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        {
+          name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    services.mysql = optionalAttrs (useMysql && cfg.database.createDatabase) {
+      enable = mkDefault true;
+      package = mkDefault pkgs.mariadb;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        {
+          name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
+
+      # If we have a folder or symlink with Forgejo locales, remove it
+      # and symlink the current Forgejo locales in place.
+      "L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
+
+    ] ++ optionals cfg.lfs.enable [
+      "d '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.forgejo = {
+      description = "Forgejo (Beyond coding. We forge.)";
+      after = [
+        "network.target"
+      ] ++ optionals usePostgresql [
+        "postgresql.service"
+      ] ++ optionals useMysql [
+        "mysql.service"
+      ];
+      requires = optionals (cfg.database.createDatabase && usePostgresql) [
+        "postgresql.service"
+      ] ++ optionals (cfg.database.createDatabase && useMysql) [
+        "mysql.service"
+      ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ cfg.package pkgs.git pkgs.gnupg ];
+
+      # In older versions the secret naming for JWT was confusing:
+      # the file jwt_secret held the value for LFS_JWT_SECRET, and JWT_SECRET
+      # wasn't persisted at all.
+      # To fix that, there is now the file oauth2_jwt_secret containing the
+      # value for JWT_SECRET, and the file jwt_secret gets renamed to
+      # lfs_jwt_secret.
+      # We have to consider this to stay compatible with older installations.
+      preStart =
+        let
+          runConfig = "${cfg.customDir}/conf/app.ini";
+          secretKey = "${cfg.customDir}/conf/secret_key";
+          oauth2JwtSecret = "${cfg.customDir}/conf/oauth2_jwt_secret";
+          oldLfsJwtSecret = "${cfg.customDir}/conf/jwt_secret"; # old file for LFS_JWT_SECRET
+          lfsJwtSecret = "${cfg.customDir}/conf/lfs_jwt_secret"; # new file for LFS_JWT_SECRET
+          internalToken = "${cfg.customDir}/conf/internal_token";
+          replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+        in
+        ''
+          # copy custom configuration and generate random secrets if needed
+          ${lib.optionalString (!cfg.useWizard) ''
+            function forgejo_setup {
+              cp -f '${format.generate "app.ini" cfg.settings}' '${runConfig}'
+
+              if [ ! -s '${secretKey}' ]; then
+                  ${exe} generate secret SECRET_KEY > '${secretKey}'
+              fi
+
+              # Migrate LFS_JWT_SECRET filename
+              if [[ -s '${oldLfsJwtSecret}' && ! -s '${lfsJwtSecret}' ]]; then
+                  mv '${oldLfsJwtSecret}' '${lfsJwtSecret}'
+              fi
+
+              if [ ! -s '${oauth2JwtSecret}' ]; then
+                  ${exe} generate secret JWT_SECRET > '${oauth2JwtSecret}'
+              fi
+
+              ${optionalString cfg.lfs.enable ''
+              if [ ! -s '${lfsJwtSecret}' ]; then
+                  ${exe} generate secret LFS_JWT_SECRET > '${lfsJwtSecret}'
+              fi
+              ''}
+
+              if [ ! -s '${internalToken}' ]; then
+                  ${exe} generate secret INTERNAL_TOKEN > '${internalToken}'
+              fi
+
+              chmod u+w '${runConfig}'
+              ${replaceSecretBin} '#secretkey#' '${secretKey}' '${runConfig}'
+              ${replaceSecretBin} '#oauth2jwtsecret#' '${oauth2JwtSecret}' '${runConfig}'
+              ${replaceSecretBin} '#internaltoken#' '${internalToken}' '${runConfig}'
+
+              ${optionalString cfg.lfs.enable ''
+                ${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
+              ''}
+
+              ${optionalString (cfg.database.passwordFile != null) ''
+                ${replaceSecretBin} '#dbpass#' '${cfg.database.passwordFile}' '${runConfig}'
+              ''}
+
+              ${optionalString (cfg.mailerPasswordFile != null) ''
+                ${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
+              ''}
+              chmod u-w '${runConfig}'
+            }
+            (umask 027; forgejo_setup)
+          ''}
+
+          # run migrations/init the database
+          ${exe} migrate
+
+          # update all hooks' binary paths
+          ${exe} admin regenerate hooks
+
+          # update command option in authorized_keys
+          if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
+          then
+            ${exe} admin regenerate keys
+          fi
+        '';
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.stateDir;
+        ExecStart = "${exe} web --pid /run/forgejo/forgejo.pid";
+        Restart = "always";
+        # Runtime directory and mode
+        RuntimeDirectory = "forgejo";
+        RuntimeDirectoryMode = "0755";
+        # Proc filesystem
+        ProcSubset = "pid";
+        ProtectProc = "invisible";
+        # Access write directories
+        ReadWritePaths = [ cfg.customDir cfg.dump.backupDir cfg.repositoryRoot cfg.stateDir cfg.lfs.contentDir ];
+        UMask = "0027";
+        # Capabilities
+        CapabilityBoundingSet = "";
+        # Security
+        NoNewPrivileges = true;
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @setuid" "setrlimit" ];
+      };
+
+      environment = {
+        USER = cfg.user;
+        HOME = cfg.stateDir;
+        # `GITEA_` prefix until https://codeberg.org/forgejo/forgejo/issues/497
+        # is resolved.
+        GITEA_WORK_DIR = cfg.stateDir;
+        GITEA_CUSTOM = cfg.customDir;
+      };
+    };
+
+    services.openssh.settings.AcceptEnv = mkIf (!cfg.settings.server.START_SSH_SERVER or false) "GIT_PROTOCOL";
+
+    users.users = mkIf (cfg.user == "forgejo") {
+      forgejo = {
+        home = cfg.stateDir;
+        useDefaultShell = true;
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "forgejo") {
+      forgejo = { };
+    };
+
+    systemd.services.forgejo-dump = mkIf cfg.dump.enable {
+      description = "forgejo dump";
+      after = [ "forgejo.service" ];
+      path = [ cfg.package ];
+
+      environment = {
+        USER = cfg.user;
+        HOME = cfg.stateDir;
+        # `GITEA_` prefix until https://codeberg.org/forgejo/forgejo/issues/497
+        # is resolved.
+        GITEA_WORK_DIR = cfg.stateDir;
+        GITEA_CUSTOM = cfg.customDir;
+      };
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = cfg.user;
+        ExecStart = "${exe} dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
+        WorkingDirectory = cfg.dump.backupDir;
+      };
+    };
+
+    systemd.timers.forgejo-dump = mkIf cfg.dump.enable {
+      description = "Forgejo dump timer";
+      partOf = [ "forgejo-dump.service" ];
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.dump.interval;
+    };
+  };
+
+  meta.doc = ./forgejo.md;
+  meta.maintainers = with lib.maintainers; [ bendlas emilylange ];
+}
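
A minimal sketch of a Forgejo instance using the options above; the domain is a placeholder and reverse proxy / TLS setup is out of scope here:

```nix
{
  services.forgejo = {
    enable = true;
    database.type = "postgres";                     # provisioned locally via createDatabase (default true)
    settings.server = {
      DOMAIN = "git.example.org";                   # placeholder domain
      ROOT_URL = "https://git.example.org/";
      HTTP_PORT = 3000;
    };
    settings.service.DISABLE_REGISTRATION = true;   # freeform key, see the config cheat sheet
  };
}
```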
diff --git a/nixpkgs/nixos/modules/services/misc/freeswitch.nix b/nixpkgs/nixos/modules/services/misc/freeswitch.nix
new file mode 100644
index 000000000000..b8b81e586944
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/freeswitch.nix
@@ -0,0 +1,104 @@
+{ config, lib, pkgs, ...}:
+with lib;
+let
+  cfg = config.services.freeswitch;
+  pkg = cfg.package;
+  configDirectory = pkgs.runCommand "freeswitch-config-d" { } ''
+    mkdir -p $out
+    cp -rT ${cfg.configTemplate} $out
+    chmod -R +w $out
+    ${concatStringsSep "\n" (mapAttrsToList (fileName: filePath: ''
+      mkdir -p $out/$(dirname ${fileName})
+      cp ${filePath} $out/${fileName}
+    '') cfg.configDir)}
+  '';
+  configPath = if cfg.enableReload
+    then "/etc/freeswitch"
+    else configDirectory;
+in {
+  options = {
+    services.freeswitch = {
+      enable = mkEnableOption (lib.mdDoc "FreeSWITCH");
+      enableReload = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Issue the `reloadxml` command to FreeSWITCH when the configuration directory changes (instead of restarting the service).
+          See [FreeSWITCH documentation](https://freeswitch.org/confluence/display/FREESWITCH/Reloading) for more info.
+          The configuration directory is exposed at {file}`/etc/freeswitch`.
+          See also `systemd.services.*.restartIfChanged`.
+        '';
+      };
+      configTemplate = mkOption {
+        type = types.path;
+        default = "${config.services.freeswitch.package}/share/freeswitch/conf/vanilla";
+        defaultText = literalExpression ''"''${config.services.freeswitch.package}/share/freeswitch/conf/vanilla"'';
+        example = literalExpression ''"''${config.services.freeswitch.package}/share/freeswitch/conf/minimal"'';
+        description = lib.mdDoc ''
+          Configuration template to use.
+          See available templates in [FreeSWITCH repository](https://github.com/signalwire/freeswitch/tree/master/conf).
+          You can also set your own configuration directory.
+        '';
+      };
+      configDir = mkOption {
+        type = with types; attrsOf path;
+        default = { };
+        example = literalExpression ''
+          {
+            "freeswitch.xml" = ./freeswitch.xml;
+            "dialplan/default.xml" = pkgs.writeText "dialplan-default.xml" '''
+              [xml lines]
+            ''';
+          }
+        '';
+        description = lib.mdDoc ''
+          Override files in the FreeSWITCH config template directory.
+          Each top-level attribute denotes a destination file path in the configuration directory; its value is the source file path.
+          See [FreeSWITCH documentation](https://freeswitch.org/confluence/display/FREESWITCH/Default+Configuration) for more info.
+          Also check available templates in [FreeSWITCH repository](https://github.com/signalwire/freeswitch/tree/master/conf).
+        '';
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.freeswitch;
+        defaultText = literalExpression "pkgs.freeswitch";
+        description = lib.mdDoc ''
+          FreeSWITCH package.
+        '';
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    environment.etc.freeswitch = mkIf cfg.enableReload {
+      source = configDirectory;
+    };
+    systemd.services.freeswitch-config-reload = mkIf cfg.enableReload {
+      before = [ "freeswitch.service" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ configDirectory ];
+      serviceConfig = {
+        ExecStart = "/run/current-system/systemd/bin/systemctl try-reload-or-restart freeswitch.service";
+        RemainAfterExit = true;
+        Type = "oneshot";
+      };
+    };
+    systemd.services.freeswitch = {
+      description = "Free and open-source application server for real-time communication";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "freeswitch";
+        ExecStart = "${pkg}/bin/freeswitch -nf \\
+          -mod ${pkg}/lib/freeswitch/mod \\
+          -conf ${configPath} \\
+          -base /var/lib/freeswitch";
+        ExecReload = "${pkg}/bin/fs_cli -x reloadxml";
+        Restart = "on-failure";
+        RestartSec = "5s";
+        CPUSchedulingPolicy = "fifo";
+      };
+    };
+    environment.systemPackages = [ pkg ];
+  };
+}
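
A minimal sketch of combining the reload-on-change path with a configDir override; `./dialplan-default.xml` is a hypothetical local file, not part of this change:

```nix
{
  services.freeswitch = {
    enable = true;
    enableReload = true;   # expose config at /etc/freeswitch and reloadxml on changes
    configDir = {
      "dialplan/default.xml" = ./dialplan-default.xml;   # hypothetical override file
    };
  };
}
```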
diff --git a/nixpkgs/nixos/modules/services/misc/fstrim.nix b/nixpkgs/nixos/modules/services/misc/fstrim.nix
new file mode 100644
index 000000000000..55fb24e29272
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/fstrim.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.fstrim;
+
+in {
+
+  options = {
+
+    services.fstrim = {
+      enable = mkEnableOption (lib.mdDoc "periodic SSD TRIM of mounted partitions in background");
+
+      interval = mkOption {
+        type = types.str;
+        default = "weekly";
+        description = lib.mdDoc ''
+          How often we run fstrim. For most desktop and server systems
+          a sufficient trimming frequency is once a week.
+
+          The format is described in
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.packages = [ pkgs.util-linux ];
+
+    systemd.timers.fstrim = {
+      timerConfig = {
+        OnCalendar = [ "" cfg.interval ];
+      };
+      wantedBy = [ "timers.target" ];
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ ];
+}
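
A minimal sketch of enabling periodic TRIM; the interval shown is the module default and accepts any systemd.time(7) expression:

```nix
{
  services.fstrim = {
    enable = true;
    interval = "weekly";   # e.g. "daily" or "Mon 03:00" would also work
  };
}
```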
diff --git a/nixpkgs/nixos/modules/services/misc/gammu-smsd.nix b/nixpkgs/nixos/modules/services/misc/gammu-smsd.nix
new file mode 100644
index 000000000000..eff725f5a868
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gammu-smsd.nix
@@ -0,0 +1,253 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+let
+  cfg = config.services.gammu-smsd;
+
+  configFile = pkgs.writeText "gammu-smsd.conf" ''
+    [gammu]
+    Device = ${cfg.device.path}
+    Connection = ${cfg.device.connection}
+    SynchronizeTime = ${if cfg.device.synchronizeTime then "yes" else "no"}
+    LogFormat = ${cfg.log.format}
+    ${optionalString (cfg.device.pin != null) "PIN = ${cfg.device.pin}"}
+    ${cfg.extraConfig.gammu}
+
+
+    [smsd]
+    LogFile = ${cfg.log.file}
+    Service = ${cfg.backend.service}
+
+    ${optionalString (cfg.backend.service == "files") ''
+      InboxPath = ${cfg.backend.files.inboxPath}
+      OutboxPath = ${cfg.backend.files.outboxPath}
+      SentSMSPath = ${cfg.backend.files.sentSMSPath}
+      ErrorSMSPath = ${cfg.backend.files.errorSMSPath}
+    ''}
+
+    ${optionalString (cfg.backend.service == "sql" && cfg.backend.sql.driver == "sqlite") ''
+      Driver = ${cfg.backend.sql.driver}
+      DBDir = ${cfg.backend.sql.database}
+    ''}
+
+    ${optionalString (cfg.backend.service == "sql" && cfg.backend.sql.driver == "native_pgsql") (
+      with cfg.backend; ''
+        Driver = ${sql.driver}
+        ${optionalString (sql.database!= null) "Database = ${sql.database}"}
+        ${optionalString (sql.host != null) "Host = ${sql.host}"}
+        ${optionalString (sql.user != null) "User = ${sql.user}"}
+        ${optionalString (sql.password != null) "Password = ${sql.password}"}
+      '')}
+
+    ${cfg.extraConfig.smsd}
+  '';
+
+  initDBDir = "share/doc/gammu/examples/sql";
+
+  gammuPackage = with cfg.backend; (pkgs.gammu.override {
+    dbiSupport = service == "sql" && sql.driver == "sqlite";
+    postgresSupport = service == "sql" && sql.driver == "native_pgsql";
+  });
+
+in {
+  options = {
+    services.gammu-smsd = {
+
+      enable = mkEnableOption (lib.mdDoc "gammu-smsd daemon");
+
+      user = mkOption {
+        type = types.str;
+        default = "smsd";
+        description = lib.mdDoc "User that has access to the device";
+      };
+
+      device = {
+        path = mkOption {
+          type = types.path;
+          description = lib.mdDoc "Device node or address of the phone";
+          example = "/dev/ttyUSB2";
+        };
+
+        group = mkOption {
+          type = types.str;
+          default = "root";
+          description = lib.mdDoc "Owner group of the device";
+          example = "dialout";
+        };
+
+        connection = mkOption {
+          type = types.str;
+          default = "at";
+          description = lib.mdDoc "Protocol which will be used to talk to the phone";
+        };
+
+        synchronizeTime = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to set time from computer to the phone during starting connection";
+        };
+
+        pin = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc "PIN code for the simcard";
+        };
+      };
+
+
+      log = {
+        file = mkOption {
+          type = types.str;
+          default = "syslog";
+          description = lib.mdDoc "Path to file where information about communication will be stored";
+        };
+
+        format = mkOption {
+          type = types.enum [ "nothing" "text" "textall" "textalldate" "errors" "errorsdate" "binary" ];
+          default = "errors";
+          description = lib.mdDoc "Determines what will be logged to the LogFile";
+        };
+      };
+
+
+      extraConfig = {
+        gammu = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc "Extra config lines to be added into [gammu] section";
+        };
+
+
+        smsd = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc "Extra config lines to be added into [smsd] section";
+        };
+      };
+
+
+      backend = {
+        service = mkOption {
+          type = types.enum [ "null" "files" "sql" ];
+          default = "null";
+          description = lib.mdDoc "Service to use to store sms data.";
+        };
+
+        files = {
+          inboxPath = mkOption {
+            type = types.path;
+            default = "/var/spool/sms/inbox/";
+            description = lib.mdDoc "Where the received SMSes are stored";
+          };
+
+          outboxPath = mkOption {
+            type = types.path;
+            default = "/var/spool/sms/outbox/";
+            description = lib.mdDoc "Where SMSes to be sent should be placed";
+          };
+
+          sentSMSPath = mkOption {
+            type = types.path;
+            default = "/var/spool/sms/sent/";
+            description = lib.mdDoc "Where the transmitted SMSes are placed";
+          };
+
+          errorSMSPath = mkOption {
+            type = types.path;
+            default = "/var/spool/sms/error/";
+            description = lib.mdDoc "Where SMSes with error in transmission is placed";
+          };
+        };
+
+        sql = {
+          driver = mkOption {
+            type = types.enum [ "native_mysql" "native_pgsql" "odbc" "dbi" ];
+            description = lib.mdDoc "DB driver to use";
+          };
+
+          sqlDialect = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc "SQL dialect to use (odbc driver only)";
+          };
+
+          database = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc "Database name to store sms data";
+          };
+
+          host = mkOption {
+            type = types.str;
+            default = "localhost";
+            description = lib.mdDoc "Database server address";
+          };
+
+          user = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc "User name used for connection to the database";
+          };
+
+          password = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc "User password used for connection to the database";
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.${cfg.user} = {
+      description = "gammu-smsd user";
+      isSystemUser = true;
+      group = cfg.device.group;
+    };
+
+    environment.systemPackages = with cfg.backend; [ gammuPackage ]
+    ++ optionals (service == "sql" && sql.driver == "sqlite")  [ pkgs.sqlite ];
+
+    systemd.services.gammu-smsd = {
+      description = "gammu-smsd daemon";
+
+      wantedBy = [ "multi-user.target" ];
+
+      wants = with cfg.backend; [ ]
+      ++ optionals (service == "sql" && sql.driver == "native_pgsql") [ "postgresql.service" ];
+
+      preStart = with cfg.backend;
+
+        optionalString (service == "files") (with files; ''
+          mkdir -m 755 -p ${inboxPath} ${outboxPath} ${sentSMSPath} ${errorSMSPath}
+          chown ${cfg.user} -R ${inboxPath}
+          chown ${cfg.user} -R ${outboxPath}
+          chown ${cfg.user} -R ${sentSMSPath}
+          chown ${cfg.user} -R ${errorSMSPath}
+        '')
+      + optionalString (service == "sql" && sql.driver == "sqlite") ''
+         cat "${gammuPackage}/${initDBDir}/sqlite.sql" \
+         | ${pkgs.sqlite.bin}/bin/sqlite3 ${sql.database}
+        ''
+      + (let execPsql = extraArgs: concatStringsSep " " [
+          (optionalString (sql.password != null) "PGPASSWORD=${sql.password}")
+          "${config.services.postgresql.package}/bin/psql"
+          (optionalString (sql.host != null) "-h ${sql.host}")
+          (optionalString (sql.user != null) "-U ${sql.user}")
+          "$extraArgs"
+          "${sql.database}"
+        ]; in optionalString (service == "sql" && sql.driver == "native_pgsql") ''
+         echo '\i '"${gammuPackage}/${initDBDir}/pgsql.sql" | ${execPsql ""}
+       '');
+
+      serviceConfig = {
+        User = "${cfg.user}";
+        Group = "${cfg.device.group}";
+        PermissionsStartOnly = true;
+        ExecStart = "${gammuPackage}/bin/gammu-smsd -c ${configFile}";
+      };
+
+    };
+  };
+}
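
A minimal sketch of a files-backend setup; the device path and group are placeholders that depend on the actual modem:

```nix
{
  services.gammu-smsd = {
    enable = true;
    device = {
      path = "/dev/ttyUSB2";   # placeholder modem device
      group = "dialout";       # group owning the device node
      connection = "at";
    };
    backend.service = "files"; # store SMS under /var/spool/sms/* as configured above
  };
}
```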
diff --git a/nixpkgs/nixos/modules/services/misc/geoipupdate.nix b/nixpkgs/nixos/modules/services/misc/geoipupdate.nix
new file mode 100644
index 000000000000..27c1157e9a8c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/geoipupdate.nix
@@ -0,0 +1,221 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.geoipupdate;
+  inherit (builtins) isAttrs isString isInt isList typeOf hashString;
+in
+{
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "geoip-updater" ] "services.geoip-updater has been removed, use services.geoipupdate instead.")
+  ];
+
+  options = {
+    services.geoipupdate = {
+      enable = lib.mkEnableOption (lib.mdDoc ''
+        periodic downloading of GeoIP databases using geoipupdate.
+      '');
+
+      interval = lib.mkOption {
+        type = lib.types.str;
+        default = "weekly";
+        description = lib.mdDoc ''
+          Update the GeoIP databases at this time / interval.
+          The format is described in
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+
+      settings = lib.mkOption {
+        example = lib.literalExpression ''
+          {
+            AccountID = 200001;
+            DatabaseDirectory = "/var/lib/GeoIP";
+            LicenseKey = { _secret = "/run/keys/maxmind_license_key"; };
+            Proxy = "10.0.0.10:8888";
+            ProxyUserPassword = { _secret = "/run/keys/proxy_pass"; };
+          }
+        '';
+        description = lib.mdDoc ''
+          geoipupdate configuration options. See
+          <https://github.com/maxmind/geoipupdate/blob/main/doc/GeoIP.conf.md>
+          for a full list of available options.
+
+          Settings containing secret data should be set to an
+          attribute set containing the attribute
+          `_secret` - a string pointing to a file
+          containing the value the option should be set to. See the
+          example to get a better picture of this: in the resulting
+          {file}`GeoIP.conf` file, the
+          `ProxyUserPassword` key will be set to the
+          contents of the
+          {file}`/run/keys/proxy_pass` file.
+        '';
+        type = lib.types.submodule {
+          freeformType =
+            with lib.types;
+            let
+              type = oneOf [str int bool];
+            in
+              attrsOf (either type (listOf type));
+
+          options = {
+
+            AccountID = lib.mkOption {
+              type = lib.types.int;
+              description = lib.mdDoc ''
+                Your MaxMind account ID.
+              '';
+            };
+
+            EditionIDs = lib.mkOption {
+              type = with lib.types; listOf (either str int);
+              example = [
+                "GeoLite2-ASN"
+                "GeoLite2-City"
+                "GeoLite2-Country"
+              ];
+              description = lib.mdDoc ''
+                List of database edition IDs. This includes new string
+                IDs like `GeoIP2-City` and old
+                numeric IDs like `106`.
+              '';
+            };
+
+            LicenseKey = lib.mkOption {
+              type = with lib.types; either path (attrsOf path);
+              description = lib.mdDoc ''
+                A file containing the MaxMind license key.
+
+                Always handled as a secret whether the value is
+                wrapped in a `{ _secret = ...; }`
+                attrset or not (refer to [](#opt-services.geoipupdate.settings) for
+                details).
+              '';
+              apply = x: if isAttrs x then x else { _secret = x; };
+            };
+
+            DatabaseDirectory = lib.mkOption {
+              type = lib.types.path;
+              default = "/var/lib/GeoIP";
+              example = "/run/GeoIP";
+              description = lib.mdDoc ''
+                The directory to store the database files in. The
+                directory will be automatically created, the owner
+                changed to `geoip` and permissions
+                set to world readable. This applies if the directory
+                already exists as well, so don't use a directory with
+                sensitive contents.
+              '';
+            };
+
+          };
+        };
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    services.geoipupdate.settings = {
+      LockFile = "/run/geoipupdate/.lock";
+    };
+
+    systemd.services.geoipupdate-create-db-dir = {
+      serviceConfig.Type = "oneshot";
+      script = ''
+        set -o errexit -o pipefail -o nounset -o errtrace
+        shopt -s inherit_errexit
+
+        mkdir -p ${cfg.settings.DatabaseDirectory}
+        chmod 0755 ${cfg.settings.DatabaseDirectory}
+      '';
+    };
+
+    systemd.services.geoipupdate = {
+      description = "GeoIP Updater";
+      requires = [ "geoipupdate-create-db-dir.service" ];
+      after = [
+        "geoipupdate-create-db-dir.service"
+        "network-online.target"
+        "nss-lookup.target"
+      ];
+      path = [ pkgs.replace-secret ];
+      wants = [ "network-online.target" ];
+      startAt = cfg.interval;
+      serviceConfig = {
+        ExecStartPre =
+          let
+            isSecret = v: isAttrs v && v ? _secret && isString v._secret;
+            geoipupdateKeyValue = lib.generators.toKeyValue {
+              mkKeyValue = lib.flip lib.generators.mkKeyValueDefault " " rec {
+                mkValueString = v:
+                  if isInt           v then toString v
+                  else if isString   v then v
+                  else if true  ==   v then "1"
+                  else if false ==   v then "0"
+                  else if isList     v then lib.concatMapStringsSep " " mkValueString v
+                  else if isSecret   v then hashString "sha256" v._secret
+                  else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
+              };
+            };
+            secretPaths = lib.catAttrs "_secret" (lib.collect isSecret cfg.settings);
+            mkSecretReplacement = file: ''
+              replace-secret ${lib.escapeShellArgs [ (hashString "sha256" file) file "/run/geoipupdate/GeoIP.conf" ]}
+            '';
+            secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
+
+            geoipupdateConf = pkgs.writeText "geoipupdate.conf" (geoipupdateKeyValue cfg.settings);
+
+            script = ''
+              set -o errexit -o pipefail -o nounset -o errtrace
+              shopt -s inherit_errexit
+
+              chown geoip "${cfg.settings.DatabaseDirectory}"
+
+              cp ${geoipupdateConf} /run/geoipupdate/GeoIP.conf
+              ${secretReplacements}
+            '';
+          in
+            "+${pkgs.writeShellScript "start-pre-full-privileges" script}";
+        ExecStart = "${pkgs.geoipupdate}/bin/geoipupdate -f /run/geoipupdate/GeoIP.conf";
+        User = "geoip";
+        DynamicUser = true;
+        ReadWritePaths = cfg.settings.DatabaseDirectory;
+        RuntimeDirectory = "geoipupdate";
+        RuntimeDirectoryMode = "0700";
+        CapabilityBoundingSet = "";
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        MemoryDenyWriteExecute = true;
+        LockPersonality = true;
+        SystemCallArchitectures = "native";
+      };
+    };
+
+    systemd.timers.geoipupdate-initial-run = {
+      wantedBy = [ "timers.target" ];
+      unitConfig.ConditionPathExists = "!${cfg.settings.DatabaseDirectory}";
+      timerConfig = {
+        Unit = "geoipupdate.service";
+        OnActiveSec = 0;
+      };
+    };
+  };
+
+  meta.maintainers = [ lib.maintainers.talyz ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/gitea.nix b/nixpkgs/nixos/modules/services/misc/gitea.nix
new file mode 100644
index 000000000000..be528a298991
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gitea.nix
@@ -0,0 +1,711 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gitea;
+  opt = options.services.gitea;
+  exe = lib.getExe cfg.package;
+  pg = config.services.postgresql;
+  useMysql = cfg.database.type == "mysql";
+  usePostgresql = cfg.database.type == "postgres";
+  useSqlite = cfg.database.type == "sqlite3";
+  format = pkgs.formats.ini { };
+  configFile = pkgs.writeText "app.ini" ''
+    APP_NAME = ${cfg.appName}
+    RUN_USER = ${cfg.user}
+    RUN_MODE = prod
+    WORK_PATH = ${cfg.stateDir}
+
+    ${generators.toINI {} cfg.settings}
+
+    ${optionalString (cfg.extraConfig != null) cfg.extraConfig}
+  '';
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "gitea" "cookieSecure" ] [ "services" "gitea" "settings" "session" "COOKIE_SECURE" ])
+    (mkRenamedOptionModule [ "services" "gitea" "disableRegistration" ] [ "services" "gitea" "settings" "service" "DISABLE_REGISTRATION" ])
+    (mkRenamedOptionModule [ "services" "gitea" "domain" ] [ "services" "gitea" "settings" "server" "DOMAIN" ])
+    (mkRenamedOptionModule [ "services" "gitea" "httpAddress" ] [ "services" "gitea" "settings" "server" "HTTP_ADDR" ])
+    (mkRenamedOptionModule [ "services" "gitea" "httpPort" ] [ "services" "gitea" "settings" "server" "HTTP_PORT" ])
+    (mkRenamedOptionModule [ "services" "gitea" "log" "level" ] [ "services" "gitea" "settings" "log" "LEVEL" ])
+    (mkRenamedOptionModule [ "services" "gitea" "log" "rootPath" ] [ "services" "gitea" "settings" "log" "ROOT_PATH" ])
+    (mkRenamedOptionModule [ "services" "gitea" "rootUrl" ] [ "services" "gitea" "settings" "server" "ROOT_URL" ])
+    (mkRenamedOptionModule [ "services" "gitea" "ssh" "clonePort" ] [ "services" "gitea" "settings" "server" "SSH_PORT" ])
+    (mkRenamedOptionModule [ "services" "gitea" "staticRootPath" ] [ "services" "gitea" "settings" "server" "STATIC_ROOT_PATH" ])
+
+    (mkChangedOptionModule [ "services" "gitea" "enableUnixSocket" ] [ "services" "gitea" "settings" "server" "PROTOCOL" ] (
+      config: if config.services.gitea.enableUnixSocket then "http+unix" else "http"
+    ))
+
+    (mkRemovedOptionModule [ "services" "gitea" "ssh" "enable" ] "services.gitea.ssh.enable has been migrated into freeform setting services.gitea.settings.server.DISABLE_SSH. Keep in mind that the setting is inverted")
+  ];
+
+  options = {
+    services.gitea = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Enable Gitea Service.";
+      };
+
+      package = mkOption {
+        default = pkgs.gitea;
+        type = types.package;
+        defaultText = literalExpression "pkgs.gitea";
+        description = lib.mdDoc "gitea derivation to use";
+      };
+
+      useWizard = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Do not generate a configuration and use gitea' installation wizard instead. The first registered user will be administrator.";
+      };
+
+      stateDir = mkOption {
+        default = "/var/lib/gitea";
+        type = types.str;
+        description = lib.mdDoc "Gitea data directory.";
+      };
+
+      customDir = mkOption {
+        default = "${cfg.stateDir}/custom";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/custom"'';
+        type = types.str;
+        description = lib.mdDoc "Gitea custom directory. Used for config, custom templates and other options.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "gitea";
+        description = lib.mdDoc "User account under which gitea runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "gitea";
+        description = lib.mdDoc "Group under which gitea runs.";
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "sqlite3" "mysql" "postgres" ];
+          example = "mysql";
+          default = "sqlite3";
+          description = lib.mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = if !usePostgresql then 3306 else pg.port;
+          defaultText = literalExpression ''
+            if config.${opt.database.type} != "postgres"
+            then 3306
+            else config.${options.services.postgresql.port}
+          '';
+          description = lib.mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "gitea";
+          description = lib.mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "gitea";
+          description = lib.mdDoc "Database user.";
+        };
+
+        password = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            The password corresponding to {option}`database.user`.
+            Warning: this is stored in cleartext in the Nix store!
+            Use {option}`database.passwordFile` instead.
+          '';
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/gitea-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            {option}`database.user`.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = if (cfg.database.createDatabase && usePostgresql) then "/run/postgresql" else if (cfg.database.createDatabase && useMysql) then "/run/mysqld/mysqld.sock" else null;
+          defaultText = literalExpression "null";
+          example = "/run/mysqld/mysqld.sock";
+          description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+        };
+
+        path = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/data/gitea.db";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/data/gitea.db"'';
+          description = lib.mdDoc "Path to the sqlite3 database file.";
+        };
+
+        createDatabase = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to create a local database automatically.";
+        };
+      };
+
+      dump = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable a timer that runs gitea dump to generate backup files of the
+            current gitea database and repositories.
+          '';
+        };
+
+        interval = mkOption {
+          type = types.str;
+          default = "04:31";
+          example = "hourly";
+          description = lib.mdDoc ''
+            Run a gitea dump at this interval. Runs by default at 04:31 every day.
+
+            The format is described in
+            {manpage}`systemd.time(7)`.
+          '';
+        };
+
+        backupDir = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/dump";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/dump"'';
+          description = lib.mdDoc "Path to the dump files.";
+        };
+
+        type = mkOption {
+          type = types.enum [ "zip" "rar" "tar" "sz" "tar.gz" "tar.xz" "tar.bz2" "tar.br" "tar.lz4" "tar.zst" ];
+          default = "zip";
+          description = lib.mdDoc "Archive format used to store the dump file.";
+        };
+
+        file = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc "Filename to be used for the dump. If `null` a default name is chosen by gitea.";
+          example = "gitea-dump";
+        };
+      };
+
+      lfs = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Enables git-lfs support.";
+        };
+
+        contentDir = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/data/lfs";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/data/lfs"'';
+          description = lib.mdDoc "Where to store LFS files.";
+        };
+      };
+
+      appName = mkOption {
+        type = types.str;
+        default = "gitea: Gitea Service";
+        description = lib.mdDoc "Application name.";
+      };
+
+      repositoryRoot = mkOption {
+        type = types.str;
+        default = "${cfg.stateDir}/repositories";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/repositories"'';
+        description = lib.mdDoc "Path to the git repositories.";
+      };
+
+      mailerPasswordFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/var/lib/secrets/gitea/mailpw";
+        description = lib.mdDoc "Path to a file containing the SMTP password.";
+      };
+
+      metricsTokenFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/var/lib/secrets/gitea/metrics_token";
+        description = lib.mdDoc "Path to a file containing the metrics authentication token.";
+      };
+
+      settings = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Gitea configuration. Refer to <https://docs.gitea.io/en-us/config-cheat-sheet/>
+          for details on supported values.
+        '';
+        example = literalExpression ''
+          {
+            "cron.sync_external_users" = {
+              RUN_AT_START = true;
+              SCHEDULE = "@every 24h";
+              UPDATE_EXISTING = true;
+            };
+            mailer = {
+              ENABLED = true;
+              MAILER_TYPE = "sendmail";
+              FROM = "do-not-reply@example.org";
+              SENDMAIL_PATH = "''${pkgs.system-sendmail}/bin/sendmail";
+            };
+            other = {
+              SHOW_FOOTER_VERSION = false;
+            };
+          }
+        '';
+        type = types.submodule {
+          freeformType = format.type;
+          options = {
+            log = {
+              ROOT_PATH = mkOption {
+                default = "${cfg.stateDir}/log";
+                defaultText = literalExpression ''"''${config.${opt.stateDir}}/log"'';
+                type = types.str;
+                description = lib.mdDoc "Root path for log files.";
+              };
+              LEVEL = mkOption {
+                default = "Info";
+                type = types.enum [ "Trace" "Debug" "Info" "Warn" "Error" "Critical" ];
+                description = lib.mdDoc "General log level.";
+              };
+            };
+
+            server = {
+              PROTOCOL = mkOption {
+                type = types.enum [ "http" "https" "fcgi" "http+unix" "fcgi+unix" ];
+                default = "http";
+                description = lib.mdDoc ''Listen protocol. `+unix` means "over unix", not "in addition to."'';
+              };
+
+              HTTP_ADDR = mkOption {
+                type = types.either types.str types.path;
+                default = if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/gitea/gitea.sock" else "0.0.0.0";
+                defaultText = literalExpression ''if lib.hasSuffix "+unix" cfg.settings.server.PROTOCOL then "/run/gitea/gitea.sock" else "0.0.0.0"'';
+                description = lib.mdDoc "Listen address. Must be a path when using a unix socket.";
+              };
+
+              HTTP_PORT = mkOption {
+                type = types.port;
+                default = 3000;
+                description = lib.mdDoc "Listen port. Ignored when using a unix socket.";
+              };
+
+              DOMAIN = mkOption {
+                type = types.str;
+                default = "localhost";
+                description = lib.mdDoc "Domain name of your server.";
+              };
+
+              ROOT_URL = mkOption {
+                type = types.str;
+                default = "http://${cfg.settings.server.DOMAIN}:${toString cfg.settings.server.HTTP_PORT}/";
+                defaultText = literalExpression ''"http://''${config.services.gitea.settings.server.DOMAIN}:''${toString config.services.gitea.settings.server.HTTP_PORT}/"'';
+                description = lib.mdDoc "Full public URL of gitea server.";
+              };
+
+              STATIC_ROOT_PATH = mkOption {
+                type = types.either types.str types.path;
+                default = cfg.package.data;
+                defaultText = literalExpression "config.${opt.package}.data";
+                example = "/var/lib/gitea/data";
+                description = lib.mdDoc "Upper level of template and static files path.";
+              };
+
+              DISABLE_SSH = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc "Disable external SSH feature.";
+              };
+
+              SSH_PORT = mkOption {
+                type = types.port;
+                default = 22;
+                example = 2222;
+                description = lib.mdDoc ''
+                  SSH port displayed in clone URL.
+                  This option is required when the externally visible port
+                  differs from the local listening port, e.g. if port forwarding is used.
+                '';
+              };
+            };
+
+            service = {
+              DISABLE_REGISTRATION = mkEnableOption (lib.mdDoc "the registration lock") // {
+                description = lib.mdDoc ''
+                  By default any user can create an account on this `gitea` instance.
+                  This can be disabled by using this option.
+
+                  *Note:* please keep in mind that this should be added after the initial
+                  deploy unless [](#opt-services.gitea.useWizard)
+                  is `true`, as the first registered user will be the administrator if
+                  no install wizard is used.
+                '';
+              };
+            };
+
+            session = {
+              COOKIE_SECURE = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Marks session cookies as "secure" as a hint for browsers to only send
+                  them via HTTPS. This option is recommended if gitea is being served over HTTPS.
+                '';
+              };
+            };
+          };
+        };
+      };
+
+      extraConfig = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc "Configuration lines appended to the generated gitea configuration file.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.database.createDatabase -> useSqlite || cfg.database.user == cfg.user;
+        message = "services.gitea.database.user must match services.gitea.user if the database is to be automatically provisioned";
+      }
+      { assertion = cfg.database.createDatabase && usePostgresql -> cfg.database.user == cfg.database.name;
+        message = ''
+          When creating a database via NixOS, the db user and db name must be equal!
+          If you already have an existing DB+user and this assertion is new, you can safely set
+          `services.gitea.database.createDatabase` to `false` because removal of `ensureUsers`
+          and `ensureDatabases` doesn't have any effect.
+        '';
+      }
+    ];
+
+    services.gitea.settings = {
+      "cron.update_checker".ENABLED = lib.mkDefault false;
+
+      database = mkMerge [
+        {
+          DB_TYPE = cfg.database.type;
+        }
+        (mkIf (useMysql || usePostgresql) {
+          HOST = if cfg.database.socket != null then cfg.database.socket else cfg.database.host + ":" + toString cfg.database.port;
+          NAME = cfg.database.name;
+          USER = cfg.database.user;
+          PASSWD = "#dbpass#";
+        })
+        (mkIf useSqlite {
+          PATH = cfg.database.path;
+        })
+        (mkIf usePostgresql {
+          SSL_MODE = "disable";
+        })
+      ];
+
+      repository = {
+        ROOT = cfg.repositoryRoot;
+      };
+
+      server = mkIf cfg.lfs.enable {
+        LFS_START_SERVER = true;
+        LFS_JWT_SECRET = "#lfsjwtsecret#";
+      };
+
+      session = {
+        COOKIE_NAME = lib.mkDefault "session";
+      };
+
+      security = {
+        SECRET_KEY = "#secretkey#";
+        INTERNAL_TOKEN = "#internaltoken#";
+        INSTALL_LOCK = true;
+      };
+
+      mailer = mkIf (cfg.mailerPasswordFile != null) {
+        PASSWD = "#mailerpass#";
+      };
+
+      metrics = mkIf (cfg.metricsTokenFile != null) {
+        TOKEN = "#metricstoken#";
+      };
+
+      oauth2 = {
+        JWT_SECRET = "#oauth2jwtsecret#";
+      };
+
+      lfs = mkIf cfg.lfs.enable {
+        PATH = cfg.lfs.contentDir;
+      };
+
+      packages.CHUNKED_UPLOAD_PATH = "${cfg.stateDir}/tmp/package-upload";
+    };
+
+    services.postgresql = optionalAttrs (usePostgresql && cfg.database.createDatabase) {
+      enable = mkDefault true;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    services.mysql = optionalAttrs (useMysql && cfg.database.createDatabase) {
+      enable = mkDefault true;
+      package = mkDefault pkgs.mariadb;
+
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.dump.backupDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.repositoryRoot}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/.ssh' 0700 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.customDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.customDir}/conf' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/data' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
+
+      # If we have a folder or symlink with gitea locales, remove it
+      # And symlink the current gitea locales in place
+      "L+ '${cfg.stateDir}/conf/locale' - - - - ${cfg.package.out}/locale"
+
+    ] ++ lib.optionals cfg.lfs.enable [
+      "d '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '${cfg.lfs.contentDir}' 0750 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.gitea = {
+      description = "gitea";
+      after = [ "network.target" ] ++ optional usePostgresql "postgresql.service" ++ optional useMysql "mysql.service";
+      requires = optional (cfg.database.createDatabase && usePostgresql) "postgresql.service" ++ optional (cfg.database.createDatabase && useMysql) "mysql.service";
+      wantedBy = [ "multi-user.target" ];
+      path = [ cfg.package pkgs.git pkgs.gnupg ];
+
+      # In older versions the secret naming for JWT was kind of confusing.
+      # The file jwt_secret held the value for LFS_JWT_SECRET, and JWT_SECRET
+      # wasn't persisted at all.
+      # To fix that, there is now the file oauth2_jwt_secret containing the
+      # value for JWT_SECRET, and the file jwt_secret gets renamed to
+      # lfs_jwt_secret.
+      # We have to consider this to stay compatible with older installations.
+      preStart = let
+        runConfig = "${cfg.customDir}/conf/app.ini";
+        secretKey = "${cfg.customDir}/conf/secret_key";
+        oauth2JwtSecret = "${cfg.customDir}/conf/oauth2_jwt_secret";
+        oldLfsJwtSecret = "${cfg.customDir}/conf/jwt_secret"; # old file for LFS_JWT_SECRET
+        lfsJwtSecret = "${cfg.customDir}/conf/lfs_jwt_secret"; # new file for LFS_JWT_SECRET
+        internalToken = "${cfg.customDir}/conf/internal_token";
+        replaceSecretBin = "${pkgs.replace-secret}/bin/replace-secret";
+      in ''
+        # copy custom configuration and generate random secrets if needed
+        ${optionalString (!cfg.useWizard) ''
+          function gitea_setup {
+            cp -f '${configFile}' '${runConfig}'
+
+            if [ ! -s '${secretKey}' ]; then
+                ${exe} generate secret SECRET_KEY > '${secretKey}'
+            fi
+
+            # Migrate LFS_JWT_SECRET filename
+            if [[ -s '${oldLfsJwtSecret}' && ! -s '${lfsJwtSecret}' ]]; then
+                mv '${oldLfsJwtSecret}' '${lfsJwtSecret}'
+            fi
+
+            if [ ! -s '${oauth2JwtSecret}' ]; then
+                ${exe} generate secret JWT_SECRET > '${oauth2JwtSecret}'
+            fi
+
+            ${lib.optionalString cfg.lfs.enable ''
+            if [ ! -s '${lfsJwtSecret}' ]; then
+                ${exe} generate secret LFS_JWT_SECRET > '${lfsJwtSecret}'
+            fi
+            ''}
+
+            if [ ! -s '${internalToken}' ]; then
+                ${exe} generate secret INTERNAL_TOKEN > '${internalToken}'
+            fi
+
+            chmod u+w '${runConfig}'
+            ${replaceSecretBin} '#secretkey#' '${secretKey}' '${runConfig}'
+            ${replaceSecretBin} '#dbpass#' '${cfg.database.passwordFile}' '${runConfig}'
+            ${replaceSecretBin} '#oauth2jwtsecret#' '${oauth2JwtSecret}' '${runConfig}'
+            ${replaceSecretBin} '#internaltoken#' '${internalToken}' '${runConfig}'
+
+            ${lib.optionalString cfg.lfs.enable ''
+              ${replaceSecretBin} '#lfsjwtsecret#' '${lfsJwtSecret}' '${runConfig}'
+            ''}
+
+            ${lib.optionalString (cfg.mailerPasswordFile != null) ''
+              ${replaceSecretBin} '#mailerpass#' '${cfg.mailerPasswordFile}' '${runConfig}'
+            ''}
+
+            ${lib.optionalString (cfg.metricsTokenFile != null) ''
+              ${replaceSecretBin} '#metricstoken#' '${cfg.metricsTokenFile}' '${runConfig}'
+            ''}
+            chmod u-w '${runConfig}'
+          }
+          (umask 027; gitea_setup)
+        ''}
+
+        # run migrations/init the database
+        ${exe} migrate
+
+        # update all hooks' binary paths
+        ${exe} admin regenerate hooks
+
+        # update command option in authorized_keys
+        if [ -r ${cfg.stateDir}/.ssh/authorized_keys ]
+        then
+          ${exe} admin regenerate keys
+        fi
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.stateDir;
+        ExecStart = "${exe} web --pid /run/gitea/gitea.pid";
+        Restart = "always";
+        # Runtime directory and mode
+        RuntimeDirectory = "gitea";
+        RuntimeDirectoryMode = "0755";
+        # Proc filesystem
+        ProcSubset = "pid";
+        ProtectProc = "invisible";
+        # Access write directories
+        ReadWritePaths = [ cfg.customDir cfg.dump.backupDir cfg.repositoryRoot cfg.stateDir cfg.lfs.contentDir ];
+        UMask = "0027";
+        # Capabilities
+        CapabilityBoundingSet = "";
+        # Security
+        NoNewPrivileges = true;
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @setuid" "setrlimit" ];
+      };
+
+      environment = {
+        USER = cfg.user;
+        HOME = cfg.stateDir;
+        GITEA_WORK_DIR = cfg.stateDir;
+        GITEA_CUSTOM = cfg.customDir;
+      };
+    };
+
+    users.users = mkIf (cfg.user == "gitea") {
+      gitea = {
+        description = "Gitea Service";
+        home = cfg.stateDir;
+        useDefaultShell = true;
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "gitea") {
+      gitea = {};
+    };
+
+    warnings =
+      optional (cfg.database.password != "") "config.services.gitea.database.password will be stored as plaintext in the Nix store. Use database.passwordFile instead." ++
+      optional (cfg.extraConfig != null) ''
+        services.gitea.`extraConfig` is deprecated, please use services.gitea.`settings`.
+      '';
+
+    # Create database passwordFile default when password is configured.
+    services.gitea.database.passwordFile =
+      mkDefault (toString (pkgs.writeTextFile {
+        name = "gitea-database-password";
+        text = cfg.database.password;
+      }));
+
+    systemd.services.gitea-dump = mkIf cfg.dump.enable {
+       description = "gitea dump";
+       after = [ "gitea.service" ];
+       path = [ cfg.package ];
+
+       environment = {
+         USER = cfg.user;
+         HOME = cfg.stateDir;
+         GITEA_WORK_DIR = cfg.stateDir;
+         GITEA_CUSTOM = cfg.customDir;
+       };
+
+       serviceConfig = {
+         Type = "oneshot";
+         User = cfg.user;
+         ExecStart = "${exe} dump --type ${cfg.dump.type}" + optionalString (cfg.dump.file != null) " --file ${cfg.dump.file}";
+         WorkingDirectory = cfg.dump.backupDir;
+       };
+    };
+
+    systemd.timers.gitea-dump = mkIf cfg.dump.enable {
+      description = "Update timer for gitea-dump";
+      partOf = [ "gitea-dump.service" ];
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.dump.interval;
+    };
+  };
+  meta.maintainers = with lib.maintainers; [ srhb ma27 thehedgeh0g ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/gitlab.md b/nixpkgs/nixos/modules/services/misc/gitlab.md
new file mode 100644
index 000000000000..916b23584ed0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gitlab.md
@@ -0,0 +1,112 @@
+# GitLab {#module-services-gitlab}
+
+GitLab is a feature-rich git hosting service.
+
+## Prerequisites {#module-services-gitlab-prerequisites}
+
+The `gitlab` service exposes only a Unix socket at
+`/run/gitlab/gitlab-workhorse.socket`. You need to
+configure a webserver to proxy HTTP requests to the socket.
+
+For instance, the following configuration can be used to set up nginx as a
+frontend proxy:
+```
+services.nginx = {
+  enable = true;
+  recommendedGzipSettings = true;
+  recommendedOptimisation = true;
+  recommendedProxySettings = true;
+  recommendedTlsSettings = true;
+  virtualHosts."git.example.com" = {
+    enableACME = true;
+    forceSSL = true;
+    locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
+  };
+};
+```
+
+## Configuring {#module-services-gitlab-configuring}
+
+GitLab depends on both PostgreSQL and Redis and will automatically enable
+both services. In the case of PostgreSQL, a database and a role will be
+created.
+
+The default state directory is `/var/gitlab/state`. This is where
+all data, such as repositories and uploads, will be stored.
+
+A basic configuration with some custom settings could look like this:
+```
+services.gitlab = {
+  enable = true;
+  databasePasswordFile = "/var/keys/gitlab/db_password";
+  initialRootPasswordFile = "/var/keys/gitlab/root_password";
+  https = true;
+  host = "git.example.com";
+  port = 443;
+  user = "git";
+  group = "git";
+  smtp = {
+    enable = true;
+    address = "localhost";
+    port = 25;
+  };
+  secrets = {
+    dbFile = "/var/keys/gitlab/db";
+    secretFile = "/var/keys/gitlab/secret";
+    otpFile = "/var/keys/gitlab/otp";
+    jwsFile = "/var/keys/gitlab/jws";
+  };
+  extraConfig = {
+    gitlab = {
+      email_from = "gitlab-no-reply@example.com";
+      email_display_name = "Example GitLab";
+      email_reply_to = "gitlab-no-reply@example.com";
+      default_projects_features = { builds = false; };
+    };
+  };
+};
+```
+
+If you're setting up a new GitLab instance, generate new
+secrets. You can, for instance, use
+`tr -dc A-Za-z0-9 < /dev/urandom | head -c 128 > /var/keys/gitlab/db` to
+generate a new db secret. Make sure the files can be read by, and
+only by, the user specified by
+[services.gitlab.user](#opt-services.gitlab.user). GitLab
+encrypts sensitive data stored in the database. If you're restoring
+an existing GitLab instance, you must specify the secrets
+from `config/secrets.yml` located in your GitLab
+state folder.
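+
+As a minimal sketch (assuming the key paths from the example above, `git` as
+the service user, and that the JWS secret is an RSA private key), the
+remaining secret files could be generated like this:
+```ShellSession
+$ umask 077                       # keep the generated secrets private
+$ tr -dc A-Za-z0-9 < /dev/urandom | head -c 128 > /var/keys/gitlab/secret
+$ tr -dc A-Za-z0-9 < /dev/urandom | head -c 128 > /var/keys/gitlab/otp
+$ openssl genrsa 2048 > /var/keys/gitlab/jws   # assumption: JWS secret is an RSA key
+$ chown git /var/keys/gitlab/*
+```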
+
+When `incoming_mail.enabled` is set to `true`
+in [extraConfig](#opt-services.gitlab.extraConfig), an additional
+service called `gitlab-mailroom` is enabled for fetching incoming mail.
+
+Refer to [](#ch-options) for all available configuration
+options for the [services.gitlab](#opt-services.gitlab.enable) module.
+
+## Maintenance {#module-services-gitlab-maintenance}
+
+### Backups {#module-services-gitlab-maintenance-backups}
+
+Backups can be configured with the options in
+[services.gitlab.backup](#opt-services.gitlab.backup.keepTime). Use
+the [services.gitlab.backup.startAt](#opt-services.gitlab.backup.startAt)
+option to configure regular backups.
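+
+As an illustrative sketch (the times and skip list are only examples), a
+nightly backup that keeps archives for 48 hours and excludes CI artifacts and
+LFS objects could be configured like this:
+```
+services.gitlab.backup = {
+  startAt = "03:00";             # run every night at 03:00
+  keepTime = 48;                 # keep backups for 48 hours
+  skip = [ "artifacts" "lfs" ];  # exclude CI artifacts and LFS objects
+};
+```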
+
+To run a manual backup, start the `gitlab-backup` service:
+```ShellSession
+$ systemctl start gitlab-backup.service
+```
+
+### Rake tasks {#module-services-gitlab-maintenance-rake}
+
+You can run GitLab's rake tasks with `gitlab-rake`,
+which will be available on the system when GitLab is enabled. You
+have to run the command as the user that you configured to run
+GitLab with.
+
+A list of all available rake tasks can be obtained by running:
+```ShellSession
+$ sudo -u git -H gitlab-rake -T
+```
diff --git a/nixpkgs/nixos/modules/services/misc/gitlab.nix b/nixpkgs/nixos/modules/services/misc/gitlab.nix
new file mode 100644
index 000000000000..b399ccc38f58
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gitlab.nix
@@ -0,0 +1,1694 @@
+{ config, lib, options, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gitlab;
+  opt = options.services.gitlab;
+
+  toml = pkgs.formats.toml {};
+  yaml = pkgs.formats.yaml {};
+
+  postgresqlPackage = if config.services.postgresql.enable then
+                        config.services.postgresql.package
+                      else
+                        pkgs.postgresql_13;
+
+  gitlabSocket = "${cfg.statePath}/tmp/sockets/gitlab.socket";
+  gitalySocket = "${cfg.statePath}/tmp/sockets/gitaly.socket";
+  pathUrlQuote = url: replaceStrings ["/"] ["%2F"] url;
+
+  databaseConfig = let
+    val = {
+      adapter = "postgresql";
+      database = cfg.databaseName;
+      host = cfg.databaseHost;
+      username = cfg.databaseUsername;
+      encoding = "utf8";
+      pool = cfg.databasePool;
+    } // cfg.extraDatabaseConfig;
+  in if lib.versionAtLeast (lib.getVersion cfg.packages.gitlab) "15.0" then {
+    production.main = val;
+  } else {
+    production = val;
+  };
+
+  # We only want to create a database if we're actually going to connect to it.
+  databaseActuallyCreateLocally = cfg.databaseCreateLocally && cfg.databaseHost == "";
+
+  gitalyToml = pkgs.writeText "gitaly.toml" ''
+    socket_path = "${lib.escape ["\""] gitalySocket}"
+    runtime_dir = "/run/gitaly"
+    bin_dir = "${cfg.packages.gitaly}/bin"
+    prometheus_listen_addr = "localhost:9236"
+
+    [git]
+    bin_path = "${pkgs.git}/bin/git"
+
+    [gitlab-shell]
+    dir = "${cfg.packages.gitlab-shell}"
+
+    [hooks]
+    custom_hooks_dir = "${cfg.statePath}/custom_hooks"
+
+    [gitlab]
+    secret_file = "${cfg.statePath}/gitlab_shell_secret"
+    url = "http+unix://${pathUrlQuote gitlabSocket}"
+
+    [gitlab.http-settings]
+    self_signed_cert = false
+
+    ${concatStringsSep "\n" (attrValues (mapAttrs (k: v: ''
+    [[storage]]
+    name = "${lib.escape ["\""] k}"
+    path = "${lib.escape ["\""] v.path}"
+    '') gitlabConfig.production.repositories.storages))}
+  '';
+
+  gitlabShellConfig = flip recursiveUpdate cfg.extraShellConfig {
+    user = cfg.user;
+    gitlab_url = "http+unix://${pathUrlQuote gitlabSocket}";
+    http_settings.self_signed_cert = false;
+    repos_path = "${cfg.statePath}/repositories";
+    secret_file = "${cfg.statePath}/gitlab_shell_secret";
+    log_file = "${cfg.statePath}/log/gitlab-shell.log";
+  };
+
+  redisConfig.production.url = cfg.redisUrl;
+
+  cableYml = yaml.generate "cable.yml" {
+    production = {
+      adapter = "redis";
+      url = cfg.redisUrl;
+      channel_prefix = "gitlab_production";
+    };
+  };
+
+  # Redis configuration file
+  resqueYml = pkgs.writeText "resque.yml" (builtins.toJSON redisConfig);
+
+  gitlabConfig = {
+    # These are the default settings from config/gitlab.example.yml
+    production = flip recursiveUpdate cfg.extraConfig {
+      gitlab = {
+        host = cfg.host;
+        port = cfg.port;
+        https = cfg.https;
+        user = cfg.user;
+        email_enabled = true;
+        email_display_name = "GitLab";
+        email_reply_to = "noreply@localhost";
+        default_theme = 2;
+        default_projects_features = {
+          issues = true;
+          merge_requests = true;
+          wiki = true;
+          snippets = true;
+          builds = true;
+          container_registry = true;
+        };
+      };
+      repositories.storages.default.path = "${cfg.statePath}/repositories";
+      repositories.storages.default.gitaly_address = "unix:${gitalySocket}";
+      artifacts.enabled = true;
+      lfs.enabled = true;
+      gravatar.enabled = true;
+      cron_jobs = { };
+      gitlab_ci.builds_path = "${cfg.statePath}/builds";
+      ldap.enabled = false;
+      omniauth.enabled = false;
+      shared.path = "${cfg.statePath}/shared";
+      gitaly.client_path = "${cfg.packages.gitaly}/bin";
+      backup = {
+        gitaly_backup_path = "${cfg.packages.gitaly}/bin/gitaly-backup";
+        path = cfg.backup.path;
+        keep_time = cfg.backup.keepTime;
+      } // (optionalAttrs (cfg.backup.uploadOptions != {}) {
+        upload = cfg.backup.uploadOptions;
+      });
+      gitlab_shell = {
+        path = "${cfg.packages.gitlab-shell}";
+        hooks_path = "${cfg.statePath}/shell/hooks";
+        secret_file = "${cfg.statePath}/gitlab_shell_secret";
+        upload_pack = true;
+        receive_pack = true;
+      };
+      workhorse.secret_file = "${cfg.statePath}/.gitlab_workhorse_secret";
+      gitlab_kas.secret_file = "${cfg.statePath}/.gitlab_kas_secret";
+      git.bin_path = "git";
+      monitoring = {
+        ip_whitelist = [ "127.0.0.0/8" "::1/128" ];
+        sidekiq_exporter = {
+          enable = true;
+          address = "localhost";
+          port = 3807;
+        };
+      };
+      registry = lib.optionalAttrs cfg.registry.enable {
+        enabled = true;
+        host = cfg.registry.externalAddress;
+        port = cfg.registry.externalPort;
+        key = cfg.registry.keyFile;
+        api_url = "http://${config.services.dockerRegistry.listenAddress}:${toString config.services.dockerRegistry.port}/";
+        issuer = cfg.registry.issuer;
+      };
+      elasticsearch.indexer_path = "${pkgs.gitlab-elasticsearch-indexer}/bin/gitlab-elasticsearch-indexer";
+      extra = {};
+      uploads.storage_path = cfg.statePath;
+      pages = optionalAttrs cfg.pages.enable {
+        enabled = cfg.pages.enable;
+        port = 8090;
+        host = cfg.pages.settings.pages-domain;
+        secret_file = cfg.pages.settings.api-secret-key;
+      };
+    };
+  };
+
+  gitlabEnv = cfg.packages.gitlab.gitlabEnv // {
+    HOME = "${cfg.statePath}/home";
+    PUMA_PATH = "${cfg.statePath}/";
+    GITLAB_PATH = "${cfg.packages.gitlab}/share/gitlab/";
+    SCHEMA = "${cfg.statePath}/db/structure.sql";
+    GITLAB_UPLOADS_PATH = "${cfg.statePath}/uploads";
+    GITLAB_LOG_PATH = "${cfg.statePath}/log";
+    prometheus_multiproc_dir = "/run/gitlab";
+    RAILS_ENV = "production";
+    MALLOC_ARENA_MAX = "2";
+  } // cfg.extraEnv;
+
+  runtimeDeps = with pkgs; [
+    nodejs
+    gzip
+    git
+    gnutar
+    postgresqlPackage
+    coreutils
+    procps
+    findutils # Needed for gitlab:cleanup:orphan_job_artifact_files
+  ];
+
+  gitlab-rake = pkgs.stdenv.mkDerivation {
+    name = "gitlab-rake";
+    nativeBuildInputs = [ pkgs.makeWrapper ];
+    dontBuild = true;
+    dontUnpack = true;
+    installPhase = ''
+      mkdir -p $out/bin
+      makeWrapper ${cfg.packages.gitlab.rubyEnv}/bin/rake $out/bin/gitlab-rake \
+          ${concatStrings (mapAttrsToList (name: value: "--set ${name} '${value}' ") gitlabEnv)} \
+          --set PATH '${lib.makeBinPath runtimeDeps}:$PATH' \
+          --set RAKEOPT '-f ${cfg.packages.gitlab}/share/gitlab/Rakefile' \
+          --chdir '${cfg.packages.gitlab}/share/gitlab'
+     '';
+  };
+
+  gitlab-rails = pkgs.stdenv.mkDerivation {
+    name = "gitlab-rails";
+    nativeBuildInputs = [ pkgs.makeWrapper ];
+    dontBuild = true;
+    dontUnpack = true;
+    installPhase = ''
+      mkdir -p $out/bin
+      makeWrapper ${cfg.packages.gitlab.rubyEnv}/bin/rails $out/bin/gitlab-rails \
+          ${concatStrings (mapAttrsToList (name: value: "--set ${name} '${value}' ") gitlabEnv)} \
+          --set PATH '${lib.makeBinPath runtimeDeps}:$PATH' \
+          --chdir '${cfg.packages.gitlab}/share/gitlab'
+     '';
+  };
+
+  extraGitlabRb = pkgs.writeText "extra-gitlab.rb" cfg.extraGitlabRb;
+
+  smtpSettings = pkgs.writeText "gitlab-smtp-settings.rb" ''
+    if Rails.env.production?
+      Rails.application.config.action_mailer.delivery_method = :smtp
+
+      ActionMailer::Base.delivery_method = :smtp
+      ActionMailer::Base.smtp_settings = {
+        address: "${cfg.smtp.address}",
+        port: ${toString cfg.smtp.port},
+        ${optionalString (cfg.smtp.username != null) ''user_name: "${cfg.smtp.username}",''}
+        ${optionalString (cfg.smtp.passwordFile != null) ''password: "@smtpPassword@",''}
+        domain: "${cfg.smtp.domain}",
+        ${optionalString (cfg.smtp.authentication != null) "authentication: :${cfg.smtp.authentication},"}
+        enable_starttls_auto: ${boolToString cfg.smtp.enableStartTLSAuto},
+        tls: ${boolToString cfg.smtp.tls},
+        ca_file: "/etc/ssl/certs/ca-certificates.crt",
+        openssl_verify_mode: '${cfg.smtp.opensslVerifyMode}'
+      }
+    end
+  '';
+
+in {
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "gitlab" "stateDir" ] [ "services" "gitlab" "statePath" ])
+    (mkRenamedOptionModule [ "services" "gitlab" "backupPath" ] [ "services" "gitlab" "backup" "path" ])
+    (mkRemovedOptionModule [ "services" "gitlab" "satelliteDir" ] "")
+    (mkRemovedOptionModule [ "services" "gitlab" "logrotate" "extraConfig" ] "Modify services.logrotate.settings.gitlab directly instead")
+    (mkRemovedOptionModule [ "services" "gitlab" "pagesExtraArgs" ] "Use services.gitlab.pages.settings instead")
+  ];
+
+  options = {
+    services.gitlab = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the gitlab service.
+        '';
+      };
+
+      packages.gitlab = mkOption {
+        type = types.package;
+        default = pkgs.gitlab;
+        defaultText = literalExpression "pkgs.gitlab";
+        description = lib.mdDoc "Reference to the gitlab package";
+        example = literalExpression "pkgs.gitlab-ee";
+      };
+
+      packages.gitlab-shell = mkOption {
+        type = types.package;
+        default = pkgs.gitlab-shell;
+        defaultText = literalExpression "pkgs.gitlab-shell";
+        description = lib.mdDoc "Reference to the gitlab-shell package";
+      };
+
+      packages.gitlab-workhorse = mkOption {
+        type = types.package;
+        default = pkgs.gitlab-workhorse;
+        defaultText = literalExpression "pkgs.gitlab-workhorse";
+        description = lib.mdDoc "Reference to the gitlab-workhorse package";
+      };
+
+      packages.gitaly = mkOption {
+        type = types.package;
+        default = pkgs.gitaly;
+        defaultText = literalExpression "pkgs.gitaly";
+        description = lib.mdDoc "Reference to the gitaly package";
+      };
+
+      packages.pages = mkOption {
+        type = types.package;
+        default = pkgs.gitlab-pages;
+        defaultText = literalExpression "pkgs.gitlab-pages";
+        description = lib.mdDoc "Reference to the gitlab-pages package";
+      };
+
+      statePath = mkOption {
+        type = types.str;
+        default = "/var/gitlab/state";
+        description = lib.mdDoc ''
+          GitLab state directory. Configuration, repositories and
+          logs, among other things, are stored here.
+
+          The directory will be created automatically if it doesn't
+          exist already. Its parent directories must be owned by
+          either `root` or the user set in
+          {option}`services.gitlab.user`.
+        '';
+      };
+
+      extraEnv = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = lib.mdDoc ''
+          Additional environment variables for the GitLab environment.
+        '';
+      };
+
+      backup.startAt = mkOption {
+        type = with types; either str (listOf str);
+        default = [];
+        example = "03:00";
+        description = lib.mdDoc ''
+          The time(s) to run automatic backup of GitLab
+          state. Specified in systemd's time format; see
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+
+      backup.path = mkOption {
+        type = types.str;
+        default = cfg.statePath + "/backup";
+        defaultText = literalExpression ''config.${opt.statePath} + "/backup"'';
+        description = lib.mdDoc "GitLab path for backups.";
+      };
+
+      backup.keepTime = mkOption {
+        type = types.int;
+        default = 0;
+        example = 48;
+        apply = x: x * 60 * 60;
+        description = lib.mdDoc ''
+          How long to keep the backups around, in
+          hours. `0` means “keep forever”.
+        '';
+      };
+
+      backup.skip = mkOption {
+        type = with types;
+          let value = enum [
+                "db"
+                "uploads"
+                "builds"
+                "artifacts"
+                "lfs"
+                "registry"
+                "pages"
+                "repositories"
+                "tar"
+              ];
+          in
+            either value (listOf value);
+        default = [];
+        example = [ "artifacts" "lfs" ];
+        apply = x: if isString x then x else concatStringsSep "," x;
+        description = lib.mdDoc ''
+          Directories to exclude from the backup. The example excludes
+          CI artifacts and LFS objects from the backups. The
+          `tar` option skips the creation of a tar
+          file.
+
+          Refer to <https://docs.gitlab.com/ee/raketasks/backup_restore.html#excluding-specific-directories-from-the-backup>
+          for more information.
+        '';
+      };
+
+      backup.uploadOptions = mkOption {
+        type = types.attrs;
+        default = {};
+        example = literalExpression ''
+          {
+            # Fog storage connection settings, see http://fog.io/storage/
+            connection = {
+              provider = "AWS";
+              region = "eu-north-1";
+              aws_access_key_id = "AKIAXXXXXXXXXXXXXXXX";
+              aws_secret_access_key = { _secret = config.deployment.keys.aws_access_key.path; };
+            };
+
+            # The remote 'directory' to store your backups in.
+            # For S3, this would be the bucket name.
+            remote_directory = "my-gitlab-backups";
+
+            # Use multipart uploads when file size reaches 100MB, see
+            # http://docs.aws.amazon.com/AmazonS3/latest/dev/uploadobjusingmpu.html
+            multipart_chunk_size = 104857600;
+
+            # Turns on AWS Server-Side Encryption with Amazon S3-Managed Keys for backups, this is optional
+            encryption = "AES256";
+
+            # Specifies Amazon S3 storage class to use for backups, this is optional
+            storage_class = "STANDARD";
+          };
+        '';
+        description = lib.mdDoc ''
+          GitLab automatic upload specification. Tells GitLab to
+          upload the backup to a remote location when done.
+
+          Attributes specified here are added under
+          `production -> backup -> upload` in
+          {file}`config/gitlab.yml`.
+        '';
+      };
+
+      databaseHost = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          GitLab database hostname. An empty string means
+          “use local unix socket connection”.
+        '';
+      };
+
+      databasePasswordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        description = lib.mdDoc ''
+          File containing the GitLab database user password.
+
+          This should be a string, not a nix path, since nix paths are
+          copied into the world-readable nix store.
+        '';
+      };
+
+      databaseCreateLocally = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether a database should be automatically created on the
+          local host. Set this to `false` if you plan
+          on provisioning a local database yourself. This has no effect
+          if {option}`services.gitlab.databaseHost` is customized.
+        '';
+      };
+
+      databaseName = mkOption {
+        type = types.str;
+        default = "gitlab";
+        description = lib.mdDoc "GitLab database name.";
+      };
+
+      databaseUsername = mkOption {
+        type = types.str;
+        default = "gitlab";
+        description = lib.mdDoc "GitLab database user.";
+      };
+
+      databasePool = mkOption {
+        type = types.int;
+        default = 5;
+        description = lib.mdDoc "Database connection pool size.";
+      };
+
+      extraDatabaseConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc "Extra configuration in config/database.yml.";
+      };
+
+      redisUrl = mkOption {
+        type = types.str;
+        default = "unix:/run/gitlab/redis.sock";
+        example = "redis://localhost:6379/";
+        description = lib.mdDoc "Redis URL for all GitLab services.";
+      };
+
+      extraGitlabRb = mkOption {
+        type = types.str;
+        default = "";
+        example = ''
+          if Rails.env.production?
+            Rails.application.config.action_mailer.delivery_method = :sendmail
+            ActionMailer::Base.delivery_method = :sendmail
+            ActionMailer::Base.sendmail_settings = {
+              location: "/run/wrappers/bin/sendmail",
+              arguments: "-i -t"
+            }
+          end
+        '';
+        description = lib.mdDoc ''
+          Extra configuration to be placed in config/extra-gitlab.rb. This can
+          be used to add configuration not otherwise exposed through this module's
+          options.
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = config.networking.hostName;
+        defaultText = literalExpression "config.networking.hostName";
+        description = lib.mdDoc "GitLab host name. Used e.g. for copy-paste URLs.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = lib.mdDoc ''
+          GitLab server port for copy-paste URLs, e.g. 80 or 443 if you're
+          serving over https.
+        '';
+      };
+
+      https = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether gitlab prints URLs with https as scheme.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "gitlab";
+        description = lib.mdDoc "User to run gitlab and all related services.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "gitlab";
+        description = lib.mdDoc "Group to run gitlab and all related services.";
+      };
+
+      initialRootEmail = mkOption {
+        type = types.str;
+        default = "admin@local.host";
+        description = lib.mdDoc ''
+          Initial email address of the root account if this is a new install.
+        '';
+      };
+
+      initialRootPasswordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        description = lib.mdDoc ''
+          File containing the initial password of the root account if
+          this is a new install.
+
+          This should be a string, not a nix path, since nix paths are
+          copied into the world-readable nix store.
+        '';
+      };
+
+      registry = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Enable GitLab container registry.";
+        };
+        package = mkOption {
+          type = types.package;
+          default =
+            if versionAtLeast config.system.stateVersion "23.11"
+            then pkgs.gitlab-container-registry
+            else pkgs.docker-distribution;
+          defaultText = literalExpression "pkgs.docker-distribution";
+          description = lib.mdDoc ''
+            Container registry package to use.
+
+            External container registries such as `pkgs.docker-distribution` are no longer
+            supported since GitLab 16.0.0.
+          '';
+        };
+        host = mkOption {
+          type = types.str;
+          default = config.services.gitlab.host;
+          defaultText = literalExpression "config.services.gitlab.host";
+          description = lib.mdDoc "GitLab container registry host name.";
+        };
+        port = mkOption {
+          type = types.port;
+          default = 4567;
+          description = lib.mdDoc "GitLab container registry port.";
+        };
+        certFile = mkOption {
+          type = types.path;
+          description = lib.mdDoc "Path to GitLab container registry certificate.";
+        };
+        keyFile = mkOption {
+          type = types.path;
+          description = lib.mdDoc "Path to GitLab container registry certificate-key.";
+        };
+        defaultForProjects = mkOption {
+          type = types.bool;
+          default = cfg.registry.enable;
+          defaultText = literalExpression "config.${opt.registry.enable}";
+          description = lib.mdDoc "If GitLab container registry should be enabled by default for projects.";
+        };
+        issuer = mkOption {
+          type = types.str;
+          default = "gitlab-issuer";
+          description = lib.mdDoc "GitLab container registry issuer.";
+        };
+        serviceName = mkOption {
+          type = types.str;
+          default = "container_registry";
+          description = lib.mdDoc "GitLab container registry service name.";
+        };
+        externalAddress = mkOption {
+          type = types.str;
+          default = "";
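+          # Illustrative value; the public host name clients use to reach the registry.
+          example = "registry.example.com";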
+          description = lib.mdDoc "External address used to access the registry from the internet.";
+        };
+        externalPort = mkOption {
+          type = types.int;
+          description = lib.mdDoc "External port used to access the registry from the internet.";
+        };
+      };
+
+      smtp = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Enable gitlab mail delivery over SMTP.";
+        };
+
+        address = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc "Address of the SMTP server for GitLab.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 25;
+          description = lib.mdDoc "Port of the SMTP server for GitLab.";
+        };
+
+        username = mkOption {
+          type = with types; nullOr str;
+          default = null;
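+          # Illustrative value; depends on your SMTP provider's account naming.
+          example = "notifications@example.com";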
+          description = lib.mdDoc "Username of the SMTP server for GitLab.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
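+          # Illustrative path; keep the password file outside the world-readable Nix store.
+          example = "/run/keys/gitlab-smtp-password";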
+          description = lib.mdDoc ''
+            File containing the password of the SMTP server for GitLab.
+
+            This should be a string, not a nix path, since nix paths
+            are copied into the world-readable nix store.
+          '';
+        };
+
+        domain = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc "HELO domain to use for outgoing mail.";
+        };
+
+        authentication = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc "Authentication type to use, see <https://api.rubyonrails.org/classes/ActionMailer/Base.html>.";
+        };
+
+        enableStartTLSAuto = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to try to use StartTLS.";
+        };
+
+        tls = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Whether to use TLS wrapper-mode.";
+        };
+
+        opensslVerifyMode = mkOption {
+          type = types.str;
+          default = "peer";
+          description = lib.mdDoc "How OpenSSL checks the certificate, see <https://api.rubyonrails.org/classes/ActionMailer/Base.html>.";
+        };
+      };
+
+      pages.enable = mkEnableOption (lib.mdDoc "the GitLab Pages service");
+
+      pages.settings = mkOption {
+        example = literalExpression ''
+          {
+            pages-domain = "example.com";
+            auth-client-id = "generated-id-xxxxxxx";
+            auth-client-secret = { _secret = "/var/keys/auth-client-secret"; };
+            auth-redirect-uri = "https://projects.example.com/auth";
+            auth-secret = { _secret = "/var/keys/auth-secret"; };
+            auth-server = "https://gitlab.example.com";
+          }
+        '';
+
+        description = lib.mdDoc ''
+          Configuration options to set in the GitLab Pages config
+          file.
+
+          Options containing secret data should be set to an attribute
+          set containing the attribute `_secret` - a string pointing
+          to a file containing the value the option should be set
+          to. See the example to get a better picture of this: in the
+          resulting configuration file, the `auth-client-secret` and
+          `auth-secret` keys will be set to the contents of the
+          {file}`/var/keys/auth-client-secret` and
+          {file}`/var/keys/auth-secret` files respectively.
+        '';
+
+        type = types.submodule {
+          freeformType = with types; attrsOf (nullOr (oneOf [ str int bool attrs ]));
+
+          options = {
+            listen-http = mkOption {
+              type = with types; listOf str;
+              apply = x: if x == [] then null else lib.concatStringsSep "," x;
+              default = [];
+              description = lib.mdDoc ''
+                The address(es) to listen on for HTTP requests.
+              '';
+            };
+
+            listen-https = mkOption {
+              type = with types; listOf str;
+              apply = x: if x == [] then null else lib.concatStringsSep "," x;
+              default = [];
+              description = lib.mdDoc ''
+                The address(es) to listen on for HTTPS requests.
+              '';
+            };
+
+            listen-proxy = mkOption {
+              type = with types; listOf str;
+              apply = x: if x == [] then null else lib.concatStringsSep "," x;
+              default = [ "127.0.0.1:8090" ];
+              description = lib.mdDoc ''
+                The address(es) to listen on for proxy requests.
+              '';
+            };
+
+            artifacts-server = mkOption {
+              type = with types; nullOr str;
+              default = "http${optionalString cfg.https "s"}://${cfg.host}/api/v4";
+              defaultText = "http(s)://<services.gitlab.host>/api/v4";
+              example = "https://gitlab.example.com/api/v4";
+              description = lib.mdDoc ''
+                API URL to proxy artifact requests to.
+              '';
+            };
+
+            gitlab-server = mkOption {
+              type = with types; nullOr str;
+              default = "http${optionalString cfg.https "s"}://${cfg.host}";
+              defaultText = "http(s)://<services.gitlab.host>";
+              example = "https://gitlab.example.com";
+              description = lib.mdDoc ''
+                Public GitLab server URL.
+              '';
+            };
+
+            internal-gitlab-server = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              defaultText = "http(s)://<services.gitlab.host>";
+              example = "https://gitlab.example.internal";
+              description = lib.mdDoc ''
+                Internal GitLab server used for API requests, useful
+                if you want to send that traffic over an internal load
+                balancer. By default, the value of
+                `services.gitlab.pages.settings.gitlab-server` is
+                used.
+              '';
+            };
+
+            api-secret-key = mkOption {
+              type = with types; nullOr str;
+              default = "${cfg.statePath}/gitlab_pages_secret";
+              internal = true;
+              description = lib.mdDoc ''
+                File with secret key used to authenticate with the
+                GitLab API.
+              '';
+            };
+
+            pages-domain = mkOption {
+              type = with types; nullOr str;
+              example = "example.com";
+              description = lib.mdDoc ''
+                The domain to serve static pages on.
+              '';
+            };
+
+            pages-root = mkOption {
+              type = types.str;
+              default = "${gitlabConfig.production.shared.path}/pages";
+              defaultText = literalExpression ''config.${opt.extraConfig}.production.shared.path + "/pages"'';
+              description = lib.mdDoc ''
+                The directory where pages are stored.
+              '';
+            };
+          };
+        };
+      };
+
+      secrets.secretFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
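+        # Illustrative path; keep the secret outside the world-readable Nix store.
+        example = "/run/keys/gitlab-secret";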
+        description = lib.mdDoc ''
+          A file containing the secret used to encrypt variables in
+          the DB. If you change or lose this key, you will be unable to
+          access variables stored in the database.
+
+          Make sure the secret is at least 32 characters long and completely
+          random, with no regular words, or you'll be exposed to dictionary attacks.
+
+          This should be a string, not a nix path, since nix paths are
+          copied into the world-readable nix store.
+        '';
+      };
+
+      secrets.dbFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        description = lib.mdDoc ''
+          A file containing the secret used to encrypt variables in
+          the DB. If you change or lose this key, you will be unable to
+          access variables stored in the database.
+
+          Make sure the secret is at least 32 characters long and completely
+          random, with no regular words, or you'll be exposed to dictionary attacks.
+
+          This should be a string, not a nix path, since nix paths are
+          copied into the world-readable nix store.
+        '';
+      };
+
+      secrets.otpFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        description = lib.mdDoc ''
+          A file containing the secret used to encrypt secrets for OTP
+          tokens. If you change or lose this key, users who have 2FA
+          enabled won't be able to log in anymore.
+
+          Make sure the secret is at least 32 characters long and completely
+          random, with no regular words, or you'll be exposed to dictionary attacks.
+
+          This should be a string, not a nix path, since nix paths are
+          copied into the world-readable nix store.
+        '';
+      };
+
+      secrets.jwsFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
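+        # Illustrative path to a PEM-encoded RSA private key kept outside the Nix store.
+        example = "/run/keys/gitlab-jws-key";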
+        description = lib.mdDoc ''
+          A file containing the secret used to encrypt session
+          keys. If you change or lose this key, users will be
+          disconnected.
+
+          Make sure the secret is an RSA private key in PEM format. You can
+          generate one with
+
+          `openssl genrsa 2048`
+
+          This should be a string, not a nix path, since nix paths are
+          copied into the world-readable nix store.
+        '';
+      };
+
+      extraShellConfig = mkOption {
+        type = types.attrs;
+        default = {};
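+        # Illustrative only; log_level stands in for whichever shell-config.yml key you want to override.
+        example = { log_level = "INFO"; };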
+        description = lib.mdDoc "Extra configuration to merge into shell-config.yml";
+      };
+
+      puma.workers = mkOption {
+        type = types.int;
+        default = 2;
+        apply = x: builtins.toString x;
+        description = lib.mdDoc ''
+          The number of worker processes Puma should spawn. This
+          controls how much Ruby code can be executed in
+          parallel. GitLab recommends `Number of CPU cores - 1`, but at least two.
+
+          ::: {.note}
+          Each worker consumes quite a bit of memory, so
+          be careful when increasing this.
+          :::
+        '';
+      };
+
+      puma.threadsMin = mkOption {
+        type = types.int;
+        default = 0;
+        apply = x: builtins.toString x;
+        description = lib.mdDoc ''
+          The minimum number of threads Puma should use per
+          worker.
+
+          ::: {.note}
+          Each thread consumes memory and contributes to Global VM
+          Lock contention, so be careful when increasing this.
+          :::
+        '';
+      };
+
+      puma.threadsMax = mkOption {
+        type = types.int;
+        default = 4;
+        apply = x: builtins.toString x;
+        description = lib.mdDoc ''
+          The maximum number of threads Puma should use per
+          worker. This limits how many threads Puma will automatically
+          spawn in response to requests. In contrast to workers,
+          threads will never be able to run Ruby code in parallel, but
+          they provide higher IO parallelism.
+
+          ::: {.note}
+          Each thread consumes memory and contributes to Global VM
+          Lock contention, so be careful when increasing this.
+          :::
+        '';
+      };
+
+      sidekiq.memoryKiller.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether the Sidekiq MemoryKiller should be turned
+          on. MemoryKiller kills Sidekiq when its memory consumption
+          exceeds a certain limit.
+
+          See <https://docs.gitlab.com/ee/administration/operations/sidekiq_memory_killer.html>
+          for details.
+        '';
+      };
+
+      sidekiq.memoryKiller.maxMemory = mkOption {
+        type = types.int;
+        default = 2000;
+        apply = x: builtins.toString (x * 1024);
+        description = lib.mdDoc ''
+          The maximum amount of memory, in MiB, a Sidekiq worker is
+          allowed to consume before being killed.
+        '';
+      };
+
+      sidekiq.memoryKiller.graceTime = mkOption {
+        type = types.int;
+        default = 900;
+        apply = x: builtins.toString x;
+        description = lib.mdDoc ''
+          The time MemoryKiller waits after noticing excessive memory
+          consumption before killing Sidekiq.
+        '';
+      };
+
+      sidekiq.memoryKiller.shutdownWait = mkOption {
+        type = types.int;
+        default = 30;
+        apply = x: builtins.toString x;
+        description = lib.mdDoc ''
+          The time allowed for all jobs to finish before Sidekiq is
+          killed forcefully.
+        '';
+      };
+
+      logrotate = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Enable rotation of log files.
+          '';
+        };
+
+        frequency = mkOption {
+          type = types.str;
+          default = "daily";
+          description = lib.mdDoc "How often to rotate the logs.";
+        };
+
+        keep = mkOption {
+          type = types.int;
+          default = 30;
+          description = lib.mdDoc "How many rotations to keep.";
+        };
+      };
+
+      workhorse.config = mkOption {
+        type = toml.type;
+        default = {};
+        example = literalExpression ''
+          {
+            object_storage.provider = "AWS";
+            object_storage.s3 = {
+              aws_access_key_id = "AKIAXXXXXXXXXXXXXXXX";
+              aws_secret_access_key = { _secret = "/var/keys/aws_secret_access_key"; };
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          Configuration options to add to Workhorse's configuration
+          file.
+
+          See
+          <https://gitlab.com/gitlab-org/gitlab/-/blob/master/workhorse/config.toml.example>
+          and
+          <https://docs.gitlab.com/ee/development/workhorse/configuration.html>
+          for examples and option documentation.
+
+          Options containing secret data should be set to an attribute
+          set containing the attribute `_secret` - a string pointing
+          to a file containing the value the option should be set
+          to. See the example to get a better picture of this: in the
+          resulting configuration file, the
+          `object_storage.s3.aws_secret_access_key` key will be set to
+          the contents of the {file}`/var/keys/aws_secret_access_key`
+          file.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = yaml.type;
+        default = {};
+        example = literalExpression ''
+          {
+            gitlab = {
+              default_projects_features = {
+                builds = false;
+              };
+            };
+            omniauth = {
+              enabled = true;
+              auto_sign_in_with_provider = "openid_connect";
+              allow_single_sign_on = ["openid_connect"];
+              block_auto_created_users = false;
+              providers = [
+                {
+                  name = "openid_connect";
+                  label = "OpenID Connect";
+                  args = {
+                    name = "openid_connect";
+                    scope = ["openid" "profile"];
+                    response_type = "code";
+                    issuer = "https://keycloak.example.com/auth/realms/My%20Realm";
+                    discovery = true;
+                    client_auth_method = "query";
+                    uid_field = "preferred_username";
+                    client_options = {
+                      identifier = "gitlab";
+                      secret = { _secret = "/var/keys/gitlab_oidc_secret"; };
+                      redirect_uri = "https://git.example.com/users/auth/openid_connect/callback";
+                    };
+                  };
+                }
+              ];
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          Extra options to be added under
+          `production` in
+          {file}`config/gitlab.yml`, as a nix attribute
+          set.
+
+          Options containing secret data should be set to an attribute
+          set containing the attribute `_secret` - a
+          string pointing to a file containing the value the option
+          should be set to. See the example to get a better picture of
+          this: in the resulting
+          {file}`config/gitlab.yml` file, the
+          `production.omniauth.providers[0].args.client_options.secret`
+          key will be set to the contents of the
+          {file}`/var/keys/gitlab_oidc_secret` file.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = [
+      (mkIf
+        (cfg.registry.enable && versionAtLeast (getVersion cfg.packages.gitlab) "16.0.0" && cfg.registry.package == pkgs.docker-distribution)
+        ''Support for container registries other than gitlab-container-registry has ended since GitLab 16.0.0 and is scheduled for removal in a future release.
+          Please back up your data and migrate to the gitlab-container-registry package.''
+      )
+      (mkIf
+        (versionAtLeast (getVersion cfg.packages.gitlab) "16.2.0" && versionOlder (getVersion cfg.packages.gitlab) "16.5.0")
+        ''GitLab instances created or updated between versions [15.11.0, 15.11.2] have an incorrect database schema.
+          Check the upstream documentation for a workaround: https://docs.gitlab.com/ee/update/versions/gitlab_16_changes.html#undefined-column-error-upgrading-to-162-or-later''
+      )
+    ];
+
+    assertions = [
+      {
+        assertion = databaseActuallyCreateLocally -> (cfg.user == cfg.databaseUsername);
+        message = ''For local automatic database provisioning (services.gitlab.databaseCreateLocally == true) with peer authentication (services.gitlab.databaseHost == "") to work services.gitlab.user and services.gitlab.databaseUsername must be identical.'';
+      }
+      {
+        assertion = (cfg.databaseHost != "") -> (cfg.databasePasswordFile != null);
+        message = "When services.gitlab.databaseHost is customized, services.gitlab.databasePasswordFile must be set!";
+      }
+      {
+        assertion = cfg.initialRootPasswordFile != null;
+        message = "services.gitlab.initialRootPasswordFile must be set!";
+      }
+      {
+        assertion = cfg.secrets.secretFile != null;
+        message = "services.gitlab.secrets.secretFile must be set!";
+      }
+      {
+        assertion = cfg.secrets.dbFile != null;
+        message = "services.gitlab.secrets.dbFile must be set!";
+      }
+      {
+        assertion = cfg.secrets.otpFile != null;
+        message = "services.gitlab.secrets.otpFile must be set!";
+      }
+      {
+        assertion = cfg.secrets.jwsFile != null;
+        message = "services.gitlab.secrets.jwsFile must be set!";
+      }
+      {
+        assertion = versionAtLeast postgresqlPackage.version "13.6.0";
+        message = "PostgreSQL >=13.6 is required to run GitLab 16. Follow the instructions in the manual section for upgrading PostgreSQL here: https://nixos.org/manual/nixos/stable/index.html#module-services-postgres-upgrading";
+      }
+    ];
+
+    environment.systemPackages = [ pkgs.git gitlab-rake gitlab-rails cfg.packages.gitlab-shell ];
+
+    systemd.targets.gitlab = {
+      description = "Common target for all GitLab services.";
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    # Redis is required for the sidekiq queue runner.
+    services.redis.servers.gitlab = {
+      enable = mkDefault true;
+      user = mkDefault cfg.user;
+      unixSocket = mkDefault "/run/gitlab/redis.sock";
+      unixSocketPerm = mkDefault 770;
+    };
+
+    # We use postgres as the main data store.
+    services.postgresql = optionalAttrs databaseActuallyCreateLocally {
+      enable = true;
+      ensureUsers = singleton { name = cfg.databaseUsername; };
+    };
+
+    # Enable rotation of log files
+    services.logrotate = {
+      enable = cfg.logrotate.enable;
+      settings = {
+        gitlab = {
+          files = "${cfg.statePath}/log/*.log";
+          su = "${cfg.user} ${cfg.group}";
+          frequency = cfg.logrotate.frequency;
+          rotate = cfg.logrotate.keep;
+          copytruncate = true;
+          compress = true;
+        };
+      };
+    };
+
+    # The postgresql module doesn't currently support concepts like
+    # objects owners and extensions; for now we tack on what's needed
+    # here.
+    systemd.services.gitlab-postgresql = let pgsql = config.services.postgresql; in mkIf databaseActuallyCreateLocally {
+      after = [ "postgresql.service" ];
+      bindsTo = [ "postgresql.service" ];
+      wantedBy = [ "gitlab.target" ];
+      partOf = [ "gitlab.target" ];
+      path = [
+        pgsql.package
+        pkgs.util-linux
+      ];
+      script = ''
+        set -eu
+
+        PSQL() {
+            psql --port=${toString pgsql.port} "$@"
+        }
+
+        PSQL -tAc "SELECT 1 FROM pg_database WHERE datname = '${cfg.databaseName}'" | grep -q 1 || PSQL -tAc 'CREATE DATABASE "${cfg.databaseName}" OWNER "${cfg.databaseUsername}"'
+        current_owner=$(PSQL -tAc "SELECT pg_catalog.pg_get_userbyid(datdba) FROM pg_catalog.pg_database WHERE datname = '${cfg.databaseName}'")
+        if [[ "$current_owner" != "${cfg.databaseUsername}" ]]; then
+            PSQL -tAc 'ALTER DATABASE "${cfg.databaseName}" OWNER TO "${cfg.databaseUsername}"'
+            if [[ -e "${config.services.postgresql.dataDir}/.reassigning_${cfg.databaseName}" ]]; then
+                echo "Reassigning ownership of database ${cfg.databaseName} to user ${cfg.databaseUsername} failed on last boot. Failing..."
+                exit 1
+            fi
+            touch "${config.services.postgresql.dataDir}/.reassigning_${cfg.databaseName}"
+            PSQL "${cfg.databaseName}" -tAc "REASSIGN OWNED BY \"$current_owner\" TO \"${cfg.databaseUsername}\""
+            rm "${config.services.postgresql.dataDir}/.reassigning_${cfg.databaseName}"
+        fi
+        PSQL '${cfg.databaseName}' -tAc "CREATE EXTENSION IF NOT EXISTS pg_trgm"
+        PSQL '${cfg.databaseName}' -tAc "CREATE EXTENSION IF NOT EXISTS btree_gist;"
+      '';
+
+      serviceConfig = {
+        User = pgsql.superUser;
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+
+    systemd.services.gitlab-registry-cert = optionalAttrs cfg.registry.enable {
+      path = with pkgs; [ openssl ];
+
+      script = ''
+        mkdir -p $(dirname ${cfg.registry.keyFile})
+        mkdir -p $(dirname ${cfg.registry.certFile})
+        openssl req -nodes -newkey rsa:4096 -keyout ${cfg.registry.keyFile} -out /tmp/registry-auth.csr -subj "/CN=${cfg.registry.issuer}"
+        openssl x509 -in /tmp/registry-auth.csr -out ${cfg.registry.certFile} -req -signkey ${cfg.registry.keyFile} -days 3650
+        chown ${cfg.user}:${cfg.group} $(dirname ${cfg.registry.keyFile})
+        chown ${cfg.user}:${cfg.group} $(dirname ${cfg.registry.certFile})
+        chown ${cfg.user}:${cfg.group} ${cfg.registry.keyFile}
+        chown ${cfg.user}:${cfg.group} ${cfg.registry.certFile}
+      '';
+
+      unitConfig = {
+        ConditionPathExists = "!${cfg.registry.certFile}";
+      };
+    };
+
+    # Ensure Docker Registry launches after the certificate generation job
+    systemd.services.docker-registry = optionalAttrs cfg.registry.enable {
+      wants = [ "gitlab-registry-cert.service" ];
+      after = [ "gitlab-registry-cert.service" ];
+    };
+
+    # Enable Docker Registry, if GitLab-Container Registry is enabled
+    services.dockerRegistry = optionalAttrs cfg.registry.enable {
+      enable = true;
+      enableDelete = true; # This must be true, otherwise GitLab won't manage it correctly
+      package = cfg.registry.package;
+      extraConfig = {
+        auth.token = {
+          realm = "http${optionalString cfg.https "s"}://${cfg.host}/jwt/auth";
+          service = cfg.registry.serviceName;
+          issuer = cfg.registry.issuer;
+          rootcertbundle = cfg.registry.certFile;
+        };
+      };
+    };
+
+    # Use postfix to send out mails.
+    services.postfix.enable = mkDefault (cfg.smtp.enable && cfg.smtp.address == "localhost");
+
+    users.users.${cfg.user} =
+      { group = cfg.group;
+        home = "${cfg.statePath}/home";
+        shell = "${pkgs.bash}/bin/bash";
+        uid = config.ids.uids.gitlab;
+      };
+
+    users.groups.${cfg.group}.gid = config.ids.gids.gitlab;
+
+    systemd.tmpfiles.rules = [
+      "d /run/gitlab 0755 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabEnv.HOME} 0750 ${cfg.user} ${cfg.group} -"
+      "z ${gitlabEnv.HOME}/.ssh/authorized_keys 0600 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.backup.path} 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath} 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/builds 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/config 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/db 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/log 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/repositories 2770 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/shell 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/tmp 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/tmp/pids 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/tmp/sockets 0750 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/uploads 0700 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/custom_hooks 0700 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/custom_hooks/pre-receive.d 0700 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/custom_hooks/post-receive.d 0700 ${cfg.user} ${cfg.group} -"
+      "d ${cfg.statePath}/custom_hooks/update.d 0700 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path} 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/artifacts 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/lfs-objects 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/packages 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/pages 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/registry 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/terraform_state 0750 ${cfg.user} ${cfg.group} -"
+      "d ${gitlabConfig.production.shared.path}/ci_secure_files 0750 ${cfg.user} ${cfg.group} -"
+      "L+ /run/gitlab/config - - - - ${cfg.statePath}/config"
+      "L+ /run/gitlab/log - - - - ${cfg.statePath}/log"
+      "L+ /run/gitlab/tmp - - - - ${cfg.statePath}/tmp"
+      "L+ /run/gitlab/uploads - - - - ${cfg.statePath}/uploads"
+
+      "L+ /run/gitlab/shell-config.yml - - - - ${pkgs.writeText "config.yml" (builtins.toJSON gitlabShellConfig)}"
+    ];
+
+
+    systemd.services.gitlab-config = {
+      wantedBy = [ "gitlab.target" ];
+      partOf = [ "gitlab.target" ];
+      path = with pkgs; [
+        jq
+        openssl
+        replace-secret
+        git
+      ];
+      serviceConfig = {
+        Type = "oneshot";
+        User = cfg.user;
+        Group = cfg.group;
+        TimeoutSec = "infinity";
+        Restart = "on-failure";
+        WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
+        RemainAfterExit = true;
+
+        ExecStartPre = let
+          preStartFullPrivileges = ''
+            set -o errexit -o pipefail -o nounset
+            shopt -s dotglob nullglob inherit_errexit
+
+            chown --no-dereference '${cfg.user}':'${cfg.group}' '${cfg.statePath}'/*
+            if [[ -n "$(ls -A '${cfg.statePath}'/config/)" ]]; then
+              chown --no-dereference '${cfg.user}':'${cfg.group}' '${cfg.statePath}'/config/*
+            fi
+          '';
+        in "+${pkgs.writeShellScript "gitlab-pre-start-full-privileges" preStartFullPrivileges}";
+
+        ExecStart = pkgs.writeShellScript "gitlab-config" ''
+          set -o errexit -o pipefail -o nounset
+          shopt -s inherit_errexit
+
+          umask u=rwx,g=rx,o=
+
+          cp -f ${cfg.packages.gitlab}/share/gitlab/VERSION ${cfg.statePath}/VERSION
+          rm -rf ${cfg.statePath}/db/*
+          rm -f ${cfg.statePath}/lib
+          find '${cfg.statePath}/config/' -maxdepth 1 -mindepth 1 -type d -execdir rm -rf {} \;
+          cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/config.dist/* ${cfg.statePath}/config
+          cp -rf --no-preserve=mode ${cfg.packages.gitlab}/share/gitlab/db/* ${cfg.statePath}/db
+          ln -sf ${extraGitlabRb} ${cfg.statePath}/config/initializers/extra-gitlab.rb
+          ln -sf ${cableYml} ${cfg.statePath}/config/cable.yml
+          ln -sf ${resqueYml} ${cfg.statePath}/config/resque.yml
+
+          ${cfg.packages.gitlab-shell}/bin/install
+
+          ${optionalString cfg.smtp.enable ''
+              install -m u=rw ${smtpSettings} ${cfg.statePath}/config/initializers/smtp_settings.rb
+              ${optionalString (cfg.smtp.passwordFile != null) ''
+                  replace-secret '@smtpPassword@' '${cfg.smtp.passwordFile}' '${cfg.statePath}/config/initializers/smtp_settings.rb'
+              ''}
+          ''}
+
+          (
+            umask u=rwx,g=,o=
+
+            openssl rand -hex 32 > ${cfg.statePath}/gitlab_shell_secret
+            ${optionalString cfg.pages.enable ''
+                openssl rand -base64 32 > ${cfg.pages.settings.api-secret-key}
+            ''}
+
+            rm -f '${cfg.statePath}/config/database.yml'
+
+            ${if cfg.databasePasswordFile != null then ''
+                db_password="$(<'${cfg.databasePasswordFile}')"
+                export db_password
+
+                if [[ -z "$db_password" ]]; then
+                  >&2 echo "Database password was an empty string!"
+                  exit 1
+                fi
+
+                jq <${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} \
+                   '.${if lib.versionAtLeast (lib.getVersion cfg.packages.gitlab) "15.0" then "production.main" else "production"}.password = $ENV.db_password' \
+                   >'${cfg.statePath}/config/database.yml'
+              ''
+              else ''
+                jq <${pkgs.writeText "database.yml" (builtins.toJSON databaseConfig)} \
+                   >'${cfg.statePath}/config/database.yml'
+              ''
+            }
+
+            ${utils.genJqSecretsReplacementSnippet
+                gitlabConfig
+                "${cfg.statePath}/config/gitlab.yml"
+            }
+
+            rm -f '${cfg.statePath}/config/secrets.yml'
+
+            secret="$(<'${cfg.secrets.secretFile}')"
+            db="$(<'${cfg.secrets.dbFile}')"
+            otp="$(<'${cfg.secrets.otpFile}')"
+            jws="$(<'${cfg.secrets.jwsFile}')"
+            export secret db otp jws
+            jq -n '{production: {secret_key_base: $ENV.secret,
+                    otp_key_base: $ENV.otp,
+                    db_key_base: $ENV.db,
+                    openid_connect_signing_key: $ENV.jws}}' \
+               > '${cfg.statePath}/config/secrets.yml'
+          )
+
+          # We remove potentially broken links to old gitlab-shell versions
+          rm -Rf ${cfg.statePath}/repositories/**/*.git/hooks
+
+          git config --global core.autocrlf "input"
+        '';
+      };
+    };
+
+    systemd.services.gitlab-db-config = {
+      after = [ "gitlab-config.service" "gitlab-postgresql.service" "postgresql.service" ];
+      bindsTo = [
+        "gitlab-config.service"
+      ] ++ optional (cfg.databaseHost == "") "postgresql.service"
+        ++ optional databaseActuallyCreateLocally "gitlab-postgresql.service";
+      wantedBy = [ "gitlab.target" ];
+      partOf = [ "gitlab.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        User = cfg.user;
+        Group = cfg.group;
+        TimeoutSec = "infinity";
+        Restart = "on-failure";
+        WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
+        RemainAfterExit = true;
+
+        ExecStart = pkgs.writeShellScript "gitlab-db-config" ''
+          set -o errexit -o pipefail -o nounset
+          shopt -s inherit_errexit
+          umask u=rwx,g=rx,o=
+
+          initial_root_password="$(<'${cfg.initialRootPasswordFile}')"
+          ${gitlab-rake}/bin/gitlab-rake gitlab:db:configure GITLAB_ROOT_PASSWORD="$initial_root_password" \
+                                                             GITLAB_ROOT_EMAIL='${cfg.initialRootEmail}' > /dev/null
+        '';
+      };
+    };
+
+    systemd.services.gitlab-sidekiq = {
+      after = [
+        "network.target"
+        "redis-gitlab.service"
+        "postgresql.service"
+        "gitlab-config.service"
+        "gitlab-db-config.service"
+      ];
+      bindsTo = [
+        "redis-gitlab.service"
+        "gitlab-config.service"
+        "gitlab-db-config.service"
+      ] ++ optional (cfg.databaseHost == "") "postgresql.service";
+      wantedBy = [ "gitlab.target" ];
+      partOf = [ "gitlab.target" ];
+      environment = gitlabEnv // (optionalAttrs cfg.sidekiq.memoryKiller.enable {
+        SIDEKIQ_MEMORY_KILLER_MAX_RSS = cfg.sidekiq.memoryKiller.maxMemory;
+        SIDEKIQ_MEMORY_KILLER_GRACE_TIME = cfg.sidekiq.memoryKiller.graceTime;
+        SIDEKIQ_MEMORY_KILLER_SHUTDOWN_WAIT = cfg.sidekiq.memoryKiller.shutdownWait;
+      });
+      path = with pkgs; [
+        postgresqlPackage
+        git
+        ruby
+        openssh
+        nodejs
+        gnupg
+
+        # Needed for GitLab project imports
+        gnutar
+        gzip
+
+        procps # Sidekiq MemoryKiller
+      ];
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        TimeoutSec = "infinity";
+        Restart = "always";
+        WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
+        ExecStart = "${cfg.packages.gitlab.rubyEnv}/bin/sidekiq -C \"${cfg.packages.gitlab}/share/gitlab/config/sidekiq_queues.yml\" -e production";
+      };
+    };
+
+    systemd.services.gitaly = {
+      after = [ "network.target" "gitlab-config.service" ];
+      bindsTo = [ "gitlab-config.service" ];
+      wantedBy = [ "gitlab.target" ];
+      partOf = [ "gitlab.target" ];
+      path = with pkgs; [
+        openssh
+        git
+        gzip
+        bzip2
+      ];
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        TimeoutSec = "infinity";
+        Restart = "on-failure";
+        WorkingDirectory = gitlabEnv.HOME;
+        RuntimeDirectory = "gitaly";
+        ExecStart = "${cfg.packages.gitaly}/bin/gitaly ${gitalyToml}";
+      };
+    };
+
+    services.gitlab.pages.settings = {
+      api-secret-key = "${cfg.statePath}/gitlab_pages_secret";
+    };
+
+    systemd.services.gitlab-pages =
+      let
+        filteredConfig = filterAttrs (_: v: v != null) cfg.pages.settings;
+        isSecret = v: isAttrs v && v ? _secret && isString v._secret;
+        mkPagesKeyValue = lib.generators.toKeyValue {
+          mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" rec {
+            mkValueString = v:
+              if isInt           v then toString v
+              else if isString   v then v
+              else if true  ==   v then "true"
+              else if false ==   v then "false"
+              else if isSecret   v then builtins.hashString "sha256" v._secret
+              else throw "unsupported type ${builtins.typeOf v}: ${(lib.generators.toPretty {}) v}";
+          };
+        };
+        secretPaths = lib.catAttrs "_secret" (lib.collect isSecret filteredConfig);
+        mkSecretReplacement = file: ''
+          replace-secret ${lib.escapeShellArgs [ (builtins.hashString "sha256" file) file "/run/gitlab-pages/gitlab-pages.conf" ]}
+        '';
+        secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
+        configFile = pkgs.writeText "gitlab-pages.conf" (mkPagesKeyValue filteredConfig);
+      in
+        mkIf cfg.pages.enable {
+          description = "GitLab static pages daemon";
+          after = [ "network.target" "gitlab-config.service" "gitlab.service" ];
+          bindsTo = [ "gitlab-config.service" "gitlab.service" ];
+          wantedBy = [ "gitlab.target" ];
+          partOf = [ "gitlab.target" ];
+
+          path = with pkgs; [
+            unzip
+            replace-secret
+          ];
+
+          serviceConfig = {
+            Type = "simple";
+            TimeoutSec = "infinity";
+            Restart = "on-failure";
+
+            User = cfg.user;
+            Group = cfg.group;
+
+            ExecStartPre = pkgs.writeShellScript "gitlab-pages-pre-start" ''
+              set -o errexit -o pipefail -o nounset
+              shopt -s dotglob nullglob inherit_errexit
+
+              install -m u=rw ${configFile} /run/gitlab-pages/gitlab-pages.conf
+              ${secretReplacements}
+            '';
+            ExecStart = "${cfg.packages.pages}/bin/gitlab-pages -config=/run/gitlab-pages/gitlab-pages.conf";
+            WorkingDirectory = gitlabEnv.HOME;
+            RuntimeDirectory = "gitlab-pages";
+            RuntimeDirectoryMode = "0700";
+          };
+        };
+
+    systemd.services.gitlab-workhorse = {
+      after = [ "network.target" ];
+      wantedBy = [ "gitlab.target" ];
+      partOf = [ "gitlab.target" ];
+      path = with pkgs; [
+        remarshal
+        exiftool
+        git
+        gnutar
+        gzip
+        openssh
+        gitlab-workhorse
+      ];
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        TimeoutSec = "infinity";
+        Restart = "on-failure";
+        WorkingDirectory = gitlabEnv.HOME;
+        ExecStartPre = pkgs.writeShellScript "gitlab-workhorse-pre-start" ''
+          set -o errexit -o pipefail -o nounset
+          shopt -s dotglob nullglob inherit_errexit
+
+          ${utils.genJqSecretsReplacementSnippet
+              cfg.workhorse.config
+              "${cfg.statePath}/config/gitlab-workhorse.json"}
+
+          json2toml "${cfg.statePath}/config/gitlab-workhorse.json" "${cfg.statePath}/config/gitlab-workhorse.toml"
+          rm "${cfg.statePath}/config/gitlab-workhorse.json"
+        '';
+        ExecStart =
+          "${cfg.packages.gitlab-workhorse}/bin/workhorse "
+          + "-listenUmask 0 "
+          + "-listenNetwork unix "
+          + "-listenAddr /run/gitlab/gitlab-workhorse.socket "
+          + "-authSocket ${gitlabSocket} "
+          + "-documentRoot ${cfg.packages.gitlab}/share/gitlab/public "
+          + "-config ${cfg.statePath}/config/gitlab-workhorse.toml "
+          + "-secretPath ${cfg.statePath}/.gitlab_workhorse_secret";
+      };
+    };
+
+    systemd.services.gitlab-mailroom = mkIf (gitlabConfig.production.incoming_email.enabled or false) {
+      description = "GitLab incoming mail daemon";
+      after = [ "network.target" "redis-gitlab.service" "gitlab-config.service" ];
+      bindsTo = [ "gitlab-config.service" ];
+      wantedBy = [ "gitlab.target" ];
+      partOf = [ "gitlab.target" ];
+      environment = gitlabEnv;
+      serviceConfig = {
+        Type = "simple";
+        TimeoutSec = "infinity";
+        Restart = "on-failure";
+
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.packages.gitlab.rubyEnv}/bin/bundle exec mail_room -c ${cfg.statePath}/config/mail_room.yml";
+        WorkingDirectory = gitlabEnv.HOME;
+      };
+    };
+
+    systemd.services.gitlab = {
+      after = [
+        "gitlab-workhorse.service"
+        "network.target"
+        "redis-gitlab.service"
+        "gitlab-config.service"
+        "gitlab-db-config.service"
+      ];
+      bindsTo = [
+        "redis-gitlab.service"
+        "gitlab-config.service"
+        "gitlab-db-config.service"
+      ] ++ optional (cfg.databaseHost == "") "postgresql.service";
+      requiredBy = [ "gitlab.target" ];
+      partOf = [ "gitlab.target" ];
+      environment = gitlabEnv;
+      path = with pkgs; [
+        postgresqlPackage
+        git
+        openssh
+        nodejs
+        procps
+        gnupg
+        gzip
+      ];
+      serviceConfig = {
+        Type = "notify";
+        User = cfg.user;
+        Group = cfg.group;
+        TimeoutSec = "infinity";
+        Restart = "on-failure";
+        WorkingDirectory = "${cfg.packages.gitlab}/share/gitlab";
+        ExecStart = concatStringsSep " " [
+          "${cfg.packages.gitlab.rubyEnv}/bin/bundle" "exec" "puma"
+          "-e production"
+          "-C ${cfg.statePath}/config/puma.rb"
+          "-w ${cfg.puma.workers}"
+          "-t ${cfg.puma.threadsMin}:${cfg.puma.threadsMax}"
+        ];
+      };
+
+    };
+
+    systemd.services.gitlab-backup = {
+      after = [ "gitlab.service" ];
+      bindsTo = [ "gitlab.service" ];
+      startAt = cfg.backup.startAt;
+      environment = {
+        RAILS_ENV = "production";
+        CRON = "1";
+      } // optionalAttrs (stringLength cfg.backup.skip > 0) {
+        SKIP = cfg.backup.skip;
+      };
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${gitlab-rake}/bin/gitlab-rake gitlab:backup:create";
+      };
+    };
+
+  };
+
+  meta.doc = ./gitlab.md;
+  meta.maintainers = teams.gitlab.members;
+}
diff --git a/nixpkgs/nixos/modules/services/misc/gitolite.nix b/nixpkgs/nixos/modules/services/misc/gitolite.nix
new file mode 100644
index 000000000000..012abda2d76f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gitolite.nix
@@ -0,0 +1,241 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gitolite;
+  # Use writeTextDir so the Nix store hash does not leak into the file name
+  pubkeyFile = (pkgs.writeTextDir "gitolite-admin.pub" cfg.adminPubkey) + "/gitolite-admin.pub";
+  hooks = lib.concatMapStrings (hook: "${hook} ") cfg.commonHooks;
+in
+{
+  options = {
+    services.gitolite = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable gitolite management under the
+          `gitolite` user. After
+          switching to a configuration with Gitolite enabled, you can
+          then run `git clone gitolite@host:gitolite-admin.git` to manage it further.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/gitolite";
+        description = lib.mdDoc ''
+          The gitolite home directory used to store all repositories. If left at the default value,
+          this directory will automatically be created before the gitolite server starts; otherwise,
+          the sysadmin is responsible for ensuring the directory exists with appropriate ownership
+          and permissions.
+        '';
+      };
+
+      adminPubkey = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Initial administrative public key for Gitolite. This should
+          be an SSH public key. Note that this key will only be used
+          once, upon the first initialization of the Gitolite user.
+          The key string must not contain any line breaks.
+        '';
+      };
+
+      enableGitAnnex = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable git-annex support. Uses the `extraGitoliteRc` option
+          to apply the necessary configuration.
+        '';
+      };
+
+      commonHooks = mkOption {
+        type = types.listOf types.path;
+        default = [];
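+        # Illustrative value; each listed path is copied into ~/.gitolite/hooks/common.
+        example = literalExpression "[ ./post-receive ]";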
+        description = lib.mdDoc ''
+          A list of custom git hooks that get copied to `~/.gitolite/hooks/common`.
+        '';
+      };
+
+      extraGitoliteRc = mkOption {
+        type = types.lines;
+        default = "";
+        example = literalExpression ''
+          '''
+            $RC{UMASK} = 0027;
+            $RC{SITE_INFO} = 'This is our private repository host';
+            push( @{$RC{ENABLE}}, 'Kindergarten' ); # enable the command/feature
+            @{$RC{ENABLE}} = grep { $_ ne 'desc' } @{$RC{ENABLE}}; # disable the command/feature
+          '''
+        '';
+        description = lib.mdDoc ''
+          Extra configuration to append to the default `~/.gitolite.rc`.
+
+          This should be Perl code that modifies the `%RC`
+          configuration variable. The default `~/.gitolite.rc`
+          content is generated by invoking `gitolite print-default-rc`,
+          and extra configuration from this option is appended to it. The result
+          is placed to Nix store, and the `~/.gitolite.rc` file
+          becomes a symlink to it.
+
+          If you already have a customized (or otherwise changed)
+          `~/.gitolite.rc` file, NixOS will refuse to replace
+          it with a symlink, and the `gitolite-init` initialization service
+          will fail. In this situation, in order to use this option, you
+          will need to take any customizations you may have in
+          `~/.gitolite.rc`, convert them to appropriate Perl
+          statements, add them to this option, and remove the file.
+
+          See also the `enableGitAnnex` option.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "gitolite";
+        description = lib.mdDoc ''
+          Gitolite user account. This is the username of the gitolite endpoint.
+        '';
+      };
+
+      description = mkOption {
+        type = types.str;
+        default = "Gitolite user";
+        description = lib.mdDoc ''
+          Gitolite user account's description.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "gitolite";
+        description = lib.mdDoc ''
+          Primary group of the Gitolite user account.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable (
+  let
+    manageGitoliteRc = cfg.extraGitoliteRc != "";
+    rcDir = pkgs.runCommand "gitolite-rc" { preferLocalBuild = true; } rcDirScript;
+    rcDirScript =
+      ''
+        mkdir "$out"
+        export HOME=temp-home
+        mkdir -p "$HOME/.gitolite/logs" # gitolite can't run without it
+        '${pkgs.gitolite}'/bin/gitolite print-default-rc >>"$out/gitolite.rc.default"
+        cat <<END >>"$out/gitolite.rc"
+        # This file is managed by NixOS.
+        # Use services.gitolite options to control it.
+
+        END
+        cat "$out/gitolite.rc.default" >>"$out/gitolite.rc"
+      '' +
+      optionalString (cfg.extraGitoliteRc != "") ''
+        echo -n ${escapeShellArg ''
+
+          # Added by NixOS:
+          ${removeSuffix "\n" cfg.extraGitoliteRc}
+
+          # per perl rules, this should be the last line in such a file:
+          1;
+        ''} >>"$out/gitolite.rc"
+      '';
+  in {
+    services.gitolite.extraGitoliteRc = optionalString cfg.enableGitAnnex ''
+      # Enable git-annex support:
+      push( @{$RC{ENABLE}}, 'git-annex-shell ua');
+    '';
+
+    users.users.${cfg.user} = {
+      description     = cfg.description;
+      home            = cfg.dataDir;
+      uid             = config.ids.uids.gitolite;
+      group           = cfg.group;
+      useDefaultShell = true;
+    };
+    users.groups.${cfg.group}.gid = config.ids.gids.gitolite;
+
+    systemd.services.gitolite-init = {
+      description = "Gitolite initialization";
+      wantedBy    = [ "multi-user.target" ];
+      unitConfig.RequiresMountsFor = cfg.dataDir;
+
+      environment = {
+        GITOLITE_RC = ".gitolite.rc";
+        GITOLITE_RC_DEFAULT = "${rcDir}/gitolite.rc.default";
+      };
+
+      serviceConfig = mkMerge [
+        (mkIf (cfg.dataDir == "/var/lib/gitolite") {
+          StateDirectory = "gitolite gitolite/.gitolite gitolite/.gitolite/logs";
+          StateDirectoryMode = "0750";
+        })
+        {
+          Type = "oneshot";
+          User = cfg.user;
+          Group = cfg.group;
+          WorkingDirectory = "~";
+          RemainAfterExit = true;
+        }
+      ];
+
+      path = [ pkgs.gitolite pkgs.git pkgs.perl pkgs.bash pkgs.diffutils config.programs.ssh.package ];
+      script =
+      let
+        rcSetupScriptIfCustomFile =
+          if manageGitoliteRc then ''
+            cat <<END
+            <3>ERROR: NixOS can't apply declarative configuration
+            <3>to your .gitolite.rc file, because it seems to be
+            <3>already customized manually.
+            <3>See the services.gitolite.extraGitoliteRc option
+            <3>in "man configuration.nix" for more information.
+            END
+            # Not sure if the line below addresses the issue directly or just
+            # adds a delay, but without it our error message often doesn't
+            # show up in `systemctl status gitolite-init`.
+            journalctl --flush
+            exit 1
+          '' else ''
+            :
+          '';
+        rcSetupScriptIfDefaultFileOrStoreSymlink =
+          if manageGitoliteRc then ''
+            ln -sf "${rcDir}/gitolite.rc" "$GITOLITE_RC"
+          '' else ''
+            [[ -L "$GITOLITE_RC" ]] && rm -f "$GITOLITE_RC"
+          '';
+      in
+        ''
+          if ( [[ ! -e "$GITOLITE_RC" ]] && [[ ! -L "$GITOLITE_RC" ]] ) ||
+             ( [[ -f "$GITOLITE_RC" ]] && diff -q "$GITOLITE_RC" "$GITOLITE_RC_DEFAULT" >/dev/null ) ||
+             ( [[ -L "$GITOLITE_RC" ]] && [[ "$(readlink "$GITOLITE_RC")" =~ ^/nix/store/ ]] )
+          then
+        '' + rcSetupScriptIfDefaultFileOrStoreSymlink +
+        ''
+          else
+        '' + rcSetupScriptIfCustomFile +
+        ''
+          fi
+
+          if [ ! -d repositories ]; then
+            gitolite setup -pk ${pubkeyFile}
+          fi
+          if [ -n "${hooks}" ]; then
+            cp -f ${hooks} .gitolite/hooks/common/
+            chmod +x .gitolite/hooks/common/*
+          fi
+          gitolite setup # Upgrade if needed
+        '';
+    };
+
+    environment.systemPackages = [ pkgs.gitolite pkgs.git ]
+        ++ optional cfg.enableGitAnnex pkgs.git-annex;
+  });
+}
diff --git a/nixpkgs/nixos/modules/services/misc/gitweb.nix b/nixpkgs/nixos/modules/services/misc/gitweb.nix
new file mode 100644
index 000000000000..aac0dac8a080
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gitweb.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gitweb;
+
+in
+{
+
+  options.services.gitweb = {
+
+    projectroot = mkOption {
+      default = "/srv/git";
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to git projects (bare repositories) that should be served by
+        gitweb. Must not end with a slash.
+      '';
+    };
+
+    extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Verbatim configuration text appended to the generated gitweb.conf file.
+      '';
+      example = ''
+        $feature{'highlight'}{'default'} = [1];
+        $feature{'ctags'}{'default'} = [1];
+        $feature{'avatar'}{'default'} = ['gravatar'];
+      '';
+    };
+
+    gitwebTheme = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Use an alternative theme for gitweb, strongly inspired by GitHub.
+      '';
+    };
+
+    gitwebConfigFile = mkOption {
+      default = pkgs.writeText "gitweb.conf" ''
+        # path to git projects (<project>.git)
+        $projectroot = "${cfg.projectroot}";
+        $highlight_bin = "${pkgs.highlight}/bin/highlight";
+        ${cfg.extraConfig}
+      '';
+      defaultText = literalMD "generated config file";
+      type = types.path;
+      readOnly = true;
+      internal = true;
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/misc/gogs.nix b/nixpkgs/nixos/modules/services/misc/gogs.nix
new file mode 100644
index 000000000000..9bf7e4aab814
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gogs.nix
@@ -0,0 +1,274 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gogs;
+  opt = options.services.gogs;
+  configFile = pkgs.writeText "app.ini" ''
+    BRAND_NAME = ${cfg.appName}
+    RUN_USER = ${cfg.user}
+    RUN_MODE = prod
+
+    [database]
+    TYPE = ${cfg.database.type}
+    HOST = ${cfg.database.host}:${toString cfg.database.port}
+    NAME = ${cfg.database.name}
+    USER = ${cfg.database.user}
+    PASSWORD = #dbpass#
+    PATH = ${cfg.database.path}
+
+    [repository]
+    ROOT = ${cfg.repositoryRoot}
+
+    [server]
+    DOMAIN = ${cfg.domain}
+    HTTP_ADDR = ${cfg.httpAddress}
+    HTTP_PORT = ${toString cfg.httpPort}
+    EXTERNAL_URL = ${cfg.rootUrl}
+
+    [session]
+    COOKIE_NAME = session
+    COOKIE_SECURE = ${boolToString cfg.cookieSecure}
+
+    [security]
+    SECRET_KEY = #secretkey#
+    INSTALL_LOCK = true
+
+    [log]
+    ROOT_PATH = ${cfg.stateDir}/log
+
+    ${cfg.extraConfig}
+  '';
+in
+
+{
+  options = {
+    services.gogs = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Enable Go Git Service.";
+      };
+
+      useWizard = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Do not generate a configuration and use Gogs' installation wizard instead. The first registered user will be the administrator.";
+      };
+
+      stateDir = mkOption {
+        default = "/var/lib/gogs";
+        type = types.str;
+        description = lib.mdDoc "Gogs data directory.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "gogs";
+        description = lib.mdDoc "User account under which Gogs runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "gogs";
+        description = lib.mdDoc "Group account under which Gogs runs.";
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "sqlite3" "mysql" "postgres" ];
+          example = "mysql";
+          default = "sqlite3";
+          description = lib.mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 3306;
+          description = lib.mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "gogs";
+          description = lib.mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "gogs";
+          description = lib.mdDoc "Database user.";
+        };
+
+        password = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            The password corresponding to {option}`database.user`.
+            Warning: this is stored in cleartext in the Nix store!
+            Use {option}`database.passwordFile` instead.
+          '';
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/gogs-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            {option}`database.user`.
+          '';
+        };
+
+        path = mkOption {
+          type = types.str;
+          default = "${cfg.stateDir}/data/gogs.db";
+          defaultText = literalExpression ''"''${config.${opt.stateDir}}/data/gogs.db"'';
+          description = lib.mdDoc "Path to the sqlite3 database file.";
+        };
+      };
+
+      appName = mkOption {
+        type = types.str;
+        default = "Gogs: Go Git Service";
+        description = lib.mdDoc "Application name.";
+      };
+
+      repositoryRoot = mkOption {
+        type = types.str;
+        default = "${cfg.stateDir}/repositories";
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/repositories"'';
+        description = lib.mdDoc "Path to the git repositories.";
+      };
+
+      domain = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Domain name of your server.";
+      };
+
+      rootUrl = mkOption {
+        type = types.str;
+        default = "http://localhost:3000/";
+        description = lib.mdDoc "Full public URL of Gogs server.";
+      };
+
+      httpAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc "HTTP listen address.";
+      };
+
+      httpPort = mkOption {
+        type = types.port;
+        default = 3000;
+        description = lib.mdDoc "HTTP listen port.";
+      };
+
+      cookieSecure = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Marks session cookies as "secure" as a hint for browsers to only send
+          them via HTTPS. This option is recommended if Gogs is served over HTTPS.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Configuration lines appended to the generated Gogs configuration file.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.gogs = {
+      description = "Gogs (Go Git Service)";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.gogs ];
+
+      preStart = let
+        runConfig = "${cfg.stateDir}/custom/conf/app.ini";
+        secretKey = "${cfg.stateDir}/custom/conf/secret_key";
+      in ''
+        mkdir -p ${cfg.stateDir}
+
+        # copy custom configuration and generate a random secret key if needed
+        ${optionalString (cfg.useWizard == false) ''
+          mkdir -p ${cfg.stateDir}/custom/conf
+          cp -f ${configFile} ${runConfig}
+
+          if [ ! -e ${secretKey} ]; then
+              head -c 16 /dev/urandom | base64 > ${secretKey}
+          fi
+
+          KEY=$(head -n1 ${secretKey})
+          DBPASS=$(head -n1 ${cfg.database.passwordFile})
+          sed -e "s,#secretkey#,$KEY,g" \
+              -e "s,#dbpass#,$DBPASS,g" \
+              -i ${runConfig}
+          chmod 440 ${runConfig} ${secretKey}
+        ''}
+
+        mkdir -p ${cfg.repositoryRoot}
+        # update all hooks' binary paths
+        HOOKS=$(find ${cfg.repositoryRoot} -mindepth 4 -maxdepth 4 -type f -wholename "*git/hooks/*")
+        if [ "$HOOKS" ]
+        then
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/gogs,${pkgs.gogs}/bin/gogs,g' $HOOKS
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/env,${pkgs.coreutils}/bin/env,g' $HOOKS
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/bash,${pkgs.bash}/bin/bash,g' $HOOKS
+          sed -ri 's,/nix/store/[a-z0-9.-]+/bin/perl,${pkgs.perl}/bin/perl,g' $HOOKS
+        fi
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.stateDir;
+        ExecStart = "${pkgs.gogs}/bin/gogs web";
+        Restart = "always";
+      };
+
+      environment = {
+        USER = cfg.user;
+        HOME = cfg.stateDir;
+        GOGS_WORK_DIR = cfg.stateDir;
+      };
+    };
+
+    users = mkIf (cfg.user == "gogs") {
+      users.gogs = {
+        description = "Go Git Service";
+        uid = config.ids.uids.gogs;
+        group = "gogs";
+        home = cfg.stateDir;
+        createHome = true;
+        shell = pkgs.bash;
+      };
+      groups.gogs.gid = config.ids.gids.gogs;
+    };
+
+    warnings = optional (cfg.database.password != "")
+      ''config.services.gogs.database.password will be stored as plaintext
+        in the Nix store. Use database.passwordFile instead.'';
+
+    # Create database passwordFile default when password is configured.
+    services.gogs.database.passwordFile =
+      (mkDefault (toString (pkgs.writeTextFile {
+        name = "gogs-database-password";
+        text = cfg.database.password;
+      })));
+  };
+}
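+
+# A minimal usage sketch (comment only, not evaluated): how the options above
+# might be combined in a host configuration. The domain, URL, database choice
+# and password file path are illustrative assumptions, not module defaults.
+#
+#   services.gogs = {
+#     enable = true;
+#     domain = "git.example.org";
+#     rootUrl = "https://git.example.org/";
+#     cookieSecure = true;  # served behind an HTTPS reverse proxy
+#     database = {
+#       type = "postgres";
+#       port = 5432;
+#       passwordFile = "/run/keys/gogs-dbpassword";  # avoids the cleartext warning
+#     };
+#   };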
diff --git a/nixpkgs/nixos/modules/services/misc/gollum.nix b/nixpkgs/nixos/modules/services/misc/gollum.nix
new file mode 100644
index 000000000000..b73528abaf65
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gollum.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gollum;
+in
+
+{
+  options.services.gollum = {
+    enable = mkEnableOption (lib.mdDoc "Gollum service");
+
+    address = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc "IP address on which the web server will listen.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 4567;
+      description = lib.mdDoc "Port on which the web server will run.";
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "Content of the configuration file";
+    };
+
+    mathjax = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable support for math rendering using MathJax";
+    };
+
+    allowUploads = mkOption {
+      type = types.nullOr (types.enum [ "dir" "page" ]);
+      default = null;
+      description = lib.mdDoc "Enable uploads of external files";
+    };
+
+    user-icons = mkOption {
+      type = types.nullOr (types.enum [ "gravatar" "identicon" ]);
+      default = null;
+      description = lib.mdDoc "Enable specific user icons for history view";
+    };
+
+    emoji = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Parse and interpret emoji tags";
+    };
+
+    h1-title = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Use the first h1 as page title";
+    };
+
+    no-edit = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Disable editing pages";
+    };
+
+    local-time = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Use the browser's local timezone instead of the server's for displaying dates.";
+    };
+
+    branch = mkOption {
+      type = types.str;
+      default = "master";
+      example = "develop";
+      description = lib.mdDoc "Git branch to serve";
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = "/var/lib/gollum";
+      description = lib.mdDoc "Specifies the path of the repository directory. If it does not exist, Gollum will create it on startup.";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.gollum;
+      defaultText = literalExpression "pkgs.gollum";
+      description = lib.mdDoc ''
+        The package used in the service
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "gollum";
+      description = lib.mdDoc "Specifies the owner of the wiki directory";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "gollum";
+      description = lib.mdDoc "Specifies the owner group of the wiki directory";
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.gollum = mkIf (cfg.user == "gollum") {
+      group = cfg.group;
+      description = "Gollum user";
+      createHome = false;
+      isSystemUser = true;
+    };
+
+    users.groups."${cfg.group}" = { };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}' - ${config.users.users.gollum.name} ${config.users.groups.gollum.name} - -"
+    ];
+
+    systemd.services.gollum = {
+      description = "Gollum wiki";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.git ];
+
+      preStart = ''
+        # This is safe to be run on an existing repo
+        git init ${cfg.stateDir}
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.stateDir;
+        ExecStart = ''
+          ${cfg.package}/bin/gollum \
+            --port ${toString cfg.port} \
+            --host ${cfg.address} \
+            --config ${pkgs.writeText "gollum-config.rb" cfg.extraConfig} \
+            --ref ${cfg.branch} \
+            ${optionalString cfg.mathjax "--mathjax"} \
+            ${optionalString cfg.emoji "--emoji"} \
+            ${optionalString cfg.h1-title "--h1-title"} \
+            ${optionalString cfg.no-edit "--no-edit"} \
+            ${optionalString cfg.local-time "--local-time"} \
+            ${optionalString (cfg.allowUploads != null) "--allow-uploads ${cfg.allowUploads}"} \
+            ${optionalString (cfg.user-icons != null) "--user-icons ${cfg.user-icons}"} \
+            ${cfg.stateDir}
+        '';
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ erictapen bbenno ];
+}
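+
+# A hedged usage sketch (comment only): one way the options above could be set
+# on a host. The branch name and upload mode are illustrative assumptions.
+#
+#   services.gollum = {
+#     enable = true;
+#     address = "127.0.0.1";   # keep the wiki behind a reverse proxy
+#     port = 4567;
+#     branch = "main";
+#     allowUploads = "page";   # store uploads next to the page
+#     emoji = true;
+#     h1-title = true;
+#   };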
diff --git a/nixpkgs/nixos/modules/services/misc/gpsd.nix b/nixpkgs/nixos/modules/services/misc/gpsd.nix
new file mode 100644
index 000000000000..5d2e806181df
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/gpsd.nix
@@ -0,0 +1,145 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+
+  uid = config.ids.uids.gpsd;
+  gid = config.ids.gids.gpsd;
+  cfg = config.services.gpsd;
+
+in {
+
+  ###### interface
+
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "gpsd" "device" ]
+      "Use `services.gpsd.devices` instead.")
+  ];
+
+  options = {
+
+    services.gpsd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable `gpsd`, a GPS service daemon.
+        '';
+      };
+
+      devices = mkOption {
+        type = types.listOf types.str;
+        default = [ "/dev/ttyUSB0" ];
+        description = lib.mdDoc ''
+          List of devices that `gpsd` should subscribe to.
+
+          A device may be a local serial device for GPS input, or a
+          URL of the form:
+          `[{dgpsip|ntrip}://][user:passwd@]host[:port][/stream]` in
+          which case it specifies an input source for DGPS or ntrip
+          data.
+        '';
+      };
+
+      readonly = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable broken-device-safety, otherwise
+          known as read-only mode.  Some popular bluetooth and USB
+          receivers lock up or become totally inaccessible when
+          probed or reconfigured.  This switch prevents gpsd from
+          writing to a receiver.  This means that gpsd cannot
+          configure the receiver for optimal performance, but it
+          also means that gpsd cannot break the receiver.  A better
+          solution would be for Bluetooth to not be so fragile.  A
+          platform independent method to identify
+          serial-over-Bluetooth devices would also be nice.
+        '';
+      };
+
+      nowait = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Don't wait for a client to connect before polling the GPS device.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 2947;
+        description = lib.mdDoc ''
+          The port where to listen for TCP connections.
+        '';
+      };
+
+      debugLevel = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          The debugging level.
+        '';
+      };
+
+      listenany = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Listen on all addresses rather than just loopback.
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "-r" "-s" "19200" ];
+        description = lib.mdDoc ''
+          A list of extra command line arguments to pass to gpsd.
+          Check the gpsd(8) manpage for possible arguments.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.gpsd = {
+      inherit uid;
+      group = "gpsd";
+      description = "gpsd daemon user";
+      home = "/var/empty";
+    };
+
+    users.groups.gpsd = { inherit gid; };
+
+    systemd.services.gpsd = {
+      description = "GPSD daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = let
+          devices = utils.escapeSystemdExecArgs cfg.devices;
+          extraArgs = utils.escapeSystemdExecArgs cfg.extraArgs;
+        in ''
+          ${pkgs.gpsd}/sbin/gpsd -D "${toString cfg.debugLevel}"  \
+            -S "${toString cfg.port}"                             \
+            ${optionalString cfg.readonly "-b"}                   \
+            ${optionalString cfg.nowait "-n"}                     \
+            ${optionalString cfg.listenany "-G"}                  \
+            ${extraArgs}                                          \
+            ${devices}
+        '';
+      };
+    };
+
+  };
+
+}
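+
+# A hedged usage sketch (comment only): enabling gpsd for a USB receiver plus a
+# remote ntrip correction stream. The device path and ntrip URL are
+# illustrative assumptions.
+#
+#   services.gpsd = {
+#     enable = true;
+#     devices = [ "/dev/ttyACM0" "ntrip://user:pass@caster.example.org:2101/STREAM" ];
+#     nowait = true;       # start polling before any client connects
+#     extraArgs = [ "-r" ];
+#   };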
diff --git a/nixpkgs/nixos/modules/services/misc/greenclip.nix b/nixpkgs/nixos/modules/services/misc/greenclip.nix
new file mode 100644
index 000000000000..45847af71141
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/greenclip.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.greenclip;
+in {
+
+  options.services.greenclip = {
+    enable = mkEnableOption (lib.mdDoc "Greenclip daemon");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.haskellPackages.greenclip;
+      defaultText = literalExpression "pkgs.haskellPackages.greenclip";
+      description = lib.mdDoc "greenclip derivation to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.greenclip = {
+      enable      = true;
+      description = "greenclip daemon";
+      wantedBy = [ "graphical-session.target" ];
+      after    = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = "${cfg.package}/bin/greenclip daemon";
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/headphones.nix b/nixpkgs/nixos/modules/services/misc/headphones.nix
new file mode 100644
index 000000000000..472b330fff15
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/headphones.nix
@@ -0,0 +1,89 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  name = "headphones";
+
+  cfg = config.services.headphones;
+  opt = options.services.headphones;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.headphones = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the headphones server.";
+      };
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/${name}";
+        description = lib.mdDoc "Path where to store data files.";
+      };
+      configFile = mkOption {
+        type = types.path;
+        default = "${cfg.dataDir}/config.ini";
+        defaultText = literalExpression ''"''${config.${opt.dataDir}}/config.ini"'';
+        description = lib.mdDoc "Path to config file.";
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Host to listen on.";
+      };
+      port = mkOption {
+        type = types.ints.u16;
+        default = 8181;
+        description = lib.mdDoc "Port to bind to.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "User to run the service as";
+      };
+      group = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "Group to run the service as";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == name) {
+      ${name} = {
+        uid = config.ids.uids.headphones;
+        group = cfg.group;
+        description = "headphones user";
+        home = cfg.dataDir;
+        createHome = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == name) {
+      ${name}.gid = config.ids.gids.headphones;
+    };
+
+    systemd.services.headphones = {
+        description = "Headphones Server";
+        wantedBy    = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+          ExecStart = "${pkgs.headphones}/bin/headphones --datadir ${cfg.dataDir} --config ${cfg.configFile} --host ${cfg.host} --port ${toString cfg.port}";
+        };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/heisenbridge.nix b/nixpkgs/nixos/modules/services/misc/heisenbridge.nix
new file mode 100644
index 000000000000..822a09d7cd4d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/heisenbridge.nix
@@ -0,0 +1,221 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.heisenbridge;
+
+  pkg = config.services.heisenbridge.package;
+  bin = "${pkg}/bin/heisenbridge";
+
+  jsonType = (pkgs.formats.json { }).type;
+
+  registrationFile = "/var/lib/heisenbridge/registration.yml";
+  # JSON is a proper subset of YAML
+  bridgeConfig = builtins.toFile "heisenbridge-registration.yml" (builtins.toJSON {
+    id = "heisenbridge";
+    url = cfg.registrationUrl;
+    # Don't specify as_token and hs_token
+    rate_limited = false;
+    sender_localpart = "heisenbridge";
+    namespaces = cfg.namespaces;
+  });
+in
+{
+  options.services.heisenbridge = {
+    enable = mkEnableOption (lib.mdDoc "the Matrix to IRC bridge");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.heisenbridge;
+      defaultText = lib.literalExpression "pkgs.heisenbridge";
+      description = lib.mdDoc ''
+        Package of the application to run, exposed for overriding purposes.
+      '';
+    };
+
+    homeserver = mkOption {
+      type = types.str;
+      description = lib.mdDoc "The URL to the home server for client-server API calls";
+      example = "http://localhost:8008";
+    };
+
+    registrationUrl = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The URL where the application service is listening for HS requests, from the Matrix HS perspective.
+        The default value assumes the bridge runs on the same host as the home server, in the same network.
+      '';
+      example = "https://matrix.example.org";
+      default = "http://${cfg.address}:${toString cfg.port}";
+      defaultText = "http://$${cfg.address}:$${toString cfg.port}";
+    };
+
+    address = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Address to listen on. IPv6 does not seem to be supported.";
+      default = "127.0.0.1";
+      example = "0.0.0.0";
+    };
+
+    port = mkOption {
+      type = types.port;
+      description = lib.mdDoc "The port to listen on";
+      default = 9898;
+    };
+
+    debug = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "More verbose logging. Recommended during initial setup.";
+      default = false;
+    };
+
+    owner = mkOption {
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        Set the owner MXID; otherwise the first local user to talk to the bridge will claim it.
+      '';
+      default = null;
+      example = "@admin:example.org";
+    };
+
+    namespaces = mkOption {
+      description = lib.mdDoc "Configure the 'namespaces' section of the registration.yml for the bridge and the server";
+      # TODO link to Matrix documentation of the format
+      type = types.submodule {
+        freeformType = jsonType;
+      };
+
+      default = {
+        users = [
+          {
+            regex = "@irc_.*";
+            exclusive = true;
+          }
+        ];
+        aliases = [ ];
+        rooms = [ ];
+      };
+    };
+
+    identd.enable = mkEnableOption (lib.mdDoc "identd service support");
+    identd.port = mkOption {
+      type = types.port;
+      description = lib.mdDoc "identd listen port";
+      default = 113;
+    };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      description = lib.mdDoc "Heisenbridge is configured over the command line. Append extra arguments here";
+      default = [ ];
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.heisenbridge = {
+      description = "Matrix<->IRC bridge";
+      before = [ "matrix-synapse.service" ]; # So the registration file can be used by Synapse
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        umask 077
+        set -e -u -o pipefail
+
+        if ! [ -f "${registrationFile}" ]; then
+          # Generate registration file if not present (actually, we only care about the tokens in it)
+          ${bin} --generate --config ${registrationFile}
+        fi
+
+        # Overwrite the registration file with our generated one (the config may have changed since then),
+        # but keep the tokens. Two step procedure to be failure safe
+        ${pkgs.yq}/bin/yq --slurp \
+          '.[0] + (.[1] | {as_token, hs_token})' \
+          ${bridgeConfig} \
+          ${registrationFile} \
+          > ${registrationFile}.new
+        mv -f ${registrationFile}.new ${registrationFile}
+
+        # Grant Synapse access to the registration
+        if ${pkgs.getent}/bin/getent group matrix-synapse > /dev/null; then
+          chgrp -v matrix-synapse ${registrationFile}
+          chmod -v g+r ${registrationFile}
+        fi
+      '';
+
+      serviceConfig = rec {
+        Type = "simple";
+        ExecStart = lib.concatStringsSep " " (
+          [
+            bin
+            (if cfg.debug then "-vvv" else "-v")
+            "--config"
+            registrationFile
+            "--listen-address"
+            (lib.escapeShellArg cfg.address)
+            "--listen-port"
+            (toString cfg.port)
+          ]
+          ++ (lib.optionals (cfg.owner != null) [
+            "--owner"
+            (lib.escapeShellArg cfg.owner)
+          ])
+          ++ (lib.optionals cfg.identd.enable [
+            "--identd"
+            "--identd-port"
+            (toString cfg.identd.port)
+          ])
+          ++ [
+            (lib.escapeShellArg cfg.homeserver)
+          ]
+          ++ (map (lib.escapeShellArg) cfg.extraArgs)
+        );
+
+        # Hardening options
+
+        User = "heisenbridge";
+        Group = "heisenbridge";
+        RuntimeDirectory = "heisenbridge";
+        RuntimeDirectoryMode = "0700";
+        StateDirectory = "heisenbridge";
+        StateDirectoryMode = "0755";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        RestrictNamespaces = true;
+        RemoveIPC = true;
+        UMask = "0077";
+
+        CapabilityBoundingSet = [ "CAP_CHOWN" ] ++ optional (cfg.port < 1024 || (cfg.identd.enable && cfg.identd.port < 1024)) "CAP_NET_BIND_SERVICE";
+        AmbientCapabilities = CapabilityBoundingSet;
+        NoNewPrivileges = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        SystemCallFilter = ["@system-service" "~@privileged" "@chown"];
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = "AF_INET AF_INET6";
+      };
+    };
+
+    users.groups.heisenbridge = {};
+    users.users.heisenbridge = {
+      description = "Service user for the Heisenbridge";
+      group = "heisenbridge";
+      isSystemUser = true;
+    };
+  };
+
+  meta.maintainers = [ lib.maintainers.piegames ];
+}
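+
+# A hedged usage sketch (comment only): bridging a local Synapse instance. The
+# homeserver URL and owner MXID are illustrative assumptions.
+#
+#   services.heisenbridge = {
+#     enable = true;
+#     homeserver = "http://localhost:8008";
+#     owner = "@admin:example.org";
+#     debug = true;   # recommended during initial setup
+#   };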
diff --git a/nixpkgs/nixos/modules/services/misc/homepage-dashboard.nix b/nixpkgs/nixos/modules/services/misc/homepage-dashboard.nix
new file mode 100644
index 000000000000..e68571253433
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/homepage-dashboard.nix
@@ -0,0 +1,55 @@
+{ config
+, pkgs
+, lib
+, ...
+}:
+
+let
+  cfg = config.services.homepage-dashboard;
+in
+{
+  options = {
+    services.homepage-dashboard = {
+      enable = lib.mkEnableOption (lib.mdDoc "Homepage Dashboard");
+
+      package = lib.mkPackageOptionMD pkgs "homepage-dashboard" { };
+
+      openFirewall = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for Homepage.";
+      };
+
+      listenPort = lib.mkOption {
+        type = lib.types.int;
+        default = 8082;
+        description = lib.mdDoc "Port for Homepage to bind to.";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.homepage-dashboard = {
+      description = "Homepage Dashboard";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        HOMEPAGE_CONFIG_DIR = "/var/lib/homepage-dashboard";
+        PORT = "${toString cfg.listenPort}";
+      };
+
+      serviceConfig = {
+        Type = "simple";
+        DynamicUser = true;
+        StateDirectory = "homepage-dashboard";
+        ExecStart = "${lib.getExe cfg.package}";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listenPort ];
+    };
+  };
+}
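+
+# A hedged usage sketch (comment only): the listen port below is this module's
+# default and is repeated only for illustration.
+#
+#   services.homepage-dashboard = {
+#     enable = true;
+#     listenPort = 8082;
+#     openFirewall = true;   # expose the dashboard beyond localhost
+#   };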
diff --git a/nixpkgs/nixos/modules/services/misc/ihaskell.nix b/nixpkgs/nixos/modules/services/misc/ihaskell.nix
new file mode 100644
index 000000000000..4782053c4fb8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/ihaskell.nix
@@ -0,0 +1,65 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.ihaskell;
+  ihaskell = pkgs.ihaskell.override {
+    packages = cfg.extraPackages;
+  };
+
+in
+
+{
+  options = {
+    services.ihaskell = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Autostart an IHaskell notebook service.";
+      };
+
+      extraPackages = mkOption {
+        type = types.functionTo (types.listOf types.package);
+        default = haskellPackages: [];
+        defaultText = literalExpression "haskellPackages: []";
+        example = literalExpression ''
+          haskellPackages: [
+            haskellPackages.wreq
+            haskellPackages.lens
+          ]
+        '';
+        description = lib.mdDoc ''
+          Extra packages available to ghc when running ihaskell. The
+          value must be a function which receives the attrset defined
+          in {var}`haskellPackages` as the sole argument.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.ihaskell = {
+      group = config.users.groups.ihaskell.name;
+      description = "IHaskell user";
+      home = "/var/lib/ihaskell";
+      createHome = true;
+      uid = config.ids.uids.ihaskell;
+    };
+
+    users.groups.ihaskell.gid = config.ids.gids.ihaskell;
+
+    systemd.services.ihaskell = {
+      description = "IHaskell notebook instance";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        User = config.users.users.ihaskell.name;
+        Group = config.users.groups.ihaskell.name;
+        ExecStart = "${pkgs.runtimeShell} -c \"cd $HOME;${ihaskell}/bin/ihaskell-notebook\"";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/input-remapper.nix b/nixpkgs/nixos/modules/services/misc/input-remapper.nix
new file mode 100644
index 000000000000..3f6d97f85738
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/input-remapper.nix
@@ -0,0 +1,30 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let cfg = config.services.input-remapper; in
+{
+  options = {
+    services.input-remapper = {
+      enable = mkEnableOption (lib.mdDoc "input-remapper, an easy to use tool to change the mapping of your input device buttons");
+      package = mkPackageOptionMD pkgs "input-remapper" { };
+      enableUdevRules = mkEnableOption (lib.mdDoc "udev rules added by input-remapper to handle hotplugged devices. Currently disabled by default due to https://github.com/sezanzeb/input-remapper/issues/140");
+      serviceWantedBy = mkOption {
+        default = [ "graphical.target" ];
+        example = [ "multi-user.target" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc "Specifies the WantedBy setting for the input-remapper service.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.udev.packages = mkIf cfg.enableUdevRules [ cfg.package ];
+    services.dbus.packages = [ cfg.package ];
+    systemd.packages = [ cfg.package ];
+    environment.systemPackages = [ cfg.package ];
+    systemd.services.input-remapper.wantedBy = cfg.serviceWantedBy;
+  };
+
+  meta.maintainers = with lib.maintainers; [ LunNova ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/irkerd.nix b/nixpkgs/nixos/modules/services/misc/irkerd.nix
new file mode 100644
index 000000000000..d080cc0a7358
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/irkerd.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.irkerd;
+  ports = [ 6659 ];
+in
+{
+  options.services.irkerd = {
+    enable = mkOption {
+      description = lib.mdDoc "Whether to enable irker, an IRC notification daemon.";
+      default = false;
+      type = types.bool;
+    };
+
+    openPorts = mkOption {
+      description = lib.mdDoc "Open ports in the firewall for irkerd";
+      default = false;
+      type = types.bool;
+    };
+
+    listenAddress = mkOption {
+      default = "localhost";
+      example = "0.0.0.0";
+      type = types.str;
+      description = lib.mdDoc ''
+        Specifies the bind address on which the irker daemon listens.
+        The default is localhost.
+
+        Irker authors strongly warn about the risks of running this on
+        a publicly accessible interface, so change this with caution.
+      '';
+    };
+
+    nick = mkOption {
+      default = "irker";
+      type = types.str;
+      description = lib.mdDoc "Nick to use for irker";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.irkerd = {
+      description = "Internet Relay Chat (IRC) notification daemon";
+      documentation = [ "man:irkerd(8)" "man:irkerhook(1)" "man:irk(1)" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.irker}/bin/irkerd -H ${cfg.listenAddress} -n ${cfg.nick}";
+        User = "irkerd";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.irker ];
+
+    users.users.irkerd = {
+      description = "Irker daemon user";
+      isSystemUser = true;
+      group = "irkerd";
+    };
+    users.groups.irkerd = {};
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openPorts ports;
+    networking.firewall.allowedUDPPorts = mkIf cfg.openPorts ports;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/jackett.nix b/nixpkgs/nixos/modules/services/misc/jackett.nix
new file mode 100644
index 000000000000..b0edf0d18da4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/jackett.nix
@@ -0,0 +1,82 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jackett;
+
+in
+{
+  options = {
+    services.jackett = {
+      enable = mkEnableOption (lib.mdDoc "Jackett");
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/jackett/.config/Jackett";
+        description = lib.mdDoc "The directory where Jackett stores its data files.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the Jackett web interface.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "jackett";
+        description = lib.mdDoc "User account under which Jackett runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "jackett";
+        description = lib.mdDoc "Group under which Jackett runs.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.jackett;
+        defaultText = literalExpression "pkgs.jackett";
+        description = lib.mdDoc "Jackett package to use.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.jackett = {
+      description = "Jackett";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/Jackett --NoUpdates --DataFolder '${cfg.dataDir}'";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 9117 ];
+    };
+
+    users.users = mkIf (cfg.user == "jackett") {
+      jackett = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        uid = config.ids.uids.jackett;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "jackett") {
+      jackett.gid = config.ids.gids.jackett;
+    };
+  };
+}
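+
+# A hedged usage sketch (comment only): a typical single-host setup; when
+# openFirewall is set, the web UI is reachable on TCP 9117.
+#
+#   services.jackett = {
+#     enable = true;
+#     openFirewall = true;
+#   };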
diff --git a/nixpkgs/nixos/modules/services/misc/jellyfin.nix b/nixpkgs/nixos/modules/services/misc/jellyfin.nix
new file mode 100644
index 000000000000..43fdc09f4559
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/jellyfin.nix
@@ -0,0 +1,131 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jellyfin;
+in
+{
+  options = {
+    services.jellyfin = {
+      enable = mkEnableOption (lib.mdDoc "Jellyfin Media Server");
+
+      user = mkOption {
+        type = types.str;
+        default = "jellyfin";
+        description = lib.mdDoc "User account under which Jellyfin runs.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.jellyfin;
+        defaultText = literalExpression "pkgs.jellyfin";
+        description = lib.mdDoc ''
+          Jellyfin package to use.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "jellyfin";
+        description = lib.mdDoc "Group under which jellyfin runs.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open the default ports in the firewall for the media server. The
+          HTTP/HTTPS ports can be changed in the Web UI, so this option should
+          only be used if they are unchanged.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.jellyfin = {
+      description = "Jellyfin Media Server";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      # This mostly follows: https://github.com/jellyfin/jellyfin/blob/master/fedora/jellyfin.service
+      # Upstream also disables some hardening options when running in LXC; we do the same with the isContainer option.
+      serviceConfig = rec {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        StateDirectory = "jellyfin";
+        StateDirectoryMode = "0700";
+        CacheDirectory = "jellyfin";
+        CacheDirectoryMode = "0700";
+        UMask = "0077";
+        WorkingDirectory = "/var/lib/jellyfin";
+        ExecStart = "${cfg.package}/bin/jellyfin --datadir '/var/lib/${StateDirectory}' --cachedir '/var/cache/${CacheDirectory}'";
+        Restart = "on-failure";
+        TimeoutSec = 15;
+        SuccessExitStatus = ["0" "143"];
+
+        # Security options:
+        NoNewPrivileges = true;
+        SystemCallArchitectures = "native";
+        # AF_NETLINK needed because Jellyfin monitors the network connection
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+        RestrictNamespaces = !config.boot.isContainer;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        ProtectControlGroups = !config.boot.isContainer;
+        ProtectHostname = true;
+        ProtectKernelLogs = !config.boot.isContainer;
+        ProtectKernelModules = !config.boot.isContainer;
+        ProtectKernelTunables = !config.boot.isContainer;
+        LockPersonality = true;
+        PrivateTmp = !config.boot.isContainer;
+        # needed for hardware acceleration
+        PrivateDevices = false;
+        PrivateUsers = true;
+        RemoveIPC = true;
+
+        SystemCallFilter = [
+          "~@clock"
+          "~@aio"
+          "~@chown"
+          "~@cpu-emulation"
+          "~@debug"
+          "~@keyring"
+          "~@memlock"
+          "~@module"
+          "~@mount"
+          "~@obsolete"
+          "~@privileged"
+          "~@raw-io"
+          "~@reboot"
+          "~@setuid"
+          "~@swap"
+        ];
+        SystemCallErrorNumber = "EPERM";
+      };
+    };
+
+    users.users = mkIf (cfg.user == "jellyfin") {
+      jellyfin = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "jellyfin") {
+      jellyfin = {};
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      # from https://jellyfin.org/docs/general/networking/index.html
+      allowedTCPPorts = [ 8096 8920 ];
+      allowedUDPPorts = [ 1900 7359 ];
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ minijackson ];
+}
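+
+# A hedged usage sketch (comment only): the shared "media" group and the
+# firewall setting are illustrative assumptions; the ports opened by
+# openFirewall are listed in the module above.
+#
+#   services.jellyfin = {
+#     enable = true;
+#     openFirewall = true;   # 8096/8920 TCP, 1900/7359 UDP
+#     group = "media";       # share libraries with other services
+#   };
+#   users.groups.media = { };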
diff --git a/nixpkgs/nixos/modules/services/misc/jellyseerr.nix b/nixpkgs/nixos/modules/services/misc/jellyseerr.nix
new file mode 100644
index 000000000000..31e0c5beb673
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/jellyseerr.nix
@@ -0,0 +1,62 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.jellyseerr;
+in
+{
+  meta.maintainers = [ maintainers.camillemndn ];
+
+  options.services.jellyseerr = {
+    enable = mkEnableOption (mdDoc ''Jellyseerr, a requests manager for Jellyfin'');
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''Open port in the firewall for the Jellyseerr web interface.'';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 5055;
+      description = mdDoc ''The port which the Jellyseerr web UI should listen to.'';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.jellyseerr = {
+      description = "Jellyseerr, a requests manager for Jellyfin";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment.PORT = toString cfg.port;
+      serviceConfig = {
+        Type = "exec";
+        StateDirectory = "jellyseerr";
+        WorkingDirectory = "${pkgs.jellyseerr}/libexec/jellyseerr/deps/jellyseerr";
+        DynamicUser = true;
+        ExecStart = "${pkgs.jellyseerr}/bin/jellyseerr";
+        BindPaths = [ "/var/lib/jellyseerr/:${pkgs.jellyseerr}/libexec/jellyseerr/deps/jellyseerr/config/" ];
+        Restart = "on-failure";
+        ProtectHome = true;
+        ProtectSystem = "strict";
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        NoNewPrivileges = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        PrivateMounts = true;
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/klipper.nix b/nixpkgs/nixos/modules/services/misc/klipper.nix
new file mode 100644
index 000000000000..9eb2fdb46593
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/klipper.nix
@@ -0,0 +1,242 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.klipper;
+  format = pkgs.formats.ini {
+    # https://github.com/NixOS/nixpkgs/pull/121613#issuecomment-885241996
+    listToValue = l:
+      if builtins.length l == 1 then generators.mkValueStringDefault { } (head l)
+      else lib.concatMapStrings (s: "\n  ${generators.mkValueStringDefault {} s}") l;
+    mkKeyValue = generators.mkKeyValueDefault { } ":";
+  };
+in
+{
+  ##### interface
+  options = {
+    services.klipper = {
+      enable = mkEnableOption (lib.mdDoc "Klipper, the 3D printer firmware");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.klipper;
+        defaultText = literalExpression "pkgs.klipper";
+        description = lib.mdDoc "The Klipper package.";
+      };
+
+      logFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/lib/klipper/klipper.log";
+        description = lib.mdDoc ''
+          Path of the file Klipper should log to.
+          If `null`, it logs to stdout, which is not recommended by upstream.
+        '';
+      };
+
+      inputTTY = mkOption {
+        type = types.path;
+        default = "/run/klipper/tty";
+        description = lib.mdDoc "Path of the virtual printer symlink to create.";
+      };
+
+      apiSocket = mkOption {
+        type = types.nullOr types.path;
+        default = "/run/klipper/api";
+        description = lib.mdDoc "Path of the API socket to create.";
+      };
+
+      mutableConfig = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Whether to copy the config to a mutable directory instead of using the one directly from the nix store.
+          This will only copy the config if the file at `services.klipper.mutableConfigPath` doesn't exist.
+        '';
+      };
+
+      mutableConfigFolder = mkOption {
+        type = types.path;
+        default = "/var/lib/klipper";
+        description = lib.mdDoc "Path to mutable Klipper config file.";
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to default Klipper config.
+        '';
+      };
+
+      octoprintIntegration = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Allows Octoprint to control Klipper.";
+      };
+
+      user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          User account under which Klipper runs.
+
+          If null is specified (default), a temporary user will be created by systemd.
+        '';
+      };
+
+      group = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Group account under which Klipper runs.
+
+          If null is specified (default), a temporary user will be created by systemd.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.nullOr format.type;
+        default = null;
+        description = lib.mdDoc ''
+          Configuration for Klipper. See the [documentation](https://www.klipper3d.org/Overview.html#configuration-and-tuning-guides)
+          for supported values.
+        '';
+      };
+
+      firmwares = mkOption {
+        description = lib.mdDoc "Firmwares klipper should manage";
+        default = { };
+        type = with types; attrsOf
+          (submodule {
+            options = {
+              enable = mkEnableOption (lib.mdDoc ''
+                building of firmware for manual flashing
+              '');
+              enableKlipperFlash = mkEnableOption (lib.mdDoc ''
+                flashing scripts for the firmware. This will add `klipper-flash-$mcu` scripts to your environment which can be called to flash the firmware.
+                Please check the configs at [klipper](https://github.com/Klipper3d/klipper/tree/master/config) to see whether your board supports flashing via `make flash`
+              '');
+              serial = mkOption {
+                type = types.nullOr path;
+                description = lib.mdDoc "Path to serial port this printer is connected to. Leave `null` to derive it from `service.klipper.settings`.";
+              };
+              configFile = mkOption {
+                type = path;
+                description = lib.mdDoc "Path to firmware config which is generated using `klipper-genconf`";
+              };
+            };
+          });
+      };
+    };
+  };
+
+  ##### implementation
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.octoprintIntegration -> config.services.octoprint.enable;
+        message = "Option services.klipper.octoprintIntegration requires Octoprint to be enabled on this system. Please enable services.octoprint to use it.";
+      }
+      {
+        assertion = cfg.user != null -> cfg.group != null;
+        message = "Option services.klipper.group is not set when services.klipper.user is specified.";
+      }
+      {
+        assertion = cfg.settings != null -> foldl (a: b: a && b) true (mapAttrsToList (mcu: _: mcu != null -> (hasAttrByPath [ "${mcu}" "serial" ] cfg.settings)) cfg.firmwares);
+        message = "Option services.klipper.settings.$mcu.serial must be set when settings.klipper.firmware.$mcu is specified";
+      }
+      {
+        assertion = (cfg.configFile != null) != (cfg.settings != null);
+        message = "You need to either specify services.klipper.settings or services.klipper.configFile.";
+      }
+    ];
+
+    environment.etc = mkIf (!cfg.mutableConfig) {
+      "klipper.cfg".source = if cfg.settings != null then format.generate "klipper.cfg" cfg.settings else cfg.configFile;
+    };
+
+    services.klipper = mkIf cfg.octoprintIntegration {
+      user = config.services.octoprint.user;
+      group = config.services.octoprint.group;
+    };
+
+    systemd.services.klipper =
+      let
+        klippyArgs = "--input-tty=${cfg.inputTTY}"
+          + optionalString (cfg.apiSocket != null) " --api-server=${cfg.apiSocket}"
+          + optionalString (cfg.logFile != null) " --logfile=${cfg.logFile}"
+        ;
+        printerConfigPath =
+          if cfg.mutableConfig
+          then cfg.mutableConfigFolder + "/printer.cfg"
+          else "/etc/klipper.cfg";
+        printerConfigFile =
+          if cfg.settings != null
+          then format.generate "klipper.cfg" cfg.settings
+          else cfg.configFile;
+      in
+      {
+        description = "Klipper 3D Printer Firmware";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        preStart = ''
+          mkdir -p ${cfg.mutableConfigFolder}
+          ${lib.optionalString (cfg.mutableConfig) ''
+            [ -e ${printerConfigPath} ] || {
+              cp ${printerConfigFile} ${printerConfigPath}
+              chmod +w ${printerConfigPath}
+            }
+          ''}
+          mkdir -p ${cfg.mutableConfigFolder}/gcodes
+        '';
+
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/klippy ${klippyArgs} ${printerConfigPath}";
+          RuntimeDirectory = "klipper";
+          StateDirectory = "klipper";
+          SupplementaryGroups = [ "dialout" ];
+          WorkingDirectory = "${cfg.package}/lib";
+          OOMScoreAdjust = "-999";
+          CPUSchedulingPolicy = "rr";
+          CPUSchedulingPriority = 99;
+          IOSchedulingClass = "realtime";
+          IOSchedulingPriority = 0;
+          UMask = "0002";
+        } // (if cfg.user != null then {
+          Group = cfg.group;
+          User = cfg.user;
+        } else {
+          DynamicUser = true;
+          User = "klipper";
+        });
+      };
+
+    environment.systemPackages =
+      with pkgs;
+      let
+        default = a: b: if a != null then a else b;
+        firmwares = filterAttrs (n: v: v != null) (mapAttrs
+          (mcu: { enable, enableKlipperFlash, configFile, serial }:
+            if enable then
+              pkgs.klipper-firmware.override
+                {
+                  mcu = lib.strings.sanitizeDerivationName mcu;
+                  firmwareConfig = configFile;
+                } else null)
+          cfg.firmwares);
+        firmwareFlasher = mapAttrsToList
+          (mcu: firmware: pkgs.klipper-flash.override {
+            mcu = lib.strings.sanitizeDerivationName mcu;
+            klipper-firmware = firmware;
+            flashDevice = default cfg.firmwares."${mcu}".serial cfg.settings."${mcu}".serial;
+            firmwareConfig = cfg.firmwares."${mcu}".configFile;
+          })
+          (filterAttrs (mcu: firmware: cfg.firmwares."${mcu}".enableKlipperFlash) firmwares);
+      in
+      [ klipper-genconf ] ++ firmwareFlasher ++ attrValues firmwares;
+  };
+  meta.maintainers = [
+    maintainers.cab404
+  ];
+}
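+
+# A hedged usage sketch (comment only): a printer configured through the
+# `settings` option (so `configFile` must stay null, per the assertion above).
+# The MCU serial path and printer geometry are illustrative assumptions, not a
+# complete, working printer config.
+#
+#   services.klipper = {
+#     enable = true;
+#     settings = {
+#       mcu.serial = "/dev/serial/by-id/usb-Klipper_stm32-if00";
+#       printer = {
+#         kinematics = "cartesian";
+#         max_velocity = 300;
+#         max_accel = 3000;
+#       };
+#     };
+#   };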
diff --git a/nixpkgs/nixos/modules/services/misc/languagetool.nix b/nixpkgs/nixos/modules/services/misc/languagetool.nix
new file mode 100644
index 000000000000..9adf792373b5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/languagetool.nix
@@ -0,0 +1,78 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.languagetool;
+  settingsFormat = pkgs.formats.javaProperties {};
+in {
+  options.services.languagetool = {
+    enable = mkEnableOption (mdDoc "the LanguageTool server");
+
+    port = mkOption {
+      type = types.port;
+      default = 8081;
+      example = 8081;
+      description = mdDoc ''
+        Port on which LanguageTool listens.
+      '';
+    };
+
+    public = mkEnableOption (mdDoc "access from anywhere (rather than just localhost)");
+
+    allowOrigin = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "https://my-website.org";
+      description = mdDoc ''
+        Set the Access-Control-Allow-Origin header in the HTTP response,
+        used for direct (non-proxy) JavaScript-based access from browsers.
+        `null` to allow access from all sites.
+      '';
+    };
+
+    settings = lib.mkOption {
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+
+        options.cacheSize = mkOption {
+          type = types.ints.unsigned;
+          default = 1000;
+          apply = toString;
+          description = mdDoc "Number of sentences cached.";
+        };
+      };
+      default = {};
+      description = mdDoc ''
+        Configuration file options for LanguageTool, see
+        'languagetool-http-server --help'
+        for supported settings.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.languagetool =  {
+      description = "LanguageTool HTTP server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        User = "languagetool";
+        Group = "languagetool";
+        CapabilityBoundingSet = [ "" ];
+        RestrictNamespaces = [ "" ];
+        SystemCallFilter = [ "@system-service" "~ @privileged" ];
+        ProtectHome = "yes";
+        ExecStart = ''
+          ${pkgs.languagetool}/bin/languagetool-http-server \
+            --port ${toString cfg.port} \
+            ${optionalString cfg.public "--public"} \
+            ${optionalString (cfg.allowOrigin != null) "--allow-origin ${cfg.allowOrigin}"} \
+            "--config" ${settingsFormat.generate "languagetool.conf" cfg.settings}
+          '';
+      };
+    };
+  };
+}
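+
+# A hedged usage sketch (comment only): serving LanguageTool to a browser
+# extension on another machine. The origin URL is an illustrative assumption;
+# extra keys under `settings` are passed through to the properties file.
+#
+#   services.languagetool = {
+#     enable = true;
+#     port = 8081;
+#     public = true;                        # listen on all interfaces
+#     allowOrigin = "https://my-website.org";
+#     settings.cacheSize = 2000;
+#   };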
diff --git a/nixpkgs/nixos/modules/services/misc/leaps.nix b/nixpkgs/nixos/modules/services/misc/leaps.nix
new file mode 100644
index 000000000000..5522223ecc97
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/leaps.nix
@@ -0,0 +1,62 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.leaps;
+  stateDir = "/var/lib/leaps/";
+in
+{
+  options = {
+    services.leaps = {
+      enable = mkEnableOption (lib.mdDoc "leaps");
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = lib.mdDoc "A port where leaps listens for incoming http requests";
+      };
+      address = mkOption {
+        default = "";
+        type = types.str;
+        example = "127.0.0.1";
+        description = lib.mdDoc "Hostname or IP-address to listen to. By default it will listen on all interfaces.";
+      };
+      path = mkOption {
+        default = "/";
+        type = types.path;
+        description = lib.mdDoc "Subdirectory used for reverse proxy setups";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users = {
+      users.leaps = {
+        uid             = config.ids.uids.leaps;
+        description     = "Leaps server user";
+        group           = "leaps";
+        home            = stateDir;
+        createHome      = true;
+      };
+
+      groups.leaps = {
+        gid = config.ids.gids.leaps;
+      };
+    };
+
+    systemd.services.leaps = {
+      description   = "leaps service";
+      wantedBy      = [ "multi-user.target" ];
+      after         = [ "network.target" ];
+
+      serviceConfig = {
+        User = "leaps";
+        Group = "leaps";
+        Restart = "on-failure";
+        WorkingDirectory = stateDir;
+        PrivateTmp = true;
+        ExecStart = "${pkgs.leaps}/bin/leaps -path ${toString cfg.path} -address ${cfg.address}:${toString cfg.port}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/libreddit.nix b/nixpkgs/nixos/modules/services/misc/libreddit.nix
new file mode 100644
index 000000000000..fd58928d2821
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/libreddit.nix
@@ -0,0 +1,91 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.libreddit;
+
+  args = concatStringsSep " " ([
+    "--port ${toString cfg.port}"
+    "--address ${cfg.address}"
+  ]);
+in
+{
+  options = {
+    services.libreddit = {
+      enable = mkEnableOption (lib.mdDoc "Private front-end for Reddit");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.libreddit;
+        defaultText = literalExpression "pkgs.libreddit";
+        description = lib.mdDoc "Libreddit package to use.";
+      };
+
+      address = mkOption {
+        default = "0.0.0.0";
+        example = "127.0.0.1";
+        type =  types.str;
+        description = lib.mdDoc "The address to listen on";
+      };
+
+      port = mkOption {
+        default = 8080;
+        example = 8000;
+        type = types.port;
+        description = lib.mdDoc "The port to listen on";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the libreddit web interface";
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.libreddit = {
+        description = "Private front-end for Reddit";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = {
+          DynamicUser = true;
+          ExecStart = "${cfg.package}/bin/libreddit ${args}";
+          AmbientCapabilities = lib.mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+          Restart = "on-failure";
+          RestartSec = "2s";
+          # Hardening
+          CapabilityBoundingSet = if (cfg.port < 1024) then [ "CAP_NET_BIND_SERVICE" ] else [ "" ];
+          DeviceAllow = [ "" ];
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          PrivateDevices = true;
+          # A private user cannot have process capabilities on the host's user
+          # namespace and thus CAP_NET_BIND_SERVICE has no effect.
+          PrivateUsers = (cfg.port >= 1024);
+          ProcSubset = "pid";
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+          UMask = "0077";
+        };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/lidarr.nix b/nixpkgs/nixos/modules/services/misc/lidarr.nix
new file mode 100644
index 000000000000..92b00054bdff
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/lidarr.nix
@@ -0,0 +1,89 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.lidarr;
+in
+{
+  options = {
+    services.lidarr = {
+      enable = mkEnableOption (lib.mdDoc "Lidarr");
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/lidarr/.config/Lidarr";
+        description = lib.mdDoc "The directory where Lidarr stores its data files.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.lidarr;
+        defaultText = literalExpression "pkgs.lidarr";
+        description = lib.mdDoc "The Lidarr package to use";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for Lidarr
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "lidarr";
+        description = lib.mdDoc ''
+          User account under which Lidarr runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "lidarr";
+        description = lib.mdDoc ''
+          Group under which Lidarr runs.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.lidarr = {
+      description = "Lidarr";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/Lidarr -nobrowser -data='${cfg.dataDir}'";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 8686 ];
+    };
+
+    users.users = mkIf (cfg.user == "lidarr") {
+      lidarr = {
+        group = cfg.group;
+        home = "/var/lib/lidarr";
+        uid = config.ids.uids.lidarr;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "lidarr") {
+      lidarr = {
+        gid = config.ids.gids.lidarr;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/lifecycled.nix b/nixpkgs/nixos/modules/services/misc/lifecycled.nix
new file mode 100644
index 000000000000..fb5cabb4f038
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/lifecycled.nix
@@ -0,0 +1,164 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.lifecycled;
+
+  # TODO: Add the ability to extend this with an rfc 42-like interface.
+  # In the meantime, one can modify the environment (as
+  # long as it's not overriding anything from here) with
+  # systemd.services.lifecycled.serviceConfig.Environment
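+  # For example, a (hypothetical) override in one's own configuration might look like:
+  #   systemd.services.lifecycled.serviceConfig.Environment = [ "AWS_PROFILE=production" ];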
+  configFile = pkgs.writeText "lifecycled" ''
+    LIFECYCLED_HANDLER=${cfg.handler}
+    ${lib.optionalString (cfg.cloudwatchGroup != null) "LIFECYCLED_CLOUDWATCH_GROUP=${cfg.cloudwatchGroup}"}
+    ${lib.optionalString (cfg.cloudwatchStream != null) "LIFECYCLED_CLOUDWATCH_STREAM=${cfg.cloudwatchStream}"}
+    ${lib.optionalString cfg.debug "LIFECYCLED_DEBUG=${lib.boolToString cfg.debug}"}
+    ${lib.optionalString (cfg.instanceId != null) "LIFECYCLED_INSTANCE_ID=${cfg.instanceId}"}
+    ${lib.optionalString cfg.json "LIFECYCLED_JSON=${lib.boolToString cfg.json}"}
+    ${lib.optionalString cfg.noSpot "LIFECYCLED_NO_SPOT=${lib.boolToString cfg.noSpot}"}
+    ${lib.optionalString (cfg.snsTopic != null) "LIFECYCLED_SNS_TOPIC=${cfg.snsTopic}"}
+    ${lib.optionalString (cfg.awsRegion != null) "AWS_REGION=${cfg.awsRegion}"}
+  '';
+in
+{
+  meta.maintainers = with maintainers; [ cole-h grahamc ];
+
+  options = {
+    services.lifecycled = {
+      enable = mkEnableOption (lib.mdDoc "lifecycled");
+
+      queueCleaner = {
+        enable = mkEnableOption (lib.mdDoc "lifecycled-queue-cleaner");
+
+        frequency = mkOption {
+          type = types.str;
+          default = "hourly";
+          description = lib.mdDoc ''
+            How often to trigger the queue cleaner.
+
+            NOTE: This string should be a valid value for a systemd
+            timer's `OnCalendar` configuration. See
+            {manpage}`systemd.timer(5)`
+            for more information.
+          '';
+        };
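+        # For example: "daily", "weekly", or a full calendar expression such as
+        # "*-*-* 03:00:00"; see systemd.time(7) for the accepted syntax.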
+
+        parallel = mkOption {
+          type = types.ints.unsigned;
+          default = 20;
+          description = lib.mdDoc ''
+            The number of parallel deletes to run.
+          '';
+        };
+      };
+
+      instanceId = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The instance ID for which to listen for events.
+        '';
+      };
+
+      snsTopic = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The SNS topic that receives events.
+        '';
+      };
+
+      noSpot = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Disable the spot termination listener.
+        '';
+      };
+
+      handler = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          The script to invoke to handle events.
+        '';
+      };
+
+      json = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable JSON logging.
+        '';
+      };
+
+      cloudwatchGroup = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Write logs to a specific CloudWatch Logs group.
+        '';
+      };
+
+      cloudwatchStream = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Write logs to a specific CloudWatch Logs stream. Defaults to the instance ID.
+        '';
+      };
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable debugging information.
+        '';
+      };
+
+      # XXX: Can be removed if / when
+      # https://github.com/buildkite/lifecycled/pull/91 is merged.
+      awsRegion = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The region used for accessing AWS services.
+        '';
+      };
+    };
+  };
+
+  ### Implementation ###
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      environment.etc."lifecycled".source = configFile;
+
+      systemd.packages = [ pkgs.lifecycled ];
+      systemd.services.lifecycled = {
+        wantedBy = [ "network-online.target" ];
+        restartTriggers = [ configFile ];
+      };
+    })
+
+    (mkIf cfg.queueCleaner.enable {
+      systemd.services.lifecycled-queue-cleaner = {
+        description = "Lifecycle Daemon Queue Cleaner";
+        environment = optionalAttrs (cfg.awsRegion != null) { AWS_REGION = cfg.awsRegion; };
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkgs.lifecycled}/bin/lifecycled-queue-cleaner -parallel ${toString cfg.queueCleaner.parallel}";
+        };
+      };
+
+      systemd.timers.lifecycled-queue-cleaner = {
+        description = "Lifecycle Daemon Queue Cleaner Timer";
+        wantedBy = [ "timers.target" ];
+        after = [ "network-online.target" ];
+        timerConfig = {
+          Unit = "lifecycled-queue-cleaner.service";
+          OnCalendar = "${cfg.queueCleaner.frequency}";
+        };
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/logkeys.nix b/nixpkgs/nixos/modules/services/misc/logkeys.nix
new file mode 100644
index 000000000000..75d073a0c94b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/logkeys.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.logkeys;
+in {
+  options.services.logkeys = {
+    enable = mkEnableOption (lib.mdDoc "logkeys service");
+
+    device = mkOption {
+      description = lib.mdDoc "Use the given device as keyboard input event device instead of /dev/input/eventX default.";
+      default = null;
+      type = types.nullOr types.str;
+      example = "/dev/input/event15";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.logkeys = {
+      description = "LogKeys Keylogger Daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.logkeys}/bin/logkeys -s${lib.optionalString (cfg.device != null) " -d ${cfg.device}"}";
+        ExecStop = "${pkgs.logkeys}/bin/logkeys -k";
+        Type = "forking";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/mame.nix b/nixpkgs/nixos/modules/services/misc/mame.nix
new file mode 100644
index 000000000000..6e9d2fd26cff
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/mame.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mame;
+  mame = "mame${lib.optionalString pkgs.stdenv.is64bit "64"}";
+in
+{
+  options = {
+    services.mame = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to set up a TUN/TAP Ethernet interface for the MAME emulator.
+        '';
+      };
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          User account that runs the MAME binary.
+        '';
+      };
+      hostAddr = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          IP address of the host system. Usually the address of the main network
+          adapter or of the adapter through which you get an internet connection.
+        '';
+        example = "192.168.31.156";
+      };
+      emuAddr = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          IP address of the guest system, i.e. the one configured inside the guest OS running
+          under MAME. Should be on the same subnet as {option}`services.mame.hostAddr`.
+        '';
+        example = "192.168.31.155";
+      };
+    };
+  };
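+
+  # A usage sketch; the addresses follow the option examples above and the user name is hypothetical:
+  #   services.mame = {
+  #     enable = true;
+  #     user = "player";
+  #     hostAddr = "192.168.31.156";
+  #     emuAddr = "192.168.31.155";
+  #   };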
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.mame ];
+
+    security.wrappers."${mame}" = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_net_admin,cap_net_raw+eip";
+      source = "${pkgs.mame}/bin/${mame}";
+    };
+
+    systemd.services.mame = {
+      description = "MAME TUN/TAP Ethernet interface";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.iproute2 ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "${pkgs.mame}/bin/taputil.sh -c ${cfg.user} ${cfg.emuAddr} ${cfg.hostAddr} -";
+        ExecStop = "${pkgs.mame}/bin/taputil.sh -d ${cfg.user}";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/mbpfan.nix b/nixpkgs/nixos/modules/services/misc/mbpfan.nix
new file mode 100644
index 000000000000..8f64fb2d9c52
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/mbpfan.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  cfg = config.services.mbpfan;
+  verbose = optionalString cfg.verbose "v";
+  settingsFormat = pkgs.formats.ini {};
+  settingsFile = settingsFormat.generate "mbpfan.ini" cfg.settings;
+
+in {
+  options.services.mbpfan = {
+    enable = mkEnableOption (lib.mdDoc "mbpfan, fan controller daemon for Apple Macs and MacBooks");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.mbpfan;
+      defaultText = literalExpression "pkgs.mbpfan";
+      description = lib.mdDoc "The package used for the mbpfan daemon.";
+    };
+
+    verbose = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "If true, sets the log level to verbose.";
+    };
+
+    aggressive = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "If true, favors higher default fan speeds.";
+    };
+
+    settings = mkOption {
+      default = {};
+      description = lib.mdDoc "INI configuration for Mbpfan.";
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+
+        options.general.low_temp = mkOption {
+          type = types.int;
+          default = (if cfg.aggressive then 55 else 63);
+          defaultText = literalExpression "55";
+          description = lib.mdDoc "If temperature is below this, fans will run at minimum speed.";
+        };
+        options.general.high_temp = mkOption {
+          type = types.int;
+          default = (if cfg.aggressive then 58 else 66);
+          defaultText = literalExpression "58";
+          description = lib.mdDoc "If temperature is above this, fan speed will gradually increase.";
+        };
+        options.general.max_temp = mkOption {
+          type = types.int;
+          default = (if cfg.aggressive then 78 else 86);
+          defaultText = literalExpression "78";
+          description = lib.mdDoc "If temperature is above this, fans will run at maximum speed.";
+        };
+        options.general.polling_interval = mkOption {
+          type = types.int;
+          default = 1;
+          description = lib.mdDoc "The polling interval.";
+        };
+      };
+    };
+  };
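+
+    # An example override with hypothetical values; min_fan1_speed and max_fan1_speed are
+    # freeform settings, also reachable through the renamed options declared below:
+    #   services.mbpfan.settings.general = {
+    #     min_fan1_speed = 2000;
+    #     max_fan1_speed = 6200;
+    #   };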
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "mbpfan" "pollingInterval" ] [ "services" "mbpfan" "settings" "general" "polling_interval" ])
+    (mkRenamedOptionModule [ "services" "mbpfan" "maxTemp" ] [ "services" "mbpfan" "settings" "general" "max_temp" ])
+    (mkRenamedOptionModule [ "services" "mbpfan" "lowTemp" ] [ "services" "mbpfan" "settings" "general" "low_temp" ])
+    (mkRenamedOptionModule [ "services" "mbpfan" "highTemp" ] [ "services" "mbpfan" "settings" "general" "high_temp" ])
+    (mkRenamedOptionModule [ "services" "mbpfan" "minFanSpeed" ] [ "services" "mbpfan" "settings" "general" "min_fan1_speed" ])
+    (mkRenamedOptionModule [ "services" "mbpfan" "maxFanSpeed" ] [ "services" "mbpfan" "settings" "general" "max_fan1_speed" ])
+  ];
+
+  config = mkIf cfg.enable {
+    boot.kernelModules = [ "coretemp" "applesmc" ];
+    environment.systemPackages = [ cfg.package ];
+    environment.etc."mbpfan.conf".source = settingsFile;
+
+    systemd.services.mbpfan = {
+      description = "A fan manager daemon for MacBook Pro";
+      wantedBy = [ "sysinit.target" ];
+      after = [ "syslog.target" "sysinit.target" ];
+      restartTriggers = [ config.environment.etc."mbpfan.conf".source ];
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cfg.package}/bin/mbpfan -f${verbose}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        PIDFile = "/run/mbpfan.pid";
+        Restart = "always";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/mediatomb.nix b/nixpkgs/nixos/modules/services/misc/mediatomb.nix
new file mode 100644
index 000000000000..335b1b684b1a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/mediatomb.nix
@@ -0,0 +1,396 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  gid = config.ids.gids.mediatomb;
+  cfg = config.services.mediatomb;
+  opt = options.services.mediatomb;
+  name = cfg.package.pname;
+  pkg = cfg.package;
+  optionYesNo = option: if option then "yes" else "no";
+  # configuration on media directory
+  mediaDirectory = {
+    options = {
+      path = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Absolute directory path to the media directory to index.
+        '';
+      };
+      recursive = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether the indexation must take place recursively or not.";
+      };
+      hidden-files = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to index the hidden files or not.";
+      };
+    };
+  };
+  toMediaDirectory = d: "<directory location=\"${d.path}\" mode=\"inotify\" recursive=\"${optionYesNo d.recursive}\" hidden-files=\"${optionYesNo d.hidden-files}\" />\n";
+
+  transcodingConfig = if cfg.transcoding then with pkgs; ''
+    <transcoding enabled="yes">
+      <mimetype-profile-mappings>
+        <transcode mimetype="video/x-flv" using="vlcmpeg" />
+        <transcode mimetype="application/ogg" using="vlcmpeg" />
+        <transcode mimetype="audio/ogg" using="ogg2mp3" />
+        <transcode mimetype="audio/x-flac" using="oggflac2raw"/>
+      </mimetype-profile-mappings>
+      <profiles>
+        <profile name="ogg2mp3" enabled="no" type="external">
+          <mimetype>audio/mpeg</mimetype>
+          <accept-url>no</accept-url>
+          <first-resource>yes</first-resource>
+          <accept-ogg-theora>no</accept-ogg-theora>
+          <agent command="${ffmpeg}/bin/ffmpeg" arguments="-y -i %in -f mp3 %out" />
+          <buffer size="1048576" chunk-size="131072" fill-size="262144" />
+        </profile>
+        <profile name="vlcmpeg" enabled="no" type="external">
+          <mimetype>video/mpeg</mimetype>
+          <accept-url>yes</accept-url>
+          <first-resource>yes</first-resource>
+          <accept-ogg-theora>yes</accept-ogg-theora>
+          <agent command="${libsForQt5.vlc}/bin/vlc"
+            arguments="-I dummy %in --sout #transcode{venc=ffmpeg,vcodec=mp2v,vb=4096,fps=25,aenc=ffmpeg,acodec=mpga,ab=192,samplerate=44100,channels=2}:standard{access=file,mux=ps,dst=%out} vlc:quit" />
+          <buffer size="14400000" chunk-size="512000" fill-size="120000" />
+        </profile>
+      </profiles>
+    </transcoding>
+'' else ''
+    <transcoding enabled="no">
+    </transcoding>
+'';
+
+  configText = optionalString (! cfg.customCfg) ''
+<?xml version="1.0" encoding="UTF-8"?>
+<config version="2" xmlns="http://mediatomb.cc/config/2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://mediatomb.cc/config/2 http://mediatomb.cc/config/2.xsd">
+    <server>
+      <ui enabled="yes" show-tooltips="yes">
+        <accounts enabled="no" session-timeout="30">
+          <account user="${name}" password="${name}"/>
+        </accounts>
+      </ui>
+      <name>${cfg.serverName}</name>
+      <udn>uuid:${cfg.uuid}</udn>
+      <home>${cfg.dataDir}</home>
+      <interface>${cfg.interface}</interface>
+      <webroot>${pkg}/share/${name}/web</webroot>
+      <pc-directory upnp-hide="${optionYesNo cfg.pcDirectoryHide}"/>
+      <storage>
+        <sqlite3 enabled="yes">
+          <database-file>${name}.db</database-file>
+        </sqlite3>
+      </storage>
+      <protocolInfo extend="${optionYesNo cfg.ps3Support}"/>
+      ${optionalString cfg.dsmSupport ''
+      <custom-http-headers>
+        <add header="X-User-Agent: redsonic"/>
+      </custom-http-headers>
+
+      <manufacturerURL>redsonic.com</manufacturerURL>
+      <modelNumber>105</modelNumber>
+      ''}
+        ${optionalString cfg.tg100Support ''
+      <upnp-string-limit>101</upnp-string-limit>
+      ''}
+      <extended-runtime-options>
+        <mark-played-items enabled="yes" suppress-cds-updates="yes">
+          <string mode="prepend">*</string>
+          <mark>
+            <content>video</content>
+          </mark>
+        </mark-played-items>
+      </extended-runtime-options>
+    </server>
+    <import hidden-files="no">
+      <autoscan use-inotify="auto">
+      ${concatMapStrings toMediaDirectory cfg.mediaDirectories}
+      </autoscan>
+      <scripting script-charset="UTF-8">
+        <common-script>${pkg}/share/${name}/js/common.js</common-script>
+        <playlist-script>${pkg}/share/${name}/js/playlists.js</playlist-script>
+        <virtual-layout type="builtin">
+          <import-script>${pkg}/share/${name}/js/import.js</import-script>
+        </virtual-layout>
+      </scripting>
+      <mappings>
+        <extension-mimetype ignore-unknown="no">
+          <map from="mp3" to="audio/mpeg"/>
+          <map from="ogx" to="application/ogg"/>
+          <map from="ogv" to="video/ogg"/>
+          <map from="oga" to="audio/ogg"/>
+          <map from="ogg" to="audio/ogg"/>
+          <map from="ogm" to="video/ogg"/>
+          <map from="asf" to="video/x-ms-asf"/>
+          <map from="asx" to="video/x-ms-asf"/>
+          <map from="wma" to="audio/x-ms-wma"/>
+          <map from="wax" to="audio/x-ms-wax"/>
+          <map from="wmv" to="video/x-ms-wmv"/>
+          <map from="wvx" to="video/x-ms-wvx"/>
+          <map from="wm" to="video/x-ms-wm"/>
+          <map from="wmx" to="video/x-ms-wmx"/>
+          <map from="m3u" to="audio/x-mpegurl"/>
+          <map from="pls" to="audio/x-scpls"/>
+          <map from="flv" to="video/x-flv"/>
+          <map from="mkv" to="video/x-matroska"/>
+          <map from="mka" to="audio/x-matroska"/>
+          ${optionalString cfg.ps3Support ''
+          <map from="avi" to="video/divx"/>
+          ''}
+          ${optionalString cfg.dsmSupport ''
+          <map from="avi" to="video/avi"/>
+          ''}
+        </extension-mimetype>
+        <mimetype-upnpclass>
+          <map from="audio/*" to="object.item.audioItem.musicTrack"/>
+          <map from="video/*" to="object.item.videoItem"/>
+          <map from="image/*" to="object.item.imageItem"/>
+        </mimetype-upnpclass>
+        <mimetype-contenttype>
+          <treat mimetype="audio/mpeg" as="mp3"/>
+          <treat mimetype="application/ogg" as="ogg"/>
+          <treat mimetype="audio/ogg" as="ogg"/>
+          <treat mimetype="audio/x-flac" as="flac"/>
+          <treat mimetype="audio/x-ms-wma" as="wma"/>
+          <treat mimetype="audio/x-wavpack" as="wv"/>
+          <treat mimetype="image/jpeg" as="jpg"/>
+          <treat mimetype="audio/x-mpegurl" as="playlist"/>
+          <treat mimetype="audio/x-scpls" as="playlist"/>
+          <treat mimetype="audio/x-wav" as="pcm"/>
+          <treat mimetype="audio/L16" as="pcm"/>
+          <treat mimetype="video/x-msvideo" as="avi"/>
+          <treat mimetype="video/mp4" as="mp4"/>
+          <treat mimetype="audio/mp4" as="mp4"/>
+          <treat mimetype="application/x-iso9660" as="dvd"/>
+          <treat mimetype="application/x-iso9660-image" as="dvd"/>
+        </mimetype-contenttype>
+      </mappings>
+      <online-content>
+        <YouTube enabled="no" refresh="28800" update-at-start="no" purge-after="604800" racy-content="exclude" format="mp4" hd="no">
+          <favorites user="${name}"/>
+          <standardfeed feed="most_viewed" time-range="today"/>
+          <playlists user="${name}"/>
+          <uploads user="${name}"/>
+          <standardfeed feed="recently_featured" time-range="today"/>
+        </YouTube>
+      </online-content>
+    </import>
+    ${transcodingConfig}
+  </config>
+'';
+  defaultFirewallRules = {
+    # udp 1900 port needs to be opened for SSDP (not configurable within
+    # mediatomb/gerbera) cf.
+    # https://docs.gerbera.io/en/latest/run.html?highlight=udp%20port#network-setup
+    allowedUDPPorts = [ 1900 cfg.port ];
+    allowedTCPPorts = [ cfg.port ];
+  };
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.mediatomb = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Gerbera/Mediatomb DLNA server.
+        '';
+      };
+
+      serverName = mkOption {
+        type = types.str;
+        default = "Gerbera (Mediatomb)";
+        description = lib.mdDoc ''
+          How to identify the server on the network.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.gerbera;
+        defaultText = literalExpression "pkgs.gerbera";
+        description = lib.mdDoc ''
+          Underlying package to be used with the module.
+        '';
+      };
+
+      ps3Support = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable PS3-specific tweaks.
+          WARNING: incompatible with DSM 320 support.
+        '';
+      };
+
+      dsmSupport = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable D-Link DSM 320 specific tweaks.
+          WARNING: incompatible with PS3 support.
+        '';
+      };
+
+      tg100Support = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Telegent TG100 specific tweaks.
+        '';
+      };
+
+      transcoding = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable transcoding.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/${name}";
+        defaultText = literalExpression ''"/var/lib/''${config.${opt.package}.pname}"'';
+        description = lib.mdDoc ''
+          The directory where Gerbera/Mediatomb stores its state, data, etc.
+        '';
+      };
+
+      pcDirectoryHide = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to hide the top-level PC Directory from UPnP clients.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "mediatomb";
+        description = lib.mdDoc "User account under which the service runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "mediatomb";
+        description = lib.mdDoc "Group account under which the service runs.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 49152;
+        description = lib.mdDoc ''
+          The network port to listen on.
+        '';
+      };
+
+      interface = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          A specific interface to bind to.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If false (the default), it is up to the user to declare the firewall rules.
+          If true, this opens UDP port 1900 (for SSDP) and, on both TCP and UDP, the port specified by
+          {option}`services.mediatomb.port`.
+
+          If the option {option}`services.mediatomb.interface` is set,
+          the firewall rules opened are dedicated to that interface. Otherwise,
+          those rules are opened globally.
+        '';
+      };
+
+      uuid = mkOption {
+        type = types.str;
+        default = "fdfc8a4e-a3ad-4c1d-b43d-a2eedb03a687";
+        description = lib.mdDoc ''
+          A UUID, unique on your network, to identify the server by.
+        '';
+      };
+
+      mediaDirectories = mkOption {
+        type = with types; listOf (submodule mediaDirectory);
+        default = [];
+        description = lib.mdDoc ''
+          Declare media directories to index.
+        '';
+        example = [
+          { path = "/data/pictures"; recursive = false; hidden-files = false; }
+          { path = "/data/audio"; recursive = true; hidden-files = false; }
+        ];
+      };
+
+      customCfg = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Allow the service to create and use its own config file inside the `dataDir` as
+          configured by {option}`services.mediatomb.dataDir`.
+          When disabled (the default), the service runs with the configuration generated from this module.
+          When enabled, no configuration is generated; Gerbera/Mediatomb then uses the
+          config.xml found in the configured `dataDir`, and it is up to the user to provide a
+          correct configuration file.
+        '';
+      };
+
+    };
+  };
+
+
+  ###### implementation
+
+  config = let binaryCommand = "${pkg}/bin/${name}";
+               interfaceFlag = optionalString ( cfg.interface != "") "--interface ${cfg.interface}";
+               configFlag = optionalString (! cfg.customCfg) "--config ${pkgs.writeText "config.xml" configText}";
+    in mkIf cfg.enable {
+    systemd.services.mediatomb = {
+      description = "${cfg.serverName} media Server";
+      # Gerbera might fail if the network interface is not available on startup
+      # https://github.com/gerbera/gerbera/issues/1324
+      after = [ "network.target" "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.ExecStart = "${binaryCommand} --port ${toString cfg.port} ${interfaceFlag} ${configFlag} --home ${cfg.dataDir}";
+      serviceConfig.User = cfg.user;
+      serviceConfig.Group = cfg.group;
+    };
+
+    users.groups = optionalAttrs (cfg.group == "mediatomb") {
+      mediatomb.gid = gid;
+    };
+
+    users.users = optionalAttrs (cfg.user == "mediatomb") {
+      mediatomb = {
+        isSystemUser = true;
+        group = cfg.group;
+        home = cfg.dataDir;
+        createHome = true;
+        description = "${name} DLNA Server User";
+      };
+    };
+
+    # Open firewall only if users enable it
+    networking.firewall = mkMerge [
+      (mkIf (cfg.openFirewall && cfg.interface != "") {
+        interfaces."${cfg.interface}" = defaultFirewallRules;
+      })
+      (mkIf (cfg.openFirewall && cfg.interface == "") defaultFirewallRules)
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/metabase.nix b/nixpkgs/nixos/modules/services/misc/metabase.nix
new file mode 100644
index 000000000000..883fa0b95911
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/metabase.nix
@@ -0,0 +1,103 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.metabase;
+
+  inherit (lib) mkEnableOption mkIf mkOption;
+  inherit (lib) optional optionalAttrs types;
+
+  dataDir = "/var/lib/metabase";
+
+in {
+
+  options = {
+
+    services.metabase = {
+      enable = mkEnableOption (lib.mdDoc "Metabase service");
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc ''
+            IP address that Metabase should listen on.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 3000;
+          description = lib.mdDoc ''
+            Listen port for Metabase.
+          '';
+        };
+      };
+
+      ssl = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to enable SSL (https) support.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8443;
+          description = lib.mdDoc ''
+            Listen port over SSL (https) for Metabase.
+          '';
+        };
+
+        keystore = mkOption {
+          type = types.nullOr types.path;
+          default = "${dataDir}/metabase.jks";
+          example = "/etc/secrets/keystore.jks";
+          description = lib.mdDoc ''
+            [Java KeyStore](https://www.digitalocean.com/community/tutorials/java-keytool-essentials-working-with-java-keystores) file containing the certificates.
+          '';
+        };
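+        # A keystore can be generated with the JDK's keytool, for example (alias and path are
+        # illustrative; the path matches the default above):
+        #   keytool -genkeypair -alias metabase -keyalg RSA -keystore /var/lib/metabase/metabase.jks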
+
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for Metabase.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.metabase = {
+      description = "Metabase server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      environment = {
+        MB_PLUGINS_DIR = "${dataDir}/plugins";
+        MB_DB_FILE = "${dataDir}/metabase.db";
+        MB_JETTY_HOST = cfg.listen.ip;
+        MB_JETTY_PORT = toString cfg.listen.port;
+      } // optionalAttrs (cfg.ssl.enable) {
+        MB_JETTY_SSL = "true";
+        MB_JETTY_SSL_PORT = toString cfg.ssl.port;
+        MB_JETTY_SSL_KEYSTORE = cfg.ssl.keystore;
+      };
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = baseNameOf dataDir;
+        ExecStart = "${pkgs.metabase}/bin/metabase";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ] ++ optional cfg.ssl.enable cfg.ssl.port;
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/moonraker.nix b/nixpkgs/nixos/modules/services/misc/moonraker.nix
new file mode 100644
index 000000000000..797e145c47a6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/moonraker.nix
@@ -0,0 +1,215 @@
+{ config, lib, options, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.moonraker;
+  pkg = cfg.package;
+  opt = options.services.moonraker;
+  format = pkgs.formats.ini {
+    # https://github.com/NixOS/nixpkgs/pull/121613#issuecomment-885241996
+    listToValue = l:
+      if builtins.length l == 1 then generators.mkValueStringDefault {} (head l)
+      else lib.concatMapStrings (s: "\n  ${generators.mkValueStringDefault {} s}") l;
+    mkKeyValue = generators.mkKeyValueDefault {} ":";
+  };
+
+  unifiedConfigDir = cfg.stateDir + "/config";
+in {
+  options = {
+    services.moonraker = {
+      enable = mkEnableOption (lib.mdDoc "Moonraker, an API web server for Klipper");
+
+      package = mkOption {
+        type = with types; nullOr package;
+        default = pkgs.moonraker;
+        defaultText = literalExpression "pkgs.moonraker";
+        example = literalExpression "pkgs.moonraker.override { useGpiod = true; }";
+        description = lib.mdDoc "Moonraker package to use";
+      };
+
+      klipperSocket = mkOption {
+        type = types.path;
+        default = config.services.klipper.apiSocket;
+        defaultText = literalExpression "config.services.klipper.apiSocket";
+        description = lib.mdDoc "Path to Klipper's API socket.";
+      };
+
+      stateDir = mkOption {
+        type = types.path;
+        default = "/var/lib/moonraker";
+        description = lib.mdDoc "The directory containing the Moonraker databases.";
+      };
+
+      configDir = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Deprecated directory containing client-writable configuration files.
+
+          Clients will be able to edit files in this directory via the API. This directory must be writable.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "moonraker";
+        description = lib.mdDoc "User account under which Moonraker runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "moonraker";
+        description = lib.mdDoc "Group account under which Moonraker runs.";
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        example = "0.0.0.0";
+        description = lib.mdDoc "The IP or host to listen on.";
+      };
+
+      port = mkOption {
+        type = types.ints.unsigned;
+        default = 7125;
+        description = lib.mdDoc "The port to listen on.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = { };
+        example = {
+          authorization = {
+            trusted_clients = [ "10.0.0.0/24" ];
+            cors_domains = [ "https://app.fluidd.xyz" "https://my.mainsail.xyz" ];
+          };
+        };
+        description = lib.mdDoc ''
+          Configuration for Moonraker. See the [documentation](https://moonraker.readthedocs.io/en/latest/configuration/)
+          for supported values.
+        '';
+      };
+
+      allowSystemControl = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to allow Moonraker to perform system-level operations.
+
+          Moonraker exposes APIs to perform system-level operations, such as
+          reboot, shutdown, and management of systemd units. See the
+          [documentation](https://moonraker.readthedocs.io/en/latest/web_api/#machine-commands)
+          for details on what clients are able to do.
+        '';
+      };
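+      # Note that this additionally requires polkit, i.e.
+      #   security.polkit.enable = true;
+      # which is enforced by an assertion in the implementation below.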
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = []
+      ++ optional (cfg.settings ? update_manager)
+        ''Enabling update_manager is not supported on NixOS and will lead to non-removable warnings in some clients.''
+      ++ optional (cfg.configDir != null)
+        ''
+          services.moonraker.configDir has been deprecated upstream and will be removed.
+
+          Action: ${
+            if cfg.configDir == unifiedConfigDir then "Simply remove services.moonraker.configDir from your config."
+            else "Move files from `${cfg.configDir}` to `${unifiedConfigDir}` then remove services.moonraker.configDir from your config."
+          }
+        '';
+
+    assertions = [
+      {
+        assertion = cfg.allowSystemControl -> config.security.polkit.enable;
+        message = "services.moonraker.allowSystemControl requires polkit to be enabled (security.polkit.enable).";
+      }
+    ];
+
+    users.users = optionalAttrs (cfg.user == "moonraker") {
+      moonraker = {
+        group = cfg.group;
+        uid = config.ids.uids.moonraker;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "moonraker") {
+      moonraker.gid = config.ids.gids.moonraker;
+    };
+
+    environment.etc."moonraker.cfg".source = let
+      forcedConfig = {
+        server = {
+          host = cfg.address;
+          port = cfg.port;
+          klippy_uds_address = cfg.klipperSocket;
+        };
+        machine = {
+          validate_service = false;
+        };
+      } // (lib.optionalAttrs (cfg.configDir != null) {
+        file_manager = {
+          config_path = cfg.configDir;
+        };
+      });
+      fullConfig = recursiveUpdate cfg.settings forcedConfig;
+    in format.generate "moonraker.cfg" fullConfig;
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}' - ${cfg.user} ${cfg.group} - -"
+    ] ++ lib.optional (cfg.configDir != null) "d '${cfg.configDir}' - ${cfg.user} ${cfg.group} - -";
+
+    systemd.services.moonraker = {
+      description = "Moonraker, an API web server for Klipper";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ]
+        ++ optional config.services.klipper.enable "klipper.service";
+
+      # Moonraker really wants its own config to be writable...
+      script = ''
+        config_path=${
+          # Deprecated separate config dir
+          if cfg.configDir != null then "${cfg.configDir}/moonraker-temp.cfg"
+          # Config in unified data path
+          else "${unifiedConfigDir}/moonraker-temp.cfg"
+        }
+        mkdir -p $(dirname "$config_path")
+        cp /etc/moonraker.cfg "$config_path"
+        chmod u+w "$config_path"
+        exec ${pkg}/bin/moonraker -d ${cfg.stateDir} -c "$config_path"
+      '';
+
+      # Needs `ip` command
+      path = [ pkgs.iproute2 ];
+
+      serviceConfig = {
+        WorkingDirectory = cfg.stateDir;
+        PrivateTmp = true;
+        Group = cfg.group;
+        User = cfg.user;
+      };
+    };
+
+    security.polkit.extraConfig = lib.optionalString cfg.allowSystemControl ''
+      // nixos/moonraker: Allow Moonraker to perform system-level operations
+      //
+      // This was enabled via services.moonraker.allowSystemControl.
+      polkit.addRule(function(action, subject) {
+        if ((action.id == "org.freedesktop.systemd1.manage-units" ||
+             action.id == "org.freedesktop.login1.power-off" ||
+             action.id == "org.freedesktop.login1.power-off-multiple-sessions" ||
+             action.id == "org.freedesktop.login1.reboot" ||
+             action.id == "org.freedesktop.login1.reboot-multiple-sessions" ||
+             action.id.startsWith("org.freedesktop.packagekit.")) &&
+             subject.user == "${cfg.user}") {
+          return polkit.Result.YES;
+        }
+      });
+    '';
+  };
+
+  meta.maintainers = with maintainers; [
+    cab404
+    vtuan10
+    zhaofengli
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/mqtt2influxdb.nix b/nixpkgs/nixos/modules/services/misc/mqtt2influxdb.nix
new file mode 100644
index 000000000000..621f51a4e7fd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/mqtt2influxdb.nix
@@ -0,0 +1,253 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+
+with lib;
+
+let
+  cfg = config.services.mqtt2influxdb;
+  filterNull = filterAttrsRecursive (n: v: v != null);
+  configFile = (pkgs.formats.yaml {}).generate "mqtt2influxdb.config.yaml" (
+    filterNull {
+      inherit (cfg) mqtt influxdb;
+      points = map filterNull cfg.points;
+    }
+  );
+
+  pointType = types.submodule {
+    options = {
+      measurement = mkOption {
+        type = types.str;
+        description = mdDoc "Name of the measurement";
+      };
+      topic = mkOption {
+        type = types.str;
+        description = mdDoc "MQTT topic to subscribe to.";
+      };
+      fields = mkOption {
+        type = types.submodule {
+          options = {
+            value = mkOption {
+              type = types.str;
+              default = "$.payload";
+              description = mdDoc "Value to be picked up";
+            };
+            type = mkOption {
+              type = with types; nullOr str;
+              default = null;
+              description = mdDoc "Type to be picked up";
+            };
+          };
+        };
+        description = mdDoc "Field selector.";
+      };
+      tags = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = mdDoc "Tags applied";
+      };
+    };
+  };
+
+  defaultPoints = [
+    {
+      measurement = "temperature";
+      topic = "node/+/thermometer/+/temperature";
+      fields.value = "$.payload";
+      tags = {
+        id = "$.topic[1]";
+        channel = "$.topic[3]";
+      };
+    }
+    {
+      measurement = "relative-humidity";
+      topic = "node/+/hygrometer/+/relative-humidity";
+      fields.value = "$.payload";
+      tags = {
+        id = "$.topic[1]";
+        channel = "$.topic[3]";
+      };
+    }
+    {
+      measurement = "illuminance";
+      topic = "node/+/lux-meter/0:0/illuminance";
+      fields.value = "$.payload";
+      tags = {
+        id = "$.topic[1]";
+      };
+    }
+    {
+      measurement = "pressure";
+      topic = "node/+/barometer/0:0/pressure";
+      fields.value = "$.payload";
+      tags = {
+        id = "$.topic[1]";
+      };
+    }
+    {
+      measurement = "co2";
+      topic = "node/+/co2-meter/-/concentration";
+      fields.value = "$.payload";
+      tags = {
+        id = "$.topic[1]";
+      };
+    }
+    {
+      measurement = "voltage";
+      topic = "node/+/battery/+/voltage";
+      fields.value = "$.payload";
+      tags = {
+        id = "$.topic[1]";
+      };
+    }
+    {
+      measurement = "button";
+      topic = "node/+/push-button/+/event-count";
+      fields.value = "$.payload";
+      tags = {
+        id = "$.topic[1]";
+        channel = "$.topic[3]";
+      };
+    }
+    {
+      measurement = "tvoc";
+      topic = "node/+/voc-lp-sensor/0:0/tvoc";
+      fields.value = "$.payload";
+      tags = {
+        id = "$.topic[1]";
+      };
+    }
+  ];
+in {
+  options = {
+    services.mqtt2influxdb = {
+      enable = mkEnableOption (mdDoc "BigClown MQTT to InfluxDB bridge.");
+      environmentFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        example = [ "/run/keys/mqtt2influxdb.env" ];
+        description = mdDoc ''
+          Files to load as environment files. Environment variables from these files
+          will be interpolated into the config file using envsubst with the
+          syntax `$VARIABLE` or `''${VARIABLE}`.
+          This is useful to avoid putting secrets into the Nix store.
+        '';
+      };
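+      # A sketch of how to keep secrets out of the Nix store (paths and variable names are
+      # illustrative):
+      #   services.mqtt2influxdb.environmentFiles = [ "/run/keys/mqtt2influxdb.env" ];
+      #   services.mqtt2influxdb.mqtt.password = "$MQTT_PASSWORD";
+      # where /run/keys/mqtt2influxdb.env contains a line such as MQTT_PASSWORD=<secret>.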
+      mqtt = {
+        host = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = mdDoc "Host where MQTT server is running.";
+        };
+        port = mkOption {
+          type = types.port;
+          default = 1883;
+          description = mdDoc "MQTT server port.";
+        };
+        username = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc "Username used to connect to the MQTT server.";
+        };
+        password = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc ''
+            MQTT password.
+
+            It is highly recommended to set this via replacement through
+            environmentFiles, as otherwise the password ends up world-readable
+            in the Nix store.
+          '';
+        };
+        cafile = mkOption {
+          type = with types; nullOr path;
+          default = null;
+          description = mdDoc "Certification Authority file for MQTT";
+        };
+        certfile = mkOption {
+          type = with types; nullOr path;
+          default = null;
+          description = mdDoc "Certificate file for MQTT";
+        };
+        keyfile = mkOption {
+          type = with types; nullOr path;
+          default = null;
+          description = mdDoc "Key file for MQTT";
+        };
+      };
+      influxdb = {
+        host = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = mdDoc "Host where InfluxDB server is running.";
+        };
+        port = mkOption {
+          type = types.port;
+          default = 8086;
+          description = mdDoc "InfluxDB server port";
+        };
+        database = mkOption {
+          type = types.str;
+          description = mdDoc "Name of the InfluxDB database.";
+        };
+        username = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc "Username for InfluxDB login.";
+        };
+        password = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = mdDoc ''
+            Password for InfluxDB login.
+
+            It is highly recommended to set this via replacement through
+            environmentFiles, as otherwise the password ends up world-readable
+            in the Nix store.
+          '';
+        };
+        ssl = mkOption {
+          type = types.bool;
+          default = false;
+          description = mdDoc "Use SSL to connect to the InfluxDB server.";
+        };
+        verify_ssl = mkOption {
+          type = types.bool;
+          default = true;
+          description = mdDoc "Verify SSL certificate when connecting to the InfluxDB server.";
+        };
+      };
+      points = mkOption {
+        type = types.listOf pointType;
+        default = defaultPoints;
+        description = mdDoc "Points to bridge from MQTT to InfluxDB.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.bigclown-mqtt2influxdb = let
+      envConfig = cfg.environmentFiles != [];
+      finalConfig = if envConfig
+        then "$RUNTIME_DIRECTORY/mqtt2influxdb.config.yaml"
+        else configFile;
+    in {
+      description = "BigClown MQTT to InfluxDB bridge";
+      wantedBy = ["multi-user.target"];
+      wants = mkIf config.services.mosquitto.enable ["mosquitto.service"];
+      # envsubst can only write to the runtime directory; when no environment
+      # files are given, the generated (read-only) config from the store is used directly.
+      preStart = optionalString envConfig ''
+        umask 077
+        ${pkgs.envsubst}/bin/envsubst -i "${configFile}" -o "${finalConfig}"
+      '';
+      serviceConfig = {
+        EnvironmentFile = cfg.environmentFiles;
+        ExecStart = "${cfg.package}/bin/mqtt2influxdb -dc ${finalConfig}";
+        RuntimeDirectory = "mqtt2influxdb";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/n8n.nix b/nixpkgs/nixos/modules/services/misc/n8n.nix
new file mode 100644
index 000000000000..2af37fba910a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/n8n.nix
@@ -0,0 +1,92 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.n8n;
+  format = pkgs.formats.json {};
+  configFile = format.generate "n8n.json" cfg.settings;
+in
+{
+  options.services.n8n = {
+    enable = mkEnableOption (lib.mdDoc "n8n server");
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Open ports in the firewall for the n8n web interface.";
+    };
+
+    settings = mkOption {
+      type = format.type;
+      default = {};
+      description = lib.mdDoc ''
+        Configuration for n8n, see <https://docs.n8n.io/hosting/environment-variables/configuration-methods/>
+        for supported values.
+      '';
+    };
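+
+    # For instance (a sketch; `port` is the only key this module itself relies on):
+    #   services.n8n.settings = {
+    #     port = 5678;
+    #   };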
+
+    webhookUrl = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        WEBHOOK_URL for n8n, in case it is running behind a reverse proxy.
+        This cannot be set through the configuration file and must be passed as an environment variable.
+      '';
+    };
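+    # e.g. "https://n8n.example.org/" when running behind a reverse proxy (hostname is illustrative).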
+
+  };
+
+  config = mkIf cfg.enable {
+    services.n8n.settings = {
+      # We use this to open the firewall, so we need to know about the default at eval time
+      port = lib.mkDefault 5678;
+    };
+
+    systemd.services.n8n = {
+      description = "N8N service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        # This folder must be writable as the application stores
+        # its data in it, so the StateDirectory is a good choice.
+        N8N_USER_FOLDER = "/var/lib/n8n";
+        HOME = "/var/lib/n8n";
+        N8N_CONFIG_FILES = "${configFile}";
+        WEBHOOK_URL = "${cfg.webhookUrl}";
+
+        # Don't phone home
+        N8N_DIAGNOSTICS_ENABLED = "false";
+        N8N_VERSION_NOTIFICATIONS_ENABLED = "false";
+      };
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.n8n}/bin/n8n";
+        Restart = "on-failure";
+        StateDirectory = "n8n";
+
+        # Basic Hardening
+        NoNewPrivileges = "yes";
+        PrivateTmp = "yes";
+        PrivateDevices = "yes";
+        DevicePolicy = "closed";
+        DynamicUser = "true";
+        ProtectSystem = "strict";
+        ProtectHome = "read-only";
+        ProtectControlGroups = "yes";
+        ProtectKernelModules = "yes";
+        ProtectKernelTunables = "yes";
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
+        RestrictNamespaces = "yes";
+        RestrictRealtime = "yes";
+        RestrictSUIDSGID = "yes";
+        MemoryDenyWriteExecute = "no"; # v8 JIT requires memory segments to be Writable-Executable.
+        LockPersonality = "yes";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.settings.port ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/nitter.nix b/nixpkgs/nixos/modules/services/misc/nitter.nix
new file mode 100644
index 000000000000..77f5459d117c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/nitter.nix
@@ -0,0 +1,388 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nitter;
+  configFile = pkgs.writeText "nitter.conf" ''
+    ${generators.toINI {
+      # String values need to be quoted
+      mkKeyValue = generators.mkKeyValueDefault {
+        mkValueString = v:
+          if isString v then "\"" + (strings.escape ["\""] (toString v)) + "\""
+          else generators.mkValueStringDefault {} v;
+      } " = ";
+    } (lib.recursiveUpdate {
+      Server = cfg.server;
+      Cache = cfg.cache;
+      Config = cfg.config // { hmacKey = "@hmac@"; };
+      Preferences = cfg.preferences;
+    } cfg.settings)}
+  '';
+  # `hmac` is a secret used for cryptographic signing of video URLs.
+  # Generate it on first launch, then copy configuration and replace
+  # `@hmac@` with this value.
+  # We are not using sed as it would leak the value in the command line.
+  preStart = pkgs.writers.writePython3 "nitter-prestart" {} ''
+    import os
+    import secrets
+
+    state_dir = os.environ.get("STATE_DIRECTORY")
+    if not os.path.isfile(f"{state_dir}/hmac"):
+        # Generate hmac on first launch
+        hmac = secrets.token_hex(32)
+        with open(f"{state_dir}/hmac", "w") as f:
+            f.write(hmac)
+    else:
+        # Load previously generated hmac
+        with open(f"{state_dir}/hmac", "r") as f:
+            hmac = f.read()
+
+    configFile = "${configFile}"
+    with open(configFile, "r") as f_in:
+        with open(f"{state_dir}/nitter.conf", "w") as f_out:
+            f_out.write(f_in.read().replace("@hmac@", hmac))
+  '';
+in
+{
+  imports = [
+    # https://github.com/zedeus/nitter/pull/772
+    (mkRemovedOptionModule [ "services" "nitter" "replaceInstagram" ] "Nitter no longer supports this option as Bibliogram has been discontinued.")
+  ];
+
+  options = {
+    services.nitter = {
+      enable = mkEnableOption (lib.mdDoc "Nitter");
+
+      package = mkOption {
+        default = pkgs.nitter;
+        type = types.package;
+        defaultText = literalExpression "pkgs.nitter";
+        description = lib.mdDoc "The nitter derivation to use.";
+      };
+
+      server = {
+        address = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          example = "127.0.0.1";
+          description = lib.mdDoc "The address to listen on.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8080;
+          example = 8000;
+          description = lib.mdDoc "The port to listen on.";
+        };
+
+        https = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Set secure attribute on cookies. Keep it disabled to enable cookies when not using HTTPS.";
+        };
+
+        httpMaxConnections = mkOption {
+          type = types.int;
+          default = 100;
+          description = lib.mdDoc "Maximum number of HTTP connections.";
+        };
+
+        staticDir = mkOption {
+          type = types.path;
+          default = "${cfg.package}/share/nitter/public";
+          defaultText = literalExpression ''"''${config.services.nitter.package}/share/nitter/public"'';
+          description = lib.mdDoc "Path to the static files directory.";
+        };
+
+        title = mkOption {
+          type = types.str;
+          default = "nitter";
+          description = lib.mdDoc "Title of the instance.";
+        };
+
+        hostname = mkOption {
+          type = types.str;
+          default = "localhost";
+          example = "nitter.net";
+          description = lib.mdDoc "Hostname of the instance.";
+        };
+      };
+
+      cache = {
+        listMinutes = mkOption {
+          type = types.int;
+          default = 240;
+          description = lib.mdDoc "How long to cache list info (not the tweets, so keep it high).";
+        };
+
+        rssMinutes = mkOption {
+          type = types.int;
+          default = 10;
+          description = lib.mdDoc "How long to cache RSS queries.";
+        };
+
+        redisHost = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc "Redis host.";
+        };
+
+        redisPort = mkOption {
+          type = types.port;
+          default = 6379;
+          description = lib.mdDoc "Redis port.";
+        };
+
+        redisConnections = mkOption {
+          type = types.int;
+          default = 20;
+          description = lib.mdDoc "Redis connection pool size.";
+        };
+
+        redisMaxConnections = mkOption {
+          type = types.int;
+          default = 30;
+          description = lib.mdDoc ''
+            Maximum number of connections to Redis.
+
+            New connections are opened when none are available, but if the
+            pool size goes above this limit, they are closed when released. Do
+            not worry about this unless you receive tons of requests per second.
+          '';
+        };
+      };
+
+      config = {
+        base64Media = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Use base64 encoding for proxied media URLs.";
+        };
+
+        enableRSS = mkEnableOption (lib.mdDoc "RSS feeds") // { default = true; };
+
+        enableDebug = mkEnableOption (lib.mdDoc "request logs and debug endpoints");
+
+        proxy = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "URL to a HTTP/HTTPS proxy.";
+        };
+
+        proxyAuth = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "Credentials for proxy.";
+        };
+
+        tokenCount = mkOption {
+          type = types.int;
+          default = 10;
+          description = lib.mdDoc ''
+            Minimum number of usable tokens.
+
+            Tokens are used to authorize API requests, but they expire after
+            ~1 hour, and have a limit of 187 requests. The limit gets reset
+            every 15 minutes, and the pool is refilled so that there are always
+            at least tokenCount usable tokens. Only increase this if you receive
+            major bursts all the time.
+          '';
+        };
+      };
+
+      preferences = {
+        replaceTwitter = mkOption {
+          type = types.str;
+          default = "";
+          example = "nitter.net";
+          description = lib.mdDoc "Replace Twitter links with links to this instance (blank to disable).";
+        };
+
+        replaceYouTube = mkOption {
+          type = types.str;
+          default = "";
+          example = "piped.kavin.rocks";
+          description = lib.mdDoc "Replace YouTube links with links to this instance (blank to disable).";
+        };
+
+        replaceReddit = mkOption {
+          type = types.str;
+          default = "";
+          example = "teddit.net";
+          description = lib.mdDoc "Replace Reddit links with links to this instance (blank to disable).";
+        };
+
+        mp4Playback = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Enable MP4 video playback.";
+        };
+
+        hlsPlayback = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Enable HLS video streaming (requires JavaScript).";
+        };
+
+        proxyVideos = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Proxy video streaming through the server (might be slow).";
+        };
+
+        muteVideos = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Mute videos by default.";
+        };
+
+        autoplayGifs = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Autoplay GIFs.";
+        };
+
+        theme = mkOption {
+          type = types.str;
+          default = "Nitter";
+          description = lib.mdDoc "Instance theme.";
+        };
+
+        infiniteScroll = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Infinite scrolling (requires JavaScript, experimental!).";
+        };
+
+        stickyProfile = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Make profile sidebar stick to top.";
+        };
+
+        bidiSupport = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Support bidirectional text (makes clicking on tweets harder).";
+        };
+
+        hideTweetStats = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Hide tweet stats (replies, retweets, likes).";
+        };
+
+        hideBanner = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Hide profile banner.";
+        };
+
+        hidePins = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Hide pinned tweets.";
+        };
+
+        hideReplies = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Hide tweet replies.";
+        };
+
+        squareAvatars = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Square profile pictures.";
+        };
+      };
+
+      settings = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc ''
+          Add settings here to override the settings generated by this NixOS module.
+
+          Check the official repository for the available settings:
+          https://github.com/zedeus/nitter/blob/master/nitter.example.conf
+        '';
+      };
+
+      redisCreateLocally = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Configure local Redis server for Nitter.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for Nitter web interface.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !cfg.redisCreateLocally || (cfg.cache.redisHost == "localhost" && cfg.cache.redisPort == 6379);
+        message = "When services.nitter.redisCreateLocally is enabled, you need to use localhost:6379 as a cache server.";
+      }
+    ];
+
+    systemd.services.nitter = {
+        description = "Nitter (An alternative Twitter front-end)";
+        wantedBy = [ "multi-user.target" ];
+        wants = [ "network-online.target" ];
+        after = [ "network-online.target" ];
+        serviceConfig = {
+          DynamicUser = true;
+          StateDirectory = "nitter";
+          Environment = [ "NITTER_CONF_FILE=/var/lib/nitter/nitter.conf" ];
+          # Some parts of Nitter expect `public` folder in working directory,
+          # see https://github.com/zedeus/nitter/issues/414
+          WorkingDirectory = "${cfg.package}/share/nitter";
+          ExecStart = "${cfg.package}/bin/nitter";
+          ExecStartPre = "${preStart}";
+          AmbientCapabilities = lib.mkIf (cfg.server.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+          Restart = "on-failure";
+          RestartSec = "5s";
+          # Hardening
+          CapabilityBoundingSet = if (cfg.server.port < 1024) then [ "CAP_NET_BIND_SERVICE" ] else [ "" ];
+          DeviceAllow = [ "" ];
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          PrivateDevices = true;
+          # A private user cannot have process capabilities on the host's user
+          # namespace and thus CAP_NET_BIND_SERVICE has no effect.
+          PrivateUsers = (cfg.server.port >= 1024);
+          ProcSubset = "pid";
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+          UMask = "0077";
+        };
+    };
+
+    services.redis.servers.nitter = lib.mkIf (cfg.redisCreateLocally) {
+      enable = true;
+      port = cfg.cache.redisPort;
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.server.port ];
+    };
+  };
+}
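
A minimal usage sketch for this module, relying only on the options defined above; "nitter.example.org" is a hypothetical hostname, and the replaceYouTube value is taken from the option's own example:

  {
    services.nitter = {
      enable = true;
      server = {
        address = "127.0.0.1";
        port = 8080;
        hostname = "nitter.example.org";
      };
      preferences.replaceYouTube = "piped.kavin.rocks";
    };
  }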
diff --git a/nixpkgs/nixos/modules/services/misc/nix-gc.nix b/nixpkgs/nixos/modules/services/misc/nix-gc.nix
new file mode 100644
index 000000000000..97596d28cd89
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/nix-gc.nix
@@ -0,0 +1,104 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.nix.gc;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    nix.gc = {
+
+      automatic = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Automatically run the garbage collector at a specific time.";
+      };
+
+      dates = mkOption {
+        type = types.str;
+        default = "03:15";
+        example = "weekly";
+        description = lib.mdDoc ''
+          How often or when garbage collection is performed. For most desktop and
+          server systems, running garbage collection once a week is sufficient.
+
+          The format is described in
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+
+      randomizedDelaySec = mkOption {
+        default = "0";
+        type = types.str;
+        example = "45min";
+        description = lib.mdDoc ''
+          Add a randomized delay before each garbage collection.
+          The delay will be chosen between zero and this value.
+          This value must be a time span in the format specified by
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+
+      persistent = mkOption {
+        default = true;
+        type = types.bool;
+        example = false;
+        description = lib.mdDoc ''
+          If true, the time when the service
+          unit was last triggered is stored on disk. When the timer is
+          activated, the service unit is triggered immediately if it
+          would have been triggered at least once during the time when
+          the timer was inactive. Such triggering is nonetheless
+          subject to the delay imposed by RandomizedDelaySec=. This is
+          useful to catch up on missed runs of the service when the
+          system was powered down.
+        '';
+      };
+
+      options = mkOption {
+        default = "";
+        example = "--max-freed $((64 * 1024**3))";
+        type = types.str;
+        description = lib.mdDoc ''
+          Options given to {command}`nix-collect-garbage` when the
+          garbage collector is run automatically.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+    assertions = [
+      {
+        assertion = cfg.automatic -> config.nix.enable;
+        message = ''nix.gc.automatic requires nix.enable'';
+      }
+    ];
+
+    systemd.services.nix-gc = lib.mkIf config.nix.enable {
+      description = "Nix Garbage Collector";
+      script = "exec ${config.nix.package.out}/bin/nix-collect-garbage ${cfg.options}";
+      startAt = optional cfg.automatic cfg.dates;
+    };
+
+    systemd.timers.nix-gc = lib.mkIf cfg.automatic {
+      timerConfig = {
+        RandomizedDelaySec = cfg.randomizedDelaySec;
+        Persistent = cfg.persistent;
+      };
+    };
+
+  };
+
+}
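
A minimal sketch of enabling automatic garbage collection with the options above; the --delete-older-than flag is a standard nix-collect-garbage option shown here as an illustrative choice:

  {
    nix.gc = {
      automatic = true;
      dates = "weekly";
      randomizedDelaySec = "45min";
      options = "--delete-older-than 30d";
    };
  }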
diff --git a/nixpkgs/nixos/modules/services/misc/nix-optimise.nix b/nixpkgs/nixos/modules/services/misc/nix-optimise.nix
new file mode 100644
index 000000000000..0398229a13da
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/nix-optimise.nix
@@ -0,0 +1,51 @@
+{ config, lib, ... }:
+
+let
+  cfg = config.nix.optimise;
+in
+
+{
+  options = {
+    nix.optimise = {
+      automatic = lib.mkOption {
+        default = false;
+        type = lib.types.bool;
+        description = lib.mdDoc "Automatically run the nix store optimiser at a specific time.";
+      };
+
+      dates = lib.mkOption {
+        default = ["03:45"];
+        type = with lib.types; listOf str;
+        description = lib.mdDoc ''
+          Specification (in the format described by
+          {manpage}`systemd.time(7)`) of the time at
+          which the optimiser will run.
+        '';
+      };
+    };
+  };
+
+  config = {
+    assertions = [
+      {
+        assertion = cfg.automatic -> config.nix.enable;
+        message = ''nix.optimise.automatic requires nix.enable'';
+      }
+    ];
+
+    systemd = lib.mkIf config.nix.enable {
+      services.nix-optimise = {
+        description = "Nix Store Optimiser";
+        # No point in running this if the nix daemon (and thus the nix store) is outside
+        unitConfig.ConditionPathIsReadWrite = "/nix/var/nix/daemon-socket";
+        serviceConfig.ExecStart = "${config.nix.package}/bin/nix-store --optimise";
+        startAt = lib.optionals cfg.automatic cfg.dates;
+      };
+
+      timers.nix-optimise.timerConfig = {
+        Persistent = true;
+        RandomizedDelaySec = 1800;
+      };
+    };
+  };
+}
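
A minimal sketch of enabling the store optimiser via the options above:

  {
    nix.optimise = {
      automatic = true;
      dates = [ "03:45" ];
    };
  }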
diff --git a/nixpkgs/nixos/modules/services/misc/nix-ssh-serve.nix b/nixpkgs/nixos/modules/services/misc/nix-ssh-serve.nix
new file mode 100644
index 000000000000..b656692ca01c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/nix-ssh-serve.nix
@@ -0,0 +1,69 @@
+{ config, lib, ... }:
+
+with lib;
+let cfg = config.nix.sshServe;
+    command =
+      if cfg.protocol == "ssh"
+        then "nix-store --serve ${lib.optionalString cfg.write "--write"}"
+      else "nix-daemon --stdio";
+in {
+  options = {
+
+    nix.sshServe = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable serving the Nix store as a remote store via SSH.";
+      };
+
+      write = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable writing to the Nix store as a remote store via SSH. Note: the sshServe user is named nix-ssh and is not a trusted-user. nix-ssh should be added to the {option}`nix.settings.trusted-users` option in most use cases, such as allowing remote building of derivations.";
+      };
+
+      keys = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "ssh-dss AAAAB3NzaC1k... alice@example.org" ];
+        description = lib.mdDoc "A list of SSH public keys allowed to access the binary cache via SSH.";
+      };
+
+      protocol = mkOption {
+        type = types.enum [ "ssh" "ssh-ng" ];
+        default = "ssh";
+        description = lib.mdDoc "The specific Nix-over-SSH protocol to use.";
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.nix-ssh = {
+      description = "Nix SSH store user";
+      isSystemUser = true;
+      group = "nix-ssh";
+      useDefaultShell = true;
+    };
+    users.groups.nix-ssh = {};
+
+    services.openssh.enable = true;
+
+    services.openssh.extraConfig = ''
+      Match User nix-ssh
+        AllowAgentForwarding no
+        AllowTcpForwarding no
+        PermitTTY no
+        PermitTunnel no
+        X11Forwarding no
+        ForceCommand ${config.nix.package.out}/bin/${command}
+      Match All
+    '';
+
+    users.users.nix-ssh.openssh.authorizedKeys.keys = cfg.keys;
+
+  };
+}
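
A minimal usage sketch; the SSH key is a placeholder:

  {
    nix.sshServe = {
      enable = true;
      protocol = "ssh-ng";
      keys = [ "ssh-ed25519 AAAA... alice@example.org" ];
    };
  }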
diff --git a/nixpkgs/nixos/modules/services/misc/novacomd.nix b/nixpkgs/nixos/modules/services/misc/novacomd.nix
new file mode 100644
index 000000000000..bde8328d46f8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/novacomd.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.novacomd;
+
+in {
+
+  options = {
+    services.novacomd = {
+      enable = mkEnableOption (lib.mdDoc "Novacom service for connecting to WebOS devices");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.webos.novacom ];
+
+    systemd.services.novacomd = {
+      description = "Novacom WebOS daemon";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.webos.novacomd}/sbin/novacomd";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ dtzWill ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/ntfy-sh.nix b/nixpkgs/nixos/modules/services/misc/ntfy-sh.nix
new file mode 100644
index 000000000000..8fc1df93afb1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/ntfy-sh.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ntfy-sh;
+
+  settingsFormat = pkgs.formats.yaml { };
+in
+
+{
+  options.services.ntfy-sh = {
+    enable = mkEnableOption (mdDoc "[ntfy-sh](https://ntfy.sh), a push notification service");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.ntfy-sh;
+      defaultText = literalExpression "pkgs.ntfy-sh";
+      description = mdDoc "The ntfy.sh package to use.";
+    };
+
+    user = mkOption {
+      default = "ntfy-sh";
+      type = types.str;
+      description = lib.mdDoc "User the ntfy-sh server runs under.";
+    };
+
+    group = mkOption {
+      default = "ntfy-sh";
+      type = types.str;
+      description = lib.mdDoc "Primary group of ntfy-sh user.";
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+        options = {
+          base-url = mkOption {
+            type = types.str;
+            example = "https://ntfy.example";
+            description = lib.mdDoc ''
+              Public-facing base URL of the service.
+
+              This setting is required for any of the following features:
+              - attachments (to return a download URL)
+              - e-mail sending (for the topic URL in the email footer)
+              - iOS push notifications for self-hosted servers
+                (to calculate the Firebase poll_request topic)
+              - Matrix Push Gateway (to validate that the pushkey is correct)
+            '';
+          };
+        };
+      };
+
+      default = { };
+
+      example = literalExpression ''
+        {
+          listen-http = ":8080";
+        }
+      '';
+
+      description = mdDoc ''
+        Configuration for ntfy.sh; the supported values are listed [here](https://ntfy.sh/docs/config/#config-options).
+      '';
+    };
+  };
+
+  config =
+    let
+      configuration = settingsFormat.generate "server.yml" cfg.settings;
+    in
+    mkIf cfg.enable {
+      # to configure access control via the cli
+      environment = {
+        etc."ntfy/server.yml".source = configuration;
+        systemPackages = [ cfg.package ];
+      };
+
+      services.ntfy-sh.settings = {
+        auth-file = mkDefault "/var/lib/ntfy-sh/user.db";
+        listen-http = mkDefault "127.0.0.1:2586";
+        attachment-cache-dir = mkDefault "/var/lib/ntfy-sh/attachments";
+        cache-file = mkDefault "/var/lib/ntfy-sh/cache-file.db";
+      };
+
+      systemd.tmpfiles.rules = [
+        "f ${cfg.settings.auth-file} 0600 ${cfg.user} ${cfg.group} - -"
+        "d ${cfg.settings.attachment-cache-dir} 0700 ${cfg.user} ${cfg.group} - -"
+        "f ${cfg.settings.cache-file} 0600 ${cfg.user} ${cfg.group} - -"
+      ];
+
+      systemd.services.ntfy-sh = {
+        description = "Push notifications server";
+
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/ntfy serve -c ${configuration}";
+          User = cfg.user;
+          StateDirectory = "ntfy-sh";
+
+          DynamicUser = true;
+          AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+          PrivateTmp = true;
+          NoNewPrivileges = true;
+          CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";
+          ProtectSystem = "full";
+          ProtectKernelTunables = true;
+          ProtectKernelModules = true;
+          ProtectKernelLogs = true;
+          ProtectControlGroups = true;
+          PrivateDevices = true;
+          RestrictSUIDSGID = true;
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          MemoryDenyWriteExecute = true;
+          # Upstream recommendation
+          LimitNOFILE = 20500;
+        };
+      };
+
+      users.groups = optionalAttrs (cfg.group == "ntfy-sh") {
+        ntfy-sh = { };
+      };
+
+      users.users = optionalAttrs (cfg.user == "ntfy-sh") {
+        ntfy-sh = {
+          isSystemUser = true;
+          group = cfg.group;
+        };
+      };
+    };
+}
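
A minimal usage sketch; the base-url is a hypothetical placeholder, and listen-http is one of the upstream options accepted by the freeform settings, matching the default written above:

  {
    services.ntfy-sh = {
      enable = true;
      settings = {
        base-url = "https://ntfy.example.org";
        listen-http = ":2586";
      };
    };
  }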
diff --git a/nixpkgs/nixos/modules/services/misc/nzbget.nix b/nixpkgs/nixos/modules/services/misc/nzbget.nix
new file mode 100644
index 000000000000..d02fda62fa4f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/nzbget.nix
@@ -0,0 +1,117 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nzbget;
+  pkg = pkgs.nzbget;
+  stateDir = "/var/lib/nzbget";
+  configFile = "${stateDir}/nzbget.conf";
+  configOpts = concatStringsSep " " (mapAttrsToList (name: value: "-o ${name}=${escapeShellArg (toStr value)}") cfg.settings);
+  toStr = v:
+    if v == true then "yes"
+    else if v == false then "no"
+    else if isInt v then toString v
+    else v;
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "misc" "nzbget" "configFile" ] "The configuration of nzbget is now managed by users through the web interface.")
+    (mkRemovedOptionModule [ "services" "misc" "nzbget" "dataDir" ] "The data directory for nzbget is now /var/lib/nzbget.")
+    (mkRemovedOptionModule [ "services" "misc" "nzbget" "openFirewall" ] "The port used by nzbget is managed through the web interface so you should adjust your firewall rules accordingly.")
+  ];
+
+  # interface
+
+  options = {
+    services.nzbget = {
+      enable = mkEnableOption (lib.mdDoc "NZBGet");
+
+      user = mkOption {
+        type = types.str;
+        default = "nzbget";
+        description = lib.mdDoc "User account under which NZBGet runs";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nzbget";
+        description = lib.mdDoc "Group under which NZBGet runs";
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ bool int str ]);
+        default = {};
+        description = lib.mdDoc ''
+          NZBGet configuration, passed on the command line using the -o switch. Refer to
+          <https://github.com/nzbget/nzbget/blob/master/nzbget.conf>
+          for details on supported values.
+        '';
+        example = {
+          MainDir = "/data";
+        };
+      };
+    };
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+    services.nzbget.settings = {
+      # allows nzbget to run as a "simple" service
+      OutputMode = "loggable";
+      # use journald for logging
+      WriteLog = "none";
+      ErrorTarget = "screen";
+      WarningTarget = "screen";
+      InfoTarget = "screen";
+      DetailTarget = "screen";
+      # required paths
+      ConfigTemplate = "${pkg}/share/nzbget/nzbget.conf";
+      WebDir = "${pkg}/share/nzbget/webui";
+      # nixos handles package updates
+      UpdateCheck = "none";
+    };
+
+    systemd.services.nzbget = {
+      description = "NZBGet Daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [
+        unrar
+        p7zip
+      ];
+
+      preStart = ''
+        if [ ! -f ${configFile} ]; then
+          ${pkgs.coreutils}/bin/install -m 0700 ${pkg}/share/nzbget/nzbget.conf ${configFile}
+        fi
+      '';
+
+      serviceConfig = {
+        StateDirectory = "nzbget";
+        StateDirectoryMode = "0750";
+        User = cfg.user;
+        Group = cfg.group;
+        UMask = "0002";
+        Restart = "on-failure";
+        ExecStart = "${pkg}/bin/nzbget --server --configfile ${stateDir}/nzbget.conf ${configOpts}";
+        ExecStop = "${pkg}/bin/nzbget --quit";
+      };
+    };
+
+    users.users = mkIf (cfg.user == "nzbget") {
+      nzbget = {
+        home = stateDir;
+        group = cfg.group;
+        uid = config.ids.uids.nzbget;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "nzbget") {
+      nzbget = {
+        gid = config.ids.gids.nzbget;
+      };
+    };
+  };
+}
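
A minimal usage sketch; the MainDir value follows the option's own example, and any other key from nzbget.conf can be set the same way:

  {
    services.nzbget = {
      enable = true;
      settings = {
        MainDir = "/data";
      };
    };
  }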
diff --git a/nixpkgs/nixos/modules/services/misc/nzbhydra2.nix b/nixpkgs/nixos/modules/services/misc/nzbhydra2.nix
new file mode 100644
index 000000000000..47d08135f57e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/nzbhydra2.nix
@@ -0,0 +1,78 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let cfg = config.services.nzbhydra2;
+
+in {
+  options = {
+    services.nzbhydra2 = {
+      enable = mkEnableOption (lib.mdDoc "NZBHydra2");
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/nzbhydra2";
+        description = lib.mdDoc "The directory where NZBHydra2 stores its data files.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc "Open ports in the firewall for the NZBHydra2 web interface.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.nzbhydra2;
+        defaultText = literalExpression "pkgs.nzbhydra2";
+        description = lib.mdDoc "NZBHydra2 package to use.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules =
+      [ "d '${cfg.dataDir}' 0700 nzbhydra2 nzbhydra2 - -" ];
+
+    systemd.services.nzbhydra2 = {
+      description = "NZBHydra2";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = "nzbhydra2";
+        Group = "nzbhydra2";
+        ExecStart =
+          "${cfg.package}/bin/nzbhydra2 --nobrowser --datafolder '${cfg.dataDir}'";
+        Restart = "on-failure";
+        # Hardening
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        DevicePolicy = "closed";
+        ProtectSystem = "strict";
+        ReadWritePaths = cfg.dataDir;
+        ProtectHome = "read-only";
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies ="AF_UNIX AF_INET AF_INET6 AF_NETLINK";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        LockPersonality = true;
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall { allowedTCPPorts = [ 5076 ]; };
+
+    users.users.nzbhydra2 = {
+      group = "nzbhydra2";
+      isSystemUser = true;
+    };
+
+    users.groups.nzbhydra2 = {};
+  };
+}
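
A minimal usage sketch; opening the firewall exposes the fixed web port 5076 used above:

  {
    services.nzbhydra2 = {
      enable = true;
      openFirewall = true;
    };
  }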
diff --git a/nixpkgs/nixos/modules/services/misc/octoprint.nix b/nixpkgs/nixos/modules/services/misc/octoprint.nix
new file mode 100644
index 000000000000..43e0ce0c21d3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/octoprint.nix
@@ -0,0 +1,142 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.octoprint;
+
+  baseConfig = {
+    plugins.curalegacy.cura_engine = "${pkgs.curaengine_stable}/bin/CuraEngine";
+    server.host = cfg.host;
+    server.port = cfg.port;
+    webcam.ffmpeg = "${pkgs.ffmpeg.bin}/bin/ffmpeg";
+  };
+
+  fullConfig = recursiveUpdate cfg.extraConfig baseConfig;
+
+  cfgUpdate = pkgs.writeText "octoprint-config.yaml" (builtins.toJSON fullConfig);
+
+  pluginsEnv = package.python.withPackages (ps: [ ps.octoprint ] ++ (cfg.plugins ps));
+
+  package = pkgs.octoprint;
+
+in
+{
+  ##### interface
+
+  options = {
+
+    services.octoprint = {
+
+      enable = mkEnableOption (lib.mdDoc "OctoPrint, web interface for 3D printers");
+
+      host = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc ''
+          Host to bind OctoPrint to.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5000;
+        description = lib.mdDoc ''
+          Port to bind OctoPrint to.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for OctoPrint.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "octoprint";
+        description = lib.mdDoc "User for the daemon.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "octoprint";
+        description = lib.mdDoc "Group for the daemon.";
+      };
+
+      stateDir = mkOption {
+        type = types.path;
+        default = "/var/lib/octoprint";
+        description = lib.mdDoc "State directory of the daemon.";
+      };
+
+      plugins = mkOption {
+        type = types.functionTo (types.listOf types.package);
+        default = plugins: [ ];
+        defaultText = literalExpression "plugins: []";
+        example = literalExpression "plugins: with plugins; [ themeify stlviewer ]";
+        description = lib.mdDoc "Additional plugins to be used. Available plugins are passed through the plugins input.";
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        default = { };
+        description = lib.mdDoc "Extra options which are added to OctoPrint's YAML configuration file.";
+      };
+
+    };
+
+  };
+
+  ##### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == "octoprint") {
+      octoprint = {
+        group = cfg.group;
+        uid = config.ids.uids.octoprint;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "octoprint") {
+      octoprint.gid = config.ids.gids.octoprint;
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}' - ${cfg.user} ${cfg.group} - -"
+      # this will allow octoprint access to raspberry specific hardware to check for throttling
+      # read-only will not work: "VCHI initialization failed" error
+      "a /dev/vchiq - - - - u:octoprint:rw"
+    ];
+
+    systemd.services.octoprint = {
+      description = "OctoPrint, web interface for 3D printers";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ pluginsEnv ];
+
+      preStart = ''
+        if [ -e "${cfg.stateDir}/config.yaml" ]; then
+          ${pkgs.yaml-merge}/bin/yaml-merge "${cfg.stateDir}/config.yaml" "${cfgUpdate}" > "${cfg.stateDir}/config.yaml.tmp"
+          mv "${cfg.stateDir}/config.yaml.tmp" "${cfg.stateDir}/config.yaml"
+        else
+          cp "${cfgUpdate}" "${cfg.stateDir}/config.yaml"
+          chmod 600 "${cfg.stateDir}/config.yaml"
+        fi
+      '';
+
+      serviceConfig = {
+        ExecStart = "${pluginsEnv}/bin/octoprint serve -b ${cfg.stateDir}";
+        User = cfg.user;
+        Group = cfg.group;
+        SupplementaryGroups = [
+          "dialout"
+        ];
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+  };
+}
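
A minimal usage sketch; the plugin names come from the option's own example:

  {
    services.octoprint = {
      enable = true;
      port = 5000;
      openFirewall = true;
      plugins = plugins: with plugins; [ themeify stlviewer ];
    };
  }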
diff --git a/nixpkgs/nixos/modules/services/misc/ombi.nix b/nixpkgs/nixos/modules/services/misc/ombi.nix
new file mode 100644
index 000000000000..8bf6a9b116ec
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/ombi.nix
@@ -0,0 +1,81 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let cfg = config.services.ombi;
+
+in {
+  options = {
+    services.ombi = {
+      enable = mkEnableOption (lib.mdDoc ''
+        Ombi.
+        See <https://docs.ombi.app/info/reverse-proxy>
+        for how to optionally set up a reverse proxy
+      '');
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/ombi";
+        description = lib.mdDoc "The directory where Ombi stores its data files.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5000;
+        description = lib.mdDoc "The port for the Ombi web interface.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the Ombi web interface.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "ombi";
+        description = lib.mdDoc "User account under which Ombi runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "ombi";
+        description = lib.mdDoc "Group under which Ombi runs.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.ombi = {
+      description = "Ombi";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${pkgs.ombi}/bin/Ombi --storage '${cfg.dataDir}' --host 'http://*:${toString cfg.port}'";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+
+    users.users = mkIf (cfg.user == "ombi") {
+      ombi = {
+        isSystemUser = true;
+        group = cfg.group;
+        home = cfg.dataDir;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "ombi") { ombi = { }; };
+  };
+}
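
A minimal usage sketch using only the options above:

  {
    services.ombi = {
      enable = true;
      port = 5000;
      openFirewall = true;
    };
  }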
diff --git a/nixpkgs/nixos/modules/services/misc/osrm.nix b/nixpkgs/nixos/modules/services/misc/osrm.nix
new file mode 100644
index 000000000000..12c908a761e3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/osrm.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.osrm;
+in
+
+{
+  options.services.osrm = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable the OSRM service.";
+    };
+
+    address = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc "IP address on which the web server will listen.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 5000;
+      description = lib.mdDoc "Port on which the web server will run.";
+    };
+
+    threads = mkOption {
+      type = types.int;
+      default = 4;
+      description = lib.mdDoc "Number of threads to use.";
+    };
+
+    algorithm = mkOption {
+      type = types.enum [ "CH" "CoreCH" "MLD" ];
+      default = "MLD";
+      description = lib.mdDoc "Algorithm to use for the data. Must be one of CH, CoreCH, MLD";
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--max-table-size 1000" "--max-matching-size 1000" ];
+      description = lib.mdDoc "Extra command line arguments passed to osrm-routed";
+    };
+
+    dataFile = mkOption {
+      type = types.path;
+      example = "/var/lib/osrm/berlin-latest.osrm";
+      description = lib.mdDoc "Data file location";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.osrm = {
+      group = config.users.users.osrm.name;
+      description = "OSRM user";
+      createHome = false;
+      isSystemUser = true;
+    };
+
+    users.groups.osrm = { };
+
+    systemd.services.osrm = {
+      description = "OSRM service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = config.users.users.osrm.name;
+        ExecStart = ''
+          ${pkgs.osrm-backend}/bin/osrm-routed \
+            --ip ${cfg.address} \
+            --port ${toString cfg.port} \
+            --threads ${toString cfg.threads} \
+            --algorithm ${cfg.algorithm} \
+            ${toString cfg.extraFlags} \
+            ${cfg.dataFile}
+        '';
+      };
+    };
+  };
+}
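
A minimal usage sketch; the dataFile path follows the option's own example and must point to a pre-processed .osrm file:

  {
    services.osrm = {
      enable = true;
      address = "127.0.0.1";
      port = 5000;
      algorithm = "MLD";
      dataFile = "/var/lib/osrm/berlin-latest.osrm";
    };
  }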
diff --git a/nixpkgs/nixos/modules/services/misc/owncast.nix b/nixpkgs/nixos/modules/services/misc/owncast.nix
new file mode 100644
index 000000000000..01fe34cf50fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/owncast.nix
@@ -0,0 +1,98 @@
+{ lib, pkgs, config, ... }:
+with lib;
+let cfg = config.services.owncast;
+in {
+
+  options.services.owncast = {
+
+    enable = mkEnableOption (lib.mdDoc "owncast");
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/owncast";
+      description = lib.mdDoc ''
+        The directory where owncast stores its data files. If left at the default value,
+        this directory is created automatically before the owncast server starts;
+        otherwise, the sysadmin is responsible for ensuring that the directory exists
+        with appropriate ownership and permissions.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open the appropriate ports in the firewall for owncast.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "owncast";
+      description = lib.mdDoc "User account under which owncast runs.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "owncast";
+      description = lib.mdDoc "Group under which owncast runs.";
+    };
+
+    listen = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      example = "0.0.0.0";
+      description = lib.mdDoc "The IP address to bind the owncast web server to.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc ''
+        TCP port where owncast web-gui listens.
+      '';
+    };
+
+    rtmp-port = mkOption {
+      type = types.port;
+      default = 1935;
+      description = lib.mdDoc ''
+        TCP port where owncast rtmp service listens.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.owncast = {
+      description = "A self-hosted live video and web chat server";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = mkMerge [
+        {
+          User = cfg.user;
+          Group = cfg.group;
+          WorkingDirectory = cfg.dataDir;
+          ExecStart = "${pkgs.owncast}/bin/owncast -webserverport ${toString cfg.port} -rtmpport ${toString cfg.rtmp-port} -webserverip ${cfg.listen}";
+          Restart = "on-failure";
+        }
+        (mkIf (cfg.dataDir == "/var/lib/owncast") {
+          StateDirectory = "owncast";
+        })
+      ];
+    };
+
+    users.users = mkIf (cfg.user == "owncast") {
+      owncast = {
+        isSystemUser = true;
+        group = cfg.group;
+        description = "owncast system user";
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "owncast") { owncast = { }; };
+
+    networking.firewall =
+      mkIf cfg.openFirewall { allowedTCPPorts = [ cfg.rtmp-port ] ++ optional (cfg.listen != "127.0.0.1") cfg.port; };
+
+  };
+  meta = { maintainers = with lib.maintainers; [ MayNiklas ]; };
+}
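
A minimal usage sketch; with listen set to a non-loopback address, openFirewall opens both the RTMP and web ports as implemented above:

  {
    services.owncast = {
      enable = true;
      listen = "0.0.0.0";
      openFirewall = true;
    };
  }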
diff --git a/nixpkgs/nixos/modules/services/misc/packagekit.nix b/nixpkgs/nixos/modules/services/misc/packagekit.nix
new file mode 100644
index 000000000000..5a0d314d25cd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/packagekit.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.packagekit;
+
+  inherit (lib)
+    mkEnableOption mkOption mkIf mkRemovedOptionModule types
+    listToAttrs recursiveUpdate;
+
+  iniFmt = pkgs.formats.ini { };
+
+  confFiles = [
+    (iniFmt.generate "PackageKit.conf" (recursiveUpdate
+      {
+        Daemon = {
+          DefaultBackend = "nix";
+          KeepCache = false;
+        };
+      }
+      cfg.settings))
+
+    (iniFmt.generate "Vendor.conf" (recursiveUpdate
+      {
+        PackagesNotFound = rec {
+          DefaultUrl = "https://github.com/NixOS/nixpkgs";
+          CodecUrl = DefaultUrl;
+          HardwareUrl = DefaultUrl;
+          FontUrl = DefaultUrl;
+          MimeUrl = DefaultUrl;
+        };
+      }
+      cfg.vendorSettings))
+  ];
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "packagekit" "backend" ] "Always set to Nix.")
+  ];
+
+  options.services.packagekit = {
+    enable = mkEnableOption (lib.mdDoc ''
+      PackageKit, a cross-platform D-Bus abstraction layer for
+      installing software. Software utilizing PackageKit can install
+      software regardless of the package manager
+    '');
+
+    settings = mkOption {
+      type = iniFmt.type;
+      default = { };
+      description = lib.mdDoc "Additional settings passed straight through to PackageKit.conf";
+    };
+
+    vendorSettings = mkOption {
+      type = iniFmt.type;
+      default = { };
+      description = lib.mdDoc "Additional settings passed straight through to Vendor.conf";
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.dbus.packages = with pkgs; [ packagekit ];
+
+    environment.systemPackages = with pkgs; [ packagekit ];
+
+    systemd.packages = with pkgs; [ packagekit ];
+
+    environment.etc = listToAttrs (map
+      (e:
+        lib.nameValuePair "PackageKit/${e.name}" { source = e; })
+      confFiles);
+  };
+}
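
A minimal usage sketch; the Daemon.KeepCache key mirrors the default written above:

  {
    services.packagekit = {
      enable = true;
      settings.Daemon.KeepCache = false;
    };
  }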
diff --git a/nixpkgs/nixos/modules/services/misc/paperless.nix b/nixpkgs/nixos/modules/services/misc/paperless.nix
new file mode 100644
index 000000000000..1e0a8d0f928e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/paperless.nix
@@ -0,0 +1,387 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.paperless;
+  pkg = cfg.package;
+
+  defaultUser = "paperless";
+  nltkDir = "/var/cache/paperless/nltk";
+  defaultFont = "${pkgs.liberation_ttf}/share/fonts/truetype/LiberationSerif-Regular.ttf";
+
+  # Don't start a redis instance if the user sets a custom redis connection
+  enableRedis = !hasAttr "PAPERLESS_REDIS" cfg.extraConfig;
+  redisServer = config.services.redis.servers.paperless;
+
+  env = {
+    PAPERLESS_DATA_DIR = cfg.dataDir;
+    PAPERLESS_MEDIA_ROOT = cfg.mediaDir;
+    PAPERLESS_CONSUMPTION_DIR = cfg.consumptionDir;
+    PAPERLESS_NLTK_DIR = nltkDir;
+    PAPERLESS_THUMBNAIL_FONT_NAME = defaultFont;
+    GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port}";
+  } // optionalAttrs (config.time.timeZone != null) {
+    PAPERLESS_TIME_ZONE = config.time.timeZone;
+  } // optionalAttrs enableRedis {
+    PAPERLESS_REDIS = "unix://${redisServer.unixSocket}";
+  } // (
+    lib.mapAttrs (_: toString) cfg.extraConfig
+  );
+
+  manage = pkgs.writeShellScript "manage" ''
+    set -o allexport # Export the following env vars
+    ${lib.toShellVars env}
+    exec ${pkg}/bin/paperless-ngx "$@"
+  '';
+
+  # Secure the services
+  defaultServiceConfig = {
+    ReadWritePaths = [
+      cfg.consumptionDir
+      cfg.dataDir
+      cfg.mediaDir
+    ];
+    CacheDirectory = "paperless";
+    CapabilityBoundingSet = "";
+    # ProtectClock adds DeviceAllow=char-rtc r
+    DeviceAllow = "";
+    LockPersonality = true;
+    MemoryDenyWriteExecute = true;
+    NoNewPrivileges = true;
+    PrivateDevices = true;
+    PrivateMounts = true;
+    PrivateNetwork = true;
+    PrivateTmp = true;
+    PrivateUsers = true;
+    ProtectClock = true;
+    # Breaks if the home dir of the user is in /home
+    # ProtectHome = true;
+    ProtectHostname = true;
+    ProtectSystem = "strict";
+    ProtectControlGroups = true;
+    ProtectKernelLogs = true;
+    ProtectKernelModules = true;
+    ProtectKernelTunables = true;
+    ProtectProc = "invisible";
+    # Don't restrict ProcSubset because django-q requires read access to /proc/stat
+    # to query CPU and memory information.
+    # Note that /proc only contains processes of user `paperless`, so this is safe.
+    # ProcSubset = "pid";
+    RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+    RestrictNamespaces = true;
+    RestrictRealtime = true;
+    RestrictSUIDSGID = true;
+    SupplementaryGroups = optional enableRedis redisServer.user;
+    SystemCallArchitectures = "native";
+    SystemCallFilter = [ "@system-service" "~@privileged @setuid @keyring" ];
+    UMask = "0066";
+  };
+in
+{
+  meta.maintainers = with maintainers; [ erikarvstedt Flakebi leona ];
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "paperless-ng" ] [ "services" "paperless" ])
+  ];
+
+  options.services.paperless = {
+    enable = mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable Paperless.
+
+        When started, the Paperless database is automatically created if it doesn't
+        exist and updated if the Paperless package has changed.
+        Both tasks are achieved by running a Django migration.
+
+        A script to manage the Paperless instance (by wrapping Django's manage.py) is linked to
+        `''${dataDir}/paperless-manage`.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/paperless";
+      description = lib.mdDoc "Directory to store the Paperless data.";
+    };
+
+    mediaDir = mkOption {
+      type = types.str;
+      default = "${cfg.dataDir}/media";
+      defaultText = literalExpression ''"''${dataDir}/media"'';
+      description = lib.mdDoc "Directory to store the Paperless documents.";
+    };
+
+    consumptionDir = mkOption {
+      type = types.str;
+      default = "${cfg.dataDir}/consume";
+      defaultText = literalExpression ''"''${dataDir}/consume"'';
+      description = lib.mdDoc "Directory from which new documents are imported.";
+    };
+
+    consumptionDirIsPublic = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Whether all users can write to the consumption dir.";
+    };
+
+    passwordFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/run/keys/paperless-password";
+      description = lib.mdDoc ''
+        A file containing the superuser password.
+
+        A superuser is required to access the web interface.
+        If unset, you can create a superuser manually by running
+        `''${dataDir}/paperless-manage createsuperuser`.
+
+        The default superuser name is `admin`. To change it, set
+        option {option}`extraConfig.PAPERLESS_ADMIN_USER`.
+        WARNING: When changing the superuser name after the initial setup, the old superuser
+        will continue to exist.
+
+        To disable login for the web interface, set the following:
+        `extraConfig.PAPERLESS_AUTO_LOGIN_USERNAME = "admin";`.
+        WARNING: Only use this on a trusted system without internet access to Paperless.
+      '';
+    };
+
+    address = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Web interface address.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 28981;
+      description = lib.mdDoc "Web interface port.";
+    };
+
+    # FIXME this should become an RFC42-style settings attr
+    extraConfig = mkOption {
+      type = types.attrs;
+      default = { };
+      description = lib.mdDoc ''
+        Extra paperless config options.
+
+        See [the documentation](https://docs.paperless-ngx.com/configuration/)
+        for available options.
+
+        Note that some options such as `PAPERLESS_CONSUMER_IGNORE_PATTERN` expect JSON values. Use `builtins.toJSON` to ensure proper quoting.
+      '';
+      example = literalExpression ''
+        {
+          PAPERLESS_OCR_LANGUAGE = "deu+eng";
+
+          PAPERLESS_DBHOST = "/run/postgresql";
+
+          PAPERLESS_CONSUMER_IGNORE_PATTERN = builtins.toJSON [ ".DS_STORE/*" "desktop.ini" ];
+
+          PAPERLESS_OCR_USER_ARGS = builtins.toJSON {
+            optimize = 1;
+            pdfa_image_compression = "lossless";
+          };
+        };
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = lib.mdDoc "User under which Paperless runs.";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.paperless-ngx;
+      defaultText = literalExpression "pkgs.paperless-ngx";
+      description = lib.mdDoc "The Paperless package to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.redis.servers.paperless.enable = mkIf enableRedis true;
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
+      "d '${cfg.mediaDir}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
+      (if cfg.consumptionDirIsPublic then
+        "d '${cfg.consumptionDir}' 777 - - - -"
+      else
+        "d '${cfg.consumptionDir}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
+      )
+    ];
+
+    systemd.services.paperless-scheduler = {
+      description = "Paperless Celery Beat";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "paperless-consumer.service" "paperless-web.service" "paperless-task-queue.service" ];
+      serviceConfig = defaultServiceConfig // {
+        User = cfg.user;
+        ExecStart = "${pkg}/bin/celery --app paperless beat --loglevel INFO";
+        Restart = "on-failure";
+      };
+      environment = env;
+
+      preStart = ''
+        ln -sf ${manage} ${cfg.dataDir}/paperless-manage
+
+        # Auto-migrate on first run or if the package has changed
+        versionFile="${cfg.dataDir}/src-version"
+        version=$(cat "$versionFile" 2>/dev/null || echo 0)
+
+        if [[ $version != ${pkg.version} ]]; then
+          ${pkg}/bin/paperless-ngx migrate
+
+          # Parse old version string format for backwards compatibility
+          version=$(echo "$version" | grep -ohP '[^-]+$')
+
+          versionLessThan() {
+            target=$1
+            [[ $({ echo "$version"; echo "$target"; } | sort -V | head -1) != "$target" ]]
+          }
+
+          if versionLessThan 1.12.0; then
+            # Reindex documents as mentioned in https://github.com/paperless-ngx/paperless-ngx/releases/tag/v1.12.1
+            echo "Reindexing documents, to allow searching old comments. Required after the 1.12.x upgrade."
+            ${pkg}/bin/paperless-ngx document_index reindex
+          fi
+
+          echo ${pkg.version} > "$versionFile"
+        fi
+      ''
+      + optionalString (cfg.passwordFile != null) ''
+        export PAPERLESS_ADMIN_USER="''${PAPERLESS_ADMIN_USER:-admin}"
+        export PAPERLESS_ADMIN_PASSWORD=$(cat "${cfg.dataDir}/superuser-password")
+        superuserState="$PAPERLESS_ADMIN_USER:$PAPERLESS_ADMIN_PASSWORD"
+        superuserStateFile="${cfg.dataDir}/superuser-state"
+
+        if [[ $(cat "$superuserStateFile" 2>/dev/null) != $superuserState ]]; then
+          ${pkg}/bin/paperless-ngx manage_superuser
+          echo "$superuserState" > "$superuserStateFile"
+        fi
+      '';
+    } // optionalAttrs enableRedis {
+      after = [ "redis-paperless.service" ];
+    };
+
+    systemd.services.paperless-task-queue = {
+      description = "Paperless Celery Workers";
+      after = [ "paperless-scheduler.service" ];
+      serviceConfig = defaultServiceConfig // {
+        User = cfg.user;
+        ExecStart = "${pkg}/bin/celery --app paperless worker --loglevel INFO";
+        Restart = "on-failure";
+        # The `mbind` syscall is needed for running the classifier.
+        SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "mbind" ];
+        # Needs to talk to mail server for automated import rules
+        PrivateNetwork = false;
+      };
+      environment = env;
+    };
+
+    # Reading the user-provided password file requires root access
+    systemd.services.paperless-copy-password = mkIf (cfg.passwordFile != null) {
+      requiredBy = [ "paperless-scheduler.service" ];
+      before = [ "paperless-scheduler.service" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.coreutils}/bin/install --mode 600 --owner '${cfg.user}' --compare \
+            '${cfg.passwordFile}' '${cfg.dataDir}/superuser-password'
+        '';
+        Type = "oneshot";
+      };
+    };
+
+    # Download NLTK corpus data
+    systemd.services.paperless-download-nltk-data = {
+      wantedBy = [ "paperless-scheduler.service" ];
+      before = [ "paperless-scheduler.service" ];
+      after = [ "network-online.target" ];
+      serviceConfig = defaultServiceConfig // {
+        User = cfg.user;
+        Type = "oneshot";
+        # Enable internet access
+        PrivateNetwork = false;
+        ExecStart = let pythonWithNltk = pkg.python.withPackages (ps: [ ps.nltk ]); in ''
+          ${pythonWithNltk}/bin/python -m nltk.downloader -d '${nltkDir}' punkt snowball_data stopwords
+        '';
+      };
+    };
+
+    systemd.services.paperless-consumer = {
+      description = "Paperless document consumer";
+      # Bind to `paperless-scheduler` so that the consumer never runs
+      # during migrations
+      bindsTo = [ "paperless-scheduler.service" ];
+      after = [ "paperless-scheduler.service" ];
+      serviceConfig = defaultServiceConfig // {
+        User = cfg.user;
+        ExecStart = "${pkg}/bin/paperless-ngx document_consumer";
+        Restart = "on-failure";
+      };
+      environment = env;
+    };
+
+    systemd.services.paperless-web = {
+      description = "Paperless web server";
+      # Bind to `paperless-scheduler` so that the web server never runs
+      # during migrations
+      bindsTo = [ "paperless-scheduler.service" ];
+      after = [ "paperless-scheduler.service" ];
+      # Setup PAPERLESS_SECRET_KEY.
+      # If this environment variable is left unset, paperless-ngx defaults
+      # to a well-known value, which is insecure.
+      script = let
+        secretKeyFile = "${cfg.dataDir}/nixos-paperless-secret-key";
+      in ''
+        if [[ ! -f '${secretKeyFile}' ]]; then
+          (
+            umask 0377
+            tr -dc A-Za-z0-9 < /dev/urandom | head -c64 | ${pkgs.moreutils}/bin/sponge '${secretKeyFile}'
+          )
+        fi
+        export PAPERLESS_SECRET_KEY=$(cat '${secretKeyFile}')
+        if [[ ! $PAPERLESS_SECRET_KEY ]]; then
+          echo "PAPERLESS_SECRET_KEY is empty, refusing to start."
+          exit 1
+        fi
+        exec ${pkg.python.pkgs.gunicorn}/bin/gunicorn \
+          -c ${pkg}/lib/paperless-ngx/gunicorn.conf.py paperless.asgi:application
+      '';
+      serviceConfig = defaultServiceConfig // {
+        User = cfg.user;
+        Restart = "on-failure";
+
+        # gunicorn needs setuid, liblapack needs mbind
+        SystemCallFilter = defaultServiceConfig.SystemCallFilter ++ [ "@setuid mbind" ];
+        # Needs to serve web page
+        PrivateNetwork = false;
+      } // lib.optionalAttrs (cfg.port < 1024) {
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+      };
+      environment = env // {
+        PYTHONPATH = "${pkg.python.pkgs.makePythonPath pkg.propagatedBuildInputs}:${pkg}/lib/paperless-ngx/src";
+      };
+      # Allow the web interface to access the private /tmp directory of the server.
+      # This is required to support uploading files via the web interface.
+      unitConfig.JoinsNamespaceOf = "paperless-task-queue.service";
+    };
+
+    users = optionalAttrs (cfg.user == defaultUser) {
+      users.${defaultUser} = {
+        group = defaultUser;
+        uid = config.ids.uids.paperless;
+        home = cfg.dataDir;
+      };
+
+      groups.${defaultUser} = {
+        gid = config.ids.gids.paperless;
+      };
+    };
+  };
+}
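
A minimal usage sketch; the passwordFile path and the OCR language value are taken from the examples above:

  {
    services.paperless = {
      enable = true;
      address = "localhost";
      port = 28981;
      passwordFile = "/run/keys/paperless-password";
      extraConfig.PAPERLESS_OCR_LANGUAGE = "deu+eng";
    };
  }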
diff --git a/nixpkgs/nixos/modules/services/misc/parsoid.nix b/nixpkgs/nixos/modules/services/misc/parsoid.nix
new file mode 100644
index 000000000000..6f4a340c8a18
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/parsoid.nix
@@ -0,0 +1,129 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.parsoid;
+
+  parsoid = pkgs.nodePackages.parsoid;
+
+  confTree = {
+    worker_heartbeat_timeout = 300000;
+    logging = { level = "info"; };
+    services = [{
+      module = "lib/index.js";
+      entrypoint = "apiServiceWorker";
+      conf = {
+        mwApis = map (x: if isAttrs x then x else { uri = x; }) cfg.wikis;
+        serverInterface = cfg.interface;
+        serverPort = cfg.port;
+      };
+    }];
+  };
+
+  confFile = pkgs.writeText "config.yml" (builtins.toJSON (recursiveUpdate confTree cfg.extraConfig));
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "parsoid" "interwikis" ] "Use services.parsoid.wikis instead")
+  ];
+
+  ##### interface
+
+  options = {
+
+    services.parsoid = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Parsoid -- bidirectional
+          wikitext parser.
+        '';
+      };
+
+      wikis = mkOption {
+        type = types.listOf (types.either types.str types.attrs);
+        example = [ "http://localhost/api.php" ];
+        description = lib.mdDoc ''
+          Used MediaWiki API endpoints.
+        '';
+      };
+
+      workers = mkOption {
+        type = types.int;
+        default = 2;
+        description = lib.mdDoc ''
+          Number of Parsoid workers.
+        '';
+      };
+
+      interface = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          Interface to listen on.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8000;
+        description = lib.mdDoc ''
+          Port to listen on.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc ''
+          Extra configuration to add to parsoid configuration.
+        '';
+      };
+
+    };
+
+  };
+
+  ##### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.parsoid = {
+      description = "Bidirectional wikitext parser";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${parsoid}/lib/node_modules/parsoid/bin/server.js -c ${confFile} -n ${toString cfg.workers}";
+
+        DynamicUser = true;
+        User = "parsoid";
+        Group = "parsoid";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        #MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+      };
+    };
+
+  };
+
+}
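
A minimal usage sketch for the options defined above (values are illustrative, mirroring the option defaults and examples):

```nix
{
  services.parsoid = {
    enable = true;
    # MediaWiki API endpoints this Parsoid instance serves.
    wikis = [ "http://localhost/api.php" ];
    workers = 2;
    interface = "127.0.0.1";
    port = 8000;
  };
}
```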
diff --git a/nixpkgs/nixos/modules/services/misc/persistent-evdev.nix b/nixpkgs/nixos/modules/services/misc/persistent-evdev.nix
new file mode 100644
index 000000000000..b1f367fec7fb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/persistent-evdev.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.persistent-evdev;
+  settingsFormat = pkgs.formats.json {};
+
+  configFile = settingsFormat.generate "persistent-evdev-config" {
+    cache = "/var/cache/persistent-evdev";
+    devices = lib.mapAttrs (virt: phys: "/dev/input/by-id/${phys}") cfg.devices;
+  };
+in
+{
+  options.services.persistent-evdev = {
+    enable = lib.mkEnableOption (lib.mdDoc "virtual input devices that persist even if the backing device is hotplugged");
+
+    devices = lib.mkOption {
+      default = {};
+      type = with lib.types; attrsOf str;
+      description = lib.mdDoc ''
+        A set of virtual proxy device labels with backing physical device ids.
+
+        Physical devices should already exist in {file}`/dev/input/by-id/`.
+        Proxy devices will be automatically given a `uinput-` prefix.
+
+        See the [project page](https://github.com/aiberia/persistent-evdev#example-usage-with-libvirt)
+        for example configuration of virtual devices with libvirt
+        and remember to add `uinput-*` devices to the qemu
+        `cgroup_device_acl` list (see [](#opt-virtualisation.libvirtd.qemu.verbatimConfig)).
+      '';
+      example = lib.literalExpression ''
+        {
+          persist-mouse0 = "usb-Logitech_G403_Prodigy_Gaming_Mouse_078738533531-event-if01";
+          persist-mouse1 = "usb-Logitech_G403_Prodigy_Gaming_Mouse_078738533531-event-mouse";
+          persist-mouse2 = "usb-Logitech_G403_Prodigy_Gaming_Mouse_078738533531-if01-event-kbd";
+          persist-keyboard0 = "usb-Microsoft_Natural®_Ergonomic_Keyboard_4000-event-kbd";
+          persist-keyboard1 = "usb-Microsoft_Natural®_Ergonomic_Keyboard_4000-if01-event-kbd";
+        }
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    systemd.services.persistent-evdev = {
+      documentation = [ "https://github.com/aiberia/persistent-evdev/blob/master/README.md" ];
+      description = "Persistent evdev proxy";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Restart = "on-failure";
+        ExecStart = "${pkgs.persistent-evdev}/bin/persistent-evdev.py ${configFile}";
+        CacheDirectory = "persistent-evdev";
+      };
+    };
+
+    services.udev.packages = [ pkgs.persistent-evdev ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ lodi ];
+}
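
Putting the `devices` option above to use, a short sketch of a full configuration (the device id is taken from the module's own example):

```nix
{
  services.persistent-evdev = {
    enable = true;
    # Virtual proxy label -> backing device id under /dev/input/by-id/;
    # the proxy appears as uinput-persist-keyboard0.
    devices = {
      persist-keyboard0 = "usb-Microsoft_Natural®_Ergonomic_Keyboard_4000-event-kbd";
    };
  };
}
```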
diff --git a/nixpkgs/nixos/modules/services/misc/pinnwand.nix b/nixpkgs/nixos/modules/services/misc/pinnwand.nix
new file mode 100644
index 000000000000..5fca9f4125a8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/pinnwand.nix
@@ -0,0 +1,122 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.pinnwand;
+
+  format = pkgs.formats.toml {};
+  configFile = format.generate "pinnwand.toml" cfg.settings;
+in
+{
+  options.services.pinnwand = {
+    enable = mkEnableOption (lib.mdDoc "Pinnwand");
+
+    port = mkOption {
+      type = types.port;
+      description = lib.mdDoc "The port to listen on.";
+      default = 8000;
+    };
+
+    settings = mkOption {
+      default = {};
+      description = lib.mdDoc ''
+        Your {file}`pinnwand.toml` as a Nix attribute set. Look up
+        possible options in the [documentation](https://pinnwand.readthedocs.io/en/v${pkgs.pinnwand.version}/configuration.html).
+      '';
+      type = types.submodule {
+        freeformType = format.type;
+        options = {
+          database_uri = mkOption {
+            type = types.str;
+            default = "sqlite:////var/lib/pinnwand/pinnwand.db";
+            example = "sqlite:///:memory";
+            description = lib.mdDoc ''
+              Database URI compatible with [SQLAlchemy](https://docs.sqlalchemy.org/en/14/core/engines.html#database-urls).
+
+              Additional packages may need to be introduced into the environment for certain databases.
+            '';
+          };
+
+          paste_size = mkOption {
+            type = types.ints.positive;
+            default = 262144;
+            example = 524288;
+            description = lib.mdDoc ''
+              Maximum size of a paste in bytes.
+            '';
+          };
+          paste_help = mkOption {
+            type = types.str;
+            default = ''
+              <p>Welcome to pinnwand, this site is a pastebin. It allows you to share code with others. If you write code in the text area below and press the paste button you will be given a link you can share with others so they can view your code as well.</p><p>People with the link can view your pasted code, only you can remove your paste and it expires automatically. Note that anyone could guess the URI to your paste so don't rely on it being private.</p>
+            '';
+            description = lib.mdDoc ''
+              Raw HTML help text shown in the header area.
+            '';
+          };
+          footer = mkOption {
+            type = types.str;
+            default = ''
+              View <a href="//github.com/supakeen/pinnwand" target="_BLANK">source code</a>, the <a href="/removal">removal</a> or <a href="/expiry">expiry</a> stories, or read the <a href="/about">about</a> page.
+            '';
+            description = lib.mdDoc ''
+              The footer in raw HTML.
+            '';
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.pinnwand = {
+      description = "Pinnwannd HTTP Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      unitConfig.Documentation = "https://pinnwand.readthedocs.io/en/latest/";
+
+      serviceConfig = {
+        ExecStart = "${pkgs.pinnwand}/bin/pinnwand --configuration-path ${configFile} http --port ${toString cfg.port}";
+        User = "pinnwand";
+        DynamicUser = true;
+
+        StateDirectory = "pinnwand";
+        StateDirectoryMode = "0700";
+
+        AmbientCapabilities = [];
+        CapabilityBoundingSet = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [
+          "AF_UNIX"
+          "AF_INET"
+          "AF_INET6"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        UMask = "0077";
+      };
+    };
+  };
+
+  meta.buildDocsInSandbox = false;
+}
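
A small usage sketch, assuming the module defaults are acceptable apart from an illustrative paste size:

```nix
{
  services.pinnwand = {
    enable = true;
    port = 8000;
    # Freeform settings written to pinnwand.toml; database_uri, paste_size,
    # paste_help and footer carry module-level defaults that can be overridden.
    settings.paste_size = 524288;
  };
}
```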
diff --git a/nixpkgs/nixos/modules/services/misc/plex.nix b/nixpkgs/nixos/modules/services/misc/plex.nix
new file mode 100644
index 000000000000..7fc76028c02a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/plex.nix
@@ -0,0 +1,181 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.plex;
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "plex" "managePlugins" ] "Please omit or define the option: `services.plex.extraPlugins' instead.")
+  ];
+
+  options = {
+    services.plex = {
+      enable = mkEnableOption (lib.mdDoc "Plex Media Server");
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/plex";
+        description = lib.mdDoc ''
+          The directory where Plex stores its data files.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the media server.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "plex";
+        description = lib.mdDoc ''
+          User account under which Plex runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "plex";
+        description = lib.mdDoc ''
+          Group under which Plex runs.
+        '';
+      };
+
+      extraPlugins = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          A list of paths to extra plugin bundles to install in Plex's plugin
+          directory. Every time the systemd unit for Plex starts up, all of the
+          symlinks in Plex's plugin directory will be cleared and this module
+          will symlink all of the paths specified here to that directory.
+        '';
+        example = literalExpression ''
+          [
+            (builtins.path {
+              name = "Audnexus.bundle";
+              path = pkgs.fetchFromGitHub {
+                owner = "djdembeck";
+                repo = "Audnexus.bundle";
+                rev = "v0.2.8";
+                sha256 = "sha256-IWOSz3vYL7zhdHan468xNc6C/eQ2C2BukQlaJNLXh7E=";
+              };
+            })
+          ]
+        '';
+      };
+
+      extraScanners = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          A list of paths to extra scanners to install in Plex's scanners
+          directory.
+
+          Every time the systemd unit for Plex starts up, all of the symlinks
+          in Plex's scanners directory will be cleared and this module will
+          symlink all of the paths specified here to that directory.
+        '';
+        example = literalExpression ''
+          [
+            (fetchFromGitHub {
+              owner = "ZeroQI";
+              repo = "Absolute-Series-Scanner";
+              rev = "773a39f502a1204b0b0255903cee4ed02c46fde0";
+              sha256 = "4l+vpiDdC8L/EeJowUgYyB3JPNTZ1sauN8liFAcK+PY=";
+            })
+          ]
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.plex;
+        defaultText = literalExpression "pkgs.plex";
+        description = lib.mdDoc ''
+          The Plex package to use. Plex subscribers may wish to use their own
+          package here, pointing to subscriber-only server versions.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Most of this is just copied from the RPM package's systemd service file.
+    systemd.services.plex = {
+      description = "Plex Media Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+
+        # Run the pre-start script with full permissions (the "!" prefix) so it
+        # can create the data directory if necessary.
+        ExecStartPre = let
+          preStartScript = pkgs.writeScript "plex-run-prestart" ''
+            #!${pkgs.bash}/bin/bash
+
+            # Create data directory if it doesn't exist
+            if ! test -d "$PLEX_DATADIR"; then
+              echo "Creating initial Plex data directory in: $PLEX_DATADIR"
+              install -d -m 0755 -o "${cfg.user}" -g "${cfg.group}" "$PLEX_DATADIR"
+            fi
+          '';
+        in
+          "!${preStartScript}";
+
+        ExecStart = "${cfg.package}/bin/plexmediaserver";
+        KillSignal = "SIGQUIT";
+        PIDFile = "${cfg.dataDir}/Plex Media Server/plexmediaserver.pid";
+        Restart = "on-failure";
+      };
+
+      environment = {
+        # Configuration for our FHS userenv script
+        PLEX_DATADIR = cfg.dataDir;
+        PLEX_PLUGINS = concatMapStringsSep ":" builtins.toString cfg.extraPlugins;
+        PLEX_SCANNERS = concatMapStringsSep ":" builtins.toString cfg.extraScanners;
+
+        # The following variables should be set by the FHS userenv script:
+        #   PLEX_MEDIA_SERVER_APPLICATION_SUPPORT_DIR
+        #   PLEX_MEDIA_SERVER_HOME
+
+        # Allow access to GPU acceleration; the Plex LD_LIBRARY_PATH is added
+        # by the FHS userenv script.
+        LD_LIBRARY_PATH = "/run/opengl-driver/lib";
+
+        PLEX_MEDIA_SERVER_MAX_PLUGIN_PROCS = "6";
+        PLEX_MEDIA_SERVER_TMPDIR = "/tmp";
+        PLEX_MEDIA_SERVER_USE_SYSLOG = "true";
+        LC_ALL = "en_US.UTF-8";
+        LANG = "en_US.UTF-8";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 32400 3005 8324 32469 ];
+      allowedUDPPorts = [ 1900 5353 32410 32412 32413 32414 ];
+    };
+
+    users.users = mkIf (cfg.user == "plex") {
+      plex = {
+        group = cfg.group;
+        uid = config.ids.uids.plex;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "plex") {
+      plex = {
+        gid = config.ids.gids.plex;
+      };
+    };
+  };
+}
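
A minimal sketch of enabling the module defined above (values illustrative; `openFirewall` opens the TCP/UDP ports listed in the firewall section of the module):

```nix
{
  services.plex = {
    enable = true;
    openFirewall = true;          # opens TCP 32400 plus the discovery ports above
    dataDir = "/var/lib/plex";    # module default
    # extraPlugins and extraScanners take store paths that are re-symlinked
    # into Plex's plugin/scanner directories on every service start.
  };
}
```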
diff --git a/nixpkgs/nixos/modules/services/misc/plikd.nix b/nixpkgs/nixos/modules/services/misc/plikd.nix
new file mode 100644
index 000000000000..9b0825bf40c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/plikd.nix
@@ -0,0 +1,82 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.plikd;
+
+  format = pkgs.formats.toml {};
+  plikdCfg = format.generate "plikd.cfg" cfg.settings;
+in
+{
+  options = {
+    services.plikd = {
+      enable = mkEnableOption (lib.mdDoc "the plikd server");
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the plikd.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = {};
+        description = lib.mdDoc ''
+          Configuration for plikd, see <https://github.com/root-gg/plik/blob/master/server/plikd.cfg>
+          for supported values.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.plikd.settings = mapAttrs (name: mkDefault) {
+      ListenPort = 8080;
+      ListenAddress = "localhost";
+      DataBackend = "file";
+      DataBackendConfig = {
+         Directory = "/var/lib/plikd";
+      };
+      MetadataBackendConfig = {
+        Driver = "sqlite3";
+        ConnectionString = "/var/lib/plikd/plik.db";
+      };
+    };
+
+    systemd.services.plikd = {
+      description = "Plikd file sharing server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.plikd}/bin/plikd --config ${plikdCfg}";
+        Restart = "on-failure";
+        StateDirectory = "plikd";
+        LogsDirectory = "plikd";
+        DynamicUser = true;
+
+        # Basic hardening
+        NoNewPrivileges = "yes";
+        PrivateTmp = "yes";
+        PrivateDevices = "yes";
+        DevicePolicy = "closed";
+        ProtectSystem = "strict";
+        ProtectHome = "read-only";
+        ProtectControlGroups = "yes";
+        ProtectKernelModules = "yes";
+        ProtectKernelTunables = "yes";
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
+        RestrictNamespaces = "yes";
+        RestrictRealtime = "yes";
+        RestrictSUIDSGID = "yes";
+        MemoryDenyWriteExecute = "yes";
+        LockPersonality = "yes";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.settings.ListenPort ];
+    };
+  };
+}
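
A short usage sketch for the options above (values illustrative; the module pre-seeds the listed defaults with `mkDefault`, so they can be overridden through `settings`):

```nix
{
  services.plikd = {
    enable = true;
    openFirewall = true;          # opens settings.ListenPort (8080 by default)
    # Freeform TOML settings written to plikd.cfg.
    settings.ListenPort = 8080;
  };
}
```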
diff --git a/nixpkgs/nixos/modules/services/misc/podgrab.nix b/nixpkgs/nixos/modules/services/misc/podgrab.nix
new file mode 100644
index 000000000000..c596122fd31c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/podgrab.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.podgrab;
+in
+{
+  options.services.podgrab = with lib; {
+    enable = mkEnableOption (lib.mdDoc "Podgrab, a self-hosted podcast manager");
+
+    passwordFile = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "/run/secrets/password.env";
+      description = lib.mdDoc ''
+        The path to a file containing the PASSWORD environment variable
+        definition for Podgrab's authentication.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      example = 4242;
+      description = lib.mdDoc "The port on which Podgrab will listen for incoming HTTP traffic.";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.podgrab = {
+      description = "Podgrab podcast manager";
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        CONFIG = "/var/lib/podgrab/config";
+        DATA = "/var/lib/podgrab/data";
+        GIN_MODE = "release";
+        PORT = toString cfg.port;
+      };
+      serviceConfig = {
+        DynamicUser = true;
+        EnvironmentFile = lib.optionals (cfg.passwordFile != null) [
+          cfg.passwordFile
+        ];
+        ExecStart = "${pkgs.podgrab}/bin/podgrab";
+        WorkingDirectory = "${pkgs.podgrab}/share";
+        StateDirectory = [ "podgrab/config" "podgrab/data" ];
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ambroisie ];
+}
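
A minimal sketch of the options above (values illustrative, taken from the option examples):

```nix
{
  services.podgrab = {
    enable = true;
    port = 4242;
    # Optional: file defining the PASSWORD environment variable for authentication.
    passwordFile = "/run/secrets/password.env";
  };
}
```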
diff --git a/nixpkgs/nixos/modules/services/misc/polaris.nix b/nixpkgs/nixos/modules/services/misc/polaris.nix
new file mode 100644
index 000000000000..70f097f02840
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/polaris.nix
@@ -0,0 +1,151 @@
+{ config
+, pkgs
+, lib
+, ...}:
+
+with lib;
+let
+  cfg = config.services.polaris;
+  settingsFormat = pkgs.formats.toml {};
+in
+{
+  options = {
+    services.polaris = {
+      enable = mkEnableOption (lib.mdDoc "Polaris Music Server");
+
+      package = mkPackageOptionMD pkgs "polaris" { };
+
+      user = mkOption {
+        type = types.str;
+        default = "polaris";
+        description = lib.mdDoc "User account under which Polaris runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "polaris";
+        description = lib.mdDoc "Group under which Polaris is run.";
+      };
+
+      extraGroups = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Polaris' auxiliary groups.";
+        example = literalExpression ''["media" "music"]'';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5050;
+        description = lib.mdDoc ''
+          The port on which the Polaris REST API and web UI listen.
+          Note: Polaris is hardcoded to listen on the address "0.0.0.0" (all interfaces).
+        '';
+      };
+
+      settings = mkOption {
+        type = settingsFormat.type;
+        default = {};
+        description = lib.mdDoc ''
+          Contents of the Polaris TOML configuration, applied on each start.
+          Although poorly documented, an example may be found here:
+          [test-config.toml](https://github.com/agersant/polaris/blob/374d0ca56fc0a466d797a4b252e2078607476797/test-data/config.toml)
+        '';
+        example = literalExpression ''
+          {
+            settings.reindex_every_n_seconds = 7*24*60*60; # weekly, default is 1800
+            settings.album_art_pattern =
+              "(cover|front|folder)\.(jpeg|jpg|png|bmp|gif)";
+            mount_dirs = [
+              {
+                name = "NAS";
+                source = "/mnt/nas/music";
+              }
+              {
+                name = "Local";
+                source = "/home/my_user/Music";
+              }
+            ];
+          }
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open the configured port in the firewall.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.polaris = {
+      description = "Polaris Music Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = rec {
+        User = cfg.user;
+        Group = cfg.group;
+        DynamicUser = true;
+        SupplementaryGroups = cfg.extraGroups;
+        StateDirectory = "polaris";
+        CacheDirectory = "polaris";
+        ExecStart = escapeShellArgs ([
+          "${cfg.package}/bin/polaris"
+          "--foreground"
+          "--port" cfg.port
+          "--database" "/var/lib/${StateDirectory}/db.sqlite"
+          "--cache" "/var/cache/${CacheDirectory}"
+        ] ++ optionals (cfg.settings != {}) [
+          "--config" (settingsFormat.generate "polaris-config.toml" cfg.settings)
+        ]);
+        Restart = "on-failure";
+
+        # Security options:
+
+        #NoNewPrivileges = true; # implied by DynamicUser
+        #RemoveIPC = true; # implied by DynamicUser
+
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+
+        DeviceAllow = "";
+
+        LockPersonality = true;
+
+        #PrivateTmp = true; # implied by DynamicUser
+        PrivateDevices = true;
+        PrivateUsers = true;
+
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+
+        RestrictNamespaces = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictRealtime = true;
+        #RestrictSUIDSGID = true; # implied by DynamicUser
+
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [
+          "@system-service"
+          "~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
+        ];
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ pbsds ];
+}
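
A hedged sketch of how the options above might be combined (paths and group name are illustrative, not defaults):

```nix
{
  services.polaris = {
    enable = true;
    port = 5050;
    openFirewall = true;
    # Auxiliary groups, e.g. one that can read the music collection (illustrative).
    extraGroups = [ "music" ];
    settings.mount_dirs = [
      { name = "Local"; source = "/srv/music"; }
    ];
  };
}
```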
diff --git a/nixpkgs/nixos/modules/services/misc/portunus.nix b/nixpkgs/nixos/modules/services/misc/portunus.nix
new file mode 100644
index 000000000000..d18881986970
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/portunus.nix
@@ -0,0 +1,289 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.portunus;
+
+in
+{
+  options.services.portunus = {
+    enable = mkEnableOption (lib.mdDoc "Portunus, a self-contained user/group management and authentication service for LDAP");
+
+    domain = mkOption {
+      type = types.str;
+      example = "sso.example.com";
+      description = lib.mdDoc "Subdomain which gets reverse proxied to Portunus webserver.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc ''
+        Port on which the Portunus webserver should listen.
+
+        This must be put behind a TLS-capable reverse proxy because Portunus only listens on localhost.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.portunus;
+      defaultText = lib.literalExpression "pkgs.portunus";
+      description = lib.mdDoc "The Portunus package to use.";
+    };
+
+    seedPath = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to a portunus seed file in json format.
+        See <https://github.com/majewsky/portunus#seeding-users-and-groups-from-static-configuration> for available options.
+      '';
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = "/var/lib/portunus";
+      description = lib.mdDoc "Path where Portunus stores its state.";
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "portunus";
+      description = lib.mdDoc "User account under which Portunus runs its webserver.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "portunus";
+      description = lib.mdDoc "Group account under which Portunus runs its webserver.";
+    };
+
+    dex = {
+      enable = mkEnableOption (lib.mdDoc ''
+        Dex ldap connector.
+
+        To activate dex, first a search user must be created in the Portunus web UI,
+        and then its password must be set as the `DEX_SEARCH_USER_PASSWORD` environment variable
+        in the [](#opt-services.dex.environmentFile) setting.
+      '');
+
+      oidcClients = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            callbackURL = mkOption {
+              type = types.str;
+              description = lib.mdDoc "URL where the OIDC client should redirect";
+            };
+            id = mkOption {
+              type = types.str;
+              description = lib.mdDoc "ID of the OIDC client";
+            };
+          };
+        });
+        default = [ ];
+        example = [
+          {
+            callbackURL = "https://example.com/client/oidc/callback";
+            id = "service";
+          }
+        ];
+        description = lib.mdDoc ''
+          List of OIDC clients.
+
+          The OIDC secret must be set as the `DEX_CLIENT_''${id}` environment variable
+          in the [](#opt-services.dex.environmentFile) setting.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5556;
+        description = lib.mdDoc "Port where dex should listen on.";
+      };
+    };
+
+    ldap = {
+      package = mkOption {
+        type = types.package;
+        # needs openldap built with a libxcrypt that support crypt sha256 until https://github.com/majewsky/portunus/issues/2 is solved
+        default = pkgs.openldap.override { libxcrypt = pkgs.libxcrypt-legacy; };
+        defaultText = lib.literalExpression "pkgs.openldap.override { libxcrypt = pkgs.libxcrypt-legacy; }";
+        description = lib.mdDoc "The OpenLDAP package to use.";
+      };
+
+      searchUserName = mkOption {
+        type = types.str;
+        default = "";
+        example = "admin";
+        description = lib.mdDoc ''
+          The login name of the search user.
+          This user account must be configured in Portunus either manually or via seeding.
+        '';
+      };
+
+      suffix = mkOption {
+        type = types.str;
+        example = "dc=example,dc=org";
+        description = lib.mdDoc ''
+          The DN of the topmost entry in your LDAP directory.
+          Please refer to the Portunus documentation for more information on how this impacts the structure of the LDAP directory.
+        '';
+      };
+
+      tls = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable LDAPS protocol.
+          This also adds two entries to the `/etc/hosts` file to point [](#opt-services.portunus.domain) to localhost,
+          so that CLIs and programs can use ldaps protocol and verify the certificate without opening the firewall port for the protocol.
+
+          This requires a TLS certificate for [](#opt-services.portunus.domain) to be configured via [](#opt-security.acme.certs).
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "openldap";
+        description = lib.mdDoc "User account under which Portunus runs its LDAP server.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "openldap";
+        description = lib.mdDoc "Group account under which Portunus runs its LDAP server.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.dex.enable -> cfg.ldap.searchUserName != "";
+        message = "services.portunus.dex.enable requires services.portunus.ldap.searchUserName to be set.";
+      }
+    ];
+
+    # add ldapsearch(1) etc. to interactive shells
+    environment.systemPackages = [ cfg.ldap.package ];
+
+    # allow connecting via ldaps /w certificate without opening ports
+    networking.hosts = mkIf cfg.ldap.tls {
+      "::1" = [ cfg.domain ];
+      "127.0.0.1" = [ cfg.domain ];
+    };
+
+    services.dex = mkIf cfg.dex.enable {
+      enable = true;
+      settings = {
+        issuer = "https://${cfg.domain}/dex";
+        web.http = "127.0.0.1:${toString cfg.dex.port}";
+        storage = {
+          type = "sqlite3";
+          config.file = "/var/lib/dex/dex.db";
+        };
+        enablePasswordDB = false;
+        connectors = [{
+          type = "ldap";
+          id = "ldap";
+          name = "LDAP";
+          config = {
+            host = "${cfg.domain}:636";
+            bindDN = "uid=${cfg.ldap.searchUserName},ou=users,${cfg.ldap.suffix}";
+            bindPW = "$DEX_SEARCH_USER_PASSWORD";
+            userSearch = {
+              baseDN = "ou=users,${cfg.ldap.suffix}";
+              filter = "(objectclass=person)";
+              username = "uid";
+              idAttr = "uid";
+              emailAttr = "mail";
+              nameAttr = "cn";
+              preferredUsernameAttr = "uid";
+            };
+            groupSearch = {
+              baseDN = "ou=groups,${cfg.ldap.suffix}";
+              filter = "(objectclass=groupOfNames)";
+              nameAttr = "cn";
+              userMatchers = [{ userAttr = "DN"; groupAttr = "member"; }];
+            };
+          };
+        }];
+
+        staticClients = forEach cfg.dex.oidcClients (client: {
+          inherit (client) id;
+          redirectURIs = [ client.callbackURL ];
+          name = "OIDC for ${client.id}";
+          secretEnv = "DEX_CLIENT_${client.id}";
+        });
+      };
+    };
+
+    systemd.services = {
+      dex.serviceConfig = mkIf cfg.dex.enable {
+        # `dex.service` is super locked down out of the box, but we need some
+        # place to write the SQLite database. This creates $STATE_DIRECTORY below
+        # /var/lib/private because DynamicUser=true, but it gets symlinked into
+        # /var/lib/dex inside the unit
+        StateDirectory = "dex";
+      };
+
+      portunus = {
+        description = "Self-contained authentication service";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig.ExecStart = "${cfg.package.out}/bin/portunus-orchestrator";
+        environment = {
+          PORTUNUS_LDAP_SUFFIX = cfg.ldap.suffix;
+          PORTUNUS_SERVER_BINARY = "${cfg.package}/bin/portunus-server";
+          PORTUNUS_SERVER_GROUP = cfg.group;
+          PORTUNUS_SERVER_USER = cfg.user;
+          PORTUNUS_SERVER_HTTP_LISTEN = "127.0.0.1:${toString cfg.port}";
+          PORTUNUS_SERVER_STATE_DIR = cfg.stateDir;
+          PORTUNUS_SLAPD_BINARY = "${cfg.ldap.package}/libexec/slapd";
+          PORTUNUS_SLAPD_GROUP = cfg.ldap.group;
+          PORTUNUS_SLAPD_USER = cfg.ldap.user;
+          PORTUNUS_SLAPD_SCHEMA_DIR = "${cfg.ldap.package}/etc/schema";
+        } // (optionalAttrs (cfg.seedPath != null) ({
+          PORTUNUS_SEED_PATH = cfg.seedPath;
+        })) // (optionalAttrs cfg.ldap.tls (
+          let
+            acmeDirectory = config.security.acme.certs."${cfg.domain}".directory;
+          in
+          {
+            PORTUNUS_SLAPD_TLS_CA_CERTIFICATE = "/etc/ssl/certs/ca-certificates.crt";
+            PORTUNUS_SLAPD_TLS_CERTIFICATE = "${acmeDirectory}/cert.pem";
+            PORTUNUS_SLAPD_TLS_DOMAIN_NAME = cfg.domain;
+            PORTUNUS_SLAPD_TLS_PRIVATE_KEY = "${acmeDirectory}/key.pem";
+          }));
+      };
+    };
+
+    users.users = mkMerge [
+      (mkIf (cfg.ldap.user == "openldap") {
+        openldap = {
+          group = cfg.ldap.group;
+          isSystemUser = true;
+        };
+      })
+      (mkIf (cfg.user == "portunus") {
+        portunus = {
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      })
+    ];
+
+    users.groups = mkMerge [
+      (mkIf (cfg.ldap.user == "openldap") {
+        openldap = { };
+      })
+      (mkIf (cfg.user == "portunus") {
+        portunus = { };
+      })
+    ];
+  };
+
+  meta.maintainers = [ maintainers.majewsky ] ++ teams.c3d2.members;
+}
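
A minimal sketch combining the Portunus options above with the dex connector (domain, suffix and client values are illustrative; the dex secrets still need to be supplied via `services.dex.environmentFile` as described above):

```nix
{
  services.portunus = {
    enable = true;
    domain = "sso.example.com";        # must sit behind a TLS-capable reverse proxy
    port = 8080;
    ldap.suffix = "dc=example,dc=org";
    ldap.searchUserName = "search";    # required when dex.enable = true
    dex = {
      enable = true;
      oidcClients = [
        { id = "service"; callbackURL = "https://example.com/client/oidc/callback"; }
      ];
    };
  };
}
```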
diff --git a/nixpkgs/nixos/modules/services/misc/prowlarr.nix b/nixpkgs/nixos/modules/services/misc/prowlarr.nix
new file mode 100644
index 000000000000..836280d3e5fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/prowlarr.nix
@@ -0,0 +1,43 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prowlarr;
+
+in
+{
+  options = {
+    services.prowlarr = {
+      enable = mkEnableOption (lib.mdDoc "Prowlarr");
+
+      package = mkPackageOptionMD pkgs "prowlarr" { };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the Prowlarr web interface.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.prowlarr = {
+      description = "Prowlarr";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        DynamicUser = true;
+        StateDirectory = "prowlarr";
+        ExecStart = "${lib.getExe cfg.package} -nobrowser -data=/var/lib/prowlarr";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 9696 ];
+    };
+  };
+}
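
The corresponding usage is a one-liner plus an optional firewall toggle:

```nix
{
  services.prowlarr = {
    enable = true;
    openFirewall = true;   # opens TCP 9696, where Prowlarr listens
  };
}
```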
diff --git a/nixpkgs/nixos/modules/services/misc/pufferpanel.nix b/nixpkgs/nixos/modules/services/misc/pufferpanel.nix
new file mode 100644
index 000000000000..2022406c8325
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/pufferpanel.nix
@@ -0,0 +1,176 @@
+{ config, pkgs, lib, ... }:
+let
+  cfg = config.services.pufferpanel;
+in
+{
+  options.services.pufferpanel = {
+    enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable PufferPanel game management server.
+
+        Note that [PufferPanel templates] and binaries downloaded by PufferPanel
+        expect an [FHS environment]. It is possible to set the {option}`package` option
+        to a PufferPanel wrapper with an FHS environment. For example, to use the
+        `Download Game from Steam` and `Download Java` template operations:
+        ```Nix
+        { lib, pkgs, ... }: {
+          services.pufferpanel = {
+            enable = true;
+            extraPackages = with pkgs; [ bash curl gawk gnutar gzip ];
+            package = pkgs.buildFHSEnv {
+              name = "pufferpanel-fhs";
+              runScript = lib.getExe pkgs.pufferpanel;
+              targetPkgs = pkgs': with pkgs'; [ icu openssl zlib ];
+            };
+          };
+        }
+        ```
+
+        [PufferPanel templates]: https://github.com/PufferPanel/templates
+        [FHS environment]: https://wikipedia.org/wiki/Filesystem_Hierarchy_Standard
+      '';
+    };
+
+    package = lib.mkPackageOptionMD pkgs "pufferpanel" { };
+
+    extraGroups = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
+      default = [ ];
+      example = [ "podman" ];
+      description = lib.mdDoc ''
+        Additional groups for the systemd service.
+      '';
+    };
+
+    extraPackages = lib.mkOption {
+      type = lib.types.listOf lib.types.package;
+      default = [ ];
+      example = lib.literalExpression "[ pkgs.jre ]";
+      description = lib.mdDoc ''
+        Packages to add to the PATH environment variable. Both the {file}`bin`
+        and {file}`sbin` subdirectories of each package are added.
+      '';
+    };
+
+    environment = lib.mkOption {
+      type = lib.types.attrsOf lib.types.str;
+      default = { };
+      example = lib.literalExpression ''
+        {
+          PUFFER_WEB_HOST = ":8080";
+          PUFFER_DAEMON_SFTP_HOST = ":5657";
+          PUFFER_DAEMON_CONSOLE_BUFFER = "1000";
+          PUFFER_DAEMON_CONSOLE_FORWARD = "true";
+          PUFFER_PANEL_REGISTRATIONENABLED = "false";
+        }
+      '';
+      description = lib.mdDoc ''
+        Environment variables to set for the service. Secrets should be
+        specified using {option}`environmentFile`.
+
+        Refer to the [PufferPanel source code][] for the list of available
+        configuration options. The variable name is the upper-cased configuration
+        entry name with underscores instead of dots, prefixed with `PUFFER_`.
+        For example, the `panel.settings.companyName` entry can be set using
+        {env}`PUFFER_PANEL_SETTINGS_COMPANYNAME`.
+
+        When running with the panel enabled (configured with the `PUFFER_PANEL_ENABLE`
+        environment variable), it is recommended to disable registration using the
+        `PUFFER_PANEL_REGISTRATIONENABLED` environment variable (registration is
+        enabled by default). To create the initial administrator user, run
+        {command}`pufferpanel --workDir /var/lib/pufferpanel user add --admin`.
+
+        Some options set this way override the corresponding settings configured via the
+        web interface (e.g. `PUFFER_PANEL_REGISTRATIONENABLED`). Such settings can still be
+        toggled or changed temporarily in the web interface, but the changes do not persist between restarts.
+
+        [PufferPanel source code]: https://github.com/PufferPanel/PufferPanel/blob/master/config/entries.go
+      '';
+    };
+
+    environmentFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = lib.mdDoc ''
+        File to load environment variables from. Loaded variables override
+        values set in {option}`environment`.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.pufferpanel = {
+      description = "PufferPanel game management server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      path = cfg.extraPackages;
+      environment = cfg.environment;
+
+      # Note that we export environment variables for service directories if the
+      # value is not set. An empty environment variable is considered to be set.
+      # E.g.
+      #   export PUFFER_LOGS=${PUFFER_LOGS-$LOGS_DIRECTORY}
+      # would set PUFFER_LOGS to $LOGS_DIRECTORY if PUFFER_LOGS environment
+      # variable is not defined.
+      script = ''
+        ${lib.concatLines (lib.mapAttrsToList (name: value: ''
+          export ${name}="''${${name}-${value}}"
+        '') {
+          PUFFER_LOGS = "$LOGS_DIRECTORY";
+          PUFFER_DAEMON_DATA_CACHE = "$CACHE_DIRECTORY";
+          PUFFER_DAEMON_DATA_SERVERS = "$STATE_DIRECTORY/servers";
+          PUFFER_DAEMON_DATA_BINARIES = "$STATE_DIRECTORY/binaries";
+        })}
+        exec ${lib.getExe cfg.package} run --workDir "$STATE_DIRECTORY"
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        Restart = "always";
+
+        UMask = "0077";
+
+        SupplementaryGroups = cfg.extraGroups;
+
+        StateDirectory = "pufferpanel";
+        StateDirectoryMode = "0700";
+        CacheDirectory = "pufferpanel";
+        CacheDirectoryMode = "0700";
+        LogsDirectory = "pufferpanel";
+        LogsDirectoryMode = "0700";
+
+        EnvironmentFile = cfg.environmentFile;
+
+        # Command "pufferpanel shutdown --pid $MAINPID" sends SIGTERM (code 15)
+        # to the main process and waits for termination. This is essentially
+        # KillMode=mixed we are using here. See
+        # https://freedesktop.org/software/systemd/man/systemd.kill.html#KillMode=
+        KillMode = "mixed";
+
+        DynamicUser = true;
+        ProtectHome = true;
+        ProtectProc = "invisible";
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectControlGroups = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        PrivateUsers = true;
+        PrivateDevices = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = [ "user" "mnt" ]; # allow buildFHSEnv
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        LockPersonality = true;
+        DeviceAllow = [ "" ];
+        DevicePolicy = "closed";
+        CapabilityBoundingSet = [ "" ];
+      };
+    };
+  };
+
+  meta.maintainers = [ lib.maintainers.tie ];
+}
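
Illustrating the `PUFFER_`-prefixed naming convention described above, a hedged sketch of a panel configuration (the environment file path and values are assumptions, not defaults):

```nix
{
  services.pufferpanel = {
    enable = true;
    environment = {
      PUFFER_PANEL_ENABLE = "true";
      PUFFER_PANEL_REGISTRATIONENABLED = "false";
      PUFFER_WEB_HOST = ":8080";
    };
    # Secrets belong in environmentFile rather than environment.
    environmentFile = "/run/secrets/pufferpanel.env";
  };
}
```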
diff --git a/nixpkgs/nixos/modules/services/misc/pykms.nix b/nixpkgs/nixos/modules/services/misc/pykms.nix
new file mode 100644
index 000000000000..be3accc0d7e5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/pykms.nix
@@ -0,0 +1,92 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.pykms;
+  libDir = "/var/lib/pykms";
+
+in
+{
+  meta.maintainers = with lib.maintainers; [ peterhoeg ];
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "pykms" "verbose" ] "Use services.pykms.logLevel instead")
+  ];
+
+  options = {
+    services.pykms = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the PyKMS service.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc "The IP address on which to listen.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 1688;
+        description = lib.mdDoc "The port on which to listen.";
+      };
+
+      openFirewallPort = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether the listening port should be opened automatically.";
+      };
+
+      memoryLimit = mkOption {
+        type = types.str;
+        default = "64M";
+        description = lib.mdDoc "How much memory to use at most.";
+      };
+
+      logLevel = mkOption {
+        type = types.enum [ "CRITICAL" "ERROR" "WARNING" "INFO" "DEBUG" "MININFO" ];
+        default = "INFO";
+        description = lib.mdDoc "How much to log";
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc "Additional arguments";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewallPort [ cfg.port ];
+
+    systemd.services.pykms = {
+      description = "Python KMS";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      # python programs with DynamicUser = true require HOME to be set
+      environment.HOME = libDir;
+      serviceConfig = with pkgs; {
+        DynamicUser = true;
+        StateDirectory = baseNameOf libDir;
+        ExecStartPre = "${getBin pykms}/libexec/create_pykms_db.sh ${libDir}/clients.db";
+        ExecStart = lib.concatStringsSep " " ([
+          "${getBin pykms}/bin/server"
+          "--logfile=STDOUT"
+          "--loglevel=${cfg.logLevel}"
+          "--sqlite=${libDir}/clients.db"
+        ] ++ cfg.extraArgs ++ [
+          cfg.listenAddress
+          (toString cfg.port)
+        ]);
+        ProtectHome = "tmpfs";
+        WorkingDirectory = libDir;
+        SyslogIdentifier = "pykms";
+        Restart = "on-failure";
+        MemoryMax = cfg.memoryLimit;
+      };
+    };
+  };
+}
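
A minimal sketch of the options above; the values shown simply restate the module defaults with the firewall opened:

```nix
{
  services.pykms = {
    enable = true;
    listenAddress = "0.0.0.0";
    port = 1688;
    openFirewallPort = true;
    logLevel = "INFO";
    memoryLimit = "64M";
  };
}
```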
diff --git a/nixpkgs/nixos/modules/services/misc/radarr.nix b/nixpkgs/nixos/modules/services/misc/radarr.nix
new file mode 100644
index 000000000000..834b092c0d14
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/radarr.nix
@@ -0,0 +1,83 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.radarr;
+
+in
+{
+  options = {
+    services.radarr = {
+      enable = mkEnableOption (lib.mdDoc "Radarr");
+
+      package = mkOption {
+        description = lib.mdDoc "Radarr package to use";
+        default = pkgs.radarr;
+        defaultText = literalExpression "pkgs.radarr";
+        example = literalExpression "pkgs.radarr";
+        type = types.package;
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/radarr/.config/Radarr";
+        description = lib.mdDoc "The directory where Radarr stores its data files.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the Radarr web interface.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "radarr";
+        description = lib.mdDoc "User account under which Radarr runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "radarr";
+        description = lib.mdDoc "Group under which Radarr runs.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.radarr = {
+      description = "Radarr";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/Radarr -nobrowser -data='${cfg.dataDir}'";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 7878 ];
+    };
+
+    users.users = mkIf (cfg.user == "radarr") {
+      radarr = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        uid = config.ids.uids.radarr;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "radarr") {
+      radarr.gid = config.ids.gids.radarr;
+    };
+  };
+}
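
Typical usage of the module above (values restate the defaults; `openFirewall` exposes the web interface port):

```nix
{
  services.radarr = {
    enable = true;
    openFirewall = true;                          # web interface on TCP 7878
    dataDir = "/var/lib/radarr/.config/Radarr";   # module default
  };
}
```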
diff --git a/nixpkgs/nixos/modules/services/misc/readarr.nix b/nixpkgs/nixos/modules/services/misc/readarr.nix
new file mode 100644
index 000000000000..dd4fef6e598d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/readarr.nix
@@ -0,0 +1,88 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.readarr;
+in
+{
+  options = {
+    services.readarr = {
+      enable = mkEnableOption (lib.mdDoc "Readarr");
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/readarr/";
+        description = lib.mdDoc "The directory where Readarr stores its data files.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.readarr;
+        defaultText = literalExpression "pkgs.readarr";
+        description = lib.mdDoc "The Readarr package to use";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for Readarr
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "readarr";
+        description = lib.mdDoc ''
+          User account under which Readarr runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "readarr";
+        description = lib.mdDoc ''
+          Group under which Readarr runs.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.readarr = {
+      description = "Readarr";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/Readarr -nobrowser -data='${cfg.dataDir}'";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 8787 ];
+    };
+
+    users.users = mkIf (cfg.user == "readarr") {
+      readarr = {
+        description = "Readarr service";
+        home = cfg.dataDir;
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "readarr") {
+      readarr = { };
+    };
+  };
+}
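
And the analogous sketch for Readarr, using the options and defaults defined above:

```nix
{
  services.readarr = {
    enable = true;
    openFirewall = true;              # web interface on TCP 8787
    dataDir = "/var/lib/readarr/";    # module default
  };
}
```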
diff --git a/nixpkgs/nixos/modules/services/misc/redmine.nix b/nixpkgs/nixos/modules/services/misc/redmine.nix
new file mode 100644
index 000000000000..20fa71507b6b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/redmine.nix
@@ -0,0 +1,441 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkBefore mkDefault mkEnableOption mkIf mkOption mkRemovedOptionModule types;
+  inherit (lib) concatStringsSep literalExpression mapAttrsToList;
+  inherit (lib) optional optionalAttrs optionalString;
+
+  cfg = config.services.redmine;
+  format = pkgs.formats.yaml {};
+  bundle = "${cfg.package}/share/redmine/bin/bundle";
+
+  databaseYml = pkgs.writeText "database.yml" ''
+    production:
+      adapter: ${cfg.database.type}
+      database: ${cfg.database.name}
+      host: ${if (cfg.database.type == "postgresql" && cfg.database.socket != null) then cfg.database.socket else cfg.database.host}
+      port: ${toString cfg.database.port}
+      username: ${cfg.database.user}
+      password: #dbpass#
+      ${optionalString (cfg.database.type == "mysql2" && cfg.database.socket != null) "socket: ${cfg.database.socket}"}
+  '';
+
+  configurationYml = format.generate "configuration.yml" cfg.settings;
+  additionalEnvironment = pkgs.writeText "additional_environment.rb" cfg.extraEnv;
+
+  unpackTheme = unpack "theme";
+  unpackPlugin = unpack "plugin";
+  unpack = id: (name: source:
+    pkgs.stdenv.mkDerivation {
+      name = "redmine-${id}-${name}";
+      nativeBuildInputs = [ pkgs.unzip ];
+      buildCommand = ''
+        mkdir -p $out
+        cd $out
+        unpackFile ${source}
+      '';
+  });
+
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql2";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "postgresql";
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "redmine" "extraConfig" ] "Use services.redmine.settings instead.")
+    (mkRemovedOptionModule [ "services" "redmine" "database" "password" ] "Use services.redmine.database.passwordFile instead.")
+  ];
+
+  # interface
+  options = {
+    services.redmine = {
+      enable = mkEnableOption (lib.mdDoc "Redmine");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.redmine;
+        defaultText = literalExpression "pkgs.redmine";
+        description = lib.mdDoc "Which Redmine package to use.";
+        example = literalExpression "pkgs.redmine.override { ruby = pkgs.ruby_2_7; }";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "redmine";
+        description = lib.mdDoc "User under which Redmine is ran.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "redmine";
+        description = lib.mdDoc "Group under which Redmine is ran.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3000;
+        description = lib.mdDoc "Port on which Redmine is ran.";
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "/var/lib/redmine";
+        description = lib.mdDoc "The state directory, logs and plugins are stored here.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = {};
+        description = lib.mdDoc ''
+          Redmine configuration ({file}`configuration.yml`). Refer to
+          <https://guides.rubyonrails.org/action_mailer_basics.html#action-mailer-configuration>
+          for details.
+        '';
+        example = literalExpression ''
+          {
+            email_delivery = {
+              delivery_method = "smtp";
+              smtp_settings = {
+                address = "mail.example.com";
+                port = 25;
+              };
+            };
+          }
+        '';
+      };
+
+      extraEnv = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration in additional_environment.rb.
+
+          See <https://svn.redmine.org/redmine/trunk/config/additional_environment.rb.example>
+          for details.
+        '';
+        example = ''
+          config.logger.level = Logger::DEBUG
+        '';
+      };
+
+      themes = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = lib.mdDoc "Set of themes.";
+        example = literalExpression ''
+          {
+            dkuk-redmine_alex_skin = builtins.fetchurl {
+              url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
+              sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
+            };
+          }
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = lib.mdDoc "Set of plugins.";
+        example = literalExpression ''
+          {
+            redmine_env_auth = builtins.fetchurl {
+              url = "https://github.com/Intera/redmine_env_auth/archive/0.6.zip";
+              sha256 = "0yyr1yjd8gvvh832wdc8m3xfnhhxzk2pk3gm2psg5w9jdvd6skak";
+            };
+          }
+        '';
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "mysql2" "postgresql" ];
+          example = "postgresql";
+          default = "mysql2";
+          description = lib.mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = if cfg.database.type == "postgresql" then 5432 else 3306;
+          defaultText = literalExpression ''if config.services.redmine.database.type == "postgresql" then 5432 else 3306'';
+          description = lib.mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "redmine";
+          description = lib.mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "redmine";
+          description = lib.mdDoc "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/redmine-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            {option}`database.user`.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default =
+            if mysqlLocal then "/run/mysqld/mysqld.sock"
+            else if pgsqlLocal then "/run/postgresql"
+            else null;
+          defaultText = literalExpression "/run/mysqld/mysqld.sock";
+          example = "/run/mysqld/mysqld.sock";
+          description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+        };
+
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Create the database and database user locally.";
+        };
+      };
+
+      components = {
+        subversion = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Subversion integration.";
+        };
+
+        mercurial = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Mercurial integration.";
+        };
+
+        git = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "git integration.";
+        };
+
+        cvs = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "cvs integration.";
+        };
+
+        breezy = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "bazaar integration.";
+        };
+
+        imagemagick = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Allows exporting Gant diagrams as PNG.";
+        };
+
+        ghostscript = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Allows exporting Gant diagrams as PDF.";
+        };
+
+        minimagick_font_path = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "MiniMagick font path";
+          example = "/run/current-system/sw/share/X11/fonts/LiberationSans-Regular.ttf";
+        };
+      };
+    };
+  };
+
+  # implementation
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.database.passwordFile != null || cfg.database.socket != null;
+        message = "one of services.redmine.database.socket or services.redmine.database.passwordFile must be set";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user && cfg.database.user == cfg.database.name;
+        message = "services.redmine.database.user must be set to ${cfg.user} if services.redmine.database.createLocally is set true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.socket != null;
+        message = "services.redmine.database.socket must be set if services.redmine.database.createLocally is set to true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.host == "localhost";
+        message = "services.redmine.database.host must be set to localhost if services.redmine.database.createLocally is set to true";
+      }
+      { assertion = cfg.components.imagemagick -> cfg.components.minimagick_font_path != "";
+        message = "services.redmine.components.minimagick_font_path must be configured with a path to a font file if services.redmine.components.imagemagick is set to true.";
+      }
+    ];
+
+    services.redmine.settings = {
+      production = {
+        scm_subversion_command = optionalString cfg.components.subversion "${pkgs.subversion}/bin/svn";
+        scm_mercurial_command = optionalString cfg.components.mercurial "${pkgs.mercurial}/bin/hg";
+        scm_git_command = optionalString cfg.components.git "${pkgs.git}/bin/git";
+        scm_cvs_command = optionalString cfg.components.cvs "${pkgs.cvs}/bin/cvs";
+        scm_bazaar_command = optionalString cfg.components.breezy "${pkgs.breezy}/bin/bzr";
+        imagemagick_convert_command = optionalString cfg.components.imagemagick "${pkgs.imagemagick}/bin/convert";
+        gs_command = optionalString cfg.components.ghostscript "${pkgs.ghostscript}/bin/gs";
+        minimagick_font_path = "${cfg.components.minimagick_font_path}";
+      };
+    };
+
+    services.redmine.extraEnv = mkBefore ''
+      config.logger = Logger.new("${cfg.stateDir}/log/production.log", 14, 1048576)
+      config.logger.level = Logger::INFO
+    '';
+
+    services.mysql = mkIf mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.postgresql = mkIf pgsqlLocal {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    # create symlinks for the basic directory layout the redmine package expects
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/cache' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/config' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/files' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/log' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/plugins' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/public' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/public/plugin_assets' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/public/themes' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.stateDir}/tmp' 0750 ${cfg.user} ${cfg.group} - -"
+
+      "d /run/redmine - - - - -"
+      "d /run/redmine/public - - - - -"
+      "L+ /run/redmine/config - - - - ${cfg.stateDir}/config"
+      "L+ /run/redmine/files - - - - ${cfg.stateDir}/files"
+      "L+ /run/redmine/log - - - - ${cfg.stateDir}/log"
+      "L+ /run/redmine/plugins - - - - ${cfg.stateDir}/plugins"
+      "L+ /run/redmine/public/plugin_assets - - - - ${cfg.stateDir}/public/plugin_assets"
+      "L+ /run/redmine/public/themes - - - - ${cfg.stateDir}/public/themes"
+      "L+ /run/redmine/tmp - - - - ${cfg.stateDir}/tmp"
+    ];
+
+    systemd.services.redmine = {
+      after = [ "network.target" ] ++ optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+      wantedBy = [ "multi-user.target" ];
+      environment.RAILS_ENV = "production";
+      environment.RAILS_CACHE = "${cfg.stateDir}/cache";
+      environment.REDMINE_LANG = "en";
+      environment.SCHEMA = "${cfg.stateDir}/cache/schema.db";
+      path = with pkgs; [
+      ]
+      ++ optional cfg.components.subversion subversion
+      ++ optional cfg.components.mercurial mercurial
+      ++ optional cfg.components.git git
+      ++ optional cfg.components.cvs cvs
+      ++ optional cfg.components.breezy breezy
+      ++ optional cfg.components.imagemagick imagemagick
+      ++ optional cfg.components.ghostscript ghostscript;
+
+      preStart = ''
+        rm -rf "${cfg.stateDir}/plugins/"*
+        rm -rf "${cfg.stateDir}/public/themes/"*
+
+        # start with a fresh config directory
+        # the config directory is copied instead of linked as some mutable data is stored in there
+        find "${cfg.stateDir}/config" ! -name "secret_token.rb" -type f -exec rm -f {} +
+        cp -r ${cfg.package}/share/redmine/config.dist/* "${cfg.stateDir}/config/"
+
+        chmod -R u+w "${cfg.stateDir}/config"
+
+        # link in the application configuration
+        ln -fs ${configurationYml} "${cfg.stateDir}/config/configuration.yml"
+
+        # link in the additional environment configuration
+        ln -fs ${additionalEnvironment} "${cfg.stateDir}/config/additional_environment.rb"
+
+
+        # link in all user specified themes
+        for theme in ${concatStringsSep " " (mapAttrsToList unpackTheme cfg.themes)}; do
+          ln -fs $theme/* "${cfg.stateDir}/public/themes"
+        done
+
+        # link in redmine provided themes
+        ln -sf ${cfg.package}/share/redmine/public/themes.dist/* "${cfg.stateDir}/public/themes/"
+
+
+        # link in all user specified plugins
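+        # (the part of the store path after "-redmine-plugin-" is used as the plugin's directory name)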
+        for plugin in ${concatStringsSep " " (mapAttrsToList unpackPlugin cfg.plugins)}; do
+          ln -fs $plugin/* "${cfg.stateDir}/plugins/''${plugin##*-redmine-plugin-}"
+        done
+
+
+        # handle database.passwordFile & permissions
+        DBPASS=${optionalString (cfg.database.passwordFile != null) "$(head -n1 ${cfg.database.passwordFile})"}
+        cp -f ${databaseYml} "${cfg.stateDir}/config/database.yml"
+        sed -e "s,#dbpass#,$DBPASS,g" -i "${cfg.stateDir}/config/database.yml"
+        chmod 440 "${cfg.stateDir}/config/database.yml"
+
+
+        # generate a secret token if required
+        if ! test -e "${cfg.stateDir}/config/initializers/secret_token.rb"; then
+          ${bundle} exec rake generate_secret_token
+          chmod 440 "${cfg.stateDir}/config/initializers/secret_token.rb"
+        fi
+
+        # execute redmine required commands prior to starting the application
+        ${bundle} exec rake db:migrate
+        ${bundle} exec rake redmine:plugins:migrate
+        ${bundle} exec rake redmine:load_default_data
+      '';
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        TimeoutSec = "300";
+        WorkingDirectory = "${cfg.package}/share/redmine";
+        ExecStart="${bundle} exec rails server -u webrick -e production -p ${toString cfg.port} -P '${cfg.stateDir}/redmine.pid'";
+      };
+
+    };
+
+    users.users = optionalAttrs (cfg.user == "redmine") {
+      redmine = {
+        group = cfg.group;
+        home = cfg.stateDir;
+        uid = config.ids.uids.redmine;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "redmine") {
+      redmine.gid = config.ids.gids.redmine;
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/misc/ripple-data-api.nix b/nixpkgs/nixos/modules/services/misc/ripple-data-api.nix
new file mode 100644
index 000000000000..30623a321338
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/ripple-data-api.nix
@@ -0,0 +1,195 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rippleDataApi;
+
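+  # Both settings blocks below are serialized to JSON and handed to the node
+  # processes through the DEPLOYMENT_ENVS_CONFIG and DB_CONFIG environment
+  # variables of the systemd units further down.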
+  deployment_env_config = builtins.toJSON {
+    production = {
+      port = toString cfg.port;
+      maxSockets = 150;
+      batchSize = 100;
+      startIndex = 32570;
+      rippleds = cfg.rippleds;
+      redis = {
+        enable = cfg.redis.enable;
+        host = cfg.redis.host;
+        port = cfg.redis.port;
+        options.auth_pass = null;
+      };
+    };
+  };
+
+  db_config = builtins.toJSON {
+    production = {
+      username = optional (cfg.couchdb.pass != "") cfg.couchdb.user;
+      password = optional (cfg.couchdb.pass != "") cfg.couchdb.pass;
+      host = cfg.couchdb.host;
+      port = cfg.couchdb.port;
+      database = cfg.couchdb.db;
+      protocol = "http";
+    };
+  };
+
+in {
+  options = {
+    services.rippleDataApi = {
+      enable = mkEnableOption (lib.mdDoc "ripple data api");
+
+      port = mkOption {
+        description = lib.mdDoc "Ripple data api port";
+        default = 5993;
+        type = types.port;
+      };
+
+      importMode = mkOption {
+        description = lib.mdDoc "Ripple data api import mode.";
+        default = "liveOnly";
+        type = types.enum ["live" "liveOnly"];
+      };
+
+      minLedger = mkOption {
+        description = lib.mdDoc "Ripple data api minimal ledger to fetch.";
+        default = null;
+        type = types.nullOr types.int;
+      };
+
+      maxLedger = mkOption {
+        description = lib.mdDoc "Ripple data api maximal ledger to fetch.";
+        default = null;
+        type = types.nullOr types.int;
+      };
+
+      redis = {
+        enable = mkOption {
+          description = lib.mdDoc "Whether to enable caching of ripple data to redis.";
+          default = true;
+          type = types.bool;
+        };
+
+        host = mkOption {
+          description = lib.mdDoc "Ripple data api redis host.";
+          default = "localhost";
+          type = types.str;
+        };
+
+        port = mkOption {
+          description = lib.mdDoc "Ripple data api redis port.";
+          default = 6379;
+          type = types.port;
+        };
+      };
+
+      couchdb = {
+        host = mkOption {
+          description = lib.mdDoc "Ripple data api couchdb host.";
+          default = "localhost";
+          type = types.str;
+        };
+
+        port = mkOption {
+          description = lib.mdDoc "Ripple data api couchdb port.";
+          default = 5984;
+          type = types.port;
+        };
+
+        db = mkOption {
+          description = lib.mdDoc "Ripple data api couchdb database.";
+          default = "rippled";
+          type = types.str;
+        };
+
+        user = mkOption {
+          description = lib.mdDoc "Ripple data api couchdb username.";
+          default = "rippled";
+          type = types.str;
+        };
+
+        pass = mkOption {
+          description = lib.mdDoc "Ripple data api couchdb password.";
+          default = "";
+          type = types.str;
+        };
+
+        create = mkOption {
+          description = lib.mdDoc "Whether to create couchdb database needed by ripple data api.";
+          type = types.bool;
+          default = true;
+        };
+      };
+
+      rippleds = mkOption {
+        description = lib.mdDoc "List of rippleds to be used by ripple data api.";
+        default = [
+          "http://s_east.ripple.com:51234"
+          "http://s_west.ripple.com:51234"
+        ];
+        type = types.listOf types.str;
+      };
+    };
+  };
+
+  config = mkIf (cfg.enable) {
+    services.couchdb.enable = mkDefault true;
+    services.couchdb.bindAddress = mkDefault "0.0.0.0";
+    services.redis.enable = mkDefault true;
+
+    systemd.services.ripple-data-api = {
+      after = [ "couchdb.service" "redis.service" "ripple-data-api-importer.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        NODE_ENV = "production";
+        DEPLOYMENT_ENVS_CONFIG = pkgs.writeText "deployment.environment.json" deployment_env_config;
+        DB_CONFIG = pkgs.writeText "db.config.json" db_config;
+      };
+
+      serviceConfig = {
+        ExecStart = "${pkgs.ripple-data-api}/bin/api";
+        Restart = "always";
+        User = "ripple-data-api";
+      };
+    };
+
+    systemd.services.ripple-data-importer = {
+      after = [ "couchdb.service" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.curl ];
+
+      environment = {
+        NODE_ENV = "production";
+        DEPLOYMENT_ENVS_CONFIG = pkgs.writeText "deployment.environment.json" deployment_env_config;
+        DB_CONFIG = pkgs.writeText "db.config.json" db_config;
+        LOG_FILE = "/dev/null";
+      };
+
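+      # If both minLedger and maxLedger are given, import that ledger range;
+      # otherwise fall back to the configured import mode ("live"/"liveOnly").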
+      serviceConfig = let
+        importMode =
+          if cfg.minLedger != null && cfg.maxLedger != null then
+            "${toString cfg.minLedger} ${toString cfg.maxLedger}"
+          else
+            cfg.importMode;
+      in {
+        ExecStart = "${pkgs.ripple-data-api}/bin/importer ${importMode} debug";
+        Restart = "always";
+        User = "ripple-data-api";
+      };
+
+      preStart = mkMerge [
+        (mkIf (cfg.couchdb.create) ''
+          HOST="http://${optionalString (cfg.couchdb.pass != "") "${cfg.couchdb.user}:${cfg.couchdb.pass}@"}${cfg.couchdb.host}:${toString cfg.couchdb.port}"
+          curl -X PUT $HOST/${cfg.couchdb.db} || true
+        '')
+        "${pkgs.ripple-data-api}/bin/update-views"
+      ];
+    };
+
+    users.users.ripple-data-api =
+      { description = "Ripple data api user";
+        isSystemUser = true;
+        group = "ripple-data-api";
+      };
+    users.groups.ripple-data-api = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/rippled.nix b/nixpkgs/nixos/modules/services/misc/rippled.nix
new file mode 100644
index 000000000000..d14b6421b742
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/rippled.nix
@@ -0,0 +1,438 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rippled;
+  opt = options.services.rippled;
+
+  b2i = val: if val then "1" else "0";
+
+  dbCfg = db: ''
+    type=${db.type}
+    path=${db.path}
+    ${optionalString (db.compression != null) "compression=${b2i db.compression}"}
+    ${optionalString (db.onlineDelete != null) ("online_delete=${toString db.onlineDelete}")}
+    ${optionalString (db.advisoryDelete != null) ("advisory_delete=${b2i db.advisoryDelete}")}
+    ${db.extraOpts}
+  '';
+
+  rippledCfg = ''
+    [server]
+    ${concatMapStringsSep "\n" (n: "port_${n}") (attrNames cfg.ports)}
+
+    ${concatMapStrings (p: ''
+    [port_${p.name}]
+    ip=${p.ip}
+    port=${toString p.port}
+    protocol=${concatStringsSep "," p.protocol}
+    ${optionalString (p.user != "") "user=${p.user}"}
+    ${optionalString (p.password != "") "password=${p.password}"}
+    admin=${concatStringsSep "," p.admin}
+    ${optionalString (p.ssl.key != null) "ssl_key=${p.ssl.key}"}
+    ${optionalString (p.ssl.cert != null) "ssl_cert=${p.ssl.cert}"}
+    ${optionalString (p.ssl.chain != null) "ssl_chain=${p.ssl.chain}"}
+    '') (attrValues cfg.ports)}
+
+    [database_path]
+    ${cfg.databasePath}
+
+    [node_db]
+    ${dbCfg cfg.nodeDb}
+
+    ${optionalString (cfg.tempDb != null) ''
+    [temp_db]
+    ${dbCfg cfg.tempDb}''}
+
+    ${optionalString (cfg.importDb != null) ''
+    [import_db]
+    ${dbCfg cfg.importDb}''}
+
+    [ips]
+    ${concatStringsSep "\n" cfg.ips}
+
+    [ips_fixed]
+    ${concatStringsSep "\n" cfg.ipsFixed}
+
+    [validators]
+    ${concatStringsSep "\n" cfg.validators}
+
+    [node_size]
+    ${cfg.nodeSize}
+
+    [ledger_history]
+    ${toString cfg.ledgerHistory}
+
+    [fetch_depth]
+    ${toString cfg.fetchDepth}
+
+    [validation_quorum]
+    ${toString cfg.validationQuorum}
+
+    [sntp_servers]
+    ${concatStringsSep "\n" cfg.sntpServers}
+
+    ${optionalString cfg.statsd.enable ''
+    [insight]
+    server=statsd
+    address=${cfg.statsd.address}
+    prefix=${cfg.statsd.prefix}
+    ''}
+
+    [rpc_startup]
+    { "command": "log_level", "severity": "${cfg.logLevel}" }
+  '' + cfg.extraConfig;
+
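+  # Every attribute of services.rippled.ports becomes a [port_<name>] stanza
+  # in the generated rippled.cfg and is referenced from its [server] section.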
+  portOptions = { name, ...}: {
+    options = {
+      name = mkOption {
+        internal = true;
+        default = name;
+      };
+
+      ip = mkOption {
+        default = "127.0.0.1";
+        description = lib.mdDoc "Ip where rippled listens.";
+        type = types.str;
+      };
+
+      port = mkOption {
+        description = lib.mdDoc "Port where rippled listens.";
+        type = types.port;
+      };
+
+      protocol = mkOption {
+        description = lib.mdDoc "Protocols expose by rippled.";
+        type = types.listOf (types.enum ["http" "https" "ws" "wss" "peer"]);
+      };
+
+      user = mkOption {
+        description = lib.mdDoc "When set, these credentials will be required on HTTP/S requests.";
+        type = types.str;
+        default = "";
+      };
+
+      password = mkOption {
+        description = lib.mdDoc "When set, these credentials will be required on HTTP/S requests.";
+        type = types.str;
+        default = "";
+      };
+
+      admin = mkOption {
+        description = lib.mdDoc "A comma-separated list of admin IP addresses.";
+        type = types.listOf types.str;
+        default = ["127.0.0.1"];
+      };
+
+      ssl = {
+        key = mkOption {
+          description = lib.mdDoc ''
+            Specifies the filename holding the SSL key in PEM format.
+          '';
+          default = null;
+          type = types.nullOr types.path;
+        };
+
+        cert = mkOption {
+          description = lib.mdDoc ''
+            Specifies the path to the SSL certificate file in PEM format.
+            This is not needed if the chain includes it.
+          '';
+          default = null;
+          type = types.nullOr types.path;
+        };
+
+        chain = mkOption {
+          description = lib.mdDoc ''
+            If you need a certificate chain, specify the path to the
+            certificate chain here. The chain may include the end certificate.
+          '';
+          default = null;
+          type = types.nullOr types.path;
+        };
+      };
+    };
+  };
+
+  dbOptions = {
+    options = {
+      type = mkOption {
+        description = lib.mdDoc "Rippled database type.";
+        type = types.enum ["rocksdb" "nudb"];
+        default = "rocksdb";
+      };
+
+      path = mkOption {
+        description = lib.mdDoc "Location to store the database.";
+        type = types.path;
+        default = cfg.databasePath;
+        defaultText = literalExpression "config.${opt.databasePath}";
+      };
+
+      compression = mkOption {
+        description = lib.mdDoc "Whether to enable snappy compression.";
+        type = types.nullOr types.bool;
+        default = null;
+      };
+
+      onlineDelete = mkOption {
+        description = lib.mdDoc "Enable automatic purging of older ledger information.";
+        type = types.nullOr (types.addCheck types.int (v: v > 256));
+        default = cfg.ledgerHistory;
+        defaultText = literalExpression "config.${opt.ledgerHistory}";
+      };
+
+      advisoryDelete = mkOption {
+        description = lib.mdDoc ''
+          If set, then require administrative RPC call "can_delete"
+          to enable online deletion of ledger records.
+        '';
+        type = types.nullOr types.bool;
+        default = null;
+      };
+
+      extraOpts = mkOption {
+        description = lib.mdDoc "Extra database options.";
+        type = types.lines;
+        default = "";
+      };
+    };
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.rippled = {
+      enable = mkEnableOption (lib.mdDoc "rippled");
+
+      package = mkOption {
+        description = lib.mdDoc "Which rippled package to use.";
+        type = types.package;
+        default = pkgs.rippled;
+        defaultText = literalExpression "pkgs.rippled";
+      };
+
+      ports = mkOption {
+        description = lib.mdDoc "Ports exposed by rippled";
+        type = with types; attrsOf (submodule portOptions);
+        default = {
+          rpc = {
+            port = 5005;
+            admin = ["127.0.0.1"];
+            protocol = ["http"];
+          };
+
+          peer = {
+            port = 51235;
+            ip = "0.0.0.0";
+            protocol = ["peer"];
+          };
+
+          ws_public = {
+            port = 5006;
+            ip = "0.0.0.0";
+            protocol = ["ws" "wss"];
+          };
+        };
+      };
+
+      nodeDb = mkOption {
+        description = lib.mdDoc "Rippled main database options.";
+        type = with types; nullOr (submodule dbOptions);
+        default = {
+          type = "rocksdb";
+          extraOpts = ''
+            open_files=2000
+            filter_bits=12
+            cache_mb=256
+            file_size_mb=8
+            file_size_mult=2
+          '';
+        };
+      };
+
+      tempDb = mkOption {
+        description = lib.mdDoc "Rippled temporary database options.";
+        type = with types; nullOr (submodule dbOptions);
+        default = null;
+      };
+
+      importDb = mkOption {
+        description = lib.mdDoc "Settings for performing a one-time import.";
+        type = with types; nullOr (submodule dbOptions);
+        default = null;
+      };
+
+      nodeSize = mkOption {
+        description = lib.mdDoc ''
+          The size of the node you are running:
+          one of "tiny", "small", "medium", "large", or "huge".
+        '';
+        type = types.enum ["tiny" "small" "medium" "large" "huge"];
+        default = "small";
+      };
+
+      ips = mkOption {
+        description = lib.mdDoc ''
+          List of hostnames or IPs where the Ripple protocol is served.
+          For a starter list, you can copy entries from
+          https://ripple.com/ripple.txt, or let it default to
+          r.ripple.com 51235.
+
+          A port may optionally be specified after adding a space to the
+          address. By convention, if known, IPs are listed from most
+          to least trusted.
+        '';
+        type = types.listOf types.str;
+        default = ["r.ripple.com 51235"];
+      };
+
+      ipsFixed = mkOption {
+        description = lib.mdDoc ''
+          List of IP addresses or hostnames to which rippled should always
+          attempt to maintain peer connections. This is useful for
+          manually forming private networks, for example to configure a
+          validation server that connects to the Ripple network through a
+          public-facing server, or for building a set of cluster peers.
+
+          A port may optionally be specified after adding a space to the address.
+        '';
+        type = types.listOf types.str;
+        default = [];
+      };
+
+      validators = mkOption {
+        description = lib.mdDoc ''
+          List of nodes to always accept as validators. Nodes are specified by domain
+          or public key.
+        '';
+        type = types.listOf types.str;
+        default = [
+          "n949f75evCHwgyP4fPVgaHqNHxUVN15PsJEZ3B3HnXPcPjcZAoy7  RL1"
+          "n9MD5h24qrQqiyBC8aeqqCWvpiBiYQ3jxSr91uiDvmrkyHRdYLUj  RL2"
+          "n9L81uNCaPgtUJfaHh89gmdvXKAmSt5Gdsw2g1iPWaPkAHW5Nm4C  RL3"
+          "n9KiYM9CgngLvtRCQHZwgC2gjpdaZcCcbt3VboxiNFcKuwFVujzS  RL4"
+          "n9LdgEtkmGB9E2h3K4Vp7iGUaKuq23Zr32ehxiU8FWY7xoxbWTSA  RL5"
+        ];
+      };
+
+      databasePath = mkOption {
+        description = lib.mdDoc ''
+          Path to the ripple database.
+        '';
+        type = types.path;
+        default = "/var/lib/rippled";
+      };
+
+      validationQuorum = mkOption {
+        description = lib.mdDoc ''
+          The minimum number of trusted validations a ledger must have before
+          the server considers it fully validated.
+        '';
+        type = types.int;
+        default = 3;
+      };
+
+      ledgerHistory = mkOption {
+        description = lib.mdDoc ''
+          The number of past ledgers to acquire on server startup and the minimum
+          to maintain while running.
+        '';
+        type = types.either types.int (types.enum ["full"]);
+        default = 1296000; # 1 month
+      };
+
+      fetchDepth = mkOption {
+        description = lib.mdDoc ''
+          The number of past ledgers to serve to other peers that request historical
+          ledger data (or "full" for no limit).
+        '';
+        type = types.either types.int (types.enum ["full"]);
+        default = "full";
+      };
+
+      sntpServers = mkOption {
+        description = lib.mdDoc ''
+          IP addresses or domains of NTP servers to use for time synchronization.
+        '';
+        type = types.listOf types.str;
+        default = [
+          "time.windows.com"
+          "time.apple.com"
+          "time.nist.gov"
+          "pool.ntp.org"
+        ];
+      };
+
+      logLevel = mkOption {
+        description = lib.mdDoc "Logging verbosity.";
+        type = types.enum ["debug" "error" "info"];
+        default = "error";
+      };
+
+      statsd = {
+        enable = mkEnableOption (lib.mdDoc "statsd monitoring for rippled");
+
+        address = mkOption {
+          description = lib.mdDoc "The UDP address and port of the listening StatsD server.";
+          default = "127.0.0.1:8125";
+          type = types.str;
+        };
+
+        prefix = mkOption {
+          description = lib.mdDoc "A string prepended to each collected metric.";
+          default = "";
+          type = types.str;
+        };
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to the rippled.cfg configuration file.
+        '';
+      };
+
+      config = mkOption {
+        internal = true;
+        default = pkgs.writeText "rippled.conf" rippledCfg;
+        defaultText = literalMD "generated config file";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.rippled = {
+        description = "Ripple server user";
+        isSystemUser = true;
+        group = "rippled";
+        home = cfg.databasePath;
+        createHome = true;
+      };
+    users.groups.rippled = {};
+
+    systemd.services.rippled = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/rippled --fg --conf ${cfg.config}";
+        User = "rippled";
+        Restart = "on-failure";
+        LimitNOFILE=10000;
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/rkvm.nix b/nixpkgs/nixos/modules/services/misc/rkvm.nix
new file mode 100644
index 000000000000..582e8511ed96
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/rkvm.nix
@@ -0,0 +1,164 @@
+{ options, config, pkgs, lib, ... }:
+
+with lib;
+let
+  opt = options.services.rkvm;
+  cfg = config.services.rkvm;
+  toml = pkgs.formats.toml { };
+in
+{
+  meta.maintainers = with maintainers; [ ckie ];
+
+  options.services.rkvm = {
+    enable = mkOption {
+      default = cfg.server.enable || cfg.client.enable;
+      defaultText = literalExpression "config.${opt.server.enable} || config.${opt.client.enable}";
+      type = types.bool;
+      description = mdDoc ''
+        Whether to enable rkvm, a Virtual KVM switch for Linux machines.
+      '';
+    };
+
+    package = mkPackageOption pkgs "rkvm" { };
+
+    server = {
+      enable = mkEnableOption "the rkvm server daemon (input transmitter)";
+
+      settings = mkOption {
+        type = types.submodule
+          {
+            freeformType = toml.type;
+            options = {
+              listen = mkOption {
+                type = types.str;
+                default = "0.0.0.0:5258";
+                description = mdDoc ''
+                  An internet socket address to listen on, either IPv4 or IPv6.
+                '';
+              };
+
+              switch-keys = mkOption {
+                type = types.listOf types.str;
+                default = [ "left-alt" "left-ctrl" ];
+                description = mdDoc ''
+                  A key list specifying a host switch combination.
+
+                  _A list of key names is available in <https://github.com/htrefil/rkvm/blob/master/switch-keys.md>._
+                '';
+              };
+
+              certificate = mkOption {
+                type = types.path;
+                default = "/etc/rkvm/certificate.pem";
+                description = mdDoc ''
+                  TLS certificate path.
+
+                  ::: {.note}
+                  This should be generated with {command}`rkvm-certificate-gen`.
+                  :::
+                '';
+              };
+
+              key = mkOption {
+                type = types.path;
+                default = "/etc/rkvm/key.pem";
+                description = mdDoc ''
+                  TLS key path.
+
+                  ::: {.note}
+                  This should be generated with {command}`rkvm-certificate-gen`.
+                  :::
+                '';
+              };
+
+              password = mkOption {
+                type = types.str;
+                description = mdDoc ''
+                  Shared secret token to authenticate the client.
+                  Make sure this matches your client's config.
+                '';
+              };
+            };
+          };
+
+        default = { };
+        description = mdDoc "Structured server daemon configuration";
+      };
+    };
+
+    client = {
+      enable = mkEnableOption "the rkvm client daemon (input receiver)";
+
+      settings = mkOption {
+        type = types.submodule
+          {
+            freeformType = toml.type;
+            options = {
+              server = mkOption {
+                type = types.str;
+                example = "192.168.0.123:5258";
+                description = mdDoc ''
+                  An RKVM server's internet socket address, either IPv4 or IPv6.
+                '';
+              };
+
+              certificate = mkOption {
+                type = types.path;
+                default = "/etc/rkvm/certificate.pem";
+                description = mdDoc ''
+                  TLS certificate path.
+
+                  ::: {.note}
+                  This should be generated with {command}`rkvm-certificate-gen`.
+                  :::
+                '';
+              };
+
+              password = mkOption {
+                type = types.str;
+                description = mdDoc ''
+                  Shared secret token to authenticate the client.
+                  Make sure this matches your server's config.
+                '';
+              };
+            };
+          };
+
+        default = {};
+        description = mdDoc "Structured client daemon configuration";
+      };
+    };
+
+  };
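+
+  # Illustrative example (hypothetical addresses and secret), not part of this
+  # module: a transmitting host might set
+  #   services.rkvm.server.enable = true;
+  #   services.rkvm.server.settings.password = "shared-secret";
+  # while the receiving host sets
+  #   services.rkvm.client.enable = true;
+  #   services.rkvm.client.settings = {
+  #     server = "192.168.0.123:5258";
+  #     password = "shared-secret";
+  #   };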
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services =
+      let
+        mkBase = component: {
+          description = "RKVM ${component}";
+          wantedBy = [ "multi-user.target" ];
+          after = {
+            server = [ "network.target" ];
+            client = [ "network-online.target" ];
+          }.${component};
+          wants = {
+            server = [ ];
+            client = [ "network-online.target" ];
+          }.${component};
+          serviceConfig = {
+            ExecStart = "${cfg.package}/bin/rkvm-${component} ${toml.generate "rkvm-${component}.toml" cfg.${component}.settings}";
+            Restart = "always";
+            RestartSec = 5;
+            Type = "simple";
+          };
+        };
+      in
+      {
+        rkvm-server = mkIf cfg.server.enable (mkBase "server");
+        rkvm-client = mkIf cfg.client.enable (mkBase "client");
+      };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/misc/rmfakecloud.nix b/nixpkgs/nixos/modules/services/misc/rmfakecloud.nix
new file mode 100644
index 000000000000..1cdfdeceabcd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/rmfakecloud.nix
@@ -0,0 +1,147 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rmfakecloud;
+  serviceDataDir = "/var/lib/rmfakecloud";
+
+in {
+  options = {
+    services.rmfakecloud = {
+      enable = mkEnableOption (lib.mdDoc "rmfakecloud remarkable self-hosted cloud");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.rmfakecloud;
+        defaultText = literalExpression "pkgs.rmfakecloud";
+        description = lib.mdDoc ''
+          rmfakecloud package to use.
+
+          The default does not include the web user interface.
+        '';
+      };
+
+      storageUrl = mkOption {
+        type = types.str;
+        example = "https://local.appspot.com";
+        description = lib.mdDoc ''
+          URL used by the tablet to access the rmfakecloud service.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3000;
+        description = lib.mdDoc ''
+          Listening port number.
+        '';
+      };
+
+      logLevel = mkOption {
+        type = types.enum [ "info" "debug" "warn" "error" ];
+        default = "info";
+        description = lib.mdDoc ''
+          Logging level.
+        '';
+      };
+
+      extraSettings = mkOption {
+        type = with types; attrsOf str;
+        default = { };
+        example = { DATADIR = "/custom/path/for/rmfakecloud/data"; };
+        description = lib.mdDoc ''
+          Extra settings in the form of a set of key-value pairs.
+          For tokens and secrets, use `environmentFile` instead.
+
+          Available settings are listed on
+          https://ddvk.github.io/rmfakecloud/install/configuration/.
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "/etc/secrets/rmfakecloud.env";
+        description = lib.mdDoc ''
+          Path to an environment file loaded for the rmfakecloud service.
+
+          This can be used to securely store tokens and secrets outside of the
+          world-readable Nix store. Since this file is read by systemd, it may
+          have permission 0400 and be owned by root.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.rmfakecloud = {
+      description = "rmfakecloud remarkable self-hosted cloud";
+
+      environment = {
+        STORAGE_URL = cfg.storageUrl;
+        PORT = toString cfg.port;
+        LOGLEVEL = cfg.logLevel;
+      } // cfg.extraSettings;
+
+      preStart = ''
+        # Generate the secret key used to sign client session tokens.
+        # Replacing it invalidates the previously established sessions.
+        if [ -z "$JWT_SECRET_KEY" ] && [ ! -f jwt_secret_key ]; then
+          (umask 077; touch jwt_secret_key)
+          cat /dev/urandom | tr -cd '[:alnum:]' | head -c 48 >> jwt_secret_key
+        fi
+      '';
+
+      script = ''
+        if [ -z "$JWT_SECRET_KEY" ]; then
+          export JWT_SECRET_KEY="$(cat jwt_secret_key)"
+        fi
+
+        ${cfg.package}/bin/rmfakecloud
+      '';
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        Restart = "always";
+
+        EnvironmentFile =
+          mkIf (cfg.environmentFile != null) cfg.environmentFile;
+
+        AmbientCapabilities =
+          mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+
+        DynamicUser = true;
+        PrivateDevices = true;
+        ProtectHome = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        CapabilityBoundingSet = [ "" ];
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        WorkingDirectory = serviceDataDir;
+        StateDirectory = baseNameOf serviceDataDir;
+        UMask = "0027";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ pacien ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/rshim.nix b/nixpkgs/nixos/modules/services/misc/rshim.nix
new file mode 100644
index 000000000000..706cf9136b00
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/rshim.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.rshim;
+
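+  # Assemble the rshim command line from the options below; flags for unset
+  # options are simply omitted so the daemon falls back to its own defaults.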
+  rshimCommand = [ "${cfg.package}/bin/rshim" ]
+    ++ lib.optionals (cfg.backend != null) [ "--backend ${cfg.backend}" ]
+    ++ lib.optionals (cfg.device != null) [ "--device ${cfg.device}" ]
+    ++ lib.optionals (cfg.index != null) [ "--index ${builtins.toString cfg.index}" ]
+    ++ [ "--log-level ${builtins.toString cfg.log-level}" ]
+  ;
+in
+{
+  options.services.rshim = {
+    enable = lib.mkEnableOption (lib.mdDoc "user-space rshim driver for the BlueField SoC");
+
+    package = lib.mkPackageOptionMD pkgs "rshim-user-space" { };
+
+    backend = lib.mkOption {
+      type = with lib.types; nullOr (enum [ "usb" "pcie" "pcie_lf" ]);
+      description = lib.mdDoc ''
+        Specify the backend to attach. If not specified, the driver will scan
+        all rshim backends unless the `device` option is given with a device
+        name specified.
+      '';
+      default = null;
+      example = "pcie";
+    };
+
+    device = lib.mkOption {
+      type = with lib.types; nullOr str;
+      description = lib.mdDoc ''
+        Specify the device name to attach. The backend driver can be deduced
+        from the device name, thus the `backend` option is not needed.
+      '';
+      default = null;
+      example = "pcie-04:00.2";
+    };
+
+    index = lib.mkOption {
+      type = with lib.types; nullOr int;
+      description = lib.mdDoc ''
+        Specify the index to create device path `/dev/rshim<index>`. It's also
+        used to create network interface name `tmfifo_net<index>`. This option
+        is needed when multiple rshim instances are running.
+      '';
+      default = null;
+      example = 1;
+    };
+
+    log-level = lib.mkOption {
+      type = lib.types.int;
+      description = lib.mdDoc ''
+        Specify the log level (0:none, 1:error, 2:warning, 3:notice, 4:debug).
+      '';
+      default = 2;
+      example = 4;
+    };
+
+    config = lib.mkOption {
+      type = with lib.types; attrsOf (oneOf [ int str ]);
+      description = lib.mdDoc ''
+        Structural setting for the rshim configuration file
+        (`/etc/rshim.conf`). It can be used to specify the static mapping
+        between rshim devices and rshim names. It can also be used to ignore
+        some rshim devices.
+      '';
+      default = { };
+      example = {
+        DISPLAY_LEVEL = 0;
+        rshim0 = "usb-2-1.7";
+        none = "usb-1-1.4";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.etc = lib.mkIf (cfg.config != { }) {
+      "rshim.conf".text = lib.generators.toKeyValue
+        { mkKeyValue = lib.generators.mkKeyValueDefault { } " "; }
+        cfg.config;
+    };
+
+    systemd.services.rshim = {
+      after = [ "network.target" ];
+      serviceConfig = {
+        Restart = "always";
+        Type = "forking";
+        ExecStart = [
+          (lib.concatStringsSep " \\\n" rshimCommand)
+        ];
+        KillMode = "control-group";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/safeeyes.nix b/nixpkgs/nixos/modules/services/misc/safeeyes.nix
new file mode 100644
index 000000000000..9dfa2001bcb7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/safeeyes.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.safeeyes;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.safeeyes = {
+
+      enable = mkEnableOption (lib.mdDoc "the safeeyes OSGi service");
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.safeeyes ];
+
+    systemd.user.services.safeeyes = {
+      description = "Safeeyes";
+
+      wantedBy = [ "graphical-session.target" ];
+      partOf   = [ "graphical-session.target" ];
+
+      startLimitIntervalSec = 350;
+      startLimitBurst = 10;
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.safeeyes}/bin/safeeyes
+        '';
+        Restart = "on-failure";
+        RestartSec = 3;
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/sdrplay.nix b/nixpkgs/nixos/modules/services/misc/sdrplay.nix
new file mode 100644
index 000000000000..2d5333e3885b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sdrplay.nix
@@ -0,0 +1,35 @@
+{ config, lib, pkgs, ... }:
+with lib;
+{
+  options.services.sdrplayApi = {
+    enable = mkOption {
+      default = false;
+      example = true;
+      description = lib.mdDoc ''
+        Whether to enable the SDRplay API service and udev rules.
+
+        ::: {.note}
+        To enable integration with SoapySDR and GUI applications like gqrx create an overlay containing
+        `soapysdr-with-plugins = super.soapysdr.override { extraPackages = [ super.soapysdrplay ]; };`
+        :::
+      '';
+      type = lib.types.bool;
+    };
+  };
+
+  config = mkIf config.services.sdrplayApi.enable {
+    systemd.services.sdrplayApi = {
+      description = "SDRplay API Service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.sdrplay}/bin/sdrplay_apiService";
+        DynamicUser = true;
+        Restart = "on-failure";
+        RestartSec = "1s";
+      };
+    };
+    services.udev.packages = [ pkgs.sdrplay ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/serviio.nix b/nixpkgs/nixos/modules/services/misc/serviio.nix
new file mode 100644
index 000000000000..18e64030d79d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/serviio.nix
@@ -0,0 +1,87 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.serviio;
+
+  serviioStart = pkgs.writeScript "serviio.sh" ''
+    #!${pkgs.bash}/bin/sh
+
+    SERVIIO_HOME=${pkgs.serviio}
+
+    # Setup the classpath
+    SERVIIO_CLASS_PATH="$SERVIIO_HOME/lib/*:$SERVIIO_HOME/config"
+
+    # Setup Serviio specific properties
+    JAVA_OPTS="-Djava.net.preferIPv4Stack=true -Djava.awt.headless=true -Dorg.restlet.engine.loggerFacadeClass=org.restlet.ext.slf4j.Slf4jLoggerFacade
+               -Dderby.system.home=${cfg.dataDir}/library -Dserviio.home=${cfg.dataDir} -Dffmpeg.location=${pkgs.ffmpeg}/bin/ffmpeg -Ddcraw.location=${pkgs.dcraw}/bin/dcraw"
+
+    # Execute the JVM in the foreground
+    exec ${pkgs.jre}/bin/java -Xmx512M -Xms20M -XX:+UseG1GC -XX:GCTimeRatio=1 -XX:MinHeapFreeRatio=10 -XX:MaxHeapFreeRatio=20 $JAVA_OPTS -classpath "$SERVIIO_CLASS_PATH" org.serviio.MediaServer "$@"
+  '';
+
+in {
+
+  ###### interface
+  options = {
+    services.serviio = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Serviio Media Server.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/serviio";
+        description = lib.mdDoc ''
+          The directory where serviio stores its state, data, etc.
+        '';
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.serviio = {
+      description = "Serviio Media Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.serviio ];
+      serviceConfig = {
+        User = "serviio";
+        Group = "serviio";
+        ExecStart = "${serviioStart}";
+        ExecStop = "${serviioStart} -stop";
+      };
+    };
+
+    users.users.serviio =
+      { group = "serviio";
+        home = cfg.dataDir;
+        description = "Serviio Media Server User";
+        createHome = true;
+        isSystemUser = true;
+      };
+
+    users.groups.serviio = { };
+
+    networking.firewall = {
+      allowedTCPPorts = [
+        8895  # serve UPnP responses
+        23423 # console
+        23424 # mediabrowser
+      ];
+      allowedUDPPorts = [
+        1900 # UPnP service discovery
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/sickbeard.nix b/nixpkgs/nixos/modules/services/misc/sickbeard.nix
new file mode 100644
index 000000000000..bd8d8d8fa7cc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sickbeard.nix
@@ -0,0 +1,95 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  name = "sickbeard";
+
+  cfg = config.services.sickbeard;
+  opt = options.services.sickbeard;
+  sickbeard = cfg.package;
+
+in
+{
+
+  ###### interface
+
+  options = {
+    services.sickbeard = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the sickbeard server.";
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.sickbeard;
+        defaultText = literalExpression "pkgs.sickbeard";
+        example = literalExpression "pkgs.sickrage";
+        description = lib.mdDoc ''
+          Enable `pkgs.sickrage` or `pkgs.sickgear`
+          as an alternative to SickBeard.
+        '';
+      };
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/${name}";
+        description = lib.mdDoc "Path where to store data files.";
+      };
+      configFile = mkOption {
+        type = types.path;
+        default = "${cfg.dataDir}/config.ini";
+        defaultText = literalExpression ''"''${config.${opt.dataDir}}/config.ini"'';
+        description = lib.mdDoc "Path to config file.";
+      };
+      port = mkOption {
+        type = types.ints.u16;
+        default = 8081;
+        description = lib.mdDoc "Port to bind to.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "User to run the service as";
+      };
+      group = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "Group to run the service as";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == name) {
+      ${name} = {
+        uid = config.ids.uids.sickbeard;
+        group = cfg.group;
+        description = "sickbeard user";
+        home = cfg.dataDir;
+        createHome = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == name) {
+      ${name}.gid = config.ids.gids.sickbeard;
+    };
+
+    systemd.services.sickbeard = {
+      description = "Sickbeard Server";
+      wantedBy    = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${sickbeard}/bin/${sickbeard.pname} --datadir ${cfg.dataDir} --config ${cfg.configFile} --port ${toString cfg.port}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/signald.nix b/nixpkgs/nixos/modules/services/misc/signald.nix
new file mode 100644
index 000000000000..32ba154506ce
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/signald.nix
@@ -0,0 +1,105 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.signald;
+  dataDir = "/var/lib/signald";
+  defaultUser = "signald";
+in
+{
+  options.services.signald = {
+    enable = mkEnableOption (lib.mdDoc "the signald service");
+
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = lib.mdDoc "User under which signald runs.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = lib.mdDoc "Group under which signald runs.";
+    };
+
+    socketPath = mkOption {
+      type = types.str;
+      default = "/run/signald/signald.sock";
+      description = lib.mdDoc "Path to the signald socket";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == defaultUser) {
+      ${defaultUser} = { };
+    };
+
+    systemd.services.signald = {
+      description = "A daemon for interacting with the Signal Private Messenger";
+      wants = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${pkgs.signald}/bin/signald -d ${dataDir} -s ${cfg.socketPath}";
+        Restart = "on-failure";
+        StateDirectory = "signald";
+        RuntimeDirectory = "signald";
+        StateDirectoryMode = "0750";
+        RuntimeDirectoryMode = "0750";
+
+        BindReadOnlyPaths = [
+          "/nix/store"
+          "-/etc/resolv.conf"
+          "-/etc/nsswitch.conf"
+          "-/etc/hosts"
+          "-/etc/localtime"
+        ];
+        CapabilityBoundingSet = "";
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        # Use a static user so other applications can access the files
+        #DynamicUser = true;
+        LockPersonality = true;
+        # Needed for java
+        #MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        # Needs network access
+        #PrivateNetwork = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        # Would re-mount paths ignored by temporary root
+        #ProtectSystem = "strict";
+        ProtectControlGroups = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged @resources @setuid @keyring" ];
+        TemporaryFileSystem = "/:ro";
+        # Does not work well with the temporary root
+        #UMask = "0066";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/siproxd.nix b/nixpkgs/nixos/modules/services/misc/siproxd.nix
new file mode 100644
index 000000000000..3890962b7cfb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/siproxd.nix
@@ -0,0 +1,179 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.siproxd;
+
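+  # Generated siproxd.conf; the access-control and authentication settings
+  # are only emitted when the corresponding options are non-empty.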
+  conf = ''
+    daemonize = 0
+    rtp_proxy_enable = 1
+    user = siproxd
+    if_inbound  = ${cfg.ifInbound}
+    if_outbound = ${cfg.ifOutbound}
+    sip_listen_port = ${toString cfg.sipListenPort}
+    rtp_port_low    = ${toString cfg.rtpPortLow}
+    rtp_port_high   = ${toString cfg.rtpPortHigh}
+    rtp_timeout     = ${toString cfg.rtpTimeout}
+    rtp_dscp        = ${toString cfg.rtpDscp}
+    sip_dscp        = ${toString cfg.sipDscp}
+    ${optionalString (cfg.hostsAllowReg != []) "hosts_allow_reg = ${concatStringsSep "," cfg.hostsAllowReg}"}
+    ${optionalString (cfg.hostsAllowSip != []) "hosts_allow_sip = ${concatStringsSep "," cfg.hostsAllowSip}"}
+    ${optionalString (cfg.hostsDenySip != []) "hosts_deny_sip  = ${concatStringsSep "," cfg.hostsDenySip}"}
+    ${optionalString (cfg.passwordFile != "") "proxy_auth_pwfile = ${cfg.passwordFile}"}
+    ${cfg.extraConfig}
+  '';
+
+  confFile = builtins.toFile "siproxd.conf" conf;
+
+in
+{
+  ##### interface
+
+  options = {
+
+    services.siproxd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Siproxd SIP
+          proxy/masquerading daemon.
+        '';
+      };
+
+      ifInbound = mkOption {
+        type = types.str;
+        example = "eth0";
+        description = lib.mdDoc "Local network interface";
+      };
+
+      ifOutbound = mkOption {
+        type = types.str;
+        example = "ppp0";
+        description = lib.mdDoc "Public network interface";
+      };
+
+      hostsAllowReg = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "192.168.1.0/24" "192.168.2.0/24" ];
+        description = lib.mdDoc ''
+          Access control list for incoming SIP registrations.
+        '';
+      };
+
+      hostsAllowSip = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "123.45.0.0/16" "123.46.0.0/16" ];
+        description = lib.mdDoc ''
+          Access control list for incoming SIP traffic.
+        '';
+      };
+
+      hostsDenySip = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "10.0.0.0/8" "11.0.0.0/8" ];
+        description = lib.mdDoc ''
+          Access control list for denying incoming
+          SIP registrations and traffic.
+        '';
+      };
+
+      sipListenPort = mkOption {
+        type = types.int;
+        default = 5060;
+        description = lib.mdDoc ''
+          Port to listen for incoming SIP messages.
+        '';
+      };
+
+      rtpPortLow = mkOption {
+        type = types.int;
+        default = 7070;
+        description = lib.mdDoc ''
+         Bottom of UDP port range for incoming and outgoing RTP traffic
+        '';
+      };
+
+      rtpPortHigh = mkOption {
+        type = types.int;
+        default = 7089;
+        description = lib.mdDoc ''
+         Top of UDP port range for incoming and outgoing RTP traffic
+        '';
+      };
+
+      rtpTimeout = mkOption {
+        type = types.int;
+        default = 300;
+        description = lib.mdDoc ''
+          Timeout for an RTP stream. If for the specified
+          number of seconds no data is relayed on an active
+          stream, it is considered dead and will be killed.
+        '';
+      };
+
+      rtpDscp = mkOption {
+        type = types.int;
+        default = 46;
+        description = lib.mdDoc ''
+          DSCP (differentiated services) value to be assigned
+          to RTP packets. Allows QoS-aware routers to handle
+          different types of traffic with different priorities.
+        '';
+      };
+
+      sipDscp = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          DSCP (differentiated services) value to be assigned
+          to SIP packets. Allows QoS-aware routers to handle
+          different types of traffic with different priorities.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Path to per-user password file.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration to append to the siproxd configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+  ##### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.siproxd = {
+      uid = config.ids.uids.siproxd;
+    };
+
+    systemd.services.siproxd = {
+      description = "SIP proxy/masquerading daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.siproxd}/sbin/siproxd -c ${confFile}";
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/misc/snapper.nix b/nixpkgs/nixos/modules/services/misc/snapper.nix
new file mode 100644
index 000000000000..569433c3c71d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/snapper.nix
@@ -0,0 +1,253 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.snapper;
+
+  mkValue = v:
+    if isList v then "\"${concatMapStringsSep " " (escape [ "\\" " " ]) v}\""
+    else if v == true then "yes"
+    else if v == false then "no"
+    else if isString v then "\"${v}\""
+    else builtins.toJSON v;
+
+  mkKeyValue = k: v: "${k}=${mkValue v}";
+
+  # "it's recommended to always specify the filesystem type"  -- man snapper-configs
+  defaultOf = k: if k == "FSTYPE" then null else configOptions.${k}.default or null;
+
+  safeStr = types.strMatching "[^\n\"]*" // {
+    description = "string without line breaks or quotes";
+    descriptionClass = "conjunction";
+  };
+
+  configOptions = {
+    SUBVOLUME = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path of the subvolume or mount point.
+        This path is a subvolume and has to contain a subvolume named
+        .snapshots.
+        See also man:snapper(8) section PERMISSIONS.
+      '';
+    };
+
+    FSTYPE = mkOption {
+      type = types.enum [ "btrfs" ];
+      default = "btrfs";
+      description = lib.mdDoc ''
+        Filesystem type. Only btrfs is stable and tested.
+      '';
+    };
+
+    ALLOW_GROUPS = mkOption {
+      type = types.listOf safeStr;
+      default = [];
+      description = lib.mdDoc ''
+        List of groups allowed to operate with the config.
+
+        Also see the PERMISSIONS section in man:snapper(8).
+      '';
+    };
+
+    ALLOW_USERS = mkOption {
+      type = types.listOf safeStr;
+      default = [];
+      example = [ "alice" ];
+      description = lib.mdDoc ''
+        List of users allowed to operate with the config. "root" is always
+        implicitly included.
+
+        Also see the PERMISSIONS section in man:snapper(8).
+      '';
+    };
+
+    TIMELINE_CLEANUP = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Defines whether the timeline cleanup algorithm should be run for the config.
+      '';
+    };
+
+    TIMELINE_CREATE = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Defines whether hourly snapshots should be created.
+      '';
+    };
+  };
+in
+
+{
+  options.services.snapper = {
+
+    snapshotRootOnBoot = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to snapshot root on boot.
+      '';
+    };
+
+    snapshotInterval = mkOption {
+      type = types.str;
+      default = "hourly";
+      description = lib.mdDoc ''
+        Snapshot interval.
+
+        The format is described in
+        {manpage}`systemd.time(7)`.
+      '';
+    };
+
+    cleanupInterval = mkOption {
+      type = types.str;
+      default = "1d";
+      description = lib.mdDoc ''
+        Cleanup interval.
+
+        The format is described in
+        {manpage}`systemd.time(7)`.
+      '';
+    };
+
+    filters = mkOption {
+      type = types.nullOr types.lines;
+      default = null;
+      description = lib.mdDoc ''
+        Global display difference filter. See man:snapper(8) for more details.
+      '';
+    };
+
+    configs = mkOption {
+      default = { };
+      example = literalExpression ''
+        {
+          home = {
+            SUBVOLUME = "/home";
+            ALLOW_USERS = [ "alice" ];
+            TIMELINE_CREATE = true;
+            TIMELINE_CLEANUP = true;
+          };
+        }
+      '';
+
+      description = lib.mdDoc ''
+        Subvolume configuration. Any option mentioned in man:snapper-configs(5)
+        is valid here, even if NixOS doesn't document it.
+      '';
+
+      type = types.attrsOf (types.submodule {
+        freeformType = types.attrsOf (types.oneOf [ (types.listOf safeStr) types.bool safeStr types.number ]);
+
+        options = configOptions;
+      });
+    };
+  };
+
+  config = mkIf (cfg.configs != {}) (let
+    documentation = [ "man:snapper(8)" "man:snapper-configs(5)" ];
+  in {
+
+    environment = {
+
+      systemPackages = [ pkgs.snapper ];
+
+      # Note: snapper/config-templates/default is only needed for create-config
+      #       which is not the NixOS way to configure.
+      etc = {
+
+        "sysconfig/snapper".text = ''
+          SNAPPER_CONFIGS="${lib.concatStringsSep " " (builtins.attrNames cfg.configs)}"
+        '';
+
+      }
+      // (mapAttrs' (name: subvolume: nameValuePair "snapper/configs/${name}" ({
+        text = lib.generators.toKeyValue { inherit mkKeyValue; } (filterAttrs (k: v: v != defaultOf k) subvolume);
+      })) cfg.configs)
+      // (lib.optionalAttrs (cfg.filters != null) {
+        "snapper/filters/default.txt".text = cfg.filters;
+      });
+
+    };
+
+    services.dbus.packages = [ pkgs.snapper ];
+
+    systemd.services.snapperd = {
+      description = "DBus interface for snapper";
+      inherit documentation;
+      serviceConfig = {
+        Type = "dbus";
+        BusName = "org.opensuse.Snapper";
+        ExecStart = "${pkgs.snapper}/bin/snapperd";
+        CapabilityBoundingSet = "CAP_DAC_OVERRIDE CAP_FOWNER CAP_CHOWN CAP_FSETID CAP_SETFCAP CAP_SYS_ADMIN CAP_SYS_MODULE CAP_IPC_LOCK CAP_SYS_NICE";
+        LockPersonality = true;
+        NoNewPrivileges = false;
+        PrivateNetwork = true;
+        ProtectHostname = true;
+        RestrictAddressFamilies = "AF_UNIX";
+        RestrictRealtime = true;
+      };
+    };
+
+    systemd.services.snapper-timeline = {
+      description = "Timeline of Snapper Snapshots";
+      inherit documentation;
+      requires = [ "local-fs.target" ];
+      serviceConfig.ExecStart = "${pkgs.snapper}/lib/snapper/systemd-helper --timeline";
+      startAt = cfg.snapshotInterval;
+    };
+
+    systemd.services.snapper-cleanup = {
+      description = "Cleanup of Snapper Snapshots";
+      inherit documentation;
+      serviceConfig.ExecStart = "${pkgs.snapper}/lib/snapper/systemd-helper --cleanup";
+    };
+
+    systemd.timers.snapper-cleanup = {
+      description = "Cleanup of Snapper Snapshots";
+      inherit documentation;
+      wantedBy = [ "timers.target" ];
+      requires = [ "local-fs.target" ];
+      timerConfig.OnBootSec = "10m";
+      timerConfig.OnUnitActiveSec = cfg.cleanupInterval;
+    };
+
+    systemd.services.snapper-boot = lib.optionalAttrs cfg.snapshotRootOnBoot {
+      description = "Take snapper snapshot of root on boot";
+      inherit documentation;
+      serviceConfig.ExecStart = "${pkgs.snapper}/bin/snapper --config root create --cleanup-algorithm number --description boot";
+      serviceConfig.Type = "oneshot";
+      requires = [ "local-fs.target" ];
+      wantedBy = [ "multi-user.target" ];
+      unitConfig.ConditionPathExists = "/etc/snapper/configs/root";
+    };
+
+    assertions =
+      concatMap
+        (name:
+          let
+            sub = cfg.configs.${name};
+          in
+          [ { assertion = !(sub ? extraConfig);
+              message = ''
+                The option definition `services.snapper.configs.${name}.extraConfig' no longer has any effect; please remove it.
+                The contents of this option should be migrated to attributes on `services.snapper.configs.${name}'.
+              '';
+            }
+          ] ++
+          map
+            (attr: {
+              assertion = !(hasAttr attr sub);
+              message = ''
+                The option definition `services.snapper.configs.${name}.${attr}' has been renamed to `services.snapper.configs.${name}.${toUpper attr}'.
+              '';
+            })
+            [ "fstype" "subvolume" ]
+        )
+        (attrNames cfg.configs);
+  });
+}
diff --git a/nixpkgs/nixos/modules/services/misc/soft-serve.nix b/nixpkgs/nixos/modules/services/misc/soft-serve.nix
new file mode 100644
index 000000000000..2b63b6bcd867
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/soft-serve.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.soft-serve;
+  configFile = format.generate "config.yaml" cfg.settings;
+  format = pkgs.formats.yaml { };
+  docUrl = "https://charm.sh/blog/self-hosted-soft-serve/";
+  stateDir = "/var/lib/soft-serve";
+in
+{
+  options = {
+    services.soft-serve = {
+      enable = mkEnableOption "soft-serve";
+
+      package = mkPackageOption pkgs "soft-serve" { };
+
+      settings = mkOption {
+        type = format.type;
+        default = { };
+        description = mdDoc ''
+          The contents of the configuration file for soft-serve.
+
+          See <${docUrl}>.
+        '';
+        example = literalExpression ''
+          {
+            name = "dadada's repos";
+            log_format = "text";
+            ssh = {
+              listen_addr = ":23231";
+              public_url = "ssh://localhost:23231";
+              max_timeout = 30;
+              idle_timeout = 120;
+            };
+            stats.listen_addr = ":23233";
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.tmpfiles.rules = [
+      # The config file has to be inside the state dir
+      "L+ ${stateDir}/config.yaml - - - - ${configFile}"
+    ];
+
+    systemd.services.soft-serve = {
+      description = "Soft Serve git server";
+      documentation = [ docUrl ];
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment.SOFT_SERVE_DATA_PATH = stateDir;
+
+      serviceConfig = {
+        Type = "simple";
+        DynamicUser = true;
+        Restart = "always";
+        ExecStart = "${getExe cfg.package} serve";
+        StateDirectory = "soft-serve";
+        WorkingDirectory = stateDir;
+        RuntimeDirectory = "soft-serve";
+        RuntimeDirectoryMode = "0750";
+        ProcSubset = "pid";
+        ProtectProc = "invisible";
+        UMask = "0027";
+        CapabilityBoundingSet = "";
+        ProtectHome = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RemoveIPC = true;
+        PrivateMounts = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@cpu-emulation @debug @keyring @module @mount @obsolete @privileged @raw-io @reboot @setuid @swap"
+        ];
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.dadada ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/sonarr.nix b/nixpkgs/nixos/modules/services/misc/sonarr.nix
new file mode 100644
index 000000000000..65c51d9677d9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sonarr.nix
@@ -0,0 +1,85 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sonarr;
+in
+{
+  options = {
+    services.sonarr = {
+      enable = mkEnableOption (lib.mdDoc "Sonarr");
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/sonarr/.config/NzbDrone";
+        description = lib.mdDoc "The directory where Sonarr stores its data files.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the Sonarr web interface
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "sonarr";
+        description = lib.mdDoc "User account under which Sonaar runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "sonarr";
+        description = lib.mdDoc "Group under which Sonaar runs.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.sonarr;
+        defaultText = literalExpression "pkgs.sonarr";
+        description = lib.mdDoc ''
+          Sonarr package to use.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0700 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.sonarr = {
+      description = "Sonarr";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/NzbDrone -nobrowser -data='${cfg.dataDir}'";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 8989 ];
+    };
+
+    users.users = mkIf (cfg.user == "sonarr") {
+      sonarr = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        uid = config.ids.uids.sonarr;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "sonarr") {
+      sonarr.gid = config.ids.gids.sonarr;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/sourcehut/default.md b/nixpkgs/nixos/modules/services/misc/sourcehut/default.md
new file mode 100644
index 000000000000..44d58aa0bef3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sourcehut/default.md
@@ -0,0 +1,93 @@
+# Sourcehut {#module-services-sourcehut}
+
+[Sourcehut](https://sr.ht/) is an open-source,
+self-hostable software development platform. The server setup can be automated using
+[services.sourcehut](#opt-services.sourcehut.enable).
+
+## Basic usage {#module-services-sourcehut-basic-usage}
+
+Sourcehut is a Python- and Go-based set of applications.
+This NixOS module also provides basic configuration integrating Sourcehut into locally running
+`services.nginx`, `services.redis.servers.sourcehut`, `services.postfix`
+and `services.postgresql` services.
+
+A very basic configuration may look like this:
+```
+{ config, pkgs, lib, ... }:
+let
+  fqdn =
+    let
+      join = hostName: domain: hostName + lib.optionalString (domain != null) ".${domain}";
+    in join config.networking.hostName config.networking.domain;
+in {
+
+  networking = {
+    hostName = "srht";
+    domain = "tld";
+    firewall.allowedTCPPorts = [ 22 80 443 ];
+  };
+
+  services.sourcehut = {
+    enable = true;
+    git.enable = true;
+    man.enable = true;
+    meta.enable = true;
+    nginx.enable = true;
+    postfix.enable = true;
+    postgresql.enable = true;
+    redis.enable = true;
+    settings = {
+        "sr.ht" = {
+          environment = "production";
+          global-domain = fqdn;
+          origin = "https://${fqdn}";
+          # Produce keys with srht-keygen from sourcehut.coresrht.
+          network-key = "/run/keys/path/to/network-key";
+          service-key = "/run/keys/path/to/service-key";
+        };
+        webhooks.private-key = "/run/keys/path/to/webhook-key";
+    };
+  };
+
+  security.acme.certs."${fqdn}".extraDomainNames = [
+    "meta.${fqdn}"
+    "man.${fqdn}"
+    "git.${fqdn}"
+  ];
+
+  services.nginx = {
+    enable = true;
+    # Only recommendedProxySettings is strictly required, but the rest make sense as well.
+    recommendedTlsSettings = true;
+    recommendedOptimisation = true;
+    recommendedGzipSettings = true;
+    recommendedProxySettings = true;
+
+    # Settings to set up which certificates are used for which endpoint.
+    virtualHosts = {
+      "${fqdn}".enableACME = true;
+      "meta.${fqdn}".useACMEHost = fqdn:
+      "man.${fqdn}".useACMEHost = fqdn:
+      "git.${fqdn}".useACMEHost = fqdn:
+    };
+  };
+}
+```
+
+The `hostName` option is used internally to configure the nginx
+reverse-proxy. The `settings` attribute set is used by the configuration
+generator and the result is placed in `/etc/sr.ht/config.ini`.
+
+## Configuration {#module-services-sourcehut-configuration}
+
+All configuration parameters are stored in `/etc/sr.ht/config.ini`, which is
+generated by the module and linked from the Nix store, so every value in
+`config.ini` is managed through the module's `settings` option.
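+
+For example, any key understood by the upstream `config.ini` can be set through
+`settings`, whether or not NixOS documents a dedicated option for it. A minimal
+sketch using options defined by this module (the values are only placeholders):
+
+```
+{
+  services.sourcehut.settings = {
+    # Rendered into the [sr.ht] section of /etc/sr.ht/config.ini.
+    "sr.ht".site-name = "my forge";
+    "sr.ht".owner-name = "Jane Doe";
+    "sr.ht".owner-email = "admin@example.com";
+  };
+}
+```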
+
+## Using an alternative webserver as reverse-proxy (e.g. `httpd`) {#module-services-sourcehut-httpd}
+
+By default, `nginx` is used as reverse-proxy for `sourcehut`.
+However, it's possible to use e.g. `httpd` by explicitly disabling
+`nginx` using [](#opt-services.nginx.enable) and adjusting the relevant
+`settings` accordingly, as sketched below.
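+
+A rough, untested sketch of such a setup, assuming only `git.sr.ht` is served
+and proxied by `httpd` (the virtual host name and the Apache proxy directives
+are illustrative; in this module the git.sr.ht web service defaults to port
+5001 on `services.sourcehut.listenAddress`):
+
+```
+{
+  services.sourcehut = {
+    enable = true;
+    git.enable = true;
+    # Leave the built-in nginx integration disabled (it is off by default).
+    nginx.enable = false;
+  };
+
+  services.httpd = {
+    enable = true;
+    extraModules = [ "proxy" "proxy_http" ];
+    virtualHosts."git.example.com".extraConfig = ''
+      ProxyPass "/" "http://127.0.0.1:5001/"
+      ProxyPassReverse "/" "http://127.0.0.1:5001/"
+    '';
+  };
+}
+```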
diff --git a/nixpkgs/nixos/modules/services/misc/sourcehut/default.nix b/nixpkgs/nixos/modules/services/misc/sourcehut/default.nix
new file mode 100644
index 000000000000..f2b09f4bc4b6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sourcehut/default.nix
@@ -0,0 +1,1373 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  inherit (config.services) nginx postfix postgresql redis;
+  inherit (config.users) users groups;
+  cfg = config.services.sourcehut;
+  domain = cfg.settings."sr.ht".global-domain;
+  settingsFormat = pkgs.formats.ini {
+    listToValue = concatMapStringsSep "," (generators.mkValueStringDefault {});
+    mkKeyValue = k: v:
+      optionalString (v != null)
+      (generators.mkKeyValueDefault {
+        mkValueString = v:
+          if v == true then "yes"
+          else if v == false then "no"
+          else generators.mkValueStringDefault {} v;
+      } "=" k v);
+  };
+  configIniOfService = srv: settingsFormat.generate "sourcehut-${srv}-config.ini"
+    # Each service needs access to only a subset of sections (and secrets).
+    (filterAttrs (k: v: v != null)
+    (mapAttrs (section: v:
+      let srvMatch = builtins.match "^([a-z]*)\\.sr\\.ht(::.*)?$" section; in
+      if srvMatch == null # Include sections shared by all services
+      || head srvMatch == srv # Include sections for the service being configured
+      then v
+      # Enable Web links and integrations between services.
+      else if tail srvMatch == [ null ] && cfg.${head srvMatch}.enable
+      then {
+        inherit (v) origin;
+        # mansrht crashes without it
+        oauth-client-id = v.oauth-client-id or null;
+      }
+      # Drop sub-sections of other services
+      else null)
+    (recursiveUpdate cfg.settings {
+      # Those paths are mounted using BindPaths= or BindReadOnlyPaths=
+      # for services needing access to them.
+      "builds.sr.ht::worker".buildlogs = "/var/log/sourcehut/buildsrht-worker";
+      "git.sr.ht".post-update-script = "/usr/bin/gitsrht-update-hook";
+      "git.sr.ht".repos = cfg.settings."git.sr.ht".repos;
+      "hg.sr.ht".changegroup-script = "/usr/bin/hgsrht-hook-changegroup";
+      "hg.sr.ht".repos = cfg.settings."hg.sr.ht".repos;
+      # Making this a per service option despite being in a global section,
+      # so that it uses the redis-server used by the service.
+      "sr.ht".redis-host = cfg.${srv}.redis.host;
+    })));
+  commonServiceSettings = srv: {
+    origin = mkOption {
+      description = lib.mdDoc "URL ${srv}.sr.ht is being served at (protocol://domain)";
+      type = types.str;
+      default = "https://${srv}.${domain}";
+      defaultText = "https://${srv}.example.com";
+    };
+    debug-host = mkOption {
+      description = lib.mdDoc "Address to bind the debug server to.";
+      type = with types; nullOr str;
+      default = null;
+    };
+    debug-port = mkOption {
+      description = lib.mdDoc "Port to bind the debug server to.";
+      type = with types; nullOr str;
+      default = null;
+    };
+    connection-string = mkOption {
+      description = lib.mdDoc "SQLAlchemy connection string for the database.";
+      type = types.str;
+      default = "postgresql:///localhost?user=${srv}srht&host=/run/postgresql";
+    };
+    migrate-on-upgrade = mkEnableOption (lib.mdDoc "automatic migrations on package upgrade") // { default = true; };
+    oauth-client-id = mkOption {
+      description = lib.mdDoc "${srv}.sr.ht's OAuth client id for meta.sr.ht.";
+      type = types.str;
+    };
+    oauth-client-secret = mkOption {
+      description = lib.mdDoc "${srv}.sr.ht's OAuth client secret for meta.sr.ht.";
+      type = types.path;
+      apply = s: "<" + toString s;
+    };
+    api-origin = mkOption {
+      description = lib.mdDoc "Origin URL for the API";
+      type = types.str;
+      default = "http://${cfg.listenAddress}:${toString (cfg.${srv}.port + 100)}";
+      defaultText = lib.literalMD ''
+        `"http://''${`[](#opt-services.sourcehut.listenAddress)`}:''${toString (`[](#opt-services.sourcehut.${srv}.port)` + 100)}"`
+      '';
+    };
+  };
+
+  # Specialized python containing all the modules
+  python = pkgs.sourcehut.python.withPackages (ps: with ps; [
+    gunicorn
+    eventlet
+    # For monitoring Celery: sudo -u listssrht celery --app listssrht.process -b redis+socket:///run/redis-sourcehut/redis.sock?virtual_host=1 flower
+    flower
+    # Sourcehut services
+    srht
+    buildsrht
+    gitsrht
+    hgsrht
+    hubsrht
+    listssrht
+    mansrht
+    metasrht
+    # Not a python package
+    #pagessrht
+    pastesrht
+    todosrht
+  ]);
+  mkOptionNullOrStr = description: mkOption {
+    description = lib.mdDoc description;
+    type = with types; nullOr str;
+    default = null;
+  };
+in
+{
+  options.services.sourcehut = {
+    enable = mkEnableOption (lib.mdDoc ''
+      sourcehut - git hosting, continuous integration, mailing list, ticket tracking, wiki
+      and account management services
+    '');
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Address to bind to.";
+    };
+
+    python = mkOption {
+      internal = true;
+      type = types.package;
+      default = python;
+      description = lib.mdDoc ''
+        The python package to use. It should contain references to the *srht modules and also
+        gunicorn.
+      '';
+    };
+
+    minio = {
+      enable = mkEnableOption (lib.mdDoc ''local minio integration'');
+    };
+
+    nginx = {
+      enable = mkEnableOption (lib.mdDoc ''local nginx integration'');
+      virtualHost = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc "Virtual-host configuration merged with all Sourcehut's virtual-hosts.";
+      };
+    };
+
+    postfix = {
+      enable = mkEnableOption (lib.mdDoc ''local postfix integration'');
+    };
+
+    postgresql = {
+      enable = mkEnableOption (lib.mdDoc ''local postgresql integration'');
+    };
+
+    redis = {
+      enable = mkEnableOption (lib.mdDoc ''local redis integration in a dedicated redis-server'');
+    };
+
+    settings = mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+        options."sr.ht" = {
+          global-domain = mkOption {
+            description = lib.mdDoc "Global domain name.";
+            type = types.str;
+            example = "example.com";
+          };
+          environment = mkOption {
+            description = lib.mdDoc "Values other than \"production\" adds a banner to each page.";
+            type = types.enum [ "development" "production" ];
+            default = "development";
+          };
+          network-key = mkOption {
+            description = lib.mdDoc ''
+              An absolute file path (which should be outside the Nix-store)
+              to a secret key to encrypt internal messages with. Use `srht-keygen network` to
+              generate this key. It must be consistent between all services and nodes.
+            '';
+            type = types.path;
+            apply = s: "<" + toString s;
+          };
+          owner-email = mkOption {
+            description = lib.mdDoc "Owner's email.";
+            type = types.str;
+            default = "contact@example.com";
+          };
+          owner-name = mkOption {
+            description = lib.mdDoc "Owner's name.";
+            type = types.str;
+            default = "John Doe";
+          };
+          site-blurb = mkOption {
+            description = lib.mdDoc "Blurb for your site.";
+            type = types.str;
+            default = "the hacker's forge";
+          };
+          site-info = mkOption {
+            description = lib.mdDoc "The top-level info page for your site.";
+            type = types.str;
+            default = "https://sourcehut.org";
+          };
+          service-key = mkOption {
+            description = lib.mdDoc ''
+              An absolute file path (which should be outside the Nix-store)
+              to a key used for encrypting session cookies. Use `srht-keygen service` to
+              generate the service key. This must be shared between each node of the same
+              service (e.g. git1.sr.ht and git2.sr.ht), but different services may use
+              different keys. If you configure all of your services with the same
+              config.ini, you may use the same service-key for all of them.
+            '';
+            type = types.path;
+            apply = s: "<" + toString s;
+          };
+          site-name = mkOption {
+            description = lib.mdDoc "The name of your network of sr.ht-based sites.";
+            type = types.str;
+            default = "sourcehut";
+          };
+          source-url = mkOption {
+            description = lib.mdDoc "The source code for your fork of sr.ht.";
+            type = types.str;
+            default = "https://git.sr.ht/~sircmpwn/srht";
+          };
+        };
+        options.mail = {
+          smtp-host = mkOptionNullOrStr "Outgoing SMTP host.";
+          smtp-port = mkOption {
+            description = lib.mdDoc "Outgoing SMTP port.";
+            type = with types; nullOr port;
+            default = null;
+          };
+          smtp-user = mkOptionNullOrStr "Outgoing SMTP user.";
+          smtp-password = mkOptionNullOrStr "Outgoing SMTP password.";
+          smtp-from = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Outgoing SMTP FROM.";
+          };
+          error-to = mkOptionNullOrStr "Address receiving application exceptions";
+          error-from = mkOptionNullOrStr "Address sending application exceptions";
+          pgp-privkey = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              An absolute file path (which should be outside the Nix-store)
+              to an OpenPGP private key.
+
+              Your PGP key information (DO NOT mix up pub and priv here)
+              You must remove the password from your secret key, if present.
+              You can do this with `gpg --edit-key [key-id]`,
+              then use the `passwd` command and do not enter a new password.
+            '';
+          };
+          pgp-pubkey = mkOption {
+            type = with types; either path str;
+            description = lib.mdDoc "OpenPGP public key.";
+          };
+          pgp-key-id = mkOption {
+            type = types.str;
+            description = lib.mdDoc "OpenPGP key identifier.";
+          };
+        };
+        options.objects = {
+          s3-upstream = mkOption {
+            description = lib.mdDoc "Configure the S3-compatible object storage service.";
+            type = with types; nullOr str;
+            default = null;
+          };
+          s3-access-key = mkOption {
+            description = lib.mdDoc "Access key to the S3-compatible object storage service";
+            type = with types; nullOr str;
+            default = null;
+          };
+          s3-secret-key = mkOption {
+            description = lib.mdDoc ''
+              An absolute file path (which should be outside the Nix-store)
+              to the secret key of the S3-compatible object storage service.
+            '';
+            type = with types; nullOr path;
+            default = null;
+            apply = mapNullable (s: "<" + toString s);
+          };
+        };
+        options.webhooks = {
+          private-key = mkOption {
+            description = lib.mdDoc ''
+              An absolute file path (which should be outside the Nix-store)
+              to a base64-encoded Ed25519 key for signing webhook payloads.
+              This should be consistent for all *.sr.ht sites,
+              as this key will be used to verify signatures
+              from other sites in your network.
+              Use the `srht-keygen webhook` command to generate a key.
+            '';
+            type = types.path;
+            apply = s: "<" + toString s;
+          };
+        };
+
+        options."builds.sr.ht" = commonServiceSettings "builds" // {
+          allow-free = mkEnableOption (lib.mdDoc "nonpaying users to submit builds");
+          redis = mkOption {
+            description = lib.mdDoc "The Redis connection used for the Celery worker.";
+            type = types.str;
+            default = "redis+socket:///run/redis-sourcehut-buildsrht/redis.sock?virtual_host=2";
+          };
+          shell = mkOption {
+            description = lib.mdDoc ''
+              Script launched on SSH connection:
+              `/usr/bin/master-shell` on the master,
+              `/usr/bin/runner-shell` on a runner.
+              If master and worker are on the same system,
+              set this to `/usr/bin/runner-shell`.
+            '';
+            type = types.enum ["/usr/bin/master-shell" "/usr/bin/runner-shell"];
+            default = "/usr/bin/master-shell";
+          };
+        };
+        options."builds.sr.ht::worker" = {
+          bind-address = mkOption {
+            description = lib.mdDoc ''
+              HTTP bind address for serving local build information/monitoring.
+            '';
+            type = types.str;
+            default = "localhost:8080";
+          };
+          buildlogs = mkOption {
+            description = lib.mdDoc "Path to write build logs.";
+            type = types.str;
+            default = "/var/log/sourcehut/buildsrht-worker";
+          };
+          name = mkOption {
+            description = lib.mdDoc ''
+              Listening address and listening port
+              of the build runner (with HTTP port if not 80).
+            '';
+            type = types.str;
+            default = "localhost:5020";
+          };
+          timeout = mkOption {
+            description = lib.mdDoc ''
+              Max build duration.
+              See <https://golang.org/pkg/time/#ParseDuration>.
+            '';
+            type = types.str;
+            default = "3m";
+          };
+        };
+
+        options."git.sr.ht" = commonServiceSettings "git" // {
+          outgoing-domain = mkOption {
+            description = lib.mdDoc "Outgoing domain.";
+            type = types.str;
+            default = "https://git.localhost.localdomain";
+          };
+          post-update-script = mkOption {
+            description = lib.mdDoc ''
+              A post-update script which is installed in every git repo.
+              This setting is propagated to new and existing repositories.
+            '';
+            type = types.path;
+            default = "${pkgs.sourcehut.gitsrht}/bin/gitsrht-update-hook";
+            defaultText = "\${pkgs.sourcehut.gitsrht}/bin/gitsrht-update-hook";
+          };
+          repos = mkOption {
+            description = lib.mdDoc ''
+              Path to git repositories on disk.
+              If changing the default, you must ensure that
+              the gitsrht user has read and write access to it.
+            '';
+            type = types.str;
+            default = "/var/lib/sourcehut/gitsrht/repos";
+          };
+          webhooks = mkOption {
+            description = lib.mdDoc "The Redis connection used for the webhooks worker.";
+            type = types.str;
+            default = "redis+socket:///run/redis-sourcehut-gitsrht/redis.sock?virtual_host=1";
+          };
+        };
+        options."git.sr.ht::api" = {
+          internal-ipnet = mkOption {
+            description = lib.mdDoc ''
+              Set of IP subnets which are permitted to utilize internal API
+              authentication. This should be limited to the subnets
+              from which your *.sr.ht services are running.
+              See [](#opt-services.sourcehut.listenAddress).
+            '';
+            type = with types; listOf str;
+            default = [ "127.0.0.0/8" "::1/128" ];
+          };
+        };
+
+        options."hg.sr.ht" = commonServiceSettings "hg" // {
+          changegroup-script = mkOption {
+            description = lib.mdDoc ''
+              A changegroup script which is installed in every mercurial repo.
+              This setting is propagated to new and existing repositories.
+            '';
+            type = types.str;
+            default = "${pkgs.sourcehut.hgsrht}/bin/hgsrht-hook-changegroup";
+            defaultText = "\${pkgs.sourcehut.hgsrht}/bin/hgsrht-hook-changegroup";
+          };
+          repos = mkOption {
+            description = lib.mdDoc ''
+              Path to mercurial repositories on disk.
+              If changing the default, you must ensure that
+              the hgsrht user has read and write access to it.
+            '';
+            type = types.str;
+            default = "/var/lib/sourcehut/hgsrht/repos";
+          };
+          srhtext = mkOptionNullOrStr ''
+            Path to the srht mercurial extension
+            (defaults to where the hgsrht code is)
+          '';
+          clone_bundle_threshold = mkOption {
+            description = lib.mdDoc ".hg/store size (in MB) past which the nightly job generates clone bundles.";
+            type = types.ints.unsigned;
+            default = 50;
+          };
+          hg_ssh = mkOption {
+            description = lib.mdDoc "Path to hg-ssh (if not in $PATH).";
+            type = types.str;
+            default = "${pkgs.mercurial}/bin/hg-ssh";
+            defaultText = "\${pkgs.mercurial}/bin/hg-ssh";
+          };
+          webhooks = mkOption {
+            description = lib.mdDoc "The Redis connection used for the webhooks worker.";
+            type = types.str;
+            default = "redis+socket:///run/redis-sourcehut-hgsrht/redis.sock?virtual_host=1";
+          };
+        };
+
+        options."hub.sr.ht" = commonServiceSettings "hub" // {
+        };
+
+        options."lists.sr.ht" = commonServiceSettings "lists" // {
+          allow-new-lists = mkEnableOption (lib.mdDoc "creation of new lists");
+          notify-from = mkOption {
+            description = lib.mdDoc "Outgoing email for notifications generated by users.";
+            type = types.str;
+            default = "lists-notify@localhost.localdomain";
+          };
+          posting-domain = mkOption {
+            description = lib.mdDoc "Posting domain.";
+            type = types.str;
+            default = "lists.localhost.localdomain";
+          };
+          redis = mkOption {
+            description = lib.mdDoc "The Redis connection used for the Celery worker.";
+            type = types.str;
+            default = "redis+socket:///run/redis-sourcehut-listssrht/redis.sock?virtual_host=2";
+          };
+          webhooks = mkOption {
+            description = lib.mdDoc "The Redis connection used for the webhooks worker.";
+            type = types.str;
+            default = "redis+socket:///run/redis-sourcehut-listssrht/redis.sock?virtual_host=1";
+          };
+        };
+        options."lists.sr.ht::worker" = {
+          reject-mimetypes = mkOption {
+            description = lib.mdDoc ''
+              Comma-delimited list of Content-Types to reject. Messages with Content-Types
+              included in this list are rejected. Multipart messages are always supported,
+              and each part is checked against this list.
+
+              Uses fnmatch for wildcard expansion.
+            '';
+            type = with types; listOf str;
+            default = ["text/html"];
+          };
+          reject-url = mkOption {
+            description = lib.mdDoc "Reject URL.";
+            type = types.str;
+            default = "https://man.sr.ht/lists.sr.ht/etiquette.md";
+          };
+          sock = mkOption {
+            description = lib.mdDoc ''
+              Path for the lmtp daemon's unix socket. Direct incoming mail to this socket.
+              Alternatively, specify IP:PORT and an SMTP server will be run instead.
+            '';
+            type = types.str;
+            default = "/tmp/lists.sr.ht-lmtp.sock";
+          };
+          sock-group = mkOption {
+            description = lib.mdDoc ''
+              The lmtp daemon will make the unix socket group-read/write
+              for users in this group.
+            '';
+            type = types.str;
+            default = "postfix";
+          };
+        };
+
+        options."man.sr.ht" = commonServiceSettings "man" // {
+        };
+
+        options."meta.sr.ht" =
+          removeAttrs (commonServiceSettings "meta")
+            ["oauth-client-id" "oauth-client-secret"] // {
+          webhooks = mkOption {
+            description = lib.mdDoc "The Redis connection used for the webhooks worker.";
+            type = types.str;
+            default = "redis+socket:///run/redis-sourcehut-metasrht/redis.sock?virtual_host=1";
+          };
+          welcome-emails = mkEnableOption (lib.mdDoc "sending stock sourcehut welcome emails after signup");
+        };
+        options."meta.sr.ht::api" = {
+          internal-ipnet = mkOption {
+            description = lib.mdDoc ''
+              Set of IP subnets which are permitted to utilize internal API
+              authentication. This should be limited to the subnets
+              from which your *.sr.ht services are running.
+              See [](#opt-services.sourcehut.listenAddress).
+            '';
+            type = with types; listOf str;
+            default = [ "127.0.0.0/8" "::1/128" ];
+          };
+        };
+        options."meta.sr.ht::aliases" = mkOption {
+          description = lib.mdDoc "Aliases for the client IDs of commonly used OAuth clients.";
+          type = with types; attrsOf int;
+          default = {};
+          example = { "git.sr.ht" = 12345; };
+        };
+        options."meta.sr.ht::billing" = {
+          enabled = mkEnableOption (lib.mdDoc "the billing system");
+          stripe-public-key = mkOptionNullOrStr "Public key for Stripe. Get your keys at https://dashboard.stripe.com/account/apikeys";
+          stripe-secret-key = mkOptionNullOrStr ''
+            An absolute file path (which should be outside the Nix-store)
+            to a secret key for Stripe. Get your keys at https://dashboard.stripe.com/account/apikeys
+          '' // {
+            apply = mapNullable (s: "<" + toString s);
+          };
+        };
+        options."meta.sr.ht::settings" = {
+          registration = mkEnableOption (lib.mdDoc "public registration");
+          onboarding-redirect = mkOption {
+            description = lib.mdDoc "Where to redirect new users upon registration.";
+            type = types.str;
+            default = "https://meta.localhost.localdomain";
+          };
+          user-invites = mkOption {
+            description = lib.mdDoc ''
+              How many invites each user is issued upon registration
+              (only applicable if open registration is disabled).
+            '';
+            type = types.ints.unsigned;
+            default = 5;
+          };
+        };
+
+        options."pages.sr.ht" = commonServiceSettings "pages" // {
+          gemini-certs = mkOption {
+            description = lib.mdDoc ''
+              An absolute file path (which should be outside the Nix-store)
+              to Gemini certificates.
+            '';
+            type = with types; nullOr path;
+            default = null;
+          };
+          max-site-size = mkOption {
+            description = lib.mdDoc "Maximum size of any given site (post-gunzip), in MiB.";
+            type = types.int;
+            default = 1024;
+          };
+          user-domain = mkOption {
+            description = lib.mdDoc ''
+              Configures the user domain, if enabled.
+              All users are given \<username\>.this.domain.
+            '';
+            type = with types; nullOr str;
+            default = null;
+          };
+        };
+        options."pages.sr.ht::api" = {
+          internal-ipnet = mkOption {
+            description = lib.mdDoc ''
+              Set of IP subnets which are permitted to utilize internal API
+              authentication. This should be limited to the subnets
+              from which your *.sr.ht services are running.
+              See [](#opt-services.sourcehut.listenAddress).
+            '';
+            type = with types; listOf str;
+            default = [ "127.0.0.0/8" "::1/128" ];
+          };
+        };
+
+        options."paste.sr.ht" = commonServiceSettings "paste" // {
+        };
+
+        options."todo.sr.ht" = commonServiceSettings "todo" // {
+          notify-from = mkOption {
+            description = lib.mdDoc "Outgoing email for notifications generated by users.";
+            type = types.str;
+            default = "todo-notify@localhost.localdomain";
+          };
+          webhooks = mkOption {
+            description = lib.mdDoc "The Redis connection used for the webhooks worker.";
+            type = types.str;
+            default = "redis+socket:///run/redis-sourcehut-todosrht/redis.sock?virtual_host=1";
+          };
+        };
+        options."todo.sr.ht::mail" = {
+          posting-domain = mkOption {
+            description = lib.mdDoc "Posting domain.";
+            type = types.str;
+            default = "todo.localhost.localdomain";
+          };
+          sock = mkOption {
+            description = lib.mdDoc ''
+              Path for the lmtp daemon's unix socket. Direct incoming mail to this socket.
+              Alternatively, specify IP:PORT and an SMTP server will be run instead.
+            '';
+            type = types.str;
+            default = "/tmp/todo.sr.ht-lmtp.sock";
+          };
+          sock-group = mkOption {
+            description = lib.mdDoc ''
+              The lmtp daemon will make the unix socket group-read/write
+              for users in this group.
+            '';
+            type = types.str;
+            default = "postfix";
+          };
+        };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        The configuration for the sourcehut network.
+      '';
+    };
+
+    builds = {
+      enableWorker = mkEnableOption (lib.mdDoc ''
+        worker for builds.sr.ht
+
+        ::: {.warning}
+        For smaller deployments, job runners can be installed alongside the master server
+        but even if you only build your own software, integration with other services
+        may cause you to run untrusted builds
+        (e.g. automatic testing of patches via listssrht).
+        See <https://man.sr.ht/builds.sr.ht/configuration.md#security-model>.
+        :::
+      '');
+
+      images = mkOption {
+        type = with types; attrsOf (attrsOf (attrsOf package));
+        default = { };
+        example = lib.literalExpression ''(let
+            # Pinning unstable to allow usage with flakes and limit rebuilds.
+            pkgs_unstable = builtins.fetchGit {
+                url = "https://github.com/NixOS/nixpkgs";
+                rev = "ff96a0fa5635770390b184ae74debea75c3fd534";
+                ref = "nixos-unstable";
+            };
+            image_from_nixpkgs = (import ("''${pkgs.sourcehut.buildsrht}/lib/images/nixos/image.nix") {
+              pkgs = (import pkgs_unstable {});
+            });
+          in
+          {
+            nixos.unstable.x86_64 = image_from_nixpkgs;
+          }
+        )'';
+        description = lib.mdDoc ''
+          Images for builds.sr.ht. Each package should be distro.release.arch and point to a /nix/store/package/root.img.qcow2.
+        '';
+      };
+    };
+
+    git = {
+      package = mkOption {
+        type = types.package;
+        default = pkgs.git;
+        defaultText = literalExpression "pkgs.git";
+        example = literalExpression "pkgs.gitFull";
+        description = lib.mdDoc ''
+          Git package for git.sr.ht. This can help silence collisions.
+        '';
+      };
+      fcgiwrap.preforkProcess = mkOption {
+        description = lib.mdDoc "Number of fcgiwrap processes to prefork.";
+        type = types.int;
+        default = 4;
+      };
+    };
+
+    hg = {
+      package = mkOption {
+        type = types.package;
+        default = pkgs.mercurial;
+        defaultText = literalExpression "pkgs.mercurial";
+        description = lib.mdDoc ''
+          Mercurial package for hg.sr.ht. This can help silence collisions.
+        '';
+      };
+      cloneBundles = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Generate clonebundles (which require more disk space but dramatically speed up cloning large repositories).
+        '';
+      };
+    };
+
+    lists = {
+      process = {
+        extraArgs = mkOption {
+          type = with types; listOf str;
+          default = [ "--loglevel DEBUG" "--pool eventlet" "--without-heartbeat" ];
+          description = lib.mdDoc "Extra arguments passed to the Celery responsible for processing mails.";
+        };
+        celeryConfig = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc "Content of the `celeryconfig.py` used by the Celery of `listssrht-process`.";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+      environment.systemPackages = [ pkgs.sourcehut.coresrht ];
+
+      services.sourcehut.settings = {
+        "git.sr.ht".outgoing-domain = mkDefault "https://git.${domain}";
+        "lists.sr.ht".notify-from = mkDefault "lists-notify@${domain}";
+        "lists.sr.ht".posting-domain = mkDefault "lists.${domain}";
+        "meta.sr.ht::settings".onboarding-redirect = mkDefault "https://meta.${domain}";
+        "todo.sr.ht".notify-from = mkDefault "todo-notify@${domain}";
+        "todo.sr.ht::mail".posting-domain = mkDefault "todo.${domain}";
+      };
+    }
+    (mkIf cfg.postgresql.enable {
+      assertions = [
+        { assertion = postgresql.enable;
+          message = "postgresql must be enabled and configured";
+        }
+      ];
+    })
+    (mkIf cfg.postfix.enable {
+      assertions = [
+        { assertion = postfix.enable;
+          message = "postfix must be enabled and configured";
+        }
+      ];
+      # Needed for sharing the LMTP sockets with JoinsNamespaceOf=
+      systemd.services.postfix.serviceConfig.PrivateTmp = true;
+    })
+    (mkIf cfg.redis.enable {
+      services.redis.vmOverCommit = mkDefault true;
+    })
+    (mkIf cfg.nginx.enable {
+      assertions = [
+        { assertion = nginx.enable;
+          message = "nginx must be enabled and configured";
+        }
+      ];
+      # For proxyPass= in virtual-hosts for Sourcehut services.
+      services.nginx.recommendedProxySettings = mkDefault true;
+    })
+    (mkIf (cfg.builds.enable || cfg.git.enable || cfg.hg.enable) {
+      services.openssh = {
+        # Note that sshd will continue to honor AuthorizedKeysFile.
+        # Note that you may want to automatically rotate
+        # or link to /dev/null the following log files:
+        # - /var/log/gitsrht-dispatch
+        # - /var/log/{build,git,hg}srht-keys
+        # - /var/log/{git,hg}srht-shell
+        # - /var/log/gitsrht-update-hook
+        authorizedKeysCommand = ''/etc/ssh/sourcehut/subdir/srht-dispatch "%u" "%h" "%t" "%k"'';
+        # srht-dispatch will setuid/setgid according to [git.sr.ht::dispatch]
+        authorizedKeysCommandUser = "root";
+        extraConfig = ''
+          PermitUserEnvironment SRHT_*
+        '';
+        startWhenNeeded = false;
+      };
+      environment.etc."ssh/sourcehut/config.ini".source =
+        settingsFormat.generate "sourcehut-dispatch-config.ini"
+          (filterAttrs (k: v: k == "git.sr.ht::dispatch")
+          cfg.settings);
+      environment.etc."ssh/sourcehut/subdir/srht-dispatch" = {
+        # sshd_config(5): The program must be owned by root, not writable by group or others
+        mode = "0755";
+        source = pkgs.writeShellScript "srht-dispatch-wrapper" ''
+          set -e
+          set -x
+          cd /etc/ssh/sourcehut/subdir
+          ${pkgs.sourcehut.gitsrht}/bin/gitsrht-dispatch "$@"
+        '';
+      };
+      systemd.tmpfiles.settings."10-sourcehut-gitsrht" = mkIf cfg.git.enable (
+        builtins.listToAttrs (map (name: {
+          name = "/var/log/sourcehut/gitsrht-${name}";
+          value.f = {
+            inherit (cfg.git) user group;
+            mode = "0644";
+          };
+        }) [ "keys" "shell" "update-hook" ])
+      );
+      systemd.services.sshd = {
+        preStart = mkIf cfg.hg.enable ''
+          chown ${cfg.hg.user}:${cfg.hg.group} /var/log/sourcehut/hgsrht-keys
+        '';
+        serviceConfig = {
+          LogsDirectory = "sourcehut";
+          BindReadOnlyPaths =
+            # Note that those /usr/bin/* paths are hardcoded in multiple places in *.sr.ht,
+            # for instance to get the user from the [git.sr.ht::dispatch] settings.
+            # *srht-keys needs to:
+            # - access a redis-server in [sr.ht] redis-host,
+            # - access the PostgreSQL server in [*.sr.ht] connection-string,
+            # - query metasrht-api (through the HTTP API).
+            # Using this has the side effect of creating empty files in /usr/bin/
+            optionals cfg.builds.enable [
+              "${pkgs.writeShellScript "buildsrht-keys-wrapper" ''
+                set -e
+                cd /run/sourcehut/buildsrht/subdir
+                exec -a "$0" ${pkgs.sourcehut.buildsrht}/bin/buildsrht-keys "$@"
+              ''}:/usr/bin/buildsrht-keys"
+              "${pkgs.sourcehut.buildsrht}/bin/master-shell:/usr/bin/master-shell"
+              "${pkgs.sourcehut.buildsrht}/bin/runner-shell:/usr/bin/runner-shell"
+            ] ++
+            optionals cfg.git.enable [
+              # /path/to/gitsrht-keys calls /path/to/gitsrht-shell,
+              # or [git.sr.ht] shell= if set.
+              "${pkgs.writeShellScript "gitsrht-keys-wrapper" ''
+                set -e
+                cd /run/sourcehut/gitsrht/subdir
+                exec -a "$0" ${pkgs.sourcehut.gitsrht}/bin/gitsrht-keys "$@"
+              ''}:/usr/bin/gitsrht-keys"
+              "${pkgs.writeShellScript "gitsrht-shell-wrapper" ''
+                set -e
+                cd /run/sourcehut/gitsrht/subdir
+                export PATH="${cfg.git.package}/bin:$PATH"
+                export SRHT_CONFIG=/run/sourcehut/gitsrht/config.ini
+                exec -a "$0" ${pkgs.sourcehut.gitsrht}/bin/gitsrht-shell "$@"
+              ''}:/usr/bin/gitsrht-shell"
+              "${pkgs.writeShellScript "gitsrht-update-hook" ''
+                set -e
+                export SRHT_CONFIG=/run/sourcehut/gitsrht/config.ini
+                # hooks/post-update calls /usr/bin/gitsrht-update-hook as hooks/stage-3
+                # but this wrapper being a bash script, it overrides $0 with /usr/bin/gitsrht-update-hook
+                # hence this hack to put hooks/stage-3 back into gitsrht-update-hook's $0
+                if test "''${STAGE3:+set}"
+                then
+                  exec -a hooks/stage-3 ${pkgs.sourcehut.gitsrht}/bin/gitsrht-update-hook "$@"
+                else
+                  export STAGE3=set
+                  exec -a "$0" ${pkgs.sourcehut.gitsrht}/bin/gitsrht-update-hook "$@"
+                fi
+              ''}:/usr/bin/gitsrht-update-hook"
+            ] ++
+            optionals cfg.hg.enable [
+              # /path/to/hgsrht-keys calls /path/to/hgsrht-shell,
+              # or [hg.sr.ht] shell= if set.
+              "${pkgs.writeShellScript "hgsrht-keys-wrapper" ''
+                set -e
+                cd /run/sourcehut/hgsrht/subdir
+                exec -a "$0" ${pkgs.sourcehut.hgsrht}/bin/hgsrht-keys "$@"
+              ''}:/usr/bin/hgsrht-keys"
+              "${pkgs.writeShellScript "hgsrht-shell-wrapper" ''
+                set -e
+                cd /run/sourcehut/hgsrht/subdir
+                exec -a "$0" ${pkgs.sourcehut.hgsrht}/bin/hgsrht-shell "$@"
+              ''}:/usr/bin/hgsrht-shell"
+              # Mercurial's changegroup hooks are run relative to their repository's directory,
+              # but hgsrht-hook-changegroup looks up ./config.ini
+              "${pkgs.writeShellScript "hgsrht-hook-changegroup" ''
+                set -e
+                test -e "''$PWD"/config.ini ||
+                ln -s /run/sourcehut/hgsrht/config.ini "''$PWD"/config.ini
+                exec -a "$0" ${pkgs.sourcehut.hgsrht}/bin/hgsrht-hook-changegroup "$@"
+              ''}:/usr/bin/hgsrht-hook-changegroup"
+            ];
+        };
+      };
+    })
+  ]);
+
+  imports = [
+
+    (import ./service.nix "builds" {
+      inherit configIniOfService;
+      srvsrht = "buildsrht";
+      port = 5002;
+      extraServices.buildsrht-api = {
+        serviceConfig.Restart = "always";
+        serviceConfig.RestartSec = "5s";
+        serviceConfig.ExecStart = "${pkgs.sourcehut.buildsrht}/bin/buildsrht-api -b ${cfg.listenAddress}:${toString (cfg.builds.port + 100)}";
+      };
+      # TODO: a celery worker on the master and worker are apparently needed
+      extraServices.buildsrht-worker = let
+        qemuPackage = pkgs.qemu_kvm;
+        serviceName = "buildsrht-worker";
+        statePath = "/var/lib/sourcehut/${serviceName}";
+        in mkIf cfg.builds.enableWorker {
+        path = [ pkgs.openssh pkgs.docker ];
+        preStart = ''
+          set -x
+          if test -z "$(docker images -q qemu:latest 2>/dev/null)" \
+          || test "$(cat ${statePath}/docker-image-qemu)" != "${qemuPackage.version}"
+          then
+            # Create and import qemu:latest image for docker
+            ${pkgs.dockerTools.streamLayeredImage {
+              name = "qemu";
+              tag = "latest";
+              contents = [ qemuPackage ];
+            }} | docker load
+            # Mark down current package version
+            echo '${qemuPackage.version}' >${statePath}/docker-image-qemu
+          fi
+        '';
+        serviceConfig = {
+          ExecStart = "${pkgs.sourcehut.buildsrht}/bin/buildsrht-worker";
+          BindPaths = [ cfg.settings."builds.sr.ht::worker".buildlogs ];
+          LogsDirectory = [ "sourcehut/${serviceName}" ];
+          RuntimeDirectory = [ "sourcehut/${serviceName}/subdir" ];
+          StateDirectory = [ "sourcehut/${serviceName}" ];
+          TimeoutStartSec = "1800s";
+          # buildsrht-worker looks up ../config.ini
+          WorkingDirectory = "-"+"/run/sourcehut/${serviceName}/subdir";
+        };
+      };
+      extraConfig = let
+        image_dirs = flatten (
+          mapAttrsToList (distro: revs:
+            mapAttrsToList (rev: archs:
+              mapAttrsToList (arch: image:
+                pkgs.runCommand "buildsrht-images" { } ''
+                  mkdir -p $out/${distro}/${rev}/${arch}
+                  ln -s ${image}/*.qcow2 $out/${distro}/${rev}/${arch}/root.img.qcow2
+                ''
+              ) archs
+            ) revs
+          ) cfg.builds.images
+        );
+        image_dir_pre = pkgs.symlinkJoin {
+          name = "buildsrht-worker-images-pre";
+          paths = image_dirs;
+            # FIXME: not working, apparently because ubuntu/latest is a broken link
+            # ++ [ "${pkgs.sourcehut.buildsrht}/lib/images" ];
+        };
+        image_dir = pkgs.runCommand "buildsrht-worker-images" { } ''
+          mkdir -p $out/images
+          cp -Lr ${image_dir_pre}/* $out/images
+        '';
+        in mkMerge [
+        {
+          users.users.${cfg.builds.user}.shell = pkgs.bash;
+
+          virtualisation.docker.enable = true;
+
+          services.sourcehut.settings = mkMerge [
+            { # Note that git.sr.ht::dispatch is not a typo,
+              # gitsrht-dispatch always uses this section
+              "git.sr.ht::dispatch"."/usr/bin/buildsrht-keys" =
+                mkDefault "${cfg.builds.user}:${cfg.builds.group}";
+            }
+            (mkIf cfg.builds.enableWorker {
+              "builds.sr.ht::worker".shell = "/usr/bin/runner-shell";
+              "builds.sr.ht::worker".images = mkDefault "${image_dir}/images";
+              "builds.sr.ht::worker".controlcmd = mkDefault "${image_dir}/images/control";
+            })
+          ];
+        }
+        (mkIf cfg.builds.enableWorker {
+          users.groups = {
+            docker.members = [ cfg.builds.user ];
+          };
+        })
+        (mkIf (cfg.builds.enableWorker && cfg.nginx.enable) {
+          # Allow nginx access to buildlogs
+          users.users.${nginx.user}.extraGroups = [ cfg.builds.group ];
+          systemd.services.nginx = {
+            serviceConfig.BindReadOnlyPaths = [ cfg.settings."builds.sr.ht::worker".buildlogs ];
+          };
+          services.nginx.virtualHosts."logs.${domain}" = mkMerge [ {
+            /* FIXME: is a listen needed?
+            listen = with builtins;
+              # FIXME: not compatible with IPv6
+              let address = split ":" cfg.settings."builds.sr.ht::worker".name; in
+              [{ addr = elemAt address 0; port = lib.toInt (elemAt address 2); }];
+            */
+            locations."/logs/".alias = cfg.settings."builds.sr.ht::worker".buildlogs + "/";
+          } cfg.nginx.virtualHost ];
+        })
+      ];
+    })
+
+    (import ./service.nix "git" (let
+      baseService = {
+        path = [ cfg.git.package ];
+        serviceConfig.BindPaths = [ "${cfg.settings."git.sr.ht".repos}:/var/lib/sourcehut/gitsrht/repos" ];
+      };
+      in {
+      inherit configIniOfService;
+      mainService = mkMerge [ baseService {
+        serviceConfig.StateDirectory = [ "sourcehut/gitsrht" "sourcehut/gitsrht/repos" ];
+        preStart = mkIf (versionOlder config.system.stateVersion "22.05") (mkBefore ''
+          # Fix Git hooks of repositories pre-dating https://github.com/NixOS/nixpkgs/pull/133984
+          (
+          set +f
+          shopt -s nullglob
+          for h in /var/lib/sourcehut/gitsrht/repos/~*/*/hooks/{pre-receive,update,post-update}
+          do ln -fnsv /usr/bin/gitsrht-update-hook "$h"; done
+          )
+        '');
+      } ];
+      port = 5001;
+      webhooks = true;
+      extraTimers.gitsrht-periodic = {
+        service = baseService;
+        timerConfig.OnCalendar = ["*:0/20"];
+      };
+      extraConfig = mkMerge [
+        {
+          # https://stackoverflow.com/questions/22314298/git-push-results-in-fatal-protocol-error-bad-line-length-character-this
+          # Probably could use gitsrht-shell if output is restricted to just parameters...
+          users.users.${cfg.git.user}.shell = pkgs.bash;
+          services.sourcehut.settings = {
+            "git.sr.ht::dispatch"."/usr/bin/gitsrht-keys" =
+              mkDefault "${cfg.git.user}:${cfg.git.group}";
+          };
+          systemd.services.sshd = baseService;
+        }
+        (mkIf cfg.nginx.enable {
+          services.nginx.virtualHosts."git.${domain}" = {
+            locations."/authorize" = {
+              proxyPass = "http://${cfg.listenAddress}:${toString cfg.git.port}";
+              extraConfig = ''
+                proxy_pass_request_body off;
+                proxy_set_header Content-Length "";
+                proxy_set_header X-Original-URI $request_uri;
+              '';
+            };
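+            # Serve Git's smart HTTP protocol (clone/fetch) via git-http-backend over fcgiwrap.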
+            locations."~ ^/([^/]+)/([^/]+)/(HEAD|info/refs|objects/info/.*|git-upload-pack).*$" = {
+              root = "/var/lib/sourcehut/gitsrht/repos";
+              fastcgiParams = {
+                GIT_HTTP_EXPORT_ALL = "";
+                GIT_PROJECT_ROOT = "$document_root";
+                PATH_INFO = "$uri";
+                SCRIPT_FILENAME = "${cfg.git.package}/bin/git-http-backend";
+              };
+              extraConfig = ''
+                auth_request /authorize;
+                fastcgi_read_timeout 500s;
+                fastcgi_pass unix:/run/gitsrht-fcgiwrap.sock;
+                gzip off;
+              '';
+            };
+          };
+          systemd.sockets.gitsrht-fcgiwrap = {
+            before = [ "nginx.service" ];
+            wantedBy = [ "sockets.target" "gitsrht.service" ];
+            # This path remains accessible to nginx.service, which has no RootDirectory=
+            socketConfig.ListenStream = "/run/gitsrht-fcgiwrap.sock";
+            socketConfig.SocketUser = nginx.user;
+            socketConfig.SocketMode = "600";
+          };
+        })
+      ];
+      extraServices.gitsrht-api.serviceConfig = {
+        Restart = "always";
+        RestartSec = "5s";
+        ExecStart = "${pkgs.sourcehut.gitsrht}/bin/gitsrht-api -b ${cfg.listenAddress}:${toString (cfg.git.port + 100)}";
+        BindPaths = [ "${cfg.settings."git.sr.ht".repos}:/var/lib/sourcehut/gitsrht/repos" ];
+      };
+      extraServices.gitsrht-fcgiwrap = mkIf cfg.nginx.enable {
+        serviceConfig = {
+          # Socket is passed by gitsrht-fcgiwrap.socket
+          ExecStart = "${pkgs.fcgiwrap}/sbin/fcgiwrap -c ${toString cfg.git.fcgiwrap.preforkProcess}";
+          # No need for config.ini
+          ExecStartPre = mkForce [];
+          User = null;
+          DynamicUser = true;
+          BindReadOnlyPaths = [ "${cfg.settings."git.sr.ht".repos}:/var/lib/sourcehut/gitsrht/repos" ];
+          IPAddressDeny = "any";
+          InaccessiblePaths = [ "-+/run/postgresql" "-+/run/redis-sourcehut" ];
+          PrivateNetwork = true;
+          RestrictAddressFamilies = mkForce [ "none" ];
+          SystemCallFilter = mkForce [
+            "@system-service"
+            "~@aio" "~@keyring" "~@memlock" "~@privileged" "~@resources" "~@setuid"
+            # @timer is needed for alarm()
+          ];
+        };
+      };
+    }))
+
+    (import ./service.nix "hg" (let
+      baseService = {
+        path = [ cfg.hg.package ];
+        serviceConfig.BindPaths = [ "${cfg.settings."hg.sr.ht".repos}:/var/lib/sourcehut/hgsrht/repos" ];
+      };
+      in {
+      inherit configIniOfService;
+      mainService = mkMerge [ baseService {
+        serviceConfig.StateDirectory = [ "sourcehut/hgsrht" "sourcehut/hgsrht/repos" ];
+      } ];
+      port = 5010;
+      webhooks = true;
+      extraTimers.hgsrht-periodic = {
+        service = baseService;
+        timerConfig.OnCalendar = ["*:0/20"];
+      };
+      extraTimers.hgsrht-clonebundles = mkIf cfg.hg.cloneBundles {
+        service = baseService;
+        timerConfig.OnCalendar = ["daily"];
+        timerConfig.AccuracySec = "1h";
+      };
+      extraServices.hgsrht-api = {
+        serviceConfig.Restart = "always";
+        serviceConfig.RestartSec = "5s";
+        serviceConfig.ExecStart = "${pkgs.sourcehut.hgsrht}/bin/hgsrht-api -b ${cfg.listenAddress}:${toString (cfg.hg.port + 100)}";
+      };
+      extraConfig = mkMerge [
+        {
+          users.users.${cfg.hg.user}.shell = pkgs.bash;
+          services.sourcehut.settings = {
+            # Note that git.sr.ht::dispatch is not a typo,
+            # gitsrht-dispatch always uses this section.
+            "git.sr.ht::dispatch"."/usr/bin/hgsrht-keys" =
+              mkDefault "${cfg.hg.user}:${cfg.hg.group}";
+          };
+          systemd.services.sshd = baseService;
+        }
+        (mkIf cfg.nginx.enable {
+          # Allow nginx access to repositories
+          users.users.${nginx.user}.extraGroups = [ cfg.hg.group ];
+          services.nginx.virtualHosts."hg.${domain}" = {
+            locations."/authorize" = {
+              proxyPass = "http://${cfg.listenAddress}:${toString cfg.hg.port}";
+              extraConfig = ''
+                proxy_pass_request_body off;
+                proxy_set_header Content-Length "";
+                proxy_set_header X-Original-URI $request_uri;
+              '';
+            };
+            # Let clients reach pull bundles. We don't really need to lock this down even for
+            # private repos because the bundles are named after the revision hashes...
+            # so someone would need to know or guess a SHA value to download anything.
+            # TODO: proxyPass to an hg serve service?
+            locations."~ ^/[~^][a-z0-9_]+/[a-zA-Z0-9_.-]+/\\.hg/bundles/.*$" = {
+              root = "/var/lib/nginx/hgsrht/repos";
+              extraConfig = ''
+                auth_request /authorize;
+                gzip off;
+              '';
+            };
+          };
+          systemd.services.nginx = {
+            serviceConfig.BindReadOnlyPaths = [ "${cfg.settings."hg.sr.ht".repos}:/var/lib/nginx/hgsrht/repos" ];
+          };
+        })
+      ];
+    }))
+
+    (import ./service.nix "hub" {
+      inherit configIniOfService;
+      port = 5014;
+      extraConfig = {
+        services.nginx = mkIf cfg.nginx.enable {
+          virtualHosts."hub.${domain}" = mkMerge [ {
+            serverAliases = [ domain ];
+          } cfg.nginx.virtualHost ];
+        };
+      };
+    })
+
+    (import ./service.nix "lists" (let
+      srvsrht = "listssrht";
+      in {
+      inherit configIniOfService;
+      port = 5006;
+      webhooks = true;
+      extraServices.listssrht-api = {
+        serviceConfig.Restart = "always";
+        serviceConfig.RestartSec = "5s";
+        serviceConfig.ExecStart = "${pkgs.sourcehut.listssrht}/bin/listssrht-api -b ${cfg.listenAddress}:${toString (cfg.lists.port + 100)}";
+      };
+      # Receive mail from Postfix and enqueue it into Redis and PostgreSQL
+      extraServices.listssrht-lmtp = {
+        wants = [ "postfix.service" ];
+        unitConfig.JoinsNamespaceOf = optional cfg.postfix.enable "postfix.service";
+        serviceConfig.ExecStart = "${pkgs.sourcehut.listssrht}/bin/listssrht-lmtp";
+        # Avoid crashing: os.chown(sock, os.getuid(), sock_gid)
+        serviceConfig.PrivateUsers = mkForce false;
+      };
+      # Dequeue mails from Redis and dispatch them
+      extraServices.listssrht-process = {
+        # preStart is a NixOS unit option, not a [Service] directive,
+        # so it belongs outside serviceConfig.
+        preStart = ''
+          cp ${pkgs.writeText "${srvsrht}-webhooks-celeryconfig.py" cfg.lists.process.celeryConfig} \
+             /run/sourcehut/${srvsrht}-webhooks/celeryconfig.py
+        '';
+        serviceConfig = {
+          ExecStart = "${cfg.python}/bin/celery --app listssrht.process worker --hostname listssrht-process@%%h " + concatStringsSep " " cfg.lists.process.extraArgs;
+          # Avoid crashing: os.getloadavg()
+          ProcSubset = mkForce "all";
+        };
+      };
+      extraConfig = mkIf cfg.postfix.enable {
+        users.groups.${postfix.group}.members = [ cfg.lists.user ];
+        services.sourcehut.settings."lists.sr.ht::mail".sock-group = postfix.group;
+        services.postfix = {
+          destination = [ "lists.${domain}" ];
+          # FIXME: an accurate recipient list should be queried
+          # from the lists.sr.ht PostgreSQL database to avoid backscattering.
+          # But usernames are unfortunately not in that database but in meta.sr.ht.
+          # Note that two syntaxes are allowed:
+          # - ~username/list-name@lists.${domain}
+          # - u.username.list-name@lists.${domain}
+          localRecipients = [ "@lists.${domain}" ];
+          transport = ''
+            lists.${domain} lmtp:unix:${cfg.settings."lists.sr.ht::worker".sock}
+          '';
+        };
+      };
+    }))
+
+    (import ./service.nix "man" {
+      inherit configIniOfService;
+      port = 5004;
+    })
+
+    (import ./service.nix "meta" {
+      inherit configIniOfService;
+      port = 5000;
+      webhooks = true;
+      extraTimers.metasrht-daily.timerConfig = {
+        OnCalendar = ["daily"];
+        AccuracySec = "1h";
+      };
+      extraServices.metasrht-api = {
+        serviceConfig.Restart = "always";
+        serviceConfig.RestartSec = "5s";
+        preStart = "set -x\n" + concatStringsSep "\n\n" (attrValues (mapAttrs (k: s:
+          let srvMatch = builtins.match "^([a-z]*)\\.sr\\.ht$" k;
+              srv = head srvMatch;
+          in
+          # Configure client(s) as "preauthorized"
+          optionalString (srvMatch != null && cfg.${srv}.enable && ((s.oauth-client-id or null) != null)) ''
+            # Configure ${srv}'s OAuth client as "preauthorized"
+            ${postgresql.package}/bin/psql '${cfg.settings."meta.sr.ht".connection-string}' \
+              -c "UPDATE oauthclient SET preauthorized = true WHERE client_id = '${s.oauth-client-id}'"
+          ''
+          ) cfg.settings));
+        serviceConfig.ExecStart = "${pkgs.sourcehut.metasrht}/bin/metasrht-api -b ${cfg.listenAddress}:${toString (cfg.meta.port + 100)}";
+      };
+      extraConfig = {
+        assertions = [
+          { assertion = let s = cfg.settings."meta.sr.ht::billing"; in
+                        s.enabled == "yes" -> (s.stripe-public-key != null && s.stripe-secret-key != null);
+            message = "If meta.sr.ht::billing is enabled, the keys must be defined.";
+          }
+        ];
+        environment.systemPackages = optional cfg.meta.enable
+          (pkgs.writeShellScriptBin "metasrht-manageuser" ''
+            set -eux
+            if test "$(${pkgs.coreutils}/bin/id -n -u)" != '${cfg.meta.user}'
+            then exec sudo -u '${cfg.meta.user}' "$0" "$@"
+            else
+              # In order to load config.ini
+              if cd /run/sourcehut/metasrht
+              then exec ${pkgs.sourcehut.metasrht}/bin/metasrht-manageuser "$@"
+              else cat <<EOF
+                Please run: sudo systemctl start metasrht
+            EOF
+                exit 1
+              fi
+            fi
+          '');
+      };
+    })
+
+    (import ./service.nix "pages" {
+      inherit configIniOfService;
+      port = 5112;
+      mainService = let
+        srvsrht = "pagessrht";
+        version = pkgs.sourcehut.${srvsrht}.version;
+        stateDir = "/var/lib/sourcehut/${srvsrht}";
+        iniKey = "pages.sr.ht";
+        in {
+        preStart = mkBefore ''
+          set -x
+          # Use the /run/sourcehut/${srvsrht}/config.ini
+          # installed by a previous ExecStartPre= in baseService
+          cd /run/sourcehut/${srvsrht}
+
+          if test ! -e ${stateDir}/db; then
+            ${postgresql.package}/bin/psql '${cfg.settings.${iniKey}.connection-string}' -f ${pkgs.sourcehut.pagessrht}/share/sql/schema.sql
+            echo ${version} >${stateDir}/db
+          fi
+
+          ${optionalString cfg.settings.${iniKey}.migrate-on-upgrade ''
+            # Just try all the migrations because they're not linked to the version
+            for sql in ${pkgs.sourcehut.pagessrht}/share/sql/migrations/*.sql; do
+              ${postgresql.package}/bin/psql '${cfg.settings.${iniKey}.connection-string}' -f "$sql" || true
+            done
+          ''}
+
+          # Disable webhook
+          touch ${stateDir}/webhook
+        '';
+        serviceConfig = {
+          ExecStart = mkForce "${pkgs.sourcehut.pagessrht}/bin/pages.sr.ht -b ${cfg.listenAddress}:${toString cfg.pages.port}";
+        };
+      };
+    })
+
+    (import ./service.nix "paste" {
+      inherit configIniOfService;
+      port = 5011;
+    })
+
+    (import ./service.nix "todo" {
+      inherit configIniOfService;
+      port = 5003;
+      webhooks = true;
+      extraServices.todosrht-api = {
+        serviceConfig.Restart = "always";
+        serviceConfig.RestartSec = "5s";
+        serviceConfig.ExecStart = "${pkgs.sourcehut.todosrht}/bin/todosrht-api -b ${cfg.listenAddress}:${toString (cfg.todo.port + 100)}";
+      };
+      extraServices.todosrht-lmtp = {
+        wants = [ "postfix.service" ];
+        unitConfig.JoinsNamespaceOf = optional cfg.postfix.enable "postfix.service";
+        serviceConfig.ExecStart = "${pkgs.sourcehut.todosrht}/bin/todosrht-lmtp";
+        # Avoid crashing: os.chown(sock, os.getuid(), sock_gid)
+        serviceConfig.PrivateUsers = mkForce false;
+      };
+      extraConfig = mkIf cfg.postfix.enable {
+        users.groups.${postfix.group}.members = [ cfg.todo.user ];
+        services.sourcehut.settings."todo.sr.ht::mail".sock-group = postfix.group;
+        services.postfix = {
+          destination = [ "todo.${domain}" ];
+          # FIXME: an accurate recipient list should be queried
+          # from the todo.sr.ht PostgreSQL database to avoid backscattering.
+          # But usernames are unfortunately not in that database but in meta.sr.ht.
+          # Note that two syntaxes are allowed:
+          # - ~username/tracker-name@todo.${domain}
+          # - u.username.tracker-name@todo.${domain}
+          localRecipients = [ "@todo.${domain}" ];
+          transport = ''
+            todo.${domain} lmtp:unix:${cfg.settings."todo.sr.ht::mail".sock}
+          '';
+        };
+      };
+    })
+
+    (mkRenamedOptionModule [ "services" "sourcehut" "originBase" ]
+                           [ "services" "sourcehut" "settings" "sr.ht" "global-domain" ])
+    (mkRenamedOptionModule [ "services" "sourcehut" "address" ]
+                           [ "services" "sourcehut" "listenAddress" ])
+
+    (mkRemovedOptionModule [ "services" "sourcehut" "dispatch" ] ''
+        dispatch is deprecated. See https://sourcehut.org/blog/2022-08-01-dispatch-deprecation-plans/
+        for more information.
+    '')
+
+    (mkRemovedOptionModule [ "services" "sourcehut" "services"] ''
+        This option was removed in favor of individual <service>.enable flags.
+    '')
+  ];
+
+  meta.doc = ./default.md;
+  meta.maintainers = with maintainers; [ tomberek ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/sourcehut/service.nix b/nixpkgs/nixos/modules/services/misc/sourcehut/service.nix
new file mode 100644
index 000000000000..f08d5eb46871
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sourcehut/service.nix
@@ -0,0 +1,407 @@
+srv:
+{ configIniOfService
+, srvsrht ? "${srv}srht" # Because "buildsrht" does not follow that pattern (missing an "s").
+, iniKey ? "${srv}.sr.ht"
+, webhooks ? false
+, extraTimers ? {}
+, mainService ? {}
+, extraServices ? {}
+, extraConfig ? {}
+, port
+}:
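+# Instantiated once per service from ./default.nix, e.g.:
+#   (import ./service.nix "man" { inherit configIniOfService; port = 5004; })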
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  inherit (config.services) postgresql;
+  redis = config.services.redis.servers."sourcehut-${srvsrht}";
+  inherit (config.users) users;
+  cfg = config.services.sourcehut;
+  configIni = configIniOfService srv;
+  srvCfg = cfg.${srv};
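+  # Hardened systemd unit template shared by the main website service, the
+  # webhooks worker, timers and extra services; each instance runs chrooted
+  # under /run/sourcehut/chroots/<serviceName>.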
+  baseService = serviceName: { allowStripe ? false }: extraService: let
+    runDir = "/run/sourcehut/${serviceName}";
+    rootDir = "/run/sourcehut/chroots/${serviceName}";
+    in
+    mkMerge [ extraService {
+    after = [ "network.target" ] ++
+      optional cfg.postgresql.enable "postgresql.service" ++
+      optional cfg.redis.enable "redis-sourcehut-${srvsrht}.service";
+    requires =
+      optional cfg.postgresql.enable "postgresql.service" ++
+      optional cfg.redis.enable "redis-sourcehut-${srvsrht}.service";
+    path = [ pkgs.gawk ];
+    environment.HOME = runDir;
+    serviceConfig = {
+      User = mkDefault srvCfg.user;
+      Group = mkDefault srvCfg.group;
+      RuntimeDirectory = [
+        "sourcehut/${serviceName}"
+        # Used by *srht-keys which reads ../config.ini
+        "sourcehut/${serviceName}/subdir"
+        "sourcehut/chroots/${serviceName}"
+      ];
+      RuntimeDirectoryMode = "2750";
+      # No need for the chroot path once inside the chroot
+      InaccessiblePaths = [ "-+${rootDir}" ];
+      # g+rx is for group members (e.g. fcgiwrap or nginx)
+      # to read Git/Mercurial repositories, buildlogs, etc.
+      # o+x is for intermediate directories created by BindPaths= and the like,
+      # as they're owned by root:root.
+      UMask = "0026";
+      RootDirectory = rootDir;
+      RootDirectoryStartOnly = true;
+      PrivateTmp = true;
+      MountAPIVFS = true;
+      # config.ini is looked up in there, before /etc/srht/config.ini
+      # Note that it fails to be set in ExecStartPre=
+      WorkingDirectory = mkDefault ("-"+runDir);
+      BindReadOnlyPaths = [
+        builtins.storeDir
+        "/etc"
+        "/run/booted-system"
+        "/run/current-system"
+        "/run/systemd"
+        ] ++
+        optional cfg.postgresql.enable "/run/postgresql" ++
+        optional cfg.redis.enable "/run/redis-sourcehut-${srvsrht}";
+      # LoadCredential= is unfortunately not available in ExecStartPre=
+      # Hence this one is run as root (the +) with RootDirectoryStartOnly=
+      # to reach credentials wherever they are.
+      # Note that each systemd service gets its own ${runDir}/config.ini file.
+      ExecStartPre = mkBefore [("+"+pkgs.writeShellScript "${serviceName}-credentials" ''
+        set -x
+        # Replace values beginning with a '<' with the content of the file whose name follows.
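+        # e.g. an (illustrative) line "some-key=</path/to/secret" becomes "some-key=<first line of that file>".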
+        gawk '{ if (match($0,/^([^=]+=)<(.+)/,m)) { getline f < m[2]; print m[1] f } else print $0 }' ${configIni} |
+        ${optionalString (!allowStripe) "gawk '!/^stripe-secret-key=/' |"}
+        install -o ${srvCfg.user} -g root -m 400 /dev/stdin ${runDir}/config.ini
+      '')];
+      # The following options are only for optimizing:
+      # systemd-analyze security
+      AmbientCapabilities = "";
+      CapabilityBoundingSet = "";
+      # ProtectClock= adds DeviceAllow=char-rtc r
+      DeviceAllow = "";
+      LockPersonality = true;
+      MemoryDenyWriteExecute = true;
+      NoNewPrivileges = true;
+      PrivateDevices = true;
+      PrivateMounts = true;
+      PrivateNetwork = mkDefault false;
+      PrivateUsers = true;
+      ProcSubset = "pid";
+      ProtectClock = true;
+      ProtectControlGroups = true;
+      ProtectHome = true;
+      ProtectHostname = true;
+      ProtectKernelLogs = true;
+      ProtectKernelModules = true;
+      ProtectKernelTunables = true;
+      ProtectProc = "invisible";
+      ProtectSystem = "strict";
+      RemoveIPC = true;
+      RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+      RestrictNamespaces = true;
+      RestrictRealtime = true;
+      RestrictSUIDSGID = true;
+      #SocketBindAllow = [ "tcp:${toString srvCfg.port}" "tcp:${toString srvCfg.prometheusPort}" ];
+      #SocketBindDeny = "any";
+      SystemCallFilter = [
+        "@system-service"
+        "~@aio" "~@keyring" "~@memlock" "~@privileged" "~@timer"
+        "@chown" "@setuid"
+      ];
+      SystemCallArchitectures = "native";
+    };
+  } ];
+in
+{
+  options.services.sourcehut.${srv} = {
+    enable = mkEnableOption (lib.mdDoc "${srv} service");
+
+    user = mkOption {
+      type = types.str;
+      default = srvsrht;
+      description = lib.mdDoc ''
+        User for ${srv}.sr.ht.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = srvsrht;
+      description = lib.mdDoc ''
+        Group for ${srv}.sr.ht.
+        Membership grants access to the Git/Mercurial repositories by default,
+        but not to the config.ini file (where secrets are).
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = port;
+      description = lib.mdDoc ''
+        Port on which the "${srv}" backend should listen.
+      '';
+    };
+
+    redis = {
+      host = mkOption {
+        type = types.str;
+        default = "unix:///run/redis-sourcehut-${srvsrht}/redis.sock?db=0";
+        example = "redis://shared.wireguard:6379/0";
+        description = lib.mdDoc ''
+          The redis host URL. This is used for caching and temporary storage, and must
+          be shared between nodes (e.g. git1.sr.ht and git2.sr.ht), but need not be
+          shared between services. It may be shared between services, however, with no
+          ill effect, if this better suits your infrastructure.
+        '';
+      };
+    };
+
+    postgresql = {
+      database = mkOption {
+        type = types.str;
+        default = "${srv}.sr.ht";
+        description = lib.mdDoc ''
+          PostgreSQL database name for the ${srv}.sr.ht service,
+          used if [](#opt-services.sourcehut.postgresql.enable) is `true`.
+        '';
+      };
+    };
+
+    gunicorn = {
+      extraArgs = mkOption {
+        type = with types; listOf str;
+        default = ["--timeout 120" "--workers 1" "--log-level=info"];
+        description = lib.mdDoc "Extra arguments passed to Gunicorn.";
+      };
+    };
+  } // optionalAttrs webhooks {
+    webhooks = {
+      extraArgs = mkOption {
+        type = with types; listOf str;
+        default = ["--loglevel DEBUG" "--pool eventlet" "--without-heartbeat"];
+        description = lib.mdDoc "Extra arguments passed to the Celery responsible for webhooks.";
+      };
+      celeryConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Content of the `celeryconfig.py` used by the Celery responsible for webhooks.";
+      };
+    };
+  };
+
+  config = lib.mkIf (cfg.enable && srvCfg.enable) (mkMerge [ extraConfig {
+    users = {
+      users = {
+        "${srvCfg.user}" = {
+          isSystemUser = true;
+          group = mkDefault srvCfg.group;
+          description = mkDefault "sourcehut user for ${srv}.sr.ht";
+        };
+      };
+      groups = {
+        "${srvCfg.group}" = { };
+      } // optionalAttrs (cfg.postgresql.enable
+        && hasSuffix "0" (postgresql.settings.unix_socket_permissions or "")) {
+        "postgres".members = [ srvCfg.user ];
+      } // optionalAttrs (cfg.redis.enable
+        && hasSuffix "0" (redis.settings.unixsocketperm or "")) {
+        "redis-sourcehut-${srvsrht}".members = [ srvCfg.user ];
+      };
+    };
+
+    services.nginx = mkIf cfg.nginx.enable {
+      virtualHosts."${srv}.${cfg.settings."sr.ht".global-domain}" = mkMerge [ {
+        forceSSL = mkDefault true;
+        locations."/".proxyPass = "http://${cfg.listenAddress}:${toString srvCfg.port}";
+        locations."/static" = {
+          root = "${pkgs.sourcehut.${srvsrht}}/${pkgs.sourcehut.python.sitePackages}/${srvsrht}";
+          extraConfig = mkDefault ''
+            expires 30d;
+          '';
+        };
+        locations."/query" = mkIf (cfg.settings.${iniKey} ? api-origin) {
+          proxyPass = cfg.settings.${iniKey}.api-origin;
+          extraConfig = ''
+            add_header 'Access-Control-Allow-Origin' '*';
+            add_header 'Access-Control-Allow-Methods' 'GET, POST, OPTIONS';
+            add_header 'Access-Control-Allow-Headers' 'User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type,Range';
+
+            if ($request_method = 'OPTIONS') {
+              add_header 'Access-Control-Max-Age' 1728000;
+              add_header 'Content-Type' 'text/plain; charset=utf-8';
+              add_header 'Content-Length' 0;
+              return 204;
+            }
+
+            add_header 'Access-Control-Expose-Headers' 'Content-Length,Content-Range';
+          '';
+        };
+      } cfg.nginx.virtualHost ];
+    };
+
+    services.postgresql = mkIf cfg.postgresql.enable {
+      authentication = ''
+        local ${srvCfg.postgresql.database} ${srvCfg.user} trust
+      '';
+      ensureDatabases = [ srvCfg.postgresql.database ];
+      ensureUsers = map (name: {
+          inherit name;
+          # We don't use ensureDBOwnership because we have a special default database name with dots.
+          # TODO(for maintainers of sourcehut): migrate away from custom preStart script.
+          ensureDBOwnership = false;
+        }) [srvCfg.user];
+    };
+
+
+    services.sourcehut.settings = mkMerge [
+      {
+        "${srv}.sr.ht".origin = mkDefault "https://${srv}.${cfg.settings."sr.ht".global-domain}";
+      }
+
+      (mkIf cfg.postgresql.enable {
+        "${srv}.sr.ht".connection-string = mkDefault "postgresql:///${srvCfg.postgresql.database}?user=${srvCfg.user}&host=/run/postgresql";
+      })
+    ];
+
+    services.redis.servers."sourcehut-${srvsrht}" = mkIf cfg.redis.enable {
+      enable = true;
+      databases = 3;
+      syslog = true;
+      # TODO: set a more informed value
+      save = mkDefault [ [1800 10] [300 100] ];
+      settings = {
+        # TODO: set a more informed value
+        maxmemory = "128MB";
+        maxmemory-policy = "volatile-ttl";
+      };
+    };
+
+    systemd.services = mkMerge [
+      {
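+        # Only meta.sr.ht gets stripe-secret-key in its config.ini (see allowStripe in baseService).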
+        "${srvsrht}" = baseService srvsrht { allowStripe = srv == "meta"; } (mkMerge [
+        {
+          description = "sourcehut ${srv}.sr.ht website service";
+          before = optional cfg.nginx.enable "nginx.service";
+          wants = optional cfg.nginx.enable "nginx.service";
+          wantedBy = [ "multi-user.target" ];
+          path = optional cfg.postgresql.enable postgresql.package;
+          # Beware: a change in credentials' content will not trigger a restart.
+          restartTriggers = [ configIni ];
+          serviceConfig = {
+            Type = "simple";
+            Restart = mkDefault "always";
+            #RestartSec = mkDefault "2min";
+            StateDirectory = [ "sourcehut/${srvsrht}" ];
+            StateDirectoryMode = "2750";
+            ExecStart = "${cfg.python}/bin/gunicorn ${srvsrht}.app:app --name ${srvsrht} --bind ${cfg.listenAddress}:${toString srvCfg.port} " + concatStringsSep " " srvCfg.gunicorn.extraArgs;
+          };
+          preStart = let
+            version = pkgs.sourcehut.${srvsrht}.version;
+            stateDir = "/var/lib/sourcehut/${srvsrht}";
+            in mkBefore ''
+            set -x
+            # Use the /run/sourcehut/${srvsrht}/config.ini
+            # installed by a previous ExecStartPre= in baseService
+            cd /run/sourcehut/${srvsrht}
+
+            if test ! -e ${stateDir}/db; then
+              # Set up the initial database.
+              # Note that it stamps the alembic head afterward
+              ${cfg.python}/bin/${srvsrht}-initdb
+              echo ${version} >${stateDir}/db
+            fi
+
+            ${optionalString cfg.settings.${iniKey}.migrate-on-upgrade ''
+              if [ "$(cat ${stateDir}/db)" != "${version}" ]; then
+                # Manage schema migrations using alembic
+                ${cfg.python}/bin/${srvsrht}-migrate -a upgrade head
+                echo ${version} >${stateDir}/db
+              fi
+            ''}
+
+            # Update the copy of each user's profile to the latest
+            # See https://lists.sr.ht/~sircmpwn/sr.ht-admins/<20190302181207.GA13778%40cirno.my.domain>
+            if test ! -e ${stateDir}/webhook; then
+              # Update ${iniKey}'s copy of user profiles to the latest
+              ${cfg.python}/bin/srht-update-profiles ${iniKey}
+              touch ${stateDir}/webhook
+            fi
+          '';
+        } mainService ]);
+      }
+
+      (mkIf webhooks {
+        "${srvsrht}-webhooks" = baseService "${srvsrht}-webhooks" {}
+          {
+            description = "sourcehut ${srv}.sr.ht webhooks service";
+            after = [ "${srvsrht}.service" ];
+            wantedBy = [ "${srvsrht}.service" ];
+            partOf = [ "${srvsrht}.service" ];
+            preStart = ''
+              cp ${pkgs.writeText "${srvsrht}-webhooks-celeryconfig.py" srvCfg.webhooks.celeryConfig} \
+                 /run/sourcehut/${srvsrht}-webhooks/celeryconfig.py
+            '';
+            serviceConfig = {
+              Type = "simple";
+              Restart = "always";
+              ExecStart = "${cfg.python}/bin/celery --app ${srvsrht}.webhooks worker --hostname ${srvsrht}-webhooks@%%h " + concatStringsSep " " srvCfg.webhooks.extraArgs;
+              # Avoid crashing: os.getloadavg()
+              ProcSubset = mkForce "all";
+            };
+          };
+      })
+
+      (mapAttrs (timerName: timer: (baseService timerName {} (mkMerge [
+        {
+          description = "sourcehut ${timerName} service";
+          after = [ "network.target" "${srvsrht}.service" ];
+          serviceConfig = {
+            Type = "oneshot";
+            ExecStart = "${cfg.python}/bin/${timerName}";
+          };
+        }
+        (timer.service or {})
+      ]))) extraTimers)
+
+      (mapAttrs (serviceName: extraService: baseService serviceName {} (mkMerge [
+        {
+          description = "sourcehut ${serviceName} service";
+          # So that extraServices have the PostgreSQL database initialized.
+          after = [ "${srvsrht}.service" ];
+          wantedBy = [ "${srvsrht}.service" ];
+          partOf = [ "${srvsrht}.service" ];
+          serviceConfig = {
+            Type = "simple";
+            Restart = mkDefault "always";
+          };
+        }
+        extraService
+      ])) extraServices)
+
+      # Work around 'pq: permission denied for schema public' with postgres v15.
+      # See https://github.com/NixOS/nixpkgs/issues/216989
+      # Workaround taken from nixos/forgejo: https://github.com/NixOS/nixpkgs/pull/262741
+      # TODO(to maintainers of sourcehut): please migrate away from this workaround
+      # by migrating away from database name defaults with dots.
+      (lib.mkIf (
+          cfg.postgresql.enable
+          && lib.strings.versionAtLeast config.services.postgresql.package.version "15.0"
+        ) {
+          postgresql.postStart = (lib.mkAfter ''
+            $PSQL -tAc 'ALTER DATABASE "${srvCfg.postgresql.database}" OWNER TO "${srvCfg.user}";'
+          '');
+        }
+      )
+    ];
+
+    systemd.timers = mapAttrs (timerName: timer:
+      {
+        description = "sourcehut timer for ${timerName}";
+        wantedBy = [ "timers.target" ];
+        inherit (timer) timerConfig;
+      }) extraTimers;
+  } ]);
+}
diff --git a/nixpkgs/nixos/modules/services/misc/spice-autorandr.nix b/nixpkgs/nixos/modules/services/misc/spice-autorandr.nix
new file mode 100644
index 000000000000..8437441c752a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/spice-autorandr.nix
@@ -0,0 +1,26 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.spice-autorandr;
+in
+{
+  options = {
+    services.spice-autorandr = {
+      enable = lib.mkEnableOption (lib.mdDoc "spice-autorandr service that will automatically resize display to match SPICE client window size.");
+      package = lib.mkPackageOptionMD pkgs "spice-autorandr" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.user.services.spice-autorandr = {
+      wantedBy = [ "default.target" ];
+      after = [ "spice-vdagentd.service" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/spice-autorandr";
+        Restart = "on-failure";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix b/nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix
new file mode 100644
index 000000000000..bde64847d89e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/spice-vdagentd.nix
@@ -0,0 +1,30 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.spice-vdagentd;
+in
+{
+  options = {
+    services.spice-vdagentd = {
+      enable = mkEnableOption (lib.mdDoc "Spice guest vdagent daemon");
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.spice-vdagent ];
+
+    systemd.services.spice-vdagentd = {
+      description = "spice-vdagent daemon";
+      wantedBy = [ "graphical.target" ];
+      preStart = ''
+        mkdir -p "/run/spice-vdagentd/"
+      '';
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.spice-vdagent}/bin/spice-vdagentd";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/spice-webdavd.nix b/nixpkgs/nixos/modules/services/misc/spice-webdavd.nix
new file mode 100644
index 000000000000..6c817e429ac6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/spice-webdavd.nix
@@ -0,0 +1,38 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.spice-webdavd;
+in
+{
+  options = {
+    services.spice-webdavd = {
+      enable = mkEnableOption (lib.mdDoc "the spice guest webdav proxy daemon");
+
+      package = mkOption {
+        default = pkgs.phodav;
+        defaultText = literalExpression "pkgs.phodav";
+        type = types.package;
+        description = lib.mdDoc "spice-webdavd provider package to use.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # ensure the webdav fs this exposes can actually be mounted
+    services.davfs2.enable = true;
+
+    # add the udev rule which starts the proxy when the spice socket is present
+    services.udev.packages = [ cfg.package ];
+
+    systemd.services.spice-webdavd = {
+      description = "spice-webdav proxy daemon";
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cfg.package}/bin/spice-webdavd -p 9843";
+        Restart = "on-success";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/sssd.nix b/nixpkgs/nixos/modules/services/misc/sssd.nix
new file mode 100644
index 000000000000..f83c82bbb7d7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sssd.nix
@@ -0,0 +1,166 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.sssd;
+  nscd = config.services.nscd;
+
+  dataDir = "/var/lib/sssd";
+  settingsFile = "${dataDir}/sssd.conf";
+  settingsFileUnsubstituted = pkgs.writeText "${dataDir}/sssd-unsubstituted.conf" cfg.config;
+in {
+  options = {
+    services.sssd = {
+      enable = mkEnableOption (lib.mdDoc "the System Security Services Daemon");
+
+      config = mkOption {
+        type = types.lines;
+        description = lib.mdDoc "Contents of {file}`sssd.conf`.";
+        default = ''
+          [sssd]
+          config_file_version = 2
+          services = nss, pam
+          domains = shadowutils
+
+          [nss]
+
+          [pam]
+
+          [domain/shadowutils]
+          id_provider = proxy
+          proxy_lib_name = files
+          auth_provider = proxy
+          proxy_pam_target = sssd-shadowutils
+          proxy_fast_alias = True
+        '';
+      };
+
+      sshAuthorizedKeysIntegration = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to make sshd look up authorized keys from SSS.
+          For this to work, the `ssh` SSS service must be enabled in the sssd configuration.
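+          This typically means adding it to the `services` list in the `[sssd]` section, e.g. `services = nss, pam, ssh`.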
+        '';
+      };
+
+      kcm = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to use SSS as a Kerberos Cache Manager (KCM).
+          Kerberos will be configured to cache credentials in SSS.
+        '';
+      };
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Environment file as defined in {manpage}`systemd.exec(5)`.
+
+          Secrets may be passed to the service without adding them to the world-readable
+          Nix store, by specifying placeholder variables as the option value in Nix and
+          setting these variables accordingly in the environment file.
+
+          ```
+            # snippet of sssd-related config
+            [domain/LDAP]
+            ldap_default_authtok = $SSSD_LDAP_DEFAULT_AUTHTOK
+          ```
+
+          ```
+            # contents of the environment file
+            SSSD_LDAP_DEFAULT_AUTHTOK=verysecretpassword
+          ```
+        '';
+      };
+    };
+  };
+  config = mkMerge [
+    (mkIf cfg.enable {
+      # For `sssctl` to work.
+      environment.etc."sssd/sssd.conf".source = settingsFile;
+      environment.etc."sssd/conf.d".source = "${dataDir}/conf.d";
+
+      systemd.services.sssd = {
+        description = "System Security Services Daemon";
+        wantedBy    = [ "multi-user.target" ];
+        before = [ "systemd-user-sessions.service" "nss-user-lookup.target" ];
+        after = [ "network-online.target" "nscd.service" ];
+        requires = [ "network-online.target" "nscd.service" ];
+        wants = [ "nss-user-lookup.target" ];
+        restartTriggers = [
+          config.environment.etc."nscd.conf".source
+          settingsFileUnsubstituted
+        ];
+        script = ''
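+          # Expose both ldb's and sssd's ldb modules to the daemon.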
+          export LDB_MODULES_PATH+="''${LDB_MODULES_PATH+:}${pkgs.ldb}/modules/ldb:${pkgs.sssd}/modules/ldb"
+          mkdir -p /var/lib/sss/{pubconf,db,mc,pipes,gpo_cache,secrets} /var/lib/sss/pipes/private /var/lib/sss/pubconf/krb5.include.d
+          ${pkgs.sssd}/bin/sssd -D -c ${settingsFile}
+        '';
+        serviceConfig = {
+          Type = "forking";
+          PIDFile = "/run/sssd.pid";
+          StateDirectory = baseNameOf dataDir;
+          # We cannot use LoadCredential here because it's not available in ExecStartPre
+          EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
+        };
+        preStart = ''
+          mkdir -p "${dataDir}/conf.d"
+          [ -f ${settingsFile} ] && rm -f ${settingsFile}
+          old_umask=$(umask)
+          umask 0177
+          ${pkgs.envsubst}/bin/envsubst \
+            -o ${settingsFile} \
+            -i ${settingsFileUnsubstituted}
+          umask $old_umask
+        '';
+      };
+
+      system.nssModules = [ pkgs.sssd ];
+      system.nssDatabases = {
+        group = [ "sss" ];
+        passwd = [ "sss" ];
+        services = [ "sss" ];
+        shadow = [ "sss" ];
+      };
+      services.dbus.packages = [ pkgs.sssd ];
+    })
+
+    (mkIf cfg.kcm {
+      systemd.services.sssd-kcm = {
+        description = "SSSD Kerberos Cache Manager";
+        requires = [ "sssd-kcm.socket" ];
+        serviceConfig = {
+          ExecStartPre = "-${pkgs.sssd}/bin/sssd --genconf-section=kcm";
+          ExecStart = "${pkgs.sssd}/libexec/sssd/sssd_kcm --uid 0 --gid 0";
+        };
+        restartTriggers = [
+          settingsFileUnsubstituted
+        ];
+      };
+      systemd.sockets.sssd-kcm = {
+        description = "SSSD Kerberos Cache Manager responder socket";
+        wantedBy = [ "sockets.target" ];
+        # Matches the default in MIT krb5 and Heimdal:
+        # https://github.com/krb5/krb5/blob/krb5-1.19.3-final/src/include/kcm.h#L43
+        listenStreams = [ "/var/run/.heim_org.h5l.kcm-socket" ];
+      };
+      krb5.libdefaults.default_ccache_name = "KCM:";
+    })
+
+    (mkIf cfg.sshAuthorizedKeysIntegration {
+      # Ugly: sshd refuses to start if a store path is given, because /nix/store is group-writable.
+      # So we indirect through a file in /etc.
+      environment.etc."ssh/authorized_keys_command" = {
+        mode = "0755";
+        text = ''
+          #!/bin/sh
+          exec ${pkgs.sssd}/bin/sss_ssh_authorizedkeys "$@"
+        '';
+      };
+      services.openssh.authorizedKeysCommand = "/etc/ssh/authorized_keys_command";
+      services.openssh.authorizedKeysCommandUser = "nobody";
+    })
+  ];
+
+  meta.maintainers = with maintainers; [ bbigras ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/subsonic.nix b/nixpkgs/nixos/modules/services/misc/subsonic.nix
new file mode 100644
index 000000000000..0862d5782595
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/subsonic.nix
@@ -0,0 +1,169 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.subsonic;
+  opt = options.services.subsonic;
+in {
+  options = {
+    services.subsonic = {
+      enable = mkEnableOption (lib.mdDoc "Subsonic daemon");
+
+      home = mkOption {
+        type = types.path;
+        default = "/var/lib/subsonic";
+        description = lib.mdDoc ''
+          The directory where Subsonic will create files.
+          Make sure it is writable.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc ''
+          The host name or IP address on which to bind Subsonic.
+          Only relevant if you have multiple network interfaces and want
+          to make Subsonic available on only one of them. The default value
+          will bind Subsonic to all available network interfaces.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 4040;
+        description = lib.mdDoc ''
+          The port on which Subsonic will listen for
+          incoming HTTP traffic. Set to 0 to disable.
+        '';
+      };
+
+      httpsPort = mkOption {
+        type = types.port;
+        default = 0;
+        description = lib.mdDoc ''
+          The port on which Subsonic will listen for
+          incoming HTTPS traffic. Set to 0 to disable.
+        '';
+      };
+
+      contextPath = mkOption {
+        type = types.path;
+        default = "/";
+        description = lib.mdDoc ''
+          The context path, i.e., the last part of the Subsonic
+          URL. Typically '/' or '/subsonic'. Default '/'
+        '';
+      };
+
+      maxMemory = mkOption {
+        type = types.int;
+        default = 100;
+        description = lib.mdDoc ''
+          The memory limit (max Java heap size) in megabytes.
+          Default: 100
+        '';
+      };
+
+      defaultMusicFolder = mkOption {
+        type = types.path;
+        default = "/var/music";
+        description = lib.mdDoc ''
+          Configure Subsonic to use this folder for music.  This option
+          takes effect only the first time Subsonic is started.
+        '';
+      };
+
+      defaultPodcastFolder = mkOption {
+        type = types.path;
+        default = "/var/music/Podcast";
+        description = lib.mdDoc ''
+          Configure Subsonic to use this folder for Podcasts.  This option
+          takes effect only the first time Subsonic is started.
+        '';
+      };
+
+      defaultPlaylistFolder = mkOption {
+        type = types.path;
+        default = "/var/playlists";
+        description = lib.mdDoc ''
+          Configure Subsonic to use this folder for playlists.  This option
+          takes effect only the first time Subsonic is started.
+        '';
+      };
+
+      transcoders = mkOption {
+        type = types.listOf types.path;
+        default = [ "${pkgs.ffmpeg.bin}/bin/ffmpeg" ];
+        defaultText = literalExpression ''[ "''${pkgs.ffmpeg.bin}/bin/ffmpeg" ]'';
+        description = lib.mdDoc ''
+          List of paths to transcoder executables that should be accessible
+          from Subsonic. Symlinks will be created to each executable inside
+          ''${config.${opt.home}}/transcode.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.subsonic = {
+      description = "Personal media streamer";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        ${pkgs.jre8}/bin/java -Xmx${toString cfg.maxMemory}m \
+          -Dsubsonic.home=${cfg.home} \
+          -Dsubsonic.host=${cfg.listenAddress} \
+          -Dsubsonic.port=${toString cfg.port} \
+          -Dsubsonic.httpsPort=${toString cfg.httpsPort} \
+          -Dsubsonic.contextPath=${cfg.contextPath} \
+          -Dsubsonic.defaultMusicFolder=${cfg.defaultMusicFolder} \
+          -Dsubsonic.defaultPodcastFolder=${cfg.defaultPodcastFolder} \
+          -Dsubsonic.defaultPlaylistFolder=${cfg.defaultPlaylistFolder} \
+          -Djava.awt.headless=true \
+          -verbose:gc \
+          -jar ${pkgs.subsonic}/subsonic-booter-jar-with-dependencies.jar
+      '';
+
+      preStart = ''
+        # Formerly this module set cfg.home to /var/subsonic. Try to move
+        # /var/subsonic to cfg.home.
+        oldHome="/var/subsonic"
+        if [ "${cfg.home}" != "$oldHome" ] &&
+                ! [ -e "${cfg.home}" ] &&
+                [ -d "$oldHome" ] &&
+                [ $(${pkgs.coreutils}/bin/stat -c %u "$oldHome") -eq \
+                    ${toString config.users.users.subsonic.uid} ]; then
+            logger Moving "$oldHome" to "${cfg.home}"
+            ${pkgs.coreutils}/bin/mv -T "$oldHome" "${cfg.home}"
+        fi
+
+        # Install transcoders.
+        ${pkgs.coreutils}/bin/rm -rf ${cfg.home}/transcode ; \
+        ${pkgs.coreutils}/bin/mkdir -p ${cfg.home}/transcode ; \
+        ${pkgs.bash}/bin/bash -c ' \
+          for exe in "$@"; do \
+            ${pkgs.coreutils}/bin/ln -sf "$exe" ${cfg.home}/transcode; \
+          done' IGNORED_FIRST_ARG ${toString cfg.transcoders}
+      '';
+      serviceConfig = {
+        # Needed for Subsonic to find subsonic.war.
+        WorkingDirectory = "${pkgs.subsonic}";
+        Restart = "always";
+        User = "subsonic";
+        UMask = "0022";
+      };
+    };
+
+    users.users.subsonic = {
+      description = "Subsonic daemon user";
+      home = cfg.home;
+      createHome = true;
+      group = "subsonic";
+      uid = config.ids.uids.subsonic;
+    };
+
+    users.groups.subsonic.gid = config.ids.gids.subsonic;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/sundtek.nix b/nixpkgs/nixos/modules/services/misc/sundtek.nix
new file mode 100644
index 000000000000..e85d7c5b92b9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sundtek.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sundtek;
+
+in
+{
+  options.services.sundtek = {
+    enable = mkEnableOption (lib.mdDoc "Sundtek driver");
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.sundtek ];
+
+    systemd.services.sundtek = {
+      description = "Sundtek driver";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = ''
+          ${pkgs.sundtek}/bin/mediasrv -d -v -p ${pkgs.sundtek}/bin ;\
+          ${pkgs.sundtek}/bin/mediaclient --start --wait-for-devices
+          '';
+        ExecStop = "${pkgs.sundtek}/bin/mediaclient --shutdown";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/svnserve.nix b/nixpkgs/nixos/modules/services/misc/svnserve.nix
new file mode 100644
index 000000000000..a0103641c650
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/svnserve.nix
@@ -0,0 +1,46 @@
+# SVN server
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.svnserve;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.svnserve = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable svnserve to serve Subversion repositories through the SVN protocol.";
+      };
+
+      svnBaseDir = mkOption {
+        type = types.str;
+        default = "/repos";
+        description = lib.mdDoc "Base directory from which Subversion repositories are accessed.";
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.svnserve = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = "mkdir -p ${cfg.svnBaseDir}";
+      script = "${pkgs.subversion.out}/bin/svnserve -r ${cfg.svnBaseDir} -d --foreground --pid-file=/run/svnserve.pid";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/synergy.nix b/nixpkgs/nixos/modules/services/misc/synergy.nix
new file mode 100644
index 000000000000..0cbdc7599c0f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/synergy.nix
@@ -0,0 +1,149 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfgC = config.services.synergy.client;
+  cfgS = config.services.synergy.server;
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.synergy = {
+
+      # !!! All these option descriptions need to be cleaned up.
+
+      client = {
+        enable = mkEnableOption (lib.mdDoc "the Synergy client (receive keyboard and mouse events from a Synergy server)");
+
+        screenName = mkOption {
+          default = "";
+          type = types.str;
+          description = lib.mdDoc ''
+            Use the given name instead of the hostname to identify
+            ourselves to the server.
+          '';
+        };
+        serverAddress = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            The server address is of the form: [hostname][:port].  The
+            hostname must be the address or hostname of the server.  The
+            port overrides the default port, 24800.
+          '';
+        };
+        autoStart = mkOption {
+          default = true;
+          type = types.bool;
+          description = lib.mdDoc "Whether the Synergy client should be started automatically.";
+        };
+      };
+
+      server = {
+        enable = mkEnableOption (lib.mdDoc "the Synergy server (send keyboard and mouse events)");
+
+        configFile = mkOption {
+          type = types.path;
+          default = "/etc/synergy-server.conf";
+          description = lib.mdDoc "The Synergy server configuration file.";
+        };
+        screenName = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            Use the given name instead of the hostname to identify
+            this screen in the configuration.
+          '';
+        };
+        address = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "Address on which to listen for clients.";
+        };
+        autoStart = mkOption {
+          default = true;
+          type = types.bool;
+          description = lib.mdDoc "Whether the Synergy server should be started automatically.";
+        };
+        tls = {
+          enable = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether TLS encryption should be used.
+
+              Using this requires a TLS certificate that can be
+              generated by starting the Synergy GUI once and entering
+              a valid product key.
+            '';
+          };
+
+          cert = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "~/.synergy/SSL/Synergy.pem";
+            description = lib.mdDoc "The TLS certificate to use for encryption.";
+          };
+        };
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge [
+    (mkIf cfgC.enable {
+      systemd.user.services.synergy-client = {
+        after = [ "network.target" "graphical-session.target" ];
+        description = "Synergy client";
+        wantedBy = optional cfgC.autoStart "graphical-session.target";
+        path = [ pkgs.synergy ];
+        serviceConfig.ExecStart = ''${pkgs.synergy}/bin/synergyc -f ${optionalString (cfgC.screenName != "") "-n ${cfgC.screenName}"} ${cfgC.serverAddress}'';
+        serviceConfig.Restart = "on-failure";
+      };
+    })
+    (mkIf cfgS.enable {
+      systemd.user.services.synergy-server = {
+        after = [ "network.target" "graphical-session.target" ];
+        description = "Synergy server";
+        wantedBy = optional cfgS.autoStart "graphical-session.target";
+        path = [ pkgs.synergy ];
+        serviceConfig.ExecStart = ''${pkgs.synergy}/bin/synergys -c ${cfgS.configFile} -f${optionalString (cfgS.address != "") " -a ${cfgS.address}"}${optionalString (cfgS.screenName != "") " -n ${cfgS.screenName}"}${optionalString cfgS.tls.enable " --enable-crypto"}${optionalString (cfgS.tls.cert != null) (" --tls-cert ${cfgS.tls.cert}")}'';
+        serviceConfig.Restart = "on-failure";
+      };
+    })
+  ];
+
+}
+
+/* SYNERGY SERVER example configuration file
+section: screens
+  laptop:
+  dm:
+  win:
+end
+section: aliases
+  laptop:
+    192.168.5.5
+  dm:
+    192.168.5.78
+  win:
+    192.168.5.54
+end
+section: links
+  laptop:
+    left = dm
+  dm:
+    right = laptop
+    left = win
+  win:
+    right = dm
+end
+*/
diff --git a/nixpkgs/nixos/modules/services/misc/sysprof.nix b/nixpkgs/nixos/modules/services/misc/sysprof.nix
new file mode 100644
index 000000000000..25c5b0fabf61
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/sysprof.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, ... }:
+
+{
+  options = {
+    services.sysprof = {
+      enable = lib.mkEnableOption (lib.mdDoc "sysprof profiling daemon");
+    };
+  };
+
+  config = lib.mkIf config.services.sysprof.enable {
+    environment.systemPackages = [ pkgs.sysprof ];
+
+    services.dbus.packages = [ pkgs.sysprof ];
+
+    systemd.packages = [ pkgs.sysprof ];
+  };
+
+  meta.maintainers = pkgs.sysprof.meta.maintainers;
+}
diff --git a/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix b/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix
new file mode 100644
index 000000000000..63d3e3d2a857
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/tandoor-recipes.nix
@@ -0,0 +1,145 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.tandoor-recipes;
+  pkg = cfg.package;
+
+  # SECRET_KEY is expected to be provided separately, e.g. through an env file
+  env = {
+    GUNICORN_CMD_ARGS = "--bind=${cfg.address}:${toString cfg.port}";
+    DEBUG = "0";
+    DEBUG_TOOLBAR = "0";
+    MEDIA_ROOT = "/var/lib/tandoor-recipes";
+  } // optionalAttrs (config.time.timeZone != null) {
+    TIMEZONE = config.time.timeZone;
+  } // (
+    lib.mapAttrs (_: toString) cfg.extraConfig
+  );
+
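+  # Wrapper around Django's manage.py with the service environment applied;
+  # preStart links it to /var/lib/tandoor-recipes/tandoor-recipes-manage.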
+  manage =
+    let
+      setupEnv = lib.concatStringsSep "\n" (mapAttrsToList (name: val: "export ${name}=\"${val}\"") env);
+    in
+    pkgs.writeShellScript "manage" ''
+      ${setupEnv}
+      exec ${pkg}/bin/tandoor-recipes "$@"
+    '';
+in
+{
+  meta.maintainers = with maintainers; [ ambroisie ];
+
+  options.services.tandoor-recipes = {
+    enable = mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable Tandoor Recipes.
+
+        When started, the Tandoor Recipes database is automatically created if
+        it doesn't exist and updated if the package has changed. Both tasks are
+        achieved by running a Django migration.
+
+        A script to manage the instance (by wrapping Django's manage.py) is linked to
+        `/var/lib/tandoor-recipes/tandoor-recipes-manage`.
+      '';
+    };
+
+    address = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Web interface address.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc "Web interface port.";
+    };
+
+    extraConfig = mkOption {
+      type = types.attrs;
+      default = { };
+      description = lib.mdDoc ''
+        Extra Tandoor Recipes configuration options.
+
+        See [the example dot-env file](https://raw.githubusercontent.com/vabene1111/recipes/master/.env.template)
+        for available options.
+      '';
+      example = {
+        ENABLE_SIGNUP = "1";
+      };
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.tandoor-recipes;
+      defaultText = literalExpression "pkgs.tandoor-recipes";
+      description = lib.mdDoc "The Tandoor Recipes package to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.tandoor-recipes = {
+      description = "Tandoor Recipes server";
+
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg.python.pkgs.gunicorn}/bin/gunicorn recipes.wsgi
+        '';
+        Restart = "on-failure";
+
+        User = "tandoor_recipes";
+        DynamicUser = true;
+        StateDirectory = "tandoor-recipes";
+        WorkingDirectory = "/var/lib/tandoor-recipes";
+        RuntimeDirectory = "tandoor-recipes";
+
+        BindReadOnlyPaths = [
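+          # Entries prefixed with "-" are ignored if they do not exist on the host.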
+          "${config.environment.etc."ssl/certs/ca-certificates.crt".source}:/etc/ssl/certs/ca-certificates.crt"
+          builtins.storeDir
+          "-/etc/resolv.conf"
+          "-/etc/nsswitch.conf"
+          "-/etc/hosts"
+          "-/etc/localtime"
+          "-/run/postgresql"
+        ];
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        # gunicorn needs setuid
+        SystemCallFilter = [ "@system-service" "~@privileged" "@resources" "@setuid" "@keyring" ];
+        UMask = "0066";
+      } // lib.optionalAttrs (cfg.port < 1024) {
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+      };
+
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        ln -sf ${manage} tandoor-recipes-manage
+
+        # Let django migrate the DB as needed
+        ${pkg}/bin/tandoor-recipes migrate
+      '';
+
+      environment = env // {
+        PYTHONPATH = "${pkg.python.pkgs.makePythonPath pkg.propagatedBuildInputs}:${pkg}/lib/tandoor-recipes";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/taskserver/default.md b/nixpkgs/nixos/modules/services/misc/taskserver/default.md
new file mode 100644
index 000000000000..ee3b3908e2ae
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/taskserver/default.md
@@ -0,0 +1,93 @@
+# Taskserver {#module-services-taskserver}
+
+Taskserver is the server component of
+[Taskwarrior](https://taskwarrior.org/), a free and
+open source todo list application.
+
+*Upstream documentation:* <https://taskwarrior.org/docs/#taskd>
+
+## Configuration {#module-services-taskserver-configuration}
+
+Taskserver does all of its authentication via TLS using client certificates,
+so you either need to roll your own CA or purchase a certificate from a
+known CA, which allows creation of client certificates. These certificates
+are usually advertised as "server certificates".
+
+So in order to make it easier to handle your own CA, there is a helper tool
+called {command}`nixos-taskserver` which manages the custom CA along
+with Taskserver organisations, users and groups.
+
+While the client certificates in Taskserver only authenticate whether a user
+is allowed to connect, every user has its own UUID which identifies it as an
+entity.
+
+With {command}`nixos-taskserver` the client certificate is created
+along with the UUID of the user, so it handles all of the credentials needed
+in order to set up the Taskwarrior client to work with a Taskserver.
+
+## The nixos-taskserver tool {#module-services-taskserver-nixos-taskserver-tool}
+
+Because Taskserver by default only provides scripts to set up users
+imperatively, the {command}`nixos-taskserver` tool is used both for adding
+and deleting organisations, users and groups defined by
+[](#opt-services.taskserver.organisations) and for imperative setup.
+
+The tool is designed to not interfere if the command is used to manually set
+up some organisations, users or groups.
+
+For example, if you add a new organisation using {command}`nixos-taskserver
+org add foo`, that organisation is neither modified nor deleted, no matter
+what you define in {option}`services.taskserver.organisations`, even if
+you add the same organisation in that option.
+
+The tool is modelled to imitate the official {command}`taskd`
+command, documentation for each subcommand can be shown by using the
+{option}`--help` switch.
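+
+For instance, an organisation and a user can be created imperatively on the
+server like this (the names `other-company` and `bob` are only placeholders):
+
+```ShellSession
+$ nixos-taskserver org add other-company
+$ nixos-taskserver user add other-company bob
+```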
+
+## Declarative/automatic CA management {#module-services-taskserver-declarative-ca-management}
+
+Everything is done according to what you specify in the module options,
+however in order to set up a Taskwarrior client for synchronisation with a
+Taskserver instance, you have to transfer the keys and certificates to the
+client machine.
+
+This is done using {command}`nixos-taskserver user export $orgname
+$username`, which prints a shell script fragment to stdout that can
+either be used verbatim or adjusted to import the user on the client
+machine.
+
+For example, let's say you have the following configuration:
+```nix
+{
+  services.taskserver.enable = true;
+  services.taskserver.fqdn = "server";
+  services.taskserver.listenHost = "::";
+  services.taskserver.organisations.my-company.users = [ "alice" ];
+}
+```
+This creates an organisation called `my-company` with the
+user `alice`.
+
+Now in order to import the `alice` user to another machine
+`alicebox`, all we need to do is something like this:
+```ShellSession
+$ ssh server nixos-taskserver user export my-company alice | sh
+```
+Of course, if no SSH daemon is available on the server, you can also copy
+& paste it directly into a shell.
+
+After this step the user should be set up and you can start synchronising
+your tasks for the first time with {command}`task sync init` on
+`alicebox`.
+
+Subsequent synchronisation requests merely require the command {command}`task
+sync` after that stage.
+
+## Manual CA management {#module-services-taskserver-manual-ca-management}
+
+If you set any options within
+[services.taskserver.pki.manual](#opt-services.taskserver.pki.manual.ca.cert).*,
+{command}`nixos-taskserver` won't issue certificates, but you can
+still use it for adding or removing user accounts.
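+
+A minimal sketch of such a manual setup could look like the following, where
+the certificate and key paths are placeholders for files you provision
+yourself:
+
+```nix
+{
+  services.taskserver.enable = true;
+  services.taskserver.pki.manual = {
+    ca.cert = "/etc/taskserver/ca.cert";
+    server.cert = "/etc/taskserver/server.cert";
+    server.key = "/etc/taskserver/server.key";
+  };
+}
+```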
diff --git a/nixpkgs/nixos/modules/services/misc/taskserver/default.nix b/nixpkgs/nixos/modules/services/misc/taskserver/default.nix
new file mode 100644
index 000000000000..775b3b6d2eae
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/taskserver/default.nix
@@ -0,0 +1,570 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.taskserver;
+
+  taskd = "${pkgs.taskserver}/bin/taskd";
+
+  mkManualPkiOption = desc: mkOption {
+    type = types.nullOr types.path;
+    default = null;
+    description = lib.mdDoc ''
+      ${desc}
+
+      ::: {.note}
+      Setting this option will prevent automatic CA creation and handling.
+      :::
+    '';
+  };
+
+  manualPkiOptions = {
+    ca.cert = mkManualPkiOption ''
+      Fully qualified path to the CA certificate.
+    '';
+
+    server.cert = mkManualPkiOption ''
+      Fully qualified path to the server certificate.
+    '';
+
+    server.crl = mkManualPkiOption ''
+      Fully qualified path to the server certificate revocation list.
+    '';
+
+    server.key = mkManualPkiOption ''
+      Fully qualified path to the server key.
+    '';
+  };
+
+  mkAutoDesc = preamble: lib.mdDoc ''
+    ${preamble}
+
+    ::: {.note}
+    This option is for the automatically handled CA and will be ignored if any
+    of the {option}`services.taskserver.pki.manual.*` options are set.
+    :::
+  '';
+
+  mkExpireOption = desc: mkOption {
+    type = types.nullOr types.int;
+    default = null;
+    example = 365;
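+    # certtool's expiration_days template field uses -1 to express "no
+    # expiration", hence the mapping below.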
+    apply = val: if val == null then -1 else val;
+    description = mkAutoDesc ''
+      The expiration time of ${desc} in days or `null` for no
+      expiration time.
+    '';
+  };
+
+  autoPkiOptions = {
+    bits = mkOption {
+      type = types.int;
+      default = 4096;
+      example = 2048;
+      description = mkAutoDesc "The bit size for generated keys.";
+    };
+
+    expiration = {
+      ca = mkExpireOption "the CA certificate";
+      server = mkExpireOption "the server certificate";
+      client = mkExpireOption "client certificates";
+      crl = mkExpireOption "the certificate revocation list (CRL)";
+    };
+  };
+
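+  # The CA is created and handled automatically only when none of the
+  # services.taskserver.pki.manual.* options are set.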
+  needToCreateCA = let
+    notFound = path: let
+      dotted = concatStringsSep "." path;
+    in throw "Can't find option definitions for path `${dotted}'.";
+    findPkiDefinitions = path: attrs: let
+      mkSublist = key: val: let
+        newPath = path ++ singleton key;
+      in if isOption val
+         then attrByPath newPath (notFound newPath) cfg.pki.manual
+         else findPkiDefinitions newPath val;
+    in flatten (mapAttrsToList mkSublist attrs);
+  in all (x: x == null) (findPkiDefinitions [] manualPkiOptions);
+
+  orgOptions = { ... }: {
+    options.users = mkOption {
+      type = types.uniq (types.listOf types.str);
+      default = [];
+      example = [ "alice" "bob" ];
+      description = lib.mdDoc ''
+        A list of user names that belong to the organization.
+      '';
+    };
+
+    options.groups = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "workers" "slackers" ];
+      description = lib.mdDoc ''
+        A list of group names that belong to the organization.
+      '';
+    };
+  };
+
+  certtool = "${pkgs.gnutls.bin}/bin/certtool";
+
+  nixos-taskserver = with pkgs.python3.pkgs; buildPythonApplication {
+    name = "nixos-taskserver";
+
+    src = pkgs.runCommand "nixos-taskserver-src" { preferLocalBuild = true; } ''
+      mkdir -p "$out"
+      cat "${pkgs.substituteAll {
+        src = ./helper-tool.py;
+        inherit taskd certtool;
+        inherit (cfg) dataDir user group fqdn;
+        certBits = cfg.pki.auto.bits;
+        clientExpiration = cfg.pki.auto.expiration.client;
+        crlExpiration = cfg.pki.auto.expiration.crl;
+        isAutoConfig = if needToCreateCA then "True" else "False";
+      }}" > "$out/main.py"
+      cat > "$out/setup.py" <<EOF
+      from setuptools import setup
+      setup(name="nixos-taskserver",
+            py_modules=["main"],
+            install_requires=["Click"],
+            entry_points="[console_scripts]\\nnixos-taskserver=main:cli")
+      EOF
+    '';
+
+    propagatedBuildInputs = [ click ];
+  };
+
+in {
+  options = {
+    services.taskserver = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = let
+          url = "https://nixos.org/manual/nixos/stable/index.html#module-services-taskserver";
+        in lib.mdDoc ''
+          Whether to enable the Taskwarrior server.
+
+          More instructions about NixOS in conjunction with Taskserver can be
+          found [in the NixOS manual](${url}).
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "taskd";
+        description = lib.mdDoc "User for Taskserver.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "taskd";
+        description = lib.mdDoc "Group for Taskserver.";
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/taskserver";
+        description = lib.mdDoc "Data directory for Taskserver.";
+      };
+
+      ciphers = mkOption {
+        type = types.nullOr (types.separatedString ":");
+        default = null;
+        example = "NORMAL:-VERS-SSL3.0";
+        description = let
+          url = "https://gnutls.org/manual/html_node/Priority-Strings.html";
+        in lib.mdDoc ''
+          List of GnuTLS ciphers to use. See the GnuTLS documentation about
+          priority strings at <${url}> for full details.
+        '';
+      };
+
+      organisations = mkOption {
+        type = types.attrsOf (types.submodule orgOptions);
+        default = {};
+        example.myShinyOrganisation.users = [ "alice" "bob" ];
+        example.myShinyOrganisation.groups = [ "staff" "outsiders" ];
+        example.yetAnotherOrganisation.users = [ "foo" "bar" ];
+        description = lib.mdDoc ''
+          An attribute set where the keys name the organisation and the values
+          are a set of lists of {option}`users` and
+          {option}`groups`.
+        '';
+      };
+
+      confirmation = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Determines whether certain commands are confirmed.
+        '';
+      };
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Logs debugging information.
+        '';
+      };
+
+      extensions = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Fully qualified path of the Taskserver extension scripts.
+          Currently there are none.
+        '';
+      };
+
+      ipLog = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Logs the IP addresses of incoming requests.
+        '';
+      };
+
+      queueSize = mkOption {
+        type = types.int;
+        default = 10;
+        description = lib.mdDoc ''
+          Size of the connection backlog, see {manpage}`listen(2)`.
+        '';
+      };
+
+      requestLimit = mkOption {
+        type = types.int;
+        default = 1048576;
+        description = lib.mdDoc ''
+          Size limit of incoming requests, in bytes.
+        '';
+      };
+
+      allowedClientIDs = mkOption {
+        type = with types; either str (listOf str);
+        default = [];
+        example = [ "[Tt]ask [2-9]+" ];
+        description = lib.mdDoc ''
+          A list of regular expressions that are matched against the reported
+          client id (such as `task 2.3.0`).
+
+          The values `all` or `none` have
+          special meaning. Overridden by any entry in the option
+          {option}`services.taskserver.disallowedClientIDs`.
+        '';
+      };
+
+      disallowedClientIDs = mkOption {
+        type = with types; either str (listOf str);
+        default = [];
+        example = [ "[Tt]ask [2-9]+" ];
+        description = lib.mdDoc ''
+          A list of regular expressions that are matched against the reported
+          client id (such as `task 2.3.0`).
+
+          The values `all` or `none` have
+          special meaning. Any entry here overrides those in
+          {option}`services.taskserver.allowedClientIDs`.
+        '';
+      };
+
+      listenHost = mkOption {
+        type = types.str;
+        default = "localhost";
+        example = "::";
+        description = lib.mdDoc ''
+          The address (IPv4, IPv6 or DNS) to listen on.
+        '';
+      };
+
+      listenPort = mkOption {
+        type = types.int;
+        default = 53589;
+        description = lib.mdDoc ''
+          Port number of the Taskserver.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open the firewall for the specified Taskserver port.
+        '';
+      };
+
+      fqdn = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          The fully qualified domain name of this server, which is also used
+          as the common name in the certificates.
+        '';
+      };
+
+      trust = mkOption {
+        type = types.enum [ "allow all" "strict" ];
+        default = "strict";
+        description = lib.mdDoc ''
+          Determines how client certificates are validated.
+
+          The value `allow all` performs no client
+          certificate validation. This is not recommended. The value
+          `strict` causes the client certificate to be
+          validated against a CA.
+        '';
+      };
+
+      pki.manual = manualPkiOptions;
+      pki.auto = autoPkiOptions;
+
+      config = mkOption {
+        type = types.attrs;
+        example.client.cert = "/tmp/debugging.cert";
+        description = lib.mdDoc ''
+          Configuration options to pass to Taskserver.
+
+          The options here are the same as described in
+          {manpage}`taskdrc(5)`, but with one difference:
+
+          The `server` option is
+          `server.listen` here, because the
+          `server` option would collide with other options
+          like `server.cert` and we would run into a type error
+          (attribute set versus string).
+
+          Nix types like integers or booleans are automatically converted to
+          the right values Taskserver would expect.
+        '';
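+        # Flatten the nested attribute set into "key.path=value" strings for
+        # taskd's command line; the special path ["server" "listen"] maps back
+        # to the plain "server" setting.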
+        apply = let
+          mkKey = path: if path == ["server" "listen"] then "server"
+                        else concatStringsSep "." path;
+          recurse = path: attrs: let
+            mapper = name: val: let
+              newPath = path ++ [ name ];
+              scalar = if val == true then "true"
+                       else if val == false then "false"
+                       else toString val;
+            in if isAttrs val then recurse newPath val
+               else [ "${mkKey newPath}=${scalar}" ];
+          in concatLists (mapAttrsToList mapper attrs);
+        in recurse [];
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule ["services" "taskserver" "extraConfig"] ''
+      This option was removed in favor of `services.taskserver.config` with
+      different semantics (it's now an attribute set instead of plain lines).
+
+      Please look up the documentation of `services.taskserver.config' to get
+      more information about the new way to pass additional configuration
+      options.
+    '')
+  ];
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      environment.systemPackages = [ nixos-taskserver ];
+
+      users.users = optionalAttrs (cfg.user == "taskd") {
+        taskd = {
+          uid = config.ids.uids.taskd;
+          description = "Taskserver user";
+          group = cfg.group;
+        };
+      };
+
+      users.groups = optionalAttrs (cfg.group == "taskd") {
+        taskd.gid = config.ids.gids.taskd;
+      };
+
+      services.taskserver.config = {
+        # systemd related
+        daemon = false;
+        log = "-";
+
+        # logging
+        debug = cfg.debug;
+        ip.log = cfg.ipLog;
+
+        # general
+        ciphers = cfg.ciphers;
+        confirmation = cfg.confirmation;
+        extensions = cfg.extensions;
+        queue.size = cfg.queueSize;
+        request.limit = cfg.requestLimit;
+
+        # client
+        client.allow = cfg.allowedClientIDs;
+        client.deny = cfg.disallowedClientIDs;
+
+        # server
+        trust = cfg.trust;
+        server = {
+          listen = "${cfg.listenHost}:${toString cfg.listenPort}";
+        } // (if needToCreateCA then {
+          cert = "${cfg.dataDir}/keys/server.cert";
+          key = "${cfg.dataDir}/keys/server.key";
+          crl = "${cfg.dataDir}/keys/server.crl";
+        } else {
+          cert = "${cfg.pki.manual.server.cert}";
+          key = "${cfg.pki.manual.server.key}";
+          ${mapNullable (_: "crl") cfg.pki.manual.server.crl} = "${cfg.pki.manual.server.crl}";
+        });
+
+        ca.cert = if needToCreateCA then "${cfg.dataDir}/keys/ca.cert"
+                  else "${cfg.pki.manual.ca.cert}";
+      };
+
+      systemd.services.taskserver-init = {
+        wantedBy = [ "taskserver.service" ];
+        before = [ "taskserver.service" ];
+        description = "Initialize Taskserver Data Directory";
+
+        preStart = ''
+          mkdir -m 0770 -p "${cfg.dataDir}"
+          chown "${cfg.user}:${cfg.group}" "${cfg.dataDir}"
+        '';
+
+        script = ''
+          ${taskd} init
+          touch "${cfg.dataDir}/.is_initialized"
+        '';
+
+        environment.TASKDDATA = cfg.dataDir;
+
+        unitConfig.ConditionPathExists = "!${cfg.dataDir}/.is_initialized";
+
+        serviceConfig.Type = "oneshot";
+        serviceConfig.User = cfg.user;
+        serviceConfig.Group = cfg.group;
+        serviceConfig.PermissionsStartOnly = true;
+        serviceConfig.PrivateNetwork = true;
+        serviceConfig.PrivateDevices = true;
+        serviceConfig.PrivateTmp = true;
+      };
+
+      systemd.services.taskserver = {
+        description = "Taskwarrior Server";
+
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+
+        environment.TASKDDATA = cfg.dataDir;
+
+        preStart = let
+          jsonOrgs = builtins.toJSON cfg.organisations;
+          jsonFile = pkgs.writeText "orgs.json" jsonOrgs;
+          helperTool = "${nixos-taskserver}/bin/nixos-taskserver";
+        in "${helperTool} process-json '${jsonFile}'";
+
+        serviceConfig = {
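+          # The leading "@" makes systemd pass "taskd" as argv[0] to the
+          # executable, see systemd.service(5).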
+          ExecStart = let
+            mkCfgFlag = flag: escapeShellArg "--${flag}";
+            cfgFlags = concatMapStringsSep " " mkCfgFlag cfg.config;
+          in "@${taskd} taskd server ${cfgFlags}";
+          ExecReload = "${pkgs.coreutils}/bin/kill -USR1 $MAINPID";
+          Restart = "on-failure";
+          PermissionsStartOnly = true;
+          PrivateTmp = true;
+          PrivateDevices = true;
+          User = cfg.user;
+          Group = cfg.group;
+        };
+      };
+    })
+    (mkIf (cfg.enable && needToCreateCA) {
+      systemd.services.taskserver-ca = {
+        wantedBy = [ "taskserver.service" ];
+        after = [ "taskserver-init.service" ];
+        before = [ "taskserver.service" ];
+        description = "Initialize CA for TaskServer";
+        serviceConfig.Type = "oneshot";
+        serviceConfig.UMask = "0077";
+        serviceConfig.PrivateNetwork = true;
+        serviceConfig.PrivateTmp = true;
+
+        script = ''
+          silent_certtool() {
+            if ! output="$("${certtool}" "$@" 2>&1)"; then
+              echo "GNUTLS certtool invocation failed with output:" >&2
+              echo "$output" >&2
+            fi
+          }
+
+          mkdir -m 0700 -p "${cfg.dataDir}/keys"
+          chown root:root "${cfg.dataDir}/keys"
+
+          if [ ! -e "${cfg.dataDir}/keys/ca.key" ]; then
+            silent_certtool -p \
+              --bits ${toString cfg.pki.auto.bits} \
+              --outfile "${cfg.dataDir}/keys/ca.key"
+            silent_certtool -s \
+              --template "${pkgs.writeText "taskserver-ca.template" ''
+                cn = ${cfg.fqdn}
+                expiration_days = ${toString cfg.pki.auto.expiration.ca}
+                cert_signing_key
+                ca
+              ''}" \
+              --load-privkey "${cfg.dataDir}/keys/ca.key" \
+              --outfile "${cfg.dataDir}/keys/ca.cert"
+
+            chgrp "${cfg.group}" "${cfg.dataDir}/keys/ca.cert"
+            chmod g+r "${cfg.dataDir}/keys/ca.cert"
+          fi
+
+          if [ ! -e "${cfg.dataDir}/keys/server.key" ]; then
+            silent_certtool -p \
+              --bits ${toString cfg.pki.auto.bits} \
+              --outfile "${cfg.dataDir}/keys/server.key"
+
+            silent_certtool -c \
+              --template "${pkgs.writeText "taskserver-cert.template" ''
+                cn = ${cfg.fqdn}
+                expiration_days = ${toString cfg.pki.auto.expiration.server}
+                tls_www_server
+                encryption_key
+                signing_key
+              ''}" \
+              --load-ca-privkey "${cfg.dataDir}/keys/ca.key" \
+              --load-ca-certificate "${cfg.dataDir}/keys/ca.cert" \
+              --load-privkey "${cfg.dataDir}/keys/server.key" \
+              --outfile "${cfg.dataDir}/keys/server.cert"
+
+            chgrp "${cfg.group}" \
+              "${cfg.dataDir}/keys/server.key" \
+              "${cfg.dataDir}/keys/server.cert"
+
+            chmod g+r \
+              "${cfg.dataDir}/keys/server.key" \
+              "${cfg.dataDir}/keys/server.cert"
+          fi
+
+          if [ ! -e "${cfg.dataDir}/keys/server.crl" ]; then
+            silent_certtool --generate-crl \
+              --template "${pkgs.writeText "taskserver-crl.template" ''
+                expiration_days = ${toString cfg.pki.auto.expiration.crl}
+              ''}" \
+              --load-ca-privkey "${cfg.dataDir}/keys/ca.key" \
+              --load-ca-certificate "${cfg.dataDir}/keys/ca.cert" \
+              --outfile "${cfg.dataDir}/keys/server.crl"
+
+            chgrp "${cfg.group}" "${cfg.dataDir}/keys/server.crl"
+            chmod g+r "${cfg.dataDir}/keys/server.crl"
+          fi
+
+          chmod go+x "${cfg.dataDir}/keys"
+        '';
+      };
+    })
+    (mkIf (cfg.enable && cfg.openFirewall) {
+      networking.firewall.allowedTCPPorts = [ cfg.listenPort ];
+    })
+  ];
+
+  meta.doc = ./default.md;
+}
diff --git a/nixpkgs/nixos/modules/services/misc/taskserver/helper-tool.py b/nixpkgs/nixos/modules/services/misc/taskserver/helper-tool.py
new file mode 100644
index 000000000000..fec05728b2b6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/taskserver/helper-tool.py
@@ -0,0 +1,688 @@
+import grp
+import json
+import pwd
+import os
+import re
+import string
+import subprocess
+import sys
+
+from contextlib import contextmanager
+from shutil import rmtree
+from tempfile import NamedTemporaryFile
+
+import click
+
+IS_AUTO_CONFIG = @isAutoConfig@ # NOQA
+CERTTOOL_COMMAND = "@certtool@"
+CERT_BITS = "@certBits@"
+CLIENT_EXPIRATION = "@clientExpiration@"
+CRL_EXPIRATION = "@crlExpiration@"
+
+TASKD_COMMAND = "@taskd@"
+TASKD_DATA_DIR = "@dataDir@"
+TASKD_USER = "@user@"
+TASKD_GROUP = "@group@"
+FQDN = "@fqdn@"
+
+CA_KEY = os.path.join(TASKD_DATA_DIR, "keys", "ca.key")
+CA_CERT = os.path.join(TASKD_DATA_DIR, "keys", "ca.cert")
+CRL_FILE = os.path.join(TASKD_DATA_DIR, "keys", "server.crl")
+
+RE_CONFIGUSER = re.compile(r'^\s*user\s*=(.*)$')
+RE_USERKEY = re.compile(r'New user key: (.+)$', re.MULTILINE)
+
+
+def lazyprop(fun):
+    """
+    Decorator which only evaluates the specified function when accessed.
+    """
+    name = '_lazy_' + fun.__name__
+
+    @property
+    def _lazy(self):
+        val = getattr(self, name, None)
+        if val is None:
+            val = fun(self)
+            setattr(self, name, val)
+        return val
+
+    return _lazy
+
+
+class TaskdError(OSError):
+    pass
+
+
+def run_as_taskd_user():
+    uid = pwd.getpwnam(TASKD_USER).pw_uid
+    gid = grp.getgrnam(TASKD_GROUP).gr_gid
+    os.setgid(gid)
+    os.setuid(uid)
+
+
+def taskd_cmd(cmd, *args, **kwargs):
+    """
+    Invoke taskd with the specified command with the privileges of the 'taskd'
+    user and 'taskd' group.
+
+    If 'capture_stdout' is passed as a keyword argument with the value True,
+    the return value is the output the command printed to stdout.
+    """
+    capture_stdout = kwargs.pop("capture_stdout", False)
+    fun = subprocess.check_output if capture_stdout else subprocess.check_call
+    return fun(
+        [TASKD_COMMAND, cmd, "--data", TASKD_DATA_DIR] + list(args),
+        preexec_fn=run_as_taskd_user,
+        **kwargs
+    )
+
+
+def certtool_cmd(*args, **kwargs):
+    """
+    Invoke certtool from GNUTLS and return the output of the command.
+
+    The provided arguments are added to the certtool command and keyword
+    arguments are added to subprocess.check_output().
+
+    Note that this suppresses all output of certtool; the output is only
+    available from the raised exception when the command exits unsuccessfully.
+    """
+    return subprocess.check_output(
+        [CERTTOOL_COMMAND] + list(args),
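+        # Tighten the umask so that any key material certtool writes is only
+        # readable by its owner.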
+        preexec_fn=lambda: os.umask(0o077),
+        stderr=subprocess.STDOUT,
+        **kwargs
+    )
+
+
+def label(msg):
+    if sys.stdout.isatty() or sys.stderr.isatty():
+        sys.stderr.write(msg + "\n")
+
+
+def mkpath(*args):
+    return os.path.join(TASKD_DATA_DIR, "orgs", *args)
+
+
+def mark_imperative(*path):
+    """
+    Mark the specified path as being imperatively managed by creating an empty
+    file called ".imperative", so that it doesn't interfere with the
+    declarative configuration.
+    """
+    open(os.path.join(mkpath(*path), ".imperative"), 'a').close()
+
+
+def is_imperative(*path):
+    """
+    Check whether the given path is marked as imperative, see mark_imperative()
+    for more information.
+    """
+    full_path = []
+    for component in path:
+        full_path.append(component)
+        if os.path.exists(os.path.join(mkpath(*full_path), ".imperative")):
+            return True
+    return False
+
+
+def fetch_username(org, key):
+    for line in open(mkpath(org, "users", key, "config"), "r"):
+        match = RE_CONFIGUSER.match(line)
+        if match is None:
+            continue
+        return match.group(1).strip()
+    return None
+
+
+@contextmanager
+def create_template(contents):
+    """
+    Generate a temporary file with the specified contents as a list of strings
+    and yield its path as the context.
+    """
+    template = NamedTemporaryFile(mode="w", prefix="certtool-template")
+    template.writelines(map(lambda l: l + "\n", contents))
+    template.flush()
+    yield template.name
+    template.close()
+
+
+def generate_key(org, user):
+    if not IS_AUTO_CONFIG:
+        msg = "Automatic PKI handling is disabled, you need to " \
+              "manually issue a client certificate for user {}.\n"
+        sys.stderr.write(msg.format(user))
+        return
+
+    basedir = os.path.join(TASKD_DATA_DIR, "keys", org, user)
+    if os.path.exists(basedir):
+        raise OSError("Keyfile directory for {} already exists.".format(user))
+
+    privkey = os.path.join(basedir, "private.key")
+    pubcert = os.path.join(basedir, "public.cert")
+
+    try:
+        os.makedirs(basedir, mode=0o700)
+
+        certtool_cmd("-p", "--bits", CERT_BITS, "--outfile", privkey)
+
+        template_data = [
+            "organization = {0}".format(org),
+            "cn = {}".format(FQDN),
+            "expiration_days = {}".format(CLIENT_EXPIRATION),
+            "tls_www_client",
+            "encryption_key",
+            "signing_key"
+        ]
+
+        with create_template(template_data) as template:
+            certtool_cmd(
+                "-c",
+                "--load-privkey", privkey,
+                "--load-ca-privkey", CA_KEY,
+                "--load-ca-certificate", CA_CERT,
+                "--template", template,
+                "--outfile", pubcert
+            )
+    except:
+        rmtree(basedir)
+        raise
+
+
+def revoke_key(org, user):
+    basedir = os.path.join(TASKD_DATA_DIR, "keys", org, user)
+    if not os.path.exists(basedir):
+        raise OSError("Keyfile directory for {} doesn't exist.".format(user))
+
+    pubcert = os.path.join(basedir, "public.cert")
+
+    expiration = "expiration_days = {}".format(CRL_EXPIRATION)
+
+    with create_template([expiration]) as template:
+        oldcrl = NamedTemporaryFile(mode="wb", prefix="old-crl")
+        oldcrl.write(open(CRL_FILE, "rb").read())
+        oldcrl.flush()
+        certtool_cmd(
+            "--generate-crl",
+            "--load-crl", oldcrl.name,
+            "--load-ca-privkey", CA_KEY,
+            "--load-ca-certificate", CA_CERT,
+            "--load-certificate", pubcert,
+            "--template", template,
+            "--outfile", CRL_FILE
+        )
+        oldcrl.close()
+    rmtree(basedir)
+
+
+def is_key_line(line, match):
+    return line.startswith("---") and line.lstrip("- ").startswith(match)
+
+
+def getkey(*args):
+    path = os.path.join(TASKD_DATA_DIR, "keys", *args)
+    buf = []
+    for line in open(path, "r"):
+        if len(buf) == 0:
+            if is_key_line(line, "BEGIN"):
+                buf.append(line)
+            continue
+
+        buf.append(line)
+
+        if is_key_line(line, "END"):
+            return ''.join(buf)
+    raise IOError("Unable to get key from {}.".format(path))
+
+
+def mktaskkey(cfg, path, keydata):
+    heredoc = 'cat > "{}" <<EOF\n{}EOF'.format(path, keydata)
+    cmd = 'task config taskd.{} -- "{}"'.format(cfg, path)
+    return heredoc + "\n" + cmd
+
+
+class User(object):
+    def __init__(self, org, name, key):
+        self.__org = org
+        self.name = name
+        self.key = key
+
+    def export(self):
+        credentials = '/'.join([self.__org, self.name, self.key])
+        allow_unquoted = string.ascii_letters + string.digits + "/-_."
+        if not all((c in allow_unquoted) for c in credentials):
+            credentials = "'" + credentials.replace("'", r"'\''") + "'"
+
+        script = []
+
+        if IS_AUTO_CONFIG:
+            pubcert = getkey(self.__org, self.name, "public.cert")
+            privkey = getkey(self.__org, self.name, "private.key")
+            cacert = getkey("ca.cert")
+
+            keydir = "${TASKDATA:-$HOME/.task}/keys"
+
+            script += [
+                "umask 0077",
+                'mkdir -p "{}"'.format(keydir),
+                mktaskkey("certificate", os.path.join(keydir, "public.cert"),
+                          pubcert),
+                mktaskkey("key", os.path.join(keydir, "private.key"), privkey),
+                mktaskkey("ca", os.path.join(keydir, "ca.cert"), cacert)
+            ]
+
+        script.append(
+            "task config taskd.credentials -- {}".format(credentials)
+        )
+
+        return "\n".join(script) + "\n"
+
+
+class Group(object):
+    def __init__(self, org, name):
+        self.__org = org
+        self.name = name
+
+
+class Organisation(object):
+    def __init__(self, name, ignore_imperative):
+        self.name = name
+        self.ignore_imperative = ignore_imperative
+
+    def add_user(self, name):
+        """
+        Create a new user along with a certificate and key.
+
+        Returns a 'User' object or None if the user already exists.
+        """
+        if self.ignore_imperative and is_imperative(self.name):
+            return None
+        if name not in self.users.keys():
+            output = taskd_cmd("add", "user", self.name, name,
+                               capture_stdout=True, encoding='utf-8')
+            key = RE_USERKEY.search(output)
+            if key is None:
+                msg = "Unable to find key while creating user {}."
+                raise TaskdError(msg.format(name))
+
+            generate_key(self.name, name)
+            newuser = User(self.name, name, key.group(1))
+            self._lazy_users[name] = newuser
+            return newuser
+        return None
+
+    def del_user(self, name):
+        """
+        Delete a user and revoke its keys.
+        """
+        if name in self.users.keys():
+            user = self.get_user(name)
+            if self.ignore_imperative and \
+               is_imperative(self.name, "users", user.key):
+                return
+
+            # Work around https://bug.tasktools.org/browse/TD-40:
+            rmtree(mkpath(self.name, "users", user.key))
+
+            revoke_key(self.name, name)
+            del self._lazy_users[name]
+
+    def add_group(self, name):
+        """
+        Create a new group.
+
+        Returns a 'Group' object or None if the group already exists.
+        """
+        if self.ignore_imperative and is_imperative(self.name):
+            return None
+        if name not in self.groups.keys():
+            taskd_cmd("add", "group", self.name, name)
+            newgroup = Group(self.name, name)
+            self._lazy_groups[name] = newgroup
+            return newgroup
+        return None
+
+    def del_group(self, name):
+        """
+        Delete a group.
+        """
+        if name in self.groups.keys():
+            if self.ignore_imperative and \
+               is_imperative(self.name, "groups", name):
+                return
+            taskd_cmd("remove", "group", self.name, name)
+            del self._lazy_groups[name]
+
+    def get_user(self, name):
+        return self.users.get(name)
+
+    @lazyprop
+    def users(self):
+        result = {}
+        for key in os.listdir(mkpath(self.name, "users")):
+            user = fetch_username(self.name, key)
+            if user is not None:
+                result[user] = User(self.name, user, key)
+        return result
+
+    def get_group(self, name):
+        return self.groups.get(name)
+
+    @lazyprop
+    def groups(self):
+        result = {}
+        for group in os.listdir(mkpath(self.name, "groups")):
+            result[group] = Group(self.name, group)
+        return result
+
+
+class Manager(object):
+    def __init__(self, ignore_imperative=False):
+        """
+        Instantiates an organisations manager.
+
+        If ignore_imperative is True, every action that modifies data first
+        checks whether the affected entity was created imperatively and, if
+        so, performs no operation.
+        """
+        self.ignore_imperative = ignore_imperative
+
+    def add_org(self, name):
+        """
+        Create a new organisation.
+
+        Returns an 'Organisation' object or None if the organisation already
+        exists.
+        """
+        if name not in self.orgs.keys():
+            taskd_cmd("add", "org", name)
+            neworg = Organisation(name, self.ignore_imperative)
+            self._lazy_orgs[name] = neworg
+            return neworg
+        return None
+
+    def del_org(self, name):
+        """
+        Delete and revoke keys of an organisation with all its users and
+        groups.
+        """
+        org = self.get_org(name)
+        if org is not None:
+            if self.ignore_imperative and is_imperative(name):
+                return
+            for user in list(org.users.keys()):
+                org.del_user(user)
+            for group in list(org.groups.keys()):
+                org.del_group(group)
+            taskd_cmd("remove", "org", name)
+            del self._lazy_orgs[name]
+
+    def get_org(self, name):
+        return self.orgs.get(name)
+
+    @lazyprop
+    def orgs(self):
+        result = {}
+        for org in os.listdir(mkpath()):
+            result[org] = Organisation(org, self.ignore_imperative)
+        return result
+
+
+class OrganisationType(click.ParamType):
+    name = 'organisation'
+
+    def convert(self, value, param, ctx):
+        org = Manager().get_org(value)
+        if org is None:
+            self.fail("Organisation {} does not exist.".format(value))
+        return org
+
+ORGANISATION = OrganisationType()
+
+
+@click.group()
+@click.pass_context
+def cli(ctx):
+    """
+    Manage Taskserver users and certificates
+    """
+    if not IS_AUTO_CONFIG:
+        return
+    for path in (CA_KEY, CA_CERT, CRL_FILE):
+        if not os.path.exists(path):
+            msg = "CA setup not done or incomplete, missing file {}."
+            ctx.fail(msg.format(path))
+
+
+@cli.group("org")
+def org_cli():
+    """
+    Manage organisations
+    """
+    pass
+
+
+@cli.group("user")
+def user_cli():
+    """
+    Manage users
+    """
+    pass
+
+
+@cli.group("group")
+def group_cli():
+    """
+    Manage groups
+    """
+    pass
+
+
+@user_cli.command("list")
+@click.argument("organisation", type=ORGANISATION)
+def list_users(organisation):
+    """
+    List all users belonging to the specified organisation.
+    """
+    label("The following users exist for {}:".format(organisation.name))
+    for user in organisation.users.values():
+        sys.stdout.write(user.name + "\n")
+
+
+@group_cli.command("list")
+@click.argument("organisation", type=ORGANISATION)
+def list_groups(organisation):
+    """
+    List all groups belonging to the specified organisation.
+    """
+    label("The following groups exist for {}:".format(organisation.name))
+    for group in organisation.groups.values():
+        sys.stdout.write(group.name + "\n")
+
+
+@org_cli.command("list")
+def list_orgs():
+    """
+    List available organisations
+    """
+    label("The following organisations exist:")
+    for org in Manager().orgs.values():
+        sys.stdout.write(org.name + "\n")
+
+
+@user_cli.command("getkey")
+@click.argument("organisation", type=ORGANISATION)
+@click.argument("user")
+def get_uuid(organisation, user):
+    """
+    Get the UUID of the specified user belonging to the specified organisation.
+    """
+    userobj = organisation.get_user(user)
+    if userobj is None:
+        msg = "User {} doesn't exist in organisation {}."
+        sys.exit(msg.format(user, organisation.name))
+
+    label("User {} has the following UUID:".format(userobj.name))
+    sys.stdout.write(userobj.key + "\n")
+
+
+@user_cli.command("export")
+@click.argument("organisation", type=ORGANISATION)
+@click.argument("user")
+def export_user(organisation, user):
+    """
+    Export a user of the specified organisation as a series of shell commands
+    that can be used on the client side to easily import the certificates.
+
+    Note that the private key will be exported as well, so use this with care!
+    """
+    userobj = organisation.get_user(user)
+    if userobj is None:
+        msg = "User {} doesn't exist in organisation {}."
+        sys.exit(msg.format(user, organisation.name))
+
+    sys.stdout.write(userobj.export())
+
+
+@org_cli.command("add")
+@click.argument("name")
+def add_org(name):
+    """
+    Create an organisation with the specified name.
+    """
+    if os.path.exists(mkpath(name)):
+        msg = "Organisation with name {} already exists."
+        sys.exit(msg.format(name))
+
+    taskd_cmd("add", "org", name)
+    mark_imperative(name)
+
+
+@org_cli.command("remove")
+@click.argument("name")
+def del_org(name):
+    """
+    Delete the organisation with the specified name.
+
+    All of the users and groups will be deleted as well and client certificates
+    will be revoked.
+    """
+    Manager().del_org(name)
+    msg = ("Organisation {} deleted. Be sure to restart the Taskserver"
+           " using 'systemctl restart taskserver.service' in order for"
+           " the certificate revocation to apply.")
+    click.echo(msg.format(name), err=True)
+
+
+@user_cli.command("add")
+@click.argument("organisation", type=ORGANISATION)
+@click.argument("user")
+def add_user(organisation, user):
+    """
+    Create a user for the given organisation along with a client certificate
+    and print the key of the new user.
+
+    The client certificate along with its public key can be shown via the
+    'user export' subcommand.
+    """
+    userobj = organisation.add_user(user)
+    if userobj is None:
+        msg = "User {} already exists in organisation {}."
+        sys.exit(msg.format(user, organisation))
+    else:
+        mark_imperative(organisation.name, "users", userobj.key)
+
+
+@user_cli.command("remove")
+@click.argument("organisation", type=ORGANISATION)
+@click.argument("user")
+def del_user(organisation, user):
+    """
+    Delete a user from the given organisation.
+
+    This will also revoke the client certificate of the given user.
+    """
+    organisation.del_user(user)
+    msg = ("User {} deleted. Be sure to restart the Taskserver using"
+           " 'systemctl restart taskserver.service' in order for the"
+           " certificate revocation to apply.")
+    click.echo(msg.format(user), err=True)
+
+
+@group_cli.command("add")
+@click.argument("organisation", type=ORGANISATION)
+@click.argument("group")
+def add_group(organisation, group):
+    """
+    Create a group for the given organisation.
+    """
+    groupobj = organisation.add_group(group)
+    if groupobj is None:
+        msg = "Group {} already exists in organisation {}."
+        sys.exit(msg.format(group, organisation))
+    else:
+        mark_imperative(organisation.name, "groups", groupobj.name)
+
+
+@group_cli.command("remove")
+@click.argument("organisation", type=ORGANISATION)
+@click.argument("group")
+def del_group(organisation, group):
+    """
+    Delete a group from the given organisation.
+    """
+    organisation.del_group(group)
+    click.echo("Group {} deleted.".format(group), err=True)
+
+
+def add_or_delete(old, new, add_fun, del_fun):
+    """
+    Given an 'old' and 'new' list, figure out the intersections and invoke
+    'add_fun' against every element that is not in the 'old' list and 'del_fun'
+    against every element that is not in the 'new' list.
+
+    Returns a tuple where the first element is the list of elements that were
+    added and the second element consisting of elements that were deleted.
+    """
+    old_set = set(old)
+    new_set = set(new)
+    to_delete = old_set - new_set
+    to_add = new_set - old_set
+    for elem in to_delete:
+        del_fun(elem)
+    for elem in to_add:
+        add_fun(elem)
+    return to_add, to_delete
+
+
+@cli.command("process-json")
+@click.argument('json-file', type=click.File('rb'))
+def process_json(json_file):
+    """
+    Create and delete users, groups and organisations based on a JSON file.
+
+    The structure of this file is exactly the same as the
+    'services.taskserver.organisations' option of the NixOS module and is used
+    for declaratively adding and deleting users.
+
+    Hence this subcommand is not recommended outside of the scope of the NixOS
+    module.
+    """
+    data = json.load(json_file)
+
+    mgr = Manager(ignore_imperative=True)
+    add_or_delete(mgr.orgs.keys(), data.keys(), mgr.add_org, mgr.del_org)
+
+    for org in mgr.orgs.values():
+        if is_imperative(org.name):
+            continue
+        add_or_delete(org.users.keys(), data[org.name]['users'],
+                      org.add_user, org.del_user)
+        add_or_delete(org.groups.keys(), data[org.name]['groups'],
+                      org.add_group, org.del_group)
+
+
+if __name__ == '__main__':
+    cli()
diff --git a/nixpkgs/nixos/modules/services/misc/tautulli.nix b/nixpkgs/nixos/modules/services/misc/tautulli.nix
new file mode 100644
index 000000000000..b29e9dc0c8d5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/tautulli.nix
@@ -0,0 +1,89 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.tautulli;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "plexpy" ] [ "services" "tautulli" ])
+  ];
+
+  options = {
+    services.tautulli = {
+      enable = mkEnableOption (lib.mdDoc "Tautulli Plex Monitor");
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/plexpy";
+        description = lib.mdDoc "The directory where Tautulli stores its data files.";
+      };
+
+      configFile = mkOption {
+        type = types.str;
+        default = "/var/lib/plexpy/config.ini";
+        description = lib.mdDoc "The location of Tautulli's config file.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8181;
+        description = lib.mdDoc "TCP port where Tautulli listens.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for Tautulli.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "plexpy";
+        description = lib.mdDoc "User account under which Tautulli runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nogroup";
+        description = lib.mdDoc "Group under which Tautulli runs.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.tautulli;
+        defaultText = literalExpression "pkgs.tautulli";
+        description = lib.mdDoc ''
+          The Tautulli package to use.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.tautulli = {
+      description = "Tautulli Plex Monitor";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        GuessMainPID = "false";
+        ExecStart = "${cfg.package}/bin/tautulli --datadir ${cfg.dataDir} --config ${cfg.configFile} --port ${toString cfg.port} --pidfile ${cfg.dataDir}/tautulli.pid --nolaunch";
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+    users.users = mkIf (cfg.user == "plexpy") {
+      plexpy = { group = cfg.group; uid = config.ids.uids.plexpy; };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/tiddlywiki.nix b/nixpkgs/nixos/modules/services/misc/tiddlywiki.nix
new file mode 100644
index 000000000000..849f53ca2d48
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/tiddlywiki.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.tiddlywiki;
+  listenParams = concatStrings (mapAttrsToList (n: v: " '${n}=${toString v}' ") cfg.listenOptions);
+  exe = "${pkgs.nodePackages.tiddlywiki}/lib/node_modules/.bin/tiddlywiki";
+  name = "tiddlywiki";
+  dataDir = "/var/lib/" + name;
+
+in {
+
+  options.services.tiddlywiki = {
+
+    enable = mkEnableOption (lib.mdDoc "TiddlyWiki nodejs server");
+
+    listenOptions = mkOption {
+      type = types.attrs;
+      default = {};
+      example = {
+        credentials = "../credentials.csv";
+        readers="(authenticated)";
+        port = 3456;
+      };
+      description = lib.mdDoc ''
+        Parameters passed to `--listen` command.
+        Refer to <https://tiddlywiki.com/#WebServer>
+        for details on supported values.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd = {
+      services.tiddlywiki = {
+        description = "TiddlyWiki nodejs server";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          Type = "simple";
+          Restart = "on-failure";
+          DynamicUser = true;
+          StateDirectory = name;
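+          # The leading "-" lets startup continue even if initialisation
+          # fails, e.g. when the wiki folder is already initialised.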
+          ExecStartPre = "-${exe} ${dataDir} --init server";
+          ExecStart = "${exe} ${dataDir} --listen ${listenParams}";
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/tp-auto-kbbl.nix b/nixpkgs/nixos/modules/services/misc/tp-auto-kbbl.nix
new file mode 100644
index 000000000000..1076c814e86c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/tp-auto-kbbl.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.tp-auto-kbbl;
+
+in {
+  meta.maintainers = with maintainers; [ sebtm ];
+
+  options = {
+    services.tp-auto-kbbl = {
+      enable = mkEnableOption (lib.mdDoc "auto toggle keyboard back-lighting on Thinkpads (and maybe other laptops) for Linux");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.tp-auto-kbbl;
+        defaultText = literalExpression "pkgs.tp-auto-kbbl";
+        description = lib.mdDoc "Package providing {command}`tp-auto-kbbl`.";
+      };
+
+      arguments = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of arguments appended to `./tp-auto-kbbl --device [device] [arguments]`
+        '';
+      };
+
+      device = mkOption {
+        type = types.str;
+        default = "/dev/input/event0";
+        description = lib.mdDoc "Device watched for activities.";
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.tp-auto-kbbl = {
+      serviceConfig = {
+        ExecStart = concatStringsSep " "
+          ([ "${cfg.package}/bin/tp-auto-kbbl" "--device ${cfg.device}" ] ++ cfg.arguments);
+        Restart = "always";
+        Type = "simple";
+      };
+
+      unitConfig = {
+        Description = "Auto toggle keyboard backlight";
+        Documentation = "https://github.com/saibotd/tp-auto-kbbl";
+        After = [ "dbus.service" ];
+      };
+
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/misc/tzupdate.nix b/nixpkgs/nixos/modules/services/misc/tzupdate.nix
new file mode 100644
index 000000000000..300a578f7c4a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/tzupdate.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.tzupdate;
+in {
+  options.services.tzupdate = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable the tzupdate timezone updating service. This provides
+        a one-shot service which can be activated with systemctl to
+        update the timezone.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # We need to have imperative time zone management for this to work.
+    # This will give users an error if they have set an explicit time
+    # zone, which is better than silently overriding it.
+    time.timeZone = null;
+
+    # We provide a one-shot service which can be manually run. We could
+    # provide a service that runs on startup, but it's tricky to get
+    # a service to run after you have *internet* access.
+    systemd.services.tzupdate = {
+      description = "tzupdate timezone update service";
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        # We could link directly into pkgs.tzdata, but at least timedatectl seems
+        # to expect the symlink to point directly to a file in etc.
+        # Setting the "debian timezone file" to point at /dev/null stops it doing anything.
+        ExecStart = "${pkgs.tzupdate}/bin/tzupdate -z /etc/zoneinfo -d /dev/null";
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.michaelpj ];
+}
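A short usage sketch: enabling the module forces `time.timeZone = null` (imperative management), and the timezone is then set on demand by starting the one-shot unit.

```nix
{
  services.tzupdate.enable = true;
}
```

After a rebuild, run `systemctl start tzupdate.service` whenever the timezone should be re-detected, for example from a network dispatcher hook.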
diff --git a/nixpkgs/nixos/modules/services/misc/uhub.nix b/nixpkgs/nixos/modules/services/misc/uhub.nix
new file mode 100644
index 000000000000..80266b024e35
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/uhub.nix
@@ -0,0 +1,116 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  settingsFormat = {
+    type = with lib.types; attrsOf (oneOf [ bool int str ]);
+    generate = name: attrs:
+      pkgs.writeText name (lib.strings.concatStringsSep "\n"
+        (lib.attrsets.mapAttrsToList
+          (key: value: "${key}=${builtins.toJSON value}") attrs));
+  };
+in {
+  options = {
+
+    services.uhub = mkOption {
+      default = { };
+      description = lib.mdDoc "Uhub ADC hub instances";
+      type = types.attrsOf (types.submodule {
+        options = {
+
+          enable = mkEnableOption (lib.mdDoc "hub instance") // { default = true; };
+
+          enableTLS = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Whether to enable TLS support.";
+          };
+
+          settings = mkOption {
+            inherit (settingsFormat) type;
+            description = lib.mdDoc ''
+              Configuration of uhub.
+              See https://www.uhub.org/doc/config.php for a list of options.
+            '';
+            default = { };
+            example = {
+              server_bind_addr = "any";
+              server_port = 1511;
+              hub_name = "My Public Hub";
+              hub_description = "Yet another ADC hub";
+              max_users = 150;
+            };
+          };
+
+          plugins = mkOption {
+            description = lib.mdDoc "Uhub plugin configuration.";
+            type = with types;
+              listOf (submodule {
+                options = {
+                  plugin = mkOption {
+                    type = path;
+                    example = literalExpression
+                      "$${pkgs.uhub}/plugins/mod_auth_sqlite.so";
+                    description = lib.mdDoc "Path to plugin file.";
+                  };
+                  settings = mkOption {
+                    description = lib.mdDoc "Settings specific to this plugin.";
+                    type = with types; attrsOf str;
+                    example = { file = "/etc/uhub/users.db"; };
+                  };
+                };
+              });
+            default = [ ];
+          };
+
+        };
+      });
+    };
+
+  };
+
+  config = let
+    hubs = lib.attrsets.filterAttrs (_: cfg: cfg.enable) config.services.uhub;
+  in {
+
+    environment.etc = lib.attrsets.mapAttrs' (name: cfg:
+      let
+        settings' = cfg.settings // {
+          tls_enable = cfg.enableTLS;
+          file_plugins = pkgs.writeText "uhub-plugins.conf"
+            (lib.strings.concatStringsSep "\n" (map ({ plugin, settings }:
+              ''
+                plugin ${plugin} "${
+                  toString
+                  (lib.attrsets.mapAttrsToList (key: value: "${key}=${value}")
+                    settings)
+                }"'') cfg.plugins));
+        };
+      in {
+        name = "uhub/${name}.conf";
+        value.source = settingsFormat.generate "uhub-${name}.conf" settings';
+      }) hubs;
+
+    systemd.services = lib.attrsets.mapAttrs' (name: cfg: {
+      name = "uhub-${name}";
+      value = let pkg = pkgs.uhub.override { tlsSupport = cfg.enableTLS; };
+      in {
+        description = "high performance peer-to-peer hub for the ADC network";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        reloadIfChanged = true;
+        serviceConfig = {
+          Type = "notify";
+          ExecStart = "${pkg}/bin/uhub -c /etc/uhub/${name}.conf -L";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          DynamicUser = true;
+
+          AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+          CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";
+        };
+      };
+    }) hubs;
+  };
+
+}
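As a sketch of how the attribute-set interface above might be used — the instance name `public` is illustrative and the settings are taken from the option example — this defines one hub whose generated configuration lands in `/etc/uhub/public.conf`:

```nix
{
  services.uhub.public = {
    enable = true;
    settings = {
      server_bind_addr = "any";
      server_port = 1511;
      hub_name = "My Public Hub";
      max_users = 150;
    };
  };
}
```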
diff --git a/nixpkgs/nixos/modules/services/misc/weechat.md b/nixpkgs/nixos/modules/services/misc/weechat.md
new file mode 100644
index 000000000000..21f41be5b4a0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/weechat.md
@@ -0,0 +1,46 @@
+# WeeChat {#module-services-weechat}
+
+[WeeChat](https://weechat.org/) is a fast and
+extensible IRC client.
+
+## Basic Usage {#module-services-weechat-basic-usage}
+
+By default, the module creates a
+[`systemd`](https://www.freedesktop.org/wiki/Software/systemd/)
+unit which runs the chat client in a detached
+[`screen`](https://www.gnu.org/software/screen/)
+session.
+
+This is done by enabling the `weechat` service:
+```
+{ ... }:
+
+{
+  services.weechat.enable = true;
+}
+```
+
+The service runs as a dedicated user named `weechat`,
+with `/var/lib/weechat` as its state directory.
+
+## Re-attaching to WeeChat {#module-services-weechat-reattach}
+
+WeeChat runs in a screen session owned by a dedicated user. To explicitly
+allow another user to attach to this session, the
+`screenrc` needs to be tweaked by adding
+[multiuser](https://www.gnu.org/software/screen/manual/html_node/Multiuser.html#Multiuser)
+support:
+```
+{
+  programs.screen.screenrc = ''
+    multiuser on
+    acladd normal_user
+  '';
+}
+```
+Now, the session can be re-attached like this:
+```
+screen -x weechat/weechat-screen
+```
+
+*The session name can be changed using [services.weechat.sessionName](options.html#opt-services.weechat.sessionName).*
diff --git a/nixpkgs/nixos/modules/services/misc/weechat.nix b/nixpkgs/nixos/modules/services/misc/weechat.nix
new file mode 100644
index 000000000000..338493e3cd37
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/weechat.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.weechat;
+in
+
+{
+  options.services.weechat = {
+    enable = mkEnableOption (lib.mdDoc "weechat");
+    root = mkOption {
+      description = lib.mdDoc "Weechat state directory.";
+      type = types.str;
+      default = "/var/lib/weechat";
+    };
+    sessionName = mkOption {
+      description = lib.mdDoc "Name of the `screen` session for weechat.";
+      default = "weechat-screen";
+      type = types.str;
+    };
+    binary = mkOption {
+      type = types.path;
+      description = lib.mdDoc "Binary to execute.";
+      default = "${pkgs.weechat}/bin/weechat";
+      defaultText = literalExpression ''"''${pkgs.weechat}/bin/weechat"'';
+      example = literalExpression ''"''${pkgs.weechat}/bin/weechat-headless"'';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users = {
+      groups.weechat = {};
+      users.weechat = {
+        createHome = true;
+        group = "weechat";
+        home = cfg.root;
+        isSystemUser = true;
+      };
+    };
+
+    systemd.services.weechat = {
+      environment.WEECHAT_HOME = cfg.root;
+      serviceConfig = {
+        User = "weechat";
+        Group = "weechat";
+        RemainAfterExit = "yes";
+      };
+      script = "exec ${config.security.wrapperDir}/screen -Dm -S ${cfg.sessionName} ${cfg.binary}";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network.target" ];
+    };
+
+    security.wrappers.screen =
+      { setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${pkgs.screen}/bin/screen";
+      };
+  };
+
+  meta.doc = ./weechat.md;
+}
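A minimal sketch of the corresponding host configuration; switching `binary` to the headless build is optional and shown only as an illustration:

```nix
{ pkgs, ... }:

{
  services.weechat = {
    enable = true;
    # Optional: run the headless variant instead of the default binary.
    binary = "${pkgs.weechat}/bin/weechat-headless";
  };
}
```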
diff --git a/nixpkgs/nixos/modules/services/misc/xmr-stak.nix b/nixpkgs/nixos/modules/services/misc/xmr-stak.nix
new file mode 100644
index 000000000000..54efae48d5d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/xmr-stak.nix
@@ -0,0 +1,89 @@
+{ lib, config, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xmr-stak;
+
+  pkg = pkgs.xmr-stak.override {
+    inherit (cfg) openclSupport;
+  };
+
+in
+
+{
+  options = {
+    services.xmr-stak = {
+      enable = mkEnableOption (lib.mdDoc "xmr-stak miner");
+      openclSupport = mkEnableOption (lib.mdDoc "support for OpenCL (AMD/ATI graphics cards)");
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--noCPU" "--currency monero" ];
+        description = lib.mdDoc "List of parameters to pass to xmr-stak.";
+      };
+
+      configFiles = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = literalExpression ''
+          {
+            "config.txt" = '''
+              "verbose_level" : 4,
+              "h_print_time" : 60,
+              "tls_secure_algo" : true,
+            ''';
+            "pools.txt" = '''
+              "currency" : "monero7",
+              "pool_list" :
+              [ { "pool_address" : "pool.supportxmr.com:443",
+                  "wallet_address" : "my-wallet-address",
+                  "rig_id" : "",
+                  "pool_password" : "nixos",
+                  "use_nicehash" : false,
+                  "use_tls" : true,
+                  "tls_fingerprint" : "",
+                  "pool_weight" : 23
+                },
+              ],
+            ''';
+          }
+        '';
+        description = lib.mdDoc ''
+          Content of config files like config.txt, pools.txt or cpu.txt.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.xmr-stak = {
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      preStart = concatStrings (flip mapAttrsToList cfg.configFiles (fn: content: ''
+        ln -sf '${pkgs.writeText "xmr-stak-${fn}" content}' '${fn}'
+      ''));
+
+      serviceConfig = let rootRequired = cfg.openclSupport; in {
+        ExecStart = "${pkg}/bin/xmr-stak ${concatStringsSep " " cfg.extraArgs}";
+        # xmr-stak generates cpu and/or gpu configuration files
+        WorkingDirectory = "/tmp";
+        PrivateTmp = true;
+        DynamicUser = !rootRequired;
+        LimitMEMLOCK = toString (1024*1024);
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule ["services" "xmr-stak" "configText"] ''
+      This option was removed in favour of `services.xmr-stak.configFiles`
+      because the new config file `pools.txt` was introduced. You are
+      now able to define all other config files like cpu.txt or amd.txt.
+    '')
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/misc/xmrig.nix b/nixpkgs/nixos/modules/services/misc/xmrig.nix
new file mode 100644
index 000000000000..f75b47ffeced
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/xmrig.nix
@@ -0,0 +1,76 @@
+{ config, pkgs, lib, ... }:
+
+
+let
+  cfg = config.services.xmrig;
+
+  json = pkgs.formats.json { };
+  configFile = json.generate "config.json" cfg.settings;
+in
+
+with lib;
+
+{
+  options = {
+    services.xmrig = {
+      enable = mkEnableOption (lib.mdDoc "XMRig Mining Software");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.xmrig;
+        defaultText = literalExpression "pkgs.xmrig";
+        example = literalExpression "pkgs.xmrig-mo";
+        description = lib.mdDoc "XMRig package to use.";
+      };
+
+      settings = mkOption {
+        default = { };
+        type = json.type;
+        example = literalExpression ''
+          {
+            autosave = true;
+            cpu = true;
+            opencl = false;
+            cuda = false;
+            pools = [
+              {
+                url = "pool.supportxmr.com:443";
+                user = "your-wallet";
+                keepalive = true;
+                tls = true;
+              }
+            ];
+          }
+        '';
+        description = lib.mdDoc ''
+          XMRig configuration. Refer to
+          <https://xmrig.com/docs/miner/config>
+          for details on supported values.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    hardware.cpu.x86.msr.enable = true;
+
+    systemd.services.xmrig = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "XMRig Mining Software Service";
+      serviceConfig = {
+        ExecStartPre = "${lib.getExe cfg.package} --config=${configFile} --dry-run";
+        ExecStart = "${lib.getExe cfg.package} --config=${configFile}";
+        # https://xmrig.com/docs/miner/randomx-optimization-guide/msr
+        # If you use recent XMRig with root privileges (Linux) or admin
+        # privileges (Windows) the miner configure all MSR registers
+        # automatically.
+        DynamicUser = lib.mkDefault false;
+      };
+    };
+  };
+
+  meta = with lib; {
+    maintainers = with maintainers; [ ratsclub ];
+  };
+}
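For illustration, a configuration sketch mirroring the option example above; the pool URL and wallet are placeholders:

```nix
{
  services.xmrig = {
    enable = true;
    settings = {
      autosave = true;
      cpu = true;
      opencl = false;
      cuda = false;
      pools = [{
        url = "pool.supportxmr.com:443";   # placeholder pool
        user = "your-wallet";              # placeholder wallet address
        keepalive = true;
        tls = true;
      }];
    };
  };
}
```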
diff --git a/nixpkgs/nixos/modules/services/misc/zoneminder.nix b/nixpkgs/nixos/modules/services/misc/zoneminder.nix
new file mode 100644
index 000000000000..fca03b2ad4e1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/zoneminder.nix
@@ -0,0 +1,378 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.zoneminder;
+  fpm = config.services.phpfpm.pools.zoneminder;
+  pkg = pkgs.zoneminder;
+
+  dirName = pkg.dirName;
+
+  user = "zoneminder";
+  group = {
+    nginx = config.services.nginx.group;
+    none  = user;
+  }.${cfg.webserver};
+
+  useNginx = cfg.webserver == "nginx";
+
+  defaultDir = "/var/lib/${user}";
+  home = if useCustomDir then cfg.storageDir else defaultDir;
+
+  useCustomDir = cfg.storageDir != null;
+
+  zms = "/cgi-bin/zms";
+
+  dirs = dirList: [ dirName ] ++ map (e: "${dirName}/${e}") dirList;
+
+  cacheDirs = [ "swap" ];
+  libDirs   = [ "events" "exports" "images" "sounds" ];
+
+  dirStanzas = baseDir:
+    lib.concatStringsSep "\n" (map (e:
+      "ZM_DIR_${lib.toUpper e}=${baseDir}/${e}"
+      ) libDirs);
+
+  defaultsFile = pkgs.writeText "60-defaults.conf" ''
+    # 01-system-paths.conf
+    ${dirStanzas home}
+    ZM_PATH_ARP=${lib.getBin pkgs.nettools}/bin/arp
+    ZM_PATH_LOGS=/var/log/${dirName}
+    ZM_PATH_MAP=/dev/shm
+    ZM_PATH_SOCKS=/run/${dirName}
+    ZM_PATH_SWAP=/var/cache/${dirName}/swap
+    ZM_PATH_ZMS=${zms}
+
+    # 02-multiserver.conf
+    ZM_SERVER_HOST=
+
+    # Database
+    ZM_DB_TYPE=mysql
+    ZM_DB_HOST=${cfg.database.host}
+    ZM_DB_NAME=${cfg.database.name}
+    ZM_DB_USER=${cfg.database.username}
+    ZM_DB_PASS=${cfg.database.password}
+
+    # Web
+    ZM_WEB_USER=${user}
+    ZM_WEB_GROUP=${group}
+  '';
+
+  configFile = pkgs.writeText "80-nixos.conf" ''
+    # You can override defaults here
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+  options = {
+    services.zoneminder = with lib; {
+      enable = lib.mkEnableOption (lib.mdDoc ''
+        ZoneMinder.
+
+        If you intend to run the database locally, you should set
+        `config.services.zoneminder.database.createLocally` to true. Otherwise,
+        when set to `false` (the default), you will have to create the database
+        and database user as well as populate the database yourself.
+        Additionally, you will need to run `zmupdate.pl` yourself when
+        upgrading to a newer version
+      '');
+
+      webserver = mkOption {
+        type = types.enum [ "nginx" "none" ];
+        default = "nginx";
+        description = lib.mdDoc ''
+          The webserver to configure for the PHP frontend.
+
+          Set it to `none` if you want to configure it yourself. PRs are welcome
+          for support for other web servers.
+        '';
+      };
+
+      hostname = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          The hostname on which to listen.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8095;
+        description = lib.mdDoc ''
+          The port on which to listen.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open the firewall port(s).
+        '';
+      };
+
+      database = {
+        createLocally = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Create the database and database user locally.
+          '';
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            Hostname hosting the database.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "zm";
+          description = lib.mdDoc ''
+            Name of database.
+          '';
+        };
+
+        username = mkOption {
+          type = types.str;
+          default = "zmuser";
+          description = lib.mdDoc ''
+            Username for accessing the database.
+          '';
+        };
+
+        password = mkOption {
+          type = types.str;
+          default = "zmpass";
+          description = lib.mdDoc ''
+            Password for accessing the database.
+            Not used if `createLocally` is set.
+          '';
+        };
+      };
+
+      cameras = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc ''
+          Set this to the number of cameras you expect to support.
+        '';
+      };
+
+      storageDir = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/storage/tank";
+        description = lib.mdDoc ''
+          ZoneMinder can generate quite a lot of data, so in case you don't want
+          to use the default ${defaultDir}, you can override the path here.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional configuration added verbatim to the configuration file.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.database.createLocally -> cfg.database.username == user;
+        message = "services.zoneminder.database.username must be set to ${user} if services.zoneminder.database.createLocally is set true";
+      }
+    ];
+
+    environment.etc = {
+      "zoneminder/60-defaults.conf".source = defaultsFile;
+      "zoneminder/80-nixos.conf".source    = configFile;
+    };
+
+    networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [
+      cfg.port
+      6802 # zmtrigger
+    ];
+
+    services = {
+      fcgiwrap = lib.mkIf useNginx {
+        enable = true;
+        preforkProcesses = cfg.cameras;
+        inherit user group;
+      };
+
+      mysql = lib.mkIf cfg.database.createLocally {
+        enable = true;
+        package = lib.mkDefault pkgs.mariadb;
+        ensureDatabases = [ cfg.database.name ];
+        ensureUsers = [{
+          name = cfg.database.username;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }];
+      };
+
+      nginx = lib.mkIf useNginx {
+        enable = true;
+        virtualHosts = {
+          ${cfg.hostname} = {
+            default = true;
+            root = "${pkg}/share/zoneminder/www";
+            listen = [ { addr = "0.0.0.0"; inherit (cfg) port; } ];
+            extraConfig = let
+              fcgi = config.services.fcgiwrap;
+            in ''
+              index index.php;
+
+              location / {
+                try_files $uri $uri/ /index.php?$args =404;
+
+                rewrite ^/skins/.*/css/fonts/(.*)$ /fonts/$1 permanent;
+
+                location ~ /api/(css|img|ico) {
+                  rewrite ^/api(.+)$ /api/app/webroot/$1 break;
+                  try_files $uri $uri/ =404;
+                }
+
+                location ~ \.(gif|ico|jpg|jpeg|png)$ {
+                  access_log off;
+                  expires 30d;
+                }
+
+                location /api {
+                  rewrite ^/api(.+)$ /api/app/webroot/index.php?p=$1 last;
+                }
+
+                location /cgi-bin {
+                  gzip off;
+
+                  include ${config.services.nginx.package}/conf/fastcgi_params;
+                  fastcgi_param SCRIPT_FILENAME ${pkg}/libexec/zoneminder/${zms};
+                  fastcgi_param HTTP_PROXY "";
+                  fastcgi_intercept_errors on;
+
+                  fastcgi_pass ${fcgi.socketType}:${fcgi.socketAddress};
+                }
+
+                location /cache/ {
+                  alias /var/cache/${dirName}/;
+                }
+
+                location ~ \.php$ {
+                  try_files $uri =404;
+                  fastcgi_index index.php;
+
+                  include ${config.services.nginx.package}/conf/fastcgi_params;
+                  fastcgi_param SCRIPT_FILENAME $request_filename;
+                  fastcgi_param HTTP_PROXY "";
+
+                  fastcgi_pass unix:${fpm.socket};
+                }
+              }
+            '';
+          };
+        };
+      };
+
+      phpfpm = lib.mkIf useNginx {
+        pools.zoneminder = {
+          inherit user group;
+          phpPackage = pkgs.php.withExtensions (
+            { enabled, all }: enabled ++ [ all.apcu all.sysvsem ]);
+          phpOptions = ''
+            date.timezone = "${config.time.timeZone}"
+          '';
+          settings = lib.mapAttrs (name: lib.mkDefault) {
+            "listen.owner" = user;
+            "listen.group" = group;
+            "listen.mode" = "0660";
+
+            "pm" = "dynamic";
+            "pm.start_servers" = 1;
+            "pm.min_spare_servers" = 1;
+            "pm.max_spare_servers" = 2;
+            "pm.max_requests" = 500;
+            "pm.max_children" = 5;
+            "pm.status_path" = "/$pool-status";
+            "ping.path" = "/$pool-ping";
+          };
+        };
+      };
+    };
+
+    systemd.services = {
+      zoneminder = with pkgs; {
+        inherit (zoneminder.meta) description;
+        documentation = [ "https://zoneminder.readthedocs.org/en/latest/" ];
+        path = [
+          coreutils
+          procps
+          psmisc
+        ];
+        after = [ "nginx.service" ] ++ lib.optional cfg.database.createLocally "mysql.service";
+        wantedBy = [ "multi-user.target" ];
+        restartTriggers = [ defaultsFile configFile ];
+        preStart = lib.optionalString useCustomDir ''
+          install -dm775 -o ${user} -g ${group} ${cfg.storageDir}/{${lib.concatStringsSep "," libDirs}}
+        '' + lib.optionalString cfg.database.createLocally ''
+          if ! test -e "/var/lib/${dirName}/db-created"; then
+            ${config.services.mysql.package}/bin/mysql < ${pkg}/share/zoneminder/db/zm_create.sql
+            touch "/var/lib/${dirName}/db-created"
+          fi
+
+          ${zoneminder}/bin/zmupdate.pl -nointeractive
+          ${zoneminder}/bin/zmupdate.pl --nointeractive -f
+
+          # Update ZM's Nix store path in the configuration table. Do nothing if the config doesn't
+          # contain ZM's Nix store path.
+          ${config.services.mysql.package}/bin/mysql -u zoneminder zm << EOF
+            UPDATE Config
+              SET Value = REGEXP_REPLACE(Value, "^/nix/store/[^-/]+-zoneminder-[^/]+", "${pkgs.zoneminder}")
+              WHERE Name = "ZM_FONT_FILE_LOCATION";
+          EOF
+        '';
+        serviceConfig = {
+          User = user;
+          Group = group;
+          SupplementaryGroups = [ "video" ];
+          ExecStart  = "${zoneminder}/bin/zmpkg.pl start";
+          ExecStop   = "${zoneminder}/bin/zmpkg.pl stop";
+          ExecReload = "${zoneminder}/bin/zmpkg.pl restart";
+          PIDFile = "/run/${dirName}/zm.pid";
+          Type = "forking";
+          Restart = "on-failure";
+          RestartSec = "10s";
+          CacheDirectory = dirs cacheDirs;
+          RuntimeDirectory = dirName;
+          ReadWriteDirectories = lib.mkIf useCustomDir [ cfg.storageDir ];
+          StateDirectory = dirs (lib.optionals (!useCustomDir) libDirs);
+          LogsDirectory = dirName;
+          PrivateTmp = true;
+          ProtectSystem = "strict";
+          ProtectKernelTunables = true;
+          SystemCallArchitectures = "native";
+          NoNewPrivileges = true;
+        };
+      };
+    };
+
+    users.groups.${user} = {
+      gid = config.ids.gids.zoneminder;
+    };
+
+    users.users.${user} = {
+      uid = config.ids.uids.zoneminder;
+      group = user;
+      inherit home;
+      inherit (pkgs.zoneminder.meta) description;
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ];
+}
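A sketch of a host configuration for this module, assuming a locally managed MariaDB and a hypothetical storage location; note the assertion above requires the database user to be `zoneminder` when `createLocally` is set:

```nix
{
  services.zoneminder = {
    enable = true;
    database = {
      createLocally = true;
      username = "zoneminder";   # required by the assertion when createLocally = true
    };
    # Hypothetical path with enough space for recordings.
    storageDir = "/storage/tank";
  };
}
```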
diff --git a/nixpkgs/nixos/modules/services/misc/zookeeper.nix b/nixpkgs/nixos/modules/services/misc/zookeeper.nix
new file mode 100644
index 000000000000..fb51be698e72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/misc/zookeeper.nix
@@ -0,0 +1,161 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.zookeeper;
+
+  zookeeperConfig = ''
+    dataDir=${cfg.dataDir}
+    clientPort=${toString cfg.port}
+    autopurge.purgeInterval=${toString cfg.purgeInterval}
+    ${cfg.extraConf}
+    ${cfg.servers}
+  '';
+
+  configDir = pkgs.buildEnv {
+    name = "zookeeper-conf";
+    paths = [
+      (pkgs.writeTextDir "zoo.cfg" zookeeperConfig)
+      (pkgs.writeTextDir "log4j.properties" cfg.logging)
+    ];
+  };
+
+in {
+
+  options.services.zookeeper = {
+    enable = mkEnableOption (lib.mdDoc "Zookeeper");
+
+    port = mkOption {
+      description = lib.mdDoc "Zookeeper Client port.";
+      default = 2181;
+      type = types.port;
+    };
+
+    id = mkOption {
+      description = lib.mdDoc "Zookeeper ID.";
+      default = 0;
+      type = types.int;
+    };
+
+    purgeInterval = mkOption {
+      description = lib.mdDoc ''
+        The interval in hours at which the purge task is triggered. Set to a positive integer (1 or above) to enable automatic purging.
+      '';
+      default = 1;
+      type = types.int;
+    };
+
+    extraConf = mkOption {
+      description = lib.mdDoc "Extra configuration for Zookeeper.";
+      type = types.lines;
+      default = ''
+        initLimit=5
+        syncLimit=2
+        tickTime=2000
+      '';
+    };
+
+    servers = mkOption {
+      description = lib.mdDoc "All Zookeeper Servers.";
+      default = "";
+      type = types.lines;
+      example = ''
+        server.0=host0:2888:3888
+        server.1=host1:2888:3888
+        server.2=host2:2888:3888
+      '';
+    };
+
+    logging = mkOption {
+      description = lib.mdDoc "Zookeeper logging configuration.";
+      default = ''
+        zookeeper.root.logger=INFO, CONSOLE
+        log4j.rootLogger=INFO, CONSOLE
+        log4j.logger.org.apache.zookeeper.audit.Log4jAuditLogger=INFO, CONSOLE
+        log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+        log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+        log4j.appender.CONSOLE.layout.ConversionPattern=[myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n
+      '';
+      type = types.lines;
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/zookeeper";
+      description = lib.mdDoc ''
+        Data directory for Zookeeper
+      '';
+    };
+
+    extraCmdLineOptions = mkOption {
+      description = lib.mdDoc "Extra command line options for the Zookeeper launcher.";
+      default = [ "-Dcom.sun.management.jmxremote" "-Dcom.sun.management.jmxremote.local.only=true" ];
+      type = types.listOf types.str;
+      example = [ "-Djava.net.preferIPv4Stack=true" "-Dcom.sun.management.jmxremote" "-Dcom.sun.management.jmxremote.local.only=true" ];
+    };
+
+    preferIPv4 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Add the -Djava.net.preferIPv4Stack=true flag to the Zookeeper server.
+      '';
+    };
+
+    package = mkOption {
+      description = lib.mdDoc "The zookeeper package to use";
+      default = pkgs.zookeeper;
+      defaultText = literalExpression "pkgs.zookeeper";
+      type = types.package;
+    };
+
+    jre = mkOption {
+      description = lib.mdDoc "The JRE with which to run Zookeeper";
+      default = cfg.package.jre;
+      defaultText = literalExpression "pkgs.zookeeper.jre";
+      example = literalExpression "pkgs.jre";
+      type = types.package;
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [cfg.package];
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0700 zookeeper - - -"
+      "Z '${cfg.dataDir}' 0700 zookeeper - - -"
+    ];
+
+    systemd.services.zookeeper = {
+      description = "Zookeeper Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.jre}/bin/java \
+            -cp "${cfg.package}/lib/*:${configDir}" \
+            ${escapeShellArgs cfg.extraCmdLineOptions} \
+            -Dzookeeper.datadir.autocreate=false \
+            ${optionalString cfg.preferIPv4 "-Djava.net.preferIPv4Stack=true"} \
+            org.apache.zookeeper.server.quorum.QuorumPeerMain \
+            ${configDir}/zoo.cfg
+        '';
+        User = "zookeeper";
+      };
+      preStart = ''
+        echo "${toString cfg.id}" > ${cfg.dataDir}/myid
+        mkdir -p ${cfg.dataDir}/version-2
+      '';
+    };
+
+    users.users.zookeeper = {
+      isSystemUser = true;
+      group = "zookeeper";
+      description = "Zookeeper daemon user";
+      home = cfg.dataDir;
+    };
+    users.groups.zookeeper = {};
+  };
+}
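For illustration, a single node of a hypothetical three-member ensemble, reusing the `servers` example from the options above:

```nix
{
  services.zookeeper = {
    enable = true;
    id = 0;   # must match this host's entry in the server list
    servers = ''
      server.0=host0:2888:3888
      server.1=host1:2888:3888
      server.2=host2:2888:3888
    '';
  };
}
```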
diff --git a/nixpkgs/nixos/modules/services/monitoring/alerta.nix b/nixpkgs/nixos/modules/services/monitoring/alerta.nix
new file mode 100644
index 000000000000..6c7ebec4191c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/alerta.nix
@@ -0,0 +1,111 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.alerta;
+
+  alertaConf = pkgs.writeTextFile {
+    name = "alertad.conf";
+    text = ''
+      DATABASE_URL = '${cfg.databaseUrl}'
+      DATABASE_NAME = '${cfg.databaseName}'
+      LOG_FILE = '${cfg.logDir}/alertad.log'
+      LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+      CORS_ORIGINS = [ ${concatMapStringsSep ", " (s: "\"" + s + "\"") cfg.corsOrigins} ];
+      AUTH_REQUIRED = ${if cfg.authenticationRequired then "True" else "False"}
+      SIGNUP_ENABLED = ${if cfg.signupEnabled then "True" else "False"}
+      ${cfg.extraConfig}
+    '';
+  };
+in
+{
+  options.services.alerta = {
+    enable = mkEnableOption (lib.mdDoc "alerta");
+
+    port = mkOption {
+      type = types.port;
+      default = 5000;
+      description = lib.mdDoc "Port of Alerta";
+    };
+
+    bind = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc "Address to bind to. The default is to bind to all addresses";
+    };
+
+    logDir = mkOption {
+      type = types.path;
+      description = lib.mdDoc "Location where the logfiles are stored";
+      default = "/var/log/alerta";
+    };
+
+    databaseUrl = mkOption {
+      type = types.str;
+      description = lib.mdDoc "URL of the MongoDB or PostgreSQL database to connect to";
+      default = "mongodb://localhost";
+    };
+
+    databaseName = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Name of the database instance to connect to";
+      default = "monitoring";
+    };
+
+    corsOrigins = mkOption {
+      type = types.listOf types.str;
+      description = lib.mdDoc "List of URLs that can access the API for Cross-Origin Resource Sharing (CORS)";
+      default = [ "http://localhost" "http://localhost:5000" ];
+    };
+
+    authenticationRequired = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "Whether users must authenticate when using the web UI or command-line tool";
+      default = false;
+    };
+
+    signupEnabled = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "Whether to prevent sign-up of new users via the web UI";
+      default = true;
+    };
+
+    extraConfig = mkOption {
+      description = lib.mdDoc "These lines go into alertad.conf verbatim.";
+      default = "";
+      type = types.lines;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.logDir}' - alerta alerta - -"
+    ];
+
+    systemd.services.alerta = {
+      description = "Alerta Monitoring System";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      environment = {
+        ALERTA_SVR_CONF_FILE = alertaConf;
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.alerta-server}/bin/alertad run --port ${toString cfg.port} --host ${cfg.bind}";
+        User = "alerta";
+        Group = "alerta";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.alerta ];
+
+    users.users.alerta = {
+      uid = config.ids.uids.alerta;
+      description = "Alerta user";
+    };
+
+    users.groups.alerta = {
+      gid = config.ids.gids.alerta;
+    };
+  };
+}
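A minimal sketch of enabling the server with authentication turned on; the database URL shown simply repeats the module default for clarity:

```nix
{
  services.alerta = {
    enable = true;
    bind = "127.0.0.1";                 # listen on localhost only (example)
    databaseUrl = "mongodb://localhost";
    authenticationRequired = true;
  };
}
```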
diff --git a/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix b/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix
new file mode 100644
index 000000000000..666479c78a84
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/apcupsd.nix
@@ -0,0 +1,206 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.apcupsd;
+
+  configFile = pkgs.writeText "apcupsd.conf" ''
+    ## apcupsd.conf v1.1 ##
+    # apcupsd complains if the first line is not like above.
+    ${cfg.configText}
+    SCRIPTDIR ${toString scriptDir}
+  '';
+
+  # List of events from "man apccontrol"
+  eventList = [
+    "annoyme"
+    "battattach"
+    "battdetach"
+    "changeme"
+    "commfailure"
+    "commok"
+    "doreboot"
+    "doshutdown"
+    "emergency"
+    "failing"
+    "killpower"
+    "loadlimit"
+    "mainsback"
+    "onbattery"
+    "offbattery"
+    "powerout"
+    "remotedown"
+    "runlimit"
+    "timeout"
+    "startselftest"
+    "endselftest"
+  ];
+
+  shellCmdsForEventScript = eventname: commands: ''
+    echo "#!${pkgs.runtimeShell}" > "$out/${eventname}"
+    echo '${commands}' >> "$out/${eventname}"
+    chmod a+x "$out/${eventname}"
+  '';
+
+  eventToShellCmds = event: if builtins.hasAttr event cfg.hooks then (shellCmdsForEventScript event (builtins.getAttr event cfg.hooks)) else "";
+
+  scriptDir = pkgs.runCommand "apcupsd-scriptdir" { preferLocalBuild = true; } (''
+    mkdir "$out"
+    # Copy SCRIPTDIR from apcupsd package
+    cp -r ${pkgs.apcupsd}/etc/apcupsd/* "$out"/
+    # Make the files writeable (nix will unset the write bits afterwards)
+    chmod u+w "$out"/*
+    # Remove the sample event notification scripts, because they don't work
+    # anyways (they try to send mail to "root" with the "mail" command)
+    (cd "$out" && rm changeme commok commfailure onbattery offbattery)
+    # Remove the sample apcupsd.conf file (we're generating our own)
+    rm "$out/apcupsd.conf"
+    # Set the SCRIPTDIR= line in apccontrol to the dir we're creating now
+    sed -i -e "s|^SCRIPTDIR=.*|SCRIPTDIR=$out|" "$out/apccontrol"
+    '' + concatStringsSep "\n" (map eventToShellCmds eventList)
+
+  );
+
+  # Ensure the CLI uses our generated configFile
+  wrappedBinaries = pkgs.runCommandLocal "apcupsd-wrapped-binaries"
+    { nativeBuildInputs = [ pkgs.makeWrapper ]; }
+    ''
+      for p in "${lib.getBin pkgs.apcupsd}/bin/"*; do
+          bname=$(basename "$p")
+          makeWrapper "$p" "$out/bin/$bname" --add-flags "-f ${configFile}"
+      done
+    '';
+
+  apcupsdWrapped = pkgs.symlinkJoin {
+    name = "apcupsd-wrapped";
+    # Put wrappers first so they "win"
+    paths = [ wrappedBinaries pkgs.apcupsd ];
+  };
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.apcupsd = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable the APC UPS daemon. apcupsd monitors your UPS and
+          permits orderly shutdown of your computer in the event of a power
+          failure. User manual: http://www.apcupsd.com/manual/manual.html.
+          Note that apcupsd runs as root (to allow shutdown of computer).
+          You can check the status of your UPS with the "apcaccess" command.
+        '';
+      };
+
+      configText = mkOption {
+        default = ''
+          UPSTYPE usb
+          NISIP 127.0.0.1
+          BATTERYLEVEL 50
+          MINUTES 5
+        '';
+        type = types.lines;
+        description = lib.mdDoc ''
+          Contents of the runtime configuration file, apcupsd.conf. The default
+          settings make apcupsd autodetect USB UPSes, limit network access to
+          localhost, and shut down the system when the battery level is below 50
+          percent or when the UPS has calculated that it has 5 minutes or less
+          of remaining power-on time. See man apcupsd.conf for details.
+        '';
+      };
+
+      hooks = mkOption {
+        default = {};
+        example = {
+          doshutdown = "# shell commands to notify that the computer is shutting down";
+        };
+        type = types.attrsOf types.lines;
+        description = lib.mdDoc ''
+          Each attribute in this option names an apcupsd event and the string
+          value it contains will be executed in a shell, in response to that
+          event (prior to the default action). See "man apccontrol" for the
+          list of events and what they represent.
+
+          A hook script can stop apccontrol from doing its default action by
+          exiting with value 99. Do not do this unless you know what you're
+          doing.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [ {
+      assertion = let hooknames = builtins.attrNames cfg.hooks; in all (x: elem x eventList) hooknames;
+      message = ''
+        One (or more) attribute names in services.apcupsd.hooks are invalid.
+        Current attribute names: ${toString (builtins.attrNames cfg.hooks)}
+        Valid attribute names  : ${toString eventList}
+      '';
+    } ];
+
+    # Give users access to the "apcaccess" tool
+    environment.systemPackages = [ apcupsdWrapped ];
+
+    # NOTE 1: apcupsd runs as root because it needs permission to run
+    # "shutdown"
+    #
+    # NOTE 2: When apcupsd calls "wall", it prints an error because stdout is
+    # not connected to a tty (it is connected to the journal):
+    #   wall: cannot get tty name: Inappropriate ioctl for device
+    # The message still gets through.
+    systemd.services.apcupsd = {
+      description = "APC UPS Daemon";
+      wantedBy = [ "multi-user.target" ];
+      preStart = "mkdir -p /run/apcupsd/";
+      serviceConfig = {
+        ExecStart = "${pkgs.apcupsd}/bin/apcupsd -b -f ${configFile} -d1";
+        # TODO: When apcupsd has initiated a shutdown, systemd always ends up
+        # waiting for it to stop ("A stop job is running for UPS daemon"). This
+        # is weird, because in the journal one can clearly see that apcupsd has
+        # received the SIGTERM signal and has already quit (or so it seems).
+        # This reduces the wait time from 90 seconds (default) to just 5. Then
+        # systemd kills it with SIGKILL.
+        TimeoutStopSec = 5;
+      };
+      unitConfig.Documentation = "man:apcupsd(8)";
+    };
+
+    # A special service to tell the UPS to power down/hibernate just before the
+    # computer shuts down. (The UPS has a built in delay before it actually
+    # shuts off power.) Copied from here:
+    # http://forums.opensuse.org/english/get-technical-help-here/applications/479499-apcupsd-systemd-killpower-issues.html
+    systemd.services.apcupsd-killpower = {
+      description = "APC UPS Kill Power";
+      after = [ "shutdown.target" ]; # append umount.target?
+      before = [ "final.target" ];
+      wantedBy = [ "shutdown.target" ];
+      unitConfig = {
+        ConditionPathExists = "/run/apcupsd/powerfail";
+        DefaultDependencies = "no";
+      };
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.apcupsd}/bin/apcupsd --killpower -f ${configFile}";
+        TimeoutSec = "infinity";
+        StandardOutput = "tty";
+        RemainAfterExit = "yes";
+      };
+    };
+
+  };
+
+}
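As a sketch, a configuration that tightens the default shutdown threshold and attaches a hook; the hook body is a placeholder shell snippet (its output ends up in the journal):

```nix
{
  services.apcupsd = {
    enable = true;
    configText = ''
      UPSTYPE usb
      NISIP 127.0.0.1
      BATTERYLEVEL 40
      MINUTES 5
    '';
    hooks.doshutdown = ''
      echo "apcupsd: UPS battery low, initiating shutdown"
    '';
  };
}
```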
diff --git a/nixpkgs/nixos/modules/services/monitoring/arbtt.nix b/nixpkgs/nixos/modules/services/monitoring/arbtt.nix
new file mode 100644
index 000000000000..f07ecc5d5dd0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/arbtt.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.arbtt;
+in {
+  options = {
+    services.arbtt = {
+      enable = mkEnableOption (lib.mdDoc "Arbtt statistics capture service");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.haskellPackages.arbtt;
+        defaultText = literalExpression "pkgs.haskellPackages.arbtt";
+        description = lib.mdDoc ''
+          The package to use for the arbtt binaries.
+        '';
+      };
+
+      logFile = mkOption {
+        type = types.str;
+        default = "%h/.arbtt/capture.log";
+        example = "/home/username/.arbtt-capture.log";
+        description = lib.mdDoc ''
+          The log file for captured samples.
+        '';
+      };
+
+      sampleRate = mkOption {
+        type = types.int;
+        default = 60;
+        example = 120;
+        description = lib.mdDoc ''
+          The sampling interval in seconds.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.arbtt = {
+      description = "arbtt statistics capture service";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cfg.package}/bin/arbtt-capture --logfile=${cfg.logFile} --sample-rate=${toString cfg.sampleRate}";
+        Restart = "always";
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.michaelpj ];
+}
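A usage sketch: this is a systemd user service, so only the options below are needed (the sample-rate override is illustrative):

```nix
{
  services.arbtt = {
    enable = true;
    sampleRate = 30;   # sample every 30 seconds instead of the default 60
  };
}
```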
diff --git a/nixpkgs/nixos/modules/services/monitoring/below.nix b/nixpkgs/nixos/modules/services/monitoring/below.nix
new file mode 100644
index 000000000000..4a7135162ac4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/below.nix
@@ -0,0 +1,108 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.below;
+  cfgContents = concatStringsSep "\n" (
+    mapAttrsToList (n: v: ''${n} = "${v}"'') (filterAttrs (_k: v: v != null) {
+      log_dir = cfg.dirs.log;
+      store_dir = cfg.dirs.store;
+      cgroup_filter_out = cfg.cgroupFilterOut;
+    })
+  );
+
+  mkDisableOption = n: mkOption {
+    type = types.bool;
+    default = true;
+    description = mdDoc "Whether to enable ${n}.";
+  };
+  optionalType = ty: x: mkOption (x // {
+    description = mdDoc x.description;
+    type = (types.nullOr ty);
+    default = null;
+  });
+  optionalPath = optionalType types.path;
+  optionalStr = optionalType types.str;
+  optionalInt = optionalType types.int;
+in {
+  options = {
+    services.below = {
+      enable = mkEnableOption (mdDoc "'below' resource monitor");
+
+      cgroupFilterOut = optionalStr {
+        description = "A regexp matching the full paths of cgroups whose data shouldn't be collected";
+        example = "user.slice.*";
+      };
+      collect = {
+        diskStats = mkDisableOption "dist_stat collection";
+        ioStats   = mkEnableOption (mdDoc "io.stat collection for cgroups");
+        exitStats = mkDisableOption "eBPF-based exitstats";
+      };
+      compression.enable = mkEnableOption (mdDoc "data compression");
+      retention = {
+        size = optionalInt {
+          description = ''
+            Size limit for below's data, in bytes. Data is deleted oldest-first, in 24h 'shards'.
+
+            ::: {.note}
+            The size limit may be exceeded by at most the size of the active shard, as:
+            - the active shard cannot be deleted;
+            - the size limit is only enforced when a new shard is created.
+            :::
+          '';
+        };
+        time = optionalInt {
+          description = ''
+            Retention time, in seconds.
+
+            ::: {.note}
+            As data is stored in 24 hour shards which are discarded as a whole,
+            only data expired by 24h (or more) is guaranteed to be discarded.
+            :::
+
+            ::: {.note}
+            If `retention.size` is set, data may be discarded earlier than the specified time.
+            :::
+          '';
+        };
+      };
+      dirs = {
+        log = optionalPath { description = "Where to store below's logs"; };
+        store = optionalPath {
+          description = "Where to store below's data";
+          example = "/var/lib/below";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.below ];
+    # /etc/below.conf is also referred to by the `below` CLI tool,
+    #  so this can't be a store-only file whose path is passed to the service
+    environment.etc."below/below.conf".text = cfgContents;
+
+    systemd = {
+      packages = [ pkgs.below ];
+      services.below = {
+        # Workaround for https://github.com/NixOS/nixpkgs/issues/81138
+        wantedBy = [ "multi-user.target" ];
+        restartTriggers = [ cfgContents ];
+
+        serviceConfig.ExecStart = [
+          ""
+          ("${lib.getExe pkgs.below} record " + (concatStringsSep " " (
+            optional (!cfg.collect.diskStats) "--disable-disk-stat" ++
+            optional   cfg.collect.ioStats    "--collect-io-stat"   ++
+            optional (!cfg.collect.exitStats) "--disable-exitstats" ++
+            optional   cfg.compression.enable "--compress"          ++
+
+            optional (cfg.retention.size != null) "--store-size-limit ${toString cfg.retention.size}" ++
+            optional (cfg.retention.time != null) "--retain-for-s ${toString cfg.retention.time}"
+          )))
+        ];
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ nicoo ];
+}
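A configuration sketch combining the retention and filtering options above; the values are placeholders:

```nix
{
  services.below = {
    enable = true;
    cgroupFilterOut = "user.slice.*";   # example regexp from the option docs
    retention.time = 7 * 24 * 3600;     # keep roughly one week of samples
    dirs.store = "/var/lib/below";      # example path from the option docs
  };
}
```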
diff --git a/nixpkgs/nixos/modules/services/monitoring/bosun.nix b/nixpkgs/nixos/modules/services/monitoring/bosun.nix
new file mode 100644
index 000000000000..1dc19743461b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/bosun.nix
@@ -0,0 +1,159 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.bosun;
+
+  configFile = pkgs.writeText "bosun.conf" ''
+    ${optionalString (cfg.opentsdbHost !=null) "tsdbHost = ${cfg.opentsdbHost}"}
+    ${optionalString (cfg.influxHost !=null) "influxHost = ${cfg.influxHost}"}
+    httpListen = ${cfg.listenAddress}
+    stateFile = ${cfg.stateFile}
+    ledisDir = ${cfg.ledisDir}
+    checkFrequency = ${cfg.checkFrequency}
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  options = {
+
+    services.bosun = {
+
+      enable = mkEnableOption (lib.mdDoc "bosun");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.bosun;
+        defaultText = literalExpression "pkgs.bosun";
+        description = lib.mdDoc ''
+          bosun binary to use.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "bosun";
+        description = lib.mdDoc ''
+          User account under which bosun runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "bosun";
+        description = lib.mdDoc ''
+          Group account under which bosun runs.
+        '';
+      };
+
+      opentsdbHost = mkOption {
+        type = types.nullOr types.str;
+        default = "localhost:4242";
+        description = lib.mdDoc ''
+          Host and port of the OpenTSDB database that stores bosun data.
+          To disable OpenTSDB, set this option to null.
+        '';
+      };
+
+      influxHost = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "localhost:8086";
+        description = lib.mdDoc ''
+           Host and port of the influxdb database.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":8070";
+        description = lib.mdDoc ''
+          The host address and port that bosun's web interface will listen on.
+        '';
+      };
+
+      stateFile = mkOption {
+        type = types.path;
+        default = "/var/lib/bosun/bosun.state";
+        description = lib.mdDoc ''
+          Path to bosun's state file.
+        '';
+      };
+
+      ledisDir = mkOption {
+        type = types.path;
+        default = "/var/lib/bosun/ledis_data";
+        description = lib.mdDoc ''
+          Path to bosun's ledis data dir
+        '';
+      };
+
+      checkFrequency = mkOption {
+        type = types.str;
+        default = "5m";
+        description = lib.mdDoc ''
+          Bosun's check frequency
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration options for Bosun. You should describe your
+          desired templates, alerts, macros, etc through this configuration
+          option.
+
+          A detailed description of the supported syntax can be found at
+          https://bosun.org/configuration.html
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.bosun = {
+      description = "bosun metrics collector (part of Bosun)";
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        mkdir -p "$(dirname "${cfg.stateFile}")";
+        touch "${cfg.stateFile}"
+        touch "${cfg.stateFile}.tmp"
+
+        mkdir -p "${cfg.ledisDir}";
+
+        if [ "$(id -u)" = 0 ]; then
+          chown ${cfg.user}:${cfg.group} "${cfg.stateFile}"
+          chown ${cfg.user}:${cfg.group} "${cfg.stateFile}.tmp"
+          chown ${cfg.user}:${cfg.group} "${cfg.ledisDir}"
+        fi
+      '';
+
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = ''
+          ${cfg.package}/bin/bosun -c ${configFile}
+        '';
+      };
+    };
+
+    users.users.bosun = {
+      description = "bosun user";
+      group = "bosun";
+      uid = config.ids.uids.bosun;
+    };
+
+    users.groups.bosun.gid = config.ids.gids.bosun;
+
+  };
+
+}
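A sketch of a minimal Bosun host configuration; the notification stanza in `extraConfig` is a hypothetical fragment in Bosun's own syntax, which this module passes through verbatim rather than validating:

```nix
{
  services.bosun = {
    enable = true;
    opentsdbHost = "localhost:4242";
    listenAddress = ":8070";
    extraConfig = ''
      notification default {
        print = true
      }
    '';
  };
}
```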
diff --git a/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix b/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix
new file mode 100644
index 000000000000..68e6e8e40b31
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/cadvisor.nix
@@ -0,0 +1,138 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cadvisor;
+
+in {
+  options = {
+    services.cadvisor = {
+      enable = mkEnableOption (lib.mdDoc "Cadvisor service");
+
+      listenAddress = mkOption {
+        default = "127.0.0.1";
+        type = types.str;
+        description = lib.mdDoc "Cadvisor listening host";
+      };
+
+      port = mkOption {
+        default = 8080;
+        type = types.port;
+        description = lib.mdDoc "Cadvisor listening port";
+      };
+
+      storageDriver = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "influxdb";
+        description = lib.mdDoc "Cadvisor storage driver.";
+      };
+
+      storageDriverHost = mkOption {
+        default = "localhost:8086";
+        type = types.str;
+        description = lib.mdDoc "Cadvisor storage driver host.";
+      };
+
+      storageDriverDb = mkOption {
+        default = "root";
+        type = types.str;
+        description = lib.mdDoc "Cadvisord storage driver database name.";
+      };
+
+      storageDriverUser = mkOption {
+        default = "root";
+        type = types.str;
+        description = lib.mdDoc "Cadvisor storage driver username.";
+      };
+
+      storageDriverPassword = mkOption {
+        default = "root";
+        type = types.str;
+        description = lib.mdDoc ''
+          Cadvisor storage driver password.
+
+          Warning: this password is stored in the world-readable Nix store. It's
+          recommended to use the {option}`storageDriverPasswordFile` option
+          since that gives you control over the security of the password.
+          {option}`storageDriverPasswordFile` also takes precedence over {option}`storageDriverPassword`.
+        '';
+      };
+
+      storageDriverPasswordFile = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          File that contains the cadvisor storage driver password.
+
+          {option}`storageDriverPasswordFile` takes precedence over {option}`storageDriverPassword`
+
+          Warning: when {option}`storageDriverPassword` is non-empty this defaults to a file in the
+          world-readable Nix store that contains the value of {option}`storageDriverPassword`.
+
+          It's recommended to override this with a path not in the Nix store.
+          Tip: use [nixops key management](https://nixos.org/nixops/manual/#idm140737318306400)
+        '';
+      };
+
+      storageDriverSecure = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Cadvisor storage driver, enable secure communication.";
+      };
+
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Additional cadvisor options.
+
+          See <https://github.com/google/cadvisor/blob/master/docs/runtime_options.md> for available options.
+        '';
+      };
+    };
+  };
+
+  config = mkMerge [
+    { services.cadvisor.storageDriverPasswordFile = mkIf (cfg.storageDriverPassword != "") (
+        mkDefault (toString (pkgs.writeTextFile {
+          name = "cadvisor-storage-driver-password";
+          text = cfg.storageDriverPassword;
+        }))
+      );
+    }
+
+    (mkIf cfg.enable {
+      systemd.services.cadvisor = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" "docker.service" "influxdb.service" ];
+
+        path = optionals config.boot.zfs.enabled [ pkgs.zfs ];
+
+        postStart = mkBefore ''
+          until ${pkgs.curl.bin}/bin/curl -s -o /dev/null 'http://${cfg.listenAddress}:${toString cfg.port}/containers/'; do
+            sleep 1;
+          done
+        '';
+
+        script = ''
+          exec ${pkgs.cadvisor}/bin/cadvisor \
+            -logtostderr=true \
+            -listen_ip="${cfg.listenAddress}" \
+            -port="${toString cfg.port}" \
+            ${escapeShellArgs cfg.extraOptions} \
+            ${optionalString (cfg.storageDriver != null) ''
+              -storage_driver "${cfg.storageDriver}" \
+              -storage_driver_host "${cfg.storageDriverHost}" \
+              -storage_driver_db "${cfg.storageDriverDb}" \
+              -storage_driver_user "${cfg.storageDriverUser}" \
+              -storage_driver_password "$(cat "${cfg.storageDriverPasswordFile}")" \
+              ${optionalString cfg.storageDriverSecure "-storage_driver_secure"}
+            ''}
+        '';
+
+        serviceConfig.TimeoutStartSec=300;
+      };
+    })
+  ];
+}
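Finally, a sketch of wiring cAdvisor to a local InfluxDB using the storage-driver options above; the database name and password-file path are placeholders, with the secret kept outside the Nix store as the option documentation recommends:

```nix
{
  services.cadvisor = {
    enable = true;
    listenAddress = "127.0.0.1";
    port = 8080;
    storageDriver = "influxdb";
    storageDriverHost = "localhost:8086";
    storageDriverDb = "cadvisor";                       # hypothetical database name
    storageDriverPasswordFile = "/run/keys/cadvisor";   # hypothetical secret path
  };
}
```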
diff --git a/nixpkgs/nixos/modules/services/monitoring/certspotter.md b/nixpkgs/nixos/modules/services/monitoring/certspotter.md
new file mode 100644
index 000000000000..9bf6e1d946a0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/certspotter.md
@@ -0,0 +1,74 @@
+# Cert Spotter {#module-services-certspotter}
+
+Cert Spotter is a tool for monitoring [Certificate Transparency](https://en.wikipedia.org/wiki/Certificate_Transparency)
+logs.
+
+## Service Configuration {#modules-services-certspotter-service-configuration}
+
+A basic config that notifies you of all certificate changes for your
+domain would look as follows:
+
+```nix
+services.certspotter = {
+  enable = true;
+  # replace example.org with your domain name
+  watchlist = [ ".example.org" ];
+  emailRecipients = [ "webmaster@example.org" ];
+};
+
+# Configure an SMTP client
+programs.msmtp.enable = true;
+# Or you can use any other module that provides sendmail, like
+# services.nullmailer, services.opensmtpd, services.postfix
+```
+
+In this case, the leading dot in `".example.org"` means that Cert
+Spotter should monitor not only `example.org`, but also all of its
+subdomains.
+
+## Operation {#modules-services-certspotter-operation}
+
+**By default, NixOS configures Cert Spotter to skip all certificates
+issued before its first launch**, because checking the entire
+Certificate Transparency logs requires downloading tens of terabytes of
+data. If you want to check the *entire* logs for previously issued
+certificates, you have to set `services.certspotter.startAtEnd` to
+`false` and remove all previously saved log state in
+`/var/lib/certspotter/logs`. The downloaded logs aren't saved, so if you
+add a new domain to the watchlist and want Cert Spotter to go through
+the logs again, you will have to remove `/var/lib/certspotter/logs`
+again.
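+
+For example (a minimal sketch, using the option and state path described
+above):
+
+```nix
+services.certspotter.startAtEnd = false;
+```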
+
+After catching up with the logs, Cert Spotter will start monitoring live
+logs. As of October 2023, it uses around **20 Mbps** of traffic on
+average.
+
+## Hooks {#modules-services-certspotter-hooks}
+
+Cert Spotter supports running custom hooks instead of (or in addition
+to) sending emails. Hooks are shell scripts that will be passed certain
+environment variables.
+
+To see hook documentation, see Cert Spotter's man pages:
+
+```ShellSession
+nix-shell -p certspotter --run 'man 8 certspotter-script'
+```
+
+For example, you can remove `emailRecipients` and send email
+notifications manually using the following hook:
+
+```nix
+services.certspotter.hooks = [
+  (pkgs.writeShellScript "certspotter-hook" ''
+    function print_email() {
+      echo "Subject: [certspotter] $SUMMARY"
+      echo "Mime-Version: 1.0"
+      echo "Content-Type: text/plain; charset=US-ASCII"
+      echo
+      cat "$TEXT_FILENAME"
+    }
+    print_email | ${config.services.certspotter.sendmailPath} -i webmaster@example.org
+  '')
+];
+```
diff --git a/nixpkgs/nixos/modules/services/monitoring/certspotter.nix b/nixpkgs/nixos/modules/services/monitoring/certspotter.nix
new file mode 100644
index 000000000000..aafa29daa872
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/certspotter.nix
@@ -0,0 +1,143 @@
+{ config
+, lib
+, pkgs
+, ... }:
+
+let
+  cfg = config.services.certspotter;
+
+  configDir = pkgs.linkFarm "certspotter-config" (
+    lib.toList {
+      name = "watchlist";
+      path = pkgs.writeText "certspotter-watchlist" (builtins.concatStringsSep "\n" cfg.watchlist);
+    }
+    ++ lib.optional (cfg.emailRecipients != [ ]) {
+      name = "email_recipients";
+      path = pkgs.writeText "certspotter-email_recipients" (builtins.concatStringsSep "\n" cfg.emailRecipients);
+    }
+    # always generate hooks dir when no emails are provided to allow running cert spotter with no hooks/emails
+    ++ lib.optional (cfg.emailRecipients == [ ] || cfg.hooks != [ ]) {
+      name = "hooks.d";
+      path = pkgs.linkFarm "certspotter-hooks" (lib.imap1 (i: path: {
+        inherit path;
+        name = "hook${toString i}";
+      }) cfg.hooks);
+    });
+in
+{
+  options.services.certspotter = {
+    enable = lib.mkEnableOption "Cert Spotter, a Certificate Transparency log monitor";
+
+    package = lib.mkPackageOptionMD pkgs "certspotter" { };
+
+    startAtEnd = lib.mkOption {
+      type = lib.types.bool;
+      description = ''
+        Whether to skip certificates issued before the first launch of Cert Spotter.
+        Setting this to `false` will cause Cert Spotter to download tens of terabytes of data.
+      '';
+      default = true;
+    };
+
+    sendmailPath = lib.mkOption {
+      type = with lib.types; nullOr path;
+      description = ''
+        Path to the `sendmail` binary. By default, the local sendmail wrapper is used
+        (see {option}`services.mail.sendmailSetuidWrapper`).
+      '';
+      example = lib.literalExpression ''"''${pkgs.system-sendmail}/bin/sendmail"'';
+    };
+
+    watchlist = lib.mkOption {
+      type = with lib.types; listOf str;
+      description = "Domain names to watch. To monitor a domain with all subdomains, prefix its name with `.` (e.g. `.example.org`).";
+      default = [ ];
+      example = [ ".example.org" "another.example.com" ];
+    };
+
+    emailRecipients = lib.mkOption {
+      type = with lib.types; listOf str;
+      description = "A list of email addresses to send certificate updates to.";
+      default = [ ];
+    };
+
+    hooks = lib.mkOption {
+      type = with lib.types; listOf path;
+      description = ''
+        Scripts to run upon the detection of a new certificate. See `man 8 certspotter-script` or
+        [the GitHub page](https://github.com/SSLMate/certspotter/blob/${pkgs.certspotter.src.rev or "master"}/man/certspotter-script.md)
+        for more info.
+      '';
+      default = [ ];
+      example = lib.literalExpression ''
+        [
+          (pkgs.writeShellScript "certspotter-hook" '''
+            echo "Event summary: $SUMMARY."
+          ''')
+        ]
+      '';
+    };
+
+    extraFlags = lib.mkOption {
+      type = with lib.types; listOf str;
+      description = "Extra command-line arguments to pass to Cert Spotter.";
+      example = [ "-no_save" ];
+      default = [ ];
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.emailRecipients != [ ]) -> (cfg.sendmailPath != null);
+        message = ''
+          You must configure the sendmail setuid wrapper (services.mail.sendmailSetuidWrapper)
+          or services.certspotter.sendmailPath
+        '';
+      }
+    ];
+
+    services.certspotter.sendmailPath = let
+      inherit (config.security) wrapperDir;
+      inherit (config.services.mail) sendmailSetuidWrapper;
+    in lib.mkMerge [
+      (lib.mkIf (sendmailSetuidWrapper != null) (lib.mkOptionDefault "${wrapperDir}/${sendmailSetuidWrapper.program}"))
+      (lib.mkIf (sendmailSetuidWrapper == null) (lib.mkOptionDefault null))
+    ];
+
+    users.users.certspotter = {
+      description = "Cert Spotter user";
+      group = "certspotter";
+      home = "/var/lib/certspotter";
+      isSystemUser = true;
+    };
+    users.groups.certspotter = { };
+
+    systemd.services.certspotter = {
+      description = "Cert Spotter - Certificate Transparency Monitor";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment.CERTSPOTTER_CONFIG_DIR = configDir;
+      environment.SENDMAIL_PATH = if cfg.sendmailPath != null then cfg.sendmailPath else "/run/current-system/sw/bin/false";
+      script = ''
+        export CERTSPOTTER_STATE_DIR="$STATE_DIRECTORY"
+        cd "$CERTSPOTTER_STATE_DIR"
+        ${lib.optionalString cfg.startAtEnd ''
+          if [[ ! -d logs ]]; then
+            # Don't download certificates issued before the first launch
+            exec ${cfg.package}/bin/certspotter -start_at_end ${lib.escapeShellArgs cfg.extraFlags}
+          fi
+        ''}
+        exec ${cfg.package}/bin/certspotter ${lib.escapeShellArgs cfg.extraFlags}
+      '';
+      serviceConfig = {
+        User = "certspotter";
+        Group = "certspotter";
+        StateDirectory = "certspotter";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ chayleaf ];
+  meta.doc = ./certspotter.md;
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/cockpit.nix b/nixpkgs/nixos/modules/services/monitoring/cockpit.nix
new file mode 100644
index 000000000000..2947b4d80120
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/cockpit.nix
@@ -0,0 +1,231 @@
+{ pkgs, config, lib, ... }:
+
+let
+  cfg = config.services.cockpit;
+  inherit (lib) types mkEnableOption mkOption mkIf mdDoc literalMD mkPackageOptionMD;
+  settingsFormat = pkgs.formats.ini {};
+in {
+  options = {
+    services.cockpit = {
+      enable = mkEnableOption (mdDoc "Cockpit");
+
+      package = mkPackageOptionMD pkgs "Cockpit" {
+        default = [ "cockpit" ];
+      };
+
+      settings = lib.mkOption {
+        type = settingsFormat.type;
+
+        default = {};
+
+        description = mdDoc ''
+          Settings for cockpit that will be saved in /etc/cockpit/cockpit.conf.
+
+          See the [documentation](https://cockpit-project.org/guide/latest/cockpit.conf.5.html) (also available via `man 5 cockpit.conf`) for details.
+        '';
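+
+        # Hypothetical example (not part of this change): restrict the origins
+        # Cockpit accepts, e.g. when serving it behind a reverse proxy.
+        example = {
+          WebService = {
+            Origins = "https://cockpit.example.com";
+          };
+        };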
+      };
+
+      port = mkOption {
+        description = mdDoc "Port where cockpit will listen.";
+        type = types.port;
+        default = 9090;
+      };
+
+      openFirewall = mkOption {
+        description = mdDoc "Open port for cockpit.";
+        type = types.bool;
+        default = false;
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+
+    # expose cockpit-bridge system-wide
+    environment.systemPackages = [ cfg.package ];
+
+    # allow cockpit to find its plugins
+    environment.pathsToLink = [ "/share/cockpit" ];
+
+    # generate cockpit settings
+    environment.etc."cockpit/cockpit.conf".source = settingsFormat.generate "cockpit.conf" cfg.settings;
+
+    security.pam.services.cockpit = {};
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+    # units are in reverse sort order if you ls $out/lib/systemd/system
+    # all these units are basically verbatim translated from upstream
+
+    # Translation from $out/lib/systemd/system/systemd-cockpithttps.slice
+    systemd.slices.system-cockpithttps = {
+      description = "Resource limits for all cockpit-ws-https@.service instances";
+      sliceConfig = {
+        TasksMax = 200;
+        MemoryHigh = "75%";
+        MemoryMax = "90%";
+      };
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit-wsinstance-https@.socket
+    systemd.sockets."cockpit-wsinstance-https@" = {
+      unitConfig = {
+        Description = "Socket for Cockpit Web Service https instance %I";
+        BindsTo = [ "cockpit.service" "cockpit-wsinstance-https@%i.service" ];
+        # clean up the socket after the service exits, to prevent fd leak
+        # this also effectively prevents a DoS by starting arbitrarily many sockets, as
+        # the services are resource-limited by system-cockpithttps.slice
+        Documentation = "man:cockpit-ws(8)";
+      };
+      socketConfig = {
+        ListenStream = "/run/cockpit/wsinstance/https@%i.sock";
+        SocketUser = "root";
+        SocketMode = "0600";
+      };
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit-wsinstance-https@.service
+    systemd.services."cockpit-wsinstance-https@" = {
+      description = "Cockpit Web Service https instance %I";
+      bindsTo = [ "cockpit.service"];
+      path = [ cfg.package ];
+      documentation = [ "man:cockpit-ws(8)" ];
+      serviceConfig = {
+        Slice = "system-cockpithttps.slice";
+        ExecStart = "${cfg.package}/libexec/cockpit-ws --for-tls-proxy --port=0";
+        User = "root";
+        Group = "";
+      };
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit-wsinstance-http.socket
+    systemd.sockets.cockpit-wsinstance-http = {
+      unitConfig = {
+        Description = "Socket for Cockpit Web Service http instance";
+        BindsTo = "cockpit.service";
+        Documentation = "man:cockpit-ws(8)";
+      };
+      socketConfig = {
+        ListenStream = "/run/cockpit/wsinstance/http.sock";
+        SocketUser = "root";
+        SocketMode = "0600";
+      };
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit-wsinstance-https-factory.socket
+    systemd.sockets.cockpit-wsinstance-https-factory = {
+      unitConfig = {
+        Description = "Socket for Cockpit Web Service https instance factory";
+        BindsTo = "cockpit.service";
+        Documentation = "man:cockpit-ws(8)";
+      };
+      socketConfig = {
+        ListenStream = "/run/cockpit/wsinstance/https-factory.sock";
+        Accept = true;
+        SocketUser = "root";
+        SocketMode = "0600";
+      };
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit-wsinstance-https-factory@.service
+    systemd.services."cockpit-wsinstance-https-factory@" = {
+      description = "Cockpit Web Service https instance factory";
+      documentation = [ "man:cockpit-ws(8)" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/libexec/cockpit-wsinstance-factory";
+        User = "root";
+      };
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit-wsinstance-http.service
+    systemd.services."cockpit-wsinstance-http" = {
+      description = "Cockpit Web Service http instance";
+      bindsTo = [ "cockpit.service" ];
+      path = [ cfg.package ];
+      documentation = [ "man:cockpit-ws(8)" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/libexec/cockpit-ws --no-tls --port=0";
+        User = "root";
+        Group = "";
+      };
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit.socket
+    systemd.sockets."cockpit" = {
+      unitConfig = {
+        Description = "Cockpit Web Service Socket";
+        Documentation = "man:cockpit-ws(8)";
+        Wants = "cockpit-motd.service";
+      };
+      socketConfig = {
+        ListenStream = cfg.port;
+        ExecStartPost = [
+          "-${cfg.package}/share/cockpit/motd/update-motd \"\" localhost"
+          "-${pkgs.coreutils}/bin/ln -snf active.motd /run/cockpit/motd"
+        ];
+        ExecStopPost = "-${pkgs.coreutils}/bin/ln -snf inactive.motd /run/cockpit/motd";
+      };
+      wantedBy = [ "sockets.target" ];
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit.service
+    systemd.services."cockpit" = {
+      description = "Cockpit Web Service";
+      documentation = [ "man:cockpit-ws(8)" ];
+      restartIfChanged = true;
+      path = with pkgs; [ coreutils cfg.package ];
+      requires = [ "cockpit.socket" "cockpit-wsinstance-http.socket" "cockpit-wsinstance-https-factory.socket" ];
+      after = [ "cockpit-wsinstance-http.socket" "cockpit-wsinstance-https-factory.socket" ];
+      environment = {
+        G_MESSAGES_DEBUG = "cockpit-ws,cockpit-bridge";
+      };
+      serviceConfig = {
+        RuntimeDirectory = "cockpit/tls";
+        ExecStartPre = [
+          # cockpit-tls runs in a more constrained environment, these + means that these commands
+          # will run with full privilege instead of inside that constrained environment
+          # See https://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart= for details
+          "+${cfg.package}/libexec/cockpit-certificate-ensure --for-cockpit-tls"
+        ];
+        ExecStart = "${cfg.package}/libexec/cockpit-tls";
+        User = "root";
+        Group = "";
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        MemoryDenyWriteExecute = true;
+      };
+    };
+
+    # Translation from $out/lib/systemd/system/cockpit-motd.service
+    # This part basically implements a motd state machine:
+    # - If cockpit.socket is enabled then /run/cockpit/motd points to /run/cockpit/active.motd
+    # - If cockpit.socket is disabled then /run/cockpit/motd points to /run/cockpit/inactive.motd
+    # - As cockpit.socket is disabled by default, /run/cockpit/motd points to /run/cockpit/inactive.motd
+    # /run/cockpit/active.motd is generated dynamically by cockpit-motd.service
+    systemd.services."cockpit-motd" = {
+      path = with pkgs; [ nettools ];
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${cfg.package}/share/cockpit/motd/update-motd";
+      };
+      description = "Cockpit motd updater service";
+      documentation = [ "man:cockpit-ws(8)" ];
+      wants = [ "network.target" ];
+      after = [ "network.target" "cockpit.socket" ];
+    };
+
+    systemd.tmpfiles.rules = [ # From $out/lib/tmpfiles.d/cockpit-tmpfiles.conf
+      "C /run/cockpit/inactive.motd 0640 root root - ${cfg.package}/share/cockpit/motd/inactive.motd"
+      "f /run/cockpit/active.motd   0640 root root -"
+      "L+ /run/cockpit/motd - - - - inactive.motd"
+      "d /etc/cockpit/ws-certs.d 0600 root root 0"
+    ];
+  };
+
+  meta.maintainers = pkgs.cockpit.meta.maintainers;
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/collectd.nix b/nixpkgs/nixos/modules/services/monitoring/collectd.nix
new file mode 100644
index 000000000000..5d525995c67a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/collectd.nix
@@ -0,0 +1,166 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.collectd;
+
+  baseDirLine = ''BaseDir "${cfg.dataDir}"'';
+  unvalidated_conf = pkgs.writeText "collectd-unvalidated.conf" cfg.extraConfig;
+
+  conf = if cfg.validateConfig then
+    pkgs.runCommand "collectd.conf" {} ''
+      echo testing ${unvalidated_conf}
+      cp ${unvalidated_conf} collectd.conf
+      # collectd -t fails if BaseDir does not exist.
+      substituteInPlace collectd.conf --replace ${lib.escapeShellArgs [ baseDirLine ]} 'BaseDir "."'
+      ${package}/bin/collectd -t -C collectd.conf
+      cp ${unvalidated_conf} $out
+    '' else unvalidated_conf;
+
+  package =
+    if cfg.buildMinimalPackage
+    then minimalPackage
+    else cfg.package;
+
+  minimalPackage = cfg.package.override {
+    enabledPlugins = [ "syslog" ] ++ builtins.attrNames cfg.plugins;
+  };
+
+in {
+  options.services.collectd = with types; {
+    enable = mkEnableOption (lib.mdDoc "collectd agent");
+
+    validateConfig = mkOption {
+      default = true;
+      description = lib.mdDoc ''
+        Validate the syntax of collectd configuration file at build time.
+        Disable this if you use the Include directive on files unavailable in
+        the build sandbox, or when cross-compiling.
+      '';
+      type = types.bool;
+    };
+
+    package = mkOption {
+      default = pkgs.collectd;
+      defaultText = literalExpression "pkgs.collectd";
+      description = lib.mdDoc ''
+        Which collectd package to use.
+      '';
+      type = types.package;
+    };
+
+    buildMinimalPackage = mkOption {
+      default = false;
+      description = lib.mdDoc ''
+        Build a minimal collectd package with only the configured `services.collectd.plugins`.
+      '';
+      type = bool;
+    };
+
+    user = mkOption {
+      default = "collectd";
+      description = lib.mdDoc ''
+        User under which to run collectd.
+      '';
+      type = nullOr str;
+    };
+
+    dataDir = mkOption {
+      default = "/var/lib/collectd";
+      description = lib.mdDoc ''
+        Data directory for collectd agent.
+      '';
+      type = path;
+    };
+
+    autoLoadPlugin = mkOption {
+      default = false;
+      description = lib.mdDoc ''
+        Enable plugin autoloading.
+      '';
+      type = bool;
+    };
+
+    include = mkOption {
+      default = [];
+      description = lib.mdDoc ''
+        Additional paths to load config from.
+      '';
+      type = listOf str;
+    };
+
+    plugins = mkOption {
+      default = {};
+      example = { cpu = ""; memory = ""; network = "Server 192.168.1.1 25826"; };
+      description = lib.mdDoc ''
+        Attribute set mapping plugin names to plugin config segments.
+      '';
+      type = attrsOf lines;
+    };
+
+    extraConfig = mkOption {
+      default = "";
+      description = lib.mdDoc ''
+        Extra configuration for collectd. Use mkBefore to add lines before the
+        default config, and mkAfter to add them below.
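+
+        A minimal sketch (hypothetical values):
+
+        ```nix
+        services.collectd.extraConfig = lib.mkBefore '''
+          Interval 30
+        ''';
+        ```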
+      '';
+      type = lines;
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    # 1200 is after the default (1000) but before mkAfter (1500).
+    services.collectd.extraConfig = lib.mkOrder 1200 ''
+      ${baseDirLine}
+      AutoLoadPlugin ${boolToString cfg.autoLoadPlugin}
+      Hostname "${config.networking.hostName}"
+
+      LoadPlugin syslog
+      <Plugin "syslog">
+        LogLevel "info"
+        NotifyLevel "OKAY"
+      </Plugin>
+
+      ${concatStrings (mapAttrsToList (plugin: pluginConfig: ''
+        LoadPlugin ${plugin}
+        <Plugin "${plugin}">
+        ${pluginConfig}
+        </Plugin>
+      '') cfg.plugins)}
+
+      ${concatMapStrings (f: ''
+        Include "${f}"
+      '') cfg.include}
+    '';
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} - - -"
+    ];
+
+    systemd.services.collectd = {
+      description = "Collectd Monitoring Agent";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${package}/sbin/collectd -C ${conf} -f";
+        User = cfg.user;
+        Restart = "on-failure";
+        RestartSec = 3;
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "collectd") {
+      collectd = {
+        isSystemUser = true;
+        group = "collectd";
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.user == "collectd") {
+      collectd = {};
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix b/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix
new file mode 100644
index 000000000000..fd420b0c8a06
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/das_watchdog.nix
@@ -0,0 +1,34 @@
+# A general watchdog for the linux operating system that should run in the
+# background at all times to ensure a realtime process won't hang the machine
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) das_watchdog;
+
+in {
+  ###### interface
+
+  options = {
+    services.das_watchdog.enable = mkEnableOption (lib.mdDoc "realtime watchdog");
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.das_watchdog.enable {
+    environment.systemPackages = [ das_watchdog ];
+    systemd.services.das_watchdog = {
+      description = "Watchdog to ensure a realtime process won't hang the machine";
+      after = [ "multi-user.target" "sound.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "root";
+        Type = "simple";
+        ExecStart = "${das_watchdog}/bin/das_watchdog";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix b/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix
new file mode 100644
index 000000000000..1736b0c088a3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/datadog-agent.nix
@@ -0,0 +1,302 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.datadog-agent;
+
+  ddConf = {
+    skip_ssl_validation = false;
+    confd_path          = "/etc/datadog-agent/conf.d";
+    additional_checksd  = "/etc/datadog-agent/checks.d";
+    use_dogstatsd       = true;
+  }
+  // optionalAttrs (cfg.logLevel != null) { log_level = cfg.logLevel; }
+  // optionalAttrs (cfg.hostname != null) { inherit (cfg) hostname; }
+  // optionalAttrs (cfg.ddUrl != null) { dd_url = cfg.ddUrl; }
+  // optionalAttrs (cfg.site != null) { site = cfg.site; }
+  // optionalAttrs (cfg.tags != null) { tags = concatStringsSep ", " cfg.tags; }
+  // optionalAttrs (cfg.enableLiveProcessCollection) { process_config = { enabled = "true"; }; }
+  // optionalAttrs (cfg.enableTraceAgent) { apm_config = { enabled = true; }; }
+  // cfg.extraConfig;
+
+  # Generate Datadog configuration files for each configured check.
+  # This works because check configurations have predictable paths,
+  # and because JSON is a valid subset of YAML.
+  makeCheckConfigs = entries: mapAttrs' (name: conf: {
+    name = "datadog-agent/conf.d/${name}.d/conf.yaml";
+    value.source = pkgs.writeText "${name}-check-conf.yaml" (builtins.toJSON conf);
+  }) entries;
+
+  defaultChecks = {
+    disk = cfg.diskCheck;
+    network = cfg.networkCheck;
+  };
+
+  # Assemble all check configurations and the top-level agent
+  # configuration.
+  etcfiles = with pkgs; with builtins;
+  { "datadog-agent/datadog.yaml" = {
+      source = writeText "datadog.yaml" (toJSON ddConf);
+    };
+  } // makeCheckConfigs (cfg.checks // defaultChecks);
+
+  # Apply the configured extraIntegrations to the provided agent
+  # package. See the documentation of `dd-agent/integrations-core.nix`
+  # for detailed information on this.
+  datadogPkg = cfg.package.override {
+    pythonPackages = pkgs.datadog-integrations-core cfg.extraIntegrations;
+  };
+in {
+  options.services.datadog-agent = {
+    enable = mkEnableOption (lib.mdDoc "Datadog-agent v7 monitoring service");
+
+    package = mkOption {
+      default = pkgs.datadog-agent;
+      defaultText = literalExpression "pkgs.datadog-agent";
+      description = lib.mdDoc ''
+        Which DataDog v7 agent package to use. Note that the provided
+        package is expected to have an overridable `pythonPackages`-attribute
+        which configures the Python environment with the Datadog
+        checks.
+      '';
+      type = types.package;
+    };
+
+    apiKeyFile = mkOption {
+      description = lib.mdDoc ''
+        Path to a file containing the Datadog API key to associate the
+        agent with your account.
+      '';
+      example = "/run/keys/datadog_api_key";
+      type = types.path;
+    };
+
+    ddUrl = mkOption {
+      description = lib.mdDoc ''
+        Custom dd_url to configure the agent with. Useful if traffic to Datadog
+        needs to go through a proxy.
+        Don't use this to point to another Datadog site (EU); use `site` instead.
+      '';
+      default = null;
+      example = "http://haproxy.example.com:3834";
+      type = types.nullOr types.str;
+    };
+
+    site = mkOption {
+      description = lib.mdDoc ''
+        The datadog site to point the agent towards.
+        Set to datadoghq.eu to point it to their EU site.
+      '';
+      default = null;
+      example = "datadoghq.eu";
+      type = types.nullOr types.str;
+    };
+
+    tags = mkOption {
+      description = lib.mdDoc "The tags to mark this Datadog agent with.";
+      example = [ "test" "service" ];
+      default = null;
+      type = types.nullOr (types.listOf types.str);
+    };
+
+    hostname = mkOption {
+      description = lib.mdDoc "The hostname to show in the Datadog dashboard (optional)";
+      default = null;
+      example = "mymachine.mydomain";
+      type = types.nullOr types.str;
+    };
+
+    logLevel = mkOption {
+      description = lib.mdDoc "Logging verbosity.";
+      default = null;
+      type = types.nullOr (types.enum ["DEBUG" "INFO" "WARN" "ERROR"]);
+    };
+
+    extraIntegrations = mkOption {
+      default = {};
+      type    = types.attrs;
+
+      description = lib.mdDoc ''
+        Extra integrations from the Datadog core-integrations
+        repository that should be built and included.
+
+        By default the included integrations are disk, mongo, network,
+        nginx and postgres.
+
+        To include additional integrations the name of the derivation
+        and a function to filter its dependencies from the Python
+        package set must be provided.
+      '';
+
+      example = literalExpression ''
+        {
+          ntp = pythonPackages: [ pythonPackages.ntplib ];
+        }
+      '';
+    };
+
+    extraConfig = mkOption {
+      default = {};
+      type = types.attrs;
+      description = lib.mdDoc ''
+        Extra configuration options that will be merged into the
+        main config file {file}`datadog.yaml`.
+      '';
+     };
+
+    enableLiveProcessCollection = mkOption {
+      description = lib.mdDoc ''
+        Whether to enable the live process collection agent.
+      '';
+      default = false;
+      type = types.bool;
+    };
+
+    processAgentPackage = mkOption {
+      default = pkgs.datadog-process-agent;
+      defaultText = literalExpression "pkgs.datadog-process-agent";
+      description = lib.mdDoc ''
+        Which Datadog process agent package to use.
+      '';
+      type = types.package;
+    };
+
+    enableTraceAgent = mkOption {
+      description = lib.mdDoc ''
+        Whether to enable the trace agent.
+      '';
+      default = false;
+      type = types.bool;
+    };
+
+    checks = mkOption {
+      description = lib.mdDoc ''
+        Configuration for all Datadog checks. Keys of this attribute
+        set will be used as the name of the check to create the
+        appropriate configuration in `conf.d/$check.d/conf.yaml`.
+
+        The configuration is converted into JSON from the plain Nix
+        language configuration, meaning that you should write
+        configuration adhering to Datadog's documentation - but in Nix
+        language.
+
+        Refer to the implementation of this module (specifically the
+        definition of `defaultChecks`) for an example.
+
+        Note: The 'disk' and 'network' checks are configured in
+        separate options because they exist by default. Attempting to
+        override their configuration here will have no effect.
+      '';
+
+      example = {
+        http_check = {
+          init_config = null; # sic!
+          instances = [
+            {
+              name = "some-service";
+              url = "http://localhost:1337/healthz";
+              tags = [ "some-service" ];
+            }
+          ];
+        };
+      };
+
+      default = {};
+
+      # sic! The structure of the values is up to the check, so we can
+      # not usefully constrain the type further.
+      type = with types; attrsOf attrs;
+    };
+
+    diskCheck = mkOption {
+      description = lib.mdDoc "Disk check config";
+      type = types.attrs;
+      default = {
+        init_config = {};
+        instances = [ { use_mount = "false"; } ];
+      };
+    };
+
+    networkCheck = mkOption {
+      description = lib.mdDoc "Network check config";
+      type = types.attrs;
+      default = {
+        init_config = {};
+        # Network check only supports one configured instance
+        instances = [ { collect_connection_state = false;
+          excluded_interfaces = [ "lo" "lo0" ]; } ];
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute2 ];
+
+    users.users.datadog = {
+      description = "Datadog Agent User";
+      uid = config.ids.uids.datadog;
+      group = "datadog";
+      home = "/var/log/datadog/";
+      createHome = true;
+    };
+
+    users.groups.datadog.gid = config.ids.gids.datadog;
+
+    systemd.services = let
+      makeService = attrs: recursiveUpdate {
+        path = [ datadogPkg pkgs.sysstat pkgs.procps pkgs.iproute2 ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = "datadog";
+          Group = "datadog";
+          Restart = "always";
+          RestartSec = 2;
+        };
+        restartTriggers = [ datadogPkg ] ++  map (x: x.source) (attrValues etcfiles);
+      } attrs;
+    in {
+      datadog-agent = makeService {
+        description = "Datadog agent monitor";
+        preStart = ''
+          chown -R datadog: /etc/datadog-agent
+          rm -f /etc/datadog-agent/auth_token
+        '';
+        script = ''
+          export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
+          exec ${datadogPkg}/bin/agent run -c /etc/datadog-agent/datadog.yaml
+        '';
+        serviceConfig.PermissionsStartOnly = true;
+      };
+
+      dd-jmxfetch = lib.mkIf (lib.hasAttr "jmx" cfg.checks) (makeService {
+        description = "Datadog JMX Fetcher";
+        path = [ datadogPkg pkgs.python pkgs.sysstat pkgs.procps pkgs.jdk ];
+        serviceConfig.ExecStart = "${datadogPkg}/bin/dd-jmxfetch";
+      });
+
+      datadog-process-agent = lib.mkIf cfg.enableLiveProcessCollection (makeService {
+        description = "Datadog Live Process Agent";
+        path = [ ];
+        script = ''
+          export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
+          ${cfg.processAgentPackage}/bin/process-agent --config /etc/datadog-agent/datadog.yaml
+        '';
+      });
+
+      datadog-trace-agent = lib.mkIf cfg.enableTraceAgent (makeService {
+        description = "Datadog Trace Agent";
+        path = [ ];
+        script = ''
+          export DD_API_KEY=$(head -n 1 ${cfg.apiKeyFile})
+          ${datadogPkg}/bin/trace-agent -config /etc/datadog-agent/datadog.yaml
+        '';
+      });
+
+    };
+
+    environment.etc = etcfiles;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/do-agent.nix b/nixpkgs/nixos/modules/services/monitoring/do-agent.nix
new file mode 100644
index 000000000000..c1788c640c23
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/do-agent.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.do-agent;
+
+in
+{
+  options.services.do-agent = {
+    enable = mkEnableOption (lib.mdDoc "do-agent, the DigitalOcean droplet metrics agent");
+  };
+
+  config = mkIf cfg.enable {
+    systemd.packages = [ pkgs.do-agent ];
+
+    systemd.services.do-agent = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = [ "" "${pkgs.do-agent}/bin/do-agent --syslog" ];
+        DynamicUser = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix b/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix
new file mode 100644
index 000000000000..7b28e8de1229
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/fusion-inventory.nix
@@ -0,0 +1,63 @@
+# Fusion Inventory daemon.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.fusionInventory;
+
+  configFile = pkgs.writeText "fusion_inventory.conf" ''
+    server = ${concatStringsSep ", " cfg.servers}
+
+    logger = stderr
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.fusionInventory = {
+
+      enable = mkEnableOption (lib.mdDoc "Fusion Inventory Agent");
+
+      servers = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The URLs of the OCS/GLPI servers to connect to.
+        '';
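+        # Hypothetical example (GLPI's FusionInventory plugin endpoint):
+        example = [ "https://glpi.example.org/plugins/fusioninventory/" ];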
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Configuration that is injected verbatim into the configuration file.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.fusion-inventory = {
+      description = "FusionInventory user";
+      isSystemUser = true;
+    };
+
+    systemd.services.fusion-inventory = {
+      description = "Fusion Inventory Agent";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.fusionInventory}/bin/fusioninventory-agent --conf-file=${configFile} --daemon --no-fork";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/goss.md b/nixpkgs/nixos/modules/services/monitoring/goss.md
new file mode 100644
index 000000000000..1e636aa3bdf3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/goss.md
@@ -0,0 +1,44 @@
+# Goss {#module-services-goss}
+
+[goss](https://goss.rocks/) is a YAML-based serverspec alternative tool
+for validating a server's configuration.
+
+## Basic Usage {#module-services-goss-basic-usage}
+
+A minimal configuration looks like this:
+
+```nix
+{
+  services.goss = {
+    enable = true;
+
+    environment = {
+      GOSS_FMT = "json";
+      GOSS_LOGLEVEL = "TRACE";
+    };
+
+    settings = {
+      addr."tcp://localhost:8080" = {
+        reachable = true;
+        local-address = "127.0.0.1";
+      };
+      command."check-goss-version" = {
+        exec = "${lib.getExe pkgs.goss} --version";
+        exit-status = 0;
+      };
+      dns.localhost.resolvable = true;
+      file."/nix" = {
+        filetype = "directory";
+        exists = true;
+      };
+      group.root.exists = true;
+      kernel-param."kernel.ostype".value = "Linux";
+      service.goss = {
+        enabled = true;
+        running = true;
+      };
+      user.root.exists = true;
+    };
+  };
+}
+```
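+
+Once the service is running, `goss serve` exposes the validation results over
+HTTP. A quick way to inspect them (assuming goss's default listen address
+`:8080` and health endpoint `/healthz`; the listen address can be changed via
+the `GOSS_LISTEN` environment variable):
+
+```ShellSession
+$ curl http://localhost:8080/healthz
+```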
diff --git a/nixpkgs/nixos/modules/services/monitoring/goss.nix b/nixpkgs/nixos/modules/services/monitoring/goss.nix
new file mode 100644
index 000000000000..64a8dad0703e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/goss.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.goss;
+
+  settingsFormat = pkgs.formats.yaml { };
+  configFile = settingsFormat.generate "goss.yaml" cfg.settings;
+
+in {
+  meta = {
+    doc = ./goss.md;
+    maintainers = [ lib.maintainers.anthonyroussel ];
+  };
+
+  options = {
+    services.goss = {
+      enable = lib.mkEnableOption (lib.mdDoc "Goss daemon");
+
+      package = lib.mkPackageOptionMD pkgs "goss" { };
+
+      environment = lib.mkOption {
+        type = lib.types.attrsOf lib.types.str;
+        default = { };
+        example = {
+          GOSS_FMT = "json";
+          GOSS_LOGLEVEL = "FATAL";
+          GOSS_LISTEN = ":8080";
+        };
+        description = lib.mdDoc ''
+          Environment variables to set for the goss service.
+
+          See <https://github.com/goss-org/goss/blob/master/docs/manual.md>
+        '';
+      };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule { freeformType = settingsFormat.type; };
+        default = { };
+        example = {
+          addr."tcp://localhost:8080" = {
+            reachable = true;
+            local-address = "127.0.0.1";
+          };
+          service.goss = {
+            enabled = true;
+            running = true;
+          };
+        };
+        description = lib.mdDoc ''
+          The global options for the goss `config` file, in YAML format.
+
+          Refer to <https://github.com/goss-org/goss/blob/master/docs/goss-json-schema.yaml> for schema.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.goss = {
+      description = "Goss - Quick and Easy server validation";
+      unitConfig.Documentation = "https://github.com/goss-org/goss/blob/master/docs/manual.md";
+
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+
+      environment = {
+        GOSS_FILE = configFile;
+      } // cfg.environment;
+
+      reloadTriggers = [ configFile ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStart = "${cfg.package}/bin/goss serve";
+        Group = "goss";
+        Restart = "on-failure";
+        RestartSec = 5;
+        User = "goss";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/grafana-agent.nix b/nixpkgs/nixos/modules/services/monitoring/grafana-agent.nix
new file mode 100644
index 000000000000..13604ff77c68
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/grafana-agent.nix
@@ -0,0 +1,163 @@
+{ lib, pkgs, config, generators, ... }:
+with lib;
+let
+  cfg = config.services.grafana-agent;
+  settingsFormat = pkgs.formats.yaml { };
+  configFile = settingsFormat.generate "grafana-agent.yaml" cfg.settings;
+in
+{
+  meta = {
+    maintainers = with maintainers; [ flokli zimbatm ];
+  };
+
+  options.services.grafana-agent = {
+    enable = mkEnableOption (lib.mdDoc "grafana-agent");
+
+    package = mkPackageOptionMD pkgs "grafana-agent" { };
+
+    credentials = mkOption {
+      description = lib.mdDoc ''
+        Credentials to load at service startup. Keys that are UPPER_SNAKE will be loaded as env vars. Values are absolute paths to the credentials.
+      '';
+      type = types.attrsOf types.str;
+      default = { };
+
+      example = {
+        logs_remote_write_password = "/run/keys/grafana_agent_logs_remote_write_password";
+        LOGS_REMOTE_WRITE_URL = "/run/keys/grafana_agent_logs_remote_write_url";
+        LOGS_REMOTE_WRITE_USERNAME = "/run/keys/grafana_agent_logs_remote_write_username";
+        metrics_remote_write_password = "/run/keys/grafana_agent_metrics_remote_write_password";
+        METRICS_REMOTE_WRITE_URL = "/run/keys/grafana_agent_metrics_remote_write_url";
+        METRICS_REMOTE_WRITE_USERNAME = "/run/keys/grafana_agent_metrics_remote_write_username";
+      };
+    };
+
+    extraFlags = mkOption {
+      type = with types; listOf str;
+      default = [ ];
+      example = [ "-enable-features=integrations-next" "-disable-reporting" ];
+      description = lib.mdDoc ''
+        Extra command-line flags passed to {command}`grafana-agent`.
+
+        See <https://grafana.com/docs/agent/latest/static/configuration/flags/>
+      '';
+    };
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Configuration for {command}`grafana-agent`.
+
+        See <https://grafana.com/docs/agent/latest/configuration/>
+      '';
+
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+      };
+
+      default = { };
+      defaultText = lib.literalExpression ''
+        {
+          metrics = {
+            wal_directory = "\''${STATE_DIRECTORY}";
+            global.scrape_interval = "5s";
+          };
+          integrations = {
+            agent.enabled = true;
+            agent.scrape_integration = true;
+            node_exporter.enabled = true;
+          };
+        }
+      '';
+      example = {
+        metrics.global.remote_write = [{
+          url = "\${METRICS_REMOTE_WRITE_URL}";
+          basic_auth.username = "\${METRICS_REMOTE_WRITE_USERNAME}";
+          basic_auth.password_file = "\${CREDENTIALS_DIRECTORY}/metrics_remote_write_password";
+        }];
+        logs.configs = [{
+          name = "default";
+          scrape_configs = [
+            {
+              job_name = "journal";
+              journal = {
+                max_age = "12h";
+                labels.job = "systemd-journal";
+              };
+              relabel_configs = [
+                {
+                  source_labels = [ "__journal__systemd_unit" ];
+                  target_label = "systemd_unit";
+                }
+                {
+                  source_labels = [ "__journal__hostname" ];
+                  target_label = "nodename";
+                }
+                {
+                  source_labels = [ "__journal_syslog_identifier" ];
+                  target_label = "syslog_identifier";
+                }
+              ];
+            }
+          ];
+          positions.filename = "\${STATE_DIRECTORY}/loki_positions.yaml";
+          clients = [{
+            url = "\${LOGS_REMOTE_WRITE_URL}";
+            basic_auth.username = "\${LOGS_REMOTE_WRITE_USERNAME}";
+            basic_auth.password_file = "\${CREDENTIALS_DIRECTORY}/logs_remote_write_password";
+          }];
+        }];
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.grafana-agent.settings = {
+      # keep this in sync with config.services.grafana-agent.settings.defaultText.
+      metrics = {
+        wal_directory = mkDefault "\${STATE_DIRECTORY}";
+        global.scrape_interval = mkDefault "5s";
+      };
+      integrations = {
+        agent.enabled = mkDefault true;
+        agent.scrape_integration = mkDefault true;
+        node_exporter.enabled = mkDefault true;
+      };
+    };
+
+    systemd.services.grafana-agent = {
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        set -euo pipefail
+        shopt -u nullglob
+
+        # Load all credentials into env if they are in UPPER_SNAKE form.
+        if [[ -n "''${CREDENTIALS_DIRECTORY:-}" ]]; then
+          for file in "$CREDENTIALS_DIRECTORY"/*; do
+            key=$(basename "$file")
+            if [[ $key =~ ^[A-Z0-9_]+$ ]]; then
+              echo "Environ $key"
+              export "$key=$(< "$file")"
+            fi
+          done
+        fi
+
+        # We can't use Environment=HOSTNAME=%H, as it doesn't include the domain part.
+        export HOSTNAME=$(< /proc/sys/kernel/hostname)
+
+        exec ${lib.getExe cfg.package} -config.expand-env -config.file ${configFile} ${escapeShellArgs cfg.extraFlags}
+      '';
+      serviceConfig = {
+        Restart = "always";
+        DynamicUser = true;
+        RestartSec = 2;
+        SupplementaryGroups = [
+          # allow to read the systemd journal for loki log forwarding
+          "systemd-journal"
+        ];
+        StateDirectory = "grafana-agent";
+        LoadCredential = lib.mapAttrsToList (key: value: "${key}:${value}") cfg.credentials;
+        Type = "simple";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/grafana-image-renderer.nix b/nixpkgs/nixos/modules/services/monitoring/grafana-image-renderer.nix
new file mode 100644
index 000000000000..afe9eb4d7b95
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/grafana-image-renderer.nix
@@ -0,0 +1,148 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.grafana-image-renderer;
+
+  format = pkgs.formats.json { };
+
+  configFile = format.generate "grafana-image-renderer-config.json" cfg.settings;
+in {
+  options.services.grafana-image-renderer = {
+    enable = mkEnableOption (lib.mdDoc "grafana-image-renderer");
+
+    chromium = mkOption {
+      type = types.package;
+      description = lib.mdDoc ''
+        The chromium to use for image rendering.
+      '';
+    };
+
+    verbose = mkEnableOption (lib.mdDoc "verbosity for the service");
+
+    provisionGrafana = mkEnableOption (lib.mdDoc "Grafana configuration for grafana-image-renderer");
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = format.type;
+
+        options = {
+          service = {
+            port = mkOption {
+              type = types.port;
+              default = 8081;
+              description = lib.mdDoc ''
+                The TCP port to use for the rendering server.
+              '';
+            };
+            logging.level = mkOption {
+              type = types.enum [ "error" "warning" "info" "debug" ];
+              default = "info";
+              description = lib.mdDoc ''
+                The log level of the {file}`grafana-image-renderer.service` unit.
+              '';
+            };
+          };
+          rendering = {
+            width = mkOption {
+              default = 1000;
+              type = types.ints.positive;
+              description = lib.mdDoc ''
+                Width of the PNG used to display the alerting graph.
+              '';
+            };
+            height = mkOption {
+              default = 500;
+              type = types.ints.positive;
+              description = lib.mdDoc ''
+                Height of the PNG used to display the alerting graph.
+              '';
+            };
+            mode = mkOption {
+              default = "default";
+              type = types.enum [ "default" "reusable" "clustered" ];
+              description = lib.mdDoc ''
+                Rendering mode of `grafana-image-renderer`:
+
+                - `default:` Creates one browser instance
+                  per rendering request.
+                - `reusable:` One browser instance
+                  will be started and reused for each rendering request.
+                - `clustered:` Allows you to configure precisely
+                  how many browser instances are used. The values
+                  for that mode can be declared in `rendering.clustering`.
+              '';
+            };
+            args = mkOption {
+              type = types.listOf types.str;
+              default = [ "--no-sandbox" ];
+              description = lib.mdDoc ''
+                List of CLI flags passed to `chromium`.
+              '';
+            };
+          };
+        };
+      };
+
+      default = {};
+
+      description = lib.mdDoc ''
+        Configuration attributes for `grafana-image-renderer`.
+
+        See <https://github.com/grafana/grafana-image-renderer/blob/ce1f81438e5f69c7fd7c73ce08bab624c4c92e25/default.json>
+        for supported values.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.provisionGrafana -> config.services.grafana.enable;
+        message = ''
+          To provision a Grafana instance to use grafana-image-renderer,
+          `services.grafana.enable` must be set to `true`!
+        '';
+      }
+    ];
+
+    services.grafana.settings.rendering = mkIf cfg.provisionGrafana {
+      server_url = "http://localhost:${toString cfg.settings.service.port}/render";
+      callback_url = "http://${config.services.grafana.settings.server.http_addr}:${toString config.services.grafana.settings.server.http_port}";
+    };
+
+    services.grafana-image-renderer.chromium = mkDefault pkgs.chromium;
+
+    services.grafana-image-renderer.settings = {
+      rendering = mapAttrs (const mkDefault) {
+        chromeBin = "${cfg.chromium}/bin/chromium";
+        verboseLogging = cfg.verbose;
+        timezone = config.time.timeZone;
+      };
+
+      service = {
+        logging.level = mkIf cfg.verbose (mkDefault "debug");
+        metrics.enabled = mkDefault false;
+      };
+    };
+
+    systemd.services.grafana-image-renderer = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "A Grafana backend plugin that handles rendering of panels & dashboards to PNGs using a headless browser (Chromium/Chrome)";
+
+      environment = {
+        PUPPETEER_SKIP_CHROMIUM_DOWNLOAD = "true";
+      };
+
+      serviceConfig = {
+        DynamicUser = true;
+        PrivateTmp = true;
+        ExecStart = "${pkgs.grafana-image-renderer}/bin/grafana-image-renderer server --config=${configFile}";
+        Restart = "always";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ ma27 ];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix b/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix
new file mode 100644
index 000000000000..eac304d63aa1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/grafana-reporter.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.grafana_reporter;
+
+in {
+  options.services.grafana_reporter = {
+    enable = mkEnableOption (lib.mdDoc "grafana_reporter");
+
+    grafana = {
+      protocol = mkOption {
+        description = lib.mdDoc "Grafana protocol.";
+        default = "http";
+        type = types.enum ["http" "https"];
+      };
+      addr = mkOption {
+        description = lib.mdDoc "Grafana address.";
+        default = "127.0.0.1";
+        type = types.str;
+      };
+      port = mkOption {
+        description = lib.mdDoc "Grafana port.";
+        default = 3000;
+        type = types.port;
+      };
+
+    };
+    addr = mkOption {
+      description = lib.mdDoc "Listening address.";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    port = mkOption {
+      description = lib.mdDoc "Listening port.";
+      default = 8686;
+      type = types.port;
+    };
+
+    templateDir = mkOption {
+      description = lib.mdDoc "Optional template directory for custom TeX templates.";
+      default = pkgs.grafana_reporter;
+      defaultText = literalExpression "pkgs.grafana_reporter";
+      type = types.either types.str types.path;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.grafana_reporter = {
+      description = "Grafana Reporter Service Daemon";
+      wantedBy = ["multi-user.target"];
+      after = ["network.target"];
+      serviceConfig = let
+        args = lib.concatStringsSep " " [
+          "-proto ${cfg.grafana.protocol}://"
+          "-ip ${cfg.grafana.addr}:${toString cfg.grafana.port}"
+          "-port :${toString cfg.port}"
+          "-templates ${cfg.templateDir}"
+        ];
+      in {
+        ExecStart = "${pkgs.grafana_reporter}/bin/grafana-reporter ${args}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/grafana.nix b/nixpkgs/nixos/modules/services/monitoring/grafana.nix
new file mode 100644
index 000000000000..f84d677f14d8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/grafana.nix
@@ -0,0 +1,1893 @@
+{ options, config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.grafana;
+  opt = options.services.grafana;
+  provisioningSettingsFormat = pkgs.formats.yaml { };
+  declarativePlugins = pkgs.linkFarm "grafana-plugins" (builtins.map (pkg: { name = pkg.pname; path = pkg; }) cfg.declarativePlugins);
+  useMysql = cfg.settings.database.type == "mysql";
+  usePostgresql = cfg.settings.database.type == "postgres";
+
+  # Prefer using the values from the default config file[0] directly. This way,
+  # people reading the NixOS manual can see them without cross-referencing the
+  # official documentation.
+  #
+  # However, if there is no default entry or if the setting is optional, use
+  # `null` as the default value. It will be turned into the empty string.
+  #
+  # If a setting is a list, always allow setting it as a plain string as well.
+  #
+  # [0]: https://github.com/grafana/grafana/blob/main/conf/defaults.ini
+  settingsFormatIni = pkgs.formats.ini {
+    listToValue = concatMapStringsSep " " (generators.mkValueStringDefault { });
+    mkKeyValue = generators.mkKeyValueDefault
+      {
+        mkValueString = v:
+          if v == null then ""
+          else generators.mkValueStringDefault { } v;
+      }
+      "=";
+  };
+  configFile = settingsFormatIni.generate "config.ini" cfg.settings;
+
+  mkProvisionCfg = name: attr: provisionCfg:
+    if provisionCfg.path != null
+    then provisionCfg.path
+    else
+      provisioningSettingsFormat.generate "${name}.yaml"
+        (if provisionCfg.settings != null
+        then provisionCfg.settings
+        else {
+          apiVersion = 1;
+          ${attr} = [ ];
+        });
+
+  datasourceFileOrDir = mkProvisionCfg "datasource" "datasources" cfg.provision.datasources;
+  dashboardFileOrDir = mkProvisionCfg "dashboard" "providers" cfg.provision.dashboards;
+
+  notifierConfiguration = {
+    apiVersion = 1;
+    notifiers = cfg.provision.notifiers;
+  };
+
+  notifierFileOrDir = pkgs.writeText "notifier.yaml" (builtins.toJSON notifierConfiguration);
+
+  generateAlertingProvisioningYaml = x:
+    if (cfg.provision.alerting."${x}".path == null)
+    then provisioningSettingsFormat.generate "${x}.yaml" cfg.provision.alerting."${x}".settings
+    else cfg.provision.alerting."${x}".path;
+  rulesFileOrDir = generateAlertingProvisioningYaml "rules";
+  contactPointsFileOrDir = generateAlertingProvisioningYaml "contactPoints";
+  policiesFileOrDir = generateAlertingProvisioningYaml "policies";
+  templatesFileOrDir = generateAlertingProvisioningYaml "templates";
+  muteTimingsFileOrDir = generateAlertingProvisioningYaml "muteTimings";
+
+  ln = { src, dir, filename }: ''
+    if [[ -d "${src}" ]]; then
+      pushd $out/${dir} &>/dev/null
+        lndir "${src}"
+      popd &>/dev/null
+    else
+      ln -sf ${src} $out/${dir}/${filename}.yaml
+    fi
+  '';
+  provisionConfDir = pkgs.runCommand "grafana-provisioning" { nativeBuildInputs = [ pkgs.xorg.lndir ]; } ''
+    mkdir -p $out/{datasources,dashboards,notifiers,alerting}
+    ${ln { src = datasourceFileOrDir;    dir = "datasources"; filename = "datasource"; }}
+    ${ln { src = dashboardFileOrDir;     dir = "dashboards";  filename = "dashboard"; }}
+    ${ln { src = notifierFileOrDir;      dir = "notifiers";   filename = "notifier"; }}
+    ${ln { src = rulesFileOrDir;         dir = "alerting";    filename = "rules"; }}
+    ${ln { src = contactPointsFileOrDir; dir = "alerting";    filename = "contactPoints"; }}
+    ${ln { src = policiesFileOrDir;      dir = "alerting";    filename = "policies"; }}
+    ${ln { src = templatesFileOrDir;     dir = "alerting";    filename = "templates"; }}
+    ${ln { src = muteTimingsFileOrDir;   dir = "alerting";    filename = "muteTimings"; }}
+  '';
+
+  # Get a submodule without any embedded metadata:
+  _filter = x: filterAttrs (k: v: k != "_module") x;
+
+  # https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
+  grafanaTypes.datasourceConfig = types.submodule {
+    freeformType = provisioningSettingsFormat.type;
+
+    options = {
+      name = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Name of the datasource. Required.";
+      };
+      type = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Datasource type. Required.";
+      };
+      access = mkOption {
+        type = types.enum [ "proxy" "direct" ];
+        default = "proxy";
+        description = lib.mdDoc "Access mode. proxy or direct (Server or Browser in the UI). Required.";
+      };
+      uid = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "Custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically.";
+      };
+      url = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Url of the datasource.";
+      };
+      editable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Allow users to edit datasources from the UI.";
+      };
+      jsonData = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = lib.mdDoc "Extra data for datasource plugins.";
+      };
+      secureJsonData = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = lib.mdDoc ''
+          Datasource specific secure configuration. Please note that the contents of this option
+          will end up in a world-readable Nix store. Use the file provider
+          pointing at a reasonably secured file in the local filesystem
+          to work around that. Look at the documentation for details:
+          <https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider>
+        '';
+      };
+    };
+  };
+
+  # https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards
+  grafanaTypes.dashboardConfig = types.submodule {
+    freeformType = provisioningSettingsFormat.type;
+
+    options = {
+      name = mkOption {
+        type = types.str;
+        default = "default";
+        description = lib.mdDoc "A unique provider name.";
+      };
+      type = mkOption {
+        type = types.str;
+        default = "file";
+        description = lib.mdDoc "Dashboard provider type.";
+      };
+      options.path = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Path grafana will watch for dashboards. Required when using the 'file' type.";
+      };
+    };
+  };
+
+  grafanaTypes.notifierConfig = types.submodule {
+    options = {
+      name = mkOption {
+        type = types.str;
+        default = "default";
+        description = lib.mdDoc "Notifier name.";
+      };
+      type = mkOption {
+        type = types.enum [ "dingding" "discord" "email" "googlechat" "hipchat" "kafka" "line" "teams" "opsgenie" "pagerduty" "prometheus-alertmanager" "pushover" "sensu" "sensugo" "slack" "telegram" "threema" "victorops" "webhook" ];
+        description = lib.mdDoc "Notifier type.";
+      };
+      uid = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Unique notifier identifier.";
+      };
+      org_id = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc "Organization ID.";
+      };
+      org_name = mkOption {
+        type = types.str;
+        default = "Main Org.";
+        description = lib.mdDoc "Organization name.";
+      };
+      is_default = mkOption {
+        type = types.bool;
+        description = lib.mdDoc "Is the default notifier.";
+        default = false;
+      };
+      send_reminder = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Should the notifier be sent reminder notifications while alerts continue to fire.";
+      };
+      frequency = mkOption {
+        type = types.str;
+        default = "5m";
+        description = lib.mdDoc "How frequently should the notifier be sent reminders.";
+      };
+      disable_resolve_message = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Turn off the message that sends when an alert returns to OK.";
+      };
+      settings = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = lib.mdDoc "Settings for the notifier type.";
+      };
+      secure_settings = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = lib.mdDoc ''
+          Secure settings for the notifier type. Please note that the contents of this option
+          will end up in a world-readable Nix store. Use the file provider
+          pointing at a reasonably secured file in the local filesystem
+          to work around that. Look at the documentation for details:
+          <https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider>
+        '';
+      };
+    };
+  };
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "grafana" "protocol" ] [ "services" "grafana" "settings" "server" "protocol" ])
+    (mkRenamedOptionModule [ "services" "grafana" "addr" ] [ "services" "grafana" "settings" "server" "http_addr" ])
+    (mkRenamedOptionModule [ "services" "grafana" "port" ] [ "services" "grafana" "settings" "server" "http_port" ])
+    (mkRenamedOptionModule [ "services" "grafana" "domain" ] [ "services" "grafana" "settings" "server" "domain" ])
+    (mkRenamedOptionModule [ "services" "grafana" "rootUrl" ] [ "services" "grafana" "settings" "server" "root_url" ])
+    (mkRenamedOptionModule [ "services" "grafana" "staticRootPath" ] [ "services" "grafana" "settings" "server" "static_root_path" ])
+    (mkRenamedOptionModule [ "services" "grafana" "certFile" ] [ "services" "grafana" "settings" "server" "cert_file" ])
+    (mkRenamedOptionModule [ "services" "grafana" "certKey" ] [ "services" "grafana" "settings" "server" "cert_key" ])
+    (mkRenamedOptionModule [ "services" "grafana" "socket" ] [ "services" "grafana" "settings" "server" "socket" ])
+    (mkRenamedOptionModule [ "services" "grafana" "database" "type" ] [ "services" "grafana" "settings" "database" "type" ])
+    (mkRenamedOptionModule [ "services" "grafana" "database" "host" ] [ "services" "grafana" "settings" "database" "host" ])
+    (mkRenamedOptionModule [ "services" "grafana" "database" "name" ] [ "services" "grafana" "settings" "database" "name" ])
+    (mkRenamedOptionModule [ "services" "grafana" "database" "user" ] [ "services" "grafana" "settings" "database" "user" ])
+    (mkRenamedOptionModule [ "services" "grafana" "database" "password" ] [ "services" "grafana" "settings" "database" "password" ])
+    (mkRenamedOptionModule [ "services" "grafana" "database" "path" ] [ "services" "grafana" "settings" "database" "path" ])
+    (mkRenamedOptionModule [ "services" "grafana" "database" "connMaxLifetime" ] [ "services" "grafana" "settings" "database" "conn_max_lifetime" ])
+    (mkRenamedOptionModule [ "services" "grafana" "security" "adminUser" ] [ "services" "grafana" "settings" "security" "admin_user" ])
+    (mkRenamedOptionModule [ "services" "grafana" "security" "adminPassword" ] [ "services" "grafana" "settings" "security" "admin_password" ])
+    (mkRenamedOptionModule [ "services" "grafana" "security" "secretKey" ] [ "services" "grafana" "settings" "security" "secret_key" ])
+    (mkRenamedOptionModule [ "services" "grafana" "server" "serveFromSubPath" ] [ "services" "grafana" "settings" "server" "serve_from_sub_path" ])
+    (mkRenamedOptionModule [ "services" "grafana" "smtp" "enable" ] [ "services" "grafana" "settings" "smtp" "enabled" ])
+    (mkRenamedOptionModule [ "services" "grafana" "smtp" "user" ] [ "services" "grafana" "settings" "smtp" "user" ])
+    (mkRenamedOptionModule [ "services" "grafana" "smtp" "password" ] [ "services" "grafana" "settings" "smtp" "password" ])
+    (mkRenamedOptionModule [ "services" "grafana" "smtp" "fromAddress" ] [ "services" "grafana" "settings" "smtp" "from_address" ])
+    (mkRenamedOptionModule [ "services" "grafana" "users" "allowSignUp" ] [ "services" "grafana" "settings" "users" "allow_sign_up" ])
+    (mkRenamedOptionModule [ "services" "grafana" "users" "allowOrgCreate" ] [ "services" "grafana" "settings" "users" "allow_org_create" ])
+    (mkRenamedOptionModule [ "services" "grafana" "users" "autoAssignOrg" ] [ "services" "grafana" "settings" "users" "auto_assign_org" ])
+    (mkRenamedOptionModule [ "services" "grafana" "users" "autoAssignOrgRole" ] [ "services" "grafana" "settings" "users" "auto_assign_org_role" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "disableLoginForm" ] [ "services" "grafana" "settings" "auth" "disable_login_form" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "anonymous" "enable" ] [ "services" "grafana" "settings" "auth.anonymous" "enabled" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "anonymous" "org_name" ] [ "services" "grafana" "settings" "auth.anonymous" "org_name" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "anonymous" "org_role" ] [ "services" "grafana" "settings" "auth.anonymous" "org_role" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "azuread" "enable" ] [ "services" "grafana" "settings" "auth.azuread" "enabled" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "azuread" "allowSignUp" ] [ "services" "grafana" "settings" "auth.azuread" "allow_sign_up" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "azuread" "clientId" ] [ "services" "grafana" "settings" "auth.azuread" "client_id" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "azuread" "allowedDomains" ] [ "services" "grafana" "settings" "auth.azuread" "allowed_domains" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "azuread" "allowedGroups" ] [ "services" "grafana" "settings" "auth.azuread" "allowed_groups" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "google" "enable" ] [ "services" "grafana" "settings" "auth.google" "enabled" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "google" "allowSignUp" ] [ "services" "grafana" "settings" "auth.google" "allow_sign_up" ])
+    (mkRenamedOptionModule [ "services" "grafana" "auth" "google" "clientId" ] [ "services" "grafana" "settings" "auth.google" "client_id" ])
+    (mkRenamedOptionModule [ "services" "grafana" "analytics" "reporting" "enable" ] [ "services" "grafana" "settings" "analytics" "reporting_enabled" ])
+
+    (mkRemovedOptionModule [ "services" "grafana" "database" "passwordFile" ] ''
+      This option has been removed. Use 'services.grafana.settings.database.password' with file provider instead.
+    '')
+    (mkRemovedOptionModule [ "services" "grafana" "security" "adminPasswordFile" ] ''
+      This option has been removed. Use 'services.grafana.settings.security.admin_password' with file provider instead.
+    '')
+    (mkRemovedOptionModule [ "services" "grafana" "security" "secretKeyFile" ] ''
+      This option has been removed. Use 'services.grafana.settings.security.secret_key' with file provider instead.
+    '')
+    (mkRemovedOptionModule [ "services" "grafana" "smtp" "passwordFile" ] ''
+      This option has been removed. Use 'services.grafana.settings.smtp.password' with file provider instead.
+    '')
+    (mkRemovedOptionModule [ "services" "grafana" "auth" "azuread" "clientSecretFile" ] ''
+      This option has been removed. Use 'services.grafana.settings."auth.azuread".client_secret' with file provider instead.
+    '')
+    (mkRemovedOptionModule [ "services" "grafana" "auth" "google" "clientSecretFile" ] ''
+      This option has been removed. Use 'services.grafana.settings."auth.google".client_secret' with file provider instead.
+    '')
+    (mkRemovedOptionModule [ "services" "grafana" "extraOptions" ] ''
+      This option has been removed. Use 'services.grafana.settings' instead. For a detailed migration guide, please
+      review the release notes of NixOS 22.11.
+    '')
+
+    (mkRemovedOptionModule [ "services" "grafana" "auth" "azuread" "tenantId" ] "This option has been deprecated upstream.")
+  ];
+
+  options.services.grafana = {
+    enable = mkEnableOption (lib.mdDoc "grafana");
+
+    declarativePlugins = mkOption {
+      type = with types; nullOr (listOf path);
+      default = null;
+      description = lib.mdDoc "If non-null, then a list of packages containing Grafana plugins to install. If set, plugins cannot be manually installed.";
+      example = literalExpression "with pkgs.grafanaPlugins; [ grafana-piechart-panel ]";
+      # Make sure each plugin is added only once; otherwise building
+      # the link farm fails, since the same path is added multiple
+      # times.
+      apply = x: if isList x then lib.unique x else x;
+    };
+
+    package = mkOption {
+      description = lib.mdDoc "Package to use.";
+      default = pkgs.grafana;
+      defaultText = literalExpression "pkgs.grafana";
+      type = types.package;
+    };
+
+    dataDir = mkOption {
+      description = lib.mdDoc "Data directory.";
+      default = "/var/lib/grafana";
+      type = types.path;
+    };
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Grafana settings. See <https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/>
+        for available options. INI format is used.
+      '';
+      type = types.submodule {
+        freeformType = settingsFormatIni.type;
+
+        options = {
+          paths = {
+            plugins = mkOption {
+              description = lib.mdDoc "Directory where grafana will automatically scan and look for plugins";
+              default = if (cfg.declarativePlugins == null) then "${cfg.dataDir}/plugins" else declarativePlugins;
+              defaultText = literalExpression "if (cfg.declarativePlugins == null) then \"\${cfg.dataDir}/plugins\" else declarativePlugins";
+              type = types.path;
+            };
+
+            provisioning = mkOption {
+              description = lib.mdDoc ''
+                Folder that contains provisioning config files that grafana will apply on startup and while running.
+                Don't change the value of this option if you are planning to use `services.grafana.provision` options.
+              '';
+              default = provisionConfDir;
+              defaultText = "directory with links to files generated from services.grafana.provision";
+              type = types.path;
+            };
+          };
+
+          server = {
+            protocol = mkOption {
+              description = lib.mdDoc "Which protocol to listen.";
+              default = "http";
+              type = types.enum [ "http" "https" "h2" "socket" ];
+            };
+
+            http_addr = mkOption {
+              type = types.str;
+              default = "127.0.0.1";
+              description = lib.mdDoc ''
+                Listening address.
+
+                ::: {.note}
+                This setting intentionally varies from upstream's default to be a bit more secure by default.
+                :::
+              '';
+            };
+
+            http_port = mkOption {
+              description = lib.mdDoc "Listening port.";
+              default = 3000;
+              type = types.port;
+            };
+
+            domain = mkOption {
+              description = lib.mdDoc ''
+                The public-facing domain name used to access Grafana from a browser.
+
+                This setting is only used in the default value of the `root_url` setting.
+                If you set the latter manually, this option does not have to be specified.
+              '';
+              default = "localhost";
+              type = types.str;
+            };
+
+            enforce_domain = mkOption {
+              description = lib.mdDoc ''
+                Redirect to correct domain if the host header does not match the domain.
+                Prevents DNS rebinding attacks.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            root_url = mkOption {
+              description = lib.mdDoc ''
+                This is the full URL used to access Grafana from a web browser.
+                This is important if you use Google or GitHub OAuth authentication (for the callback URL to be correct).
+
+                This setting is also important if you have a reverse proxy in front of Grafana that exposes it through a subpath.
+                In that case add the subpath to the end of this URL setting.
+              '';
+              default = "%(protocol)s://%(domain)s:%(http_port)s/";
+              type = types.str;
+            };
+
+            serve_from_sub_path = mkOption {
+              description = lib.mdDoc ''
+                Serve Grafana from subpath specified in the `root_url` setting.
+                By default it is set to `false` for compatibility reasons.
+
+                By enabling this setting and using a subpath in `root_url` above,
+                e.g. `root_url = "http://localhost:3000/grafana"`,
+                Grafana is accessible on `http://localhost:3000/grafana`.
+                If accessed without the subpath, Grafana will redirect to a URL with the subpath.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            router_logging = mkOption {
+              description = lib.mdDoc ''
+                Set to `true` for Grafana to log all HTTP requests (not just errors).
+                These are logged as Info level events to the Grafana log.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            static_root_path = mkOption {
+              description = lib.mdDoc "Root path for static assets.";
+              default = "${cfg.package}/share/grafana/public";
+              defaultText = literalExpression ''"''${package}/share/grafana/public"'';
+              type = types.str;
+            };
+
+            enable_gzip = mkOption {
+              description = lib.mdDoc ''
+                Set this option to `true` to enable HTTP compression; this can improve transfer speed and bandwidth utilization.
+                It is recommended that most users set it to `true`. By default it is set to `false` for compatibility reasons.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            cert_file = mkOption {
+              description = lib.mdDoc ''
+                Path to the certificate file (if `protocol` is set to `https` or `h2`).
+              '';
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            cert_key = mkOption {
+              description = lib.mdDoc ''
+                Path to the certificate key file (if `protocol` is set to `https` or `h2`).
+              '';
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            socket_gid = mkOption {
+              description = lib.mdDoc ''
+                GID where the socket should be set when `protocol=socket`.
+                Make sure that the target group is in the group of the Grafana process and that the Grafana process is the file owner before you change this setting.
+                It is recommended to set the GID to the GID of the HTTP server user.
+                Not set when the value is -1.
+              '';
+              default = -1;
+              type = types.int;
+            };
+
+            socket_mode = mkOption {
+              description = lib.mdDoc ''
+                Mode where the socket should be set when `protocol=socket`.
+                Make sure that Grafana process is the file owner before you change this setting.
+              '';
+              # I assume this value is interpreted as octal literal by grafana.
+              # If this was an int, people following tutorials or porting their
+              # old config could stumble across nix not having octal literals.
+              default = "0660";
+              type = types.str;
+            };
+
+            socket = mkOption {
+              description = lib.mdDoc ''
+                Path where the socket should be created when `protocol=socket`.
+                Make sure that Grafana has appropriate permissions before you change this setting.
+              '';
+              default = "/run/grafana/grafana.sock";
+              type = types.str;
+            };
+
+            cdn_url = mkOption {
+              description = lib.mdDoc ''
+                Specify a full HTTP URL address to the root of your Grafana CDN assets.
+                Grafana will add edition and version paths.
+
+                For example, given a CDN URL like `https://cdn.myserver.com`,
+                Grafana will try to load a JavaScript file from `https://cdn.myserver.com/grafana-oss/7.4.0/public/build/app.<hash>.js`.
+              '';
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            read_timeout = mkOption {
+              description = lib.mdDoc ''
+                Sets the maximum time, using a duration format (5s/5m/5ms),
+                before timing out the read of an incoming request and closing idle connections.
+                `0` means there is no timeout for reading the request.
+              '';
+              default = "0";
+              type = types.str;
+            };
+          };
+
+          database = {
+            type = mkOption {
+              description = lib.mdDoc "Database type.";
+              default = "sqlite3";
+              type = types.enum [ "mysql" "sqlite3" "postgres" ];
+            };
+
+            host = mkOption {
+              description = lib.mdDoc ''
+                Only applicable to MySQL or Postgres.
+                Includes the IP or hostname and port, or, in case of Unix sockets, the path to it.
+                For example, for MySQL running on the same host as Grafana: `host = "127.0.0.1:3306"`
+                or with Unix sockets: `host = "/var/run/mysqld/mysqld.sock"`
+              '';
+              default = "127.0.0.1:3306";
+              type = types.str;
+            };
+
+            name = mkOption {
+              description = lib.mdDoc "The name of the Grafana database.";
+              default = "grafana";
+              type = types.str;
+            };
+
+            user = mkOption {
+              description = lib.mdDoc "The database user (not applicable for `sqlite3`).";
+              default = "root";
+              type = types.str;
+            };
+
+            password = mkOption {
+              description = lib.mdDoc ''
+                The database user's password (not applicable for `sqlite3`).
+
+                Please note that the contents of this option
+                will end up in a world-readable Nix store. Use the file provider
+                pointing at a reasonably secured file in the local filesystem
+                to work around that. Look at the documentation for details:
+                <https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider>
+              '';
+              default = "";
+              type = types.str;
+            };
+
+            max_idle_conn = mkOption {
+              description = lib.mdDoc "The maximum number of connections in the idle connection pool.";
+              default = 2;
+              type = types.int;
+            };
+
+            max_open_conn = mkOption {
+              description = lib.mdDoc "The maximum number of open connections to the database.";
+              default = 0;
+              type = types.int;
+            };
+
+            conn_max_lifetime = mkOption {
+              description = lib.mdDoc ''
+                Sets the maximum amount of time a connection may be reused.
+                The default is 14400 (which means 14400 seconds or 4 hours).
+                For MySQL, this setting should be shorter than the `wait_timeout` variable.
+              '';
+              default = 14400;
+              type = types.int;
+            };
+
+            locking_attempt_timeout_sec = mkOption {
+              description = lib.mdDoc ''
+                For `mysql`, if the `migrationLocking` feature toggle is set,
+                specify the time (in seconds) to wait before failing to lock the database for the migrations.
+              '';
+              default = 0;
+              type = types.int;
+            };
+
+            log_queries = mkOption {
+              description = lib.mdDoc "Set to `true` to log the sql calls and execution times";
+              default = false;
+              type = types.bool;
+            };
+
+            ssl_mode = mkOption {
+              description = lib.mdDoc ''
+                For Postgres, use either `disable`, `require` or `verify-full`.
+                For MySQL, use either `true`, `false`, or `skip-verify`.
+              '';
+              default = "disable";
+              type = types.enum [ "disable" "require" "verify-full" "true" "false" "skip-verify" ];
+            };
+
+            isolation_level = mkOption {
+              description = lib.mdDoc ''
+                Only the MySQL driver supports isolation levels in Grafana.
+                In case the value is empty, the driver's default isolation level is applied.
+              '';
+              default = null;
+              type = types.nullOr (types.enum [ "READ-UNCOMMITTED" "READ-COMMITTED" "REPEATABLE-READ" "SERIALIZABLE" ]);
+            };
+
+            ca_cert_path = mkOption {
+              description = lib.mdDoc "The path to the CA certificate to use.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            client_key_path = mkOption {
+              description = lib.mdDoc "The path to the client key. Only if server requires client authentication.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            client_cert_path = mkOption {
+              description = lib.mdDoc "The path to the client cert. Only if server requires client authentication.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            server_cert_name = mkOption {
+              description = lib.mdDoc ''
+                The common name field of the certificate used by the `mysql` or `postgres` server.
+                Not necessary if `ssl_mode` is set to `skip-verify`.
+              '';
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            path = mkOption {
+              description = lib.mdDoc "Only applicable to `sqlite3` database. The file path where the database will be stored.";
+              default = "${cfg.dataDir}/data/grafana.db";
+              defaultText = literalExpression ''"''${config.${opt.dataDir}}/data/grafana.db"'';
+              type = types.path;
+            };
+
+            cache_mode = mkOption {
+              description = lib.mdDoc ''
+                For `sqlite3` only.
+                [Shared cache](https://www.sqlite.org/sharedcache.html) setting used for connecting to the database.
+              '';
+              default = "private";
+              type = types.enum [ "private" "shared" ];
+            };
+
+            wal = mkOption {
+              description = lib.mdDoc ''
+                For `sqlite3` only.
+                Setting to enable/disable [Write-Ahead Logging](https://sqlite.org/wal.html).
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            query_retries = mkOption {
+              description = lib.mdDoc ''
+                This setting applies to `sqlite3` only and controls the number of times the system retries a query when the database is locked.
+              '';
+              default = 0;
+              type = types.int;
+            };
+
+            transaction_retries = mkOption {
+              description = lib.mdDoc ''
+                This setting applies to `sqlite3` only and controls the number of times the system retries a transaction when the database is locked.
+              '';
+              default = 5;
+              type = types.int;
+            };
+
+            # TODO Add "instrument_queries" option when upgrading to grafana 10.0
+            # instrument_queries = mkOption {
+            #   description = lib.mdDoc "Set to `true` to add metrics and tracing for database queries.";
+            #   default = false;
+            #   type = types.bool;
+            # };
+          };
+
+          security = {
+            disable_initial_admin_creation = mkOption {
+              description = lib.mdDoc "Disable creation of admin user on first start of Grafana.";
+              default = false;
+              type = types.bool;
+            };
+
+            admin_user = mkOption {
+              description = lib.mdDoc "Default admin username.";
+              default = "admin";
+              type = types.str;
+            };
+
+            admin_password = mkOption {
+              description = lib.mdDoc ''
+                Default admin password. Please note that the contents of this option
+                will end up in a world-readable Nix store. Use the file provider
+                pointing at a reasonably secured file in the local filesystem
+                to work around that. Look at the documentation for details:
+                <https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider>
+              '';
+              default = "admin";
+              type = types.str;
+            };
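+            # A minimal sketch of the file provider mentioned above (hypothetical
+            # secret path); Grafana itself expands $__file{...} at startup:
+            #   services.grafana.settings.security.admin_password = "$__file{/run/secrets/grafana-admin-password}";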
+
+            admin_email = mkOption {
+              description = lib.mdDoc "The email of the default Grafana Admin, created on startup.";
+              default = "admin@localhost";
+              type = types.str;
+            };
+
+            secret_key = mkOption {
+              description = lib.mdDoc ''
+                Secret key used for signing. Please note that the contents of this option
+                will end up in a world-readable Nix store. Use the file provider
+                pointing at a reasonably secured file in the local filesystem
+                to work around that. Look at the documentation for details:
+                <https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider>
+              '';
+              default = "SW2YcwTIb9zpOOhoPsMm";
+              type = types.str;
+            };
+
+            disable_gravatar = mkOption {
+              description = lib.mdDoc "Set to `true` to disable the use of Gravatar for user profile images.";
+              default = false;
+              type = types.bool;
+            };
+
+            data_source_proxy_whitelist = mkOption {
+              description = lib.mdDoc ''
+                Define a whitelist of allowed IP addresses or domains, with ports,
+                to be used in data source URLs with the Grafana data source proxy.
+                Format: `ip_or_domain:port` separated by spaces.
+                PostgreSQL, MySQL, and MSSQL data sources do not use the proxy and are therefore unaffected by this setting.
+              '';
+              default = [ ];
+              type = types.oneOf [ types.str (types.listOf types.str) ];
+            };
+
+            disable_brute_force_login_protection = mkOption {
+              description = lib.mdDoc "Set to `true` to disable [brute force login protection](https://cheatsheetseries.owasp.org/cheatsheets/Authentication_Cheat_Sheet.html#account-lockout).";
+              default = false;
+              type = types.bool;
+            };
+
+            cookie_secure = mkOption {
+              description = lib.mdDoc "Set to `true` if you host Grafana behind HTTPS.";
+              default = false;
+              type = types.bool;
+            };
+
+            cookie_samesite = mkOption {
+              description = lib.mdDoc ''
+                Sets the `SameSite` cookie attribute and prevents the browser from sending this cookie along with cross-site requests.
+                The main goal is to mitigate the risk of cross-origin information leakage.
+                This setting also provides some protection against cross-site request forgery attacks (CSRF),
+                [read more about SameSite here](https://owasp.org/www-community/SameSite).
+                Using value `disabled` does not add any `SameSite` attribute to cookies.
+              '';
+              default = "lax";
+              type = types.enum [ "lax" "strict" "none" "disabled" ];
+            };
+
+            allow_embedding = mkOption {
+              description = lib.mdDoc ''
+                When `false`, the HTTP header `X-Frame-Options: deny` will be set in Grafana HTTP responses
+                which will instruct browsers to not allow rendering Grafana in a `<frame>`, `<iframe>`, `<embed>` or `<object>`.
+                The main goal is to mitigate the risk of [Clickjacking](https://owasp.org/www-community/attacks/Clickjacking).
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            strict_transport_security = mkOption {
+              description = lib.mdDoc ''
+                Set to `true` if you want to enable HTTP `Strict-Transport-Security` (HSTS) response header.
+                Only use this when HTTPS is enabled in your configuration,
+                or when there is another upstream system that ensures your application does HTTPS (like a frontend load balancer).
+                HSTS tells browsers that the site should only be accessed using HTTPS.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            strict_transport_security_max_age_seconds = mkOption {
+              description = lib.mdDoc ''
+                Sets how long a browser should cache HSTS in seconds.
+                Only applied if `strict_transport_security` is enabled.
+              '';
+              default = 86400;
+              type = types.int;
+            };
+
+            strict_transport_security_preload = mkOption {
+              description = lib.mdDoc ''
+                Set to `true` to enable HSTS `preloading` option.
+                Only applied if `strict_transport_security` is enabled.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            strict_transport_security_subdomains = mkOption {
+              description = lib.mdDoc ''
+                Set to `true` to enable HSTS `includeSubDomains` option.
+                Only applied if `strict_transport_security` is enabled.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            x_content_type_options = mkOption {
+              description = lib.mdDoc ''
+                Set to `false` to disable the `X-Content-Type-Options` response header.
+                The `X-Content-Type-Options` response HTTP header is a marker used by the server
+                to indicate that the MIME types advertised in the `Content-Type` headers should not be changed and should be followed.
+              '';
+              default = true;
+              type = types.bool;
+            };
+
+            x_xss_protection = mkOption {
+              description = lib.mdDoc ''
+                Set to `false` to disable the `X-XSS-Protection` header,
+                which tells browsers to stop pages from loading when they detect reflected cross-site scripting (XSS) attacks.
+              '';
+              default = true;
+              type = types.bool;
+            };
+
+            content_security_policy = mkOption {
+              description = lib.mdDoc ''
+                Set to `true` to add the `Content-Security-Policy` header to your requests.
+                CSP allows you to control which resources the user agent can load and helps prevent XSS attacks.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            content_security_policy_report_only = mkOption {
+              description = lib.mdDoc ''
+                Set to `true` to add the `Content-Security-Policy-Report-Only` header to your requests.
+                CSP in Report Only mode enables you to experiment with policies by monitoring their effects without enforcing them.
+                You can enable both policies simultaneously.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            # The options content_security_policy_template and
+            # content_security_policy_report_only_template are missing because I'm not sure
+            # how exactly the quoting of the default value works. See also
+            # https://github.com/grafana/grafana/blob/cb7e18938b8eb6860a64b91aaba13a7eb31bc95b/conf/defaults.ini#L364
+            # https://github.com/grafana/grafana/blob/cb7e18938b8eb6860a64b91aaba13a7eb31bc95b/conf/defaults.ini#L373
+
+            # These two options are lists joined with spaces:
+            # https://github.com/grafana/grafana/blob/916d9793aa81c2990640b55a15dee0db6b525e41/pkg/middleware/csrf/csrf.go#L37-L38
+
+            csrf_trusted_origins = mkOption {
+              description = lib.mdDoc ''
+                List of additional allowed URLs to pass by the CSRF check.
+                Suggested when authentication comes from an IdP.
+              '';
+              default = [ ];
+              type = types.oneOf [ types.str (types.listOf types.str) ];
+            };
+
+            csrf_additional_headers = mkOption {
+              description = lib.mdDoc ''
+                List of allowed headers to be set by the user.
+                Suggested for use if authentication lives behind reverse proxies.
+              '';
+              default = [ ];
+              type = types.oneOf [ types.str (types.listOf types.str) ];
+            };
+          };
+
+          smtp = {
+            enabled = mkOption {
+              description = lib.mdDoc "Whether to enable SMTP.";
+              default = false;
+              type = types.bool;
+            };
+
+            host = mkOption {
+              description = lib.mdDoc "Host to connect to.";
+              default = "localhost:25";
+              type = types.str;
+            };
+
+            user = mkOption {
+              description = lib.mdDoc "User used for authentication.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            password = mkOption {
+              description = lib.mdDoc ''
+                Password used for authentication. Please note that the contents of this option
+                will end up in a world-readable Nix store. Use the file provider
+                pointing at a reasonably secured file in the local filesystem
+                to work around that. Look at the documentation for details:
+                <https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#file-provider>
+              '';
+              default = "";
+              type = types.str;
+            };
+
+            cert_file = mkOption {
+              description = lib.mdDoc "File path to a cert file.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            key_file = mkOption {
+              description = lib.mdDoc "File path to a key file.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            skip_verify = mkOption {
+              description = lib.mdDoc "Verify SSL for SMTP server.";
+              default = false;
+              type = types.bool;
+            };
+
+            from_address = mkOption {
+              description = lib.mdDoc "Address used when sending out emails.";
+              default = "admin@grafana.localhost";
+              type = types.str;
+            };
+
+            from_name = mkOption {
+              description = lib.mdDoc "Name to be used as client identity for EHLO in SMTP dialog.";
+              default = "Grafana";
+              type = types.str;
+            };
+
+            ehlo_identity = mkOption {
+              description = lib.mdDoc "Name to be used as client identity for EHLO in SMTP dialog.";
+              default = null;
+              type = types.nullOr types.str;
+            };
+
+            startTLS_policy = mkOption {
+              description = lib.mdDoc "StartTLS policy when connecting to server.";
+              default = null;
+              type = types.nullOr (types.enum [ "OpportunisticStartTLS" "MandatoryStartTLS" "NoStartTLS" ]);
+            };
+          };
+
+          users = {
+            allow_sign_up = mkOption {
+              description = lib.mdDoc ''
+                Set to `false` to prohibit users from being able to sign up / create user accounts.
+                The admin user can still create users.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            allow_org_create = mkOption {
+              description = lib.mdDoc "Set to `false` to prohibit users from creating new organizations.";
+              default = false;
+              type = types.bool;
+            };
+
+            auto_assign_org = mkOption {
+              description = lib.mdDoc ''
+                Set to `true` to automatically add new users to the main organization (id 1).
+                When set to `false`, new users automatically cause a new organization to be created for that new user.
+                The organization will be created even if the `allow_org_create` setting is set to `false`.
+              '';
+              default = true;
+              type = types.bool;
+            };
+
+            auto_assign_org_id = mkOption {
+              description = lib.mdDoc ''
+                Set this value to automatically add new users to the provided org.
+                This requires `auto_assign_org` to be set to `true`.
+                Please make sure that this organization already exists.
+              '';
+              default = 1;
+              type = types.int;
+            };
+
+            auto_assign_org_role = mkOption {
+              description = lib.mdDoc ''
+                The role new users will be assigned for the main organization (if the `auto_assign_org` setting is set to `true`).
+              '';
+              default = "Viewer";
+              type = types.enum [ "Viewer" "Editor" "Admin" ];
+            };
+
+            verify_email_enabled = mkOption {
+              description = lib.mdDoc "Require email validation before sign up completes.";
+              default = false;
+              type = types.bool;
+            };
+
+            login_hint = mkOption {
+              description = lib.mdDoc "Text used as placeholder text on login page for login/username input.";
+              default = "email or username";
+              type = types.str;
+            };
+
+            password_hint = mkOption {
+              description = lib.mdDoc "Text used as placeholder text on login page for password input.";
+              default = "password";
+              type = types.str;
+            };
+
+            default_theme = mkOption {
+              description = lib.mdDoc "Sets the default UI theme. `system` matches the user's system theme.";
+              default = "dark";
+              type = types.enum [ "dark" "light" "system" ];
+            };
+
+            default_language = mkOption {
+              description = lib.mdDoc "This setting configures the default UI language, which must be a supported IETF language tag, such as `en-US`.";
+              default = "en-US";
+              type = types.str;
+            };
+
+            home_page = mkOption {
+              description = lib.mdDoc ''
+                Path to a custom home page.
+                Users are only redirected to this if the default home dashboard is used.
+                It should match a frontend route and contain a leading slash.
+              '';
+              default = "";
+              type = types.str;
+            };
+
+            viewers_can_edit = mkOption {
+              description = lib.mdDoc ''
+                Viewers can access and use Explore and perform temporary edits on panels in dashboards they have access to.
+                They cannot save their changes.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            editors_can_admin = mkOption {
+              description = lib.mdDoc "Editors can administrate dashboards, folders and teams they create.";
+              default = false;
+              type = types.bool;
+            };
+
+            user_invite_max_lifetime_duration = mkOption {
+              description = lib.mdDoc ''
+                The duration in time a user invitation remains valid before expiring.
+                This setting should be expressed as a duration.
+                Examples: `6h` (hours), `2d` (days), `1w` (week).
+                The minimum supported duration is `15m` (15 minutes).
+              '';
+              default = "24h";
+              type = types.str;
+            };
+
+            # Lists are joined via space, so this option can't be a list.
+            # Users have to manually join their values.
+            hidden_users = mkOption {
+              description = lib.mdDoc ''
+                This is a comma-separated list of usernames.
+                Users specified here are hidden in the Grafana UI.
+                They are still visible to Grafana administrators and to themselves.
+              '';
+              default = "";
+              type = types.str;
+            };
+          };
+
+          analytics = {
+            reporting_enabled = mkOption {
+              description = lib.mdDoc ''
+                When enabled, Grafana will send anonymous usage statistics to `stats.grafana.org`.
+                No IP addresses are being tracked, only simple counters to track running instances, versions, dashboard and error counts.
+                Counters are sent every 24 hours.
+              '';
+              default = true;
+              type = types.bool;
+            };
+
+            check_for_updates = mkOption {
+              description = lib.mdDoc ''
+                When set to `false`, disables checking for new versions of Grafana from Grafana's GitHub repository.
+                When enabled, the check for a new version runs every 10 minutes.
+                It will notify, via the UI, when a new version is available.
+                The check itself will not prompt any auto-updates of the Grafana software, nor will it send any sensitive information.
+              '';
+              default = false;
+              type = types.bool;
+            };
+
+            check_for_plugin_updates = mkOption {
+              description = lib.mdDoc ''
+                When set to `false`, disables checking for new versions of installed plugins from https://grafana.com.
+                When enabled, the check for a new plugin runs every 10 minutes.
+                It will notify, via the UI, when a new plugin update exists.
+                The check itself will not prompt any auto-updates of the plugin, nor will it send any sensitive information.
+              '';
+              default = cfg.declarativePlugins == null;
+              defaultText = literalExpression "cfg.declarativePlugins == null";
+              type = types.bool;
+            };
+
+            feedback_links_enabled = mkOption {
+              description = lib.mdDoc "Set to `false` to remove all feedback links from the UI.";
+              default = true;
+              type = types.bool;
+            };
+          };
+        };
+      };
+    };
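+    # A minimal usage sketch for the freeform settings above (hypothetical
+    # values); any INI key Grafana understands can be set the same way:
+    #   services.grafana = {
+    #     enable = true;
+    #     settings.server = {
+    #       domain = "grafana.example.org";
+    #       http_addr = "0.0.0.0";
+    #       http_port = 3000;
+    #     };
+    #   };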
+
+    provision = {
+      enable = mkEnableOption (lib.mdDoc "provision");
+
+      datasources = mkOption {
+        description = lib.mdDoc ''
+          Declaratively provision Grafana's datasources.
+        '';
+        default = { };
+        type = types.submodule {
+          options.settings = mkOption {
+            description = lib.mdDoc ''
+              Grafana datasource configuration in Nix. Can't be used with
+              [](#opt-services.grafana.provision.datasources.path) simultaneously. See
+              <https://grafana.com/docs/grafana/latest/administration/provisioning/#data-sources>
+              for supported options.
+            '';
+            default = null;
+            type = types.nullOr (types.submodule {
+              options = {
+                apiVersion = mkOption {
+                  description = lib.mdDoc "Config file version.";
+                  default = 1;
+                  type = types.int;
+                };
+
+                datasources = mkOption {
+                  description = lib.mdDoc "List of datasources to insert/update.";
+                  default = [ ];
+                  type = types.listOf grafanaTypes.datasourceConfig;
+                };
+
+                deleteDatasources = mkOption {
+                  description = lib.mdDoc "List of datasources that should be deleted from the database.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    options.name = mkOption {
+                      description = lib.mdDoc "Name of the datasource to delete.";
+                      type = types.str;
+                    };
+
+                    options.orgId = mkOption {
+                      description = lib.mdDoc "Organization ID of the datasource to delete.";
+                      type = types.int;
+                    };
+                  });
+                };
+              };
+            });
+            example = literalExpression ''
+              {
+                apiVersion = 1;
+
+                datasources = [{
+                  name = "Graphite";
+                  type = "graphite";
+                }];
+
+                deleteDatasources = [{
+                  name = "Graphite";
+                  orgId = 1;
+                }];
+              }
+            '';
+          };
+
+          options.path = mkOption {
+            description = lib.mdDoc ''
+              Path to YAML datasource configuration. Can't be used with
+              [](#opt-services.grafana.provision.datasources.settings) simultaneously.
+              Can be either a directory or a single YAML file. Will end up in the store.
+            '';
+            default = null;
+            type = types.nullOr types.path;
+          };
+        };
+      };
+
+
+      dashboards = mkOption {
+        description = lib.mdDoc ''
+          Declaratively provision Grafana's dashboards.
+        '';
+        default = { };
+        type = types.submodule {
+          options.settings = mkOption {
+            description = lib.mdDoc ''
+              Grafana dashboard configuration in Nix. Can't be used with
+              [](#opt-services.grafana.provision.dashboards.path) simultaneously. See
+              <https://grafana.com/docs/grafana/latest/administration/provisioning/#dashboards>
+              for supported options.
+            '';
+            default = null;
+            type = types.nullOr (types.submodule {
+              options.apiVersion = mkOption {
+                description = lib.mdDoc "Config file version.";
+                default = 1;
+                type = types.int;
+              };
+
+              options.providers = mkOption {
+                description = lib.mdDoc "List of dashboards to insert/update.";
+                default = [ ];
+                type = types.listOf grafanaTypes.dashboardConfig;
+              };
+            });
+            example = literalExpression ''
+              {
+                apiVersion = 1;
+
+                providers = [{
+                    name = "default";
+                    options.path = "/var/lib/grafana/dashboards";
+                }];
+              }
+            '';
+          };
+
+          options.path = mkOption {
+            description = lib.mdDoc ''
+              Path to YAML dashboard configuration. Can't be used with
+              [](#opt-services.grafana.provision.dashboards.settings) simultaneously.
+              Can be either a directory or a single YAML file. Will end up in the store.
+            '';
+            default = null;
+            type = types.nullOr types.path;
+          };
+        };
+      };
+
+
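+      # A single notifier entry might look like this (hypothetical values;
+      # `settings` is passed through to the notifier type):
+      #   {
+      #     name = "ops-email";
+      #     type = "email";
+      #     uid = "ops-email-uid";
+      #     settings.addresses = "ops@example.org";
+      #   }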
+      notifiers = mkOption {
+        description = lib.mdDoc "Grafana notifier configuration.";
+        default = [ ];
+        type = types.listOf grafanaTypes.notifierConfig;
+        apply = x: map _filter x;
+      };
+
+
+      alerting = {
+        rules = {
+          path = mkOption {
+            description = lib.mdDoc ''
+              Path to YAML rules configuration. Can't be used with
+              [](#opt-services.grafana.provision.alerting.rules.settings) simultaneously.
+              Can be either a directory or a single YAML file. Will end up in the store.
+            '';
+            default = null;
+            type = types.nullOr types.path;
+          };
+
+          settings = mkOption {
+            description = lib.mdDoc ''
+              Grafana rules configuration in Nix. Can't be used with
+              [](#opt-services.grafana.provision.alerting.rules.path) simultaneously. See
+              <https://grafana.com/docs/grafana/latest/administration/provisioning/#rules>
+              for supported options.
+            '';
+            default = null;
+            type = types.nullOr (types.submodule {
+              options = {
+                apiVersion = mkOption {
+                  description = lib.mdDoc "Config file version.";
+                  default = 1;
+                  type = types.int;
+                };
+
+                groups = mkOption {
+                  description = lib.mdDoc "List of rule groups to import or update.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    freeformType = provisioningSettingsFormat.type;
+
+                    options.name = mkOption {
+                      description = lib.mdDoc "Name of the rule group. Required.";
+                      type = types.str;
+                    };
+
+                    options.folder = mkOption {
+                      description = lib.mdDoc "Name of the folder the rule group will be stored in. Required.";
+                      type = types.str;
+                    };
+
+                    options.interval = mkOption {
+                      description = lib.mdDoc "Interval that the rule group should be evaluated at. Required.";
+                      type = types.str;
+                    };
+                  });
+                };
+
+                deleteRules = mkOption {
+                  description = lib.mdDoc "List of alert rule UIDs that should be deleted.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    options.orgId = mkOption {
+                      description = lib.mdDoc "Organization ID, default = 1";
+                      default = 1;
+                      type = types.int;
+                    };
+
+                    options.uid = mkOption {
+                      description = lib.mdDoc "Unique identifier for the rule. Required.";
+                      type = types.str;
+                    };
+                  });
+                };
+              };
+            });
+            example = literalExpression ''
+              {
+                apiVersion = 1;
+
+                groups = [{
+                  orgId = 1;
+                  name = "my_rule_group";
+                  folder = "my_first_folder";
+                  interval = "60s";
+                  rules = [{
+                    uid = "my_id_1";
+                    title = "my_first_rule";
+                    condition = "A";
+                    data = [{
+                      refId = "A";
+                      datasourceUid = "-100";
+                      model = {
+                        conditions = [{
+                          evaluator = {
+                            params = [ 3 ];
+                            type = "git";
+                          };
+                          operator.type = "and";
+                          query.params = [ "A" ];
+                          reducer.type = "last";
+                          type = "query";
+                        }];
+                        datasource = {
+                          type = "__expr__";
+                          uid = "-100";
+                        };
+                        expression = "1==0";
+                        intervalMs = 1000;
+                        maxDataPoints = 43200;
+                        refId = "A";
+                        type = "math";
+                      };
+                    }];
+                    dashboardUid = "my_dashboard";
+                    panelId = 123;
+                    noDataState = "Alerting";
+                    for = "60s";
+                    annotations.some_key = "some_value";
+                    labels.team = "sre_team1";
+                  }];
+                }];
+
+                deleteRules = [{
+                  orgId = 1;
+                  uid = "my_id_1";
+                }];
+              }
+            '';
+          };
+        };
+
+        contactPoints = {
+          path = mkOption {
+            description = lib.mdDoc ''
+              Path to YAML contact points configuration. Can't be used with
+              [](#opt-services.grafana.provision.alerting.contactPoints.settings) simultaneously.
+              Can be either a directory or a single YAML file. Will end up in the store.
+            '';
+            default = null;
+            type = types.nullOr types.path;
+          };
+
+          settings = mkOption {
+            description = lib.mdDoc ''
+              Grafana contact points configuration in Nix. Can't be used with
+              [](#opt-services.grafana.provision.alerting.contactPoints.path) simultaneously. See
+              <https://grafana.com/docs/grafana/latest/administration/provisioning/#contact-points>
+              for supported options.
+            '';
+            default = null;
+            type = types.nullOr (types.submodule {
+              options = {
+                apiVersion = mkOption {
+                  description = lib.mdDoc "Config file version.";
+                  default = 1;
+                  type = types.int;
+                };
+
+                contactPoints = mkOption {
+                  description = lib.mdDoc "List of contact points to import or update.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    freeformType = provisioningSettingsFormat.type;
+
+                    options.name = mkOption {
+                      description = lib.mdDoc "Name of the contact point. Required.";
+                      type = types.str;
+                    };
+                  });
+                };
+
+                deleteContactPoints = mkOption {
+                  description = lib.mdDoc "List of receivers that should be deleted.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    options.orgId = mkOption {
+                      description = lib.mdDoc "Organization ID, default = 1.";
+                      default = 1;
+                      type = types.int;
+                    };
+
+                    options.uid = mkOption {
+                      description = lib.mdDoc "Unique identifier for the receiver. Required.";
+                      type = types.str;
+                    };
+                  });
+                };
+              };
+            });
+            example = literalExpression ''
+              {
+                apiVersion = 1;
+
+                contactPoints = [{
+                  orgId = 1;
+                  name = "cp_1";
+                  receivers = [{
+                    uid = "first_uid";
+                    type = "prometheus-alertmanager";
+                    settings.url = "http://test:9000";
+                  }];
+                }];
+
+                deleteContactPoints = [{
+                  orgId = 1;
+                  uid = "first_uid";
+                }];
+              }
+            '';
+          };
+        };
+
+        policies = {
+          path = mkOption {
+            description = lib.mdDoc ''
+              Path to YAML notification policies configuration. Can't be used with
+              [](#opt-services.grafana.provision.alerting.policies.settings) simultaneously.
+              Can be either a directory or a single YAML file. Will end up in the store.
+            '';
+            default = null;
+            type = types.nullOr types.path;
+          };
+
+          settings = mkOption {
+            description = lib.mdDoc ''
+              Grafana notification policies configuration in Nix. Can't be used with
+              [](#opt-services.grafana.provision.alerting.policies.path) simultaneously. See
+              <https://grafana.com/docs/grafana/latest/administration/provisioning/#notification-policies>
+              for supported options.
+            '';
+            default = null;
+            type = types.nullOr (types.submodule {
+              options = {
+                apiVersion = mkOption {
+                  description = lib.mdDoc "Config file version.";
+                  default = 1;
+                  type = types.int;
+                };
+
+                policies = mkOption {
+                  description = lib.mdDoc "List of notification policies to import or update.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    freeformType = provisioningSettingsFormat.type;
+                  });
+                };
+
+                resetPolicies = mkOption {
+                  description = lib.mdDoc "List of orgIds that should be reset to the default policy.";
+                  default = [ ];
+                  type = types.listOf types.int;
+                };
+              };
+            });
+            example = literalExpression ''
+              {
+                apiVersion = 1;
+
+                policies = [{
+                  orgId = 1;
+                  receiver = "grafana-default-email";
+                  group_by = [ "..." ];
+                  matchers = [
+                    "alertname = Watchdog"
+                    "severity =~ \"warning|critical\""
+                  ];
+                  mute_time_intervals = [
+                    "abc"
+                  ];
+                  group_wait = "30s";
+                  group_interval = "5m";
+                  repeat_interval = "4h";
+                }];
+
+                resetPolicies = [
+                  1
+                ];
+              }
+            '';
+          };
+        };
+
+        templates = {
+          path = mkOption {
+            description = lib.mdDoc ''
+              Path to YAML templates configuration. Can't be used with
+              [](#opt-services.grafana.provision.alerting.templates.settings) simultaneously.
+              Can be either a directory or a single YAML file. Will end up in the store.
+            '';
+            default = null;
+            type = types.nullOr types.path;
+          };
+
+          settings = mkOption {
+            description = lib.mdDoc ''
+              Grafana templates configuration in Nix. Can't be used with
+              [](#opt-services.grafana.provision.alerting.templates.path) simultaneously. See
+              <https://grafana.com/docs/grafana/latest/administration/provisioning/#templates>
+              for supported options.
+            '';
+            default = null;
+            type = types.nullOr (types.submodule {
+              options = {
+                apiVersion = mkOption {
+                  description = lib.mdDoc "Config file version.";
+                  default = 1;
+                  type = types.int;
+                };
+
+                templates = mkOption {
+                  description = lib.mdDoc "List of templates to import or update.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    freeformType = provisioningSettingsFormat.type;
+
+                    options.name = mkOption {
+                      description = lib.mdDoc "Name of the template, must be unique. Required.";
+                      type = types.str;
+                    };
+
+                    options.template = mkOption {
+                      description = lib.mdDoc "Content of the template. Required.";
+                      type = types.str;
+                    };
+                  });
+                };
+
+                deleteTemplates = mkOption {
+                  description = lib.mdDoc "List of templates that should be deleted.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    options.orgId = mkOption {
+                      description = lib.mdDoc "Organization ID, default = 1.";
+                      default = 1;
+                      type = types.int;
+                    };
+
+                    options.name = mkOption {
+                      description = lib.mdDoc "Name of the template, must be unique. Required.";
+                      type = types.str;
+                    };
+                  });
+                };
+              };
+            });
+            example = literalExpression ''
+              {
+                apiVersion = 1;
+
+                templates = [{
+                  orgId = 1;
+                  name = "my_first_template";
+                  template = "Alerting with a custom text template";
+                }];
+
+                deleteTemplates = [{
+                  orgId = 1;
+                  name = "my_first_template";
+                }];
+              }
+            '';
+          };
+        };
+
+        muteTimings = {
+          path = mkOption {
+            description = lib.mdDoc ''
+              Path to YAML mute timings configuration. Can't be used with
+              [](#opt-services.grafana.provision.alerting.muteTimings.settings) simultaneously.
+              Can be either a directory or a single YAML file. Will end up in the store.
+            '';
+            default = null;
+            type = types.nullOr types.path;
+          };
+
+          settings = mkOption {
+            description = lib.mdDoc ''
+              Grafana mute timings configuration in Nix. Can't be used with
+              [](#opt-services.grafana.provision.alerting.muteTimings.path) simultaneously. See
+              <https://grafana.com/docs/grafana/latest/administration/provisioning/#mute-timings>
+              for supported options.
+            '';
+            default = null;
+            type = types.nullOr (types.submodule {
+              options = {
+                apiVersion = mkOption {
+                  description = lib.mdDoc "Config file version.";
+                  default = 1;
+                  type = types.int;
+                };
+
+                muteTimes = mkOption {
+                  description = lib.mdDoc "List of mute time intervals to import or update.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    freeformType = provisioningSettingsFormat.type;
+
+                    options.name = mkOption {
+                      description = lib.mdDoc "Name of the mute time interval, must be unique. Required.";
+                      type = types.str;
+                    };
+                  });
+                };
+
+                deleteMuteTimes = mkOption {
+                  description = lib.mdDoc "List of mute time intervals that should be deleted.";
+                  default = [ ];
+                  type = types.listOf (types.submodule {
+                    options.orgId = mkOption {
+                      description = lib.mdDoc "Organization ID, default = 1.";
+                      default = 1;
+                      type = types.int;
+                    };
+
+                    options.name = mkOption {
+                      description = lib.mdDoc "Name of the mute time interval, must be unique. Required.";
+                      type = types.str;
+                    };
+                  });
+                };
+              };
+            });
+            example = literalExpression ''
+              {
+                apiVersion = 1;
+
+                muteTimes = [{
+                  orgId = 1;
+                  name = "mti_1";
+                  time_intervals = [{
+                    times = [{
+                      start_time = "06:00";
+                      end_time = "23:59";
+                    }];
+                    weekdays = [
+                      "monday:wednesday"
+                      "saturday"
+                      "sunday"
+                    ];
+                    months = [
+                      "1:3"
+                      "may:august"
+                      "december"
+                    ];
+                    years = [
+                      "2020:2022"
+                      "2030"
+                    ];
+                    days_of_month = [
+                      "1:5"
+                      "-3:-1"
+                    ];
+                  }];
+                }];
+
+                deleteMuteTimes = [{
+                  orgId = 1;
+                  name = "mti_1";
+                }];
+              }
+            '';
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings =
+      let
+        doesntUseFileProvider = opt: defaultValue:
+          let regex = "${optionalString (defaultValue != null) "^${defaultValue}$|"}^\\$__(file|env)\\{.*}$|^\\$[^_\\$][^ ]+$";
+          in builtins.match regex opt == null;
+
+        # Ensure that no custom credentials are leaked into the Nix store. Unless the default value
+        # is specified, this can be achieved by using the file/env provider:
+        # https://grafana.com/docs/grafana/latest/setup-grafana/configure-grafana/#variable-expansion
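+        #
+        # A sketch of using those providers (the secret path and the variable name below are
+        # placeholders, not values set by this module):
+        #   services.grafana.settings.security.admin_password = "$__file{/run/secrets/grafana-admin-password}";
+        #   services.grafana.settings.database.password = "$__env{GRAFANA_DATABASE_PASSWORD}";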
+        passwordWithoutFileProvider = optional
+          (
+            doesntUseFileProvider cfg.settings.database.password "" ||
+            doesntUseFileProvider cfg.settings.security.admin_password "admin"
+          )
+          ''
+            Grafana passwords will be stored as plaintext in the Nix store!
+            Use file provider or an env-var instead.
+          '';
+
+        # Warn about deprecated notifiers.
+        deprecatedNotifiers = optional (cfg.provision.notifiers != [ ]) ''
+          Notifiers are deprecated upstream and will be removed in Grafana 11.
+          Use `services.grafana.provision.alerting.contactPoints` instead.
+        '';
+
+        # Ensure that `secureJsonData` of datasources provisioned via `datasources.settings`
+        # only uses file/env providers.
+        secureJsonDataWithoutFileProvider = optional
+          (
+            let
+              datasourcesToCheck = optionals
+                (cfg.provision.datasources.settings != null)
+                cfg.provision.datasources.settings.datasources;
+              declarationUnsafe = { secureJsonData, ... }:
+                secureJsonData != null
+                && any (flip doesntUseFileProvider null) (attrValues secureJsonData);
+            in
+            any declarationUnsafe datasourcesToCheck
+          )
+          ''
+            Declarations in the `secureJsonData`-block of a datasource will be leaked to the
+            Nix store unless a file-provider or an env-var is used!
+          '';
+
+        notifierSecureSettingsWithoutFileProvider = optional
+          (any (x: x.secure_settings != null) cfg.provision.notifiers)
+          "Notifier secure settings will be stored as plaintext in the Nix store! Use file provider instead.";
+      in
+      passwordWithoutFileProvider
+      ++ deprecatedNotifiers
+      ++ secureJsonDataWithoutFileProvider
+      ++ notifierSecureSettingsWithoutFileProvider;
+
+    environment.systemPackages = [ cfg.package ];
+
+    assertions = [
+      {
+        assertion = cfg.provision.datasources.settings == null || cfg.provision.datasources.path == null;
+        message = "Cannot set both datasources settings and datasources path";
+      }
+      {
+        assertion =
+          let
+            prometheusIsNotDirect = opt: all
+              ({ type, access, ... }: type == "prometheus" -> access != "direct")
+              opt;
+          in
+          cfg.provision.datasources.settings == null || prometheusIsNotDirect cfg.provision.datasources.settings.datasources;
+        message = "For datasources of type `prometheus`, the `direct` access mode is not supported anymore (since Grafana 9.2.0)";
+      }
+      {
+        assertion = cfg.provision.dashboards.settings == null || cfg.provision.dashboards.path == null;
+        message = "Cannot set both dashboards settings and dashboards path";
+      }
+      {
+        assertion = cfg.provision.alerting.rules.settings == null || cfg.provision.alerting.rules.path == null;
+        message = "Cannot set both rules settings and rules path";
+      }
+      {
+        assertion = cfg.provision.alerting.contactPoints.settings == null || cfg.provision.alerting.contactPoints.path == null;
+        message = "Cannot set both contact points settings and contact points path";
+      }
+      {
+        assertion = cfg.provision.alerting.policies.settings == null || cfg.provision.alerting.policies.path == null;
+        message = "Cannot set both policies settings and policies path";
+      }
+      {
+        assertion = cfg.provision.alerting.templates.settings == null || cfg.provision.alerting.templates.path == null;
+        message = "Cannot set both templates settings and templates path";
+      }
+      {
+        assertion = cfg.provision.alerting.muteTimings.settings == null || cfg.provision.alerting.muteTimings.path == null;
+        message = "Cannot set both mute timings settings and mute timings path";
+      }
+    ];
+
+    systemd.services.grafana = {
+      description = "Grafana Service Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
+      script = ''
+        set -o errexit -o pipefail -o nounset -o errtrace
+        shopt -s inherit_errexit
+
+        exec ${cfg.package}/bin/grafana-server -homepath ${cfg.dataDir} -config ${configFile}
+      '';
+      serviceConfig = {
+        WorkingDirectory = cfg.dataDir;
+        User = "grafana";
+        Restart = "on-failure";
+        RuntimeDirectory = "grafana";
+        RuntimeDirectoryMode = "0755";
+        # Hardening
+        AmbientCapabilities = lib.mkIf (cfg.settings.server.http_port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = if (cfg.settings.server.http_port < 1024) then [ "CAP_NET_BIND_SERVICE" ] else [ "" ];
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "full";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        # Upstream grafana is not setting SystemCallFilter for compatibility
+        # reasons, see https://github.com/grafana/grafana/pull/40176
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ] ++ lib.optionals (cfg.settings.server.protocol == "socket") [ "@chown" ];
+        UMask = "0027";
+      };
+      preStart = ''
+        ln -fs ${cfg.package}/share/grafana/conf ${cfg.dataDir}
+        ln -fs ${cfg.package}/share/grafana/tools ${cfg.dataDir}
+      '';
+    };
+
+    users.users.grafana = {
+      uid = config.ids.uids.grafana;
+      description = "Grafana user";
+      home = cfg.dataDir;
+      createHome = true;
+      group = "grafana";
+    };
+    users.groups.grafana = { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/graphite.nix b/nixpkgs/nixos/modules/services/monitoring/graphite.nix
new file mode 100644
index 000000000000..cc3d70976204
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/graphite.nix
@@ -0,0 +1,428 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.graphite;
+  opt = options.services.graphite;
+  writeTextOrNull = f: t: mapNullable (pkgs.writeTextDir f) t;
+
+  dataDir = cfg.dataDir;
+  staticDir = cfg.dataDir + "/static";
+
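+  # Expose the generated graphite_local_settings.py as an importable Python module: the
+  # directory below is put on PYTHONPATH and GRAPHITE_SETTINGS_MODULE points at it in the
+  # graphiteWeb service further down.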
+  graphiteLocalSettingsDir = pkgs.runCommand "graphite_local_settings" {
+      inherit graphiteLocalSettings;
+      preferLocalBuild = true;
+    } ''
+    mkdir -p $out
+    ln -s $graphiteLocalSettings $out/graphite_local_settings.py
+  '';
+
+  graphiteLocalSettings = pkgs.writeText "graphite_local_settings.py" (
+    "STATIC_ROOT = '${staticDir}'\n" +
+    optionalString (config.time.timeZone != null) "TIME_ZONE = '${config.time.timeZone}'\n"
+    + cfg.web.extraConfig
+  );
+
+  seyrenConfig = {
+    SEYREN_URL = cfg.seyren.seyrenUrl;
+    MONGO_URL = cfg.seyren.mongoUrl;
+    GRAPHITE_URL = cfg.seyren.graphiteUrl;
+  } // cfg.seyren.extraConfig;
+
+  configDir = pkgs.buildEnv {
+    name = "graphite-config";
+    paths = lists.filter (el: el != null) [
+      (writeTextOrNull "carbon.conf" cfg.carbon.config)
+      (writeTextOrNull "storage-aggregation.conf" cfg.carbon.storageAggregation)
+      (writeTextOrNull "storage-schemas.conf" cfg.carbon.storageSchemas)
+      (writeTextOrNull "blacklist.conf" cfg.carbon.blacklist)
+      (writeTextOrNull "whitelist.conf" cfg.carbon.whitelist)
+      (writeTextOrNull "rewrite-rules.conf" cfg.carbon.rewriteRules)
+      (writeTextOrNull "relay-rules.conf" cfg.carbon.relayRules)
+      (writeTextOrNull "aggregation-rules.conf" cfg.carbon.aggregationRules)
+    ];
+  };
+
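+  # Command line shared by the carbon daemons (cache, relay, aggregator): twistd runs the
+  # daemon in the foreground, logs to syslog and writes a pid file under /run/<name>; each
+  # daemon reads its settings from GRAPHITE_CONF_DIR (see carbonEnv below).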
+  carbonOpts = name: with config.ids; ''
+    --nodaemon --syslog --prefix=${name} --pidfile /run/${name}/${name}.pid ${name}
+  '';
+
+  carbonEnv = {
+    PYTHONPATH = let
+      cenv = pkgs.python3.buildEnv.override {
+        extraLibs = [ pkgs.python3Packages.carbon ];
+      };
+    in "${cenv}/${pkgs.python3.sitePackages}";
+    GRAPHITE_ROOT = dataDir;
+    GRAPHITE_CONF_DIR = configDir;
+    GRAPHITE_STORAGE_DIR = dataDir;
+  };
+
+in {
+
+  imports = [
+    (mkRemovedOptionModule ["services" "graphite" "api"] "")
+    (mkRemovedOptionModule ["services" "graphite" "beacon"] "")
+    (mkRemovedOptionModule ["services" "graphite" "pager"] "")
+  ];
+
+  ###### interface
+
+  options.services.graphite = {
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/db/graphite";
+      description = lib.mdDoc ''
+        Data directory for graphite.
+      '';
+    };
+
+    web = {
+      enable = mkOption {
+        description = lib.mdDoc "Whether to enable graphite web frontend.";
+        default = false;
+        type = types.bool;
+      };
+
+      listenAddress = mkOption {
+        description = lib.mdDoc "Graphite web frontend listen address.";
+        default = "127.0.0.1";
+        type = types.str;
+      };
+
+      port = mkOption {
+        description = lib.mdDoc "Graphite web frontend port.";
+        default = 8080;
+        type = types.port;
+      };
+
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Graphite webapp settings. See:
+          <https://graphite.readthedocs.io/en/latest/config-local-settings.html>
+        '';
+      };
+    };
+
+    carbon = {
+      config = mkOption {
+        description = lib.mdDoc "Content of carbon configuration file.";
+        default = ''
+          [cache]
+          # Listen on localhost by default for security reasons
+          UDP_RECEIVER_INTERFACE = 127.0.0.1
+          PICKLE_RECEIVER_INTERFACE = 127.0.0.1
+          LINE_RECEIVER_INTERFACE = 127.0.0.1
+          CACHE_QUERY_INTERFACE = 127.0.0.1
+          # Do not log every update
+          LOG_UPDATES = False
+          LOG_CACHE_HITS = False
+        '';
+        type = types.str;
+      };
+
+      enableCache = mkOption {
+        description = lib.mdDoc "Whether to enable carbon cache, the graphite storage daemon.";
+        default = false;
+        type = types.bool;
+      };
+
+      storageAggregation = mkOption {
+        description = lib.mdDoc "Defines how to aggregate data to lower-precision retentions.";
+        default = null;
+        type = types.nullOr types.str;
+        example = ''
+          [all_min]
+          pattern = \.min$
+          xFilesFactor = 0.1
+          aggregationMethod = min
+        '';
+      };
+
+      storageSchemas = mkOption {
+        description = lib.mdDoc "Defines retention rates for storing metrics.";
+        default = "";
+        type = types.nullOr types.str;
+        example = ''
+          [apache_busyWorkers]
+          pattern = ^servers\.www.*\.workers\.busyWorkers$
+          retentions = 15s:7d,1m:21d,15m:5y
+        '';
+      };
+
+      blacklist = mkOption {
+        description = lib.mdDoc "Any metrics received which match one of the expressions will be dropped.";
+        default = null;
+        type = types.nullOr types.str;
+        example = "^some\\.noisy\\.metric\\.prefix\\..*";
+      };
+
+      whitelist = mkOption {
+        description = lib.mdDoc "Only metrics received which match one of the expressions will be persisted.";
+        default = null;
+        type = types.nullOr types.str;
+        example = ".*";
+      };
+
+      rewriteRules = mkOption {
+        description = lib.mdDoc ''
+          Regular expression patterns that can be used to rewrite metric names
+          in a search and replace fashion.
+        '';
+        default = null;
+        type = types.nullOr types.str;
+        example = ''
+          [post]
+          _sum$ =
+          _avg$ =
+        '';
+      };
+
+      enableRelay = mkOption {
+        description = lib.mdDoc "Whether to enable carbon relay, the carbon replication and sharding service.";
+        default = false;
+        type = types.bool;
+      };
+
+      relayRules = mkOption {
+        description = lib.mdDoc "Relay rules are used to send certain metrics to a certain backend.";
+        default = null;
+        type = types.nullOr types.str;
+        example = ''
+          [example]
+          pattern = ^mydata\.foo\..+
+          servers = 10.1.2.3, 10.1.2.4:2004, myserver.mydomain.com
+        '';
+      };
+
+      enableAggregator = mkOption {
+        description = lib.mdDoc "Whether to enable carbon aggregator, the carbon buffering service.";
+        default = false;
+        type = types.bool;
+      };
+
+      aggregationRules = mkOption {
+        description = lib.mdDoc "Defines if and how received metrics will be aggregated.";
+        default = null;
+        type = types.nullOr types.str;
+        example = ''
+          <env>.applications.<app>.all.requests (60) = sum <env>.applications.<app>.*.requests
+          <env>.applications.<app>.all.latency (60) = avg <env>.applications.<app>.*.latency
+        '';
+      };
+    };
+
+    seyren = {
+      enable = mkOption {
+        description = lib.mdDoc "Whether to enable seyren service.";
+        default = false;
+        type = types.bool;
+      };
+
+      port = mkOption {
+        description = lib.mdDoc "Seyren listening port.";
+        default = 8081;
+        type = types.port;
+      };
+
+      seyrenUrl = mkOption {
+        default = "http://localhost:${toString cfg.seyren.port}/";
+        defaultText = literalExpression ''"http://localhost:''${toString config.${opt.seyren.port}}/"'';
+        description = lib.mdDoc "Host where seyren is accessible.";
+        type = types.str;
+      };
+
+      graphiteUrl = mkOption {
+        default = "http://${cfg.web.listenAddress}:${toString cfg.web.port}";
+        defaultText = literalExpression ''"http://''${config.${opt.web.listenAddress}}:''${toString config.${opt.web.port}}"'';
+        description = lib.mdDoc "Host where graphite service runs.";
+        type = types.str;
+      };
+
+      mongoUrl = mkOption {
+        default = "mongodb://${config.services.mongodb.bind_ip}:27017/seyren";
+        defaultText = literalExpression ''"mongodb://''${config.services.mongodb.bind_ip}:27017/seyren"'';
+        description = lib.mdDoc "Mongodb connection string.";
+        type = types.str;
+      };
+
+      extraConfig = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Extra seyren configuration. See
+          <https://github.com/scobal/seyren#config>
+        '';
+        type = types.attrsOf types.str;
+        example = literalExpression ''
+          {
+            GRAPHITE_USERNAME = "user";
+            GRAPHITE_PASSWORD = "pass";
+          }
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkMerge [
+    (mkIf cfg.carbon.enableCache {
+      systemd.services.carbonCache = let name = "carbon-cache"; in {
+        description = "Graphite Data Storage Backend";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = carbonEnv;
+        serviceConfig = {
+          RuntimeDirectory = name;
+          ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
+          User = "graphite";
+          Group = "graphite";
+          PermissionsStartOnly = true;
+          PIDFile="/run/${name}/${name}.pid";
+        };
+        preStart = ''
+          install -dm0700 -o graphite -g graphite ${cfg.dataDir}
+          install -dm0700 -o graphite -g graphite ${cfg.dataDir}/whisper
+        '';
+      };
+    })
+
+    (mkIf cfg.carbon.enableAggregator {
+      systemd.services.carbonAggregator = let name = "carbon-aggregator"; in {
+        enable = cfg.carbon.enableAggregator;
+        description = "Carbon Data Aggregator";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = carbonEnv;
+        serviceConfig = {
+          RuntimeDirectory = name;
+          ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
+          User = "graphite";
+          Group = "graphite";
+          PIDFile="/run/${name}/${name}.pid";
+        };
+      };
+    })
+
+    (mkIf cfg.carbon.enableRelay {
+      systemd.services.carbonRelay = let name = "carbon-relay"; in {
+        description = "Carbon Data Relay";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        environment = carbonEnv;
+        serviceConfig = {
+          RuntimeDirectory = name;
+          ExecStart = "${pkgs.python3Packages.twisted}/bin/twistd ${carbonOpts name}";
+          User = "graphite";
+          Group = "graphite";
+          PIDFile="/run/${name}/${name}.pid";
+        };
+      };
+    })
+
+    (mkIf (cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay) {
+      environment.systemPackages = [
+        pkgs.python3Packages.carbon
+      ];
+    })
+
+    (mkIf cfg.web.enable ({
+      systemd.services.graphiteWeb = {
+        description = "Graphite Web Interface";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        path = [ pkgs.perl ];
+        environment = {
+          PYTHONPATH = let
+              penv = pkgs.python3.buildEnv.override {
+                extraLibs = [
+                  pkgs.python3Packages.graphite-web
+                ];
+              };
+              penvPack = "${penv}/${pkgs.python3.sitePackages}";
+            in concatStringsSep ":" [
+                 "${graphiteLocalSettingsDir}"
+                 "${penvPack}"
+                 # explicitly adding pycairo in path because it cannot be imported via buildEnv
+                 "${pkgs.python3Packages.pycairo}/${pkgs.python3.sitePackages}"
+               ];
+          DJANGO_SETTINGS_MODULE = "graphite.settings";
+          GRAPHITE_SETTINGS_MODULE = "graphite_local_settings";
+          GRAPHITE_CONF_DIR = configDir;
+          GRAPHITE_STORAGE_DIR = dataDir;
+          LD_LIBRARY_PATH = "${pkgs.cairo.out}/lib";
+        };
+        serviceConfig = {
+          ExecStart = ''
+            ${pkgs.python3Packages.waitress-django}/bin/waitress-serve-django \
+              --host=${cfg.web.listenAddress} --port=${toString cfg.web.port}
+          '';
+          User = "graphite";
+          Group = "graphite";
+          PermissionsStartOnly = true;
+        };
+        preStart = ''
+          if ! test -e ${dataDir}/db-created; then
+            mkdir -p ${dataDir}/{whisper/,log/webapp/}
+            chmod 0700 ${dataDir}/{whisper/,log/webapp/}
+
+            ${pkgs.python3Packages.django}/bin/django-admin.py migrate --noinput
+
+            chown -R graphite:graphite ${dataDir}
+
+            touch ${dataDir}/db-created
+          fi
+
+          # Only collect static files when graphite_web changes.
+          if ! [ "${dataDir}/current_graphite_web" -ef "${pkgs.python3Packages.graphite-web}" ]; then
+            mkdir -p ${staticDir}
+            ${pkgs.python3Packages.django}/bin/django-admin.py collectstatic  --noinput --clear
+            chown -R graphite:graphite ${staticDir}
+            ln -sfT "${pkgs.python3Packages.graphite-web}" "${dataDir}/current_graphite_web"
+          fi
+        '';
+      };
+
+      environment.systemPackages = [ pkgs.python3Packages.graphite-web ];
+    }))
+
+    (mkIf cfg.seyren.enable {
+      systemd.services.seyren = {
+        description = "Graphite Alerting Dashboard";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" "mongodb.service" ];
+        environment = seyrenConfig;
+        serviceConfig = {
+          ExecStart = "${pkgs.seyren}/bin/seyren -httpPort ${toString cfg.seyren.port}";
+          WorkingDirectory = dataDir;
+          User = "graphite";
+          Group = "graphite";
+        };
+        preStart = ''
+          if ! test -e ${dataDir}/db-created; then
+            mkdir -p ${dataDir}
+            chown graphite:graphite ${dataDir}
+          fi
+        '';
+      };
+
+      services.mongodb.enable = mkDefault true;
+    })
+
+    (mkIf (
+      cfg.carbon.enableCache || cfg.carbon.enableAggregator || cfg.carbon.enableRelay ||
+      cfg.web.enable || cfg.seyren.enable
+     ) {
+      users.users.graphite = {
+        uid = config.ids.uids.graphite;
+        group = "graphite";
+        description = "Graphite daemon user";
+        home = dataDir;
+      };
+      users.groups.graphite.gid = config.ids.gids.graphite;
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/hdaps.nix b/nixpkgs/nixos/modules/services/monitoring/hdaps.nix
new file mode 100644
index 000000000000..59b8b9b3c054
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/hdaps.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.hdapsd;
+  hdapsd = [ pkgs.hdapsd ];
+in
+{
+  options = {
+    services.hdapsd.enable = mkEnableOption
+      (lib.mdDoc ''
+        Hard Drive Active Protection System Daemon;
+        devices are detected and managed automatically by udev and systemd
+      '');
+  };
+
+  config = mkIf cfg.enable {
+    boot.kernelModules = [ "hdapsd" ];
+    services.udev.packages = hdapsd;
+    systemd.packages = hdapsd;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/heapster.nix b/nixpkgs/nixos/modules/services/monitoring/heapster.nix
new file mode 100644
index 000000000000..fc63276b62f7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/heapster.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.heapster;
+in {
+  options.services.heapster = {
+    enable = mkEnableOption (lib.mdDoc "Heapster monitoring");
+
+    source = mkOption {
+      description = lib.mdDoc "Heapster metric source";
+      example = "kubernetes:https://kubernetes.default";
+      type = types.str;
+    };
+
+    sink = mkOption {
+      description = lib.mdDoc "Heapster metric sink";
+      example = "influxdb:http://localhost:8086";
+      type = types.str;
+    };
+
+    extraOpts = mkOption {
+      description = lib.mdDoc "Heapster extra options";
+      default = "";
+      type = types.separatedString " ";
+    };
+
+    package = mkOption {
+      description = lib.mdDoc "Heapster package to use";
+      default = pkgs.heapster;
+      defaultText = literalExpression "pkgs.heapster";
+      type = types.package;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.heapster = {
+      wantedBy = ["multi-user.target"];
+      after = ["cadvisor.service" "kube-apiserver.service"];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/heapster --source=${cfg.source} --sink=${cfg.sink} ${cfg.extraOpts}";
+        User = "heapster";
+      };
+    };
+
+    users.users.heapster = {
+      isSystemUser = true;
+      group = "heapster";
+      description = "Heapster user";
+    };
+    users.groups.heapster = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/incron.nix b/nixpkgs/nixos/modules/services/monitoring/incron.nix
new file mode 100644
index 000000000000..3766f1fa238d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/incron.nix
@@ -0,0 +1,103 @@
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.incron;
+
+in
+
+{
+  options = {
+
+    services.incron = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the incron daemon.
+
+          Note that commands run from incrontab only have the common Nix profile paths available in the {env}`PATH` variable.
+        '';
+      };
+
+      allow = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        description = lib.mdDoc ''
+          Users allowed to use incrontab.
+
+          If empty then no user will be allowed to have their own incrontab.
+          If `null` then will defer to {option}`deny`.
+          If both {option}`allow` and {option}`deny` are null
+          then all users will be allowed to have their own incrontab.
+        '';
+      };
+
+      deny = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        description = lib.mdDoc "Users forbidden from using incrontab.";
+      };
+
+      systab = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "The system incrontab contents.";
+        example = ''
+          /var/mail IN_CLOSE_WRITE abc $@/$#
+          /tmp IN_ALL_EVENTS efg $@/$# $&
+        '';
+      };
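+      # In the systab example above, $@ expands to the watched path, $# to the name of the
+      # file the event relates to and $& to the event flags as a number; see incrontab(5)
+      # for the full list of wildcards.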
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.rsync ]";
+        description = lib.mdDoc "Extra packages available to the system incrontab.";
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    warnings = optional (cfg.allow != null && cfg.deny != null)
+      "If `services.incron.allow` is set then `services.incron.deny` will be ignored.";
+
+    environment.systemPackages = [ pkgs.incron ];
+
+    security.wrappers.incrontab =
+    { setuid = true;
+      owner = "root";
+      group = "root";
+      source = "${pkgs.incron}/bin/incrontab";
+    };
+
+    # incron won't read symlinks
+    environment.etc."incron.d/system" = {
+      mode = "0444";
+      text = cfg.systab;
+    };
+    environment.etc."incron.allow" = mkIf (cfg.allow != null) {
+      text = concatStringsSep "\n" cfg.allow;
+    };
+    environment.etc."incron.deny" = mkIf (cfg.deny != null) {
+      text = concatStringsSep "\n" cfg.deny;
+    };
+
+    systemd.services.incron = {
+      description = "File System Events Scheduler";
+      wantedBy = [ "multi-user.target" ];
+      path = cfg.extraPackages;
+      serviceConfig.PIDFile = "/run/incrond.pid";
+      serviceConfig.ExecStartPre = "${pkgs.coreutils}/bin/mkdir -m 710 -p /var/spool/incron";
+      serviceConfig.ExecStart = "${pkgs.incron}/bin/incrond --foreground";
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix b/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix
new file mode 100644
index 000000000000..727b694047b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/kapacitor.nix
@@ -0,0 +1,188 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.kapacitor;
+
+  kapacitorConf = pkgs.writeTextFile {
+    name = "kapacitord.conf";
+    text = ''
+      hostname="${config.networking.hostName}"
+      data_dir="${cfg.dataDir}"
+
+      [http]
+        bind-address = "${cfg.bind}:${toString cfg.port}"
+        log-enabled = false
+        auth-enabled = false
+
+      [task]
+        dir = "${cfg.dataDir}/tasks"
+        snapshot-interval = "${cfg.taskSnapshotInterval}"
+
+      [replay]
+        dir = "${cfg.dataDir}/replay"
+
+      [storage]
+        boltdb = "${cfg.dataDir}/kapacitor.db"
+
+      ${optionalString (cfg.loadDirectory != null) ''
+        [load]
+          enabled = true
+          dir = "${cfg.loadDirectory}"
+      ''}
+
+      ${optionalString (cfg.defaultDatabase.enable) ''
+        [[influxdb]]
+          name = "default"
+          enabled = true
+          default = true
+          urls = [ "${cfg.defaultDatabase.url}" ]
+          username = "${cfg.defaultDatabase.username}"
+          password = "${cfg.defaultDatabase.password}"
+      ''}
+
+      ${optionalString (cfg.alerta.enable) ''
+        [alerta]
+          enabled = true
+          url = "${cfg.alerta.url}"
+          token = "${cfg.alerta.token}"
+          environment = "${cfg.alerta.environment}"
+          origin = "${cfg.alerta.origin}"
+      ''}
+
+      ${cfg.extraConfig}
+    '';
+  };
+in
+{
+  options.services.kapacitor = {
+    enable = mkEnableOption (lib.mdDoc "kapacitor");
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/kapacitor";
+      description = lib.mdDoc "Location where Kapacitor stores its state";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 9092;
+      description = lib.mdDoc "Port of Kapacitor";
+    };
+
+    bind = mkOption {
+      type = types.str;
+      default = "";
+      example = "0.0.0.0";
+      description = lib.mdDoc "Address to bind to. The default is to bind to all addresses";
+    };
+
+    extraConfig = mkOption {
+      description = lib.mdDoc "These lines go into kapacitord.conf verbatim.";
+      default = "";
+      type = types.lines;
+    };
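+    # A rough sketch of what extraConfig might contain; the section and key names are
+    # assumptions based on Kapacitor's documented kapacitord.conf, not defaults set here:
+    #   services.kapacitor.extraConfig = ''
+    #     [logging]
+    #       level = "INFO"
+    #   '';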
+
+    user = mkOption {
+      type = types.str;
+      default = "kapacitor";
+      description = lib.mdDoc "User account under which Kapacitor runs";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "kapacitor";
+      description = lib.mdDoc "Group under which Kapacitor runs";
+    };
+
+    taskSnapshotInterval = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Specifies how often to snapshot the task state (in InfluxDB time units)";
+      default = "1m0s";
+    };
+
+    loadDirectory = mkOption {
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Directory where to load services from, such as tasks, templates and handlers (or null to disable service loading on startup)";
+      default = null;
+    };
+
+    defaultDatabase = {
+      enable = mkEnableOption (lib.mdDoc "kapacitor.defaultDatabase");
+
+      url = mkOption {
+        description = lib.mdDoc "The URL to an InfluxDB server that serves as the default database";
+        example = "http://localhost:8086";
+        type = types.str;
+      };
+
+      username = mkOption {
+        description = lib.mdDoc "The username to connect to the remote InfluxDB server";
+        type = types.str;
+      };
+
+      password = mkOption {
+        description = lib.mdDoc "The password to connect to the remote InfluxDB server";
+        type = types.str;
+      };
+    };
+
+    alerta = {
+      enable = mkEnableOption (lib.mdDoc "kapacitor alerta integration");
+
+      url = mkOption {
+        description = lib.mdDoc "The URL to the Alerta REST API";
+        default = "http://localhost:5000";
+        type = types.str;
+      };
+
+      token = mkOption {
+        description = lib.mdDoc "Default Alerta authentication token";
+        type = types.str;
+        default = "";
+      };
+
+      environment = mkOption {
+        description = lib.mdDoc "Default Alerta environment";
+        type = types.str;
+        default = "Production";
+      };
+
+      origin = mkOption {
+        description = lib.mdDoc "Default origin of alert";
+        type = types.str;
+        default = "kapacitor";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.kapacitor ];
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.kapacitor = {
+      description = "Kapacitor Real-Time Stream Processing Engine";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.kapacitor}/bin/kapacitord -config ${kapacitorConf}";
+        User = "kapacitor";
+        Group = "kapacitor";
+      };
+    };
+
+    users.users.kapacitor = {
+      uid = config.ids.uids.kapacitor;
+      description = "Kapacitor user";
+      home = cfg.dataDir;
+    };
+
+    users.groups.kapacitor = {
+      gid = config.ids.gids.kapacitor;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/karma.nix b/nixpkgs/nixos/modules/services/monitoring/karma.nix
new file mode 100644
index 000000000000..85dbc81f443f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/karma.nix
@@ -0,0 +1,128 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.services.karma;
+  yaml = pkgs.formats.yaml { };
+in
+{
+  options.services.karma = {
+    enable = mkEnableOption (mdDoc "the Karma dashboard service");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.karma;
+      defaultText = literalExpression "pkgs.karma";
+      description = mdDoc ''
+        The Karma package that should be used.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      default = yaml.generate "karma.yaml" cfg.settings;
+      defaultText = "A YAML file generated from the attribute set given in the settings option.";
+      description = mdDoc ''
+        A YAML config file which can be used to configure karma instead of the nix-generated file.
+      '';
+      example = "/etc/karma/karma.conf";
+    };
+
+    environment = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      description = mdDoc ''
+        Additional environment variables to provide to karma.
+      '';
+      example = {
+        ALERTMANAGER_URI = "https://alertmanager.example.com";
+        ALERTMANAGER_NAME = "single";
+      };
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Whether to open ports in the firewall needed for karma to function.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = mdDoc ''
+        Extra command line options.
+      '';
+      example = [
+        "--alertmanager.timeout 10s"
+      ];
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = yaml.type;
+
+        options.listen = {
+          address = mkOption {
+            type = types.str;
+            default = "127.0.0.1";
+            description = mdDoc ''
+              Hostname or IP to listen on.
+            '';
+            example = "[::]";
+          };
+
+          port = mkOption {
+            type = types.port;
+            default = 8080;
+            description = mdDoc ''
+              HTTP port to listen on.
+            '';
+            example = 8182;
+          };
+        };
+      };
+      default = {
+        listen = {
+          address = "127.0.0.1";
+        };
+      };
+      description = mdDoc ''
+        Karma dashboard configuration as nix attributes.
+
+        Reference: <https://github.com/prymitive/karma/blob/main/docs/CONFIGURATION.md>
+      '';
+      example = {
+        listen = {
+          address = "192.168.1.4";
+          port = 8000;
+          prefix = "/dashboard";
+        };
+        alertmanager = {
+          interval = "15s";
+          servers = [
+            {
+              name = "prod";
+              uri = "http://alertmanager.example.com";
+            }
+          ];
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.karma = {
+      description = "Alert dashboard for Prometheus Alertmanager";
+      wantedBy = [ "multi-user.target" ];
+      environment = cfg.environment;
+      serviceConfig = {
+        Type = "simple";
+        DynamicUser = true;
+        Restart = "on-failure";
+        ExecStart = "${cfg.package}/bin/karma --config.file ${cfg.configFile} ${concatStringsSep " " cfg.extraOptions}";
+      };
+    };
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.settings.listen.port ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/kthxbye.nix b/nixpkgs/nixos/modules/services/monitoring/kthxbye.nix
new file mode 100644
index 000000000000..3f988dcb722f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/kthxbye.nix
@@ -0,0 +1,166 @@
+{ config, pkgs, lib, ... }:
+with lib;
+
+let
+  cfg = config.services.kthxbye;
+in
+
+{
+  options.services.kthxbye = {
+    enable = mkEnableOption (mdDoc "kthxbye alert acknowledgement management daemon");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.kthxbye;
+      defaultText = literalExpression "pkgs.kthxbye";
+      description = mdDoc ''
+        The kthxbye package that should be used.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Whether to open ports in the firewall needed for the daemon to function.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = mdDoc ''
+        Extra command line options.
+
+        Documentation can be found [here](https://github.com/prymitive/kthxbye/blob/main/README.md).
+      '';
+      example = literalExpression ''
+        [
+          "-extend-with-prefix 'ACK!'"
+        ]
+      '';
+    };
+
+    alertmanager = {
+      timeout = mkOption {
+        type = types.str;
+        default = "1m0s";
+        description = mdDoc ''
+          Alertmanager request timeout duration in the [time.Duration](https://pkg.go.dev/time#ParseDuration) format.
+        '';
+        example = "30s";
+      };
+      uri = mkOption {
+        type = types.str;
+        default = "http://localhost:9093";
+        description = mdDoc ''
+          Alertmanager URI to use.
+        '';
+        example = "https://alertmanager.example.com";
+      };
+    };
+
+    extendBy = mkOption {
+      type = types.str;
+      default = "15m0s";
+      description = mdDoc ''
+        Extend silences by adding DURATION seconds.
+
+        DURATION should be provided in the [time.Duration](https://pkg.go.dev/time#ParseDuration) format.
+      '';
+      example = "6h0m0s";
+    };
+
+    extendIfExpiringIn = mkOption {
+      type = types.str;
+      default = "5m0s";
+      description = mdDoc ''
+        Extend silences that are about to expire in the next DURATION seconds.
+
+        DURATION should be provided in the [time.Duration](https://pkg.go.dev/time#ParseDuration) format.
+      '';
+      example = "1m0s";
+    };
+
+    extendWithPrefix = mkOption {
+      type = types.str;
+      default = "ACK!";
+      description = mdDoc ''
+        Extend silences with comment starting with PREFIX string.
+      '';
+      example = "!perma-silence";
+    };
+
+    interval = mkOption {
+      type = types.str;
+      default = "45s";
+      description = mdDoc ''
+        Silence check interval duration in the [time.Duration](https://pkg.go.dev/time#ParseDuration) format.
+      '';
+      example = "30s";
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = mdDoc ''
+        The address to listen on for HTTP requests.
+      '';
+      example = "127.0.0.1";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = mdDoc ''
+        The port to listen on for HTTP requests.
+      '';
+    };
+
+    logJSON = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Format logged messages as JSON.
+      '';
+    };
+
+    maxDuration = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      description = mdDoc ''
+        Maximum duration of a silence; once this is reached, the silence won't be extended any further.
+
+        Duration should be provided in the [time.Duration](https://pkg.go.dev/time#ParseDuration) format.
+      '';
+      example = "30d";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.kthxbye = {
+      description = "kthxbye Alertmanager ack management daemon";
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        ${cfg.package}/bin/kthxbye \
+          -alertmanager.timeout ${cfg.alertmanager.timeout} \
+          -alertmanager.uri ${cfg.alertmanager.uri} \
+          -extend-by ${cfg.extendBy} \
+          -extend-if-expiring-in ${cfg.extendIfExpiringIn} \
+          -extend-with-prefix ${cfg.extendWithPrefix} \
+          -interval ${cfg.interval} \
+          -listen ${cfg.listenAddress}:${toString cfg.port} \
+          ${optionalString cfg.logJSON "-log-json"} \
+          ${optionalString (cfg.maxDuration != null) "-max-duration ${cfg.maxDuration}"} \
+          ${concatStringsSep " " cfg.extraOptions}
+      '';
+      serviceConfig = {
+        Type = "simple";
+        DynamicUser = true;
+        Restart = "on-failure";
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/librenms.nix b/nixpkgs/nixos/modules/services/monitoring/librenms.nix
new file mode 100644
index 000000000000..08a46754e0e8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/librenms.nix
@@ -0,0 +1,624 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.librenms;
+  settingsFormat = pkgs.formats.json {};
+  configJson = settingsFormat.generate "librenms-config.json" cfg.settings;
+
+  package = pkgs.librenms.override {
+    logDir = cfg.logDir;
+    dataDir = cfg.dataDir;
+  };
+
+  phpOptions = ''
+    log_errors = on
+    post_max_size = 100M
+    upload_max_filesize = 100M
+    date.timezone = "${config.time.timeZone}"
+  '';
+  phpIni = pkgs.runCommand "php.ini" {
+    inherit (package) phpPackage;
+    inherit phpOptions;
+    preferLocalBuild = true;
+    passAsFile = [ "phpOptions" ];
+  } ''
+    cat $phpPackage/etc/php.ini $phpOptionsPath > $out
+  '';
+
+  artisanWrapper = pkgs.writeShellScriptBin "librenms-artisan" ''
+    cd ${package}
+    sudo=exec
+    if [[ "$USER" != ${cfg.user} ]]; then
+      sudo='exec /run/wrappers/bin/sudo -u ${cfg.user}'
+    fi
+    $sudo ${package}/artisan "$@"
+  '';
+
+  lnmsWrapper = pkgs.writeShellScriptBin "lnms" ''
+    cd ${package}
+    exec ${package}/lnms "$@"
+  '';
+
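+  # Bridge between the Nix-generated settings and LibreNMS' native config.php: at runtime the
+  # JSON file in the data directory (rendered from cfg.settings) is merged into $config, and any
+  # literal PHP from cfg.extraConfig is appended verbatim.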
+  configFile = pkgs.writeText "config.php" ''
+    <?php
+    $new_config = json_decode(file_get_contents("${cfg.dataDir}/config.json"), true);
+    $config = ($config == null) ? $new_config : array_merge($config, $new_config);
+
+    ${lib.optionalString (cfg.extraConfig != null) cfg.extraConfig}
+  '';
+
+in {
+  options.services.librenms = with lib; {
+    enable = mkEnableOption "LibreNMS network monitoring system";
+
+    user = mkOption {
+      type = types.str;
+      default = "librenms";
+      description = ''
+        Name of the LibreNMS user.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "librenms";
+      description = ''
+        Name of the LibreNMS group.
+      '';
+    };
+
+    hostname = mkOption {
+      type = types.str;
+      default = config.networking.fqdnOrHostName;
+      defaultText = literalExpression "config.networking.fqdnOrHostName";
+      description = ''
+        The hostname to serve LibreNMS on.
+      '';
+    };
+
+    pollerThreads = mkOption {
+      type = types.int;
+      default = 16;
+      description = ''
+        Number of threads used by the cron poller.
+      '';
+    };
+
+    enableOneMinutePolling = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enables [1-Minute Polling](https://docs.librenms.org/Support/1-Minute-Polling/).
+        Changing this option will automatically convert your existing RRD files.
+      '';
+    };
+
+    useDistributedPollers = mkOption {
+      type = types.bool;
+      default = false;
+      description = ''
+        Enables [distributed pollers](https://docs.librenms.org/Extensions/Distributed-Poller/)
+        for this LibreNMS instance. This will enable a local `rrdcached` and `memcached` server.
+
+        To use this feature, make sure to configure your firewall so that the distributed pollers
+        can reach the local `mysql`, `rrdcached` and `memcached` ports.
+      '';
+    };
+
+    distributedPoller = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Configure this LibreNMS instance as a [distributed poller](https://docs.librenms.org/Extensions/Distributed-Poller/).
+          This will disable all web features and just configure the poller features.
+          Use the `mysql` database of your main LibreNMS instance in the database settings.
+        '';
+      };
+
+      name = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = ''
+          Custom name of this poller.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "0";
+        example = "1,2";
+        description = ''
+          Group(s) of this poller.
+        '';
+      };
+
+      distributedBilling = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Enable distributed billing on this poller.
+        '';
+      };
+
+      memcachedHost = mkOption {
+        type = types.str;
+        description = ''
+          Hostname or IP of the `memcached` server.
+        '';
+      };
+
+      memcachedPort = mkOption {
+        type = types.port;
+        default = 11211;
+        description = ''
+          Port of the `memcached` server.
+        '';
+      };
+
+      rrdcachedHost = mkOption {
+        type = types.str;
+        description = ''
+          Hostname or IP of the `rrdcached` server.
+        '';
+      };
+
+      rrdcachedPort = mkOption {
+        type = types.port;
+        default = 42217;
+        description = ''
+          Port of the `rrdcached` server.
+        '';
+      };
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = ''
+        Options for the LibreNMS PHP pool. See the documentation on `php-fpm.conf`
+        for details on configuration directives.
+      '';
+    };
+
+    nginx = mkOption {
+      type = types.submodule (
+        recursiveUpdate
+          (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {}
+      );
+      default = { };
+      example = literalExpression ''
+        {
+          serverAliases = [
+            "librenms.''${config.networking.domain}"
+          ];
+          # To enable encryption and let Let's Encrypt take care of the certificate
+          forceSSL = true;
+          enableACME = true;
+          # To set the LibreNMS virtualHost as the default virtualHost;
+          default = true;
+        }
+      '';
+      description = ''
+        With this option, you can customize the nginx virtualHost settings.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/librenms";
+      description = ''
+        Path of the LibreNMS state directory.
+      '';
+    };
+
+    logDir = mkOption {
+      type = types.path;
+      default = "/var/log/librenms";
+      description = ''
+        Path of the LibreNMS logging directory.
+      '';
+    };
+
+    database = {
+      createLocally = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to create a local database automatically.
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = ''
+          Hostname or IP of the MySQL/MariaDB server.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3306;
+        description = ''
+          Port of the MySQL/MariaDB server.
+        '';
+      };
+
+      database = mkOption {
+        type = types.str;
+        default = "librenms";
+        description = ''
+          Name of the database on the MySQL/MariaDB server.
+        '';
+      };
+
+      username = mkOption {
+        type = types.str;
+        default = "librenms";
+        description = ''
+          Name of the user on the MySQL/MariaDB server.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.path;
+        example = "/run/secrets/mysql.pass";
+        description = ''
+          A file containing the password for the user of the MySQL/MariaDB server.
+          Must be readable by the LibreNMS user.
+        '';
+      };
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = ''
+        File containing environment variables to be substituted into the final config. Useful for secrets.
+        Does not apply to settings defined in `extraConfig`.
+      '';
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+        options = {};
+      };
+      description = ''
+        Attrset of the LibreNMS configuration.
+        See https://docs.librenms.org/Support/Configuration/ for reference.
+        All possible options are listed [here](https://github.com/librenms/librenms/blob/master/misc/config_definitions.json).
+        See https://docs.librenms.org/Extensions/Authentication/ for setting other authentication methods.
+      '';
+      default = { };
+      example = {
+        base_url = "/librenms/";
+        top_devices = true;
+        top_ports = false;
+      };
+    };
+
+    extraConfig = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = ''
+        Additional config for LibreNMS that will be appended to `config.php`. See
+        https://github.com/librenms/librenms/blob/master/misc/config_definitions.json
+        for possible options. Useful if you want to use PHP functions in your config.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = config.time.timeZone != null;
+        message = "You must set `time.timeZone` to use the LibreNMS module.";
+      }
+      {
+        assertion = cfg.database.createLocally -> cfg.database.host == "localhost";
+        message = "The database host must be \"localhost\" if services.librenms.database.createLocally is set to true.";
+      }
+      {
+        assertion = !(cfg.useDistributedPollers && cfg.distributedPoller.enable);
+        message = "The LibreNMS instance can't be a distributed poller and a full instance at the same time.";
+      }
+    ];
+
+    users.users.${cfg.user} = {
+      group = "${cfg.group}";
+      isSystemUser = true;
+    };
+
+    users.groups.${cfg.group} = { };
+
+    services.librenms.settings = {
+      # basic configs
+      "user" = cfg.user;
+      "own_hostname" = cfg.hostname;
+      "base_url" = lib.mkDefault "/";
+      "auth_mechanism" = lib.mkDefault "mysql";
+
+      # disable auto update function (won't work with NixOS)
+      "update" = false;
+
+      # enable fast ping by default
+      "ping_rrd_step" = 60;
+
+      # one minute polling
+      "rrd.step" = if cfg.enableOneMinutePolling then 60 else 300;
+      "rrd.heartbeat" = if cfg.enableOneMinutePolling then 120 else 600;
+    } // (lib.optionalAttrs cfg.distributedPoller.enable {
+      "distributed_poller" = true;
+      "distributed_poller_name" = lib.mkIf (cfg.distributedPoller.name != null) cfg.distributedPoller.name;
+      "distributed_poller_group" = cfg.distributedPoller.group;
+      "distributed_billing" = cfg.distributedPoller.distributedBilling;
+      "distributed_poller_memcached_host" = cfg.distributedPoller.memcachedHost;
+      "distributed_poller_memcached_port" = cfg.distributedPoller.memcachedPort;
+      "rrdcached" = "${cfg.distributedPoller.rrdcachedHost}:${toString cfg.distributedPoller.rrdcachedPort}";
+    }) // (lib.optionalAttrs cfg.useDistributedPollers {
+      "distributed_poller" = true;
+      # still enable a local poller with distributed polling
+      "distributed_poller_group" = lib.mkDefault "0";
+      "distributed_billing" = lib.mkDefault true;
+      "distributed_poller_memcached_host" = "localhost";
+      "distributed_poller_memcached_port" = 11211;
+      "rrdcached" = "localhost:42217";
+    });
+
+    services.memcached = lib.mkIf cfg.useDistributedPollers {
+      enable = true;
+      listen = "0.0.0.0";
+    };
+
+    systemd.services.rrdcached = lib.mkIf cfg.useDistributedPollers {
+      description = "rrdcached";
+      after = [ "librenms-setup.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "forking";
+        User = cfg.user;
+        Group = cfg.group;
+        LimitNOFILE = 16384;
+        RuntimeDirectory = "rrdcached";
+        PidFile = "/run/rrdcached/rrdcached.pid";
+        # rrdcached params from https://docs.librenms.org/Extensions/Distributed-Poller/#config-sample
+        ExecStart = "${pkgs.rrdtool}/bin/rrdcached -l 0:42217 -R -j ${cfg.dataDir}/rrdcached-journal/ -F -b ${cfg.dataDir}/rrd -B -w 1800 -z 900 -p /run/rrdcached/rrdcached.pid";
+      };
+    };
+
+    services.mysql = lib.mkIf cfg.database.createLocally {
+      enable = true;
+      package = lib.mkDefault pkgs.mariadb;
+      settings.mysqld = {
+        innodb_file_per_table = 1;
+        lower_case_table_names = 0;
+      } // (lib.optionalAttrs cfg.useDistributedPollers {
+        bind-address = "0.0.0.0";
+      });
+      ensureDatabases = [ cfg.database.database ];
+      ensureUsers = [
+        {
+          name = cfg.database.username;
+          ensurePermissions = {
+            "${cfg.database.database}.*" = "ALL PRIVILEGES";
+          };
+        }
+      ];
+      initialScript = lib.mkIf cfg.useDistributedPollers (pkgs.writeText "mysql-librenms-init" ''
+        CREATE USER IF NOT EXISTS '${cfg.database.username}'@'%';
+        GRANT ALL PRIVILEGES ON ${cfg.database.database}.* TO '${cfg.database.username}'@'%';
+      '');
+    };
+
+    services.nginx = lib.mkIf (!cfg.distributedPoller.enable) {
+      enable = true;
+      virtualHosts."${cfg.hostname}" = lib.mkMerge [
+        cfg.nginx
+        {
+          root = lib.mkForce "${package}/html";
+          locations."/" = {
+            index = "index.php";
+            tryFiles = "$uri $uri/ /index.php?$query_string";
+          };
+          locations."~ .php$".extraConfig = ''
+            fastcgi_pass unix:${config.services.phpfpm.pools."librenms".socket};
+            fastcgi_split_path_info ^(.+\.php)(/.+)$;
+          '';
+        }
+      ];
+    };
+
+    services.phpfpm.pools.librenms = lib.mkIf (!cfg.distributedPoller.enable) {
+      user = cfg.user;
+      group = cfg.group;
+      inherit (package) phpPackage;
+      inherit phpOptions;
+      settings = {
+        "listen.mode" = "0660";
+        "listen.owner" = config.services.nginx.user;
+        "listen.group" = config.services.nginx.group;
+      } // cfg.poolConfig;
+    };
+
+    systemd.services.librenms-scheduler = {
+      description = "LibreNMS Scheduler";
+      path = [ pkgs.unixtools.whereis ];
+      serviceConfig = {
+        Type = "oneshot";
+        WorkingDirectory = package;
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${artisanWrapper}/bin/librenms-artisan schedule:run";
+      };
+    };
+
+    systemd.timers.librenms-scheduler = {
+      description = "LibreNMS Scheduler";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = "minutely";
+        AccuracySec = "1second";
+      };
+    };
+
+    systemd.services.librenms-setup = {
+      description = "Preparation tasks for LibreNMS";
+      before = [ "phpfpm-librenms.service" ];
+      after = [ "systemd-tmpfiles-setup.service" ]
+        ++ (lib.optional (cfg.database.host == "localhost") "mysql.service");
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ package configFile ];
+      path = [ pkgs.mariadb pkgs.unixtools.whereis pkgs.gnused ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        EnvironmentFile = lib.mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStartPre = lib.mkIf cfg.database.createLocally [ "!${pkgs.writeShellScript "librenms-db-init" ''
+          DB_PASSWORD=$(cat ${cfg.database.passwordFile} | tr -d '\n')
+          echo "ALTER USER '${cfg.database.username}'@'localhost' IDENTIFIED BY '$DB_PASSWORD';" | ${pkgs.mariadb}/bin/mysql
+          ${lib.optionalString cfg.useDistributedPollers ''
+            echo "ALTER USER '${cfg.database.username}'@'%' IDENTIFIED BY '$DB_PASSWORD';" | ${pkgs.mariadb}/bin/mysql
+          ''}
+        ''}"];
+      };
+      script = ''
+        set -euo pipefail
+
+        # config setup
+        ln -sf ${configFile} ${cfg.dataDir}/config.php
+        ${pkgs.envsubst}/bin/envsubst -i ${configJson} -o ${cfg.dataDir}/config.json
+        export PHPRC=${phpIni}
+
+        if [[ ! -s ${cfg.dataDir}/.env ]]; then
+          # init .env file
+          echo "APP_KEY=" > ${cfg.dataDir}/.env
+          ${artisanWrapper}/bin/librenms-artisan key:generate --ansi
+          ${artisanWrapper}/bin/librenms-artisan webpush:vapid
+          echo "" >> ${cfg.dataDir}/.env
+          echo -n "NODE_ID=" >> ${cfg.dataDir}/.env
+          ${package.phpPackage}/bin/php -r "echo uniqid();" >> ${cfg.dataDir}/.env
+          echo "" >> ${cfg.dataDir}/.env
+        else
+          # .env file already exists --> only update database and cache config
+          ${pkgs.gnused}/bin/sed -i /^DB_/d ${cfg.dataDir}/.env
+          ${pkgs.gnused}/bin/sed -i /^CACHE_DRIVER/d ${cfg.dataDir}/.env
+        fi
+        ${lib.optionalString (cfg.useDistributedPollers || cfg.distributedPoller.enable) ''
+          echo "CACHE_DRIVER=memcached" >> ${cfg.dataDir}/.env
+        ''}
+        echo "DB_HOST=${cfg.database.host}" >> ${cfg.dataDir}/.env
+        echo "DB_PORT=${toString cfg.database.port}" >> ${cfg.dataDir}/.env
+        echo "DB_DATABASE=${cfg.database.database}" >> ${cfg.dataDir}/.env
+        echo "DB_USERNAME=${cfg.database.username}" >> ${cfg.dataDir}/.env
+        echo -n "DB_PASSWORD=" >> ${cfg.dataDir}/.env
+        cat ${cfg.database.passwordFile} >> ${cfg.dataDir}/.env
+
+        # clear cache after update
+        OLD_VERSION=$(cat ${cfg.dataDir}/version)
+        if [[ $OLD_VERSION != "${package.version}" ]]; then
+          rm -r ${cfg.dataDir}/cache/*
+          echo "${package.version}" > ${cfg.dataDir}/version
+        fi
+
+        # convert rrd files when the oneMinutePolling option is changed
+        OLD_ENABLED=$(cat ${cfg.dataDir}/one_minute_enabled)
+        if [[ $OLD_ENABLED != "${lib.boolToString cfg.enableOneMinutePolling}" ]]; then
+          ${package}/scripts/rrdstep.php -h all
+          echo "${lib.boolToString cfg.enableOneMinutePolling}" > ${cfg.dataDir}/one_minute_enabled
+        fi
+
+        # migrate db
+        ${artisanWrapper}/bin/librenms-artisan migrate --force --no-interaction
+      '';
+    };
+
+    programs.mtr.enable = true;
+
+    services.logrotate = {
+      enable = true;
+      settings."${cfg.logDir}/librenms.log" = {
+        su = "${cfg.user} ${cfg.group}";
+        create = "0640 ${cfg.user} ${cfg.group}";
+        rotate = 6;
+        frequency = "weekly";
+        compress = true;
+        delaycompress = true;
+        missingok = true;
+        notifempty = true;
+      };
+    };
+
+    services.cron = {
+      enable = true;
+      systemCronJobs = let
+        env = "PHPRC=${phpIni}";
+      in [
+        # based on crontab provided by LibreNMS
+        "33 */6 * * * ${cfg.user} ${env} ${package}/cronic ${package}/discovery-wrapper.py 1"
+        "*/5 * * * * ${cfg.user} ${env} ${package}/discovery.php -h new >> /dev/null 2>&1"
+
+        "${if cfg.enableOneMinutePolling then "*" else "*/5"} * * * * ${cfg.user} ${env} ${package}/cronic ${package}/poller-wrapper.py ${toString cfg.pollerThreads}"
+        "* * * * * ${cfg.user} ${env} ${package}/alerts.php >> /dev/null 2>&1"
+
+        "*/5 * * * * ${cfg.user} ${env} ${package}/poll-billing.php >> /dev/null 2>&1"
+        "01 * * * * ${cfg.user} ${env} ${package}/billing-calculate.php >> /dev/null 2>&1"
+        "*/5 * * * * ${cfg.user} ${env} ${package}/check-services.php >> /dev/null 2>&1"
+
+        # extra: fast ping
+        "* * * * * ${cfg.user} ${env} ${package}/ping.php >> /dev/null 2>&1"
+
+        # daily.sh tasks are split to exclude update
+        "19 0 * * * ${cfg.user} ${env} ${package}/daily.sh cleanup >> /dev/null 2>&1"
+        "19 0 * * * ${cfg.user} ${env} ${package}/daily.sh notifications >> /dev/null 2>&1"
+        "19 0 * * * ${cfg.user} ${env} ${package}/daily.sh peeringdb >> /dev/null 2>&1"
+        "19 0 * * * ${cfg.user} ${env} ${package}/daily.sh mac_oui >> /dev/null 2>&1"
+      ];
+    };
+
+    security.wrappers = {
+      fping = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${pkgs.fping}/bin/fping";
+      };
+    };
+
+    environment.systemPackages = [ artisanWrapper lnmsWrapper ];
+
+    systemd.tmpfiles.rules = [
+      "d ${cfg.logDir}                               0750 ${cfg.user} ${cfg.group} - -"
+      "f ${cfg.logDir}/librenms.log                  0640 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}                              0750 ${cfg.user} ${cfg.group} - -"
+      "f ${cfg.dataDir}/.env                         0600 ${cfg.user} ${cfg.group} - -"
+      "f ${cfg.dataDir}/version                      0600 ${cfg.user} ${cfg.group} - -"
+      "f ${cfg.dataDir}/one_minute_enabled           0600 ${cfg.user} ${cfg.group} - -"
+      "f ${cfg.dataDir}/config.json                  0600 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/storage                      0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/storage/app                  0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/storage/debugbar             0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/storage/framework            0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/storage/framework/cache      0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/storage/framework/sessions   0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/storage/framework/views      0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/storage/logs                 0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/rrd                          0700 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/cache                        0700 ${cfg.user} ${cfg.group} - -"
+    ] ++ lib.optionals cfg.useDistributedPollers [
+      "d ${cfg.dataDir}/rrdcached-journal            0700 ${cfg.user} ${cfg.group} - -"
+    ];
+
+  };
+
+  meta.maintainers = lib.teams.wdz.members;
+}
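
# A minimal sketch of a full LibreNMS instance with a locally created database;
# the hostname and password file path are placeholders. Note that the module
# asserts that `time.timeZone` is set.
{
  time.timeZone = "UTC";
  services.librenms = {
    enable = true;
    hostname = "librenms.example.org";
    database = {
      createLocally = true;
      passwordFile = "/run/secrets/librenms-db-password";
    };
  };
}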
diff --git a/nixpkgs/nixos/modules/services/monitoring/loki.nix b/nixpkgs/nixos/modules/services/monitoring/loki.nix
new file mode 100644
index 000000000000..f3b97e9151ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/loki.nix
@@ -0,0 +1,116 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) escapeShellArgs mkEnableOption mkIf mkOption types;
+
+  cfg = config.services.loki;
+
+  prettyJSON = conf:
+    pkgs.runCommand "loki-config.json" { } ''
+      echo '${builtins.toJSON conf}' | ${pkgs.jq}/bin/jq 'del(._module)' > $out
+    '';
+
+in {
+  options.services.loki = {
+    enable = mkEnableOption (lib.mdDoc "loki");
+
+    user = mkOption {
+      type = types.str;
+      default = "loki";
+      description = lib.mdDoc ''
+        User under which the Loki service runs.
+      '';
+    };
+
+    package = lib.mkPackageOptionMD pkgs "grafana-loki" { };
+
+    group = mkOption {
+      type = types.str;
+      default = "loki";
+      description = lib.mdDoc ''
+        Group under which the Loki service runs.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/loki";
+      description = lib.mdDoc ''
+        Specify the directory for Loki.
+      '';
+    };
+
+    configuration = mkOption {
+      type = (pkgs.formats.json {}).type;
+      default = {};
+      description = lib.mdDoc ''
+        Specify the configuration for Loki in Nix.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Specify a configuration file that Loki should use.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--server.http-listen-port=3101" ];
+      description = lib.mdDoc ''
+        Specify a list of additional command line flags,
+        which get escaped and are then passed to Loki.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [{
+      assertion = (
+        (cfg.configuration == {} -> cfg.configFile != null) &&
+        (cfg.configFile != null -> cfg.configuration == {})
+      );
+      message  = ''
+        Please specify either
+        'services.loki.configuration' or
+        'services.loki.configFile'.
+      '';
+    }];
+
+    environment.systemPackages = [ cfg.package ]; # logcli
+
+    users.groups.${cfg.group} = { };
+    users.users.${cfg.user} = {
+      description = "Loki Service User";
+      group = cfg.group;
+      home = cfg.dataDir;
+      createHome = true;
+      isSystemUser = true;
+    };
+
+    systemd.services.loki = {
+      description = "Loki Service Daemon";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = let
+        conf = if cfg.configFile == null
+               then prettyJSON cfg.configuration
+               else cfg.configFile;
+      in
+      {
+        ExecStart = "${cfg.package}/bin/loki --config.file=${conf} ${escapeShellArgs cfg.extraFlags}";
+        User = cfg.user;
+        Restart = "always";
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectSystem = "full";
+        DevicePolicy = "closed";
+        NoNewPrivileges = true;
+        WorkingDirectory = cfg.dataDir;
+      };
+    };
+  };
+}
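
# A minimal sketch using the configFile variant; the path is a placeholder for
# a Loki configuration you provide yourself. Alternatively, set
# services.loki.configuration to an attribute set and leave configFile null.
{
  services.loki = {
    enable = true;
    configFile = "/etc/loki/local-config.yaml";
  };
}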
diff --git a/nixpkgs/nixos/modules/services/monitoring/longview.nix b/nixpkgs/nixos/modules/services/monitoring/longview.nix
new file mode 100644
index 000000000000..5825cab0134c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/longview.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.longview;
+
+  runDir = "/run/longview";
+  configsDir = "${runDir}/longview.d";
+
+in {
+  options = {
+
+    services.longview = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If enabled, system metrics will be sent to Linode Longview.
+        '';
+      };
+
+      apiKey = mkOption {
+        type = types.str;
+        default = "";
+        example = "01234567-89AB-CDEF-0123456789ABCDEF";
+        description = lib.mdDoc ''
+          Longview API key. To get this, look in the Longview settings,
+          found at https://manager.linode.com/longview/.
+
+          Warning: this secret is stored in the world-readable Nix store!
+          Use {option}`apiKeyFile` instead.
+        '';
+      };
+
+      apiKeyFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/longview-api-key";
+        description = lib.mdDoc ''
+          A file containing the Longview API key.
+          To get this, look in the Longview settings,
+          found at https://manager.linode.com/longview/.
+
+          {option}`apiKeyFile` takes precedence over {option}`apiKey`.
+        '';
+      };
+
+      apacheStatusUrl = mkOption {
+        type = types.str;
+        default = "";
+        example = "http://127.0.0.1/server-status";
+        description = lib.mdDoc ''
+          The Apache status page URL. If provided, Longview will
+          gather statistics from this location. This requires Apache
+          mod_status to be loaded and enabled.
+        '';
+      };
+
+      nginxStatusUrl = mkOption {
+        type = types.str;
+        default = "";
+        example = "http://127.0.0.1/nginx_status";
+        description = lib.mdDoc ''
+          The Nginx status page URL. Longview will gather statistics
+          from this URL. This requires the Nginx stub_status module to
+          be enabled and configured at the given location.
+        '';
+      };
+
+      mysqlUser = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          The user for connecting to the MySQL database. If provided,
+          Longview will connect to MySQL and collect statistics about
+          queries, etc. This user does not need to have been granted
+          any extra privileges.
+        '';
+      };
+
+      mysqlPassword = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          The password corresponding to {option}`mysqlUser`.
+          Warning: this is stored in cleartext in the Nix store!
+          Use {option}`mysqlPasswordFile` instead.
+        '';
+      };
+
+      mysqlPasswordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/dbpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to {option}`mysqlUser`.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.longview =
+      { description = "Longview Metrics Collection";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig.Type = "forking";
+        serviceConfig.ExecStop = "-${pkgs.coreutils}/bin/kill -TERM $MAINPID";
+        serviceConfig.ExecReload = "-${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        serviceConfig.PIDFile = "${runDir}/longview.pid";
+        serviceConfig.ExecStart = "${pkgs.longview}/bin/longview";
+        preStart = ''
+          umask 077
+          mkdir -p ${configsDir}
+        '' + (optionalString (cfg.apiKeyFile != null) ''
+          cp --no-preserve=all "${cfg.apiKeyFile}" ${runDir}/longview.key
+        '') + (optionalString (cfg.apacheStatusUrl != "") ''
+          cat > ${configsDir}/Apache.conf <<EOF
+          location ${cfg.apacheStatusUrl}?auto
+          EOF
+        '') + (optionalString (cfg.mysqlUser != "" && cfg.mysqlPasswordFile != null) ''
+          cat > ${configsDir}/MySQL.conf <<EOF
+          username ${cfg.mysqlUser}
+          password `head -n1 "${cfg.mysqlPasswordFile}"`
+          EOF
+        '') + (optionalString (cfg.nginxStatusUrl != "") ''
+          cat > ${configsDir}/Nginx.conf <<EOF
+          location ${cfg.nginxStatusUrl}
+          EOF
+        '');
+      };
+
+    warnings = let warn = k: optional (cfg.${k} != "")
+                 "config.services.longview.${k} is insecure. Use ${k}File instead.";
+               in concatMap warn [ "apiKey" "mysqlPassword" ];
+
+    assertions = [
+      { assertion = cfg.apiKeyFile != null;
+        message = "Longview needs an API key configured";
+      }
+    ];
+
+    # Create API key file if not configured.
+    services.longview.apiKeyFile = mkIf (cfg.apiKey != "")
+      (mkDefault (toString (pkgs.writeTextFile {
+        name = "longview.key";
+        text = cfg.apiKey;
+      })));
+
+    # Create MySQL password file if not configured.
+    services.longview.mysqlPasswordFile = mkDefault (toString (pkgs.writeTextFile {
+      name = "mysql-password-file";
+      text = cfg.mysqlPassword;
+    }));
+  };
+}
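
# A minimal sketch; the key file path is a placeholder. Using apiKeyFile avoids
# the warning about storing the plain apiKey in the world-readable Nix store.
{
  services.longview = {
    enable = true;
    apiKeyFile = "/run/keys/longview-api-key";
  };
}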
diff --git a/nixpkgs/nixos/modules/services/monitoring/mackerel-agent.nix b/nixpkgs/nixos/modules/services/monitoring/mackerel-agent.nix
new file mode 100644
index 000000000000..62a7858500f2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/mackerel-agent.nix
@@ -0,0 +1,110 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mackerel-agent;
+  settingsFmt = pkgs.formats.toml {};
+in {
+  options.services.mackerel-agent = {
+    enable = mkEnableOption (lib.mdDoc "mackerel.io agent");
+
+    # the upstream package runs as root, but doesn't seem to be strictly
+    # necessary for basic functionality
+    runAsRoot = mkEnableOption (lib.mdDoc "running as root");
+
+    autoRetirement = mkEnableOption (lib.mdDoc ''
+      retiring the host upon OS shutdown
+    '');
+
+    apiKeyFile = mkOption {
+      type = types.path;
+      example = "/run/keys/mackerel-api-key";
+      description = lib.mdDoc ''
+        Path to file containing the Mackerel API key. The file should contain a
+        single line of the following form:
+
+        `apikey = "EXAMPLE_API_KEY"`
+      '';
+    };
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Options for mackerel-agent.conf.
+
+        Documentation:
+        <https://mackerel.io/docs/entry/spec/agent>
+      '';
+
+      default = {};
+      example = {
+        verbose = false;
+        silent = false;
+      };
+
+      type = types.submodule {
+        freeformType = settingsFmt.type;
+
+        options.host_status = {
+          on_start = mkOption {
+            type = types.enum [ "working" "standby" "maintenance" "poweroff" ];
+            description = lib.mdDoc "Host status after agent startup.";
+            default = "working";
+          };
+          on_stop = mkOption {
+            type = types.enum [ "working" "standby" "maintenance" "poweroff" ];
+            description = lib.mdDoc "Host status after agent shutdown.";
+            default = "poweroff";
+          };
+        };
+
+        options.diagnostic =
+          mkEnableOption (lib.mdDoc "collecting memory usage for the agent itself");
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ mackerel-agent ];
+
+    environment.etc = {
+      "mackerel-agent/mackerel-agent.conf".source =
+        settingsFmt.generate "mackerel-agent.conf" cfg.settings;
+      "mackerel-agent/conf.d/api-key.conf".source = cfg.apiKeyFile;
+    };
+
+    services.mackerel-agent.settings = {
+      root = mkDefault "/var/lib/mackerel-agent";
+      pidfile = mkDefault "/run/mackerel-agent/mackerel-agent.pid";
+
+      # conf.d stores the symlink to cfg.apiKeyFile
+      include = mkDefault "/etc/mackerel-agent/conf.d/*.conf";
+    };
+
+    # upstream service file in https://git.io/JUt4Q
+    systemd.services.mackerel-agent = {
+      description = "mackerel.io agent";
+      after = [ "network-online.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        MACKEREL_PLUGIN_WORKDIR = mkDefault "%C/mackerel-agent";
+      };
+      serviceConfig = {
+        DynamicUser = !cfg.runAsRoot;
+        PrivateTmp = mkDefault true;
+        CacheDirectory = "mackerel-agent";
+        ConfigurationDirectory = "mackerel-agent";
+        RuntimeDirectory = "mackerel-agent";
+        StateDirectory = "mackerel-agent";
+        ExecStart = "${pkgs.mackerel-agent}/bin/mackerel-agent supervise";
+        ExecStopPost = mkIf cfg.autoRetirement "${pkgs.mackerel-agent}/bin/mackerel-agent retire -force";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LimitNOFILE = mkDefault 65536;
+        LimitNPROC = mkDefault 65536;
+      };
+      restartTriggers = [
+        config.environment.etc."mackerel-agent/mackerel-agent.conf".source
+      ];
+    };
+  };
+}
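
# A minimal sketch; the key file path is a placeholder and the file must
# contain a single `apikey = "..."` line as described in the option above.
{
  services.mackerel-agent = {
    enable = true;
    apiKeyFile = "/run/keys/mackerel-api-key";
  };
}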
diff --git a/nixpkgs/nixos/modules/services/monitoring/metricbeat.nix b/nixpkgs/nixos/modules/services/monitoring/metricbeat.nix
new file mode 100644
index 000000000000..310c9d8ed509
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/metricbeat.nix
@@ -0,0 +1,151 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    attrValues
+    literalExpression
+    mkEnableOption
+    mkIf
+    mkOption
+    types
+    ;
+  cfg = config.services.metricbeat;
+
+  settingsFormat = pkgs.formats.yaml {};
+
+in
+{
+  options = {
+
+    services.metricbeat = {
+
+      enable = mkEnableOption (lib.mdDoc "metricbeat");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.metricbeat;
+        defaultText = literalExpression "pkgs.metricbeat";
+        example = literalExpression "pkgs.metricbeat7";
+        description = lib.mdDoc ''
+          The metricbeat package to use
+        '';
+      };
+
+      modules = mkOption {
+        description = lib.mdDoc ''
+          Metricbeat modules are responsible for reading metrics from the various sources.
+
+          This is like `services.metricbeat.settings.metricbeat.modules`,
+          but structured as an attribute set. This has the benefit that multiple
+          NixOS modules can contribute settings to a single metricbeat module.
+
+          A module can be specified multiple times by choosing a different `<name>`
+          for each, but setting [](#opt-services.metricbeat.modules._name_.module) to the same value.
+
+          See <https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html>.
+        '';
+        default = {};
+        type = types.attrsOf (types.submodule ({ name, ... }: {
+          freeformType = settingsFormat.type;
+          options = {
+            module = mkOption {
+              type = types.str;
+              default = name;
+              description = lib.mdDoc ''
+                The name of the module.
+
+                Look for the value after `module:` on the individual
+                module pages linked from <https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html>.
+              '';
+            };
+          };
+        }));
+        example = {
+          system = {
+            metricsets = ["cpu" "load" "memory" "network" "process" "process_summary" "uptime" "socket_summary"];
+            enabled = true;
+            period = "10s";
+            processes = [".*"];
+            cpu.metrics = ["percentages" "normalized_percentages"];
+            core.metrics = ["percentages"];
+          };
+        };
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = settingsFormat.type;
+          options = {
+
+            name = mkOption {
+              type = types.str;
+              default = "";
+              description = lib.mdDoc ''
+                Name of the beat. Defaults to the hostname.
+                See <https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-general-options.html#_name>.
+              '';
+            };
+
+            tags = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = lib.mdDoc ''
+                Tags to place on the shipped metrics.
+                See <https://www.elastic.co/guide/en/beats/metricbeat/current/configuration-general-options.html#_tags_2>.
+              '';
+            };
+
+            metricbeat.modules = mkOption {
+              type = types.listOf settingsFormat.type;
+              default = [];
+              internal = true;
+              description = lib.mdDoc ''
+                The metric collecting modules. Use [](#opt-services.metricbeat.modules) instead.
+
+                See <https://www.elastic.co/guide/en/beats/metricbeat/current/metricbeat-modules.html>.
+              '';
+            };
+          };
+        };
+        default = {};
+        description = lib.mdDoc ''
+          Configuration for metricbeat. See <https://www.elastic.co/guide/en/beats/metricbeat/current/configuring-howto-metricbeat.html> for supported values.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        # empty modules would cause a failure at runtime
+        assertion = cfg.settings.metricbeat.modules != [];
+        message = "services.metricbeat: You must configure one or more modules.";
+      }
+    ];
+
+    services.metricbeat.settings.metricbeat.modules = attrValues cfg.modules;
+
+    systemd.services.metricbeat = {
+      description = "metricbeat metrics shipper";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/metricbeat \
+            -c ${settingsFormat.generate "metricbeat.yml" cfg.settings} \
+            --path.data $STATE_DIRECTORY \
+            --path.logs $LOGS_DIRECTORY \
+            ;
+        '';
+        Restart = "always";
+        DynamicUser = true;
+        ProtectSystem = "strict";
+        ProtectHome = "tmpfs";
+        StateDirectory = "metricbeat";
+        LogsDirectory = "metricbeat";
+      };
+    };
+  };
+}
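
# A minimal sketch shipping basic system metrics; the Elasticsearch output
# address is a placeholder and its keys follow the generic Beats output
# settings, which are not defined by this module.
{
  services.metricbeat = {
    enable = true;
    modules.system = {
      metricsets = [ "cpu" "memory" ];
      period = "10s";
    };
    settings.output.elasticsearch.hosts = [ "127.0.0.1:9200" ];
  };
}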
diff --git a/nixpkgs/nixos/modules/services/monitoring/mimir.nix b/nixpkgs/nixos/modules/services/monitoring/mimir.nix
new file mode 100644
index 000000000000..6ed139b22974
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/mimir.nix
@@ -0,0 +1,84 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) escapeShellArgs mkEnableOption mkIf mkOption types;
+
+  cfg = config.services.mimir;
+
+  settingsFormat = pkgs.formats.yaml {};
+in {
+  options.services.mimir = {
+    enable = mkEnableOption (lib.mdDoc "mimir");
+
+    configuration = mkOption {
+      type = (pkgs.formats.json {}).type;
+      default = {};
+      description = lib.mdDoc ''
+        Specify the configuration for Mimir in Nix.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Specify a configuration file that Mimir should use.
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.mimir;
+      defaultText = lib.literalExpression "pkgs.mimir";
+      type = types.package;
+      description = lib.mdDoc ''Mimir package to use.'';
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--config.expand-env=true" ];
+      description = lib.mdDoc ''
+        Specify a list of additional command line flags,
+        which get escaped and are then passed to Mimir.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # for mimirtool
+    environment.systemPackages = [ cfg.package ];
+
+    assertions = [{
+      assertion = (
+        (cfg.configuration == {} -> cfg.configFile != null) &&
+        (cfg.configFile != null -> cfg.configuration == {})
+      );
+      message  = ''
+        Please specify either
+        'services.mimir.configuration' or
+        'services.mimir.configFile'.
+      '';
+    }];
+
+    systemd.services.mimir = {
+      description = "mimir Service Daemon";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = let
+        conf = if cfg.configFile == null
+               then settingsFormat.generate "config.yaml" cfg.configuration
+               else cfg.configFile;
+      in
+      {
+        ExecStart = "${cfg.package}/bin/mimir --config.file=${conf} ${escapeShellArgs cfg.extraFlags}";
+        DynamicUser = true;
+        Restart = "always";
+        ProtectSystem = "full";
+        DevicePolicy = "closed";
+        NoNewPrivileges = true;
+        WorkingDirectory = "/var/lib/mimir";
+        StateDirectory = "mimir";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/monit.nix b/nixpkgs/nixos/modules/services/monitoring/monit.nix
new file mode 100644
index 000000000000..a22bbc9046ba
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/monit.nix
@@ -0,0 +1,48 @@
+{config, pkgs, lib, ...}:
+
+with lib;
+
+let
+  cfg = config.services.monit;
+in
+
+{
+  options.services.monit = {
+
+    enable = mkEnableOption (lib.mdDoc "Monit");
+
+    config = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "monitrc content";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.monit ];
+
+    environment.etc.monitrc = {
+      text = cfg.config;
+      mode = "0400";
+    };
+
+    systemd.services.monit = {
+      description = "Pro-active monitoring utility for unix systems";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.monit}/bin/monit -I -c /etc/monitrc";
+        ExecStop = "${pkgs.monit}/bin/monit -c /etc/monitrc quit";
+        ExecReload = "${pkgs.monit}/bin/monit -c /etc/monitrc reload";
+        KillMode = "process";
+        Restart = "always";
+      };
+      restartTriggers = [ config.environment.etc.monitrc.source ];
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ ryantm ];
+}
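
# A minimal sketch; the monitrc content is a hypothetical example of upstream
# Monit syntax, not something this module prescribes.
{
  services.monit = {
    enable = true;
    config = ''
      set daemon 120
      set httpd port 2812 and use address localhost
        allow localhost
    '';
  };
}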
diff --git a/nixpkgs/nixos/modules/services/monitoring/munin.nix b/nixpkgs/nixos/modules/services/monitoring/munin.nix
new file mode 100644
index 000000000000..5ed7cac48ae7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/munin.nix
@@ -0,0 +1,409 @@
+{ config, lib, pkgs, ... }:
+
+# TODO: support munin-async
+# TODO: LWP/Pg perl libs aren't recognized
+
+# TODO: support fastcgi
+# https://guide.munin-monitoring.org/en/latest/example/webserver/apache-cgi.html
+# spawn-fcgi -s /run/munin/fastcgi-graph.sock -U www-data   -u munin -g munin /usr/lib/munin/cgi/munin-cgi-graph
+# spawn-fcgi -s /run/munin/fastcgi-html.sock  -U www-data   -u munin -g munin /usr/lib/munin/cgi/munin-cgi-html
+# https://paste.sh/vofcctHP#-KbDSXVeWoifYncZmLfZzgum
+# nginx https://munin.readthedocs.org/en/latest/example/webserver/nginx.html
+
+
+with lib;
+
+let
+  nodeCfg = config.services.munin-node;
+  cronCfg = config.services.munin-cron;
+
+  muninConf = pkgs.writeText "munin.conf"
+    ''
+      dbdir     /var/lib/munin
+      htmldir   /var/www/munin
+      logdir    /var/log/munin
+      rundir    /run/munin
+
+      ${lib.optionalString (cronCfg.extraCSS != "") "staticdir ${customStaticDir}"}
+
+      ${cronCfg.extraGlobalConfig}
+
+      ${cronCfg.hosts}
+    '';
+
+  nodeConf = pkgs.writeText "munin-node.conf"
+    ''
+      log_level 3
+      log_file Sys::Syslog
+      port 4949
+      host *
+      background 0
+      user root
+      group root
+      host_name ${config.networking.hostName}
+      setsid 0
+
+      # plugins wrapped by makeWrapper begin with dots
+      ignore_file ^\.
+
+      allow ^::1$
+      allow ^127\.0\.0\.1$
+
+      ${nodeCfg.extraConfig}
+    '';
+
+  pluginConf = pkgs.writeText "munin-plugin-conf"
+    ''
+      [hddtemp_smartctl]
+      user root
+      group root
+
+      [meminfo]
+      user root
+      group root
+
+      [ipmi*]
+      user root
+      group root
+
+      [munin*]
+      env.UPDATE_STATSFILE /var/lib/munin/munin-update.stats
+
+      ${nodeCfg.extraPluginConfig}
+    '';
+
+  pluginConfDir = pkgs.stdenv.mkDerivation {
+    name = "munin-plugin-conf.d";
+    buildCommand = ''
+      mkdir $out
+      ln -s ${pluginConf} $out/nixos-config
+    '';
+  };
+
+  # Copy one Munin plugin into the Nix store with a specific name.
+  # This is suitable for use with plugins going directly into /etc/munin/plugins,
+  # i.e. munin.extraPlugins.
+  internOnePlugin = { name, path }:
+    "cp -a '${path}' '${name}'";
+
+  # Copy an entire tree of Munin plugins into a single directory in the Nix
+  # store, with no renaming. The output is suitable for use with
+  # munin-node-configure --suggest, i.e. munin.extraAutoPlugins.
+  # Note that this flattens the input; this is intentional, as
+  # munin-node-configure won't recurse into subdirectories.
+  internManyPlugins = path:
+    "find '${path}' -type f -perm /a+x -exec cp -a -t . '{}' '+'";
+
+  # Use the appropriate intern-fn to copy the plugins into the store and patch
+  # them afterwards in an attempt to get them to run on NixOS.
+  # This is a bit hairy because we can't just fix shebangs; lots of munin plugins
+  # hardcode paths like /sbin/mount rather than trusting $PATH, so we have to
+  # look for and update those throughout the script. At the same time, if the
+  # plugin comes from a package that is already nixified, we don't want to
+  # rewrite paths like /nix/store/foo/sbin/mount.
+  # For now we make the simplifying assumption that no file will contain lines
+  # which mix store paths and FHS paths, and thus run our substitution only on
+  # lines which do not contain store paths.
+  internAndFixPlugins = name: intern-fn: paths:
+    pkgs.runCommand name {} ''
+      mkdir -p "$out"
+      cd "$out"
+      ${lib.concatStringsSep "\n" (map intern-fn paths)}
+      chmod -R u+w .
+      ${pkgs.findutils}/bin/find . -type f -exec ${pkgs.gnused}/bin/sed -E -i "
+        \%''${NIX_STORE}/%! s,(/usr)?/s?bin/,/run/current-system/sw/bin/,g
+      " '{}' '+'
+    '';
+
+  # TODO: write a derivation for munin-contrib, so that for contrib plugins
+  # you can just refer to them by name rather than needing to include a copy
+  # of munin-contrib in your nixos configuration.
+  extraPluginDir = internAndFixPlugins "munin-extra-plugins.d"
+    internOnePlugin
+    (lib.attrsets.mapAttrsToList (k: v: { name = k; path = v; }) nodeCfg.extraPlugins);
+
+  extraAutoPluginDir = internAndFixPlugins "munin-extra-auto-plugins.d"
+    internManyPlugins nodeCfg.extraAutoPlugins;
+
+  customStaticDir = pkgs.runCommand "munin-custom-static-data" {} ''
+    cp -a "${pkgs.munin}/etc/opt/munin/static" "$out"
+    cd "$out"
+    chmod -R u+w .
+    echo "${cronCfg.extraCSS}" >> style.css
+    echo "${cronCfg.extraCSS}" >> style-new.css
+  '';
+in
+
+{
+
+  options = {
+
+    services.munin-node = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable the Munin Node agent. The node listens on 0.0.0.0 and, for
+          security reasons, by default accepts connections only from 127.0.0.1.
+
+          See <https://guide.munin-monitoring.org/en/latest/architecture/index.html>.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          {file}`munin-node.conf` extra configuration. See
+          <https://guide.munin-monitoring.org/en/latest/reference/munin-node.conf.html>
+        '';
+      };
+
+      extraPluginConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          {file}`plugin-conf.d` extra plugin configuration. See
+          <https://guide.munin-monitoring.org/en/latest/plugin/use.html>
+        '';
+        example = ''
+          [fail2ban_*]
+          user root
+        '';
+      };
+
+      extraPlugins = mkOption {
+        default = {};
+        type = with types; attrsOf path;
+        description = lib.mdDoc ''
+          Additional Munin plugins to activate. Keys are the name of the plugin
+          symlink, values are the path to the underlying plugin script. You
+          can use the same plugin script multiple times (e.g. for wildcard
+          plugins).
+
+          Note that these plugins do not participate in autoconfiguration. If
+          you want to autoconfigure additional plugins, use
+          {option}`services.munin-node.extraAutoPlugins`.
+
+          Plugins enabled in this manner take precedence over autoconfigured
+          plugins.
+
+          Plugins will be copied into the Nix store, and the module will attempt
+          to patch them to run properly by fixing hardcoded references to
+          `/bin`, `/usr/bin`,
+          `/sbin`, and `/usr/sbin`.
+        '';
+        example = literalExpression ''
+          {
+            zfs_usage_bigpool = /src/munin-contrib/plugins/zfs/zfs_usage_;
+            zfs_usage_smallpool = /src/munin-contrib/plugins/zfs/zfs_usage_;
+            zfs_list = /src/munin-contrib/plugins/zfs/zfs_list;
+          };
+        '';
+      };
+
+      extraAutoPlugins = mkOption {
+        default = [];
+        type = with types; listOf path;
+        description = lib.mdDoc ''
+          Additional Munin plugins to autoconfigure, using
+          `munin-node-configure --suggest`. These should be
+          the actual paths to the plugin files (or directories containing them),
+          not just their names.
+
+          If you want to manually enable individual plugins instead, use
+          {option}`services.munin-node.extraPlugins`.
+
+          Note that only plugins that have the 'autoconfig' capability will do
+          anything if listed here, since plugins that cannot autoconfigure
+          won't be automatically enabled by
+          `munin-node-configure`.
+
+          Plugins will be copied into the Nix store, and the module will attempt
+          to patch them to run properly by fixing hardcoded references to
+          `/bin`, `/usr/bin`,
+          `/sbin`, and `/usr/sbin`.
+        '';
+        example = literalExpression ''
+          [
+            /src/munin-contrib/plugins/zfs
+            /src/munin-contrib/plugins/ssh
+          ];
+        '';
+      };
+
+      disabledPlugins = mkOption {
+        # TODO: figure out why Munin isn't writing the log file and fix it.
+        # In the meantime this at least suppresses a useless graph full of
+        # NaNs in the output.
+        default = [ "munin_stats" ];
+        type = with types; listOf str;
+        description = lib.mdDoc ''
+          Munin plugins to disable, even if
+          `munin-node-configure --suggest` tries to enable
+          them. To disable a wildcard plugin, use an actual wildcard, as in
+          the example.
+
+          munin_stats is disabled by default as it tries to read
+          `/var/log/munin/munin-update.log` for timing
+          information, and the NixOS build of Munin does not write this file.
+        '';
+        example = [ "diskstats" "zfs_usage_*" ];
+      };
+    };
+
+    services.munin-cron = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable munin-cron. It takes care of all the heavy lifting: collecting
+          data from the nodes and rendering the HTML graphs. It runs munin-update,
+          munin-limits, munin-graph and munin-html in that order.
+
+          HTML output is written to {file}`/var/www/munin/`; configure your
+          favourite webserver to serve these static files.
+        '';
+      };
+
+      extraGlobalConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          {file}`munin.conf` extra global configuration.
+          See <https://guide.munin-monitoring.org/en/latest/reference/munin.conf.html>.
+          Useful for setting up notifications; see
+          <https://guide.munin-monitoring.org/en/latest/tutorial/alert.html>.
+        '';
+        example = ''
+          contact.email.command mail -s "Munin notification for ''${var:host}" someone@example.com
+        '';
+      };
+
+      hosts = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Definitions of the hosts/nodes to collect data from. At least one
+          host is needed for munin-cron to succeed. See
+          <https://guide.munin-monitoring.org/en/latest/reference/munin.conf.html>.
+        '';
+        example = literalExpression ''
+          '''
+            [''${config.networking.hostName}]
+            address localhost
+          '''
+        '';
+      };
+
+      extraCSS = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Custom styling for the HTML that munin-cron generates. This will be
+          appended to the CSS files used by munin-cron and will thus take
+          precedence over the builtin styles.
+        '';
+        example = ''
+          /* A simple dark theme. */
+          html, body { background: #222222; }
+          #header, #footer { background: #333333; }
+          img.i, img.iwarn, img.icrit, img.iunkn {
+            filter: invert(100%) hue-rotate(-30deg);
+          }
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkMerge [ (mkIf (nodeCfg.enable || cronCfg.enable)  {
+
+    environment.systemPackages = [ pkgs.munin ];
+
+    users.users.munin = {
+      description = "Munin monitoring user";
+      group = "munin";
+      uid = config.ids.uids.munin;
+      home = "/var/lib/munin";
+    };
+
+    users.groups.munin = {
+      gid = config.ids.gids.munin;
+    };
+
+  }) (mkIf nodeCfg.enable {
+
+    systemd.services.munin-node = {
+      description = "Munin Node";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ munin smartmontools "/run/current-system/sw" "/run/wrappers" ];
+      environment.MUNIN_LIBDIR = "${pkgs.munin}/lib";
+      environment.MUNIN_PLUGSTATE = "/run/munin";
+      environment.MUNIN_LOGDIR = "/var/log/munin";
+      preStart = ''
+        echo "Updating munin plugins..."
+
+        mkdir -p /etc/munin/plugins
+        rm -rf /etc/munin/plugins/*
+
+        # Autoconfigure builtin plugins
+        ${pkgs.munin}/bin/munin-node-configure --suggest --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${pkgs.munin}/lib/plugins --servicedir=/etc/munin/plugins --sconfdir=${pluginConfDir} 2>/dev/null | ${pkgs.bash}/bin/bash
+
+        # Autoconfigure extra plugins
+        ${pkgs.munin}/bin/munin-node-configure --suggest --shell --families contrib,auto,manual --config ${nodeConf} --libdir=${extraAutoPluginDir} --servicedir=/etc/munin/plugins --sconfdir=${pluginConfDir} 2>/dev/null | ${pkgs.bash}/bin/bash
+
+        ${lib.optionalString (nodeCfg.extraPlugins != {}) ''
+            # Link in manually enabled plugins
+            ln -f -s -t /etc/munin/plugins ${extraPluginDir}/*
+          ''}
+
+        ${lib.optionalString (nodeCfg.disabledPlugins != []) ''
+            # Disable plugins
+            cd /etc/munin/plugins
+            rm -f ${toString nodeCfg.disabledPlugins}
+          ''}
+      '';
+      serviceConfig = {
+        ExecStart = "${pkgs.munin}/sbin/munin-node --config ${nodeConf} --servicedir /etc/munin/plugins/ --sconfdir=${pluginConfDir}";
+      };
+    };
+
+    # munin_stats plugin breaks as of 2.0.33 when this doesn't exist
+    systemd.tmpfiles.rules = [ "d /run/munin 0755 munin munin -" ];
+
+  }) (mkIf cronCfg.enable {
+
+    # Munin is hardcoded to use DejaVu Mono and the graphs come out wrong if
+    # it's not available.
+    fonts.packages = [ pkgs.dejavu_fonts ];
+
+    systemd.timers.munin-cron = {
+      description = "batch Munin master programs";
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = "*:0/5";
+    };
+
+    systemd.services.munin-cron = {
+      description = "batch Munin master programs";
+      unitConfig.Documentation = "man:munin-cron(8)";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "munin";
+        ExecStart = "${pkgs.munin}/bin/munin-cron --config ${muninConf}";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /run/munin 0755 munin munin -"
+      "d /var/log/munin 0755 munin munin -"
+      "d /var/www/munin 0755 munin munin -"
+      "d /var/lib/munin 0755 munin munin -"
+    ];
+  })];
+}
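
# A minimal sketch wiring munin-node and munin-cron together on one machine;
# the host block mirrors the hosts option's own example.
{ config, ... }:
{
  services.munin-node.enable = true;
  services.munin-cron = {
    enable = true;
    hosts = ''
      [${config.networking.hostName}]
      address localhost
    '';
  };
}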
diff --git a/nixpkgs/nixos/modules/services/monitoring/nagios.nix b/nixpkgs/nixos/modules/services/monitoring/nagios.nix
new file mode 100644
index 000000000000..dc5fa1be2922
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/nagios.nix
@@ -0,0 +1,213 @@
+# Nagios system/network monitoring daemon.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nagios;
+
+  nagiosState = "/var/lib/nagios";
+  nagiosLogDir = "/var/log/nagios";
+  urlPath = "/nagios";
+
+  nagiosObjectDefs = cfg.objectDefs;
+
+  nagiosObjectDefsDir = pkgs.runCommand "nagios-objects" {
+      inherit nagiosObjectDefs;
+      preferLocalBuild = true;
+    } "mkdir -p $out; ln -s $nagiosObjectDefs $out/";
+
+  nagiosCfgFile = let
+    default = {
+      log_file="${nagiosLogDir}/current";
+      log_archive_path="${nagiosLogDir}/archive";
+      status_file="${nagiosState}/status.dat";
+      object_cache_file="${nagiosState}/objects.cache";
+      temp_file="${nagiosState}/nagios.tmp";
+      lock_file="/run/nagios.lock";
+      state_retention_file="${nagiosState}/retention.dat";
+      query_socket="${nagiosState}/nagios.qh";
+      check_result_path="${nagiosState}";
+      command_file="${nagiosState}/nagios.cmd";
+      cfg_dir="${nagiosObjectDefsDir}";
+      nagios_user="nagios";
+      nagios_group="nagios";
+      illegal_macro_output_chars="`~$&|'\"<>";
+      retain_state_information="1";
+    };
+    lines = mapAttrsToList (key: value: "${key}=${value}") (default // cfg.extraConfig);
+    content = concatStringsSep "\n" lines;
+    file = pkgs.writeText "nagios.cfg" content;
+    validated =  pkgs.runCommand "nagios-checked.cfg" {preferLocalBuild=true;} ''
+      cp ${file} nagios.cfg
+      # nagios checks the existence of /var/lib/nagios, but
+      # it does not exist in the build sandbox, so we fake it
+      mkdir lib
+      lib=$(readlink -f lib)
+      sed -i s@=${nagiosState}@=$lib@ nagios.cfg
+      ${pkgs.nagios}/bin/nagios -v nagios.cfg && cp ${file} $out
+    '';
+    defaultCfgFile = if cfg.validateConfig then validated else file;
+  in
+  if cfg.mainConfigFile == null then defaultCfgFile else cfg.mainConfigFile;
+
+  # Plain configuration for the Nagios web-interface with no
+  # authentication.
+  nagiosCGICfgFile = pkgs.writeText "nagios.cgi.conf"
+    ''
+      main_config_file=${cfg.mainConfigFile}
+      use_authentication=0
+      url_html_path=${urlPath}
+    '';
+
+  extraHttpdConfig =
+    ''
+      ScriptAlias ${urlPath}/cgi-bin ${pkgs.nagios}/sbin
+
+      <Directory "${pkgs.nagios}/sbin">
+        Options ExecCGI
+        Require all granted
+        SetEnv NAGIOS_CGI_CONFIG ${cfg.cgiConfigFile}
+      </Directory>
+
+      Alias ${urlPath} ${pkgs.nagios}/share
+
+      <Directory "${pkgs.nagios}/share">
+        Options None
+        Require all granted
+      </Directory>
+    '';
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "nagios" "urlPath" ] "The urlPath option has been removed as it is hard coded to /nagios in the nagios package.")
+  ];
+
+  meta.maintainers = with lib.maintainers; [ symphorien ];
+
+  options = {
+    services.nagios = {
+      enable = mkEnableOption (lib.mdDoc ''[Nagios](https://www.nagios.org/) to monitor your system or network.'');
+
+      objectDefs = mkOption {
+        description = lib.mdDoc ''
+          A list of Nagios object configuration files that must define
+          the hosts, host groups, services and contacts for the
+          network that you want Nagios to monitor.
+        '';
+        type = types.listOf types.path;
+        example = literalExpression "[ ./objects.cfg ]";
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs; [ monitoring-plugins msmtp mailutils ];
+        defaultText = literalExpression "[pkgs.monitoring-plugins pkgs.msmtp pkgs.mailutils]";
+        description = lib.mdDoc ''
+          Packages to be added to the Nagios {env}`PATH`.
+          Typically used to add plugins, but can be anything.
+        '';
+      };
+
+      mainConfigFile = mkOption {
+        type = types.nullOr types.package;
+        default = null;
+        description = lib.mdDoc ''
+          If non-null, overrides the main configuration file of Nagios.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.attrsOf types.str;
+        example = {
+          debug_level = "-1";
+          debug_file = "/var/log/nagios/debug.log";
+        };
+        default = {};
+        description = lib.mdDoc "Configuration to add to /etc/nagios.cfg";
+      };
+
+      validateConfig = mkOption {
+        type = types.bool;
+        default = pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform;
+        defaultText = literalExpression "pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform";
+        description = lib.mdDoc "if true, the syntax of the nagios configuration file is checked at build time";
+      };
+
+      cgiConfigFile = mkOption {
+        type = types.package;
+        default = nagiosCGICfgFile;
+        defaultText = literalExpression "nagiosCGICfgFile";
+        description = lib.mdDoc ''
+          Derivation for the configuration file of Nagios CGI scripts
+          that can be used in web servers for running the Nagios web interface.
+        '';
+      };
+
+      enableWebInterface = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Nagios web interface.  You should also
+          enable Apache ({option}`services.httpd.enable`).
+        '';
+      };
+
+      virtualHost = mkOption {
+        type = types.submodule (import ../web-servers/apache-httpd/vhost-options.nix);
+        example = literalExpression ''
+          { hostName = "example.org";
+            adminAddr = "webmaster@example.org";
+            enableSSL = true;
+            sslServerCert = "/var/lib/acme/example.org/full.pem";
+            sslServerKey = "/var/lib/acme/example.org/key.pem";
+          }
+        '';
+        description = lib.mdDoc ''
+          Apache configuration can be done by adapting {option}`services.httpd.virtualHosts`.
+          See [](#opt-services.httpd.virtualHosts) for further information.
+        '';
+      };
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+    users.users.nagios = {
+      description = "Nagios user ";
+      uid         = config.ids.uids.nagios;
+      home        = nagiosState;
+      group       = "nagios";
+    };
+
+    users.groups.nagios = { };
+
+    # This isn't needed, it's just so that the user can type "nagiostats
+    # -c /etc/nagios.cfg".
+    environment.etc."nagios.cfg".source = nagiosCfgFile;
+
+    environment.systemPackages = [ pkgs.nagios ];
+    systemd.services.nagios = {
+      description = "Nagios monitoring daemon";
+      path     = [ pkgs.nagios ] ++ cfg.plugins;
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" ];
+      restartTriggers = [ nagiosCfgFile ];
+
+      serviceConfig = {
+        User = "nagios";
+        Group = "nagios";
+        Restart = "always";
+        RestartSec = 2;
+        LogsDirectory = "nagios";
+        StateDirectory = "nagios";
+        ExecStart = "${pkgs.nagios}/bin/nagios /etc/nagios.cfg";
+      };
+    };
+
+    services.httpd.virtualHosts = optionalAttrs cfg.enableWebInterface {
+      ${cfg.virtualHost.hostName} = mkMerge [ cfg.virtualHost { extraConfig = extraHttpdConfig; } ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/netdata.nix b/nixpkgs/nixos/modules/services/monitoring/netdata.nix
new file mode 100644
index 000000000000..de0e044453ee
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/netdata.nix
@@ -0,0 +1,366 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.netdata;
+
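+  # These plugins need elevated capabilities, so the plugin directory points at the
+  # setcap wrappers declared under security.wrappers further down in this module.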
+  wrappedPlugins = pkgs.runCommand "wrapped-plugins" { preferLocalBuild = true; } ''
+    mkdir -p $out/libexec/netdata/plugins.d
+    ln -s /run/wrappers/bin/apps.plugin $out/libexec/netdata/plugins.d/apps.plugin
+    ln -s /run/wrappers/bin/cgroup-network $out/libexec/netdata/plugins.d/cgroup-network
+    ln -s /run/wrappers/bin/perf.plugin $out/libexec/netdata/plugins.d/perf.plugin
+    ln -s /run/wrappers/bin/slabinfo.plugin $out/libexec/netdata/plugins.d/slabinfo.plugin
+    ln -s /run/wrappers/bin/freeipmi.plugin $out/libexec/netdata/plugins.d/freeipmi.plugin
+    ln -s /run/wrappers/bin/systemd-journal.plugin $out/libexec/netdata/plugins.d/systemd-journal.plugin
+  '';
+
+  plugins = [
+    "${cfg.package}/libexec/netdata/plugins.d"
+    "${wrappedPlugins}/libexec/netdata/plugins.d"
+  ] ++ cfg.extraPluginPaths;
+
+  configDirectory = pkgs.runCommand "netdata-config-d" { } ''
+    mkdir $out
+    ${concatStringsSep "\n" (mapAttrsToList (path: file: ''
+        mkdir -p "$out/$(dirname ${path})"
+        ln -s "${file}" "$out/${path}"
+      '') cfg.configDir)}
+  '';
+
+  localConfig = {
+    global = {
+      "config directory" = "/etc/netdata/conf.d";
+      "plugins directory" = concatStringsSep " " plugins;
+    };
+    web = {
+      "web files owner" = "root";
+      "web files group" = "root";
+    };
+    "plugin:cgroups" = {
+      "script to get cgroup network interfaces" = "${wrappedPlugins}/libexec/netdata/plugins.d/cgroup-network";
+      "use unified cgroups" = "yes";
+    };
+  };
+  mkConfig = generators.toINI {} (recursiveUpdate localConfig cfg.config);
+  configFile = pkgs.writeText "netdata.conf" (if cfg.configText != null then cfg.configText else mkConfig);
+
+  defaultUser = "netdata";
+
+in {
+  options = {
+    services.netdata = {
+      enable = mkEnableOption (lib.mdDoc "netdata");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.netdata;
+        defaultText = literalExpression "pkgs.netdata";
+        description = lib.mdDoc "Netdata package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "netdata";
+        description = lib.mdDoc "User account under which netdata runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "netdata";
+        description = lib.mdDoc "Group under which netdata runs.";
+      };
+
+      configText = mkOption {
+        type = types.nullOr types.lines;
+        description = lib.mdDoc "Verbatim netdata.conf, cannot be combined with config.";
+        default = null;
+        example = ''
+          [global]
+          debug log = syslog
+          access log = syslog
+          error log = syslog
+        '';
+      };
+
+      python = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to enable python-based plugins.
+          '';
+        };
+        extraPackages = mkOption {
+          type = types.functionTo (types.listOf types.package);
+          default = ps: [];
+          defaultText = literalExpression "ps: []";
+          example = literalExpression ''
+            ps: [
+              ps.psycopg2
+              ps.docker
+              ps.dnspython
+            ]
+          '';
+          description = lib.mdDoc ''
+            Extra python packages available at runtime
+            to enable additional python plugins.
+          '';
+        };
+      };
+
+      extraPluginPaths = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        example = literalExpression ''
+          [ "/path/to/plugins.d" ]
+        '';
+        description = lib.mdDoc ''
+          Extra paths to add to the netdata global "plugins directory"
+          option.  Useful for when you want to include your own
+          collection scripts.
+
+          Details about writing a custom netdata plugin are available at:
+          <https://docs.netdata.cloud/collectors/plugins.d/>
+
+          Cannot be combined with configText.
+        '';
+      };
+
+      config = mkOption {
+        type = types.attrsOf types.attrs;
+        default = {};
+        description = lib.mdDoc "netdata.conf configuration as nix attributes. cannot be combined with configText.";
+        example = literalExpression ''
+          global = {
+            "debug log" = "syslog";
+            "access log" = "syslog";
+            "error log" = "syslog";
+          };
+        '';
+      };
+
+      configDir = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = lib.mdDoc ''
+          Complete netdata config directory except netdata.conf.
+          The default configuration is merged with changes
+          defined in this option.
+          Each top-level attribute denotes a path in the configuration
+          directory as in environment.etc.
+          Its value is the absolute path and must be readable by netdata.
+          Cannot be combined with configText.
+        '';
+        example = literalExpression ''
+          "health_alarm_notify.conf" = pkgs.writeText "health_alarm_notify.conf" '''
+            sendmail="/path/to/sendmail"
+          ''';
+          "health.d" = "/run/secrets/netdata/health.d";
+        '';
+      };
+
+      claimTokenFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          If set, automatically registers the agent using the given claim token
+          file.
+        '';
+      };
+
+      enableAnalyticsReporting = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable reporting of anonymous usage statistics to Netdata Inc. via either
+          Google Analytics (in versions prior to 1.29.4), or Netdata Inc.'s
+          self-hosted PostHog (in versions 1.29.4 and later).
+          See: <https://learn.netdata.cloud/docs/agent/anonymous-statistics>
+        '';
+      };
+
+      deadlineBeforeStopSec = mkOption {
+        type = types.int;
+        default = 120;
+        description = lib.mdDoc ''
+          In order to detect when netdata is misbehaving, we run a concurrent task pinging netdata (wait-for-netdata-up)
+          in the systemd unit.
+
+          If this task does not succeed within the deadline, the unit is stopped and marked as failed.
+
+          You can control this deadline in seconds with this option; it is useful to raise it
+          if you (1) have a lot of data, (2) are performing upgrades, or (3) have low IOPS/throughput.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions =
+      [ { assertion = cfg.config != {} -> cfg.configText == null ;
+          message = "Cannot specify both config and configText";
+        }
+      ];
+
+    environment.etc."netdata/netdata.conf".source = configFile;
+    environment.etc."netdata/conf.d".source = configDirectory;
+
+    systemd.services.netdata = {
+      description = "Real time performance monitoring";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = (with pkgs; [ curl gawk iproute2 which procps bash ])
+        ++ lib.optional cfg.python.enable (pkgs.python3.withPackages cfg.python.extraPackages)
+        ++ lib.optional config.virtualisation.libvirtd.enable (config.virtualisation.libvirtd.package);
+      environment = {
+        PYTHONPATH = "${cfg.package}/libexec/netdata/python.d/python_modules";
+        NETDATA_PIPENAME = "/run/netdata/ipc";
+      } // lib.optionalAttrs (!cfg.enableAnalyticsReporting) {
+        DO_NOT_TRACK = "1";
+      };
+      restartTriggers = [
+        config.environment.etc."netdata/netdata.conf".source
+        config.environment.etc."netdata/conf.d".source
+      ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/netdata -P /run/netdata/netdata.pid -D -c /etc/netdata/netdata.conf";
+        ExecReload = "${pkgs.util-linux}/bin/kill -s HUP -s USR1 -s USR2 $MAINPID";
+        ExecStartPost = pkgs.writeShellScript "wait-for-netdata-up" ''
+          while [ "$(${cfg.package}/bin/netdatacli ping)" != pong ]; do sleep 0.5; done
+        '';
+
+        TimeoutStopSec = cfg.deadlineBeforeStopSec;
+        Restart = "on-failure";
+        # User and group
+        User = cfg.user;
+        Group = cfg.group;
+        # Performance
+        LimitNOFILE = "30000";
+        # Runtime directory and mode
+        RuntimeDirectory = "netdata";
+        RuntimeDirectoryMode = "0750";
+        # State directory and mode
+        StateDirectory = "netdata";
+        StateDirectoryMode = "0750";
+        # Cache directory and mode
+        CacheDirectory = "netdata";
+        CacheDirectoryMode = "0750";
+        # Logs directory and mode
+        LogsDirectory = "netdata";
+        LogsDirectoryMode = "0750";
+        # Configuration directory and mode
+        ConfigurationDirectory = "netdata";
+        ConfigurationDirectoryMode = "0755";
+        # Capabilities
+        CapabilityBoundingSet = [
+          "CAP_DAC_OVERRIDE"      # is required for freeipmi and slabinfo plugins
+          "CAP_DAC_READ_SEARCH"   # is required for apps and systemd-journal plugin
+          "CAP_FOWNER"            # is required for freeipmi plugin
+          "CAP_SETPCAP"           # is required for apps, perf and slabinfo plugins
+          "CAP_SYS_ADMIN"         # is required for perf plugin
+          "CAP_SYS_PTRACE"        # is required for apps plugin
+          "CAP_SYS_RESOURCE"      # is required for ebpf plugin
+          "CAP_NET_RAW"           # is required for fping app
+          "CAP_SYS_CHROOT"        # is required for cgroups plugin
+          "CAP_SETUID"            # is required for cgroups and cgroups-network plugins
+          "CAP_SYSLOG"            # is required for systemd-journal plugin
+        ];
+        # Sandboxing
+        ProtectSystem = "full";
+        ProtectHome = "read-only";
+        PrivateTmp = true;
+        ProtectControlGroups = true;
+        PrivateMounts = true;
+      } // (lib.optionalAttrs (cfg.claimTokenFile != null) {
+        LoadCredential = [
+          "netdata_claim_token:${cfg.claimTokenFile}"
+        ];
+
+        ExecStartPre = pkgs.writeShellScript "netdata-claim" ''
+          set -euo pipefail
+
+          if [[ -f /var/lib/netdata/cloud.d/claimed_id ]]; then
+            # Already registered
+            exit
+          fi
+
+          exec ${cfg.package}/bin/netdata-claim.sh \
+            -token="$(< "$CREDENTIALS_DIRECTORY/netdata_claim_token")" \
+            -url=https://app.netdata.cloud \
+            -daemon-not-running
+        '';
+      });
+    };
+
+    systemd.enableCgroupAccounting = true;
+
+    security.wrappers = {
+      "apps.plugin" = {
+        source = "${cfg.package}/libexec/netdata/plugins.d/apps.plugin.org";
+        capabilities = "cap_dac_read_search,cap_sys_ptrace+ep";
+        owner = cfg.user;
+        group = cfg.group;
+        permissions = "u+rx,g+x,o-rwx";
+      };
+
+      "cgroup-network" = {
+        source = "${cfg.package}/libexec/netdata/plugins.d/cgroup-network.org";
+        capabilities = "cap_setuid+ep";
+        owner = cfg.user;
+        group = cfg.group;
+        permissions = "u+rx,g+x,o-rwx";
+      };
+
+      "perf.plugin" = {
+        source = "${cfg.package}/libexec/netdata/plugins.d/perf.plugin.org";
+        capabilities = "cap_sys_admin+ep";
+        owner = cfg.user;
+        group = cfg.group;
+        permissions = "u+rx,g+x,o-rwx";
+      };
+
+      "systemd-journal.plugin" = {
+        source = "${cfg.package}/libexec/netdata/plugins.d/systemd-journal.plugin.org";
+        capabilities = "cap_dac_read_search,cap_syslog+ep";
+        owner = cfg.user;
+        group = cfg.group;
+        permissions = "u+rx,g+x,o-rwx";
+      };
+
+      "slabinfo.plugin" = {
+        source = "${cfg.package}/libexec/netdata/plugins.d/slabinfo.plugin.org";
+        capabilities = "cap_dac_override+ep";
+        owner = cfg.user;
+        group = cfg.group;
+        permissions = "u+rx,g+x,o-rwx";
+      };
+
+    } // optionalAttrs (cfg.package.withIpmi) {
+      "freeipmi.plugin" = {
+        source = "${cfg.package}/libexec/netdata/plugins.d/freeipmi.plugin.org";
+        capabilities = "cap_dac_override,cap_fowner+ep";
+        owner = cfg.user;
+        group = cfg.group;
+        permissions = "u+rx,g+x,o-rwx";
+      };
+    };
+
+    security.pam.loginLimits = [
+      { domain = "netdata"; type = "soft"; item = "nofile"; value = "10000"; }
+      { domain = "netdata"; type = "hard"; item = "nofile"; value = "30000"; }
+    ];
+
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} = {
+        group = defaultUser;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == defaultUser) {
+      ${defaultUser} = { };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/ocsinventory-agent.md b/nixpkgs/nixos/modules/services/monitoring/ocsinventory-agent.md
new file mode 100644
index 000000000000..50e246fb6531
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/ocsinventory-agent.md
@@ -0,0 +1,33 @@
+# OCS Inventory Agent {#module-services-ocsinventory-agent}
+
+[OCS Inventory NG](https://ocsinventory-ng.org/) or Open Computers and Software inventory
+is an application designed to help IT administrators keep track of the hardware and software
+configurations of the computers installed on their network.
+
+OCS Inventory collects information about the hardware and software of networked machines
+through the **OCS Inventory Agent** program.
+
+This NixOS module enables you to install and configure this agent so that it sends information from your computer to the OCS Inventory server.
+
+For more technical information about OCS Inventory Agent, refer to [the Wiki documentation](https://wiki.ocsinventory-ng.org/03.Basic-documentation/Setting-up-the-UNIX-agent-manually-on-client-computers/).
+
+
+## Basic Usage {#module-services-ocsinventory-agent-basic-usage}
+
+A minimal configuration looks like this:
+
+```nix
+{
+  services.ocsinventory-agent = {
+    enable = true;
+    settings = {
+      server = "https://ocsinventory.localhost:8080/ocsinventory";
+      tag = "01234567890123";
+    };
+  };
+}
+```
+
+This configuration will periodically run the ocsinventory-agent systemd service.
+
+The OCS Inventory Agent will inventory the computer and then send the results to the specified OCS Inventory server.
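+
+By default the agent runs once a day. The schedule can be adjusted through the
+`services.ocsinventory-agent.interval` option, which accepts any systemd calendar
+expression (see `systemd.time(7)`). For example, to run every day at 06:00:
+
+```nix
+{
+  services.ocsinventory-agent.interval = "06:00";
+}
+```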
diff --git a/nixpkgs/nixos/modules/services/monitoring/ocsinventory-agent.nix b/nixpkgs/nixos/modules/services/monitoring/ocsinventory-agent.nix
new file mode 100644
index 000000000000..7585ae863750
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/ocsinventory-agent.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.ocsinventory-agent;
+
+  settingsFormat = pkgs.formats.keyValue {
+    mkKeyValue = lib.generators.mkKeyValueDefault { } "=";
+  };
+
+in
+{
+  meta = {
+    doc = ./ocsinventory-agent.md;
+    maintainers = with lib.maintainers; [ anthonyroussel ];
+  };
+
+  options = {
+    services.ocsinventory-agent = {
+      enable = lib.mkEnableOption (lib.mdDoc "OCS Inventory Agent");
+
+      package = lib.mkPackageOptionMD pkgs "ocsinventory-agent" { };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule {
+          freeformType = settingsFormat.type.nestedTypes.elemType;
+
+          options = {
+            server = lib.mkOption {
+              type = lib.types.nullOr lib.types.str;
+              example = "https://ocsinventory.localhost:8080/ocsinventory";
+              default = null;
+              description = lib.mdDoc ''
+                The URI of the OCS Inventory server where to send the inventory file.
+
+                This option is ignored if {option}`services.ocsinventory-agent.settings.local` is set.
+              '';
+            };
+
+            local = lib.mkOption {
+              type = lib.types.nullOr lib.types.path;
+              example = "/var/lib/ocsinventory-agent/reports";
+              default = null;
+              description = lib.mdDoc ''
+                If specified, the OCS Inventory Agent will run in offline mode
+                and the resulting inventory file will be stored in the specified path.
+              '';
+            };
+
+            ca = lib.mkOption {
+              type = lib.types.path;
+              default = "/etc/ssl/certs/ca-certificates.crt";
+              description = lib.mdDoc ''
+                Path to CA certificates file in PEM format, for server
+                SSL certificate validation.
+              '';
+            };
+
+            tag = lib.mkOption {
+              type = lib.types.nullOr lib.types.str;
+              default = null;
+              example = "01234567890123";
+              description = lib.mdDoc "Tag for the generated inventory.";
+            };
+
+            debug = lib.mkEnableOption (lib.mdDoc "debug mode");
+          };
+        };
+        default = { };
+        example = {
+          ca = "/etc/ssl/certs/ca-certificates.crt";
+          debug = true;
+          server = "https://ocsinventory.localhost:8080/ocsinventory";
+          tag = "01234567890123";
+        };
+        description = lib.mdDoc ''
+          Configuration for /etc/ocsinventory-agent/ocsinventory-agent.cfg.
+
+          Refer to
+          {manpage}`ocsinventory-agent(1)` for available options.
+        '';
+      };
+
+      interval = lib.mkOption {
+        type = lib.types.str;
+        default = "daily";
+        example = "06:00";
+        description = lib.mdDoc ''
+          How often to run the ocsinventory-agent service. Runs daily by default.
+
+          The format is described in
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+    };
+  };
+
+  config =
+    let
+      configFile = settingsFormat.generate "ocsinventory-agent.cfg" cfg.settings;
+
+    in lib.mkIf cfg.enable {
+      # Path of the configuration file is hard-coded and cannot be changed
+      # https://github.com/OCSInventory-NG/UnixAgent/blob/v2.10.0/lib/Ocsinventory/Agent/Config.pm#L78
+      #
+      environment.etc."ocsinventory-agent/ocsinventory-agent.cfg".source = configFile;
+
+      systemd.services.ocsinventory-agent = {
+        description = "OCS Inventory Agent service";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+
+        reloadTriggers = [ configFile ];
+
+        serviceConfig = {
+          ExecStart = lib.getExe cfg.package;
+          ConfigurationDirectory = "ocsinventory-agent";
+          StateDirectory = "ocsinventory-agent";
+        };
+      };
+
+      systemd.timers.ocsinventory-agent = {
+        description = "Launch OCS Inventory Agent regularly";
+        wantedBy = [ "timers.target" ];
+
+        timerConfig = {
+          OnCalendar = cfg.interval;
+          AccuracySec = "1h";
+          RandomizedDelaySec = 240;
+          Persistent = true;
+          Unit = "ocsinventory-agent.service";
+        };
+      };
+    };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/opentelemetry-collector.nix b/nixpkgs/nixos/modules/services/monitoring/opentelemetry-collector.nix
new file mode 100644
index 000000000000..1d211b689777
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/opentelemetry-collector.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption types getExe;
+
+  cfg = config.services.opentelemetry-collector;
+  opentelemetry-collector = cfg.package;
+
+  settingsFormat = pkgs.formats.yaml {};
+in {
+  options.services.opentelemetry-collector = {
+    enable = mkEnableOption (lib.mdDoc "Opentelemetry Collector");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.opentelemetry-collector;
+      defaultText = lib.literalExpression "pkgs.opentelemetry-collector";
+      description = lib.mdDoc "The opentelemetry-collector package to use.";
+    };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+      description = lib.mdDoc ''
+        Specify the configuration for Opentelemetry Collector in Nix.
+
+        See https://opentelemetry.io/docs/collector/configuration/ for available options.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Specify a path to a configuration file that Opentelemetry Collector should use.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [{
+      assertion = (
+        (cfg.settings == {}) != (cfg.configFile == null)
+      );
+      message  = ''
+        Please specify a configuration for Opentelemetry Collector with either
+        'services.opentelemetry-collector.settings' or
+        'services.opentelemetry-collector.configFile'.
+      '';
+    }];
+
+    systemd.services.opentelemetry-collector = {
+      description = "Opentelemetry Collector Service Daemon";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = let
+        conf = if cfg.configFile == null
+               then settingsFormat.generate "config.yaml" cfg.settings
+               else cfg.configFile;
+      in
+      {
+        ExecStart = "${getExe opentelemetry-collector} --config=file:${conf}";
+        DynamicUser = true;
+        Restart = "always";
+        ProtectSystem = "full";
+        DevicePolicy = "closed";
+        NoNewPrivileges = true;
+        WorkingDirectory = "/var/lib/opentelemetry-collector";
+        StateDirectory = "opentelemetry-collector";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/osquery.nix b/nixpkgs/nixos/modules/services/monitoring/osquery.nix
new file mode 100644
index 000000000000..4f6c2557a641
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/osquery.nix
@@ -0,0 +1,97 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.osquery;
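+  # dirname returns the parent directory of a path; it is used below to derive the
+  # state and runtime directories from the database_path and pidfile flags.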
+  dirname = path: with lib.strings; with lib.lists; concatStringsSep "/"
+    (init (splitString "/" (normalizePath path)));
+
+  # conf is the osquery configuration file used when the --config_plugin=filesystem.
+  # filesystem is the osquery default value for the config_plugin flag.
+  conf = pkgs.writeText "osquery.conf" (builtins.toJSON cfg.settings);
+
+  # flagfile is the file containing osquery command line flags to be
+  # provided to the application using the special --flagfile option.
+  flagfile = pkgs.writeText "osquery.flags"
+    (concatStringsSep "\n"
+      (mapAttrsToList (name: value: "--${name}=${value}")
+        # Use the conf derivation if not otherwise specified.
+        ({ config_path = conf; } // cfg.flags)));
+
+  osqueryi = pkgs.runCommand "osqueryi" { nativeBuildInputs = [ pkgs.makeWrapper ]; } ''
+    mkdir -p $out/bin
+    makeWrapper ${pkgs.osquery}/bin/osqueryi $out/bin/osqueryi \
+      --add-flags "--flagfile ${flagfile} --disable-database"
+  '';
+in
+{
+  options.services.osquery = {
+    enable = mkEnableOption (mdDoc "osqueryd daemon");
+
+    settings = mkOption {
+      default = { };
+      description = mdDoc ''
+        Configuration to be written to the osqueryd JSON configuration file.
+        To understand the configuration format, refer to https://osquery.readthedocs.io/en/stable/deployment/configuration/#configuration-components.
+      '';
+      example = {
+        options.utc = false;
+      };
+      type = types.attrs;
+    };
+
+    flags = mkOption {
+      default = { };
+      description = mdDoc ''
+        Attribute set of flag names and values to be written to the osqueryd flagfile.
+        For more information, refer to https://osquery.readthedocs.io/en/stable/installation/cli-flags.
+      '';
+      example = {
+        config_refresh = "10";
+      };
+      type = with types;
+        submodule {
+          freeformType = attrsOf str;
+          options = {
+            database_path = mkOption {
+              default = "/var/lib/osquery/osquery.db";
+              readOnly = true;
+              description = mdDoc "Path used for the database file.";
+              type = path;
+            };
+            logger_path = mkOption {
+              default = "/var/log/osquery";
+              readOnly = true;
+              description = mdDoc "Base directory used for logging.";
+              type = path;
+            };
+            pidfile = mkOption {
+              default = "/run/osquery/osqueryd.pid";
+              readOnly = true;
+              description = mdDoc "Path used for pid file.";
+              type = path;
+            };
+          };
+        };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ osqueryi ];
+    systemd.services.osqueryd = {
+      after = [ "network.target" "syslog.service" ];
+      description = "The osquery daemon";
+      serviceConfig = {
+        ExecStart = "${pkgs.osquery}/bin/osqueryd --flagfile ${flagfile}";
+        PIDFile = cfg.flags.pidfile;
+        LogsDirectory = cfg.flags.logger_path;
+        StateDirectory = dirname cfg.flags.database_path;
+        Restart = "always";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+    systemd.tmpfiles.rules = [
+      "d ${dirname (cfg.flags.pidfile)} 0755 root root -"
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md b/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md
new file mode 100644
index 000000000000..eac07e0cc9fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/parsedmarc.md
@@ -0,0 +1,112 @@
+# parsedmarc {#module-services-parsedmarc}
+[parsedmarc](https://domainaware.github.io/parsedmarc/) is a service
+which parses incoming [DMARC](https://dmarc.org/) reports and stores
+or sends them to a downstream service for further analysis. In
+combination with Elasticsearch, Grafana and the included Grafana
+dashboard, it provides a handy overview of DMARC reports over time.
+
+## Basic usage {#module-services-parsedmarc-basic-usage}
+A very minimal setup which reads incoming reports from an external
+email address and saves them to a local Elasticsearch instance looks
+like this:
+
+```nix
+services.parsedmarc = {
+  enable = true;
+  settings.imap = {
+    host = "imap.example.com";
+    user = "alice@example.com";
+    password = "/path/to/imap_password_file";
+  };
+  provision.geoIp = false; # Not recommended!
+};
+```
+
+Note that GeoIP provisioning is disabled in the example for
+simplicity, but should be turned on for fully functional reports.
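+
+Settings that contain secrets, such as passwords or tokens for downstream
+services, can be given as an attribute set with a `_secret` key pointing to a
+file; the file's contents are substituted into the generated configuration when
+the service starts. A small sketch, using a hypothetical Splunk HEC token file:
+
+```nix
+services.parsedmarc.settings.splunk_hec = {
+  url = "https://splunkhec.example.com";
+  token = { _secret = "/run/keys/splunk_token"; };
+  index = "email";
+};
+```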
+
+## Local mail {#module-services-parsedmarc-local-mail}
+Instead of watching an external inbox, a local inbox can be
+automatically provisioned. The recipient's name is by default set to
+`dmarc`, but can be configured in
+[services.parsedmarc.provision.localMail.recipientName](options.html#opt-services.parsedmarc.provision.localMail.recipientName). You
+need to add an MX record pointing to the host. More concretely: for
+the example to work, an MX record needs to be set up for
+`monitoring.example.com` and the complete email address that should be
+configured in the domain's dmarc policy is
+`dmarc@monitoring.example.com`.
+
+```nix
+services.parsedmarc = {
+  enable = true;
+  provision = {
+    localMail = {
+      enable = true;
+      hostname = "monitoring.example.com";
+    };
+    geoIp = false; # Not recommended!
+  };
+};
+```
+
+## Grafana and GeoIP {#module-services-parsedmarc-grafana-geoip}
+The reports can be visualized and summarized with parsedmarc's
+official Grafana dashboard. For all views to work, and for the data to
+be complete, GeoIP databases are also required. The following example
+shows a basic deployment where the provisioned Elasticsearch instance
+is automatically added as a Grafana datasource, and the dashboard is
+added to Grafana as well.
+
+```nix
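+# "url" is assumed to be defined elsewhere (e.g. in a let binding) as the domain
+# Grafana is served on, such as "monitoring.example.com".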
+services.parsedmarc = {
+  enable = true;
+  provision = {
+    localMail = {
+      enable = true;
+      hostname = url;
+    };
+    grafana = {
+      datasource = true;
+      dashboard = true;
+    };
+  };
+};
+
+# Not required, but recommended for full functionality
+services.geoipupdate = {
+  settings = {
+    AccountID = 000000;
+    LicenseKey = "/path/to/license_key_file";
+  };
+};
+
+services.grafana = {
+  enable = true;
+  addr = "0.0.0.0";
+  domain = url;
+  rootUrl = "https://" + url;
+  protocol = "socket";
+  security = {
+    adminUser = "admin";
+    adminPasswordFile = "/path/to/admin_password_file";
+    secretKeyFile = "/path/to/secret_key_file";
+  };
+};
+
+services.nginx = {
+  enable = true;
+  recommendedTlsSettings = true;
+  recommendedOptimisation = true;
+  recommendedGzipSettings = true;
+  recommendedProxySettings = true;
+  upstreams.grafana.servers."unix:/${config.services.grafana.socket}" = {};
+  virtualHosts.${url} = {
+    root = config.services.grafana.staticRootPath;
+    enableACME = true;
+    forceSSL = true;
+    locations."/".tryFiles = "$uri @grafana";
+    locations."@grafana".proxyPass = "http://grafana";
+  };
+};
+users.users.nginx.extraGroups = [ "grafana" ];
+```
diff --git a/nixpkgs/nixos/modules/services/monitoring/parsedmarc.nix b/nixpkgs/nixos/modules/services/monitoring/parsedmarc.nix
new file mode 100644
index 000000000000..a146e7ab9543
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/parsedmarc.nix
@@ -0,0 +1,545 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.services.parsedmarc;
+  opt = options.services.parsedmarc;
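+  # Secrets are represented as { _secret = "/path/to/file"; }. When the INI file is
+  # generated, each secret is rendered as the sha256 hash of its path; the
+  # ExecStartPre script later swaps that hash for the file's contents via replace-secret.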
+  isSecret = v: isAttrs v && v ? _secret && isString v._secret;
+  ini = pkgs.formats.ini {
+    mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" rec {
+      mkValueString = v:
+        if isInt           v then toString v
+        else if isString   v then v
+        else if true  ==   v then "True"
+        else if false ==   v then "False"
+        else if isSecret   v then hashString "sha256" v._secret
+        else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
+    };
+  };
+  inherit (builtins) elem isAttrs isString isInt isList typeOf hashString;
+in
+{
+  options.services.parsedmarc = {
+
+    enable = lib.mkEnableOption (lib.mdDoc ''
+      parsedmarc, a DMARC report monitoring service
+    '');
+
+    provision = {
+      localMail = {
+        enable = lib.mkOption {
+          type = lib.types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether Postfix and Dovecot should be set up to receive
+            mail locally. parsedmarc will be configured to watch the
+            local inbox as the automatically created user specified in
+            [](#opt-services.parsedmarc.provision.localMail.recipientName)
+          '';
+        };
+
+        recipientName = lib.mkOption {
+          type = lib.types.str;
+          default = "dmarc";
+          description = lib.mdDoc ''
+            The DMARC mail recipient name, i.e. the name part of the
+            email address which receives DMARC reports.
+
+            A local user with this name will be set up and assigned a
+            randomized password on service start.
+          '';
+        };
+
+        hostname = lib.mkOption {
+          type = lib.types.str;
+          default = config.networking.fqdn;
+          defaultText = lib.literalExpression "config.networking.fqdn";
+          example = "monitoring.example.com";
+          description = lib.mdDoc ''
+            The hostname to use when configuring Postfix.
+
+            Should correspond to the host's fully qualified domain
+            name and the domain part of the email address which
+            receives DMARC reports. You also have to set up an MX record
+            pointing to this domain name.
+          '';
+        };
+      };
+
+      geoIp = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable and configure the [geoipupdate](#opt-services.geoipupdate.enable)
+          service to automatically fetch GeoIP databases. Not crucial,
+          but recommended for full functionality.
+
+          To finish the setup, you need to manually set the [](#opt-services.geoipupdate.settings.AccountID) and
+          [](#opt-services.geoipupdate.settings.LicenseKey)
+          options.
+        '';
+      };
+
+      elasticsearch = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to set up and use a local instance of Elasticsearch.
+        '';
+      };
+
+      grafana = {
+        datasource = lib.mkOption {
+          type = lib.types.bool;
+          default = cfg.provision.elasticsearch && config.services.grafana.enable;
+          defaultText = lib.literalExpression ''
+            config.${opt.provision.elasticsearch} && config.${options.services.grafana.enable}
+          '';
+          apply = x: x && cfg.provision.elasticsearch;
+          description = lib.mdDoc ''
+            Whether the automatically provisioned Elasticsearch
+            instance should be added as a grafana datasource. Has no
+            effect unless
+            [](#opt-services.parsedmarc.provision.elasticsearch)
+            is also enabled.
+          '';
+        };
+
+        dashboard = lib.mkOption {
+          type = lib.types.bool;
+          default = config.services.grafana.enable;
+          defaultText = lib.literalExpression "config.services.grafana.enable";
+          description = lib.mdDoc ''
+            Whether the official parsedmarc grafana dashboard should
+            be provisioned to the local grafana instance.
+          '';
+        };
+      };
+    };
+
+    settings = lib.mkOption {
+      example = lib.literalExpression ''
+        {
+          imap = {
+            host = "imap.example.com";
+            user = "alice@example.com";
+            password = { _secret = "/run/keys/imap_password" };
+          };
+          mailbox = {
+            watch = true;
+            batch_size = 30;
+          };
+          splunk_hec = {
+            url = "https://splunkhec.example.com";
+            token = { _secret = "/run/keys/splunk_token"; };
+            index = "email";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Configuration parameters to set in
+        {file}`parsedmarc.ini`. For a full list of
+        available parameters, see
+        <https://domainaware.github.io/parsedmarc/#configuration-file>.
+
+        Settings containing secret data should be set to an attribute
+        set containing the attribute `_secret` - a
+        string pointing to a file containing the value the option
+        should be set to. See the example to get a better picture of
+        this: in the resulting {file}`parsedmarc.ini`
+        file, the `splunk_hec.token` key will be set
+        to the contents of the
+        {file}`/run/keys/splunk_token` file.
+      '';
+
+      type = lib.types.submodule {
+        freeformType = ini.type;
+
+        options = {
+          general = {
+            save_aggregate = lib.mkOption {
+              type = lib.types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Save aggregate report data to Elasticsearch and/or Splunk.
+              '';
+            };
+
+            save_forensic = lib.mkOption {
+              type = lib.types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Save forensic report data to Elasticsearch and/or Splunk.
+              '';
+            };
+          };
+
+          mailbox = {
+            watch = lib.mkOption {
+              type = lib.types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Use the IMAP IDLE command to process messages as they arrive.
+              '';
+            };
+
+            delete = lib.mkOption {
+              type = lib.types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Delete messages after processing them, instead of archiving them.
+              '';
+            };
+          };
+
+          imap = {
+            host = lib.mkOption {
+              type = lib.types.str;
+              default = "localhost";
+              description = lib.mdDoc ''
+                The IMAP server hostname or IP address.
+              '';
+            };
+
+            port = lib.mkOption {
+              type = lib.types.port;
+              default = 993;
+              description = lib.mdDoc ''
+                The IMAP server port.
+              '';
+            };
+
+            ssl = lib.mkOption {
+              type = lib.types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Use an encrypted SSL/TLS connection.
+              '';
+            };
+
+            user = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                The IMAP server username.
+              '';
+            };
+
+            password = lib.mkOption {
+              type = with lib.types; nullOr (either path (attrsOf path));
+              default = null;
+              description = lib.mdDoc ''
+                The IMAP server password.
+
+                Always handled as a secret whether the value is
+                wrapped in a `{ _secret = ...; }`
+                attrset or not (refer to [](#opt-services.parsedmarc.settings) for
+                details).
+              '';
+              apply = x: if isAttrs x || x == null then x else { _secret = x; };
+            };
+          };
+
+          smtp = {
+            host = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                The SMTP server hostname or IP address.
+              '';
+            };
+
+            port = lib.mkOption {
+              type = with lib.types; nullOr port;
+              default = null;
+              description = lib.mdDoc ''
+                The SMTP server port.
+              '';
+            };
+
+            ssl = lib.mkOption {
+              type = with lib.types; nullOr bool;
+              default = null;
+              description = lib.mdDoc ''
+                Use an encrypted SSL/TLS connection.
+              '';
+            };
+
+            user = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                The SMTP server username.
+              '';
+            };
+
+            password = lib.mkOption {
+              type = with lib.types; nullOr (either path (attrsOf path));
+              default = null;
+              description = lib.mdDoc ''
+                The SMTP server password.
+
+                Always handled as a secret whether the value is
+                wrapped in a `{ _secret = ...; }`
+                attrset or not (refer to [](#opt-services.parsedmarc.settings) for
+                details).
+              '';
+              apply = x: if isAttrs x || x == null then x else { _secret = x; };
+            };
+
+            from = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                The `From` address to use for the
+                outgoing mail.
+              '';
+            };
+
+            to = lib.mkOption {
+              type = with lib.types; nullOr (listOf str);
+              default = null;
+              description = lib.mdDoc ''
+                The addresses to send outgoing mail to.
+              '';
+              apply = x: if x == [] then null else lib.concatStringsSep "," x;
+            };
+          };
+
+          elasticsearch = {
+            hosts = lib.mkOption {
+              default = [];
+              type = with lib.types; listOf str;
+              apply = x: if x == [] then null else lib.concatStringsSep "," x;
+              description = lib.mdDoc ''
+                A list of Elasticsearch hosts to push parsed reports
+                to.
+              '';
+            };
+
+            user = lib.mkOption {
+              type = with lib.types; nullOr str;
+              default = null;
+              description = lib.mdDoc ''
+                Username to use when connecting to Elasticsearch, if
+                required.
+              '';
+            };
+
+            password = lib.mkOption {
+              type = with lib.types; nullOr (either path (attrsOf path));
+              default = null;
+              description = lib.mdDoc ''
+                The password to use when connecting to Elasticsearch,
+                if required.
+
+                Always handled as a secret whether the value is
+                wrapped in a `{ _secret = ...; }`
+                attrset or not (refer to [](#opt-services.parsedmarc.settings) for
+                details).
+              '';
+              apply = x: if isAttrs x || x == null then x else { _secret = x; };
+            };
+
+            ssl = lib.mkOption {
+              type = lib.types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether to use an encrypted SSL/TLS connection.
+              '';
+            };
+
+            cert_path = lib.mkOption {
+              type = lib.types.path;
+              default = "/etc/ssl/certs/ca-certificates.crt";
+              description = lib.mdDoc ''
+                The path to a TLS certificate bundle used to verify
+                the server's certificate.
+              '';
+            };
+          };
+        };
+
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    warnings = let
+      deprecationWarning = optname: "Starting in 8.0.0, the `${optname}` option has been moved from the `services.parsedmarc.settings.imap`"
+        + "configuration section to the `services.parsedmarc.settings.mailbox` configuration section.";
+      hasImapOpt = lib.flip builtins.hasAttr cfg.settings.imap;
+      movedOptions = [ "reports_folder" "archive_folder" "watch" "delete" "test" "batch_size" ];
+    in builtins.map deprecationWarning (builtins.filter hasImapOpt movedOptions);
+
+    services.elasticsearch.enable = lib.mkDefault cfg.provision.elasticsearch;
+
+    services.geoipupdate = lib.mkIf cfg.provision.geoIp {
+      enable = true;
+      settings = {
+        EditionIDs = [
+          "GeoLite2-ASN"
+          "GeoLite2-City"
+          "GeoLite2-Country"
+        ];
+        DatabaseDirectory = "/var/lib/GeoIP";
+      };
+    };
+
+    services.dovecot2 = lib.mkIf cfg.provision.localMail.enable {
+      enable = true;
+      protocols = [ "imap" ];
+    };
+
+    services.postfix = lib.mkIf cfg.provision.localMail.enable {
+      enable = true;
+      origin = cfg.provision.localMail.hostname;
+      config = {
+        myhostname = cfg.provision.localMail.hostname;
+        mydestination = cfg.provision.localMail.hostname;
+      };
+    };
+
+    services.grafana = {
+      declarativePlugins = with pkgs.grafanaPlugins;
+        lib.mkIf cfg.provision.grafana.dashboard [
+          grafana-worldmap-panel
+          grafana-piechart-panel
+        ];
+
+      provision = {
+        enable = cfg.provision.grafana.datasource || cfg.provision.grafana.dashboard;
+        datasources.settings.datasources =
+          let
+            esVersion = lib.getVersion config.services.elasticsearch.package;
+          in
+            lib.mkIf cfg.provision.grafana.datasource [
+              {
+                name = "dmarc-ag";
+                type = "elasticsearch";
+                access = "proxy";
+                url = "http://localhost:9200";
+                jsonData = {
+                  timeField = "date_range";
+                  inherit esVersion;
+                };
+              }
+              {
+                name = "dmarc-fo";
+                type = "elasticsearch";
+                access = "proxy";
+                url = "http://localhost:9200";
+                jsonData = {
+                  timeField = "date_range";
+                  inherit esVersion;
+                };
+              }
+            ];
+        dashboards.settings.providers = lib.mkIf cfg.provision.grafana.dashboard [{
+          name = "parsedmarc";
+          options.path = "${pkgs.python3Packages.parsedmarc.dashboard}";
+        }];
+      };
+    };
+
+    services.parsedmarc.settings = lib.mkMerge [
+      (lib.mkIf cfg.provision.elasticsearch {
+        elasticsearch = {
+          hosts = [ "localhost:9200" ];
+          ssl = false;
+        };
+      })
+      (lib.mkIf cfg.provision.localMail.enable {
+        imap = {
+          host = "localhost";
+          port = 143;
+          ssl = false;
+          user = cfg.provision.localMail.recipientName;
+          password = "${pkgs.writeText "imap-password" "@imap-password@"}";
+        };
+        mailbox = {
+          watch = true;
+        };
+      })
+    ];
+
+    systemd.services.parsedmarc =
+      let
+        # Remove any empty attributes from the config, i.e. empty
+        # lists, empty attrsets and null. This makes it possible to
+        # list interesting options in `settings` without them always
+        # ending up in the resulting config.
+        filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ null [] {} ])) cfg.settings;
+
+        # Extract secrets (attributes set to an attrset with a
+        # "_secret" key) from the settings and generate the commands
+        # to run to perform the secret replacements.
+        secretPaths = lib.catAttrs "_secret" (lib.collect isSecret filteredConfig);
+        parsedmarcConfig = ini.generate "parsedmarc.ini" filteredConfig;
+        mkSecretReplacement = file: ''
+          replace-secret ${lib.escapeShellArgs [ (hashString "sha256" file) file "/run/parsedmarc/parsedmarc.ini" ]}
+        '';
+        secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
+      in
+        {
+          wantedBy = [ "multi-user.target" ];
+          after = [ "postfix.service" "dovecot2.service" "elasticsearch.service" ];
+          path = with pkgs; [ replace-secret openssl shadow ];
+          serviceConfig = {
+            ExecStartPre = let
+              startPreFullPrivileges = ''
+                set -o errexit -o pipefail -o nounset -o errtrace
+                shopt -s inherit_errexit
+
+                umask u=rwx,g=,o=
+                cp ${parsedmarcConfig} /run/parsedmarc/parsedmarc.ini
+                chown parsedmarc:parsedmarc /run/parsedmarc/parsedmarc.ini
+                ${secretReplacements}
+              '' + lib.optionalString cfg.provision.localMail.enable ''
+                openssl rand -hex 64 >/run/parsedmarc/dmarc_user_passwd
+                replace-secret '@imap-password@' '/run/parsedmarc/dmarc_user_passwd' /run/parsedmarc/parsedmarc.ini
+                echo "Setting new randomized password for user '${cfg.provision.localMail.recipientName}'."
+                cat <(echo -n "${cfg.provision.localMail.recipientName}:") /run/parsedmarc/dmarc_user_passwd | chpasswd
+              '';
+            in
+              "+${pkgs.writeShellScript "parsedmarc-start-pre-full-privileges" startPreFullPrivileges}";
+            Type = "simple";
+            User = "parsedmarc";
+            Group = "parsedmarc";
+            DynamicUser = true;
+            RuntimeDirectory = "parsedmarc";
+            RuntimeDirectoryMode = "0700";
+            CapabilityBoundingSet = "";
+            PrivateDevices = true;
+            PrivateMounts = true;
+            PrivateUsers = true;
+            ProtectClock = true;
+            ProtectControlGroups = true;
+            ProtectHome = true;
+            ProtectHostname = true;
+            ProtectKernelLogs = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            ProtectProc = "invisible";
+            ProcSubset = "pid";
+            SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+            RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+            RestrictRealtime = true;
+            RestrictNamespaces = true;
+            MemoryDenyWriteExecute = true;
+            LockPersonality = true;
+            SystemCallArchitectures = "native";
+            ExecStart = "${pkgs.python3Packages.parsedmarc}/bin/parsedmarc -c /run/parsedmarc/parsedmarc.ini";
+          };
+        };
+
+    users.users.${cfg.provision.localMail.recipientName} = lib.mkIf cfg.provision.localMail.enable {
+      isNormalUser = true;
+      description = "DMARC mail recipient";
+    };
+  };
+
+  meta.doc = ./parsedmarc.md;
+  meta.maintainers = [ lib.maintainers.talyz ];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager-irc-relay.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager-irc-relay.nix
new file mode 100644
index 000000000000..b81d5f6db5e0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager-irc-relay.nix
@@ -0,0 +1,107 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.alertmanagerIrcRelay;
+
+  configFormat = pkgs.formats.yaml { };
+  configFile = configFormat.generate "alertmanager-irc-relay.yml" cfg.settings;
+in
+{
+  options.services.prometheus.alertmanagerIrcRelay = {
+    enable = mkEnableOption (mdDoc "Alertmanager IRC Relay");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.alertmanager-irc-relay;
+      defaultText = literalExpression "pkgs.alertmanager-irc-relay";
+      description = mdDoc "Alertmanager IRC Relay package to use.";
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = mdDoc "Extra command line options to pass to alertmanager-irc-relay.";
+    };
+
+    settings = mkOption {
+      type = configFormat.type;
+      example = literalExpression ''
+        {
+          http_host = "localhost";
+          http_port = 8000;
+
+          irc_host = "irc.example.com";
+          irc_port = 7000;
+          irc_nickname = "myalertbot";
+
+          irc_channels = [
+            { name = "#mychannel"; }
+          ];
+        }
+      '';
+      description = mdDoc ''
+        Configuration for Alertmanager IRC Relay as a Nix attribute set.
+        For a reference, check out the
+        [example configuration](https://github.com/google/alertmanager-irc-relay#configuring-and-running-the-bot)
+        and the
+        [source code](https://github.com/google/alertmanager-irc-relay/blob/master/config.go).
+
+        Note: The webhook's URL MUST point to the IRC channel where the message
+        should be posted. For `#mychannel` from the example, this would be
+        `http://localhost:8000/mychannel`.
+      '';
+    };
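+
+    # A hedged sketch (not part of this module's options): the Alertmanager side
+    # would post alerts to the relay's per-channel endpoint, e.g. for the
+    # `#mychannel` example above:
+    #
+    #   services.prometheus.alertmanager.configuration = {
+    #     route.receiver = "irc";
+    #     receivers = [{
+    #       name = "irc";
+    #       webhook_configs = [{ url = "http://localhost:8000/mychannel"; }];
+    #     }];
+    #   };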
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.alertmanager-irc-relay = {
+      description = "Alertmanager IRC Relay";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/alertmanager-irc-relay \
+          -config ${configFile} \
+          ${escapeShellArgs cfg.extraFlags}
+        '';
+
+        DynamicUser = true;
+        NoNewPrivileges = true;
+
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        ProtectHome = "tmpfs";
+
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateIPC = true;
+
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+
+        SystemCallFilter = [
+          "@system-service"
+          "~@cpu-emulation"
+          "~@privileged"
+          "~@reboot"
+          "~@setuid"
+          "~@swap"
+        ];
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.oxzi ];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
new file mode 100644
index 000000000000..5fb543ec6195
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/alertmanager.nix
@@ -0,0 +1,203 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.alertmanager;
+  mkConfigFile = pkgs.writeText "alertmanager.yml" (builtins.toJSON cfg.configuration);
+
+  checkedConfig = file:
+    if cfg.checkConfig then
+      pkgs.runCommand "checked-config" { nativeBuildInputs = [ cfg.package ]; } ''
+        ln -s ${file} $out
+        amtool check-config $out
+      '' else file;
+
+  alertmanagerYml = let
+    yml = if cfg.configText != null then
+        pkgs.writeText "alertmanager.yml" cfg.configText
+        else mkConfigFile;
+    in checkedConfig yml;
+
+  cmdlineArgs = cfg.extraFlags ++ [
+    "--config.file /tmp/alert-manager-substituted.yaml"
+    "--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
+    "--log.level ${cfg.logLevel}"
+    "--storage.path /var/lib/alertmanager"
+    (toString (map (peer: "--cluster.peer ${peer}:9094") cfg.clusterPeers))
+    ] ++ (optional (cfg.webExternalUrl != null)
+      "--web.external-url ${cfg.webExternalUrl}"
+    ) ++ (optional (cfg.logFormat != null)
+      "--log.format ${cfg.logFormat}"
+  );
+in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "user" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a user setting.")
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanager" "group" ] "The alertmanager service is now using systemd's DynamicUser mechanism which obviates a group setting.")
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanagerURL" ] ''
+      Due to incompatibility, the alertmanagerURL option has been removed,
+      please use 'services.prometheus.alertmanagers' instead.
+    '')
+  ];
+
+  options = {
+    services.prometheus.alertmanager = {
+      enable = mkEnableOption (lib.mdDoc "Prometheus Alertmanager");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.prometheus-alertmanager;
+        defaultText = literalExpression "pkgs.prometheus-alertmanager";
+        description = lib.mdDoc ''
+          Package that should be used for alertmanager.
+        '';
+      };
+
+      configuration = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = lib.mdDoc ''
+          Alertmanager configuration as nix attribute set.
+        '';
+      };
+
+      configText = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Alertmanager configuration as YAML text. If non-null, this option
+          defines the text that is written to alertmanager.yml. If null, the
+          contents of alertmanager.yml are generated from the structured config
+          options.
+        '';
+      };
+
+      checkConfig = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Check configuration with `amtool check-config`. The call to `amtool` is
+          subject to sandboxing by Nix.
+
+          If you use credentials stored in external files
+          (`environmentFile`, etc),
+          they will not be visible to `amtool`
+          and it will report errors, despite a correct configuration.
+        '';
+      };
+
+      logFormat = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          If set, use a syslog logger or JSON logging.
+        '';
+      };
+
+      logLevel = mkOption {
+        type = types.enum ["debug" "info" "warn" "error" "fatal"];
+        default = "warn";
+        description = lib.mdDoc ''
+          Only log messages with the given severity or above.
+        '';
+      };
+
+      webExternalUrl = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The URL under which Alertmanager is externally reachable (for example, if Alertmanager is served via a reverse proxy).
+          Used for generating relative and absolute links back to Alertmanager itself.
+          If the URL has a path portion, it will be used to prefix all HTTP endpoints served by Alertmanager.
+          If omitted, relevant URL components will be derived automatically.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Address to listen on for the web interface and API. Empty string will listen on all interfaces.
+          "localhost" will listen on 127.0.0.1 (but not ::1).
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 9093;
+        description = lib.mdDoc ''
+          Port to listen on for the web interface and API.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open port in firewall for incoming connections.
+        '';
+      };
+
+      clusterPeers = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Initial peers for HA cluster.
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Extra commandline options when launching the Alertmanager.
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/root/alertmanager.env";
+        description = lib.mdDoc ''
+          File to load as environment file. Environment variables
+          from this file will be interpolated into the config file
+          using envsubst with this syntax:
+          `$ENVIRONMENT ''${VARIABLE}`
+        '';
+      };
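+
+      # Hedged usage sketch (variable and path names are illustrative only):
+      # with, say,
+      #   environmentFile = "/var/lib/secrets/alertmanager.env";  # contains SMTP_PASSWORD=...
+      # the structured configuration can reference the variable as
+      #   configuration.global.smtp_auth_password = "$SMTP_PASSWORD";
+      # and envsubst fills it in at service start. Note that `amtool` cannot see
+      # such values, so `checkConfig = false;` may be needed (see above).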
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      assertions = singleton {
+        assertion = cfg.configuration != null || cfg.configText != null;
+        message = "Cannot enable alertmanager without a configuration. "
+         + "Set either the `configuration` or `configText` attribute.";
+      };
+    })
+    (mkIf cfg.enable {
+      networking.firewall.allowedTCPPorts = optional cfg.openFirewall cfg.port;
+
+      systemd.services.alertmanager = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network-online.target" ];
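+        # envsubst (below) writes the secret-substituted config to /tmp; because
+        # DynamicUser implies PrivateTmp, that path is a per-service tmpfs rather
+        # than the world-readable /tmp.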
+        preStart = ''
+           ${lib.getBin pkgs.envsubst}/bin/envsubst -o "/tmp/alert-manager-substituted.yaml" \
+                                                    -i "${alertmanagerYml}"
+        '';
+        serviceConfig = {
+          Restart  = "always";
+          StateDirectory = "alertmanager";
+          DynamicUser = true; # implies PrivateTmp
+          EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
+          WorkingDirectory = "/tmp";
+          ExecStart = "${cfg.package}/bin/alertmanager" +
+            optionalString (length cmdlineArgs != 0) (" \\\n  " +
+              concatStringsSep " \\\n  " cmdlineArgs);
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        };
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix
new file mode 100644
index 000000000000..a38855ccd408
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/default.nix
@@ -0,0 +1,1855 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  yaml = pkgs.formats.yaml { };
+  cfg = config.services.prometheus;
+  checkConfigEnabled =
+    (lib.isBool cfg.checkConfig && cfg.checkConfig)
+      || cfg.checkConfig == "syntax-only";
+
+  workingDir = "/var/lib/" + cfg.stateDir;
+
+  triggerReload = pkgs.writeShellScriptBin "trigger-reload-prometheus" ''
+    PATH="${makeBinPath (with pkgs; [ systemd ])}"
+    if systemctl -q is-active prometheus.service; then
+      systemctl reload prometheus.service
+    fi
+  '';
+
+  reload = pkgs.writeShellScriptBin "reload-prometheus" ''
+    PATH="${makeBinPath (with pkgs; [ systemd coreutils gnugrep ])}"
+    cursor=$(journalctl --show-cursor -n0 | grep -oP "cursor: \K.*")
+    kill -HUP $MAINPID
+    journalctl -u prometheus.service --after-cursor="$cursor" -f \
+      | grep -m 1 "Completed loading of configuration file" > /dev/null
+  '';
+
+  # a wrapper that verifies that the configuration is valid
+  promtoolCheck = what: name: file:
+    if checkConfigEnabled then
+      pkgs.runCommandLocal
+        "${name}-${replaceStrings [" "] [""] what}-checked"
+        { nativeBuildInputs = [ cfg.package.cli ]; } ''
+        ln -s ${file} $out
+        promtool ${what} $out
+      '' else file;
+
+  generatedPrometheusYml = yaml.generate "prometheus.yml" promConfig;
+
+  # This becomes the main config file for Prometheus
+  promConfig = {
+    global = filterValidPrometheus cfg.globalConfig;
+    rule_files = map (promtoolCheck "check rules" "rules") (cfg.ruleFiles ++ [
+      (pkgs.writeText "prometheus.rules" (concatStringsSep "\n" cfg.rules))
+    ]);
+    scrape_configs = filterValidPrometheus cfg.scrapeConfigs;
+    remote_write = filterValidPrometheus cfg.remoteWrite;
+    remote_read = filterValidPrometheus cfg.remoteRead;
+    alerting = {
+      inherit (cfg) alertmanagers;
+    };
+  };
+
+  prometheusYml =
+    let
+      yml =
+        if cfg.configText != null then
+          pkgs.writeText "prometheus.yml" cfg.configText
+        else generatedPrometheusYml;
+    in
+    promtoolCheck "check config ${lib.optionalString (cfg.checkConfig == "syntax-only") "--syntax-only"}" "prometheus.yml" yml;
+
+  cmdlineArgs = cfg.extraFlags ++ [
+    "--storage.tsdb.path=${workingDir}/data/"
+    "--config.file=${
+      if cfg.enableReload
+      then "/etc/prometheus/prometheus.yaml"
+      else prometheusYml
+    }"
+    "--web.listen-address=${cfg.listenAddress}:${builtins.toString cfg.port}"
+    "--alertmanager.notification-queue-capacity=${toString cfg.alertmanagerNotificationQueueCapacity}"
+  ] ++ optional (cfg.webExternalUrl != null) "--web.external-url=${cfg.webExternalUrl}"
+    ++ optional (cfg.retentionTime != null) "--storage.tsdb.retention.time=${cfg.retentionTime}"
+    ++ optional (cfg.webConfigFile != null) "--web.config.file=${cfg.webConfigFile}";
+
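+  # Drop module-system artifacts ("_module") and unset (null) values from the
+  # structured settings before YAML generation, recursing through nested
+  # attrsets and lists.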
+  filterValidPrometheus = filterAttrsListRecursive (n: v: !(n == "_module" || v == null));
+  filterAttrsListRecursive = pred: x:
+    if isAttrs x then
+      listToAttrs
+        (
+          concatMap
+            (name:
+              let v = x.${name}; in
+              if pred name v then [
+                (nameValuePair name (filterAttrsListRecursive pred v))
+              ] else [ ]
+            )
+            (attrNames x)
+        )
+    else if isList x then
+      map (filterAttrsListRecursive pred) x
+    else x;
+
+  #
+  # Config types: helper functions
+  #
+
+  mkDefOpt = type: defaultStr: description: mkOpt type (description + ''
+
+    Defaults to ````${defaultStr}```` in prometheus
+    when set to `null`.
+  '');
+
+  mkOpt = type: description: mkOption {
+    type = types.nullOr type;
+    default = null;
+    description = lib.mdDoc description;
+  };
+
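+  # Common HTTP client options (basic_auth, authorization, oauth2, proxy_url,
+  # follow_redirects, tls_config) shared by most *_sd_config submodules below;
+  # `extraOptions` adds the fields specific to each discovery mechanism.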
+  mkSdConfigModule = extraOptions: types.submodule {
+    options = {
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Optional HTTP basic authentication information.
+      '';
+
+      authorization = mkOpt
+        (types.submodule {
+          options = {
+            type = mkDefOpt types.str "Bearer" ''
+              Sets the authentication type.
+            '';
+
+            credentials = mkOpt types.str ''
+              Sets the credentials. It is mutually exclusive with `credentials_file`.
+            '';
+
+            credentials_file = mkOpt types.str ''
+              Sets the credentials to the credentials read from the configured file.
+              It is mutually exclusive with `credentials`.
+            '';
+          };
+        }) ''
+        Optional `Authorization` header configuration.
+      '';
+
+      oauth2 = mkOpt promtypes.oauth2 ''
+        Optional OAuth 2.0 configuration.
+        Cannot be used at the same time as basic_auth or authorization.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      follow_redirects = mkDefOpt types.bool "true" ''
+        Configure whether HTTP requests follow HTTP 3xx redirects.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        TLS configuration.
+      '';
+    } // extraOptions;
+  };
+
+  #
+  # Config types: general
+  #
+
+  promTypes.globalConfig = types.submodule {
+    options = {
+      scrape_interval = mkDefOpt types.str "1m" ''
+        How frequently to scrape targets by default.
+      '';
+
+      scrape_timeout = mkDefOpt types.str "10s" ''
+        How long until a scrape request times out.
+      '';
+
+      evaluation_interval = mkDefOpt types.str "1m" ''
+        How frequently to evaluate rules by default.
+      '';
+
+      external_labels = mkOpt (types.attrsOf types.str) ''
+        The labels to add to any time series or alerts when
+        communicating with external systems (federation, remote
+        storage, Alertmanager).
+      '';
+    };
+  };
+
+  promTypes.basic_auth = types.submodule {
+    options = {
+      username = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          HTTP username
+        '';
+      };
+      password = mkOpt types.str "HTTP password";
+      password_file = mkOpt types.str "HTTP password file";
+    };
+  };
+
+  promTypes.tls_config = types.submodule {
+    options = {
+      ca_file = mkOpt types.str ''
+        CA certificate to validate API server certificate with.
+      '';
+
+      cert_file = mkOpt types.str ''
+        Certificate file for client cert authentication to the server.
+      '';
+
+      key_file = mkOpt types.str ''
+        Key file for client cert authentication to the server.
+      '';
+
+      server_name = mkOpt types.str ''
+        ServerName extension to indicate the name of the server.
+        http://tools.ietf.org/html/rfc4366#section-3.1
+      '';
+
+      insecure_skip_verify = mkOpt types.bool ''
+        Disable validation of the server certificate.
+      '';
+    };
+  };
+
+  promtypes.oauth2 = types.submodule {
+    options = {
+      client_id = mkOpt types.str ''
+        OAuth client ID.
+      '';
+
+      client_secret = mkOpt types.str ''
+        OAuth client secret.
+      '';
+
+      client_secret_file = mkOpt types.str ''
+        Read the client secret from a file. It is mutually exclusive with `client_secret`.
+      '';
+
+      scopes = mkOpt (types.listOf types.str) ''
+        Scopes for the token request.
+      '';
+
+      token_url = mkOpt types.str ''
+        The URL to fetch the token from.
+      '';
+
+      endpoint_params = mkOpt (types.attrsOf types.str) ''
+        Optional parameters to append to the token URL.
+      '';
+    };
+  };
+
+  promTypes.scrape_config = types.submodule {
+    options = {
+      authorization = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        description = lib.mdDoc ''
+          Sets the `Authorization` header on every scrape request with the configured credentials.
+        '';
+      };
+      job_name = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The job name assigned to scraped metrics by default.
+        '';
+      };
+      scrape_interval = mkOpt types.str ''
+        How frequently to scrape targets from this job. Defaults to the
+        globally configured default.
+      '';
+
+      scrape_timeout = mkOpt types.str ''
+        Per-target timeout when scraping this job. Defaults to the
+        globally configured default.
+      '';
+
+      metrics_path = mkDefOpt types.str "/metrics" ''
+        The HTTP resource path on which to fetch metrics from targets.
+      '';
+
+      honor_labels = mkDefOpt types.bool "false" ''
+        Controls how Prometheus handles conflicts between labels
+        that are already present in scraped data and labels that
+        Prometheus would attach server-side ("job" and "instance"
+        labels, manually configured target labels, and labels
+        generated by service discovery implementations).
+
+        If honor_labels is set to "true", label conflicts are
+        resolved by keeping label values from the scraped data and
+        ignoring the conflicting server-side labels.
+
+        If honor_labels is set to "false", label conflicts are
+        resolved by renaming conflicting labels in the scraped data
+        to "exported_\<original-label\>" (for example
+        "exported_instance", "exported_job") and then attaching
+        server-side labels. This is useful for use cases such as
+        federation, where all labels specified in the target should
+        be preserved.
+      '';
+
+      honor_timestamps = mkDefOpt types.bool "true" ''
+        honor_timestamps controls whether Prometheus respects the timestamps present
+        in scraped data.
+
+        If honor_timestamps is set to `true`, the timestamps of the metrics exposed
+        by the target will be used.
+
+        If honor_timestamps is set to `false`, the timestamps of the metrics exposed
+        by the target will be ignored.
+      '';
+
+      scheme = mkDefOpt (types.enum [ "http" "https" ]) "http" ''
+        The URL scheme with which to fetch metrics from targets.
+      '';
+
+      params = mkOpt (types.attrsOf (types.listOf types.str)) ''
+        Optional HTTP URL parameters.
+      '';
+
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Sets the `Authorization` header on every scrape request with the
+        configured username and password.
+        password and password_file are mutually exclusive.
+      '';
+
+      bearer_token = mkOpt types.str ''
+        Sets the `Authorization` header on every scrape request with
+        the configured bearer token. It is mutually exclusive with
+        {option}`bearer_token_file`.
+      '';
+
+      bearer_token_file = mkOpt types.str ''
+        Sets the `Authorization` header on every scrape request with
+        the bearer token read from the configured file. It is mutually
+        exclusive with {option}`bearer_token`.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the scrape request's TLS settings.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      azure_sd_configs = mkOpt (types.listOf promTypes.azure_sd_config) ''
+        List of Azure service discovery configurations.
+      '';
+
+      consul_sd_configs = mkOpt (types.listOf promTypes.consul_sd_config) ''
+        List of Consul service discovery configurations.
+      '';
+
+      digitalocean_sd_configs = mkOpt (types.listOf promTypes.digitalocean_sd_config) ''
+        List of DigitalOcean service discovery configurations.
+      '';
+
+      docker_sd_configs = mkOpt (types.listOf promTypes.docker_sd_config) ''
+        List of Docker service discovery configurations.
+      '';
+
+      dockerswarm_sd_configs = mkOpt (types.listOf promTypes.dockerswarm_sd_config) ''
+        List of Docker Swarm service discovery configurations.
+      '';
+
+      dns_sd_configs = mkOpt (types.listOf promTypes.dns_sd_config) ''
+        List of DNS service discovery configurations.
+      '';
+
+      ec2_sd_configs = mkOpt (types.listOf promTypes.ec2_sd_config) ''
+        List of EC2 service discovery configurations.
+      '';
+
+      eureka_sd_configs = mkOpt (types.listOf promTypes.eureka_sd_config) ''
+        List of Eureka service discovery configurations.
+      '';
+
+      file_sd_configs = mkOpt (types.listOf promTypes.file_sd_config) ''
+        List of file service discovery configurations.
+      '';
+
+      gce_sd_configs = mkOpt (types.listOf promTypes.gce_sd_config) ''
+        List of Google Compute Engine service discovery configurations.
+
+        See [the relevant Prometheus configuration docs](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#gce_sd_config)
+        for more detail.
+      '';
+
+      hetzner_sd_configs = mkOpt (types.listOf promTypes.hetzner_sd_config) ''
+        List of Hetzner service discovery configurations.
+      '';
+
+      http_sd_configs = mkOpt (types.listOf promTypes.http_sd_config) ''
+        List of HTTP service discovery configurations.
+      '';
+
+      kubernetes_sd_configs = mkOpt (types.listOf promTypes.kubernetes_sd_config) ''
+        List of Kubernetes service discovery configurations.
+      '';
+
+      kuma_sd_configs = mkOpt (types.listOf promTypes.kuma_sd_config) ''
+        List of Kuma service discovery configurations.
+      '';
+
+      lightsail_sd_configs = mkOpt (types.listOf promTypes.lightsail_sd_config) ''
+        List of Lightsail service discovery configurations.
+      '';
+
+      linode_sd_configs = mkOpt (types.listOf promTypes.linode_sd_config) ''
+        List of Linode service discovery configurations.
+      '';
+
+      marathon_sd_configs = mkOpt (types.listOf promTypes.marathon_sd_config) ''
+        List of Marathon service discovery configurations.
+      '';
+
+      nerve_sd_configs = mkOpt (types.listOf promTypes.nerve_sd_config) ''
+        List of AirBnB's Nerve service discovery configurations.
+      '';
+
+      openstack_sd_configs = mkOpt (types.listOf promTypes.openstack_sd_config) ''
+        List of OpenStack service discovery configurations.
+      '';
+
+      puppetdb_sd_configs = mkOpt (types.listOf promTypes.puppetdb_sd_config) ''
+        List of PuppetDB service discovery configurations.
+      '';
+
+      scaleway_sd_configs = mkOpt (types.listOf promTypes.scaleway_sd_config) ''
+        List of Scaleway service discovery configurations.
+      '';
+
+      serverset_sd_configs = mkOpt (types.listOf promTypes.serverset_sd_config) ''
+        List of Zookeeper Serverset service discovery configurations.
+      '';
+
+      triton_sd_configs = mkOpt (types.listOf promTypes.triton_sd_config) ''
+        List of Triton service discovery configurations.
+      '';
+
+      uyuni_sd_configs = mkOpt (types.listOf promTypes.uyuni_sd_config) ''
+        List of Uyuni service discovery configurations.
+      '';
+
+      static_configs = mkOpt (types.listOf promTypes.static_config) ''
+        List of labeled target groups for this job.
+      '';
+
+      relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
+        List of relabel configurations.
+      '';
+
+      metric_relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
+        List of metric relabel configurations.
+      '';
+
+      body_size_limit = mkDefOpt types.str "0" ''
+        An uncompressed response body larger than this many bytes will cause the
+        scrape to fail. 0 means no limit. Example: 100MB.
+        This is an experimental feature; this behaviour could
+        change or be removed in the future.
+      '';
+
+      sample_limit = mkDefOpt types.int "0" ''
+        Per-scrape limit on number of scraped samples that will be accepted.
+        If more than this number of samples are present after metric relabelling
+        the entire scrape will be treated as failed. 0 means no limit.
+      '';
+
+      label_limit = mkDefOpt types.int "0" ''
+        Per-scrape limit on number of labels that will be accepted for a sample. If
+        more than this number of labels are present post metric-relabeling, the
+        entire scrape will be treated as failed. 0 means no limit.
+      '';
+
+      label_name_length_limit = mkDefOpt types.int "0" ''
+        Per-scrape limit on length of labels name that will be accepted for a sample.
+        If a label name is longer than this number post metric-relabeling, the entire
+        scrape will be treated as failed. 0 means no limit.
+      '';
+
+      label_value_length_limit = mkDefOpt types.int "0" ''
+        Per-scrape limit on length of labels value that will be accepted for a sample.
+        If a label value is longer than this number post metric-relabeling, the
+        entire scrape will be treated as failed. 0 means no limit.
+      '';
+
+      target_limit = mkDefOpt types.int "0" ''
+        Per-scrape config limit on number of unique targets that will be
+        accepted. If more than this number of targets are present after target
+        relabeling, Prometheus will mark the targets as failed without scraping them.
+        0 means no limit. This is an experimental feature; this behaviour could
+        change in the future.
+      '';
+    };
+  };
+
+  #
+  # Config types: service discovery
+  #
+
+  # For this one, the docs actually define all types needed to use mkSdConfigModule, but a bunch
+  # of them are marked with 'currently not supported by Azure' so we don't bother adding them in
+  # here.
+  promTypes.azure_sd_config = types.submodule {
+    options = {
+      environment = mkDefOpt types.str "AzurePublicCloud" ''
+        The Azure environment.
+      '';
+
+      authentication_method = mkDefOpt (types.enum [ "OAuth" "ManagedIdentity" ]) "OAuth" ''
+        The authentication method, either OAuth or ManagedIdentity.
+        See https://docs.microsoft.com/en-us/azure/active-directory/managed-identities-azure-resources/overview
+      '';
+
+      subscription_id = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The subscription ID.
+        '';
+      };
+
+      tenant_id = mkOpt types.str ''
+        Optional tenant ID. Only required with authentication_method OAuth.
+      '';
+
+      client_id = mkOpt types.str ''
+        Optional client ID. Only required with authentication_method OAuth.
+      '';
+
+      client_secret = mkOpt types.str ''
+        Optional client secret. Only required with authentication_method OAuth.
+      '';
+
+      refresh_interval = mkDefOpt types.str "300s" ''
+        Refresh interval to re-read the instance list.
+      '';
+
+      port = mkDefOpt types.int "80" ''
+        The port to scrape metrics from. If using the public IP
+        address, this must instead be specified in the relabeling
+        rule.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      follow_redirects = mkDefOpt types.bool "true" ''
+        Configure whether HTTP requests follow HTTP 3xx redirects.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        TLS configuration.
+      '';
+    };
+  };
+
+  promTypes.consul_sd_config = mkSdConfigModule {
+    server = mkDefOpt types.str "localhost:8500" ''
+      Consul server to query.
+    '';
+
+    token = mkOpt types.str "Consul token";
+
+    datacenter = mkOpt types.str "Consul datacenter";
+
+    scheme = mkDefOpt types.str "http" "Consul scheme";
+
+    username = mkOpt types.str "Consul username";
+
+    password = mkOpt types.str "Consul password";
+
+    tls_config = mkOpt promTypes.tls_config ''
+      Configures the Consul request's TLS settings.
+    '';
+
+    services = mkOpt (types.listOf types.str) ''
+      A list of services for which targets are retrieved.
+    '';
+
+    tags = mkOpt (types.listOf types.str) ''
+      An optional list of tags used to filter nodes for a given
+      service. Services must contain all tags in the list.
+    '';
+
+    node_meta = mkOpt (types.attrsOf types.str) ''
+      Node metadata used to filter nodes for a given service.
+    '';
+
+    tag_separator = mkDefOpt types.str "," ''
+      The string by which Consul tags are joined into the tag label.
+    '';
+
+    allow_stale = mkOpt types.bool ''
+      Allow stale Consul results
+      (see <https://www.consul.io/api/index.html#consistency-modes>).
+
+      Will reduce load on Consul.
+    '';
+
+    refresh_interval = mkDefOpt types.str "30s" ''
+      The time after which the provided names are refreshed.
+
+      On large setups it might be a good idea to increase this value
+      because the catalog will change all the time.
+    '';
+  };
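+
+  # Illustrative only: discovering a single Consul service via the options above,
+  # inside a scrape config's consul_sd_configs list:
+  #   { server = "localhost:8500"; services = [ "node-exporter" ]; }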
+
+  promTypes.digitalocean_sd_config = mkSdConfigModule {
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      The time after which the droplets are refreshed.
+    '';
+  };
+
+  mkDockerSdConfigModule = extraOptions: mkSdConfigModule ({
+    host = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Address of the Docker daemon.
+      '';
+    };
+
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from, when `role` is nodes, and for discovered
+      tasks and services that don't have published ports.
+    '';
+
+    filters = mkOpt
+      (types.listOf (types.submodule {
+        options = {
+          name = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Name of the filter. The available filters are listed in the upstream documentation:
+              Services: <https://docs.docker.com/engine/api/v1.40/#operation/ServiceList>
+              Tasks: <https://docs.docker.com/engine/api/v1.40/#operation/TaskList>
+              Nodes: <https://docs.docker.com/engine/api/v1.40/#operation/NodeList>
+            '';
+          };
+          values = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Value for the filter.
+            '';
+          };
+        };
+      })) ''
+      Optional filters to limit the discovery process to a subset of available resources.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      The time after which the containers are refreshed.
+    '';
+  } // extraOptions);
+
+  promTypes.docker_sd_config = mkDockerSdConfigModule {
+    host_networking_host = mkDefOpt types.str "localhost" ''
+      The host to use if the container is in host networking mode.
+    '';
+  };
+
+  promTypes.dockerswarm_sd_config = mkDockerSdConfigModule {
+    role = mkOption {
+      type = types.enum [ "services" "tasks" "nodes" ];
+      description = lib.mdDoc ''
+        Role of the targets to retrieve. Must be `services`, `tasks`, or `nodes`.
+      '';
+    };
+  };
+
+  promTypes.dns_sd_config = types.submodule {
+    options = {
+      names = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          A list of DNS SRV record names to be queried.
+        '';
+      };
+
+      type = mkDefOpt (types.enum [ "SRV" "A" "AAAA" ]) "SRV" ''
+        The type of DNS query to perform. One of SRV, A, or AAAA.
+      '';
+
+      port = mkOpt types.int ''
+        The port number used if the query type is not SRV.
+      '';
+
+      refresh_interval = mkDefOpt types.str "30s" ''
+        The time after which the provided names are refreshed.
+      '';
+    };
+  };
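+
+  # Sketch (record name is illustrative): SRV-based discovery as it would appear
+  # in a scrape config's dns_sd_configs list (SRV is the default query type):
+  #   { names = [ "_prometheus._tcp.example.com" ]; }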
+
+  promTypes.ec2_sd_config = types.submodule {
+    options = {
+      region = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The AWS Region. If blank, the region from the instance metadata is used.
+        '';
+      };
+      endpoint = mkOpt types.str ''
+        Custom endpoint to be used.
+      '';
+
+      access_key = mkOpt types.str ''
+        The AWS API key id. If blank, the environment variable
+        `AWS_ACCESS_KEY_ID` is used.
+      '';
+
+      secret_key = mkOpt types.str ''
+        The AWS API key secret. If blank, the environment variable
+         `AWS_SECRET_ACCESS_KEY` is used.
+      '';
+
+      profile = mkOpt types.str ''
+        Named AWS profile used to connect to the API.
+      '';
+
+      role_arn = mkOpt types.str ''
+        AWS Role ARN, an alternative to using AWS API keys.
+      '';
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-read the instance list.
+      '';
+
+      port = mkDefOpt types.int "80" ''
+        The port to scrape metrics from. If using the public IP
+        address, this must instead be specified in the relabeling
+        rule.
+      '';
+
+      filters = mkOpt
+        (types.listOf (types.submodule {
+          options = {
+            name = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                See [this list](https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_DescribeInstances.html)
+                for the available filters.
+              '';
+            };
+
+            values = mkOption {
+              type = types.listOf types.str;
+              default = [ ];
+              description = lib.mdDoc ''
+                Value of the filter.
+              '';
+            };
+          };
+        })) ''
+        Filters can be used optionally to filter the instance list by other criteria.
+      '';
+    };
+  };
+
+  promTypes.eureka_sd_config = mkSdConfigModule {
+    server = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The URL to connect to the Eureka server.
+      '';
+    };
+  };
+
+  promTypes.file_sd_config = types.submodule {
+    options = {
+      files = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Patterns for files from which target groups are extracted. Refer
+          to the Prometheus documentation for permitted filename patterns
+          and formats.
+        '';
+      };
+
+      refresh_interval = mkDefOpt types.str "5m" ''
+        Refresh interval to re-read the files.
+      '';
+    };
+  };
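+
+  # Illustrative sketch only: a file matched by `files` (for example
+  # "/etc/prometheus/targets/*.json") is expected to contain target groups in
+  # Prometheus' file_sd format, e.g.
+  #   [ { "targets": [ "10.0.0.5:9100" ], "labels": { "env": "prod" } } ]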
+
+  promTypes.gce_sd_config = types.submodule {
+    options = {
+      # Use `mkOption` instead of `mkOpt` for project and zone because they are
+      # required configuration values for `gce_sd_config`.
+      project = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The GCP Project.
+        '';
+      };
+
+      zone = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The zone of the scrape targets. If you need multiple zones use multiple
+          gce_sd_configs.
+        '';
+      };
+
+      filter = mkOpt types.str ''
+        Filter can be used optionally to filter the instance list by other
+        criteria. The syntax of this filter string is described in the filter
+        query parameter section: <https://cloud.google.com/compute/docs/reference/latest/instances/list>.
+      '';
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-read the cloud instance list.
+      '';
+
+      port = mkDefOpt types.port "80" ''
+        The port to scrape metrics from. If using the public IP address, this
+        must instead be specified in the relabeling rule.
+      '';
+
+      tag_separator = mkDefOpt types.str "," ''
+        The tag separator used to separate concatenated GCE instance network tags.
+
+        See the GCP documentation on network tags for more information:
+        <https://cloud.google.com/vpc/docs/add-remove-network-tags>
+      '';
+    };
+  };
+
+  promTypes.hetzner_sd_config = mkSdConfigModule {
+    role = mkOption {
+      type = types.enum [ "robot" "hcloud" ];
+      description = lib.mdDoc ''
+        The Hetzner role of entities that should be discovered.
+        One of `robot` or `hcloud`.
+      '';
+    };
+
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      The time after which the servers are refreshed.
+    '';
+  };
+
+  promTypes.http_sd_config = types.submodule {
+    options = {
+      url = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          URL from which the targets are fetched.
+        '';
+      };
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-query the endpoint.
+      '';
+
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Authentication information used to authenticate to the API server.
+        password and password_file are mutually exclusive.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      follow_redirects = mkDefOpt types.bool "true" ''
+        Configure whether HTTP requests follow HTTP 3xx redirects.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the scrape request's TLS settings.
+      '';
+    };
+  };
+
+  promTypes.kubernetes_sd_config = mkSdConfigModule {
+    api_server = mkOpt types.str ''
+      The API server addresses. If left empty, Prometheus is assumed to run inside
+      of the cluster and will discover API servers automatically and use the pod's
+      CA certificate and bearer token file at /var/run/secrets/kubernetes.io/serviceaccount/.
+    '';
+
+    role = mkOption {
+      type = types.enum [ "endpoints" "service" "pod" "node" "ingress" ];
+      description = lib.mdDoc ''
+        The Kubernetes role of entities that should be discovered.
+        One of endpoints, service, pod, node, or ingress.
+      '';
+    };
+
+    kubeconfig_file = mkOpt types.str ''
+      Optional path to a kubeconfig file.
+      Note that api_server and kube_config are mutually exclusive.
+    '';
+
+    namespaces = mkOpt
+      (
+        types.submodule {
+          options = {
+            names = mkOpt (types.listOf types.str) ''
+              Namespace name.
+            '';
+          };
+        }
+      ) ''
+      Optional namespace discovery. If omitted, all namespaces are used.
+    '';
+
+    selectors = mkOpt
+      (
+        types.listOf (
+          types.submodule {
+            options = {
+              role = mkOption {
+                type = types.str;
+                description = lib.mdDoc ''
+                  Selector role
+                '';
+              };
+
+              label = mkOpt types.str ''
+                Selector label
+              '';
+
+              field = mkOpt types.str ''
+                Selector field
+              '';
+            };
+          }
+        )
+      ) ''
+      Optional label and field selectors to limit the discovery process to a subset of available resources.
+      See https://kubernetes.io/docs/concepts/overview/working-with-objects/field-selectors/
+      and https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ to learn more about the possible
+      filters that can be used. Endpoints role supports pod, service and endpoints selectors, other roles
+      only support selectors matching the role itself (e.g. node role can only contain node selectors).
+
+      Note: When deciding whether to use field/label selectors, make sure that this
+      is the best approach: it will prevent Prometheus from reusing a single list/watch
+      for all scrape configs. This might result in a bigger load on the Kubernetes API,
+      because there will be one additional LIST/WATCH per selector combination. On the
+      other hand, if you only want to monitor a small subset of pods in a large cluster,
+      using selectors is recommended. Whether selectors should be used depends on the
+      particular situation.
+    '';
+  };
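+
+  # Hedged illustration (namespace and label are made up): restricting discovery
+  # to pods in one namespace using the options above, as it would appear in a
+  # scrape config's kubernetes_sd_configs list:
+  #   {
+  #     role = "pod";
+  #     namespaces.names = [ "monitoring" ];
+  #     selectors = [ { role = "pod"; label = "app=node-exporter"; } ];
+  #   }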
+
+  promTypes.kuma_sd_config = mkSdConfigModule {
+    server = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Address of the Kuma Control Plane's MADS xDS server.
+      '';
+    };
+
+    refresh_interval = mkDefOpt types.str "30s" ''
+      The time to wait between polling update requests.
+    '';
+
+    fetch_timeout = mkDefOpt types.str "2m" ''
+      The time after which the monitoring assignments are refreshed.
+    '';
+  };
+
+  promTypes.lightsail_sd_config = types.submodule {
+    options = {
+      region = mkOpt types.str ''
+        The AWS region. If blank, the region from the instance metadata is used.
+      '';
+
+      endpoint = mkOpt types.str ''
+        Custom endpoint to be used.
+      '';
+
+      access_key = mkOpt types.str ''
+        The AWS API keys. If blank, the environment variable `AWS_ACCESS_KEY_ID` is used.
+      '';
+
+      secret_key = mkOpt types.str ''
+        The AWS API keys. If blank, the environment variable `AWS_SECRET_ACCESS_KEY` is used.
+      '';
+
+      profile = mkOpt types.str ''
+        Named AWS profile used to connect to the API.
+      '';
+
+      role_arn = mkOpt types.str ''
+        AWS Role ARN, an alternative to using AWS API keys.
+      '';
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-read the instance list.
+      '';
+
+      port = mkDefOpt types.int "80" ''
+        The port to scrape metrics from. If using the public IP address, this must
+        instead be specified in the relabeling rule.
+      '';
+    };
+  };
+
+  promTypes.linode_sd_config = mkSdConfigModule {
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from.
+    '';
+
+    tag_separator = mkDefOpt types.str "," ''
+      The string by which Linode Instance tags are joined into the tag label.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      The time after which the linode instances are refreshed.
+    '';
+  };
+
+  promTypes.marathon_sd_config = mkSdConfigModule {
+    servers = mkOption {
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        List of URLs to be used to contact Marathon servers. You need to provide at least one server URL.
+      '';
+    };
+
+    refresh_interval = mkDefOpt types.str "30s" ''
+      Polling interval.
+    '';
+
+    auth_token = mkOpt types.str ''
+      Optional authentication information for token-based authentication:
+      <https://docs.mesosphere.com/1.11/security/ent/iam-api/#passing-an-authentication-token>
+      It is mutually exclusive with `auth_token_file` and other authentication mechanisms.
+    '';
+
+    auth_token_file = mkOpt types.str ''
+      Optional authentication information for token-based authentication:
+      <https://docs.mesosphere.com/1.11/security/ent/iam-api/#passing-an-authentication-token>
+      It is mutually exclusive with `auth_token` and other authentication mechanisms.
+    '';
+  };
+
+  promTypes.nerve_sd_config = types.submodule {
+    options = {
+      servers = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The Zookeeper servers.
+        '';
+      };
+
+      paths = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Paths can point to a single service, or the root of a tree of services.
+        '';
+      };
+
+      timeout = mkDefOpt types.str "10s" ''
+        Timeout value.
+      '';
+    };
+  };
+
+  promTypes.openstack_sd_config = types.submodule {
+    options =
+      let
+        userDescription = ''
+          username is required if using Identity V2 API. Consult with your provider's
+          control panel to discover your account's username. In Identity V3, either
+          userid or a combination of username and domain_id or domain_name are needed.
+          userid or a combination of username and domain_id or domain_name is needed.
+
+        domainDescription = ''
+          At most one of domain_id and domain_name must be provided if using username
+          with Identity V3. Otherwise, either is optional.
+        '';
+
+        projectDescription = ''
+          The project_id and project_name fields are optional for the Identity V2 API.
+          Some providers allow you to specify a project_name instead of the project_id.
+          Some require both. Your provider's authentication policies will determine
+          how these fields influence authentication.
+        '';
+
+        applicationDescription = ''
+          The application_credential_id or application_credential_name fields are
+          required if using an application credential to authenticate. Some providers
+          allow you to create an application credential to authenticate rather than a
+          password.
+        '';
+      in
+      {
+        role = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            The OpenStack role of entities that should be discovered.
+          '';
+        };
+
+        region = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            The OpenStack Region.
+          '';
+        };
+
+        identity_endpoint = mkOpt types.str ''
+          identity_endpoint specifies the HTTP endpoint that is required to work with
+          the Identity API of the appropriate version. While it's ultimately needed by
+          all of the identity services, it will often be populated by a provider-level
+          function.
+        '';
+
+        username = mkOpt types.str userDescription;
+        userid = mkOpt types.str userDescription;
+
+        password = mkOpt types.str ''
+          password for the Identity V2 and V3 APIs. Consult with your provider's
+          control panel to discover your account's preferred method of authentication.
+        '';
+
+        domain_name = mkOpt types.str domainDescription;
+        domain_id = mkOpt types.str domainDescription;
+
+        project_name = mkOpt types.str projectDescription;
+        project_id = mkOpt types.str projectDescription;
+
+        application_credential_name = mkOpt types.str applicationDescription;
+        application_credential_id = mkOpt types.str applicationDescription;
+
+        application_credential_secret = mkOpt types.str ''
+          The application_credential_secret field is required if using an application
+          credential to authenticate.
+        '';
+
+        all_tenants = mkDefOpt types.bool "false" ''
+          Whether the service discovery should list all instances for all projects.
+          It is only relevant for the 'instance' role and usually requires admin permissions.
+        '';
+
+        refresh_interval = mkDefOpt types.str "60s" ''
+          Refresh interval to re-read the instance list.
+        '';
+
+        port = mkDefOpt types.int "80" ''
+          The port to scrape metrics from. If using the public IP address, this must
+          instead be specified in the relabeling rule.
+        '';
+
+        availability = mkDefOpt (types.enum [ "public" "admin" "internal" ]) "public" ''
+          The availability of the endpoint to connect to. Must be one of public, admin or internal.
+        '';
+
+        tls_config = mkOpt promTypes.tls_config ''
+          TLS configuration.
+        '';
+      };
+  };
+
+  promTypes.puppetdb_sd_config = mkSdConfigModule {
+    url = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The URL of the PuppetDB root query endpoint.
+      '';
+    };
+
+    query = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Puppet Query Language (PQL) query. Only resources are supported.
+        https://puppet.com/docs/puppetdb/latest/api/query/v4/pql.html
+      '';
+    };
+
+    include_parameters = mkDefOpt types.bool "false" ''
+      Whether to include the parameters as meta labels.
+      Due to the differences between parameter types and Prometheus labels,
+      some parameters might not be rendered. The format of the parameters might
+      also change in future releases.
+
+      Note: Enabling this exposes parameters in the Prometheus UI and API. Make sure
+      that you don't have secrets exposed as parameters if you enable this.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      Refresh interval to re-read the resources list.
+    '';
+
+    port = mkDefOpt types.int "80" ''
+      The port to scrape metrics from.
+    '';
+  };
+
+  promTypes.scaleway_sd_config = types.submodule {
+    options = {
+      access_key = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Access key to use. https://console.scaleway.com/project/credentials
+        '';
+      };
+
+      secret_key = mkOpt types.str ''
+        Secret key to use when listing targets. https://console.scaleway.com/project/credentials
+        It is mutually exclusive with `secret_key_file`.
+      '';
+
+      secret_key_file = mkOpt types.str ''
+        Sets the secret key with the credentials read from the configured file.
+        It is mutually exclusive with `secret_key`.
+      '';
+
+      project_id = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Project ID of the targets.
+        '';
+      };
+
+      role = mkOption {
+        type = types.enum [ "instance" "baremetal" ];
+        description = lib.mdDoc ''
+          Role of the targets to retrieve. Must be `instance` or `baremetal`.
+        '';
+      };
+
+      port = mkDefOpt types.int "80" ''
+        The port to scrape metrics from.
+      '';
+
+      api_url = mkDefOpt types.str "https://api.scaleway.com" ''
+        API URL to use when doing the server listing requests.
+      '';
+
+      zone = mkDefOpt types.str "fr-par-1" ''
+        Zone is the availability zone of your targets (e.g. fr-par-1).
+      '';
+
+      name_filter = mkOpt types.str ''
+        Specify a name filter (works as a LIKE) to apply on the server listing request.
+      '';
+
+      tags_filter = mkOpt (types.listOf types.str) ''
+        Specify a tag filter (a server needs to have all defined tags to be listed) to apply on the server listing request.
+      '';
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        Refresh interval to re-read the managed targets list.
+      '';
+
+      proxy_url = mkOpt types.str ''
+        Optional proxy URL.
+      '';
+
+      follow_redirects = mkDefOpt types.bool "true" ''
+        Configure whether HTTP requests follow HTTP 3xx redirects.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        TLS configuration.
+      '';
+    };
+  };
+
+  # These are exactly the same.
+  promTypes.serverset_sd_config = promTypes.nerve_sd_config;
+
+  promTypes.triton_sd_config = types.submodule {
+    options = {
+      account = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The account to use for discovering new targets.
+        '';
+      };
+
+      role = mkDefOpt (types.enum [ "container" "cn" ]) "container" ''
+        The type of targets to discover, can be set to:
+        - "container" to discover virtual machines (SmartOS zones, lx/KVM/bhyve branded zones) running on Triton
+        - "cn" to discover compute nodes (servers/global zones) making up the Triton infrastructure
+      '';
+
+      dns_suffix = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The DNS suffix which should be applied to the target.
+        '';
+      };
+
+      endpoint = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The Triton discovery endpoint (e.g. `cmon.us-east-3b.triton.zone`). This is
+          often the same value as dns_suffix.
+        '';
+      };
+
+      groups = mkOpt (types.listOf types.str) ''
+        A list of groups for which targets are retrieved, only supported when targeting the `container` role.
+        If omitted all containers owned by the requesting account are scraped.
+      '';
+
+      port = mkDefOpt types.int "9163" ''
+        The port to use for discovery and metric scraping.
+      '';
+
+      refresh_interval = mkDefOpt types.str "60s" ''
+        The interval which should be used for refreshing targets.
+      '';
+
+      version = mkDefOpt types.int "1" ''
+        The Triton discovery API version.
+      '';
+
+      tls_config = mkOpt promTypes.tls_config ''
+        TLS configuration.
+      '';
+    };
+  };
+
+  promTypes.uyuni_sd_config = mkSdConfigModule {
+    server = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The URL to connect to the Uyuni server.
+      '';
+    };
+
+    username = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Credentials are used to authenticate the requests to Uyuni API.
+      '';
+    };
+
+    password = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Credentials are used to authenticate the requests to Uyuni API.
+      '';
+    };
+
+    entitlement = mkDefOpt types.str "monitoring_entitled" ''
+      The entitlement string to filter eligible systems.
+    '';
+
+    separator = mkDefOpt types.str "," ''
+      The string by which Uyuni group names are joined into the groups label.
+    '';
+
+    refresh_interval = mkDefOpt types.str "60s" ''
+      Refresh interval to re-read the managed targets list.
+    '';
+  };
+
+  promTypes.static_config = types.submodule {
+    options = {
+      targets = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The targets specified by the target group.
+        '';
+      };
+      labels = mkOption {
+        type = types.attrsOf types.str;
+        default = { };
+        description = lib.mdDoc ''
+          Labels assigned to all metrics scraped from the targets.
+        '';
+      };
+    };
+  };
+
+  #
+  # Config types: relabeling
+  #
+
+  promTypes.relabel_config = types.submodule {
+    options = {
+      source_labels = mkOpt (types.listOf types.str) ''
+        The source labels select values from existing labels. Their content
+        is concatenated using the configured separator and matched against
+        the configured regular expression.
+      '';
+
+      separator = mkDefOpt types.str ";" ''
+        Separator placed between concatenated source label values.
+      '';
+
+      target_label = mkOpt types.str ''
+        Label to which the resulting value is written in a replace action.
+        It is mandatory for replace actions.
+      '';
+
+      regex = mkDefOpt types.str "(.*)" ''
+        Regular expression against which the extracted value is matched.
+      '';
+
+      modulus = mkOpt types.int ''
+        Modulus to take of the hash of the source label values.
+      '';
+
+      replacement = mkDefOpt types.str "$1" ''
+        Replacement value against which a regex replace is performed if the
+        regular expression matches.
+      '';
+
+      action =
+        mkDefOpt (types.enum [ "replace" "lowercase" "uppercase" "keep" "drop" "hashmod" "labelmap" "labeldrop" "labelkeep" ]) "replace" ''
+          Action to perform based on regex matching.
+        '';
+    };
+  };
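+
+  # Rough usage sketch (assuming, as in upstream Prometheus, that the
+  # scrape_config type defined above accepts a relabel_configs list): a job
+  # could copy the scrape address into its own label like this:
+  #
+  #   services.prometheus.scrapeConfigs = [{
+  #     job_name = "node";
+  #     static_configs = [{ targets = [ "localhost:9100" ]; }];
+  #     relabel_configs = [{
+  #       source_labels = [ "__address__" ];
+  #       target_label = "instance";
+  #     }];
+  #   }];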
+
+  #
+  # Config types: remote read / write
+  #
+
+  promTypes.remote_write = types.submodule {
+    options = {
+      url = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The URL of the endpoint to send samples to.
+        '';
+      };
+      remote_timeout = mkOpt types.str ''
+        Timeout for requests to the remote write endpoint.
+      '';
+      write_relabel_configs = mkOpt (types.listOf promTypes.relabel_config) ''
+        List of remote write relabel configurations.
+      '';
+      name = mkOpt types.str ''
+        Name of the remote write config, which if specified must be unique among remote write configs.
+        The name will be used in metrics and logging in place of a generated value to help users distinguish between
+        remote write configs.
+      '';
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Sets the `Authorization` header on every remote write request with the
+        configured username and password.
+        password and password_file are mutually exclusive.
+      '';
+      bearer_token = mkOpt types.str ''
+        Sets the `Authorization` header on every remote write request with
+        the configured bearer token. It is mutually exclusive with `bearer_token_file`.
+      '';
+      bearer_token_file = mkOpt types.str ''
+        Sets the `Authorization` header on every remote write request with the bearer token
+        read from the configured file. It is mutually exclusive with `bearer_token`.
+      '';
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the remote write request's TLS settings.
+      '';
+      proxy_url = mkOpt types.str "Optional proxy URL.";
+      queue_config = mkOpt
+        (types.submodule {
+          options = {
+            capacity = mkOpt types.int ''
+              Number of samples to buffer per shard before we block reading of more
+              samples from the WAL. It is recommended to have enough capacity in each
+              shard to buffer several requests to keep throughput up while processing
+              occasional slow remote requests.
+            '';
+            max_shards = mkOpt types.int ''
+              Maximum number of shards, i.e. amount of concurrency.
+            '';
+            min_shards = mkOpt types.int ''
+              Minimum number of shards, i.e. amount of concurrency.
+            '';
+            max_samples_per_send = mkOpt types.int ''
+              Maximum number of samples per send.
+            '';
+            batch_send_deadline = mkOpt types.str ''
+              Maximum time a sample will wait in buffer.
+            '';
+            min_backoff = mkOpt types.str ''
+              Initial retry delay. Gets doubled for every retry.
+            '';
+            max_backoff = mkOpt types.str ''
+              Maximum retry delay.
+            '';
+          };
+        }) ''
+        Configures the queue used to write to remote storage.
+      '';
+      metadata_config = mkOpt
+        (types.submodule {
+          options = {
+            send = mkOpt types.bool ''
+              Whether metric metadata is sent to remote storage or not.
+            '';
+            send_interval = mkOpt types.str ''
+              How frequently metric metadata is sent to remote storage.
+            '';
+          };
+        }) ''
+        Configures the sending of series metadata to remote storage.
+        Metadata configuration is subject to change at any point
+        or be removed in future releases.
+      '';
+    };
+  };
+
+  promTypes.remote_read = types.submodule {
+    options = {
+      url = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The URL of the endpoint to query from.
+        '';
+      };
+      name = mkOpt types.str ''
+        Name of the remote read config, which if specified must be unique among remote read configs.
+        The name will be used in metrics and logging in place of a generated value to help users distinguish between
+        remote read configs.
+      '';
+      required_matchers = mkOpt (types.attrsOf types.str) ''
+        An optional list of equality matchers which have to be
+        present in a selector to query the remote read endpoint.
+      '';
+      remote_timeout = mkOpt types.str ''
+        Timeout for requests to the remote read endpoint.
+      '';
+      read_recent = mkOpt types.bool ''
+        Whether reads should be made for queries for time ranges that
+        the local storage should have complete data for.
+      '';
+      basic_auth = mkOpt promTypes.basic_auth ''
+        Sets the `Authorization` header on every remote read request with the
+        configured username and password.
+        password and password_file are mutually exclusive.
+      '';
+      bearer_token = mkOpt types.str ''
+        Sets the `Authorization` header on every remote read request with
+        the configured bearer token. It is mutually exclusive with `bearer_token_file`.
+      '';
+      bearer_token_file = mkOpt types.str ''
+        Sets the `Authorization` header on every remote read request with the bearer token
+        read from the configured file. It is mutually exclusive with `bearer_token`.
+      '';
+      tls_config = mkOpt promTypes.tls_config ''
+        Configures the remote read request's TLS settings.
+      '';
+      proxy_url = mkOpt types.str "Optional proxy URL.";
+    };
+  };
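+
+  # Rough usage sketch (placeholder URLs): samples can be forwarded to, or read
+  # back from, compatible remote storage via the remoteWrite/remoteRead options
+  # declared below, e.g.
+  #
+  #   services.prometheus.remoteWrite = [{
+  #     url = "https://remote-storage.example.org/api/v1/write";
+  #   }];
+  #
+  #   services.prometheus.remoteRead = [{
+  #     url = "https://remote-storage.example.org/api/v1/read";
+  #   }];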
+
+in
+{
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "prometheus2" ] [ "services" "prometheus" ])
+    (mkRemovedOptionModule [ "services" "prometheus" "environmentFile" ]
+      "It has been removed since it was causing issues (https://github.com/NixOS/nixpkgs/issues/126083) and Prometheus now has native support for secret files, i.e. `basic_auth.password_file` and `authorization.credentials_file`.")
+    (mkRemovedOptionModule [ "services" "prometheus" "alertmanagerTimeout" ]
+      "Deprecated upstream and no longer had any effect")
+  ];
+
+  options.services.prometheus = {
+
+    enable = mkEnableOption (lib.mdDoc "Prometheus monitoring daemon");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.prometheus;
+      defaultText = literalExpression "pkgs.prometheus";
+      description = lib.mdDoc ''
+        The prometheus package that should be used.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 9090;
+      description = lib.mdDoc ''
+        Port to listen on.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc ''
+        Address to listen on for the web interface, API, and telemetry.
+      '';
+    };
+
+    stateDir = mkOption {
+      type = types.str;
+      default = "prometheus2";
+      description = lib.mdDoc ''
+        Directory below `/var/lib` to store Prometheus metrics data.
+        This directory will be created automatically using systemd's StateDirectory mechanism.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra commandline options when launching Prometheus.
+      '';
+    };
+
+    enableReload = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Reload prometheus when the configuration file changes (instead of restarting).
+
+        The following property holds: switching to a configuration
+        (`switch-to-configuration`) that changes the prometheus
+        configuration only finishes successfully when prometheus has finished
+        loading the new configuration.
+      '';
+    };
+
+    configText = mkOption {
+      type = types.nullOr types.lines;
+      default = null;
+      description = lib.mdDoc ''
+        If non-null, this option defines the text that is written to
+        prometheus.yml. If null, the contents of prometheus.yml are generated
+        from the structured config options.
+      '';
+    };
+
+    globalConfig = mkOption {
+      type = promTypes.globalConfig;
+      default = { };
+      description = lib.mdDoc ''
+        Parameters that are valid in all configuration contexts. They
+        also serve as defaults for other configuration sections.
+      '';
+    };
+
+    remoteRead = mkOption {
+      type = types.listOf promTypes.remote_read;
+      default = [ ];
+      description = lib.mdDoc ''
+        Parameters of the endpoints to query from.
+        See [the official documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read) for more information.
+      '';
+    };
+
+    remoteWrite = mkOption {
+      type = types.listOf promTypes.remote_write;
+      default = [ ];
+      description = lib.mdDoc ''
+        Parameters of the endpoints to send samples to.
+        See [the official documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write) for more information.
+      '';
+    };
+
+    rules = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      description = lib.mdDoc ''
+        Alerting and/or Recording rules to evaluate at runtime.
+      '';
+    };
+
+    ruleFiles = mkOption {
+      type = types.listOf types.path;
+      default = [ ];
+      description = lib.mdDoc ''
+        Any additional rules files to include in this configuration.
+      '';
+    };
+
+    scrapeConfigs = mkOption {
+      type = types.listOf promTypes.scrape_config;
+      default = [ ];
+      description = lib.mdDoc ''
+        A list of scrape configurations.
+      '';
+    };
+
+    alertmanagers = mkOption {
+      type = types.listOf types.attrs;
+      example = literalExpression ''
+        [ {
+          scheme = "https";
+          path_prefix = "/alertmanager";
+          static_configs = [ {
+            targets = [
+              "prometheus.domain.tld"
+            ];
+          } ];
+        } ]
+      '';
+      default = [ ];
+      description = lib.mdDoc ''
+        A list of alertmanagers to send alerts to.
+        See [the official documentation](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config) for more information.
+      '';
+    };
+
+    alertmanagerNotificationQueueCapacity = mkOption {
+      type = types.int;
+      default = 10000;
+      description = lib.mdDoc ''
+        The capacity of the queue for pending alert manager notifications.
+      '';
+    };
+
+    webExternalUrl = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "https://example.com/";
+      description = lib.mdDoc ''
+        The URL under which Prometheus is externally reachable (for example,
+        if Prometheus is served via a reverse proxy).
+      '';
+    };
+
+    webConfigFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Specifies which file should be used as web.config.file and passed on startup.
+        See https://prometheus.io/docs/prometheus/latest/configuration/https/ for valid options.
+      '';
+    };
+
+    checkConfig = mkOption {
+      type = with types; either bool (enum [ "syntax-only" ]);
+      default = true;
+      example = "syntax-only";
+      description = lib.mdDoc ''
+        Check configuration with `promtool check`. The call to `promtool` is
+        subject to sandboxing by Nix.
+
+        If you use credentials stored in external files
+        (`password_file`, `bearer_token_file`, etc),
+        they will not be visible to `promtool`
+        and it will report errors, despite a correct configuration.
+        To resolve this, you may set this option to `"syntax-only"`
+        in order to only syntax check the Prometheus configuration.
+      '';
+    };
+
+    retentionTime = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "15d";
+      description = lib.mdDoc ''
+        How long to retain samples in storage.
+      '';
+    };
+  };
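+
+  # Rough usage sketch (placeholder values) of the options declared above: a
+  # minimal instance that scrapes its own metrics endpoint could look like
+  #
+  #   services.prometheus = {
+  #     enable = true;
+  #     scrapeConfigs = [{
+  #       job_name = "prometheus";
+  #       static_configs = [{ targets = [ "localhost:9090" ]; }];
+  #     }];
+  #   };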
+
+  config = mkIf cfg.enable {
+    assertions = [
+      (
+        let
+          # Match something with dots (an IPv4 address) or something ending in
+          # a square bracket (an IPv6 address) followed by a port number.
+          legacy = builtins.match "(.*\\..*|.*]):([[:digit:]]+)" cfg.listenAddress;
+        in
+        {
+          assertion = legacy == null;
+          message = ''
+            Do not specify the port for Prometheus to listen on in the
+            listenAddress option; use the port option instead:
+              services.prometheus.listenAddress = "${builtins.elemAt legacy 0}";
+              services.prometheus.port = ${builtins.elemAt legacy 1};
+          '';
+        }
+      )
+    ];
+
+    users.groups.prometheus.gid = config.ids.gids.prometheus;
+    users.users.prometheus = {
+      description = "Prometheus daemon user";
+      uid = config.ids.uids.prometheus;
+      group = "prometheus";
+    };
+    environment.etc."prometheus/prometheus.yaml" = mkIf cfg.enableReload {
+      source = prometheusYml;
+    };
+    systemd.services.prometheus = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/prometheus" +
+          optionalString (length cmdlineArgs != 0) (" \\\n  " +
+            concatStringsSep " \\\n  " cmdlineArgs);
+        ExecReload = mkIf cfg.enableReload "+${reload}/bin/reload-prometheus";
+        User = "prometheus";
+        Restart = "always";
+        RuntimeDirectory = "prometheus";
+        RuntimeDirectoryMode = "0700";
+        WorkingDirectory = workingDir;
+        StateDirectory = cfg.stateDir;
+        StateDirectoryMode = "0700";
+        # Hardening
+        AmbientCapabilities = lib.mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = if (cfg.port < 1024) then [ "CAP_NET_BIND_SERVICE" ] else [ "" ];
+        DeviceAllow = [ "/dev/null rw" ];
+        DevicePolicy = "strict";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "full";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+      };
+    };
+    # prometheus-config-reload will activate after prometheus. However, what we
+    # don't want is that on startup it immediately reloads prometheus because
+    # prometheus itself might have just started.
+    #
+    # Instead we only want to reload prometheus when the config file has
+    # changed. So on startup prometheus-config-reload will just output a
+    # harmless message and then stay active (RemainAfterExit).
+    #
+    # Then, when the config file has changed, switch-to-configuration notices
+    # that this service has changed (restartTriggers) and needs to be reloaded
+    # (reloadIfChanged). The reload command then reloads prometheus.
+    systemd.services.prometheus-config-reload = mkIf cfg.enableReload {
+      wantedBy = [ "prometheus.service" ];
+      after = [ "prometheus.service" ];
+      reloadIfChanged = true;
+      restartTriggers = [ prometheusYml ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        TimeoutSec = 60;
+        ExecStart = "${pkgs.logger}/bin/logger 'prometheus-config-reload will only reload prometheus when reloaded itself.'";
+        ExecReload = [ "${triggerReload}/bin/trigger-reload-prometheus" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md
new file mode 100644
index 000000000000..34fadecadc74
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.md
@@ -0,0 +1,180 @@
+# Prometheus exporters {#module-services-prometheus-exporters}
+
+Prometheus exporters provide metrics for the
+[prometheus monitoring system](https://prometheus.io).
+
+## Configuration {#module-services-prometheus-exporters-configuration}
+
+One of the most common exporters is the
+[node exporter](https://github.com/prometheus/node_exporter),
+which provides hardware and OS metrics from the host it's
+running on. It can be configured as follows:
+```
+  services.prometheus.exporters.node = {
+    enable = true;
+    port = 9100;
+    enabledCollectors = [
+      "logind"
+      "systemd"
+    ];
+    disabledCollectors = [
+      "textfile"
+    ];
+    openFirewall = true;
+    firewallFilter = "-i br0 -p tcp -m tcp --dport 9100";
+  };
+```
+The exporter now serves all metrics from the collectors that are explicitly
+enabled as well as the ones that are
+[enabled by default](https://github.com/prometheus/node_exporter#enabled-by-default)
+via HTTP under `/metrics`. In this
+example the firewall only allows incoming connections to the
+exporter's port on the bridge interface `br0` (the bridge itself
+has to be configured separately, of course). For more information about
+configuration see `man configuration.nix` or search through
+the [available options](https://nixos.org/nixos/options.html#prometheus.exporters).
+
+Prometheus can now be configured to consume the metrics produced by the exporter:
+```
+    services.prometheus = {
+      # ...
+
+      scrapeConfigs = [
+        {
+          job_name = "node";
+          static_configs = [{
+            targets = [ "localhost:${toString config.services.prometheus.exporters.node.port}" ];
+          }];
+        }
+      ];
+
+      # ...
+    };
+```
+
+## Adding a new exporter {#module-services-prometheus-exporters-new-exporter}
+
+To add a new exporter, it has to be packaged first (see
+`nixpkgs/pkgs/servers/monitoring/prometheus/` for
+examples), then a module can be added. The postfix exporter is used in this
+example:
+
+  - Some default options for all exporters are provided by
+    `nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix`:
+
+      - `enable`
+      - `port`
+      - `listenAddress`
+      - `extraFlags`
+      - `openFirewall`
+      - `firewallFilter`
+      - `user`
+      - `group`
+  - As there is already a package available, the module can now be added. This
+    is accomplished by adding a new file to the
+    `nixos/modules/services/monitoring/prometheus/exporters/`
+    directory, which will be called postfix.nix and contains all exporter
+    specific options and configuration:
+    ```
+    # nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
+    { config, lib, pkgs, options }:
+
+    with lib;
+
+    let
+      # for convenience we define cfg here
+      cfg = config.services.prometheus.exporters.postfix;
+    in
+    {
+      port = 9154; # The postfix exporter listens on this port by default
+
+      # `extraOpts` is an attribute set which contains additional options
+      # (and optional overrides for default options).
+      # Note that this attribute is optional.
+      extraOpts = {
+        telemetryPath = mkOption {
+          type = types.str;
+          default = "/metrics";
+          description = ''
+            Path under which to expose metrics.
+          '';
+        };
+        logfilePath = mkOption {
+          type = types.path;
+          default = /var/log/postfix_exporter_input.log;
+          example = /var/log/mail.log;
+          description = ''
+            Path where Postfix writes log entries.
+            This file will be truncated by this exporter!
+          '';
+        };
+        showqPath = mkOption {
+          type = types.path;
+          default = /var/spool/postfix/public/showq;
+          example = /var/lib/postfix/queue/public/showq;
+          description = ''
+            Path at which Postfix places its showq socket.
+          '';
+        };
+      };
+
+      # `serviceOpts` is an attribute set which contains configuration
+      # for the exporter's systemd service. One of
+      # `serviceOpts.script` and `serviceOpts.serviceConfig.ExecStart`
+      # has to be specified here. This will be merged with the default
+      # service configuration.
+      # Note that by default 'DynamicUser' is 'true'.
+      serviceOpts = {
+        serviceConfig = {
+          DynamicUser = false;
+          ExecStart = ''
+            ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
+              --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+              --web.telemetry-path ${cfg.telemetryPath} \
+              ${concatStringsSep " \\\n  " cfg.extraFlags}
+          '';
+        };
+      };
+    }
+    ```
+  - This should already be enough for the postfix exporter. Additionally, one
+    could now add assertions and conditional default values. This can be done
+    in the 'meta-module' that combines all exporter definitions and generates
+    the submodules:
+    `nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix`
+    (a short sketch follows below).
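+
+    For instance, a conditional default can be appended to the `config` merge
+    in that meta-module. A sketch along the lines of what it already does for
+    the postfix exporter:
+    ```
+    (mkIf config.services.postfix.enable {
+      services.prometheus.exporters.postfix.group =
+        mkDefault config.services.postfix.setgidGroup;
+    })
+    ```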
+
+## Updating an exporter module {#module-services-prometheus-exporters-update-exporter-module}
+
+Should an exporter option change at some point, it is possible to add
+information about the change to the exporter definition similar to
+`nixpkgs/nixos/modules/rename.nix`:
+```
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nginx;
+in
+{
+  port = 9113;
+  extraOpts = {
+    # additional module options
+    # ...
+  };
+  serviceOpts = {
+    # service configuration
+    # ...
+  };
+  imports = [
+    # 'services.prometheus.exporters.nginx.telemetryEndpoint' -> 'services.prometheus.exporters.nginx.telemetryPath'
+    (mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ])
+
+    # removed option 'services.prometheus.exporters.nginx.insecure'
+    (mkRemovedOptionModule [ "insecure" ] ''
+      This option was replaced by 'prometheus.exporters.nginx.sslVerify' which defaults to true.
+    '')
+    ({ options.warnings = options.warnings; })
+  ];
+}
+```
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix
new file mode 100644
index 000000000000..f89522c09864
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters.nix
@@ -0,0 +1,442 @@
+{ config, pkgs, lib, options, ... }:
+
+let
+  inherit (lib) concatStrings foldl foldl' genAttrs literalExpression maintainers
+    mapAttrs mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption
+    optional types mkOptionDefault flip attrNames;
+
+  cfg = config.services.prometheus.exporters;
+
+  # each attribute in `exporterOpts` is expected to have specified:
+  #   - port        (types.int):   port on which the exporter listens
+  #   - serviceOpts (types.attrs): config that is merged with the
+  #                                default definition of the exporter's
+  #                                systemd service
+  #   - extraOpts   (types.attrs): extra configuration options to
+  #                                configure the exporter with, which
+  #                                are appended to the default options
+  #
+  #  Note that `extraOpts` is optional, but a script for the exporter's
+  #  systemd service must be provided by specifying either
+  #  `serviceOpts.script` or `serviceOpts.serviceConfig.ExecStart`
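+  #
+  #  A minimal exporter definition therefore looks roughly like this
+  #  (sketch; "foo" is a hypothetical exporter name):
+  #
+  #    {
+  #      port = 9999;
+  #      serviceOpts.serviceConfig.ExecStart = "${pkgs.foo-exporter}/bin/foo-exporter";
+  #    }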
+
+  exporterOpts = (genAttrs [
+    "apcupsd"
+    "artifactory"
+    "bind"
+    "bird"
+    "bitcoin"
+    "blackbox"
+    "buildkite-agent"
+    "collectd"
+    "dmarc"
+    "dnsmasq"
+    "domain"
+    "dovecot"
+    "fastly"
+    "flow"
+    "fritzbox"
+    "graphite"
+    "idrac"
+    "imap-mailstat"
+    "influxdb"
+    "ipmi"
+    "jitsi"
+    "json"
+    "junos-czerwonk"
+    "kea"
+    "keylight"
+    "knot"
+    "lnd"
+    "mail"
+    "mikrotik"
+    "minio"
+    "modemmanager"
+    "mysqld"
+    "nextcloud"
+    "nginx"
+    "nginxlog"
+    "node"
+    "nut"
+    "openldap"
+    "openvpn"
+    "pgbouncer"
+    "php-fpm"
+    "pihole"
+    "postfix"
+    "postgres"
+    "process"
+    "pve"
+    "py-air-control"
+    "redis"
+    "rspamd"
+    "rtl_433"
+    "sabnzbd"
+    "scaphandre"
+    "script"
+    "shelly"
+    "smartctl"
+    "smokeping"
+    "snmp"
+    "sql"
+    "statsd"
+    "surfboard"
+    "systemd"
+    "tor"
+    "unbound"
+    "unifi"
+    "unpoller"
+    "v2ray"
+    "varnish"
+    "wireguard"
+    "zfs"
+  ]
+    (name:
+      import (./. + "/exporters/${name}.nix") { inherit config lib pkgs options; }
+    )) // (mapAttrs
+    (name: params:
+      import (./. + "/exporters/${params.name}.nix") { inherit config lib pkgs options; type = params.type ; })
+    {
+      exportarr-bazarr = {
+        name = "exportarr";
+        type = "bazarr";
+      };
+      exportarr-lidarr = {
+        name = "exportarr";
+        type = "lidarr";
+      };
+      exportarr-prowlarr = {
+        name = "exportarr";
+        type = "prowlarr";
+      };
+      exportarr-radarr = {
+        name = "exportarr";
+        type = "radarr";
+      };
+      exportarr-readarr = {
+        name = "exportarr";
+        type = "readarr";
+      };
+      exportarr-sonarr = {
+        name = "exportarr";
+        type = "sonarr";
+      };
+    }
+  );
+
+  mkExporterOpts = ({ name, port }: {
+    enable = mkEnableOption (lib.mdDoc "the prometheus ${name} exporter");
+    port = mkOption {
+      type = types.port;
+      default = port;
+      description = lib.mdDoc ''
+        Port to listen on.
+      '';
+    };
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc ''
+        Address to listen on.
+      '';
+    };
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra commandline options to pass to the ${name} exporter.
+      '';
+    };
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open port in firewall for incoming connections.
+      '';
+    };
+    firewallFilter = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = literalExpression ''
+        "-i eth0 -p tcp -m tcp --dport ${toString port}"
+      '';
+      description = lib.mdDoc ''
+        Specify a filter for iptables to use when
+        {option}`services.prometheus.exporters.${name}.openFirewall`
+        is true. It is used as `ip46tables -I nixos-fw firewallFilter -j nixos-fw-accept`.
+      '';
+    };
+    user = mkOption {
+      type = types.str;
+      default = "${name}-exporter";
+      description = lib.mdDoc ''
+        User name under which the ${name} exporter shall be run.
+      '';
+    };
+    group = mkOption {
+      type = types.str;
+      default = "${name}-exporter";
+      description = lib.mdDoc ''
+        Group under which the ${name} exporter shall be run.
+      '';
+    };
+  });
+
+  mkSubModule = { name, port, extraOpts, imports }: {
+    ${name} = mkOption {
+      type = types.submodule [{
+        inherit imports;
+        options = (mkExporterOpts {
+          inherit name port;
+        } // extraOpts);
+      } ({ config, ... }: mkIf config.openFirewall {
+        firewallFilter = mkDefault "-p tcp -m tcp --dport ${toString config.port}";
+      })];
+      internal = true;
+      default = {};
+    };
+  };
+
+  mkSubModules = (foldl' (a: b: a//b) {}
+    (mapAttrsToList (name: opts: mkSubModule {
+      inherit name;
+      inherit (opts) port;
+      extraOpts = opts.extraOpts or {};
+      imports = opts.imports or [];
+    }) exporterOpts)
+  );
+
+  mkExporterConf = { name, conf, serviceOpts }:
+    let
+      enableDynamicUser = serviceOpts.serviceConfig.DynamicUser or true;
+    in
+    mkIf conf.enable {
+      warnings = conf.warnings or [];
+      users.users."${name}-exporter" = (mkIf (conf.user == "${name}-exporter" && !enableDynamicUser) {
+        description = "Prometheus ${name} exporter service user";
+        isSystemUser = true;
+        inherit (conf) group;
+      });
+      users.groups = (mkIf (conf.group == "${name}-exporter" && !enableDynamicUser) {
+        "${name}-exporter" = {};
+      });
+      networking.firewall.extraCommands = mkIf conf.openFirewall (concatStrings [
+        "ip46tables -A nixos-fw ${conf.firewallFilter} "
+        "-m comment --comment ${name}-exporter -j nixos-fw-accept"
+      ]);
+      systemd.services."prometheus-${name}-exporter" = mkMerge ([{
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig.Restart = mkDefault "always";
+        serviceConfig.PrivateTmp = mkDefault true;
+        serviceConfig.WorkingDirectory = mkDefault /tmp;
+        serviceConfig.DynamicUser = mkDefault enableDynamicUser;
+        serviceConfig.User = mkDefault conf.user;
+        serviceConfig.Group = conf.group;
+        # Hardening
+        serviceConfig.CapabilityBoundingSet = mkDefault [ "" ];
+        serviceConfig.DeviceAllow = [ "" ];
+        serviceConfig.LockPersonality = true;
+        serviceConfig.MemoryDenyWriteExecute = true;
+        serviceConfig.NoNewPrivileges = true;
+        serviceConfig.PrivateDevices = mkDefault true;
+        serviceConfig.ProtectClock = mkDefault true;
+        serviceConfig.ProtectControlGroups = true;
+        serviceConfig.ProtectHome = true;
+        serviceConfig.ProtectHostname = true;
+        serviceConfig.ProtectKernelLogs = true;
+        serviceConfig.ProtectKernelModules = true;
+        serviceConfig.ProtectKernelTunables = true;
+        serviceConfig.ProtectSystem = mkDefault "strict";
+        serviceConfig.RemoveIPC = true;
+        serviceConfig.RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        serviceConfig.RestrictNamespaces = true;
+        serviceConfig.RestrictRealtime = true;
+        serviceConfig.RestrictSUIDSGID = true;
+        serviceConfig.SystemCallArchitectures = "native";
+        serviceConfig.UMask = "0077";
+      } serviceOpts ]);
+  };
+in
+{
+
+  imports = (lib.forEach [ "blackboxExporter" "collectdExporter" "fritzboxExporter"
+                   "jsonExporter" "minioExporter" "nginxExporter" "nodeExporter"
+                   "snmpExporter" "unifiExporter" "varnishExporter" ]
+       (opt: lib.mkRemovedOptionModule [ "services" "prometheus" "${opt}" ] ''
+         The prometheus exporters are now configured using `services.prometheus.exporters'.
+         See the 18.03 release notes for more information.
+       '' ));
+
+  options.services.prometheus.exporters = mkOption {
+    type = types.submodule {
+      options = (mkSubModules);
+      imports = [
+        ../../../misc/assertions.nix
+        (lib.mkRenamedOptionModule [ "unifi-poller" ] [ "unpoller" ])
+      ];
+    };
+    description = lib.mdDoc "Prometheus exporter configuration";
+    default = {};
+    example = literalExpression ''
+      {
+        node = {
+          enable = true;
+          enabledCollectors = [ "systemd" ];
+        };
+        varnish.enable = true;
+      }
+    '';
+  };
+
+  config = mkMerge ([{
+    assertions = [ {
+      assertion = cfg.ipmi.enable -> (cfg.ipmi.configFile != null) -> (
+        !(lib.hasPrefix "/tmp/" cfg.ipmi.configFile)
+      );
+      message = ''
+        Config file specified in `services.prometheus.exporters.ipmi.configFile' must
+          not reside within /tmp - it won't be visible to the systemd service.
+      '';
+    } {
+      assertion = cfg.ipmi.enable -> (cfg.ipmi.webConfigFile != null) -> (
+        !(lib.hasPrefix "/tmp/" cfg.ipmi.webConfigFile)
+      );
+      message = ''
+        Config file specified in `services.prometheus.exporters.ipmi.webConfigFile' must
+          not reside within /tmp - it won't be visible to the systemd service.
+      '';
+    } {
+      assertion = cfg.snmp.enable -> (
+        (cfg.snmp.configurationPath == null) != (cfg.snmp.configuration == null)
+      );
+      message = ''
+        Please ensure you have either `services.prometheus.exporters.snmp.configuration'
+          or `services.prometheus.exporters.snmp.configurationPath' set!
+      '';
+    } {
+      assertion = cfg.mikrotik.enable -> (
+        (cfg.mikrotik.configFile == null) != (cfg.mikrotik.configuration == null)
+      );
+      message = ''
+        Please specify either `services.prometheus.exporters.mikrotik.configuration'
+          or `services.prometheus.exporters.mikrotik.configFile'.
+      '';
+    } {
+      assertion = cfg.mail.enable -> (
+        (cfg.mail.configFile == null) != (cfg.mail.configuration == null)
+      );
+      message = ''
+        Please specify either 'services.prometheus.exporters.mail.configuration'
+          or 'services.prometheus.exporters.mail.configFile'.
+      '';
+    } {
+      assertion = cfg.mysqld.runAsLocalSuperUser -> config.services.mysql.enable;
+      message = ''
+        The exporter is configured to run as 'services.mysql.user', but
+          'services.mysql.enable' is set to false.
+      '';
+    } {
+      assertion = cfg.nextcloud.enable -> (
+        (cfg.nextcloud.passwordFile == null) != (cfg.nextcloud.tokenFile == null)
+      );
+      message = ''
+        Please specify either 'services.prometheus.exporters.nextcloud.passwordFile' or
+          'services.prometheus.exporters.nextcloud.tokenFile'
+      '';
+    } {
+      assertion = cfg.pgbouncer.enable -> (
+        (cfg.pgbouncer.connectionStringFile != null || cfg.pgbouncer.connectionString != "")
+      );
+      message = ''
+        PgBouncer exporter needs either connectionStringFile or connectionString configured.
+      '';
+    } {
+      assertion = cfg.pgbouncer.enable -> (
+        config.services.pgbouncer.ignoreStartupParameters != null && builtins.match ".*extra_float_digits.*" config.services.pgbouncer.ignoreStartupParameters != null
+      );
+      message = ''
+        Prometheus PgBouncer exporter requires including `extra_float_digits` in services.pgbouncer.ignoreStartupParameters.
+
+        Example:
+        services.pgbouncer.ignoreStartupParameters = "extra_float_digits";
+
+        See https://github.com/prometheus-community/pgbouncer_exporter#pgbouncer-configuration
+      '';
+    } {
+      assertion = cfg.sql.enable -> (
+        (cfg.sql.configFile == null) != (cfg.sql.configuration == null)
+      );
+      message = ''
+        Please specify either 'services.prometheus.exporters.sql.configuration' or
+          'services.prometheus.exporters.sql.configFile'
+      '';
+    } {
+      assertion = cfg.scaphandre.enable -> (pkgs.stdenv.targetPlatform.isx86_64 == true);
+      message = ''
+        Scaphandre only supports x86_64 architectures.
+      '';
+    } {
+      assertion = cfg.scaphandre.enable -> ((lib.kernel.whenHelpers pkgs.linux.version).whenOlder "5.11" true).condition == false;
+      message = ''
+        Scaphandre requires a kernel version newer than '5.11', '${pkgs.linux.version}' given.
+      '';
+    } {
+      assertion = cfg.scaphandre.enable -> (builtins.elem "intel_rapl_common" config.boot.kernelModules);
+      message = ''
+        Scaphandre needs the 'intel_rapl_common' kernel module to be enabled. Please add it to 'boot.kernelModules'.
+      '';
+    } {
+      assertion = cfg.idrac.enable -> (
+        (cfg.idrac.configurationPath == null) != (cfg.idrac.configuration == null)
+      );
+      message = ''
+        Please ensure you have either `services.prometheus.exporters.idrac.configuration'
+          or `services.prometheus.exporters.idrac.configurationPath' set!
+      '';
+    } ] ++ (flip map (attrNames exporterOpts) (exporter: {
+      assertion = cfg.${exporter}.firewallFilter != null -> cfg.${exporter}.openFirewall;
+      message = ''
+        The `firewallFilter'-option of exporter ${exporter} doesn't have any effect unless
+        `openFirewall' is set to `true'!
+      '';
+    })) ++ config.services.prometheus.exporters.assertions;
+    warnings = [
+      (mkIf (config.services.prometheus.exporters.idrac.enable && config.services.prometheus.exporters.idrac.configurationPath != null) ''
+          Configuration file in `services.prometheus.exporters.idrac.configurationPath` may override
+          `services.prometheus.exporters.idrac.listenAddress` and/or `services.prometheus.exporters.idrac.port`.
+          Consider using `services.prometheus.exporters.idrac.configuration` instead.
+        ''
+      )
+      (mkIf
+        (cfg.pgbouncer.enable && cfg.pgbouncer.connectionString != "") ''
+          config.services.prometheus.exporters.pgbouncer.connectionString is insecure. Use connectionStringFile instead.
+        ''
+      )
+      (mkIf
+        (cfg.pgbouncer.enable && config.services.pgbouncer.authType != "any") ''
+          An admin user (with a password or passwordless) MUST exist in services.pgbouncer.authFile if an authType other than 'any' is used.
+        ''
+      )
+    ] ++ config.services.prometheus.exporters.warnings;
+  }] ++ [(mkIf config.services.minio.enable {
+    services.prometheus.exporters.minio.minioAddress = mkDefault "http://localhost:9000";
+    services.prometheus.exporters.minio.minioAccessKey = mkDefault config.services.minio.accessKey;
+    services.prometheus.exporters.minio.minioAccessSecret = mkDefault config.services.minio.secretKey;
+  })] ++ [(mkIf config.services.prometheus.exporters.rtl_433.enable {
+    hardware.rtl-sdr.enable = mkDefault true;
+  })] ++ [(mkIf config.services.postfix.enable {
+    services.prometheus.exporters.postfix.group = mkDefault config.services.postfix.setgidGroup;
+  })] ++ (mapAttrsToList (name: conf:
+    mkExporterConf {
+      inherit name;
+      inherit (conf) serviceOpts;
+      conf = cfg.${name};
+    }) exporterOpts)
+  );
+
+  meta = {
+    doc = ./exporters.md;
+    maintainers = [ maintainers.willibutz ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix
new file mode 100644
index 000000000000..a8a9f84ea8ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/apcupsd.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.apcupsd;
+in
+{
+  port = 9162;
+  extraOpts = {
+    apcupsdAddress = mkOption {
+      type = types.str;
+      default = ":3551";
+      description = lib.mdDoc ''
+        Address of the apcupsd Network Information Server (NIS).
+      '';
+    };
+
+    apcupsdNetwork = mkOption {
+      type = types.enum ["tcp" "tcp4" "tcp6"];
+      default = "tcp";
+      description = lib.mdDoc ''
+        Network of the apcupsd Network Information Server (NIS): one of "tcp", "tcp4", or "tcp6".
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-apcupsd-exporter}/bin/apcupsd_exporter \
+          -telemetry.addr ${cfg.listenAddress}:${toString cfg.port} \
+          -apcupsd.addr ${cfg.apcupsdAddress} \
+          -apcupsd.network ${cfg.apcupsdNetwork} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix
new file mode 100644
index 000000000000..bc67fe59b3b8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/artifactory.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.artifactory;
+in
+{
+  port = 9531;
+  extraOpts = {
+    scrapeUri = mkOption {
+      type = types.str;
+      default = "http://localhost:8081/artifactory";
+      description = lib.mdDoc ''
+        URI on which to scrape JFrog Artifactory.
+      '';
+    };
+
+    artiUsername = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Username for authentication against JFrog Artifactory API.
+      '';
+    };
+
+    artiPassword = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        Password for authentication against JFrog Artifactory API.
+        One of the password or access token needs to be set.
+      '';
+    };
+
+    artiAccessToken = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        Access token for authentication against JFrog Artifactory API.
+        One of the password or access token needs to be set.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-artifactory-exporter}/bin/artifactory_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --artifactory.scrape-uri ${cfg.scrapeUri} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      Environment = [
+        "ARTI_USERNAME=${cfg.artiUsername}"
+        "ARTI_PASSWORD=${cfg.artiPassword}"
+        "ARTI_ACCESS_TOKEN=${cfg.artiAccessToken}"
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bind.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bind.nix
new file mode 100644
index 000000000000..bd2003f06504
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bind.nix
@@ -0,0 +1,54 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.bind;
+in
+{
+  port = 9119;
+  extraOpts = {
+    bindURI = mkOption {
+      type = types.str;
+      default = "http://localhost:8053/";
+      description = lib.mdDoc ''
+        HTTP XML API address of a BIND server.
+      '';
+    };
+    bindTimeout = mkOption {
+      type = types.str;
+      default = "10s";
+      description = lib.mdDoc ''
+        Timeout for trying to get stats from Bind.
+      '';
+    };
+    bindVersion = mkOption {
+      type = types.enum [ "xml.v2" "xml.v3" "auto" ];
+      default = "auto";
+      description = lib.mdDoc ''
+        BIND statistics version. Can be detected automatically.
+      '';
+    };
+    bindGroups = mkOption {
+      type = types.listOf (types.enum [ "server" "view" "tasks" ]);
+      default = [ "server" "view" ];
+      description = lib.mdDoc ''
+        List of statistics to collect. Available: [server, view, tasks]
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-bind-exporter}/bin/bind_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --bind.pid-file /var/run/named/named.pid \
+          --bind.timeout ${toString cfg.bindTimeout} \
+          --bind.stats-url ${cfg.bindURI} \
+          --bind.stats-version ${cfg.bindVersion} \
+          --bind.stats-groups ${concatStringsSep "," cfg.bindGroups} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bird.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bird.nix
new file mode 100644
index 000000000000..5f6c36f4c567
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bird.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.bird;
+in
+{
+  port = 9324;
+  extraOpts = {
+    birdVersion = mkOption {
+      type = types.enum [ 1 2 ];
+      default = 2;
+      description = lib.mdDoc ''
+        Specifies whether BIRD1 or BIRD2 is in use.
+      '';
+    };
+    birdSocket = mkOption {
+      type = types.path;
+      default = "/run/bird/bird.ctl";
+      description = lib.mdDoc ''
+        Path to BIRD2 (or BIRD1 v4) socket.
+      '';
+    };
+    newMetricFormat = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Enable the new more-generic metric format.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      SupplementaryGroups = singleton (if cfg.birdVersion == 1 then "bird" else "bird2");
+      ExecStart = ''
+        ${pkgs.prometheus-bird-exporter}/bin/bird_exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -bird.socket ${cfg.birdSocket} \
+          -bird.v2=${if cfg.birdVersion == 2 then "true" else "false"} \
+          -format.new=${if cfg.newMetricFormat then "true" else "false"} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      RestrictAddressFamilies = [
+        # Need AF_UNIX to collect data
+        "AF_UNIX"
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix
new file mode 100644
index 000000000000..330d54126448
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/bitcoin.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.bitcoin;
+in
+{
+  port = 9332;
+  extraOpts = {
+    rpcUser = mkOption {
+      type = types.str;
+      default = "bitcoinrpc";
+      description = lib.mdDoc ''
+        RPC user name.
+      '';
+    };
+
+    rpcPasswordFile = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        File containing RPC password.
+      '';
+    };
+
+    rpcScheme = mkOption {
+      type = types.enum [ "http" "https" ];
+      default = "http";
+      description = lib.mdDoc ''
+        Whether to connect to bitcoind over http or https.
+      '';
+    };
+
+    rpcHost = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc ''
+        RPC host.
+      '';
+    };
+
+    rpcPort = mkOption {
+      type = types.port;
+      default = 8332;
+      description = lib.mdDoc ''
+        RPC port number.
+      '';
+    };
+
+    refreshSeconds = mkOption {
+      type = types.ints.unsigned;
+      default = 300;
+      description = lib.mdDoc ''
+        How often to ask bitcoind for metrics.
+      '';
+    };
+
+    extraEnv = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      description = lib.mdDoc ''
+        Extra environment variables for the exporter.
+      '';
+    };
+  };
+  serviceOpts = {
+    script = ''
+      export BITCOIN_RPC_PASSWORD=$(cat ${cfg.rpcPasswordFile})
+      exec ${pkgs.prometheus-bitcoin-exporter}/bin/bitcoind-monitor.py
+    '';
+
+    environment = {
+      BITCOIN_RPC_USER = cfg.rpcUser;
+      BITCOIN_RPC_SCHEME = cfg.rpcScheme;
+      BITCOIN_RPC_HOST = cfg.rpcHost;
+      BITCOIN_RPC_PORT = toString cfg.rpcPort;
+      METRICS_ADDR = cfg.listenAddress;
+      METRICS_PORT = toString cfg.port;
+      REFRESH_SECONDS = toString cfg.refreshSeconds;
+    } // cfg.extraEnv;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
new file mode 100644
index 000000000000..ce2c391de523
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/blackbox.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  logPrefix = "services.prometheus.exporter.blackbox";
+  cfg = config.services.prometheus.exporters.blackbox;
+
+  # This ensures that we can deal with string paths, path types and
+  # store-path strings with context.
+  coerceConfigFile = file:
+    if (builtins.isPath file) || (lib.isStorePath file) then
+      file
+    else
+      (lib.warn ''
+        ${logPrefix}: configuration file "${file}" is being copied to the nix-store.
+        If you would like to avoid that, please set enableConfigCheck to false.
+      '' /. + file);
+  checkConfigLocation = file:
+    if lib.hasPrefix "/tmp/" file then
+      throw
+      "${logPrefix}: configuration file must not reside within /tmp - it won't be visible to the systemd service."
+    else
+      file;
+  checkConfig = file:
+    pkgs.runCommand "checked-blackbox-exporter.conf" {
+      preferLocalBuild = true;
+      nativeBuildInputs = [ pkgs.buildPackages.prometheus-blackbox-exporter ];
+    } ''
+      ln -s ${coerceConfigFile file} $out
+      blackbox_exporter --config.check --config.file $out
+    '';
+in {
+  port = 9115;
+  extraOpts = {
+    configFile = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to configuration file.
+      '';
+    };
+    enableConfigCheck = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to run a correctness check for the configuration file. This depends
+        on the configuration file residing in the nix-store. Paths passed as strings will
+        be copied to the store.
+      '';
+    };
+  };
+
+  serviceOpts = let
+    adjustedConfigFile = if cfg.enableConfigCheck then
+      checkConfig cfg.configFile
+    else
+      checkConfigLocation cfg.configFile;
+  in {
+    serviceConfig = {
+      AmbientCapabilities = [ "CAP_NET_RAW" ]; # for ping probes
+      ExecStart = ''
+        ${pkgs.prometheus-blackbox-exporter}/bin/blackbox_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --config.file ${escapeShellArg adjustedConfigFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix
new file mode 100644
index 000000000000..0515b72b13f9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/buildkite-agent.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.buildkite-agent;
+in
+{
+  port = 9876;
+  extraOpts = {
+    tokenPath = mkOption {
+      type = types.nullOr types.path;
+      apply = final: if final == null then null else toString final;
+      description = lib.mdDoc ''
+        The token from your Buildkite "Agents" page.
+
+        A run-time path to the token file, which is supposed to be provisioned
+        outside of the Nix store.
+      '';
+    };
+    interval = mkOption {
+      type = types.str;
+      default = "30s";
+      example = "1min";
+      description = lib.mdDoc ''
+        How often to update metrics.
+      '';
+    };
+    endpoint = mkOption {
+      type = types.str;
+      default = "https://agent.buildkite.com/v3";
+      description = lib.mdDoc ''
+        The Buildkite Agent API endpoint.
+      '';
+    };
+    queues = mkOption {
+      type = with types; nullOr (listOf str);
+      default = null;
+      example = literalExpression ''[ "my-queue1" "my-queue2" ]'';
+      description = lib.mdDoc ''
+        Which specific queues to process.
+      '';
+    };
+  };
+  serviceOpts = {
+    script =
+      let
+        queues = concatStringsSep " " (map (q: "-queue ${q}") cfg.queues);
+      in
+      ''
+        export BUILDKITE_AGENT_TOKEN="$(cat ${toString cfg.tokenPath})"
+        exec ${pkgs.buildkite-agent-metrics}/bin/buildkite-agent-metrics \
+          -backend prometheus \
+          -interval ${cfg.interval} \
+          -endpoint ${cfg.endpoint} \
+          ${optionalString (cfg.queues != null) queues} \
+          -prometheus-addr "${cfg.listenAddress}:${toString cfg.port}" ${concatStringsSep " " cfg.extraFlags}
+      '';
+    serviceConfig = {
+      DynamicUser = false;
+      RuntimeDirectory = "buildkite-agent-metrics";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
new file mode 100644
index 000000000000..f67596f05a3a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/collectd.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.collectd;
+in
+{
+  port = 9103;
+  extraOpts = {
+    collectdBinary = {
+      enable = mkEnableOption (lib.mdDoc "collectd binary protocol receiver");
+
+      authFile = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = lib.mdDoc "File mapping user names to pre-shared keys (passwords).";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 25826;
+        description = lib.mdDoc "Network address on which to accept collectd binary network packets.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc ''
+          Address to listen on for binary network packets.
+        '';
+      };
+
+      securityLevel = mkOption {
+        type = types.enum ["None" "Sign" "Encrypt"];
+        default = "None";
+        description = lib.mdDoc ''
+          Minimum required security level for accepted packets.
+        '';
+      };
+    };
+
+    logFormat = mkOption {
+      type = types.enum [ "logfmt" "json" ];
+      default = "logfmt";
+      example = "json";
+      description = lib.mdDoc ''
+        Set the log format.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["debug" "info" "warn" "error" "fatal"];
+      default = "info";
+      description = lib.mdDoc ''
+        Only log messages with the given severity or above.
+      '';
+    };
+  };
+  serviceOpts = let
+    collectSettingsArgs = optionalString (cfg.collectdBinary.enable) ''
+      --collectd.listen-address ${cfg.collectdBinary.listenAddress}:${toString cfg.collectdBinary.port} \
+      --collectd.security-level ${cfg.collectdBinary.securityLevel} \
+    '';
+  in {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-collectd-exporter}/bin/collectd_exporter \
+          --log.format ${escapeShellArg cfg.logFormat} \
+          --log.level ${cfg.logLevel} \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${collectSettingsArgs} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
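A minimal sketch of enabling this exporter together with its binary-protocol receiver; the `127.0.0.1` bind address is only an example, and the top-level `enable` option is assumed to come from the shared exporter framework:

```
{
  services.prometheus.exporters.collectd = {
    enable = true;
    logFormat = "json";
    collectdBinary = {
      enable = true;
      # Accept collectd packets from the local host only in this example.
      listenAddress = "127.0.0.1";
      port = 25826;
    };
  };
}
```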
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix
new file mode 100644
index 000000000000..437cece588a7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dmarc.nix
@@ -0,0 +1,117 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.dmarc;
+
+  json = builtins.toJSON {
+    inherit (cfg) folders port;
+    listen_addr = cfg.listenAddress;
+    storage_path = "$STATE_DIRECTORY";
+    imap = (builtins.removeAttrs cfg.imap [ "passwordFile" ]) // { password = "$IMAP_PASSWORD"; use_ssl = true; };
+    poll_interval_seconds = cfg.pollIntervalSeconds;
+    deduplication_max_seconds = cfg.deduplicationMaxSeconds;
+    logging = {
+      version = 1;
+      disable_existing_loggers = false;
+    };
+  };
+in {
+  port = 9797;
+  extraOpts = {
+    imap = {
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          Hostname of IMAP server to connect to.
+        '';
+      };
+      port = mkOption {
+        type = types.port;
+        default = 993;
+        description = lib.mdDoc ''
+          Port of the IMAP server to connect to.
+        '';
+      };
+      username = mkOption {
+        type = types.str;
+        example = "postmaster@example.org";
+        description = lib.mdDoc ''
+          Login username for the IMAP connection.
+        '';
+      };
+      passwordFile = mkOption {
+        type = types.str;
+        example = "/run/secrets/dovecot_pw";
+        description = lib.mdDoc ''
+          File containing the login password for the IMAP connection.
+        '';
+      };
+    };
+    folders = {
+      inbox = mkOption {
+        type = types.str;
+        default = "INBOX";
+        description = lib.mdDoc ''
+          IMAP mailbox that is checked for incoming DMARC aggregate reports.
+        '';
+      };
+      done = mkOption {
+        type = types.str;
+        default = "Archive";
+        description = lib.mdDoc ''
+          IMAP mailbox that successfully processed reports are moved to.
+        '';
+      };
+      error = mkOption {
+        type = types.str;
+        default = "Invalid";
+        description = lib.mdDoc ''
+          IMAP mailbox to which mails that could not be processed are moved.
+        '';
+      };
+    };
+    pollIntervalSeconds = mkOption {
+      type = types.ints.unsigned;
+      default = 60;
+      description = lib.mdDoc ''
+        How often to poll the IMAP server in seconds.
+      '';
+    };
+    deduplicationMaxSeconds = mkOption {
+      type = types.ints.unsigned;
+      default = 604800;
+      defaultText = "7 days (in seconds)";
+      description = lib.mdDoc ''
+        How long individual report IDs are remembered, so that reports
+        delivered more than once are not counted twice.
+      '';
+    };
+    debug = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable debug logging (`--debug`).
+      '';
+    };
+  };
+  serviceOpts = {
+    path = with pkgs; [ envsubst coreutils ];
+    serviceConfig = {
+      StateDirectory = "prometheus-dmarc-exporter";
+      WorkingDirectory = "/var/lib/prometheus-dmarc-exporter";
+      ExecStart = "${pkgs.writeShellScript "setup-cfg" ''
+        export IMAP_PASSWORD="$(<${cfg.imap.passwordFile})"
+        envsubst \
+          -i ${pkgs.writeText "dmarc-exporter.json.template" json} \
+          -o ''${STATE_DIRECTORY}/dmarc-exporter.json
+
+        exec ${pkgs.dmarc-metrics-exporter}/bin/dmarc-metrics-exporter \
+          --configuration /var/lib/prometheus-dmarc-exporter/dmarc-exporter.json \
+          ${optionalString cfg.debug "--debug"}
+      ''}";
+    };
+  };
+}
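A hedged example of pointing the DMARC exporter at an IMAP mailbox; the host, user and secret path are placeholders:

```
{
  services.prometheus.exporters.dmarc = {
    enable = true;
    imap = {
      host = "imap.example.org";
      username = "postmaster@example.org";
      # Read at service start; keeps the secret out of the Nix store.
      passwordFile = "/run/secrets/dmarc-imap-password";
    };
    folders.inbox = "INBOX";
    pollIntervalSeconds = 120;
  };
}
```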
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
new file mode 100644
index 000000000000..ece42a34cb06
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dnsmasq.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.dnsmasq;
+in
+{
+  port = 9153;
+  extraOpts = {
+    dnsmasqListenAddress = mkOption {
+      type = types.str;
+      default = "localhost:53";
+      description = lib.mdDoc ''
+        Address on which dnsmasq listens.
+      '';
+    };
+    leasesPath = mkOption {
+      type = types.path;
+      default = "/var/lib/misc/dnsmasq.leases";
+      example = "/var/lib/dnsmasq/dnsmasq.leases";
+      description = lib.mdDoc ''
+        Path to the `dnsmasq.leases` file.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-dnsmasq-exporter}/bin/dnsmasq_exporter \
+          --listen ${cfg.listenAddress}:${toString cfg.port} \
+          --dnsmasq ${cfg.dnsmasqListenAddress} \
+          --leases_path ${escapeShellArg cfg.leasesPath} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/domain.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/domain.nix
new file mode 100644
index 000000000000..61e2fc80afde
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/domain.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.domain;
+in
+{
+  port = 9222;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-domain-exporter}/bin/domain_exporter \
+          --bind ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
new file mode 100644
index 000000000000..6fb438353a4c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/dovecot.nix
@@ -0,0 +1,92 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.dovecot;
+in
+{
+  port = 9166;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+    socketPath = mkOption {
+      type = types.path;
+      default = "/var/run/dovecot/stats";
+      example = "/var/run/dovecot2/old-stats";
+      description = lib.mdDoc ''
+        Path under which the stats socket is placed.
+        The user/group under which the exporter runs
+        should be able to access the socket in order
+        to scrape the metrics successfully.
+
+        Please keep in mind that the stats module has changed in
+        [Dovecot 2.3+](https://wiki2.dovecot.org/Upgrading/2.3) which
+        is not [compatible with this exporter](https://github.com/kumina/dovecot_exporter/issues/8).
+
+        The following extra config has to be passed to Dovecot to ensure that recent versions
+        work with this exporter:
+        ```
+        {
+          services.prometheus.exporters.dovecot.enable = true;
+          services.prometheus.exporters.dovecot.socketPath = "/var/run/dovecot2/old-stats";
+          services.dovecot2.mailPlugins.globally.enable = [ "old_stats" ];
+          services.dovecot2.extraConfig = '''
+            service old-stats {
+              unix_listener old-stats {
+                user = dovecot-exporter
+                group = dovecot-exporter
+                mode = 0660
+              }
+              fifo_listener old-stats-mail {
+                mode = 0660
+                user = dovecot
+                group = dovecot
+              }
+              fifo_listener old-stats-user {
+                mode = 0660
+                user = dovecot
+                group = dovecot
+              }
+            }
+            plugin {
+              old_stats_refresh = 30 secs
+              old_stats_track_cmds = yes
+            }
+          ''';
+        }
+        ```
+      '';
+    };
+    scopes = mkOption {
+      type = types.listOf types.str;
+      default = [ "user" ];
+      example = [ "user" "global" ];
+      description = lib.mdDoc ''
+        Stats scopes to query.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-dovecot-exporter}/bin/dovecot_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --dovecot.socket-path ${escapeShellArg cfg.socketPath} \
+          --dovecot.scopes ${concatStringsSep "," cfg.scopes} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      RestrictAddressFamilies = [
+        # Need AF_UNIX to collect data
+        "AF_UNIX"
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
new file mode 100644
index 000000000000..132209335410
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/exportarr.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, options, type }:
+
+let
+  cfg = config.services.prometheus.exporters."exportarr-${type}";
+  exportarrEnvironment = (
+    lib.mapAttrs (_: toString) cfg.environment
+  ) // {
+    PORT = toString cfg.port;
+    URL = cfg.url;
+    API_KEY_FILE = lib.mkIf (cfg.apiKeyFile != null) "%d/api-key";
+  };
+in
+{
+  port = 9708;
+  extraOpts = {
+    url = lib.mkOption {
+      type = lib.types.str;
+      default = "http://127.0.0.1";
+      description = lib.mdDoc ''
+        The full URL to Sonarr, Radarr, or Lidarr.
+      '';
+    };
+
+    apiKeyFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = lib.mdDoc ''
+        File containing the api-key.
+      '';
+    };
+
+    package = lib.mkPackageOptionMD pkgs "exportarr" { };
+
+    environment = lib.mkOption {
+      type = lib.types.attrsOf lib.types.str;
+      default = { };
+      description = lib.mdDoc ''
+        See [the configuration guide](https://github.com/onedr0p/exportarr#configuration) for available options.
+      '';
+      example = {
+        PROWLARR__BACKFILL = true;
+      };
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      LoadCredential = lib.optionalString (cfg.apiKeyFile != null) "api-key:${cfg.apiKeyFile}";
+      ExecStart = ''${cfg.package}/bin/exportarr ${type} "$@"'';
+      ProcSubset = "pid";
+      ProtectProc = "invisible";
+      SystemCallFilter = ["@system-service" "~@privileged"];
+    };
+    environment = exportarrEnvironment;
+  };
+}
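This module is parameterised over `type`, so it is presumably instantiated once per supported service (for example as `exportarr-sonarr`); assuming such an instance exists, a configuration sketch might be:

```
{
  services.prometheus.exporters.exportarr-sonarr = {
    enable = true;
    url = "http://127.0.0.1:8989";
    # Passed to the exporter via systemd LoadCredential, not the Nix store.
    apiKeyFile = "/run/secrets/sonarr-api-key";
  };
}
```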
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix
new file mode 100644
index 000000000000..36409caccf2e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fastly.nix
@@ -0,0 +1,41 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let cfg = config.services.prometheus.exporters.fastly;
+in
+{
+  port = 9118;
+  extraOpts = {
+    debug = mkEnableOption (lib.mdDoc "Debug logging mode for fastly-exporter");
+
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to a fastly-exporter configuration file.
+        An example can be generated with `fastly-exporter --config-file-example`.
+      '';
+      example = "./fastly-exporter-config.txt";
+    };
+
+    tokenPath = mkOption {
+      type = types.nullOr types.path;
+      apply = final: if final == null then null else toString final;
+      description = lib.mdDoc ''
+        A run-time path to the token file, which is supposed to be provisioned
+        outside of the Nix store.
+      '';
+    };
+  };
+  serviceOpts = {
+    script = ''
+      ${optionalString (cfg.tokenPath != null)
+      "export FASTLY_API_TOKEN=$(cat ${toString cfg.tokenPath})"}
+      ${pkgs.prometheus-fastly-exporter}/bin/fastly-exporter \
+        -listen http://${cfg.listenAddress}:${toString cfg.port} \
+        ${optionalString cfg.debug "-debug true"} \
+        ${optionalString (cfg.configFile != null) "-config-file ${cfg.configFile}"}
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/flow.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/flow.nix
new file mode 100644
index 000000000000..81099aaf1704
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/flow.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.flow;
+in {
+  port = 9590;
+  extraOpts = {
+    brokers = mkOption {
+      type = types.listOf types.str;
+      example = literalExpression ''[ "kafka.example.org:19092" ]'';
+      description = lib.mdDoc "List of Kafka brokers to connect to.";
+    };
+
+    asn = mkOption {
+      type = types.ints.positive;
+      example = 65542;
+      description = lib.mdDoc "The ASN being monitored.";
+    };
+
+    partitions = mkOption {
+      type = types.listOf types.int;
+      default = [];
+      description = lib.mdDoc ''
+        The partition numbers to consume; an empty list means all.
+      '';
+    };
+
+    topic = mkOption {
+      type = types.str;
+      example = "pmacct.acct";
+      description = lib.mdDoc "The Kafka topic to consume from.";
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = true;
+      ExecStart = ''
+        ${pkgs.prometheus-flow-exporter}/bin/flow-exporter \
+          -asn ${toString cfg.asn} \
+          -topic ${cfg.topic} \
+          -brokers ${concatStringsSep "," cfg.brokers} \
+          ${optionalString (cfg.partitions != []) "-partitions ${concatMapStringsSep "," toString cfg.partitions}"} \
+          -addr ${cfg.listenAddress}:${toString cfg.port} ${concatStringsSep " " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
new file mode 100644
index 000000000000..dc53d21406ff
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/fritzbox.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.fritzbox;
+in
+{
+  port = 9133;
+  extraOpts = {
+    gatewayAddress = mkOption {
+      type = types.str;
+      default = "fritz.box";
+      description = lib.mdDoc ''
+        The hostname or IP of the FRITZ!Box.
+      '';
+    };
+
+    gatewayPort = mkOption {
+      type = types.int;
+      default = 49000;
+      description = lib.mdDoc ''
+        The port of the FRITZ!Box UPnP service.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-fritzbox-exporter}/bin/exporter \
+          -listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -gateway-address ${cfg.gatewayAddress} \
+          -gateway-port ${toString cfg.gatewayPort} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/graphite.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/graphite.nix
new file mode 100644
index 000000000000..34a887104212
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/graphite.nix
@@ -0,0 +1,41 @@
+{ config, lib, pkgs, options }:
+
+let
+  cfg = config.services.prometheus.exporters.graphite;
+  format = pkgs.formats.yaml { };
+in
+{
+  port = 9108;
+  extraOpts = {
+    graphitePort = lib.mkOption {
+      type = lib.types.port;
+      default = 9109;
+      description = lib.mdDoc ''
+        Port to use for the graphite server.
+      '';
+    };
+    mappingSettings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = format.type;
+        options = { };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Mapping configuration for the exporter, see
+        <https://github.com/prometheus/graphite_exporter#yaml-config> for
+        available options.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-graphite-exporter}/bin/graphite_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --graphite.listen-address ${cfg.listenAddress}:${toString cfg.graphitePort} \
+          --graphite.mapping-config ${format.generate "mapping.yml" cfg.mappingSettings} \
+          ${lib.concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
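A sketch of a mapping configuration; the `mappings` list follows the upstream graphite_exporter YAML format, which `mappingSettings` passes through unchanged, and the metric path below is hypothetical:

```
{
  services.prometheus.exporters.graphite = {
    enable = true;
    graphitePort = 9109;
    mappingSettings.mappings = [{
      # Hypothetical Graphite metric path mapped to a labelled Prometheus metric.
      match = "servers.*.cpu.load";
      name = "cpu_load";
      labels.host = "$1";
    }];
  };
}
```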
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/idrac.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/idrac.nix
new file mode 100644
index 000000000000..f5604bc00ee0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/idrac.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+let
+  cfg = config.services.prometheus.exporters.idrac;
+
+  configFile = if cfg.configurationPath != null
+               then cfg.configurationPath
+               else pkgs.writeText "idrac.yml" (builtins.toJSON cfg.configuration);
+in
+{
+  port = 9348;
+  extraOpts = {
+    configurationPath = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      example = "/etc/prometheus-idrac-exporter/idrac.yml";
+      description = lib.mdDoc ''
+        Path to the service's config file. This path can either be a computed path in /nix/store or a path in the local filesystem.
+
+        The config file should NOT be stored in /nix/store as it will contain passwords and/or keys in plain text.
+
+        Mutually exclusive with `configuration` option.
+
+        Configuration reference: https://github.com/mrlhansen/idrac_exporter/#configuration
+      '';
+    };
+    configuration = mkOption {
+      type = types.nullOr types.attrs;
+      description = lib.mdDoc ''
+        Configuration for iDRAC exporter, as a nix attribute set.
+
+        Configuration reference: https://github.com/mrlhansen/idrac_exporter/#configuration
+
+        Mutually exclusive with `configurationPath` option.
+      '';
+      default = null;
+      example = {
+        timeout = 10;
+        retries = 1;
+        hosts = {
+          default = {
+            username = "username";
+            password = "password";
+          };
+        };
+        metrics = {
+          system = true;
+          sensors = true;
+          power = true;
+          sel = true;
+          storage = true;
+          memory = true;
+        };
+      };
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      LoadCredential = "configFile:${configFile}";
+      ExecStart = "${pkgs.prometheus-idrac-exporter}/bin/idrac_exporter -config %d/configFile";
+      Environment = [
+        "IDRAC_EXPORTER_LISTEN_ADDRESS=${cfg.listenAddress}"
+        "IDRAC_EXPORTER_LISTEN_PORT=${toString cfg.port}"
+      ];
+    };
+  };
+}
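Because the configuration carries BMC credentials, the `configurationPath` variant keeps them out of the world-readable store; a sketch with a placeholder path:

```
{
  services.prometheus.exporters.idrac = {
    enable = true;
    listenAddress = "127.0.0.1";
    # Provisioned outside the Nix store; mutually exclusive with `configuration`.
    configurationPath = "/etc/prometheus-idrac-exporter/idrac.yml";
  };
}
```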
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix
new file mode 100644
index 000000000000..c5024a258e71
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/imap-mailstat.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.imap-mailstat;
+  valueToString = value:
+    if (builtins.typeOf value == "string") then "\"${value}\""
+    else (
+      if (builtins.typeOf value == "int") then "${toString value}"
+      else (
+        if (builtins.typeOf value == "bool") then (if value then "true" else "false")
+        else "XXX ${toString value}"
+      )
+    );
+  createConfigFile = accounts:
+    # unfortunately there is no toTOML yet
+    # https://github.com/NixOS/nix/issues/3929
+    pkgs.writeText "imap-mailstat-exporter.conf" ''
+      ${concatStrings (attrValues (mapAttrs (name: config: "[[Accounts]]\nname = \"${name}\"\n${concatStrings (attrValues (mapAttrs (k: v: "${k} = ${valueToString v}\n") config))}") accounts))}
+    '';
+  mkOpt = type: description: mkOption {
+    type = types.nullOr type;
+    default = null;
+    description = lib.mdDoc description;
+  };
+  accountOptions.options = {
+    mailaddress = mkOpt types.str "Your email address (at the moment used as the login name)";
+    username = mkOpt types.str "Login username; if empty, the mailaddress value is used";
+    password = mkOpt types.str "Password for the IMAP login";
+    serveraddress = mkOpt types.str "Mail server name or address";
+    serverport = mkOpt types.int "IMAP port number (at the moment only TLS connections are supported)";
+    starttls = mkOpt types.bool "Set to true to use STARTTLS to start a TLS connection";
+  };
+in
+{
+  port = 8081;
+  extraOpts = {
+    oldestUnseenDate = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable a metric with the timestamp of the oldest unseen mail.
+      '';
+    };
+    accounts = mkOption {
+      type = types.attrsOf (types.submodule accountOptions);
+      default = {};
+      description = lib.mdDoc ''
+        Accounts to monitor
+      '';
+    };
+    configurationFile = mkOption {
+      type = types.path;
+      example = "/path/to/config-file";
+      description = lib.mdDoc ''
+        File containing the configuration
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-imap-mailstat-exporter}/bin/imap-mailstat-exporter \
+          -config ${createConfigFile cfg.accounts} \
+          ${optionalString cfg.oldestUnseenDate "-oldestunseendate"} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
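A sketch monitoring a single mailbox; the account details are placeholders. Note that everything under `accounts` is rendered into a config file in the Nix store, so the inline `password` is for illustration only:

```
{
  services.prometheus.exporters.imap-mailstat = {
    enable = true;
    oldestUnseenDate = true;
    accounts."mail@example.org" = {
      mailaddress = "mail@example.org";
      serveraddress = "imap.example.org";
      serverport = 993;
      # Illustration only: this value ends up in a world-readable store path.
      password = "correct horse battery staple";
    };
  };
}
```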
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix
new file mode 100644
index 000000000000..61c0c08d2250
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/influxdb.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.influxdb;
+in
+{
+  port = 9122;
+  extraOpts = {
+    sampleExpiry = mkOption {
+      type = types.str;
+      default = "5m";
+      example = "10m";
+      description = lib.mdDoc "How long a sample is valid for";
+    };
+    udpBindAddress = mkOption {
+      type = types.str;
+      default = ":9122";
+      example = "192.0.2.1:9122";
+      description = lib.mdDoc "Address on which to listen for udp packets";
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      RuntimeDirectory = "prometheus-influxdb-exporter";
+      ExecStart = ''
+        ${pkgs.prometheus-influxdb-exporter}/bin/influxdb_exporter \
+        --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+        --influxdb.sample-expiry ${cfg.sampleExpiry} ${concatStringsSep " " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix
new file mode 100644
index 000000000000..9adbe31d84d6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/ipmi.nix
@@ -0,0 +1,42 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  logPrefix = "services.prometheus.exporter.ipmi";
+  cfg = config.services.prometheus.exporters.ipmi;
+in {
+  port = 9290;
+
+  extraOpts = {
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to configuration file.
+      '';
+    };
+
+    webConfigFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to configuration file that can enable TLS or authentication.
+      '';
+    };
+  };
+
+  serviceOpts.serviceConfig = {
+    ExecStart = with cfg; concatStringsSep " " ([
+      "${pkgs.prometheus-ipmi-exporter}/bin/ipmi_exporter"
+      "--web.listen-address ${listenAddress}:${toString port}"
+    ] ++ optionals (cfg.webConfigFile != null) [
+      "--web.config.file ${escapeShellArg cfg.webConfigFile}"
+    ] ++ optionals (cfg.configFile != null) [
+      "--config.file ${escapeShellArg cfg.configFile}"
+    ] ++ extraFlags);
+
+    ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+    RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix
new file mode 100644
index 000000000000..024602718602
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/jitsi.nix
@@ -0,0 +1,40 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.jitsi;
+in
+{
+  port = 9700;
+  extraOpts = {
+    url = mkOption {
+      type = types.str;
+      default = "http://localhost:8080/colibri/stats";
+      description = lib.mdDoc ''
+        Jitsi Videobridge metrics URL to monitor.
+        This is usually /colibri/stats on port 8080 of the jitsi videobridge host.
+      '';
+    };
+    interval = mkOption {
+      type = types.str;
+      default = "30s";
+      example = "1min";
+      description = lib.mdDoc ''
+        How often to scrape new data
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-jitsi-exporter}/bin/jitsiexporter \
+          -url ${escapeShellArg cfg.url} \
+          -host ${cfg.listenAddress} \
+          -port ${toString cfg.port} \
+          -interval ${toString cfg.interval} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix
new file mode 100644
index 000000000000..473f3a7e47e3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/json.nix
@@ -0,0 +1,43 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.json;
+in
+{
+  port = 7979;
+  extraOpts = {
+    configFile = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to configuration file.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-json-exporter}/bin/json_exporter \
+          --config.file ${escapeShellArg cfg.configFile} \
+          --web.listen-address="${cfg.listenAddress}:${toString cfg.port}" \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+  imports = [
+    (mkRemovedOptionModule [ "url" ] ''
+      This option was removed. The URL of the endpoint serving JSON
+      must now be provided to the exporter by prometheus via the url
+      parameter `target`.
+
+      In prometheus a scrape URL would look like this:
+
+        http://some.json-exporter.host:7979/probe?target=https://example.com/some/json/endpoint
+
+      For more information, take a look at the official documentation
+      (https://github.com/prometheus-community/json_exporter) of the json_exporter.
+    '')
+     ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+}
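Since the JSON endpoint is now passed by Prometheus per scrape, a matching scrape job could look like the following sketch; `services.prometheus.scrapeConfigs` is the standard NixOS Prometheus option rather than part of this file, and both the endpoint URL and the local `json-exporter-config.yml` are placeholders:

```
{
  services.prometheus.exporters.json = {
    enable = true;
    configFile = ./json-exporter-config.yml;
  };
  services.prometheus.scrapeConfigs = [{
    job_name = "json";
    metrics_path = "/probe";
    params.target = [ "https://example.com/some/json/endpoint" ];
    static_configs = [{ targets = [ "localhost:7979" ]; }];
  }];
}
```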
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix
new file mode 100644
index 000000000000..15e0c9ecb177
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/junos-czerwonk.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.junos-czerwonk;
+
+  configFile = if cfg.configuration != null then configurationFile else (escapeShellArg cfg.configurationFile);
+
+  configurationFile = pkgs.writeText "prometheus-junos-czerwonk-exporter.conf" (builtins.toJSON (cfg.configuration));
+in
+{
+  port = 9326;
+  extraOpts = {
+    environmentFile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        File containing env-vars to be substituted into the exporter's config.
+      '';
+    };
+    configurationFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Specify the JunOS exporter configuration file to use.
+      '';
+    };
+    configuration = mkOption {
+      type = types.nullOr types.attrs;
+      default = null;
+      description = lib.mdDoc ''
+        JunOS exporter configuration as nix attribute set. Mutually exclusive with the `configurationFile` option.
+      '';
+      example = {
+        devices = [
+          {
+            host = "router1";
+            key_file = "/path/to/key";
+          }
+        ];
+      };
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+      RuntimeDirectory = "prometheus-junos-czerwonk-exporter";
+      ExecStartPre = [
+        "${pkgs.writeShellScript "subst-secrets-junos-czerwonk-exporter" ''
+          umask 0077
+          ${pkgs.envsubst}/bin/envsubst -i ${configFile} -o ''${RUNTIME_DIRECTORY}/junos-exporter.json
+        ''}"
+      ];
+      ExecStart = ''
+        ${pkgs.prometheus-junos-czerwonk-exporter}/bin/junos_exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -web.telemetry-path ${cfg.telemetryPath} \
+          -config.file ''${RUNTIME_DIRECTORY}/junos-exporter.json \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/kea.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
new file mode 100644
index 000000000000..8b1cd47d0a40
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/kea.nix
@@ -0,0 +1,47 @@
+{ config
+, lib
+, pkgs
+, options
+}:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.kea;
+in {
+  port = 9547;
+  extraOpts = {
+    controlSocketPaths = mkOption {
+      type = types.listOf types.str;
+      example = literalExpression ''
+        [
+          "/run/kea-dhcp4/kea-dhcp4.socket"
+          "/run/kea-dhcp6/kea-dhcp6.socket"
+        ]
+      '';
+      description = lib.mdDoc ''
+        Paths to kea control sockets
+      '';
+    };
+  };
+  serviceOpts = {
+    after = [
+      "kea-dhcp4-server.service"
+      "kea-dhcp6-server.service"
+    ];
+    serviceConfig = {
+      User = "kea";
+      ExecStart = ''
+        ${pkgs.prometheus-kea-exporter}/bin/kea-exporter \
+          --address ${cfg.listenAddress} \
+          --port ${toString cfg.port} \
+          ${concatStringsSep " " cfg.controlSocketPaths}
+      '';
+      SupplementaryGroups = [ "kea" ];
+      RestrictAddressFamilies = [
+        # Need AF_UNIX to collect data
+        "AF_UNIX"
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix
new file mode 100644
index 000000000000..dfa56343b871
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/keylight.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.keylight;
+in
+{
+  port = 9288;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-keylight-exporter}/bin/keylight_exporter \
+          -metrics.addr ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/knot.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/knot.nix
new file mode 100644
index 000000000000..775848750803
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/knot.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.knot;
+in {
+  port = 9433;
+  extraOpts = {
+    knotLibraryPath = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = literalExpression ''"''${pkgs.knot-dns.out}/lib/libknot.so"'';
+      description = lib.mdDoc ''
+        Path to the library of `knot-dns`.
+      '';
+    };
+
+    knotSocketPath = mkOption {
+      type = types.str;
+      default = "/run/knot/knot.sock";
+      description = lib.mdDoc ''
+        Socket path of {manpage}`knotd(8)`.
+      '';
+    };
+
+    knotSocketTimeout = mkOption {
+      type = types.ints.positive;
+      default = 2000;
+      description = lib.mdDoc ''
+        Timeout in seconds.
+      '';
+    };
+  };
+  serviceOpts = {
+    path = with pkgs; [
+      procps
+    ];
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-knot-exporter}/bin/knot-exporter \
+          --web-listen-addr ${cfg.listenAddress} \
+          --web-listen-port ${toString cfg.port} \
+          --knot-socket-path ${cfg.knotSocketPath} \
+          --knot-socket-timeout ${toString cfg.knotSocketTimeout} \
+          ${lib.optionalString (cfg.knotLibraryPath != null) "--knot-library-path ${cfg.knotLibraryPath}"} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      SupplementaryGroups = [
+        "knot"
+      ];
+      RestrictAddressFamilies = [
+        # Need AF_UNIX to collect data
+        "AF_UNIX"
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix
new file mode 100644
index 000000000000..9f914b1dc146
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/lnd.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.lnd;
+in
+{
+  port = 9092;
+  extraOpts = {
+    lndHost = mkOption {
+      type = types.str;
+      default = "localhost:10009";
+      description = lib.mdDoc ''
+        lnd instance gRPC address:port.
+      '';
+    };
+
+    lndTlsPath = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to lnd TLS certificate.
+      '';
+    };
+
+    lndMacaroonDir = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to lnd macaroons.
+      '';
+    };
+  };
+  serviceOpts.serviceConfig = {
+    ExecStart = ''
+      ${pkgs.prometheus-lnd-exporter}/bin/lndmon \
+        --prometheus.listenaddr=${cfg.listenAddress}:${toString cfg.port} \
+        --prometheus.logdir=/var/log/prometheus-lnd-exporter \
+        --lnd.host=${cfg.lndHost} \
+        --lnd.tlspath=${cfg.lndTlsPath} \
+        --lnd.macaroondir=${cfg.lndMacaroonDir} \
+        ${concatStringsSep " \\\n  " cfg.extraFlags}
+    '';
+    LogsDirectory = "prometheus-lnd-exporter";
+    ReadOnlyPaths = [ cfg.lndTlsPath cfg.lndMacaroonDir ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mail.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mail.nix
new file mode 100644
index 000000000000..15079f5841f4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mail.nix
@@ -0,0 +1,190 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.mail;
+
+  configFile = if cfg.configuration != null then configurationFile else (escapeShellArg cfg.configFile);
+
+  configurationFile = pkgs.writeText "prometheus-mail-exporter.conf" (builtins.toJSON (
+    # removes the _module attribute, null values and converts attrNames to lowercase
+    mapAttrs' (name: value:
+      if name == "servers"
+      then nameValuePair (toLower name)
+        ((map (srv: (mapAttrs' (n: v: nameValuePair (toLower n) v)
+          (filterAttrs (n: v: !(n == "_module" || v == null)) srv)
+        ))) value)
+      else nameValuePair (toLower name) value
+    ) (filterAttrs (n: _: !(n == "_module")) cfg.configuration)
+  ));
+
+  serverOptions.options = {
+    name = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Value for label 'configname' which will be added to all metrics.
+      '';
+    };
+    server = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Hostname of the server that should be probed.
+      '';
+    };
+    port = mkOption {
+      type = types.port;
+      example = 587;
+      description = lib.mdDoc ''
+        Port to use for SMTP.
+      '';
+    };
+    from = mkOption {
+      type = types.str;
+      example = "exporteruser@domain.tld";
+      description = lib.mdDoc ''
+        Content of 'From' Header for probing mails.
+      '';
+    };
+    to = mkOption {
+      type = types.str;
+      example = "exporteruser@domain.tld";
+      description = lib.mdDoc ''
+        Content of 'To' Header for probing mails.
+      '';
+    };
+    detectionDir = mkOption {
+      type = types.path;
+      example = "/var/spool/mail/exporteruser/new";
+      description = lib.mdDoc ''
+        Directory in which new mails for the exporter user are placed.
+        Note that this needs to exist when the exporter starts.
+      '';
+    };
+    login = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "exporteruser@domain.tld";
+      description = lib.mdDoc ''
+        Username to use for SMTP authentication.
+      '';
+    };
+    passphrase = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Password to use for SMTP authentication.
+      '';
+    };
+  };
+
+  exporterOptions.options = {
+    monitoringInterval = mkOption {
+      type = types.str;
+      example = "10s";
+      description = lib.mdDoc ''
+        Time interval between two probe attempts.
+      '';
+    };
+    mailCheckTimeout = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Timeout after which a probe mail is considered lost ("didn't make it").
+      '';
+    };
+    disableFileDeletion = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Disables the exporter's function to delete probing mails.
+      '';
+    };
+    servers = mkOption {
+      type = types.listOf (types.submodule serverOptions);
+      default = [];
+      example = literalExpression ''
+        [ {
+          name = "testserver";
+          server = "smtp.domain.tld";
+          port = 587;
+          from = "exporteruser@domain.tld";
+          to = "exporteruser@domain.tld";
+          detectionDir = "/path/to/Maildir/new";
+        } ]
+      '';
+      description = lib.mdDoc ''
+        List of servers that should be probed.
+
+        *Note:* if your mailserver has {manpage}`rspamd(8)` configured,
+        it can happen that emails from this exporter are marked as spam.
+
+        It's possible to work around the issue with a config like this:
+        ```
+        {
+          services.rspamd.locals."multimap.conf".text = '''
+            ALLOWLIST_PROMETHEUS {
+              filter = "email:domain:tld";
+              type = "from";
+              map = "''${pkgs.writeText "allowmap" "domain.tld"}";
+              score = -100.0;
+            }
+          ''';
+        }
+        ```
+      '';
+    };
+  };
+in
+{
+  port = 9225;
+  extraOpts = {
+    environmentFile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        File containing env-vars to be substituted into the exporter's config.
+      '';
+    };
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Specify the mailexporter configuration file to use.
+      '';
+    };
+    configuration = mkOption {
+      type = types.nullOr (types.submodule exporterOptions);
+      default = null;
+      description = lib.mdDoc ''
+        Specify the mailexporter configuration as a Nix attribute set. Mutually exclusive with the `configFile` option.
+      '';
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+      RuntimeDirectory = "prometheus-mail-exporter";
+      ExecStartPre = [
+        "${pkgs.writeShellScript "subst-secrets-mail-exporter" ''
+          umask 0077
+          ${pkgs.envsubst}/bin/envsubst -i ${configFile} -o ''${RUNTIME_DIRECTORY}/mail-exporter.json
+        ''}"
+      ];
+      ExecStart = ''
+        ${pkgs.prometheus-mail-exporter}/bin/mailexporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --config.file ''${RUNTIME_DIRECTORY}/mail-exporter.json \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix
new file mode 100644
index 000000000000..54dab4b5581a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mikrotik.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.mikrotik;
+in
+{
+  port = 9436;
+  extraOpts = {
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to a mikrotik exporter configuration file. Mutually exclusive with
+        {option}`configuration` option.
+      '';
+      example = literalExpression "./mikrotik.yml";
+    };
+
+    configuration = mkOption {
+      type = types.nullOr types.attrs;
+      default = null;
+      description = lib.mdDoc ''
+        Mikrotik exporter configuration as nix attribute set. Mutually exclusive with
+        {option}`configFile` option.
+
+        See <https://github.com/nshttpd/mikrotik-exporter/blob/master/README.md>
+        for the description of the configuration file format.
+      '';
+      example = literalExpression ''
+        {
+          devices = [
+            {
+              name = "my_router";
+              address = "10.10.0.1";
+              user = "prometheus";
+              password = "changeme";
+            }
+          ];
+          features = {
+            bgp = true;
+            dhcp = true;
+            routes = true;
+            optics = true;
+          };
+        }
+      '';
+    };
+  };
+  serviceOpts = let
+    configFile = if cfg.configFile != null
+                 then cfg.configFile
+                 else "${pkgs.writeText "mikrotik-exporter.yml" (builtins.toJSON cfg.configuration)}";
+    in {
+    serviceConfig = {
+      # -port is misleading name, it actually accepts address too
+      ExecStart = ''
+        ${pkgs.prometheus-mikrotik-exporter}/bin/mikrotik-exporter \
+          -config-file=${escapeShellArg configFile} \
+          -port=${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
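Since `configuration` is written to the Nix store, router credentials are better kept in an external file via `configFile`; a sketch with a placeholder path:

```
{
  services.prometheus.exporters.mikrotik = {
    enable = true;
    # Mutually exclusive with `configuration`; keeps the RouterOS password
    # out of the world-readable Nix store.
    configFile = "/var/lib/secrets/mikrotik-exporter.yml";
  };
}
```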
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
new file mode 100644
index 000000000000..82cc3fc314f2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/minio.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.minio;
+in
+{
+  port = 9290;
+  extraOpts = {
+    minioAddress = mkOption {
+      type = types.str;
+      example = "https://10.0.0.1:9000";
+      description = lib.mdDoc ''
+        The URL of the minio server.
+        Use HTTPS if Minio accepts secure connections only.
+        By default this connects to the local minio server if enabled.
+      '';
+    };
+
+    minioAccessKey = mkOption {
+      type = types.str;
+      example = "yourMinioAccessKey";
+      description = lib.mdDoc ''
+        The value of the Minio access key.
+        It is required in order to connect to the server.
+        By default this uses the one from the local minio server if enabled
+        and `config.services.minio.accessKey`.
+      '';
+    };
+
+    minioAccessSecret = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The value of the Minio access secret.
+        It is required in order to connect to the server.
+        By default this uses the one from the local minio server if enabled
+        and `config.services.minio.secretKey`.
+      '';
+    };
+
+    minioBucketStats = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Collect statistics about the buckets and files in buckets.
+        It requires more computation; use it carefully in case of large buckets.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-minio-exporter}/bin/minio-exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -minio.server ${cfg.minioAddress} \
+          -minio.access-key ${escapeShellArg cfg.minioAccessKey} \
+          -minio.access-secret ${escapeShellArg cfg.minioAccessSecret} \
+          ${optionalString cfg.minioBucketStats "-minio.bucket-stats"} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix
new file mode 100644
index 000000000000..222ea3e5384f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/modemmanager.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.modemmanager;
+in
+{
+  port = 9539;
+  extraOpts = {
+    refreshRate = mkOption {
+      type = types.str;
+      default = "5s";
+      description = lib.mdDoc ''
+        How frequently ModemManager will refresh the extended signal quality
+        information for each modem. The duration should be specified in seconds
+        ("5s"), minutes ("1m"), or hours ("1h").
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      # Required in order to authenticate with ModemManager via D-Bus.
+      SupplementaryGroups = "networkmanager";
+      ExecStart = ''
+        ${pkgs.prometheus-modemmanager-exporter}/bin/modemmanager_exporter \
+          -addr ${cfg.listenAddress}:${toString cfg.port} \
+          -rate ${cfg.refreshRate} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      RestrictAddressFamilies = [
+        # Need AF_UNIX to collect data
+        "AF_UNIX"
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
new file mode 100644
index 000000000000..849c514de681
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/mysqld.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, options }:
+let
+  cfg = config.services.prometheus.exporters.mysqld;
+  inherit (lib) types mkOption mdDoc mkIf mkForce cli concatStringsSep optionalString escapeShellArgs;
+in {
+  port = 9104;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    runAsLocalSuperUser = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Whether to run the exporter as {option}`services.mysql.user`.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      example = "/var/lib/prometheus-mysqld-exporter.cnf";
+      description = mdDoc ''
+        Path to the services config file.
+
+        See <https://github.com/prometheus/mysqld_exporter#running> for more information about
+        the available options.
+
+        ::: {.warning}
+        Please do not store this file in the nix store if you choose to include any credentials here,
+        as it would be world-readable.
+        :::
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = !cfg.runAsLocalSuperUser;
+      User = mkIf cfg.runAsLocalSuperUser (mkForce config.services.mysql.user);
+      LoadCredential = mkIf (cfg.configFile != null) (mkForce ("config:" + cfg.configFile));
+      ExecStart = concatStringsSep " " [
+        "${pkgs.prometheus-mysqld-exporter}/bin/mysqld_exporter"
+        "--web.listen-address=${cfg.listenAddress}:${toString cfg.port}"
+        "--web.telemetry-path=${cfg.telemetryPath}"
+        (optionalString (cfg.configFile != null) ''--config.my-cnf=''${CREDENTIALS_DIRECTORY}/config'')
+        (escapeShellArgs cfg.extraFlags)
+      ];
+      RestrictAddressFamilies = [
+        # The exporter can be configured to talk to a local mysql server via a unix socket.
+        "AF_UNIX"
+      ];
+    };
+  };
+}
+
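A sketch of scraping the local MySQL/MariaDB server as its superuser, with credentials supplied through the externally managed config file that the unit loads via systemd credentials; the file path is a placeholder:

```
{
  services.prometheus.exporters.mysqld = {
    enable = true;
    runAsLocalSuperUser = true;
    # Kept outside the Nix store; may contain passwords (see the warning above).
    configFile = "/var/lib/prometheus-mysqld-exporter.cnf";
  };
}
```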
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
new file mode 100644
index 000000000000..28a3eb6a134c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nextcloud.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nextcloud;
+in
+{
+  port = 9205;
+  extraOpts = {
+    url = mkOption {
+      type = types.str;
+      example = "https://domain.tld";
+      description = lib.mdDoc ''
+        URL to the Nextcloud serverinfo page.
+        Adding the path to the serverinfo API is optional, it defaults
+        to `/ocs/v2.php/apps/serverinfo/api/v1/info`.
+      '';
+    };
+    username = mkOption {
+      type = types.str;
+      default = "nextcloud-exporter";
+      description = lib.mdDoc ''
+        Username for connecting to Nextcloud.
+        Note that this account needs to have admin privileges in Nextcloud.
+        Unused when using token authentication.
+      '';
+    };
+    passwordFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/path/to/password-file";
+      description = lib.mdDoc ''
+        File containing the password for connecting to Nextcloud.
+        Make sure that this file is readable by the exporter user.
+      '';
+    };
+    tokenFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/path/to/token-file";
+      description = lib.mdDoc ''
+        File containing the token for connecting to Nextcloud.
+        Make sure that this file is readable by the exporter user.
+      '';
+    };
+    timeout = mkOption {
+      type = types.str;
+      default = "5s";
+      description = lib.mdDoc ''
+        Timeout for getting server info document.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-nextcloud-exporter}/bin/nextcloud-exporter \
+          --addr ${cfg.listenAddress}:${toString cfg.port} \
+          --timeout ${cfg.timeout} \
+          --server ${cfg.url} \
+          ${if cfg.passwordFile != null then ''
+            --username ${cfg.username} \
+            --password ${escapeShellArg "@${cfg.passwordFile}"} \
+          '' else ''
+            --auth-token ${escapeShellArg "@${cfg.tokenFile}"} \
+          ''} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}'';
+    };
+  };
+}
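The exporter supports either username/password or token authentication against the serverinfo API; a token-based sketch with placeholder URL and secret path:

```
{
  services.prometheus.exporters.nextcloud = {
    enable = true;
    url = "https://cloud.example.org";
    # Token authentication; alternatively set `username` and `passwordFile`.
    tokenFile = "/run/secrets/nextcloud-serverinfo-token";
  };
}
```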
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
new file mode 100644
index 000000000000..3158e71f0468
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginx.nix
@@ -0,0 +1,68 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nginx;
+in
+{
+  port = 9113;
+  extraOpts = {
+    scrapeUri = mkOption {
+      type = types.str;
+      default = "http://localhost/nginx_status";
+      description = lib.mdDoc ''
+        Address to access the nginx status page.
+        Can be enabled with `services.nginx.statusPage = true`.
+      '';
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+    sslVerify = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to perform certificate verification for https.
+      '';
+    };
+    constLabels = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [
+        "label1=value1"
+        "label2=value2"
+      ];
+      description = lib.mdDoc ''
+        A list of constant labels that will be used in every metric.
+      '';
+    };
+  };
+  serviceOpts = mkMerge ([{
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-nginx-exporter}/bin/nginx-prometheus-exporter \
+          --nginx.scrape-uri='${cfg.scrapeUri}' \
+          --nginx.ssl-verify=${boolToString cfg.sslVerify} \
+          --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path=${cfg.telemetryPath} \
+          --prometheus.const-labels=${concatStringsSep "," cfg.constLabels} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  }] ++ [(mkIf config.services.nginx.enable {
+    after = [ "nginx.service" ];
+    requires = [ "nginx.service" ];
+  })]);
+  imports = [
+    (mkRenamedOptionModule [ "telemetryEndpoint" ] [ "telemetryPath" ])
+    (mkRemovedOptionModule [ "insecure" ] ''
+      This option was replaced by 'prometheus.exporters.nginx.sslVerify'.
+    '')
+    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+}
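Because the exporter scrapes nginx's stub status page, the two are typically enabled together; a sketch (the constant label is an arbitrary example):

```
{
  # Exposes http://localhost/nginx_status for the exporter to scrape.
  services.nginx.statusPage = true;
  services.prometheus.exporters.nginx = {
    enable = true;
    constLabels = [ "env=prod" ];
  };
}
```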
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix
new file mode 100644
index 000000000000..674dc9dd4158
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nginxlog.nix
@@ -0,0 +1,51 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nginxlog;
+in {
+  port = 9117;
+  extraOpts = {
+    settings = mkOption {
+      type = types.attrs;
+      default = {};
+      description = lib.mdDoc ''
+        All settings of nginxlog expressed as a Nix attrset.
+
+        Check the official documentation for the corresponding YAML
+        settings that can all be used here: https://github.com/martin-helmich/prometheus-nginxlog-exporter
+
+        The `listen` object is already generated by `port`, `listenAddress` and `metricsEndpoint` and
+        will be merged with the value of `settings` before writing it as JSON.
+      '';
+    };
+
+    metricsEndpoint = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+  };
+
+  serviceOpts = let
+    listenConfig = {
+      listen = {
+        port = cfg.port;
+        address = cfg.listenAddress;
+        metrics_endpoint = cfg.metricsEndpoint;
+      };
+    };
+    completeConfig = pkgs.writeText "nginxlog-exporter.yaml" (builtins.toJSON (lib.recursiveUpdate listenConfig cfg.settings));
+  in {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-nginxlog-exporter}/bin/prometheus-nginxlog-exporter -config-file ${completeConfig}
+      '';
+      Restart="always";
+      ProtectSystem="full";
+    };
+  };
+}
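A hedged sketch of how the `settings` and `metricsEndpoint` options above might be used; the `namespaces` keys follow the upstream prometheus-nginxlog-exporter YAML schema linked in the description and are not validated by this module:

```nix
{
  services.prometheus.exporters.nginxlog = {
    enable = true;                # from the shared exporter framework
    metricsEndpoint = "/metrics";
    # Merged with the generated `listen` block and written out as JSON.
    settings.namespaces = [
      {
        name = "nginx";
        source.files = [ "/var/log/nginx/access.log" ];
        format = "$remote_addr - $remote_user [$time_local] \"$request\" $status $body_bytes_sent";
      }
    ];
  };
}
```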
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix
new file mode 100644
index 000000000000..dd8602e2c63d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/node.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.node;
+  collectorIsEnabled = final: any (collector: (final == collector)) cfg.enabledCollectors;
+  collectorIsDisabled = final: any (collector: (final == collector)) cfg.disabledCollectors;
+in
+{
+  port = 9100;
+  extraOpts = {
+    enabledCollectors = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "systemd" ];
+      description = lib.mdDoc ''
+        Collectors to enable. The collectors listed here are enabled in addition to the default ones.
+      '';
+    };
+    disabledCollectors = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "timex" ];
+      description = lib.mdDoc ''
+        Collectors to disable which are enabled by default.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      RuntimeDirectory = "prometheus-node-exporter";
+      ExecStart = ''
+        ${pkgs.prometheus-node-exporter}/bin/node_exporter \
+          ${concatMapStringsSep " " (x: "--collector." + x) cfg.enabledCollectors} \
+          ${concatMapStringsSep " " (x: "--no-collector." + x) cfg.disabledCollectors} \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} ${concatStringsSep " " cfg.extraFlags}
+      '';
+      RestrictAddressFamilies = optionals (collectorIsEnabled "logind" || collectorIsEnabled "systemd") [
+        # needs access to dbus via unix sockets (logind/systemd)
+        "AF_UNIX"
+      ] ++ optionals (collectorIsEnabled "network_route" || collectorIsEnabled "wifi" || ! collectorIsDisabled "netdev") [
+        # needs netlink sockets (network_route, wifi and netdev collectors)
+        "AF_NETLINK"
+      ];
+      # The timex collector needs to access clock APIs
+      ProtectClock = collectorIsDisabled "timex";
+      # Allow space monitoring under /home
+      ProtectHome = true;
+    };
+  };
+}
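For illustration, a small host-side sketch of the collector toggles defined above; `enable` and `openFirewall` are assumed to come from the shared exporter framework:

```nix
{
  services.prometheus.exporters.node = {
    enable = true;
    openFirewall = true;                 # shared framework option (assumption)
    enabledCollectors = [ "systemd" ];   # in addition to the default collectors
    disabledCollectors = [ "timex" ];    # keeps ProtectClock enabled, per the logic above
  };
}
```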
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nut.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nut.nix
new file mode 100644
index 000000000000..1c86b48b4509
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/nut.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.nut;
+in
+{
+  port = 9199;
+  extraOpts = {
+    nutServer = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        Hostname or address of the NUT server
+      '';
+    };
+    nutUser = mkOption {
+      type = types.str;
+      default = "";
+      example = "nut";
+      description = lib.mdDoc ''
+        The user to log into the NUT server. If set, passwordPath should
+        also be set.
+
+        Default NUT configs usually permit reading variables without
+        authentication.
+      '';
+    };
+    passwordPath = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      apply = final: if final == null then null else toString final;
+      description = lib.mdDoc ''
+        A run-time path to the nutUser password file, which should be
+        provisioned outside of the Nix store.
+      '';
+    };
+  };
+  serviceOpts = {
+    script = ''
+      ${optionalString (cfg.passwordPath != null)
+      "export NUT_EXPORTER_PASSWORD=$(cat ${toString cfg.passwordPath})"}
+      ${pkgs.prometheus-nut-exporter}/bin/nut_exporter \
+        --nut.server=${cfg.nutServer} \
+        --web.listen-address="${cfg.listenAddress}:${toString cfg.port}" \
+        ${optionalString (cfg.nutUser != "") "--nut.username=${cfg.nutUser}"}
+    '';
+  };
+}
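A usage sketch for the NUT exporter options above; the user name and password path are placeholders, and the password file is expected to be provisioned outside the Nix store:

```nix
{
  services.prometheus.exporters.nut = {
    enable = true;                              # shared framework option
    nutServer = "127.0.0.1";
    nutUser = "monitor";                        # placeholder NUT user
    passwordPath = "/run/keys/nut-password";    # read at runtime, exported as NUT_EXPORTER_PASSWORD
  };
}
```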
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openldap.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openldap.nix
new file mode 100644
index 000000000000..aee3ae5bb2d4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openldap.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.openldap;
+in {
+  port = 9330;
+  extraOpts = {
+    ldapCredentialFile = mkOption {
+      type = types.path;
+      example = "/run/keys/ldap_pass";
+      description = lib.mdDoc ''
+        File containing the credentials used to authenticate against
+        `openldap`.
+
+        The file should look like this:
+        ```
+        ---
+        ldapUser: "cn=monitoring,cn=Monitor"
+        ldapPass: "secret"
+        ```
+      '';
+    };
+    protocol = mkOption {
+      default = "tcp";
+      example = "udp";
+      type = types.str;
+      description = lib.mdDoc ''
+        Which protocol to use when connecting to `openldap`.
+      '';
+    };
+    ldapAddr = mkOption {
+      default = "localhost:389";
+      type = types.str;
+      description = lib.mdDoc ''
+        Address of the `openldap`-instance.
+      '';
+    };
+    metricsPath = mkOption {
+      default = "/metrics";
+      type = types.str;
+      description = lib.mdDoc ''
+        URL path where metrics should be exposed.
+      '';
+    };
+    interval = mkOption {
+      default = "30s";
+      type = types.str;
+      example = "1m";
+      description = lib.mdDoc ''
+        Scrape interval of the exporter.
+      '';
+    };
+  };
+  serviceOpts.serviceConfig = {
+    ExecStart = ''
+      ${pkgs.prometheus-openldap-exporter}/bin/openldap_exporter \
+        --promAddr ${cfg.listenAddress}:${toString cfg.port} \
+        --metrPath ${cfg.metricsPath} \
+        --ldapNet ${cfg.protocol} \
+        --interval ${cfg.interval} \
+        --config ${cfg.ldapCredentialFile} \
+        ${concatStringsSep " \\\n  " cfg.extraFlags}
+    '';
+  };
+}
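A sketch of enabling the exporter above; the credential file path is a placeholder and must contain the `ldapUser`/`ldapPass` document shown in the option description:

```nix
{
  services.prometheus.exporters.openldap = {
    enable = true;                               # shared framework option
    ldapAddr = "localhost:389";
    interval = "30s";
    ldapCredentialFile = "/run/keys/ldap_pass";  # provisioned outside the Nix store
  };
}
```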
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openvpn.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openvpn.nix
new file mode 100644
index 000000000000..5b54dad99805
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/openvpn.nix
@@ -0,0 +1,39 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.openvpn;
+in {
+  port = 9176;
+  extraOpts = {
+    statusPaths = mkOption {
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        Paths to OpenVPN status files. Please configure the OpenVPN option
+        `status` accordingly.
+      '';
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      PrivateDevices = true;
+      ProtectKernelModules = true;
+      NoNewPrivileges = true;
+      ExecStart = ''
+        ${pkgs.prometheus-openvpn-exporter}/bin/openvpn_exporter \
+          -openvpn.status_paths "${concatStringsSep "," cfg.statusPaths}" \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -web.telemetry-path ${cfg.telemetryPath}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix
new file mode 100644
index 000000000000..9e55cadae523
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pgbouncer.nix
@@ -0,0 +1,145 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.pgbouncer;
+in
+{
+  port = 9127;
+  extraOpts = {
+
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    connectionString = mkOption {
+      type = types.str;
+      default = "";
+      example = "postgres://admin:@localhost:6432/pgbouncer?sslmode=require";
+      description = lib.mdDoc ''
+        Connection string for accessing pgBouncer.
+
+        NOTE: the database name MUST remain `pgbouncer` (it is a special internal database).
+
+        NOTE: an admin user (with or without a password) MUST exist in
+        services.pgbouncer.authFile if an authType other than `any` is used.
+
+        WARNING: this secret is stored in the world-readable Nix store!
+        Use {option}`connectionStringFile` instead.
+      '';
+    };
+
+    connectionStringFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/run/keys/pgBouncer-connection-string";
+      description = lib.mdDoc ''
+        File that contains pgBouncer connection string in format:
+        postgres://admin:@localhost:6432/pgbouncer?sslmode=require
+
+        NOTE: the database name MUST remain `pgbouncer` (it is a special internal database).
+
+        NOTE: an admin user (with or without a password) MUST exist in
+        services.pgbouncer.authFile if an authType other than `any` is used.
+
+        {option}`connectionStringFile` takes precedence over {option}`connectionString`.
+      '';
+    };
+
+    pidFile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Path to PgBouncer pid file.
+
+        If provided, the standard process metrics are exported for the PgBouncer
+        process, prefixed with 'pgbouncer_process_...'. The exporter needs read
+        access to files owned by the PgBouncer process and depends on the
+        availability of /proc.
+
+        https://prometheus.io/docs/instrumenting/writing_clientlibs/#process-metrics.
+
+      '';
+    };
+
+    webSystemdSocket = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Use systemd socket activation listeners instead of port listeners (Linux only).
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["debug" "info" "warn" "error" ];
+      default = "info";
+      description = lib.mdDoc ''
+        Only log messages with the given severity or above.
+      '';
+    };
+
+    logFormat = mkOption {
+      type = types.enum ["logfmt" "json"];
+      default = "logfmt";
+      description = lib.mdDoc ''
+        Output format of log messages. One of: [logfmt, json]
+      '';
+    };
+
+    webConfigFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to configuration file that can enable TLS or authentication.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra command-line options passed to the exporter when it is launched.
+      '';
+    };
+
+  };
+
+  serviceOpts = {
+    after = [ "pgbouncer.service" ];
+    serviceConfig = let
+      startScript = pkgs.writeShellScriptBin "pgbouncer-start" "${concatStringsSep " " ([
+        "${pkgs.prometheus-pgbouncer-exporter}/bin/pgbouncer_exporter"
+        "--web.listen-address ${cfg.listenAddress}:${toString cfg.port}"
+        "--pgBouncer.connectionString ${if cfg.connectionStringFile != null then
+          "$(head -n1 ${cfg.connectionStringFile})" else "${escapeShellArg cfg.connectionString}"}"
+      ]
+      ++ optionals (cfg.telemetryPath != null) [
+        "--web.telemetry-path ${escapeShellArg cfg.telemetryPath}"
+      ]
+      ++ optionals (cfg.pidFile != null) [
+        "--pgBouncer.pid-file=${escapeShellArg cfg.pidFile}"
+      ]
+      ++ optionals (cfg.logLevel != null) [
+        "--log.level ${escapeShellArg cfg.logLevel}"
+      ]
+      ++ optionals (cfg.logFormat != null) [
+        "--log.format ${escapeShellArg cfg.logFormat}"
+      ]
+      ++ optionals (cfg.webSystemdSocket != false) [
+        "--web.systemd-socket ${escapeShellArg cfg.webSystemdSocket}"
+      ]
+      ++ optionals (cfg.webConfigFile != null) [
+        "--web.config.file ${escapeShellArg cfg.webConfigFile}"
+      ]
+      ++ cfg.extraFlags)}";
+    in
+    {
+      ExecStart = "${startScript}/bin/pgbouncer-start";
+    };
+  };
+}
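As a sketch of the intended secret handling above, a host would typically point `connectionStringFile` at a runtime path instead of setting `connectionString` in the store; the path is a placeholder:

```nix
{
  services.prometheus.exporters.pgbouncer = {
    enable = true;                         # shared framework option
    # Single line: postgres://admin:...@localhost:6432/pgbouncer?sslmode=require
    connectionStringFile = "/run/keys/pgbouncer-connection-string";
    telemetryPath = "/metrics";
    logLevel = "warn";
  };
}
```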
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix
new file mode 100644
index 000000000000..8f6942002f79
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/php-fpm.nix
@@ -0,0 +1,65 @@
+{ config
+, lib
+, pkgs
+, options
+}:
+
+let
+  logPrefix = "services.prometheus.exporter.php-fpm";
+  cfg = config.services.prometheus.exporters.php-fpm;
+in {
+  port = 9253;
+  extraOpts = {
+    package = lib.mkPackageOptionMD pkgs "prometheus-php-fpm-exporter" {};
+
+    telemetryPath = lib.mkOption {
+      type = lib.types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    environmentFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      example = "/root/prometheus-php-fpm-exporter.env";
+      description = lib.mdDoc ''
+        Environment file as defined in {manpage}`systemd.exec(5)`.
+
+        Secrets may be passed to the service without adding them to the
+        world-readable Nix store, by specifying placeholder variables as
+        the option value in Nix and setting these variables accordingly in the
+        environment file.
+
+        Environment variables from this file will be interpolated into the
+        config file using envsubst with this syntax:
+        `$ENVIRONMENT ''${VARIABLE}`
+
+        For variables to use see [options and defaults](https://github.com/hipages/php-fpm_exporter#options-and-defaults).
+
+        The main use is to set the PHP_FPM_SCRAPE_URI that indicates how to connect to the PHP-FPM process.
+
+        ```
+          # Content of the environment file
+          PHP_FPM_SCRAPE_URI="unix:///tmp/php.sock;/status"
+        ```
+
+        Note that this file needs to be available on the host on which
+        this exporter is running.
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      EnvironmentFile = lib.mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+      ExecStart = ''
+        ${lib.getExe cfg.package} server \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          ${lib.concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
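A short sketch of the environment-file pattern described above; the file path is a placeholder and would carry PHP_FPM_SCRAPE_URI as shown in the option documentation:

```nix
{
  services.prometheus.exporters.php-fpm = {
    enable = true;                                              # shared framework option
    telemetryPath = "/metrics";
    # Contains e.g. PHP_FPM_SCRAPE_URI="unix:///tmp/php.sock;/status"
    environmentFile = "/root/prometheus-php-fpm-exporter.env";
  };
}
```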
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pihole.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pihole.nix
new file mode 100644
index 000000000000..6f403b3e58c8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pihole.nix
@@ -0,0 +1,78 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.pihole;
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "interval"] "This option has been removed.")
+    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+
+  port = 9617;
+  extraOpts = {
+    apiToken = mkOption {
+      type = types.str;
+      default = "";
+      example = "580a770cb40511eb85290242ac130003580a770cb40511eb85290242ac130003";
+      description = lib.mdDoc ''
+        Pi-Hole API token which can be used instead of a password
+      '';
+    };
+    password = mkOption {
+      type = types.str;
+      default = "";
+      example = "password";
+      description = lib.mdDoc ''
+        The password used to log in to Pi-Hole. An API token can be used instead.
+      '';
+    };
+    piholeHostname = mkOption {
+      type = types.str;
+      default = "pihole";
+      example = "127.0.0.1";
+      description = lib.mdDoc ''
+        Hostname or address of the Pi-Hole web interface.
+      '';
+    };
+    piholePort = mkOption {
+      type = types.port;
+      default = 80;
+      example = 443;
+      description = lib.mdDoc ''
+        The port the Pi-Hole web interface is reachable on.
+      '';
+    };
+    protocol = mkOption {
+      type = types.enum [ "http" "https" ];
+      default = "http";
+      example = "https";
+      description = lib.mdDoc ''
+        The protocol which is used to connect to Pi-Hole
+      '';
+    };
+    timeout = mkOption {
+      type = types.str;
+      default = "5s";
+      description = lib.mdDoc ''
+        Controls the timeout to connect to a Pi-Hole instance
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-pihole-exporter}/bin/pihole-exporter \
+          ${optionalString (cfg.apiToken != "") "-pihole_api_token ${cfg.apiToken}"} \
+          -pihole_hostname ${cfg.piholeHostname} \
+          ${optionalString (cfg.password != "") "-pihole_password ${cfg.password}"} \
+          -pihole_port ${toString cfg.piholePort} \
+          -pihole_protocol ${cfg.protocol} \
+          -port ${toString cfg.port} \
+          -timeout ${cfg.timeout}
+      '';
+    };
+  };
+}
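A usage sketch built from the options above; the hostname is a placeholder, the token value reuses the option's own example, and either `apiToken` or `password` can be supplied:

```nix
{
  services.prometheus.exporters.pihole = {
    enable = true;                     # shared framework option
    piholeHostname = "192.168.1.2";    # placeholder address
    piholePort = 80;
    protocol = "http";
    apiToken = "580a770cb40511eb85290242ac130003580a770cb40511eb85290242ac130003";
  };
}
```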
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
new file mode 100644
index 000000000000..9f402b123110
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postfix.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.postfix;
+in
+{
+  port = 9154;
+  extraOpts = {
+    group = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Group under which the postfix exporter shall be run.
+        It should match the group that is allowed to access the
+        `showq` socket in the `queue/public/` directory.
+        Defaults to `services.postfix.setgidGroup` when postfix is enabled.
+      '';
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+    logfilePath = mkOption {
+      type = types.path;
+      default = "/var/log/postfix_exporter_input.log";
+      example = "/var/log/mail.log";
+      description = lib.mdDoc ''
+        Path where Postfix writes log entries.
+        This file will be truncated by this exporter!
+      '';
+    };
+    showqPath = mkOption {
+      type = types.path;
+      default = "/var/lib/postfix/queue/public/showq";
+      example = "/var/spool/postfix/public/showq";
+      description = lib.mdDoc ''
+        Path where Postfix places its showq socket.
+      '';
+    };
+    systemd = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable reading metrics from the systemd journal instead of from a logfile
+        '';
+      };
+      unit = mkOption {
+        type = types.str;
+        default = "postfix.service";
+        description = lib.mdDoc ''
+          Name of the postfix systemd unit.
+        '';
+      };
+      slice = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Name of the postfix systemd slice.
+          This overrides the {option}`systemd.unit`.
+        '';
+      };
+      journalPath = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to the systemd journal.
+        '';
+      };
+    };
+  };
+  serviceOpts = {
+    after = mkIf cfg.systemd.enable [ cfg.systemd.unit ];
+    serviceConfig = {
+      DynamicUser = false;
+      # By default, each prometheus exporter only gets AF_INET & AF_INET6,
+      # but AF_UNIX is needed to read from the `showq`-socket.
+      RestrictAddressFamilies = [ "AF_UNIX" ];
+      SupplementaryGroups = mkIf cfg.systemd.enable [ "systemd-journal" ];
+      ExecStart = ''
+        ${pkgs.prometheus-postfix-exporter}/bin/postfix_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --postfix.showq_path ${escapeShellArg cfg.showqPath} \
+          ${concatStringsSep " \\\n  " (cfg.extraFlags
+          ++ optional cfg.systemd.enable "--systemd.enable"
+          ++ optional cfg.systemd.enable (if cfg.systemd.slice != null
+                                          then "--systemd.slice ${cfg.systemd.slice}"
+                                          else "--systemd.unit ${cfg.systemd.unit}")
+          ++ optional (cfg.systemd.enable && (cfg.systemd.journalPath != null))
+                       "--systemd.journal_path ${escapeShellArg cfg.systemd.journalPath}"
+          ++ optional (!cfg.systemd.enable) "--postfix.logfile_path ${escapeShellArg cfg.logfilePath}")}
+      '';
+    };
+  };
+}
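A sketch of running the exporter against the journal, as the `systemd` options above allow; the explicit `group` mirrors the stated default and is shown only for clarity:

```nix
{ config, ... }:
{
  services.prometheus.exporters.postfix = {
    enable = true;                                   # shared framework option
    group = config.services.postfix.setgidGroup;     # needed to read the showq socket
    systemd.enable = true;                           # read from the journal, not a logfile
    systemd.unit = "postfix.service";
  };
}
```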
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix
new file mode 100644
index 000000000000..755d771ecdff
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/postgres.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.postgres;
+in
+{
+  port = 9187;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+    dataSourceName = mkOption {
+      type = types.str;
+      default = "user=postgres database=postgres host=/run/postgresql sslmode=disable";
+      example = "postgresql://username:password@localhost:5432/postgres?sslmode=disable";
+      description = lib.mdDoc ''
+        Accepts PostgreSQL URI form and key=value form arguments.
+      '';
+    };
+    runAsLocalSuperUser = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to run the exporter as the local 'postgres' super user.
+      '';
+    };
+
+    # TODO perhaps LoadCredential would be more appropriate
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/root/prometheus-postgres-exporter.env";
+      description = lib.mdDoc ''
+        Environment file as defined in {manpage}`systemd.exec(5)`.
+
+        Secrets may be passed to the service without adding them to the
+        world-readable Nix store, by specifying placeholder variables as
+        the option value in Nix and setting these variables accordingly in the
+        environment file.
+
+        Environment variables from this file will be interpolated into the
+        config file using envsubst with this syntax:
+        `$ENVIRONMENT ''${VARIABLE}`
+
+        The main use is to set the DATA_SOURCE_NAME that contains the
+        postgres password.
+
+        Note that the contents of this file will override dataSourceName
+        if you have set it from Nix.
+
+        ```
+          # Content of the environment file
+          DATA_SOURCE_NAME=postgresql://username:password@localhost:5432/postgres?sslmode=disable
+        ```
+
+        Note that this file needs to be available on the host on which
+        this exporter is running.
+      '';
+    };
+
+  };
+  serviceOpts = {
+    environment.DATA_SOURCE_NAME = cfg.dataSourceName;
+    serviceConfig = {
+      DynamicUser = false;
+      User = mkIf cfg.runAsLocalSuperUser (mkForce "postgres");
+      EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+      ExecStart = ''
+        ${pkgs.prometheus-postgres-exporter}/bin/postgres_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      RestrictAddressFamilies = [
+        # Need AF_UNIX to collect data
+        "AF_UNIX"
+      ];
+    };
+  };
+}
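For illustration, a local-socket setup using the options above; the commented `environmentFile` line shows the alternative for keeping credentials out of the store:

```nix
{
  services.prometheus.exporters.postgres = {
    enable = true;                 # shared framework option
    runAsLocalSuperUser = true;    # connect as the local postgres user over the unix socket
    dataSourceName = "user=postgres database=postgres host=/run/postgresql sslmode=disable";
    # environmentFile = "/root/prometheus-postgres-exporter.env";  # would override DATA_SOURCE_NAME
  };
}
```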
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/process.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/process.nix
new file mode 100644
index 000000000000..278d6cd78074
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/process.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.process;
+  configFile = pkgs.writeText "process-exporter.yaml" (builtins.toJSON cfg.settings);
+in
+{
+  port = 9256;
+  extraOpts = {
+    settings.process_names = mkOption {
+      type = types.listOf types.anything;
+      default = [];
+      example = literalExpression ''
+        [
+          # Remove nix store path from process name
+          { name = "{{.Matches.Wrapped}} {{ .Matches.Args }}"; cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ]; }
+        ]
+      '';
+      description = lib.mdDoc ''
+        All settings expressed as a Nix attrset.
+
+        Check the official documentation for the corresponding YAML
+        settings that can all be used here: <https://github.com/ncabatoff/process-exporter>
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-process-exporter}/bin/process-exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --config.path ${configFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      NoNewPrivileges = true;
+      ProtectHome = true;
+      ProtectSystem = true;
+      ProtectKernelTunables = true;
+      ProtectKernelModules = true;
+      ProtectControlGroups = true;
+    };
+  };
+}
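A sketch reusing the module's own matcher example; the `process_names` entries follow the upstream process-exporter configuration schema:

```nix
{
  services.prometheus.exporters.process = {
    enable = true;     # shared framework option
    settings.process_names = [
      # Strip the /nix/store path and group processes by the wrapped binary name.
      {
        name = "{{.Matches.Wrapped}} {{ .Matches.Args }}";
        cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ];
      }
    ];
  };
}
```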
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pve.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pve.nix
new file mode 100644
index 000000000000..f95412efd7dd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/pve.nix
@@ -0,0 +1,120 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+let
+  cfg = config.services.prometheus.exporters.pve;
+
+  # pve exporter requires a config file so create an empty one if configFile is not provided
+  emptyConfigFile = pkgs.writeTextFile {
+    name = "pve.yml";
+    text = "default:";
+  };
+
+  computedConfigFile = if cfg.configFile == null then emptyConfigFile else cfg.configFile;
+in
+{
+  port = 9221;
+  extraOpts = {
+    package = mkOption {
+      type = types.package;
+      default = pkgs.prometheus-pve-exporter;
+      defaultText = literalExpression "pkgs.prometheus-pve-exporter";
+      example = literalExpression "pkgs.prometheus-pve-exporter";
+      description = lib.mdDoc ''
+        The package to use for prometheus-pve-exporter
+      '';
+    };
+
+    environmentFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      example = "/etc/prometheus-pve-exporter/pve.env";
+      description = lib.mdDoc ''
+        Path to the service's environment file. This path can either be a computed path in /nix/store or a path in the local filesystem.
+
+        The environment file should NOT be stored in /nix/store as it contains passwords and/or keys in plain text.
+
+        Environment reference: https://github.com/prometheus-pve/prometheus-pve-exporter#authentication
+      '';
+    };
+
+    configFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      example = "/etc/prometheus-pve-exporter/pve.yml";
+      description = lib.mdDoc ''
+        Path to the service's config file. This path can either be a computed path in /nix/store or a path in the local filesystem.
+
+        The config file should NOT be stored in /nix/store as it will contain passwords and/or keys in plain text.
+
+        If both configFile and environmentFile are provided, the configFile option will be ignored.
+
+        Configuration reference: https://github.com/prometheus-pve/prometheus-pve-exporter/#authentication
+      '';
+    };
+
+    collectors = {
+      status = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Collect Node/VM/CT status
+        '';
+      };
+      version = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Collect PVE version info
+        '';
+      };
+      node = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Collect PVE node info
+        '';
+      };
+      cluster = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Collect PVE cluster info
+        '';
+      };
+      resources = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Collect PVE resources info
+        '';
+      };
+      config = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Collect PVE onboot status
+        '';
+      };
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = cfg.environmentFile == null;
+      LoadCredential = "configFile:${computedConfigFile}";
+      ExecStart = ''
+        ${cfg.package}/bin/pve_exporter \
+          --${optionalString (!cfg.collectors.status) "no-"}collector.status \
+          --${optionalString (!cfg.collectors.version) "no-"}collector.version \
+          --${optionalString (!cfg.collectors.node) "no-"}collector.node \
+          --${optionalString (!cfg.collectors.cluster) "no-"}collector.cluster \
+          --${optionalString (!cfg.collectors.resources) "no-"}collector.resources \
+          --${optionalString (!cfg.collectors.config) "no-"}collector.config \
+          %d/configFile \
+          ${toString cfg.port} ${cfg.listenAddress}
+      '';
+    } // optionalAttrs (cfg.environmentFile != null) {
+      EnvironmentFile = cfg.environmentFile;
+    };
+  };
+}
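A sketch of wiring the exporter above to a Proxmox VE host; the environment file path is a placeholder and its contents follow the upstream authentication reference linked in the option description:

```nix
{
  services.prometheus.exporters.pve = {
    enable = true;                                               # shared framework option
    port = 9221;
    environmentFile = "/etc/prometheus-pve-exporter/pve.env";    # credentials, outside the Nix store
    collectors.config = false;                                   # skip the onboot-status collector
  };
}
```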
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix
new file mode 100644
index 000000000000..f03b3c4df916
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/py-air-control.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.py-air-control;
+
+  workingDir = "/var/lib/${cfg.stateDir}";
+
+in
+{
+  port = 9896;
+  extraOpts = {
+    deviceHostname = mkOption {
+      type = types.str;
+      example = "192.168.1.123";
+      description = lib.mdDoc ''
+        The hostname of the air purification device from which to scrape the metrics.
+      '';
+    };
+    protocol = mkOption {
+      type = types.str;
+      default = "http";
+      description = lib.mdDoc ''
+        The protocol to use when communicating with the air purification device.
+        Available: [http, coap, plain_coap]
+      '';
+    };
+    stateDir = mkOption {
+      type = types.str;
+      default = "prometheus-py-air-control-exporter";
+      description = lib.mdDoc ''
+        Directory below `/var/lib` to store runtime data.
+        This directory will be created automatically using systemd's StateDirectory mechanism.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      DynamicUser = false;
+      StateDirectory = cfg.stateDir;
+      WorkingDirectory = workingDir;
+      ExecStart = ''
+        ${pkgs.python3Packages.py-air-control-exporter}/bin/py-air-control-exporter \
+          --host ${cfg.deviceHostname} \
+          --protocol ${cfg.protocol} \
+          --listen-port ${toString cfg.port} \
+          --listen-address ${cfg.listenAddress}
+      '';
+      Environment = [ "HOME=${workingDir}" ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/redis.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/redis.nix
new file mode 100644
index 000000000000..befbcb21f766
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/redis.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.redis;
+in
+{
+  port = 9121;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-redis-exporter}/bin/redis_exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix
new file mode 100644
index 000000000000..f9dcfad07d30
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rspamd.nix
@@ -0,0 +1,97 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.rspamd;
+
+  mkFile = conf:
+    pkgs.writeText "rspamd-exporter-config.yml" (builtins.toJSON conf);
+
+  generateConfig = extraLabels: {
+    modules.default.metrics = (map (path: {
+      name = "rspamd_${replaceStrings [ "[" "." " " "]" "\\" "'" ] [ "_" "_" "_" "" "" "" ] path}";
+      path = "{ .${path} }";
+      labels = extraLabels;
+    }) [
+      "actions['add\\ header']"
+      "actions['no\\ action']"
+      "actions['rewrite\\ subject']"
+      "actions['soft\\ reject']"
+      "actions.greylist"
+      "actions.reject"
+      "bytes_allocated"
+      "chunks_allocated"
+      "chunks_freed"
+      "chunks_oversized"
+      "connections"
+      "control_connections"
+      "ham_count"
+      "learned"
+      "pools_allocated"
+      "pools_freed"
+      "read_only"
+      "scanned"
+      "shared_chunks_allocated"
+      "spam_count"
+      "total_learns"
+    ]) ++ [{
+      name = "rspamd_statfiles";
+      type = "object";
+      path = "{.statfiles[*]}";
+      labels = recursiveUpdate {
+        symbol = "{.symbol}";
+        type = "{.type}";
+      } extraLabels;
+      values = {
+        revision = "{.revision}";
+        size = "{.size}";
+        total = "{.total}";
+        used = "{.used}";
+        languages = "{.languages}";
+        users = "{.users}";
+      };
+    }];
+  };
+in
+{
+  port = 7980;
+  extraOpts = {
+    extraLabels = mkOption {
+      type = types.attrsOf types.str;
+      default = {
+        host = config.networking.hostName;
+      };
+      defaultText = literalExpression "{ host = config.networking.hostName; }";
+      example = literalExpression ''
+        {
+          host = config.networking.hostName;
+          custom_label = "some_value";
+        }
+      '';
+      description = lib.mdDoc "Set of labels added to each metric.";
+    };
+  };
+  serviceOpts.serviceConfig.ExecStart = ''
+    ${pkgs.prometheus-json-exporter}/bin/json_exporter \
+      --config.file ${mkFile (generateConfig cfg.extraLabels)} \
+      --web.listen-address "${cfg.listenAddress}:${toString cfg.port}" \
+      ${concatStringsSep " \\\n  " cfg.extraFlags}
+  '';
+
+  imports = [
+    (mkRemovedOptionModule [ "url" ] ''
+      This option was removed. The URL of the rspamd metrics endpoint
+      must now be provided to the exporter by prometheus via the url
+      parameter `target'.
+
+      In prometheus a scrape URL would look like this:
+
+        http://some.rspamd-exporter.host:7980/probe?target=http://some.rspamd.host:11334/stat
+
+      For more information, take a look at the official documentation
+      (https://github.com/prometheus-community/json_exporter) of the json_exporter.
+    '')
+     ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix
new file mode 100644
index 000000000000..1f7235cb7830
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/rtl_433.nix
@@ -0,0 +1,83 @@
+{ config, lib, pkgs, options }:
+
+let
+  cfg = config.services.prometheus.exporters.rtl_433;
+in
+{
+  port = 9550;
+
+  extraOpts = let
+    mkMatcherOptionType = field: description: with lib.types;
+      listOf (submodule {
+        options = {
+          name = lib.mkOption {
+            type = str;
+            description = lib.mdDoc "Name to match.";
+          };
+          "${field}" = lib.mkOption {
+            type = int;
+            description = lib.mdDoc description;
+          };
+          location = lib.mkOption {
+            type = str;
+            description = lib.mdDoc "Location to match.";
+          };
+        };
+      });
+  in
+  {
+    rtl433Flags = lib.mkOption {
+      type = lib.types.str;
+      default = "-C si";
+      example = "-C si -R 19";
+      description = lib.mdDoc ''
+        Flags passed verbatim to rtl_433 binary.
+        Having `-C si` (the default) is recommended since only Celsius temperatures are parsed.
+      '';
+    };
+    channels = lib.mkOption {
+      type = mkMatcherOptionType "channel" "Channel to match.";
+      default = [];
+      example = [
+        { name = "Acurite"; channel = 6543; location = "Kitchen"; }
+      ];
+      description = lib.mdDoc ''
+        List of channel matchers to export.
+      '';
+    };
+    ids = lib.mkOption {
+      type = mkMatcherOptionType "id" "ID to match.";
+      default = [];
+      example = [
+        { name = "Nexus"; id = 1; location = "Bedroom"; }
+      ];
+      description = lib.mdDoc ''
+        List of ID matchers to export.
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      # rtl-sdr udev rules make supported USB devices +rw by plugdev.
+      SupplementaryGroups = "plugdev";
+      # rtl_433 needs rw access to the USB radio.
+      PrivateDevices = lib.mkForce false;
+      DeviceAllow = lib.mkForce "char-usb_device rw";
+      RestrictAddressFamilies = [ "AF_NETLINK" ];
+
+      ExecStart = let
+        matchers = (map (m:
+          "--channel_matcher '${m.name},${toString m.channel},${m.location}'"
+        ) cfg.channels) ++ (map (m:
+          "--id_matcher '${m.name},${toString m.id},${m.location}'"
+        ) cfg.ids); in ''
+        ${pkgs.prometheus-rtl_433-exporter}/bin/rtl_433_prometheus \
+          -listen ${cfg.listenAddress}:${toString cfg.port} \
+          -subprocess "${pkgs.rtl_433}/bin/rtl_433 -F json ${cfg.rtl433Flags}" \
+          ${lib.concatStringsSep " \\\n  " matchers} \
+          ${lib.concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
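A usage sketch combining the module's own matcher examples; names, channels and locations are placeholders for whatever sensors the radio actually receives:

```nix
{
  services.prometheus.exporters.rtl_433 = {
    enable = true;                   # shared framework option
    rtl433Flags = "-C si -R 19";     # passed verbatim to rtl_433
    channels = [
      { name = "Acurite"; channel = 6543; location = "Kitchen"; }
    ];
    ids = [
      { name = "Nexus"; id = 1; location = "Bedroom"; }
    ];
  };
}
```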
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
new file mode 100644
index 000000000000..411277494013
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/sabnzbd.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, options }:
+
+let
+  inherit (lib) mkOption types;
+  cfg = config.services.prometheus.exporters.sabnzbd;
+in
+{
+  port = 9387;
+
+  extraOpts = {
+    servers = mkOption {
+      description = "List of sabnzbd servers to connect to.";
+      type = types.listOf (types.submodule {
+        options = {
+          baseUrl = mkOption {
+            type = types.str;
+            description = "Base URL of the sabnzbd server.";
+            example = "http://localhost:8080/sabnzbd";
+          };
+          apiKeyFile = mkOption {
+            type = types.str;
+            description = "File containing the API key.";
+            example = "/run/secrets/sabnzbd_apikey";
+          };
+        };
+      });
+    };
+  };
+
+  serviceOpts =
+    let
+      servers = lib.zipAttrs cfg.servers;
+      apiKeys = lib.concatStringsSep "," (builtins.map (file: "$(cat ${file})") servers.apiKeyFile);
+    in
+    {
+      environment = {
+        METRICS_PORT = toString cfg.port;
+        METRICS_ADDR = cfg.listenAddress;
+        SABNZBD_BASEURLS = lib.concatStringsSep "," servers.baseUrl;
+      };
+
+      script = ''
+        export SABNZBD_APIKEYS="${apiKeys}"
+        exec ${lib.getExe pkgs.prometheus-sabnzbd-exporter}
+      '';
+    };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix
new file mode 100644
index 000000000000..3b6ebf65b090
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/scaphandre.nix
@@ -0,0 +1,33 @@
+{ config
+, lib
+, pkgs
+, options
+}:
+
+let
+  logPrefix = "services.prometheus.exporter.scaphandre";
+  cfg = config.services.prometheus.exporters.scaphandre;
+in {
+  port = 8080;
+  extraOpts = {
+    telemetryPath = lib.mkOption {
+      type = lib.types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.scaphandre}/bin/scaphandre prometheus \
+          --address ${cfg.listenAddress} \
+          --port ${toString cfg.port} \
+          --suffix ${cfg.telemetryPath} \
+          ${lib.concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/script.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/script.nix
new file mode 100644
index 000000000000..eab0e1d8a6b5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/script.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.script;
+  configFile = pkgs.writeText "script-exporter.yaml" (builtins.toJSON cfg.settings);
+in
+{
+  port = 9172;
+  extraOpts = {
+    settings.scripts = mkOption {
+      type = with types; listOf (submodule {
+        options = {
+          name = mkOption {
+            type = str;
+            example = "sleep";
+            description = lib.mdDoc "Name of the script.";
+          };
+          script = mkOption {
+            type = str;
+            example = "sleep 5";
+            description = lib.mdDoc "Shell script to execute when metrics are requested.";
+          };
+          timeout = mkOption {
+            type = nullOr int;
+            default = null;
+            example = 60;
+            description = lib.mdDoc "Optional timeout for the script in seconds.";
+          };
+        };
+      });
+      example = literalExpression ''
+        {
+          scripts = [
+            { name = "sleep"; script = "sleep 5"; }
+          ];
+        }
+      '';
+      description = lib.mdDoc ''
+        All settings expressed as a Nix attrset.
+
+        Check the official documentation for the corresponding YAML
+        settings that can all be used here: <https://github.com/adhocteam/script_exporter#sample-configuration>
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-script-exporter}/bin/script_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --config.file ${configFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      NoNewPrivileges = true;
+      ProtectHome = true;
+      ProtectSystem = "strict";
+      ProtectKernelTunables = true;
+      ProtectKernelModules = true;
+      ProtectControlGroups = true;
+    };
+  };
+}
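A sketch based on the option's own example; each entry in `settings.scripts` becomes a script the exporter runs when its metrics are requested:

```nix
{
  services.prometheus.exporters.script = {
    enable = true;     # shared framework option
    settings.scripts = [
      { name = "sleep"; script = "sleep 5"; timeout = 60; }
    ];
  };
}
```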
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/shelly.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/shelly.nix
new file mode 100644
index 000000000000..b9cfd1b1e84a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/shelly.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.shelly;
+in
+{
+  port = 9784;
+  extraOpts = {
+    metrics-file = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to the JSON file with the metric definitions
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-shelly-exporter}/bin/shelly_exporter \
+          -metrics-file ${cfg.metrics-file} \
+          -listen-address ${cfg.listenAddress}:${toString cfg.port}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix
new file mode 100644
index 000000000000..50e1321a1e9c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/smartctl.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.smartctl;
+  args = lib.escapeShellArgs ([
+    "--web.listen-address=${cfg.listenAddress}:${toString cfg.port}"
+    "--smartctl.path=${pkgs.smartmontools}/bin/smartctl"
+    "--smartctl.interval=${cfg.maxInterval}"
+  ] ++ map (device: "--smartctl.device=${device}") cfg.devices
+  ++ cfg.extraFlags);
+in {
+  port = 9633;
+
+  extraOpts = {
+    devices = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = literalExpression ''
+        [ "/dev/sda", "/dev/nvme0n1" ];
+      '';
+      description = lib.mdDoc ''
+        Paths to the disks that will be monitored. Will autodiscover
+        all disks if none given.
+      '';
+    };
+    maxInterval = mkOption {
+      type = types.str;
+      default = "60s";
+      example = "2m";
+      description = lib.mdDoc ''
+        Interval that limits how often a disk can be queried.
+      '';
+    };
+  };
+
+  serviceOpts = {
+    serviceConfig = {
+      AmbientCapabilities = [
+        "CAP_SYS_RAWIO"
+        "CAP_SYS_ADMIN"
+      ];
+      CapabilityBoundingSet = [
+        "CAP_SYS_RAWIO"
+        "CAP_SYS_ADMIN"
+      ];
+      DevicePolicy = "closed";
+      DeviceAllow = lib.mkOverride 50 [
+        "block-blkext rw"
+        "block-sd rw"
+        "char-nvme rw"
+      ];
+      ExecStart = ''
+        ${pkgs.prometheus-smartctl-exporter}/bin/smartctl_exporter ${args}
+      '';
+      PrivateDevices = lib.mkForce false;
+      ProtectProc = "invisible";
+      ProcSubset = "pid";
+      SupplementaryGroups = [ "disk" ];
+      SystemCallFilter = [ "@system-service" "~@privileged" ];
+    };
+  };
+}
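A sketch of the disk-monitoring options above; the device paths are placeholders and can be omitted entirely to let the exporter autodiscover disks:

```nix
{
  services.prometheus.exporters.smartctl = {
    enable = true;                              # shared framework option
    listenAddress = "127.0.0.1";
    devices = [ "/dev/sda" "/dev/nvme0n1" ];    # omit to autodiscover all disks
    maxInterval = "2m";
  };
}
```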
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix
new file mode 100644
index 000000000000..459f5842f546
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/smokeping.nix
@@ -0,0 +1,61 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.smokeping;
+  goDuration = types.mkOptionType {
+    name = "goDuration";
+    description = "Go duration (https://golang.org/pkg/time/#ParseDuration)";
+    check = x: types.str.check x && builtins.match "(-?[0-9]+(\.[0-9]+)?(ns|us|µs|ms|s|m|h))+" x != null;
+    inherit (types.str) merge;
+  };
+in
+{
+  port = 9374;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+    pingInterval = mkOption {
+      type = goDuration;
+      default = "1s";
+      description = lib.mdDoc ''
+        Interval between pings.
+      '';
+    };
+    buckets = mkOption {
+      type = types.commas;
+      default = "5e-05,0.0001,0.0002,0.0004,0.0008,0.0016,0.0032,0.0064,0.0128,0.0256,0.0512,0.1024,0.2048,0.4096,0.8192,1.6384,3.2768,6.5536,13.1072,26.2144";
+      description = lib.mdDoc ''
+        List of buckets to use for the response duration histogram.
+      '';
+    };
+    hosts = mkOption {
+      type = with types; listOf str;
+      description = lib.mdDoc ''
+        List of endpoints to probe.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      AmbientCapabilities = [ "CAP_NET_RAW" ];
+      CapabilityBoundingSet = [ "CAP_NET_RAW" ];
+      ExecStart = ''
+        ${pkgs.prometheus-smokeping-prober}/bin/smokeping_prober \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --buckets ${cfg.buckets} \
+          --ping.interval ${cfg.pingInterval} \
+          --privileged \
+          ${concatStringsSep " \\\n  " cfg.extraFlags} \
+          ${concatStringsSep " " cfg.hosts}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
new file mode 100644
index 000000000000..edc6e4b5022a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/snmp.nix
@@ -0,0 +1,68 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.snmp;
+in
+{
+  port = 9116;
+  extraOpts = {
+    configurationPath = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to an snmp exporter configuration file. Mutually exclusive with the 'configuration' option.
+      '';
+      example = literalExpression "./snmp.yml";
+    };
+
+    configuration = mkOption {
+      type = types.nullOr types.attrs;
+      default = null;
+      description = lib.mdDoc ''
+        Snmp exporter configuration as a Nix attribute set. Mutually exclusive with the 'configurationPath' option.
+      '';
+      example = {
+        "default" = {
+          "version" = 2;
+          "auth" = {
+            "community" = "public";
+          };
+        };
+      };
+    };
+
+    logFormat = mkOption {
+      type = types.enum ["logfmt" "json"];
+      default = "logfmt";
+      description = lib.mdDoc ''
+        Output format of log messages.
+      '';
+    };
+
+    logLevel = mkOption {
+      type = types.enum ["debug" "info" "warn" "error"];
+      default = "info";
+      description = lib.mdDoc ''
+        Only log messages with the given severity or above.
+      '';
+    };
+  };
+  serviceOpts = let
+    configFile = if cfg.configurationPath != null
+                 then cfg.configurationPath
+                 else "${pkgs.writeText "snmp-exporter-conf.yml" (builtins.toJSON cfg.configuration)}";
+    in {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-snmp-exporter}/bin/snmp_exporter \
+          --config.file=${escapeShellArg configFile} \
+          --log.format=${escapeShellArg cfg.logFormat} \
+          --log.level=${cfg.logLevel} \
+          --web.listen-address=${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
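A sketch using the inline `configuration` form shown in the option example; `configuration` and `configurationPath` are mutually exclusive:

```nix
{
  services.prometheus.exporters.snmp = {
    enable = true;        # shared framework option
    logLevel = "warn";
    configuration = {
      default = {
        version = 2;
        auth.community = "public";
      };
    };
  };
}
```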
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/sql.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/sql.nix
new file mode 100644
index 000000000000..678bc348679d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/sql.nix
@@ -0,0 +1,108 @@
+{ config, lib, pkgs, options }:
+with lib;
+let
+  cfg = config.services.prometheus.exporters.sql;
+  cfgOptions = {
+    options = with types; {
+      jobs = mkOption {
+        type = attrsOf (submodule jobOptions);
+        default = { };
+        description = lib.mdDoc "An attrset of metrics scraping jobs to run.";
+      };
+    };
+  };
+  jobOptions = {
+    options = with types; {
+      interval = mkOption {
+        type = str;
+        description = lib.mdDoc ''
+          How often to run this job, specified in
+          [Go duration](https://golang.org/pkg/time/#ParseDuration) format.
+        '';
+      };
+      connections = mkOption {
+        type = listOf str;
+        description = lib.mdDoc "A list of connection strings of the SQL servers to scrape metrics from";
+      };
+      startupSql = mkOption {
+        type = listOf str;
+        default = [];
+        description = lib.mdDoc "A list of SQL statements to execute once after making a connection.";
+      };
+      queries = mkOption {
+        type = attrsOf (submodule queryOptions);
+        description = lib.mdDoc "SQL queries to run.";
+      };
+    };
+  };
+  queryOptions = {
+    options = with types; {
+      help = mkOption {
+        type = nullOr str;
+        default = null;
+        description = lib.mdDoc "A human-readable description of this metric.";
+      };
+      labels = mkOption {
+        type = listOf str;
+        default = [ ];
+        description = lib.mdDoc "A set of columns that will be used as Prometheus labels.";
+      };
+      query = mkOption {
+        type = str;
+        description = lib.mdDoc "The SQL query to run.";
+      };
+      values = mkOption {
+        type = listOf str;
+        description = lib.mdDoc "A set of columns that will be used as values of this metric.";
+      };
+    };
+  };
+
+  configFile =
+    if cfg.configFile != null
+    then cfg.configFile
+    else
+      let
+        nameInline = mapAttrsToList (k: v: v // { name = k; });
+        renameStartupSql = j: removeAttrs (j // { startup_sql = j.startupSql; }) [ "startupSql" ];
+        configuration = {
+          jobs = map renameStartupSql
+            (nameInline (mapAttrs (k: v: (v // { queries = nameInline v.queries; })) cfg.configuration.jobs));
+        };
+      in
+      builtins.toFile "config.yaml" (builtins.toJSON configuration);
+in
+{
+  extraOpts = {
+    configFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to configuration file.
+      '';
+    };
+    configuration = mkOption {
+      type = with types; nullOr (submodule cfgOptions);
+      default = null;
+      description = lib.mdDoc ''
+        Exporter configuration as nix attribute set. Mutually exclusive with 'configFile' option.
+      '';
+    };
+  };
+
+  port = 9237;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-sql-exporter}/bin/sql_exporter \
+          -web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          -config.file ${configFile} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      RestrictAddressFamilies = [
+        # Need AF_UNIX to collect data
+        "AF_UNIX"
+      ];
+    };
+  };
+}
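Finally, a hedged sketch of the job/query structure defined above; the connection string, query and column names are placeholders for an actual PostgreSQL setup:

```nix
{
  services.prometheus.exporters.sql = {
    enable = true;        # shared framework option
    configuration.jobs.pg_sizes = {
      interval = "5m";
      connections = [ "postgres://prometheus@/postgres?host=/run/postgresql&sslmode=disable" ];
      queries.database_sizes = {
        help = "Size of each database in bytes";
        labels = [ "datname" ];
        query = "SELECT datname, pg_database_size(datname) AS size FROM pg_database;";
        values = [ "size" ];
      };
    };
  };
}
```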
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix
new file mode 100644
index 000000000000..d9d732d8c125
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/statsd.nix
@@ -0,0 +1,19 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.statsd;
+in
+{
+  port = 9102;
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-statsd-exporter}/bin/statsd_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
new file mode 100644
index 000000000000..b1d6760b40b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/surfboard.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.surfboard;
+in
+{
+  port = 9239;
+  extraOpts = {
+    modemAddress = mkOption {
+      type = types.str;
+      default = "192.168.100.1";
+      description = lib.mdDoc ''
+        The hostname or IP of the cable modem.
+      '';
+    };
+  };
+  serviceOpts = {
+    description = "Prometheus exporter for surfboard cable modem";
+    unitConfig.Documentation = "https://github.com/ipstatic/surfboard_exporter";
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-surfboard-exporter}/bin/surfboard_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --modem-address ${cfg.modemAddress} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/systemd.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/systemd.nix
new file mode 100644
index 000000000000..2edd1de83e1b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/systemd.nix
@@ -0,0 +1,22 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let cfg = config.services.prometheus.exporters.systemd;
+
+in {
+  port = 9558;
+
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-systemd-exporter}/bin/systemd_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} ${concatStringsSep " " cfg.extraFlags}
+      '';
+      RestrictAddressFamilies = [
+        # Need AF_UNIX to collect data
+        "AF_UNIX"
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
new file mode 100644
index 000000000000..7a9167110a27
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/tor.nix
@@ -0,0 +1,44 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.tor;
+in
+{
+  port = 9130;
+  extraOpts = {
+    torControlAddress = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        Tor control IP address or hostname.
+      '';
+    };
+
+    torControlPort = mkOption {
+      type = types.port;
+      default = 9051;
+      description = lib.mdDoc ''
+        Tor control port.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-tor-exporter}/bin/prometheus-tor-exporter \
+          -b ${cfg.listenAddress} \
+          -p ${toString cfg.port} \
+          -a ${cfg.torControlAddress} \
+          -c ${toString cfg.torControlPort} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+
+    # CPython requires a process to either have $HOME defined or run as a UID
+    # defined in /etc/passwd. The latter is false with DynamicUser, so define a
+    # dummy $HOME. https://bugs.python.org/issue10496
+    environment = { HOME = "/var/empty"; };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
new file mode 100644
index 000000000000..f2336429d42f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unbound.nix
@@ -0,0 +1,95 @@
+{ config
+, lib
+, pkgs
+, options
+}:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.unbound;
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "controlInterface" ] "This option was removed, use the `unbound.host` option instead.")
+    (mkRemovedOptionModule [ "fetchType" ] "This option was removed, use the `unbound.host` option instead.")
+    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+
+  port = 9167;
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    unbound = {
+      ca = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_server.pem";
+        example = null;
+        description = ''
+          Path to the Unbound server certificate authority
+        '';
+      };
+
+      certificate = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_control.pem";
+        example = null;
+        description = ''
+          Path to the Unbound control socket certificate
+        '';
+      };
+
+      key = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/unbound/unbound_control.key";
+        example = null;
+        description = ''
+          Path to the Unbound control socket key.
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "tcp://127.0.0.1:8953";
+        example = "unix:///run/unbound/unbound.socket";
+        description = lib.mdDoc ''
+          Address of the Unbound control socket. Unix domain sockets and the TCP interface are both supported.
+        '';
+      };
+    };
+  };
+
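+  # As read from the merge below: the exporter always runs as the `unbound`
+  # user (so it can read unbound_control.key); when the local Unbound service
+  # is enabled the unit is ordered after and requires unbound.service,
+  # otherwise the user is allocated via DynamicUser instead.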
+  serviceOpts = mkMerge ([{
+    serviceConfig = {
+      User = "unbound"; # to access the unbound_control.key
+      ExecStart = ''
+        ${pkgs.prometheus-unbound-exporter}/bin/unbound_exporter \
+          --unbound.host "${cfg.unbound.host}" \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          ${optionalString (cfg.unbound.ca != null) "--unbound.ca ${cfg.unbound.ca}"} \
+          ${optionalString (cfg.unbound.certificate != null) "--unbound.cert ${cfg.unbound.certificate}"} \
+          ${optionalString (cfg.unbound.key != null) "--unbound.key ${cfg.unbound.key}"} \
+          ${toString cfg.extraFlags}
+      '';
+      RestrictAddressFamilies = [
+        "AF_UNIX"
+        "AF_INET"
+        "AF_INET6"
+      ];
+    } // optionalAttrs (!config.services.unbound.enable) {
+      DynamicUser = true;
+    };
+  }] ++ [
+    (mkIf config.services.unbound.enable {
+      after = [ "unbound.service" ];
+      requires = [ "unbound.service" ];
+    })
+  ]);
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
new file mode 100644
index 000000000000..70f26d9783be
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unifi.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.unifi;
+in
+{
+  port = 9130;
+  extraOpts = {
+    unifiAddress = mkOption {
+      type = types.str;
+      example = "https://10.0.0.1:8443";
+      description = lib.mdDoc ''
+        URL of the UniFi Controller API.
+      '';
+    };
+
+    unifiInsecure = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If enabled, skip verification of the TLS certificate of the UniFi Controller API.
+        Use with caution.
+      '';
+    };
+
+    unifiUsername = mkOption {
+      type = types.str;
+      example = "ReadOnlyUser";
+      description = lib.mdDoc ''
+        Username for authentication against the UniFi Controller API.
+      '';
+    };
+
+    unifiPassword = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Password for authentication against the UniFi Controller API.
+      '';
+    };
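+      # Note: the password above is interpolated into the ExecStart command
+      # line below, so it ends up in the world-readable Nix store.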
+
+    unifiTimeout = mkOption {
+      type = types.str;
+      default = "5s";
+      example = "2m";
+      description = lib.mdDoc ''
+        Timeout, including unit, for UniFi Controller API requests.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-unifi-exporter}/bin/unifi_exporter \
+          -telemetry.addr ${cfg.listenAddress}:${toString cfg.port} \
+          -unifi.addr ${cfg.unifiAddress} \
+          -unifi.username ${escapeShellArg cfg.unifiUsername} \
+          -unifi.password ${escapeShellArg cfg.unifiPassword} \
+          -unifi.timeout ${cfg.unifiTimeout} \
+          ${optionalString cfg.unifiInsecure "-unifi.insecure" } \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix
new file mode 100644
index 000000000000..3b7f978528cd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/unpoller.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.unpoller;
+
+  configFile = pkgs.writeText "prometheus-unpoller-exporter.json" (generators.toJSON {} {
+    poller = { inherit (cfg.log) debug quiet; };
+    unifi = { inherit (cfg) controllers; };
+    influxdb.disable = true;
+    datadog.disable = true; # workaround for https://github.com/unpoller/unpoller/issues/442
+    prometheus = {
+      http_listen = "${cfg.listenAddress}:${toString cfg.port}";
+      report_errors = cfg.log.prometheusErrors;
+    };
+    inherit (cfg) loki;
+  });
+
+in {
+  port = 9130;
+
+  extraOpts = {
+    inherit (options.services.unpoller.unifi) controllers;
+    inherit (options.services.unpoller) loki;
+    log = {
+      debug = mkEnableOption (lib.mdDoc "debug logging including line numbers, high resolution timestamps, per-device logs");
+      quiet = mkEnableOption (lib.mdDoc "startup and error logs only");
+      prometheusErrors = mkEnableOption (lib.mdDoc "emitting errors to prometheus");
+    };
+  };
+
+  serviceOpts.serviceConfig = {
+    ExecStart = "${pkgs.unpoller}/bin/unpoller --config ${configFile}";
+    DynamicUser = false;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix
new file mode 100644
index 000000000000..a019157c664b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/v2ray.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.v2ray;
+in
+{
+  port = 9299;
+  extraOpts = {
+    v2rayEndpoint = mkOption {
+      type = types.str;
+      default = "127.0.0.1:54321";
+      description = lib.mdDoc ''
+        The v2ray gRPC API endpoint.
+      '';
+    };
+  };
+  serviceOpts = {
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-v2ray-exporter}/bin/v2ray-exporter \
+          --v2ray-endpoint ${cfg.v2rayEndpoint} \
+          --listen ${cfg.listenAddress}:${toString cfg.port} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
new file mode 100644
index 000000000000..a7e5b41dffc6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/varnish.nix
@@ -0,0 +1,89 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.varnish;
+in
+{
+  port = 9131;
+  extraOpts = {
+    noExit = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Do not exit server on Varnish scrape errors.
+      '';
+    };
+    withGoMetrics = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Export go runtime and http handler metrics.
+      '';
+    };
+    verbose = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable verbose logging.
+      '';
+    };
+    raw = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable raw stdout logging without timestamps.
+      '';
+    };
+    varnishStatPath = mkOption {
+      type = types.str;
+      default = "varnishstat";
+      description = lib.mdDoc ''
+        Path to varnishstat.
+      '';
+    };
+    instance = mkOption {
+      type = types.nullOr types.str;
+      default = config.services.varnish.stateDir;
+      defaultText = lib.literalExpression "config.services.varnish.stateDir";
+      description = lib.mdDoc ''
+        varnishstat -n value.
+      '';
+    };
+    healthPath = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Path under which to expose healthcheck. Disabled unless configured.
+      '';
+    };
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+  };
+  serviceOpts = {
+    path = [ config.services.varnish.package ];
+    serviceConfig = {
+      RestartSec = mkDefault 1;
+      DynamicUser = false;
+      ExecStart = ''
+        ${pkgs.prometheus-varnish-exporter}/bin/prometheus_varnish_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          --varnishstat-path ${escapeShellArg cfg.varnishStatPath} \
+          ${concatStringsSep " \\\n  " (cfg.extraFlags
+            ++ optional (cfg.healthPath != null) "--web.health-path ${cfg.healthPath}"
+            ++ optional (cfg.instance != null) "-n ${escapeShellArg cfg.instance}"
+            ++ optional cfg.noExit "--no-exit"
+            ++ optional cfg.withGoMetrics "--with-go-metrics"
+            ++ optional cfg.verbose "--verbose"
+            ++ optional cfg.raw "--raw")}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix
new file mode 100644
index 000000000000..9b7590314936
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/wireguard.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.wireguard;
+in {
+  port = 9586;
+  imports = [
+    (mkRenamedOptionModule [ "addr" ] [ "listenAddress" ])
+    ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+  ];
+  extraOpts = {
+    verbose = mkEnableOption (lib.mdDoc "verbose logging mode for prometheus-wireguard-exporter");
+
+    wireguardConfig = mkOption {
+      type = with types; nullOr (either path str);
+      default = null;
+
+      description = lib.mdDoc ''
+        Path to the WireGuard config used to
+        [add the peer's name to the stats of a peer](https://github.com/MindFlavor/prometheus_wireguard_exporter/tree/2.0.0#usage).
+
+        Please note that `networking.wg-quick` is required for this feature
+        as `networking.wireguard` uses
+        {manpage}`wg(8)`
+        to set the peers up.
+      '';
+    };
+
+    singleSubnetPerField = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        By default, all allowed IPs and subnets are comma-separated in the
+        `allowed_ips` field. With this option enabled,
+        a single IP and subnet will be listed in fields like `allowed_ip_0`,
+        `allowed_ip_1` and so on.
+      '';
+    };
+
+    withRemoteIp = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether or not the remote IP of a WireGuard peer should be exposed via prometheus.
+      '';
+    };
+  };
+  serviceOpts = {
+    path = [ pkgs.wireguard-tools ];
+
+    serviceConfig = {
+      AmbientCapabilities = [ "CAP_NET_ADMIN" ];
+      CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
+      ExecStart = ''
+        ${pkgs.prometheus-wireguard-exporter}/bin/prometheus_wireguard_exporter \
+          -p ${toString cfg.port} \
+          -l ${cfg.listenAddress} \
+          ${optionalString cfg.verbose "-v true"} \
+          ${optionalString cfg.singleSubnetPerField "-s true"} \
+          ${optionalString cfg.withRemoteIp "-r true"} \
+          ${optionalString (cfg.wireguardConfig != null) "-n ${escapeShellArg cfg.wireguardConfig}"}
+      '';
+      RestrictAddressFamilies = [
+        # Need AF_NETLINK to collect data
+        "AF_NETLINK"
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/zfs.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/zfs.nix
new file mode 100644
index 000000000000..ff12a52d49a9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/exporters/zfs.nix
@@ -0,0 +1,44 @@
+{ config, lib, pkgs, options }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.exporters.zfs;
+in
+{
+  port = 9134;
+
+  extraOpts = {
+    telemetryPath = mkOption {
+      type = types.str;
+      default = "/metrics";
+      description = lib.mdDoc ''
+        Path under which to expose metrics.
+      '';
+    };
+
+    pools = mkOption {
+      type = with types; nullOr (listOf str);
+      default = [ ];
+      description = lib.mdDoc ''
+        Names of the ZFS pools to collect metrics for. An empty list (the default) collects all pools.
+      '';
+    };
+  };
+
+  serviceOpts = {
+    # needs zpool
+    path = [ config.boot.zfs.package ];
+    serviceConfig = {
+      ExecStart = ''
+        ${pkgs.prometheus-zfs-exporter}/bin/zfs_exporter \
+          --web.listen-address ${cfg.listenAddress}:${toString cfg.port} \
+          --web.telemetry-path ${cfg.telemetryPath} \
+          ${concatMapStringsSep " " (x: "--pool=${x}") cfg.pools} \
+          ${concatStringsSep " \\\n  " cfg.extraFlags}
+      '';
+      ProtectClock = false;
+      PrivateDevices = false;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix
new file mode 100644
index 000000000000..f5c114c92752
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/pushgateway.nix
@@ -0,0 +1,166 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.pushgateway;
+
+  cmdlineArgs =
+       opt "web.listen-address" cfg.web.listen-address
+    ++ opt "web.telemetry-path" cfg.web.telemetry-path
+    ++ opt "web.external-url" cfg.web.external-url
+    ++ opt "web.route-prefix" cfg.web.route-prefix
+    ++ optional cfg.persistMetrics ''--persistence.file="/var/lib/${cfg.stateDir}/metrics"''
+    ++ opt "persistence.interval" cfg.persistence.interval
+    ++ opt "log.level" cfg.log.level
+    ++ opt "log.format" cfg.log.format
+    ++ cfg.extraFlags;
+
+  opt = k : v : optional (v != null) ''--${k}="${v}"'';
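+  # e.g. opt "web.telemetry-path" "/metrics" -> [ ''--web.telemetry-path="/metrics"'' ],
+  # while opt "web.telemetry-path" null      -> [ ].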
+
+in {
+  options = {
+    services.prometheus.pushgateway = {
+      enable = mkEnableOption (lib.mdDoc "Prometheus Pushgateway");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.prometheus-pushgateway;
+        defaultText = literalExpression "pkgs.prometheus-pushgateway";
+        description = lib.mdDoc ''
+          Package that should be used for the prometheus pushgateway.
+        '';
+      };
+
+      web.listen-address = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Address to listen on for the web interface, API and telemetry.
+
+          `null` will default to `:9091`.
+        '';
+      };
+
+      web.telemetry-path = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Path under which to expose metrics.
+
+          `null` will default to `/metrics`.
+        '';
+      };
+
+      web.external-url = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The URL under which Pushgateway is externally reachable.
+        '';
+      };
+
+      web.route-prefix = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Prefix for the internal routes of web endpoints.
+
+          Defaults to the path of
+          {option}`services.prometheus.pushgateway.web.external-url`.
+        '';
+      };
+
+      persistence.interval = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "10m";
+        description = lib.mdDoc ''
+          The minimum interval at which to write out the persistence file.
+
+          `null` will default to `5m`.
+        '';
+      };
+
+      log.level = mkOption {
+        type = types.nullOr (types.enum ["debug" "info" "warn" "error" "fatal"]);
+        default = null;
+        description = lib.mdDoc ''
+          Only log messages with the given severity or above.
+
+          `null` will default to `info`.
+        '';
+      };
+
+      log.format = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "logger:syslog?appname=bob&local=7";
+        description = lib.mdDoc ''
+          Set the log target and format.
+
+          `null` will default to `logger:stderr`.
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Extra command-line options when launching the Pushgateway.
+        '';
+      };
+
+      persistMetrics = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to persist metrics to a file.
+
+          When enabled metrics will be saved to a file called
+          `metrics` in the directory
+          `/var/lib/pushgateway`. The directory below
+          `/var/lib` can be set using
+          {option}`services.prometheus.pushgateway.stateDir`.
+        '';
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "pushgateway";
+        description = lib.mdDoc ''
+          Directory below `/var/lib` to store metrics.
+
+          This directory will be created automatically using systemd's
+          StateDirectory mechanism when
+          {option}`services.prometheus.pushgateway.persistMetrics`
+          is enabled.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !hasPrefix "/" cfg.stateDir;
+        message =
+          "The option services.prometheus.pushgateway.stateDir" +
+          " shouldn't be an absolute directory." +
+          " It should be a directory relative to /var/lib.";
+      }
+    ];
+    systemd.services.pushgateway = {
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" ];
+      serviceConfig = {
+        Restart  = "always";
+        DynamicUser = true;
+        ExecStart = "${cfg.package}/bin/pushgateway" +
+          optionalString (length cmdlineArgs != 0) (" \\\n  " +
+            concatStringsSep " \\\n  " cmdlineArgs);
+        StateDirectory = if cfg.persistMetrics then cfg.stateDir else null;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/sachet.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/sachet.nix
new file mode 100644
index 000000000000..c908d599bd4e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/sachet.nix
@@ -0,0 +1,88 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.sachet;
+  configFile = pkgs.writeText "sachet.yml" (builtins.toJSON cfg.configuration);
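+  # The toJSON output is used directly as sachet.yml; this relies on JSON
+  # being a subset of YAML.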
+in
+{
+  options = {
+    services.prometheus.sachet = {
+      enable = mkEnableOption (lib.mdDoc "Sachet, an SMS alerting tool for the Prometheus Alertmanager");
+
+      configuration = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        example = literalExpression ''
+          {
+            providers = {
+              twilio = {
+                # environment variables get expanded at runtime
+                account_sid = "$TWILIO_ACCOUNT";
+                auth_token = "$TWILIO_TOKEN";
+              };
+            };
+            templates = [ ./some-template.tmpl ];
+            receivers = [{
+              name = "pager";
+              provider = "twilio";
+              to = [ "+33123456789" ];
+              text = "{{ template \"message\" . }}";
+            }];
+          }
+        '';
+        description = lib.mdDoc ''
+          Sachet's configuration as a Nix attribute set.
+        '';
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          The address Sachet will listen on.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 9876;
+        description = lib.mdDoc ''
+          The port Sachet will listen on.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = singleton {
+      assertion = cfg.configuration != null;
+      message = "Cannot enable Sachet without a configuration.";
+    };
+
+    systemd.services.sachet = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "network-online.target" ];
+      script = ''
+        ${pkgs.envsubst}/bin/envsubst -i "${configFile}" > /tmp/sachet.yaml
+        exec ${pkgs.prometheus-sachet}/bin/sachet -config /tmp/sachet.yaml -listen-address ${cfg.address}:${builtins.toString cfg.port}
+      '';
+
+      serviceConfig = {
+        Restart = "always";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+
+        DynamicUser = true;
+        PrivateTmp = true;
+        WorkingDirectory = "/tmp/";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix b/nixpkgs/nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix
new file mode 100644
index 000000000000..4545ca37d278
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/prometheus/xmpp-alerts.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.prometheus.xmpp-alerts;
+  settingsFormat = pkgs.formats.yaml {};
+  configFile = settingsFormat.generate "prometheus-xmpp-alerts.yml" cfg.settings;
+in
+{
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "prometheus" "xmpp-alerts" "configuration" ]
+      [ "services" "prometheus" "xmpp-alerts" "settings" ])
+  ];
+
+  options.services.prometheus.xmpp-alerts = {
+    enable = mkEnableOption (lib.mdDoc "XMPP Web hook service for Alertmanager");
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+
+      description = lib.mdDoc ''
+        Configuration for prometheus xmpp-alerts, see
+        <https://github.com/jelmer/prometheus-xmpp-alerts/blob/master/xmpp-alerts.yml.example>
+        for supported values.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.prometheus-xmpp-alerts = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.prometheus-xmpp-alerts}/bin/prometheus-xmpp-alerts --config ${configFile}";
+        Restart = "on-failure";
+        DynamicUser = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHome = true;
+        ProtectSystem = "strict";
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        NoNewPrivileges = true;
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        SystemCallFilter = [ "@system-service" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix b/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix
new file mode 100644
index 000000000000..1ca8af14e777
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/riemann-dash.nix
@@ -0,0 +1,81 @@
+{ config, pkgs, lib, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.services.riemann-dash;
+
+  conf = writeText "config.rb" ''
+    riemann_base = "${cfg.dataDir}"
+    config.store[:ws_config] = "#{riemann_base}/config/config.json"
+    ${cfg.config}
+  '';
+
+  launcher = writeScriptBin "riemann-dash" ''
+    #!/bin/sh
+    exec ${pkgs.riemann-dash}/bin/riemann-dash ${conf}
+  '';
+
+in {
+
+  options = {
+
+    services.riemann-dash = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the riemann-dash dashboard daemon.
+        '';
+      };
+      config = mkOption {
+        type = types.lines;
+        description = lib.mdDoc ''
+          Contents added to the end of the riemann-dash configuration file.
+        '';
+      };
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/riemann-dash";
+        description = lib.mdDoc ''
+          Location of the riemann-base dir. The dashboard configuration file
+          is stored in this directory. The directory is created automatically
+          on service start, and its owner is set to the riemanndash user.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.groups.riemanndash.gid = config.ids.gids.riemanndash;
+
+    users.users.riemanndash = {
+      description = "riemann-dash daemon user";
+      uid = config.ids.uids.riemanndash;
+      group = "riemanndash";
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - riemanndash riemanndash - -"
+    ];
+
+    systemd.services.riemann-dash = {
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "riemann.service" ];
+      after = [ "riemann.service" ];
+      preStart = ''
+        mkdir -p '${cfg.dataDir}/config'
+      '';
+      serviceConfig = {
+        User = "riemanndash";
+        ExecStart = "${launcher}/bin/riemann-dash";
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix b/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix
new file mode 100644
index 000000000000..28821267b4f3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/riemann-tools.nix
@@ -0,0 +1,70 @@
+{ config, pkgs, lib, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.services.riemann-tools;
+
+  riemannHost = cfg.riemannHost;
+
+  healthLauncher = writeScriptBin "riemann-health" ''
+    #!/bin/sh
+    exec ${pkgs.riemann-tools}/bin/riemann-health ${builtins.concatStringsSep " " cfg.extraArgs} --host ${riemannHost}
+  '';
+
+
+in {
+
+  options = {
+
+    services.riemann-tools = {
+      enableHealth = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the riemann-health daemon.
+        '';
+      };
+      riemannHost = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          Address of the Riemann host to report to. Defaults to localhost.
+        '';
+      };
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          A list of command-line switches forwarded to a riemann-tool.
+          See for example `riemann-health --help` for available options.
+        '';
+        example = ["-p 5555" "--timeout=30" "--attribute=myattribute=42"];
+      };
+    };
+  };
+
+  config = mkIf cfg.enableHealth {
+
+    users.groups.riemanntools.gid = config.ids.gids.riemanntools;
+
+    users.users.riemanntools = {
+      description = "riemann-tools daemon user";
+      uid = config.ids.uids.riemanntools;
+      group = "riemanntools";
+    };
+
+    systemd.services.riemann-health = {
+      wantedBy = [ "multi-user.target" ];
+      path = [ procps ];
+      serviceConfig = {
+        User = "riemanntools";
+        ExecStart = "${healthLauncher}/bin/riemann-health";
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/riemann.nix b/nixpkgs/nixos/modules/services/monitoring/riemann.nix
new file mode 100644
index 000000000000..7ab8af85ed79
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/riemann.nix
@@ -0,0 +1,100 @@
+{ config, pkgs, lib, ... }:
+
+with pkgs;
+with lib;
+
+let
+
+  cfg = config.services.riemann;
+
+  classpath = concatStringsSep ":" (
+    cfg.extraClasspathEntries ++ [ "${riemann}/share/java/riemann.jar" ]
+  );
+
+  riemannConfig = concatStringsSep "\n" (
+    [cfg.config] ++ (map (f: ''(load-file "${f}")'') cfg.configFiles)
+  );
+
+  launcher = writeScriptBin "riemann" ''
+    #!/bin/sh
+    exec ${jdk}/bin/java ${concatStringsSep " " cfg.extraJavaOpts} \
+      -cp ${classpath} \
+      riemann.bin ${cfg.configFile}
+  '';
+
+in {
+
+  options = {
+
+    services.riemann = {
+      enable = mkEnableOption (lib.mdDoc "Riemann network monitoring daemon");
+
+      config = mkOption {
+        type = types.lines;
+        description = lib.mdDoc ''
+          Contents of the Riemann configuration file. For more complicated
+          configurations you should use configFile.
+        '';
+      };
+      configFiles = mkOption {
+        type = with types; listOf path;
+        default = [];
+        description = lib.mdDoc ''
+          Extra files containing Riemann configuration. These files will be
+          loaded at runtime by Riemann (with Clojure's
+          `load-file` function) at the end of the
+          configuration if you use the config option, this is ignored if you
+          use configFile.
+        '';
+      };
+      configFile = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          A Riemann config file. Any files in the same directory as this file
+          will be added to the classpath by Riemann.
+        '';
+      };
+      extraClasspathEntries = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Extra entries added to the Java classpath when running Riemann.
+        '';
+      };
+      extraJavaOpts = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Extra Java options used when launching Riemann.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users.groups.riemann.gid = config.ids.gids.riemann;
+
+    users.users.riemann = {
+      description = "riemann daemon user";
+      uid = config.ids.uids.riemann;
+      group = "riemann";
+    };
+
+    services.riemann.configFile = mkDefault (
+      writeText "riemann-config.clj" riemannConfig
+    );
+
+    systemd.services.riemann = {
+      wantedBy = [ "multi-user.target" ];
+      path = [ inetutils ];
+      serviceConfig = {
+        User = "riemann";
+        ExecStart = "${launcher}/bin/riemann";
+        LimitNOFILE = 65536;
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/scollector.nix b/nixpkgs/nixos/modules/services/monitoring/scollector.nix
new file mode 100644
index 000000000000..48be309c9599
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/scollector.nix
@@ -0,0 +1,134 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.scollector;
+
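+  # Builds a $out/<frequency>/<collector> symlink tree for scollector's ColDir;
+  # per the `collectors` option below, the "0" frequency directory holds
+  # collectors that run forever.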
+  collectors = pkgs.runCommand "collectors" { preferLocalBuild = true; }
+    ''
+    mkdir -p $out
+    ${lib.concatStringsSep
+        "\n"
+        (lib.mapAttrsToList
+          (frequency: binaries:
+            "mkdir -p $out/${frequency}\n" +
+            (lib.concatStringsSep
+              "\n"
+              (map (path: "ln -s ${path} $out/${frequency}/$(basename ${path})")
+                   binaries)))
+          cfg.collectors)}
+    '';
+
+  conf = pkgs.writeText "scollector.toml" ''
+    Host = "${cfg.bosunHost}"
+    ColDir = "${collectors}"
+    ${cfg.extraConfig}
+  '';
+
+in {
+
+  options = {
+
+    services.scollector = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run scollector.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.scollector;
+        defaultText = literalExpression "pkgs.scollector";
+        description = lib.mdDoc ''
+          scollector binary to use.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "scollector";
+        description = lib.mdDoc ''
+          User account under which scollector runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "scollector";
+        description = lib.mdDoc ''
+          Group account under which scollector runs.
+        '';
+      };
+
+      bosunHost = mkOption {
+        type = types.str;
+        default = "localhost:8070";
+        description = lib.mdDoc ''
+          Host and port of the bosun server that will store the collected
+          data.
+        '';
+      };
+
+      collectors = mkOption {
+        type = with types; attrsOf (listOf path);
+        default = {};
+        example = literalExpression ''{ "0" = [ "''${postgresStats}/bin/collect-stats" ]; }'';
+        description = lib.mdDoc ''
+          An attribute set mapping the frequency of collection to a list of
+          binaries that should be executed at that frequency. You can use "0"
+          to run a binary forever.
+        '';
+      };
+
+      extraOpts = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "-d" ];
+        description = lib.mdDoc ''
+          Extra scollector command line options
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra scollector configuration added to the end of scollector.toml
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf config.services.scollector.enable {
+
+    systemd.services.scollector = {
+      description = "scollector metrics collector (part of Bosun)";
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ pkgs.coreutils pkgs.iproute2 ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/scollector -conf=${conf} ${lib.concatStringsSep " " cfg.extraOpts}";
+      };
+    };
+
+    users.users.scollector = {
+      description = "scollector user";
+      group = "scollector";
+      uid = config.ids.uids.scollector;
+    };
+
+    users.groups.scollector.gid = config.ids.gids.scollector;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/smartd.nix b/nixpkgs/nixos/modules/services/monitoring/smartd.nix
new file mode 100644
index 000000000000..8b79ac0e0c1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/smartd.nix
@@ -0,0 +1,252 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  host = config.networking.fqdnOrHostName;
+
+  cfg = config.services.smartd;
+  opt = options.services.smartd;
+
+  nm = cfg.notifications.mail;
+  nw = cfg.notifications.wall;
+  nx = cfg.notifications.x11;
+
+  smartdNotify = pkgs.writeScript "smartd-notify.sh" ''
+    #! ${pkgs.runtimeShell}
+    ${optionalString nm.enable ''
+      {
+      ${pkgs.coreutils}/bin/cat << EOF
+      From: smartd on ${host} <${nm.sender}>
+      To: ${nm.recipient}
+      Subject: $SMARTD_SUBJECT
+
+      $SMARTD_FULLMESSAGE
+      EOF
+
+      ${pkgs.smartmontools}/sbin/smartctl -a -d "$SMARTD_DEVICETYPE" "$SMARTD_DEVICE"
+      } | ${nm.mailer} -i "${nm.recipient}"
+    ''}
+    ${optionalString nw.enable ''
+      {
+      ${pkgs.coreutils}/bin/cat << EOF
+      Problem detected with disk: $SMARTD_DEVICESTRING
+      Warning message from smartd is:
+
+      $SMARTD_MESSAGE
+      EOF
+      } | ${pkgs.util-linux}/bin/wall 2>/dev/null
+    ''}
+    ${optionalString nx.enable ''
+      export DISPLAY=${nx.display}
+      {
+      ${pkgs.coreutils}/bin/cat << EOF
+      Problem detected with disk: $SMARTD_DEVICESTRING
+      Warning message from smartd is:
+
+      $SMARTD_FULLMESSAGE
+      EOF
+      } | ${pkgs.xorg.xmessage}/bin/xmessage -file - 2>/dev/null &
+    ''}
+  '';
+
+  notifyOpts = optionalString (nm.enable || nw.enable || nx.enable)
+    ("-m <nomailer> -M exec ${smartdNotify} " + optionalString cfg.notifications.test "-M test ");
+
+  smartdConf = pkgs.writeText "smartd.conf" ''
+    # Autogenerated smartd startup config file
+    DEFAULT ${notifyOpts}${cfg.defaults.monitored}
+
+    ${concatMapStringsSep "\n" (d: "${d.device} ${d.options}") cfg.devices}
+
+    ${optionalString cfg.autodetect
+       "DEVICESCAN ${notifyOpts}${cfg.defaults.autodetected}"}
+  '';
+
+  smartdDeviceOpts = { ... }: {
+
+    options = {
+
+      device = mkOption {
+        example = "/dev/sda";
+        type = types.str;
+        description = lib.mdDoc "Location of the device.";
+      };
+
+      options = mkOption {
+        default = "";
+        example = "-d sat";
+        type = types.separatedString " ";
+        description = lib.mdDoc "Options that determine how smartd monitors the device.";
+      };
+
+    };
+
+  };
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    services.smartd = {
+
+      enable = mkEnableOption (lib.mdDoc "smartd daemon from `smartmontools` package");
+
+      autodetect = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether smartd should monitor all devices connected to the
+          machine at the time it's being started (the default).
+
+          Set to false to monitor the devices listed in
+          {option}`services.smartd.devices` only.
+        '';
+      };
+
+      extraOptions = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = ["-A /var/log/smartd/" "--interval=3600"];
+        description = lib.mdDoc ''
+          Extra command-line options passed to the `smartd`
+          daemon on startup.
+
+          (See `man 8 smartd`.)
+        '';
+      };
+
+      notifications = {
+
+        mail = {
+          enable = mkOption {
+            default = config.services.mail.sendmailSetuidWrapper != null;
+            defaultText = literalExpression "config.services.mail.sendmailSetuidWrapper != null";
+            type = types.bool;
+            description = lib.mdDoc "Whenever to send e-mail notifications.";
+          };
+
+          sender = mkOption {
+            default = "root";
+            example = "example@domain.tld";
+            type = types.str;
+            description = lib.mdDoc ''
+              Sender of the notification messages.
+              Acts as the value of `email` in the emails' `From: ...` field.
+            '';
+          };
+
+          recipient = mkOption {
+            default = "root";
+            type = types.str;
+            description = lib.mdDoc "Recipient of the notification messages.";
+          };
+
+          mailer = mkOption {
+            default = "/run/wrappers/bin/sendmail";
+            type = types.path;
+            description = lib.mdDoc ''
+              Sendmail-compatible binary to be used to send the messages.
+
+              You should probably enable
+              {option}`services.postfix` or some other MTA for
+              this to work.
+            '';
+          };
+        };
+
+        wall = {
+          enable = mkOption {
+            default = true;
+            type = types.bool;
+            description = lib.mdDoc "Whenever to send wall notifications to all users.";
+          };
+        };
+
+        x11 = {
+          enable = mkOption {
+            default = config.services.xserver.enable;
+            defaultText = literalExpression "config.services.xserver.enable";
+            type = types.bool;
+            description = lib.mdDoc "Whenever to send X11 xmessage notifications.";
+          };
+
+          display = mkOption {
+            default = ":${toString config.services.xserver.display}";
+            defaultText = literalExpression ''":''${toString config.services.xserver.display}"'';
+            type = types.str;
+            description = lib.mdDoc "DISPLAY to send X11 notifications to.";
+          };
+        };
+
+        test = mkOption {
+          default = false;
+          type = types.bool;
+          description = lib.mdDoc "Whenever to send a test notification on startup.";
+        };
+
+      };
+
+      defaults = {
+        monitored = mkOption {
+          default = "-a";
+          type = types.separatedString " ";
+          example = "-a -o on -s (S/../.././02|L/../../7/04)";
+          description = lib.mdDoc ''
+            Common default options for explicitly monitored (listed in
+            {option}`services.smartd.devices`) devices.
+
+            The default value turns on monitoring of all the things (see
+            `man 5 smartd.conf`).
+
+            The example also turns on SMART Automatic Offline Testing on
+            startup, and schedules short self-tests daily, and long
+            self-tests weekly.
+          '';
+        };
+
+        autodetected = mkOption {
+          default = cfg.defaults.monitored;
+          defaultText = literalExpression "config.${opt.defaults.monitored}";
+          type = types.separatedString " ";
+          description = lib.mdDoc ''
+            Like {option}`services.smartd.defaults.monitored`, but for the
+            autodetected devices.
+          '';
+        };
+      };
+
+      devices = mkOption {
+        default = [];
+        example = [ { device = "/dev/sda"; } { device = "/dev/sdb"; options = "-d sat"; } ];
+        type = with types; listOf (submodule smartdDeviceOpts);
+        description = lib.mdDoc "List of devices to monitor.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [ {
+      assertion = cfg.autodetect || cfg.devices != [];
+      message = "smartd can't run with both disabled autodetect and an empty list of devices to monitor.";
+    } ];
+
+    systemd.services.smartd = {
+      description = "S.M.A.R.T. Daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.ExecStart = "${pkgs.smartmontools}/sbin/smartd ${lib.concatStringsSep " " cfg.extraOptions} --no-fork --configfile=${smartdConf}";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/statsd.nix b/nixpkgs/nixos/modules/services/monitoring/statsd.nix
new file mode 100644
index 000000000000..bbc1c7146a84
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/statsd.nix
@@ -0,0 +1,149 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.statsd;
+
+  isBuiltinBackend = name:
+    builtins.elem name [ "graphite" "console" "repeater" ];
+
+  backendsToPackages = let
+    mkMap = list: name:
+      if isBuiltinBackend name then list
+      else list ++ [ pkgs.nodePackages.${name} ];
+  in foldl mkMap [];
+
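+  # The generated file is a JavaScript-style object literal (unquoted keys)
+  # rather than strict JSON, matching the example configs shipped with statsd,
+  # which evaluates its config file as JavaScript.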
+  configFile = pkgs.writeText "statsd.conf" ''
+    {
+      address: "${cfg.listenAddress}",
+      port: "${toString cfg.port}",
+      mgmt_address: "${cfg.mgmt_address}",
+      mgmt_port: "${toString cfg.mgmt_port}",
+      backends: [${
+        concatMapStringsSep "," (name:
+          if (isBuiltinBackend name)
+          then ''"./backends/${name}"''
+          else ''"${name}"''
+        ) cfg.backends}],
+      ${optionalString (cfg.graphiteHost!=null) ''graphiteHost: "${cfg.graphiteHost}",''}
+      ${optionalString (cfg.graphitePort!=null) ''graphitePort: "${toString cfg.graphitePort}",''}
+      console: {
+        prettyprint: false
+      },
+      log: {
+        backend: "stdout"
+      },
+      automaticConfigReload: false${optionalString (cfg.extraConfig != null) ","}
+      ${cfg.extraConfig}
+    }
+  '';
+
+  deps = pkgs.buildEnv {
+    name = "statsd-runtime-deps";
+    pathsToLink = [ "/lib" ];
+    ignoreCollisions = true;
+
+    paths = backendsToPackages cfg.backends;
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options.services.statsd = {
+
+    enable = mkEnableOption (lib.mdDoc "statsd");
+
+    listenAddress = mkOption {
+      description = lib.mdDoc "Address that statsd listens on over UDP";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    port = mkOption {
+      description = lib.mdDoc "Port that stats listens for messages on over UDP";
+      default = 8125;
+      type = types.int;
+    };
+
+    mgmt_address = mkOption {
+      description = lib.mdDoc "Address to run management TCP interface on";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    mgmt_port = mkOption {
+      description = lib.mdDoc "Port to run the management TCP interface on";
+      default = 8126;
+      type = types.int;
+    };
+
+    backends = mkOption {
+      description = lib.mdDoc "List of backends statsd will use for data persistence";
+      default = [];
+      example = [
+        "graphite"
+        "console"
+        "repeater"
+        "statsd-librato-backend"
+        "stackdriver-statsd-backend"
+        "statsd-influxdb-backend"
+      ];
+      type = types.listOf types.str;
+    };
+
+    graphiteHost = mkOption {
+      description = lib.mdDoc "Hostname or IP of Graphite server";
+      default = null;
+      type = types.nullOr types.str;
+    };
+
+    graphitePort = mkOption {
+      description = lib.mdDoc "Port of Graphite server (i.e. carbon-cache).";
+      default = null;
+      type = types.nullOr types.int;
+    };
+
+    extraConfig = mkOption {
+      description = lib.mdDoc "Extra configuration options for statsd";
+      default = "";
+      type = types.nullOr types.str;
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = map (backend: {
+      assertion = !isBuiltinBackend backend -> hasAttrByPath [ backend ] pkgs.nodePackages;
+      message = "Only builtin backends (graphite, console, repeater) or backends enumerated in `pkgs.nodePackages` are allowed!";
+    }) cfg.backends;
+
+    users.users.statsd = {
+      uid = config.ids.uids.statsd;
+      description = "Statsd daemon user";
+    };
+
+    systemd.services.statsd = {
+      description = "Statsd Server";
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        NODE_PATH = "${deps}/lib/node_modules";
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.statsd}/bin/statsd ${configFile}";
+        User = "statsd";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.statsd ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/sysstat.nix b/nixpkgs/nixos/modules/services/monitoring/sysstat.nix
new file mode 100644
index 000000000000..5468fc3aa454
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/sysstat.nix
@@ -0,0 +1,76 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.sysstat;
+in {
+  options = {
+    services.sysstat = {
+      enable = mkEnableOption (lib.mdDoc "sar system activity collection");
+
+      collect-frequency = mkOption {
+        type = types.str;
+        default = "*:00/10";
+        description = lib.mdDoc ''
+          OnCalendar specification for sysstat-collect
+        '';
+      };
+
+      collect-args = mkOption {
+        type = types.str;
+        default = "1 1";
+        description = lib.mdDoc ''
+          Arguments to pass to sa1 when collecting statistics
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.sysstat = {
+      description = "Resets System Activity Logs";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = "root";
+        RemainAfterExit = true;
+        Type = "oneshot";
+        ExecStart = "${pkgs.sysstat}/lib/sa/sa1 --boot";
+        LogsDirectory = "sa";
+      };
+    };
+
+    systemd.services.sysstat-collect = {
+      description = "system activity accounting tool";
+      unitConfig.Documentation = "man:sa1(8)";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "root";
+        ExecStart = "${pkgs.sysstat}/lib/sa/sa1 ${cfg.collect-args}";
+      };
+    };
+
+    systemd.timers.sysstat-collect = {
+      description = "Run system activity accounting tool on a regular basis";
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = cfg.collect-frequency;
+    };
+
+    systemd.services.sysstat-summary = {
+      description = "Generate a daily summary of process accounting";
+      unitConfig.Documentation = "man:sa2(8)";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "root";
+        ExecStart = "${pkgs.sysstat}/lib/sa/sa2 -A";
+      };
+    };
+
+    systemd.timers.sysstat-summary = {
+      description = "Generate summary of yesterday's process accounting";
+      wantedBy = [ "timers.target" ];
+      timerConfig.OnCalendar = "00:07:00";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix b/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix
new file mode 100644
index 000000000000..9b1278317943
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/teamviewer.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.teamviewer;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.teamviewer.enable = mkEnableOption (lib.mdDoc "TeamViewer daemon");
+
+  };
+
+  ###### implementation
+
+  config = mkIf (cfg.enable) {
+
+    environment.systemPackages = [ pkgs.teamviewer ];
+
+    services.dbus.packages = [ pkgs.teamviewer ];
+
+    systemd.services.teamviewerd = {
+      description = "TeamViewer remote control daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "network.target" "dbus.service" ];
+      requires = [ "dbus.service" ];
+      preStart = "mkdir -pv /var/lib/teamviewer /var/log/teamviewer";
+
+      startLimitIntervalSec = 60;
+      startLimitBurst = 10;
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.teamviewer}/bin/teamviewerd -f";
+        PIDFile = "/run/teamviewerd.pid";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "on-abort";
+      };
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/telegraf.nix b/nixpkgs/nixos/modules/services/monitoring/telegraf.nix
new file mode 100644
index 000000000000..913e599c189a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/telegraf.nix
@@ -0,0 +1,90 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.telegraf;
+
+  settingsFormat = pkgs.formats.toml {};
+  configFile = settingsFormat.generate "config.toml" cfg.extraConfig;
+in {
+  ###### interface
+  options = {
+    services.telegraf = {
+      enable = mkEnableOption (lib.mdDoc "telegraf server");
+
+      package = mkOption {
+        default = pkgs.telegraf;
+        defaultText = literalExpression "pkgs.telegraf";
+        description = lib.mdDoc "Which telegraf derivation to use";
+        type = types.package;
+      };
+
+      environmentFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        example = [ "/run/keys/telegraf.env" ];
+        description = lib.mdDoc ''
+          Files to load as environment files. Environment variables from these files
+          will be interpolated into the config file using envsubst with this
+          syntax: `$ENVIRONMENT` or `''${VARIABLE}`.
+          This is useful to avoid putting secrets into the nix store.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = {};
+        description = lib.mdDoc "Extra configuration options for telegraf";
+        type = settingsFormat.type;
+        example = {
+          outputs.influxdb = {
+            urls = ["http://localhost:8086"];
+            database = "telegraf";
+          };
+          inputs.statsd = {
+            service_address = ":8125";
+            delete_timings = true;
+          };
+        };
+      };
+    };
+  };
+
+
+  ###### implementation
+  config = mkIf config.services.telegraf.enable {
+    systemd.services.telegraf = let
+      finalConfigFile = if config.services.telegraf.environmentFiles == []
+                        then configFile
+                        else "/var/run/telegraf/config.toml";
+    in {
+      description = "Telegraf Agent";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        EnvironmentFile = config.services.telegraf.environmentFiles;
+        ExecStartPre = lib.optional (config.services.telegraf.environmentFiles != [])
+          (pkgs.writeShellScript "pre-start" ''
+            umask 077
+            ${pkgs.envsubst}/bin/envsubst -i "${configFile}" > /var/run/telegraf/config.toml
+          '');
+        ExecStart="${cfg.package}/bin/telegraf -config ${finalConfigFile}";
+        ExecReload="${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        RuntimeDirectory = "telegraf";
+        User = "telegraf";
+        Group = "telegraf";
+        Restart = "on-failure";
+        # for ping probes
+        AmbientCapabilities = [ "CAP_NET_RAW" ];
+      };
+    };
+
+    users.users.telegraf = {
+      uid = config.ids.uids.telegraf;
+      group = "telegraf";
+      description = "telegraf daemon user";
+    };
+
+    users.groups.telegraf = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/thanos.nix b/nixpkgs/nixos/modules/services/monitoring/thanos.nix
new file mode 100644
index 000000000000..db8641aa6146
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/thanos.nix
@@ -0,0 +1,883 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    collect
+    concatLists
+    concatStringsSep
+    flip
+    getAttrFromPath
+    hasPrefix
+    isList
+    length
+    literalExpression
+    literalMD
+    mapAttrsRecursiveCond
+    mapAttrsToList
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkMerge
+    mkOption
+    mkPackageOptionMD
+    optional
+    optionalAttrs
+    optionalString
+    types
+    ;
+
+  cfg = config.services.thanos;
+
+  nullOpt = type: description: mkOption {
+    type = types.nullOr type;
+    default = null;
+    description = mdDoc description;
+  };
+
+  optionToArgs = opt: v  : optional (v != null)  ''--${opt}="${toString v}"'';
+  flagToArgs   = opt: v  : optional v            "--${opt}";
+  listToArgs   = opt: vs : map               (v: ''--${opt}="${v}"'') vs;
+  attrsToArgs  = opt: kvs: mapAttrsToList (k: v: ''--${opt}=${k}=\"${v}\"'') kvs;
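+  # For illustration, these helpers turn structured options into CLI flags:
+  #   optionToArgs "log.level" "info"      ->  [ ''--log.level="info"'' ]
+  #   flagToArgs   "wait"      true        ->  [ "--wait" ]
+  #   listToArgs   "rule-file" [ "a" "b" ] ->  [ ''--rule-file="a"'' ''--rule-file="b"'' ]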
+
+  mkParamDef = type: default: description: mkParam type (description + ''
+
+    Defaults to `${toString default}` in Thanos
+    when set to `null`.
+  '');
+
+  mkParam = type: description: {
+    toArgs = optionToArgs;
+    option = nullOpt type description;
+  };
+
+  mkFlagParam = description: {
+    toArgs = flagToArgs;
+    option = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc description;
+    };
+  };
+
+  mkListParam = opt: description: {
+    toArgs = _opt: listToArgs opt;
+    option = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = mdDoc description;
+    };
+  };
+
+  mkAttrsParam = opt: description: {
+    toArgs = _opt: attrsToArgs opt;
+    option = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      description = mdDoc description;
+    };
+  };
+
+  mkStateDirParam = opt: default: description: {
+    toArgs = _opt: stateDir: optionToArgs opt "/var/lib/${stateDir}";
+    option = mkOption {
+      type = types.str;
+      inherit default;
+      description = mdDoc description;
+    };
+  };
+
+  toYAML = name: attrs: pkgs.runCommand name {
+    preferLocalBuild = true;
+    json = builtins.toFile "${name}.json" (builtins.toJSON attrs);
+    nativeBuildInputs = [ pkgs.remarshal ];
+  } "json2yaml -i $json -o $out";
+
+  thanos = cmd: "${cfg.package}/bin/thanos ${cmd}" +
+    (let args = cfg.${cmd}.arguments;
+     in optionalString (length args != 0) (" \\\n  " +
+         concatStringsSep " \\\n  " args));
+
+  argumentsOf = cmd: concatLists (collect isList
+    (flip mapParamsRecursive params.${cmd} (path: param:
+      let opt = concatStringsSep "." path;
+          v = getAttrFromPath path cfg.${cmd};
+      in param.toArgs opt v)));
+
+  mkArgumentsOption = cmd: mkOption {
+    type = types.listOf types.str;
+    default = argumentsOf cmd;
+    defaultText = literalMD ''
+      calculated from `config.services.thanos.${cmd}`
+    '';
+    description = mdDoc ''
+      Arguments to the `thanos ${cmd}` command.
+
+      Defaults to a list of arguments formed by converting the structured
+      options of {option}`services.thanos.${cmd}` to a list of arguments.
+
+      Overriding this option will cause none of the structured options to have
+      any effect. So only set this if you know what you're doing!
+    '';
+  };
+
+  mapParamsRecursive =
+    let noParam = attr: !(attr ? toArgs && attr ? option);
+    in mapAttrsRecursiveCond noParam;
+
+  paramsToOptions = mapParamsRecursive (_path: param: param.option);
+
+  params = {
+
+    log = {
+
+      log.level = mkParamDef (types.enum ["debug" "info" "warn" "error" "fatal"]) "info" ''
+        Log filtering level.
+      '';
+
+      log.format = mkParam types.str ''
+        Log format to use.
+      '';
+    };
+
+    tracing = cfg: {
+      tracing.config-file = {
+        toArgs = _opt: path: optionToArgs "tracing.config-file" path;
+        option = mkOption {
+          type = with types; nullOr str;
+          default = if cfg.tracing.config == null then null
+                    else toString (toYAML "tracing.yaml" cfg.tracing.config);
+          defaultText = literalExpression ''
+            if config.services.thanos.<cmd>.tracing.config == null then null
+            else toString (toYAML "tracing.yaml" config.services.thanos.<cmd>.tracing.config);
+          '';
+          description = mdDoc ''
+            Path to YAML file that contains tracing configuration.
+
+            See format details: <https://thanos.io/tip/thanos/tracing.md/#configuration>
+          '';
+        };
+      };
+
+      tracing.config =
+        {
+          toArgs = _opt: _attrs: [];
+          option = nullOpt types.attrs ''
+            Tracing configuration.
+
+            When not `null` the attribute set gets converted to
+            a YAML file and stored in the Nix store. The option
+            {option}`tracing.config-file` will default to its path.
+
+            If {option}`tracing.config-file` is set this option has no effect.
+
+            See format details: <https://thanos.io/tip/thanos/tracing.md/#configuration>
+          '';
+        };
+    };
+
+    common = cfg: params.log // params.tracing cfg // {
+
+      http-address = mkParamDef types.str "0.0.0.0:10902" ''
+        Listen `host:port` for HTTP endpoints.
+      '';
+
+      grpc-address = mkParamDef types.str "0.0.0.0:10901" ''
+        Listen `ip:port` address for gRPC endpoints (StoreAPI).
+
+        Make sure this address is routable from other components.
+      '';
+
+      grpc-server-tls-cert = mkParam types.str ''
+        TLS Certificate for gRPC server, leave blank to disable TLS
+      '';
+
+      grpc-server-tls-key = mkParam types.str ''
+        TLS Key for the gRPC server, leave blank to disable TLS
+      '';
+
+      grpc-server-tls-client-ca = mkParam types.str ''
+        TLS CA to verify clients against.
+
+        If no client CA is specified, there is no client verification on server side.
+        (tls.NoClientCert)
+      '';
+    };
+
+    objstore = cfg: {
+
+      objstore.config-file = {
+        toArgs = _opt: path: optionToArgs "objstore.config-file" path;
+        option = mkOption {
+          type = with types; nullOr str;
+          default = if cfg.objstore.config == null then null
+                    else toString (toYAML "objstore.yaml" cfg.objstore.config);
+          defaultText = literalExpression ''
+            if config.services.thanos.<cmd>.objstore.config == null then null
+            else toString (toYAML "objstore.yaml" config.services.thanos.<cmd>.objstore.config);
+          '';
+          description = mdDoc ''
+            Path to YAML file that contains object store configuration.
+
+            See format details: <https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage>
+          '';
+        };
+      };
+
+      objstore.config =
+        {
+          toArgs = _opt: _attrs: [];
+          option = nullOpt types.attrs ''
+            Object store configuration.
+
+            When not `null` the attribute set gets converted to
+            a YAML file and stored in the Nix store. The option
+            {option}`objstore.config-file` will default to its path.
+
+            If {option}`objstore.config-file` is set this option has no effect.
+
+            See format details: <https://thanos.io/tip/thanos/storage.md/#configuring-access-to-object-storage>
+          '';
+        };
+    };
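+    # Illustrative sketch (bucket and endpoint values assumed): an S3-compatible
+    # object store could be configured as
+    #   services.thanos.store.objstore.config = {
+    #     type = "S3";
+    #     config = { bucket = "thanos"; endpoint = "s3.example.org"; };
+    #   };
+    # which this module renders to objstore.yaml and passes via --objstore.config-file.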
+
+    sidecar = params.common cfg.sidecar // params.objstore cfg.sidecar // {
+
+      prometheus.url = mkParamDef types.str "http://localhost:9090" ''
+        URL at which to reach Prometheus's API.
+
+        For better performance use local network.
+      '';
+
+      tsdb.path = {
+        toArgs = optionToArgs;
+        option = mkOption {
+          type = types.str;
+          default = "/var/lib/${config.services.prometheus.stateDir}/data";
+          defaultText = literalExpression ''"/var/lib/''${config.services.prometheus.stateDir}/data"'';
+          description = mdDoc ''
+            Data directory of TSDB.
+          '';
+        };
+      };
+
+      reloader.config-file = mkParam types.str ''
+        Config file watched by the reloader.
+      '';
+
+      reloader.config-envsubst-file = mkParam types.str ''
+        Output file for environment variable substituted config file.
+      '';
+
+      reloader.rule-dirs = mkListParam "reloader.rule-dir" ''
+        Rule directories for the reloader to refresh.
+      '';
+
+    };
+
+    store = params.common cfg.store // params.objstore cfg.store // {
+
+      stateDir = mkStateDirParam "data-dir" "thanos-store" ''
+        Data directory relative to `/var/lib`
+        in which to cache remote blocks.
+      '';
+
+      index-cache-size = mkParamDef types.str "250MB" ''
+        Maximum size of items held in the index cache.
+      '';
+
+      chunk-pool-size = mkParamDef types.str "2GB" ''
+        Maximum size of concurrently allocatable bytes for chunks.
+      '';
+
+      store.limits.request-samples = mkParamDef types.int 0 ''
+        The maximum samples allowed for a single Series request.
+        The Series call fails if this limit is exceeded.
+
+        `0` means no limit.
+
+        NOTE: For efficiency the limit is internally implemented as 'chunks limit'
+        considering each chunk contains a maximum of 120 samples.
+      '';
+
+      store.grpc.series-max-concurrency = mkParamDef types.int 20 ''
+        Maximum number of concurrent Series calls.
+      '';
+
+      sync-block-duration = mkParamDef types.str "3m" ''
+        Repeat interval for syncing the blocks between local and remote view.
+      '';
+
+      block-sync-concurrency = mkParamDef types.int 20 ''
+        Number of goroutines to use when syncing blocks from object storage.
+      '';
+
+      min-time = mkParamDef types.str "0000-01-01T00:00:00Z" ''
+        Start of time range limit to serve.
+
+        Thanos Store serves only metrics which happened later than this
+        value. The option can be a constant time in RFC3339 format or a time duration
+        relative to current time, such as -1d or 2h45m. Valid duration units are
+        ms, s, m, h, d, w, y.
+      '';
+
+      max-time = mkParamDef types.str "9999-12-31T23:59:59Z" ''
+        End of time range limit to serve.
+
+        Thanos Store serves only blocks which happened earlier than this
+        value. The option can be a constant time in RFC3339 format or a time duration
+        relative to current time, such as -1d or 2h45m. Valid duration units are
+        ms, s, m, h, d, w, y.
+      '';
+    };
+
+    query = params.common cfg.query // {
+
+      grpc-client-tls-secure = mkFlagParam ''
+        Use TLS when talking to the gRPC server
+      '';
+
+      grpc-client-tls-cert = mkParam types.str ''
+        TLS Certificates to use to identify this client to the server
+      '';
+
+      grpc-client-tls-key = mkParam types.str ''
+        TLS Key for the client's certificate
+      '';
+
+      grpc-client-tls-ca = mkParam types.str ''
+        TLS CA Certificates to use to verify gRPC servers
+      '';
+
+      grpc-client-server-name = mkParam types.str ''
+        Server name to verify the hostname on the returned gRPC certificates.
+        See <https://tools.ietf.org/html/rfc4366#section-3.1>
+      '';
+
+      web.route-prefix = mkParam types.str ''
+        Prefix for API and UI endpoints.
+
+        This allows thanos UI to be served on a sub-path. This option is
+        analogous to {option}`web.route-prefix` of Prometheus.
+      '';
+
+      web.external-prefix = mkParam types.str ''
+        Static prefix for all HTML links and redirect URLs in the UI query web
+        interface.
+
+        Actual endpoints are still served on / or the
+        {option}`web.route-prefix`. This allows thanos UI to be served
+        behind a reverse proxy that strips a URL sub-path.
+      '';
+
+      web.prefix-header = mkParam types.str ''
+        Name of HTTP request header used for dynamic prefixing of UI links and
+        redirects.
+
+        This option is ignored if the option
+        `web.external-prefix` is set.
+
+        Security risk: enable this option only if a reverse proxy in front of
+        thanos is resetting the header.
+
+        The setting `web.prefix-header="X-Forwarded-Prefix"`
+        can be useful, for example, if Thanos UI is served via Traefik reverse
+        proxy with `PathPrefixStrip` option enabled, which
+        sends the stripped prefix value in `X-Forwarded-Prefix`
+        header. This allows thanos UI to be served on a sub-path.
+      '';
+
+      query.timeout = mkParamDef types.str "2m" ''
+        Maximum time to process query by query node.
+      '';
+
+      query.max-concurrent = mkParamDef types.int 20 ''
+        Maximum number of queries processed concurrently by query node.
+      '';
+
+      query.replica-labels = mkAttrsParam "query.replica-label" ''
+        Labels to treat as a replica indicator along which data is
+        deduplicated.
+
+        You will still be able to query without deduplication by using the
+        'dedup=false' parameter. Data includes time series, recording
+        rules, and alerting rules.
+      '';
+
+      selector-labels = mkAttrsParam "selector-label" ''
+        Query selector labels that will be exposed in info endpoint.
+      '';
+
+      endpoints = mkListParam "endpoint" ''
+        Addresses of statically configured Thanos API servers (repeatable).
+
+        The scheme may be prefixed with 'dns+' or 'dnssrv+' to detect
+        Thanos API servers through respective DNS lookups.
+      '';
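+
+      # e.g. (hostname assumed):
+      #   services.thanos.query.endpoints = [ "dns+thanos-store.example.org:10901" ];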
+
+      store.sd-files = mkListParam "store.sd-files" ''
+        Path to files that contain addresses of store API servers. The path
+        can be a glob pattern.
+      '';
+
+      store.sd-interval = mkParamDef types.str "5m" ''
+        Refresh interval to re-read file SD files. It is used as a resync fallback.
+      '';
+
+      store.sd-dns-interval = mkParamDef types.str "30s" ''
+        Interval between DNS resolutions.
+      '';
+
+      store.unhealthy-timeout = mkParamDef types.str "5m" ''
+        Timeout before an unhealthy store is cleaned from the store UI page.
+      '';
+
+      query.auto-downsampling = mkFlagParam ''
+        Enable automatic adjustment (step / 5) to what source of data should
+        be used in store gateways if no
+        `max_source_resolution` param is specified.
+      '';
+
+      query.partial-response = mkFlagParam ''
+        Enable partial response for queries if no
+        `partial_response` param is specified.
+      '';
+
+      query.default-evaluation-interval = mkParamDef types.str "1m" ''
+        Set default evaluation interval for sub queries.
+      '';
+
+      store.response-timeout = mkParamDef types.str "0ms" ''
+        If a Store doesn't send any data in this specified duration then a
+        Store will be ignored and partial data will be returned if it's
+        enabled. `0` disables timeout.
+      '';
+    };
+
+    query-frontend = params.common cfg.query-frontend // {
+      query-frontend.downstream-url = mkParamDef types.str "http://localhost:9090" ''
+        URL of downstream Prometheus Query compatible API.
+      '';
+    };
+
+    rule = params.common cfg.rule // params.objstore cfg.rule // {
+
+      labels = mkAttrsParam "label" ''
+        Labels to be applied to all generated metrics.
+
+        Similar to external labels for Prometheus,
+        used to identify ruler and its blocks as unique source.
+      '';
+
+      stateDir = mkStateDirParam "data-dir" "thanos-rule" ''
+        Data directory relative to `/var/lib`.
+      '';
+
+      rule-files = mkListParam "rule-file" ''
+        Rule files that should be used by rule manager. Can be in glob format.
+      '';
+
+      eval-interval = mkParamDef types.str "1m" ''
+        The default evaluation interval to use.
+      '';
+
+      tsdb.block-duration = mkParamDef types.str "2h" ''
+        Block duration for TSDB block.
+      '';
+
+      tsdb.retention = mkParamDef types.str "48h" ''
+        Block retention time on local disk.
+      '';
+
+      alertmanagers.urls = mkListParam "alertmanagers.url" ''
+        Alertmanager replica URLs to push firing alerts.
+
+        Ruler claims success if a push to at least one of the discovered
+        Alertmanagers succeeds. The scheme may be prefixed with
+        `dns+` or `dnssrv+` to detect
+        Alertmanager IPs through respective DNS lookups. The port defaults to
+        `9093` or the SRV record's value. The URL path is
+        used as a prefix for the regular Alertmanager API path.
+      '';
+
+      alertmanagers.send-timeout = mkParamDef types.str "10s" ''
+        Timeout for sending alerts to alertmanager.
+      '';
+
+      alert.query-url = mkParam types.str ''
+        The external Thanos Query URL that would be set in all alerts' 'Source' field.
+      '';
+
+      alert.label-drop = mkListParam "alert.label-drop" ''
+        Labels by name to drop before sending to alertmanager.
+
+        This allows alerts to be deduplicated on the replica label.
+
+        Similar to Prometheus alert relabelling.
+      '';
+
+      web.route-prefix = mkParam types.str ''
+        Prefix for API and UI endpoints.
+
+        This allows thanos UI to be served on a sub-path.
+
+        This option is analogous to `--web.route-prefix` of Prometheus.
+      '';
+
+      web.external-prefix = mkParam types.str ''
+        Static prefix for all HTML links and redirect URLs in the UI query web
+        interface.
+
+        Actual endpoints are still served on / or the
+        {option}`web.route-prefix`. This allows thanos UI to be served
+        behind a reverse proxy that strips a URL sub-path.
+      '';
+
+      web.prefix-header = mkParam types.str ''
+        Name of HTTP request header used for dynamic prefixing of UI links and
+        redirects.
+
+        This option is ignored if the option
+        {option}`web.external-prefix` is set.
+
+        Security risk: enable this option only if a reverse proxy in front of
+        thanos is resetting the header.
+
+        The header `X-Forwarded-Prefix` can be useful, for
+        example, if Thanos UI is served via Traefik reverse proxy with
+        `PathPrefixStrip` option enabled, which sends the
+        stripped prefix value in `X-Forwarded-Prefix`
+        header. This allows thanos UI to be served on a sub-path.
+      '';
+
+      query.addresses = mkListParam "query" ''
+        Addresses of statically configured query API servers.
+
+        The scheme may be prefixed with `dns+` or
+        `dnssrv+` to detect query API servers through
+        respective DNS lookups.
+      '';
+
+      query.sd-files = mkListParam "query.sd-files" ''
+        Path to files that contain addresses of query peers.
+        The path can be a glob pattern.
+      '';
+
+      query.sd-interval = mkParamDef types.str "5m" ''
+        Refresh interval to re-read file SD files. (used as a fallback)
+      '';
+
+      query.sd-dns-interval = mkParamDef types.str "30s" ''
+        Interval between DNS resolutions.
+      '';
+    };
+
+    compact = params.log // params.tracing cfg.compact // params.objstore cfg.compact // {
+
+      http-address = mkParamDef types.str "0.0.0.0:10902" ''
+        Listen `host:port` for HTTP endpoints.
+      '';
+
+      stateDir = mkStateDirParam "data-dir" "thanos-compact" ''
+        Data directory relative to `/var/lib`
+        in which to cache blocks and process compactions.
+      '';
+
+      consistency-delay = mkParamDef types.str "30m" ''
+        Minimum age of fresh (non-compacted) blocks before they are
+        processed. Malformed blocks older than the maximum of consistency-delay
+        and 30m0s will be removed.
+      '';
+
+      retention.resolution-raw = mkParamDef types.str "0d" ''
+        How long to retain raw samples in bucket.
+
+        `0d` - disables this retention
+      '';
+
+      retention.resolution-5m = mkParamDef types.str "0d" ''
+        How long to retain samples of resolution 1 (5 minutes) in bucket.
+
+        `0d` - disables this retention
+      '';
+
+      retention.resolution-1h = mkParamDef types.str "0d" ''
+        How long to retain samples of resolution 2 (1 hour) in bucket.
+
+        `0d` - disables this retention
+      '';
+
+      startAt = {
+        toArgs = _opt: startAt: flagToArgs "wait" (startAt == null);
+        option = nullOpt types.str ''
+          When this option is set to a `systemd.time`
+          specification the Thanos compactor will run at the specified period.
+
+          When this option is `null` the Thanos compactor service
+          will run continuously. So it will not exit after all compactions have
+          been processed but wait for new work.
+        '';
+      };
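+      # Illustrative: setting services.thanos.compact.startAt = "daily"; turns the
+      # compactor into a scheduled oneshot run, while leaving it null keeps a
+      # long-running service started with --wait.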
+
+      downsampling.disable = mkFlagParam ''
+        Disables downsampling.
+
+        This is not recommended, as querying long time ranges without
+        downsampled data is not efficient or useful, e.g. it is not possible
+        to render all samples for a human eye anyway.
+      '';
+
+      compact.concurrency = mkParamDef types.int 1 ''
+        Number of goroutines to use when compacting groups.
+      '';
+    };
+
+    downsample = params.log // params.tracing cfg.downsample // params.objstore cfg.downsample // {
+
+      stateDir = mkStateDirParam "data-dir" "thanos-downsample" ''
+        Data directory relative to `/var/lib`
+        in which to cache blocks and process downsamplings.
+      '';
+
+    };
+
+    receive = params.common cfg.receive // params.objstore cfg.receive // {
+
+      remote-write.address = mkParamDef types.str "0.0.0.0:19291" ''
+        Address to listen on for remote write requests.
+      '';
+
+      stateDir = mkStateDirParam "tsdb.path" "thanos-receive" ''
+        Data directory relative to `/var/lib` of TSDB.
+      '';
+
+      labels = mkAttrsParam "label" ''
+        External labels to announce.
+
+        This flag will be removed in the future when handling multiple tsdb
+        instances is added.
+      '';
+
+      tsdb.retention = mkParamDef types.str "15d" ''
+        How long to retain raw samples on local storage.
+
+        `0d` - disables this retention
+      '';
+    };
+
+  };
+
+  assertRelativeStateDir = cmd: {
+    assertions = [
+      {
+        assertion = !hasPrefix "/" cfg.${cmd}.stateDir;
+        message =
+          "The option services.thanos.${cmd}.stateDir should not be an absolute directory." +
+          " It should be a directory relative to /var/lib.";
+      }
+    ];
+  };
+
+in {
+
+  options.services.thanos = {
+
+    package = mkPackageOptionMD pkgs "thanos" {};
+
+    sidecar = paramsToOptions params.sidecar // {
+      enable = mkEnableOption
+        (mdDoc "the Thanos sidecar for Prometheus server");
+      arguments = mkArgumentsOption "sidecar";
+    };
+
+    store = paramsToOptions params.store // {
+      enable = mkEnableOption
+        (mdDoc "the Thanos store node giving access to blocks in a bucket provider.");
+      arguments = mkArgumentsOption "store";
+    };
+
+    query = paramsToOptions params.query // {
+      enable = mkEnableOption
+        (mdDoc ("the Thanos query node exposing PromQL enabled Query API " +
+         "with data retrieved from multiple store nodes"));
+      arguments = mkArgumentsOption "query";
+    };
+
+    query-frontend = paramsToOptions params.query-frontend // {
+      enable = mkEnableOption
+        (mdDoc ("the Thanos query frontend implements a service deployed in front of queriers to
+          improve query parallelization and caching."));
+      arguments = mkArgumentsOption "query-frontend";
+    };
+
+    rule = paramsToOptions params.rule // {
+      enable = mkEnableOption
+        (mdDoc ("the Thanos ruler service which evaluates Prometheus rules against" +
+        " given Query nodes, exposing Store API and storing old blocks in bucket"));
+      arguments = mkArgumentsOption "rule";
+    };
+
+    compact = paramsToOptions params.compact // {
+      enable = mkEnableOption
+        (mdDoc "the Thanos compactor which continuously compacts blocks in an object store bucket");
+      arguments = mkArgumentsOption "compact";
+    };
+
+    downsample = paramsToOptions params.downsample // {
+      enable = mkEnableOption
+        (mdDoc "the Thanos downsampler which continuously downsamples blocks in an object store bucket");
+      arguments = mkArgumentsOption "downsample";
+    };
+
+    receive = paramsToOptions params.receive // {
+      enable = mkEnableOption
+        (mdDoc ("the Thanos receiver which accept Prometheus remote write API requests and write to local tsdb"));
+      arguments = mkArgumentsOption "receive";
+    };
+  };
+
+  config = mkMerge [
+
+    (mkIf cfg.sidecar.enable {
+      assertions = [
+        {
+          assertion = config.services.prometheus.enable;
+          message =
+            "Please enable services.prometheus when enabling services.thanos.sidecar.";
+        }
+        {
+          assertion = !(config.services.prometheus.globalConfig.external_labels == null ||
+                        config.services.prometheus.globalConfig.external_labels == {});
+          message =
+            "services.thanos.sidecar requires uniquely identifying external labels " +
+            "to be configured in the Prometheus server. " +
+            "Please set services.prometheus.globalConfig.external_labels.";
+        }
+      ];
+      systemd.services.thanos-sidecar = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network.target" "prometheus.service" ];
+        serviceConfig = {
+          User = "prometheus";
+          Restart = "always";
+          ExecStart = thanos "sidecar";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        };
+      };
+    })
+
+    (mkIf cfg.store.enable (mkMerge [
+      (assertRelativeStateDir "store")
+      {
+        systemd.services.thanos-store = {
+          wantedBy = [ "multi-user.target" ];
+          after    = [ "network.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            StateDirectory = cfg.store.stateDir;
+            Restart = "always";
+            ExecStart = thanos "store";
+            ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          };
+        };
+      }
+    ]))
+
+    (mkIf cfg.query.enable {
+      systemd.services.thanos-query = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network.target" ];
+        serviceConfig = {
+          DynamicUser = true;
+          Restart = "always";
+          ExecStart = thanos "query";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        };
+      };
+    })
+
+    (mkIf cfg.query-frontend.enable {
+      systemd.services.thanos-query-frontend = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network.target" ];
+        serviceConfig = {
+          DynamicUser = true;
+          Restart = "always";
+          ExecStart = thanos "query-frontend";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        };
+      };
+    })
+
+    (mkIf cfg.rule.enable (mkMerge [
+      (assertRelativeStateDir "rule")
+      {
+        systemd.services.thanos-rule = {
+          wantedBy = [ "multi-user.target" ];
+          after    = [ "network.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            StateDirectory = cfg.rule.stateDir;
+            Restart = "always";
+            ExecStart = thanos "rule";
+            ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          };
+        };
+      }
+    ]))
+
+    (mkIf cfg.compact.enable (mkMerge [
+      (assertRelativeStateDir "compact")
+      {
+        systemd.services.thanos-compact =
+          let wait = cfg.compact.startAt == null; in {
+            wantedBy = [ "multi-user.target" ];
+            after    = [ "network.target" ];
+            serviceConfig = {
+              Type    = if wait then "simple" else "oneshot";
+              Restart = if wait then "always" else "no";
+              DynamicUser = true;
+              StateDirectory = cfg.compact.stateDir;
+              ExecStart = thanos "compact";
+              ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+            };
+          } // optionalAttrs (!wait) { inherit (cfg.compact) startAt; };
+      }
+    ]))
+
+    (mkIf cfg.downsample.enable (mkMerge [
+      (assertRelativeStateDir "downsample")
+      {
+        systemd.services.thanos-downsample = {
+          wantedBy = [ "multi-user.target" ];
+          after    = [ "network.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            StateDirectory = cfg.downsample.stateDir;
+            Restart = "always";
+            ExecStart = thanos "downsample";
+            ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          };
+        };
+      }
+    ]))
+
+    (mkIf cfg.receive.enable (mkMerge [
+      (assertRelativeStateDir "receive")
+      {
+        systemd.services.thanos-receive = {
+          wantedBy = [ "multi-user.target" ];
+          after    = [ "network.target" ];
+          serviceConfig = {
+            DynamicUser = true;
+            StateDirectory = cfg.receive.stateDir;
+            Restart = "always";
+            ExecStart = thanos "receive";
+            ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          };
+        };
+      }
+    ]))
+
+  ];
+}
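+
+# Minimal sidecar sketch (label name and value assumed), satisfying the
+# assertions above:
+#   services.prometheus = {
+#     enable = true;
+#     globalConfig.external_labels = { replica = "r1"; };
+#   };
+#   services.thanos.sidecar.enable = true;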
diff --git a/nixpkgs/nixos/modules/services/monitoring/tremor-rs.nix b/nixpkgs/nixos/modules/services/monitoring/tremor-rs.nix
new file mode 100644
index 000000000000..213e8a474868
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/tremor-rs.nix
@@ -0,0 +1,129 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+
+  cfg = config.services.tremor-rs;
+
+  loggerSettingsFormat = pkgs.formats.yaml { };
+  loggerConfigFile = loggerSettingsFormat.generate "logger.yaml" cfg.loggerSettings;
+in {
+
+  options = {
+    services.tremor-rs = {
+      enable = lib.mkEnableOption (lib.mdDoc "Tremor event- or stream-processing system");
+
+      troyFileList = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc "List of troy files to load.";
+      };
+
+      tremorLibDir = mkOption {
+        type = types.path;
+        default = "";
+        description = lib.mdDoc "Directory where to find /lib containing tremor script files";
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "The host tremor should be listening on";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 9898;
+        description = lib.mdDoc "the port tremor should be listening on";
+      };
+
+      loggerSettings = mkOption {
+        description = lib.mdDoc "Tremor logger configuration";
+        default = {};
+        type = loggerSettingsFormat.type;
+
+        example = {
+          refresh_rate = "30 seconds";
+          appenders.stdout.kind = "console";
+          root = {
+            level = "warn";
+            appenders = [ "stdout" ];
+          };
+          loggers = {
+            tremor_runtime = {
+              level = "debug";
+              appenders = [ "stdout" ];
+              additive = false;
+            };
+            tremor = {
+              level = "debug";
+              appenders = [ "stdout" ];
+              additive = false;
+            };
+          };
+        };
+
+        defaultText = literalExpression ''
+          {
+            refresh_rate = "30 seconds";
+            appenders.stdout.kind = "console";
+            root = {
+              level = "warn";
+              appenders = [ "stdout" ];
+            };
+            loggers = {
+              tremor_runtime = {
+                level = "debug";
+                appenders = [ "stdout" ];
+                additive = false;
+              };
+              tremor = {
+                level = "debug";
+                appenders = [ "stdout" ];
+                additive = false;
+              };
+            };
+          }
+        '';
+
+      };
+    };
+  };
+
+  config = mkIf (cfg.enable) {
+
+    environment.systemPackages = [ pkgs.tremor-rs ] ;
+
+    systemd.services.tremor-rs = {
+      description = "Tremor event- or stream-processing system";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      environment.TREMOR_PATH = "${pkgs.tremor-rs}/lib:${cfg.tremorLibDir}";
+
+      serviceConfig = {
+        ExecStart = "${pkgs.tremor-rs}/bin/tremor --logger-config ${loggerConfigFile} server run ${concatStringsSep " " cfg.troyFileList} --api-host ${cfg.host}:${toString cfg.port}";
+        DynamicUser = true;
+        Restart = "always";
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectClock = true;
+        ProtectProc = "noaccess";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        RestrictSUIDSGID = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+      };
+    };
+  };
+}
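+
+# Illustrative usage sketch (paths assumed):
+#   services.tremor-rs = {
+#     enable = true;
+#     troyFileList = [ ./pipelines/main.troy ];
+#     tremorLibDir = "/var/lib/tremor/lib";
+#   };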
diff --git a/nixpkgs/nixos/modules/services/monitoring/tuptime.nix b/nixpkgs/nixos/modules/services/monitoring/tuptime.nix
new file mode 100644
index 000000000000..97cc37526254
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/tuptime.nix
@@ -0,0 +1,90 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.tuptime;
+
+in {
+
+  options.services.tuptime = {
+
+    enable = mkEnableOption (lib.mdDoc "the total uptime service");
+
+    timer = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to regularly log uptime to detect bad shutdowns.";
+      };
+
+      period = mkOption {
+        type = types.str;
+        default = "*:0/5";
+        description = lib.mdDoc "systemd calendar event";
+      };
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.tuptime ];
+
+    users = {
+      groups._tuptime.members = [ "_tuptime" ];
+      users._tuptime = {
+        isSystemUser = true;
+        group = "_tuptime";
+        description = "tuptime database owner";
+      };
+    };
+
+    systemd = {
+      services = {
+
+        tuptime = {
+          description = "The total uptime service";
+          documentation = [ "man:tuptime(1)" ];
+          after = [ "time-sync.target" ];
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            StateDirectory = "tuptime";
+            Type = "oneshot";
+            User = "_tuptime";
+            RemainAfterExit = true;
+            ExecStart = "${pkgs.tuptime}/bin/tuptime -q";
+            ExecStop = "${pkgs.tuptime}/bin/tuptime -qg";
+          };
+        };
+
+        tuptime-sync = mkIf cfg.timer.enable {
+          description = "Tuptime scheduled sync service";
+          serviceConfig = {
+            Type = "oneshot";
+            User = "_tuptime";
+            ExecStart = "${pkgs.tuptime}/bin/tuptime -q";
+          };
+        };
+      };
+
+      timers.tuptime-sync = mkIf cfg.timer.enable {
+        description = "Tuptime scheduled sync timer";
+        # this timer should be started if the service is started
+        # even if the timer was previously stopped
+        wantedBy = [ "tuptime.service" "timers.target" ];
+        # this timer should be stopped if the service is stopped
+        partOf = [ "tuptime.service" ];
+        timerConfig = {
+          OnBootSec = "1min";
+          OnCalendar = cfg.timer.period;
+          Unit = "tuptime-sync.service";
+        };
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.evils ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/unpoller.nix b/nixpkgs/nixos/modules/services/monitoring/unpoller.nix
new file mode 100644
index 000000000000..557e2bff4c26
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/unpoller.nix
@@ -0,0 +1,322 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.unpoller;
+
+  configFile = pkgs.writeText "unpoller.json" (generators.toJSON {} {
+    inherit (cfg) poller influxdb loki prometheus unifi;
+  });
+
+in {
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "unifi-poller" ] [ "services" "unpoller" ])
+  ];
+
+  options.services.unpoller = {
+    enable = mkEnableOption (lib.mdDoc "unpoller");
+
+    poller = {
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Turns on line numbers, microsecond logging, and a per-device log.
+          This may be noisy if you have a lot of devices. It adds one line per device.
+        '';
+      };
+      quiet = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Turns off per-interval logs. Only startup and error logs will be emitted.
+        '';
+      };
+      plugins = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Load additional plugins.
+        '';
+      };
+    };
+
+    prometheus = {
+      disable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to disable the prometheus output plugin.
+        '';
+      };
+      http_listen = mkOption {
+        type = types.str;
+        default = "[::]:9130";
+        description = lib.mdDoc ''
+          Bind the prometheus exporter to this IP or hostname.
+        '';
+      };
+      report_errors = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to report errors.
+        '';
+      };
+    };
+
+    influxdb = {
+      disable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to disable the influxdb output plugin.
+        '';
+      };
+      url = mkOption {
+        type = types.str;
+        default = "http://127.0.0.1:8086";
+        description = lib.mdDoc ''
+          URL of the influxdb host.
+        '';
+      };
+      user = mkOption {
+        type = types.str;
+        default = "unifipoller";
+        description = lib.mdDoc ''
+          Username for the influxdb.
+        '';
+      };
+      pass = mkOption {
+        type = types.path;
+        default = pkgs.writeText "unpoller-influxdb-default.password" "unifipoller";
+        defaultText = literalExpression "unpoller-influxdb-default.password";
+        description = lib.mdDoc ''
+          Path of a file containing the password for influxdb.
+          This file needs to be readable by the unifi-poller user.
+        '';
+        apply = v: "file://${v}";
+      };
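+      # Illustrative (path assumed): pass = "/run/secrets/unpoller-influxdb.pass";
+      # the apply function above turns the path into the file:// URL used in the
+      # generated configuration.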
+      db = mkOption {
+        type = types.str;
+        default = "unifi";
+        description = lib.mdDoc ''
+          Database name. Database should exist.
+        '';
+      };
+      verify_ssl = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Verify the influxdb's certificate.
+        '';
+      };
+      interval = mkOption {
+        type = types.str;
+        default = "30s";
+        description = lib.mdDoc ''
+          Setting this lower than the Unifi controller's refresh
+          interval may lead to zeroes in your database.
+        '';
+      };
+    };
+
+    loki = {
+      url = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          URL of the Loki host.
+        '';
+      };
+      user = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Username for Loki.
+        '';
+      };
+      pass = mkOption {
+        type = types.path;
+        default = pkgs.writeText "unpoller-loki-default.password" "";
+        defaultText = "unpoller-influxdb-default.password";
+        description = lib.mdDoc ''
+          Path of a file containing the password for Loki.
+          This file needs to be readable by the unifi-poller user.
+        '';
+        apply = v: "file://${v}";
+      };
+      verify_ssl = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Verify Loki's certificate.
+        '';
+      };
+      tenant_id = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Tenant ID to use in Loki.
+        '';
+      };
+      interval = mkOption {
+        type = types.str;
+        default = "2m";
+        description = lib.mdDoc ''
+          How often the events are polled and pushed to Loki.
+        '';
+      };
+      timeout = mkOption {
+        type = types.str;
+        default = "10s";
+        description = lib.mdDoc ''
+          Should be increased in case of timeout errors.
+        '';
+      };
+    };
+
+    unifi = let
+      controllerOptions = {
+        user = mkOption {
+          type = types.str;
+          default = "unifi";
+          description = lib.mdDoc ''
+            Unifi service user name.
+          '';
+        };
+        pass = mkOption {
+          type = types.path;
+          default = pkgs.writeText "unpoller-unifi-default.password" "unifi";
+          defaultText = literalExpression "unpoller-unifi-default.password";
+          description = lib.mdDoc ''
+            Path of a file containing the password for the unifi service user.
+            This file needs to be readable by the unifi-poller user.
+          '';
+          apply = v: "file://${v}";
+        };
+        url = mkOption {
+          type = types.str;
+          default = "https://unifi:8443";
+          description = lib.mdDoc ''
+            URL of the Unifi controller.
+          '';
+        };
+        sites = mkOption {
+          type = with types; either (enum [ "default" "all" ]) (listOf str);
+          default = "all";
+          description = lib.mdDoc ''
+            List of site names for which statistics should be exported,
+            or the string "default" for the default site, or the string "all" for all sites.
+          '';
+          apply = toList;
+        };
+        save_ids = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Collect and save data from the intrusion detection system to influxdb and Loki.
+          '';
+        };
+        save_events = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Collect and save data from UniFi events to influxdb and Loki.
+          '';
+        };
+        save_alarms = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Collect and save data from UniFi alarms to influxdb and Loki.
+          '';
+        };
+        save_anomalies = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Collect and save data from UniFi anomalies to influxdb and Loki.
+          '';
+        };
+        save_dpi = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Collect and save data from deep packet inspection.
+            Adds around 150 data points and impacts performance.
+          '';
+        };
+        save_sites = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Collect and save site data.
+          '';
+        };
+        hash_pii = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Hash client names and MAC addresses with MD5. This attempts
+            to protect personally identifiable information.
+          '';
+        };
+        verify_ssl = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Verify the Unifi controller's certificate.
+          '';
+        };
+      };
+
+    in {
+      dynamic = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Let prometheus select which controller to poll when scraping.
+          Use with default credentials. See unifi-poller wiki for more.
+        '';
+      };
+
+      defaults = controllerOptions;
+
+      controllers = mkOption {
+        type = with types; listOf (submodule { options = controllerOptions; });
+        default = [];
+        description = lib.mdDoc ''
+          List of Unifi controllers to poll. Use defaults if empty.
+        '';
+        apply = map (flip removeAttrs [ "_module" ]);
+      };
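+      # Illustrative (URL and secret path assumed):
+      #   services.unpoller.unifi.controllers = [{
+      #     url  = "https://unifi.example.org:8443";
+      #     pass = "/run/secrets/unifi.pass";
+      #   }];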
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.unifi-poller = { };
+    users.users.unifi-poller = {
+      description = "unifi-poller Service User";
+      group = "unifi-poller";
+      isSystemUser = true;
+    };
+
+    systemd.services.unifi-poller = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.unpoller}/bin/unpoller --config ${configFile}";
+        Restart = "always";
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectSystem = "full";
+        DevicePolicy = "closed";
+        NoNewPrivileges = true;
+        User = "unifi-poller";
+        WorkingDirectory = "/tmp";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/ups.nix b/nixpkgs/nixos/modules/services/monitoring/ups.nix
new file mode 100644
index 000000000000..efef2d777acd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/ups.nix
@@ -0,0 +1,261 @@
+{ config, lib, pkgs, ... }:
+
+# TODO: This is not secure, have a look at the file docs/security.txt inside
+# the project sources.
+with lib;
+
+let
+  cfg = config.power.ups;
+in
+
+let
+  upsOptions = {name, config, ...}:
+  {
+    options = {
+      # This can be inferred from the UPS model by looking at
+      # /nix/store/nut/share/driver.list
+      driver = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Specify the program to run to talk to this UPS.  apcsmart,
+          bestups, and sec are some examples.
+        '';
+      };
+
+      port = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The serial port to which your UPS is connected.  /dev/ttyS0 is
+          usually the first port on Linux boxes, for example.
+        '';
+      };
+
+      shutdownOrder = mkOption {
+        default = 0;
+        type = types.int;
+        description = lib.mdDoc ''
+          When you have multiple UPSes on your system, you usually need to
+          turn them off in a certain order.  upsdrvctl shuts down all the
+          0s, then the 1s, 2s, and so on.  To exclude a UPS from the
+          shutdown sequence, set this to -1.
+        '';
+      };
+
+      maxStartDelay = mkOption {
+        default = null;
+        type = types.uniq (types.nullOr types.int);
+        description = lib.mdDoc ''
+          This can be set as a global variable above your first UPS
+          definition and it can also be set in a UPS section.  This value
+          controls how long upsdrvctl will wait for the driver to finish
+          starting.  This keeps your system from getting stuck due to a
+          broken driver or UPS.
+        '';
+      };
+
+      description = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          Description of the UPS.
+        '';
+      };
+
+      directives = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of configuration directives for this UPS.
+        '';
+      };
+
+      summary = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Lines which would be added inside ups.conf for handling this UPS.
+        '';
+      };
+
+    };
+
+    config = {
+      directives = mkOrder 10 ([
+        "driver = ${config.driver}"
+        "port = ${config.port}"
+        ''desc = "${config.description}"''
+        "sdorder = ${toString config.shutdownOrder}"
+      ] ++ (optional (config.maxStartDelay != null)
+            "maxstartdelay = ${toString config.maxStartDelay}")
+      );
+
+      summary =
+        concatStringsSep "\n      "
+          (["[${name}]"] ++ config.directives);
+    };
+  };
+
+in
+
+
+{
+  options = {
+    # powerManagement.powerDownCommands
+
+    power.ups = {
+      enable = mkOption {
+        default = false;
+        type = with types; bool;
+        description = lib.mdDoc ''
+          Enables support for Power Devices, such as Uninterruptible Power
+          Supplies, Power Distribution Units and Solar Controllers.
+        '';
+      };
+
+      # This option is not used yet.
+      mode = mkOption {
+        default = "standalone";
+        type = types.str;
+        description = lib.mdDoc ''
+          The MODE determines which part of the NUT is to be started, and
+          which configuration files must be modified.
+
+          The values of MODE can be:
+
+          - none: NUT is not configured, or the Integrated Power
+            Management or some external system is used to start up the NUT
+            components. So nothing is to be started.
+
+          - standalone: This mode addresses a local-only configuration, with 1
+            UPS protecting the local system. This implies starting the 3 NUT
+            layers (driver, upsd and upsmon) and the matching configuration
+            files. This mode can also address UPS redundancy.
+
+          - netserver: same as for the standalone configuration, but also
+            needs some more ACLs and possibly a specific LISTEN directive in
+            upsd.conf. Since this MODE is open to the network, special
+            care should be applied to security concerns.
+
+          - netclient: this mode only requires upsmon.
+        '';
+      };
+
+      schedulerRules = mkOption {
+        example = "/etc/nixos/upssched.conf";
+        type = types.str;
+        description = lib.mdDoc ''
+          File which contains the rules to handle UPS events.
+        '';
+      };
+
+
+      maxStartDelay = mkOption {
+        default = 45;
+        type = types.int;
+        description = lib.mdDoc ''
+          This can be set as a global variable above your first UPS
+          definition and it can also be set in a UPS section.  This value
+          controls how long upsdrvctl will wait for the driver to finish
+          starting.  This keeps your system from getting stuck due to a
+          broken driver or UPS.
+        '';
+      };
+
+      ups = mkOption {
+        default = {};
+        # see nut/etc/ups.conf.sample
+        description = lib.mdDoc ''
+          This is where you configure all the UPSes that this system will be
+          monitoring directly.  These are usually attached to serial ports,
+          but USB devices are also supported.
+        '';
+        type = with types; attrsOf (submodule upsOptions);
+      };
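+      # Illustrative sketch (driver and port assumed; see NUT's driver.list):
+      #   power.ups.ups.workstation = {
+      #     driver = "usbhid-ups";
+      #     port = "auto";
+      #     description = "Workstation UPS";
+      #   };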
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.nut ];
+
+    systemd.services.upsmon = {
+      description = "Uninterruptible Power Supplies (Monitor)";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.Type = "forking";
+      script = "${pkgs.nut}/sbin/upsmon";
+      environment.NUT_CONFPATH = "/etc/nut/";
+      environment.NUT_STATEPATH = "/var/lib/nut/";
+    };
+
+    systemd.services.upsd = {
+      description = "Uninterruptible Power Supplies (Daemon)";
+      after = [ "network.target" "upsmon.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.Type = "forking";
+      # TODO: replace 'root' by another username.
+      script = "${pkgs.nut}/sbin/upsd -u root";
+      environment.NUT_CONFPATH = "/etc/nut/";
+      environment.NUT_STATEPATH = "/var/lib/nut/";
+    };
+
+    systemd.services.upsdrv = {
+      description = "Uninterruptible Power Supplies (Register all UPS)";
+      after = [ "upsd.service" ];
+      wantedBy = [ "multi-user.target" ];
+      # TODO: replace 'root' by another username.
+      script = "${pkgs.nut}/bin/upsdrvctl -u root start";
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+      environment.NUT_CONFPATH = "/etc/nut/";
+      environment.NUT_STATEPATH = "/var/lib/nut/";
+    };
+
+    environment.etc = {
+      "nut/nut.conf".source = pkgs.writeText "nut.conf"
+        ''
+          MODE = ${cfg.mode}
+        '';
+      "nut/ups.conf".source = pkgs.writeText "ups.conf"
+        ''
+          maxstartdelay = ${toString cfg.maxStartDelay}
+
+          ${flip concatStringsSep (forEach (attrValues cfg.ups) (ups: ups.summary)) "
+
+          "}
+        '';
+      "nut/upssched.conf".source = cfg.schedulerRules;
+      # These file are containing private information and thus should not
+      # be stored inside the Nix store.
+      /*
+      "nut/upsd.conf".source = "";
+      "nut/upsd.users".source = "";
+      "nut/upsmon.conf".source = "";
+      */
+    };
+
+    power.ups.schedulerRules = mkDefault "${pkgs.nut}/etc/upssched.conf.sample";
+
+    systemd.tmpfiles.rules = [
+      "d /var/state/ups -"
+    ];
+
+
+/*
+    users.users.nut =
+      { uid = 84;
+        home = "/var/lib/nut";
+        createHome = true;
+        group = "nut";
+        description = "UPnP A/V Media Server user";
+      };
+
+    users.groups."nut" =
+      { gid = 84; };
+*/
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/monitoring/uptime-kuma.nix b/nixpkgs/nixos/modules/services/monitoring/uptime-kuma.nix
new file mode 100644
index 000000000000..7027046b2425
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/uptime-kuma.nix
@@ -0,0 +1,81 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.uptime-kuma;
+in
+{
+
+  meta.maintainers = [ lib.maintainers.julienmalka ];
+
+  options = {
+    services.uptime-kuma = {
+      enable = mkEnableOption (mdDoc "Uptime Kuma; this assumes a reverse proxy is set up in front of it");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.uptime-kuma;
+        defaultText = literalExpression "pkgs.uptime-kuma";
+        description = lib.mdDoc "Uptime Kuma package to use.";
+      };
+
+      appriseSupport = mkEnableOption (mdDoc "apprise support for notifications");
+
+      settings = lib.mkOption {
+        type = lib.types.submodule { freeformType = with lib.types; attrsOf str; };
+        default = { };
+        example = {
+          PORT = "4000";
+          NODE_EXTRA_CA_CERTS = "/etc/ssl/certs/ca-certificates.crt";
+        };
+        description = lib.mdDoc ''
+          Additional configuration for Uptime Kuma, see
+          <https://github.com/louislam/uptime-kuma/wiki/Environment-Variables>
+          for supported values.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.uptime-kuma.settings = {
+      DATA_DIR = "/var/lib/uptime-kuma/";
+      NODE_ENV = mkDefault "production";
+      HOST = mkDefault "127.0.0.1";
+      PORT = mkDefault "3001";
+    };
+
+    systemd.services.uptime-kuma = {
+      description = "Uptime Kuma";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment = cfg.settings;
+      path = with pkgs; [ unixtools.ping ] ++ lib.optional cfg.appriseSupport apprise;
+      serviceConfig = {
+        Type = "simple";
+        StateDirectory = "uptime-kuma";
+        DynamicUser = true;
+        ExecStart = "${cfg.package}/bin/uptime-kuma-server";
+        Restart = "on-failure";
+        ProtectHome = true;
+        ProtectSystem = "strict";
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        NoNewPrivileges = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        PrivateMounts = true;
+      };
+    };
+  };
+}
+
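
A minimal enablement sketch for the Uptime Kuma module above, using only options declared in this file; the freeform `settings` values become environment variables of the service and override the module's `mkDefault` values.

```nix
{
  services.uptime-kuma = {
    enable = true;
    settings = {
      # Overrides the mkDefault PORT = "3001" set by the module.
      PORT = "4000";
      NODE_EXTRA_CA_CERTS = "/etc/ssl/certs/ca-certificates.crt";
    };
  };
}
```
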
diff --git a/nixpkgs/nixos/modules/services/monitoring/uptime.nix b/nixpkgs/nixos/modules/services/monitoring/uptime.nix
new file mode 100644
index 000000000000..7bf9e593c95e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/uptime.nix
@@ -0,0 +1,100 @@
+{ config, options, pkgs, lib, ... }:
+let
+  inherit (lib) literalExpression mkOption mkEnableOption mkIf mkMerge types optional;
+
+  cfg = config.services.uptime;
+  opt = options.services.uptime;
+
+  configDir = pkgs.runCommand "config" { preferLocalBuild = true; }
+  (if cfg.configFile != null then ''
+    mkdir $out
+    ext=`echo ${cfg.configFile} | grep -o \\..*`
+    ln -sv ${cfg.configFile} $out/default$ext
+    ln -sv /var/lib/uptime/runtime.json $out/runtime.json
+  '' else ''
+    mkdir $out
+    cat ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/config/default.yaml > $out/default.yaml
+    cat >> $out/default.yaml <<EOF
+
+    autoStartMonitor: false
+
+    mongodb:
+      connectionString: 'mongodb://localhost/uptime'
+    EOF
+    ln -sv /var/lib/uptime/runtime.json $out/runtime.json
+  '');
+in {
+  options.services.uptime = {
+    configFile = mkOption {
+      description = lib.mdDoc ''
+        The uptime configuration file
+
+        If mongodb: server != localhost, please set usesRemoteMongo = true
+
+        If you only want to run the monitor, please set enableWebService = false
+        and enableSeparateMonitoringService = true
+
+        If autoStartMonitor: false (recommended) and you want to run both
+        services, please set enableSeparateMonitoringService = true
+      '';
+
+      type = types.nullOr types.path;
+
+      default = null;
+    };
+
+    usesRemoteMongo = mkOption {
+      description = lib.mdDoc "Whether the configuration file specifies a remote mongo instance";
+
+      default = false;
+
+      type = types.bool;
+    };
+
+    enableWebService = mkEnableOption (lib.mdDoc "the uptime monitoring program web service");
+
+    enableSeparateMonitoringService = mkEnableOption (lib.mdDoc "the uptime monitoring service") // {
+      default = cfg.enableWebService;
+      defaultText = literalExpression "config.${opt.enableWebService}";
+    };
+
+    nodeEnv = mkOption {
+      description = lib.mdDoc "The node environment to run in (development, production, etc.)";
+
+      type = types.str;
+
+      default = "production";
+    };
+  };
+
+  config = mkMerge [ (mkIf cfg.enableWebService {
+    systemd.services.uptime = {
+      description = "uptime web service";
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        NODE_CONFIG_DIR = configDir;
+        NODE_ENV = cfg.nodeEnv;
+        NODE_PATH = "${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/node_modules";
+      };
+      preStart = "mkdir -p /var/lib/uptime";
+      serviceConfig.ExecStart = "${pkgs.nodejs}/bin/node ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/app.js";
+    };
+
+    services.mongodb.enable = mkIf (!cfg.usesRemoteMongo) true;
+  }) (mkIf cfg.enableSeparateMonitoringService {
+    systemd.services.uptime-monitor = {
+      description = "uptime monitoring service";
+      wantedBy = [ "multi-user.target" ];
+      requires = optional cfg.enableWebService "uptime.service";
+      after = optional cfg.enableWebService "uptime.service";
+      environment = {
+        NODE_CONFIG_DIR = configDir;
+        NODE_ENV = cfg.nodeEnv;
+        NODE_PATH = "${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/node_modules";
+      };
+      # Ugh, need to wait for web service to be up
+      preStart = if cfg.enableWebService then "sleep 1s" else "mkdir -p /var/lib/uptime";
+      serviceConfig.ExecStart = "${pkgs.nodejs}/bin/node ${pkgs.nodePackages.node-uptime}/lib/node_modules/node-uptime/monitor.js";
+    };
+  }) ];
+}
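
A sketch of enabling the uptime web service with the generated default configuration; when `configFile` stays at `null`, the module generates a config pointing at a local MongoDB and enables `services.mongodb` automatically.

```nix
{
  services.uptime = {
    # Enables the web frontend; enableSeparateMonitoringService then
    # defaults to the same value, so the monitor runs as well.
    enableWebService = true;
    # configFile left at its default (null): a local MongoDB config is
    # generated and services.mongodb is enabled by the module.
  };
}
```
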
diff --git a/nixpkgs/nixos/modules/services/monitoring/vmagent.nix b/nixpkgs/nixos/modules/services/monitoring/vmagent.nix
new file mode 100644
index 000000000000..0e2ffb31c57c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/vmagent.nix
@@ -0,0 +1,110 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.services.vmagent;
+  settingsFormat = pkgs.formats.json { };
+in {
+  options.services.vmagent = {
+    enable = mkEnableOption (lib.mdDoc "vmagent");
+
+    user = mkOption {
+      default = "vmagent";
+      type = types.str;
+      description = lib.mdDoc ''
+        User account under which vmagent runs.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "vmagent";
+      description = lib.mdDoc ''
+        Group under which vmagent runs.
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.vmagent;
+      defaultText = lib.literalMD "pkgs.vmagent";
+      type = types.package;
+      description = lib.mdDoc ''
+        vmagent package to use.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/vmagent";
+      description = lib.mdDoc ''
+        The directory where vmagent stores its data files.
+      '';
+    };
+
+    remoteWriteUrl = mkOption {
+      default = "http://localhost:8428/api/v1/write";
+      type = types.str;
+      description = lib.mdDoc ''
+        The URL of the remote write storage endpoint, such as VictoriaMetrics.
+      '';
+    };
+
+    prometheusConfig = mkOption {
+      type = lib.types.submodule { freeformType = settingsFormat.type; };
+      description = lib.mdDoc ''
+        Configuration for Prometheus-style metric scraping, passed to {command}`vmagent` via the `-promscrape.config` flag.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open the firewall for the default ports.
+      '';
+    };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra args to pass to `vmagent`. See the docs:
+        <https://docs.victoriametrics.com/vmagent.html#advanced-usage>
+        or {command}`vmagent -help` for more information.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups = mkIf (cfg.group == "vmagent") { vmagent = { }; };
+
+    users.users = mkIf (cfg.user == "vmagent") {
+      vmagent = {
+        group = cfg.group;
+        description = "vmagent daemon user";
+        home = cfg.dataDir;
+        isSystemUser = true;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ 8429 ];
+
+    systemd.services.vmagent = let
+      prometheusConfig = settingsFormat.generate "prometheusConfig.yaml" cfg.prometheusConfig;
+    in {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "vmagent system service";
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Type = "simple";
+        Restart = "on-failure";
+        WorkingDirectory = cfg.dataDir;
+        ExecStart = "${cfg.package}/bin/vmagent -remoteWrite.url=${cfg.remoteWriteUrl} -promscrape.config=${prometheusConfig} ${escapeShellArgs cfg.extraArgs}";
+      };
+    };
+
+    systemd.tmpfiles.rules =
+      [ "d '${cfg.dataDir}' 0755 ${cfg.user} ${cfg.group} -" ];
+  };
+}
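
A minimal sketch of the vmagent module above. `remoteWriteUrl` and `prometheusConfig` are options of this module; the `scrape_configs` layout inside `prometheusConfig` is an assumption based on the Prometheus scrape configuration format that `-promscrape.config` expects, not something this module enforces.

```nix
{
  services.vmagent = {
    enable = true;
    remoteWriteUrl = "http://localhost:8428/api/v1/write";
    prometheusConfig = {
      # Serialized by the module and passed via -promscrape.config.
      scrape_configs = [
        {
          job_name = "node";
          static_configs = [ { targets = [ "127.0.0.1:9100" ]; } ];
        }
      ];
    };
  };
}
```
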
diff --git a/nixpkgs/nixos/modules/services/monitoring/vmalert.nix b/nixpkgs/nixos/modules/services/monitoring/vmalert.nix
new file mode 100644
index 000000000000..27fb34e199b5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/vmalert.nix
@@ -0,0 +1,136 @@
+{ config, pkgs, lib, ... }: with lib;
+let
+  cfg = config.services.vmalert;
+
+  format = pkgs.formats.yaml {};
+
+  confOpts = concatStringsSep " \\\n" (mapAttrsToList mkLine (filterAttrs (_: v: v != false) cfg.settings));
+  confType = with types;
+    let
+      valueType = oneOf [ bool int path str ];
+    in
+    attrsOf (either valueType (listOf valueType));
+
+  mkLine = key: value:
+    if value == true then "-${key}"
+    else if isList value then concatMapStringsSep " " (v: "-${key}=${escapeShellArg (toString v)}") value
+    else "-${key}=${escapeShellArg (toString value)}"
+  ;
+in
+{
+  # interface
+  options.services.vmalert = {
+    enable = mkEnableOption (mdDoc "vmalert");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.victoriametrics;
+      defaultText = "pkgs.victoriametrics";
+      description = mdDoc ''
+        The VictoriaMetrics derivation to use.
+      '';
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = confType;
+        options = {
+
+          "datasource.url" = mkOption {
+            type = types.nonEmptyStr;
+            example = "http://localhost:8428";
+            description = mdDoc ''
+              Datasource compatible with Prometheus HTTP API.
+            '';
+          };
+
+          "notifier.url" = mkOption {
+            type = with types; listOf nonEmptyStr;
+            default = [];
+            example = [ "http://127.0.0.1:9093" ];
+            description = mdDoc ''
+              Prometheus Alertmanager URL. List all Alertmanager URLs if they run in cluster mode to ensure high availability.
+            '';
+          };
+
+          "rule" = mkOption {
+            type = with types; listOf path;
+            description = mdDoc ''
+              Path to the files with alerting and/or recording rules.
+
+              ::: {.note}
+              Consider using the {option}`services.vmalert.rules` option as a convenient alternative for declaring rules
+              directly in the `nix` language.
+              :::
+            '';
+          };
+
+        };
+      };
+      default = { };
+      example = {
+        "datasource.url" = "http://localhost:8428";
+        "datasource.disableKeepAlive" = true;
+        "datasource.showURL" = false;
+        "rule" = [
+          "http://<some-server-addr>/path/to/rules"
+          "dir/*.yaml"
+        ];
+      };
+      description = mdDoc ''
+        `vmalert` configuration, passed via command line flags. Refer to
+        <https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmalert/README.md#configuration>
+        for details on supported values.
+      '';
+    };
+
+    rules = mkOption {
+      type = format.type;
+      default = {};
+      example = {
+        group = [
+          { name = "TestGroup";
+            rules = [
+              { alert = "ExampleAlertAlwaysFiring";
+                expr = ''
+                  sum by(job)
+                  (up == 1)
+                '';
+              }
+            ];
+          }
+        ];
+      };
+      description = mdDoc ''
+        A list of the given alerting or recording rules against configured `"datasource.url"` compatible with
+        Prometheus HTTP API for `vmalert` to execute. Refer to
+        <https://github.com/VictoriaMetrics/VictoriaMetrics/blob/master/app/vmalert/README.md#rules>
+        for details on supported values.
+      '';
+    };
+  };
+
+  # implementation
+  config = mkIf cfg.enable {
+
+    environment.etc."vmalert/rules.yml".source = format.generate "rules.yml" cfg.rules;
+
+    services.vmalert.settings.rule = [
+      "/etc/vmalert/rules.yml"
+    ];
+
+    systemd.services.vmalert = {
+      description = "vmalert service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      reloadTriggers = [ config.environment.etc."vmalert/rules.yml".source ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        Restart = "on-failure";
+        ExecStart = "${cfg.package}/bin/vmalert ${confOpts}";
+        ExecReload = ''${pkgs.coreutils}/bin/kill -SIGHUP "$MAINPID"'';
+      };
+    };
+  };
+}
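
A minimal sketch of the vmalert module above, built only from options declared in this file and the examples it already carries; URLs are placeholders.

```nix
{
  services.vmalert = {
    enable = true;
    settings = {
      "datasource.url" = "http://localhost:8428";
      "notifier.url" = [ "http://127.0.0.1:9093" ];
    };
    rules = {
      # Written to /etc/vmalert/rules.yml and appended to settings.rule
      # by the module itself.
      group = [
        {
          name = "ExampleGroup";
          rules = [
            { alert = "InstanceDown"; expr = "up == 0"; }
          ];
        }
      ];
    };
  };
}
```
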
diff --git a/nixpkgs/nixos/modules/services/monitoring/vnstat.nix b/nixpkgs/nixos/modules/services/monitoring/vnstat.nix
new file mode 100644
index 000000000000..a498962ae57e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/vnstat.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.vnstat;
+in {
+  options.services.vnstat = {
+    enable = mkEnableOption (lib.mdDoc "update of network usage statistics via vnstatd");
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.vnstat ];
+
+    users = {
+      groups.vnstatd = {};
+
+      users.vnstatd = {
+        isSystemUser = true;
+        group = "vnstatd";
+        description = "vnstat daemon user";
+      };
+    };
+
+    systemd.services.vnstat = {
+      description = "vnStat network traffic monitor";
+      path = [ pkgs.coreutils ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      documentation = [
+        "man:vnstatd(1)"
+        "man:vnstat(1)"
+        "man:vnstat.conf(5)"
+      ];
+      serviceConfig = {
+        ExecStart = "${pkgs.vnstat}/bin/vnstatd -n";
+        ExecReload = "${pkgs.procps}/bin/kill -HUP $MAINPID";
+
+        # Hardening (from upstream example service)
+        ProtectSystem = "strict";
+        StateDirectory = "vnstat";
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectKernelModules = true;
+        PrivateTmp = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+
+        User = "vnstatd";
+        Group = "vnstatd";
+      };
+    };
+  };
+
+  meta.maintainers = [ maintainers.evils ];
+}
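
The vnstat module above exposes only an enable switch; a sketch of its use:

```nix
{
  # vnstatd runs as the vnstatd user and keeps its databases under
  # /var/lib/vnstat (via StateDirectory); query them with `vnstat`.
  services.vnstat.enable = true;
}
```
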
diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix
new file mode 100644
index 000000000000..b497ecbcdb6c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-agent.nix
@@ -0,0 +1,178 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.zabbixAgent;
+
+  inherit (lib) mkDefault mkEnableOption mkIf mkMerge mkOption;
+  inherit (lib) attrValues concatMapStringsSep literalExpression optionalString types;
+  inherit (lib.generators) toKeyValue;
+
+  user = "zabbix-agent";
+  group = "zabbix-agent";
+
+  moduleEnv = pkgs.symlinkJoin {
+    name = "zabbix-agent-module-env";
+    paths = attrValues cfg.modules;
+  };
+
+  configFile = pkgs.writeText "zabbix_agent.conf" (toKeyValue { listsAsDuplicateKeys = true; } cfg.settings);
+
+in
+
+{
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "zabbixAgent" "extraConfig" ] "Use services.zabbixAgent.settings instead.")
+  ];
+
+  # interface
+
+  options = {
+
+    services.zabbixAgent = {
+      enable = mkEnableOption (lib.mdDoc "the Zabbix Agent");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.zabbix.agent;
+        defaultText = literalExpression "pkgs.zabbix.agent";
+        description = lib.mdDoc "The Zabbix package to use.";
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs; [ nettools ];
+        defaultText = literalExpression "with pkgs; [ nettools ]";
+        example = literalExpression "with pkgs; [ nettools mysql ]";
+        description = lib.mdDoc ''
+          Packages to be added to the Zabbix {env}`PATH`.
+          Typically used to add executables for scripts, but can be anything.
+        '';
+      };
+
+      modules = mkOption {
+        type = types.attrsOf types.package;
+        description = lib.mdDoc "A set of modules to load.";
+        default = {};
+        example = literalExpression ''
+          {
+            "dummy.so" = pkgs.stdenv.mkDerivation {
+              name = "zabbix-dummy-module-''${cfg.package.version}";
+              src = cfg.package.src;
+              buildInputs = [ cfg.package ];
+              sourceRoot = "zabbix-''${cfg.package.version}/src/modules/dummy";
+              installPhase = '''
+                mkdir -p $out/lib
+                cp dummy.so $out/lib/
+              ''';
+            };
+          }
+        '';
+      };
+
+      server = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The IP address or hostname of the Zabbix server to connect to.
+        '';
+      };
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc ''
+            List of comma delimited IP addresses that the agent should listen on.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 10050;
+          description = lib.mdDoc ''
+            Agent will listen on this port for connections from the server.
+          '';
+        };
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the Zabbix Agent.
+        '';
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ int str (listOf str) ]);
+        default = {};
+        description = lib.mdDoc ''
+          Zabbix Agent configuration. Refer to
+          <https://www.zabbix.com/documentation/current/manual/appendix/config/zabbix_agentd>
+          for details on supported values.
+        '';
+        example = {
+          Hostname = "example.org";
+          DebugLevel = 4;
+        };
+      };
+
+    };
+
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    services.zabbixAgent.settings = mkMerge [
+      {
+        LogType = "console";
+        Server = cfg.server;
+        ListenPort = cfg.listen.port;
+      }
+      (mkIf (cfg.modules != {}) {
+        LoadModule = builtins.attrNames cfg.modules;
+        LoadModulePath = "${moduleEnv}/lib";
+      })
+
+      # the default value for "ListenIP" is 0.0.0.0 but zabbix agent 2 cannot accept configuration files which
+      # explicitly set "ListenIP" to the default value...
+      (mkIf (cfg.listen.ip != "0.0.0.0") { ListenIP = cfg.listen.ip; })
+    ];
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ];
+    };
+
+    users.users.${user} = {
+      description = "Zabbix Agent daemon user";
+      inherit group;
+      isSystemUser = true;
+    };
+
+    users.groups.${group} = { };
+
+    systemd.services.zabbix-agent = {
+      description = "Zabbix Agent";
+
+      wantedBy = [ "multi-user.target" ];
+
+      # https://www.zabbix.com/documentation/current/manual/config/items/userparameters
+      # > User parameters are commands executed by Zabbix agent.
+      # > /bin/sh is used as a command line interpreter under UNIX operating systems.
+      path = with pkgs; [ bash "/run/wrappers" ] ++ cfg.extraPackages;
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/sbin/zabbix_agentd zabbix_agentd -f --config ${configFile}";
+        Restart = "always";
+        RestartSec = 2;
+
+        User = user;
+        Group = group;
+        PrivateTmp = true;
+      };
+    };
+
+  };
+
+}
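
A minimal sketch of the Zabbix Agent module above; the server and hostname values are placeholders, and all option names come from this file.

```nix
{
  services.zabbixAgent = {
    enable = true;
    server = "zabbix.example.org";  # placeholder Zabbix server address
    openFirewall = true;            # opens listen.port (10050 by default)
    settings = {
      Hostname = "agent.example.org";
    };
  };
}
```
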
diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-proxy.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-proxy.nix
new file mode 100644
index 000000000000..503e81b48a58
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-proxy.nix
@@ -0,0 +1,323 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.services.zabbixProxy;
+  opt = options.services.zabbixProxy;
+  pgsql = config.services.postgresql;
+  mysql = config.services.mysql;
+
+  inherit (lib) mkAfter mkDefault mkEnableOption mkIf mkMerge mkOption;
+  inherit (lib) attrValues concatMapStringsSep getName literalExpression optional optionalAttrs optionalString types;
+  inherit (lib.generators) toKeyValue;
+
+  user = "zabbix";
+  group = "zabbix";
+  runtimeDir = "/run/zabbix";
+  stateDir = "/var/lib/zabbix";
+  passwordFile = "${runtimeDir}/zabbix-dbpassword.conf";
+
+  moduleEnv = pkgs.symlinkJoin {
+    name = "zabbix-proxy-module-env";
+    paths = attrValues cfg.modules;
+  };
+
+  configFile = pkgs.writeText "zabbix_proxy.conf" (toKeyValue { listsAsDuplicateKeys = true; } cfg.settings);
+
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
+
+in
+
+{
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "zabbixProxy" "extraConfig" ] "Use services.zabbixProxy.settings instead.")
+  ];
+
+  # interface
+
+  options = {
+
+    services.zabbixProxy = {
+      enable = mkEnableOption (lib.mdDoc "the Zabbix Proxy");
+
+      server = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The IP address or hostname of the Zabbix server to connect to.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default =
+          if cfg.database.type == "mysql" then pkgs.zabbix.proxy-mysql
+          else if cfg.database.type == "pgsql" then pkgs.zabbix.proxy-pgsql
+          else pkgs.zabbix.proxy-sqlite;
+        defaultText = literalExpression "pkgs.zabbix.proxy-pgsql";
+        description = lib.mdDoc "The Zabbix package to use.";
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs; [ nettools nmap traceroute ];
+        defaultText = literalExpression "with pkgs; [ nettools nmap traceroute ]";
+        description = lib.mdDoc ''
+          Packages to be added to the Zabbix {env}`PATH`.
+          Typically used to add executables for scripts, but can be anything.
+        '';
+      };
+
+      modules = mkOption {
+        type = types.attrsOf types.package;
+        description = lib.mdDoc "A set of modules to load.";
+        default = {};
+        example = literalExpression ''
+          {
+            "dummy.so" = pkgs.stdenv.mkDerivation {
+              name = "zabbix-dummy-module-''${cfg.package.version}";
+              src = cfg.package.src;
+              buildInputs = [ cfg.package ];
+              sourceRoot = "zabbix-''${cfg.package.version}/src/modules/dummy";
+              installPhase = '''
+                mkdir -p $out/lib
+                cp dummy.so $out/lib/
+              ''';
+            };
+          }
+        '';
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "mysql" "pgsql" "sqlite" ];
+          example = "mysql";
+          default = "pgsql";
+          description = lib.mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = if cfg.database.type == "mysql" then mysql.port else pgsql.port;
+          defaultText = literalExpression ''
+            if config.${opt.database.type} == "mysql"
+            then config.${options.services.mysql.port}
+            else config.${options.services.postgresql.port}
+          '';
+          description = lib.mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = if cfg.database.type == "sqlite" then "${stateDir}/zabbix.db" else "zabbix";
+          defaultText = literalExpression "zabbix";
+          description = lib.mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "zabbix";
+          description = lib.mdDoc "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/zabbix-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            {option}`database.user`.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/postgresql";
+          description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+        };
+
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to create a local database automatically.";
+        };
+      };
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc ''
+            List of comma delimited IP addresses that the trapper should listen on.
+            Trapper will listen on all network interfaces if this parameter is missing.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 10051;
+          description = lib.mdDoc ''
+            Listen port for trapper.
+          '';
+        };
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the Zabbix Proxy.
+        '';
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ int str (listOf str) ]);
+        default = {};
+        description = lib.mdDoc ''
+          Zabbix Proxy configuration. Refer to
+          <https://www.zabbix.com/documentation/current/manual/appendix/config/zabbix_proxy>
+          for details on supported values.
+        '';
+        example = {
+          CacheSize = "1G";
+          SSHKeyLocation = "/var/lib/zabbix/.ssh";
+          StartPingers = 32;
+        };
+      };
+
+    };
+
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = !config.services.zabbixServer.enable;
+        message = "Please choose one of services.zabbixServer or services.zabbixProxy.";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.name == cfg.database.user;
+        message = "services.zabbixProxy.database.user must be set to ${user} if services.zabbixProxy.database.createLocally is set to true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.zabbixProxy.database.createLocally is set to true";
+      }
+    ];
+
+    services.zabbixProxy.settings = mkMerge [
+      {
+        LogType = "console";
+        ListenIP = cfg.listen.ip;
+        ListenPort = cfg.listen.port;
+        Server = cfg.server;
+        # TODO: set to cfg.database.socket if database type is pgsql?
+        DBHost = optionalString (cfg.database.createLocally != true) cfg.database.host;
+        DBName = cfg.database.name;
+        DBUser = cfg.database.user;
+        SocketDir = runtimeDir;
+        FpingLocation = "/run/wrappers/bin/fping";
+        LoadModule = builtins.attrNames cfg.modules;
+      }
+      (mkIf (cfg.database.createLocally != true) { DBPort = cfg.database.port; })
+      (mkIf (cfg.database.passwordFile != null) { Include = [ "${passwordFile}" ]; })
+      (mkIf (mysqlLocal && cfg.database.socket != null) { DBSocket = cfg.database.socket; })
+      (mkIf (cfg.modules != {}) { LoadModulePath = "${moduleEnv}/lib"; })
+    ];
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ];
+    };
+
+    services.mysql = optionalAttrs mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+    };
+
+    systemd.services.mysql.postStart = mkAfter (optionalString mysqlLocal ''
+      ( echo "CREATE DATABASE IF NOT EXISTS \`${cfg.database.name}\` CHARACTER SET utf8 COLLATE utf8_bin;"
+        echo "CREATE USER IF NOT EXISTS '${cfg.database.user}'@'localhost' IDENTIFIED WITH ${if (getName config.services.mysql.package == getName pkgs.mariadb) then "unix_socket" else "auth_socket"};"
+        echo "GRANT ALL PRIVILEGES ON \`${cfg.database.name}\`.* TO '${cfg.database.user}'@'localhost';"
+      ) | ${config.services.mysql.package}/bin/mysql -N
+    '');
+
+    services.postgresql = optionalAttrs pgsqlLocal {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    users.users.${user} = {
+      description = "Zabbix daemon user";
+      uid = config.ids.uids.zabbix;
+      inherit group;
+    };
+
+    users.groups.${group} = {
+      gid = config.ids.gids.zabbix;
+    };
+
+    security.wrappers = {
+      fping =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.fping}/bin/fping";
+        };
+    };
+
+    systemd.services.zabbix-proxy = {
+      description = "Zabbix Proxy";
+
+      wantedBy = [ "multi-user.target" ];
+      after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+
+      path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+      preStart = optionalString pgsqlLocal ''
+        if ! test -e "${stateDir}/db-created"; then
+          cat ${cfg.package}/share/zabbix/database/postgresql/schema.sql | ${pgsql.package}/bin/psql ${cfg.database.name}
+          touch "${stateDir}/db-created"
+        fi
+      '' + optionalString mysqlLocal ''
+        if ! test -e "${stateDir}/db-created"; then
+          cat ${cfg.package}/share/zabbix/database/mysql/schema.sql | ${mysql.package}/bin/mysql ${cfg.database.name}
+          touch "${stateDir}/db-created"
+        fi
+      '' + optionalString (cfg.database.type == "sqlite") ''
+        if ! test -e "${cfg.database.name}"; then
+          ${pkgs.sqlite}/bin/sqlite3 "${cfg.database.name}" < ${cfg.package}/share/zabbix/database/sqlite3/schema.sql
+        fi
+      '' + optionalString (cfg.database.passwordFile != null) ''
+        # create a copy of the supplied password file in a format zabbix can consume
+        touch ${passwordFile}
+        chmod 0600 ${passwordFile}
+        echo -n "DBPassword = " > ${passwordFile}
+        cat ${cfg.database.passwordFile} >> ${passwordFile}
+      '';
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/sbin/zabbix_proxy zabbix_proxy -f --config ${configFile}";
+        Restart = "always";
+        RestartSec = 2;
+
+        User = user;
+        Group = group;
+        RuntimeDirectory = "zabbix";
+        StateDirectory = "zabbix";
+        PrivateTmp = true;
+      };
+    };
+
+  };
+
+}
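
A minimal sketch of the Zabbix Proxy module above; the server address is a placeholder, and the database settings rely on the defaults declared in this file.

```nix
{
  services.zabbixProxy = {
    enable = true;
    server = "zabbix.example.org";  # placeholder Zabbix server address
    # With the default createLocally = true the module provisions a local
    # PostgreSQL database and a "zabbix" user via ensureDatabases/ensureUsers.
    database.type = "pgsql";
    openFirewall = true;
  };
}
```
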
diff --git a/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix b/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix
new file mode 100644
index 000000000000..0607188d2131
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/monitoring/zabbix-server.nix
@@ -0,0 +1,320 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.services.zabbixServer;
+  opt = options.services.zabbixServer;
+  pgsql = config.services.postgresql;
+  mysql = config.services.mysql;
+
+  inherit (lib) mkAfter mkDefault mkEnableOption mkIf mkMerge mkOption;
+  inherit (lib) attrValues concatMapStringsSep getName literalExpression optional optionalAttrs optionalString types;
+  inherit (lib.generators) toKeyValue;
+
+  user = "zabbix";
+  group = "zabbix";
+  runtimeDir = "/run/zabbix";
+  stateDir = "/var/lib/zabbix";
+  passwordFile = "${runtimeDir}/zabbix-dbpassword.conf";
+
+  moduleEnv = pkgs.symlinkJoin {
+    name = "zabbix-server-module-env";
+    paths = attrValues cfg.modules;
+  };
+
+  configFile = pkgs.writeText "zabbix_server.conf" (toKeyValue { listsAsDuplicateKeys = true; } cfg.settings);
+
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
+
+in
+
+{
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "zabbixServer" "dbServer" ] [ "services" "zabbixServer" "database" "host" ])
+    (lib.mkRemovedOptionModule [ "services" "zabbixServer" "dbPassword" ] "Use services.zabbixServer.database.passwordFile instead.")
+    (lib.mkRemovedOptionModule [ "services" "zabbixServer" "extraConfig" ] "Use services.zabbixServer.settings instead.")
+  ];
+
+  # interface
+
+  options = {
+
+    services.zabbixServer = {
+      enable = mkEnableOption (lib.mdDoc "the Zabbix Server");
+
+      package = mkOption {
+        type = types.package;
+        default = if cfg.database.type == "mysql" then pkgs.zabbix.server-mysql else pkgs.zabbix.server-pgsql;
+        defaultText = literalExpression "pkgs.zabbix.server-pgsql";
+        description = lib.mdDoc "The Zabbix package to use.";
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = with pkgs; [ nettools nmap traceroute ];
+        defaultText = literalExpression "with pkgs; [ nettools nmap traceroute ]";
+        description = lib.mdDoc ''
+          Packages to be added to the Zabbix {env}`PATH`.
+          Typically used to add executables for scripts, but can be anything.
+        '';
+      };
+
+      modules = mkOption {
+        type = types.attrsOf types.package;
+        description = lib.mdDoc "A set of modules to load.";
+        default = {};
+        example = literalExpression ''
+          {
+            "dummy.so" = pkgs.stdenv.mkDerivation {
+              name = "zabbix-dummy-module-''${cfg.package.version}";
+              src = cfg.package.src;
+              buildInputs = [ cfg.package ];
+              sourceRoot = "zabbix-''${cfg.package.version}/src/modules/dummy";
+              installPhase = '''
+                mkdir -p $out/lib
+                cp dummy.so $out/lib/
+              ''';
+            };
+          }
+        '';
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "mysql" "pgsql" ];
+          example = "mysql";
+          default = "pgsql";
+          description = lib.mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = if cfg.database.type == "mysql" then mysql.port else pgsql.port;
+          defaultText = literalExpression ''
+            if config.${opt.database.type} == "mysql"
+            then config.${options.services.mysql.port}
+            else config.${options.services.postgresql.port}
+          '';
+          description = lib.mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "zabbix";
+          description = lib.mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "zabbix";
+          description = lib.mdDoc "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/zabbix-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            {option}`database.user`.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/postgresql";
+          description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+        };
+
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to create a local database automatically.";
+        };
+      };
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc ''
+            List of comma delimited IP addresses that the trapper should listen on.
+            Trapper will listen on all network interfaces if this parameter is missing.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 10051;
+          description = lib.mdDoc ''
+            Listen port for trapper.
+          '';
+        };
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the Zabbix Server.
+        '';
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ int str (listOf str) ]);
+        default = {};
+        description = lib.mdDoc ''
+          Zabbix Server configuration. Refer to
+          <https://www.zabbix.com/documentation/current/manual/appendix/config/zabbix_server>
+          for details on supported values.
+        '';
+        example = {
+          CacheSize = "1G";
+          SSHKeyLocation = "/var/lib/zabbix/.ssh";
+          StartPingers = 32;
+        };
+      };
+
+    };
+
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.user == cfg.database.name;
+        message = "services.zabbixServer.database.user must be set to ${user} if services.zabbixServer.database.createLocally is set to true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.zabbixServer.database.createLocally is set to true";
+      }
+    ];
+
+    services.zabbixServer.settings = mkMerge [
+      {
+        LogType = "console";
+        ListenIP = cfg.listen.ip;
+        ListenPort = cfg.listen.port;
+        # TODO: set to cfg.database.socket if database type is pgsql?
+        DBHost = optionalString (cfg.database.createLocally != true) cfg.database.host;
+        DBName = cfg.database.name;
+        DBUser = cfg.database.user;
+        PidFile = "${runtimeDir}/zabbix_server.pid";
+        SocketDir = runtimeDir;
+        FpingLocation = "/run/wrappers/bin/fping";
+        LoadModule = builtins.attrNames cfg.modules;
+      }
+      (mkIf (cfg.database.createLocally != true) { DBPort = cfg.database.port; })
+      (mkIf (cfg.database.passwordFile != null) { Include = [ "${passwordFile}" ]; })
+      (mkIf (mysqlLocal && cfg.database.socket != null) { DBSocket = cfg.database.socket; })
+      (mkIf (cfg.modules != {}) { LoadModulePath = "${moduleEnv}/lib"; })
+    ];
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ];
+    };
+
+    services.mysql = optionalAttrs mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+    };
+
+    systemd.services.mysql.postStart = mkAfter (optionalString mysqlLocal ''
+      ( echo "CREATE DATABASE IF NOT EXISTS \`${cfg.database.name}\` CHARACTER SET utf8 COLLATE utf8_bin;"
+        echo "CREATE USER IF NOT EXISTS '${cfg.database.user}'@'localhost' IDENTIFIED WITH ${if (getName config.services.mysql.package == getName pkgs.mariadb) then "unix_socket" else "auth_socket"};"
+        echo "GRANT ALL PRIVILEGES ON \`${cfg.database.name}\`.* TO '${cfg.database.user}'@'localhost';"
+      ) | ${config.services.mysql.package}/bin/mysql -N
+    '');
+
+    services.postgresql = optionalAttrs pgsqlLocal {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    users.users.${user} = {
+      description = "Zabbix daemon user";
+      uid = config.ids.uids.zabbix;
+      inherit group;
+    };
+
+    users.groups.${group} = {
+      gid = config.ids.gids.zabbix;
+    };
+
+    security.wrappers = {
+      fping =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.fping}/bin/fping";
+        };
+    };
+
+    systemd.services.zabbix-server = {
+      description = "Zabbix Server";
+
+      wantedBy = [ "multi-user.target" ];
+      after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+
+      path = [ "/run/wrappers" ] ++ cfg.extraPackages;
+      preStart = ''
+        # pre 19.09 compatibility
+        if test -e "${runtimeDir}/db-created"; then
+          mv "${runtimeDir}/db-created" "${stateDir}/"
+        fi
+      '' + optionalString pgsqlLocal ''
+        if ! test -e "${stateDir}/db-created"; then
+          cat ${cfg.package}/share/zabbix/database/postgresql/schema.sql | ${pgsql.package}/bin/psql ${cfg.database.name}
+          cat ${cfg.package}/share/zabbix/database/postgresql/images.sql | ${pgsql.package}/bin/psql ${cfg.database.name}
+          cat ${cfg.package}/share/zabbix/database/postgresql/data.sql | ${pgsql.package}/bin/psql ${cfg.database.name}
+          touch "${stateDir}/db-created"
+        fi
+      '' + optionalString mysqlLocal ''
+        if ! test -e "${stateDir}/db-created"; then
+          cat ${cfg.package}/share/zabbix/database/mysql/schema.sql | ${mysql.package}/bin/mysql ${cfg.database.name}
+          cat ${cfg.package}/share/zabbix/database/mysql/images.sql | ${mysql.package}/bin/mysql ${cfg.database.name}
+          cat ${cfg.package}/share/zabbix/database/mysql/data.sql | ${mysql.package}/bin/mysql ${cfg.database.name}
+          touch "${stateDir}/db-created"
+        fi
+      '' + optionalString (cfg.database.passwordFile != null) ''
+        # create a copy of the supplied password file in a format zabbix can consume
+        touch ${passwordFile}
+        chmod 0600 ${passwordFile}
+        echo -n "DBPassword = " > ${passwordFile}
+        cat ${cfg.database.passwordFile} >> ${passwordFile}
+      '';
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/sbin/zabbix_server zabbix_server -f --config ${configFile}";
+        Restart = "always";
+        RestartSec = 2;
+
+        User = user;
+        Group = group;
+        RuntimeDirectory = "zabbix";
+        StateDirectory = "zabbix";
+        PrivateTmp = true;
+      };
+    };
+
+    systemd.services.httpd.after =
+      optional (config.services.zabbixWeb.enable && mysqlLocal) "mysql.service" ++
+      optional (config.services.zabbixWeb.enable && pgsqlLocal) "postgresql.service";
+
+  };
+
+}
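
A minimal sketch of the Zabbix Server module above, using only options declared in this file; the settings values mirror the example already given in the option documentation.

```nix
{
  services.zabbixServer = {
    enable = true;
    database.type = "pgsql";  # default; createLocally provisions it locally
    openFirewall = true;      # opens listen.port (10051 by default)
    settings = {
      CacheSize = "1G";
    };
  };
}
```
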
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/cachefilesd.nix b/nixpkgs/nixos/modules/services/network-filesystems/cachefilesd.nix
new file mode 100644
index 000000000000..da5a79a062c7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/cachefilesd.nix
@@ -0,0 +1,63 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.cachefilesd;
+
+  cfgFile = pkgs.writeText "cachefilesd.conf" ''
+    dir ${cfg.cacheDir}
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+  options = {
+    services.cachefilesd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable cachefilesd network filesystems caching daemon.";
+      };
+
+      cacheDir = mkOption {
+        type = types.str;
+        default = "/var/cache/fscache";
+        description = lib.mdDoc "Directory to contain filesystem cache.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "brun 10%";
+        description = lib.mdDoc "Additional configuration file entries. See cachefilesd.conf(5) for more information.";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    boot.kernelModules = [ "cachefiles" ];
+
+    systemd.services.cachefilesd = {
+      description = "Local network file caching management daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "exec";
+        ExecStart = "${pkgs.cachefilesd}/bin/cachefilesd -n -f ${cfgFile}";
+        Restart = "on-failure";
+        PrivateTmp = true;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${cfg.cacheDir} 0700 root root - -"
+    ];
+  };
+}
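
A minimal sketch of the cachefilesd module above; `cacheDir` shows its default value and `extraConfig` reuses the example from the option documentation.

```nix
{
  services.cachefilesd = {
    enable = true;
    cacheDir = "/var/cache/fscache";  # default location, created via tmpfiles
    # Additional cachefilesd.conf entries; see cachefilesd.conf(5).
    extraConfig = ''
      brun 10%
    '';
  };
}
```
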
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/ceph.nix b/nixpkgs/nixos/modules/services/network-filesystems/ceph.nix
new file mode 100644
index 000000000000..aad03728b203
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/ceph.nix
@@ -0,0 +1,409 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ceph;
+
+  # function that translates "camelCaseOptions" to "camel case options", credits to tilpner in #nixos@freenode
+  expandCamelCase = replaceStrings upperChars (map (s: " ${s}") lowerChars);
+  expandCamelCaseAttrs = mapAttrs' (name: value: nameValuePair (expandCamelCase name) value);
+
+  makeServices = daemonType: daemonIds:
+    mkMerge (map (daemonId:
+      { "ceph-${daemonType}-${daemonId}" = makeService daemonType daemonId cfg.global.clusterName cfg.${daemonType}.package; })
+      daemonIds);
+
+  makeService = daemonType: daemonId: clusterName: ceph:
+    let
+      stateDirectory = "ceph/${if daemonType == "rgw" then "radosgw" else daemonType}/${clusterName}-${daemonId}"; in {
+    enable = true;
+    description = "Ceph ${builtins.replaceStrings lowerChars upperChars daemonType} daemon ${daemonId}";
+    after = [ "network-online.target" "time-sync.target" ] ++ optional (daemonType == "osd") "ceph-mon.target";
+    wants = [ "network-online.target" "time-sync.target" ];
+    partOf = [ "ceph-${daemonType}.target" ];
+    wantedBy = [ "ceph-${daemonType}.target" ];
+
+    path = [ pkgs.getopt ];
+
+    # Don't start services that are not yet initialized
+    unitConfig.ConditionPathExists = "/var/lib/${stateDirectory}/keyring";
+    startLimitBurst =
+      if daemonType == "osd" then 30 else if lib.elem daemonType ["mgr" "mds"] then 3 else 5;
+    startLimitIntervalSec = 60 * 30;  # 30 mins
+
+    serviceConfig = {
+      LimitNOFILE = 1048576;
+      LimitNPROC = 1048576;
+      Environment = "CLUSTER=${clusterName}";
+      ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      PrivateDevices = "yes";
+      PrivateTmp = "true";
+      ProtectHome = "true";
+      ProtectSystem = "full";
+      Restart = "on-failure";
+      StateDirectory = stateDirectory;
+      User = "ceph";
+      Group = if daemonType == "osd" then "disk" else "ceph";
+      ExecStart = ''${ceph.out}/bin/${if daemonType == "rgw" then "radosgw" else "ceph-${daemonType}"} \
+                    -f --cluster ${clusterName} --id ${daemonId}'';
+    } // optionalAttrs (daemonType == "osd") {
+      ExecStartPre = "${ceph.lib}/libexec/ceph/ceph-osd-prestart.sh --id ${daemonId} --cluster ${clusterName}";
+      RestartSec = "20s";
+      PrivateDevices = "no"; # osd needs disk access
+    } // optionalAttrs ( daemonType == "mon") {
+      RestartSec = "10";
+    };
+  };
+
+  makeTarget = daemonType:
+    {
+      "ceph-${daemonType}" = {
+        description = "Ceph target allowing to start/stop all ceph-${daemonType} services at once";
+        partOf = [ "ceph.target" ];
+        wantedBy = [ "ceph.target" ];
+        before = [ "ceph.target" ];
+        unitConfig.StopWhenUnneeded = true;
+      };
+    };
+in
+{
+  options.services.ceph = {
+    # Ceph has a monolithic configuration file but different sections for
+    # each daemon, a separate client section and a global section
+    enable = mkEnableOption (lib.mdDoc "Ceph global configuration");
+
+    global = {
+      fsid = mkOption {
+        type = types.str;
+        example = ''
+          433a2193-4f8a-47a0-95d2-209d7ca2cca5
+        '';
+        description = lib.mdDoc ''
+          Filesystem ID, a generated UUID. It must be generated and set before
+          attempting to start a cluster.
+        '';
+      };
+
+      clusterName = mkOption {
+        type = types.str;
+        default = "ceph";
+        description = lib.mdDoc ''
+          Name of cluster
+        '';
+      };
+
+      mgrModulePath = mkOption {
+        type = types.path;
+        default = "${pkgs.ceph.lib}/lib/ceph/mgr";
+        defaultText = literalExpression ''"''${pkgs.ceph.lib}/lib/ceph/mgr"'';
+        description = lib.mdDoc ''
+          Path at which to find ceph-mgr modules.
+        '';
+      };
+
+      monInitialMembers = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          node0, node1, node2
+        '';
+        description = lib.mdDoc ''
+          List of hosts that will be used as monitors at startup.
+        '';
+      };
+
+      monHost = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.10.0.1, 10.10.0.2, 10.10.0.3
+        '';
+        description = lib.mdDoc ''
+          List of hostname shortnames/IP addresses of the initial monitors.
+        '';
+      };
+
+      maxOpenFiles = mkOption {
+        type = types.int;
+        default = 131072;
+        description = lib.mdDoc ''
+          Max open files for each OSD daemon.
+        '';
+      };
+
+      authClusterRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = lib.mdDoc ''
+          Enables requiring daemons to authenticate with each other in the cluster.
+        '';
+      };
+
+      authServiceRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = lib.mdDoc ''
+          Enables requiring clients to authenticate with the cluster to access services in the cluster (e.g. radosgw, mds or osd).
+        '';
+      };
+
+      authClientRequired = mkOption {
+        type = types.enum [ "cephx" "none" ];
+        default = "cephx";
+        description = lib.mdDoc ''
+          Enables requiring the cluster to authenticate itself to the client.
+        '';
+      };
+
+      publicNetwork = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.20.0.0/24, 192.168.1.0/24
+        '';
+        description = lib.mdDoc ''
+          A comma-separated list of subnets that will be used as public networks in the cluster.
+        '';
+      };
+
+      clusterNetwork = mkOption {
+        type = with types; nullOr commas;
+        default = null;
+        example = ''
+          10.10.0.0/24, 192.168.0.0/24
+        '';
+        description = lib.mdDoc ''
+          A comma-separated list of subnets that will be used as cluster networks in the cluster.
+        '';
+      };
+
+      rgwMimeTypesFile = mkOption {
+        type = with types; nullOr path;
+        default = "${pkgs.mailcap}/etc/mime.types";
+        defaultText = literalExpression ''"''${pkgs.mailcap}/etc/mime.types"'';
+        description = lib.mdDoc ''
+          Path to mime types used by radosgw.
+        '';
+      };
+    };
+
+    extraConfig = mkOption {
+      type = with types; attrsOf str;
+      default = {};
+      example = {
+        "ms bind ipv6" = "true";
+      };
+      description = lib.mdDoc ''
+        Extra configuration to add to the global section. Use for setting values that are common for all daemons in the cluster.
+      '';
+    };
+
+    mgr = {
+      enable = mkEnableOption (lib.mdDoc "Ceph MGR daemon");
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = lib.mdDoc ''
+          A list of names for manager daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in mgr.name1
+        '';
+      };
+      package = mkPackageOptionMD pkgs "ceph" { };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = lib.mdDoc ''
+          Extra configuration to add to the global section for manager daemons.
+        '';
+      };
+    };
+
+    mon = {
+      enable = mkEnableOption (lib.mdDoc "Ceph MON daemon");
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = lib.mdDoc ''
+          A list of monitor daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in mon.name1
+        '';
+      };
+      package = mkPackageOptionMD pkgs "ceph" { };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = lib.mdDoc ''
+          Extra configuration to add to the monitor section.
+        '';
+      };
+    };
+
+    osd = {
+      enable = mkEnableOption (lib.mdDoc "Ceph OSD daemon");
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = lib.mdDoc ''
+          A list of OSD daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in osd.name1
+        '';
+      };
+      package = mkPackageOptionMD pkgs "ceph" { };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {
+          "osd journal size" = "10000";
+          "osd pool default size" = "3";
+          "osd pool default min size" = "2";
+          "osd pool default pg num" = "200";
+          "osd pool default pgp num" = "200";
+          "osd crush chooseleaf type" = "1";
+        };
+        description = lib.mdDoc ''
+          Extra configuration to add to the OSD section.
+        '';
+      };
+    };
+
+    mds = {
+      enable = mkEnableOption (lib.mdDoc "Ceph MDS daemon");
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = lib.mdDoc ''
+          A list of metadata service daemons that should have a service created. The names correspond
+          to the id part in ceph i.e. [ "name1" ] would result in mds.name1
+        '';
+      };
+      package = mkPackageOptionMD pkgs "ceph" { };
+      extraConfig = mkOption {
+        type = with types; attrsOf str;
+        default = {};
+        description = lib.mdDoc ''
+          Extra configuration to add to the MDS section.
+        '';
+      };
+    };
+
+    rgw = {
+      enable = mkEnableOption (lib.mdDoc "Ceph RadosGW daemon");
+      package = mkPackageOptionMD pkgs "ceph" { };
+      daemons = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "name1" "name2" ];
+        description = lib.mdDoc ''
+          A list of rados gateway daemons that should have a service created. The names correspond
+          to the id part in ceph, i.e. [ "name1" ] would result in client.name1. Note that radosgw
+          daemons aren't cluster daemons in the sense that OSD, MGR or MON daemons are; they are
+          simply Ceph daemons that use the cluster as a backend.
+        '';
+      };
+    };
+
+    client = {
+      enable = mkEnableOption (lib.mdDoc "Ceph client configuration");
+      extraConfig = mkOption {
+        type = with types; attrsOf (attrsOf str);
+        default = {};
+        example = literalExpression ''
+          {
+            # This would create a section for a radosgw daemon named node0 and related
+            # configuration for it
+            "client.radosgw.node0" = { "some config option" = "true"; };
+          }
+        '';
+        description = lib.mdDoc ''
+          Extra configuration to add to the client section. Configuration for rados gateways
+          would be added here, with their own sections, see example.
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.ceph.enable {
+    assertions = [
+      { assertion = cfg.global.fsid != "";
+        message = "fsid has to be set to a valid uuid for the cluster to function";
+      }
+      { assertion = cfg.mon.enable -> cfg.mon.daemons != [];
+        message = "You have to set the id of at least one MON if you're going to enable the monitor daemon";
+      }
+      { assertion = cfg.mds.enable -> cfg.mds.daemons != [];
+        message = "You have to set the id of at least one MDS if you're going to enable the metadata service";
+      }
+      { assertion = cfg.osd.enable -> cfg.osd.daemons != [];
+        message = "You have to set the id of at least one OSD if you're going to enable the OSD daemon";
+      }
+      { assertion = cfg.mgr.enable -> cfg.mgr.daemons != [];
+        message = "You have to set the id of at least one MGR if you're going to enable the MGR daemon";
+      }
+    ];
+
+    warnings = optional (cfg.global.monInitialMembers == null)
+      "If monInitialMembers is not set, you have to set the host variable for each mon daemon yourself or else the cluster won't function";
+
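+    # The generated ceph.conf is a plain INI file produced by generators.toINI.
+    # A minimal sketch of its shape, assuming only an fsid is set (hypothetical
+    # value) and no per-daemon extraConfig is given:
+    #
+    #   [global]
+    #   fsid=066ae264-2a5d-4729-8001-6ad265f50b03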
+    environment.etc."ceph/ceph.conf".text = let
+      # Merge the extraConfig set for mgr daemons, as mgr don't have their own section
+      globalSection = expandCamelCaseAttrs (cfg.global // cfg.extraConfig // optionalAttrs cfg.mgr.enable cfg.mgr.extraConfig);
+      # Remove all name-value pairs with null values from the attribute set to avoid making empty sections in the ceph.conf
+      globalSection' = filterAttrs (name: value: value != null) globalSection;
+      totalConfig = {
+          global = globalSection';
+        } // optionalAttrs (cfg.mon.enable && cfg.mon.extraConfig != {}) { mon = cfg.mon.extraConfig; }
+          // optionalAttrs (cfg.mds.enable && cfg.mds.extraConfig != {}) { mds = cfg.mds.extraConfig; }
+          // optionalAttrs (cfg.osd.enable && cfg.osd.extraConfig != {}) { osd = cfg.osd.extraConfig; }
+          // optionalAttrs (cfg.client.enable && cfg.client.extraConfig != {})  cfg.client.extraConfig;
+      in
+        generators.toINI {} totalConfig;
+
+    users.users.ceph = {
+      uid = config.ids.uids.ceph;
+      description = "Ceph daemon user";
+      group = "ceph";
+      extraGroups = [ "disk" ];
+    };
+
+    users.groups.ceph = {
+      gid = config.ids.gids.ceph;
+    };
+
+    systemd.services = let
+      services = []
+        ++ optional cfg.mon.enable (makeServices "mon" cfg.mon.daemons)
+        ++ optional cfg.mds.enable (makeServices "mds" cfg.mds.daemons)
+        ++ optional cfg.osd.enable (makeServices "osd" cfg.osd.daemons)
+        ++ optional cfg.rgw.enable (makeServices "rgw" cfg.rgw.daemons)
+        ++ optional cfg.mgr.enable (makeServices "mgr" cfg.mgr.daemons);
+      in
+        mkMerge services;
+
+    systemd.targets = let
+      targets = [
+        { ceph = {
+          description = "Ceph target that allows starting/stopping all Ceph service instances at once";
+          wantedBy = [ "multi-user.target" ];
+          unitConfig.StopWhenUnneeded = true;
+        }; } ]
+        ++ optional cfg.mon.enable (makeTarget "mon")
+        ++ optional cfg.mds.enable (makeTarget "mds")
+        ++ optional cfg.osd.enable (makeTarget "osd")
+        ++ optional cfg.rgw.enable (makeTarget "rgw")
+        ++ optional cfg.mgr.enable (makeTarget "mgr");
+      in
+        mkMerge targets;
+
+    systemd.tmpfiles.rules = [
+      "d /etc/ceph - ceph ceph - -"
+      "d /run/ceph 0770 ceph ceph -"
+      "d /var/lib/ceph - ceph ceph - -"]
+    ++ optionals cfg.mgr.enable [ "d /var/lib/ceph/mgr - ceph ceph - -"]
+    ++ optionals cfg.mon.enable [ "d /var/lib/ceph/mon - ceph ceph - -"]
+    ++ optionals cfg.osd.enable [ "d /var/lib/ceph/osd - ceph ceph - -"];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/davfs2.nix b/nixpkgs/nixos/modules/services/network-filesystems/davfs2.nix
new file mode 100644
index 000000000000..8024cfba08be
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/davfs2.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.davfs2;
+  cfgFile = pkgs.writeText "davfs2.conf" ''
+    dav_user ${cfg.davUser}
+    dav_group ${cfg.davGroup}
+    ${cfg.extraConfig}
+  '';
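+  # With the default options the generated davfs2.conf contains:
+  #   dav_user davfs2
+  #   dav_group davfs2
+  # followed by whatever lines extraConfig adds.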
+in
+{
+  options.services.davfs2 = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable davfs2.
+      '';
+    };
+
+    davUser = mkOption {
+      type = types.str;
+      default = "davfs2";
+      description = lib.mdDoc ''
+        When invoked by root, the mount.davfs daemon will run as this user.
+        The value must be given as a name, not as a numerical id.
+      '';
+    };
+
+    davGroup = mkOption {
+      type = types.str;
+      default = "davfs2";
+      description = lib.mdDoc ''
+        The group of the running mount.davfs daemon. Ordinary users must be
+        members of this group in order to mount a davfs2 file system. The value
+        must be given as a name, not as a numerical id.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        kernel_fs coda
+        proxy foo.bar:8080
+        use_locks 0
+      '';
+      description = lib.mdDoc ''
+        Extra lines appended to the configuration of davfs2.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.davfs2 ];
+    environment.etc."davfs2/davfs2.conf".source = cfgFile;
+
+    users.groups = optionalAttrs (cfg.davGroup == "davfs2") {
+      davfs2.gid = config.ids.gids.davfs2;
+    };
+
+    users.users = optionalAttrs (cfg.davUser == "davfs2") {
+      davfs2 = {
+        createHome = false;
+        group = cfg.davGroup;
+        uid = config.ids.uids.davfs2;
+        description = "davfs2 user";
+      };
+    };
+
+    security.wrappers."mount.davfs" = {
+      program = "mount.davfs";
+      source = "${pkgs.davfs2}/bin/mount.davfs";
+      owner = "root";
+      group = cfg.davGroup;
+      setuid = true;
+      permissions = "u+rx,g+x";
+    };
+
+    security.wrappers."umount.davfs" = {
+      program = "umount.davfs";
+      source = "${pkgs.davfs2}/bin/umount.davfs";
+      owner = "root";
+      group = cfg.davGroup;
+      setuid = true;
+      permissions = "u+rx,g+x";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/diod.nix b/nixpkgs/nixos/modules/services/network-filesystems/diod.nix
new file mode 100644
index 000000000000..541b4ffd6b46
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/diod.nix
@@ -0,0 +1,159 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.diod;
+
+  diodBool = b: if b then "1" else "0";
+
+  diodConfig = pkgs.writeText "diod.conf" ''
+    allsquash = ${diodBool cfg.allsquash}
+    auth_required = ${diodBool cfg.authRequired}
+    exportall = ${diodBool cfg.exportall}
+    exportopts = "${concatStringsSep "," cfg.exportopts}"
+    exports = { ${concatStringsSep ", " (map (s: ''"${s}"'' ) cfg.exports)} }
+    listen = { ${concatStringsSep ", " (map (s: ''"${s}"'' ) cfg.listen)} }
+    logdest = "${cfg.logdest}"
+    nwthreads = ${toString cfg.nwthreads}
+    squashuser = "${cfg.squashuser}"
+    statfs_passthru = ${diodBool cfg.statfsPassthru}
+    userdb = ${diodBool cfg.userdb}
+    ${cfg.extraConfig}
+  '';
+in
+{
+  options = {
+    services.diod = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the diod 9P file server.";
+      };
+
+      listen = mkOption {
+        type = types.listOf types.str;
+        default = [ "0.0.0.0:564" ];
+        description = lib.mdDoc ''
+          List of "IP:PORT" entries specifying the interfaces and ports that
+          diod should listen on.
+        '';
+      };
+
+      exports = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          List the file systems that clients will be allowed to mount. All paths should
+          be fully qualified. In diod's native configuration the exports table can include
+          two types of element: a string element (a fully qualified path, as used here),
+          or an alternate table element form { path="/path", opts="ro" }. In the alternate
+          form, the (optional) opts attribute is a comma-separated list of export options.
+          The two table element forms can be mixed in the exports table. Note that although
+          diod will not traverse file system boundaries for a given mount due to inode
+          uniqueness constraints, subdirectories of a file system can be separately
+          exported.
+        '';
+      };
+
+      exportall = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Export all file systems listed in /proc/mounts. If new file systems are mounted
+          after diod has started, they will become immediately mountable. If there is a
+          duplicate entry for a file system in the exports list, any options listed in
+          the exports entry will apply.
+        '';
+      };
+
+      exportopts = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Establish a default set of export options. These are overridden, not appended
+          to, by opts attributes in an "exports" entry.
+        '';
+      };
+
+      nwthreads = mkOption {
+        type = types.int;
+        default = 16;
+        description = lib.mdDoc ''
+          Sets the (fixed) number of worker threads created to handle 9P
+          requests for a unique aname.
+        '';
+      };
+
+      authRequired = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether clients are required to authenticate, i.e. to present a valid MUNGE
+          credential. When disabled, clients are allowed to connect without authentication.
+        '';
+      };
+
+      userdb = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable password/group lookups. When disabled, any uid is allowed
+          to attach, gid=uid is assumed, and supplementary groups contain only the
+          primary gid.
+        '';
+      };
+
+      allsquash = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Remap all users to "nobody". The attaching user need not be present in the
+          password file.
+        '';
+      };
+
+      squashuser = mkOption {
+        type = types.str;
+        default = "nobody";
+        description = lib.mdDoc ''
+          Change the squash user. The squash user must be present in the password file.
+        '';
+      };
+
+      logdest = mkOption {
+        type = types.str;
+        default = "syslog:daemon:err";
+        description = lib.mdDoc ''
+          Set the destination for logging.
+          The value has the form of "syslog:facility:level" or "filename".
+        '';
+      };
+
+      statfsPassthru = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          This option configures statfs to return the host file system's type
+          rather than V9FS_MAGIC.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Extra configuration options for diod.conf.";
+      };
+    };
+  };
+
+  config = mkIf config.services.diod.enable {
+    environment.systemPackages = [ pkgs.diod ];
+
+    systemd.services.diod = {
+      description = "diod 9P file server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.diod}/sbin/diod -f -c ${diodConfig}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/drbd.nix b/nixpkgs/nixos/modules/services/network-filesystems/drbd.nix
new file mode 100644
index 000000000000..e74ed391d48e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/drbd.nix
@@ -0,0 +1,63 @@
+# Support for DRBD, the Distributed Replicated Block Device.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.drbd; in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.drbd.enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable support for DRBD, the Distributed Replicated
+        Block Device.
+      '';
+    };
+
+    services.drbd.config = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Contents of the {file}`drbd.conf` configuration file.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.drbd ];
+
+    services.udev.packages = [ pkgs.drbd ];
+
+    boot.kernelModules = [ "drbd" ];
+
+    boot.extraModprobeConfig =
+      ''
+        options drbd usermode_helper=/run/current-system/sw/bin/drbdadm
+      '';
+
+    environment.etc."drbd.conf" =
+      { source = pkgs.writeText "drbd.conf" cfg.config; };
+
+    systemd.services.drbd = {
+      after = [ "systemd-udev-settle.service" "network.target" ];
+      wants = [ "systemd-udev-settle.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.drbd}/sbin/drbdadm up all";
+        ExecStop = "${pkgs.drbd}/sbin/drbdadm down all";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/eris-server.nix b/nixpkgs/nixos/modules/services/network-filesystems/eris-server.nix
new file mode 100644
index 000000000000..66eccfac408c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/eris-server.nix
@@ -0,0 +1,103 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.eris-server;
+  stateDirectoryPath = "\${STATE_DIRECTORY}";
+in {
+
+  options.services.eris-server = {
+
+    enable = lib.mkEnableOption "an ERIS server";
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.eris-go;
+      defaultText = lib.literalExpression "pkgs.eris-go";
+      description = "Package to use for the ERIS server.";
+    };
+
+    decode = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = ''
+        Whether the HTTP service (when enabled) will decode ERIS content at /uri-res/N2R?urn:eris:.
+        Enabling this is recommended only for private or local-only servers.
+      '';
+    };
+
+    listenCoap = lib.mkOption {
+      type = lib.types.str;
+      default = ":5683";
+      example = "[::1]:5683";
+      description = ''
+        Server CoAP listen address. Listen on all IP addresses at port 5683 by default.
+        Please note that the server can service client requests for ERIS-blocks by
+        querying other clients connected to the server. Whether or not blocks are
+        relayed back to the server depends on client configuration, but be aware this
+        may leak sensitive metadata and trigger network activity.
+      '';
+    };
+
+    listenHttp = lib.mkOption {
+      type = lib.types.str;
+      default = "";
+      example = "[::1]:8080";
+      description = "Server HTTP listen address. Do not listen by default.";
+    };
+
+    backends = lib.mkOption {
+      type = with lib.types; listOf str;
+      description = ''
+        List of backend URLs.
+        Add "get" and "put" as query elements to enable those operations.
+      '';
+      example = [
+        "bolt+file:///srv/eris.bolt?get&put"
+        "coap+tcp://eris.example.com:5683?get"
+      ];
+    };
+
+    mountpoint = lib.mkOption {
+      type = lib.types.str;
+      default = "";
+      example = "/eris";
+      description = ''
+        Mountpoint for FUSE namespace that exposes "urn:eris:…" files.
+      '';
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.eris-server = let
+      cmd =
+        "${cfg.package}/bin/eris-go server --coap '${cfg.listenCoap}' --http '${cfg.listenHttp}' ${
+          lib.optionalString cfg.decode "--decode "
+        }${
+          lib.optionalString (cfg.mountpoint != "")
+          ''--mountpoint "${cfg.mountpoint}" ''
+        }${lib.strings.escapeShellArgs cfg.backends}";
+    in {
+      description = "ERIS block server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      script = lib.mkIf (cfg.mountpoint != "") ''
+        export PATH=${config.security.wrapperDir}:$PATH
+        ${cmd}
+      '';
+      serviceConfig = let
+        umounter = lib.mkIf (cfg.mountpoint != "")
+          "-${config.security.wrapperDir}/fusermount -uz ${cfg.mountpoint}";
+      in {
+        ExecStartPre = umounter;
+        ExecStart = lib.mkIf (cfg.mountpoint == "") cmd;
+        ExecStopPost = umounter;
+        Restart = "always";
+        RestartSec = 20;
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ehmry ];
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/glusterfs.nix b/nixpkgs/nixos/modules/services/network-filesystems/glusterfs.nix
new file mode 100644
index 000000000000..ee03bada492d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/glusterfs.nix
@@ -0,0 +1,209 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  inherit (pkgs) glusterfs rsync;
+
+  tlsCmd = if (cfg.tlsSettings != null) then
+  ''
+    mkdir -p /var/lib/glusterd
+    touch /var/lib/glusterd/secure-access
+  ''
+  else
+  ''
+    rm -f /var/lib/glusterd/secure-access
+  '';
+
+  restartTriggers = optionals (cfg.tlsSettings != null) [
+    config.environment.etc."ssl/glusterfs.pem".source
+    config.environment.etc."ssl/glusterfs.key".source
+    config.environment.etc."ssl/glusterfs.ca".source
+  ];
+
+  cfg = config.services.glusterfs;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.glusterfs = {
+
+      enable = mkEnableOption (lib.mdDoc "GlusterFS Daemon");
+
+      logLevel = mkOption {
+        type = types.enum ["DEBUG" "INFO" "WARNING" "ERROR" "CRITICAL" "TRACE" "NONE"];
+        description = lib.mdDoc "Log level used by the GlusterFS daemon";
+        default = "INFO";
+      };
+
+      useRpcbind = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable use of rpcbind. This is required for Gluster's NFS functionality.
+
+          You may want to turn it off to reduce the attack surface for DDoS reflection attacks.
+
+          See https://davelozier.com/glusterfs-and-rpcbind-portmap-ddos-reflection-attacks/
+          and https://bugzilla.redhat.com/show_bug.cgi?id=1426842 for details.
+        '';
+        default = true;
+      };
+
+      enableGlustereventsd = mkOption {
+        type = types.bool;
+        description = lib.mdDoc "Whether to enable the GlusterFS Events Daemon";
+        default = true;
+      };
+
+      killMode = mkOption {
+        type = types.enum ["control-group" "process" "mixed" "none"];
+        description = lib.mdDoc ''
+          The systemd KillMode to use for glusterd.
+
+          glusterd spawns other daemons like gsyncd.
+          If you want these to stop when glusterd is stopped (e.g. to ensure
+          that NixOS config changes are reflected even for these sub-daemons),
+          set this to 'control-group'.
+          If however you want running volume processes (glusterfsd) and thus
+          gluster mounts not be interrupted when glusterd is restarted
+          (for example, when you want to restart them manually at a later time),
+          set this to 'process'.
+        '';
+        default = "control-group";
+      };
+
+      stopKillTimeout = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The systemd TimeoutStopSec to use.
+
+          After this time after having been asked to shut down, glusterd
+          (and depending on the killMode setting also its child processes)
+          are killed by systemd.
+
+          The default is set low because GlusterFS (as of 3.10) is known to
+          not tell its children (like gsyncd) to terminate at all.
+        '';
+        default = "5s";
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "Extra flags passed to the GlusterFS daemon";
+        default = [];
+      };
+
+      tlsSettings = mkOption {
+        description = lib.mdDoc ''
+          Make the server communicate via TLS.
+          This means it will only connect to other gluster
+          servers having certificates signed by the same CA.
+
+          Enabling this will create a file {file}`/var/lib/glusterd/secure-access`.
+          Disabling will delete this file again.
+
+          See also: https://gluster.readthedocs.io/en/latest/Administrator%20Guide/SSL/
+        '';
+        default = null;
+        type = types.nullOr (types.submodule {
+          options = {
+            tlsKeyPath = mkOption {
+              type = types.str;
+              description = lib.mdDoc "Path to the private key used for TLS.";
+            };
+
+            tlsPem = mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path to the certificate used for TLS.";
+            };
+
+            caCert = mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path to the certificate authority certificate used to sign the cluster certificates.";
+            };
+          };
+        });
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.glusterfs ];
+
+    services.rpcbind.enable = cfg.useRpcbind;
+
+    environment.etc = mkIf (cfg.tlsSettings != null) {
+      "ssl/glusterfs.pem".source = cfg.tlsSettings.tlsPem;
+      "ssl/glusterfs.key".source = cfg.tlsSettings.tlsKeyPath;
+      "ssl/glusterfs.ca".source = cfg.tlsSettings.caCert;
+    };
+
+    systemd.services.glusterd = {
+      inherit restartTriggers;
+
+      description = "GlusterFS, a clustered file-system server";
+
+      wantedBy = [ "multi-user.target" ];
+
+      requires = lib.optional cfg.useRpcbind "rpcbind.service";
+      after = [ "network.target" ] ++ lib.optional cfg.useRpcbind "rpcbind.service";
+
+      preStart = ''
+        install -m 0755 -d /var/log/glusterfs
+      ''
+      # The copying of hooks is due to upstream bug https://bugzilla.redhat.com/show_bug.cgi?id=1452761
+      # Excludes one hook due to missing SELinux binaries.
+      + ''
+        mkdir -p /var/lib/glusterd/hooks/
+        ${rsync}/bin/rsync -a --exclude="S10selinux-label-brick.sh" ${glusterfs}/var/lib/glusterd/hooks/ /var/lib/glusterd/hooks/
+
+        ${tlsCmd}
+      ''
+      # `glusterfind` needs dirs that upstream installs at `make install` phase
+      # https://github.com/gluster/glusterfs/blob/v3.10.2/tools/glusterfind/Makefile.am#L16-L17
+      + ''
+        mkdir -p /var/lib/glusterd/glusterfind/.keys
+        mkdir -p /var/lib/glusterd/hooks/1/delete/post/
+      '';
+
+      serviceConfig = {
+        LimitNOFILE = 65536;
+        ExecStart = "${glusterfs}/sbin/glusterd --no-daemon --log-level=${cfg.logLevel} ${toString cfg.extraFlags}";
+        KillMode = cfg.killMode;
+        TimeoutStopSec = cfg.stopKillTimeout;
+      };
+    };
+
+    systemd.services.glustereventsd = mkIf cfg.enableGlustereventsd {
+      inherit restartTriggers;
+
+      description = "Gluster Events Notifier";
+
+      wantedBy = [ "multi-user.target" ];
+
+      after = [ "network.target" ];
+
+      preStart = ''
+        install -m 0755 -d /var/log/glusterfs
+      '';
+
+      # glustereventsd uses the `gluster` executable
+      path = [ glusterfs ];
+
+      serviceConfig = {
+        Type = "simple";
+        PIDFile = "/run/glustereventsd.pid";
+        ExecStart = "${glusterfs}/sbin/glustereventsd --pid-file /run/glustereventsd.pid";
+        ExecReload = "/bin/kill -SIGUSR2 $MAINPID";
+        KillMode = "control-group";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix b/nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix
new file mode 100644
index 000000000000..33ff283d5e81
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/kbfs.nix
@@ -0,0 +1,118 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  inherit (config.security) wrapperDir;
+  cfg = config.services.kbfs;
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.kbfs = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to mount the Keybase filesystem.";
+      };
+
+      enableRedirector = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Keybase root redirector service, allowing
+          any user to access KBFS files via `/keybase`,
+          which will show different contents depending on the requester.
+        '';
+      };
+
+      mountPoint = mkOption {
+        type = types.str;
+        default = "%h/keybase";
+        example = "/keybase";
+        description = lib.mdDoc "Mountpoint for the Keybase filesystem.";
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [
+          "-label kbfs"
+          "-mount-type normal"
+        ];
+        description = lib.mdDoc ''
+          Additional flags to pass to the Keybase filesystem on launch.
+        '';
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+      # Upstream: https://github.com/keybase/client/blob/master/packaging/linux/systemd/kbfs.service
+      systemd.user.services.kbfs = {
+        description = "Keybase File System";
+
+        # Note that the "Requires" directive will cause a unit to be restarted whenever its dependency is restarted.
+        # Do not issue a hard dependency on keybase, because kbfs can reconnect to a restarted service.
+        # Do not issue a hard dependency on keybase-redirector, because it's ok if it fails (e.g., if it is disabled).
+        wants = [ "keybase.service" ] ++ optional cfg.enableRedirector "keybase-redirector.service";
+        path = [ "/run/wrappers" ];
+        unitConfig.ConditionUser = "!@system";
+
+        serviceConfig = {
+          Type = "notify";
+          # Keybase notifies from a forked process
+          EnvironmentFile = [
+            "-%E/keybase/keybase.autogen.env"
+            "-%E/keybase/keybase.env"
+          ];
+          ExecStartPre = [
+            "${pkgs.coreutils}/bin/mkdir -p \"${cfg.mountPoint}\""
+            "-${wrapperDir}/fusermount -uz \"${cfg.mountPoint}\""
+          ];
+          ExecStart = "${pkgs.kbfs}/bin/kbfsfuse ${toString cfg.extraFlags} \"${cfg.mountPoint}\"";
+          ExecStop = "${wrapperDir}/fusermount -uz \"${cfg.mountPoint}\"";
+          Restart = "on-failure";
+          PrivateTmp = true;
+        };
+        wantedBy = [ "default.target" ];
+      };
+
+      services.keybase.enable = true;
+
+      environment.systemPackages = [ pkgs.kbfs ];
+    }
+
+    (mkIf cfg.enableRedirector {
+      security.wrappers."keybase-redirector".source = "${pkgs.kbfs}/bin/redirector";
+
+      systemd.tmpfiles.rules = [ "d /keybase 0755 root root 0" ];
+
+      # Upstream: https://github.com/keybase/client/blob/master/packaging/linux/systemd/keybase-redirector.service
+      systemd.user.services.keybase-redirector = {
+        description = "Keybase Root Redirector for KBFS";
+        wants = [ "keybase.service" ];
+        unitConfig.ConditionUser = "!@system";
+
+        serviceConfig = {
+          EnvironmentFile = [
+            "-%E/keybase/keybase.autogen.env"
+            "-%E/keybase/keybase.env"
+          ];
+          # Note: The /keybase mount point is not currently configurable upstream.
+          ExecStart = "${wrapperDir}/keybase-redirector /keybase";
+          Restart = "on-failure";
+          PrivateTmp = true;
+        };
+
+        wantedBy = [ "default.target" ];
+      };
+    })
+  ]);
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/kubo.nix b/nixpkgs/nixos/modules/services/network-filesystems/kubo.nix
new file mode 100644
index 000000000000..bc746bed31f2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/kubo.nix
@@ -0,0 +1,428 @@
+{ config, lib, pkgs, utils, ... }:
+with lib;
+let
+  cfg = config.services.kubo;
+
+  settingsFormat = pkgs.formats.json {};
+
+  rawDefaultConfig = lib.importJSON (pkgs.runCommand "kubo-default-config" {
+    nativeBuildInputs = [ cfg.package ];
+  } ''
+    export IPFS_PATH="$TMPDIR"
+    ipfs init --empty-repo --profile=${profile}
+    ipfs --offline config show > "$out"
+  '');
+
+  # Remove the PeerID (an attribute of "Identity") of the temporary Kubo repo.
+  # The "Pinning" section contains the "RemoteServices" section, which would prevent
+  # the daemon from starting as that setting can't be changed via ipfs config replace.
+  defaultConfig = builtins.removeAttrs rawDefaultConfig [ "Identity" "Pinning" ];
+
+  customizedConfig = lib.recursiveUpdate defaultConfig cfg.settings;
+
+  configFile = settingsFormat.generate "kubo-config.json" customizedConfig;
+
+  # Create a fake repo containing only the file "api".
+  # $IPFS_PATH will point to this directory instead of the real one.
+  # For some reason the Kubo CLI tools insist on reading the
+  # config file when it exists. But the Kubo daemon sets the file
+  # permissions such that only the ipfs user is allowed to read
+  # this file. This prevents normal users from talking to the daemon.
+  # To work around this terrible design, create a fake repo with no
+  # config file, only an api file and everything should work as expected.
+  fakeKuboRepo = pkgs.writeTextDir "api" ''
+    /unix/run/ipfs.sock
+  '';
+
+  kuboFlags = utils.escapeSystemdExecArgs (
+    optional cfg.autoMount "--mount" ++
+    optional cfg.enableGC "--enable-gc" ++
+    optional (cfg.serviceFdlimit != null) "--manage-fdlimit=false" ++
+    optional (cfg.defaultMode == "offline") "--offline" ++
+    optional (cfg.defaultMode == "norouting") "--routing=none" ++
+    cfg.extraFlags
+  );
+
+  profile =
+    if cfg.localDiscovery
+    then "local-discovery"
+    else "server";
+
+  splitMultiaddr = addrRaw: lib.tail (lib.splitString "/" addrRaw);
+
+  multiaddrsToListenStreams = addrIn:
+    let
+      addrs = if builtins.typeOf addrIn == "list"
+      then addrIn else [ addrIn ];
+      unfilteredResult = map multiaddrToListenStream addrs;
+    in
+      builtins.filter (addr: addr != null) unfilteredResult;
+
+  multiaddrsToListenDatagrams = addrIn:
+    let
+      addrs = if builtins.typeOf addrIn == "list"
+      then addrIn else [ addrIn ];
+      unfilteredResult = map multiaddrToListenDatagram addrs;
+    in
+      builtins.filter (addr: addr != null) unfilteredResult;
+
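+  # For example, "/ip4/127.0.0.1/tcp/8080" is translated to "127.0.0.1:8080",
+  # "/ip6/::1/tcp/8080" to "[::1]:8080" and "/unix/run/ipfs.sock" to
+  # "/run/ipfs.sock"; multiaddrs that don't map to a systemd ListenStream or
+  # ListenDatagram value yield null and are filtered out above.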
+  multiaddrToListenStream = addrRaw:
+    let
+      addr = splitMultiaddr addrRaw;
+      s = builtins.elemAt addr;
+    in
+    if s 0 == "ip4" && s 2 == "tcp"
+    then "${s 1}:${s 3}"
+    else if s 0 == "ip6" && s 2 == "tcp"
+    then "[${s 1}]:${s 3}"
+    else if s 0 == "unix"
+    then "/${lib.concatStringsSep "/" (lib.tail addr)}"
+    else null; # not valid for listen stream, skip
+
+  multiaddrToListenDatagram = addrRaw:
+    let
+      addr = splitMultiaddr addrRaw;
+      s = builtins.elemAt addr;
+    in
+    if s 0 == "ip4" && s 2 == "udp"
+    then "${s 1}:${s 3}"
+    else if s 0 == "ip6" && s 2 == "udp"
+    then "[${s 1}]:${s 3}"
+    else null; # not valid for listen datagram, skip
+
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.kubo = {
+
+      enable = mkEnableOption (lib.mdDoc "Interplanetary File System (WARNING: may cause severe network degradation)");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.kubo;
+        defaultText = literalExpression "pkgs.kubo";
+        description = lib.mdDoc "Which Kubo package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "ipfs";
+        description = lib.mdDoc "User under which the Kubo daemon runs";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "ipfs";
+        description = lib.mdDoc "Group under which the Kubo daemon runs";
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default =
+          if versionAtLeast config.system.stateVersion "17.09"
+          then "/var/lib/ipfs"
+          else "/var/lib/ipfs/.ipfs";
+        defaultText = literalExpression ''
+          if versionAtLeast config.system.stateVersion "17.09"
+          then "/var/lib/ipfs"
+          else "/var/lib/ipfs/.ipfs"
+        '';
+        description = lib.mdDoc "The data dir for Kubo";
+      };
+
+      defaultMode = mkOption {
+        type = types.enum [ "online" "offline" "norouting" ];
+        default = "online";
+        description = lib.mdDoc "The mode in which the Kubo daemon runs by default (online, offline or norouting).";
+      };
+
+      autoMount = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether Kubo should try to mount /ipfs and /ipns at startup.";
+      };
+
+      autoMigrate = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether Kubo should try to run the fs-repo-migration at startup.";
+      };
+
+      ipfsMountDir = mkOption {
+        type = types.str;
+        default = "/ipfs";
+        description = lib.mdDoc "Where to mount the IPFS namespace to";
+      };
+
+      ipnsMountDir = mkOption {
+        type = types.str;
+        default = "/ipns";
+        description = lib.mdDoc "Where to mount the IPNS namespace to";
+      };
+
+      enableGC = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable automatic garbage collection";
+      };
+
+      emptyRepo = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "If set to false, the repo will be initialized with help files";
+      };
+
+      settings = mkOption {
+        type = lib.types.submodule {
+          freeformType = settingsFormat.type;
+
+          options = {
+            Addresses.API = mkOption {
+              type = types.oneOf [ types.str (types.listOf types.str) ];
+              default = [ ];
+              description = lib.mdDoc ''
+                Multiaddr or array of multiaddrs describing the address to serve the local HTTP API on.
+                In addition to the multiaddrs listed here, the daemon will also listen on a Unix domain socket.
+                To allow the ipfs CLI tools to communicate with the daemon over that socket,
+                add your user to the correct group, e.g. `users.users.alice.extraGroups = [ config.services.kubo.group ];`
+              '';
+            };
+
+            Addresses.Gateway = mkOption {
+              type = types.oneOf [ types.str (types.listOf types.str) ];
+              default = "/ip4/127.0.0.1/tcp/8080";
+              description = lib.mdDoc "Where the IPFS Gateway can be reached";
+            };
+
+            Addresses.Swarm = mkOption {
+              type = types.listOf types.str;
+              default = [
+                "/ip4/0.0.0.0/tcp/4001"
+                "/ip6/::/tcp/4001"
+                "/ip4/0.0.0.0/udp/4001/quic-v1"
+                "/ip4/0.0.0.0/udp/4001/quic-v1/webtransport"
+                "/ip6/::/udp/4001/quic-v1"
+                "/ip6/::/udp/4001/quic-v1/webtransport"
+              ];
+              description = lib.mdDoc "Where Kubo listens for incoming p2p connections";
+            };
+          };
+        };
+        description = lib.mdDoc ''
+          Attrset of daemon configuration.
+          See [https://github.com/ipfs/kubo/blob/master/docs/config.md](https://github.com/ipfs/kubo/blob/master/docs/config.md) for reference.
+          You can't set `Identity` or `Pinning`.
+        '';
+        default = { };
+        example = {
+          Datastore.StorageMax = "100GB";
+          Discovery.MDNS.Enabled = false;
+          Bootstrap = [
+            "/ip4/128.199.219.111/tcp/4001/ipfs/QmSoLSafTMBsPKadTEgaXctDQVcqN88CNLHXMkTNwMKPnu"
+            "/ip4/162.243.248.213/tcp/4001/ipfs/QmSoLueR4xBeUbY9WZ9xGUUxunbKWcrNFTDAadQJmocnWm"
+          ];
+          Swarm.AddrFilters = null;
+        };
+
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "Extra flags passed to the Kubo daemon";
+        default = [ ];
+      };
+
+      localDiscovery = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''Whether to enable local discovery for the Kubo daemon.
+          This will allow Kubo to scan ports on your local network. Some hosting services will ban you if you do this.
+        '';
+        default = false;
+      };
+
+      serviceFdlimit = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc "The fdlimit for the Kubo systemd unit or `null` to have the daemon attempt to manage it";
+        example = 64 * 1024;
+      };
+
+      startWhenNeeded = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to use socket activation to start Kubo when needed.";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !builtins.hasAttr "Identity" cfg.settings;
+        message = ''
+          You can't set services.kubo.settings.Identity because the ``config replace`` subcommand used at startup does not support modifying any of the Identity settings.
+        '';
+      }
+      {
+        assertion = !((builtins.hasAttr "Pinning" cfg.settings) && (builtins.hasAttr "RemoteServices" cfg.settings.Pinning));
+        message = ''
+          You can't set services.kubo.settings.Pinning.RemoteServices because the ``config replace`` subcommand used at startup does not work with it.
+        '';
+      }
+      {
+        assertion = !((lib.versionAtLeast cfg.package.version "0.21") && (builtins.hasAttr "Experimental" cfg.settings) && (builtins.hasAttr "AcceleratedDHTClient" cfg.settings.Experimental));
+        message = ''
+          The `services.kubo.settings.Experimental.AcceleratedDHTClient` option was renamed to
+          `services.kubo.settings.Routing.AcceleratedDHTClient` in Kubo 0.21.
+        '';
+      }
+    ];
+
+    environment.systemPackages = [ cfg.package ];
+    environment.variables.IPFS_PATH = fakeKuboRepo;
+
+    # https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
+    boot.kernel.sysctl."net.core.rmem_max" = mkDefault 2500000;
+
+    programs.fuse = mkIf cfg.autoMount {
+      userAllowOther = true;
+    };
+
+    users.users = mkIf (cfg.user == "ipfs") {
+      ipfs = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        createHome = false;
+        uid = config.ids.uids.ipfs;
+        description = "IPFS daemon user";
+        packages = [
+          pkgs.kubo-migrator
+        ];
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "ipfs") {
+      ipfs.gid = config.ids.gids.ipfs;
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${cfg.user} ${cfg.group} - -"
+    ] ++ optionals cfg.autoMount [
+      "d '${cfg.ipfsMountDir}' - ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.ipnsMountDir}' - ${cfg.user} ${cfg.group} - -"
+    ];
+
+    # The hardened systemd unit breaks the fuse-mount function according to documentation in the unit file itself
+    systemd.packages = if cfg.autoMount
+      then [ cfg.package.systemd_unit ]
+      else [ cfg.package.systemd_unit_hardened ];
+
+    services.kubo.settings = mkIf cfg.autoMount {
+      Mounts.FuseAllowOther = lib.mkDefault true;
+      Mounts.IPFS = lib.mkDefault cfg.ipfsMountDir;
+      Mounts.IPNS = lib.mkDefault cfg.ipnsMountDir;
+    };
+
+    systemd.services.ipfs = {
+      path = [ "/run/wrappers" cfg.package ];
+      environment.IPFS_PATH = cfg.dataDir;
+
+      preStart = ''
+        if [[ ! -f "$IPFS_PATH/config" ]]; then
+          ipfs init --empty-repo=${lib.boolToString cfg.emptyRepo}
+        else
+          # After an unclean shutdown this file may exist which will cause the config command to attempt to talk to the daemon. This will hang forever if systemd is holding our sockets open.
+          rm -vf "$IPFS_PATH/api"
+      '' + optionalString cfg.autoMigrate ''
+        ${pkgs.kubo-migrator}/bin/fs-repo-migrations -to '${cfg.package.repoVersion}' -y
+      '' + ''
+        fi
+        ipfs --offline config show |
+          ${pkgs.jq}/bin/jq -s '.[0].Pinning as $Pinning | .[0].Identity as $Identity | .[1] + {$Identity,$Pinning}' - '${configFile}' |
+
+          # This command automatically injects the private key and other secrets from
+          # the old config file back into the new config file.
+          # Unfortunately, it doesn't keep the original `Identity.PeerID`,
+          # so we need `ipfs config show` and jq above.
+          # See https://github.com/ipfs/kubo/issues/8993 for progress on fixing this problem.
+          # Kubo also wants a specific version of the original "Pinning.RemoteServices"
+          # section (redacted by `ipfs config show`), such that that section doesn't
+          # change when the changes are applied. Whyyyyyy.....
+          ipfs --offline config replace -
+      '';
+      postStop = mkIf cfg.autoMount ''
+        # After an unclean shutdown the fuse mounts at cfg.ipnsMountDir and cfg.ipfsMountDir are locked
+        umount --quiet '${cfg.ipnsMountDir}' '${cfg.ipfsMountDir}' || true
+      '';
+      serviceConfig = {
+        ExecStart = [ "" "${cfg.package}/bin/ipfs daemon ${kuboFlags}" ];
+        User = cfg.user;
+        Group = cfg.group;
+        StateDirectory = "";
+        ReadWritePaths = optionals (!cfg.autoMount) [ "" cfg.dataDir ];
+      } // optionalAttrs (cfg.serviceFdlimit != null) { LimitNOFILE = cfg.serviceFdlimit; };
+    } // optionalAttrs (!cfg.startWhenNeeded) {
+      wantedBy = [ "default.target" ];
+    };
+
+    systemd.sockets.ipfs-gateway = {
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream =
+          [ "" ] ++ (multiaddrsToListenStreams cfg.settings.Addresses.Gateway);
+        ListenDatagram =
+          [ "" ] ++ (multiaddrsToListenDatagrams cfg.settings.Addresses.Gateway);
+      };
+    };
+
+    systemd.sockets.ipfs-api = {
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        # We also include "%t/ipfs.sock" because there is no way to put the "%t"
+        # in the multiaddr.
+        ListenStream =
+          [ "" "%t/ipfs.sock" ] ++ (multiaddrsToListenStreams cfg.settings.Addresses.API);
+        SocketMode = "0660";
+        SocketUser = cfg.user;
+        SocketGroup = cfg.group;
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ Luflosi ];
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "ipfs" "enable" ] [ "services" "kubo" "enable" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "package" ] [ "services" "kubo" "package" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "user" ] [ "services" "kubo" "user" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "group" ] [ "services" "kubo" "group" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "dataDir" ] [ "services" "kubo" "dataDir" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "defaultMode" ] [ "services" "kubo" "defaultMode" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "autoMount" ] [ "services" "kubo" "autoMount" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "autoMigrate" ] [ "services" "kubo" "autoMigrate" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "ipfsMountDir" ] [ "services" "kubo" "ipfsMountDir" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "ipnsMountDir" ] [ "services" "kubo" "ipnsMountDir" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "gatewayAddress" ] [ "services" "kubo" "settings" "Addresses" "Gateway" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "apiAddress" ] [ "services" "kubo" "settings" "Addresses" "API" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "swarmAddress" ] [ "services" "kubo" "settings" "Addresses" "Swarm" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "enableGC" ] [ "services" "kubo" "enableGC" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "emptyRepo" ] [ "services" "kubo" "emptyRepo" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "extraConfig" ] [ "services" "kubo" "settings" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "extraFlags" ] [ "services" "kubo" "extraFlags" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "localDiscovery" ] [ "services" "kubo" "localDiscovery" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "serviceFdlimit" ] [ "services" "kubo" "serviceFdlimit" ])
+    (mkRenamedOptionModule [ "services" "ipfs" "startWhenNeeded" ] [ "services" "kubo" "startWhenNeeded" ])
+    (mkRenamedOptionModule [ "services" "kubo" "extraConfig" ] [ "services" "kubo" "settings" ])
+    (mkRenamedOptionModule [ "services" "kubo" "gatewayAddress" ] [ "services" "kubo" "settings" "Addresses" "Gateway" ])
+    (mkRenamedOptionModule [ "services" "kubo" "apiAddress" ] [ "services" "kubo" "settings" "Addresses" "API" ])
+    (mkRenamedOptionModule [ "services" "kubo" "swarmAddress" ] [ "services" "kubo" "settings" "Addresses" "Swarm" ])
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md b/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md
new file mode 100644
index 000000000000..8d8486507b77
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.md
@@ -0,0 +1,52 @@
+# Litestream {#module-services-litestream}
+
+[Litestream](https://litestream.io/) is a standalone streaming
+replication tool for SQLite.
+
+## Configuration {#module-services-litestream-configuration}
+
+The Litestream service runs as a dedicated user named `litestream`,
+which needs permission to access the database file. Here's an example config which gives
+the required permissions to access the [Grafana database](#opt-services.grafana.settings.database.path):
+```
+{ pkgs, ... }:
+{
+  users.users.litestream.extraGroups = [ "grafana" ];
+
+  systemd.services.grafana.serviceConfig.ExecStartPost = "+" + pkgs.writeShellScript "grant-grafana-permissions" ''
+    timeout=10
+
+    while [ ! -f /var/lib/grafana/data/grafana.db ];
+    do
+      if [ "$timeout" == 0 ]; then
+        echo "ERROR: Timeout while waiting for /var/lib/grafana/data/grafana.db."
+        exit 1
+      fi
+
+      sleep 1
+
+      ((timeout--))
+    done
+
+    find /var/lib/grafana -type d -exec chmod -v 775 {} \;
+    find /var/lib/grafana -type f -exec chmod -v 660 {} \;
+  '';
+
+  services.litestream = {
+    enable = true;
+
+    environmentFile = "/run/secrets/litestream";
+
+    settings = {
+      dbs = [
+        {
+          path = "/var/lib/grafana/data/grafana.db";
+          replicas = [{
+            url = "s3://mybkt.litestream.io/grafana";
+          }];
+        }
+      ];
+    };
+  };
+}
+```
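+
+The `environmentFile` referenced above can be used to pass the replica credentials to
+Litestream without putting them into the world-readable Nix store. Its contents might
+look like this (placeholder values):
+
+```
+LITESTREAM_ACCESS_KEY_ID=AKIAxxxxxxxxxxxxxxxx
+LITESTREAM_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
+```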
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.nix b/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.nix
new file mode 100644
index 000000000000..6e2ec1ccaa3c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/litestream/default.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.litestream;
+  settingsFormat = pkgs.formats.yaml {};
+in
+{
+  options.services.litestream = {
+    enable = mkEnableOption (lib.mdDoc "litestream");
+
+    package = mkOption {
+      description = lib.mdDoc "Package to use.";
+      default = pkgs.litestream;
+      defaultText = literalExpression "pkgs.litestream";
+      type = types.package;
+    };
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        See the [documentation](https://litestream.io/reference/config/).
+      '';
+      type = settingsFormat.type;
+      example = {
+        dbs = [
+          {
+            path = "/var/lib/db1";
+            replicas = [
+              {
+                url = "s3://mybkt.litestream.io/db1";
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/run/secrets/litestream";
+      description = lib.mdDoc ''
+        Environment file as defined in {manpage}`systemd.exec(5)`.
+
+        Secrets may be passed to the service without adding them to the
+        world-readable Nix store, by specifying placeholder variables as
+        the option value in Nix and setting these variables accordingly in the
+        environment file.
+
+        By default, Litestream will perform environment variable expansion
+        within the config file before reading it. Any references to ''$VAR or
+        ''${VAR} formatted variables will be replaced with their environment
+        variable values. If no value is set then it will be replaced with an
+        empty string.
+
+        ```
+          # Content of the environment file
+          LITESTREAM_ACCESS_KEY_ID=AKIAxxxxxxxxxxxxxxxx
+          LITESTREAM_SECRET_ACCESS_KEY=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx/xxxxxxxxx
+        ```
+
+        Note that this file needs to be available on the host on which
+        this exporter is running.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    environment.etc = {
+      "litestream.yml" = {
+        source = settingsFormat.generate "litestream-config.yaml" cfg.settings;
+      };
+    };
+
+    systemd.services.litestream = {
+      description = "Litestream";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
+        ExecStart = "${cfg.package}/bin/litestream replicate";
+        Restart = "always";
+        User = "litestream";
+        Group = "litestream";
+      };
+    };
+
+    users.users.litestream = {
+      description = "Litestream user";
+      group = "litestream";
+      isSystemUser = true;
+    };
+    users.groups.litestream = {};
+  };
+
+  meta.doc = ./default.md;
+}
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/moosefs.nix b/nixpkgs/nixos/modules/services/network-filesystems/moosefs.nix
new file mode 100644
index 000000000000..49cbc89d5a91
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/moosefs.nix
@@ -0,0 +1,249 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.moosefs;
+
+  mfsUser = if cfg.runAsUser then "moosefs" else "root";
+
+  settingsFormat = let
+    listSep = " ";
+    allowedTypes = with types; [ bool int float str ];
+    valueToString = val:
+        if isList val then concatStringsSep listSep (map (x: valueToString x) val)
+        else if isBool val then (if val then "1" else "0")
+        else toString val;
+
+    in {
+      type = with types; let
+        valueType = oneOf ([
+          (listOf valueType)
+        ] ++ allowedTypes) // {
+          description = "Flat key-value file";
+        };
+      in attrsOf valueType;
+
+      generate = name: value:
+        pkgs.writeText name ( lib.concatStringsSep "\n" (
+          lib.mapAttrsToList (key: val: "${key} = ${valueToString val}") value ));
+    };
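+  # For example, the (hypothetical) settings { ATIME_MODE = 0; DATA_PATH = "/var/lib/mfs"; }
+  # are rendered as:
+  #   ATIME_MODE = 0
+  #   DATA_PATH = /var/lib/mfs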
+
+
+  initTool = pkgs.writeShellScriptBin "mfsmaster-init" ''
+    if [ ! -e ${cfg.master.settings.DATA_PATH}/metadata.mfs ]; then
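+      # A sketch of the resulting command line (store path elided), assuming the
+      # default listen addresses and the single backend from the option example:
+      #   eris-go server --coap ':5683' --http '' 'bolt+file:///srv/eris.bolt?get&put'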
+      cp ${pkgs.moosefs}/var/mfs/metadata.mfs.empty ${cfg.master.settings.DATA_PATH}
+      chmod +w ${cfg.master.settings.DATA_PATH}/metadata.mfs.empty
+      ${pkgs.moosefs}/bin/mfsmaster -a -c ${masterCfg} start
+      ${pkgs.moosefs}/bin/mfsmaster -c ${masterCfg} stop
+      rm ${cfg.master.settings.DATA_PATH}/metadata.mfs.empty
+    fi
+  '';
+
+  # master config file
+  masterCfg = settingsFormat.generate
+    "mfsmaster.cfg" cfg.master.settings;
+
+  # metalogger config file
+  metaloggerCfg = settingsFormat.generate
+    "mfsmetalogger.cfg" cfg.metalogger.settings;
+
+  # chunkserver config file
+  chunkserverCfg = settingsFormat.generate
+    "mfschunkserver.cfg" cfg.chunkserver.settings;
+
+  # generic template for all daemons
+  systemdService = name: extraConfig: configFile: {
+    wantedBy = [ "multi-user.target" ];
+    wants = [ "network-online.target" ];
+    after = [ "network.target" "network-online.target" ];
+
+    serviceConfig = {
+      Type = "forking";
+      ExecStart  = "${pkgs.moosefs}/bin/mfs${name} -c ${configFile} start";
+      ExecStop   = "${pkgs.moosefs}/bin/mfs${name} -c ${configFile} stop";
+      ExecReload = "${pkgs.moosefs}/bin/mfs${name} -c ${configFile} reload";
+      PIDFile = "${cfg."${name}".settings.DATA_PATH}/.mfs${name}.lock";
+    } // extraConfig;
+  };
+
+in {
+  ###### interface
+
+  options = {
+    services.moosefs = {
+      masterHost = mkOption {
+        type = types.str;
+        default = null;
+        description = lib.mdDoc "IP or DNS name of master host.";
+      };
+
+      runAsUser = mkOption {
+        type = types.bool;
+        default = true;
+        example = true;
+        description = lib.mdDoc "Run daemons as user moosefs instead of root.";
+      };
+
+      client.enable = mkEnableOption (lib.mdDoc "Moosefs client");
+
+      master = {
+        enable = mkOption {
+          type = types.bool;
+          description = lib.mdDoc ''
+            Enable Moosefs master daemon.
+
+            You need to run `mfsmaster-init` on a freshly installed master server to
+            initialize the `DATA_PATH` directory.
+          '';
+          default = false;
+        };
+
+        exports = mkOption {
+          type = with types; listOf str;
+          default = null;
+          description = lib.mdDoc "Paths to export (see mfsexports.cfg).";
+          example = [
+            "* / rw,alldirs,admin,maproot=0:0"
+            "* . rw"
+          ];
+        };
+
+        openFirewall = mkOption {
+          type = types.bool;
+          description = lib.mdDoc "Whether to automatically open the necessary ports in the firewall.";
+          default = false;
+        };
+
+        settings = mkOption {
+          type = types.submodule {
+            freeformType = settingsFormat.type;
+
+            options.DATA_PATH = mkOption {
+              type = types.str;
+              default = "/var/lib/mfs";
+              description = lib.mdDoc "Data storage directory.";
+            };
+          };
+
+          description = lib.mdDoc "Contents of config file (mfsmaster.cfg).";
+        };
+      };
+
+      metalogger = {
+        enable = mkEnableOption (lib.mdDoc "Moosefs metalogger daemon");
+
+        settings = mkOption {
+          type = types.submodule {
+            freeformType = settingsFormat.type;
+
+            options.DATA_PATH = mkOption {
+              type = types.str;
+              default = "/var/lib/mfs";
+              description = lib.mdDoc "Data storage directory.";
+            };
+          };
+
+          description = lib.mdDoc "Contents of metalogger config file (mfsmetalogger.cfg).";
+        };
+      };
+
+      chunkserver = {
+        enable = mkEnableOption (lib.mdDoc "Moosefs chunkserver daemon");
+
+        openFirewall = mkOption {
+          type = types.bool;
+          description = lib.mdDoc "Whether to automatically open the necessary ports in the firewall.";
+          default = false;
+        };
+
+        hdds = mkOption {
+          type = with types; listOf str;
+          default = null;
+          description = lib.mdDoc "Mount points to be used by chunkserver for storage (see mfshdd.cfg).";
+          example = [ "/mnt/hdd1" ];
+        };
+
+        settings = mkOption {
+          type = types.submodule {
+            freeformType = settingsFormat.type;
+
+            options.DATA_PATH = mkOption {
+              type = types.str;
+              default = "/var/lib/mfs";
+              description = lib.mdDoc "Directory for lock file.";
+            };
+          };
+
+          description = lib.mdDoc "Contents of chunkserver config file (mfschunkserver.cfg).";
+        };
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf ( cfg.client.enable || cfg.master.enable || cfg.metalogger.enable || cfg.chunkserver.enable ) {
+
+    warnings = [ ( mkIf (!cfg.runAsUser) "Running moosefs services as root is not recommended.") ];
+
+    # Service settings
+    services.moosefs = {
+      master.settings = mkIf cfg.master.enable {
+        WORKING_USER = mfsUser;
+        EXPORTS_FILENAME = toString ( pkgs.writeText "mfsexports.cfg"
+          (concatStringsSep "\n" cfg.master.exports));
+      };
+
+      metalogger.settings = mkIf cfg.metalogger.enable {
+        WORKING_USER = mfsUser;
+        MASTER_HOST = cfg.masterHost;
+      };
+
+      chunkserver.settings = mkIf cfg.chunkserver.enable {
+        WORKING_USER = mfsUser;
+        MASTER_HOST = cfg.masterHost;
+        HDD_CONF_FILENAME = toString ( pkgs.writeText "mfshdd.cfg"
+          (concatStringsSep "\n" cfg.chunkserver.hdds));
+      };
+    };
+
+    # Create system user account for daemons
+    users = mkIf ( cfg.runAsUser && ( cfg.master.enable || cfg.metalogger.enable || cfg.chunkserver.enable ) ) {
+      users.moosefs = {
+        isSystemUser = true;
+        description = "moosefs daemon user";
+        group = "moosefs";
+      };
+      groups.moosefs = {};
+    };
+
+    environment.systemPackages =
+      (lib.optional cfg.client.enable pkgs.moosefs) ++
+      (lib.optional cfg.master.enable initTool);
+
+    networking.firewall.allowedTCPPorts =
+      (lib.optionals cfg.master.openFirewall [ 9419 9420 9421 ]) ++
+      (lib.optional cfg.chunkserver.openFirewall 9422);
+
+    # Ensure storage directories exist
+    systemd.tmpfiles.rules =
+         optional cfg.master.enable "d ${cfg.master.settings.DATA_PATH} 0700 ${mfsUser} ${mfsUser}"
+      ++ optional cfg.metalogger.enable "d ${cfg.metalogger.settings.DATA_PATH} 0700 ${mfsUser} ${mfsUser}"
+      ++ optional cfg.chunkserver.enable "d ${cfg.chunkserver.settings.DATA_PATH} 0700 ${mfsUser} ${mfsUser}";
+
+    # Service definitions
+    systemd.services.mfs-master = mkIf cfg.master.enable
+    ( systemdService "master" {
+      TimeoutStartSec = 1800;
+      TimeoutStopSec = 1800;
+      Restart = "no";
+    } masterCfg );
+
+    systemd.services.mfs-metalogger = mkIf cfg.metalogger.enable
+      ( systemdService "metalogger" { Restart = "on-abnormal"; } metaloggerCfg );
+
+    systemd.services.mfs-chunkserver = mkIf cfg.chunkserver.enable
+      ( systemdService "chunkserver" { Restart = "on-abnormal"; } chunkserverCfg );
+  };
+}
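
For orientation, here is a minimal sketch of how the MooseFS options defined above could be combined on a single host acting as both master and chunkserver. The host name, export rule and disk path are illustrative placeholders, not values taken from the module:

  {
    services.moosefs = {
      masterHost = "mfsmaster.example.org";   # illustrative host name
      master = {
        enable = true;
        openFirewall = true;
        exports = [ "* / rw,alldirs,admin,maproot=0:0" ];
      };
      chunkserver = {
        enable = true;
        openFirewall = true;
        hdds = [ "/mnt/hdd1" ];               # must exist and be writable by the moosefs user
      };
    };
  }

As the master.enable description notes, `mfsmaster-init` still has to be run once on a freshly installed master to initialize the DATA_PATH directory.
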
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/netatalk.nix b/nixpkgs/nixos/modules/services/network-filesystems/netatalk.nix
new file mode 100644
index 000000000000..a40f68557c0e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/netatalk.nix
@@ -0,0 +1,95 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.netatalk;
+  settingsFormat = pkgs.formats.ini { };
+  afpConfFile = settingsFormat.generate "afp.conf" cfg.settings;
+in {
+  options = {
+    services.netatalk = {
+
+      enable = mkEnableOption (lib.mdDoc "the Netatalk AFP fileserver");
+
+      port = mkOption {
+        type = types.port;
+        default = 548;
+        description = lib.mdDoc "TCP port to be used for AFP.";
+      };
+
+      settings = mkOption {
+        inherit (settingsFormat) type;
+        default = { };
+        example = {
+          Global = { "uam list" = "uams_guest.so"; };
+          Homes = {
+            path = "afp-data";
+            "basedir regex" = "/home";
+          };
+          example-volume = {
+            path = "/srv/volume";
+            "read only" = true;
+          };
+        };
+        description = lib.mdDoc ''
+          Configuration for Netatalk. See
+          {manpage}`afp.conf(5)`.
+        '';
+      };
+
+      extmap = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          File name extension mappings.
+          See {manpage}`extmap.conf(5)` for more information.
+        '';
+      };
+
+    };
+  };
+
+  imports = (map (option:
+    mkRemovedOptionModule [ "services" "netatalk" option ]
+    "This option was removed in favor of `services.netatalk.settings`.") [
+      "extraConfig"
+      "homes"
+      "volumes"
+    ]);
+
+  config = mkIf cfg.enable {
+
+    services.netatalk.settings.Global = {
+      "afp port" = toString cfg.port;
+      "extmap file" = "${pkgs.writeText "extmap.conf" cfg.extmap}";
+    };
+
+    systemd.services.netatalk = {
+      description = "Netatalk AFP fileserver for Macintosh clients";
+      unitConfig.Documentation =
+        "man:afp.conf(5) man:netatalk(8) man:afpd(8) man:cnid_metad(8) man:cnid_dbd(8)";
+      after = [ "network.target" "avahi-daemon.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ pkgs.netatalk ];
+
+      serviceConfig = {
+        Type = "forking";
+        GuessMainPID = "no";
+        PIDFile = "/run/lock/netatalk";
+        ExecStart = "${pkgs.netatalk}/sbin/netatalk -F ${afpConfFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP  $MAINPID";
+        ExecStop = "${pkgs.coreutils}/bin/kill -TERM $MAINPID";
+        Restart = "always";
+        RestartSec = 1;
+        StateDirectory = [ "netatalk/CNID" ];
+      };
+
+    };
+
+    security.pam.services.netatalk.unixAuth = true;
+
+  };
+
+}
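
A minimal sketch of the Netatalk options above, assuming a guest-accessible read-only volume; the volume name and path are illustrative:

  {
    services.netatalk = {
      enable = true;
      settings = {
        Global = { "uam list" = "uams_guest.so"; };
        media = {               # illustrative volume name
          path = "/srv/media";  # illustrative path
          "read only" = true;
        };
      };
    };
  }
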
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/nfsd.nix b/nixpkgs/nixos/modules/services/network-filesystems/nfsd.nix
new file mode 100644
index 000000000000..c9e1cbcbbda4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/nfsd.nix
@@ -0,0 +1,173 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.nfs.server;
+
+  exports = pkgs.writeText "exports" cfg.exports;
+
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "nfs" "lockdPort" ] [ "services" "nfs" "server" "lockdPort" ])
+    (mkRenamedOptionModule [ "services" "nfs" "statdPort" ] [ "services" "nfs" "server" "statdPort" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.nfs = {
+
+      server = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to enable the kernel's NFS server.
+          '';
+        };
+
+        extraNfsdConfig = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            Extra configuration options for the [nfsd] section of /etc/nfs.conf.
+          '';
+        };
+
+        exports = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc ''
+            Contents of the /etc/exports file.  See
+            {manpage}`exports(5)` for the format.
+          '';
+        };
+
+        hostName = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Hostname or address on which NFS requests will be accepted.
+            Default is all.  See the {option}`-H` option in
+            {manpage}`nfsd(8)`.
+          '';
+        };
+
+        nproc = mkOption {
+          type = types.int;
+          default = 8;
+          description = lib.mdDoc ''
+            Number of NFS server threads.  Defaults to the recommended value of 8.
+          '';
+        };
+
+        createMountPoints = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Whether to create the mount points in the exports file at startup time.";
+        };
+
+        mountdPort = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          example = 4002;
+          description = lib.mdDoc ''
+            Use a fixed port for {command}`rpc.mountd`. This is useful if the server is behind a firewall.
+          '';
+        };
+
+        lockdPort = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          example = 4001;
+          description = lib.mdDoc ''
+            Use a fixed port for the NFS lock manager kernel module
+            (`lockd/nlockmgr`).  This is useful if the
+            NFS server is behind a firewall.
+          '';
+        };
+
+        statdPort = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          example = 4000;
+          description = lib.mdDoc ''
+            Use a fixed port for {command}`rpc.statd`. This is
+            useful if the NFS server is behind a firewall.
+          '';
+        };
+
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.nfs.extraConfig = ''
+      [nfsd]
+      threads=${toString cfg.nproc}
+      ${optionalString (cfg.hostName != null) "host=${cfg.hostName}"}
+      ${cfg.extraNfsdConfig}
+
+      [mountd]
+      ${optionalString (cfg.mountdPort != null) "port=${toString cfg.mountdPort}"}
+
+      [statd]
+      ${optionalString (cfg.statdPort != null) "port=${toString cfg.statdPort}"}
+
+      [lockd]
+      ${optionalString (cfg.lockdPort != null) ''
+        port=${toString cfg.lockdPort}
+        udp-port=${toString cfg.lockdPort}
+      ''}
+    '';
+
+    services.rpcbind.enable = true;
+
+    boot.supportedFilesystems = [ "nfs" ]; # needed for statd and idmapd
+
+    environment.etc.exports.source = exports;
+
+    systemd.services.nfs-server =
+      { enable = true;
+        wantedBy = [ "multi-user.target" ];
+
+        preStart =
+          ''
+            mkdir -p /var/lib/nfs/v4recovery
+          '';
+      };
+
+    systemd.services.nfs-mountd =
+      { enable = true;
+        restartTriggers = [ exports ];
+
+        preStart =
+          ''
+            mkdir -p /var/lib/nfs
+
+            ${optionalString cfg.createMountPoints
+              ''
+                # create export directories:
+                # skip comments, take first col which may either be a quoted
+                # "foo bar" or just foo (-> man export)
+                sed '/^#.*/d;s/^"\([^"]*\)".*/\1/;t;s/[ ].*//' ${exports} \
+                | xargs -d '\n' mkdir -p
+              ''
+            }
+          '';
+      };
+
+  };
+
+}
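
A sketch of a small NFS server built from the options above; the export path and client subnet are illustrative. The fixed statd/lockd/mountd ports mirror the option examples and make it practical to open the firewall for NFSv3 clients (NFSv4 clients only need TCP 2049):

  {
    services.nfs.server = {
      enable = true;
      exports = ''
        /export 192.168.1.0/24(rw,no_subtree_check)
      '';
      statdPort = 4000;
      lockdPort = 4001;
      mountdPort = 4002;
    };
    # 111 = rpcbind, 2049 = nfsd; UDP counterparts may also be needed for some NFSv3 clients.
    networking.firewall.allowedTCPPorts = [ 111 2049 4000 4001 4002 ];
  }
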
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/openafs/client.nix b/nixpkgs/nixos/modules/services/network-filesystems/openafs/client.nix
new file mode 100644
index 000000000000..bb0fee087e62
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/openafs/client.nix
@@ -0,0 +1,252 @@
+{ config, lib, pkgs, ... }:
+
+# openafsMod, openafsBin, mkCellServDB
+with import ./lib.nix { inherit config lib pkgs; };
+
+let
+  inherit (lib) getBin literalExpression mkOption mkIf optionalString singleton types;
+
+  cfg = config.services.openafsClient;
+
+  cellServDB = pkgs.fetchurl {
+    url = "http://dl.central.org/dl/cellservdb/CellServDB.2018-05-14";
+    sha256 = "1wmjn6mmyy2r8p10nlbdzs4nrqxy8a9pjyrdciy5nmppg4053rk2";
+  };
+
+  clientServDB = pkgs.writeText "client-cellServDB-${cfg.cellName}" (mkCellServDB cfg.cellName cfg.cellServDB);
+
+  afsConfig = pkgs.runCommand "afsconfig" { preferLocalBuild = true; } ''
+    mkdir -p $out
+    echo ${cfg.cellName} > $out/ThisCell
+    cat ${cellServDB} ${clientServDB} > $out/CellServDB
+    echo "${cfg.mountPoint}:${cfg.cache.directory}:${toString cfg.cache.blocks}" > $out/cacheinfo
+  '';
+
+in
+{
+  ###### interface
+
+  options = {
+
+    services.openafsClient = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to enable the OpenAFS client.";
+      };
+
+      afsdb = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc "Resolve cells via AFSDB DNS records.";
+      };
+
+      cellName = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc "Cell name.";
+        example = "grand.central.org";
+      };
+
+      cellServDB = mkOption {
+        default = [];
+        type = with types; listOf (submodule { options = cellServDBConfig; });
+        description = lib.mdDoc ''
+          This cell's database server records, added to the global
+          CellServDB. See CellServDB(5) man page for syntax. Ignored when
+          `afsdb` is set to `true`.
+        '';
+        example = [
+          { ip = "1.2.3.4"; dnsname = "first.afsdb.server.dns.fqdn.org"; }
+          { ip = "2.3.4.5"; dnsname = "second.afsdb.server.dns.fqdn.org"; }
+        ];
+      };
+
+      cache = {
+        blocks = mkOption {
+          default = 100000;
+          type = types.int;
+          description = lib.mdDoc "Cache size in 1KB blocks.";
+        };
+
+        chunksize = mkOption {
+          default = 0;
+          type = types.ints.between 0 30;
+          description = lib.mdDoc ''
+            Size of each cache chunk given in powers of
+            2. `0` resets the chunk size to its default
+            values (13 (8 KB) for memcache, 18-20 (256 KB to 1 MB) for
+            diskcache). Maximum value is 30. Important performance
+            parameter. Set to higher values when dealing with large files.
+          '';
+        };
+
+        directory = mkOption {
+          default = "/var/cache/openafs";
+          type = types.str;
+          description = lib.mdDoc "Cache directory.";
+        };
+
+        diskless = mkOption {
+          default = false;
+          type = types.bool;
+          description = lib.mdDoc ''
+            Use in-memory cache for diskless machines. Has no real
+            performance benefit anymore.
+          '';
+        };
+      };
+
+      crypt = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc "Whether to enable (weak) protocol encryption.";
+      };
+
+      daemons = mkOption {
+        default = 2;
+        type = types.int;
+        description = lib.mdDoc ''
+          Number of daemons to serve user requests. Numbers higher than 6
+          usually do not increase performance. Default is sufficient for up
+          to five concurrent users.
+        '';
+      };
+
+      fakestat = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Return fake data on stat() calls. If `true`,
+          always do so. If `false`, only do so for
+          cross-cell mounts (as these are potentially expensive).
+        '';
+      };
+
+      inumcalc = mkOption {
+        default = "compat";
+        type = types.strMatching "compat|md5";
+        description = lib.mdDoc ''
+          Inode calculation method. `compat` is
+          computationally less expensive, but `md5` greatly
+          reduces the likelihood of inode collisions in larger scenarios
+          involving multiple cells mounted into one AFS space.
+        '';
+      };
+
+      mountPoint = mkOption {
+        default = "/afs";
+        type = types.str;
+        description = lib.mdDoc ''
+          Mountpoint of the AFS file tree, conventionally
+          `/afs`. When set to a different value, cross-cell access only
+          works for cells that use the same mount point.
+        '';
+      };
+
+      packages = {
+        module = mkOption {
+          default = config.boot.kernelPackages.openafs;
+          defaultText = literalExpression "config.boot.kernelPackages.openafs";
+          type = types.package;
+          description = lib.mdDoc "OpenAFS kernel module package. MUST match the userland package!";
+        };
+        programs = mkOption {
+          default = getBin pkgs.openafs;
+          defaultText = literalExpression "getBin pkgs.openafs";
+          type = types.package;
+          description = lib.mdDoc "OpenAFS programs package. MUST match the kernel module package!";
+        };
+      };
+
+      sparse = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc "Minimal cell list in /afs.";
+      };
+
+      startDisconnected = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Start up in disconnected mode.  You need to execute
+          `fs discon online` (as root) to switch to
+          connected mode. Useful for roaming devices.
+        '';
+      };
+
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.afsdb || cfg.cellServDB != [];
+        message = "You should specify all cell-local database servers in config.services.openafsClient.cellServDB or set config.services.openafsClient.afsdb.";
+      }
+      { assertion = cfg.cellName != "";
+        message = "You must specify the local cell name in config.services.openafsClient.cellName.";
+      }
+    ];
+
+    environment.systemPackages = [ openafsBin ];
+
+    environment.etc = {
+      clientCellServDB = {
+        source = pkgs.runCommand "CellServDB" { preferLocalBuild = true; } ''
+          cat ${cellServDB} ${clientServDB} > $out
+        '';
+        target = "openafs/CellServDB";
+        mode = "0644";
+      };
+      clientCell = {
+        text = ''
+          ${cfg.cellName}
+        '';
+        target = "openafs/ThisCell";
+        mode = "0644";
+      };
+    };
+
+    systemd.services.afsd = {
+      description = "AFS client";
+      wantedBy = [ "multi-user.target" ];
+      after = singleton (if cfg.startDisconnected then "network.target" else "network-online.target");
+      serviceConfig = { RemainAfterExit = true; };
+      restartIfChanged = false;
+
+      preStart = ''
+        mkdir -p -m 0755 ${cfg.mountPoint}
+        mkdir -m 0700 -p ${cfg.cache.directory}
+        ${pkgs.kmod}/bin/insmod ${openafsMod}/lib/modules/*/extra/openafs/libafs.ko.xz
+        ${openafsBin}/sbin/afsd \
+          -mountdir ${cfg.mountPoint} \
+          -confdir ${afsConfig} \
+          ${optionalString (!cfg.cache.diskless) "-cachedir ${cfg.cache.directory}"} \
+          -blocks ${toString cfg.cache.blocks} \
+          -chunksize ${toString cfg.cache.chunksize} \
+          ${optionalString cfg.cache.diskless "-memcache"} \
+          -inumcalc ${cfg.inumcalc} \
+          ${if cfg.fakestat then "-fakestat-all" else "-fakestat"} \
+          ${if cfg.sparse then "-dynroot-sparse" else "-dynroot"} \
+          ${optionalString cfg.afsdb "-afsdb"}
+        ${openafsBin}/bin/fs setcrypt ${if cfg.crypt then "on" else "off"}
+        ${optionalString cfg.startDisconnected "${openafsBin}/bin/fs discon offline"}
+      '';
+
+      # Doing this in preStop, because after these commands AFS is basically
+      # stopped, so systemd has nothing to do, just noticing it.  If done in
+      # postStop, then we get a hang + kernel oops, because AFS can't be
+      # stopped simply by sending signals to processes.
+      preStop = ''
+        ${pkgs.util-linux}/bin/umount ${cfg.mountPoint}
+        ${openafsBin}/sbin/afsd -shutdown
+        ${pkgs.kmod}/sbin/rmmod libafs
+      '';
+    };
+  };
+}
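
A sketch of an OpenAFS client configuration using the options above, relying on AFSDB DNS records so that no explicit cellServDB entries are needed; the cell name is the example from the option documentation and the cache size is illustrative:

  {
    services.openafsClient = {
      enable = true;
      cellName = "grand.central.org";
      afsdb = true;            # resolve database servers via AFSDB DNS records
      cache.blocks = 500000;   # roughly 500 MB of disk cache (1 KB blocks)
    };
  }
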
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/openafs/lib.nix b/nixpkgs/nixos/modules/services/network-filesystems/openafs/lib.nix
new file mode 100644
index 000000000000..e5e147a8dc33
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/openafs/lib.nix
@@ -0,0 +1,33 @@
+{ config, lib, ...}:
+
+let
+  inherit (lib) concatStringsSep mkOption types optionalString;
+
+in {
+
+  mkCellServDB = cellName: db: ''
+    >${cellName}
+  '' + (concatStringsSep "\n" (map (dbm: optionalString (dbm.ip != "" && dbm.dnsname != "") "${dbm.ip} #${dbm.dnsname}")
+                                   db))
+     + "\n";
+
+  # CellServDB configuration type
+  cellServDBConfig = {
+    ip = mkOption {
+      type = types.str;
+      default = "";
+      example = "1.2.3.4";
+      description = lib.mdDoc "IP Address of a database server";
+    };
+    dnsname = mkOption {
+      type = types.str;
+      default = "";
+      example = "afs.example.org";
+      description = lib.mdDoc "DNS full-qualified domain name of a database server";
+    };
+  };
+
+  openafsMod = config.services.openafsClient.packages.module;
+  openafsBin = config.services.openafsClient.packages.programs;
+  openafsSrv = config.services.openafsServer.package;
+}
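
As a sketch, calling `mkCellServDB "grand.central.org" [ { ip = "1.2.3.4"; dnsname = "first.afsdb.server.dns.fqdn.org"; } ]` (values borrowed from the client module's examples) would produce a CellServDB fragment of the form:

  >grand.central.org
  1.2.3.4 #first.afsdb.server.dns.fqdn.org
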
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/openafs/server.nix b/nixpkgs/nixos/modules/services/network-filesystems/openafs/server.nix
new file mode 100644
index 000000000000..fbaa7cfc1929
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/openafs/server.nix
@@ -0,0 +1,319 @@
+{ config, lib, pkgs, ... }:
+
+# openafsBin, openafsSrv, mkCellServDB
+with import ./lib.nix { inherit config lib pkgs; };
+
+let
+  inherit (lib) concatStringsSep literalExpression mkIf mkOption mkEnableOption
+  optionalString types;
+
+  bosConfig = pkgs.writeText "BosConfig" (''
+    restrictmode 1
+    restarttime 16 0 0 0 0
+    checkbintime 3 0 5 0 0
+  '' + (optionalString cfg.roles.database.enable ''
+    bnode simple vlserver 1
+    parm ${openafsSrv}/libexec/openafs/vlserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.vlserverArgs}
+    end
+    bnode simple ptserver 1
+    parm ${openafsSrv}/libexec/openafs/ptserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} ${cfg.roles.database.ptserverArgs}
+    end
+  '') + (optionalString cfg.roles.fileserver.enable ''
+    bnode dafs dafs 1
+    parm ${openafsSrv}/libexec/openafs/dafileserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.fileserverArgs}
+    parm ${openafsSrv}/libexec/openafs/davolserver ${optionalString cfg.dottedPrincipals "-allow-dotted-principals"} -udpsize ${udpSizeStr} ${cfg.roles.fileserver.volserverArgs}
+    parm ${openafsSrv}/libexec/openafs/salvageserver ${cfg.roles.fileserver.salvageserverArgs}
+    parm ${openafsSrv}/libexec/openafs/dasalvager ${cfg.roles.fileserver.salvagerArgs}
+    end
+  '') + (optionalString (cfg.roles.database.enable && cfg.roles.backup.enable && (!cfg.roles.backup.enableFabs)) ''
+    bnode simple buserver 1
+    parm ${openafsSrv}/libexec/openafs/buserver ${cfg.roles.backup.buserverArgs} ${optionalString useBuCellServDB "-cellservdb /etc/openafs/backup/"}
+    end
+  '') + (optionalString (cfg.roles.database.enable &&
+                         cfg.roles.backup.enable &&
+                         cfg.roles.backup.enableFabs) ''
+    bnode simple buserver 1
+    parm ${lib.getBin pkgs.fabs}/bin/fabsys server --config ${fabsConfFile} ${cfg.roles.backup.fabsArgs}
+    end
+  ''));
+
+  netInfo = if (cfg.advertisedAddresses != []) then
+    pkgs.writeText "NetInfo" ((concatStringsSep "\nf " cfg.advertisedAddresses) + "\n")
+  else null;
+
+  buCellServDB = pkgs.writeText "backup-cellServDB-${cfg.cellName}"
+    (mkCellServDB cfg.cellName cfg.roles.backup.cellServDB);
+
+  useBuCellServDB = (cfg.roles.backup.cellServDB != []) && (!cfg.roles.backup.enableFabs);
+
+  cfg = config.services.openafsServer;
+
+  udpSizeStr = toString cfg.udpPacketSize;
+
+  fabsConfFile = pkgs.writeText "fabs.yaml" (builtins.toJSON ({
+    afs = {
+      aklog = cfg.package + "/bin/aklog";
+      cell = cfg.cellName;
+      dumpscan = cfg.package + "/bin/afsdump_scan";
+      fs = cfg.package + "/bin/fs";
+      pts = cfg.package + "/bin/pts";
+      vos = cfg.package + "/bin/vos";
+    };
+    k5start.command = (lib.getBin pkgs.kstart) + "/bin/k5start";
+  } // cfg.roles.backup.fabsExtraConfig));
+
+in {
+
+  options = {
+
+    services.openafsServer = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable the OpenAFS server. An OpenAFS server needs a
+          complex setup. So, be aware that enabling this service and setting
+          some options does not give you a turn-key-ready solution. You need
+          at least a running Kerberos 5 setup, as OpenAFS relies on it for
+          authentication. See the Guide "QuickStartUnix" coming with
+          `pkgs.openafs.doc` for complete setup
+          instructions.
+        '';
+      };
+
+      advertisedAddresses = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "List of IP addresses this server is advertised under. See NetInfo(5)";
+      };
+
+      cellName = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc "Cell name, this server will serve.";
+        example = "grand.central.org";
+      };
+
+      cellServDB = mkOption {
+        default = [];
+        type = with types; listOf (submodule [ { options = cellServDBConfig;} ]);
+        description = lib.mdDoc "Definition of all cell-local database server machines.";
+      };
+
+      package = mkOption {
+        default = pkgs.openafs;
+        defaultText = literalExpression "pkgs.openafs";
+        type = types.package;
+        description = lib.mdDoc "OpenAFS package for the server binaries";
+      };
+
+      roles = {
+        fileserver = {
+          enable = mkOption {
+            default = true;
+            type = types.bool;
+            description = lib.mdDoc "Fileserver role, serves files and volumes from its local storage.";
+          };
+
+          fileserverArgs = mkOption {
+            default = "-vattachpar 128 -vhashsize 11 -L -rxpck 400 -cb 1000000";
+            type = types.str;
+            description = lib.mdDoc "Arguments to the dafileserver process. See its man page.";
+          };
+
+          volserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc "Arguments to the davolserver process. See its man page.";
+            example = "-sync never";
+          };
+
+          salvageserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc "Arguments to the salvageserver process. See its man page.";
+            example = "-showlog";
+          };
+
+          salvagerArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc "Arguments to the dasalvager process. See its man page.";
+            example = "-showlog -showmounts";
+          };
+        };
+
+        database = {
+          enable = mkOption {
+            default = true;
+            type = types.bool;
+            description = lib.mdDoc ''
+              Database server role, maintains the Volume Location Database,
+              Protection Database (and Backup Database, see
+              `backup` role). There can be multiple
+              servers in the database role for replication, which then need
+              reliable network connection to each other.
+
+              Servers in this role appear in AFSDB DNS records or the
+              CellServDB.
+            '';
+          };
+
+          vlserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc "Arguments to the vlserver process. See its man page.";
+            example = "-rxbind";
+          };
+
+          ptserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc "Arguments to the ptserver process. See its man page.";
+            example = "-restricted -default_access S---- S-M---";
+          };
+        };
+
+        backup = {
+          enable = mkEnableOption (lib.mdDoc ''
+            the backup server role. When using OpenAFS built-in buserver, use in conjunction with the
+            `database` role to maintain the Backup
+            Database. Normally only used in conjunction with tape storage
+            or IBM's Tivoli Storage Manager.
+
+            For a modern backup server, enable this role and see
+            {option}`enableFabs`
+          '');
+
+          enableFabs = mkEnableOption (lib.mdDoc ''
+            FABS, the flexible AFS backup system. It stores volumes as dump files, relying on other
+            pre-existing backup solutions for handling them.
+          '');
+
+          buserverArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc "Arguments to the buserver process. See its man page.";
+            example = "-p 8";
+          };
+
+          cellServDB = mkOption {
+            default = [];
+            type = with types; listOf (submodule [ { options = cellServDBConfig;} ]);
+            description = lib.mdDoc ''
+              Definition of all cell-local backup database server machines.
+              Use this when your cell uses less backup database servers than
+              other database server machines.
+            '';
+          };
+
+          fabsArgs = mkOption {
+            default = "";
+            type = types.str;
+            description = lib.mdDoc ''
+              Arguments to the fabsys process. See
+              {manpage}`fabsys_server(1)` and
+              {manpage}`fabsys_config(1)`.
+            '';
+          };
+
+          fabsExtraConfig = mkOption {
+            default = {};
+            type = types.attrs;
+            description = lib.mdDoc ''
+              Additional configuration parameters for the FABS backup server.
+            '';
+            example = literalExpression ''
+            {
+              afs.localauth = true;
+              afs.keytab = config.sops.secrets.fabsKeytab.path;
+            }
+            '';
+          };
+        };
+      };
+
+      dottedPrincipals = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If enabled, allow principal names containing dots (.). Enabling
+          this has security implications!
+        '';
+      };
+
+      udpPacketSize = mkOption {
+        default = 1310720;
+        type = types.int;
+        description = lib.mdDoc ''
+          UDP packet size to use in bytes. Higher values can speed up
+          communications. The default of 1.25 MiB is sufficient in most
+          cases. Make sure to increase the kernel's UDP buffer sizes
+          accordingly via the `net.core.rmem_max`, `net.core.wmem_max`
+          and `net.core.optmem_max` sysctls.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.cellServDB != [];
+        message = "You must specify all cell-local database servers in config.services.openafsServer.cellServDB.";
+      }
+      { assertion = cfg.cellName != "";
+        message = "You must specify the local cell name in config.services.openafsServer.cellName.";
+      }
+    ];
+
+    environment.systemPackages = [ openafsBin ];
+
+    environment.etc = {
+      bosConfig = {
+        source = bosConfig;
+        target = "openafs/BosConfig";
+        mode = "0644";
+      };
+      cellServDB = {
+        text = mkCellServDB cfg.cellName cfg.cellServDB;
+        target = "openafs/server/CellServDB";
+        mode = "0644";
+      };
+      thisCell = {
+        text = cfg.cellName;
+        target = "openafs/server/ThisCell";
+        mode = "0644";
+      };
+      buCellServDB = {
+        enable = useBuCellServDB;
+        text = mkCellServDB cfg.cellName cfg.roles.backup.cellServDB;
+        target = "openafs/backup/CellServDB";
+      };
+    };
+
+    systemd.services = {
+      openafs-server = {
+        description = "OpenAFS server";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        restartIfChanged = false;
+        unitConfig.ConditionPathExists = [
+          "|/etc/openafs/server/KeyFileExt"
+        ];
+        preStart = ''
+          mkdir -m 0755 -p /var/openafs
+          ${optionalString (netInfo != null) "cp ${netInfo} /var/openafs/netInfo"}
+          ${optionalString useBuCellServDB "cp ${buCellServDB}"}
+        '';
+        serviceConfig = {
+          ExecStart = "${openafsBin}/bin/bosserver -nofork";
+          ExecStop = "${openafsBin}/bin/bos shutdown localhost -wait -localauth";
+        };
+      };
+    };
+  };
+}
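
A sketch of a single-machine OpenAFS server carrying both the fileserver and database roles; the cell name matches the option example, while the address and DNS name of the database server are illustrative. Note that the openafs-server unit above only starts once /etc/openafs/server/KeyFileExt exists, which has to be provisioned from your Kerberos setup:

  {
    services.openafsServer = {
      enable = true;
      cellName = "grand.central.org";
      cellServDB = [
        { ip = "192.168.1.10"; dnsname = "afsdb1.example.org"; }  # illustrative
      ];
      roles = {
        fileserver.enable = true;
        database.enable = true;
      };
    };
  }
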
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/orangefs/client.nix b/nixpkgs/nixos/modules/services/network-filesystems/orangefs/client.nix
new file mode 100644
index 000000000000..68f23f477af1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/orangefs/client.nix
@@ -0,0 +1,96 @@
+{ config, lib, pkgs, ...} :
+
+with lib;
+
+let
+  cfg = config.services.orangefs.client;
+
+in {
+  ###### interface
+
+  options = {
+    services.orangefs.client = {
+      enable = mkEnableOption (lib.mdDoc "OrangeFS client daemon");
+
+      extraOptions = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc "Extra command line options for pvfs2-client.";
+      };
+
+      fileSystems = mkOption {
+        description = lib.mdDoc ''
+          The orangefs file systems to be mounted.
+          This option is preferred over using {option}`fileSystems` directly since
+          the pvfs client service needs to be running for them to be mounted.
+        '';
+
+        example = [{
+          mountPoint = "/orangefs";
+          target = "tcp://server:3334/orangefs";
+        }];
+
+        type = with types; listOf (submodule ({ ... } : {
+          options = {
+
+            mountPoint = mkOption {
+              type = types.str;
+              default = "/orangefs";
+              description = lib.mdDoc "Mount point.";
+            };
+
+            options = mkOption {
+              type = with types; listOf str;
+              default = [];
+              description = lib.mdDoc "Mount options";
+            };
+
+            target = mkOption {
+              type = types.str;
+              example = "tcp://server:3334/orangefs";
+              description = lib.mdDoc "Target URL";
+            };
+          };
+        }));
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.orangefs ];
+
+    boot.supportedFilesystems = [ "pvfs2" ];
+    boot.kernelModules = [ "orangefs" ];
+
+    systemd.services.orangefs-client = {
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+
+        ExecStart = ''
+          ${pkgs.orangefs}/bin/pvfs2-client-core \
+            --logtype=syslog ${concatStringsSep " " cfg.extraOptions}
+        '';
+
+        TimeoutStopSec = "120";
+      };
+    };
+
+    systemd.mounts = map (fs: {
+      requires = [ "orangefs-client.service" ];
+      after = [ "orangefs-client.service" ];
+      bindsTo = [ "orangefs-client.service" ];
+      wantedBy = [ "remote-fs.target" ];
+      type = "pvfs2";
+      options = concatStringsSep "," fs.options;
+      what = fs.target;
+      where = fs.mountPoint;
+    }) cfg.fileSystems;
+  };
+}
+
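
A sketch of an OrangeFS client mount built from the options above; the target URL is the example given in the option documentation:

  {
    services.orangefs.client = {
      enable = true;
      fileSystems = [{
        mountPoint = "/orangefs";
        target = "tcp://server:3334/orangefs";
      }];
    };
  }
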
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/orangefs/server.nix b/nixpkgs/nixos/modules/services/network-filesystems/orangefs/server.nix
new file mode 100644
index 000000000000..085b64e4c040
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/orangefs/server.nix
@@ -0,0 +1,225 @@
+{ config, lib, pkgs, ...} :
+
+with lib;
+
+let
+  cfg = config.services.orangefs.server;
+
+  aliases = mapAttrsToList (alias: url: alias) cfg.servers;
+
+  # Maximum handle number (2^63 - 2)
+  maxHandle = 9223372036854775806;
+
+  # One range of handles for each meta/data instance
+  handleStep = maxHandle / (length aliases) / 2;
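+  # Metadata ranges occupy the first half of the handle space and data ranges
+  # the second half; each server alias gets one contiguous range in each half.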
+
+  fileSystems = mapAttrsToList (name: fs: ''
+    <FileSystem>
+      Name ${name}
+      ID ${toString fs.id}
+      RootHandle ${toString fs.rootHandle}
+
+      ${fs.extraConfig}
+
+      <MetaHandleRanges>
+      ${concatStringsSep "\n" (
+          imap0 (i: alias:
+            let
+              begin = i * handleStep + 3;
+              end = begin + handleStep - 1;
+            in "Range ${alias} ${toString begin}-${toString end}") aliases
+       )}
+      </MetaHandleRanges>
+
+      <DataHandleRanges>
+      ${concatStringsSep "\n" (
+          imap0 (i: alias:
+            let
+              begin = i * handleStep + 3 + (length aliases) * handleStep;
+              end = begin + handleStep - 1;
+            in "Range ${alias} ${toString begin}-${toString end}") aliases
+       )}
+      </DataHandleRanges>
+
+      <StorageHints>
+      TroveSyncMeta ${if fs.troveSyncMeta then "yes" else "no"}
+      TroveSyncData ${if fs.troveSyncData then "yes" else "no"}
+      ${fs.extraStorageHints}
+      </StorageHints>
+
+    </FileSystem>
+  '') cfg.fileSystems;
+
+  configFile = ''
+    <Defaults>
+    LogType ${cfg.logType}
+    DataStorageSpace ${cfg.dataStorageSpace}
+    MetaDataStorageSpace ${cfg.metadataStorageSpace}
+
+    BMIModules ${concatStringsSep "," cfg.BMIModules}
+    ${cfg.extraDefaults}
+    </Defaults>
+
+    ${cfg.extraConfig}
+
+    <Aliases>
+    ${concatStringsSep "\n" (mapAttrsToList (alias: url: "Alias ${alias} ${url}") cfg.servers)}
+    </Aliases>
+
+    ${concatStringsSep "\n" fileSystems}
+  '';
+
+in {
+  ###### interface
+
+  options = {
+    services.orangefs.server = {
+      enable = mkEnableOption (lib.mdDoc "OrangeFS server");
+
+      logType = mkOption {
+        type = with types; enum [ "file" "syslog" ];
+        default = "syslog";
+        description = lib.mdDoc "Destination for log messages.";
+      };
+
+      dataStorageSpace = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/data/storage";
+        description = lib.mdDoc "Directory for data storage.";
+      };
+
+      metadataStorageSpace = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/data/meta";
+        description = lib.mdDoc "Directory for meta data storage.";
+      };
+
+      BMIModules = mkOption {
+        type = with types; listOf str;
+        default = [ "bmi_tcp" ];
+        example = [ "bmi_tcp" "bmi_ib"];
+        description = lib.mdDoc "List of BMI modules to load.";
+      };
+
+      extraDefaults = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Extra config for `<Defaults>` section.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Extra config for the global section.";
+      };
+
+      servers = mkOption {
+        type = with types; attrsOf types.str;
+        default = {};
+        example = {
+          node1 = "tcp://node1:3334";
+          node2 = "tcp://node2:3334";
+        };
+        description = lib.mdDoc "URLs for storage server including port. The attribute names define the server alias.";
+      };
+
+      fileSystems = mkOption {
+        description = lib.mdDoc ''
+          These options will create the `<FileSystem>` sections of config file.
+        '';
+        default = { orangefs = {}; };
+        example = literalExpression ''
+          {
+            fs1 = {
+              id = 101;
+            };
+
+            fs2 = {
+              id = 102;
+            };
+          }
+        '';
+        type = with types; attrsOf (submodule ({ ... } : {
+          options = {
+            id = mkOption {
+              type = types.int;
+              default = 1;
+              description = lib.mdDoc "File system ID (must be unique within configuration).";
+            };
+
+            rootHandle = mkOption {
+              type = types.int;
+              default = 3;
+              description = lib.mdDoc "File system root ID.";
+            };
+
+            extraConfig = mkOption {
+              type = types.lines;
+              default = "";
+              description = lib.mdDoc "Extra config for `<FileSystem>` section.";
+            };
+
+            troveSyncMeta = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc "Sync meta data.";
+            };
+
+            troveSyncData = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc "Sync data.";
+            };
+
+            extraStorageHints = mkOption {
+              type = types.lines;
+              default = "";
+              description = lib.mdDoc "Extra config for `<StorageHints>` section.";
+            };
+          };
+        }));
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.orangefs ];
+
+    # The OrangeFS daemon runs as this dedicated system user.
+    users.users.orangefs = {
+      isSystemUser = true;
+      group = "orangefs";
+    };
+    users.groups.orangefs = {};
+
+    # To format the file system the config file is needed.
+    environment.etc."orangefs/server.conf" = {
+      text = configFile;
+      user = "orangefs";
+      group = "orangefs";
+    };
+
+    systemd.services.orangefs-server = {
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        # Run as "simple" in foreground mode.
+        # This is more reliable
+        ExecStart = ''
+          ${pkgs.orangefs}/bin/pvfs2-server -d \
+            /etc/orangefs/server.conf
+        '';
+        TimeoutStopSec = "120";
+        User = "orangefs";
+        Group = "orangefs";
+      };
+    };
+  };
+
+}
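
A sketch of a two-node OrangeFS server cluster using the options above; the node URLs and storage paths follow the option examples, and the same snippet would be used on both nodes. The generated /etc/orangefs/server.conf is also what you need in order to initialize the storage spaces before the first start:

  {
    services.orangefs.server = {
      enable = true;
      dataStorageSpace = "/data/storage";
      metadataStorageSpace = "/data/meta";
      servers = {
        node1 = "tcp://node1:3334";
        node2 = "tcp://node2:3334";
      };
      # The default `fileSystems` value already defines one file system named "orangefs".
    };
  }
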
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/rsyncd.nix b/nixpkgs/nixos/modules/services/network-filesystems/rsyncd.nix
new file mode 100644
index 000000000000..c9d7475395fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/rsyncd.nix
@@ -0,0 +1,127 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.rsyncd;
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "rsyncd.conf" cfg.settings;
+in {
+  options = {
+    services.rsyncd = {
+
+      enable = mkEnableOption (lib.mdDoc "the rsync daemon");
+
+      port = mkOption {
+        default = 873;
+        type = types.port;
+        description = lib.mdDoc "TCP port the daemon will listen on.";
+      };
+
+      settings = mkOption {
+        inherit (settingsFormat) type;
+        default = { };
+        example = {
+          global = {
+            uid = "nobody";
+            gid = "nobody";
+            "use chroot" = true;
+            "max connections" = 4;
+          };
+          ftp = {
+            path = "/var/ftp/./pub";
+            comment = "whole ftp area";
+          };
+          cvs = {
+            path = "/data/cvs";
+            comment = "CVS repository (requires authentication)";
+            "auth users" = [ "tridge" "susan" ];
+            "secrets file" = "/etc/rsyncd.secrets";
+          };
+        };
+        description = lib.mdDoc ''
+          Configuration for rsyncd. See
+          {manpage}`rsyncd.conf(5)`.
+        '';
+      };
+
+      socketActivated = mkOption {
+        default = false;
+        type = types.bool;
+        description =
+          lib.mdDoc "If enabled Rsync will be socket-activated rather than run persistently.";
+      };
+
+    };
+  };
+
+  imports = (map (option:
+    mkRemovedOptionModule [ "services" "rsyncd" option ]
+    "This option was removed in favor of `services.rsyncd.settings`.") [
+      "address"
+      "extraConfig"
+      "motd"
+      "user"
+      "group"
+    ]);
+
+  config = mkIf cfg.enable {
+
+    services.rsyncd.settings.global.port = toString cfg.port;
+
+    systemd = let
+      serviceConfigSecurity = {
+        ProtectSystem = "full";
+        PrivateDevices = "on";
+        NoNewPrivileges = "on";
+      };
+    in {
+      services.rsync = {
+        enable = !cfg.socketActivated;
+        aliases = [ "rsyncd.service" ];
+
+        description = "fast remote file copy program daemon";
+        after = [ "network.target" ];
+        documentation = [ "man:rsync(1)" "man:rsyncd.conf(5)" ];
+
+        serviceConfig = serviceConfigSecurity // {
+          ExecStart =
+            "${pkgs.rsync}/bin/rsync --daemon --no-detach --config=${configFile}";
+          RestartSec = 1;
+        };
+
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      services."rsync@" = {
+        description = "fast remote file copy program daemon";
+        after = [ "network.target" ];
+
+        serviceConfig = serviceConfigSecurity // {
+          ExecStart = "${pkgs.rsync}/bin/rsync --daemon --config=${configFile}";
+          StandardInput = "socket";
+          StandardOutput = "inherit";
+          StandardError = "journal";
+        };
+      };
+
+      sockets.rsync = {
+        enable = cfg.socketActivated;
+
+        description = "socket for fast remote file copy program daemon";
+        conflicts = [ "rsync.service" ];
+
+        listenStreams = [ (toString cfg.port) ];
+        socketConfig.Accept = true;
+
+        wantedBy = [ "sockets.target" ];
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ ehmry ];
+
+  # TODO: socket activated rsyncd
+
+}
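
A sketch of a socket-activated, read-only rsync module using the options above; the module name and path are illustrative, while the uid/gid and chroot settings follow the option example:

  {
    services.rsyncd = {
      enable = true;
      socketActivated = true;   # spawn rsync@.service per connection via rsync.socket
      settings = {
        global = {
          uid = "nobody";
          gid = "nobody";
          "use chroot" = true;
        };
        pub = {                 # illustrative module name
          path = "/srv/pub";    # illustrative path
          comment = "public read-only area";
          "read only" = true;
        };
      };
    };
    networking.firewall.allowedTCPPorts = [ 873 ];   # default rsyncd port set above
  }
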
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/samba-wsdd.nix b/nixpkgs/nixos/modules/services/network-filesystems/samba-wsdd.nix
new file mode 100644
index 000000000000..ad600796217b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/samba-wsdd.nix
@@ -0,0 +1,129 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.samba-wsdd;
+
+in {
+  options = {
+    services.samba-wsdd = {
+      enable = mkEnableOption (lib.mdDoc ''
+        Web Services Dynamic Discovery host daemon. This enables (Samba) hosts, like your local NAS device,
+        to be found by Web Service Discovery Clients like Windows.
+      '');
+      interface = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "eth0";
+        description = lib.mdDoc "Interface or address to use.";
+      };
+      hoplimit = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 2;
+        description = lib.mdDoc "Hop limit for multicast packets (default = 1).";
+      };
+      openFirewall = mkOption {
+        description = lib.mdDoc ''
+          Whether to open the required firewall ports in the firewall.
+        '';
+        default = false;
+        type = lib.types.bool;
+      };
+      workgroup = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "HOME";
+        description = lib.mdDoc "Set workgroup name (default WORKGROUP).";
+      };
+      hostname = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "FILESERVER";
+        description = lib.mdDoc "Override (NetBIOS) hostname to be used (default hostname).";
+      };
+      domain = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "Set domain name (disables workgroup).";
+      };
+      discovery = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable discovery operation mode.";
+      };
+      listen = mkOption {
+        type = types.str;
+        default = "/run/wsdd/wsdd.sock";
+        description = lib.mdDoc "Listen on path or localhost port in discovery mode.";
+      };
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [ "--shortlog" ];
+        example = [ "--verbose" "--no-http" "--ipv4only" "--no-host" ];
+        description = lib.mdDoc "Additional wsdd options.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.wsdd ];
+
+    systemd.services.samba-wsdd = {
+      description = "Web Services Dynamic Discovery host daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        Type = "simple";
+        ExecStart = ''
+          ${pkgs.wsdd}/bin/wsdd ${optionalString (cfg.interface != null) "--interface '${cfg.interface}'"} \
+                                ${optionalString (cfg.hoplimit != null) "--hoplimit '${toString cfg.hoplimit}'"} \
+                                ${optionalString (cfg.workgroup != null) "--workgroup '${cfg.workgroup}'"} \
+                                ${optionalString (cfg.hostname != null) "--hostname '${cfg.hostname}'"} \
+                                ${optionalString (cfg.domain != null) "--domain '${cfg.domain}'"} \
+                                ${optionalString cfg.discovery "--discovery --listen '${cfg.listen}'"} \
+                                ${escapeShellArgs cfg.extraOptions}
+        '';
+        # Runtime directory and mode
+        RuntimeDirectory = "wsdd";
+        RuntimeDirectoryMode = "0750";
+        # Access write directories
+        UMask = "0027";
+        # Capabilities
+        CapabilityBoundingSet = "";
+        # Security
+        NoNewPrivileges = true;
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = false;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "~@cpu-emulation @debug @mount @obsolete @privileged @resources";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 5357 ];
+      allowedUDPPorts = [ 3702 ];
+    };
+  };
+}
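
A sketch of enabling WS-Discovery next to a Samba server so that Windows clients can find the host; interface, workgroup and hostname reuse the examples from the option documentation:

  {
    services.samba-wsdd = {
      enable = true;
      openFirewall = true;
      interface = "eth0";
      workgroup = "HOME";
      hostname = "FILESERVER";
    };
  }
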
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/samba.nix b/nixpkgs/nixos/modules/services/network-filesystems/samba.nix
new file mode 100644
index 000000000000..0b22302c0b6d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/samba.nix
@@ -0,0 +1,252 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  smbToString = x: if builtins.typeOf x == "bool"
+                   then boolToString x
+                   else toString x;
+
+  cfg = config.services.samba;
+
+  samba = cfg.package;
+
+  shareConfig = name:
+    let share = getAttr name cfg.shares; in
+    "[${name}]\n " + (smbToString (
+       map
+         (key: "${key} = ${smbToString (getAttr key share)}\n")
+         (attrNames share)
+    ));
+
+  configFile = pkgs.writeText "smb.conf"
+    (if cfg.configText != null then cfg.configText else
+    ''
+      [global]
+      security = ${cfg.securityType}
+      passwd program = /run/wrappers/bin/passwd %u
+      invalid users = ${smbToString cfg.invalidUsers}
+
+      ${cfg.extraConfig}
+
+      ${smbToString (map shareConfig (attrNames cfg.shares))}
+    '');
+
+  # This may include nss_ldap, needed for samba if it has to use ldap.
+  nssModulesPath = config.system.nssModules.path;
+
+  daemonService = appName: args:
+    { description = "Samba Service Daemon ${appName}";
+
+      after = [ (mkIf (cfg.enableNmbd && "${appName}" == "smbd") "samba-nmbd.service") "network.target" ];
+      requiredBy = [ "samba.target" ];
+      partOf = [ "samba.target" ];
+
+      environment = {
+        LD_LIBRARY_PATH = nssModulesPath;
+        LOCALE_ARCHIVE = "/run/current-system/sw/lib/locale/locale-archive";
+      };
+
+      serviceConfig = {
+        ExecStart = "${samba}/sbin/${appName} --foreground --no-process-group ${args}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LimitNOFILE = 16384;
+        PIDFile = "/run/${appName}.pid";
+        Type = "notify";
+        NotifyAccess = "all"; #may not do anything...
+      };
+      unitConfig.RequiresMountsFor = "/var/lib/samba";
+
+      restartTriggers = [ configFile ];
+    };
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "samba" "defaultShare" ] "")
+    (mkRemovedOptionModule [ "services" "samba" "syncPasswordsByPam" ] "This option has been removed by upstream, see https://bugzilla.samba.org/show_bug.cgi?id=10669#c10")
+  ];
+
+  ###### interface
+
+  options = {
+
+    # !!! clean up the descriptions.
+
+    services.samba = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Samba, which provides file and print
+          services to Windows clients through the SMB/CIFS protocol.
+
+          ::: {.note}
+          If you use the firewall consider adding the following:
+
+              services.samba.openFirewall = true;
+          :::
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to automatically open the necessary ports in the firewall.
+        '';
+      };
+
+      enableNmbd = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable Samba's nmbd, which replies to NetBIOS over IP name
+          service requests. It also participates in the browsing protocols
+          which make up the Windows "Network Neighborhood" view.
+        '';
+      };
+
+      enableWinbindd = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable Samba's winbindd, which provides a number of services
+          to the Name Service Switch capability found in most modern C libraries,
+          to arbitrary applications via PAM and ntlm_auth and to Samba itself.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.samba;
+        defaultText = literalExpression "pkgs.samba";
+        example = literalExpression "pkgs.samba4Full";
+        description = lib.mdDoc ''
+          Defines which package should be used for the samba server.
+        '';
+      };
+
+      invalidUsers = mkOption {
+        type = types.listOf types.str;
+        default = [ "root" ];
+        description = lib.mdDoc ''
+          List of users who are not allowed to log in via Samba.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional global section and extra section lines go in here.
+        '';
+        example = ''
+          guest account = nobody
+          map to guest = bad user
+        '';
+      };
+
+      configText = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Verbatim contents of smb.conf. If null (default), use the
+          autogenerated file from NixOS instead.
+        '';
+      };
+
+      securityType = mkOption {
+        type = types.str;
+        default = "user";
+        description = lib.mdDoc "Samba security type";
+      };
+
+      nsswins = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable the WINS NSS (Name Service Switch) plug-in.
+          Enabling it allows applications to resolve WINS/NetBIOS names (a.k.a.
+          Windows machine names) by transparently querying the winbindd daemon.
+        '';
+      };
+
+      shares = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          A set describing shared resources.
+          See {command}`man smb.conf` for options.
+        '';
+        type = types.attrsOf (types.attrsOf types.unspecified);
+        example = literalExpression ''
+          { public =
+            { path = "/srv/public";
+              "read only" = true;
+              browseable = "yes";
+              "guest ok" = "yes";
+              comment = "Public samba share.";
+            };
+          }
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge
+    [ { assertions =
+          [ { assertion = cfg.nsswins -> cfg.enableWinbindd;
+              message   = "If samba.nsswins is enabled, then samba.enableWinbindd must also be enabled";
+            }
+          ];
+        # Always provide a smb.conf to shut up programs like smbclient and smbspool.
+        environment.etc."samba/smb.conf".source = mkOptionDefault (
+          if cfg.enable then configFile
+          else pkgs.writeText "smb-dummy.conf" "# Samba is disabled."
+        );
+      }
+
+      (mkIf cfg.enable {
+
+        system.nssModules = optional cfg.nsswins samba;
+        system.nssDatabases.hosts = optional cfg.nsswins "wins";
+
+        systemd = {
+          targets.samba = {
+            description = "Samba Server";
+            after = [ "network.target" ];
+            wants = [ "network-online.target" ];
+            wantedBy = [ "multi-user.target" ];
+          };
+          # Refer to https://github.com/samba-team/samba/tree/master/packaging/systemd
+          # for correct use with systemd
+          services = {
+            samba-smbd = daemonService "smbd" "";
+            samba-nmbd = mkIf cfg.enableNmbd (daemonService "nmbd" "");
+            samba-winbindd = mkIf cfg.enableWinbindd (daemonService "winbindd" "");
+          };
+          tmpfiles.rules = [
+            "d /var/lock/samba - - - - -"
+            "d /var/log/samba - - - - -"
+            "d /var/cache/samba - - - - -"
+            "d /var/lib/samba/private - - - - -"
+          ];
+        };
+
+        security.pam.services.samba = {};
+        environment.systemPackages = [ cfg.package ];
+
+        networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ 139 445 ];
+        networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ 137 138 ];
+      })
+    ];
+
+}
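Taken together, these options cover a small, self-contained setup. As a minimal sketch of how they combine (the share name, export path, and extraConfig lines below are illustrative placeholders reused from the option examples, not defaults of this module):

    {
      services.samba = {
        enable = true;
        openFirewall = true;              # opens TCP 139/445 and UDP 137/138
        extraConfig = ''
          guest account = nobody
          map to guest = bad user
        '';
        shares.public = {
          path = "/srv/public";           # hypothetical export path
          "read only" = true;
          browseable = "yes";
          "guest ok" = "yes";
          comment = "Public samba share.";
        };
      };
    }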
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/tahoe.nix b/nixpkgs/nixos/modules/services/network-filesystems/tahoe.nix
new file mode 100644
index 000000000000..14c0a3d4725f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/tahoe.nix
@@ -0,0 +1,366 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.tahoe;
+in
+  {
+    options.services.tahoe = {
+      introducers = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule {
+          options = {
+            nickname = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                The nickname of this Tahoe introducer.
+              '';
+            };
+            tub.port = mkOption {
+              default = 3458;
+              type = types.port;
+              description = lib.mdDoc ''
+                The port on which the introducer will listen.
+              '';
+            };
+            tub.location = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The external location that the introducer should listen on.
+
+                If specified, the port should be included.
+              '';
+            };
+            package = mkOption {
+              default = pkgs.tahoelafs;
+              defaultText = literalExpression "pkgs.tahoelafs";
+              type = types.package;
+              description = lib.mdDoc ''
+                The package to use for the Tahoe LAFS daemon.
+              '';
+            };
+          };
+        });
+        description = lib.mdDoc ''
+          The Tahoe introducers.
+        '';
+      };
+      nodes = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule {
+          options = {
+            nickname = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                The nickname of this Tahoe node.
+              '';
+            };
+            tub.port = mkOption {
+              default = 3457;
+              type = types.port;
+              description = lib.mdDoc ''
+                The port on which the tub will listen.
+
+                This is the correct setting to tweak if you want Tahoe's storage
+                system to listen on a different port.
+              '';
+            };
+            tub.location = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The external location that the node should listen on.
+
+                This is the setting to tweak if there are multiple interfaces
+                and you want to alter which interface Tahoe is advertising.
+
+                If specified, the port should be included.
+              '';
+            };
+            web.port = mkOption {
+              default = 3456;
+              type = types.port;
+              description = lib.mdDoc ''
+                The port on which the Web server will listen.
+
+                This is the correct setting to tweak if you want Tahoe's WUI to
+                listen on a different port.
+              '';
+            };
+            client.introducer = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The furl for a Tahoe introducer node.
+
+                Like all furls, keep this safe and don't share it.
+              '';
+            };
+            client.helper = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The furl for a Tahoe helper node.
+
+                Like all furls, keep this safe and don't share it.
+              '';
+            };
+            client.shares.needed = mkOption {
+              default = 3;
+              type = types.int;
+              description = lib.mdDoc ''
+                The number of shares required to reconstitute a file.
+              '';
+            };
+            client.shares.happy = mkOption {
+              default = 7;
+              type = types.int;
+              description = lib.mdDoc ''
+                The number of distinct storage nodes required to store
+                a file.
+              '';
+            };
+            client.shares.total = mkOption {
+              default = 10;
+              type = types.int;
+              description = lib.mdDoc ''
+                The total number of shares created for each file.
+              '';
+            };
+            storage.enable = mkEnableOption (lib.mdDoc "storage service");
+            storage.reservedSpace = mkOption {
+              default = "1G";
+              type = types.str;
+              description = lib.mdDoc ''
+                The amount of filesystem space that will not be used for storage.
+              '';
+            };
+            helper.enable = mkEnableOption (lib.mdDoc "helper service");
+            sftpd.enable = mkEnableOption (lib.mdDoc "SFTP service");
+            sftpd.port = mkOption {
+              default = null;
+              type = types.nullOr types.int;
+              description = lib.mdDoc ''
+                The port on which the SFTP server will listen.
+
+                This is the correct setting to tweak if you want Tahoe's SFTP
+                daemon to listen on a different port.
+              '';
+            };
+            sftpd.hostPublicKeyFile = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = lib.mdDoc ''
+                Path to the SSH host public key.
+              '';
+            };
+            sftpd.hostPrivateKeyFile = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = lib.mdDoc ''
+                Path to the SSH host private key.
+              '';
+            };
+            sftpd.accounts.file = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = lib.mdDoc ''
+                Path to the accounts file.
+              '';
+            };
+            sftpd.accounts.url = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                URL of the accounts server.
+              '';
+            };
+            package = mkOption {
+              default = pkgs.tahoelafs;
+              defaultText = literalExpression "pkgs.tahoelafs";
+              type = types.package;
+              description = lib.mdDoc ''
+                The package to use for the Tahoe LAFS daemon.
+              '';
+            };
+          };
+        });
+        description = lib.mdDoc ''
+          The Tahoe nodes.
+        '';
+      };
+    };
+    config = mkMerge [
+      (mkIf (cfg.introducers != {}) {
+        environment = {
+          etc = flip mapAttrs' cfg.introducers (node: settings:
+            nameValuePair "tahoe-lafs/introducer-${node}.cfg" {
+              mode = "0444";
+              text = ''
+                # This configuration is generated by Nix. Edit at your own
+                # peril; here be dragons.
+
+                [node]
+                nickname = ${settings.nickname}
+                tub.port = ${toString settings.tub.port}
+                ${optionalString (settings.tub.location != null)
+                  "tub.location = ${settings.tub.location}"}
+              '';
+            });
+          # Actually require Tahoe, so that we will have it installed.
+          systemPackages = flip mapAttrsToList cfg.introducers (node: settings:
+            settings.package
+          );
+        };
+        # Open up the firewall.
+        # networking.firewall.allowedTCPPorts = flip mapAttrsToList cfg.introducers
+        #   (node: settings: settings.tub.port);
+        systemd.services = flip mapAttrs' cfg.introducers (node: settings:
+          let
+            pidfile = "/run/tahoe.introducer-${node}.pid";
+            # This is a directory, but it has no trailing slash. Tahoe commands
+            # get antsy when there's a trailing slash.
+            nodedir = "/var/db/tahoe-lafs/introducer-${node}";
+          in nameValuePair "tahoe.introducer-${node}" {
+            description = "Tahoe LAFS node ${node}";
+            wantedBy = [ "multi-user.target" ];
+            path = [ settings.package ];
+            restartTriggers = [
+              config.environment.etc."tahoe-lafs/introducer-${node}.cfg".source ];
+            serviceConfig = {
+              Type = "simple";
+              PIDFile = pidfile;
+              # Believe it or not, Tahoe is very brittle about the order of
+              # arguments to $(tahoe run). The node directory must come first,
+              # and arguments which alter Twisted's behavior come afterwards.
+              ExecStart = ''
+                ${settings.package}/bin/tahoe run ${lib.escapeShellArg nodedir} --pidfile=${lib.escapeShellArg pidfile}
+              '';
+            };
+            preStart = ''
+              if [ ! -d ${lib.escapeShellArg nodedir} ]; then
+                mkdir -p /var/db/tahoe-lafs
+                # See https://github.com/NixOS/nixpkgs/issues/25273
+                tahoe create-introducer \
+                  --hostname="${config.networking.hostName}" \
+                  ${lib.escapeShellArg nodedir}
+              fi
+
+              # Tahoe has created a predefined tahoe.cfg which we must now
+              # scribble over.
+              # XXX I thought that a symlink would work here, but it doesn't, so
+              # we must do this on every prestart. Fixes welcome.
+              # rm ${nodedir}/tahoe.cfg
+              # ln -s /etc/tahoe-lafs/introducer-${node}.cfg ${nodedir}/tahoe.cfg
+              cp /etc/tahoe-lafs/introducer-"${node}".cfg ${lib.escapeShellArg nodedir}/tahoe.cfg
+            '';
+          });
+        users.users = flip mapAttrs' cfg.introducers (node: _:
+          nameValuePair "tahoe.introducer-${node}" {
+            description = "Tahoe node user for introducer ${node}";
+            isSystemUser = true;
+          });
+      })
+      (mkIf (cfg.nodes != {}) {
+        environment = {
+          etc = flip mapAttrs' cfg.nodes (node: settings:
+            nameValuePair "tahoe-lafs/${node}.cfg" {
+              mode = "0444";
+              text = ''
+                # This configuration is generated by Nix. Edit at your own
+                # peril; here be dragons.
+
+                [node]
+                nickname = ${settings.nickname}
+                tub.port = ${toString settings.tub.port}
+                ${optionalString (settings.tub.location != null)
+                  "tub.location = ${settings.tub.location}"}
+                # This is a Twisted endpoint. Twisted Web doesn't work on
+                # non-TCP. ~ C.
+                web.port = tcp:${toString settings.web.port}
+
+                [client]
+                ${optionalString (settings.client.introducer != null)
+                  "introducer.furl = ${settings.client.introducer}"}
+                ${optionalString (settings.client.helper != null)
+                  "helper.furl = ${settings.client.helper}"}
+
+                shares.needed = ${toString settings.client.shares.needed}
+                shares.happy = ${toString settings.client.shares.happy}
+                shares.total = ${toString settings.client.shares.total}
+
+                [storage]
+                enabled = ${boolToString settings.storage.enable}
+                reserved_space = ${settings.storage.reservedSpace}
+
+                [helper]
+                enabled = ${boolToString settings.helper.enable}
+
+                [sftpd]
+                enabled = ${boolToString settings.sftpd.enable}
+                ${optionalString (settings.sftpd.port != null)
+                  "port = ${toString settings.sftpd.port}"}
+                ${optionalString (settings.sftpd.hostPublicKeyFile != null)
+                  "host_pubkey_file = ${settings.sftpd.hostPublicKeyFile}"}
+                ${optionalString (settings.sftpd.hostPrivateKeyFile != null)
+                  "host_privkey_file = ${settings.sftpd.hostPrivateKeyFile}"}
+                ${optionalString (settings.sftpd.accounts.file != null)
+                  "accounts.file = ${settings.sftpd.accounts.file}"}
+                ${optionalString (settings.sftpd.accounts.url != null)
+                  "accounts.url = ${settings.sftpd.accounts.url}"}
+              '';
+            });
+          # Actually require Tahoe, so that we will have it installed.
+          systemPackages = flip mapAttrsToList cfg.nodes (node: settings:
+            settings.package
+          );
+        };
+        # Open up the firewall.
+        # networking.firewall.allowedTCPPorts = flip mapAttrsToList cfg.nodes
+        #   (node: settings: settings.tub.port);
+        systemd.services = flip mapAttrs' cfg.nodes (node: settings:
+          let
+            pidfile = "/run/tahoe.${node}.pid";
+            # This is a directory, but it has no trailing slash. Tahoe commands
+            # get antsy when there's a trailing slash.
+            nodedir = "/var/db/tahoe-lafs/${node}";
+          in nameValuePair "tahoe.${node}" {
+            description = "Tahoe LAFS node ${node}";
+            wantedBy = [ "multi-user.target" ];
+            path = [ settings.package ];
+            restartTriggers = [
+              config.environment.etc."tahoe-lafs/${node}.cfg".source ];
+            serviceConfig = {
+              Type = "simple";
+              PIDFile = pidfile;
+              # Believe it or not, Tahoe is very brittle about the order of
+              # arguments to $(tahoe run). The node directory must come first,
+              # and arguments which alter Twisted's behavior come afterwards.
+              ExecStart = ''
+                ${settings.package}/bin/tahoe run ${lib.escapeShellArg nodedir} --pidfile=${lib.escapeShellArg pidfile}
+              '';
+            };
+            preStart = ''
+              if [ ! -d ${lib.escapeShellArg nodedir} ]; then
+                mkdir -p /var/db/tahoe-lafs
+                tahoe create-node --hostname=localhost ${lib.escapeShellArg nodedir}
+              fi
+
+              # Tahoe has created a predefined tahoe.cfg which we must now
+              # scribble over.
+              # XXX I thought that a symlink would work here, but it doesn't, so
+              # we must do this on every prestart. Fixes welcome.
+              # rm ${nodedir}/tahoe.cfg
+              # ln -s /etc/tahoe-lafs/${lib.escapeShellArg node}.cfg ${nodedir}/tahoe.cfg
+              cp /etc/tahoe-lafs/${lib.escapeShellArg node}.cfg ${lib.escapeShellArg nodedir}/tahoe.cfg
+            '';
+          });
+        users.users = flip mapAttrs' cfg.nodes (node: _:
+          nameValuePair "tahoe.${node}" {
+            description = "Tahoe node user for node ${node}";
+            isSystemUser = true;
+          });
+      })
+    ];
+  }
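As a rough sketch of how the two option trees above fit together on one host (nicknames are placeholders; the introducer furl must be copied from the running introducer and kept private):

    {
      services.tahoe = {
        introducers.main.nickname = "main-introducer";   # placeholder nickname
        nodes.storage1 = {
          nickname = "storage1";                         # placeholder nickname
          client.introducer = "pb://...";                # furl from the introducer; keep it secret
          storage.enable = true;
        };
      };
    }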
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/u9fs.nix b/nixpkgs/nixos/modules/services/network-filesystems/u9fs.nix
new file mode 100644
index 000000000000..d6968b2cb826
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/u9fs.nix
@@ -0,0 +1,78 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.u9fs;
+in
+{
+
+  options = {
+
+    services.u9fs = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to run the u9fs 9P server for Unix.";
+      };
+
+      listenStreams = mkOption {
+        type = types.listOf types.str;
+        default = [ "564" ];
+        example = [ "192.168.16.1:564" ];
+        description = lib.mdDoc ''
+          Sockets to listen on for client connections.
+          See {command}`man 5 systemd.socket` for socket syntax.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "nobody";
+        description =
+          lib.mdDoc "User to run u9fs under.";
+      };
+
+      extraArgs = mkOption {
+        type = types.str;
+        default = "";
+        example = "-a none";
+        description =
+          lib.mdDoc ''
+            Extra arguments to pass on invocation,
+            see {command}`man 4 u9fs`
+          '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd = {
+      sockets.u9fs = {
+        description = "U9fs Listening Socket";
+        wantedBy = [ "sockets.target" ];
+        after = [ "network.target" ];
+        inherit (cfg) listenStreams;
+        socketConfig.Accept = "yes";
+      };
+      services."u9fs@" = {
+        description = "9P Protocol Server";
+        reloadIfChanged = true;
+        requires = [ "u9fs.socket" ];
+        serviceConfig =
+          { ExecStart = "-${pkgs.u9fs}/bin/u9fs ${cfg.extraArgs}";
+            StandardInput = "socket";
+            StandardError = "journal";
+            User = cfg.user;
+            AmbientCapabilities = "cap_setuid cap_setgid";
+          };
+      };
+    };
+
+  };
+
+}
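A minimal sketch of enabling the socket-activated server (the listen address is a placeholder; `-a none` disables authentication, mirroring the option's own example):

    {
      services.u9fs = {
        enable = true;
        listenStreams = [ "192.168.16.1:564" ];   # placeholder address:port
        extraArgs = "-a none";                    # no authentication, see man 4 u9fs
      };
    }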
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/webdav-server-rs.nix b/nixpkgs/nixos/modules/services/network-filesystems/webdav-server-rs.nix
new file mode 100644
index 000000000000..34e717025e64
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/webdav-server-rs.nix
@@ -0,0 +1,150 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.webdav-server-rs;
+  format = pkgs.formats.toml { };
+  settings = recursiveUpdate
+    {
+      server.uid = config.users.users."${cfg.user}".uid;
+      server.gid = config.users.groups."${cfg.group}".gid;
+    }
+    cfg.settings;
+in
+{
+  options = {
+    services.webdav-server-rs = {
+      enable = mkEnableOption (lib.mdDoc "WebDAV server");
+
+      user = mkOption {
+        type = types.str;
+        default = "webdav";
+        description = lib.mdDoc "User to run under when setuid is not enabled.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "webdav";
+        description = lib.mdDoc "Group to run under when setuid is not enabled.";
+      };
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable debug mode.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = { };
+        description = lib.mdDoc ''
+          Attrset that is converted and passed as config file. Available
+          options can be found
+          [here](https://github.com/miquels/webdav-server-rs/blob/master/webdav-server.toml).
+        '';
+        example = literalExpression ''
+          {
+            server.listen = [ "0.0.0.0:4918" "[::]:4918" ];
+            accounts = {
+              auth-type = "htpasswd.default";
+              acct-type = "unix";
+            };
+            htpasswd.default = {
+              htpasswd = "/etc/htpasswd";
+            };
+            location = [
+              {
+                route = [ "/public/*path" ];
+                directory = "/srv/public";
+                handler = "filesystem";
+                methods = [ "webdav-ro" ];
+                autoindex = true;
+                auth = "false";
+              }
+              {
+                route = [ "/user/:user/*path" ];
+                directory = "~";
+                handler = "filesystem";
+                methods = [ "webdav-rw" ];
+                autoindex = true;
+                auth = "true";
+                setuid = true;
+              }
+            ];
+          }
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = format.generate "webdav-server.toml" settings;
+        defaultText = "Config file generated from services.webdav-server-rs.settings";
+        description = lib.mdDoc ''
+          Path to config file. If this option is set, it will override any
+          configuration done in services.webdav-server-rs.settings.
+        '';
+        example = "/etc/webdav-server.toml";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = hasAttr cfg.user config.users.users && config.users.users."${cfg.user}".uid != null;
+        message = "users.users.${cfg.user} and users.users.${cfg.user}.uid must be defined.";
+      }
+      {
+        assertion = hasAttr cfg.group config.users.groups && config.users.groups."${cfg.group}".gid != null;
+        message = "users.groups.${cfg.group} and users.groups.${cfg.group}.gid must be defined.";
+      }
+    ];
+
+    users.users = optionalAttrs (cfg.user == "webdav") {
+      webdav = {
+        description = "WebDAV user";
+        group = cfg.group;
+        uid = config.ids.uids.webdav;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "webdav") {
+      webdav.gid = config.ids.gids.webdav;
+    };
+
+    systemd.services.webdav-server-rs = {
+      description = "WebDAV server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.webdav-server-rs}/bin/webdav-server ${lib.optionalString cfg.debug "--debug"} -c ${cfg.configFile}";
+
+        CapabilityBoundingSet = [
+          "CAP_SETUID"
+          "CAP_SETGID"
+        ];
+
+        NoExecPaths = [ "/" ];
+        ExecPaths = [ "/nix/store" ];
+
+        # At startup this program detects whether it is running as the root
+        # user; if so, it uses root privileges to switch the process UID to the
+        # respective Unix user when a user logs in. Maybe we can enable
+        # DynamicUser in the future once it can detect the CAP_SETUID and
+        # CAP_SETGID capabilities.
+
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = true;
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ pmy ];
+}
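Pulling the options together, a sketch of a read-only public share (the listen address, port, and directory are placeholders borrowed from the settings example above):

    {
      services.webdav-server-rs = {
        enable = true;
        settings = {
          server.listen = [ "0.0.0.0:4918" ];
          location = [
            {
              route = [ "/public/*path" ];
              directory = "/srv/public";    # placeholder directory
              handler = "filesystem";
              methods = [ "webdav-ro" ];    # read-only
              autoindex = true;
              auth = "false";
            }
          ];
        };
      };
    }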
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/webdav.nix b/nixpkgs/nixos/modules/services/network-filesystems/webdav.nix
new file mode 100644
index 000000000000..a384e58c96bf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/webdav.nix
@@ -0,0 +1,105 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.webdav;
+  format = pkgs.formats.yaml { };
+in
+{
+  options = {
+    services.webdav = {
+      enable = mkEnableOption (lib.mdDoc "WebDAV server");
+
+      user = mkOption {
+        type = types.str;
+        default = "webdav";
+        description = lib.mdDoc "User account under which WebDAV runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "webdav";
+        description = lib.mdDoc "Group under which WebDAV runs.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = { };
+        description = lib.mdDoc ''
+          Attrset that is converted and passed as config file. Available options
+          can be found
+          [here](https://github.com/hacdias/webdav).
+
+          This program supports reading username and password configuration
+          from environment variables, so it's strongly recommended to store
+          username and password in a separate
+          [EnvironmentFile](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#EnvironmentFile=).
+          This prevents adding secrets to the world-readable Nix store.
+        '';
+        example = literalExpression ''
+          {
+              address = "0.0.0.0";
+              port = 8080;
+              scope = "/srv/public";
+              modify = true;
+              auth = true;
+              users = [
+                {
+                  username = "{env}ENV_USERNAME";
+                  password = "{env}ENV_PASSWORD";
+                }
+              ];
+          }
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = format.generate "webdav.yaml" cfg.settings;
+        defaultText = "Config file generated from services.webdav.settings";
+        description = lib.mdDoc ''
+          Path to config file. If this option is set, it will override any
+          configuration done in options.services.webdav.settings.
+        '';
+        example = "/etc/webdav/config.yaml";
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Environment file as defined in {manpage}`systemd.exec(5)`.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users = mkIf (cfg.user == "webdav") {
+      webdav = {
+        description = "WebDAV daemon user";
+        group = cfg.group;
+        uid = config.ids.uids.webdav;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "webdav") {
+      webdav.gid = config.ids.gids.webdav;
+    };
+
+    systemd.services.webdav = {
+      description = "WebDAV server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.webdav}/bin/webdav -c ${cfg.configFile}";
+        Restart = "on-failure";
+        User = cfg.user;
+        Group = cfg.group;
+        EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ pmy ];
+}
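To illustrate the EnvironmentFile recommendation above, a sketch where the credentials live outside the Nix store (the file path and variable names are placeholders; the file would contain ENV_USERNAME=... and ENV_PASSWORD=... lines):

    {
      services.webdav = {
        enable = true;
        environmentFile = "/var/lib/webdav/credentials.env";   # placeholder path, not in the Nix store
        settings = {
          address = "0.0.0.0";
          port = 8080;
          scope = "/srv/public";
          auth = true;
          users = [
            { username = "{env}ENV_USERNAME"; password = "{env}ENV_PASSWORD"; }
          ];
        };
      };
    }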
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/xtreemfs.nix b/nixpkgs/nixos/modules/services/network-filesystems/xtreemfs.nix
new file mode 100644
index 000000000000..866661cf4e6f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/xtreemfs.nix
@@ -0,0 +1,495 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xtreemfs;
+
+  xtreemfs = pkgs.xtreemfs;
+
+  home = cfg.homeDir;
+
+  startupScript = class: configPath: pkgs.writeScript "xtreemfs-osd.sh" ''
+    #! ${pkgs.runtimeShell}
+    JAVA_HOME="${pkgs.jdk}"
+    JAVADIR="${xtreemfs}/share/java"
+    JAVA_CALL="$JAVA_HOME/bin/java -ea -cp $JAVADIR/XtreemFS.jar:$JAVADIR/BabuDB.jar:$JAVADIR/Flease.jar:$JAVADIR/protobuf-java-2.5.0.jar:$JAVADIR/Foundation.jar:$JAVADIR/jdmkrt.jar:$JAVADIR/jdmktk.jar:$JAVADIR/commons-codec-1.3.jar"
+    $JAVA_CALL ${class} ${configPath}
+  '';
+
+  dirReplicationConfig = pkgs.writeText "xtreemfs-dir-replication-plugin.properties" ''
+    babudb.repl.backupDir = ${home}/server-repl-dir
+    plugin.jar = ${xtreemfs}/share/java/BabuDB_replication_plugin.jar
+    babudb.repl.dependency.0 = ${xtreemfs}/share/java/Flease.jar
+
+    ${cfg.dir.replication.extraConfig}
+  '';
+
+  dirConfig = pkgs.writeText "xtreemfs-dir-config.properties" ''
+    uuid = ${cfg.dir.uuid}
+    listen.port = ${toString cfg.dir.port}
+    ${optionalString (cfg.dir.address != "") "listen.address = ${cfg.dir.address}"}
+    http_port = ${toString cfg.dir.httpPort}
+    babudb.baseDir = ${home}/dir/database
+    babudb.logDir = ${home}/dir/db-log
+    babudb.sync = ${if cfg.dir.replication.enable then "FDATASYNC" else cfg.dir.syncMode}
+
+    ${optionalString cfg.dir.replication.enable "babudb.plugin.0 = ${dirReplicationConfig}"}
+
+    ${cfg.dir.extraConfig}
+  '';
+
+  mrcReplicationConfig = pkgs.writeText "xtreemfs-mrc-replication-plugin.properties" ''
+    babudb.repl.backupDir = ${home}/server-repl-mrc
+    plugin.jar = ${xtreemfs}/share/java/BabuDB_replication_plugin.jar
+    babudb.repl.dependency.0 = ${xtreemfs}/share/java/Flease.jar
+
+    ${cfg.mrc.replication.extraConfig}
+  '';
+
+  mrcConfig = pkgs.writeText "xtreemfs-mrc-config.properties" ''
+    uuid = ${cfg.mrc.uuid}
+    listen.port = ${toString cfg.mrc.port}
+    ${optionalString (cfg.mrc.address != "") "listen.address = ${cfg.mrc.address}"}
+    http_port = ${toString cfg.mrc.httpPort}
+    babudb.baseDir = ${home}/mrc/database
+    babudb.logDir = ${home}/mrc/db-log
+    babudb.sync = ${if cfg.mrc.replication.enable then "FDATASYNC" else cfg.mrc.syncMode}
+
+    ${optionalString cfg.mrc.replication.enable "babudb.plugin.0 = ${mrcReplicationConfig}"}
+
+    ${cfg.mrc.extraConfig}
+  '';
+
+  osdConfig = pkgs.writeText "xtreemfs-osd-config.properties" ''
+    uuid = ${cfg.osd.uuid}
+    listen.port = ${toString cfg.osd.port}
+    ${optionalString (cfg.osd.address != "") "listen.address = ${cfg.osd.address}"}
+    http_port = ${toString cfg.osd.httpPort}
+    object_dir = ${home}/osd/
+
+    ${cfg.osd.extraConfig}
+  '';
+
+  optionalDir = optionals cfg.dir.enable ["xtreemfs-dir.service"];
+
+  systemdOptionalDependencies = {
+    after = [ "network.target" ] ++ optionalDir;
+    wantedBy = [ "multi-user.target" ] ++ optionalDir;
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.xtreemfs = {
+
+      enable = mkEnableOption (lib.mdDoc "XtreemFS");
+
+      homeDir = mkOption {
+        type = types.path;
+        default = "/var/lib/xtreemfs";
+        description = lib.mdDoc ''
+          XtreemFS home dir for the xtreemfs user.
+        '';
+      };
+
+      dir = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to enable XtreemFS DIR service.
+          '';
+        };
+
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e40";
+          type = types.str;
+          description = lib.mdDoc ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with the `uuidgen` command, found in
+            the `util-linux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32638;
+          type = types.port;
+          description = lib.mdDoc ''
+            The port to listen on for incoming connections (TCP).
+          '';
+        };
+        address = mkOption {
+          type = types.str;
+          example = "127.0.0.1";
+          default = "";
+          description = lib.mdDoc ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30638;
+          type = types.port;
+          description = lib.mdDoc ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        syncMode = mkOption {
+          type = types.enum [ "ASYNC" "SYNC_WRITE_METADATA" "SYNC_WRITE" "FDATASYNC" "FSYNC" ];
+          default = "FSYNC";
+          example = "FDATASYNC";
+          description = lib.mdDoc ''
+            The sync mode influences how operations are committed to the disk
+            log before the operation is acknowledged to the caller.
+
+            - `ASYNC`: writes to the disk log are buffered in memory by the operating system. This is the fastest mode, but it will lead to data loss in case of a crash, kernel panic or power failure.
+            - `SYNC_WRITE_METADATA`: opens the file with O_SYNC, so the system will not buffer any writes. The operation is acknowledged once the data has been safely written to disk. This mode is slow but offers maximum data safety. However, BabuDB cannot influence the disk drive caches; this depends on the OS and hard disk model.
+            - `SYNC_WRITE`: similar to SYNC_WRITE_METADATA, but opens the file with O_DSYNC, which means that only the data is committed to disk. This can lead to some data loss depending on the implementation of the underlying file system. Linux does not implement this mode.
+            - `FDATASYNC`: similar to SYNC_WRITE, but opens the file in asynchronous mode and calls fdatasync() after writing the data to disk.
+            - `FSYNC`: similar to SYNC_WRITE_METADATA, but opens the file in asynchronous mode and calls fsync() after writing the data to disk.
+
+            For best throughput use ASYNC, for maximum data safety use FSYNC.
+
+            (If xtreemfs.dir.replication.enable is true then FDATASYNC is forced)
+          '';
+        };
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          example = ''
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/dir.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = lib.mdDoc ''
+            Configuration of XtreemFS DIR service.
+            WARNING: the configuration is saved as plaintext inside the Nix store.
+            For more options: https://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+        replication = {
+          enable = mkEnableOption (lib.mdDoc "XtreemFS DIR replication plugin");
+          extraConfig = mkOption {
+            type = types.lines;
+            example = ''
+              # participants of the replication including this replica
+              babudb.repl.participant.0 = 192.168.0.10
+              babudb.repl.participant.0.port = 35676
+              babudb.repl.participant.1 = 192.168.0.11
+              babudb.repl.participant.1.port = 35676
+              babudb.repl.participant.2 = 192.168.0.12
+              babudb.repl.participant.2.port = 35676
+
+              # number of servers that at least have to be up to date
+              # To have a fault-tolerant system, this value has to be set to the
+              # majority of nodes i.e., if you have three replicas, set this to 2
+              # Please note that a setup with two nodes provides no fault-tolerance.
+              babudb.repl.sync.n = 2
+
+              # specify whether SSL is required
+              babudb.ssl.enabled = true
+
+              babudb.ssl.protocol = tlsv12
+
+              # server credentials for SSL handshakes
+              babudb.ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+              babudb.ssl.service_creds.pw = passphrase
+              babudb.ssl.service_creds.container = pkcs12
+
+              # trusted certificates for SSL handshakes
+              babudb.ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+              babudb.ssl.trusted_certs.pw = jks_passphrase
+              babudb.ssl.trusted_certs.container = jks
+
+              babudb.ssl.authenticationWithoutEncryption = false
+            '';
+            description = lib.mdDoc ''
+              Configuration of XtreemFS DIR replication plugin.
+              WARNING: the configuration is saved as plaintext inside the Nix store.
+              For more options: https://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+            '';
+          };
+        };
+      };
+
+      mrc = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to enable XtreemFS MRC service.
+          '';
+        };
+
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e41";
+          type = types.str;
+          description = lib.mdDoc ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with the `uuidgen` command, found in
+            the `util-linux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32636;
+          type = types.port;
+          description = lib.mdDoc ''
+            The port to listen on for incoming connections (TCP).
+          '';
+        };
+        address = mkOption {
+          example = "127.0.0.1";
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30636;
+          type = types.port;
+          description = lib.mdDoc ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        syncMode = mkOption {
+          default = "FSYNC";
+          type = types.enum [ "ASYNC" "SYNC_WRITE_METADATA" "SYNC_WRITE" "FDATASYNC" "FSYNC" ];
+          example = "FDATASYNC";
+          description = lib.mdDoc ''
+            The sync mode influences how operations are committed to the disk
+            log before the operation is acknowledged to the caller.
+
+            - `ASYNC`: writes to the disk log are buffered in memory by the operating system. This is the fastest mode, but it will lead to data loss in case of a crash, kernel panic or power failure.
+            - `SYNC_WRITE_METADATA`: opens the file with O_SYNC, so the system will not buffer any writes. The operation is acknowledged once the data has been safely written to disk. This mode is slow but offers maximum data safety. However, BabuDB cannot influence the disk drive caches; this depends on the OS and hard disk model.
+            - `SYNC_WRITE`: similar to SYNC_WRITE_METADATA, but opens the file with O_DSYNC, which means that only the data is committed to disk. This can lead to some data loss depending on the implementation of the underlying file system. Linux does not implement this mode.
+            - `FDATASYNC`: similar to SYNC_WRITE, but opens the file in asynchronous mode and calls fdatasync() after writing the data to disk.
+            - `FSYNC`: similar to SYNC_WRITE_METADATA, but opens the file in asynchronous mode and calls fsync() after writing the data to disk.
+
+            For best throughput use ASYNC, for maximum data safety use FSYNC.
+
+            (If xtreemfs.mrc.replication.enable is true then FDATASYNC is forced)
+          '';
+        };
+        extraConfig = mkOption {
+          type = types.lines;
+          example = ''
+            osd_check_interval = 300
+            no_atime = true
+            local_clock_renewal = 0
+            remote_time_sync = 30000
+            authentication_provider = org.xtreemfs.common.auth.NullAuthProvider
+
+            # shared secret between the MRC and all OSDs
+            capability_secret = iNG8UuQJrJ6XVDTe
+
+            dir_service.host = 192.168.0.10
+            dir_service.port = 32638
+
+            # if replication is enabled
+            dir_service.1.host = 192.168.0.11
+            dir_service.1.port = 32638
+            dir_service.2.host = 192.168.0.12
+            dir_service.2.port = 32638
+
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.protocol = tlsv12
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/mrc.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = lib.mdDoc ''
+            Configuration of XtreemFS MRC service.
+            WARNING: the configuration is saved as plaintext inside the Nix store.
+            For more options: https://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+        replication = {
+          enable = mkEnableOption (lib.mdDoc "XtreemFS MRC replication plugin");
+          extraConfig = mkOption {
+            type = types.lines;
+            example = ''
+              # participants of the replication including this replica
+              babudb.repl.participant.0 = 192.168.0.10
+              babudb.repl.participant.0.port = 35678
+              babudb.repl.participant.1 = 192.168.0.11
+              babudb.repl.participant.1.port = 35678
+              babudb.repl.participant.2 = 192.168.0.12
+              babudb.repl.participant.2.port = 35678
+
+              # number of servers that at least have to be up to date
+              # To have a fault-tolerant system, this value has to be set to the
+              # majority of nodes i.e., if you have three replicas, set this to 2
+              # Please note that a setup with two nodes provides no fault-tolerance.
+              babudb.repl.sync.n = 2
+
+              # specify whether SSL is required
+              babudb.ssl.enabled = true
+
+              babudb.ssl.protocol = tlsv12
+
+              # server credentials for SSL handshakes
+              babudb.ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+              babudb.ssl.service_creds.pw = passphrase
+              babudb.ssl.service_creds.container = pkcs12
+
+              # trusted certificates for SSL handshakes
+              babudb.ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+              babudb.ssl.trusted_certs.pw = jks_passphrase
+              babudb.ssl.trusted_certs.container = jks
+
+              babudb.ssl.authenticationWithoutEncryption = false
+            '';
+            description = lib.mdDoc ''
+              Configuration of XtreemFS MRC replication plugin.
+              WARNING: the configuration is saved as plaintext inside the Nix store.
+              For more options: https://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+            '';
+          };
+        };
+      };
+
+      osd = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to enable XtreemFS OSD service.
+          '';
+        };
+
+        uuid = mkOption {
+          example = "eacb6bab-f444-4ebf-a06a-3f72d7465e42";
+          type = types.str;
+          description = lib.mdDoc ''
+            Must be set to a unique identifier, preferably a UUID according to
+            RFC 4122. UUIDs can be generated with the `uuidgen` command, found in
+            the `util-linux` package.
+          '';
+        };
+        port = mkOption {
+          default = 32640;
+          type = types.port;
+          description = lib.mdDoc ''
+            The port to listen on for incoming connections (TCP and UDP).
+          '';
+        };
+        address = mkOption {
+          example = "127.0.0.1";
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            If specified, it defines the interface to listen on. If not
+            specified, the service will listen on all interfaces (any).
+          '';
+        };
+        httpPort = mkOption {
+          default = 30640;
+          type = types.port;
+          description = lib.mdDoc ''
+            Specifies the listen port for the HTTP service that returns the
+            status page.
+          '';
+        };
+        extraConfig = mkOption {
+          type = types.lines;
+          example = ''
+            local_clock_renewal = 0
+            remote_time_sync = 30000
+            report_free_space = true
+            capability_secret = iNG8UuQJrJ6XVDTe
+
+            dir_service.host = 192.168.0.10
+            dir_service.port = 32638
+
+            # if replication is used
+            dir_service.1.host = 192.168.0.11
+            dir_service.1.port = 32638
+            dir_service.2.host = 192.168.0.12
+            dir_service.2.port = 32638
+
+            # specify whether SSL is required
+            ssl.enabled = true
+            ssl.service_creds.pw = passphrase
+            ssl.service_creds.container = pkcs12
+            ssl.service_creds = /etc/xos/xtreemfs/truststore/certs/osd.p12
+            ssl.trusted_certs = /etc/xos/xtreemfs/truststore/certs/trusted.jks
+            ssl.trusted_certs.pw = jks_passphrase
+            ssl.trusted_certs.container = jks
+          '';
+          description = lib.mdDoc ''
+            Configuration of XtreemFS OSD service.
+            WARNING: the configuration is saved as plaintext inside the Nix store.
+            For more options: https://www.xtreemfs.org/xtfs-guide-1.5.1/index.html
+          '';
+        };
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+
+    environment.systemPackages = [ xtreemfs ];
+
+    users.users.xtreemfs =
+      { uid = config.ids.uids.xtreemfs;
+        description = "XtreemFS user";
+        createHome = true;
+        home = home;
+      };
+
+    users.groups.xtreemfs =
+      { gid = config.ids.gids.xtreemfs;
+      };
+
+    systemd.services.xtreemfs-dir = mkIf cfg.dir.enable {
+      description = "XtreemFS-DIR Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.dir.DIR" dirConfig}";
+      };
+    };
+
+    systemd.services.xtreemfs-mrc = mkIf cfg.mrc.enable ({
+      description = "XtreemFS-MRC Server";
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.mrc.MRC" mrcConfig}";
+      };
+    } // systemdOptionalDependencies);
+
+    systemd.services.xtreemfs-osd = mkIf cfg.osd.enable ({
+      description = "XtreemFS-OSD Server";
+      serviceConfig = {
+        User = "xtreemfs";
+        ExecStart = "${startupScript "org.xtreemfs.osd.OSD" osdConfig}";
+      };
+    } // systemdOptionalDependencies);
+
+  };
+
+}
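A sketch of a single-host deployment running the DIR, MRC, and OSD services together; the UUIDs below are the placeholder values from the option examples and must be replaced with your own unique ones (uuidgen). In a real multi-host setup the MRC and OSD also need dir_service.host entries in their extraConfig, as shown in the option examples above.

    {
      services.xtreemfs = {
        enable = true;
        dir.uuid = "eacb6bab-f444-4ebf-a06a-3f72d7465e40";   # placeholder UUID
        mrc.uuid = "eacb6bab-f444-4ebf-a06a-3f72d7465e41";   # placeholder UUID
        osd.uuid = "eacb6bab-f444-4ebf-a06a-3f72d7465e42";   # placeholder UUID
      };
    }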
diff --git a/nixpkgs/nixos/modules/services/network-filesystems/yandex-disk.nix b/nixpkgs/nixos/modules/services/network-filesystems/yandex-disk.nix
new file mode 100644
index 000000000000..1078df0bed25
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/network-filesystems/yandex-disk.nix
@@ -0,0 +1,116 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.yandex-disk;
+
+  dir = "/var/lib/yandex-disk";
+
+  u = if cfg.user != null then cfg.user else "yandexdisk";
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.yandex-disk = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Yandex.Disk client. See https://disk.yandex.ru/
+        '';
+      };
+
+      username = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          Your yandex.com login name.
+        '';
+      };
+
+      password = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          Your yandex.com password. Warning: it will be world-readable in /nix/store.
+        '';
+      };
+
+      user = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The user the yandex-disk daemon should run as.
+        '';
+      };
+
+      directory = mkOption {
+        type = types.path;
+        default = "/home/Yandex.Disk";
+        description = lib.mdDoc "The directory to use for Yandex.Disk storage";
+      };
+
+      excludes = mkOption {
+        default = "";
+        type = types.commas;
+        example = "data,backup";
+        description = lib.mdDoc ''
+          Comma-separated list of directories which are excluded from synchronization.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = mkIf (cfg.user == null) [ {
+      name = u;
+      uid = config.ids.uids.yandexdisk;
+      group = "nogroup";
+      home = dir;
+    } ];
+
+    systemd.services.yandex-disk = {
+      description = "Yandex-disk server";
+
+      after = [ "network.target" ];
+
+      wantedBy = [ "multi-user.target" ];
+
+      # FIXME: have to specify ${directory} here as well
+      unitConfig.RequiresMountsFor = dir;
+
+      script = ''
+        mkdir -p -m 700 ${dir}
+        chown ${u} ${dir}
+
+        if ! test -d "${cfg.directory}" ; then
+          (mkdir -p -m 755 ${cfg.directory} && chown ${u} ${cfg.directory}) ||
+            exit 1
+        fi
+
+        ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${u} \
+          -c '${pkgs.yandex-disk}/bin/yandex-disk token -p ${cfg.password} ${cfg.username} ${dir}/token'
+
+        ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${u} \
+          -c '${pkgs.yandex-disk}/bin/yandex-disk start --no-daemon -a ${dir}/token -d ${cfg.directory} --exclude-dirs=${cfg.excludes}'
+      '';
+
+    };
+  };
+
+}
+
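A sketch of how these options combine, keeping the password warning above in mind (the credentials and exclude list are placeholders):

    {
      services.yandex-disk = {
        enable = true;
        username = "example-login";      # placeholder yandex.com login
        password = "...";                # placeholder; will be world-readable in /nix/store
        directory = "/home/Yandex.Disk";
        excludes = "data,backup";        # placeholder exclude list
      };
    }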
diff --git a/nixpkgs/nixos/modules/services/networking/3proxy.nix b/nixpkgs/nixos/modules/services/networking/3proxy.nix
new file mode 100644
index 000000000000..ef695a7f49fa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/3proxy.nix
@@ -0,0 +1,381 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  pkg = pkgs._3proxy;
+  cfg = config.services._3proxy;
+  optionalList = list: if list == [ ] then "*" else concatMapStringsSep "," toString list;
+in {
+  options.services._3proxy = {
+    enable = mkEnableOption (lib.mdDoc "3proxy");
+    confFile = mkOption {
+      type = types.path;
+      example = "/var/lib/3proxy/3proxy.conf";
+      description = lib.mdDoc ''
+        Ignore all other 3proxy options and load configuration from this file.
+      '';
+    };
+    usersFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/var/lib/3proxy/3proxy.passwd";
+      description = lib.mdDoc ''
+        Load users and passwords from this file.
+
+        Example users file with plain-text passwords:
+
+        ```
+          test1:CL:password1
+          test2:CL:password2
+        ```
+
+        Example users file with md5-crypted passwords:
+
+        ```
+          test1:CR:$1$tFkisVd2$1GA8JXkRmTXdLDytM/i3a1
+          test2:CR:$1$rkpibm5J$Aq1.9VtYAn0JrqZ8M.1ME.
+        ```
+
+        You can generate md5-crypted passwords via https://unix4lyfe.org/crypt/
+        Note that the htpasswd tool generates incompatible md5-crypted passwords.
+        Consult [documentation](https://github.com/z3APA3A/3proxy/wiki/How-To-%28incomplete%29#USERS) for more information.
+      '';
+    };
+    services = mkOption {
+      type = types.listOf (types.submodule {
+        options = {
+          type = mkOption {
+            type = types.enum [
+              "proxy"
+              "socks"
+              "pop3p"
+              "ftppr"
+              "admin"
+              "dnspr"
+              "tcppm"
+              "udppm"
+            ];
+            example = "proxy";
+            description = lib.mdDoc ''
+              Service type. The following values are valid:
+
+              - `"proxy"`: HTTP/HTTPS proxy (default port 3128).
+              - `"socks"`: SOCKS 4/4.5/5 proxy (default port 1080).
+              - `"pop3p"`: POP3 proxy (default port 110).
+              - `"ftppr"`: FTP proxy (default port 21).
+              - `"admin"`: Web interface (default port 80).
+              - `"dnspr"`: Caching DNS proxy (default port 53).
+              - `"tcppm"`: TCP portmapper.
+              - `"udppm"`: UDP portmapper.
+            '';
+          };
+          bindAddress = mkOption {
+            type = types.str;
+            default = "[::]";
+            example = "127.0.0.1";
+            description = lib.mdDoc ''
+              Address used for service.
+            '';
+          };
+          bindPort = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            example = 3128;
+            description = lib.mdDoc ''
+              Override default port used for service.
+            '';
+          };
+          maxConnections = mkOption {
+            type = types.int;
+            default = 100;
+            example = 1000;
+            description = lib.mdDoc ''
+              Maximum number of simultaneous connections to this service.
+            '';
+          };
+          auth = mkOption {
+            type = types.listOf (types.enum [ "none" "iponly" "strong" ]);
+            example = [ "iponly" "strong" ];
+            description = lib.mdDoc ''
+              Authentication type. The following values are valid:
+
+              - `"none"`: disables both authentication and authorization. You cannot use ACLs.
+              - `"iponly"`: specifies no authentication. ACL authorization is used.
+              - `"strong"`: authentication by username/password. If a user is not registered, their access is denied regardless of ACLs.
+
+              Double authentication is possible, e.g.
+
+              ```
+                {
+                  auth = [ "iponly" "strong" ];
+                  acl = [
+                    {
+                      rule = "allow";
+                      targets = [ "192.168.0.0/16" ];
+                    }
+                    {
+                      rule = "allow";
+                      users = [ "user1" "user2" ];
+                    }
+                  ];
+                }
+              ```
+              In this example strong username authentication is not required to access 192.168.0.0/16.
+            '';
+          };
+          acl = mkOption {
+            type = types.listOf (types.submodule {
+              options = {
+                rule = mkOption {
+                  type = types.enum [ "allow" "deny" ];
+                  example = "allow";
+                  description = lib.mdDoc ''
+                    ACL rule. The following values are valid:
+
+                    - `"allow"`: connections allowed.
+                    - `"deny"`: connections not allowed.
+                  '';
+                };
+                users = mkOption {
+                  type = types.listOf types.str;
+                  default = [ ];
+                  example = [ "user1" "user2" "user3" ];
+                  description = lib.mdDoc ''
+                    List of users, use empty list for any.
+                  '';
+                };
+                sources = mkOption {
+                  type = types.listOf types.str;
+                  default = [ ];
+                  example = [ "127.0.0.1" "192.168.1.0/24" ];
+                  description = lib.mdDoc ''
+                    List of source IP ranges, use an empty list for any.
+                  '';
+                };
+                targets = mkOption {
+                  type = types.listOf types.str;
+                  default = [ ];
+                  example = [ "127.0.0.1" "192.168.1.0/24" ];
+                  description = lib.mdDoc ''
+                    List of target IP ranges, use empty list for any.
+                    May also contain host names instead of addresses.
+                    It's possible to use a wildcard mask at the beginning and at the end of a hostname, e.g. `*badsite.com` or `*badcontent*`.
+                    The hostname is only checked if it is present in the request.
+                  '';
+                };
+                targetPorts = mkOption {
+                  type = types.listOf types.int;
+                  default = [ ];
+                  example = [ 80 443 ];
+                  description = lib.mdDoc ''
+                    List of target ports, use empty list for any.
+                  '';
+                };
+              };
+            });
+            default = [ ];
+            example = literalExpression ''
+              [
+                {
+                  rule = "allow";
+                  users = [ "user1" ];
+                }
+                {
+                  rule = "allow";
+                  sources = [ "192.168.1.0/24" ];
+                }
+                {
+                  rule = "deny";
+                }
+              ]
+            '';
+            description = lib.mdDoc ''
+              Use this option to limit user access to resources.
+            '';
+          };
+          extraArguments = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "-46";
+            description = lib.mdDoc ''
+              Extra arguments for service.
+              Consult "Options" section in [documentation](https://github.com/z3APA3A/3proxy/wiki/3proxy.cfg) for available arguments.
+            '';
+          };
+          extraConfig = mkOption {
+            type = types.nullOr types.lines;
+            default = null;
+            description = lib.mdDoc ''
+              Extra configuration for service. Use this to configure things like bandwidth limiter or ACL-based redirection.
+              Consult [documentation](https://github.com/z3APA3A/3proxy/wiki/3proxy.cfg) for available options.
+            '';
+          };
+        };
+      });
+      default = [ ];
+      example = literalExpression ''
+        [
+          {
+            type = "proxy";
+            bindAddress = "192.168.1.24";
+            bindPort = 3128;
+            auth = [ "none" ];
+          }
+          {
+            type = "proxy";
+            bindAddress = "10.10.1.20";
+            bindPort = 3128;
+            auth = [ "iponly" ];
+          }
+          {
+            type = "socks";
+            bindAddress = "172.17.0.1";
+            bindPort = 1080;
+            auth = [ "strong" ];
+          }
+        ]
+      '';
+      description = lib.mdDoc ''
+        Use this option to define 3proxy services.
+      '';
+    };
+    denyPrivate = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to deny access to private IP ranges including loopback.
+      '';
+    };
+    privateRanges = mkOption {
+      type = types.listOf types.str;
+      default = [
+        "0.0.0.0/8"
+        "127.0.0.0/8"
+        "10.0.0.0/8"
+        "100.64.0.0/10"
+        "172.16.0.0/12"
+        "192.168.0.0/16"
+        "::"
+        "::1"
+        "fc00::/7"
+      ];
+      description = lib.mdDoc ''
+        What IP ranges to deny access to when denyPrivate is set to true.
+      '';
+    };
+    resolution = mkOption {
+      type = types.submodule {
+        options = {
+          nserver = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "127.0.0.53" "192.168.1.3:5353/tcp" ];
+            description = lib.mdDoc ''
+              List of nameservers to use.
+
+              Up to 5 nservers may be specified. If no nserver is configured,
+              default system name resolution functions are used.
+            '';
+          };
+          nscache = mkOption {
+            type = types.int;
+            default = 65535;
+            description = lib.mdDoc "Set name cache size for IPv4.";
+          };
+          nscache6 = mkOption {
+            type = types.int;
+            default = 65535;
+            description = lib.mdDoc "Set name cache size for IPv6.";
+          };
+          nsrecord = mkOption {
+            type = types.attrsOf types.str;
+            default = { };
+            example = literalExpression ''
+              {
+                "files.local" = "192.168.1.12";
+                "site.local" = "192.168.1.43";
+              }
+            '';
+            description = lib.mdDoc "Adds static nsrecords.";
+          };
+        };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Use this option to configure name resolution and DNS caching.
+      '';
+    };
+    extraConfig = mkOption {
+      type = types.nullOr types.lines;
+      default = null;
+      description = lib.mdDoc ''
+        Extra configuration, appended to the 3proxy configuration file.
+        Consult [documentation](https://github.com/z3APA3A/3proxy/wiki/3proxy.cfg) for available options.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services._3proxy.confFile = mkDefault (pkgs.writeText "3proxy.conf" ''
+      # log to stdout
+      log
+
+      ${concatMapStringsSep "\n" (x: "nserver " + x) cfg.resolution.nserver}
+
+      nscache ${toString cfg.resolution.nscache}
+      nscache6 ${toString cfg.resolution.nscache6}
+
+      ${concatMapStringsSep "\n" (x: "nsrecord " + x)
+      (mapAttrsToList (name: value: "${name} ${value}")
+        cfg.resolution.nsrecord)}
+
+      ${optionalString (cfg.usersFile != null)
+        ''users $"${cfg.usersFile}"''
+      }
+
+      ${concatMapStringsSep "\n" (service: ''
+        auth ${concatStringsSep " " service.auth}
+
+        ${optionalString (cfg.denyPrivate)
+        "deny * * ${optionalList cfg.privateRanges}"}
+
+        ${concatMapStringsSep "\n" (acl:
+          "${acl.rule} ${
+            concatMapStringsSep " " optionalList [
+              acl.users
+              acl.sources
+              acl.targets
+              acl.targetPorts
+            ]
+          }") service.acl}
+
+        maxconn ${toString service.maxConnections}
+
+        ${optionalString (service.extraConfig != null) service.extraConfig}
+
+        ${service.type} -i${toString service.bindAddress} ${
+          optionalString (service.bindPort != null)
+          "-p${toString service.bindPort}"
+        } ${
+          optionalString (service.extraArguments != null) service.extraArguments
+        }
+
+        flush
+      '') cfg.services}
+      ${optionalString (cfg.extraConfig != null) cfg.extraConfig}
+    '');
+    systemd.services."3proxy" = {
+      description = "Tiny free proxy server";
+      documentation = [ "https://github.com/z3APA3A/3proxy/wiki" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "3proxy";
+        ExecStart = "${pkg}/bin/3proxy ${cfg.confFile}";
+        Restart = "on-failure";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ misuzu ];
+}
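
A minimal usage sketch for the 3proxy module above (the `enable` option is defined earlier in the module; the addresses and networks are placeholders):

```
{
  services._3proxy = {
    enable = true;
    services = [
      {
        type = "proxy";
        bindAddress = "192.168.1.24";
        bindPort = 3128;
        auth = [ "iponly" ];
        acl = [
          { rule = "allow"; sources = [ "192.168.1.0/24" ]; }
          { rule = "deny"; }
        ];
      }
    ];
  };
}
```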
diff --git a/nixpkgs/nixos/modules/services/networking/acme-dns.nix b/nixpkgs/nixos/modules/services/networking/acme-dns.nix
new file mode 100644
index 000000000000..5c53fa2cc4f1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/acme-dns.nix
@@ -0,0 +1,154 @@
+{ lib
+, config
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.services.acme-dns;
+  format = pkgs.formats.toml { };
+  inherit (lib)
+    literalExpression
+    mdDoc
+    mkEnableOption
+    mkOption
+    mkPackageOptionMD
+    types
+    ;
+  domain = "acme-dns.example.com";
+in
+{
+  options.services.acme-dns = {
+    enable = mkEnableOption (mdDoc "acme-dns");
+
+    package = mkPackageOptionMD pkgs "acme-dns" { };
+
+    settings = mkOption {
+      description = mdDoc ''
+        Free-form settings written directly to the `acme-dns.cfg` file.
+        Refer to <https://github.com/joohoi/acme-dns/blob/master/README.md#configuration> for supported values.
+      '';
+
+      default = { };
+
+      type = types.submodule {
+        freeformType = format.type;
+        options = {
+          general = {
+            listen = mkOption {
+              type = types.str;
+              description = mdDoc "IP+port combination to bind and serve the DNS server on.";
+              default = "[::]:53";
+              example = "127.0.0.1:53";
+            };
+
+            protocol = mkOption {
+              type = types.enum [ "both" "both4" "both6" "udp" "udp4" "udp6" "tcp" "tcp4" "tcp6" ];
+              description = mdDoc "Protocols to serve DNS responses on.";
+              default = "both";
+            };
+
+            domain = mkOption {
+              type = types.str;
+              description = mdDoc "Domain name to serve the requests from.";
+              example = domain;
+            };
+
+            nsname = mkOption {
+              type = types.str;
+              description = mdDoc "Zone name server.";
+              example = domain;
+            };
+
+            nsadmin = mkOption {
+              type = types.str;
+              description = mdDoc "Zone admin email address for `SOA`.";
+              example = "admin.example.com";
+            };
+
+            records = mkOption {
+              type = types.listOf types.str;
+              description = mdDoc "Predefined DNS records served in addition to the `_acme-challenge` TXT records.";
+              example = literalExpression ''
+                [
+                  # replace with your acme-dns server's public IPv4
+                  "${domain}. A 198.51.100.1"
+                  # replace with your acme-dns server's public IPv6
+                  "${domain}. AAAA 2001:db8::1"
+                  # ${domain} should resolve any *.${domain} records
+                  "${domain}. NS ${domain}."
+                ]
+              '';
+            };
+          };
+
+          database = {
+            engine = mkOption {
+              type = types.enum [ "sqlite3" "postgres" ];
+              description = mdDoc "Database engine to use.";
+              default = "sqlite3";
+            };
+            connection = mkOption {
+              type = types.str;
+              description = mdDoc "Database connection string.";
+              example = "postgres://user:password@localhost/acmedns";
+              default = "/var/lib/acme-dns/acme-dns.db";
+            };
+          };
+
+          api = {
+            ip = mkOption {
+              type = types.str;
+              description = mdDoc "IP to bind the HTTP API on.";
+              default = "[::]";
+              example = "127.0.0.1";
+            };
+
+            port = mkOption {
+              type = types.port;
+              description = mdDoc "Listen port for the HTTP API.";
+              default = 8080;
+              # acme-dns expects this value to be a string
+              apply = toString;
+            };
+
+            disable_registration = mkOption {
+              type = types.bool;
+              description = mdDoc "Whether to disable the HTTP registration endpoint.";
+              default = false;
+              example = true;
+            };
+
+            tls = mkOption {
+              type = types.enum [ "letsencrypt" "letsencryptstaging" "cert" "none" ];
+              description = mdDoc "TLS backend to use.";
+              default = "none";
+            };
+          };
+
+          logconfig = {
+            loglevel = mkOption {
+              type = types.enum [ "error" "warning" "info" "debug" ];
+              description = mdDoc "Level to log on.";
+              default = "info";
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.packages = [ cfg.package ];
+    systemd.services.acme-dns = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = [ "" "${lib.getExe cfg.package} -c ${format.generate "acme-dns.toml" cfg.settings}" ];
+        StateDirectory = "acme-dns";
+        WorkingDirectory = "%S/acme-dns";
+        DynamicUser = true;
+      };
+    };
+  };
+}
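
A minimal usage sketch for the acme-dns module above, reusing the example domain from the module; the record data is a placeholder:

```
{
  services.acme-dns = {
    enable = true;
    settings.general = {
      domain = "acme-dns.example.com";
      nsname = "acme-dns.example.com";
      nsadmin = "admin.example.com";
      records = [
        "acme-dns.example.com. A 198.51.100.1"
        "acme-dns.example.com. NS acme-dns.example.com."
      ];
    };
  };
}
```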
diff --git a/nixpkgs/nixos/modules/services/networking/adguardhome.nix b/nixpkgs/nixos/modules/services/networking/adguardhome.nix
new file mode 100644
index 000000000000..399d838ccc69
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/adguardhome.nix
@@ -0,0 +1,175 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.adguardhome;
+
+  args = concatStringsSep " " ([
+    "--no-check-update"
+    "--pidfile /run/AdGuardHome/AdGuardHome.pid"
+    "--work-dir /var/lib/AdGuardHome/"
+    "--config /var/lib/AdGuardHome/AdGuardHome.yaml"
+  ] ++ cfg.extraArgs);
+
+  configFile = pkgs.writeTextFile {
+    name = "AdGuardHome.yaml";
+    text = builtins.toJSON cfg.settings;
+    checkPhase = "${pkgs.adguardhome}/bin/adguardhome -c $out --check-config";
+  };
+  defaultBindPort = 3000;
+
+in
+{
+
+  imports =
+    let cfgPath = [ "services" "adguardhome" ];
+    in
+    [
+      (mkRenamedOptionModuleWith { sinceRelease = 2211; from = cfgPath ++ [ "host" ]; to = cfgPath ++ [ "settings" "bind_host" ]; })
+      (mkRenamedOptionModuleWith { sinceRelease = 2211; from = cfgPath ++ [ "port" ]; to = cfgPath ++ [ "settings" "bind_port" ]; })
+    ];
+
+  options.services.adguardhome = with types; {
+    enable = mkEnableOption (lib.mdDoc "AdGuard Home network-wide ad blocker");
+
+    openFirewall = mkOption {
+      default = false;
+      type = bool;
+      description = lib.mdDoc ''
+        Open ports in the firewall for the AdGuard Home web interface. Does not
+        open the port needed to access the DNS resolver.
+      '';
+    };
+
+    allowDHCP = mkOption {
+      default = cfg.settings.dhcp.enabled or false;
+      defaultText = literalExpression ''config.services.adguardhome.settings.dhcp.enabled or false'';
+      type = bool;
+      description = lib.mdDoc ''
+        Allows AdGuard Home to open raw sockets (`CAP_NET_RAW`), which is
+        required for the integrated DHCP server.
+
+        The default enables this conditionally if the declarative configuration
+        enables the integrated DHCP server. Manually setting this option is only
+        required for non-declarative setups.
+      '';
+    };
+
+    mutableSettings = mkOption {
+      default = true;
+      type = bool;
+      description = lib.mdDoc ''
+        Allow changes made on the AdGuard Home web interface to persist between
+        service restarts.
+      '';
+    };
+
+    settings = mkOption {
+      default = null;
+      type = nullOr (submodule {
+        freeformType = (pkgs.formats.yaml { }).type;
+        options = {
+          schema_version = mkOption {
+            default = pkgs.adguardhome.schema_version;
+            defaultText = literalExpression "pkgs.adguardhome.schema_version";
+            type = int;
+            description = lib.mdDoc ''
+              Schema version for the configuration.
+              Defaults to the `schema_version` supplied by `pkgs.adguardhome`.
+            '';
+          };
+          bind_host = mkOption {
+            default = "0.0.0.0";
+            type = str;
+            description = lib.mdDoc ''
+              Host address to bind HTTP server to.
+            '';
+          };
+          bind_port = mkOption {
+            default = defaultBindPort;
+            type = port;
+            description = lib.mdDoc ''
+              Port to serve HTTP pages on.
+            '';
+          };
+        };
+      });
+      description = lib.mdDoc ''
+        AdGuard Home configuration. Refer to
+        <https://github.com/AdguardTeam/AdGuardHome/wiki/Configuration#configuration-file>
+        for details on supported values.
+
+        ::: {.note}
+        If {option}`mutableSettings` is `true`, these options are merged into
+        the configuration file on start, taking precedence over configuration
+        changes made on the web interface.
+
+        Set this to `null` (default) for a non-declarative configuration without any
+        Nix-supplied values.
+        Declarative configurations are supplied with a default `schema_version`, `bind_host`, and `bind_port`.
+        :::
+      '';
+    };
+
+    extraArgs = mkOption {
+      default = [ ];
+      type = listOf str;
+      description = lib.mdDoc ''
+        Extra command line parameters to be passed to the adguardhome binary.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.settings != null -> cfg.mutableSettings
+          || (hasAttrByPath [ "dns" "bind_host" ] cfg.settings)
+          || (hasAttrByPath [ "dns" "bind_hosts" ] cfg.settings);
+        message =
+          "AdGuard setting dns.bind_host or dns.bind_hosts needs to be configured for a minimal working configuration";
+      }
+      {
+        assertion = cfg.settings != null -> cfg.mutableSettings
+          || hasAttrByPath [ "dns" "bootstrap_dns" ] cfg.settings;
+        message =
+          "AdGuard setting dns.bootstrap_dns needs to be configured for a minimal working configuration";
+      }
+    ];
+
+    systemd.services.adguardhome = {
+      description = "AdGuard Home: Network-level blocker";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      unitConfig = {
+        StartLimitIntervalSec = 5;
+        StartLimitBurst = 10;
+      };
+
+      preStart = optionalString (cfg.settings != null) ''
+        if    [ -e "$STATE_DIRECTORY/AdGuardHome.yaml" ] \
+           && [ "${toString cfg.mutableSettings}" = "1" ]; then
+          # Writing directly to AdGuardHome.yaml results in empty file
+          ${pkgs.yaml-merge}/bin/yaml-merge "$STATE_DIRECTORY/AdGuardHome.yaml" "${configFile}" > "$STATE_DIRECTORY/AdGuardHome.yaml.tmp"
+          mv "$STATE_DIRECTORY/AdGuardHome.yaml.tmp" "$STATE_DIRECTORY/AdGuardHome.yaml"
+        else
+          cp --force "${configFile}" "$STATE_DIRECTORY/AdGuardHome.yaml"
+          chmod 600 "$STATE_DIRECTORY/AdGuardHome.yaml"
+        fi
+      '';
+
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${pkgs.adguardhome}/bin/adguardhome ${args}";
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ] ++ optionals cfg.allowDHCP [ "CAP_NET_RAW" ];
+        Restart = "always";
+        RestartSec = 10;
+        RuntimeDirectory = "AdGuardHome";
+        StateDirectory = "AdGuardHome";
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.settings.bind_port or defaultBindPort ];
+  };
+}
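
A minimal declarative sketch for the AdGuard Home module above; `dns.bind_host` and `dns.bootstrap_dns` are the keys required by the module's assertions, the `dns` section is free-form and follows the upstream AdGuard Home configuration format, and the upstream resolver `"9.9.9.9"` is a placeholder:

```
{
  services.adguardhome = {
    enable = true;
    openFirewall = true;
    mutableSettings = false;
    settings = {
      dns = {
        bind_host = "0.0.0.0";
        bootstrap_dns = [ "9.9.9.9" ];
      };
    };
  };
}
```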
diff --git a/nixpkgs/nixos/modules/services/networking/alice-lg.nix b/nixpkgs/nixos/modules/services/networking/alice-lg.nix
new file mode 100644
index 000000000000..06b9ac89f12f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/alice-lg.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.alice-lg;
+  settingsFormat = pkgs.formats.ini { };
+in
+{
+  options = {
+    services.alice-lg = {
+      enable = mkEnableOption (lib.mdDoc "Alice Looking Glass");
+
+      package = mkPackageOptionMD pkgs "alice-lg" { };
+
+      settings = mkOption {
+        type = settingsFormat.type;
+        default = { };
+        description = lib.mdDoc ''
+          Alice-LG configuration. For available options, see the example configuration on [GitHub](https://github.com/alice-lg/alice-lg/blob/main/etc/alice-lg/alice.example.conf).
+        '';
+        example = literalExpression ''
+          {
+            server = {
+              # configures the built-in webserver and provides global application settings
+              listen_http = "127.0.0.1:7340";
+              enable_prefix_lookup = true;
+              asn = 9033;
+              store_backend = "postgres";
+              routes_store_refresh_parallelism = 5;
+              neighbors_store_refresh_parallelism = 10000;
+              routes_store_refresh_interval = 5;
+              neighbors_store_refresh_interval = 5;
+            };
+            postgres = {
+              url = "postgres://postgres:postgres@localhost:5432/alice";
+              min_connections = 2;
+              max_connections = 128;
+            };
+            pagination = {
+              routes_filtered_page_size = 250;
+              routes_accepted_page_size = 250;
+              routes_not_exported_page_size = 250;
+            };
+          }
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment = {
+      etc."alice-lg/alice.conf".source = settingsFormat.generate "alice-lg.conf" cfg.settings;
+    };
+    systemd.services = {
+      alice-lg = {
+        wants = [ "network.target" ];
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        description = "Alice Looking Glass";
+        serviceConfig = {
+          DynamicUser = true;
+          Type = "simple";
+          Restart = "on-failure";
+          RestartSec = 15;
+          ExecStart = "${cfg.package}/bin/alice-lg";
+          StateDirectoryMode = "0700";
+          UMask = "0007";
+          CapabilityBoundingSet = "";
+          NoNewPrivileges = true;
+          ProtectSystem = "strict";
+          PrivateTmp = true;
+          PrivateDevices = true;
+          PrivateUsers = true;
+          ProtectHostname = true;
+          ProtectClock = true;
+          ProtectKernelTunables = true;
+          ProtectKernelModules = true;
+          ProtectKernelLogs = true;
+          ProtectControlGroups = true;
+          RestrictAddressFamilies = [ "AF_INET AF_INET6" ];
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          PrivateMounts = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
+          BindReadOnlyPaths = [
+            "-/etc/resolv.conf"
+            "-/etc/nsswitch.conf"
+            "-/etc/ssl/certs"
+            "-/etc/static/ssl/certs"
+            "-/etc/hosts"
+            "-/etc/localtime"
+          ];
+        };
+      };
+    };
+  };
+}
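
A minimal usage sketch for the alice-lg module above, reusing values from the module's own example; the generated INI file lands in /etc/alice-lg/alice.conf:

```
{
  services.alice-lg = {
    enable = true;
    settings.server = {
      listen_http = "127.0.0.1:7340";
      asn = 9033;
    };
  };
}
```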
diff --git a/nixpkgs/nixos/modules/services/networking/amuled.nix b/nixpkgs/nixos/modules/services/networking/amuled.nix
new file mode 100644
index 000000000000..1cd543358196
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/amuled.nix
@@ -0,0 +1,83 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.amule;
+  opt = options.services.amule;
+  user = if cfg.user != null then cfg.user else "amule";
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.amule = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run the AMule daemon. You need to manually run "amuled --ec-config" to configure the service for the first time.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/home/${user}/";
+        defaultText = literalExpression ''
+          "/home/''${config.${opt.user}}/"
+        '';
+        description = lib.mdDoc ''
+          The directory holding configuration, incoming and temporary files.
+        '';
+      };
+
+      user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The user the AMule daemon should run as.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = mkIf (cfg.user == null) [
+      { name = "amule";
+        description = "AMule daemon";
+        group = "amule";
+        uid = config.ids.uids.amule;
+      } ];
+
+    users.groups = mkIf (cfg.user == null) [
+      { name = "amule";
+        gid = config.ids.gids.amule;
+      } ];
+
+    systemd.services.amuled = {
+      description = "AMule daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        mkdir -p ${cfg.dataDir}
+        chown ${user} ${cfg.dataDir}
+      '';
+
+      script = ''
+        ${pkgs.su}/bin/su -s ${pkgs.runtimeShell} ${user} \
+            -c 'HOME="${cfg.dataDir}" ${pkgs.amule-daemon}/bin/amuled'
+      '';
+    };
+  };
+}
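
A minimal usage sketch for the aMule module above; the dataDir value is a placeholder overriding the default home-directory location:

```
{
  # After the first start, run `amuled --ec-config` manually to create
  # the initial configuration, as noted in the enable option's description.
  services.amule = {
    enable = true;
    dataDir = "/var/lib/amule";
  };
}
```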
diff --git a/nixpkgs/nixos/modules/services/networking/antennas.nix b/nixpkgs/nixos/modules/services/networking/antennas.nix
new file mode 100644
index 000000000000..c0e56890864a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/antennas.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.antennas;
+in
+
+{
+  options = {
+    services.antennas = {
+      enable = mkEnableOption (lib.mdDoc "Antennas");
+
+      tvheadendUrl = mkOption {
+        type        = types.str;
+        default     = "http://localhost:9981";
+        description = lib.mdDoc "URL of Tvheadend.";
+      };
+
+      antennasUrl = mkOption {
+        type        = types.str;
+        default     = "http://127.0.0.1:5004";
+        description = lib.mdDoc "URL of Antennas.";
+      };
+
+      tunerCount = mkOption {
+        type        = types.int;
+        default     = 6;
+        description = lib.mdDoc "Number of tuners in Tvheadend.";
+      };
+
+      deviceUUID = mkOption {
+        type        = types.str;
+        default     = "2f70c0d7-90a3-4429-8275-cbeeee9cd605";
+        description = lib.mdDoc "Device tuner UUID. Change this if you are running multiple instances.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.antennas = {
+      description = "Antennas HDHomeRun emulator for Tvheadend. ";
+      wantedBy    = [ "multi-user.target" ];
+
+      # Config
+      environment = {
+        TVHEADEND_URL = cfg.tvheadendUrl;
+        ANTENNAS_URL = cfg.antennasUrl;
+        TUNER_COUNT = toString cfg.tunerCount;
+        DEVICE_UUID = cfg.deviceUUID;
+      };
+
+      serviceConfig = {
+         ExecStart = "${pkgs.antennas}/bin/antennas";
+
+        # Antennas expects all resources like HTML and config to be relative to its working directory
+        WorkingDirectory = "${pkgs.antennas}/libexec/antennas/deps/antennas/";
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DynamicUser = true;
+        LockPersonality = true;
+        ProcSubset = "pid";
+        PrivateDevices = true;
+        PrivateUsers = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+      };
+    };
+  };
+}
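
A minimal usage sketch for the Antennas module above; the URLs are placeholders, and a Tvheadend instance is assumed to be reachable at tvheadendUrl:

```
{
  services.antennas = {
    enable = true;
    tvheadendUrl = "http://localhost:9981";
    antennasUrl = "http://192.168.1.10:5004";
    tunerCount = 2;
  };
}
```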
diff --git a/nixpkgs/nixos/modules/services/networking/aria2.nix b/nixpkgs/nixos/modules/services/networking/aria2.nix
new file mode 100644
index 000000000000..e848869cc0ac
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/aria2.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.aria2;
+
+  homeDir = "/var/lib/aria2";
+
+  settingsDir = "${homeDir}";
+  sessionFile = "${homeDir}/aria2.session";
+  downloadDir = "${homeDir}/Downloads";
+
+  rangesToStringList = map (x: builtins.toString x.from +"-"+ builtins.toString x.to);
+
+  settingsFile = pkgs.writeText "aria2.conf"
+  ''
+    dir=${cfg.downloadDir}
+    listen-port=${concatStringsSep "," (rangesToStringList cfg.listenPortRange)}
+    rpc-listen-port=${toString cfg.rpcListenPort}
+    rpc-secret=${cfg.rpcSecret}
+  '';
+
+in
+{
+  options = {
+    services.aria2 = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the headless Aria2 daemon service.
+
+          The Aria2 daemon can be controlled via the RPC interface using
+          one of the many available web UIs (http://localhost:6800/ by default).
+
+          Downloads are stored in ${downloadDir} by default and are
+          accessible to users in the "aria2" group.
+        '';
+      };
+      openPorts = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open listen and RPC ports found in listenPortRange and rpcListenPort
+          options in the firewall.
+        '';
+      };
+      downloadDir = mkOption {
+        type = types.path;
+        default = downloadDir;
+        description = lib.mdDoc ''
+          Directory to store downloaded files.
+        '';
+      };
+      listenPortRange = mkOption {
+        type = types.listOf types.attrs;
+        default = [ { from = 6881; to = 6999; } ];
+        description = lib.mdDoc ''
+          Set the UDP listening port range used by DHT (IPv4, IPv6) and the UDP tracker.
+        '';
+      };
+      rpcListenPort = mkOption {
+        type = types.int;
+        default = 6800;
+        description = lib.mdDoc "Specify a port number for the JSON-RPC/XML-RPC server to listen on. Possible values: 1024-65535.";
+      };
+      rpcSecret = mkOption {
+        type = types.str;
+        default = "aria2rpc";
+        description = lib.mdDoc ''
+          Set RPC secret authorization token.
+          See https://aria2.github.io/manual/en/html/aria2c.html#rpc-auth for how this option value is used.
+        '';
+      };
+      extraArguments = mkOption {
+        type = types.separatedString " ";
+        example = "--rpc-listen-all --remote-time=true";
+        default = "";
+        description = lib.mdDoc ''
+          Additional arguments to be passed to Aria2.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    # Need to open ports for proper functioning
+    networking.firewall = mkIf cfg.openPorts {
+      allowedUDPPortRanges = config.services.aria2.listenPortRange;
+      allowedTCPPorts = [ config.services.aria2.rpcListenPort ];
+    };
+
+    users.users.aria2 = {
+      group = "aria2";
+      uid = config.ids.uids.aria2;
+      description = "aria2 user";
+      home = homeDir;
+      createHome = false;
+    };
+
+    users.groups.aria2.gid = config.ids.gids.aria2;
+
+    systemd.tmpfiles.rules = [
+      "d '${homeDir}' 0770 aria2 aria2 - -"
+      "d '${config.services.aria2.downloadDir}' 0770 aria2 aria2 - -"
+    ];
+
+    systemd.services.aria2 = {
+      description = "aria2 Service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        if [[ ! -e "${sessionFile}" ]]
+        then
+          touch "${sessionFile}"
+        fi
+        cp -f "${settingsFile}" "${settingsDir}/aria2.conf"
+      '';
+
+      serviceConfig = {
+        Restart = "on-abort";
+        ExecStart = "${pkgs.aria2}/bin/aria2c --enable-rpc --conf-path=${settingsDir}/aria2.conf ${config.services.aria2.extraArguments} --save-session=${sessionFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        User = "aria2";
+        Group = "aria2";
+      };
+    };
+  };
+}
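
A minimal usage sketch for the aria2 module above; note that rpcSecret ends up in the generated configuration file in the world-readable Nix store, so the placeholder value should be treated accordingly:

```
{
  services.aria2 = {
    enable = true;
    openPorts = true;
    rpcSecret = "change-me";
    extraArguments = "--rpc-listen-all --remote-time=true";
  };
}
```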
diff --git a/nixpkgs/nixos/modules/services/networking/asterisk.nix b/nixpkgs/nixos/modules/services/networking/asterisk.nix
new file mode 100644
index 000000000000..279927781edc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/asterisk.nix
@@ -0,0 +1,232 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.asterisk;
+
+  asteriskUser = "asterisk";
+  asteriskGroup = "asterisk";
+
+  varlibdir = "/var/lib/asterisk";
+  spooldir = "/var/spool/asterisk";
+  logdir = "/var/log/asterisk";
+
+  # Add filecontents from files of useTheseDefaultConfFiles to confFiles, do not override
+  defaultConfFiles = subtractLists (attrNames cfg.confFiles) cfg.useTheseDefaultConfFiles;
+  allConfFiles = {
+    # Default asterisk.conf file
+    "asterisk.conf".text = ''
+      [directories]
+      astetcdir => /etc/asterisk
+      astmoddir => ${cfg.package}/lib/asterisk/modules
+      astvarlibdir => /var/lib/asterisk
+      astdbdir => /var/lib/asterisk
+      astkeydir => /var/lib/asterisk
+      astdatadir => /var/lib/asterisk
+      astagidir => /var/lib/asterisk/agi-bin
+      astspooldir => /var/spool/asterisk
+      astrundir => /run/asterisk
+      astlogdir => /var/log/asterisk
+      astsbindir => ${cfg.package}/sbin
+      ${cfg.extraConfig}
+    '';
+
+    # Loading all modules by default is considered sensible by the authors of
+    # "Asterisk: The Definitive Guide". Secure sites will likely want to
+    # specify their own "modules.conf" in the confFiles option.
+    "modules.conf".text = ''
+      [modules]
+      autoload=yes
+    '';
+
+    # Use syslog for logging so logs can be viewed with journalctl
+    "logger.conf".text = ''
+      [general]
+
+      [logfiles]
+      syslog.local0 => notice,warning,error
+    '';
+  } //
+    mapAttrs (name: text: { inherit text; }) cfg.confFiles //
+    listToAttrs (map (x: nameValuePair x { source = cfg.package + "/etc/asterisk/" + x; }) defaultConfFiles);
+
+in
+
+{
+  options = {
+    services.asterisk = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Asterisk PBX server.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        example = ''
+          [options]
+          verbose=3
+          debug=3
+        '';
+        description = lib.mdDoc ''
+          Extra configuration options appended to the default
+          `asterisk.conf` file.
+        '';
+      };
+
+      confFiles = mkOption {
+        default = {};
+        type = types.attrsOf types.str;
+        example = literalExpression
+          ''
+            {
+              "extensions.conf" = '''
+                [tests]
+                ; Dial 100 for "hello, world"
+                exten => 100,1,Answer()
+                same  =>     n,Wait(1)
+                same  =>     n,Playback(hello-world)
+                same  =>     n,Hangup()
+
+                [softphones]
+                include => tests
+
+                [unauthorized]
+              ''';
+              "sip.conf" = '''
+                [general]
+                allowguest=no              ; Require authentication
+                context=unauthorized       ; Send unauthorized users to /dev/null
+                srvlookup=no               ; Don't do DNS lookup
+                udpbindaddr=0.0.0.0        ; Listen on all interfaces
+                nat=force_rport,comedia    ; Assume device is behind NAT
+
+                [softphone](!)
+                type=friend                ; Match on username first, IP second
+                context=softphones         ; Send to softphones context in
+                                           ; extensions.conf file
+                host=dynamic               ; Device will register with asterisk
+                disallow=all               ; Manually specify codecs to allow
+                allow=g722
+                allow=ulaw
+                allow=alaw
+
+                [myphone](softphone)
+                secret=GhoshevFew          ; Change this password!
+              ''';
+              "logger.conf" = '''
+                [general]
+
+                [logfiles]
+                ; Add debug output to log
+                syslog.local0 => notice,warning,error,debug
+              ''';
+            }
+        '';
+        description = lib.mdDoc ''
+          Sets the content of config files (typically ending with
+          `.conf`) in the Asterisk configuration directory.
+
+          Note that if you want to change `asterisk.conf`, it
+          is preferable to use the {option}`services.asterisk.extraConfig`
+          option over this option. If `"asterisk.conf"` is
+          specified with the {option}`confFiles` option (not recommended),
+          you must be prepared to set your own `astetcdir`
+          path.
+
+          See
+          <https://www.asterisk.org/community/documentation/>
+          for more examples of what is possible here.
+        '';
+      };
+
+      useTheseDefaultConfFiles = mkOption {
+        default = [ "ari.conf" "acl.conf" "agents.conf" "amd.conf" "calendar.conf" "cdr.conf" "cdr_syslog.conf" "cdr_custom.conf" "cel.conf" "cel_custom.conf" "cli_aliases.conf" "confbridge.conf" "dundi.conf" "features.conf" "hep.conf" "iax.conf" "pjsip.conf" "pjsip_wizard.conf" "phone.conf" "phoneprov.conf" "queues.conf" "res_config_sqlite3.conf" "res_parking.conf" "statsd.conf" "udptl.conf" "unistim.conf" ];
+        type = types.listOf types.str;
+        example = [ "sip.conf" "dundi.conf" ];
+        description = lib.mdDoc ''Sets these config files to the default content. The default value for
+          this option contains all necessary files to avoid errors at startup.
+          This does not override settings via {option}`services.asterisk.confFiles`.
+        '';
+      };
+
+      extraArguments = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example =
+          [ "-vvvddd" "-e" "1024" ];
+        description = lib.mdDoc ''
+          Additional command line arguments to pass to Asterisk.
+        '';
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.asterisk;
+        defaultText = literalExpression "pkgs.asterisk";
+        description = lib.mdDoc "The Asterisk package to use.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc = mapAttrs' (name: value:
+      nameValuePair "asterisk/${name}" value
+    ) allConfFiles;
+
+    users.users.asterisk =
+      { name = asteriskUser;
+        group = asteriskGroup;
+        uid = config.ids.uids.asterisk;
+        description = "Asterisk daemon user";
+        home = varlibdir;
+      };
+
+    users.groups.asterisk =
+      { name = asteriskGroup;
+        gid = config.ids.gids.asterisk;
+      };
+
+    systemd.services.asterisk = {
+      description = ''
+        Asterisk PBX server
+      '';
+
+      wantedBy = [ "multi-user.target" ];
+
+      # Do not restart, to avoid disruption of running calls. Restart unit by yourself!
+      restartIfChanged = false;
+
+      preStart = ''
+        # Copy skeleton directory tree to /var
+        for d in '${varlibdir}' '${spooldir}' '${logdir}'; do
+          # TODO: Make exceptions for /var directories that likely should be updated
+          if [ ! -e "$d" ]; then
+            mkdir -p "$d"
+            cp --recursive ${cfg.package}/"$d"/* "$d"/
+            chown --recursive ${asteriskUser}:${asteriskGroup} "$d"
+            find "$d" -type d | xargs chmod 0755
+          fi
+        done
+      '';
+
+      serviceConfig = {
+        ExecStart =
+          let
+            # FIXME: This doesn't account for arguments with spaces
+            argString = concatStringsSep " " cfg.extraArguments;
+          in
+          "${cfg.package}/bin/asterisk -U ${asteriskUser} -C /etc/asterisk/asterisk.conf ${argString} -F";
+        ExecReload = ''${cfg.package}/bin/asterisk -x "core reload"
+          '';
+        Type = "forking";
+        PIDFile = "/run/asterisk/asterisk.pid";
+      };
+    };
+  };
+}
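
A minimal usage sketch for the Asterisk module above, condensed from the module's own confFiles example:

```
{
  services.asterisk = {
    enable = true;
    extraConfig = ''
      [options]
      verbose=3
    '';
    confFiles."extensions.conf" = ''
      [tests]
      ; Dial 100 for "hello, world"
      exten => 100,1,Answer()
      same  =>     n,Playback(hello-world)
      same  =>     n,Hangup()
    '';
  };
}
```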
diff --git a/nixpkgs/nixos/modules/services/networking/atftpd.nix b/nixpkgs/nixos/modules/services/networking/atftpd.nix
new file mode 100644
index 000000000000..e31b447e6c5b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/atftpd.nix
@@ -0,0 +1,65 @@
+# NixOS module for atftpd TFTP server
+
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.atftpd;
+
+in
+
+{
+
+  options = {
+
+    services.atftpd = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable the atftpd TFTP server. By default, the server
+          binds to address 0.0.0.0.
+        '';
+      };
+
+      extraOptions = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = literalExpression ''
+          [ "--bind-address 192.168.9.1"
+            "--verbose=7"
+          ]
+        '';
+        description = lib.mdDoc ''
+          Extra command line arguments to pass to atftp.
+        '';
+      };
+
+      root = mkOption {
+        default = "/srv/tftp";
+        type = types.path;
+        description = lib.mdDoc ''
+          Document root directory for the atftpd.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.atftpd = {
+      description = "TFTP Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      # runs as nobody
+      serviceConfig.ExecStart = "${pkgs.atftp}/sbin/atftpd --daemon --no-fork ${lib.concatStringsSep " " cfg.extraOptions} ${cfg.root}";
+    };
+
+  };
+
+}
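
A minimal usage sketch for the atftpd module above, reusing the module's example values; the bind address is a placeholder:

```
{
  services.atftpd = {
    enable = true;
    root = "/srv/tftp";
    extraOptions = [ "--bind-address 192.168.9.1" ];
  };
}
```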
diff --git a/nixpkgs/nixos/modules/services/networking/autossh.nix b/nixpkgs/nixos/modules/services/networking/autossh.nix
new file mode 100644
index 000000000000..ed9c07d9a147
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/autossh.nix
@@ -0,0 +1,113 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.autossh;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.autossh = {
+
+      sessions = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            name = mkOption {
+              type = types.str;
+              example = "socks-peer";
+              description = lib.mdDoc "Name of the local AutoSSH session";
+            };
+            user = mkOption {
+              type = types.str;
+              example = "bill";
+              description = lib.mdDoc "Name of the user the AutoSSH session should run as";
+            };
+            monitoringPort = mkOption {
+              type = types.int;
+              default = 0;
+              example = 20000;
+              description = lib.mdDoc ''
+                Port to be used by AutoSSH for peer monitoring. Note that
+                AutoSSH also uses mport+1. A value of 0 disables the
+                keep-alive style monitoring.
+              '';
+            };
+            extraArguments = mkOption {
+              type = types.separatedString " ";
+              example = "-N -D4343 bill@socks.example.net";
+              description = lib.mdDoc ''
+                Arguments to be passed to AutoSSH and handed on to the SSH
+                process. Some meaningful options include -N (don't run a remote
+                command), -D (open a SOCKS proxy on a local port), -R (forward
+                a remote port), -L (forward a local port), -v (enable debug
+                output). Check the ssh manual for the complete list.
+              '';
+            };
+          };
+        });
+
+        default = [];
+        description = lib.mdDoc ''
+          List of AutoSSH sessions to start as systemd services. Each service is
+          named 'autossh-{session.name}'.
+        '';
+
+        example = [
+          {
+            name="socks-peer";
+            user="bill";
+            monitoringPort = 20000;
+            extraArguments="-N -D4343 billremote@socks.host.net";
+          }
+        ];
+
+      };
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf (cfg.sessions != []) {
+
+    systemd.services =
+
+      lib.foldr ( s : acc : acc //
+        {
+          "autossh-${s.name}" =
+            let
+              mport = if s ? monitoringPort then s.monitoringPort else 0;
+            in
+            {
+              description = "AutoSSH session (" + s.name + ")";
+
+              after = [ "network.target" ];
+              wantedBy = [ "multi-user.target" ];
+
+              # To be able to start the service with no network connection
+              environment.AUTOSSH_GATETIME="0";
+
+              # How often AutoSSH checks the network, in seconds
+              environment.AUTOSSH_POLL="30";
+
+              serviceConfig = {
+                  User = "${s.user}";
+                  # AutoSSH may exit with 0 code if the SSH session was
+                  # gracefully terminated by either local or remote side.
+                  Restart = "on-success";
+                  ExecStart = "${pkgs.autossh}/bin/autossh -M ${toString mport} ${s.extraArguments}";
+              };
+            };
+        }) {} cfg.sessions;
+
+    environment.systemPackages = [ pkgs.autossh ];
+
+  };
+}
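
A minimal usage sketch for the AutoSSH module above, taken from the sessions option's example; it results in a systemd unit named autossh-socks-peer:

```
{
  services.autossh.sessions = [
    {
      name = "socks-peer";
      user = "bill";
      monitoringPort = 20000;
      extraArguments = "-N -D4343 bill@socks.example.net";
    }
  ];
}
```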
diff --git a/nixpkgs/nixos/modules/services/networking/avahi-daemon.nix b/nixpkgs/nixos/modules/services/networking/avahi-daemon.nix
new file mode 100644
index 000000000000..bdbf9aad9acc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/avahi-daemon.nix
@@ -0,0 +1,314 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.avahi;
+
+  yesNo = yes: if yes then "yes" else "no";
+
+  avahiDaemonConf = with cfg; pkgs.writeText "avahi-daemon.conf" ''
+    [server]
+    ${# Users can set `networking.hostName' to the empty string, when getting
+      # a host name from DHCP.  In that case, let Avahi take whatever the
+      # current host name is; setting `host-name' to the empty string in
+      # `avahi-daemon.conf' would be invalid.
+      optionalString (hostName != "") "host-name=${hostName}"}
+    browse-domains=${concatStringsSep ", " browseDomains}
+    use-ipv4=${yesNo ipv4}
+    use-ipv6=${yesNo ipv6}
+    ${optionalString (allowInterfaces!=null) "allow-interfaces=${concatStringsSep "," allowInterfaces}"}
+    ${optionalString (denyInterfaces!=null) "deny-interfaces=${concatStringsSep "," denyInterfaces}"}
+    ${optionalString (domainName!=null) "domain-name=${domainName}"}
+    allow-point-to-point=${yesNo allowPointToPoint}
+    ${optionalString (cacheEntriesMax!=null) "cache-entries-max=${toString cacheEntriesMax}"}
+
+    [wide-area]
+    enable-wide-area=${yesNo wideArea}
+
+    [publish]
+    disable-publishing=${yesNo (!publish.enable)}
+    disable-user-service-publishing=${yesNo (!publish.userServices)}
+    publish-addresses=${yesNo (publish.userServices || publish.addresses)}
+    publish-hinfo=${yesNo publish.hinfo}
+    publish-workstation=${yesNo publish.workstation}
+    publish-domain=${yesNo publish.domain}
+
+    [reflector]
+    enable-reflector=${yesNo reflector}
+    ${extraConfig}
+  '';
+in
+{
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "avahi" "interfaces" ] [ "services" "avahi" "allowInterfaces" ])
+  ];
+
+  options.services.avahi = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to run the Avahi daemon, which allows Avahi clients
+        to use Avahi's service discovery facilities and also allows
+        the local machine to advertise its presence and services
+        (through the mDNS responder implemented by `avahi-daemon`).
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.avahi;
+      defaultText = literalExpression "pkgs.avahi";
+      description = lib.mdDoc ''
+        The avahi package to use for running the daemon.
+      '';
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      default = config.networking.hostName;
+      defaultText = literalExpression "config.networking.hostName";
+      description = lib.mdDoc ''
+        Host name advertised on the LAN. If not set, avahi will use the value
+        of {option}`config.networking.hostName`.
+      '';
+    };
+
+    domainName = mkOption {
+      type = types.str;
+      default = "local";
+      description = lib.mdDoc ''
+        Domain name for all advertisements.
+      '';
+    };
+
+    browseDomains = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "0pointer.de" "zeroconf.org" ];
+      description = lib.mdDoc ''
+        List of non-local DNS domains to be browsed.
+      '';
+    };
+
+    ipv4 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Whether to use IPv4.";
+    };
+
+    ipv6 = mkOption {
+      type = types.bool;
+      default = config.networking.enableIPv6;
+      defaultText = literalExpression "config.networking.enableIPv6";
+      description = lib.mdDoc "Whether to use IPv6.";
+    };
+
+    allowInterfaces = mkOption {
+      type = types.nullOr (types.listOf types.str);
+      default = null;
+      description = lib.mdDoc ''
+        List of network interfaces that should be used by the {command}`avahi-daemon`.
+        Other interfaces will be ignored. If `null`, all local interfaces
+        except loopback and point-to-point will be used.
+      '';
+    };
+
+    denyInterfaces = mkOption {
+      type = types.nullOr (types.listOf types.str);
+      default = null;
+      description = lib.mdDoc ''
+        List of network interfaces that should be ignored by the
+        {command}`avahi-daemon`. Other unspecified interfaces will be used,
+        unless {option}`allowInterfaces` is set. This option takes precedence
+        over {option}`allowInterfaces`.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to open the firewall for UDP port 5353.
+        Disabling this setting also disables discovering of network devices.
+      '';
+    };
+
+    allowPointToPoint = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to use POINTTOPOINT interfaces. This might make mDNS unreliable due to the usually
+        large latencies of such links, and it opens a potential security hole by allowing mDNS
+        access from Internet connections.
+      '';
+    };
+
+    wideArea = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Whether to enable wide-area service discovery.";
+    };
+
+    reflector = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Reflect incoming mDNS requests to all allowed network interfaces.";
+    };
+
+    extraServiceFiles = mkOption {
+      type = with types; attrsOf (either str path);
+      default = { };
+      example = literalExpression ''
+        {
+          ssh = "''${pkgs.avahi}/etc/avahi/services/ssh.service";
+          smb = '''
+            <?xml version="1.0" standalone='no'?><!--*-nxml-*-->
+            <!DOCTYPE service-group SYSTEM "avahi-service.dtd">
+            <service-group>
+              <name replace-wildcards="yes">%h</name>
+              <service>
+                <type>_smb._tcp</type>
+                <port>445</port>
+              </service>
+            </service-group>
+          ''';
+        }
+      '';
+      description = lib.mdDoc ''
+        Specify custom service definitions which are placed in the avahi service directory.
+        See the {manpage}`avahi.service(5)` manpage for detailed information.
+      '';
+    };
+
+    publish = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to allow publishing in general.";
+      };
+
+      userServices = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to publish user services. Will set `addresses=true`.";
+      };
+
+      addresses = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to register mDNS address records for all local IP addresses.";
+      };
+
+      hinfo = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to register a mDNS HINFO record which contains information about the
+          local operating system and CPU.
+        '';
+      };
+
+      workstation = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to register a service of type "_workstation._tcp" on the local LAN.
+        '';
+      };
+
+      domain = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to announce the locally used domain name for browsing by other hosts.";
+      };
+    };
+
+    nssmdns = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the mDNS NSS (Name Service Switch) plug-in.
+        Enabling it allows applications to resolve names in the `.local`
+        domain by transparently querying the Avahi daemon.
+      '';
+    };
+
+    cacheEntriesMax = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        Number of resource records to be cached per interface. Use 0 to
+        disable caching. Avahi daemon defaults to 4096 if not set.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra config to append to avahi-daemon.conf.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.avahi = {
+      description = "avahi-daemon privilege separation user";
+      home = "/var/empty";
+      group = "avahi";
+      isSystemUser = true;
+    };
+
+    users.groups.avahi = { };
+
+    system.nssModules = optional cfg.nssmdns pkgs.nssmdns;
+    system.nssDatabases.hosts = optionals cfg.nssmdns (mkMerge [
+      (mkBefore [ "mdns_minimal [NOTFOUND=return]" ]) # before resolve
+      (mkAfter [ "mdns" ]) # after dns
+    ]);
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc = (mapAttrs'
+      (n: v: nameValuePair
+        "avahi/services/${n}.service"
+        { ${if types.path.check v then "source" else "text"} = v; }
+      )
+      cfg.extraServiceFiles);
+
+    systemd.sockets.avahi-daemon = {
+      description = "Avahi mDNS/DNS-SD Stack Activation Socket";
+      listenStreams = [ "/run/avahi-daemon/socket" ];
+      wantedBy = [ "sockets.target" ];
+    };
+
+    systemd.tmpfiles.rules = [ "d /run/avahi-daemon - avahi avahi -" ];
+
+    systemd.services.avahi-daemon = {
+      description = "Avahi mDNS/DNS-SD Stack";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "avahi-daemon.socket" ];
+
+      # Make NSS modules visible so that `avahi_nss_support ()' can
+      # return a sensible value.
+      environment.LD_LIBRARY_PATH = config.system.nssModules.path;
+
+      path = [ pkgs.coreutils cfg.package ];
+
+      serviceConfig = {
+        NotifyAccess = "main";
+        BusName = "org.freedesktop.Avahi";
+        Type = "dbus";
+        ExecStart = "${cfg.package}/sbin/avahi-daemon --syslog -f ${avahiDaemonConf}";
+        ConfigurationDirectory = "avahi/services";
+      };
+    };
+
+    services.dbus.enable = true;
+    services.dbus.packages = [ cfg.package ];
+
+    networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ 5353 ];
+  };
+}
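
A minimal usage sketch for the Avahi module above, for a typical desktop setup: run the daemon, publish the host's addresses, and enable `.local` name resolution via the NSS plug-in:

```
{
  services.avahi = {
    enable = true;
    nssmdns = true;
    openFirewall = true;
    publish = {
      enable = true;
      addresses = true;
      workstation = true;
    };
  };
}
```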
diff --git a/nixpkgs/nixos/modules/services/networking/babeld.nix b/nixpkgs/nixos/modules/services/networking/babeld.nix
new file mode 100644
index 000000000000..ff1ac6998ee9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/babeld.nix
@@ -0,0 +1,144 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.babeld;
+
+  conditionalBoolToString = value: if (isBool value) then (boolToString value) else (toString value);
+
+  paramsString = params:
+    concatMapStringsSep " " (name: "${name} ${conditionalBoolToString (getAttr name params)}")
+                   (attrNames params);
+
+  interfaceConfig = name:
+    let
+      interface = getAttr name cfg.interfaces;
+    in
+    "interface ${name} ${paramsString interface}\n";
+
+  configFile = with cfg; pkgs.writeText "babeld.conf" (
+    ''
+      skip-kernel-setup true
+    ''
+    + (optionalString (cfg.interfaceDefaults != null) ''
+      default ${paramsString cfg.interfaceDefaults}
+    '')
+    + (concatMapStrings interfaceConfig (attrNames cfg.interfaces))
+    + extraConfig);
+
+in
+
+{
+
+  meta.maintainers = with maintainers; [ hexa ];
+
+  ###### interface
+
+  options = {
+
+    services.babeld = {
+
+      enable = mkEnableOption (lib.mdDoc "the babeld network routing daemon");
+
+      interfaceDefaults = mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          A set describing default parameters for babeld interfaces.
+          See {manpage}`babeld(8)` for options.
+        '';
+        type = types.nullOr (types.attrsOf types.unspecified);
+        example =
+          {
+            type = "tunnel";
+            split-horizon = true;
+          };
+      };
+
+      interfaces = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          A set describing babeld interfaces.
+          See {manpage}`babeld(8)` for options.
+        '';
+        type = types.attrsOf (types.attrsOf types.unspecified);
+        example =
+          { enp0s2 =
+            { type = "wired";
+              hello-interval = 5;
+              split-horizon = "auto";
+            };
+          };
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Options that will be copied to babeld.conf.
+          See {manpage}`babeld(8)` for details.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.babeld.enable {
+
+    boot.kernel.sysctl = {
+      "net.ipv6.conf.all.forwarding" = 1;
+      "net.ipv6.conf.all.accept_redirects" = 0;
+      "net.ipv4.conf.all.forwarding" = 1;
+      "net.ipv4.conf.all.rp_filter" = 0;
+    } // lib.mapAttrs' (ifname: _: lib.nameValuePair "net.ipv4.conf.${ifname}.rp_filter" (lib.mkDefault 0)) config.services.babeld.interfaces;
+
+    systemd.services.babeld = {
+      description = "Babel routing daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.babeld}/bin/babeld -c ${configFile} -I /run/babeld/babeld.pid -S /var/lib/babeld/state";
+        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
+        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
+        DevicePolicy = "closed";
+        DynamicUser = true;
+        IPAddressAllow = [ "fe80::/64" "ff00::/8" "::1/128" "127.0.0.0/8" ];
+        IPAddressDeny = "any";
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        MemoryDenyWriteExecute = true;
+        ProtectSystem = "strict";
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_NETLINK" "AF_INET6" "AF_INET" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectProc = "invisible";
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = false; # kernel_route(ADD): Operation not permitted
+        ProcSubset = "pid";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged @resources"
+        ];
+        UMask = "0177";
+        RuntimeDirectory = "babeld";
+        StateDirectory = "babeld";
+      };
+    };
+  };
+}
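(A hedged sketch of how services.babeld might be configured, reusing the example values from the option declarations above; the interface name enp0s2 is illustrative.)

  {
    services.babeld = {
      enable = true;
      interfaces.enp0s2 = {
        type = "wired";
        hello-interval = 5;
        split-horizon = "auto";
      };
    };
  }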
diff --git a/nixpkgs/nixos/modules/services/networking/bee-clef.nix b/nixpkgs/nixos/modules/services/networking/bee-clef.nix
new file mode 100644
index 000000000000..75e76f019a71
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/bee-clef.nix
@@ -0,0 +1,107 @@
+{ config, lib, pkgs, ... }:
+
+# NOTE: for now nothing is installed into /etc/bee-clef/. The config files are used read-only from the Nix store.
+
+with lib;
+let
+  cfg = config.services.bee-clef;
+in {
+  meta = {
+    maintainers = with maintainers; [ attila-lendvai ];
+  };
+
+  ### interface
+
+  options = {
+    services.bee-clef = {
+      enable = mkEnableOption (lib.mdDoc "clef external signer instance for Ethereum Swarm Bee");
+
+      dataDir = mkOption {
+        type = types.nullOr types.str;
+        default = "/var/lib/bee-clef";
+        description = lib.mdDoc ''
+          Data dir for bee-clef. Beware that some helper scripts may not work when changed!
+          The service itself should work fine, though.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.nullOr types.str;
+        default = "/var/lib/bee-clef/password";
+        description = lib.mdDoc "Password file for bee-clef.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "bee-clef";
+        description = lib.mdDoc ''
+          User the bee-clef daemon should execute under.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "bee-clef";
+        description = lib.mdDoc ''
+          Group the bee-clef daemon should execute under.
+        '';
+      };
+    };
+  };
+
+  ### implementation
+
+  config = mkIf cfg.enable {
+    # if we ever want to have rules.js under /etc/bee-clef/
+    # environment.etc."bee-clef/rules.js".source = ${pkgs.bee-clef}/rules.js
+
+    systemd.packages = [ pkgs.bee-clef ]; # include the upstream bee-clef.service file
+
+    systemd.tmpfiles.rules = [
+        "d '${cfg.dataDir}/'         0750 ${cfg.user} ${cfg.group}"
+        "d '${cfg.dataDir}/keystore' 0700 ${cfg.user} ${cfg.group}"
+      ];
+
+    systemd.services.bee-clef = {
+      path = [
+        # these are needed for the ensure-clef-account script
+        pkgs.coreutils
+        pkgs.gnused
+        pkgs.gawk
+      ];
+
+      wantedBy = [ "bee.service" "multi-user.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStartPre = ''${pkgs.bee-clef}/share/bee-clef/ensure-clef-account "${cfg.dataDir}" "${pkgs.bee-clef}/share/bee-clef/"'';
+        ExecStart = [
+          "" # this hides/overrides what's in the original entry
+          "${pkgs.bee-clef}/share/bee-clef/bee-clef-service start"
+        ];
+        ExecStop = [
+          "" # this hides/overrides what's in the original entry
+          "${pkgs.bee-clef}/share/bee-clef/bee-clef-service stop"
+        ];
+        Environment = [
+          "CONFIGDIR=${cfg.dataDir}"
+          "PASSWORD_FILE=${cfg.passwordFile}"
+        ];
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "bee-clef") {
+      bee-clef = {
+        group = cfg.group;
+        home = cfg.dataDir;
+        isSystemUser = true;
+        description = "Daemon user for the bee-clef service";
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "bee-clef") {
+      bee-clef = {};
+    };
+  };
+}
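(A minimal sketch of enabling the clef signer with the module defaults shown above; no values here are new.)

  {
    services.bee-clef.enable = true;
    # defaults from above: dataDir = "/var/lib/bee-clef"; passwordFile = "/var/lib/bee-clef/password";
  }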
diff --git a/nixpkgs/nixos/modules/services/networking/bee.nix b/nixpkgs/nixos/modules/services/networking/bee.nix
new file mode 100644
index 000000000000..add9861ebfcd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/bee.nix
@@ -0,0 +1,149 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.bee;
+  format = pkgs.formats.yaml {};
+  configFile = format.generate "bee.yaml" cfg.settings;
+in {
+  meta = {
+    # doc = ./bee.xml;
+    maintainers = with maintainers; [ attila-lendvai ];
+  };
+
+  ### interface
+
+  options = {
+    services.bee = {
+      enable = mkEnableOption (lib.mdDoc "Ethereum Swarm Bee");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.bee;
+        defaultText = literalExpression "pkgs.bee";
+        example = literalExpression "pkgs.bee-unstable";
+        description = lib.mdDoc "The package providing the bee binary for the service.";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        description = lib.mdDoc ''
+          Ethereum Swarm Bee configuration. Refer to
+          <https://gateway.ethswarm.org/bzz/docs.swarm.eth/docs/installation/configuration/>
+          for details on supported values.
+        '';
+      };
+
+      daemonNiceLevel = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Daemon process priority for bee.
+          0 is the default Unix process priority, 19 is the lowest.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "bee";
+        description = lib.mdDoc ''
+          User the bee binary should execute under.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "bee";
+        description = lib.mdDoc ''
+          Group the bee binary should execute under.
+        '';
+      };
+    };
+  };
+
+  ### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = !(hasAttr "password" cfg.settings);
+        message = ''
+          `services.bee.settings.password` is insecure. Use `services.bee.settings.password-file` or `systemd.services.bee.serviceConfig.EnvironmentFile` instead.
+        '';
+      }
+      { assertion = (hasAttr "swap-endpoint" cfg.settings) || (cfg.settings.swap-enable or true == false);
+        message = ''
+          In a swap-enabled network a working Ethereum blockchain node is required. You must specify one using `services.bee.settings.swap-endpoint`, or set `services.bee.settings.swap-enable` to false.
+        '';
+      }
+    ];
+
+    warnings = optional (! config.services.bee-clef.enable) "The bee service requires an external signer. Consider setting `config.services.bee-clef.enable` to true.";
+
+    services.bee.settings = {
+      data-dir             = lib.mkDefault "/var/lib/bee";
+      password-file        = lib.mkDefault "/var/lib/bee/password";
+      clef-signer-enable   = lib.mkDefault true;
+      clef-signer-endpoint = lib.mkDefault "/var/lib/bee-clef/clef.ipc";
+      swap-endpoint        = lib.mkDefault "https://rpc.slock.it/goerli";
+    };
+
+    systemd.packages = [ cfg.package ]; # include the upstream bee.service file
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.settings.data-dir}' 0750 ${cfg.user} ${cfg.group}"
+    ];
+
+    systemd.services.bee = {
+      requires = optional config.services.bee-clef.enable
+        "bee-clef.service";
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Nice = cfg.daemonNiceLevel;
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = [
+          "" # this hides/overrides what's in the original entry
+          "${cfg.package}/bin/bee --config=${configFile} start"
+        ];
+      };
+
+      preStart = with cfg.settings; ''
+        if ! test -f ${password-file}; then
+          < /dev/urandom tr -dc _A-Z-a-z-0-9 2> /dev/null | head -c32 > ${password-file}
+          chmod 0600 ${password-file}
+          echo "Initialized ${password-file} from /dev/urandom"
+        fi
+        if [ ! -f ${data-dir}/keys/libp2p.key ]; then
+          ${cfg.package}/bin/bee init --config=${configFile} >/dev/null
+          echo "
+Logs:   journalctl -f -u bee.service
+
+Bee has SWAP enabled by default and needs an Ethereum endpoint to operate.
+It is recommended to use an external signer with bee.
+Check documentation for more info:
+- SWAP https://docs.ethswarm.org/docs/installation/manual#swap-bandwidth-incentives
+- External signer https://docs.ethswarm.org/docs/installation/bee-clef
+
+After you finish configuration run 'sudo bee-get-addr'."
+        fi
+      '';
+    };
+
+    users.users = optionalAttrs (cfg.user == "bee") {
+      bee = {
+        group = cfg.group;
+        home = cfg.settings.data-dir;
+        isSystemUser = true;
+        description = "Daemon user for Ethereum Swarm Bee";
+        extraGroups = optional config.services.bee-clef.enable
+          config.services.bee-clef.group;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "bee") {
+      bee = {};
+    };
+  };
+}
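(A minimal sketch of a bee configuration that satisfies the assertions and warning above: clef is enabled as the external signer and the swap endpoint is stated explicitly; the URL is the module's own default.)

  {
    services.bee-clef.enable = true;  # external signer, avoids the warning above
    services.bee = {
      enable = true;
      settings.swap-endpoint = "https://rpc.slock.it/goerli";
    };
  }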
diff --git a/nixpkgs/nixos/modules/services/networking/biboumi.nix b/nixpkgs/nixos/modules/services/networking/biboumi.nix
new file mode 100644
index 000000000000..d44a46b35a29
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/biboumi.nix
@@ -0,0 +1,269 @@
+{ config, lib, pkgs, options, ... }:
+with lib;
+let
+  cfg = config.services.biboumi;
+  inherit (config.environment) etc;
+  rootDir = "/run/biboumi/mnt-root";
+  stateDir = "/var/lib/biboumi";
+  settingsFile = pkgs.writeText "biboumi.cfg" (
+    generators.toKeyValue {
+      mkKeyValue = k: v:
+        lib.optionalString (v != null) (generators.mkKeyValueDefault {} "=" k v);
+    } cfg.settings);
+  need_CAP_NET_BIND_SERVICE = cfg.settings.identd_port != 0 && cfg.settings.identd_port < 1024;
+in
+{
+  options = {
+    services.biboumi = {
+      enable = mkEnableOption (lib.mdDoc "the Biboumi XMPP gateway to IRC");
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          See [biboumi 8.5](https://lab.louiz.org/louiz/biboumi/blob/8.5/doc/biboumi.1.rst)
+          for documentation.
+        '';
+        default = {};
+        type = types.submodule {
+          freeformType = with types;
+            (attrsOf (nullOr (oneOf [str int bool]))) // {
+              description = "settings option";
+            };
+          options.admin = mkOption {
+            type = with types; listOf str;
+            default = [];
+            example = ["admin@example.org"];
+            apply = concatStringsSep ":";
+            description = lib.mdDoc ''
+              The bare JID of the gateway administrator. This JID will have more
+              privileges than other standard users, for example some administration
+              ad-hoc commands will only be available to that JID.
+            '';
+          };
+          options.ca_file = mkOption {
+            type = types.path;
+            default = "/etc/ssl/certs/ca-certificates.crt";
+            description = lib.mdDoc ''
+              Specifies which file should be used as the list of trusted CA
+              when negotiating a TLS session.
+            '';
+          };
+          options.db_name = mkOption {
+            type = with types; either path str;
+            default = "${stateDir}/biboumi.sqlite";
+            description = lib.mdDoc ''
+              The name of the database to use.
+            '';
+            example = "postgresql://user:secret@localhost";
+          };
+          options.hostname = mkOption {
+            type = types.str;
+            example = "biboumi.example.org";
+            description = lib.mdDoc ''
+              The hostname served by the XMPP gateway.
+              This domain must be configured in the XMPP server
+              as an external component.
+            '';
+          };
+          options.identd_port = mkOption {
+            type = types.port;
+            default = 113;
+            example = 0;
+            description = lib.mdDoc ''
+              The TCP port on which to listen for identd queries.
+            '';
+          };
+          options.log_level = mkOption {
+            type = types.ints.between 0 3;
+            default = 1;
+            description = lib.mdDoc ''
+              Indicate what type of log messages to write in the logs.
+              0 is debug, 1 is info, 2 is warning, 3 is error.
+            '';
+          };
+          options.password = mkOption {
+            type = with types; nullOr str;
+            description = lib.mdDoc ''
+              The password used to authenticate the XMPP component to your XMPP server.
+              This password must be configured in the XMPP server,
+              associated with the external component on
+              [hostname](#opt-services.biboumi.settings.hostname).
+
+              Set it to null and use [credentialsFile](#opt-services.biboumi.credentialsFile)
+              if you do not want this password to go into the Nix store.
+            '';
+          };
+          options.persistent_by_default = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether all rooms will be persistent by default:
+              the value of the “persistent” option in the global configuration of each
+              user will be “true”, but the value of each individual room will still
+              default to false. This means that a user just needs to change the global
+              “persistent” configuration option to false in order to override this.
+            '';
+          };
+          options.policy_directory = mkOption {
+            type = types.path;
+            default = "${pkgs.biboumi}/etc/biboumi";
+            defaultText = literalExpression ''"''${pkgs.biboumi}/etc/biboumi"'';
+            description = lib.mdDoc ''
+              A directory that should contain the policy files,
+              used to customize Botan’s behaviour
+              when negotiating the TLS connections with the IRC servers.
+            '';
+          };
+          options.port = mkOption {
+            type = types.port;
+            default = 5347;
+            description = lib.mdDoc ''
+              The TCP port to use to connect to the local XMPP component.
+            '';
+          };
+          options.realname_customization = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc ''
+              Whether the users will be able to use
+              the ad-hoc commands that lets them configure
+              their realname and username.
+            '';
+          };
+          options.realname_from_jid = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether the realname and username of each biboumi
+              user will be extracted from their JID.
+              Otherwise they will be set to the nick
+              they used to connect to the IRC server.
+            '';
+          };
+          options.xmpp_server_ip = mkOption {
+            type = types.str;
+            default = "127.0.0.1";
+            description = lib.mdDoc ''
+              The IP address to connect to the XMPP server on.
+              The connection to the XMPP server is unencrypted,
+              so the biboumi instance and the server should
+              normally be on the same host.
+            '';
+          };
+        };
+      };
+
+      credentialsFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to a configuration file to be merged with the settings.
+          Beware not to surround "=" with spaces when setting biboumi's options in this file.
+          Useful to merge a file which is better kept out of the Nix store
+          because it contains sensitive data like
+          [password](#opt-services.biboumi.settings.password).
+        '';
+        default = "/dev/null";
+        example = "/run/keys/biboumi.cfg";
+      };
+
+      openFirewall = mkEnableOption (lib.mdDoc "opening of the identd port in the firewall");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall = mkIf (cfg.openFirewall && cfg.settings.identd_port != 0)
+      { allowedTCPPorts = [ cfg.settings.identd_port ]; };
+
+    systemd.services.biboumi = {
+      description = "Biboumi, XMPP to IRC gateway";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "notify";
+        # Biboumi supports systemd's watchdog.
+        WatchdogSec = 20;
+        Restart = "always";
+        # Use "+" because credentialsFile may not be accessible to User= or Group=.
+        ExecStartPre = [("+" + pkgs.writeShellScript "biboumi-prestart" ''
+          set -eux
+          cat ${settingsFile} '${cfg.credentialsFile}' |
+          install -m 644 /dev/stdin /run/biboumi/biboumi.cfg
+        '')];
+        ExecStart = "${pkgs.biboumi}/bin/biboumi /run/biboumi/biboumi.cfg";
+        ExecReload = "${pkgs.coreutils}/bin/kill -USR1 $MAINPID";
+        # Firewalls needing opening for output connections can still do that
+        # selectively for biboumi with:
+        # users.users.biboumi.isSystemUser = true;
+        # and, for example:
+        # networking.nftables.ruleset = ''
+        #   add rule inet filter output meta skuid biboumi tcp accept
+        # '';
+        DynamicUser = true;
+        RootDirectory = rootDir;
+        RootDirectoryStartOnly = true;
+        InaccessiblePaths = [ "-+${rootDir}" ];
+        RuntimeDirectory = [ "biboumi" (removePrefix "/run/" rootDir) ];
+        RuntimeDirectoryMode = "700";
+        StateDirectory = "biboumi";
+        StateDirectoryMode = "700";
+        MountAPIVFS = true;
+        UMask = "0066";
+        BindPaths = [
+          stateDir
+          # This is for Type="notify"
+          # See https://github.com/systemd/systemd/issues/3544
+          "/run/systemd/notify"
+          "/run/systemd/journal/socket"
+        ];
+        BindReadOnlyPaths = [
+          builtins.storeDir
+          "/etc"
+        ];
+        # The following options are only for optimizing:
+        # systemd-analyze security biboumi
+        AmbientCapabilities = [ (optionalString need_CAP_NET_BIND_SERVICE "CAP_NET_BIND_SERVICE") ];
+        CapabilityBoundingSet = [ (optionalString need_CAP_NET_BIND_SERVICE "CAP_NET_BIND_SERVICE") ];
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateNetwork = mkDefault false;
+        PrivateTmp = true;
+        # PrivateUsers=true breaks AmbientCapabilities=CAP_NET_BIND_SERVICE
+        # See https://bugs.archlinux.org/task/65921
+        PrivateUsers = !need_CAP_NET_BIND_SERVICE;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        # AF_UNIX is for /run/systemd/notify
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallFilter = [
+          "@system-service"
+          # Groups in @system-service which do not contain a syscall
+          # listed by perf stat -e 'syscalls:sys_enter_*' biboumi biboumi.cfg
+          # in tests, and seem likely not necessary for biboumi.
+          # To run such a perf in ExecStart=, you have to:
+          # - AmbientCapabilities="CAP_SYS_ADMIN"
+          # - mount -o remount,mode=755 /sys/kernel/debug/{,tracing}
+          "~@aio" "~@chown" "~@ipc" "~@keyring" "~@resources" "~@setuid" "~@timer"
+        ];
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ julm ];
+}
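(A hedged sketch of a biboumi configuration built from the example values in the option declarations above; the component password is kept out of the Nix store via credentialsFile.)

  {
    services.biboumi = {
      enable = true;
      settings = {
        hostname = "biboumi.example.org";
        admin = [ "admin@example.org" ];
        password = null;                       # provided through credentialsFile instead
      };
      credentialsFile = "/run/keys/biboumi.cfg";
    };
  }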
diff --git a/nixpkgs/nixos/modules/services/networking/bind.nix b/nixpkgs/nixos/modules/services/networking/bind.nix
new file mode 100644
index 000000000000..f1829747bb1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/bind.nix
@@ -0,0 +1,287 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.bind;
+
+  bindPkg = config.services.bind.package;
+
+  bindUser = "named";
+
+  bindZoneCoerce = list: builtins.listToAttrs (lib.forEach list (zone: { name = zone.name; value = zone; }));
+
+  bindZoneOptions = { name, config, ... }: {
+    options = {
+      name = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc "Name of the zone.";
+      };
+      master = mkOption {
+        description = lib.mdDoc "Master=false means slave server";
+        type = types.bool;
+      };
+      file = mkOption {
+        type = types.either types.str types.path;
+        description = lib.mdDoc "Zone file resource records contain columns of data, separated by whitespace, that define the record.";
+      };
+      masters = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "List of servers for inclusion in stub and secondary zones.";
+      };
+      slaves = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "Addresses who may request zone transfers.";
+        default = [ ];
+      };
+      allowQuery = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of address ranges allowed to query this zone. Instead of the address(es), this may
+          contain the single string "any".
+
+          NOTE: This overrides the global-level `allow-query` setting, which is set to the contents
+          of `cachenetworks`.
+        '';
+        default = [ "any" ];
+      };
+      extraConfig = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Extra zone config to be appended at the end of the zone section.";
+        default = "";
+      };
+    };
+  };
+
+  confFile = pkgs.writeText "named.conf"
+    ''
+      include "/etc/bind/rndc.key";
+      controls {
+        inet 127.0.0.1 allow {localhost;} keys {"rndc-key";};
+      };
+
+      acl cachenetworks { ${concatMapStrings (entry: " ${entry}; ") cfg.cacheNetworks} };
+      acl badnetworks { ${concatMapStrings (entry: " ${entry}; ") cfg.blockedNetworks} };
+
+      options {
+        listen-on { ${concatMapStrings (entry: " ${entry}; ") cfg.listenOn} };
+        listen-on-v6 { ${concatMapStrings (entry: " ${entry}; ") cfg.listenOnIpv6} };
+        allow-query { cachenetworks; };
+        blackhole { badnetworks; };
+        forward ${cfg.forward};
+        forwarders { ${concatMapStrings (entry: " ${entry}; ") cfg.forwarders} };
+        directory "${cfg.directory}";
+        pid-file "/run/named/named.pid";
+        ${cfg.extraOptions}
+      };
+
+      ${cfg.extraConfig}
+
+      ${ concatMapStrings
+          ({ name, file, master ? true, slaves ? [], masters ? [], allowQuery ? [], extraConfig ? "" }:
+            ''
+              zone "${name}" {
+                type ${if master then "master" else "slave"};
+                file "${file}";
+                ${ if master then
+                   ''
+                     allow-transfer {
+                       ${concatMapStrings (ip: "${ip};\n") slaves}
+                     };
+                   ''
+                   else
+                   ''
+                     masters {
+                       ${concatMapStrings (ip: "${ip};\n") masters}
+                     };
+                   ''
+                }
+                allow-query { ${concatMapStrings (ip: "${ip}; ") allowQuery}};
+                ${extraConfig}
+              };
+            '')
+          (attrValues cfg.zones) }
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.bind = {
+
+      enable = mkEnableOption (lib.mdDoc "BIND domain name server");
+
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.bind;
+        defaultText = literalExpression "pkgs.bind";
+        description = lib.mdDoc "The BIND package to use.";
+      };
+
+      cacheNetworks = mkOption {
+        default = [ "127.0.0.0/24" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          What networks are allowed to use us as a resolver.  Note
+          that this is for recursive queries -- all networks are
+          allowed to query zones configured with the `zones` option
+          by default (although this may be overridden within each
+          zone's configuration, via the `allowQuery` option).
+          It is recommended that you limit cacheNetworks to avoid your
+          server being used for DNS amplification attacks.
+        '';
+      };
+
+      blockedNetworks = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Networks from which queries are refused (blackholed) by the server.
+        '';
+      };
+
+      ipv4Only = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Only use IPv4, even if the host supports IPv6.
+        '';
+      };
+
+      forwarders = mkOption {
+        default = config.networking.nameservers;
+        defaultText = literalExpression "config.networking.nameservers";
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of servers we should forward requests to.
+        '';
+      };
+
+      forward = mkOption {
+        default = "first";
+        type = types.enum ["first" "only"];
+        description = lib.mdDoc ''
+          Whether to forward 'first' (try forwarding but lookup directly if forwarding fails) or 'only'.
+        '';
+      };
+
+      listenOn = mkOption {
+        default = [ "any" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Interfaces to listen on.
+        '';
+      };
+
+      listenOnIpv6 = mkOption {
+        default = [ "any" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          IPv6 interfaces to listen on.
+        '';
+      };
+
+      directory = mkOption {
+        type = types.str;
+        default = "/run/named";
+        description = lib.mdDoc "Working directory of BIND.";
+      };
+
+      zones = mkOption {
+        default = [ ];
+        type = with types; coercedTo (listOf attrs) bindZoneCoerce (attrsOf (types.submodule bindZoneOptions));
+        description = lib.mdDoc ''
+          List of zones we claim authority over.
+        '';
+        example = {
+          "example.com" = {
+            master = false;
+            file = "/var/dns/example.com";
+            masters = [ "192.168.0.1" ];
+            slaves = [ ];
+            extraConfig = "";
+          };
+        };
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to the generated named configuration file.
+        '';
+      };
+
+      extraOptions = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to the options section of the
+          generated named configuration file.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = confFile;
+        defaultText = literalExpression "confFile";
+        description = lib.mdDoc ''
+          Overridable config file to use for named. By default, the one
+          generated by NixOS.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    networking.resolvconf.useLocalResolver = mkDefault true;
+
+    users.users.${bindUser} =
+      {
+        group = bindUser;
+        description = "BIND daemon user";
+        isSystemUser = true;
+      };
+    users.groups.${bindUser} = {};
+
+    systemd.services.bind = {
+      description = "BIND Domain Name Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        mkdir -m 0755 -p /etc/bind
+        if ! [ -f "/etc/bind/rndc.key" ]; then
+          ${bindPkg.out}/sbin/rndc-confgen -c /etc/bind/rndc.key -u ${bindUser} -a -A hmac-sha256 2>/dev/null
+        fi
+
+        ${pkgs.coreutils}/bin/mkdir -p /run/named
+        chown ${bindUser} /run/named
+
+        ${pkgs.coreutils}/bin/mkdir -p ${cfg.directory}
+        chown ${bindUser} ${cfg.directory}
+      '';
+
+      serviceConfig = {
+        ExecStart = "${bindPkg.out}/sbin/named -u ${bindUser} ${optionalString cfg.ipv4Only "-4"} -c ${cfg.configFile} -f";
+        ExecReload = "${bindPkg.out}/sbin/rndc -k '/etc/bind/rndc.key' reload";
+        ExecStop = "${bindPkg.out}/sbin/rndc -k '/etc/bind/rndc.key' stop";
+      };
+
+      unitConfig.Documentation = "man:named(8)";
+    };
+  };
+}
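(A minimal sketch of a BIND setup with one authoritative zone, using the zone options declared above; addresses and the zone file path are illustrative.)

  {
    services.bind = {
      enable = true;
      cacheNetworks = [ "127.0.0.0/24" "192.168.0.0/24" ];
      zones."example.com" = {
        master = true;
        file = "/var/dns/example.com";
        slaves = [ "192.168.0.2" ];
      };
    };
  }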
diff --git a/nixpkgs/nixos/modules/services/networking/bird-lg.nix b/nixpkgs/nixos/modules/services/networking/bird-lg.nix
new file mode 100644
index 000000000000..dc861dbfd11b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/bird-lg.nix
@@ -0,0 +1,319 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.bird-lg;
+
+  stringOrConcat = sep: v: if builtins.isString v then v else concatStringsSep sep v;
+
+  frontend_args = let
+    fe = cfg.frontend;
+  in {
+    "--servers" = concatStringsSep "," fe.servers;
+    "--domain" = fe.domain;
+    "--listen" = fe.listenAddress;
+    "--proxy-port" = fe.proxyPort;
+    "--whois" = fe.whois;
+    "--dns-interface" = fe.dnsInterface;
+    "--bgpmap-info" = concatStringsSep "," cfg.frontend.bgpMapInfo;
+    "--title-brand" = fe.titleBrand;
+    "--navbar-brand" = fe.navbar.brand;
+    "--navbar-brand-url" = fe.navbar.brandURL;
+    "--navbar-all-servers" = fe.navbar.allServers;
+    "--navbar-all-url" = fe.navbar.allServersURL;
+    "--net-specific-mode" = fe.netSpecificMode;
+    "--protocol-filter" = concatStringsSep "," cfg.frontend.protocolFilter;
+  };
+
+  proxy_args = let
+    px = cfg.proxy;
+  in {
+    "--allowed" = concatStringsSep "," px.allowedIPs;
+    "--bird" = px.birdSocket;
+    "--listen" = px.listenAddress;
+    "--traceroute_bin" = px.traceroute.binary;
+    "--traceroute_flags" = concatStringsSep " " px.traceroute.flags;
+    "--traceroute_raw" = px.traceroute.rawOutput;
+  };
+
+  mkArgValue = value:
+    if isString value
+      then escapeShellArg value
+      else if isBool value
+        then boolToString value
+        else toString value;
+
+  filterNull = filterAttrs (_: v: v != "" && v != null && v != []);
+
+  argsAttrToList = args: mapAttrsToList (name: value: "${name} " + mkArgValue value ) (filterNull args);
+in
+{
+  options = {
+    services.bird-lg = {
+      package = mkOption {
+        type = types.package;
+        default = pkgs.bird-lg;
+        defaultText = literalExpression "pkgs.bird-lg";
+        description = lib.mdDoc "The Bird Looking Glass package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "bird-lg";
+        description = lib.mdDoc "User to run the service.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "bird-lg";
+        description = lib.mdDoc "Group to run the service.";
+      };
+
+      frontend = {
+        enable = mkEnableOption (lib.mdDoc "Bird Looking Glass Frontend Webserver");
+
+        listenAddress = mkOption {
+          type = types.str;
+          default = "127.0.0.1:5000";
+          description = lib.mdDoc "Address to listen on.";
+        };
+
+        proxyPort = mkOption {
+          type = types.port;
+          default = 8000;
+          description = lib.mdDoc "Port bird-lg-proxy is running on.";
+        };
+
+        domain = mkOption {
+          type = types.str;
+          example = "dn42.lantian.pub";
+          description = lib.mdDoc "Server name domain suffixes.";
+        };
+
+        servers = mkOption {
+          type = types.listOf types.str;
+          example = [ "gigsgigscloud" "hostdare" ];
+          description = lib.mdDoc "Server name prefixes.";
+        };
+
+        whois = mkOption {
+          type = types.str;
+          default = "whois.verisign-grs.com";
+          description = lib.mdDoc "Whois server for queries.";
+        };
+
+        dnsInterface = mkOption {
+          type = types.str;
+          default = "asn.cymru.com";
+          description = lib.mdDoc "DNS zone to query ASN information.";
+        };
+
+        bgpMapInfo = mkOption {
+          type = types.listOf types.str;
+          default = [ "asn" "as-name" "ASName" "descr" ];
+          description = lib.mdDoc "Information displayed in bgpmap.";
+        };
+
+        titleBrand = mkOption {
+          type = types.str;
+          default = "Bird-lg Go";
+          description = lib.mdDoc "Prefix of page titles in browser tabs.";
+        };
+
+        netSpecificMode = mkOption {
+          type = types.str;
+          default = "";
+          example = "dn42";
+          description = lib.mdDoc "Apply network-specific changes for some networks.";
+        };
+
+        protocolFilter = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          example = [ "ospf" ];
+          description = lib.mdDoc "Information displayed in bgpmap.";
+        };
+
+        nameFilter = mkOption {
+          type = types.str;
+          default = "";
+          example = "^ospf";
+          description = lib.mdDoc "Protocol names to hide in summary tables (RE2 syntax),";
+        };
+
+        timeout = mkOption {
+          type = types.int;
+          default = 120;
+          description = lib.mdDoc "Time before request timed out, in seconds.";
+        };
+
+        navbar = {
+          brand = mkOption {
+            type = types.str;
+            default = "Bird-lg Go";
+            description = lib.mdDoc "Brand to show in the navigation bar .";
+          };
+
+          brandURL = mkOption {
+            type = types.str;
+            default = "/";
+            description = lib.mdDoc "URL of the brand to show in the navigation bar.";
+          };
+
+          allServers = mkOption {
+            type = types.str;
+            default = "ALL Servers";
+            description = lib.mdDoc "Text of 'All server' button in the navigation bar.";
+          };
+
+          allServersURL = mkOption {
+            type = types.str;
+            default = "all";
+            description = lib.mdDoc "URL of 'All servers' button.";
+          };
+        };
+
+        extraArgs = mkOption {
+          type = with types; either lines (listOf str);
+          default = [ ];
+          description = lib.mdDoc ''
+            Extra parameters documented [here](https://github.com/xddxdd/bird-lg-go#frontend).
+
+            :::{.note}
+            Passing lines (plain strings) is deprecated in favour of passing lists of strings.
+            :::
+          '';
+        };
+      };
+
+      proxy = {
+        enable = mkEnableOption (lib.mdDoc "Bird Looking Glass Proxy");
+
+        listenAddress = mkOption {
+          type = types.str;
+          default = "127.0.0.1:8000";
+          description = lib.mdDoc "Address to listen on.";
+        };
+
+        allowedIPs = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          example = [ "192.168.25.52" "192.168.25.53" ];
+          description = lib.mdDoc "List of IPs to allow (default all allowed).";
+        };
+
+        birdSocket = mkOption {
+          type = types.str;
+          default = "/var/run/bird/bird.ctl";
+          description = lib.mdDoc "Bird control socket path.";
+        };
+
+        traceroute = {
+          binary = mkOption {
+            type = types.str;
+            default = "${pkgs.traceroute}/bin/traceroute";
+            defaultText = literalExpression ''"''${pkgs.traceroute}/bin/traceroute"'';
+            description = lib.mdDoc "Traceroute's binary path.";
+          };
+
+          flags = mkOption {
+            type = with types; listOf str;
+            default = [ ];
+            description = lib.mdDoc "Flags for traceroute process";
+          };
+
+          rawOutput = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Display traceroute output in raw format.";
+          };
+        };
+
+        extraArgs = mkOption {
+          type = with types; either lines (listOf str);
+          default = [ ];
+          description = lib.mdDoc ''
+            Extra parameters documented [here](https://github.com/xddxdd/bird-lg-go#proxy).
+
+            :::{.note}
+            Passing lines (plain strings) is deprecated in favour of passing lists of strings.
+            :::
+          '';
+        };
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = {
+
+    warnings =
+      lib.optional (cfg.frontend.enable && builtins.isString cfg.frontend.extraArgs) ''
+        Passing strings to `services.bird-lg.frontend.extraArgs' is deprecated. Please pass a list of strings instead.
+      ''
+      ++ lib.optional (cfg.proxy.enable && builtins.isString cfg.proxy.extraArgs) ''
+        Passing strings to `services.bird-lg.proxy.extraArgs' is deprecated. Please pass a list of strings instead.
+      ''
+    ;
+
+    systemd.services = {
+      bird-lg-frontend = mkIf cfg.frontend.enable {
+        enable = true;
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        description = "Bird Looking Glass Frontend Webserver";
+        serviceConfig = {
+          Type = "simple";
+          Restart = "on-failure";
+          ProtectSystem = "full";
+          ProtectHome = "yes";
+          MemoryDenyWriteExecute = "yes";
+          User = cfg.user;
+          Group = cfg.group;
+        };
+        script = ''
+          ${cfg.package}/bin/frontend \
+            ${concatStringsSep " \\\n  " (argsAttrToList frontend_args)} \
+            ${stringOrConcat " " cfg.frontend.extraArgs}
+        '';
+      };
+
+      bird-lg-proxy = mkIf cfg.proxy.enable {
+        enable = true;
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        description = "Bird Looking Glass Proxy";
+        serviceConfig = {
+          Type = "simple";
+          Restart = "on-failure";
+          ProtectSystem = "full";
+          ProtectHome = "yes";
+          MemoryDenyWriteExecute = "yes";
+          User = cfg.user;
+          Group = cfg.group;
+        };
+        script = ''
+          ${cfg.package}/bin/proxy \
+            ${concatStringsSep " \\\n  " (argsAttrToList proxy_args)} \
+            ${stringOrConcat " " cfg.proxy.extraArgs}
+        '';
+      };
+    };
+    users = mkIf (cfg.frontend.enable || cfg.proxy.enable) {
+      groups."bird-lg" = mkIf (cfg.group == "bird-lg") { };
+      users."bird-lg" = mkIf (cfg.user == "bird-lg") {
+        description = "Bird Looking Glass user";
+        extraGroups = lib.optionals (config.services.bird2.enable) [ "bird2" ];
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [
+    e1mo
+    tchekda
+  ];
+}
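(A hedged sketch running both halves of bird-lg on one host, reusing example values from the option declarations above.)

  {
    services.bird-lg = {
      proxy = {
        enable = true;
        allowedIPs = [ "192.168.25.52" ];
      };
      frontend = {
        enable = true;
        domain = "dn42.lantian.pub";
        servers = [ "gigsgigscloud" "hostdare" ];
      };
    };
  }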
diff --git a/nixpkgs/nixos/modules/services/networking/bird.nix b/nixpkgs/nixos/modules/services/networking/bird.nix
new file mode 100644
index 000000000000..9deeb7694d2a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/bird.nix
@@ -0,0 +1,102 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption optionalString types;
+
+  cfg = config.services.bird2;
+  caps = [ "CAP_NET_ADMIN" "CAP_NET_BIND_SERVICE" "CAP_NET_RAW" ];
+in
+{
+  ###### interface
+  options = {
+    services.bird2 = {
+      enable = mkEnableOption (lib.mdDoc "BIRD Internet Routing Daemon");
+      config = mkOption {
+        type = types.lines;
+        description = lib.mdDoc ''
+          BIRD Internet Routing Daemon configuration file.
+          <http://bird.network.cz/>
+        '';
+      };
+      checkConfig = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether the config should be checked at build time.
+          When the config can't be checked during build time, for example when it includes
+          other files, either disable this option or use `preCheckConfig` to create
+          the included files before checking.
+        '';
+      };
+      preCheckConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          echo "cost 100;" > include.conf
+        '';
+        description = lib.mdDoc ''
+          Commands to execute before the config file check. The file to be checked will be
+          available as `bird2.conf` in the current directory.
+
+          Files created with this option will not be available at service runtime, only during
+          build time checking.
+        '';
+      };
+    };
+  };
+
+
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "bird" ] "Use services.bird2 instead")
+    (lib.mkRemovedOptionModule [ "services" "bird6" ] "Use services.bird2 instead")
+  ];
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.bird ];
+
+    environment.etc."bird/bird2.conf".source = pkgs.writeTextFile {
+      name = "bird2";
+      text = cfg.config;
+      checkPhase = optionalString cfg.checkConfig ''
+        ln -s $out bird2.conf
+        ${cfg.preCheckConfig}
+        ${pkgs.buildPackages.bird}/bin/bird -d -p -c bird2.conf
+      '';
+    };
+
+    systemd.services.bird2 = {
+      description = "BIRD Internet Routing Daemon";
+      wantedBy = [ "multi-user.target" ];
+      reloadTriggers = [ config.environment.etc."bird/bird2.conf".source ];
+      serviceConfig = {
+        Type = "forking";
+        Restart = "on-failure";
+        User = "bird2";
+        Group = "bird2";
+        ExecStart = "${pkgs.bird}/bin/bird -c /etc/bird/bird2.conf";
+        ExecReload = "${pkgs.bird}/bin/birdc configure";
+        ExecStop = "${pkgs.bird}/bin/birdc down";
+        RuntimeDirectory = "bird";
+        CapabilityBoundingSet = caps;
+        AmbientCapabilities = caps;
+        ProtectSystem = "full";
+        ProtectHome = "yes";
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        SystemCallFilter = "~@cpu-emulation @debug @keyring @module @mount @obsolete @raw-io";
+        MemoryDenyWriteExecute = "yes";
+      };
+    };
+    users = {
+      users.bird2 = {
+        description = "BIRD Internet Routing Daemon user";
+        group = "bird2";
+        isSystemUser = true;
+      };
+      groups.bird2 = { };
+    };
+  };
+}
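(A minimal sketch of services.bird2 with an inline configuration; the BIRD snippet is an illustrative assumption and, with checkConfig left at its default, would be parsed at build time by the checkPhase above.)

  {
    services.bird2 = {
      enable = true;
      config = ''
        router id 192.0.2.1;

        protocol device {
        }
      '';
    };
  }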
diff --git a/nixpkgs/nixos/modules/services/networking/birdwatcher.nix b/nixpkgs/nixos/modules/services/networking/birdwatcher.nix
new file mode 100644
index 000000000000..a129b7a2b4cf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/birdwatcher.nix
@@ -0,0 +1,129 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.birdwatcher;
+in
+{
+  options = {
+    services.birdwatcher = {
+      package = mkOption {
+        type = types.package;
+        default = pkgs.birdwatcher;
+        defaultText = literalExpression "pkgs.birdwatcher";
+        description = lib.mdDoc "The Birdwatcher package to use.";
+      };
+      enable = mkEnableOption (lib.mdDoc "Birdwatcher");
+      flags = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        example = [ "-worker-pool-size 16" "-6" ];
+        description = lib.mdDoc ''
+          Flags to append to the program call
+        '';
+      };
+
+      settings = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Birdwatcher configuration. For available options, see the example configuration on [GitHub](https://github.com/alice-lg/birdwatcher/blob/master/etc/birdwatcher/birdwatcher.conf).
+        '';
+        example = literalExpression ''
+          [server]
+          allow_from = []
+          allow_uncached = false
+          modules_enabled = ["status",
+                             "protocols",
+                             "protocols_bgp",
+                             "protocols_short",
+                             "routes_protocol",
+                             "routes_peer",
+                             "routes_table",
+                             "routes_table_filtered",
+                             "routes_table_peer",
+                             "routes_filtered",
+                             "routes_prefixed",
+                             "routes_noexport",
+                             "routes_pipe_filtered_count",
+                             "routes_pipe_filtered"
+                            ]
+
+          [status]
+          reconfig_timestamp_source = "bird"
+          reconfig_timestamp_match = "# created: (.*)"
+
+          filter_fields = []
+
+          [bird]
+          listen = "0.0.0.0:29184"
+          config = "/etc/bird/bird2.conf"
+          birdc  = "''${pkgs.bird}/bin/birdc"
+          ttl = 5 # time to live (in minutes) for caching of cli output
+
+          [parser]
+          filter_fields = []
+
+          [cache]
+          use_redis = false # if not using redis cache, activate housekeeping to save memory!
+
+          [housekeeping]
+          interval = 5
+          force_release_memory = true
+        '';
+      };
+    };
+  };
+
+  config =
+    let flagsStr = escapeShellArgs cfg.flags;
+    in lib.mkIf cfg.enable {
+      environment.etc."birdwatcher/birdwatcher.conf".source = pkgs.writeTextFile {
+        name = "birdwatcher.conf";
+        text = cfg.settings;
+      };
+      systemd.services = {
+        birdwatcher = {
+          wants = [ "network.target" ];
+          after = [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+          description = "Birdwatcher";
+          serviceConfig = {
+            Type = "simple";
+            Restart = "on-failure";
+            RestartSec = 15;
+            ExecStart = "${cfg.package}/bin/birdwatcher";
+            StateDirectoryMode = "0700";
+            UMask = "0117";
+            NoNewPrivileges = true;
+            ProtectSystem = "strict";
+            PrivateTmp = true;
+            PrivateDevices = true;
+            ProtectHostname = true;
+            ProtectClock = true;
+            ProtectKernelTunables = true;
+            ProtectKernelModules = true;
+            ProtectKernelLogs = true;
+            ProtectControlGroups = true;
+            RestrictAddressFamilies = [ "AF_UNIX AF_INET AF_INET6" ];
+            LockPersonality = true;
+            MemoryDenyWriteExecute = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            PrivateMounts = true;
+            SystemCallArchitectures = "native";
+            SystemCallFilter = "~@clock @privileged @cpu-emulation @debug @keyring @module @mount @obsolete @raw-io @reboot @setuid @swap";
+            BindReadOnlyPaths = [
+              "-/etc/resolv.conf"
+              "-/etc/nsswitch.conf"
+              "-/etc/ssl/certs"
+              "-/etc/static/ssl/certs"
+              "-/etc/hosts"
+              "-/etc/localtime"
+            ];
+          };
+        };
+      };
+    };
+}
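(A hedged sketch of a birdwatcher configuration, trimmed from the module's own example settings above.)

  { pkgs, ... }: {
    services.birdwatcher = {
      enable = true;
      settings = ''
        [server]
        allow_from = []
        modules_enabled = ["status", "protocols"]

        [bird]
        listen = "0.0.0.0:29184"
        config = "/etc/bird/bird2.conf"
        birdc  = "${pkgs.bird}/bin/birdc"
      '';
    };
  }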
diff --git a/nixpkgs/nixos/modules/services/networking/bitcoind.nix b/nixpkgs/nixos/modules/services/networking/bitcoind.nix
new file mode 100644
index 000000000000..a48066b43b16
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/bitcoind.nix
@@ -0,0 +1,260 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  eachBitcoind = filterAttrs (bitcoindName: cfg: cfg.enable) config.services.bitcoind;
+
+  rpcUserOpts = { name, ... }: {
+    options = {
+      name = mkOption {
+        type = types.str;
+        example = "alice";
+        description = lib.mdDoc ''
+          Username for JSON-RPC connections.
+        '';
+      };
+      passwordHMAC = mkOption {
+        type = types.uniq (types.strMatching "[0-9a-f]+\\$[0-9a-f]{64}");
+        example = "f7efda5c189b999524f151318c0c86$d5b51b3beffbc02b724e5d095828e0bc8b2456e9ac8757ae3211a5d9b16a22ae";
+        description = lib.mdDoc ''
+          Password HMAC-SHA-256 for JSON-RPC connections. Must be a string of the
+          format \<SALT-HEX\>$\<HMAC-HEX\>.
+
+          Tool (Python script) for HMAC generation is available here:
+          <https://github.com/bitcoin/bitcoin/blob/master/share/rpcauth/rpcauth.py>
+        '';
+      };
+    };
+    config = {
+      name = mkDefault name;
+    };
+  };
+
+  bitcoindOpts = { config, lib, name, ...}: {
+    options = {
+
+      enable = mkEnableOption (lib.mdDoc "Bitcoin daemon");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.bitcoind;
+        defaultText = literalExpression "pkgs.bitcoind";
+        description = lib.mdDoc "The package providing bitcoin binaries.";
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/lib/${name}/bitcoin.conf";
+        description = lib.mdDoc "The configuration file path to supply bitcoind.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          par=16
+          rpcthreads=16
+          logips=1
+        '';
+        description = lib.mdDoc "Additional configurations to be appended to {file}`bitcoin.conf`.";
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/bitcoind-${name}";
+        description = lib.mdDoc "The data directory for bitcoind.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "bitcoind-${name}";
+        description = lib.mdDoc "The user as which to run bitcoind.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = config.user;
+        description = lib.mdDoc "The group as which to run bitcoind.";
+      };
+
+      rpc = {
+        port = mkOption {
+          type = types.nullOr types.port;
+          default = null;
+          description = lib.mdDoc "Override the default port on which to listen for JSON-RPC connections.";
+        };
+        users = mkOption {
+          default = {};
+          example = literalExpression ''
+            {
+              alice.passwordHMAC = "f7efda5c189b999524f151318c0c86$d5b51b3beffbc02b724e5d095828e0bc8b2456e9ac8757ae3211a5d9b16a22ae";
+              bob.passwordHMAC = "b2dd077cb54591a2f3139e69a897ac$4e71f08d48b4347cf8eff3815c0e25ae2e9a4340474079f55705f40574f4ec99";
+            }
+          '';
+          type = types.attrsOf (types.submodule rpcUserOpts);
+          description = lib.mdDoc "RPC user information for JSON-RPC connections.";
+        };
+      };
+
+      pidFile = mkOption {
+        type = types.path;
+        default = "${config.dataDir}/bitcoind.pid";
+        description = lib.mdDoc "Location of bitcoind pid file.";
+      };
+
+      testnet = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to use the testnet instead of mainnet.";
+      };
+
+      port = mkOption {
+        type = types.nullOr types.port;
+        default = null;
+        description = lib.mdDoc "Override the default port on which to listen for connections.";
+      };
+
+      dbCache = mkOption {
+        type = types.nullOr (types.ints.between 4 16384);
+        default = null;
+        example = 4000;
+        description = lib.mdDoc "Override the default database cache size in MiB.";
+      };
+
+      prune = mkOption {
+        type = types.nullOr (types.coercedTo
+          (types.enum [ "disable" "manual" ])
+          (x: if x == "disable" then 0 else 1)
+          types.ints.unsigned
+        );
+        default = null;
+        example = 10000;
+        description = lib.mdDoc ''
+          Reduce storage requirements by enabling pruning (deleting) of old
+          blocks. This allows the pruneblockchain RPC to be called to delete
+          specific blocks, and enables automatic pruning of old blocks if a
+          target size in MiB is provided. This mode is incompatible with -txindex
+          and -rescan. Warning: Reverting this setting requires re-downloading
+          the entire blockchain. ("disable" = disable pruning blocks, "manual"
+          = allow manual pruning via RPC, >=550 = automatically prune block files
+          to stay under the specified target size in MiB).
+        '';
+      };
+
+      extraCmdlineOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Extra command line options to pass to bitcoind.
+          Run bitcoind --help to list all available options.
+        '';
+      };
+    };
+  };
+in
+{
+
+  options = {
+    services.bitcoind = mkOption {
+      type = types.attrsOf (types.submodule bitcoindOpts);
+      default = {};
+      description = lib.mdDoc "Specification of one or more bitcoind instances.";
+    };
+  };
+
+  config = mkIf (eachBitcoind != {}) {
+
+    assertions = flatten (mapAttrsToList (bitcoindName: cfg: [
+    {
+      assertion = (cfg.prune != null) -> (builtins.elem cfg.prune [ "disable" "manual" 0 1 ] || (builtins.isInt cfg.prune && cfg.prune >= 550));
+      message = ''
+        If set, services.bitcoind.${bitcoindName}.prune has to be "disable", "manual", 0, 1 or >= 550.
+      '';
+    }
+    {
+      assertion = (cfg.rpc.users != {}) -> (cfg.configFile == null);
+      message = ''
+        You cannot set both services.bitcoind.${bitcoindName}.rpc.users and services.bitcoind.${bitcoindName}.configFile,
+        as they are mutually exclusive: the RPC user settings would have no effect when a custom configFile is used.
+      '';
+    }
+    ]) eachBitcoind);
+
+    environment.systemPackages = flatten (mapAttrsToList (bitcoindName: cfg: [
+      cfg.package
+    ]) eachBitcoind);
+
+    systemd.services = mapAttrs' (bitcoindName: cfg: (
+      nameValuePair "bitcoind-${bitcoindName}" (
+      let
+        configFile = pkgs.writeText "bitcoin.conf" ''
+          # If Testnet is enabled, we need to add [test] section
+          # otherwise, some options (e.g.: custom RPC port) will not work
+          ${optionalString cfg.testnet "[test]"}
+          # RPC users
+          ${concatMapStringsSep "\n"
+            (rpcUser: "rpcauth=${rpcUser.name}:${rpcUser.passwordHMAC}")
+            (attrValues cfg.rpc.users)
+          }
+          # Extra config options (from bitcoind nixos service)
+          ${cfg.extraConfig}
+        '';
+      in {
+        description = "Bitcoin daemon";
+        after = [ "network-online.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+          ExecStart = ''
+            ${cfg.package}/bin/bitcoind \
+            ${if (cfg.configFile != null) then
+              "-conf=${cfg.configFile}"
+            else
+              "-conf=${configFile}"
+            } \
+            -datadir=${cfg.dataDir} \
+            -pid=${cfg.pidFile} \
+            ${optionalString cfg.testnet "-testnet"}\
+            ${optionalString (cfg.port != null) "-port=${toString cfg.port}"}\
+            ${optionalString (cfg.prune != null) "-prune=${toString cfg.prune}"}\
+            ${optionalString (cfg.dbCache != null) "-dbcache=${toString cfg.dbCache}"}\
+            ${optionalString (cfg.rpc.port != null) "-rpcport=${toString cfg.rpc.port}"}\
+            ${toString cfg.extraCmdlineOptions}
+          '';
+          Restart = "on-failure";
+
+          # Hardening measures
+          PrivateTmp = "true";
+          ProtectSystem = "full";
+          NoNewPrivileges = "true";
+          PrivateDevices = "true";
+          MemoryDenyWriteExecute = "true";
+        };
+      }
+    ))) eachBitcoind;
+
+    systemd.tmpfiles.rules = flatten (mapAttrsToList (bitcoindName: cfg: [
+      "d '${cfg.dataDir}' 0770 '${cfg.user}' '${cfg.group}' - -"
+    ]) eachBitcoind);
+
+    users.users = mapAttrs' (bitcoindName: cfg: (
+      nameValuePair "bitcoind-${bitcoindName}" {
+      name = cfg.user;
+      group = cfg.group;
+      description = "Bitcoin daemon user";
+      home = cfg.dataDir;
+      isSystemUser = true;
+    })) eachBitcoind;
+
+    users.groups = mapAttrs' (bitcoindName: cfg: (
+      nameValuePair "${cfg.group}" { }
+    )) eachBitcoind;
+
+  };
+
+  meta.maintainers = with maintainers; [ _1000101 ];
+
+}
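
As a quick orientation, a usage sketch for the options declared above; the instance name "mainnet" is arbitrary and the password HMAC is copied from the module's own example (it is not a real credential):

  services.bitcoind.mainnet = {
    prune = 10000;       # automatically prune block files to stay under ~10000 MiB
    dbCache = 4000;      # database cache size in MiB
    rpc.port = 8332;     # override the default JSON-RPC port
    rpc.users.alice.passwordHMAC =
      "f7efda5c189b999524f151318c0c86$d5b51b3beffbc02b724e5d095828e0bc8b2456e9ac8757ae3211a5d9b16a22ae";
    extraConfig = ''
      logips=1
    '';
  };
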
diff --git a/nixpkgs/nixos/modules/services/networking/bitlbee.nix b/nixpkgs/nixos/modules/services/networking/bitlbee.nix
new file mode 100644
index 000000000000..146bffaa6edf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/bitlbee.nix
@@ -0,0 +1,190 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.bitlbee;
+  bitlbeeUid = config.ids.uids.bitlbee;
+
+  bitlbeePkg = pkgs.bitlbee.override {
+    enableLibPurple = cfg.libpurple_plugins != [];
+    enablePam = cfg.authBackend == "pam";
+  };
+
+  bitlbeeConfig = pkgs.writeText "bitlbee.conf"
+    ''
+    [settings]
+    RunMode = Daemon
+    ConfigDir = ${cfg.configDir}
+    DaemonInterface = ${cfg.interface}
+    DaemonPort = ${toString cfg.portNumber}
+    AuthMode = ${cfg.authMode}
+    AuthBackend = ${cfg.authBackend}
+    Plugindir = ${pkgs.bitlbee-plugins cfg.plugins}/lib/bitlbee
+    ${lib.optionalString (cfg.hostName != "") "HostName = ${cfg.hostName}"}
+    ${lib.optionalString (cfg.protocols != "") "Protocols = ${cfg.protocols}"}
+    ${cfg.extraSettings}
+
+    [defaults]
+    ${cfg.extraDefaults}
+    '';
+
+  purple_plugin_path =
+    lib.concatMapStringsSep ":"
+      (plugin: "${plugin}/lib/pidgin/:${plugin}/lib/purple-2/")
+      cfg.libpurple_plugins
+    ;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.bitlbee = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run the BitlBee IRC to other chat network gateway.
+          Running it allows you to access the MSN, Jabber, Yahoo! and ICQ chat
+          networks via an IRC client.
+        '';
+      };
+
+      interface = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          The interface the BitlBee daemon will be listening to.  If `127.0.0.1`,
+          only clients on the local host can connect to it; if `0.0.0.0`, clients
+          can access it from any network interface.
+        '';
+      };
+
+      portNumber = mkOption {
+        default = 6667;
+        type = types.port;
+        description = lib.mdDoc ''
+          Number of the port BitlBee will be listening to.
+        '';
+      };
+
+      authBackend = mkOption {
+        default = "storage";
+        type = types.enum [ "storage" "pam" ];
+        description = lib.mdDoc ''
+          How users are authenticated
+            storage -- save passwords internally
+            pam -- Linux PAM authentication
+        '';
+      };
+
+      authMode = mkOption {
+        default = "Open";
+        type = types.enum [ "Open" "Closed" "Registered" ];
+        description = lib.mdDoc ''
+          The following authentication modes are available:
+            Open -- Accept connections from anyone, use NickServ for user authentication.
+            Closed -- Require authorization (using the PASS command during login) before allowing the user to connect at all.
+            Registered -- Only allow registered users to use this server; this disables the register and account commands until the user identifies himself.
+        '';
+      };
+
+      hostName = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          Normally, BitlBee gets a hostname using getsockname(). If you have a nicer
+          alias for your BitlBee daemon, you can set it here and BitlBee will identify
+          itself with that name instead.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.bitlbee-facebook ]";
+        description = lib.mdDoc ''
+          The list of bitlbee plugins to install.
+        '';
+      };
+
+      libpurple_plugins = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.purple-matrix ]";
+        description = lib.mdDoc ''
+          The list of libpurple plugins to install.
+        '';
+      };
+
+      configDir = mkOption {
+        default = "/var/lib/bitlbee";
+        type = types.path;
+        description = lib.mdDoc ''
+          Specify an alternative directory to store all the per-user configuration
+          files.
+        '';
+      };
+
+      protocols = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc ''
+          This option allows removing support for protocols, even if they were compiled
+          in. If nothing is given, there are no restrictions.
+        '';
+      };
+
+      extraSettings = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Will be inserted in the Settings section of the config file.
+        '';
+      };
+
+      extraDefaults = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Will be inserted in the Default section of the config file.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config =  mkMerge [
+    (mkIf config.services.bitlbee.enable {
+      systemd.services.bitlbee = {
+        environment.PURPLE_PLUGIN_PATH = purple_plugin_path;
+        description = "BitlBee IRC to other chat networks gateway";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          DynamicUser = true;
+          StateDirectory = "bitlbee";
+          ReadWritePaths = [ cfg.configDir ];
+          ExecStart = "${bitlbeePkg}/sbin/bitlbee -F -n -c ${bitlbeeConfig}";
+        };
+      };
+
+      environment.systemPackages = [ bitlbeePkg ];
+
+    })
+    (mkIf (config.services.bitlbee.authBackend == "pam") {
+      security.pam.services.bitlbee = {};
+    })
+  ];
+
+}
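
A usage sketch for the BitlBee module; the plugin choices mirror the option examples above and the port is the module default:

  services.bitlbee = {
    enable = true;
    interface = "127.0.0.1";            # only clients on this host may connect
    portNumber = 6667;
    authMode = "Registered";
    plugins = [ pkgs.bitlbee-facebook ];
    libpurple_plugins = [ pkgs.purple-matrix ];
  };
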
diff --git a/nixpkgs/nixos/modules/services/networking/blockbook-frontend.nix b/nixpkgs/nixos/modules/services/networking/blockbook-frontend.nix
new file mode 100644
index 000000000000..46b26195d211
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/blockbook-frontend.nix
@@ -0,0 +1,278 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  eachBlockbook = config.services.blockbook-frontend;
+
+  blockbookOpts = { config, lib, name, ...}: {
+
+    options = {
+
+      enable = mkEnableOption (lib.mdDoc "blockbook-frontend application");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.blockbook;
+        defaultText = literalExpression "pkgs.blockbook";
+        description = lib.mdDoc "Which blockbook package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "blockbook-frontend-${name}";
+        description = lib.mdDoc "The user as which to run blockbook-frontend-${name}.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "${config.user}";
+        description = lib.mdDoc "The group as which to run blockbook-frontend-${name}.";
+      };
+
+      certFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/etc/secrets/blockbook-frontend-${name}/certFile";
+        description = lib.mdDoc ''
+          To enable SSL, specify the path to the certificate files without the extension;
+          {file}`certFile.crt` and {file}`certFile.key` are expected.
+        '';
+      };
+
+      configFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "${config.dataDir}/config.json";
+        description = lib.mdDoc "Location of the blockbook configuration file.";
+      };
+
+      coinName = mkOption {
+        type = types.str;
+        default = "Bitcoin";
+        description = lib.mdDoc ''
+          See <https://github.com/trezor/blockbook/blob/master/bchain/coins/blockchain.go#L61>
+          for the current list of coins supported in master (Note: may differ from release).
+        '';
+      };
+
+      cssDir = mkOption {
+        type = types.path;
+        default = "${config.package}/share/css/";
+        defaultText = literalExpression ''"''${package}/share/css/"'';
+        example = literalExpression ''"''${dataDir}/static/css/"'';
+        description = lib.mdDoc ''
+          Location of the directory containing the {file}`main.css` file.
+          By default, the one shipped with the package is used.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/blockbook-frontend-${name}";
+        description = lib.mdDoc "Location of blockbook-frontend-${name} data directory.";
+      };
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Debug mode, return more verbose errors, reload templates on each request.";
+      };
+
+      internal = mkOption {
+        type = types.nullOr types.str;
+        default = ":9030";
+        description = lib.mdDoc "Internal http server binding `[address]:port`.";
+      };
+
+      messageQueueBinding = mkOption {
+        type = types.str;
+        default = "tcp://127.0.0.1:38330";
+        description = lib.mdDoc "Message Queue Binding `address:port`.";
+      };
+
+      public = mkOption {
+        type = types.nullOr types.str;
+        default = ":9130";
+        description = lib.mdDoc "Public http server binding `[address]:port`.";
+      };
+
+      rpc = {
+        url = mkOption {
+          type = types.str;
+          default = "http://127.0.0.1";
+          description = lib.mdDoc "URL for JSON-RPC connections.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8030;
+          description = lib.mdDoc "Port for JSON-RPC connections.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "rpc";
+          description = lib.mdDoc "Username for JSON-RPC connections.";
+        };
+
+        password = mkOption {
+          type = types.str;
+          default = "rpc";
+          description = lib.mdDoc ''
+            RPC password for JSON-RPC connections.
+            Warning: this is stored in cleartext in the Nix store!!!
+            Use `configFile` or `passwordFile` if needed.
+          '';
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          description = lib.mdDoc ''
+            File containing password of the RPC user.
+            Note: This option is ignored when `configFile` is used.
+          '';
+        };
+      };
+
+      sync = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Synchronizes until tip, if together with zeromq, keeps index synchronized.";
+      };
+
+      templateDir = mkOption {
+        type = types.path;
+        default = "${config.package}/share/templates/";
+        defaultText = literalExpression ''"''${package}/share/templates/"'';
+        example = literalExpression ''"''${dataDir}/templates/static/"'';
+        description = lib.mdDoc "Location of the HTML templates. By default, ones shipped with the package are used.";
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        example = literalExpression '' {
+          "alternative_estimate_fee" = "whatthefee-disabled";
+          "alternative_estimate_fee_params" = "{\"url\": \"https://whatthefee.io/data.json\", \"periodSeconds\": 60}";
+          "fiat_rates" = "coingecko";
+          "fiat_rates_params" = "{\"url\": \"https://api.coingecko.com/api/v3\", \"coin\": \"bitcoin\", \"periodSeconds\": 60}";
+          "coin_shortcut" = "BTC";
+          "coin_label" = "Bitcoin";
+          "parse" = true;
+          "subversion" = "";
+          "address_format" = "";
+          "xpub_magic" = 76067358;
+          "xpub_magic_segwit_p2sh" = 77429938;
+          "xpub_magic_segwit_native" = 78792518;
+          "mempool_workers" = 8;
+          "mempool_sub_workers" = 2;
+          "block_addresses_to_keep" = 300;
+        }'';
+        description = lib.mdDoc ''
+          Additional configuration to be appended to {file}`coin.conf`.
+          Overrides any already defined configuration options.
+          See <https://github.com/trezor/blockbook/tree/master/configs/coins>
+          for current configuration options supported in master (Note: may differ from release).
+        '';
+      };
+
+      extraCmdLineOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-workers=1" "-dbcache=0" "-logtosderr" ];
+        description = lib.mdDoc ''
+          Extra command line options to pass to Blockbook.
+          Run blockbook --help to list all available options.
+        '';
+      };
+    };
+  };
+in
+{
+  # interface
+
+  options = {
+    services.blockbook-frontend = mkOption {
+      type = types.attrsOf (types.submodule blockbookOpts);
+      default = {};
+      description = lib.mdDoc "Specification of one or more blockbook-frontend instances.";
+    };
+  };
+
+  # implementation
+
+  config = mkIf (eachBlockbook != {}) {
+
+    systemd.services = mapAttrs' (blockbookName: cfg: (
+      nameValuePair "blockbook-frontend-${blockbookName}" (
+        let
+          configFile = if cfg.configFile != null then cfg.configFile else
+            pkgs.writeText "config.conf" (builtins.toJSON ( {
+                coin_name = "${cfg.coinName}";
+                rpc_user = "${cfg.rpc.user}";
+                rpc_pass = "${cfg.rpc.password}";
+                rpc_url = "${cfg.rpc.url}:${toString cfg.rpc.port}";
+                message_queue_binding = "${cfg.messageQueueBinding}";
+              } // cfg.extraConfig)
+            );
+        in {
+          description = "blockbook-frontend-${blockbookName} daemon";
+          after = [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+          preStart = ''
+            ln -sf ${cfg.templateDir} ${cfg.dataDir}/static/
+            ln -sf ${cfg.cssDir} ${cfg.dataDir}/static/
+            ${optionalString (cfg.rpc.passwordFile != null && cfg.configFile == null) ''
+              CONFIGTMP=$(mktemp)
+              ${pkgs.jq}/bin/jq ".rpc_pass = \"$(cat ${cfg.rpc.passwordFile})\"" ${configFile} > $CONFIGTMP
+              mv $CONFIGTMP ${cfg.dataDir}/${blockbookName}-config.json
+            ''}
+          '';
+          serviceConfig = {
+            User = cfg.user;
+            Group = cfg.group;
+            ExecStart = ''
+               ${cfg.package}/bin/blockbook \
+               ${if (cfg.rpc.passwordFile != null && cfg.configFile == null) then
+               "-blockchaincfg=${cfg.dataDir}/${blockbookName}-config.json"
+               else
+               "-blockchaincfg=${configFile}"
+               } \
+               -datadir=${cfg.dataDir} \
+               ${optionalString (cfg.sync != false) "-sync"} \
+               ${optionalString (cfg.certFile != null) "-certfile=${toString cfg.certFile}"} \
+               ${optionalString (cfg.debug != false) "-debug"} \
+               ${optionalString (cfg.internal != null) "-internal=${toString cfg.internal}"} \
+               ${optionalString (cfg.public != null) "-public=${toString cfg.public}"} \
+               ${toString cfg.extraCmdLineOptions}
+            '';
+            Restart = "on-failure";
+            WorkingDirectory = cfg.dataDir;
+            LimitNOFILE = 65536;
+          };
+        }
+    ) )) eachBlockbook;
+
+    systemd.tmpfiles.rules = flatten (mapAttrsToList (blockbookName: cfg: [
+      "d ${cfg.dataDir} 0750 ${cfg.user} ${cfg.group} - -"
+      "d ${cfg.dataDir}/static 0750 ${cfg.user} ${cfg.group} - -"
+    ]) eachBlockbook);
+
+    users.users = mapAttrs' (blockbookName: cfg: (
+      nameValuePair "blockbook-frontend-${blockbookName}" {
+      name = cfg.user;
+      group = cfg.group;
+      home = cfg.dataDir;
+      isSystemUser = true;
+    })) eachBlockbook;
+
+    users.groups = mapAttrs' (instanceName: cfg: (
+      nameValuePair "${cfg.group}" { })) eachBlockbook;
+  };
+
+  meta.maintainers = with maintainers; [ _1000101 ];
+
+}
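
A usage sketch for one blockbook-frontend instance; the instance name "btc" and the secrets path are placeholders, everything else uses options declared above:

  services.blockbook-frontend."btc" = {
    enable = true;
    coinName = "Bitcoin";
    rpc = {
      url = "http://127.0.0.1";
      port = 8030;
      user = "rpc";
      passwordFile = "/run/secrets/blockbook-rpc-password";  # placeholder path
    };
    internal = ":9030";
    public = ":9130";
  };
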
diff --git a/nixpkgs/nixos/modules/services/networking/blocky.nix b/nixpkgs/nixos/modules/services/networking/blocky.nix
new file mode 100644
index 000000000000..30a41fa6a421
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/blocky.nix
@@ -0,0 +1,41 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.blocky;
+
+  format = pkgs.formats.yaml { };
+  configFile = format.generate "config.yaml" cfg.settings;
+in
+{
+  options.services.blocky = {
+    enable = mkEnableOption (lib.mdDoc "blocky, a fast and lightweight DNS proxy as ad-blocker for local network with many features");
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      description = lib.mdDoc ''
+        Blocky configuration. Refer to
+        <https://0xerr0r.github.io/blocky/configuration/>
+        for details on supported values.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.blocky = {
+      description = "A DNS proxy and ad-blocker for the local network";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${pkgs.blocky}/bin/blocky --config ${configFile}";
+        Restart = "on-failure";
+
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+      };
+    };
+  };
+}
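
A usage sketch for the blocky module; `settings` is free-form data serialized to YAML, so the keys below merely follow the upstream configuration documentation linked above and are illustrative rather than validated by this module:

  services.blocky = {
    enable = true;
    settings = {
      # Keys as documented at https://0xerr0r.github.io/blocky/configuration/ (illustrative).
      upstream.default = [ "1.1.1.1" "9.9.9.9" ];
      ports.dns = 53;   # binding port 53 works because the unit grants CAP_NET_BIND_SERVICE
    };
  };
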
diff --git a/nixpkgs/nixos/modules/services/networking/cgit.nix b/nixpkgs/nixos/modules/services/networking/cgit.nix
new file mode 100644
index 000000000000..7d1f12fa9146
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/cgit.nix
@@ -0,0 +1,205 @@
+{ config, lib, pkgs, ...}:
+
+with lib;
+
+let
+  cfgs = config.services.cgit;
+
+  settingType = with types; oneOf [ bool int str ];
+
+  genAttrs' = names: f: listToAttrs (map f names);
+
+  regexEscape =
+    let
+      # taken from https://github.com/python/cpython/blob/05cb728d68a278d11466f9a6c8258d914135c96c/Lib/re.py#L251-L266
+      special = [
+        "(" ")" "[" "]" "{" "}" "?" "*" "+" "-" "|" "^" "$" "\\" "." "&" "~"
+        "#" " " "\t" "\n" "\r"
+        "" # \v / 0x0B
+        "" # \f / 0x0C
+      ];
+    in
+      replaceStrings special (map (c: "\\${c}") special);
+
+  stripLocation = cfg: removeSuffix "/" cfg.nginx.location;
+
+  regexLocation = cfg: regexEscape (stripLocation cfg);
+
+  mkFastcgiPass = cfg: ''
+    ${if cfg.nginx.location == "/" then ''
+      fastcgi_param PATH_INFO $uri;
+    '' else ''
+      fastcgi_split_path_info ^(${regexLocation cfg})(/.+)$;
+      fastcgi_param PATH_INFO $fastcgi_path_info;
+    ''
+    }fastcgi_pass unix:${config.services.fcgiwrap.socketAddress};
+  '';
+
+  cgitrcLine = name: value: "${name}=${
+    if value == true then
+      "1"
+    else if value == false then
+      "0"
+    else
+      toString value
+  }";
+
+  mkCgitrc = cfg: pkgs.writeText "cgitrc" ''
+    # global settings
+    ${concatStringsSep "\n" (
+        mapAttrsToList
+          cgitrcLine
+          ({ virtual-root = cfg.nginx.location; } // cfg.settings)
+      )
+    }
+    ${optionalString (cfg.scanPath != null) (cgitrcLine "scan-path" cfg.scanPath)}
+
+    # repository settings
+    ${concatStrings (
+        mapAttrsToList
+          (url: settings: ''
+            ${cgitrcLine "repo.url" url}
+            ${concatStringsSep "\n" (
+                mapAttrsToList (name: cgitrcLine "repo.${name}") settings
+              )
+            }
+          '')
+          cfg.repos
+      )
+    }
+
+    # extra config
+    ${cfg.extraConfig}
+  '';
+
+  mkCgitReposDir = cfg:
+    if cfg.scanPath != null then
+      cfg.scanPath
+    else
+      pkgs.runCommand "cgit-repos" {
+        preferLocalBuild = true;
+        allowSubstitutes = false;
+      } ''
+        mkdir -p "$out"
+        ${
+          concatStrings (
+            mapAttrsToList
+              (name: value: ''
+                ln -s ${escapeShellArg value.path} "$out"/${escapeShellArg name}
+              '')
+              cfg.repos
+          )
+        }
+      '';
+
+in
+{
+  options = {
+    services.cgit = mkOption {
+      description = mdDoc "Configure cgit instances.";
+      default = {};
+      type = types.attrsOf (types.submodule ({ config, ... }: {
+        options = {
+          enable = mkEnableOption (mdDoc "cgit");
+
+          package = mkPackageOptionMD pkgs "cgit" {};
+
+          nginx.virtualHost = mkOption {
+            description = mdDoc "VirtualHost to serve cgit on, defaults to the attribute name.";
+            type = types.str;
+            default = config._module.args.name;
+            example = "git.example.com";
+          };
+
+          nginx.location = mkOption {
+            description = mdDoc "Location to serve cgit under.";
+            type = types.str;
+            default = "/";
+            example = "/git/";
+          };
+
+          repos = mkOption {
+            description = mdDoc "cgit repository settings, see cgitrc(5)";
+            type = with types; attrsOf (attrsOf settingType);
+            default = {};
+            example = {
+              blah = {
+                path = "/var/lib/git/example";
+                desc = "An example repository";
+              };
+            };
+          };
+
+          scanPath = mkOption {
+            description = mdDoc "A path which will be scanned for repositories.";
+            type = types.nullOr types.path;
+            default = null;
+            example = "/var/lib/git";
+          };
+
+          settings = mkOption {
+            description = mdDoc "cgit configuration, see cgitrc(5)";
+            type = types.attrsOf settingType;
+            default = {};
+            example = literalExpression ''
+              {
+                enable-follow-links = true;
+                source-filter = "''${pkgs.cgit}/lib/cgit/filters/syntax-highlighting.py";
+              }
+            '';
+          };
+
+          extraConfig = mkOption {
+            description = mdDoc "These lines go to the end of cgitrc verbatim.";
+            type = types.lines;
+            default = "";
+          };
+        };
+      }));
+    };
+  };
+
+  config = mkIf (any (cfg: cfg.enable) (attrValues cfgs)) {
+    assertions = mapAttrsToList (vhost: cfg: {
+      assertion = !cfg.enable || (cfg.scanPath == null) != (cfg.repos == {});
+      message = "Exactly one of services.cgit.${vhost}.scanPath or services.cgit.${vhost}.repos must be set.";
+    }) cfgs;
+
+    services.fcgiwrap.enable = true;
+
+    services.nginx.enable = true;
+
+    services.nginx.virtualHosts = mkMerge (mapAttrsToList (_: cfg: {
+      ${cfg.nginx.virtualHost} = {
+        locations = (
+          genAttrs'
+            [ "cgit.css" "cgit.png" "favicon.ico" "robots.txt" ]
+            (name: nameValuePair "= ${stripLocation cfg}/${name}" {
+              extraConfig = ''
+                alias ${cfg.package}/cgit/${name};
+              '';
+            })
+        ) // {
+          "~ ${regexLocation cfg}/.+/(info/refs|git-upload-pack)" = {
+            fastcgiParams = rec {
+              SCRIPT_FILENAME = "${pkgs.git}/libexec/git-core/git-http-backend";
+              GIT_HTTP_EXPORT_ALL = "1";
+              GIT_PROJECT_ROOT = mkCgitReposDir cfg;
+              HOME = GIT_PROJECT_ROOT;
+            };
+            extraConfig = mkFastcgiPass cfg;
+          };
+          "${stripLocation cfg}/" = {
+            fastcgiParams = {
+              SCRIPT_FILENAME = "${cfg.package}/cgit/cgit.cgi";
+              QUERY_STRING = "$args";
+              HTTP_HOST = "$server_name";
+              CGIT_CONFIG = mkCgitrc cfg;
+            };
+            extraConfig = mkFastcgiPass cfg;
+          };
+        };
+      };
+    }) cfgs);
+  };
+}
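
A usage sketch for the cgit module; per the assertion above, exactly one of `scanPath` or `repos` must be set per instance. The virtual host name and path are placeholders:

  services.cgit."git.example.com" = {
    enable = true;
    nginx.location = "/git/";
    scanPath = "/var/lib/git";            # scan this tree for repositories
    settings.enable-follow-links = true;  # from the module's own settings example
  };
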
diff --git a/nixpkgs/nixos/modules/services/networking/charybdis.nix b/nixpkgs/nixos/modules/services/networking/charybdis.nix
new file mode 100644
index 000000000000..168da243dba1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/charybdis.nix
@@ -0,0 +1,114 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption singleton types;
+  inherit (pkgs) coreutils charybdis;
+  cfg = config.services.charybdis;
+
+  configFile = pkgs.writeText "charybdis.conf" ''
+    ${cfg.config}
+  '';
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.charybdis = {
+
+      enable = mkEnableOption (lib.mdDoc "Charybdis IRC daemon");
+
+      config = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Charybdis IRC daemon configuration file.
+        '';
+      };
+
+      statedir = mkOption {
+        type = types.path;
+        default = "/var/lib/charybdis";
+        description = lib.mdDoc ''
+          Location of the state directory of charybdis.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "ircd";
+        description = lib.mdDoc ''
+          Charybdis IRC daemon user.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "ircd";
+        description = lib.mdDoc ''
+          Charybdis IRC daemon group.
+        '';
+      };
+
+      motd = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Charybdis MOTD text.
+
+          Charybdis will read its MOTD from /etc/charybdis/ircd.motd .
+          If set, the value of this option will be written to this path.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable (lib.mkMerge [
+    {
+      users.users.${cfg.user} = {
+        description = "Charybdis IRC daemon user";
+        uid = config.ids.uids.ircd;
+        group = cfg.group;
+      };
+
+      users.groups.${cfg.group} = {
+        gid = config.ids.gids.ircd;
+      };
+
+      systemd.tmpfiles.rules = [
+        "d ${cfg.statedir} - ${cfg.user} ${cfg.group} - -"
+      ];
+
+      environment.etc."charybdis/ircd.conf".source = configFile;
+
+      systemd.services.charybdis = {
+        description = "Charybdis IRC daemon";
+        wantedBy = [ "multi-user.target" ];
+        reloadIfChanged = true;
+        restartTriggers = [
+          configFile
+        ];
+        environment = {
+          BANDB_DBPATH = "${cfg.statedir}/ban.db";
+        };
+        serviceConfig = {
+          ExecStart   = "${charybdis}/bin/charybdis -foreground -logfile /dev/stdout -configfile /etc/charybdis/ircd.conf";
+          ExecReload = "${coreutils}/bin/kill -HUP $MAINPID";
+          Group = cfg.group;
+          User = cfg.user;
+        };
+      };
+
+    }
+
+    (mkIf (cfg.motd != null) {
+      environment.etc."charybdis/ircd.motd".text = cfg.motd;
+    })
+  ]);
+}
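
A minimal sketch for the Charybdis module; `config` is mandatory and is written verbatim to /etc/charybdis/ircd.conf, so the local ./ircd.conf file referenced below is a placeholder:

  services.charybdis = {
    enable = true;
    config = builtins.readFile ./ircd.conf;  # placeholder: a complete ircd.conf
    motd = ''
      Welcome to this IRC server.
    '';
  };
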
diff --git a/nixpkgs/nixos/modules/services/networking/chisel-server.nix b/nixpkgs/nixos/modules/services/networking/chisel-server.nix
new file mode 100644
index 000000000000..134c71430cd0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/chisel-server.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.chisel-server;
+
+in {
+  options = {
+    services.chisel-server = {
+      enable = mkEnableOption (mdDoc "Chisel Tunnel Server");
+      host = mkOption {
+        description = mdDoc "Address to listen on, falls back to 0.0.0.0";
+        type = with types; nullOr str;
+        default = null;
+        example = "[::1]";
+      };
+      port = mkOption {
+        description = mdDoc "Port to listen on, falls back to 8080";
+        type = with types; nullOr port;
+        default = null;
+      };
+      authfile = mkOption {
+        description = mdDoc "Path to auth.json file";
+        type = with types; nullOr path;
+        default = null;
+      };
+      keepalive  = mkOption {
+        description = mdDoc "Keepalive interval, falls back to 25s";
+        type = with types; nullOr str;
+        default = null;
+        example = "5s";
+      };
+      backend = mkOption {
+        description = mdDoc "HTTP server to proxy normal requests to";
+        type = with types; nullOr str;
+        default = null;
+        example = "http://127.0.0.1:8888";
+      };
+      socks5 = mkOption {
+        description = mdDoc "Allow clients access to internal SOCKS5 proxy";
+        type = types.bool;
+        default = false;
+      };
+      reverse = mkOption {
+        description = mdDoc "Allow clients reverse port forwarding";
+        type = types.bool;
+        default = false;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.chisel-server = {
+      description = "Chisel Tunnel Server";
+      wantedBy = [ "network-online.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.chisel}/bin/chisel server " + concatStringsSep " " (
+          optional (cfg.host != null) "--host ${cfg.host}"
+          ++ optional (cfg.port != null) "--port ${builtins.toString cfg.port}"
+          ++ optional (cfg.authfile != null) "--authfile ${cfg.authfile}"
+          ++ optional (cfg.keepalive != null) "--keepalive ${cfg.keepalive}"
+          ++ optional (cfg.backend != null) "--backend ${cfg.backend}"
+          ++ optional cfg.socks5 "--socks5"
+          ++ optional cfg.reverse "--reverse"
+        );
+
+        # Security Hardening
+        # Refer to systemd.exec(5) for option descriptions.
+        CapabilityBoundingSet = "";
+
+        # implies RemoveIPC=, PrivateTmp=, NoNewPrivileges=, RestrictSUIDSGID=,
+        # ProtectSystem=strict, ProtectHome=read-only
+        DynamicUser = true;
+        LockPersonality = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectProc = "invisible";
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "~@clock @cpu-emulation @debug @mount @obsolete @reboot @swap @privileged @resources";
+        UMask = "0077";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ clerie ];
+}
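
A usage sketch for the chisel-server module; unset options fall back to the upstream defaults noted in their descriptions, and the auth file path is a placeholder:

  services.chisel-server = {
    enable = true;
    port = 8080;
    reverse = true;                            # allow clients to reverse port-forward
    authfile = "/var/lib/chisel/auth.json";    # placeholder path
  };
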
diff --git a/nixpkgs/nixos/modules/services/networking/cjdns.nix b/nixpkgs/nixos/modules/services/networking/cjdns.nix
new file mode 100644
index 000000000000..80085da92702
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/cjdns.nix
@@ -0,0 +1,304 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  pkg = pkgs.cjdns;
+
+  cfg = config.services.cjdns;
+
+  connectToSubmodule =
+  { ... }:
+  { options =
+    { password = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Authorized password to the opposite end of the tunnel.";
+      };
+      login = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc "(optional) name your peer has for you";
+      };
+      peerName = mkOption {
+        default = "";
+        type = types.str;
+        description = lib.mdDoc "(optional) human-readable name for peer";
+      };
+      publicKey = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Public key at the opposite end of the tunnel.";
+      };
+      hostname = mkOption {
+        default = "";
+        example = "foobar.hype";
+        type = types.str;
+        description = lib.mdDoc "Optional hostname to add to /etc/hosts; prevents reverse lookup failures.";
+      };
+    };
+  };
+
+  # Additional /etc/hosts entries for peers with an associated hostname
+  cjdnsExtraHosts = pkgs.runCommand "cjdns-hosts" {} ''
+    exec >$out
+    ${concatStringsSep "\n" (mapAttrsToList (k: v:
+        optionalString (v.hostname != "")
+          "echo $(${pkgs.cjdns}/bin/publictoip6 ${v.publicKey}) ${v.hostname}")
+        (cfg.ETHInterface.connectTo // cfg.UDPInterface.connectTo))}
+  '';
+
+  parseModules = x:
+    x // { connectTo = mapAttrs (name: value: { inherit (value) password publicKey; }) x.connectTo; };
+
+  cjdrouteConf = builtins.toJSON ( recursiveUpdate {
+    admin = {
+      bind = cfg.admin.bind;
+      password = "@CJDNS_ADMIN_PASSWORD@";
+    };
+    authorizedPasswords = map (p: { password = p; }) cfg.authorizedPasswords;
+    interfaces = {
+      ETHInterface = if (cfg.ETHInterface.bind != "") then [ (parseModules cfg.ETHInterface) ] else [ ];
+      UDPInterface = if (cfg.UDPInterface.bind != "") then [ (parseModules cfg.UDPInterface) ] else [ ];
+    };
+
+    privateKey = "@CJDNS_PRIVATE_KEY@";
+
+    resetAfterInactivitySeconds = 100;
+
+    router = {
+      interface = { type = "TUNInterface"; };
+      ipTunnel = {
+        allowedConnections = [];
+        outgoingConnections = [];
+      };
+    };
+
+    security = [ { exemptAngel = 1; setuser = "nobody"; } ];
+
+  } cfg.extraConfig);
+
+in
+
+{
+  options = {
+
+    services.cjdns = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the cjdns network encryption
+          and routing engine. A file at /etc/cjdns.keys will
+          be created if it does not exist to contain a random
+          secret key that your IPv6 address will be derived from.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        example = { router.interface.tunDevice = "tun10"; };
+        description = lib.mdDoc ''
+          Extra configuration, given as attrs, that will be merged recursively
+          with the rest of the JSON generated by this module, at the root node.
+        '';
+      };
+
+      confFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/etc/cjdroute.conf";
+        description = lib.mdDoc ''
+          Ignore all other cjdns options and load configuration from this file.
+        '';
+      };
+
+      authorizedPasswords = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [
+          "snyrfgkqsc98qh1y4s5hbu0j57xw5s0"
+          "z9md3t4p45mfrjzdjurxn4wuj0d8swv"
+          "49275fut6tmzu354pq70sr5b95qq0vj"
+        ];
+        description = lib.mdDoc ''
+          Any remote cjdns nodes that offer these passwords on
+          connection will be allowed to route through this node.
+        '';
+      };
+
+      admin = {
+        bind = mkOption {
+          type = types.str;
+          default = "127.0.0.1:11234";
+          description = lib.mdDoc ''
+            Bind the administration port to this address and port.
+          '';
+        };
+      };
+
+      UDPInterface = {
+        bind = mkOption {
+          type = types.str;
+          default = "";
+          example = "192.168.1.32:43211";
+          description = lib.mdDoc ''
+            Address and port to bind UDP tunnels to.
+          '';
+         };
+        connectTo = mkOption {
+          type = types.attrsOf ( types.submodule ( connectToSubmodule ) );
+          default = { };
+          example = literalExpression ''
+            {
+              "192.168.1.1:27313" = {
+                hostname = "homer.hype";
+                password = "5kG15EfpdcKNX3f2GSQ0H1HC7yIfxoCoImnO5FHM";
+                publicKey = "371zpkgs8ss387tmr81q04mp0hg1skb51hw34vk1cq644mjqhup0.k";
+              };
+            }
+          '';
+          description = lib.mdDoc ''
+            Credentials for making UDP tunnels.
+          '';
+        };
+      };
+
+      ETHInterface = {
+        bind = mkOption {
+          type = types.str;
+          default = "";
+          example = "eth0";
+          description =
+            lib.mdDoc ''
+              Bind to this device for native ethernet operation.
+              `all` is a pseudo-name which will try to connect to all devices.
+            '';
+        };
+
+        beacon = mkOption {
+          type = types.int;
+          default = 2;
+          description = lib.mdDoc ''
+            Auto-connect to other cjdns nodes on the same network.
+            Options:
+              0: Disabled.
+              1: Accept beacons, this will cause cjdns to accept incoming
+                 beacon messages and try connecting to the sender.
+              2: Accept and send beacons, this will cause cjdns to broadcast
+                 messages on the local network which contain a randomly
+                 generated per-session password, other nodes which have this
+                 set to 1 or 2 will hear the beacon messages and connect
+                 automatically.
+          '';
+        };
+
+        connectTo = mkOption {
+          type = types.attrsOf ( types.submodule ( connectToSubmodule ) );
+          default = { };
+          example = literalExpression ''
+            {
+              "01:02:03:04:05:06" = {
+                hostname = "homer.hype";
+                password = "5kG15EfpdcKNX3f2GSQ0H1HC7yIfxoCoImnO5FHM";
+                publicKey = "371zpkgs8ss387tmr81q04mp0hg1skb51hw34vk1cq644mjqhup0.k";
+              };
+            }
+          '';
+          description = lib.mdDoc ''
+            Credentials for connecting look similar to UDP credentials,
+            except they begin with the MAC address.
+          '';
+        };
+      };
+
+      addExtraHosts = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to add cjdns peers with an associated hostname to
+          {file}`/etc/hosts`.  Beware that enabling this
+          incurs heavy eval-time costs.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    boot.kernelModules = [ "tun" ];
+
+    # networking.firewall.allowedUDPPorts = ...
+
+    systemd.services.cjdns = {
+      description = "cjdns: routing engine designed for security, scalability, speed and ease of use";
+      wantedBy = [ "multi-user.target" "sleep.target"];
+      after = [ "network-online.target" ];
+      bindsTo = [ "network-online.target" ];
+
+      preStart = optionalString (cfg.confFile == null) ''
+        [ -e /etc/cjdns.keys ] && source /etc/cjdns.keys
+
+        if [ -z "$CJDNS_PRIVATE_KEY" ]; then
+            shopt -s lastpipe
+            ${pkg}/bin/makekeys | { read private ipv6 public; }
+
+            umask 0077
+            echo "CJDNS_PRIVATE_KEY=$private" >> /etc/cjdns.keys
+            echo -e "CJDNS_IPV6=$ipv6\nCJDNS_PUBLIC_KEY=$public" > /etc/cjdns.public
+
+            chmod 600 /etc/cjdns.keys
+            chmod 444 /etc/cjdns.public
+        fi
+
+        if [ -z "$CJDNS_ADMIN_PASSWORD" ]; then
+            echo "CJDNS_ADMIN_PASSWORD=$(tr -dc A-Za-z0-9 </dev/urandom | head -c 32)" \
+                >> /etc/cjdns.keys
+        fi
+      '';
+
+      script = (
+        if cfg.confFile != null then "${pkg}/bin/cjdroute < ${cfg.confFile}" else
+          ''
+            source /etc/cjdns.keys
+            (cat <<'EOF'
+            ${cjdrouteConf}
+            EOF
+            ) | sed \
+                -e "s/@CJDNS_ADMIN_PASSWORD@/$CJDNS_ADMIN_PASSWORD/g" \
+                -e "s/@CJDNS_PRIVATE_KEY@/$CJDNS_PRIVATE_KEY/g" \
+                | ${pkg}/bin/cjdroute
+         ''
+      );
+
+      startLimitIntervalSec = 0;
+      serviceConfig = {
+        Type = "forking";
+        Restart = "always";
+        RestartSec = 1;
+        CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW CAP_SETUID";
+        ProtectSystem = true;
+        # Doesn't work on i686, causing service to fail
+        MemoryDenyWriteExecute = !pkgs.stdenv.isi686;
+        ProtectHome = true;
+        PrivateTmp = true;
+      };
+    };
+
+    networking.hostFiles = mkIf cfg.addExtraHosts [ cjdnsExtraHosts ];
+
+    assertions = [
+      { assertion = ( cfg.ETHInterface.bind != "" || cfg.UDPInterface.bind != "" || cfg.confFile != null );
+        message = "Neither cjdns.ETHInterface.bind nor cjdns.UDPInterface.bind defined.";
+      }
+      { assertion = config.networking.enableIPv6;
+        message = "networking.enableIPv6 must be enabled for CJDNS to work";
+      }
+    ];
+
+  };
+
+}
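
A usage sketch for the cjdns module, reusing the peer credentials from the `connectTo` example above (they are not real credentials):

  services.cjdns = {
    enable = true;
    UDPInterface = {
      bind = "0.0.0.0:43211";
      connectTo."192.168.1.1:27313" = {
        hostname = "homer.hype";
        password = "5kG15EfpdcKNX3f2GSQ0H1HC7yIfxoCoImnO5FHM";
        publicKey = "371zpkgs8ss387tmr81q04mp0hg1skb51hw34vk1cq644mjqhup0.k";
      };
    };
    addExtraHosts = true;   # adds homer.hype to /etc/hosts, at extra eval-time cost
  };
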
diff --git a/nixpkgs/nixos/modules/services/networking/cloudflare-dyndns.nix b/nixpkgs/nixos/modules/services/networking/cloudflare-dyndns.nix
new file mode 100644
index 000000000000..627fdb880a67
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/cloudflare-dyndns.nix
@@ -0,0 +1,93 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cloudflare-dyndns;
+in
+{
+  options = {
+    services.cloudflare-dyndns = {
+      enable = mkEnableOption (lib.mdDoc "Cloudflare Dynamic DNS Client");
+
+      apiTokenFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The path to a file containing the CloudFlare API token.
+
+          The file must have the form `CLOUDFLARE_API_TOKEN=...`
+        '';
+      };
+
+      domains = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of domain names to update records for.
+        '';
+      };
+
+      proxied = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether this is a DNS-only record, or also being proxied through CloudFlare.
+        '';
+      };
+
+      ipv4 = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable setting IPv4 A records.
+        '';
+      };
+
+      ipv6 = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable setting IPv6 AAAA records.
+        '';
+      };
+
+      deleteMissing = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to delete the record when no IP address is found.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.cloudflare-dyndns = {
+      description = "CloudFlare Dynamic DNS Client";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      startAt = "*:0/5";
+
+      environment = {
+        CLOUDFLARE_DOMAINS = toString cfg.domains;
+      };
+
+      serviceConfig = {
+        Type = "simple";
+        DynamicUser = true;
+        StateDirectory = "cloudflare-dyndns";
+        EnvironmentFile = cfg.apiTokenFile;
+        ExecStart =
+          let
+            args = [ "--cache-file /var/lib/cloudflare-dyndns/ip.cache" ]
+              ++ (if cfg.ipv4 then [ "-4" ] else [ "-no-4" ])
+              ++ (if cfg.ipv6 then [ "-6" ] else [ "-no-6" ])
+              ++ optional cfg.deleteMissing "--delete-missing"
+              ++ optional cfg.proxied "--proxied";
+          in
+          "${pkgs.cloudflare-dyndns}/bin/cloudflare-dyndns ${toString args}";
+      };
+    };
+  };
+}
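
A usage sketch for the cloudflare-dyndns module; the token path and domain are placeholders, and the referenced file must contain a line of the form CLOUDFLARE_API_TOKEN=... as described above:

  services.cloudflare-dyndns = {
    enable = true;
    apiTokenFile = "/run/secrets/cloudflare-api-token";  # placeholder path
    domains = [ "home.example.com" ];                    # placeholder domain
    ipv6 = true;      # also maintain AAAA records
    proxied = false;  # DNS-only records
  };
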
diff --git a/nixpkgs/nixos/modules/services/networking/cloudflared.nix b/nixpkgs/nixos/modules/services/networking/cloudflared.nix
new file mode 100644
index 000000000000..b3f0e37d8e9e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/cloudflared.nix
@@ -0,0 +1,331 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cloudflared;
+
+  originRequest = {
+    connectTimeout = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "30s";
+      description = lib.mdDoc ''
+        Timeout for establishing a new TCP connection to your origin server. This excludes the time taken to establish TLS, which is controlled by [tlsTimeout](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configuration/local-management/ingress/#tlstimeout).
+      '';
+    };
+
+    tlsTimeout = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "10s";
+      description = lib.mdDoc ''
+        Timeout for completing a TLS handshake to your origin server, if you have chosen to connect Tunnel to an HTTPS server.
+      '';
+    };
+
+    tcpKeepAlive = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "30s";
+      description = lib.mdDoc ''
+        The timeout after which a TCP keepalive packet is sent on a connection between Tunnel and the origin server.
+      '';
+    };
+
+    noHappyEyeballs = mkOption {
+      type = with types; nullOr bool;
+      default = null;
+      example = false;
+      description = lib.mdDoc ''
+        Disable the "happy eyeballs" algorithm for IPv4/IPv6 fallback if your local network has misconfigured one of the protocols.
+      '';
+    };
+
+    keepAliveConnections = mkOption {
+      type = with types; nullOr int;
+      default = null;
+      example = 100;
+      description = lib.mdDoc ''
+        Maximum number of idle keepalive connections between Tunnel and your origin. This does not restrict the total number of concurrent connections.
+      '';
+    };
+
+    keepAliveTimeout = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "1m30s";
+      description = lib.mdDoc ''
+        Timeout after which an idle keepalive connection can be discarded.
+      '';
+    };
+
+    httpHostHeader = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "";
+      description = lib.mdDoc ''
+        Sets the HTTP `Host` header on requests sent to the local service.
+      '';
+    };
+
+    originServerName = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "";
+      description = lib.mdDoc ''
+        Hostname that `cloudflared` should expect from your origin server certificate.
+      '';
+    };
+
+    caPool = mkOption {
+      type = with types; nullOr (either str path);
+      default = null;
+      example = "";
+      description = lib.mdDoc ''
+        Path to the certificate authority (CA) for the certificate of your origin. This option should be used only if your certificate is not signed by Cloudflare.
+      '';
+    };
+
+    noTLSVerify = mkOption {
+      type = with types; nullOr bool;
+      default = null;
+      example = false;
+      description = lib.mdDoc ''
+        Disables TLS verification of the certificate presented by your origin. Will allow any certificate from the origin to be accepted.
+      '';
+    };
+
+    disableChunkedEncoding = mkOption {
+      type = with types; nullOr bool;
+      default = null;
+      example = false;
+      description = lib.mdDoc ''
+        Disables chunked transfer encoding. Useful if you are running a WSGI server.
+      '';
+    };
+
+    proxyAddress = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "127.0.0.1";
+      description = lib.mdDoc ''
+        `cloudflared` starts a proxy server to translate HTTP traffic into TCP when proxying, for example, SSH or RDP. This configures the listen address for that proxy.
+      '';
+    };
+
+    proxyPort = mkOption {
+      type = with types; nullOr int;
+      default = null;
+      example = 0;
+      description = lib.mdDoc ''
+        `cloudflared` starts a proxy server to translate HTTP traffic into TCP when proxying, for example, SSH or RDP. This configures the listen port for that proxy. If set to zero, an unused port will randomly be chosen.
+      '';
+    };
+
+    proxyType = mkOption {
+      type = with types; nullOr (enum [ "" "socks" ]);
+      default = null;
+      example = "";
+      description = lib.mdDoc ''
+        `cloudflared` starts a proxy server to translate HTTP traffic into TCP when proxying, for example, SSH or RDP. This configures what type of proxy will be started. Valid options are:
+
+        - `""` for the regular proxy
+        - `"socks"` for a SOCKS5 proxy. Refer to the [https://developers.cloudflare.com/cloudflare-one/tutorials/kubectl/](tutorial on connecting through Cloudflare Access using kubectl) for more information.
+      '';
+    };
+  };
+in
+{
+  options.services.cloudflared = {
+    enable = mkEnableOption (lib.mdDoc "Cloudflare Tunnel client daemon (formerly Argo Tunnel)");
+
+    user = mkOption {
+      type = types.str;
+      default = "cloudflared";
+      description = lib.mdDoc "User account under which Cloudflared runs.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "cloudflared";
+      description = lib.mdDoc "Group under which cloudflared runs.";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.cloudflared;
+      defaultText = "pkgs.cloudflared";
+      description = lib.mdDoc "The package to use for Cloudflared.";
+    };
+
+    tunnels = mkOption {
+      description = lib.mdDoc ''
+        Cloudflare tunnels.
+      '';
+      type = types.attrsOf (types.submodule ({ name, ... }: {
+        options = {
+          inherit originRequest;
+
+          credentialsFile = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Credential file.
+
+              See [Credentials file](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-useful-terms/#credentials-file).
+            '';
+          };
+
+          warp-routing = {
+            enabled = mkOption {
+              type = with types; nullOr bool;
+              default = null;
+              description = lib.mdDoc ''
+                Enable warp routing.
+
+                See [Connect from WARP to a private network on Cloudflare using Cloudflare Tunnel](https://developers.cloudflare.com/cloudflare-one/tutorials/warp-to-tunnel/).
+              '';
+            };
+          };
+
+          default = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              Catch-all service if no ingress matches.
+
+              See `service`.
+            '';
+            example = "http_status:404";
+          };
+
+          ingress = mkOption {
+            type = with types; attrsOf (either str (submodule ({ hostname, ... }: {
+              options = {
+                inherit originRequest;
+
+                service = mkOption {
+                  type = with types; nullOr str;
+                  default = null;
+                  description = lib.mdDoc ''
+                    Service to pass the traffic.
+
+                    See [Supported protocols](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configuration/local-management/ingress/#supported-protocols).
+                  '';
+                  example = "http://localhost:80, tcp://localhost:8000, unix:/home/production/echo.sock, hello_world or http_status:404";
+                };
+
+                path = mkOption {
+                  type = with types; nullOr str;
+                  default = null;
+                  description = lib.mdDoc ''
+                    Path filter.
+
+                    If not specified, all paths will be matched.
+                  '';
+                  example = "/*.(jpg|png|css|js)";
+                };
+
+              };
+            })));
+            default = { };
+            description = lib.mdDoc ''
+              Ingress rules.
+
+              See [Ingress rules](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configuration/local-management/ingress/).
+            '';
+            example = {
+              "*.domain.com" = "http://localhost:80";
+              "*.anotherone.com" = "http://localhost:80";
+            };
+          };
+        };
+      }));
+
+      default = { };
+      example = {
+        "00000000-0000-0000-0000-000000000000" = {
+          credentialsFile = "/tmp/test";
+          ingress = {
+            "*.domain1.com" = {
+              service = "http://localhost:80";
+            };
+          };
+          default = "http_status:404";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.targets =
+      mapAttrs'
+        (name: tunnel:
+          nameValuePair "cloudflared-tunnel-${name}" {
+            description = "Cloudflare tunnel '${name}' target";
+            requires = [ "cloudflared-tunnel-${name}.service" ];
+            after = [ "cloudflared-tunnel-${name}.service" ];
+            unitConfig.StopWhenUnneeded = true;
+          }
+        )
+        config.services.cloudflared.tunnels;
+
+    systemd.services =
+      mapAttrs'
+        (name: tunnel:
+          let
+            filterConfig = lib.attrsets.filterAttrsRecursive (_: v: ! builtins.elem v [ null [ ] { } ]);
+
+            filterIngressSet = filterAttrs (_: v: builtins.typeOf v == "set");
+            filterIngressStr = filterAttrs (_: v: builtins.typeOf v == "string");
+
+            ingressesSet = filterIngressSet tunnel.ingress;
+            ingressesStr = filterIngressStr tunnel.ingress;
+
+            fullConfig = {
+              tunnel = name;
+              "credentials-file" = tunnel.credentialsFile;
+              ingress =
+                (map
+                  (key: {
+                    hostname = key;
+                  } // getAttr key (filterConfig (filterConfig ingressesSet)))
+                  (attrNames ingressesSet))
+                ++
+                (map
+                  (key: {
+                    hostname = key;
+                    service = getAttr key ingressesStr;
+                  })
+                  (attrNames ingressesStr))
+                ++ [{ service = tunnel.default; }];
+            };
+            mkConfigFile = pkgs.writeText "cloudflared.yml" (builtins.toJSON fullConfig);
+          in
+          nameValuePair "cloudflared-tunnel-${name}" ({
+            after = [ "network.target" "network-online.target" ];
+            wants = [ "network.target" "network-online.target" ];
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              User = cfg.user;
+              Group = cfg.group;
+              ExecStart = "${cfg.package}/bin/cloudflared tunnel --config=${mkConfigFile} --no-autoupdate run";
+              Restart = "on-failure";
+            };
+          })
+        )
+        config.services.cloudflared.tunnels;
+
+    users.users = mkIf (cfg.user == "cloudflared") {
+      cloudflared = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "cloudflared") {
+      cloudflared = { };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ bbigras ];
+}
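A minimal configuration sketch using the options defined above (the tunnel ID, hostname, and credentials path are placeholders):

```nix
{
  services.cloudflared = {
    enable = true;
    tunnels."00000000-0000-0000-0000-000000000000" = {
      # Placeholder path; point this at the credentials file created for your tunnel.
      credentialsFile = "/run/secrets/cloudflared-tunnel.json";
      default = "http_status:404";
      ingress."app.example.com".service = "http://localhost:8080";
    };
  };
}
```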
diff --git a/nixpkgs/nixos/modules/services/networking/cntlm.nix b/nixpkgs/nixos/modules/services/networking/cntlm.nix
new file mode 100644
index 000000000000..41510a8f074d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/cntlm.nix
@@ -0,0 +1,126 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.cntlm;
+
+  configFile = if cfg.configText != "" then
+    pkgs.writeText "cntlm.conf" ''
+      ${cfg.configText}
+    ''
+    else
+    pkgs.writeText "cntlm.conf" ''
+      # Cntlm Authentication Proxy Configuration
+      Username ${cfg.username}
+      Domain ${cfg.domain}
+      Password ${cfg.password}
+      ${optionalString (cfg.netbios_hostname != "") "Workstation ${cfg.netbios_hostname}"}
+      ${concatMapStrings (entry: "Proxy ${entry}\n") cfg.proxy}
+      ${optionalString (cfg.noproxy != []) "NoProxy ${concatStringsSep ", " cfg.noproxy}"}
+
+      ${concatMapStrings (port: ''
+        Listen ${toString port}
+      '') cfg.port}
+
+      ${cfg.extraConfig}
+    '';
+
+in
+
+{
+
+  options.services.cntlm = {
+
+    enable = mkEnableOption (lib.mdDoc "cntlm, which starts a local proxy");
+
+    username = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Proxy account name. The domain name cannot be included here (the 'at' sign is interpreted literally).
+      '';
+    };
+
+    domain = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Proxy account domain/workgroup name.";
+    };
+
+    password = mkOption {
+      default = "/etc/cntlm.password";
+      type = types.str;
+      description = lib.mdDoc "Proxy account password. Note: use chmod 0600 on /etc/cntlm.password for security.";
+    };
+
+    netbios_hostname = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        The hostname of your machine.
+      '';
+    };
+
+    proxy = mkOption {
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of NTLM/NTLMv2 authenticating HTTP proxies.
+
+        Parent proxy, which requires authentication. The same as proxy on the command line; it can be used more than once to specify an
+        unlimited number of proxies. Should one proxy fail, cntlm automatically moves on to the next one. The connect request fails only if
+        the whole list of proxies is scanned (for each request) and found to be invalid. Command-line options take precedence over the configuration file.
+      '';
+      example = [ "proxy.example.com:81" ];
+    };
+
+    noproxy = mkOption {
+      description = lib.mdDoc ''
+        A list of domains where the proxy is skipped.
+      '';
+      default = [];
+      type = types.listOf types.str;
+      example = [ "*.example.com" "example.com" ];
+    };
+
+    port = mkOption {
+      default = [3128];
+      type = types.listOf types.port;
+      description = lib.mdDoc "Specifies on which ports the cntlm daemon listens.";
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "Additional config appended to the end of the generated {file}`cntlm.conf`.";
+    };
+
+    configText = mkOption {
+       type = types.lines;
+       default = "";
+       description = lib.mdDoc "Verbatim contents of {file}`cntlm.conf`.";
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.cntlm = {
+      description = "CNTLM is an NTLM / NTLM Session Response / NTLMv2 authenticating HTTP proxy";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "cntlm";
+        ExecStart = ''
+          ${pkgs.cntlm}/bin/cntlm -U cntlm -c ${configFile} -v -f
+        '';
+      };
+    };
+
+    users.users.cntlm = {
+      name = "cntlm";
+      description = "cntlm system-wide daemon";
+      isSystemUser = true;
+    };
+  };
+}
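A minimal configuration sketch for the options above, with placeholder account details (see the option descriptions regarding permissions on the password file):

```nix
{
  services.cntlm = {
    enable = true;
    username = "jdoe";                      # placeholder proxy account
    domain   = "EXAMPLE";
    password = "/etc/cntlm.password";
    proxy    = [ "proxy.example.com:81" ];  # placeholder upstream proxy
    port     = [ 3128 ];
  };
}
```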
diff --git a/nixpkgs/nixos/modules/services/networking/connman.nix b/nixpkgs/nixos/modules/services/networking/connman.nix
new file mode 100644
index 000000000000..c626945ccd0c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/connman.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.connman;
+  configFile = pkgs.writeText "connman.conf" ''
+    [General]
+    NetworkInterfaceBlacklist=${lib.concatStringsSep "," cfg.networkInterfaceBlacklist}
+
+    ${cfg.extraConfig}
+  '';
+  enableIwd = cfg.wifi.backend == "iwd";
+in {
+  meta.maintainers = with lib.maintainers; [ AndersonTorres ];
+
+  imports = [
+    (lib.mkRenamedOptionModule [ "networking" "connman" ] [ "services" "connman" ])
+  ];
+
+  ###### interface
+
+  options = {
+    services.connman = {
+      enable = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to use ConnMan for managing your network connections.
+        '';
+      };
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        description = lib.mdDoc "The connman package / build flavor";
+        default = pkgs.connman;
+        defaultText = lib.literalExpression "pkgs.connman";
+        example = lib.literalExpression "pkgs.connmanFull";
+      };
+
+      enableVPN = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable ConnMan VPN service.
+        '';
+      };
+
+      extraConfig = lib.mkOption {
+        type = lib.types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration lines appended to the generated connman configuration file.
+        '';
+      };
+
+      networkInterfaceBlacklist = lib.mkOption {
+        type = with lib.types; listOf str;
+        default = [ "vmnet" "vboxnet" "virbr" "ifb" "ve" ];
+        description = lib.mdDoc ''
+          Default blacklisted interfaces; this includes NixOS container interfaces (ve).
+        '';
+      };
+
+      wifi = {
+        backend = lib.mkOption {
+          type = lib.types.enum [ "wpa_supplicant" "iwd" ];
+          default = "wpa_supplicant";
+          description = lib.mdDoc ''
+            Specify the Wi-Fi backend used.
+            Currently supported backends are {option}`wpa_supplicant` and {option}`iwd`.
+          '';
+        };
+      };
+
+      extraFlags = lib.mkOption {
+        type = with lib.types; listOf str;
+        default = [ ];
+        example = [ "--nodnsproxy" ];
+        description = lib.mdDoc ''
+          Extra flags to pass to connmand.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+    assertions = [{
+      assertion = !config.networking.useDHCP;
+      message = "You cannot use services.connman with networking.useDHCP";
+    }{
+      # TODO: connman seemingly can be used along network manager and
+      # connmanFull supports this - so this should be worked out somehow
+      assertion = !config.networking.networkmanager.enable;
+      message = "You cannot use services.connman with networking.networkmanager";
+    }];
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.connman = {
+      description = "Connection service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "syslog.target" ] ++ lib.optional enableIwd "iwd.service";
+      requires = lib.optional enableIwd "iwd.service";
+      serviceConfig = {
+        Type = "dbus";
+        BusName = "net.connman";
+        Restart = "on-failure";
+        ExecStart = toString ([
+          "${cfg.package}/sbin/connmand"
+          "--config=${configFile}"
+          "--nodaemon"
+        ] ++ lib.optional enableIwd "--wifi=iwd_agent"
+          ++ cfg.extraFlags);
+        StandardOutput = "null";
+      };
+    };
+
+    systemd.services.connman-vpn = lib.mkIf cfg.enableVPN {
+      description = "ConnMan VPN service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "syslog.target" ];
+      before = [ "connman.service" ];
+      serviceConfig = {
+        Type = "dbus";
+        BusName = "net.connman.vpn";
+        ExecStart = "${cfg.package}/sbin/connman-vpnd -n";
+        StandardOutput = "null";
+      };
+    };
+
+    systemd.services.net-connman-vpn = lib.mkIf cfg.enableVPN {
+      description = "D-BUS Service";
+      serviceConfig = {
+        Name = "net.connman.vpn";
+        before = [ "connman.service" ];
+        ExecStart = "${cfg.package}/sbin/connman-vpnd -n";
+        User = "root";
+        SystemdService = "connman-vpn.service";
+      };
+    };
+
+    networking = {
+      useDHCP = false;
+      wireless = {
+        enable = lib.mkIf (!enableIwd) true;
+        dbusControlled = true;
+        iwd = lib.mkIf enableIwd {
+          enable = true;
+        };
+      };
+      networkmanager.enable = false;
+    };
+  };
+}
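A minimal configuration sketch; as the implementation above shows, enabling the module disables NetworkManager and global DHCP:

```nix
{
  services.connman = {
    enable = true;
    wifi.backend = "iwd";            # or the default "wpa_supplicant"
    extraFlags = [ "--nodnsproxy" ];
  };
}
```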
diff --git a/nixpkgs/nixos/modules/services/networking/consul.nix b/nixpkgs/nixos/modules/services/networking/consul.nix
new file mode 100644
index 000000000000..955463b9031e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/consul.nix
@@ -0,0 +1,285 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+let
+
+  dataDir = "/var/lib/consul";
+  cfg = config.services.consul;
+
+  configOptions = {
+    data_dir = dataDir;
+    ui_config = {
+      enabled = cfg.webUi;
+    };
+  } // cfg.extraConfig;
+
+  configFiles = [ "/etc/consul.json" "/etc/consul-addrs.json" ]
+    ++ cfg.extraConfigFiles;
+
+  devices = attrValues (filterAttrs (_: i: i != null) cfg.interface);
+  systemdDevices = forEach devices
+    (i: "sys-subsystem-net-devices-${utils.escapeSystemdPath i}.device");
+in
+{
+  options = {
+
+    services.consul = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enables the consul daemon.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.consul;
+        defaultText = literalExpression "pkgs.consul";
+        description = lib.mdDoc ''
+          The package used for the Consul agent and CLI.
+        '';
+      };
+
+
+      webUi = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enables the web interface on the consul http port.
+        '';
+      };
+
+      leaveOnStop = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If enabled, causes a leave action to be sent when closing consul.
+          This allows a clean termination of the node, but permanently removes
+          it from the cluster. You probably don't want this option unless you
+          are running a node which is going offline in a permanent / semi-permanent
+          fashion.
+        '';
+      };
+
+      interface = {
+
+        advertise = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            The name of the interface to pull the advertise_addr from.
+          '';
+        };
+
+        bind = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            The name of the interface to pull the bind_addr from.
+          '';
+        };
+      };
+
+      forceAddrFamily = mkOption {
+        type = types.enum [ "any" "ipv4" "ipv6" ];
+        default = "any";
+        description = lib.mdDoc ''
+          Whether to bind IPv4, IPv6, or both kinds of addresses.
+        '';
+      };
+
+      forceIpv4 = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = lib.mdDoc ''
+          Deprecated: Use consul.forceAddrFamily instead.
+          Whether we should force the interfaces to only pull ipv4 addresses.
+        '';
+      };
+
+      dropPrivileges = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether the consul agent should be run as a non-root consul user.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = { };
+        type = types.attrsOf types.anything;
+        description = lib.mdDoc ''
+          Extra configuration options which are serialized to json and added
+          to the config.json file.
+        '';
+      };
+
+      extraConfigFiles = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Additional configuration files to pass to consul.
+          NOTE: These will not trigger the service to be restarted when altered.
+        '';
+      };
+
+      alerts = {
+        enable = mkEnableOption (lib.mdDoc "consul-alerts");
+
+        package = mkOption {
+          description = lib.mdDoc "Package to use for consul-alerts.";
+          default = pkgs.consul-alerts;
+          defaultText = literalExpression "pkgs.consul-alerts";
+          type = types.package;
+        };
+
+        listenAddr = mkOption {
+          description = lib.mdDoc "Api listening address.";
+          default = "localhost:9000";
+          type = types.str;
+        };
+
+        consulAddr = mkOption {
+          description = lib.mdDoc "Consul api listening address";
+          default = "localhost:8500";
+          type = types.str;
+        };
+
+        watchChecks = mkOption {
+          description = lib.mdDoc "Whether to enable check watcher.";
+          default = true;
+          type = types.bool;
+        };
+
+        watchEvents = mkOption {
+          description = lib.mdDoc "Whether to enable event watcher.";
+          default = true;
+          type = types.bool;
+        };
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable (
+    mkMerge [{
+
+      users.users.consul = {
+        description = "Consul agent daemon user";
+        isSystemUser = true;
+        group = "consul";
+        # The shell is needed for health checks
+        shell = "/run/current-system/sw/bin/bash";
+      };
+      users.groups.consul = {};
+
+      environment = {
+        etc."consul.json".text = builtins.toJSON configOptions;
+        # We need consul.d to exist for consul to start
+        etc."consul.d/dummy.json".text = "{ }";
+        systemPackages = [ cfg.package ];
+      };
+
+      warnings = lib.flatten [
+        (lib.optional (cfg.forceIpv4 != null) ''
+          The option consul.forceIpv4 is deprecated, please use
+          consul.forceAddrFamily instead.
+        '')
+      ];
+
+      systemd.services.consul = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ] ++ systemdDevices;
+        bindsTo = systemdDevices;
+        restartTriggers = [ config.environment.etc."consul.json".source ]
+          ++ mapAttrsToList (_: d: d.source)
+            (filterAttrs (n: _: hasPrefix "consul.d/" n) config.environment.etc);
+
+        serviceConfig = {
+          ExecStart = "@${lib.getExe cfg.package} consul agent -config-dir /etc/consul.d"
+            + concatMapStrings (n: " -config-file ${n}") configFiles;
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          PermissionsStartOnly = true;
+          User = if cfg.dropPrivileges then "consul" else null;
+          Restart = "on-failure";
+          TimeoutStartSec = "infinity";
+        } // (optionalAttrs (cfg.leaveOnStop) {
+          ExecStop = "${lib.getExe cfg.package} leave";
+        });
+
+        path = with pkgs; [ iproute2 gawk cfg.package ];
+        preStart = let
+          family = if cfg.forceAddrFamily == "ipv6" then
+            "-6"
+          else if cfg.forceAddrFamily == "ipv4" then
+            "-4"
+          else
+            "";
+        in ''
+          mkdir -m 0700 -p ${dataDir}
+          chown -R consul ${dataDir}
+
+          # Determine interface addresses
+          getAddrOnce () {
+            ip ${family} addr show dev "$1" scope global \
+              | awk -F '[ /\t]*' '/inet/ {print $3}' | head -n 1
+          }
+          getAddr () {
+            ADDR="$(getAddrOnce $1)"
+            LEFT=60 # Die after 1 minute
+            while [ -z "$ADDR" ]; do
+              sleep 1
+              LEFT=$(expr $LEFT - 1)
+              if [ "$LEFT" -eq "0" ]; then
+                echo "Address lookup timed out"
+                exit 1
+              fi
+              ADDR="$(getAddrOnce $1)"
+            done
+            echo "$ADDR"
+          }
+          echo "{" > /etc/consul-addrs.json
+          delim=" "
+        ''
+        + concatStrings (flip mapAttrsToList cfg.interface (name: i:
+          optionalString (i != null) ''
+            echo "$delim \"${name}_addr\": \"$(getAddr "${i}")\"" >> /etc/consul-addrs.json
+            delim=","
+          ''))
+        + ''
+          echo "}" >> /etc/consul-addrs.json
+        '';
+      };
+    }
+
+    # deprecated
+    (mkIf (cfg.forceIpv4 != null && cfg.forceIpv4) {
+      services.consul.forceAddrFamily = "ipv4";
+    })
+
+    (mkIf (cfg.alerts.enable) {
+      systemd.services.consul-alerts = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "consul.service" ];
+
+        path = [ cfg.package ];
+
+        serviceConfig = {
+          ExecStart = ''
+            ${lib.getExe cfg.alerts.package} start \
+              --alert-addr=${cfg.alerts.listenAddr} \
+              --consul-addr=${cfg.alerts.consulAddr} \
+              ${optionalString cfg.alerts.watchChecks "--watch-checks"} \
+              ${optionalString cfg.alerts.watchEvents "--watch-events"}
+          '';
+          User = if cfg.dropPrivileges then "consul" else null;
+          Restart = "on-failure";
+        };
+      };
+    })
+
+  ]);
+}
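A minimal single-node sketch using the options above (the interface name and the extra settings are placeholders; `extraConfig` is serialized straight to JSON):

```nix
{
  services.consul = {
    enable = true;
    webUi = true;
    interface.bind = "eth0";    # placeholder interface used for bind_addr
    extraConfig = {
      server = true;
      bootstrap_expect = 1;     # single-server test cluster
    };
  };
}
```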
diff --git a/nixpkgs/nixos/modules/services/networking/coredns.nix b/nixpkgs/nixos/modules/services/networking/coredns.nix
new file mode 100644
index 000000000000..f1fe7b2f1241
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/coredns.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.coredns;
+  configFile = pkgs.writeText "Corefile" cfg.config;
+in {
+  options.services.coredns = {
+    enable = mkEnableOption (lib.mdDoc "CoreDNS DNS server");
+
+    config = mkOption {
+      default = "";
+      example = ''
+        . {
+          whoami
+        }
+      '';
+      type = types.lines;
+      description = lib.mdDoc ''
+        Verbatim Corefile to use.
+        See <https://coredns.io/manual/toc/#configuration> for details.
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.coredns;
+      defaultText = literalExpression "pkgs.coredns";
+      type = types.package;
+      description = lib.mdDoc "Coredns package to use.";
+    };
+
+    extraArgs = mkOption {
+      default = [];
+      example = [ "-dns.port=53" ];
+      type = types.listOf types.str;
+      description = lib.mdDoc "Extra arguments to pass to coredns.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.coredns = {
+      description = "CoreDNS DNS server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        PermissionsStartOnly = true;
+        LimitNPROC = 512;
+        LimitNOFILE = 1048576;
+        CapabilityBoundingSet = "cap_net_bind_service";
+        AmbientCapabilities = "cap_net_bind_service";
+        NoNewPrivileges = true;
+        DynamicUser = true;
+        ExecStart = "${getBin cfg.package}/bin/coredns -conf=${configFile} ${lib.escapeShellArgs cfg.extraArgs}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -SIGUSR1 $MAINPID";
+        Restart = "on-failure";
+      };
+    };
+  };
+}
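A minimal sketch of a verbatim Corefile; the upstream resolver address is a placeholder:

```nix
{
  services.coredns = {
    enable = true;
    config = ''
      . {
        forward . 1.1.1.1
        cache
      }
    '';
  };
}
```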
diff --git a/nixpkgs/nixos/modules/services/networking/corerad.nix b/nixpkgs/nixos/modules/services/networking/corerad.nix
new file mode 100644
index 000000000000..0c6fb7a17cab
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/corerad.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.corerad;
+  settingsFormat = pkgs.formats.toml {};
+
+in {
+  meta.maintainers = with maintainers; [ mdlayher ];
+
+  options.services.corerad = {
+    enable = mkEnableOption (lib.mdDoc "CoreRAD IPv6 NDP RA daemon");
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      example = literalExpression ''
+        {
+          interfaces = [
+            # eth0 is an upstream interface monitoring for IPv6 router advertisements.
+            {
+              name = "eth0";
+              monitor = true;
+            }
+            # eth1 is a downstream interface advertising IPv6 prefixes for SLAAC.
+            {
+              name = "eth1";
+              advertise = true;
+              prefix = [{ prefix = "::/64"; }];
+            }
+          ];
+          # Optionally enable Prometheus metrics.
+          debug = {
+            address = "localhost:9430";
+            prometheus = true;
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Configuration for CoreRAD, see <https://github.com/mdlayher/corerad/blob/main/internal/config/reference.toml>
+        for supported values. Ignored if configFile is set.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      example = literalExpression ''"''${pkgs.corerad}/etc/corerad/corerad.toml"'';
+      description = lib.mdDoc "Path to CoreRAD TOML configuration file.";
+    };
+
+    package = mkOption {
+      default = pkgs.corerad;
+      defaultText = literalExpression "pkgs.corerad";
+      type = types.package;
+      description = lib.mdDoc "CoreRAD package to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Prefer the config file over settings if both are set.
+    services.corerad.configFile = mkDefault (settingsFormat.generate "corerad.toml" cfg.settings);
+
+    systemd.services.corerad = {
+      description = "CoreRAD IPv6 NDP RA daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        LimitNPROC = 512;
+        LimitNOFILE = 1048576;
+        CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
+        AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_RAW";
+        NoNewPrivileges = true;
+        DynamicUser = true;
+        Type = "notify";
+        NotifyAccess = "main";
+        ExecStart = "${getBin cfg.package}/bin/corerad -c=${cfg.configFile}";
+        Restart = "on-failure";
+        RestartKillSignal = "SIGHUP";
+      };
+    };
+  };
+}
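A minimal sketch advertising a prefix on one downstream interface (the interface name is a placeholder); the generated TOML becomes the default `configFile`:

```nix
{
  services.corerad = {
    enable = true;
    settings = {
      interfaces = [
        { name = "lan0"; advertise = true; prefix = [ { prefix = "::/64"; } ]; }
      ];
    };
  };
}
```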
diff --git a/nixpkgs/nixos/modules/services/networking/coturn.nix b/nixpkgs/nixos/modules/services/networking/coturn.nix
new file mode 100644
index 000000000000..2f34a72377ce
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/coturn.nix
@@ -0,0 +1,366 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.coturn;
+  pidfile = "/run/turnserver/turnserver.pid";
+  configFile = pkgs.writeText "turnserver.conf" ''
+listening-port=${toString cfg.listening-port}
+tls-listening-port=${toString cfg.tls-listening-port}
+alt-listening-port=${toString cfg.alt-listening-port}
+alt-tls-listening-port=${toString cfg.alt-tls-listening-port}
+${concatStringsSep "\n" (map (x: "listening-ip=${x}") cfg.listening-ips)}
+${concatStringsSep "\n" (map (x: "relay-ip=${x}") cfg.relay-ips)}
+min-port=${toString cfg.min-port}
+max-port=${toString cfg.max-port}
+${lib.optionalString cfg.lt-cred-mech "lt-cred-mech"}
+${lib.optionalString cfg.no-auth "no-auth"}
+${lib.optionalString cfg.use-auth-secret "use-auth-secret"}
+${lib.optionalString (cfg.static-auth-secret != null) ("static-auth-secret=${cfg.static-auth-secret}")}
+${lib.optionalString (cfg.static-auth-secret-file != null) ("static-auth-secret=#static-auth-secret#")}
+realm=${cfg.realm}
+${lib.optionalString cfg.no-udp "no-udp"}
+${lib.optionalString cfg.no-tcp "no-tcp"}
+${lib.optionalString cfg.no-tls "no-tls"}
+${lib.optionalString cfg.no-dtls "no-dtls"}
+${lib.optionalString cfg.no-udp-relay "no-udp-relay"}
+${lib.optionalString cfg.no-tcp-relay "no-tcp-relay"}
+${lib.optionalString (cfg.cert != null) "cert=${cfg.cert}"}
+${lib.optionalString (cfg.pkey != null) "pkey=${cfg.pkey}"}
+${lib.optionalString (cfg.dh-file != null) ("dh-file=${cfg.dh-file}")}
+no-stdout-log
+syslog
+pidfile=${pidfile}
+${lib.optionalString cfg.secure-stun "secure-stun"}
+${lib.optionalString cfg.no-cli "no-cli"}
+cli-ip=${cfg.cli-ip}
+cli-port=${toString cfg.cli-port}
+${lib.optionalString (cfg.cli-password != null) ("cli-password=${cfg.cli-password}")}
+${cfg.extraConfig}
+'';
+in {
+  options = {
+    services.coturn = {
+      enable = mkEnableOption (lib.mdDoc "coturn TURN server");
+      listening-port = mkOption {
+        type = types.int;
+        default = 3478;
+        description = lib.mdDoc ''
+          TURN listener port for UDP and TCP.
+          Note: actually, TLS and DTLS sessions can connect to the
+          "plain" TCP and UDP port(s), too - if allowed by configuration.
+        '';
+      };
+      tls-listening-port = mkOption {
+        type = types.int;
+        default = 5349;
+        description = lib.mdDoc ''
+          TURN listener port for TLS.
+          Note: actually, "plain" TCP and UDP sessions can connect to the TLS and
+          DTLS port(s), too - if allowed by configuration. The TURN server
+          "automatically" recognizes the type of traffic. Actually, two listening
+          endpoints (the "plain" one and the "tls" one) are equivalent in terms of
+          functionality; but we keep both endpoints to satisfy the RFC 5766 specs.
+          For secure TCP connections, we currently support SSL version 3 and
+          TLS version 1.0, 1.1 and 1.2.
+          For secure UDP connections, we support DTLS version 1.
+        '';
+      };
+      alt-listening-port = mkOption {
+        type = types.int;
+        default = cfg.listening-port + 1;
+        defaultText = literalExpression "listening-port + 1";
+        description = lib.mdDoc ''
+          Alternative listening port for UDP and TCP listeners;
+          default (or zero) value means "listening port plus one".
+          This is needed for RFC 5780 support
+          (STUN extension specs, NAT behavior discovery). The TURN Server
+          supports RFC 5780 only if it is started with more than one
+          listening IP address of the same family (IPv4 or IPv6).
+          RFC 5780 is supported only by UDP protocol, other protocols
+          are listening to that endpoint only for "symmetry".
+        '';
+      };
+      alt-tls-listening-port = mkOption {
+        type = types.int;
+        default = cfg.tls-listening-port + 1;
+        defaultText = literalExpression "tls-listening-port + 1";
+        description = lib.mdDoc ''
+          Alternative listening port for TLS and DTLS protocols.
+        '';
+      };
+      listening-ips = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "203.0.113.42" "2001:DB8::42" ];
+        description = lib.mdDoc ''
+          Listener IP addresses of relay server.
+          If no IP(s) specified in the config file or in the command line options,
+          then all IPv4 and IPv6 system IPs will be used for listening.
+        '';
+      };
+      relay-ips = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "203.0.113.42" "2001:DB8::42" ];
+        description = lib.mdDoc ''
+          Relay address (the local IP address that will be used to relay the
+          packets to the peer).
+          Multiple relay addresses may be used.
+          The same IP(s) can be used as both listening IP(s) and relay IP(s).
+
+          If no relay IP(s) specified, then the turnserver will apply the default
+          policy: it will decide itself which relay addresses to be used, and it
+          will always be using the client socket IP address as the relay IP address
+          of the TURN session (if the requested relay address family is the same
+          as the family of the client socket).
+        '';
+      };
+      min-port = mkOption {
+        type = types.int;
+        default = 49152;
+        description = lib.mdDoc ''
+          Lower bound of UDP relay endpoints
+        '';
+      };
+      max-port = mkOption {
+        type = types.int;
+        default = 65535;
+        description = lib.mdDoc ''
+          Upper bound of UDP relay endpoints
+        '';
+      };
+      lt-cred-mech = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Use long-term credential mechanism.
+        '';
+      };
+      no-auth = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          This option is opposite to lt-cred-mech.
+          (TURN Server with no-auth option allows anonymous access).
+          If neither option is defined, and no users are defined,
+          then no-auth is default. If at least one user is defined,
+          in this file or in command line or in usersdb file, then
+          lt-cred-mech is default.
+        '';
+      };
+      use-auth-secret = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          TURN REST API flag.
+          Flag that sets a special authorization option that is based upon authentication secret.
+          This feature can be used with the long-term authentication mechanism only.
+          The purpose of this feature is to support "TURN Server REST API", see
+          "TURN REST API" link in the project's page
+          https://github.com/coturn/coturn/
+
+          This option is used with timestamp:
+
+          usercombo -> "timestamp:userid"
+          turn user -> usercombo
+          turn password -> base64(hmac(secret key, usercombo))
+
+          This allows TURN credentials to be accounted for a specific user id.
+          If you don't have a suitable id, the timestamp alone can be used.
+          This option is just turning on secret-based authentication.
+          The actual value of the secret is defined either by option static-auth-secret,
+          or can be found in the turn_secret table in the database.
+        '';
+      };
+      static-auth-secret = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          'Static' authentication secret value (a string) for TURN REST API only.
+          If not set, then the turn server
+          will try to use the 'dynamic' value in turn_secret table
+          in user database (if present). The database-stored  value can be changed on-the-fly
+          in user database (if present). The database-stored value can be changed on-the-fly
+        '';
+      };
+      static-auth-secret-file = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Path to the file containing the static authentication secret.
+        '';
+      };
+      realm = mkOption {
+        type = types.str;
+        default = config.networking.hostName;
+        defaultText = literalExpression "config.networking.hostName";
+        example = "example.com";
+        description = lib.mdDoc ''
+          The default realm to be used for the users when no explicit
+          origin/realm relationship was found in the database, or if the TURN
+          server is not using any database (just the commands-line settings
+          and the userdb file). Must be used with long-term credentials
+          mechanism or with TURN REST API.
+        '';
+      };
+      cert = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/var/lib/acme/example.com/fullchain.pem";
+        description = lib.mdDoc ''
+          Certificate file in PEM format.
+        '';
+      };
+      pkey = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/var/lib/acme/example.com/key.pem";
+        description = lib.mdDoc ''
+          Private key file in PEM format.
+        '';
+      };
+      dh-file = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Use custom DH TLS key, stored in PEM format in the file.
+        '';
+      };
+      secure-stun = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Require authentication of the STUN Binding request.
+          By default, the clients are allowed anonymous access to the STUN Binding functionality.
+        '';
+      };
+      no-cli = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Turn OFF the CLI support.
+        '';
+      };
+      cli-ip = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          Local system IP address to be used for CLI server endpoint.
+        '';
+      };
+      cli-port = mkOption {
+        type = types.int;
+        default = 5766;
+        description = lib.mdDoc ''
+          CLI server port.
+        '';
+      };
+      cli-password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          CLI access password.
+          For security reasons, it is recommended to use the encrypted
+          form of the password (see the -P command in the turnadmin utility).
+        '';
+      };
+      no-udp = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Disable UDP client listener";
+      };
+      no-tcp = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Disable TCP client listener";
+      };
+      no-tls = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Disable TLS client listener";
+      };
+      no-dtls = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Disable DTLS client listener";
+      };
+      no-udp-relay = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Disable UDP relay endpoints";
+      };
+      no-tcp-relay = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Disable TCP relay endpoints";
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Additional configuration options";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable (mkMerge ([
+    { assertions = [
+      { assertion = cfg.static-auth-secret != null -> cfg.static-auth-secret-file == null ;
+        message = "static-auth-secret and static-auth-secret-file cannot be set at the same time";
+      }
+    ];}
+
+    {
+      users.users.turnserver =
+        { uid = config.ids.uids.turnserver;
+          group = "turnserver";
+          description = "coturn TURN server user";
+        };
+      users.groups.turnserver =
+        { gid = config.ids.gids.turnserver;
+          members = [ "turnserver" ];
+        };
+
+      systemd.services.coturn = let
+        runConfig = "/run/coturn/turnserver.cfg";
+      in {
+        description = "coturn TURN server";
+        after = [ "network-online.target" ];
+        wants = [ "network-online.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        unitConfig = {
+          Documentation = "man:coturn(1) man:turnadmin(1) man:turnserver(1)";
+        };
+
+        preStart = ''
+          cat ${configFile} > ${runConfig}
+          ${optionalString (cfg.static-auth-secret-file != null) ''
+            ${pkgs.replace-secret}/bin/replace-secret \
+              "#static-auth-secret#" \
+              ${cfg.static-auth-secret-file} \
+              ${runConfig}
+          '' }
+          chmod 640 ${runConfig}
+        '';
+        serviceConfig = {
+          Type = "simple";
+          ExecStart = "${pkgs.coturn}/bin/turnserver -c ${runConfig}";
+          RuntimeDirectory = "turnserver";
+          User = "turnserver";
+          Group = "turnserver";
+          AmbientCapabilities =
+            mkIf (
+              cfg.listening-port < 1024 ||
+              cfg.alt-listening-port < 1024 ||
+              cfg.tls-listening-port < 1024 ||
+              cfg.alt-tls-listening-port < 1024 ||
+              cfg.min-port < 1024
+            ) "cap_net_bind_service";
+          Restart = "on-abort";
+        };
+      };
+      systemd.tmpfiles.rules = [
+        "d  /run/coturn 0700 turnserver turnserver - -"
+      ];
+  }]));
+}
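A minimal sketch using the REST-API secret mechanism defined above (realm, certificate paths, and the secret file are placeholders):

```nix
{
  services.coturn = {
    enable = true;
    realm = "turn.example.com";
    use-auth-secret = true;
    static-auth-secret-file = "/run/secrets/coturn-auth-secret";  # placeholder secret file
    cert = "/var/lib/acme/turn.example.com/fullchain.pem";
    pkey = "/var/lib/acme/turn.example.com/key.pem";
  };
}
```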
diff --git a/nixpkgs/nixos/modules/services/networking/create_ap.nix b/nixpkgs/nixos/modules/services/networking/create_ap.nix
new file mode 100644
index 000000000000..994aa6d36d2a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/create_ap.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.create_ap;
+  configFile = pkgs.writeText "create_ap.conf" (generators.toKeyValue { } cfg.settings);
+in {
+  options = {
+    services.create_ap = {
+      enable = mkEnableOption (lib.mdDoc "setting up wifi hotspots using create_ap");
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ int bool str ]);
+        default = {};
+        description = lib.mdDoc ''
+          Configuration for `create_ap`.
+          See [upstream example configuration](https://raw.githubusercontent.com/lakinduakash/linux-wifi-hotspot/master/src/scripts/create_ap.conf)
+          for supported values.
+        '';
+        example = {
+          INTERNET_IFACE = "eth0";
+          WIFI_IFACE = "wlan0";
+          SSID = "My Wifi Hotspot";
+          PASSPHRASE = "12345678";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd = {
+      services.create_ap = {
+        wantedBy = [ "multi-user.target" ];
+        description = "Create AP Service";
+        after = [ "network.target" ];
+        restartTriggers = [ configFile ];
+        serviceConfig = {
+          ExecStart = "${pkgs.linux-wifi-hotspot}/bin/create_ap --config ${configFile}";
+          KillSignal = "SIGINT";
+          Restart = "on-failure";
+        };
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ onny ];
+
+}
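A minimal sketch mirroring the example settings above (interface names and credentials are placeholders):

```nix
{
  services.create_ap = {
    enable = true;
    settings = {
      INTERNET_IFACE = "eth0";
      WIFI_IFACE = "wlan0";
      SSID = "My Wifi Hotspot";
      PASSPHRASE = "12345678";
    };
  };
}
```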
diff --git a/nixpkgs/nixos/modules/services/networking/croc.nix b/nixpkgs/nixos/modules/services/networking/croc.nix
new file mode 100644
index 000000000000..45bfd447da45
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/croc.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) types;
+  cfg = config.services.croc;
+  rootDir = "/run/croc";
+in
+{
+  options.services.croc = {
+    enable = lib.mkEnableOption (lib.mdDoc "croc relay");
+    ports = lib.mkOption {
+      type = with types; listOf port;
+      default = [9009 9010 9011 9012 9013];
+      description = lib.mdDoc "Ports of the relay.";
+    };
+    pass = lib.mkOption {
+      type = with types; either path str;
+      default = "pass123";
+      description = lib.mdDoc "Password or passwordfile for the relay.";
+    };
+    openFirewall = lib.mkEnableOption (lib.mdDoc "opening of the peer port(s) in the firewall");
+    debug = lib.mkEnableOption (lib.mdDoc "debug logs");
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.croc = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.croc}/bin/croc --pass '${cfg.pass}' ${lib.optionalString cfg.debug "--debug"} relay --ports ${lib.concatMapStringsSep "," toString cfg.ports}";
+        # The following options are only for optimizing:
+        # systemd-analyze security croc
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        DynamicUser = true;
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        MountAPIVFS = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateNetwork = lib.mkDefault false;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RootDirectory = rootDir;
+        # Avoid mounting rootDir in the own rootDir of ExecStart='s mount namespace.
+        InaccessiblePaths = [ "-+${rootDir}" ];
+        BindReadOnlyPaths = [
+          builtins.storeDir
+        ] ++ lib.optional (types.path.check cfg.pass) cfg.pass;
+        # This is for BindReadOnlyPaths=
+        # to allow traversal of directories they create in RootDirectory=.
+        UMask = "0066";
+        # Create rootDir in the host's mount namespace.
+        RuntimeDirectory = [(baseNameOf rootDir)];
+        RuntimeDirectoryMode = "700";
+        SystemCallFilter = [
+          "@system-service"
+          "~@aio" "~@keyring" "~@memlock" "~@privileged" "~@setuid" "~@sync" "~@timer"
+        ];
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall cfg.ports;
+  };
+
+  meta.maintainers = with lib.maintainers; [ hax404 julm ];
+}
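A minimal relay sketch using the options above (the password is a placeholder; note it is interpolated into the service's command line):

```nix
{
  services.croc = {
    enable = true;
    pass = "correct-horse-battery";   # placeholder relay password
    openFirewall = true;
  };
}
```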
diff --git a/nixpkgs/nixos/modules/services/networking/dae.nix b/nixpkgs/nixos/modules/services/networking/dae.nix
new file mode 100644
index 000000000000..cf3fead19be5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dae.nix
@@ -0,0 +1,170 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.dae;
+  assets = cfg.assets;
+  genAssetsDrv = paths: pkgs.symlinkJoin {
+    name = "dae-assets";
+    inherit paths;
+  };
+in
+{
+  meta.maintainers = with lib.maintainers; [ pokon548 oluceps ];
+
+  options = {
+    services.dae = with lib;{
+      enable = mkEnableOption
+        (mdDoc "dae, a Linux high-performance transparent proxy solution based on eBPF");
+
+      package = mkPackageOptionMD pkgs "dae" { };
+
+
+      assets = mkOption {
+        type = with types;(listOf path);
+        default = with pkgs; [ v2ray-geoip v2ray-domain-list-community ];
+        defaultText = literalExpression "with pkgs; [ v2ray-geoip v2ray-domain-list-community ]";
+        description = mdDoc ''
+          Assets required to run dae.
+        '';
+      };
+
+      assetsPath = mkOption {
+        type = types.str;
+        default = "${genAssetsDrv assets}/share/v2ray";
+        defaultText = literalExpression ''
+          (symlinkJoin {
+              name = "dae-assets";
+              paths = assets;
+          })/share/v2ray
+        '';
+        description = mdDoc ''
+          The path which contains the geolocation database.
+          This option will override `assets`.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = with types; submodule {
+          options = {
+            enable = mkEnableOption (mdDoc "opening {option}`port` in the firewall");
+            port = mkOption {
+              type = types.port;
+              description = ''
+                Port to be opened. It should be consistent with the `tproxy_port` field in the config file.
+              '';
+            };
+          };
+        };
+        default = {
+          enable = true;
+          port = 12345;
+        };
+        defaultText = literalExpression ''
+          {
+            enable = true;
+            port = 12345;
+          }
+        '';
+        description = mdDoc ''
+          Open the firewall port.
+        '';
+      };
+
+      configFile = mkOption {
+        type = with types; (nullOr path);
+        default = null;
+        example = "/path/to/your/config.dae";
+        description = mdDoc ''
+          The path of dae config file, end with `.dae`.
+        '';
+      };
+
+      config = mkOption {
+        type = with types; (nullOr str);
+        default = null;
+        description = mdDoc ''
+          WARNING: This option will store your config unencrypted and world-readable in the Nix store.
+          Config text for dae.
+
+          See <https://github.com/daeuniverse/dae/blob/main/example.dae>.
+        '';
+      };
+
+      disableTxChecksumIpGeneric =
+        mkEnableOption "" // { description = mdDoc "See <https://github.com/daeuniverse/dae/issues/43>"; };
+
+    };
+  };
+
+  config = lib.mkIf cfg.enable
+
+    {
+      environment.systemPackages = [ cfg.package ];
+      systemd.packages = [ cfg.package ];
+
+      networking = lib.mkIf cfg.openFirewall.enable {
+        firewall =
+          let portToOpen = cfg.openFirewall.port;
+          in
+          {
+            allowedTCPPorts = [ portToOpen ];
+            allowedUDPPorts = [ portToOpen ];
+          };
+      };
+
+      systemd.services.dae =
+        let
+          daeBin = lib.getExe cfg.package;
+
+          configPath =
+            if cfg.configFile != null
+            then cfg.configFile else pkgs.writeText "config.dae" cfg.config;
+
+          TxChecksumIpGenericWorkaround = with lib;
+            (getExe pkgs.writeShellApplication {
+              name = "disable-tx-checksum-ip-generic";
+              text = with pkgs; ''
+                iface=$(${iproute2}/bin/ip route | ${lib.getExe gawk} '/default/ {print $5}')
+                ${lib.getExe ethtool} -K "$iface" tx-checksum-ip-generic off
+              '';
+            });
+        in
+        {
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            LoadCredential = [ "config.dae:${configPath}" ];
+            ExecStartPre = [ "" "${daeBin} validate -c \${CREDENTIALS_DIRECTORY}/config.dae" ]
+              ++ (with lib; optional cfg.disableTxChecksumIpGeneric TxChecksumIpGenericWorkaround);
+            ExecStart = [ "" "${daeBin} run --disable-timestamp -c \${CREDENTIALS_DIRECTORY}/config.dae" ];
+            Environment = "DAE_LOCATION_ASSET=${cfg.assetsPath}";
+          };
+        };
+
+      assertions = [
+        {
+          assertion = lib.pathExists (toString (genAssetsDrv cfg.assets) + "/share/v2ray");
+          message = ''
+            Packages in `assets` have no preset paths included.
+            Please set `assetsPath` instead.
+          '';
+        }
+
+        {
+          assertion = !((config.services.dae.config != null)
+            && (config.services.dae.configFile != null));
+          message = ''
+            Options `config` and `configFile` cannot be set
+            at the same time.
+          '';
+        }
+
+        {
+          assertion = !((config.services.dae.config == null)
+            && (config.services.dae.configFile == null));
+          message = ''
+            Either `config` or `configFile` should be set.
+          '';
+        }
+      ];
+    };
+}
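A minimal sketch pointing the service at an external config file (the path is a placeholder); per the assertions above, exactly one of `config` or `configFile` must be set:

```nix
{
  services.dae = {
    enable = true;
    configFile = "/etc/dae/config.dae";              # placeholder path ending in .dae
    openFirewall = { enable = true; port = 12345; }; # keep in sync with tproxy_port
  };
}
```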
diff --git a/nixpkgs/nixos/modules/services/networking/dante.nix b/nixpkgs/nixos/modules/services/networking/dante.nix
new file mode 100644
index 000000000000..605f2d74f827
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dante.nix
@@ -0,0 +1,62 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  cfg = config.services.dante;
+  confFile = pkgs.writeText "dante-sockd.conf" ''
+    user.privileged: root
+    user.unprivileged: dante
+    logoutput: syslog
+
+    ${cfg.config}
+  '';
+in
+
+{
+  meta = {
+    maintainers = with maintainers; [ arobyn ];
+  };
+
+  options = {
+    services.dante = {
+      enable = mkEnableOption (lib.mdDoc "Dante SOCKS proxy");
+
+      config = mkOption {
+        type        = types.lines;
+        description = lib.mdDoc ''
+          Contents of Dante's configuration file.
+          NOTE: user.privileged, user.unprivileged and logoutput are set by the service.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion   = cfg.config != "";
+        message     = "please provide Dante configuration file contents";
+      }
+    ];
+
+    users.users.dante = {
+      description   = "Dante SOCKS proxy daemon user";
+      isSystemUser  = true;
+      group         = "dante";
+    };
+    users.groups.dante = {};
+
+    systemd.services.dante = {
+      description   = "Dante SOCKS v4 and v5 compatible proxy server";
+      after         = [ "network-online.target" ];
+      wantedBy      = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type        = "simple";
+        ExecStart   = "${pkgs.dante}/bin/sockd -f ${confFile}";
+        ExecReload  = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        # Can crash sometimes; see https://github.com/NixOS/nixpkgs/pull/39005#issuecomment-381828708
+        Restart     = "on-failure";
+      };
+    };
+  };
+}
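A minimal SOCKS proxy sketch; the rule syntax follows sockd.conf(5), and the interface name and client subnet are placeholders:

```nix
{
  services.dante = {
    enable = true;
    config = ''
      internal: eth0 port = 1080
      external: eth0
      clientmethod: none
      socksmethod: none

      client pass { from: 192.168.1.0/24 to: 0.0.0.0/0 }
      socks pass  { from: 192.168.1.0/24 to: 0.0.0.0/0 }
    '';
  };
}
```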
diff --git a/nixpkgs/nixos/modules/services/networking/ddclient.nix b/nixpkgs/nixos/modules/services/networking/ddclient.nix
new file mode 100644
index 000000000000..8f4fb0bc78d4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ddclient.nix
@@ -0,0 +1,234 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.ddclient;
+  boolToStr = bool: if bool then "yes" else "no";
+  dataDir = "/var/lib/ddclient";
+  StateDirectory = builtins.baseNameOf dataDir;
+  RuntimeDirectory = StateDirectory;
+
+  configFile' = pkgs.writeText "ddclient.conf" ''
+    # This file can be used as a template for configFile or is automatically generated by Nix options.
+    cache=${dataDir}/ddclient.cache
+    foreground=YES
+    use=${cfg.use}
+    login=${cfg.username}
+    password=${if cfg.protocol == "nsupdate" then "/run/${RuntimeDirectory}/ddclient.key" else "@password_placeholder@"}
+    protocol=${cfg.protocol}
+    ${lib.optionalString (cfg.script != "") "script=${cfg.script}"}
+    ${lib.optionalString (cfg.server != "") "server=${cfg.server}"}
+    ${lib.optionalString (cfg.zone != "")   "zone=${cfg.zone}"}
+    ssl=${boolToStr cfg.ssl}
+    wildcard=YES
+    quiet=${boolToStr cfg.quiet}
+    verbose=${boolToStr cfg.verbose}
+    ${cfg.extraConfig}
+    ${lib.concatStringsSep "," cfg.domains}
+  '';
+  configFile = if (cfg.configFile != null) then cfg.configFile else configFile';
+
+  preStart = ''
+    install --mode=600 --owner=$USER ${configFile} /run/${RuntimeDirectory}/ddclient.conf
+    ${lib.optionalString (cfg.configFile == null) (if (cfg.protocol == "nsupdate") then ''
+      install --mode=600 --owner=$USER ${cfg.passwordFile} /run/${RuntimeDirectory}/ddclient.key
+    '' else if (cfg.passwordFile != null) then ''
+      "${pkgs.replace-secret}/bin/replace-secret" "@password_placeholder@" "${cfg.passwordFile}" "/run/${RuntimeDirectory}/ddclient.conf"
+    '' else ''
+      sed -i '/^password=@password_placeholder@$/d' /run/${RuntimeDirectory}/ddclient.conf
+    '')}
+  '';
+
+in
+
+with lib;
+
+{
+
+  imports = [
+    (mkChangedOptionModule [ "services" "ddclient" "domain" ] [ "services" "ddclient" "domains" ]
+      (config:
+        let value = getAttrFromPath [ "services" "ddclient" "domain" ] config;
+        in optional (value != "") value))
+    (mkRemovedOptionModule [ "services" "ddclient" "homeDir" ] "")
+    (mkRemovedOptionModule [ "services" "ddclient" "password" ] "Use services.ddclient.passwordFile instead.")
+    (mkRemovedOptionModule [ "services" "ddclient" "ipv6" ] "")
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.ddclient = with lib.types; {
+
+      enable = mkOption {
+        default = false;
+        type = bool;
+        description = lib.mdDoc ''
+          Whether to synchronise your machine's IP address with a dynamic DNS provider (e.g. dyndns.org).
+        '';
+      };
+
+      package = mkOption {
+        type = package;
+        default = pkgs.ddclient;
+        defaultText = lib.literalExpression "pkgs.ddclient";
+        description = lib.mdDoc ''
+          The ddclient executable package run by the service.
+        '';
+      };
+
+      domains = mkOption {
+        default = [ "" ];
+        type = listOf str;
+        description = lib.mdDoc ''
+          Domain name(s) to synchronize.
+        '';
+      };
+
+      username = mkOption {
+        # For `nsupdate` username contains the path to the nsupdate executable
+        default = lib.optionalString (config.services.ddclient.protocol == "nsupdate") "${pkgs.bind.dnsutils}/bin/nsupdate";
+        defaultText = "";
+        type = str;
+        description = lib.mdDoc ''
+          User name.
+        '';
+      };
+
+      passwordFile = mkOption {
+        default = null;
+        type = nullOr str;
+        description = lib.mdDoc ''
+          A file containing the password or a TSIG key in named format when using the nsupdate protocol.
+        '';
+      };
+
+      interval = mkOption {
+        default = "10min";
+        type = str;
+        description = lib.mdDoc ''
+          The interval at which to run the check and update.
+          See {command}`man 7 systemd.time` for the format.
+        '';
+      };
+
+      configFile = mkOption {
+        default = null;
+        type = nullOr path;
+        description = lib.mdDoc ''
+          Path to configuration file.
+          When set this overrides the generated configuration from module options.
+        '';
+        example = "/root/nixos/secrets/ddclient.conf";
+      };
+
+      protocol = mkOption {
+        default = "dyndns2";
+        type = str;
+        description = lib.mdDoc ''
+          Protocol to use with dynamic DNS provider (see https://sourceforge.net/p/ddclient/wiki/protocols).
+        '';
+      };
+
+      server = mkOption {
+        default = "";
+        type = str;
+        description = lib.mdDoc ''
+          Server address.
+        '';
+      };
+
+      ssl = mkOption {
+        default = true;
+        type = bool;
+        description = lib.mdDoc ''
+          Whether to use SSL/TLS to connect to dynamic DNS provider.
+        '';
+      };
+
+      quiet = mkOption {
+        default = false;
+        type = bool;
+        description = lib.mdDoc ''
+          Print no messages for unnecessary updates.
+        '';
+      };
+
+      script = mkOption {
+        default = "";
+        type = str;
+        description = lib.mdDoc ''
+          Script as required by some providers.
+        '';
+      };
+
+      use = mkOption {
+        default = "web, web=checkip.dyndns.com/, web-skip='Current IP Address: '";
+        type = str;
+        description = lib.mdDoc ''
+          Method to determine the IP address to send to the dynamic DNS provider.
+        '';
+      };
+
+      verbose = mkOption {
+        default = false;
+        type = bool;
+        description = lib.mdDoc ''
+          Print verbose information.
+        '';
+      };
+
+      zone = mkOption {
+        default = "";
+        type = str;
+        description = lib.mdDoc ''
+          Zone as required by some providers.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = lines;
+        description = lib.mdDoc ''
+          Extra configuration. Contents will be added verbatim to the configuration file.
+
+          ::: {.note}
+          `daemon` should not be added here because it does not work well with the systemd-timer approach the service uses.
+          :::
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.ddclient.enable {
+    systemd.services.ddclient = {
+      description = "Dynamic DNS Client";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      restartTriggers = optional (cfg.configFile != null) cfg.configFile;
+      path = lib.optional (lib.hasPrefix "if," cfg.use) pkgs.iproute2;
+
+      serviceConfig = {
+        DynamicUser = true;
+        RuntimeDirectoryMode = "0700";
+        inherit RuntimeDirectory;
+        inherit StateDirectory;
+        Type = "oneshot";
+        ExecStartPre = "!${pkgs.writeShellScript "ddclient-prestart" preStart}";
+        ExecStart = "${lib.getExe cfg.package} -file /run/${RuntimeDirectory}/ddclient.conf";
+      };
+    };
+
+    systemd.timers.ddclient = {
+      description = "Run ddclient";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnBootSec = cfg.interval;
+        OnUnitInactiveSec = cfg.interval;
+      };
+    };
+  };
+}
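
A minimal sketch (not part of the patch) of how the ddclient module above might be enabled from a NixOS configuration; the domain, account name and secret path are placeholder values:

    services.ddclient = {
      enable = true;
      protocol = "dyndns2";                 # module default, shown for clarity
      domains = [ "home.example.org" ];     # hypothetical domain
      username = "alice";                   # hypothetical account name
      passwordFile = "/run/keys/ddclient";  # hypothetical secret path
      interval = "10min";
    };
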
diff --git a/nixpkgs/nixos/modules/services/networking/deconz.nix b/nixpkgs/nixos/modules/services/networking/deconz.nix
new file mode 100644
index 000000000000..05b724708777
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/deconz.nix
@@ -0,0 +1,125 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.deconz;
+  name = "deconz";
+  stateDir = "/var/lib/${name}";
+  # ref. upstream deconz.service
+  capabilities =
+    lib.optionals (cfg.httpPort < 1024 || cfg.wsPort < 1024) [ "CAP_NET_BIND_SERVICE" ]
+    ++ lib.optionals (cfg.allowRebootSystem) [ "CAP_SYS_BOOT" ]
+    ++ lib.optionals (cfg.allowRestartService) [ "CAP_KILL" ]
+    ++ lib.optionals (cfg.allowSetSystemTime) [ "CAP_SYS_TIME" ];
+in
+{
+  options.services.deconz = {
+
+    enable = lib.mkEnableOption "deCONZ, a Zigbee gateway for use with ConBee hardware (https://phoscon.de/en/conbee2)";
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.deconz;
+      defaultText = lib.literalExpression "pkgs.deconz";
+      description = "Which deCONZ package to use.";
+    };
+
+    device = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = ''
+        Force deCONZ to use a specific USB device (e.g. /dev/ttyACM0). By
+        default it searches for a suitable device automatically.
+      '';
+    };
+
+    listenAddress = lib.mkOption {
+      type = lib.types.str;
+      default = "127.0.0.1";
+      description = ''
+        Pin deCONZ to the network interface specified through the provided IP
+        address. This applies to the web server as well as the WebSocket
+        notifications.
+      '';
+    };
+
+    httpPort = lib.mkOption {
+      type = lib.types.port;
+      default = 80;
+      description = "TCP port for the web server.";
+    };
+
+    wsPort = lib.mkOption {
+      type = lib.types.port;
+      default = 443;
+      description = "TCP port for the WebSocket.";
+    };
+
+    openFirewall = lib.mkEnableOption "opening up the service ports in the firewall";
+
+    allowRebootSystem = lib.mkEnableOption "rebooting the system";
+
+    allowRestartService = lib.mkEnableOption "killing/restarting processes";
+
+    allowSetSystemTime = lib.mkEnableOption "setting the system time";
+
+    extraArgs = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
+      default = [ ];
+      example = [
+        "--dbg-info=1"
+        "--dbg-err=2"
+      ];
+      description = ''
+        Extra command line arguments for deCONZ, see
+        https://github.com/dresden-elektronik/deconz-rest-plugin/wiki/deCONZ-command-line-parameters.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    networking.firewall.allowedTCPPorts = lib.mkIf cfg.openFirewall [
+      cfg.httpPort
+      cfg.wsPort
+    ];
+
+    services.udev.packages = [ cfg.package ];
+
+    systemd.services.deconz = {
+      description = "deCONZ Zigbee gateway";
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        # The service puts a nix store path reference in here, and that path can
+        # be garbage collected. Ensure the file gets "refreshed" on every start.
+        rm -f ${stateDir}/.local/share/dresden-elektronik/deCONZ/zcldb.txt
+      '';
+      environment = {
+        HOME = stateDir;
+        XDG_RUNTIME_DIR = "/run/${name}";
+      };
+      serviceConfig = {
+        ExecStart =
+          "${lib.getExe cfg.package}"
+          + " -platform minimal"
+          + " --http-listen=${cfg.listenAddress}"
+          + " --http-port=${toString cfg.httpPort}"
+          + " --ws-port=${toString cfg.wsPort}"
+          + " --auto-connect=1"
+          + (lib.optionalString (cfg.device != null) " --dev=${cfg.device}")
+          + " " + (lib.escapeShellArgs cfg.extraArgs);
+        Restart = "on-failure";
+        AmbientCapabilities = capabilities;
+        CapabilityBoundingSet = capabilities;
+        UMask = "0027";
+        DynamicUser = true;
+        RuntimeDirectory = name;
+        RuntimeDirectoryMode = "0700";
+        StateDirectory = name;
+        WorkingDirectory = stateDir;
+        # For access to /dev/ttyACM0 (ConBee).
+        SupplementaryGroups = [ "dialout" ];
+        ProtectHome = true;
+      };
+    };
+  };
+}
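
A configuration sketch for the deCONZ module above, assuming a ConBee stick on /dev/ttyACM0 and unprivileged ports (so no extra capabilities are granted); device path and ports are illustrative only:

    services.deconz = {
      enable = true;
      device = "/dev/ttyACM0";   # hypothetical device path
      listenAddress = "0.0.0.0";
      httpPort = 8080;           # above 1024, so CAP_NET_BIND_SERVICE is not added
      wsPort = 8443;
      openFirewall = true;
    };
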
diff --git a/nixpkgs/nixos/modules/services/networking/dhcpcd.nix b/nixpkgs/nixos/modules/services/networking/dhcpcd.nix
new file mode 100644
index 000000000000..8b6d3fc55f3e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dhcpcd.nix
@@ -0,0 +1,272 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  dhcpcd = if !config.boot.isContainer then pkgs.dhcpcd else pkgs.dhcpcd.override { udev = null; };
+
+  cfg = config.networking.dhcpcd;
+
+  interfaces = attrValues config.networking.interfaces;
+
+  enableDHCP = config.networking.dhcpcd.enable &&
+        (config.networking.useDHCP || any (i: i.useDHCP == true) interfaces);
+
+  # Don't start dhcpcd on explicitly configured interfaces or on
+  # interfaces that are part of a bridge, bond or sit device.
+  ignoredInterfaces =
+    map (i: i.name) (filter (i: if i.useDHCP != null then !i.useDHCP else i.ipv4.addresses != [ ]) interfaces)
+    ++ mapAttrsToList (i: _: i) config.networking.sits
+    ++ concatLists (attrValues (mapAttrs (n: v: v.interfaces) config.networking.bridges))
+    ++ flatten (concatMap (i: attrNames (filterAttrs (_: config: config.type != "internal") i.interfaces)) (attrValues config.networking.vswitches))
+    ++ concatLists (attrValues (mapAttrs (n: v: v.interfaces) config.networking.bonds))
+    ++ config.networking.dhcpcd.denyInterfaces;
+
+  arrayAppendOrNull = a1: a2: if a1 == null && a2 == null then null
+    else if a1 == null then a2 else if a2 == null then a1
+      else a1 ++ a2;
+
+  # If dhcp is disabled but explicit interfaces are enabled,
+  # we need to provide dhcp just for those interfaces.
+  allowInterfaces = arrayAppendOrNull cfg.allowInterfaces
+    (if !config.networking.useDHCP && enableDHCP then
+      map (i: i.name) (filter (i: i.useDHCP == true) interfaces) else null);
+
+  staticIPv6Addresses = map (i: i.name) (filter (i: i.ipv6.addresses != [ ]) interfaces);
+
+  noIPv6rs = concatStringsSep "\n" (map (name: ''
+    interface ${name}
+    noipv6rs
+  '') staticIPv6Addresses);
+
+  # Config file adapted from the one that ships with dhcpcd.
+  dhcpcdConf = pkgs.writeText "dhcpcd.conf"
+    ''
+      # Inform the DHCP server of our hostname for DDNS.
+      hostname
+
+      # A list of options to request from the DHCP server.
+      option domain_name_servers, domain_name, domain_search, host_name
+      option classless_static_routes, ntp_servers, interface_mtu
+
+      # A ServerID is required by RFC2131.
+      # Commented out because of many non-compliant DHCP servers in the wild :(
+      #require dhcp_server_identifier
+
+      # A hook script is provided to lookup the hostname if not set by
+      # the DHCP server, but it should not be run by default.
+      nohook lookup-hostname
+
+      # Ignore peth* devices; on Xen, they're renamed physical
+      # Ethernet cards used for bridging.  Likewise for vif* and tap*
+      # (Xen) and virbr* and vnet* (libvirt).
+      denyinterfaces ${toString ignoredInterfaces} lo peth* vif* tap* tun* virbr* vnet* vboxnet* sit*
+
+      # Use the list of allowed interfaces if specified
+      ${optionalString (allowInterfaces != null) "allowinterfaces ${toString allowInterfaces}"}
+
+      # Immediately fork to background if specified, otherwise wait for IP address to be assigned
+      ${{
+        background = "background";
+        any = "waitip";
+        ipv4 = "waitip 4";
+        ipv6 = "waitip 6";
+        both = "waitip 4\nwaitip 6";
+        if-carrier-up = "";
+      }.${cfg.wait}}
+
+      ${optionalString (config.networking.enableIPv6 == false) ''
+        # Don't solicit or accept IPv6 Router Advertisements and DHCPv6 if disabled IPv6
+        noipv6
+      ''}
+
+      ${optionalString (config.networking.enableIPv6 && cfg.IPv6rs == null && staticIPv6Addresses != [ ]) noIPv6rs}
+      ${optionalString (config.networking.enableIPv6 && cfg.IPv6rs == false) ''
+        noipv6rs
+      ''}
+
+      ${cfg.extraConfig}
+    '';
+
+  exitHook = pkgs.writeText "dhcpcd.exit-hook"
+    ''
+      if [ "$reason" = BOUND -o "$reason" = REBOOT ]; then
+          # Restart ntpd.  We need to restart it to make sure that it
+          # will actually do something: if ntpd cannot resolve the
+          # server hostnames in its config file, then it will never do
+          # anything ever again ("couldn't resolve ..., giving up on
+          # it"), so we silently lose time synchronisation. This also
+          # applies to openntpd.
+          /run/current-system/systemd/bin/systemctl try-reload-or-restart ntpd.service openntpd.service chronyd.service || true
+      fi
+
+      ${cfg.runHook}
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    networking.dhcpcd.enable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable dhcpcd for device configuration. This option exists
+        mainly so that dhcpcd can be explicitly disabled (for example when using networkd).
+      '';
+    };
+
+    networking.dhcpcd.persistent = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+          Whether to leave interfaces configured when the dhcpcd daemon
+          shuts down. Set to true if your root filesystem or Nix store is
+          mounted over the network, or if this machine accepts SSH
+          connections through DHCP-configured interfaces and clients should
+          be notified when it shuts down.
+      '';
+    };
+
+    networking.dhcpcd.denyInterfaces = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+         Disable the DHCP client for any interface whose name matches
+         any of the shell glob patterns in this list. The purpose of
+         this option is to blacklist virtual interfaces such as those
+         created by Xen, libvirt, LXC, etc.
+      '';
+    };
+
+    networking.dhcpcd.allowInterfaces = mkOption {
+      type = types.nullOr (types.listOf types.str);
+      default = null;
+      description = lib.mdDoc ''
+         Enable the DHCP client for any interface whose name matches
+         any of the shell glob patterns in this list. Any interface not
+         explicitly matched by these patterns will be denied. These patterns
+         only apply when the list is non-null.
+      '';
+    };
+
+    networking.dhcpcd.extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+         Literal string to append to the config file generated for dhcpcd.
+      '';
+    };
+
+    networking.dhcpcd.IPv6rs = mkOption {
+      type = types.nullOr types.bool;
+      default = null;
+      description = lib.mdDoc ''
+        Force enable or disable solicitation and receipt of IPv6 Router Advertisements.
+        This is required, for example, when using a static unique local IPv6 address (ULA)
+        and global IPv6 address auto-configuration with SLAAC.
+      '';
+    };
+
+    networking.dhcpcd.runHook = mkOption {
+      type = types.lines;
+      default = "";
+      example = "if [[ $reason =~ BOUND ]]; then echo $interface: Routers are $new_routers - were $old_routers; fi";
+      description = lib.mdDoc ''
+         Shell code that will be run after all other hooks. See
+         `man dhcpcd-run-hooks` for details on what is possible.
+      '';
+    };
+
+    networking.dhcpcd.wait = mkOption {
+      type = types.enum [ "background" "any" "ipv4" "ipv6" "both" "if-carrier-up" ];
+      default = "any";
+      description = lib.mdDoc ''
+        This option specifies when the dhcpcd service will fork to background.
+        If set to "background", dhcpcd will fork to background immediately.
+        If set to "ipv4" or "ipv6", dhcpcd will wait for the corresponding IP
+        address to be assigned. If set to "any", dhcpcd will wait for any type
+        (IPv4 or IPv6) to be assigned. If set to "both", dhcpcd will wait for
+        both an IPv4 and an IPv6 address before forking.
+        The option "if-carrier-up" is equivalent to "any" if either ethernet
+        is plugged nor WiFi is powered, and to "background" otherwise.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf enableDHCP {
+
+    assertions = [ {
+      # dhcpcd doesn't start properly with malloc ∉ [ libc scudo ]
+      # see https://github.com/NixOS/nixpkgs/issues/151696
+      assertion =
+        dhcpcd.enablePrivSep
+          -> elem config.environment.memoryAllocator.provider [ "libc" "scudo" ];
+      message = ''
+        dhcpcd with privilege separation is incompatible with chosen system malloc.
+          Currently only the `libc` and `scudo` allocators are known to work.
+          To disable dhcpcd's privilege separation, overlay Nixpkgs and override dhcpcd
+          to set `enablePrivSep = false`.
+      '';
+    } ];
+
+    systemd.services.dhcpcd = let
+      cfgN = config.networking;
+      hasDefaultGatewaySet = (cfgN.defaultGateway != null && cfgN.defaultGateway.address != "")
+                          && (!cfgN.enableIPv6 || (cfgN.defaultGateway6 != null && cfgN.defaultGateway6.address != ""));
+    in
+      { description = "DHCP Client";
+
+        wantedBy = [ "multi-user.target" ] ++ optional (!hasDefaultGatewaySet) "network-online.target";
+        wants = [ "network.target" ];
+        before = [ "network-online.target" ];
+
+        restartTriggers = [ exitHook ];
+
+        # Stopping dhcpcd during a reconfiguration is undesirable
+        # because it brings down the network interfaces configured by
+        # dhcpcd.  So do a "systemctl restart" instead.
+        stopIfChanged = false;
+
+        path = [ dhcpcd pkgs.nettools config.networking.resolvconf.package ];
+
+        unitConfig.ConditionCapability = "CAP_NET_ADMIN";
+
+        serviceConfig =
+          { Type = "forking";
+            PIDFile = "/run/dhcpcd/pid";
+            RuntimeDirectory = "dhcpcd";
+            ExecStart = "@${dhcpcd}/sbin/dhcpcd dhcpcd --quiet ${optionalString cfg.persistent "--persistent"} --config ${dhcpcdConf}";
+            ExecReload = "${dhcpcd}/sbin/dhcpcd --rebind";
+            Restart = "always";
+          };
+      };
+
+    users.users.dhcpcd = {
+      isSystemUser = true;
+      group = "dhcpcd";
+    };
+    users.groups.dhcpcd = {};
+
+    environment.systemPackages = [ dhcpcd ];
+
+    environment.etc."dhcpcd.exit-hook".source = exitHook;
+
+    powerManagement.resumeCommands = mkIf config.systemd.services.dhcpcd.enable
+      ''
+        # Tell dhcpcd to rebind its interfaces if it's running.
+        /run/current-system/systemd/bin/systemctl reload dhcpcd.service
+      '';
+
+  };
+
+}
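
A sketch of combining the dhcpcd options above, e.g. keeping leases across a daemon shutdown and skipping virtual interfaces; the glob patterns and extraConfig line are illustrative assumptions, not defaults:

    networking.dhcpcd = {
      enable = true;
      persistent = true;
      denyInterfaces = [ "veth*" "docker*" ];  # hypothetical glob patterns
      wait = "background";
      extraConfig = ''
        noarp
      '';
    };
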
diff --git a/nixpkgs/nixos/modules/services/networking/dnscache.nix b/nixpkgs/nixos/modules/services/networking/dnscache.nix
new file mode 100644
index 000000000000..eff13f69f470
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dnscache.nix
@@ -0,0 +1,108 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dnscache;
+
+  dnscache-root = pkgs.runCommand "dnscache-root" { preferLocalBuild = true; } ''
+    mkdir -p $out/{servers,ip}
+
+    ${concatMapStrings (ip: ''
+      touch "$out/ip/"${lib.escapeShellArg ip}
+    '') cfg.clientIps}
+
+    ${concatStrings (mapAttrsToList (host: ips: ''
+      ${concatMapStrings (ip: ''
+        echo ${lib.escapeShellArg ip} >> "$out/servers/"${lib.escapeShellArg host}
+      '') ips}
+    '') cfg.domainServers)}
+
+    # if a list of root servers was not provided in config, copy it
+    # over. (this is also done by dnscache-conf, but we 'rm -rf
+    # /var/lib/dnscache/root' below & replace it wholesale with this,
+    # so we have to ensure servers/@ exists ourselves.)
+    if [ ! -e $out/servers/@ ]; then
+      # a symlink does not work here, due to the chroot
+      cp ${pkgs.djbdns}/etc/dnsroots.global $out/servers/@;
+    fi
+  '';
+
+in {
+
+  ###### interface
+
+  options = {
+    services.dnscache = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to run the dnscache caching dns server.";
+      };
+
+      ip = mkOption {
+        default = "0.0.0.0";
+        type = types.str;
+        description = lib.mdDoc "IP address on which to listen for connections.";
+      };
+
+      clientIps = mkOption {
+        default = [ "127.0.0.1" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc "Client IP addresses (or prefixes) from which to accept connections.";
+        example = ["192.168" "172.23.75.82"];
+      };
+
+      domainServers = mkOption {
+        default = { };
+        type = types.attrsOf (types.listOf types.str);
+        description = lib.mdDoc ''
+          Table of {hostname: servers} pairs to use as authoritative servers for hosts (and subhosts).
+          If no entry for @ is specified, the predefined list of root servers is used.
+        '';
+        example = literalExpression ''
+          {
+            "@" = ["8.8.8.8" "8.8.4.4"];
+            "example.com" = ["192.168.100.100"];
+          }
+        '';
+      };
+
+      forwardOnly = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to treat root servers (for @) as caching
+          servers, requesting addresses the same way a client does. This is
+          needed if you want to use e.g. Google DNS as your upstream DNS.
+        '';
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.dnscache.enable {
+    environment.systemPackages = [ pkgs.djbdns ];
+    users.users.dnscache.isSystemUser = true;
+
+    systemd.services.dnscache = {
+      description = "djbdns dnscache server";
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ bash daemontools djbdns ];
+      preStart = ''
+        rm -rf /var/lib/dnscache
+        dnscache-conf dnscache dnscache /var/lib/dnscache ${config.services.dnscache.ip}
+        rm -rf /var/lib/dnscache/root
+        ln -sf ${dnscache-root} /var/lib/dnscache/root
+      '';
+      script = ''
+        cd /var/lib/dnscache/
+        ${optionalString cfg.forwardOnly "export FORWARDONLY=1"}
+        exec ./run
+      '';
+    };
+  };
+}
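
A sketch of a forward-only dnscache setup using the options above; the upstream resolver and the internal zone are placeholder values:

    services.dnscache = {
      enable = true;
      clientIps = [ "127.0.0.1" "192.168" ];
      forwardOnly = true;
      domainServers = {
        "@" = [ "9.9.9.9" ];                # hypothetical upstream resolver
        "lan.example" = [ "192.168.1.1" ];  # hypothetical internal zone
      };
    };
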
diff --git a/nixpkgs/nixos/modules/services/networking/dnscrypt-proxy2.nix b/nixpkgs/nixos/modules/services/networking/dnscrypt-proxy2.nix
new file mode 100644
index 000000000000..4592a0c2f6b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dnscrypt-proxy2.nix
@@ -0,0 +1,123 @@
+{ config, lib, pkgs, ... }: with lib;
+
+let
+  cfg = config.services.dnscrypt-proxy2;
+in
+
+{
+  options.services.dnscrypt-proxy2 = {
+    enable = mkEnableOption (lib.mdDoc "dnscrypt-proxy2");
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Attribute set that is converted to TOML and passed to dnscrypt-proxy as its config file.
+        For available parameters, see: <https://github.com/DNSCrypt/dnscrypt-proxy/blob/${pkgs.dnscrypt-proxy.version}/dnscrypt-proxy/example-dnscrypt-proxy.toml>
+      '';
+      example = literalExpression ''
+        {
+          sources.public-resolvers = {
+            urls = [ "https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md" ];
+            cache_file = "public-resolvers.md";
+            minisign_key = "RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3";
+            refresh_delay = 72;
+          };
+        }
+      '';
+      type = types.attrs;
+      default = {};
+    };
+
+    upstreamDefaults = mkOption {
+      description = lib.mdDoc ''
+        Whether to base the config declared in {option}`services.dnscrypt-proxy2.settings` on the upstream example config (<https://github.com/DNSCrypt/dnscrypt-proxy/blob/master/dnscrypt-proxy/example-dnscrypt-proxy.toml>)
+
+        Disable this if you want to declare your dnscrypt config from scratch.
+      '';
+      type = types.bool;
+      default = true;
+    };
+
+    configFile = mkOption {
+      description = lib.mdDoc ''
+        Path to TOML config file. See: <https://github.com/DNSCrypt/dnscrypt-proxy/blob/master/dnscrypt-proxy/example-dnscrypt-proxy.toml>
+        If this option is set, it will override any configuration done in options.services.dnscrypt-proxy2.settings.
+      '';
+      example = "/etc/dnscrypt-proxy/dnscrypt-proxy.toml";
+      type = types.path;
+      default = pkgs.runCommand "dnscrypt-proxy.toml" {
+        json = builtins.toJSON cfg.settings;
+        passAsFile = [ "json" ];
+      } ''
+        ${if cfg.upstreamDefaults then ''
+          ${pkgs.remarshal}/bin/toml2json ${pkgs.dnscrypt-proxy.src}/dnscrypt-proxy/example-dnscrypt-proxy.toml > example.json
+          ${pkgs.jq}/bin/jq --slurp add example.json $jsonPath > config.json # merges the two
+        '' else ''
+          cp $jsonPath config.json
+        ''}
+        ${pkgs.remarshal}/bin/json2toml < config.json > $out
+      '';
+      defaultText = literalMD "TOML file generated from {option}`services.dnscrypt-proxy2.settings`";
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    networking.nameservers = lib.mkDefault [ "127.0.0.1" ];
+
+    systemd.services.dnscrypt-proxy2 = {
+      description = "DNSCrypt-proxy client";
+      wants = [
+        "network-online.target"
+        "nss-lookup.target"
+      ];
+      before = [
+        "nss-lookup.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+      serviceConfig = {
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        CacheDirectory = "dnscrypt-proxy";
+        DynamicUser = true;
+        ExecStart = "${pkgs.dnscrypt-proxy}/bin/dnscrypt-proxy -config ${cfg.configFile}";
+        LockPersonality = true;
+        LogsDirectory = "dnscrypt-proxy";
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        NonBlocking = true;
+        PrivateDevices = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        Restart = "always";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RuntimeDirectory = "dnscrypt-proxy";
+        StateDirectory = "dnscrypt-proxy";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "@chown"
+          "~@aio"
+          "~@keyring"
+          "~@memlock"
+          "~@setuid"
+          "~@timer"
+        ];
+      };
+    };
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
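
A settings sketch for the dnscrypt-proxy2 module above, staying close to the upstream example config that it merges with by default; the listen address and resolver name are illustrative assumptions:

    services.dnscrypt-proxy2 = {
      enable = true;
      settings = {
        listen_addresses = [ "127.0.0.1:53" ];
        server_names = [ "cloudflare" ];  # hypothetical name from the public-resolvers source
        require_dnssec = true;
      };
    };
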
diff --git a/nixpkgs/nixos/modules/services/networking/dnscrypt-wrapper.nix b/nixpkgs/nixos/modules/services/networking/dnscrypt-wrapper.nix
new file mode 100644
index 000000000000..741f054cd88b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dnscrypt-wrapper.nix
@@ -0,0 +1,275 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  cfg     = config.services.dnscrypt-wrapper;
+  dataDir = "/var/lib/dnscrypt-wrapper";
+
+  mkPath = path: default:
+    if path != null
+      then toString path
+      else default;
+
+  publicKey = mkPath cfg.providerKey.public "${dataDir}/public.key";
+  secretKey = mkPath cfg.providerKey.secret "${dataDir}/secret.key";
+
+  daemonArgs = with cfg; [
+    "--listen-address=${address}:${toString port}"
+    "--resolver-address=${upstream.address}:${toString upstream.port}"
+    "--provider-name=${providerName}"
+    "--provider-publickey-file=${publicKey}"
+    "--provider-secretkey-file=${secretKey}"
+    "--provider-cert-file=${providerName}.crt"
+    "--crypt-secretkey-file=${providerName}.key"
+  ];
+
+  genKeys = ''
+    # generates time-limited keypairs
+    keyGen() {
+      dnscrypt-wrapper --gen-crypt-keypair \
+        --crypt-secretkey-file=${cfg.providerName}.key
+
+      dnscrypt-wrapper --gen-cert-file \
+        --crypt-secretkey-file=${cfg.providerName}.key \
+        --provider-cert-file=${cfg.providerName}.crt \
+        --provider-publickey-file=${publicKey} \
+        --provider-secretkey-file=${secretKey} \
+        --cert-file-expire-days=${toString cfg.keys.expiration}
+    }
+
+    cd ${dataDir}
+
+    # generate provider keypair (first run only)
+    ${optionalString (cfg.providerKey.public == null || cfg.providerKey.secret == null) ''
+      if [ ! -f ${publicKey} ] || [ ! -f ${secretKey} ]; then
+        dnscrypt-wrapper --gen-provider-keypair
+      fi
+    ''}
+
+    # generate new keys for rotation
+    if [ ! -f ${cfg.providerName}.key ] || [ ! -f ${cfg.providerName}.crt ]; then
+      keyGen
+    fi
+  '';
+
+  rotateKeys = ''
+    # check if keys are not expired
+    keyValid() {
+      fingerprint=$(dnscrypt-wrapper \
+        --show-provider-publickey \
+        --provider-publickey-file=${publicKey} \
+        | awk '{print $(NF)}')
+      dnscrypt-proxy --test=${toString (cfg.keys.checkInterval + 1)} \
+        --resolver-address=127.0.0.1:${toString cfg.port} \
+        --provider-name=${cfg.providerName} \
+        --provider-key=$fingerprint
+    }
+
+    cd ${dataDir}
+
+    # archive old keys and restart the service
+    if ! keyValid; then
+      echo "certificate soon to become invalid; backing up old cert"
+      mkdir -p oldkeys
+      mv -v "${cfg.providerName}.key" "oldkeys/${cfg.providerName}-$(date +%F-%T).key"
+      mv -v "${cfg.providerName}.crt" "oldkeys/${cfg.providerName}-$(date +%F-%T).crt"
+      kill "$(pidof -s dnscrypt-wrapper)"
+    fi
+  '';
+
+
+  # This is the fork of the original dnscrypt-proxy maintained by Dyne.org.
+  # dnscrypt-proxy2 doesn't provide the `--test` feature that is needed to
+  # correctly implement key rotation of dnscrypt-wrapper ephemeral keys.
+  dnscrypt-proxy1 = pkgs.callPackage
+    ({ stdenv, fetchFromGitHub, autoreconfHook
+    , pkg-config, libsodium, ldns, openssl, systemd }:
+
+    stdenv.mkDerivation rec {
+      pname = "dnscrypt-proxy";
+      version = "2019-08-20";
+
+      src = fetchFromGitHub {
+        owner = "dyne";
+        repo = "dnscrypt-proxy";
+        rev = "07ac3825b5069adc28e2547c16b1d983a8ed8d80";
+        sha256 = "0c4mq741q4rpmdn09agwmxap32kf0vgfz7pkhcdc5h54chc3g3xy";
+      };
+
+      configureFlags = optional stdenv.isLinux "--with-systemd";
+
+      nativeBuildInputs = [ autoreconfHook pkg-config ];
+
+      # <ldns/ldns.h> depends on <openssl/ssl.h>
+      buildInputs = [ libsodium openssl.dev ldns ] ++ optional stdenv.isLinux systemd;
+
+      postInstall = ''
+        # Previous versions required libtool files to load plugins; they are
+        # now strictly optional.
+        rm $out/lib/dnscrypt-proxy/*.la
+      '';
+
+      meta = {
+        description = "A tool for securing communications between a client and a DNS resolver";
+        homepage = "https://github.com/dyne/dnscrypt-proxy";
+        license = licenses.isc;
+        maintainers = with maintainers; [ rnhmjoj ];
+        platforms = platforms.linux;
+      };
+    }) { };
+
+in {
+
+
+  ###### interface
+
+  options.services.dnscrypt-wrapper = {
+    enable = mkEnableOption (lib.mdDoc "DNSCrypt wrapper");
+
+    address = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        The DNSCrypt wrapper will bind to this IP address.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 5353;
+      description = lib.mdDoc ''
+        The DNSCrypt wrapper will listen for DNS queries on this port.
+      '';
+    };
+
+    providerName = mkOption {
+      type = types.str;
+      default = "2.dnscrypt-cert.${config.networking.hostName}";
+      defaultText = literalExpression ''"2.dnscrypt-cert.''${config.networking.hostName}"'';
+      example = "2.dnscrypt-cert.myresolver";
+      description = lib.mdDoc ''
+        The name that will be given to this DNSCrypt resolver.
+        Note: the resolver name must start with `2.dnscrypt-cert.`.
+      '';
+    };
+
+    providerKey.public = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/etc/secrets/public.key";
+      description = lib.mdDoc ''
+        The filepath to the provider public key. If not given a new
+        provider key pair will be generated on the first run.
+      '';
+    };
+
+    providerKey.secret = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/etc/secrets/secret.key";
+      description = lib.mdDoc ''
+        The filepath to the provider secret key. If not given a new
+        provider key pair will be generated on the first run.
+      '';
+    };
+
+    upstream.address = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        The IP address of the upstream DNS server DNSCrypt will "wrap".
+      '';
+    };
+
+    upstream.port = mkOption {
+      type = types.port;
+      default = 53;
+      description = lib.mdDoc ''
+        The port of the upstream DNS server DNSCrypt will "wrap".
+      '';
+    };
+
+    keys.expiration = mkOption {
+      type = types.int;
+      default = 30;
+      description = lib.mdDoc ''
+        The duration (in days) of the time-limited secret key.
+        This will be automatically rotated before expiration.
+      '';
+    };
+
+    keys.checkInterval = mkOption {
+      type = types.int;
+      default = 1440;
+      description = lib.mdDoc ''
+        The time interval (in minutes) between key expiration checks.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.dnscrypt-wrapper = {
+      description = "dnscrypt-wrapper daemon user";
+      home = "${dataDir}";
+      createHome = true;
+      isSystemUser = true;
+      group = "dnscrypt-wrapper";
+    };
+    users.groups.dnscrypt-wrapper = { };
+
+    systemd.services.dnscrypt-wrapper = {
+      description = "dnscrypt-wrapper daemon";
+      after    = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path     = [ pkgs.dnscrypt-wrapper ];
+
+      serviceConfig = {
+        User = "dnscrypt-wrapper";
+        WorkingDirectory = dataDir;
+        Restart   = "always";
+        ExecStart = "${pkgs.dnscrypt-wrapper}/bin/dnscrypt-wrapper ${toString daemonArgs}";
+      };
+
+      preStart = genKeys;
+    };
+
+
+    systemd.services.dnscrypt-wrapper-rotate = {
+      after    = [ "network.target" ];
+      requires = [ "dnscrypt-wrapper.service" ];
+      description = "Rotates DNSCrypt wrapper keys if soon to expire";
+
+      path   = with pkgs; [ dnscrypt-wrapper dnscrypt-proxy1 gawk procps ];
+      script = rotateKeys;
+      serviceConfig.User = "dnscrypt-wrapper";
+    };
+
+
+    systemd.timers.dnscrypt-wrapper-rotate = {
+      description = "Periodically check DNSCrypt wrapper keys for expiration";
+      wantedBy = [ "multi-user.target" ];
+
+      timerConfig = {
+        Unit = "dnscrypt-wrapper-rotate.service";
+        OnBootSec = "1min";
+        OnUnitActiveSec = cfg.keys.checkInterval * 60;
+      };
+    };
+
+    assertions = with cfg; [
+      { assertion = (providerKey.public == null && providerKey.secret == null) ||
+                    (providerKey.secret != null && providerKey.public != null);
+        message = "The secret and public provider key must be set together.";
+      }
+    ];
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
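
A sketch of exposing a local resolver through the dnscrypt-wrapper module above; the provider name, public port and upstream port are placeholders:

    services.dnscrypt-wrapper = {
      enable = true;
      address = "0.0.0.0";
      port = 443;                                    # hypothetical public port
      providerName = "2.dnscrypt-cert.example.org";  # hypothetical resolver name
      upstream.address = "127.0.0.1";
      upstream.port = 5300;                          # hypothetical local resolver port
    };
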
diff --git a/nixpkgs/nixos/modules/services/networking/dnsdist.nix b/nixpkgs/nixos/modules/services/networking/dnsdist.nix
new file mode 100644
index 000000000000..483300111df9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dnsdist.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dnsdist;
+  configFile = pkgs.writeText "dnsdist.conf" ''
+    setLocal('${cfg.listenAddress}:${toString cfg.listenPort}')
+    ${cfg.extraConfig}
+  '';
+in {
+  options = {
+    services.dnsdist = {
+      enable = mkEnableOption (lib.mdDoc "dnsdist domain name server");
+
+      listenAddress = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Listen IP Address";
+        default = "0.0.0.0";
+      };
+      listenPort = mkOption {
+        type = types.int;
+        description = lib.mdDoc "Listen port";
+        default = 53;
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to dnsdist.conf.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.packages = [ pkgs.dnsdist ];
+
+    systemd.services.dnsdist = {
+      wantedBy = [ "multi-user.target" ];
+
+      startLimitIntervalSec = 0;
+      serviceConfig = {
+        DynamicUser = true;
+
+        # upstream overrides for better nixos compatibility
+        ExecStartPre = [ "" "${pkgs.dnsdist}/bin/dnsdist --check-config --config ${configFile}" ];
+        ExecStart = [ "" "${pkgs.dnsdist}/bin/dnsdist --supervised --disable-syslog --config ${configFile}" ];
+      };
+    };
+  };
+}
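
A sketch of using the dnsdist options above to balance queries over two hypothetical backends (the addresses and ACL are illustrative only):

    services.dnsdist = {
      enable = true;
      listenAddress = "0.0.0.0";
      listenPort = 53;
      extraConfig = ''
        newServer({address = "192.0.2.10"})  -- hypothetical backend
        newServer({address = "192.0.2.11"})
        setACL({"192.168.0.0/16", "127.0.0.0/8"})
      '';
    };
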
diff --git a/nixpkgs/nixos/modules/services/networking/dnsmasq.nix b/nixpkgs/nixos/modules/services/networking/dnsmasq.nix
new file mode 100644
index 000000000000..14bbe334e50d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/dnsmasq.nix
@@ -0,0 +1,184 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dnsmasq;
+  dnsmasq = cfg.package;
+  stateDir = "/var/lib/dnsmasq";
+
+  # True values are just put as `name` instead of `name=true`, and false values
+  # are turned into comments (false values are expected to be overrides, e.g.
+  # mkForce)
+  formatKeyValue =
+    name: value:
+    if value == true
+    then name
+    else if value == false
+    then "# setting `${name}` explicitly set to false"
+    else generators.mkKeyValueDefault { } "=" name value;
+
+  settingsFormat = pkgs.formats.keyValue {
+    mkKeyValue = formatKeyValue;
+    listsAsDuplicateKeys = true;
+  };
+
+  # Because formats.generate outputs a file, we make use of conf-file. Once
+  # `extraConfig` is deprecated we can just use
+  # `dnsmasqConf = format.generate "dnsmasq.conf" cfg.settings`
+  dnsmasqConf = pkgs.writeText "dnsmasq.conf" ''
+    conf-file=${settingsFormat.generate "dnsmasq.conf" cfg.settings}
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "dnsmasq" "servers" ] [ "services" "dnsmasq" "settings" "server" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.dnsmasq = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run dnsmasq.
+        '';
+      };
+
+      package = mkPackageOptionMD pkgs "dnsmasq" {};
+
+      resolveLocalQueries = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether dnsmasq should resolve local queries (i.e. add 127.0.0.1 to
+          /etc/resolv.conf).
+        '';
+      };
+
+      alwaysKeepRunning = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If enabled, systemd will always respawn dnsmasq even if shut down manually. The default, disabled, will only restart it on error.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+
+          freeformType = settingsFormat.type;
+
+          options.server = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = [ "8.8.8.8" "8.8.4.4" ];
+            description = lib.mdDoc ''
+              The DNS servers which dnsmasq should query.
+            '';
+          };
+
+        };
+        default = { };
+        description = lib.mdDoc ''
+          Configuration of dnsmasq. Lists get added one value per line (empty
+          lists and false values don't get added, though false values get
+          turned into comments). Gets merged with
+
+              {
+                dhcp-leasefile = "${stateDir}/dnsmasq.leases";
+                conf-file = optional cfg.resolveLocalQueries "/etc/dnsmasq-conf.conf";
+                resolv-file = optional cfg.resolveLocalQueries "/etc/dnsmasq-resolv.conf";
+              }
+        '';
+        example = literalExpression ''
+          {
+            domain-needed = true;
+            dhcp-range = [ "192.168.0.2,192.168.0.254" ];
+          }
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration directives that should be added to
+          `dnsmasq.conf`.
+
+          This option is deprecated, please use {option}`settings` instead.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    warnings = lib.optional (cfg.extraConfig != "") "Text based config is deprecated, dnsmasq now supports `services.dnsmasq.settings` for an attribute-set based config";
+
+    services.dnsmasq.settings = {
+      dhcp-leasefile = mkDefault "${stateDir}/dnsmasq.leases";
+      conf-file = mkDefault (optional cfg.resolveLocalQueries "/etc/dnsmasq-conf.conf");
+      resolv-file = mkDefault (optional cfg.resolveLocalQueries "/etc/dnsmasq-resolv.conf");
+    };
+
+    networking.nameservers =
+      optional cfg.resolveLocalQueries "127.0.0.1";
+
+    services.dbus.packages = [ dnsmasq ];
+
+    users.users.dnsmasq = {
+      isSystemUser = true;
+      group = "dnsmasq";
+      description = "Dnsmasq daemon user";
+    };
+    users.groups.dnsmasq = {};
+
+    networking.resolvconf = mkIf cfg.resolveLocalQueries {
+      useLocalResolver = mkDefault true;
+
+      extraConfig = ''
+        dnsmasq_conf=/etc/dnsmasq-conf.conf
+        dnsmasq_resolv=/etc/dnsmasq-resolv.conf
+      '';
+    };
+
+    systemd.services.dnsmasq = {
+        description = "Dnsmasq Daemon";
+        after = [ "network.target" "systemd-resolved.service" ];
+        wantedBy = [ "multi-user.target" ];
+        path = [ dnsmasq ];
+        preStart = ''
+          mkdir -m 755 -p ${stateDir}
+          touch ${stateDir}/dnsmasq.leases
+          chown -R dnsmasq ${stateDir}
+          touch /etc/dnsmasq-{conf,resolv}.conf
+          dnsmasq --test
+        '';
+        serviceConfig = {
+          Type = "dbus";
+          BusName = "uk.org.thekelleys.dnsmasq";
+          ExecStart = "${dnsmasq}/bin/dnsmasq -k --enable-dbus --user=dnsmasq -C ${dnsmasqConf}";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          PrivateTmp = true;
+          ProtectSystem = true;
+          ProtectHome = true;
+          Restart = if cfg.alwaysKeepRunning then "always" else "on-failure";
+        };
+        restartTriggers = [ config.environment.etc.hosts.source ];
+    };
+  };
+}
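
A settings sketch for the dnsmasq module above, showing boolean- and list-valued keys as described; the upstream servers and DHCP range are placeholder values:

    services.dnsmasq = {
      enable = true;
      settings = {
        domain-needed = true;
        bogus-priv = true;
        server = [ "1.1.1.1" "9.9.9.9" ];  # hypothetical upstreams
        dhcp-range = [ "192.168.0.10,192.168.0.254,24h" ];
      };
    };
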
diff --git a/nixpkgs/nixos/modules/services/networking/doh-proxy-rust.nix b/nixpkgs/nixos/modules/services/networking/doh-proxy-rust.nix
new file mode 100644
index 000000000000..7f8bbb8a7699
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/doh-proxy-rust.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.doh-proxy-rust;
+
+in {
+
+  options.services.doh-proxy-rust = {
+
+    enable = mkEnableOption (lib.mdDoc "doh-proxy-rust");
+
+    flags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--server-address=9.9.9.9:53" ];
+      description = lib.mdDoc ''
+        A list of command-line flags to pass to doh-proxy. For details on the
+        available options, see <https://github.com/jedisct1/doh-server#usage>.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.doh-proxy-rust = {
+      description = "doh-proxy-rust";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.doh-proxy-rust}/bin/doh-proxy ${escapeShellArgs cfg.flags}";
+        Restart = "always";
+        RestartSec = 10;
+        DynamicUser = true;
+
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        RemoveIPC = true;
+        RestrictAddressFamilies = "AF_INET AF_INET6";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [ "@system-service" "~@privileged @resources" ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ stephank ];
+
+}
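
A sketch of passing flags to doh-proxy through the option above; the local listen address is an assumption, the upstream address mirrors the option's own example:

    services.doh-proxy-rust = {
      enable = true;
      flags = [
        "--listen-address=127.0.0.1:3000"  # hypothetical local listener
        "--server-address=9.9.9.9:53"      # upstream DNS, as in the option example
      ];
    };
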
diff --git a/nixpkgs/nixos/modules/services/networking/ejabberd.nix b/nixpkgs/nixos/modules/services/networking/ejabberd.nix
new file mode 100644
index 000000000000..3feafc3bb3bd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ejabberd.nix
@@ -0,0 +1,157 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.ejabberd;
+
+  ctlcfg = pkgs.writeText "ejabberdctl.cfg" ''
+    ERL_EPMD_ADDRESS=127.0.0.1
+    ${cfg.ctlConfig}
+  '';
+
+  ectl = ''${cfg.package}/bin/ejabberdctl ${optionalString (cfg.configFile != null) "--config ${cfg.configFile}"} --ctl-config "${ctlcfg}" --spool "${cfg.spoolDir}" --logs "${cfg.logsDir}"'';
+
+  dumps = lib.escapeShellArgs cfg.loadDumps;
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.ejabberd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable ejabberd server";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ejabberd;
+        defaultText = literalExpression "pkgs.ejabberd";
+        description = lib.mdDoc "ejabberd server package to use";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "ejabberd";
+        description = lib.mdDoc "User under which ejabberd is ran";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "ejabberd";
+        description = lib.mdDoc "Group under which ejabberd is ran";
+      };
+
+      spoolDir = mkOption {
+        type = types.path;
+        default = "/var/lib/ejabberd";
+        description = lib.mdDoc "Location of the spooldir of ejabberd";
+      };
+
+      logsDir = mkOption {
+        type = types.path;
+        default = "/var/log/ejabberd";
+        description = lib.mdDoc "Location of the logfile directory of ejabberd";
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        description = lib.mdDoc "Configuration file for ejabberd in YAML format";
+        default = null;
+      };
+
+      ctlConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Configuration of ejabberdctl";
+      };
+
+      loadDumps = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc "Configuration dumps that should be loaded on the first startup";
+        example = literalExpression "[ ./myejabberd.dump ]";
+      };
+
+      imagemagick = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Add ImageMagick to server's path; allows for image thumbnailing";
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    users.users = optionalAttrs (cfg.user == "ejabberd") {
+      ejabberd = {
+        group = cfg.group;
+        home = cfg.spoolDir;
+        createHome = true;
+        uid = config.ids.uids.ejabberd;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "ejabberd") {
+      ejabberd.gid = config.ids.gids.ejabberd;
+    };
+
+    systemd.services.ejabberd = {
+      description = "ejabberd server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ pkgs.findutils pkgs.coreutils ] ++ lib.optional cfg.imagemagick pkgs.imagemagick;
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${ectl} foreground";
+        ExecStop = "${ectl} stop";
+        ExecReload = "${ectl} reload_config";
+      };
+
+      preStart = ''
+        if [ -z "$(ls -A '${cfg.spoolDir}')" ]; then
+          touch "${cfg.spoolDir}/.firstRun"
+        fi
+      '';
+
+      postStart = ''
+        while ! ${ectl} status >/dev/null 2>&1; do
+          if ! kill -0 "$MAINPID"; then exit 1; fi
+          sleep 0.1
+        done
+
+        if [ -e "${cfg.spoolDir}/.firstRun" ]; then
+          rm "${cfg.spoolDir}/.firstRun"
+          for src in ${dumps}; do
+            find "$src" -type f | while read dump; do
+              echo "Loading configuration dump at $dump"
+              ${ectl} load "$dump"
+            done
+          done
+        fi
+      '';
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.logsDir}' 0750 ${cfg.user} ${cfg.group} -"
+      "d '${cfg.spoolDir}' 0700 ${cfg.user} ${cfg.group} -"
+    ];
+
+    security.pam.services.ejabberd = {};
+
+  };
+
+}
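
A sketch of enabling the ejabberd module above with an externally managed YAML config; the path is a placeholder and the file itself is not part of this patch:

    services.ejabberd = {
      enable = true;
      configFile = "/etc/ejabberd/ejabberd.yml";  # hypothetical path to a YAML config
      imagemagick = true;
    };
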
diff --git a/nixpkgs/nixos/modules/services/networking/envoy.nix b/nixpkgs/nixos/modules/services/networking/envoy.nix
new file mode 100644
index 000000000000..c68ceab9619c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/envoy.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.envoy;
+  format = pkgs.formats.json { };
+  conf = format.generate "envoy.json" cfg.settings;
+  validateConfig = required: file:
+    pkgs.runCommand "validate-envoy-conf" { } ''
+      ${cfg.package}/bin/envoy --log-level error --mode validate -c "${file}" ${lib.optionalString (!required) "|| true"}
+      cp "${file}" "$out"
+    '';
+in
+
+{
+  options.services.envoy = {
+    enable = mkEnableOption (lib.mdDoc "Envoy reverse proxy");
+
+    package = mkPackageOptionMD pkgs "envoy" { };
+
+    requireValidConfig = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether a failure during config validation at build time is fatal.
+        When the config can't be checked at build time, for example when it includes
+        other files, disable this option.
+      '';
+    };
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      example = literalExpression ''
+        {
+          admin = {
+            access_log_path = "/dev/null";
+            address = {
+              socket_address = {
+                protocol = "TCP";
+                address = "127.0.0.1";
+                port_value = 9901;
+              };
+            };
+          };
+          static_resources = {
+            listeners = [];
+            clusters = [];
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Specify the configuration for Envoy in Nix.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    systemd.services.envoy = {
+      description = "Envoy reverse proxy";
+      after = [ "network-online.target" ];
+      requires = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/envoy -c ${validateConfig cfg.requireValidConfig conf}";
+        CacheDirectory = [ "envoy" ];
+        LogsDirectory = [ "envoy" ];
+        Restart = "no";
+        # Hardening
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+        DeviceAllow = [ "" ];
+        DevicePolicy = "closed";
+        DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = false; # at least wasmr needs WX permission
+        PrivateDevices = true;
+        PrivateUsers = false; # breaks CAP_NET_BIND_SERVICE
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "ptraceable";
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" "AF_XDP" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+        UMask = "0066";
+      };
+    };
+  };
+}
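
A settings sketch for the Envoy module above, mirroring the option's own example with an admin endpoint and empty static resources; the admin port is illustrative:

    services.envoy = {
      enable = true;
      settings = {
        admin.address.socket_address = {
          address = "127.0.0.1";
          port_value = 9901;  # hypothetical admin port
        };
        static_resources.listeners = [ ];  # left empty as in the option example
        static_resources.clusters = [ ];
      };
    };
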
diff --git a/nixpkgs/nixos/modules/services/networking/epmd.nix b/nixpkgs/nixos/modules/services/networking/epmd.nix
new file mode 100644
index 000000000000..0bc8c71f4eaa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/epmd.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.epmd;
+in
+{
+  ###### interface
+  options.services.epmd = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable socket activation for Erlang Port Mapper Daemon (epmd),
+        which acts as a name server on all hosts involved in distributed
+        Erlang computations.
+      '';
+    };
+    package = mkOption {
+      type = types.package;
+      default = pkgs.erlang;
+      defaultText = literalExpression "pkgs.erlang";
+      description = lib.mdDoc ''
+        The Erlang package to use to get the epmd binary. That way you can re-use
+        an Erlang runtime that is already installed for other purposes.
+      '';
+    };
+    listenStream = mkOption
+      {
+        type = types.str;
+        default = "[::]:4369";
+        description = lib.mdDoc ''
+          The ListenStream used by the systemd socket.
+          See https://www.freedesktop.org/software/systemd/man/systemd.socket.html#ListenStream= for more information.
+          Use this to change the address and port epmd will listen on.
+          If not defined, epmd will use "[::]:4369".
+        '';
+      };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    assertions = [{
+      assertion = cfg.listenStream == "[::]:4369" -> config.networking.enableIPv6;
+      message = "epmd listens by default on ipv6, enable ipv6 or change config.services.epmd.listenStream";
+    }];
+    systemd.sockets.epmd = rec {
+      description = "Erlang Port Mapper Daemon Activation Socket";
+      wantedBy = [ "sockets.target" ];
+      before = wantedBy;
+      socketConfig = {
+        ListenStream = cfg.listenStream;
+        Accept = "false";
+      };
+    };
+
+    systemd.services.epmd = {
+      description = "Erlang Port Mapper Daemon";
+      after = [ "network.target" ];
+      requires = [ "epmd.socket" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${cfg.package}/bin/epmd -systemd";
+        Type = "notify";
+      };
+    };
+  };
+
+  meta.maintainers = teams.beam.members;
+}
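
A sketch of restricting epmd to the IPv4 loopback via the listenStream option above, which also avoids the IPv6 assertion on hosts without IPv6:

    services.epmd = {
      enable = true;
      listenStream = "127.0.0.1:4369";
    };
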
diff --git a/nixpkgs/nixos/modules/services/networking/ergo.nix b/nixpkgs/nixos/modules/services/networking/ergo.nix
new file mode 100644
index 000000000000..033d4d9caf8a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ergo.nix
@@ -0,0 +1,143 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.services.ergo;
+  opt = options.services.ergo;
+
+  inherit (lib) literalExpression mkEnableOption mkIf mkOption optionalString types;
+
+  configFile = pkgs.writeText "ergo.conf" (''
+ergo {
+  directory = "${cfg.dataDir}"
+  node {
+    mining = false
+  }
+  wallet.secretStorage.secretDir = "${cfg.dataDir}/wallet/keystore"
+}
+
+scorex {
+  network {
+    bindAddress = "${cfg.listen.ip}:${toString cfg.listen.port}"
+  }
+'' + optionalString (cfg.api.keyHash != null) ''
+ restApi {
+    apiKeyHash = "${cfg.api.keyHash}"
+    bindAddress = "${cfg.api.listen.ip}:${toString cfg.api.listen.port}"
+ }
+'' + ''
+}
+'');
+
+in {
+
+  options = {
+
+    services.ergo = {
+      enable = mkEnableOption (lib.mdDoc "Ergo service");
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/ergo";
+        description = lib.mdDoc "The data directory for the Ergo node.";
+      };
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc "IP address on which the Ergo node should listen.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 9006;
+          description = lib.mdDoc "Listen port for the Ergo node.";
+        };
+      };
+
+      api = {
+       keyHash = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "324dcf027dd4a30a932c441f365a25e86b173defa4b8e58948253471b81b72cf";
+        description = lib.mdDoc "Hex-encoded Blake2b256 hash of an API key as a 64-chars long Base16 string.";
+       };
+
+       listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc "IP address that the Ergo node API should listen on if {option}`api.keyHash` is defined.";
+          };
+
+        port = mkOption {
+          type = types.port;
+          default = 9052;
+          description = lib.mdDoc "Listen port for the API endpoint if {option}`api.keyHash` is defined.";
+        };
+       };
+      };
+
+      testnet = mkOption {
+         type = types.bool;
+         default = false;
+         description = lib.mdDoc "Connect to testnet network instead of the default mainnet.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "ergo";
+        description = lib.mdDoc "The user as which to run the Ergo node.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = cfg.user;
+        defaultText = literalExpression "config.${opt.user}";
+        description = lib.mdDoc "The group as which to run the Ergo node.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the Ergo node as well as the API.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0770 '${cfg.user}' '${cfg.group}' - -"
+    ];
+
+    systemd.services.ergo = {
+      description = "ergo server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = ''${pkgs.ergo}/bin/ergo \
+                      ${optionalString (!cfg.testnet)
+                      "--mainnet"} \
+                      -c ${configFile}'';
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ] ++ [ cfg.api.listen.port ];
+    };
+
+    users.users.${cfg.user} = {
+      name = cfg.user;
+      group = cfg.group;
+      description = "Ergo daemon user";
+      home = cfg.dataDir;
+      isSystemUser = true;
+    };
+
+    users.groups.${cfg.group} = {};
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ergochat.nix b/nixpkgs/nixos/modules/services/networking/ergochat.nix
new file mode 100644
index 000000000000..a003512677eb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ergochat.nix
@@ -0,0 +1,155 @@
+{ config, lib, options, pkgs, ... }: let
+  cfg = config.services.ergochat;
+in {
+  options = {
+    services.ergochat = {
+
+      enable = lib.mkEnableOption (lib.mdDoc "Ergo IRC daemon");
+
+      openFilesLimit = lib.mkOption {
+        type = lib.types.int;
+        default = 1024;
+        description = lib.mdDoc ''
+          Maximum number of open files. Limits the number of client and server connections.
+        '';
+      };
+
+      configFile = lib.mkOption {
+        type = lib.types.path;
+        default = (pkgs.formats.yaml {}).generate "ergo.conf" cfg.settings;
+        defaultText = lib.literalMD "generated config file from `settings`";
+        description = lib.mdDoc ''
+          Path to the configuration file.
+          Setting this will skip any configuration done via `settings`.
+        '';
+      };
+
+      settings = lib.mkOption {
+        type = (pkgs.formats.yaml {}).type;
+        description = lib.mdDoc ''
+          Ergo IRC daemon configuration file. See
+          <https://raw.githubusercontent.com/ergochat/ergo/master/default.yaml>
+          for the upstream default configuration.
+        '';
+        default = {
+          network = {
+            name = "testnetwork";
+          };
+          server = {
+            name = "example.com";
+            listeners = {
+              ":6667" = {};
+            };
+            casemapping = "permissive";
+            enforce-utf = true;
+            lookup-hostnames = false;
+            ip-cloaking = {
+              enabled = false;
+            };
+            forward-confirm-hostnames = false;
+            check-ident = false;
+            relaymsg = {
+              enabled = false;
+            };
+            max-sendq = "1M";
+            ip-limits = {
+              count = false;
+              throttle = false;
+            };
+          };
+          datastore = {
+            autoupgrade = true;
+            # this points to the StateDirectory of the systemd service
+            path = "/var/lib/ergo/ircd.db";
+          };
+          accounts = {
+            authentication-enabled = true;
+            registration = {
+              enabled = true;
+              allow-before-connect = true;
+              throttling = {
+                enabled = true;
+                duration = "10m";
+                max-attempts = 30;
+              };
+              bcrypt-cost = 4;
+              email-verification.enabled = false;
+            };
+            multiclient = {
+              enabled = true;
+              allowed-by-default = true;
+              always-on = "opt-out";
+              auto-away = "opt-out";
+            };
+          };
+          channels = {
+            default-modes = "+ntC";
+            registration = {
+              enabled = true;
+            };
+          };
+          limits = {
+            nicklen = 32;
+            identlen = 20;
+            channellen = 64;
+            awaylen = 390;
+            kicklen = 390;
+            topiclen = 390;
+          };
+          history = {
+            enabled = true;
+            channel-length = 2048;
+            client-length = 256;
+            autoresize-window = "3d";
+            autoreplay-on-join = 0;
+            chathistory-maxmessages = 100;
+            znc-maxmessages = 2048;
+            restrictions = {
+              expire-time = "1w";
+              query-cutoff = "none";
+              grace-period = "1h";
+            };
+            retention = {
+              allow-individual-delete = false;
+              enable-account-indexing = false;
+            };
+            tagmsg-storage = {
+              default = false;
+              whitelist = [
+                "+draft/react"
+                "+react"
+              ];
+            };
+          };
+        };
+      };
+
+    };
+  };
+  config = lib.mkIf cfg.enable {
+
+    environment.etc."ergo.yaml".source = cfg.configFile;
+
+    # merge configured values with default values
+    services.ergochat.settings =
+      lib.mapAttrsRecursive (_: lib.mkDefault) options.services.ergochat.settings.default;
+
+    systemd.services.ergochat = {
+      description = "Ergo IRC daemon";
+      wantedBy = [ "multi-user.target" ];
+      # Reloading does not apply the changed config; further investigation is needed.
+      # At some point reloading should be enabled, since we don't want to restart for
+      # every config change.
+      # reloadIfChanged = true;
+      restartTriggers = [ cfg.configFile ];
+      serviceConfig = {
+        ExecStart = "${pkgs.ergochat}/bin/ergo run --conf /etc/ergo.yaml";
+        ExecReload = "${pkgs.util-linux}/bin/kill -HUP $MAINPID";
+        DynamicUser = true;
+        StateDirectory = "ergo";
+        LimitNOFILE = toString cfg.openFilesLimit;
+      };
+    };
+
+  };
+  meta.maintainers = with lib.maintainers; [ lassulus tv ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/eternal-terminal.nix b/nixpkgs/nixos/modules/services/networking/eternal-terminal.nix
new file mode 100644
index 000000000000..c6b6b04dcf72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/eternal-terminal.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.eternal-terminal;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.eternal-terminal = {
+
+      enable = mkEnableOption (lib.mdDoc "Eternal Terminal server");
+
+      port = mkOption {
+        default = 2022;
+        type = types.port;
+        description = lib.mdDoc ''
+          The port the server should listen on. Will use the server's default (2022) if not specified.
+
+          Make sure to open this port in the firewall if necessary.
+        '';
+      };
+
+      verbosity = mkOption {
+        default = 0;
+        type = types.enum (lib.range 0 9);
+        description = lib.mdDoc ''
+          The verbosity level (0-9).
+        '';
+      };
+
+      silent = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If enabled, disables all logging.
+        '';
+      };
+
+      logSize = mkOption {
+        default = 20971520;
+        type = types.int;
+        description = lib.mdDoc ''
+          The maximum log size.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    # We need to ensure the et package is fully installed because
+    # the (remote) et client runs the `etterminal` binary when it
+    # connects.
+    environment.systemPackages = [ pkgs.eternal-terminal ];
+
+    systemd.services = {
+      eternal-terminal = {
+        description = "Eternal Terminal server.";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = {
+          Type = "forking";
+          ExecStart = "${pkgs.eternal-terminal}/bin/etserver --daemon --cfgfile=${pkgs.writeText "et.cfg" ''
+            ; et.cfg : Config file for Eternal Terminal
+            ;
+
+            [Networking]
+            port = ${toString cfg.port}
+
+            [Debug]
+            verbose = ${toString cfg.verbosity}
+            silent = ${if cfg.silent then "1" else "0"}
+            logsize = ${toString cfg.logSize}
+          ''}";
+          Restart = "on-failure";
+          KillMode = "process";
+        };
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/expressvpn.nix b/nixpkgs/nixos/modules/services/networking/expressvpn.nix
new file mode 100644
index 000000000000..30de6987d31f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/expressvpn.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+{
+  options.services.expressvpn.enable = mkOption {
+    type = types.bool;
+    default = false;
+    description = lib.mdDoc ''
+      Enable the ExpressVPN daemon.
+    '';
+  };
+
+  config = mkIf config.services.expressvpn.enable {
+    boot.kernelModules = [ "tun" ];
+
+    systemd.services.expressvpn = {
+      description = "ExpressVPN Daemon";
+      serviceConfig = {
+        ExecStart = "${pkgs.expressvpn}/bin/expressvpnd";
+        Restart = "on-failure";
+        RestartSec = 5;
+      };
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "network-online.target" ];
+    };
+  };
+
+  meta.maintainers = with maintainers; [ yureien ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/fakeroute.nix b/nixpkgs/nixos/modules/services/networking/fakeroute.nix
new file mode 100644
index 000000000000..faf5879a6ed3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/fakeroute.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.fakeroute;
+  routeConf = pkgs.writeText "route.conf" (lib.concatStringsSep "\n" cfg.route);
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.fakeroute = {
+
+      enable = lib.mkEnableOption (lib.mdDoc "the fakeroute service");
+
+      route = lib.mkOption {
+        type = with lib.types; listOf str;
+        default = [];
+        example = [
+          "216.102.187.130"
+          "4.0.1.122"
+          "198.116.142.34"
+          "63.199.8.242"
+        ];
+        description = lib.mdDoc ''
+          Fake route that will appear after the real
+          one to any host running a traceroute.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.fakeroute = {
+      description = "Fakeroute Daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "forking";
+        User = "fakeroute";
+        DynamicUser = true;
+        AmbientCapabilities = [ "CAP_NET_RAW" ];
+        ExecStart = "${pkgs.fakeroute}/bin/fakeroute -f ${routeConf}";
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/fastnetmon-advanced.nix b/nixpkgs/nixos/modules/services/networking/fastnetmon-advanced.nix
new file mode 100644
index 000000000000..26e8ad8b76d9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/fastnetmon-advanced.nix
@@ -0,0 +1,222 @@
+{ config, lib, pkgs, ... }:
+
+let
+  # Background information: FastNetMon requires a MongoDB to start. This is because
+  # it uses MongoDB to store its configuration. That is, in a normal setup there is
+  # one collection with one document.
+  # To provide declarative configuration in our NixOS module, this database is
+  # completely emptied and replaced on each boot by the fastnetmon-setup service
+  # using the configuration backup functionality.
+
+  cfg = config.services.fastnetmon-advanced;
+  settingsFormat = pkgs.formats.yaml { };
+
+  # obtain the default configs by starting up ferretdb and fcli in a derivation
+  default_configs = pkgs.runCommand "default-configs" {
+    nativeBuildInputs = [
+      pkgs.ferretdb
+      pkgs.fastnetmon-advanced # for fcli
+      pkgs.proot
+    ];
+  } ''
+    mkdir ferretdb fastnetmon $out
+    FERRETDB_TELEMETRY="disable" FERRETDB_HANDLER="sqlite" FERRETDB_STATE_DIR="$PWD/ferretdb" FERRETDB_SQLITE_URL="file:$PWD/ferretdb/" ferretdb &
+
+    cat << EOF > fastnetmon/fastnetmon.conf
+    ${builtins.toJSON {
+      mongodb_username = "";
+    }}
+    EOF
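+    # proot binds the generated config dir to /etc/fastnetmon and fakes uid 0 (-0) so fcli can run inside the build sandbox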
+    proot -b fastnetmon:/etc/fastnetmon -0 fcli create_configuration
+    proot -b fastnetmon:/etc/fastnetmon -0 fcli set bgp default
+    proot -b fastnetmon:/etc/fastnetmon -0 fcli export_configuration backup.tar
+    tar -C $out --no-same-owner -xvf backup.tar
+  '';
+
+  # merge the user configs into the default configs
+  config_tar = pkgs.runCommand "fastnetmon-config.tar" {
+    nativeBuildInputs = with pkgs; [ jq ];
+  } ''
+    jq -s add ${default_configs}/main.json ${pkgs.writeText "main-add.json" (builtins.toJSON cfg.settings)} > main.json
+    mkdir hostgroup
+    ${lib.concatImapStringsSep "\n" (pos: hostgroup: ''
+      jq -s add ${default_configs}/hostgroup/0.json ${pkgs.writeText "hostgroup-${toString (pos - 1)}-add.json" (builtins.toJSON hostgroup)} > hostgroup/${toString (pos - 1)}.json
+    '') hostgroups}
+    mkdir bgp
+    ${lib.concatImapStringsSep "\n" (pos: bgp: ''
+      jq -s add ${default_configs}/bgp/0.json ${pkgs.writeText "bgp-${toString (pos - 1)}-add.json" (builtins.toJSON bgp)} > bgp/${toString (pos - 1)}.json
+    '') bgpPeers}
+    tar -cf $out main.json ${lib.concatImapStringsSep " " (pos: _: "hostgroup/${toString (pos - 1)}.json") hostgroups} ${lib.concatImapStringsSep " " (pos: _: "bgp/${toString (pos - 1)}.json") bgpPeers}
+  '';
+
+  hostgroups = lib.mapAttrsToList (name: hostgroup: { inherit name; } // hostgroup) cfg.hostgroups;
+  bgpPeers = lib.mapAttrsToList (name: bgpPeer: { inherit name; } // bgpPeer) cfg.bgpPeers;
+
+in {
+  options.services.fastnetmon-advanced = with lib; {
+    enable = mkEnableOption "the fastnetmon-advanced DDoS Protection daemon";
+
+    settings = mkOption {
+      description = ''
+        Extra configuration options to declaratively load into FastNetMon Advanced.
+
+        See the [FastNetMon Advanced Configuration options reference](https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-configuration-options/) for more details.
+      '';
+      type = settingsFormat.type;
+      default = {};
+      example = literalExpression ''
+        {
+          networks_list = [ "192.0.2.0/24" ];
+          gobgp = true;
+          gobgp_flow_spec_announces = true;
+        }
+      '';
+    };
+    hostgroups = mkOption {
+      description = "Hostgroups to declaratively load into FastNetMon Advanced";
+      type = types.attrsOf settingsFormat.type;
+      default = {};
+    };
+    bgpPeers = mkOption {
+      description = "BGP Peers to declaratively load into FastNetMon Advanced";
+      type = types.attrsOf settingsFormat.type;
+      default = {};
+    };
+
+    enableAdvancedTrafficPersistence = mkOption {
+      description = "Store historical flow data in clickhouse";
+      type = types.bool;
+      default = false;
+    };
+
+    traffic_db.settings = mkOption {
+      type = settingsFormat.type;
+      description = "Additional settings for /etc/fastnetmon/traffic_db.conf";
+    };
+  };
+
+  config = lib.mkMerge [ (lib.mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [
+      fastnetmon-advanced # for fcli
+    ];
+
+    environment.etc."fastnetmon/license.lic".source = "/var/lib/fastnetmon/license.lic";
+    environment.etc."fastnetmon/gobgpd.conf".source = "/run/fastnetmon/gobgpd.conf";
+    environment.etc."fastnetmon/fastnetmon.conf".source = pkgs.writeText "fastnetmon.conf" (builtins.toJSON {
+      mongodb_username = "";
+    });
+
+    services.ferretdb.enable = true;
+
+    systemd.services.fastnetmon-setup = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "ferretdb.service" ];
+      path = with pkgs; [ fastnetmon-advanced config.systemd.package ];
+      script = ''
+        fcli create_configuration
+        fcli delete hostgroup global
+        fcli import_configuration ${config_tar}
+        systemctl --no-block try-restart fastnetmon
+      '';
+      serviceConfig.Type = "oneshot";
+    };
+
+    systemd.services.fastnetmon = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "ferretdb.service" "fastnetmon-setup.service" "polkit.service" ];
+      path = with pkgs; [ iproute2 ];
+      unitConfig = {
+        # Disable the start rate limiting that shuts the service down after too many restarts.
+        # Restarts are triggered by `sudo fcli commit`, so frequent restarts are expected.
+        # Details: https://github.com/systemd/systemd/issues/2416
+        StartLimitInterval = 0;
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.fastnetmon-advanced}/bin/fastnetmon --log_to_console";
+
+        LimitNOFILE = 65535;
+        # Restart the service whenever it fails; we need to keep processing traffic no matter what happened.
+        Restart = "on-failure";
+        RestartSec = "5s";
+
+        DynamicUser = true;
+        CacheDirectory = "fastnetmon";
+        RuntimeDirectory = "fastnetmon"; # for gobgpd config
+        StateDirectory = "fastnetmon"; # for license file
+      };
+    };
+
+    security.polkit.enable = true;
+    security.polkit.extraConfig = ''
+      polkit.addRule(function(action, subject) {
+        if (action.id == "org.freedesktop.systemd1.manage-units" &&
+          subject.isInGroup("fastnetmon")) {
+          if (action.lookup("unit") == "gobgp.service") {
+            var verb = action.lookup("verb");
+            if (verb == "start" || verb == "stop" || verb == "restart") {
+              return polkit.Result.YES;
+            }
+          }
+        }
+      });
+    '';
+
+    # We don't use the existing gobgp NixOS module and package, because the gobgp
+    # version might not be compatible with fastnetmon. Also, the service name
+    # _must_ be 'gobgp' and not 'gobgpd', so that fastnetmon can reload the config.
+    systemd.services.gobgp = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "GoBGP Routing Daemon";
+      unitConfig = {
+        ConditionPathExists = "/run/fastnetmon/gobgpd.conf";
+      };
+      serviceConfig = {
+        Type = "notify";
+        ExecStartPre = "${pkgs.fastnetmon-advanced}/bin/fnm-gobgpd -f /run/fastnetmon/gobgpd.conf -d";
+        SupplementaryGroups = [ "fastnetmon" ];
+        ExecStart = "${pkgs.fastnetmon-advanced}/bin/fnm-gobgpd -f /run/fastnetmon/gobgpd.conf --sdnotify";
+        ExecReload = "${pkgs.fastnetmon-advanced}/bin/fnm-gobgpd -r";
+        DynamicUser = true;
+        AmbientCapabilities = "cap_net_bind_service";
+      };
+    };
+  })
+
+  (lib.mkIf (cfg.enable && cfg.enableAdvancedTrafficPersistence) {
+    ## Advanced Traffic persistence
+    ## https://fastnetmon.com/docs-fnm-advanced/fastnetmon-advanced-traffic-persistency/
+
+    services.clickhouse.enable = true;
+
+    services.fastnetmon-advanced.settings.traffic_db = true;
+
+    services.fastnetmon-advanced.traffic_db.settings = {
+      clickhouse_batch_size = lib.mkDefault 1000;
+      clickhouse_batch_delay = lib.mkDefault 1;
+      traffic_db_host = lib.mkDefault "127.0.0.1";
+      traffic_db_port = lib.mkDefault 8100;
+      clickhouse_host = lib.mkDefault "127.0.0.1";
+      clickhouse_port = lib.mkDefault 9000;
+      clickhouse_user = lib.mkDefault "default";
+      clickhouse_password = lib.mkDefault "";
+    };
+    environment.etc."fastnetmon/traffic_db.conf".text = builtins.toJSON cfg.traffic_db.settings;
+
+    systemd.services.traffic_db = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.fastnetmon-advanced}/bin/traffic_db";
+        # Restart the service whenever it fails; we need to keep processing traffic no matter what happened.
+        Restart = "on-failure";
+        RestartSec = "5s";
+
+        DynamicUser = true;
+      };
+    };
+
+  }) ];
+
+  meta.maintainers = lib.teams.wdz.members;
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ferm.nix b/nixpkgs/nixos/modules/services/networking/ferm.nix
new file mode 100644
index 000000000000..09151eb0b544
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ferm.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ferm;
+
+  configFile = pkgs.stdenv.mkDerivation {
+    name = "ferm.conf";
+    text = cfg.config;
+    preferLocalBuild = true;
+    buildCommand = ''
+      echo -n "$text" > $out
+      ${cfg.package}/bin/ferm --noexec $out
+    '';
+  };
+in {
+  options = {
+    services.ferm = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable Ferm Firewall.
+          *Warning*: Enabling this service WILL disable the existing NixOS
+          firewall! Default firewall rules provided by packages are not
+          considered at the moment.
+        '';
+      };
+      config = mkOption {
+        description = lib.mdDoc "Verbatim ferm.conf configuration.";
+        default = "";
+        defaultText = literalMD "empty firewall, allows any traffic";
+        type = types.lines;
+      };
+      package = mkOption {
+        description = lib.mdDoc "The ferm package.";
+        type = types.package;
+        default = pkgs.ferm;
+        defaultText = literalExpression "pkgs.ferm";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.firewall.enable = false;
+    systemd.services.ferm = {
+      description = "Ferm Firewall";
+      after = [ "ipset.target" ];
+      before = [ "network-pre.target" ];
+      wants = [ "network-pre.target" ];
+      wantedBy = [ "multi-user.target" ];
+      reloadIfChanged = true;
+      serviceConfig = {
+        Type="oneshot";
+        RemainAfterExit = "yes";
+        ExecStart = "${cfg.package}/bin/ferm ${configFile}";
+        ExecReload = "${cfg.package}/bin/ferm ${configFile}";
+        ExecStop = "${cfg.package}/bin/ferm -F ${configFile}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
new file mode 100644
index 000000000000..4d8777d204bb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.md
@@ -0,0 +1,55 @@
+# Firefox Sync server {#module-services-firefox-syncserver}
+
+A storage server for Firefox Sync that you can easily host yourself.
+
+## Quickstart {#module-services-firefox-syncserver-quickstart}
+
+The absolute minimal configuration for the sync server looks like this:
+
+```nix
+services.mysql.package = pkgs.mariadb;
+
+services.firefox-syncserver = {
+  enable = true;
+  secrets = builtins.toFile "sync-secrets" ''
+    SYNC_MASTER_SECRET=this-secret-is-actually-leaked-to-/nix/store
+  '';
+  singleNode = {
+    enable = true;
+    hostname = "localhost";
+    url = "http://localhost:5000";
+  };
+};
+```
+
+This will start a sync server that is only accessible locally. Once the service is
+running, you can navigate to `about:config` in your Firefox profile and set
+`identity.sync.tokenserver.uri` to `http://localhost:5000/1.0/sync/1.5`. Your browser
+will now use your local sync server for data storage.
+
+::: {.warning}
+This configuration should never be used in production. It is not encrypted and
+stores its secrets in a world-readable location.
+:::
+
+## More detailed setup {#module-services-firefox-syncserver-configuration}
+
+The `firefox-syncserver` service provides a number of options to make setting up
+small deployment easier. These are grouped under the `singleNode` element of the
+option tree and allow simple configuration of the most important parameters.
+
+Single node setup is split into two kinds of options: those that affect the sync
+server itself, and those that affect its surroundings. Options that affect the
+sync server are `capacity`, which configures how many accounts may be active on
+this instance, and `url`, which holds the URL under which the sync server can be
+accessed. The `url` can be configured automatically when using nginx.
+
+Options that affect the surroundings of the sync server are `enableNginx`,
+`enableTLS` and `hostname`. If `enableNginx` is set the sync server module will
+automatically add an nginx virtual host to the system using `hostname` as the
+domain and set `url` accordingly. If `enableTLS` is set the module will also
+enable ACME certificates on the new virtual host and force all connections to
+be made via TLS.
+
+For an actual deployment it is also recommended to store the `secrets` file in
+a secure location.
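+
+For example, a small public deployment could look roughly like the following
+sketch. `sync.example.com` is a placeholder, and the secrets path is only
+illustrative; it should point at a file provisioned through your secret
+management mechanism rather than the Nix store:
+
+```nix
+services.mysql.package = pkgs.mariadb;
+
+services.firefox-syncserver = {
+  enable = true;
+  secrets = "/run/keys/firefox-syncserver-secrets";
+  singleNode = {
+    enable = true;
+    enableNginx = true;
+    enableTLS = true;
+    hostname = "sync.example.com";
+    capacity = 5;
+  };
+};
+```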
diff --git a/nixpkgs/nixos/modules/services/networking/firefox-syncserver.nix b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.nix
new file mode 100644
index 000000000000..71eb2f537acc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/firefox-syncserver.nix
@@ -0,0 +1,322 @@
+{ config, pkgs, lib, options, ... }:
+
+let
+  cfg = config.services.firefox-syncserver;
+  opt = options.services.firefox-syncserver;
+  defaultDatabase = "firefox_syncserver";
+  defaultUser = "firefox-syncserver";
+
+  dbIsLocal = cfg.database.host == "localhost";
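+  # The connection URL carries no password: with database.createLocally the configured user authenticates over the local unix socket.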
+  dbURL = "mysql://${cfg.database.user}@${cfg.database.host}/${cfg.database.name}";
+
+  format = pkgs.formats.toml {};
+  settings = {
+    human_logs = true;
+    syncstorage = {
+      database_url = dbURL;
+    };
+    tokenserver = {
+      node_type = "mysql";
+      database_url = dbURL;
+      fxa_email_domain = "api.accounts.firefox.com";
+      fxa_oauth_server_url = "https://oauth.accounts.firefox.com/v1";
+      run_migrations = true;
+      # if JWK caching is not enabled the token server must verify tokens
+      # using the fxa api, on a thread pool with a static size.
+      additional_blocking_threads_for_fxa_requests = 10;
+    } // lib.optionalAttrs cfg.singleNode.enable {
+      # Single-node mode is likely to be used on small instances with little
+      # capacity. The default value (0.1) can only ever release capacity when
+      # accounts are removed if the total capacity is 10 or larger to begin
+      # with.
+      # https://github.com/mozilla-services/syncstorage-rs/issues/1313#issuecomment-1145293375
+      node_capacity_release_rate = 1;
+    };
+  };
+  configFile = format.generate "syncstorage.toml" (lib.recursiveUpdate settings cfg.settings);
+  setupScript = pkgs.writeShellScript "firefox-syncserver-setup" ''
+        set -euo pipefail
+        shopt -s inherit_errexit
+
+        schema_configured() {
+          mysql ${cfg.database.name} -Ne 'SHOW TABLES' | grep -q services
+        }
+
+        update_config() {
+          mysql ${cfg.database.name} <<"EOF"
+            BEGIN;
+
+            INSERT INTO `services` (`id`, `service`, `pattern`)
+              VALUES (1, 'sync-1.5', '{node}/1.5/{uid}')
+              ON DUPLICATE KEY UPDATE service='sync-1.5', pattern='{node}/1.5/{uid}';
+            INSERT INTO `nodes` (`id`, `service`, `node`, `available`, `current_load`,
+                                 `capacity`, `downed`, `backoff`)
+              VALUES (1, 1, '${cfg.singleNode.url}', ${toString cfg.singleNode.capacity},
+              0, ${toString cfg.singleNode.capacity}, 0, 0)
+              ON DUPLICATE KEY UPDATE node = '${cfg.singleNode.url}', capacity=${toString cfg.singleNode.capacity};
+
+            COMMIT;
+        EOF
+        }
+
+
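+        # Wait up to ~2 minutes (60 tries, 2 seconds apart) for the sync server to create its schema, then register this node.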
+        for (( try = 0; try < 60; try++ )); do
+          if ! schema_configured; then
+            sleep 2
+          else
+            update_config
+            exit 0
+          fi
+        done
+
+        echo "Single-node setup failed"
+        exit 1
+      '';
+in
+
+{
+  options = {
+    services.firefox-syncserver = {
+      enable = lib.mkEnableOption (lib.mdDoc ''
+        the Firefox Sync storage service.
+
+        Out of the box this will not be very useful unless you also configure at least
+        one service and one node by inserting them into the mysql database manually, e.g.
+        by running
+
+        ```
+          INSERT INTO `services` (`id`, `service`, `pattern`) VALUES ('1', 'sync-1.5', '{node}/1.5/{uid}');
+          INSERT INTO `nodes` (`id`, `service`, `node`, `available`, `current_load`,
+              `capacity`, `downed`, `backoff`)
+            VALUES ('1', '1', 'https://mydomain.tld', '1', '0', '10', '0', '0');
+        ```
+
+        {option}`${opt.singleNode.enable}` does this automatically when enabled.
+      '');
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.syncstorage-rs;
+        defaultText = lib.literalExpression "pkgs.syncstorage-rs";
+        description = lib.mdDoc ''
+          Package to use.
+        '';
+      };
+
+      database.name = lib.mkOption {
+        # the mysql module does not allow `-quoting without resorting to shell
+        # escaping, so we restrict db names for forward compatibility should this
+        # behavior ever change.
+        type = lib.types.strMatching "[a-z_][a-z0-9_]*";
+        default = defaultDatabase;
+        description = lib.mdDoc ''
+          Database to use for storage. Will be created automatically if it does not exist
+          and `config.${opt.database.createLocally}` is set.
+        '';
+      };
+
+      database.user = lib.mkOption {
+        type = lib.types.str;
+        default = defaultUser;
+        description = lib.mdDoc ''
+          Username for database connections.
+        '';
+      };
+
+      database.host = lib.mkOption {
+        type = lib.types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          Database host name. `localhost` is treated specially and inserts
+          systemd dependencies; other hostnames or IP addresses of the local machine do not.
+        '';
+      };
+
+      database.createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to create database and user on the local machine if they do not exist.
+          This includes enabling unix domain socket authentication for the configured user.
+        '';
+      };
+
+      logLevel = lib.mkOption {
+        type = lib.types.str;
+        default = "error";
+        description = lib.mdDoc ''
+          Log level to run with. This can be a simple log level like `error`
+          or `trace`, or a more complicated logging expression.
+        '';
+      };
+
+      secrets = lib.mkOption {
+        type = lib.types.path;
+        description = lib.mdDoc ''
+          A file containing the various secrets. Should be in the format expected by systemd's
+          `EnvironmentFile` directive. Two secrets are currently available:
+          `SYNC_MASTER_SECRET` and
+          `SYNC_TOKENSERVER__FXA_METRICS_HASH_SECRET`.
+        '';
+      };
+
+      singleNode = {
+        enable = lib.mkEnableOption (lib.mdDoc "auto-configuration for a simple single-node setup");
+
+        enableTLS = lib.mkEnableOption (lib.mdDoc "automatic TLS setup");
+
+        enableNginx = lib.mkEnableOption (lib.mdDoc "nginx virtualhost definitions");
+
+        hostname = lib.mkOption {
+          type = lib.types.str;
+          description = lib.mdDoc ''
+            Host name to use for this service.
+          '';
+        };
+
+        capacity = lib.mkOption {
+          type = lib.types.ints.unsigned;
+          default = 10;
+          description = lib.mdDoc ''
+            How many sync accounts are allowed on this server. Setting this value
+            equal to or less than the number of currently active accounts will
+            effectively deny service to accounts not yet registered here.
+          '';
+        };
+
+        url = lib.mkOption {
+          type = lib.types.str;
+          default = "${if cfg.singleNode.enableTLS then "https" else "http"}://${cfg.singleNode.hostname}";
+          defaultText = lib.literalExpression ''
+            ''${if cfg.singleNode.enableTLS then "https" else "http"}://''${config.${opt.singleNode.hostname}}
+          '';
+          description = lib.mdDoc ''
+            URL of the host. If you are not using the automatic webserver proxy setup you will have
+            to change this setting or your sync server may not be functional.
+          '';
+        };
+      };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule {
+          freeformType = format.type;
+
+          options = {
+            port = lib.mkOption {
+              type = lib.types.port;
+              default = 5000;
+              description = lib.mdDoc ''
+                Port to bind to.
+              '';
+            };
+
+            tokenserver.enabled = lib.mkOption {
+              type = lib.types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Whether to enable the token service as well.
+              '';
+            };
+          };
+        };
+        default = { };
+        description = lib.mdDoc ''
+          Settings for the sync server. These take priority over values computed
+          from NixOS options.
+
+          See the example config in
+          <https://github.com/mozilla-services/syncstorage-rs/blob/master/config/local.example.toml>
+          and the doc comments on the `Settings` structs in
+          <https://github.com/mozilla-services/syncstorage-rs/blob/master/syncstorage-settings/src/lib.rs>
+          and
+          <https://github.com/mozilla-services/syncstorage-rs/blob/master/tokenserver-settings/src/lib.rs>
+          for available options.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.mysql = lib.mkIf cfg.database.createLocally {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [{
+        name = cfg.database.user;
+        ensurePermissions = {
+          "${cfg.database.name}.*" = "all privileges";
+        };
+      }];
+    };
+
+    systemd.services.firefox-syncserver = {
+      wantedBy = [ "multi-user.target" ];
+      requires = lib.mkIf dbIsLocal [ "mysql.service" ];
+      after = lib.mkIf dbIsLocal [ "mysql.service" ];
+      restartTriggers = lib.optional cfg.singleNode.enable setupScript;
+      environment.RUST_LOG = cfg.logLevel;
+      serviceConfig = {
+        User = defaultUser;
+        Group = defaultUser;
+        ExecStart = "${cfg.package}/bin/syncserver --config ${configFile}";
+        EnvironmentFile = lib.mkIf (cfg.secrets != null) "${cfg.secrets}";
+
+        # hardening
+        RemoveIPC = true;
+        CapabilityBoundingSet = [ "" ];
+        DynamicUser = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        ProtectClock = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        SystemCallArchitectures = "native";
+        # syncstorage-rs uses python-cffi internally, and python-cffi does not
+        # work with MemoryDenyWriteExecute=true
+        MemoryDenyWriteExecute = false;
+        RestrictNamespaces = true;
+        RestrictSUIDSGID = true;
+        ProtectHostname = true;
+        LockPersonality = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictRealtime = true;
+        ProtectSystem = "strict";
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectHome = true;
+        PrivateUsers = true;
+        PrivateTmp = true;
+        SystemCallFilter = [ "@system-service" "~ @privileged @resources" ];
+        UMask = "0077";
+      };
+    };
+
+    systemd.services.firefox-syncserver-setup = lib.mkIf cfg.singleNode.enable {
+      wantedBy = [ "firefox-syncserver.service" ];
+      requires = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal "mysql.service";
+      after = [ "firefox-syncserver.service" ] ++ lib.optional dbIsLocal "mysql.service";
+      path = [ config.services.mysql.package ];
+      serviceConfig.ExecStart = [ "${setupScript}" ];
+    };
+
+    services.nginx.virtualHosts = lib.mkIf cfg.singleNode.enableNginx {
+      ${cfg.singleNode.hostname} = {
+        enableACME = cfg.singleNode.enableTLS;
+        forceSSL = cfg.singleNode.enableTLS;
+        locations."/" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.settings.port}";
+          # We need to pass the Host header that matches the original Host header. Otherwise,
+          # Hawk authentication will fail (because it assumes that the client and server see
+          # the same value of the Host header).
+          recommendedProxySettings = true;
+        };
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ pennae ];
+    doc = ./firefox-syncserver.md;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/fireqos.nix b/nixpkgs/nixos/modules/services/networking/fireqos.nix
new file mode 100644
index 000000000000..b7f51a89c0e1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/fireqos.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.fireqos;
+  fireqosConfig = pkgs.writeText "fireqos.conf" "${cfg.config}";
+in {
+  options.services.fireqos = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If enabled, FireQOS will be launched with the configuration
+        given in `config`.
+      '';
+    };
+
+    config = mkOption {
+      type = types.str;
+      default = "";
+      example = ''
+        interface wlp3s0 world-in input rate 10mbit ethernet
+          class web commit 50kbit
+            match tcp ports 80,443
+
+        interface wlp3s0 world-out input rate 10mbit ethernet
+          class web commit 50kbit
+            match tcp ports 80,443
+      '';
+      description = lib.mdDoc ''
+        The FireQOS configuration goes here.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.fireqos = {
+      description = "FireQOS";
+      after = [ "network.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "${pkgs.firehol}/bin/fireqos start ${fireqosConfig}";
+        ExecStop = [
+          "${pkgs.firehol}/bin/fireqos stop"
+          "${pkgs.firehol}/bin/fireqos clear_all_qos"
+        ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/firewall-iptables.nix b/nixpkgs/nixos/modules/services/networking/firewall-iptables.nix
new file mode 100644
index 000000000000..63e952194d67
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/firewall-iptables.nix
@@ -0,0 +1,334 @@
+/* This module enables a simple firewall.
+
+   The firewall can be customised in arbitrary ways by setting
+   ‘networking.firewall.extraCommands’.  For modularity, the firewall
+   uses several chains:
+
+   - ‘nixos-fw’ is the main chain for input packet processing.
+
+   - ‘nixos-fw-accept’ is called for accepted packets.  If you want
+   additional logging, or want to reject certain packets anyway, you
+   can insert rules at the start of this chain.
+
+   - ‘nixos-fw-log-refuse’ and ‘nixos-fw-refuse’ are called for
+   refused packets.  (The former jumps to the latter after logging
+   the packet.)  If you want additional logging, or want to accept
+   certain packets anyway, you can insert rules at the start of
+   this chain.
+
+   - ‘nixos-fw-rpfilter’ is used as the main chain in the mangle table,
+   called from the built-in ‘PREROUTING’ chain.  If the kernel
+   supports it and `cfg.checkReversePath` is set this chain will
+   perform a reverse path filter test.
+
+   - ‘nixos-drop’ is used while reloading the firewall in order to drop
+   all traffic.  Since reloading isn't implemented in an atomic way
+   this'll prevent any traffic from leaking through while reloading
+   the firewall.  However, if the reloading fails, the ‘firewall-stop’
+   script will be called, which in turn will effectively disable the
+   complete firewall (in the default configuration).
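+
+   As an illustrative sketch (not part of this module), a rule could be
+   inserted at the start of ‘nixos-fw-accept’ through
+   ‘networking.firewall.extraCommands’ to log every accepted packet:
+
+     networking.firewall.extraCommands = ''
+       ip46tables -I nixos-fw-accept -j LOG --log-level info --log-prefix "accepted: "
+     '';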
+
+*/
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking.firewall;
+
+  inherit (config.boot.kernelPackages) kernel;
+
+  kernelHasRPFilter = ((kernel.config.isEnabled or (x: false)) "IP_NF_MATCH_RPFILTER") || (kernel.features.netfilterRPFilter or false);
+
+  helpers = import ./helpers.nix { inherit config lib; };
+
+  writeShScript = name: text:
+    let
+      dir = pkgs.writeScriptBin name ''
+        #! ${pkgs.runtimeShell} -e
+        ${text}
+      '';
+    in
+    "${dir}/bin/${name}";
+
+  startScript = writeShScript "firewall-start" ''
+    ${helpers}
+
+    # Flush the old firewall rules.  !!! Ideally, updating the
+    # firewall would be atomic.  Apparently that's possible
+    # with iptables-restore.
+    ip46tables -D INPUT -j nixos-fw 2> /dev/null || true
+    for chain in nixos-fw nixos-fw-accept nixos-fw-log-refuse nixos-fw-refuse; do
+      ip46tables -F "$chain" 2> /dev/null || true
+      ip46tables -X "$chain" 2> /dev/null || true
+    done
+
+
+    # The "nixos-fw-accept" chain just accepts packets.
+    ip46tables -N nixos-fw-accept
+    ip46tables -A nixos-fw-accept -j ACCEPT
+
+
+    # The "nixos-fw-refuse" chain rejects or drops packets.
+    ip46tables -N nixos-fw-refuse
+
+    ${if cfg.rejectPackets then ''
+      # Send a reset for existing TCP connections that we've
+      # somehow forgotten about.  Send ICMP "port unreachable"
+      # for everything else.
+      ip46tables -A nixos-fw-refuse -p tcp ! --syn -j REJECT --reject-with tcp-reset
+      ip46tables -A nixos-fw-refuse -j REJECT
+    '' else ''
+      ip46tables -A nixos-fw-refuse -j DROP
+    ''}
+
+
+    # The "nixos-fw-log-refuse" chain performs logging, then
+    # jumps to the "nixos-fw-refuse" chain.
+    ip46tables -N nixos-fw-log-refuse
+
+    ${optionalString cfg.logRefusedConnections ''
+      ip46tables -A nixos-fw-log-refuse -p tcp --syn -j LOG --log-level info --log-prefix "refused connection: "
+    ''}
+    ${optionalString (cfg.logRefusedPackets && !cfg.logRefusedUnicastsOnly) ''
+      ip46tables -A nixos-fw-log-refuse -m pkttype --pkt-type broadcast \
+        -j LOG --log-level info --log-prefix "refused broadcast: "
+      ip46tables -A nixos-fw-log-refuse -m pkttype --pkt-type multicast \
+        -j LOG --log-level info --log-prefix "refused multicast: "
+    ''}
+    ip46tables -A nixos-fw-log-refuse -m pkttype ! --pkt-type unicast -j nixos-fw-refuse
+    ${optionalString cfg.logRefusedPackets ''
+      ip46tables -A nixos-fw-log-refuse \
+        -j LOG --log-level info --log-prefix "refused packet: "
+    ''}
+    ip46tables -A nixos-fw-log-refuse -j nixos-fw-refuse
+
+
+    # The "nixos-fw" chain does the actual work.
+    ip46tables -N nixos-fw
+
+    # Clean up rpfilter rules
+    ip46tables -t mangle -D PREROUTING -j nixos-fw-rpfilter 2> /dev/null || true
+    ip46tables -t mangle -F nixos-fw-rpfilter 2> /dev/null || true
+    ip46tables -t mangle -X nixos-fw-rpfilter 2> /dev/null || true
+
+    ${optionalString (kernelHasRPFilter && (cfg.checkReversePath != false)) ''
+      # Perform a reverse-path test to refuse spoofers
+      # For now, we just drop, as the mangle table doesn't have a log-refuse yet
+      ip46tables -t mangle -N nixos-fw-rpfilter 2> /dev/null || true
+      ip46tables -t mangle -A nixos-fw-rpfilter -m rpfilter --validmark ${optionalString (cfg.checkReversePath == "loose") "--loose"} -j RETURN
+
+      # Allows this host to act as a DHCPv4 client without first having to use APIPA
+      iptables -t mangle -A nixos-fw-rpfilter -p udp --sport 67 --dport 68 -j RETURN
+
+      # Allows this host to act as a DHCPv4 server
+      iptables -t mangle -A nixos-fw-rpfilter -s 0.0.0.0 -d 255.255.255.255 -p udp --sport 68 --dport 67 -j RETURN
+
+      ${optionalString cfg.logReversePathDrops ''
+        ip46tables -t mangle -A nixos-fw-rpfilter -j LOG --log-level info --log-prefix "rpfilter drop: "
+      ''}
+      ip46tables -t mangle -A nixos-fw-rpfilter -j DROP
+
+      ip46tables -t mangle -A PREROUTING -j nixos-fw-rpfilter
+    ''}
+
+    # Accept all traffic on the trusted interfaces.
+    ${flip concatMapStrings cfg.trustedInterfaces (iface: ''
+      ip46tables -A nixos-fw -i ${iface} -j nixos-fw-accept
+    '')}
+
+    # Accept packets from established or related connections.
+    ip46tables -A nixos-fw -m conntrack --ctstate ESTABLISHED,RELATED -j nixos-fw-accept
+
+    # Accept connections to the allowed TCP ports.
+    ${concatStrings (mapAttrsToList (iface: cfg:
+      concatMapStrings (port:
+        ''
+          ip46tables -A nixos-fw -p tcp --dport ${toString port} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
+        ''
+      ) cfg.allowedTCPPorts
+    ) cfg.allInterfaces)}
+
+    # Accept connections to the allowed TCP port ranges.
+    ${concatStrings (mapAttrsToList (iface: cfg:
+      concatMapStrings (rangeAttr:
+        let range = toString rangeAttr.from + ":" + toString rangeAttr.to; in
+        ''
+          ip46tables -A nixos-fw -p tcp --dport ${range} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
+        ''
+      ) cfg.allowedTCPPortRanges
+    ) cfg.allInterfaces)}
+
+    # Accept packets on the allowed UDP ports.
+    ${concatStrings (mapAttrsToList (iface: cfg:
+      concatMapStrings (port:
+        ''
+          ip46tables -A nixos-fw -p udp --dport ${toString port} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
+        ''
+      ) cfg.allowedUDPPorts
+    ) cfg.allInterfaces)}
+
+    # Accept packets on the allowed UDP port ranges.
+    ${concatStrings (mapAttrsToList (iface: cfg:
+      concatMapStrings (rangeAttr:
+        let range = toString rangeAttr.from + ":" + toString rangeAttr.to; in
+        ''
+          ip46tables -A nixos-fw -p udp --dport ${range} -j nixos-fw-accept ${optionalString (iface != "default") "-i ${iface}"}
+        ''
+      ) cfg.allowedUDPPortRanges
+    ) cfg.allInterfaces)}
+
+    # Optionally respond to ICMPv4 pings.
+    ${optionalString cfg.allowPing ''
+      iptables -w -A nixos-fw -p icmp --icmp-type echo-request ${optionalString (cfg.pingLimit != null)
+        "-m limit ${cfg.pingLimit} "
+      }-j nixos-fw-accept
+    ''}
+
+    ${optionalString config.networking.enableIPv6 ''
+      # Accept all ICMPv6 messages except redirects and node
+      # information queries (type 139).  See RFC 4890, section
+      # 4.4.
+      ip6tables -A nixos-fw -p icmpv6 --icmpv6-type redirect -j DROP
+      ip6tables -A nixos-fw -p icmpv6 --icmpv6-type 139 -j DROP
+      ip6tables -A nixos-fw -p icmpv6 -j nixos-fw-accept
+
+      # Allow this host to act as a DHCPv6 client
+      ip6tables -A nixos-fw -d fe80::/64 -p udp --dport 546 -j nixos-fw-accept
+    ''}
+
+    ${cfg.extraCommands}
+
+    # Reject/drop everything else.
+    ip46tables -A nixos-fw -j nixos-fw-log-refuse
+
+
+    # Enable the firewall.
+    ip46tables -A INPUT -j nixos-fw
+  '';
+
+  stopScript = writeShScript "firewall-stop" ''
+    ${helpers}
+
+    # Clean up in case reload fails
+    ip46tables -D INPUT -j nixos-drop 2>/dev/null || true
+
+    # Clean up after added ruleset
+    ip46tables -D INPUT -j nixos-fw 2>/dev/null || true
+
+    ${optionalString (kernelHasRPFilter && (cfg.checkReversePath != false)) ''
+      ip46tables -t mangle -D PREROUTING -j nixos-fw-rpfilter 2>/dev/null || true
+    ''}
+
+    ${cfg.extraStopCommands}
+  '';
+
+  reloadScript = writeShScript "firewall-reload" ''
+    ${helpers}
+
+    # Create a unique drop rule
+    ip46tables -D INPUT -j nixos-drop 2>/dev/null || true
+    ip46tables -F nixos-drop 2>/dev/null || true
+    ip46tables -X nixos-drop 2>/dev/null || true
+    ip46tables -N nixos-drop
+    ip46tables -A nixos-drop -j DROP
+
+    # Don't allow traffic to leak through until the script has completed
+    ip46tables -A INPUT -j nixos-drop
+
+    ${cfg.extraStopCommands}
+
+    if ${startScript}; then
+      ip46tables -D INPUT -j nixos-drop 2>/dev/null || true
+    else
+      echo "Failed to reload firewall... Stopping"
+      ${stopScript}
+      exit 1
+    fi
+  '';
+
+in
+
+{
+
+  options = {
+
+    networking.firewall = {
+      extraCommands = mkOption {
+        type = types.lines;
+        default = "";
+        example = "iptables -A INPUT -p icmp -j ACCEPT";
+        description = lib.mdDoc ''
+          Additional shell commands executed as part of the firewall
+          initialisation script.  These are executed just before the
+          final "reject" firewall rule is added, so they can be used
+          to allow packets that would otherwise be refused.
+
+          This option only works with the iptables based firewall.
+        '';
+      };
+
+      extraStopCommands = mkOption {
+        type = types.lines;
+        default = "";
+        example = "iptables -P INPUT ACCEPT";
+        description = lib.mdDoc ''
+          Additional shell commands executed as part of the firewall
+          shutdown script.  These are executed just after the removal
+          of the NixOS input rule, or if the service enters a failed
+          state.
+
+          This option only works with the iptables based firewall.
+        '';
+      };
+    };
+
+  };
+
+  # FIXME: Maybe if `enable' is false, the firewall should still be
+  # built but not started by default?
+  config = mkIf (cfg.enable && config.networking.nftables.enable == false) {
+
+    assertions = [
+      # This is approximately "checkReversePath -> kernelHasRPFilter",
+      # but the checkReversePath option can include non-boolean
+      # values.
+      {
+        assertion = cfg.checkReversePath == false || kernelHasRPFilter;
+        message = "This kernel does not support rpfilter";
+      }
+    ];
+
+    networking.firewall.checkReversePath = mkIf (!kernelHasRPFilter) (mkDefault false);
+
+    systemd.services.firewall = {
+      description = "Firewall";
+      wantedBy = [ "sysinit.target" ];
+      wants = [ "network-pre.target" ];
+      before = [ "network-pre.target" ];
+      after = [ "systemd-modules-load.service" ];
+
+      path = [ cfg.package ] ++ cfg.extraPackages;
+
+      # FIXME: this module may also try to load kernel modules, but
+      # containers don't have CAP_SYS_MODULE.  So the host system had
+      # better have all necessary modules already loaded.
+      unitConfig.ConditionCapability = "CAP_NET_ADMIN";
+      unitConfig.DefaultDependencies = false;
+
+      reloadIfChanged = true;
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
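+        # The "@" prefix makes systemd pass the word after the script path ("firewall-start", etc.) as argv[0].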
+        ExecStart = "@${startScript} firewall-start";
+        ExecReload = "@${reloadScript} firewall-reload";
+        ExecStop = "@${stopScript} firewall-stop";
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/firewall-nftables.nix b/nixpkgs/nixos/modules/services/networking/firewall-nftables.nix
new file mode 100644
index 000000000000..7c7136cc96f1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/firewall-nftables.nix
@@ -0,0 +1,174 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking.firewall;
+
+  ifaceSet = concatStringsSep ", " (
+    map (x: ''"${x}"'') cfg.trustedInterfaces
+  );
+
+  portsToNftSet = ports: portRanges: concatStringsSep ", " (
+    map (x: toString x) ports
+    ++ map (x: "${toString x.from}-${toString x.to}") portRanges
+  );
+
+in
+
+{
+
+  options = {
+
+    networking.firewall = {
+      extraInputRules = mkOption {
+        type = types.lines;
+        default = "";
+        example = "ip6 saddr { fc00::/7, fe80::/10 } tcp dport 24800 accept";
+        description = lib.mdDoc ''
+          Additional nftables rules to be appended to the input-allow
+          chain.
+
+          This option only works with the nftables based firewall.
+        '';
+      };
+
+      extraForwardRules = mkOption {
+        type = types.lines;
+        default = "";
+        example = "iifname wg0 accept";
+        description = lib.mdDoc ''
+          Additional nftables rules to be appended to the forward-allow
+          chain.
+
+          This option only works with the nftables based firewall.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf (cfg.enable && config.networking.nftables.enable) {
+
+    assertions = [
+      {
+        assertion = cfg.extraCommands == "";
+        message = "extraCommands is incompatible with the nftables based firewall: ${cfg.extraCommands}";
+      }
+      {
+        assertion = cfg.extraStopCommands == "";
+        message = "extraStopCommands is incompatible with the nftables based firewall: ${cfg.extraStopCommands}";
+      }
+      {
+        assertion = cfg.pingLimit == null || !(hasPrefix "--" cfg.pingLimit);
+        message = "nftables syntax like \"2/second\" should be used in networking.firewall.pingLimit";
+      }
+      {
+        assertion = config.networking.nftables.rulesetFile == null;
+        message = "networking.nftables.rulesetFile conflicts with the firewall";
+      }
+    ];
+
+    networking.nftables.tables."nixos-fw".family = "inet";
+    networking.nftables.tables."nixos-fw".content = ''
+        ${optionalString (cfg.checkReversePath != false) ''
+          chain rpfilter {
+            type filter hook prerouting priority mangle + 10; policy drop;
+
+            meta nfproto ipv4 udp sport . udp dport { 67 . 68, 68 . 67 } accept comment "DHCPv4 client/server"
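+            # Reverse path check: accept only if a route back to the source exists (via the incoming interface, unless checkReversePath = "loose")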
+            fib saddr . mark ${optionalString (cfg.checkReversePath != "loose") ". iif"} oif exists accept
+
+            ${optionalString cfg.logReversePathDrops ''
+              log level info prefix "rpfilter drop: "
+            ''}
+
+          }
+        ''}
+
+        chain input {
+          type filter hook input priority filter; policy drop;
+
+          ${optionalString (ifaceSet != "") ''iifname { ${ifaceSet} } accept comment "trusted interfaces"''}
+
+          # Some ICMPv6 types like NDP are untracked
+          ct state vmap {
+            invalid : drop,
+            established : accept,
+            related : accept,
+            new : jump input-allow,
+            untracked: jump input-allow,
+          }
+
+          ${optionalString cfg.logRefusedConnections ''
+            tcp flags syn / fin,syn,rst,ack log level info prefix "refused connection: "
+          ''}
+          ${optionalString (cfg.logRefusedPackets && !cfg.logRefusedUnicastsOnly) ''
+            pkttype broadcast log level info prefix "refused broadcast: "
+            pkttype multicast log level info prefix "refused multicast: "
+          ''}
+          ${optionalString cfg.logRefusedPackets ''
+            pkttype host log level info prefix "refused packet: "
+          ''}
+
+          ${optionalString cfg.rejectPackets ''
+            meta l4proto tcp reject with tcp reset
+            reject
+          ''}
+
+        }
+
+        chain input-allow {
+
+          ${concatStrings (mapAttrsToList (iface: cfg:
+            let
+              ifaceExpr = optionalString (iface != "default") "iifname ${iface}";
+              tcpSet = portsToNftSet cfg.allowedTCPPorts cfg.allowedTCPPortRanges;
+              udpSet = portsToNftSet cfg.allowedUDPPorts cfg.allowedUDPPortRanges;
+            in
+            ''
+              ${optionalString (tcpSet != "") "${ifaceExpr} tcp dport { ${tcpSet} } accept"}
+              ${optionalString (udpSet != "") "${ifaceExpr} udp dport { ${udpSet} } accept"}
+            ''
+          ) cfg.allInterfaces)}
+
+          ${optionalString cfg.allowPing ''
+            icmp type echo-request ${optionalString (cfg.pingLimit != null) "limit rate ${cfg.pingLimit}"} accept comment "allow ping"
+          ''}
+
+          icmpv6 type != { nd-redirect, 139 } accept comment "Accept all ICMPv6 messages except redirects and node information queries (type 139).  See RFC 4890, section 4.4."
+          ip6 daddr fe80::/64 udp dport 546 accept comment "DHCPv6 client"
+
+          ${cfg.extraInputRules}
+
+        }
+
+        ${optionalString cfg.filterForward ''
+          chain forward {
+            type filter hook forward priority filter; policy drop;
+
+            ct state vmap {
+              invalid : drop,
+              established : accept,
+              related : accept,
+              new : jump forward-allow,
+              untracked : jump forward-allow,
+            }
+
+          }
+
+          chain forward-allow {
+
+            icmpv6 type != { router-renumbering, 139 } accept comment "Accept all ICMPv6 messages except renumbering and node information queries (type 139).  See RFC 4890, section 4.3."
+
+            ct status dnat accept comment "allow port forward"
+
+            ${cfg.extraForwardRules}
+
+          }
+        ''}
+    '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/firewall.nix b/nixpkgs/nixos/modules/services/networking/firewall.nix
new file mode 100644
index 000000000000..ac02a93836b8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/firewall.nix
@@ -0,0 +1,290 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking.firewall;
+
+  canonicalizePortList =
+    ports: lib.unique (builtins.sort builtins.lessThan ports);
+
+  commonOptions = {
+    allowedTCPPorts = mkOption {
+      type = types.listOf types.port;
+      default = [ ];
+      apply = canonicalizePortList;
+      example = [ 22 80 ];
+      description = lib.mdDoc ''
+        List of TCP ports on which incoming connections are
+        accepted.
+      '';
+    };
+
+    allowedTCPPortRanges = mkOption {
+      type = types.listOf (types.attrsOf types.port);
+      default = [ ];
+      example = [{ from = 8999; to = 9003; }];
+      description = lib.mdDoc ''
+        A range of TCP ports on which incoming connections are
+        accepted.
+      '';
+    };
+
+    allowedUDPPorts = mkOption {
+      type = types.listOf types.port;
+      default = [ ];
+      apply = canonicalizePortList;
+      example = [ 53 ];
+      description = lib.mdDoc ''
+        List of open UDP ports.
+      '';
+    };
+
+    allowedUDPPortRanges = mkOption {
+      type = types.listOf (types.attrsOf types.port);
+      default = [ ];
+      example = [{ from = 60000; to = 61000; }];
+      description = lib.mdDoc ''
+        Range of open UDP ports.
+      '';
+    };
+  };
+
+in
+
+{
+
+  options = {
+
+    networking.firewall = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the firewall.  This is a simple stateful
+          firewall that blocks connection attempts to unauthorised TCP
+          or UDP ports on this machine.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = if config.networking.nftables.enable then pkgs.nftables else pkgs.iptables;
+        defaultText = literalExpression ''if config.networking.nftables.enable then "pkgs.nftables" else "pkgs.iptables"'';
+        example = literalExpression "pkgs.iptables-legacy";
+        description = lib.mdDoc ''
+          The package to use for running the firewall service.
+        '';
+      };
+
+      logRefusedConnections = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to log rejected or dropped incoming connections.
+          Note: The logs are found in the kernel logs, i.e. dmesg
+          or journalctl -k.
+        '';
+      };
+
+      logRefusedPackets = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to log all rejected or dropped incoming packets.
+          This tends to give a lot of log messages, so it's mostly
+          useful for debugging.
+          Note: The logs are found in the kernel logs, i.e. dmesg
+          or journalctl -k.
+        '';
+      };
+
+      logRefusedUnicastsOnly = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          If {option}`networking.firewall.logRefusedPackets`
+          and this option are enabled, then only log packets
+          specifically directed at this machine, i.e., not broadcasts
+          or multicasts.
+        '';
+      };
+
+      rejectPackets = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If set, refused packets are rejected rather than dropped
+          (ignored).  This means that an ICMP "port unreachable" error
+          message is sent back to the client (or a TCP RST packet in
+          case of an existing connection).  Rejecting packets makes
+          port scanning somewhat easier.
+        '';
+      };
+
+      trustedInterfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "enp0s2" ];
+        description = lib.mdDoc ''
+          Traffic coming in from these interfaces will be accepted
+          unconditionally.  Traffic from the loopback (lo) interface
+          will always be accepted.
+        '';
+      };
+
+      allowPing = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to respond to incoming ICMPv4 echo requests
+          ("pings").  ICMPv6 pings are always allowed because the
+          larger address space of IPv6 makes network scanning much
+          less effective.
+        '';
+      };
+
+      pingLimit = mkOption {
+        type = types.nullOr (types.separatedString " ");
+        default = null;
+        example = "--limit 1/minute --limit-burst 5";
+        description = lib.mdDoc ''
+          If pings are allowed, this allows setting rate limits on them.
+
+          For the iptables based firewall, it should be set like
+          "--limit 1/minute --limit-burst 5".
+
+          For the nftables based firewall, it should be set like
+          "2/second" or "1/minute burst 5 packets".
+        '';
+      };
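+      # A minimal sketch (hypothetical limits) of the two syntaxes described above:
+      #   networking.firewall.pingLimit = "--limit 1/minute --limit-burst 5";   # iptables backend
+      #   networking.firewall.pingLimit = "1/minute burst 5 packets";           # nftables backend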
+
+      checkReversePath = mkOption {
+        type = types.either types.bool (types.enum [ "strict" "loose" ]);
+        default = true;
+        defaultText = literalMD "`true` except if the iptables based firewall is in use and the kernel lacks rpfilter support";
+        example = "loose";
+        description = lib.mdDoc ''
+          Performs a reverse path filter test on a packet.  If a reply
+          to the packet would not be sent via the same interface that
+          the packet arrived on, it is refused.
+
+          If using asymmetric routing or other complicated routing, set
+          this option to loose mode or disable it and set up your own
+          counter-measures.
+
+          This option can be either true (or "strict"), "loose" (only
+          drop the packet if the source address is not reachable via any
+          interface) or false.
+        '';
+      };
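+      # A minimal sketch: hosts with asymmetric routing can relax the reverse
+      # path test instead of disabling it entirely:
+      #   networking.firewall.checkReversePath = "loose";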
+
+      logReversePathDrops = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Logs dropped packets failing the reverse path filter test if
+          the option networking.firewall.checkReversePath is enabled.
+        '';
+      };
+
+      filterForward = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable filtering in IP forwarding.
+
+          This option only works with the nftables based firewall.
+        '';
+      };
+
+      connectionTrackingModules = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "ftp" "irc" "sane" "sip" "tftp" "amanda" "h323" "netbios_sn" "pptp" "snmp" ];
+        description = lib.mdDoc ''
+          List of connection-tracking helpers that are auto-loaded.
+          The complete list of possible values is given in the example.
+
+          As helpers can pose a security risk, it is advised to
+          set this to an empty list and disable the setting
+          networking.firewall.autoLoadConntrackHelpers unless you
+          know what you are doing. Connection tracking is disabled
+          by default.
+
+          Loading of helpers is recommended to be done through the
+          CT target.  More info:
+          https://home.regit.org/netfilter-en/secure-use-of-helpers/
+        '';
+      };
+
+      autoLoadConntrackHelpers = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to auto-load connection-tracking helpers.
+          See the description of networking.firewall.connectionTrackingModules.
+
+          (needs kernel 3.5+)
+        '';
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        example = literalExpression "[ pkgs.ipset ]";
+        description = lib.mdDoc ''
+          Additional packages to be included in the environment of the system
+          as well as the path of networking.firewall.extraCommands.
+        '';
+      };
+
+      interfaces = mkOption {
+        default = { };
+        type = with types; attrsOf (submodule [{ options = commonOptions; }]);
+        description = lib.mdDoc ''
+          Interface-specific open ports.
+        '';
+      };
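+      # A minimal sketch (hypothetical interface and ports) of interface-specific
+      # rules combined with the global lists declared above:
+      #   networking.firewall.allowedTCPPorts = [ 22 ];
+      #   networking.firewall.interfaces."enp0s2" = {
+      #     allowedTCPPorts = [ 80 443 ];
+      #     allowedUDPPortRanges = [ { from = 60000; to = 61000; } ];
+      #   };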
+
+      allInterfaces = mkOption {
+        internal = true;
+        visible = false;
+        default = { default = mapAttrs (name: value: cfg.${name}) commonOptions; } // cfg.interfaces;
+        type = with types; attrsOf (submodule [{ options = commonOptions; }]);
+        description = lib.mdDoc ''
+          All open ports.
+        '';
+      };
+    } // commonOptions;
+
+  };
+
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.filterForward -> config.networking.nftables.enable;
+        message = "filterForward only works with the nftables based firewall";
+      }
+      {
+        assertion = cfg.autoLoadConntrackHelpers -> lib.versionOlder config.boot.kernelPackages.kernel.version "6";
+        message = "conntrack helper autoloading has been removed from kernel 6.0 and newer";
+      }
+    ];
+
+    networking.firewall.trustedInterfaces = [ "lo" ];
+
+    environment.systemPackages = [ cfg.package ] ++ cfg.extraPackages;
+
+    boot.kernelModules = (optional cfg.autoLoadConntrackHelpers "nf_conntrack")
+      ++ map (x: "nf_conntrack_${x}") cfg.connectionTrackingModules;
+    boot.extraModprobeConfig = optionalString cfg.autoLoadConntrackHelpers ''
+      options nf_conntrack nf_conntrack_helper=1
+    '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/flannel.nix b/nixpkgs/nixos/modules/services/networking/flannel.nix
new file mode 100644
index 000000000000..6ed4f78ddc92
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/flannel.nix
@@ -0,0 +1,191 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.flannel;
+
+  networkConfig = filterAttrs (n: v: v != null) {
+    Network = cfg.network;
+    SubnetLen = cfg.subnetLen;
+    SubnetMin = cfg.subnetMin;
+    SubnetMax = cfg.subnetMax;
+    Backend = cfg.backend;
+  };
+in {
+  options.services.flannel = {
+    enable = mkEnableOption (lib.mdDoc "flannel");
+
+    package = mkOption {
+      description = lib.mdDoc "Package to use for flannel";
+      type = types.package;
+      default = pkgs.flannel;
+      defaultText = literalExpression "pkgs.flannel";
+    };
+
+    publicIp = mkOption {
+      description = lib.mdDoc ''
+        IP accessible by other nodes for inter-host communication.
+        Defaults to the IP of the interface being used for communication.
+      '';
+      type = types.nullOr types.str;
+      default = null;
+    };
+
+    iface = mkOption {
+      description = lib.mdDoc ''
+        Interface to use (IP or name) for inter-host communication.
+        Defaults to the interface for the default route on the machine.
+      '';
+      type = types.nullOr types.str;
+      default = null;
+    };
+
+    etcd = {
+      endpoints = mkOption {
+        description = lib.mdDoc "Etcd endpoints";
+        type = types.listOf types.str;
+        default = ["http://127.0.0.1:2379"];
+      };
+
+      prefix = mkOption {
+        description = lib.mdDoc "Etcd key prefix";
+        type = types.str;
+        default = "/coreos.com/network";
+      };
+
+      caFile = mkOption {
+        description = lib.mdDoc "Etcd certificate authority file";
+        type = types.nullOr types.path;
+        default = null;
+      };
+
+      certFile = mkOption {
+        description = lib.mdDoc "Etcd cert file";
+        type = types.nullOr types.path;
+        default = null;
+      };
+
+      keyFile = mkOption {
+        description = lib.mdDoc "Etcd key file";
+        type = types.nullOr types.path;
+        default = null;
+      };
+    };
+
+    kubeconfig = mkOption {
+      description = lib.mdDoc ''
+        Path to kubeconfig to use for storing flannel config using the
+        Kubernetes API
+      '';
+      type = types.nullOr types.path;
+      default = null;
+    };
+
+    network = mkOption {
+      description = lib.mdDoc "IPv4 network in CIDR format to use for the entire flannel network.";
+      type = types.str;
+    };
+
+    nodeName = mkOption {
+      description = lib.mdDoc ''
+        Needed when running with Kubernetes as the backend, as this cannot be auto-detected.
+      '';
+      type = types.nullOr types.str;
+      default = config.networking.fqdnOrHostName;
+      defaultText = literalExpression "config.networking.fqdnOrHostName";
+      example = "node1.example.com";
+    };
+
+    storageBackend = mkOption {
+      description = lib.mdDoc "Determines where flannel stores its configuration at runtime";
+      type = types.enum ["etcd" "kubernetes"];
+      default = "etcd";
+    };
+
+    subnetLen = mkOption {
+      description = lib.mdDoc ''
+        The size of the subnet allocated to each host. Defaults to 24 (i.e. /24)
+        unless the Network was configured to be smaller than a /24 in which case
+        it is one less than the network.
+      '';
+      type = types.int;
+      default = 24;
+    };
+
+    subnetMin = mkOption {
+      description = lib.mdDoc ''
+        The beginning of the IP range with which the subnet allocation should start.
+        Defaults to the first subnet of Network.
+      '';
+      type = types.nullOr types.str;
+      default = null;
+    };
+
+    subnetMax = mkOption {
+      description = lib.mdDoc ''
+        The end of the IP range at which the subnet allocation should stop.
+        Defaults to the last subnet of Network.
+      '';
+      type = types.nullOr types.str;
+      default = null;
+    };
+
+    backend = mkOption {
+      description = lib.mdDoc "Type of backend to use and specific configurations for that backend.";
+      type = types.attrs;
+      default = {
+        Type = "vxlan";
+      };
+    };
+  };
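+  # A minimal sketch (hypothetical addresses) of an etcd-backed flannel setup,
+  # using only the options declared above:
+  #   services.flannel = {
+  #     enable = true;
+  #     network = "10.244.0.0/16";
+  #     backend = { Type = "vxlan"; };
+  #   };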
+
+  config = mkIf cfg.enable {
+    systemd.services.flannel = {
+      description = "Flannel Service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment = {
+        FLANNELD_PUBLIC_IP = cfg.publicIp;
+        FLANNELD_IFACE = cfg.iface;
+      } // optionalAttrs (cfg.storageBackend == "etcd") {
+        FLANNELD_ETCD_ENDPOINTS = concatStringsSep "," cfg.etcd.endpoints;
+        FLANNELD_ETCD_KEYFILE = cfg.etcd.keyFile;
+        FLANNELD_ETCD_CERTFILE = cfg.etcd.certFile;
+        FLANNELD_ETCD_CAFILE = cfg.etcd.caFile;
+        ETCDCTL_CERT = cfg.etcd.certFile;
+        ETCDCTL_KEY = cfg.etcd.keyFile;
+        ETCDCTL_CACERT = cfg.etcd.caFile;
+        ETCDCTL_ENDPOINTS = concatStringsSep "," cfg.etcd.endpoints;
+        ETCDCTL_API = "3";
+      } // optionalAttrs (cfg.storageBackend == "kubernetes") {
+        FLANNELD_KUBE_SUBNET_MGR = "true";
+        FLANNELD_KUBECONFIG_FILE = cfg.kubeconfig;
+        NODE_NAME = cfg.nodeName;
+      };
+      path = [ pkgs.iptables ];
+      preStart = optionalString (cfg.storageBackend == "etcd") ''
+        echo "setting network configuration"
+        until ${pkgs.etcd}/bin/etcdctl put /coreos.com/network/config '${builtins.toJSON networkConfig}'
+        do
+          echo "setting network configuration, retry"
+          sleep 1
+        done
+      '';
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/flannel";
+        Restart = "always";
+        RestartSec = "10s";
+        RuntimeDirectory = "flannel";
+      };
+    };
+
+    services.etcd.enable = mkDefault (cfg.storageBackend == "etcd" && cfg.etcd.endpoints == ["http://127.0.0.1:2379"]);
+
+    # for some reason, flannel doesn't let you configure this path
+    # see: https://github.com/coreos/flannel/blob/master/Documentation/configuration.md#configuration
+    environment.etc."kube-flannel/net-conf.json" = mkIf (cfg.storageBackend == "kubernetes") {
+      source = pkgs.writeText "net-conf.json" (builtins.toJSON networkConfig);
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/freenet.nix b/nixpkgs/nixos/modules/services/networking/freenet.nix
new file mode 100644
index 000000000000..e1737e820a51
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/freenet.nix
@@ -0,0 +1,64 @@
+# NixOS module for Freenet daemon
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.freenet;
+  varDir = "/var/lib/freenet";
+
+in
+
+{
+
+  ### configuration
+
+  options = {
+
+    services.freenet = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable the Freenet daemon";
+      };
+
+      nice = mkOption {
+        type = types.int;
+        default = 10;
+        description = lib.mdDoc "Set the nice level for the Freenet daemon";
+      };
+
+    };
+
+  };
+
+  ### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.freenet = {
+      description = "Freenet daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.ExecStart = "${pkgs.freenet}/bin/freenet";
+      serviceConfig.User = "freenet";
+      serviceConfig.UMask = "0007";
+      serviceConfig.WorkingDirectory = varDir;
+      serviceConfig.Nice = cfg.nice;
+    };
+
+    users.users.freenet = {
+      group = "freenet";
+      description = "Freenet daemon user";
+      home = varDir;
+      createHome = true;
+      uid = config.ids.uids.freenet;
+    };
+
+    users.groups.freenet.gid = config.ids.gids.freenet;
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/freeradius.nix b/nixpkgs/nixos/modules/services/networking/freeradius.nix
new file mode 100644
index 000000000000..419a683cb774
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/freeradius.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.freeradius;
+
+  freeradiusService = cfg:
+  {
+    description = "FreeRadius server";
+    wantedBy = ["multi-user.target"];
+    after = ["network.target"];
+    wants = ["network.target"];
+    preStart = ''
+      ${pkgs.freeradius}/bin/radiusd -C -d ${cfg.configDir} -l stdout
+    '';
+
+    serviceConfig = {
+        ExecStart = "${pkgs.freeradius}/bin/radiusd -f -d ${cfg.configDir} -l stdout" +
+                    optionalString cfg.debug " -xx";
+        ExecReload = [
+          "${pkgs.freeradius}/bin/radiusd -C -d ${cfg.configDir} -l stdout"
+          "${pkgs.coreutils}/bin/kill -HUP $MAINPID"
+        ];
+        User = "radius";
+        ProtectSystem = "full";
+        ProtectHome = "on";
+        Restart = "on-failure";
+        RestartSec = 2;
+        LogsDirectory = "radius";
+    };
+  };
+
+  freeradiusConfig = {
+    enable = mkEnableOption (lib.mdDoc "the freeradius server");
+
+    configDir = mkOption {
+      type = types.path;
+      default = "/etc/raddb";
+      description = lib.mdDoc ''
+        The path of the freeradius server configuration directory.
+      '';
+    };
+
+    debug = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable debug logging for freeradius (-xx
+        option). This should not be left on, since it includes
+        sensitive data such as passwords in the logs.
+      '';
+    };
+
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.freeradius = freeradiusConfig;
+  };
+
+
+  ###### implementation
+
+  config = mkIf (cfg.enable) {
+
+    users = {
+      users.radius = {
+        /*uid = config.ids.uids.radius;*/
+        description = "Radius daemon user";
+        isSystemUser = true;
+      };
+    };
+
+    systemd.services.freeradius = freeradiusService cfg;
+    warnings = optional cfg.debug "Freeradius debug logging is enabled. This will log passwords in plaintext to the journal!";
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/frp.nix b/nixpkgs/nixos/modules/services/networking/frp.nix
new file mode 100644
index 000000000000..e4f9a220b5e8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/frp.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.frp;
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "frp.ini" cfg.settings;
+  isClient = (cfg.role == "client");
+  isServer = (cfg.role == "server");
+in
+{
+  options = {
+    services.frp = {
+      enable = mkEnableOption (mdDoc "frp");
+
+      package = mkPackageOptionMD pkgs "frp" { };
+
+      role = mkOption {
+        type = types.enum [ "server" "client" ];
+        description = mdDoc ''
+          frp consists of a `client` and a `server`. The server is usually
+          deployed on a machine with a public IP address, while the client
+          is usually deployed on the machine hosting the internal service
+          that should be exposed through the server.
+        '';
+      };
+
+      settings = mkOption {
+        type = settingsFormat.type;
+        default = { };
+        description = mdDoc ''
+          Frp configuration, for configuration options
+          see the example of [client](https://github.com/fatedier/frp/blob/dev/conf/frpc_legacy_full.ini)
+          or [server](https://github.com/fatedier/frp/blob/dev/conf/frps_legacy_full.ini) on github.
+        '';
+        example = literalExpression ''
+          {
+            common = {
+              server_addr = "x.x.x.x";
+              server_port = 7000;
+            };
+          }
+        '';
+      };
+    };
+  };
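+    # A minimal sketch (hypothetical values) of a server-side deployment; the
+    # bind_port key is an assumption based on upstream frps defaults:
+    #   services.frp = {
+    #     enable = true;
+    #     role = "server";
+    #     settings.common.bind_port = 7000;
+    #   };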
+
+  config =
+    let
+      serviceCapability = optionals isServer [ "CAP_NET_BIND_SERVICE" ];
+      executableFile = if isClient then "frpc" else "frps";
+    in
+    mkIf cfg.enable {
+      systemd.services = {
+        frp = {
+          wants = optionals isClient [ "network-online.target" ];
+          after = if isClient then [ "network-online.target" ] else [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+          description = "A fast reverse proxy frp ${cfg.role}";
+          serviceConfig = {
+            Type = "simple";
+            Restart = "on-failure";
+            RestartSec = 15;
+            ExecStart = "${cfg.package}/bin/${executableFile} -c ${configFile}";
+            StateDirectoryMode = optionalString isServer "0700";
+            DynamicUser = true;
+            # Hardening
+            UMask = optionalString isServer "0007";
+            CapabilityBoundingSet = serviceCapability;
+            AmbientCapabilities = serviceCapability;
+            PrivateDevices = true;
+            ProtectHostname = true;
+            ProtectClock = true;
+            ProtectKernelTunables = true;
+            ProtectKernelModules = true;
+            ProtectKernelLogs = true;
+            ProtectControlGroups = true;
+            RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ] ++ optionals isClient [ "AF_UNIX" ];
+            LockPersonality = true;
+            MemoryDenyWriteExecute = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            PrivateMounts = true;
+            SystemCallArchitectures = "native";
+            SystemCallFilter = [ "@system-service" ];
+          };
+        };
+      };
+    };
+
+  meta.maintainers = with maintainers; [ zaldnoay ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/frr.nix b/nixpkgs/nixos/modules/services/networking/frr.nix
new file mode 100644
index 000000000000..8488a4e4ef48
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/frr.nix
@@ -0,0 +1,221 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.frr;
+
+  services = [
+    "static"
+    "bgp"
+    "ospf"
+    "ospf6"
+    "rip"
+    "ripng"
+    "isis"
+    "pim"
+    "ldp"
+    "nhrp"
+    "eigrp"
+    "babel"
+    "sharp"
+    "pbr"
+    "bfd"
+    "fabric"
+    "mgmt"
+  ];
+
+  allServices = services ++ [ "zebra" ];
+
+  isEnabled = service: cfg.${service}.enable;
+
+  daemonName = service: if service == "zebra" then service else "${service}d";
+
+  configFile = service:
+    let
+      scfg = cfg.${service};
+    in
+      if scfg.configFile != null then scfg.configFile
+      else pkgs.writeText "${daemonName service}.conf"
+        ''
+          ! FRR ${daemonName service} configuration
+          !
+          hostname ${config.networking.hostName}
+          log syslog
+          service password-encryption
+          !
+          ${scfg.config}
+          !
+          end
+        '';
+
+  serviceOptions = service:
+    {
+      enable = mkEnableOption (lib.mdDoc "the FRR ${toUpper service} routing protocol");
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/etc/frr/${daemonName service}.conf";
+        description = lib.mdDoc ''
+          Configuration file to use for FRR ${daemonName service}.
+          By default the NixOS generated files are used.
+        '';
+      };
+
+      config = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          let
+            examples = {
+              rip = ''
+                router rip
+                  network 10.0.0.0/8
+              '';
+
+              ospf = ''
+                router ospf
+                  network 10.0.0.0/8 area 0
+              '';
+
+              bgp = ''
+                router bgp 65001
+                  neighbor 10.0.0.1 remote-as 65001
+              '';
+            };
+          in
+            examples.${service} or "";
+        description = lib.mdDoc ''
+          ${daemonName service} configuration statements.
+        '';
+      };
+
+      vtyListenAddress = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          Address to bind to for the VTY interface.
+        '';
+      };
+
+      vtyListenPort = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          TCP Port to bind to for the VTY interface.
+        '';
+      };
+
+      extraOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Extra options for the daemon.
+        '';
+      };
+    };
+
+in
+
+{
+
+  ###### interface
+  imports = [
+    {
+      options.services.frr = {
+        zebra = (serviceOptions "zebra") // {
+          enable = mkOption {
+            type = types.bool;
+            default = any isEnabled services;
+            description = lib.mdDoc ''
+              Whether to enable the Zebra routing manager.
+
+              The Zebra routing manager is automatically enabled
+              if any routing protocols are configured.
+            '';
+          };
+        };
+      };
+    }
+    { options.services.frr = (genAttrs services serviceOptions); }
+  ];
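+  # A minimal sketch (hypothetical AS numbers), reusing the BGP example from the
+  # option declarations above:
+  #   services.frr.bgp = {
+  #     enable = true;
+  #     config = ''
+  #       router bgp 65001
+  #         neighbor 10.0.0.1 remote-as 65001
+  #     '';
+  #   };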
+
+  ###### implementation
+
+  config = mkIf (any isEnabled allServices) {
+
+    environment.systemPackages = [
+      pkgs.frr # for the vtysh tool
+    ];
+
+    users.users.frr = {
+      description = "FRR daemon user";
+      isSystemUser = true;
+      group = "frr";
+    };
+
+    users.groups = {
+      frr = {};
+      # Members of the frrvty group can use vtysh to inspect the FRR daemons
+      frrvty = { members = [ "frr" ]; };
+    };
+
+    environment.etc = let
+      mkEtcLink = service: {
+        name = "frr/${service}.conf";
+        value.source = configFile service;
+      };
+    in
+      (builtins.listToAttrs
+      (map mkEtcLink (filter isEnabled allServices))) // {
+        "frr/vtysh.conf".text = "";
+      };
+
+    systemd.tmpfiles.rules = [
+      "d /run/frr 0750 frr frr -"
+    ];
+
+    systemd.services =
+      let
+        frrService = service:
+          let
+            scfg = cfg.${service};
+            daemon = daemonName service;
+          in
+            nameValuePair daemon ({
+              wantedBy = [ "multi-user.target" ];
+              after = [ "network-pre.target" "systemd-sysctl.service" ] ++ lib.optionals (service != "zebra") [ "zebra.service" ];
+              bindsTo = lib.optionals (service != "zebra") [ "zebra.service" ];
+              wants = [ "network.target" ];
+
+              description = if service == "zebra" then "FRR Zebra routing manager"
+                else "FRR ${toUpper service} routing daemon";
+
+              unitConfig.Documentation = if service == "zebra" then "man:zebra(8)"
+                else "man:${daemon}(8) man:zebra(8)";
+
+              restartTriggers = [
+                (configFile service)
+              ];
+              reloadIfChanged = true;
+
+              serviceConfig = {
+                PIDFile = "frr/${daemon}.pid";
+                ExecStart = "${pkgs.frr}/libexec/frr/${daemon} -f /etc/frr/${service}.conf"
+                  + optionalString (scfg.vtyListenAddress != "") " -A ${scfg.vtyListenAddress}"
+                  + optionalString (scfg.vtyListenPort != null) " -P ${toString scfg.vtyListenPort}"
+                  + " " + (concatStringsSep " " scfg.extraOptions);
+                ExecReload = "${pkgs.python3.interpreter} ${pkgs.frr}/libexec/frr/frr-reload.py --reload --daemon ${daemonName service} --bindir ${pkgs.frr}/bin --rundir /run/frr /etc/frr/${service}.conf";
+                Restart = "on-abnormal";
+              };
+            });
+       in
+         listToAttrs (map frrService (filter isEnabled allServices));
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ woffs ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/gateone.nix b/nixpkgs/nixos/modules/services/networking/gateone.nix
new file mode 100644
index 000000000000..ac3f3c9bbf2c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/gateone.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, ...}:
+with lib;
+let
+  cfg = config.services.gateone;
+in
+{
+options = {
+    services.gateone = {
+      enable = mkEnableOption (lib.mdDoc "GateOne server");
+      pidDir = mkOption {
+        default = "/run/gateone";
+        type = types.path;
+        description = lib.mdDoc "Path of pid files for GateOne.";
+      };
+      settingsDir = mkOption {
+        default = "/var/lib/gateone";
+        type = types.path;
+        description = lib.mdDoc "Path of configuration files for GateOne.";
+      };
+    };
+};
+config = mkIf cfg.enable {
+  environment.systemPackages = with pkgs.pythonPackages; [
+    gateone pkgs.openssh pkgs.procps pkgs.coreutils pkgs.cacert];
+
+  users.users.gateone = {
+    description = "GateOne privilege separation user";
+    uid = config.ids.uids.gateone;
+    home = cfg.settingsDir;
+  };
+  users.groups.gateone.gid = config.ids.gids.gateone;
+
+  systemd.services.gateone = with pkgs; {
+    description = "GateOne web-based terminal";
+    path = [ pythonPackages.gateone nix openssh procps coreutils ];
+    preStart = ''
+      if [ ! -d ${cfg.settingsDir} ] ; then
+        mkdir -m 0750 -p ${cfg.settingsDir}
+        chown -R gateone:gateone ${cfg.settingsDir}
+      fi
+      if [ ! -d ${cfg.pidDir} ] ; then
+        mkdir -m 0750 -p ${cfg.pidDir}
+        chown -R gateone:gateone ${cfg.pidDir}
+      fi
+      '';
+    #unitConfig.RequiresMountsFor = "${cfg.settingsDir}";
+    serviceConfig = {
+      ExecStart = ''${pythonPackages.gateone}/bin/gateone --settings_dir=${cfg.settingsDir} --pid_file=${cfg.pidDir}/gateone.pid --gid=${toString config.ids.gids.gateone} --uid=${toString config.ids.uids.gateone}'';
+      User = "gateone";
+      Group = "gateone";
+      WorkingDirectory = cfg.settingsDir;
+    };
+
+    wantedBy = [ "multi-user.target" ];
+    requires = [ "network.target" ];
+  };
+};
+}
+
diff --git a/nixpkgs/nixos/modules/services/networking/gdomap.nix b/nixpkgs/nixos/modules/services/networking/gdomap.nix
new file mode 100644
index 000000000000..53ea8b6875d8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/gdomap.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  #
+  # interface
+  #
+  options = {
+    services.gdomap = {
+      enable = mkEnableOption (lib.mdDoc "GNUstep Distributed Objects name server");
+   };
+  };
+
+  #
+  # implementation
+  #
+  config = mkIf config.services.gdomap.enable {
+    # NOTE: gdomap runs as root
+    # TODO: extra user for gdomap?
+    systemd.services.gdomap = {
+      description = "gdomap server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path  = [ pkgs.gnustep.base ];
+      serviceConfig.ExecStart = "${pkgs.gnustep.base}/bin/gdomap -f";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ghostunnel.nix b/nixpkgs/nixos/modules/services/networking/ghostunnel.nix
new file mode 100644
index 000000000000..4902367e2a6a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ghostunnel.nix
@@ -0,0 +1,242 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib)
+    attrValues
+    concatMap
+    concatStringsSep
+    escapeShellArg
+    literalExpression
+    mapAttrs'
+    mkDefault
+    mkEnableOption
+    mkIf
+    mkOption
+    nameValuePair
+    optional
+    types
+    ;
+
+  mainCfg = config.services.ghostunnel;
+
+  module = { config, name, ... }:
+    {
+      options = {
+
+        listen = mkOption {
+          description = lib.mdDoc ''
+            Address and port to listen on (can be HOST:PORT, unix:PATH).
+          '';
+          type = types.str;
+        };
+
+        target = mkOption {
+          description = lib.mdDoc ''
+            Address to forward connections to (can be HOST:PORT or unix:PATH).
+          '';
+          type = types.str;
+        };
+
+        keystore = mkOption {
+          description = lib.mdDoc ''
+            Path to keystore (combined PEM with cert/key, or PKCS12 keystore).
+
+            NB: storepass is not supported because it would expose credentials via `/proc/*/cmdline`.
+
+            Specify this or `cert` and `key`.
+          '';
+          type = types.nullOr types.str;
+          default = null;
+        };
+
+        cert = mkOption {
+          description = lib.mdDoc ''
+            Path to certificate (PEM with certificate chain).
+
+            Not required if `keystore` is set.
+          '';
+          type = types.nullOr types.str;
+          default = null;
+        };
+
+        key = mkOption {
+          description = lib.mdDoc ''
+            Path to certificate private key (PEM with private key).
+
+            Not required if `keystore` is set.
+          '';
+          type = types.nullOr types.str;
+          default = null;
+        };
+
+        cacert = mkOption {
+          description = lib.mdDoc ''
+            Path to CA bundle file (PEM/X509). Uses system trust store if `null`.
+          '';
+          type = types.nullOr types.str;
+        };
+
+        disableAuthentication = mkOption {
+          description = lib.mdDoc ''
+            Disable client authentication, no client certificate will be required.
+          '';
+          type = types.bool;
+          default = false;
+        };
+
+        allowAll = mkOption {
+          description = lib.mdDoc ''
+            If true, allow all clients, do not check client cert subject.
+          '';
+          type = types.bool;
+          default = false;
+        };
+
+        allowCN = mkOption {
+          description = lib.mdDoc ''
+            Allow client if common name appears in the list.
+          '';
+          type = types.listOf types.str;
+          default = [];
+        };
+
+        allowOU = mkOption {
+          description = lib.mdDoc ''
+            Allow client if organizational unit name appears in the list.
+          '';
+          type = types.listOf types.str;
+          default = [];
+        };
+
+        allowDNS = mkOption {
+          description = lib.mdDoc ''
+            Allow client if DNS subject alternative name appears in the list.
+          '';
+          type = types.listOf types.str;
+          default = [];
+        };
+
+        allowURI = mkOption {
+          description = lib.mdDoc ''
+            Allow client if URI subject alternative name appears in the list.
+          '';
+          type = types.listOf types.str;
+          default = [];
+        };
+
+        extraArguments = mkOption {
+          description = lib.mdDoc "Extra arguments to pass to `ghostunnel server`";
+          type = types.separatedString " ";
+          default = "";
+        };
+
+        unsafeTarget = mkOption {
+          description = lib.mdDoc ''
+            If set, does not limit target to localhost, 127.0.0.1, [::1], or UNIX sockets.
+
+            This is meant to protect against accidental unencrypted traffic on
+            untrusted networks.
+          '';
+          type = types.bool;
+          default = false;
+        };
+
+        # Definitions to apply at the root of the NixOS configuration.
+        atRoot = mkOption {
+          internal = true;
+        };
+      };
+
+      # Clients should not be authenticated with the public root certificates
+      # (afaict, it doesn't make sense), so we only provide that default when
+      # client cert auth is disabled.
+      config.cacert = mkIf config.disableAuthentication (mkDefault null);
+
+      config.atRoot = {
+        assertions = [
+          { message = ''
+              services.ghostunnel.servers.${name}: At least one access control flag is required.
+              Set at least one of:
+                - services.ghostunnel.servers.${name}.disableAuthentication
+                - services.ghostunnel.servers.${name}.allowAll
+                - services.ghostunnel.servers.${name}.allowCN
+                - services.ghostunnel.servers.${name}.allowOU
+                - services.ghostunnel.servers.${name}.allowDNS
+                - services.ghostunnel.servers.${name}.allowURI
+            '';
+            assertion = config.disableAuthentication
+              || config.allowAll
+              || config.allowCN != []
+              || config.allowOU != []
+              || config.allowDNS != []
+              || config.allowURI != []
+              ;
+          }
+        ];
+
+        systemd.services."ghostunnel-server-${name}" = {
+          after = [ "network.target" ];
+          wants = [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            Restart = "always";
+            AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
+            DynamicUser = true;
+            LoadCredential = optional (config.keystore != null) "keystore:${config.keystore}"
+              ++ optional (config.cert != null) "cert:${config.cert}"
+              ++ optional (config.key != null) "key:${config.key}"
+              ++ optional (config.cacert != null) "cacert:${config.cacert}";
+           };
+          script = concatStringsSep " " (
+            [ "${mainCfg.package}/bin/ghostunnel" ]
+            ++ optional (config.keystore != null) "--keystore=$CREDENTIALS_DIRECTORY/keystore"
+            ++ optional (config.cert != null) "--cert=$CREDENTIALS_DIRECTORY/cert"
+            ++ optional (config.key != null) "--key=$CREDENTIALS_DIRECTORY/key"
+            ++ optional (config.cacert != null) "--cacert=$CREDENTIALS_DIRECTORY/cacert"
+            ++ [
+              "server"
+              "--listen ${config.listen}"
+              "--target ${config.target}"
+            ] ++ optional config.allowAll "--allow-all"
+              ++ map (v: "--allow-cn=${escapeShellArg v}") config.allowCN
+              ++ map (v: "--allow-ou=${escapeShellArg v}") config.allowOU
+              ++ map (v: "--allow-dns=${escapeShellArg v}") config.allowDNS
+              ++ map (v: "--allow-uri=${escapeShellArg v}") config.allowURI
+              ++ optional config.disableAuthentication "--disable-authentication"
+              ++ optional config.unsafeTarget "--unsafe-target"
+              ++ [ config.extraArguments ]
+          );
+        };
+      };
+    };
+
+in
+{
+
+  options = {
+    services.ghostunnel.enable = mkEnableOption (lib.mdDoc "ghostunnel");
+
+    services.ghostunnel.package = mkOption {
+      description = lib.mdDoc "The ghostunnel package to use.";
+      type = types.package;
+      default = pkgs.ghostunnel;
+      defaultText = literalExpression "pkgs.ghostunnel";
+    };
+
+    services.ghostunnel.servers = mkOption {
+      description = lib.mdDoc ''
+        Server mode ghostunnels (TLS listener -> plain TCP/UNIX target)
+      '';
+      type = types.attrsOf (types.submodule module);
+      default = {};
+    };
+  };
+
+  config = mkIf mainCfg.enable {
+    assertions = lib.mkMerge (map (v: v.atRoot.assertions) (attrValues mainCfg.servers));
+    systemd = lib.mkMerge (map (v: v.atRoot.systemd) (attrValues mainCfg.servers));
+  };
+
+  meta.maintainers = with lib.maintainers; [
+    roberth
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/git-daemon.nix b/nixpkgs/nixos/modules/services/networking/git-daemon.nix
new file mode 100644
index 000000000000..80b15eedbbd4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/git-daemon.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+
+  cfg = config.services.gitDaemon;
+
+in
+{
+
+  ###### interface
+
+  options = {
+    services.gitDaemon = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable Git daemon, which allows public hosting of git repositories
+          without any access controls. This is mostly intended for read-only access.
+
+          You can allow write access by setting the daemon.receivepack configuration
+          item of the repository to true. This is solely meant for a closed LAN setting
+          where everybody is friendly.
+
+          If you need any access controls, use something else.
+        '';
+      };
+
+      basePath = mkOption {
+        type = types.str;
+        default = "";
+        example = "/srv/git/";
+        description = lib.mdDoc ''
+          Remap all the path requests as relative to the given path. For example,
+          if you set base-path to /srv/git, then if you later try to pull
+          git://example.com/hello.git, Git daemon will interpret the path as /srv/git/hello.git.
+        '';
+      };
+
+      exportAll = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Publish all directories that look like Git repositories (have the objects
+          and refs subdirectories), even if they do not have the git-daemon-export-ok file.
+
+          If disabled, you need to touch .git/git-daemon-export-ok in each repository
+          you want the daemon to publish.
+
+          Warning: enabling this without a repository whitelist or basePath
+          publishes every git repository you have.
+        '';
+      };
+
+      repositories = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "/srv/git" "/home/user/git/repo2" ];
+        description = lib.mdDoc ''
+          A whitelist of paths of git repositories, or directories containing repositories,
+          all of which will be published. Paths must not end in "/".
+
+          Warning: leaving this empty and enabling exportAll publishes all
+          repositories in your filesystem or basePath if specified.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "";
+        example = "example.com";
+        description = lib.mdDoc "Listen on a specific IP address or hostname.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 9418;
+        description = lib.mdDoc "Port to listen on.";
+      };
+
+      options = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Extra configuration options to be passed to Git daemon.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "git";
+        description = lib.mdDoc "User under which Git daemon would be running.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "git";
+        description = lib.mdDoc "Group under which Git daemon would be running.";
+      };
+
+    };
+  };
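+  # A minimal sketch (hypothetical path): publish every repository under /srv/git
+  # read-only, without per-repository export flags:
+  #   services.gitDaemon = {
+  #     enable = true;
+  #     basePath = "/srv/git";
+  #     exportAll = true;
+  #   };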
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == "git") {
+      git = {
+        uid = config.ids.uids.git;
+        group = "git";
+        description = "Git daemon user";
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "git") {
+      git.gid = config.ids.gids.git;
+    };
+
+    systemd.services.git-daemon = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      script = "${pkgs.git}/bin/git daemon --reuseaddr "
+        + (optionalString (cfg.basePath != "") "--base-path=${cfg.basePath} ")
+        + (optionalString (cfg.listenAddress != "") "--listen=${cfg.listenAddress} ")
+        + "--port=${toString cfg.port} --user=${cfg.user} --group=${cfg.group} ${cfg.options} "
+        + "--verbose " + (optionalString cfg.exportAll "--export-all ")  + concatStringsSep " " cfg.repositories;
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/globalprotect-vpn.nix b/nixpkgs/nixos/modules/services/networking/globalprotect-vpn.nix
new file mode 100644
index 000000000000..36aa93780402
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/globalprotect-vpn.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.globalprotect;
+
+  execStart =
+    if cfg.csdWrapper == null then
+      "${pkgs.globalprotect-openconnect}/bin/gpservice"
+    else
+      "${pkgs.globalprotect-openconnect}/bin/gpservice --csd-wrapper=${cfg.csdWrapper}";
+in
+
+{
+  options.services.globalprotect = {
+    enable = mkEnableOption (lib.mdDoc "globalprotect");
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        GlobalProtect-openconnect configuration. For more information, visit
+        <https://github.com/yuezk/GlobalProtect-openconnect/wiki/Configuration>.
+      '';
+      default = { };
+      example = {
+        "vpn1.company.com" = {
+          openconnect-args = "--script=/path/to/vpnc-script";
+        };
+      };
+      type = types.attrs;
+    };
+
+    csdWrapper = mkOption {
+      description = lib.mdDoc ''
+        A script that will produce a Host Integrity Protection (HIP) report,
+        as described at <https://www.infradead.org/openconnect/hip.html>
+      '';
+      default = null;
+      example = literalExpression ''"''${pkgs.openconnect}/libexec/openconnect/hipreport.sh"'';
+      type = types.nullOr types.path;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.dbus.packages = [ pkgs.globalprotect-openconnect ];
+
+    environment.etc."gpservice/gp.conf".text = lib.generators.toINI { } cfg.settings;
+
+    systemd.services.gpservice = {
+      description = "GlobalProtect openconnect DBus service";
+      serviceConfig = {
+        Type = "dbus";
+        BusName = "com.yuezk.qt.GPService";
+        ExecStart = execStart;
+      };
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/gnunet.nix b/nixpkgs/nixos/modules/services/networking/gnunet.nix
new file mode 100644
index 000000000000..fdb353fd3443
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/gnunet.nix
@@ -0,0 +1,170 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.gnunet;
+
+  stateDir = "/var/lib/gnunet";
+
+  configFile = with cfg;
+    ''
+      [PATHS]
+      GNUNET_HOME = ${stateDir}
+      GNUNET_RUNTIME_DIR = /run/gnunet
+      GNUNET_USER_RUNTIME_DIR = /run/gnunet
+      GNUNET_DATA_HOME = ${stateDir}/data
+
+      [ats]
+      WAN_QUOTA_IN = ${toString load.maxNetDownBandwidth} b
+      WAN_QUOTA_OUT = ${toString load.maxNetUpBandwidth} b
+
+      [datastore]
+      QUOTA = ${toString fileSharing.quota} MB
+
+      [transport-udp]
+      PORT = ${toString udp.port}
+      ADVERTISED_PORT = ${toString udp.port}
+
+      [transport-tcp]
+      PORT = ${toString tcp.port}
+      ADVERTISED_PORT = ${toString tcp.port}
+
+      ${extraOptions}
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.gnunet = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run the GNUnet daemon.  GNUnet is GNU's anonymous
+          peer-to-peer communication and file sharing framework.
+        '';
+      };
+
+      fileSharing = {
+        quota = mkOption {
+          type = types.int;
+          default = 1024;
+          description = lib.mdDoc ''
+            Maximum file system usage (in MiB) for file sharing.
+          '';
+        };
+      };
+
+      udp = {
+        port = mkOption {
+          type = types.port;
+          default = 2086;  # assigned by IANA
+          description = lib.mdDoc ''
+            The UDP port for use by GNUnet.
+          '';
+        };
+      };
+
+      tcp = {
+        port = mkOption {
+          type = types.port;
+          default = 2086;  # assigned by IANA
+          description = lib.mdDoc ''
+            The TCP port for use by GNUnet.
+          '';
+        };
+      };
+
+      load = {
+        maxNetDownBandwidth = mkOption {
+          type = types.int;
+          default = 50000;
+          description = lib.mdDoc ''
+            Maximum bandwidth usage (in bits per second) for GNUnet
+            when downloading data.
+          '';
+        };
+
+        maxNetUpBandwidth = mkOption {
+          type = types.int;
+          default = 50000;
+          description = lib.mdDoc ''
+            Maximum bandwidth usage (in bits per second) for GNUnet
+            when uploading data.
+          '';
+        };
+
+        hardNetUpBandwidth = mkOption {
+          type = types.int;
+          default = 0;
+          description = lib.mdDoc ''
+            Hard bandwidth limit (in bits per second) when uploading
+            data.
+          '';
+        };
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.gnunet;
+        defaultText = literalExpression "pkgs.gnunet";
+        description = lib.mdDoc "Overridable attribute of the gnunet package to use.";
+        example = literalExpression "pkgs.gnunet_git";
+      };
+
+      extraOptions = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional options that will be copied verbatim in `gnunet.conf`.
+          See {manpage}`gnunet.conf(5)` for details.
+        '';
+      };
+    };
+
+  };
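+  # A minimal sketch (hypothetical limits) of a node with a larger datastore and
+  # capped bandwidth, using the options declared above:
+  #   services.gnunet = {
+  #     enable = true;
+  #     fileSharing.quota = 2048;            # MiB
+  #     load.maxNetDownBandwidth = 100000;   # bits per second
+  #     load.maxNetUpBandwidth = 50000;      # bits per second
+  #   };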
+
+
+  ###### implementation
+
+  config = mkIf config.services.gnunet.enable {
+
+    users.users.gnunet = {
+      group = "gnunet";
+      description = "GNUnet User";
+      uid = config.ids.uids.gnunet;
+    };
+
+    users.groups.gnunet.gid = config.ids.gids.gnunet;
+
+    # The user tools that talk to `gnunetd' should come from the same source,
+    # so install them globally.
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc."gnunet.conf".text = configFile;
+
+    systemd.services.gnunet = {
+      description = "GNUnet";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."gnunet.conf".source ];
+      path = [ cfg.package pkgs.miniupnpc ];
+      serviceConfig.ExecStart = "${cfg.package}/lib/gnunet/libexec/gnunet-service-arm -c /etc/gnunet.conf";
+      serviceConfig.User = "gnunet";
+      serviceConfig.UMask = "0007";
+      serviceConfig.WorkingDirectory = stateDir;
+      serviceConfig.RuntimeDirectory = "gnunet";
+      serviceConfig.StateDirectory = "gnunet";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/go-autoconfig.nix b/nixpkgs/nixos/modules/services/networking/go-autoconfig.nix
new file mode 100644
index 000000000000..07c628ae2cad
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/go-autoconfig.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.go-autoconfig;
+  format = pkgs.formats.yaml { };
+  configFile = format.generate "config.yml" cfg.settings;
+
+in {
+  options = {
+    services.go-autoconfig = {
+
+      enable = mkEnableOption (mdDoc "IMAP/SMTP autodiscover feature for mail clients");
+
+      settings = mkOption {
+        default = { };
+        description = mdDoc ''
+          Configuration for go-autoconfig. See
+          <https://github.com/L11R/go-autoconfig/blob/master/config.yml>
+          for more information.
+        '';
+        type = types.submodule {
+          freeformType = format.type;
+        };
+        example = literalExpression ''
+          {
+            service_addr = ":1323";
+            domain = "autoconfig.example.org";
+            imap = {
+              server = "example.org";
+              port = 993;
+            };
+            smtp = {
+              server = "example.org";
+              port = 465;
+            };
+          }
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd = {
+      services.go-autoconfig = {
+        wantedBy = [ "multi-user.target" ];
+        description = "IMAP/SMTP autodiscover server";
+        after = [ "network.target" ];
+        serviceConfig = {
+          ExecStart = "${pkgs.go-autoconfig}/bin/go-autoconfig -config ${configFile}";
+          Restart = "on-failure";
+          WorkingDirectory = ''${pkgs.go-autoconfig}/'';
+          DynamicUser = true;
+        };
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ onny ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/go-neb.nix b/nixpkgs/nixos/modules/services/networking/go-neb.nix
new file mode 100644
index 000000000000..78d24ecf17d9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/go-neb.nix
@@ -0,0 +1,78 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.go-neb;
+
+  settingsFormat = pkgs.formats.yaml {};
+  configFile = settingsFormat.generate "config.yaml" cfg.config;
+in {
+  options.services.go-neb = {
+    enable = mkEnableOption (lib.mdDoc "an extensible matrix bot written in Go");
+
+    bindAddress = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Port (and optionally address) to listen on.";
+      default = ":4050";
+    };
+
+    secretFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/run/keys/go-neb.env";
+      description = lib.mdDoc ''
+        Environment variables from this file will be interpolated into the
+        final config file using envsubst with this syntax: `$ENVIRONMENT`
+        or `''${VARIABLE}`.
+        The file should contain lines formatted as `SECRET_VAR=SECRET_VALUE`.
+        This is useful to avoid putting secrets into the nix store.
+      '';
+    };
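+    # A minimal sketch (hypothetical variable and client block; the exact schema
+    # is defined by go-neb's config.sample.yaml): with secretFile pointing at a
+    # file containing ACCESS_TOKEN=..., the config may reference it as
+    #   services.go-neb.config.clients = [ { AccessToken = "$ACCESS_TOKEN"; } ];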
+
+    baseUrl = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Public-facing endpoint that can receive webhooks.";
+    };
+
+    config = mkOption {
+      inherit (settingsFormat) type;
+      description = lib.mdDoc ''
+        Your {file}`config.yaml` as a Nix attribute set.
+        See [config.sample.yaml](https://github.com/matrix-org/go-neb/blob/master/config.sample.yaml)
+        for possible options.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.go-neb = let
+      finalConfigFile = if cfg.secretFile == null then configFile else "/var/run/go-neb/config.yaml";
+    in {
+      description = "Extensible matrix bot written in Go";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        BASE_URL = cfg.baseUrl;
+        BIND_ADDRESS = cfg.bindAddress;
+        CONFIG_FILE = finalConfigFile;
+      };
+
+      serviceConfig = {
+        ExecStartPre = lib.optional (cfg.secretFile != null)
+          ("+" + pkgs.writeShellScript "pre-start" ''
+            umask 077
+            export $(xargs < ${cfg.secretFile})
+            ${pkgs.envsubst}/bin/envsubst -i "${configFile}" > ${finalConfigFile}
+            chown go-neb ${finalConfigFile}
+          '');
+        RuntimeDirectory = "go-neb";
+        ExecStart = "${pkgs.go-neb}/bin/go-neb";
+        User = "go-neb";
+        DynamicUser = true;
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ hexa maralorn ];
+}
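A sketch of enabling go-neb with a secret file; the base URL and file path are illustrative, and the config contents must follow upstream's config.sample.yaml:

  {
    services.go-neb = {
      enable = true;
      baseUrl = "https://neb.example.org";   # assumed public endpoint
      secretFile = "/run/keys/go-neb.env";   # optional; keeps tokens out of the Nix store
      config = {
        # Populate clients/services per upstream config.sample.yaml;
        # secrets can be referenced as variables substituted from secretFile.
        clients = [ ];
      };
    };
  }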
diff --git a/nixpkgs/nixos/modules/services/networking/go-shadowsocks2.nix b/nixpkgs/nixos/modules/services/networking/go-shadowsocks2.nix
new file mode 100644
index 000000000000..d9c4a2421d72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/go-shadowsocks2.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.go-shadowsocks2.server;
+in {
+  options.services.go-shadowsocks2.server = {
+    enable = mkEnableOption (lib.mdDoc "go-shadowsocks2 server");
+
+    listenAddress = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Server listen address or URL";
+      example = "ss://AEAD_CHACHA20_POLY1305:your-password@:8488";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.go-shadowsocks2-server = {
+      description = "go-shadowsocks2 server";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.go-shadowsocks2}/bin/go-shadowsocks2 -s '${cfg.listenAddress}'";
+        DynamicUser = true;
+      };
+    };
+  };
+}
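A usage sketch for the go-shadowsocks2 server module, reusing the URL format from the option example above:

  {
    services.go-shadowsocks2.server = {
      enable = true;
      listenAddress = "ss://AEAD_CHACHA20_POLY1305:your-password@:8488";
    };
  }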
diff --git a/nixpkgs/nixos/modules/services/networking/gobgpd.nix b/nixpkgs/nixos/modules/services/networking/gobgpd.nix
new file mode 100644
index 000000000000..b22242edaade
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/gobgpd.nix
@@ -0,0 +1,64 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gobgpd;
+  format = pkgs.formats.toml { };
+  confFile = format.generate "gobgpd.conf" cfg.settings;
+in {
+  options.services.gobgpd = {
+    enable = mkEnableOption (lib.mdDoc "GoBGP Routing Daemon");
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      description = lib.mdDoc ''
+        GoBGP configuration. Refer to
+        <https://github.com/osrg/gobgp#documentation>
+        for details on supported values.
+      '';
+      example = literalExpression ''
+        {
+          global = {
+            config = {
+              as = 64512;
+              router-id = "192.168.255.1";
+            };
+          };
+          neighbors = [
+            {
+              config = {
+                neighbor-address = "10.0.255.1";
+                peer-as = 65001;
+              };
+            }
+            {
+              config = {
+                neighbor-address = "10.0.255.2";
+                peer-as = 65002;
+              };
+            }
+          ];
+        }
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.gobgpd ];
+    systemd.services.gobgpd = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "GoBGP Routing Daemon";
+      serviceConfig = {
+        Type = "notify";
+        ExecStartPre = "${pkgs.gobgpd}/bin/gobgpd -f ${confFile} -d";
+        ExecStart = "${pkgs.gobgpd}/bin/gobgpd -f ${confFile} --sdnotify";
+        ExecReload = "${pkgs.gobgpd}/bin/gobgpd -r";
+        DynamicUser = true;
+        AmbientCapabilities = "cap_net_bind_service";
+      };
+    };
+  };
+}
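A usage sketch for the gobgpd module, condensed from the option example above; the AS numbers and addresses are illustrative:

  {
    services.gobgpd = {
      enable = true;
      settings = {
        global.config = { as = 64512; router-id = "192.168.255.1"; };
        neighbors = [
          { config = { neighbor-address = "10.0.255.1"; peer-as = 65001; }; }
        ];
      };
    };
  }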
diff --git a/nixpkgs/nixos/modules/services/networking/gvpe.nix b/nixpkgs/nixos/modules/services/networking/gvpe.nix
new file mode 100644
index 000000000000..558f499022c8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/gvpe.nix
@@ -0,0 +1,130 @@
+# GNU Virtual Private Ethernet
+
+{config, pkgs, lib, ...}:
+
+let
+  inherit (lib) mkOption mkIf types;
+
+  cfg = config.services.gvpe;
+
+  finalConfig = if cfg.configFile != null then
+    cfg.configFile
+  else if cfg.configText != null then
+    pkgs.writeTextFile {
+      name = "gvpe.conf";
+      text = cfg.configText;
+    }
+  else
+    throw "You must either specify contents of the config file or the config file itself for GVPE";
+
+  ifupScript = if cfg.ipAddress == null || cfg.subnet == null then
+     throw "Specify IP address and subnet (with mask) for GVPE"
+   else if cfg.nodename == null then
+     throw "You must set node name for GVPE"
+   else
+   (pkgs.writeTextFile {
+    name = "gvpe-if-up";
+    text = ''
+      #! /bin/sh
+
+      export PATH=$PATH:${pkgs.iproute2}/sbin
+
+      ip link set dev $IFNAME up
+      ip address add ${cfg.ipAddress} dev $IFNAME
+      ip route add ${cfg.subnet} dev $IFNAME
+
+      ${cfg.customIFSetup}
+    '';
+    executable = true;
+  });
+in
+
+{
+  options = {
+    services.gvpe = {
+      enable = lib.mkEnableOption (lib.mdDoc "gvpe");
+
+      nodename = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          GVPE node name
+        '';
+      };
+      configText = mkOption {
+        default = null;
+        type = types.nullOr types.lines;
+        example = ''
+          tcp-port = 655
+          udp-port = 655
+          mtu = 1480
+          ifname = vpn0
+
+          node = alpha
+          hostname = alpha.example.org
+          connect = always
+          enable-udp = true
+          enable-tcp = true
+          on alpha if-up = if-up-0
+          on alpha pid-file = /var/gvpe/gvpe.pid
+        '';
+        description = lib.mdDoc ''
+          GVPE config contents
+        '';
+      };
+      configFile = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        example = "/root/my-gvpe-conf";
+        description = lib.mdDoc ''
+          GVPE config file, if already present
+        '';
+      };
+      ipAddress = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          IP address to assign to GVPE interface
+        '';
+      };
+      subnet = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "10.0.0.0/8";
+        description = lib.mdDoc ''
+          IP subnet assigned to GVPE network
+        '';
+      };
+      customIFSetup = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Additional commands to apply in ifup script
+        '';
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.gvpe = {
+      description = "GNU Virtual Private Ethernet node";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        mkdir -p /var/gvpe
+        mkdir -p /var/gvpe/pubkey
+        chown root /var/gvpe
+        chmod 700 /var/gvpe
+        cp ${finalConfig} /var/gvpe/gvpe.conf
+        cp ${ifupScript} /var/gvpe/if-up
+      '';
+
+      script = "${pkgs.gvpe}/sbin/gvpe -c /var/gvpe -D ${cfg.nodename} "
+        + " ${cfg.nodename}.pid-file=/var/gvpe/gvpe.pid"
+        + " ${cfg.nodename}.if-up=if-up"
+        + " &> /var/log/gvpe";
+
+      serviceConfig.Restart = "always";
+    };
+  };
+}
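A usage sketch for the gvpe module; the node name, addresses, and config text are illustrative assumptions, and either configText or configFile must be set:

  {
    services.gvpe = {
      enable = true;
      nodename = "alpha";            # must match a node declared in the config
      ipAddress = "10.10.0.1/24";    # assumed address for the VPN interface
      subnet = "10.10.0.0/24";       # assumed subnet routed over the tunnel
      configText = ''
        ifname = vpn0
        node = alpha
        hostname = alpha.example.org
      '';
    };
  }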
diff --git a/nixpkgs/nixos/modules/services/networking/hans.nix b/nixpkgs/nixos/modules/services/networking/hans.nix
new file mode 100644
index 000000000000..3ea95b3bdae9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hans.nix
@@ -0,0 +1,145 @@
+# NixOS module for hans, ip over icmp daemon
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.hans;
+
+  hansUser = "hans";
+
+in
+{
+
+  ### configuration
+
+  options = {
+
+    services.hans = {
+      clients = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Each attribute of this option defines a systemd service that
+          runs hans. Many or none may be defined.
+          The name of each service is
+          `hans-«name»`
+          where «name» is the name of the
+          corresponding attribute.
+        '';
+        example = literalExpression ''
+        {
+          foo = {
+            server = "192.0.2.1";
+            extraConfig = "-v";
+          };
+        }
+        '';
+        type = types.attrsOf (types.submodule (
+        {
+          options = {
+            server = mkOption {
+              type = types.str;
+              default = "";
+              description = lib.mdDoc "IP address of server running hans";
+              example = "192.0.2.1";
+            };
+
+            extraConfig = mkOption {
+              type = types.str;
+              default = "";
+              description = lib.mdDoc "Additional command line parameters";
+              example = "-v";
+            };
+
+            passwordFile = mkOption {
+              type = types.str;
+              default = "";
+              description = lib.mdDoc "File that contains password";
+            };
+
+          };
+        }));
+      };
+
+      server = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "enable hans server";
+        };
+
+        ip = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "The assigned ip range";
+          example = "198.51.100.0";
+        };
+
+        respondToSystemPings = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Force hans respond to ordinary pings";
+        };
+
+        extraConfig = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "Additional command line parameters";
+          example = "-v";
+        };
+
+        passwordFile = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "File that contains password";
+        };
+      };
+
+    };
+  };
+
+  ### implementation
+
+  config = mkIf (cfg.server.enable || cfg.clients != {}) {
+    boot.kernel.sysctl = optionalAttrs cfg.server.respondToSystemPings {
+      "net.ipv4.icmp_echo_ignore_all" = 1;
+    };
+
+    boot.kernelModules = [ "tun" ];
+
+    systemd.services =
+    let
+      createHansClientService = name: cfg:
+      {
+        description = "hans client - ${name}";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        script = "${pkgs.hans}/bin/hans -f -u ${hansUser} ${cfg.extraConfig} -c ${cfg.server} ${optionalString (cfg.passwordFile != "") "-p $(cat \"${cfg.passwordFile}\")"}";
+        serviceConfig = {
+          RestartSec = "30s";
+          Restart = "always";
+        };
+      };
+    in
+    listToAttrs (
+      mapAttrsToList
+        (name: value: nameValuePair "hans-${name}" (createHansClientService name value))
+        cfg.clients
+    ) // {
+      hans = mkIf (cfg.server.enable) {
+        description = "hans, ip over icmp server daemon";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        script = "${pkgs.hans}/bin/hans -f -u ${hansUser} ${cfg.server.extraConfig} -s ${cfg.server.ip} ${optionalString cfg.server.respondToSystemPings "-r"} ${optionalString (cfg.server.passwordFile != "") "-p $(cat \"${cfg.server.passwordFile}\")"}";
+      };
+    };
+
+    users.users.${hansUser} = {
+      description = "Hans daemon user";
+      isSystemUser = true;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ ];
+}
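A usage sketch for the hans module; the addresses are illustrative, and the client and server would normally live on different hosts:

  {
    # Client: tunnel IP over ICMP to an assumed server address.
    services.hans.clients.example = {
      server = "192.0.2.1";
      extraConfig = "-v";
    };

    # Server (on the remote host):
    # services.hans.server = {
    #   enable = true;
    #   ip = "198.51.100.0";
    #   respondToSystemPings = true;
    # };
  }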
diff --git a/nixpkgs/nixos/modules/services/networking/haproxy.nix b/nixpkgs/nixos/modules/services/networking/haproxy.nix
new file mode 100644
index 000000000000..208eb356d629
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/haproxy.nix
@@ -0,0 +1,107 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.haproxy;
+
+  haproxyCfg = pkgs.writeText "haproxy.conf" ''
+    global
+      # needed for hot-reload to work without dropping packets in multi-worker mode
+      stats socket /run/haproxy/haproxy.sock mode 600 expose-fd listeners level user
+
+    ${cfg.config}
+  '';
+
+in
+with lib;
+{
+  options = {
+    services.haproxy = {
+
+      enable = mkEnableOption (lib.mdDoc "HAProxy, the reliable, high performance TCP/HTTP load balancer.");
+
+      package = mkPackageOptionMD pkgs "haproxy" { };
+
+      user = mkOption {
+        type = types.str;
+        default = "haproxy";
+        description = lib.mdDoc "User account under which haproxy runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "haproxy";
+        description = lib.mdDoc "Group account under which haproxy runs.";
+      };
+
+      config = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Contents of the HAProxy configuration file,
+          {file}`haproxy.conf`.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [{
+      assertion = cfg.config != null;
+      message = "You must provide services.haproxy.config.";
+    }];
+
+    # configuration file indirection is needed to support reloading
+    environment.etc."haproxy.cfg".source = haproxyCfg;
+
+    systemd.services.haproxy = {
+      description = "HAProxy";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Type = "notify";
+        ExecStartPre = [
+          # when the master process receives USR2, it reloads itself using exec(argv[0]),
+          # so we create a symlink there and update it before reloading
+          "${pkgs.coreutils}/bin/ln -sf ${lib.getExe cfg.package} /run/haproxy/haproxy"
+          # when running the config test, don't be quiet so we can see what goes wrong
+          "/run/haproxy/haproxy -c -f ${haproxyCfg}"
+        ];
+        ExecStart = "/run/haproxy/haproxy -Ws -f /etc/haproxy.cfg -p /run/haproxy/haproxy.pid";
+        # support reloading
+        ExecReload = [
+          "${lib.getExe cfg.package} -c -f ${haproxyCfg}"
+          "${pkgs.coreutils}/bin/ln -sf ${lib.getExe cfg.package} /run/haproxy/haproxy"
+          "${pkgs.coreutils}/bin/kill -USR2 $MAINPID"
+        ];
+        KillMode = "mixed";
+        SuccessExitStatus = "143";
+        Restart = "always";
+        RuntimeDirectory = "haproxy";
+        # upstream hardening options
+        NoNewPrivileges = true;
+        ProtectHome = true;
+        ProtectSystem = "strict";
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        SystemCallFilter= "~@cpu-emulation @keyring @module @obsolete @raw-io @reboot @swap @sync";
+        # needed in case we bind to port < 1024
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "haproxy") {
+      haproxy = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "haproxy") {
+      haproxy = {};
+    };
+  };
+}
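A usage sketch for the haproxy module with a minimal, illustrative proxy configuration; the backend address is an assumption:

  {
    services.haproxy = {
      enable = true;
      config = ''
        defaults
          mode http
          timeout connect 5s
          timeout client  30s
          timeout server  30s

        frontend http-in
          bind *:80
          default_backend app

        backend app
          server app1 127.0.0.1:8080
      '';
    };
  }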
diff --git a/nixpkgs/nixos/modules/services/networking/harmonia.nix b/nixpkgs/nixos/modules/services/networking/harmonia.nix
new file mode 100644
index 000000000000..4733165cf7d1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/harmonia.nix
@@ -0,0 +1,90 @@
+{ config, pkgs, lib, ... }:
+let
+  cfg = config.services.harmonia;
+  format = pkgs.formats.toml { };
+in
+{
+  options = {
+    services.harmonia = {
+      enable = lib.mkEnableOption (lib.mdDoc "Harmonia: Nix binary cache written in Rust");
+
+      signKeyPath = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        description = lib.mdDoc "Path to the signing key that will be used for signing the cache";
+      };
+
+      package = lib.mkPackageOptionMD pkgs "harmonia" { };
+
+      settings = lib.mkOption {
+        inherit (format) type;
+        default = { };
+        description = lib.mdDoc ''
+          Settings to merge with the default configuration.
+          For a list of the default configuration options, see <https://github.com/nix-community/harmonia/tree/master#configuration>.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    nix.settings.extra-allowed-users = [ "harmonia" ];
+
+    systemd.services.harmonia = {
+      description = "harmonia binary cache service";
+
+      requires = [ "nix-daemon.socket" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        CONFIG_FILE = format.generate "harmonia.toml" cfg.settings;
+        SIGN_KEY_PATH = lib.mkIf (cfg.signKeyPath != null) "%d/sign-key";
+        # Note: it's important to set this for nix-store, because it wants to use
+        # $HOME in order to use a temporary cache dir; bizarre failures will occur
+        # otherwise.
+        HOME = "/run/harmonia";
+      };
+
+      serviceConfig = {
+        ExecStart = lib.getExe cfg.package;
+        User = "harmonia";
+        Group = "harmonia";
+        DynamicUser = true;
+        PrivateUsers = true;
+        DeviceAllow = [ "" ];
+        UMask = "0066";
+        RuntimeDirectory = "harmonia";
+        LoadCredential = lib.mkIf (cfg.signKeyPath != null) [ "sign-key:${cfg.signKeyPath}" ];
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+          "~@resources"
+        ];
+        CapabilityBoundingSet = "";
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectKernelLogs = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        RestrictRealtime = true;
+        MemoryDenyWriteExecute = true;
+        ProcSubset = "pid";
+        ProtectProc = "invisible";
+        RestrictNamespaces = true;
+        SystemCallArchitectures = "native";
+        PrivateNetwork = false;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        LockPersonality = true;
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
+        LimitNOFILE = 65536;
+      };
+    };
+  };
+}
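A usage sketch for the harmonia module; the key path is an assumption (a key pair can be generated with nix-store --generate-binary-cache-key), and the bind setting follows upstream's documented configuration:

  {
    services.harmonia = {
      enable = true;
      signKeyPath = "/var/lib/secrets/harmonia.secret";   # assumed location
      settings.bind = "[::]:5000";                        # assumed upstream default
    };
  }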
diff --git a/nixpkgs/nixos/modules/services/networking/headscale.nix b/nixpkgs/nixos/modules/services/networking/headscale.nix
new file mode 100644
index 000000000000..03e6f86af53f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/headscale.nix
@@ -0,0 +1,531 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+with lib; let
+  cfg = config.services.headscale;
+
+  dataDir = "/var/lib/headscale";
+  runDir = "/run/headscale";
+
+  settingsFormat = pkgs.formats.yaml {};
+  configFile = settingsFormat.generate "headscale.yaml" cfg.settings;
+in {
+  options = {
+    services.headscale = {
+      enable = mkEnableOption (lib.mdDoc "headscale, Open Source coordination server for Tailscale");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.headscale;
+        defaultText = literalExpression "pkgs.headscale";
+        description = lib.mdDoc ''
+          Which headscale package to use for the running server.
+        '';
+      };
+
+      user = mkOption {
+        default = "headscale";
+        type = types.str;
+        description = lib.mdDoc ''
+          User account under which headscale runs.
+
+          ::: {.note}
+          If left as the default value this user will automatically be created
+          on system activation, otherwise you are responsible for
+          ensuring the user exists before the headscale service starts.
+          :::
+        '';
+      };
+
+      group = mkOption {
+        default = "headscale";
+        type = types.str;
+        description = lib.mdDoc ''
+          Group under which headscale runs.
+
+          ::: {.note}
+          If left as the default value this group will automatically be created
+          on system activation, otherwise you are responsible for
+          ensuring the group exists before the headscale service starts.
+          :::
+        '';
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          Listening address of headscale.
+        '';
+        example = "0.0.0.0";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = lib.mdDoc ''
+          Listening port of headscale.
+        '';
+        example = 443;
+      };
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Overrides to {file}`config.yaml` as a Nix attribute set.
+          Check the [example config](https://github.com/juanfont/headscale/blob/main/config-example.yaml)
+          for possible options.
+        '';
+        type = types.submodule {
+          freeformType = settingsFormat.type;
+
+          options = {
+            server_url = mkOption {
+              type = types.str;
+              default = "http://127.0.0.1:8080";
+              description = lib.mdDoc ''
+                The url clients will connect to.
+              '';
+              example = "https://myheadscale.example.com:443";
+            };
+
+            private_key_path = mkOption {
+              type = types.path;
+              default = "${dataDir}/private.key";
+              description = lib.mdDoc ''
+                Path to private key file, generated automatically if it does not exist.
+              '';
+            };
+
+            noise.private_key_path = mkOption {
+              type = types.path;
+              default = "${dataDir}/noise_private.key";
+              description = lib.mdDoc ''
+                Path to noise private key file, generated automatically if it does not exist.
+              '';
+            };
+
+            derp = {
+              urls = mkOption {
+                type = types.listOf types.str;
+                default = ["https://controlplane.tailscale.com/derpmap/default"];
+                description = lib.mdDoc ''
+                  List of urls containing DERP maps.
+                  See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
+                '';
+              };
+
+              paths = mkOption {
+                type = types.listOf types.path;
+                default = [];
+                description = lib.mdDoc ''
+                  List of file paths containing DERP maps.
+                  See [How Tailscale works](https://tailscale.com/blog/how-tailscale-works/) for more information on DERP maps.
+                '';
+              };
+
+              auto_update_enable = mkOption {
+                type = types.bool;
+                default = true;
+                description = lib.mdDoc ''
+                  Whether to automatically update DERP maps on a set frequency.
+                '';
+                example = false;
+              };
+
+              update_frequency = mkOption {
+                type = types.str;
+                default = "24h";
+                description = lib.mdDoc ''
+                  Frequency to update DERP maps.
+                '';
+                example = "5m";
+              };
+            };
+
+            ephemeral_node_inactivity_timeout = mkOption {
+              type = types.str;
+              default = "30m";
+              description = lib.mdDoc ''
+                Time before an inactive ephemeral node is deleted.
+              '';
+              example = "5m";
+            };
+
+            db_type = mkOption {
+              type = types.enum ["sqlite3" "postgres"];
+              example = "postgres";
+              default = "sqlite3";
+              description = lib.mdDoc "Database engine to use.";
+            };
+
+            db_host = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "127.0.0.1";
+              description = lib.mdDoc "Database host address.";
+            };
+
+            db_port = mkOption {
+              type = types.nullOr types.port;
+              default = null;
+              example = 3306;
+              description = lib.mdDoc "Database host port.";
+            };
+
+            db_name = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "headscale";
+              description = lib.mdDoc "Database name.";
+            };
+
+            db_user = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = "headscale";
+              description = lib.mdDoc "Database user.";
+            };
+
+            db_password_file = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              example = "/run/keys/headscale-dbpassword";
+              description = lib.mdDoc ''
+                A file containing the password corresponding to
+                {option}`database.user`.
+              '';
+            };
+
+            db_path = mkOption {
+              type = types.nullOr types.str;
+              default = "${dataDir}/db.sqlite";
+              description = lib.mdDoc "Path to the sqlite3 database file.";
+            };
+
+            log.level = mkOption {
+              type = types.str;
+              default = "info";
+              description = lib.mdDoc ''
+                headscale log level.
+              '';
+              example = "debug";
+            };
+
+            log.format = mkOption {
+              type = types.str;
+              default = "text";
+              description = lib.mdDoc ''
+                headscale log format.
+              '';
+              example = "json";
+            };
+
+            dns_config = {
+              nameservers = mkOption {
+                type = types.listOf types.str;
+                default = ["1.1.1.1"];
+                description = lib.mdDoc ''
+                  List of nameservers to pass to Tailscale clients.
+                '';
+              };
+
+              override_local_dns = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Whether to use [Override local DNS](https://tailscale.com/kb/1054/dns/).
+                '';
+                example = true;
+              };
+
+              domains = mkOption {
+                type = types.listOf types.str;
+                default = [];
+                description = lib.mdDoc ''
+                  Search domains to inject to Tailscale clients.
+                '';
+                example = ["mydomain.internal"];
+              };
+
+              magic_dns = mkOption {
+                type = types.bool;
+                default = true;
+                description = lib.mdDoc ''
+                  Whether to use [MagicDNS](https://tailscale.com/kb/1081/magicdns/).
+                  Only works if at least one nameserver is defined.
+                '';
+                example = false;
+              };
+
+              base_domain = mkOption {
+                type = types.str;
+                default = "";
+                description = lib.mdDoc ''
+                  Defines the base domain to create the hostnames for MagicDNS.
+                  {option}`base_domain` must be an FQDN, without the trailing dot.
+                  The FQDN of the hosts will be
+                  `hostname.namespace.base_domain` (e.g.
+                  `myhost.mynamespace.example.com`).
+                '';
+              };
+            };
+
+            oidc = {
+              issuer = mkOption {
+                type = types.str;
+                default = "";
+                description = lib.mdDoc ''
+                  URL to OpenID issuer.
+                '';
+                example = "https://openid.example.com";
+              };
+
+              client_id = mkOption {
+                type = types.str;
+                default = "";
+                description = lib.mdDoc ''
+                  OpenID Connect client ID.
+                '';
+              };
+
+              client_secret_path = mkOption {
+                type = types.nullOr types.str;
+                default = null;
+                description = lib.mdDoc ''
+                  Path to OpenID Connect client secret file. Expands environment variables in format ''${VAR}.
+                '';
+              };
+
+              scope = mkOption {
+                type = types.listOf types.str;
+                default = ["openid" "profile" "email"];
+                description = lib.mdDoc ''
+                  Scopes used in the OIDC flow.
+                '';
+              };
+
+              extra_params = mkOption {
+                type = types.attrsOf types.str;
+                default = { };
+                description = lib.mdDoc ''
+                  Custom query parameters to send with the Authorize Endpoint request.
+                '';
+                example = {
+                  domain_hint = "example.com";
+                };
+              };
+
+              allowed_domains = mkOption {
+                type = types.listOf types.str;
+                default = [ ];
+                description = lib.mdDoc ''
+                  Allowed principal domains. If an authenticated user's domain
+                  is not in this list, the authentication request will be rejected.
+                '';
+                example = [ "example.com" ];
+              };
+
+              allowed_users = mkOption {
+                type = types.listOf types.str;
+                default = [ ];
+                description = lib.mdDoc ''
+                  Users allowed to authenticate even if not in allowedDomains.
+                '';
+                example = [ "alice@example.com" ];
+              };
+
+              strip_email_domain = mkOption {
+                type = types.bool;
+                default = true;
+                description = lib.mdDoc ''
+                  Whether the domain part of the email address should be removed when generating namespaces.
+                '';
+              };
+            };
+
+            tls_letsencrypt_hostname = mkOption {
+              type = types.nullOr types.str;
+              default = "";
+              description = lib.mdDoc ''
+                Domain name to request a TLS certificate for.
+              '';
+            };
+
+            tls_letsencrypt_challenge_type = mkOption {
+              type = types.enum ["TLS-ALPN-01" "HTTP-01"];
+              default = "HTTP-01";
+              description = lib.mdDoc ''
+                Type of ACME challenge to use, currently supported types:
+                `HTTP-01` or `TLS-ALPN-01`.
+              '';
+            };
+
+            tls_letsencrypt_listen = mkOption {
+              type = types.nullOr types.str;
+              default = ":http";
+              description = lib.mdDoc ''
+                When HTTP-01 challenge is chosen, letsencrypt must set up a
+                verification endpoint, and it will be listening on:
+                `:http = port 80`.
+              '';
+            };
+
+            tls_cert_path = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = lib.mdDoc ''
+                Path to already created certificate.
+              '';
+            };
+
+            tls_key_path = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = lib.mdDoc ''
+                Path to key for already created certificate.
+              '';
+            };
+
+            acl_policy_path = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = lib.mdDoc ''
+                Path to a file containing ACL policies.
+              '';
+            };
+          };
+        };
+      };
+    };
+  };
+
+  imports = [
+    # TODO address + port = listen_addr
+    (mkRenamedOptionModule ["services" "headscale" "serverUrl"] ["services" "headscale" "settings" "server_url"])
+    (mkRenamedOptionModule ["services" "headscale" "privateKeyFile"] ["services" "headscale" "settings" "private_key_path"])
+    (mkRenamedOptionModule ["services" "headscale" "derp" "urls"] ["services" "headscale" "settings" "derp" "urls"])
+    (mkRenamedOptionModule ["services" "headscale" "derp" "paths"] ["services" "headscale" "settings" "derp" "paths"])
+    (mkRenamedOptionModule ["services" "headscale" "derp" "autoUpdate"] ["services" "headscale" "settings" "derp" "auto_update_enable"])
+    (mkRenamedOptionModule ["services" "headscale" "derp" "updateFrequency"] ["services" "headscale" "settings" "derp" "update_frequency"])
+    (mkRenamedOptionModule ["services" "headscale" "ephemeralNodeInactivityTimeout"] ["services" "headscale" "settings" "ephemeral_node_inactivity_timeout"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "type"] ["services" "headscale" "settings" "db_type"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "path"] ["services" "headscale" "settings" "db_path"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "host"] ["services" "headscale" "settings" "db_host"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "port"] ["services" "headscale" "settings" "db_port"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "name"] ["services" "headscale" "settings" "db_name"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "user"] ["services" "headscale" "settings" "db_user"])
+    (mkRenamedOptionModule ["services" "headscale" "database" "passwordFile"] ["services" "headscale" "settings" "db_password_file"])
+    (mkRenamedOptionModule ["services" "headscale" "logLevel"] ["services" "headscale" "settings" "log" "level"])
+    (mkRenamedOptionModule ["services" "headscale" "dns" "nameservers"] ["services" "headscale" "settings" "dns_config" "nameservers"])
+    (mkRenamedOptionModule ["services" "headscale" "dns" "domains"] ["services" "headscale" "settings" "dns_config" "domains"])
+    (mkRenamedOptionModule ["services" "headscale" "dns" "magicDns"] ["services" "headscale" "settings" "dns_config" "magic_dns"])
+    (mkRenamedOptionModule ["services" "headscale" "dns" "baseDomain"] ["services" "headscale" "settings" "dns_config" "base_domain"])
+    (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "issuer"] ["services" "headscale" "settings" "oidc" "issuer"])
+    (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientId"] ["services" "headscale" "settings" "oidc" "client_id"])
+    (mkRenamedOptionModule ["services" "headscale" "openIdConnect" "clientSecretFile"] ["services" "headscale" "settings" "oidc" "client_secret_path"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "hostname"] ["services" "headscale" "settings" "tls_letsencrypt_hostname"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "challengeType"] ["services" "headscale" "settings" "tls_letsencrypt_challenge_type"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "letsencrypt" "httpListen"] ["services" "headscale" "settings" "tls_letsencrypt_listen"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "certFile"] ["services" "headscale" "settings" "tls_cert_path"])
+    (mkRenamedOptionModule ["services" "headscale" "tls" "keyFile"] ["services" "headscale" "settings" "tls_key_path"])
+    (mkRenamedOptionModule ["services" "headscale" "aclPolicyFile"] ["services" "headscale" "settings" "acl_policy_path"])
+
+    (mkRemovedOptionModule ["services" "headscale" "openIdConnect" "domainMap"] ''
+      Headscale no longer uses domain_map. If you're using an old version of headscale you can still set this option via services.headscale.settings.oidc.domain_map.
+    '')
+  ];
+
+  config = mkIf cfg.enable {
+    services.headscale.settings = {
+      listen_addr = mkDefault "${cfg.address}:${toString cfg.port}";
+
+      # Turn off update checks since the origin of our package
+      # is nixpkgs and not GitHub.
+      disable_check_updates = true;
+
+      unix_socket = "${runDir}/headscale.sock";
+
+      tls_letsencrypt_cache_dir = "${dataDir}/.cache";
+    };
+
+    # Set up the headscale configuration at a known path in /etc so that
+    # both the server and the client can use it to find the socket
+    # for communication.
+    environment.etc."headscale/config.yaml".source = configFile;
+
+    users.groups.headscale = mkIf (cfg.group == "headscale") {};
+
+    users.users.headscale = mkIf (cfg.user == "headscale") {
+      description = "headscale user";
+      home = dataDir;
+      group = cfg.group;
+      isSystemUser = true;
+    };
+
+    systemd.services.headscale = {
+      description = "headscale coordination server for Tailscale";
+      after = ["network-online.target"];
+      wantedBy = ["multi-user.target"];
+      restartTriggers = [configFile];
+
+      environment.GIN_MODE = "release";
+
+      script = ''
+        ${optionalString (cfg.settings.db_password_file != null) ''
+          export HEADSCALE_DB_PASS="$(head -n1 ${escapeShellArg cfg.settings.db_password_file})"
+        ''}
+
+        exec ${cfg.package}/bin/headscale serve
+      '';
+
+      serviceConfig = let
+        capabilityBoundingSet = ["CAP_CHOWN"] ++ optional (cfg.port < 1024) "CAP_NET_BIND_SERVICE";
+      in {
+        Restart = "always";
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+
+        # Hardening options
+        RuntimeDirectory = "headscale";
+        # Allow headscale group access so users can be added and use the CLI.
+        RuntimeDirectoryMode = "0750";
+
+        StateDirectory = "headscale";
+        StateDirectoryMode = "0750";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        RestrictNamespaces = true;
+        RemoveIPC = true;
+        UMask = "0077";
+
+        CapabilityBoundingSet = capabilityBoundingSet;
+        AmbientCapabilities = capabilityBoundingSet;
+        NoNewPrivileges = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        SystemCallFilter = ["@system-service" "~@privileged" "@chown"];
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [kradalby misterio77];
+}
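A usage sketch for the headscale module; the URL and base domain are illustrative assumptions:

  {
    services.headscale = {
      enable = true;
      address = "0.0.0.0";
      port = 8080;
      settings = {
        server_url = "https://headscale.example.com";   # assumed public URL
        dns_config.base_domain = "example.com";         # assumed base domain
      };
    };
  }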
diff --git a/nixpkgs/nixos/modules/services/networking/helpers.nix b/nixpkgs/nixos/modules/services/networking/helpers.nix
new file mode 100644
index 000000000000..d7d42de0e3a8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/helpers.nix
@@ -0,0 +1,11 @@
+{ config, lib, ... }: ''
+  # Helper command to manipulate both the IPv4 and IPv6 tables.
+  ip46tables() {
+    iptables -w "$@"
+    ${
+      lib.optionalString config.networking.enableIPv6 ''
+        ip6tables -w "$@"
+      ''
+    }
+  }
+''
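helpers.nix is not a module but a function that, given config and lib, returns a shell snippet defining ip46tables(). A hypothetical sketch of how a module in this tree might splice it into a service script (the service name and rule are assumptions):

  { config, lib, pkgs, ... }:
  let
    # Returns the shell snippet shown above.
    helpers = import ./helpers.nix { inherit config lib; };
  in {
    config.systemd.services.my-firewall-rules = {
      wantedBy = [ "multi-user.target" ];
      path = [ pkgs.iptables ];   # the helper expects iptables/ip6tables on PATH
      script = helpers + ''
        ip46tables -A INPUT -p tcp --dport 22 -j ACCEPT
      '';
    };
  }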
diff --git a/nixpkgs/nixos/modules/services/networking/hostapd.nix b/nixpkgs/nixos/modules/services/networking/hostapd.nix
new file mode 100644
index 000000000000..5bd8e1d4d7a0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hostapd.nix
@@ -0,0 +1,1257 @@
+{ config, lib, pkgs, utils, ... }:
+# All hope abandon ye who enter here. hostapd's configuration
+# format is ... special, and you won't be able to infer any
+# of their assumptions from just reading the "documentation"
+# (i.e. the example config). Assume footguns at all points -
+# to make informed decisions you will probably need to look
+# at hostapd's code. You have been warned, proceed with care.
+let
+  inherit
+    (lib)
+    attrNames
+    attrValues
+    concatLists
+    concatMap
+    concatMapStrings
+    concatStringsSep
+    count
+    escapeShellArg
+    filter
+    flip
+    generators
+    getAttr
+    hasPrefix
+    imap0
+    isInt
+    isString
+    length
+    literalExpression
+    maintainers
+    mapAttrsToList
+    mdDoc
+    mkDefault
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    mkRemovedOptionModule
+    optional
+    optionalAttrs
+    optionalString
+    optionals
+    singleton
+    stringLength
+    toLower
+    types
+    unique
+    ;
+
+  cfg = config.services.hostapd;
+
+  extraSettingsFormat = {
+    type = let
+      singleAtom = types.oneOf [ types.bool types.int types.str ];
+      atom = types.either singleAtom (types.listOf singleAtom) // {
+        description = "atom (bool, int or string) or a list of them for duplicate keys";
+      };
+    in types.attrsOf atom;
+
+    generate = name: value: pkgs.writeText name (generators.toKeyValue {
+      listsAsDuplicateKeys = true;
+      mkKeyValue = generators.mkKeyValueDefault {
+        mkValueString = v:
+          if      isInt    v then toString v
+          else if isString v then v
+          else if true  == v then "1"
+          else if false == v then "0"
+          else throw "unsupported type ${builtins.typeOf v}: ${(generators.toPretty {}) v}";
+      } "=";
+    } value);
+  };
+
+  # Generates the header for a single BSS (i.e. WiFi network)
+  writeBssHeader = radio: bss: bssIdx: pkgs.writeText "hostapd-radio-${radio}-bss-${bss}.conf" ''
+    ''\n''\n# BSS ${toString bssIdx}: ${bss}
+    ################################
+
+    ${if bssIdx == 0 then "interface" else "bss"}=${bss}
+  '';
+
+  makeRadioRuntimeFiles = radio: radioCfg:
+    pkgs.writeShellScript "make-hostapd-${radio}-files" (''
+      set -euo pipefail
+
+      hostapd_config_file=/run/hostapd/${escapeShellArg radio}.hostapd.conf
+      rm -f "$hostapd_config_file"
+      cat > "$hostapd_config_file" <<EOF
+      # Radio base configuration: ${radio}
+      ################################
+
+      EOF
+
+      cat ${escapeShellArg (extraSettingsFormat.generate "hostapd-radio-${radio}-extra.conf" radioCfg.settings)} >> "$hostapd_config_file"
+      ${concatMapStrings (script: "${script} \"$hostapd_config_file\"\n") (attrValues radioCfg.dynamicConfigScripts)}
+    ''
+    + concatMapStrings (x: "${x}\n") (imap0 (i: f: f i)
+      (mapAttrsToList (bss: bssCfg: bssIdx: ''
+        ''\n# BSS configuration: ${bss}
+
+        mac_allow_file=/run/hostapd/${escapeShellArg bss}.mac.allow
+        rm -f "$mac_allow_file"
+        touch "$mac_allow_file"
+
+        mac_deny_file=/run/hostapd/${escapeShellArg bss}.mac.deny
+        rm -f "$mac_deny_file"
+        touch "$mac_deny_file"
+
+        cat ${writeBssHeader radio bss bssIdx} >> "$hostapd_config_file"
+        cat ${escapeShellArg (extraSettingsFormat.generate "hostapd-radio-${radio}-bss-${bss}-extra.conf" bssCfg.settings)} >> "$hostapd_config_file"
+        ${concatMapStrings (script: "${script} \"$hostapd_config_file\" \"$mac_allow_file\" \"$mac_deny_file\"\n") (attrValues bssCfg.dynamicConfigScripts)}
+      '') radioCfg.networks)));
+
+  runtimeConfigFiles = mapAttrsToList (radio: _: "/run/hostapd/${radio}.hostapd.conf") cfg.radios;
+in {
+  meta.maintainers = with maintainers; [ oddlama ];
+
+  options = {
+    services.hostapd = {
+      enable = mkEnableOption (mdDoc ''
+        hostapd, a user space daemon for access point and
+        authentication servers. It implements IEEE 802.11 access point management,
+        IEEE 802.1X/WPA/WPA2/EAP Authenticators, RADIUS client, EAP server, and RADIUS
+        authentication server
+      '');
+
+      package = mkPackageOption pkgs "hostapd" {};
+
+      radios = mkOption {
+        default = {};
+        example = literalExpression ''
+          {
+            # Simple 2.4GHz AP
+            wlp2s0 = {
+              # countryCode = "US";
+              networks.wlp2s0 = {
+                ssid = "AP 1";
+                authentication.saePasswords = [{ password = "a flakey password"; }]; # Use saePasswordsFile if possible.
+              };
+            };
+
+            # WiFi 5 (5GHz) with two advertised networks
+            wlp3s0 = {
+              band = "5g";
+              channel = 0; # Enable automatic channel selection (ACS). Use only if your hardware supports it.
+              # countryCode = "US";
+              networks.wlp3s0 = {
+                ssid = "My AP";
+                authentication.saePasswords = [{ password = "a flakey password"; }]; # Use saePasswordsFile if possible.
+              };
+              networks.wlp3s0-1 = {
+                ssid = "Open AP with WiFi5";
+                authentication.mode = "none";
+              };
+            };
+
+            # Legacy WPA2 example
+            wlp4s0 = {
+              # countryCode = "US";
+              networks.wlp4s0 = {
+                ssid = "AP 2";
+                authentication = {
+                  mode = "wpa2-sha256";
+                  wpaPassword = "a flakey password"; # Use wpaPasswordFile if possible.
+                };
+              };
+            };
+          }
+        '';
+        description = mdDoc ''
+          This option allows you to define APs for one or multiple physical radios.
+          At least one radio must be specified.
+
+          For each radio, hostapd requires a separate logical interface (like wlp3s0, wlp3s1, ...).
+          A default interface is usually created automatically by your system, but to use
+          multiple radios of a single device, you may need to create additional logical interfaces,
+          for example by using {option}`networking.wlanInterfaces`.
+
+          Each physical radio can only support a single hardware-mode that is configured via
+          ({option}`services.hostapd.radios.<radio>.band`). To create a dual-band
+          or tri-band AP, you will have to use a device that has multiple physical radios
+          and supports configuring multiple APs (Refer to valid interface combinations in
+          {command}`iw list`).
+        '';
+        type = types.attrsOf (types.submodule (radioSubmod: {
+          options = {
+            driver = mkOption {
+              default = "nl80211";
+              example = "none";
+              type = types.str;
+              description = mdDoc ''
+                The driver {command}`hostapd` will use.
+                {var}`nl80211` is used with all Linux mac80211 drivers.
+                {var}`none` is used if building a standalone RADIUS server that does
+                not control any wireless/wired driver.
+                Most applications will probably use the default.
+              '';
+            };
+
+            noScan = mkOption {
+              type = types.bool;
+              default = false;
+              description = mdDoc ''
+                Disables scan for overlapping BSSs in HT40+/- mode.
+                Caution: turning this on will likely violate regulatory requirements!
+              '';
+            };
+
+            countryCode = mkOption {
+              default = null;
+              example = "US";
+              type = types.nullOr types.str;
+              description = mdDoc ''
+                Country code (ISO/IEC 3166-1). Used to set regulatory domain.
+                Set as needed to indicate country in which device is operating.
+                This can limit available channels and transmit power.
+                These two octets are used as the first two octets of the Country String
+                (dot11CountryString).
+
+                Setting this will force you to also enable IEEE 802.11d and IEEE 802.11h.
+
+                IEEE 802.11d: This advertises the countryCode and the set of allowed channels
+                and transmit power levels based on the regulatory limits.
+
+                IEEE802.11h: This enables radar detection and DFS (Dynamic Frequency Selection)
+                support if available. DFS support is required on outdoor 5 GHz channels in most
+                countries of the world.
+              '';
+            };
+
+            band = mkOption {
+              default = "2g";
+              type = types.enum ["2g" "5g" "6g" "60g"];
+              description = mdDoc ''
+                Specifies the frequency band to use, possible values are 2g for 2.4 GHz,
+                5g for 5 GHz, 6g for 6 GHz and 60g for 60 GHz.
+              '';
+            };
+
+            channel = mkOption {
+              default = 7;
+              example = 11;
+              type = types.int;
+              description = mdDoc ''
+                The channel to operate on. Use 0 to enable ACS (Automatic Channel Selection).
+                Beware that not every device supports ACS in which case {command}`hostapd`
+                will fail to start.
+              '';
+            };
+
+            settings = mkOption {
+              default = {};
+              example = { acs_exclude_dfs = true; };
+              type = types.submodule {
+                freeformType = extraSettingsFormat.type;
+              };
+              description = mdDoc ''
+                Extra configuration options to put at the end of global initialization, before defining BSSs.
+                To find out which options are global and which are per-bss you have to read hostapd's source code,
+                which is non-trivial and not documented otherwise.
+
+                Lists will be converted to multiple definitions of the same key, and booleans to 0/1.
+                Otherwise, the inputs are not modified or checked for correctness.
+              '';
+            };
+
+            dynamicConfigScripts = mkOption {
+              default = {};
+              type = types.attrsOf types.path;
+              example = literalExpression ''
+                {
+                  exampleDynamicConfig = pkgs.writeShellScript "dynamic-config" '''
+                    HOSTAPD_CONFIG=$1
+
+                    cat >> "$HOSTAPD_CONFIG" << EOF
+                    # Add some dynamically generated statements here,
+                    # for example based on the physical adapter in use
+                    EOF
+                  ''';
+                }
+              '';
+              description = mdDoc ''
+                All of these scripts will be executed in lexicographical order before hostapd
+                is started, right after the global segment has been generated, and may dynamically
+                append global options to the generated configuration file.
+
+                The first argument will point to the configuration file that you may append to.
+              '';
+            };
+
+            #### IEEE 802.11n (WiFi 4) related configuration
+
+            wifi4 = {
+              enable = mkOption {
+                default = true;
+                type = types.bool;
+                description = mdDoc ''
+                  Enables support for IEEE 802.11n (WiFi 4, HT).
+                  This is enabled by default, since the vast majority of devices
+                  are expected to support this.
+                '';
+              };
+
+              capabilities = mkOption {
+                type = types.listOf types.str;
+                default = ["HT40" "HT40-" "SHORT-GI-20" "SHORT-GI-40"];
+                example = ["LDPC" "HT40+" "HT40-" "GF" "SHORT-GI-20" "SHORT-GI-40" "TX-STBC" "RX-STBC1"];
+                description = mdDoc ''
+                  HT (High Throughput) capabilities given as a list of flags.
+                  Please refer to the hostapd documentation for allowed values and
+                  only set values supported by your physical adapter.
+
+                  The default contains common values supported by most adapters.
+                '';
+              };
+
+              require = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "Require stations (clients) to support WiFi 4 (HT) and disassociate them if they don't.";
+              };
+            };
+
+            #### IEEE 802.11ac (WiFi 5) related configuration
+
+            wifi5 = {
+              enable = mkOption {
+                default = true;
+                type = types.bool;
+                description = mdDoc "Enables support for IEEE 802.11ac (WiFi 5, VHT)";
+              };
+
+              capabilities = mkOption {
+                type = types.listOf types.str;
+                default = [];
+                example = ["SHORT-GI-80" "TX-STBC-2BY1" "RX-STBC-1" "RX-ANTENNA-PATTERN" "TX-ANTENNA-PATTERN"];
+                description = mdDoc ''
+                  VHT (Very High Throughput) capabilities given as a list of flags.
+                  Please refer to the hostapd documentation for allowed values and
+                  only set values supported by your physical adapter.
+                '';
+              };
+
+              require = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "Require stations (clients) to support WiFi 5 (VHT) and disassociate them if they don't.";
+              };
+
+              operatingChannelWidth = mkOption {
+                default = "20or40";
+                type = types.enum ["20or40" "80" "160" "80+80"];
+                apply = x:
+                  getAttr x {
+                    "20or40" = 0;
+                    "80" = 1;
+                    "160" = 2;
+                    "80+80" = 3;
+                  };
+                description = mdDoc ''
+                  Determines the operating channel width for VHT.
+
+                  - {var}`"20or40"`: 20 or 40 MHz operating channel width
+                  - {var}`"80"`: 80 MHz channel width
+                  - {var}`"160"`: 160 MHz channel width
+                  - {var}`"80+80"`: 80+80 MHz channel width
+                '';
+              };
+            };
+
+            #### IEEE 802.11ax (WiFi 6) related configuration
+
+            wifi6 = {
+              enable = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "Enables support for IEEE 802.11ax (WiFi 6, HE)";
+              };
+
+              require = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "Require stations (clients) to support WiFi 6 (HE) and disassociate them if they don't.";
+              };
+
+              singleUserBeamformer = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "HE single user beamformer support";
+              };
+
+              singleUserBeamformee = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "HE single user beamformee support";
+              };
+
+              multiUserBeamformer = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "HE multi user beamformee support";
+              };
+
+              operatingChannelWidth = mkOption {
+                default = "20or40";
+                type = types.enum ["20or40" "80" "160" "80+80"];
+                apply = x:
+                  getAttr x {
+                    "20or40" = 0;
+                    "80" = 1;
+                    "160" = 2;
+                    "80+80" = 3;
+                  };
+                description = mdDoc ''
+                  Determines the operating channel width for HE.
+
+                  - {var}`"20or40"`: 20 or 40 MHz operating channel width
+                  - {var}`"80"`: 80 MHz channel width
+                  - {var}`"160"`: 160 MHz channel width
+                  - {var}`"80+80"`: 80+80 MHz channel width
+                '';
+              };
+            };
+
+            #### IEEE 802.11be (WiFi 7) related configuration
+
+            wifi7 = {
+              enable = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc ''
+                  Enables support for IEEE 802.11be (WiFi 7, EHT). This is currently experimental
+                  and requires you to manually enable CONFIG_IEEE80211BE when building hostapd.
+                '';
+              };
+
+              singleUserBeamformer = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "EHT single user beamformer support";
+              };
+
+              singleUserBeamformee = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "EHT single user beamformee support";
+              };
+
+              multiUserBeamformer = mkOption {
+                default = false;
+                type = types.bool;
+                description = mdDoc "EHT multi user beamformer support";
+              };
+
+              operatingChannelWidth = mkOption {
+                default = "20or40";
+                type = types.enum ["20or40" "80" "160" "80+80"];
+                apply = x:
+                  getAttr x {
+                    "20or40" = 0;
+                    "80" = 1;
+                    "160" = 2;
+                    "80+80" = 3;
+                  };
+                description = mdDoc ''
+                  Determines the operating channel width for EHT.
+
+                  - {var}`"20or40"`: 20 or 40 MHz operating channel width
+                  - {var}`"80"`: 80 MHz channel width
+                  - {var}`"160"`: 160 MHz channel width
+                  - {var}`"80+80"`: 80+80 MHz channel width
+                '';
+              };
+            };
+
+            #### BSS definitions
+
+            networks = mkOption {
+              default = {};
+              example = literalExpression ''
+                {
+                  wlp2s0 = {
+                    ssid = "Primary advertised network";
+                    authentication.saePasswords = [{ password = "a flakey password"; }]; # Use saePasswordsFile if possible.
+                  };
+                  wlp2s0-1 = {
+                    ssid = "Secondary advertised network (Open)";
+                    authentication.mode = "none";
+                  };
+                }
+              '';
+              description = mdDoc ''
+                This defines the BSSs (Basic Service Sets), colloquially known as WiFi networks,
+                advertised by this radio. You have to specify at least one.
+              '';
+              type = types.attrsOf (types.submodule (bssSubmod: {
+                options = {
+                  logLevel = mkOption {
+                    default = 2;
+                    type = types.int;
+                    description = mdDoc ''
+                      Levels (minimum value for logged events):
+                      0 = verbose debugging
+                      1 = debugging
+                      2 = informational messages
+                      3 = notification
+                      4 = warning
+                    '';
+                  };
+
+                  group = mkOption {
+                    default = "wheel";
+                    example = "network";
+                    type = types.str;
+                    description = mdDoc ''
+                      Members of this group can access the control socket for this interface.
+                    '';
+                  };
+
+                  utf8Ssid = mkOption {
+                    default = true;
+                    type = types.bool;
+                    description = mdDoc "Whether the SSID is to be interpreted using UTF-8 encoding.";
+                  };
+
+                  ssid = mkOption {
+                    example = "❄️ cool ❄️";
+                    type = types.str;
+                    description = mdDoc "SSID to be used in IEEE 802.11 management frames.";
+                  };
+
+                  bssid = mkOption {
+                    type = types.nullOr types.str;
+                    default = null;
+                    example = "11:22:33:44:55:66";
+                    description = mdDoc ''
+                      Specifies the BSSID for this BSS. Usually determined automatically,
+                      but for now you have to specify it manually when using multiple BSS.
+                      Try assigning related addresses from the locally administered MAC address ranges,
+                      by reusing the hardware address but replacing the second nibble with 2, 6, A or E.
+                      (e.g. if the real address is `XX:XX:XX:XX:XX:XX`, try `X2:XX:XX:XX:XX:XX`, `X6:XX:XX:XX:XX:XX`, ...
+                      for the second, third, ... BSS)
+                    '';
+                  };
+
+                  macAcl = mkOption {
+                    default = "deny";
+                    type = types.enum ["deny" "allow" "radius"];
+                    apply = x:
+                      getAttr x {
+                        "deny" = 0;
+                        "allow" = 1;
+                        "radius" = 2;
+                      };
+                    description = mdDoc ''
+                      Station MAC address-based authentication. The following modes are available:
+
+                      - {var}`"deny"`: Allow unless listed in {option}`macDeny` (default)
+                      - {var}`"allow"`: Deny unless listed in {option}`macAllow`
+                      - {var}`"radius"`: Use external radius server, but check both {option}`macAllow` and {option}`macDeny` first
+
+                      Please note that this kind of access control requires a driver that uses
+                      hostapd to take care of management frame processing and as such, this can be
+                      used with driver=hostap or driver=nl80211, but not with driver=atheros.
+                    '';
+                  };
+
+                  macAllow = mkOption {
+                    type = types.listOf types.str;
+                    default = [];
+                    example = ["11:22:33:44:55:66"];
+                    description = mdDoc ''
+                      Specifies the MAC addresses to allow if {option}`macAcl` is set to {var}`"allow"` or {var}`"radius"`.
+                      These values will be world-readable in the Nix store. Values will automatically be merged with
+                      {option}`macAllowFile` if necessary.
+                    '';
+                  };
+
+                  macAllowFile = mkOption {
+                    type = types.nullOr types.path;
+                    default = null;
+                    description = mdDoc ''
+                      Specifies a file containing the MAC addresses to allow if {option}`macAcl` is set to {var}`"allow"` or {var}`"radius"`.
+                      The file should contain exactly one MAC address per line. Comments and empty lines are ignored,
+                      only lines starting with a valid MAC address will be considered (e.g. `11:22:33:44:55:66`) and
+                      any content after the MAC address is ignored.
+                    '';
+                  };
+
+                  macDeny = mkOption {
+                    type = types.listOf types.str;
+                    default = [];
+                    example = ["11:22:33:44:55:66"];
+                    description = mdDoc ''
+                      Specifies the MAC addresses to deny if {option}`macAcl` is set to {var}`"deny"` or {var}`"radius"`.
+                      These values will be world-readable in the Nix store. Values will automatically be merged with
+                      {option}`macDenyFile` if necessary.
+                    '';
+                  };
+
+                  macDenyFile = mkOption {
+                    type = types.nullOr types.path;
+                    default = null;
+                    description = mdDoc ''
+                      Specifies a file containing the MAC addresses to deny if {option}`macAcl` is set to {var}`"deny"` or {var}`"radius"`.
+                      The file should contain exactly one MAC address per line. Comments and empty lines are ignored,
+                      only lines starting with a valid MAC address will be considered (e.g. `11:22:33:44:55:66`) and
+                      any content after the MAC address is ignored.
+                    '';
+                  };
+
+                  ignoreBroadcastSsid = mkOption {
+                    default = "disabled";
+                    type = types.enum ["disabled" "empty" "clear"];
+                    apply = x:
+                      getAttr x {
+                        "disabled" = 0;
+                        "empty" = 1;
+                        "clear" = 2;
+                      };
+                    description = mdDoc ''
+                      Send empty SSID in beacons and ignore probe request frames that do not
+                      specify full SSID, i.e., require stations to know SSID. Note that this does
+                      not increase security, since your clients will then broadcast the SSID instead,
+                      which can increase congestion.
+
+                      - {var}`"disabled"`: Advertise ssid normally.
+                      - {var}`"empty"`: send empty (length=0) SSID in beacon and ignore probe request for broadcast SSID
+                      - {var}`"clear"`: clear SSID (ASCII 0), but keep the original length (this may be required with some
+                        legacy clients that do not support empty SSID) and ignore probe requests for broadcast SSID. Only
+                        use this if empty does not work with your clients.
+                    '';
+                  };
+
+                  apIsolate = mkOption {
+                    default = false;
+                    type = types.bool;
+                    description = mdDoc ''
+                      Isolate traffic between stations (clients) and prevent them from
+                      communicating with each other.
+                    '';
+                  };
+
+                  settings = mkOption {
+                    default = {};
+                    example = { multi_ap = true; };
+                    type = types.submodule {
+                      freeformType = extraSettingsFormat.type;
+                    };
+                    description = mdDoc ''
+                      Extra configuration options to put at the end of this BSS's definition in the
+                      hostapd.conf for the associated interface. To find out which options are global
+                      and which are per-bss you have to read hostapd's source code, which is non-trivial
+                      and not documented otherwise.
+
+                      Lists will be converted to multiple definitions of the same key, and booleans to 0/1.
+                      Otherwise, the inputs are not modified or checked for correctness.
+                    '';
+                  };
+
+                  dynamicConfigScripts = mkOption {
+                    default = {};
+                    type = types.attrsOf types.path;
+                    example = literalExpression ''
+                      {
+                        exampleDynamicConfig = pkgs.writeShellScript "dynamic-config" '''
+                          HOSTAPD_CONFIG=$1
+                          # These always exist, but may or may not be used depending on the actual configuration
+                          MAC_ALLOW_FILE=$2
+                          MAC_DENY_FILE=$3
+
+                          cat >> "$HOSTAPD_CONFIG" << EOF
+                          # Add some dynamically generated statements here
+                          EOF
+                        ''';
+                      }
+                    '';
+                    description = mdDoc ''
+                      All of these scripts will be executed in lexicographical order before hostapd
+                      is started, right after the bss segment has been generated. They may dynamically
+                      append bss options to the generated configuration file.
+
+                      The first argument will point to the configuration file that you may append to.
+                      The second and third argument will point to this BSS's MAC allow and MAC deny file respectively.
+                    '';
+                  };
+
+                  #### IEEE 802.11i (WPA) configuration
+
+                  authentication = {
+                    mode = mkOption {
+                      default = "wpa3-sae";
+                      type = types.enum ["none" "wpa2-sha256" "wpa3-sae-transition" "wpa3-sae"];
+                      description = mdDoc ''
+                        Selects the authentication mode for this AP.
+
+                        - {var}`"none"`: Don't configure any authentication. This will disable WPA altogether
+                          and create an open AP. Use {option}`settings` together with this option if you
+                          want to configure the authentication manually. Any password options will still be
+                          effective, if set.
+                        - {var}`"wpa2-sha256"`: WPA2-Personal using SHA256 (IEEE 802.11i/RSN). Passwords are set
+                          using {option}`wpaPassword` or preferably by {option}`wpaPasswordFile` or {option}`wpaPskFile`.
+                        - {var}`"wpa3-sae-transition"`: Use WPA3-Personal (SAE) if possible, otherwise fallback
+                          to WPA2-SHA256. Only use if necessary and switch to the newer WPA3-SAE when possible.
+                          You will have to specify both {option}`wpaPassword` and {option}`saePasswords` (or one of their alternatives).
+                        - {var}`"wpa3-sae"`: Use WPA3-Personal (SAE). This is currently the recommended way to
+                          set up a secured WiFi AP (as of March 2023) and therefore the default. Passwords are set
+                          using either {option}`saePasswords` or preferably {option}`saePasswordsFile`.
+                      '';
+                    };
+
+                    pairwiseCiphers = mkOption {
+                      default = ["CCMP"];
+                      example = ["CCMP-256" "GCMP-256"];
+                      type = types.listOf types.str;
+                      description = mdDoc ''
+                        Set of accepted cipher suites (encryption algorithms) for pairwise keys (unicast packets).
+                        By default this allows just CCMP, which is the only commonly supported secure option.
+                        Use {option}`enableRecommendedPairwiseCiphers` to also enable newer recommended ciphers.
+
+                        Please refer to the hostapd documentation for allowed values. Generally, only
+                        CCMP or GCMP modes should be considered safe options. Most devices support CCMP while
+                        GCMP is often only available with devices supporting WiFi 5 (IEEE 802.11ac) or higher.
+                      '';
+                    };
+
+                    enableRecommendedPairwiseCiphers = mkOption {
+                      default = false;
+                      example = true;
+                      type = types.bool;
+                      description = mdDoc ''
+                        Additionally enable the recommended set of pairwise ciphers.
+                        This enables newer secure ciphers in addition to those defined in {option}`pairwiseCiphers`.
+                        You will have to test whether your hardware supports these by trial-and-error, because
+                        even if `iw list` indicates hardware support, your driver might not expose it.
+
+                        Beware {command}`hostapd` will most likely not return a useful error message in case
+                        this is enabled despite the driver or hardware not supporting the newer ciphers.
+                        Look out for messages like `Failed to set beacon parameters`.
+                      '';
+                    };
+
+                    wpaPassword = mkOption {
+                      default = null;
+                      example = "a flakey password";
+                      type = types.nullOr types.str;
+                      description = mdDoc ''
+                        Sets the password for WPA-PSK that will be converted to the pre-shared key.
+                        The password length must be in the range [8, 63] characters. While some devices
+                        may allow arbitrary characters (such as UTF-8) to be used, the standard specifies
+                        that each character in the passphrase must be an ASCII character in the range [0x20, 0x7e]
+                        (IEEE Std. 802.11i-2004, Annex H.4.1). Use emojis at your own risk.
+
+                        Not used when {option}`mode` is {var}`"wpa3-sae"`.
+
+                        Warning: This password will get put into a world-readable file in the Nix store!
+                        Using {option}`wpaPasswordFile` or {option}`wpaPskFile` instead is recommended.
+                      '';
+                    };
+
+                    wpaPasswordFile = mkOption {
+                      default = null;
+                      type = types.nullOr types.path;
+                      description = mdDoc ''
+                        Sets the password for WPA-PSK. Follows the same rules as {option}`wpaPassword`,
+                        but reads the password from the given file to prevent the password from being
+                        put into the Nix store.
+
+                        Not used when {option}`mode` is {var}`"wpa3-sae"`.
+                      '';
+                    };
+
+                    wpaPskFile = mkOption {
+                      default = null;
+                      type = types.nullOr types.path;
+                      description = mdDoc ''
+                        Sets the password(s) for WPA-PSK. Similar to {option}`wpaPasswordFile`,
+                        but additionally allows specifying multiple passwords, and some other options.
+
+                        Each line, except for empty lines and lines starting with #, must contain a
+                        MAC address and either a 64-hex-digit PSK or a password separated with a space.
+                        The password must follow the same rules as outlined in {option}`wpaPassword`.
+                        The special MAC address `00:00:00:00:00:00` can be used to configure PSKs
+                        that any client can use.
+
+                        An optional key identifier can be added by prefixing the line with `keyid=<keyid_string>`
+                        An optional VLAN ID can be specified by prefixing the line with `vlanid=<VLAN ID>`.
+                        An optional WPS tag can be added by prefixing the line with `wps=<0/1>` (default: 0).
+                        Any matching entry with that tag will be used when generating a PSK for a WPS Enrollee
+                        instead of generating a new random per-Enrollee PSK.
+
+                        Not used when {option}`mode` is {var}`"wpa3-sae"`.
+                      '';
+                    };
+
+                    saePasswords = mkOption {
+                      default = [];
+                      example = literalExpression ''
+                        [
+                          # Any client may use these passwords
+                          { password = "Wi-Figure it out"; }
+                          { password = "second password for everyone"; mac = "ff:ff:ff:ff:ff:ff"; }
+
+                          # Only the client with MAC-address 11:22:33:44:55:66 can use this password
+                          { password = "sekret pazzword"; mac = "11:22:33:44:55:66"; }
+                        ]
+                      '';
+                      description = mdDoc ''
+                        Sets allowed passwords for WPA3-SAE.
+
+                        The last matching (based on peer MAC address and identifier) entry is used to
+                        select which password to use. An empty string has the special meaning of
+                        removing all previously added entries.
+
+                        Warning: These entries will get put into a world-readable file in
+                        the Nix store! Using {option}`saePasswordsFile` instead is recommended.
+
+                        Not used when {option}`mode` is {var}`"wpa2-sha256"`.
+                      '';
+                      type = types.listOf (types.submodule {
+                        options = {
+                          password = mkOption {
+                            example = "a flakey password";
+                            type = types.str;
+                            description = mdDoc ''
+                              The password for this entry. SAE technically imposes no restrictions on
+                              password length or character set. But due to limitations of {command}`hostapd`'s
+                              config file format, a true newline character cannot be parsed.
+
+                              Warning: This password will get put into a world-readable file in
+                              the Nix store! Using {option}`saePasswordsFile` instead is recommended.
+                            '';
+                          };
+
+                          mac = mkOption {
+                            default = null;
+                            example = "11:22:33:44:55:66";
+                            type = types.nullOr types.str;
+                            description = mdDoc ''
+                              If this attribute is not included, or if it is set to the wildcard address (`ff:ff:ff:ff:ff:ff`),
+                              the entry is available for any station (client) to use. If a specific peer MAC address is included,
+                              only a station with that MAC address is allowed to use the entry.
+                            '';
+                          };
+
+                          vlanid = mkOption {
+                            default = null;
+                            example = 1;
+                            type = types.nullOr types.int;
+                            description = mdDoc "If this attribute is given, all clients using this entry will get tagged with the given VLAN ID.";
+                          };
+
+                          pk = mkOption {
+                            default = null;
+                            example = "";
+                            type = types.nullOr types.str;
+                            description = mdDoc ''
+                              If this attribute is given, SAE-PK will be enabled for this connection.
+                              This prevents evil-twin attacks, but a public key is required additionally to connect.
+                              (Essentially adds pubkey authentication such that the client can verify the identity of the AP)
+                            '';
+                          };
+
+                          id = mkOption {
+                            default = null;
+                            example = "";
+                            type = types.nullOr types.str;
+                            description = mdDoc ''
+                              If this attribute is given with non-zero length, it will set the password identifier
+                              for this entry. It can then only be used with that identifier.
+                            '';
+                          };
+                        };
+                      });
+                    };
+
+                    saePasswordsFile = mkOption {
+                      default = null;
+                      type = types.nullOr types.path;
+                      description = mdDoc ''
+                        Sets the password for WPA3-SAE. Follows the same rules as {option}`saePasswords`,
+                        but reads the entries from the given file to prevent them from being
+                        put into the Nix store.
+
+                        One entry per line, empty lines and lines beginning with # will be ignored.
+                        Each line must match the following format, although the order of optional
+                        parameters doesn't matter:
+                        `<password>[|mac=<peer mac>][|vlanid=<VLAN ID>][|pk=<m:ECPrivateKey-base64>][|id=<identifier>]`
+
+                        Not used when {option}`mode` is {var}`"wpa2-sha256"`.
+                      '';
+                    };
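+                    # Illustrative (hypothetical) saePasswordsFile contents, following the
+                    # format described above; the values here are made up:
+                    #   a shared passphrase for everyone
+                    #   sekret pazzword|mac=11:22:33:44:55:66|vlanid=2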
+
+                    saeAddToMacAllow = mkOption {
+                      type = types.bool;
+                      default = false;
+                      description = mdDoc ''
+                        If set, all sae password entries that have a non-wildcard MAC associated to
+                        them will additionally be used to populate the MAC allow list. This is
+                        additional to any entries set via {option}`macAllow` or {option}`macAllowFile`.
+                      '';
+                    };
+                  };
+                };
+
+                config = let
+                  bssCfg = bssSubmod.config;
+                  pairwiseCiphers =
+                    concatStringsSep " " (unique (bssCfg.authentication.pairwiseCiphers
+                      ++ optionals bssCfg.authentication.enableRecommendedPairwiseCiphers ["CCMP" "CCMP-256" "GCMP" "GCMP-256"]));
+                in {
+                  settings = {
+                    ssid = bssCfg.ssid;
+                    utf8_ssid = bssCfg.utf8Ssid;
+
+                    logger_syslog = mkDefault (-1);
+                    logger_syslog_level = bssCfg.logLevel;
+                    logger_stdout = mkDefault (-1);
+                    logger_stdout_level = bssCfg.logLevel;
+                    ctrl_interface = mkDefault "/run/hostapd";
+                    ctrl_interface_group = bssCfg.group;
+
+                    macaddr_acl = bssCfg.macAcl;
+
+                    ignore_broadcast_ssid = bssCfg.ignoreBroadcastSsid;
+
+                    # IEEE 802.11i (authentication) related configuration
+                    # Encrypt management frames to protect against deauthentication and similar attacks
+                    ieee80211w = mkDefault 1;
+                    sae_require_mfp = mkDefault 1;
+
+                    # Only allow WPA by default and disable insecure WEP
+                    auth_algs = mkDefault 1;
+                    # Always enable QoS, which is required for 802.11n and above
+                    wmm_enabled = mkDefault true;
+                    ap_isolate = bssCfg.apIsolate;
+
+                    sae_password = flip map bssCfg.authentication.saePasswords (
+                      entry:
+                        entry.password
+                        + optionalString (entry.mac != null) "|mac=${entry.mac}"
+                        + optionalString (entry.vlanid != null) "|vlanid=${toString entry.vlanid}"
+                        + optionalString (entry.pk != null) "|pk=${entry.pk}"
+                        + optionalString (entry.id != null) "|id=${entry.id}"
+                    );
+                  } // optionalAttrs (bssCfg.bssid != null) {
+                    bssid = bssCfg.bssid;
+                  } // optionalAttrs (bssCfg.macAllow != [] || bssCfg.macAllowFile != null || bssCfg.authentication.saeAddToMacAllow) {
+                    accept_mac_file = "/run/hostapd/${bssCfg._module.args.name}.mac.allow";
+                  } // optionalAttrs (bssCfg.macDeny != [] || bssCfg.macDenyFile != null) {
+                    deny_mac_file = "/run/hostapd/${bssCfg._module.args.name}.mac.deny";
+                  } // optionalAttrs (bssCfg.authentication.mode == "none") {
+                    wpa = mkDefault 0;
+                  } // optionalAttrs (bssCfg.authentication.mode == "wpa3-sae") {
+                    wpa = 2;
+                    wpa_key_mgmt = "SAE";
+                    # Derive PWE using both hunting-and-pecking loop and hash-to-element
+                    sae_pwe = 2;
+                    # Prevent downgrade attacks by indicating to clients that they should
+                    # disable any transition modes from now on.
+                    transition_disable = "0x01";
+                  } // optionalAttrs (bssCfg.authentication.mode == "wpa3-sae-transition") {
+                    wpa = 2;
+                    wpa_key_mgmt = "WPA-PSK-SHA256 SAE";
+                  } // optionalAttrs (bssCfg.authentication.mode == "wpa2-sha256") {
+                    wpa = 2;
+                    wpa_key_mgmt = "WPA-PSK-SHA256";
+                  } // optionalAttrs (bssCfg.authentication.mode != "none") {
+                    wpa_pairwise = pairwiseCiphers;
+                    rsn_pairwise = pairwiseCiphers;
+                  } // optionalAttrs (bssCfg.authentication.wpaPassword != null) {
+                    wpa_passphrase = bssCfg.authentication.wpaPassword;
+                  } // optionalAttrs (bssCfg.authentication.wpaPskFile != null) {
+                    wpa_psk_file = toString bssCfg.authentication.wpaPskFile;
+                  };
+
+                  dynamicConfigScripts = let
+                    # All MAC addresses from SAE entries that aren't the wildcard address
+                    saeMacs = filter (mac: mac != null && (toLower mac) != "ff:ff:ff:ff:ff:ff") (map (x: x.mac) bssCfg.authentication.saePasswords);
+                  in {
+                    "20-addMacAllow" = mkIf (bssCfg.macAllow != []) (pkgs.writeShellScript "add-mac-allow" ''
+                      MAC_ALLOW_FILE=$2
+                      cat >> "$MAC_ALLOW_FILE" <<EOF
+                      ${concatStringsSep "\n" bssCfg.macAllow}
+                      EOF
+                    '');
+                    "20-addMacAllowFile" = mkIf (bssCfg.macAllowFile != null) (pkgs.writeShellScript "add-mac-allow-file" ''
+                      MAC_ALLOW_FILE=$2
+                      grep -Eo '^([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})' ${escapeShellArg bssCfg.macAllowFile} >> "$MAC_ALLOW_FILE"
+                    '');
+                    "20-addMacAllowFromSae" = mkIf (bssCfg.authentication.saeAddToMacAllow && saeMacs != []) (pkgs.writeShellScript "add-mac-allow-from-sae" ''
+                      MAC_ALLOW_FILE=$2
+                      cat >> "$MAC_ALLOW_FILE" <<EOF
+                      ${concatStringsSep "\n" saeMacs}
+                      EOF
+                    '');
+                    # Populate mac allow list from saePasswordsFile
+                    # (filter for lines with mac=;  exclude commented lines; filter for real mac-addresses; strip mac=)
+                    "20-addMacAllowFromSaeFile" = mkIf (bssCfg.authentication.saeAddToMacAllow && bssCfg.authentication.saePasswordsFile != null) (pkgs.writeShellScript "add-mac-allow-from-sae-file" ''
+                      MAC_ALLOW_FILE=$2
+                      grep mac= ${escapeShellArg bssCfg.authentication.saePasswordsFile} \
+                        | grep -v '\s*#' \
+                        | grep -Eo 'mac=([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})' \
+                        | sed 's|^mac=||' >> "$MAC_ALLOW_FILE"
+                    '');
+                    "20-addMacDeny" = mkIf (bssCfg.macDeny != []) (pkgs.writeShellScript "add-mac-deny" ''
+                      MAC_DENY_FILE=$3
+                      cat >> "$MAC_DENY_FILE" <<EOF
+                      ${concatStringsSep "\n" bssCfg.macDeny}
+                      EOF
+                    '');
+                    "20-addMacDenyFile" = mkIf (bssCfg.macDenyFile != null) (pkgs.writeShellScript "add-mac-deny-file" ''
+                      MAC_DENY_FILE=$3
+                      grep -Eo '^([0-9A-Fa-f]{2}[:]){5}([0-9A-Fa-f]{2})' ${escapeShellArg bssCfg.macDenyFile} >> "$MAC_DENY_FILE"
+                    '');
+                    # Add wpa_passphrase from file
+                    "20-wpaPasswordFile" = mkIf (bssCfg.authentication.wpaPasswordFile != null) (pkgs.writeShellScript "wpa-password-file" ''
+                      HOSTAPD_CONFIG_FILE=$1
+                      cat >> "$HOSTAPD_CONFIG_FILE" <<EOF
+                      wpa_passphrase=$(cat ${escapeShellArg bssCfg.authentication.wpaPasswordFile})
+                      EOF
+                    '');
+                    # Add sae passwords from file
+                    "20-saePasswordsFile" = mkIf (bssCfg.authentication.saePasswordsFile != null) (pkgs.writeShellScript "sae-passwords-file" ''
+                      HOSTAPD_CONFIG_FILE=$1
+                      grep -v '\s*#' ${escapeShellArg bssCfg.authentication.saePasswordsFile} \
+                        | sed 's/^/sae_password=/' >> "$HOSTAPD_CONFIG_FILE"
+                    '');
+                  };
+                };
+              }));
+            };
+          };
+
+          config.settings = let
+            radioCfg = radioSubmod.config;
+          in {
+            driver = radioCfg.driver;
+            hw_mode = {
+              "2g" = "g";
+              "5g" = "a";
+              "6g" = "a";
+              "60g" = "ad";
+            }.${radioCfg.band};
+            channel = radioCfg.channel;
+            noscan = radioCfg.noScan;
+          } // optionalAttrs (radioCfg.countryCode != null) {
+            country_code = radioCfg.countryCode;
+            # IEEE 802.11d: Limit to frequencies allowed in country
+            ieee80211d = true;
+            # IEEE 802.11h: Enable radar detection and DFS (Dynamic Frequency Selection)
+            ieee80211h = true;
+          } // optionalAttrs radioCfg.wifi4.enable {
+            # IEEE 802.11n (WiFi 4) related configuration
+            ieee80211n = true;
+            require_ht = radioCfg.wifi4.require;
+            ht_capab = concatMapStrings (x: "[${x}]") radioCfg.wifi4.capabilities;
+          } // optionalAttrs radioCfg.wifi5.enable {
+            # IEEE 802.11ac (WiFi 5) related configuration
+            ieee80211ac = true;
+            require_vht = radioCfg.wifi5.require;
+            vht_oper_chwidth = radioCfg.wifi5.operatingChannelWidth;
+            vht_capab = concatMapStrings (x: "[${x}]") radioCfg.wifi5.capabilities;
+          } // optionalAttrs radioCfg.wifi6.enable {
+            # IEEE 802.11ax (WiFi 6) related configuration
+            ieee80211ax = true;
+            require_he = mkIf radioCfg.wifi6.require true;
+            he_oper_chwidth = radioCfg.wifi6.operatingChannelWidth;
+            he_su_beamformer = radioCfg.wifi6.singleUserBeamformer;
+            he_su_beamformee = radioCfg.wifi6.singleUserBeamformee;
+            he_mu_beamformer = radioCfg.wifi6.multiUserBeamformer;
+          } // optionalAttrs radioCfg.wifi7.enable {
+            # IEEE 802.11be (WiFi 7) related configuration
+            ieee80211be = true;
+            eht_oper_chwidth = radioCfg.wifi7.operatingChannelWidth;
+            eht_su_beamformer = radioCfg.wifi7.singleUserBeamformer;
+            eht_su_beamformee = radioCfg.wifi7.singleUserBeamformee;
+            eht_mu_beamformer = radioCfg.wifi7.multiUserBeamformer;
+          };
+        }));
+      };
+    };
+  };
+
+  imports = let
+    renamedOptionMessage = message: ''
+      ${message}
+      Refer to the documentation of `services.hostapd.radios` for an example and more information.
+    '';
+  in [
+    (mkRemovedOptionModule ["services" "hostapd" "interface"]
+      (renamedOptionMessage "All other options for this interface are now set via `services.hostapd.radios.«interface».*`."))
+
+    (mkRemovedOptionModule ["services" "hostapd" "driver"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».driver`."))
+    (mkRemovedOptionModule ["services" "hostapd" "noScan"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».noScan`."))
+    (mkRemovedOptionModule ["services" "hostapd" "countryCode"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».countryCode`."))
+    (mkRemovedOptionModule ["services" "hostapd" "hwMode"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».band`."))
+    (mkRemovedOptionModule ["services" "hostapd" "channel"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».channel`."))
+    (mkRemovedOptionModule ["services" "hostapd" "extraConfig"]
+      (renamedOptionMessage ''
+        It has been replaced by `services.hostapd.radios.«interface».settings` and
+        `services.hostapd.radios.«interface».networks.«network».settings` respectively
+        for per-radio and per-network extra configuration. The module now supports a lot more
+        options inherently, so please re-check whether using settings is still necessary.''))
+
+    (mkRemovedOptionModule ["services" "hostapd" "logLevel"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».networks.«network».logLevel`."))
+    (mkRemovedOptionModule ["services" "hostapd" "group"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».networks.«network».group`."))
+    (mkRemovedOptionModule ["services" "hostapd" "ssid"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».networks.«network».ssid`."))
+
+    (mkRemovedOptionModule ["services" "hostapd" "wpa"]
+      (renamedOptionMessage "It has been replaced by `services.hostapd.radios.«interface».networks.«network».authentication.mode`."))
+    (mkRemovedOptionModule ["services" "hostapd" "wpaPassphrase"]
+      (renamedOptionMessage ''
+        It has been replaced by `services.hostapd.radios.«interface».networks.«network».authentication.wpaPassword`.
+        While upgrading your config, please consider using the newer SAE authentication scheme
+        and one of the new `passwordFile`-like options to avoid putting the password into the world readable nix-store.''))
+  ];
+
+  config = mkIf cfg.enable {
+    assertions =
+      [
+        {
+          assertion = cfg.radios != {};
+          message = "At least one radio must be configured with hostapd!";
+        }
+      ]
+      # Radio warnings
+      ++ (concatLists (mapAttrsToList (
+          radio: radioCfg:
+            [
+              {
+                assertion = radioCfg.networks != {};
+                message = "hostapd radio ${radio}: At least one network must be configured!";
+              }
+              # XXX: There could be many more useful assertions about (band == xy) -> ensure other required settings.
+              # see https://github.com/openwrt/openwrt/blob/539cb5389d9514c99ec1f87bd4465f77c7ed9b93/package/kernel/mac80211/files/lib/netifd/wireless/mac80211.sh#L158
+              {
+                assertion = length (filter (bss: bss == radio) (attrNames radioCfg.networks)) == 1;
+                message = ''hostapd radio ${radio}: Exactly one network must be named like the radio, for reasons internal to hostapd.'';
+              }
+              {
+                assertion = (radioCfg.wifi4.enable && builtins.elem "HT40-" radioCfg.wifi4.capabilities) -> radioCfg.channel != 0;
+                message = ''hostapd radio ${radio}: using ACS (channel = 0) together with HT40- (wifi4.capabilities) is unsupported by hostapd'';
+              }
+            ]
+            # BSS warnings
+            ++ (concatLists (mapAttrsToList (bss: bssCfg: let
+                auth = bssCfg.authentication;
+                countWpaPasswordDefinitions = count (x: x != null) [
+                  auth.wpaPassword
+                  auth.wpaPasswordFile
+                  auth.wpaPskFile
+                ];
+              in [
+                {
+                  assertion = hasPrefix radio bss;
+                  message = "hostapd radio ${radio} bss ${bss}: The bss (network) name ${bss} is invalid. It must be prefixed by the radio name for reasons internal to hostapd. A valid name would be e.g. ${radio}, ${radio}-1, ...";
+                }
+                {
+                  assertion = (length (attrNames radioCfg.networks) > 1) -> (bssCfg.bssid != null);
+                  message = ''hostapd radio ${radio} bss ${bss}: bssid must be specified manually (for now) since this radio uses multiple BSS.'';
+                }
+                {
+                  assertion = countWpaPasswordDefinitions <= 1;
+                  message = ''hostapd radio ${radio} bss ${bss}: must use at most one WPA password option (wpaPassword, wpaPasswordFile, wpaPskFile)'';
+                }
+                {
+                  assertion = auth.wpaPassword != null -> (stringLength auth.wpaPassword >= 8 && stringLength auth.wpaPassword <= 63);
+                  message = ''hostapd radio ${radio} bss ${bss}: uses a wpaPassword of invalid length (must be in [8,63]).'';
+                }
+                {
+                  assertion = auth.saePasswords == [] || auth.saePasswordsFile == null;
+                  message = ''hostapd radio ${radio} bss ${bss}: must use only one SAE password option (saePasswords or saePasswordsFile)'';
+                }
+                {
+                  assertion = auth.mode == "wpa3-sae" -> (auth.saePasswords != [] || auth.saePasswordsFile != null);
+                  message = ''hostapd radio ${radio} bss ${bss}: uses WPA3-SAE which requires defining a sae password option'';
+                }
+                {
+                  assertion = auth.mode == "wpa3-sae-transition" -> (auth.saePasswords != [] || auth.saePasswordsFile != null) && countWpaPasswordDefinitions == 1;
+                  message = ''hostapd radio ${radio} bss ${bss}: uses WPA3-SAE in transition mode, which requires defining both a wpa password option and a sae password option'';
+                }
+                {
+                  assertion = auth.mode == "wpa2-sha256" -> countWpaPasswordDefinitions == 1;
+                  message = ''hostapd radio ${radio} bss ${bss}: uses WPA2-SHA256 which requires defining a wpa password option'';
+                }
+              ])
+              radioCfg.networks))
+        )
+        cfg.radios));
+
+    environment.systemPackages = [cfg.package];
+
+    services.udev.packages = with pkgs; [crda];
+
+    systemd.services.hostapd = {
+      description = "IEEE 802.11 Host Access-Point Daemon";
+
+      path = [cfg.package];
+      after = map (radio: "sys-subsystem-net-devices-${utils.escapeSystemdPath radio}.device") (attrNames cfg.radios);
+      bindsTo = map (radio: "sys-subsystem-net-devices-${utils.escapeSystemdPath radio}.device") (attrNames cfg.radios);
+      wantedBy = ["multi-user.target"];
+
+      # Create merged configuration and acl files for each radio (and their bss's) prior to starting
+      preStart = concatStringsSep "\n" (mapAttrsToList makeRadioRuntimeFiles cfg.radios);
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/hostapd ${concatStringsSep " " runtimeConfigFiles}";
+        Restart = "always";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        RuntimeDirectory = "hostapd";
+
+        # Hardening
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        DevicePolicy = "closed";
+        DeviceAllow = "/dev/rfkill rw";
+        NoNewPrivileges = true;
+        PrivateUsers = false; # hostapd requires true root access.
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_NETLINK"
+          "AF_UNIX"
+          "AF_PACKET"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+          "@chown"
+        ];
+        UMask = "0077";
+      };
+    };
+  };
+}
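
A minimal usage sketch for the new interface (the interface name `wlp2s0`, channel, country
code and password file path below are assumptions for illustration, not part of this patch):

    services.hostapd = {
      enable = true;
      radios.wlp2s0 = {
        band = "2g";
        channel = 7;
        countryCode = "US";
        networks.wlp2s0 = {
          ssid = "Example network";
          authentication.saePasswordsFile = "/run/secrets/wifi-sae-passwords";
        };
      };
    };

Note that, per the assertions above, at least one network must be named like its radio, and a
bssid must be given manually when a radio defines more than one BSS.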
diff --git a/nixpkgs/nixos/modules/services/networking/htpdate.nix b/nixpkgs/nixos/modules/services/networking/htpdate.nix
new file mode 100644
index 000000000000..8b9bb2888dac
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/htpdate.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  inherit (pkgs) htpdate;
+
+  cfg = config.services.htpdate;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.htpdate = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable htpdate daemon.
+        '';
+      };
+
+      extraOptions = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Additional command line arguments to pass to htpdate.
+        '';
+      };
+
+      servers = mkOption {
+        type = types.listOf types.str;
+        default = [ "www.google.com" ];
+        description = lib.mdDoc ''
+          HTTP servers to use for time synchronization.
+        '';
+      };
+
+      proxy = mkOption {
+        type = types.str;
+        default = "";
+        example = "127.0.0.1:8118";
+        description = lib.mdDoc ''
+          HTTP proxy used for requests.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.htpdate = {
+      description = "htpdate daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "forking";
+        PIDFile = "/run/htpdate.pid";
+        ExecStart = concatStringsSep " " [
+          "${htpdate}/bin/htpdate"
+          "-D -u nobody"
+          "-a -s"
+          "-l"
+          "${optionalString (cfg.proxy != "") "-P ${cfg.proxy}"}"
+          "${cfg.extraOptions}"
+          "${concatStringsSep " " cfg.servers}"
+        ];
+      };
+    };
+
+  };
+
+}
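
A brief usage sketch for this module (the server list below is an assumption for illustration;
the proxy value is the option's own example):

    services.htpdate = {
      enable = true;
      servers = [ "www.example.org" "www.google.com" ];
      proxy = "127.0.0.1:8118";
    };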
diff --git a/nixpkgs/nixos/modules/services/networking/https-dns-proxy.nix b/nixpkgs/nixos/modules/services/networking/https-dns-proxy.nix
new file mode 100644
index 000000000000..87eb23ea4585
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/https-dns-proxy.nix
@@ -0,0 +1,138 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    concatStringsSep
+    mkEnableOption mkIf mkOption types;
+
+  cfg = config.services.https-dns-proxy;
+
+  providers = {
+    cloudflare = {
+      ips = [ "1.1.1.1" "1.0.0.1" ];
+      url = "https://cloudflare-dns.com/dns-query";
+    };
+    google = {
+      ips = [ "8.8.8.8" "8.8.4.4" ];
+      url = "https://dns.google/dns-query";
+    };
+    quad9 = {
+      ips = [ "9.9.9.9" "149.112.112.112" ];
+      url = "https://dns.quad9.net/dns-query";
+    };
+    opendns = {
+      ips = [ "208.67.222.222" "208.67.220.220" ];
+      url = "https://doh.opendns.com/dns-query";
+    };
+    custom = {
+      inherit (cfg.provider) ips url;
+    };
+  };
+
+  defaultProvider = "quad9";
+
+  providerCfg =
+    concatStringsSep " " [
+      "-b"
+      (concatStringsSep "," providers."${cfg.provider.kind}".ips)
+      "-r"
+      providers."${cfg.provider.kind}".url
+    ];
+
+in
+{
+  meta.maintainers = with lib.maintainers; [ peterhoeg ];
+
+  ###### interface
+
+  options.services.https-dns-proxy = {
+    enable = mkEnableOption (lib.mdDoc "https-dns-proxy daemon");
+
+    address = mkOption {
+      description = lib.mdDoc "The address on which to listen";
+      type = types.str;
+      default = "127.0.0.1";
+    };
+
+    port = mkOption {
+      description = lib.mdDoc "The port on which to listen";
+      type = types.port;
+      default = 5053;
+    };
+
+    provider = {
+      kind = mkOption {
+        description = lib.mdDoc ''
+          The upstream provider to use, or `custom` in case you do not trust any of
+          the predefined providers or just want to use your own.
+
+          The default is ${defaultProvider} and there are privacy and security
+          trade-offs when using any upstream provider. Please consider that
+          before using any of them.
+
+          Supported providers: ${concatStringsSep ", " (builtins.attrNames providers)}
+
+          If you pick the custom provider, you will need to provide the
+          bootstrap IP addresses as well as the resolver https URL.
+        '';
+        type = types.enum (builtins.attrNames providers);
+        default = defaultProvider;
+      };
+
+      ips = mkOption {
+        description = lib.mdDoc "The custom provider IPs";
+        type = types.listOf types.str;
+      };
+
+      url = mkOption {
+        description = lib.mdDoc "The custom provider URL";
+        type = types.str;
+      };
+    };
+
+    preferIPv4 = mkOption {
+      description = lib.mdDoc ''
+        https_dns_proxy will by default use IPv6 and fail if it is not available.
+        To play it safe, we choose IPv4.
+      '';
+      type = types.bool;
+      default = true;
+    };
+
+    extraArgs = mkOption {
+      description = lib.mdDoc "Additional arguments to pass to the process.";
+      type = types.listOf types.str;
+      default = [ "-v" ];
+    };
+  };
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.https-dns-proxy = {
+      description = "DNS to DNS over HTTPS (DoH) proxy";
+      requires = [ "network.target" ];
+      after = [ "network.target" ];
+      wants = [ "nss-lookup.target" ];
+      before = [ "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = rec {
+        Type = "exec";
+        DynamicUser = true;
+        ProtectHome = "tmpfs";
+        ExecStart = lib.concatStringsSep " " (
+          [
+            (lib.getExe pkgs.https-dns-proxy)
+            "-a ${toString cfg.address}"
+            "-p ${toString cfg.port}"
+            "-l -"
+            providerCfg
+          ]
+          ++ lib.optional cfg.preferIPv4 "-4"
+          ++ cfg.extraArgs
+        );
+        Restart = "on-failure";
+      };
+    };
+  };
+}
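
A usage sketch for the custom provider path (the IPs and URL simply reuse the Cloudflare
values already listed in the module, purely for illustration):

    services.https-dns-proxy = {
      enable = true;
      port = 5053;
      provider = {
        kind = "custom";
        ips = [ "1.1.1.1" "1.0.0.1" ];
        url = "https://cloudflare-dns.com/dns-query";
      };
    };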
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/default.nix b/nixpkgs/nixos/modules/services/networking/hylafax/default.nix
new file mode 100644
index 000000000000..d8ffa3fc04d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/default.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+{
+
+  imports = [
+    ./options.nix
+    ./systemd.nix
+  ];
+
+  config = lib.modules.mkIf config.services.hylafax.enable {
+    environment.systemPackages = [ pkgs.hylafaxplus ];
+    users.users.uucp = {
+      uid = config.ids.uids.uucp;
+      group = "uucp";
+      description = "Unix-to-Unix CoPy system";
+      isSystemUser = true;
+      inherit (config.users.users.nobody) home;
+    };
+    assertions = [{
+      assertion = config.services.hylafax.modems != {};
+      message = ''
+        HylaFAX cannot be used without modems.
+        Please define at least one modem with
+        <option>config.services.hylafax.modems</option>.
+      '';
+    }];
+  };
+
+  meta.maintainers = [ lib.maintainers.yarny ];
+
+}
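
A minimal sketch of enabling HylaFAX with a single modem (the device name and dial-plan
values are assumptions; the modem options themselves are declared in options.nix below):

    services.hylafax = {
      enable = true;
      countryCode = "49";
      modems.ttyS1 = {
        type = "cirrus";
        config = {
          AreaCode = "30";
          FAXNumber = "123456";
          LocalIdentifier = "LostInBerlin";
        };
      };
    };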
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix b/nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix
new file mode 100644
index 000000000000..9b634650cf79
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/faxq-default.nix
@@ -0,0 +1,12 @@
+{ ... }:
+
+# see man:hylafax-config(5)
+
+{
+
+  ModemGroup = [ ''"any:0:.*"'' ];
+  ServerTracing = "0x78701";
+  SessionTracing = "0x78701";
+  UUCPLockDir = "/var/lock";
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/faxq-wait.sh b/nixpkgs/nixos/modules/services/networking/hylafax/faxq-wait.sh
new file mode 100755
index 000000000000..1826aa30e627
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/faxq-wait.sh
@@ -0,0 +1,29 @@
+#! @runtimeShell@ -e
+
+# skip this if there are no modems at all
+if ! stat -t "@spoolAreaPath@"/etc/config.* >/dev/null 2>&1
+then
+  exit 0
+fi
+
+echo "faxq started, waiting for modem(s) to initialize..."
+
+for i in `seq @timeoutSec@0 -1 0`  # graceful timeout: @timeoutSec@ seconds, in 0.1 s steps
+do
+  sleep 0.1
+  # done if status files exist, but don't mention initialization
+  if \
+    stat -t "@spoolAreaPath@"/status/* >/dev/null 2>&1 \
+    && \
+    ! grep --silent --ignore-case 'initializing server' \
+    "@spoolAreaPath@"/status/*
+  then
+    echo "modem(s) apparently ready"
+    exit 0
+  fi
+  # if i reached 0, modems probably failed to initialize
+  if test $i -eq 0
+  then
+    echo "warning: modem initialization timed out"
+  fi
+done
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/hfaxd-default.nix b/nixpkgs/nixos/modules/services/networking/hylafax/hfaxd-default.nix
new file mode 100644
index 000000000000..8999dae57f41
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/hfaxd-default.nix
@@ -0,0 +1,10 @@
+{ ... }:
+
+# see man:hfaxd(8)
+
+{
+
+  ServerTracing = "0x91";
+  XferLogFile = "/clientlog";
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/modem-default.nix b/nixpkgs/nixos/modules/services/networking/hylafax/modem-default.nix
new file mode 100644
index 000000000000..707b82092829
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/modem-default.nix
@@ -0,0 +1,22 @@
+{ pkgs, ... }:
+
+# see man:hylafax-config(5)
+
+{
+
+  TagLineFont = "etc/LiberationSans-25.pcf";
+  TagLineLocale = "en_US.UTF-8";
+
+  AdminGroup = "root";  # groups that can change server config
+  AnswerRotary = "fax";  # don't accept anything else but faxes
+  LogFileMode = "0640";
+  PriorityScheduling = true;
+  RecvFileMode = "0640";
+  ServerTracing = "0x78701";
+  SessionTracing = "0x78701";
+  UUCPLockDir = "/var/lock";
+
+  SendPageCmd = "${pkgs.coreutils}/bin/false";  # prevent pager transmit
+  SendUUCPCmd = "${pkgs.coreutils}/bin/false";  # prevent UUCP transmit
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/options.nix b/nixpkgs/nixos/modules/services/networking/hylafax/options.nix
new file mode 100644
index 000000000000..49b2bef90a5f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/options.nix
@@ -0,0 +1,372 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  inherit (lib.options) literalExpression mkEnableOption mkOption;
+  inherit (lib.types) bool enum ints lines attrsOf nonEmptyStr nullOr path str submodule;
+  inherit (lib.modules) mkDefault mkIf mkMerge;
+
+  commonDescr = ''
+    Values can be either strings or integers
+    (which will be added to the config file verbatim)
+    or lists thereof
+    (which will be translated to multiple
+    lines with the same configuration key).
+    Boolean values are translated to "Yes" or "No".
+    The default contains some reasonable
+    configuration to yield an operational system.
+  '';
+
+  configAttrType =
+    # Options in HylaFAX configuration files can be
+    # booleans, strings, integers, or list thereof
+    # representing multiple config directives with the same key.
+    # This type definition resolves all
+    # those types into a list of strings.
+    let
+      inherit (lib.types) attrsOf coercedTo int listOf;
+      innerType = coercedTo bool (x: if x then "Yes" else "No")
+        (coercedTo int (toString) str);
+    in
+      attrsOf (coercedTo innerType lib.singleton (listOf innerType));
+
+  cfg = config.services.hylafax;
+
+  modemConfigOptions = { name, config, ... }: {
+    options = {
+      name = mkOption {
+        type = nonEmptyStr;
+        example = "ttyS1";
+        description = lib.mdDoc ''
+          Name of modem device,
+          will be searched for in {file}`/dev`.
+        '';
+      };
+      type = mkOption {
+        type = nonEmptyStr;
+        example = "cirrus";
+        description = lib.mdDoc ''
+          Name of modem configuration file,
+          will be searched for in {file}`config`
+          in the spooling area directory.
+        '';
+      };
+      config = mkOption {
+        type = configAttrType;
+        example = {
+          AreaCode = "49";
+          LocalCode = "30";
+          FAXNumber = "123456";
+          LocalIdentifier = "LostInBerlin";
+        };
+        description = lib.mdDoc ''
+          Attribute set of values for the given modem.
+          ${commonDescr}
+          Options defined here override options in
+          {option}`commonModemConfig` for this modem.
+        '';
+      };
+    };
+    config.name = mkDefault name;
+    config.config.Include = [ "config/${config.type}" ];
+  };
+
+  defaultConfig =
+    let
+      inherit (config.security) wrapperDir;
+      inherit (config.services.mail.sendmailSetuidWrapper) program;
+      mkIfDefault = cond: value: mkIf cond (mkDefault value);
+      noWrapper = config.services.mail.sendmailSetuidWrapper==null;
+      # If a sendmail setuid wrapper exists,
+      # we add the path to the default configuration file.
+      # Otherwise, we use `false` to provoke
+      # an error if hylafax tries to use it.
+      c.sendmailPath = mkMerge [
+        (mkIfDefault noWrapper "${pkgs.coreutils}/bin/false")
+        (mkIfDefault (!noWrapper) "${wrapperDir}/${program}")
+      ];
+      importDefaultConfig = file:
+        lib.attrsets.mapAttrs
+        (lib.trivial.const mkDefault)
+        (import file { inherit pkgs; });
+      c.commonModemConfig = importDefaultConfig ./modem-default.nix;
+      c.faxqConfig = importDefaultConfig ./faxq-default.nix;
+      c.hfaxdConfig = importDefaultConfig ./hfaxd-default.nix;
+    in
+      c;
+
+  localConfig =
+    let
+      c.hfaxdConfig.UserAccessFile = cfg.userAccessFile;
+      c.faxqConfig = lib.attrsets.mapAttrs
+        (lib.trivial.const (v: mkIf (v!=null) v))
+        {
+          AreaCode = cfg.areaCode;
+          CountryCode = cfg.countryCode;
+          LongDistancePrefix = cfg.longDistancePrefix;
+          InternationalPrefix = cfg.internationalPrefix;
+        };
+      c.commonModemConfig = c.faxqConfig;
+    in
+      c;
+
+in
+
+
+{
+
+
+  options.services.hylafax = {
+
+    enable = mkEnableOption (lib.mdDoc "HylaFAX server");
+
+    autostart = mkOption {
+      type = bool;
+      default = true;
+      example = false;
+      description = lib.mdDoc ''
+        Autostart the HylaFAX queue manager at system start.
+        If this is `false`, the queue manager
+        will still be started if there are pending
+        jobs or if a user tries to connect to it.
+      '';
+    };
+
+    countryCode = mkOption {
+      type = nullOr nonEmptyStr;
+      default = null;
+      example = "49";
+      description = lib.mdDoc "Country code for server and all modems.";
+    };
+
+    areaCode = mkOption {
+      type = nullOr nonEmptyStr;
+      default = null;
+      example = "30";
+      description = lib.mdDoc "Area code for server and all modems.";
+    };
+
+    longDistancePrefix = mkOption {
+      type = nullOr str;
+      default = null;
+      example = "0";
+      description = lib.mdDoc "Long distance prefix for server and all modems.";
+    };
+
+    internationalPrefix = mkOption {
+      type = nullOr str;
+      default = null;
+      example = "00";
+      description = lib.mdDoc "International prefix for server and all modems.";
+    };
+
+    spoolAreaPath = mkOption {
+      type = path;
+      default = "/var/spool/fax";
+      description = lib.mdDoc ''
+        The spooling area will be created/maintained
+        at the location given here.
+      '';
+    };
+
+    userAccessFile = mkOption {
+      type = path;
+      default = "/etc/hosts.hfaxd";
+      description = lib.mdDoc ''
+        The {file}`hosts.hfaxd`
+        file entry in the spooling area
+        will be symlinked to the location given here.
+        This file must exist and be
+        readable only by the `uucp` user.
+        See hosts.hfaxd(5) for details.
+        This configuration permits access for all users:
+        ```
+          environment.etc."hosts.hfaxd" = {
+            mode = "0600";
+            user = "uucp";
+            text = ".*";
+          };
+        ```
+        Note that host-based access can be controlled with
+        {option}`config.systemd.sockets.hylafax-hfaxd.listenStreams`;
+        by default, only 127.0.0.1 is permitted to connect.
+      '';
+    };
+
+    sendmailPath = mkOption {
+      type = path;
+      example = literalExpression ''"''${pkgs.postfix}/bin/sendmail"'';
+      # '' ;  # fix vim
+      description = lib.mdDoc ''
+        Path to {file}`sendmail` program.
+        The default uses the local sendmail wrapper
+        (see {option}`config.services.mail.sendmailSetuidWrapper`),
+        otherwise it points to the {file}`false`
+        binary so that any use causes an error.
+      '';
+    };
+
+    hfaxdConfig = mkOption {
+      type = configAttrType;
+      example.RecvqProtection = "0400";
+      description = lib.mdDoc ''
+        Attribute set of lines for the global
+        hfaxd config file {file}`etc/hfaxd.conf`.
+        ${commonDescr}
+      '';
+    };
+
+    faxqConfig = mkOption {
+      type = configAttrType;
+      example = {
+        InternationalPrefix = "00";
+        LongDistancePrefix = "0";
+      };
+      description = lib.mdDoc ''
+        Attribute set of lines for the global
+        faxq config file {file}`etc/config`.
+        ${commonDescr}
+      '';
+    };
+
+    commonModemConfig = mkOption {
+      type = configAttrType;
+      example = {
+        InternationalPrefix = "00";
+        LongDistancePrefix = "0";
+      };
+      description = lib.mdDoc ''
+        Attribute set of default values for
+        modem config files {file}`etc/config.*`.
+        ${commonDescr}
+        Think twice before changing
+        paths of fax-processing scripts.
+      '';
+    };
+
+    modems = mkOption {
+      type = attrsOf (submodule [ modemConfigOptions ]);
+      default = {};
+      example.ttyS1 = {
+        type = "cirrus";
+        config = {
+          FAXNumber = "123456";
+          LocalIdentifier = "Smith";
+        };
+      };
+      description = lib.mdDoc ''
+        Description of installed modems.
+        At least one modem must be defined
+        to enable the HylaFAX server.
+      '';
+    };
+
+    spoolExtraInit = mkOption {
+      type = lines;
+      default = "";
+      example = "chmod 0755 .  # everyone may read my faxes";
+      description = lib.mdDoc ''
+        Additional shell code that is executed within the
+        spooling area directory right after its setup.
+      '';
+    };
+
+    faxcron.enable.spoolInit = mkEnableOption (lib.mdDoc ''
+      purging old files from the spooling area with
+      {file}`faxcron`
+      each time the spooling area is initialized
+    '');
+    faxcron.enable.frequency = mkOption {
+      type = nullOr nonEmptyStr;
+      default = null;
+      example = "daily";
+      description = lib.mdDoc ''
+        Purge old files from the spooling area with
+        {file}`faxcron` at the given frequency
+        (see systemd.time(7)).
+      '';
+    };
+    faxcron.infoDays = mkOption {
+      type = ints.positive;
+      default = 30;
+      description = lib.mdDoc ''
+        Set the expiration time for data in the
+        remote machine information directory in days.
+      '';
+    };
+    faxcron.logDays = mkOption {
+      type = ints.positive;
+      default = 30;
+      description = lib.mdDoc ''
+        Set the expiration time for
+        session trace log files in days.
+      '';
+    };
+    faxcron.rcvDays = mkOption {
+      type = ints.positive;
+      default = 7;
+      description = lib.mdDoc ''
+        Set the expiration time for files in
+        the received facsimile queue in days.
+      '';
+    };
+
+    faxqclean.enable.spoolInit = mkEnableOption (lib.mdDoc ''
+      purging old files from the spooling area with
+      {file}`faxqclean`
+      each time the spooling area is initialized
+    '');
+    faxqclean.enable.frequency = mkOption {
+      type = nullOr nonEmptyStr;
+      default = null;
+      example = "daily";
+      description = lib.mdDoc ''
+        Purge old files from the spooling area with
+        {file}`faxqclean` at the given frequency
+        (see systemd.time(7)).
+      '';
+    };
+    faxqclean.archiving = mkOption {
+      type = enum [ "never" "as-flagged" "always" ];
+      default = "as-flagged";
+      example = "always";
+      description = lib.mdDoc ''
+        Enable or suppress job archiving:
+        `never` disables job archiving,
+        `as-flagged` archives jobs that
+        have been flagged for archiving by sendfax,
+        `always` forces archiving of all jobs.
+        See also sendfax(1) and faxqclean(8).
+      '';
+    };
+    faxqclean.doneqMinutes = mkOption {
+      type = ints.positive;
+      default = 15;
+      example = literalExpression "24*60";
+      description = lib.mdDoc ''
+        Set the job
+        age threshold (in minutes) that controls how long
+        jobs may reside in the doneq directory.
+      '';
+    };
+    faxqclean.docqMinutes = mkOption {
+      type = ints.positive;
+      default = 60;
+      example = literalExpression "24*60";
+      description = lib.mdDoc ''
+        Set the document
+        age threshold (in minutes) that controls how long
+        unreferenced files may reside in the docq directory.
+      '';
+    };
+
+  };
+
+
+  config.services.hylafax =
+    mkIf
+    (config.services.hylafax.enable)
+    (mkMerge [ defaultConfig localConfig ])
+  ;
+
+}
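Putting these options together, a minimal server needs at least one modem plus the user access file referenced by `userAccessFile`. A hedged sketch (phone numbers, modem device and type are placeholders):

```nix
# Minimal sketch of a HylaFAX server configuration using this module.
{
  services.hylafax = {
    enable = true;
    countryCode = "49";
    areaCode = "30";
    modems.ttyS1 = {
      type = "cirrus";                # modem config file under spool/config
      config = {
        FAXNumber = "123456";
        LocalIdentifier = "Example";
      };
    };
  };
  # hosts.hfaxd must exist and be readable only by the uucp user;
  # this permits access for all users (see the userAccessFile description).
  environment.etc."hosts.hfaxd" = {
    mode = "0600";
    user = "uucp";
    text = ".*";
  };
}
```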
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/spool.sh b/nixpkgs/nixos/modules/services/networking/hylafax/spool.sh
new file mode 100755
index 000000000000..8b723df77df9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/spool.sh
@@ -0,0 +1,111 @@
+#! @runtimeShell@ -e
+
+# The following lines create/update the HylaFAX spool directory:
+# Subdirectories/files with persistent data are kept,
+# other directories/files are removed/recreated,
+# mostly from the template spool
+# directory in the HylaFAX package.
+
+# This block explains how the spool area is
+# derived from the spool template in the HylaFAX package:
+#
+#                  + capital letter: directory; file otherwise
+#                  + P/p: persistent directory
+#                  + F/f: directory with symlinks per entry
+#                  + T/t: temporary data
+#                  + S/s: single symlink into package
+#                  |
+#                  | + u: change ownership to uucp:uucp
+#                  | + U: ..also change access mode to user-only
+#                  | |
+# archive          P U
+# bin              S
+# client           T u  (client connection info)
+# config           S
+# COPYRIGHT        s
+# dev              T u  (maybe some FIFOs)
+# docq             P U
+# doneq            P U
+# etc              F    contains customized config files!
+# etc/hosts.hfaxd  f
+# etc/xferfaxlog   f
+# info             P u  (database of called devices)
+# log              P u  (communication logs)
+# pollq            P U
+# recvq            P u
+# sendq            P U
+# status           T u  (modem status info files)
+# tmp              T U
+
+
+shopt -s dotglob  # if bash sees "*", it also includes dot files
+lnsym () { ln --symbolic "$@" ; }
+lnsymfrc () { ln --symbolic --force "$@" ; }
+cprd () { cp --remove-destination "$@" ; }
+update () { install --owner=@faxuser@ --group=@faxgroup@ "$@" ; }
+
+
+## create/update spooling area
+
+update --mode=0750 -d "@spoolAreaPath@"
+cd "@spoolAreaPath@"
+
+persist=(archive docq doneq info log pollq recvq sendq)
+
+# remove entries that don't belong here
+touch dummy  # ensure "*" resolves to something
+for k in *
+do
+  keep=0
+  for j in "${persist[@]}" xferfaxlog clientlog faxcron.lastrun
+  do
+    if test "$k" == "$j"
+    then
+      keep=1
+      break
+    fi
+  done
+  if test "$keep" == "0"
+  then
+    rm --recursive "$k"
+  fi
+done
+
+# create persistent data directories (unless they exist already)
+update --mode=0700 -d "${persist[@]}"
+chmod 0755 info log recvq
+
+# create ``xferfaxlog``, ``faxcron.lastrun``, ``clientlog``
+touch clientlog faxcron.lastrun xferfaxlog
+chown @faxuser@:@faxgroup@ clientlog faxcron.lastrun xferfaxlog
+
+# create symlinks for frozen directories/files
+lnsym --target-directory=. "@hylafaxplus@"/spool/{COPYRIGHT,bin,config}
+
+# create empty temporary directories
+update --mode=0700 -d client dev status
+update -d tmp
+
+
+## create and fill etc
+
+install -d "@spoolAreaPath@/etc"
+cd "@spoolAreaPath@/etc"
+
+# create symlinks to all files in template's etc
+lnsym --target-directory=. "@hylafaxplus@/spool/etc"/*
+
+# set LOCKDIR in setup.cache
+sed --regexp-extended 's|^(UUCP_LOCKDIR=).*$|\1'"'@lockPath@'|g" --in-place setup.cache
+
+# etc/{xferfaxlog,lastrun} are stored in the spool root
+lnsymfrc --target-directory=. ../xferfaxlog
+lnsymfrc --no-target-directory ../faxcron.lastrun lastrun
+
+# etc/hosts.hfaxd is provided by the NixOS configuration
+lnsymfrc --no-target-directory "@userAccessFile@" hosts.hfaxd
+
+# etc/config and etc/config.${DEVID} must be copied:
+# hfaxd reads these files after locking itself up in a chroot
+cprd --no-target-directory "@globalConfigPath@" config
+cprd --target-directory=. "@modemConfigPath@"/*
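The `spoolExtraInit` option from options.nix runs inside this freshly prepared spool directory, which makes it the natural hook for site-specific tweaks. A sketch, reusing the option's own example:

```nix
# Sketch: extra shell code executed in the spool area after spool.sh finishes.
services.hylafax.spoolExtraInit = ''
  chmod 0755 .  # everyone may read my faxes
'';
```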
diff --git a/nixpkgs/nixos/modules/services/networking/hylafax/systemd.nix b/nixpkgs/nixos/modules/services/networking/hylafax/systemd.nix
new file mode 100644
index 000000000000..df6d0f49eec4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/hylafax/systemd.nix
@@ -0,0 +1,249 @@
+{ config, lib, pkgs, ... }:
+
+
+let
+
+  inherit (lib) mkIf mkMerge;
+  inherit (lib) concatStringsSep optionalString;
+
+  cfg = config.services.hylafax;
+  mapModems = lib.forEach (lib.attrValues cfg.modems);
+
+  mkConfigFile = name: conf:
+    # creates hylafax config file,
+    # makes sure "Include" is listed *first*
+    let
+      mkLines = lib.flip lib.pipe [
+        (lib.mapAttrsToList (key: map (val: "${key}: ${val}")))
+        lib.concatLists
+      ];
+      include = mkLines { Include = conf.Include or []; };
+      other = mkLines ( conf // { Include = []; } );
+    in
+      pkgs.writeText "hylafax-config${name}"
+      (concatStringsSep "\n" (include ++ other));
+
+  globalConfigPath = mkConfigFile "" cfg.faxqConfig;
+
+  modemConfigPath =
+    let
+      mkModemConfigFile = { config, name, ... }:
+        mkConfigFile ".${name}"
+        (cfg.commonModemConfig // config);
+      mkLine = { name, type, ... }@modem: ''
+        # check if modem config file exists:
+        test -f "${pkgs.hylafaxplus}/spool/config/${type}"
+        ln \
+          --symbolic \
+          --no-target-directory \
+          "${mkModemConfigFile modem}" \
+          "$out/config.${name}"
+      '';
+    in
+      pkgs.runCommand "hylafax-config-modems" { preferLocalBuild = true; }
+      ''mkdir --parents "$out/" ${concatStringsSep "\n" (mapModems mkLine)}'';
+
+  setupSpoolScript = pkgs.substituteAll {
+    name = "hylafax-setup-spool.sh";
+    src = ./spool.sh;
+    isExecutable = true;
+    faxuser = "uucp";
+    faxgroup = "uucp";
+    lockPath = "/var/lock";
+    inherit globalConfigPath modemConfigPath;
+    inherit (cfg) sendmailPath spoolAreaPath userAccessFile;
+    inherit (pkgs) hylafaxplus runtimeShell;
+  };
+
+  waitFaxqScript = pkgs.substituteAll {
+    # This script checks the modems status files
+    # and waits until all modems report readiness.
+    name = "hylafax-faxq-wait-start.sh";
+    src = ./faxq-wait.sh;
+    isExecutable = true;
+    timeoutSec = toString 10;
+    inherit (cfg) spoolAreaPath;
+    inherit (pkgs) runtimeShell;
+  };
+
+  sockets.hylafax-hfaxd = {
+    description = "HylaFAX server socket";
+    documentation = [ "man:hfaxd(8)" ];
+    wantedBy = [ "multi-user.target" ];
+    listenStreams = [ "127.0.0.1:4559" ];
+    socketConfig.FreeBind = true;
+    socketConfig.Accept = true;
+  };
+
+  paths.hylafax-faxq = {
+    description = "HylaFAX queue manager sendq watch";
+    documentation = [ "man:faxq(8)" "man:sendq(5)" ];
+    wantedBy = [ "multi-user.target" ];
+    pathConfig.PathExistsGlob = [ "${cfg.spoolAreaPath}/sendq/q*" ];
+  };
+
+  timers = mkMerge [
+    (
+      mkIf (cfg.faxcron.enable.frequency!=null)
+      { hylafax-faxcron.timerConfig.Persistent = true; }
+    )
+    (
+      mkIf (cfg.faxqclean.enable.frequency!=null)
+      { hylafax-faxqclean.timerConfig.Persistent = true; }
+    )
+  ];
+
+  hardenService =
+    # Add some common systemd service hardening settings,
+    # but allow each service (here) to override
+    # settings by explicitly setting those to `null`.
+    # More hardening would be nice but makes
+    # customizing hylafax setups very difficult.
+    # If at all, it should only be added along
+    # with some options to customize it.
+    let
+      hardening = {
+        PrivateDevices = true;  # breaks /dev/tty...
+        PrivateNetwork = true;
+        PrivateTmp = true;
+        #ProtectClock = true;  # breaks /dev/tty... (why?)
+        ProtectControlGroups = true;
+        #ProtectHome = true;  # breaks custom spool dirs
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        #ProtectSystem = "strict";  # breaks custom spool dirs
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+      };
+      filter = key: value: (value != null) || ! (lib.hasAttr key hardening);
+      apply = service: lib.filterAttrs filter (hardening // (service.serviceConfig or {}));
+    in
+      service: service // { serviceConfig = apply service; };
+
+  services.hylafax-spool = {
+    description = "HylaFAX spool area preparation";
+    documentation = [ "man:hylafax-server(4)" ];
+    script = ''
+      ${setupSpoolScript}
+      cd "${cfg.spoolAreaPath}"
+      ${cfg.spoolExtraInit}
+      if ! test -f "${cfg.spoolAreaPath}/etc/hosts.hfaxd"
+      then
+        echo hosts.hfaxd is missing
+        exit 1
+      fi
+    '';
+    serviceConfig.ExecStop = "${setupSpoolScript}";
+    serviceConfig.RemainAfterExit = true;
+    serviceConfig.Type = "oneshot";
+    unitConfig.RequiresMountsFor = [ cfg.spoolAreaPath ];
+  };
+
+  services.hylafax-faxq = {
+    description = "HylaFAX queue manager";
+    documentation = [ "man:faxq(8)" ];
+    requires = [ "hylafax-spool.service" ];
+    after = [ "hylafax-spool.service" ];
+    wants = mapModems ( { name, ... }: "hylafax-faxgetty@${name}.service" );
+    wantedBy = mkIf cfg.autostart [ "multi-user.target" ];
+    serviceConfig.Type = "forking";
+    serviceConfig.ExecStart = ''${pkgs.hylafaxplus}/spool/bin/faxq -q "${cfg.spoolAreaPath}"'';
+    # This delays the "readiness" of this service until
+    # all modems are initialized (or a timeout is reached).
+    # Otherwise, sending a fax with the fax service
+    # stopped will always yield a failed send attempt:
+    # The fax service is started when the job is created with
+    # `sendfax`, but modems need some time to initialize.
+    serviceConfig.ExecStartPost = [ "${waitFaxqScript}" ];
+    # faxquit fails if the pipe is already gone
+    # (e.g. the service is already stopping)
+    serviceConfig.ExecStop = ''-${pkgs.hylafaxplus}/spool/bin/faxquit -q "${cfg.spoolAreaPath}"'';
+    # disable some systemd hardening settings
+    serviceConfig.PrivateDevices = null;
+    serviceConfig.RestrictRealtime = null;
+  };
+
+  services."hylafax-hfaxd@" = {
+    description = "HylaFAX server";
+    documentation = [ "man:hfaxd(8)" ];
+    after = [ "hylafax-faxq.service" ];
+    requires = [ "hylafax-faxq.service" ];
+    serviceConfig.StandardInput = "socket";
+    serviceConfig.StandardOutput = "socket";
+    serviceConfig.ExecStart = ''${pkgs.hylafaxplus}/spool/bin/hfaxd -q "${cfg.spoolAreaPath}" -d -I'';
+    unitConfig.RequiresMountsFor = [ cfg.userAccessFile ];
+    # disable some systemd hardening settings
+    serviceConfig.PrivateDevices = null;
+    serviceConfig.PrivateNetwork = null;
+  };
+
+  services.hylafax-faxcron = rec {
+    description = "HylaFAX spool area maintenance";
+    documentation = [ "man:faxcron(8)" ];
+    after = [ "hylafax-spool.service" ];
+    requires = [ "hylafax-spool.service" ];
+    wantedBy = mkIf cfg.faxcron.enable.spoolInit requires;
+    startAt = mkIf (cfg.faxcron.enable.frequency!=null) cfg.faxcron.enable.frequency;
+    serviceConfig.ExecStart = concatStringsSep " " [
+      "${pkgs.hylafaxplus}/spool/bin/faxcron"
+      ''-q "${cfg.spoolAreaPath}"''
+      ''-info ${toString cfg.faxcron.infoDays}''
+      ''-log  ${toString cfg.faxcron.logDays}''
+      ''-rcv  ${toString cfg.faxcron.rcvDays}''
+    ];
+  };
+
+  services.hylafax-faxqclean = rec {
+    description = "HylaFAX spool area queue cleaner";
+    documentation = [ "man:faxqclean(8)" ];
+    after = [ "hylafax-spool.service" ];
+    requires = [ "hylafax-spool.service" ];
+    wantedBy = mkIf cfg.faxqclean.enable.spoolInit requires;
+    startAt = mkIf (cfg.faxqclean.enable.frequency!=null) cfg.faxqclean.enable.frequency;
+    serviceConfig.ExecStart = concatStringsSep " " [
+      "${pkgs.hylafaxplus}/spool/bin/faxqclean"
+      ''-q "${cfg.spoolAreaPath}"''
+      "-v"
+      (optionalString (cfg.faxqclean.archiving!="never") "-a")
+      (optionalString (cfg.faxqclean.archiving=="always")  "-A")
+      ''-j ${toString (cfg.faxqclean.doneqMinutes*60)}''
+      ''-d ${toString (cfg.faxqclean.docqMinutes*60)}''
+    ];
+  };
+
+  mkFaxgettyService = { name, ... }:
+    lib.nameValuePair "hylafax-faxgetty@${name}" rec {
+      description = "HylaFAX faxgetty for %I";
+      documentation = [ "man:faxgetty(8)" ];
+      bindsTo = [ "dev-%i.device" ];
+      requires = [ "hylafax-spool.service" ];
+      after = bindsTo ++ requires;
+      before = [ "hylafax-faxq.service" "getty.target" ];
+      unitConfig.StopWhenUnneeded = true;
+      unitConfig.AssertFileNotEmpty = "${cfg.spoolAreaPath}/etc/config.%I";
+      serviceConfig.UtmpIdentifier = "%I";
+      serviceConfig.TTYPath = "/dev/%I";
+      serviceConfig.Restart = "always";
+      serviceConfig.KillMode = "process";
+      serviceConfig.IgnoreSIGPIPE = false;
+      serviceConfig.ExecStart = ''-${pkgs.hylafaxplus}/spool/bin/faxgetty -q "${cfg.spoolAreaPath}" /dev/%I'';
+      # faxquit fails if the pipe is already gone
+      # (e.g. the service is already stopping)
+      serviceConfig.ExecStop = ''-${pkgs.hylafaxplus}/spool/bin/faxquit -q "${cfg.spoolAreaPath}" %I'';
+      # disable some systemd hardening settings
+      serviceConfig.PrivateDevices = null;
+      serviceConfig.RestrictRealtime = null;
+    };
+
+  modemServices =
+    lib.listToAttrs (mapModems mkFaxgettyService);
+
+in
+
+{
+  config.systemd = mkIf cfg.enable {
+    inherit sockets timers paths;
+    services = lib.mapAttrs (lib.const hardenService) (services // modemServices);
+  };
+}
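Because hfaxd is socket-activated and the module binds it to 127.0.0.1:4559 by default, remote clients can be admitted by overriding the socket's listen addresses, as the `userAccessFile` description suggests. A sketch (192.0.2.10 is a placeholder address; `lib` is assumed to be in scope of the configuration module):

```nix
# Sketch: accept hfaxd connections on a LAN address instead of localhost only.
systemd.sockets.hylafax-hfaxd.listenStreams = lib.mkForce [ "192.0.2.10:4559" ];
```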
diff --git a/nixpkgs/nixos/modules/services/networking/i2p.nix b/nixpkgs/nixos/modules/services/networking/i2p.nix
new file mode 100644
index 000000000000..c5c7a955cbd4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/i2p.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.i2p;
+  homeDir = "/var/lib/i2p";
+in {
+  ###### interface
+  options.services.i2p.enable = mkEnableOption (lib.mdDoc "I2P router");
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    users.users.i2p = {
+      group = "i2p";
+      description = "i2p User";
+      home = homeDir;
+      createHome = true;
+      uid = config.ids.uids.i2p;
+    };
+    users.groups.i2p.gid = config.ids.gids.i2p;
+    systemd.services.i2p = {
+      description = "I2P router with administration interface for hidden services";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "i2p";
+        WorkingDirectory = homeDir;
+        Restart = "on-abort";
+        ExecStart = "${pkgs.i2p}/bin/i2prouter-plain";
+      };
+    };
+  };
+}
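The module itself only sets up the user, group and service; everything else is configured through the router console once it runs. A sketch:

```nix
# Sketch: enable the Java I2P router.
services.i2p.enable = true;
# The router console is then usually reachable at http://127.0.0.1:7657/
# (upstream I2P default, not something this module configures).
```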
diff --git a/nixpkgs/nixos/modules/services/networking/i2pd.nix b/nixpkgs/nixos/modules/services/networking/i2pd.nix
new file mode 100644
index 000000000000..808e7b66d36e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/i2pd.nix
@@ -0,0 +1,695 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.i2pd;
+
+  homeDir = "/var/lib/i2pd";
+
+  strOpt = k: v: k + " = " + v;
+  boolOpt = k: v: k + " = " + boolToString v;
+  intOpt = k: v: k + " = " + toString v;
+  lstOpt = k: xs: k + " = " + concatStringsSep "," xs;
+  optionalNullString = o: s: optional (s != null) (strOpt o s);
+  optionalNullBool = o: b: optional (b != null) (boolOpt o b);
+  optionalNullInt = o: i: optional (i != null) (intOpt o i);
+  optionalEmptyList = o: l: optional ([] != l) (lstOpt o l);
+
+  mkEnableTrueOption = name: mkEnableOption (lib.mdDoc name) // { default = true; };
+
+  mkEndpointOpt = name: addr: port: {
+    enable = mkEnableOption (lib.mdDoc name);
+    name = mkOption {
+      type = types.str;
+      default = name;
+      description = lib.mdDoc "The endpoint name.";
+    };
+    address = mkOption {
+      type = types.str;
+      default = addr;
+      description = lib.mdDoc "Bind address for ${name} endpoint.";
+    };
+    port = mkOption {
+      type = types.port;
+      default = port;
+      description = lib.mdDoc "Bind port for ${name} endpoint.";
+    };
+  };
+
+  i2cpOpts = name: {
+    length = mkOption {
+      type = types.int;
+      description = lib.mdDoc "Guaranteed minimum hops for ${name} tunnels.";
+      default = 3;
+    };
+    quantity = mkOption {
+      type = types.int;
+      description = lib.mdDoc "Number of simultaneous ${name} tunnels.";
+      default = 5;
+    };
+  };
+
+  mkKeyedEndpointOpt = name: addr: port: keyloc:
+    (mkEndpointOpt name addr port) // {
+      keys = mkOption {
+        type = with types; nullOr str;
+        default = keyloc;
+        description = lib.mdDoc ''
+          File to persist ${lib.toUpper name} keys.
+        '';
+      };
+      inbound = i2cpOpts name;
+      outbound = i2cpOpts name;
+      latency.min = mkOption {
+        type = with types; nullOr int;
+        description = lib.mdDoc "Min latency for tunnels.";
+        default = null;
+      };
+      latency.max = mkOption {
+        type = with types; nullOr int;
+        description = lib.mdDoc "Max latency for tunnels.";
+        default = null;
+      };
+    };
+
+  commonTunOpts = name: {
+    outbound = i2cpOpts name;
+    inbound = i2cpOpts name;
+    crypto.tagsToSend = mkOption {
+      type = types.int;
+      description = lib.mdDoc "Number of ElGamal/AES tags to send.";
+      default = 40;
+    };
+    destination = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Remote endpoint, I2P hostname or b32.i2p address.";
+    };
+    keys = mkOption {
+      type = types.str;
+      default = name + "-keys.dat";
+      description = lib.mdDoc "Keyset used for tunnel identity.";
+    };
+  } // mkEndpointOpt name "127.0.0.1" 0;
+
+  sec = name: "\n[" + name + "]";
+  notice = "# DO NOT EDIT -- this file has been generated automatically.";
+  i2pdConf = let
+    opts = [
+      notice
+      (strOpt "loglevel" cfg.logLevel)
+      (boolOpt "logclftime" cfg.logCLFTime)
+      (boolOpt "ipv4" cfg.enableIPv4)
+      (boolOpt "ipv6" cfg.enableIPv6)
+      (boolOpt "notransit" cfg.notransit)
+      (boolOpt "floodfill" cfg.floodfill)
+      (intOpt "netid" cfg.netid)
+    ] ++ (optionalNullInt "bandwidth" cfg.bandwidth)
+      ++ (optionalNullInt "port" cfg.port)
+      ++ (optionalNullString "family" cfg.family)
+      ++ (optionalNullString "datadir" cfg.dataDir)
+      ++ (optionalNullInt "share" cfg.share)
+      ++ (optionalNullBool "ssu" cfg.ssu)
+      ++ (optionalNullBool "ntcp" cfg.ntcp)
+      ++ (optionalNullString "ntcpproxy" cfg.ntcpProxy)
+      ++ (optionalNullString "ifname" cfg.ifname)
+      ++ (optionalNullString "ifname4" cfg.ifname4)
+      ++ (optionalNullString "ifname6" cfg.ifname6)
+      ++ [
+      (sec "limits")
+      (intOpt "transittunnels" cfg.limits.transittunnels)
+      (intOpt "coresize" cfg.limits.coreSize)
+      (intOpt "openfiles" cfg.limits.openFiles)
+      (intOpt "ntcphard" cfg.limits.ntcpHard)
+      (intOpt "ntcpsoft" cfg.limits.ntcpSoft)
+      (intOpt "ntcpthreads" cfg.limits.ntcpThreads)
+      (sec "upnp")
+      (boolOpt "enabled" cfg.upnp.enable)
+      (sec "precomputation")
+      (boolOpt "elgamal" cfg.precomputation.elgamal)
+      (sec "reseed")
+      (boolOpt "verify" cfg.reseed.verify)
+    ] ++ (optionalNullString "file" cfg.reseed.file)
+      ++ (optionalEmptyList "urls" cfg.reseed.urls)
+      ++ (optionalNullString "floodfill" cfg.reseed.floodfill)
+      ++ (optionalNullString "zipfile" cfg.reseed.zipfile)
+      ++ (optionalNullString "proxy" cfg.reseed.proxy)
+      ++ [
+      (sec "trust")
+      (boolOpt "enabled" cfg.trust.enable)
+      (boolOpt "hidden" cfg.trust.hidden)
+    ] ++ (optionalEmptyList "routers" cfg.trust.routers)
+      ++ (optionalNullString "family" cfg.trust.family)
+      ++ [
+      (sec "websockets")
+      (boolOpt "enabled" cfg.websocket.enable)
+      (strOpt "address" cfg.websocket.address)
+      (intOpt "port" cfg.websocket.port)
+      (sec "exploratory")
+      (intOpt "inbound.length" cfg.exploratory.inbound.length)
+      (intOpt "inbound.quantity" cfg.exploratory.inbound.quantity)
+      (intOpt "outbound.length" cfg.exploratory.outbound.length)
+      (intOpt "outbound.quantity" cfg.exploratory.outbound.quantity)
+      (sec "ntcp2")
+      (boolOpt "enabled" cfg.ntcp2.enable)
+      (boolOpt "published" cfg.ntcp2.published)
+      (intOpt "port" cfg.ntcp2.port)
+      (sec "addressbook")
+      (strOpt "defaulturl" cfg.addressbook.defaulturl)
+    ] ++ (optionalEmptyList "subscriptions" cfg.addressbook.subscriptions)
+      ++ [
+      (sec "meshnets")
+      (boolOpt "yggdrasil" cfg.yggdrasil.enable)
+    ] ++ (optionalNullString "yggaddress" cfg.yggdrasil.address)
+      ++ (flip map
+      (collect (proto: proto ? port && proto ? address) cfg.proto)
+      (proto: let protoOpts = [
+        (sec proto.name)
+        (boolOpt "enabled" proto.enable)
+        (strOpt "address" proto.address)
+        (intOpt "port" proto.port)
+        ] ++ (optionals (proto ? keys) (optionalNullString "keys" proto.keys))
+        ++ (optionals (proto ? auth) (optionalNullBool "auth" proto.auth))
+        ++ (optionals (proto ? user) (optionalNullString "user" proto.user))
+        ++ (optionals (proto ? pass) (optionalNullString "pass" proto.pass))
+        ++ (optionals (proto ? strictHeaders) (optionalNullBool "strictheaders" proto.strictHeaders))
+        ++ (optionals (proto ? hostname) (optionalNullString "hostname" proto.hostname))
+        ++ (optionals (proto ? outproxy) (optionalNullString "outproxy" proto.outproxy))
+        ++ (optionals (proto ? outproxyPort) (optionalNullInt "outproxyport" proto.outproxyPort))
+        ++ (optionals (proto ? outproxyEnable) (optionalNullBool "outproxy.enabled" proto.outproxyEnable));
+        in (concatStringsSep "\n" protoOpts)
+      ));
+  in
+    pkgs.writeText "i2pd.conf" (concatStringsSep "\n" opts);
+
+  tunnelConf = let opts = [
+    notice
+    (flip map
+      (collect (tun: tun ? port && tun ? destination) cfg.outTunnels)
+      (tun: let outTunOpts = [
+        (sec tun.name)
+        "type = client"
+        (intOpt "port" tun.port)
+        (strOpt "destination" tun.destination)
+        ] ++ (optionals (tun ? destinationPort) (optionalNullInt "destinationport" tun.destinationPort))
+        ++ (optionals (tun ? keys) (optionalNullString "keys" tun.keys))
+        ++ (optionals (tun ? address) (optionalNullString "address" tun.address))
+        ++ (optionals (tun ? inbound.length) (optionalNullInt "inbound.length" tun.inbound.length))
+        ++ (optionals (tun ? inbound.quantity) (optionalNullInt "inbound.quantity" tun.inbound.quantity))
+        ++ (optionals (tun ? outbound.length) (optionalNullInt "outbound.length" tun.outbound.length))
+        ++ (optionals (tun ? outbound.quantity) (optionalNullInt "outbound.quantity" tun.outbound.quantity))
+        ++ (optionals (tun ? crypto.tagsToSend) (optionalNullInt "crypto.tagstosend" tun.crypto.tagsToSend));
+        in concatStringsSep "\n" outTunOpts))
+    (flip map
+      (collect (tun: tun ? port && tun ? address) cfg.inTunnels)
+      (tun: let inTunOpts = [
+        (sec tun.name)
+        "type = server"
+        (intOpt "port" tun.port)
+        (strOpt "host" tun.address)
+      ] ++ (optionals (tun ? destination) (optionalNullString "destination" tun.destination))
+        ++ (optionals (tun ? keys) (optionalNullString "keys" tun.keys))
+        ++ (optionals (tun ? inPort) (optionalNullInt "inport" tun.inPort))
+        ++ (optionals (tun ? accessList) (optionalEmptyList "accesslist" tun.accessList));
+        in concatStringsSep "\n" inTunOpts))];
+    in pkgs.writeText "i2pd-tunnels.conf" (concatStringsSep "\n" (flatten opts));
+
+  i2pdFlags = concatStringsSep " " (
+    optional (cfg.address != null) ("--host=" + cfg.address) ++ [
+    "--service"
+    ("--conf=" + i2pdConf)
+    ("--tunconf=" + tunnelConf)
+  ]);
+
+in
+
+{
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "i2pd" "extIp" ] [ "services" "i2pd" "address" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.i2pd = {
+
+      enable = mkEnableOption (lib.mdDoc "I2Pd daemon") // {
+        description = lib.mdDoc ''
+          Enables I2Pd as a running service upon activation.
+          Please read <https://i2pd.readthedocs.io/en/latest/> for further
+          configuration help.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.i2pd;
+        defaultText = literalExpression "pkgs.i2pd";
+        description = lib.mdDoc ''
+          i2pd package to use.
+        '';
+      };
+
+      logLevel = mkOption {
+        type = types.enum ["debug" "info" "warn" "error"];
+        default = "error";
+        description = lib.mdDoc ''
+          The log level. {command}`i2pd` defaults to "info"
+          but that generates copious amounts of log messages.
+
+          We default to "error" which is similar to the default log
+          level of {command}`tor`.
+        '';
+      };
+
+      logCLFTime = mkEnableOption (lib.mdDoc "full CLF-formatted date and time to log");
+
+      address = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Your external IP or hostname.
+        '';
+      };
+
+      family = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Specify a family the router belongs to.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Alternative path to storage of i2pd data (RI, keys, peer profiles, ...)
+        '';
+      };
+
+      share = mkOption {
+        type = types.int;
+        default = 100;
+        description = lib.mdDoc ''
+          Limit of transit traffic as a percentage of the maximum bandwidth.
+        '';
+      };
+
+      ifname = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Network interface to bind to.
+        '';
+      };
+
+      ifname4 = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          IPv4 interface to bind to.
+        '';
+      };
+
+      ifname6 = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          IPv6 interface to bind to.
+        '';
+      };
+
+      ntcpProxy = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Proxy URL for NTCP transport.
+        '';
+      };
+
+      ntcp = mkEnableTrueOption "ntcp";
+      ssu = mkEnableTrueOption "ssu";
+
+      notransit = mkEnableOption (lib.mdDoc "notransit") // {
+        description = lib.mdDoc ''
+          Tells the router to not accept transit tunnels during startup.
+        '';
+      };
+
+      floodfill = mkEnableOption (lib.mdDoc "floodfill") // {
+        description = lib.mdDoc ''
+          Whether the router should act as a floodfill for the I2P network database.
+        '';
+      };
+
+      netid = mkOption {
+        type = types.int;
+        default = 2;
+        description = lib.mdDoc ''
+          I2P overlay netid.
+        '';
+      };
+
+      bandwidth = mkOption {
+        type = with types; nullOr int;
+        default = null;
+        description = lib.mdDoc ''
+           Set a router bandwidth limit integer in KBps.
+           If not set, {command}`i2pd` defaults to 32KBps.
+        '';
+      };
+
+      port = mkOption {
+        type = with types; nullOr int;
+        default = null;
+        description = lib.mdDoc ''
+          I2P listen port. If none is given, the router will pick a random port between 9111 and 30777.
+        '';
+      };
+
+      enableIPv4 = mkEnableTrueOption "IPv4 connectivity";
+      enableIPv6 = mkEnableOption (lib.mdDoc "IPv6 connectivity");
+      nat = mkEnableTrueOption "NAT bypass";
+
+      upnp.enable = mkEnableOption (lib.mdDoc "UPnP service discovery");
+      upnp.name = mkOption {
+        type = types.str;
+        default = "I2Pd";
+        description = lib.mdDoc ''
+          Name under which i2pd appears in the UPnP forwarding list.
+        '';
+      };
+
+      precomputation.elgamal = mkEnableTrueOption "Precomputed ElGamal tables" // {
+        description = lib.mdDoc ''
+          Whether to use precomputed tables for ElGamal.
+          {command}`i2pd` defaults to `false`
+          to save 64M of memory (at the cost of some performance).
+
+          We default to `true` as that is what most
+          users want anyway.
+        '';
+      };
+
+      reseed.verify = mkEnableOption (lib.mdDoc "SU3 signature verification");
+
+      reseed.file = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Full path to SU3 file to reseed from.
+        '';
+      };
+
+      reseed.urls = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Reseed URLs.
+        '';
+      };
+
+      reseed.floodfill = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Path to router info of floodfill to reseed from.
+        '';
+      };
+
+      reseed.zipfile = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Path to local .zip file to reseed from.
+        '';
+      };
+
+      reseed.proxy = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          URL for reseed proxy, supports http/socks.
+        '';
+      };
+
+      addressbook.defaulturl = mkOption {
+        type = types.str;
+        default = "http://joajgazyztfssty4w2on5oaqksz6tqoxbduy553y34mf4byv6gpq.b32.i2p/export/alive-hosts.txt";
+        description = lib.mdDoc ''
+          AddressBook subscription URL for initial setup.
+        '';
+      };
+      addressbook.subscriptions = mkOption {
+        type = with types; listOf str;
+        default = [
+          "http://inr.i2p/export/alive-hosts.txt"
+          "http://i2p-projekt.i2p/hosts.txt"
+          "http://stats.i2p/cgi-bin/newhosts.txt"
+        ];
+        description = lib.mdDoc ''
+          AddressBook subscription URLs.
+        '';
+      };
+
+      trust.enable = mkEnableOption (lib.mdDoc "explicit trust options");
+
+      trust.family = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Router Family to trust for first hops.
+        '';
+      };
+
+      trust.routers = mkOption {
+        type = with types; listOf str;
+        default = [];
+        description = lib.mdDoc ''
+          Only connect to the listed routers.
+        '';
+      };
+
+      trust.hidden = mkEnableOption (lib.mdDoc "router concealment");
+
+      websocket = mkEndpointOpt "websockets" "127.0.0.1" 7666;
+
+      exploratory.inbound = i2cpOpts "exploratory";
+      exploratory.outbound = i2cpOpts "exploratory";
+
+      ntcp2.enable = mkEnableTrueOption "NTCP2";
+      ntcp2.published = mkEnableOption (lib.mdDoc "NTCP2 publication");
+      ntcp2.port = mkOption {
+        type = types.port;
+        default = 0;
+        description = lib.mdDoc ''
+          Port to listen for incoming NTCP2 connections (0=auto).
+        '';
+      };
+
+      limits.transittunnels = mkOption {
+        type = types.int;
+        default = 2500;
+        description = lib.mdDoc ''
+          Maximum number of active transit sessions.
+        '';
+      };
+
+      limits.coreSize = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Maximum size of corefile in Kb (0 - use system limit).
+        '';
+      };
+
+      limits.openFiles = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Maximum number of open files (0 - use system default).
+        '';
+      };
+
+      limits.ntcpHard = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Hard limit on the number of active NTCP sessions.
+        '';
+      };
+
+      limits.ntcpSoft = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Threshold to start probabilistic backoff with NTCP sessions (default: use system limit).
+        '';
+      };
+
+      limits.ntcpThreads = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc ''
+          Maximum number of threads used by NTCP DH worker.
+        '';
+      };
+
+      yggdrasil.enable = mkEnableOption (lib.mdDoc "Yggdrasil");
+
+      yggdrasil.address = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Your local yggdrasil address. Specify it if you want to bind your router to a
+          particular address.
+        '';
+      };
+
+      proto.http = (mkEndpointOpt "http" "127.0.0.1" 7070) // {
+
+        auth = mkEnableOption (lib.mdDoc "webconsole authentication");
+
+        user = mkOption {
+          type = types.str;
+          default = "i2pd";
+          description = lib.mdDoc ''
+            Username for webconsole access.
+          '';
+        };
+
+        pass = mkOption {
+          type = types.str;
+          default = "i2pd";
+          description = lib.mdDoc ''
+            Password for webconsole access.
+          '';
+        };
+
+        strictHeaders = mkOption {
+          type = with types; nullOr bool;
+          default = null;
+          description = lib.mdDoc ''
+            Enable strict host checking on WebUI.
+          '';
+        };
+
+        hostname = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            Expected hostname for WebUI.
+          '';
+        };
+      };
+
+      proto.httpProxy = (mkKeyedEndpointOpt "httpproxy" "127.0.0.1" 4444 "httpproxy-keys.dat")
+      // {
+        outproxy = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc "Upstream outproxy bind address.";
+        };
+      };
+      proto.socksProxy = (mkKeyedEndpointOpt "socksproxy" "127.0.0.1" 4447 "socksproxy-keys.dat")
+      // {
+        outproxyEnable = mkEnableOption (lib.mdDoc "SOCKS outproxy");
+        outproxy = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Upstream outproxy bind address.";
+        };
+        outproxyPort = mkOption {
+          type = types.int;
+          default = 4444;
+          description = lib.mdDoc "Upstream outproxy bind port.";
+        };
+      };
+
+      proto.sam = mkEndpointOpt "sam" "127.0.0.1" 7656;
+      proto.bob = mkEndpointOpt "bob" "127.0.0.1" 2827;
+      proto.i2cp = mkEndpointOpt "i2cp" "127.0.0.1" 7654;
+      proto.i2pControl = mkEndpointOpt "i2pcontrol" "127.0.0.1" 7650;
+
+      outTunnels = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule (
+          { name, ... }: {
+            options = {
+              destinationPort = mkOption {
+                type = with types; nullOr int;
+                default = null;
+                description = lib.mdDoc "Connect to particular port at destination.";
+              };
+            } // commonTunOpts name;
+            config = {
+              name = mkDefault name;
+            };
+          }
+        ));
+        description = lib.mdDoc ''
+          Connect to a remote I2P destination as a client and establish a local accept endpoint.
+        '';
+      };
+
+      inTunnels = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule (
+          { name, ... }: {
+            options = {
+              inPort = mkOption {
+                type = types.int;
+                default = 0;
+                description = lib.mdDoc "Service port. Default to the tunnel's listen port.";
+              };
+              accessList = mkOption {
+                type = with types; listOf str;
+                default = [];
+                description = lib.mdDoc "I2P nodes that are allowed to connect to this service.";
+              };
+            } // commonTunOpts name;
+            config = {
+              name = mkDefault name;
+            };
+          }
+        ));
+        description = lib.mdDoc ''
+          Serve something on the I2P network at the given port and delegate requests to address:inPort.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.i2pd = {
+      group = "i2pd";
+      description = "I2Pd User";
+      home = homeDir;
+      createHome = true;
+      uid = config.ids.uids.i2pd;
+    };
+
+    users.groups.i2pd.gid = config.ids.gids.i2pd;
+
+    systemd.services.i2pd = {
+      description = "Minimal I2P router";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig =
+      {
+        User = "i2pd";
+        WorkingDirectory = homeDir;
+        Restart = "on-abort";
+        ExecStart = "${cfg.package}/bin/i2pd ${i2pdFlags}";
+      };
+    };
+  };
+}
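A typical deployment enables one of the client endpoints and declares tunnels via `outTunnels`/`inTunnels`. A hedged sketch (the destination hostname and the local port are placeholders; only options defined above are used):

```nix
# Sketch: i2pd with the local HTTP proxy and one client tunnel.
services.i2pd = {
  enable = true;
  proto.httpProxy.enable = true;       # HTTP proxy on 127.0.0.1:4444 by default
  outTunnels.smtp-example = {
    port = 7659;                       # local accept port
    destination = "smtp.postman.i2p";  # example remote I2P destination
  };
};
```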
diff --git a/nixpkgs/nixos/modules/services/networking/icecream/daemon.nix b/nixpkgs/nixos/modules/services/networking/icecream/daemon.nix
new file mode 100644
index 000000000000..fdd7a139c2fa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/icecream/daemon.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.icecream.daemon;
+in {
+
+  ###### interface
+
+  options = {
+
+    services.icecream.daemon = {
+
+      enable = mkEnableOption (lib.mdDoc "Icecream Daemon");
+
+      openFirewall = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to automatically open receive port in the firewall.
+        '';
+      };
+
+      openBroadcast = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to automatically open the firewall for scheduler discovery.
+        '';
+      };
+
+      cacheLimit = mkOption {
+        type = types.ints.u16;
+        default = 256;
+        description = lib.mdDoc ''
+          Maximum size in Megabytes of cache used to store compile environments of compile clients.
+        '';
+      };
+
+      netName = mkOption {
+        type = types.str;
+        default = "ICECREAM";
+        description = lib.mdDoc ''
+          Network name to connect to. A scheduler with the same name needs to be running.
+        '';
+      };
+
+      noRemote = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Prevent jobs from other nodes being scheduled on this daemon.
+        '';
+      };
+
+      schedulerHost = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Explicit scheduler hostname, useful in firewalled environments.
+
+          Uses scheduler autodiscovery via broadcast if set to null.
+        '';
+      };
+
+      maxProcesses = mkOption {
+        type = types.nullOr types.ints.u16;
+        default = null;
+        description = lib.mdDoc ''
+          Maximum number of compile jobs started in parallel for this daemon.
+
+          Uses the number of CPUs if set to null.
+        '';
+      };
+
+      nice = mkOption {
+        type = types.int;
+        default = 5;
+        description = lib.mdDoc ''
+          The level of niceness to use.
+        '';
+      };
+
+      hostname = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Hostname of the daemon in the icecream infrastructure.
+
+          Uses the hostname retrieved via uname if set to null.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "icecc";
+        description = lib.mdDoc ''
+          User to run the icecream daemon as. Set to root to enable receiving
+          remote compile environments.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.icecream;
+        defaultText = literalExpression "pkgs.icecream";
+        type = types.package;
+        description = lib.mdDoc "Icecream package to use.";
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Additional command line parameters.";
+        example = [ "-v" ];
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ 10245 ];
+    networking.firewall.allowedUDPPorts = mkIf cfg.openBroadcast [ 8765 ];
+
+    systemd.services.icecc-daemon = {
+      description = "Icecream compile daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = escapeShellArgs ([
+          "${getBin cfg.package}/bin/iceccd"
+          "-b" "$STATE_DIRECTORY"
+          "-u" "icecc"
+          (toString cfg.nice)
+        ]
+        ++ optionals (cfg.schedulerHost != null) ["-s" cfg.schedulerHost]
+        ++ optionals (cfg.netName != null) [ "-n" cfg.netName ]
+        ++ optionals (cfg.cacheLimit != null) [ "--cache-limit" (toString cfg.cacheLimit) ]
+        ++ optionals (cfg.maxProcesses != null) [ "-m" (toString cfg.maxProcesses) ]
+        ++ optionals (cfg.hostname != null) [ "-N" (cfg.hostname) ]
+        ++ optional  cfg.noRemote "--no-remote"
+        ++ cfg.extraArgs);
+        DynamicUser = true;
+        User = "icecc";
+        Group = "icecc";
+        StateDirectory = "icecc";
+        RuntimeDirectory = "icecc";
+        AmbientCapabilities = "CAP_SYS_CHROOT";
+        CapabilityBoundingSet = "CAP_SYS_CHROOT";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ emantor ];
+}
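On a compile node the daemon mostly needs the two firewall switches (which have no defaults) and, optionally, a pinned scheduler. A sketch:

```nix
# Sketch: icecream compile node discovering its scheduler via broadcast.
services.icecream.daemon = {
  enable = true;
  openFirewall = true;    # remote compile jobs arrive on TCP 10245
  openBroadcast = true;   # scheduler discovery uses UDP 8765
  # schedulerHost = "10.0.0.1";  # placeholder; set this to pin a scheduler
};
```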
diff --git a/nixpkgs/nixos/modules/services/networking/icecream/scheduler.nix b/nixpkgs/nixos/modules/services/networking/icecream/scheduler.nix
new file mode 100644
index 000000000000..33aee1bb19cc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/icecream/scheduler.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.icecream.scheduler;
+in {
+
+  ###### interface
+
+  options = {
+
+    services.icecream.scheduler = {
+      enable = mkEnableOption (lib.mdDoc "Icecream Scheduler");
+
+      netName = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Network name for the icecream scheduler.
+
+          Uses the default ICECREAM if null.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8765;
+        description = lib.mdDoc ''
+          Server port to listen for icecream daemon requests.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to automatically open the daemon port in the firewall.
+        '';
+      };
+
+      openTelnet = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open the telnet TCP port on 8766.
+        '';
+      };
+
+      persistentClientConnection = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to prevent clients from connecting to a better scheduler.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.icecream;
+        defaultText = literalExpression "pkgs.icecream";
+        type = types.package;
+        description = lib.mdDoc "Icecream package to use.";
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Additional command line parameters";
+        example = [ "-v" ];
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkMerge [
+      (mkIf cfg.openFirewall [ cfg.port ])
+      (mkIf cfg.openTelnet [ 8766 ])
+    ];
+
+    systemd.services.icecc-scheduler = {
+      description = "Icecream scheduling server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = escapeShellArgs ([
+          "${getBin cfg.package}/bin/icecc-scheduler"
+          "-p" (toString cfg.port)
+        ]
+        ++ optionals (cfg.netName != null) [ "-n" (toString cfg.netName) ]
+        ++ optional cfg.persistentClientConnection "-r"
+        ++ cfg.extraArgs);
+
+        DynamicUser = true;
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ emantor ];
+}
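The scheduler side is similarly small; note that `openFirewall` has no default and must be set explicitly. A sketch:

```nix
# Sketch: icecream scheduler reachable by daemons on the default port 8765.
services.icecream.scheduler = {
  enable = true;
  openFirewall = true;  # open cfg.port (8765) for the daemons
};
```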
diff --git a/nixpkgs/nixos/modules/services/networking/imaginary.nix b/nixpkgs/nixos/modules/services/networking/imaginary.nix
new file mode 100644
index 000000000000..a655903d1031
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/imaginary.nix
@@ -0,0 +1,113 @@
+{ lib, config, pkgs, utils, ... }:
+
+let
+  inherit (lib) mdDoc mkEnableOption mkIf mkOption types;
+
+  cfg = config.services.imaginary;
+in {
+  options.services.imaginary = {
+    enable = mkEnableOption (mdDoc "imaginary image processing microservice");
+
+    address = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = mdDoc ''
+        Bind address. Corresponds to the `-a` flag.
+        Set to `""` to bind to all addresses.
+      '';
+      example = "[::1]";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8088;
+      description = mdDoc "Bind port. Corresponds to the `-p` flag.";
+    };
+
+    settings = mkOption {
+      description = mdDoc ''
+        Command line arguments passed to the imaginary executable, stripped of
+        the prefix `-`. See upstream's
+        [README](https://github.com/h2non/imaginary#command-line-usage) for all
+        options.
+      '';
+      type = types.submodule {
+        freeformType = with types; attrsOf (oneOf [
+          bool
+          int
+          (nonEmptyListOf str)
+          str
+        ]);
+
+        options = {
+          return-size = mkOption {
+            type = types.bool;
+            default = false;
+            description = mdDoc "Return the image size in the HTTP headers.";
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = ! lib.hasAttr "a" cfg.settings;
+      message = "Use services.imaginary.address to specify the -a flag.";
+    } {
+      assertion = ! lib.hasAttr "p" cfg.settings;
+      message = "Use services.imaginary.port to specify the -p flag.";
+    } ];
+
+    systemd.services.imaginary = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = rec {
+        ExecStart = let
+          args = lib.mapAttrsToList (key: val:
+            "-" + key + "=" + lib.concatStringsSep "," (map toString (lib.toList val))
+          ) (cfg.settings // { a = cfg.address; p = cfg.port; });
+        in "${pkgs.imaginary}/bin/imaginary ${utils.escapeSystemdExecArgs args}";
+        ProtectProc = "invisible";
+        BindReadOnlyPaths = lib.optional (cfg.settings ? mount) cfg.settings.mount;
+        CapabilityBoundingSet = if cfg.port < 1024 then
+          [ "CAP_NET_BIND_SERVICE" ]
+        else
+          [ "" ];
+        AmbientCapabilities = CapabilityBoundingSet;
+        NoNewPrivileges = true;
+        DynamicUser = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        TemporaryFileSystem = [ "/:ro" ];
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = cfg.port >= 1024;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+        ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        PrivateMounts = true;
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        DevicePolicy = "closed";
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ dotlambda ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/inspircd.nix b/nixpkgs/nixos/modules/services/networking/inspircd.nix
new file mode 100644
index 000000000000..da193df105b7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/inspircd.nix
@@ -0,0 +1,62 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.inspircd;
+
+  configFile = pkgs.writeText "inspircd.conf" cfg.config;
+
+in {
+  meta = {
+    maintainers = [ lib.maintainers.sternenseemann ];
+  };
+
+  options = {
+    services.inspircd = {
+      enable = lib.mkEnableOption (lib.mdDoc "InspIRCd");
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.inspircd;
+        defaultText = lib.literalExpression "pkgs.inspircd";
+        example = lib.literalExpression "pkgs.inspircdMinimal";
+        description = lib.mdDoc ''
+          The InspIRCd package to use. This is mainly useful
+          for specifying an overridden version of the
+          `pkgs.inspircd` derivation, for
+          example if you want a more minimal InspIRCd
+          distribution with fewer modules enabled, or with
+          modules enabled that can't be distributed in binary
+          form due to licensing issues.
+        '';
+      };
+
+      config = lib.mkOption {
+        type = lib.types.lines;
+        description = lib.mdDoc ''
+          Verbatim `inspircd.conf` file.
+          For a list of options, consult the
+          [InspIRCd documentation](https://docs.inspircd.org/3/configuration/), the
+          [Module documentation](https://docs.inspircd.org/3/modules/)
+          and the example configuration files distributed
+          with `pkgs.inspircd.doc`.
+        '';
+      };
+    };
+  };
+
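+  # Editor's note: a hypothetical usage sketch, not part of the module. The config text
+  # is only a fragment; consult the InspIRCd documentation linked above for a complete
+  # configuration.
+  #
+  #   services.inspircd = {
+  #     enable = true;
+  #     config = ''
+  #       <server name="irc.example.org" description="Example server" id="97K" network="ExampleNet">
+  #     '';
+  #   };
+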
+  config = lib.mkIf cfg.enable {
+    systemd.services.inspircd = {
+      description = "InspIRCd - the stable, high-performance and modular Internet Relay Chat Daemon";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "network.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = ''
+          ${lib.getBin cfg.package}/bin/inspircd start --config ${configFile} --nofork --nopid
+        '';
+        DynamicUser = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/iodine.nix b/nixpkgs/nixos/modules/services/networking/iodine.nix
new file mode 100644
index 000000000000..ea2fa3ac4be4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iodine.nix
@@ -0,0 +1,198 @@
+# NixOS module for iodine, ip over dns daemon
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.iodine;
+
+  iodinedUser = "iodined";
+
+  /* is this path made unreadable by ProtectHome = true ? */
+  isProtected = x: hasPrefix "/root" x || hasPrefix "/home" x;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "iodined" "enable" ] [ "services" "iodine" "server" "enable" ])
+    (mkRenamedOptionModule [ "services" "iodined" "domain" ] [ "services" "iodine" "server" "domain" ])
+    (mkRenamedOptionModule [ "services" "iodined" "ip" ] [ "services" "iodine" "server" "ip" ])
+    (mkRenamedOptionModule [ "services" "iodined" "extraConfig" ] [ "services" "iodine" "server" "extraConfig" ])
+    (mkRemovedOptionModule [ "services" "iodined" "client" ] "")
+  ];
+
+  ### configuration
+
+  options = {
+
+    services.iodine = {
+      clients = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Each attribute of this option defines a systemd service that
+          runs iodine. Many or none may be defined.
+          The name of each service is
+          `iodine-«name»`,
+          where «name» is the name of the
+          corresponding attribute.
+        '';
+        example = literalExpression ''
+          {
+            foo = {
+              server = "tunnel.mydomain.com";
+              relay = "8.8.8.8";
+              extraConfig = "-v";
+            };
+          }
+        '';
+        type = types.attrsOf (
+          types.submodule (
+            {
+              options = {
+                server = mkOption {
+                  type = types.str;
+                  default = "";
+                  description = lib.mdDoc "Hostname of server running iodined";
+                  example = "tunnel.mydomain.com";
+                };
+
+                relay = mkOption {
+                  type = types.str;
+                  default = "";
+                  description = lib.mdDoc "DNS server to use as an intermediate relay to the iodined server";
+                  example = "8.8.8.8";
+                };
+
+                extraConfig = mkOption {
+                  type = types.str;
+                  default = "";
+                  description = lib.mdDoc "Additional command line parameters";
+                  example = "-l 192.168.1.10 -p 23";
+                };
+
+                passwordFile = mkOption {
+                  type = types.str;
+                  default = "";
+                  description = lib.mdDoc "Path to a file containing the password.";
+                };
+              };
+            }
+          )
+        );
+      };
+
+      server = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "enable iodined server";
+        };
+
+        ip = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "The assigned ip address or ip range";
+          example = "172.16.10.1/24";
+        };
+
+        domain = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "Domain or subdomain of which nameservers point to us";
+          example = "tunnel.mydomain.com";
+        };
+
+        extraConfig = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "Additional command line parameters";
+          example = "-l 192.168.1.10 -p 23";
+        };
+
+        passwordFile = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "File that contains password";
+        };
+      };
+
+    };
+  };
+
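+  # Editor's note: a hypothetical usage sketch, not part of the module; the values mirror
+  # the option examples above, and the password file path is made up.
+  #
+  #   services.iodine.server = {
+  #     enable = true;
+  #     ip = "172.16.10.1/24";
+  #     domain = "tunnel.mydomain.com";
+  #     passwordFile = "/run/keys/iodine";
+  #   };
+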
+  ### implementation
+
+  config = mkIf (cfg.server.enable || cfg.clients != {}) {
+    environment.systemPackages = [ pkgs.iodine ];
+    boot.kernelModules = [ "tun" ];
+
+    systemd.services =
+      let
+        createIodineClientService = name: cfg:
+          {
+            description = "iodine client - ${name}";
+            after = [ "network.target" ];
+            wantedBy = [ "multi-user.target" ];
+            script = "exec ${pkgs.iodine}/bin/iodine -f -u ${iodinedUser} ${cfg.extraConfig} ${optionalString (cfg.passwordFile != "") "< \"${builtins.toString cfg.passwordFile}\""} ${cfg.relay} ${cfg.server}";
+            serviceConfig = {
+              RestartSec = "30s";
+              Restart = "always";
+
+              # Hardening:
+              # Filesystem access
+              ProtectSystem = "strict";
+              ProtectHome = if isProtected cfg.passwordFile then "read-only" else "true";
+              PrivateTmp = true;
+              ReadWritePaths = "/dev/net/tun";
+              PrivateDevices = false;
+              ProtectKernelTunables = true;
+              ProtectKernelModules = true;
+              ProtectControlGroups = true;
+              # Caps
+              NoNewPrivileges = true;
+              # Misc.
+              LockPersonality = true;
+              RestrictRealtime = true;
+              PrivateMounts = true;
+              MemoryDenyWriteExecute = true;
+            };
+          };
+      in
+        listToAttrs (
+          mapAttrsToList
+            (name: value: nameValuePair "iodine-${name}" (createIodineClientService name value))
+            cfg.clients
+        ) // {
+          iodined = mkIf (cfg.server.enable) {
+            description = "iodine, ip over dns server daemon";
+            after = [ "network.target" ];
+            wantedBy = [ "multi-user.target" ];
+            script = "exec ${pkgs.iodine}/bin/iodined -f -u ${iodinedUser} ${cfg.server.extraConfig} ${optionalString (cfg.server.passwordFile != "") "< \"${builtins.toString cfg.server.passwordFile}\""} ${cfg.server.ip} ${cfg.server.domain}";
+            serviceConfig = {
+              # Filesystem access
+              ProtectSystem = "strict";
+              ProtectHome = if isProtected cfg.server.passwordFile then "read-only" else "true";
+              PrivateTmp = true;
+              ReadWritePaths = "/dev/net/tun";
+              PrivateDevices = false;
+              ProtectKernelTunables = true;
+              ProtectKernelModules = true;
+              ProtectControlGroups = true;
+              # Caps
+              NoNewPrivileges = true;
+              # Misc.
+              LockPersonality = true;
+              RestrictRealtime = true;
+              PrivateMounts = true;
+              MemoryDenyWriteExecute = true;
+            };
+          };
+        };
+
+    users.users.${iodinedUser} = {
+      uid = config.ids.uids.iodined;
+      group = "iodined";
+      description = "Iodine daemon user";
+    };
+    users.groups.iodined.gid = config.ids.gids.iodined;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/iperf3.nix b/nixpkgs/nixos/modules/services/networking/iperf3.nix
new file mode 100644
index 000000000000..0a204524e00f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iperf3.nix
@@ -0,0 +1,97 @@
+{ config, lib, pkgs, ... }: with lib;
+let
+  cfg = config.services.iperf3;
+
+  api = {
+    enable = mkEnableOption (lib.mdDoc "iperf3 network throughput testing server");
+    port = mkOption {
+      type        = types.ints.u16;
+      default     = 5201;
+      description = lib.mdDoc "Server port to listen on for iperf3 client requests.";
+    };
+    affinity = mkOption {
+      type        = types.nullOr types.ints.unsigned;
+      default     = null;
+      description = lib.mdDoc "CPU affinity for the process.";
+    };
+    bind = mkOption {
+      type        = types.nullOr types.str;
+      default     = null;
+      description = lib.mdDoc "Bind to the specific interface associated with the given address.";
+    };
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Open ports in the firewall for iperf3.";
+    };
+    verbose = mkOption {
+      type        = types.bool;
+      default     = false;
+      description = lib.mdDoc "Give more detailed output.";
+    };
+    forceFlush = mkOption {
+      type        = types.bool;
+      default     = false;
+      description = lib.mdDoc "Force flushing output at every interval.";
+    };
+    debug = mkOption {
+      type        = types.bool;
+      default     = false;
+      description = lib.mdDoc "Emit debugging output.";
+    };
+    rsaPrivateKey = mkOption {
+      type        = types.nullOr types.path;
+      default     = null;
+      description = lib.mdDoc "Path to the RSA private key (not password-protected) used to decrypt authentication credentials from the client.";
+    };
+    authorizedUsersFile = mkOption {
+      type        = types.nullOr types.path;
+      default     = null;
+      description = lib.mdDoc "Path to the configuration file containing authorized users credentials to run iperf tests.";
+    };
+    extraFlags = mkOption {
+      type        = types.listOf types.str;
+      default     = [ ];
+      description = lib.mdDoc "Extra flags to pass to iperf3(1).";
+    };
+  };
+
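+  # Editor's note: a hypothetical usage sketch, not part of the module. `--one-off` is an
+  # upstream iperf3 flag, shown here only to illustrate extraFlags.
+  #
+  #   services.iperf3 = {
+  #     enable = true;
+  #     openFirewall = true;
+  #     extraFlags = [ "--one-off" ];
+  #   };
+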
+  imp = {
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+
+    systemd.services.iperf3 = {
+      description = "iperf3 daemon";
+      unitConfig.Documentation = "man:iperf3(1) https://iperf.fr/iperf-doc.php";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        Restart = "on-failure";
+        RestartSec = 2;
+        DynamicUser = true;
+        PrivateDevices = true;
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        ExecStart = ''
+          ${pkgs.iperf3}/bin/iperf \
+            --server \
+            --port ${toString cfg.port} \
+            ${optionalString (cfg.affinity != null) "--affinity ${toString cfg.affinity}"} \
+            ${optionalString (cfg.bind != null) "--bind ${cfg.bind}"} \
+            ${optionalString (cfg.rsaPrivateKey != null) "--rsa-private-key-path ${cfg.rsaPrivateKey}"} \
+            ${optionalString (cfg.authorizedUsersFile != null) "--authorized-users-path ${cfg.authorizedUsersFile}"} \
+            ${optionalString cfg.verbose "--verbose"} \
+            ${optionalString cfg.debug "--debug"} \
+            ${optionalString cfg.forceFlush "--forceflush"} \
+            ${escapeShellArgs cfg.extraFlags}
+        '';
+      };
+    };
+  };
+in {
+  options.services.iperf3 = api;
+  config = mkIf cfg.enable imp;
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ircd-hybrid/builder.sh b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/builder.sh
new file mode 100644
index 000000000000..07a3788abf7d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/builder.sh
@@ -0,0 +1,32 @@
+if [ -e "$NIX_ATTRS_SH_FILE" ]; then . "$NIX_ATTRS_SH_FILE"; elif [ -f .attrs.sh ]; then . .attrs.sh; fi
+source $stdenv/setup
+
+doSub() {
+    local src=$1
+    local dst=$2
+    mkdir -p $(dirname $dst)
+    substituteAll $src $dst
+}
+
+subDir=/
+for i in $scripts; do
+    if test "$(echo $i | cut -c1-2)" = "=>"; then
+        subDir=$(echo $i | cut -c3-)
+    else
+        dst=$out/$subDir/$(stripHash $i | sed 's/\.in//')
+        doSub $i $dst
+        chmod +x $dst # !!!
+    fi
+done
+
+subDir=/
+for i in $substFiles; do
+    if test "$(echo $i | cut -c1-2)" = "=>"; then
+        subDir=$(echo $i | cut -c3-)
+    else
+        dst=$out/$subDir/$(stripHash $i | sed 's/\.in//')
+        doSub $i $dst
+    fi
+done
+
+mkdir -p $out/bin
diff --git a/nixpkgs/nixos/modules/services/networking/ircd-hybrid/control.in b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/control.in
new file mode 100644
index 000000000000..312dfaada329
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/control.in
@@ -0,0 +1,26 @@
+#! @shell@ -e
+
+# Make sure that the environment is deterministic.
+export PATH=@coreutils@/bin
+
+if test "$1" = "start"; then
+	if ! @procps@/bin/pgrep ircd; then
+	if @ipv6Enabled@; then 
+		while ! @iproute2@/sbin/ip addr |
+			@gnugrep@/bin/grep inet6 | 
+			@gnugrep@/bin/grep global; do
+			sleep 1;
+		done;
+	fi;
+	rm -rf /home/ircd
+	mkdir -p /home/ircd
+	chown ircd: /home/ircd
+	cd /home/ircd
+    env - HOME=/homeless-shelter $extraEnv \
+        @su@/bin/su ircd --shell=/bin/sh -c ' @ircdHybrid@/bin/ircd -configfile @out@/conf/ircd.conf </dev/null -logfile /home/ircd/ircd.log' 2>&1 >/var/log/ircd-hybrid.out
+	fi;
+fi
+
+if test "$1" = "stop" ; then 
+	@procps@/bin/pkill ircd;
+fi;
diff --git a/nixpkgs/nixos/modules/services/networking/ircd-hybrid/default.nix b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/default.nix
new file mode 100644
index 000000000000..554b0f7bb8b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/default.nix
@@ -0,0 +1,133 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.ircdHybrid;
+
+  ircdService = pkgs.stdenv.mkDerivation rec {
+    name = "ircd-hybrid-service";
+    scripts = [ "=>/bin" ./control.in ];
+    substFiles = [ "=>/conf" ./ircd.conf ];
+    inherit (pkgs) ircdHybrid coreutils su iproute2 gnugrep procps;
+
+    ipv6Enabled = boolToString config.networking.enableIPv6;
+
+    inherit (cfg) serverName sid description adminEmail
+            extraPort;
+
+    cryptoSettings =
+      (optionalString (cfg.rsaKey != null) "rsa_private_key_file = \"${cfg.rsaKey}\";\n") +
+      (optionalString (cfg.certificate != null) "ssl_certificate_file = \"${cfg.certificate}\";\n");
+
+    extraListen = map (ip: "host = \""+ip+"\";\nport = 6665 .. 6669, "+extraPort+"; ") cfg.extraIPs;
+
+    builder = ./builder.sh;
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.ircdHybrid = {
+
+      enable = mkEnableOption (lib.mdDoc "IRCD");
+
+      serverName = mkOption {
+        default = "hades.arpa";
+        type = types.str;
+        description = lib.mdDoc ''
+          IRCD server name.
+        '';
+      };
+
+      sid = mkOption {
+        default = "0NL";
+        type = types.str;
+        description = lib.mdDoc ''
+          IRCD server unique ID in a net of servers.
+        '';
+      };
+
+      description = mkOption {
+        default = "Hybrid-7 IRC server.";
+        type = types.str;
+        description = lib.mdDoc ''
+          IRCD server description.
+        '';
+      };
+
+      rsaKey = mkOption {
+        default = null;
+        example = literalExpression "/root/certificates/irc.key";
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          IRCD server RSA key.
+        '';
+      };
+
+      certificate = mkOption {
+        default = null;
+        example = literalExpression "/root/certificates/irc.pem";
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          IRCD server SSL certificate. There are some limitations; read the manual.
+        '';
+      };
+
+      adminEmail = mkOption {
+        default = "<bit-bucket@example.com>";
+        type = types.str;
+        example = "<name@domain.tld>";
+        description = lib.mdDoc ''
+          IRCD server administrator e-mail.
+        '';
+      };
+
+      extraIPs = mkOption {
+        default = [];
+        example = ["127.0.0.1"];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Extra IPs to bind.
+        '';
+      };
+
+      extraPort = mkOption {
+        default = "7117";
+        type = types.str;
+        description = lib.mdDoc ''
+          Extra port to avoid filtering.
+        '';
+      };
+
+    };
+
+  };
+
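+  # Editor's note: a hypothetical usage sketch, not part of the module; the values are
+  # placeholders in the same format as the option examples above.
+  #
+  #   services.ircdHybrid = {
+  #     enable = true;
+  #     serverName = "irc.example.org";
+  #     sid = "1AB";
+  #     adminEmail = "<admin@example.org>";
+  #     extraIPs = [ "127.0.0.1" ];
+  #   };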
+
+  ###### implementation
+
+  config = mkIf config.services.ircdHybrid.enable {
+
+    users.users.ircd =
+      { description = "IRCD owner";
+        group = "ircd";
+        uid = config.ids.uids.ircd;
+      };
+
+    users.groups.ircd.gid = config.ids.gids.ircd;
+
+    systemd.services.ircd-hybrid = {
+      description = "IRCD Hybrid server";
+      after = [ "started networking" ];
+      wantedBy = [ "multi-user.target" ];
+      script = "${ircdService}/bin/control start";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf
new file mode 100644
index 000000000000..b82094cf5f09
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ircd-hybrid/ircd.conf
@@ -0,0 +1,1051 @@
+/* doc/example.conf - ircd-hybrid-7 Example configuration file
+ * Copyright (C) 2000-2006 Hybrid Development Team
+ *
+ * Written by ejb, wcampbel, db, leeh and others
+ * Other example configurations can be found in the source dir under
+ * etc/.
+ *
+ * $Id: example.conf 639 2006-06-01 14:12:21Z michael $
+ */
+
+/* IMPORTANT NOTES:
+ *
+ * auth {} blocks MUST be specified in order of precedence.  The first one
+ * that matches a user will be used.  So place spoofs first, then specials,
+ * then general access.
+ *
+ * Shell style (#), C++ style (//) and C style comments are supported.
+ *
+ * Files may be included by either:
+ *        .include "filename"
+ *        .include <filename>
+ *
+ * Times/durations are written as:
+ *        12 hours 30 minutes 1 second
+ *        
+ * Valid units of time:
+ *        month, week, day, hour, minute, second
+ *
+ * Valid units of size:
+ *        megabyte/mbyte/mb, kilobyte/kbyte/kb, byte
+ *
+ * Sizes and times may be singular or plural.  
+ */ 
+
+/* EFNET NOTE:
+ *
+ * This config file is NOT suitable for EFNet.  EFNet admins should use
+ * example.efnet.conf
+ */
+ 
+/*
+ * serverinfo {}:  contains information about the server. (OLD M:)
+ */
+serverinfo {
+	/*
+	 * name: the name of our server.  This cannot be changed at runtime.
+	 */
+	name = "@serverName@";
+
+	/*
+	 * sid: a server's unique ID.  This is three characters long and must
+	 * be in the form [0-9][A-Z0-9][A-Z0-9].  The first character must be
+	 * a digit, followed by 2 alpha-numerical letters.
+	 * NOTE: The letters must be capitalized.  This cannot be changed at runtime.
+	 */
+	sid = "@sid@";
+
+	/*
+	 * description: the description of the server.  '[' and ']' may not
+	 * be used here for compatibility with older servers.
+	 */
+	description = "@description@";
+
+	/*
+	 * network info: the name and description of the network this server
+	 * is on.  Shown in the 005 reply and used with serverhiding.
+	 */
+	network_name = "JustIRCNetwork";
+	network_desc = "This is My Network";
+
+	/*
+	 * hub: allow this server to act as a hub and have multiple servers
+	 * connected to it.  This may not be changed if there are active
+	 * LazyLink servers.
+	 */
+	hub = no;
+
+	/*
+	 * vhost: the IP to bind to when we connect outward to ipv4 servers.
+	 * This should be an ipv4 IP only, or "*" for INADDR_ANY.
+	 */
+	#vhost = "192.169.0.1";
+
+	/*
+	 * vhost6: the IP to bind to when we connect outward to ipv6 servers.
+	 * This should be an ipv6 IP only, or "*" for INADDR_ANY.
+	 */
+	#vhost6 = "3ffe:80e8:546::2";
+
+	/* max_clients: the maximum number of clients allowed to connect */
+	max_clients = 512;
+
+	/*
+	 * rsa key: the path to the file containing our rsa key for cryptlink.
+	 *
+	 * Example command to store a 2048 bit RSA keypair in
+	 * rsa.key, and the public key in rsa.pub:
+	 * 
+	 * 	openssl genrsa -out rsa.key 2048
+	 *	openssl rsa -in rsa.key -pubout -out rsa.pub
+	 *	chown <ircd-user>:<ircd.group> rsa.key rsa.pub
+	 *	chmod 0600 rsa.key
+	 *	chmod 0644 rsa.pub
+	 */
+	#rsa_private_key_file = "/usr/local/ircd/etc/rsa.key";
+
+	/*
+	 * ssl certificate: the path to the file containing our ssl certificate
+	 * for encrypted client connection.
+	 *
+	 * This assumes your private RSA key is stored in rsa.key. You
+	 * MUST have an RSA key in order to generate the certificate
+	 *
+	 *	openssl req -new -days 365 -x509 -key rsa.key -out cert.pem
+	 *
+	 * See http://www.openssl.org/docs/HOWTO/certificates.txt
+	 *
+	 * Please use the following values when generating the cert
+	 *
+	 *	Organization Name: Network Name
+	 *	Organization Unit Name: changeme.someirc.net
+	 *	Common Name: irc.someirc.net
+	 *	E-mail: you@domain.com
+	 */
+	#ssl_certificate_file = "/usr/local/ircd/etc/cert.pem";
+
+	@cryptoSettings@
+};
+
+/*
+ * admin {}:  contains admin information about the server. (OLD A:)
+ */
+admin {
+	name = "Anonymous Hero";
+	description = "Main Server Administrator";
+	email = "@adminEmail@";
+};
+
+/*
+ * log {}:  contains information about logfiles.
+ */
+log {
+	/* Do you want to enable logging to ircd.log? */
+	use_logging = yes;
+
+	/*
+	 * logfiles: the logfiles to use for user connects, /oper uses,
+	 * and failed /oper.  These files must exist for logging to be used.
+	 */
+	fname_userlog = "/home/ircd/logs/userlog";
+	fname_operlog = "/home/ircd/logs/operlog";
+	fname_killlog = "/home/ircd/logs/kill";
+	fname_klinelog = "/home/ircd/logs/kline";
+	fname_glinelog = "/home/ircd/logs/gline";
+
+	/*
+	 * log_level: the amount of detail to log in ircd.log.  The
+	 * higher, the more information is logged.  May be changed
+	 * once the server is running via /quote SET LOG.  Either:
+	 * L_CRIT, L_ERROR, L_WARN, L_NOTICE, L_TRACE, L_INFO or L_DEBUG
+	 */
+	log_level = L_INFO;
+};
+
+/*
+ * class {}:  contains information about classes for users (OLD Y:)
+ */
+class {
+	/* name: the name of the class.  classes are text now */
+	name = "users";
+
+	/*
+	 * ping_time: how often a client must reply to a PING from the
+	 * server before they are dropped.
+	 */
+	ping_time = 90 seconds;
+
+	/*
+	 * number_per_ip: how many local users are allowed to connect
+	 * from one IP  (optional)
+	 */
+	number_per_ip = 10;
+
+	/*
+	 * max_local: how many local users are allowed to connect
+	 * from one ident@host  (optional)
+	 */
+	max_local = 50;
+
+	/*
+	 * max_global: network-wide limit of users per ident@host  (optional)
+	 */
+	max_global = 50;
+
+	/*
+	 * max_number: the maximum number of users allowed in this class (optional)
+	 */
+	max_number = 10000;
+
+	/*
+	 * the following lines are optional and allow you to define
+	 * how many users can connect from one /NN subnet
+	 */
+	/*cidr_bitlen_ipv4 = 24;
+	 *cidr_bitlen_ipv6 = 120;
+	 *number_per_cidr = 16;*/
+
+	/*
+	 * sendq: the amount of data allowed in a clients queue before
+	 * they are dropped.
+	 */
+	sendq = 100 kbytes;
+};
+
+class {
+	name = "opers";
+	ping_time = 90 seconds;
+	number_per_ip = 10;
+	max_number = 100;
+	sendq = 100kbytes;
+};
+
+class {
+	name = "server";
+	ping_time = 90 seconds;
+
+	/*
+	 * ping_warning: how fast a server must reply to a PING before
+	 * a warning to opers is generated.
+	 */
+	ping_warning = 15 seconds;
+
+	/*
+	 * connectfreq: only used in server classes.  Specifies the delay
+	 * between autoconnecting to servers.
+	 */
+	connectfreq = 5 minutes;
+
+	/* max number: the amount of servers to autoconnect to */
+	max_number = 1;
+
+	/* sendq: servers need a higher sendq as they send more data */
+	sendq = 2 megabytes;
+};
+
+/*
+ * listen {}:  contains information about the ports ircd listens on (OLD P:)
+ */
+listen {
+	/*
+	 * port: the specific port to listen on.  If no host is specified
+	 * before, it will listen on all available IPs.
+	 *
+	 * Ports are separated via a comma, a range may be specified using ".."
+	 */
+	
+	/* port: listen on all available IPs, ports 6665 to 6669 */
+	port = 6665 .. 6669;
+
+	/*
+	 * Listen on 192.168.0.1/6697 with ssl enabled and hidden from STATS P
+	 * unless you are an administrator.
+	 *
+	 * NOTE: The "flags" directive has to come before "port".  Always!
+	 */
+	#flags = hidden, ssl;
+	#host = "192.168.0.1";
+	#port = 6697;
+
+	/*
+	 * host: set a specific IP/host the ports after the line will listen 
+	 * on.  This may be ipv4 or ipv6.
+	 */
+	#host = "1.2.3.4";
+	#port = 7000, 7001;
+
+	#host = "3ffe:1234:a:b:c::d";
+	#port = 7002;
+	
+	@extraListen@
+};
+
+auth {
+	user = "*@*";
+	class = "users";
+	#flags = need_ident;
+};
+
+/*
+ * operator {}:  defines ircd operators. (OLD O:)
+ *
+ * ircd-hybrid no longer supports local operators, privileges are
+ * controlled via flags.
+ */
+operator {
+	/* name: the name of the oper */
+	/* NOTE: operator "opername"{} is also supported */
+	name = "god";
+
+	/*
+	 * user: the user@host required for this operator.  CIDR is not
+	 * supported.  Multiple user="" lines are supported.
+	 */
+	user = "*god@*";
+	user = "*@127.0.0.1";
+
+	/*
+	 * password: the password required to oper.  By default this will
+	 * need to be encrypted using 'mkpasswd'.  MD5 is supported.
+	 */
+	password = "iamoperator";
+
+	/*
+	 * encrypted: controls whether the oper password above has been
+	 * encrypted.  (OLD CRYPT_OPER_PASSWORD now optional per operator)
+	 */
+	encrypted = no;
+
+	/*
+	 * rsa_public_key_file: the public key for this oper when using Challenge.
+	 * A password should not be defined when this is used, see 
+	 * doc/challenge.txt for more information.
+	 */
+#	rsa_public_key_file = "/usr/local/ircd/etc/oper.pub";
+
+	/* class: the class the oper joins when they successfully /oper */
+	class = "opers";
+
+	/*
+	 * umodes: default usermodes opers get when they /oper.  If defined,
+	 * it will override oper_umodes settings in general {}.
+	 * Available usermodes:
+	 *
+	 * +b - bots         - See bot and drone flooding notices
+	 * +c - cconn        - Client connection/quit notices
+	 * +D - deaf         - Don't receive channel messages
+	 * +d - debug        - See debugging notices
+	 * +f - full         - See I: line full notices
+	 * +G - softcallerid - Server Side Ignore for users not on your channels
+	 * +g - callerid     - Server Side Ignore (for privmsgs etc)
+	 * +i - invisible    - Not shown in NAMES or WHO unless you share
+	 *                     a channel
+	 * +k - skill        - See server generated KILL messages
+	 * +l - locops       - See LOCOPS messages
+	 * +n - nchange      - See client nick changes
+	 * +r - rej          - See rejected client notices
+	 * +s - servnotice   - See general server notices
+	 * +u - unauth       - See unauthorized client notices
+	 * +w - wallop       - See server generated WALLOPS
+	 * +x - external     - See remote server connection and split notices
+	 * +y - spy          - See LINKS, STATS, TRACE notices etc.
+	 * +z - operwall     - See oper generated WALLOPS
+	 */
+#	umodes = locops, servnotice, operwall, wallop;
+
+	/*
+	 * privileges: controls the activities and commands an oper is 
+	 * allowed to do on the server.  All options default to no.
+	 * Available options:
+	 *
+	 * global_kill:  allows remote users to be /KILL'd (OLD 'O' flag)
+	 * remote:       allows remote SQUIT and CONNECT   (OLD 'R' flag)
+	 * remoteban:    allows remote KLINE/UNKLINE
+	 * kline:        allows KILL, KLINE and DLINE      (OLD 'K' flag)
+	 * unkline:      allows UNKLINE and UNDLINE        (OLD 'U' flag)
+	 * gline:        allows GLINE                      (OLD 'G' flag)
+	 * xline:         allows XLINE                     (OLD 'X' flag)
+	 * operwall:     allows OPERWALL
+	 * nick_changes: allows oper to see nickchanges    (OLD 'N' flag)
+	 *               via usermode +n
+	 * rehash:       allows oper to REHASH config      (OLD 'H' flag)
+	 * die:          allows DIE and RESTART            (OLD 'D' flag)
+	 * admin:        gives admin privileges.  admins
+	 *               may (un)load modules and see the
+	 *               real IPs of servers.
+	 * hidden_admin: same as 'admin', but no one can recognize you as
+	 *               being an admin
+	 * hidden_oper:  not shown in /stats p (except for other operators)
+	 */
+	/* You can either use
+	 * die = yes;
+	 * rehash = yes;
+	 *
+	 * or in a flags statement i.e.
+	 * flags = die, rehash;
+	 *
+	 * You can also negate a flag with ~ i.e.
+	 * flags = ~remote;
+	 *
+	 */
+	flags = global_kill, remote, kline, unkline, xline,
+		die, rehash, nick_changes, admin, operwall;
+};
+
+/*
+ * shared {}: users that are allowed to remote kline (OLD U:)
+ *
+ * NOTE: This can be effectively used for remote klines.
+ *       Please note that there is no password authentication
+ *       for users setting remote klines.  You must also be
+ *       /oper'd in order to issue a remote kline.
+ */
+shared {
+	/*
+	 * name: the server the user must be on to set klines.  If this is not
+	 * specified, the user will be allowed to kline from all servers.
+	 */
+	name = "irc2.some.server";
+
+	/*
+	 * user: the user@host mask that is allowed to set klines.  If this is
+	 * not specified, all users on the server above will be allowed to set
+	 * a remote kline.
+	 */
+	user = "oper@my.host.is.spoofed";
+
+	/*
+	 * type: list of what to share, options are as follows:
+	 *	kline	- allow oper/server to kline
+	 *	tkline	- allow temporary klines
+	 *	unkline	- allow oper/server to unkline
+	 *	xline	- allow oper/server to xline
+	 * 	txline	- allow temporary xlines
+	 *	unxline	- allow oper/server to unxline
+	 *	resv	- allow oper/server to resv
+	 * 	tresv	- allow temporary resvs
+	 *	unresv	- allow oper/server to unresv
+	 *      locops  - allow oper/server to locops - only used for servers that cluster
+	 *	all	- allow oper/server to do all of the above (default)
+	 */
+	type = kline, unkline, resv;
+};
+
+/*
+ * kill {}:  users that are not allowed to connect (OLD K:)
+ * Oper issued klines will be added to the specified kline config
+ */
+kill {
+	user = "bad@*.hacked.edu";
+	reason = "Obviously hacked account";
+};
+
+kill {
+	user = "^O[[:alpha:]]?[[:digit:]]+(x\.o|\.xo)$@^[[:alnum:]]{4}\.evilnet.org$";
+	type = regex;
+};
+
+/*
+ * deny {}:  IPs that are not allowed to connect (before DNS/ident lookup)
+ * Oper issued dlines will be added to the specified dline config
+ */
+deny {
+	ip = "10.0.1.0/24";
+	reason = "Reconnecting vhosted bots";
+};
+
+/*
+ * exempt {}: IPs that are exempt from deny {} and Dlines. (OLD d:)
+ */
+exempt {
+	ip = "192.168.0.0/16";
+};
+
+/*
+ * resv {}:  nicks and channels users may not use/join (OLD Q:)
+ */
+resv {
+	/* reason: the reason for the following resv's */
+	reason = "There are no services on this network";
+
+	/* resv: the nicks and channels users may not join/use */
+	nick = "nickserv";
+	nick = "chanserv";
+	channel = "#services";
+
+	/* resv: wildcard masks are also supported in nicks only */
+	reason = "Clone bots";
+	nick = "clone*";
+};
+
+/*
+ * gecos {}:  The X: replacement, used for banning users based on
+ * their "realname".
+ */
+gecos {
+	name = "*sex*";
+	reason = "Possible spambot";
+};
+
+gecos {
+	name = "sub7server";
+	reason = "Trojan drone";
+};
+
+gecos {
+	name = "*http*";
+	reason = "Spambot";
+};
+
+gecos {
+	name = "^\[J[0o]hn Do[3e]\]-[0-9]{2,5}$";
+	type = regex;
+};
+
+/*
+ * channel {}:  The channel block contains options pertaining to channels
+ */
+channel {
+	/*
+	 * disable_fake_channels: this option, if set to 'yes', will
+	 * disallow clients to create or join channels that have one
+	 * of the following ASCII characters in their name:
+	 *
+	 *   2 | bold
+	 *   3 | mirc color
+	 *  15 | plain text
+	 *  22 | reverse
+	 *  31 | underline
+	 * 160 | non-breaking space
+	 */
+	disable_fake_channels = yes;
+
+	/*
+	 * restrict_channels: reverse channel RESVs logic, only reserved
+	 * channels are allowed
+	 */
+	restrict_channels = no;
+
+	/*
+	 * disable_local_channels: prevent users from joining &channels.
+	 */
+	disable_local_channels = no;
+
+	/*
+	 * use_invex: Enable/disable channel mode +I, a n!u@h list of masks
+	 * that can join a +i channel without an invite.
+	 */
+	use_invex = yes;
+
+	/*
+	 * use_except: Enable/disable channel mode +e, a n!u@h list of masks
+	 * that can join a channel through a ban (+b).
+	 */
+	use_except = yes;
+
+	/*
+	 * use_knock: Allows users to request an invite to a channel that
+	 * is locked somehow (+ikl).  If the channel is +p or you are banned
+	 * the knock will not be sent.
+	 */
+	use_knock = yes;
+
+	/*
+	 * knock_delay: The amount of time a user must wait between issuing
+	 * the knock command.
+	 */
+	knock_delay = 1 minutes;
+
+	/*
+	 * knock_delay_channel: How often a knock to any specific channel
+	 * is permitted, regardless of the user sending the knock.
+	 */
+	knock_delay_channel = 1 minute;
+
+	/*
+	 * burst_topicwho: enable sending of who set topic on topicburst
+	 * default is yes
+	 */
+	burst_topicwho = yes;
+
+	/*
+	 * max_chans_per_user: The maximum number of channels a user can
+	 * join/be on.
+	 */
+	max_chans_per_user = 25;
+
+	/* quiet_on_ban: stop banned people talking in channels. */
+	quiet_on_ban = yes;
+
+	/* max_bans: maximum number of +b/e/I modes in a channel */
+	max_bans = 1000;
+
+	/*
+	 * how many joins in how many seconds constitute a flood, use 0 to
+	 * disable. +b opers will be notified (changeable via /set)
+	 */
+	join_flood_count = 100;
+	join_flood_time = 10 seconds;
+
+	/*
+	 * splitcode: The ircd will now check splitmode every few seconds.
+	 *
+	 * Either split users or split servers can activate splitmode, but
+	 * both conditions must be met for the ircd to deactivate splitmode.
+	 * 
+	 * You may force splitmode to be permanent by /quote set splitmode on
+	 */
+
+	/*
+	 * default_split_user_count: when the usercount is lower than this level,
+	 * consider ourselves split.  This must be set for automatic splitmode.
+	 */
+	default_split_user_count = 0;
+
+	/*
+	 * default_split_server_count: when the servercount is lower than this,
+	 * consider ourselves split.  This must be set for automatic splitmode.
+	 */
+	default_split_server_count = 0;
+
+	/* split no create: disallow users creating channels on split. */
+	no_create_on_split = yes;
+
+	/* split: no join: disallow users joining channels at all on a split */
+	no_join_on_split = no;
+};
+
+/*
+ * serverhide {}:  The serverhide block contains the options regarding
+ * serverhiding
+ */
+serverhide {
+	/*
+	 * flatten_links: this option makes all servers in /links appear
+	 * to be linked directly to this server
+	 */
+	flatten_links = no;
+
+	/*
+	 * links_delay: how often to update the links file when it is
+	 * flattened.
+	 */
+	links_delay = 5 minutes;
+
+	/*
+	 * hidden: hide this server from a /links output on servers that
+	 * support it.  This allows hub servers to be hidden etc.
+	 */
+	hidden = no;
+
+	/*
+	 * disable_hidden: prevent servers hiding themselves from a
+	 * /links output.
+	 */
+	disable_hidden = no;
+
+	/*
+	 * hide_servers: hide remote servernames everywhere and instead use
+	 * hidden_name and network_desc.
+	 */
+	hide_servers = no;
+
+	/*
+	 * Use this as the servername users see if hide_servers = yes.
+	 */
+	hidden_name = "*.hidden.com";
+
+	/*
+	 * hide_server_ips: If this is disabled, opers will be unable to see servers
+	 * ips and will be shown a masked ip, admins will be shown the real ip.
+	 *
+	 * If this is enabled, nobody can see a servers ip.  *This is a kludge*, it
+	 * has the side effect of hiding the ips everywhere, including logfiles.
+	 *
+	 * We recommend you leave this disabled, and just take care with who you
+	 * give admin=yes; to.
+	 */
+	hide_server_ips = no;
+};
+
+/*
+ * general {}:  The general block contains many of the options that were once
+ * compiled in options in config.h.  The general block is read at start time.
+ */
+general {
+	/*
+	 * gline_min_cidr: the minimum required length of a CIDR bitmask
+	 * for IPv4 based glines
+	 */
+	gline_min_cidr = 16;
+
+	/*
+	 * gline_min_cidr6: the minimum required length of a CIDR bitmask
+	 * for IPv6 based glines
+	 */
+	gline_min_cidr6 = 48;
+
+	/*
+	 * Whether to automatically set mode +i on connecting users.
+	 */
+	invisible_on_connect = yes;
+
+	/*
+	 * If you don't explicitly specify burst_away in your connect blocks, then
+	 * they will default to the burst_away value below.
+	 */
+	burst_away = no;
+
+	/*
+	 * Show "actually using host <ip>" on /whois when possible.
+	 */
+	use_whois_actually = yes;
+
+	/*
+	 * Max time from the nickname change that still causes KILL
+	 * automatically to switch for the current nick of that user. (seconds)
+	 */
+	kill_chase_time_limit = 90;
+
+	/*
+	 * If hide_spoof_ips is disabled, opers will be allowed to see the real IP of spoofed
+	 * users in /trace etc.  If this is defined they will be shown a masked IP.
+	 */
+	hide_spoof_ips = yes;
+
+	/*
+	 * Ignore bogus timestamps from other servers.  Yes, this will desync
+	 * the network, but it will allow chanops to resync with a valid non TS 0
+	 *
+	 * This should be enabled network wide, or not at all.
+	 */
+	ignore_bogus_ts = no;
+
+	/*
+	 * disable_auth: completely disable ident lookups; if you enable this,
+	 * be careful of what you set need_ident to in your auth {} blocks
+	 */
+	disable_auth = no;
+
+	/* disable_remote_commands: disable users doing commands on remote servers */
+	disable_remote_commands = no;
+
+	/*
+	 * tkline_expire_notices: enables or disables temporary kline/xline
+	 * expire notices.
+	 */
+	tkline_expire_notices = no;
+
+	/*
+	 * default_floodcount: the default value of floodcount that is configurable
+	 * via /quote set floodcount.  This is the amount of lines a user
+	 * may send to any other user/channel in one second.
+	 */
+	default_floodcount = 10;
+
+	/*
+	 * failed_oper_notice: send a notice to all opers on the server when 
+	 * someone tries to OPER and uses the wrong password, host or ident.
+	 */
+	failed_oper_notice = yes;
+
+	/*
+	 * dots_in_ident: the amount of '.' characters permitted in an ident
+	 * reply before the user is rejected.
+	 */
+	dots_in_ident = 2;
+
+	/*
+	 * dot_in_ip6_addr: ircd-hybrid-6.0 and earlier will disallow hosts 
+	 * without a '.' in them.  This will add one to the end.  Only needed
+	 * for older servers.
+	 */
+	dot_in_ip6_addr = no;
+
+	/*
+	 * min_nonwildcard: the minimum non wildcard characters in k/d/g lines
+	 * placed via the server.  klines hand placed are exempt from limits.
+	 * wildcard chars: '.' ':' '*' '?' '@' '!' '#'
+	 */
+	min_nonwildcard = 4;
+
+	/*
+	 * min_nonwildcard_simple: the minimum non wildcard characters in 
+	 * gecos bans.  wildcard chars: '*' '?' '#'
+	 */
+	min_nonwildcard_simple = 3;
+
+	/* max_accept: maximum allowed /accept's for +g usermode */
+	max_accept = 20;
+
+	/* anti_nick_flood: enable the nickflood control code */
+	anti_nick_flood = yes;
+
+	/* nick flood: the nick changes allowed in the specified period */
+	max_nick_time = 20 seconds;
+	max_nick_changes = 5;
+
+	/*
+	 * anti_spam_exit_message_time: the minimum time a user must be connected
+	 * before custom quit messages are allowed.
+	 */
+	anti_spam_exit_message_time = 5 minutes;
+
+	/*
+	 * ts delta: the time delta allowed between server clocks before
+	 * a warning is given, or before the link is dropped.  all servers
+	 * should run ntpdate/rdate to keep clocks in sync
+	 */
+	ts_warn_delta = 30 seconds;
+	ts_max_delta = 5 minutes;
+
+	/*
+	 * kline_with_reason: show the user the reason why they are k/d/glined 
+	 * on exit.  May give away who set k/dline when set via tcm.
+	 */
+	kline_with_reason = yes;
+
+	/*
+	 * kline_reason: show this message to users on channel
+	 * instead of the oper reason.
+	 */
+	kline_reason = "Connection closed";
+
+	/*
+	 * reject_hold_time: wait this amount of time before disconnecting
+	 * a rejected client. Use 0 to disable.
+	 */
+	reject_hold_time = 0;
+
+	/*
+	 * warn_no_nline: warn opers about servers that try to connect but
+	 * we don't have a connect {} block for.  Twits with misconfigured 
+	 * servers can get really annoying with this enabled.
+	 */
+	warn_no_nline = yes;
+
+	/*
+	 * stats_e_disabled: set this to 'yes' to disable "STATS e" for both
+	 * operators and administrators.  Doing so is a good idea in case
+	 * there are any exempted (exempt{}) server IPs you don't want to
+	 * see leaked.
+	 */
+	stats_e_disabled = no;
+
+	/* stats_o_oper only: make stats o (opers) oper only */
+	stats_o_oper_only = yes;
+
+	/* stats_P_oper_only: make stats P (ports) oper only */
+	stats_P_oper_only = yes;
+
+	/*
+	 * stats i oper only: make stats i (auth {}) oper only. set to:
+	 *     yes:    show users no auth blocks, made oper only.
+	 *     masked: show users first matching auth block
+	 *     no:     show users all auth blocks.
+	 */
+	stats_i_oper_only = yes;
+
+	/*
+	 * stats_k_oper_only: make stats k/K (klines) oper only.  set to:
+	 *     yes:    show users no auth blocks, made oper only
+	 *     masked: show users first matching auth block
+	 *     no:     show users all auth blocks.
+	 */
+	stats_k_oper_only = yes;
+
+	/*
+	 * caller_id_wait: time between notifying a +g user that somebody
+	 * is messaging them.
+	 */
+	caller_id_wait = 1 minute;
+
+	/*
+	 * opers_bypass_callerid: allows operators to bypass +g and message
+	 * anyone who has it set (useful if you use services).
+	 */
+	opers_bypass_callerid = no;
+
+	/*
+	 * pace_wait_simple: time between use of less intensive commands
+	 * (ADMIN, HELP, (L)USERS, VERSION, remote WHOIS)
+	 */
+	pace_wait_simple = 1 second;
+
+	/*
+	 * pace_wait: time between more intensive commands
+	 * (INFO, LINKS, LIST, MAP, MOTD, STATS, WHO, wildcard WHOIS, WHOWAS)
+	 */
+	pace_wait = 10 seconds;
+
+	/*
+	 * short_motd: send clients a notice telling them to read the motd
+	 * instead of forcing a motd to clients who may simply ignore it.
+	 */
+	short_motd = no;
+
+	/*
+	 * ping_cookie: require clients to respond exactly to a ping command,
+	 * can help block certain types of drones and FTP PASV mode spoofing.
+	 */
+	ping_cookie = no;
+
+	/* no_oper_flood: increase flood limits for opers. */
+	no_oper_flood = yes;
+
+	/*
+	 * true_no_oper_flood: completely eliminate flood limits for opers
+	 * and for clients with can_flood = yes in their auth {} blocks
+	 */
+	true_no_oper_flood = yes;
+
+	/* oper_pass_resv: allow opers to over-ride RESVs on nicks/channels */
+	oper_pass_resv = yes;
+
+	/*
+	 * idletime: the maximum amount of time a user may idle before
+	 * they are disconnected
+	 */
+	idletime = 0;
+
+	/* REMOVE ME.  The following line checks you've been reading. */
+	#havent_read_conf = 1;
+
+	/*
+	 * max_targets: the maximum amount of targets in a single 
+	 * PRIVMSG/NOTICE.  Set to 999 NOT 0 for unlimited.
+	 */
+	max_targets = 4;
+
+	/*
+	 * client_flood: maximum amount of data in a clients queue before
+	 * they are dropped for flooding.
+	 */
+	client_flood = 2560 bytes;
+
+	/*
+	 * message_locale: the default message locale
+	 * Use "standard" for the compiled in defaults.
+	 * To install the translated messages, go into messages/ in the
+	 * source directory and run `make install'.
+	 */
+	message_locale = "standard";
+
+	/*
+	 * usermodes configurable: a list of usermodes for the options below
+	 *
+	 * +b - bots         - See bot and drone flooding notices
+	 * +c - cconn        - Client connection/quit notices
+	 * +D - deaf         - Don't receive channel messages
+	 * +d - debug        - See debugging notices
+	 * +f - full         - See I: line full notices
+	 * +G - softcallerid - Server Side Ignore for users not on your channels
+	 * +g - callerid     - Server Side Ignore (for privmsgs etc)
+	 * +i - invisible    - Not shown in NAMES or WHO unless you share
+	 *                     a channel
+	 * +k - skill        - See server generated KILL messages
+	 * +l - locops       - See LOCOPS messages
+	 * +n - nchange      - See client nick changes
+	 * +r - rej          - See rejected client notices
+	 * +s - servnotice   - See general server notices
+	 * +u - unauth       - See unauthorized client notices
+	 * +w - wallop       - See server generated WALLOPS
+	 * +x - external     - See remote server connection and split notices
+	 * +y - spy          - See LINKS, STATS, TRACE notices etc.
+	 * +z - operwall     - See oper generated WALLOPS
+	 */
+
+	/* oper_only_umodes: usermodes only opers may set */
+	oper_only_umodes = bots, cconn, debug, full, skill, nchange, 
+			   rej, spy, external, operwall, locops, unauth;
+
+	/* oper_umodes: default usermodes opers get when they /oper */
+	oper_umodes = bots, locops, servnotice, operwall, wallop;
+
+	/*
+	 * servlink_path: path to 'servlink' program used by ircd to handle
+	 * encrypted/compressed server <-> server links.
+	 *
+	 * only define if servlink is not in same directory as ircd itself.
+	 */
+	#servlink_path = "/usr/local/ircd/bin/servlink";
+
+	/*
+	 * default_cipher_preference: default cipher to use for cryptlink when none is
+	 * specified in connect block.
+	 */
+	#default_cipher_preference = "BF/168";
+
+	/*
+	 * use_egd: if your system does not have *random devices yet you
+	 * want to use OpenSSL and encrypted links, enable this.  Beware -
+	 * EGD is *very* CPU intensive when gathering data for its pool
+	 */
+#	use_egd = yes;
+
+	/*
+	 * egdpool_path: path to EGD pool. Not necessary for OpenSSL >= 0.9.7
+	 * which automatically finds the path.
+	 */
+#	egdpool_path = "/run/egd-pool";
+
+
+	/*
+	 * compression_level: level of compression for compressed links between
+	 * servers.  
+	 *
+	 * values are between: 1 (least compression, fastest)
+	 *                and: 9 (most compression, slowest).
+	 */
+#	compression_level = 6;
+
+	/*
+	 * throttle_time: the minimum amount of time between connections from
+	 * the same ip.  exempt {} blocks are excluded from this throttling.
+	 * Offers protection against flooders who reconnect quickly.  
+	 * Set to 0 to disable.
+	 */
+	throttle_time = 10;
+};
+
+glines {
+	/* enable: enable glines, network wide temp klines */
+	enable = yes;
+
+	/*
+	 * duration: the amount of time a gline will remain on your
+	 * server before expiring
+	 */
+	duration = 1 day;
+
+	/*
+	 * logging: which types of rules you want to log when triggered
+	 * (choose reject or block)
+	 */
+	logging = reject, block;
+
+	/*
+	 * NOTE: gline ACLs can cause a desync of glines throughout the
+	 * network, meaning some servers may have a gline triggered, and
+	 * others may not. Also, you only need insert rules for glines
+	 * that you want to block and/or reject. If you want to accept and
+	 * propagate the gline, do NOT put a rule for it.
+	 */
+
+	/* user@host for rule to apply to */
+	user = "god@I.still.hate.packets";
+	/* server for rule to apply to */
+	name = "hades.arpa";
+
+	/*
+	 * action: action to take when a matching gline is found. options are:
+	 *  reject	- do not apply the gline locally
+	 *  block	- do not propagate the gline
+	 */
+	action = reject, block;
+
+	user = "god@*";
+	name = "*";
+	action = block;
+};
+
diff --git a/nixpkgs/nixos/modules/services/networking/iscsi/initiator.nix b/nixpkgs/nixos/modules/services/networking/iscsi/initiator.nix
new file mode 100644
index 000000000000..6c30f89b7968
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iscsi/initiator.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }: with lib;
+let
+  cfg = config.services.openiscsi;
+in
+{
+  options.services.openiscsi = with types; {
+    enable = mkEnableOption (lib.mdDoc "the openiscsi iscsi daemon");
+    enableAutoLoginOut = mkEnableOption (lib.mdDoc ''
+      automatic login and logout of all automatic targets.
+      You probably do not want this.
+    '');
+    discoverPortal = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc "Portal to discover targets on";
+    };
+    name = mkOption {
+      type = str;
+      description = lib.mdDoc "Name of this iscsi initiator";
+      example = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
+    };
+    package = mkOption {
+      type = package;
+      description = lib.mdDoc "openiscsi package to use";
+      default = pkgs.openiscsi;
+      defaultText = literalExpression "pkgs.openiscsi";
+    };
+
+    extraConfig = mkOption {
+      type = str;
+      default = "";
+      description = lib.mdDoc "Lines to append to default iscsid.conf";
+    };
+
+    extraConfigFile = mkOption {
+      description = lib.mdDoc ''
+        Append an additional file's contents to /etc/iscsid.conf. Use a non-store path
+        and store passwords in this file.
+      '';
+      default = null;
+      type = nullOr str;
+    };
+  };
+
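+  # Editor's note: a hypothetical usage sketch, not part of the module; the initiator name
+  # follows the option example above, and the portal address is made up.
+  #
+  #   services.openiscsi = {
+  #     enable = true;
+  #     name = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
+  #     discoverPortal = "192.168.1.1:3260";
+  #   };
+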
+  config = mkIf cfg.enable {
+    environment.etc."iscsi/iscsid.conf.fragment".source = pkgs.runCommand "iscsid.conf" {} ''
+      cat "${cfg.package}/etc/iscsi/iscsid.conf" > $out
+      cat << 'EOF' >> $out
+      ${cfg.extraConfig}
+      ${optionalString cfg.enableAutoLoginOut "node.startup = automatic"}
+      EOF
+    '';
+    environment.etc."iscsi/initiatorname.iscsi".text = "InitiatorName=${cfg.name}";
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.services."iscsid" = {
+      wantedBy = [ "multi-user.target" ];
+      preStart =
+        let
+          extraCfgDumper = optionalString (cfg.extraConfigFile != null) ''
+            if [ -f "${cfg.extraConfigFile}" ]; then
+              printf "\n# The following is from ${cfg.extraConfigFile}:\n"
+              cat "${cfg.extraConfigFile}"
+            else
+              echo "Warning: services.openiscsi.extraConfigFile ${cfg.extraConfigFile} does not exist!" >&2
+            fi
+          '';
+        in ''
+          (
+            cat ${config.environment.etc."iscsi/iscsid.conf.fragment".source}
+            ${extraCfgDumper}
+          ) > /etc/iscsi/iscsid.conf
+        '';
+    };
+    systemd.sockets."iscsid".wantedBy = [ "sockets.target" ];
+
+    systemd.services."iscsi" = mkIf cfg.enableAutoLoginOut {
+      wantedBy = [ "remote-fs.target" ];
+      serviceConfig.ExecStartPre = mkIf (cfg.discoverPortal != null) "${cfg.package}/bin/iscsiadm --mode discoverydb --type sendtargets --portal ${escapeShellArg cfg.discoverPortal} --discover";
+    };
+
+    environment.systemPackages = [ cfg.package ];
+    boot.kernelModules = [ "iscsi_tcp" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/iscsi/root-initiator.nix b/nixpkgs/nixos/modules/services/networking/iscsi/root-initiator.nix
new file mode 100644
index 000000000000..895467cc674a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iscsi/root-initiator.nix
@@ -0,0 +1,194 @@
+{ config, lib, pkgs, ... }: with lib;
+let
+  cfg = config.boot.iscsi-initiator;
+in
+{
+  # If you're booting entirely off another machine you may want to add
+  # this snippet to always boot the latest "system" version. It is not
+  # enabled by default in case you have an initrd on a local disk:
+  #
+  #     boot.initrd.postMountCommands = ''
+  #       ln -sfn /nix/var/nix/profiles/system/init /mnt-root/init
+  #       stage2Init=/init
+  #     '';
+  #
+  # Note: Theoretically you might want to connect to multiple portals and
+  # log in to multiple targets, however the authors of this module so far
+  # don't have the need or expertise to reasonably implement it. Also,
+  # consider carefully before making your boot chain depend on multiple
+  # machines to be up.
+  options.boot.iscsi-initiator = with types; {
+    name = mkOption {
+      description = lib.mdDoc ''
+        Name of the iSCSI initiator to boot from. Note that booting from iSCSI
+        requires networkd-based networking.
+      '';
+      default = null;
+      example = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
+      type = nullOr str;
+    };
+
+    discoverPortal = mkOption {
+      description = lib.mdDoc ''
+        iSCSI portal to boot from.
+      '';
+      default = null;
+      example = "192.168.1.1:3260";
+      type = nullOr str;
+    };
+
+    target = mkOption {
+      description = lib.mdDoc ''
+        Name of the iSCSI target to boot from.
+      '';
+      default = null;
+      example = "iqn.2020-08.org.linux-iscsi.targethost:example";
+      type = nullOr str;
+    };
+
+    logLevel = mkOption {
+      description = lib.mdDoc ''
+        Higher numbers elicit more logs.
+      '';
+      default = 1;
+      example = 8;
+      type = int;
+    };
+
+    loginAll = mkOption {
+      description = lib.mdDoc ''
+        Do not log into a specific target on the portal, but to all that we discover.
+        This overrides setting target.
+      '';
+      type = bool;
+      default = false;
+    };
+
+    extraIscsiCommands = mkOption {
+      description = lib.mdDoc "Extra iscsi commands to run in the initrd.";
+      default = "";
+      type = lines;
+    };
+
+    extraConfig = mkOption {
+      description = lib.mdDoc "Extra lines to append to /etc/iscsid.conf";
+      default = null;
+      type = nullOr lines;
+    };
+
+    extraConfigFile = mkOption {
+      description = lib.mdDoc ''
+        Append an additional file's contents to `/etc/iscsid.conf`. Use a path outside the
+        Nix store and keep passwords in this file rather than in `extraConfig`. Note: the file
+        specified here must be available in the initrd, see `boot.initrd.secrets`.
+      '';
+      default = null;
+      type = nullOr str;
+    };
+  };
+
+  config = mkIf (cfg.name != null) {
+    # The "scripted" networking configuration (ie: non-networkd)
+    # doesn't properly order the start and stop of the interfaces, and the
+    # network interfaces are torn down before unmounting disks. Since this
+    # module is specifically for very-early-boot network mounts, we need
+    # the network to stay on.
+    #
+    # We could probably fix the scripted options to properly order, but I'm
+    # not inclined to invest that time today. Hopefully this gets users far
+    # enough along and they can just use networkd.
+    networking.useNetworkd = true;
+    networking.useDHCP = false; # Required to set useNetworkd = true
+
+    boot.initrd = {
+      network.enable = true;
+
+      # By default, the stage-1 disables the network and resets the interfaces
+      # on startup. Since our startup disks are on the network, we can't let
+      # the network not work.
+      network.flushBeforeStage2 = false;
+
+      kernelModules = [ "iscsi_tcp" ];
+
+      extraUtilsCommands = ''
+        copy_bin_and_libs ${pkgs.openiscsi}/bin/iscsid
+        copy_bin_and_libs ${pkgs.openiscsi}/bin/iscsiadm
+        ${optionalString (!config.boot.initrd.network.ssh.enable) "cp -pv ${pkgs.glibc.out}/lib/libnss_files.so.* $out/lib"}
+
+        mkdir -p $out/etc/iscsi
+        cp ${config.environment.etc.hosts.source} $out/etc/hosts
+        cp ${pkgs.openiscsi}/etc/iscsi/iscsid.conf $out/etc/iscsi/iscsid.fragment.conf
+        chmod +w $out/etc/iscsi/iscsid.fragment.conf
+        cat << 'EOF' >> $out/etc/iscsi/iscsid.fragment.conf
+        ${optionalString (cfg.extraConfig != null) cfg.extraConfig}
+        EOF
+      '';
+
+      extraUtilsCommandsTest = ''
+        $out/bin/iscsiadm --version
+      '';
+
+      preLVMCommands = let
+        extraCfgDumper = optionalString (cfg.extraConfigFile != null) ''
+          if [ -f "${cfg.extraConfigFile}" ]; then
+            printf "\n# The following is from ${cfg.extraConfigFile}:\n"
+            cat "${cfg.extraConfigFile}"
+          else
+            echo "Warning: boot.iscsi-initiator.extraConfigFile ${cfg.extraConfigFile} does not exist!" >&2
+          fi
+        '';
+      in ''
+        ${optionalString (!config.boot.initrd.network.ssh.enable) ''
+        # stolen from initrd-ssh.nix
+        echo 'root:x:0:0:root:/root:/bin/ash' > /etc/passwd
+        echo 'passwd: files' > /etc/nsswitch.conf
+      ''}
+
+        cp -f $extraUtils/etc/hosts /etc/hosts
+
+        mkdir -p /etc/iscsi /run/lock/iscsi
+        echo "InitiatorName=${cfg.name}" > /etc/iscsi/initiatorname.iscsi
+
+        (
+          cat "$extraUtils/etc/iscsi/iscsid.fragment.conf"
+          printf "\n"
+          ${optionalString cfg.loginAll ''echo "node.startup = automatic"''}
+          ${extraCfgDumper}
+        ) > /etc/iscsi/iscsid.conf
+
+        iscsid --foreground --no-pid-file --debug ${toString cfg.logLevel} &
+        iscsiadm --mode discoverydb \
+          --type sendtargets \
+          --discover \
+          --portal ${escapeShellArg cfg.discoverPortal} \
+          --debug ${toString cfg.logLevel}
+
+        ${if cfg.loginAll then ''
+        iscsiadm --mode node --loginall all
+      '' else ''
+        iscsiadm --mode node --targetname ${escapeShellArg cfg.target} --login
+      ''}
+
+        ${cfg.extraIscsiCommands}
+
+        pkill -9 iscsid
+      '';
+    };
+
+    services.openiscsi = {
+      enable = true;
+      inherit (cfg) name;
+    };
+
+    assertions = [
+      {
+        assertion = cfg.loginAll -> cfg.target == null;
+        message = "iSCSI target name is set while login on all portals is enabled.";
+      }
+      {
+        assertion = !config.boot.initrd.systemd.enable;
+        message = "systemd stage 1 does not support iscsi yet.";
+      }
+    ];
+  };
+}
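A minimal sketch of a configuration that boots its root filesystem over iSCSI with this module; the initiator/target IQNs, portal address and device path below are placeholders, not defaults:

    {
      boot.iscsi-initiator = {
        name = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";  # placeholder initiator IQN
        discoverPortal = "192.168.1.1:3260";                         # placeholder portal
        target = "iqn.2020-08.org.linux-iscsi.targethost:example";   # placeholder target IQN
      };

      # Illustrative only: the by-path name depends on the portal and target above.
      fileSystems."/" = {
        device = "/dev/disk/by-path/ip-192.168.1.1:3260-iscsi-iqn.2020-08.org.linux-iscsi.targethost:example-lun-0";
        fsType = "ext4";
      };
    }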
diff --git a/nixpkgs/nixos/modules/services/networking/iscsi/target.nix b/nixpkgs/nixos/modules/services/networking/iscsi/target.nix
new file mode 100644
index 000000000000..88eaf4590030
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iscsi/target.nix
@@ -0,0 +1,53 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.target;
+in
+{
+  ###### interface
+  options = {
+    services.target = with types; {
+      enable = mkEnableOption (lib.mdDoc "the kernel's LIO iscsi target");
+
+      config = mkOption {
+        type = attrs;
+        default = {};
+        description = lib.mdDoc ''
+          Content of /etc/target/saveconfig.json
+          This file is normally read and written by targetcli
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.etc."target/saveconfig.json" = {
+      text = builtins.toJSON cfg.config;
+      mode = "0600";
+    };
+
+    environment.systemPackages = with pkgs; [ targetcli ];
+
+    boot.kernelModules = [ "configfs" "target_core_mod" "iscsi_target_mod" ];
+
+    systemd.services.iscsi-target = {
+      enable = true;
+      after = [ "network.target" "local-fs.target" ];
+      requires = [ "sys-kernel-config.mount" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.python3.pkgs.rtslib}/bin/targetctl restore";
+        ExecStop = "${pkgs.python3.pkgs.rtslib}/bin/targetctl clear";
+        RemainAfterExit = "yes";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /etc/target 0700 root root - -"
+    ];
+  };
+}
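A minimal usage sketch for the LIO target module; since services.target.config is serialized verbatim to /etc/target/saveconfig.json, the usual workflow is to build the layout interactively with targetcli and copy the resulting JSON here (the empty set below is just a starting point):

    {
      services.target.enable = true;
      # Populate from a saveconfig.json produced by `targetcli saveconfig`.
      services.target.config = { };
    }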
diff --git a/nixpkgs/nixos/modules/services/networking/ivpn.nix b/nixpkgs/nixos/modules/services/networking/ivpn.nix
new file mode 100644
index 000000000000..6df630c1f194
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ivpn.nix
@@ -0,0 +1,51 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.ivpn;
+in
+with lib;
+{
+  options.services.ivpn = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        This option enables the iVPN daemon.
+        This sets {option}`networking.firewall.checkReversePath` to "loose", which might be undesirable for security.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.kernelModules = [ "tun" ];
+
+    environment.systemPackages = with pkgs; [ ivpn ivpn-service ];
+
+    # iVPN writes to /etc/iproute2/rt_tables
+    networking.iproute2.enable = true;
+    networking.firewall.checkReversePath = "loose";
+
+    systemd.services.ivpn-service = {
+      description = "iVPN daemon";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network.target" ];
+      after = [
+        "network-online.target"
+        "NetworkManager.service"
+        "systemd-resolved.service"
+      ];
+      path = [
+        # Needed for mount
+        "/run/wrappers"
+      ];
+      startLimitBurst = 5;
+      startLimitIntervalSec = 20;
+      serviceConfig = {
+        ExecStart = "${pkgs.ivpn-service}/bin/ivpn-service --logging";
+        Restart = "always";
+        RestartSec = 1;
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ ataraxiasjel ];
+}
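Usage is a single switch; the daemon is then controlled imperatively with the iVPN client tools the module installs:

    { services.ivpn.enable = true; }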
diff --git a/nixpkgs/nixos/modules/services/networking/iwd.nix b/nixpkgs/nixos/modules/services/networking/iwd.nix
new file mode 100644
index 000000000000..993a603c1ed5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/iwd.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    mkEnableOption mkIf mkOption types
+    recursiveUpdate;
+
+  cfg = config.networking.wireless.iwd;
+  ini = pkgs.formats.ini { };
+  defaults = {
+    # without UseDefaultInterface, sometimes wlan0 simply goes AWOL with NetworkManager
+    # https://iwd.wiki.kernel.org/interface_lifecycle#interface_management_in_iwd
+    General.UseDefaultInterface = with config.networking.networkmanager; (enable && (wifi.backend == "iwd"));
+  };
+  configFile = ini.generate "main.conf" (recursiveUpdate defaults cfg.settings);
+
+in
+{
+  options.networking.wireless.iwd = {
+    enable = mkEnableOption (lib.mdDoc "iwd");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.iwd;
+      defaultText = lib.literalExpression "pkgs.iwd";
+      description = lib.mdDoc ''
+        The iwd package to use.
+      '';
+    };
+
+    settings = mkOption {
+      type = ini.type;
+      default = { };
+
+      example = {
+        Settings.AutoConnect = true;
+
+        Network = {
+          EnableIPv6 = true;
+          RoutePriorityOffset = 300;
+        };
+      };
+
+      description = lib.mdDoc ''
+        Options passed to iwd.
+        See [here](https://iwd.wiki.kernel.org/networkconfigurationsettings) for supported options.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [{
+      assertion = !config.networking.wireless.enable;
+      message = ''
+        Only one wireless daemon is allowed at a time: networking.wireless.enable and networking.wireless.iwd.enable are mutually exclusive.
+      '';
+    }];
+
+    environment.etc."iwd/${configFile.name}".source = configFile;
+
+    # for iwctl
+    environment.systemPackages = [ cfg.package ];
+
+    services.dbus.packages = [ cfg.package ];
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.network.links."80-iwd" = {
+      matchConfig.Type = "wlan";
+      linkConfig.NamePolicy = "keep kernel";
+    };
+
+    systemd.services.iwd = {
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ configFile ];
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ dtzWill ];
+}
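A usage sketch with a couple of freeform settings; the values shown are arbitrary choices, not module defaults:

    {
      networking.wireless.iwd = {
        enable = true;
        settings = {
          # Assumption: let iwd handle IP configuration itself, with no other network manager owning the interface.
          General.EnableNetworkConfiguration = true;
          Network.EnableIPv6 = true;
        };
      };
    }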
diff --git a/nixpkgs/nixos/modules/services/networking/jibri/default.nix b/nixpkgs/nixos/modules/services/networking/jibri/default.nix
new file mode 100644
index 000000000000..a931831fc281
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/jibri/default.nix
@@ -0,0 +1,417 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jibri;
+
+  # Copied from the jitsi-videobridge.nix file.
+  toHOCON = x:
+    if isAttrs x && x ? __hocon_envvar then ("\${" + x.__hocon_envvar + "}")
+    else if isAttrs x then "{${ concatStringsSep "," (mapAttrsToList (k: v: ''"${k}":${toHOCON v}'') x) }}"
+    else if isList x then "[${ concatMapStringsSep "," toHOCON x }]"
+    else builtins.toJSON x;
+
+  # We're passing passwords in environment variables that have names generated
+  # from an attribute name, which may not be a valid bash identifier.
+  toVarName = s: "XMPP_PASSWORD_" + stringAsChars (c: if builtins.match "[A-Za-z0-9]" c != null then c else "_") s;
+
+  defaultJibriConfig = {
+    id = "";
+    single-use-mode = false;
+
+    api = {
+      http.external-api-port = 2222;
+      http.internal-api-port = 3333;
+
+      xmpp.environments = flip mapAttrsToList cfg.xmppEnvironments (name: env: {
+        inherit name;
+
+        xmpp-server-hosts = env.xmppServerHosts;
+        xmpp-domain = env.xmppDomain;
+        control-muc = {
+          domain = env.control.muc.domain;
+          room-name = env.control.muc.roomName;
+          nickname = env.control.muc.nickname;
+        };
+
+        control-login = {
+          domain = env.control.login.domain;
+          username = env.control.login.username;
+          password.__hocon_envvar = toVarName "${name}_control";
+        };
+
+        call-login = {
+          domain = env.call.login.domain;
+          username = env.call.login.username;
+          password.__hocon_envvar = toVarName "${name}_call";
+        };
+
+        strip-from-room-domain = env.stripFromRoomDomain;
+        usage-timeout = env.usageTimeout;
+        trust-all-xmpp-certs = env.disableCertificateVerification;
+      });
+    };
+
+    recording = {
+      recordings-directory = "/tmp/recordings";
+      finalize-script = "${cfg.finalizeScript}";
+    };
+
+    streaming.rtmp-allow-list = [ ".*" ];
+
+    chrome.flags = [
+      "--use-fake-ui-for-media-stream"
+      "--start-maximized"
+      "--kiosk"
+      "--enabled"
+      "--disable-infobars"
+      "--autoplay-policy=no-user-gesture-required"
+    ]
+    ++ lists.optional cfg.ignoreCert
+      "--ignore-certificate-errors";
+
+
+    stats.enable-stats-d = true;
+    webhook.subscribers = [ ];
+
+    jwt-info = { };
+
+    call-status-checks = {
+      no-media-timeout = "30 seconds";
+      all-muted-timeout = "10 minutes";
+      default-call-empty-timeout = "30 seconds";
+    };
+  };
+  # Allow overriding leaves of the default config despite types.attrs not doing any merging.
+  jibriConfig = recursiveUpdate defaultJibriConfig cfg.config;
+  configFile = pkgs.writeText "jibri.conf" (toHOCON { jibri = jibriConfig; });
+in
+{
+  options.services.jibri = with types; {
+    enable = mkEnableOption (lib.mdDoc "Jitsi BRoadcasting Infrastructure. Currently Jibri must be run on a host that is also running {option}`services.jitsi-meet.enable`, so for most use cases it will be simpler to run {option}`services.jitsi-meet.jibri.enable`");
+    config = mkOption {
+      type = attrs;
+      default = { };
+      description = lib.mdDoc ''
+        Jibri configuration.
+        See <https://github.com/jitsi/jibri/blob/master/src/main/resources/reference.conf>
+        for default configuration with comments.
+      '';
+    };
+
+    finalizeScript = mkOption {
+      type = types.path;
+      default = pkgs.writeScript "finalize_recording.sh" ''
+        #!/bin/sh
+
+        RECORDINGS_DIR=$1
+
+        echo "This is a dummy finalize script" > /tmp/finalize.out
+        echo "The script was invoked with recordings directory $RECORDINGS_DIR." >> /tmp/finalize.out
+        echo "You should put any finalize logic (renaming, uploading to a service" >> /tmp/finalize.out
+        echo "or storage provider, etc.) in this script" >> /tmp/finalize.out
+
+        exit 0
+      '';
+      defaultText = literalExpression ''
+        pkgs.writeScript "finalize_recording.sh" ''''''
+        #!/bin/sh
+
+        RECORDINGS_DIR=$1
+
+        echo "This is a dummy finalize script" > /tmp/finalize.out
+        echo "The script was invoked with recordings directory $RECORDINGS_DIR." >> /tmp/finalize.out
+        echo "You should put any finalize logic (renaming, uploading to a service" >> /tmp/finalize.out
+        echo "or storage provider, etc.) in this script" >> /tmp/finalize.out
+
+        exit 0
+        '''''';
+      '';
+      example = literalExpression ''
+        pkgs.writeScript "finalize_recording.sh" ''''''
+        #!/bin/sh
+        RECORDINGS_DIR=$1
+        ''${pkgs.rclone}/bin/rclone copy $RECORDINGS_DIR RCLONE_REMOTE:jibri-recordings/ -v --log-file=/var/log/jitsi/jibri/recording-upload.txt
+        exit 0
+        '''''';
+      '';
+      description = lib.mdDoc ''
+        This script runs when jibri finishes recording a video of a conference.
+      '';
+    };
+
+    ignoreCert = mkOption {
+      type = bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc ''
+        Whether to enable the flag "--ignore-certificate-errors" for the Chromium browser opened by Jibri.
+        Intended for use in automated tests or anywhere else where using a verified cert for Jitsi-Meet is not possible.
+      '';
+    };
+
+    xmppEnvironments = mkOption {
+      description = lib.mdDoc ''
+        XMPP servers to connect to.
+      '';
+      example = literalExpression ''
+        "jitsi-meet" = {
+          xmppServerHosts = [ "localhost" ];
+          xmppDomain = config.services.jitsi-meet.hostName;
+
+          control.muc = {
+            domain = "internal.''${config.services.jitsi-meet.hostName}";
+            roomName = "JibriBrewery";
+            nickname = "jibri";
+          };
+
+          control.login = {
+            domain = "auth.''${config.services.jitsi-meet.hostName}";
+            username = "jibri";
+            passwordFile = "/var/lib/jitsi-meet/jibri-auth-secret";
+          };
+
+          call.login = {
+            domain = "recorder.''${config.services.jitsi-meet.hostName}";
+            username = "recorder";
+            passwordFile = "/var/lib/jitsi-meet/jibri-recorder-secret";
+          };
+
+          usageTimeout = "0";
+          disableCertificateVerification = true;
+          stripFromRoomDomain = "conference.";
+        };
+      '';
+      default = { };
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          xmppServerHosts = mkOption {
+            type = listOf str;
+            example = [ "xmpp.example.org" ];
+            description = lib.mdDoc ''
+              Hostnames of the XMPP servers to connect to.
+            '';
+          };
+          xmppDomain = mkOption {
+            type = str;
+            example = "xmpp.example.org";
+            description = lib.mdDoc ''
+              The base XMPP domain.
+            '';
+          };
+          control.muc.domain = mkOption {
+            type = str;
+            description = lib.mdDoc ''
+              The domain part of the MUC to connect to for control.
+            '';
+          };
+          control.muc.roomName = mkOption {
+            type = str;
+            default = "JibriBrewery";
+            description = lib.mdDoc ''
+              The room name of the MUC to connect to for control.
+            '';
+          };
+          control.muc.nickname = mkOption {
+            type = str;
+            default = "jibri";
+            description = lib.mdDoc ''
+              The nickname for this Jibri instance in the MUC.
+            '';
+          };
+          control.login.domain = mkOption {
+            type = str;
+            description = lib.mdDoc ''
+              The domain part of the JID for this Jibri instance.
+            '';
+          };
+          control.login.username = mkOption {
+            type = str;
+            default = "jvb";
+            description = lib.mdDoc ''
+              User part of the JID.
+            '';
+          };
+          control.login.passwordFile = mkOption {
+            type = str;
+            example = "/run/keys/jibri-xmpp1";
+            description = lib.mdDoc ''
+              File containing the password for the user.
+            '';
+          };
+
+          call.login.domain = mkOption {
+            type = str;
+            example = "recorder.xmpp.example.org";
+            description = lib.mdDoc ''
+              The domain part of the JID for the recorder.
+            '';
+          };
+          call.login.username = mkOption {
+            type = str;
+            default = "recorder";
+            description = lib.mdDoc ''
+              User part of the JID for the recorder.
+            '';
+          };
+          call.login.passwordFile = mkOption {
+            type = str;
+            example = "/run/keys/jibri-recorder-xmpp1";
+            description = lib.mdDoc ''
+              File containing the password for the user.
+            '';
+          };
+          disableCertificateVerification = mkOption {
+            type = bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether to skip validation of the server's certificate.
+            '';
+          };
+
+          stripFromRoomDomain = mkOption {
+            type = str;
+            default = "0";
+            example = "conference.";
+            description = lib.mdDoc ''
+              The prefix to strip from the room's JID domain to derive the call URL.
+            '';
+          };
+          usageTimeout = mkOption {
+            type = str;
+            default = "0";
+            example = "1 hour";
+            description = lib.mdDoc ''
+              The maximum duration of a Jibri session.
+              A value of zero means the session may run indefinitely.
+            '';
+          };
+        };
+
+        config =
+          let
+            nick = mkDefault (builtins.replaceStrings [ "." ] [ "-" ] (
+              config.networking.hostName + optionalString (config.networking.domain != null) ".${config.networking.domain}"
+            ));
+          in
+          {
+            call.login.username = nick;
+            control.muc.nickname = nick;
+          };
+      }));
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.jibri = { };
+    users.groups.plugdev = { };
+    users.users.jibri = {
+      isSystemUser = true;
+      group = "jibri";
+      home = "/var/lib/jibri";
+      extraGroups = [ "jitsi-meet" "adm" "audio" "video" "plugdev" ];
+    };
+
+    systemd.services.jibri-xorg = {
+      description = "Jitsi Xorg Process";
+
+      after = [ "network.target" ];
+      wantedBy = [ "jibri.service" "jibri-icewm.service" ];
+
+      preStart = ''
+        cp --no-preserve=mode,ownership ${pkgs.jibri}/etc/jitsi/jibri/* /var/lib/jibri
+        mv /var/lib/jibri/{,.}asoundrc
+      '';
+
+      environment.DISPLAY = ":0";
+      serviceConfig = {
+        Type = "simple";
+
+        User = "jibri";
+        Group = "jibri";
+        KillMode = "process";
+        Restart = "on-failure";
+        RestartPreventExitStatus = 255;
+
+        StateDirectory = "jibri";
+
+        ExecStart = "${pkgs.xorg.xorgserver}/bin/Xorg -nocursor -noreset +extension RANDR +extension RENDER -config ${pkgs.jibri}/etc/jitsi/jibri/xorg-video-dummy.conf -logfile /dev/null :0";
+      };
+    };
+
+    systemd.services.jibri-icewm = {
+      description = "Jitsi Window Manager";
+
+      requires = [ "jibri-xorg.service" ];
+      after = [ "jibri-xorg.service" ];
+      wantedBy = [ "jibri.service" ];
+
+      environment.DISPLAY = ":0";
+      serviceConfig = {
+        Type = "simple";
+
+        User = "jibri";
+        Group = "jibri";
+        Restart = "on-failure";
+        RestartPreventExitStatus = 255;
+
+        StateDirectory = "jibri";
+
+        ExecStart = "${pkgs.icewm}/bin/icewm-session";
+      };
+    };
+
+    systemd.services.jibri = {
+      description = "Jibri Process";
+
+      requires = [ "jibri-icewm.service" "jibri-xorg.service" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      path = with pkgs; [ chromedriver chromium ffmpeg-full ];
+
+      script = (concatStrings (mapAttrsToList
+        (name: env: ''
+          export ${toVarName "${name}_control"}=$(cat ${env.control.login.passwordFile})
+          export ${toVarName "${name}_call"}=$(cat ${env.call.login.passwordFile})
+        '')
+        cfg.xmppEnvironments))
+      + ''
+        ${pkgs.jdk11_headless}/bin/java -Djava.util.logging.config.file=${./logging.properties-journal} -Dconfig.file=${configFile} -jar ${pkgs.jibri}/opt/jitsi/jibri/jibri.jar --config /var/lib/jibri/jibri.json
+      '';
+
+      environment.HOME = "/var/lib/jibri";
+
+      serviceConfig = {
+        Type = "simple";
+
+        User = "jibri";
+        Group = "jibri";
+        Restart = "always";
+        RestartPreventExitStatus = 255;
+
+        StateDirectory = "jibri";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /var/log/jitsi/jibri 755 jibri jibri"
+    ];
+
+    # Configure Chromium to not show the "Chrome is being controlled by automatic test software" message.
+    environment.etc."chromium/policies/managed/managed_policies.json".text = builtins.toJSON { CommandLineFlagSecurityWarningsEnabled = false; };
+    warnings = [ "All security warnings for Chromium have been disabled. This is necessary for Jibri, but it also impacts all other uses of Chromium on this system." ];
+
+    boot = {
+      extraModprobeConfig = ''
+        options snd-aloop enable=1,1,1,1,1,1,1,1
+      '';
+      kernelModules = [ "snd-aloop" ];
+    };
+  };
+
+  meta.maintainers = lib.teams.jitsi.members;
+}
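For the co-located setup recommended in the enable description above, the simplest path is to let the jitsi-meet module drive Jibri; the host name below is a placeholder:

    {
      services.jitsi-meet = {
        enable = true;
        hostName = "meet.example.org";  # placeholder
        jibri.enable = true;            # configures services.jibri for this deployment
      };
    }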
diff --git a/nixpkgs/nixos/modules/services/networking/jibri/logging.properties-journal b/nixpkgs/nixos/modules/services/networking/jibri/logging.properties-journal
new file mode 100644
index 000000000000..61eadbfddcb3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/jibri/logging.properties-journal
@@ -0,0 +1,32 @@
+handlers = java.util.logging.FileHandler
+
+java.util.logging.FileHandler.level = FINE
+java.util.logging.FileHandler.pattern   = /var/log/jitsi/jibri/log.%g.txt
+java.util.logging.FileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter
+java.util.logging.FileHandler.count = 10
+java.util.logging.FileHandler.limit = 10000000
+
+org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.level = FINE
+org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.pattern   = /var/log/jitsi/jibri/ffmpeg.%g.txt
+org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter
+org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.count = 10
+org.jitsi.jibri.capture.ffmpeg.util.FfmpegFileHandler.limit = 10000000
+
+org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.level = FINE
+org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.pattern   = /var/log/jitsi/jibri/pjsua.%g.txt
+org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter
+org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.count = 10
+org.jitsi.jibri.sipgateway.pjsua.util.PjsuaFileHandler.limit = 10000000
+
+org.jitsi.jibri.selenium.util.BrowserFileHandler.level = FINE
+org.jitsi.jibri.selenium.util.BrowserFileHandler.pattern   = /var/log/jitsi/jibri/browser.%g.txt
+org.jitsi.jibri.selenium.util.BrowserFileHandler.formatter = net.java.sip.communicator.util.ScLogFormatter
+org.jitsi.jibri.selenium.util.BrowserFileHandler.count = 10
+org.jitsi.jibri.selenium.util.BrowserFileHandler.limit = 10000000
+
+org.jitsi.level = FINE
+org.jitsi.jibri.config.level = INFO
+
+org.glassfish.level = INFO
+org.osgi.level = INFO
+org.jitsi.xmpp.level = INFO
diff --git a/nixpkgs/nixos/modules/services/networking/jicofo.nix b/nixpkgs/nixos/modules/services/networking/jicofo.nix
new file mode 100644
index 000000000000..0886bbe004c4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/jicofo.nix
@@ -0,0 +1,166 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jicofo;
+
+  # HOCON is a JSON superset that some jitsi-meet components use for configuration
+  toHOCON = x: if isAttrs x && x ? __hocon_envvar then ("\${" + x.__hocon_envvar + "}")
+    else if isAttrs x && x ? __hocon_unquoted_string then x.__hocon_unquoted_string
+    else if isAttrs x then "{${ concatStringsSep "," (mapAttrsToList (k: v: ''"${k}":${toHOCON v}'') x) }}"
+    else if isList x then "[${ concatMapStringsSep "," toHOCON x }]"
+    else builtins.toJSON x;
+
+  configFile = pkgs.writeText "jicofo.conf" (toHOCON cfg.config);
+in
+{
+  options.services.jicofo = with types; {
+    enable = mkEnableOption (lib.mdDoc "Jitsi Conference Focus - component of Jitsi Meet");
+
+    xmppHost = mkOption {
+      type = str;
+      example = "localhost";
+      description = lib.mdDoc ''
+        Hostname of the XMPP server to connect to.
+      '';
+    };
+
+    xmppDomain = mkOption {
+      type = nullOr str;
+      example = "meet.example.org";
+      description = lib.mdDoc ''
+        Domain name of the XMPP server to which to connect as a component.
+
+        If null, {option}`xmppHost` is used.
+      '';
+    };
+
+    componentPasswordFile = mkOption {
+      type = str;
+      example = "/run/keys/jicofo-component";
+      description = lib.mdDoc ''
+        Path to file containing component secret.
+      '';
+    };
+
+    userName = mkOption {
+      type = str;
+      default = "focus";
+      description = lib.mdDoc ''
+        User part of the JID for XMPP user connection.
+      '';
+    };
+
+    userDomain = mkOption {
+      type = str;
+      example = "auth.meet.example.org";
+      description = lib.mdDoc ''
+        Domain part of the JID for XMPP user connection.
+      '';
+    };
+
+    userPasswordFile = mkOption {
+      type = str;
+      example = "/run/keys/jicofo-user";
+      description = lib.mdDoc ''
+        Path to file containing password for XMPP user connection.
+      '';
+    };
+
+    bridgeMuc = mkOption {
+      type = str;
+      example = "jvbbrewery@internal.meet.example.org";
+      description = lib.mdDoc ''
+        JID of the internal MUC used to communicate with Videobridges.
+      '';
+    };
+
+    config = mkOption {
+      type = (pkgs.formats.json {}).type;
+      default = { };
+      example = literalExpression ''
+        {
+          jicofo.bridge.max-bridge-participants = 42;
+        }
+      '';
+      description = lib.mdDoc ''
+        Contents of the {file}`jicofo.conf` configuration file.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.jicofo.config = {
+      jicofo = {
+        bridge.brewery-jid = cfg.bridgeMuc;
+        xmpp = rec {
+          client = {
+            hostname = cfg.xmppHost;
+            username = cfg.userName;
+            domain = cfg.userDomain;
+            password = { __hocon_envvar = "JICOFO_AUTH_PASS"; };
+            xmpp-domain = if cfg.xmppDomain == null then cfg.xmppHost else cfg.xmppDomain;
+          };
+          service = client;
+        };
+      };
+    };
+
+    users.groups.jitsi-meet = {};
+
+    systemd.services.jicofo = let
+      jicofoProps = {
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_LOCATION" = "/etc/jitsi";
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_NAME" = "jicofo";
+        "-Djava.util.logging.config.file" = "/etc/jitsi/jicofo/logging.properties";
+        "-Dconfig.file" = configFile;
+      };
+    in
+    {
+      description = "JItsi COnference FOcus";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      restartTriggers = [
+        configFile
+      ];
+      environment.JAVA_SYS_PROPS = concatStringsSep " " (mapAttrsToList (k: v: "${k}=${toString v}") jicofoProps);
+
+      script = ''
+        export JICOFO_AUTH_PASS="$(<${cfg.userPasswordFile})"
+        exec "${pkgs.jicofo}/bin/jicofo"
+      '';
+
+      serviceConfig = {
+        Type = "exec";
+
+        DynamicUser = true;
+        User = "jicofo";
+        Group = "jitsi-meet";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+      };
+    };
+
+    environment.etc."jitsi/jicofo/sip-communicator.properties".text = "";
+    environment.etc."jitsi/jicofo/logging.properties".source =
+      mkDefault "${pkgs.jicofo}/etc/jitsi/jicofo/logging.properties-journal";
+  };
+
+  meta.maintainers = lib.teams.jitsi.members;
+}
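A hand-configured sketch for a standalone Jicofo pointing at a local XMPP server; all host names and secret paths are placeholders (the jitsi-meet module normally fills these in):

    {
      services.jicofo = {
        enable = true;
        xmppHost = "localhost";
        xmppDomain = "meet.example.org";                       # placeholder
        userDomain = "auth.meet.example.org";                  # placeholder
        userPasswordFile = "/run/keys/jicofo-user";            # placeholder secret path
        componentPasswordFile = "/run/keys/jicofo-component";  # placeholder secret path
        bridgeMuc = "jvbbrewery@internal.meet.example.org";    # placeholder
      };
    }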
diff --git a/nixpkgs/nixos/modules/services/networking/jitsi-videobridge.nix b/nixpkgs/nixos/modules/services/networking/jitsi-videobridge.nix
new file mode 100644
index 000000000000..37b0b1e5bf50
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/jitsi-videobridge.nix
@@ -0,0 +1,293 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jitsi-videobridge;
+  attrsToArgs = a: concatStringsSep " " (mapAttrsToList (k: v: "${k}=${toString v}") a);
+
+  # HOCON is a JSON superset that videobridge2 uses for configuration.
+  # It can substitute environment variables which we use for passwords here.
+  # https://github.com/lightbend/config/blob/master/README.md
+  #
+  # Substitution for environment variable FOO is represented as attribute set
+  # { __hocon_envvar = "FOO"; }
+  toHOCON = x: if isAttrs x && x ? __hocon_envvar then ("\${" + x.__hocon_envvar + "}")
+    else if isAttrs x then "{${ concatStringsSep "," (mapAttrsToList (k: v: ''"${k}":${toHOCON v}'') x) }}"
+    else if isList x then "[${ concatMapStringsSep "," toHOCON x }]"
+    else builtins.toJSON x;
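+  # Illustration (not upstream code): toHOCON { a.b = 1; c.__hocon_envvar = "FOO"; }
+  # yields the string {"a":{"b":1},"c":${FOO}}; the HOCON parser later resolves
+  # ${FOO} from the environment of the videobridge process.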
+
+  # We're passing passwords in environment variables that have names generated
+  # from an attribute name, which may not be a valid bash identifier.
+  toVarName = s: "XMPP_PASSWORD_" + stringAsChars (c: if builtins.match "[A-Za-z0-9]" c != null then c else "_") s;
+
+  defaultJvbConfig = {
+    videobridge = {
+      ice = {
+        tcp = {
+          enabled = true;
+          port = 4443;
+        };
+        udp.port = 10000;
+      };
+      stats = {
+        enabled = true;
+        transports = [ { type = "muc"; } ];
+      };
+      apis.xmpp-client.configs = flip mapAttrs cfg.xmppConfigs (name: xmppConfig: {
+        hostname = xmppConfig.hostName;
+        domain = xmppConfig.domain;
+        username = xmppConfig.userName;
+        password = { __hocon_envvar = toVarName name; };
+        muc_jids = xmppConfig.mucJids;
+        muc_nickname = xmppConfig.mucNickname;
+        disable_certificate_verification = xmppConfig.disableCertificateVerification;
+      });
+      apis.rest.enabled = cfg.colibriRestApi;
+    };
+  };
+
+  # Allow overriding leaves of the default config despite types.attrs not doing any merging.
+  jvbConfig = recursiveUpdate defaultJvbConfig cfg.config;
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "jitsi-videobridge" "apis" ]
+      "services.jitsi-videobridge.apis was broken and has been migrated into the boolean option services.jitsi-videobridge.colibriRestApi. It is set to false by default, setting it to true will correctly enable the private /colibri rest API."
+    )
+  ];
+  options.services.jitsi-videobridge = with types; {
+    enable = mkEnableOption (lib.mdDoc "Jitsi Videobridge, a WebRTC compatible video router");
+
+    config = mkOption {
+      type = attrs;
+      default = { };
+      example = literalExpression ''
+        {
+          videobridge = {
+            ice.udp.port = 5000;
+            websockets = {
+              enabled = true;
+              server-id = "jvb1";
+            };
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Videobridge configuration.
+
+        See <https://github.com/jitsi/jitsi-videobridge/blob/master/jvb/src/main/resources/reference.conf>
+        for default configuration with comments.
+      '';
+    };
+
+    xmppConfigs = mkOption {
+      description = lib.mdDoc ''
+        XMPP servers to connect to.
+
+        See <https://github.com/jitsi/jitsi-videobridge/blob/master/doc/muc.md> for more information.
+      '';
+      default = { };
+      example = literalExpression ''
+        {
+          "localhost" = {
+            hostName = "localhost";
+            userName = "jvb";
+            domain = "auth.xmpp.example.org";
+            passwordFile = "/var/lib/jitsi-meet/videobridge-secret";
+            mucJids = "jvbbrewery@internal.xmpp.example.org";
+          };
+        }
+      '';
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          hostName = mkOption {
+            type = str;
+            example = "xmpp.example.org";
+            description = lib.mdDoc ''
+              Hostname of the XMPP server to connect to. Name of the attribute set is used by default.
+            '';
+          };
+          domain = mkOption {
+            type = nullOr str;
+            default = null;
+            example = "auth.xmpp.example.org";
+            description = lib.mdDoc ''
+              Domain part of JID of the XMPP user, if it is different from hostName.
+            '';
+          };
+          userName = mkOption {
+            type = str;
+            default = "jvb";
+            description = lib.mdDoc ''
+              User part of the JID.
+            '';
+          };
+          passwordFile = mkOption {
+            type = str;
+            example = "/run/keys/jitsi-videobridge-xmpp1";
+            description = lib.mdDoc ''
+              File containing the password for the user.
+            '';
+          };
+          mucJids = mkOption {
+            type = str;
+            example = "jvbbrewery@internal.xmpp.example.org";
+            description = lib.mdDoc ''
+              JID of the MUC to join. JiCoFo needs to be configured to join the same MUC.
+            '';
+          };
+          mucNickname = mkOption {
+            # Upstream DEBs use UUID, let's use hostname instead.
+            type = str;
+            description = lib.mdDoc ''
+              Videobridges use the same XMPP account and need to be distinguished by the
+              nickname (aka resource part of the JID). By default, system hostname is used.
+            '';
+          };
+          disableCertificateVerification = mkOption {
+            type = bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether to skip validation of the server's certificate.
+            '';
+          };
+        };
+        config = {
+          hostName = mkDefault name;
+          mucNickname = mkDefault (builtins.replaceStrings [ "." ] [ "-" ] (
+            config.networking.fqdnOrHostName
+          ));
+        };
+      }));
+    };
+
+    nat = {
+      localAddress = mkOption {
+        type = nullOr str;
+        default = null;
+        example = "192.168.1.42";
+        description = lib.mdDoc ''
+          Local address when running behind NAT.
+        '';
+      };
+
+      publicAddress = mkOption {
+        type = nullOr str;
+        default = null;
+        example = "1.2.3.4";
+        description = lib.mdDoc ''
+          Public address when running behind NAT.
+        '';
+      };
+    };
+
+    extraProperties = mkOption {
+      type = attrsOf str;
+      default = { };
+      description = lib.mdDoc ''
+        Additional Java properties passed to jitsi-videobridge.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open ports in the firewall for the videobridge.
+      '';
+    };
+
+    colibriRestApi = mkOption {
+      type = bool;
+      description = lib.mdDoc ''
+        Whether to enable the private REST API for the COLIBRI control interface.
+        Needed for monitoring Jitsi; it enables scraping of the /colibri/stats endpoint.
+      '';
+      default = false;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.jitsi-meet = {};
+
+    services.jitsi-videobridge.extraProperties = optionalAttrs (cfg.nat.localAddress != null) {
+      "org.ice4j.ice.harvest.NAT_HARVESTER_LOCAL_ADDRESS" = cfg.nat.localAddress;
+      "org.ice4j.ice.harvest.NAT_HARVESTER_PUBLIC_ADDRESS" = cfg.nat.publicAddress;
+    };
+
+    systemd.services.jitsi-videobridge2 = let
+      jvbProps = {
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_LOCATION" = "/etc/jitsi";
+        "-Dnet.java.sip.communicator.SC_HOME_DIR_NAME" = "videobridge";
+        "-Djava.util.logging.config.file" = "/etc/jitsi/videobridge/logging.properties";
+        "-Dconfig.file" = pkgs.writeText "jvb.conf" (toHOCON jvbConfig);
+        # Mitigate CVE-2021-44228
+        "-Dlog4j2.formatMsgNoLookups" = true;
+      } // (mapAttrs' (k: v: nameValuePair "-D${k}" v) cfg.extraProperties);
+    in
+    {
+      aliases = [ "jitsi-videobridge.service" ];
+      description = "Jitsi Videobridge";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment.JAVA_SYS_PROPS = attrsToArgs jvbProps;
+
+      script = (concatStrings (mapAttrsToList (name: xmppConfig:
+        "export ${toVarName name}=$(cat ${xmppConfig.passwordFile})\n"
+      ) cfg.xmppConfigs))
+      + ''
+        ${pkgs.jitsi-videobridge}/bin/jitsi-videobridge
+      '';
+
+      serviceConfig = {
+        Type = "exec";
+
+        DynamicUser = true;
+        User = "jitsi-videobridge";
+        Group = "jitsi-meet";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+
+        TasksMax = 65000;
+        LimitNPROC = 65000;
+        LimitNOFILE = 65000;
+      };
+    };
+
+    environment.etc."jitsi/videobridge/logging.properties".source =
+      mkDefault "${pkgs.jitsi-videobridge}/etc/jitsi/videobridge/logging.properties-journal";
+
+    # (from videobridge2 .deb)
+    # this sets the max, so that we can bump the JVB UDP single port buffer size.
+    boot.kernel.sysctl."net.core.rmem_max" = mkDefault 10485760;
+    boot.kernel.sysctl."net.core.netdev_max_backlog" = mkDefault 100000;
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall
+      [ jvbConfig.videobridge.ice.tcp.port ];
+    networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall
+      [ jvbConfig.videobridge.ice.udp.port ];
+
+    assertions = [{
+      message = "publicAddress must be set if and only if localAddress is set";
+      assertion = (cfg.nat.publicAddress == null) == (cfg.nat.localAddress == null);
+    }];
+  };
+
+  meta.maintainers = lib.teams.jitsi.members;
+}
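A minimal sketch for a videobridge registering with a co-located XMPP server; the domain, MUC JID and secret path are placeholders (on a jitsi-meet host these are normally wired up for you):

    {
      services.jitsi-videobridge = {
        enable = true;
        openFirewall = true;
        xmppConfigs."localhost" = {
          userName = "jvb";
          domain = "auth.xmpp.example.org";                        # placeholder
          passwordFile = "/var/lib/jitsi-meet/videobridge-secret"; # placeholder
          mucJids = "jvbbrewery@internal.xmpp.example.org";        # placeholder
        };
      };
    }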
diff --git a/nixpkgs/nixos/modules/services/networking/jool.nix b/nixpkgs/nixos/modules/services/networking/jool.nix
new file mode 100644
index 000000000000..d2d2b0956e8a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/jool.nix
@@ -0,0 +1,281 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.networking.jool;
+
+  jool = config.boot.kernelPackages.jool;
+  jool-cli = pkgs.jool-cli;
+
+  hardening = {
+    # Run as unprivileged user
+    User = "jool";
+    Group = "jool";
+    DynamicUser = true;
+
+    # Restrict filesystem to only read the jool module
+    TemporaryFileSystem = [ "/" ];
+    BindReadOnlyPaths = [
+      builtins.storeDir
+      "/run/booted-system/kernel-modules"
+    ];
+
+    # Give capabilities to load the module and configure it
+    AmbientCapabilities = [ "CAP_SYS_MODULE" "CAP_NET_ADMIN" ];
+    RestrictAddressFamilies = [ "AF_NETLINK" ];
+
+    # Other restrictions
+    RestrictNamespaces = [ "net" ];
+    SystemCallFilter = [ "@system-service" "@module" ];
+    CapabilityBoundingSet = [ "CAP_SYS_MODULE" "CAP_NET_ADMIN" ];
+  };
+
+  configFormat = pkgs.formats.json {};
+
+  # Generate the config file of instance `name`
+  nat64Conf = name:
+    configFormat.generate "jool-nat64-${name}.conf"
+      (cfg.nat64.${name} // { instance = name; });
+  siitConf = name:
+    configFormat.generate "jool-siit-${name}.conf"
+      (cfg.siit.${name} // { instance = name; });
+
+  # NAT64 config type
+  nat64Options = lib.types.submodule {
+    # The format is plain JSON
+    freeformType = configFormat.type;
+    # Some options with a default value
+    options.framework = lib.mkOption {
+      type = lib.types.enum [ "netfilter" "iptables" ];
+      default = "netfilter";
+      description = lib.mdDoc ''
+        The framework to use for attaching Jool's translation to the existing
+        kernel packet processing rules. See the
+        [documentation](https://nicmx.github.io/Jool/en/intro-jool.html#design)
+        for the differences between the two options.
+      '';
+    };
+    options.global.pool6 = lib.mkOption {
+      type = lib.types.strMatching "[[:xdigit:]:]+/[[:digit:]]+"
+        // { description = "Network prefix in CIDR notation"; };
+      default = "64:ff9b::/96";
+      description = lib.mdDoc ''
+        The prefix used for embedding IPv4 into IPv6 addresses.
+        Defaults to the well-known NAT64 prefix, defined by
+        [RFC 6052](https://datatracker.ietf.org/doc/html/rfc6052).
+      '';
+    };
+  };
+
+  # SIIT config type
+  siitOptions = lib.types.submodule {
+    # The format is, again, plain JSON
+    freeformType = configFormat.type;
+    # Some options with a default value
+    options = { inherit (nat64Options.getSubOptions []) framework; };
+  };
+
+  makeNat64Unit = name: opts: {
+    "jool-nat64-${name}" = {
+      description = "Jool, NAT64 setup of instance ${name}";
+      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool";
+        ExecStart    = "${jool-cli}/bin/jool file handle ${nat64Conf name}";
+        ExecStop     = "${jool-cli}/bin/jool -f ${nat64Conf name} instance remove";
+      } // hardening;
+    };
+  };
+
+  makeSiitUnit = name: opts: {
+    "jool-siit-${name}" = {
+      description = "Jool, SIIT setup of instance ${name}";
+      documentation = [ "https://nicmx.github.io/Jool/en/documentation.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStartPre = "${pkgs.kmod}/bin/modprobe jool_siit";
+        ExecStart    = "${jool-cli}/bin/jool_siit file handle ${siitConf name}";
+        ExecStop     = "${jool-cli}/bin/jool_siit -f ${siitConf name} instance remove";
+      } // hardening;
+    };
+  };
+
+  checkNat64 = name: _: ''
+    printf 'Validating Jool configuration for NAT64 instance "${name}"... '
+    jool file check ${nat64Conf name}
+    printf 'Ok.\n'; touch "$out"
+  '';
+
+  checkSiit = name: _: ''
+    printf 'Validating Jool configuration for SIIT instance "${name}"... '
+    jool_siit file check ${siitConf name}
+    printf 'Ok.\n'; touch "$out"
+  '';
+
+in
+
+{
+  options = {
+    networking.jool.enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      relatedPackages = [ "linuxPackages.jool" "jool-cli" ];
+      description = lib.mdDoc ''
+        Whether to enable Jool, an Open Source implementation of IPv4/IPv6
+        translation on Linux.
+
+        Jool can perform stateless IP/ICMP translation (SIIT) or stateful
+        NAT64, analogous to the IPv4 NAPT. Refer to the upstream
+        [documentation](https://nicmx.github.io/Jool/en/intro-xlat.html) for
+        the supported modes of translation and how to configure them.
+
+        Enabling this option will install the Jool kernel module and the
+        command line tools for controlling it.
+      '';
+    };
+
+    networking.jool.nat64 = lib.mkOption {
+      type = lib.types.attrsOf nat64Options;
+      default = { };
+      example = lib.literalExpression ''
+        {
+          default = {
+            # custom NAT64 prefix
+            global.pool6 = "2001:db8:64::/96";
+
+            # Port forwarding
+            bib = [
+              { # SSH 192.0.2.16 → 2001:db8:a::1
+                "protocol"     = "TCP";
+                "ipv4 address" = "192.0.2.16#22";
+                "ipv6 address" = "2001:db8:a::1#22";
+              }
+              { # DNS (TCP) 192.0.2.16 → 2001:db8:a::2
+                "protocol"     = "TCP";
+                "ipv4 address" = "192.0.2.16#53";
+                "ipv6 address" = "2001:db8:a::2#53";
+              }
+              { # DNS (UDP) 192.0.2.16 → 2001:db8:a::2
+                "protocol" = "UDP";
+                "ipv4 address" = "192.0.2.16#53";
+                "ipv6 address" = "2001:db8:a::2#53";
+              }
+            ];
+
+            pool4 = [
+              # Port ranges for dynamic translation
+              { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+              { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+              { protocol = "ICMP";  prefix = "192.0.2.16/32"; "port range" = "40001-65535"; }
+
+              # Ports for static BIB entries
+              { protocol =  "TCP";  prefix = "192.0.2.16/32"; "port range" = "22"; }
+              { protocol =  "UDP";  prefix = "192.0.2.16/32"; "port range" = "53"; }
+            ];
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Definitions of NAT64 instances of Jool.
+        See the
+        [documentation](https://nicmx.github.io/Jool/en/config-atomic.html) for
+        the available options. Also check out the
+        [tutorial](https://nicmx.github.io/Jool/en/run-nat64.html) for an
+        introduction to NAT64 and how to troubleshoot the setup.
+
+        The attribute name defines the name of the instance, with the main one
+        being `default`: this can be accessed from the command line without
+        specifying the name with `-i`.
+
+        ::: {.note}
+        Instances created imperatively from the command line will not interfere
+        with the NixOS instances, provided the respective `pool4` addresses and
+        port ranges are not overlapping.
+        :::
+
+        ::: {.warning}
+        Changes to an instance performed via `jool -i <name>` are applied
+        correctly but will be lost after restarting the respective
+        `jool-nat64-<name>.service`.
+        :::
+      '';
+    };
+
+    networking.jool.siit = lib.mkOption {
+      type = lib.types.attrsOf siitOptions;
+      default = { };
+      example = lib.literalExpression ''
+        {
+          default = {
+            # Maps any IPv4 address x.y.z.t to 2001:db8::x.y.z.t and v.v.
+            global.pool6 = "2001:db8::/96";
+
+            # Explicit address mappings
+            eamt = [
+              # 2001:db8:1:: ←→ 192.0.2.0
+              { "ipv6 prefix" = "2001:db8:1::/128"; "ipv4 prefix" = "192.0.2.0"; }
+              # 2001:db8:2::x ←→ 198.51.100.x
+              { "ipv6 prefix" = "2001:db8:2::/120"; "ipv4 prefix" = "198.51.100.0/24"; }
+            ];
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Definitions of SIIT instances of Jool.
+        See the
+        [documentation](https://nicmx.github.io/Jool/en/config-atomic.html) for
+        the available options. Also check out the
+        [tutorial](https://nicmx.github.io/Jool/en/run-vanilla.html) for an
+        introduction to SIIT and how to troubleshoot the setup.
+
+        The attribute name defines the name of the instance, with the main one
+        being `default`: this can be accessed from the command line without
+        specifying the name with `-i`.
+
+        ::: {.note}
+        Instances created imperatively from the command line will not interfere
+        with the NixOS instances, provided the respective EAMT addresses and
+        port ranges are not overlapping.
+        :::
+
+        ::: {.warning}
+        Changes to an instance performed via `jool -i <name>` are applied
+        correctly but will be lost after restarting the respective
+        `jool-siit-<name>.service`.
+        :::
+      '';
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+    # Install kernel module and cli tools
+    boot.extraModulePackages = [ jool ];
+    environment.systemPackages = [ jool-cli ];
+
+    # Install services for each instance
+    systemd.services = lib.mkMerge
+      (lib.mapAttrsToList makeNat64Unit cfg.nat64 ++
+       lib.mapAttrsToList makeSiitUnit cfg.siit);
+
+    # Check the configuration of each instance
+    system.checks = lib.optional (cfg.nat64 != {} || cfg.siit != {})
+      (pkgs.runCommand "jool-validated"
+        {
+          nativeBuildInputs = with pkgs.buildPackages; [ jool-cli ];
+          preferLocalBuild = true;
+        }
+        (lib.concatStrings
+          (lib.mapAttrsToList checkNat64 cfg.nat64 ++
+           lib.mapAttrsToList checkSiit cfg.siit)));
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
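A minimal NAT64 sketch using the default well-known prefix; the IPv4 address and port ranges in pool4 are illustrative placeholders:

    {
      networking.jool.enable = true;
      networking.jool.nat64.default = {
        # global.pool6 defaults to 64:ff9b::/96, so only pool4 needs filling in here.
        pool4 = [
          { protocol = "TCP";  prefix = "192.0.2.1/32"; "port range" = "61001-65535"; }
          { protocol = "UDP";  prefix = "192.0.2.1/32"; "port range" = "61001-65535"; }
          { protocol = "ICMP"; prefix = "192.0.2.1/32"; "port range" = "61001-65535"; }
        ];
      };
    }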
diff --git a/nixpkgs/nixos/modules/services/networking/kea.nix b/nixpkgs/nixos/modules/services/networking/kea.nix
new file mode 100644
index 000000000000..2f922a026a3a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/kea.nix
@@ -0,0 +1,452 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+with lib;
+
+let
+  cfg = config.services.kea;
+
+  xor = x: y: (!x && y) || (x && !y);
+  format = pkgs.formats.json {};
+
+  chooseNotNull = x: y: if x != null then x else y;
+
+  ctrlAgentConfig = chooseNotNull cfg.ctrl-agent.configFile (format.generate "kea-ctrl-agent.conf" {
+    Control-agent = cfg.ctrl-agent.settings;
+  });
+
+  dhcp4Config = chooseNotNull cfg.dhcp4.configFile (format.generate "kea-dhcp4.conf" {
+    Dhcp4 = cfg.dhcp4.settings;
+  });
+
+  dhcp6Config = chooseNotNull cfg.dhcp6.configFile (format.generate "kea-dhcp6.conf" {
+    Dhcp6 = cfg.dhcp6.settings;
+  });
+
+  dhcpDdnsConfig = chooseNotNull cfg.dhcp-ddns.configFile (format.generate "kea-dhcp-ddns.conf" {
+    DhcpDdns = cfg.dhcp-ddns.settings;
+  });
+
+  package = pkgs.kea;
+in
+{
+  options.services.kea = with types; {
+    ctrl-agent = mkOption {
+      description = lib.mdDoc ''
+        Kea Control Agent configuration
+      '';
+      default = {};
+      type = submodule {
+        options = {
+          enable = mkEnableOption (lib.mdDoc "Kea Control Agent");
+
+          extraArgs = mkOption {
+            type = listOf str;
+            default = [];
+            description = lib.mdDoc ''
+              List of additional arguments to pass to the daemon.
+            '';
+          };
+
+          configFile = mkOption {
+            type = nullOr path;
+            default = null;
+            description = lib.mdDoc ''
+              Kea Control Agent configuration as a path, see <https://kea.readthedocs.io/en/kea-${package.version}/arm/agent.html>.
+
+              Takes preference over [settings](#opt-services.kea.ctrl-agent.settings).
+              Most users should prefer using [settings](#opt-services.kea.ctrl-agent.settings) instead.
+            '';
+          };
+
+          settings = mkOption {
+            type = format.type;
+            default = null;
+            description = lib.mdDoc ''
+              Kea Control Agent configuration as an attribute set, see <https://kea.readthedocs.io/en/kea-${package.version}/arm/agent.html>.
+            '';
+          };
+        };
+      };
+    };
+
+    dhcp4 = mkOption {
+      description = lib.mdDoc ''
+        DHCP4 Server configuration
+      '';
+      default = {};
+      type = submodule {
+        options = {
+          enable = mkEnableOption (lib.mdDoc "Kea DHCP4 server");
+
+          extraArgs = mkOption {
+            type = listOf str;
+            default = [];
+            description = lib.mdDoc ''
+              List of additional arguments to pass to the daemon.
+            '';
+          };
+
+          configFile = mkOption {
+            type = nullOr path;
+            default = null;
+            description = lib.mdDoc ''
+              Kea DHCP4 configuration as a path, see <https://kea.readthedocs.io/en/kea-${package.version}/arm/dhcp4-srv.html>.
+
+              Takes preference over [settings](#opt-services.kea.dhcp4.settings).
+              Most users should prefer using [settings](#opt-services.kea.dhcp4.settings) instead.
+            '';
+          };
+
+          settings = mkOption {
+            type = format.type;
+            default = null;
+            example = {
+              valid-lifetime = 4000;
+              renew-timer = 1000;
+              rebind-timer = 2000;
+              interfaces-config = {
+                interfaces = [
+                  "eth0"
+                ];
+              };
+              lease-database = {
+                type = "memfile";
+                persist = true;
+                name = "/var/lib/kea/dhcp4.leases";
+              };
+              subnet4 = [ {
+                subnet = "192.0.2.0/24";
+                pools = [ {
+                  pool = "192.0.2.100 - 192.0.2.240";
+                } ];
+              } ];
+            };
+            description = lib.mdDoc ''
+              Kea DHCP4 configuration as an attribute set, see <https://kea.readthedocs.io/en/kea-${package.version}/arm/dhcp4-srv.html>.
+            '';
+          };
+        };
+      };
+    };
+
+    dhcp6 = mkOption {
+      description = lib.mdDoc ''
+        DHCP6 Server configuration
+      '';
+      default = {};
+      type = submodule {
+        options = {
+          enable = mkEnableOption (lib.mdDoc "Kea DHCP6 server");
+
+          extraArgs = mkOption {
+            type = listOf str;
+            default = [];
+            description = lib.mdDoc ''
+              List of additional arguments to pass to the daemon.
+            '';
+          };
+
+          configFile = mkOption {
+            type = nullOr path;
+            default = null;
+            description = lib.mdDoc ''
+              Kea DHCP6 configuration as a path, see <https://kea.readthedocs.io/en/kea-${package.version}/arm/dhcp6-srv.html>.
+
+              Takes precedence over [settings](#opt-services.kea.dhcp6.settings).
+              Most users should prefer using [settings](#opt-services.kea.dhcp6.settings) instead.
+            '';
+          };
+
+          settings = mkOption {
+            type = format.type;
+            default = null;
+            example = {
+              valid-lifetime = 4000;
+              renew-timer = 1000;
+              rebind-timer = 2000;
+              preferred-lifetime = 3000;
+              interfaces-config = {
+                interfaces = [
+                  "eth0"
+                ];
+              };
+              lease-database = {
+                type = "memfile";
+                persist = true;
+                name = "/var/lib/kea/dhcp6.leases";
+              };
+              subnet6 = [ {
+                subnet = "2001:db8:1::/64";
+                pools = [ {
+                  pool = "2001:db8:1::1-2001:db8:1::ffff";
+                } ];
+              } ];
+            };
+            description = lib.mdDoc ''
+              Kea DHCP6 configuration as an attribute set, see <https://kea.readthedocs.io/en/kea-${package.version}/arm/dhcp6-srv.html>.
+            '';
+          };
+        };
+      };
+    };
+
+    dhcp-ddns = mkOption {
+      description = lib.mdDoc ''
+        Kea DHCP-DDNS configuration
+      '';
+      default = {};
+      type = submodule {
+        options = {
+          enable = mkEnableOption (lib.mdDoc "Kea DDNS server");
+
+          extraArgs = mkOption {
+            type = listOf str;
+            default = [];
+            description = lib.mdDoc ''
+              List of additional arguments to pass to the daemon.
+            '';
+          };
+
+          configFile = mkOption {
+            type = nullOr path;
+            default = null;
+            description = lib.mdDoc ''
+              Kea DHCP-DDNS configuration as a path, see <https://kea.readthedocs.io/en/kea-${package.version}/arm/ddns.html>.
+
+              Takes precedence over [settings](#opt-services.kea.dhcp-ddns.settings).
+              Most users should prefer using [settings](#opt-services.kea.dhcp-ddns.settings) instead.
+            '';
+          };
+
+          settings = mkOption {
+            type = format.type;
+            default = null;
+            example = {
+              ip-address = "127.0.0.1";
+              port = 53001;
+              dns-server-timeout = 100;
+              ncr-protocol = "UDP";
+              ncr-format = "JSON";
+              tsig-keys = [ ];
+              forward-ddns = {
+                ddns-domains = [ ];
+              };
+              reverse-ddns = {
+                ddns-domains = [ ];
+              };
+            };
+            description = lib.mdDoc ''
+              Kea DHCP-DDNS configuration as an attribute set, see <https://kea.readthedocs.io/en/kea-${package.version}/arm/ddns.html>.
+            '';
+          };
+        };
+      };
+    };
+  };
+
+  config = let
+    commonServiceConfig = {
+      ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      DynamicUser = true;
+      User = "kea";
+      ConfigurationDirectory = "kea";
+      StateDirectory = "kea";
+      UMask = "0077";
+    };
+  in mkIf (cfg.ctrl-agent.enable || cfg.dhcp4.enable || cfg.dhcp6.enable || cfg.dhcp-ddns.enable) (mkMerge [
+  {
+    environment.systemPackages = [ package ];
+  }
+
+  (mkIf cfg.ctrl-agent.enable {
+    assertions = [{
+        assertion = xor (cfg.ctrl-agent.settings == null) (cfg.ctrl-agent.configFile == null);
+        message = "Either services.kea.ctrl-agent.settings or services.kea.ctrl-agent.configFile must be set to a non-null value.";
+    }];
+
+    environment.etc."kea/ctrl-agent.conf".source = ctrlAgentConfig;
+
+    systemd.services.kea-ctrl-agent = {
+      description = "Kea Control Agent";
+      documentation = [
+        "man:kea-ctrl-agent(8)"
+        "https://kea.readthedocs.io/en/kea-${package.version}/arm/agent.html"
+      ];
+
+      after = [
+        "network-online.target"
+        "time-sync.target"
+      ];
+      wantedBy = [
+        "kea-dhcp4-server.service"
+        "kea-dhcp6-server.service"
+        "kea-dhcp-ddns-server.service"
+      ];
+
+      environment = {
+        KEA_PIDFILE_DIR = "/run/kea-ctrl-agent";
+        KEA_LOCKFILE_DIR = "/run/kea-ctrl-agent";
+      };
+
+      restartTriggers = [
+        ctrlAgentConfig
+      ];
+
+      serviceConfig = {
+        ExecStart = "${package}/bin/kea-ctrl-agent -c /etc/kea/ctrl-agent.conf ${lib.escapeShellArgs cfg.ctrl-agent.extraArgs}";
+        KillMode = "process";
+        Restart = "on-failure";
+        RuntimeDirectory = "kea-ctrl-agent";
+      } // commonServiceConfig;
+    };
+  })
+
+  (mkIf cfg.dhcp4.enable {
+    assertions = [{
+        assertion = xor (cfg.dhcp4.settings == null) (cfg.dhcp4.configFile == null);
+        message = "Either services.kea.dhcp4.settings or services.kea.dhcp4.configFile must be set to a non-null value.";
+    }];
+
+    environment.etc."kea/dhcp4-server.conf".source = dhcp4Config;
+
+    systemd.services.kea-dhcp4-server = {
+      description = "Kea DHCP4 Server";
+      documentation = [
+        "man:kea-dhcp4(8)"
+        "https://kea.readthedocs.io/en/kea-${package.version}/arm/dhcp4-srv.html"
+      ];
+
+      after = [
+        "network-online.target"
+        "time-sync.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+
+      environment = {
+        KEA_PIDFILE_DIR = "/run/kea-dhcp4";
+        KEA_LOCKFILE_DIR = "/run/kea-dhcp4";
+      };
+
+      restartTriggers = [
+        dhcp4Config
+      ];
+
+      serviceConfig = {
+        ExecStart = "${package}/bin/kea-dhcp4 -c /etc/kea/dhcp4-server.conf ${lib.escapeShellArgs cfg.dhcp4.extraArgs}";
+        # Kea does not request capabilities by itself
+        AmbientCapabilities = [
+          "CAP_NET_BIND_SERVICE"
+          "CAP_NET_RAW"
+        ];
+        CapabilityBoundingSet = [
+          "CAP_NET_BIND_SERVICE"
+          "CAP_NET_RAW"
+        ];
+        RuntimeDirectory = "kea-dhcp4";
+      } // commonServiceConfig;
+    };
+  })
+
+  (mkIf cfg.dhcp6.enable {
+    assertions = [{
+        assertion = xor (cfg.dhcp6.settings == null) (cfg.dhcp6.configFile == null);
+        message = "Either services.kea.dhcp6.settings or services.kea.dhcp6.configFile must be set to a non-null value.";
+    }];
+
+    environment.etc."kea/dhcp6-server.conf".source = dhcp6Config;
+
+    systemd.services.kea-dhcp6-server = {
+      description = "Kea DHCP6 Server";
+      documentation = [
+        "man:kea-dhcp6(8)"
+        "https://kea.readthedocs.io/en/kea-${package.version}/arm/dhcp6-srv.html"
+      ];
+
+      after = [
+        "network-online.target"
+        "time-sync.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+
+      environment = {
+        KEA_PIDFILE_DIR = "/run/kea-dhcp6";
+        KEA_LOCKFILE_DIR = "/run/kea-dhcp6";
+      };
+
+      restartTriggers = [
+        dhcp6Config
+      ];
+
+      serviceConfig = {
+        ExecStart = "${package}/bin/kea-dhcp6 -c /etc/kea/dhcp6-server.conf ${lib.escapeShellArgs cfg.dhcp6.extraArgs}";
+        # Kea does not request capabilities by itself
+        AmbientCapabilities = [
+          "CAP_NET_BIND_SERVICE"
+        ];
+        CapabilityBoundingSet = [
+          "CAP_NET_BIND_SERVICE"
+        ];
+        RuntimeDirectory = "kea-dhcp6";
+      } // commonServiceConfig;
+    };
+  })
+
+  (mkIf cfg.dhcp-ddns.enable {
+    assertions = [{
+        assertion = xor (cfg.dhcp-ddns.settings == null) (cfg.dhcp-ddns.configFile == null);
+        message = "Either services.kea.dhcp-ddns.settings or services.kea.dhcp-ddns.configFile must be set to a non-null value.";
+    }];
+
+    environment.etc."kea/dhcp-ddns.conf".source = dhcpDdnsConfig;
+
+    systemd.services.kea-dhcp-ddns-server = {
+      description = "Kea DHCP-DDNS Server";
+      documentation = [
+        "man:kea-dhcp-ddns(8)"
+        "https://kea.readthedocs.io/en/kea-${package.version}/arm/ddns.html"
+      ];
+
+      after = [
+        "network-online.target"
+        "time-sync.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+
+      environment = {
+        KEA_PIDFILE_DIR = "/run/kea-dhcp-ddns";
+        KEA_LOCKFILE_DIR = "/run/kea-dhcp-ddns";
+      };
+
+      restartTriggers = [
+        dhcpDdnsConfig
+      ];
+
+      serviceConfig = {
+        ExecStart = "${package}/bin/kea-dhcp-ddns -c /etc/kea/dhcp-ddns.conf ${lib.escapeShellArgs cfg.dhcp-ddns.extraArgs}";
+        AmbientCapabilities = [
+          "CAP_NET_BIND_SERVICE"
+        ];
+        CapabilityBoundingSet = [
+          "CAP_NET_BIND_SERVICE"
+        ];
+        RuntimeDirectory = "kea-dhcp-ddns";
+      } // commonServiceConfig;
+    };
+  })
+
+  ]);
+
+  meta.maintainers = with maintainers; [ hexa ];
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/networking/keepalived/default.nix b/nixpkgs/nixos/modules/services/networking/keepalived/default.nix
new file mode 100644
index 000000000000..29fbea5545c3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/keepalived/default.nix
@@ -0,0 +1,323 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.keepalived;
+
+  keepalivedConf = pkgs.writeText "keepalived.conf" ''
+    global_defs {
+      ${optionalString cfg.enableScriptSecurity "enable_script_security"}
+      ${snmpGlobalDefs}
+      ${cfg.extraGlobalDefs}
+    }
+
+    ${vrrpScriptStr}
+    ${vrrpInstancesStr}
+    ${cfg.extraConfig}
+  '';
+
+  snmpGlobalDefs = with cfg.snmp; optionalString enable (
+    optionalString (socket != null) "snmp_socket ${socket}\n"
+    + optionalString enableKeepalived "enable_snmp_keepalived\n"
+    + optionalString enableChecker "enable_snmp_checker\n"
+    + optionalString enableRfc "enable_snmp_rfc\n"
+    + optionalString enableRfcV2 "enable_snmp_rfcv2\n"
+    + optionalString enableRfcV3 "enable_snmp_rfcv3\n"
+    + optionalString enableTraps "enable_traps"
+  );
+
+  vrrpScriptStr = concatStringsSep "\n" (map (s:
+    ''
+      vrrp_script ${s.name} {
+        script "${s.script}"
+        interval ${toString s.interval}
+        fall ${toString s.fall}
+        rise ${toString s.rise}
+        timeout ${toString s.timeout}
+        weight ${toString s.weight}
+        user ${s.user} ${optionalString (s.group != null) s.group}
+
+        ${s.extraConfig}
+      }
+    ''
+  ) vrrpScripts);
+
+  vrrpInstancesStr = concatStringsSep "\n" (map (i:
+    ''
+      vrrp_instance ${i.name} {
+        interface ${i.interface}
+        state ${i.state}
+        virtual_router_id ${toString i.virtualRouterId}
+        priority ${toString i.priority}
+        ${optionalString i.noPreempt "nopreempt"}
+
+        ${optionalString i.useVmac (
+          "use_vmac" + optionalString (i.vmacInterface != null) " ${i.vmacInterface}"
+        )}
+        ${optionalString i.vmacXmitBase "vmac_xmit_base"}
+
+        ${optionalString (i.unicastSrcIp != null) "unicast_src_ip ${i.unicastSrcIp}"}
+        unicast_peer {
+          ${concatStringsSep "\n" i.unicastPeers}
+        }
+
+        virtual_ipaddress {
+          ${concatMapStringsSep "\n" virtualIpLine i.virtualIps}
+        }
+
+        ${optionalString (builtins.length i.trackScripts > 0) ''
+          track_script {
+            ${concatStringsSep "\n" i.trackScripts}
+          }
+        ''}
+
+        ${optionalString (builtins.length i.trackInterfaces > 0) ''
+          track_interface {
+            ${concatStringsSep "\n" i.trackInterfaces}
+          }
+        ''}
+
+        ${i.extraConfig}
+      }
+    ''
+  ) vrrpInstances);
+
+  virtualIpLine = ip: ip.addr
+    + optionalString (notNullOrEmpty ip.brd) " brd ${ip.brd}"
+    + optionalString (notNullOrEmpty ip.dev) " dev ${ip.dev}"
+    + optionalString (notNullOrEmpty ip.scope) " scope ${ip.scope}"
+    + optionalString (notNullOrEmpty ip.label) " label ${ip.label}";
+
+  notNullOrEmpty = s: !(s == null || s == "");
+
+  vrrpScripts = mapAttrsToList (name: config:
+    {
+      inherit name;
+    } // config
+  ) cfg.vrrpScripts;
+
+  vrrpInstances = mapAttrsToList (iName: iConfig:
+    {
+      name = iName;
+    } // iConfig
+  ) cfg.vrrpInstances;
+
+  vrrpInstanceAssertions = i: [
+    { assertion = i.interface != "";
+      message = "services.keepalived.vrrpInstances.${i.name}.interface option cannot be empty.";
+    }
+    { assertion = i.virtualRouterId >= 0 && i.virtualRouterId <= 255;
+      message = "services.keepalived.vrrpInstances.${i.name}.virtualRouterId must be an integer between 0..255.";
+    }
+    { assertion = i.priority >= 0 && i.priority <= 255;
+      message = "services.keepalived.vrrpInstances.${i.name}.priority must be an integer between 0..255.";
+    }
+    { assertion = i.vmacInterface == null || i.useVmac;
+      message = "services.keepalived.vrrpInstances.${i.name}.vmacInterface has no effect when services.keepalived.vrrpInstances.${i.name}.useVmac is not set.";
+    }
+    { assertion = !i.vmacXmitBase || i.useVmac;
+      message = "services.keepalived.vrrpInstances.${i.name}.vmacXmitBase has no effect when services.keepalived.vrrpInstances.${i.name}.useVmac is not set.";
+    }
+  ] ++ flatten (map (virtualIpAssertions i.name) i.virtualIps)
+    ++ flatten (map (vrrpScriptAssertion i.name) i.trackScripts);
+
+  virtualIpAssertions = vrrpName: ip: [
+    { assertion = ip.addr != "";
+      message = "The 'addr' option for an services.keepalived.vrrpInstances.${vrrpName}.virtualIps entry cannot be empty.";
+    }
+  ];
+
+  vrrpScriptAssertion = vrrpName: scriptName: {
+    assertion = builtins.hasAttr scriptName cfg.vrrpScripts;
+    message = "services.keepalived.vrrpInstances.${vrrpName} trackscript ${scriptName} is not defined in services.keepalived.vrrpScripts.";
+  };
+
+  pidFile = "/run/keepalived.pid";
+
+in
+{
+
+  options = {
+    services.keepalived = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Keepalived.
+        '';
+      };
+
+      enableScriptSecurity = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Don't run scripts configured to be run as root if any part of the path is writable by a non-root user.
+        '';
+      };
+
+      snmp = {
+
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to enable the builtin AgentX subagent.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Socket to use for connecting to SNMP master agent. If this value is
+            set to null, keepalived's default will be used, which is
+            unix:/var/agentx/master, unless using a network namespace, when the
+            default is udp:localhost:705.
+          '';
+        };
+
+        enableKeepalived = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable SNMP handling of vrrp element of KEEPALIVED MIB.
+          '';
+        };
+
+        enableChecker = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable SNMP handling of checker element of KEEPALIVED MIB.
+          '';
+        };
+
+        enableRfc = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable SNMP handling of RFC2787 and RFC6527 VRRP MIBs.
+          '';
+        };
+
+        enableRfcV2 = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable SNMP handling of RFC2787 VRRP MIB.
+          '';
+        };
+
+        enableRfcV3 = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable SNMP handling of RFC6527 VRRP MIB.
+          '';
+        };
+
+        enableTraps = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable SNMP traps.
+          '';
+        };
+
+      };
+
+      vrrpScripts = mkOption {
+        type = types.attrsOf (types.submodule (import ./vrrp-script-options.nix {
+          inherit lib;
+        }));
+        default = {};
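+        # Illustrative example (assumption): a check script that verifies nginx
+        # is running and raises the instance priority while it is.
+        example = literalExpression ''
+          {
+            chk_nginx = {
+              script = "''${pkgs.procps}/bin/pkill -0 nginx";
+              interval = 2;
+              weight = 10;
+            };
+          }
+        '';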
+        description = lib.mdDoc "Declarative vrrp script config";
+      };
+
+      vrrpInstances = mkOption {
+        type = types.attrsOf (types.submodule (import ./vrrp-instance-options.nix {
+          inherit lib;
+        }));
+        default = {};
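+        # Illustrative example (assumption): a single VRRP instance holding one
+        # virtual IP and tracking the chk_nginx script from vrrpScripts.
+        example = literalExpression ''
+          {
+            VI_1 = {
+              interface = "eth0";
+              virtualRouterId = 51;
+              priority = 150;
+              virtualIps = [ { addr = "192.0.2.10/24"; } ];
+              trackScripts = [ "chk_nginx" ];
+            };
+          }
+        '';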
+        description = lib.mdDoc "Declarative vhost config";
+      };
+
+      extraGlobalDefs = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to the 'global_defs' block of the
+          configuration file
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to the configuration file.
+        '';
+      };
+
+      secretFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/keepalived.env";
+        description = lib.mdDoc ''
+          Environment variables from this file will be interpolated into the
+          final config file using envsubst with this syntax: `$VARIABLE`
+          or `''${VARIABLE}`.
+          The file should contain lines formatted as `SECRET_VAR=SECRET_VALUE`.
+          This is useful to avoid putting secrets into the nix store.
+        '';
+      };
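+      # Usage sketch (assumption, values made up): with /run/keys/keepalived.env
+      # containing KEEPALIVED_PASS=verysecret, the variable can be referenced
+      # from any verbatim configuration section, e.g.
+      #   services.keepalived.vrrpInstances.VI_1.extraConfig = ''
+      #     authentication {
+      #       auth_type PASS
+      #       auth_pass $KEEPALIVED_PASS
+      #     }
+      #   '';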
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = flatten (map vrrpInstanceAssertions vrrpInstances);
+
+    systemd.timers.keepalived-boot-delay = {
+      description = "Keepalive Daemon delay to avoid instant transition to MASTER state";
+      after = [ "network.target" "network-online.target" "syslog.target" ];
+      requires = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      timerConfig = {
+        OnActiveSec = "5s";
+        Unit = "keepalived.service";
+      };
+    };
+
+    systemd.services.keepalived = let
+      finalConfigFile = if cfg.secretFile == null then keepalivedConf else "/run/keepalived/keepalived.conf";
+    in {
+      description = "Keepalive Daemon (LVS and VRRP)";
+      after = [ "network.target" "network-online.target" "syslog.target" ];
+      wants = [ "network-online.target" ];
+      serviceConfig = {
+        Type = "forking";
+        PIDFile = pidFile;
+        KillMode = "process";
+        RuntimeDirectory = "keepalived";
+        EnvironmentFile = lib.optional (cfg.secretFile != null) cfg.secretFile;
+        ExecStartPre = lib.optional (cfg.secretFile != null)
+        (pkgs.writeShellScript "keepalived-pre-start" ''
+          umask 077
+          ${pkgs.envsubst}/bin/envsubst -i "${keepalivedConf}" > ${finalConfigFile}
+        '');
+        ExecStart = "${pkgs.keepalived}/sbin/keepalived"
+          + " -f ${finalConfigFile}"
+          + " -p ${pidFile}"
+          + optionalString cfg.snmp.enable " --snmp";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "always";
+        RestartSec = "1s";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/keepalived/virtual-ip-options.nix b/nixpkgs/nixos/modules/services/networking/keepalived/virtual-ip-options.nix
new file mode 100644
index 000000000000..1fa6a0ee3bf4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/keepalived/virtual-ip-options.nix
@@ -0,0 +1,50 @@
+{ lib } :
+
+with lib;
+{
+  options = {
+
+    addr = mkOption {
+      type = types.str;
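+      # Illustrative value (assumption): an address with an explicit netmask.
+      example = "192.0.2.10/24";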
+      description = lib.mdDoc ''
+        IP address, optionally with a netmask: IPADDR[/MASK]
+      '';
+    };
+
+    brd = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The broadcast address on the interface.
+      '';
+    };
+
+    dev = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The name of the device to add the address to.
+      '';
+    };
+
+    scope = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The scope of the area where this address is valid.
+      '';
+    };
+
+    label = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Each address may be tagged with a label string. In order to preserve
+        compatibility with Linux-2.0 net aliases, this string must coincide with
+        the name of the device or must be prefixed with the device name followed
+        by a colon.
+      '';
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/keepalived/vrrp-instance-options.nix b/nixpkgs/nixos/modules/services/networking/keepalived/vrrp-instance-options.nix
new file mode 100644
index 000000000000..35401d439a91
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/keepalived/vrrp-instance-options.nix
@@ -0,0 +1,133 @@
+{ lib } :
+
+with lib;
+{
+  options = {
+
+    interface = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Interface for inside_network, bound by vrrp.
+      '';
+    };
+
+    state = mkOption {
+      type = types.enum [ "MASTER" "BACKUP" ];
+      default = "BACKUP";
+      description = lib.mdDoc ''
+        Initial state. As soon as the other machine(s) come up, an election will
+        be held and the machine with the highest "priority" will become MASTER.
+        So the entry here doesn't matter a whole lot.
+      '';
+    };
+
+    virtualRouterId = mkOption {
+      type = types.ints.between 1 255;
+      description = lib.mdDoc ''
+        Arbitrary unique number 1..255. Used to differentiate multiple instances
+        of vrrpd running on the same NIC (and hence same socket).
+      '';
+    };
+
+    priority = mkOption {
+      type = types.int;
+      default = 100;
+      description = lib.mdDoc ''
+        For electing MASTER, the highest priority wins. To become MASTER, set
+        this roughly 50 higher than on the other machines.
+      '';
+    };
+
+    noPreempt = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        VRRP will normally preempt a lower priority machine when a higher
+        priority machine comes online. "nopreempt" allows the lower priority
+        machine to maintain the master role, even when a higher priority machine
+        comes back online. NOTE: For this to work, the initial state of this
+        entry must be BACKUP.
+      '';
+    };
+
+    useVmac = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Use VRRP Virtual MAC.
+      '';
+    };
+
+    vmacInterface = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+         Name of the vmac interface to use. keepalived will come up with a name
+         if you don't specify one.
+      '';
+    };
+
+    vmacXmitBase = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Send/Recv VRRP messages from base interface instead of VMAC interface.
+      '';
+    };
+
+    unicastSrcIp = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+         Default IP for binding vrrpd is the primary IP on interface. If you
+         want to hide location of vrrpd, use this IP as src_addr for unicast
+         vrrp packets.
+      '';
+    };
+
+    unicastPeers = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Do not send VRRP adverts over the VRRP multicast group. Instead, send
+        adverts to this list of IP addresses using unicast. This allows the
+        VRRP FSM and its features to be used in networking environments where
+        multicast is not supported. Addresses may be IPv4 as well as IPv6.
+      '';
+    };
+
+    virtualIps = mkOption {
+      type = types.listOf (types.submodule (import ./virtual-ip-options.nix {
+        inherit lib;
+      }));
+      default = [];
+      # TODO: example
+      description = lib.mdDoc "Declarative vhost config";
+    };
+
+    trackScripts = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "chk_cmd1" "chk_cmd2" ];
+      description = lib.mdDoc "List of script names to invoke for health tracking.";
+    };
+
+    trackInterfaces = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "eth0" "eth1" ];
+      description = lib.mdDoc "List of network interfaces to monitor for health tracking.";
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra lines to be added verbatim to the vrrp_instance section.
+      '';
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/keepalived/vrrp-script-options.nix b/nixpkgs/nixos/modules/services/networking/keepalived/vrrp-script-options.nix
new file mode 100644
index 000000000000..852d6b0ec26f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/keepalived/vrrp-script-options.nix
@@ -0,0 +1,64 @@
+{ lib } :
+
+with lib;
+with lib.types;
+{
+  options = {
+
+    script = mkOption {
+      type = str;
+      example = literalExpression ''"''${pkgs.curl}/bin/curl -f http://localhost:80"'';
+      description = lib.mdDoc "(Path of) Script command to execute followed by args, i.e. cmd [args]...";
+    };
+
+    interval = mkOption {
+      type = int;
+      default = 1;
+      description = lib.mdDoc "Seconds between script invocations.";
+    };
+
+    timeout = mkOption {
+      type = int;
+      default = 5;
+      description = lib.mdDoc "Seconds after which script is considered to have failed.";
+    };
+
+    weight = mkOption {
+      type = int;
+      default = 0;
+      description = lib.mdDoc "Following a failure, adjust the priority by this weight.";
+    };
+
+    rise = mkOption {
+      type = int;
+      default = 5;
+      description = lib.mdDoc "Required number of successes for OK transition.";
+    };
+
+    fall = mkOption {
+      type = int;
+      default = 3;
+      description = lib.mdDoc "Required number of failures for KO transition.";
+    };
+
+    user = mkOption {
+      type = str;
+      default = "keepalived_script";
+      description = lib.mdDoc "Name of user to run the script under.";
+    };
+
+    group = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc "Name of group to run the script under. Defaults to user group.";
+    };
+
+    extraConfig = mkOption {
+      type = lines;
+      default = "";
+      description = lib.mdDoc "Extra lines to be added verbatim to the vrrp_script section.";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/keybase.nix b/nixpkgs/nixos/modules/services/networking/keybase.nix
new file mode 100644
index 000000000000..ae10aebb86e2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/keybase.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.keybase;
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.keybase = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to start the Keybase service.";
+      };
+
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    # Upstream: https://github.com/keybase/client/blob/master/packaging/linux/systemd/keybase.service
+    systemd.user.services.keybase = {
+      description = "Keybase service";
+      unitConfig.ConditionUser = "!@system";
+      environment.KEYBASE_SERVICE_TYPE = "systemd";
+      serviceConfig = {
+        Type = "notify";
+        EnvironmentFile = [
+          "-%E/keybase/keybase.autogen.env"
+          "-%E/keybase/keybase.env"
+        ];
+        ExecStart = "${pkgs.keybase}/bin/keybase service";
+        Restart = "on-failure";
+        PrivateTmp = true;
+      };
+      wantedBy = [ "default.target" ];
+    };
+
+    environment.systemPackages = [ pkgs.keybase ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/knot.nix b/nixpkgs/nixos/modules/services/networking/knot.nix
new file mode 100644
index 000000000000..4f6ac945cf97
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/knot.nix
@@ -0,0 +1,274 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.knot;
+
+  yamlConfig = let
+    result = assert secsCheck; nix2yaml cfg.settings;
+
+    secAllow = n: hasPrefix "mod-" n || elem n [
+      "module"
+      "server" "xdp" "control"
+      "log"
+      "statistics" "database"
+      "keystore" "key" "remote" "remotes" "acl" "submission" "policy"
+      "template"
+      "zone"
+      "include"
+    ];
+    secsCheck = let
+      secsBad = filter (n: !secAllow n) (attrNames cfg.settings);
+    in if secsBad == [] then true else throw
+      ("services.knot.settings contains unknown sections: " + toString secsBad);
+
+    nix2yaml = nix_def: concatStrings (
+        # We output the config section in the upstream-mandated order.
+        # Ordering is important due to forward-references not being allowed.
+        # See definition of conf_export and 'const yp_item_t conf_schema'
+        # upstream for reference.  Last updated for 3.3.
+        # When changing the set of sections, also update secAllow above.
+        [ (sec_list_fa "id" nix_def "module") ]
+        ++ map (sec_plain nix_def)
+          [ "server" "xdp" "control" ]
+        ++ [ (sec_list_fa "target" nix_def "log") ]
+        ++ map (sec_plain nix_def)
+          [  "statistics" "database" ]
+        ++ map (sec_list_fa "id" nix_def)
+          [ "keystore" "key" "remote" "remotes" "acl" "submission" "policy" ]
+
+        # Export module sections before the template section.
+        ++ map (sec_list_fa "id" nix_def) (filter (hasPrefix "mod-") (attrNames nix_def))
+
+        ++ [ (sec_list_fa "id" nix_def "template") ]
+        ++ [ (sec_list_fa "domain" nix_def "zone") ]
+        ++ [ (sec_plain nix_def "include") ]
+      );
+
+    # A plain section contains directly attributes (we don't really check that ATM).
+    sec_plain = nix_def: sec_name: if !hasAttr sec_name nix_def then "" else
+      n2y "" { ${sec_name} = nix_def.${sec_name}; };
+
+    # This section contains a list of attribute sets.  In each of the sets
+    # there's an attribute (`fa_name`, typically "id") that must exist and come first.
+    # Alternatively we support using attribute sets instead of lists; example diff:
+    # -template = [ { id = "default"; /* other attributes */ }   { id = "foo"; } ]
+    # +template = { default = {       /* those attributes */ };  foo = { };      }
+    sec_list_fa = fa_name: nix_def: sec_name: if !hasAttr sec_name nix_def then "" else
+      let
+        elem2yaml = fa_val: other_attrs:
+          "  - " + n2y "" { ${fa_name} = fa_val; }
+          + "    " + n2y "    " other_attrs
+          + "\n";
+        sec = nix_def.${sec_name};
+      in
+        sec_name + ":\n" +
+          (if isList sec
+            then flip concatMapStrings sec
+              (elem: elem2yaml elem.${fa_name} (removeAttrs elem [ fa_name ]))
+            else concatStrings (mapAttrsToList elem2yaml sec)
+          );
+
+    # This convertor doesn't care about ordering of attributes.
+    # TODO: it could probably be simplified even more, now that it's not
+    # to be used directly, but we might want some other tweaks, too.
+    n2y = indent: val:
+      if doRecurse val then concatStringsSep "\n${indent}"
+        (mapAttrsToList
+          # This is a bit wacky - set directly under a set would start on bad indent,
+          # so we start those on a new line, but not other types of attribute values.
+          (aname: aval: "${aname}:${if doRecurse aval then "\n${indent}  " else " "}"
+            + n2y (indent + "  ") aval)
+          val
+        )
+        + "\n"
+        else
+      /*
+      if isList val && stringLength indent < 4 then concatMapStrings
+        (elem: "\n${indent}- " + n2y (indent + "  ") elem)
+        val
+        else
+      */
+      if isList val /* and long indent */ then
+        "[ " + concatMapStringsSep ", " quoteString val + " ]" else
+      if isBool val then (if val then "on" else "off") else
+      quoteString val;
+
+    # We don't want paths like ./my-zone.txt be converted to plain strings.
+    quoteString = s: ''"${if builtins.typeOf s == "path" then s else toString s}"'';
+    # We don't want to walk the insides of derivation attributes.
+    doRecurse = val: isAttrs val && !isDerivation val;
+
+  in result;
+
+  configFile = if cfg.settingsFile != null then
+    # Note: with extraConfig, the 23.05 compat code did include keyFiles from settingsFile.
+    assert cfg.settings == {} && (cfg.keyFiles == [] || cfg.extraConfig != null);
+    cfg.settingsFile
+  else
+    mkConfigFile yamlConfig;
+
+  mkConfigFile = configString: pkgs.writeTextFile {
+    name = "knot.conf";
+    text = (concatMapStringsSep "\n" (file: "include: ${file}") cfg.keyFiles) + "\n" + configString;
+    # TODO: maybe we could do some checks even when private keys complicate this?
+    checkPhase = lib.optionalString (cfg.keyFiles == []) ''
+      ${cfg.package}/bin/knotc --config=$out conf-check
+    '';
+  };
+
+  socketFile = "/run/knot/knot.sock";
+
+  knot-cli-wrappers = pkgs.stdenv.mkDerivation {
+    name = "knot-cli-wrappers";
+    nativeBuildInputs = [ pkgs.makeWrapper ];
+    buildCommand = ''
+      mkdir -p $out/bin
+      makeWrapper ${cfg.package}/bin/knotc "$out/bin/knotc" \
+        --add-flags "--config=${configFile}" \
+        --add-flags "--socket=${socketFile}"
+      makeWrapper ${cfg.package}/bin/keymgr "$out/bin/keymgr" \
+        --add-flags "--config=${configFile}"
+      for executable in kdig khost kjournalprint knsec3hash knsupdate kzonecheck
+      do
+        ln -s "${cfg.package}/bin/$executable" "$out/bin/$executable"
+      done
+      mkdir -p "$out/share"
+      ln -s '${cfg.package}/share/man' "$out/share/"
+    '';
+  };
+in {
+  options = {
+    services.knot = {
+      enable = mkEnableOption (lib.mdDoc "Knot authoritative-only DNS server");
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          List of additional command line parameters for knotd
+        '';
+      };
+
+      keyFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
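+        # Illustrative example (assumption): a key file kept out of the nix
+        # store, e.g. deployed by a secrets manager.
+        example = [ "/run/secrets/knot-tsig-keys.conf" ];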
+        description = lib.mdDoc ''
+          A list of files containing additional configuration
+          to be included using the include directive. This option
+          makes it possible to include configuration such as TSIG keys without
+          exposing them in the nix store, which is readable by any process.
+          Note that using this option will also disable configuration
+          checks at build time.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.attrs;
+        default = {};
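+        # Illustrative example (assumption): a minimal authoritative setup in
+        # the attribute-set form accepted by the converter above.
+        example = literalExpression ''
+          {
+            server.listen = [ "0.0.0.0@53" "::@53" ];
+            zone."example.com".file = "/var/lib/knot/example.com.zone";
+          }
+        '';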
+        description = lib.mdDoc ''
+          Knot DNS configuration as Nix values.
+        '';
+      };
+
+      settingsFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          As an alternative to ``settings``, you can provide the whole configuration
+          directly in the almost-YAML format of Knot DNS.
+          You might want to utilize ``pkgs.writeText "knot.conf" "longConfigString"`` for this.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.knot-dns;
+        defaultText = literalExpression "pkgs.knot-dns";
+        description = lib.mdDoc ''
+          Which Knot DNS package to use
+        '';
+      };
+    };
+  };
+  imports = [
+    # Compatibility with NixOS 23.05.
+    (mkChangedOptionModule [ "services" "knot" "extraConfig" ] [ "services" "knot" "settingsFile" ]
+      (config: mkConfigFile config.services.knot.extraConfig)
+    )
+  ];
+
+  config = mkIf config.services.knot.enable {
+    users.groups.knot = {};
+    users.users.knot = {
+      isSystemUser = true;
+      group = "knot";
+      description = "Knot daemon user";
+    };
+
+    environment.etc."knot/knot.conf".source = configFile; # just for user's convenience
+
+    systemd.services.knot = {
+      unitConfig.Documentation = "man:knotd(8) man:knot.conf(5) man:knotc(8) https://www.knot-dns.cz/docs/${cfg.package.version}/html/";
+      description = cfg.package.meta.description;
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network.target" ];
+      after = ["network.target" ];
+
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${cfg.package}/bin/knotd --config=${configFile} --socket=${socketFile} ${concatStringsSep " " cfg.extraArgs}";
+        ExecReload = "${knot-cli-wrappers}/bin/knotc reload";
+        User = "knot";
+        Group = "knot";
+
+        AmbientCapabilities = [
+          "CAP_NET_BIND_SERVICE"
+        ];
+        CapabilityBoundingSet = [
+          "CAP_NET_BIND_SERVICE"
+        ];
+        DeviceAllow = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = false; # breaks capability passing
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        Restart = "on-abort";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_UNIX"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RuntimeDirectory = "knot";
+        StateDirectory = "knot";
+        StateDirectoryMode = "0700";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        UMask = "0077";
+      };
+    };
+
+    environment.systemPackages = [ knot-cli-wrappers ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/kresd.nix b/nixpkgs/nixos/modules/services/networking/kresd.nix
new file mode 100644
index 000000000000..3ad757133a60
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/kresd.nix
@@ -0,0 +1,151 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.kresd;
+
+  # Convert systemd-style address specification to kresd config line(s).
+  # On Nix level we don't attempt to precisely validate the address specifications.
+  # The optional IPv6 scope spec comes *after* port, perhaps surprisingly.
+  mkListen = kind: addr: let
+    al_v4 = builtins.match "([0-9.]+):([0-9]+)($)" addr;
+    al_v6 = builtins.match "\\[(.+)]:([0-9]+)(%.*|$)" addr;
+    al_portOnly = builtins.match "([0-9]+)" addr;
+    al = findFirst (a: a != null)
+      (throw "services.kresd.*: incorrect address specification '${addr}'")
+      [ al_v4 al_v6 al_portOnly ];
+    port = elemAt al 1;
+    addrSpec = if al_portOnly == null then "'${head al}${elemAt al 2}'" else "{'::', '0.0.0.0'}";
+    in # freebind is set for compatibility with earlier kresd services;
+       # it could be configurable, for example.
+      ''
+        net.listen(${addrSpec}, ${port}, { kind = '${kind}', freebind = true })
+      '';
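+  # For illustration: mkListen "tls" "[2001:db8::1]:853" renders as
+  #   net.listen('2001:db8::1', 853, { kind = 'tls', freebind = true })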
+
+  configFile = pkgs.writeText "kresd.conf" (
+    ""
+    + concatMapStrings (mkListen "dns") cfg.listenPlain
+    + concatMapStrings (mkListen "tls") cfg.listenTLS
+    + concatMapStrings (mkListen "doh2") cfg.listenDoH
+    + cfg.extraConfig
+  );
+in {
+  meta.maintainers = [ maintainers.vcunat /* upstream developer */ ];
+
+  imports = [
+    (mkChangedOptionModule [ "services" "kresd" "interfaces" ] [ "services" "kresd" "listenPlain" ]
+      (config:
+        let value = getAttrFromPath [ "services" "kresd" "interfaces" ] config;
+        in map
+          (iface: if elem ":" (stringToCharacters iface) then "[${iface}]:53" else "${iface}:53") # Syntax depends on being IPv6 or IPv4.
+          value
+      )
+    )
+    (mkRemovedOptionModule [ "services" "kresd" "cacheDir" ] "Please use (bind-)mounting instead.")
+  ];
+
+  ###### interface
+  options.services.kresd = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable Knot Resolver, a caching DNS resolver daemon.
+        DNSSEC validation is turned on by default.
+        You can run `sudo nc -U /run/knot-resolver/control/1`
+        and give commands interactively to kresd@1.service.
+      '';
+    };
+    package = mkOption {
+      type = types.package;
+      description = lib.mdDoc ''
+        knot-resolver package to use.
+      '';
+      default = pkgs.knot-resolver;
+      defaultText = literalExpression "pkgs.knot-resolver";
+      example = literalExpression "pkgs.knot-resolver.override { extraFeatures = true; }";
+    };
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra lines to be added verbatim to the generated configuration file.
+      '';
+    };
+    listenPlain = mkOption {
+      type = with types; listOf str;
+      default = [ "[::1]:53" "127.0.0.1:53" ];
+      example = [ "53" ];
+      description = lib.mdDoc ''
+        What addresses and ports the server should listen on.
+        For detailed syntax see ListenStream in {manpage}`systemd.socket(5)`.
+      '';
+    };
+    listenTLS = mkOption {
+      type = with types; listOf str;
+      default = [];
+      example = [ "198.51.100.1:853" "[2001:db8::1]:853" "853" ];
+      description = lib.mdDoc ''
+        Addresses and ports on which kresd should provide DNS over TLS (see RFC 7858).
+        For detailed syntax see ListenStream in {manpage}`systemd.socket(5)`.
+      '';
+    };
+    listenDoH = mkOption {
+      type = with types; listOf str;
+      default = [];
+      example = [ "198.51.100.1:443" "[2001:db8::1]:443" "443" ];
+      description = lib.mdDoc ''
+        Addresses and ports on which kresd should provide DNS over HTTPS/2 (see RFC 8484).
+        For detailed syntax see ListenStream in {manpage}`systemd.socket(5)`.
+      '';
+    };
+    instances = mkOption {
+      type = types.ints.unsigned;
+      default = 1;
+      description = lib.mdDoc ''
+        The number of instances to start.  They will be called kresd@{1,2,...}.service.
+        Knot Resolver uses no threads, so this is the way to scale.
+        You can dynamically start/stop them at will, so this is just the system default.
+      '';
+    };
+    # TODO: perhaps options for more common stuff like cache size or forwarding
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    environment.etc."knot-resolver/kresd.conf".source = configFile; # not required
+
+    networking.resolvconf.useLocalResolver = mkDefault true;
+
+    users.users.knot-resolver =
+      { isSystemUser = true;
+        group = "knot-resolver";
+        description = "Knot-resolver daemon user";
+      };
+    users.groups.knot-resolver.gid = null;
+
+    systemd.packages = [ cfg.package ]; # the units are patched inside the package a bit
+
+    systemd.targets.kresd = { # configure units started by default
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "kres-cache-gc.service" ]
+        ++ map (i: "kresd@${toString i}.service") (range 1 cfg.instances);
+    };
+    systemd.services."kresd@".serviceConfig = {
+      ExecStart = "${cfg.package}/bin/kresd --noninteractive "
+        + "-c ${cfg.package}/lib/knot-resolver/distro-preconfig.lua -c ${configFile}";
+      # Ensure /run/knot-resolver exists
+      RuntimeDirectory = "knot-resolver";
+      RuntimeDirectoryMode = "0770";
+      # Ensure /var/lib/knot-resolver exists
+      StateDirectory = "knot-resolver";
+      StateDirectoryMode = "0770";
+      # Ensure /var/cache/knot-resolver exists
+      CacheDirectory = "knot-resolver";
+      CacheDirectoryMode = "0770";
+    };
+    # We don't mind running stop phase from wrong version.  It seems less racy.
+    systemd.services."kresd@".stopIfChanged = false;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/lambdabot.nix b/nixpkgs/nixos/modules/services/networking/lambdabot.nix
new file mode 100644
index 000000000000..8609bc971962
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/lambdabot.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.lambdabot;
+
+  rc = builtins.toFile "script.rc" cfg.script;
+
+in
+
+{
+
+  ### configuration
+
+  options = {
+
+    services.lambdabot = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable the Lambdabot IRC bot";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.lambdabot;
+        defaultText = literalExpression "pkgs.lambdabot";
+        description = lib.mdDoc "Used lambdabot package";
+      };
+
+      script = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Lambdabot script";
+      };
+
+    };
+
+  };
+
+  ### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.lambdabot = {
+      description = "Lambdabot daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      # Workaround for https://github.com/lambdabot/lambdabot/issues/117
+      script = ''
+        mkdir -p ~/.lambdabot
+        cd ~/.lambdabot
+        mkfifo /run/lambdabot/offline
+        (
+          echo 'rc ${rc}'
+          while true; do
+            cat /run/lambdabot/offline
+          done
+        ) | ${cfg.package}/bin/lambdabot
+      '';
+      serviceConfig = {
+        User = "lambdabot";
+        RuntimeDirectory = [ "lambdabot" ];
+      };
+    };
+
+    users.users.lambdabot = {
+      group = "lambdabot";
+      description = "Lambdabot daemon user";
+      home = "/var/lib/lambdabot";
+      createHome = true;
+      uid = config.ids.uids.lambdabot;
+    };
+
+    users.groups.lambdabot.gid = config.ids.gids.lambdabot;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/legit.nix b/nixpkgs/nixos/modules/services/networking/legit.nix
new file mode 100644
index 000000000000..90234f3955e8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/legit.nix
@@ -0,0 +1,182 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    literalExpression
+    mkEnableOption
+    mdDoc
+    mkIf
+    mkOption
+    mkPackageOptionMD
+    optionalAttrs
+    optional
+    types;
+
+  cfg = config.services.legit;
+
+  yaml = pkgs.formats.yaml { };
+  configFile = yaml.generate "legit.yaml" cfg.settings;
+
+  defaultStateDir = "/var/lib/legit";
+  defaultStaticDir = "${cfg.settings.repo.scanPath}/static";
+  defaultTemplatesDir = "${cfg.settings.repo.scanPath}/templates";
+in
+{
+  options.services.legit = {
+    enable = mkEnableOption (mdDoc "legit git web frontend");
+
+    package = mkPackageOptionMD pkgs "legit-web" { };
+
+    user = mkOption {
+      type = types.str;
+      default = "legit";
+      description = mdDoc "User account under which legit runs.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "legit";
+      description = mdDoc "Group account under which legit runs.";
+    };
+
+    settings = mkOption {
+      default = { };
+      description = mdDoc ''
+        The primary legit configuration. See the
+        [sample configuration](https://github.com/icyphox/legit/blob/master/config.yaml)
+        for possible values.
+      '';
+      type = types.submodule {
+        options.repo = {
+          scanPath = mkOption {
+            type = types.path;
+            default = defaultStateDir;
+            description = mdDoc "Directory where legit will scan for repositories.";
+          };
+          readme = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            description = mdDoc "Readme files to look for.";
+          };
+          mainBranch = mkOption {
+            type = types.listOf types.str;
+            default = [ "main" "master" ];
+            description = mdDoc "Main branch to look for.";
+          };
+          ignore = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            description = mdDoc "Repositories to ignore.";
+          };
+        };
+        options.dirs = {
+          templates = mkOption {
+            type = types.path;
+            default = "${pkgs.legit-web}/lib/legit/templates";
+            defaultText = literalExpression ''"''${pkgs.legit-web}/lib/legit/templates"'';
+            description = mdDoc "Directories where template files are located.";
+          };
+          static = mkOption {
+            type = types.path;
+            default = "${pkgs.legit-web}/lib/legit/static";
+            defaultText = literalExpression ''"''${pkgs.legit-web}/lib/legit/static"'';
+            description = mdDoc "Directories where static files are located.";
+          };
+        };
+        options.meta = {
+          title = mkOption {
+            type = types.str;
+            default = "legit";
+            description = mdDoc "Website title.";
+          };
+          description = mkOption {
+            type = types.str;
+            default = "git frontend";
+            description = mdDoc "Website description.";
+          };
+        };
+        options.server = {
+          name = mkOption {
+            type = types.str;
+            default = "localhost";
+            description = mdDoc "Server name.";
+          };
+          host = mkOption {
+            type = types.str;
+            default = "127.0.0.1";
+            description = mdDoc "Host address.";
+          };
+          port = mkOption {
+            type = types.port;
+            default = 5555;
+            description = mdDoc "Legit port.";
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups = optionalAttrs (cfg.group == "legit") {
+      "${cfg.group}" = { };
+    };
+
+    users.users = optionalAttrs (cfg.user == "legit") {
+      "${cfg.user}" = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    systemd.services.legit = {
+      description = "legit git frontend";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ configFile ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${cfg.package}/bin/legit -config ${configFile}";
+        Restart = "always";
+
+        WorkingDirectory = cfg.settings.repo.scanPath;
+        StateDirectory = [ ] ++
+          optional (cfg.settings.repo.scanPath == defaultStateDir) "legit" ++
+          optional (cfg.settings.dirs.static == defaultStaticDir) "legit/static" ++
+          optional (cfg.settings.dirs.templates == defaultTemplatesDir) "legit/templates";
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        ReadWritePaths = cfg.settings.repo.scanPath;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        UMask = "0077";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/libreswan.nix b/nixpkgs/nixos/modules/services/networking/libreswan.nix
new file mode 100644
index 000000000000..db4d2f7f0ba0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/libreswan.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.libreswan;
+
+  libexec = "${pkgs.libreswan}/libexec/ipsec";
+  ipsec = "${pkgs.libreswan}/sbin/ipsec";
+
+  trim = chars: str:
+  let
+    nonchars = filter (x : !(elem x.value chars))
+               (imap0 (i: v: {ind = i; value = v;}) (stringToCharacters str));
+  in
+    lib.optionalString (nonchars != [ ])
+      (substring (head nonchars).ind (add 1 (sub (last nonchars).ind (head nonchars).ind)) str);
+  indent = str: concatStrings (concatMap (s: ["  " (trim [" " "\t"] s) "\n"]) (splitString "\n" str));
+  configText = indent (toString cfg.configSetup);
+  connectionText = concatStrings (mapAttrsToList (n: v:
+    ''
+      conn ${n}
+      ${indent v}
+    '') cfg.connections);
+
+  configFile = pkgs.writeText "ipsec-nixos.conf"
+    ''
+      config setup
+      ${configText}
+
+      ${connectionText}
+    '';
+
+  policyFiles = mapAttrs' (name: text:
+    { name = "ipsec.d/policies/${name}";
+      value.source = pkgs.writeText "ipsec-policy-${name}" text;
+    }) cfg.policies;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.libreswan = {
+
+      enable = mkEnableOption (lib.mdDoc "Libreswan IPsec service");
+
+      configSetup = mkOption {
+        type = types.lines;
+        default = ''
+            protostack=netkey
+            virtual_private=%v4:10.0.0.0/8,%v4:192.168.0.0/16,%v4:172.16.0.0/12,%v4:25.0.0.0/8,%v4:100.64.0.0/10,%v6:fd00::/8,%v6:fe80::/10
+        '';
+        example = ''
+            secretsfile=/root/ipsec.secrets
+            protostack=netkey
+            virtual_private=%v4:10.0.0.0/8,%v4:192.168.0.0/16,%v4:172.16.0.0/12,%v4:25.0.0.0/8,%v4:100.64.0.0/10,%v6:fd00::/8,%v6:fe80::/10
+        '';
+        description = lib.mdDoc "Options to go in the 'config setup' section of the Libreswan IPsec configuration";
+      };
+
+      connections = mkOption {
+        type = types.attrsOf types.lines;
+        default = {};
+        example = literalExpression ''
+          { myconnection = '''
+              auto=add
+              left=%defaultroute
+              leftid=@user
+
+              right=my.vpn.com
+
+              ikev2=no
+              ikelifetime=8h
+            ''';
+          }
+        '';
+        description = lib.mdDoc "A set of connections to define for the Libreswan IPsec service";
+      };
+
+      policies = mkOption {
+        type = types.attrsOf types.lines;
+        default = {};
+        example = literalExpression ''
+          { private-or-clear = '''
+              # Attempt opportunistic IPsec for the entire Internet
+              0.0.0.0/0
+              ::/0
+            ''';
+          }
+        '';
+        description = lib.mdDoc ''
+          A set of policies to apply to the IPsec connections.
+
+          ::: {.note}
+          The policy name must match the one of connection it needs to apply to.
+          :::
+        '';
+      };
+
+      disableRedirects = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to disable send and accept redirects for all network interfaces.
+          See the Libreswan
+          [FAQ](https://libreswan.org/wiki/FAQ#Why_is_it_recommended_to_disable_send_redirects_in_.2Fproc.2Fsys.2Fnet_.3F)
+          page for why this is recommended.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    # Install package, systemd units, etc.
+    environment.systemPackages = [ pkgs.libreswan pkgs.iproute2 ];
+    systemd.packages = [ pkgs.libreswan ];
+    systemd.tmpfiles.packages = [ pkgs.libreswan ];
+
+    # Install configuration files
+    environment.etc = {
+      "ipsec.secrets".source = "${pkgs.libreswan}/etc/ipsec.secrets";
+      "ipsec.conf".source = "${pkgs.libreswan}/etc/ipsec.conf";
+      "ipsec.d/01-nixos.conf".source = configFile;
+    } // policyFiles;
+
+    # Create NSS database directory
+    systemd.tmpfiles.rules = [ "d /var/lib/ipsec/nss 755 root root -" ];
+
+    systemd.services.ipsec = {
+      description = "Internet Key Exchange (IKE) Protocol Daemon for IPsec";
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ configFile ] ++ mapAttrsToList (n: v: v.source) policyFiles;
+      path = with pkgs; [
+        libreswan
+        iproute2
+        procps
+        nssTools
+        iptables
+        nettools
+      ];
+      preStart = optionalString cfg.disableRedirects ''
+        # Disable send and accept redirects
+        echo 0 | tee /proc/sys/net/ipv4/conf/*/send_redirects
+        echo 0 | tee /proc/sys/net/ipv{4,6}/conf/*/accept_redirects
+      '';
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/lldpd.nix b/nixpkgs/nixos/modules/services/networking/lldpd.nix
new file mode 100644
index 000000000000..b7ac99d75d75
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/lldpd.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.lldpd;
+
+in
+
+{
+  options.services.lldpd = {
+    enable = mkEnableOption (lib.mdDoc "Link Layer Discovery Protocol Daemon");
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "-c" "-k" "-I eth0" ];
+      description = lib.mdDoc "List of command line parameters for lldpd";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users._lldpd = {
+      description = "lldpd user";
+      group = "_lldpd";
+      home = "/run/lldpd";
+      isSystemUser = true;
+    };
+    users.groups._lldpd = {};
+
+    environment.systemPackages = [ pkgs.lldpd ];
+    systemd.packages = [ pkgs.lldpd ];
+
+    systemd.services.lldpd = {
+      wantedBy = [ "multi-user.target" ];
+      environment.LLDPD_OPTIONS = concatStringsSep " " cfg.extraArgs;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/logmein-hamachi.nix b/nixpkgs/nixos/modules/services/networking/logmein-hamachi.nix
new file mode 100644
index 000000000000..7c00b82e3b34
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/logmein-hamachi.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.logmein-hamachi;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.logmein-hamachi.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description =
+        lib.mdDoc ''
+          Whether to enable LogMeIn Hamachi, a proprietary
+          (closed source) commercial VPN software.
+        '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.logmein-hamachi = {
+      description = "LogMeIn Hamachi Daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.logmein-hamachi}/bin/hamachid";
+      };
+    };
+
+    environment.systemPackages = [ pkgs.logmein-hamachi ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/lokinet.nix b/nixpkgs/nixos/modules/services/networking/lokinet.nix
new file mode 100644
index 000000000000..f6bc314ed260
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/lokinet.nix
@@ -0,0 +1,157 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.lokinet;
+  dataDir = "/var/lib/lokinet";
+  settingsFormat = pkgs.formats.ini { listsAsDuplicateKeys = true; };
+  configFile = settingsFormat.generate "lokinet.ini" (lib.filterAttrsRecursive (n: v: v != null) cfg.settings);
+in with lib; {
+  options.services.lokinet = {
+    enable = mkEnableOption (lib.mdDoc "Lokinet daemon");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.lokinet;
+      defaultText = literalExpression "pkgs.lokinet";
+      description = lib.mdDoc "Lokinet package to use.";
+    };
+
+    useLocally = mkOption {
+      type = types.bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc "Whether to use Lokinet locally.";
+    };
+
+    settings = mkOption {
+      type = with types;
+        submodule {
+          freeformType = settingsFormat.type;
+
+          options = {
+            dns = {
+              bind = mkOption {
+                type = str;
+                default = "127.3.2.1";
+                description = lib.mdDoc "Address to bind to for handling DNS requests.";
+              };
+
+              upstream = mkOption {
+                type = listOf str;
+                default = [ "9.9.9.10" ];
+                example = [ "1.1.1.1" "8.8.8.8" ];
+                description = lib.mdDoc ''
+                  Upstream resolver(s) to use as fallback for non-loki addresses.
+                  Multiple values accepted.
+                '';
+              };
+            };
+
+            network = {
+              exit = mkOption {
+                type = bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Whether to act as an exit node. Beware that this
+                  increases demand on the server and may pose liability concerns.
+                  Enable at your own risk.
+                '';
+              };
+
+              exit-node = mkOption {
+                type = nullOr (listOf str);
+                default = null;
+                example = ''
+                  exit-node = [ "example.loki" ];              # maps all exit traffic to example.loki
+                  exit-node = [ "example.loki:100.0.0.0/24" ]; # maps 100.0.0.0/24 to example.loki
+                '';
+                description = lib.mdDoc ''
+                  Specify a `.loki` address and an optional IP range to use as an exit broker.
+                  See <http://probably.loki/wiki/index.php?title=Exit_Nodes> for
+                  a list of exit nodes.
+                '';
+              };
+
+              keyfile = mkOption {
+                type = nullOr str;
+                default = null;
+                example = "snappkey.private";
+                description = lib.mdDoc ''
+                  The private key file used to persist the address. If not specified, the address will be ephemeral.
+                  This keyfile is generated automatically if the specified file doesn't exist.
+                '';
+              };
+            };
+          };
+        };
+      default = { };
+      example = literalExpression ''
+        {
+          dns = {
+            bind = "127.3.2.1";
+            upstream = [ "1.1.1.1" "8.8.8.8" ];
+          };
+
+          network.exit-node = [ "example.loki" "example2.loki" ];
+        }
+      '';
+      description = lib.mdDoc ''
+        Configuration for Lokinet.
+        Currently, the best way to view the available settings is by
+        generating a config file using `lokinet -g`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.resolvconf.extraConfig = mkIf cfg.useLocally ''
+      name_servers="${cfg.settings.dns.bind}"
+    '';
+
+    systemd.services.lokinet = {
+      description = "Lokinet";
+      after = [ "network-online.target" "network.target" ];
+      wants = [ "network-online.target" "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        ln -sf ${cfg.package}/share/bootstrap.signed ${dataDir}
+        ${pkgs.coreutils}/bin/install -m 600 ${configFile} ${dataDir}/lokinet.ini
+
+        ${optionalString (cfg.settings.network.keyfile != null) ''
+          ${pkgs.crudini}/bin/crudini --set ${dataDir}/lokinet.ini network keyfile "${dataDir}/${cfg.settings.network.keyfile}"
+        ''}
+      '';
+
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "lokinet";
+        AmbientCapabilities = [ "CAP_NET_ADMIN" "CAP_NET_BIND_SERVICE" ];
+        ExecStart = "${cfg.package}/bin/lokinet ${dataDir}/lokinet.ini";
+        Restart = "always";
+        RestartSec = "5s";
+
+        # hardening
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        PrivateMounts = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        ReadWritePaths = "/dev/net/tun";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/lxd-image-server.nix b/nixpkgs/nixos/modules/services/networking/lxd-image-server.nix
new file mode 100644
index 000000000000..d8e32eb997e8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/lxd-image-server.nix
@@ -0,0 +1,133 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.lxd-image-server;
+  format = pkgs.formats.toml {};
+
+  location = "/var/www/simplestreams";
+in
+{
+  options = {
+    services.lxd-image-server = {
+      enable = mkEnableOption (lib.mdDoc "lxd-image-server");
+
+      group = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Group assigned to the user and the webroot directory.";
+        default = "nginx";
+        example = "www-data";
+      };
+
+      settings = mkOption {
+        type = format.type;
+        description = lib.mdDoc ''
+          Configuration for lxd-image-server.
+
+          Example see <https://github.com/Avature/lxd-image-server/blob/master/config.toml>.
+        '';
+        default = {};
+      };
+
+      nginx = {
+        enable = mkEnableOption (lib.mdDoc "nginx");
+        domain = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Domain to use for nginx virtual host.";
+          example = "images.example.org";
+        };
+      };
+    };
+  };
+
+  config = mkMerge [
+    (mkIf (cfg.enable) {
+      users.users.lxd-image-server = {
+        isSystemUser = true;
+        group = cfg.group;
+      };
+      users.groups.${cfg.group} = {};
+
+      environment.etc."lxd-image-server/config.toml".source = format.generate "config.toml" cfg.settings;
+
+      services.logrotate.settings.lxd-image-server = {
+        files = "/var/log/lxd-image-server/lxd-image-server.log";
+        frequency = "daily";
+        rotate = 21;
+        create = "755 lxd-image-server ${cfg.group}";
+        compress = true;
+        delaycompress = true;
+        copytruncate = true;
+      };
+
+      systemd.tmpfiles.rules = [
+        "d /var/www/simplestreams 0755 lxd-image-server ${cfg.group}"
+      ];
+
+      systemd.services.lxd-image-server = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+
+        description = "LXD Image Server";
+
+        script = ''
+          ${pkgs.lxd-image-server}/bin/lxd-image-server init
+          ${pkgs.lxd-image-server}/bin/lxd-image-server watch
+        '';
+
+        serviceConfig = {
+          User = "lxd-image-server";
+          Group = cfg.group;
+          DynamicUser = true;
+          LogsDirectory = "lxd-image-server";
+          RuntimeDirectory = "lxd-image-server";
+          ExecReload = "${pkgs.lxd-image-server}/bin/lxd-image-server reload";
+          ReadWritePaths = [ location ];
+        };
+      };
+    })
+    # this is separate so it can be enabled on mirrored hosts
+    (mkIf (cfg.nginx.enable) {
+      # https://github.com/Avature/lxd-image-server/blob/master/resources/nginx/includes/lxd-image-server.pkg.conf
+      services.nginx.virtualHosts = {
+        "${cfg.nginx.domain}" = {
+          forceSSL = true;
+          enableACME = mkDefault true;
+
+          root = location;
+
+          locations = {
+            "/streams/v1/" = {
+              index = "index.json";
+            };
+
+            # Serve json files with content type header application/json
+            "~ \.json$" = {
+              extraConfig = ''
+                add_header Content-Type application/json;
+              '';
+            };
+
+            "~ \.tar.xz$" = {
+              extraConfig = ''
+                add_header Content-Type application/octet-stream;
+              '';
+            };
+
+            "~ \.tar.gz$" = {
+              extraConfig = ''
+                add_header Content-Type application/octet-stream;
+              '';
+            };
+
+            # Deny access to document root and the images folder
+            "~ ^/(images/)?$" = {
+              return = "403";
+            };
+          };
+        };
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/magic-wormhole-mailbox-server.nix b/nixpkgs/nixos/modules/services/networking/magic-wormhole-mailbox-server.nix
new file mode 100644
index 000000000000..9dd1f62350af
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/magic-wormhole-mailbox-server.nix
@@ -0,0 +1,28 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.magic-wormhole-mailbox-server;
+  dataDir = "/var/lib/magic-wormhole-mailbox-server;";
+  python = pkgs.python3.withPackages (py: [ py.magic-wormhole-mailbox-server py.twisted ]);
+in
+{
+  options.services.magic-wormhole-mailbox-server = {
+    enable = mkEnableOption (lib.mdDoc "Magic Wormhole Mailbox Server");
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.magic-wormhole-mailbox-server = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${python}/bin/twistd --nodaemon wormhole-mailbox";
+        WorkingDirectory = dataDir;
+        StateDirectory = baseNameOf dataDir;
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/matterbridge.nix b/nixpkgs/nixos/modules/services/networking/matterbridge.nix
new file mode 100644
index 000000000000..2921074fcd2b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/matterbridge.nix
@@ -0,0 +1,120 @@
+{ options, config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.matterbridge;
+
+  matterbridgeConfToml =
+    if cfg.configPath == null then
+      pkgs.writeText "matterbridge.toml" (cfg.configFile)
+    else
+      cfg.configPath;
+
+in
+
+{
+  options = {
+    services.matterbridge = {
+      enable = mkEnableOption (lib.mdDoc "Matterbridge chat platform bridge");
+
+      configPath = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "/etc/nixos/matterbridge.toml";
+        description = lib.mdDoc ''
+          The path to the matterbridge configuration file.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.str;
+        example = ''
+          # WARNING: as this file contains credentials, do not use this option!
+          # It is kept only for backwards compatibility, and would cause your
+          # credentials to be in the nix-store, thus with the world-readable
+          # permission bits.
+          # Use services.matterbridge.configPath instead.
+
+          [irc]
+              [irc.libera]
+              Server="irc.libera.chat:6667"
+              Nick="matterbot"
+
+          [mattermost]
+              [mattermost.work]
+               # Do not prefix it with http:// or https://
+               Server="yourmattermostserver.domain"
+               Team="yourteam"
+               Login="yourlogin"
+               Password="yourpass"
+               PrefixMessagesWithNick=true
+
+          [[gateway]]
+          name="gateway1"
+          enable=true
+              [[gateway.inout]]
+              account="irc.libera"
+              channel="#testing"
+
+              [[gateway.inout]]
+              account="mattermost.work"
+              channel="off-topic"
+        '';
+        description = lib.mdDoc ''
+          WARNING: THIS IS INSECURE, as your password will end up in
+          {file}`/nix/store`, thus publicly readable. Use
+          `services.matterbridge.configPath` instead.
+
+          The matterbridge configuration file in the TOML file format.
+        '';
+      };
+      user = mkOption {
+        type = types.str;
+        default = "matterbridge";
+        description = lib.mdDoc ''
+          User which runs the matterbridge service.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "matterbridge";
+        description = lib.mdDoc ''
+          Group which runs the matterbridge service.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = optional options.services.matterbridge.configFile.isDefined
+      "The option services.matterbridge.configFile is insecure and should be replaced with services.matterbridge.configPath";
+
+    users.users = optionalAttrs (cfg.user == "matterbridge")
+      { matterbridge = {
+          group = "matterbridge";
+          isSystemUser = true;
+        };
+      };
+
+    users.groups = optionalAttrs (cfg.group == "matterbridge")
+      { matterbridge = { };
+      };
+
+    systemd.services.matterbridge = {
+      description = "Matterbridge chat platform bridge";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${pkgs.matterbridge}/bin/matterbridge -conf ${matterbridgeConfToml}";
+        Restart = "always";
+        RestartSec = "10";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/minidlna.nix b/nixpkgs/nixos/modules/services/networking/minidlna.nix
new file mode 100644
index 000000000000..d0de6cd4fdc6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/minidlna.nix
@@ -0,0 +1,148 @@
+# Module for MiniDLNA, a simple DLNA server.
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  cfg = config.services.minidlna;
+  settingsFormat = pkgs.formats.keyValue { listsAsDuplicateKeys = true; };
+  settingsFile = settingsFormat.generate "minidlna.conf" cfg.settings;
+in
+
+{
+  ###### interface
+  options.services.minidlna.enable = mkOption {
+    type = types.bool;
+    default = false;
+    description = lib.mdDoc ''
+      Whether to enable MiniDLNA, a simple DLNA server.
+      It serves media files such as video and music to DLNA client devices
+      such as televisions and media players. If you use the firewall, consider
+      adding the following: `services.minidlna.openFirewall = true;`
+    '';
+  };
+
+  options.services.minidlna.openFirewall = mkOption {
+    type = types.bool;
+    default = false;
+    description = lib.mdDoc ''
+      Whether to open both HTTP (TCP) and SSDP (UDP) ports in the firewall.
+    '';
+  };
+
+  options.services.minidlna.settings = mkOption {
+    default = {};
+    description = lib.mdDoc ''
+      The contents of MiniDLNA's configuration file.
+      When the service is activated, a configuration file is generated from the options defined here.
+    '';
+    type = types.submodule {
+      freeformType = settingsFormat.type;
+
+      options.media_dir = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "/data/media" "V,/home/alice/video" ];
+        description = lib.mdDoc ''
+          Directories to be scanned for media files.
+          The `A,` `V,` `P,` prefixes restrict a directory to audio, video or image files.
+          The directories must be accessible to the `minidlna` user account.
+        '';
+      };
+      options.notify_interval = mkOption {
+        type = types.int;
+        default = 90000;
+        description = lib.mdDoc ''
+          The interval between announces (in seconds).
+          Instead of waiting for announces, you should set the `openFirewall` option to use SSDP discovery.
+          Lower values (e.g. 30 seconds) should be used if your network blocks the discovery unicast.
+          Some relevant information can be found here:
+          https://sourceforge.net/p/minidlna/discussion/879957/thread/1389d197/
+        '';
+      };
+      options.port = mkOption {
+        type = types.port;
+        default = 8200;
+        description = lib.mdDoc "Port number for HTTP traffic (descriptions, SOAP, media transfer).";
+      };
+      options.db_dir = mkOption {
+        type = types.path;
+        default = "/var/cache/minidlna";
+        example = "/tmp/minidlna";
+        description = lib.mdDoc "Specify the directory where you want MiniDLNA to store its database and album art cache.";
+      };
+      options.friendly_name = mkOption {
+        type = types.str;
+        default = config.networking.hostName;
+        defaultText = literalExpression "config.networking.hostName";
+        example = "rpi3";
+        description = lib.mdDoc "Name that the DLNA server presents to clients.";
+      };
+      options.root_container = mkOption {
+        type = types.str;
+        default = "B";
+        example = ".";
+        description = lib.mdDoc "Use a different container as the root of the directory tree presented to clients.";
+      };
+      options.log_level = mkOption {
+        type = types.str;
+        default = "warn";
+        example = "general,artwork,database,inotify,scanner,metadata,http,ssdp,tivo=warn";
+        description = lib.mdDoc "Defines the type of messages that should be logged and down to which level of importance.";
+      };
+      options.inotify = mkOption {
+        type = types.enum [ "yes" "no" ];
+        default = "no";
+        description = lib.mdDoc "Whether to enable inotify monitoring to automatically discover new files.";
+      };
+      options.enable_tivo = mkOption {
+        type = types.enum [ "yes" "no" ];
+        default = "no";
+        description = lib.mdDoc "Support for streaming .jpg and .mp3 files to a TiVo supporting HMO.";
+      };
+      options.wide_links = mkOption {
+        type = types.enum [ "yes" "no" ];
+        default = "no";
+        description = lib.mdDoc "Set this to yes to allow symlinks that point outside user-defined `media_dir`.";
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "minidlna" "config" ] "")
+    (mkRemovedOptionModule [ "services" "minidlna" "extraConfig" ] "")
+    (mkRenamedOptionModule [ "services" "minidlna" "loglevel"] [ "services" "minidlna" "settings" "log_level" ])
+    (mkRenamedOptionModule [ "services" "minidlna" "rootContainer"] [ "services" "minidlna" "settings" "root_container" ])
+    (mkRenamedOptionModule [ "services" "minidlna" "mediaDirs"] [ "services" "minidlna" "settings" "media_dir" ])
+    (mkRenamedOptionModule [ "services" "minidlna" "friendlyName"] [ "services" "minidlna" "settings" "friendly_name" ])
+    (mkRenamedOptionModule [ "services" "minidlna" "announceInterval"] [ "services" "minidlna" "settings" "notify_interval" ])
+  ];
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.settings.port ];
+    networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ 1900 ];
+
+    users.users.minidlna = {
+      description = "MiniDLNA daemon user";
+      group = "minidlna";
+      uid = config.ids.uids.minidlna;
+    };
+
+    users.groups.minidlna.gid = config.ids.gids.minidlna;
+
+    systemd.services.minidlna = {
+      description = "MiniDLNA Server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        User = "minidlna";
+        Group = "minidlna";
+        CacheDirectory = "minidlna";
+        RuntimeDirectory = "minidlna";
+        PIDFile = "/run/minidlna/pid";
+        ExecStart = "${pkgs.minidlna}/sbin/minidlnad -S -P /run/minidlna/pid -f ${settingsFile}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/miniupnpd.nix b/nixpkgs/nixos/modules/services/networking/miniupnpd.nix
new file mode 100644
index 000000000000..64aacaf35040
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/miniupnpd.nix
@@ -0,0 +1,79 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.miniupnpd;
+  configFile = pkgs.writeText "miniupnpd.conf" ''
+    ext_ifname=${cfg.externalInterface}
+    enable_natpmp=${if cfg.natpmp then "yes" else "no"}
+    enable_upnp=${if cfg.upnp then "yes" else "no"}
+
+    ${concatMapStrings (range: ''
+      listening_ip=${range}
+    '') cfg.internalIPs}
+
+    ${cfg.appendConfig}
+  '';
+in
+{
+  options = {
+    services.miniupnpd = {
+      enable = mkEnableOption (lib.mdDoc "MiniUPnP daemon");
+
+      externalInterface = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Name of the external interface.
+        '';
+      };
+
+      internalIPs = mkOption {
+        type = types.listOf types.str;
+        example = [ "192.168.1.1/24" "enp1s0" ];
+        description = lib.mdDoc ''
+          The IP address ranges to listen on.
+        '';
+      };
+
+      natpmp = mkEnableOption (lib.mdDoc "NAT-PMP support");
+
+      upnp = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable UPNP support.
+        '';
+      };
+
+      appendConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration lines appended to the MiniUPnP config.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall.extraCommands = ''
+      ${pkgs.bash}/bin/bash -x ${pkgs.miniupnpd}/etc/miniupnpd/iptables_init.sh -i ${cfg.externalInterface}
+    '';
+
+    networking.firewall.extraStopCommands = ''
+      ${pkgs.bash}/bin/bash -x ${pkgs.miniupnpd}/etc/miniupnpd/iptables_removeall.sh -i ${cfg.externalInterface}
+    '';
+
+    systemd.services.miniupnpd = {
+      description = "MiniUPnP daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.miniupnpd}/bin/miniupnpd -f ${configFile}";
+        PIDFile = "/run/miniupnpd.pid";
+        Type = "forking";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/miredo.nix b/nixpkgs/nixos/modules/services/networking/miredo.nix
new file mode 100644
index 000000000000..d15a55b4d7d6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/miredo.nix
@@ -0,0 +1,92 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.miredo;
+  pidFile = "/run/miredo.pid";
+  miredoConf = pkgs.writeText "miredo.conf" ''
+    InterfaceName ${cfg.interfaceName}
+    ServerAddress ${cfg.serverAddress}
+    ${optionalString (cfg.bindAddress != null) "BindAddress ${cfg.bindAddress}"}
+    ${optionalString (cfg.bindPort != null) "BindPort ${cfg.bindPort}"}
+  '';
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.miredo = {
+
+      enable = mkEnableOption (lib.mdDoc "the Miredo IPv6 tunneling service");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.miredo;
+        defaultText = literalExpression "pkgs.miredo";
+        description = lib.mdDoc ''
+          The package to use for the miredo daemon's binary.
+        '';
+      };
+
+      serverAddress = mkOption {
+        default = "teredo.remlab.net";
+        type = types.str;
+        description = lib.mdDoc ''
+          The hostname or primary IPv4 address of the Teredo server.
+          This setting is required if Miredo runs as a Teredo client.
+          "teredo.remlab.net" is an experimental service for testing only.
+          Please use another server for production and/or large scale deployments.
+        '';
+      };
+
+      interfaceName = mkOption {
+        default = "teredo";
+        type = types.str;
+        description = lib.mdDoc ''
+          Name of the network tunneling interface.
+        '';
+      };
+
+      bindAddress = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          Depending on the local firewall/NAT rules, you might need to force
+          Miredo to use a fixed UDP port and/or IPv4 address.
+        '';
+      };
+
+      bindPort = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          Depending on the local firewall/NAT rules, you might need to force
+          Miredo to use a fixed UDP port and/or IPv4 address.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.miredo = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "Teredo IPv6 Tunneling Daemon";
+      serviceConfig = {
+        Restart = "always";
+        RestartSec = "5s";
+        ExecStart = "${cfg.package}/bin/miredo -c ${miredoConf} -p ${pidFile} -f";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mjpg-streamer.nix b/nixpkgs/nixos/modules/services/networking/mjpg-streamer.nix
new file mode 100644
index 000000000000..8f8d5f5c4d35
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mjpg-streamer.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.mjpg-streamer;
+
+in {
+
+  options = {
+
+    services.mjpg-streamer = {
+
+      enable = mkEnableOption (lib.mdDoc "mjpg-streamer webcam streamer");
+
+      inputPlugin = mkOption {
+        type = types.str;
+        default = "input_uvc.so";
+        description = lib.mdDoc ''
+          Input plugin. See plugins documentation for more information.
+        '';
+      };
+
+      outputPlugin = mkOption {
+        type = types.str;
+        default = "output_http.so -w @www@ -n -p 5050";
+        description = lib.mdDoc ''
+          Output plugin. `@www@` is substituted with the default mjpg-streamer www directory.
+          See plugins documentation for more information.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "mjpg-streamer";
+        description = lib.mdDoc "mjpg-streamer user name.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "video";
+        description = lib.mdDoc "mjpg-streamer group name.";
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users = optionalAttrs (cfg.user == "mjpg-streamer") {
+      mjpg-streamer = {
+        uid = config.ids.uids.mjpg-streamer;
+        group = cfg.group;
+      };
+    };
+
+    systemd.services.mjpg-streamer = {
+      description = "mjpg-streamer webcam streamer";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+        RestartSec = 1;
+      };
+
+      script = ''
+        IPLUGIN="${cfg.inputPlugin}"
+        OPLUGIN="${cfg.outputPlugin}"
+        OPLUGIN="''${OPLUGIN//@www@/${pkgs.mjpg-streamer}/share/mjpg-streamer/www}"
+        exec ${pkgs.mjpg-streamer}/bin/mjpg_streamer -i "$IPLUGIN" -o "$OPLUGIN"
+      '';
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mmsd.nix b/nixpkgs/nixos/modules/services/networking/mmsd.nix
new file mode 100644
index 000000000000..7e262a9326c1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mmsd.nix
@@ -0,0 +1,38 @@
+{ pkgs, lib, config, ... }:
+with lib;
+let
+  cfg = config.services.mmsd;
+  dbusServiceFile = pkgs.writeTextDir "share/dbus-1/services/org.ofono.mms.service" ''
+    [D-BUS Service]
+    Name=org.ofono.mms
+    SystemdService=dbus-org.ofono.mms.service
+
+    # Exec= is still required despite SystemdService= being used:
+    # https://github.com/freedesktop/dbus/blob/ef55a3db0d8f17848f8a579092fb05900cc076f5/test/data/systemd-activation/com.example.SystemdActivatable1.service
+    Exec=${pkgs.coreutils}/bin/false mmsd
+  '';
+in
+{
+  options.services.mmsd = {
+    enable = mkEnableOption (mdDoc "Multimedia Messaging Service Daemon");
+    extraArgs = mkOption {
+      type = with types; listOf str;
+      description = mdDoc "Extra arguments passed to `mmsd-tng`";
+      default = [];
+      example = ["--debug"];
+    };
+  };
+  config = mkIf cfg.enable {
+    services.dbus.packages = [ dbusServiceFile ];
+    systemd.user.services.mmsd = {
+      after = [ "ModemManager.service" ];
+      aliases = [ "dbus-org.ofono.mms.service" ];
+      serviceConfig = {
+        Type = "dbus";
+        ExecStart = "${pkgs.mmsd-tng}/bin/mmsdtng " + escapeShellArgs cfg.extraArgs;
+        BusName = "org.ofono.mms";
+        Restart = "on-failure";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/monero.nix b/nixpkgs/nixos/modules/services/networking/monero.nix
new file mode 100644
index 000000000000..0de02882acab
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/monero.nix
@@ -0,0 +1,244 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg     = config.services.monero;
+
+  listToConf = option: list:
+    concatMapStrings (value: "${option}=${value}\n") list;
+
+  login = (cfg.rpc.user != null && cfg.rpc.password != null);
+
+  configFile = with cfg; pkgs.writeText "monero.conf" ''
+    log-file=/dev/stdout
+    data-dir=${dataDir}
+
+    ${optionalString mining.enable ''
+      start-mining=${mining.address}
+      mining-threads=${toString mining.threads}
+    ''}
+
+    rpc-bind-ip=${rpc.address}
+    rpc-bind-port=${toString rpc.port}
+    ${optionalString login ''
+      rpc-login=${rpc.user}:${rpc.password}
+    ''}
+    ${optionalString rpc.restricted ''
+      restricted-rpc=1
+    ''}
+
+    limit-rate-up=${toString limits.upload}
+    limit-rate-down=${toString limits.download}
+    max-concurrency=${toString limits.threads}
+    block-sync-size=${toString limits.syncSize}
+
+    ${listToConf "add-peer" extraNodes}
+    ${listToConf "add-priority-node" priorityNodes}
+    ${listToConf "add-exclusive-node" exclusiveNodes}
+
+    ${extraConfig}
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.monero = {
+
+      enable = mkEnableOption (lib.mdDoc "Monero node daemon");
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/monero";
+        description = lib.mdDoc ''
+          The directory where Monero stores its data files.
+        '';
+      };
+
+      mining.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to mine monero.
+        '';
+      };
+
+      mining.address = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Monero address to which mining rewards are sent.
+        '';
+      };
+
+      mining.threads = mkOption {
+        type = types.addCheck types.int (x: x>=0);
+        default = 0;
+        description = lib.mdDoc ''
+          Number of threads used for mining.
+          Set to `0` to use all available.
+        '';
+      };
+
+      rpc.user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          User name for RPC connections.
+        '';
+      };
+
+      rpc.password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Password for RPC connections.
+        '';
+      };
+
+      rpc.address = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          IP address the RPC server will bind to.
+        '';
+      };
+
+      rpc.port = mkOption {
+        type = types.port;
+        default = 18081;
+        description = lib.mdDoc ''
+          Port the RPC server will bind to.
+        '';
+      };
+
+      rpc.restricted = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to restrict RPC to view only commands.
+        '';
+      };
+
+      limits.upload = mkOption {
+        type = types.addCheck types.int (x: x>=-1);
+        default = -1;
+        description = lib.mdDoc ''
+          Limit of the upload rate in kB/s.
+          Set to `-1` to leave unlimited.
+        '';
+      };
+
+      limits.download = mkOption {
+        type = types.addCheck types.int (x: x>=-1);
+        default = -1;
+        description = lib.mdDoc ''
+          Limit of the download rate in kB/s.
+          Set to `-1` to leave unlimited.
+        '';
+      };
+
+      limits.threads = mkOption {
+        type = types.addCheck types.int (x: x>=0);
+        default = 0;
+        description = lib.mdDoc ''
+          Maximum number of threads used for a parallel job.
+          Set to `0` to leave unlimited.
+        '';
+      };
+
+      limits.syncSize = mkOption {
+        type = types.addCheck types.int (x: x>=0);
+        default = 0;
+        description = lib.mdDoc ''
+          Maximum number of blocks to sync at once.
+          Set to `0` for adaptive.
+        '';
+      };
+
+      extraNodes = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of additional peer IP addresses to add to the local list.
+        '';
+      };
+
+      priorityNodes = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of peer IP addresses to connect to and
+          attempt to keep the connection open.
+        '';
+      };
+
+      exclusiveNodes = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of peer IP addresses to connect to *only*.
+          If given the other peer options will be ignored.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra lines to be added verbatim to monerod configuration.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.monero = {
+      isSystemUser = true;
+      group = "monero";
+      description = "Monero daemon user";
+      home = cfg.dataDir;
+      createHome = true;
+    };
+
+    users.groups.monero = { };
+
+    systemd.services.monero = {
+      description = "monero daemon";
+      after    = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User  = "monero";
+        Group = "monero";
+        ExecStart = "${pkgs.monero-cli}/bin/monerod --config-file=${configFile} --non-interactive";
+        Restart = "always";
+        SuccessExitStatus = [ 0 1 ];
+      };
+    };
+
+    assertions = singleton {
+      assertion = cfg.mining.enable -> cfg.mining.address != "";
+      message   = ''
+       You need a Monero address to receive mining rewards:
+       specify one using option monero.mining.address.
+      '';
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
+
diff --git a/nixpkgs/nixos/modules/services/networking/morty.nix b/nixpkgs/nixos/modules/services/networking/morty.nix
new file mode 100644
index 000000000000..72514764a7c6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/morty.nix
@@ -0,0 +1,98 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.morty;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.morty = {
+
+      enable = mkEnableOption
+        (lib.mdDoc "Morty proxy server. See https://github.com/asciimoo/morty");
+
+      ipv6 = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Allow IPv6 HTTP requests?";
+      };
+
+      key = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          HMAC URL validation key (hexadecimal encoded).
+          Leave blank to disable; without a validation key, anyone can
+          submit proxy requests.
+          Generate with `printf %s somevalue | openssl dgst -sha1 -hmac somekey`
+        '';
+      };
+
+      timeout = mkOption {
+        type = types.int;
+        default = 2;
+        description = lib.mdDoc "Request timeout in seconds.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.morty;
+        defaultText = literalExpression "pkgs.morty";
+        description = lib.mdDoc "morty package to use.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3000;
+        description = lib.mdDoc "Listing port";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "The address on which the service listens";
+      };
+
+    };
+
+  };
+
+  ###### Service definition
+
+  config = mkIf config.services.morty.enable {
+
+    users.users.morty =
+      { description = "Morty user";
+        createHome = true;
+        home = "/var/lib/morty";
+        isSystemUser = true;
+        group = "morty";
+      };
+    users.groups.morty = {};
+
+    systemd.services.morty =
+      {
+        description = "Morty sanitizing proxy server.";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = "morty";
+          ExecStart = ''${cfg.package}/bin/morty              \
+            -listen ${cfg.listenAddress}:${toString cfg.port} \
+            ${optionalString cfg.ipv6 "-ipv6"}                \
+            ${optionalString (cfg.key != "") ("-key " + cfg.key)} \
+          '';
+        };
+      };
+    environment.systemPackages = [ cfg.package ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mosquitto.md b/nixpkgs/nixos/modules/services/networking/mosquitto.md
new file mode 100644
index 000000000000..5cdb598151e5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mosquitto.md
@@ -0,0 +1,102 @@
+# Mosquitto {#module-services-mosquitto}
+
+Mosquitto is a MQTT broker often used for IoT or home automation data transport.
+
+## Quickstart {#module-services-mosquitto-quickstart}
+
+A minimal configuration for Mosquitto is
+
+```nix
+services.mosquitto = {
+  enable = true;
+  listeners = [ {
+    acl = [ "pattern readwrite #" ];
+    omitPasswordAuth = true;
+    settings.allow_anonymous = true;
+  } ];
+};
+```
+
+This will start a broker on port 1883, listening on all interfaces of the machine, allowing
+read/write access to all topics to any user without password requirements.
+
+User authentication can be configured with the `users` key of listeners. A config that gives
+full read access to a user `monitor` and restricted write access to a user `service` could look
+like
+
+```nix
+services.mosquitto = {
+  enable = true;
+  listeners = [ {
+    users = {
+      monitor = {
+        acl = [ "read #" ];
+        password = "monitor";
+      };
+      service = {
+        acl = [ "write service/#" ];
+        password = "service";
+      };
+    };
+  } ];
+};
+```
+
+TLS authentication is configured by setting TLS-related options of the listener:
+
+```nix
+services.mosquitto = {
+  enable = true;
+  listeners = [ {
+    port = 8883; # port change is not required, but helpful to avoid mistakes
+    # ...
+    settings = {
+      cafile = "/path/to/mqtt.ca.pem";
+      certfile = "/path/to/mqtt.pem";
+      keyfile = "/path/to/mqtt.key";
+    };
+  } ];
+};
+```
+
+## Configuration {#module-services-mosquitto-config}
+
+The Mosquitto configuration has four distinct types of settings:
+the global settings of the daemon, listeners, plugins, and bridges.
+Bridges and listeners are part of the global configuration, plugins are part of listeners.
+Users of the broker are configured as parts of listeners rather than globally, allowing
+configurations in which a given user is only allowed to log in to the broker using specific
+listeners (e.g. to configure an admin user with full access to all topics, but restricted to
+localhost).
+
+Almost all options of Mosquitto are available for configuration at their appropriate levels, some
+as NixOS options written in camel case, the remainders under `settings` with their exact names in
+the Mosquitto config file. The exceptions are `acl_file` (which is always set according to the
+`acl` attributes of a listener and its users) and `per_listener_settings` (which is always set to
+`true`).
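+
+For instance, a minimal sketch (values are placeholders) that combines a listener's
+camel-cased NixOS options with raw Mosquitto keys under `settings` could look like
+
+```nix
+services.mosquitto = {
+  enable = true;
+  listeners = [ {
+    # typed NixOS option of the listener
+    port = 1883;
+    # freeform key, emitted verbatim as "max_connections 512"
+    settings.max_connections = 512;
+  } ];
+};
+```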
+
+### Password authentication {#module-services-mosquitto-config-passwords}
+
+Mosquitto can be run in two modes, with a password file or without. Each listener has its own
+password file, and different listeners may use different password files. Password file generation
+can be disabled by setting `omitPasswordAuth = true` for a listener; in this case it is necessary
+to either set `settings.allow_anonymous = true` to allow all logins, or to configure other
+authentication methods like TLS client certificates with `settings.use_identity_as_username = true`.
+
+The default is to generate a password file for each listener from the users configured to that
+listener. Users with no configured password will not be added to the password file and thus
+will not be able to use the broker.
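+
+As a sketch (user name and file path are placeholders), a user whose password is kept
+outside the Nix store via `passwordFile` could be declared like this:
+
+```nix
+services.mosquitto.listeners = [ {
+  users.monitor = {
+    # read at service start through systemd credentials,
+    # so the file needs no special permissions
+    passwordFile = "/var/lib/secrets/mosquitto-monitor";
+    acl = [ "read #" ];
+  };
+} ];
+```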
+
+### ACL format {#module-services-mosquitto-config-acl}
+
+Every listener has a Mosquitto `acl_file` attached to it. This ACL is configured via two
+attributes of the config:
+
+  * the `acl` attribute of the listener configures pattern ACL entries and topic ACL entries
+    for anonymous users. Each entry must be prefixed with `pattern` or `topic` to distinguish
+    between these two cases.
+  * the `acl` attribute of every user configured in the listener configures the ACL for that
+    given user. Only topic ACLs are supported by Mosquitto in this setting, so no prefix is
+    required or allowed.
+
+The default ACL for a listener is empty, disallowing all accesses from all clients. To configure
+a completely open ACL, set `acl = [ "pattern readwrite #" ]` in the listener.
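+
+Putting the two levels together, a short sketch (topic names are illustrative) with a
+listener-level pattern entry and a per-user topic ACL:
+
+```nix
+services.mosquitto.listeners = [ {
+  # anonymous/pattern entries carry the "pattern" or "topic" prefix
+  acl = [ "pattern read public/#" ];
+  users.sensor = {
+    password = "changeme";
+    # user entries are topic ACLs, so no prefix is allowed here
+    acl = [ "write sensors/#" ];
+  };
+} ];
+```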
diff --git a/nixpkgs/nixos/modules/services/networking/mosquitto.nix b/nixpkgs/nixos/modules/services/networking/mosquitto.nix
new file mode 100644
index 000000000000..c53d86c0babc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mosquitto.nix
@@ -0,0 +1,723 @@
+{ config, lib, pkgs, ...}:
+
+with lib;
+
+let
+  cfg = config.services.mosquitto;
+
+  # Note that mosquitto config parsing is very simplistic as of May 2021:
+  # it will often e.g. strtok() a line, check the first two tokens, and ignore the rest.
+  # there's no escaping available either, so we have to prevent any being necessary.
+  str = types.strMatching "[^\r\n]*" // {
+    description = "single-line string";
+  };
+  path = types.addCheck types.path (p: str.check "${p}");
+  configKey = types.strMatching "[^\r\n\t ]+";
+  optionType = with types; oneOf [ str path bool int ] // {
+    description = "string, path, bool, or integer";
+  };
+  optionToString = v:
+    if isBool v then boolToString v
+    else if path.check v then "${v}"
+    else toString v;
+
+  assertKeysValid = prefix: valid: config:
+    mapAttrsToList
+      (n: _: {
+        assertion = valid ? ${n};
+        message = "Invalid config key ${prefix}.${n}.";
+      })
+      config;
+
+  formatFreeform = { prefix ? "" }: mapAttrsToList (n: v: "${prefix}${n} ${optionToString v}");
+
+  userOptions = with types; submodule {
+    options = {
+      password = mkOption {
+        type = uniq (nullOr str);
+        default = null;
+        description = lib.mdDoc ''
+          Specifies the (clear text) password for the MQTT user.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = uniq (nullOr path);
+        example = "/path/to/file";
+        default = null;
+        description = lib.mdDoc ''
+          Specifies the path to a file containing the
+          clear text password for the MQTT user.
+          The file is securely passed to mosquitto by
+          leveraging systemd credentials. No special
+          permissions need to be set on this file.
+        '';
+      };
+
+      hashedPassword = mkOption {
+        type = uniq (nullOr str);
+        default = null;
+        description = mdDoc ''
+          Specifies the hashed password for the MQTT user.
+          To generate a hashed password, install the `mosquitto`
+          package and use `mosquitto_passwd`, then extract
+          the second field (after the `:`) from the generated
+          file.
+        '';
+      };
+
+      hashedPasswordFile = mkOption {
+        type = uniq (nullOr path);
+        example = "/path/to/file";
+        default = null;
+        description = mdDoc ''
+          Specifies the path to a file containing the
+          hashed password for the MQTT user.
+          To generate a hashed password, install the `mosquitto`
+          package and use `mosquitto_passwd`, then remove the
+          `username:` prefix from the generated file.
+          The file is securely passed to mosquitto by
+          leveraging systemd credentials. No special
+          permissions need to be set on this file.
+        '';
+      };
+
+      acl = mkOption {
+        type = listOf str;
+        example = [ "read A/B" "readwrite A/#" ];
+        default = [];
+        description = lib.mdDoc ''
+          Control client access to topics on the broker.
+        '';
+      };
+    };
+  };
+
+  userAsserts = prefix: users:
+    mapAttrsToList
+      (n: _: {
+        assertion = builtins.match "[^:\r\n]+" n != null;
+        message = "Invalid user name ${n} in ${prefix}";
+      })
+      users
+    ++ mapAttrsToList
+      (n: u: {
+        assertion = count (s: s != null) [
+          u.password u.passwordFile u.hashedPassword u.hashedPasswordFile
+        ] <= 1;
+        message = "Cannot set more than one password option for user ${n} in ${prefix}";
+      }) users;
+
+  listenerScope = index: "listener-${toString index}";
+  userScope = prefix: index: "${prefix}-user-${toString index}";
+  credentialID = prefix: credential: "${prefix}-${credential}";
+
+  toScopedUsers = listenerScope: users: pipe users [
+    attrNames
+    (imap0 (index: user: nameValuePair user
+      (users.${user} // { scope = userScope listenerScope index; })
+    ))
+    listToAttrs
+  ];
+
+  userCredentials = user: credentials: pipe credentials [
+    (filter (credential: user.${credential} != null))
+    (map (credential: "${credentialID user.scope credential}:${user.${credential}}"))
+  ];
+  usersCredentials = listenerScope: users: credentials: pipe users [
+    (toScopedUsers listenerScope)
+    (mapAttrsToList (_: user: userCredentials user credentials))
+    concatLists
+  ];
+  systemdCredentials = listeners: listenerCredentials: pipe listeners [
+    (imap0 (index: listener: listenerCredentials (listenerScope index) listener))
+    concatLists
+  ];
+
+  makePasswordFile = listenerScope: users: path:
+    let
+      makeLines = store: file: let
+        scopedUsers = toScopedUsers listenerScope users;
+      in
+        mapAttrsToList
+          (name: user: ''addLine ${escapeShellArg name} "''$(systemd-creds cat ${credentialID user.scope store})"'')
+          (filterAttrs (_: user: user.${store} != null) scopedUsers)
+        ++ mapAttrsToList
+          (name: user: ''addFile ${escapeShellArg name} "''${CREDENTIALS_DIRECTORY}/${credentialID user.scope file}"'')
+          (filterAttrs (_: user: user.${file} != null) scopedUsers);
+      plainLines = makeLines "password" "passwordFile";
+      hashedLines = makeLines "hashedPassword" "hashedPasswordFile";
+    in
+      pkgs.writeScript "make-mosquitto-passwd"
+        (''
+          #! ${pkgs.runtimeShell}
+
+          set -eu
+
+          file=${escapeShellArg path}
+
+          rm -f "$file"
+          touch "$file"
+
+          addLine() {
+            echo "$1:$2" >> "$file"
+          }
+          addFile() {
+            if [ $(wc -l <"$2") -gt 1 ]; then
+              echo "invalid mosquitto password file $2" >&2
+              return 1
+            fi
+            echo "$1:$(cat "$2")" >> "$file"
+          }
+        ''
+        + concatStringsSep "\n"
+          (plainLines
+           ++ optional (plainLines != []) ''
+             ${cfg.package}/bin/mosquitto_passwd -U "$file"
+           ''
+           ++ hashedLines));
+
+  makeACLFile = idx: users: supplement:
+    pkgs.writeText "mosquitto-acl-${toString idx}.conf"
+      (concatStringsSep
+        "\n"
+        (flatten [
+          supplement
+          (mapAttrsToList
+            (n: u: [ "user ${n}" ] ++ map (t: "topic ${t}") u.acl)
+            users)
+        ]));
+
+  authPluginOptions = with types; submodule {
+    options = {
+      plugin = mkOption {
+        type = path;
+        description = mdDoc ''
+          Plugin path to load, should be a `.so` file.
+        '';
+      };
+
+      denySpecialChars = mkOption {
+        type = bool;
+        description = mdDoc ''
+          Automatically disallow all clients using `#`
+          or `+` in their name/id.
+        '';
+        default = true;
+      };
+
+      options = mkOption {
+        type = attrsOf optionType;
+        description = mdDoc ''
+          Options for the auth plugin. Each key turns into an `auth_opt_*`
+          line in the config.
+        '';
+        default = {};
+      };
+    };
+  };
+
+  authAsserts = prefix: auth:
+    mapAttrsToList
+      (n: _: {
+        assertion = configKey.check n;
+        message = "Invalid auth plugin key ${prefix}.${n}";
+      })
+      auth;
+
+  formatAuthPlugin = plugin:
+    [
+      "auth_plugin ${plugin.plugin}"
+      "auth_plugin_deny_special_chars ${optionToString plugin.denySpecialChars}"
+    ]
+    ++ formatFreeform { prefix = "auth_opt_"; } plugin.options;
+
+  freeformListenerKeys = {
+    allow_anonymous = 1;
+    allow_zero_length_clientid = 1;
+    auto_id_prefix = 1;
+    bind_interface = 1;
+    cafile = 1;
+    capath = 1;
+    certfile = 1;
+    ciphers = 1;
+    "ciphers_tls1.3" = 1;
+    crlfile = 1;
+    dhparamfile = 1;
+    http_dir = 1;
+    keyfile = 1;
+    max_connections = 1;
+    max_qos = 1;
+    max_topic_alias = 1;
+    mount_point = 1;
+    protocol = 1;
+    psk_file = 1;
+    psk_hint = 1;
+    require_certificate = 1;
+    socket_domain = 1;
+    tls_engine = 1;
+    tls_engine_kpass_sha1 = 1;
+    tls_keyform = 1;
+    tls_version = 1;
+    use_identity_as_username = 1;
+    use_subject_as_username = 1;
+    use_username_as_clientid = 1;
+  };
+
+  listenerOptions = with types; submodule {
+    options = {
+      port = mkOption {
+        type = port;
+        description = lib.mdDoc ''
+          Port to listen on. Must be set to 0 to listen on a unix domain socket.
+        '';
+        default = 1883;
+      };
+
+      address = mkOption {
+        type = nullOr str;
+        description = mdDoc ''
+          Address to listen on. Listen on `0.0.0.0`/`::`
+          when unset.
+        '';
+        default = null;
+      };
+
+      authPlugins = mkOption {
+        type = listOf authPluginOptions;
+        description = mdDoc ''
+          Authentication plugin to attach to this listener.
+          Refer to the [mosquitto.conf documentation](https://mosquitto.org/man/mosquitto-conf-5.html)
+          for details on authentication plugins.
+        '';
+        default = [];
+      };
+
+      users = mkOption {
+        type = attrsOf userOptions;
+        example = { john = { password = "123456"; acl = [ "readwrite john/#" ]; }; };
+        description = lib.mdDoc ''
+          A set of users and their passwords and ACLs.
+        '';
+        default = {};
+      };
+
+      omitPasswordAuth = mkOption {
+        type = bool;
+        description = lib.mdDoc ''
+          Omits password checking, allowing anyone to log in with any user name unless
+          other mandatory authentication methods (e.g. TLS client certificates) are configured.
+        '';
+        default = false;
+      };
+
+      acl = mkOption {
+        type = listOf str;
+        description = lib.mdDoc ''
+          Additional ACL items to prepend to the generated ACL file.
+        '';
+        example = [ "pattern read #" "topic readwrite anon/report/#" ];
+        default = [];
+      };
+
+      settings = mkOption {
+        type = submodule {
+          freeformType = attrsOf optionType;
+        };
+        description = lib.mdDoc ''
+          Additional settings for this listener.
+        '';
+        default = {};
+      };
+    };
+  };
+
+  listenerAsserts = prefix: listener:
+    assertKeysValid "${prefix}.settings" freeformListenerKeys listener.settings
+    ++ userAsserts prefix listener.users
+    ++ imap0
+      (i: v: authAsserts "${prefix}.authPlugins.${toString i}" v)
+      listener.authPlugins;
+
+  formatListener = idx: listener:
+    [
+      "listener ${toString listener.port} ${toString listener.address}"
+      "acl_file ${makeACLFile idx listener.users listener.acl}"
+    ]
+    ++ optional (! listener.omitPasswordAuth) "password_file ${cfg.dataDir}/passwd-${toString idx}"
+    ++ formatFreeform {} listener.settings
+    ++ concatMap formatAuthPlugin listener.authPlugins;
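+  # Illustrative sketch (not evaluated; user data is an example): a listener such as
+  #   { port = 1883; users.john = { password = "123456"; acl = [ "readwrite john/#" ]; }; }
+  # is rendered by formatListener (for index 0, with the default dataDir) roughly as
+  #   listener 1883
+  #   acl_file /nix/store/...-mosquitto-acl-0.conf
+  #   password_file /var/lib/mosquitto/passwd-0
+  # followed by any freeform settings and auth_plugin lines for that listener.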
+
+  freeformBridgeKeys = {
+    bridge_alpn = 1;
+    bridge_attempt_unsubscribe = 1;
+    bridge_bind_address = 1;
+    bridge_cafile = 1;
+    bridge_capath = 1;
+    bridge_certfile = 1;
+    bridge_identity = 1;
+    bridge_insecure = 1;
+    bridge_keyfile = 1;
+    bridge_max_packet_size = 1;
+    bridge_outgoing_retain = 1;
+    bridge_protocol_version = 1;
+    bridge_psk = 1;
+    bridge_require_ocsp = 1;
+    bridge_tls_version = 1;
+    cleansession = 1;
+    idle_timeout = 1;
+    keepalive_interval = 1;
+    local_cleansession = 1;
+    local_clientid = 1;
+    local_password = 1;
+    local_username = 1;
+    notification_topic = 1;
+    notifications = 1;
+    notifications_local_only = 1;
+    remote_clientid = 1;
+    remote_password = 1;
+    remote_username = 1;
+    restart_timeout = 1;
+    round_robin = 1;
+    start_type = 1;
+    threshold = 1;
+    try_private = 1;
+  };
+
+  bridgeOptions = with types; submodule {
+    options = {
+      addresses = mkOption {
+        type = listOf (submodule {
+          options = {
+            address = mkOption {
+              type = str;
+              description = lib.mdDoc ''
+                Address of the remote MQTT broker.
+              '';
+            };
+
+            port = mkOption {
+              type = port;
+              description = lib.mdDoc ''
+                Port of the remote MQTT broker.
+              '';
+              default = 1883;
+            };
+          };
+        });
+        default = [];
+        description = lib.mdDoc ''
+          Remote endpoints for the bridge.
+        '';
+      };
+
+      topics = mkOption {
+        type = listOf str;
+        description = lib.mdDoc ''
+          Topic patterns to be shared between the two brokers.
+          Refer to the [mosquitto.conf documentation](https://mosquitto.org/man/mosquitto-conf-5.html)
+          for details on the format.
+        '';
+        default = [];
+        example = [ "# both 2 local/topic/ remote/topic/" ];
+      };
+
+      settings = mkOption {
+        type = submodule {
+          freeformType = attrsOf optionType;
+        };
+        description = lib.mdDoc ''
+          Additional settings for this bridge.
+        '';
+        default = {};
+      };
+    };
+  };
+
+  bridgeAsserts = prefix: bridge:
+    assertKeysValid "${prefix}.settings" freeformBridgeKeys bridge.settings
+    ++ [ {
+      assertion = length bridge.addresses > 0;
+      message = "Bridge ${prefix} needs remote broker addresses";
+    } ];
+
+  formatBridge = name: bridge:
+    [
+      "connection ${name}"
+      "addresses ${concatMapStringsSep " " (a: "${a.address}:${toString a.port}") bridge.addresses}"
+    ]
+    ++ map (t: "topic ${t}") bridge.topics
+    ++ formatFreeform {} bridge.settings;
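+  # Illustrative sketch (not evaluated; names are examples): a bridge declared as
+  #   upstream = { addresses = [ { address = "broker.example.org"; } ]; topics = [ "# both 2 local/topic/ remote/topic/" ]; };
+  # is rendered by formatBridge roughly as
+  #   connection upstream
+  #   addresses broker.example.org:1883
+  #   topic # both 2 local/topic/ remote/topic/
+  # followed by any freeform bridge settings.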
+
+  freeformGlobalKeys = {
+    allow_duplicate_messages = 1;
+    autosave_interval = 1;
+    autosave_on_changes = 1;
+    check_retain_source = 1;
+    connection_messages = 1;
+    log_facility = 1;
+    log_timestamp = 1;
+    log_timestamp_format = 1;
+    max_inflight_bytes = 1;
+    max_inflight_messages = 1;
+    max_keepalive = 1;
+    max_packet_size = 1;
+    max_queued_bytes = 1;
+    max_queued_messages = 1;
+    memory_limit = 1;
+    message_size_limit = 1;
+    persistence_file = 1;
+    persistence_location = 1;
+    persistent_client_expiration = 1;
+    pid_file = 1;
+    queue_qos0_messages = 1;
+    retain_available = 1;
+    set_tcp_nodelay = 1;
+    sys_interval = 1;
+    upgrade_outgoing_qos = 1;
+    websockets_headers_size = 1;
+    websockets_log_level = 1;
+  };
+
+  globalOptions = with types; {
+    enable = mkEnableOption (lib.mdDoc "the MQTT Mosquitto broker");
+
+    package = mkOption {
+      type = package;
+      default = pkgs.mosquitto;
+      defaultText = literalExpression "pkgs.mosquitto";
+      description = lib.mdDoc ''
+        Mosquitto package to use.
+      '';
+    };
+
+    bridges = mkOption {
+      type = attrsOf bridgeOptions;
+      default = {};
+      description = lib.mdDoc ''
+        Bridges to build to other MQTT brokers.
+      '';
+    };
+
+    listeners = mkOption {
+      type = listOf listenerOptions;
+      default = [];
+      description = lib.mdDoc ''
+        Listeners to configure on this broker.
+      '';
+    };
+
+    includeDirs = mkOption {
+      type = listOf path;
+      description = mdDoc ''
+        Directories to be scanned for further config files to include.
+        Directories will be processed in the order given,
+        `*.conf` files in the directory will be
+        read in case-sensitive alphabetical order.
+      '';
+      default = [];
+    };
+
+    logDest = mkOption {
+      type = listOf (either path (enum [ "stdout" "stderr" "syslog" "topic" "dlt" ]));
+      description = lib.mdDoc ''
+        Destinations to send log messages to.
+      '';
+      default = [ "stderr" ];
+    };
+
+    logType = mkOption {
+      type = listOf (enum [ "debug" "error" "warning" "notice" "information"
+                            "subscribe" "unsubscribe" "websockets" "none" "all" ]);
+      description = lib.mdDoc ''
+        Types of messages to log.
+      '';
+      default = [];
+    };
+
+    persistence = mkOption {
+      type = bool;
+      description = lib.mdDoc ''
+        Enable persistent storage of subscriptions and messages.
+      '';
+      default = true;
+    };
+
+    dataDir = mkOption {
+      default = "/var/lib/mosquitto";
+      type = types.path;
+      description = lib.mdDoc ''
+        The data directory.
+      '';
+    };
+
+    settings = mkOption {
+      type = submodule {
+        freeformType = attrsOf optionType;
+      };
+      description = lib.mdDoc ''
+        Global configuration options for the mosquitto broker.
+      '';
+      default = {};
+    };
+  };
+
+  globalAsserts = prefix: cfg:
+    flatten [
+      (assertKeysValid "${prefix}.settings" freeformGlobalKeys cfg.settings)
+      (imap0 (n: l: listenerAsserts "${prefix}.listener.${toString n}" l) cfg.listeners)
+      (mapAttrsToList (n: b: bridgeAsserts "${prefix}.bridge.${n}" b) cfg.bridges)
+    ];
+
+  formatGlobal = cfg:
+    [
+      "per_listener_settings true"
+      "persistence ${optionToString cfg.persistence}"
+    ]
+    ++ map
+      (d: if path.check d then "log_dest file ${d}" else "log_dest ${d}")
+      cfg.logDest
+    ++ map (t: "log_type ${t}") cfg.logType
+    ++ formatFreeform {} cfg.settings
+    ++ concatLists (imap0 formatListener cfg.listeners)
+    ++ concatLists (mapAttrsToList formatBridge cfg.bridges)
+    ++ map (d: "include_dir ${d}") cfg.includeDirs;
+
+  configFile = pkgs.writeText "mosquitto.conf"
+    (concatStringsSep "\n" (formatGlobal cfg));
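+  # Minimal usage sketch (assuming defaults; user data is illustrative):
+  #   services.mosquitto = {
+  #     enable = true;
+  #     listeners = [
+  #       { users.john = { password = "123456"; acl = [ "readwrite john/#" ]; }; }
+  #     ];
+  #   };
+  # formatGlobal then produces a mosquitto.conf beginning with the
+  # per_listener_settings/persistence lines, followed by the generated listener,
+  # bridge and include_dir lines described above.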
+
+in
+
+{
+
+  ###### Interface
+
+  options.services.mosquitto = globalOptions;
+
+  ###### Implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = globalAsserts "services.mosquitto" cfg;
+
+    systemd.services.mosquitto = {
+      description = "Mosquitto MQTT Broker Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        Type = "notify";
+        NotifyAccess = "main";
+        User = "mosquitto";
+        Group = "mosquitto";
+        RuntimeDirectory = "mosquitto";
+        WorkingDirectory = cfg.dataDir;
+        Restart = "on-failure";
+        ExecStart = "${cfg.package}/bin/mosquitto -c ${configFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+
+        # Credentials
+        SetCredential = let
+          listenerCredentials = listenerScope: listener:
+            usersCredentials listenerScope listener.users [ "password" "hashedPassword" ];
+        in
+          systemdCredentials cfg.listeners listenerCredentials;
+
+        LoadCredential = let
+          listenerCredentials = listenerScope: listener:
+            usersCredentials listenerScope listener.users [ "passwordFile" "hashedPasswordFile" ];
+        in
+          systemdCredentials cfg.listeners listenerCredentials;
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectSystem = "strict";
+        ReadWritePaths = [
+          cfg.dataDir
+          "/tmp"  # mosquitto_passwd creates files in /tmp before moving them
+        ] ++ filter path.check cfg.logDest;
+        ReadOnlyPaths =
+          map (p: "${p}")
+            (cfg.includeDirs
+             ++ filter
+               (v: v != null)
+               (flatten [
+                 (map
+                   (l: [
+                     (l.settings.psk_file or null)
+                     (l.settings.http_dir or null)
+                     (l.settings.cafile or null)
+                     (l.settings.capath or null)
+                     (l.settings.certfile or null)
+                     (l.settings.crlfile or null)
+                     (l.settings.dhparamfile or null)
+                     (l.settings.keyfile or null)
+                   ])
+                   cfg.listeners)
+                 (mapAttrsToList
+                   (_: b: [
+                     (b.settings.bridge_cafile or null)
+                     (b.settings.bridge_capath or null)
+                     (b.settings.bridge_certfile or null)
+                     (b.settings.bridge_keyfile or null)
+                   ])
+                   cfg.bridges)
+               ]));
+        RemoveIPC = true;
+        RestrictAddressFamilies = [
+          "AF_UNIX"
+          "AF_INET"
+          "AF_INET6"
+          "AF_NETLINK"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+          "~@resources"
+        ];
+        UMask = "0077";
+      };
+      preStart =
+        concatStringsSep
+          "\n"
+          (imap0
+            (idx: listener: makePasswordFile (listenerScope idx) listener.users "${cfg.dataDir}/passwd-${toString idx}")
+            cfg.listeners);
+    };
+
+    users.users.mosquitto = {
+      description = "Mosquitto MQTT Broker Daemon owner";
+      group = "mosquitto";
+      uid = config.ids.uids.mosquitto;
+      home = cfg.dataDir;
+      createHome = true;
+    };
+
+    users.groups.mosquitto.gid = config.ids.gids.mosquitto;
+
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ pennae ];
+    doc = ./mosquitto.md;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mozillavpn.nix b/nixpkgs/nixos/modules/services/networking/mozillavpn.nix
new file mode 100644
index 000000000000..cf962879b421
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mozillavpn.nix
@@ -0,0 +1,14 @@
+{ config, lib, pkgs, ... }:
+
+{
+  options.services.mozillavpn.enable =
+    lib.mkEnableOption (lib.mdDoc "Mozilla VPN daemon");
+
+  config = lib.mkIf config.services.mozillavpn.enable {
+    environment.systemPackages = [ pkgs.mozillavpn ];
+    services.dbus.packages = [ pkgs.mozillavpn ];
+    systemd.packages = [ pkgs.mozillavpn ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ andersk ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mstpd.nix b/nixpkgs/nixos/modules/services/networking/mstpd.nix
new file mode 100644
index 000000000000..ba82c5ac8232
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mstpd.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.mstpd;
+in
+with lib;
+{
+  options.services.mstpd = {
+
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable the Multiple Spanning Tree Protocol daemon.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.mstpd ];
+
+    systemd.services.mstpd = {
+      description = "Multiple Spanning Tree Protocol Daemon";
+      wantedBy = [ "network.target" ];
+      unitConfig.ConditionCapability = "CAP_NET_ADMIN";
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "@${pkgs.mstpd}/bin/mstpd mstpd";
+        PIDFile = "/run/mstpd.pid";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mtprotoproxy.nix b/nixpkgs/nixos/modules/services/networking/mtprotoproxy.nix
new file mode 100644
index 000000000000..3dd197697b23
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mtprotoproxy.nix
@@ -0,0 +1,110 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.mtprotoproxy;
+
+  configOpts = {
+    PORT = cfg.port;
+    USERS = cfg.users;
+    SECURE_ONLY = cfg.secureOnly;
+  } // lib.optionalAttrs (cfg.adTag != null) { AD_TAG = cfg.adTag; }
+    // cfg.extraConfig;
+
+  convertOption = opt:
+    if isString opt || isInt opt then
+      builtins.toJSON opt
+    else if isBool opt then
+      if opt then "True" else "False"
+    else if isList opt then
+      "[" + concatMapStringsSep "," convertOption opt + "]"
+    else if isAttrs opt then
+      "{" + concatStringsSep "," (mapAttrsToList (name: opt: "${builtins.toJSON name}: ${convertOption opt}") opt) + "}"
+    else
+      throw "Invalid option type";
+
+  configFile = pkgs.writeText "config.py" (concatStringsSep "\n" (mapAttrsToList (name: opt: "${name} = ${convertOption opt}") configOpts));
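+  # Illustrative sketch (not evaluated): with
+  #   services.mtprotoproxy = { enable = true; users.tg = "00000000000000000000000000000000"; };
+  # and the remaining defaults, convertOption renders config.py roughly as
+  #   PORT = 3256
+  #   SECURE_ONLY = True
+  #   USERS = {"tg": "00000000000000000000000000000000"}
+  # (strings and ints via toJSON, booleans as True/False, attrsets as Python dicts).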
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.mtprotoproxy = {
+
+      enable = mkEnableOption (lib.mdDoc "mtprotoproxy");
+
+      port = mkOption {
+        type = types.port;
+        default = 3256;
+        description = lib.mdDoc ''
+          TCP port to accept mtproto connections on.
+        '';
+      };
+
+      users = mkOption {
+        type = types.attrsOf types.str;
+        example = {
+          tg = "00000000000000000000000000000000";
+          tg2 = "0123456789abcdef0123456789abcdef";
+        };
+        description = lib.mdDoc ''
+          Allowed users and their secrets. A secret is a 32-character hex string.
+        '';
+      };
+
+      secureOnly = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Don't allow users to connect in non-secure mode (without random padding).
+        '';
+      };
+
+      adTag = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        # Taken from mtprotoproxy's repo.
+        example = "3c09c680b76ee91a4c25ad51f742267d";
+        description = lib.mdDoc ''
+          Tag for advertising that can be obtained from @MTProxybot.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        example = {
+          STATS_PRINT_PERIOD = 600;
+        };
+        description = lib.mdDoc ''
+          Extra configuration options for mtprotoproxy.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.mtprotoproxy = {
+      description = "MTProto Proxy Daemon";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.mtprotoproxy}/bin/mtprotoproxy ${configFile}";
+        DynamicUser = true;
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mtr-exporter.nix b/nixpkgs/nixos/modules/services/networking/mtr-exporter.nix
new file mode 100644
index 000000000000..af694c3e736b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mtr-exporter.nix
@@ -0,0 +1,138 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    maintainers types literalExpression
+    escapeShellArg escapeShellArgs
+    mkEnableOption mkOption mkRemovedOptionModule mkIf mdDoc
+    optionalString concatMapStrings concatStringsSep;
+
+  cfg = config.services.mtr-exporter;
+
+  jobsConfig = pkgs.writeText "mtr-exporter.conf" (concatMapStrings (job: ''
+    ${job.name} -- ${job.schedule} -- ${concatStringsSep " " job.flags} ${job.address}
+  '') cfg.jobs);
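+  # Illustrative sketch (not evaluated; host name is an example): a job such as
+  #   { name = "isp"; address = "example.org"; schedule = "@every 60s"; flags = [ "-G1" ]; }
+  # becomes one line of mtr-exporter.conf of the form
+  #   isp -- @every 60s -- -G1 example.org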
+in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "mtr-exporter" "target" ] "Use services.mtr-exporter.jobs instead.")
+    (mkRemovedOptionModule [ "services" "mtr-exporter" "mtrFlags" ] "Use services.mtr-exporter.jobs.<job>.flags instead.")
+  ];
+
+  options = {
+    services = {
+      mtr-exporter = {
+        enable = mkEnableOption (mdDoc "a Prometheus exporter for MTR");
+
+        address = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Listen address for MTR exporter.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8080;
+          description = mdDoc "Listen port for MTR exporter.";
+        };
+
+        extraFlags = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          example = ["-flag.deprecatedMetrics"];
+          description = mdDoc ''
+            Extra command line options to pass to MTR exporter.
+          '';
+        };
+
+        package = mkOption {
+          type = types.package;
+          default = pkgs.mtr-exporter;
+          defaultText = literalExpression "pkgs.mtr-exporter";
+          description = mdDoc "The MTR exporter package to use.";
+        };
+
+        mtrPackage = mkOption {
+          type = types.package;
+          default = pkgs.mtr;
+          defaultText = literalExpression "pkgs.mtr";
+          description = mdDoc "The MTR package to use.";
+        };
+
+        jobs = mkOption {
+          description = mdDoc "List of MTR jobs. Will be added to /etc/mtr-exporter.conf";
+          type = types.nonEmptyListOf (types.submodule {
+            options = {
+              name = mkOption {
+                type = types.str;
+                description = mdDoc "Name of ICMP pinging job.";
+              };
+
+              address = mkOption {
+                type = types.str;
+                example = "host.example.org:1234";
+                description = mdDoc "Target address for MTR client.";
+              };
+
+              schedule = mkOption {
+                type = types.str;
+                default = "@every 60s";
+                example = "@hourly";
+                description = mdDoc "Schedule of MTR checks. Also accepts Cron format.";
+              };
+
+              flags = mkOption {
+                type = with types; listOf str;
+                default = [];
+                example = ["-G1"];
+                description = mdDoc "Additional flags to pass to MTR.";
+              };
+            };
+          });
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."mtr-exporter.conf" = {
+      source = jobsConfig;
+    };
+
+    systemd.services.mtr-exporter = {
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "network.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/mtr-exporter \
+            -mtr '${cfg.mtrPackage}/bin/mtr' \
+            -bind ${escapeShellArg "${cfg.address}:${toString cfg.port}"} \
+            -jobs '${jobsConfig}' \
+            ${escapeShellArgs cfg.extraFlags}
+        '';
+        Restart = "on-failure";
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DynamicUser = true;
+        LockPersonality = true;
+        ProcSubset = "pid";
+        PrivateDevices = true;
+        PrivateUsers = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ jakubgs ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/mullvad-vpn.nix b/nixpkgs/nixos/modules/services/networking/mullvad-vpn.nix
new file mode 100644
index 000000000000..8c7d5237971f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mullvad-vpn.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.mullvad-vpn;
+in
+with lib;
+{
+  options.services.mullvad-vpn = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        This option enables Mullvad VPN daemon.
+        This sets {option}`networking.firewall.checkReversePath` to "loose", which might be undesirable for security.
+      '';
+    };
+
+    enableExcludeWrapper = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        This option activates the wrapper that allows the use of mullvad-exclude.
+        Might have minor security impact, so consider disabling if you do not use the feature.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.mullvad;
+      defaultText = literalExpression "pkgs.mullvad";
+      description = lib.mdDoc ''
+        The Mullvad package to use. `pkgs.mullvad` only provides the CLI tool, `pkgs.mullvad-vpn` provides both the CLI and the GUI.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.kernelModules = [ "tun" ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    # mullvad-daemon writes to /etc/iproute2/rt_tables
+    networking.iproute2.enable = true;
+
+    # See https://github.com/NixOS/nixpkgs/issues/113589
+    networking.firewall.checkReversePath = "loose";
+
+    # See https://github.com/NixOS/nixpkgs/issues/176603
+    security.wrappers.mullvad-exclude = mkIf cfg.enableExcludeWrapper {
+      setuid = true;
+      owner = "root";
+      group = "root";
+      source = "${cfg.package}/bin/mullvad-exclude";
+    };
+
+    systemd.services.mullvad-daemon = {
+      description = "Mullvad VPN daemon";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network.target" ];
+      after = [
+        "network-online.target"
+        "NetworkManager.service"
+        "systemd-resolved.service"
+      ];
+      path = [
+        pkgs.iproute2
+        # Needed for ping
+        "/run/wrappers"
+        # See https://github.com/NixOS/nixpkgs/issues/262681
+      ] ++ (lib.optional config.networking.resolvconf.enable
+        config.networking.resolvconf.package);
+      startLimitBurst = 5;
+      startLimitIntervalSec = 20;
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/mullvad-daemon -v --disable-stdout-timestamps";
+        Restart = "always";
+        RestartSec = 1;
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ arcuru ymarkus ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/multipath.nix b/nixpkgs/nixos/modules/services/networking/multipath.nix
new file mode 100644
index 000000000000..9099cbe0cd32
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/multipath.nix
@@ -0,0 +1,557 @@
+{ config, lib, pkgs, ... }: with lib;
+
+# See http://christophe.varoqui.free.fr/usage.html and
+# https://github.com/opensvc/multipath-tools/blob/master/multipath/multipath.conf.5
+
+let
+  cfg = config.services.multipath;
+
+  indentLines = n: str: concatStringsSep "\n" (
+    map (line: "${fixedWidthString n " " " "}${line}") (
+      filter ( x: x != "" ) ( splitString "\n" str )
+    )
+  );
+
+  addCheckDesc = desc: elemType: check: types.addCheck elemType check
+    // { description = "${elemType.description} (with check: ${desc})"; };
+  hexChars = stringToCharacters "0123456789abcdef";
+  isHexString = s: all (c: elem c hexChars) (stringToCharacters (toLower s));
+  hexStr = addCheckDesc "hexadecimal string" types.str isHexString;
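+  # hexStr is types.str with an extra check: every character must be a hex digit
+  # (case-insensitive). It is used for WWIDs below, so e.g.
+  # "360080e500043b35c0123456789abcdef" is accepted while "not-a-wwid" is rejected.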
+
+in {
+
+  options.services.multipath = with types; {
+
+    enable = mkEnableOption (lib.mdDoc "the device mapper multipath (DM-MP) daemon");
+
+    package = mkOption {
+      type = package;
+      description = lib.mdDoc "multipath-tools package to use";
+      default = pkgs.multipath-tools;
+      defaultText = lib.literalExpression "pkgs.multipath-tools";
+    };
+
+    devices = mkOption {
+      default = [ ];
+      example = literalExpression ''
+        [
+          {
+            vendor = "\"COMPELNT\"";
+            product = "\"Compellent Vol\"";
+            path_checker = "tur";
+            no_path_retry = "queue";
+            max_sectors_kb = 256;
+          }, ...
+        ]
+      '';
+      description = lib.mdDoc ''
+        This option allows you to define arrays for use in multipath
+        groups.
+      '';
+      type = listOf (submodule {
+        options = {
+
+          vendor = mkOption {
+            type = str;
+            example = "COMPELNT";
+            description = lib.mdDoc "Regular expression to match the vendor name";
+          };
+
+          product = mkOption {
+            type = str;
+            example = "Compellent Vol";
+            description = lib.mdDoc "Regular expression to match the product name";
+          };
+
+          revision = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "Regular expression to match the product revision";
+          };
+
+          product_blacklist = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "Products with the given vendor matching this string are blacklisted";
+          };
+
+          alias_prefix = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "The user_friendly_names prefix to use for this device type, instead of the default mpath";
+          };
+
+          vpd_vendor = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "The vendor specific vpd page information, using the vpd page abbreviation";
+          };
+
+          hardware_handler = mkOption {
+            type = nullOr (enum [ "emc" "rdac" "hp_sw" "alua" "ana" ]);
+            default = null;
+            description = lib.mdDoc "The hardware handler to use for this device type";
+          };
+
+          # Optional arguments
+          path_grouping_policy = mkOption {
+            type = nullOr (enum [ "failover" "multibus" "group_by_serial" "group_by_prio" "group_by_node_name" ]);
+            default = null; # real default: "failover"
+            description = lib.mdDoc "The default path grouping policy to apply to unspecified multipaths";
+          };
+
+          uid_attribute = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "The udev attribute providing a unique path identifier (WWID)";
+          };
+
+          getuid_callout = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc ''
+              (Superseded by uid_attribute) The default program and args to callout
+              to obtain a unique path identifier. Should be specified with an absolute path.
+            '';
+          };
+
+          path_selector = mkOption {
+            type = nullOr (enum [
+              ''"round-robin 0"''
+              ''"queue-length 0"''
+              ''"service-time 0"''
+              ''"historical-service-time 0"''
+            ]);
+            default = null; # real default: "service-time 0"
+            description = lib.mdDoc "The default path selector algorithm to use; they are offered by the kernel multipath target";
+          };
+
+          path_checker = mkOption {
+            type = enum [ "readsector0" "tur" "emc_clariion" "hp_sw" "rdac" "directio" "cciss_tur" "none" ];
+            default = "tur";
+            description = lib.mdDoc "The default method used to determine the paths state";
+          };
+
+          prio = mkOption {
+            type = nullOr (enum [
+              "none" "const" "sysfs" "emc" "alua" "ontap" "rdac" "hp_sw" "hds"
+              "random" "weightedpath" "path_latency" "ana" "datacore" "iet"
+            ]);
+            default = null; # real default: "const"
+            description = lib.mdDoc "The name of the path priority routine";
+          };
+
+          prio_args = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "Arguments to pass to to the prio function";
+          };
+
+          features = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "Specify any device-mapper features to be used";
+          };
+
+          failback = mkOption {
+            type = nullOr str;
+            default = null; # real default: "manual"
+            description = lib.mdDoc "Tell multipathd how to manage path group failback. Quote integers as strings";
+          };
+
+          rr_weight = mkOption {
+            type = nullOr (enum [ "priorities" "uniform" ]);
+            default = null; # real default: "uniform"
+            description = lib.mdDoc ''
+              If set to priorities the multipath configurator will assign path weights
+              as "path prio * rr_min_io".
+            '';
+          };
+
+          no_path_retry = mkOption {
+            type = nullOr str;
+            default = null; # real default: "fail"
+            description = lib.mdDoc "Specify what to do when all paths are down. Quote integers as strings";
+          };
+
+          rr_min_io = mkOption {
+            type = nullOr int;
+            default = null; # real default: 1000
+            description = lib.mdDoc ''
+              Number of I/O requests to route to a path before switching to the next in the
+              same path group. This is only for Block I/O (BIO) based multipath and
+              only applies to the round-robin path_selector.
+            '';
+          };
+
+          rr_min_io_rq = mkOption {
+            type = nullOr int;
+            default = null; # real default: 1
+            description = lib.mdDoc ''
+              Number of I/O requests to route to a path before switching to the next in the
+              same path group. This is only for Request based multipath and
+              only applies to the round-robin path_selector.
+            '';
+          };
+
+          fast_io_fail_tmo = mkOption {
+            type = nullOr str;
+            default = null; # real default: 5
+            description = lib.mdDoc ''
+              Specify the number of seconds the SCSI layer will wait after a problem has been
+              detected on a FC remote port before failing I/O to devices on that remote port.
+              This should be smaller than dev_loss_tmo. Setting this to "off" will disable
+              the timeout. Quote integers as strings.
+            '';
+          };
+
+          dev_loss_tmo = mkOption {
+            type = nullOr str;
+            default = null; # real default: 600
+            description = lib.mdDoc ''
+              Specify the number of seconds the SCSI layer will wait after a problem has
+              been detected on a FC remote port before removing it from the system. This
+              can be set to "infinity" which sets it to the max value of 2147483647
+              seconds, or 68 years. It will be automatically adjusted to the overall
+              retry interval no_path_retry * polling_interval
+              if a number of retries is given with no_path_retry and the
+              overall retry interval is longer than the specified dev_loss_tmo value.
+              The Linux kernel will cap this value to 600 if fast_io_fail_tmo
+              is not set.
+            '';
+          };
+
+          flush_on_last_del = mkOption {
+            type = nullOr (enum [ "yes" "no" ]);
+            default = null; # real default: "no"
+            description = lib.mdDoc ''
+              If set to "yes" multipathd will disable queueing when the last path to a
+              device has been deleted.
+            '';
+          };
+
+          user_friendly_names = mkOption {
+            type = nullOr (enum [ "yes" "no" ]);
+            default = null; # real default: "no"
+            description = lib.mdDoc ''
+              If set to "yes", using the bindings file /etc/multipath/bindings
+              to assign a persistent and unique alias to the multipath, in the
+              form of mpath. If set to "no" use the WWID as the alias. In either
+              case this be will be overridden by any specific aliases in the
+              multipaths section.
+            '';
+          };
+
+          detect_prio = mkOption {
+            type = nullOr (enum [ "yes" "no" ]);
+            default = null; # real default: "yes"
+            description = lib.mdDoc ''
+              If set to "yes", multipath will try to detect if the device supports
+              SCSI-3 ALUA. If so, the device will automatically use the sysfs
+              prioritizer if the required sysfs attributes access_state and
+              preferred_path are supported, or the alua prioritizer if not. If set
+              to "no", the prioritizer will be selected as usual.
+            '';
+          };
+
+          detect_checker = mkOption {
+            type = nullOr (enum [ "yes" "no" ]);
+            default = null; # real default: "yes"
+            description = lib.mdDoc ''
+              If set to "yes", multipath will try to detect if the device supports
+              SCSI-3 ALUA. If so, the device will automatically use the tur checker.
+              If set to "no", the checker will be selected as usual.
+            '';
+          };
+
+          deferred_remove = mkOption {
+            type = nullOr (enum [ "yes" "no" ]);
+            default = null; # real default: "no"
+            description = lib.mdDoc ''
+              If set to "yes", multipathd will do a deferred remove instead of a
+              regular remove when the last path device has been deleted. This means
+              that if the multipath device is still in use, it will be freed when
+              the last user closes it. If a path is added to the multipath device
+              before the last user closes it, the deferred remove will be canceled.
+            '';
+          };
+
+          san_path_err_threshold = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc ''
+              If set to a value greater than 0, multipathd will watch paths and check
+              how many times a path has failed due to errors. If the number of
+              failures on a particular path is greater than the san_path_err_threshold,
+              the path will not be reinstated until san_path_err_recovery_time has
+              passed. These path failures must occur within san_path_err_forget_rate
+              checks; otherwise the path is considered good enough to reinstate.
+            '';
+          };
+
+          san_path_err_forget_rate = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc ''
+              If set to a value greater than 0, multipathd will check whether the path
+              failures have exceeded the san_path_err_threshold within this many checks,
+              i.e. san_path_err_forget_rate. If so, the path will not be reinstated until
+              san_path_err_recovery_time has passed.
+            '';
+          };
+
+          san_path_err_recovery_time = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc ''
+              If set to a value greater than 0, multipathd will make sure that when
+              path failures have exceeded the san_path_err_threshold within
+              san_path_err_forget_rate checks, the path is placed in the failed state
+              for the san_path_err_recovery_time duration. Once san_path_err_recovery_time
+              has expired, the failed path is reinstated. The san_path_err_recovery_time
+              value should be given in seconds.
+            '';
+          };
+
+          marginal_path_err_sample_time = mkOption {
+            type = nullOr int;
+            default = null;
+            description = lib.mdDoc "One of the four parameters of supporting path check based on accounting IO error such as intermittent error";
+          };
+
+          marginal_path_err_rate_threshold = mkOption {
+            type = nullOr int;
+            default = null;
+            description = lib.mdDoc "The error rate threshold as a permillage (1/1000)";
+          };
+
+          marginal_path_err_recheck_gap_time = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "One of the four parameters of supporting path check based on accounting IO error such as intermittent error";
+          };
+
+          marginal_path_double_failed_time = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "One of the four parameters of supporting path check based on accounting IO error such as intermittent error";
+          };
+
+          delay_watch_checks = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "This option is deprecated, and mapped to san_path_err_forget_rate";
+          };
+
+          delay_wait_checks = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "This option is deprecated, and mapped to san_path_err_recovery_time";
+          };
+
+          skip_kpartx = mkOption {
+            type = nullOr (enum [ "yes" "no" ]);
+            default = null; # real default: "no"
+            description = lib.mdDoc "If set to yes, kpartx will not automatically create partitions on the device";
+          };
+
+          max_sectors_kb = mkOption {
+            type = nullOr int;
+            default = null;
+            description = lib.mdDoc "Sets the max_sectors_kb device parameter on all path devices and the multipath device to the specified value";
+          };
+
+          ghost_delay = mkOption {
+            type = nullOr int;
+            default = null;
+            description = lib.mdDoc "Sets the number of seconds that multipath will wait after creating a device with only ghost paths before marking it ready for use in systemd";
+          };
+
+          all_tg_pt = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "Set the 'all targets ports' flag when registering keys with mpathpersist";
+          };
+
+        };
+      });
+    };
+
+    defaults = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc ''
+        This section defines default values for attributes which are used
+        whenever no values are given in the appropriate device or multipath
+        sections.
+      '';
+    };
+
+    blacklist = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc ''
+        This section defines which devices should be excluded from the
+        multipath topology discovery.
+      '';
+    };
+
+    blacklist_exceptions = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc ''
+        This section defines which devices should be included in the
+        multipath topology discovery, despite being listed in the
+        blacklist section.
+      '';
+    };
+
+    overrides = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc ''
+        This section defines values for attributes that should override the
+        device-specific settings for all devices.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc "Lines to append to default multipath.conf";
+    };
+
+    extraConfigFile = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc "Append an additional file's contents to /etc/multipath.conf";
+    };
+
+    pathGroups = mkOption {
+      example = literalExpression ''
+        [
+          {
+            wwid = "360080e500043b35c0123456789abcdef";
+            alias = 10001234;
+            array = "bigarray.example.com";
+            fsType = "zfs"; # optional
+            options = "ro"; # optional
+          }, ...
+        ]
+      '';
+      description = lib.mdDoc ''
+        This option allows you to define multipath groups as described
+        in http://christophe.varoqui.free.fr/usage.html.
+      '';
+      type = listOf (submodule {
+        options = {
+
+          alias = mkOption {
+            type = int;
+            example = 1001234;
+            description = lib.mdDoc "The name of the multipath device";
+          };
+
+          wwid = mkOption {
+            type = hexStr;
+            example = "360080e500043b35c0123456789abcdef";
+            description = lib.mdDoc "The identifier for the multipath device";
+          };
+
+          array = mkOption {
+            type = str;
+            default = null;
+            example = "bigarray.example.com";
+            description = lib.mdDoc "The DNS name of the storage array";
+          };
+
+          fsType = mkOption {
+            type = nullOr str;
+            default = null;
+            example = "zfs";
+            description = lib.mdDoc "Type of the filesystem";
+          };
+
+          options = mkOption {
+            type = nullOr str;
+            default = null;
+            example = "ro";
+            description = lib.mdDoc "Options used to mount the file system";
+          };
+
+        };
+      });
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."multipath.conf".text =
+      let
+        inherit (cfg) defaults blacklist blacklist_exceptions overrides;
+
+        mkDeviceBlock = cfg: let
+          nonNullCfg = lib.filterAttrs (k: v: v != null) cfg;
+          attrs = lib.mapAttrsToList (name: value: "  ${name} ${toString value}") nonNullCfg;
+        in ''
+          device {
+          ${lib.concatStringsSep "\n" attrs}
+          }
+        '';
+        devices = lib.concatMapStringsSep "\n" mkDeviceBlock cfg.devices;
+
+        mkMultipathBlock = m: ''
+          multipath {
+            wwid ${m.wwid}
+            alias ${toString m.alias}
+          }
+        '';
+        multipaths = lib.concatMapStringsSep "\n" mkMultipathBlock cfg.pathGroups;
+
+      in ''
+        devices {
+        ${indentLines 2 devices}
+        }
+
+        ${optionalString (defaults != null) ''
+          defaults {
+          ${indentLines 2 defaults}
+          }
+        ''}
+        ${optionalString (blacklist != null) ''
+          blacklist {
+          ${indentLines 2 blacklist}
+          }
+        ''}
+        ${optionalString (blacklist_exceptions != null) ''
+          blacklist_exceptions {
+          ${indentLines 2 blacklist_exceptions}
+          }
+        ''}
+        ${optionalString (overrides != null) ''
+          overrides {
+          ${indentLines 2 overrides}
+          }
+        ''}
+        multipaths {
+        ${indentLines 2 multipaths}
+        }
+      '';
+
+    systemd.packages = [ cfg.package ];
+
+    environment.systemPackages = [ cfg.package ];
+    boot.kernelModules = [ "dm-multipath" "dm-service-time" ];
+
+    # We do not have systemd in stage-1 boot so must invoke `multipathd`
+    # with the `-1` argument which disables systemd calls. Invoke `multipath`
+    # to display the multipath mappings in the output of `journalctl -b`.
+    # TODO: Implement for systemd stage 1
+    boot.initrd.kernelModules = [ "dm-multipath" "dm-service-time" ];
+    boot.initrd.postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      modprobe -a dm-multipath dm-service-time
+      multipathd -s
+      (set -x && sleep 1 && multipath -ll)
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/murmur.nix b/nixpkgs/nixos/modules/services/networking/murmur.nix
new file mode 100644
index 000000000000..20c2eff11e62
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/murmur.nix
@@ -0,0 +1,391 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.murmur;
+  forking = cfg.logFile != null;
+  configFile = pkgs.writeText "murmurd.ini" ''
+    database=/var/lib/murmur/murmur.sqlite
+    dbDriver=QSQLITE
+
+    autobanAttempts=${toString cfg.autobanAttempts}
+    autobanTimeframe=${toString cfg.autobanTimeframe}
+    autobanTime=${toString cfg.autobanTime}
+
+    logfile=${optionalString (cfg.logFile != null) cfg.logFile}
+    ${optionalString forking "pidfile=/run/murmur/murmurd.pid"}
+
+    welcometext="${cfg.welcometext}"
+    port=${toString cfg.port}
+
+    ${optionalString (cfg.hostName != "") "host=${cfg.hostName}"}
+    ${optionalString (cfg.password != "") "serverpassword=${cfg.password}"}
+
+    bandwidth=${toString cfg.bandwidth}
+    users=${toString cfg.users}
+
+    textmessagelength=${toString cfg.textMsgLength}
+    imagemessagelength=${toString cfg.imgMsgLength}
+    allowhtml=${boolToString cfg.allowHtml}
+    logdays=${toString cfg.logDays}
+    bonjour=${boolToString cfg.bonjour}
+    sendversion=${boolToString cfg.sendVersion}
+
+    ${optionalString (cfg.registerName != "") "registerName=${cfg.registerName}"}
+    ${optionalString (cfg.registerPassword != "") "registerPassword=${cfg.registerPassword}"}
+    ${optionalString (cfg.registerUrl != "") "registerUrl=${cfg.registerUrl}"}
+    ${optionalString (cfg.registerHostname != "") "registerHostname=${cfg.registerHostname}"}
+
+    certrequired=${boolToString cfg.clientCertRequired}
+    ${optionalString (cfg.sslCert != "") "sslCert=${cfg.sslCert}"}
+    ${optionalString (cfg.sslKey != "") "sslKey=${cfg.sslKey}"}
+    ${optionalString (cfg.sslCa != "") "sslCA=${cfg.sslCa}"}
+
+    ${optionalString (cfg.dbus != null) "dbus=${cfg.dbus}"}
+
+    ${cfg.extraConfig}
+  '';
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "murmur" "welcome" ] [ "services" "murmur" "welcometext" ])
+    (mkRemovedOptionModule [ "services" "murmur" "pidfile" ] "Hardcoded to /run/murmur/murmurd.pid now")
+  ];
+
+  options = {
+    services.murmur = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "If enabled, start the Murmur Mumble server.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the Murmur Mumble server.
+        '';
+      };
+
+      autobanAttempts = mkOption {
+        type = types.int;
+        default = 10;
+        description = lib.mdDoc ''
+          Number of attempts a client is allowed to make in
+          `autobanTimeframe` seconds, before being
+          banned for `autobanTime`.
+        '';
+      };
+
+      autobanTimeframe = mkOption {
+        type = types.int;
+        default = 120;
+        description = lib.mdDoc ''
+          Timeframe in which a client can connect without being banned
+          for repeated attempts (in seconds).
+        '';
+      };
+
+      autobanTime = mkOption {
+        type = types.int;
+        default = 300;
+        description = lib.mdDoc "The amount of time an IP ban lasts (in seconds).";
+      };
+
+      logFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/log/murmur/murmurd.log";
+        description = lib.mdDoc "Path to the log file for Murmur daemon. Empty means log to journald.";
+      };
+
+      welcometext = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Welcome message for connected clients.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 64738;
+        description = lib.mdDoc "Ports to bind to (UDP and TCP).";
+      };
+
+      hostName = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Host to bind to. Defaults binding on all addresses.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.murmur;
+        defaultText = literalExpression "pkgs.murmur";
+        description = lib.mdDoc "Overridable attribute of the murmur package to use.";
+      };
+
+      password = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Required password to join server, if specified.";
+      };
+
+      bandwidth = mkOption {
+        type = types.int;
+        default = 72000;
+        description = lib.mdDoc ''
+          Maximum bandwidth (in bits per second) that clients may send
+          speech at.
+        '';
+      };
+
+      users = mkOption {
+        type = types.int;
+        default = 100;
+        description = lib.mdDoc "Maximum number of concurrent clients allowed.";
+      };
+
+      textMsgLength = mkOption {
+        type = types.int;
+        default = 5000;
+        description = lib.mdDoc "Max length of text messages. Set 0 for no limit.";
+      };
+
+      imgMsgLength = mkOption {
+        type = types.int;
+        default = 131072;
+        description = lib.mdDoc "Max length of image messages. Set 0 for no limit.";
+      };
+
+      allowHtml = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Allow HTML in client messages, comments, and channel
+          descriptions.
+        '';
+      };
+
+      logDays = mkOption {
+        type = types.int;
+        default = 31;
+        description = lib.mdDoc ''
+          How long to store RPC logs for in the database. Set 0 to
+          keep logs forever, or -1 to disable DB logging.
+        '';
+      };
+
+      bonjour = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable Bonjour auto-discovery, which allows clients over
+          your LAN to automatically discover Murmur servers.
+        '';
+      };
+
+      sendVersion = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Send Murmur version in UDP response.";
+      };
+
+      registerName = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Public server registration name, and also the name of the
+          Root channel. Even if you don't publicly register your
+          server, you probably still want to set this.
+        '';
+      };
+
+      registerPassword = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Public server registry password, used to authenticate your
+          server to the registry to prevent impersonation; required for
+          subsequent registry updates.
+        '';
+      };
+
+      registerUrl = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "URL website for your server.";
+      };
+
+      registerHostname = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          DNS hostname where your server can be reached. This is only
+          needed if you want your server to be accessed by its
+          hostname and not IP - but the name *must* resolve on the
+          internet properly.
+        '';
+      };
+
+      clientCertRequired = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Require clients to authenticate via certificates.";
+      };
+
+      sslCert = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Path to your SSL certificate.";
+      };
+
+      sslKey = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Path to your SSL key.";
+      };
+
+      sslCa = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Path to your SSL CA certificate.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Extra configuration to put into murmur.ini.";
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/lib/murmur/murmurd.env";
+        description = lib.mdDoc ''
+          Environment file as defined in {manpage}`systemd.exec(5)`.
+
+          Secrets may be passed to the service without adding them to the world-readable
+          Nix store, by specifying placeholder variables as the option value in Nix and
+          setting these variables accordingly in the environment file.
+
+          ```
+            # snippet of murmur-related config
+            services.murmur.password = "$MURMURD_PASSWORD";
+          ```
+
+          ```
+            # content of the environment file
+            MURMURD_PASSWORD=verysecretpassword
+          ```
+
+          Note that this file needs to be available on the host on which
+          `murmur` is running.
+        '';
+      };
+
+      dbus = mkOption {
+        type = types.enum [ null "session" "system" ];
+        default = null;
+        description = lib.mdDoc "Enable D-Bus remote control. Set to the bus you want Murmur to connect to.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.murmur = {
+      description     = "Murmur Service user";
+      home            = "/var/lib/murmur";
+      createHome      = true;
+      uid             = config.ids.uids.murmur;
+      group           = "murmur";
+    };
+    users.groups.murmur = {
+      gid             = config.ids.gids.murmur;
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+      allowedUDPPorts = [ cfg.port ];
+    };
+
+    systemd.services.murmur = {
+      description = "Murmur Chat Service";
+      wantedBy    = [ "multi-user.target" ];
+      after       = [ "network.target" ];
+      preStart    = ''
+        ${pkgs.envsubst}/bin/envsubst \
+          -o /run/murmur/murmurd.ini \
+          -i ${configFile}
+      '';
+
+      serviceConfig = {
+        # murmurd doesn't fork when logging to the console.
+        Type = if forking then "forking" else "simple";
+        PIDFile = mkIf forking "/run/murmur/murmurd.pid";
+        EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
+        ExecStart = "${cfg.package}/bin/mumble-server -ini /run/murmur/murmurd.ini";
+        Restart = "always";
+        RuntimeDirectory = "murmur";
+        RuntimeDirectoryMode = "0700";
+        User = "murmur";
+        Group = "murmur";
+      };
+    };
+
+    # currently not included in upstream package, addition requested at
+    # https://github.com/mumble-voip/mumble/issues/6078
+    services.dbus.packages = mkIf (cfg.dbus == "system") [(pkgs.writeTextFile {
+      name = "murmur-dbus-policy";
+      text = ''
+        <!DOCTYPE busconfig PUBLIC
+          "-//freedesktop//DTD D-BUS Bus Configuration 1.0//EN"
+          "http://www.freedesktop.org/standards/dbus/1.0/busconfig.dtd">
+        <busconfig>
+          <policy user="murmur">
+            <allow own="net.sourceforge.mumble.murmur"/>
+          </policy>
+
+          <policy context="default">
+            <allow send_destination="net.sourceforge.mumble.murmur"/>
+            <allow receive_sender="net.sourceforge.mumble.murmur"/>
+          </policy>
+        </busconfig>
+      '';
+      destination = "/share/dbus-1/system.d/murmur.conf";
+    })];
+
+    security.apparmor.policies."bin.mumble-server".profile = ''
+      include <tunables/global>
+
+      ${cfg.package}/bin/{mumble-server,.mumble-server-wrapped} {
+        include <abstractions/base>
+        include <abstractions/nameservice>
+        include <abstractions/ssl_certs>
+        include "${pkgs.apparmorRulesFromClosure { name = "mumble-server"; } cfg.package}"
+        pix ${cfg.package}/bin/.mumble-server-wrapped,
+
+        r ${config.environment.etc."os-release".source},
+        r ${config.environment.etc."lsb-release".source},
+        owner rwk /var/lib/murmur/murmur.sqlite,
+        owner rw /var/lib/murmur/murmur.sqlite-journal,
+        owner r /var/lib/murmur/,
+        r /run/murmur/murmurd.pid,
+        r /run/murmur/murmurd.ini,
+        r ${configFile},
+      '' + optionalString (cfg.logFile != null) ''
+        rw ${cfg.logFile},
+      '' + optionalString (cfg.sslCert != "") ''
+        r ${cfg.sslCert},
+      '' + optionalString (cfg.sslKey != "") ''
+        r ${cfg.sslKey},
+      '' + optionalString (cfg.sslCa != "") ''
+        r ${cfg.sslCa},
+      '' + optionalString (cfg.dbus != null) ''
+        dbus bus=${cfg.dbus}
+      '' + ''
+      }
+    '';
+  };
+}
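+# A minimal usage sketch (all values below are placeholders, not defaults): enable the
+# service, open its port, and keep the server password out of the world-readable Nix
+# store via the environment file mechanism described in the environmentFile option.
+#
+#   services.murmur = {
+#     enable = true;
+#     openFirewall = true;
+#     password = "$MURMURD_PASSWORD";
+#     environmentFile = "/var/lib/murmur/murmurd.env";
+#   };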
diff --git a/nixpkgs/nixos/modules/services/networking/mxisd.nix b/nixpkgs/nixos/modules/services/networking/mxisd.nix
new file mode 100644
index 000000000000..528a51c1f3af
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/mxisd.nix
@@ -0,0 +1,142 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  isMa1sd =
+    package:
+    lib.hasPrefix "ma1sd" package.name;
+
+  isMxisd =
+    package:
+    lib.hasPrefix "mxisd" package.name;
+
+  cfg = config.services.mxisd;
+
+  server = optionalAttrs (cfg.server.name != null) { inherit (cfg.server) name; }
+        // optionalAttrs (cfg.server.port != null) { inherit (cfg.server) port; };
+
+  baseConfig = {
+    matrix.domain = cfg.matrix.domain;
+    key.path = "${cfg.dataDir}/signing.key";
+    storage = {
+      provider.sqlite.database = if isMa1sd cfg.package
+                                 then "${cfg.dataDir}/ma1sd.db"
+                                 else "${cfg.dataDir}/mxisd.db";
+    };
+  } // optionalAttrs (server != {}) { inherit server; };
+
+  # merges baseConfig and extraConfig into a single file
+  fullConfig = recursiveUpdate baseConfig cfg.extraConfig;
+
+  configFile = if isMa1sd cfg.package
+               then pkgs.writeText "ma1sd-config.yaml" (builtins.toJSON fullConfig)
+               else pkgs.writeText "mxisd-config.yaml" (builtins.toJSON fullConfig);
+
+in {
+  options = {
+    services.mxisd = {
+      enable = mkEnableOption (lib.mdDoc "matrix federated identity server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ma1sd;
+        defaultText = literalExpression "pkgs.ma1sd";
+        description = lib.mdDoc "The mxisd/ma1sd package to use";
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Path to an environment-file which may contain secrets to be
+          substituted via `envsubst`.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/mxisd";
+        description = lib.mdDoc "Where data mxisd/ma1sd uses resides";
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc "Extra options merged into the mxisd/ma1sd configuration";
+      };
+
+      matrix = {
+
+        domain = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            The domain of the Matrix homeserver
+          '';
+        };
+
+      };
+
+      server = {
+
+        name = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Public hostname of mxisd/ma1sd, if different from the Matrix domain.
+          '';
+        };
+
+        port = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          description = lib.mdDoc ''
+            HTTP port to listen on (unencrypted)
+          '';
+        };
+
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.mxisd =
+      {
+        group = "mxisd";
+        home = cfg.dataDir;
+        createHome = true;
+        shell = "${pkgs.bash}/bin/bash";
+        uid = config.ids.uids.mxisd;
+      };
+
+    users.groups.mxisd =
+      {
+        gid = config.ids.gids.mxisd;
+      };
+
+    systemd.services.mxisd = {
+      description = "a federated identity server for the matrix ecosystem";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = let
+        executable = if isMa1sd cfg.package then "ma1sd" else "mxisd";
+      in {
+        Type = "simple";
+        User = "mxisd";
+        Group = "mxisd";
+        EnvironmentFile = mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+        ExecStart = "${cfg.package}/bin/${executable} -c ${cfg.dataDir}/mxisd-config.yaml";
+        ExecStartPre = "${pkgs.writeShellScript "mxisd-substitute-secrets" ''
+          umask 0077
+          ${pkgs.envsubst}/bin/envsubst -o ${cfg.dataDir}/mxisd-config.yaml \
+            -i ${configFile}
+        ''}";
+        WorkingDirectory = cfg.dataDir;
+        Restart = "on-failure";
+      };
+    };
+  };
+}
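+# Usage sketch (domain and file path are placeholders): point the identity server at a
+# Matrix homeserver domain and, optionally, substitute secrets from an environment file
+# as done in ExecStartPre above.
+#
+#   services.mxisd = {
+#     enable = true;
+#     matrix.domain = "example.org";
+#     environmentFile = "/var/lib/mxisd/secrets.env";
+#   };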
diff --git a/nixpkgs/nixos/modules/services/networking/namecoind.nix b/nixpkgs/nixos/modules/services/networking/namecoind.nix
new file mode 100644
index 000000000000..085d6c5fe282
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/namecoind.nix
@@ -0,0 +1,199 @@
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg     = config.services.namecoind;
+  dataDir = "/var/lib/namecoind";
+  useSSL  = (cfg.rpc.certificate != null) && (cfg.rpc.key != null);
+  useRPC  = (cfg.rpc.user != null) && (cfg.rpc.password != null);
+
+  listToConf = option: list:
+    concatMapStrings (value :"${option}=${value}\n") list;
+
+  configFile = pkgs.writeText "namecoin.conf" (''
+    server=1
+    daemon=0
+    txindex=1
+    txprevcache=1
+    walletpath=${cfg.wallet}
+    gen=${if cfg.generate then "1" else "0"}
+    ${listToConf "addnode" cfg.extraNodes}
+    ${listToConf "connect" cfg.trustedNodes}
+  '' + optionalString useRPC ''
+    rpcbind=${cfg.rpc.address}
+    rpcport=${toString cfg.rpc.port}
+    rpcuser=${cfg.rpc.user}
+    rpcpassword=${cfg.rpc.password}
+    ${listToConf "rpcallowip" cfg.rpc.allowFrom}
+  '' + optionalString useSSL ''
+    rpcssl=1
+    rpcsslcertificatechainfile=${cfg.rpc.certificate}
+    rpcsslprivatekeyfile=${cfg.rpc.key}
+    rpcsslciphers=TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH
+  '');
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.namecoind = {
+
+      enable = mkEnableOption (lib.mdDoc "namecoind, Namecoin client");
+
+      wallet = mkOption {
+        type = types.path;
+        default = "${dataDir}/wallet.dat";
+        description = lib.mdDoc ''
+          Wallet file. The ownership of the file has to be
+          namecoin:namecoin, and the permissions must be 0640.
+        '';
+      };
+
+      generate = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to generate (mine) Namecoins.
+        '';
+      };
+
+      extraNodes = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of additional peer IP addresses to connect to.
+        '';
+      };
+
+      trustedNodes = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of peer IP addresses to connect to exclusively. If specified,
+          no other connections will be made.
+        '';
+      };
+
+      rpc.user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          User name for RPC connections.
+        '';
+      };
+
+      rpc.password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Password for RPC connections.
+        '';
+      };
+
+      rpc.address = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc ''
+          IP address the RPC server will bind to.
+        '';
+      };
+
+      rpc.port = mkOption {
+        type = types.port;
+        default = 8332;
+        description = lib.mdDoc ''
+          Port the RPC server will bind to.
+        '';
+      };
+
+      rpc.certificate = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/lib/namecoind/server.cert";
+        description = lib.mdDoc ''
+          Certificate file for securing RPC connections.
+        '';
+      };
+
+      rpc.key = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/lib/namecoind/server.pem";
+        description = lib.mdDoc ''
+          Key file for securing RPC connections.
+        '';
+      };
+
+
+      rpc.allowFrom = mkOption {
+        type = types.listOf types.str;
+        default = [ "127.0.0.1" ];
+        description = lib.mdDoc ''
+          List of IP address ranges allowed to use the RPC API.
+          Wildcards (*) can be used to specify a range.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.namecoin = {
+      uid  = config.ids.uids.namecoin;
+      description = "Namecoin daemon user";
+      home = dataDir;
+      createHome = true;
+    };
+
+    users.groups.namecoin = {
+      gid  = config.ids.gids.namecoin;
+    };
+
+    systemd.services.namecoind = {
+      description = "Namecoind daemon";
+      after    = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      startLimitIntervalSec = 120;
+      startLimitBurst = 5;
+      serviceConfig = {
+        User  = "namecoin";
+        Group = "namecoin";
+        ExecStart  = "${pkgs.namecoind}/bin/namecoind -conf=${configFile} -datadir=${dataDir} -printtoconsole";
+        ExecStop   = "${pkgs.coreutils}/bin/kill -KILL $MAINPID";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Nice = "10";
+        PrivateTmp = true;
+        TimeoutStopSec     = "60s";
+        TimeoutStartSec    = "2s";
+        Restart            = "always";
+      };
+
+      preStart = optionalString (cfg.wallet != "${dataDir}/wallet.dat")  ''
+        # check wallet file permissions
+        if [ "$(stat --printf '%u' ${cfg.wallet})" != "${toString config.ids.uids.namecoin}" \
+           -o "$(stat --printf '%g' ${cfg.wallet})" != "${toString config.ids.gids.namecoin}" \
+           -o "$(stat --printf '%a' ${cfg.wallet})" != "640" ]; then
+           echo "ERROR: bad ownership or rights on ${cfg.wallet}" >&2
+           exit 1
+        fi
+      '';
+
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
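+# Usage sketch (credentials are placeholders and should be replaced): run the daemon
+# with RPC access restricted to the local host, e.g. for consumption by ncdns.
+#
+#   services.namecoind = {
+#     enable = true;
+#     rpc.user = "namecoin-rpc";
+#     rpc.password = "change-me";
+#     rpc.allowFrom = [ "127.0.0.1" ];
+#   };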
diff --git a/nixpkgs/nixos/modules/services/networking/nar-serve.nix b/nixpkgs/nixos/modules/services/networking/nar-serve.nix
new file mode 100644
index 000000000000..b8b76120e44f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nar-serve.nix
@@ -0,0 +1,55 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+let
+  cfg = config.services.nar-serve;
+in
+{
+  meta = {
+    maintainers = [ maintainers.rizary ];
+  };
+  options = {
+    services.nar-serve = {
+      enable = mkEnableOption (lib.mdDoc "serving NAR file contents via HTTP");
+
+      port = mkOption {
+        type = types.port;
+        default = 8383;
+        description = lib.mdDoc ''
+          Port number on which nar-serve will listen.
+        '';
+      };
+
+      cacheURL = mkOption {
+        type = types.str;
+        default = "https://cache.nixos.org/";
+        description = lib.mdDoc ''
+          Binary cache URL to connect to.
+
+          The URL format is compatible with the Nix remote URL style, such as:
+          - http://, https:// for binary caches via HTTP or HTTPS
+          - s3:// for binary caches stored in Amazon S3
+          - gs:// for binary caches stored in Google Cloud Storage
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.nar-serve = {
+      description = "NAR server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment.PORT = toString cfg.port;
+      environment.NAR_CACHE_URL = cfg.cacheURL;
+
+      serviceConfig = {
+        Restart = "always";
+        RestartSec = "5s";
+        ExecStart = "${pkgs.nar-serve}/bin/nar-serve";
+        DynamicUser = true;
+      };
+    };
+  };
+}
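+# Usage sketch (the cache URL is illustrative): serve NAR contents from the default
+# public cache, or point cacheURL at another http(s)://, s3:// or gs:// cache.
+#
+#   services.nar-serve = {
+#     enable = true;
+#     port = 8383;
+#     cacheURL = "https://cache.nixos.org/";
+#   };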
diff --git a/nixpkgs/nixos/modules/services/networking/nat-iptables.nix b/nixpkgs/nixos/modules/services/networking/nat-iptables.nix
new file mode 100644
index 000000000000..d1bed401feeb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nat-iptables.nix
@@ -0,0 +1,191 @@
+# This module enables Network Address Translation (NAT).
+# XXX: todo: support multiple upstream links
+# see http://yesican.chsoft.biz/lartc/MultihomedLinuxNetworking.html
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.networking.nat;
+
+  mkDest = externalIP:
+    if externalIP == null
+    then "-j MASQUERADE"
+    else "-j SNAT --to-source ${externalIP}";
+  dest = mkDest cfg.externalIP;
+  destIPv6 = mkDest cfg.externalIPv6;
+
+  # Whether given IP (plus optional port) is an IPv6.
+  isIPv6 = ip: builtins.length (lib.splitString ":" ip) > 2;
+
+  helpers = import ./helpers.nix { inherit config lib; };
+
+  flushNat = ''
+    ${helpers}
+    ip46tables -w -t nat -D PREROUTING -j nixos-nat-pre 2>/dev/null|| true
+    ip46tables -w -t nat -F nixos-nat-pre 2>/dev/null || true
+    ip46tables -w -t nat -X nixos-nat-pre 2>/dev/null || true
+    ip46tables -w -t nat -D POSTROUTING -j nixos-nat-post 2>/dev/null || true
+    ip46tables -w -t nat -F nixos-nat-post 2>/dev/null || true
+    ip46tables -w -t nat -X nixos-nat-post 2>/dev/null || true
+    ip46tables -w -t nat -D OUTPUT -j nixos-nat-out 2>/dev/null || true
+    ip46tables -w -t nat -F nixos-nat-out 2>/dev/null || true
+    ip46tables -w -t nat -X nixos-nat-out 2>/dev/null || true
+
+    ${cfg.extraStopCommands}
+  '';
+
+  mkSetupNat = { iptables, dest, internalIPs, forwardPorts }: ''
+    # We can't match on incoming interface in POSTROUTING, so
+    # mark packets coming from the internal interfaces.
+    ${concatMapStrings (iface: ''
+      ${iptables} -w -t nat -A nixos-nat-pre \
+        -i '${iface}' -j MARK --set-mark 1
+    '') cfg.internalInterfaces}
+
+    # NAT the marked packets.
+    ${optionalString (cfg.internalInterfaces != []) ''
+      ${iptables} -w -t nat -A nixos-nat-post -m mark --mark 1 \
+        ${optionalString (cfg.externalInterface != null) "-o ${cfg.externalInterface}"} ${dest}
+    ''}
+
+    # NAT packets coming from the internal IPs.
+    ${concatMapStrings (range: ''
+      ${iptables} -w -t nat -A nixos-nat-post \
+        -s '${range}' ${optionalString (cfg.externalInterface != null) "-o ${cfg.externalInterface}"} ${dest}
+    '') internalIPs}
+
+    # NAT from external ports to internal ports.
+    ${concatMapStrings (fwd: ''
+      ${iptables} -w -t nat -A nixos-nat-pre \
+        -i ${toString cfg.externalInterface} -p ${fwd.proto} \
+        --dport ${builtins.toString fwd.sourcePort} \
+        -j DNAT --to-destination ${fwd.destination}
+
+      ${concatMapStrings (loopbackip:
+        let
+          matchIP          = if isIPv6 fwd.destination then "[[]([0-9a-fA-F:]+)[]]" else "([0-9.]+)";
+          m                = builtins.match "${matchIP}:([0-9-]+)" fwd.destination;
+          destinationIP    = if m == null then throw "bad ip:ports `${fwd.destination}'" else elemAt m 0;
+          destinationPorts = if m == null then throw "bad ip:ports `${fwd.destination}'" else builtins.replaceStrings ["-"] [":"] (elemAt m 1);
+        in ''
+          # Allow connections to ${loopbackip}:${toString fwd.sourcePort} from the host itself
+          ${iptables} -w -t nat -A nixos-nat-out \
+            -d ${loopbackip} -p ${fwd.proto} \
+            --dport ${builtins.toString fwd.sourcePort} \
+            -j DNAT --to-destination ${fwd.destination}
+
+          # Allow connections to ${loopbackip}:${toString fwd.sourcePort} from other hosts behind NAT
+          ${iptables} -w -t nat -A nixos-nat-pre \
+            -d ${loopbackip} -p ${fwd.proto} \
+            --dport ${builtins.toString fwd.sourcePort} \
+            -j DNAT --to-destination ${fwd.destination}
+
+          ${iptables} -w -t nat -A nixos-nat-post \
+            -d ${destinationIP} -p ${fwd.proto} \
+            --dport ${destinationPorts} \
+            -j SNAT --to-source ${loopbackip}
+        '') fwd.loopbackIPs}
+    '') forwardPorts}
+  '';
+
+  setupNat = ''
+    ${helpers}
+    # Create subchains where we store rules
+    ip46tables -w -t nat -N nixos-nat-pre
+    ip46tables -w -t nat -N nixos-nat-post
+    ip46tables -w -t nat -N nixos-nat-out
+
+    ${mkSetupNat {
+      iptables = "iptables";
+      inherit dest;
+      inherit (cfg) internalIPs;
+      forwardPorts = filter (x: !(isIPv6 x.destination)) cfg.forwardPorts;
+    }}
+
+    ${optionalString cfg.enableIPv6 (mkSetupNat {
+      iptables = "ip6tables";
+      dest = destIPv6;
+      internalIPs = cfg.internalIPv6s;
+      forwardPorts = filter (x: isIPv6 x.destination) cfg.forwardPorts;
+    })}
+
+    ${optionalString (cfg.dmzHost != null) ''
+      iptables -w -t nat -A nixos-nat-pre \
+        -i ${toString cfg.externalInterface} -j DNAT \
+        --to-destination ${cfg.dmzHost}
+    ''}
+
+    ${cfg.extraCommands}
+
+    # Append our chains to the nat tables
+    ip46tables -w -t nat -A PREROUTING -j nixos-nat-pre
+    ip46tables -w -t nat -A POSTROUTING -j nixos-nat-post
+    ip46tables -w -t nat -A OUTPUT -j nixos-nat-out
+  '';
+
+in
+
+{
+
+  options = {
+
+    networking.nat.extraCommands = mkOption {
+      type = types.lines;
+      default = "";
+      example = "iptables -A INPUT -p icmp -j ACCEPT";
+      description = lib.mdDoc ''
+        Additional shell commands executed as part of the nat
+        initialisation script.
+
+        This option is incompatible with the nftables based nat module.
+      '';
+    };
+
+    networking.nat.extraStopCommands = mkOption {
+      type = types.lines;
+      default = "";
+      example = "iptables -D INPUT -p icmp -j ACCEPT || true";
+      description = lib.mdDoc ''
+        Additional shell commands executed as part of the nat
+        teardown script.
+
+        This option is incompatible with the nftables based nat module.
+      '';
+    };
+
+  };
+
+
+  config = mkIf (!config.networking.nftables.enable)
+    (mkMerge [
+      ({ networking.firewall.extraCommands = mkBefore flushNat; })
+      (mkIf config.networking.nat.enable {
+
+        networking.firewall = mkIf config.networking.firewall.enable {
+          extraCommands = setupNat;
+          extraStopCommands = flushNat;
+        };
+
+        systemd.services = mkIf (!config.networking.firewall.enable) {
+          nat = {
+            description = "Network Address Translation";
+            wantedBy = [ "network.target" ];
+            after = [ "network-pre.target" "systemd-modules-load.service" ];
+            path = [ config.networking.firewall.package ];
+            unitConfig.ConditionCapability = "CAP_NET_ADMIN";
+
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+            };
+
+            script = flushNat + setupNat;
+
+            postStop = flushNat;
+          };
+        };
+      })
+    ]);
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nat-nftables.nix b/nixpkgs/nixos/modules/services/networking/nat-nftables.nix
new file mode 100644
index 000000000000..4b2317ca2ffc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nat-nftables.nix
@@ -0,0 +1,184 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.networking.nat;
+
+  mkDest = externalIP:
+    if externalIP == null
+    then "masquerade"
+    else "snat ${externalIP}";
+  dest = mkDest cfg.externalIP;
+  destIPv6 = mkDest cfg.externalIPv6;
+
+  toNftSet = list: concatStringsSep ", " list;
+  toNftRange = ports: replaceStrings [ ":" ] [ "-" ] (toString ports);
+
+  ifaceSet = toNftSet (map (x: ''"${x}"'') cfg.internalInterfaces);
+  ipSet = toNftSet cfg.internalIPs;
+  ipv6Set = toNftSet cfg.internalIPv6s;
+  oifExpr = optionalString (cfg.externalInterface != null) ''oifname "${cfg.externalInterface}"'';
+
+  # Whether given IP (plus optional port) is an IPv6.
+  isIPv6 = ip: length (lib.splitString ":" ip) > 2;
+
+  splitIPPorts = IPPorts:
+    let
+      matchIP = if isIPv6 IPPorts then "[[]([0-9a-fA-F:]+)[]]" else "([0-9.]+)";
+      m = builtins.match "${matchIP}:([0-9-]+)" IPPorts;
+    in
+    {
+      IP = if m == null then throw "bad ip:ports `${IPPorts}'" else elemAt m 0;
+      ports = if m == null then throw "bad ip:ports `${IPPorts}'" else elemAt m 1;
+    };
+
+  mkTable = { ipVer, dest, ipSet, forwardPorts, dmzHost }:
+    let
+      # nftables does not support both port and port range as values in a dnat map.
+      # e.g. "dnat th dport map { 80 : 10.0.0.1 . 80, 443 : 10.0.0.2 . 900-1000 }"
+      # So we split them.
+      fwdPorts = filter (x: length (splitString "-" x.destination) == 1) forwardPorts;
+      fwdPortsRange = filter (x: length (splitString "-" x.destination) > 1) forwardPorts;
+
+      # nftables maps for port forward
+      # l4proto . dport : addr . port
+      toFwdMap = forwardPorts: toNftSet (map
+        (fwd:
+          with (splitIPPorts fwd.destination);
+          "${fwd.proto} . ${toNftRange fwd.sourcePort} : ${IP} . ${ports}"
+        )
+        forwardPorts);
+      fwdMap = toFwdMap fwdPorts;
+      fwdRangeMap = toFwdMap fwdPortsRange;
+
+      # nftables maps for port forward loopback dnat
+      # daddr . l4proto . dport : addr . port
+      toFwdLoopDnatMap = forwardPorts: toNftSet (concatMap
+        (fwd: map
+          (loopbackip:
+            with (splitIPPorts fwd.destination);
+            "${loopbackip} . ${fwd.proto} . ${toNftRange fwd.sourcePort} : ${IP} . ${ports}"
+          )
+          fwd.loopbackIPs)
+        forwardPorts);
+      fwdLoopDnatMap = toFwdLoopDnatMap fwdPorts;
+      fwdLoopDnatRangeMap = toFwdLoopDnatMap fwdPortsRange;
+
+      # nftables set for port forward loopback snat
+      # daddr . l4proto . dport
+      fwdLoopSnatSet = toNftSet (map
+        (fwd:
+          with (splitIPPorts fwd.destination);
+          "${IP} . ${fwd.proto} . ${ports}"
+        )
+        forwardPorts);
+    in
+    ''
+      chain pre {
+        type nat hook prerouting priority dstnat;
+
+        ${optionalString (fwdMap != "") ''
+          iifname "${cfg.externalInterface}" dnat meta l4proto . th dport map { ${fwdMap} } comment "port forward"
+        ''}
+        ${optionalString (fwdRangeMap != "") ''
+          iifname "${cfg.externalInterface}" dnat meta l4proto . th dport map { ${fwdRangeMap} } comment "port forward"
+        ''}
+
+        ${optionalString (fwdLoopDnatMap != "") ''
+          dnat ${ipVer} daddr . meta l4proto . th dport map { ${fwdLoopDnatMap} } comment "port forward loopback from other hosts behind NAT"
+        ''}
+        ${optionalString (fwdLoopDnatRangeMap != "") ''
+          dnat ${ipVer} daddr . meta l4proto . th dport map { ${fwdLoopDnatRangeMap} } comment "port forward loopback from other hosts behind NAT"
+        ''}
+
+        ${optionalString (dmzHost != null) ''
+          iifname "${cfg.externalInterface}" dnat ${dmzHost} comment "dmz"
+        ''}
+      }
+
+      chain post {
+        type nat hook postrouting priority srcnat;
+
+        ${optionalString (ifaceSet != "") ''
+          iifname { ${ifaceSet} } ${oifExpr} ${dest} comment "from internal interfaces"
+        ''}
+        ${optionalString (ipSet != "") ''
+          ${ipVer} saddr { ${ipSet} } ${oifExpr} ${dest} comment "from internal IPs"
+        ''}
+
+        ${optionalString (fwdLoopSnatSet != "") ''
+          iifname != "${cfg.externalInterface}" ${ipVer} daddr . meta l4proto . th dport { ${fwdLoopSnatSet} } masquerade comment "port forward loopback snat"
+        ''}
+      }
+
+      chain out {
+        type nat hook output priority mangle;
+
+        ${optionalString (fwdLoopDnatMap != "") ''
+          dnat ${ipVer} daddr . meta l4proto . th dport map { ${fwdLoopDnatMap} } comment "port forward loopback from the host itself"
+        ''}
+        ${optionalString (fwdLoopDnatRangeMap != "") ''
+          dnat ${ipVer} daddr . meta l4proto . th dport map { ${fwdLoopDnatRangeMap} } comment "port forward loopback from the host itself"
+        ''}
+      }
+    '';
+
+in
+
+{
+
+  config = mkIf (config.networking.nftables.enable && cfg.enable) {
+
+    assertions = [
+      {
+        assertion = cfg.extraCommands == "";
+        message = "extraCommands is incompatible with the nftables based nat module: ${cfg.extraCommands}";
+      }
+      {
+        assertion = cfg.extraStopCommands == "";
+        message = "extraStopCommands is incompatible with the nftables based nat module: ${cfg.extraStopCommands}";
+      }
+      {
+        assertion = config.networking.nftables.rulesetFile == null;
+        message = "networking.nftables.rulesetFile conflicts with the nat module";
+      }
+    ];
+
+    networking.nftables.tables = {
+      "nixos-nat" = {
+        family = "ip";
+        content = mkTable {
+          ipVer = "ip";
+          inherit dest ipSet;
+          forwardPorts = filter (x: !(isIPv6 x.destination)) cfg.forwardPorts;
+          inherit (cfg) dmzHost;
+        };
+      };
+      "nixos-nat6" = mkIf cfg.enableIPv6 {
+        family = "ip6";
+        name = "nixos-nat";
+        content = mkTable {
+          ipVer = "ip6";
+          dest = destIPv6;
+          ipSet = ipv6Set;
+          forwardPorts = filter (x: isIPv6 x.destination) cfg.forwardPorts;
+          dmzHost = null;
+        };
+      };
+    };
+
+    networking.firewall.extraForwardRules = optionalString config.networking.firewall.filterForward ''
+      ${optionalString (ifaceSet != "") ''
+        iifname { ${ifaceSet} } ${oifExpr} accept comment "from internal interfaces"
+      ''}
+      ${optionalString (ipSet != "") ''
+        ip saddr { ${ipSet} } ${oifExpr} accept comment "from internal IPs"
+      ''}
+      ${optionalString (ipv6Set != "") ''
+        ip6 saddr { ${ipv6Set} } ${oifExpr} accept comment "from internal IPv6s"
+      ''}
+    '';
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nat.nix b/nixpkgs/nixos/modules/services/networking/nat.nix
new file mode 100644
index 000000000000..3afe6fe0a971
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nat.nix
@@ -0,0 +1,196 @@
+# This module enables Network Address Translation (NAT).
+# XXX: todo: support multiple upstream links
+# see http://yesican.chsoft.biz/lartc/MultihomedLinuxNetworking.html
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking.nat;
+
+in
+
+{
+
+  options = {
+
+    networking.nat.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable Network Address Translation (NAT).
+      '';
+    };
+
+    networking.nat.enableIPv6 = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable IPv6 NAT.
+      '';
+    };
+
+    networking.nat.internalInterfaces = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "eth0" ];
+      description = lib.mdDoc ''
+        The interfaces for which to perform NAT. Packets coming from
+        these interfaces and destined for the external interface will
+        be rewritten.
+      '';
+    };
+
+    networking.nat.internalIPs = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "192.168.1.0/24" ];
+      description = lib.mdDoc ''
+        The IP address ranges for which to perform NAT.  Packets
+        coming from these addresses (on any interface) and destined
+        for the external interface will be rewritten.
+      '';
+    };
+
+    networking.nat.internalIPv6s = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "fc00::/64" ];
+      description = lib.mdDoc ''
+        The IPv6 address ranges for which to perform NAT.  Packets
+        coming from these addresses (on any interface) and destined
+        for the external interface will be rewritten.
+      '';
+    };
+
+    networking.nat.externalInterface = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "eth1";
+      description = lib.mdDoc ''
+        The name of the external network interface.
+      '';
+    };
+
+    networking.nat.externalIP = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "203.0.113.123";
+      description = lib.mdDoc ''
+        The public IP address to which packets from the local
+        network are to be rewritten.  If this is left empty, the
+        IP address associated with the external interface will be
+        used.
+      '';
+    };
+
+    networking.nat.externalIPv6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "2001:dc0:2001:11::175";
+      description = lib.mdDoc ''
+        The public IPv6 address to which packets from the local
+        network are to be rewritten.  If this is left empty, the
+        IP address associated with the external interface will be
+        used.
+      '';
+    };
+
+    networking.nat.forwardPorts = mkOption {
+      type = with types; listOf (submodule {
+        options = {
+          sourcePort = mkOption {
+            type = types.either types.int (types.strMatching "[[:digit:]]+:[[:digit:]]+");
+            example = 8080;
+            description = lib.mdDoc "Source port of the external interface; to specify a port range, use a string with a colon (e.g. \"60000:61000\")";
+          };
+
+          destination = mkOption {
+            type = types.str;
+            example = "10.0.0.1:80";
+            description = lib.mdDoc "Forward connection to destination ip:port (or [ipv6]:port); to specify a port range, use ip:start-end";
+          };
+
+          proto = mkOption {
+            type = types.str;
+            default = "tcp";
+            example = "udp";
+            description = lib.mdDoc "Protocol of forwarded connection";
+          };
+
+          loopbackIPs = mkOption {
+            type = types.listOf types.str;
+            default = [ ];
+            example = literalExpression ''[ "55.1.2.3" ]'';
+            description = lib.mdDoc "Public IPs for NAT reflection; for connections to `loopbackip:sourcePort` from the host itself and from other hosts behind NAT";
+          };
+        };
+      });
+      default = [ ];
+      example = [
+        { sourcePort = 8080; destination = "10.0.0.1:80"; proto = "tcp"; }
+        { sourcePort = 8080; destination = "[fc00::2]:80"; proto = "tcp"; }
+      ];
+      description = lib.mdDoc ''
+        List of forwarded ports from the external interface to
+        internal destinations by using DNAT. Destination can be
+        IPv6 if IPv6 NAT is enabled.
+      '';
+    };
+
+    networking.nat.dmzHost = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.0.0.1";
+      description = lib.mdDoc ''
+        The local IP address to which all traffic that does not match any
+        forwarding rule is forwarded.
+      '';
+    };
+
+  };
+
+
+  config = mkIf config.networking.nat.enable {
+
+    assertions = [
+      {
+        assertion = cfg.enableIPv6 -> config.networking.enableIPv6;
+        message = "networking.nat.enableIPv6 requires networking.enableIPv6";
+      }
+      {
+        assertion = (cfg.dmzHost != null) -> (cfg.externalInterface != null);
+        message = "networking.nat.dmzHost requires networking.nat.externalInterface";
+      }
+      {
+        assertion = (cfg.forwardPorts != [ ]) -> (cfg.externalInterface != null);
+        message = "networking.nat.forwardPorts requires networking.nat.externalInterface";
+      }
+    ];
+
+    # Use the same iptables package as in config.networking.firewall.
+    # When the firewall is enabled, this should be deduplicated without any
+    # error.
+    environment.systemPackages = [ config.networking.firewall.package ];
+
+    boot = {
+      kernelModules = [ "nf_nat_ftp" ];
+      kernel.sysctl = {
+        "net.ipv4.conf.all.forwarding" = mkOverride 99 true;
+        "net.ipv4.conf.default.forwarding" = mkOverride 99 true;
+      } // optionalAttrs cfg.enableIPv6 {
+        # Do not prevent IPv6 autoconfiguration.
+        # See <http://strugglers.net/~andy/blog/2011/09/04/linux-ipv6-router-advertisements-and-forwarding/>.
+        "net.ipv6.conf.all.accept_ra" = mkOverride 99 2;
+        "net.ipv6.conf.default.accept_ra" = mkOverride 99 2;
+
+        # Forward IPv6 packets.
+        "net.ipv6.conf.all.forwarding" = mkOverride 99 true;
+        "net.ipv6.conf.default.forwarding" = mkOverride 99 true;
+      };
+    };
+
+  };
+}
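+# Usage sketch (interface names and addresses are placeholders): masquerade an internal
+# bridge behind one external interface and forward a single TCP port via DNAT.
+#
+#   networking.nat = {
+#     enable = true;
+#     externalInterface = "eth1";
+#     internalInterfaces = [ "br0" ];
+#     forwardPorts = [
+#       { sourcePort = 2222; destination = "192.168.100.2:22"; proto = "tcp"; }
+#     ];
+#   };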
diff --git a/nixpkgs/nixos/modules/services/networking/nats.nix b/nixpkgs/nixos/modules/services/networking/nats.nix
new file mode 100644
index 000000000000..6c21e21b5cb8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nats.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.nats;
+
+  format = pkgs.formats.json { };
+
+  configFile = format.generate "nats.conf" cfg.settings;
+
+in {
+
+  ### Interface
+
+  options = {
+    services.nats = {
+      enable = mkEnableOption (lib.mdDoc "NATS messaging system");
+
+      user = mkOption {
+        type = types.str;
+        default = "nats";
+        description = lib.mdDoc "User account under which NATS runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nats";
+        description = lib.mdDoc "Group under which NATS runs.";
+      };
+
+      serverName = mkOption {
+        default = "nats";
+        example = "n1-c3";
+        type = types.str;
+        description = lib.mdDoc ''
+          Name of the NATS server, must be unique if clustered.
+        '';
+      };
+
+      jetstream = mkEnableOption (lib.mdDoc "JetStream");
+
+      port = mkOption {
+        default = 4222;
+        type = types.port;
+        description = lib.mdDoc ''
+          Port on which to listen.
+        '';
+      };
+
+      dataDir = mkOption {
+        default = "/var/lib/nats";
+        type = types.path;
+        description = lib.mdDoc ''
+          The NATS data directory. Only used if JetStream is enabled, for
+          storing stream metadata and messages.
+
+          If left as the default value this directory will automatically be
+          created before the NATS server starts, otherwise the sysadmin is
+          responsible for ensuring the directory exists with appropriate
+          ownership and permissions.
+        '';
+      };
+
+      settings = mkOption {
+        default = { };
+        type = format.type;
+        example = literalExpression ''
+          {
+            jetstream = {
+              max_mem = "1G";
+              max_file = "10G";
+            };
+          };
+        '';
+        description = lib.mdDoc ''
+          Declarative NATS configuration. See the
+          [NATS documentation](https://docs.nats.io/nats-server/configuration)
+          for a list of options.
+        '';
+      };
+    };
+  };
+
+  ### Implementation
+
+  config = mkIf cfg.enable {
+    services.nats.settings = {
+      server_name = cfg.serverName;
+      port = cfg.port;
+      jetstream = optionalAttrs cfg.jetstream { store_dir = cfg.dataDir; };
+    };
+
+    systemd.services.nats = {
+      description = "NATS messaging system";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = mkMerge [
+        (mkIf (cfg.dataDir == "/var/lib/nats") {
+          StateDirectory = "nats";
+          StateDirectoryMode = "0750";
+        })
+        {
+          Type = "simple";
+          ExecStart = "${pkgs.nats-server}/bin/nats-server -c ${configFile}";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          ExecStop = "${pkgs.coreutils}/bin/kill -SIGINT $MAINPID";
+          Restart = "on-failure";
+
+          User = cfg.user;
+          Group = cfg.group;
+
+          # Hardening
+          CapabilityBoundingSet = "";
+          LimitNOFILE = 800000; # JetStream requires 2 FDs open per stream.
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProcSubset = "pid";
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          ProtectSystem = "strict";
+          ReadOnlyPaths = [ ];
+          ReadWritePaths = [ cfg.dataDir ];
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallFilter = [ "@system-service" "~@privileged" ];
+          UMask = "0077";
+        }
+      ];
+    };
+
+    users.users = mkIf (cfg.user == "nats") {
+      nats = {
+        description = "NATS daemon user";
+        isSystemUser = true;
+        group = cfg.group;
+        home = cfg.dataDir;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "nats") { nats = { }; };
+  };
+
+}
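+# Usage sketch (the memory limit is illustrative, not a default): a single-node server
+# with JetStream persistence in the default state directory.
+#
+#   services.nats = {
+#     enable = true;
+#     jetstream = true;
+#     settings.jetstream.max_mem = "256M";
+#   };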
diff --git a/nixpkgs/nixos/modules/services/networking/nbd.nix b/nixpkgs/nixos/modules/services/networking/nbd.nix
new file mode 100644
index 000000000000..454380aa3154
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nbd.nix
@@ -0,0 +1,157 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nbd;
+  iniFields = with types; attrsOf (oneOf [ bool int float str ]);
+  # The `[generic]` section must come before all the others in the
+  # config file.  This means we can't just dump an attrset to INI
+  # because that sorts the sections by name.  Instead, we serialize it
+  # on its own first.
+  genericSection = {
+    generic = (cfg.server.extraOptions // {
+      user = "root";
+      group = "root";
+      port = cfg.server.listenPort;
+    } // (optionalAttrs (cfg.server.listenAddress != null) {
+      listenaddr = cfg.server.listenAddress;
+    }));
+  };
+  exportSections =
+    mapAttrs
+      (_: { path, allowAddresses, extraOptions }:
+        extraOptions // {
+          exportname = path;
+        } // (optionalAttrs (allowAddresses != null) {
+          authfile = pkgs.writeText "authfile" (concatStringsSep "\n" allowAddresses);
+        }))
+      cfg.server.exports;
+  serverConfig =
+    pkgs.writeText "nbd-server-config" ''
+      ${lib.generators.toINI {} genericSection}
+      ${lib.generators.toINI {} exportSections}
+    '';
+  splitLists =
+    partition
+      (path: hasPrefix "/dev/" path)
+      (mapAttrsToList (_: { path, ... }: path) cfg.server.exports);
+  allowedDevices = splitLists.right;
+  boundPaths = splitLists.wrong;
+in
+{
+  options = {
+    services.nbd = {
+      server = {
+        enable = mkEnableOption (lib.mdDoc "the Network Block Device (nbd) server");
+
+        listenPort = mkOption {
+          type = types.port;
+          default = 10809;
+          description = lib.mdDoc "Port to listen on. The port is NOT automatically opened in the firewall.";
+        };
+
+        extraOptions = mkOption {
+          type = iniFields;
+          default = {
+            allowlist = false;
+          };
+          description = lib.mdDoc ''
+            Extra options for the server. See
+            {manpage}`nbd-server(5)`.
+          '';
+        };
+
+        exports = mkOption {
+          description = lib.mdDoc "Files or block devices to make available over the network.";
+          default = { };
+          type = with types; attrsOf
+            (submodule {
+              options = {
+                path = mkOption {
+                  type = str;
+                  description = lib.mdDoc "File or block device to export.";
+                  example = "/dev/sdb1";
+                };
+
+                allowAddresses = mkOption {
+                  type = nullOr (listOf str);
+                  default = null;
+                  example = [ "10.10.0.0/24" "127.0.0.1" ];
+                  description = lib.mdDoc "IPs and subnets that are authorized to connect for this device. If not specified, the server will allow all connections.";
+                };
+
+                extraOptions = mkOption {
+                  type = iniFields;
+                  default = {
+                    flush = true;
+                    fua = true;
+                  };
+                  description = lib.mdDoc ''
+                    Extra options for this export. See
+                    {manpage}`nbd-server(5)`.
+                  '';
+                };
+              };
+            });
+        };
+
+        listenAddress = mkOption {
+          type = with types; nullOr str;
+          description = lib.mdDoc "Address to listen on. If not specified, the server will listen on all interfaces.";
+          default = null;
+          example = "10.10.0.1";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.server.enable {
+    assertions = [
+      {
+        assertion = !(cfg.server.exports ? "generic");
+        message = "services.nbd.server exports must not be named 'generic'";
+      }
+    ];
+
+    boot.kernelModules = [ "nbd" ];
+
+    systemd.services.nbd-server = {
+      after = [ "network-online.target" ];
+      before = [ "multi-user.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.nbd}/bin/nbd-server -C ${serverConfig}";
+        Type = "forking";
+
+        DeviceAllow = map (path: "${path} rw") allowedDevices;
+        BindPaths = boundPaths;
+
+        CapabilityBoundingSet = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = false;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "noaccess";
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = "AF_INET AF_INET6";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        UMask = "0077";
+      };
+    };
+  };
+}
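+# Usage sketch (device path and subnet are placeholders): export one block device to a
+# trusted subnet. Note that listenPort is not opened in the firewall automatically.
+#
+#   services.nbd.server = {
+#     enable = true;
+#     exports.backup = {
+#       path = "/dev/sdb1";
+#       allowAddresses = [ "10.10.0.0/24" ];
+#     };
+#   };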
diff --git a/nixpkgs/nixos/modules/services/networking/ncdns.nix b/nixpkgs/nixos/modules/services/networking/ncdns.nix
new file mode 100644
index 000000000000..cc97beb14e01
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ncdns.nix
@@ -0,0 +1,283 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfgs = config.services;
+  cfg  = cfgs.ncdns;
+
+  dataDir  = "/var/lib/ncdns";
+  username = "ncdns";
+
+  valueType = with types; oneOf [ int str bool path ]
+    // { description = "setting type (integer, string, bool or path)"; };
+
+  configType = with types; attrsOf (nullOr (either valueType configType))
+    // { description = ''
+          ncdns.conf configuration type. The format consists of an
+          attribute set of settings. Each setting can be either `null`,
+          a value or an attribute set. The allowed values are integers,
+          strings, booleans or paths.
+         '';
+       };
+
+  configFile = pkgs.runCommand "ncdns.conf"
+    { json = builtins.toJSON cfg.settings;
+      passAsFile = [ "json" ];
+    }
+    "${pkgs.remarshal}/bin/json2toml < $jsonPath > $out";
+
+  defaultFiles = {
+    public  = "${dataDir}/bit.key";
+    private = "${dataDir}/bit.private";
+    zonePublic  = "${dataDir}/bit-zone.key";
+    zonePrivate = "${dataDir}/bit-zone.private";
+  };
+
+  # if all keys are the default value
+  needsKeygen = all id (flip mapAttrsToList cfg.dnssec.keys
+    (n: v: v == getAttr n defaultFiles));
+
+  mkDefaultAttrs = mapAttrs (n: v: mkDefault v);
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.ncdns = {
+
+      enable = mkEnableOption (lib.mdDoc ''
+        ncdns, a Go daemon to bridge Namecoin to DNS.
+        To resolve .bit domains set `services.namecoind.enable = true;`
+        and an RPC username/password
+      '');
+
+      address = mkOption {
+        type = types.str;
+        default = "[::1]";
+        description = lib.mdDoc ''
+          The IP address the ncdns resolver will bind to.  Leave this unchanged
+          if you do not wish to directly expose the resolver.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5333;
+        description = lib.mdDoc ''
+          The port the ncdns resolver will bind to.
+        '';
+      };
+
+      identity.hostname = mkOption {
+        type = types.str;
+        default = config.networking.hostName;
+        defaultText = literalExpression "config.networking.hostName";
+        example = "example.com";
+        description = lib.mdDoc ''
+          The hostname of this ncdns instance, which defaults to the machine
+          hostname. If specified, ncdns lists the hostname as an NS record at
+          the zone apex:
+          ```
+          bit. IN NS ns1.example.com.
+          ```
+          If unset ncdns will generate an internal pseudo-hostname under the
+          zone, which will resolve to the value of
+          {option}`services.ncdns.identity.address`.
+          If you are only using ncdns locally you can ignore this.
+        '';
+      };
+
+      identity.hostmaster = mkOption {
+        type = types.str;
+        default = "";
+        example = "root@example.com";
+        description = lib.mdDoc ''
+          An email address for the SOA record at the bit zone.
+          If you are only using ncdns locally you can ignore this.
+        '';
+      };
+
+      identity.address = mkOption {
+        type = types.str;
+        default = "127.127.127.127";
+        description = lib.mdDoc ''
+          The IP address the hostname specified in
+          {option}`services.ncdns.identity.hostname` should resolve to.
+          If you are only using ncdns locally you can ignore this.
+        '';
+      };
+
+      dnssec.enable = mkEnableOption (lib.mdDoc ''
+        DNSSEC support in ncdns. This will generate KSK and ZSK keypairs
+        (unless provided via the options
+        {option}`services.ncdns.dnssec.publicKey`,
+        {option}`services.ncdns.dnssec.privateKey` etc.) and add a trust
+        anchor to recursive resolvers
+      '');
+
+      dnssec.keys.public = mkOption {
+        type = types.path;
+        default = defaultFiles.public;
+        description = lib.mdDoc ''
+          Path to the file containing the KSK public key.
+          The key can be generated using the `dnssec-keygen`
+          command, provided by the package `bind` as follows:
+          ```
+          $ dnssec-keygen -a RSASHA256 -3 -b 2048 -f KSK bit
+          ```
+        '';
+      };
+
+      dnssec.keys.private = mkOption {
+        type = types.path;
+        default = defaultFiles.private;
+        description = lib.mdDoc ''
+          Path to the file containing the KSK private key.
+        '';
+      };
+
+      dnssec.keys.zonePublic = mkOption {
+        type = types.path;
+        default = defaultFiles.zonePublic;
+        description = lib.mdDoc ''
+          Path to the file containing the ZSK public key.
+          The key can be generated using the `dnssec-keygen`
+          command, provided by the package `bind` as follows:
+          ```
+          $ dnssec-keygen -a RSASHA256 -3 -b 2048 bit
+          ```
+        '';
+      };
+
+      dnssec.keys.zonePrivate = mkOption {
+        type = types.path;
+        default = defaultFiles.zonePrivate;
+        description = lib.mdDoc ''
+          Path to the file containing the ZSK private key.
+        '';
+      };
+
+      settings = mkOption {
+        type = configType;
+        default = { };
+        example = literalExpression ''
+          { # enable webserver
+            ncdns.httplistenaddr = ":8202";
+
+            # synchronize TLS certs
+            certstore.nss = true;
+            # note: all paths are relative to the config file
+            certstore.nsscertdir =  "../../var/lib/ncdns";
+            certstore.nssdbdir = "../../home/alice/.pki/nssdb";
+          }
+        '';
+        description = lib.mdDoc ''
+          ncdns settings. Use this option to configure ncdns
+          settings not exposed in a NixOS option or to bypass one.
+          See the example ncdns.conf file at <https://github.com/namecoin/ncdns/blob/master/_doc/ncdns.conf.example>
+          for the available options.
+        '';
+      };
+
+    };
+
+    services.pdns-recursor.resolveNamecoin = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Resolve `.bit` top-level domains using ncdns and namecoin.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.pdns-recursor = mkIf cfgs.pdns-recursor.resolveNamecoin {
+      forwardZonesRecurse.bit = "${cfg.address}:${toString cfg.port}";
+      luaConfig =
+        if cfg.dnssec.enable
+          then ''readTrustAnchorsFromFile("${cfg.dnssec.keys.public}")''
+          else ''addNTA("bit", "namecoin DNSSEC disabled")'';
+    };
+
+    # Avoid pdns-recursor not finding the DNSSEC keys
+    systemd.services.pdns-recursor = mkIf cfgs.pdns-recursor.resolveNamecoin {
+      after = [ "ncdns.service" ];
+      wants = [ "ncdns.service" ];
+    };
+
+    services.ncdns.settings = mkDefaultAttrs {
+      ncdns =
+        { # Namecoin RPC
+          namecoinrpcaddress =
+            "${cfgs.namecoind.rpc.address}:${toString cfgs.namecoind.rpc.port}";
+          namecoinrpcusername = cfgs.namecoind.rpc.user;
+          namecoinrpcpassword = cfgs.namecoind.rpc.password;
+
+          # Identity
+          selfname = cfg.identity.hostname;
+          hostmaster = cfg.identity.hostmaster;
+          selfip = cfg.identity.address;
+
+          # Other
+          bind = "${cfg.address}:${toString cfg.port}";
+        }
+        // optionalAttrs cfg.dnssec.enable
+        { # DNSSEC
+          publickey  = "../.." + cfg.dnssec.keys.public;
+          privatekey = "../.." + cfg.dnssec.keys.private;
+          zonepublickey  = "../.." + cfg.dnssec.keys.zonePublic;
+          zoneprivatekey = "../.." + cfg.dnssec.keys.zonePrivate;
+        };
+
+      # Daemon
+      service.daemon = true;
+      xlog.journal = true;
+    };
+
+    users.users.ncdns = {
+      isSystemUser = true;
+      group = "ncdns";
+      description = "ncdns daemon user";
+    };
+    users.groups.ncdns = {};
+
+    systemd.services.ncdns = {
+      description = "ncdns daemon";
+      after    = [ "namecoind.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = "ncdns";
+        StateDirectory = "ncdns";
+        Restart = "on-failure";
+        ExecStart = "${pkgs.ncdns}/bin/ncdns -conf=${configFile}";
+      };
+
+      preStart = optionalString (cfg.dnssec.enable && needsKeygen) ''
+        cd ${dataDir}
+        if [ ! -e bit.key ]; then
+          ${pkgs.bind}/bin/dnssec-keygen -a RSASHA256 -3 -b 2048 bit
+          mv Kbit.*.key bit-zone.key
+          mv Kbit.*.private bit-zone.private
+          ${pkgs.bind}/bin/dnssec-keygen -a RSASHA256 -3 -b 2048 -f KSK bit
+          mv Kbit.*.key bit.key
+          mv Kbit.*.private bit.private
+        fi
+      '';
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
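+# Usage sketch (credentials are placeholders): resolve .bit domains locally by combining
+# namecoind, ncdns and pdns-recursor as wired up above.
+#
+#   services.namecoind.enable = true;
+#   services.namecoind.rpc.user = "namecoin-rpc";
+#   services.namecoind.rpc.password = "change-me";
+#   services.ncdns.enable = true;
+#   services.pdns-recursor.enable = true;
+#   services.pdns-recursor.resolveNamecoin = true;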
diff --git a/nixpkgs/nixos/modules/services/networking/ndppd.nix b/nixpkgs/nixos/modules/services/networking/ndppd.nix
new file mode 100644
index 000000000000..d221c95ae620
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ndppd.nix
@@ -0,0 +1,189 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ndppd;
+
+  render = s: f: concatStringsSep "\n" (mapAttrsToList f s);
+  prefer = a: b: if a != null then a else b;
+
+  ndppdConf = prefer cfg.configFile (pkgs.writeText "ndppd.conf" ''
+    route-ttl ${toString cfg.routeTTL}
+    ${render cfg.proxies (proxyInterfaceName: proxy: ''
+    proxy ${prefer proxy.interface proxyInterfaceName} {
+      router ${boolToString proxy.router}
+      timeout ${toString proxy.timeout}
+      ttl ${toString proxy.ttl}
+      ${render proxy.rules (ruleNetworkName: rule: ''
+      rule ${prefer rule.network ruleNetworkName} {
+        ${rule.method}${optionalString (rule.method == "iface") " ${rule.interface}"}
+      }'')}
+    }'')}
+  '');
+
+  proxy = types.submodule {
+    options = {
+      interface = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          Listen for any Neighbor Solicitation messages on this interface,
+          and respond to them according to a set of rules.
+          Defaults to the name of the attrset.
+        '';
+        default = null;
+      };
+      router = mkOption {
+        type = types.bool;
+        description = lib.mdDoc ''
+          Turns on or off the router flag for Neighbor Advertisement Messages.
+        '';
+        default = true;
+      };
+      timeout = mkOption {
+        type = types.int;
+        description = lib.mdDoc ''
+          Controls how long to wait for a Neighbor Advertisement Message before
+          invalidating the entry, in milliseconds.
+        '';
+        default = 500;
+      };
+      ttl = mkOption {
+        type = types.int;
+        description = lib.mdDoc ''
+          Controls how long a valid or invalid entry remains in the cache, in
+          milliseconds.
+        '';
+        default = 30000;
+      };
+      rules = mkOption {
+        type = types.attrsOf rule;
+        description = lib.mdDoc ''
+          Rules that target addresses are matched against. If no netmask
+          is provided, /128 is assumed. You may have several rules, and their
+          addresses may or may not overlap.
+        '';
+        default = {};
+      };
+    };
+  };
+
+  rule = types.submodule {
+    options = {
+      network = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The target address to match against. If no netmask
+          is provided, /128 is assumed. The addresses of several rules
+          may overlap.
+          Defaults to the name of the attrset.
+        '';
+        default = null;
+      };
+      method = mkOption {
+        type = types.enum [ "static" "iface" "auto" ];
+        description = lib.mdDoc ''
+          - `static`: Immediately answer any Neighbor Solicitation Messages
+            (if they match the IP rule).
+          - `iface`: Forward the Neighbor Solicitation Message through the specified
+            interface and only respond if a matching Neighbor Advertisement
+            Message is received.
+          - `auto`: Same as `iface`, but instead of manually specifying the outgoing
+            interface, check for a matching route in /proc/net/ipv6_route.
+        '';
+        default = "auto";
+      };
+      interface = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Interface to use when method is iface.";
+        default = null;
+      };
+    };
+  };
+
+in {
+  options.services.ndppd = {
+    enable = mkEnableOption (lib.mdDoc "daemon that proxies NDP (Neighbor Discovery Protocol) messages between interfaces");
+    interface = mkOption {
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        Interface which is on link-level with router.
+        (Legacy option, use services.ndppd.proxies.\<interface\>.rules.\<network\> instead)
+      '';
+      default = null;
+      example = "eth0";
+    };
+    network = mkOption {
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        Network that we proxy.
+        (Legacy option, use services.ndppd.proxies.\<interface\>.rules.\<network\> instead)
+      '';
+      default = null;
+      example = "1111::/64";
+    };
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Path to configuration file.";
+      default = null;
+    };
+    routeTTL = mkOption {
+      type = types.int;
+      description = lib.mdDoc ''
+        This tells 'ndppd' how often to reload the route file /proc/net/ipv6_route,
+        in milliseconds.
+      '';
+      default = 30000;
+    };
+    proxies = mkOption {
+      type = types.attrsOf proxy;
+      description = lib.mdDoc ''
+        This sets up a listener that listens for any Neighbor Solicitation
+        messages and responds to them according to a set of rules.
+      '';
+      default = {};
+      example = literalExpression ''
+        {
+          eth0.rules."1111::/64" = {};
+        }
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = mkIf (cfg.interface != null && cfg.network != null) [ ''
+      The options services.ndppd.interface and services.ndppd.network will probably be removed soon;
+      please use services.ndppd.proxies.<interface>.rules.<network> instead.
+    '' ];
+
+    services.ndppd.proxies = mkIf (cfg.interface != null && cfg.network != null) {
+      ${cfg.interface}.rules.${cfg.network} = {};
+    };
+
+    systemd.services.ndppd = {
+      description = "NDP Proxy Daemon";
+      documentation = [ "man:ndppd(1)" "man:ndppd.conf(5)" ];
+      after = [ "network-pre.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.ndppd}/bin/ndppd -c ${ndppdConf}";
+
+        # Sandboxing
+        CapabilityBoundingSet = "CAP_NET_RAW CAP_NET_ADMIN";
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = "AF_INET6 AF_PACKET AF_NETLINK";
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+      };
+    };
+  };
+}
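Not part of the patch: a usage sketch for the options above. The proxies attribute name selects the upstream interface to listen on, and each rule name is the proxied network; the interface names are placeholders:

    {
      services.ndppd = {
        enable = true;
        proxies."eth0" = {
          rules."2001:db8::/64" = {
            method = "iface";
            interface = "ve-guest";   # hypothetical downstream interface
          };
        };
      };
    }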
diff --git a/nixpkgs/nixos/modules/services/networking/nebula.nix b/nixpkgs/nixos/modules/services/networking/nebula.nix
new file mode 100644
index 000000000000..e1a8c6740f57
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nebula.nix
@@ -0,0 +1,253 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.nebula;
+  enabledNetworks = filterAttrs (n: v: v.enable) cfg.networks;
+
+  format = pkgs.formats.yaml {};
+
+  nameToId = netName: "nebula-${netName}";
+in
+{
+  # Interface
+
+  options = {
+    services.nebula = {
+      networks = mkOption {
+        description = lib.mdDoc "Nebula network definitions.";
+        default = {};
+        type = types.attrsOf (types.submodule {
+          options = {
+            enable = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc "Enable or disable this network.";
+            };
+
+            package = mkOption {
+              type = types.package;
+              default = pkgs.nebula;
+              defaultText = literalExpression "pkgs.nebula";
+              description = lib.mdDoc "Nebula derivation to use.";
+            };
+
+            ca = mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path to the certificate authority certificate.";
+              example = "/etc/nebula/ca.crt";
+            };
+
+            cert = mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path to the host certificate.";
+              example = "/etc/nebula/host.crt";
+            };
+
+            key = mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path to the host key.";
+              example = "/etc/nebula/host.key";
+            };
+
+            staticHostMap = mkOption {
+              type = types.attrsOf (types.listOf (types.str));
+              default = {};
+              description = lib.mdDoc ''
+                The static host map defines a set of hosts with fixed IP addresses on the internet (or any network).
+                A host can have multiple fixed IP addresses defined here, and nebula will try each when establishing a tunnel.
+              '';
+              example = { "192.168.100.1" = [ "100.64.22.11:4242" ]; };
+            };
+
+            isLighthouse = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc "Whether this node is a lighthouse.";
+            };
+
+            isRelay = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc "Whether this node is a relay.";
+            };
+
+            lighthouses = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = lib.mdDoc ''
+                List of IPs of lighthouse hosts this node should report to and query from. This should be empty on lighthouse
+                nodes. The IPs should be the lighthouse's Nebula IPs, not their external IPs.
+              '';
+              example = [ "192.168.100.1" ];
+            };
+
+            relays = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = lib.mdDoc ''
+                List of IPs of relays that this node should allow traffic from.
+              '';
+              example = [ "192.168.100.1" ];
+            };
+
+            listen.host = mkOption {
+              type = types.str;
+              default = "0.0.0.0";
+              description = lib.mdDoc "IP address to listen on.";
+            };
+
+            listen.port = mkOption {
+              type = types.port;
+              default = 4242;
+              description = lib.mdDoc "Port number to listen on.";
+            };
+
+            tun.disable = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                When tun is disabled, a lighthouse can be started without a local tun interface (and therefore without root).
+              '';
+            };
+
+            tun.device = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = lib.mdDoc "Name of the tun device. Defaults to nebula.\${networkName}.";
+            };
+
+            firewall.outbound = mkOption {
+              type = types.listOf types.attrs;
+              default = [];
+              description = lib.mdDoc "Firewall rules for outbound traffic.";
+              example = [ { port = "any"; proto = "any"; host = "any"; } ];
+            };
+
+            firewall.inbound = mkOption {
+              type = types.listOf types.attrs;
+              default = [];
+              description = lib.mdDoc "Firewall rules for inbound traffic.";
+              example = [ { port = "any"; proto = "any"; host = "any"; } ];
+            };
+
+            settings = mkOption {
+              type = format.type;
+              default = {};
+              description = lib.mdDoc ''
+                Nebula configuration. Refer to
+                <https://github.com/slackhq/nebula/blob/master/examples/config.yml>
+                for details on supported values.
+              '';
+              example = literalExpression ''
+                {
+                  lighthouse.dns = {
+                    host = "0.0.0.0";
+                    port = 53;
+                  };
+                }
+              '';
+            };
+          };
+        });
+      };
+    };
+  };
+
+  # Implementation
+  config = mkIf (enabledNetworks != {}) {
+    systemd.services = mkMerge (mapAttrsToList (netName: netCfg:
+      let
+        networkId = nameToId netName;
+        settings = recursiveUpdate {
+          pki = {
+            ca = netCfg.ca;
+            cert = netCfg.cert;
+            key = netCfg.key;
+          };
+          static_host_map = netCfg.staticHostMap;
+          lighthouse = {
+            am_lighthouse = netCfg.isLighthouse;
+            hosts = netCfg.lighthouses;
+          };
+          relay = {
+            am_relay = netCfg.isRelay;
+            relays = netCfg.relays;
+            use_relays = true;
+          };
+          listen = {
+            host = netCfg.listen.host;
+            port = netCfg.listen.port;
+          };
+          tun = {
+            disabled = netCfg.tun.disable;
+            dev = if (netCfg.tun.device != null) then netCfg.tun.device else "nebula.${netName}";
+          };
+          firewall = {
+            inbound = netCfg.firewall.inbound;
+            outbound = netCfg.firewall.outbound;
+          };
+        } netCfg.settings;
+        configFile = format.generate "nebula-config-${netName}.yml" settings;
+        in
+        {
+          # Create the systemd service for Nebula.
+          "nebula@${netName}" = {
+            description = "Nebula VPN service for ${netName}";
+            wants = [ "basic.target" ];
+            after = [ "basic.target" "network.target" ];
+            before = [ "sshd.service" ];
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              Type = "simple";
+              Restart = "always";
+              ExecStart = "${netCfg.package}/bin/nebula -config ${configFile}";
+              UMask = "0027";
+              CapabilityBoundingSet = "CAP_NET_ADMIN";
+              AmbientCapabilities = "CAP_NET_ADMIN";
+              LockPersonality = true;
+              NoNewPrivileges = true;
+              PrivateDevices = false; # needs access to /dev/net/tun (below)
+              DeviceAllow = "/dev/net/tun rw";
+              DevicePolicy = "closed";
+              PrivateTmp = true;
+              PrivateUsers = false; # CapabilityBoundingSet needs to apply to the host namespace
+              ProtectClock = true;
+              ProtectControlGroups = true;
+              ProtectHome = true;
+              ProtectHostname = true;
+              ProtectKernelLogs = true;
+              ProtectKernelModules = true;
+              ProtectKernelTunables = true;
+              ProtectProc = "invisible";
+              ProtectSystem = "strict";
+              RestrictNamespaces = true;
+              RestrictSUIDSGID = true;
+              User = networkId;
+              Group = networkId;
+            };
+            unitConfig.StartLimitIntervalSec = 0; # ensure Restart=always is always honoured (networks can go down for arbitrarily long)
+          };
+        }) enabledNetworks);
+
+    # Open the chosen ports for UDP.
+    networking.firewall.allowedUDPPorts =
+      unique (mapAttrsToList (netName: netCfg: netCfg.listen.port) enabledNetworks);
+
+    # Create the service users and groups.
+    users.users = mkMerge (mapAttrsToList (netName: netCfg:
+      {
+        ${nameToId netName} = {
+          group = nameToId netName;
+          description = "Nebula service user for network ${netName}";
+          isSystemUser = true;
+        };
+      }) enabledNetworks);
+
+    users.groups = mkMerge (mapAttrsToList (netName: netCfg: {
+      ${nameToId netName} = {};
+    }) enabledNetworks);
+  };
+}
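Not part of the patch: a usage sketch for the options above; certificate paths and addresses are placeholders:

    {
      services.nebula.networks."mesh" = {
        ca   = "/etc/nebula/ca.crt";
        cert = "/etc/nebula/host.crt";
        key  = "/etc/nebula/host.key";
        staticHostMap."192.168.100.1" = [ "lighthouse.example.org:4242" ];
        lighthouses = [ "192.168.100.1" ];
        firewall.inbound  = [ { port = "any"; proto = "any"; host = "any"; } ];
        firewall.outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
      };
    }

With this configuration the module starts a nebula@mesh unit, opens UDP port 4242 in the firewall, and runs the daemon as the nebula-mesh user and group it creates.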
diff --git a/nixpkgs/nixos/modules/services/networking/netbird.nix b/nixpkgs/nixos/modules/services/networking/netbird.nix
new file mode 100644
index 000000000000..647c0ce3e6d1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/netbird.nix
@@ -0,0 +1,65 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.netbird;
+  kernel = config.boot.kernelPackages;
+  interfaceName = "wt0";
+in {
+  meta.maintainers = with maintainers; [ misuzu ];
+
+  options.services.netbird = {
+    enable = mkEnableOption (lib.mdDoc "Netbird daemon");
+    package = mkOption {
+      type = types.package;
+      default = pkgs.netbird;
+      defaultText = literalExpression "pkgs.netbird";
+      description = lib.mdDoc "The package to use for netbird";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.extraModulePackages = optional (versionOlder kernel.kernel.version "5.6") kernel.wireguard;
+
+    environment.systemPackages = [ cfg.package ];
+
+    networking.dhcpcd.denyInterfaces = [ interfaceName ];
+
+    systemd.network.networks."50-netbird" = mkIf config.networking.useNetworkd {
+      matchConfig = {
+        Name = interfaceName;
+      };
+      linkConfig = {
+        Unmanaged = true;
+        ActivationPolicy = "manual";
+      };
+    };
+
+    systemd.services.netbird = {
+      description = "A WireGuard-based mesh network that connects your devices into a single private network";
+      documentation = [ "https://netbird.io/docs/" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [
+        openresolv
+      ];
+      serviceConfig = {
+        Environment = [
+          "NB_CONFIG=/var/lib/netbird/config.json"
+          "NB_LOG_FILE=console"
+        ];
+        ExecStart = "${cfg.package}/bin/netbird service run";
+        Restart = "always";
+        RuntimeDirectory = "netbird";
+        StateDirectory = "netbird";
+        WorkingDirectory = "/var/lib/netbird";
+      };
+      unitConfig = {
+        StartLimitInterval = 5;
+        StartLimitBurst = 10;
+      };
+      stopIfChanged = false;
+    };
+  };
+}
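Not part of the patch: enabling the module is a single option; everything else above (the wt0 interface handling, the /var/lib/netbird state directory, the WireGuard module on pre-5.6 kernels) follows from it:

    {
      services.netbird.enable = true;
    }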
diff --git a/nixpkgs/nixos/modules/services/networking/netclient.nix b/nixpkgs/nixos/modules/services/networking/netclient.nix
new file mode 100644
index 000000000000..124735fd716a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/netclient.nix
@@ -0,0 +1,27 @@
+{ config, pkgs, lib, ... }:
+let
+  cfg = config.services.netclient;
+in
+{
+  meta.maintainers = with lib.maintainers; [ wexder ];
+
+  options.services.netclient = {
+    enable = lib.mkEnableOption (lib.mdDoc "Netclient Daemon");
+    package = lib.mkPackageOptionMD pkgs "netclient" { };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    systemd.services.netclient = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      description = "Netclient Daemon";
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${lib.getExe cfg.package} daemon";
+        Restart = "on-failure";
+        RestartSec = "15s";
+      };
+    };
+  };
+}
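Not part of the patch: a minimal sketch. The service only runs the daemon; joining a Netmaker network is still done imperatively with the netclient CLI (for example with a join token):

    {
      services.netclient.enable = true;
    }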
diff --git a/nixpkgs/nixos/modules/services/networking/networkd-dispatcher.nix b/nixpkgs/nixos/modules/services/networking/networkd-dispatcher.nix
new file mode 100644
index 000000000000..c5319ca7b88a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/networkd-dispatcher.nix
@@ -0,0 +1,98 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.networkd-dispatcher;
+
+in {
+
+  options = {
+    services.networkd-dispatcher = {
+
+      enable = mkEnableOption (mdDoc ''
+        Networkd-dispatcher service for systemd-networkd connection status
+        changes. See the [upstream instructions](https://gitlab.com/craftyguy/networkd-dispatcher)
+        for usage.
+      '');
+
+      rules = mkOption {
+        default = {};
+        example = lib.literalExpression ''
+          { "restart-tor" = {
+              onState = ["routable" "off"];
+              script = '''
+                #!''${pkgs.runtimeShell}
+                if [[ $IFACE == "wlan0" && $AdministrativeState == "configured" ]]; then
+                  echo "Restarting Tor ..."
+                  systemctl restart tor
+                fi
+                exit 0
+              ''';
+            };
+          };
+        '';
+        description = lib.mdDoc ''
+          Declarative configuration of networkd-dispatcher rules. See
+          the [upstream instructions](https://gitlab.com/craftyguy/networkd-dispatcher)
+          for an introduction and example scripts.
+        '';
+        type = types.attrsOf (types.submodule {
+          options = {
+            onState = mkOption {
+              type = types.listOf (types.enum [
+                "routable" "dormant" "no-carrier" "off" "carrier" "degraded"
+                "configuring" "configured"
+              ]);
+              default = null;
+              description = lib.mdDoc ''
+                List of names of the systemd-networkd operational states which
+                should trigger the script. See <https://www.freedesktop.org/software/systemd/man/networkctl.html>
+                for a description of the specific state type.
+              '';
+            };
+            script = mkOption {
+              type = types.lines;
+              description = lib.mdDoc ''
+                Shell commands executed on specified operational states.
+              '';
+            };
+          };
+        });
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd = {
+      packages = [ pkgs.networkd-dispatcher ];
+      services.networkd-dispatcher = {
+        wantedBy = [ "multi-user.target" ];
+        # Override existing ExecStart definition
+        serviceConfig.ExecStart = let
+          scriptDir = pkgs.symlinkJoin {
+            name = "networkd-dispatcher-script-dir";
+            paths = lib.mapAttrsToList (name: cfg:
+              (map(state:
+                pkgs.writeTextFile {
+                  inherit name;
+                  text = cfg.script;
+                  destination = "/${state}.d/${name}";
+                  executable = true;
+                }
+              ) cfg.onState)
+            ) cfg.rules;
+          };
+        in [
+          ""
+          "${pkgs.networkd-dispatcher}/bin/networkd-dispatcher -v --script-dir ${scriptDir} $networkd_dispatcher_args"
+        ];
+      };
+    };
+
+  };
+}
+
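Not part of the patch: a smaller sketch than the restart-tor example above, writing a marker file whenever any interface becomes routable; the file path is arbitrary:

    { pkgs, ... }:
    {
      services.networkd-dispatcher = {
        enable = true;
        rules."log-routable" = {
          onState = [ "routable" ];
          script = ''
            #!${pkgs.runtimeShell}
            echo "$IFACE is routable" > /run/last-routable-iface
          '';
        };
      };
    }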
diff --git a/nixpkgs/nixos/modules/services/networking/networkmanager.nix b/nixpkgs/nixos/modules/services/networking/networkmanager.nix
new file mode 100644
index 000000000000..d32712c8243d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/networkmanager.nix
@@ -0,0 +1,652 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.networking.networkmanager;
+  ini = pkgs.formats.ini { };
+
+  delegateWireless = config.networking.wireless.enable == true && cfg.unmanaged != [ ];
+
+  enableIwd = cfg.wifi.backend == "iwd";
+
+  mkValue = v:
+    if v == true then "yes"
+    else if v == false then "no"
+    else if lib.isInt v then toString v
+    else v;
+
+  mkSection = name: attrs: ''
+    [${name}]
+    ${
+      lib.concatStringsSep "\n"
+        (lib.mapAttrsToList
+          (k: v: "${k}=${mkValue v}")
+          (lib.filterAttrs
+            (k: v: v != null)
+            attrs))
+    }
+  '';
+
+  configFile = pkgs.writeText "NetworkManager.conf" (lib.concatStringsSep "\n" [
+    (mkSection "main" {
+      plugins = "keyfile";
+      inherit (cfg) dhcp dns;
+      # If resolvconf is disabled that means that resolv.conf is managed by some other module.
+      rc-manager =
+        if config.networking.resolvconf.enable then "resolvconf"
+        else "unmanaged";
+    })
+    (mkSection "keyfile" {
+      unmanaged-devices =
+        if cfg.unmanaged == [ ] then null
+        else lib.concatStringsSep ";" cfg.unmanaged;
+    })
+    (mkSection "logging" {
+      audit = config.security.audit.enable;
+      level = cfg.logLevel;
+    })
+    (mkSection "connection" cfg.connectionConfig)
+    (mkSection "device" {
+      "wifi.scan-rand-mac-address" = cfg.wifi.scanRandMacAddress;
+      "wifi.backend" = cfg.wifi.backend;
+    })
+    cfg.extraConfig
+  ]);
+
+  /*
+    [network-manager]
+    Identity=unix-group:networkmanager
+    Action=org.freedesktop.NetworkManager.*
+    ResultAny=yes
+    ResultInactive=no
+    ResultActive=yes
+
+    [modem-manager]
+    Identity=unix-group:networkmanager
+    Action=org.freedesktop.ModemManager*
+    ResultAny=yes
+    ResultInactive=no
+    ResultActive=yes
+  */
+  polkitConf = ''
+    polkit.addRule(function(action, subject) {
+      if (
+        subject.isInGroup("networkmanager")
+        && (action.id.indexOf("org.freedesktop.NetworkManager.") == 0
+            || action.id.indexOf("org.freedesktop.ModemManager")  == 0
+        ))
+          { return polkit.Result.YES; }
+    });
+  '';
+
+  ns = xs: pkgs.writeText "nameservers" (
+    concatStrings (map (s: "nameserver ${s}\n") xs)
+  );
+
+  overrideNameserversScript = pkgs.writeScript "02overridedns" ''
+    #!/bin/sh
+    PATH=${with pkgs; makeBinPath [ gnused gnugrep coreutils ]}
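+    # Rebuild resolv.conf as: non-nameserver lines, insertNameservers, existing nameservers (excluding ours), appendNameservers.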
+    tmp=$(mktemp)
+    sed '/nameserver /d' /etc/resolv.conf > $tmp
+    grep 'nameserver ' /etc/resolv.conf | \
+      grep -vf ${ns (cfg.appendNameservers ++ cfg.insertNameservers)} > $tmp.ns
+    cat $tmp ${ns cfg.insertNameservers} $tmp.ns ${ns cfg.appendNameservers} > /etc/resolv.conf
+    rm -f $tmp $tmp.ns
+  '';
+
+  dispatcherTypesSubdirMap = {
+    basic = "";
+    pre-up = "pre-up.d/";
+    pre-down = "pre-down.d/";
+  };
+
+  macAddressOpt = mkOption {
+    type = types.either types.str (types.enum [ "permanent" "preserve" "random" "stable" ]);
+    default = "preserve";
+    example = "00:11:22:33:44:55";
+    description = lib.mdDoc ''
+      Set the MAC address of the interface.
+
+      - `"XX:XX:XX:XX:XX:XX"`: MAC address of the interface
+      - `"permanent"`: Use the permanent MAC address of the device
+      - `"preserve"`: Don’t change the MAC address of the device upon activation
+      - `"random"`: Generate a randomized value upon each connect
+      - `"stable"`: Generate a stable, hashed MAC address
+    '';
+  };
+
+  packages = [
+    pkgs.modemmanager
+    pkgs.networkmanager
+  ]
+  ++ cfg.plugins
+  ++ lib.optionals (!delegateWireless && !enableIwd) [
+    pkgs.wpa_supplicant
+  ];
+
+in
+{
+
+  meta = {
+    maintainers = teams.freedesktop.members;
+  };
+
+  ###### interface
+
+  options = {
+
+    networking.networkmanager = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to use NetworkManager to obtain an IP address and other
+          configuration for all network interfaces that are not manually
+          configured. If enabled, a group `networkmanager`
+          will be created. Add all users that should have permission
+          to change network settings to this group.
+        '';
+      };
+
+      connectionConfig = mkOption {
+        type = with types; attrsOf (nullOr (oneOf [
+          bool
+          int
+          str
+        ]));
+        default = { };
+        description = lib.mdDoc ''
+          Configuration for the [connection] section of NetworkManager.conf.
+          Refer to
+          [
+            https://developer.gnome.org/NetworkManager/stable/NetworkManager.conf.html#id-1.2.3.11
+          ](https://developer.gnome.org/NetworkManager/stable/NetworkManager.conf.html)
+          or
+          {manpage}`NetworkManager.conf(5)`
+          for more information.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration appended to the generated NetworkManager.conf.
+          Refer to
+          [
+            https://developer.gnome.org/NetworkManager/stable/NetworkManager.conf.html
+          ](https://developer.gnome.org/NetworkManager/stable/NetworkManager.conf.html)
+          or
+          {manpage}`NetworkManager.conf(5)`
+          for more information.
+        '';
+      };
+
+      unmanaged = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of interfaces that will not be managed by NetworkManager.
+          Interface name can be specified here, but if you need more fidelity,
+          refer to
+          [
+            https://developer.gnome.org/NetworkManager/stable/NetworkManager.conf.html#device-spec
+          ](https://developer.gnome.org/NetworkManager/stable/NetworkManager.conf.html#device-spec)
+          or the "Device List Format" Appendix of
+          {manpage}`NetworkManager.conf(5)`.
+        '';
+      };
+
+      plugins = mkOption {
+        type =
+          let
+            networkManagerPluginPackage = types.package // {
+              description = "NetworkManager plug-in";
+              check =
+                p:
+                lib.assertMsg
+                  (types.package.check p
+                    && p ? networkManagerPlugin
+                    && lib.isString p.networkManagerPlugin)
+                  ''
+                    Package ‘${p.name}’ is not a NetworkManager plug-in.
+                    Those need to have a ‘networkManagerPlugin’ attribute.
+                  '';
+            };
+          in
+          types.listOf networkManagerPluginPackage;
+        default = [ ];
+        description = lib.mdDoc ''
+          List of NetworkManager plug-ins to enable.
+          Some plug-ins are enabled by the NetworkManager module by default.
+        '';
+      };
+
+      dhcp = mkOption {
+        type = types.enum [ "dhcpcd" "internal" ];
+        default = "internal";
+        description = lib.mdDoc ''
+          Which program (or internal library) should be used for DHCP.
+        '';
+      };
+
+      logLevel = mkOption {
+        type = types.enum [ "OFF" "ERR" "WARN" "INFO" "DEBUG" "TRACE" ];
+        default = "WARN";
+        description = lib.mdDoc ''
+          Set the default logging verbosity level.
+        '';
+      };
+
+      appendNameservers = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          A list of name servers that should be appended
+          to the ones configured in NetworkManager or received by DHCP.
+        '';
+      };
+
+      insertNameservers = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          A list of name servers that should be inserted before
+          the ones configured in NetworkManager or received by DHCP.
+        '';
+      };
+
+      ethernet.macAddress = macAddressOpt;
+
+      wifi = {
+        macAddress = macAddressOpt;
+
+        backend = mkOption {
+          type = types.enum [ "wpa_supplicant" "iwd" ];
+          default = "wpa_supplicant";
+          description = lib.mdDoc ''
+            Specify the Wi-Fi backend used for the device.
+            Currently supported are {option}`wpa_supplicant` or {option}`iwd` (experimental).
+          '';
+        };
+
+        powersave = mkOption {
+          type = types.nullOr types.bool;
+          default = null;
+          description = lib.mdDoc ''
+            Whether to enable Wi-Fi power saving.
+          '';
+        };
+
+        scanRandMacAddress = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to enable MAC address randomization of a Wi-Fi device
+            during scanning.
+          '';
+        };
+      };
+
+      dns = mkOption {
+        type = types.enum [ "default" "dnsmasq" "unbound" "systemd-resolved" "none" ];
+        default = "default";
+        description = lib.mdDoc ''
+          Set the DNS (`resolv.conf`) processing mode.
+
+          A description of these modes can be found in the main section of
+          [
+            https://developer.gnome.org/NetworkManager/stable/NetworkManager.conf.html
+          ](https://developer.gnome.org/NetworkManager/stable/NetworkManager.conf.html)
+          or in
+          {manpage}`NetworkManager.conf(5)`.
+        '';
+      };
+
+      dispatcherScripts = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            source = mkOption {
+              type = types.path;
+              description = lib.mdDoc ''
+                Path to the hook script.
+              '';
+            };
+
+            type = mkOption {
+              type = types.enum (attrNames dispatcherTypesSubdirMap);
+              default = "basic";
+              description = lib.mdDoc ''
+                Dispatcher hook type. Look up the hooks described at
+                [https://developer.gnome.org/NetworkManager/stable/NetworkManager.html](https://developer.gnome.org/NetworkManager/stable/NetworkManager.html)
+                and choose the type depending on the output folder.
+                You should then filter the event type (e.g., "up"/"down") from within your script.
+              '';
+            };
+          };
+        });
+        default = [ ];
+        example = literalExpression ''
+          [ {
+            source = pkgs.writeText "upHook" '''
+              if [ "$2" != "up" ]; then
+                logger "exit: event $2 != up"
+                exit
+              fi
+
+              # coreutils and iproute are in PATH too
+              logger "Device $DEVICE_IFACE coming up"
+            ''';
+            type = "basic";
+          } ]
+        '';
+        description = lib.mdDoc ''
+          A list of scripts which will be executed in response to network events.
+        '';
+      };
+
+      enableStrongSwan = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the StrongSwan plugin.
+
+          If you enable this option the
+          `networkmanager_strongswan` plugin will be added to
+          the {option}`networking.networkmanager.plugins` option
+          so you don't need to do that yourself.
+        '';
+      };
+
+      fccUnlockScripts = mkOption {
+        type = types.listOf (types.submodule {
+          options = {
+            id = mkOption {
+              type = types.str;
+              description = lib.mdDoc "vid:pid of either the PCI or USB vendor and product ID";
+            };
+            path = mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path to the unlock script";
+            };
+          };
+        });
+        default = [ ];
+        example = literalExpression ''[{ id = "03f0:4e1d"; path = "''${pkgs.modemmanager}/share/ModemManager/fcc-unlock.available.d/03f0:4e1d"; }]'';
+        description = lib.mdDoc ''
+          List of FCC unlock scripts to enable on the system, behaving as described in
+          https://modemmanager.org/docs/modemmanager/fcc-unlock/#integration-with-third-party-fcc-unlock-tools.
+        '';
+      };
+      ensureProfiles = {
+        profiles = with lib.types; mkOption {
+          type = attrsOf (submodule {
+            freeformType = ini.type;
+
+            options = {
+              connection = {
+                id = lib.mkOption {
+                  type = str;
+                  description = "This is the name that will be displayed by NetworkManager and GUIs.";
+                };
+                type = lib.mkOption {
+                  type = str;
+                  description = "The connection type defines the connection kind, like vpn, wireguard, gsm, wifi and more.";
+                  example = "vpn";
+                };
+              };
+            };
+          });
+          apply = (lib.filterAttrsRecursive (n: v: v != { }));
+          default = { };
+          example = {
+            home-wifi = {
+              connection = {
+                id = "home-wifi";
+                type = "wifi";
+                permissions = "";
+              };
+              wifi = {
+                mac-address-blacklist = "";
+                mode = "infrastructure";
+                ssid = "Home Wi-Fi";
+              };
+              wifi-security = {
+                auth-alg = "open";
+                key-mgmt = "wpa-psk";
+                psk = "$HOME_WIFI_PASSWORD";
+              };
+              ipv4 = {
+                dns-search = "";
+                method = "auto";
+              };
+              ipv6 = {
+                addr-gen-mode = "stable-privacy";
+                dns-search = "";
+                method = "auto";
+              };
+            };
+          };
+          description = lib.mdDoc ''
+            Declaratively define NetworkManager profiles. You can find information about the generated file format [here](https://networkmanager.dev/docs/api/latest/nm-settings-keyfile.html) and [here](https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/8/html/configuring_and_managing_networking/assembly_networkmanager-connection-profiles-in-keyfile-format_configuring-and-managing-networking).
+            Your current profiles are most likely stored in `/etc/NetworkManager/system-connections`, and there is [a tool](https://github.com/janik-haag/nm2nix) to convert them to the needed Nix code.
+            If you add a new ad-hoc connection via a GUI, nmtui or anything similar, it should just work together with the declarative ones.
+            If you edit a declarative profile, NetworkManager will move it to persistent storage and treat it like an ad-hoc one,
+            but there will be two profiles as soon as the systemd unit from this option runs again, which can be confusing since NetworkManager tools will then display two profiles with the same name and slightly different settings depending on what you edited.
+            A profile won't be deleted, even if it's removed from the config, until the system reboots, because that's when NetworkManager clears its temp directory.
+          '';
+        };
+        environmentFiles = mkOption {
+          default = [];
+          type = types.listOf types.path;
+          example = [ "/run/secrets/network-manager.env" ];
+          description = lib.mdDoc ''
+            Files to load as environment file. Environment variables from this file
+            will be substituted into the static configuration file using [envsubst](https://github.com/a8m/envsubst).
+          '';
+        };
+      };
+    };
+  };
+
+  imports = [
+    (mkRenamedOptionModule
+      [ "networking" "networkmanager" "packages" ]
+      [ "networking" "networkmanager" "plugins" ])
+    (mkRenamedOptionModule [ "networking" "networkmanager" "useDnsmasq" ] [ "networking" "networkmanager" "dns" ])
+    (mkRemovedOptionModule [ "networking" "networkmanager" "enableFccUnlock" ] ''
+      This option was removed, because using bundled FCC unlock scripts is risky,
+      might conflict with vendor-provided unlock scripts, and should
+      be a conscious decision on a per-device basis.
+      Instead it's recommended to use the
+      `networking.networkmanager.fccUnlockScripts` option.
+    '')
+    (mkRemovedOptionModule [ "networking" "networkmanager" "dynamicHosts" ] ''
+      This option was removed because allowing (multiple) regular users to
+      override host entries affecting the whole system opens up a huge attack
+      vector. There seem to be very rare cases where this might be useful.
+      Consider setting system-wide host entries using networking.hosts, provide
+      them via the DNS server in your network, or use environment.etc
+      to add a file into /etc/NetworkManager/dnsmasq.d reconfiguring hostsdir.
+    '')
+    (mkRemovedOptionModule [ "networking" "networkmanager" "firewallBackend" ] ''
+      This option was removed because NixOS now uses iptables-nftables-compat even when using iptables, so NetworkManager uses the nftables backend unconditionally.
+    '')
+  ];
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = config.networking.wireless.enable == true -> cfg.unmanaged != [ ];
+        message = ''
+          You cannot use networking.networkmanager together with networking.wireless,
+          unless you mark some interfaces as `unmanaged` by NetworkManager.
+        '';
+      }
+    ];
+
+    hardware.wirelessRegulatoryDatabase = true;
+
+    environment.etc = {
+      "NetworkManager/NetworkManager.conf".source = configFile;
+    }
+    // builtins.listToAttrs (map
+      (pkg: nameValuePair "NetworkManager/${pkg.networkManagerPlugin}" {
+        source = "${pkg}/lib/NetworkManager/${pkg.networkManagerPlugin}";
+      })
+      cfg.plugins)
+    // builtins.listToAttrs (map
+      (e: nameValuePair "ModemManager/fcc-unlock.d/${e.id}" {
+        source = e.path;
+      })
+      cfg.fccUnlockScripts)
+    // optionalAttrs (cfg.appendNameservers != [ ] || cfg.insertNameservers != [ ])
+      {
+        "NetworkManager/dispatcher.d/02overridedns".source = overrideNameserversScript;
+      }
+    // listToAttrs (lib.imap1
+      (i: s:
+        {
+          name = "NetworkManager/dispatcher.d/${dispatcherTypesSubdirMap.${s.type}}03userscript${lib.fixedWidthNumber 4 i}";
+          value = { mode = "0544"; inherit (s) source; };
+        })
+      cfg.dispatcherScripts);
+
+    environment.systemPackages = packages;
+
+    users.groups = {
+      networkmanager.gid = config.ids.gids.networkmanager;
+      nm-openvpn.gid = config.ids.gids.nm-openvpn;
+    };
+
+    users.users = {
+      nm-openvpn = {
+        uid = config.ids.uids.nm-openvpn;
+        group = "nm-openvpn";
+        extraGroups = [ "networkmanager" ];
+      };
+      nm-iodine = {
+        isSystemUser = true;
+        group = "networkmanager";
+      };
+    };
+
+    systemd.packages = packages;
+
+    systemd.tmpfiles.rules = [
+      "d /etc/NetworkManager/system-connections 0700 root root -"
+      "d /etc/ipsec.d 0700 root root -"
+      "d /var/lib/NetworkManager-fortisslvpn 0700 root root -"
+
+      "d /var/lib/misc 0755 root root -" # for dnsmasq.leases
+      # ppp isn't able to mkdir that directory at runtime
+      "d /run/pppd/lock 0700 root root -"
+    ];
+
+    systemd.services.NetworkManager = {
+      wantedBy = [ "network.target" ];
+      restartTriggers = [ configFile ];
+
+      aliases = [ "dbus-org.freedesktop.NetworkManager.service" ];
+
+      serviceConfig = {
+        StateDirectory = "NetworkManager";
+        StateDirectoryMode = 755; # not sure if this really needs to be 755
+      };
+    };
+
+    systemd.services.NetworkManager-wait-online = {
+      wantedBy = [ "network-online.target" ];
+    };
+
+    systemd.services.ModemManager.aliases = [ "dbus-org.freedesktop.ModemManager1.service" ];
+
+    systemd.services.NetworkManager-dispatcher = {
+      wantedBy = [ "network.target" ];
+      restartTriggers = [ configFile overrideNameserversScript ];
+
+      # useful binaries for user-specified hooks
+      path = [ pkgs.iproute2 pkgs.util-linux pkgs.coreutils ];
+      aliases = [ "dbus-org.freedesktop.nm-dispatcher.service" ];
+    };
+
+    systemd.services.NetworkManager-ensure-profiles = mkIf (cfg.ensureProfiles.profiles != { }) {
+      description = "Ensure that NetworkManager declarative profiles are created";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "network-online.target" ];
+      script = let
+        path = id: "/run/NetworkManager/system-connections/${id}.nmconnection";
+      in ''
+        mkdir -p /run/NetworkManager/system-connections
+      '' + lib.concatMapStringsSep "\n"
+        (profile: ''
+          ${pkgs.envsubst}/bin/envsubst -i ${ini.generate (lib.escapeShellArg profile.n) profile.v} > ${path (lib.escapeShellArg profile.n)}
+        '') (lib.mapAttrsToList (n: v: { inherit n v; }) cfg.ensureProfiles.profiles)
+      + ''
+        if systemctl is-active --quiet NetworkManager; then
+          ${pkgs.networkmanager}/bin/nmcli connection reload
+        fi
+      '';
+      serviceConfig = {
+        EnvironmentFile = cfg.ensureProfiles.environmentFiles;
+        UMask = "0177";
+        Type = "oneshot";
+      };
+    };
+
+    # Turn off NixOS' network management when networking is managed entirely by NetworkManager
+    networking = mkMerge [
+      (mkIf (!delegateWireless) {
+        useDHCP = false;
+      })
+
+      {
+        networkmanager.plugins = with pkgs; [
+          networkmanager-fortisslvpn
+          networkmanager-iodine
+          networkmanager-l2tp
+          networkmanager-openconnect
+          networkmanager-openvpn
+          networkmanager-vpnc
+          networkmanager-sstp
+        ];
+      }
+
+      (mkIf cfg.enableStrongSwan {
+        networkmanager.plugins = [ pkgs.networkmanager_strongswan ];
+      })
+
+      (mkIf enableIwd {
+        wireless.iwd.enable = true;
+      })
+
+      {
+        networkmanager.connectionConfig = {
+          "ethernet.cloned-mac-address" = cfg.ethernet.macAddress;
+          "wifi.cloned-mac-address" = cfg.wifi.macAddress;
+          "wifi.powersave" =
+            if cfg.wifi.powersave == null then null
+            else if cfg.wifi.powersave then 3
+            else 2;
+        };
+      }
+    ];
+
+    boot.kernelModules = [ "ctr" ];
+
+    security.polkit.enable = true;
+    security.polkit.extraConfig = polkitConf;
+
+    services.dbus.packages = packages
+      ++ optional cfg.enableStrongSwan pkgs.strongswanNM
+      ++ optional (cfg.dns == "dnsmasq") pkgs.dnsmasq;
+
+    services.udev.packages = packages;
+  };
+}
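Not part of the patch: a sketch pulling together several of the options above. The Wi-Fi profile and the environment file path are illustrative only; the environment file is expected to define HOME_WIFI_PASSWORD for envsubst:

    {
      networking.networkmanager = {
        enable = true;
        wifi.backend = "iwd";
        dns = "systemd-resolved";   # assumes services.resolved.enable = true elsewhere
        ensureProfiles = {
          environmentFiles = [ "/run/secrets/wifi.env" ];
          profiles.home-wifi = {
            connection = { id = "home-wifi"; type = "wifi"; };
            wifi = { mode = "infrastructure"; ssid = "Home Wi-Fi"; };
            wifi-security = { key-mgmt = "wpa-psk"; psk = "$HOME_WIFI_PASSWORD"; };
          };
        };
      };
    }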
diff --git a/nixpkgs/nixos/modules/services/networking/nextdns.nix b/nixpkgs/nixos/modules/services/networking/nextdns.nix
new file mode 100644
index 000000000000..697fa605049e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nextdns.nix
@@ -0,0 +1,44 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nextdns;
+in {
+  options = {
+    services.nextdns = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the NextDNS DNS/53 to DoH Proxy service.";
+      };
+      arguments = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-config" "10.0.3.0/24=abcdef" ];
+        description = lib.mdDoc "Additional arguments to be passed to nextdns run.";
+      };
+    };
+  };
+
+  # https://github.com/nextdns/nextdns/blob/628ea509eaaccd27adb66337db03e5b56f6f38a8/host/service/systemd/service.go
+  config = mkIf cfg.enable {
+    systemd.services.nextdns = {
+      description = "NextDNS DNS/53 to DoH Proxy";
+      environment = {
+        SERVICE_RUN_MODE = "1";
+      };
+      startLimitIntervalSec = 5;
+      startLimitBurst = 10;
+      serviceConfig = {
+        ExecStart = "${pkgs.nextdns}/bin/nextdns run ${escapeShellArgs config.services.nextdns.arguments}";
+        RestartSec = 120;
+        LimitMEMLOCK = "infinity";
+      };
+      after = [ "network.target" ];
+      before = [ "nss-lookup.target" ];
+      wants = [ "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
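Not part of the patch: a usage sketch; the configuration ID is a placeholder. The arguments list is passed verbatim to `nextdns run`:

    {
      services.nextdns = {
        enable = true;
        arguments = [ "-config" "abcdef" ];
      };
    }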
diff --git a/nixpkgs/nixos/modules/services/networking/nftables.nix b/nixpkgs/nixos/modules/services/networking/nftables.nix
new file mode 100644
index 000000000000..424d005dc0b5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nftables.nix
@@ -0,0 +1,320 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.networking.nftables;
+
+  tableSubmodule = { name, ... }: {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Enable this table.";
+      };
+
+      name = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Table name.";
+      };
+
+      content = mkOption {
+        type = types.lines;
+        description = lib.mdDoc "The table content.";
+      };
+
+      family = mkOption {
+        description = lib.mdDoc "Table family.";
+        type = types.enum [ "ip" "ip6" "inet" "arp" "bridge" "netdev" ];
+      };
+    };
+
+    config = {
+      name = mkDefault name;
+    };
+  };
+in
+{
+  ###### interface
+
+  options = {
+    networking.nftables.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description =
+        lib.mdDoc ''
+          Whether to enable nftables and use the nftables-based firewall if enabled.
+          nftables is a Linux-based packet filtering framework intended to
+          replace frameworks like iptables.
+
+          Note that if you have Docker enabled you will not be able to use
+          nftables without intervention. Docker uses iptables internally to
+          set up NAT for containers. This module disables the ip_tables kernel
+          module, but Docker automatically loads it again. Please see
+          <https://github.com/NixOS/nixpkgs/issues/24318#issuecomment-289216273>
+          for more information.
+
+          There are other programs that use iptables internally too, such as
+          libvirt. For information on how the two firewalls interact, see
+          <https://wiki.nftables.org/wiki-nftables/index.php/Troubleshooting#Question_4._How_do_nftables_and_iptables_interact_when_used_on_the_same_system.3F>.
+        '';
+    };
+
+    networking.nftables.checkRuleset = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Run `nft check` on the ruleset to spot syntax errors during build.
+        Because this is executed in a sandbox, the check might fail if it requires
+        access to any environmental factors or paths outside the Nix store.
+        To circumvent this, the ruleset file can be edited using the preCheckRuleset
+        option to work in the sandbox environment.
+      '';
+    };
+
+    networking.nftables.checkRulesetRedirects = mkOption {
+      type = types.addCheck (types.attrsOf types.path) (attrs: all types.path.check (attrNames attrs));
+      default = {
+        "/etc/hosts" = config.environment.etc.hosts.source;
+        "/etc/protocols" = config.environment.etc.protocols.source;
+        "/etc/services" = config.environment.etc.services.source;
+      };
+      defaultText = literalExpression ''
+        {
+          "/etc/hosts" = config.environment.etc.hosts.source;
+          "/etc/protocols" = config.environment.etc.protocols.source;
+          "/etc/services" = config.environment.etc.services.source;
+        }
+      '';
+      description = mdDoc ''
+        Set of paths that should be intercepted and rewritten while checking the ruleset
+        using `pkgs.buildPackages.libredirect`.
+      '';
+    };
+
+    networking.nftables.preCheckRuleset = mkOption {
+      type = types.lines;
+      default = "";
+      example = lib.literalExpression ''
+        sed 's/skgid meadow/skgid nogroup/g' -i ruleset.conf
+      '';
+      description = lib.mdDoc ''
+        This script gets run before the ruleset is checked. It can be used to
+        create additional files needed for the ruleset check to work, or modify
+        the ruleset for cases the build environment cannot cover.
+      '';
+    };
+
+    networking.nftables.flushRuleset = mkEnableOption (lib.mdDoc "flushing the entire ruleset on each reload");
+
+    networking.nftables.extraDeletions = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        # this makes deleting a non-existing table a no-op instead of an error
+        table inet some-table;
+
+        delete table inet some-table;
+      '';
+      description =
+        lib.mdDoc ''
+          Extra deletion commands to be run on every firewall start, reload
+          and after stopping the firewall.
+        '';
+    };
+
+    networking.nftables.ruleset = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        # Check out https://wiki.nftables.org/ for better documentation.
+        # Table for both IPv4 and IPv6.
+        table inet filter {
+          # Block all incoming connections traffic except SSH and "ping".
+          chain input {
+            type filter hook input priority 0;
+
+            # accept any localhost traffic
+            iifname lo accept
+
+            # accept traffic originated from us
+            ct state {established, related} accept
+
+            # ICMP
+            # routers may also want: mld-listener-query, nd-router-solicit
+            ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, packet-too-big, time-exceeded, parameter-problem, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert } accept
+            ip protocol icmp icmp type { destination-unreachable, router-advertisement, time-exceeded, parameter-problem } accept
+
+            # allow "ping"
+            ip6 nexthdr icmpv6 icmpv6 type echo-request accept
+            ip protocol icmp icmp type echo-request accept
+
+            # accept SSH connections (required for a server)
+            tcp dport 22 accept
+
+            # count and drop any other traffic
+            counter drop
+          }
+
+          # Allow all outgoing connections.
+          chain output {
+            type filter hook output priority 0;
+            accept
+          }
+
+          chain forward {
+            type filter hook forward priority 0;
+            accept
+          }
+        }
+      '';
+      description =
+        lib.mdDoc ''
+          The ruleset to be used with nftables.  Should be in a format that
+          can be loaded using "nft -f".  The ruleset is updated atomically.
+          Note that if the tables should be cleaned first, either:
+          - networking.nftables.flushRuleset = true; needs to be set (flushes all tables)
+          - networking.nftables.extraDeletions needs to be set
+          - or networking.nftables.tables can be used, which will clean up the table automatically
+        '';
+    };
+    networking.nftables.rulesetFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description =
+        lib.mdDoc ''
+          The ruleset file to be used with nftables.  Should be in a format that
+          can be loaded using "nft -f".  The ruleset is updated atomically.
+        '';
+    };
+    networking.nftables.tables = mkOption {
+      type = types.attrsOf (types.submodule tableSubmodule);
+
+      default = {};
+
+      description = lib.mdDoc ''
+        Tables to be added to ruleset.
+        Tables will be added together with delete statements to clean up the table before every update.
+      '';
+
+      example = {
+        filter = {
+          family = "inet";
+          content = ''
+            # Check out https://wiki.nftables.org/ for better documentation.
+            # Table for both IPv4 and IPv6.
+            # Block all incoming connections traffic except SSH and "ping".
+            chain input {
+              type filter hook input priority 0;
+
+              # accept any localhost traffic
+              iifname lo accept
+
+              # accept traffic originated from us
+              ct state {established, related} accept
+
+              # ICMP
+              # routers may also want: mld-listener-query, nd-router-solicit
+              ip6 nexthdr icmpv6 icmpv6 type { destination-unreachable, packet-too-big, time-exceeded, parameter-problem, nd-router-advert, nd-neighbor-solicit, nd-neighbor-advert } accept
+              ip protocol icmp icmp type { destination-unreachable, router-advertisement, time-exceeded, parameter-problem } accept
+
+              # allow "ping"
+              ip6 nexthdr icmpv6 icmpv6 type echo-request accept
+              ip protocol icmp icmp type echo-request accept
+
+              # accept SSH connections (required for a server)
+              tcp dport 22 accept
+
+              # count and drop any other traffic
+              counter drop
+            }
+
+            # Allow all outgoing connections.
+            chain output {
+              type filter hook output priority 0;
+              accept
+            }
+
+            chain forward {
+              type filter hook forward priority 0;
+              accept
+            }
+          '';
+        };
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    boot.blacklistedKernelModules = [ "ip_tables" ];
+    environment.systemPackages = [ pkgs.nftables ];
+    # versionOlder for backportability, remove afterwards
+    networking.nftables.flushRuleset = mkDefault (versionOlder config.system.stateVersion "23.11" || (cfg.rulesetFile != null || cfg.ruleset != ""));
+    systemd.services.nftables = {
+      description = "nftables firewall";
+      before = [ "network-pre.target" ];
+      wants = [ "network-pre.target" ];
+      wantedBy = [ "multi-user.target" ];
+      reloadIfChanged = true;
+      serviceConfig = let
+        enabledTables = filterAttrs (_: table: table.enable) cfg.tables;
+        deletionsScript = pkgs.writeScript "nftables-deletions" ''
+          #! ${pkgs.nftables}/bin/nft -f
+          ${if cfg.flushRuleset then "flush ruleset"
+            else concatStringsSep "\n" (mapAttrsToList (_: table: ''
+              table ${table.family} ${table.name}
+              delete table ${table.family} ${table.name}
+            '') enabledTables)}
+          ${cfg.extraDeletions}
+        '';
+        deletionsScriptVar = "/var/lib/nftables/deletions.nft";
+        ensureDeletions = pkgs.writeShellScript "nftables-ensure-deletions" ''
+          touch ${deletionsScriptVar}
+          chmod +x ${deletionsScriptVar}
+        '';
+        saveDeletionsScript = pkgs.writeShellScript "nftables-save-deletions" ''
+          cp ${deletionsScript} ${deletionsScriptVar}
+        '';
+        cleanupDeletionsScript = pkgs.writeShellScript "nftables-cleanup-deletions" ''
+          rm ${deletionsScriptVar}
+        '';
+        rulesScript = pkgs.writeTextFile {
+          name =  "nftables-rules";
+          executable = true;
+          text = ''
+            #! ${pkgs.nftables}/bin/nft -f
+            # previous deletions, if any
+            include "${deletionsScriptVar}"
+            # current deletions
+            include "${deletionsScript}"
+            ${concatStringsSep "\n" (mapAttrsToList (_: table: ''
+              table ${table.family} ${table.name} {
+                ${table.content}
+              }
+            '') enabledTables)}
+            ${cfg.ruleset}
+            ${lib.optionalString (cfg.rulesetFile != null) ''
+              include "${cfg.rulesetFile}"
+            ''}
+          '';
+          checkPhase = lib.optionalString cfg.checkRuleset ''
+            cp $out ruleset.conf
+            sed 's|include "${deletionsScriptVar}"||' -i ruleset.conf
+            ${cfg.preCheckRuleset}
+            export NIX_REDIRECTS=${escapeShellArg (concatStringsSep ":" (mapAttrsToList (n: v: "${n}=${v}") cfg.checkRulesetRedirects))}
+            LD_PRELOAD="${pkgs.buildPackages.libredirect}/lib/libredirect.so ${pkgs.buildPackages.lklWithFirewall.lib}/lib/liblkl-hijack.so" \
+              ${pkgs.buildPackages.nftables}/bin/nft --check --file ruleset.conf
+          '';
+        };
+      in {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = [ ensureDeletions rulesScript ];
+        ExecStartPost = saveDeletionsScript;
+        ExecReload = [ ensureDeletions rulesScript saveDeletionsScript ];
+        ExecStop = [ deletionsScriptVar cleanupDeletionsScript ];
+        StateDirectory = "nftables";
+      };
+    };
+  };
+}
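
A minimal sketch of using the table-based options consumed by the implementation above; it assumes the table's `name` defaults to the attribute name and that `family` and `content` are set per table, as the generated ruleset expects:

  {
    networking.nftables = {
      enable = true;
      tables.filter = {
        family = "inet";
        content = ''
          chain input {
            type filter hook input priority 0; policy drop;
            iifname lo accept
            ct state { established, related } accept
            tcp dport 22 accept
          }
        '';
      };
    };
  }
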
diff --git a/nixpkgs/nixos/modules/services/networking/nghttpx/backend-params-submodule.nix b/nixpkgs/nixos/modules/services/networking/nghttpx/backend-params-submodule.nix
new file mode 100644
index 000000000000..510dc02b5c9f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nghttpx/backend-params-submodule.nix
@@ -0,0 +1,131 @@
+{ lib, ...}:
+{ options = {
+    proto = lib.mkOption {
+      type        = lib.types.enum [ "h2" "http/1.1" ];
+      default     = "http/1.1";
+      description = lib.mdDoc ''
+        This option configures the protocol the backend server expects
+        to use.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more detail.
+      '';
+    };
+
+    tls = lib.mkOption {
+      type        = lib.types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        This option determines whether nghttpx will negotiate its
+        connection with a backend server using TLS or not. The burden
+        is on the backend server to provide the TLS certificate!
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more detail.
+      '';
+    };
+
+    sni = lib.mkOption {
+      type        = lib.types.nullOr lib.types.str;
+      default     = null;
+      description = lib.mdDoc ''
+        Override the TLS SNI field value. This value (in nghttpx)
+        defaults to the host value of the backend configuration.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more detail.
+      '';
+    };
+
+    fall = lib.mkOption {
+      type        = lib.types.int;
+      default     = 0;
+      description = lib.mdDoc ''
+        If nghttpx cannot connect to the backend N times in a row, the
+        backend is assumed to be offline and is excluded from load
+        balancing. If N is 0 the backend is never excluded from load
+        balancing.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more detail.
+      '';
+    };
+
+    rise = lib.mkOption {
+      type        = lib.types.int;
+      default     = 0;
+      description = lib.mdDoc ''
+        If the backend is excluded from load balancing, nghttpx will
+        periodically attempt to make a connection to the backend. If
+        the connection is successful N times in a row the backend is
+        re-included in load balancing. If N is 0 a backend is never
+        reconsidered for load balancing once it falls.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more detail.
+      '';
+    };
+
+    affinity = lib.mkOption {
+      type        = lib.types.enum [ "ip" "none" ];
+      default     = "none";
+      description = lib.mdDoc ''
+        If "ip" is given, client IP based session affinity is
+        enabled. If "none" is given, session affinity is disabled.
+
+        Session affinity is enabled (by nghttpx) per-backend
+        pattern. If at least one backend has a non-"none" affinity,
+        then session affinity is enabled for all backend servers
+        sharing the same pattern.
+
+        It is advised to set affinity on all backends explicitly if
+        session affinity is desired. The session affinity may break if
+        one of the backends becomes unreachable, or the backend settings are
+        reloaded or replaced via the API.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more detail.
+      '';
+    };
+
+    dns = lib.mkOption {
+      type        = lib.types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        By default, name resolution of a backend's host name is done at
+        start up, or on configuration reload. If "dns" is true, name
+        resolution takes place dynamically instead.
+
+        This is useful if a backend's address changes frequently. If
+        "dns" is true, name resolution of a backend's host name at
+        start up, or on configuration reload, is skipped.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more detail.
+      '';
+    };
+
+    redirect-if-not-tls = lib.mkOption {
+      type        = lib.types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        If true, a backend match requires the frontend connection to be
+        TLS encrypted. If it is not, nghttpx responds to the request
+        with a 308 status code and an https URI the client should use
+        instead in the Location header.
+
+        The port number in the redirect URI is 443 by default and can
+        be changed using 'services.nghttpx.redirect-https-port'
+        option.
+
+        If at least one backend has "redirect-if-not-tls" set to true,
+        this feature is enabled for all backend servers with the same
+        pattern. It is advised to set the "redirect-if-not-tls" parameter
+        on all backends explicitly if this feature is desired.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more detail.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nghttpx/backend-submodule.nix b/nixpkgs/nixos/modules/services/networking/nghttpx/backend-submodule.nix
new file mode 100644
index 000000000000..af99b21c9ab3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nghttpx/backend-submodule.nix
@@ -0,0 +1,50 @@
+{ lib, ... }:
+{ options = {
+    server = lib.mkOption {
+      type =
+        lib.types.either
+          (lib.types.submodule (import ./server-options.nix))
+          (lib.types.path);
+      example = {
+        host = "127.0.0.1";
+        port = 8888;
+      };
+      default = {
+        host = "127.0.0.1";
+        port = 80;
+      };
+      description = lib.mdDoc ''
+        Backend server location specified as either a host:port pair
+        or a unix domain socket.
+      '';
+    };
+
+    patterns = lib.mkOption {
+      type    = lib.types.listOf lib.types.str;
+      example = [
+        "*.host.net/v1/"
+        "host.org/v2/mypath"
+        "/somepath"
+      ];
+      default     = [];
+      description = lib.mdDoc ''
+        List of nghttpx backend patterns.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-b
+        for more information on the pattern syntax and nghttpx's behavior.
+      '';
+    };
+
+    params = lib.mkOption {
+      type    = lib.types.nullOr (lib.types.submodule (import ./backend-params-submodule.nix));
+      example = {
+        proto = "h2";
+        tls   = true;
+      };
+      default     = null;
+      description = lib.mdDoc ''
+        Parameters to configure a backend.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nghttpx/default.nix b/nixpkgs/nixos/modules/services/networking/nghttpx/default.nix
new file mode 100644
index 000000000000..b8a0a24e3aad
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nghttpx/default.nix
@@ -0,0 +1,118 @@
+{config, pkgs, lib, ...}:
+let
+  cfg = config.services.nghttpx;
+
+  # renderHost :: Either ServerOptions Path -> String
+  renderHost = server:
+    if builtins.isString server
+    then "unix://${server}"
+    else "${server.host},${builtins.toString server.port}";
+
+  # Filter out submodule parameters whose value is null or false or is
+  # the key _module.
+  #
+  # filterParams :: ParamsSubmodule -> ParamsSubmodule
+  filterParams = p:
+    lib.filterAttrs
+      (n: v: ("_module" != n) && (null != v) && (false != v))
+      (lib.optionalAttrs (null != p) p);
+
+  # renderBackend :: BackendSubmodule -> String
+  renderBackend = backend:
+    let
+      host = renderHost backend.server;
+      patterns = lib.concatStringsSep ":" backend.patterns;
+
+      # Render a set of backend parameters; this is somewhat
+      # complicated because nghttpx backend patterns can be entirely
+      # omitted and the params may be given as a mixed collection of
+      # 'key=val' pairs or atoms (e.g: 'proto=h2;tls')
+      params =
+        lib.mapAttrsToList
+          (n: v:
+            if builtins.isBool v
+            then n
+            else if builtins.isString v
+            then "${n}=${v}"
+            else "${n}=${builtins.toString v}")
+          (filterParams backend.params);
+
+      # NB: params are delimited by a ";" which is the same delimiter
+      # to separate the host;[pattern];[params] sections of a backend
+      sections =
+        builtins.filter (e: "" != e) ([
+          host
+          patterns
+        ]++params);
+      formattedSections = lib.concatStringsSep ";" sections;
+    in
+      "backend=${formattedSections}";
+
+  # renderFrontend :: FrontendSubmodule -> String
+  renderFrontend = frontend:
+    let
+      host   = renderHost frontend.server;
+      params0 =
+        lib.mapAttrsToList
+          (n: v: if builtins.isBool v then n else v)
+          (filterParams frontend.params);
+
+      # NB: nghttpx doesn't accept "tls", you must omit "no-tls" for
+      # the default behavior of turning on TLS.
+      params1 = lib.remove "tls" params0;
+
+      sections          = [ host ] ++ params1;
+      formattedSections = lib.concatStringsSep ";" sections;
+    in
+      "frontend=${formattedSections}";
+
+  configurationFile = pkgs.writeText "nghttpx.conf" ''
+    ${lib.optionalString (null != cfg.tls) ("private-key-file="+cfg.tls.key)}
+    ${lib.optionalString (null != cfg.tls) ("certificate-file="+cfg.tls.crt)}
+
+    user=nghttpx
+
+    ${lib.concatMapStringsSep "\n" renderFrontend cfg.frontends}
+    ${lib.concatMapStringsSep "\n" renderBackend  cfg.backends}
+
+    backlog=${builtins.toString cfg.backlog}
+    backend-address-family=${cfg.backend-address-family}
+
+    workers=${builtins.toString cfg.workers}
+    rlimit-nofile=${builtins.toString cfg.rlimit-nofile}
+
+    ${lib.optionalString cfg.single-thread "single-thread=yes"}
+    ${lib.optionalString cfg.single-process "single-process=yes"}
+
+    ${cfg.extraConfig}
+  '';
+in
+{ imports = [
+    ./nghttpx-options.nix
+  ];
+
+  config = lib.mkIf cfg.enable {
+
+    users.groups.nghttpx = { };
+    users.users.nghttpx = {
+      group = config.users.groups.nghttpx.name;
+      isSystemUser = true;
+    };
+
+
+    systemd.services = {
+      nghttpx = {
+        wantedBy = [ "multi-user.target" ];
+        after    = [ "network.target" ];
+        script   = ''
+          ${pkgs.nghttp2}/bin/nghttpx --conf=${configurationFile}
+        '';
+
+        serviceConfig = {
+          Restart    = "on-failure";
+          RestartSec = 60;
+        };
+      };
+    };
+  };
+}
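
As a worked example of the rendering code above (a sketch; attribute order follows Nix's alphabetical attrset ordering), the backend below is turned by renderBackend into a single nghttpx.conf line:

  # Input element of services.nghttpx.backends (submodule defaults filled in):
  {
    server   = { host = "172.16.0.22"; port = 8443; };
    patterns = [ "/" ];
    params   = { proto = "http/1.1"; redirect-if-not-tls = true; };
  }
  # Approximate rendered line; filterParams drops null/false values, but
  # non-false defaults such as affinity, fall and rise are still emitted:
  #   backend=172.16.0.22,8443;/;affinity=none;fall=0;proto=http/1.1;redirect-if-not-tls;rise=0
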
diff --git a/nixpkgs/nixos/modules/services/networking/nghttpx/frontend-params-submodule.nix b/nixpkgs/nixos/modules/services/networking/nghttpx/frontend-params-submodule.nix
new file mode 100644
index 000000000000..66c6d7efa6a0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nghttpx/frontend-params-submodule.nix
@@ -0,0 +1,64 @@
+{ lib, ...}:
+{ options = {
+    tls = lib.mkOption {
+      type        = lib.types.enum [ "tls" "no-tls" ];
+      default     = "tls";
+      description = lib.mdDoc ''
+        Enable or disable TLS. If set to "tls" (the default), the key
+        and certificate must be configured for nghttpx.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-f
+        for more detail.
+      '';
+    };
+
+    sni-fwd = lib.mkOption {
+      type    = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        When performing a match to select a backend server, SNI host
+        name received from the client is used instead of the request
+        host. See --backend option about the pattern match.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-f
+        for more detail.
+      '';
+    };
+
+    api = lib.mkOption {
+      type        = lib.types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        Enable API access for this frontend. This enables you to
+        dynamically modify nghttpx at run-time, so this feature
+        is disabled by default and should be turned on with care.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-f
+        for more detail.
+      '';
+    };
+
+    healthmon = lib.mkOption {
+      type        = lib.types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        Make this frontend a health monitor endpoint. Any request
+        received on this frontend is responded to with a 200 OK.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-f
+        for more detail.
+      '';
+    };
+
+    proxyproto = lib.mkOption {
+      type        = lib.types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        Accept PROXY protocol version 1 on frontend connection.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-f
+        for more detail.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nghttpx/frontend-submodule.nix b/nixpkgs/nixos/modules/services/networking/nghttpx/frontend-submodule.nix
new file mode 100644
index 000000000000..3175df20eec5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nghttpx/frontend-submodule.nix
@@ -0,0 +1,36 @@
+{ lib, ... }:
+{ options = {
+    server = lib.mkOption {
+      type =
+        lib.types.either
+          (lib.types.submodule (import ./server-options.nix))
+          (lib.types.path);
+      example = {
+        host = "127.0.0.1";
+        port = 8888;
+      };
+      default = {
+        host = "127.0.0.1";
+        port = 80;
+      };
+      description = lib.mdDoc ''
+        Frontend server interface binding specification as either a
+        host:port pair or a unix domain socket.
+
+        NB: a host of "*" listens on all interfaces and includes IPv6
+        addresses.
+      '';
+    };
+
+    params = lib.mkOption {
+      type    = lib.types.nullOr (lib.types.submodule (import ./frontend-params-submodule.nix));
+      example = {
+        tls   = "tls";
+      };
+      default     = null;
+      description = lib.mdDoc ''
+        Parameters to configure a frontend.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nghttpx/nghttpx-options.nix b/nixpkgs/nixos/modules/services/networking/nghttpx/nghttpx-options.nix
new file mode 100644
index 000000000000..82ab8c4223e6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nghttpx/nghttpx-options.nix
@@ -0,0 +1,142 @@
+{ lib, ... }:
+{ options.services.nghttpx = {
+    enable = lib.mkEnableOption (lib.mdDoc "nghttpx");
+
+    frontends = lib.mkOption {
+      type        = lib.types.listOf (lib.types.submodule (import ./frontend-submodule.nix));
+      description = lib.mdDoc ''
+        A list of frontend listener specifications.
+      '';
+      example = [
+        { server = {
+            host = "*";
+            port = 80;
+          };
+
+          params = {
+            tls = "no-tls";
+          };
+        }
+      ];
+    };
+
+    backends  = lib.mkOption {
+      type = lib.types.listOf (lib.types.submodule (import ./backend-submodule.nix));
+      description = lib.mdDoc ''
+        A list of backend specifications.
+      '';
+      example = [
+        { server = {
+            host = "172.16.0.22";
+            port = 8443;
+          };
+          patterns = [ "/" ];
+          params   = {
+            proto               = "http/1.1";
+            redirect-if-not-tls = true;
+          };
+        }
+      ];
+    };
+
+    tls = lib.mkOption {
+      type        = lib.types.nullOr (lib.types.submodule (import ./tls-submodule.nix));
+      default     = null;
+      description = lib.mdDoc ''
+        TLS certificate and key paths. Note that this does not by itself
+        enable TLS for a frontend listener; for that, a frontend
+        specification must leave `params.tls` at its default of "tls".
+      '';
+      example = {
+        key = "/etc/ssl/keys/server.key";
+        crt = "/etc/ssl/certs/server.crt";
+      };
+    };
+
+    extraConfig = lib.mkOption {
+      type        = lib.types.lines;
+      default     = "";
+      description = lib.mdDoc ''
+        Extra configuration options to be appended to the generated
+        configuration file.
+      '';
+    };
+
+    single-process = lib.mkOption {
+      type        = lib.types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        Run this program in single process mode for debugging
+        purposes. Without this option, nghttpx creates at least 2
+        processes: a master and a worker process. If this option is
+        used, master and worker are unified into a single
+        process. nghttpx still spawns an additional process if neverbleed
+        is used. In single process mode, the signal handling
+        feature is disabled.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx--single-process
+      '';
+    };
+
+    backlog = lib.mkOption {
+      type        = lib.types.int;
+      default     = 65536;
+      description = lib.mdDoc ''
+        Listen backlog size.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx--backlog
+      '';
+    };
+
+    backend-address-family = lib.mkOption {
+      type = lib.types.enum [
+        "auto"
+        "IPv4"
+        "IPv6"
+      ];
+      default = "auto";
+      description = lib.mdDoc ''
+        Specify address family of backend connections. If "auto" is
+        given, both IPv4 and IPv6 are considered. If "IPv4" is given,
+        only IPv4 addresses are considered. If "IPv6" is given, only IPv6
+        addresses are considered.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx--backend-address-family
+      '';
+    };
+
+    workers = lib.mkOption {
+      type        = lib.types.int;
+      default     = 1;
+      description = lib.mdDoc ''
+        Set the number of worker threads.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx-n
+      '';
+    };
+
+    single-thread = lib.mkOption {
+      type        = lib.types.bool;
+      default     = false;
+      description = lib.mdDoc ''
+        Run everything in one thread inside the worker process. This
+        feature is provided for a better debugging experience, or for
+        platforms which lack thread support. If threading is
+        disabled, this option is always enabled.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx--single-thread
+      '';
+    };
+
+    rlimit-nofile = lib.mkOption {
+      type        = lib.types.int;
+      default     = 0;
+      description = lib.mdDoc ''
+        Set maximum number of open files (RLIMIT_NOFILE) to \<N\>. If 0
+        is given, nghttpx does not set the limit.
+
+        Please see https://nghttp2.org/documentation/nghttpx.1.html#cmdoption-nghttpx--rlimit-nofile
+      '';
+    };
+  };
+}
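
Putting these options together, a minimal sketch of a TLS-terminating proxy; key, certificate and addresses are illustrative:

  {
    services.nghttpx = {
      enable = true;
      tls = {
        key = "/etc/ssl/keys/server.key";
        crt = "/etc/ssl/certs/server.crt";
      };
      frontends = [
        { server = { host = "*"; port = 443; };
          params = { tls = "tls"; };
        }
      ];
      backends = [
        { server = { host = "127.0.0.1"; port = 8080; };
          patterns = [ "/" ];
          params   = { proto = "http/1.1"; };
        }
      ];
    };
  }
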
diff --git a/nixpkgs/nixos/modules/services/networking/nghttpx/server-options.nix b/nixpkgs/nixos/modules/services/networking/nghttpx/server-options.nix
new file mode 100644
index 000000000000..48e2a3045596
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nghttpx/server-options.nix
@@ -0,0 +1,18 @@
+{ lib, ... }:
+{ options = {
+    host = lib.mkOption {
+      type        = lib.types.str;
+      example     = "127.0.0.1";
+      description = lib.mdDoc ''
+        Server host address.
+      '';
+    };
+    port = lib.mkOption {
+      type        = lib.types.int;
+      example     = 5088;
+      description = lib.mdDoc ''
+        Server host port.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nghttpx/tls-submodule.nix b/nixpkgs/nixos/modules/services/networking/nghttpx/tls-submodule.nix
new file mode 100644
index 000000000000..bb6cdae07e58
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nghttpx/tls-submodule.nix
@@ -0,0 +1,21 @@
+{lib, ...}:
+{ options = {
+    key = lib.mkOption {
+      type        = lib.types.str;
+      example     = "/etc/ssl/keys/mykeyfile.key";
+      default     = "/etc/ssl/keys/server.key";
+      description = lib.mdDoc ''
+        Path to the TLS key file.
+      '';
+    };
+
+    crt = lib.mkOption {
+      type        = lib.types.str;
+      example     = "/etc/ssl/certs/mycert.crt";
+      default     = "/etc/ssl/certs/server.crt";
+      description = lib.mdDoc ''
+        Path to the TLS certificate file.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ngircd.nix b/nixpkgs/nixos/modules/services/networking/ngircd.nix
new file mode 100644
index 000000000000..5e721f5aa625
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ngircd.nix
@@ -0,0 +1,62 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ngircd;
+
+  configFile = pkgs.stdenv.mkDerivation {
+    name = "ngircd.conf";
+
+    text = cfg.config;
+
+    preferLocalBuild = true;
+
+    buildCommand = ''
+      echo -n "$text" > $out
+      ${cfg.package}/sbin/ngircd --config $out --configtest
+    '';
+  };
+in {
+  options = {
+    services.ngircd = {
+      enable = mkEnableOption (lib.mdDoc "the ngircd IRC server");
+
+      config = mkOption {
+        description = lib.mdDoc "The ngircd configuration (see ngircd.conf(5)).";
+
+        type = types.lines;
+      };
+
+      package = mkOption {
+        description = lib.mdDoc "The ngircd package.";
+
+        type = types.package;
+
+        default = pkgs.ngircd;
+        defaultText = literalExpression "pkgs.ngircd";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    #!!! TODO: Use ExecReload (see https://github.com/NixOS/nixpkgs/issues/1988)
+    systemd.services.ngircd = {
+      description = "The ngircd IRC server";
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig.ExecStart = "${cfg.package}/sbin/ngircd --config ${configFile} --nodaemon";
+
+      serviceConfig.User = "ngircd";
+    };
+
+    users.users.ngircd = {
+      isSystemUser = true;
+      group = "ngircd";
+      description = "ngircd user.";
+    };
+    users.groups.ngircd = {};
+
+  };
+}
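
A minimal sketch of using this module; the `config` contents follow ngircd.conf(5) and the values shown are illustrative (the derivation above runs `--configtest` against them at build time):

  {
    services.ngircd = {
      enable = true;
      config = ''
        [Global]
        Name = irc.example.org
        Info = Example IRC server
      '';
    };
  }
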
diff --git a/nixpkgs/nixos/modules/services/networking/nix-serve.nix b/nixpkgs/nixos/modules/services/networking/nix-serve.nix
new file mode 100644
index 000000000000..8c4352bc95e8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nix-serve.nix
@@ -0,0 +1,102 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nix-serve;
+in
+{
+  options = {
+    services.nix-serve = {
+      enable = mkEnableOption (lib.mdDoc "nix-serve, the standalone Nix binary cache server");
+
+      port = mkOption {
+        type = types.port;
+        default = 5000;
+        description = lib.mdDoc ''
+          Port number on which nix-serve will listen.
+        '';
+      };
+
+      bindAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc ''
+          IP address where nix-serve will bind its listening socket.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.nix-serve;
+        defaultText = literalExpression "pkgs.nix-serve";
+        description = lib.mdDoc ''
+          nix-serve package to use.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for nix-serve.";
+      };
+
+      secretKeyFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The path to the file used for signing derivation data.
+          Generate with:
+
+          ```
+          nix-store --generate-binary-cache-key key-name secret-key-file public-key-file
+          ```
+
+          For more details see {manpage}`nix-store(1)`.
+        '';
+      };
+
+      extraParams = mkOption {
+        type = types.separatedString " ";
+        default = "";
+        description = lib.mdDoc ''
+          Extra command line parameters for nix-serve.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    nix.settings.extra-allowed-users = [ "nix-serve" ];
+
+    systemd.services.nix-serve = {
+      description = "nix-serve binary cache server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ config.nix.package.out pkgs.bzip2.bin ];
+      environment.NIX_REMOTE = "daemon";
+
+      script = ''
+        ${lib.optionalString (cfg.secretKeyFile != null) ''
+          export NIX_SECRET_KEY_FILE="$CREDENTIALS_DIRECTORY/NIX_SECRET_KEY_FILE"
+        ''}
+        exec ${cfg.package}/bin/nix-serve --listen ${cfg.bindAddress}:${toString cfg.port} ${cfg.extraParams}
+      '';
+
+      serviceConfig = {
+        Restart = "always";
+        RestartSec = "5s";
+        User = "nix-serve";
+        Group = "nix-serve";
+        DynamicUser = true;
+        LoadCredential = lib.optionalString (cfg.secretKeyFile != null)
+          "NIX_SECRET_KEY_FILE:${cfg.secretKeyFile}";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+  };
+}
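
A minimal sketch of serving the local store as a signed binary cache; the key path is illustrative and should point at a secret key generated with the `nix-store --generate-binary-cache-key` invocation shown in the option description:

  {
    services.nix-serve = {
      enable = true;
      port = 5000;
      secretKeyFile = "/var/secrets/cache-priv-key.pem";
      openFirewall = true;
    };
  }
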
diff --git a/nixpkgs/nixos/modules/services/networking/nix-store-gcs-proxy.nix b/nixpkgs/nixos/modules/services/networking/nix-store-gcs-proxy.nix
new file mode 100644
index 000000000000..531b2bde7633
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nix-store-gcs-proxy.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  opts = { name, config, ... }: {
+    options = {
+      enable = mkOption {
+        default = true;
+        type = types.bool;
+        example = true;
+        description = lib.mdDoc "Whether to enable proxy for this bucket";
+      };
+      bucketName = mkOption {
+        type = types.str;
+        default = name;
+        example = "my-bucket-name";
+        description = lib.mdDoc "Name of Google storage bucket";
+      };
+      address = mkOption {
+        type = types.str;
+        example = "localhost:3000";
+        description = lib.mdDoc "The address of the proxy.";
+      };
+    };
+  };
+  enabledProxies = lib.filterAttrs (n: v: v.enable) config.services.nix-store-gcs-proxy;
+  mapProxies = function: lib.mkMerge (lib.mapAttrsToList function enabledProxies);
+in
+{
+  options.services.nix-store-gcs-proxy = mkOption {
+    type = types.attrsOf (types.submodule opts);
+    default = {};
+    description = lib.mdDoc ''
+      An attribute set of HTTP-to-GCS proxies that allow using GCS
+      buckets via the HTTP protocol.
+    '';
+  };
+
+  config.systemd.services = mapProxies (name: cfg: {
+    "nix-store-gcs-proxy-${name}" = {
+      description = "A HTTP nix store that proxies requests to Google Storage";
+      wantedBy = ["multi-user.target"];
+
+      startLimitIntervalSec = 10;
+      serviceConfig = {
+        RestartSec = 5;
+        ExecStart = ''
+          ${pkgs.nix-store-gcs-proxy}/bin/nix-store-gcs-proxy \
+            --bucket-name ${cfg.bucketName} \
+            --addr ${cfg.address}
+        '';
+
+        DynamicUser = true;
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateUsers = true;
+
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+
+        NoNewPrivileges = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+      };
+    };
+  });
+
+  meta.maintainers = [ maintainers.mrkkrp ];
+}
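
A minimal sketch; the attribute name is reused as the bucket name via the `bucketName` default, and the listen address is illustrative:

  {
    services.nix-store-gcs-proxy."my-bucket-name" = {
      address = "localhost:3000";
    };
  }
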
diff --git a/nixpkgs/nixos/modules/services/networking/nixops-dns.nix b/nixpkgs/nixos/modules/services/networking/nixops-dns.nix
new file mode 100644
index 000000000000..378c2ee6d05f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nixops-dns.nix
@@ -0,0 +1,78 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  pkg = pkgs.nixops-dns;
+  cfg = config.services.nixops-dns;
+in
+
+{
+  options = {
+    services.nixops-dns = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable nixops-dns, DNS resolution of NixOps
+          virtual machines via dnsmasq and a fake domain name.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The user the nixops-dns daemon should run as.
+          This should be the user that also runs nixops and
+          has the .nixops directory in its home.
+        '';
+      };
+
+      domain = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Fake domain name to resolve to NixOps virtual machines.
+
+          For example "ops" will resolve "vm.ops".
+        '';
+        default = "ops";
+      };
+
+      dnsmasq = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable dnsmasq forwarding to nixops-dns. This allows using
+          nixops-dns for `services.nixops-dns.domain` resolution
+          while forwarding the rest of the queries to the original resolvers.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.nixops-dns = {
+      description = "nixops-dns: DNS server for resolving NixOps machines";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        ExecStart="${pkg}/bin/nixops-dns --domain=.${cfg.domain}";
+      };
+    };
+
+    services.dnsmasq = mkIf cfg.dnsmasq {
+      enable = true;
+      resolveLocalQueries = true;
+      servers = [
+        "/${cfg.domain}/127.0.0.1#5300"
+      ];
+      extraConfig = ''
+        bind-interfaces
+        listen-address=127.0.0.1
+      '';
+    };
+
+  };
+}
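
A minimal sketch, assuming a hypothetical user `alice` runs nixops and has a ~/.nixops directory:

  {
    services.nixops-dns = {
      enable = true;
      user = "alice";  # hypothetical user that runs nixops
      domain = "ops";  # machines then resolve as <name>.ops
    };
  }
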
diff --git a/nixpkgs/nixos/modules/services/networking/nncp.nix b/nixpkgs/nixos/modules/services/networking/nncp.nix
new file mode 100644
index 000000000000..3cfe41995e76
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nncp.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+with lib;
+
+let
+  nncpCfgFile = "/run/nncp.hjson";
+  programCfg = config.programs.nncp;
+  callerCfg = config.services.nncp.caller;
+  daemonCfg = config.services.nncp.daemon;
+  settingsFormat = pkgs.formats.json { };
+  jsonCfgFile = settingsFormat.generate "nncp.json" programCfg.settings;
+  pkg = programCfg.package;
+in {
+  options = {
+
+    services.nncp = {
+      caller = {
+        enable = mkEnableOption ''
+          cron'ed NNCP TCP daemon caller.
+          The daemon will take configuration from
+          [](#opt-programs.nncp.settings)
+        '';
+        extraArgs = mkOption {
+          type = with types; listOf str;
+          description = "Extra command-line arguments to pass to caller.";
+          default = [ ];
+          example = [ "-autotoss" ];
+        };
+      };
+
+      daemon = {
+        enable = mkEnableOption ''
+          NNCP TCP synchronization daemon.
+          The daemon will take configuration from
+          [](#opt-programs.nncp.settings)
+        '';
+        socketActivation = {
+          enable = mkEnableOption ''
+            socket activation for nncp-daemon instead of running it persistently.
+          '';
+          listenStreams = mkOption {
+            type = with types; listOf str;
+            description = lib.mdDoc ''
+              TCP sockets to bind to.
+              See [](#opt-systemd.sockets._name_.listenStreams).
+            '';
+            default = [ "5400" ];
+          };
+        };
+        extraArgs = mkOption {
+          type = with types; listOf str;
+          description = "Extra command-line arguments to pass to daemon.";
+          default = [ ];
+          example = [ "-autotoss" ];
+        };
+      };
+
+    };
+  };
+
+  config = mkIf (programCfg.enable or callerCfg.enable or daemonCfg.enable) {
+
+    assertions = [{
+      assertion = with builtins;
+        let
+          callerConfigured =
+            let neigh = config.programs.nncp.settings.neigh or { };
+            in lib.lists.any (x: hasAttr "calls" x && x.calls != [ ])
+            (attrValues neigh);
+        in !callerCfg.enable || callerConfigured;
+      message = "NNCP caller enabled but call configuration is missing";
+    }];
+
+    systemd.services."nncp-caller" = {
+      inherit (callerCfg) enable;
+      description = "Croned NNCP TCP daemon caller.";
+      documentation = [ "http://www.nncpgo.org/nncp_002dcaller.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-caller -noprogress -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs callerCfg.extraArgs
+          }'';
+        Group = "uucp";
+        UMask = "0002";
+      };
+    };
+
+    systemd.services."nncp-daemon" = mkIf daemonCfg.enable {
+      enable = !daemonCfg.socketActivation.enable;
+      description = "NNCP TCP syncronization daemon.";
+      documentation = [ "http://www.nncpgo.org/nncp_002ddaemon.html" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-daemon -noprogress -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs daemonCfg.extraArgs
+          }'';
+        Restart = "on-failure";
+        Group = "uucp";
+        UMask = "0002";
+      };
+    };
+
+    systemd.services."nncp-daemon@" = mkIf daemonCfg.socketActivation.enable {
+      description = "NNCP TCP syncronization daemon.";
+      documentation = [ "http://www.nncpgo.org/nncp_002ddaemon.html" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nncp-daemon -noprogress -ucspi -cfg "${nncpCfgFile}" ${
+            lib.strings.escapeShellArgs daemonCfg.extraArgs
+          }'';
+        Group = "uucp";
+        UMask = "0002";
+        StandardInput = "socket";
+        StandardOutput = "inherit";
+        StandardError = "journal";
+      };
+    };
+
+    systemd.sockets.nncp-daemon = mkIf daemonCfg.socketActivation.enable {
+      inherit (daemonCfg.socketActivation) listenStreams;
+      description = "socket for NNCP TCP syncronization.";
+      conflicts = [ "nncp-daemon.service" ];
+      wantedBy = [ "sockets.target" ];
+      socketConfig.Accept = true;
+    };
+  };
+}
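
A minimal sketch of running the socket-activated daemon; node and neighbour configuration is expected to come from programs.nncp.settings, as the option descriptions above note:

  {
    services.nncp.daemon = {
      enable = true;
      socketActivation = {
        enable = true;
        listenStreams = [ "5400" ];
      };
    };
  }
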
diff --git a/nixpkgs/nixos/modules/services/networking/nntp-proxy.nix b/nixpkgs/nixos/modules/services/networking/nntp-proxy.nix
new file mode 100644
index 000000000000..b887c0e16ef4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nntp-proxy.nix
@@ -0,0 +1,234 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) nntp-proxy;
+
+  cfg = config.services.nntp-proxy;
+
+  configBool = b: if b then "TRUE" else "FALSE";
+
+  confFile = pkgs.writeText "nntp-proxy.conf" ''
+    nntp_server:
+    {
+      # NNTP Server host and port address
+      server = "${cfg.upstreamServer}";
+      port = ${toString cfg.upstreamPort};
+      # NNTP username
+      username = "${cfg.upstreamUser}";
+      # NNTP password in clear text
+      password = "${cfg.upstreamPassword}";
+      # Maximum number of connections allowed by the NNTP
+      max_connections = ${toString cfg.upstreamMaxConnections};
+    };
+
+    proxy:
+    {
+      # Local address and port to bind to
+      bind_ip = "${cfg.listenAddress}";
+      bind_port = ${toString cfg.port};
+
+      # SSL key and cert file
+      ssl_key = "${cfg.sslKey}";
+      ssl_cert = "${cfg.sslCert}";
+
+      # prohibit users from posting
+      prohibit_posting = ${configBool cfg.prohibitPosting};
+      # Verbose levels: ERROR, WARNING, NOTICE, INFO, DEBUG
+      verbose = "${toUpper cfg.verbosity}";
+      # Password is made with: 'mkpasswd -m sha-512 <password>'
+      users = (${concatStringsSep ",\n" (mapAttrsToList (username: userConfig:
+        ''
+          {
+              username = "${username}";
+              password = "${userConfig.passwordHash}";
+              max_connections = ${toString userConfig.maxConnections};
+          }
+        '') cfg.users)});
+    };
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.nntp-proxy = {
+      enable = mkEnableOption (lib.mdDoc "NNTP-Proxy");
+
+      upstreamServer = mkOption {
+        type = types.str;
+        default = "";
+        example = "ssl-eu.astraweb.com";
+        description = lib.mdDoc ''
+          Upstream server address
+        '';
+      };
+
+      upstreamPort = mkOption {
+        type = types.port;
+        default = 563;
+        description = lib.mdDoc ''
+          Upstream server port
+        '';
+      };
+
+      upstreamMaxConnections = mkOption {
+        type = types.int;
+        default = 20;
+        description = lib.mdDoc ''
+          Upstream server maximum allowed concurrent connections
+        '';
+      };
+
+      upstreamUser = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Upstream server username
+        '';
+      };
+
+      upstreamPassword = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Upstream server password
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        example = "[::]";
+        description = lib.mdDoc ''
+          Proxy listen address (IPv6 literal addresses need to be enclosed in "[" and "]" characters)
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5555;
+        description = lib.mdDoc ''
+          Proxy listen port
+        '';
+      };
+
+      sslKey = mkOption {
+        type = types.str;
+        default = "key.pem";
+        example = "/path/to/your/key.file";
+        description = lib.mdDoc ''
+          Proxy ssl key path
+        '';
+      };
+
+      sslCert = mkOption {
+        type = types.str;
+        default = "cert.pem";
+        example = "/path/to/your/cert.file";
+        description = lib.mdDoc ''
+          Proxy ssl certificate path
+        '';
+      };
+
+      prohibitPosting = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to prohibit posting to the upstream server
+        '';
+      };
+
+      verbosity = mkOption {
+        type = types.enum [ "error" "warning" "notice" "info" "debug" ];
+        default = "info";
+        example = "error";
+        description = lib.mdDoc ''
+          Verbosity level
+        '';
+      };
+
+      users = mkOption {
+        type = types.attrsOf (types.submodule {
+          options = {
+            username = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                Username
+              '';
+            };
+
+            passwordHash = mkOption {
+              type = types.str;
+              example = "$6$GtzE7FrpE$wwuVgFYU.TZH4Rz.Snjxk9XGua89IeVwPQ/fEUD8eujr40q5Y021yhn0aNcsQ2Ifw.BLclyzvzgegopgKcneL0";
+              description = lib.mdDoc ''
+                SHA-512 password hash (can be generated by
+                `mkpasswd -m sha-512 <password>`)
+              '';
+            };
+
+            maxConnections = mkOption {
+              type = types.int;
+              default = 1;
+              description = lib.mdDoc ''
+                Maximum number of concurrent connections to the proxy for this user
+              '';
+            };
+          };
+        });
+        description = lib.mdDoc ''
+          NNTP-Proxy user configuration
+        '';
+
+        default = {};
+        example = literalExpression ''
+          {
+            "user1" = {
+              passwordHash = "$6$1l0t5Kn2Dk$appzivc./9l/kjq57eg5UCsBKlcfyCr0zNWYNerKoPsI1d7eAwiT0SVsOVx/CTgaBNT/u4fi2vN.iGlPfv1ek0";
+              maxConnections = 5;
+            };
+            "anotheruser" = {
+              passwordHash = "$6$6lwEsWB.TmsS$W7m1riUx4QrA8pKJz8hvff0dnF1NwtZXgdjmGqA1Dx2MDPj07tI9GNcb0SWlMglE.2/hBgynDdAd/XqqtRqVQ0";
+              maxConnections = 7;
+            };
+          }
+        '';
+      };
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.nntp-proxy = {
+      isSystemUser = true;
+      group = "nntp-proxy";
+      description = "NNTP-Proxy daemon user";
+    };
+    users.groups.nntp-proxy = {};
+
+    systemd.services.nntp-proxy = {
+      description = "NNTP proxy";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = { User="nntp-proxy"; };
+      serviceConfig.ExecStart = "${nntp-proxy}/bin/nntp-proxy ${confFile}";
+      preStart = ''
+        if [ ! \( -f ${cfg.sslCert} -a -f ${cfg.sslKey} \) ]; then
+          ${pkgs.openssl.bin}/bin/openssl req -subj '/CN=AutoGeneratedCert/O=NixOS Service/C=US' \
+          -new -newkey rsa:2048 -days 365 -nodes -x509 -keyout ${cfg.sslKey} -out ${cfg.sslCert};
+        fi
+      '';
+    };
+
+  };
+
+}
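
A minimal sketch of a proxy definition; upstream server, credentials and the password hash are illustrative (the hash would come from `mkpasswd -m sha-512` as described above):

  {
    services.nntp-proxy = {
      enable = true;
      upstreamServer = "ssl-eu.astraweb.com";
      upstreamUser = "myuser";
      upstreamPassword = "example-password";
      listenAddress = "127.0.0.1";
      port = 5555;
      users."user1" = {
        passwordHash = "<output of mkpasswd -m sha-512>";
        maxConnections = 5;
      };
    };
  }
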
diff --git a/nixpkgs/nixos/modules/services/networking/nomad.nix b/nixpkgs/nixos/modules/services/networking/nomad.nix
new file mode 100644
index 000000000000..b1e51195247a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nomad.nix
@@ -0,0 +1,198 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.nomad;
+  format = pkgs.formats.json { };
+in
+{
+  ##### interface
+  options = {
+    services.nomad = {
+      enable = mkEnableOption (lib.mdDoc "Nomad, a distributed, highly available, datacenter-aware scheduler");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.nomad;
+        defaultText = literalExpression "pkgs.nomad";
+        description = lib.mdDoc ''
+          The package used for the Nomad agent and CLI.
+        '';
+      };
+
+      extraPackages = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        description = lib.mdDoc ''
+          Extra packages to add to {env}`PATH` for the Nomad agent process.
+        '';
+        example = literalExpression ''
+          with pkgs; [ cni-plugins ]
+        '';
+      };
+
+      dropPrivileges = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether the nomad agent should be run as a non-root nomad user.
+        '';
+      };
+
+      enableDocker = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable Docker support. Needed for Nomad's docker driver.
+
+          Note that the docker group membership is effectively equivalent
+          to being root, see https://github.com/moby/moby/issues/9976.
+        '';
+      };
+
+      extraSettingsPaths = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional settings paths used to configure nomad. These can be files or directories.
+        '';
+        example = literalExpression ''
+          [ "/etc/nomad-mutable.json" "/run/keys/nomad-with-secrets.json" "/etc/nomad/config.d" ]
+        '';
+      };
+
+      extraSettingsPlugins = mkOption {
+        type = types.listOf (types.either types.package types.path);
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional plugins (packages or directories) used to populate Nomad's plugin dir.
+        '';
+        example = literalExpression ''
+          [ "<pluginDir>" pkgs.nomad-driver-nix pkgs.nomad-driver-podman  ]
+        '';
+      };
+
+      credentials = mkOption {
+        description = lib.mdDoc ''
+          Credentials to load via systemd's LoadCredential, used to configure Nomad secrets.
+        '';
+        type = types.attrsOf types.str;
+        default = { };
+
+        example = {
+          logs_remote_write_password = "/run/keys/nomad_write_password";
+        };
+      };
+
+      settings = mkOption {
+        type = format.type;
+        default = { };
+        description = lib.mdDoc ''
+          Configuration for Nomad. See the [documentation](https://www.nomadproject.io/docs/configuration)
+          for supported values.
+
+          Notes about `data_dir`:
+
+          If `data_dir` is set to a value other than the
+          default value of `"/var/lib/nomad"` it is the Nomad
+          cluster manager's responsibility to make sure that this directory
+          exists and has the appropriate permissions.
+
+          Additionally, if `dropPrivileges` is
+          `true` then `data_dir`
+          *cannot* be customized. Setting
+          `dropPrivileges` to `true` enables
+          the `DynamicUser` feature of systemd which directly
+          manages and operates on `StateDirectory`.
+        '';
+        example = literalExpression ''
+          {
+            # A minimal config example:
+            server = {
+              enabled = true;
+              bootstrap_expect = 1; # for demo; no fault tolerance
+            };
+            client = {
+              enabled = true;
+            };
+          }
+        '';
+      };
+    };
+  };
+
+  ##### implementation
+  config = mkIf cfg.enable {
+    services.nomad.settings = {
+      # Agrees with `StateDirectory = "nomad"` set below.
+      data_dir = mkDefault "/var/lib/nomad";
+    };
+
+    environment = {
+      etc."nomad.json".source = format.generate "nomad.json" cfg.settings;
+      systemPackages = [ cfg.package ];
+    };
+
+    systemd.services.nomad = {
+      description = "Nomad";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      restartTriggers = [ config.environment.etc."nomad.json".source ];
+
+      path = cfg.extraPackages ++ (with pkgs; [
+        # Client mode requires at least the following:
+        coreutils
+        iproute2
+        iptables
+      ]);
+
+      serviceConfig = mkMerge [
+        {
+          DynamicUser = cfg.dropPrivileges;
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          ExecStart =
+            let
+              pluginsDir = pkgs.symlinkJoin
+                {
+                  name = "nomad-plugins";
+                  paths = cfg.extraSettingsPlugins;
+                };
+            in
+            "${cfg.package}/bin/nomad agent -config=/etc/nomad.json -plugin-dir=${pluginsDir}/bin" +
+            concatMapStrings (path: " -config=${path}") cfg.extraSettingsPaths +
+            concatMapStrings (key: " -config=\${CREDENTIALS_DIRECTORY}/${key}") (lib.attrNames cfg.credentials);
+          KillMode = "process";
+          KillSignal = "SIGINT";
+          LimitNOFILE = 65536;
+          LimitNPROC = "infinity";
+          OOMScoreAdjust = -1000;
+          Restart = "on-failure";
+          RestartSec = 2;
+          TasksMax = "infinity";
+          LoadCredential = lib.mapAttrsToList (key: value: "${key}:${value}") cfg.credentials;
+        }
+        (mkIf cfg.enableDocker {
+          SupplementaryGroups = "docker"; # space-separated string
+        })
+        (mkIf (cfg.settings.data_dir == "/var/lib/nomad") {
+          StateDirectory = "nomad";
+        })
+      ];
+
+      unitConfig = {
+        StartLimitIntervalSec = 10;
+        StartLimitBurst = 3;
+      };
+    };
+
+    assertions = [
+      {
+        assertion = cfg.dropPrivileges -> cfg.settings.data_dir == "/var/lib/nomad";
+        message = "settings.data_dir must be equal to \"/var/lib/nomad\" if dropPrivileges is true";
+      }
+    ];
+
+    # Docker support requires the Docker daemon to be running.
+    virtualisation.docker.enable = mkIf cfg.enableDocker true;
+  };
+}
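
A minimal sketch of a combined single-node server/client agent, mirroring the example from the `settings` option; cni-plugins as an extra package is illustrative:

  { pkgs, ... }:
  {
    services.nomad = {
      enable = true;
      extraPackages = [ pkgs.cni-plugins ];
      settings = {
        server = {
          enabled = true;
          bootstrap_expect = 1;  # for demo; no fault tolerance
        };
        client.enabled = true;
      };
    };
  }
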
diff --git a/nixpkgs/nixos/modules/services/networking/nsd.nix b/nixpkgs/nixos/modules/services/networking/nsd.nix
new file mode 100644
index 000000000000..6db728e7aa5a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nsd.nix
@@ -0,0 +1,991 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nsd;
+
+  username = "nsd";
+  stateDir = "/var/lib/nsd";
+  pidFile = stateDir + "/var/nsd.pid";
+
+  # build nsd with the options needed for the given config
+  nsdPkg = pkgs.nsd.override {
+    bind8Stats = cfg.bind8Stats;
+    ipv6 = cfg.ipv6;
+    ratelimit = cfg.ratelimit.enable;
+    rootServer = cfg.rootServer;
+    zoneStats = length (collect (x: (x.zoneStats or null) != null) cfg.zones) > 0;
+  };
+
+  mkZoneFileName = name: if name == "." then "root" else name;
+
+  # replaces include: directives for keys with fake keys for nsd-checkconf
+  injectFakeKeys = keys: concatStrings
+    (mapAttrsToList
+      (keyName: keyOptions: ''
+        fakeKey="$(${pkgs.bind}/bin/tsig-keygen -a ${escapeShellArgs [ keyOptions.algorithm keyName ]} | grep -oP "\s*secret \"\K.*(?=\";)")"
+        sed "s@^\s*include:\s*\"${stateDir}/private/${keyName}\"\$@secret: $fakeKey@" -i $out/nsd.conf
+      '')
+      keys);
+
+  nsdEnv = pkgs.buildEnv {
+    name = "nsd-env";
+
+    paths = [ configFile ]
+      ++ mapAttrsToList (name: zone: writeZoneData name zone.data) zoneConfigs;
+
+    postBuild = ''
+      echo "checking zone files"
+      cd $out/zones
+
+      for zoneFile in *; do
+        echo "|- checking zone '$out/zones/$zoneFile'"
+        ${nsdPkg}/sbin/nsd-checkzone "$zoneFile" "$zoneFile" || {
+          if grep -q \\\\\\$ "$zoneFile"; then
+            echo zone "$zoneFile" contains escaped dollar signs \\\$
+            echo Escaping them is not needed any more. Please make sure \
+                 to unescape them where they prefix a variable name.
+          fi
+
+          exit 1
+        }
+      done
+
+      echo "checking configuration file"
+      # Save original config file including key references...
+      cp $out/nsd.conf{,.orig}
+      # ...inject mock keys into config
+      ${injectFakeKeys cfg.keys}
+      # ...do the checkconf
+      ${nsdPkg}/sbin/nsd-checkconf $out/nsd.conf
+      # ... and restore original config file.
+      mv $out/nsd.conf{.orig,}
+    '';
+  };
+
+  writeZoneData = name: text: pkgs.writeTextFile {
+    name = "nsd-zone-${mkZoneFileName name}";
+    inherit text;
+    destination = "/zones/${mkZoneFileName name}";
+  };
+
+
+  # options are ordered alphanumerically by the nixos option name
+  configFile = pkgs.writeTextDir "nsd.conf" ''
+    server:
+      chroot:   "${stateDir}"
+      username: ${username}
+
+      # The directory for zonefile: files. The daemon chdirs here.
+      zonesdir: "${stateDir}"
+
+      # the list of dynamically added zones.
+      database:     "${stateDir}/var/nsd.db"
+      pidfile:      "${pidFile}"
+      xfrdfile:     "${stateDir}/var/xfrd.state"
+      xfrdir:       "${stateDir}/tmp"
+      zonelistfile: "${stateDir}/var/zone.list"
+
+      # interfaces
+    ${forEach "  ip-address: " cfg.interfaces}
+
+      ip-freebind:         ${yesOrNo  cfg.ipFreebind}
+      hide-version:        ${yesOrNo  cfg.hideVersion}
+      identity:            "${cfg.identity}"
+      ip-transparent:      ${yesOrNo  cfg.ipTransparent}
+      do-ip4:              ${yesOrNo  cfg.ipv4}
+      ipv4-edns-size:      ${toString cfg.ipv4EDNSSize}
+      do-ip6:              ${yesOrNo  cfg.ipv6}
+      ipv6-edns-size:      ${toString cfg.ipv6EDNSSize}
+      log-time-ascii:      ${yesOrNo  cfg.logTimeAscii}
+      ${maybeString "nsid: " cfg.nsid}
+      port:                ${toString cfg.port}
+      reuseport:           ${yesOrNo  cfg.reuseport}
+      round-robin:         ${yesOrNo  cfg.roundRobin}
+      server-count:        ${toString cfg.serverCount}
+      ${maybeToString "statistics: " cfg.statistics}
+      tcp-count:           ${toString cfg.tcpCount}
+      tcp-query-count:     ${toString cfg.tcpQueryCount}
+      tcp-timeout:         ${toString cfg.tcpTimeout}
+      verbosity:           ${toString cfg.verbosity}
+      ${maybeString "version: " cfg.version}
+      xfrd-reload-timeout: ${toString cfg.xfrdReloadTimeout}
+      zonefiles-check:     ${yesOrNo  cfg.zonefilesCheck}
+
+      ${maybeString "rrl-ipv4-prefix-length: " cfg.ratelimit.ipv4PrefixLength}
+      ${maybeString "rrl-ipv6-prefix-length: " cfg.ratelimit.ipv6PrefixLength}
+      rrl-ratelimit:           ${toString cfg.ratelimit.ratelimit}
+      ${maybeString "rrl-slip: "               cfg.ratelimit.slip}
+      rrl-size:                ${toString cfg.ratelimit.size}
+      rrl-whitelist-ratelimit: ${toString cfg.ratelimit.whitelistRatelimit}
+
+    ${keyConfigFile}
+
+    remote-control:
+      control-enable:    ${yesOrNo  cfg.remoteControl.enable}
+      control-key-file:  "${cfg.remoteControl.controlKeyFile}"
+      control-cert-file: "${cfg.remoteControl.controlCertFile}"
+    ${forEach "  control-interface: " cfg.remoteControl.interfaces}
+      control-port:      ${toString cfg.remoteControl.port}
+      server-key-file:   "${cfg.remoteControl.serverKeyFile}"
+      server-cert-file:  "${cfg.remoteControl.serverCertFile}"
+
+    ${concatStrings (mapAttrsToList zoneConfigFile zoneConfigs)}
+
+    ${cfg.extraConfig}
+  '';
+
+  yesOrNo = b: if b then "yes" else "no";
+  maybeString = prefix: x: optionalString (x != null) ''${prefix} "${x}"'';
+  maybeToString = prefix: x: optionalString (x != null) ''${prefix} ${toString x}'';
+  forEach = pre: l: concatMapStrings (x: pre + x + "\n") l;
+
+
+  keyConfigFile = concatStrings (mapAttrsToList (keyName: keyOptions: ''
+    key:
+      name:      "${keyName}"
+      algorithm: "${keyOptions.algorithm}"
+      include:   "${stateDir}/private/${keyName}"
+  '') cfg.keys);
+
+  copyKeys = concatStrings (mapAttrsToList (keyName: keyOptions: ''
+    secret=$(cat "${keyOptions.keyFile}")
+    dest="${stateDir}/private/${keyName}"
+    echo "  secret: \"$secret\"" > "$dest"
+    chown ${username}:${username} "$dest"
+    chmod 0400 "$dest"
+  '') cfg.keys);
+
+
+  # options are ordered alphanumerically by the nixos option name
+  zoneConfigFile = name: zone: ''
+    zone:
+      name:         "${name}"
+      zonefile:     "${stateDir}/zones/${mkZoneFileName name}"
+      ${maybeString "outgoing-interface: " zone.outgoingInterface}
+    ${forEach     "  rrl-whitelist: "      zone.rrlWhitelist}
+      ${maybeString "zonestats: "          zone.zoneStats}
+
+      ${maybeToString "max-refresh-time: " zone.maxRefreshSecs}
+      ${maybeToString "min-refresh-time: " zone.minRefreshSecs}
+      ${maybeToString "max-retry-time:   " zone.maxRetrySecs}
+      ${maybeToString "min-retry-time:   " zone.minRetrySecs}
+
+      allow-axfr-fallback: ${yesOrNo       zone.allowAXFRFallback}
+    ${forEach     "  allow-notify: "       zone.allowNotify}
+    ${forEach     "  request-xfr: "        zone.requestXFR}
+
+    ${forEach     "  notify: "             zone.notify}
+      notify-retry:                        ${toString zone.notifyRetry}
+    ${forEach     "  provide-xfr: "        zone.provideXFR}
+  '';
+
+  zoneConfigs = zoneConfigs' {} "" { children = cfg.zones; };
+
+  zoneConfigs' = parent: name: zone:
+    if !(zone ? children) || zone.children == null || zone.children == { }
+      # leaf -> actual zone
+      then listToAttrs [ (nameValuePair name (parent // zone)) ]
+
+      # fork -> pattern
+      else zipAttrsWith (name: head) (
+        mapAttrsToList (name: child: zoneConfigs' (parent // zone // { children = {}; }) name child)
+                       zone.children
+      );
+
+  # options are ordered alphanumerically
+  zoneOptions = types.submodule {
+    options = {
+
+      allowAXFRFallback = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether NSD as a secondary server should be allowed to AXFR if the primary
+          server does not allow IXFR.
+        '';
+      };
+
+      allowNotify = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "192.0.2.0/24 NOKEY" "10.0.0.1-10.0.0.5 my_tsig_key_name"
+                    "10.0.3.4&255.255.0.0 BLOCKED"
+                  ];
+        description = lib.mdDoc ''
+          Listed primary servers are allowed to notify this secondary server.
+
+          Format: `<ip> <key-name | NOKEY | BLOCKED>`
+
+          `<ip>` either a plain IPv4/IPv6 address or range.
+          Valid patterns for ranges:
+          * `10.0.0.0/24`: via subnet size
+          * `10.0.0.0&255.255.255.0`: via subnet mask
+          * `10.0.0.1-10.0.0.254`: via range
+
+          An optional port number can be added with '@':
+          * `2001:1234::1@1234`
+
+          `<key-name | NOKEY | BLOCKED>`
+          * `<key-name>` will use the specified TSIG key
+          * `NOKEY` no TSIG signature is required
+          * `BLOCKED` notifies from non-listed or blocked IPs will be ignored
+        '';
+      };
+
+      children = mkOption {
+        # TODO: This relies on the fact that `types.anything` doesn't set any
+        # values of its own to any defaults, because in the above zoneConfigs',
+        # values from children override ones from parents, but only if the
+        # attributes are defined. Because of this, we can't replace the element
+        # type here with `zoneConfigs`, since that would set all the attributes
+        # to default values, breaking the parent inheriting function.
+        type = types.attrsOf types.anything;
+        default = {};
+        description = lib.mdDoc ''
+          Child zones inherit all options of their parents. Attributes
+          defined in a child overwrite the ones of its parent. Only
+          leaf zones are actually served. This makes it possible to
+          define many zones which share most attributes without
+          duplicating everything. This mechanism replaces nsd's patterns
+          in a safe and functional way.
+        '';
+      };
+
+      data = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          The actual zone data. This is the content of your zone file.
+          Use imports or pkgs.lib.readFile if you don't want this data in your config file.
+        '';
+      };
+
+      dnssec = mkEnableOption (lib.mdDoc "DNSSEC");
+
+      dnssecPolicy = {
+        algorithm = mkOption {
+          type = types.str;
+          default = "RSASHA256";
+          description = lib.mdDoc "Which algorithm to use for DNSSEC";
+        };
+        keyttl = mkOption {
+          type = types.str;
+          default = "1h";
+          description = lib.mdDoc "TTL for dnssec records";
+        };
+        coverage = mkOption {
+          type = types.str;
+          default = "1y";
+          description = lib.mdDoc ''
+            The length of time to ensure that keys will be correct; no action will be taken to create new keys to be activated after this time.
+          '';
+        };
+        zsk = mkOption {
+          type = keyPolicy;
+          default = { keySize = 2048;
+                      prePublish = "1w";
+                      postPublish = "1w";
+                      rollPeriod = "1mo";
+                    };
+          description = lib.mdDoc "Key policy for zone signing keys";
+        };
+        ksk = mkOption {
+          type = keyPolicy;
+          default = { keySize = 4096;
+                      prePublish = "1mo";
+                      postPublish = "1mo";
+                      rollPeriod = "0";
+                    };
+          description = lib.mdDoc "Key policy for key signing keys";
+        };
+      };
+
+      maxRefreshSecs = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Limit refresh time for secondary zones. This is the timer which
+          checks to see if the zone has to be refetched when it expires.
+          Normally the value from the SOA record is used, but this  option
+          restricts that value.
+        '';
+      };
+
+      minRefreshSecs = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Limit refresh time for secondary zones.
+        '';
+      };
+
+      maxRetrySecs = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Limit retry time for secondary zones. This is the timeout after
+          a failed fetch attempt for the zone. Normally the value from
+          the SOA record is used, but this option restricts that value.
+        '';
+      };
+
+      minRetrySecs = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Limit retry time for secondary zones.
+        '';
+      };
+
+
+      notify = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "10.0.0.1@3721 my_key" "::5 NOKEY" ];
+        description = lib.mdDoc ''
+          This primary server will notify all given secondary servers about
+          zone changes.
+
+          Format: `<ip> <key-name | NOKEY>`
+
+          `<ip>` a plain IPv4/IPv6 address with an optional port number (ip@port)
+
+          `<key-name | NOKEY>`
+          - `<key-name>` sign notifies with the specified key
+          - `NOKEY` don't sign notifies
+        '';
+      };
+
+      notifyRetry = mkOption {
+        type = types.int;
+        default = 5;
+        description = lib.mdDoc ''
+          Specifies the number of retries for failed notifies. Set this along with notify.
+        '';
+      };
+
+      outgoingInterface = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "2000::1@1234";
+        description = lib.mdDoc ''
+          This address will be used for zone-transfer requests if configured
+          as a secondary server, or for notifications if configured as a
+          primary server.
+          Supply either a plain IPv4 or IPv6 address with an optional port
+          number (ip@port).
+        '';
+      };
+
+      provideXFR = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "192.0.2.0/24 NOKEY" "192.0.2.0/24 my_tsig_key_name" ];
+        description = lib.mdDoc ''
+          Allow these IPs and TSIG keys to transfer zones.
+
+          Format: `<ip> <key-name | NOKEY | BLOCKED>`, where `<ip>` may also be
+          an address range such as `192.0.2.0/24`, `1.2.3.4&255.255.0.0` or
+          `3.0.2.20-3.0.2.40`.
+        '';
+      };
+
+      requestXFR = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Format: `[AXFR|UDP] <ip-address> <key-name | NOKEY>`
+        '';
+      };
+
+      rrlWhitelist = mkOption {
+        type = with types; listOf (enum [ "nxdomain" "error" "referral" "any" "rrsig" "wildcard" "nodata" "dnskey" "positive" "all" ]);
+        default = [];
+        description = lib.mdDoc ''
+          Whitelists the given rrl-types.
+        '';
+      };
+
+      zoneStats = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "%s";
+        description = lib.mdDoc ''
+          When set to something other than null, NSD collects statistics
+          per zone. All statistics of this zone (or zones) will be added
+          to the group with the given name. Use "%s" to use the zone's
+          name as the group. The groups are output by nsd-control stats
+          and stats_noreset.
+        '';
+      };
+    };
+  };
+
+  keyPolicy = types.submodule {
+    options = {
+      keySize = mkOption {
+        type = types.int;
+        description = lib.mdDoc "Key size in bits";
+      };
+      prePublish = mkOption {
+        type = types.str;
+        description = lib.mdDoc "How long in advance to publish new keys";
+      };
+      postPublish = mkOption {
+        type = types.str;
+        description = lib.mdDoc "How long after deactivation to keep a key in the zone";
+      };
+      rollPeriod = mkOption {
+        type = types.str;
+        description = lib.mdDoc "How frequently to change keys";
+      };
+    };
+  };
+
+  dnssecZones = (filterAttrs (n: v: if v ? dnssec then v.dnssec else false) zoneConfigs);
+
+  dnssec = dnssecZones != {};
+
+  dnssecTools = pkgs.bind.override { enablePython = true; };
+
+  signZones = optionalString dnssec ''
+    mkdir -p ${stateDir}/dnssec
+    chown ${username}:${username} ${stateDir}/dnssec
+    chmod 0600 ${stateDir}/dnssec
+
+    ${concatStrings (mapAttrsToList signZone dnssecZones)}
+  '';
+  signZone = name: zone: ''
+    ${dnssecTools}/bin/dnssec-keymgr -g ${dnssecTools}/bin/dnssec-keygen -s ${dnssecTools}/bin/dnssec-settime -K ${stateDir}/dnssec -c ${policyFile name zone.dnssecPolicy} ${name}
+    ${dnssecTools}/bin/dnssec-signzone -S -K ${stateDir}/dnssec -o ${name} -O full -N date ${stateDir}/zones/${name}
+    ${nsdPkg}/sbin/nsd-checkzone ${name} ${stateDir}/zones/${name}.signed && mv -v ${stateDir}/zones/${name}.signed ${stateDir}/zones/${name}
+  '';
+  policyFile = name: policy: pkgs.writeText "${name}.policy" ''
+    zone ${name} {
+      algorithm ${policy.algorithm};
+      key-size zsk ${toString policy.zsk.keySize};
+      key-size ksk ${toString policy.ksk.keySize};
+      keyttl ${policy.keyttl};
+      pre-publish zsk ${policy.zsk.prePublish};
+      pre-publish ksk ${policy.ksk.prePublish};
+      post-publish zsk ${policy.zsk.postPublish};
+      post-publish ksk ${policy.ksk.postPublish};
+      roll-period zsk ${policy.zsk.rollPeriod};
+      roll-period ksk ${policy.ksk.rollPeriod};
+      coverage ${policy.coverage};
+    };
+  '';
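+  # For illustration (comment only), a zone "example.com." with the default
+  # dnssecPolicy renders roughly as:
+  #   zone example.com. {
+  #     algorithm RSASHA256;
+  #     key-size zsk 2048;
+  #     key-size ksk 4096;
+  #     keyttl 1h;
+  #     ...
+  #   };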
+in
+{
+  # options are ordered alphanumerically
+  options.services.nsd = {
+
+    enable = mkEnableOption (lib.mdDoc "NSD authoritative DNS server");
+
+    bind8Stats = mkEnableOption (lib.mdDoc "BIND8 like statistics");
+
+    dnssecInterval = mkOption {
+      type = types.str;
+      default = "1h";
+      description = lib.mdDoc ''
+        How often to check whether DNSSEC key rollover is required.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra nsd config.
+      '';
+    };
+
+    hideVersion = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether NSD should refuse to answer VERSION.BIND and VERSION.SERVER CHAOS class queries.
+      '';
+    };
+
+    identity = mkOption {
+      type = types.str;
+      default = "unidentified server";
+      description = lib.mdDoc ''
+        Identify the server (CH TXT ID.SERVER entry).
+      '';
+    };
+
+    interfaces = mkOption {
+      type = types.listOf types.str;
+      default = [ "127.0.0.0" "::1" ];
+      description = lib.mdDoc ''
+        What addresses the server should listen to.
+      '';
+    };
+
+    ipFreebind = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to bind to nonlocal addresses and interfaces that are down.
+        Similar to ip-transparent.
+      '';
+    };
+
+    ipTransparent = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Allow binding to non-local addresses.
+      '';
+    };
+
+    ipv4 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to listen on IPv4 connections.
+      '';
+    };
+
+    ipv4EDNSSize = mkOption {
+      type = types.int;
+      default = 4096;
+      description = lib.mdDoc ''
+        Preferred EDNS buffer size for IPv4.
+      '';
+    };
+
+    ipv6 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to listen on IPv6 connections.
+      '';
+    };
+
+    ipv6EDNSSize = mkOption {
+      type = types.int;
+      default = 4096;
+      description = lib.mdDoc ''
+        Preferred EDNS buffer size for IPv6.
+      '';
+    };
+
+    logTimeAscii = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Log time in ASCII; if false, log in Unix epoch seconds.
+      '';
+    };
+
+    nsid = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        NSID identity (hex string, or "ascii_somestring").
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 53;
+      description = lib.mdDoc ''
+        Port the service should bind to.
+      '';
+    };
+
+    reuseport = mkOption {
+      type = types.bool;
+      default = pkgs.stdenv.isLinux;
+      defaultText = literalExpression "pkgs.stdenv.isLinux";
+      description = lib.mdDoc ''
+        Whether to enable SO_REUSEPORT on all used sockets. This lets multiple
+        processes bind to the same port. This speeds up operation especially
+        if the server count is greater than one, and makes fast restarts less
+        prone to failure.
+      '';
+    };
+
+    rootServer = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether this server will be a root server (a DNS root server, you
+        usually don't want that).
+      '';
+    };
+
+    roundRobin = mkEnableOption (lib.mdDoc "round robin rotation of records");
+
+    serverCount = mkOption {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc ''
+        Number of NSD servers to fork. Put the number of CPUs to use here.
+      '';
+    };
+
+    statistics = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      description = lib.mdDoc ''
+        Produce statistics every given number of seconds and print them
+        to the log. If null, no statistics are logged.
+      '';
+    };
+
+    tcpCount = mkOption {
+      type = types.int;
+      default = 100;
+      description = lib.mdDoc ''
+        Maximum number of concurrent TCP connections per server.
+      '';
+    };
+
+    tcpQueryCount = mkOption {
+      type = types.int;
+      default = 0;
+      description = lib.mdDoc ''
+        Maximum number of queries served on a single TCP connection.
+        0 means no maximum.
+      '';
+    };
+
+    tcpTimeout = mkOption {
+      type = types.int;
+      default = 120;
+      description = lib.mdDoc ''
+        TCP timeout in seconds.
+      '';
+    };
+
+    verbosity = mkOption {
+      type = types.int;
+      default = 0;
+      description = lib.mdDoc ''
+        Verbosity level.
+      '';
+    };
+
+    version = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The version string replied for CH TXT version.server and version.bind
+        queries. If null, the compiled package version is used.
+        See hideVersion for enabling/disabling these responses.
+      '';
+    };
+
+    xfrdReloadTimeout = mkOption {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc ''
+        Number of seconds between reloads triggered by xfrd.
+      '';
+    };
+
+    zonefilesCheck = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to check mtime of all zone files on start and sighup.
+      '';
+    };
+
+
+    keys = mkOption {
+      type = types.attrsOf (types.submodule {
+        options = {
+
+          algorithm = mkOption {
+            type = types.str;
+            default = "hmac-sha256";
+            description = lib.mdDoc ''
+              Authentication algorithm for this key.
+            '';
+          };
+
+          keyFile = mkOption {
+            type = types.path;
+            description = lib.mdDoc ''
+              Path to the file which contains the actual base64 encoded
+              key. The key will be copied into "${stateDir}/private" before
+              NSD starts. The copied file is only accessible by the NSD
+              user.
+            '';
+          };
+
+        };
+      });
+      default = {};
+      example = literalExpression ''
+        { "tsig.example.org" = {
+            algorithm = "hmac-md5";
+            keyFile = "/path/to/my/key";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Define your TSIG keys here.
+      '';
+    };
+
+
+    ratelimit = {
+
+      enable = mkEnableOption (lib.mdDoc "ratelimit capabilities");
+
+      ipv4PrefixLength = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          IPv4 prefix length. Addresses are grouped by netblock.
+        '';
+      };
+
+      ipv6PrefixLength = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          IPv6 prefix length. Addresses are grouped by netblock.
+        '';
+      };
+
+      ratelimit = mkOption {
+        type = types.int;
+        default = 200;
+        description = lib.mdDoc ''
+          Max qps allowed from any query source.
+          0 means unlimited. With a verbosity of 2, blocked and
+          unblocked subnets will be logged.
+        '';
+      };
+
+      slip = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Number of packets that get discarded before replying with a SLIP response.
+          0 disables SLIP responses. 1 will make every response a SLIP response.
+        '';
+      };
+
+      size = mkOption {
+        type = types.int;
+        default = 1000000;
+        description = lib.mdDoc ''
+          Size of the hashtable. More buckets use more memory but lower
+          the chance of hash collisions.
+        '';
+      };
+
+      whitelistRatelimit = mkOption {
+        type = types.int;
+        default = 2000;
+        description = lib.mdDoc ''
+          Max qps allowed from whitelisted sources.
+          0 means unlimited. Set the rrl-whitelist option for specific
+          queries to apply this limit instead of the default to them.
+        '';
+      };
+
+    };
+
+
+    remoteControl = {
+
+      enable = mkEnableOption (lib.mdDoc "remote control via nsd-control");
+
+      controlCertFile = mkOption {
+        type = types.path;
+        default = "/etc/nsd/nsd_control.pem";
+        description = lib.mdDoc ''
+          Path to the client certificate signed with the server certificate.
+          This file is used by nsd-control and generated by nsd-control-setup.
+        '';
+      };
+
+      controlKeyFile = mkOption {
+        type = types.path;
+        default = "/etc/nsd/nsd_control.key";
+        description = lib.mdDoc ''
+          Path to the client private key, which is used by nsd-control
+          but not by the server. This file is generated by nsd-control-setup.
+        '';
+      };
+
+      interfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ "127.0.0.1" "::1" ];
+        description = lib.mdDoc ''
+          Which interfaces NSD should bind to for remote control.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8952;
+        description = lib.mdDoc ''
+          Port number for remote control operations (uses TLS over TCP).
+        '';
+      };
+
+      serverCertFile = mkOption {
+        type = types.path;
+        default = "/etc/nsd/nsd_server.pem";
+        description = lib.mdDoc ''
+          Path to the server's self-signed certificate, which is used by the
+          server and by nsd-control. This file is generated by nsd-control-setup.
+        '';
+      };
+
+      serverKeyFile = mkOption {
+        type = types.path;
+        default = "/etc/nsd/nsd_server.key";
+        description = lib.mdDoc ''
+          Path to the server private key, which is used by the server
+          but not by nsd-control. This file is generated by nsd-control-setup.
+        '';
+      };
+
+    };
+
+    zones = mkOption {
+      type = types.attrsOf zoneOptions;
+      default = {};
+      example = literalExpression ''
+        { "serverGroup1" = {
+            provideXFR = [ "10.1.2.3 NOKEY" ];
+            children = {
+              "example.com." = {
+                data = '''
+                  $ORIGIN example.com.
+                  $TTL    86400
+                  @ IN SOA a.ns.example.com. admin.example.com. (
+                  ...
+                ''';
+              };
+              "example.org." = {
+                data = '''
+                  $ORIGIN example.org.
+                  $TTL    86400
+                  @ IN SOA a.ns.example.com. admin.example.com. (
+                  ...
+                ''';
+              };
+            };
+          };
+
+          "example.net." = {
+            provideXFR = [ "10.3.2.1 NOKEY" ];
+            data = '''
+              ...
+            ''';
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Define your zones here. Zones can cascade other zones and therefore
+        inherit settings from parent zones. Look at the definition of
+        children to learn about inheritance and child zones.
+        The given example will define 3 zones (example.(com|org|net).). Both
+        example.com. and example.org. inherit their configuration from
+        serverGroup1.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = singleton {
+      assertion = zoneConfigs ? "." -> cfg.rootServer;
+      message = "You have a root zone configured. If this is really what you "
+              + "want, please enable 'services.nsd.rootServer'.";
+    };
+
+    environment = {
+      systemPackages = [ nsdPkg ];
+      etc."nsd/nsd.conf".source = "${configFile}/nsd.conf";
+    };
+
+    users.groups.${username}.gid = config.ids.gids.nsd;
+
+    users.users.${username} = {
+      description = "NSD service user";
+      home = stateDir;
+      createHome  = true;
+      uid = config.ids.uids.nsd;
+      group = username;
+    };
+
+    systemd.services.nsd = {
+      description = "NSD authoritative only domain name service";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      startLimitBurst = 4;
+      startLimitIntervalSec = 5 * 60;  # 5 mins
+      serviceConfig = {
+        ExecStart = "${nsdPkg}/sbin/nsd -d -c ${nsdEnv}/nsd.conf";
+        StandardError = "null";
+        PIDFile = pidFile;
+        Restart = "always";
+        RestartSec = "4s";
+      };
+
+      preStart = ''
+        rm -Rf "${stateDir}/private/"
+        rm -Rf "${stateDir}/tmp/"
+
+        mkdir -m 0700 -p "${stateDir}/private"
+        mkdir -m 0700 -p "${stateDir}/tmp"
+        mkdir -m 0700 -p "${stateDir}/var"
+
+        cat > "${stateDir}/don't touch anything in here" << EOF
+        Everything in this directory except NSD's state in var and dnssec
+        is automatically generated and will be purged and redeployed by
+        the nsd.service pre-start script.
+        EOF
+
+        chown ${username}:${username} -R "${stateDir}/private"
+        chown ${username}:${username} -R "${stateDir}/tmp"
+        chown ${username}:${username} -R "${stateDir}/var"
+
+        rm -rf "${stateDir}/zones"
+        cp -rL "${nsdEnv}/zones" "${stateDir}/zones"
+
+        ${copyKeys}
+      '';
+    };
+
+    systemd.timers.nsd-dnssec = mkIf dnssec {
+      description = "Automatic DNSSEC key rollover";
+
+      wantedBy = [ "nsd.service" ];
+
+      timerConfig = {
+        OnActiveSec = cfg.dnssecInterval;
+        OnUnitActiveSec = cfg.dnssecInterval;
+      };
+    };
+
+    systemd.services.nsd-dnssec = mkIf dnssec {
+      description = "DNSSEC key rollover";
+
+      wantedBy = [ "nsd.service" ];
+      before = [ "nsd.service" ];
+
+      script = signZones;
+
+      postStop = ''
+        /run/current-system/systemd/bin/systemctl kill -s SIGHUP nsd.service
+      '';
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ hrdinka ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ntopng.nix b/nixpkgs/nixos/modules/services/networking/ntopng.nix
new file mode 100644
index 000000000000..a47ee0773d17
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ntopng.nix
@@ -0,0 +1,160 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.ntopng;
+  opt = options.services.ntopng;
+
+  createRedis = cfg.redis.createInstance != null;
+  redisService =
+    if cfg.redis.createInstance == "" then
+      "redis.service"
+    else
+      "redis-${cfg.redis.createInstance}.service";
+
+  configFile = if cfg.configText != "" then
+    pkgs.writeText "ntopng.conf" ''
+      ${cfg.configText}
+    ''
+    else
+    pkgs.writeText "ntopng.conf" ''
+      ${concatStringsSep "\n" (map (e: "--interface=${e}") cfg.interfaces)}
+      --http-port=${toString cfg.httpPort}
+      --redis=${cfg.redis.address}
+      --data-dir=/var/lib/ntopng
+      --user=ntopng
+      ${cfg.extraConfig}
+    '';
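+  # Illustrative sketch (comment only): with the default options and a local
+  # Redis instance, the generated ntopng.conf is roughly
+  #   --interface=any
+  #   --http-port=3000
+  #   --redis=<unix socket of the local redis instance>
+  #   --data-dir=/var/lib/ntopng
+  #   --user=ntopng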
+
+in
+
+{
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "ntopng" "http-port" ] [ "services" "ntopng" "httpPort" ])
+  ];
+
+  options = {
+
+    services.ntopng = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable ntopng, a high-speed web-based traffic analysis and flow
+          collection tool.
+
+          With the default configuration, ntopng monitors all network
+          interfaces and displays its findings at http://localhost:''${toString
+          config.${opt.httpPort}}. Default username and password is admin/admin.
+
+          See the ntopng(8) manual page and http://www.ntop.org/products/ntop/
+          for more info.
+
+          Note that enabling ntopng will also enable redis (key-value
+          database server) for persistent data storage.
+        '';
+      };
+
+      interfaces = mkOption {
+        default = [ "any" ];
+        example = [ "eth0" "wlan0" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of interfaces to monitor. Use "any" to monitor all interfaces.
+        '';
+      };
+
+      httpPort = mkOption {
+        default = 3000;
+        type = types.int;
+        description = lib.mdDoc ''
+          Sets the HTTP port of the embedded web server.
+        '';
+      };
+
+      redis.address = mkOption {
+        type = types.str;
+        example = literalExpression "config.services.redis.ntopng.unixSocket";
+        description = lib.mdDoc ''
+          Redis address - may be a Unix socket or a network host and port.
+        '';
+      };
+
+      redis.createInstance = mkOption {
+        type = types.nullOr types.str;
+        default = optionalString (versionAtLeast config.system.stateVersion "22.05") "ntopng";
+        description = lib.mdDoc ''
+          Local Redis instance name. Set to `null` to disable
+          the local Redis instance. Defaults to `""` for
+          `system.stateVersion` older than 22.05.
+        '';
+      };
+
+      configText = mkOption {
+        default = "";
+        example = ''
+          --interface=any
+          --http-port=3000
+          --disable-login
+        '';
+        type = types.lines;
+        description = lib.mdDoc ''
+          Overridable configuration file contents to use for ntopng. By
+          default, use the contents automatically generated by NixOS.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Configuration lines that will be appended to the generated ntopng
+          configuration file. Note that this mechanism does not work when the
+          manual {option}`configText` option is used.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    # ntopng uses redis for data storage
+    services.ntopng.redis.address =
+      mkIf createRedis config.services.redis.servers.${cfg.redis.createInstance}.unixSocket;
+
+    services.redis.servers = mkIf createRedis {
+      ${cfg.redis.createInstance} = {
+        enable = true;
+        user = mkIf (cfg.redis.createInstance == "ntopng") "ntopng";
+      };
+    };
+
+    # nice to have manual page and ntopng command in PATH
+    environment.systemPackages = [ pkgs.ntopng ];
+
+    systemd.tmpfiles.rules = [ "d /var/lib/ntopng 0700 ntopng ntopng -" ];
+
+    systemd.services.ntopng = {
+      description = "Ntopng Network Monitor";
+      requires = optional createRedis redisService;
+      after = [ "network.target" ] ++ optional createRedis redisService;
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.ExecStart = "${pkgs.ntopng}/bin/ntopng ${configFile}";
+      unitConfig.Documentation = "man:ntopng(8)";
+    };
+
+    users.extraUsers.ntopng = {
+      group = "ntopng";
+      isSystemUser = true;
+    };
+
+    users.extraGroups.ntopng = { };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ntp/chrony.nix b/nixpkgs/nixos/modules/services/networking/ntp/chrony.nix
new file mode 100644
index 000000000000..3f10145033c5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ntp/chrony.nix
@@ -0,0 +1,274 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.chrony;
+  chronyPkg = cfg.package;
+
+  stateDir = cfg.directory;
+  driftFile = "${stateDir}/chrony.drift";
+  keyFile = "${stateDir}/chrony.keys";
+  rtcFile = "${stateDir}/chrony.rtc";
+
+  configFile = pkgs.writeText "chrony.conf" ''
+    ${concatMapStringsSep "\n" (server: "server " + server + " " + cfg.serverOption + optionalString (cfg.enableNTS) " nts") cfg.servers}
+
+    ${optionalString
+      (cfg.initstepslew.enabled && (cfg.servers != []))
+      "initstepslew ${toString cfg.initstepslew.threshold} ${concatStringsSep " " cfg.servers}"
+    }
+
+    driftfile ${driftFile}
+    keyfile ${keyFile}
+    ${optionalString (cfg.enableRTCTrimming) "rtcfile ${rtcFile}"}
+    ${optionalString (cfg.enableNTS) "ntsdumpdir ${stateDir}"}
+
+    ${optionalString (cfg.enableRTCTrimming) "rtcautotrim ${builtins.toString cfg.autotrimThreshold}"}
+    ${optionalString (!config.time.hardwareClockInLocalTime) "rtconutc"}
+
+    ${cfg.extraConfig}
+  '';
+
+  chronyFlags =
+    [ "-n" "-u" "chrony" "-f" "${configFile}" ]
+    ++ optional cfg.enableMemoryLocking "-m"
+    ++ cfg.extraFlags;
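+  # Illustrative sketch (comment only): with the default options chronyd is
+  # started roughly as
+  #   chronyd -n -u chrony -f /nix/store/...-chrony.conf -m
+  # (the -m memory-locking flag is dropped when enableMemoryLocking is false).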
+in
+{
+  options = {
+    services.chrony = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to synchronise your machine's time using chrony.
+          Make sure you disable NTP if you enable this service.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.chrony;
+        defaultText = literalExpression "pkgs.chrony";
+        description = lib.mdDoc ''
+          Which chrony package to use.
+        '';
+      };
+
+      servers = mkOption {
+        default = config.networking.timeServers;
+        defaultText = literalExpression "config.networking.timeServers";
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The set of NTP servers from which to synchronise.
+        '';
+      };
+
+      serverOption = mkOption {
+        default = "iburst";
+        type = types.enum [ "iburst" "offline" ];
+        description = lib.mdDoc ''
+          Set option for server directives.
+
+          Use "iburst" to rapidly poll on startup. Recommended if your machine
+          is consistently online.
+
+          Use "offline" to prevent polling on startup. Recommended if your
+          machine boots offline or is otherwise frequently offline.
+        '';
+      };
+
+      enableMemoryLocking = mkOption {
+        type = types.bool;
+        default = config.environment.memoryAllocator.provider != "graphene-hardened";
+        defaultText = ''config.environment.memoryAllocator.provider != "graphene-hardened"'';
+        description = lib.mdDoc ''
+          Whether to add the `-m` flag to lock memory.
+        '';
+      };
+
+      enableRTCTrimming = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable tracking of the RTC offset to the system clock and automatic trimming.
+          See also [](#opt-services.chrony.autotrimThreshold)
+
+          ::: {.note}
+          This is not compatible with the `rtcsync` directive, which naively syncs the RTC time every 11 minutes.
+
+          Tracking the RTC drift will allow more precise timekeeping,
+          especially on intermittently running devices, where the RTC is very relevant.
+          :::
+        '';
+      };
+
+      autotrimThreshold = mkOption {
+        type = types.ints.positive;
+        default = 30;
+        example = 10;
+        description = ''
+          Maximum estimated error threshold for the `rtcautotrim` command.
+          When reached, the RTC will be trimmed.
+          Only used when [](#opt-services.chrony.enableRTCTrimming) is enabled.
+        '';
+      };
+
+      enableNTS = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable Network Time Security authentication.
+          Make sure it is supported by your selected NTP server(s).
+        '';
+      };
+
+      initstepslew = {
+        enabled = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Allow chronyd to make a rapid measurement of the system clock error
+            at boot time, and to correct the system clock by stepping before
+            normal operation begins.
+          '';
+        };
+
+        threshold = mkOption {
+          type = types.either types.float types.int;
+          default = 1000; # by default, same threshold as 'ntpd -g' (1000s)
+          description = lib.mdDoc ''
+            The threshold of system clock error (in seconds) above which the
+            clock will be stepped. If the correction required is less than the
+            threshold, a slew is used instead.
+          '';
+        };
+      };
+
+      directory = mkOption {
+        type = types.str;
+        default = "/var/lib/chrony";
+        description = lib.mdDoc "Directory where chrony state is stored.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration directives that should be added to
+          `chrony.conf`
+        '';
+      };
+
+      extraFlags = mkOption {
+        default = [ ];
+        example = [ "-s" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc "Extra flags passed to the chronyd command.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    meta.maintainers = with lib.maintainers; [ thoughtpolice vifino ];
+
+    environment.systemPackages = [ chronyPkg ];
+
+    users.groups.chrony.gid = config.ids.gids.chrony;
+
+    users.users.chrony =
+      {
+        uid = config.ids.uids.chrony;
+        group = "chrony";
+        description = "chrony daemon user";
+        home = stateDir;
+      };
+
+    services.timesyncd.enable = mkForce false;
+
+    # If chrony controls and tracks the RTC, writing it externally causes clock error.
+    systemd.services.save-hwclock = lib.mkIf cfg.enableRTCTrimming {
+      enable = lib.mkForce false;
+    };
+
+    systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "chronyd.service"; };
+
+    systemd.tmpfiles.rules = [
+      "d ${stateDir} 0750 chrony chrony - -"
+      "f ${driftFile} 0640 chrony chrony - -"
+      "f ${keyFile} 0640 chrony chrony - -"
+    ] ++ lib.optionals cfg.enableRTCTrimming [
+      "f ${rtcFile} 0640 chrony chrony - -"
+    ];
+
+    systemd.services.chronyd =
+      {
+        description = "chrony NTP daemon";
+
+        wantedBy = [ "multi-user.target" ];
+        wants = [ "time-sync.target" ];
+        before = [ "time-sync.target" ];
+        after = [ "network.target" "nss-lookup.target" ];
+        conflicts = [ "ntpd.service" "systemd-timesyncd.service" ];
+
+        path = [ chronyPkg ];
+
+        unitConfig.ConditionCapability = "CAP_SYS_TIME";
+        serviceConfig = {
+          Type = "simple";
+          ExecStart = "${chronyPkg}/bin/chronyd ${builtins.toString chronyFlags}";
+
+          # Proc filesystem
+          ProcSubset = "pid";
+          ProtectProc = "invisible";
+          # Access write directories
+          ReadWritePaths = [ "${stateDir}" ];
+          UMask = "0027";
+          # Capabilities
+          CapabilityBoundingSet = [ "CAP_CHOWN" "CAP_DAC_OVERRIDE" "CAP_NET_BIND_SERVICE" "CAP_SETGID" "CAP_SETUID" "CAP_SYS_RESOURCE" "CAP_SYS_TIME" ];
+          # Device Access
+          DeviceAllow = [ "char-pps rw" "char-ptp rw" "char-rtc rw" ];
+          DevicePolicy = "closed";
+          # Security
+          NoNewPrivileges = true;
+          # Sandboxing
+          ProtectSystem = "full";
+          ProtectHome = true;
+          PrivateTmp = true;
+          PrivateDevices = false;
+          PrivateUsers = false;
+          ProtectHostname = true;
+          ProtectClock = false;
+          ProtectKernelTunables = true;
+          ProtectKernelModules = true;
+          ProtectKernelLogs = true;
+          ProtectControlGroups = true;
+          RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          RemoveIPC = true;
+          PrivateMounts = true;
+          # System Call Filtering
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @resources" "@clock" "@setuid" "capset" "@chown" ];
+        };
+      };
+
+    assertions = [
+      {
+        assertion = !(cfg.enableRTCTrimming && builtins.any (line: (builtins.match "^ *rtcsync" line) != null) (lib.strings.splitString "\n" cfg.extraConfig));
+        message = ''
+          The chrony module now configures `rtcfile` and `rtcautotrim` for you.
+          These options conflict with `rtcsync` and cause chrony to crash.
+          Unless you are very sure the former isn't what you want, please remove
+          `rtcsync` from `services.chrony.extraConfig`.
+          Alternatively, disable this behaviour by `services.chrony.enableRTCTrimming = false;`
+        '';
+      }
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ntp/ntpd.nix b/nixpkgs/nixos/modules/services/networking/ntp/ntpd.nix
new file mode 100644
index 000000000000..2bc690cacf09
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ntp/ntpd.nix
@@ -0,0 +1,147 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) ntp;
+
+  cfg = config.services.ntp;
+
+  stateDir = "/var/lib/ntp";
+
+  configFile = pkgs.writeText "ntp.conf" ''
+    driftfile ${stateDir}/ntp.drift
+
+    restrict default ${toString cfg.restrictDefault}
+    restrict -6 default ${toString cfg.restrictDefault}
+    restrict source ${toString cfg.restrictSource}
+
+    restrict 127.0.0.1
+    restrict -6 ::1
+
+    ${toString (map (server: "server " + server + " iburst\n") cfg.servers)}
+
+    ${cfg.extraConfig}
+  '';
+
+  ntpFlags = [ "-c" "${configFile}" "-u" "ntp:ntp" ] ++ cfg.extraFlags;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.ntp = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to synchronise your machine's time using ntpd, as a peer in
+          the NTP network.
+
+          Disables `systemd.timesyncd` if enabled.
+        '';
+      };
+
+      restrictDefault = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The restriction flags to be set by default.
+
+          The default flags prevent external hosts from using ntpd as a DDoS
+          reflector, setting system time, and querying OS/ntpd version. As
+          recommended in section 6.5.1.1.3 ("answer No") of
+          https://support.ntp.org/Support/AccessRestrictions.
+        '';
+        default = [ "limited" "kod" "nomodify" "notrap" "noquery" "nopeer" ];
+      };
+
+      restrictSource = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The restriction flags to be set on source.
+
+          The default flags allow peers to be added by ntpd from configured
+          pool(s), but not by other means.
+        '';
+        default = [ "limited" "kod" "nomodify" "notrap" "noquery" ];
+      };
+
+      servers = mkOption {
+        default = config.networking.timeServers;
+        defaultText = literalExpression "config.networking.timeServers";
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The set of NTP servers from which to synchronise.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          fudge 127.127.1.0 stratum 10
+        '';
+        description = lib.mdDoc ''
+          Additional text appended to {file}`ntp.conf`.
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        description = lib.mdDoc "Extra flags passed to the ntpd command.";
+        example = literalExpression ''[ "--interface=eth0" ]'';
+        default = [];
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.ntp.enable {
+    meta.maintainers = with lib.maintainers; [ thoughtpolice ];
+
+    # Make tools such as ntpq available in the system path.
+    environment.systemPackages = [ pkgs.ntp ];
+    services.timesyncd.enable = mkForce false;
+
+    systemd.services.systemd-timedated.environment = { SYSTEMD_TIMEDATED_NTP_SERVICES = "ntpd.service"; };
+
+    users.users.ntp =
+      { isSystemUser = true;
+        group = "ntp";
+        description = "NTP daemon user";
+        home = stateDir;
+      };
+    users.groups.ntp = {};
+
+    systemd.services.ntpd =
+      { description = "NTP Daemon";
+
+        wantedBy = [ "multi-user.target" ];
+        wants = [ "time-sync.target" ];
+        before = [ "time-sync.target" ];
+
+        preStart =
+          ''
+            mkdir -m 0755 -p ${stateDir}
+            chown ntp ${stateDir}
+          '';
+
+        serviceConfig = {
+          ExecStart = "@${ntp}/bin/ntpd ntpd -g ${builtins.toString ntpFlags}";
+          Type = "forking";
+        };
+      };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ntp/openntpd.nix b/nixpkgs/nixos/modules/services/networking/ntp/openntpd.nix
new file mode 100644
index 000000000000..05df1f6e6266
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ntp/openntpd.nix
@@ -0,0 +1,85 @@
+{ pkgs, lib, config, options, ... }:
+
+with lib;
+
+let
+  cfg = config.services.openntpd;
+
+  package = pkgs.openntpd_nixos;
+
+  configFile = ''
+    ${concatStringsSep "\n" (map (s: "server ${s}") cfg.servers)}
+    ${cfg.extraConfig}
+  '';
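+  # Illustrative sketch (comment only): with the default servers the generated
+  # /etc/ntpd.conf is roughly
+  #   server 0.nixos.pool.ntp.org
+  #   server 1.nixos.pool.ntp.org
+  #   ...
+  # followed by extraConfig.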
+
+  pidFile = "/run/openntpd.pid";
+
+in
+{
+  ###### interface
+
+  options.services.openntpd = {
+    enable = mkEnableOption (lib.mdDoc "OpenNTP time synchronization server");
+
+    servers = mkOption {
+      default = config.services.ntp.servers;
+      defaultText = literalExpression "config.services.ntp.servers";
+      type = types.listOf types.str;
+      inherit (options.services.ntp.servers) description;
+    };
+
+    extraConfig = mkOption {
+      type = with types; lines;
+      default = "";
+      example = ''
+        listen on 127.0.0.1
+        listen on ::1
+      '';
+      description = lib.mdDoc ''
+        Additional text appended to {file}`openntpd.conf`.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = with types; separatedString " ";
+      default = "";
+      example = "-s";
+      description = lib.mdDoc ''
+        Extra options used when launching openntpd.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    meta.maintainers = with lib.maintainers; [ thoughtpolice ];
+    services.timesyncd.enable = mkForce false;
+
+    # Add ntpctl to the environment for status checking
+    environment.systemPackages = [ package ];
+
+    environment.etc."ntpd.conf".text = configFile;
+
+    users.users.ntp = {
+      isSystemUser = true;
+      group = "ntp";
+      description = "OpenNTP daemon user";
+      home = "/var/empty";
+    };
+    users.groups.ntp = {};
+
+    systemd.services.openntpd = {
+      description = "OpenNTP Server";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" "time-sync.target" ];
+      before = [ "time-sync.target" ];
+      after = [ "dnsmasq.service" "bind.service" "network-online.target" ];
+      serviceConfig = {
+        ExecStart = "${package}/sbin/ntpd -p ${pidFile} ${cfg.extraOptions}";
+        Type = "forking";
+        PIDFile = pidFile;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nullidentdmod.nix b/nixpkgs/nixos/modules/services/networking/nullidentdmod.nix
new file mode 100644
index 000000000000..e74e1dd6b795
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nullidentdmod.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, ... }: with lib; let
+  cfg = config.services.nullidentdmod;
+
+in {
+  options.services.nullidentdmod = with types; {
+    enable = mkEnableOption (lib.mdDoc "the nullidentdmod identd daemon");
+
+    userid = mkOption {
+      type = nullOr str;
+      description = lib.mdDoc "User ID to return. Set to null to return a random string each time.";
+      default = null;
+      example = "alice";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.sockets.nullidentdmod = {
+      description = "Socket for identd (NullidentdMod)";
+      listenStreams = [ "113" ];
+      socketConfig.Accept = true;
+      wantedBy = [ "sockets.target" ];
+    };
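+    # Note: with Accept=true, systemd spawns one nullidentdmod@<connection>.service
+    # instance per incoming connection on port 113 and passes the accepted socket
+    # as stdin/stdout to the template service below.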
+
+    systemd.services."nullidentdmod@" = {
+      description = "NullidentdMod service";
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${pkgs.nullidentdmod}/bin/nullidentdmod${optionalString (cfg.userid != null) " ${cfg.userid}"}";
+        StandardInput = "socket";
+        StandardOutput = "socket";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/nylon.nix b/nixpkgs/nixos/modules/services/networking/nylon.nix
new file mode 100644
index 000000000000..401dbe97c52d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/nylon.nix
@@ -0,0 +1,166 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.nylon;
+
+  homeDir = "/var/lib/nylon";
+
+  configFile = cfg: pkgs.writeText "nylon-${cfg.name}.conf" ''
+    [General]
+    No-Simultaneous-Conn=${toString cfg.nrConnections}
+    Log=${if cfg.logging then "1" else "0"}
+    Verbose=${if cfg.verbosity then "1" else "0"}
+
+    [Server]
+    Binding-Interface=${cfg.acceptInterface}
+    Connecting-Interface=${cfg.bindInterface}
+    Port=${toString cfg.port}
+    Allow-IP=${concatStringsSep " " cfg.allowedIPRanges}
+    Deny-IP=${concatStringsSep " " cfg.deniedIPRanges}
+  '';
+
+  nylonOpts = { name, ... }: {
+
+    options = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enables nylon as a running service upon activation.
+        '';
+      };
+
+      name = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "The name of this nylon instance.";
+      };
+
+      nrConnections = mkOption {
+        type = types.int;
+        default = 10;
+        description = lib.mdDoc ''
+          The number of allowed simultaneous connections to the daemon, default 10.
+        '';
+      };
+
+      logging = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable logging, default is no logging.
+        '';
+      };
+
+      verbosity = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable verbose output, default is to not be verbose.
+        '';
+      };
+
+      acceptInterface = mkOption {
+        type = types.str;
+        default = "lo";
+        description = lib.mdDoc ''
+          Tell nylon which interface to listen for client requests on, default is "lo".
+        '';
+      };
+
+      bindInterface = mkOption {
+        type = types.str;
+        default = "enp3s0f0";
+        description = lib.mdDoc ''
+          Tell nylon which interface to use as an uplink, default is "enp3s0f0".
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 1080;
+        description = lib.mdDoc ''
+          What port to listen on for client requests, default is 1080.
+        '';
+      };
+
+      allowedIPRanges = mkOption {
+        type = with types; listOf str;
+        default = [ "192.168.0.0/16" "127.0.0.1/8" "172.16.0.1/12" "10.0.0.0/8" ];
+        description = lib.mdDoc ''
+           Allowed client IP ranges are evaluated first, defaults to ARIN IPv4 private ranges:
+             [ "192.168.0.0/16" "127.0.0.0/8" "172.16.0.0/12" "10.0.0.0/8" ]
+        '';
+      };
+
+      deniedIPRanges = mkOption {
+        type = with types; listOf str;
+        default = [ "0.0.0.0/0" ];
+        description = lib.mdDoc ''
+          Denied client IP ranges; these are evaluated after the allowed IP ranges and default to all IPv4 addresses:
+            [ "0.0.0.0/0" ]
+          to block all access other than the allowed ranges.
+        '';
+      };
+    };
+    config = { name = mkDefault name; };
+  };
+
+  mkNamedNylon = cfg: {
+    "nylon-${cfg.name}" = {
+      description = "Nylon, a lightweight SOCKS proxy server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig =
+      {
+        User = "nylon";
+        Group = "nylon";
+        WorkingDirectory = homeDir;
+        ExecStart = "${pkgs.nylon}/bin/nylon -f -c ${configFile cfg}";
+      };
+    };
+  };
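+  # Illustrative sketch (comment only): a hypothetical instance
+  # services.nylon.home = { enable = true; } yields a systemd unit "nylon-home"
+  # running roughly
+  #   nylon -f -c /nix/store/...-nylon-home.conf
+  # as the nylon user in /var/lib/nylon.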
+
+  anyNylons = collect (p: p ? enable) cfg;
+  enabledNylons = filter (p: p.enable == true) anyNylons;
+  nylonUnits = map (nylon: mkNamedNylon nylon) enabledNylons;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.nylon = mkOption {
+      default = {};
+      description = lib.mdDoc "Collection of named nylon instances";
+      type = with types; attrsOf (submodule nylonOpts);
+      internal = true;
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf (length(enabledNylons) > 0) {
+
+    users.users.nylon = {
+      group = "nylon";
+      description = "Nylon SOCKS Proxy";
+      home = homeDir;
+      createHome = true;
+      uid = config.ids.uids.nylon;
+    };
+
+    users.groups.nylon.gid = config.ids.gids.nylon;
+
+    systemd.services = foldr (a: b: a // b) {} nylonUnits;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ocserv.nix b/nixpkgs/nixos/modules/services/networking/ocserv.nix
new file mode 100644
index 000000000000..9548fd92dbda
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ocserv.nix
@@ -0,0 +1,99 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.ocserv;
+
+in
+
+{
+  options.services.ocserv = {
+    enable = mkEnableOption (lib.mdDoc "ocserv");
+
+    config = mkOption {
+      type = types.lines;
+
+      description = lib.mdDoc ''
+        Configuration content to start an OCServ server.
+
+        For a full configuration reference, please refer to the online documentation
+        (https://ocserv.gitlab.io/www/manual.html), the openconnect
+        recipes (https://github.com/openconnect/recipes) or `man ocserv`.
+      '';
+
+      example = ''
+        # configuration examples from $out/doc without explanatory comments.
+        # for a full reference please look at the installed man pages.
+        auth = "plain[passwd=./sample.passwd]"
+        tcp-port = 443
+        udp-port = 443
+        run-as-user = nobody
+        run-as-group = nogroup
+        socket-file = /run/ocserv-socket
+        server-cert = certs/server-cert.pem
+        server-key = certs/server-key.pem
+        keepalive = 32400
+        dpd = 90
+        mobile-dpd = 1800
+        switch-to-tcp-timeout = 25
+        try-mtu-discovery = false
+        cert-user-oid = 0.9.2342.19200300.100.1.1
+        tls-priorities = "NORMAL:%SERVER_PRECEDENCE:%COMPAT:-VERS-SSL3.0"
+        auth-timeout = 240
+        min-reauth-time = 300
+        max-ban-score = 80
+        ban-reset-time = 1200
+        cookie-timeout = 300
+        deny-roaming = false
+        rekey-time = 172800
+        rekey-method = ssl
+        use-occtl = true
+        pid-file = /run/ocserv.pid
+        device = vpns
+        predictable-ips = true
+        default-domain = example.com
+        ipv4-network = 192.168.1.0
+        ipv4-netmask = 255.255.255.0
+        dns = 192.168.1.2
+        ping-leases = false
+        route = 10.10.10.0/255.255.255.0
+        route = 192.168.0.0/255.255.0.0
+        no-route = 192.168.5.0/255.255.255.0
+        cisco-client-compat = true
+        dtls-legacy = true
+
+        [vhost:www.example.com]
+        auth = "certificate"
+        ca-cert = certs/ca.pem
+        server-cert = certs/server-cert-secp521r1.pem
+        server-key = certs/server-key-secp521r1.pem
+        ipv4-network = 192.168.2.0
+        ipv4-netmask = 255.255.255.0
+        cert-user-oid = 0.9.2342.19200300.100.1.1
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.ocserv ];
+    environment.etc."ocserv/ocserv.conf".text = cfg.config;
+
+    security.pam.services.ocserv = {};
+
+    systemd.services.ocserv = {
+      description = "OpenConnect SSL VPN server";
+      documentation = [ "man:ocserv(8)" ];
+      after = [ "dbus.service" "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        PrivateTmp = true;
+        PIDFile = "/run/ocserv.pid";
+        ExecStart = "${pkgs.ocserv}/bin/ocserv --foreground --pid-file /run/ocesrv.pid --config /etc/ocserv/ocserv.conf";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ofono.nix b/nixpkgs/nixos/modules/services/networking/ofono.nix
new file mode 100644
index 000000000000..960fc35a70ac
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ofono.nix
@@ -0,0 +1,44 @@
+# Ofono daemon.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.ofono;
+
+  plugin_path =
+    lib.concatMapStringsSep ":"
+      (plugin: "${plugin}/lib/ofono/plugins")
+      cfg.plugins
+    ;
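+  # Illustrative sketch (comment only): with plugins = [ pkgs.modem-manager-gui ]
+  # this evaluates to something like
+  #   /nix/store/...-modem-manager-gui-.../lib/ofono/plugins
+  # and multiple plugin paths are joined with ":".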
+
+in
+
+{
+  ###### interface
+  options = {
+    services.ofono = {
+      enable = mkEnableOption (lib.mdDoc "Ofono");
+
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.modem-manager-gui ]";
+        description = lib.mdDoc ''
+          The list of plugins to install.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.dbus.packages = [ pkgs.ofono ];
+
+    systemd.packages = [ pkgs.ofono ];
+
+    systemd.services.ofono.environment.OFONO_PLUGIN_PATH = mkIf (cfg.plugins != []) plugin_path;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/oidentd.nix b/nixpkgs/nixos/modules/services/networking/oidentd.nix
new file mode 100644
index 000000000000..7c7883c94611
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/oidentd.nix
@@ -0,0 +1,44 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.oidentd.enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable ‘oidentd’, an implementation of the Ident
+        protocol (RFC 1413).  It allows remote systems to identify the
+        name of the user associated with a TCP connection.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.oidentd.enable {
+    systemd.services.oidentd = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.Type = "forking";
+      script = "${pkgs.oidentd}/sbin/oidentd -u oidentd -g nogroup";
+    };
+
+    users.users.oidentd = {
+      description = "Ident Protocol daemon user";
+      group = "oidentd";
+      uid = config.ids.uids.oidentd;
+    };
+
+    users.groups.oidentd.gid = config.ids.gids.oidentd;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/onedrive.nix b/nixpkgs/nixos/modules/services/networking/onedrive.nix
new file mode 100644
index 000000000000..d782ec05352b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/onedrive.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.onedrive;
+
+  onedriveLauncher =  pkgs.writeShellScriptBin
+    "onedrive-launcher"
+    ''
+      # XDG_CONFIG_HOME is not recognized in the environment here.
+      if [ -f "$HOME/.config/onedrive-launcher" ]
+      then
+        # Hopefully using underscore boundary helps locate variables
+        for _onedrive_config_dirname_ in $(grep -v '[ \t]*#' "$HOME/.config/onedrive-launcher")
+        do
+          systemctl --user start onedrive@$_onedrive_config_dirname_
+        done
+      else
+        systemctl --user start onedrive@onedrive
+      fi
+    ''
+  ;
+
+in {
+  ### Documentation
+  # meta.doc = ./onedrive.xml;
+
+  ### Interface
+
+  options.services.onedrive = {
+     enable = lib.mkEnableOption (lib.mdDoc "OneDrive service");
+
+     package = lib.mkOption {
+       type = lib.types.package;
+       default = pkgs.onedrive;
+       defaultText = lib.literalExpression "pkgs.onedrive";
+       description = lib.mdDoc ''
+         OneDrive package to use.
+       '';
+     };
+  };
+### Implementation
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.user.services."onedrive@" = {
+      description = "Onedrive sync service";
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = ''
+          ${cfg.package}/bin/onedrive --monitor --confdir=%h/.config/%i
+        '';
+        Restart="on-failure";
+        RestartSec=3;
+        RestartPreventExitStatus=3;
+      };
+    };
+
+    systemd.user.services.onedrive-launcher = {
+      wantedBy = [ "default.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${onedriveLauncher}/bin/onedrive-launcher";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/onedrive.xml b/nixpkgs/nixos/modules/services/networking/onedrive.xml
new file mode 100644
index 000000000000..5a9dcf01aeee
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/onedrive.xml
@@ -0,0 +1,34 @@
+<chapter xmlns="http://docbook.org/ns/docbook"
+         xmlns:xlink="http://www.w3.org/1999/xlink"
+         xmlns:xi="http://www.w3.org/2001/XInclude"
+         version="5.0"
+         xml:id="onedrive">
+ <title>Microsoft OneDrive</title>
+ <para>
+  Microsoft OneDrive is a popular cloud file-hosting service, used by 85% of Fortune 500 companies. NixOS uses a popular OneDrive client for Linux maintained by GitHub user abraunegg. The client lets you choose which files or paths to download, much like the official Windows OneDrive client, and it can sync multiple OneDrive accounts of any type at the same time (OneDrive personal, OneDrive business, Office 365 and SharePoint libraries) at no additional charge.
+ </para>
+ <para>
+  For more information, guides and documentation, see <link xlink:href="https://abraunegg.github.io/"/>.
+ </para>
+ <para>
+  To enable OneDrive support, add the following to your <filename>configuration.nix</filename>:
+<programlisting>
+<xref linkend="opt-services.onedrive.enable"/> = true;
+</programlisting>
+  This installs the <literal>onedrive</literal> package and a user service <literal>onedrive-launcher</literal>, which instantiates an <literal>onedrive</literal> service for each of your OneDrive accounts. Follow the steps in the documentation of the onedrive client to set up your accounts. To use the service with multiple accounts, create a file named <filename>onedrive-launcher</filename> in <filename>~/.config</filename> and add one line per account, containing the name of that account's config directory relative to <filename>~/.config</filename>. For example, if you have two OneDrive accounts with configs in <filename>~/.config/onedrive_bob_work</filename> and <filename>~/.config/onedrive_bob_personal</filename>, add the following lines:
+<programlisting>
+onedrive_bob_work
+# Not in use:
+# onedrive_bob_office365
+onedrive_bob_personal
+</programlisting>
+  No such file needs to be created if you are using only a single OneDrive account with its config in the default location <filename>~/.config/onedrive</filename>: in the absence of <filename>~/.config/onedrive-launcher</filename>, only a single service is instantiated, with the default config path.
+</para>
+
+  <para>
+  If you wish to use a custom OneDrive package, say from another channel, add the following line:
+<programlisting>
+<xref linkend="opt-services.onedrive.package"/> = pkgs.unstable.onedrive;
+</programlisting>
+ </para>
+</chapter>
diff --git a/nixpkgs/nixos/modules/services/networking/openconnect.nix b/nixpkgs/nixos/modules/services/networking/openconnect.nix
new file mode 100644
index 000000000000..7f9006053b89
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/openconnect.nix
@@ -0,0 +1,145 @@
+{ config, lib, options, pkgs, ... }:
+with lib;
+let
+  cfg = config.networking.openconnect;
+  openconnect = cfg.package;
+  pkcs11 = types.strMatching "pkcs11:.+" // {
+    name = "pkcs11";
+    description = "PKCS#11 URI";
+  };
+  interfaceOptions = {
+    options = {
+      autoStart = mkOption {
+        default = true;
+        description = lib.mdDoc "Whether this VPN connection should be started automatically.";
+        type = types.bool;
+      };
+
+      gateway = mkOption {
+        description = lib.mdDoc "Gateway server to connect to.";
+        example = "gateway.example.com";
+        type = types.str;
+      };
+
+      protocol = mkOption {
+        description = lib.mdDoc "Protocol to use.";
+        example = "anyconnect";
+        type =
+          types.enum [ "anyconnect" "array" "nc" "pulse" "gp" "f5" "fortinet" ];
+      };
+
+      user = mkOption {
+        description = lib.mdDoc "Username to authenticate with.";
+        example = "example-user";
+        type = types.nullOr types.str;
+        default = null;
+      };
+
+      # Note: It does not make sense to provide a way to declaratively
+      # set an authentication cookie, because they have to be requested
+      # for every new connection and would only work once.
+      passwordFile = mkOption {
+        description = lib.mdDoc ''
+          File containing the password to authenticate with. This
+          is passed to `openconnect` via the
+          `--passwd-on-stdin` option.
+        '';
+        default = null;
+        example = "/var/lib/secrets/openconnect-passwd";
+        type = types.nullOr types.path;
+      };
+
+      certificate = mkOption {
+        description = lib.mdDoc "Certificate to authenticate with.";
+        default = null;
+        example = "/var/lib/secrets/openconnect_certificate.pem";
+        type = with types; nullOr (either path pkcs11);
+      };
+
+      privateKey = mkOption {
+        description = lib.mdDoc "Private key to authenticate with.";
+        example = "/var/lib/secrets/openconnect_private_key.pem";
+        default = null;
+        type = with types; nullOr (either path pkcs11);
+      };
+
+      extraOptions = mkOption {
+        description = lib.mdDoc ''
+          Extra config to be appended to the interface config. It should
+          contain long-format options as would be accepted on the command
+          line by `openconnect`
+          (see https://www.infradead.org/openconnect/manual.html).
+          Non-key-value options like `deflate` can be used by
+          declaring them as booleans, i.e. `deflate = true;`.
+        '';
+        default = { };
+        example = {
+          compression = "stateless";
+
+          no-http-keepalive = true;
+          no-dtls = true;
+        };
+        type = with types; attrsOf (either str bool);
+      };
+    };
+  };
+  generateExtraConfig = extra_cfg:
+    strings.concatStringsSep "\n" (attrsets.mapAttrsToList
+      (name: value: if (value == true) then name else "${name}=${value}")
+      (attrsets.filterAttrs (_: value: value != false) extra_cfg));
+  generateConfig = name: icfg:
+    pkgs.writeText "config" ''
+      interface=${name}
+      ${optionalString (icfg.protocol != null) "protocol=${icfg.protocol}"}
+      ${optionalString (icfg.user != null) "user=${icfg.user}"}
+      ${optionalString (icfg.passwordFile != null) "passwd-on-stdin"}
+      ${optionalString (icfg.certificate != null)
+      "certificate=${icfg.certificate}"}
+      ${optionalString (icfg.privateKey != null) "sslkey=${icfg.privateKey}"}
+
+      ${generateExtraConfig icfg.extraOptions}
+    '';
+  generateUnit = name: icfg: {
+    description = "OpenConnect Interface - ${name}";
+    requires = [ "network-online.target" ];
+    after = [ "network.target" "network-online.target" ];
+    wantedBy = optional icfg.autoStart "multi-user.target";
+
+    serviceConfig = {
+      Type = "simple";
+      ExecStart = "${openconnect}/bin/openconnect --config=${
+          generateConfig name icfg
+        } ${icfg.gateway}";
+      StandardInput = lib.mkIf (icfg.passwordFile != null) "file:${icfg.passwordFile}";
+
+      ProtectHome = true;
+    };
+  };
+in {
+  options.networking.openconnect = {
+    package = mkPackageOptionMD pkgs "openconnect" { };
+
+    interfaces = mkOption {
+      description = lib.mdDoc "OpenConnect interfaces.";
+      default = { };
+      example = {
+        openconnect0 = {
+          gateway = "gateway.example.com";
+          protocol = "anyconnect";
+          user = "example-user";
+          passwordFile = "/var/lib/secrets/openconnect-passwd";
+        };
+      };
+      type = with types; attrsOf (submodule interfaceOptions);
+    };
+  };
+
+  config = {
+    systemd.services = mapAttrs' (name: value: {
+      name = "openconnect-${name}";
+      value = generateUnit name value;
+    }) cfg.interfaces;
+  };
+
+  meta.maintainers = with maintainers; [ alyaeanyx ];
+}
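A hedged sketch of a single tunnel definition using the options above; the interface name, gateway, user and secret path are placeholders. Boolean keys in `extraOptions` become bare flags and string values become `key=value` lines, as `generateExtraConfig` shows:

```
{
  networking.openconnect.interfaces.vpn0 = {
    autoStart = false;               # start manually with: systemctl start openconnect-vpn0
    gateway = "vpn.example.org";
    protocol = "anyconnect";
    user = "alice";
    passwordFile = "/var/lib/secrets/openconnect-passwd";
    extraOptions = {
      no-dtls = true;                # emitted as a bare "no-dtls" line
      reconnect-timeout = "60";      # emitted as "reconnect-timeout=60"
    };
  };
}
```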
diff --git a/nixpkgs/nixos/modules/services/networking/openvpn.nix b/nixpkgs/nixos/modules/services/networking/openvpn.nix
new file mode 100644
index 000000000000..9a5866f2afd4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/openvpn.nix
@@ -0,0 +1,235 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.openvpn;
+
+  inherit (pkgs) openvpn;
+
+  makeOpenVPNJob = cfg: name:
+    let
+
+      path = makeBinPath (getAttr "openvpn-${name}" config.systemd.services).path;
+
+      upScript = ''
+        export PATH=${path}
+
+        # For convenience in client scripts, extract the remote domain
+        # name and name server.
+        for var in ''${!foreign_option_*}; do
+          x=(''${!var})
+          if [ "''${x[0]}" = dhcp-option ]; then
+            if [ "''${x[1]}" = DOMAIN ]; then domain="''${x[2]}"
+            elif [ "''${x[1]}" = DNS ]; then nameserver="''${x[2]}"
+            fi
+          fi
+        done
+
+        ${cfg.up}
+        ${optionalString cfg.updateResolvConf
+           "${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"}
+      '';
+
+      downScript = ''
+        export PATH=${path}
+        ${optionalString cfg.updateResolvConf
+           "${pkgs.update-resolv-conf}/libexec/openvpn/update-resolv-conf"}
+        ${cfg.down}
+      '';
+
+      configFile = pkgs.writeText "openvpn-config-${name}"
+        ''
+          errors-to-stderr
+          ${optionalString (cfg.up != "" || cfg.down != "" || cfg.updateResolvConf) "script-security 2"}
+          ${cfg.config}
+          ${optionalString (cfg.up != "" || cfg.updateResolvConf)
+              "up ${pkgs.writeShellScript "openvpn-${name}-up" upScript}"}
+          ${optionalString (cfg.down != "" || cfg.updateResolvConf)
+              "down ${pkgs.writeShellScript "openvpn-${name}-down" downScript}"}
+          ${optionalString (cfg.authUserPass != null)
+              "auth-user-pass ${pkgs.writeText "openvpn-credentials-${name}" ''
+                ${cfg.authUserPass.username}
+                ${cfg.authUserPass.password}
+              ''}"}
+        '';
+
+    in
+    {
+      description = "OpenVPN instance ‘${name}’";
+
+      wantedBy = optional cfg.autoStart "multi-user.target";
+      after = [ "network.target" ];
+
+      path = [ pkgs.iptables pkgs.iproute2 pkgs.nettools ];
+
+      serviceConfig.ExecStart = "@${openvpn}/sbin/openvpn openvpn --suppress-timestamps --config ${configFile}";
+      serviceConfig.Restart = "always";
+      serviceConfig.Type = "notify";
+    };
+
+  restartService = optionalAttrs cfg.restartAfterSleep {
+    openvpn-restart = {
+      wantedBy = [ "sleep.target" ];
+      path = [ pkgs.procps ];
+      script = "pkill --signal SIGHUP --exact openvpn";
+      # SIGHUP makes the openvpn process exit; systemd then restarts it because of Restart=always.
+      description = "Sends a signal to OpenVPN process to trigger a restart after return from sleep";
+    };
+  };
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "openvpn" "enable" ] "")
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.openvpn.servers = mkOption {
+      default = { };
+
+      example = literalExpression ''
+        {
+          server = {
+            config = '''
+              # Simplest server configuration: https://community.openvpn.net/openvpn/wiki/StaticKeyMiniHowto
+              # server :
+              dev tun
+              ifconfig 10.8.0.1 10.8.0.2
+              secret /root/static.key
+            ''';
+            up = "ip route add ...";
+            down = "ip route del ...";
+          };
+
+          client = {
+            config = '''
+              client
+              remote vpn.example.org
+              dev tun
+              proto tcp-client
+              port 8080
+              ca /root/.vpn/ca.crt
+              cert /root/.vpn/alice.crt
+              key /root/.vpn/alice.key
+            ''';
+            up = "echo nameserver $nameserver | ''${pkgs.openresolv}/sbin/resolvconf -m 0 -a $dev";
+            down = "''${pkgs.openresolv}/sbin/resolvconf -d $dev";
+          };
+        }
+      '';
+
+      description = lib.mdDoc ''
+        Each attribute of this option defines a systemd service that
+        runs an OpenVPN instance.  These can be OpenVPN servers or
+        clients.  The name of each systemd service is
+        `openvpn-«name».service`,
+        where «name» is the corresponding
+        attribute name.
+      '';
+
+      type = with types; attrsOf (submodule {
+
+        options = {
+
+          config = mkOption {
+            type = types.lines;
+            description = lib.mdDoc ''
+              Configuration of this OpenVPN instance.  See
+              {manpage}`openvpn(8)`
+              for details.
+
+              To import an external config file, use the following definition:
+              `config = "config /path/to/config.ovpn"`
+            '';
+          };
+
+          up = mkOption {
+            default = "";
+            type = types.lines;
+            description = lib.mdDoc ''
+              Shell commands executed when the instance is starting.
+            '';
+          };
+
+          down = mkOption {
+            default = "";
+            type = types.lines;
+            description = lib.mdDoc ''
+              Shell commands executed when the instance is shutting down.
+            '';
+          };
+
+          autoStart = mkOption {
+            default = true;
+            type = types.bool;
+            description = lib.mdDoc "Whether this OpenVPN instance should be started automatically.";
+          };
+
+          updateResolvConf = mkOption {
+            default = false;
+            type = types.bool;
+            description = lib.mdDoc ''
+              Use the script from the update-resolv-conf package to automatically
+              update resolv.conf with the DNS information provided by openvpn. The
+              script will be run after the "up" commands and before the "down" commands.
+            '';
+          };
+
+          authUserPass = mkOption {
+            default = null;
+            description = lib.mdDoc ''
+              This option can be used to store the username / password credentials
+              with the "auth-user-pass" authentication method.
+
+              WARNING: Using this option will put the credentials WORLD-READABLE in the Nix store!
+            '';
+            type = types.nullOr (types.submodule {
+
+              options = {
+                username = mkOption {
+                  description = lib.mdDoc "The username to store inside the credentials file.";
+                  type = types.str;
+                };
+
+                password = mkOption {
+                  description = lib.mdDoc "The password to store inside the credentials file.";
+                  type = types.str;
+                };
+              };
+            });
+          };
+        };
+
+      });
+
+    };
+
+    services.openvpn.restartAfterSleep = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc "Whether OpenVPN client should be restarted after sleep.";
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf (cfg.servers != { }) {
+
+    systemd.services = (listToAttrs (mapAttrsFlatten (name: value: nameValuePair "openvpn-${name}" (makeOpenVPNJob value name)) cfg.servers))
+      // restartService;
+
+    environment.systemPackages = [ openvpn ];
+
+    boot.kernelModules = [ "tun" ];
+
+  };
+
+}
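A small client-side sketch built on the options above; the `.ovpn` path is a placeholder, and the external-file import uses the `config = "config /path/to/config.ovpn"` form documented in the option description:

```
{
  services.openvpn.servers.homeVPN = {
    config = "config /root/homeVPN.ovpn";   # import an external config file
    updateResolvConf = true;                # apply DNS settings pushed by the server
    autoStart = false;                      # start with: systemctl start openvpn-homeVPN
  };
}
```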
diff --git a/nixpkgs/nixos/modules/services/networking/ostinato.nix b/nixpkgs/nixos/modules/services/networking/ostinato.nix
new file mode 100644
index 000000000000..dc07313ea901
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ostinato.nix
@@ -0,0 +1,104 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  pkg = pkgs.ostinato;
+  cfg = config.services.ostinato;
+  configFile = pkgs.writeText "drone.ini" ''
+    [General]
+    RateAccuracy=${cfg.rateAccuracy}
+
+    [RpcServer]
+    Address=${cfg.rpcServer.address}
+
+    [PortList]
+    Include=${concatStringsSep "," cfg.portList.include}
+    Exclude=${concatStringsSep "," cfg.portList.exclude}
+  '';
+
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.ostinato = {
+
+      enable = mkEnableOption (lib.mdDoc "Ostinato agent-controller (Drone)");
+
+      port = mkOption {
+        type = types.port;
+        default = 7878;
+        description = lib.mdDoc ''
+          Port to listen on.
+        '';
+      };
+
+      rateAccuracy = mkOption {
+        type = types.enum [ "High" "Low" ];
+        default = "High";
+        description = lib.mdDoc ''
+          To ensure that the actual transmit rate is as close as possible to
+          the configured transmit rate, Drone runs a busy-wait loop.
+          While this provides the maximum accuracy possible, the CPU
+          utilization is 100% while the transmit is on. You can however,
+          sacrifice the accuracy to reduce the CPU load.
+        '';
+      };
+
+      rpcServer = {
+        address = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc ''
+            By default, the Drone RPC server will listen on all interfaces and
+            local IPv4 addresses for incoming connections from clients.  Specify
+            a single IPv4 or IPv6 address if you want to restrict that.
+            To listen on any IPv6 address, use ::
+          '';
+        };
+      };
+
+      portList = {
+        include = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          example = [ "eth*" "lo*" ];
+          description = lib.mdDoc ''
+            For a port to pass the filter and appear on the port list managed
+            by drone, it must be allowed by this include list.
+          '';
+        };
+        exclude = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          example = [ "usbmon*" "eth0" ];
+          description = lib.mdDoc ''
+            A list of ports that will not appear on the port list managed by drone.
+          '';
+        };
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkg ];
+
+    systemd.services.drone = {
+      description = "Ostinato agent-controller";
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        ${pkg}/bin/drone ${toString cfg.port} ${configFile}
+      '';
+    };
+
+  };
+
+}
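A usage sketch for the Drone agent based on the options above; the management address and the port name patterns are placeholders:

```
{
  services.ostinato = {
    enable = true;
    rateAccuracy = "Low";                 # trade transmit-rate accuracy for lower CPU load
    rpcServer.address = "192.168.1.10";   # restrict the RPC listener to one address
    portList = {
      include = [ "eth*" ];
      exclude = [ "lo*" "usbmon*" ];
    };
  };
}
```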
diff --git a/nixpkgs/nixos/modules/services/networking/owamp.nix b/nixpkgs/nixos/modules/services/networking/owamp.nix
new file mode 100644
index 000000000000..32b2dab9e3c7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/owamp.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.owamp;
+in
+{
+
+  ###### interface
+
+  options = {
+    services.owamp.enable = mkEnableOption (lib.mdDoc "OWAMP server");
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    users.users.owamp = {
+      group = "owamp";
+      description = "Owamp daemon";
+      isSystemUser = true;
+    };
+
+    users.groups.owamp = { };
+
+    systemd.services.owamp = {
+      description = "Owamp server";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart="${pkgs.owamp}/bin/owampd -R /run/owamp -d /run/owamp -v -Z ";
+        PrivateTmp = true;
+        Restart = "always";
+        Type="simple";
+        User = "owamp";
+        Group = "owamp";
+        RuntimeDirectory = "owamp";
+        StateDirectory = "owamp";
+        AmbientCapabilities = "cap_net_bind_service";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/pdns-recursor.nix b/nixpkgs/nixos/modules/services/networking/pdns-recursor.nix
new file mode 100644
index 000000000000..f929532ba09f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/pdns-recursor.nix
@@ -0,0 +1,213 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.pdns-recursor;
+
+  oneOrMore  = type: with types; either type (listOf type);
+  valueType  = with types; oneOf [ int str bool path ];
+  configType = with types; attrsOf (nullOr (oneOrMore valueType));
+
+  toBool    = val: if val then "yes" else "no";
+  serialize = val: with types;
+         if str.check       val then val
+    else if int.check       val then toString val
+    else if path.check      val then toString val
+    else if bool.check      val then toBool val
+    else if builtins.isList val then (concatMapStringsSep "," serialize val)
+    else "";
+
+  configDir = pkgs.writeTextDir "recursor.conf"
+    (concatStringsSep "\n"
+      (flip mapAttrsToList cfg.settings
+        (name: val: "${name}=${serialize val}")));
+
+  mkDefaultAttrs = mapAttrs (n: v: mkDefault v);
+
+in {
+  options.services.pdns-recursor = {
+    enable = mkEnableOption (lib.mdDoc "PowerDNS Recursor, a recursive DNS server");
+
+    dns.address = mkOption {
+      type = oneOrMore types.str;
+      default = [ "::" "0.0.0.0" ];
+      description = lib.mdDoc ''
+        IP addresses Recursor DNS server will bind to.
+      '';
+    };
+
+    dns.port = mkOption {
+      type = types.port;
+      default = 53;
+      description = lib.mdDoc ''
+        Port number Recursor DNS server will bind to.
+      '';
+    };
+
+    dns.allowFrom = mkOption {
+      type = types.listOf types.str;
+      default = [
+        "127.0.0.0/8" "10.0.0.0/8" "100.64.0.0/10"
+        "169.254.0.0/16" "192.168.0.0/16" "172.16.0.0/12"
+        "::1/128" "fc00::/7" "fe80::/10"
+      ];
+      example = [ "0.0.0.0/0" "::/0" ];
+      description = lib.mdDoc ''
+        IP address ranges of clients allowed to make DNS queries.
+      '';
+    };
+
+    api.address = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc ''
+        IP address Recursor REST API server will bind to.
+      '';
+    };
+
+    api.port = mkOption {
+      type = types.port;
+      default = 8082;
+      description = lib.mdDoc ''
+        Port number Recursor REST API server will bind to.
+      '';
+    };
+
+    api.allowFrom = mkOption {
+      type = types.listOf types.str;
+      default = [ "127.0.0.1" "::1" ];
+      example = [ "0.0.0.0/0" "::/0" ];
+      description = lib.mdDoc ''
+        IP address ranges of clients allowed to make API requests.
+      '';
+    };
+
+    exportHosts = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+       Whether to export names and IP addresses defined in /etc/hosts.
+      '';
+    };
+
+    forwardZones = mkOption {
+      type = types.attrs;
+      default = {};
+      description = lib.mdDoc ''
+        DNS zones to be forwarded to other authoritative servers.
+      '';
+    };
+
+    forwardZonesRecurse = mkOption {
+      type = types.attrs;
+      example = { eth = "[::1]:5353"; };
+      default = {};
+      description = lib.mdDoc ''
+        DNS zones to be forwarded to other recursive servers.
+      '';
+    };
+
+    dnssecValidation = mkOption {
+      type = types.enum ["off" "process-no-validate" "process" "log-fail" "validate"];
+      default = "validate";
+      description = lib.mdDoc ''
+        Controls the level of DNSSEC processing done by the PowerDNS Recursor.
+        See https://doc.powerdns.com/md/recursor/dnssec/ for a detailed explanation.
+      '';
+    };
+
+    serveRFC1918 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to directly resolve the RFC1918 reverse-mapping domains:
+        `10.in-addr.arpa`,
+        `168.192.in-addr.arpa`,
+        `16-31.172.in-addr.arpa`.
+        This saves load on the AS112 servers.
+      '';
+    };
+
+    settings = mkOption {
+      type = configType;
+      default = { };
+      example = literalExpression ''
+        {
+          loglevel = 8;
+          log-common-errors = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        PowerDNS Recursor settings. Use this option to configure Recursor
+        settings not exposed in a NixOS option or to bypass one.
+        See the full documentation at
+        <https://doc.powerdns.com/recursor/settings.html>
+        for the available options.
+      '';
+    };
+
+    luaConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        The content of the Lua configuration file for PowerDNS Recursor. See
+        <https://doc.powerdns.com/recursor/lua-config/index.html>.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.etc."pdns-recursor".source = configDir;
+
+    services.pdns-recursor.settings = mkDefaultAttrs {
+      local-address = cfg.dns.address;
+      local-port    = cfg.dns.port;
+      allow-from    = cfg.dns.allowFrom;
+
+      webserver-address    = cfg.api.address;
+      webserver-port       = cfg.api.port;
+      webserver-allow-from = cfg.api.allowFrom;
+
+      forward-zones         = mapAttrsToList (zone: uri: "${zone}.=${uri}") cfg.forwardZones;
+      forward-zones-recurse = mapAttrsToList (zone: uri: "${zone}.=${uri}") cfg.forwardZonesRecurse;
+      export-etc-hosts = cfg.exportHosts;
+      dnssec           = cfg.dnssecValidation;
+      serve-rfc1918    = cfg.serveRFC1918;
+      lua-config-file  = pkgs.writeText "recursor.lua" cfg.luaConfig;
+
+      daemon         = false;
+      write-pid      = false;
+      log-timestamp  = false;
+      disable-syslog = true;
+    };
+
+    systemd.packages = [ pkgs.pdns-recursor ];
+
+    systemd.services.pdns-recursor = {
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = [ "" "${pkgs.pdns-recursor}/bin/pdns_recursor --config-dir=${configDir}" ];
+      };
+    };
+
+    users.users.pdns-recursor = {
+      isSystemUser = true;
+      group = "pdns-recursor";
+      description = "PowerDNS Recursor daemon user";
+    };
+
+    users.groups.pdns-recursor = {};
+
+  };
+
+  imports = [
+   (mkRemovedOptionModule [ "services" "pdns-recursor" "extraConfig" ]
+     "To change extra Recursor settings use services.pdns-recursor.settings instead.")
+  ];
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
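A sketch of a LAN resolver built on these options; the zone names and addresses are placeholders, and the extra `settings` keys are examples of raw recursor settings rather than options defined by this module. Note that `forwardZones`/`forwardZonesRecurse` entries are rendered as `zone.=target` lines:

```
{
  services.pdns-recursor = {
    enable = true;
    dns.allowFrom = [ "192.168.0.0/16" "::1/128" ];
    forwardZones = { "internal.example.org" = "192.168.0.53"; };
    forwardZonesRecurse = { lan = "192.168.0.1:5353"; };
    settings = {
      log-common-errors = true;
      max-cache-entries = 1000000;   # raw recursor setting, passed through verbatim
    };
  };
}
```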
diff --git a/nixpkgs/nixos/modules/services/networking/pdnsd.nix b/nixpkgs/nixos/modules/services/networking/pdnsd.nix
new file mode 100644
index 000000000000..8fe27a44eee6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/pdnsd.nix
@@ -0,0 +1,91 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.pdnsd;
+  pdnsd = pkgs.pdnsd;
+  pdnsdUser = "pdnsd";
+  pdnsdGroup = "pdnsd";
+  pdnsdConf = pkgs.writeText "pdnsd.conf"
+    ''
+      global {
+        run_as=${pdnsdUser};
+        cache_dir="${cfg.cacheDir}";
+        ${cfg.globalConfig}
+      }
+
+      server {
+        ${cfg.serverConfig}
+      }
+      ${cfg.extraConfig}
+    '';
+in
+
+{ options =
+    { services.pdnsd =
+        { enable = mkEnableOption (lib.mdDoc "pdnsd");
+
+          cacheDir = mkOption {
+            type = types.str;
+            default = "/var/cache/pdnsd";
+            description = lib.mdDoc "Directory holding the pdnsd cache";
+          };
+
+          globalConfig = mkOption {
+            type = types.lines;
+            default = "";
+            description = lib.mdDoc ''
+              Global configuration that should be added to the global directory
+              of `pdnsd.conf`.
+            '';
+          };
+
+          serverConfig = mkOption {
+            type = types.lines;
+            default = "";
+            description = lib.mdDoc ''
+              Server configuration that should be added to the server directory
+              of `pdnsd.conf`.
+            '';
+          };
+
+          extraConfig = mkOption {
+            type = types.lines;
+            default = "";
+            description = lib.mdDoc ''
+              Extra configuration directives that should be added to
+              `pdnsd.conf`.
+            '';
+          };
+        };
+    };
+
+  config = mkIf cfg.enable {
+    users.users.${pdnsdUser} = {
+      uid = config.ids.uids.pdnsd;
+      group = pdnsdGroup;
+      description = "pdnsd user";
+    };
+
+    users.groups.${pdnsdGroup} = {
+      gid = config.ids.gids.pdnsd;
+    };
+
+    systemd.services.pdnsd =
+      { wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        preStart =
+          ''
+            mkdir -p "${cfg.cacheDir}"
+            touch "${cfg.cacheDir}/pdnsd.cache"
+            chown -R ${pdnsdUser}:${pdnsdGroup} "${cfg.cacheDir}"
+          '';
+        description = "pdnsd";
+        serviceConfig =
+          {
+            ExecStart = "${pdnsd}/bin/pdnsd -c ${pdnsdConf}";
+          };
+      };
+  };
+}
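A sketch of a small caching proxy using the module above; the directives inside `globalConfig`/`serverConfig` are plain pdnsd.conf syntax and the upstream resolver address is a placeholder:

```
{
  services.pdnsd = {
    enable = true;
    globalConfig = ''
      server_ip = 127.0.0.1;
      status_ctl = on;
    '';
    serverConfig = ''
      label = "upstream";
      ip = 9.9.9.9;        # placeholder upstream resolver
      timeout = 4;
    '';
  };
}
```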
diff --git a/nixpkgs/nixos/modules/services/networking/peroxide.nix b/nixpkgs/nixos/modules/services/networking/peroxide.nix
new file mode 100644
index 000000000000..885ee1d96cd0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/peroxide.nix
@@ -0,0 +1,131 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.peroxide;
+  settingsFormat = pkgs.formats.yaml { };
+  stateDir = "peroxide";
+in
+{
+  options.services.peroxide = {
+    enable = mkEnableOption (lib.mdDoc "peroxide");
+
+    package = mkPackageOptionMD pkgs "peroxide" {
+      default = [ "peroxide" ];
+    };
+
+    logLevel = mkOption {
+      # https://github.com/sirupsen/logrus#level-logging
+      type = types.enum [ "Panic" "Fatal" "Error" "Warning" "Info" "Debug" "Trace" ];
+      default = "Warning";
+      example = "Info";
+      description = lib.mdDoc "Only log messages of this priority or higher.";
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          UserPortImap = mkOption {
+            type = types.port;
+            default = 1143;
+            description = lib.mdDoc "The port on which to listen for IMAP connections.";
+          };
+
+          UserPortSmtp = mkOption {
+            type = types.port;
+            default = 1025;
+            description = lib.mdDoc "The port on which to listen for SMTP connections.";
+          };
+
+          ServerAddress = mkOption {
+            type = types.str;
+            default = "[::0]";
+            example = "localhost";
+            description = lib.mdDoc "The address on which to listen for connections.";
+          };
+        };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for peroxide.  See
+        [config.example.yaml](https://github.com/ljanyst/peroxide/blob/master/config.example.yaml)
+        for an example configuration.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.peroxide.settings = {
+      # peroxide deletes the cache directory on startup, which requires write
+      # permission on the parent directory, so we can't use
+      # /var/cache/peroxide
+      CacheDir = "/var/cache/peroxide/cache";
+      X509Key = mkDefault "/var/lib/${stateDir}/key.pem";
+      X509Cert = mkDefault "/var/lib/${stateDir}/cert.pem";
+      CookieJar = "/var/lib/${stateDir}/cookies.json";
+      CredentialsStore = "/var/lib/${stateDir}/credentials.json";
+    };
+
+    users.users.peroxide = {
+      isSystemUser = true;
+      group = "peroxide";
+    };
+    users.groups.peroxide = { };
+
+    systemd.services.peroxide = {
+      description = "Peroxide ProtonMail bridge";
+      requires = [ "network.target" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      restartTriggers = [ config.environment.etc."peroxide.conf".source ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = "peroxide";
+        LogsDirectory = "peroxide";
+        LogsDirectoryMode = "0750";
+        # Specify just "peroxide" so that the user has write permission, because
+        # peroxide deletes and recreates the cache directory on startup.
+        CacheDirectory = [ "peroxide" "peroxide/cache" ];
+        CacheDirectoryMode = "0700";
+        StateDirectory = stateDir;
+        StateDirectoryMode = "0700";
+        ExecStart = "${cfg.package}/bin/peroxide -log-file=/var/log/peroxide/peroxide.log -log-level ${cfg.logLevel}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+      };
+
+      preStart = ''
+        # Create a self-signed certificate if no certificate exists.
+        if [[ ! -e "${cfg.settings.X509Key}" && ! -e "${cfg.settings.X509Cert}" ]]; then
+            ${cfg.package}/bin/peroxide-cfg -action gen-x509 \
+              -x509-org 'N/A' \
+              -x509-cn 'nixos' \
+              -x509-cert "${cfg.settings.X509Cert}" \
+              -x509-key "${cfg.settings.X509Key}"
+        fi
+      '';
+    };
+
+    # https://github.com/ljanyst/peroxide/blob/master/peroxide.logrotate
+    services.logrotate.settings.peroxide = {
+      files = "/var/log/peroxide/peroxide.log";
+      rotate = 31;
+      frequency = "daily";
+      compress = true;
+      delaycompress = true;
+      missingok = true;
+      notifempty = true;
+      su = "peroxide peroxide";
+      postrotate = "systemctl reload peroxide";
+    };
+
+    environment.etc."peroxide.conf".source = settingsFormat.generate "peroxide.conf" cfg.settings;
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta.maintainers = with maintainers; [ aanderse aidalgol ];
+}
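A usage sketch; the values restate the defaults shown above apart from a localhost-only listener, and further keys could be added because `settings` is a freeform YAML submodule:

```
{
  services.peroxide = {
    enable = true;
    logLevel = "Info";
    settings = {
      ServerAddress = "localhost";   # listen only on localhost instead of [::0]
      UserPortImap = 1143;
      UserPortSmtp = 1025;
    };
  };
}
```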
diff --git a/nixpkgs/nixos/modules/services/networking/picosnitch.nix b/nixpkgs/nixos/modules/services/networking/picosnitch.nix
new file mode 100644
index 000000000000..c9b38c1929ca
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/picosnitch.nix
@@ -0,0 +1,26 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.picosnitch;
+in
+{
+  options.services.picosnitch = {
+    enable = mkEnableOption (lib.mdDoc "picosnitch daemon");
+  };
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.picosnitch ];
+    systemd.services.picosnitch = {
+      description = "picosnitch";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        Restart = "always";
+        RestartSec = 5;
+        ExecStart = "${pkgs.picosnitch}/bin/picosnitch start-no-daemon";
+        PIDFile = "/run/picosnitch/picosnitch.pid";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/pixiecore.nix b/nixpkgs/nixos/modules/services/networking/pixiecore.nix
new file mode 100644
index 000000000000..1f47a1d0b631
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/pixiecore.nix
@@ -0,0 +1,143 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.pixiecore;
+in
+{
+  meta.maintainers = with maintainers; [ bbigras danderson ];
+
+  options = {
+    services.pixiecore = {
+      enable = mkEnableOption (lib.mdDoc "Pixiecore");
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports (67, 69, 4011 UDP and 'port', 'statusPort' TCP) in the firewall for Pixiecore.
+        '';
+      };
+
+      mode = mkOption {
+        description = lib.mdDoc "Which mode to use";
+        default = "boot";
+        type = types.enum [ "api" "boot" "quick" ];
+      };
+
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Log more things that aren't directly related to booting a recognized client";
+      };
+
+      dhcpNoBind = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Handle DHCP traffic without binding to the DHCP server port";
+      };
+
+      quick = mkOption {
+        description = lib.mdDoc "Which quick option to use";
+        default = "xyz";
+        type = types.enum [ "arch" "centos" "coreos" "debian" "fedora" "ubuntu" "xyz" ];
+      };
+
+      kernel = mkOption {
+        type = types.either types.str types.path;
+        default = "";
+        description = lib.mdDoc "Kernel path. Ignored unless mode is set to 'boot'";
+      };
+
+      initrd = mkOption {
+        type = types.either types.str types.path;
+        default = "";
+        description = lib.mdDoc "Initrd path. Ignored unless mode is set to 'boot'";
+      };
+
+      cmdLine = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "Kernel commandline arguments. Ignored unless mode is set to 'boot'";
+      };
+
+      listen = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc "IPv4 address to listen on";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 80;
+        description = lib.mdDoc "Port to listen on for HTTP";
+      };
+
+      statusPort = mkOption {
+        type = types.port;
+        default = 80;
+        description = lib.mdDoc "HTTP port for status information (can be the same as --port)";
+      };
+
+      apiServer = mkOption {
+        type = types.str;
+        example = "localhost:8080";
+        description = lib.mdDoc "host:port to connect to the API. Ignored unless mode is set to 'api'";
+      };
+
+      extraArguments = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Additional command line arguments to pass to Pixiecore";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.pixiecore = {};
+    users.users.pixiecore = {
+      description = "Pixiecore daemon user";
+      group = "pixiecore";
+      isSystemUser = true;
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port cfg.statusPort ];
+      allowedUDPPorts = [ 67 69 4011 ];
+    };
+
+    systemd.services.pixiecore = {
+      description = "Pixiecore server";
+      after = [ "network.target"];
+      wants = [ "network.target"];
+      wantedBy = [ "multi-user.target"];
+      serviceConfig = {
+        User = "pixiecore";
+        Restart = "always";
+        AmbientCapabilities = [ "cap_net_bind_service" ] ++ optional cfg.dhcpNoBind "cap_net_raw";
+        ExecStart =
+          let
+            argString =
+              if cfg.mode == "boot"
+              then [ "boot" cfg.kernel ]
+                   ++ optional (cfg.initrd != "") cfg.initrd
+                   ++ optionals (cfg.cmdLine != "") [ "--cmdline" cfg.cmdLine ]
+              else if cfg.mode == "quick"
+              then [ "quick" cfg.quick ]
+              else [ "api" cfg.apiServer ];
+          in
+            ''
+              ${pkgs.pixiecore}/bin/pixiecore \
+                ${lib.escapeShellArgs argString} \
+                ${optionalString cfg.debug "--debug"} \
+                ${optionalString cfg.dhcpNoBind "--dhcp-no-bind"} \
+                --listen-addr ${lib.escapeShellArg cfg.listen} \
+                --port ${toString cfg.port} \
+                --status-port ${toString cfg.statusPort} \
+                ${escapeShellArgs cfg.extraArguments}
+              '';
+      };
+    };
+  };
+}
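A sketch of the `boot` mode based on the options above; the kernel/initrd paths and the kernel command line are placeholders:

```
{
  services.pixiecore = {
    enable = true;
    openFirewall = true;      # opens 67/69/4011 UDP plus port/statusPort TCP
    dhcpNoBind = true;
    mode = "boot";
    kernel = "/var/lib/netboot/bzImage";
    initrd = "/var/lib/netboot/initrd";
    cmdLine = "init=/bin/init loglevel=4";
    port = 8080;
    statusPort = 8080;
  };
}
```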
diff --git a/nixpkgs/nixos/modules/services/networking/pleroma.md b/nixpkgs/nixos/modules/services/networking/pleroma.md
new file mode 100644
index 000000000000..7c499e1c616c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/pleroma.md
@@ -0,0 +1,180 @@
+# Pleroma {#module-services-pleroma}
+
+[Pleroma](https://pleroma.social/) is a lightweight ActivityPub server.
+
+## Generating the Pleroma config {#module-services-pleroma-generate-config}
+
+The `pleroma_ctl` CLI utility will prompt you with some questions and generate an initial config file. This is an example of its usage:
+```ShellSession
+$ mkdir tmp-pleroma
+$ cd tmp-pleroma
+$ nix-shell -p pleroma-otp
+$ pleroma_ctl instance gen --output config.exs --output-psql setup.psql
+```
+
+The `config.exs` file can be further customized following the instructions in the [upstream documentation](https://docs-develop.pleroma.social/backend/configuration/cheatsheet/). Many refinements can also be applied after the service is running.
+
+## Initializing the database {#module-services-pleroma-initialize-db}
+
+First, the PostgreSQL service must be enabled in the NixOS configuration:
+```
+services.postgresql = {
+  enable = true;
+  package = pkgs.postgresql_13;
+};
+```
+and activated with the usual
+```ShellSession
+$ nixos-rebuild switch
+```
+
+Then you can create and seed the database, using the `setup.psql` file that you generated in the previous section, by running
+```ShellSession
+$ sudo -u postgres psql -f setup.psql
+```
+
+## Enabling the Pleroma service locally {#module-services-pleroma-enable}
+
+In this section we will enable the Pleroma service only locally, so its configuration can be improved incrementally.
+
+This is an example configuration, where the [](#opt-services.pleroma.configs) option contains the content of the file `config.exs`, generated [in the first section](#module-services-pleroma-generate-config), but with the secrets (database password, endpoint secret key, salts, etc.) removed. Removing secrets is important, because otherwise they will be stored publicly in the Nix store.
+```
+services.pleroma = {
+  enable = true;
+  secretConfigFile = "/var/lib/pleroma/secrets.exs";
+  configs = [
+    ''
+    import Config
+
+    config :pleroma, Pleroma.Web.Endpoint,
+      url: [host: "pleroma.example.net", scheme: "https", port: 443],
+      http: [ip: {127, 0, 0, 1}, port: 4000]
+
+    config :pleroma, :instance,
+      name: "Test",
+      email: "admin@example.net",
+      notify_email: "admin@example.net",
+      limit: 5000,
+      registrations_open: true
+
+    config :pleroma, :media_proxy,
+      enabled: false,
+      redirect_on_failure: true
+
+    config :pleroma, Pleroma.Repo,
+      adapter: Ecto.Adapters.Postgres,
+      username: "pleroma",
+      database: "pleroma",
+      hostname: "localhost"
+
+    # Configure web push notifications
+    config :web_push_encryption, :vapid_details,
+      subject: "mailto:admin@example.net"
+
+    # ... TO CONTINUE ...
+    ''
+  ];
+};
+```
+
+Secrets must be moved into a file pointed to by [](#opt-services.pleroma.secretConfigFile), in our case `/var/lib/pleroma/secrets.exs`. This file can be created by copying the previously generated `config.exs` file and then removing all the settings except the secrets. This is an example:
+```
+# Pleroma instance passwords
+
+import Config
+
+config :pleroma, Pleroma.Web.Endpoint,
+   secret_key_base: "<the secret generated by pleroma_ctl>",
+   signing_salt: "<the secret generated by pleroma_ctl>"
+
+config :pleroma, Pleroma.Repo,
+  password: "<the secret generated by pleroma_ctl>"
+
+# Configure web push notifications
+config :web_push_encryption, :vapid_details,
+  public_key: "<the secret generated by pleroma_ctl>",
+  private_key: "<the secret generated by pleroma_ctl>"
+
+# ... TO CONTINUE ...
+```
+Note that the lines of the same configuration group are comma separated (i.e. all the lines end with a comma, except the last one), so when the lines with passwords are added or removed, commas must be adjusted accordingly.
+
+The service can be enabled with the usual
+```ShellSession
+$ nixos-rebuild switch
+```
+
+The service is accessible only locally, on `127.0.0.1:4000`. It can be tested using port forwarding like this:
+```ShellSession
+$ ssh -L 4000:localhost:4000 myuser@example.net
+```
+and then accessing <http://localhost:4000> from a web browser.
+
+## Creating the admin user {#module-services-pleroma-admin-user}
+
+After the Pleroma service is running, all [Pleroma administration utilities](https://docs-develop.pleroma.social/) can be used. In particular, an admin user can be created with:
+```ShellSession
+$ pleroma_ctl user new <nickname> <email>  --admin --moderator --password <password>
+```
+
+## Configuring Nginx {#module-services-pleroma-nginx}
+
+In this configuration, Pleroma listens only on the local port 4000. Nginx can be configured as a reverse proxy, forwarding requests from public ports to the Pleroma service. This is an example configuration, using
+[Let's Encrypt](https://letsencrypt.org/) for the TLS certificates
+```
+security.acme = {
+  email = "root@example.net";
+  acceptTerms = true;
+};
+
+services.nginx = {
+  enable = true;
+  addSSL = true;
+
+  recommendedTlsSettings = true;
+  recommendedOptimisation = true;
+  recommendedGzipSettings = true;
+
+  recommendedProxySettings = false;
+  # NOTE: if enabled, the NixOS proxy optimizations will override the Pleroma
+  # specific settings, and the two will conflict.
+
+  virtualHosts = {
+    "pleroma.example.net" = {
+      http2 = true;
+      enableACME = true;
+      forceSSL = true;
+
+      locations."/" = {
+        proxyPass = "http://127.0.0.1:4000";
+
+        extraConfig = ''
+          etag on;
+          gzip on;
+
+          add_header 'Access-Control-Allow-Origin' '*' always;
+          add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
+          add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
+          add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
+          if ($request_method = OPTIONS) {
+            return 204;
+          }
+          add_header X-XSS-Protection "1; mode=block";
+          add_header X-Permitted-Cross-Domain-Policies none;
+          add_header X-Frame-Options DENY;
+          add_header X-Content-Type-Options nosniff;
+          add_header Referrer-Policy same-origin;
+          add_header X-Download-Options noopen;
+          proxy_http_version 1.1;
+          proxy_set_header Upgrade $http_upgrade;
+          proxy_set_header Connection "upgrade";
+          proxy_set_header Host $host;
+
+          client_max_body_size 16m;
+          # NOTE: increase if users need to upload very big files
+        '';
+      };
+    };
+  };
+};
+```
diff --git a/nixpkgs/nixos/modules/services/networking/pleroma.nix b/nixpkgs/nixos/modules/services/networking/pleroma.nix
new file mode 100644
index 000000000000..89e64d36c8a0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/pleroma.nix
@@ -0,0 +1,151 @@
+{ config, options, lib, pkgs, stdenv, ... }:
+let
+  cfg = config.services.pleroma;
+in {
+  options = {
+    services.pleroma = with lib; {
+      enable = mkEnableOption (lib.mdDoc "pleroma");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.pleroma;
+        defaultText = literalExpression "pkgs.pleroma";
+        description = lib.mdDoc "Pleroma package to use.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "pleroma";
+        description = lib.mdDoc "User account under which pleroma runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "pleroma";
+        description = lib.mdDoc "Group account under which pleroma runs.";
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "/var/lib/pleroma";
+        readOnly = true;
+        description = lib.mdDoc "Directory where the pleroma service will save the uploads and static files.";
+      };
+
+      configs = mkOption {
+        type = with types; listOf str;
+        description = lib.mdDoc ''
+          Pleroma public configuration.
+
+          This list gets appended from left to
+          right into /etc/pleroma/config.exs. Elixir evaluates its
+          configuration imperatively, meaning you can override a
+          setting by appending a new string to this NixOS option list.
+
+          *DO NOT STORE ANY PLEROMA SECRET
+          HERE*, use
+          [services.pleroma.secretConfigFile](#opt-services.pleroma.secretConfigFile)
+          instead.
+
+          This setting is going to be stored in a file that is part of
+          the Nix store. Since the Nix store is world-readable, it is not
+          the right place to store any secret.
+
+          Have a look at the Pleroma section in the NixOS manual for more
+          information.
+          '';
+      };
+
+      secretConfigFile = mkOption {
+        type = types.str;
+        default = "/var/lib/pleroma/secrets.exs";
+        description = lib.mdDoc ''
+          Path to the file containing your secret pleroma configuration.
+
+          *DO NOT POINT THIS OPTION TO THE NIX
+          STORE*, the store being world-readable, it'll
+          compromise all your secrets.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    users = {
+      users."${cfg.user}" = {
+        description = "Pleroma user";
+        home = cfg.stateDir;
+        group = cfg.group;
+        isSystemUser = true;
+      };
+      groups."${cfg.group}" = {};
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc."/pleroma/config.exs".text = ''
+      ${lib.concatMapStrings (x: "${x}") cfg.configs}
+
+      # The lau/tzdata library is trying to download the latest
+      # timezone database in the OTP priv directory by default.
+      # This directory being in the store, it's read-only.
+      # Setting that up to a more appropriate location.
+      config :tzdata, :data_dir, "/var/lib/pleroma/elixir_tzdata_data"
+
+      import_config "${cfg.secretConfigFile}"
+    '';
+
+    systemd.services.pleroma = {
+      description = "Pleroma social network";
+      after = [ "network-online.target" "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."/pleroma/config.exs".source ];
+      environment.RELEASE_COOKIE = "/var/lib/pleroma/.cookie";
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Type = "exec";
+        WorkingDirectory = "~";
+        StateDirectory = "pleroma pleroma/static pleroma/uploads";
+        StateDirectoryMode = "700";
+
+        # Checking the conf file is there then running the database
+        # migration before each service start, just in case there are
+        # some pending ones.
+        #
+        # It's sub-optimal as we'll always run this, even if pleroma
+        # has not been updated. But the no-op process is pretty fast.
+        # Better be safe than sorry migration-wise.
+        ExecStartPre =
+          let preScript = pkgs.writers.writeBashBin "pleromaStartPre" ''
+            if [ ! -f /var/lib/pleroma/.cookie ]
+            then
+              echo "Creating cookie file"
+              dd if=/dev/urandom bs=1 count=16 | hexdump -e '16/1 "%02x"' > /var/lib/pleroma/.cookie
+            fi
+            ${cfg.package}/bin/pleroma_ctl migrate
+          '';
+          in "${preScript}/bin/pleromaStartPre";
+
+        ExecStart = "${cfg.package}/bin/pleroma start";
+        ExecStop = "${cfg.package}/bin/pleroma stop";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+
+        # Systemd sandboxing directives.
+        # Taken from the upstream contrib systemd service at
+        # pleroma/installation/pleroma.service
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectSystem = "full";
+        PrivateDevices = false;
+        NoNewPrivileges = true;
+        CapabilityBoundingSet = "~CAP_SYS_ADMIN";
+      };
+      # disksup requires bash
+      path = [ pkgs.bash ];
+    };
+
+  };
+  meta.maintainers = with lib.maintainers; [ picnoir ];
+  meta.doc = ./pleroma.md;
+}
diff --git a/nixpkgs/nixos/modules/services/networking/polipo.nix b/nixpkgs/nixos/modules/services/networking/polipo.nix
new file mode 100644
index 000000000000..8581553829bf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/polipo.nix
@@ -0,0 +1,108 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.polipo;
+
+  polipoConfig = pkgs.writeText "polipo.conf" ''
+    proxyAddress = ${cfg.proxyAddress}
+    proxyPort = ${toString cfg.proxyPort}
+    allowedClients = ${concatStringsSep ", " cfg.allowedClients}
+    ${optionalString (cfg.parentProxy != "") "parentProxy = ${cfg.parentProxy}" }
+    ${optionalString (cfg.socksParentProxy != "") "socksParentProxy = ${cfg.socksParentProxy}" }
+    ${config.services.polipo.extraConfig}
+  '';
+
+in
+
+{
+
+  options = {
+
+    services.polipo = {
+
+      enable = mkEnableOption (lib.mdDoc "polipo caching web proxy");
+
+      proxyAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "IP address on which Polipo will listen.";
+      };
+
+      proxyPort = mkOption {
+        type = types.port;
+        default = 8123;
+        description = lib.mdDoc "TCP port on which Polipo will listen.";
+      };
+
+      allowedClients = mkOption {
+        type = types.listOf types.str;
+        default = [ "127.0.0.1" "::1" ];
+        example = [ "127.0.0.1" "::1" "134.157.168.0/24" "2001:660:116::/48" ];
+        description = lib.mdDoc ''
+          List of IP addresses or network addresses that may connect to Polipo.
+        '';
+      };
+
+      parentProxy = mkOption {
+        type = types.str;
+        default = "";
+        example = "localhost:8124";
+        description = lib.mdDoc ''
+          Hostname and port number of an HTTP parent proxy;
+          it should have the form ‘host:port’.
+        '';
+      };
+
+      socksParentProxy = mkOption {
+        type = types.str;
+        default = "";
+        example = "localhost:9050";
+        description = lib.mdDoc ''
+          Hostname and port number of a SOCKS parent proxy;
+          it should have the form ‘host:port’.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Polipo configuration. Contents will be added
+          verbatim to the configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.polipo =
+      { uid = config.ids.uids.polipo;
+        description = "Polipo caching proxy user";
+        home = "/var/cache/polipo";
+        createHome = true;
+      };
+
+    users.groups.polipo =
+      { gid = config.ids.gids.polipo;
+        members = [ "polipo" ];
+      };
+
+    systemd.services.polipo = {
+      description = "caching web proxy";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.polipo}/bin/polipo -c ${polipoConfig}";
+        User = "polipo";
+      };
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/powerdns.nix b/nixpkgs/nixos/modules/services/networking/powerdns.nix
new file mode 100644
index 000000000000..03bf93301d85
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/powerdns.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.powerdns;
+  configDir = pkgs.writeTextDir "pdns.conf" "${cfg.extraConfig}";
+  finalConfigDir = if cfg.secretFile == null then configDir else "/run/pdns";
+in {
+  options = {
+    services.powerdns = {
+      enable = mkEnableOption (lib.mdDoc "PowerDNS domain name server");
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "launch=bind";
+        description = lib.mdDoc ''
+          PowerDNS configuration. Refer to
+          <https://doc.powerdns.com/authoritative/settings.html>
+          for details on supported values.
+        '';
+      };
+
+      secretFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/powerdns.env";
+        description = lib.mdDoc ''
+          Environment variables from this file will be interpolated into the
+          final config file using envsubst with this syntax: `$ENVIRONMENT`
+          or `''${VARIABLE}`.
+          The file should contain lines formatted as `SECRET_VAR=SECRET_VALUE`.
+          This is useful to avoid putting secrets into the nix store.
+        '';
+      };
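+      # An illustrative combination (values are hypothetical):
+      #   services.powerdns.secretFile = "/run/keys/powerdns.env";
+      #   services.powerdns.extraConfig = ''
+      #     launch=gmysql
+      #     gmysql-password=$DB_PASSWORD
+      #   '';
+      # where /run/keys/powerdns.env contains a line such as DB_PASSWORD=...;
+      # envsubst fills in the placeholder when the service starts.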
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.etc.pdns.source = finalConfigDir;
+
+    systemd.packages = [ pkgs.pdns ];
+
+    systemd.services.pdns = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "mysql.service" "postgresql.service" "openldap.service" ];
+
+      serviceConfig = {
+        EnvironmentFile = lib.optional (cfg.secretFile != null) cfg.secretFile;
+        ExecStartPre = lib.optional (cfg.secretFile != null)
+          (pkgs.writeShellScript "pdns-pre-start" ''
+            umask 077
+            ${pkgs.envsubst}/bin/envsubst -i "${configDir}/pdns.conf" > ${finalConfigDir}/pdns.conf
+          '');
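+        # The leading empty string clears the ExecStart inherited from the
+        # upstream pdns unit (pulled in via systemd.packages) before setting our own.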
+        ExecStart = [ "" "${pkgs.pdns}/bin/pdns_server --config-dir=${finalConfigDir} --guardian=no --daemon=no --disable-syslog --log-timestamp=no --write-pid=no" ];
+      };
+    };
+
+    users.users.pdns = {
+      isSystemUser = true;
+      group = "pdns";
+      description = "PowerDNS";
+    };
+
+    users.groups.pdns = {};
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/pppd.nix b/nixpkgs/nixos/modules/services/networking/pppd.nix
new file mode 100644
index 000000000000..75fc04c67571
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/pppd.nix
@@ -0,0 +1,154 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.pppd;
+in
+{
+  meta = {
+    maintainers = with maintainers; [ danderson ];
+  };
+
+  options = {
+    services.pppd = {
+      enable = mkEnableOption (lib.mdDoc "pppd");
+
+      package = mkOption {
+        default = pkgs.ppp;
+        defaultText = literalExpression "pkgs.ppp";
+        type = types.package;
+        description = lib.mdDoc "pppd package to use.";
+      };
+
+      peers = mkOption {
+        default = {};
+        description = lib.mdDoc "pppd peers.";
+        type = types.attrsOf (types.submodule (
+          { name, ... }:
+          {
+            options = {
+              name = mkOption {
+                type = types.str;
+                default = name;
+                example = "dialup";
+                description = lib.mdDoc "Name of the PPP peer.";
+              };
+
+              enable = mkOption {
+                type = types.bool;
+                default = true;
+                example = false;
+                description = lib.mdDoc "Whether to enable this PPP peer.";
+              };
+
+              autostart = mkOption {
+                type = types.bool;
+                default = true;
+                example = false;
+                description = lib.mdDoc "Whether the PPP session is automatically started at boot time.";
+              };
+
+              config = mkOption {
+                type = types.lines;
+                default = "";
+                description = lib.mdDoc "pppd configuration for this peer, see the pppd(8) man page.";
+              };
+            };
+          }));
+      };
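+      # An illustrative peer definition (device and options are hypothetical; see
+      # pppd(8) for the full option list):
+      #   services.pppd = {
+      #     enable = true;
+      #     peers.dialup.config = ''
+      #       /dev/ttyS0 115200
+      #       noauth
+      #       defaultroute
+      #       usepeerdns
+      #     '';
+      #   };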
+    };
+  };
+
+  config = let
+    enabledConfigs = filter (f: f.enable) (attrValues cfg.peers);
+
+    mkEtc = peerCfg: {
+      name = "ppp/peers/${peerCfg.name}";
+      value.text = peerCfg.config;
+    };
+
+    mkSystemd = peerCfg: {
+      name = "pppd-${peerCfg.name}";
+      value = {
+        restartTriggers = [ config.environment.etc."ppp/peers/${peerCfg.name}".source ];
+        before = [ "network.target" ];
+        wants = [ "network.target" ];
+        after = [ "network-pre.target" ];
+        environment = {
+          # pppd likes to write directly into /var/run. This is rude
+          # on a modern system, so we use libredirect to transparently
+          # move those files into /run/pppd.
+          LD_PRELOAD = "${pkgs.libredirect}/lib/libredirect.so";
+          NIX_REDIRECTS = "/var/run=/run/pppd";
+        };
+        serviceConfig = let
+          capabilities = [
+            "CAP_BPF"
+            "CAP_SYS_TTY_CONFIG"
+            "CAP_NET_ADMIN"
+            "CAP_NET_RAW"
+          ];
+        in
+        {
+          ExecStart = "${getBin cfg.package}/sbin/pppd call ${peerCfg.name} nodetach nolog";
+          Restart = "always";
+          RestartSec = 5;
+
+          AmbientCapabilities = capabilities;
+          CapabilityBoundingSet = capabilities;
+          KeyringMode = "private";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateMounts = true;
+          PrivateTmp = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelModules = true;
+          # pppd can be configured to tweak kernel settings.
+          ProtectKernelTunables = false;
+          ProtectSystem = "strict";
+          RemoveIPC = true;
+          RestrictAddressFamilies = [
+            "AF_ATMPVC"
+            "AF_ATMSVC"
+            "AF_INET"
+            "AF_INET6"
+            "AF_IPX"
+            "AF_NETLINK"
+            "AF_PACKET"
+            "AF_PPPOX"
+            "AF_UNIX"
+          ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SecureBits = "no-setuid-fixup-locked noroot-locked";
+          SystemCallFilter = "@system-service";
+          SystemCallArchitectures = "native";
+
+          # All pppd instances on a system must share a runtime
+          # directory in order for PPP multilink to work correctly. So
+          # we give all instances the same /run/pppd directory to store
+          # things in.
+          #
+          # For the same reason, we can't set PrivateUsers=true, because
+          # all instances need to run as the same user to access the
+          # multilink database.
+          RuntimeDirectory = "pppd";
+          RuntimeDirectoryPreserve = true;
+        };
+        wantedBy = mkIf peerCfg.autostart [ "multi-user.target" ];
+      };
+    };
+
+    etcFiles = listToAttrs (map mkEtc enabledConfigs);
+    systemdConfigs = listToAttrs (map mkSystemd enabledConfigs);
+
+  in mkIf cfg.enable {
+    environment.etc = etcFiles;
+    systemd.services = systemdConfigs;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/pptpd.nix b/nixpkgs/nixos/modules/services/networking/pptpd.nix
new file mode 100644
index 000000000000..703dda99803e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/pptpd.nix
@@ -0,0 +1,124 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options = {
+    services.pptpd = {
+      enable = mkEnableOption (lib.mdDoc "pptpd, the Point-to-Point Tunneling Protocol daemon");
+
+      serverIp = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "The server-side IP address.";
+        default     = "10.124.124.1";
+      };
+
+      clientIpRange = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "The range from which client IPs are drawn.";
+        default     = "10.124.124.2-11";
+      };
+
+      maxClients = mkOption {
+        type        = types.int;
+        description = lib.mdDoc "The maximum number of simultaneous connections.";
+        default     = 10;
+      };
+
+      extraPptpdOptions = mkOption {
+        type        = types.lines;
+        description = lib.mdDoc "Adds extra lines to the pptpd configuration file.";
+        default     = "";
+      };
+
+      extraPppdOptions = mkOption {
+        type        = types.lines;
+        description = lib.mdDoc "Adds extra lines to the pppd options file.";
+        default     = "";
+        example     = ''
+          ms-dns 8.8.8.8
+          ms-dns 8.8.4.4
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.pptpd.enable {
+    systemd.services.pptpd = let
+      cfg = config.services.pptpd;
+
+      pptpd-conf = pkgs.writeText "pptpd.conf" ''
+        # Inspired from pptpd-1.4.0/samples/pptpd.conf
+        ppp ${ppp-pptpd-wrapped}/bin/pppd
+        option ${pppd-options}
+        pidfile /run/pptpd.pid
+        localip ${cfg.serverIp}
+        remoteip ${cfg.clientIpRange}
+        connections ${toString cfg.maxClients} # (Will get harmless warning if inconsistent with IP range)
+
+        # Extra
+        ${cfg.extraPptpdOptions}
+      '';
+
+      pppd-options = pkgs.writeText "ppp-options-pptpd.conf" ''
+        # From: cat pptpd-1.4.0/samples/options.pptpd | grep -v ^# | grep -v ^$
+        name pptpd
+        refuse-pap
+        refuse-chap
+        refuse-mschap
+        require-mschap-v2
+        require-mppe-128
+        proxyarp
+        lock
+        nobsdcomp
+        novj
+        novjccomp
+        nologfd
+
+        # Extra:
+        ${cfg.extraPppdOptions}
+      '';
+
+      ppp-pptpd-wrapped = pkgs.stdenv.mkDerivation {
+        name         = "ppp-pptpd-wrapped";
+        phases       = [ "installPhase" ];
+        nativeBuildInputs  = with pkgs; [ makeWrapper ];
+        installPhase = ''
+          mkdir -p $out/bin
+          makeWrapper ${pkgs.ppp}/bin/pppd $out/bin/pppd \
+            --set LD_PRELOAD    "${pkgs.libredirect}/lib/libredirect.so" \
+            --set NIX_REDIRECTS "/etc/ppp=/etc/ppp-pptpd"
+        '';
+      };
+    in {
+      description = "pptpd server";
+
+      requires = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        mkdir -p -m 700 /etc/ppp-pptpd
+
+        secrets="/etc/ppp-pptpd/chap-secrets"
+
+        [ -f "$secrets" ] || cat > "$secrets" << EOF
+        # From: pptpd-1.4.0/samples/chap-secrets
+        # Secrets for authentication using CHAP
+        # client	server	secret		IP addresses
+        #username	pptpd	password	*
+        EOF
+
+        chown root:root "$secrets"
+        chmod 600 "$secrets"
+      '';
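+      # Accounts are added by appending lines to /etc/ppp-pptpd/chap-secrets in the
+      # sample format above, e.g. (illustrative): alice  pptpd  s3cretpassw0rd  *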
+
+      serviceConfig = {
+        ExecStart = "${pkgs.pptpd}/bin/pptpd --conf ${pptpd-conf}";
+        KillMode  = "process";
+        Restart   = "on-success";
+        Type      = "forking";
+        PIDFile   = "/run/pptpd.pid";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/privoxy.nix b/nixpkgs/nixos/modules/services/networking/privoxy.nix
new file mode 100644
index 000000000000..619490a4c020
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/privoxy.nix
@@ -0,0 +1,281 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.privoxy;
+
+  serialise = name: val:
+         if isList val then concatMapStrings (serialise name) val
+    else if isBool val then serialise name (if val then "1" else "0")
+    else "${name} ${toString val}\n";
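+  # For example (illustrative): serialise "debug" [ 128 64 ] yields the two
+  # lines "debug 128" and "debug 64"; booleans are rendered as 1/0.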
+
+  configType = with types;
+    let atom = oneOf [ int bool str path ];
+    in attrsOf (either atom (listOf atom))
+    // { description = ''
+          privoxy configuration type. The format consists of an attribute
+          set of settings. Each setting can be either a value (integer, string,
+          boolean or path) or a list of such values.
+        '';
+       };
+
+  ageType = types.str // {
+    check = x:
+      isString x &&
+      (builtins.match "([0-9]+([smhdw]|min|ms|us)*)+" x != null);
+    description = "tmpfiles.d(5) age format";
+  };
+
+  configFile = pkgs.writeText "privoxy.conf"
+    (concatStrings (
+      # Relative paths in some options are relative to confdir. Privoxy seems
+      # to parse the options in order of appearance, so this must come first.
+      # Nix however doesn't preserve the order in attrsets, so we have to
+      # hardcode confdir here.
+      [ "confdir ${pkgs.privoxy}/etc\n" ]
+      ++ mapAttrsToList serialise cfg.settings
+    ));
+
+  inspectAction = pkgs.writeText "inspect-all-https.action"
+    ''
+      # Enable HTTPS inspection for all requests
+      {+https-inspection}
+      /
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options.services.privoxy = {
+
+    enable = mkEnableOption (lib.mdDoc "Privoxy, a non-caching filtering proxy");
+
+    enableTor = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to configure Privoxy to use Tor's faster SOCKS port,
+        suitable for HTTP.
+      '';
+    };
+
+    inspectHttps = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to configure Privoxy to inspect HTTPS requests, meaning all
+        encrypted traffic will be filtered as well. This works by decrypting
+        and re-encrypting the requests using a per-domain generated certificate.
+
+        To issue per-domain certificates, Privoxy must be provided with a CA
+        certificate, using the `ca-cert-file`,
+        `ca-key-file` settings.
+
+        ::: {.warning}
+        The CA certificate must also be added to the system trust roots,
+        otherwise browsers will reject all Privoxy certificates as invalid.
+        You can do so by using the option
+        {option}`security.pki.certificateFiles`.
+        :::
+      '';
+    };
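+    # A hedged sketch of enabling HTTPS inspection (paths are hypothetical):
+    #   services.privoxy = {
+    #     enable = true;
+    #     inspectHttps = true;
+    #     settings.ca-cert-file = "/var/lib/privoxy/privoxy-ca.crt";
+    #     settings.ca-key-file  = "/var/lib/privoxy/privoxy-ca.key";
+    #   };
+    #   security.pki.certificateFiles = [ "/var/lib/privoxy/privoxy-ca.crt" ];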
+
+    certsLifetime = mkOption {
+      type = ageType;
+      default = "10d";
+      example = "12h";
+      description = lib.mdDoc ''
+        If `inspectHttps` is enabled, this is how long generated HTTPS
+        certificates will be kept in a temporary directory for reuse. Once
+        the lifetime has expired, the directory will be cleared and the
+        certificates will have to be generated again, on demand.
+
+        Depending on the traffic, you may want to reduce the lifetime to limit
+        the disk usage, since Privoxy itself never deletes the certificates.
+
+        ::: {.note}
+        The format is that of the `tmpfiles.d(5)`
+        Age parameter.
+        :::
+      '';
+    };
+
+    userActions = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Actions to be included in a `user.action` file. This
+        will have a higher priority and can be used to override all other
+        actions.
+      '';
+    };
+
+    userFilters = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Filters to be included in a `user.filter` file. This
+        will have a higher priority and can be used to override all other
+        filters definitions.
+      '';
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = configType;
+
+        options.listen-address = mkOption {
+          type = types.str;
+          default = "127.0.0.1:8118";
+          description = lib.mdDoc "Pair of address:port the proxy server is listening to.";
+        };
+
+        options.enable-edit-actions = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Whether the web-based actions file editor may be used.";
+        };
+
+        options.actionsfile = mkOption {
+          type = types.listOf types.str;
+          # This must come after all other entries, in order to override the
+          # other actions/filters installed by Privoxy or the user.
+          apply = x: x ++ optional (cfg.userActions != "")
+            (toString (pkgs.writeText "user.actions" cfg.userActions));
+          default = [ "match-all.action" "default.action" ];
+          description = lib.mdDoc ''
+            List of paths to Privoxy action files. These paths may either be
+            absolute or relative to the privoxy configuration directory.
+          '';
+        };
+
+        options.filterfile = mkOption {
+          type = types.listOf types.str;
+          default = [ "default.filter" ];
+          apply = x: x ++ optional (cfg.userFilters != "")
+            (toString (pkgs.writeText "user.filter" cfg.userFilters));
+          description = lib.mdDoc ''
+            List of paths to Privoxy filter files. These paths may either be
+            absolute or relative to the privoxy configuration directory.
+          '';
+        };
+      };
+      default = {};
+      example = literalExpression ''
+        { # Listen on IPv6 only
+          listen-address = "[::]:8118";
+
+          # Forward .onion requests to Tor
+          forward-socks5 = ".onion localhost:9050 .";
+
+          # Log redirects and filters
+          debug = [ 128 64 ];
+          # This is equivalent to writing these lines
+          # in the Privoxy configuration file:
+          # debug 128
+          # debug 64
+        }
+      '';
+      description = lib.mdDoc ''
+        This option is mapped to the main Privoxy configuration file.
+        Check out the Privoxy user manual at
+        <https://www.privoxy.org/user-manual/config.html>
+        for available settings and documentation.
+
+        ::: {.note}
+        Repeated settings can be represented by using a list.
+        :::
+      '';
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.privoxy = {
+      description = "Privoxy daemon user";
+      isSystemUser = true;
+      group = "privoxy";
+    };
+
+    users.groups.privoxy = {};
+
+    systemd.tmpfiles.rules = optional cfg.inspectHttps
+      "d ${cfg.settings.certificate-directory} 0770 privoxy privoxy ${cfg.certsLifetime}";
+
+    systemd.services.privoxy = {
+      description = "Filtering web proxy";
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "privoxy";
+        Group = "privoxy";
+        ExecStart = "${pkgs.privoxy}/bin/privoxy --no-daemon ${configFile}";
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectSystem = "full";
+      };
+      unitConfig =  mkIf cfg.inspectHttps {
+        ConditionPathExists = with cfg.settings;
+          [ ca-cert-file ca-key-file ];
+      };
+    };
+
+    services.tor.settings.SOCKSPort = mkIf cfg.enableTor [
+      # Route HTTP traffic over a faster port (without IsolateDestAddr).
+      { addr = "127.0.0.1"; port = 9063; IsolateDestAddr = false; }
+    ];
+
+    services.privoxy.settings = {
+      user-manual = "${pkgs.privoxy}/share/doc/privoxy/user-manual";
+      # This is needed for external filters
+      temporary-directory = "/tmp";
+      filterfile = [ "default.filter" ];
+      actionsfile =
+        [ "match-all.action"
+          "default.action"
+        ] ++ optional cfg.inspectHttps (toString inspectAction);
+    } // (optionalAttrs cfg.enableTor {
+      forward-socks5 = "/ 127.0.0.1:9063 .";
+      toggle = true;
+      enable-remote-toggle = false;
+      enable-edit-actions = false;
+      enable-remote-http-toggle = false;
+    }) // (optionalAttrs cfg.inspectHttps {
+      # This allows setting absolute key/crt paths
+      ca-directory = "/var/empty";
+      certificate-directory = "/run/privoxy/certs";
+      trusted-cas-file = "/etc/ssl/certs/ca-certificates.crt";
+    });
+
+  };
+
+  imports =
+    let
+      top = x: [ "services" "privoxy" x ];
+      setting = x: [ "services" "privoxy" "settings" x ];
+    in
+    [ (mkRenamedOptionModule (top "enableEditActions") (setting "enable-edit-actions"))
+      (mkRenamedOptionModule (top "listenAddress") (setting "listen-address"))
+      (mkRenamedOptionModule (top "actionsFiles") (setting "actionsfile"))
+      (mkRenamedOptionModule (top "filterFiles") (setting "filterfile"))
+      (mkRemovedOptionModule (top "extraConfig")
+      ''
+        Use services.privoxy.settings instead.
+        This is part of the general move to use structured settings instead of raw
+        text for config as introduced by RFC0042:
+        https://github.com/NixOS/rfcs/blob/master/rfcs/0042-config-option.md
+      '')
+    ];
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/prosody.md b/nixpkgs/nixos/modules/services/networking/prosody.md
new file mode 100644
index 000000000000..2da2c242a98b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/prosody.md
@@ -0,0 +1,72 @@
+# Prosody {#module-services-prosody}
+
+[Prosody](https://prosody.im/) is an open-source, modern XMPP server.
+
+## Basic usage {#module-services-prosody-basic-usage}
+
+A common struggle for most XMPP newcomers is to find the right set
+of XMPP Extensions (XEPs) to set up. Forget to activate a few of
+those and your XMPP experience might turn into a nightmare!
+
+The XMPP community tackles this problem by creating a meta-XEP
+listing a decent set of XEPs you should implement. This meta-XEP
+is issued every year, the 2020 edition being
+[XEP-0423](https://xmpp.org/extensions/xep-0423.html).
+
+The NixOS Prosody module implements most of these recommended XEPs out of
+the box. That being said, two components still require some
+manual configuration: the
+[Multi User Chat (MUC)](https://xmpp.org/extensions/xep-0045.html)
+and the [HTTP File Upload](https://xmpp.org/extensions/xep-0363.html) ones.
+You'll need to create a DNS subdomain for each of those. The current convention is to name your
+MUC endpoint `conference.example.org` and your HTTP upload domain `upload.example.org`.
+
+A good configuration to start with, including a
+[Multi User Chat (MUC)](https://xmpp.org/extensions/xep-0045.html)
+endpoint as well as an [HTTP File Upload](https://xmpp.org/extensions/xep-0363.html)
+endpoint, will look like this:
+```
+services.prosody = {
+  enable = true;
+  admins = [ "root@example.org" ];
+  ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
+  ssl.key = "/var/lib/acme/example.org/key.pem";
+  virtualHosts."example.org" = {
+      enabled = true;
+      domain = "example.org";
+      ssl.cert = "/var/lib/acme/example.org/fullchain.pem";
+      ssl.key = "/var/lib/acme/example.org/key.pem";
+  };
+  muc = [ {
+      domain = "conference.example.org";
+  } ];
+  uploadHttp = {
+      domain = "upload.example.org";
+  };
+};
+```
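+
+If your host runs the NixOS firewall, you will likely also want to open the
+standard XMPP ports: 5222 for client connections, 5269 for server-to-server
+federation, plus 5280/5281 for Prosody's built-in HTTP(S) server (a hedged
+example; adjust it to your setup):
+```
+networking.firewall.allowedTCPPorts = [ 5222 5269 5280 5281 ];
+```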
+
+## Let's Encrypt Configuration {#module-services-prosody-letsencrypt}
+
+As you can see in the code snippet from the
+[previous section](#module-services-prosody-basic-usage),
+you'll need a single TLS certificate covering your main endpoint,
+the MUC one as well as the HTTP Upload one. We can generate such a
+certificate by leveraging the ACME
+[extraDomainNames](#opt-security.acme.certs._name_.extraDomainNames) module option.
+
+Given the setup detailed in the previous section, you'll need the following ACME configuration to generate
+a TLS certificate for the three endpoints:
+```
+security.acme = {
+  email = "root@example.org";
+  acceptTerms = true;
+  certs = {
+    "example.org" = {
+      webroot = "/var/www/example.org";
+      email = "root@example.org";
+      extraDomainNames = [ "conference.example.org" "upload.example.org" ];
+    };
+  };
+};
+```
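+
+Depending on how the certificate is provisioned, the `prosody` user may also
+need read access to the generated key material. One way to arrange this (an
+assumption, adjust it to your ACME setup) is to hand the certificate files
+over to the `prosody` group:
+```
+security.acme.certs."example.org".group = "prosody";
+```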
diff --git a/nixpkgs/nixos/modules/services/networking/prosody.nix b/nixpkgs/nixos/modules/services/networking/prosody.nix
new file mode 100644
index 000000000000..038d574bd878
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/prosody.nix
@@ -0,0 +1,905 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.prosody;
+
+  sslOpts = { ... }: {
+
+    options = {
+
+      key = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Path to the key file.";
+      };
+
+      # TODO: rename to certificate to match the prosody config
+      cert = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Path to the certificate file.";
+      };
+
+      extraOptions = mkOption {
+        type = types.attrs;
+        default = {};
+        description = lib.mdDoc "Extra SSL configuration options.";
+      };
+
+    };
+  };
+
+  discoOpts = {
+    options = {
+      url = mkOption {
+        type = types.str;
+        description = lib.mdDoc "URL of the endpoint you want to make discoverable";
+      };
+      description = mkOption {
+        type = types.str;
+        description = lib.mdDoc "A short description of the endpoint you want to advertise";
+      };
+    };
+  };
+
+  moduleOpts = {
+    # Required for compliance with https://compliance.conversations.im/about/
+    roster = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Allow users to have a roster";
+    };
+
+    saslauth = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Authentication for clients and servers. Recommended if you want to log in.";
+    };
+
+    tls = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Add support for secure TLS on c2s/s2s connections";
+    };
+
+    dialback = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "s2s dialback support";
+    };
+
+    disco = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Service discovery";
+    };
+
+    # Not essential, but recommended
+    carbons = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Keep multiple clients in sync";
+    };
+
+    csi = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Implements the CSI protocol that allows clients to report their active/inactive state to the server";
+    };
+
+    cloud_notify = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Push notifications to inform users of new messages or other pertinent information even when they have no XMPP clients online";
+    };
+
+    pep = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Enables users to publish their mood, activity, playing music and more";
+    };
+
+    private = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Private XML storage (for room bookmarks, etc.)";
+    };
+
+    blocklist = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Allow users to block communications with other users";
+    };
+
+    vcard = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Allow users to set vCards";
+    };
+
+    vcard_legacy = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Converts users profiles and Avatars between old and new formats";
+    };
+
+    bookmarks = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Allows interop between older clients that use XEP-0048: Bookmarks in its 1.0 version and recent clients which use it in PEP";
+    };
+
+    # Nice to have
+    version = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Replies to server version requests";
+    };
+
+    uptime = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Report how long the server has been running";
+    };
+
+    time = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Let others know the time here on this server";
+    };
+
+    ping = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Replies to XMPP pings with pongs";
+    };
+
+    register = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Allow users to register on this server using a client and change passwords";
+    };
+
+    mam = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Store messages in an archive and allow users to access it";
+    };
+
+    smacks = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Allow a client to resume a disconnected session, and prevent message loss";
+    };
+
+    # Admin interfaces
+    admin_adhoc = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Allows administration via an XMPP client that supports ad-hoc commands";
+    };
+
+    http_files = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Serve static files from a directory over HTTP";
+    };
+
+    proxy65 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Enables a file transfer proxy service which clients behind NAT can use";
+    };
+
+    admin_telnet = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Opens telnet console interface on localhost port 5582";
+    };
+
+    # HTTP modules
+    bosh = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable BOSH clients, aka 'Jabber over HTTP'";
+    };
+
+    websocket = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable WebSocket support";
+    };
+
+    # Other specific functionality
+    limits = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable bandwidth limiting for XMPP connections";
+    };
+
+    groups = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Shared roster support";
+    };
+
+    server_contact_info = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Publish contact information for this service";
+    };
+
+    announce = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Send announcement to all online users";
+    };
+
+    welcome = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Welcome users who register accounts";
+    };
+
+    watchregistrations = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Alert admins of registrations";
+    };
+
+    motd = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Send a message to users when they log in";
+    };
+
+    legacyauth = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Legacy authentication. Only used by some old clients and bots";
+    };
+  };
+
+  toLua = x:
+    if builtins.isString x then ''"${x}"''
+    else if builtins.isBool x then boolToString x
+    else if builtins.isInt x then toString x
+    else if builtins.isList x then "{ ${lib.concatMapStringsSep ", " toLua x} }"
+    else throw "Invalid Lua value";
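+  # For example (illustrative): toLua [ "roster" "tls" ] renders as
+  # { "roster", "tls" } and toLua true renders as true.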
+
+  createSSLOptsStr = o: ''
+    ssl = {
+      cafile = "/etc/ssl/certs/ca-bundle.crt";
+      key = "${o.key}";
+      certificate = "${o.cert}";
+      ${concatStringsSep "\n" (mapAttrsToList (name: value: "${name} = ${toLua value};") o.extraOptions)}
+    };
+  '';
+
+  mucOpts = { ... }: {
+    options = {
+      domain = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Domain name of the MUC";
+      };
+      name = mkOption {
+        type = types.str;
+        description = lib.mdDoc "The name to return in service discovery responses for the MUC service itself";
+        default = "Prosody Chatrooms";
+      };
+      restrictRoomCreation = mkOption {
+        type = types.enum [ true false "admin" "local" ];
+        default = false;
+        description = lib.mdDoc "Restrict room creation to server admins";
+      };
+      maxHistoryMessages = mkOption {
+        type = types.int;
+        default = 20;
+        description = lib.mdDoc "Specifies an upper limit on the number of history messages each room can be configured to keep";
+      };
+      roomLocking = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enables room locking, which means that a room must be
+          configured before it can be used. Locked rooms are invisible
+          and cannot be entered by anyone but the creator
+        '';
+      };
+      roomLockTimeout = mkOption {
+        type = types.int;
+        default = 300;
+        description = lib.mdDoc ''
+          Timeout after which the room is destroyed or unlocked if not
+          configured, in seconds
+        '';
+      };
+      tombstones = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          When a room is destroyed, it leaves behind a tombstone which
+          prevents the room being entered or recreated. It also allows
+          anyone who was not in the room at the time it was destroyed
+          to learn about it, and to update their bookmarks. Tombstones
+          prevent the case where someone could recreate a previously
+          semi-anonymous room in order to learn the real JIDs of those
+          who often join there.
+        '';
+      };
+      tombstoneExpiry = mkOption {
+        type = types.int;
+        default = 2678400;
+        description = lib.mdDoc ''
+          This setting controls how long a tombstone is considered
+          valid. It defaults to 31 days. After this time, the room in
+          question can be created again.
+        '';
+      };
+
+      vcard_muc = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Adds the ability to set a vCard for Multi User Chat rooms";
+      };
+
+      # Extra parameters. Defaulting to prosody default values.
+      # Adding them explicitly to make them visible from the options
+      # documentation.
+      #
+      # See https://prosody.im/doc/modules/mod_muc for more details.
+      roomDefaultPublic = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "If set, the MUC rooms will be public by default.";
+      };
+      roomDefaultMembersOnly = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "If set, the MUC rooms will only be accessible to the members by default.";
+      };
+      roomDefaultModerated = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "If set, the MUC rooms will be moderated by default.";
+      };
+      roomDefaultPublicJids = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "If set, the MUC rooms will display the public JIDs by default.";
+      };
+      roomDefaultChangeSubject = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "If set, room participants will be allowed to change the room subject by default.";
+      };
+      roomDefaultHistoryLength = mkOption {
+        type = types.int;
+        default = 20;
+        description = lib.mdDoc "Number of history messages sent to participants by default.";
+      };
+      roomDefaultLanguage = mkOption {
+        type = types.str;
+        default = "en";
+        description = lib.mdDoc "Default room language.";
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Additional MUC specific configuration";
+      };
+    };
+  };
+
+  uploadHttpOpts = { ... }: {
+    options = {
+      domain = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Domain name for the http-upload service";
+      };
+      uploadFileSizeLimit = mkOption {
+        type = types.str;
+        default = "50 * 1024 * 1024";
+        description = lib.mdDoc "Maximum file size, in bytes. Defaults to 50MB.";
+      };
+      uploadExpireAfter = mkOption {
+        type = types.str;
+        default = "60 * 60 * 24 * 7";
+        description = lib.mdDoc "Max age of a file before it gets deleted, in seconds.";
+      };
+      userQuota = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 1234;
+        description = lib.mdDoc ''
+          Maximum size of all uploaded files per user, in bytes. There
+          will be no quota if this option is set to null.
+        '';
+      };
+      httpUploadPath = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Directory where the uploaded files will be stored. By
+          default, uploaded files are put in a sub-directory of the
+          default Prosody storage path (usually /var/lib/prosody).
+        '';
+        default = "/var/lib/prosody";
+      };
+    };
+  };
+
+  vHostOpts = { ... }: {
+
+    options = {
+
+      # TODO: require attribute
+      domain = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Domain name";
+      };
+
+      enabled = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the virtual host";
+      };
+
+      ssl = mkOption {
+        type = types.nullOr (types.submodule sslOpts);
+        default = null;
+        description = lib.mdDoc "Paths to SSL files";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Additional virtual host specific configuration";
+      };
+
+    };
+
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.prosody = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the prosody server";
+      };
+
+      xmppComplianceSuite = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          XEP-0423 defines a set of recommended XEPs to implement
+          for a server. It's generally a good idea to implement this
+          set of extensions if you want to provide your users with a
+          good XMPP experience.
+
+          This NixOS module aims to provide an "advanced server"
+          experience as defined in the XEP-0423[1] specification.
+
+          Setting this option to true will prevent you from building a
+          NixOS configuration which won't comply with this standard.
+          You can explicitly decide to ignore this standard if you
+          know what you are doing by setting this option to false.
+
+          [1] https://xmpp.org/extensions/xep-0423.html
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        description = lib.mdDoc "Prosody package to use";
+        default = pkgs.prosody;
+        defaultText = literalExpression "pkgs.prosody";
+        example = literalExpression ''
+          pkgs.prosody.override {
+            withExtraLibs = [ pkgs.luaPackages.lpty ];
+            withCommunityModules = [ "auth_external" ];
+          };
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/prosody";
+        description = lib.mdDoc ''
+          The prosody home directory used to store all data. If left as the default value
+          this directory will automatically be created before the prosody server starts, otherwise
+          you are responsible for ensuring the directory exists with appropriate ownership
+          and permissions.
+        '';
+      };
+
+      disco_items = mkOption {
+        type = types.listOf (types.submodule discoOpts);
+        default = [];
+        description = lib.mdDoc "List of discoverable items you want to advertise.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "prosody";
+        description = lib.mdDoc ''
+          User account under which prosody runs.
+
+          ::: {.note}
+          If left as the default value this user will automatically be created
+          on system activation, otherwise you are responsible for
+          ensuring the user exists before the prosody service starts.
+          :::
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "prosody";
+        description = lib.mdDoc ''
+          Group account under which prosody runs.
+
+          ::: {.note}
+          If left as the default value this group will automatically be created
+          on system activation, otherwise you are responsible for
+          ensuring the group exists before the prosody service starts.
+          :::
+        '';
+      };
+
+      allowRegistration = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Allow account creation";
+      };
+
+      # HTTP server-related options
+      httpPorts = mkOption {
+        type = types.listOf types.int;
+        description = lib.mdDoc "Listening HTTP ports list for this service.";
+        default = [ 5280 ];
+      };
+
+      httpInterfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ "*" "::" ];
+        description = lib.mdDoc "Interfaces on which the HTTP server will listen.";
+      };
+
+      httpsPorts = mkOption {
+        type = types.listOf types.int;
+        description = lib.mdDoc "Listening HTTPS ports list for this service.";
+        default = [ 5281 ];
+      };
+
+      httpsInterfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ "*" "::" ];
+        description = lib.mdDoc "Interfaces on which the HTTPS server will listen.";
+      };
+
+      c2sRequireEncryption = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Force clients to use encrypted connections? This option will
+          prevent clients from authenticating unless they are using encryption.
+        '';
+      };
+
+      s2sRequireEncryption = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Force servers to use encrypted connections? This option will
+          prevent servers from authenticating unless they are using encryption.
+          Note that this is different from authentication.
+        '';
+      };
+
+      s2sSecureAuth = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Force certificate authentication for server-to-server connections?
+          This provides ideal security, but requires servers you communicate
+          with to support encryption AND present valid, trusted certificates.
+          For more information see https://prosody.im/doc/s2s#security
+        '';
+      };
+
+      s2sInsecureDomains = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "insecure.example.com" ];
+        description = lib.mdDoc ''
+          Some servers have invalid or self-signed certificates. You can list
+          remote domains here that will not be required to authenticate using
+          certificates. They will be authenticated using DNS instead, even
+          when s2s_secure_auth is enabled.
+        '';
+      };
+
+      s2sSecureDomains = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "jabber.org" ];
+        description = lib.mdDoc ''
+          Even if you leave s2s_secure_auth disabled, you can still require valid
+          certificates for some domains by specifying a list here.
+        '';
+      };
+
+
+      modules = moduleOpts;
+
+      extraModules = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Enable custom modules";
+      };
+
+      extraPluginPaths = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc "Additional paths in which to look for plugins/modules";
+      };
+
+      uploadHttp = mkOption {
+        description = lib.mdDoc ''
+          Configures the Prosody builtin HTTP server to handle user uploads.
+        '';
+        type = types.nullOr (types.submodule uploadHttpOpts);
+        default = null;
+        example = {
+          domain = "uploads.my-xmpp-example-host.org";
+        };
+      };
+
+      muc = mkOption {
+        type = types.listOf (types.submodule mucOpts);
+        default = [ ];
+        example = [ {
+          domain = "conference.my-xmpp-example-host.org";
+        } ];
+        description = lib.mdDoc "Multi User Chat (MUC) configuration";
+      };
+
+      virtualHosts = mkOption {
+
+        description = lib.mdDoc "Define the virtual hosts";
+
+        type = with types; attrsOf (submodule vHostOpts);
+
+        example = {
+          myhost = {
+            domain = "my-xmpp-example-host.org";
+            enabled = true;
+          };
+        };
+
+        default = {
+          localhost = {
+            domain = "localhost";
+            enabled = true;
+          };
+        };
+
+      };
+
+      ssl = mkOption {
+        type = types.nullOr (types.submodule sslOpts);
+        default = null;
+        description = lib.mdDoc "Paths to SSL files";
+      };
+
+      admins = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "admin1@example.com" "admin2@example.com" ];
+        description = lib.mdDoc "List of administrators of the current host";
+      };
+
+      authentication = mkOption {
+        type = types.enum [ "internal_plain" "internal_hashed" "cyrus" "anonymous" ];
+        default = "internal_hashed";
+        example = "internal_plain";
+        description = lib.mdDoc "Authentication mechanism used for logins.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Additional prosody configuration";
+      };
+
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = let
+      genericErrMsg = ''
+
+          Having a server not XEP-0423-compliant might make your XMPP
+          experience terrible. See the NixOS manual for further
+          information.
+
+          If you know what you're doing, you can disable this warning by
+          setting config.services.prosody.xmppComplianceSuite to false.
+      '';
+      errors = [
+        { assertion = (builtins.length cfg.muc > 0) || !cfg.xmppComplianceSuite;
+          message = ''
+            You need to setup at least a MUC domain to comply with
+            XEP-0423.
+          '' + genericErrMsg;}
+        { assertion = cfg.uploadHttp != null || !cfg.xmppComplianceSuite;
+          message = ''
+            You need to setup the uploadHttp module through
+            config.services.prosody.uploadHttp to comply with
+            XEP-0423.
+          '' + genericErrMsg;}
+      ];
+    in errors;
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc."prosody/prosody.cfg.lua".text =
+      let
+        httpDiscoItems = optionals (cfg.uploadHttp != null)
+            [{ url = cfg.uploadHttp.domain; description = "HTTP upload endpoint";}];
+        mucDiscoItems = builtins.foldl'
+            (acc: muc: [{ url = muc.domain; description = "${muc.domain} MUC endpoint";}] ++ acc)
+            []
+            cfg.muc;
+        discoItems = cfg.disco_items ++ httpDiscoItems ++ mucDiscoItems;
+      in ''
+
+      pidfile = "/run/prosody/prosody.pid"
+
+      log = "*syslog"
+
+      data_path = "${cfg.dataDir}"
+      plugin_paths = {
+        ${lib.concatStringsSep ", " (map (n: "\"${n}\"") cfg.extraPluginPaths) }
+      }
+
+      ${ optionalString  (cfg.ssl != null) (createSSLOptsStr cfg.ssl) }
+
+      admins = ${toLua cfg.admins}
+
+      modules_enabled = {
+
+        ${ lib.concatStringsSep "\n  " (lib.mapAttrsToList
+          (name: val: optionalString val "${toLua name};")
+        cfg.modules) }
+        ${ lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.package.communityModules)}
+        ${ lib.concatStringsSep "\n" (map (x: "${toLua x};") cfg.extraModules)}
+      };
+
+      disco_items = {
+      ${ lib.concatStringsSep "\n" (builtins.map (x: ''{ "${x.url}", "${x.description}"};'') discoItems)}
+      };
+
+      allow_registration = ${toLua cfg.allowRegistration}
+
+      c2s_require_encryption = ${toLua cfg.c2sRequireEncryption}
+
+      s2s_require_encryption = ${toLua cfg.s2sRequireEncryption}
+
+      s2s_secure_auth = ${toLua cfg.s2sSecureAuth}
+
+      s2s_insecure_domains = ${toLua cfg.s2sInsecureDomains}
+
+      s2s_secure_domains = ${toLua cfg.s2sSecureDomains}
+
+      authentication = ${toLua cfg.authentication}
+
+      http_interfaces = ${toLua cfg.httpInterfaces}
+
+      https_interfaces = ${toLua cfg.httpsInterfaces}
+
+      http_ports = ${toLua cfg.httpPorts}
+
+      https_ports = ${toLua cfg.httpsPorts}
+
+      ${ cfg.extraConfig }
+
+      ${lib.concatMapStrings (muc: ''
+        Component ${toLua muc.domain} "muc"
+            modules_enabled = { "muc_mam"; ${optionalString muc.vcard_muc ''"vcard_muc";'' } }
+            name = ${toLua muc.name}
+            restrict_room_creation = ${toLua muc.restrictRoomCreation}
+            max_history_messages = ${toLua muc.maxHistoryMessages}
+            muc_room_locking = ${toLua muc.roomLocking}
+            muc_room_lock_timeout = ${toLua muc.roomLockTimeout}
+            muc_tombstones = ${toLua muc.tombstones}
+            muc_tombstone_expiry = ${toLua muc.tombstoneExpiry}
+            muc_room_default_public = ${toLua muc.roomDefaultPublic}
+            muc_room_default_members_only = ${toLua muc.roomDefaultMembersOnly}
+            muc_room_default_moderated = ${toLua muc.roomDefaultModerated}
+            muc_room_default_public_jids = ${toLua muc.roomDefaultPublicJids}
+            muc_room_default_change_subject = ${toLua muc.roomDefaultChangeSubject}
+            muc_room_default_history_length = ${toLua muc.roomDefaultHistoryLength}
+            muc_room_default_language = ${toLua muc.roomDefaultLanguage}
+            ${ muc.extraConfig }
+        '') cfg.muc}
+
+      ${ lib.optionalString (cfg.uploadHttp != null) ''
+        -- TODO: think about migrating this to mod-http_file_share instead.
+        Component ${toLua cfg.uploadHttp.domain} "http_upload"
+            http_upload_file_size_limit = ${cfg.uploadHttp.uploadFileSizeLimit}
+            http_upload_expire_after = ${cfg.uploadHttp.uploadExpireAfter}
+            ${lib.optionalString (cfg.uploadHttp.userQuota != null) "http_upload_quota = ${toLua cfg.uploadHttp.userQuota}"}
+            http_upload_path = ${toLua cfg.uploadHttp.httpUploadPath}
+      ''}
+
+      ${ lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v: ''
+        VirtualHost "${v.domain}"
+          enabled = ${boolToString v.enabled};
+          ${ optionalString (v.ssl != null) (createSSLOptsStr v.ssl) }
+          ${ v.extraConfig }
+        '') cfg.virtualHosts) }
+    '';
+
+    users.users.prosody = mkIf (cfg.user == "prosody") {
+      uid = config.ids.uids.prosody;
+      description = "Prosody user";
+      inherit (cfg) group;
+      home = cfg.dataDir;
+    };
+
+    users.groups.prosody = mkIf (cfg.group == "prosody") {
+      gid = config.ids.gids.prosody;
+    };
+
+    systemd.services.prosody = {
+      description = "Prosody XMPP server";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."prosody/prosody.cfg.lua".source ];
+      serviceConfig = mkMerge [
+        {
+          User = cfg.user;
+          Group = cfg.group;
+          Type = "forking";
+          RuntimeDirectory = [ "prosody" ];
+          PIDFile = "/run/prosody/prosody.pid";
+          ExecStart = "${cfg.package}/bin/prosodyctl start";
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+
+          MemoryDenyWriteExecute = true;
+          PrivateDevices = true;
+          PrivateMounts = true;
+          PrivateTmp = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+        }
+        (mkIf (cfg.dataDir == "/var/lib/prosody") {
+          StateDirectory = "prosody";
+        })
+      ];
+    };
+
+  };
+
+  meta.doc = ./prosody.md;
+}
diff --git a/nixpkgs/nixos/modules/services/networking/quassel.nix b/nixpkgs/nixos/modules/services/networking/quassel.nix
new file mode 100644
index 000000000000..a074023b5ee4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/quassel.nix
@@ -0,0 +1,139 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.quassel;
+  opt = options.services.quassel;
+  quassel = cfg.package;
+  user = if cfg.user != null then cfg.user else "quassel";
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.quassel = {
+
+      enable = mkEnableOption (lib.mdDoc "the Quassel IRC client daemon");
+
+      certificateFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Path to the certificate used for SSL connections with clients.
+        '';
+      };
+
+      requireSSL = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Require SSL for connections from clients.
+        '';
+      };
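+      # An illustrative pairing (the path is hypothetical):
+      #   requireSSL = true;
+      #   certificateFile = "/var/lib/quassel/quasselCert.pem";
+      # The assertion below rejects requireSSL without a certificateFile.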
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.quasselDaemon;
+        defaultText = literalExpression "pkgs.quasselDaemon";
+        description = lib.mdDoc ''
+          The package of the quassel daemon.
+        '';
+      };
+
+      interfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ "127.0.0.1" ];
+        description = lib.mdDoc ''
+          The interfaces the Quassel daemon will be listening to.  If `[ 127.0.0.1 ]`,
+          only clients on the local host can connect to it; if `[ 0.0.0.0 ]`, clients
+          can access it from any network interface.
+        '';
+      };
+
+      portNumber = mkOption {
+        type = types.port;
+        default = 4242;
+        description = lib.mdDoc ''
+          The port number the Quassel daemon will be listening to.
+        '';
+      };
+
+      dataDir = mkOption {
+        default = "/home/${user}/.config/quassel-irc.org";
+        defaultText = literalExpression ''
+          "/home/''${config.${opt.user}}/.config/quassel-irc.org"
+        '';
+        type = types.str;
+        description = lib.mdDoc ''
+          The directory holding configuration files, the SQLite database and the SSL certificate.
+        '';
+      };
+
+      user = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The existing user the Quassel daemon should run as. If left empty, a default "quassel" user will be created.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.requireSSL -> cfg.certificateFile != null;
+        message = "Quassel needs a certificate file in order to require SSL";
+      }];
+
+    users.users = optionalAttrs (cfg.user == null) {
+      quassel = {
+        name = "quassel";
+        description = "Quassel IRC client daemon";
+        group = "quassel";
+        uid = config.ids.uids.quassel;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.user == null) {
+      quassel = {
+        name = "quassel";
+        gid = config.ids.gids.quassel;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' - ${user} - - -"
+    ];
+
+    systemd.services.quassel =
+      { description = "Quassel IRC client daemon";
+
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ] ++ optional config.services.postgresql.enable "postgresql.service"
+                                     ++ optional config.services.mysql.enable "mysql.service";
+
+        serviceConfig =
+        {
+          ExecStart = concatStringsSep " " ([
+            "${quassel}/bin/quasselcore"
+            "--listen=${concatStringsSep "," cfg.interfaces}"
+            "--port=${toString cfg.portNumber}"
+            "--configdir=${cfg.dataDir}"
+          ] ++ optional cfg.requireSSL "--require-ssl"
+            ++ optional (cfg.certificateFile != null) "--ssl-cert=${cfg.certificateFile}");
+          User = user;
+        };
+      };
+
+  };
+
+}
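For illustration, a minimal NixOS configuration using the module above might look like the sketch below; the certificate path is a placeholder, and requireSSL only makes sense once certificateFile points at a real certificate readable by the daemon.

    { ... }:
    {
      services.quassel = {
        enable = true;
        interfaces = [ "0.0.0.0" ];
        portNumber = 4242;
        requireSSL = true;
        # Placeholder path; replace with the actual certificate location.
        certificateFile = "/home/quassel/quasselCert.pem";
      };
    }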
diff --git a/nixpkgs/nixos/modules/services/networking/quicktun.nix b/nixpkgs/nixos/modules/services/networking/quicktun.nix
new file mode 100644
index 000000000000..7aed972adc88
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/quicktun.nix
@@ -0,0 +1,118 @@
+{ config, pkgs, lib, ... }:
+
+let
+
+  cfg = config.services.quicktun;
+
+in
+
+with lib;
+
+{
+  options = {
+
+    services.quicktun = mkOption {
+      default = { };
+      description = lib.mdDoc "QuickTun tunnels";
+      type = types.attrsOf (types.submodule {
+        options = {
+          tunMode = mkOption {
+            type = types.int;
+            default = 0;
+            example = 1;
+            description = lib.mdDoc "Whether to use a layer 3 (tun) interface. Sets the TUN_MODE environment variable; 0 (the default) selects a layer 2 (tap) interface.";
+          };
+
+          remoteAddress = mkOption {
+            type = types.str;
+            example = "tunnel.example.com";
+            description = lib.mdDoc "Hostname or IP address of the remote tunnel endpoint. Sets REMOTE_ADDRESS.";
+          };
+
+          localAddress = mkOption {
+            type = types.str;
+            example = "0.0.0.0";
+            description = lib.mdDoc "Local address to bind to. Sets LOCAL_ADDRESS.";
+          };
+
+          localPort = mkOption {
+            type = types.int;
+            default = 2998;
+            description = lib.mdDoc "Local UDP port to listen on. Sets LOCAL_PORT.";
+          };
+
+          remotePort = mkOption {
+            type = types.int;
+            default = 2998;
+            description = lib.mdDoc "UDP port of the remote tunnel endpoint. Sets REMOTE_PORT.";
+          };
+
+          remoteFloat = mkOption {
+            type = types.int;
+            default = 0;
+            description = lib.mdDoc "Set to 1 to allow the remote address and port to change (roaming). Sets REMOTE_FLOAT.";
+          };
+
+          protocol = mkOption {
+            type = types.str;
+            default = "nacltai";
+            description = lib.mdDoc "QuickTun protocol variant to use; determines which `quicktun.<protocol>` binary is executed, e.g. `raw` or `nacltai`.";
+          };
+
+          privateKey = mkOption {
+            type = types.str;
+            description = lib.mdDoc "The local secret key. Sets PRIVATE_KEY; note that it ends up world-readable in the Nix store.";
+          };
+
+          publicKey = mkOption {
+            type = types.str;
+            description = lib.mdDoc "The public key of the remote tunnel endpoint. Sets PUBLIC_KEY.";
+          };
+
+          timeWindow = mkOption {
+            type = types.int;
+            default = 5;
+            description = lib.mdDoc "Time window in seconds. Sets TIME_WINDOW.";
+          };
+
+          upScript = mkOption {
+            type = types.lines;
+            default = "";
+            description = lib.mdDoc "Shell commands to run after the tunnel interface has been brought up, e.g. to assign addresses and routes.";
+          };
+        };
+      });
+    };
+
+  };
+
+  config = mkIf (cfg != { }) {
+    systemd.services = foldr (a: b: a // b) {} (
+      mapAttrsToList (name: qtcfg: {
+        "quicktun-${name}" = {
+          wantedBy = [ "multi-user.target" ];
+          after = [ "network.target" ];
+          environment = {
+            INTERFACE = name;
+            TUN_MODE = toString qtcfg.tunMode;
+            REMOTE_ADDRESS = qtcfg.remoteAddress;
+            LOCAL_ADDRESS = qtcfg.localAddress;
+            LOCAL_PORT = toString qtcfg.localPort;
+            REMOTE_PORT = toString qtcfg.remotePort;
+            REMOTE_FLOAT = toString qtcfg.remoteFloat;
+            PRIVATE_KEY = qtcfg.privateKey;
+            PUBLIC_KEY = qtcfg.publicKey;
+            TIME_WINDOW = toString qtcfg.timeWindow;
+            TUN_UP_SCRIPT = pkgs.writeScript "quicktun-${name}-up.sh" qtcfg.upScript;
+            SUID = "nobody";
+          };
+          serviceConfig = {
+            Type = "simple";
+            ExecStart = "${pkgs.quicktun}/bin/quicktun.${qtcfg.protocol}";
+          };
+        };
+      }) cfg
+    );
+  };
+
+}
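As a sketch of how these options fit together, the fragment below defines a single tunnel named tun0; the remote host, keys and addresses are placeholders, and keys configured this way end up world-readable in the Nix store.

    { ... }:
    {
      services.quicktun.tun0 = {
        protocol = "nacltai";
        remoteAddress = "tunnel.example.com";   # placeholder
        localAddress = "0.0.0.0";
        localPort = 2998;
        remotePort = 2998;
        privateKey = "<local secret key>";      # placeholder, world-readable in the store
        publicKey = "<remote public key>";      # placeholder
        # Runs after the interface exists; addresses here are placeholders.
        upScript = ''
          ip addr add 10.10.0.2/30 dev tun0
          ip link set tun0 up
        '';
      };
    }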
diff --git a/nixpkgs/nixos/modules/services/networking/quorum.nix b/nixpkgs/nixos/modules/services/networking/quorum.nix
new file mode 100644
index 000000000000..4b90b12f86fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/quorum.nix
@@ -0,0 +1,231 @@
+{ config, options, pkgs, lib, ... }:
+let
+
+  inherit (lib) mkEnableOption mkIf mkOption literalExpression types optionalString;
+
+  cfg = config.services.quorum;
+  opt = options.services.quorum;
+  dataDir = "/var/lib/quorum";
+  genesisFile = pkgs.writeText "genesis.json" (builtins.toJSON cfg.genesis);
+  staticNodesFile = pkgs.writeText "static-nodes.json" (builtins.toJSON cfg.staticNodes);
+
+in {
+  options = {
+
+    services.quorum = {
+      enable = mkEnableOption (lib.mdDoc "Quorum blockchain daemon");
+
+      user = mkOption {
+        type = types.str;
+        default = "quorum";
+        description = lib.mdDoc "The user as which to run quorum.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = cfg.user;
+        defaultText = literalExpression "config.${opt.user}";
+        description = lib.mdDoc "The group as which to run quorum.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 21000;
+        description = lib.mdDoc "Override the default port on which to listen for connections.";
+      };
+
+      nodekeyFile = mkOption {
+        type = types.path;
+        default = "${dataDir}/nodekey";
+        description = lib.mdDoc "Path to the nodekey.";
+      };
+
+      staticNodes = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "enode://dd333ec28f0a8910c92eb4d336461eea1c20803eed9cf2c056557f986e720f8e693605bba2f4e8f289b1162e5ac7c80c914c7178130711e393ca76abc1d92f57@0.0.0.0:30303?discport=0" ];
+        description = lib.mdDoc "List of validator nodes.";
+      };
+
+      privateconfig = mkOption {
+        type = types.str;
+        default = "ignore";
+        description = lib.mdDoc "Configuration of privacy transaction manager.";
+      };
+
+      syncmode = mkOption {
+        type = types.enum [ "fast" "full" "light" ];
+        default = "full";
+        description = lib.mdDoc "Blockchain sync mode.";
+      };
+
+      blockperiod = mkOption {
+        type = types.int;
+        default = 5;
+        description = lib.mdDoc "Default minimum difference between two consecutive block's timestamps in seconds.";
+      };
+
+      permissioned = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Allow only a defined list of nodes to connect.";
+      };
+
+      rpc = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Enable RPC interface.";
+        };
+
+        address = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc "Listening address for RPC connections.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 22004;
+          description = lib.mdDoc "Override the default port on which to listen for RPC connections.";
+        };
+
+        api = mkOption {
+          type = types.str;
+          default = "admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul";
+          description = lib.mdDoc "API's offered over the HTTP-RPC interface.";
+        };
+      };
+
+      ws = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Enable WS-RPC interface.";
+        };
+
+        address = mkOption {
+          type = types.str;
+          default = "0.0.0.0";
+          description = lib.mdDoc "Listening address for WS-RPC connections.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8546;
+          description = lib.mdDoc "Override the default port on which to listen for WS-RPC connections.";
+        };
+
+        api = mkOption {
+          type = types.str;
+          default = "admin,db,eth,debug,miner,net,shh,txpool,personal,web3,quorum,istanbul";
+          description = lib.mdDoc "API's offered over the WS-RPC interface.";
+        };
+
+        origins = mkOption {
+          type = types.str;
+          default = "*";
+          description = lib.mdDoc "Origins from which to accept WebSocket requests.";
+        };
+      };
+
+      genesis = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        example = literalExpression '' {
+          alloc = {
+            a47385db68718bdcbddc2d2bb7c54018066ec111 = {
+              balance = "1000000000000000000000000000";
+            };
+          };
+          coinbase = "0x0000000000000000000000000000000000000000";
+          config = {
+            byzantiumBlock = 4;
+            chainId = 494702925;
+            eip150Block = 2;
+            eip155Block = 3;
+            eip158Block = 3;
+            homesteadBlock = 1;
+            isQuorum = true;
+            istanbul = {
+              epoch = 30000;
+              policy = 0;
+            };
+          };
+          difficulty = "0x1";
+          extraData = "0x0000000000000000000000000000000000000000000000000000000000000000f85ad59438f0508111273d8e482f49410ca4078afc86a961b8410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0";
+          gasLimit = "0x2FEFD800";
+          mixHash = "0x63746963616c2062797a616e74696e65201111756c7420746f6c6572616e6365";
+          nonce = "0x0";
+          parentHash = "0x0000000000000000000000000000000000000000000000000000000000000000";
+          timestamp = "0x00";
+          }'';
+        description = lib.mdDoc "Blockchain genesis settings.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.quorum ];
+    systemd.tmpfiles.rules = [
+      "d '${dataDir}' 0770 '${cfg.user}' '${cfg.group}' - -"
+    ];
+    systemd.services.quorum = {
+      description = "Quorum daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        PRIVATE_CONFIG = "${cfg.privateconfig}";
+      };
+      preStart = ''
+        if [ ! -d ${dataDir}/geth ]; then
+          if [ ! -d ${dataDir}/keystore ]; then
+            echo "ERROR: You need to create a wallet before initializing your genesis file, run:"
+            echo "  # su -s /bin/sh - quorum"
+            echo "  $ geth --datadir ${dataDir} account new"
+            echo "and configure your genesis file accordingly."
+            exit 1;
+          fi
+          ln -s ${staticNodesFile} ${dataDir}/static-nodes.json
+          ${pkgs.quorum}/bin/geth --datadir ${dataDir} init ${genesisFile}
+        fi
+      '';
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = ''${pkgs.quorum}/bin/geth \
+            --nodiscover \
+            --verbosity 5 \
+            --nodekey ${cfg.nodekeyFile} \
+            --istanbul.blockperiod ${toString cfg.blockperiod} \
+            --syncmode ${cfg.syncmode} \
+            ${optionalString (cfg.permissioned)
+            "--permissioned"} \
+            --mine --minerthreads 1 \
+            ${optionalString (cfg.rpc.enable)
+            "--rpc --rpcaddr ${cfg.rpc.address} --rpcport ${toString cfg.rpc.port} --rpcapi ${cfg.rpc.api}"} \
+            ${optionalString (cfg.ws.enable)
+            "--ws --wsaddr ${cfg.ws.address} --wsport ${toString cfg.ws.port} --wsapi ${cfg.ws.api} --wsorigins ${cfg.ws.origins}"} \
+            --emitcheckpoints \
+            --datadir ${dataDir} \
+            --port ${toString cfg.port}'';
+        Restart = "on-failure";
+
+        # Hardening measures
+        PrivateTmp = "true";
+        ProtectSystem = "full";
+        NoNewPrivileges = "true";
+        PrivateDevices = "true";
+        MemoryDenyWriteExecute = "true";
+      };
+    };
+    users.users.${cfg.user} = {
+      name = cfg.user;
+      group = cfg.group;
+      description = "Quorum daemon user";
+      home = dataDir;
+      isSystemUser = true;
+    };
+    users.groups.${cfg.group} = {};
+  };
+}
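For reference, a minimal sketch of a node configuration; the genesis block is elided here (the genesis option's example above shows a complete one), and the RPC listener is narrowed to localhost instead of the module's 0.0.0.0 default.

    { ... }:
    {
      services.quorum = {
        enable = true;
        permissioned = false;
        syncmode = "full";
        rpc = {
          enable = true;
          address = "127.0.0.1";
        };
        # genesis = { ... };       # see the example in the option description above
        # staticNodes = [ ... ];   # enode URLs of the validator nodes
      };
    }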
diff --git a/nixpkgs/nixos/modules/services/networking/r53-ddns.nix b/nixpkgs/nixos/modules/services/networking/r53-ddns.nix
new file mode 100644
index 000000000000..277b65dcecd4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/r53-ddns.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.r53-ddns;
+  pkg = pkgs.r53-ddns;
+in
+{
+  options = {
+    services.r53-ddns = {
+
+      enable = mkEnableOption (lib.mdDoc "r53-ddyns");
+
+      interval = mkOption {
+        type = types.str;
+        default = "15min";
+        description = lib.mdDoc "How often to update the entry";
+      };
+
+      zoneID = mkOption {
+        type = types.str;
+        description = lib.mdDoc "The ID of your zone in Route53";
+      };
+
+      domain = mkOption {
+        type = types.str;
+        description = lib.mdDoc "The name of your domain in Route53";
+      };
+
+      hostname = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Manually specify the hostname. If left unset, the tool will use the
+          hostname reported by the OS (a call to gethostname).
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          File containing the AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
+          in the format of an EnvironmentFile as described by systemd.exec(5)
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.timers.r53-ddns = {
+      description = "r53-ddns timer";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnBootSec = cfg.interval;
+        OnUnitActiveSec = cfg.interval;
+      };
+    };
+
+    systemd.services.r53-ddns = {
+      description = "r53-ddns service";
+      serviceConfig = {
+        ExecStart = "${pkg}/bin/r53-ddns -zone-id ${cfg.zoneID} -domain ${cfg.domain}"
+          + lib.optionalString (cfg.hostname != null) " -hostname ${cfg.hostname}";
+        EnvironmentFile = "${cfg.environmentFile}";
+        DynamicUser = true;
+      };
+    };
+
+  };
+}
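A minimal sketch of using this module; the zone ID, domain and credentials file are placeholders.

    { ... }:
    {
      services.r53-ddns = {
        enable = true;
        interval = "15min";
        zoneID = "Z0000000000EXAMPLE";              # placeholder
        domain = "example.org";                     # placeholder
        # EnvironmentFile (systemd.exec(5) format) providing AWS_ACCESS_KEY_ID
        # and AWS_SECRET_ACCESS_KEY; placeholder path.
        environmentFile = "/run/keys/r53-ddns.env";
      };
    }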
diff --git a/nixpkgs/nixos/modules/services/networking/radicale.nix b/nixpkgs/nixos/modules/services/networking/radicale.nix
new file mode 100644
index 000000000000..00dbd6bbe386
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/radicale.nix
@@ -0,0 +1,204 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.radicale;
+
+  format = pkgs.formats.ini {
+    listToValue = concatMapStringsSep ", " (generators.mkValueStringDefault { });
+  };
+
+  pkg = if cfg.package == null then
+    pkgs.radicale
+  else
+    cfg.package;
+
+  confFile = if cfg.settings == { } then
+    pkgs.writeText "radicale.conf" cfg.config
+  else
+    format.generate "radicale.conf" cfg.settings;
+
+  rightsFile = format.generate "radicale.rights" cfg.rights;
+
+  bindLocalhost = cfg.settings != { } && !hasAttrByPath [ "server" "hosts" ] cfg.settings;
+
+in {
+  options.services.radicale = {
+    enable = mkEnableOption (lib.mdDoc "Radicale CalDAV and CardDAV server");
+
+    package = mkOption {
+      description = lib.mdDoc "Radicale package to use.";
+      # Default cannot be pkgs.radicale because non-null values suppress
+      # warnings about incompatible configuration and storage formats.
+      type = with types; nullOr package // { inherit (package) description; };
+      default = null;
+      defaultText = literalExpression "pkgs.radicale";
+    };
+
+    config = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        Radicale configuration; the contents of this option are written
+        verbatim to the service configuration file.
+        This option is mutually exclusive with {option}`settings`.
+        This option is deprecated.  Use {option}`settings` instead.
+      '';
+    };
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for Radicale. See
+        <https://radicale.org/3.0.html#documentation/configuration>.
+        This option is mutually exclusive with {option}`config`.
+      '';
+      example = literalExpression ''
+        server = {
+          hosts = [ "0.0.0.0:5232" "[::]:5232" ];
+        };
+        auth = {
+          type = "htpasswd";
+          htpasswd_filename = "/etc/radicale/users";
+          htpasswd_encryption = "bcrypt";
+        };
+        storage = {
+          filesystem_folder = "/var/lib/radicale/collections";
+        };
+      '';
+    };
+
+    rights = mkOption {
+      type = format.type;
+      description = lib.mdDoc ''
+        Configuration for Radicale's rights file. See
+        <https://radicale.org/3.0.html#documentation/authentication-and-rights>.
+        This option only works in conjunction with {option}`settings`.
+        Setting this will also set {option}`settings.rights.type` and
+        {option}`settings.rights.file` to appropriate values.
+      '';
+      default = { };
+      example = literalExpression ''
+        root = {
+          user = ".+";
+          collection = "";
+          permissions = "R";
+        };
+        principal = {
+          user = ".+";
+          collection = "{user}";
+          permissions = "RW";
+        };
+        calendars = {
+          user = ".+";
+          collection = "{user}/[^/]+";
+          permissions = "rw";
+        };
+      '';
+    };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc "Extra arguments passed to the Radicale daemon.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.settings == { } || cfg.config == "";
+        message = ''
+          The options services.radicale.config and services.radicale.settings
+          are mutually exclusive.
+        '';
+      }
+    ];
+
+    warnings = optional (cfg.package == null && versionOlder config.system.stateVersion "17.09") ''
+      The configuration and storage formats of your existing Radicale
+      installation might be incompatible with the newest version.
+      For upgrade instructions see
+      https://radicale.org/2.1.html#documentation/migration-from-1xx-to-2xx.
+      Set services.radicale.package to suppress this warning.
+    '' ++ optional (cfg.package == null && versionOlder config.system.stateVersion "20.09") ''
+      The configuration format of your existing Radicale installation might be
+      incompatible with the newest version.  For upgrade instructions see
+      https://github.com/Kozea/Radicale/blob/3.0.6/NEWS.md#upgrade-checklist.
+      Set services.radicale.package to suppress this warning.
+    '' ++ optional (cfg.config != "") ''
+      The option services.radicale.config is deprecated.
+      Use services.radicale.settings instead.
+    '';
+
+    services.radicale.settings.rights = mkIf (cfg.rights != { }) {
+      type = "from_file";
+      file = toString rightsFile;
+    };
+
+    environment.systemPackages = [ pkg ];
+
+    users.users.radicale = {
+      isSystemUser = true;
+      group = "radicale";
+    };
+
+    users.groups.radicale = {};
+
+    systemd.services.radicale = {
+      description = "A Simple Calendar and Contact Server";
+      after = [ "network.target" ];
+      requires = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = concatStringsSep " " ([
+          "${pkg}/bin/radicale" "-C" confFile
+        ] ++ (
+          map escapeShellArg cfg.extraArgs
+        ));
+        User = "radicale";
+        Group = "radicale";
+        StateDirectory = "radicale/collections";
+        StateDirectoryMode = "0750";
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "/dev/stdin" "/dev/urandom" ];
+        DevicePolicy = "strict";
+        IPAddressAllow = mkIf bindLocalhost "localhost";
+        IPAddressDeny = mkIf bindLocalhost "any";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        ReadWritePaths = lib.optional
+          (hasAttrByPath [ "storage" "filesystem_folder" ] cfg.settings)
+          cfg.settings.storage.filesystem_folder;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+        UMask = "0027";
+        WorkingDirectory = "/var/lib/radicale";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ infinisil dotlambda ];
+}
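Combining the settings and rights options, a sketch of a password-protected instance could look like this; the htpasswd path is a placeholder, and setting rights automatically points settings.rights at the generated rights file.

    { ... }:
    {
      services.radicale = {
        enable = true;
        settings = {
          server.hosts = [ "0.0.0.0:5232" "[::]:5232" ];
          auth = {
            type = "htpasswd";
            htpasswd_filename = "/etc/radicale/users";   # placeholder
            htpasswd_encryption = "bcrypt";
          };
        };
        rights = {
          principal = {
            user = ".+";
            collection = "{user}";
            permissions = "RW";
          };
        };
      };
    }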
diff --git a/nixpkgs/nixos/modules/services/networking/radvd.nix b/nixpkgs/nixos/modules/services/networking/radvd.nix
new file mode 100644
index 000000000000..72590eda4ee6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/radvd.nix
@@ -0,0 +1,86 @@
+# Module for the IPv6 Router Advertisement Daemon.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.radvd;
+
+  confFile = pkgs.writeText "radvd.conf" cfg.config;
+
+in
+
+{
+
+  ###### interface
+
+  options.services.radvd = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description =
+        lib.mdDoc ''
+          Whether to enable the Router Advertisement Daemon
+          ({command}`radvd`), which provides link-local
+          advertisements of IPv6 router addresses and prefixes using
+          the Neighbor Discovery Protocol (NDP).  This enables
+          stateless address autoconfiguration in IPv6 clients on the
+          network.
+        '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.radvd;
+      defaultText = literalExpression "pkgs.radvd";
+      description = lib.mdDoc ''
+        The radvd package to use.
+      '';
+    };
+
+    config = mkOption {
+      type = types.lines;
+      example =
+        ''
+          interface eth0 {
+            AdvSendAdvert on;
+            prefix 2001:db8:1234:5678::/64 { };
+          };
+        '';
+      description =
+        lib.mdDoc ''
+          The contents of the radvd configuration file.
+        '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.radvd =
+      {
+        isSystemUser = true;
+        group = "radvd";
+        description = "Router Advertisement Daemon User";
+      };
+    users.groups.radvd = {};
+
+    systemd.services.radvd =
+      { description = "IPv6 Router Advertisement Daemon";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig =
+          { ExecStart = "@${cfg.package}/bin/radvd radvd -n -u radvd -C ${confFile}";
+            Restart = "always";
+          };
+      };
+
+  };
+
+}
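A minimal sketch, reusing the interface stanza from the config option's example; 2001:db8::/32 is a documentation prefix, so substitute the prefix actually routed to your network.

    { ... }:
    {
      services.radvd = {
        enable = true;
        config = ''
          interface eth0 {
            AdvSendAdvert on;
            prefix 2001:db8:1234:5678::/64 { };
          };
        '';
      };
    }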
diff --git a/nixpkgs/nixos/modules/services/networking/rdnssd.nix b/nixpkgs/nixos/modules/services/networking/rdnssd.nix
new file mode 100644
index 000000000000..c63356e73468
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/rdnssd.nix
@@ -0,0 +1,82 @@
+# Module for rdnssd, a daemon that configures DNS servers in
+# /etc/resolv.conf from IPv6 RDNSS advertisements.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  mergeHook = pkgs.writeScript "rdnssd-merge-hook" ''
+    #! ${pkgs.runtimeShell} -e
+    ${pkgs.openresolv}/bin/resolvconf -u
+  '';
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.rdnssd.enable = mkOption {
+      type = types.bool;
+      default = false;
+      #default = config.networking.enableIPv6;
+      description =
+        lib.mdDoc ''
+          Whether to enable the RDNSS daemon
+          ({command}`rdnssd`), which configures DNS servers in
+          {file}`/etc/resolv.conf` from RDNSS
+          advertisements sent by IPv6 routers.
+        '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.rdnssd.enable {
+
+    assertions = [{
+      assertion = config.networking.resolvconf.enable;
+      message = "rdnssd needs resolvconf to work (probably something sets up a static resolv.conf)";
+    }];
+
+    systemd.services.rdnssd = {
+      description = "RDNSS daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        # Create the proper run directory
+        mkdir -p /run/rdnssd
+        touch /run/rdnssd/resolv.conf
+        chown -R rdnssd /run/rdnssd
+
+        # Link the resolvconf interfaces to rdnssd
+        rm -f /run/resolvconf/interfaces/rdnssd
+        ln -s /run/rdnssd/resolv.conf /run/resolvconf/interfaces/rdnssd
+        ${mergeHook}
+      '';
+
+      postStop = ''
+        rm -f /run/resolvconf/interfaces/rdnssd
+        ${mergeHook}
+      '';
+
+      serviceConfig = {
+        ExecStart = "@${pkgs.ndisc6}/bin/rdnssd rdnssd -p /run/rdnssd/rdnssd.pid -r /run/rdnssd/resolv.conf -u rdnssd -H ${mergeHook}";
+        Type = "forking";
+        PIDFile = "/run/rdnssd/rdnssd.pid";
+      };
+    };
+
+    users.users.rdnssd = {
+      description = "RDNSSD Daemon User";
+      isSystemUser = true;
+      group = "rdnssd";
+    };
+    users.groups.rdnssd = {};
+
+  };
+
+}
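Enabling the daemon is a single option; as the assertion above requires, resolvconf must also be active. A sketch:

    { ... }:
    {
      services.rdnssd.enable = true;
      # Required by the assertion above; usually already true unless something
      # else in the configuration sets up a static /etc/resolv.conf.
      networking.resolvconf.enable = true;
    }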
diff --git a/nixpkgs/nixos/modules/services/networking/redsocks.nix b/nixpkgs/nixos/modules/services/networking/redsocks.nix
new file mode 100644
index 000000000000..30d6a0a6336d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/redsocks.nix
@@ -0,0 +1,273 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.redsocks;
+in
+{
+  ##### interface
+  options = {
+    services.redsocks = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable redsocks.";
+      };
+
+      log_debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Log connection progress.";
+      };
+
+      log_info = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Log start and end of client sessions.";
+      };
+
+      log = mkOption {
+        type = types.str;
+        default = "stderr";
+        description =
+          lib.mdDoc ''
+            Where to send logs.
+
+            Possible values are:
+              - stderr
+              - file:/path/to/file
+              - syslog:FACILITY where FACILITY is any of "daemon", "local0",
+                etc.
+          '';
+      };
+
+      chroot = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description =
+          lib.mdDoc ''
+            Chroot under which to run redsocks. The log file is opened before
+            chrooting, but if logging to syslog, /etc/localtime may need to be
+            available inside the chroot.
+          '';
+      };
+
+      redsocks = mkOption {
+        description =
+          lib.mdDoc ''
+            List of redsocks instances to run: for each one, the local port to
+            listen on, the proxy to forward connections to, and which outbound
+            traffic gets redirected to it.
+
+            The example shows how to configure a proxy to handle port 80 as HTTP
+            relay, and all other ports as HTTP connect.
+          '';
+        example = [
+          { port = 23456; proxy = "1.2.3.4:8080"; type = "http-relay";
+            redirectCondition = "--dport 80";
+            doNotRedirect = [ "-d 1.2.0.0/16" ];
+          }
+          { port = 23457; proxy = "1.2.3.4:8080"; type = "http-connect";
+            redirectCondition = true;
+            doNotRedirect = [ "-d 1.2.0.0/16" ];
+          }
+        ];
+        type = types.listOf (types.submodule { options = {
+          ip = mkOption {
+            type = types.str;
+            default = "127.0.0.1";
+            description =
+              lib.mdDoc ''
+                IP on which redsocks should listen. Defaults to 127.0.0.1 for
+                security reasons.
+              '';
+          };
+
+          port = mkOption {
+            type = types.port;
+            default = 12345;
+            description = lib.mdDoc "Port on which redsocks should listen.";
+          };
+
+          proxy = mkOption {
+            type = types.str;
+            description =
+              lib.mdDoc ''
+                Proxy through which redsocks should forward incoming traffic.
+                Example: "example.org:8080"
+              '';
+          };
+
+          type = mkOption {
+            type = types.enum [ "socks4" "socks5" "http-connect" "http-relay" ];
+            description = lib.mdDoc "Type of proxy.";
+          };
+
+          login = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            description = lib.mdDoc "Login to send to proxy.";
+          };
+
+          password = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            description =
+              lib.mdDoc ''
+                Password to send to proxy. WARNING, this will end up
+                world-readable in the store! Awaiting
+                https://github.com/NixOS/nix/issues/8 to be able to fix.
+              '';
+          };
+
+          disclose_src = mkOption {
+            type = types.enum [ "false" "X-Forwarded-For" "Forwarded_ip"
+                                "Forwarded_ipport" ];
+            default = "false";
+            description =
+              lib.mdDoc ''
+                Way to disclose client IP to the proxy.
+                  - "false": do not disclose
+
+                http-connect supports the following ways:
+                  - "X-Forwarded-For": add header "X-Forwarded-For: IP"
+                  - "Forwarded_ip": add header "Forwarded: for=IP" (see RFC7239)
+                  - "Forwarded_ipport": add header 'Forwarded: for="IP:port"'
+              '';
+          };
+
+          redirectInternetOnly = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc "Exclude all non-globally-routable IPs from redsocks";
+          };
+
+          doNotRedirect = mkOption {
+            type = with types; listOf str;
+            default = [];
+            description =
+              lib.mdDoc ''
+                Iptables filters that if matched will get the packet off of
+                redsocks.
+              '';
+            example = [ "-d 1.2.3.4" ];
+          };
+
+          redirectCondition = mkOption {
+            type = with types; either bool str;
+            default = false;
+            description =
+              lib.mdDoc ''
+                Conditions to make outbound packets go through this redsocks
+                instance.
+
+                If set to false, no packet will be forwarded. If set to true,
+                all packets will be forwarded (except packets excluded by
+                redirectInternetOnly).
+
+                If set to a string, this is an iptables filter that will be
+                matched against packets before getting them into redsocks. For
+                example, setting it to "--dport 80" will send only packets
+                destined for port 80 to redsocks. Note "-p tcp" is always
+                implicitly added, as UDP can only be proxied through redudp or
+                the like.
+              '';
+          };
+        };});
+      };
+
+      # TODO: Add support for redudp and dnstc
+    };
+  };
+
+  ##### implementation
+  config = let
+    redsocks_blocks = concatMapStrings (block:
+      let proxy = splitString ":" block.proxy; in
+      ''
+        redsocks {
+          local_ip = ${block.ip};
+          local_port = ${toString block.port};
+
+          ip = ${elemAt proxy 0};
+          port = ${elemAt proxy 1};
+          type = ${block.type};
+
+          ${optionalString (block.login != null) "login = \"${block.login}\";"}
+          ${optionalString (block.password != null) "password = \"${block.password}\";"}
+
+          disclose_src = ${block.disclose_src};
+        }
+      '') cfg.redsocks;
+    configfile = pkgs.writeText "redsocks.conf"
+      ''
+        base {
+          log_debug = ${if cfg.log_debug then "on" else "off" };
+          log_info = ${if cfg.log_info then "on" else "off" };
+          log = ${cfg.log};
+
+          daemon = off;
+          redirector = iptables;
+
+          user = redsocks;
+          group = redsocks;
+          ${optionalString (cfg.chroot != null) "chroot = ${cfg.chroot};"}
+        }
+
+        ${redsocks_blocks}
+      '';
+    internetOnly = [ # TODO: add ipv6-equivalent
+      "-d 0.0.0.0/8"
+      "-d 10.0.0.0/8"
+      "-d 127.0.0.0/8"
+      "-d 169.254.0.0/16"
+      "-d 172.16.0.0/12"
+      "-d 192.168.0.0/16"
+      "-d 224.168.0.0/4"
+      "-d 240.168.0.0/4"
+    ];
+    redCond = block:
+      optionalString (isString block.redirectCondition) block.redirectCondition;
+    iptables = concatImapStrings (idx: block:
+      let chain = "REDSOCKS${toString idx}"; doNotRedirect =
+        concatMapStringsSep "\n"
+          (f: "ip46tables -t nat -A ${chain} ${f} -j RETURN 2>/dev/null || true")
+          (block.doNotRedirect ++ (optionals block.redirectInternetOnly internetOnly));
+      in
+      optionalString (block.redirectCondition != false)
+        ''
+          ip46tables -t nat -F ${chain} 2>/dev/null || true
+          ip46tables -t nat -N ${chain} 2>/dev/null || true
+          ${doNotRedirect}
+          ip46tables -t nat -A ${chain} -p tcp -j REDIRECT --to-ports ${toString block.port}
+
+          # TODO: show errors, when it will be easily possible by a switch to
+          # iptables-restore
+          ip46tables -t nat -A OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true
+        ''
+    ) cfg.redsocks;
+  in
+    mkIf cfg.enable {
+      users.groups.redsocks = {};
+      users.users.redsocks = {
+        description = "Redsocks daemon";
+        group = "redsocks";
+        isSystemUser = true;
+      };
+
+      systemd.services.redsocks = {
+        description = "Redsocks";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        script = "${pkgs.redsocks}/bin/redsocks -c ${configfile}";
+      };
+
+      networking.firewall.extraCommands = iptables;
+
+      networking.firewall.extraStopCommands =
+        concatImapStringsSep "\n" (idx: block:
+          let chain = "REDSOCKS${toString idx}"; in
+          optionalString (block.redirectCondition != false)
+            "ip46tables -t nat -D OUTPUT -p tcp ${redCond block} -j ${chain} 2>/dev/null || true"
+        ) cfg.redsocks;
+    };
+
+  meta.maintainers = with lib.maintainers; [ ekleog ];
+}
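A sketch based on the option's own example: plain HTTP is pushed through an upstream proxy as an HTTP relay, while the proxy's own subnet is excluded to avoid a loop. The proxy address and subnet are placeholders.

    { ... }:
    {
      services.redsocks = {
        enable = true;
        redsocks = [
          {
            port = 23456;
            proxy = "proxy.example.org:8080";        # placeholder
            type = "http-relay";
            redirectCondition = "--dport 80";
            doNotRedirect = [ "-d 192.0.2.0/24" ];   # placeholder: subnet of the proxy itself
          }
        ];
      };
    }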
diff --git a/nixpkgs/nixos/modules/services/networking/resilio.nix b/nixpkgs/nixos/modules/services/networking/resilio.nix
new file mode 100644
index 000000000000..7f6358d00d0b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/resilio.nix
@@ -0,0 +1,295 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.resilio;
+
+  resilioSync = pkgs.resilio-sync;
+
+  sharedFoldersRecord = map (entry: {
+    dir = entry.directory;
+
+    use_relay_server = entry.useRelayServer;
+    use_tracker = entry.useTracker;
+    use_dht = entry.useDHT;
+
+    search_lan = entry.searchLAN;
+    use_sync_trash = entry.useSyncTrash;
+    known_hosts = entry.knownHosts;
+  }) cfg.sharedFolders;
+
+  configFile = pkgs.writeText "config.json" (builtins.toJSON ({
+    device_name = cfg.deviceName;
+    storage_path = cfg.storagePath;
+    listening_port = cfg.listeningPort;
+    use_gui = false;
+    check_for_updates = cfg.checkForUpdates;
+    use_upnp = cfg.useUpnp;
+    download_limit = cfg.downloadLimit;
+    upload_limit = cfg.uploadLimit;
+    lan_encrypt_data = cfg.encryptLAN;
+  } // optionalAttrs (cfg.directoryRoot != "") { directory_root = cfg.directoryRoot; }
+    // optionalAttrs cfg.enableWebUI {
+    webui = { listen = "${cfg.httpListenAddr}:${toString cfg.httpListenPort}"; } //
+      (optionalAttrs (cfg.httpLogin != "") { login = cfg.httpLogin; }) //
+      (optionalAttrs (cfg.httpPass != "") { password = cfg.httpPass; }) //
+      (optionalAttrs (cfg.apiKey != "") { api_key = cfg.apiKey; });
+  } // optionalAttrs (sharedFoldersRecord != []) {
+    shared_folders = sharedFoldersRecord;
+  }));
+
+  sharedFoldersSecretFiles = map (entry: {
+    dir = entry.directory;
+    secretFile = if builtins.hasAttr "secret" entry then
+      toString (pkgs.writeTextFile {
+        name = "secret-file";
+        text = entry.secret;
+      })
+    else
+      entry.secretFile;
+  }) cfg.sharedFolders;
+
+  runConfigPath = "/run/rslsync/config.json";
+
+  createConfig = pkgs.writeShellScriptBin "create-resilio-config" (
+    if cfg.sharedFolders != [ ] then ''
+      ${pkgs.jq}/bin/jq \
+        '.shared_folders |= map(.secret = $ARGS.named[.dir])' \
+        ${
+          lib.concatMapStringsSep " \\\n  "
+          (entry: ''--arg '${entry.dir}' "$(cat '${entry.secretFile}')"'')
+          sharedFoldersSecretFiles
+        } \
+        <${configFile} \
+        >${runConfigPath}
+    '' else ''
+      # no secrets, passing through config
+      cp ${configFile} ${runConfigPath};
+    ''
+  );
+
+in
+{
+  options = {
+    services.resilio = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If enabled, start the Resilio Sync daemon. Once enabled, you can
+          interact with the service through the Web UI, or configure it in your
+          NixOS configuration.
+        '';
+      };
+
+      deviceName = mkOption {
+        type = types.str;
+        example = "Voltron";
+        default = config.networking.hostName;
+        defaultText = literalExpression "config.networking.hostName";
+        description = lib.mdDoc ''
+          Name of the Resilio Sync device.
+        '';
+      };
+
+      listeningPort = mkOption {
+        type = types.int;
+        default = 0;
+        example = 44444;
+        description = lib.mdDoc ''
+          Listening port. Defaults to 0 which randomizes the port.
+        '';
+      };
+
+      checkForUpdates = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Determines whether to check for updates and alert the user
+          about them in the UI.
+        '';
+      };
+
+      useUpnp = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Use Universal Plug-n-Play (UPnP)
+        '';
+      };
+
+      downloadLimit = mkOption {
+        type = types.int;
+        default = 0;
+        example = 1024;
+        description = lib.mdDoc ''
+          Download speed limit. 0 is unlimited (default).
+        '';
+      };
+
+      uploadLimit = mkOption {
+        type = types.int;
+        default = 0;
+        example = 1024;
+        description = lib.mdDoc ''
+          Upload speed limit. 0 is unlimited (default).
+        '';
+      };
+
+      httpListenAddr = mkOption {
+        type = types.str;
+        default = "[::1]";
+        example = "0.0.0.0";
+        description = lib.mdDoc ''
+          HTTP address to bind to.
+        '';
+      };
+
+      httpListenPort = mkOption {
+        type = types.int;
+        default = 9000;
+        description = lib.mdDoc ''
+          HTTP port to bind on.
+        '';
+      };
+
+      httpLogin = mkOption {
+        type = types.str;
+        example = "allyourbase";
+        default = "";
+        description = lib.mdDoc ''
+          HTTP web login username.
+        '';
+      };
+
+      httpPass = mkOption {
+        type = types.str;
+        example = "arebelongtous";
+        default = "";
+        description = lib.mdDoc ''
+          HTTP web login password.
+        '';
+      };
+
+      encryptLAN = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Encrypt LAN data.";
+      };
+
+      enableWebUI = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable Web UI for administration. Bound to the specified
+          `httpListenAddr` and
+          `httpListenPort`.
+        '';
+      };
+
+      storagePath = mkOption {
+        type = types.path;
+        default = "/var/lib/resilio-sync/";
+        description = lib.mdDoc ''
+          Where Resilio Sync will store its database files (containing
+          things like username info and licenses). Generally, you should
+          never need to change this.
+        '';
+      };
+
+      apiKey = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "API key, which enables the developer API.";
+      };
+
+      directoryRoot = mkOption {
+        type = types.str;
+        default = "";
+        example = "/media";
+        description = lib.mdDoc "Default directory to add folders in the web UI.";
+      };
+
+      sharedFolders = mkOption {
+        default = [];
+        type = types.listOf (types.attrsOf types.anything);
+        example =
+          [ { secretFile     = "/run/resilio-secret";
+              directory      = "/home/user/sync_test";
+              useRelayServer = true;
+              useTracker     = true;
+              useDHT         = false;
+              searchLAN      = true;
+              useSyncTrash   = true;
+              knownHosts     = [
+                "192.168.1.2:4444"
+                "192.168.1.3:4444"
+              ];
+            }
+          ];
+        description = lib.mdDoc ''
+          Shared folder list. If enabled, web UI must be
+          disabled. Secrets can be generated using `rslsync --generate-secret`.
+
+          If you would like to be able to modify the contents of these
+          directories, it is recommended that you make your user a
+          member of the `rslsync` group.
+
+          Directories in this list should be owned by the
+          `rslsync` group, and that group must have
+          write access to the directory. It is also recommended that
+          `chmod g+s` is applied to the directory
+          so that any subdirectories created will also belong to
+          the `rslsync` group. Also,
+          `setfacl -d -m group:rslsync:rwx` and
+          `setfacl -m group:rslsync:rwx` should
+          be applied so that the subdirectories are writable by
+          the group.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions =
+      [ { assertion = cfg.deviceName != "";
+          message   = "Device name cannot be empty.";
+        }
+        { assertion = cfg.enableWebUI -> cfg.sharedFolders == [];
+          message   = "If using shared folders, the web UI cannot be enabled.";
+        }
+        { assertion = cfg.apiKey != "" -> cfg.enableWebUI;
+          message   = "If you're using an API key, you must enable the web server.";
+        }
+      ];
+
+    users.users.rslsync = {
+      description     = "Resilio Sync Service user";
+      home            = cfg.storagePath;
+      createHome      = true;
+      uid             = config.ids.uids.rslsync;
+      group           = "rslsync";
+    };
+
+    users.groups.rslsync = {};
+
+    systemd.services.resilio = with pkgs; {
+      description = "Resilio Sync Service";
+      wantedBy    = [ "multi-user.target" ];
+      after       = [ "network.target" ];
+      serviceConfig = {
+        Restart   = "on-abort";
+        UMask     = "0002";
+        User      = "rslsync";
+        RuntimeDirectory = "rslsync";
+        ExecStartPre = "${createConfig}/bin/create-resilio-config";
+        ExecStart = ''
+          ${resilioSync}/bin/rslsync --nodaemon --config ${runConfigPath}
+        '';
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ jwoudenberg ];
+}
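A sketch of a declaratively shared folder, following the sharedFolders example above; the secret file and directory are placeholders, and the web UI must stay disabled in this mode (see the assertions).

    { ... }:
    {
      services.resilio = {
        enable = true;
        deviceName = "nixos-box";                         # placeholder
        sharedFolders = [
          {
            secretFile     = "/run/keys/resilio-secret";  # placeholder
            directory      = "/srv/sync";                 # placeholder
            useRelayServer = true;
            useTracker     = true;
            useDHT         = false;
            searchLAN      = true;
            useSyncTrash   = true;
            knownHosts     = [ ];
          }
        ];
      };
    }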
diff --git a/nixpkgs/nixos/modules/services/networking/robustirc-bridge.nix b/nixpkgs/nixos/modules/services/networking/robustirc-bridge.nix
new file mode 100644
index 000000000000..9b93828c396c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/robustirc-bridge.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.robustirc-bridge;
+in
+{
+  options = {
+    services.robustirc-bridge = {
+      enable = mkEnableOption (lib.mdDoc "RobustIRC bridge");
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''Extra flags passed to the {command}`robustirc-bridge` command. See [RobustIRC Documentation](https://robustirc.net/docs/adminguide.html#_bridge) or robustirc-bridge(1) for details.'';
+        example = [
+          "-network robustirc.net"
+        ];
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.robustirc-bridge = {
+      description = "RobustIRC bridge";
+      documentation = [
+        "man:robustirc-bridge(1)"
+        "https://robustirc.net/"
+      ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${pkgs.robustirc-bridge}/bin/robustirc-bridge ${concatStringsSep " " cfg.extraFlags}";
+        Restart = "on-failure";
+
+        # Hardening
+        PrivateDevices = true;
+        ProtectSystem = true;
+        ProtectHome = true;
+        PrivateTmp = true;
+      };
+    };
+  };
+}
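A minimal sketch; the flag mirrors the extraFlags example above.

    { ... }:
    {
      services.robustirc-bridge = {
        enable = true;
        extraFlags = [ "-network robustirc.net" ];
      };
    }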
diff --git a/nixpkgs/nixos/modules/services/networking/rosenpass.nix b/nixpkgs/nixos/modules/services/networking/rosenpass.nix
new file mode 100644
index 000000000000..d2a264b83d67
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/rosenpass.nix
@@ -0,0 +1,233 @@
+{ config
+, lib
+, options
+, pkgs
+, ...
+}:
+let
+  inherit (lib)
+    attrValues
+    concatLines
+    concatMap
+    filter
+    filterAttrsRecursive
+    flatten
+    getExe
+    mdDoc
+    mkIf
+    optional
+    ;
+
+  cfg = config.services.rosenpass;
+  opt = options.services.rosenpass;
+  settingsFormat = pkgs.formats.toml { };
+in
+{
+  options.services.rosenpass =
+    let
+      inherit (lib)
+        literalExpression
+        mdDoc
+        mkOption
+        ;
+      inherit (lib.types)
+        enum
+        listOf
+        nullOr
+        path
+        str
+        submodule
+        ;
+    in
+    {
+      enable = lib.mkEnableOption (mdDoc "Rosenpass");
+
+      package = lib.mkPackageOption pkgs "rosenpass" { };
+
+      defaultDevice = mkOption {
+        type = nullOr str;
+        description = mdDoc "Name of the network interface to use for all peers by default.";
+        example = "wg0";
+      };
+
+      settings = mkOption {
+        type = submodule {
+          freeformType = settingsFormat.type;
+
+          options = {
+            public_key = mkOption {
+              type = path;
+              description = mdDoc "Path to a file containing the public key of the local Rosenpass peer. Generate this by running {command}`rosenpass gen-keys`.";
+            };
+
+            secret_key = mkOption {
+              type = path;
+              description = mdDoc "Path to a file containing the secret key of the local Rosenpass peer. Generate this by running {command}`rosenpass gen-keys`.";
+            };
+
+            listen = mkOption {
+              type = listOf str;
+              description = mdDoc "List of local endpoints to listen for connections.";
+              default = [ ];
+              example = literalExpression "[ \"0.0.0.0:10000\" ]";
+            };
+
+            verbosity = mkOption {
+              type = enum [ "Verbose" "Quiet" ];
+              default = "Quiet";
+              description = mdDoc "Verbosity of output produced by the service.";
+            };
+
+            peers =
+              let
+                peer = submodule {
+                  freeformType = settingsFormat.type;
+
+                  options = {
+                    public_key = mkOption {
+                      type = path;
+                      description = mdDoc "Path to a file containing the public key of the remote Rosenpass peer.";
+                    };
+
+                    endpoint = mkOption {
+                      type = nullOr str;
+                      default = null;
+                      description = mdDoc "Endpoint of the remote Rosenpass peer.";
+                    };
+
+                    device = mkOption {
+                      type = str;
+                      default = cfg.defaultDevice;
+                      defaultText = literalExpression "config.${opt.defaultDevice}";
+                      description = mdDoc "Name of the local WireGuard interface to use for this peer.";
+                    };
+
+                    peer = mkOption {
+                      type = str;
+                      description = mdDoc "WireGuard public key corresponding to the remote Rosenpass peer.";
+                    };
+                  };
+                };
+              in
+              mkOption {
+                type = listOf peer;
+                description = mdDoc "List of peers to exchange keys with.";
+                default = [ ];
+              };
+          };
+        };
+        default = { };
+        description = mdDoc "Configuration for Rosenpass, see <https://rosenpass.eu/> for further information.";
+      };
+    };
+
+  config = mkIf cfg.enable {
+    warnings =
+      let
+        # NOTE: In the descriptions below, we tried to refer to e.g.
+        # options.systemd.network.netdevs."<name>".wireguardPeers.*.PublicKey
+        # directly, but don't know how to traverse "<name>" and * in this path.
+        extractions = [
+          {
+            relevant = config.systemd.network.enable;
+            root = config.systemd.network.netdevs;
+            peer = (x: x.wireguardPeers);
+            key = (x: if x.wireguardPeerConfig ? PublicKey then x.wireguardPeerConfig.PublicKey else null);
+            description = mdDoc "${options.systemd.network.netdevs}.\"<name>\".wireguardPeers.*.wireguardPeerConfig.PublicKey";
+          }
+          {
+            relevant = config.networking.wireguard.enable;
+            root = config.networking.wireguard.interfaces;
+            peer = (x: x.peers);
+            key = (x: x.publicKey);
+            description = mdDoc "${options.networking.wireguard.interfaces}.\"<name>\".peers.*.publicKey";
+          }
+          rec {
+            relevant = root != { };
+            root = config.networking.wg-quick.interfaces;
+            peer = (x: x.peers);
+            key = (x: x.publicKey);
+            description = mdDoc "${options.networking.wg-quick.interfaces}.\"<name>\".peers.*.publicKey";
+          }
+        ];
+        relevantExtractions = filter (x: x.relevant) extractions;
+        extract = { root, peer, key, ... }:
+          filter (x: x != null) (flatten (concatMap (x: (map key (peer x))) (attrValues root)));
+        configuredKeys = flatten (map extract relevantExtractions);
+        itemize = xs: concatLines (map (x: " - ${x}") xs);
+        descriptions = map (x: "`${x.description}`");
+        missingKeys = filter (key: !builtins.elem key configuredKeys) (map (x: x.peer) cfg.settings.peers);
+        unusual = ''
+          While this may work as expected, e.g. if you want to configure WireGuard manually,
+          such a scenario is unusual. Please double-check your configuration.
+        '';
+      in
+      (optional (relevantExtractions != [ ] && missingKeys != [ ]) ''
+        You have configured Rosenpass peers with the WireGuard public keys:
+        ${itemize missingKeys}
+        But there is no corresponding active WireGuard peer configuration in any of:
+        ${itemize (descriptions relevantExtractions)}
+        ${unusual}
+      '')
+      ++
+      optional (relevantExtractions == [ ]) ''
+        You have configured Rosenpass, but you have not configured WireGuard via any of:
+        ${itemize (descriptions extractions)}
+        ${unusual}
+      '';
+
+    environment.systemPackages = [ cfg.package pkgs.wireguard-tools ];
+
+    systemd.services.rosenpass =
+      let
+        filterNonNull = filterAttrsRecursive (_: v: v != null);
+        config = settingsFormat.generate "config.toml" (
+          filterNonNull (cfg.settings
+            //
+            (
+              let
+                credentialPath = id: "$CREDENTIALS_DIRECTORY/${id}";
+                # NOTE: We would like to remove all `null` values inside `cfg.settings`
+                # recursively, since `settingsFormat.generate` cannot handle `null`.
+                # This would require to traverse both attribute sets and lists recursively.
+                # `filterAttrsRecursive` only recurses into attribute sets, but not
+                # into values that might contain other attribute sets (such as lists,
+                # e.g. `cfg.settings.peers`). Here, we just specialize on `cfg.settings.peers`,
+                # and this may break unexpectedly whenever a `null` value is contained
+                # in a list in `cfg.settings`, other than `cfg.settings.peers`.
+                peersWithoutNulls = map filterNonNull cfg.settings.peers;
+              in
+              {
+                secret_key = credentialPath "pqsk";
+                public_key = credentialPath "pqpk";
+                peers = peersWithoutNulls;
+              }
+            )
+          )
+        );
+      in
+      rec {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network-online.target" ];
+        path = [ cfg.package pkgs.wireguard-tools ];
+
+        serviceConfig = {
+          User = "rosenpass";
+          Group = "rosenpass";
+          RuntimeDirectory = "rosenpass";
+          DynamicUser = true;
+          AmbientCapabilities = [ "CAP_NET_ADMIN" ];
+          LoadCredential = [
+            "pqsk:${cfg.settings.secret_key}"
+            "pqpk:${cfg.settings.public_key}"
+          ];
+        };
+
+        # See <https://www.freedesktop.org/software/systemd/man/systemd.unit.html#Specifiers>
+        environment.CONFIG = "%t/${serviceConfig.RuntimeDirectory}/config.toml";
+
+        preStart = "${getExe pkgs.envsubst} -i ${config} -o \"$CONFIG\"";
+        script = "rosenpass exchange-config \"$CONFIG\"";
+      };
+  };
+}
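A sketch of a single-peer setup; the key file paths, the endpoint and the WireGuard public key are placeholders, and the pqsk/pqpk files are assumed to have been generated with rosenpass gen-keys beforehand.

    { ... }:
    {
      services.rosenpass = {
        enable = true;
        defaultDevice = "wg0";
        settings = {
          secret_key = "/etc/rosenpass/pqsk";        # placeholder
          public_key = "/etc/rosenpass/pqpk";        # placeholder
          listen = [ "0.0.0.0:10000" ];
          peers = [
            {
              public_key = "/etc/rosenpass/peers/peer1/pqpk";   # placeholder
              endpoint = "peer1.example.org:10000";             # placeholder
              peer = "<WireGuard public key of peer1>";         # placeholder
            }
          ];
        };
      };
    }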
diff --git a/nixpkgs/nixos/modules/services/networking/routedns.nix b/nixpkgs/nixos/modules/services/networking/routedns.nix
new file mode 100644
index 000000000000..2a29a06700ce
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/routedns.nix
@@ -0,0 +1,84 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+with lib;
+
+let
+  cfg = config.services.routedns;
+  settingsFormat = pkgs.formats.toml { };
+in
+{
+  options.services.routedns = {
+    enable = mkEnableOption (lib.mdDoc "RouteDNS - DNS stub resolver, proxy and router");
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      example = literalExpression ''
+        {
+          resolvers.cloudflare-dot = {
+            address = "1.1.1.1:853";
+            protocol = "dot";
+          };
+          groups.cloudflare-cached = {
+            type = "cache";
+            resolvers = ["cloudflare-dot"];
+          };
+          listeners.local-udp = {
+            address = "127.0.0.1:53";
+            protocol = "udp";
+            resolver = "cloudflare-cached";
+          };
+          listeners.local-tcp = {
+            address = "127.0.0.1:53";
+            protocol = "tcp";
+            resolver = "cloudflare-cached";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Configuration for RouteDNS, see <https://github.com/folbricht/routedns/blob/master/doc/configuration.md>
+        for more information.
+      '';
+    };
+
+    configFile = mkOption {
+      default = settingsFormat.generate "routedns.toml" cfg.settings;
+      defaultText = "A RouteDNS configuration file automatically generated by values from services.routedns.*";
+      type = types.path;
+      example = literalExpression ''"''${pkgs.routedns}/cmd/routedns/example-config/use-case-1.toml"'';
+      description = lib.mdDoc "Path to RouteDNS TOML configuration file.";
+    };
+
+    package = mkOption {
+      default = pkgs.routedns;
+      defaultText = literalExpression "pkgs.routedns";
+      type = types.package;
+      description = lib.mdDoc "RouteDNS package to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.routedns = {
+      description = "RouteDNS - DNS stub resolver, proxy and router";
+      after = [ "network.target" ]; # in case a bootstrap resolver is used, this might fail a few times until the respective server is actually reachable
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network.target" ];
+      startLimitIntervalSec = 30;
+      startLimitBurst = 5;
+      serviceConfig = {
+        Restart = "on-failure";
+        RestartSec = "5s";
+        LimitNPROC = 512;
+        LimitNOFILE = 1048576;
+        DynamicUser = true;
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        NoNewPrivileges = true;
+        ExecStart = "${getBin cfg.package}/bin/routedns -l 4 ${cfg.configFile}";
+      };
+    };
+  };
+  meta.maintainers = with maintainers; [ jsimonetti ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/rpcbind.nix b/nixpkgs/nixos/modules/services/networking/rpcbind.nix
new file mode 100644
index 000000000000..63c4859fbd07
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/rpcbind.nix
@@ -0,0 +1,56 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.rpcbind = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable `rpcbind`, an ONC RPC directory service
+          notably used by NFS and NIS, and which can be queried
+          using the rpcinfo(1) command. `rpcbind` is a replacement for
+          `portmap`.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.rpcbind.enable {
+    environment.systemPackages = [ pkgs.rpcbind ];
+
+    systemd.packages = [ pkgs.rpcbind ];
+
+    systemd.services.rpcbind = {
+      wantedBy = [ "multi-user.target" ];
+      # rpcbind performs a check for /var/run/rpcbind.lock at startup
+      # and will crash if /var/run isn't present. In the stock NixOS
+      # var.conf tmpfiles configuration file, /var/run is symlinked to
+      # /run, so rpcbind can hit a race condition: it may try to use
+      # the path before the symlink exists. Ordering explicitly after
+      # tmpfiles setup ensures that rpcbind can start successfully.
+      # Using `wants` instead of `requires` avoids creating a
+      # strict/brittle dependency.
+      wants = [ "systemd-tmpfiles-setup.service" ];
+      after = [ "systemd-tmpfiles-setup.service" ];
+    };
+
+    users.users.rpc = {
+      group = "nogroup";
+      uid = config.ids.uids.rpc;
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/rxe.nix b/nixpkgs/nixos/modules/services/networking/rxe.nix
new file mode 100644
index 000000000000..7dbb4823b4bc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/rxe.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.networking.rxe;
+
+in {
+  ###### interface
+
+  options = {
+    networking.rxe = {
+      enable = mkEnableOption (lib.mdDoc "RDMA over converged ethernet");
+      interfaces = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "eth0" ];
+        description = lib.mdDoc ''
+          Enable RDMA on the listed interfaces. The corresponding virtual
+          RDMA interfaces will be named rxe_\<interface\>.
+          UDP port 4791 must be open on the respective ethernet interfaces.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.rxe = {
+      description = "RoCE interfaces";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-modules-load.service" "network-online.target" ];
+      wants = [ "network-pre.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = map ( x:
+          "${pkgs.iproute2}/bin/rdma link add rxe_${x} type rxe netdev ${x}"
+          ) cfg.interfaces;
+
+        ExecStop = map ( x:
+          "${pkgs.iproute2}/bin/rdma link delete rxe_${x}"
+          ) cfg.interfaces;
+      };
+    };
+  };
+}
+
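A minimal usage sketch, assuming a single Ethernet interface named eth0; the resulting device name and UDP port follow the option description above.

  networking.rxe = {
    enable = true;
    interfaces = [ "eth0" ];   # exposed as the virtual RDMA device rxe_eth0
  };
  # The module does not open the firewall itself; per the description,
  # UDP port 4791 must be reachable on the underlying interface.
  networking.firewall.allowedUDPPorts = [ 4791 ];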
diff --git a/nixpkgs/nixos/modules/services/networking/sabnzbd.nix b/nixpkgs/nixos/modules/services/networking/sabnzbd.nix
new file mode 100644
index 000000000000..8f3545df8995
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/sabnzbd.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.sabnzbd;
+  inherit (pkgs) sabnzbd;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.sabnzbd = {
+      enable = mkEnableOption (lib.mdDoc "the sabnzbd server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.sabnzbd;
+        defaultText = lib.literalExpression "pkgs.sabnzbd";
+        description = lib.mdDoc "The sabnzbd executable package run by the service.";
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = "/var/lib/sabnzbd/sabnzbd.ini";
+        description = lib.mdDoc "Path to config file.";
+      };
+
+      user = mkOption {
+        default = "sabnzbd";
+        type = types.str;
+        description = lib.mdDoc "User to run the service as";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "sabnzbd";
+        description = lib.mdDoc "Group to run the service as";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.sabnzbd = {
+      uid = config.ids.uids.sabnzbd;
+      group = "sabnzbd";
+      description = "sabnzbd user";
+      home = "/var/lib/sabnzbd/";
+      createHome = true;
+    };
+
+    users.groups.sabnzbd = {
+      gid = config.ids.gids.sabnzbd;
+    };
+
+    systemd.services.sabnzbd = {
+      description = "sabnzbd server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        Type = "forking";
+        GuessMainPID = "no";
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${lib.getBin cfg.package}/bin/sabnzbd -d -f ${cfg.configFile}";
+      };
+    };
+  };
+}
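A small sketch exercising the options above; everything except `enable` just restates the defaults and could be omitted.

  services.sabnzbd = {
    enable = true;
    user = "sabnzbd";
    group = "sabnzbd";
    configFile = "/var/lib/sabnzbd/sabnzbd.ini";   # default location, inside the service user's home directory
  };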
diff --git a/nixpkgs/nixos/modules/services/networking/seafile.nix b/nixpkgs/nixos/modules/services/networking/seafile.nix
new file mode 100644
index 000000000000..b07d51b9b49a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/seafile.nix
@@ -0,0 +1,297 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.seafile;
+  settingsFormat = pkgs.formats.ini { };
+
+  ccnetConf = settingsFormat.generate "ccnet.conf" cfg.ccnetSettings;
+
+  seafileConf = settingsFormat.generate "seafile.conf" cfg.seafileSettings;
+
+  seahubSettings = pkgs.writeText "seahub_settings.py" ''
+    FILE_SERVER_ROOT = '${cfg.ccnetSettings.General.SERVICE_URL}/seafhttp'
+    DATABASES = {
+        'default': {
+            'ENGINE': 'django.db.backends.sqlite3',
+            'NAME': '${seahubDir}/seahub.db',
+        }
+    }
+    MEDIA_ROOT = '${seahubDir}/media/'
+    THUMBNAIL_ROOT = '${seahubDir}/thumbnail/'
+
+    SERVICE_URL = '${cfg.ccnetSettings.General.SERVICE_URL}'
+
+    with open('${seafRoot}/.seahubSecret') as f:
+        SECRET_KEY = f.readline().rstrip()
+
+    ${cfg.seahubExtraConf}
+  '';
+
+  seafRoot = "/var/lib/seafile"; # hardcode it due to dynamicuser
+  ccnetDir = "${seafRoot}/ccnet";
+  dataDir = "${seafRoot}/data";
+  seahubDir = "${seafRoot}/seahub";
+
+in {
+
+  ###### Interface
+
+  options.services.seafile = {
+    enable = mkEnableOption (lib.mdDoc "Seafile server");
+
+    ccnetSettings = mkOption {
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          General = {
+            SERVICE_URL = mkOption {
+              type = types.str;
+              example = "https://www.example.com";
+              description = lib.mdDoc ''
+                Seahub public URL.
+              '';
+            };
+          };
+        };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for ccnet, see
+        <https://manual.seafile.com/config/ccnet-conf/>
+        for supported values.
+      '';
+    };
+
+    seafileSettings = mkOption {
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          fileserver = {
+            port = mkOption {
+              type = types.port;
+              default = 8082;
+              description = lib.mdDoc ''
+                The TCP port used by the Seafile fileserver.
+              '';
+            };
+            host = mkOption {
+              type = types.str;
+              default = "127.0.0.1";
+              example = "0.0.0.0";
+              description = lib.mdDoc ''
+                The bind address used by the Seafile fileserver.
+              '';
+            };
+          };
+        };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for seafile-server, see
+        <https://manual.seafile.com/config/seafile-conf/>
+        for supported values.
+      '';
+    };
+
+    workers = mkOption {
+      type = types.int;
+      default = 4;
+      example = 10;
+      description = lib.mdDoc ''
+        The number of gunicorn worker processes for handling requests.
+      '';
+    };
+
+    adminEmail = mkOption {
+      example = "john@example.com";
+      type = types.str;
+      description = lib.mdDoc ''
+        Seafile Seahub Admin Account Email.
+      '';
+    };
+
+    initialAdminPassword = mkOption {
+      example = "someStrongPass";
+      type = types.str;
+      description = lib.mdDoc ''
+        Initial password for the Seafile Seahub admin account.
+        It should be changed via the Seahub web front-end.
+      '';
+    };
+
+    seafilePackage = mkOption {
+      type = types.package;
+      description = lib.mdDoc "Which package to use for the seafile server.";
+      default = pkgs.seafile-server;
+      defaultText = literalExpression "pkgs.seafile-server";
+    };
+
+    seahubExtraConf = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Extra config to append to `seahub_settings.py` file.
+        Refer to <https://manual.seafile.com/config/seahub_settings_py/>
+        for all available options.
+      '';
+    };
+  };
+
+  ###### Implementation
+
+  config = mkIf cfg.enable {
+
+    environment.etc."seafile/ccnet.conf".source = ccnetConf;
+    environment.etc."seafile/seafile.conf".source = seafileConf;
+    environment.etc."seafile/seahub_settings.py".source = seahubSettings;
+
+    systemd.targets.seafile = {
+      wantedBy = [ "multi-user.target" ];
+      description = "Seafile components";
+    };
+
+    systemd.services = let
+      securityOptions = {
+        ProtectHome = true;
+        PrivateUsers = true;
+        PrivateDevices = true;
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectProc = "invisible";
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        MemoryDenyWriteExecute = true;
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" ];
+      };
+    in {
+      seaf-server = {
+        description = "Seafile server";
+        partOf = [ "seafile.target" ];
+        after = [ "network.target" ];
+        wantedBy = [ "seafile.target" ];
+        restartTriggers = [ ccnetConf seafileConf ];
+        path = [ pkgs.sqlite ];
+        serviceConfig = securityOptions // {
+          User = "seafile";
+          Group = "seafile";
+          DynamicUser = true;
+          StateDirectory = "seafile";
+          RuntimeDirectory = "seafile";
+          LogsDirectory = "seafile";
+          ConfigurationDirectory = "seafile";
+          ExecStart = ''
+            ${cfg.seafilePackage}/bin/seaf-server \
+            --foreground \
+            -F /etc/seafile \
+            -c ${ccnetDir} \
+            -d ${dataDir} \
+            -l /var/log/seafile/server.log \
+            -P /run/seafile/server.pid \
+            -p /run/seafile
+          '';
+        };
+        preStart = ''
+          if [ ! -f "${seafRoot}/server-setup" ]; then
+              mkdir -p ${dataDir}/library-template
+              mkdir -p ${ccnetDir}/{GroupMgr,misc,OrgMgr,PeerMgr}
+              sqlite3 ${ccnetDir}/GroupMgr/groupmgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/groupmgr.sql"
+              sqlite3 ${ccnetDir}/misc/config.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/config.sql"
+              sqlite3 ${ccnetDir}/OrgMgr/orgmgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/org.sql"
+              sqlite3 ${ccnetDir}/PeerMgr/usermgr.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/user.sql"
+              sqlite3 ${dataDir}/seafile.db ".read ${cfg.seafilePackage}/share/seafile/sql/sqlite/seafile.sql"
+              echo "${cfg.seafilePackage.version}-sqlite" > "${seafRoot}"/server-setup
+          fi
+          # checking for upgrades and handling them
+          # WARNING: needs to be extended to actually handle major version migrations
+          installedMajor=$(cat "${seafRoot}/server-setup" | cut -d"-" -f1 | cut -d"." -f1)
+          installedMinor=$(cat "${seafRoot}/server-setup" | cut -d"-" -f1 | cut -d"." -f2)
+          pkgMajor=$(echo "${cfg.seafilePackage.version}" | cut -d"." -f1)
+          pkgMinor=$(echo "${cfg.seafilePackage.version}" | cut -d"." -f2)
+
+          if [[ $installedMajor == $pkgMajor && $installedMinor == $pkgMinor ]]; then
+             :
+          elif [[ $installedMajor == 8 && $installedMinor == 0 && $pkgMajor == 9 && $pkgMinor == 0 ]]; then
+              # Upgrade from 8.0 to 9.0
+              sqlite3 ${dataDir}/seafile.db ".read ${pkgs.seahub}/scripts/upgrade/sql/9.0.0/sqlite3/seafile.sql"
+              echo "${cfg.seafilePackage.version}-sqlite" > "${seafRoot}"/server-setup
+          else
+              echo "Unsupported upgrade" >&2
+              exit 1
+          fi
+        '';
+      };
+
+      seahub = {
+        description = "Seafile Server Web Frontend";
+        wantedBy = [ "seafile.target" ];
+        partOf = [ "seafile.target" ];
+        after = [ "network.target" "seaf-server.service" ];
+        requires = [ "seaf-server.service" ];
+        restartTriggers = [ seahubSettings ];
+        environment = {
+          PYTHONPATH = "${pkgs.seahub.pythonPath}:${pkgs.seahub}/thirdpart:${pkgs.seahub}";
+          DJANGO_SETTINGS_MODULE = "seahub.settings";
+          CCNET_CONF_DIR = ccnetDir;
+          SEAFILE_CONF_DIR = dataDir;
+          SEAFILE_CENTRAL_CONF_DIR = "/etc/seafile";
+          SEAFILE_RPC_PIPE_PATH = "/run/seafile";
+          SEAHUB_LOG_DIR = "/var/log/seafile";
+        };
+        serviceConfig = securityOptions // {
+          User = "seafile";
+          Group = "seafile";
+          DynamicUser = true;
+          RuntimeDirectory = "seahub";
+          StateDirectory = "seafile";
+          LogsDirectory = "seafile";
+          ConfigurationDirectory = "seafile";
+          ExecStart = ''
+            ${pkgs.seahub.python.pkgs.gunicorn}/bin/gunicorn seahub.wsgi:application \
+            --name seahub \
+            --workers ${toString cfg.workers} \
+            --log-level=info \
+            --preload \
+            --timeout=1200 \
+            --limit-request-line=8190 \
+            --bind unix:/run/seahub/gunicorn.sock
+          '';
+        };
+        preStart = ''
+          mkdir -p ${seahubDir}/media
+          # Link all media except avatars
+          for m in `find ${pkgs.seahub}/media/ -maxdepth 1 -not -name "avatars"`; do
+            ln -sf $m ${seahubDir}/media/
+          done
+          if [ ! -e "${seafRoot}/.seahubSecret" ]; then
+              ${pkgs.seahub.python}/bin/python ${pkgs.seahub}/tools/secret_key_generator.py > ${seafRoot}/.seahubSecret
+              chmod 400 ${seafRoot}/.seahubSecret
+          fi
+          if [ ! -f "${seafRoot}/seahub-setup" ]; then
+              # avatars directory should be writable
+              install -D -t ${seahubDir}/media/avatars/ ${pkgs.seahub}/media/avatars/default.png
+              install -D -t ${seahubDir}/media/avatars/groups ${pkgs.seahub}/media/avatars/groups/default.png
+              # init database
+              ${pkgs.seahub}/manage.py migrate
+              # create admin account
+              ${pkgs.expect}/bin/expect -c 'spawn ${pkgs.seahub}/manage.py createsuperuser --email=${cfg.adminEmail}; expect "Password: "; send "${cfg.initialAdminPassword}\r"; expect "Password (again): "; send "${cfg.initialAdminPassword}\r"; expect "Superuser created successfully."'
+              echo "${pkgs.seahub.version}-sqlite" > "${seafRoot}/seahub-setup"
+          fi
+          if [ $(cat "${seafRoot}/seahub-setup" | cut -d"-" -f1) != "${pkgs.seahub.version}" ]; then
+              # update database
+              ${pkgs.seahub}/manage.py migrate
+              echo "${pkgs.seahub.version}-sqlite" > "${seafRoot}/seahub-setup"
+          fi
+        '';
+      };
+    };
+  };
+}
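A minimal sketch combining the options that have no default value (`adminEmail`, `initialAdminPassword` and the Seahub `SERVICE_URL`); the domain and credentials are placeholders, and a reverse proxy in front of the gunicorn UNIX socket is assumed but not shown.

  services.seafile = {
    enable = true;
    adminEmail = "admin@example.com";
    initialAdminPassword = "change-me-after-first-login";
    ccnetSettings.General.SERVICE_URL = "https://seafile.example.com";
    seafileSettings.fileserver = {
      host = "127.0.0.1";   # defaults shown for clarity
      port = 8082;
    };
  };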
diff --git a/nixpkgs/nixos/modules/services/networking/searx.nix b/nixpkgs/nixos/modules/services/networking/searx.nix
new file mode 100644
index 000000000000..8054f01d705f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/searx.nix
@@ -0,0 +1,277 @@
+{ options, config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  runDir = "/run/searx";
+
+  cfg = config.services.searx;
+
+  settingsFile = pkgs.writeText "settings.yml"
+    (builtins.toJSON cfg.settings);
+
+  limiterSettingsFile = (pkgs.formats.toml { }).generate "limiter.toml" cfg.limiterSettings;
+
+  generateConfig = ''
+    cd ${runDir}
+
+    # write NixOS settings as JSON
+    (
+      umask 077
+      cp --no-preserve=mode ${settingsFile} settings.yml
+    )
+
+    # substitute environment variables
+    env -0 | while IFS='=' read -r -d ''' n v; do
+      sed "s#@$n@#$v#g" -i settings.yml
+    done
+  '';
+
+  settingType = with types; (oneOf
+    [ bool int float str
+      (listOf settingType)
+      (attrsOf settingType)
+    ]) // { description = "JSON value"; };
+
+in
+
+{
+
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "searx" "configFile" ]
+      [ "services" "searx" "settingsFile" ])
+  ];
+
+  options = {
+    services.searx = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        relatedPackages = [ "searx" ];
+        description = lib.mdDoc "Whether to enable Searx, the meta search engine.";
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Environment file (see `systemd.exec(5)`
+          "EnvironmentFile=" section for the syntax) to define variables for
+          Searx. This option can be used to safely include secret keys into the
+          Searx configuration.
+        '';
+      };
+
+      redisCreateLocally = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Configure a local Redis server for SearXNG. This is required if you
+          want to enable the rate limiter and bot protection of SearXNG.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.attrsOf settingType;
+        default = { };
+        example = literalExpression ''
+          { server.port = 8080;
+            server.bind_address = "0.0.0.0";
+            server.secret_key = "@SEARX_SECRET_KEY@";
+
+            engines = lib.singleton
+              { name = "wolframalpha";
+                shortcut = "wa";
+                api_key = "@WOLFRAM_API_KEY@";
+                engine = "wolframalpha_api";
+              };
+          }
+        '';
+        description = lib.mdDoc ''
+          Searx settings. These will be merged with (taking precedence over)
+          the default configuration. It's also possible to refer to
+          environment variables
+          (defined in [](#opt-services.searx.environmentFile))
+          using the syntax `@VARIABLE_NAME@`.
+
+          ::: {.note}
+          For available settings, see the Searx
+          [docs](https://searx.github.io/searx/admin/settings.html).
+          :::
+        '';
+      };
+
+      settingsFile = mkOption {
+        type = types.path;
+        default = "${runDir}/settings.yml";
+        description = lib.mdDoc ''
+          The path of the Searx server settings.yml file. If no file is
+          specified, a default file is used (default config file has debug mode
+          enabled). Note: setting this option overrides
+          [](#opt-services.searx.settings).
+
+          ::: {.warning}
+          This file, along with any secret key it contains, will be copied
+          into the world-readable Nix store.
+          :::
+        '';
+      };
+
+      limiterSettings = mkOption {
+        type = types.attrsOf settingType;
+        default = { };
+        example = literalExpression ''
+          {
+            real_ip = {
+              x_for = 1;
+              ipv4_prefix = 32;
+              ipv6_prefix = 56;
+            };
+            botdetection.ip_lists.block_ip = [
+              # "93.184.216.34" # example.org
+            ];
+          }
+        '';
+        description = lib.mdDoc ''
+          Limiter settings for SearXNG.
+
+          ::: {.note}
+          For available settings, see the SearXNG
+          [schema file](https://github.com/searxng/searxng/blob/master/searx/botdetection/limiter.toml).
+          :::
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.searxng;
+        defaultText = literalExpression "pkgs.searxng";
+        description = lib.mdDoc "searx package to use.";
+      };
+
+      runInUwsgi = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run searx in uWSGI as a "vassal", instead of using its
+          built-in HTTP server. This is the recommended mode for public or
+          large instances, but is unnecessary for LAN or local-only use.
+
+          ::: {.warning}
+          The built-in HTTP server logs all queries by default.
+          :::
+        '';
+      };
+
+      uwsgiConfig = mkOption {
+        type = options.services.uwsgi.instance.type;
+        default = { http = ":8080"; };
+        example = literalExpression ''
+          {
+            disable-logging = true;
+            http = ":8080";                   # serve via HTTP...
+            socket = "/run/searx/searx.sock"; # ...or UNIX socket
+            chmod-socket = "660";             # allow the searx group to read/write to the socket
+          }
+        '';
+        description = lib.mdDoc ''
+          Additional configuration of the uWSGI vassal running searx. It
+          should notably specify on which interfaces and ports the vassal
+          should listen.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    users.users.searx =
+      { description = "Searx daemon user";
+        group = "searx";
+        isSystemUser = true;
+      };
+
+    users.groups.searx = { };
+
+    systemd.services.searx-init = {
+      description = "Initialise Searx settings";
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        User = "searx";
+        RuntimeDirectory = "searx";
+        RuntimeDirectoryMode = "750";
+      } // optionalAttrs (cfg.environmentFile != null)
+        { EnvironmentFile = builtins.toPath cfg.environmentFile; };
+      script = generateConfig;
+    };
+
+    systemd.services.searx = mkIf (!cfg.runInUwsgi) {
+      description = "Searx server, the meta search engine.";
+      wantedBy = [ "network.target" "multi-user.target" ];
+      requires = [ "searx-init.service" ];
+      after = [ "searx-init.service" ];
+      serviceConfig = {
+        User  = "searx";
+        Group = "searx";
+        ExecStart = "${cfg.package}/bin/searx-run";
+      } // optionalAttrs (cfg.environmentFile != null)
+        { EnvironmentFile = builtins.toPath cfg.environmentFile; };
+      environment = {
+        SEARX_SETTINGS_PATH = cfg.settingsFile;
+        SEARXNG_SETTINGS_PATH = cfg.settingsFile;
+      };
+    };
+
+    systemd.services.uwsgi = mkIf cfg.runInUwsgi {
+      requires = [ "searx-init.service" ];
+      after = [ "searx-init.service" ];
+    };
+
+    services.searx.settings = {
+      # merge NixOS settings with the default settings.yml
+      use_default_settings = mkDefault true;
+      redis.url = lib.mkIf cfg.redisCreateLocally "unix://${config.services.redis.servers.searx.unixSocket}";
+    };
+
+    services.uwsgi = mkIf cfg.runInUwsgi {
+      enable = true;
+      plugins = [ "python3" ];
+
+      instance.type = "emperor";
+      instance.vassals.searx = {
+        type = "normal";
+        strict = true;
+        immediate-uid = "searx";
+        immediate-gid = "searx";
+        lazy-apps = true;
+        enable-threads = true;
+        module = "searx.webapp";
+        env = [
+          # TODO: drop this as it is only required for searx
+          "SEARX_SETTINGS_PATH=${cfg.settingsFile}"
+          # searxng compatibility https://github.com/searxng/searxng/issues/1519
+          "SEARXNG_SETTINGS_PATH=${cfg.settingsFile}"
+        ];
+        buffer-size = 32768;
+        pythonPackages = self: [ cfg.package ];
+      } // cfg.uwsgiConfig;
+    };
+
+    services.redis.servers.searx = lib.mkIf cfg.redisCreateLocally {
+      enable = true;
+      user = "searx";
+      port = 0;
+    };
+
+    environment.etc."searxng/limiter.toml" = lib.mkIf (cfg.limiterSettings != { }) {
+      source = limiterSettingsFile;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ rnhmjoj _999eagle ];
+}
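A sketch of the secret-handling pattern the `settings` and `environmentFile` options describe: the secret key stays out of the world-readable store and is substituted into settings.yml when searx-init runs. The secrets path is an assumption.

  services.searx = {
    enable = true;
    # Assumed file containing, e.g., SEARX_SECRET_KEY=...
    environmentFile = "/run/secrets/searx";
    settings = {
      server.port = 8888;
      server.bind_address = "127.0.0.1";
      server.secret_key = "@SEARX_SECRET_KEY@";   # replaced from the environment file
    };
  };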
diff --git a/nixpkgs/nixos/modules/services/networking/shadowsocks.nix b/nixpkgs/nixos/modules/services/networking/shadowsocks.nix
new file mode 100644
index 000000000000..2034dca6f26b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/shadowsocks.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.shadowsocks;
+
+  opts = {
+    server = cfg.localAddress;
+    server_port = cfg.port;
+    method = cfg.encryptionMethod;
+    mode = cfg.mode;
+    user = "nobody";
+    fast_open = cfg.fastOpen;
+  } // optionalAttrs (cfg.plugin != null) {
+    plugin = cfg.plugin;
+    plugin_opts = cfg.pluginOpts;
+  } // optionalAttrs (cfg.password != null) {
+    password = cfg.password;
+  } // cfg.extraConfig;
+
+  configFile = pkgs.writeText "shadowsocks.json" (builtins.toJSON opts);
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.shadowsocks = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run the shadowsocks-libev server.
+        '';
+      };
+
+      localAddress = mkOption {
+        type = types.coercedTo types.str singleton (types.listOf types.str);
+        default = [ "[::0]" "0.0.0.0" ];
+        description = lib.mdDoc ''
+          Local addresses to which the server binds.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8388;
+        description = lib.mdDoc ''
+          Port which the server uses.
+        '';
+      };
+
+      password = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Password for connecting clients.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Password file with a password for connecting clients.
+        '';
+      };
+
+      mode = mkOption {
+        type = types.enum [ "tcp_only" "tcp_and_udp" "udp_only" ];
+        default = "tcp_and_udp";
+        description = lib.mdDoc ''
+          Relay protocols.
+        '';
+      };
+
+      fastOpen = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to use TCP Fast Open.
+        '';
+      };
+
+      encryptionMethod = mkOption {
+        type = types.str;
+        default = "chacha20-ietf-poly1305";
+        description = lib.mdDoc ''
+          Encryption method. See <https://github.com/shadowsocks/shadowsocks-org/wiki/AEAD-Ciphers>.
+        '';
+      };
+
+      plugin = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = literalExpression ''"''${pkgs.shadowsocks-v2ray-plugin}/bin/v2ray-plugin"'';
+        description = lib.mdDoc ''
+          SIP003 plugin for shadowsocks
+        '';
+      };
+
+      pluginOpts = mkOption {
+        type = types.str;
+        default = "";
+        example = "server;host=example.com";
+        description = lib.mdDoc ''
+          Options to pass to the plugin, if one was specified.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        default = {};
+        example = {
+          nameserver = "8.8.8.8";
+        };
+        description = lib.mdDoc ''
+          Additional configuration for shadowsocks that is not covered by the
+          provided options. The provided attrset will be serialized to JSON and
+          has to contain valid shadowsocks options. Unfortunately, most
+          additional options are undocumented, but it is easy to find out what is
+          available by looking at the source:
+          <https://github.com/shadowsocks/shadowsocks-libev/blob/master/src/jconf.c>
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = singleton
+      { assertion = cfg.password == null || cfg.passwordFile == null;
+        message = "Cannot use both password and passwordFile for shadowsocks-libev";
+      };
+
+    systemd.services.shadowsocks-libev = {
+      description = "shadowsocks-libev Daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.shadowsocks-libev ] ++ optional (cfg.plugin != null) cfg.plugin ++ optional (cfg.passwordFile != null) pkgs.jq;
+      serviceConfig.PrivateTmp = true;
+      script = ''
+        ${optionalString (cfg.passwordFile != null) ''
+          cat ${configFile} | jq --arg password "$(cat "${cfg.passwordFile}")" '. + { password: $password }' > /tmp/shadowsocks.json
+        ''}
+        exec ss-server -c ${if cfg.passwordFile != null then "/tmp/shadowsocks.json" else configFile}
+      '';
+    };
+  };
+}
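A sketch that uses `passwordFile` so the pre-shared key never lands in the store; the assertion above forbids combining it with `password`. The secrets path is an assumption.

  services.shadowsocks = {
    enable = true;
    port = 8388;
    mode = "tcp_and_udp";
    encryptionMethod = "chacha20-ietf-poly1305";
    passwordFile = "/run/secrets/shadowsocks";   # merged into the JSON config via jq at start-up
  };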
diff --git a/nixpkgs/nixos/modules/services/networking/shairport-sync.nix b/nixpkgs/nixos/modules/services/networking/shairport-sync.nix
new file mode 100644
index 000000000000..75684eea3ad1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/shairport-sync.nix
@@ -0,0 +1,112 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.shairport-sync;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.shairport-sync = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the shairport-sync daemon.
+
+          Running with a local system-wide or remote pulseaudio server
+          is recommended.
+        '';
+      };
+
+      arguments = mkOption {
+        type = types.str;
+        default = "-v -o pa";
+        description = lib.mdDoc ''
+          Arguments to pass to the daemon. Defaults to a local pulseaudio
+          server.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to automatically open ports in the firewall.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "shairport";
+        description = lib.mdDoc ''
+          User account name under which to run shairport-sync. The account
+          will be created.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "shairport";
+        description = lib.mdDoc ''
+          Group account name under which to run shairport-sync. The account
+          will be created.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.shairport-sync.enable {
+
+    services.avahi.enable = true;
+    services.avahi.publish.enable = true;
+    services.avahi.publish.userServices = true;
+
+    users = {
+      users.${cfg.user} = {
+        description = "Shairport user";
+        isSystemUser = true;
+        createHome = true;
+        home = "/var/lib/shairport-sync";
+        group = cfg.group;
+        extraGroups = [ "audio" ] ++ optional config.hardware.pulseaudio.enable "pulse";
+      };
+      groups.${cfg.group} = {};
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ 5000 ];
+      allowedUDPPortRanges = [ { from = 6001; to = 6011; } ];
+    };
+
+    systemd.services.shairport-sync =
+      {
+        description = "shairport-sync";
+        after = [ "network.target" "avahi-daemon.service" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+          ExecStart = "${pkgs.shairport-sync}/bin/shairport-sync ${cfg.arguments}";
+          RuntimeDirectory = "shairport-sync";
+        };
+      };
+
+    environment.systemPackages = [ pkgs.shairport-sync ];
+
+  };
+
+}
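A brief sketch matching the module's recommendation to run against a PulseAudio server; the `-a` flag (advertised name) is an upstream shairport-sync option and the name is illustrative.

  services.shairport-sync = {
    enable = true;
    openFirewall = true;                       # opens TCP 5000 and UDP 6001-6011 as defined above
    arguments = "-v -o pa -a 'Living Room'";   # keep the default PulseAudio output, set a friendly name
  };
  hardware.pulseaudio.enable = true;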
diff --git a/nixpkgs/nixos/modules/services/networking/shellhub-agent.nix b/nixpkgs/nixos/modules/services/networking/shellhub-agent.nix
new file mode 100644
index 000000000000..7cce23cb9c4e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/shellhub-agent.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.shellhub-agent;
+in
+{
+  ###### interface
+
+  options = {
+
+    services.shellhub-agent = {
+
+      enable = mkEnableOption (lib.mdDoc "ShellHub Agent daemon");
+
+      package = mkPackageOptionMD pkgs "shellhub-agent" { };
+
+      preferredHostname = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Set the device's preferred hostname. This provides a hint to
+          the server to use this as the hostname if it is available.
+        '';
+      };
+
+      keepAliveInterval = mkOption {
+        type = types.int;
+        default = 30;
+        description = lib.mdDoc ''
+          Determines the interval at which keep-alive messages are sent
+          to the server. This has a direct impact on the bandwidth
+          used by the device.
+        '';
+      };
+
+      tenantId = mkOption {
+        type = types.str;
+        example = "ba0a880c-2ada-11eb-a35e-17266ef329d6";
+        description = lib.mdDoc ''
+          The tenant ID to use when connecting to the ShellHub
+          Gateway.
+        '';
+      };
+
+      server = mkOption {
+        type = types.str;
+        default = "https://cloud.shellhub.io";
+        description = lib.mdDoc ''
+          Server address of the ShellHub Gateway to connect to.
+        '';
+      };
+
+      privateKey = mkOption {
+        type = types.path;
+        default = "/var/lib/shellhub-agent/private.key";
+        description = lib.mdDoc ''
+          Location where the ShellHub Agent stores its private
+          key.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.shellhub-agent = {
+      description = "ShellHub Agent";
+
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "local-fs.target" ];
+      wants = [ "network-online.target" ];
+      after = [
+        "local-fs.target"
+        "network.target"
+        "network-online.target"
+        "time-sync.target"
+      ];
+
+      environment = {
+        SHELLHUB_SERVER_ADDRESS = cfg.server;
+        SHELLHUB_PRIVATE_KEY = cfg.privateKey;
+        SHELLHUB_TENANT_ID = cfg.tenantId;
+        SHELLHUB_KEEPALIVE_INTERVAL = toString cfg.keepAliveInterval;
+        SHELLHUB_PREFERRED_HOSTNAME = cfg.preferredHostname;
+      };
+
+      serviceConfig = {
+        # The service starts sessions for different users.
+        User = "root";
+        Restart = "on-failure";
+        ExecStart = "${cfg.package}/bin/agent";
+      };
+    };
+  };
+}
+
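A minimal sketch; `tenantId` is the only option without a usable default, and the value below is the placeholder from the option's example.

  services.shellhub-agent = {
    enable = true;
    tenantId = "ba0a880c-2ada-11eb-a35e-17266ef329d6";
    server = "https://cloud.shellhub.io";      # the default ShellHub Cloud gateway
    preferredHostname = "my-nixos-box";        # optional hint, assumed name
  };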
diff --git a/nixpkgs/nixos/modules/services/networking/shorewall.nix b/nixpkgs/nixos/modules/services/networking/shorewall.nix
new file mode 100644
index 000000000000..ba59d71120da
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/shorewall.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+let
+  types = lib.types;
+  cfg = config.services.shorewall;
+in {
+  options = {
+    services.shorewall = {
+      enable = lib.mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc ''
+          Whether to enable Shorewall IPv4 Firewall.
+
+          ::: {.warning}
+          Enabling this service WILL disable the existing NixOS
+          firewall! Default firewall rules provided by packages are not
+          considered at the moment.
+          :::
+        '';
+      };
+      package = lib.mkOption {
+        type        = types.package;
+        default     = pkgs.shorewall;
+        defaultText = lib.literalExpression "pkgs.shorewall";
+        description = lib.mdDoc "The shorewall package to use.";
+      };
+      configs = lib.mkOption {
+        type        = types.attrsOf types.lines;
+        default     = {};
+        description = lib.mdDoc ''
+          This option defines the Shorewall configs.
+          The attribute name defines the name of the config,
+          and the attribute value defines the content of the config.
+        '';
+        apply = lib.mapAttrs (name: text: pkgs.writeText "${name}" text);
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.firewall.enable = false;
+    systemd.services.shorewall = {
+      description     = "Shorewall IPv4 Firewall";
+      after           = [ "ipset.target" ];
+      before          = [ "network-pre.target" ];
+      wants           = [ "network-pre.target" ];
+      wantedBy        = [ "multi-user.target" ];
+      reloadIfChanged = true;
+      restartTriggers = lib.attrValues cfg.configs;
+      serviceConfig = {
+        Type            = "oneshot";
+        RemainAfterExit = "yes";
+        ExecStart       = "${cfg.package}/bin/shorewall start";
+        ExecReload      = "${cfg.package}/bin/shorewall reload";
+        ExecStop        = "${cfg.package}/bin/shorewall stop";
+      };
+      preStart = ''
+        install -D -d -m 750 /var/lib/shorewall
+        install -D -d -m 755 /var/lock/subsys
+        touch                /var/log/shorewall.log
+        chmod 750            /var/log/shorewall.log
+      '';
+    };
+    environment = {
+      etc = lib.mapAttrs' (name: conf: lib.nameValuePair "shorewall/${name}" {source=conf;}) cfg.configs;
+      systemPackages = [ cfg.package ];
+    };
+  };
+}
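A sketch of how `configs` maps attribute names onto files under /etc/shorewall; the zone, interface and policy contents are plain Shorewall syntax and purely illustrative.

  services.shorewall = {
    enable = true;
    configs = {
      # Each attribute becomes /etc/shorewall/<name>.
      zones = ''
        fw   firewall
        net  ipv4
      '';
      interfaces = ''
        net  eth0  tcpflags,nosmurfs
      '';
      policy = ''
        fw   all  ACCEPT
        net  all  DROP
        all  all  REJECT
      '';
    };
  };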
diff --git a/nixpkgs/nixos/modules/services/networking/shorewall6.nix b/nixpkgs/nixos/modules/services/networking/shorewall6.nix
new file mode 100644
index 000000000000..e54be290bfb3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/shorewall6.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+let
+  types = lib.types;
+  cfg = config.services.shorewall6;
+in {
+  options = {
+    services.shorewall6 = {
+      enable = lib.mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc ''
+          Whether to enable Shorewall IPv6 Firewall.
+
+          ::: {.warning}
+          Enabling this service WILL disable the existing NixOS
+          firewall! Default firewall rules provided by packages are not
+          considered at the moment.
+          :::
+        '';
+      };
+      package = lib.mkOption {
+        type        = types.package;
+        default     = pkgs.shorewall;
+        defaultText = lib.literalExpression "pkgs.shorewall";
+        description = lib.mdDoc "The shorewall package to use.";
+      };
+      configs = lib.mkOption {
+        type        = types.attrsOf types.lines;
+        default     = {};
+        description = lib.mdDoc ''
+          This option defines the Shorewall configs.
+          The attribute name defines the name of the config,
+          and the attribute value defines the content of the config.
+        '';
+        apply = lib.mapAttrs (name: text: pkgs.writeText "${name}" text);
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.firewall.enable = false;
+    systemd.services.shorewall6 = {
+      description     = "Shorewall IPv6 Firewall";
+      after           = [ "ipset.target" ];
+      before          = [ "network-pre.target" ];
+      wants           = [ "network-pre.target" ];
+      wantedBy        = [ "multi-user.target" ];
+      reloadIfChanged = true;
+      restartTriggers = lib.attrValues cfg.configs;
+      serviceConfig = {
+        Type            = "oneshot";
+        RemainAfterExit = "yes";
+        ExecStart       = "${cfg.package}/bin/shorewall6 start";
+        ExecReload      = "${cfg.package}/bin/shorewall6 reload";
+        ExecStop        = "${cfg.package}/bin/shorewall6 stop";
+      };
+      preStart = ''
+        install -D -d -m 750 /var/lib/shorewall6
+        install -D -d -m 755 /var/lock/subsys
+        touch                /var/log/shorewall6.log
+        chmod 750            /var/log/shorewall6.log
+      '';
+    };
+    environment = {
+      etc = lib.mapAttrs' (name: conf: lib.nameValuePair "shorewall6/${name}" {source=conf;}) cfg.configs;
+      systemPackages = [ cfg.package ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/shout.nix b/nixpkgs/nixos/modules/services/networking/shout.nix
new file mode 100644
index 000000000000..0b1687d44d9e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/shout.nix
@@ -0,0 +1,115 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.shout;
+  shoutHome = "/var/lib/shout";
+
+  defaultConfig = pkgs.runCommand "config.js" { preferLocalBuild = true; } ''
+    EDITOR=true ${pkgs.shout}/bin/shout config --home $PWD
+    mv config.js $out
+  '';
+
+  finalConfigFile = if (cfg.configFile != null) then cfg.configFile else ''
+    var _ = require('${pkgs.shout}/lib/node_modules/shout/node_modules/lodash')
+
+    module.exports = _.merge(
+      {},
+      require('${defaultConfig}'),
+      ${builtins.toJSON cfg.config}
+    )
+  '';
+
+in {
+  options.services.shout = {
+    enable = mkEnableOption (lib.mdDoc "Shout web IRC client");
+
+    private = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Make your shout instance private. You will need to configure user
+        accounts by adding entries in {file}`${shoutHome}/users`.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc "IP interface to listen on for http connections.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 9000;
+      description = lib.mdDoc "TCP port to listen on for http connections.";
+    };
+
+    configFile = mkOption {
+      type = types.nullOr types.lines;
+      default = null;
+      description = lib.mdDoc ''
+        Contents of Shout's {file}`config.js` file.
+
+        Used for backward compatibility; the recommended way is now
+        to use the `config` option.
+
+        Documentation: http://shout-irc.com/docs/server/configuration.html
+      '';
+    };
+
+    config = mkOption {
+      default = {};
+      type = types.attrs;
+      example = {
+        displayNetwork = false;
+        defaults = {
+          name = "Your Network";
+          host = "localhost";
+          port = 6697;
+        };
+      };
+      description = lib.mdDoc ''
+        Shout {file}`config.js` contents as attribute set (will be
+        converted to JSON to generate the configuration file).
+
+        The options defined here will be merged into the default configuration file.
+
+        Documentation: http://shout-irc.com/docs/server/configuration.html
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.shout = {
+      isSystemUser = true;
+      group = "shout";
+      description = "Shout daemon user";
+      home = shoutHome;
+      createHome = true;
+    };
+    users.groups.shout = {};
+
+    systemd.services.shout = {
+      description = "Shout web IRC client";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      preStart = "ln -sf ${pkgs.writeText "config.js" finalConfigFile} ${shoutHome}/config.js";
+      script = concatStringsSep " " [
+        "${pkgs.shout}/bin/shout"
+        (if cfg.private then "--private" else "--public")
+        "--port" (toString cfg.port)
+        "--host" (toString cfg.listenAddress)
+        "--home" shoutHome
+      ];
+      serviceConfig = {
+        User = "shout";
+        ProtectHome = "true";
+        ProtectSystem = "full";
+        PrivateTmp = "true";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/sing-box.nix b/nixpkgs/nixos/modules/services/networking/sing-box.nix
new file mode 100644
index 000000000000..a884bcd271ec
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/sing-box.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, utils, ... }:
+let
+  cfg = config.services.sing-box;
+  settingsFormat = pkgs.formats.json { };
+in
+{
+
+  meta = {
+    maintainers = with lib.maintainers; [ nickcao ];
+  };
+
+  options = {
+    services.sing-box = {
+      enable = lib.mkEnableOption (lib.mdDoc "sing-box universal proxy platform");
+
+      package = lib.mkPackageOptionMD pkgs "sing-box" { };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule {
+          freeformType = settingsFormat.type;
+          options = {
+            route = {
+              geoip.path = lib.mkOption {
+                type = lib.types.path;
+                default = "${pkgs.sing-geoip}/share/sing-box/geoip.db";
+                defaultText = lib.literalExpression "\${pkgs.sing-geoip}/share/sing-box/geoip.db";
+                description = lib.mdDoc ''
+                  The path to the sing-geoip database.
+                '';
+              };
+              geosite.path = lib.mkOption {
+                type = lib.types.path;
+                default = "${pkgs.sing-geosite}/share/sing-box/geosite.db";
+                defaultText = lib.literalExpression "\${pkgs.sing-geosite}/share/sing-box/geosite.db";
+                description = lib.mdDoc ''
+                  The path to the sing-geosite database.
+                '';
+              };
+            };
+          };
+        };
+        default = { };
+        description = lib.mdDoc ''
+          The sing-box configuration, see https://sing-box.sagernet.org/configuration/ for documentation.
+
+          Options containing secret data should be set to an attribute set
+          containing the attribute `_secret` - a string pointing to a file
+          containing the value the option should be set to.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.packages = [ cfg.package ];
+
+    systemd.services.sing-box = {
+      preStart = ''
+        umask 0077
+        mkdir -p /etc/sing-box
+        ${utils.genJqSecretsReplacementSnippet cfg.settings "/etc/sing-box/config.json"}
+      '';
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+
+}
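A sketch of the `_secret` mechanism described above: the settings are rendered to /etc/sing-box/config.json in preStart, with the password read from a file instead of being embedded in the store. The inbound shape follows the upstream sing-box schema and the secret path is an assumption.

  services.sing-box = {
    enable = true;
    settings.inbounds = [
      {
        type = "shadowsocks";
        listen = "::";
        listen_port = 8388;
        method = "2022-blake3-aes-128-gcm";
        password._secret = "/run/secrets/sing-box-password";   # substituted at runtime
      }
    ];
  };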
diff --git a/nixpkgs/nixos/modules/services/networking/sitespeed-io.nix b/nixpkgs/nixos/modules/services/networking/sitespeed-io.nix
new file mode 100644
index 000000000000..f7eab0bb19d7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/sitespeed-io.nix
@@ -0,0 +1,122 @@
+{ lib, config, pkgs, ... }:
+let
+  cfg = config.services.sitespeed-io;
+  format = pkgs.formats.json { };
+in
+{
+  options.services.sitespeed-io = {
+    enable = lib.mkEnableOption (lib.mdDoc "Sitespeed.io");
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      default = "sitespeed-io";
+      description = lib.mdDoc "User account under which sitespeed-io runs.";
+    };
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.sitespeed-io;
+      defaultText = "pkgs.sitespeed-io";
+      description = lib.mdDoc "Sitespeed.io package to use.";
+    };
+
+    dataDir = lib.mkOption {
+      default = "/var/lib/sitespeed-io";
+      type = lib.types.str;
+      description = lib.mdDoc "The base sitespeed-io data directory.";
+    };
+
+    period = lib.mkOption {
+      type = lib.types.str;
+      default = "hourly";
+      description = lib.mdDoc ''
+        Systemd calendar expression when to run. See {manpage}`systemd.time(7)`.
+      '';
+    };
+
+    runs = lib.mkOption {
+      default = [ ];
+      description = lib.mdDoc ''
+        A list of run configurations. The service will call sitespeed-io once
+        for every run listed here. This lets you examine different websites
+        with different sitespeed-io settings.
+      '';
+      type = lib.types.listOf (lib.types.submodule {
+        options = {
+          urls = lib.mkOption {
+            type = with lib.types; listOf str;
+            default = [];
+            description = lib.mdDoc ''
+              URLs the service should monitor.
+            '';
+          };
+
+          settings = lib.mkOption {
+            type = lib.types.submodule {
+              freeformType = format.type;
+              options = { };
+            };
+            default = { };
+            description = lib.mdDoc ''
+              Configuration for sitespeed-io, see
+              <https://www.sitespeed.io/documentation/sitespeed.io/configuration/>
+              for available options. The value here will be directly transformed to
+              JSON and passed as `--config` to the program.
+            '';
+          };
+
+          extraArgs = lib.mkOption {
+            type = with lib.types; listOf str;
+            default = [];
+            description = lib.mdDoc ''
+              Extra command line arguments to pass to the program.
+            '';
+          };
+        };
+      });
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.runs != [];
+        message = "At least one run must be configured.";
+      }
+      {
+        assertion = lib.all (run: run.urls != []) cfg.runs;
+        message = "All runs must have at least one url configured.";
+      }
+    ];
+
+    systemd.services.sitespeed-io = {
+      description = "Check website status";
+      startAt = cfg.period;
+      serviceConfig = {
+        WorkingDirectory = cfg.dataDir;
+        User = cfg.user;
+      };
+      preStart = "chmod u+w -R ${cfg.dataDir}"; # Make sure things are writable
+      script = (lib.concatMapStrings (run: ''
+        ${lib.getExe cfg.package} \
+          --config ${format.generate "sitespeed.json" run.settings} \
+          ${lib.escapeShellArgs run.extraArgs} \
+          ${builtins.toFile "urls.txt" (lib.concatLines run.urls)} &
+      '') cfg.runs) +
+      ''
+        wait
+      '';
+    };
+
+    users = {
+      extraUsers.${cfg.user} = {
+        isSystemUser = true;
+        group = cfg.user;
+        home = cfg.dataDir;
+        createHome = true;
+        homeMode = "755";
+      };
+      extraGroups.${cfg.user} = { };
+    };
+  };
+}
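A sketch with two runs, showing how each run gets its own URL list plus a `--config` JSON and extra command-line arguments; the URLs and the specific sitespeed.io settings are illustrative.

  services.sitespeed-io = {
    enable = true;
    period = "daily";
    runs = [
      {
        urls = [ "https://example.org" ];
        settings.browsertime.iterations = 3;              # assumed sitespeed.io setting, passed via --config
      }
      {
        urls = [ "https://example.com" "https://example.com/search" ];
        extraArgs = [ "--plugins.remove" "screenshot" ];  # assumed flag, appended verbatim
      }
    ];
  };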
diff --git a/nixpkgs/nixos/modules/services/networking/skydns.nix b/nixpkgs/nixos/modules/services/networking/skydns.nix
new file mode 100644
index 000000000000..84cf6b0deac1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/skydns.nix
@@ -0,0 +1,93 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.skydns;
+
+in {
+  options.services.skydns = {
+    enable = mkEnableOption (lib.mdDoc "skydns service");
+
+    etcd = {
+      machines = mkOption {
+        default = [ "http://127.0.0.1:2379" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc "Skydns list of etcd endpoints to connect to.";
+      };
+
+      tlsKey = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = lib.mdDoc "Skydns path of TLS client certificate - private key.";
+      };
+
+      tlsPem = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = lib.mdDoc "Skydns path of TLS client certificate - public key.";
+      };
+
+      caCert = mkOption {
+        default = null;
+        type = types.nullOr types.path;
+        description = lib.mdDoc "Skydns path of TLS certificate authority public key.";
+      };
+    };
+
+    address = mkOption {
+      default = "0.0.0.0:53";
+      type = types.str;
+      description = lib.mdDoc "Skydns address to bind to.";
+    };
+
+    domain = mkOption {
+      default = "skydns.local.";
+      type = types.str;
+      description = lib.mdDoc "Skydns default domain if not specified by etcd config.";
+    };
+
+    nameservers = mkOption {
+      default = map (n: n + ":53") config.networking.nameservers;
+      defaultText = literalExpression ''map (n: n + ":53") config.networking.nameservers'';
+      type = types.listOf types.str;
+      description = lib.mdDoc "Skydns list of nameservers to forward DNS requests to when not authoritative for a domain.";
+      example = ["8.8.8.8:53" "8.8.4.4:53"];
+    };
+
+    package = mkOption {
+      default = pkgs.skydns;
+      defaultText = literalExpression "pkgs.skydns";
+      type = types.package;
+      description = lib.mdDoc "Skydns package to use.";
+    };
+
+    extraConfig = mkOption {
+      default = {};
+      type = types.attrsOf types.str;
+      description = lib.mdDoc "Skydns attribute set of extra config options passed as environment variables.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.skydns = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "etcd.service" ];
+      description = "Skydns Service";
+      environment = {
+        ETCD_MACHINES = concatStringsSep "," cfg.etcd.machines;
+        ETCD_TLSKEY = cfg.etcd.tlsKey;
+        ETCD_TLSPEM = cfg.etcd.tlsPem;
+        ETCD_CACERT = cfg.etcd.caCert;
+        SKYDNS_ADDR = cfg.address;
+        SKYDNS_DOMAIN = cfg.domain;
+        SKYDNS_NAMESERVERS = concatStringsSep "," cfg.nameservers;
+      };
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/skydns";
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
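A minimal sketch pointing SkyDNS at a local etcd instance; the domain and upstream resolvers are placeholders.

  services.skydns = {
    enable = true;
    etcd.machines = [ "http://127.0.0.1:2379" ];
    domain = "cluster.local.";
    nameservers = [ "8.8.8.8:53" "8.8.4.4:53" ];
  };
  services.etcd.enable = true;   # the unit above orders itself after etcd.service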
diff --git a/nixpkgs/nixos/modules/services/networking/smartdns.nix b/nixpkgs/nixos/modules/services/networking/smartdns.nix
new file mode 100644
index 000000000000..af8ee8b00c0a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/smartdns.nix
@@ -0,0 +1,62 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  inherit (lib.types) attrsOf coercedTo listOf oneOf str int bool;
+  cfg = config.services.smartdns;
+
+  confFile = pkgs.writeText "smartdns.conf" (with generators;
+    toKeyValue {
+      mkKeyValue = mkKeyValueDefault {
+        mkValueString = v:
+          if isBool v then
+            if v then "yes" else "no"
+          else
+            mkValueStringDefault { } v;
+      } " ";
+      # Emit list values as duplicate keys, since smartdns configs may repeat the same key.
+      listsAsDuplicateKeys = true;
+    } cfg.settings);
+in {
+  options.services.smartdns = {
+    enable = mkEnableOption (lib.mdDoc "SmartDNS DNS server");
+
+    bindPort = mkOption {
+      type = types.port;
+      default = 53;
+      description = lib.mdDoc "DNS listening port number.";
+    };
+
+    settings = mkOption {
+      type =
+        let atom = oneOf [ str int bool ];
+        in attrsOf (coercedTo atom toList (listOf atom));
+      example = literalExpression ''
+        {
+          bind = ":5353 -no-rule -group example";
+          cache-size = 4096;
+          server-tls = [ "8.8.8.8:853" "1.1.1.1:853" ];
+          server-https = "https://cloudflare-dns.com/dns-query -exclude-default-group";
+          prefetch-domain = true;
+          speed-check-mode = "ping,tcp:80";
+        };
+      '';
+      description = lib.mdDoc ''
+        An attribute set that is rendered into the configuration file; see the [SmartDNS README](https://github.com/pymumu/smartdns/blob/master/ReadMe_en.md#configuration-parameter) for details of the configuration parameters.
+        Settings placed here override options generated by this module, such as {option}`services.smartdns.bindPort`; for example, `settings.bind = ":5353 -no-rule -group example";`.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.smartdns.settings.bind = mkDefault ":${toString cfg.bindPort}";
+
+    systemd.packages = [ pkgs.smartdns ];
+    systemd.services.smartdns.wantedBy = [ "multi-user.target" ];
+    systemd.services.smartdns.restartTriggers = [ confFile ];
+    environment.etc."smartdns/smartdns.conf".source = confFile;
+    environment.etc."default/smartdns".source =
+      "${pkgs.smartdns}/etc/default/smartdns";
+  };
+}
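+
+# A hedged usage sketch (not part of the module): settings are rendered to
+# key/value lines in smartdns.conf by confFile above, with lists emitted as
+# repeated keys and booleans as yes/no. The upstream server addresses are
+# illustrative, taken from the option example.
+#
+#   services.smartdns = {
+#     enable = true;
+#     bindPort = 53;
+#     settings = {
+#       server-tls = [ "8.8.8.8:853" "1.1.1.1:853" ];
+#       prefetch-domain = true;
+#     };
+#   };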
diff --git a/nixpkgs/nixos/modules/services/networking/smokeping.nix b/nixpkgs/nixos/modules/services/networking/smokeping.nix
new file mode 100644
index 000000000000..c7aec7d9489f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/smokeping.nix
@@ -0,0 +1,374 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+
+  cfg = config.services.smokeping;
+  smokepingHome = "/var/lib/smokeping";
+  smokepingPidDir = "/run";
+  configFile =
+    if cfg.config == null
+    then
+      ''
+        *** General ***
+        cgiurl   = ${cfg.cgiUrl}
+        contact = ${cfg.ownerEmail}
+        datadir  = ${smokepingHome}/data
+        imgcache = ${smokepingHome}/cache
+        imgurl   = ${cfg.imgUrl}
+        linkstyle = ${cfg.linkStyle}
+        ${lib.optionalString (cfg.mailHost != "") "mailhost = ${cfg.mailHost}"}
+        owner = ${cfg.owner}
+        pagedir = ${smokepingHome}/cache
+        piddir  = ${smokepingPidDir}
+        ${lib.optionalString (cfg.sendmail != null) "sendmail = ${cfg.sendmail}"}
+        smokemail = ${cfg.smokeMailTemplate}
+        *** Presentation ***
+        template = ${cfg.presentationTemplate}
+        ${cfg.presentationConfig}
+        *** Alerts ***
+        ${cfg.alertConfig}
+        *** Database ***
+        ${cfg.databaseConfig}
+        *** Probes ***
+        ${cfg.probeConfig}
+        *** Targets ***
+        ${cfg.targetConfig}
+        ${cfg.extraConfig}
+      ''
+    else
+      cfg.config;
+
+  configPath = pkgs.writeText "smokeping.conf" configFile;
+  cgiHome = pkgs.writeScript "smokeping.fcgi" ''
+    #!${pkgs.bash}/bin/bash
+    ${cfg.package}/bin/smokeping_cgi /etc/smokeping.conf
+  '';
+in
+
+{
+  options = {
+    services.smokeping = {
+      enable = mkEnableOption (lib.mdDoc "smokeping service");
+
+      alertConfig = mkOption {
+        type = types.lines;
+        default = ''
+          to = root@localhost
+          from = smokeping@localhost
+        '';
+        example = ''
+          to = alertee@address.somewhere
+          from = smokealert@company.xy
+
+          +someloss
+          type = loss
+          # in percent
+          pattern = >0%,*12*,>0%,*12*,>0%
+          comment = loss 3 times  in a row;
+        '';
+        description = lib.mdDoc "Configuration for alerts.";
+      };
+      cgiUrl = mkOption {
+        type = types.str;
+        default = "http://${cfg.hostName}:${toString cfg.port}/smokeping.cgi";
+        defaultText = literalExpression ''"http://''${hostName}:''${toString port}/smokeping.cgi"'';
+        example = "https://somewhere.example.com/smokeping.cgi";
+        description = lib.mdDoc "URL to the smokeping cgi.";
+      };
+      config = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Full smokeping config supplied by the user. Overrides
+          and replaces any other configuration supplied.
+        '';
+      };
+      databaseConfig = mkOption {
+        type = types.lines;
+        default = ''
+          step     = 300
+          pings    = 20
+          # consfn mrhb steps total
+          AVERAGE  0.5   1  1008
+          AVERAGE  0.5  12  4320
+              MIN  0.5  12  4320
+              MAX  0.5  12  4320
+          AVERAGE  0.5 144   720
+              MAX  0.5 144   720
+              MIN  0.5 144   720
+
+        '';
+        example = ''
+          # near constant pings.
+          step     = 30
+          pings    = 20
+          # consfn mrhb steps total
+          AVERAGE  0.5   1  10080
+          AVERAGE  0.5  12  43200
+              MIN  0.5  12  43200
+              MAX  0.5  12  43200
+          AVERAGE  0.5 144   7200
+              MAX  0.5 144   7200
+              MIN  0.5 144   7200
+        '';
+        description = lib.mdDoc ''Configure the ping frequency and retention of the rrd files.
+          Once set, changing the interval will require deletion or migration of all
+          the collected data.'';
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Any additional customization not already included.";
+      };
+      hostName = mkOption {
+        type = types.str;
+        default = config.networking.fqdn;
+        defaultText = literalExpression "config.networking.fqdn";
+        example = "somewhere.example.com";
+        description = lib.mdDoc "DNS name for the urls generated in the cgi.";
+      };
+      imgUrl = mkOption {
+        type = types.str;
+        default = "cache";
+        defaultText = literalExpression ''"cache"'';
+        example = "https://somewhere.example.com/cache";
+        description = lib.mdDoc ''
+          Base url for images generated in the cgi.
+
+          The default is a relative URL to ensure it works also when e.g. forwarding
+          the GUI port via SSH.
+        '';
+      };
+      linkStyle = mkOption {
+        type = types.enum [ "original" "absolute" "relative" ];
+        default = "relative";
+        example = "absolute";
+        description = lib.mdDoc "DNS name for the urls generated in the cgi.";
+      };
+      mailHost = mkOption {
+        type = types.str;
+        default = "";
+        example = "localhost";
+        description = lib.mdDoc "Use this SMTP server to send alerts";
+      };
+      owner = mkOption {
+        type = types.str;
+        default = "nobody";
+        example = "Bob Foobawr";
+        description = lib.mdDoc "Real name of the owner of the instance";
+      };
+      ownerEmail = mkOption {
+        type = types.str;
+        default = "no-reply@${cfg.hostName}";
+        defaultText = literalExpression ''"no-reply@''${hostName}"'';
+        example = "no-reply@yourdomain.com";
+        description = lib.mdDoc "Email contact for owner";
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.smokeping;
+        defaultText = literalExpression "pkgs.smokeping";
+        description = lib.mdDoc "Specify a custom smokeping package";
+      };
+      host = mkOption {
+        type = types.nullOr types.str;
+        default = "localhost";
+        example = "192.0.2.1"; # rfc5737 example IP for documentation
+        description = lib.mdDoc ''
+          Host/IP to bind to for the web server.
+
+          Setting it to `null` skips passing the -h option to thttpd,
+          which makes it bind to all interfaces.
+        '';
+      };
+      port = mkOption {
+        type = types.port;
+        default = 8081;
+        description = lib.mdDoc "TCP port to use for the web server.";
+      };
+      presentationConfig = mkOption {
+        type = types.lines;
+        default = ''
+          + charts
+          menu = Charts
+          title = The most interesting destinations
+          ++ stddev
+          sorter = StdDev(entries=>4)
+          title = Top Standard Deviation
+          menu = Std Deviation
+          format = Standard Deviation %f
+          ++ max
+          sorter = Max(entries=>5)
+          title = Top Max Roundtrip Time
+          menu = by Max
+          format = Max Roundtrip Time %f seconds
+          ++ loss
+          sorter = Loss(entries=>5)
+          title = Top Packet Loss
+          menu = Loss
+          format = Packets Lost %f
+          ++ median
+          sorter = Median(entries=>5)
+          title = Top Median Roundtrip Time
+          menu = by Median
+          format = Median RTT %f seconds
+          + overview
+          width = 600
+          height = 50
+          range = 10h
+          + detail
+          width = 600
+          height = 200
+          unison_tolerance = 2
+          "Last 3 Hours"    3h
+          "Last 30 Hours"   30h
+          "Last 10 Days"    10d
+          "Last 360 Days"   360d
+        '';
+        description = lib.mdDoc "presentation graph style";
+      };
+      presentationTemplate = mkOption {
+        type = types.str;
+        default = "${pkgs.smokeping}/etc/basepage.html.dist";
+        defaultText = literalExpression ''"''${pkgs.smokeping}/etc/basepage.html.dist"'';
+        description = lib.mdDoc "Default page layout for the web UI.";
+      };
+      probeConfig = mkOption {
+        type = types.lines;
+        default = ''
+          + FPing
+          binary = ${config.security.wrapperDir}/fping
+        '';
+        defaultText = literalExpression ''
+          '''
+            + FPing
+            binary = ''${config.security.wrapperDir}/fping
+          '''
+        '';
+        description = lib.mdDoc "Probe configuration";
+      };
+      sendmail = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/wrappers/bin/sendmail";
+        description = lib.mdDoc "Use this sendmail compatible script to deliver alerts";
+      };
+      smokeMailTemplate = mkOption {
+        type = types.str;
+        default = "${cfg.package}/etc/smokemail.dist";
+        defaultText = literalExpression ''"''${package}/etc/smokemail.dist"'';
+        description = lib.mdDoc "Specify the smokemail template for alerts.";
+      };
+      targetConfig = mkOption {
+        type = types.lines;
+        default = ''
+          probe = FPing
+          menu = Top
+          title = Network Latency Grapher
+          remark = Welcome to the SmokePing website of xxx Company. \
+                   Here you will learn all about the latency of our network.
+          + Local
+          menu = Local
+          title = Local Network
+          ++ LocalMachine
+          menu = Local Machine
+          title = This host
+          host = localhost
+        '';
+        description = lib.mdDoc "Target configuration";
+      };
+      user = mkOption {
+        type = types.str;
+        default = "smokeping";
+        description = lib.mdDoc "User that runs smokeping and (optionally) thttpd. A group of the same name will be created as well.";
+      };
+      webService = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Enable a smokeping web interface";
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !(cfg.sendmail != null && cfg.mailHost != "");
+        message = "services.smokeping: sendmail and Mailhost cannot both be enabled.";
+      }
+    ];
+    security.wrappers = {
+      fping =
+        {
+          setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.fping}/bin/fping";
+        };
+    };
+    environment.etc."smokeping.conf".source = configPath;
+    environment.systemPackages = [ pkgs.fping ];
+    users.users.${cfg.user} = {
+      isNormalUser = false;
+      isSystemUser = true;
+      group = cfg.user;
+      description = "smokeping daemon user";
+      home = smokepingHome;
+      createHome = true;
+      # When `cfg.webService` is enabled, `thttpd` makes SmokePing available
+      # under `${cfg.host}:${cfg.port}/smokeping.fcgi` as per the `ln -s` below.
+      # We also want that going to `${cfg.host}:${cfg.port}` without `smokeping.fcgi`
+      # makes it easy for the user to find SmokePing.
+      # However `thttpd` does not seem to support easy redirections from `/` to `smokeping.fcgi`
+      # and only allows directory listings or `/` -> `index.html` resolution if the directory
+      # has `chmod 755` (see https://acme.com/software/thttpd/thttpd_man.html#PERMISSIONS,
+      # " directories should be 755 if you want to allow indexing").
+      # Otherwise it shows `403 Forbidden` on `/`.
+      # Thus, we need to make `smokepingHome` (which is given to `thttpd -d` below) `755`.
+      homeMode = "755";
+    };
+    users.groups.${cfg.user} = { };
+    systemd.services.smokeping = {
+      reloadTriggers = [ configPath ];
+      requiredBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Restart = "on-failure";
+        ExecStart = "${cfg.package}/bin/smokeping --config=/etc/smokeping.conf --nodaemon";
+      };
+      preStart = ''
+        mkdir -m 0755 -p ${smokepingHome}/cache ${smokepingHome}/data
+        ln -snf ${cfg.package}/htdocs/css ${smokepingHome}/css
+        ln -snf ${cfg.package}/htdocs/js ${smokepingHome}/js
+        ln -snf ${cgiHome} ${smokepingHome}/smokeping.fcgi
+        ${cfg.package}/bin/smokeping --check --config=${configPath}
+        ${cfg.package}/bin/smokeping --static --config=${configPath}
+      '';
+    };
+    systemd.services.thttpd = mkIf cfg.webService {
+      requiredBy = [ "multi-user.target" ];
+      requires = [ "smokeping.service" ];
+      path = with pkgs; [ bash rrdtool smokeping thttpd ];
+      serviceConfig = {
+        Restart = "always";
+        ExecStart = lib.concatStringsSep " " (lib.concatLists [
+          [ "${pkgs.thttpd}/bin/thttpd" ]
+          [ "-u ${cfg.user}" ]
+          [ ''-c "**.fcgi"'' ]
+          [ "-d ${smokepingHome}" ]
+          (lib.optional (cfg.host != null) "-h ${cfg.host}")
+          [ "-p ${builtins.toString cfg.port}" ]
+          [ "-D -nos" ]
+        ]);
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [
+    erictapen
+    nh2
+  ];
+}
+
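+# A hedged usage sketch (not part of the module): a single FPing target served
+# by the bundled thttpd web UI on the default port 8081. The hostname and the
+# gateway address are placeholders.
+#
+#   services.smokeping = {
+#     enable = true;
+#     hostName = "smokeping.example.com";
+#     targetConfig = ''
+#       probe = FPing
+#       menu = Top
+#       title = Network Latency Grapher
+#       + Gateway
+#       menu = Gateway
+#       title = Default gateway
+#       host = 192.0.2.1
+#     '';
+#   };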
diff --git a/nixpkgs/nixos/modules/services/networking/sniproxy.nix b/nixpkgs/nixos/modules/services/networking/sniproxy.nix
new file mode 100644
index 000000000000..b805b7b44d72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/sniproxy.nix
@@ -0,0 +1,88 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.sniproxy;
+
+  configFile = pkgs.writeText "sniproxy.conf" ''
+    user ${cfg.user}
+    pidfile /run/sniproxy.pid
+    ${cfg.config}
+  '';
+
+in
+{
+  imports = [ (mkRemovedOptionModule [ "services" "sniproxy" "logDir" ] "Now done by LogsDirectory=. Set to a custom path if you log to a different folder in your config.") ];
+
+  options = {
+    services.sniproxy = {
+      enable = mkEnableOption (lib.mdDoc "sniproxy server");
+
+      user = mkOption {
+        type = types.str;
+        default = "sniproxy";
+        description = lib.mdDoc "User account under which sniproxy runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "sniproxy";
+        description = lib.mdDoc "Group under which sniproxy runs.";
+      };
+
+      config = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "sniproxy.conf configuration excluding the daemon username and pid file.";
+        example = ''
+          error_log {
+            filename /var/log/sniproxy/error.log
+          }
+          access_log {
+            filename /var/log/sniproxy/access.log
+          }
+          listen 443 {
+            proto tls
+          }
+          table {
+            example.com 192.0.2.10
+            example.net 192.0.2.20
+          }
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.sniproxy = {
+      description = "sniproxy server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.sniproxy}/bin/sniproxy -c ${configFile}";
+        LogsDirectory = "sniproxy";
+        LogsDirectoryMode = "0640";
+        Restart = "always";
+      };
+    };
+
+    users.users = mkIf (cfg.user == "sniproxy") {
+      sniproxy = {
+        group = cfg.group;
+        uid = config.ids.uids.sniproxy;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "sniproxy") {
+      sniproxy = {
+        gid = config.ids.gids.sniproxy;
+      };
+    };
+
+  };
+}
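+
+# A hedged usage sketch (not part of the module): routing TLS connections by
+# SNI, mirroring the example given for the config option above; the host names
+# and backend addresses are placeholders.
+#
+#   services.sniproxy = {
+#     enable = true;
+#     config = ''
+#       listen 443 {
+#         proto tls
+#       }
+#       table {
+#         example.com 192.0.2.10
+#       }
+#     '';
+#   };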
diff --git a/nixpkgs/nixos/modules/services/networking/snowflake-proxy.nix b/nixpkgs/nixos/modules/services/networking/snowflake-proxy.nix
new file mode 100644
index 000000000000..19b68f1e20ba
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/snowflake-proxy.nix
@@ -0,0 +1,81 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.snowflake-proxy;
+in
+{
+  options = {
+    services.snowflake-proxy = {
+      enable = mkEnableOption (lib.mdDoc "snowflake-proxy, a system to defeat internet censorship");
+
+      broker = mkOption {
+        description = lib.mdDoc "Broker URL (default \"https://snowflake-broker.torproject.net/\")";
+        type = with types; nullOr str;
+        default = null;
+      };
+
+      capacity = mkOption {
+        description = lib.mdDoc "Limits the amount of maximum concurrent clients allowed.";
+        type = with types; nullOr int;
+        default = null;
+      };
+
+      relay = mkOption {
+        description = lib.mdDoc "websocket relay URL (default \"wss://snowflake.bamsoftware.com/\")";
+        type = with types; nullOr str;
+        default = null;
+      };
+
+      stun = mkOption {
+        description = lib.mdDoc "STUN broker URL (default \"stun:stun.stunprotocol.org:3478\")";
+        type = with types; nullOr str;
+        default = null;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.snowflake-proxy = {
+      wantedBy = [ "network-online.target" ];
+      serviceConfig = {
+        ExecStart =
+          "${pkgs.snowflake}/bin/proxy " + concatStringsSep " " (
+            optional (cfg.broker != null) "-broker ${cfg.broker}"
+            ++ optional (cfg.capacity != null) "-capacity ${builtins.toString cfg.capacity}"
+            ++ optional (cfg.relay != null) "-relay ${cfg.relay}"
+            ++ optional (cfg.stun != null) "-stun ${cfg.stun}"
+          );
+
+        # Security Hardening
+        # Refer to systemd.exec(5) for option descriptions.
+        CapabilityBoundingSet = "";
+
+        # implies RemoveIPC=, PrivateTmp=, NoNewPrivileges=, RestrictSUIDSGID=,
+        # ProtectSystem=strict, ProtectHome=read-only
+        DynamicUser = true;
+        LockPersonality = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectProc = "invisible";
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        UMask = "0077";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ yayayayaka ];
+}
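+
+# A hedged usage sketch (not part of the module): with every option left at
+# null, the proxy falls back to the built-in broker, relay and STUN defaults
+# quoted in the option descriptions above; the capacity value is illustrative.
+#
+#   services.snowflake-proxy = {
+#     enable = true;
+#     capacity = 50;
+#   };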
diff --git a/nixpkgs/nixos/modules/services/networking/softether.nix b/nixpkgs/nixos/modules/services/networking/softether.nix
new file mode 100644
index 000000000000..c8e888eafcc2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/softether.nix
@@ -0,0 +1,163 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.softether;
+
+  package = cfg.package.override { inherit (cfg) dataDir; };
+
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.softether = {
+
+      enable = mkEnableOption (lib.mdDoc "SoftEther VPN services");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.softether;
+        defaultText = literalExpression "pkgs.softether";
+        description = lib.mdDoc ''
+          softether derivation to use.
+        '';
+      };
+
+      vpnserver.enable = mkEnableOption (lib.mdDoc "SoftEther VPN Server");
+
+      vpnbridge.enable = mkEnableOption (lib.mdDoc "SoftEther VPN Bridge");
+
+      vpnclient = {
+        enable = mkEnableOption (lib.mdDoc "SoftEther VPN Client");
+        up = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc ''
+            Shell commands executed when the Virtual Network Adapter(s) is/are starting.
+          '';
+        };
+        down = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc ''
+            Shell commands executed when the Virtual Network Adapter(s) is/are shutting down.
+          '';
+        };
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/softether";
+        description = lib.mdDoc ''
+          Data directory for SoftEther VPN.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (
+
+    mkMerge [{
+      environment.systemPackages = [ package ];
+
+      systemd.services.softether-init = {
+        description = "SoftEther VPN services initial task";
+        wantedBy = [ "network.target" ];
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = false;
+        };
+        script = ''
+            for d in vpnserver vpnbridge vpnclient vpncmd; do
+                if ! test -e ${cfg.dataDir}/$d; then
+                    ${pkgs.coreutils}/bin/mkdir -m0700 -p ${cfg.dataDir}/$d
+                    install -m0600 ${package}${cfg.dataDir}/$d/hamcore.se2 ${cfg.dataDir}/$d/hamcore.se2
+                fi
+            done
+            rm -rf ${cfg.dataDir}/vpncmd/vpncmd
+            ln -s ${package}${cfg.dataDir}/vpncmd/vpncmd ${cfg.dataDir}/vpncmd/vpncmd
+        '';
+      };
+    }
+
+    (mkIf cfg.vpnserver.enable {
+      systemd.services.vpnserver = {
+        description = "SoftEther VPN Server";
+        after = [ "softether-init.service" ];
+        requires = [ "softether-init.service" ];
+        wantedBy = [ "network.target" ];
+        serviceConfig = {
+          Type = "forking";
+          ExecStart = "${package}/bin/vpnserver start";
+          ExecStop = "${package}/bin/vpnserver stop";
+        };
+        preStart = ''
+            rm -rf ${cfg.dataDir}/vpnserver/vpnserver
+            ln -s ${package}${cfg.dataDir}/vpnserver/vpnserver ${cfg.dataDir}/vpnserver/vpnserver
+        '';
+        postStop = ''
+            rm -rf ${cfg.dataDir}/vpnserver/vpnserver
+        '';
+      };
+    })
+
+    (mkIf cfg.vpnbridge.enable {
+      systemd.services.vpnbridge = {
+        description = "SoftEther VPN Bridge";
+        after = [ "softether-init.service" ];
+        requires = [ "softether-init.service" ];
+        wantedBy = [ "network.target" ];
+        serviceConfig = {
+          Type = "forking";
+          ExecStart = "${package}/bin/vpnbridge start";
+          ExecStop = "${package}/bin/vpnbridge stop";
+        };
+        preStart = ''
+            rm -rf ${cfg.dataDir}/vpnbridge/vpnbridge
+            ln -s ${package}${cfg.dataDir}/vpnbridge/vpnbridge ${cfg.dataDir}/vpnbridge/vpnbridge
+        '';
+        postStop = ''
+            rm -rf ${cfg.dataDir}/vpnbridge/vpnbridge
+        '';
+      };
+    })
+
+    (mkIf cfg.vpnclient.enable {
+      systemd.services.vpnclient = {
+        description = "SoftEther VPN Client";
+        after = [ "softether-init.service" ];
+        requires = [ "softether-init.service" ];
+        wantedBy = [ "network.target" ];
+        serviceConfig = {
+          Type = "forking";
+          ExecStart = "${package}/bin/vpnclient start";
+          ExecStop = "${package}/bin/vpnclient stop";
+        };
+        preStart = ''
+            rm -rf ${cfg.dataDir}/vpnclient/vpnclient
+            ln -s ${package}${cfg.dataDir}/vpnclient/vpnclient ${cfg.dataDir}/vpnclient/vpnclient
+        '';
+        postStart = ''
+            sleep 1
+            ${cfg.vpnclient.up}
+        '';
+        postStop = ''
+            rm -rf ${cfg.dataDir}/vpnclient/vpnclient
+            sleep 1
+            ${cfg.vpnclient.down}
+        '';
+      };
+      boot.kernelModules = [ "tun" ];
+    })
+
+  ]);
+
+}
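+
+# A hedged usage sketch (not part of the module): running only the VPN server
+# component, with state kept under the default dataDir (/var/lib/softether).
+#
+#   services.softether = {
+#     enable = true;
+#     vpnserver.enable = true;
+#   };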
diff --git a/nixpkgs/nixos/modules/services/networking/soju.nix b/nixpkgs/nixos/modules/services/networking/soju.nix
new file mode 100644
index 000000000000..7f0ac3e3b8e6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/soju.nix
@@ -0,0 +1,124 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.soju;
+  stateDir = "/var/lib/soju";
+  listenCfg = concatMapStringsSep "\n" (l: "listen ${l}") cfg.listen;
+  tlsCfg = optionalString (cfg.tlsCertificate != null)
+    "tls ${cfg.tlsCertificate} ${cfg.tlsCertificateKey}";
+  logCfg = optionalString cfg.enableMessageLogging
+    "log fs ${stateDir}/logs";
+
+  configFile = pkgs.writeText "soju.conf" ''
+    ${listenCfg}
+    hostname ${cfg.hostName}
+    ${tlsCfg}
+    db sqlite3 ${stateDir}/soju.db
+    ${logCfg}
+    http-origin ${concatStringsSep " " cfg.httpOrigins}
+    accept-proxy-ip ${concatStringsSep " " cfg.acceptProxyIP}
+
+    ${cfg.extraConfig}
+  '';
+in
+{
+  ###### interface
+
+  options.services.soju = {
+    enable = mkEnableOption (lib.mdDoc "soju");
+
+    listen = mkOption {
+      type = types.listOf types.str;
+      default = [ ":6697" ];
+      description = lib.mdDoc ''
+        Where soju should listen for incoming connections. See the
+        `listen` directive in
+        {manpage}`soju(1)`.
+      '';
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      default = config.networking.hostName;
+      defaultText = literalExpression "config.networking.hostName";
+      description = lib.mdDoc "Server hostname.";
+    };
+
+    tlsCertificate = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/var/host.cert";
+      description = lib.mdDoc "Path to server TLS certificate.";
+    };
+
+    tlsCertificateKey = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/var/host.key";
+      description = lib.mdDoc "Path to server TLS certificate key.";
+    };
+
+    enableMessageLogging = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Whether to enable message logging.";
+    };
+
+    httpOrigins = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        List of allowed HTTP origins for WebSocket listeners. The parameters are
+        interpreted as shell patterns, see
+        {manpage}`glob(7)`.
+      '';
+    };
+
+    acceptProxyIP = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Allow the specified IPs to act as a proxy. Proxies have the ability to
+        overwrite the remote and local connection addresses (via the X-Forwarded-\*
+        HTTP header fields). The special name "localhost" accepts the loopback
+        addresses 127.0.0.0/8 and ::1/128. By default, all IPs are rejected.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "Lines added verbatim to the configuration file.";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.tlsCertificate != null) == (cfg.tlsCertificateKey != null);
+        message = ''
+          services.soju.tlsCertificate and services.soju.tlsCertificateKey
+          must both be specified to enable TLS.
+        '';
+      }
+    ];
+
+    systemd.services.soju = {
+      description = "soju IRC bouncer";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        Restart = "always";
+        ExecStart = "${pkgs.soju}/bin/soju -config ${configFile}";
+        StateDirectory = "soju";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ malte-v ];
+}
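+
+# A hedged usage sketch (not part of the module): a TLS-enabled bouncer on the
+# default listen address; the certificate paths are placeholders taken from
+# the option examples, and both must be set together to satisfy the assertion.
+#
+#   services.soju = {
+#     enable = true;
+#     hostName = "irc.example.com";
+#     tlsCertificate = "/var/host.cert";
+#     tlsCertificateKey = "/var/host.key";
+#   };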
diff --git a/nixpkgs/nixos/modules/services/networking/solanum.nix b/nixpkgs/nixos/modules/services/networking/solanum.nix
new file mode 100644
index 000000000000..07a37279fecc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/solanum.nix
@@ -0,0 +1,109 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption types;
+  inherit (pkgs) solanum util-linux;
+  cfg = config.services.solanum;
+
+  configFile = pkgs.writeText "solanum.conf" cfg.config;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.solanum = {
+
+      enable = mkEnableOption (lib.mdDoc "Solanum IRC daemon");
+
+      config = mkOption {
+        type = types.str;
+        default = ''
+          serverinfo {
+            name = "irc.example.com";
+            sid = "1ix";
+            description = "irc!";
+
+            vhost = "0.0.0.0";
+            vhost6 = "::";
+          };
+
+          listen {
+            host = "0.0.0.0";
+            port = 6667;
+          };
+
+          auth {
+            user = "*@*";
+            class = "users";
+            flags = exceed_limit;
+          };
+          channel {
+            default_split_user_count = 0;
+          };
+        '';
+        description = lib.mdDoc ''
+          Solanum IRC daemon configuration file.
+          check <https://github.com/solanum-ircd/solanum/blob/main/doc/reference.conf> for all options.
+        '';
+      };
+
+      openFilesLimit = mkOption {
+        type = types.int;
+        default = 1024;
+        description = lib.mdDoc ''
+          Maximum number of open files. This limits the number of client and server connections.
+        '';
+      };
+
+      motd = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Solanum MOTD text.
+
+          Solanum will read its MOTD from `/etc/solanum/ircd.motd`.
+          If set, the value of this option will be written to this path.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable (lib.mkMerge [
+    {
+
+      environment.etc."solanum/ircd.conf".source = configFile;
+
+      systemd.services.solanum = {
+        description = "Solanum IRC daemon";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        reloadIfChanged = true;
+        restartTriggers = [
+          configFile
+        ];
+        serviceConfig = {
+          ExecStart = "${solanum}/bin/solanum -foreground -logfile /dev/stdout -configfile /etc/solanum/ircd.conf -pidfile /run/solanum/ircd.pid";
+          ExecReload = "${util-linux}/bin/kill -HUP $MAINPID";
+          DynamicUser = true;
+          User = "solanum";
+          StateDirectory = "solanum";
+          RuntimeDirectory = "solanum";
+          LimitNOFILE = "${toString cfg.openFilesLimit}";
+        };
+      };
+
+    }
+
+    (mkIf (cfg.motd != null) {
+      environment.etc."solanum/ircd.motd".text = cfg.motd;
+    })
+  ]);
+}
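+
+# A hedged usage sketch (not part of the module): the default config above
+# already listens on port 6667; setting motd writes /etc/solanum/ircd.motd.
+#
+#   services.solanum = {
+#     enable = true;
+#     motd = ''
+#       Welcome to irc.example.com!
+#     '';
+#   };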
diff --git a/nixpkgs/nixos/modules/services/networking/spacecookie.nix b/nixpkgs/nixos/modules/services/networking/spacecookie.nix
new file mode 100644
index 000000000000..b2956edfcb7f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/spacecookie.nix
@@ -0,0 +1,216 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.spacecookie;
+
+  spacecookieConfig = {
+    listen = {
+      inherit (cfg) port;
+    };
+  } // cfg.settings;
+
+  format = pkgs.formats.json {};
+
+  configFile = format.generate "spacecookie.json" spacecookieConfig;
+
+in {
+  imports = [
+    (mkRenamedOptionModule [ "services" "spacecookie" "root" ] [ "services" "spacecookie" "settings" "root" ])
+    (mkRenamedOptionModule [ "services" "spacecookie" "hostname" ] [ "services" "spacecookie" "settings" "hostname" ])
+  ];
+
+  options = {
+
+    services.spacecookie = {
+
+      enable = mkEnableOption (lib.mdDoc "spacecookie");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.spacecookie;
+        defaultText = literalExpression "pkgs.spacecookie";
+        example = literalExpression "pkgs.haskellPackages.spacecookie";
+        description = lib.mdDoc ''
+          The spacecookie derivation to use. This can be used to
+          override the used package or to use another version.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open the necessary port in the firewall for spacecookie.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 70;
+        description = lib.mdDoc ''
+          Port the gopher service should be exposed on.
+        '';
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "[::]";
+        description = lib.mdDoc ''
+          Address to listen on. Must be in the
+          `ListenStream=` syntax of
+          [systemd.socket(5)](https://www.freedesktop.org/software/systemd/man/systemd.socket.html).
+        '';
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = format.type;
+
+          options.hostname = mkOption {
+            type = types.str;
+            default = "localhost";
+            description = lib.mdDoc ''
+              The hostname the service is reachable via. Clients
+              will use this hostname for further requests after
+              loading the initial gopher menu.
+            '';
+          };
+
+          options.root = mkOption {
+            type = types.path;
+            default = "/srv/gopher";
+            description = lib.mdDoc ''
+              The directory spacecookie should serve via gopher.
+              Files in there need to be world-readable since
+              the spacecookie service file sets
+              `DynamicUser=true`.
+            '';
+          };
+
+          options.log = {
+            enable = mkEnableOption (lib.mdDoc "logging for spacecookie")
+              // { default = true; example = false; };
+
+            hide-ips = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                If enabled, spacecookie will hide personal
+                information of users like IP addresses from
+                log output.
+              '';
+            };
+
+            hide-time = mkOption {
+              type = types.bool;
+              # since we are starting with systemd anyways
+              # we deviate from the default behavior here:
+              # journald will add timestamps, so no need
+              # to double up.
+              default = true;
+              description = lib.mdDoc ''
+                If enabled, spacecookie will not print timestamps
+                at the beginning of every log line.
+              '';
+            };
+
+            level = mkOption {
+              type = types.enum [
+                "info"
+                "warn"
+                "error"
+              ];
+              default = "info";
+              description = lib.mdDoc ''
+                Log level for the spacecookie service.
+              '';
+            };
+          };
+        };
+
+        description = lib.mdDoc ''
+          Settings for spacecookie. The settings set here are
+          directly translated to the spacecookie JSON config
+          file. See
+          [spacecookie.json(5)](https://sternenseemann.github.io/spacecookie/spacecookie.json.5.html)
+          for explanations of all options.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !(cfg.settings ? user);
+        message = ''
+          spacecookie is started as a normal user, so the setuid
+          feature doesn't work. If you want to run spacecookie as
+          a specific user, set:
+          systemd.services.spacecookie.serviceConfig = {
+            DynamicUser = false;
+            User = "youruser";
+            Group = "yourgroup";
+          }
+        '';
+      }
+      {
+        assertion = !(cfg.settings ? listen || cfg.settings ? port);
+        message = ''
+          The NixOS spacecookie module uses socket activation,
+          so the listen options have no effect. Use the port
+          and address options in services.spacecookie instead.
+        '';
+      }
+    ];
+
+    systemd.sockets.spacecookie = {
+      description = "Socket for the Spacecookie Gopher Server";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "${cfg.address}:${toString cfg.port}" ];
+      socketConfig = {
+        BindIPv6Only = "both";
+      };
+    };
+
+    systemd.services.spacecookie = {
+      description = "Spacecookie Gopher Server";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "spacecookie.socket" ];
+
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${lib.getBin cfg.package}/bin/spacecookie ${configFile}";
+        FileDescriptorStoreMax = 1;
+
+        DynamicUser = true;
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateUsers = true;
+
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+
+        # AF_UNIX for communication with systemd
+        # AF_INET replaced by BindIPv6Only=both
+        RestrictAddressFamilies = "AF_UNIX AF_INET6";
+      };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+  };
+}
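+
+# A hedged usage sketch (not part of the module): serving the default root
+# /srv/gopher on the default gopher port 70 via socket activation, with the
+# firewall opened; the hostname is a placeholder.
+#
+#   services.spacecookie = {
+#     enable = true;
+#     openFirewall = true;
+#     settings.hostname = "gopher.example.com";
+#   };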
diff --git a/nixpkgs/nixos/modules/services/networking/spiped.nix b/nixpkgs/nixos/modules/services/networking/spiped.nix
new file mode 100644
index 000000000000..547317dbcbe2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/spiped.nix
@@ -0,0 +1,221 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.spiped;
+in
+{
+  options = {
+    services.spiped = {
+      enable = mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc "Enable the spiped service module.";
+      };
+
+      config = mkOption {
+        type = types.attrsOf (types.submodule (
+          {
+            options = {
+              encrypt = mkOption {
+                type    = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Take unencrypted connections from the
+                  `source` socket and send encrypted
+                  connections to the `target` socket.
+                '';
+              };
+
+              decrypt = mkOption {
+                type    = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Take encrypted connections from the
+                  `source` socket and send unencrypted
+                  connections to the `target` socket.
+                '';
+              };
+
+              source = mkOption {
+                type    = types.str;
+                description = lib.mdDoc ''
+                  Address on which spiped should listen for incoming
+                  connections.  Must be in one of the following formats:
+                  `/absolute/path/to/unix/socket`,
+                  `host.name:port`,
+                  `[ip.v4.ad.dr]:port` or
+                  `[ipv6::addr]:port` - note that
+                  hostnames are resolved when spiped is launched and are
+                  not re-resolved later; thus if DNS entries change
+                  spiped will continue to connect to the expired
+                  address.
+                '';
+              };
+
+              target = mkOption {
+                type    = types.str;
+                description = lib.mdDoc "Address to which spiped should connect.";
+              };
+
+              keyfile = mkOption {
+                type    = types.path;
+                description = lib.mdDoc ''
+                  Name of a file containing the spiped key. As the
+                  daemon runs as the `spiped` user, the
+                  key file must be somewhere owned by that user. By
+                  default, we recommend putting the keys for any spipe
+                  services in `/var/lib/spiped`.
+                '';
+              };
+
+              timeout = mkOption {
+                type = types.int;
+                default = 5;
+                description = lib.mdDoc ''
+                  Timeout, in seconds, after which an attempt to connect to
+                  the target or a protocol handshake will be aborted (and the
+                  connection dropped) if not completed
+                '';
+              };
+
+              maxConns = mkOption {
+                type = types.int;
+                default = 100;
+                description = lib.mdDoc ''
+                  Limit on the number of simultaneous connections allowed.
+                '';
+              };
+
+              waitForDNS = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Wait for DNS. Normally when `spiped` is
+                  launched it resolves addresses and binds to its source
+                  socket before the parent process returns; with this option
+                  it will daemonize first and retry failed DNS lookups until
+                  they succeed. This allows `spiped` to
+                  launch even if DNS isn't set up yet, but at the expense of
+                  losing the guarantee that once `spiped` has
+                  finished launching it will be ready to create pipes.
+                '';
+              };
+
+              disableKeepalives = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc "Disable transport layer keep-alives.";
+              };
+
+              weakHandshake = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc ''
+                  Use fast/weak handshaking: This reduces the CPU time spent
+                  in the initial connection setup, at the expense of losing
+                  perfect forward secrecy.
+                '';
+              };
+
+              resolveRefresh = mkOption {
+                type = types.int;
+                default = 60;
+                description = lib.mdDoc ''
+                  Resolution refresh time for the target socket, in seconds.
+                '';
+              };
+
+              disableReresolution = mkOption {
+                type = types.bool;
+                default = false;
+                description = lib.mdDoc "Disable target address re-resolution.";
+              };
+            };
+          }
+        ));
+
+        default = {};
+
+        example = literalExpression ''
+          {
+            pipe1 =
+              { keyfile = "/var/lib/spiped/pipe1.key";
+                encrypt = true;
+                source  = "localhost:6000";
+                target  = "endpoint.example.com:7000";
+              };
+            pipe2 =
+              { keyfile = "/var/lib/spiped/pipe2.key";
+                decrypt = true;
+                source  = "0.0.0.0:7000";
+                target  = "localhost:3000";
+              };
+          }
+        '';
+
+        description = lib.mdDoc ''
+          Configuration for a secure pipe daemon. The daemon can be
+          started, stopped, or examined using
+          `systemctl`, under the name
+          `spiped@foo`.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = mapAttrsToList (name: c: {
+      assertion = (c.encrypt -> !c.decrypt) || (c.decrypt -> c.encrypt);
+      message   = "A pipe must either encrypt or decrypt";
+    }) cfg.config;
+
+    users.groups.spiped.gid = config.ids.gids.spiped;
+    users.users.spiped = {
+      description = "Secure Pipe Service user";
+      group       = "spiped";
+      uid         = config.ids.uids.spiped;
+    };
+
+    systemd.services."spiped@" = {
+      description = "Secure pipe '%i'";
+      after       = [ "network.target" ];
+
+      serviceConfig = {
+        Restart   = "always";
+        User      = "spiped";
+        PermissionsStartOnly = true;
+      };
+
+      preStart  = ''
+        cd /var/lib/spiped
+        chmod -R 0660 *
+        chown -R spiped:spiped *
+      '';
+      scriptArgs = "%i";
+      script = "exec ${pkgs.spiped}/bin/spiped -F `cat /etc/spiped/$1.spec`";
+    };
+
+    systemd.tmpfiles.rules = lib.mkIf (cfg.config != { }) [
+      "d /var/lib/spiped -"
+    ];
+
+    # Setup spiped config files
+    environment.etc = mapAttrs' (name: cfg: nameValuePair "spiped/${name}.spec"
+      { text = concatStringsSep " "
+          [ (if cfg.encrypt then "-e" else "-d")        # Mode
+            "-s ${cfg.source}"                          # Source
+            "-t ${cfg.target}"                          # Target
+            "-k ${cfg.keyfile}"                         # Keyfile
+            "-n ${toString cfg.maxConns}"               # Max number of conns
+            "-o ${toString cfg.timeout}"                # Timeout
+            (optionalString cfg.waitForDNS "-D")        # Wait for DNS
+            (optionalString cfg.weakHandshake "-f")     # No PFS
+            (optionalString cfg.disableKeepalives "-j") # Keepalives
+            (if cfg.disableReresolution then "-R"
+              else "-r ${toString cfg.resolveRefresh}")
+          ];
+      }) cfg.config;
+  };
+}
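+
+# A hedged note (not part of the module): each pipe's keyfile is an arbitrary
+# shared secret that must be identical on both ends; the spiped documentation
+# suggests generating it with something like
+#
+#   dd if=/dev/urandom bs=32 count=1 of=/var/lib/spiped/pipe1.key
+#
+# Each configured pipe is then managed as a templated unit, e.g.
+# `systemctl start spiped@pipe1`.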
diff --git a/nixpkgs/nixos/modules/services/networking/squid.nix b/nixpkgs/nixos/modules/services/networking/squid.nix
new file mode 100644
index 000000000000..f93bcf19f2b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/squid.nix
@@ -0,0 +1,187 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.squid;
+
+
+  squidConfig = pkgs.writeText "squid.conf"
+    (if cfg.configText != null then cfg.configText else
+    ''
+    #
+    # Recommended minimum configuration (3.5):
+    #
+
+    # Example rule allowing access from your local networks.
+    # Adapt to list your (internal) IP networks from where browsing
+    # should be allowed
+    acl localnet src 10.0.0.0/8     # RFC 1918 possible internal network
+    acl localnet src 172.16.0.0/12  # RFC 1918 possible internal network
+    acl localnet src 192.168.0.0/16 # RFC 1918 possible internal network
+    acl localnet src 169.254.0.0/16 # RFC 3927 link-local (directly plugged) machines
+    acl localnet src fc00::/7       # RFC 4193 local private network range
+    acl localnet src fe80::/10      # RFC 4291 link-local (directly plugged) machines
+
+    acl SSL_ports port 443          # https
+    acl Safe_ports port 80          # http
+    acl Safe_ports port 21          # ftp
+    acl Safe_ports port 443         # https
+    acl Safe_ports port 70          # gopher
+    acl Safe_ports port 210         # wais
+    acl Safe_ports port 1025-65535  # unregistered ports
+    acl Safe_ports port 280         # http-mgmt
+    acl Safe_ports port 488         # gss-http
+    acl Safe_ports port 591         # filemaker
+    acl Safe_ports port 777         # multiling http
+    acl CONNECT method CONNECT
+
+    #
+    # Recommended minimum Access Permission configuration:
+    #
+    # Deny requests to certain unsafe ports
+    http_access deny !Safe_ports
+
+    # Deny CONNECT to other than secure SSL ports
+    http_access deny CONNECT !SSL_ports
+
+    # Only allow cachemgr access from localhost
+    http_access allow localhost manager
+    http_access deny manager
+
+    # We strongly recommend the following be uncommented to protect innocent
+    # web applications running on the proxy server who think the only
+    # one who can access services on "localhost" is a local user
+    http_access deny to_localhost
+
+    # Application logs to syslog, access and store logs have specific files
+    cache_log       syslog
+    access_log      stdio:/var/log/squid/access.log
+    cache_store_log stdio:/var/log/squid/store.log
+
+    # Required by systemd service
+    pid_filename    /run/squid.pid
+
+    # Run as user and group squid
+    cache_effective_user squid squid
+
+    #
+    # INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
+    #
+    ${cfg.extraConfig}
+
+    # Example rule allowing access from your local networks.
+    # Adapt localnet in the ACL section to list your (internal) IP networks
+    # from where browsing should be allowed
+    http_access allow localnet
+    http_access allow localhost
+
+    # And finally deny all other access to this proxy
+    http_access deny all
+
+    # Squid normally listens to port 3128
+    http_port ${
+      optionalString (cfg.proxyAddress != null) "${cfg.proxyAddress}:"
+    }${toString cfg.proxyPort}
+
+    # Leave coredumps in the first cache dir
+    coredump_dir /var/cache/squid
+
+    #
+    # Add any of your own refresh_pattern entries above these.
+    #
+    refresh_pattern ^ftp:           1440    20%     10080
+    refresh_pattern ^gopher:        1440    0%      1440
+    refresh_pattern -i (/cgi-bin/|\?) 0     0%      0
+    refresh_pattern .               0       20%     4320
+  '');
+
+in
+
+{
+
+  options = {
+
+    services.squid = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to run squid web proxy.";
+      };
+
+      package = mkOption {
+        default = pkgs.squid;
+        defaultText = literalExpression "pkgs.squid";
+        type = types.package;
+        description = lib.mdDoc "Squid package to use.";
+      };
+
+      proxyAddress = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "IP address on which squid will listen.";
+      };
+
+      proxyPort = mkOption {
+        type = types.port;
+        default = 3128;
+        description = lib.mdDoc "TCP port on which squid will listen.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Squid configuration. Contents will be added
+          verbatim to the configuration file.
+        '';
+      };
+
+      configText = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Verbatim contents of squid.conf. If null (default), use the
+          autogenerated file from NixOS instead.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.squid = {
+      isSystemUser = true;
+      group = "squid";
+      home = "/var/cache/squid";
+      createHome = true;
+    };
+
+    users.groups.squid = {};
+
+    systemd.services.squid = {
+      description = "Squid caching proxy";
+      documentation = [ "man:squid(8)" ];
+      after = [ "network.target" "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target"];
+      preStart = ''
+        mkdir -p "/var/log/squid"
+        chown squid:squid "/var/log/squid"
+        ${cfg.package}/bin/squid --foreground -z -f ${squidConfig}
+      '';
+      serviceConfig = {
+        PIDFile="/run/squid.pid";
+        ExecStart  = "${cfg.package}/bin/squid --foreground -YCs -f ${squidConfig}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        KillMode="mixed";
+        NotifyAccess="all";
+      };
+    };
+
+  };
+
+}
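+
+# A hedged usage sketch (not part of the module): a proxy bound to loopback on
+# the default port, with an extra ACL appended into the "insert your own rules"
+# section of the generated squid.conf; the office subnet is a placeholder.
+#
+#   services.squid = {
+#     enable = true;
+#     proxyAddress = "127.0.0.1";
+#     proxyPort = 3128;
+#     extraConfig = ''
+#       acl office src 192.0.2.0/24
+#       http_access allow office
+#     '';
+#   };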
diff --git a/nixpkgs/nixos/modules/services/networking/ssh/lshd.nix b/nixpkgs/nixos/modules/services/networking/ssh/lshd.nix
new file mode 100644
index 000000000000..129e42055514
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ssh/lshd.nix
@@ -0,0 +1,187 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) lsh;
+
+  cfg = config.services.lshd;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.lshd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the GNU lshd SSH2 daemon, which allows
+          secure remote login.
+        '';
+      };
+
+      portNumber = mkOption {
+        default = 22;
+        type = types.port;
+        description = lib.mdDoc ''
+          The port on which to listen for connections.
+        '';
+      };
+
+      interfaces = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of network interfaces on which to listen for connections.
+          When providing the empty list, `[]`, lshd listens on all
+          network interfaces.
+        '';
+        example = [ "localhost" "1.2.3.4:443" ];
+      };
+
+      hostKey = mkOption {
+        default = "/etc/lsh/host-key";
+        type = types.str;
+        description = lib.mdDoc ''
+          Path to the server's private key.  Note that this key must
+          have been created, e.g., using "lsh-keygen --server |
+          lsh-writekey --server", so that you can run lshd.
+        '';
+      };
+
+      syslog = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable syslog output.";
+      };
+
+      passwordAuthentication = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable password authentication.";
+      };
+
+      publicKeyAuthentication = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable public key authentication.";
+      };
+
+      rootLogin = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable remote root login.";
+      };
+
+      loginShell = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          If non-null, override the default login shell with the
+          specified value.
+        '';
+        example = "/nix/store/xyz-bash-10.0/bin/bash10";
+      };
+
+      srpKeyExchange = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable SRP key exchange and user authentication.
+        '';
+      };
+
+      tcpForwarding = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable TCP/IP forwarding.";
+      };
+
+      x11Forwarding = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable X11 forwarding.";
+      };
+
+      subsystems = mkOption {
+        type = types.listOf types.path;
+        description = lib.mdDoc ''
+          List of subsystem-path pairs, where the head of the pair
+          denotes the subsystem name, and the tail denotes the path to
+          an executable implementing it.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.lshd.subsystems = [ ["sftp" "${pkgs.lsh}/sbin/sftp-server"] ];
+
+    systemd.services.lshd = {
+      description = "GNU lshd SSH2 daemon";
+
+      after = [ "network.target" ];
+
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        LD_LIBRARY_PATH = config.system.nssModules.path;
+      };
+
+      preStart = ''
+        test -d /etc/lsh || mkdir -m 0755 -p /etc/lsh
+        test -d /var/spool/lsh || mkdir -m 0755 -p /var/spool/lsh
+
+        if ! test -f /var/spool/lsh/yarrow-seed-file
+        then
+            # XXX: It would be nice to provide feedback to the
+            # user when this fails, so that they can retry it
+            # manually.
+            ${lsh}/bin/lsh-make-seed --sloppy \
+               -o /var/spool/lsh/yarrow-seed-file
+        fi
+
+        if ! test -f "${cfg.hostKey}"
+        then
+            ${lsh}/bin/lsh-keygen --server | \
+            ${lsh}/bin/lsh-writekey --server -o "${cfg.hostKey}"
+        fi
+      '';
+
+      script = with cfg; ''
+        ${lsh}/sbin/lshd --daemonic \
+          --password-helper="${lsh}/sbin/lsh-pam-checkpw" \
+          -p ${toString portNumber} \
+          ${optionalString (interfaces != []) (concatMapStringsSep " " (i: "--interface=\"${i}\"") interfaces)} \
+          -h "${hostKey}" \
+          ${optionalString (!syslog) "--no-syslog" } \
+          ${if passwordAuthentication then "--password" else "--no-password" } \
+          ${if publicKeyAuthentication then "--publickey" else "--no-publickey" } \
+          ${if rootLogin then "--root-login" else "--no-root-login" } \
+          ${optionalString (loginShell != null) "--login-shell=\"${loginShell}\"" } \
+          ${if srpKeyExchange then "--srp-keyexchange" else "--no-srp-keyexchange" } \
+          ${if !tcpForwarding then "--no-tcpip-forward" else "--tcpip-forward"} \
+          ${if x11Forwarding then "--x11-forward" else "--no-x11-forward" } \
+          --subsystems=${concatStringsSep ","
+                                          (map (pair: (head pair) + "=" +
+                                                      (head (tail pair)))
+                                               subsystems)}
+      '';
+    };
+
+    security.pam.services.lshd = {};
+  };
+}
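As a usage sketch (illustrative only, not part of this patch), the lshd module above could be enabled from a NixOS configuration along these lines; the port and interface values are placeholders:

    {
      services.lshd = {
        enable = true;
        portNumber = 2222;            # listen on a non-default port
        interfaces = [ "localhost" ]; # only listen locally
        rootLogin = false;            # keep the default of denying root logins
      };
    }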
diff --git a/nixpkgs/nixos/modules/services/networking/ssh/sshd.nix b/nixpkgs/nixos/modules/services/networking/ssh/sshd.nix
new file mode 100644
index 000000000000..14bc59089adf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ssh/sshd.nix
@@ -0,0 +1,709 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  # The splicing information needed for nativeBuildInputs isn't available
+  # on the derivations likely to be used as `cfgc.package`.
+  # This middle-ground solution ensures that *an* sshd can do its basic validation
+  # of the configuration.
+  validationPackage = if pkgs.stdenv.buildPlatform == pkgs.stdenv.hostPlatform
+    then cfgc.package
+    else pkgs.buildPackages.openssh;
+
+  # Don't use the "=" operator as the key/value separator; sshd_config expects "Key value".
+  settingsFormat =
+    let
+      # reports boolean as yes / no
+      mkValueString = with lib; v:
+            if isInt           v then toString v
+            else if isString   v then v
+            else if true  ==   v then "yes"
+            else if false ==   v then "no"
+            else throw "unsupported type ${builtins.typeOf v}: ${(lib.generators.toPretty {}) v}";
+
+      base = pkgs.formats.keyValue {
+        mkKeyValue = lib.generators.mkKeyValueDefault { inherit mkValueString; } " ";
+      };
+      # OpenSSH is very inconsistent with options that can take multiple values.
+      # For some of them, they can simply appear multiple times and are appended, for others the
+      # values must be separated by whitespace or even commas.
+      # Consult either sshd_config(5) or, as a last resort, the OpenSSH source that parses
+      # the options, servconf.c:process_server_config_line_depth(), to determine the right "mode"
+      # for each. Fortunately, this is documented for most of them in the manpage.
+      commaSeparated = [ "Ciphers" "KexAlgorithms" "Macs" ];
+      spaceSeparated = [ "AuthorizedKeysFile" "AllowGroups" "AllowUsers" "DenyGroups" "DenyUsers" ];
+    in {
+      inherit (base) type;
+      generate = name: value:
+        let transformedValue = mapAttrs (key: val:
+          if isList val then
+            if elem key commaSeparated then concatStringsSep "," val
+            else if elem key spaceSeparated then concatStringsSep " " val
+            else throw "list value for unknown key ${key}: ${(lib.generators.toPretty {}) val}"
+          else
+            val
+          ) value;
+        in
+          base.generate name transformedValue;
+    };
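+  # Illustrative example (not part of this module): with
+  #   settings = { PasswordAuthentication = false; Ciphers = [ "aes256-ctr" "aes128-ctr" ]; };
+  # the generator above would emit sshd_config lines along the lines of
+  #   PasswordAuthentication no
+  #   Ciphers aes256-ctr,aes128-ctr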
+
+  configFile = settingsFormat.generate "sshd.conf-settings" (filterAttrs (n: v: v != null) cfg.settings);
+  sshconf = pkgs.runCommand "sshd.conf-final" { } ''
+    cat ${configFile} - >$out <<EOL
+    ${cfg.extraConfig}
+    EOL
+  '';
+
+  cfg  = config.services.openssh;
+  cfgc = config.programs.ssh;
+
+
+  nssModulesPath = config.system.nssModules.path;
+
+  userOptions = {
+
+    options.openssh.authorizedKeys = {
+      keys = mkOption {
+        type = types.listOf types.singleLineStr;
+        default = [];
+        description = lib.mdDoc ''
+          A list of verbatim OpenSSH public keys that should be added to the
+          user's authorized keys. The keys are added to a file that the SSH
+          daemon reads in addition to the user's authorized_keys file.
+          You can combine the `keys` and
+          `keyFiles` options.
+          Warning: If you are using `NixOps` then don't use this
+          option since it will replace the key required for deployment via ssh.
+        '';
+        example = [
+          "ssh-rsa AAAAB3NzaC1yc2etc/etc/etcjwrsh8e596z6J0l7 example@host"
+          "ssh-ed25519 AAAAC3NzaCetcetera/etceteraJZMfk3QPfQ foo@bar"
+        ];
+      };
+
+      keyFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          A list of files each containing one OpenSSH public key that should be
+          added to the user's authorized keys. The contents of the files are
+          read at build time and added to a file that the SSH daemon reads in
+          addition to the user's authorized_keys file. You can combine the
+          `keyFiles` and `keys` options.
+        '';
+      };
+    };
+
+    options.openssh.authorizedPrincipals = mkOption {
+      type = with types; listOf types.singleLineStr;
+      default = [];
+      description = mdDoc ''
+        A list of verbatim principal names that should be added to the user's
+        authorized principals.
+      '';
+      example = [
+        "example@host"
+        "foo@bar"
+      ];
+    };
+
+  };
+
+  authKeysFiles = let
+    mkAuthKeyFile = u: nameValuePair "ssh/authorized_keys.d/${u.name}" {
+      mode = "0444";
+      source = pkgs.writeText "${u.name}-authorized_keys" ''
+        ${concatStringsSep "\n" u.openssh.authorizedKeys.keys}
+        ${concatMapStrings (f: readFile f + "\n") u.openssh.authorizedKeys.keyFiles}
+      '';
+    };
+    usersWithKeys = attrValues (flip filterAttrs config.users.users (n: u:
+      length u.openssh.authorizedKeys.keys != 0 || length u.openssh.authorizedKeys.keyFiles != 0
+    ));
+  in listToAttrs (map mkAuthKeyFile usersWithKeys);
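+  # Illustrative example (not part of this module): a hypothetical user declared with
+  #   users.users.alice.openssh.authorizedKeys.keys = [ "ssh-ed25519 AAAA... alice@host" ];
+  # ends up with an entry environment.etc."ssh/authorized_keys.d/alice" containing that key,
+  # which sshd reads via the AuthorizedKeysFile directive configured further below.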
+
+  authPrincipalsFiles = let
+    mkAuthPrincipalsFile = u: nameValuePair "ssh/authorized_principals.d/${u.name}" {
+      mode = "0444";
+      text = concatStringsSep "\n" u.openssh.authorizedPrincipals;
+    };
+    usersWithPrincipals = attrValues (flip filterAttrs config.users.users (n: u:
+      length u.openssh.authorizedPrincipals != 0
+    ));
+  in listToAttrs (map mkAuthPrincipalsFile usersWithPrincipals);
+
+in
+
+{
+  imports = [
+    (mkAliasOptionModuleMD [ "services" "sshd" "enable" ] [ "services" "openssh" "enable" ])
+    (mkAliasOptionModuleMD [ "services" "openssh" "knownHosts" ] [ "programs" "ssh" "knownHosts" ])
+    (mkRenamedOptionModule [ "services" "openssh" "challengeResponseAuthentication" ] [ "services" "openssh" "kbdInteractiveAuthentication" ])
+
+    (mkRenamedOptionModule [ "services" "openssh" "kbdInteractiveAuthentication" ] [  "services" "openssh" "settings" "KbdInteractiveAuthentication" ])
+    (mkRenamedOptionModule [ "services" "openssh" "passwordAuthentication" ] [  "services" "openssh" "settings" "PasswordAuthentication" ])
+    (mkRenamedOptionModule [ "services" "openssh" "useDns" ] [  "services" "openssh" "settings" "UseDns" ])
+    (mkRenamedOptionModule [ "services" "openssh" "permitRootLogin" ] [  "services" "openssh" "settings" "PermitRootLogin" ])
+    (mkRenamedOptionModule [ "services" "openssh" "logLevel" ] [  "services" "openssh" "settings" "LogLevel" ])
+    (mkRenamedOptionModule [ "services" "openssh" "macs" ] [  "services" "openssh" "settings" "Macs" ])
+    (mkRenamedOptionModule [ "services" "openssh" "ciphers" ] [  "services" "openssh" "settings" "Ciphers" ])
+    (mkRenamedOptionModule [ "services" "openssh" "kexAlgorithms" ] [  "services" "openssh" "settings" "KexAlgorithms" ])
+    (mkRenamedOptionModule [ "services" "openssh" "gatewayPorts" ] [  "services" "openssh" "settings" "GatewayPorts" ])
+    (mkRenamedOptionModule [ "services" "openssh" "forwardX11" ] [  "services" "openssh" "settings" "X11Forwarding" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    services.openssh = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the OpenSSH secure shell daemon, which
+          allows secure remote logins.
+        '';
+      };
+
+      startWhenNeeded = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If set, {command}`sshd` is socket-activated; that
+          is, instead of having it permanently running as a daemon,
+          systemd will start an instance for each incoming connection.
+        '';
+      };
+
+      allowSFTP = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the SFTP subsystem in the SSH daemon.  This
+          enables the use of commands such as {command}`sftp` and
+          {command}`sshfs`.
+        '';
+      };
+
+      sftpServerExecutable = mkOption {
+        type = types.str;
+        example = "internal-sftp";
+        description = lib.mdDoc ''
+          The sftp server executable.  Can be a path or "internal-sftp" to use
+          the sftp server built into the sshd binary.
+        '';
+      };
+
+      sftpFlags = mkOption {
+        type = with types; listOf str;
+        default = [];
+        example = [ "-f AUTHPRIV" "-l INFO" ];
+        description = lib.mdDoc ''
+          Commandline flags to add to sftp-server.
+        '';
+      };
+
+      ports = mkOption {
+        type = types.listOf types.port;
+        default = [22];
+        description = lib.mdDoc ''
+          Specifies on which ports the SSH daemon listens.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to automatically open the specified ports in the firewall.
+        '';
+      };
+
+      listenAddresses = mkOption {
+        type = with types; listOf (submodule {
+          options = {
+            addr = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = lib.mdDoc ''
+                Host, IPv4 or IPv6 address to listen to.
+              '';
+            };
+            port = mkOption {
+              type = types.nullOr types.int;
+              default = null;
+              description = lib.mdDoc ''
+                Port to listen to.
+              '';
+            };
+          };
+        });
+        default = [];
+        example = [ { addr = "192.168.3.1"; port = 22; } { addr = "0.0.0.0"; port = 64022; } ];
+        description = lib.mdDoc ''
+          List of addresses and ports to listen on (the ListenAddress directive
+          in the config). If no port is specified for an address, sshd listens
+          on all ports specified by the `ports` option.
+          NOTE: this overrides the default of listening on all local addresses and port 22.
+          NOTE: setting this option does not automatically open the given ports
+          in the firewall configuration.
+        '';
+      };
+
+      hostKeys = mkOption {
+        type = types.listOf types.attrs;
+        default =
+          [ { type = "rsa"; bits = 4096; path = "/etc/ssh/ssh_host_rsa_key"; }
+            { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; }
+          ];
+        example =
+          [ { type = "rsa"; bits = 4096; path = "/etc/ssh/ssh_host_rsa_key"; rounds = 100; openSSHFormat = true; }
+            { type = "ed25519"; path = "/etc/ssh/ssh_host_ed25519_key"; rounds = 100; comment = "key comment"; }
+          ];
+        description = lib.mdDoc ''
+          NixOS can automatically generate SSH host keys.  This option
+          specifies the path, type and size of each key.  See
+          {manpage}`ssh-keygen(1)` for supported types
+          and sizes.
+        '';
+      };
+
+      banner = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Message to display to the remote user before authentication is allowed.
+        '';
+      };
+
+      authorizedKeysFiles = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Specifies the files from which sshd reads users' authorized keys on the host.
+
+          This is an advanced option. If you're looking to configure user
+          keys, you can generally use [](#opt-users.users._name_.openssh.authorizedKeys.keys)
+          or [](#opt-users.users._name_.openssh.authorizedKeys.keyFiles).
+
+          These are paths relative to the host root file system or home
+          directories and they are subject to certain token expansion rules.
+          See AuthorizedKeysFile in man sshd_config for details.
+        '';
+      };
+
+      authorizedKeysCommand = mkOption {
+        type = types.str;
+        default = "none";
+        description = lib.mdDoc ''
+          Specifies a program to be used to look up the user's public
+          keys. The program must be owned by root, not writable by group
+          or others and specified by an absolute path.
+        '';
+      };
+
+      authorizedKeysCommandUser = mkOption {
+        type = types.str;
+        default = "nobody";
+        description = lib.mdDoc ''
+          Specifies the user under whose account the AuthorizedKeysCommand
+          is run. It is recommended to use a dedicated user that has no
+          other role on the host than running authorized keys commands.
+        '';
+      };
+
+
+
+      settings = mkOption {
+        description = lib.mdDoc "Configuration for `sshd_config(5)`.";
+        default = { };
+        example = literalExpression ''
+          {
+            UseDns = true;
+            PasswordAuthentication = false;
+          }
+        '';
+        type = types.submodule ({name, ...}: {
+          freeformType = settingsFormat.type;
+          options = {
+            AuthorizedPrincipalsFile = mkOption {
+              type = types.str;
+              default = "none"; # upstream default
+              description = lib.mdDoc ''
+                Specifies a file that lists principal names that are accepted for certificate
+                authentication. The default is `"none"`, i.e. no principals file is used.
+              '';
+            };
+            LogLevel = mkOption {
+              type = types.enum [ "QUIET" "FATAL" "ERROR" "INFO" "VERBOSE" "DEBUG" "DEBUG1" "DEBUG2" "DEBUG3" ];
+              default = "INFO"; # upstream default
+              description = lib.mdDoc ''
+                Gives the verbosity level that is used when logging messages from sshd(8). Logging with a DEBUG level
+                violates the privacy of users and is not recommended.
+              '';
+            };
+            UseDns = mkOption {
+              type = types.bool;
+              # apply if cfg.useDns then "yes" else "no"
+              default = false;
+              description = lib.mdDoc ''
+                Specifies whether sshd(8) should look up the remote host name, and to check that the resolved host name for
+                the remote IP address maps back to the very same IP address.
+                If this option is set to no (the default) then only addresses and not host names may be used in
+                ~/.ssh/authorized_keys `from` options and sshd_config `Match Host` directives.
+              '';
+            };
+            X11Forwarding = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether to allow X11 connections to be forwarded.
+              '';
+            };
+            PasswordAuthentication = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Specifies whether password authentication is allowed.
+              '';
+            };
+            PermitRootLogin = mkOption {
+              default = "prohibit-password";
+              type = types.enum ["yes" "without-password" "prohibit-password" "forced-commands-only" "no"];
+              description = lib.mdDoc ''
+                Whether the root user can log in using ssh.
+              '';
+            };
+            KbdInteractiveAuthentication = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Specifies whether keyboard-interactive authentication is allowed.
+              '';
+            };
+            GatewayPorts = mkOption {
+              type = types.str;
+              default = "no";
+              description = lib.mdDoc ''
+                Specifies whether remote hosts are allowed to connect to
+                ports forwarded for the client.  See
+                {manpage}`sshd_config(5)`.
+              '';
+            };
+            KexAlgorithms = mkOption {
+              type = types.listOf types.str;
+              default = [
+                "sntrup761x25519-sha512@openssh.com"
+                "curve25519-sha256"
+                "curve25519-sha256@libssh.org"
+                "diffie-hellman-group-exchange-sha256"
+              ];
+              description = lib.mdDoc ''
+                Allowed key exchange algorithms
+
+                Uses the lower bound recommended in both
+                <https://stribika.github.io/2015/01/04/secure-secure-shell.html>
+                and
+                <https://infosec.mozilla.org/guidelines/openssh#modern-openssh-67>
+              '';
+            };
+            Macs = mkOption {
+              type = types.listOf types.str;
+              default = [
+                "hmac-sha2-512-etm@openssh.com"
+                "hmac-sha2-256-etm@openssh.com"
+                "umac-128-etm@openssh.com"
+              ];
+              description = lib.mdDoc ''
+                Allowed MACs
+
+                Defaults to recommended settings from both
+                <https://stribika.github.io/2015/01/04/secure-secure-shell.html>
+                and
+                <https://infosec.mozilla.org/guidelines/openssh#modern-openssh-67>
+              '';
+            };
+            StrictModes = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Whether sshd should check file modes and ownership of directories.
+              '';
+            };
+            Ciphers = mkOption {
+              type = types.listOf types.str;
+              default = [
+                "chacha20-poly1305@openssh.com"
+                "aes256-gcm@openssh.com"
+                "aes128-gcm@openssh.com"
+                "aes256-ctr"
+                "aes192-ctr"
+                "aes128-ctr"
+              ];
+              description = lib.mdDoc ''
+                Allowed ciphers
+
+                Defaults to recommended settings from both
+                <https://stribika.github.io/2015/01/04/secure-secure-shell.html>
+                and
+                <https://infosec.mozilla.org/guidelines/openssh#modern-openssh-67>
+              '';
+            };
+            AllowUsers = mkOption {
+              type = with types; nullOr (listOf str);
+              default = null;
+              description = lib.mdDoc ''
+                If specified, login is allowed only for the listed users.
+                See {manpage}`sshd_config(5)` for details.
+              '';
+            };
+            DenyUsers = mkOption {
+              type = with types; nullOr (listOf str);
+              default = null;
+              description = lib.mdDoc ''
+                If specified, login is denied for all listed users. Takes
+                precedence over [](#opt-services.openssh.settings.AllowUsers).
+                See {manpage}`sshd_config(5)` for details.
+              '';
+            };
+            AllowGroups = mkOption {
+              type = with types; nullOr (listOf str);
+              default = null;
+              description = lib.mdDoc ''
+                If specified, login is allowed only for users part of the
+                listed groups.
+                See {manpage}`sshd_config(5)` for details.
+              '';
+            };
+            DenyGroups = mkOption {
+              type = with types; nullOr (listOf str);
+              default = null;
+              description = lib.mdDoc ''
+                If specified, login is denied for all users part of the listed
+                groups. Takes precedence over
+                [](#opt-services.openssh.settings.AllowGroups). See
+                {manpage}`sshd_config(5)` for details.
+              '';
+            };
+          };
+        });
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Verbatim contents of {file}`sshd_config`.";
+      };
+
+      moduliFile = mkOption {
+        example = "/etc/my-local-ssh-moduli;";
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to `moduli` file to install in
+          `/etc/ssh/moduli`. If this option is unset, then
+          the `moduli` file shipped with OpenSSH will be used.
+        '';
+      };
+    };
+
+    users.users = mkOption {
+      type = with types; attrsOf (submodule userOptions);
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users = {
+      sshd = {
+        isSystemUser = true;
+        group = "sshd";
+        description = "SSH privilege separation user";
+      };
+    } // (optionalAttrs (cfg.authorizedKeysCommand != "none") {
+      ${cfg.authorizedKeysCommandUser} = {};
+    });
+    users.groups.sshd = {};
+
+    services.openssh.moduliFile = mkDefault "${cfgc.package}/etc/ssh/moduli";
+    services.openssh.sftpServerExecutable = mkDefault "${cfgc.package}/libexec/sftp-server";
+
+    environment.etc = authKeysFiles // authPrincipalsFiles //
+      { "ssh/moduli".source = cfg.moduliFile;
+        "ssh/sshd_config".source = sshconf;
+      };
+
+    systemd =
+      let
+        service =
+          { description = "SSH Daemon";
+            wantedBy = optional (!cfg.startWhenNeeded) "multi-user.target";
+            after = [ "network.target" ];
+            stopIfChanged = false;
+            path = [ cfgc.package pkgs.gawk ];
+            environment.LD_LIBRARY_PATH = nssModulesPath;
+
+            restartTriggers = optionals (!cfg.startWhenNeeded) [
+              config.environment.etc."ssh/sshd_config".source
+            ];
+
+            preStart =
+              ''
+                # Make sure we don't write to stdout, since in case of
+                # socket activation, it goes to the remote side (#19589).
+                exec >&2
+
+                ${flip concatMapStrings cfg.hostKeys (k: ''
+                  if ! [ -s "${k.path}" ]; then
+                      if ! [ -h "${k.path}" ]; then
+                          rm -f "${k.path}"
+                      fi
+                      mkdir -m 0755 -p "$(dirname '${k.path}')"
+                      ssh-keygen \
+                        -t "${k.type}" \
+                        ${optionalString (k ? bits) "-b ${toString k.bits}"} \
+                        ${optionalString (k ? rounds) "-a ${toString k.rounds}"} \
+                        ${optionalString (k ? comment) "-C '${k.comment}'"} \
+                        ${optionalString (k ? openSSHFormat && k.openSSHFormat) "-o"} \
+                        -f "${k.path}" \
+                        -N ""
+                  fi
+                '')}
+              '';
+
+            serviceConfig =
+              { ExecStart =
+                  (optionalString cfg.startWhenNeeded "-") +
+                  "${cfgc.package}/bin/sshd " + (optionalString cfg.startWhenNeeded "-i ") +
+                  "-D " +  # don't detach into a daemon process
+                  "-f /etc/ssh/sshd_config";
+                KillMode = "process";
+              } // (if cfg.startWhenNeeded then {
+                StandardInput = "socket";
+                StandardError = "journal";
+              } else {
+                Restart = "always";
+                Type = "simple";
+              });
+
+          };
+      in
+
+      if cfg.startWhenNeeded then {
+
+        sockets.sshd =
+          { description = "SSH Socket";
+            wantedBy = [ "sockets.target" ];
+            socketConfig.ListenStream = if cfg.listenAddresses != [] then
+              map (l: "${l.addr}:${toString (if l.port != null then l.port else 22)}") cfg.listenAddresses
+            else
+              cfg.ports;
+            socketConfig.Accept = true;
+            # Prevent brute-force attacks from shutting down socket
+            socketConfig.TriggerLimitIntervalSec = 0;
+          };
+
+        services."sshd@" = service;
+
+      } else {
+
+        services.sshd = service;
+
+      };
+
+    networking.firewall.allowedTCPPorts = optionals cfg.openFirewall cfg.ports;
+
+    security.pam.services.sshd =
+      { startSession = true;
+        showMotd = true;
+        unixAuth = cfg.settings.PasswordAuthentication;
+      };
+
+    # These values are merged with the ones defined externally, see:
+    # https://github.com/NixOS/nixpkgs/pull/10155
+    # https://github.com/NixOS/nixpkgs/pull/41745
+    services.openssh.authorizedKeysFiles =
+      [ "%h/.ssh/authorized_keys" "/etc/ssh/authorized_keys.d/%u" ];
+
+    services.openssh.settings.AuthorizedPrincipalsFile = mkIf (authPrincipalsFiles != {}) "/etc/ssh/authorized_principals.d/%u";
+
+    services.openssh.extraConfig = mkOrder 0
+      ''
+        UsePAM yes
+
+        Banner ${if cfg.banner == null then "none" else pkgs.writeText "ssh_banner" cfg.banner}
+
+        AddressFamily ${if config.networking.enableIPv6 then "any" else "inet"}
+        ${concatMapStrings (port: ''
+          Port ${toString port}
+        '') cfg.ports}
+
+        ${concatMapStrings ({ port, addr, ... }: ''
+          ListenAddress ${addr}${optionalString (port != null) (":" + toString port)}
+        '') cfg.listenAddresses}
+
+        ${optionalString cfgc.setXAuthLocation ''
+            XAuthLocation ${pkgs.xorg.xauth}/bin/xauth
+        ''}
+        ${optionalString cfg.allowSFTP ''
+          Subsystem sftp ${cfg.sftpServerExecutable} ${concatStringsSep " " cfg.sftpFlags}
+        ''}
+        PrintMotd no # handled by pam_motd
+        AuthorizedKeysFile ${toString cfg.authorizedKeysFiles}
+        ${optionalString (cfg.authorizedKeysCommand != "none") ''
+          AuthorizedKeysCommand ${cfg.authorizedKeysCommand}
+          AuthorizedKeysCommandUser ${cfg.authorizedKeysCommandUser}
+        ''}
+
+        ${flip concatMapStrings cfg.hostKeys (k: ''
+          HostKey ${k.path}
+        '')}
+      '';
+
+    system.checks = [
+      (pkgs.runCommand "check-sshd-config"
+        {
+          nativeBuildInputs = [ validationPackage ];
+        } ''
+        ${concatMapStringsSep "\n"
+          (lport: "sshd -G -T -C lport=${toString lport} -f ${sshconf} > /dev/null")
+          cfg.ports}
+        ${concatMapStringsSep "\n"
+          (la: "sshd -G -T -C ${escapeShellArg "laddr=${la.addr},lport=${toString la.port}"} -f ${sshconf} > /dev/null")
+          cfg.listenAddresses}
+        touch $out
+      '')
+    ];
+
+    assertions = [{ assertion = if cfg.settings.X11Forwarding then cfgc.setXAuthLocation else true;
+                    message = "cannot enable X11 forwarding without setting xauth location";}
+                  (let
+                    duplicates =
+                      # Filter out the groups with more than 1 element
+                      lib.filter (l: lib.length l > 1) (
+                        # Grab the groups, we don't care about the group identifiers
+                        lib.attrValues (
+                          # Group the settings that are the same in lower case
+                          lib.groupBy lib.strings.toLower (attrNames cfg.settings)
+                        )
+                      );
+                    formattedDuplicates = lib.concatMapStringsSep ", " (dupl: "(${lib.concatStringsSep ", " dupl})") duplicates;
+                  in
+                  {
+                    assertion = lib.length duplicates == 0;
+                    message = ''Duplicate sshd config key; does your capitalization match the option's? Duplicate keys: ${formattedDuplicates}'';
+                  })]
+      ++ forEach cfg.listenAddresses ({ addr, ... }: {
+        assertion = addr != null;
+        message = "addr must be specified in each listenAddresses entry";
+      });
+  };
+
+}
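As a usage sketch (illustrative only, not part of this patch), the sshd module above could be wired into a NixOS configuration roughly like this; the port, setting values, user name and key are placeholders:

    {
      services.openssh = {
        enable = true;
        ports = [ 22 ];
        openFirewall = true;
        settings = {
          PasswordAuthentication = false;
          PermitRootLogin = "prohibit-password";
        };
      };
      # keys end up in /etc/ssh/authorized_keys.d/alice, as described above
      users.users.alice.openssh.authorizedKeys.keys = [
        "ssh-ed25519 AAAAC3NzaCetcetera/etceteraJZMfk3QPfQ alice@example"
      ];
    }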
diff --git a/nixpkgs/nixos/modules/services/networking/sslh.nix b/nixpkgs/nixos/modules/services/networking/sslh.nix
new file mode 100644
index 000000000000..dd29db510020
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/sslh.nix
@@ -0,0 +1,227 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sslh;
+  user = "sslh";
+
+  configFormat = pkgs.formats.libconfig {};
+  configFile = configFormat.generate "sslh.conf" cfg.settings;
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "sslh" "listenAddress" ] [ "services" "sslh" "listenAddresses" ])
+    (mkRenamedOptionModule [ "services" "sslh" "timeout" ] [ "services" "sslh" "settings" "timeout" ])
+    (mkRenamedOptionModule [ "services" "sslh" "transparent" ] [ "services" "sslh" "settings" "transparent" ])
+    (mkRemovedOptionModule [ "services" "sslh" "appendConfig" ] "Use services.sslh.settings instead")
+    (mkChangedOptionModule [ "services" "sslh" "verbose" ] [ "services" "sslh" "settings" "verbose-connections" ]
+      (config: if config.services.sslh.verbose then 1 else 0))
+  ];
+
+  meta.buildDocsInSandbox = false;
+
+  options.services.sslh = {
+    enable = mkEnableOption (lib.mdDoc "sslh, protocol demultiplexer");
+
+    method = mkOption {
+      type = types.enum [ "fork" "select" "ev" ];
+      default = "fork";
+      description = lib.mdDoc ''
+        The method to use for handling connections:
+
+          - `fork` forks a new process for each incoming connection. It is
+          well-tested and very reliable, but incurs the overhead of many
+          processes.
+
+          - `select` uses only one thread, which monitors all connections at once.
+          It has lower overhead per connection, but if it stops, you'll lose all
+          connections.
+
+          - `ev` is implemented using libev; it's similar to `select` but
+            scales better to a large number of connections.
+      '';
+    };
+
+    listenAddresses = mkOption {
+      type = with types; coercedTo str singleton (listOf str);
+      default = [ "0.0.0.0" "[::]" ];
+      description = lib.mdDoc "Listening addresses or hostnames.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 443;
+      description = lib.mdDoc "Listening port.";
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = configFormat.type;
+
+        options.timeout = mkOption {
+          type = types.ints.unsigned;
+          default = 2;
+          description = lib.mdDoc "Timeout in seconds.";
+        };
+
+        options.transparent = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether the services behind sslh (Apache, sshd and so on) will see the
+            external IP and ports as if the external world connected directly to
+            them.
+          '';
+        };
+
+        options.verbose-connections = mkOption {
+          type = types.ints.between 0 4;
+          default = 0;
+          description = lib.mdDoc ''
+            Where to log connection information. Possible values are:
+
+             0. don't log anything
+             1. write log to stdout
+             2. write log to syslog
+             3. write log to both stdout and syslog
+             4. write to a log file ({option}`sslh.settings.logfile`)
+          '';
+        };
+
+        options.numeric = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to disable reverse DNS lookups, thus keeping IP
+            address literals in the log.
+          '';
+        };
+
+        options.protocols = mkOption {
+          type = types.listOf configFormat.type;
+          default = [
+            { name = "ssh";     host = "localhost"; port =  "22"; service= "ssh"; }
+            { name = "openvpn"; host = "localhost"; port = "1194"; }
+            { name = "xmpp";    host = "localhost"; port = "5222"; }
+            { name = "http";    host = "localhost"; port =   "80"; }
+            { name = "tls";     host = "localhost"; port =  "443"; }
+            { name = "anyprot"; host = "localhost"; port =  "443"; }
+          ];
+          description = lib.mdDoc ''
+            List of protocols sslh will probe for and redirect.
+            Each protocol entry consists of:
+
+              - `name`: name of the probe.
+
+              - `service`: libwrap service name (see {manpage}`hosts_access(5)`),
+
+              - `host`, `port`: where to connect when this probe succeeds,
+
+              - `log_level`: to log incoming connections,
+
+              - `transparent`: proxy this protocol transparently,
+
+              - etc.
+
+            See the documentation for all options, including probe-specific ones.
+          '';
+        };
+      };
+      description = lib.mdDoc "sslh configuration. See {manpage}`sslh(8)` for available settings.";
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      systemd.services.sslh = {
+        description = "Applicative Protocol Multiplexer (e.g. share SSH and HTTPS on the same port)";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          DynamicUser          = true;
+          User                 = "sslh";
+          PermissionsStartOnly = true;
+          Restart              = "always";
+          RestartSec           = "1s";
+          ExecStart            = "${pkgs.sslh}/bin/sslh-${cfg.method} -F${configFile}";
+          KillMode             = "process";
+          AmbientCapabilities  = ["CAP_NET_BIND_SERVICE" "CAP_NET_ADMIN" "CAP_SETGID" "CAP_SETUID"];
+          PrivateTmp           = true;
+          PrivateDevices       = true;
+          ProtectSystem        = "full";
+          ProtectHome          = true;
+        };
+      };
+
+      services.sslh.settings = {
+        # Settings defined here are not supposed to be changed: doing so would
+        # break the module, so overriding them requires `lib.mkForce`.
+        foreground = true;
+        inetd = false;
+        listen = map (addr: { host = addr; port = toString cfg.port; }) cfg.listenAddresses;
+      };
+
+    })
+
+    # code from https://github.com/yrutschle/sslh#transparent-proxy-support
+    # the only difference is using iptables mark 0x2 instead of 0x1 to avoid conflicts with nixos/nat module
+    (mkIf (cfg.enable && cfg.settings.transparent) {
+      # Set route_localnet = 1 on all interfaces so that sslh can use "localhost" as destination
+      boot.kernel.sysctl."net.ipv4.conf.default.route_localnet" = 1;
+      boot.kernel.sysctl."net.ipv4.conf.all.route_localnet"     = 1;
+
+      systemd.services.sslh = let
+        iptablesCommands = [
+          # DROP martian packets as they would have been if route_localnet was zero
+          # Note: packets not leaving the server aren't affected by this, thus sslh will still work
+          { table = "raw";    command = "PREROUTING  ! -i lo -d 127.0.0.0/8 -j DROP"; }
+          { table = "mangle"; command = "POSTROUTING ! -o lo -s 127.0.0.0/8 -j DROP"; }
+          # Mark all connections made by sslh for special treatment (here sslh is run as user ${user})
+          { table = "nat";    command = "OUTPUT -m owner --uid-owner ${user} -p tcp --tcp-flags FIN,SYN,RST,ACK SYN -j CONNMARK --set-xmark 0x02/0x0f"; }
+          # Outgoing packets that should go to sslh instead have to be rerouted, so mark them accordingly (copying over the connection mark)
+          { table = "mangle"; command = "OUTPUT ! -o lo -p tcp -m connmark --mark 0x02/0x0f -j CONNMARK --restore-mark --mask 0x0f"; }
+        ];
+        ip6tablesCommands = [
+          { table = "raw";    command = "PREROUTING  ! -i lo -d ::1/128     -j DROP"; }
+          { table = "mangle"; command = "POSTROUTING ! -o lo -s ::1/128     -j DROP"; }
+          { table = "nat";    command = "OUTPUT -m owner --uid-owner ${user} -p tcp --tcp-flags FIN,SYN,RST,ACK SYN -j CONNMARK --set-xmark 0x02/0x0f"; }
+          { table = "mangle"; command = "OUTPUT ! -o lo -p tcp -m connmark --mark 0x02/0x0f -j CONNMARK --restore-mark --mask 0x0f"; }
+        ];
+      in {
+        path = [ pkgs.iptables pkgs.iproute2 pkgs.procps ];
+
+        preStart = ''
+          # Clean up old iptables entries which might still be there
+          ${concatMapStringsSep "\n" ({table, command}: "while iptables -w -t ${table} -D ${command} 2>/dev/null; do echo; done") iptablesCommands}
+          ${concatMapStringsSep "\n" ({table, command}:       "iptables -w -t ${table} -A ${command}"                           ) iptablesCommands}
+
+          # Configure routing for those marked packets
+          ip rule  add fwmark 0x2 lookup 100
+          ip route add local 0.0.0.0/0 dev lo table 100
+
+        '' + optionalString config.networking.enableIPv6 ''
+          ${concatMapStringsSep "\n" ({table, command}: "while ip6tables -w -t ${table} -D ${command} 2>/dev/null; do echo; done") ip6tablesCommands}
+          ${concatMapStringsSep "\n" ({table, command}:       "ip6tables -w -t ${table} -A ${command}"                           ) ip6tablesCommands}
+
+          ip -6 rule  add fwmark 0x2 lookup 100
+          ip -6 route add local ::/0 dev lo table 100
+        '';
+
+        postStop = ''
+          ${concatMapStringsSep "\n" ({table, command}: "iptables -w -t ${table} -D ${command}") iptablesCommands}
+
+          ip rule  del fwmark 0x2 lookup 100
+          ip route del local 0.0.0.0/0 dev lo table 100
+        '' + optionalString config.networking.enableIPv6 ''
+          ${concatMapStringsSep "\n" ({table, command}: "ip6tables -w -t ${table} -D ${command}") ip6tablesCommands}
+
+          ip -6 rule  del fwmark 0x2 lookup 100
+          ip -6 route del local ::/0 dev lo table 100
+        '';
+      };
+    })
+  ];
+}
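As a usage sketch (illustrative only, not part of this patch), the sslh module above could share port 443 between SSH and a local HTTPS backend roughly like this; the backend port is a placeholder:

    {
      services.sslh = {
        enable = true;
        port = 443;
        settings.protocols = [
          { name = "ssh"; host = "localhost"; port = "22"; }
          { name = "tls"; host = "localhost"; port = "8443"; }
        ];
      };
    }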
diff --git a/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/module.nix b/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/module.nix
new file mode 100644
index 000000000000..bfea89969728
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/module.nix
@@ -0,0 +1,84 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+with (import ./param-lib.nix lib);
+
+let
+  cfg = config.services.strongswan-swanctl;
+  swanctlParams = import ./swanctl-params.nix lib;
+in  {
+  options.services.strongswan-swanctl = {
+    enable = mkEnableOption (lib.mdDoc "strongswan-swanctl service");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.strongswan;
+      defaultText = literalExpression "pkgs.strongswan";
+      description = lib.mdDoc ''
+        The strongswan derivation to use.
+      '';
+    };
+
+    strongswan.extraConfig = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        Contents of the `strongswan.conf` file.
+      '';
+    };
+
+    swanctl = paramsToOptions swanctlParams;
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = !config.services.strongswan.enable;
+        message = "cannot enable both services.strongswan and services.strongswan-swanctl. Choose either one.";
+      }
+    ];
+
+    environment.etc."swanctl/swanctl.conf".text =
+      paramsToConf cfg.swanctl swanctlParams;
+
+    # The swanctl command complains when the following directories don't exist:
+    # See: https://wiki.strongswan.org/projects/strongswan/wiki/Swanctldirectory
+    systemd.tmpfiles.rules = [
+      "d /etc/swanctl/x509 -"     # Trusted X.509 end entity certificates
+      "d /etc/swanctl/x509ca -"   # Trusted X.509 Certificate Authority certificates
+      "d /etc/swanctl/x509ocsp -"
+      "d /etc/swanctl/x509aa -"   # Trusted X.509 Attribute Authority certificates
+      "d /etc/swanctl/x509ac -"   # Attribute Certificates
+      "d /etc/swanctl/x509crl -"  # Certificate Revocation Lists
+      "d /etc/swanctl/pubkey -"   # Raw public keys
+      "d /etc/swanctl/private -"  # Private keys in any format
+      "d /etc/swanctl/rsa -"      # PKCS#1 encoded RSA private keys
+      "d /etc/swanctl/ecdsa -"    # Plain ECDSA private keys
+      "d /etc/swanctl/bliss -"
+      "d /etc/swanctl/pkcs8 -"    # PKCS#8 encoded private keys of any type
+      "d /etc/swanctl/pkcs12 -"   # PKCS#12 containers
+    ];
+
+    systemd.services.strongswan-swanctl = {
+      description = "strongSwan IPsec IKEv1/IKEv2 daemon using swanctl";
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network-online.target" ];
+      path     = with pkgs; [ kmod iproute2 iptables util-linux ];
+      environment = {
+        STRONGSWAN_CONF = pkgs.writeTextFile {
+          name = "strongswan.conf";
+          text = cfg.strongswan.extraConfig;
+        };
+        SWANCTL_DIR = "/etc/swanctl";
+      };
+      restartTriggers = [ config.environment.etc."swanctl/swanctl.conf".source ];
+      serviceConfig = {
+        ExecStart     = "${cfg.package}/sbin/charon-systemd";
+        Type          = "notify";
+        ExecStartPost = "${cfg.package}/sbin/swanctl --load-all --noprompt";
+        ExecReload    = "${cfg.package}/sbin/swanctl --reload";
+        Restart       = "on-abnormal";
+      };
+    };
+  };
+}
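As a usage sketch (illustrative only, not part of this patch), the swanctl options generated from the parameter definitions below could be used along these lines; the connection name, peer address and proposal list are placeholders, and this is not a complete IPsec setup:

    {
      services.strongswan-swanctl = {
        enable = true;
        swanctl.connections.my-tunnel = {
          version = 2;
          remote_addrs = [ "vpn.example.org" ];
          proposals = [ "default" ];
        };
      };
    }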
diff --git a/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/param-constructors.nix b/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/param-constructors.nix
new file mode 100644
index 000000000000..dc6d8f48e626
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/param-constructors.nix
@@ -0,0 +1,163 @@
+# In the following context a parameter is an attribute set that
+# contains a NixOS option and a render function. It also contains the
+# attribute: '_type = "param"' so we can distinguish it from other
+# sets.
+#
+# The render function is used to convert the value of the option to a
+# snippet of strongswan.conf. Most parameters simply render their
+# value to a string. For example, take the following parameter:
+#
+#   threads = mkIntParam 10 "Threads to use for request handling.";
+#
+# When a user defines the corresponding option as, for example:
+#
+#   services.strongswan-swanctl.strongswan.threads = 32;
+#
+# It will get rendered to the following snippet in strongswan.conf:
+#
+#   threads = 32
+#
+# Some parameters however need to be able to change the attribute
+# name. For example, take the following parameter:
+#
+#   id = mkPrefixedAttrsOfParam (mkOptionalStrParam "") "...";
+#
+# A user can define the corresponding option as for example:
+#
+#   id = {
+#     "foo" = "bar";
+#     "baz" = "qux";
+#   };
+#
+# This will get rendered to the following snippet:
+#
+#   foo-id = bar
+#   baz-id = qux
+#
+# For this reason the render function is not simply a function from
+# value -> string but a function from a value to an attribute set:
+# { "${name}" = string }. This allows parameters to change the attribute
+# name like in the previous example.
+
+lib :
+
+with lib;
+with (import ./param-lib.nix lib);
+
+rec {
+  mkParamOfType = type : strongswanDefault : description : {
+    _type = "param";
+    option = mkOption {
+      type = types.nullOr type;
+      default = null;
+      description = documentDefault description strongswanDefault;
+    };
+    render = single toString;
+  };
+
+  documentDefault = description : strongswanDefault :
+    if strongswanDefault == null
+    then mdDoc description
+    else mdDoc (description + ''
+
+
+      StrongSwan default: ````${builtins.toJSON strongswanDefault}````
+    '');
+
+  single = f: name: value: { ${name} = f value; };
+
+  mkStrParam         = mkParamOfType types.str;
+  mkOptionalStrParam = mkStrParam null;
+
+  mkEnumParam = values : mkParamOfType (types.enum values);
+
+  mkIntParam         = mkParamOfType types.int;
+  mkOptionalIntParam = mkIntParam null;
+
+  # We should have floats in Nix...
+  mkFloatParam = mkStrParam;
+
+  # TODO: Check for hex format:
+  mkHexParam         = mkStrParam;
+  mkOptionalHexParam = mkOptionalStrParam;
+
+  # TODO: Check for duration format:
+  mkDurationParam         = mkStrParam;
+  mkOptionalDurationParam = mkOptionalStrParam;
+
+  mkYesNoParam = strongswanDefault : description : {
+    _type = "param";
+    option = mkOption {
+      type = types.nullOr types.bool;
+      default = null;
+      description = documentDefault description strongswanDefault;
+    };
+    render = single (b: if b then "yes" else "no");
+  };
+  yes = true;
+  no  = false;
+
+  mkSpaceSepListParam = mkSepListParam " ";
+  mkCommaSepListParam = mkSepListParam ",";
+
+  mkSepListParam = sep : strongswanDefault : description : {
+    _type = "param";
+    option = mkOption {
+      type = types.nullOr (types.listOf types.str);
+      default = null;
+      description = documentDefault description strongswanDefault;
+    };
+    render = single (value: concatStringsSep sep value);
+  };
+
+  mkAttrsOfParams = params :
+    mkAttrsOf params (types.submodule {options = paramsToOptions params;});
+
+  mkAttrsOfParam = param :
+    mkAttrsOf param param.option.type;
+
+  mkAttrsOf = param : option : description : {
+    _type = "param";
+    option = mkOption {
+      type = types.attrsOf option;
+      default = {};
+      description = mdDoc description;
+    };
+    render = single (attrs:
+      (paramsToRenderedStrings attrs
+        (mapAttrs (_n: _v: param) attrs)));
+  };
+
+  mkPrefixedAttrsOfParams = params :
+    mkPrefixedAttrsOf params (types.submodule {options = paramsToOptions params;});
+
+  mkPrefixedAttrsOfParam = param :
+    mkPrefixedAttrsOf param param.option.type;
+
+  mkPrefixedAttrsOf = p : option : description : {
+    _type = "param";
+    option = mkOption {
+      type = types.attrsOf option;
+      default = {};
+      description = mdDoc description;
+    };
+    render = prefix: attrs:
+      let prefixedAttrs = mapAttrs' (name: nameValuePair "${prefix}-${name}") attrs;
+      in paramsToRenderedStrings prefixedAttrs
+           (mapAttrs (_n: _v: p) prefixedAttrs);
+  };
+
+  mkPostfixedAttrsOfParams = params : description : {
+    _type = "param";
+    option = mkOption {
+      type = types.attrsOf (types.submodule {options = paramsToOptions params;});
+      default = {};
+      description = lib.mdDoc description;
+    };
+    render = postfix: attrs:
+      let postfixedAttrs = mapAttrs' (name: nameValuePair "${name}-${postfix}") attrs;
+      in paramsToRenderedStrings postfixedAttrs
+           (mapAttrs (_n: _v: params) postfixedAttrs);
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/param-lib.nix b/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/param-lib.nix
new file mode 100644
index 000000000000..2bbb39a76049
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/param-lib.nix
@@ -0,0 +1,82 @@
+lib :
+
+with lib;
+
+rec {
+  paramsToConf = cfg : ps : mkConf 0 (paramsToRenderedStrings cfg ps);
+
+  # mkConf takes an indentation level (which usually starts at 0) and a nested
+  # attribute set of strings and will render that set to a strongswan.conf style
+  # configuration format. For example:
+  #
+  #   mkConf 0 {a = "1"; b = { c = { "foo" = "2"; "bar" = "3"; }; d = "4";};}   =>   ''
+  #   a = 1
+  #   b {
+  #     c {
+  #       foo = 2
+  #       bar = 3
+  #     }
+  #     d = 4
+  #   }''
+  mkConf = indent : ps :
+    concatMapStringsSep "\n"
+      (name:
+        let value = ps.${name};
+            indentation = replicate indent " ";
+        in
+        indentation + (
+          if isAttrs value
+          then "${name} {\n" +
+                 mkConf (indent + 2) value + "\n" +
+               indentation + "}"
+          else "${name} = ${value}"
+        )
+      )
+      (attrNames ps);
+
+  replicate = n : c : concatStrings (builtins.genList (_x : c) n);
+
+  # `paramsToRenderedStrings cfg ps` converts the NixOS configuration `cfg`
+  # (typically the "config" argument of a NixOS module) and the set of
+  # parameters `ps` (an attribute set where the values are constructed using the
+  # parameter constructors in ./param-constructors.nix) to a nested attribute
+  # set of strings (rendered parameters).
+  paramsToRenderedStrings = cfg : ps :
+    filterEmptySets (
+      (mapParamsRecursive (path: name: param:
+        let value = attrByPath path null cfg;
+        in optionalAttrs (value != null) (param.render name value)
+      ) ps));
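+  # Illustrative example (not part of this library): given
+  #   cfg = { threads = 32; }   and   ps = { threads = mkIntParam 10 "..."; }
+  # `paramsToRenderedStrings cfg ps` evaluates to { threads = "32"; },
+  # which mkConf then renders as the line `threads = 32`.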
+
+  filterEmptySets = set : filterAttrs (n: v: (v != null)) (mapAttrs (name: value:
+    if isAttrs value
+    then let value' = filterEmptySets value;
+         in if value' == {}
+            then null
+            else value'
+    else value
+  ) set);
+
+  # Recursively map over every parameter in the given attribute set.
+  mapParamsRecursive = mapAttrsRecursiveCond' (as: (!(as ? _type && as._type == "param")));
+
+  mapAttrsRecursiveCond' = cond: f: set:
+    let
+      recurse = path: set:
+        let
+          g =
+            name: value:
+            if isAttrs value && cond value
+              then { ${name} = recurse (path ++ [name]) value; }
+              else f (path ++ [name]) name value;
+        in mapAttrs'' g set;
+    in recurse [] set;
+
+  mapAttrs'' = f: set:
+    foldl' (a: b: a // b) {} (map (attr: f attr set.${attr}) (attrNames set));
+
+  # Extract the options from the given set of parameters.
+  paramsToOptions = ps :
+    mapParamsRecursive (_path: name: param: { ${name} = param.option; }) ps;
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix b/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix
new file mode 100644
index 000000000000..1ad5fdbcef02
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/strongswan-swanctl/swanctl-params.nix
@@ -0,0 +1,1265 @@
+# See: https://wiki.strongswan.org/projects/strongswan/wiki/Swanctlconf
+#
+# When strongSwan is upgraded please update the parameters in this file. You can
+# see which parameters should be deleted, changed or added by diffing
+# swanctl.opt:
+#
+#   git clone https://github.com/strongswan/strongswan.git
+#   cd strongswan
+#   git diff 5.7.2..5.8.0 src/swanctl/swanctl.opt
+
+lib: with (import ./param-constructors.nix lib);
+
+let
+  certParams = {
+    file = mkOptionalStrParam ''
+      Absolute path to the certificate to load. Passed as-is to the daemon, so
+      it must be readable by it.
+
+      Configure either this or {option}`handle`, but not both, in one section.
+    '';
+
+    handle = mkOptionalHexParam ''
+      Hex-encoded CKA_ID or handle of the certificate on a token or TPM,
+      respectively.
+
+      Configure either this or {option}`file`, but not both, in one section.
+    '';
+
+    slot = mkOptionalIntParam ''
+      Optional slot number of the token that stores the certificate.
+    '';
+
+    module = mkOptionalStrParam ''
+      Optional PKCS#11 module name.
+    '';
+  };
+in {
+  authorities = mkAttrsOfParams ({
+
+    cacert = mkOptionalStrParam ''
+      The certificates may use a relative path from the swanctl
+      `x509ca` directory or an absolute path.
+
+      Configure one of {option}`cacert`,
+      {option}`file`, or
+      {option}`handle` per section.
+    '';
+
+    cert_uri_base = mkOptionalStrParam ''
+      Defines the base URI for the Hash and URL feature supported by
+      IKEv2. Instead of exchanging complete certificates, IKEv2 allows one to
+      send a URI that resolves to the DER encoded certificate. The certificate
+      URIs are built by appending the SHA1 hash of the DER encoded certificates
+      to this base URI.
+    '';
+
+    crl_uris = mkCommaSepListParam [] ''
+      List of CRL distribution points (ldap, http, or file URI).
+    '';
+
+    ocsp_uris = mkCommaSepListParam [] ''
+      List of OCSP URIs.
+    '';
+
+  } // certParams) ''
+    Section defining complementary attributes of certification authorities, each
+    in its own subsection with an arbitrary yet unique name.
+  '';
+
+  connections = mkAttrsOfParams {
+
+    version = mkIntParam 0 ''
+      IKE major version to use for connection.
+
+      - 1 uses IKEv1 aka ISAKMP,
+      - 2 uses IKEv2.
+      - A connection using the default of 0 accepts both IKEv1 and IKEv2 as
+        responder, and initiates the connection actively with IKEv2.
+    '';
+
+    local_addrs = mkCommaSepListParam [] ''
+      Local address(es) to use for IKE communication. Takes
+      single IPv4/IPv6 addresses, DNS names, CIDR subnets or IP address ranges.
+
+      As initiator, the first non-range/non-subnet is used to initiate the
+      connection from. As responder, the local destination address must match
+      at least one of the specified addresses, subnets or ranges.
+
+      If FQDNs are assigned they are resolved every time a configuration lookup
+      is done. If DNS resolution times out, the lookup is delayed for that time.
+    '';
+
+    remote_addrs = mkCommaSepListParam [] ''
+      Remote address(es) to use for IKE communication. Takes
+      single IPv4/IPv6 addresses, DNS names, CIDR subnets or IP address ranges.
+
+      As initiator, the first non-range/non-subnet is used to initiate the
+      connection to. As responder, the initiator source address must match
+      at least one of the specified addresses, subnets or ranges.
+
+      If FQDNs are assigned they are resolved every time a configuration lookup
+      is done. If DNS resolution times out, the lookup is delayed for that time.
+      To initiate a connection, at least one specific address or DNS name must
+      be specified.
+    '';
+
+    local_port = mkIntParam 500 ''
+      Local UDP port for IKE communication. By default the port of the socket
+      backend is used, which is usually `500`. If port
+      `500` is used, automatic IKE port floating to port
+      `4500` is used to work around NAT issues.
+
+      Using a non-default local IKE port requires support from the socket
+      backend in use (socket-dynamic).
+    '';
+
+    remote_port = mkIntParam 500 ''
+      Remote UDP port for IKE communication. If the default of port
+      `500` is used, automatic IKE port floating to port
+      `4500` is used to work around NAT issues.
+    '';
+
+    proposals = mkCommaSepListParam ["default"] ''
+      A proposal is a set of algorithms. For non-AEAD algorithms, this includes
+      for IKE an encryption algorithm, an integrity algorithm, a pseudo random
+      function and a Diffie-Hellman group. For AEAD algorithms, instead of
+      encryption and integrity algorithms, a combined algorithm is used.
+
+      In IKEv2, multiple algorithms of the same kind can be specified in a
+      single proposal, from which one gets selected. In IKEv1, only one
+      algorithm per kind is allowed per proposal; additional algorithms are
+      implicitly stripped. Use multiple proposals to offer different algorithm
+      combinations in IKEv1.
+
+      Algorithm keywords get separated using dashes. Multiple proposals may be
+      specified in a list. The special value `default` forms a
+      default proposal of supported algorithms considered safe, and is usually a
+      good choice for interoperability.
+    '';
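+    # Hedged, illustrative example (values are assumptions, not defaults): an
+    # AEAD proposal offered alongside the built-in safe set could be written as
+    #   proposals = [ "aes256gcm16-prfsha384-ecp384" "default" ];
+    # with algorithm keywords separated by dashes inside each proposal string.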
+
+    vips = mkCommaSepListParam [] ''
+      List of virtual IPs to request in IKEv2 configuration payloads or IKEv1
+      Mode Config. The wildcard addresses `0.0.0.0` and
+      `::` request an arbitrary address; specific addresses may
+      be defined. The responder may return a different address, though, or none
+      at all.
+    '';
+
+    aggressive = mkYesNoParam no ''
+      Enables Aggressive Mode instead of Main Mode with Identity
+      Protection. Aggressive Mode is considered less secure, because the ID and
+      HASH payloads are exchanged unprotected. This allows a passive attacker to
+      snoop peer identities, and even worse, start dictionary attacks on the
+      Preshared Key.
+    '';
+
+    pull = mkYesNoParam yes ''
+      If the default of yes is used, Mode Config works in pull mode, where the
+      initiator actively requests a virtual IP. With no, push mode is used,
+      where the responder pushes down a virtual IP to the initiating peer.
+
+      Push mode is currently supported for IKEv1, but not in IKEv2. It is used
+      by a few implementations only, pull mode is recommended.
+    '';
+
+    dscp = mkStrParam "000000" ''
+      Differentiated Services Field Codepoint to set on outgoing IKE packets for
+      this connection. The value is a six digit binary encoded string specifying
+      the Codepoint to set, as defined in RFC 2474.
+    '';
+
+    encap = mkYesNoParam no ''
+      To enforce UDP encapsulation of ESP packets, the IKE daemon can fake the
+      NAT detection payloads. This makes the peer believe that NAT takes place
+      on the path, forcing it to encapsulate ESP packets in UDP.
+
+      Usually this is not required, but it can help to work around connectivity
+      issues with too restrictive intermediary firewalls.
+    '';
+
+    mobike = mkYesNoParam yes ''
+      Enables MOBIKE on IKEv2 connections. MOBIKE is enabled by default on IKEv2
+      connections, and allows mobility of clients and multi-homing on servers by
+      migrating active IPsec tunnels.
+
+      Usually keeping MOBIKE enabled is unproblematic, as it is not used if the
+      peer does not indicate support for it. However, due to the design of
+      MOBIKE, IKEv2 always floats to port 4500 starting from the second
+      exchange. Some implementations don't like this behavior, hence it can be
+      disabled.
+    '';
+
+    dpd_delay = mkDurationParam "0s" ''
+      Interval to check the liveness of a peer actively using IKEv2
+      INFORMATIONAL exchanges or IKEv1 R_U_THERE messages. Active DPD checking
+      is only enforced if no IKE or ESP/AH packet has been received for the
+      configured DPD delay.
+    '';
+
+    dpd_timeout = mkDurationParam "0s" ''
+      Charon by default uses the normal retransmission mechanism and timeouts to
+      check the liveness of a peer, as all messages are used for liveness
+      checking. For compatibility reasons, with IKEv1 a custom interval may be
+      specified; this option has no effect on connections using IKEv2.
+    '';
+
+    fragmentation = mkEnumParam ["yes" "accept" "force" "no"] "yes" ''
+      Use IKE fragmentation (proprietary IKEv1 extension or RFC 7383 IKEv2
+      fragmentation). Acceptable values are `yes` (the default
+      since 5.5.1), `accept` (since 5.5.3),
+      `force` and `no`.
+
+      - If set to `yes`, and the peer
+        supports it, oversized IKE messages will be sent in fragments.
+      - If set to
+        `accept`, support for fragmentation is announced to the peer but the daemon
+        does not send its own messages in fragments.
+      - If set to `force` (only
+        supported for IKEv1) the initial IKE message will already be fragmented if
+        required.
+      - Finally, setting the option to `no` will disable announcing
+        support for this feature.
+
+      Note that fragmented IKE messages sent by a peer are always processed
+      irrespective of the value of this option (even when set to `no`).
+    '';
+
+    childless = mkEnumParam [ "allow" "prefer" "force" "never" ] "allow" ''
+      Use childless IKE_SA initiation (RFC 6023) for IKEv2, with the first
+      CHILD_SA created with a separate CREATE_CHILD_SA exchange (e.g. to use an
+      independent DH exchange for all CHILD_SAs).
+
+      - If set to `allow` (the default), responders will accept childless
+        IKE_SAs (as indicated via notify in the IKE_SA_INIT response) while
+        initiators continue to create regular IKE_SAs with the first CHILD_SA
+        created during IKE_AUTH, unless the IKE_SA is initiated explicitly
+        without any children (which will fail if the responder does not support
+        or has disabled this extension).
+      - The effect of `prefer` is the same as `allow` on responders, but as
+        initiator a childless IKE_SA is initiated if the responder supports it.
+      - If set to `force`, only childless initiation is accepted in either role.
+      - Finally, setting the option to `never` disables support for childless
+        IKE_SAs as responder.
+    '';
+
+    send_certreq = mkYesNoParam yes ''
+      Send certificate request payloads to offer trusted root CA certificates to
+      the peer. Certificate requests help the peer to choose an appropriate
+      certificate/private key for authentication and are enabled by default.
+      Disabling certificate requests can be useful if too many trusted root CA
+      certificates are installed, as each certificate request increases the size
+      of the initial IKE packets.
+    '';
+
+    send_cert = mkEnumParam ["always" "never" "ifasked" ] "ifasked" ''
+      Send certificate payloads when using certificate authentication.
+
+      - With the default of `ifasked` the daemon sends
+        certificate payloads only if certificate requests have been received.
+      - `never` disables sending of certificate payloads
+        altogether,
+      - `always` causes certificate payloads to be sent
+        unconditionally whenever certificate authentication is used.
+    '';
+
+    ppk_id = mkOptionalStrParam ''
+       String identifying the Postquantum Preshared Key (PPK) to be used.
+    '';
+
+    ppk_required = mkYesNoParam no ''
+       Whether a Postquantum Preshared Key (PPK) is required for this connection.
+    '';
+
+    keyingtries = mkIntParam 1 ''
+      Number of retransmission sequences to perform during initial
+      connect. Instead of giving up initiation after the first retransmission
+      sequence with the default value of `1`, additional
+      sequences may be started according to the configured value. A value of
+      `0` initiates a new sequence until the connection
+      establishes or fails with a permanent error.
+    '';
+
+    unique = mkEnumParam ["no" "never" "keep" "replace"] "no" ''
+      Connection uniqueness policy to enforce. To avoid multiple connections
+      from the same user, a uniqueness policy can be enforced.
+
+      - The value `never` never enforces such a policy, even
+        if a peer included INITIAL_CONTACT notification messages,
+      - whereas `no` replaces existing connections for the same
+        identity if a new one has the INITIAL_CONTACT notify.
+      - `keep` rejects new connection attempts if the same user
+        already has an active connection,
+      - `replace` deletes any existing connection if a new one
+        for the same user gets established.
+
+      To compare connections for uniqueness, the remote IKE identity is used. If
+      EAP or XAuth authentication is involved, the EAP-Identity or XAuth
+      username is used to enforce the uniqueness policy instead.
+
+      On initiators this setting specifies whether an INITIAL_CONTACT notify is
+      sent during IKE_AUTH if no existing connection is found with the remote
+      peer (determined by the identities of the first authentication
+      round). Unless set to `never` the client will send a notify.
+    '';
+
+    reauth_time = mkDurationParam "0s" ''
+      Time to schedule IKE reauthentication. IKE reauthentication recreates the
+      IKE/ISAKMP SA from scratch and re-evaluates the credentials. In asymmetric
+      configurations (with EAP or configuration payloads) it might not be
+      possible to actively reauthenticate as responder. The IKEv2
+      reauthentication lifetime negotiation can instruct the client to perform
+      reauthentication.
+
+      Reauthentication is disabled by default. Enabling it may lead to
+      small connection interruptions, as strongSwan uses a break-before-make
+      policy with IKEv2 to avoid any conflicts with associated tunnel resources.
+    '';
+
+    rekey_time = mkDurationParam "4h" ''
+      IKE rekeying refreshes key material using a Diffie-Hellman exchange, but
+      does not re-check associated credentials. It is supported in IKEv2 only,
+      IKEv1 performs a reauthentication procedure instead.
+
+      With the default value IKE rekeying is scheduled every 4 hours, minus the
+      configured {option}`rand_time`. If a {option}`reauth_time` is configured,
+      {option}`rekey_time` defaults to zero, disabling rekeying; explicitly set
+      both to enforce rekeying and reauthentication.
+    '';
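+    # Illustrative sketch of the interplay described above (durations are
+    # arbitrary examples, not recommendations): to enforce both rekeying and
+    # periodic reauthentication, set both values explicitly, e.g.
+    #   rekey_time = "4h"; reauth_time = "24h";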
+
+    over_time = mkOptionalDurationParam ''
+      Hard IKE_SA lifetime if rekey/reauth does not complete, as time. To avoid
+      having an IKE/ISAKMP SA kept alive if IKE reauthentication or rekeying fails
+      perpetually, a maximum hard lifetime may be specified. If the IKE_SA fails
+      to rekey or reauthenticate within the specified time, the IKE_SA gets
+      closed.
+
+      In contrast to CHILD_SA rekeying, over_time is relative in time to the
+      rekey_time and reauth_time values, as it applies to both.
+
+      The default is 10% of the longer of {option}`rekey_time` and
+      {option}`reauth_time`.
+    '';
+
+    rand_time = mkOptionalDurationParam ''
+      Time range from which to choose a random value to subtract from
+      rekey/reauth times. To avoid having both peers initiating the rekey/reauth
+      procedure simultaneously, a random time gets subtracted from the
+      rekey/reauth times.
+
+      The default is equal to the configured {option}`over_time`.
+    '';
+
+    pools = mkCommaSepListParam [] ''
+      List of named IP pools to allocate virtual IP addresses
+      and other configuration attributes from. Each name references a pool by
+      name from either the pools section or an external pool.
+    '';
+
+    if_id_in = mkStrParam "0" ''
+      XFRM interface ID set on inbound policies/SA, can be overridden by child
+      config, see there for details.
+
+      The special value `%unique` allocates a unique interface ID per IKE_SA,
+      which is inherited by all its CHILD_SAs (unless overridden there), beyond
+      that the value `%unique-dir` assigns a different unique interface ID for
+      each direction (in/out).
+    '';
+
+    if_id_out = mkStrParam "0" ''
+      XFRM interface ID set on outbound policies/SA, can be overridden by child
+      config, see there for details.
+
+      The special value `%unique` allocates a unique interface ID per IKE_SA,
+      which is inherited by all its CHILD_SAs (unless overridden there), beyond
+      that the value `%unique-dir` assigns a different unique interface ID for
+      each direction (in/out).
+    '';
+
+    mediation = mkYesNoParam no ''
+      Whether this connection is a mediation connection, that is, whether this
+      connection is used to mediate other connections using the IKEv2 Mediation
+      Extension. Mediation connections create no CHILD_SA.
+    '';
+
+    mediated_by = mkOptionalStrParam ''
+      The name of the connection to mediate this connection through. If given,
+      the connection will be mediated through the named mediation
+      connection. The mediation connection must have mediation enabled.
+    '';
+
+    mediation_peer = mkOptionalStrParam ''
+      Identity under which the peer is registered at the mediation server, that
+      is, the IKE identity the other end of this connection uses as its local
+      identity on its connection to the mediation server. This is the identity
+      we request the mediation server to mediate us with. Only relevant on
+      connections that set mediated_by. If it is not given, the remote IKE
+      identity of the first authentication round of this connection will be
+      used.
+    '';
+
+    local = mkPrefixedAttrsOfParams {
+
+      round = mkIntParam 0 ''
+        Optional numeric identifier by which authentication rounds are
+        sorted. If not specified rounds are ordered by their position in the
+        config file/vici message.
+      '';
+
+      certs = mkCommaSepListParam [] ''
+        List of certificate candidates to use for
+        authentication. The certificates may use a relative path from the
+        swanctl `x509` directory or an absolute path.
+
+        The certificate used for authentication is selected based on the
+        received certificate request payloads. If no appropriate CA can be
+        located, the first certificate is used.
+      '';
+
+      cert = mkPostfixedAttrsOfParams certParams ''
+        Section for a certificate candidate to use for
+        authentication. Certificates in certs are transmitted as binary blobs;
+        these sections offer more flexibility.
+      '';
+
+      pubkeys = mkCommaSepListParam [] ''
+        List of raw public key candidates to use for
+        authentication. The public keys may use a relative path from the swanctl
+        `pubkey` directory or an absolute path.
+
+        Even though multiple local public keys could be defined in principle,
+        only the first public key in the list is used for authentication.
+      '';
+
+      auth = mkStrParam "pubkey" ''
+        Authentication to perform locally.
+
+        - The default `pubkey` uses public key authentication
+          using a private key associated to a usable certificate.
+        - `psk` uses pre-shared key authentication.
+        - The IKEv1 specific `xauth` is used for XAuth or Hybrid
+          authentication,
+        - while the IKEv2 specific `eap` keyword defines EAP
+          authentication.
+        - For `xauth`, a specific backend name may be appended,
+          separated by a dash. The appropriate `xauth` backend is
+          selected to perform the XAuth exchange. For traditional XAuth, the
+          `xauth` method is usually defined in the second
+          authentication round following an initial `pubkey` (or
+          `psk`) round. Using `xauth` in the
+          first round performs Hybrid Mode client authentication.
+        - For `eap`, a specific EAP method name may be appended, separated by a
+          dash. An EAP module implementing the appropriate method is selected to
+          perform the EAP conversation.
+        - Since 5.4.0, if both peers support RFC 7427 ("Signature Authentication
+          in IKEv2"), specific hash algorithms to be used during IKEv2
+          authentication may be configured. To do so, use `ike:`
+          followed by a trust chain signature scheme constraint (see description
+          of the {option}`remote` section's {option}`auth`
+          keyword). For example, with `ike:pubkey-sha384-sha256`
+          a public key signature scheme with either SHA-384 or SHA-256 would get
+          used for authentication, in that order and depending on the hash
+          algorithms supported by the peer. If no specific hash algorithms are
+          configured, the default is to prefer an algorithm that matches or
+          exceeds the strength of the signature key. If no constraints with
+          `ike:` prefix are configured any signature scheme
+          constraint (without `ike:` prefix) will also apply to
+          IKEv2 authentication, unless this is disabled in
+          `strongswan.conf`. To use RSASSA-PSS signatures use
+          `rsa/pss` instead of `pubkey` or
+          `rsa` as in e.g.
+          `ike:rsa/pss-sha256`. If `pubkey` or
+          `rsa` constraints are configured RSASSA-PSS signatures
+          will only be used if enabled in `strongswan.conf`(5).
+      '';
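+      # Hedged example drawn from the description above: to constrain IKEv2
+      # signature authentication to SHA-384 or SHA-256 public key signatures,
+      # one might set
+      #   auth = "ike:pubkey-sha384-sha256";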
+
+      id = mkOptionalStrParam ''
+        IKE identity to use for authentication round. When using certificate
+        authentication, the IKE identity must be contained in the certificate,
+        either as subject or as subjectAltName.
+      '';
+
+      eap_id = mkOptionalStrParam ''
+        Client EAP-Identity to use in EAP-Identity exchange and the EAP method.
+      '';
+
+      aaa_id = mkOptionalStrParam ''
+        Server side EAP-Identity to expect in the EAP method. Some EAP methods,
+        such as EAP-TLS, use an identity for the server to perform mutual
+        authentication. This identity may differ from the IKE identity,
+        especially when EAP authentication is delegated from the IKE responder
+        to an AAA backend.
+
+        For EAP-(T)TLS, this defines the identity for which the server must
+        provide a certificate in the TLS exchange.
+      '';
+
+      xauth_id = mkOptionalStrParam ''
+        Client XAuth username used in the XAuth exchange.
+      '';
+
+    } ''
+      Section for a local authentication round. A local authentication round
+      defines the rules for how authentication is performed for the local
+      peer. Multiple rounds may be defined to use IKEv2 RFC 4739 Multiple
+      Authentication or IKEv1 XAuth.
+
+      Each round is defined in a section having `local` as
+      prefix, and an optional unique suffix. To define a single authentication
+      round, the suffix may be omitted.
+    '';
+
+    remote = mkPrefixedAttrsOfParams {
+
+      round = mkIntParam 0 ''
+        Optional numeric identifier by which authentication rounds are
+        sorted. If not specified rounds are ordered by their position in the
+        config file/vici message.
+      '';
+
+      id = mkStrParam "%any" ''
+        IKE identity to expect for authentication round. When using certificate
+        authentication, the IKE identity must be contained in the certificate,
+        either as subject or as subjectAltName.
+      '';
+
+      eap_id = mkOptionalStrParam ''
+        Identity to use as peer identity during EAP authentication. If set to
+        `%any` the EAP-Identity method will be used to ask the
+        client for an EAP identity.
+      '';
+
+      groups = mkCommaSepListParam [] ''
+        Authorization group memberships to require. The peer
+        must prove membership to at least one of the specified groups. Group
+        membership can be certified by different means, for example by
+        appropriate Attribute Certificates or by an AAA backend involved in the
+        authentication.
+      '';
+
+      cert_policy = mkCommaSepListParam [] ''
+        List of certificate policy OIDs the peer's certificate
+        must have. OIDs are specified using the numerical dotted representation.
+      '';
+
+      certs = mkCommaSepListParam [] ''
+        List of certificates to accept for authentication. The certificates may
+        use a relative path from the swanctl `x509` directory
+        or an absolute path.
+      '';
+
+      cert = mkPostfixedAttrsOfParams certParams ''
+        Section for a certificate candidate to use for
+        authentication. Certificates in certs are transmitted as binary blobs;
+        these sections offer more flexibility.
+      '';
+
+      ca_id = mkOptionalStrParam ''
+        Identity in CA certificate to accept for authentication. The specified
+        identity must be contained in one (intermediate) CA of the remote peer
+        trustchain, either as subject or as subjectAltName. This has the same
+        effect as specifying `cacerts` to force clients under
+        a CA to specific connections; it does not require the CA certificate
+        to be available locally, and can be received from the peer during the
+        IKE exchange.
+      '';
+
+      cacerts = mkCommaSepListParam [] ''
+        List of CA certificates to accept for
+        authentication. The certificates may use a relative path from the
+        swanctl `x509ca` directory or an absolute path.
+      '';
+
+      cacert = mkPostfixedAttrsOfParams certParams ''
+        Section for a CA certificate to accept for authentication. Certificates
+        in cacerts are transmitted as binary blobs; these sections offer more
+        flexibility.
+      '';
+
+      pubkeys = mkCommaSepListParam [] ''
+        List of raw public keys to accept for
+        authentication. The public keys may use a relative path from the swanctl
+        `pubkey` directory or an absolute path.
+      '';
+
+      revocation = mkEnumParam ["strict" "ifuri" "relaxed"] "relaxed" ''
+        Certificate revocation policy for CRL or OCSP revocation.
+
+        - A `strict` revocation policy fails if no revocation information is
+          available, i.e. the certificate is not known to be unrevoked.
+        - `ifuri` fails only if a CRL/OCSP URI is available, but certificate
+          revocation checking fails, i.e. there should be revocation information
+          available, but it could not be obtained.
+        - The default revocation policy `relaxed` fails only if a certificate is
+          revoked, i.e. it is explicitly known that it is bad.
+      '';
+
+      auth = mkStrParam "pubkey" ''
+        Authentication to expect from remote. See the {option}`local`
+        section's {option}`auth` keyword description about the details of
+        supported mechanisms.
+
+        Since 5.4.0, to require a trustchain public key strength for the remote
+        side, specify the key type followed by the minimum strength in bits (for
+        example `ecdsa-384` or
+        `rsa-2048-ecdsa-256`). To limit the acceptable set of
+        hashing algorithms for trustchain validation, append hash algorithms to
+        pubkey or a key strength definition (for example
+        `pubkey-sha256-sha512`,
+        `rsa-2048-sha256-sha384-sha512` or
+        `rsa-2048-sha256-ecdsa-256-sha256-sha384`).
+        Unless disabled in `strongswan.conf`, or explicit IKEv2
+        signature constraints are configured (refer to the description of the
+        {option}`local` section's {option}`auth` keyword for
+        details), such key types and hash algorithms are also applied as
+        constraints against IKEv2 signature authentication schemes used by the
+        remote side. To require RSASSA-PSS signatures use
+        `rsa/pss` instead of `pubkey` or
+        `rsa` as in e.g. `rsa/pss-sha256`. If
+        `pubkey` or `rsa` constraints are
+        configured RSASSA-PSS signatures will only be accepted if enabled in
+        `strongswan.conf`(5).
+
+        To specify trust chain constraints for EAP-(T)TLS, append a colon to the
+        EAP method, followed by the key type/size and hash algorithm as
+        discussed above (e.g. `eap-tls:ecdsa-384-sha384`).
+      '';
+
+    } ''
+      Section for a remote authentication round. A remote authentication round
+      defines the constraints for how the peers must authenticate to use this
+      connection. Multiple rounds may be defined to use IKEv2 RFC 4739 Multiple
+      Authentication or IKEv1 XAuth.
+
+      Each round is defined in a section having `remote` as
+      prefix, and an optional unique suffix. To define a single authentication
+      round, the suffix may be omitted.
+    '';
+
+    children = mkAttrsOfParams {
+      ah_proposals = mkCommaSepListParam [] ''
+        AH proposals to offer for the CHILD_SA. A proposal is a set of
+        algorithms. For AH, this includes an integrity algorithm and an optional
+        Diffie-Hellman group. If a DH group is specified, CHILD_SA/Quick Mode
+        rekeying and initial negotiation use a separate Diffie-Hellman exchange
+        using the specified group (refer to esp_proposals for details).
+
+        In IKEv2, multiple algorithms of the same kind can be specified in a
+        single proposal, from which one gets selected. In IKEv1, only one
+        algorithm per kind is allowed per proposal; additional algorithms are
+        implicitly stripped. Use multiple proposals to offer different algorithm
+        combinations in IKEv1.
+
+        Algorithm keywords get separated using dashes. Multiple proposals may be
+        specified in a list. The special value `default` forms
+        a default proposal of supported algorithms considered safe, and is
+        usually a good choice for interoperability. By default no AH proposals
+        are included, instead ESP is proposed.
+      '';
+
+      esp_proposals = mkCommaSepListParam ["default"] ''
+        ESP proposals to offer for the CHILD_SA. A proposal is a set of
+        algorithms. For ESP non-AEAD proposals, this includes an integrity
+        algorithm, an encryption algorithm, an optional Diffie-Hellman group and
+        an optional Extended Sequence Number Mode indicator. For AEAD proposals,
+        a combined mode algorithm is used instead of the separate
+        encryption/integrity algorithms.
+
+        If a DH group is specified, CHILD_SA/Quick Mode rekeying and initial
+        negotiation use a separate Diffie-Hellman exchange using the specified
+        group. However, for IKEv2, the keys of the CHILD_SA created implicitly
+        with the IKE_SA will always be derived from the IKE_SA's key material. So
+        any DH group specified here will only apply when the CHILD_SA is later
+        rekeyed or is created with a separate CREATE_CHILD_SA exchange. A
+        proposal mismatch might, therefore, not immediately be noticed when the
+        SA is established, but may later cause rekeying to fail.
+
+        Extended Sequence Number support may be indicated with the
+        `esn` and `noesn` values, both may be
+        included to indicate support for both modes. If omitted,
+        `noesn` is assumed.
+
+        In IKEv2, multiple algorithms of the same kind can be specified in a
+        single proposal, from which one gets selected. In IKEv1, only one
+        algorithm per kind is allowed per proposal; additional algorithms are
+        implicitly stripped. Use multiple proposals to offer different algorithm
+        combinations in IKEv1.
+
+        Algorithm keywords get separated using dashes. Multiple proposals may be
+        specified as a list. The special value `default` forms
+        a default proposal of supported algorithms considered safe, and is
+        usually a good choice for interoperability. If no algorithms are
+        specified for AH nor ESP, the default set of algorithms for ESP is
+        included.
+      '';
+
+      sha256_96 = mkYesNoParam no ''
+        HMAC-SHA-256 is used with 128-bit truncation with IPsec. For
+        compatibility with implementations that incorrectly use 96-bit truncation
+        this option may be enabled to configure the shorter truncation length in
+        the kernel. This is not negotiated, so this only works with peers that
+        use the incorrect truncation length (or have this option enabled).
+      '';
+
+      local_ts = mkCommaSepListParam ["dynamic"] ''
+        List of local traffic selectors to include in CHILD_SA. Each selector is
+        a CIDR subnet definition, followed by an optional proto/port
+        selector. The special value `dynamic` may be used
+        instead of a subnet definition, which gets replaced by the tunnel outer
+        address or the virtual IP, if negotiated. This is the default.
+
+        A protocol/port selector is surrounded by opening and closing square
+        brackets. Between these brackets, a numeric or getservent(3) protocol
+        name may be specified. After the optional protocol restriction, an
+        optional port restriction may be specified, separated by a slash. The
+        port restriction may be numeric, a getservent(3) service name, or the
+        special value `opaque` for RFC 4301 OPAQUE
+        selectors. Port ranges may be specified as well, none of the kernel
+        backends currently support port ranges, though.
+
+        When IKEv1 is used, only the first selector is interpreted, except if the
+        Cisco Unity extension plugin is used. This is due to a limitation of the
+        IKEv1 protocol, which only allows a single pair of selectors per
+        CHILD_SA. So to tunnel traffic matched by several pairs of selectors when
+        using IKEv1, several children (CHILD_SAs) have to be defined that cover
+        the selectors. The IKE daemon uses traffic selector narrowing for IKEv1,
+        the same way it is standardized and implemented for IKEv2. However, this
+        may lead to problems with other implementations. To avoid that, configure
+        identical selectors in such scenarios.
+      '';
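+      # Illustrative selectors using the syntax described above (subnets and
+      # ports are placeholders): an HTTPS-only subnet plus an unrestricted one:
+      #   local_ts = [ "10.1.0.0/16[tcp/443]" "10.2.0.0/24" ];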
+
+      remote_ts = mkCommaSepListParam ["dynamic"] ''
+        List of remote selectors to include in CHILD_SA. See
+        {option}`local_ts` for a description of the selector syntax.
+      '';
+
+      rekey_time = mkDurationParam "1h" ''
+        Time to schedule CHILD_SA rekeying. CHILD_SA rekeying refreshes key
+        material, optionally using a Diffie-Hellman exchange if a group is
+        specified in the proposal.  To avoid rekey collisions initiated by both
+        ends simultaneously, a value in the range of {option}`rand_time`
+        gets subtracted to form the effective soft lifetime.
+
+        By default CHILD_SA rekeying is scheduled every hour, minus
+        {option}`rand_time`.
+      '';
+
+      life_time = mkOptionalDurationParam ''
+        Maximum lifetime before CHILD_SA gets closed. Usually this hard lifetime
+        is never reached, because the CHILD_SA gets rekeyed before. If that fails
+        for whatever reason, this limit closes the CHILD_SA.  The default is 10%
+        more than the {option}`rekey_time`.
+      '';
+
+      rand_time = mkOptionalDurationParam ''
+        Time range from which to choose a random value to subtract from
+        {option}`rekey_time`. The default is the difference between
+        {option}`life_time` and {option}`rekey_time`.
+      '';
+
+      rekey_bytes = mkIntParam 0 ''
+        Number of bytes processed before initiating CHILD_SA rekeying. CHILD_SA
+        rekeying refreshes key material, optionally using a Diffie-Hellman
+        exchange if a group is specified in the proposal.
+
+        To avoid rekey collisions initiated by both ends simultaneously, a value
+        in the range of {option}`rand_bytes` gets subtracted to form the
+        effective soft volume limit.
+
+        Volume based CHILD_SA rekeying is disabled by default.
+      '';
+
+      life_bytes = mkOptionalIntParam ''
+        Maximum bytes processed before CHILD_SA gets closed. Usually this hard
+        volume limit is never reached, because the CHILD_SA gets rekeyed
+        before. If that fails for whatever reason, this limit closes the
+        CHILD_SA.  The default is 10% more than {option}`rekey_bytes`.
+      '';
+
+      rand_bytes = mkOptionalIntParam ''
+        Byte range from which to choose a random value to subtract from
+        {option}`rekey_bytes`. The default is the difference between
+        {option}`life_bytes` and {option}`rekey_bytes`.
+      '';
+
+      rekey_packets = mkIntParam 0 ''
+        Number of packets processed before initiating CHILD_SA rekeying. CHILD_SA
+        rekeying refreshes key material, optionally using a Diffie-Hellman
+        exchange if a group is specified in the proposal.
+
+        To avoid rekey collisions initiated by both ends simultaneously, a value
+        in the range of {option}`rand_packets` gets subtracted to form
+        the effective soft packet count limit.
+
+        Packet count based CHILD_SA rekeying is disabled by default.
+      '';
+
+      life_packets = mkOptionalIntParam ''
+        Maximum number of packets processed before CHILD_SA gets closed. Usually
+        this hard packets limit is never reached, because the CHILD_SA gets
+        rekeyed before. If that fails for whatever reason, this limit closes the
+        CHILD_SA.
+
+        The default is 10% more than {option}`rekey_packets`.
+      '';
+
+      rand_packets = mkOptionalIntParam ''
+        Packet range from which to choose a random value to subtract from
+        {option}`rekey_packets`. The default is the difference between
+        {option}`life_packets` and {option}`rekey_packets`.
+      '';
+
+      updown = mkOptionalStrParam ''
+        Updown script to invoke on CHILD_SA up and down events.
+      '';
+
+      hostaccess = mkYesNoParam no ''
+        Hostaccess variable to pass to `updown` script.
+      '';
+
+      mode = mkEnumParam [ "tunnel"
+                           "transport"
+                           "transport_proxy"
+                           "beet"
+                           "pass"
+                           "drop"
+                         ] "tunnel" ''
+        IPsec Mode to establish CHILD_SA with.
+
+        - `tunnel` negotiates the CHILD_SA in IPsec Tunnel Mode,
+        - whereas `transport` uses IPsec Transport Mode.
+        - `transport_proxy` signifies the special Mobile IPv6
+          Transport Proxy Mode.
+        - `beet` is the Bound End to End Tunnel mixture mode,
+          working with fixed inner addresses without the need to include them in
+          each packet.
+        - Both `transport` and `beet` modes are
+          subject to mode negotiation; `tunnel` mode is
+          negotiated if the preferred mode is not available.
+        - `pass` and `drop` are used to install
+          shunt policies which explicitly bypass the defined traffic from IPsec
+          processing or drop it, respectively.
+      '';
+
+      policies = mkYesNoParam yes ''
+        Whether to install IPsec policies or not. Disabling this can be useful in
+        some scenarios, e.g. MIPv6, where policies are not managed by the IKE
+        daemon. Since 5.3.3.
+      '';
+
+      policies_fwd_out = mkYesNoParam no ''
+        Whether to install outbound FWD IPsec policies or not. Enabling this is
+        required in case there is a drop policy that would match and block
+        forwarded traffic for this CHILD_SA. Since 5.5.1.
+      '';
+
+      dpd_action = mkEnumParam ["clear" "trap" "restart"] "clear" ''
+        Action to perform for this CHILD_SA on DPD timeout. The default `clear`
+        closes the CHILD_SA and does not take further action. `trap` installs a
+        trap policy, which catches matching traffic and tries to re-negotiate
+        the tunnel on-demand. `restart` immediately tries to re-negotiate the
+        CHILD_SA under a fresh IKE_SA.
+      '';
+
+      ipcomp = mkYesNoParam no ''
+        Enable IPComp compression before encryption. If enabled, IKE tries to
+        negotiate IPComp compression to compress ESP payload data prior to
+        encryption.
+      '';
+
+      inactivity = mkDurationParam "0s" ''
+        Timeout before closing CHILD_SA after inactivity. If no traffic has been
+        processed in either direction for the configured timeout, the CHILD_SA
+        gets closed due to inactivity. The default value of 0 disables inactivity
+        checks.
+      '';
+
+      reqid = mkIntParam 0 ''
+        Fixed reqid to use for this CHILD_SA. This might be helpful in some
+        scenarios, but works only if each CHILD_SA configuration is instantiated
+        not more than once. The default of 0 uses dynamic reqids, allocated
+        incrementally.
+      '';
+
+      priority = mkIntParam 0 ''
+        Optional fixed priority for IPsec policies. This could be useful to
+        install high-priority drop policies. The default of 0 uses dynamically
+        calculated priorities based on the size of the traffic selectors.
+      '';
+
+      interface = mkOptionalStrParam ''
+        Optional interface name to restrict outbound IPsec policies.
+      '';
+
+      mark_in = mkStrParam "0/0x00000000" ''
+        Netfilter mark and mask for input traffic. On Linux, Netfilter may
+        require marks on each packet to match an SA/policy having that option
+        set. This allows installing duplicate policies and enables Netfilter
+        rules to select specific SAs/policies for incoming traffic. Note that
+        inbound marks are only set on policies, by default, unless
+        {option}`mark_in_sa` is enabled. The special value
+        `%unique` sets a unique mark on each CHILD_SA instance,
+        beyond that the value `%unique-dir` assigns a different
+        unique mark for each CHILD_SA direction (in/out).
+
+        An additional mask may be appended to the mark, separated by
+        `/`. The default mask if omitted is
+        `0xffffffff`.
+      '';
+
+      mark_in_sa = mkYesNoParam no ''
+        Whether to set {option}`mark_in` on the inbound SA. By default,
+        the inbound mark is only set on the inbound policy. The tuple destination
+        address, protocol and SPI is unique and the mark is not required to find
+        the correct SA, allowing traffic to be marked after decryption instead (where
+        more specific selectors may be used) to match different policies. Marking
+        packets before decryption is still possible, even if no mark is set on
+        the SA.
+      '';
+
+      mark_out = mkStrParam "0/0x00000000" ''
+        Netfilter mark and mask for output traffic. On Linux, Netfilter may
+        require marks on each packet to match a policy/SA having that option
+        set. This allows installing duplicate policies and enables Netfilter
+        rules to select specific policies/SAs for outgoing traffic. The special
+        value `%unique` sets a unique mark on each CHILD_SA
+        instance, beyond that the value `%unique-dir` assigns a
+        different unique mark for each CHILD_SA direction (in/out).
+
+        An additional mask may be appended to the mark, separated by
+        `/`. The default mask if omitted is
+        `0xffffffff`.
+      '';
+
+      set_mark_in = mkStrParam "0/0x00000000" ''
+        Netfilter mark applied to packets after the inbound IPsec SA processed
+        them. This way it's not necessary to mark packets via Netfilter before
+        decryption or right afterwards to match policies or process them
+        differently (e.g. via policy routing).
+
+        An additional mask may be appended to the mark, separated by
+        `/`. The default mask if omitted is `0xffffffff`. The
+        special value `%same` uses the value (but not the mask)
+        from {option}`mark_in` as mark value, which can be fixed,
+        `%unique` or `%unique-dir`.
+
+        Setting marks in XFRM input requires Linux 4.19 or higher.
+      '';
+
+      set_mark_out = mkStrParam "0/0x00000000" ''
+        Netfilter mark applied to packets after the outbound IPsec SA processed
+        them. This allows processing ESP packets differently than the original
+        traffic (e.g. via policy routing).
+
+        An additional mask may be appended to the mark, separated by
+        `/`. The default mask if omitted is `0xffffffff`. The
+        special value `%same` uses the value (but not the mask)
+        from {option}`mark_out` as mark value, which can be fixed,
+        `%unique` or `%unique-dir`.
+
+        Setting marks in XFRM output is supported since Linux 4.14. Setting a
+        mask requires at least Linux 4.19.
+      '';
+
+      if_id_in = mkStrParam "0" ''
+        XFRM interface ID set on inbound policies/SA. This allows installing
+        duplicate policies/SAs and associates them with an interface with the
+        same ID. The special value `%unique` sets a unique
+        interface ID on each CHILD_SA instance, beyond that the value
+        `%unique-dir` assigns a different unique interface ID
+        for each CHILD_SA direction (in/out).
+      '';
+
+      if_id_out = mkStrParam "0" ''
+        XFRM interface ID set on outbound policies/SA. This allows installing
+        duplicate policies/SAs and associates them with an interface with the
+        same ID. The special value `%unique` sets a unique
+        interface ID on each CHILD_SA instance, beyond that the value
+        `%unique-dir` assigns a different unique interface ID
+        for each CHILD_SA direction (in/out).
+
+        The daemon will not install routes for CHILD_SAs that have this option set.
+      '';
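+      # Hedged sketch: to bind this CHILD_SA to an XFRM interface with ID 42 in
+      # both directions (creating the interface itself is out of scope here):
+      #   if_id_in = "42"; if_id_out = "42";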
+
+      tfc_padding = mkParamOfType (with lib.types; either int (enum ["mtu"])) 0 ''
+        Pads ESP packets with additional data to have a consistent ESP packet
+        size for improved Traffic Flow Confidentiality. The padding defines the
+        minimum size of all ESP packets sent.  The default value of
+        `0` disables TFC padding, the special value
+        `mtu` adds TFC padding to create a packet size equal to
+        the Path Maximum Transfer Unit.
+      '';
+
+      replay_window = mkIntParam 32 ''
+        IPsec replay window to configure for this CHILD_SA. Larger values than
+        the default of `32` are supported using the Netlink
+        backend only, a value of `0` disables IPsec replay
+        protection.
+      '';
+
+      hw_offload = mkEnumParam ["yes" "no" "auto" "crypto" "packet"] "no" ''
+        Enable hardware offload for this CHILD_SA, if supported by the IPsec
+        implementation. The values `crypto` or `packet` enforce crypto or full
+        packet offloading and the installation will fail if the selected mode is not
+        supported by either kernel or device. On Linux, `packet` also offloads
+        policies, including trap policies. The value `auto` enables full packet
+        or crypto offloading, if either is supported, but the installation does not
+        fail otherwise.
+      '';
+
+      copy_df = mkYesNoParam yes ''
+        Whether to copy the DF bit to the outer IPv4 header in tunnel mode. This
+        effectively disables Path MTU discovery (PMTUD). Controlling this
+        behavior is not supported by all kernel interfaces.
+      '';
+
+      copy_ecn = mkYesNoParam yes ''
+        Whether to copy the ECN (Explicit Congestion Notification) header field
+        to/from the outer IP header in tunnel mode. Controlling this behavior is
+        not supported by all kernel interfaces.
+      '';
+
+      copy_dscp = mkEnumParam [ "out" "in" "yes" "no" ] "out" ''
+        Whether to copy the DSCP (Differentiated Services Field Codepoint)
+        header field to/from the outer IP header in tunnel mode. The value
+        `out` only copies the field from the inner to the outer
+        header, the value `in` does the opposite and only
+        copies the field from the outer to the inner header when decapsulating,
+        the value `yes` copies the field in both directions,
+        and the value `no` disables copying the field
+        altogether. Setting this to `yes` or
+        `in` could allow an attacker to adversely affect other
+        traffic at the receiver, which is why the default is
+        `out`. Controlling this behavior is not supported by
+        all kernel interfaces.
+      '';
+
+      start_action = mkEnumParam ["none" "trap" "start"] "none" ''
+        Action to perform after loading the configuration.
+
+        - The default of `none` loads the connection only, which
+          then can be manually initiated or used as a responder configuration.
+        - The value `trap` installs a trap policy, which triggers
+          the tunnel as soon as matching traffic has been detected.
+        - The value `start` initiates the connection actively.
+
+        When unloading or replacing a CHILD_SA configuration having a
+        {option}`start_action` different from `none`,
+        the inverse action is performed. Configurations with
+        `start` get closed, while those with
+        `trap` get uninstalled.
+      '';
+
+      close_action = mkEnumParam ["none" "trap" "start"] "none" ''
+        Action to perform after a CHILD_SA gets closed by the peer.
+
+        - The default of `none` does not take any action,
+        - `trap` installs a trap policy for the CHILD_SA.
+        - `start` tries to re-create the CHILD_SA.
+
+        {option}`close_action` does not provide any guarantee that the
+        CHILD_SA is kept alive. It acts on explicit close messages only, but not
+        on negotiation failures. Use trap policies to reliably re-create failed
+        CHILD_SAs.
+      '';
+
+    } ''
+      CHILD_SA configuration sub-section. Each connection definition may have
+      one or more sections in its {option}`children` subsection. The
+      section name defines the name of the CHILD_SA configuration, which must be
+      unique within the connection (denoted \<child\> below).
+    '';
+  } ''
+    Section defining IKE connection configurations, each in its own subsection
+    with an arbitrary yet unique name
+  '';
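+  # A hedged usage sketch (the option path services.strongswan-swanctl.swanctl
+  # and all values are assumptions for illustration, not taken from this file):
+  # a minimal IKEv2 road-warrior responder built from the parameters above
+  # might look like
+  #
+  #   services.strongswan-swanctl.swanctl.connections.rw = {
+  #     version = 2;
+  #     local_addrs = [ "192.0.2.1" ];
+  #     pools = [ "rw-pool" ];
+  #     local.main = { auth = "pubkey"; certs = [ "serverCert.pem" ]; id = "vpn.example.org"; };
+  #     remote.main = { auth = "eap-mschapv2"; eap_id = "%any"; };
+  #     children.net = { local_ts = [ "10.0.0.0/8" ]; esp_proposals = [ "default" ]; };
+  #   };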
+
+  secrets = let
+    mkEapXauthParams = mkPrefixedAttrsOfParams {
+      secret = mkOptionalStrParam ''
+        Value of the EAP/XAuth secret. It may either be an ASCII string, a hex
+        encoded string if it has a `0x` prefix or a Base64 encoded string if it
+        has a `0s` prefix in its value.
+      '';
+
+      id = mkPrefixedAttrsOfParam (mkOptionalStrParam "") ''
+        Identity the EAP/XAuth secret belongs to. Multiple unique identities may
+        be specified, each having an `id` prefix, if a secret
+        is shared between multiple users.
+      '';
+
+    } ''
+      EAP secret section for a specific secret. Each EAP secret is defined in a
+      unique section having the `eap` prefix. EAP secrets are
+      used for XAuth authentication as well.
+    '';
+
+  in {
+
+    eap   = mkEapXauthParams;
+    xauth = mkEapXauthParams;
+
+    ntlm = mkPrefixedAttrsOfParams {
+      secret = mkOptionalStrParam ''
+        Value of the NTLM secret, which is the NT Hash of the actual secret,
+        that is, MD4(UTF-16LE(secret)). The resulting 16-byte value may either
+        be given as a hex encoded string with a `0x` prefix or as a Base64 encoded
+        string with a `0s` prefix.
+      '';
+
+      id = mkPrefixedAttrsOfParam (mkOptionalStrParam "") ''
+        Identity the NTLM secret belongs to. Multiple unique identities may be
+        specified, each having an id prefix, if a secret is shared between
+        multiple users.
+      '';
+    } ''
+      NTLM secret section for a specific secret. Each NTLM secret is defined in
+      a unique section having the `ntlm` prefix. NTLM secrets
+      may only be used for EAP-MSCHAPv2 authentication.
+    '';
+
+    ike = mkPrefixedAttrsOfParams {
+      secret = mkOptionalStrParam ''
+        Value of the IKE preshared secret. It may either be an ASCII string, a
+        hex encoded string if it has a `0x` prefix or a Base64 encoded string if
+        it has a `0s` prefix in its value.
+      '';
+
+      id = mkPrefixedAttrsOfParam (mkOptionalStrParam "") ''
+        IKE identity the IKE preshared secret belongs to. Multiple unique
+        identities may be specified, each having an `id`
+        prefix, if a secret is shared between multiple peers.
+      '';
+    } ''
+      IKE preshared secret section for a specific secret. Each IKE PSK is
+      defined in a unique section having the `ike` prefix.
+    '';
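+    # Sketch with placeholder values: a preshared key shared between two peer
+    # identities could be declared as
+    #   ike.mutual = {
+    #     secret = "0sPLACEHOLDER-BASE64-SECRET";
+    #     id.a = "moon.example.org";
+    #     id.b = "sun.example.org";
+    #   };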
+
+    ppk = mkPrefixedAttrsOfParams {
+      secret = mkOptionalStrParam ''
+        Value of the PPK. It may either be an ASCII string, a hex encoded string
+        if it has a `0x` prefix or a Base64 encoded string if
+        it has a `0s` prefix in its value. Should have at least
+        256 bits of entropy for 128-bit security.
+      '';
+
+      id = mkPrefixedAttrsOfParam (mkOptionalStrParam "") ''
+        PPK identity the PPK belongs to. Multiple unique identities may be
+        specified, each having an `id` prefix, if a secret is
+        shared between multiple peers.
+      '';
+    } ''
+      Postquantum Preshared Key (PPK) section for a specific secret. Each PPK is
+      defined in a unique section having the `ppk` prefix.
+    '';
+
+    private = mkPrefixedAttrsOfParams {
+      file = mkOptionalStrParam ''
+        File name in the private folder for which this passphrase should be used.
+      '';
+
+      secret = mkOptionalStrParam ''
+        Value of decryption passphrase for private key.
+      '';
+    } ''
+      Private key decryption passphrase for a key in the
+      `private` folder.
+    '';
+
+    rsa = mkPrefixedAttrsOfParams {
+      file = mkOptionalStrParam ''
+        File name in the `rsa` folder for which this passphrase
+        should be used.
+      '';
+      secret = mkOptionalStrParam ''
+        Value of decryption passphrase for RSA key.
+      '';
+    } ''
+      Private key decryption passphrase for a key in the `rsa`
+      folder.
+    '';
+
+    ecdsa = mkPrefixedAttrsOfParams {
+      file = mkOptionalStrParam ''
+        File name in the `ecdsa` folder for which this
+        passphrase should be used.
+      '';
+      secret = mkOptionalStrParam ''
+        Value of decryption passphrase for ECDSA key.
+      '';
+    } ''
+      Private key decryption passphrase for a key in the
+      `ecdsa` folder.
+    '';
+
+    pkcs8 = mkPrefixedAttrsOfParams {
+      file = mkOptionalStrParam ''
+        File name in the `pkcs8` folder for which this
+        passphrase should be used.
+      '';
+      secret = mkOptionalStrParam ''
+        Value of decryption passphrase for PKCS#8 key.
+      '';
+    } ''
+      Private key decryption passphrase for a key in the
+      `pkcs8` folder.
+    '';
+
+    pkcs12 = mkPrefixedAttrsOfParams {
+      file = mkOptionalStrParam ''
+        File name in the `pkcs12` folder for which this
+        passphrase should be used.
+      '';
+      secret = mkOptionalStrParam ''
+        Value of decryption passphrase for PKCS#12 container.
+      '';
+    } ''
+      PKCS#12 decryption passphrase for a container in the
+      `pkcs12` folder.
+    '';
+
+    token = mkPrefixedAttrsOfParams {
+      handle = mkOptionalHexParam ''
+        Hex-encoded CKA_ID or handle of the private key on the token or TPM,
+        respectively.
+      '';
+
+      slot = mkOptionalIntParam ''
+        Optional slot number to access the token.
+      '';
+
+      module = mkOptionalStrParam ''
+        Optional PKCS#11 module name to access the token.
+      '';
+
+      pin = mkOptionalStrParam ''
+        Optional PIN required to access the key on the token. If none is
+        provided the user is prompted during an interactive
+        `--load-creds` call.
+      '';
+    } "Definition for a private key that's stored on a token/smartcard/TPM.";
+
+  };
+
+  pools = mkAttrsOfParams {
+    addrs = mkOptionalStrParam ''
+      Subnet or range defining addresses allocated in pool. Accepts a single
+      CIDR subnet defining the pool to allocate addresses from or an address
+      range (\<from\>-\<to\>). Pools must be unique and non-overlapping.
+    '';
+
+    dns           = mkCommaSepListParam [] "Address or CIDR subnets";
+    nbns          = mkCommaSepListParam [] "Address or CIDR subnets";
+    dhcp          = mkCommaSepListParam [] "Address or CIDR subnets";
+    netmask       = mkCommaSepListParam [] "Address or CIDR subnets";
+    server        = mkCommaSepListParam [] "Address or CIDR subnets";
+    subnet        = mkCommaSepListParam [] "Address or CIDR subnets";
+    split_include = mkCommaSepListParam [] "Address or CIDR subnets";
+    split_exclude = mkCommaSepListParam [] "Address or CIDR subnets";
+  } ''
+    Section defining named pools. Named pools may be referenced by connections
+    with the pools option to assign virtual IPs and other configuration
+    attributes. Each pool must have a unique name (denoted \<name\> below).
+  '';
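+  # Illustrative pool matching the connection sketch above (addresses are
+  # placeholders):
+  #   pools.rw-pool = { addrs = "10.3.0.0/24"; dns = [ "10.3.0.1" ]; };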
+}
diff --git a/nixpkgs/nixos/modules/services/networking/strongswan.nix b/nixpkgs/nixos/modules/services/networking/strongswan.nix
new file mode 100644
index 000000000000..e58526814d1a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/strongswan.nix
@@ -0,0 +1,170 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  inherit (builtins) toFile;
+  inherit (lib) concatMapStringsSep concatStringsSep mapAttrsToList
+                mkIf mkEnableOption mkOption types literalExpression optionalString;
+
+  cfg = config.services.strongswan;
+
+  ipsecSecrets = secrets: toFile "ipsec.secrets" (
+    concatMapStringsSep "\n" (f: "include ${f}") secrets
+  );
+
+  ipsecConf = {setup, connections, ca}:
+    let
+      # https://wiki.strongswan.org/projects/strongswan/wiki/IpsecConf
+      makeSections = type: sections: concatStringsSep "\n\n" (
+        mapAttrsToList (sec: attrs:
+          "${type} ${sec}\n" +
+            (concatStringsSep "\n" ( mapAttrsToList (k: v: "  ${k}=${v}") attrs ))
+        ) sections
+      );
+      setupConf       = makeSections "config" { inherit setup; };
+      connectionsConf = makeSections "conn" connections;
+      caConf          = makeSections "ca" ca;
+
+    in
+    builtins.toFile "ipsec.conf" ''
+      ${setupConf}
+      ${connectionsConf}
+      ${caConf}
+    '';
+
+  strongswanConf = {setup, connections, ca, secretsFile, managePlugins, enabledPlugins}: toFile "strongswan.conf" ''
+    charon {
+      ${optionalString managePlugins "load_modular = no"}
+      ${optionalString managePlugins ("load = " + (concatStringsSep " " enabledPlugins))}
+      plugins {
+        stroke {
+          secrets_file = ${secretsFile}
+        }
+      }
+    }
+
+    starter {
+      config_file = ${ipsecConf { inherit setup connections ca; }}
+    }
+  '';
+
+in
+{
+  options.services.strongswan = {
+    enable = mkEnableOption (lib.mdDoc "strongSwan");
+
+    secrets = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "/run/keys/ipsec-foo.secret" ];
+      description = lib.mdDoc ''
+        A list of paths to IPSec secret files. These
+        files will be included in the main ipsec.secrets file with
+        the `include` directive. It is safer if these
+        paths are absolute.
+      '';
+    };
+
+    setup = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = { cachecrls = "yes"; strictcrlpolicy = "yes"; };
+      description = lib.mdDoc ''
+        A set of options for the ‘config setup’ section of the
+        {file}`ipsec.conf` file. Defines general
+        configuration parameters.
+      '';
+    };
+
+    connections = mkOption {
+      type = types.attrsOf (types.attrsOf types.str);
+      default = {};
+      example = literalExpression ''
+        {
+          "%default" = {
+            keyexchange = "ikev2";
+            keyingtries = "1";
+          };
+          roadwarrior = {
+            auto       = "add";
+            leftcert   = "/run/keys/moonCert.pem";
+            leftid     = "@moon.strongswan.org";
+            leftsubnet = "10.1.0.0/16";
+            right      = "%any";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        A set of connections and their options for the ‘conn xxx’
+        sections of the {file}`ipsec.conf` file.
+      '';
+    };
+
+    ca = mkOption {
+      type = types.attrsOf (types.attrsOf types.str);
+      default = {};
+      example = {
+        strongswan = {
+          auto   = "add";
+          cacert = "/run/keys/strongswanCert.pem";
+          crluri = "http://crl2.strongswan.org/strongswan.crl";
+        };
+      };
+      description = lib.mdDoc ''
+        A set of CAs (certification authorities) and their options for
+        the ‘ca xxx’ sections of the {file}`ipsec.conf`
+        file.
+      '';
+    };
+
+    managePlugins = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If set to true, this option will disable automatic plugin loading and
+        then tell strongSwan to enable the plugins specified in the
+        {option}`enabledPlugins` option.
+      '';
+    };
+
+    enabledPlugins = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        A list of additional plugins to enable if
+        {option}`managePlugins` is true.
+      '';
+    };
+  };
+
+
+  config = with cfg;
+  let
+    secretsFile = ipsecSecrets cfg.secrets;
+  in
+  mkIf enable
+    {
+
+    # Here we should use the default strongswan ipsec.secrets and append to
+    # it (the default one is empty, so this is not a problem for now).
+    environment.etc."ipsec.secrets".source = secretsFile;
+
+    systemd.services.strongswan = {
+      description = "strongSwan IPSec Service";
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ kmod iproute2 iptables util-linux ]; # XXX Linux
+      after = [ "network-online.target" ];
+      environment = {
+        STRONGSWAN_CONF = strongswanConf { inherit setup connections ca secretsFile managePlugins enabledPlugins; };
+      };
+      serviceConfig = {
+        ExecStart  = "${pkgs.strongswan}/sbin/ipsec start --nofork";
+      };
+      preStart = ''
+        # with 'nopeerdns' setting, ppp writes into this folder
+        mkdir -m 700 -p /etc/ppp
+      '';
+    };
+  };
+}
+
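A minimal configuration sketch using the options defined above; the addresses, subnets, and secret path are placeholders, and the connection keys are ordinary ipsec.conf keywords:

```nix
{
  services.strongswan = {
    enable = true;
    secrets = [ "/run/keys/ipsec-site.secret" ];   # placeholder secret file
    setup = { uniqueids = "yes"; };
    connections."site-to-site" = {
      auto        = "start";
      keyexchange = "ikev2";
      left        = "192.0.2.1";        # placeholder local address
      leftsubnet  = "10.1.0.0/16";
      right       = "198.51.100.1";     # placeholder remote address
      rightsubnet = "10.2.0.0/16";
    };
  };
}
```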
diff --git a/nixpkgs/nixos/modules/services/networking/stubby.nix b/nixpkgs/nixos/modules/services/networking/stubby.nix
new file mode 100644
index 000000000000..183002ff72b9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/stubby.nix
@@ -0,0 +1,103 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.stubby;
+  settingsFormat = pkgs.formats.yaml { };
+  confFile = settingsFormat.generate "stubby.yml" cfg.settings;
+in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "stubby" "debugLogging" ] "Use services.stubby.logLevel = \"debug\"; instead.")
+  ] ++ map (x:
+    (mkRemovedOptionModule [ "services" "stubby" x ]
+      "Stubby configuration moved to services.stubby.settings.")) [
+        "authenticationMode"
+        "fallbackProtocols"
+        "idleTimeout"
+        "listenAddresses"
+        "queryPaddingBlocksize"
+        "roundRobinUpstreams"
+        "subnetPrivate"
+        "upstreamServers"
+      ];
+
+  options = {
+    services.stubby = {
+
+      enable = mkEnableOption (lib.mdDoc "Stubby DNS resolver");
+
+      settings = mkOption {
+        type = types.attrsOf settingsFormat.type;
+        example = lib.literalExpression ''
+          pkgs.stubby.passthru.settingsExample // {
+            upstream_recursive_servers = [{
+              address_data = "158.64.1.29";
+              tls_auth_name = "kaitain.restena.lu";
+              tls_pubkey_pinset = [{
+                digest = "sha256";
+                value = "7ftvIkA+UeN/ktVkovd/7rPZ6mbkhVI7/8HnFJIiLa4=";
+              }];
+            }];
+          };
+        '';
+        description = lib.mdDoc ''
+          Content of the Stubby configuration file. All Stubby settings may be set or queried
+          here. The default settings are available at
+          `pkgs.stubby.passthru.settingsExample`. See
+          <https://dnsprivacy.org/wiki/display/DP/Configuring+Stubby>.
+          A list of the public recursive servers can be found here:
+          <https://dnsprivacy.org/wiki/display/DP/DNS+Privacy+Test+Servers>.
+        '';
+      };
+
+      logLevel = let
+        logLevels = {
+          emerg = 0;
+          alert = 1;
+          crit = 2;
+          error = 3;
+          warning = 4;
+          notice = 5;
+          info = 6;
+          debug = 7;
+        };
+      in mkOption {
+        default = null;
+        type = types.nullOr (types.enum (attrNames logLevels ++ attrValues logLevels));
+        apply = v: if isString v then logLevels.${v} else v;
+        description = lib.mdDoc "Log verbosity (syslog keyword or level).";
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [{
+      assertion =
+        (cfg.settings.resolution_type or "") == "GETDNS_RESOLUTION_STUB";
+      message = ''
+        services.stubby.settings.resolution_type must be set to "GETDNS_RESOLUTION_STUB".
+        Is services.stubby.settings unset?
+      '';
+    }];
+
+    services.stubby.settings.appdata_dir = "/var/cache/stubby";
+
+    systemd.services.stubby = {
+      description = "Stubby local DNS resolver";
+      after = [ "network.target" ];
+      before = [ "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "notify";
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";
+        ExecStart = "${pkgs.stubby}/bin/stubby -C ${confFile} ${optionalString (cfg.logLevel != null) "-v ${toString cfg.logLevel}"}";
+        DynamicUser = true;
+        CacheDirectory = "stubby";
+      };
+    };
+  };
+}
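Because the module asserts that `resolution_type` is set, a configuration would typically start from `pkgs.stubby.passthru.settingsExample` (which, per the description above, carries the default settings) and override individual keys. A minimal sketch; the listen addresses are placeholders:

```nix
{ pkgs, ... }:
{
  services.stubby = {
    enable = true;
    logLevel = "info";
    settings = pkgs.stubby.passthru.settingsExample // {
      # placeholder listen addresses; any Stubby setting can be overridden here
      listen_addresses = [ "127.0.0.1" "0::1" ];
    };
  };
}
```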
diff --git a/nixpkgs/nixos/modules/services/networking/stunnel.nix b/nixpkgs/nixos/modules/services/networking/stunnel.nix
new file mode 100644
index 000000000000..996e9b225392
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/stunnel.nix
@@ -0,0 +1,192 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.stunnel;
+  yesNo = val: if val then "yes" else "no";
+
+  verifyRequiredField = type: field: n: c: {
+    assertion = hasAttr field c;
+    message =  "stunnel: \"${n}\" ${type} configuration - Field ${field} is required.";
+  };
+
+  verifyChainPathAssert = n: c: {
+    assertion = (c.verifyHostname or null) == null || (c.verifyChain || c.verifyPeer);
+    message =  "stunnel: \"${n}\" client configuration - hostname verification " +
+      "is not possible without either verifyChain or verifyPeer enabled";
+  };
+
+  removeNulls = mapAttrs (_: filterAttrs (_: v: v != null));
+  mkValueString = v:
+    if v == true then "yes"
+    else if v == false then "no"
+    else generators.mkValueStringDefault {} v;
+  generateConfig = c:
+    generators.toINI {
+      mkSectionName = id;
+      mkKeyValue = k: v: "${k} = ${mkValueString v}";
+    } (removeNulls c);
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.stunnel = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the stunnel TLS tunneling service.";
+      };
+
+      user = mkOption {
+        type = with types; nullOr str;
+        default = "nobody";
+        description = lib.mdDoc "The user under which stunnel runs.";
+      };
+
+      group = mkOption {
+        type = with types; nullOr str;
+        default = "nogroup";
+        description = lib.mdDoc "The group under which stunnel runs.";
+      };
+
+      logLevel = mkOption {
+        type = types.enum [ "emerg" "alert" "crit" "err" "warning" "notice" "info" "debug" ];
+        default = "info";
+        description = lib.mdDoc "Verbosity of stunnel output.";
+      };
+
+      fipsMode = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable FIPS 140-2 mode required for compliance.";
+      };
+
+      enableInsecureSSLv3 = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable support for the insecure SSLv3 protocol.";
+      };
+
+
+      servers = mkOption {
+        description = lib.mdDoc ''
+          Define the server configurations.
+
+          See "SERVICE-LEVEL OPTIONS" in {manpage}`stunnel(8)`.
+        '';
+        type = with types; attrsOf (attrsOf (nullOr (oneOf [bool int str])));
+        example = {
+          fancyWebserver = {
+            accept = 443;
+            connect = 8080;
+            cert = "/path/to/pem/file";
+          };
+        };
+        default = { };
+      };
+
+      clients = mkOption {
+        description = lib.mdDoc ''
+          Define the client configurations.
+
+          By default, verifyChain and OCSPaia are enabled and a CAFile is provided from pkgs.cacert.
+
+          See "SERVICE-LEVEL OPTIONS" in {manpage}`stunnel(8)`.
+        '';
+        type = with types; attrsOf (attrsOf (nullOr (oneOf [bool int str])));
+
+        apply = let
+          applyDefaults = c:
+            {
+              CAFile = "${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt";
+              OCSPaia = true;
+              verifyChain = true;
+            } // c;
+          setCheckHostFromVerifyHostname = c:
+            # To preserve backward-compatibility with the old NixOS stunnel module
+            # definition, allow "verifyHostname" as an alias for "checkHost".
+            c // {
+              checkHost = c.checkHost or c.verifyHostname or null;
+              verifyHostname = null; # Not a real stunnel configuration setting
+            };
+          forceClient = c: c // { client = true; };
+        in mapAttrs (_: c: forceClient (setCheckHostFromVerifyHostname (applyDefaults c)));
+
+        example = {
+          foobar = {
+            accept = "0.0.0.0:8080";
+            connect = "nixos.org:443";
+            verifyChain = false;
+          };
+        };
+        default = { };
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = concatLists [
+      (singleton {
+        assertion = (length (attrValues cfg.servers) != 0) || ((length (attrValues cfg.clients)) != 0);
+        message = "stunnel: At least one server- or client-configuration has to be present.";
+      })
+
+      (mapAttrsToList verifyChainPathAssert cfg.clients)
+      (mapAttrsToList (verifyRequiredField "client" "accept") cfg.clients)
+      (mapAttrsToList (verifyRequiredField "client" "connect") cfg.clients)
+      (mapAttrsToList (verifyRequiredField "server" "accept") cfg.servers)
+      (mapAttrsToList (verifyRequiredField "server" "cert") cfg.servers)
+      (mapAttrsToList (verifyRequiredField "server" "connect") cfg.servers)
+    ];
+
+    environment.systemPackages = [ pkgs.stunnel ];
+
+    environment.etc."stunnel.cfg".text = ''
+      ${ optionalString (cfg.user != null) "setuid = ${cfg.user}" }
+      ${ optionalString (cfg.group != null) "setgid = ${cfg.group}" }
+
+      debug = ${cfg.logLevel}
+
+      ${ optionalString cfg.fipsMode "fips = yes" }
+      ${ optionalString cfg.enableInsecureSSLv3 "options = -NO_SSLv3" }
+
+      ; ----- SERVER CONFIGURATIONS -----
+      ${ generateConfig cfg.servers }
+
+      ; ----- CLIENT CONFIGURATIONS -----
+      ${ generateConfig cfg.clients }
+    '';
+
+    systemd.services.stunnel = {
+      description = "stunnel TLS tunneling service";
+      after = [ "network.target" ];
+      wants = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."stunnel.cfg".source ];
+      serviceConfig = {
+        ExecStart = "${pkgs.stunnel}/bin/stunnel ${config.environment.etc."stunnel.cfg".source}";
+        Type = "forking";
+      };
+    };
+
+    meta.maintainers = with maintainers; [
+      # Server side
+      lschuermann
+      # Client side
+      das_j
+    ];
+  };
+
+}
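With the client defaults applied in `apply` above (a CAFile from `pkgs.cacert`, `OCSPaia` and `verifyChain` enabled), forwarding a local plaintext port to a TLS endpoint needs little configuration. An illustrative sketch; the host names and ports are placeholders:

```nix
{
  services.stunnel = {
    enable = true;
    clients.smtp-tls = {
      accept  = "127.0.0.1:1587";          # local plaintext listener (placeholder)
      connect = "mail.example.org:465";    # remote TLS endpoint (placeholder)
      verifyHostname = "mail.example.org"; # alias for checkHost, kept for compatibility
    };
  };
}
```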
diff --git a/nixpkgs/nixos/modules/services/networking/supplicant.nix b/nixpkgs/nixos/modules/services/networking/supplicant.nix
new file mode 100644
index 000000000000..13d84736e2c2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/supplicant.nix
@@ -0,0 +1,240 @@
+{ config, lib, utils, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking.supplicant;
+
+  # We must escape interfaces due to the systemd interpretation
+  subsystemDevice = interface:
+    "sys-subsystem-net-devices-${utils.escapeSystemdPath interface}.device";
+
+  serviceName = iface: "supplicant-${if (iface=="WLAN") then "wlan@" else (
+                                     if (iface=="LAN") then "lan@" else (
+                                     if (iface=="DBUS") then "dbus"
+                                     else (replaceStrings [" "] ["-"] iface)))}";
+
+  # TODO: Use proper privilege separation for wpa_supplicant
+  supplicantService = iface: suppl:
+    let
+      deps = (if (iface=="WLAN"||iface=="LAN") then ["sys-subsystem-net-devices-%i.device"] else (
+             if (iface=="DBUS") then ["dbus.service"]
+             else (map subsystemDevice (splitString " " iface))))
+             ++ optional (suppl.bridge!="") (subsystemDevice suppl.bridge);
+
+      ifaceArg = concatStringsSep " -N " (map (i: "-i${i}") (splitString " " iface));
+      driverArg = optionalString (suppl.driver != null) "-D${suppl.driver}";
+      bridgeArg = optionalString (suppl.bridge!="") "-b${suppl.bridge}";
+      confFileArg = optionalString (suppl.configFile.path!=null) "-c${suppl.configFile.path}";
+      extraConfFile = pkgs.writeText "supplicant-extra-conf-${replaceStrings [" "] ["-"] iface}" ''
+        ${optionalString suppl.userControlled.enable "ctrl_interface=DIR=${suppl.userControlled.socketDir} GROUP=${suppl.userControlled.group}"}
+        ${optionalString suppl.configFile.writable "update_config=1"}
+        ${suppl.extraConf}
+      '';
+    in
+      { description = "Supplicant ${iface}${optionalString (iface=="WLAN"||iface=="LAN") " %I"}";
+        wantedBy = [ "multi-user.target" ] ++ deps;
+        wants = [ "network.target" ];
+        bindsTo = deps;
+        after = deps;
+        before = [ "network.target" ];
+
+        path = [ pkgs.coreutils ];
+
+        preStart = ''
+          ${optionalString (suppl.configFile.path!=null && suppl.configFile.writable) ''
+            (umask 077 && touch -a "${suppl.configFile.path}")
+          ''}
+          ${optionalString suppl.userControlled.enable ''
+            install -dm770 -g "${suppl.userControlled.group}" "${suppl.userControlled.socketDir}"
+          ''}
+        '';
+
+        serviceConfig.ExecStart = "${pkgs.wpa_supplicant}/bin/wpa_supplicant -s ${driverArg} ${confFileArg} -I${extraConfFile} ${bridgeArg} ${suppl.extraCmdArgs} ${if (iface=="WLAN"||iface=="LAN") then "-i%I" else (if (iface=="DBUS") then "-u" else ifaceArg)}";
+
+      };
+
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    networking.supplicant = mkOption {
+      type = with types; attrsOf (submodule {
+        options = {
+
+          configFile = {
+
+            path = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              example = literalExpression "/etc/wpa_supplicant.conf";
+              description = lib.mdDoc ''
+                External `wpa_supplicant.conf` configuration file.
+                The configuration options defined declaratively within `networking.supplicant` have
+                precedence over options defined in `configFile`.
+              '';
+            };
+
+            writable = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether the configuration file at `configFile.path` should be written to by
+                `wpa_supplicant`.
+              '';
+            };
+
+          };
+
+          extraConf = mkOption {
+            type = types.lines;
+            default = "";
+            example = ''
+              ap_scan=1
+              device_name=My-NixOS-Device
+              device_type=1-0050F204-1
+              driver_param=use_p2p_group_interface=1
+              disable_scan_offload=1
+              p2p_listen_reg_class=81
+              p2p_listen_channel=1
+              p2p_oper_reg_class=81
+              p2p_oper_channel=1
+              manufacturer=NixOS
+              model_name=NixOS_Unstable
+              model_number=2015
+            '';
+            description = lib.mdDoc ''
+              Configuration options for `wpa_supplicant.conf`.
+              Options defined here have precedence over options in `configFile`.
+              NOTE: Do not write sensitive data into `extraConf` as it will
+              be world-readable in the Nix store. For sensitive information,
+              use `configFile` instead.
+            '';
+          };
+
+          extraCmdArgs = mkOption {
+            type = types.str;
+            default = "";
+            example = "-e/run/wpa_supplicant/entropy.bin";
+            description =
+              lib.mdDoc "Command line arguments to add when executing `wpa_supplicant`.";
+          };
+
+          driver = mkOption {
+            type = types.nullOr types.str;
+            default = "nl80211,wext";
+            description = lib.mdDoc "Force a specific wpa_supplicant driver.";
+          };
+
+          bridge = mkOption {
+            type = types.str;
+            default = "";
+            description = lib.mdDoc "Name of the bridge interface that wpa_supplicant should listen at.";
+          };
+
+          userControlled = {
+
+            enable = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Allow normal users to control wpa_supplicant through wpa_gui or wpa_cli.
+                This is useful for laptop users that switch networks a lot and don't want
+                to depend on a large package such as NetworkManager just to pick nearby
+                access points.
+              '';
+            };
+
+            socketDir = mkOption {
+              type = types.str;
+              default = "/run/wpa_supplicant";
+              description = lib.mdDoc "Directory of sockets for controlling wpa_supplicant.";
+            };
+
+            group = mkOption {
+              type = types.str;
+              default = "wheel";
+              example = "network";
+              description = lib.mdDoc "Members of this group can control wpa_supplicant.";
+            };
+
+          };
+        };
+      });
+
+      default = { };
+
+      example = literalExpression ''
+        { "wlan0 wlan1" = {
+            configFile.path = "/etc/wpa_supplicant.conf";
+            userControlled.group = "network";
+            extraConf = '''
+              ap_scan=1
+              p2p_disabled=1
+            ''';
+            extraCmdArgs = "-u -W";
+            bridge = "br0";
+          };
+        }
+      '';
+
+      description = lib.mdDoc ''
+        Interfaces for which to start {command}`wpa_supplicant`.
+        The supplicant is used to scan for and associate with wireless networks,
+        or to authenticate with 802.1x capable network switches.
+
+        The value of this option is an attribute set. Each attribute configures a
+        {command}`wpa_supplicant` service, where the attribute name specifies
+        the name of the interface that {command}`wpa_supplicant` operates on.
+        The attribute name can be a space separated list of interfaces.
+        The attribute names `WLAN`, `LAN` and `DBUS`
+        have a special meaning. `WLAN` and `LAN` are
+        configurations for universal {command}`wpa_supplicant` service that is
+        started for each WLAN interface or for each LAN interface, respectively.
+        `DBUS` defines a device-unrelated {command}`wpa_supplicant`
+        service that can be accessed through `D-Bus`.
+      '';
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf (cfg != {}) {
+
+    environment.systemPackages =  [ pkgs.wpa_supplicant ];
+
+    services.dbus.packages = [ pkgs.wpa_supplicant ];
+
+    systemd.services = mapAttrs' (n: v: nameValuePair (serviceName n) (supplicantService n v)) cfg;
+
+    services.udev.packages = [
+      (pkgs.writeTextFile {
+        name = "99-zzz-60-supplicant.rules";
+        destination = "/etc/udev/rules.d/99-zzz-60-supplicant.rules";
+        text = ''
+          ${flip (concatMapStringsSep "\n") (filter (n: n!="WLAN" && n!="LAN" && n!="DBUS") (attrNames cfg)) (iface:
+            flip (concatMapStringsSep "\n") (splitString " " iface) (i: ''
+              ACTION=="add", SUBSYSTEM=="net", ENV{INTERFACE}=="${i}", TAG+="systemd", ENV{SYSTEMD_WANTS}+="supplicant-${replaceStrings [" "] ["-"] iface}.service", TAG+="SUPPLICANT_ASSIGNED"''))}
+
+          ${optionalString (hasAttr "WLAN" cfg) ''
+            ACTION=="add", SUBSYSTEM=="net", ENV{DEVTYPE}=="wlan", TAG!="SUPPLICANT_ASSIGNED", TAG+="systemd", PROGRAM="/run/current-system/systemd/bin/systemd-escape -p %E{INTERFACE}", ENV{SYSTEMD_WANTS}+="supplicant-wlan@$result.service"
+          ''}
+          ${optionalString (hasAttr "LAN" cfg) ''
+            ACTION=="add", SUBSYSTEM=="net", ENV{DEVTYPE}=="lan", TAG!="SUPPLICANT_ASSIGNED", TAG+="systemd", PROGRAM="/run/current-system/systemd/bin/systemd-escape -p %E{INTERFACE}", ENV{SYSTEMD_WANTS}+="supplicant-lan@$result.service"
+          ''}
+        '';
+      })];
+
+  };
+
+}
+
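As a usage sketch: the special `WLAN` attribute starts the templated `supplicant-wlan@` instance for every wireless interface via the udev rules above, while a concrete interface name pins a service to that device. The config path and group below are placeholders:

```nix
{
  networking.supplicant.WLAN = {
    configFile.path = "/etc/wpa_supplicant.conf";  # placeholder external config
    configFile.writable = true;
    userControlled = {
      enable = true;
      group = "network";   # placeholder group; must exist on the system
    };
    extraConf = ''
      ap_scan=1
    '';
  };
}
```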
diff --git a/nixpkgs/nixos/modules/services/networking/supybot.nix b/nixpkgs/nixos/modules/services/networking/supybot.nix
new file mode 100644
index 000000000000..22ba015cc55d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/supybot.nix
@@ -0,0 +1,163 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg  = config.services.supybot;
+  isStateDirHome = hasPrefix "/home/" cfg.stateDir;
+  isStateDirVar = cfg.stateDir == "/var/lib/supybot";
+  pyEnv = pkgs.python3.withPackages (p: [ p.limnoria ] ++ (cfg.extraPackages p));
+in
+{
+  options = {
+
+    services.supybot = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable Supybot, an IRC bot (also known as Limnoria).";
+      };
+
+      stateDir = mkOption {
+        type = types.path;
+        default = if versionAtLeast config.system.stateVersion "20.09"
+          then "/var/lib/supybot"
+          else "/home/supybot";
+        defaultText = literalExpression "/var/lib/supybot";
+        description = lib.mdDoc "The root directory, logs and plugins are stored here";
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to the initial supybot config file. This can be generated by
+          running supybot-wizard.
+
+          Note: all directory paths in the config file (backup, conf, data, logs,
+          logs/plugins, plugins, tmp, web) should be given as full paths under the stateDir directory.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        description = lib.mdDoc ''
+          Attribute set of additional plugins that will be symlinked to the
+          {file}`plugin` subdirectory.
+
+          Please note that you still need to add the plugins to the config
+          file (or with `!load`) using their attribute name.
+        '';
+        example = literalExpression ''
+          let
+            plugins = pkgs.fetchzip {
+              url = "https://github.com/ProgVal/Supybot-plugins/archive/57c2450c.zip";
+              sha256 = "077snf84ibnva3sbpzdfpfma6hcdw7dflwnhg6pw7mgnf0nd84qd";
+            };
+          in
+          {
+            Wikipedia = "''${plugins}/Wikipedia";
+            Decide = ./supy-decide;
+          }
+        '';
+      };
+
+      extraPackages = mkOption {
+        type = types.functionTo (types.listOf types.package);
+        default = p: [];
+        defaultText = literalExpression "p: []";
+        description = lib.mdDoc ''
+          Extra Python packages available to supybot plugins. The
+          value must be a function which receives the attrset defined
+          in {var}`python3Packages` as the sole argument.
+        '';
+        example = literalExpression "p: [ p.lxml p.requests ]";
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.python3Packages.limnoria ];
+
+    users.users.supybot = {
+      uid = config.ids.uids.supybot;
+      group = "supybot";
+      description = "Supybot IRC bot user";
+      home = cfg.stateDir;
+      isSystemUser = true;
+    };
+
+    users.groups.supybot = {
+      gid = config.ids.gids.supybot;
+    };
+
+    systemd.services.supybot = {
+      description = "Supybot, an IRC bot";
+      documentation = [ "https://limnoria.readthedocs.io/" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        # This needs to be created afresh every time
+        rm -f '${cfg.stateDir}/supybot.cfg.bak'
+      '';
+
+      startLimitIntervalSec = 5 * 60;  # 5 min
+      startLimitBurst = 1;
+      serviceConfig = {
+        ExecStart = "${pyEnv}/bin/supybot ${cfg.stateDir}/supybot.cfg";
+        PIDFile = "/run/supybot.pid";
+        User = "supybot";
+        Group = "supybot";
+        UMask = "0007";
+        Restart = "on-abort";
+
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RemoveIPC = true;
+        ProtectHostname = true;
+        CapabilityBoundingSet = "";
+        ProtectSystem = "full";
+      }
+      // optionalAttrs isStateDirVar {
+        StateDirectory = "supybot";
+        ProtectSystem = "strict";
+      }
+      // optionalAttrs (!isStateDirHome) {
+        ProtectHome = true;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}'              0700 supybot supybot - -"
+      "d '${cfg.stateDir}/backup'       0750 supybot supybot - -"
+      "d '${cfg.stateDir}/conf'         0750 supybot supybot - -"
+      "d '${cfg.stateDir}/data'         0750 supybot supybot - -"
+      "d '${cfg.stateDir}/plugins'      0750 supybot supybot - -"
+      "d '${cfg.stateDir}/logs'         0750 supybot supybot - -"
+      "d '${cfg.stateDir}/logs/plugins' 0750 supybot supybot - -"
+      "d '${cfg.stateDir}/tmp'          0750 supybot supybot - -"
+      "d '${cfg.stateDir}/web'          0750 supybot supybot - -"
+      "L '${cfg.stateDir}/supybot.cfg'  -    -       -       - ${cfg.configFile}"
+    ]
+    ++ (flip mapAttrsToList cfg.plugins (name: dest:
+      "L+ '${cfg.stateDir}/plugins/${name}' - - - - ${dest}"
+    ));
+
+  };
+}
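A minimal sketch of enabling the bot; the config path is a placeholder for a file generated beforehand with `supybot-wizard`:

```nix
{
  services.supybot = {
    enable = true;
    configFile = "/var/keys/supybot.cfg";  # placeholder; generate with supybot-wizard
    extraPackages = p: [ p.requests ];     # extra Python packages for plugins
  };
}
```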
diff --git a/nixpkgs/nixos/modules/services/networking/syncplay.nix b/nixpkgs/nixos/modules/services/networking/syncplay.nix
new file mode 100644
index 000000000000..0a66d93bf153
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/syncplay.nix
@@ -0,0 +1,130 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.syncplay;
+
+  cmdArgs =
+    [ "--port" cfg.port ]
+    ++ optionals (cfg.salt != null) [ "--salt" cfg.salt ]
+    ++ optionals (cfg.certDir != null) [ "--tls" cfg.certDir ]
+    ++ cfg.extraArgs;
+
+in
+{
+  options = {
+    services.syncplay = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "If enabled, start the Syncplay server.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8999;
+        description = lib.mdDoc ''
+          TCP port to bind to.
+        '';
+      };
+
+      salt = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Salt to allow room operator passwords generated by this server
+          instance to still work when the server is restarted.  The salt will be
+          readable in the Nix store and in the process list.  If this is not
+          intended, use `saltFile` instead.  Mutually exclusive with
+          {option}`services.syncplay.saltFile`.
+        '';
+      };
+
+      saltFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to the file that contains the server salt.  This allows room
+          operator passwords generated by this server instance to still work
+          when the server is restarted.  If `null`, the server doesn't load the
+          salt from a file.  Mutually exclusive with
+          {option}`services.syncplay.salt`.
+        '';
+      };
+
+      certDir = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          TLS certificates directory to use for encryption. See
+          <https://github.com/Syncplay/syncplay/wiki/TLS-support>.
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional arguments to be passed to the service.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "nobody";
+        description = lib.mdDoc ''
+          User to use when running Syncplay.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nogroup";
+        description = lib.mdDoc ''
+          Group to use when running Syncplay.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to the file that contains the server password. If
+          `null`, the server doesn't require a password.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.salt == null || cfg.saltFile == null;
+        message = "services.syncplay.salt and services.syncplay.saltFile are mutually exclusive.";
+      }
+    ];
+    systemd.services.syncplay = {
+      description = "Syncplay Service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        LoadCredential = lib.optional (cfg.passwordFile != null) "password:${cfg.passwordFile}"
+          ++ lib.optional (cfg.saltFile != null) "salt:${cfg.saltFile}";
+      };
+
+      script = ''
+        ${lib.optionalString (cfg.passwordFile != null) ''
+          export SYNCPLAY_PASSWORD=$(cat "''${CREDENTIALS_DIRECTORY}/password")
+        ''}
+        ${lib.optionalString (cfg.saltFile != null) ''
+          export SYNCPLAY_SALT=$(cat "''${CREDENTIALS_DIRECTORY}/salt")
+        ''}
+        exec ${pkgs.syncplay-nogui}/bin/syncplay-server ${escapeShellArgs cmdArgs}
+      '';
+    };
+  };
+}
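A short sketch of a password-protected server; the password file path is a placeholder and is handed to the service through systemd's `LoadCredential` as shown above:

```nix
{
  services.syncplay = {
    enable = true;
    port = 8999;
    passwordFile = "/run/keys/syncplay-password";  # placeholder credential file
  };
  networking.firewall.allowedTCPPorts = [ 8999 ];
}
```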
diff --git a/nixpkgs/nixos/modules/services/networking/syncthing-relay.nix b/nixpkgs/nixos/modules/services/networking/syncthing-relay.nix
new file mode 100644
index 000000000000..64c4e731b982
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/syncthing-relay.nix
@@ -0,0 +1,121 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.syncthing.relay;
+
+  dataDirectory = "/var/lib/syncthing-relay";
+
+  relayOptions =
+    [
+      "--keys=${dataDirectory}"
+      "--listen=${cfg.listenAddress}:${toString cfg.port}"
+      "--status-srv=${cfg.statusListenAddress}:${toString cfg.statusPort}"
+      "--provided-by=${escapeShellArg cfg.providedBy}"
+    ]
+    ++ optional (cfg.pools != null) "--pools=${escapeShellArg (concatStringsSep "," cfg.pools)}"
+    ++ optional (cfg.globalRateBps != null) "--global-rate=${toString cfg.globalRateBps}"
+    ++ optional (cfg.perSessionRateBps != null) "--per-session-rate=${toString cfg.perSessionRateBps}"
+    ++ cfg.extraOptions;
+in {
+  ###### interface
+
+  options.services.syncthing.relay = {
+    enable = mkEnableOption (lib.mdDoc "Syncthing relay service");
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "";
+      example = "1.2.3.4";
+      description = lib.mdDoc ''
+        Address to listen on for relay traffic.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 22067;
+      description = lib.mdDoc ''
+        Port to listen on for relay traffic. This port should be added to
+        `networking.firewall.allowedTCPPorts`.
+      '';
+    };
+
+    statusListenAddress = mkOption {
+      type = types.str;
+      default = "";
+      example = "1.2.3.4";
+      description = lib.mdDoc ''
+        Address to listen on for serving the relay status API.
+      '';
+    };
+
+    statusPort = mkOption {
+      type = types.port;
+      default = 22070;
+      description = lib.mdDoc ''
+        Port to listen on for serving the relay status API. This port should be
+        added to `networking.firewall.allowedTCPPorts`.
+      '';
+    };
+
+    pools = mkOption {
+      type = types.nullOr (types.listOf types.str);
+      default = null;
+      description = lib.mdDoc ''
+        Relay pools to join. If null, uses the default global pool.
+      '';
+    };
+
+    providedBy = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        Human-readable description of the provider of the relay (you).
+      '';
+    };
+
+    globalRateBps = mkOption {
+      type = types.nullOr types.ints.positive;
+      default = null;
+      description = lib.mdDoc ''
+        Global bandwidth rate limit in bytes per second.
+      '';
+    };
+
+    perSessionRateBps = mkOption {
+      type = types.nullOr types.ints.positive;
+      default = null;
+      description = lib.mdDoc ''
+        Per session bandwidth rate limit in bytes per second.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra command line arguments to pass to strelaysrv.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.syncthing-relay = {
+      description = "Syncthing relay service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = baseNameOf dataDirectory;
+
+        Restart = "on-failure";
+        ExecStart = "${pkgs.syncthing-relay}/bin/strelaysrv ${concatStringsSep " " relayOptions}";
+      };
+    };
+  };
+}
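An illustrative sketch of a public relay; the provider string is free text and the rate limit is in bytes per second, matching the `--global-rate` flag built above:

```nix
{
  services.syncthing.relay = {
    enable = true;
    providedBy = "example.org";        # free-form provider description (placeholder)
    globalRateBps = 10 * 1000 * 1000;  # ~10 MB/s aggregate limit
  };
  networking.firewall.allowedTCPPorts = [ 22067 22070 ];
}
```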
diff --git a/nixpkgs/nixos/modules/services/networking/syncthing.nix b/nixpkgs/nixos/modules/services/networking/syncthing.nix
new file mode 100644
index 000000000000..6d9af6141f12
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/syncthing.nix
@@ -0,0 +1,713 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.syncthing;
+  opt = options.services.syncthing;
+  defaultUser = "syncthing";
+  defaultGroup = defaultUser;
+  settingsFormat = pkgs.formats.json { };
+  cleanedConfig = converge (filterAttrsRecursive (_: v: v != null && v != {})) cfg.settings;
+
+  isUnixGui = (builtins.substring 0 1 cfg.guiAddress) == "/";
+
+  # Syncthing supports serving the GUI over Unix sockets. If that happens, the
+  # API is served over the Unix socket as well.  This function returns the correct
+  # curl arguments for the address portion of the curl command for both network
+  # and Unix socket addresses.
+  curlAddressArgs = path: if isUnixGui
+    # if cfg.guiAddress is a unix socket, tell curl explicitly about it
+    # note that the dot in front of `${path}` is the hostname, which is
+    # required.
+    then "--unix-socket ${cfg.guiAddress} http://.${path}"
+    # no adjustments are needed if cfg.guiAddress is a network address
+    else "${cfg.guiAddress}${path}"
+    ;
+
+  devices = mapAttrsToList (_: device: device // {
+    deviceID = device.id;
+  }) cfg.settings.devices;
+
+  folders = mapAttrsToList (_: folder: folder //
+    throwIf (folder?rescanInterval || folder?watch || folder?watchDelay) ''
+      The options services.syncthing.settings.folders.<name>.{rescanInterval,watch,watchDelay}
+      were removed. Please use, respectively, {rescanIntervalS,fsWatcherEnabled,fsWatcherDelayS} instead.
+    '' {
+    devices = map (device:
+      if builtins.isString device then
+        { deviceId = cfg.settings.devices.${device}.id; }
+      else
+        device
+    ) folder.devices;
+  }) (filterAttrs (_: folder:
+    folder.enable
+  ) cfg.settings.folders);
+
+  jq = "${pkgs.jq}/bin/jq";
+  updateConfig = pkgs.writers.writeBash "merge-syncthing-config" (''
+    set -efu
+
+    # be careful not to leak secrets in the filesystem or in process listings
+    umask 0077
+
+    curl() {
+        # get the api key by parsing the config.xml
+        while
+            ! ${pkgs.libxml2}/bin/xmllint \
+                --xpath 'string(configuration/gui/apikey)' \
+                ${cfg.configDir}/config.xml \
+                >"$RUNTIME_DIRECTORY/api_key"
+        do sleep 1; done
+        (printf "X-API-Key: "; cat "$RUNTIME_DIRECTORY/api_key") >"$RUNTIME_DIRECTORY/headers"
+        ${pkgs.curl}/bin/curl -sSLk -H "@$RUNTIME_DIRECTORY/headers" \
+            --retry 1000 --retry-delay 1 --retry-all-errors \
+            "$@"
+    }
+  '' +
+
+  /* Syncthing's REST API for folders and devices is almost identical.
+  Hence we iterate them using lib.pipe and generate shell commands for both at
+  the same time. */
+  (lib.pipe {
+    # The attributes below are the only ones that are different for devices /
+    # folders.
+    devs = {
+      new_conf_IDs = map (v: v.id) devices;
+      GET_IdAttrName = "deviceID";
+      override = cfg.overrideDevices;
+      conf = devices;
+      baseAddress = curlAddressArgs "/rest/config/devices";
+    };
+    dirs = {
+      new_conf_IDs = map (v: v.id) folders;
+      GET_IdAttrName = "id";
+      override = cfg.overrideFolders;
+      conf = folders;
+      baseAddress = curlAddressArgs "/rest/config/folders";
+    };
+  } [
+    # Now for each of these attributes, write the curl commands that are
+    # identical to both folders and devices.
+    (mapAttrs (conf_type: s:
+      # We iterate the `conf` list now, and run a curl -X POST command for each, that
+      # should update that device/folder only.
+      lib.pipe s.conf [
+        # Quoting https://docs.syncthing.net/rest/config.html:
+        #
+        # > PUT takes an array and POST a single object. In both cases if a
+        # given folder/device already exists, it’s replaced, otherwise a new
+        # one is added.
+        #
+        # What's not documented, is that using PUT will remove objects that
+        # don't exist in the array given. That's why we use here `POST`, and
+        # only if s.override == true then we DELETE the relevant folders
+        # afterwards.
+        (map (new_cfg: ''
+          curl -d ${lib.escapeShellArg (builtins.toJSON new_cfg)} -X POST ${s.baseAddress}
+        ''))
+        (lib.concatStringsSep "\n")
+      ]
+      /* If we need to override devices/folders, we iterate all currently configured
+      IDs, via another `curl -X GET`, and we delete all IDs that are not part of
+      the Nix configured list of IDs
+      */
+      + lib.optionalString s.override ''
+        stale_${conf_type}_ids="$(curl -X GET ${s.baseAddress} | ${jq} \
+          --argjson new_ids ${lib.escapeShellArg (builtins.toJSON s.new_conf_IDs)} \
+          --raw-output \
+          '[.[].${s.GET_IdAttrName}] - $new_ids | .[]'
+        )"
+        for id in ''${stale_${conf_type}_ids}; do
+          curl -X DELETE ${s.baseAddress}/$id
+        done
+      ''
+    ))
+    builtins.attrValues
+    (lib.concatStringsSep "\n")
+  ]) +
+  /* Now we update the other settings defined in cleanedConfig which are not
+  "folders" or "devices". */
+  (lib.pipe cleanedConfig [
+    builtins.attrNames
+    (lib.subtractLists ["folders" "devices"])
+    (map (subOption: ''
+      curl -X PUT -d ${lib.escapeShellArg (builtins.toJSON cleanedConfig.${subOption})} ${curlAddressArgs "/rest/config/${subOption}"}
+    ''))
+    (lib.concatStringsSep "\n")
+  ]) + ''
+    # restart Syncthing if required
+    if curl ${curlAddressArgs "/rest/config/restart-required"} |
+       ${jq} -e .requiresRestart > /dev/null; then
+        curl -X POST ${curlAddressArgs "/rest/system/restart"}
+    fi
+  '');
+in {
+  ###### interface
+  options = {
+    services.syncthing = {
+
+      enable = mkEnableOption
+        (lib.mdDoc "Syncthing, a self-hosted open-source alternative to Dropbox and Bittorrent Sync");
+
+      cert = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = mdDoc ''
+          Path to the `cert.pem` file, which will be copied into Syncthing's
+          [configDir](#opt-services.syncthing.configDir).
+        '';
+      };
+
+      key = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = mdDoc ''
+          Path to the `key.pem` file, which will be copied into Syncthing's
+          [configDir](#opt-services.syncthing.configDir).
+        '';
+      };
+
+      overrideDevices = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc ''
+          Whether to delete the devices which are not configured via the
+          [devices](#opt-services.syncthing.settings.devices) option.
+          If set to `false`, devices added via the web
+          interface will persist and will have to be deleted manually.
+        '';
+      };
+
+      overrideFolders = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc ''
+          Whether to delete the folders which are not configured via the
+          [folders](#opt-services.syncthing.settings.folders) option.
+          If set to `false`, folders added via the web
+          interface will persist and will have to be deleted manually.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = settingsFormat.type;
+          options = {
+            # global options
+            options = mkOption {
+              default = {};
+              description = mdDoc ''
+                The options element contains all other global configuration options
+              '';
+              type = types.submodule ({ name, ... }: {
+                freeformType = settingsFormat.type;
+                options = {
+                  localAnnounceEnabled = mkOption {
+                    type = types.nullOr types.bool;
+                    default = null;
+                    description = lib.mdDoc ''
+                      Whether to send announcements to the local LAN and to use such announcements to find other devices.
+                    '';
+                  };
+
+                  localAnnouncePort = mkOption {
+                    type = types.nullOr types.int;
+                    default = null;
+                    description = lib.mdDoc ''
+                      The port on which to listen and send IPv4 broadcast announcements to.
+                    '';
+                  };
+
+                  relaysEnabled = mkOption {
+                    type = types.nullOr types.bool;
+                    default = null;
+                    description = lib.mdDoc ''
+                      When true, relays will be connected to and potentially used for device to device connections.
+                    '';
+                  };
+
+                  urAccepted = mkOption {
+                    type = types.nullOr types.int;
+                    default = null;
+                    description = lib.mdDoc ''
+                      Whether the user has accepted to submit anonymous usage data.
+                      The default, 0, means the user has not made a choice, and Syncthing will ask at some point in the future.
+                      "-1" means no, and a number above zero means that that version of usage reporting has been accepted.
+                    '';
+                  };
+
+                  limitBandwidthInLan = mkOption {
+                    type = types.nullOr types.bool;
+                    default = null;
+                    description = lib.mdDoc ''
+                      Whether to apply bandwidth limits to devices in the same broadcast domain as the local device.
+                    '';
+                  };
+
+                  maxFolderConcurrency = mkOption {
+                    type = types.nullOr types.int;
+                    default = null;
+                    description = lib.mdDoc ''
+                      This option controls how many folders may concurrently be in I/O-intensive operations such as syncing or scanning.
+                      The mechanism is described in detail in a [separate chapter](https://docs.syncthing.net/advanced/option-max-concurrency.html).
+                    '';
+                  };
+                };
+              });
+            };
+
+            # device settings
+            devices = mkOption {
+              default = {};
+              description = mdDoc ''
+                Peers/devices which Syncthing should communicate with.
+
+                Note that you can still add devices manually, but those changes
+                will be reverted on restart if [overrideDevices](#opt-services.syncthing.overrideDevices)
+                is enabled.
+              '';
+              example = {
+                bigbox = {
+                  id = "7CFNTQM-IMTJBHJ-3UWRDIU-ZGQJFR6-VCXZ3NB-XUH3KZO-N52ITXR-LAIYUAU";
+                  addresses = [ "tcp://192.168.0.10:51820" ];
+                };
+              };
+              type = types.attrsOf (types.submodule ({ name, ... }: {
+                freeformType = settingsFormat.type;
+                options = {
+
+                  name = mkOption {
+                    type = types.str;
+                    default = name;
+                    description = lib.mdDoc ''
+                      The name of the device.
+                    '';
+                  };
+
+                  id = mkOption {
+                    type = types.str;
+                    description = mdDoc ''
+                      The device ID. See <https://docs.syncthing.net/dev/device-ids.html>.
+                    '';
+                  };
+
+                  autoAcceptFolders = mkOption {
+                    type = types.bool;
+                    default = false;
+                    description = mdDoc ''
+                      Automatically create or share folders that this device advertises at the default path.
+                      See <https://docs.syncthing.net/users/config.html?highlight=autoaccept#config-file-format>.
+                    '';
+                  };
+
+                };
+              }));
+            };
+
+            # folder settings
+            folders = mkOption {
+              default = {};
+              description = mdDoc ''
+                Folders which should be shared by Syncthing.
+
+                Note that you can still add folders manually, but those changes
+                will be reverted on restart if [overrideFolders](#opt-services.syncthing.overrideFolders)
+                is enabled.
+              '';
+              example = literalExpression ''
+                {
+                  "/home/user/sync" = {
+                    id = "syncme";
+                    devices = [ "bigbox" ];
+                  };
+                }
+              '';
+              type = types.attrsOf (types.submodule ({ name, ... }: {
+                freeformType = settingsFormat.type;
+                options = {
+
+                  enable = mkOption {
+                    type = types.bool;
+                    default = true;
+                    description = lib.mdDoc ''
+                      Whether to share this folder.
+                      This option is useful when you want to define all folders
+                      in one place, but not every machine should share all folders.
+                    '';
+                  };
+
+                  path = mkOption {
+                    # TODO for release 23.05: allow relative paths again and set
+                    # working directory to cfg.dataDir
+                    type = types.str // {
+                      check = x: types.str.check x && (substring 0 1 x == "/" || substring 0 2 x == "~/");
+                      description = types.str.description + " starting with / or ~/";
+                    };
+                    default = name;
+                    description = lib.mdDoc ''
+                      The path to the folder which should be shared.
+                      Only absolute paths (starting with `/`) and paths relative to
+                      the [user](#opt-services.syncthing.user)'s home directory
+                      (starting with `~/`) are allowed.
+                    '';
+                  };
+
+                  id = mkOption {
+                    type = types.str;
+                    default = name;
+                    description = lib.mdDoc ''
+                      The ID of the folder. Must be the same on all devices.
+                    '';
+                  };
+
+                  label = mkOption {
+                    type = types.str;
+                    default = name;
+                    description = lib.mdDoc ''
+                      The label of the folder.
+                    '';
+                  };
+
+                  devices = mkOption {
+                    type = types.listOf types.str;
+                    default = [];
+                    description = mdDoc ''
+                      The devices this folder should be shared with. Each device must
+                      be defined in the [devices](#opt-services.syncthing.settings.devices) option.
+                    '';
+                  };
+
+                  versioning = mkOption {
+                    default = null;
+                    description = mdDoc ''
+                      How to keep changed/deleted files with Syncthing.
+                      There are 4 different types of versioning with different parameters.
+                      See <https://docs.syncthing.net/users/versioning.html>.
+                    '';
+                    example = literalExpression ''
+                      [
+                        {
+                          versioning = {
+                            type = "simple";
+                            params.keep = "10";
+                          };
+                        }
+                        {
+                          versioning = {
+                            type = "trashcan";
+                            params.cleanoutDays = "1000";
+                          };
+                        }
+                        {
+                          versioning = {
+                            type = "staggered";
+                            fsPath = "/syncthing/backup";
+                            params = {
+                              cleanInterval = "3600";
+                              maxAge = "31536000";
+                            };
+                          };
+                        }
+                        {
+                          versioning = {
+                            type = "external";
+                            params.versionsPath = pkgs.writers.writeBash "backup" '''
+                              folderpath="$1"
+                              filepath="$2"
+                              rm -rf "$folderpath/$filepath"
+                            ''';
+                          };
+                        }
+                      ]
+                    '';
+                    type = with types; nullOr (submodule {
+                      freeformType = settingsFormat.type;
+                      options = {
+                        type = mkOption {
+                          type = enum [ "external" "simple" "staggered" "trashcan" ];
+                          description = mdDoc ''
+                            The type of versioning.
+                            See <https://docs.syncthing.net/users/versioning.html>.
+                          '';
+                        };
+                      };
+                    });
+                  };
+
+                  copyOwnershipFromParent = mkOption {
+                    type = types.bool;
+                    default = false;
+                    description = mdDoc ''
+                      On Unix systems, tries to copy file/folder ownership from the parent directory (the directory it’s located in).
+                      Requires running Syncthing as a privileged user, or granting it additional capabilities (e.g. CAP_CHOWN on Linux).
+                    '';
+                  };
+                };
+              }));
+            };
+
+          };
+        };
+        default = {};
+        description = mdDoc ''
+          Extra configuration options for Syncthing.
+          See <https://docs.syncthing.net/users/config.html>.
+          Note that this attribute set does not exactly match the documented
+          XML format. Instead, this is the format of the JSON REST API. There
+          are slight differences. For example, this XML:
+          ```xml
+          <options>
+            <listenAddress>default</listenAddress>
+            <minHomeDiskFree unit="%">1</minHomeDiskFree>
+          </options>
+          ```
+          corresponds to this JSON:
+          ```json
+          {
+            "options": {
+              "listenAddresses": [
+                "default"
+              ],
+              "minHomeDiskFree": {
+                "unit": "%",
+                "value": 1
+              }
+            }
+          }
+          ```
+        '';
+        example = {
+          options.localAnnounceEnabled = false;
+          gui.theme = "black";
+        };
+      };
+
+      guiAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1:8384";
+        description = lib.mdDoc ''
+          The address to serve the web interface at.
+        '';
+      };
+
+      systemService = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to auto-launch Syncthing as a system service.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = defaultUser;
+        example = "yourUser";
+        description = mdDoc ''
+          The user to run Syncthing as.
+          By default, a user named `${defaultUser}` will be created whose home
+          directory is [dataDir](#opt-services.syncthing.dataDir).
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = defaultGroup;
+        example = "yourGroup";
+        description = mdDoc ''
+          The group to run Syncthing under.
+          By default, a group named `${defaultGroup}` will be created.
+        '';
+      };
+
+      all_proxy = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "socks5://address.com:1234";
+        description = mdDoc ''
+          Overwrites the all_proxy environment variable for the Syncthing process to
+          the given value. This is normally used to let Syncthing connect
+          through a SOCKS5 proxy server.
+          See <https://docs.syncthing.net/users/proxying.html>.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/syncthing";
+        example = "/home/yourUser";
+        description = lib.mdDoc ''
+          The path where synchronised directories will exist.
+        '';
+      };
+
+      configDir = let
+        cond = versionAtLeast config.system.stateVersion "19.03";
+      in mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          The path where the settings and keys will exist.
+        '';
+        default = cfg.dataDir + optionalString cond "/.config/syncthing";
+        defaultText = literalMD ''
+          * if `stateVersion >= 19.03`:
+
+                config.${opt.dataDir} + "/.config/syncthing"
+          * otherwise:
+
+                config.${opt.dataDir}
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--reset-deltas" ];
+        description = lib.mdDoc ''
+          Extra flags passed to the syncthing command in the service definition.
+        '';
+      };
+
+      openDefaultPorts = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Whether to open the default ports in the firewall: TCP/UDP 22000 for transfers
+          and UDP 21027 for discovery.
+
+          If multiple users are running Syncthing on this machine, you will need
+          to manually open a set of ports for each instance and leave this disabled.
+          Alternatively, if you are running only a single instance on this machine
+          using the default ports, enable this.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.syncthing;
+        defaultText = literalExpression "pkgs.syncthing";
+        description = lib.mdDoc ''
+          The Syncthing package to use.
+        '';
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "syncthing" "useInotify" ] ''
+      This option was removed because Syncthing now has the inotify functionality included under the name "fswatcher".
+      It can be enabled on a per-folder basis through the web interface.
+    '')
+    (mkRenamedOptionModule [ "services" "syncthing" "extraOptions" ] [ "services" "syncthing" "settings" ])
+    (mkRenamedOptionModule [ "services" "syncthing" "folders" ] [ "services" "syncthing" "settings" "folders" ])
+    (mkRenamedOptionModule [ "services" "syncthing" "devices" ] [ "services" "syncthing" "settings" "devices" ])
+    (mkRenamedOptionModule [ "services" "syncthing" "options" ] [ "services" "syncthing" "settings" "options" ])
+  ] ++ map (o:
+    mkRenamedOptionModule [ "services" "syncthing" "declarative" o ] [ "services" "syncthing" o ]
+  ) [ "cert" "key" "devices" "folders" "overrideDevices" "overrideFolders" "extraOptions"];
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    networking.firewall = mkIf cfg.openDefaultPorts {
+      allowedTCPPorts = [ 22000 ];
+      allowedUDPPorts = [ 21027 22000 ];
+    };
+
+    systemd.packages = [ pkgs.syncthing ];
+
+    users.users = mkIf (cfg.systemService && cfg.user == defaultUser) {
+      ${defaultUser} =
+        { group = cfg.group;
+          home  = cfg.dataDir;
+          createHome = true;
+          uid = config.ids.uids.syncthing;
+          description = "Syncthing daemon user";
+        };
+    };
+
+    users.groups = mkIf (cfg.systemService && cfg.group == defaultGroup) {
+      ${defaultGroup}.gid =
+        config.ids.gids.syncthing;
+    };
+
+    systemd.services = {
+      # upstream reference:
+      # https://github.com/syncthing/syncthing/blob/main/etc/linux-systemd/system/syncthing%40.service
+      syncthing = mkIf cfg.systemService {
+        description = "Syncthing service";
+        after = [ "network.target" ];
+        environment = {
+          STNORESTART = "yes";
+          STNOUPGRADE = "yes";
+          inherit (cfg) all_proxy;
+        } // config.networking.proxy.envVars;
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          Restart = "on-failure";
+          SuccessExitStatus = "3 4";
+          RestartForceExitStatus = "3 4";
+          User = cfg.user;
+          Group = cfg.group;
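+          # The leading "+" below makes systemd run this ExecStartPre with full
+          # privileges (ignoring User=/Group=), so the certificate and key can be
+          # installed into configDir owned by cfg.user.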
+          ExecStartPre = mkIf (cfg.cert != null || cfg.key != null)
+            "+${pkgs.writers.writeBash "syncthing-copy-keys" ''
+              install -dm700 -o ${cfg.user} -g ${cfg.group} ${cfg.configDir}
+              ${optionalString (cfg.cert != null) ''
+                install -Dm400 -o ${cfg.user} -g ${cfg.group} ${toString cfg.cert} ${cfg.configDir}/cert.pem
+              ''}
+              ${optionalString (cfg.key != null) ''
+                install -Dm400 -o ${cfg.user} -g ${cfg.group} ${toString cfg.key} ${cfg.configDir}/key.pem
+              ''}
+            ''}"
+          ;
+          ExecStart = ''
+            ${cfg.package}/bin/syncthing \
+              -no-browser \
+              -gui-address=${if isUnixGui then "unix://" else ""}${cfg.guiAddress} \
+              -config=${cfg.configDir} \
+              -data=${cfg.dataDir} \
+              ${escapeShellArgs cfg.extraFlags}
+          '';
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateMounts = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProtectControlGroups = true;
+          ProtectHostname = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          CapabilityBoundingSet = [
+            "~CAP_SYS_PTRACE" "~CAP_SYS_ADMIN"
+            "~CAP_SETGID" "~CAP_SETUID" "~CAP_SETPCAP"
+            "~CAP_SYS_TIME" "~CAP_KILL"
+          ];
+        };
+      };
+      syncthing-init = mkIf (cleanedConfig != {}) {
+        description = "Syncthing configuration updater";
+        requisite = [ "syncthing.service" ];
+        after = [ "syncthing.service" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          User = cfg.user;
+          RemainAfterExit = true;
+          RuntimeDirectory = "syncthing-init";
+          Type = "oneshot";
+          ExecStart = updateConfig;
+        };
+      };
+
+      syncthing-resume = {
+        wantedBy = [ "suspend.target" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/tailscale.nix b/nixpkgs/nixos/modules/services/networking/tailscale.nix
new file mode 100644
index 000000000000..a5d171e0baab
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tailscale.nix
@@ -0,0 +1,138 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.tailscale;
+  isNetworkd = config.networking.useNetworkd;
+in {
+  meta.maintainers = with maintainers; [ danderson mbaillie twitchyliquid64 mfrw ];
+
+  options.services.tailscale = {
+    enable = mkEnableOption (lib.mdDoc "Tailscale client daemon");
+
+    port = mkOption {
+      type = types.port;
+      default = 41641;
+      description = lib.mdDoc "The port to listen on for tunnel traffic (0=autoselect).";
+    };
+
+    interfaceName = mkOption {
+      type = types.str;
+      default = "tailscale0";
+      description = lib.mdDoc ''The interface name for tunnel traffic. Use "userspace-networking" (beta) to not use TUN.'';
+    };
+
+    permitCertUid = mkOption {
+      type = types.nullOr types.nonEmptyStr;
+      default = null;
+      description = lib.mdDoc "Username or user ID of the user allowed to to fetch Tailscale TLS certificates for the node.";
+    };
+
+    package = lib.mkPackageOptionMD pkgs "tailscale" {};
+
+    openFirewall = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc "Whether to open the firewall for the specified port.";
+    };
+
+    useRoutingFeatures = mkOption {
+      type = types.enum [ "none" "client" "server" "both" ];
+      default = "none";
+      example = "server";
+      description = lib.mdDoc ''
+        Enables settings required for Tailscale's routing features like subnet routers and exit nodes.
+
+        To use these features, you will still need to call `sudo tailscale up` with the relevant flags like `--advertise-exit-node` and `--exit-node`.
+
+        When set to `client` or `both`, reverse path filtering will be set to loose instead of strict.
+        When set to `server` or `both`, IP forwarding will be enabled.
+      '';
+    };
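+    # An illustrative sketch (assumptions: a host meant to act as an exit node,
+    # auth key placed at the path used in the authKeyFile example below):
+    #
+    #   services.tailscale = {
+    #     enable = true;
+    #     useRoutingFeatures = "server";
+    #     authKeyFile = "/run/secrets/tailscale_key";
+    #     extraUpFlags = [ "--advertise-exit-node" ];
+    #   };
+    #
+    # Without an auth key, the same flags can be passed to a manual `tailscale up`.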
+
+    authKeyFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/run/secrets/tailscale_key";
+      description = lib.mdDoc ''
+        A file containing the auth key.
+      '';
+    };
+
+    extraUpFlags = mkOption {
+      description = lib.mdDoc "Extra flags to pass to {command}`tailscale up`.";
+      type = types.listOf types.str;
+      default = [];
+      example = ["--ssh"];
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ]; # for the CLI
+    systemd.packages = [ cfg.package ];
+    systemd.services.tailscaled = {
+      wantedBy = [ "multi-user.target" ];
+      path = [
+        config.networking.resolvconf.package # for configuring DNS in some configs
+        pkgs.procps     # for collecting running services (opt-in feature)
+        pkgs.getent     # for `getent` to look up user shells
+        pkgs.kmod       # required to pass tailscale's v6nat check
+      ];
+      serviceConfig.Environment = [
+        "PORT=${toString cfg.port}"
+        ''"FLAGS=--tun ${lib.escapeShellArg cfg.interfaceName}"''
+      ] ++ (lib.optionals (cfg.permitCertUid != null) [
+        "TS_PERMIT_CERT_UID=${cfg.permitCertUid}"
+      ]);
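+      # The extra double quotes around the FLAGS entry above end up verbatim in the
+      # unit file, where systemd needs them to keep the space-containing value as a
+      # single Environment assignment.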
+      # Restart tailscaled with a single `systemctl restart` at the
+      # end of activation, rather than a `stop` followed by a later
+      # `start`. Activation over Tailscale can hang for tens of
+      # seconds in the stop+start setup, if the activation script has
+      # a significant delay between the stop and start phases
+      # (e.g. script blocked on another unit with a slow shutdown).
+      #
+      # Tailscale is aware of the correctness tradeoff involved, and
+      # already makes its upstream systemd unit robust against unit
+      # version mismatches on restart for compatibility with other
+      # linux distros.
+      stopIfChanged = false;
+    };
+
+    systemd.services.tailscaled-autoconnect = mkIf (cfg.authKeyFile != null) {
+      after = ["tailscale.service"];
+      wants = ["tailscale.service"];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+      };
+      script = ''
+        status=$(${config.systemd.package}/bin/systemctl show -P StatusText tailscaled.service)
+        if [[ $status != Connected* ]]; then
+          ${cfg.package}/bin/tailscale up --auth-key 'file:${cfg.authKeyFile}' ${escapeShellArgs cfg.extraUpFlags}
+        fi
+      '';
+    };
+
+    boot.kernel.sysctl = mkIf (cfg.useRoutingFeatures == "server" || cfg.useRoutingFeatures == "both") {
+      "net.ipv4.conf.all.forwarding" = mkOverride 97 true;
+      "net.ipv6.conf.all.forwarding" = mkOverride 97 true;
+    };
+
+    networking.firewall.allowedUDPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+    networking.firewall.checkReversePath = mkIf (cfg.useRoutingFeatures == "client" || cfg.useRoutingFeatures == "both") "loose";
+
+    networking.dhcpcd.denyInterfaces = [ cfg.interfaceName ];
+
+    systemd.network.networks."50-tailscale" = mkIf isNetworkd {
+      matchConfig = {
+        Name = cfg.interfaceName;
+      };
+      linkConfig = {
+        Unmanaged = true;
+        ActivationPolicy = "manual";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/tayga.nix b/nixpkgs/nixos/modules/services/networking/tayga.nix
new file mode 100644
index 000000000000..299ae2777f7c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tayga.nix
@@ -0,0 +1,195 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.tayga;
+
+  # Converts an address set to a string
+  strAddr = addr: "${addr.address}/${toString addr.prefixLength}";
+
+  configFile = pkgs.writeText "tayga.conf" ''
+    tun-device ${cfg.tunDevice}
+
+    ipv4-addr ${cfg.ipv4.address}
+    ${optionalString (cfg.ipv6.address != null) "ipv6-addr ${cfg.ipv6.address}"}
+
+    prefix ${strAddr cfg.ipv6.pool}
+    dynamic-pool ${strAddr cfg.ipv4.pool}
+    data-dir ${cfg.dataDir}
+  '';
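+  # For illustration, with the example values shown in the options below this
+  # template renders roughly as:
+  #   tun-device nat64
+  #   ipv4-addr 192.0.2.0
+  #   ipv6-addr 2001:db8::1
+  #   prefix 64:ff9b::/96
+  #   dynamic-pool 192.0.2.1/24
+  #   data-dir /var/lib/tayga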
+
+  addrOpts = v:
+    assert v == 4 || v == 6;
+    {
+      options = {
+        address = mkOption {
+          type = types.str;
+          description = lib.mdDoc "IPv${toString v} address.";
+        };
+
+        prefixLength = mkOption {
+          type = types.addCheck types.int (n: n >= 0 && n <= (if v == 4 then 32 else 128));
+          description = lib.mdDoc ''
+            Subnet mask of the interface, specified as the number of
+            bits in the prefix ("${if v == 4 then "24" else "64"}").
+          '';
+        };
+      };
+    };
+
+  versionOpts = v: {
+    options = {
+      router = {
+        address = mkOption {
+          type = types.str;
+          description = lib.mdDoc "The IPv${toString v} address of the router.";
+        };
+      };
+
+      address = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "The source IPv${toString v} address of the TAYGA server.";
+      };
+
+      pool = mkOption {
+        type = with types; nullOr (submodule (addrOpts v));
+        description = lib.mdDoc "The pool of IPv${toString v} addresses which are used for translation.";
+      };
+    };
+  };
+in
+{
+  options = {
+    services.tayga = {
+      enable = mkEnableOption (lib.mdDoc "Tayga");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.tayga;
+        defaultText = lib.literalMD "pkgs.tayga";
+        description = lib.mdDoc "This option specifies the TAYGA package to use.";
+      };
+
+      ipv4 = mkOption {
+        type = types.submodule (versionOpts 4);
+        description = lib.mdDoc "IPv4-specific configuration.";
+        example = literalExpression ''
+          {
+            address = "192.0.2.0";
+            router = {
+              address = "192.0.2.1";
+            };
+            pool = {
+              address = "192.0.2.1";
+              prefixLength = 24;
+            };
+          }
+        '';
+      };
+
+      ipv6 = mkOption {
+        type = types.submodule (versionOpts 6);
+        description = lib.mdDoc "IPv6-specific configuration.";
+        example = literalExpression ''
+          {
+            address = "2001:db8::1";
+            router = {
+              address = "64:ff9b::1";
+            };
+            pool = {
+              address = "64:ff9b::";
+              prefixLength = 96;
+            };
+          }
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/tayga";
+        description = lib.mdDoc "Directory for persistent data";
+      };
+
+      tunDevice = mkOption {
+        type = types.str;
+        default = "nat64";
+        description = lib.mdDoc "Name of the nat64 tun device";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.interfaces."${cfg.tunDevice}" = {
+      virtual = true;
+      virtualType = "tun";
+      virtualOwner = mkIf config.networking.useNetworkd "";
+      ipv4 = {
+        addresses = [
+          { address = cfg.ipv4.router.address; prefixLength = 32; }
+        ];
+        routes = [
+          cfg.ipv4.pool
+        ];
+      };
+      ipv6 = {
+        addresses = [
+          { address = cfg.ipv6.router.address; prefixLength = 128; }
+        ];
+        routes = [
+          cfg.ipv6.pool
+        ];
+      };
+    };
+
+    systemd.services.tayga = {
+      description = "Stateless NAT64 implementation";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/tayga -d --nodetach --config ${configFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
+        Restart = "always";
+
+        # Hardening Score:
+        #  - nixos-scripts: 2.1
+        #  - systemd-networkd: 1.6
+        ProtectHome = true;
+        SystemCallFilter = [
+          "@network-io"
+          "@system-service"
+          "~@privileged"
+          "~@resources"
+        ];
+        ProtectKernelLogs = true;
+        AmbientCapabilities = [
+          "CAP_NET_ADMIN"
+        ];
+        CapabilityBoundingSet = "";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_NETLINK"
+        ];
+        StateDirectory = "tayga";
+        DynamicUser = mkIf config.networking.useNetworkd true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        ProtectHostname = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictNamespaces = true;
+        NoNewPrivileges = true;
+        ProtectControlGroups = true;
+        SystemCallArchitectures = "native";
+        PrivateTmp = true;
+        LockPersonality = true;
+        ProtectSystem = true;
+        PrivateUsers = true;
+        ProtectProc = "invisible";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/tcpcrypt.nix b/nixpkgs/nixos/modules/services/networking/tcpcrypt.nix
new file mode 100644
index 000000000000..f2115a6660cb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tcpcrypt.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking.tcpcrypt;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    networking.tcpcrypt.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable opportunistic TCP encryption. If the other end
+        speaks Tcpcrypt, then your traffic will be encrypted; otherwise
+        it will be sent in clear text. Thus, Tcpcrypt alone provides no
+        guarantees -- it is best effort. If, however, a Tcpcrypt
+        connection is successful and any attackers that exist are
+        passive, then Tcpcrypt guarantees privacy.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users.users.tcpcryptd = {
+      uid = config.ids.uids.tcpcryptd;
+      description = "tcpcrypt daemon user";
+    };
+
+    systemd.services.tcpcrypt = {
+      description = "tcpcrypt";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      path = [ pkgs.iptables pkgs.tcpcrypt pkgs.procps ];
+
+      preStart = ''
+        mkdir -p /run/tcpcryptd
+        chown tcpcryptd /run/tcpcryptd
+        sysctl -n net.ipv4.tcp_ecn > /run/tcpcryptd/pre-tcpcrypt-ecn-state
+        sysctl -w net.ipv4.tcp_ecn=0
+
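+        # Divert TCP packets that do not yet carry the 0x10 mark to NFQUEUE 666,
+        # where tcpcryptd (started below with "-x 0x10") handles them.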
+        iptables -t raw -N nixos-tcpcrypt
+        iptables -t raw -A nixos-tcpcrypt -p tcp -m mark --mark 0x0/0x10 -j NFQUEUE --queue-num 666
+        iptables -t raw -I PREROUTING -j nixos-tcpcrypt
+
+        iptables -t mangle -N nixos-tcpcrypt
+        iptables -t mangle -A nixos-tcpcrypt -p tcp -m mark --mark 0x0/0x10 -j NFQUEUE --queue-num 666
+        iptables -t mangle -I POSTROUTING -j nixos-tcpcrypt
+      '';
+
+      script = "tcpcryptd -x 0x10";
+
+      postStop = ''
+        if [ -f /run/tcpcryptd/pre-tcpcrypt-ecn-state ]; then
+          sysctl -w net.ipv4.tcp_ecn=$(cat /run/tcpcryptd/pre-tcpcrypt-ecn-state)
+        fi
+
+        iptables -t mangle -D POSTROUTING -j nixos-tcpcrypt || true
+        iptables -t raw -D PREROUTING -j nixos-tcpcrypt || true
+
+        iptables -t raw -F nixos-tcpcrypt || true
+        iptables -t raw -X nixos-tcpcrypt || true
+
+        iptables -t mangle -F nixos-tcpcrypt || true
+        iptables -t mangle -X nixos-tcpcrypt || true
+      '';
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/teamspeak3.nix b/nixpkgs/nixos/modules/services/networking/teamspeak3.nix
new file mode 100644
index 000000000000..f09ef1a959ed
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/teamspeak3.nix
@@ -0,0 +1,161 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  ts3 = pkgs.teamspeak_server;
+  cfg = config.services.teamspeak3;
+  user = "teamspeak";
+  group = "teamspeak";
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.teamspeak3 = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run the Teamspeak3 voice communication server daemon.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/teamspeak3-server";
+        description = lib.mdDoc ''
+          Directory to store TS3 database and other state/data files.
+        '';
+      };
+
+      logPath = mkOption {
+        type = types.path;
+        default = "/var/log/teamspeak3-server/";
+        description = lib.mdDoc ''
+          Directory to store log files in.
+        '';
+      };
+
+      voiceIP = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "[::]";
+        description = lib.mdDoc ''
+          IP on which the server instance will listen for incoming voice connections. Defaults to any IP.
+        '';
+      };
+
+      defaultVoicePort = mkOption {
+        type = types.int;
+        default = 9987;
+        description = lib.mdDoc ''
+          Default UDP port for clients to connect to virtual servers. It is used for the first virtual server; subsequent ones will open on incrementing port numbers by default.
+        '';
+      };
+
+      fileTransferIP = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "[::]";
+        description = lib.mdDoc ''
+          IP on which the server instance will listen for incoming file transfer connections. Defaults to any IP.
+        '';
+      };
+
+      fileTransferPort = mkOption {
+        type = types.int;
+        default = 30033;
+        description = lib.mdDoc ''
+          TCP port opened for file transfers.
+        '';
+      };
+
+      queryIP = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "0.0.0.0";
+        description = lib.mdDoc ''
+          IP on which the server instance will listen for incoming ServerQuery connections. Defaults to any IP.
+        '';
+      };
+
+      queryPort = mkOption {
+        type = types.int;
+        default = 10011;
+        description = lib.mdDoc ''
+          TCP port opened for ServerQuery connections.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the TeamSpeak3 server.";
+      };
+
+      openFirewallServerQuery = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Open ports in the firewall for the TeamSpeak3 serverquery (administration) system. Requires openFirewall.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    users.users.teamspeak = {
+      description = "Teamspeak3 voice communication server daemon";
+      group = group;
+      uid = config.ids.uids.teamspeak;
+      home = cfg.dataDir;
+      createHome = true;
+    };
+
+    users.groups.teamspeak = {
+      gid = config.ids.gids.teamspeak;
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.logPath}' - ${user} ${group} - -"
+    ];
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.fileTransferPort ] ++ optionals (cfg.openFirewallServerQuery) [ cfg.queryPort (cfg.queryPort + 11) ];
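+      # queryPort + 11 presumably covers the ServerQuery-over-SSH port (10022 with the default 10011)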
+      # subsequent vServers will use the incremented voice port, let's just open the next 10
+      allowedUDPPortRanges = [ { from = cfg.defaultVoicePort; to = cfg.defaultVoicePort + 10; } ];
+    };
+
+    systemd.services.teamspeak3-server = {
+      description = "Teamspeak3 voice communication server daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = ''
+          ${ts3}/bin/ts3server \
+            dbsqlpath=${ts3}/lib/teamspeak/sql/ logpath=${cfg.logPath} \
+            ${optionalString (cfg.voiceIP != null) "voice_ip=${cfg.voiceIP}"} \
+            default_voice_port=${toString cfg.defaultVoicePort} \
+            ${optionalString (cfg.fileTransferIP != null) "filetransfer_ip=${cfg.fileTransferIP}"} \
+            filetransfer_port=${toString cfg.fileTransferPort} \
+            ${optionalString (cfg.queryIP != null) "query_ip=${cfg.queryIP}"} \
+            query_port=${toString cfg.queryPort} license_accepted=1
+        '';
+        WorkingDirectory = cfg.dataDir;
+        User = user;
+        Group = group;
+        Restart = "on-failure";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ arobyn ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/teleport.nix b/nixpkgs/nixos/modules/services/networking/teleport.nix
new file mode 100644
index 000000000000..399af711c0e1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/teleport.nix
@@ -0,0 +1,107 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.teleport;
+  settingsYaml = pkgs.formats.yaml { };
+in
+{
+  options = {
+    services.teleport = with lib.types; {
+      enable = mkEnableOption (lib.mdDoc "the Teleport service");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.teleport;
+        defaultText = lib.literalMD "pkgs.teleport";
+        example = lib.literalMD "pkgs.teleport_11";
+        description = lib.mdDoc "The teleport package to use";
+      };
+
+      settings = mkOption {
+        type = settingsYaml.type;
+        default = { };
+        example = literalExpression ''
+          {
+            teleport = {
+              nodename = "client";
+              advertise_ip = "192.168.1.2";
+              auth_token = "60bdc117-8ff4-478d-95e4-9914597847eb";
+              auth_servers = [ "192.168.1.1:3025" ];
+              log.severity = "DEBUG";
+            };
+            ssh_service = {
+              enabled = true;
+              labels = {
+                role = "client";
+              };
+            };
+            proxy_service.enabled = false;
+            auth_service.enabled = false;
+          }
+        '';
+        description = lib.mdDoc ''
+          Contents of the `teleport.yaml` config file.
+          The `--config` arguments will only be passed if this set is not empty.
+
+          See <https://goteleport.com/docs/setup/reference/config/>.
+        '';
+      };
+
+      insecure.enable = mkEnableOption (lib.mdDoc ''
+        starting teleport in insecure mode.
+
+        This is dangerous!
+        Sensitive information will be logged to console and certificates will not be verified.
+        Proceed with caution!
+
+        Teleport starts with certificate validation disabled on the Proxy Service; validation still occurs on the Auth Service.
+      '');
+
+      diag = {
+        enable = mkEnableOption (lib.mdDoc ''
+          endpoints for monitoring purposes.
+
+          See <https://goteleport.com/docs/setup/admin/troubleshooting/#troubleshooting/>
+        '');
+
+        addr = mkOption {
+          type = str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "Metrics and diagnostics address.";
+        };
+
+        port = mkOption {
+          type = port;
+          default = 3000;
+          description = lib.mdDoc "Metrics and diagnostics port.";
+        };
+      };
+    };
+  };
+
+  config = mkIf config.services.teleport.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.teleport = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/teleport start \
+            ${optionalString cfg.insecure.enable "--insecure"} \
+            ${optionalString cfg.diag.enable "--diag-addr=${cfg.diag.addr}:${toString cfg.diag.port}"} \
+            ${optionalString (cfg.settings != { }) "--config=${settingsYaml.generate "teleport.yaml" cfg.settings}"}
+        '';
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        LimitNOFILE = 65536;
+        Restart = "always";
+        RestartSec = "5s";
+        RuntimeDirectory = "teleport";
+        Type = "simple";
+      };
+    };
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/services/networking/tetrd.nix b/nixpkgs/nixos/modules/services/networking/tetrd.nix
new file mode 100644
index 000000000000..6284a5b1fb1b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tetrd.nix
@@ -0,0 +1,96 @@
+{ config, lib, pkgs, ... }:
+
+{
+  options.services.tetrd.enable = lib.mkEnableOption (lib.mdDoc "tetrd");
+
+  config = lib.mkIf config.services.tetrd.enable {
+    environment = {
+      systemPackages = [ pkgs.tetrd ];
+      etc."resolv.conf".source = "/etc/tetrd/resolv.conf";
+    };
+
+    systemd = {
+      tmpfiles.rules = [ "f /etc/tetrd/resolv.conf - - -" ];
+
+      services.tetrd = {
+        description = pkgs.tetrd.meta.description;
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          ExecStart = "${pkgs.tetrd}/opt/Tetrd/bin/tetrd";
+          Restart = "always";
+          RuntimeDirectory = "tetrd";
+          RootDirectory = "/run/tetrd";
+          DynamicUser = true;
+          UMask = "006";
+          DeviceAllow = "usb_device";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateMounts = true;
+          PrivateNetwork = lib.mkDefault false;
+          PrivateTmp = true;
+          PrivateUsers = lib.mkDefault false;
+          ProtectClock = lib.mkDefault false;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          ProtectSystem = "strict";
+          RemoveIPC = true;
+          RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+
+          SystemCallFilter = [
+            "@system-service"
+            "~@aio"
+            "~@chown"
+            "~@clock"
+            "~@cpu-emulation"
+            "~@debug"
+            "~@keyring"
+            "~@memlock"
+            "~@module"
+            "~@mount"
+            "~@obsolete"
+            "~@pkey"
+            "~@raw-io"
+            "~@reboot"
+            "~@swap"
+            "~@sync"
+          ];
+
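+          # The service is chrooted into RootDirectory (/run/tetrd); the bind mounts
+          # below make the Nix store, TLS certificates and the route/ifconfig tools
+          # visible inside that root.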
+          BindReadOnlyPaths = [
+            builtins.storeDir
+            "/etc/ssl"
+            "/etc/static/ssl"
+            "${pkgs.nettools}/bin/route:/usr/bin/route"
+            "${pkgs.nettools}/bin/ifconfig:/usr/bin/ifconfig"
+          ];
+
+          BindPaths = [
+            "/etc/tetrd/resolv.conf:/etc/resolv.conf"
+            "/run"
+            "/var/log"
+          ];
+
+          CapabilityBoundingSet = [
+            "CAP_DAC_OVERRIDE"
+            "CAP_NET_ADMIN"
+          ];
+
+          AmbientCapabilities = [
+            "CAP_DAC_OVERRIDE"
+            "CAP_NET_ADMIN"
+          ];
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/tftpd.nix b/nixpkgs/nixos/modules/services/networking/tftpd.nix
new file mode 100644
index 000000000000..a4dc137daa4c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tftpd.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.tftpd.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable tftpd, a Trivial File Transfer Protocol server.
+        The server will be run as an xinetd service.
+      '';
+    };
+
+    services.tftpd.path = mkOption {
+      type = types.path;
+      default = "/srv/tftp";
+      description = lib.mdDoc ''
+        Where the tftp server files are stored.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.tftpd.enable {
+
+    services.xinetd.enable = true;
+
+    services.xinetd.services = singleton
+      { name = "tftp";
+        protocol = "udp";
+        server = "${pkgs.netkittftp}/sbin/in.tftpd";
+        serverArgs = "${config.services.tftpd.path}";
+      };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/thelounge.nix b/nixpkgs/nixos/modules/services/networking/thelounge.nix
new file mode 100644
index 000000000000..321e46fb5d4d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/thelounge.nix
@@ -0,0 +1,110 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.thelounge;
+  dataDir = "/var/lib/thelounge";
+  configJsData = "module.exports = " + builtins.toJSON (
+    { inherit (cfg) public port; } // cfg.extraConfig
+  );
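+  # For illustration, with the default options this renders config.js roughly as:
+  #   module.exports = {"port":9000,"public":false}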
+  pluginManifest = {
+    dependencies = builtins.listToAttrs (builtins.map (pkg: { name = getName pkg; value = getVersion pkg; }) cfg.plugins);
+  };
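+  # Assemble a directory resembling an npm prefix (package.json plus node_modules
+  # symlinks) so The Lounge can discover the configured plugins; it is passed to
+  # the service below via THELOUNGE_PACKAGES.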
+  plugins = pkgs.runCommandLocal "thelounge-plugins" { } ''
+    mkdir -p $out/node_modules
+    echo ${escapeShellArg (builtins.toJSON pluginManifest)} >> $out/package.json
+    ${concatMapStringsSep "\n" (pkg: ''
+    ln -s ${pkg}/lib/node_modules/${getName pkg} $out/node_modules/${getName pkg}
+    '') cfg.plugins}
+  '';
+in
+{
+  imports = [ (mkRemovedOptionModule [ "services" "thelounge" "private" ] "The option was renamed to `services.thelounge.public` to follow upstream changes.") ];
+
+  options.services.thelounge = {
+    enable = mkEnableOption (lib.mdDoc "The Lounge web IRC client");
+
+    package = mkPackageOptionMD pkgs "thelounge" { };
+
+    public = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Make your The Lounge instance public.
+        Setting this to `false` will require you to configure user
+        accounts by using the {command}`thelounge` command or by adding
+        entries in {file}`${dataDir}/users`. You might need to restart
+        The Lounge after making changes to the state directory.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 9000;
+      description = lib.mdDoc "TCP port to listen on for http connections.";
+    };
+
+    extraConfig = mkOption {
+      default = { };
+      type = types.attrs;
+      example = literalExpression ''
+        {
+          reverseProxy = true;
+          defaults = {
+            name = "Your Network";
+            host = "localhost";
+            port = 6697;
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        The Lounge's {file}`config.js` contents as attribute set (will be
+        converted to JSON to generate the configuration file).
+
+        The options defined here will be merged into the default configuration file.
+        Note: In case of duplicate configuration, options from {option}`extraConfig` have priority.
+
+        Documentation: <https://thelounge.chat/docs/server/configuration>
+      '';
+    };
+
+    plugins = mkOption {
+      default = [ ];
+      type = types.listOf types.package;
+      example = literalExpression "[ pkgs.theLoungePlugins.themes.solarized ]";
+      description = lib.mdDoc ''
+        The Lounge plugins to install. Plugins can be found in
+        `pkgs.theLoungePlugins.plugins` and `pkgs.theLoungePlugins.themes`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.thelounge = {
+      description = "The Lounge service user";
+      group = "thelounge";
+      isSystemUser = true;
+    };
+
+    users.groups.thelounge = { };
+
+    systemd.services.thelounge = {
+      description = "The Lounge web IRC client";
+      wantedBy = [ "multi-user.target" ];
+      preStart = "ln -sf ${pkgs.writeText "config.js" configJsData} ${dataDir}/config.js";
+      environment.THELOUNGE_PACKAGES = mkIf (cfg.plugins != [ ]) "${plugins}";
+      serviceConfig = {
+        User = "thelounge";
+        StateDirectory = baseNameOf dataDir;
+        ExecStart = "${getExe cfg.package} start";
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ winter ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/tinc.nix b/nixpkgs/nixos/modules/services/networking/tinc.nix
new file mode 100644
index 000000000000..7db83e6a584b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tinc.nix
@@ -0,0 +1,442 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.tinc;
+
+  mkValueString = value:
+    if value == true then "yes"
+    else if value == false then "no"
+    else generators.mkValueStringDefault { } value;
+
+  toTincConf = generators.toKeyValue {
+    listsAsDuplicateKeys = true;
+    mkKeyValue = generators.mkKeyValueDefault { inherit mkValueString; } "=";
+  };
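+  # For illustration, toTincConf { DirectOnly = true; Interface = "tinc.dmz"; Mode = "switch"; }
+  # renders roughly as:
+  #   DirectOnly=yes
+  #   Interface=tinc.dmz
+  #   Mode=switch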
+
+  tincConfType = with types;
+    let
+      valueType = oneOf [ bool str int ];
+    in
+    attrsOf (either valueType (listOf valueType));
+
+  addressSubmodule = {
+    options = {
+      address = mkOption {
+        type = types.str;
+        description = lib.mdDoc "The external IP address or hostname where the host can be reached.";
+      };
+
+      port = mkOption {
+        type = types.nullOr types.port;
+        default = null;
+        description = lib.mdDoc ''
+          The port where the host can be reached.
+
+          If no port is specified, the default Port is used.
+        '';
+      };
+    };
+  };
+
+  subnetSubmodule = {
+    options = {
+      address = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The subnet of this host.
+
+          Subnets can either be single MAC, IPv4 or IPv6 addresses, in which case
+          a subnet consisting of only that single address is assumed, or they can
+          be an IPv4 or IPv6 network address with a prefix length.
+
+          IPv4 subnets are notated like 192.168.1.0/24, IPv6 subnets are notated
+          like fec0:0:0:1::/64. MAC addresses are notated like 0:1a:2b:3c:4d:5e.
+
+          Note that subnets like 192.168.1.1/24 are invalid.
+        '';
+      };
+
+      prefixLength = mkOption {
+        type = with types; nullOr (addCheck int (n: n >= 0 && n <= 128));
+        default = null;
+        description = lib.mdDoc ''
+          The prefix length of the subnet.
+
+          If null, a subnet consisting of only that single address is assumed.
+
+          This conforms to standard CIDR notation as described in RFC1519.
+        '';
+      };
+
+      weight = mkOption {
+        type = types.ints.unsigned;
+        default = 10;
+        description = lib.mdDoc ''
+          Indicates the priority over identical Subnets owned by different nodes.
+
+          Lower values indicate higher priority. Packets will be sent to the
+          node with the highest priority, unless that node is not reachable, in
+          which case the node with the next highest priority will be tried, and
+          so on.
+        '';
+      };
+    };
+  };
+
+  hostSubmodule = { config, ... }: {
+    options = {
+      addresses = mkOption {
+        type = types.listOf (types.submodule addressSubmodule);
+        default = [ ];
+        description = lib.mdDoc ''
+          The external address where the host can be reached. This will set this
+          host's {option}`settings.Address` option.
+
+          This variable is only required if you want to connect to this host.
+        '';
+      };
+
+      subnets = mkOption {
+        type = types.listOf (types.submodule subnetSubmodule);
+        default = [ ];
+        description = lib.mdDoc ''
+          The subnets which this tinc daemon will serve. This will set this
+          host's {option}`settings.Subnet` option.
+
+          Tinc tries to look up which other daemon it should send a packet to by
+          searching the appropriate subnet. If the packet matches a subnet, it
+          will be sent to the daemon that has this subnet in its host
+          configuration file.
+        '';
+      };
+
+      rsaPublicKey = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Legacy RSA public key of the host in PEM format, including start and
+          end markers.
+
+          This will be appended as-is in the host's configuration file.
+
+          The ed25519 public key can be specified using the
+          {option}`settings.Ed25519PublicKey` option instead.
+        '';
+      };
+
+      settings = mkOption {
+        default = { };
+        type = types.submodule { freeformType = tincConfType; };
+        description = lib.mdDoc ''
+          Configuration for this host.
+
+          See <https://tinc-vpn.org/documentation-1.1/Host-configuration-variables.html>
+          for supported values.
+        '';
+      };
+    };
+
+    config.settings = {
+      Address = mkDefault (map
+        (address: "${address.address} ${toString address.port}")
+        config.addresses);
+
+      Subnet = mkDefault (map
+        (subnet:
+          if subnet.prefixLength == null then "${subnet.address}#${toString subnet.weight}"
+          else "${subnet.address}/${toString subnet.prefixLength}#${toString subnet.weight}")
+        config.subnets);
+    };
+  };
+
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.tinc = {
+
+      networks = mkOption {
+        default = { };
+        type = with types; attrsOf (submodule ({ config, ... }: {
+          options = {
+
+            extraConfig = mkOption {
+              default = "";
+              type = types.lines;
+              description = lib.mdDoc ''
+                Extra lines to add to the tinc service configuration file.
+
+                Note that using the declarative {option}`service.tinc.networks.<name>.settings`
+                option is preferred.
+              '';
+            };
+
+            name = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The name of the node which is used as an identifier when communicating
+                with the remote nodes in the mesh. If null then the hostname of the system
+                is used to derive a name (note that tinc may replace non-alphanumeric characters in
+                hostnames with underscores).
+              '';
+            };
+
+            ed25519PrivateKeyFile = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = lib.mdDoc ''
+                Path of the private ed25519 keyfile.
+              '';
+            };
+
+            rsaPrivateKeyFile = mkOption {
+              default = null;
+              type = types.nullOr types.path;
+              description = lib.mdDoc ''
+                Path of the private RSA keyfile.
+              '';
+            };
+
+            debugLevel = mkOption {
+              default = 0;
+              type = types.addCheck types.int (l: l >= 0 && l <= 5);
+              description = lib.mdDoc ''
+                The amount of debugging information to add to the log. 0 means little
+                logging while 5 is the most logging. {command}`man tincd` for
+                more details.
+              '';
+            };
+
+            hosts = mkOption {
+              default = { };
+              type = types.attrsOf types.lines;
+              description = lib.mdDoc ''
+                The name of the host in the network as well as the configuration for that host.
+                This name should only contain alphanumerics and underscores.
+
+                Note that using the declarative {option}`service.tinc.networks.<name>.hostSettings`
+                option is preferred.
+              '';
+            };
+
+            hostSettings = mkOption {
+              default = { };
+              example = literalExpression ''
+                {
+                  host1 = {
+                    addresses = [
+                      { address = "192.168.1.42"; }
+                      { address = "192.168.1.42"; port = 1655; }
+                    ];
+                    subnets = [ { address = "10.0.0.42"; } ];
+                    rsaPublicKey = "...";
+                    settings = {
+                      Ed25519PublicKey = "...";
+                    };
+                  };
+                  host2 = {
+                    subnets = [ { address = "10.0.1.0"; prefixLength = 24; weight = 2; } ];
+                    rsaPublicKey = "...";
+                    settings = {
+                      Compression = 10;
+                    };
+                  };
+                }
+              '';
+              type = types.attrsOf (types.submodule hostSubmodule);
+              description = lib.mdDoc ''
+                The name of the host in the network as well as the configuration for that host.
+                This name should only contain alphanumerics and underscores.
+              '';
+            };
+
+            interfaceType = mkOption {
+              default = "tun";
+              type = types.enum [ "tun" "tap" ];
+              description = lib.mdDoc ''
+                The type of virtual interface used for the network connection.
+              '';
+            };
+
+            listenAddress = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The IP address to listen on for incoming connections.
+              '';
+            };
+
+            bindToAddress = mkOption {
+              default = null;
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The IP address to bind to (both listen on and send packets from).
+              '';
+            };
+
+            package = mkOption {
+              type = types.package;
+              default = pkgs.tinc_pre;
+              defaultText = literalExpression "pkgs.tinc_pre";
+              description = lib.mdDoc ''
+                The package to use for the tinc daemon's binary.
+              '';
+            };
+
+            chroot = mkOption {
+              default = false;
+              type = types.bool;
+              description = lib.mdDoc ''
+                Change process root directory to the directory where the config file is located (/etc/tinc/netname/), for added security.
+                The chroot is performed after all the initialization is done, after writing pid files and opening network sockets.
+
+                Note that this currently breaks DNS resolution and tinc can't run scripts anymore (such as tinc-down or host-up), unless it is set up to be runnable inside the chroot environment.
+              '';
+            };
+
+            settings = mkOption {
+              default = { };
+              type = types.submodule { freeformType = tincConfType; };
+              example = literalExpression ''
+                {
+                  Interface = "custom.interface";
+                  DirectOnly = true;
+                  Mode = "switch";
+                }
+              '';
+              description = lib.mdDoc ''
+                Configuration of the Tinc daemon for this network.
+
+                See <https://tinc-vpn.org/documentation-1.1/Main-configuration-variables.html>
+                for supported values.
+              '';
+            };
+          };
+
+          config = {
+            hosts = mapAttrs
+              (hostname: host: ''
+                ${toTincConf host.settings}
+                ${host.rsaPublicKey}
+              '')
+              config.hostSettings;
+
+            settings = {
+              DeviceType = mkDefault config.interfaceType;
+              Name = mkDefault (if config.name == null then "$HOST" else config.name);
+              Ed25519PrivateKeyFile = mkIf (config.ed25519PrivateKeyFile != null) (mkDefault config.ed25519PrivateKeyFile);
+              PrivateKeyFile = mkIf (config.rsaPrivateKeyFile != null) (mkDefault config.rsaPrivateKeyFile);
+              ListenAddress = mkIf (config.listenAddress != null) (mkDefault config.listenAddress);
+              BindToAddress = mkIf (config.bindToAddress != null) (mkDefault config.bindToAddress);
+            };
+          };
+        }));
+
+        description = lib.mdDoc ''
+          Defines the tinc networks which will be started.
+          Each network invokes a different daemon.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf (cfg.networks != { }) (
+    let
+      etcConfig = foldr (a: b: a // b) { }
+        (flip mapAttrsToList cfg.networks (network: data:
+          flip mapAttrs' data.hosts (host: text: nameValuePair
+            ("tinc/${network}/hosts/${host}")
+            ({ mode = "0644"; user = "tinc.${network}"; inherit text; })
+          ) // {
+            "tinc/${network}/tinc.conf" = {
+              mode = "0444";
+              text = ''
+                ${toTincConf ({ Interface = "tinc.${network}"; } // data.settings)}
+                ${data.extraConfig}
+              '';
+            };
+          }
+        ));
+    in {
+      environment.etc = etcConfig;
+
+      systemd.services = flip mapAttrs' cfg.networks (network: data: nameValuePair
+        ("tinc.${network}")
+        (let version = getVersion data.package; in {
+          description = "Tinc Daemon - ${network}";
+          wantedBy = [ "multi-user.target" ];
+          path = [ data.package ];
+          reloadTriggers = mkIf (versionAtLeast version "1.1pre") [ (builtins.toJSON etcConfig) ];
+          restartTriggers = mkIf (versionOlder version "1.1pre") [ (builtins.toJSON etcConfig) ];
+          serviceConfig = {
+            Type = "simple";
+            Restart = "always";
+            RestartSec = "3";
+            ExecReload = mkIf (versionAtLeast version "1.1pre") "${data.package}/bin/tinc -n ${network} reload";
+            ExecStart = "${data.package}/bin/tincd -D -U tinc.${network} -n ${network} ${optionalString (data.chroot) "-R"} --pidfile /run/tinc.${network}.pid -d ${toString data.debugLevel}";
+          };
+          preStart = ''
+            mkdir -p /etc/tinc/${network}/hosts
+            chown tinc.${network} /etc/tinc/${network}/hosts
+            mkdir -p /etc/tinc/${network}/invitations
+            chown tinc.${network} /etc/tinc/${network}/invitations
+
+            # Determine how we should generate our keys
+            if type tinc >/dev/null 2>&1; then
+              # Tinc 1.1+ uses the tinc helper application for key generation
+            ${if data.ed25519PrivateKeyFile != null then "  # ed25519 Keyfile managed by nix" else ''
+              # Prefer ED25519 keys (only in 1.1+)
+              [ -f "/etc/tinc/${network}/ed25519_key.priv" ] || tinc -n ${network} generate-ed25519-keys
+            ''}
+            ${if data.rsaPrivateKeyFile != null then "  # RSA Keyfile managed by nix" else ''
+              [ -f "/etc/tinc/${network}/rsa_key.priv" ] || tinc -n ${network} generate-rsa-keys 4096
+            ''}
+              # In case there isn't anything to do
+              true
+            else
+              # Tinc 1.0 uses the tincd application
+              [ -f "/etc/tinc/${network}/rsa_key.priv" ] || tincd -n ${network} -K 4096
+            fi
+          '';
+        })
+      );
+
+      environment.systemPackages = let
+        cli-wrappers = pkgs.stdenv.mkDerivation {
+          name = "tinc-cli-wrappers";
+          nativeBuildInputs = [ pkgs.makeWrapper ];
+          buildCommand = ''
+            mkdir -p $out/bin
+            ${concatStringsSep "\n" (mapAttrsToList (network: data:
+              optionalString (versionAtLeast data.package.version "1.1pre") ''
+                makeWrapper ${data.package}/bin/tinc "$out/bin/tinc.${network}" \
+                  --add-flags "--pidfile=/run/tinc.${network}.pid" \
+                  --add-flags "--config=/etc/tinc/${network}"
+              '') cfg.networks)}
+          '';
+        };
+      in [ cli-wrappers ];
+
+      users.users = flip mapAttrs' cfg.networks (network: _:
+        nameValuePair ("tinc.${network}") ({
+          description = "Tinc daemon user for ${network}";
+          isSystemUser = true;
+          group = "tinc.${network}";
+        })
+      );
+      users.groups = flip mapAttrs' cfg.networks (network: _:
+        nameValuePair "tinc.${network}" {}
+      );
+    });
+
+  meta.maintainers = with maintainers; [ minijackson mic92 ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/tinydns.nix b/nixpkgs/nixos/modules/services/networking/tinydns.nix
new file mode 100644
index 000000000000..ea91af5f1967
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tinydns.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  ###### interface
+
+  options = {
+    services.tinydns = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to run the tinydns dns server";
+      };
+
+      data = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "The DNS data to serve, in the format described by tinydns-data(8)";
+      };
+
+      ip = mkOption {
+        default = "0.0.0.0";
+        type = types.str;
+        description = lib.mdDoc "IP address on which to listen for connections";
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf config.services.tinydns.enable {
+    environment.systemPackages = [ pkgs.djbdns ];
+
+    users.users.tinydns = {
+      isSystemUser = true;
+      group = "tinydns";
+    };
+    users.groups.tinydns = {};
+
+    systemd.services.tinydns = {
+      description = "djbdns tinydns server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = with pkgs; [ daemontools djbdns ];
+      preStart = ''
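+        # Recreate the daemontools service directory from scratch, link in the
+        # declaratively configured data file and compile it with tinydns-data
+        # (producing data.cdb).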
+        rm -rf /var/lib/tinydns
+        tinydns-conf tinydns tinydns /var/lib/tinydns ${config.services.tinydns.ip}
+        cd /var/lib/tinydns/root/
+        ln -sf ${pkgs.writeText "tinydns-data" config.services.tinydns.data} data
+        tinydns-data
+      '';
+      script = ''
+        cd /var/lib/tinydns
+        exec ./run
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/tinyproxy.nix b/nixpkgs/nixos/modules/services/networking/tinyproxy.nix
new file mode 100644
index 000000000000..9bcd8bfd814b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tinyproxy.nix
@@ -0,0 +1,103 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.tinyproxy;
+  mkValueStringTinyproxy = with lib; v:
+    if v == true then "yes"
+    else if v == false then "no"
+    else generators.mkValueStringDefault {} v;
+  mkKeyValueTinyproxy = {
+    mkValueString ? lib.generators.mkValueStringDefault {}
+  }: sep: k: v:
+    if v == null then ""
+    else "${lib.strings.escape [sep] k}${sep}${mkValueString v}";
+
+  settingsFormat = (pkgs.formats.keyValue {
+      mkKeyValue = mkKeyValueTinyproxy {
+        mkValueString = mkValueStringTinyproxy;
+      } " ";
+      listsAsDuplicateKeys= true;
+  });
+  configFile = settingsFormat.generate "tinyproxy.conf" cfg.settings;
+
+in
+{
+
+  options = {
+    services.tinyproxy = {
+      enable = mkEnableOption (lib.mdDoc "Tinyproxy daemon");
+      package = mkPackageOptionMD pkgs "tinyproxy" {};
+      settings = mkOption {
+        description = lib.mdDoc "Configuration for [tinyproxy](https://tinyproxy.github.io/).";
+        default = { };
+        example = literalExpression ''
+          {
+            Port = 8888;
+            Listen = "127.0.0.1";
+            Timeout = 600;
+            Allow = "127.0.0.1";
+            Anonymous = [ "\"Host\"" "\"Authorization\"" ];
+            ReversePath = "\"/example/\" \"http://www.example.com/\"";
+          }
+        '';
+        type = types.submodule ({name, ...}: {
+          freeformType = settingsFormat.type;
+          options = {
+            Listen = mkOption {
+              type = types.str;
+              default = "127.0.0.1";
+              description = lib.mdDoc ''
+              Specify which address to listen to.
+              '';
+            };
+            Port = mkOption {
+              type = types.int;
+              default = 8888;
+              description = lib.mdDoc ''
+              Specify which port to listen to.
+              '';
+            };
+            Anonymous = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = lib.mdDoc ''
+              If an `Anonymous` keyword is present, then anonymous proxying is enabled. The headers listed with `Anonymous` are allowed through, while all others are denied. If no Anonymous keyword is present, then all headers are allowed through. You must include quotes around the headers.
+              '';
+            };
+            Filter = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = lib.mdDoc ''
+              Tinyproxy supports filtering of web sites based on URLs or domains. This option specifies the location of the file containing the filter rules, one rule per line.
+              '';
+            };
+          };
+        });
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.tinyproxy = {
+      description = "TinyProxy daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = "tinyproxy";
+        Group = "tinyproxy";
+        Type = "simple";
+        ExecStart = "${getExe pkgs.tinyproxy} -d -c ${configFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
+        KillSignal = "SIGINT";
+        TimeoutStopSec = "30s";
+        Restart = "on-failure";
+      };
+    };
+
+    users.users.tinyproxy = {
+        group = "tinyproxy";
+        isSystemUser = true;
+    };
+    users.groups.tinyproxy = {};
+  };
+  meta.maintainers = with maintainers; [ tcheronneau ];
+}
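
A minimal, illustrative sketch of how this module might be enabled from a NixOS
configuration, using only the options declared above (values are placeholders;
`Allow` is a freeform key passed through to tinyproxy.conf):

  services.tinyproxy = {
    enable = true;
    settings = {
      Listen = "127.0.0.1";  # address to listen on
      Port = 8888;           # port to listen on
      Allow = "127.0.0.1";   # freeform setting, serialized as "Allow 127.0.0.1"
    };
  };
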
diff --git a/nixpkgs/nixos/modules/services/networking/tmate-ssh-server.nix b/nixpkgs/nixos/modules/services/networking/tmate-ssh-server.nix
new file mode 100644
index 000000000000..ff4ce0773309
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tmate-ssh-server.nix
@@ -0,0 +1,122 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.tmate-ssh-server;
+
+  defaultKeysDir = "/etc/tmate-ssh-server-keys";
+  edKey = "${defaultKeysDir}/ssh_host_ed25519_key";
+  rsaKey = "${defaultKeysDir}/ssh_host_rsa_key";
+
+  keysDir =
+    if cfg.keysDir == null
+    then defaultKeysDir
+    else cfg.keysDir;
+
+  domain = config.networking.domain;
+in
+{
+  options.services.tmate-ssh-server = {
+    enable = mkEnableOption (mdDoc "tmate ssh server");
+
+    package = mkOption {
+      type = types.package;
+      description = mdDoc "The package containing tmate-ssh-server";
+      defaultText = literalExpression "pkgs.tmate-ssh-server";
+      default = pkgs.tmate-ssh-server;
+    };
+
+    host = mkOption {
+      type = types.str;
+      description = mdDoc "External host name";
+      defaultText = lib.literalExpression "config.networking.domain or config.networking.hostName";
+      default =
+        if domain == null then
+          config.networking.hostName
+        else
+          domain;
+    };
+
+    port = mkOption {
+      type = types.port;
+      description = mdDoc "Listen port for the ssh server";
+      default = 2222;
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc "Whether to automatically open the specified ports in the firewall.";
+    };
+
+    advertisedPort = mkOption {
+      type = types.port;
+      description = mdDoc "External port advertised to clients";
+    };
+
+    keysDir = mkOption {
+      type = with types; nullOr str;
+      description = mdDoc "Directory containing ssh keys, defaulting to auto-generation";
+      default = null;
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    networking.firewall.allowedTCPPorts = optionals cfg.openFirewall [ cfg.port ];
+
+    services.tmate-ssh-server = {
+      advertisedPort = mkDefault cfg.port;
+    };
+
+    environment.systemPackages =
+      let
+        tmate-config = pkgs.writeText "tmate.conf"
+          ''
+            set -g tmate-server-host "${cfg.host}"
+            set -g tmate-server-port ${toString cfg.port}
+            set -g tmate-server-ed25519-fingerprint "@ed25519_fingerprint@"
+            set -g tmate-server-rsa-fingerprint "@rsa_fingerprint@"
+          '';
+      in
+      [
+        (pkgs.writeShellApplication {
+          name = "tmate-client-config";
+          runtimeInputs = with pkgs;[ openssh coreutils sd ];
+          text = ''
+            RSA_SIG="$(ssh-keygen -l -E SHA256 -f "${keysDir}/ssh_host_rsa_key.pub" | cut -d ' ' -f 2)"
+            ED25519_SIG="$(ssh-keygen -l -E SHA256 -f "${keysDir}/ssh_host_ed25519_key.pub" | cut -d ' ' -f 2)"
+            sd -sp '@ed25519_fingerprint@' "$ED25519_SIG" ${tmate-config} | \
+              sd -sp '@rsa_fingerprint@' "$RSA_SIG"
+          '';
+        })
+      ];
+
+    systemd.services.tmate-ssh-server = {
+      description = "tmate SSH Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/tmate-ssh-server -h ${cfg.host} -p ${toString cfg.port} -q ${toString cfg.advertisedPort} -k ${keysDir}";
+      };
+      preStart = mkIf (cfg.keysDir == null) ''
+        if [[ ! -d ${defaultKeysDir} ]]
+        then
+          mkdir -p ${defaultKeysDir}
+        fi
+        if [[ ! -f ${edKey} ]]
+        then
+          ${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f ${edKey} -N ""
+        fi
+        if [[ ! -f ${rsaKey} ]]
+        then
+          ${pkgs.openssh}/bin/ssh-keygen -t rsa -f ${rsaKey} -N ""
+        fi
+      '';
+    };
+  };
+
+  meta = {
+    maintainers = with maintainers; [ jlesquembre ];
+  };
+
+}
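
A hedged usage sketch for this module (illustrative values; host keys are
auto-generated because `keysDir` is left at its default of null):

  services.tmate-ssh-server = {
    enable = true;
    port = 2222;          # SSH listen port
    openFirewall = true;  # open the listen port in the firewall
  };
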
diff --git a/nixpkgs/nixos/modules/services/networking/tox-bootstrapd.nix b/nixpkgs/nixos/modules/services/networking/tox-bootstrapd.nix
new file mode 100644
index 000000000000..0f310a28d266
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tox-bootstrapd.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  WorkingDirectory = "/var/lib/tox-bootstrapd";
+  PIDFile = "${WorkingDirectory}/pid";
+
+  pkg = pkgs.libtoxcore;
+  cfg = config.services.toxBootstrapd;
+  cfgFile = builtins.toFile "tox-bootstrapd.conf"
+    ''
+      port = ${toString cfg.port}
+      keys_file_path = "${WorkingDirectory}/keys"
+      pid_file_path = "${PIDFile}"
+      ${cfg.extraConfig}
+    '';
+in
+{
+  options =
+    { services.toxBootstrapd =
+        { enable = mkOption {
+            type = types.bool;
+            default = false;
+            description =
+              lib.mdDoc ''
+                Whether to enable the Tox DHT bootstrap daemon.
+              '';
+          };
+
+          port = mkOption {
+            type = types.port;
+            default = 33445;
+            description = lib.mdDoc "Listening port (UDP).";
+          };
+
+          keysFile = mkOption {
+            type = types.str;
+            default = "${WorkingDirectory}/keys";
+            description = lib.mdDoc "Node key file.";
+          };
+
+          extraConfig = mkOption {
+            type = types.lines;
+            default = "";
+            description =
+              lib.mdDoc ''
+                Configuration for bootstrap daemon.
+                See <https://github.com/irungentoo/toxcore/blob/master/other/bootstrap_daemon/tox-bootstrapd.conf>
+                and <https://wiki.tox.chat/users/nodes>.
+             '';
+          };
+      };
+
+    };
+
+  config = mkIf config.services.toxBootstrapd.enable {
+
+    systemd.services.tox-bootstrapd = {
+      description = "Tox DHT bootstrap daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig =
+        { ExecStart = "${pkg}/bin/tox-bootstrapd --config=${cfgFile}";
+          Type = "forking";
+          inherit PIDFile WorkingDirectory;
+          AmbientCapabilities = ["CAP_NET_BIND_SERVICE"];
+          DynamicUser = true;
+          StateDirectory = "tox-bootstrapd";
+        };
+    };
+
+  };
+}
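
An illustrative enablement sketch based on the options above (33445 is the
module's default UDP port, shown explicitly):

  services.toxBootstrapd = {
    enable = true;
    port = 33445;  # listening UDP port
  };
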
diff --git a/nixpkgs/nixos/modules/services/networking/tox-node.nix b/nixpkgs/nixos/modules/services/networking/tox-node.nix
new file mode 100644
index 000000000000..884fd55dae51
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tox-node.nix
@@ -0,0 +1,90 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  pkg = pkgs.tox-node;
+  cfg = config.services.tox-node;
+  homeDir = "/var/lib/tox-node";
+
+  configFile = let
+    src = "${pkg.src}/tox_node/dpkg/config.yml";
+    confJSON = pkgs.writeText "config.json" (
+      builtins.toJSON {
+        log-type = cfg.logType;
+        keys-file = cfg.keysFile;
+        udp-address = cfg.udpAddress;
+        tcp-addresses = cfg.tcpAddresses;
+        tcp-connections-limit = cfg.tcpConnectionLimit;
+        lan-discovery = cfg.lanDiscovery;
+        threads = cfg.threads;
+        motd = cfg.motd;
+      }
+    );
+  in with pkgs; runCommand "config.yml" {} ''
+    ${remarshal}/bin/remarshal -if yaml -of json ${src} -o src.json
+    ${jq}/bin/jq -s '(.[0] | with_entries( select(.key == "bootstrap-nodes"))) * .[1]' src.json ${confJSON} > $out
+  '';
+
+in {
+  options.services.tox-node = {
+    enable = mkEnableOption (lib.mdDoc "Tox Node service");
+
+    logType = mkOption {
+      type = types.enum [ "Stderr" "Stdout" "Syslog" "None" ];
+      default = "Stderr";
+      description = lib.mdDoc "Logging implementation.";
+    };
+    keysFile = mkOption {
+      type = types.str;
+      default = "${homeDir}/keys";
+      description = lib.mdDoc "Path to the file where DHT keys are stored.";
+    };
+    udpAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0:33445";
+      description = lib.mdDoc "UDP address to run DHT node.";
+    };
+    tcpAddresses = mkOption {
+      type = types.listOf types.str;
+      default = [ "0.0.0.0:33445" ];
+      description = lib.mdDoc "TCP addresses to run TCP relay.";
+    };
+    tcpConnectionLimit = mkOption {
+      type = types.int;
+      default = 8192;
+      description = lib.mdDoc "Maximum number of active TCP connections relay can hold";
+    };
+    lanDiscovery = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Enable local network discovery.";
+    };
+    threads = mkOption {
+      type = types.int;
+      default = 1;
+      description = lib.mdDoc "Number of threads for execution";
+    };
+    motd = mkOption {
+      type = types.str;
+      default = "Hi from tox-rs! I'm up {{uptime}}. TCP: incoming {{tcp_packets_in}}, outgoing {{tcp_packets_out}}, UDP: incoming {{udp_packets_in}}, outgoing {{udp_packets_out}}";
+      description = lib.mdDoc "Message of the day";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.tox-node = {
+      description = "Tox Node";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkg}/bin/tox-node config ${configFile}";
+        StateDirectory = "tox-node";
+        DynamicUser = true;
+        Restart = "always";
+      };
+    };
+  };
+}
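
A small configuration sketch using the options declared above (values are
examples only):

  services.tox-node = {
    enable = true;
    logType = "Syslog";         # one of "Stderr", "Stdout", "Syslog", "None"
    tcpConnectionLimit = 1024;  # illustrative relay connection limit
    lanDiscovery = false;       # disable local network discovery
  };
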
diff --git a/nixpkgs/nixos/modules/services/networking/toxvpn.nix b/nixpkgs/nixos/modules/services/networking/toxvpn.nix
new file mode 100644
index 000000000000..3a14b5f73091
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/toxvpn.nix
@@ -0,0 +1,70 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options = {
+    services.toxvpn = {
+      enable = mkEnableOption (lib.mdDoc "toxvpn running on startup");
+
+      localip = mkOption {
+        type        = types.str;
+        default     = "10.123.123.1";
+        description = lib.mdDoc "your ip on the vpn";
+      };
+
+      port = mkOption {
+        type        = types.port;
+        default     = 33445;
+        description = lib.mdDoc "udp port for toxcore, port-forward to help with connectivity if you run many nodes behind one NAT";
+      };
+
+      auto_add_peers = mkOption {
+        type        = types.listOf types.str;
+        default     = [];
+        example     = [ "toxid1" "toxid2" ];
+        description = lib.mdDoc "peers to automatically connect to on startup";
+      };
+    };
+  };
+
+  config = mkIf config.services.toxvpn.enable {
+    systemd.services.toxvpn = {
+      description = "toxvpn daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        mkdir -p /run/toxvpn || true
+        chown toxvpn /run/toxvpn
+      '';
+
+      path = [ pkgs.toxvpn ];
+
+      script = ''
+        exec toxvpn -i ${config.services.toxvpn.localip} -l /run/toxvpn/control -u toxvpn -p ${toString config.services.toxvpn.port} ${lib.concatMapStringsSep " " (x: "-a ${x}") config.services.toxvpn.auto_add_peers}
+      '';
+
+      serviceConfig = {
+        KillMode  = "process";
+        Restart   = "on-success";
+        Type      = "notify";
+      };
+
+      restartIfChanged = false; # Likely to be used for remote admin
+    };
+
+    environment.systemPackages = [ pkgs.toxvpn ];
+
+    users.users = {
+      toxvpn = {
+        isSystemUser = true;
+        group = "toxvpn";
+        home       = "/var/lib/toxvpn";
+        createHome = true;
+      };
+    };
+    users.groups.toxvpn = {};
+  };
+}
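
An illustrative sketch of enabling toxvpn with the options above (the Tox IDs
are placeholders):

  services.toxvpn = {
    enable = true;
    localip = "10.123.123.2";                # this host's address on the VPN
    port = 33445;                            # toxcore UDP port
    auto_add_peers = [ "toxid1" "toxid2" ];  # placeholder peer IDs
  };
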
diff --git a/nixpkgs/nixos/modules/services/networking/trickster.nix b/nixpkgs/nixos/modules/services/networking/trickster.nix
new file mode 100644
index 000000000000..0b696e412b4d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/trickster.nix
@@ -0,0 +1,125 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.trickster;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "trickster" "origin" ] [ "services" "trickster" "origin-url" ])
+  ];
+
+  options = {
+    services.trickster = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable Trickster.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.trickster;
+        defaultText = literalExpression "pkgs.trickster";
+        description = lib.mdDoc ''
+          Package that should be used for trickster.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Path to configuration file.
+        '';
+      };
+
+      instance-id = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Instance ID for when running multiple processes (default null).
+        '';
+      };
+
+      log-level = mkOption {
+        type = types.str;
+        default = "info";
+        description = lib.mdDoc ''
+          Level of Logging to use (debug, info, warn, error) (default "info").
+        '';
+      };
+
+      metrics-port = mkOption {
+        type = types.port;
+        default = 8082;
+        description = lib.mdDoc ''
+          Port that the /metrics endpoint will listen on.
+        '';
+      };
+
+      origin-type = mkOption {
+        type = types.enum [ "prometheus" "influxdb" ];
+        default = "prometheus";
+        description = lib.mdDoc ''
+          Type of origin (prometheus, influxdb).
+        '';
+      };
+
+      origin-url = mkOption {
+        type = types.str;
+        default = "http://prometheus:9090";
+        description = lib.mdDoc ''
+          URL to the Origin. Enter it like you would in grafana, e.g., http://prometheus:9090 (default http://prometheus:9090).
+        '';
+      };
+
+      profiler-port = mkOption {
+        type = types.nullOr types.port;
+        default = null;
+        description = lib.mdDoc ''
+          Port that the /debug/pprof endpoint will listen on.
+        '';
+      };
+
+      proxy-port = mkOption {
+        type = types.port;
+        default = 9090;
+        description = lib.mdDoc ''
+          Port that the Proxy server will listen on.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.trickster = {
+      description = "Reverse proxy cache and time series dashboard accelerator";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = ''
+          ${cfg.package}/bin/trickster \
+          -log-level ${cfg.log-level} \
+          -metrics-port ${toString cfg.metrics-port} \
+          -origin-type ${cfg.origin-type} \
+          -origin-url ${cfg.origin-url} \
+          -proxy-port ${toString cfg.proxy-port} \
+          ${optionalString (cfg.configFile != null) "-config ${cfg.configFile}"} \
+          ${optionalString (cfg.profiler-port != null) "-profiler-port ${toString cfg.profiler-port}"} \
+          ${optionalString (cfg.instance-id != null) "-instance-id ${toString cfg.instance-id}"}
+        '';
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "always";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ _1000101 ];
+
+}
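
A minimal sketch of fronting a Prometheus origin with Trickster, using the
options above (URLs and ports are illustrative):

  services.trickster = {
    enable = true;
    origin-type = "prometheus";
    origin-url = "http://prometheus:9090";  # enter it as you would in Grafana
    proxy-port = 9090;                      # port the caching proxy listens on
    metrics-port = 8082;                    # port for the /metrics endpoint
  };
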
diff --git a/nixpkgs/nixos/modules/services/networking/trust-dns.nix b/nixpkgs/nixos/modules/services/networking/trust-dns.nix
new file mode 100644
index 000000000000..758e33f16d38
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/trust-dns.nix
@@ -0,0 +1,176 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.trust-dns;
+  toml = pkgs.formats.toml { };
+
+  configFile = toml.generate "trust-dns.toml" (
+    lib.filterAttrsRecursive (_: v: v != null) cfg.settings
+  );
+
+  zoneType = lib.types.submodule ({ config, ... }: {
+    options = with lib; {
+      zone = mkOption {
+        type = types.str;
+        description = mdDoc ''
+          Zone name, like "example.com", "localhost", or "0.0.127.in-addr.arpa".
+        '';
+      };
+      zone_type = mkOption {
+        type = types.enum [ "Primary" "Secondary" "Hint" "Forward" ];
+        default = "Primary";
+        description = mdDoc ''
+          One of:
+          - "Primary" (the master, authority for the zone).
+          - "Secondary" (the slave, replicated from the primary).
+          - "Hint" (a cached zone with recursive resolver abilities).
+          - "Forward" (a cached zone where all requests are forwarded to another resolver).
+
+          For more details about these zone types, consult the documentation for BIND,
+          though note that trust-dns supports only a subset of BIND's zone types:
+          <https://bind9.readthedocs.io/en/v9_18_4/reference.html#type>
+        '';
+      };
+      file = mkOption {
+        type = types.either types.path types.str;
+        default = "${config.zone}.zone";
+        defaultText = literalExpression ''"''${config.zone}.zone"'';
+        description = mdDoc ''
+          Path to the .zone file.
+          If not fully-qualified, this path will be interpreted relative to the `directory` option.
+          If omitted, defaults to the value of the `zone` option suffixed with ".zone".
+        '';
+      };
+    };
+  });
+in
+{
+  meta.maintainers = with lib.maintainers; [ colinsane ];
+  options = {
+    services.trust-dns = with lib; {
+      enable = mkEnableOption (lib.mdDoc "trust-dns");
+      package = mkOption {
+        type = types.package;
+        default = pkgs.trust-dns;
+        defaultText = "pkgs.trust-dns";
+        description = mdDoc ''
+          Trust-dns package to use.
+          The package must provide `meta.mainProgram` which names the server binary; any other utilities (client, resolver) are not needed.
+        '';
+      };
+      quiet = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Log ERROR level messages only.
+          This option is mutually exclusive with the `debug` option.
+          If neither `quiet` nor `debug` are enabled, logging defaults to the INFO level.
+        '';
+      };
+      debug = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Log DEBUG, INFO, WARN and ERROR messages.
+          This option is mutually exclusive with the `quiet` option.
+          If neither `quiet` nor `debug` are enabled, logging defaults to the INFO level.
+        '';
+      };
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Settings for trust-dns. The options enumerated here are not exhaustive.
+          Refer to upstream documentation for all available options:
+          - [Example settings](https://github.com/bluejekyll/trust-dns/blob/main/tests/test-data/test_configs/example.toml)
+        '';
+        type = types.submodule {
+          freeformType = toml.type;
+          options = {
+            listen_addrs_ipv4 = mkOption {
+              type = types.listOf types.str;
+              default = [ "0.0.0.0" ];
+              description = mdDoc ''
+                List of ipv4 addresses on which to listen for DNS queries.
+              '';
+            };
+            listen_addrs_ipv6 = mkOption {
+              type = types.listOf types.str;
+              default = lib.optional config.networking.enableIPv6 "::0";
+              defaultText = literalExpression ''lib.optional config.networking.enableIPv6 "::0"'';
+              description = mdDoc ''
+                List of ipv6 addresses on which to listen for DNS queries.
+              '';
+            };
+            listen_port = mkOption {
+              type = types.port;
+              default = 53;
+              description = mdDoc ''
+                Port to listen on (applies to all listen addresses).
+              '';
+            };
+            directory = mkOption {
+              type = types.str;
+              default = "/var/lib/trust-dns";
+              description = mdDoc ''
+                The directory in which trust-dns should look for .zone files,
+                whenever zones aren't specified by absolute path.
+              '';
+            };
+            zones = mkOption {
+              description = mdDoc "List of zones to serve.";
+              default = [ ];
+              type = types.listOf (types.coercedTo types.str (zone: { inherit zone; }) zoneType);
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.trust-dns = {
+      description = "trust-dns Domain Name Server";
+      unitConfig.Documentation = "https://trust-dns.org/";
+      serviceConfig = {
+        ExecStart =
+        let
+          flags =  (lib.optional cfg.debug "--debug") ++ (lib.optional cfg.quiet "--quiet");
+          flagsStr = builtins.concatStringsSep " " flags;
+        in ''
+          ${cfg.package}/bin/${cfg.package.meta.mainProgram} --config ${configFile} ${flagsStr}
+        '';
+        Type = "simple";
+        Restart = "on-failure";
+        RestartSec = "10s";
+        DynamicUser = true;
+
+        StateDirectory = "trust-dns";
+        ReadWritePaths = [ cfg.settings.directory ];
+
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "full";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+      };
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
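
A hedged sketch of serving a single zone with this module (the zone name is a
placeholder; per the coercion above, the string is expanded to
{ zone = "example.com"; file = "example.com.zone"; } and the zone file is looked
up relative to the `directory` setting):

  services.trust-dns = {
    enable = true;
    settings = {
      listen_addrs_ipv4 = [ "0.0.0.0" ];
      zones = [ "example.com" ];  # expects /var/lib/trust-dns/example.com.zone
    };
  };
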
diff --git a/nixpkgs/nixos/modules/services/networking/tvheadend.nix b/nixpkgs/nixos/modules/services/networking/tvheadend.nix
new file mode 100644
index 000000000000..466dbbccad53
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/tvheadend.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg     = config.services.tvheadend;
+    pidFile = "${config.users.users.tvheadend.home}/tvheadend.pid";
+in
+
+{
+  options = {
+    services.tvheadend = {
+      enable = mkEnableOption (lib.mdDoc "Tvheadend");
+      httpPort = mkOption {
+        type        = types.int;
+        default     = 9981;
+        description = lib.mdDoc "Port to bind HTTP to.";
+      };
+
+      htspPort = mkOption {
+        type        = types.int;
+        default     = 9982;
+        description = lib.mdDoc "Port to bind HTSP to.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.tvheadend = {
+      description = "Tvheadend Service user";
+      home        = "/var/lib/tvheadend";
+      createHome  = true;
+      isSystemUser = true;
+      group = "tvheadend";
+    };
+    users.groups.tvheadend = {};
+
+    systemd.services.tvheadend = {
+      description = "Tvheadend TV streaming server";
+      wantedBy    = [ "multi-user.target" ];
+      after       = [ "network.target" ];
+
+      serviceConfig = {
+        Type         = "forking";
+        PIDFile      = pidFile;
+        Restart      = "always";
+        RestartSec   = 5;
+        User         = "tvheadend";
+        Group        = "video";
+        ExecStart    = ''
+                       ${pkgs.tvheadend}/bin/tvheadend \
+                       --http_port ${toString cfg.httpPort} \
+                       --htsp_port ${toString cfg.htspPort} \
+                       -f \
+                       -C \
+                       -p ${pidFile} \
+                       -u tvheadend \
+                       -g video
+                       '';
+        ExecStop     = "${pkgs.coreutils}/bin/rm ${pidFile}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/twingate.nix b/nixpkgs/nixos/modules/services/networking/twingate.nix
new file mode 100644
index 000000000000..03c68fc874f0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/twingate.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.twingate;
+in
+{
+  options.services.twingate = {
+    enable = lib.mkEnableOption (lib.mdDoc "Twingate Client daemon");
+    package = lib.mkPackageOptionMD pkgs "twingate" { };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.packages = [ cfg.package ];
+    systemd.services.twingate = {
+      preStart = "cp -r --update=none ${cfg.package}/etc/twingate/. /etc/twingate/";
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    networking.firewall.checkReversePath = lib.mkDefault "loose";
+    services.resolved.enable = lib.mkIf (!config.networking.networkmanager.enable) true;
+
+    environment.systemPackages = [ cfg.package ]; # For the CLI.
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/ucarp.nix b/nixpkgs/nixos/modules/services/networking/ucarp.nix
new file mode 100644
index 000000000000..1214cec63f54
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/ucarp.nix
@@ -0,0 +1,183 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.networking.ucarp;
+
+  ucarpExec = concatStringsSep " " (
+    [
+      "${cfg.package}/bin/ucarp"
+      "--interface=${cfg.interface}"
+      "--srcip=${cfg.srcIp}"
+      "--vhid=${toString cfg.vhId}"
+      "--passfile=${cfg.passwordFile}"
+      "--addr=${cfg.addr}"
+      "--advbase=${toString cfg.advBase}"
+      "--advskew=${toString cfg.advSkew}"
+      "--upscript=${cfg.upscript}"
+      "--downscript=${cfg.downscript}"
+      "--deadratio=${toString cfg.deadratio}"
+    ]
+    ++ (optional cfg.preempt "--preempt")
+    ++ (optional cfg.neutral "--neutral")
+    ++ (optional cfg.shutdown "--shutdown")
+    ++ (optional cfg.ignoreIfState "--ignoreifstate")
+    ++ (optional cfg.noMcast "--nomcast")
+    ++ (optional (cfg.extraParam != null) "--xparam=${cfg.extraParam}")
+  );
+in {
+  options.networking.ucarp = {
+    enable = mkEnableOption (lib.mdDoc "ucarp, userspace implementation of CARP");
+
+    interface = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Network interface to bind to.";
+      example = "eth0";
+    };
+
+    srcIp = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Source (real) IP address of this host.";
+    };
+
+    vhId = mkOption {
+      type = types.ints.between 1 255;
+      description = lib.mdDoc "Virtual IP identifier shared between CARP hosts.";
+      example = 1;
+    };
+
+    passwordFile = mkOption {
+      type = types.str;
+      description = lib.mdDoc "File containing shared password between CARP hosts.";
+      example = "/run/keys/ucarp-password";
+    };
+
+    preempt = mkOption {
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enable preemptive failover, so that this host becomes the CARP
+        master as soon as possible.
+      '';
+      default = false;
+    };
+
+    neutral = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "Do not run downscript at start if the host is the backup.";
+      default = false;
+    };
+
+    addr = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Virtual shared IP address.";
+    };
+
+    advBase = mkOption {
+      type = types.ints.unsigned;
+      description = lib.mdDoc "Advertisement frequency in seconds.";
+      default = 1;
+    };
+
+    advSkew = mkOption {
+      type = types.ints.unsigned;
+      description = lib.mdDoc "Advertisement skew in seconds.";
+      default = 0;
+    };
+
+    upscript = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Command to run after becoming master; the interface name, virtual address,
+        and optional extra parameters are passed as arguments.
+      '';
+      example = literalExpression ''
+        pkgs.writeScript "upscript" '''
+          #!/bin/sh
+          ''${pkgs.iproute2}/bin/ip addr add "$2"/24 dev "$1"
+        ''';
+      '';
+    };
+
+    downscript = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Command to run after becoming backup; the interface name, virtual address,
+        and optional extra parameters are passed as arguments.
+      '';
+      example = literalExpression ''
+        pkgs.writeScript "downscript" '''
+          #!/bin/sh
+          ''${pkgs.iproute2}/bin/ip addr del "$2"/24 dev "$1"
+        ''';
+      '';
+    };
+
+    deadratio = mkOption {
+      type = types.ints.unsigned;
+      description = lib.mdDoc "Ratio to consider a host as dead.";
+      default = 3;
+    };
+
+    shutdown = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "Call downscript at exit.";
+      default = false;
+    };
+
+    ignoreIfState = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "Ignore interface state, e.g., down or no carrier.";
+      default = false;
+    };
+
+    noMcast = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "Use broadcast instead of multicast advertisements.";
+      default = false;
+    };
+
+    extraParam = mkOption {
+      type = types.nullOr types.str;
+      description = lib.mdDoc "Extra parameter to pass to the up/down scripts.";
+      default = null;
+    };
+
+    package = mkOption {
+      type = types.package;
+      description = lib.mdDoc ''
+        Package that should be used for ucarp.
+
+        Please note that the default package, pkgs.ucarp, has not received any
+        upstream updates for a long time and can be considered as unmaintained.
+      '';
+      default = pkgs.ucarp;
+      defaultText = literalExpression "pkgs.ucarp";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.ucarp = {
+      description = "ucarp, userspace implementation of CARP";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        Type = "exec";
+        ExecStart = ucarpExec;
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        ProtectClock = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ oxzi ];
+}
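
An illustrative two-host CARP sketch for one node, combining the options and
the up/down script examples above (addresses use the documentation range
192.0.2.0/24 and are placeholders):

  networking.ucarp = {
    enable = true;
    interface = "eth0";
    srcIp = "192.0.2.10";                       # this host's real address
    vhId = 1;
    passwordFile = "/run/keys/ucarp-password";  # shared secret between hosts
    addr = "192.0.2.100";                       # shared virtual address
    upscript = pkgs.writeScript "vip-up" ''
      #!/bin/sh
      ${pkgs.iproute2}/bin/ip addr add "$2"/24 dev "$1"
    '';
    downscript = pkgs.writeScript "vip-down" ''
      #!/bin/sh
      ${pkgs.iproute2}/bin/ip addr del "$2"/24 dev "$1"
    '';
  };
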
diff --git a/nixpkgs/nixos/modules/services/networking/unbound.nix b/nixpkgs/nixos/modules/services/networking/unbound.nix
new file mode 100644
index 000000000000..b6579af10a79
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/unbound.nix
@@ -0,0 +1,311 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.unbound;
+
+  yesOrNo = v: if v then "yes" else "no";
+
+  toOption = indent: n: v: "${indent}${toString n}: ${v}";
+
+  toConf = indent: n: v:
+    if builtins.isFloat v then (toOption indent n (builtins.toJSON v))
+    else if isInt v       then (toOption indent n (toString v))
+    else if isBool v      then (toOption indent n (yesOrNo v))
+    else if isString v    then (toOption indent n v)
+    else if isList v      then (concatMapStringsSep "\n" (toConf indent n) v)
+    else if isAttrs v     then (concatStringsSep "\n" (
+                                  ["${indent}${n}:"] ++ (
+                                    mapAttrsToList (toConf "${indent}  ") v
+                                  )
+                                ))
+    else throw (traceSeq v "services.unbound.settings: unexpected type");
+
+  confNoServer = concatStringsSep "\n" ((mapAttrsToList (toConf "") (builtins.removeAttrs cfg.settings [ "server" ])) ++ [""]);
+  confServer = concatStringsSep "\n" (mapAttrsToList (toConf "  ") (builtins.removeAttrs cfg.settings.server [ "define-tag" ]));
+
+  confFile = pkgs.writeText "unbound.conf" ''
+    server:
+    ${optionalString (cfg.settings.server.define-tag != "") (toOption "  " "define-tag" cfg.settings.server.define-tag)}
+    ${confServer}
+    ${confNoServer}
+  '';
+
+  rootTrustAnchorFile = "${cfg.stateDir}/root.key";
+
+in {
+
+  ###### interface
+
+  options = {
+    services.unbound = {
+
+      enable = mkEnableOption (lib.mdDoc "Unbound domain name server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.unbound-with-systemd;
+        defaultText = literalExpression "pkgs.unbound-with-systemd";
+        description = lib.mdDoc "The unbound package to use";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "unbound";
+        description = lib.mdDoc "User account under which unbound runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "unbound";
+        description = lib.mdDoc "Group under which unbound runs.";
+      };
+
+      stateDir = mkOption {
+        type = types.path;
+        default = "/var/lib/unbound";
+        description = lib.mdDoc "Directory holding all state for unbound to run.";
+      };
+
+      resolveLocalQueries = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether unbound should resolve local queries (i.e. add 127.0.0.1 to
+          /etc/resolv.conf).
+        '';
+      };
+
+      enableRootTrustAnchor = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc "Use and update root trust anchor for DNSSEC validation.";
+      };
+
+      localControlSocketPath = mkOption {
+        default = null;
+        # FIXME: What is the proper type here so users can specify strings,
+        # paths and null?
+        # My guess would be `types.nullOr (types.either types.str types.path)`
+        # but I haven't verified yet.
+        type = types.nullOr types.str;
+        example = "/run/unbound/unbound.ctl";
+        description = lib.mdDoc ''
+          When not set to `null` this option defines the path
+          at which the unbound remote control socket should be created. The
+          socket will be owned by the unbound user (`unbound`)
+          and group will be `nogroup`.
+
+          Users that should be permitted to access the socket must be in the
+          `config.services.unbound.group` group.
+
+          If this option is `null` remote control will not be
+          enabled. Unbound's default values apply.
+        '';
+      };
+
+      settings = mkOption {
+        default = {};
+        type = with types; submodule {
+
+          freeformType = let
+            validSettingsPrimitiveTypes = oneOf [ int str bool float ];
+            validSettingsTypes = oneOf [ validSettingsPrimitiveTypes (listOf validSettingsPrimitiveTypes) ];
+            settingsType = oneOf [ str (attrsOf validSettingsTypes) ];
+          in attrsOf (oneOf [ settingsType (listOf settingsType) ])
+              // { description = ''
+                unbound.conf configuration type. The format consists of an attribute
+                set of settings. Each setting can be either a single value, a list of
+                values, or an attribute set. The allowed values are integers,
+                strings, booleans, or floats.
+              '';
+            };
+
+          options = {
+            remote-control.control-enable = mkOption {
+              type = bool;
+              default = false;
+              internal = true;
+            };
+          };
+        };
+        example = literalExpression ''
+          {
+            server = {
+              interface = [ "127.0.0.1" ];
+            };
+            forward-zone = [
+              {
+                name = ".";
+                forward-addr = "1.1.1.1@853#cloudflare-dns.com";
+              }
+              {
+                name = "example.org.";
+                forward-addr = [
+                  "1.1.1.1@853#cloudflare-dns.com"
+                  "1.0.0.1@853#cloudflare-dns.com"
+                ];
+              }
+            ];
+            remote-control.control-enable = true;
+          };
+        '';
+        description = lib.mdDoc ''
+          Declarative Unbound configuration
+          See the {manpage}`unbound.conf(5)` manpage for a list of
+          available options.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.unbound.settings = {
+      server = {
+        directory = mkDefault cfg.stateDir;
+        username = ''""'';
+        chroot = ''""'';
+        pidfile = ''""'';
+        # when running under systemd there is no need to daemonize
+        do-daemonize = false;
+        interface = mkDefault ([ "127.0.0.1" ] ++ (optional config.networking.enableIPv6 "::1"));
+        access-control = mkDefault ([ "127.0.0.0/8 allow" ] ++ (optional config.networking.enableIPv6 "::1/128 allow"));
+        auto-trust-anchor-file = mkIf cfg.enableRootTrustAnchor rootTrustAnchorFile;
+        tls-cert-bundle = mkDefault "/etc/ssl/certs/ca-certificates.crt";
+        # prevent race conditions on system startup when interfaces are not yet
+        # configured
+        ip-freebind = mkDefault true;
+        define-tag = mkDefault "";
+      };
+      remote-control = {
+        control-enable = mkDefault false;
+        control-interface = mkDefault ([ "127.0.0.1" ] ++ (optional config.networking.enableIPv6 "::1"));
+        server-key-file = mkDefault "${cfg.stateDir}/unbound_server.key";
+        server-cert-file = mkDefault "${cfg.stateDir}/unbound_server.pem";
+        control-key-file = mkDefault "${cfg.stateDir}/unbound_control.key";
+        control-cert-file = mkDefault "${cfg.stateDir}/unbound_control.pem";
+      } // optionalAttrs (cfg.localControlSocketPath != null) {
+        control-enable = true;
+        control-interface = cfg.localControlSocketPath;
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    users.users = mkIf (cfg.user == "unbound") {
+      unbound = {
+        description = "unbound daemon user";
+        isSystemUser = true;
+        group = cfg.group;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "unbound") {
+      unbound = {};
+    };
+
+    networking = mkIf cfg.resolveLocalQueries {
+      resolvconf = {
+        useLocalResolver = mkDefault true;
+      };
+
+      networkmanager.dns = "unbound";
+    };
+
+    environment.etc."unbound/unbound.conf".source = confFile;
+
+    systemd.services.unbound = {
+      description = "Unbound recursive Domain Name Server";
+      after = [ "network.target" ];
+      before = [ "nss-lookup.target" ];
+      wantedBy = [ "multi-user.target" "nss-lookup.target" ];
+
+      path = mkIf cfg.settings.remote-control.control-enable [ pkgs.openssl ];
+
+      preStart = ''
+        ${optionalString cfg.enableRootTrustAnchor ''
+          ${cfg.package}/bin/unbound-anchor -a ${rootTrustAnchorFile} || echo "Root anchor updated!"
+        ''}
+        ${optionalString cfg.settings.remote-control.control-enable ''
+          ${cfg.package}/bin/unbound-control-setup -d ${cfg.stateDir}
+        ''}
+      '';
+
+      restartTriggers = [
+        confFile
+      ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/unbound -p -d -c /etc/unbound/unbound.conf";
+        ExecReload = "+/run/current-system/sw/bin/kill -HUP $MAINPID";
+
+        NotifyAccess = "main";
+        Type = "notify";
+
+        AmbientCapabilities = [
+          "CAP_NET_BIND_SERVICE"
+          "CAP_NET_RAW" # needed if ip-transparent is set to true
+        ];
+        CapabilityBoundingSet = [
+          "CAP_NET_BIND_SERVICE"
+          "CAP_NET_RAW"
+        ];
+
+        User = cfg.user;
+        Group = cfg.group;
+
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        ProtectSystem = "strict";
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelTunables = true;
+        RuntimeDirectory = "unbound";
+        ConfigurationDirectory = "unbound";
+        StateDirectory = "unbound";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_NETLINK" "AF_UNIX" ];
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictSUIDSGID = true;
+
+        ReadWritePaths = [ cfg.stateDir ];
+
+        Restart = "on-failure";
+        RestartSec = "5s";
+      };
+    };
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "unbound" "interfaces" ] [ "services" "unbound" "settings" "server" "interface" ])
+    (mkChangedOptionModule [ "services" "unbound" "allowedAccess" ] [ "services" "unbound" "settings" "server" "access-control" ] (
+      config: map (value: "${value} allow") (getAttrFromPath [ "services" "unbound" "allowedAccess" ] config)
+    ))
+    (mkRemovedOptionModule [ "services" "unbound" "forwardAddresses" ] ''
+      Add a new setting:
+      services.unbound.settings.forward-zone = [{
+        name = ".";
+        forward-addr = [ # Your current services.unbound.forwardAddresses ];
+      }];
+      If any of those addresses are local addresses (127.0.0.1 or ::1), you must
+      also set services.unbound.settings.server.do-not-query-localhost to false.
+    '')
+    (mkRemovedOptionModule [ "services" "unbound" "extraConfig" ] ''
+      You can use services.unbound.settings to add any configuration you want.
+    '')
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/unifi.nix b/nixpkgs/nixos/modules/services/networking/unifi.nix
new file mode 100644
index 000000000000..537a4db95ca7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/unifi.nix
@@ -0,0 +1,210 @@
+{ config, options, lib, pkgs, utils, ... }:
+let
+  cfg = config.services.unifi;
+  stateDir = "/var/lib/unifi";
+  cmd = lib.escapeShellArgs ([ "@${cfg.jrePackage}/bin/java" "java" ]
+    ++ lib.optionals (lib.versionAtLeast (lib.getVersion cfg.jrePackage) "16") [
+      "--add-opens=java.base/java.lang=ALL-UNNAMED"
+      "--add-opens=java.base/java.time=ALL-UNNAMED"
+      "--add-opens=java.base/sun.security.util=ALL-UNNAMED"
+      "--add-opens=java.base/java.io=ALL-UNNAMED"
+      "--add-opens=java.rmi/sun.rmi.transport=ALL-UNNAMED"
+    ]
+    ++ (lib.optional (cfg.initialJavaHeapSize != null) "-Xms${(toString cfg.initialJavaHeapSize)}m")
+    ++ (lib.optional (cfg.maximumJavaHeapSize != null) "-Xmx${(toString cfg.maximumJavaHeapSize)}m")
+    ++ cfg.extraJvmOptions
+    ++ [ "-jar" "${stateDir}/lib/ace.jar" ]);
+in
+{
+
+  options = {
+
+    services.unifi.enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether or not to enable the unifi controller service.
+      '';
+    };
+
+    services.unifi.jrePackage = lib.mkOption {
+      type = lib.types.package;
+      default = if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3") then pkgs.jdk11 else pkgs.jre8;
+      defaultText = lib.literalExpression ''if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.5") then pkgs.jdk17_headless else if (lib.versionAtLeast (lib.getVersion cfg.unifiPackage) "7.3") then pkgs.jdk11 else pkgs.jre8'';
+      description = lib.mdDoc ''
+        The JRE package to use. Check the release notes to ensure it is supported.
+      '';
+    };
+
+    services.unifi.unifiPackage = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.unifi5;
+      defaultText = lib.literalExpression "pkgs.unifi5";
+      description = lib.mdDoc ''
+        The unifi package to use.
+      '';
+    };
+
+    services.unifi.mongodbPackage = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.mongodb-4_4;
+      defaultText = lib.literalExpression "pkgs.mongodb";
+      description = lib.mdDoc ''
+        The mongodb package to use. Please note: unifi7 officially only supports mongodb up until 3.6 but works with 4.4.
+      '';
+    };
+
+    services.unifi.openFirewall = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether or not to open the minimum required ports on the firewall.
+
+        This is necessary to allow firmware upgrades and device discovery to
+        work. For remote login, you should additionally open (or forward) port
+        8443.
+      '';
+    };
+
+    services.unifi.initialJavaHeapSize = lib.mkOption {
+      type = with lib.types; nullOr int;
+      default = null;
+      example = 1024;
+      description = lib.mdDoc ''
+        Set the initial heap size for the JVM in MB. If this option isn't set, the
+        JVM will decide this value at runtime.
+      '';
+    };
+
+    services.unifi.maximumJavaHeapSize = lib.mkOption {
+      type = with lib.types; nullOr int;
+      default = null;
+      example = 4096;
+      description = lib.mdDoc ''
+        Set the maximum heap size for the JVM in MB. If this option isn't set, the
+        JVM will decide this value at runtime.
+      '';
+    };
+
+    services.unifi.extraJvmOptions = lib.mkOption {
+      type = with lib.types; listOf str;
+      default = [ ];
+      example = lib.literalExpression ''["-Xlog:gc"]'';
+      description = lib.mdDoc ''
+        Set extra options to pass to the JVM.
+      '';
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    users.users.unifi = {
+      isSystemUser = true;
+      group = "unifi";
+      description = "UniFi controller daemon user";
+      home = "${stateDir}";
+    };
+    users.groups.unifi = {};
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      # https://help.ubnt.com/hc/en-us/articles/218506997
+      allowedTCPPorts = [
+        8080  # Port for UAP to inform controller.
+        8880  # Port for HTTP portal redirect, if guest portal is enabled.
+        8843  # Port for HTTPS portal redirect, ditto.
+        6789  # Port for UniFi mobile speed test.
+      ];
+      allowedUDPPorts = [
+        3478  # UDP port used for STUN.
+        10001 # UDP port used for device discovery.
+      ];
+    };
+
+    systemd.services.unifi = {
+      description = "UniFi controller daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      # This is a HACK to fix missing dependencies of dynamic libs extracted from jars
+      environment.LD_LIBRARY_PATH = with pkgs.stdenv; "${cc.cc.lib}/lib";
+      # Make sure package upgrades trigger a service restart
+      restartTriggers = [ cfg.unifiPackage cfg.mongodbPackage ];
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cmd} start";
+        ExecStop = "${cmd} stop";
+        Restart = "on-failure";
+        TimeoutSec = "5min";
+        User = "unifi";
+        UMask = "0077";
+        WorkingDirectory = "${stateDir}";
+        # the stop command exits while the main process is still running, and unifi
+        # wants to manage its own child processes. this means we have to set KillSignal
+        # to something the main process ignores, otherwise every stop will have unifi.service
+        # fail with SIGTERM status.
+        KillSignal = "SIGCONT";
+
+        # Hardening
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [ "@system-service" ];
+
+        StateDirectory = "unifi";
+        RuntimeDirectory = "unifi";
+        LogsDirectory = "unifi";
+        CacheDirectory = "unifi";
+
+        TemporaryFileSystem = [
+          # required as we want to create bind mounts below
+          "${stateDir}/webapps:rw"
+        ];
+
+        # We must create the binary directories as bind mounts instead of symlinks
+        # This is because the controller resolves all symlinks to absolute paths
+        # to be used as the working directory.
+        BindPaths = [
+          "/var/log/unifi:${stateDir}/logs"
+          "/run/unifi:${stateDir}/run"
+          "${cfg.unifiPackage}/dl:${stateDir}/dl"
+          "${cfg.unifiPackage}/lib:${stateDir}/lib"
+          "${cfg.mongodbPackage}/bin:${stateDir}/bin"
+          "${cfg.unifiPackage}/webapps/ROOT:${stateDir}/webapps/ROOT"
+        ];
+
+        # Needs network access
+        PrivateNetwork = false;
+        # Cannot be true due to OpenJDK
+        MemoryDenyWriteExecute = false;
+      };
+    };
+
+  };
+  imports = [
+    (lib.mkRemovedOptionModule [ "services" "unifi" "dataDir" ] "You should move contents of dataDir to /var/lib/unifi/data")
+    (lib.mkRenamedOptionModule [ "services" "unifi" "openPorts" ] [ "services" "unifi" "openFirewall" ])
+  ];
+}
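
A minimal sketch of enabling the controller with the options above (heap sizes
mirror the option examples and are illustrative):

  services.unifi = {
    enable = true;
    openFirewall = true;          # allow device discovery and firmware upgrades
    initialJavaHeapSize = 1024;   # MB
    maximumJavaHeapSize = 4096;   # MB
  };
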
diff --git a/nixpkgs/nixos/modules/services/networking/uptermd.nix b/nixpkgs/nixos/modules/services/networking/uptermd.nix
new file mode 100644
index 000000000000..f824d617f59e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/uptermd.nix
@@ -0,0 +1,109 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.uptermd;
+in
+{
+  options = {
+    services.uptermd = {
+      enable = mkEnableOption (lib.mdDoc "uptermd");
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open the firewall for the port in {option}`services.uptermd.port`.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 2222;
+        description = lib.mdDoc ''
+          Port the server will listen on.
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "[::]";
+        example = "127.0.0.1";
+        description = lib.mdDoc ''
+          Address the server will listen on.
+        '';
+      };
+
+      hostKey = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/upterm_host_ed25519_key";
+        description = lib.mdDoc ''
+          Path to SSH host key. If not defined, an ed25519 keypair is generated automatically.
+        '';
+      };
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--debug" ];
+        description = lib.mdDoc ''
+          Extra flags passed to the uptermd command.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+
+    systemd.services.uptermd = {
+      description = "Upterm Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      path = [ pkgs.openssh ];
+
+      preStart = mkIf (cfg.hostKey == null) ''
+        if ! [ -f ssh_host_ed25519_key ]; then
+          ssh-keygen \
+            -t ed25519 \
+            -f ssh_host_ed25519_key \
+            -N ""
+        fi
+      '';
+
+      serviceConfig = {
+        StateDirectory = "uptermd";
+        WorkingDirectory = "/var/lib/uptermd";
+        ExecStart = "${pkgs.upterm}/bin/uptermd --ssh-addr ${cfg.listenAddress}:${toString cfg.port} --private-key ${if cfg.hostKey == null then "ssh_host_ed25519_key" else cfg.hostKey} ${concatStringsSep " " cfg.extraFlags}";
+
+        # Hardening
+        AmbientCapabilities = mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+        PrivateUsers = cfg.port >= 1024;
+        DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        # AF_UNIX is for ssh-keygen, which relies on nscd to resolve the uid to a user
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = "@system-service";
+      };
+    };
+  };
+}
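
An illustrative sketch using the options above (the host key is generated
automatically because `hostKey` is left at its default of null):

  services.uptermd = {
    enable = true;
    port = 2222;             # SSH listen port
    listenAddress = "[::]";  # module default, shown explicitly
    openFirewall = true;
  };
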
diff --git a/nixpkgs/nixos/modules/services/networking/v2ray.nix b/nixpkgs/nixos/modules/services/networking/v2ray.nix
new file mode 100644
index 000000000000..ba2aa5bc1de7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/v2ray.nix
@@ -0,0 +1,97 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  options = {
+
+    services.v2ray = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run v2ray server.
+
+          Either `configFile` or `config` must be specified.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.v2ray;
+        defaultText = literalExpression "pkgs.v2ray";
+        description = lib.mdDoc ''
+          Which v2ray package to use.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/etc/v2ray/config.json";
+        description = lib.mdDoc ''
+          The absolute path to the configuration file.
+
+          Either `configFile` or `config` must be specified.
+
+          See <https://www.v2fly.org/en_US/v5/config/overview.html>.
+        '';
+      };
+
+      config = mkOption {
+        type = types.nullOr (types.attrsOf types.unspecified);
+        default = null;
+        example = {
+          inbounds = [{
+            port = 1080;
+            listen = "127.0.0.1";
+            protocol = "http";
+          }];
+          outbounds = [{
+            protocol = "freedom";
+          }];
+        };
+        description = lib.mdDoc ''
+          The configuration object.
+
+          Either `configFile` or `config` must be specified.
+
+          See <https://www.v2fly.org/en_US/v5/config/overview.html>.
+        '';
+      };
+    };
+
+  };
+
+  config = let
+    cfg = config.services.v2ray;
+    configFile = if cfg.configFile != null
+      then cfg.configFile
+      else pkgs.writeTextFile {
+        name = "v2ray.json";
+        text = builtins.toJSON cfg.config;
+        checkPhase = ''
+          ${cfg.package}/bin/v2ray test -c $out
+        '';
+      };
+
+  in mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.configFile == null) != (cfg.config == null);
+        message = "Either but not both `configFile` and `config` should be specified for v2ray.";
+      }
+    ];
+
+    environment.etc."v2ray/config.json".source = configFile;
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.services.v2ray = {
+      restartTriggers = [ config.environment.etc."v2ray/config.json".source ];
+
+      # Workaround: https://github.com/NixOS/nixpkgs/issues/81138
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
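A minimal usage sketch, reusing the inbound/outbound example from the `config` option above; exactly one of `config` or `configFile` may be set, as the assertion enforces:

  {
    services.v2ray = {
      enable = true;
      config = {
        inbounds = [ { port = 1080; listen = "127.0.0.1"; protocol = "http"; } ];
        outbounds = [ { protocol = "freedom"; } ];
      };
    };
  }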
diff --git a/nixpkgs/nixos/modules/services/networking/v2raya.nix b/nixpkgs/nixos/modules/services/networking/v2raya.nix
new file mode 100644
index 000000000000..0bea73798daf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/v2raya.nix
@@ -0,0 +1,50 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options = {
+    services.v2raya = {
+      enable = options.mkEnableOption (mdDoc "the v2rayA service");
+    };
+  };
+
+  config = mkIf config.services.v2raya.enable {
+    environment.systemPackages = [ pkgs.v2raya ];
+
+    systemd.services.v2raya =
+      let
+        nftablesEnabled = config.networking.nftables.enable;
+        iptablesServices = [
+          "iptables.service"
+        ] ++ optional config.networking.enableIPv6 "ip6tables.service";
+        tableServices = if nftablesEnabled then [ "nftables.service" ] else iptablesServices;
+      in
+      {
+        unitConfig = {
+          Description = "v2rayA service";
+          Documentation = "https://github.com/v2rayA/v2rayA/wiki";
+          After = [
+            "network.target"
+            "nss-lookup.target"
+          ] ++ tableServices;
+          Wants = [ "network.target" ];
+        };
+
+        serviceConfig = {
+          User = "root";
+          ExecStart = "${getExe pkgs.v2raya} --log-disable-timestamp";
+          Environment = [ "V2RAYA_LOG_FILE=/var/log/v2raya/v2raya.log" ];
+          LimitNPROC = 500;
+          LimitNOFILE = 1000000;
+          Restart = "on-failure";
+          Type = "simple";
+        };
+
+        wantedBy = [ "multi-user.target" ];
+        path = with pkgs; [ iptables bash iproute2 ]; # required by v2rayA TProxy functionality
+      };
+  };
+
+  meta.maintainers = with maintainers; [ elliot ];
+}
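Usage is a single switch; the generated unit orders itself after the active firewall backend (nftables or iptables) as shown above:

  {
    services.v2raya.enable = true;
  }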
diff --git a/nixpkgs/nixos/modules/services/networking/vdirsyncer.nix b/nixpkgs/nixos/modules/services/networking/vdirsyncer.nix
new file mode 100644
index 000000000000..f9b880c763e3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/vdirsyncer.nix
@@ -0,0 +1,214 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.vdirsyncer;
+
+  toIniJson = with generators; toINI {
+    mkKeyValue = mkKeyValueDefault {
+      mkValueString = builtins.toJSON;
+    } "=";
+  };
+
+  toConfigFile = name: cfg':
+    if
+      cfg'.configFile != null
+    then
+      cfg'.configFile
+    else
+      pkgs.writeText "vdirsyncer-${name}.conf" (toIniJson (
+        {
+          general = cfg'.config.general // (lib.optionalAttrs (cfg'.config.statusPath == null) {
+            status_path = "/var/lib/vdirsyncer/${name}";
+          });
+        } // (
+          mapAttrs' (name: nameValuePair "pair ${name}") cfg'.config.pairs
+        ) // (
+          mapAttrs' (name: nameValuePair "storage ${name}") cfg'.config.storages
+        )
+      ));
+
+  userUnitConfig = name: cfg': {
+    serviceConfig = {
+      User = if cfg'.user == null then "vdirsyncer" else cfg'.user;
+      Group = if cfg'.group == null then "vdirsyncer" else cfg'.group;
+    }  // (optionalAttrs (cfg'.user == null) {
+      DynamicUser = true;
+    }) // (optionalAttrs (cfg'.additionalGroups != []) {
+      SupplementaryGroups = cfg'.additionalGroups;
+    }) // (optionalAttrs (cfg'.config.statusPath == null) {
+      StateDirectory = "vdirsyncer/${name}";
+      StateDirectoryMode = "0700";
+    });
+  };
+
+  commonUnitConfig = {
+    after = [ "network.target" ];
+    serviceConfig = {
+      Type = "oneshot";
+      # Sandboxing
+      PrivateTmp = true;
+      NoNewPrivileges = true;
+      ProtectSystem = "strict";
+      ProtectHome = true;
+      ProtectKernelTunables = true;
+      ProtectKernelModules = true;
+      ProtectControlGroups = true;
+      RestrictNamespaces = true;
+      MemoryDenyWriteExecute = true;
+      RestrictRealtime = true;
+      RestrictSUIDSGID = true;
+      RestrictAddressFamilies = "AF_INET AF_INET6";
+      LockPersonality = true;
+    };
+  };
+
+in
+{
+  options = {
+    services.vdirsyncer = {
+      enable = mkEnableOption (mdDoc "vdirsyncer");
+
+      package = mkPackageOptionMD pkgs "vdirsyncer" {};
+
+      jobs = mkOption {
+        description = mdDoc "vdirsyncer job configurations";
+        type = types.attrsOf (types.submodule {
+          options = {
+            enable = (mkEnableOption (mdDoc "this vdirsyncer job")) // {
+              default = true;
+              example = false;
+            };
+
+            user = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = mdDoc ''
+                User account to run vdirsyncer as, otherwise as a systemd
+                dynamic user
+              '';
+            };
+
+            group = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = mdDoc "group to run vdirsyncer as";
+            };
+
+            additionalGroups = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              description = mdDoc "additional groups to add the dynamic user to";
+            };
+
+            forceDiscover = mkOption {
+              type = types.bool;
+              default = false;
+              description = mdDoc ''
+                Run `yes | vdirsyncer discover` prior to `vdirsyncer sync`
+              '';
+            };
+
+            timerConfig = mkOption {
+              type = types.attrs;
+              default = {
+                OnBootSec = "1h";
+                OnUnitActiveSec = "6h";
+              };
+              description = mdDoc "systemd timer configuration";
+            };
+
+            configFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc "existing configuration file";
+            };
+
+            config = {
+              statusPath = mkOption {
+                type = types.nullOr types.str;
+                default = null;
+                defaultText = literalExpression "/var/lib/vdirsyncer/\${attrName}";
+                description = mdDoc "vdirsyncer's status path";
+              };
+
+              general = mkOption {
+                type = types.attrs;
+                default = {};
+                description = mdDoc "general configuration";
+              };
+
+              pairs = mkOption {
+                type = types.attrsOf types.attrs;
+                default = {};
+                description = mdDoc "vdirsyncer pair configurations";
+                example = literalExpression ''
+                  {
+                    my_contacts = {
+                      a = "my_cloud_contacts";
+                      b = "my_local_contacts";
+                      collections = [ "from a" ];
+                      conflict_resolution = "a wins";
+                      metadata = [ "color" "displayname" ];
+                    };
+                  };
+                '';
+              };
+
+              storages = mkOption {
+                type = types.attrsOf types.attrs;
+                default = {};
+                description = mdDoc "vdirsyncer storage configurations";
+                example = literalExpression ''
+                  {
+                    my_cloud_contacts = {
+                      type = "carddav";
+                      url = "https://dav.example.com/";
+                      read_only = true;
+                      username = "user";
+                      "password.fetch" = [ "command" "cat" "/etc/vdirsyncer/cloud.passwd" ];
+                    };
+                    my_local_contacts = {
+                      type = "carddav";
+                      url = "https://localhost/";
+                      username = "user";
+                      "password.fetch" = [ "command" "cat" "/etc/vdirsyncer/local.passwd" ];
+                    };
+                  }
+                '';
+              };
+            };
+          };
+        });
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services = mapAttrs' (name: cfg': nameValuePair "vdirsyncer@${name}" (
+      foldr recursiveUpdate {} [
+        commonUnitConfig
+        (userUnitConfig name cfg')
+        {
+          description = "synchronize calendars and contacts (${name})";
+          environment.VDIRSYNCER_CONFIG = toConfigFile name cfg';
+          serviceConfig.ExecStart =
+            (optional cfg'.forceDiscover (
+              pkgs.writeShellScript "vdirsyncer-discover-yes" ''
+                set -e
+                yes | ${cfg.package}/bin/vdirsyncer discover
+              ''
+            )) ++ [ "${cfg.package}/bin/vdirsyncer sync" ];
+        }
+      ]
+    )) (filterAttrs (name: cfg': cfg'.enable) cfg.jobs);
+
+    systemd.timers = mapAttrs' (name: cfg': nameValuePair "vdirsyncer@${name}" {
+      wantedBy = [ "timers.target" ];
+      description = "synchronize calendars and contacts (${name})";
+      inherit (cfg') timerConfig;
+    }) cfg.jobs;
+  };
+}
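A sketch of one declarative job, assembled from the `pairs` and `storages` examples in the options above (URLs and password paths are placeholders); this produces a `vdirsyncer@contacts` service and matching timer:

  {
    services.vdirsyncer = {
      enable = true;
      jobs.contacts.config = {
        pairs.my_contacts = {
          a = "my_cloud_contacts";
          b = "my_local_contacts";
          collections = [ "from a" ];
          conflict_resolution = "a wins";
        };
        storages.my_cloud_contacts = {
          type = "carddav";
          url = "https://dav.example.com/";
          username = "user";
          "password.fetch" = [ "command" "cat" "/etc/vdirsyncer/cloud.passwd" ];
        };
        storages.my_local_contacts = {
          type = "carddav";
          url = "https://localhost/";
          username = "user";
          "password.fetch" = [ "command" "cat" "/etc/vdirsyncer/local.passwd" ];
        };
      };
    };
  }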
diff --git a/nixpkgs/nixos/modules/services/networking/vsftpd.nix b/nixpkgs/nixos/modules/services/networking/vsftpd.nix
new file mode 100644
index 000000000000..318ceb4e5094
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/vsftpd.nix
@@ -0,0 +1,330 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  /* minimal secure setup:
+
+   enable = true;
+   forceLocalLoginsSSL = true;
+   forceLocalDataSSL = true;
+   userlistDeny = false;
+   localUsers = true;
+   userlist = ["non-root-user" "other-non-root-user"];
+   rsaCertFile = "/var/vsftpd/vsftpd.pem";
+
+  */
+
+  cfg = config.services.vsftpd;
+
+  inherit (pkgs) vsftpd;
+
+  yesNoOption = nixosName: vsftpdName: default: description: {
+    cfgText = "${vsftpdName}=${if getAttr nixosName cfg then "YES" else "NO"}";
+
+    nixosOption = {
+      type = types.bool;
+      name = nixosName;
+      value = mkOption {
+        description = lib.mdDoc description;
+        inherit default;
+        type = types.bool;
+      };
+    };
+  };
+
+  optionDescription = [
+    (yesNoOption "allowWriteableChroot" "allow_writeable_chroot" false ''
+      Allow the use of writeable root inside chroot().
+    '')
+    (yesNoOption "virtualUseLocalPrivs" "virtual_use_local_privs" false ''
+      If enabled, virtual users will use the same privileges as local
+      users. By default, virtual users will use the same privileges as
+      anonymous users, which tends to be more restrictive (especially
+      in terms of write access).
+    '')
+    (yesNoOption "anonymousUser" "anonymous_enable" false ''
+      Whether to enable the anonymous FTP user.
+    '')
+    (yesNoOption "anonymousUserNoPassword" "no_anon_password" false ''
+      Whether to disable the password for the anonymous FTP user.
+    '')
+    (yesNoOption "localUsers" "local_enable" false ''
+      Whether to enable FTP for local users.
+    '')
+    (yesNoOption "writeEnable" "write_enable" false ''
+      Whether any write activity is permitted to users.
+    '')
+    (yesNoOption "anonymousUploadEnable" "anon_upload_enable" false ''
+      Whether any uploads are permitted to anonymous users.
+    '')
+    (yesNoOption "anonymousMkdirEnable" "anon_mkdir_write_enable" false ''
+      Whether anonymous users are permitted to create new directories.
+    '')
+    (yesNoOption "chrootlocalUser" "chroot_local_user" false ''
+      Whether local users are confined to their home directory.
+    '')
+    (yesNoOption "userlistEnable" "userlist_enable" false ''
+      Whether to load the list of usernames from {option}`userlistFile` and
+      allow or deny those users access, as specified by {option}`userlistDeny`.
+    '')
+    (yesNoOption "userlistDeny" "userlist_deny" false ''
+      Specifies whether {option}`userlistFile` is a list of user
+      names to allow or deny access.
+      The default `false` means whitelist/allow.
+    '')
+    (yesNoOption "forceLocalLoginsSSL" "force_local_logins_ssl" false ''
+      Only applies if SSL is enabled (i.e. {option}`rsaCertFile` is set). Non-anonymous
+      (local) users must use a secure SSL connection to send a password.
+    '')
+    (yesNoOption "forceLocalDataSSL" "force_local_data_ssl" false ''
+      Only applies if SSL is enabled (i.e. {option}`rsaCertFile` is set). Non-anonymous
+      (local) users must use a secure SSL connection for sending and receiving data on the data connection.
+    '')
+    (yesNoOption "portPromiscuous" "port_promiscuous" false ''
+      Set to YES if you want to disable the PORT security check that ensures that
+      outgoing data connections can only connect to the client. Only enable if you
+      know what you are doing!
+    '')
+    (yesNoOption "ssl_tlsv1" "ssl_tlsv1" true  ''
+      Only applies if {option}`ssl_enable` is activated. If
+      enabled, this option will permit TLS v1 protocol connections.
+      TLS v1 connections are preferred.
+    '')
+    (yesNoOption "ssl_sslv2" "ssl_sslv2" false ''
+      Only applies if {option}`ssl_enable` is activated. If
+      enabled, this option will permit SSL v2 protocol connections.
+      TLS v1 connections are preferred.
+    '')
+    (yesNoOption "ssl_sslv3" "ssl_sslv3" false ''
+      Only applies if {option}`ssl_enable` is activated. If
+      enabled, this option will permit SSL v3 protocol connections.
+      TLS v1 connections are preferred.
+    '')
+  ];
+
+  configFile = pkgs.writeText "vsftpd.conf"
+    ''
+      ${concatMapStrings (x: "${x.cfgText}\n") optionDescription}
+      ${optionalString (cfg.rsaCertFile != null) ''
+        ssl_enable=YES
+        rsa_cert_file=${cfg.rsaCertFile}
+      ''}
+      ${optionalString (cfg.rsaKeyFile != null) ''
+        rsa_private_key_file=${cfg.rsaKeyFile}
+      ''}
+      ${optionalString (cfg.userlistFile != null) ''
+        userlist_file=${cfg.userlistFile}
+      ''}
+      background=YES
+      listen=NO
+      listen_ipv6=YES
+      nopriv_user=vsftpd
+      secure_chroot_dir=/var/empty
+      ${optionalString (cfg.localRoot != null) ''
+        local_root=${cfg.localRoot}
+      ''}
+      syslog_enable=YES
+      ${optionalString (pkgs.stdenv.hostPlatform.system == "x86_64-linux") ''
+        seccomp_sandbox=NO
+      ''}
+      anon_umask=${cfg.anonymousUmask}
+      ${optionalString cfg.anonymousUser ''
+        anon_root=${cfg.anonymousUserHome}
+      ''}
+      ${optionalString cfg.enableVirtualUsers ''
+        guest_enable=YES
+        guest_username=vsftpd
+      ''}
+      pam_service_name=vsftpd
+      ${cfg.extraConfig}
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.vsftpd = {
+
+      enable = mkEnableOption (lib.mdDoc "vsftpd");
+
+      userlist = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc "See {option}`userlistFile`.";
+      };
+
+      userlistFile = mkOption {
+        type = types.path;
+        default = pkgs.writeText "userlist" (concatMapStrings (x: "${x}\n") cfg.userlist);
+        defaultText = literalExpression ''pkgs.writeText "userlist" (concatMapStrings (x: "''${x}\n") cfg.userlist)'';
+        description = lib.mdDoc ''
+          Newline-separated list of names to be allowed or denied if {option}`userlistEnable`
+          is `true`; see {option}`userlistDeny` for which of the two applies.
+
+          The default is a file containing the users from {option}`userlist`.
+
+          If explicitly set to null, userlist_file will not be set in vsftpd's config file.
+        '';
+      };
+
+      enableVirtualUsers = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the `pam_userdb`-based
+          virtual user system.
+        '';
+      };
+
+      userDbPath = mkOption {
+        type = types.nullOr types.str;
+        example = "/etc/vsftpd/userDb";
+        default = null;
+        description = lib.mdDoc ''
+          Only applies if {option}`enableVirtualUsers` is true.
+          Path pointing to the `pam_userdb` user
+          database used by vsftpd to authenticate the virtual users.
+
+          This user list should be stored in the Berkeley DB database
+          format.
+
+          To generate a new user database, create a text file, add
+          your users using the following format:
+          ```
+          user1
+          password1
+          user2
+          password2
+          ```
+
+          You can then install `pkgs.db` to generate
+          the Berkeley DB using
+          ```
+          db_load -T -t hash -f logins.txt userDb.db
+          ```
+
+          Caution: `pam_userdb` will automatically
+          append a `.db` suffix to the filename you
+          provide through this option. This option shouldn't include
+          this filetype suffix.
+        '';
+      };
+
+      localRoot = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/var/www/$USER";
+        description = lib.mdDoc ''
+          This option represents a directory which vsftpd will try to
+          change into after a local (i.e. non-anonymous) login.
+
+          Failure is silently ignored.
+        '';
+      };
+
+      anonymousUserHome = mkOption {
+        type = types.path;
+        default = "/home/ftp/";
+        description = lib.mdDoc ''
+          Directory to consider the HOME of the anonymous user.
+        '';
+      };
+
+      rsaCertFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "RSA certificate file.";
+      };
+
+      rsaKeyFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "RSA private key file.";
+      };
+
+      anonymousUmask = mkOption {
+        type = types.str;
+        default = "077";
+        example = "002";
+        description = lib.mdDoc "Anonymous write umask.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = "ftpd_banner=Hello";
+        description = lib.mdDoc "Extra configuration to add at the bottom of the generated configuration file.";
+      };
+
+    } // (listToAttrs (catAttrs "nixosOption" optionDescription));
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion =
+              (cfg.forceLocalLoginsSSL -> cfg.rsaCertFile != null)
+          &&  (cfg.forceLocalDataSSL -> cfg.rsaCertFile != null);
+        message = "vsftpd: If forceLocalLoginsSSL or forceLocalDataSSL is true then a rsaCertFile must be provided!";
+      }
+      {
+        assertion = (cfg.enableVirtualUsers -> cfg.userDbPath != null)
+                 && (cfg.enableVirtualUsers -> cfg.localUsers != null);
+        message = "vsftpd: If enableVirtualUsers is true, you need to setup both the userDbPath and localUsers options.";
+      }];
+
+    users.users = {
+      "vsftpd" = {
+        group = "vsftpd";
+        isSystemUser = true;
+        description = "VSFTPD user";
+        home = if cfg.localRoot != null
+               then cfg.localRoot # <= Necessary for virtual users.
+               else "/homeless-shelter";
+      };
+    } // optionalAttrs cfg.anonymousUser {
+      "ftp" = { name = "ftp";
+          uid = config.ids.uids.ftp;
+          group = "ftp";
+          description = "Anonymous FTP user";
+          home = cfg.anonymousUserHome;
+        };
+    };
+
+    users.groups.vsftpd = {};
+    users.groups.ftp.gid = config.ids.gids.ftp;
+
+    # If you really have to access root via FTP use mkOverride or userlistDeny
+    # = false and whitelist root
+    services.vsftpd.userlist = optional cfg.userlistDeny "root";
+
+    systemd = {
+      tmpfiles.rules = optional cfg.anonymousUser
+        #Type Path                       Mode User   Gr    Age Arg
+        "d    '${builtins.toString cfg.anonymousUserHome}' 0555 'ftp'  'ftp' -   -";
+      services.vsftpd = {
+        description = "Vsftpd Server";
+
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig.ExecStart = "@${vsftpd}/sbin/vsftpd vsftpd ${configFile}";
+        serviceConfig.Restart = "always";
+        serviceConfig.Type = "forking";
+      };
+    };
+
+    security.pam.services.vsftpd.text = mkIf (cfg.enableVirtualUsers && cfg.userDbPath != null)''
+      auth required pam_userdb.so db=${cfg.userDbPath}
+      account required pam_userdb.so db=${cfg.userDbPath}
+    '';
+  };
+}
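The "minimal secure setup" comment at the top of the module translates directly into configuration; the certificate path and user names are placeholders:

  {
    services.vsftpd = {
      enable = true;
      localUsers = true;
      userlistDeny = false;              # interpret userlist as an allow list
      userlist = [ "alice" "bob" ];
      forceLocalLoginsSSL = true;
      forceLocalDataSSL = true;
      rsaCertFile = "/var/vsftpd/vsftpd.pem";
    };
  }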
diff --git a/nixpkgs/nixos/modules/services/networking/wasabibackend.nix b/nixpkgs/nixos/modules/services/networking/wasabibackend.nix
new file mode 100644
index 000000000000..938145b35ee8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/wasabibackend.nix
@@ -0,0 +1,160 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.services.wasabibackend;
+  opt = options.services.wasabibackend;
+
+  inherit (lib) literalExpression mkEnableOption mkIf mkOption optionalAttrs optionalString types;
+
+  confOptions = {
+      BitcoinRpcConnectionString = "${cfg.rpc.user}:${cfg.rpc.password}";
+  } // optionalAttrs (cfg.network == "mainnet") {
+      Network = "Main";
+      MainNetBitcoinP2pEndPoint = "${cfg.endpoint.ip}:${toString cfg.endpoint.port}";
+      MainNetBitcoinCoreRpcEndPoint = "${cfg.rpc.ip}:${toString cfg.rpc.port}";
+  } // optionalAttrs (cfg.network == "testnet") {
+      Network = "TestNet";
+      TestNetBitcoinP2pEndPoint = "${cfg.endpoint.ip}:${toString cfg.endpoint.port}";
+      TestNetBitcoinCoreRpcEndPoint = "${cfg.rpc.ip}:${toString cfg.rpc.port}";
+  } // optionalAttrs (cfg.network == "regtest") {
+      Network = "RegTest";
+      RegTestBitcoinP2pEndPoint = "${cfg.endpoint.ip}:${toString cfg.endpoint.port}";
+      RegTestBitcoinCoreRpcEndPoint = "${cfg.rpc.ip}:${toString cfg.rpc.port}";
+  };
+
+  configFile = pkgs.writeText "wasabibackend.conf" (builtins.toJSON confOptions);
+
+in {
+
+  options = {
+
+    services.wasabibackend = {
+      enable = mkEnableOption (lib.mdDoc "Wasabi backend service");
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/wasabibackend";
+        description = lib.mdDoc "The data directory for the Wasabi backend node.";
+      };
+
+      customConfigFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "Defines the path to a custom configuration file that is copied to the user's directory. Overrides any config options.";
+      };
+
+      network = mkOption {
+        type = types.enum [ "mainnet" "testnet" "regtest" ];
+        default = "mainnet";
+        description = lib.mdDoc "The network to use for the Wasabi backend service.";
+      };
+
+      endpoint = {
+        ip = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "IP address for P2P connection to bitcoind.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8333;
+          description = lib.mdDoc "Port for P2P connection to bitcoind.";
+        };
+      };
+
+      rpc = {
+        ip = mkOption {
+          type = types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc "IP address for RPC connection to bitcoind.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8332;
+          description = lib.mdDoc "Port for RPC connection to bitcoind.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "bitcoin";
+          description = lib.mdDoc "RPC user for the bitcoin endpoint.";
+        };
+
+        password = mkOption {
+          type = types.str;
+          default = "password";
+          description = lib.mdDoc "RPC password for the bitcoin endpoint. Warning: this is stored in cleartext in the Nix store! Use `configFile` or `passwordFile` if needed.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          description = lib.mdDoc "File that contains the password of the RPC user.";
+        };
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "wasabibackend";
+        description = lib.mdDoc "The user as which to run the wasabibackend node.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = cfg.user;
+        defaultText = literalExpression "config.${opt.user}";
+        description = lib.mdDoc "The group as which to run the wasabibackend node.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0770 '${cfg.user}' '${cfg.group}' - -"
+    ];
+
+    systemd.services.wasabibackend = {
+      description = "wasabibackend server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      environment = {
+        DOTNET_PRINT_TELEMETRY_MESSAGE = "false";
+        DOTNET_CLI_TELEMETRY_OPTOUT = "true";
+      };
+      preStart = ''
+        mkdir -p ${cfg.dataDir}/.walletwasabi/backend
+        ${if cfg.customConfigFile != null then ''
+          cp -v ${cfg.customConfigFile} ${cfg.dataDir}/.walletwasabi/backend/Config.json
+        '' else ''
+          cp -v ${configFile} ${cfg.dataDir}/.walletwasabi/backend/Config.json
+          ${optionalString (cfg.rpc.passwordFile != null) ''
+            CONFIGTMP=$(mktemp)
+            cat ${cfg.dataDir}/.walletwasabi/backend/Config.json | ${pkgs.jq}/bin/jq --arg rpconnection "${cfg.rpc.user}:$(cat "${cfg.rpc.passwordFile}")" '. + { BitcoinRpcConnectionString: $rpconnection }' > $CONFIGTMP
+            mv $CONFIGTMP ${cfg.dataDir}/.walletwasabi/backend/Config.json
+          ''}
+        ''}
+        chmod ug+w ${cfg.dataDir}/.walletwasabi/backend/Config.json
+      '';
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        ExecStart = "${pkgs.wasabibackend}/bin/WasabiBackend";
+        ProtectSystem = "full";
+      };
+    };
+
+    users.users.${cfg.user} = {
+      name = cfg.user;
+      group = cfg.group;
+      description = "wasabibackend daemon user";
+      home = cfg.dataDir;
+      isSystemUser = true;
+    };
+
+    users.groups.${cfg.group} = {};
+
+  };
+}
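A usage sketch pointing the backend at a local bitcoind over RPC; the password file path is a placeholder (the preStart script above splices it into Config.json with jq):

  {
    services.wasabibackend = {
      enable = true;
      network = "mainnet";
      rpc = {
        user = "bitcoin";
        passwordFile = "/run/secrets/bitcoind-rpc-password";
      };
    };
  }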
diff --git a/nixpkgs/nixos/modules/services/networking/webhook.nix b/nixpkgs/nixos/modules/services/networking/webhook.nix
new file mode 100644
index 000000000000..2a78491941cf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/webhook.nix
@@ -0,0 +1,214 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.webhook;
+  defaultUser = "webhook";
+
+  hookFormat = pkgs.formats.json {};
+
+  hookType = types.submodule ({ name, ... }: {
+    freeformType = hookFormat.type;
+    options = {
+      id = mkOption {
+        type = types.str;
+        default = name;
+        description = mdDoc ''
+          The ID of your hook. This value is used to create the HTTP endpoint (`protocol://yourserver:port/prefix/''${id}`).
+        '';
+      };
+      execute-command = mkOption {
+        type = types.str;
+        description = mdDoc "The command that should be executed when the hook is triggered.";
+      };
+    };
+  });
+
+  hookFiles = mapAttrsToList (name: hook: hookFormat.generate "webhook-${name}.json" [ hook ]) cfg.hooks
+           ++ mapAttrsToList (name: hook: pkgs.writeText "webhook-${name}.json.tmpl" "[${hook}]") cfg.hooksTemplated;
+
+in {
+  options = {
+    services.webhook = {
+      enable = mkEnableOption (mdDoc ''
+        [Webhook](https://github.com/adnanh/webhook), a server written in Go that allows you to create HTTP endpoints (hooks),
+        which execute configured commands for any person or service that knows the URL
+      '');
+
+      package = mkPackageOptionMD pkgs "webhook" {};
+      user = mkOption {
+        type = types.str;
+        default = defaultUser;
+        description = mdDoc ''
+          Webhook will be run under this user.
+
+          If set, you must create this user yourself!
+        '';
+      };
+      group = mkOption {
+        type = types.str;
+        default = defaultUser;
+        description = mdDoc ''
+          Webhook will be run under this group.
+
+          If set, you must create this group yourself!
+        '';
+      };
+      ip = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = mdDoc ''
+          The IP webhook should serve hooks on.
+
+          The default means it can be reached on any interface if `openFirewall = true`.
+        '';
+      };
+      port = mkOption {
+        type = types.port;
+        default = 9000;
+        description = mdDoc "The port webhook should be reachable from.";
+      };
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open the configured port in the firewall for external ingress traffic.
+          Preferably the Webhook server is instead put behind a reverse proxy.
+        '';
+      };
+      enableTemplates = mkOption {
+        type = types.bool;
+        default = cfg.hooksTemplated != {};
+        defaultText = literalExpression "hooksTemplated != {}";
+        description = mdDoc ''
+          Enable the generated hooks file to be parsed as a Go template.
+          See [the documentation](https://github.com/adnanh/webhook/blob/master/docs/Templates.md) for more information.
+        '';
+      };
+      urlPrefix = mkOption {
+        type = types.str;
+        default = "hooks";
+        description = mdDoc ''
+          The URL path prefix to use for served hooks (`protocol://yourserver:port/''${prefix}/hook-id`).
+        '';
+      };
+      hooks = mkOption {
+        type = types.attrsOf hookType;
+        default = {};
+        example = {
+          echo = {
+            execute-command = "echo";
+            response-message = "Webhook is reachable!";
+          };
+          redeploy-webhook = {
+            execute-command = "/var/scripts/redeploy.sh";
+            command-working-directory = "/var/webhook";
+          };
+        };
+        description = mdDoc ''
+          The actual configuration of which hooks will be served.
+
+          Read more on the [project homepage] and on the [hook definition] page.
+          At least one hook needs to be configured.
+
+          [hook definition]: https://github.com/adnanh/webhook/blob/master/docs/Hook-Definition.md
+          [project homepage]: https://github.com/adnanh/webhook#configuration
+        '';
+      };
+      hooksTemplated = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = {
+          echo-template = ''
+            {
+              "id": "echo-template",
+              "execute-command": "echo",
+              "response-message": "{{ getenv "MESSAGE" }}"
+            }
+          '';
+        };
+        description = mdDoc ''
+          Same as {option}`hooks`, but these hooks are specified as literal strings instead of Nix values,
+          and hence can include [template syntax](https://github.com/adnanh/webhook/blob/master/docs/Templates.md)
+          which might not be representable as JSON.
+
+          Template syntax requires the {option}`enableTemplates` option to be set to `true`, which is
+          done by default if this option is set.
+        '';
+      };
+      verbose = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc "Whether to show verbose output.";
+      };
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-secure" ];
+        description = mdDoc ''
+          These are arguments passed to the webhook command in the systemd service.
+          You can find the available arguments and options in the [documentation][parameters].
+
+          [parameters]: https://github.com/adnanh/webhook/blob/master/docs/Webhook-Parameters.md
+        '';
+      };
+      environment = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = mdDoc "Extra environment variables passed to webhook.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = let
+      overlappingHooks = builtins.intersectAttrs cfg.hooks cfg.hooksTemplated;
+    in [
+      {
+        assertion = hookFiles != [];
+        message = "At least one hook needs to be configured for webhook to run.";
+      }
+      {
+        assertion = overlappingHooks == {};
+        message = "`services.webhook.hooks` and `services.webhook.hooksTemplated` have overlapping attribute(s): ${concatStringsSep ", " (builtins.attrNames overlappingHooks)}";
+      }
+    ];
+
+    users.users = mkIf (cfg.user == defaultUser) {
+      ${defaultUser} =
+        {
+          isSystemUser = true;
+          group = cfg.group;
+          description = "Webhook daemon user";
+        };
+    };
+
+    users.groups = mkIf (cfg.user == defaultUser && cfg.group == defaultUser) {
+      ${defaultUser} = {};
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+    systemd.services.webhook = {
+      description = "Webhook service";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment = config.networking.proxy.envVars // cfg.environment;
+      script = let
+        args = [ "-ip" cfg.ip "-port" (toString cfg.port) "-urlprefix" cfg.urlPrefix ]
+            ++ concatMap (hook: [ "-hooks" hook ]) hookFiles
+            ++ optional cfg.enableTemplates "-template"
+            ++ optional cfg.verbose "-verbose"
+            ++ cfg.extraArgs;
+      in ''
+        ${cfg.package}/bin/webhook ${escapeShellArgs args}
+      '';
+      serviceConfig = {
+        Restart = "on-failure";
+        User = cfg.user;
+        Group = cfg.group;
+      };
+    };
+  };
+}
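A usage sketch based on the `hooks` example above; the script path is a placeholder:

  {
    services.webhook = {
      enable = true;
      hooks.redeploy-webhook = {
        execute-command = "/var/scripts/redeploy.sh";
        command-working-directory = "/var/webhook";
      };
      # port defaults to 9000 and urlPrefix to "hooks", so this hook is served at
      # http://<host>:9000/hooks/redeploy-webhook
    };
  }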
diff --git a/nixpkgs/nixos/modules/services/networking/websockify.nix b/nixpkgs/nixos/modules/services/networking/websockify.nix
new file mode 100644
index 000000000000..27ad8953d3fa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/websockify.nix
@@ -0,0 +1,54 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.networking.websockify; in {
+  options = {
+    services.networking.websockify = {
+      enable = mkOption {
+        description = lib.mdDoc "Whether to enable websockify to forward websocket connections to TCP connections.";
+
+        default = false;
+
+        type = types.bool;
+      };
+
+      sslCert = mkOption {
+        description = lib.mdDoc "Path to the SSL certificate.";
+        type = types.path;
+      };
+
+      sslKey = mkOption {
+        description = lib.mdDoc "Path to the SSL key.";
+        default = cfg.sslCert;
+        defaultText = literalExpression "config.services.networking.websockify.sslCert";
+        type = types.path;
+      };
+
+      portMap = mkOption {
+        description = lib.mdDoc "Ports to map by default.";
+        default = {};
+        type = types.attrsOf types.int;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services."websockify@" = {
+      description = "Service to forward websocket connections to TCP connections (from port:to port %I)";
+      script = ''
+        IFS=':' read -a array <<< "$1"
+        ${pkgs.python3Packages.websockify}/bin/websockify --ssl-only \
+          --cert=${cfg.sslCert} --key=${cfg.sslKey} 0.0.0.0:''${array[0]} 0.0.0.0:''${array[1]}
+      '';
+      scriptArgs = "%i";
+    };
+
+    systemd.targets.default-websockify = {
+      description = "Target to start all default websockify@ services";
+      unitConfig.X-StopOnReconfiguration = true;
+      wants = mapAttrsToList (name: value: "websockify@${name}:${toString value}.service") cfg.portMap;
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
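A usage sketch: each attribute of `portMap` becomes a `websockify@from:to` instance pulled in by the default-websockify target; certificate paths and ports are placeholders:

  {
    services.networking.websockify = {
      enable = true;
      sslCert = "/var/lib/acme/example.org/fullchain.pem";
      sslKey  = "/var/lib/acme/example.org/key.pem";
      portMap."6080" = 5900;   # serve websockets on 6080, forward to TCP 5900
    };
  }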
diff --git a/nixpkgs/nixos/modules/services/networking/wg-netmanager.nix b/nixpkgs/nixos/modules/services/networking/wg-netmanager.nix
new file mode 100644
index 000000000000..b260c573726b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/wg-netmanager.nix
@@ -0,0 +1,42 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.wg-netmanager;
+in
+{
+
+  options = {
+    services.wg-netmanager = {
+      enable = mkEnableOption (lib.mdDoc "Wireguard network manager");
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    # NOTE: wg-netmanager runs as root
+    systemd.services.wg-netmanager = {
+      description = "Wireguard network manager";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = with pkgs; [ wireguard-tools iproute2 wireguard-go ];
+      serviceConfig = {
+        Type = "simple";
+        Restart = "on-failure";
+        ExecStart = "${pkgs.wg-netmanager}/bin/wg_netmanager";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStop = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+
+        ReadWritePaths = [
+          "/tmp"  # wg-netmanager creates files in /tmp before deleting them after use
+        ];
+      };
+      unitConfig =  {
+        ConditionPathExists = ["/etc/wg_netmanager/network.yaml" "/etc/wg_netmanager/peer.yaml"];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ gin66 ];
+}
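Enabling the service is one line, but the unit only starts if the two YAML files checked by ConditionPathExists are present:

  {
    services.wg-netmanager.enable = true;
    # /etc/wg_netmanager/network.yaml and /etc/wg_netmanager/peer.yaml must exist,
    # e.g. provisioned via environment.etc or a secrets tool.
  }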
diff --git a/nixpkgs/nixos/modules/services/networking/wg-quick.nix b/nixpkgs/nixos/modules/services/networking/wg-quick.nix
new file mode 100644
index 000000000000..68e0e06d0469
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/wg-quick.nix
@@ -0,0 +1,345 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.networking.wg-quick;
+
+  kernel = config.boot.kernelPackages;
+
+  # interface options
+
+  interfaceOpts = { ... }: {
+    options = {
+
+      configFile = mkOption {
+        example = "/secret/wg0.conf";
+        default = null;
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          wg-quick .conf file, describing the interface.
+          Using this option can be a useful means of configuring WireGuard if
+          one has an existing .conf file.
+          This overrides any other interface configuration options.
+          See wg-quick manpage for more details.
+        '';
+      };
+
+      address = mkOption {
+        example = [ "192.168.2.1/24" ];
+        default = [];
+        type = with types; listOf str;
+        description = lib.mdDoc "The IP addresses of the interface.";
+      };
+
+      autostart = mkOption {
+        description = lib.mdDoc "Whether to bring up this interface automatically during boot.";
+        default = true;
+        example = false;
+        type = types.bool;
+      };
+
+      dns = mkOption {
+        example = [ "192.168.2.2" ];
+        default = [];
+        type = with types; listOf str;
+        description = lib.mdDoc "The IP addresses of DNS servers to configure.";
+      };
+
+      privateKey = mkOption {
+        example = "yAnz5TF+lXXJte14tji3zlMNq+hd2rYUIgJBgB3fBmk=";
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Base64 private key generated by {command}`wg genkey`.
+
+          Warning: Consider using privateKeyFile instead if you do not
+          want to store the key in the world-readable Nix store.
+        '';
+      };
+
+      privateKeyFile = mkOption {
+        example = "/private/wireguard_key";
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Private key file as generated by {command}`wg genkey`.
+        '';
+      };
+
+      listenPort = mkOption {
+        default = null;
+        type = with types; nullOr int;
+        example = 51820;
+        description = lib.mdDoc ''
+          16-bit port for listening. Optional; if not specified,
+          automatically generated based on interface name.
+        '';
+      };
+
+      preUp = mkOption {
+        example = literalExpression ''"''${pkgs.iproute2}/bin/ip netns add foo"'';
+        default = "";
+        type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
+        description = lib.mdDoc ''
+          Commands called at the start of the interface setup.
+        '';
+      };
+
+      preDown = mkOption {
+        example = literalExpression ''"''${pkgs.iproute2}/bin/ip netns del foo"'';
+        default = "";
+        type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
+        description = lib.mdDoc ''
+          Command called before the interface is taken down.
+        '';
+      };
+
+      postUp = mkOption {
+        example = literalExpression ''"''${pkgs.iproute2}/bin/ip netns add foo"'';
+        default = "";
+        type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
+        description = lib.mdDoc ''
+          Commands called after the interface setup.
+        '';
+      };
+
+      postDown = mkOption {
+        example = literalExpression ''"''${pkgs.iproute2}/bin/ip netns del foo"'';
+        default = "";
+        type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
+        description = lib.mdDoc ''
+          Command called after the interface is taken down.
+        '';
+      };
+
+      table = mkOption {
+        example = "main";
+        default = null;
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          The kernel routing table to add this interface's
+          associated routes to. Setting this is useful for e.g. policy routing
+          ("ip rule") or virtual routing and forwarding ("ip vrf"). Both
+          numeric table IDs and table names (/etc/rt_tables) can be used.
+          Defaults to "main".
+        '';
+      };
+
+      mtu = mkOption {
+        example = 1248;
+        default = null;
+        type = with types; nullOr int;
+        description = lib.mdDoc ''
+          If not specified, the MTU is automatically determined
+          from the endpoint addresses or the system default route, which is usually
+          a sane choice. However, to manually specify an MTU to override this
+          automatic discovery, this value may be specified explicitly.
+        '';
+      };
+
+      peers = mkOption {
+        default = [];
+        description = lib.mdDoc "Peers linked to the interface.";
+        type = with types; listOf (submodule peerOpts);
+      };
+    };
+  };
+
+  # peer options
+
+  peerOpts = {
+    options = {
+      publicKey = mkOption {
+        example = "xTIBA5rboUvnH4htodjb6e697QjLERt1NAB4mZqp8Dg=";
+        type = types.str;
+        description = lib.mdDoc "The base64 public key to the peer.";
+      };
+
+      presharedKey = mkOption {
+        default = null;
+        example = "rVXs/Ni9tu3oDBLS4hOyAUAa1qTWVA3loR8eL20os3I=";
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          Base64 preshared key generated by {command}`wg genpsk`.
+          Optional, and may be omitted. This option adds an additional layer of
+          symmetric-key cryptography to be mixed into the already existing
+          public-key cryptography, for post-quantum resistance.
+
+          Warning: Consider using presharedKeyFile instead if you do not
+          want to store the key in the world-readable Nix store.
+        '';
+      };
+
+      presharedKeyFile = mkOption {
+        default = null;
+        example = "/private/wireguard_psk";
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          File pointing to preshared key as generated by {command}`wg genpsk`.
+          Optional, and may be omitted. This option adds an additional layer of
+          symmetric-key cryptography to be mixed into the already existing
+          public-key cryptography, for post-quantum resistance.
+        '';
+      };
+
+      allowedIPs = mkOption {
+        example = [ "10.192.122.3/32" "10.192.124.1/24" ];
+        type = with types; listOf str;
+        description = lib.mdDoc ''List of IP (v4 or v6) addresses with CIDR masks from
+        which this peer is allowed to send incoming traffic and to which
+        outgoing traffic for this peer is directed. The catch-all 0.0.0.0/0 may
+        be specified for matching all IPv4 addresses, and ::/0 may be specified
+        for matching all IPv6 addresses.'';
+      };
+
+      endpoint = mkOption {
+        default = null;
+        example = "demo.wireguard.io:12913";
+        type = with types; nullOr str;
+        description = lib.mdDoc ''Endpoint IP or hostname of the peer, followed by a colon,
+        and then a port number of the peer.'';
+      };
+
+      persistentKeepalive = mkOption {
+        default = null;
+        type = with types; nullOr int;
+        example = 25;
+        description = lib.mdDoc ''This is optional and is by default off, because most
+        users will not need it. It represents, in seconds, between 1 and 65535
+        inclusive, how often to send an authenticated empty packet to the peer,
+        for the purpose of keeping a stateful firewall or NAT mapping valid
+        persistently. For example, if the interface very rarely sends traffic,
+        but it might at anytime receive traffic from a peer, and it is behind
+        NAT, the interface might benefit from having a persistent keepalive
+        interval of 25 seconds; however, most users will not need this.'';
+      };
+    };
+  };
+
+  writeScriptFile = name: text: ((pkgs.writeShellScriptBin name text) + "/bin/${name}");
+
+  generateUnit = name: values:
+    assert assertMsg (values.configFile != null || ((values.privateKey != null) != (values.privateKeyFile != null))) "Only one of privateKey, configFile or privateKeyFile may be set";
+    let
+      preUpFile = if values.preUp != "" then writeScriptFile "preUp.sh" values.preUp else null;
+      postUp =
+            optional (values.privateKeyFile != null) "wg set ${name} private-key <(cat ${values.privateKeyFile})" ++
+            (concatMap (peer: optional (peer.presharedKeyFile != null) "wg set ${name} peer ${peer.publicKey} preshared-key <(cat ${peer.presharedKeyFile})") values.peers) ++
+            optional (values.postUp != "") values.postUp;
+      postUpFile = if postUp != [] then writeScriptFile "postUp.sh" (concatMapStringsSep "\n" (line: line) postUp) else null;
+      preDownFile = if values.preDown != "" then writeScriptFile "preDown.sh" values.preDown else null;
+      postDownFile = if values.postDown != "" then writeScriptFile "postDown.sh" values.postDown else null;
+      configDir = pkgs.writeTextFile {
+        name = "config-${name}";
+        executable = false;
+        destination = "/${name}.conf";
+        text =
+        ''
+        [interface]
+        ${concatMapStringsSep "\n" (address:
+          "Address = ${address}"
+        ) values.address}
+        ${concatMapStringsSep "\n" (dns:
+          "DNS = ${dns}"
+        ) values.dns}
+        '' +
+        optionalString (values.table != null) "Table = ${values.table}\n" +
+        optionalString (values.mtu != null) "MTU = ${toString values.mtu}\n" +
+        optionalString (values.privateKey != null) "PrivateKey = ${values.privateKey}\n" +
+        optionalString (values.listenPort != null) "ListenPort = ${toString values.listenPort}\n" +
+        optionalString (preUpFile != null) "PreUp = ${preUpFile}\n" +
+        optionalString (postUpFile != null) "PostUp = ${postUpFile}\n" +
+        optionalString (preDownFile != null) "PreDown = ${preDownFile}\n" +
+        optionalString (postDownFile != null) "PostDown = ${postDownFile}\n" +
+        concatMapStringsSep "\n" (peer:
+          assert assertMsg (!((peer.presharedKeyFile != null) && (peer.presharedKey != null))) "Only one of presharedKey or presharedKeyFile may be set";
+          "[Peer]\n" +
+          "PublicKey = ${peer.publicKey}\n" +
+          optionalString (peer.presharedKey != null) "PresharedKey = ${peer.presharedKey}\n" +
+          optionalString (peer.endpoint != null) "Endpoint = ${peer.endpoint}\n" +
+          optionalString (peer.persistentKeepalive != null) "PersistentKeepalive = ${toString peer.persistentKeepalive}\n" +
+          optionalString (peer.allowedIPs != []) "AllowedIPs = ${concatStringsSep "," peer.allowedIPs}\n"
+        ) values.peers;
+      };
+      configPath =
+        if values.configFile != null then
+          # This uses bind-mounted private tmp folder (/tmp/systemd-private-***)
+          "/tmp/${name}.conf"
+        else
+          "${configDir}/${name}.conf";
+    in
+    nameValuePair "wg-quick-${name}"
+      {
+        description = "wg-quick WireGuard Tunnel - ${name}";
+        requires = [ "network-online.target" ];
+        after = [ "network.target" "network-online.target" ];
+        wantedBy = optional values.autostart "multi-user.target";
+        environment.DEVICE = name;
+        path = [
+          pkgs.wireguard-tools
+          config.networking.firewall.package   # iptables or nftables
+          config.networking.resolvconf.package # openresolv or systemd
+        ];
+
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+
+        script = ''
+          ${optionalString (!config.boot.isContainer) "${pkgs.kmod}/bin/modprobe wireguard"}
+          ${optionalString (values.configFile != null) ''
+            cp ${values.configFile} ${configPath}
+          ''}
+          wg-quick up ${configPath}
+        '';
+
+        serviceConfig = {
+          # Used to privately store renamed copies of external config files during activation
+          PrivateTmp = true;
+        };
+
+        preStop = ''
+          wg-quick down ${configPath}
+        '';
+      };
+in {
+
+  ###### interface
+
+  options = {
+    networking.wg-quick = {
+      interfaces = mkOption {
+        description = lib.mdDoc "Wireguard interfaces.";
+        default = {};
+        example = {
+          wg0 = {
+            address = [ "192.168.20.4/24" ];
+            privateKey = "yAnz5TF+lXXJte14tji3zlMNq+hd2rYUIgJBgB3fBmk=";
+            peers = [
+              { allowedIPs = [ "192.168.20.1/32" ];
+                publicKey  = "xTIBA5rboUvnH4htodjb6e697QjLERt1NAB4mZqp8Dg=";
+                endpoint   = "demo.wireguard.io:12913"; }
+            ];
+          };
+        };
+        type = with types; attrsOf (submodule interfaceOpts);
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf (cfg.interfaces != {}) {
+    boot.extraModulePackages = optional (versionOlder kernel.kernel.version "5.6") kernel.wireguard;
+    environment.systemPackages = [ pkgs.wireguard-tools ];
+    systemd.services = mapAttrs' generateUnit cfg.interfaces;
+
+    # Prevent networkd from clearing the rules set by wg-quick when restarted (e.g. when waking up from suspend).
+    systemd.network.config.networkConfig.ManageForeignRoutingPolicyRules = mkDefault false;
+
+    # WireGuard interfaces should be ignored in determining whether the network is online.
+    systemd.network.wait-online.ignoredInterfaces = builtins.attrNames cfg.interfaces;
+  };
+}
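A usage sketch mirroring the `interfaces` example above, but keeping the private key out of the Nix store via `privateKeyFile` (paths and keys are placeholders):

  {
    networking.wg-quick.interfaces.wg0 = {
      address = [ "192.168.20.4/24" ];
      privateKeyFile = "/private/wireguard_key";
      peers = [
        {
          publicKey = "xTIBA5rboUvnH4htodjb6e697QjLERt1NAB4mZqp8Dg=";
          allowedIPs = [ "192.168.20.1/32" ];
          endpoint = "demo.wireguard.io:12913";
          persistentKeepalive = 25;
        }
      ];
    };
  }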
diff --git a/nixpkgs/nixos/modules/services/networking/wgautomesh.nix b/nixpkgs/nixos/modules/services/networking/wgautomesh.nix
new file mode 100644
index 000000000000..094281403f73
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/wgautomesh.nix
@@ -0,0 +1,163 @@
+{ lib, config, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.wgautomesh;
+  settingsFormat = pkgs.formats.toml { };
+  configFile =
+    # Have to remove nulls manually as TOML generator will not just skip key
+    # if value is null
+    settingsFormat.generate "wgautomesh-config.toml"
+      (filterAttrs (k: v: v != null)
+        (mapAttrs
+          (k: v:
+            if k == "peers"
+            then map (e: filterAttrs (k: v: v != null) e) v
+            else v)
+          cfg.settings));
+  runtimeConfigFile =
+    if cfg.enableGossipEncryption
+    then "/run/wgautomesh/wgautomesh.toml"
+    else configFile;
+in
+{
+  options.services.wgautomesh = {
+    enable = mkEnableOption (mdDoc "the wgautomesh daemon");
+    logLevel = mkOption {
+      type = types.enum [ "trace" "debug" "info" "warn" "error" ];
+      default = "info";
+      description = mdDoc "wgautomesh log level.";
+    };
+    enableGossipEncryption = mkOption {
+      type = types.bool;
+      default = true;
+      description = mdDoc "Enable encryption of gossip traffic.";
+    };
+    gossipSecretFile = mkOption {
+      type = types.path;
+      description = mdDoc ''
+        File containing the gossip secret, a shared secret key to use for gossip
+        encryption.  Required if `enableGossipEncryption` is set.  This file
+        may contain any arbitrary-length utf8 string.  To generate a new gossip
+        secret, use a command such as `openssl rand -base64 32`.
+      '';
+    };
+    enablePersistence = mkOption {
+      type = types.bool;
+      default = true;
+      description = mdDoc "Enable persistence of Wireguard peer info between restarts.";
+    };
+    openFirewall = mkOption {
+      type = types.bool;
+      default = true;
+      description = mdDoc "Automatically open gossip port in firewall (recommended).";
+    };
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+        options = {
+
+          interface = mkOption {
+            type = types.str;
+            description = mdDoc ''
+              Wireguard interface to manage (it is NOT created by wgautomesh, you
+              should use another NixOS option to create it such as
+              `networking.wireguard.interfaces.wg0 = {...};`).
+            '';
+            example = "wg0";
+          };
+          gossip_port = mkOption {
+            type = types.port;
+            description = mdDoc ''
+              wgautomesh gossip port, this MUST be the same number on all nodes in
+              the wgautomesh network.
+            '';
+            default = 1666;
+          };
+          lan_discovery = mkOption {
+            type = types.bool;
+            default = true;
+            description = mdDoc "Enable discovery of peers on the same LAN using UDP broadcast.";
+          };
+          upnp_forward_external_port = mkOption {
+            type = types.nullOr types.port;
+            default = null;
+            description = mdDoc ''
+              Public port number to try to redirect to this machine's Wireguard
+              daemon using UPnP IGD.
+            '';
+          };
+          peers = mkOption {
+            type = types.listOf (types.submodule {
+              options = {
+                pubkey = mkOption {
+                  type = types.str;
+                  description = mdDoc "Wireguard public key of this peer.";
+                };
+                address = mkOption {
+                  type = types.str;
+                  description = mdDoc ''
+                    Wireguard address of this peer (a single IP address, multiple
+                    addresses or address ranges are not supported).
+                  '';
+                  example = "10.0.0.42";
+                };
+                endpoint = mkOption {
+                  type = types.nullOr types.str;
+                  description = mdDoc ''
+                    Bootstrap endpoint for connecting to this Wireguard peer if no
+                    other address is known or none are working.
+                  '';
+                  default = null;
+                  example = "wgnode.mydomain.example:51820";
+                };
+              };
+            });
+            default = [ ];
+            description = mdDoc "wgautomesh peer list.";
+          };
+        };
+
+      };
+      default = { };
+      description = mdDoc "Configuration for wgautomesh.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.wgautomesh.settings = {
+      gossip_secret_file = mkIf cfg.enableGossipEncryption "$CREDENTIALS_DIRECTORY/gossip_secret";
+      persist_file = mkIf cfg.enablePersistence "/var/lib/wgautomesh/state";
+    };
+
+    systemd.services.wgautomesh = {
+      path = [ pkgs.wireguard-tools ];
+      environment = { RUST_LOG = "wgautomesh=${cfg.logLevel}"; };
+      description = "wgautomesh";
+      serviceConfig = {
+        Type = "simple";
+
+        ExecStart = "${getExe pkgs.wgautomesh} ${runtimeConfigFile}";
+        Restart = "always";
+        RestartSec = "30";
+        LoadCredential = mkIf cfg.enableGossipEncryption [ "gossip_secret:${cfg.gossipSecretFile}" ];
+
+        ExecStartPre = mkIf cfg.enableGossipEncryption [
+          ''${pkgs.envsubst}/bin/envsubst \
+              -i ${configFile} \
+              -o ${runtimeConfigFile}''
+        ];
+
+        DynamicUser = true;
+        StateDirectory = "wgautomesh";
+        StateDirectoryMode = "0700";
+        RuntimeDirectory = "wgautomesh";
+        AmbientCapabilities = "CAP_NET_ADMIN";
+        CapabilityBoundingSet = "CAP_NET_ADMIN";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+    networking.firewall.allowedUDPPorts =
+      mkIf cfg.openFirewall [ cfg.settings.gossip_port ];
+  };
+}
+
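Taken together, a node enables the module, points it at an existing Wireguard interface, and lists the other nodes. A minimal sketch of one node's configuration (the interface name, key path, addresses and endpoint below are placeholders, not values taken from this module):

    # Hypothetical node configuration; the Wireguard interface is created separately.
    networking.wireguard.interfaces.wg0 = {
      ips = [ "10.0.0.1/24" ];
      privateKeyFile = "/var/lib/wireguard/wg0.key";
      listenPort = 51820;
    };
    services.wgautomesh = {
      enable = true;
      # Shared by all nodes; generate once with e.g. `openssl rand -base64 32`.
      gossipSecretFile = "/var/lib/wgautomesh/gossip_secret";
      settings = {
        interface = "wg0";
        peers = [
          { pubkey = "xTIBA5rboUvnH4htodjb6e697QjLERt1NAB4mZqp8Dg=";
            address = "10.0.0.2";
            endpoint = "node2.example.org:51820"; }
        ];
      };
    };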
diff --git a/nixpkgs/nixos/modules/services/networking/wireguard.nix b/nixpkgs/nixos/modules/services/networking/wireguard.nix
new file mode 100644
index 000000000000..d36be87daf60
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/wireguard.nix
@@ -0,0 +1,602 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.networking.wireguard;
+  opt = options.networking.wireguard;
+
+  kernel = config.boot.kernelPackages;
+
+  # interface options
+
+  interfaceOpts = { ... }: {
+
+    options = {
+
+      ips = mkOption {
+        example = [ "192.168.2.1/24" ];
+        default = [];
+        type = with types; listOf str;
+        description = lib.mdDoc "The IP addresses of the interface.";
+      };
+
+      privateKey = mkOption {
+        example = "yAnz5TF+lXXJte14tji3zlMNq+hd2rYUIgJBgB3fBmk=";
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Base64 private key generated by {command}`wg genkey`.
+
+          Warning: Consider using privateKeyFile instead if you do not
+          want to store the key in the world-readable Nix store.
+        '';
+      };
+
+      generatePrivateKeyFile = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Automatically generate a private key with
+          {command}`wg genkey`, at the privateKeyFile location.
+        '';
+      };
+
+      privateKeyFile = mkOption {
+        example = "/private/wireguard_key";
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc ''
+          Private key file as generated by {command}`wg genkey`.
+        '';
+      };
+
+      listenPort = mkOption {
+        default = null;
+        type = with types; nullOr int;
+        example = 51820;
+        description = lib.mdDoc ''
+          16-bit port for listening. Optional; if not specified,
+          automatically generated based on interface name.
+        '';
+      };
+
+      preSetup = mkOption {
+        example = literalExpression ''"''${pkgs.iproute2}/bin/ip netns add foo"'';
+        default = "";
+        type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
+        description = lib.mdDoc ''
+          Commands called at the start of the interface setup.
+        '';
+      };
+
+      postSetup = mkOption {
+        example = literalExpression ''
+          '''printf "nameserver 10.200.100.1" | ''${pkgs.openresolv}/bin/resolvconf -a wg0 -m 0'''
+        '';
+        default = "";
+        type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
+        description = lib.mdDoc "Commands called at the end of the interface setup.";
+      };
+
+      postShutdown = mkOption {
+        example = literalExpression ''"''${pkgs.openresolv}/bin/resolvconf -d wg0"'';
+        default = "";
+        type = with types; coercedTo (listOf str) (concatStringsSep "\n") lines;
+        description = lib.mdDoc "Commands called after shutting down the interface.";
+      };
+
+      table = mkOption {
+        default = "main";
+        type = types.str;
+        description = lib.mdDoc ''
+          The kernel routing table to add this interface's
+          associated routes to. Setting this is useful for e.g. policy routing
+          ("ip rule") or virtual routing and forwarding ("ip vrf"). Both
+          numeric table IDs and table names (/etc/rt_tables) can be used.
+          Defaults to "main".
+        '';
+      };
+
+      peers = mkOption {
+        default = [];
+        description = lib.mdDoc "Peers linked to the interface.";
+        type = with types; listOf (submodule peerOpts);
+      };
+
+      allowedIPsAsRoutes = mkOption {
+        example = false;
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Determines whether to add allowed IPs as routes or not.
+        '';
+      };
+
+      socketNamespace = mkOption {
+        default = null;
+        type = with types; nullOr str;
+        example = "container";
+        description = lib.mdDoc ''The pre-existing network namespace in which the
+        WireGuard interface is created, and which retains the socket even if the
+        interface is moved via {option}`interfaceNamespace`. When
+        `null`, the interface is created in the init namespace.
+        See [documentation](https://www.wireguard.com/netns/).
+        '';
+      };
+
+      interfaceNamespace = mkOption {
+        default = null;
+        type = with types; nullOr str;
+        example = "init";
+        description = lib.mdDoc ''The pre-existing network namespace the WireGuard
+        interface is moved to. The special value `init` means
+        the init namespace. When `null`, the interface is not
+        moved.
+        See [documentation](https://www.wireguard.com/netns/).
+        '';
+      };
+
+      fwMark = mkOption {
+        default = null;
+        type = with types; nullOr str;
+        example = "0x6e6978";
+        description = lib.mdDoc ''
+          Mark all wireguard packets originating from
+          this interface with the given firewall mark. The firewall mark can be
+          used in firewalls or policy routing to filter the wireguard packets.
+          This can be useful for setup where all traffic goes through the
+          wireguard tunnel, because the wireguard packets need to be routed
+          differently.
+        '';
+      };
+
+      mtu = mkOption {
+        default = null;
+        type = with types; nullOr int;
+        example = 1280;
+        description = lib.mdDoc ''
+          Set the maximum transmission unit in bytes for the wireguard
+          interface. Beware that the wireguard packets have a header that may
+          add up to 80 bytes to the mtu. By default, the MTU is (1500 - 80) =
+          1420. However, if the MTU of the upstream network is lower, the MTU
+          of the wireguard network has to be adjusted as well.
+        '';
+      };
+
+      metric = mkOption {
+        default = null;
+        type = with types; nullOr int;
+        example = 700;
+        description = lib.mdDoc ''
+          Set the metric of routes related to this Wireguard interface.
+        '';
+      };
+    };
+
+  };
+
+  # peer options
+
+  peerOpts = self: {
+
+    options = {
+
+      name = mkOption {
+        default =
+          replaceStrings
+            [ "/" "-"     " "     "+"     "="     ]
+            [ "-" "\\x2d" "\\x20" "\\x2b" "\\x3d" ]
+            self.config.publicKey;
+        defaultText = literalExpression "publicKey";
+        example = "bernd";
+        type = types.str;
+        description = lib.mdDoc "Name used to derive peer unit name.";
+      };
+
+      publicKey = mkOption {
+        example = "xTIBA5rboUvnH4htodjb6e697QjLERt1NAB4mZqp8Dg=";
+        type = types.singleLineStr;
+        description = lib.mdDoc "The base64 public key of the peer.";
+      };
+
+      presharedKey = mkOption {
+        default = null;
+        example = "rVXs/Ni9tu3oDBLS4hOyAUAa1qTWVA3loR8eL20os3I=";
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          Base64 preshared key generated by {command}`wg genpsk`.
+          Optional, and may be omitted. This option adds an additional layer of
+          symmetric-key cryptography to be mixed into the already existing
+          public-key cryptography, for post-quantum resistance.
+
+          Warning: Consider using presharedKeyFile instead if you do not
+          want to store the key in the world-readable Nix store.
+        '';
+      };
+
+      presharedKeyFile = mkOption {
+        default = null;
+        example = "/private/wireguard_psk";
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          File pointing to preshared key as generated by {command}`wg genpsk`.
+          Optional, and may be omitted. This option adds an additional layer of
+          symmetric-key cryptography to be mixed into the already existing
+          public-key cryptography, for post-quantum resistance.
+        '';
+      };
+
+      allowedIPs = mkOption {
+        example = [ "10.192.122.3/32" "10.192.124.1/24" ];
+        type = with types; listOf str;
+        description = lib.mdDoc ''List of IP (v4 or v6) addresses with CIDR masks from
+        which this peer is allowed to send incoming traffic and to which
+        outgoing traffic for this peer is directed. The catch-all 0.0.0.0/0 may
+        be specified for matching all IPv4 addresses, and ::/0 may be specified
+        for matching all IPv6 addresses.'';
+      };
+
+      endpoint = mkOption {
+        default = null;
+        example = "demo.wireguard.io:12913";
+        type = with types; nullOr str;
+        description = lib.mdDoc ''
+          Endpoint IP or hostname of the peer, followed by a colon,
+          and then a port number of the peer.
+
+          Warning for endpoints with changing IPs:
+          The WireGuard kernel side cannot perform DNS resolution.
+          Thus DNS resolution is done once by the `wg` userspace
+          utility, when setting up WireGuard. Consequently, if the IP address
+          behind the name changes, WireGuard will not notice.
+          This is especially common for dynamic-DNS setups, but also applies to
+          any other DNS-based setup.
+          If you do not use IP endpoints, you likely want to set
+          {option}`networking.wireguard.dynamicEndpointRefreshSeconds`
+          to refresh the IPs periodically.
+        '';
+      };
+
+      dynamicEndpointRefreshSeconds = mkOption {
+        default = 0;
+        example = 5;
+        type = with types; int;
+        description = lib.mdDoc ''
+          Periodically re-execute the `wg` utility every
+          this many seconds in order to let WireGuard notice DNS / hostname
+          changes.
+
+          Setting this to `0` disables periodic reexecution.
+        '';
+      };
+
+      dynamicEndpointRefreshRestartSeconds = mkOption {
+        default = null;
+        example = 5;
+        type = with types; nullOr ints.unsigned;
+        description = lib.mdDoc ''
+          When the dynamic endpoint refresh that is configured via
+          dynamicEndpointRefreshSeconds exits (likely due to a failure),
+          restart that service after this many seconds.
+
+          If set to `null` the value of
+          {option}`networking.wireguard.dynamicEndpointRefreshSeconds`
+          will be used as the default.
+        '';
+      };
+
+      persistentKeepalive = mkOption {
+        default = null;
+        type = with types; nullOr int;
+        example = 25;
+        description = lib.mdDoc ''This is optional and is by default off, because most
+        users will not need it. It represents, in seconds, between 1 and 65535
+        inclusive, how often to send an authenticated empty packet to the peer,
+        for the purpose of keeping a stateful firewall or NAT mapping valid
+        persistently. For example, if the interface very rarely sends traffic,
+        but it might at anytime receive traffic from a peer, and it is behind
+        NAT, the interface might benefit from having a persistent keepalive
+        interval of 25 seconds; however, most users will not need this.'';
+      };
+
+    };
+
+  };
+
+  generateKeyServiceUnit = name: values:
+    assert values.generatePrivateKeyFile;
+    nameValuePair "wireguard-${name}-key"
+      {
+        description = "WireGuard Tunnel - ${name} - Key Generator";
+        wantedBy = [ "wireguard-${name}.service" ];
+        requiredBy = [ "wireguard-${name}.service" ];
+        before = [ "wireguard-${name}.service" ];
+        path = with pkgs; [ wireguard-tools ];
+
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+
+        script = ''
+          set -e
+
+          # If the parent dir does not already exist, create it.
+          # Otherwise, does nothing, keeping existing permissions intact.
+          mkdir -p --mode 0755 "${dirOf values.privateKeyFile}"
+
+          if [ ! -f "${values.privateKeyFile}" ]; then
+            # Write private key file with atomically-correct permissions.
+            (set -e; umask 077; wg genkey > "${values.privateKeyFile}")
+          fi
+        '';
+      };
+
+  peerUnitServiceName = interfaceName: peerName: dynamicRefreshEnabled:
+    let
+      refreshSuffix = optionalString dynamicRefreshEnabled "-refresh";
+    in
+      "wireguard-${interfaceName}-peer-${peerName}${refreshSuffix}";
+
+  generatePeerUnit = { interfaceName, interfaceCfg, peer }:
+    let
+      psk =
+        if peer.presharedKey != null
+          then pkgs.writeText "wg-psk" peer.presharedKey
+          else peer.presharedKeyFile;
+      src = interfaceCfg.socketNamespace;
+      dst = interfaceCfg.interfaceNamespace;
+      ip = nsWrap "ip" src dst;
+      wg = nsWrap "wg" src dst;
+      dynamicRefreshEnabled = peer.dynamicEndpointRefreshSeconds != 0;
+      # We generate a different name (with a `-refresh` suffix) when
+      # `dynamicEndpointRefreshSeconds` is nonzero, so that the same service never
+      # switches `Type` (`oneshot` vs `simple`); this makes scripting against the
+      # units more obvious.
+      serviceName = peerUnitServiceName interfaceName peer.name dynamicRefreshEnabled;
+    in nameValuePair serviceName
+      {
+        description = "WireGuard Peer - ${interfaceName} - ${peer.name}"
+          + optionalString (peer.name != peer.publicKey) " (${peer.publicKey})";
+        requires = [ "wireguard-${interfaceName}.service" ];
+        wants = [ "network-online.target" ];
+        after = [ "wireguard-${interfaceName}.service" "network-online.target" ];
+        wantedBy = [ "wireguard-${interfaceName}.service" ];
+        environment.DEVICE = interfaceName;
+        environment.WG_ENDPOINT_RESOLUTION_RETRIES = "infinity";
+        path = with pkgs; [ iproute2 wireguard-tools ];
+
+        serviceConfig =
+          if !dynamicRefreshEnabled
+            then
+              {
+                Type = "oneshot";
+                RemainAfterExit = true;
+              }
+            else
+              {
+                Type = "simple"; # re-executes 'wg' indefinitely
+                # Note that `Type = "oneshot"` services with `RemainAfterExit = true`
+                # cannot be used with systemd timers (see `man systemd.timer`),
+                # which is why `simple` with a loop is the best choice here.
+                # It also makes starting and stopping easiest.
+                #
+                # Restart if the service exits (e.g. when wireguard gives up after "Name or service not known" dns failures):
+                Restart = "always";
+                RestartSec = if null != peer.dynamicEndpointRefreshRestartSeconds
+                             then peer.dynamicEndpointRefreshRestartSeconds
+                             else peer.dynamicEndpointRefreshSeconds;
+              };
+        unitConfig = lib.optionalAttrs dynamicRefreshEnabled {
+          StartLimitIntervalSec = 0;
+        };
+
+        script = let
+          wg_setup = concatStringsSep " " (
+            [ ''${wg} set ${interfaceName} peer "${peer.publicKey}"'' ]
+            ++ optional (psk != null) ''preshared-key "${psk}"''
+            ++ optional (peer.endpoint != null) ''endpoint "${peer.endpoint}"''
+            ++ optional (peer.persistentKeepalive != null) ''persistent-keepalive "${toString peer.persistentKeepalive}"''
+            ++ optional (peer.allowedIPs != []) ''allowed-ips "${concatStringsSep "," peer.allowedIPs}"''
+          );
+          route_setup =
+            optionalString interfaceCfg.allowedIPsAsRoutes
+              (concatMapStringsSep "\n"
+                (allowedIP:
+                  ''${ip} route replace "${allowedIP}" dev "${interfaceName}" table "${interfaceCfg.table}" ${optionalString (interfaceCfg.metric != null) "metric ${toString interfaceCfg.metric}"}''
+                ) peer.allowedIPs);
+        in ''
+          ${wg_setup}
+          ${route_setup}
+
+          ${optionalString (peer.dynamicEndpointRefreshSeconds != 0) ''
+            # Re-execute 'wg' periodically to notice DNS / hostname changes.
+            # Note this will not time out on transient DNS failures (e.g. temporarily
+            # unresolvable DNS names) because we have set 'WG_ENDPOINT_RESOLUTION_RETRIES=infinity'.
+            # Also note that 'wg' limits its maximum retry delay to 20 seconds as of writing.
+            while ${wg_setup}; do
+              sleep "${toString peer.dynamicEndpointRefreshSeconds}";
+            done
+          ''}
+        '';
+
+        postStop = let
+          route_destroy = optionalString interfaceCfg.allowedIPsAsRoutes
+            (concatMapStringsSep "\n"
+              (allowedIP:
+                ''${ip} route delete "${allowedIP}" dev "${interfaceName}" table "${interfaceCfg.table}"''
+              ) peer.allowedIPs);
+        in ''
+          ${wg} set "${interfaceName}" peer "${peer.publicKey}" remove
+          ${route_destroy}
+        '';
+      };
+
+  # the target is required to start new peer units when they are added
+  generateInterfaceTarget = name: values:
+    let
+      mkPeerUnit = peer: (peerUnitServiceName name peer.name (peer.dynamicEndpointRefreshSeconds != 0)) + ".service";
+    in
+    nameValuePair "wireguard-${name}"
+      rec {
+        description = "WireGuard Tunnel - ${name}";
+        wantedBy = [ "multi-user.target" ];
+        wants = [ "wireguard-${name}.service" ] ++ map mkPeerUnit values.peers;
+        after = wants;
+      };
+
+  generateInterfaceUnit = name: values:
+    # exactly one way to specify the private key must be set
+    #assert (values.privateKey != null) != (values.privateKeyFile != null);
+    let privKey = if values.privateKeyFile != null then values.privateKeyFile else pkgs.writeText "wg-key" values.privateKey;
+        src = values.socketNamespace;
+        dst = values.interfaceNamespace;
+        ipPreMove  = nsWrap "ip" src null;
+        ipPostMove = nsWrap "ip" src dst;
+        wg = nsWrap "wg" src dst;
+        ns = if dst == "init" then "1" else dst;
+
+    in
+    nameValuePair "wireguard-${name}"
+      {
+        description = "WireGuard Tunnel - ${name}";
+        after = [ "network-pre.target" ];
+        wants = [ "network.target" ];
+        before = [ "network.target" ];
+        environment.DEVICE = name;
+        path = with pkgs; [ kmod iproute2 wireguard-tools ];
+
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+
+        script = ''
+          ${optionalString (!config.boot.isContainer) "modprobe wireguard || true"}
+
+          ${values.preSetup}
+
+          ${ipPreMove} link add dev "${name}" type wireguard
+          ${optionalString (values.interfaceNamespace != null && values.interfaceNamespace != values.socketNamespace) ''${ipPreMove} link set "${name}" netns "${ns}"''}
+          ${optionalString (values.mtu != null) ''${ipPostMove} link set "${name}" mtu ${toString values.mtu}''}
+
+          ${concatMapStringsSep "\n" (ip:
+            ''${ipPostMove} address add "${ip}" dev "${name}"''
+          ) values.ips}
+
+          ${concatStringsSep " " (
+            [ ''${wg} set "${name}" private-key "${privKey}"'' ]
+            ++ optional (values.listenPort != null) ''listen-port "${toString values.listenPort}"''
+            ++ optional (values.fwMark != null) ''fwmark "${values.fwMark}"''
+          )}
+
+          ${ipPostMove} link set up dev "${name}"
+
+          ${values.postSetup}
+        '';
+
+        postStop = ''
+          ${ipPostMove} link del dev "${name}"
+          ${values.postShutdown}
+        '';
+      };
+
+  nsWrap = cmd: src: dst:
+    let
+      nsList = filter (ns: ns != null) [ src dst ];
+      ns = last nsList;
+    in
+      if (length nsList > 0 && ns != "init") then ''ip netns exec "${ns}" "${cmd}"'' else cmd;
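+  # For example, nsWrap "wg" "container" null becomes
+  #   ip netns exec "container" "wg"
+  # while a destination namespace of "init" (or no namespaces at all) leaves the
+  # command unwrapped.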
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    networking.wireguard = {
+
+      enable = mkOption {
+        description = lib.mdDoc ''
+          Whether to enable WireGuard.
+
+          Please note that {option}`systemd.network.netdevs` has more features
+          and is better maintained. When building new things, it is advised to
+          use that instead.
+        '';
+        type = types.bool;
+        # 2019-05-25: Backwards compatibility.
+        default = cfg.interfaces != {};
+        defaultText = literalExpression "config.${opt.interfaces} != { }";
+        example = true;
+      };
+
+      interfaces = mkOption {
+        description = lib.mdDoc ''
+          WireGuard interfaces.
+
+          Please note that {option}`systemd.network.netdevs` has more features
+          and is better maintained. When building new things, it is advised to
+          use that instead.
+        '';
+        default = {};
+        example = {
+          wg0 = {
+            ips = [ "192.168.20.4/24" ];
+            privateKey = "yAnz5TF+lXXJte14tji3zlMNq+hd2rYUIgJBgB3fBmk=";
+            peers = [
+              { allowedIPs = [ "192.168.20.1/32" ];
+                publicKey  = "xTIBA5rboUvnH4htodjb6e697QjLERt1NAB4mZqp8Dg=";
+                endpoint   = "demo.wireguard.io:12913"; }
+            ];
+          };
+        };
+        type = with types; attrsOf (submodule interfaceOpts);
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable (let
+    all_peers = flatten
+      (mapAttrsToList (interfaceName: interfaceCfg:
+        map (peer: { inherit interfaceName interfaceCfg peer;}) interfaceCfg.peers
+      ) cfg.interfaces);
+  in {
+
+    assertions = (attrValues (
+        mapAttrs (name: value: {
+          assertion = (value.privateKey != null) != (value.privateKeyFile != null);
+          message = "Either networking.wireguard.interfaces.${name}.privateKey or networking.wireguard.interfaces.${name}.privateKeyFile must be set.";
+        }) cfg.interfaces))
+      ++ (attrValues (
+        mapAttrs (name: value: {
+          assertion = value.generatePrivateKeyFile -> (value.privateKey == null);
+          message = "networking.wireguard.interfaces.${name}.generatePrivateKeyFile must not be set if networking.wireguard.interfaces.${name}.privateKey is set.";
+        }) cfg.interfaces))
+        ++ map ({ interfaceName, peer, ... }: {
+          assertion = (peer.presharedKey == null) || (peer.presharedKeyFile == null);
+          message = "networking.wireguard.interfaces.${interfaceName} peer «${peer.publicKey}» has both presharedKey and presharedKeyFile set, but only one can be used.";
+        }) all_peers;
+
+    boot.extraModulePackages = optional (versionOlder kernel.kernel.version "5.6") kernel.wireguard;
+    boot.kernelModules = [ "wireguard" ];
+    environment.systemPackages = [ pkgs.wireguard-tools ];
+
+    systemd.services =
+      (mapAttrs' generateInterfaceUnit cfg.interfaces)
+      // (listToAttrs (map generatePeerUnit all_peers))
+      // (mapAttrs' generateKeyServiceUnit
+      (filterAttrs (name: value: value.generatePrivateKeyFile) cfg.interfaces));
+
+    systemd.targets = mapAttrs' generateInterfaceTarget cfg.interfaces;
+  });
+
+}
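A usage sketch of the key-generation and dynamic-refresh machinery defined above (interface name, key path, addresses and endpoint are placeholders):

    networking.wireguard.interfaces.wg0 = {
      ips = [ "10.100.0.2/24" ];
      listenPort = 51820;
      # The key file is created on first activation by the generated
      # "wireguard-wg0-key" oneshot unit.
      generatePrivateKeyFile = true;
      privateKeyFile = "/var/lib/wireguard/wg0.key";
      peers = [
        {
          name = "hub";
          publicKey = "xTIBA5rboUvnH4htodjb6e697QjLERt1NAB4mZqp8Dg=";
          allowedIPs = [ "10.100.0.1/32" ];
          endpoint = "vpn.example.org:51820";
          # Re-run `wg set` every 60 seconds so DNS changes of the endpoint are
          # noticed; this makes the peer unit a long-running "simple" service.
          dynamicEndpointRefreshSeconds = 60;
          persistentKeepalive = 25;
        }
      ];
    };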
diff --git a/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix b/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix
new file mode 100644
index 000000000000..90d9c68433cf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/wpa_supplicant.nix
@@ -0,0 +1,534 @@
+{ config, lib, options, pkgs, utils, ... }:
+
+with lib;
+
+let
+  package = if cfg.allowAuxiliaryImperativeNetworks
+    then pkgs.wpa_supplicant_ro_ssids
+    else pkgs.wpa_supplicant;
+
+  cfg = config.networking.wireless;
+  opt = options.networking.wireless;
+
+  wpa3Protocols = [ "SAE" "FT-SAE" ];
+  hasMixedWPA = opts:
+    let
+      hasWPA3 = !mutuallyExclusive opts.authProtocols wpa3Protocols;
+      others = subtractLists wpa3Protocols opts.authProtocols;
+    in hasWPA3 && others != [];
+
+  # Gives a WPA3 network higher priority
+  increaseWPA3Priority = opts:
+    opts // optionalAttrs (hasMixedWPA opts)
+      { priority = if opts.priority == null
+                     then 1
+                     else opts.priority + 1;
+      };
+
+  # Creates a WPA2 fallback network
+  mkWPA2Fallback = opts:
+    opts // { authProtocols = subtractLists wpa3Protocols opts.authProtocols; };
+
+  # Networks attrset as a list
+  networkList = mapAttrsToList (ssid: opts: opts // { inherit ssid; })
+                cfg.networks;
+
+  # List of all networks (normal + generated fallbacks)
+  allNetworks =
+    if cfg.fallbackToWPA2
+      then map increaseWPA3Priority networkList
+           ++ map mkWPA2Fallback (filter hasMixedWPA networkList)
+      else networkList;
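+  # For example, a network declared with authProtocols = [ "WPA-PSK" "SAE" ] and
+  # no explicit priority yields two blocks when fallbackToWPA2 is enabled: the
+  # original one bumped to priority 1 (preferring WPA3) and a WPA2-only fallback
+  # with authProtocols = [ "WPA-PSK" ].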
+
+  # Content of wpa_supplicant.conf
+  generatedConfig = concatStringsSep "\n" (
+    (map mkNetwork allNetworks)
+    ++ optional cfg.userControlled.enable (concatStringsSep "\n"
+      [ "ctrl_interface=/run/wpa_supplicant"
+        "ctrl_interface_group=${cfg.userControlled.group}"
+        "update_config=1"
+      ])
+    ++ [ "pmf=1" ]
+    ++ optional cfg.scanOnLowSignal ''bgscan="simple:30:-70:3600"''
+    ++ optional (cfg.extraConfig != "") cfg.extraConfig);
+
+  configIsGenerated = with cfg;
+    networks != {} || extraConfig != "" || userControlled.enable;
+
+  # the original configuration file
+  configFile =
+    if configIsGenerated
+      then pkgs.writeText "wpa_supplicant.conf" generatedConfig
+      else "/etc/wpa_supplicant.conf";
+  # the config file with environment variables replaced
+  finalConfig = ''"$RUNTIME_DIRECTORY"/wpa_supplicant.conf'';
+
+  # Creates a network block for wpa_supplicant.conf
+  mkNetwork = opts:
+  let
+    quote = x: ''"${x}"'';
+    indent = x: "  " + x;
+
+    pskString = if opts.psk != null
+      then quote opts.psk
+      else opts.pskRaw;
+
+    options = [
+      "ssid=${quote opts.ssid}"
+      (if pskString != null || opts.auth != null
+        then "key_mgmt=${concatStringsSep " " opts.authProtocols}"
+        else "key_mgmt=NONE")
+    ] ++ optional opts.hidden "scan_ssid=1"
+      ++ optional (pskString != null) "psk=${pskString}"
+      ++ optionals (opts.auth != null) (filter (x: x != "") (splitString "\n" opts.auth))
+      ++ optional (opts.priority != null) "priority=${toString opts.priority}"
+      ++ optional (opts.extraConfig != "") opts.extraConfig;
+  in ''
+    network={
+    ${concatMapStringsSep "\n" indent options}
+    }
+  '';
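+  # For instance, networks."home".psk = "@PSK_HOME@" with the default
+  # authProtocols renders as:
+  #   network={
+  #     ssid="home"
+  #     key_mgmt=WPA-PSK WPA-EAP SAE FT-PSK FT-EAP FT-SAE
+  #     psk="@PSK_HOME@"
+  #   }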
+
+  # Creates a systemd unit for wpa_supplicant bound to a given (or any) interface
+  mkUnit = iface:
+    let
+      deviceUnit = optional (iface != null) "sys-subsystem-net-devices-${utils.escapeSystemdPath iface}.device";
+      configStr = if cfg.allowAuxiliaryImperativeNetworks
+        then "-c /etc/wpa_supplicant.conf -I ${finalConfig}"
+        else "-c ${finalConfig}";
+    in {
+      description = "WPA Supplicant instance" + optionalString (iface != null) " for interface ${iface}";
+
+      after = deviceUnit;
+      before = [ "network.target" ];
+      wants = [ "network.target" ];
+      requires = deviceUnit;
+      wantedBy = [ "multi-user.target" ];
+      stopIfChanged = false;
+
+      path = [ package ];
+      serviceConfig.RuntimeDirectory = "wpa_supplicant";
+      serviceConfig.RuntimeDirectoryMode = "700";
+      serviceConfig.EnvironmentFile = mkIf (cfg.environmentFile != null)
+        (builtins.toString cfg.environmentFile);
+
+      script =
+      ''
+        ${optionalString (configIsGenerated && !cfg.allowAuxiliaryImperativeNetworks) ''
+          if [ -f /etc/wpa_supplicant.conf ]; then
+            echo >&2 "<3>/etc/wpa_supplicant.conf present but ignored. Generated ${configFile} is used instead."
+          fi
+        ''}
+
+        # substitute environment variables
+        if [ -f "${configFile}" ]; then
+          ${pkgs.gawk}/bin/awk '{
+            for(varname in ENVIRON)
+              gsub("@"varname"@", ENVIRON[varname])
+            print
+          }' "${configFile}" > "${finalConfig}"
+        else
+          touch "${finalConfig}"
+        fi
+
+        iface_args="-s ${optionalString cfg.dbusControlled "-u"} -D${cfg.driver} ${configStr}"
+
+        ${if iface == null then ''
+          # detect interfaces automatically
+
+          # check if there are no wireless interfaces
+          if ! find -H /sys/class/net/* -name wireless | grep -q .; then
+            # if so, wait until one appears
+            echo "Waiting for wireless interfaces"
+            grep -q '^ACTION=add' < <(stdbuf -oL -- udevadm monitor -s net/wlan -pu)
+            # Note: the above line has been carefully written:
+            # 1. The process substitution avoids udevadm hanging (after grep has quit)
+            #    until it tries to write to the pipe again. Not even pipefail works here.
+            # 2. stdbuf is needed because udevadm output is buffered by default and grep
+            #    may hang until more udev events enter the pipe.
+          fi
+
+          # add any interface found to the daemon arguments
+          for name in $(find -H /sys/class/net/* -name wireless | cut -d/ -f 5); do
+            echo "Adding interface $name"
+            args+="''${args:+ -N} -i$name $iface_args"
+          done
+        '' else ''
+          # add known interface to the daemon arguments
+          args="-i${iface} $iface_args"
+        ''}
+
+        # finally start daemon
+        exec wpa_supplicant $args
+      '';
+    };
+
+  systemctl = "/run/current-system/systemd/bin/systemctl";
+
+in {
+  options = {
+    networking.wireless = {
+      enable = mkEnableOption (lib.mdDoc "wpa_supplicant");
+
+      interfaces = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "wlan0" "wlan1" ];
+        description = lib.mdDoc ''
+          The interfaces {command}`wpa_supplicant` will use. If empty, it will
+          automatically use all wireless interfaces.
+
+          ::: {.note}
+          A separate wpa_supplicant instance will be started for each interface.
+          :::
+        '';
+      };
+
+      driver = mkOption {
+        type = types.str;
+        default = "nl80211,wext";
+        description = lib.mdDoc "Force a specific wpa_supplicant driver.";
+      };
+
+      allowAuxiliaryImperativeNetworks = mkEnableOption (lib.mdDoc "support for imperative & declarative networks") // {
+        description = lib.mdDoc ''
+          Whether to allow configuring networks "imperatively" (e.g. via
+          `wpa_supplicant_gui`) and declaratively via
+          [](#opt-networking.wireless.networks).
+
+          Please note that this adds a custom patch to `wpa_supplicant`.
+        '';
+      };
+
+      scanOnLowSignal = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to periodically scan for (better) networks when the signal of
+          the current one is low. This will make roaming between access points
+          faster, but will consume more power.
+        '';
+      };
+
+      fallbackToWPA2 = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to fall back to WPA2 authentication protocols if WPA3 fails.
+          This allows old wireless cards (that lack recent features required by
+          WPA3) to connect to mixed WPA2/WPA3 access points.
+
+          To avoid possible downgrade attacks, disable this option.
+        '';
+      };
+
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/secrets/wireless.env";
+        description = lib.mdDoc ''
+          File consisting of lines of the form `varname=value`
+          to define variables for the wireless configuration.
+
+          See section "EnvironmentFile=" in {manpage}`systemd.exec(5)` for a syntax reference.
+
+          Secrets (PSKs, passwords, etc.) can be provided without adding them to
+          the world-readable Nix store by defining them in the environment file and
+          referring to them in option {option}`networking.wireless.networks`
+          with the syntax `@varname@`. Example:
+
+          ```
+          # content of /run/secrets/wireless.env
+          PSK_HOME=mypassword
+          PASS_WORK=myworkpassword
+          ```
+
+          ```
+          # wireless-related configuration
+          networking.wireless.environmentFile = "/run/secrets/wireless.env";
+          networking.wireless.networks = {
+            home.psk = "@PSK_HOME@";
+            work.auth = '''
+              eap=PEAP
+              identity="my-user@example.com"
+              password="@PASS_WORK@"
+            ''';
+          };
+          ```
+        '';
+      };
+
+      networks = mkOption {
+        type = types.attrsOf (types.submodule {
+          options = {
+            psk = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = lib.mdDoc ''
+                The network's pre-shared key in plaintext. The default
+                (`null`) configures a network without any authentication.
+
+                ::: {.warning}
+                Be aware that this will be written to the nix store
+                in plaintext! Use an environment variable instead.
+                :::
+
+                ::: {.note}
+                Mutually exclusive with {var}`pskRaw`.
+                :::
+              '';
+            };
+
+            pskRaw = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = lib.mdDoc ''
+                The network's pre-shared key in hex format. The default
+                (`null`) configures a network without any authentication.
+
+                ::: {.warning}
+                Be aware that this will be written to the nix store
+                in plaintext! Use an environment variable instead.
+                :::
+
+                ::: {.note}
+                Mutually exclusive with {var}`psk`.
+                :::
+              '';
+            };
+
+            authProtocols = mkOption {
+              default = [
+                # WPA2 and WPA3
+                "WPA-PSK" "WPA-EAP" "SAE"
+                # 802.11r variants of the above
+                "FT-PSK" "FT-EAP" "FT-SAE"
+              ];
+              # The list can be obtained by running this command
+              # awk '
+              #   /^# key_mgmt: /{ run=1 }
+              #   /^#$/{ run=0 }
+              #   /^# [A-Z0-9-]{2,}/{ if(run){printf("\"%s\"\n", $2)} }
+              # ' /run/current-system/sw/share/doc/wpa_supplicant/wpa_supplicant.conf.example
+              type = types.listOf (types.enum [
+                "WPA-PSK"
+                "WPA-EAP"
+                "IEEE8021X"
+                "NONE"
+                "WPA-NONE"
+                "FT-PSK"
+                "FT-EAP"
+                "FT-EAP-SHA384"
+                "WPA-PSK-SHA256"
+                "WPA-EAP-SHA256"
+                "SAE"
+                "FT-SAE"
+                "WPA-EAP-SUITE-B"
+                "WPA-EAP-SUITE-B-192"
+                "OSEN"
+                "FILS-SHA256"
+                "FILS-SHA384"
+                "FT-FILS-SHA256"
+                "FT-FILS-SHA384"
+                "OWE"
+                "DPP"
+              ]);
+              description = lib.mdDoc ''
+                The list of authentication protocols accepted by this network.
+                This corresponds to the `key_mgmt` option in wpa_supplicant.
+              '';
+            };
+
+            auth = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              example = ''
+                eap=PEAP
+                identity="user@example.com"
+                password="@EXAMPLE_PASSWORD@"
+              '';
+              description = lib.mdDoc ''
+                Use this option to configure advanced authentication methods like EAP.
+                See
+                {manpage}`wpa_supplicant.conf(5)`
+                for example configurations.
+
+                ::: {.warning}
+                Be aware that this will be written to the nix store
+                in plaintext! Use an environment variable for secrets.
+                :::
+
+                ::: {.note}
+                Mutually exclusive with {var}`psk` and
+                {var}`pskRaw`.
+                :::
+              '';
+            };
+
+            hidden = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Set this to `true` if the SSID of the network is hidden.
+              '';
+              example = literalExpression ''
+                { echelon = {
+                    hidden = true;
+                    psk = "abcdefgh";
+                  };
+                }
+              '';
+            };
+
+            priority = mkOption {
+              type = types.nullOr types.int;
+              default = null;
+              description = lib.mdDoc ''
+                By default, all networks will get same priority group (0). If some of the
+                networks are more desirable, this field can be used to change the order in
+                which wpa_supplicant goes through the networks when selecting a BSS. The
+                priority groups will be iterated in decreasing priority (i.e., the larger the
+                priority value, the sooner the network is matched against the scan results).
+                Within each priority group, networks will be selected based on security
+                policy, signal strength, etc.
+              '';
+            };
+
+            extraConfig = mkOption {
+              type = types.str;
+              default = "";
+              example = ''
+                bssid_blacklist=02:11:22:33:44:55 02:22:aa:44:55:66
+              '';
+              description = lib.mdDoc ''
+                Extra configuration lines appended to the network block.
+                See
+                {manpage}`wpa_supplicant.conf(5)`
+                for available options.
+              '';
+            };
+
+          };
+        });
+        description = lib.mdDoc ''
+          The network definitions to automatically connect to when
+          {command}`wpa_supplicant` is running. If this parameter is left
+          empty, wpa_supplicant will use
+          /etc/wpa_supplicant.conf as the configuration file.
+        '';
+        default = {};
+        example = literalExpression ''
+          { echelon = {                   # SSID with no spaces or special characters
+              psk = "abcdefgh";           # (password will be written to /nix/store!)
+            };
+
+            echelon = {                   # safe version of the above: read PSK from the
+              psk = "@PSK_ECHELON@";      # variable PSK_ECHELON, defined in environmentFile,
+            };                            # this won't leak into /nix/store
+
+            "echelon's AP" = {            # SSID with spaces and/or special characters
+               psk = "ijklmnop";          # (password will be written to /nix/store!)
+            };
+
+            "free.wifi" = {};             # Public wireless network
+          }
+        '';
+      };
+
+      userControlled = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Allow normal users to control wpa_supplicant through wpa_gui or wpa_cli.
+            This is useful for laptop users that switch networks a lot and don't want
+            to depend on a large package such as NetworkManager just to pick nearby
+            access points.
+
+            When using a declarative network specification you cannot persist any
+            settings via wpa_gui or wpa_cli.
+          '';
+        };
+
+        group = mkOption {
+          type = types.str;
+          default = "wheel";
+          example = "network";
+          description = lib.mdDoc "Members of this group can control wpa_supplicant.";
+        };
+      };
+
+      dbusControlled = mkOption {
+        type = types.bool;
+        default = lib.length cfg.interfaces < 2;
+        defaultText = literalExpression "length config.${opt.interfaces} < 2";
+        description = lib.mdDoc ''
+          Whether to enable the DBus control interface.
+          This is only needed when using NetworkManager or connman.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.str;
+        default = "";
+        example = ''
+          p2p_disabled=1
+        '';
+        description = lib.mdDoc ''
+          Extra lines appended to the configuration file.
+          See
+          {manpage}`wpa_supplicant.conf(5)`
+          for available options.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = flip mapAttrsToList cfg.networks (name: cfg: {
+      assertion = with cfg; count (x: x != null) [ psk pskRaw auth ] <= 1;
+      message = ''options networking.wireless."${name}".{psk,pskRaw,auth} are mutually exclusive'';
+    }) ++ [
+      {
+        assertion = length cfg.interfaces > 1 -> !cfg.dbusControlled;
+        message =
+          let daemon = if config.networking.networkmanager.enable then "NetworkManager" else
+                       if config.services.connman.enable then "connman" else null;
+              n = toString (length cfg.interfaces);
+          in ''
+            It's not possible to run multiple wpa_supplicant instances with DBus support.
+            Note: you're seeing this error because `networking.wireless.interfaces` has
+            ${n} entries, implying an equal number of wpa_supplicant instances.
+          '' + optionalString (daemon != null) ''
+            You don't need to change `networking.wireless.interfaces` when using ${daemon}:
+            in this case the interfaces will be configured automatically for you.
+          '';
+      }
+    ];
+
+    hardware.wirelessRegulatoryDatabase = true;
+
+    environment.systemPackages = [ package ];
+    services.dbus.packages = optional cfg.dbusControlled package;
+
+    systemd.services =
+      if cfg.interfaces == []
+        then { wpa_supplicant = mkUnit null; }
+        else listToAttrs (map (i: nameValuePair "wpa_supplicant-${i}" (mkUnit i)) cfg.interfaces);
+
+    # Restart wpa_supplicant after resuming from sleep
+    powerManagement.resumeCommands = concatStringsSep "\n" (
+      optional (cfg.interfaces == []) "${systemctl} try-restart wpa_supplicant"
+      ++ map (i: "${systemctl} try-restart wpa_supplicant-${i}") cfg.interfaces
+    );
+
+    # Restart wpa_supplicant when a wlan device appears or disappears. This is
+    # only needed when an interface hasn't been specified by the user.
+    services.udev.extraRules = optionalString (cfg.interfaces == []) ''
+      ACTION=="add|remove", SUBSYSTEM=="net", ENV{DEVTYPE}=="wlan", \
+      RUN+="${systemctl} try-restart wpa_supplicant.service"
+    '';
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+}
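A short sketch tying the wireless options together (SSIDs and the secrets file are placeholders):

    networking.wireless = {
      enable = true;
      interfaces = [ "wlan0" ];            # one wpa_supplicant instance, for wlan0 only
      userControlled.enable = true;        # members of "wheel" may use wpa_cli / wpa_gui
      environmentFile = "/run/secrets/wireless.env";
      networks = {
        "home-network".psk = "@PSK_HOME@"; # PSK_HOME is defined in the environment file
        "free.wifi" = { };                 # open network
      };
    };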
diff --git a/nixpkgs/nixos/modules/services/networking/wstunnel.nix b/nixpkgs/nixos/modules/services/networking/wstunnel.nix
new file mode 100644
index 000000000000..3c3ecc3e04d7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/wstunnel.nix
@@ -0,0 +1,429 @@
+{ config, lib, options, pkgs, utils, ... }:
+with lib;
+let
+  cfg = config.services.wstunnel;
+  attrsToArgs = attrs: utils.escapeSystemdExecArgs (
+    mapAttrsToList
+    (name: value: if value == true then "--${name}" else "--${name}=${value}")
+    attrs
+  );
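+  # e.g. { udpTimeoutSec = "60"; verbose = true; } becomes "--udpTimeoutSec=60 --verbose"
+  # (escaped for use in a systemd ExecStart line).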
+  hostPortSubmodule = {
+    options = {
+      host = mkOption {
+        description = mdDoc "The hostname.";
+        type = types.str;
+      };
+      port = mkOption {
+        description = mdDoc "The port.";
+        type = types.port;
+      };
+    };
+  };
+  localRemoteSubmodule = {
+    options = {
+      local = mkOption {
+        description = mdDoc "Local address and port to listen on.";
+        type = types.submodule hostPortSubmodule;
+        example = {
+          host = "127.0.0.1";
+          port = 51820;
+        };
+      };
+      remote = mkOption {
+        description = mdDoc "Address and port on remote to forward traffic to.";
+        type = types.submodule hostPortSubmodule;
+        example = {
+          host = "127.0.0.1";
+          port = 51820;
+        };
+      };
+    };
+  };
+  hostPortToString = { host, port }: "${host}:${builtins.toString port}";
+  localRemoteToString = { local, remote }: utils.escapeSystemdExecArg "${hostPortToString local}:${hostPortToString remote}";
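+  # e.g. localRemoteToString { local = { host = "127.0.0.1"; port = 8080; };
+  #                            remote = { host = "10.0.0.1"; port = 80; }; }
+  # yields "127.0.0.1:8080:10.0.0.1:80".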
+  commonOptions = {
+    enable = mkOption {
+      description = mdDoc "Whether to enable this `wstunnel` instance.";
+      type = types.bool;
+      default = true;
+    };
+
+    package = mkPackageOptionMD pkgs "wstunnel" {};
+
+    autoStart = mkOption {
+      description = mdDoc "Whether this tunnel server should be started automatically.";
+      type = types.bool;
+      default = true;
+    };
+
+    extraArgs = mkOption {
+      description = mdDoc "Extra command line arguments to pass to `wstunnel`. Attributes of the form `argName = true;` will be translated to `--argName`, and `argName = \"value\"` to `--argName=value`.";
+      type = with types; attrsOf (either str bool);
+      default = {};
+      example = {
+        "someNewOption" = true;
+        "someNewOptionWithValue" = "someValue";
+      };
+    };
+
+    verboseLogging = mkOption {
+      description = mdDoc "Enable verbose logging.";
+      type = types.bool;
+      default = false;
+    };
+
+    environmentFile = mkOption {
+      description = mdDoc "Environment file to be passed to the systemd service. Useful for passing secrets to the service to prevent them from being world-readable in the Nix store. Note however that the secrets are passed to `wstunnel` through the command line, which makes them locally readable for all users of the system at runtime.";
+      type = types.nullOr types.path;
+      default = null;
+      example = "/var/lib/secrets/wstunnelSecrets";
+    };
+  };
+
+  serverSubmodule = { config, ...}: {
+    options = commonOptions // {
+      listen = mkOption {
+        description = mdDoc "Address and port to listen on. Setting the port to a value below 1024 will also give the process the required `CAP_NET_BIND_SERVICE` capability.";
+        type = types.submodule hostPortSubmodule;
+        default = {
+          host = "0.0.0.0";
+          port = if config.enableHTTPS then 443 else 80;
+        };
+        defaultText = literalExpression ''
+          {
+            host = "0.0.0.0";
+            port = if enableHTTPS then 443 else 80;
+          }
+        '';
+      };
+
+      restrictTo = mkOption {
+        description = mdDoc "Accepted traffic will be forwarded only to this service. Set to `null` to allow forwarding to arbitrary addresses.";
+        type = types.nullOr (types.submodule hostPortSubmodule);
+        example = {
+          host = "127.0.0.1";
+          port = 51820;
+        };
+      };
+
+      enableHTTPS = mkOption {
+        description = mdDoc "Use HTTPS for the tunnel server.";
+        type = types.bool;
+        default = true;
+      };
+
+      tlsCertificate = mkOption {
+        description = mdDoc "TLS certificate to use instead of the hardcoded one in case of HTTPS connections. Use together with `tlsKey`.";
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/lib/secrets/cert.pem";
+      };
+
+      tlsKey = mkOption {
+        description = mdDoc "TLS key to use instead of the hardcoded on in case of HTTPS connections. Use together with `tlsCertificate`.";
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/lib/secrets/key.pem";
+      };
+
+      useACMEHost = mkOption {
+        description = mdDoc "Use a certificate generated by the NixOS ACME module for the given host. Note that this will not generate a new certificate - you will need to do so with `security.acme.certs`.";
+        type = types.nullOr types.str;
+        default = null;
+        example = "example.com";
+      };
+    };
+  };
+  clientSubmodule = { config, ... }: {
+    options = commonOptions // {
+      connectTo = mkOption {
+        description = mdDoc "Server address and port to connect to.";
+        type = types.submodule hostPortSubmodule;
+        example = {
+          host = "example.com";
+        };
+      };
+
+      enableHTTPS = mkOption {
+        description = mdDoc "Enable HTTPS when connecting to the server.";
+        type = types.bool;
+        default = true;
+      };
+
+      localToRemote = mkOption {
+        description = mdDoc "Local hosts and ports to listen on, plus the hosts and ports on remote to forward traffic to. Setting a local port to a value less than 1024 will additionally give the process the required CAP_NET_BIND_SERVICE capability.";
+        type = types.listOf (types.submodule localRemoteSubmodule);
+        default = [];
+        example = [ {
+          local = {
+            host = "127.0.0.1";
+            port = 8080;
+          };
+          remote = {
+            host = "127.0.0.1";
+            port = 8080;
+          };
+        } ];
+      };
+
+      dynamicToRemote = mkOption {
+        description = mdDoc "Host and port for the SOCKS5 proxy to dynamically forward traffic to. Leave this at `null` to disable the SOCKS5 proxy. Setting the port to a value less than 1024 will additionally give the service the required CAP_NET_BIND_SERVICE capability.";
+        type = types.nullOr (types.submodule hostPortSubmodule);
+        default = null;
+        example = {
+          host = "127.0.0.1";
+          port = 1080;
+        };
+      };
+
+      udp = mkOption {
+        description = mdDoc "Whether to forward UDP instead of TCP traffic.";
+        type = types.bool;
+        default = false;
+      };
+
+      udpTimeout = mkOption {
+        description = mdDoc "When using UDP forwarding, timeout in seconds after which the tunnel connection is closed. `-1` means no timeout.";
+        type = types.int;
+        default = 30;
+      };
+
+      httpProxy = mkOption {
+        description = mdDoc ''
+          Proxy to use to connect to the wstunnel server (`USER:PASS@HOST:PORT`).
+
+          ::: {.warning}
+          Passwords specified here will be world-readable in the Nix store! To pass a password to the service, point the `environmentFile` option to a file containing `PROXY_PASSWORD=<your-password-here>` and set this option to `<user>:$PROXY_PASSWORD@<host>:<port>`. Note however that this will also locally leak the passwords at runtime via e.g. /proc/<pid>/cmdline.
+
+          :::
+        '';
+        type = types.nullOr types.str;
+        default = null;
+      };
+
+      soMark = mkOption {
+        description = mdDoc "Mark network packets with the SO_MARK sockoption with the specified value. Setting this option will also enable the required `CAP_NET_ADMIN` capability for the systemd service.";
+        type = types.nullOr types.int;
+        default = null;
+      };
+
+      upgradePathPrefix = mkOption {
+        description = mdDoc "Use a specific HTTP path prefix that will show up in the upgrade request to the `wstunnel` server. Useful when running `wstunnel` behind a reverse proxy.";
+        type = types.nullOr types.str;
+        default = null;
+        example = "wstunnel";
+      };
+
+      hostHeader = mkOption {
+        description = mdDoc "Use this as the HTTP host header instead of the real hostname. Useful for circumventing hostname-based firewalls.";
+        type = types.nullOr types.str;
+        default = null;
+      };
+
+      tlsSNI = mkOption {
+        description = mdDoc "Use this as the SNI while connecting via TLS. Useful for circumventing hostname-based firewalls.";
+        type = types.nullOr types.str;
+        default = null;
+      };
+
+      tlsVerifyCertificate = mkOption {
+        description = mdDoc "Whether to verify the TLS certificate of the server. It might be useful to set this to `false` when working with the `tlsSNI` option.";
+        type = types.bool;
+        default = true;
+      };
+
+      # The original argument name `websocketPingFrequency` is a misnomer, as the frequency is the inverse of the interval.
+      websocketPingInterval = mkOption {
+        description = mdDoc "Do a heartbeat ping every N seconds to keep up the websocket connection.";
+        type = types.nullOr types.ints.unsigned;
+        default = null;
+      };
+
+      upgradeCredentials = mkOption {
+        description = mdDoc ''
+          Use these credentials to authenticate during the HTTP upgrade request (Basic authorization type, `USER:[PASS]`).
+
+          ::: {.warning}
+          Passwords specified here will be world-readable in the Nix store! To pass a password to the service, point the `environmentFile` option to a file containing `HTTP_PASSWORD=<your-password-here>` and set this option to `<user>:$HTTP_PASSWORD`. Note however that this will also locally leak the passwords at runtime via e.g. /proc/<pid>/cmdline.
+          :::
+        '';
+        type = types.nullOr types.str;
+        default = null;
+      };
+
+      customHeaders = mkOption {
+        description = mdDoc "Custom HTTP headers to send during the upgrade request.";
+        type = types.attrsOf types.str;
+        default = {};
+        example = {
+          "X-Some-Header" = "some-value";
+        };
+      };
+    };
+  };
+  generateServerUnit = name: serverCfg: {
+    name = "wstunnel-server-${name}";
+    value = {
+      description = "wstunnel server - ${name}";
+      requires = [ "network.target" "network-online.target" ];
+      after = [ "network.target" "network-online.target" ];
+      wantedBy = optional serverCfg.autoStart "multi-user.target";
+
+      serviceConfig = let
+        certConfig = config.security.acme.certs."${serverCfg.useACMEHost}";
+      in {
+        Type = "simple";
+        ExecStart = with serverCfg; let
+          resolvedTlsCertificate = if useACMEHost != null
+            then "${certConfig.directory}/fullchain.pem"
+            else tlsCertificate;
+          resolvedTlsKey = if useACMEHost != null
+            then "${certConfig.directory}/key.pem"
+            else tlsKey;
+        in ''
+          ${package}/bin/wstunnel \
+            --server \
+            ${optionalString (restrictTo != null)     "--restrictTo=${utils.escapeSystemdExecArg (hostPortToString restrictTo)}"} \
+            ${optionalString (resolvedTlsCertificate != null) "--tlsCertificate=${utils.escapeSystemdExecArg resolvedTlsCertificate}"} \
+            ${optionalString (resolvedTlsKey != null)         "--tlsKey=${utils.escapeSystemdExecArg resolvedTlsKey}"} \
+            ${optionalString verboseLogging "--verbose"} \
+            ${attrsToArgs extraArgs} \
+            ${utils.escapeSystemdExecArg "${if enableHTTPS then "wss" else "ws"}://${hostPortToString listen}"}
+        '';
+        EnvironmentFile = optional (serverCfg.environmentFile != null) serverCfg.environmentFile;
+        DynamicUser = true;
+        SupplementaryGroups = optional (serverCfg.useACMEHost != null) certConfig.group;
+        PrivateTmp = true;
+        AmbientCapabilities = optionals (serverCfg.listen.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+        NoNewPrivileges = true;
+        RestrictNamespaces = "uts ipc pid user cgroup";
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        PrivateDevices = true;
+        RestrictSUIDSGID = true;
+
+      };
+    };
+  };
+  generateClientUnit = name: clientCfg: {
+    name = "wstunnel-client-${name}";
+    value = {
+      description = "wstunnel client - ${name}";
+      requires = [ "network.target" "network-online.target" ];
+      after = [ "network.target" "network-online.target" ];
+      wantedBy = optional clientCfg.autoStart "multi-user.target";
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = with clientCfg; ''
+          ${package}/bin/wstunnel \
+            ${concatStringsSep " " (builtins.map (x:          "--localToRemote=${localRemoteToString x}") localToRemote)} \
+            ${concatStringsSep " " (mapAttrsToList (n: v:     "--customHeaders=\"${n}: ${v}\"") customHeaders)} \
+            ${optionalString (dynamicToRemote != null)        "--dynamicToRemote=${utils.escapeSystemdExecArg (hostPortToString dynamicToRemote)}"} \
+            ${optionalString udp                              "--udp"} \
+            ${optionalString (httpProxy != null)              "--httpProxy=${httpProxy}"} \
+            ${optionalString (soMark != null)                 "--soMark=${toString soMark}"} \
+            ${optionalString (upgradePathPrefix != null)      "--upgradePathPrefix=${upgradePathPrefix}"} \
+            ${optionalString (hostHeader != null)             "--hostHeader=${hostHeader}"} \
+            ${optionalString (tlsSNI != null)                 "--tlsSNI=${tlsSNI}"} \
+            ${optionalString tlsVerifyCertificate             "--tlsVerifyCertificate"} \
+            ${optionalString (websocketPingInterval != null)  "--websocketPingFrequency=${toString websocketPingInterval}"} \
+            ${optionalString (upgradeCredentials != null)     "--upgradeCredentials=${upgradeCredentials}"} \
+            --udpTimeoutSec=${toString udpTimeout} \
+            ${optionalString verboseLogging "--verbose"} \
+            ${attrsToArgs extraArgs} \
+            ${utils.escapeSystemdExecArg "${if enableHTTPS then "wss" else "ws"}://${hostPortToString connectTo}"}
+        '';
+        EnvironmentFile = optional (clientCfg.environmentFile != null) clientCfg.environmentFile;
+        DynamicUser = true;
+        PrivateTmp = true;
+        AmbientCapabilities = (optionals (clientCfg.soMark != null) [ "CAP_NET_ADMIN" ]) ++ (optionals ((clientCfg.dynamicToRemote.port or 1024) < 1024 || (any (x: x.local.port < 1024) clientCfg.localToRemote)) [ "CAP_NET_BIND_SERVICE" ]);
+        NoNewPrivileges = true;
+        RestrictNamespaces = "uts ipc pid user cgroup";
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        PrivateDevices = true;
+        RestrictSUIDSGID = true;
+      };
+    };
+  };
+in {
+  options.services.wstunnel = {
+    enable = mkEnableOption (mdDoc "wstunnel");
+
+    servers = mkOption {
+      description = mdDoc "`wstunnel` servers to set up.";
+      type = types.attrsOf (types.submodule serverSubmodule);
+      default = {};
+      example = {
+        "wg-tunnel" = {
+          listen.port = 8080;
+          enableHTTPS = true;
+          tlsCertificate = "/var/lib/secrets/fullchain.pem";
+          tlsKey = "/var/lib/secrets/key.pem";
+          restrictTo = {
+            host = "127.0.0.1";
+            port = 51820;
+          };
+        };
+      };
+    };
+
+    clients = mkOption {
+      description = mdDoc "`wstunnel` clients to set up.";
+      type = types.attrsOf (types.submodule clientSubmodule);
+      default = {};
+      example = {
+        "wg-tunnel" = {
+          connectTo = {
+            host = "example.com";
+            port = 8080;
+          };
+          enableHTTPS = true;
+          localToRemote = {
+            local = {
+              host = "127.0.0.1";
+              port = 51820;
+            };
+            remote = {
+              host = "127.0.0.1";
+              port = 51820;
+            };
+          };
+          udp = true;
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services = (mapAttrs' generateServerUnit (filterAttrs (n: v: v.enable) cfg.servers)) // (mapAttrs' generateClientUnit (filterAttrs (n: v: v.enable) cfg.clients));
+
+    assertions = (mapAttrsToList (name: serverCfg: {
+      assertion = !(serverCfg.useACMEHost != null && (serverCfg.tlsCertificate != null || serverCfg.tlsKey != null));
+      message = ''
+        Options services.wstunnel.servers."${name}".useACMEHost and services.wstunnel.servers."${name}".{tlsCertificate, tlsKey} are mutually exclusive.
+      '';
+    }) cfg.servers) ++
+    (mapAttrsToList (name: serverCfg: {
+      assertion = !((serverCfg.tlsCertificate != null || serverCfg.tlsKey != null) && !(serverCfg.tlsCertificate != null && serverCfg.tlsKey != null));
+      message = ''
+        services.wstunnel.servers."${name}".tlsCertificate and services.wstunnel.servers."${name}".tlsKey need to be set together.
+      '';
+    }) cfg.servers) ++
+    (mapAttrsToList (name: clientCfg: {
+      assertion = !(clientCfg.localToRemote == [] && clientCfg.dynamicToRemote == null);
+      message = ''
+        Either one of services.wstunnel.clients."${name}".localToRemote or services.wstunnel.clients."${name}".dynamicToRemote must be set.
+      '';
+    }) cfg.clients);
+  };
+
+  meta.maintainers = with maintainers; [ alyaeanyx ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/x2goserver.nix b/nixpkgs/nixos/modules/services/networking/x2goserver.nix
new file mode 100644
index 000000000000..1242229a0b60
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/x2goserver.nix
@@ -0,0 +1,164 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.x2goserver;
+
+  defaults = {
+    superenicer = { enable = cfg.superenicer.enable; };
+  };
+  confText = generators.toINI {} (recursiveUpdate defaults cfg.settings);
+  x2goServerConf = pkgs.writeText "x2goserver.conf" confText;
+
+  x2goAgentOptions = pkgs.writeText "x2goagent.options" ''
+    X2GO_NXOPTIONS=""
+    X2GO_NXAGENT_DEFAULT_OPTIONS="${concatStringsSep " " cfg.nxagentDefaultOptions}"
+  '';
+
+in {
+  imports = [
+    (mkRenamedOptionModule [ "programs" "x2goserver" ] [ "services" "x2goserver" ])
+  ];
+
+  options.services.x2goserver = {
+    enable = mkEnableOption (lib.mdDoc "x2goserver") // {
+      description = lib.mdDoc ''
+        Enables the x2goserver module.
+        NOTE: This will create a number of symlinks in `/usr/local/bin`.
+      '';
+    };
+
+    superenicer = {
+      enable = mkEnableOption (lib.mdDoc "superenicer") // {
+        description = lib.mdDoc ''
+          Enables the SupeReNicer code in x2gocleansessions. This renices
+          suspended sessions to nice level 19 and renices them back to level 0
+          once the session is marked as running again.
+        '';
+      };
+    };
+
+    nxagentDefaultOptions = mkOption {
+      type = types.listOf types.str;
+      default = [ "-extension GLX" "-nolisten tcp" ];
+      description = lib.mdDoc ''
+        List of default nx agent options.
+      '';
+    };
+
+    settings = mkOption {
+      type = types.attrsOf types.attrs;
+      default = {};
+      description = lib.mdDoc ''
+        x2goserver.conf ini configuration as nix attributes. See
+        `x2goserver.conf(5)` for details
+      '';
+      example = literalExpression ''
+        {
+          superenicer = {
+            "enable" = "yes";
+            "idle-nice-level" = 19;
+          };
+          telekinesis = { "enable" = "no"; };
+        }
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    # x2goserver can run X11 programs even if "services.xserver.enable = false"
+    xdg = {
+      autostart.enable = true;
+      menus.enable = true;
+      mime.enable = true;
+      icons.enable = true;
+    };
+
+    environment.systemPackages = [ pkgs.x2goserver ];
+
+    users.groups.x2go = {};
+    users.users.x2go = {
+      home = "/var/lib/x2go/db";
+      group = "x2go";
+      isSystemUser = true;
+    };
+
+    security.wrappers.x2gosqliteWrapper = {
+      source = "${pkgs.x2goserver}/lib/x2go/libx2go-server-db-sqlite3-wrapper.pl";
+      owner = "x2go";
+      group = "x2go";
+      setuid = false;
+      setgid = true;
+    };
+    security.wrappers.x2goprintWrapper = {
+      source = "${pkgs.x2goserver}/bin/x2goprint";
+      owner = "x2go";
+      group = "x2go";
+      setuid = false;
+      setgid = true;
+    };
+
+    systemd.tmpfiles.rules = with pkgs; [
+      "d /var/lib/x2go/ - x2go x2go - -"
+      "d /var/lib/x2go/db - x2go x2go - -"
+      "d /var/lib/x2go/conf - x2go x2go - -"
+      "d /run/x2go 0755 x2go x2go - -"
+    ] ++
+    # x2goclient sends SSH commands with preset PATH set to
+    # "/usr/local/bin;/usr/bin;/bin". Since we cannot filter arbitrary ssh
+    # commands, we have to make the following executables available.
+    map (f: "L+ /usr/local/bin/${f} - - - - ${x2goserver}/bin/${f}") [
+      "x2goagent" "x2gobasepath" "x2gocleansessions" "x2gocmdexitmessage"
+      "x2godbadmin" "x2gofeature" "x2gofeaturelist" "x2gofm" "x2gogetapps"
+      "x2gogetservers" "x2golistdesktops" "x2golistmounts" "x2golistsessions"
+      "x2golistsessions_root" "x2golistshadowsessions" "x2gomountdirs"
+      "x2gopath" "x2goprint" "x2goresume-desktopsharing" "x2goresume-session"
+      "x2goruncommand" "x2goserver-run-extensions" "x2gosessionlimit"
+      "x2gosetkeyboard" "x2goshowblocks" "x2gostartagent"
+      "x2gosuspend-desktopsharing" "x2gosuspend-session"
+      "x2goterminate-desktopsharing" "x2goterminate-session"
+      "x2goumount-session" "x2goversion"
+    ] ++ [
+      "L+ /usr/local/bin/awk - - - - ${gawk}/bin/awk"
+      "L+ /usr/local/bin/chmod - - - - ${coreutils}/bin/chmod"
+      "L+ /usr/local/bin/cp - - - - ${coreutils}/bin/cp"
+      "L+ /usr/local/bin/sed - - - - ${gnused}/bin/sed"
+      "L+ /usr/local/bin/setsid - - - - ${util-linux}/bin/setsid"
+      "L+ /usr/local/bin/xrandr - - - - ${xorg.xrandr}/bin/xrandr"
+      "L+ /usr/local/bin/xmodmap - - - - ${xorg.xmodmap}/bin/xmodmap"
+    ];
+
+    systemd.services.x2goserver = {
+      description = "X2Go Server Daemon";
+      wantedBy = [ "multi-user.target" ];
+      unitConfig.Documentation = "man:x2goserver.conf(5)";
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.x2goserver}/bin/x2gocleansessions";
+        PIDFile = "/run/x2go/x2goserver.pid";
+        User = "x2go";
+        Group = "x2go";
+        RuntimeDirectory = "x2go";
+        StateDirectory = "x2go";
+      };
+      preStart = ''
+        if [ ! -e /var/lib/x2go/setup_ran ]
+        then
+          mkdir -p /var/lib/x2go/conf
+          cp -r ${pkgs.x2goserver}/etc/x2go/* /var/lib/x2go/conf/
+          ln -sf ${x2goServerConf} /var/lib/x2go/conf/x2goserver.conf
+          ln -sf ${x2goAgentOptions} /var/lib/x2go/conf/x2goagent.options
+          ${pkgs.x2goserver}/bin/x2godbadmin --createdb
+          touch /var/lib/x2go/setup_ran
+        fi
+      '';
+    };
+
+    # https://bugs.x2go.org/cgi-bin/bugreport.cgi?bug=276
+    security.sudo.extraConfig = ''
+      Defaults  env_keep+=QT_GRAPHICSSYSTEM
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/xandikos.nix b/nixpkgs/nixos/modules/services/networking/xandikos.nix
new file mode 100644
index 000000000000..6d1ddc74c719
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/xandikos.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xandikos;
+in
+{
+
+  options = {
+    services.xandikos = {
+      enable = mkEnableOption (lib.mdDoc "Xandikos CalDAV and CardDAV server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.xandikos;
+        defaultText = literalExpression "pkgs.xandikos";
+        description = lib.mdDoc "The Xandikos package to use.";
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc ''
+          The IP address on which Xandikos will listen.
+          By default it listens on localhost.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = lib.mdDoc "The port of the Xandikos web application";
+      };
+
+      routePrefix = mkOption {
+        type = types.str;
+        default = "/";
+        description = lib.mdDoc ''
+          The path prefix under which Xandikos is served.
+          Useful when Xandikos is behind a reverse proxy.
+        '';
+      };
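+      # A hedged sketch of serving Xandikos under a sub-path behind a reverse
+      # proxy (host name and prefix are illustrative):
+      #
+      #   services.xandikos.routePrefix = "/dav/";
+      #   services.nginx.virtualHosts."dav.example.org".locations."/dav/".proxyPass =
+      #     "http://localhost:8080/dav/";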
+
+      extraOptions = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = literalExpression ''
+          [ "--autocreate"
+            "--defaults"
+            "--current-user-principal user"
+            "--dump-dav-xml"
+          ]
+        '';
+        description = lib.mdDoc ''
+          Extra command line arguments to pass to xandikos.
+        '';
+      };
+
+      nginx = mkOption {
+        default = {};
+        description = lib.mdDoc ''
+          Configuration for nginx reverse proxy.
+        '';
+
+        type = types.submodule {
+          options = {
+            enable = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Configure the nginx reverse proxy settings.
+              '';
+            };
+
+            hostName = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                The hostname to use to set up the virtual host configuration.
+              '';
+            };
+          };
+        };
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable (
+    mkMerge [
+      {
+        meta.maintainers = with lib.maintainers; [ _0x4A6F ];
+
+        systemd.services.xandikos = {
+          description = "A Simple Calendar and Contact Server";
+          after = [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+
+          serviceConfig = {
+            User = "xandikos";
+            Group = "xandikos";
+            DynamicUser = "yes";
+            RuntimeDirectory = "xandikos";
+            StateDirectory = "xandikos";
+            StateDirectoryMode = "0700";
+            PrivateDevices = true;
+            # Sandboxing
+            CapabilityBoundingSet = "CAP_NET_RAW CAP_NET_ADMIN";
+            ProtectSystem = "strict";
+            ProtectHome = true;
+            PrivateTmp = true;
+            ProtectKernelTunables = true;
+            ProtectKernelModules = true;
+            ProtectControlGroups = true;
+            RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX AF_PACKET AF_NETLINK";
+            RestrictNamespaces = true;
+            LockPersonality = true;
+            MemoryDenyWriteExecute = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            ExecStart = ''
+              ${cfg.package}/bin/xandikos \
+                --directory /var/lib/xandikos \
+                --listen-address ${cfg.address} \
+                --port ${toString cfg.port} \
+                --route-prefix ${cfg.routePrefix} \
+                ${lib.concatStringsSep " " cfg.extraOptions}
+            '';
+          };
+        };
+      }
+
+      (
+        mkIf cfg.nginx.enable {
+          services.nginx = {
+            enable = true;
+            virtualHosts."${cfg.nginx.hostName}" = {
+              locations."/" = {
+                proxyPass = "http://${cfg.address}:${toString cfg.port}/";
+              };
+            };
+          };
+        }
+      )
+    ]
+  );
+}
diff --git a/nixpkgs/nixos/modules/services/networking/xinetd.nix b/nixpkgs/nixos/modules/services/networking/xinetd.nix
new file mode 100644
index 000000000000..fb3de7077e31
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/xinetd.nix
@@ -0,0 +1,147 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xinetd;
+
+  configFile = pkgs.writeText "xinetd.conf"
+    ''
+      defaults
+      {
+        log_type       = SYSLOG daemon info
+        log_on_failure = HOST
+        log_on_success = PID HOST DURATION EXIT
+        ${cfg.extraDefaults}
+      }
+
+      ${concatMapStrings makeService cfg.services}
+    '';
+
+  makeService = srv:
+    ''
+      service ${srv.name}
+      {
+        protocol    = ${srv.protocol}
+        ${optionalString srv.unlisted "type        = UNLISTED"}
+        ${optionalString (srv.flags != "") "flags = ${srv.flags}"}
+        socket_type = ${if srv.protocol == "udp" then "dgram" else "stream"}
+        ${optionalString (srv.port != 0) "port        = ${toString srv.port}"}
+        wait        = ${if srv.protocol == "udp" then "yes" else "no"}
+        user        = ${srv.user}
+        server      = ${srv.server}
+        ${optionalString (srv.serverArgs != "") "server_args = ${srv.serverArgs}"}
+        ${srv.extraConfig}
+      }
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.xinetd.enable = mkEnableOption (lib.mdDoc "the xinetd super-server daemon");
+
+    services.xinetd.extraDefaults = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Additional configuration lines added to the default section of xinetd's configuration.
+      '';
+    };
+
+    services.xinetd.services = mkOption {
+      default = [];
+      description = lib.mdDoc ''
+        A list of services provided by xinetd.
+      '';
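+      # A hedged example of a single service entry (the server path is
+      # illustrative, not a real package output):
+      #
+      #   services.xinetd.services = [
+      #     {
+      #       name = "tftp";
+      #       protocol = "udp";
+      #       server = "/path/to/in.tftpd";
+      #       serverArgs = "-s /srv/tftp";
+      #     }
+      #   ];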
+
+      type = with types; listOf (submodule ({
+
+        options = {
+
+          name = mkOption {
+            type = types.str;
+            example = "login";
+            description = lib.mdDoc "Name of the service.";
+          };
+
+          protocol = mkOption {
+            type = types.str;
+            default = "tcp";
+            description =
+              lib.mdDoc "Protocol of the service.  Usually `tcp` or `udp`.";
+          };
+
+          port = mkOption {
+            type = types.port;
+            default = 0;
+            example = 123;
+            description = lib.mdDoc "Port number of the service.";
+          };
+
+          user = mkOption {
+            type = types.str;
+            default = "nobody";
+            description = lib.mdDoc "User account for the service";
+          };
+
+          server = mkOption {
+            type = types.str;
+            example = "/foo/bin/ftpd";
+            description = lib.mdDoc "Path of the program that implements the service.";
+          };
+
+          serverArgs = mkOption {
+            type = types.separatedString " ";
+            default = "";
+            description = lib.mdDoc "Command-line arguments for the server program.";
+          };
+
+          flags = mkOption {
+            type = types.str;
+            default = "";
+            description = lib.mdDoc "";
+          };
+
+          unlisted = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether this service is absent from
+              {file}`/etc/services`.  If the service is
+              listed there, the port number can be omitted.
+            '';
+          };
+
+          extraConfig = mkOption {
+            type = types.lines;
+            default = "";
+            description = lib.mdDoc "Extra configuration-lines added to the section of the service.";
+          };
+
+        };
+
+      }));
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.xinetd = {
+      description = "xinetd server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.xinetd ];
+      script = "exec xinetd -syslog daemon -dontfork -stayalive -f ${configFile}";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/xl2tpd.nix b/nixpkgs/nixos/modules/services/networking/xl2tpd.nix
new file mode 100644
index 000000000000..7d2595707612
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/xl2tpd.nix
@@ -0,0 +1,143 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options = {
+    services.xl2tpd = {
+      enable = mkEnableOption (lib.mdDoc "xl2tpd, the Layer 2 Tunnelling Protocol Daemon");
+
+      serverIp = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "The server-side IP address.";
+        default     = "10.125.125.1";
+      };
+
+      clientIpRange = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "The range from which client IPs are drawn.";
+        default     = "10.125.125.2-11";
+      };
+
+      extraXl2tpOptions = mkOption {
+        type        = types.lines;
+        description = lib.mdDoc "Adds extra lines to the xl2tpd configuration file.";
+        default     = "";
+      };
+
+      extraPppdOptions = mkOption {
+        type        = types.lines;
+        description = lib.mdDoc "Adds extra lines to the pppd options file.";
+        default     = "";
+        example     = ''
+          ms-dns 8.8.8.8
+          ms-dns 8.8.4.4
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.xl2tpd.enable {
+    systemd.services.xl2tpd = let
+      cfg = config.services.xl2tpd;
+
+      # Config files from https://help.ubuntu.com/community/L2TPServer
+      xl2tpd-conf = pkgs.writeText "xl2tpd.conf" ''
+        [global]
+        ipsec saref = no
+
+        [lns default]
+        local ip = ${cfg.serverIp}
+        ip range = ${cfg.clientIpRange}
+        pppoptfile = ${pppd-options}
+        length bit = yes
+
+        ; Extra
+        ${cfg.extraXl2tpOptions}
+      '';
+
+      pppd-options = pkgs.writeText "ppp-options-xl2tpd.conf" ''
+        refuse-pap
+        refuse-chap
+        refuse-mschap
+        require-mschap-v2
+        # require-mppe-128
+        asyncmap 0
+        auth
+        crtscts
+        idle 1800
+        mtu 1200
+        mru 1200
+        lock
+        hide-password
+        local
+        # debug
+        name xl2tpd
+        # proxyarp
+        lcp-echo-interval 30
+        lcp-echo-failure 4
+
+        # Extra:
+        ${cfg.extraPppdOptions}
+      '';
+
+      xl2tpd-ppp-wrapped = pkgs.stdenv.mkDerivation {
+        name         = "xl2tpd-ppp-wrapped";
+        phases       = [ "installPhase" ];
+        nativeBuildInputs  = with pkgs; [ makeWrapper ];
+        installPhase = ''
+          mkdir -p $out/bin
+
+          makeWrapper ${pkgs.ppp}/sbin/pppd $out/bin/pppd \
+            --set LD_PRELOAD    "${pkgs.libredirect}/lib/libredirect.so" \
+            --set NIX_REDIRECTS "/etc/ppp=/etc/xl2tpd/ppp"
+
+          makeWrapper ${pkgs.xl2tpd}/bin/xl2tpd $out/bin/xl2tpd \
+            --set LD_PRELOAD    "${pkgs.libredirect}/lib/libredirect.so" \
+            --set NIX_REDIRECTS "${pkgs.ppp}/sbin/pppd=$out/bin/pppd"
+        '';
+      };
+    in {
+      description = "xl2tpd server";
+
+      requires = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        mkdir -p -m 700 /etc/xl2tpd
+
+        pushd /etc/xl2tpd > /dev/null
+
+        mkdir -p -m 700 ppp
+
+        [ -f ppp/chap-secrets ] || cat > ppp/chap-secrets << EOF
+        # Secrets for authentication using CHAP
+        # client	server	secret		IP addresses
+        #username	xl2tpd	password	*
+        EOF
+
+        chown root:root ppp/chap-secrets
+        chmod 600 ppp/chap-secrets
+
+        # The documentation says this file should be present, but doesn't explain why; things work even without it:
+        [ -f l2tp-secrets ] || (echo -n "* * "; ${pkgs.apg}/bin/apg -n 1 -m 32 -x 32 -a 1 -M LCN) > l2tp-secrets
+        chown root:root l2tp-secrets
+        chmod 600 l2tp-secrets
+
+        popd > /dev/null
+
+        mkdir -p /run/xl2tpd
+        chown root:root /run/xl2tpd
+        chmod 700       /run/xl2tpd
+      '';
+
+      serviceConfig = {
+        ExecStart = "${xl2tpd-ppp-wrapped}/bin/xl2tpd -D -c ${xl2tpd-conf} -s /etc/xl2tpd/l2tp-secrets -p /run/xl2tpd/pid -C /run/xl2tpd/control";
+        KillMode  = "process";
+        Restart   = "on-success";
+        Type      = "simple";
+        PIDFile   = "/run/xl2tpd/pid";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/xray.nix b/nixpkgs/nixos/modules/services/networking/xray.nix
new file mode 100644
index 000000000000..83655a2f88ef
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/xray.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  options = {
+
+    services.xray = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to run the xray server.
+
+          Either `settingsFile` or `settings` must be specified.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.xray;
+        defaultText = literalExpression "pkgs.xray";
+        description = lib.mdDoc ''
+          Which xray package to use.
+        '';
+      };
+
+      settingsFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/etc/xray/config.json";
+        description = lib.mdDoc ''
+          The absolute path to the configuration file.
+
+          Either `settingsFile` or `settings` must be specified.
+
+          See <https://www.v2fly.org/en_US/config/overview.html>.
+        '';
+      };
+
+      settings = mkOption {
+        type = types.nullOr (types.attrsOf types.unspecified);
+        default = null;
+        example = {
+          inbounds = [{
+            port = 1080;
+            listen = "127.0.0.1";
+            protocol = "http";
+          }];
+          outbounds = [{
+            protocol = "freedom";
+          }];
+        };
+        description = lib.mdDoc ''
+          The configuration object.
+
+          Either `settingsFile` or `settings` must be specified.
+
+          See <https://www.v2fly.org/en_US/config/overview.html>.
+        '';
+      };
+    };
+
+  };
+
+  config = let
+    cfg = config.services.xray;
+    settingsFile = if cfg.settingsFile != null
+      then cfg.settingsFile
+      else pkgs.writeTextFile {
+        name = "xray.json";
+        text = builtins.toJSON cfg.settings;
+        checkPhase = ''
+          ${cfg.package}/bin/xray -test -config $out
+        '';
+      };
+
+  in mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.settingsFile == null) != (cfg.settings == null);
+        message = "Either but not both `settingsFile` and `settings` should be specified for xray.";
+      }
+    ];
+
+    systemd.services.xray = {
+      description = "xray Daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${cfg.package}/bin/xray -config ${settingsFile}";
+        CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE";
+        AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE";
+        NoNewPrivileges = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/xrdp.nix b/nixpkgs/nixos/modules/services/networking/xrdp.nix
new file mode 100644
index 000000000000..218b440aab3c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/xrdp.nix
@@ -0,0 +1,185 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xrdp;
+  confDir = pkgs.runCommand "xrdp.conf" { preferLocalBuild = true; } ''
+    mkdir $out
+
+    cp ${cfg.package}/etc/xrdp/{km-*,xrdp,sesman,xrdp_keyboard}.ini $out
+
+    cat > $out/startwm.sh <<EOF
+    #!/bin/sh
+    . /etc/profile
+    ${cfg.defaultWindowManager}
+    EOF
+    chmod +x $out/startwm.sh
+
+    substituteInPlace $out/xrdp.ini \
+      --replace "#rsakeys_ini=" "rsakeys_ini=/run/xrdp/rsakeys.ini" \
+      --replace "certificate=" "certificate=${cfg.sslCert}" \
+      --replace "key_file=" "key_file=${cfg.sslKey}" \
+      --replace LogFile=xrdp.log LogFile=/dev/null \
+      --replace EnableSyslog=true EnableSyslog=false
+
+    substituteInPlace $out/sesman.ini \
+      --replace LogFile=xrdp-sesman.log LogFile=/dev/null \
+      --replace EnableSyslog=1 EnableSyslog=0
+
+    # Ensure that clipboard works for non-ASCII characters
+    sed -i -e '/.*SessionVariables.*/ a\
+    LANG=${config.i18n.defaultLocale}\
+    LOCALE_ARCHIVE=${config.i18n.glibcLocales}/lib/locale/locale-archive
+    ' $out/sesman.ini
+  '';
+in
+{
+
+  ###### interface
+
+  options = {
+
+    services.xrdp = {
+
+      enable = mkEnableOption (lib.mdDoc "xrdp, the Remote Desktop Protocol server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.xrdp;
+        defaultText = literalExpression "pkgs.xrdp";
+        description = lib.mdDoc ''
+          The package to use for the xrdp daemon's binary.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3389;
+        description = lib.mdDoc ''
+          Specifies on which port the xrdp daemon listens.
+        '';
+      };
+
+      openFirewall = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to open the firewall for the specified RDP port.";
+      };
+
+      sslKey = mkOption {
+        type = types.str;
+        default = "/etc/xrdp/key.pem";
+        example = "/path/to/your/key.pem";
+        description = lib.mdDoc ''
+          Path to the SSL private key.
+          A self-signed certificate will be generated if the file does not exist.
+        '';
+      };
+
+      sslCert = mkOption {
+        type = types.str;
+        default = "/etc/xrdp/cert.pem";
+        example = "/path/to/your/cert.pem";
+        description = lib.mdDoc ''
+          Path to the SSL certificate.
+          A self-signed certificate will be generated if the file does not exist.
+        '';
+      };
+
+      defaultWindowManager = mkOption {
+        type = types.str;
+        default = "xterm";
+        example = "xfce4-session";
+        description = lib.mdDoc ''
+          The script to run when a user logs in, usually a window manager, e.g. "icewm" or "xfce4-session".
+          This can be overridden per user: if the file ~/startwm.sh exists, it is used instead.
+        '';
+      };
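+      # A per-user override is simply a shell script in the user's home
+      # directory; an illustrative sketch of ~/startwm.sh:
+      #
+      #   #!/bin/sh
+      #   . /etc/profile
+      #   exec xfce4-session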
+
+      confDir = mkOption {
+        type = types.path;
+        default = confDir;
+        defaultText = literalMD "generated from configuration";
+        description = lib.mdDoc "The location of the config files for xrdp.";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+    # xrdp can run X11 programs even if "services.xserver.enable = false"
+    xdg = {
+      autostart.enable = true;
+      menus.enable = true;
+      mime.enable = true;
+      icons.enable = true;
+    };
+
+    fonts.enableDefaultPackages = mkDefault true;
+
+    systemd = {
+      services.xrdp = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        description = "xrdp daemon";
+        requires = [ "xrdp-sesman.service" ];
+        preStart = ''
+          # prepare directory for unix sockets (the sockets will be owned by loggedinuser:xrdp)
+          mkdir -p /tmp/.xrdp || true
+          chown xrdp:xrdp /tmp/.xrdp
+          chmod 3777 /tmp/.xrdp
+
+          # generate a self-signed certificate
+          if [ ! -s ${cfg.sslCert} -o ! -s ${cfg.sslKey} ]; then
+            mkdir -p $(dirname ${cfg.sslCert}) || true
+            mkdir -p $(dirname ${cfg.sslKey}) || true
+            ${pkgs.openssl.bin}/bin/openssl req -x509 -newkey rsa:2048 -sha256 -nodes -days 365 \
+              -subj /C=US/ST=CA/L=Sunnyvale/O=xrdp/CN=www.xrdp.org \
+              -config ${cfg.package}/share/xrdp/openssl.conf \
+              -keyout ${cfg.sslKey} -out ${cfg.sslCert}
+            chown root:xrdp ${cfg.sslKey} ${cfg.sslCert}
+            chmod 440 ${cfg.sslKey} ${cfg.sslCert}
+          fi
+          if [ ! -s /run/xrdp/rsakeys.ini ]; then
+            mkdir -p /run/xrdp
+            ${cfg.package}/bin/xrdp-keygen xrdp /run/xrdp/rsakeys.ini
+          fi
+        '';
+        serviceConfig = {
+          User = "xrdp";
+          Group = "xrdp";
+          PermissionsStartOnly = true;
+          ExecStart = "${cfg.package}/bin/xrdp --nodaemon --port ${toString cfg.port} --config ${cfg.confDir}/xrdp.ini";
+        };
+      };
+
+      services.xrdp-sesman = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        description = "xrdp session manager";
+        restartIfChanged = false; # Do not restart on "nixos-rebuild switch". Like "display-manager", it can have many interactive programs as children.
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/xrdp-sesman --nodaemon --config ${cfg.confDir}/sesman.ini";
+          ExecStop  = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
+        };
+      };
+
+    };
+
+    users.users.xrdp = {
+      description   = "xrdp daemon user";
+      isSystemUser  = true;
+      group         = "xrdp";
+    };
+    users.groups.xrdp = {};
+
+    security.pam.services.xrdp-sesman = { allowNullPassword = true; startSession = true; };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/networking/yggdrasil.md b/nixpkgs/nixos/modules/services/networking/yggdrasil.md
new file mode 100644
index 000000000000..bbaea5bc74aa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/yggdrasil.md
@@ -0,0 +1,141 @@
+# Yggdrasil {#module-services-networking-yggdrasil}
+
+*Source:* {file}`modules/services/networking/yggdrasil/default.nix`
+
+*Upstream documentation:* <https://yggdrasil-network.github.io/>
+
+Yggdrasil is an early-stage implementation of a fully end-to-end encrypted,
+self-arranging IPv6 network.
+
+## Configuration {#module-services-networking-yggdrasil-configuration}
+
+### Simple ephemeral node {#module-services-networking-yggdrasil-configuration-simple}
+
+An annotated example of a simple configuration:
+```
+{
+  services.yggdrasil = {
+    enable = true;
+    persistentKeys = false;
+      # The NixOS module will generate new keys and a new IPv6 address each time
+      # it is started if persistentKeys is not enabled.
+
+    settings = {
+      Peers = [
+        # Yggdrasil will automatically connect and "peer" with other nodes it
+        # discovers via link-local multicast announcements. Unless such nodes
+        # are reachable on the local network (they probably aren't), a node
+        # needs peers within the existing network that it can tunnel to.
+        "tcp://1.2.3.4:1024"
+        "tcp://1.2.3.5:1024"
+        # Public peers can be found at
+        # https://github.com/yggdrasil-network/public-peers
+      ];
+    };
+  };
+}
+```
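+
+The fixed address and prefix used in the following examples are taken from the
+output of `yggdrasilctl getself` on the running node; a minimal sketch, assuming
+root access to the control socket (see the module's `group` option):
+
+```
+# yggdrasilctl getself
+```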
+
+### Persistent node with prefix {#module-services-networking-yggdrasil-configuration-prefix}
+
+A node with a fixed address that announces a prefix:
+```
+let
+  address = "210:5217:69c0:9afc:1b95:b9f:8718:c3d2";
+  prefix = "310:5217:69c0:9afc";
+  # taken from the output of "yggdrasilctl getself".
+in {
+
+  services.yggdrasil = {
+    enable = true;
+    persistentKeys = true; # Maintain a fixed public key and IPv6 address.
+    settings = {
+      Peers = [ "tcp://1.2.3.4:1024" "tcp://1.2.3.5:1024" ];
+      NodeInfo = {
+        # This information is visible to the network.
+        name = config.networking.hostName;
+        location = "The North Pole";
+      };
+    };
+  };
+
+  boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
+    # Forward traffic under the prefix.
+
+  networking.interfaces.eth0.ipv6.addresses = [{
+    # Set a 300::/8 address on the local physical device.
+    address = prefix + "::1";
+    prefixLength = 64;
+  }];
+
+  services.radvd = {
+    # Announce the 300::/8 prefix to eth0.
+    enable = true;
+    config = ''
+      interface eth0
+      {
+        AdvSendAdvert on;
+        prefix ${prefix}::/64 {
+          AdvOnLink on;
+          AdvAutonomous on;
+        };
+        route 200::/8 {};
+      };
+    '';
+  };
+}
+```
+
+### Yggdrasil attached Container {#module-services-networking-yggdrasil-configuration-container}
+
+A NixOS container attached to the Yggdrasil network via a node running on the
+host:
+```
+let
+  yggPrefix64 = "310:5217:69c0:9afc";
+    # Again, taken from the output of "yggdrasilctl getself".
+in
+{
+  boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
+  # Enable IPv6 forwarding.
+
+  networking = {
+    bridges.br0.interfaces = [ ];
+    # A bridge only to containers…
+
+    interfaces.br0 = {
+      # … configured with a prefix address.
+      ipv6.addresses = [{
+        address = "${yggPrefix64}::1";
+        prefixLength = 64;
+      }];
+    };
+  };
+
+  containers.foo = {
+    autoStart = true;
+    privateNetwork = true;
+    hostBridge = "br0";
+    # Attach the container to the bridge only.
+    config = { config, pkgs, ... }: {
+      networking.interfaces.eth0.ipv6 = {
+        addresses = [{
+          # Configure a prefix address.
+          address = "${yggPrefix64}::2";
+          prefixLength = 64;
+        }];
+        routes = [{
+          # Configure the prefix route.
+          address = "200::";
+          prefixLength = 7;
+          via = "${yggPrefix64}::1";
+        }];
+      };
+
+      services.httpd.enable = true;
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+  };
+
+}
+```
diff --git a/nixpkgs/nixos/modules/services/networking/yggdrasil.nix b/nixpkgs/nixos/modules/services/networking/yggdrasil.nix
new file mode 100644
index 000000000000..56d81fb04013
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/yggdrasil.nix
@@ -0,0 +1,234 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  keysPath = "/var/lib/yggdrasil/keys.json";
+
+  cfg = config.services.yggdrasil;
+  settingsProvided = cfg.settings != { };
+  configFileProvided = cfg.configFile != null;
+
+  format = pkgs.formats.json { };
+in
+{
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "yggdrasil" "config" ]
+      [ "services" "yggdrasil" "settings" ])
+  ];
+
+  options = with types; {
+    services.yggdrasil = {
+      enable = mkEnableOption (lib.mdDoc "the yggdrasil system service");
+
+      settings = mkOption {
+        type = format.type;
+        default = { };
+        example = {
+          Peers = [
+            "tcp://aa.bb.cc.dd:eeeee"
+            "tcp://[aaaa:bbbb:cccc:dddd::eeee]:fffff"
+          ];
+          Listen = [
+            "tcp://0.0.0.0:xxxxx"
+          ];
+        };
+        description = lib.mdDoc ''
+          Configuration for yggdrasil, as a Nix attribute set.
+
+          Warning: this is stored in the WORLD-READABLE Nix store!
+          Therefore, it is not appropriate for private keys. If you
+          wish to specify the keys, use {option}`configFile`.
+
+          If {option}`persistentKeys` is enabled then the
+          keys that are generated during activation will override
+          those in {option}`settings` or
+          {option}`configFile`.
+
+          If no keys are specified then ephemeral keys are generated
+          and the Yggdrasil interface will have a random IPv6 address
+          each time the service is started. This is the default.
+
+          If both {option}`configFile` and {option}`settings`
+          are supplied, they will be combined, with values from
+          {option}`configFile` taking precedence.
+
+          You can use the command `nix-shell -p yggdrasil --run "yggdrasil -genconf"`
+          to generate default configuration values with documentation.
+        '';
+      };
+
+      configFile = mkOption {
+        type = nullOr path;
+        default = null;
+        example = "/run/keys/yggdrasil.conf";
+        description = lib.mdDoc ''
+          A file which contains JSON or HJSON configuration for yggdrasil. See
+          the {option}`settings` option for more information.
+
+          Note: This file must not be larger than 1 MB because it is passed to
+          the yggdrasil process via systemd's LoadCredential mechanism. For
+          details, see <https://systemd.io/CREDENTIALS/> and `man 5
+          systemd.exec`.
+        '';
+      };
+
+      group = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "wheel";
+        description = lib.mdDoc "Group to grant access to the Yggdrasil control socket. If `null`, only root can access the socket.";
+      };
+
+      openMulticastPort = mkOption {
+        type = bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open the UDP port used for multicast peer discovery. The
+          NixOS firewall blocks link-local communication, so in order to make
+          incoming local peering work you will also need to configure
+          `MulticastInterfaces` in your Yggdrasil configuration
+          ({option}`settings` or {option}`configFile`). You will then have to
+          add the ports that you configure there to your firewall configuration
+          ({option}`networking.firewall.allowedTCPPorts` or
+          {option}`networking.firewall.interfaces.<name>.allowedTCPPorts`).
+        '';
+      };
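+      # A hedged sketch of the pieces involved in incoming local peering
+      # (interface regex and listen port are illustrative; field names follow
+      # the upstream Yggdrasil configuration format):
+      #
+      #   services.yggdrasil.openMulticastPort = true;
+      #   services.yggdrasil.settings.MulticastInterfaces = [
+      #     { Regex = "en.*"; Beacon = true; Listen = true; Port = 54321; }
+      #   ];
+      #   networking.firewall.allowedTCPPorts = [ 54321 ];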
+
+      denyDhcpcdInterfaces = mkOption {
+        type = listOf str;
+        default = [ ];
+        example = [ "tap*" ];
+        description = lib.mdDoc ''
+          Disable the DHCP client for any interface whose name matches
+          any of the shell glob patterns in this list.  Use this
+          option to prevent the DHCP client from broadcasting requests
+          on the yggdrasil network.  It is only necessary to do so
+          when yggdrasil is running in TAP mode, because TUN
+          interfaces do not support broadcasting.
+        '';
+      };
+
+      package = mkOption {
+        type = package;
+        default = pkgs.yggdrasil;
+        defaultText = literalExpression "pkgs.yggdrasil";
+        description = lib.mdDoc "Yggdrasil package to use.";
+      };
+
+      persistentKeys = mkEnableOption (lib.mdDoc ''
+        persistent keys. If enabled then keys will be generated once and Yggdrasil
+        will retain the same IPv6 address when the service is
+        restarted. Keys are stored at ${keysPath}
+      '');
+
+      extraArgs = mkOption {
+        type = listOf str;
+        default = [ ];
+        example = [ "-loglevel" "info" ];
+        description = lib.mdDoc "Extra command line arguments.";
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable (
+    let
+      binYggdrasil = "${cfg.package}/bin/yggdrasil";
+      binHjson = "${pkgs.hjson-go}/bin/hjson-cli";
+    in
+    {
+      assertions = [{
+        assertion = config.networking.enableIPv6;
+        message = "networking.enableIPv6 must be true for yggdrasil to work";
+      }];
+
+      system.activationScripts.yggdrasil = mkIf cfg.persistentKeys ''
+        if [ ! -e ${keysPath} ]
+        then
+          mkdir --mode=700 -p ${builtins.dirOf keysPath}
+          ${binYggdrasil} -genconf -json \
+            | ${pkgs.jq}/bin/jq \
+                'to_entries|map(select(.key|endswith("Key")))|from_entries' \
+            > ${keysPath}
+        fi
+      '';
+
+      systemd.services.yggdrasil = {
+        description = "Yggdrasil Network Service";
+        after = [ "network-pre.target" ];
+        wants = [ "network.target" ];
+        before = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        # This script first prepares the config file, then it starts Yggdrasil.
+        # The preparation could also be done in ExecStartPre/preStart but only
+        # systemd versions >= v252 support reading credentials in ExecStartPre. As
+        # of February 2023, systemd v252 is not yet in the stable branch of NixOS.
+        #
+        # This could be changed in the future once systemd version v252 has
+        # reached NixOS but it does not have to be. Config file preparation is
+        # fast enough, it does not need elevated privileges, and `set -euo
+        # pipefail` should make sure that the service is not started if the
+        # preparation fails. Therefore, it is not necessary to move the
+        # preparation to ExecStartPre.
+        script = ''
+          set -euo pipefail
+
+          # prepare config file
+          ${(if settingsProvided || configFileProvided || cfg.persistentKeys then
+            "echo "
+
+            + (lib.optionalString settingsProvided
+              "'${builtins.toJSON cfg.settings}'")
+            + (lib.optionalString configFileProvided
+              "$(${binHjson} -c \"$CREDENTIALS_DIRECTORY/yggdrasil.conf\")")
+            + (lib.optionalString cfg.persistentKeys "$(cat ${keysPath})")
+            + " | ${pkgs.jq}/bin/jq -s add | ${binYggdrasil} -normaliseconf -useconf"
+          else
+            "${binYggdrasil} -genconf") + " > /run/yggdrasil/yggdrasil.conf"}
+
+          # start yggdrasil
+          ${binYggdrasil} -useconffile /run/yggdrasil/yggdrasil.conf ${lib.strings.escapeShellArgs cfg.extraArgs}
+        '';
+
+        serviceConfig = {
+          ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+          Restart = "always";
+
+          DynamicUser = true;
+          StateDirectory = "yggdrasil";
+          RuntimeDirectory = "yggdrasil";
+          RuntimeDirectoryMode = "0750";
+          BindReadOnlyPaths = lib.optional cfg.persistentKeys keysPath;
+          LoadCredential =
+            mkIf configFileProvided "yggdrasil.conf:${cfg.configFile}";
+
+          AmbientCapabilities = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE";
+          CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_BIND_SERVICE";
+          MemoryDenyWriteExecute = true;
+          ProtectControlGroups = true;
+          ProtectHome = "tmpfs";
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@privileged @keyring" ];
+        } // (if (cfg.group != null) then {
+          Group = cfg.group;
+        } else { });
+      };
+
+      networking.dhcpcd.denyInterfaces = cfg.denyDhcpcdInterfaces;
+      networking.firewall.allowedUDPPorts = mkIf cfg.openMulticastPort [ 9001 ];
+
+      # Make yggdrasilctl available on the command line.
+      environment.systemPackages = [ cfg.package ];
+    }
+  );
+  meta = {
+    doc = ./yggdrasil.md;
+    maintainers = with lib.maintainers; [ gazally ehmry ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/zerobin.nix b/nixpkgs/nixos/modules/services/networking/zerobin.nix
new file mode 100644
index 000000000000..735d4fa25fb1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/zerobin.nix
@@ -0,0 +1,101 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.services.zerobin;
+
+  zerobin_config = pkgs.writeText "zerobin-config.py" ''
+  PASTE_FILES_ROOT = "${cfg.dataDir}"
+  ${cfg.extraConfig}
+  '';
+
+in
+  {
+    options = {
+      services.zerobin = {
+        enable = mkEnableOption (lib.mdDoc "0bin");
+
+        dataDir = mkOption {
+          type = types.str;
+          default = "/var/lib/zerobin";
+          description = lib.mdDoc ''
+          Path to the 0bin data directory
+          '';
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "zerobin";
+          description = lib.mdDoc ''
+          The user 0bin should run as
+          '';
+        };
+
+        group = mkOption {
+          type = types.str;
+          default = "zerobin";
+          description = lib.mdDoc ''
+          The group 0bin should run as
+          '';
+        };
+
+        listenPort = mkOption {
+          type = types.int;
+          default = 8000;
+          example = 1357;
+          description = lib.mdDoc ''
+          The port zerobin should listen on
+          '';
+        };
+
+        listenAddress = mkOption {
+          type = types.str;
+          default = "localhost";
+          example = "127.0.0.1";
+          description = lib.mdDoc ''
+          The address zerobin should listen on
+          '';
+        };
+
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          example = ''
+          MENU = (
+          ('Home', '/'),
+          )
+          COMPRESSED_STATIC_FILE = True
+          '';
+          description = lib.mdDoc ''
+          Extra configuration to be appended to the 0bin config file
+          (see https://0bin.readthedocs.org/en/latest/en/options.html)
+          '';
+        };
+      };
+    };
+
+    config = mkIf (cfg.enable) {
+      users.users.${cfg.user} =
+      optionalAttrs (cfg.user == "zerobin") {
+        isSystemUser = true;
+        group = cfg.group;
+        home = cfg.dataDir;
+        createHome = true;
+      };
+      users.groups.${cfg.group} = {};
+
+      systemd.services.zerobin = {
+        enable = true;
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig.ExecStart = "${pkgs.zerobin}/bin/zerobin ${cfg.listenAddress} ${toString cfg.listenPort} false ${cfg.user} ${cfg.group} ${zerobin_config}";
+        serviceConfig.PrivateTmp="yes";
+        serviceConfig.User = cfg.user;
+        serviceConfig.Group = cfg.group;
+        preStart = ''
+          mkdir -p ${cfg.dataDir}
+          chown ${cfg.user} ${cfg.dataDir}
+        '';
+      };
+    };
+  }
+
diff --git a/nixpkgs/nixos/modules/services/networking/zeronet.nix b/nixpkgs/nixos/modules/services/networking/zeronet.nix
new file mode 100644
index 000000000000..1f3711bd0d72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/zeronet.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) generators literalExpression mkEnableOption mkIf mkOption recursiveUpdate types;
+  cfg = config.services.zeronet;
+  dataDir = "/var/lib/zeronet";
+  configFile = pkgs.writeText "zeronet.conf" (generators.toINI {} (recursiveUpdate defaultSettings cfg.settings));
+
+  defaultSettings = {
+    global = {
+      data_dir = dataDir;
+      log_dir = dataDir;
+      ui_port = cfg.port;
+      fileserver_port = cfg.fileserverPort;
+      tor = if !cfg.tor then "disable" else if cfg.torAlways then "always" else "enable";
+    };
+  };
+in with lib; {
+  options.services.zeronet = {
+    enable = mkEnableOption (lib.mdDoc "zeronet");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.zeronet;
+      defaultText = literalExpression "pkgs.zeronet";
+      description = lib.mdDoc "ZeroNet package to use";
+    };
+
+    settings = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
+      default = {};
+      example = literalExpression "{ global.tor = enable; }";
+
+      description = lib.mdDoc ''
+        {file}`zeronet.conf` configuration. Refer to
+        <https://zeronet.readthedocs.io/en/latest/faq/#is-it-possible-to-use-a-configuration-file>
+        for details on supported values;
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 43110;
+      description = lib.mdDoc "Optional zeronet web UI port.";
+    };
+
+    fileserverPort = mkOption {
+      # Not optional: when absent zeronet tries to write one to the
+      # read-only config file and crashes
+      type = types.port;
+      default = 12261;
+      description = lib.mdDoc "Zeronet fileserver port.";
+    };
+
+    tor = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Use TOR for zeronet traffic where possible.";
+    };
+
+    torAlways = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Use TOR for all zeronet traffic.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.tor = mkIf cfg.tor {
+      enable = true;
+      controlPort = 9051;
+
+      extraConfig = ''
+        CacheDirectoryGroupReadable 1
+        CookieAuthentication 1
+        CookieAuthFileGroupReadable 1
+      '';
+    };
+
+    systemd.services.zeronet = {
+      description = "zeronet";
+      after = [ "network.target" ] ++ optional cfg.tor "tor.service";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = "zeronet";
+        DynamicUser = true;
+        StateDirectory = "zeronet";
+        SupplementaryGroups = mkIf cfg.tor [ "tor" ];
+        ExecStart = "${cfg.package}/bin/zeronet --config_file ${configFile}";
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "zeronet" "dataDir" ] "Zeronet will store data by default in /var/lib/zeronet")
+    (mkRemovedOptionModule [ "services" "zeronet" "logDir" ] "Zeronet will log by default in /var/lib/zeronet")
+  ];
+
+  meta.maintainers = with maintainers; [ Madouura ];
+}
diff --git a/nixpkgs/nixos/modules/services/networking/zerotierone.nix b/nixpkgs/nixos/modules/services/networking/zerotierone.nix
new file mode 100644
index 000000000000..f78fd8642ba0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/zerotierone.nix
@@ -0,0 +1,83 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.zerotierone;
+in
+{
+  options.services.zerotierone.enable = mkEnableOption (lib.mdDoc "ZeroTierOne");
+
+  options.services.zerotierone.joinNetworks = mkOption {
+    default = [];
+    example = [ "a8a2c3c10c1a68de" ];
+    type = types.listOf types.str;
+    description = lib.mdDoc ''
+      List of ZeroTier Network IDs to join on startup.
+      Note that networks are only ever joined, but not automatically left after removing them from the list.
+      To remove networks, use the ZeroTier CLI: `zerotier-cli leave <network-id>`
+    '';
+  };
+
+  options.services.zerotierone.port = mkOption {
+    default = 9993;
+    type = types.port;
+    description = lib.mdDoc ''
+      Network port used by ZeroTier.
+    '';
+  };
+
+  options.services.zerotierone.package = mkOption {
+    default = pkgs.zerotierone;
+    defaultText = literalExpression "pkgs.zerotierone";
+    type = types.package;
+    description = lib.mdDoc ''
+      ZeroTier One package to use.
+    '';
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.zerotierone = {
+      description = "ZeroTierOne";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      wants = [ "network-online.target" ];
+
+      path = [ cfg.package ];
+
+      preStart = ''
+        mkdir -p /var/lib/zerotier-one/networks.d
+        chmod 700 /var/lib/zerotier-one
+        chown -R root:root /var/lib/zerotier-one
+      '' + (concatMapStrings (netId: ''
+        touch "/var/lib/zerotier-one/networks.d/${netId}.conf"
+      '') cfg.joinNetworks);
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/zerotier-one -p${toString cfg.port}";
+        Restart = "always";
+        KillMode = "process";
+        TimeoutStopSec = 5;
+      };
+    };
+
+    # ZeroTier does not issue DHCP leases, but some strangers might...
+    networking.dhcpcd.denyInterfaces = [ "zt*" ];
+
+    # ZeroTier receives UDP transmissions
+    networking.firewall.allowedUDPPorts = [ cfg.port ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    # Prevent systemd from potentially changing the MAC address
+    systemd.network.links."50-zerotier" = {
+      matchConfig = {
+        OriginalName = "zt*";
+      };
+      linkConfig = {
+        AutoNegotiation = false;
+        MACAddressPolicy = "none";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/znc/default.nix b/nixpkgs/nixos/modules/services/networking/znc/default.nix
new file mode 100644
index 000000000000..d3ba4a524197
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/znc/default.nix
@@ -0,0 +1,328 @@
+{ config, lib, pkgs, ...}:
+
+with lib;
+
+let
+
+  cfg = config.services.znc;
+
+  defaultUser = "znc";
+
+  modules = pkgs.buildEnv {
+    name = "znc-modules";
+    paths = cfg.modulePackages;
+  };
+
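+  # Ports of all configured listeners, used when openFirewall is enabled.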
+  listenerPorts = concatMap (l: optional (l ? Port) l.Port)
+    (attrValues (cfg.config.Listener or {}));
+
+  # Converts the config option to a string
+  semanticString = let
+
+      sortedAttrs = set: sort (l: r:
+        if l == "extraConfig" then false # Always put extraConfig last
+        else if isAttrs set.${l} == isAttrs set.${r} then l < r
+        else isAttrs set.${r} # Attrsets should be last, makes for a nice config
+        # This last case occurs when any side (but not both) is an attrset
+        # The order of these is correct when the attrset is on the right
+        # which we're just returning
+      ) (attrNames set);
+
+      # Specifies an attrset that encodes the value according to its type
+      encode = name: value: {
+          null = [];
+          bool = [ "${name} = ${boolToString value}" ];
+          int = [ "${name} = ${toString value}" ];
+
+          # extraConfig should be inserted verbatim
+          string = [ (if name == "extraConfig" then value else "${name} = ${value}") ];
+
+          # Values like `Foo = [ "bar" "baz" ];` should be transformed into
+          #   Foo=bar
+          #   Foo=baz
+          list = concatMap (encode name) value;
+
+          # Values like `Foo = { bar = { Baz = "baz"; Qux = "qux"; Florps = null; }; };` should be transformed into
+          #   <Foo bar>
+          #     Baz=baz
+          #     Qux=qux
+          #   </Foo>
+          set = concatMap (subname: optionals (value.${subname} != null) ([
+              "<${name} ${subname}>"
+            ] ++ map (line: "\t${line}") (toLines value.${subname}) ++ [
+              "</${name}>"
+            ])) (filter (v: v != null) (attrNames value));
+
+        }.${builtins.typeOf value};
+
+      # One level "above" encode, acts upon a set and uses encode on each name,value pair
+      toLines = set: concatMap (name: encode name set.${name}) (sortedAttrs set);
+
+    in
+      concatStringsSep "\n" (toLines cfg.config);
+
+  semanticTypes = with types; rec {
+    zncAtom = nullOr (oneOf [ int bool str ]);
+    zncAttr = attrsOf (nullOr zncConf);
+    zncAll = oneOf [ zncAtom (listOf zncAtom) zncAttr ];
+    zncConf = attrsOf (zncAll // {
+      # Since this is a recursive type and the description by default contains
+      # the description of its subtypes, infinite recursion would occur without
+      # explicitly breaking this cycle
+      description = "znc values (null, atoms (str, int, bool), list of atoms, or attrsets of znc values)";
+    });
+  };
+
+in
+
+{
+
+  imports = [ ./options.nix ];
+
+  options = {
+    services.znc = {
+      enable = mkEnableOption (lib.mdDoc "ZNC");
+
+      user = mkOption {
+        default = "znc";
+        example = "john";
+        type = types.str;
+        description = lib.mdDoc ''
+          The name of an existing user account to use to own the ZNC server
+          process. If not specified, a default user will be created.
+        '';
+      };
+
+      group = mkOption {
+        default = defaultUser;
+        example = "users";
+        type = types.str;
+        description = lib.mdDoc ''
+          Group to own the ZNC process.
+        '';
+      };
+
+      dataDir = mkOption {
+        default = "/var/lib/znc";
+        example = "/home/john/.znc";
+        type = types.path;
+        description = lib.mdDoc ''
+          The state directory for ZNC. The config and the modules will be linked
+          to from this directory as well.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open ports in the firewall for ZNC. Only ports of
+          listeners specified in
+          {option}`services.znc.config.Listener` can be opened.
+        '';
+      };
+
+      config = mkOption {
+        type = semanticTypes.zncConf;
+        default = {};
+        example = literalExpression ''
+          {
+            LoadModule = [ "webadmin" "adminlog" ];
+            User.paul = {
+              Admin = true;
+              Nick = "paul";
+              AltNick = "paul1";
+              LoadModule = [ "chansaver" "controlpanel" ];
+              Network.libera = {
+                Server = "irc.libera.chat +6697";
+                LoadModule = [ "simple_away" ];
+                Chan = {
+                  "#nixos" = { Detached = false; };
+                  "##linux" = { Disabled = true; };
+                };
+              };
+              Pass.password = {
+                Method = "sha256";
+                Hash = "e2ce303c7ea75c571d80d8540a8699b46535be6a085be3414947d638e48d9e93";
+                Salt = "l5Xryew4g*!oa(ECfX2o";
+              };
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          Configuration for ZNC, see
+          <https://wiki.znc.in/Configuration> for details. The
+          Nix value declared here will be translated directly to the xml-like
+          format ZNC expects. This is much more flexible than the legacy options
+          under {option}`services.znc.confOptions.*`, but also can't do
+          any type checking.
+
+          You can use {command}`nix-instantiate --eval --strict '<nixpkgs/nixos>' -A config.services.znc.config`
+          to view the current value. By default it contains a listener for port
+          5000 with SSL enabled.
+
+          Nix attributes called `extraConfig` will be inserted
+          verbatim into the resulting config file.
+
+          If {option}`services.znc.useLegacyConfig` is turned on, the
+          option values in {option}`services.znc.confOptions.*` will be
+          gracefully applied to this option.
+
+          If you intend to update the configuration through this option, be sure
+          to disable {option}`services.znc.mutable`, otherwise none of the
+          changes here will be applied after the initial deploy.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        example = literalExpression "~/.znc/configs/znc.conf";
+        description = lib.mdDoc ''
+          Configuration file for ZNC. It is recommended to use the
+          {option}`config` option instead.
+
+          Setting this option will override any auto-generated config file
+          through the {option}`confOptions` or {option}`config`
+          options.
+        '';
+      };
+
+      modulePackages = mkOption {
+        type = types.listOf types.package;
+        default = [ ];
+        example = literalExpression "[ pkgs.zncModules.fish pkgs.zncModules.push ]";
+        description = lib.mdDoc ''
+          A list of global znc module packages to add to znc.
+        '';
+      };
+
+      mutable = mkOption {
+        default = true; # TODO: Default to true when config is set, make sure to not delete the old config if present
+        type = types.bool;
+        description = lib.mdDoc ''
+          Indicates whether to allow the contents of the
+          `dataDir` directory to be changed by the user at
+          run-time.
+
+          If enabled, modifications to the ZNC configuration after its initial
+          creation are not overwritten by a NixOS rebuild. If disabled, the
+          ZNC configuration is rebuilt on every NixOS rebuild.
+
+          If the user wants to manage the ZNC service using the web admin
+          interface, this option should be enabled.
+        '';
+      };
+
+      extraFlags = mkOption {
+        default = [ ];
+        example = [ "--debug" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Extra arguments to use for executing znc.
+        '';
+      };
+    };
+  };
+
+
+  ###### Implementation
+
+  config = mkIf cfg.enable {
+
+    services.znc = {
+      configFile = mkDefault (pkgs.writeText "znc-generated.conf" semanticString);
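+      # Record the ZNC version in the generated config and provide a default
+      # SSL listener on port 5000 (both Listener values can be overridden).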
+      config = {
+        Version = lib.getVersion pkgs.znc;
+        Listener.l.Port = mkDefault 5000;
+        Listener.l.SSL = mkDefault true;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall listenerPorts;
+
+    systemd.services.znc = {
+      description = "ZNC Server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "always";
+        ExecStart = "${pkgs.znc}/bin/znc --foreground --datadir ${cfg.dataDir} ${escapeShellArgs cfg.extraFlags}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStop = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DevicePolicy = "closed";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        ReadWritePaths = [ cfg.dataDir ];
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+        UMask = "0027";
+      };
+      preStart = ''
+        mkdir -p ${cfg.dataDir}/configs
+
+        # If immutable (not cfg.mutable), delete the old conf file so it is regenerated every time.
+        ${optionalString (!cfg.mutable) ''
+          echo "znc is set to be system-managed. Now deleting old znc.conf file to be regenerated."
+          rm -f ${cfg.dataDir}/configs/znc.conf
+        ''}
+
+        # Ensure essential files exist.
+        if [[ ! -f ${cfg.dataDir}/configs/znc.conf ]]; then
+            echo "No znc.conf file found in ${cfg.dataDir}. Creating one now."
+            cp --no-preserve=ownership --no-clobber ${cfg.configFile} ${cfg.dataDir}/configs/znc.conf
+            chmod u+rw ${cfg.dataDir}/configs/znc.conf
+        fi
+
+        if [[ ! -f ${cfg.dataDir}/znc.pem ]]; then
+          echo "No znc.pem file found in ${cfg.dataDir}. Creating one now."
+          ${pkgs.znc}/bin/znc --makepem --datadir ${cfg.dataDir}
+        fi
+
+        # Symlink modules
+        rm ${cfg.dataDir}/modules || true
+        ln -fs ${modules}/lib/znc ${cfg.dataDir}/modules
+      '';
+    };
+
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} =
+        { description = "ZNC server daemon owner";
+          group = defaultUser;
+          uid = config.ids.uids.znc;
+          home = cfg.dataDir;
+          createHome = true;
+        };
+      };
+
+    users.groups = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} =
+        { gid = config.ids.gids.znc;
+          members = [ defaultUser ];
+        };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/networking/znc/options.nix b/nixpkgs/nixos/modules/services/networking/znc/options.nix
new file mode 100644
index 000000000000..bd67ec86d513
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/networking/znc/options.nix
@@ -0,0 +1,269 @@
+{ lib, config, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.znc;
+
+  networkOpts = {
+    options = {
+
+      server = mkOption {
+        type = types.str;
+        example = "irc.libera.chat";
+        description = lib.mdDoc ''
+          IRC server address.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 6697;
+        description = lib.mdDoc ''
+          IRC server port.
+        '';
+      };
+
+      password = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          IRC server password, such as for a Slack gateway.
+        '';
+      };
+
+      useSSL = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to use SSL to connect to the IRC server.
+        '';
+      };
+
+      modules = mkOption {
+        type = types.listOf types.str;
+        default = [ "simple_away" ];
+        example = literalExpression ''[ "simple_away" "sasl" ]'';
+        description = lib.mdDoc ''
+          ZNC network modules to load.
+        '';
+      };
+
+      channels = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "nixos" ];
+        description = lib.mdDoc ''
+          IRC channels to join.
+        '';
+      };
+
+      hasBitlbeeControlChannel = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to add the special Bitlbee operations channel.
+        '';
+      };
+
+      extraConf = mkOption {
+        default = "";
+        type = types.lines;
+        example = ''
+          Encoding = ^UTF-8
+          FloodBurst = 4
+          FloodRate = 1.00
+          IRCConnectEnabled = true
+          Ident = johntron
+          JoinDelay = 0
+          Nick = johntron
+        '';
+        description = lib.mdDoc ''
+          Extra config for the network. Consider using
+          {option}`services.znc.config` instead.
+        '';
+      };
+    };
+  };
+
+in
+
+{
+
+  options = {
+    services.znc = {
+
+      useLegacyConfig = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to propagate the legacy options under
+          {option}`services.znc.confOptions.*` to the znc config. If this
+          is turned on, the znc config will contain a user with the default name
+          "znc", global modules "webadmin" and "adminlog" will be enabled by
+          default, and more, all controlled through the
+          {option}`services.znc.confOptions.*` options.
+          You can use {command}`nix-instantiate --eval --strict '<nixpkgs/nixos>' -A config.services.znc.config`
+          to view the current value of the config.
+
+          In any case, if you need more flexibility,
+          {option}`services.znc.config` can be used to override/add to
+          all of the legacy options.
+        '';
+      };
+
+      confOptions = {
+        modules = mkOption {
+          type = types.listOf types.str;
+          default = [ "webadmin" "adminlog" ];
+          example = [ "partyline" "webadmin" "adminlog" "log" ];
+          description = lib.mdDoc ''
+            A list of modules to include in the `znc.conf` file.
+          '';
+        };
+
+        userModules = mkOption {
+          type = types.listOf types.str;
+          default = [ "chansaver" "controlpanel" ];
+          example = [ "chansaver" "controlpanel" "fish" "push" ];
+          description = lib.mdDoc ''
+            A list of user modules to include in the `znc.conf` file.
+          '';
+        };
+
+        userName = mkOption {
+          default = "znc";
+          example = "johntron";
+          type = types.str;
+          description = lib.mdDoc ''
+            The user name used to log in to the ZNC web admin interface.
+          '';
+        };
+
+        networks = mkOption {
+          default = { };
+          type = with types; attrsOf (submodule networkOpts);
+          description = lib.mdDoc ''
+            IRC networks to connect the user to.
+          '';
+          example = literalExpression ''
+            {
+              "libera" = {
+                server = "irc.libera.chat";
+                port = 6697;
+                useSSL = true;
+                modules = [ "simple_away" ];
+              };
+            };
+          '';
+        };
+
+        nick = mkOption {
+          default = "znc-user";
+          example = "john";
+          type = types.str;
+          description = lib.mdDoc ''
+            The IRC nick.
+          '';
+        };
+
+        passBlock = mkOption {
+          example = ''
+            <Pass password>
+               Method = sha256
+               Hash = e2ce303c7ea75c571d80d8540a8699b46535be6a085be3414947d638e48d9e93
+               Salt = l5Xryew4g*!oa(ECfX2o
+            </Pass>
+          '';
+          type = types.str;
+          description = lib.mdDoc ''
+            Generate with {command}`nix-shell -p znc --command "znc --makepass"`.
+            This is the password used to log in to the ZNC web admin interface.
+            You can also set this through
+            {option}`services.znc.config.User.<username>.Pass.Method`
+            and co.
+          '';
+        };
+
+        port = mkOption {
+          default = 5000;
+          type = types.port;
+          description = lib.mdDoc ''
+            Specifies the port on which to listen.
+          '';
+        };
+
+        useSSL = mkOption {
+          default = true;
+          type = types.bool;
+          description = lib.mdDoc ''
+            Indicates whether the ZNC server should use SSL when listening on
+            the specified port. A self-signed certificate will be generated.
+          '';
+        };
+
+        uriPrefix = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          example = "/znc/";
+          description = lib.mdDoc ''
+            An optional URI prefix for the ZNC web interface. Can be
+            used to make ZNC available behind a reverse proxy.
+          '';
+        };
+
+        extraZncConf = mkOption {
+          default = "";
+          type = types.lines;
+          description = lib.mdDoc ''
+            Extra config to `znc.conf` file.
+          '';
+        };
+      };
+
+    };
+  };
+
+  config = mkIf cfg.useLegacyConfig {
+
+    services.znc.config = let
+      c = cfg.confOptions;
+      # defaults here should override defaults set in the non-legacy part
+      mkDefault = mkOverride 900;
+    in {
+      LoadModule = mkDefault c.modules;
+      Listener.l = {
+        Port = mkDefault c.port;
+        IPv4 = mkDefault true;
+        IPv6 = mkDefault true;
+        SSL = mkDefault c.useSSL;
+        URIPrefix = c.uriPrefix;
+      };
+      User.${c.userName} = {
+        Admin = mkDefault true;
+        Nick = mkDefault c.nick;
+        AltNick = mkDefault "${c.nick}_";
+        Ident = mkDefault c.nick;
+        RealName = mkDefault c.nick;
+        LoadModule = mkDefault c.userModules;
+        Network = mapAttrs (name: net: {
+          LoadModule = mkDefault net.modules;
+          Server = mkDefault "${net.server} ${optionalString net.useSSL "+"}${toString net.port} ${net.password}";
+          Chan = optionalAttrs net.hasBitlbeeControlChannel { "&bitlbee" = mkDefault {}; } //
+            listToAttrs (map (n: nameValuePair "#${n}" (mkDefault {})) net.channels);
+          extraConfig = if net.extraConf == "" then mkDefault null else net.extraConf;
+        }) c.networks;
+        extraConfig = [ c.passBlock ];
+      };
+      extraConfig = optional (c.extraZncConf != "") c.extraZncConf;
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule ["services" "znc" "zncConf"] ''
+      Instead of `services.znc.zncConf = "... foo ...";`, use
+      `services.znc.configFile = pkgs.writeText "znc.conf" "... foo ...";`.
+    '')
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/printing/cups-pdf.nix b/nixpkgs/nixos/modules/services/printing/cups-pdf.nix
new file mode 100644
index 000000000000..07f24367132f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/printing/cups-pdf.nix
@@ -0,0 +1,185 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  # cups calls its backends as user `lp` (which is good!),
+  # but cups-pdf wants to be called as `root`, so it can change ownership of files.
+  # We add a suid wrapper and a wrapper script to trick cups into calling the suid wrapper.
+  # Note that a symlink to the suid wrapper alone wouldn't suffice, cups would complain
+  # > File "/nix/store/xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx-cups-progs/lib/cups/backend/cups-pdf" has insecure permissions (0104554/uid=0/gid=20)
+
+  # wrapper script that redirects calls to the suid wrapper
+  cups-pdf-wrapper = pkgs.writeTextFile {
+    name = "${pkgs.cups-pdf-to-pdf.name}-wrapper.sh";
+    executable = true;
+    destination = "/lib/cups/backend/cups-pdf";
+    checkPhase = ''
+      ${pkgs.stdenv.shellDryRun} "$target"
+      ${lib.getExe pkgs.shellcheck} "$target"
+    '';
+    text = ''
+      #! ${pkgs.runtimeShell}
+      exec "${config.security.wrapperDir}/cups-pdf" "$@"
+    '';
+  };
+
+  # wrapped cups-pdf package that uses the suid wrapper
+  cups-pdf-wrapped = pkgs.buildEnv {
+    name = "${pkgs.cups-pdf-to-pdf.name}-wrapped";
+    # using the wrapper as first path ensures it is used
+    paths = [ cups-pdf-wrapper pkgs.cups-pdf-to-pdf ];
+    ignoreCollisions = true;
+  };
+
+  instanceSettings = name: {
+    freeformType = with lib.types; nullOr (oneOf [ int str path package ]);
+    # override defaults:
+    # inject instance name into paths,
+    # also avoid conflicts between user names and special dirs
+    options.Out = lib.mkOption {
+      type = with lib.types; nullOr singleLineStr;
+      default = "/var/spool/cups-pdf-${name}/users/\${USER}";
+      defaultText = "/var/spool/cups-pdf-{instance-name}/users/\${USER}";
+      example = "\${HOME}/cups-pdf";
+      description = lib.mdDoc ''
+        output directory;
+        `''${HOME}` will be expanded to the user's home directory,
+        `''${USER}` will be expanded to the user name.
+      '';
+    };
+    options.AnonDirName = lib.mkOption {
+      type = with lib.types; nullOr singleLineStr;
+      default = "/var/spool/cups-pdf-${name}/anonymous";
+      defaultText = "/var/spool/cups-pdf-{instance-name}/anonymous";
+      example = "/var/lib/cups-pdf";
+      description = lib.mdDoc "path for anonymously created PDF files";
+    };
+    options.Spool = lib.mkOption {
+      type = with lib.types; nullOr singleLineStr;
+      default = "/var/spool/cups-pdf-${name}/spool";
+      defaultText = "/var/spool/cups-pdf-{instance-name}/spool";
+      example = "/var/lib/cups-pdf";
+      description = lib.mdDoc "spool directory";
+    };
+    options.Anonuser = lib.mkOption {
+      type = lib.types.singleLineStr;
+      default = "root";
+      description = lib.mdDoc ''
+        User for anonymous PDF creation.
+        An empty string disables this feature.
+      '';
+    };
+    options.GhostScript = lib.mkOption {
+      type = with lib.types; nullOr path;
+      default = lib.getExe pkgs.ghostscript;
+      defaultText = lib.literalExpression "lib.getExe pkgs.ghostscript";
+      example = lib.literalExpression ''''${pkgs.ghostscript}/bin/ps2pdf'';
+      description = lib.mdDoc "location of GhostScript binary";
+    };
+  };
+
+  instanceConfig = { name, config, ... }: {
+    options = {
+      enable = (lib.mkEnableOption (lib.mdDoc "this cups-pdf instance")) // { default = true; };
+      installPrinter = (lib.mkEnableOption (lib.mdDoc ''
+        a CUPS printer queue for this instance.
+        The queue will be named after the instance and will use the {file}`CUPS-PDF_opt.ppd` ppd file.
+        If this is disabled, you need to add the queue yourself to use the instance
+      '')) // { default = true; };
+      confFileText = lib.mkOption {
+        type = lib.types.lines;
+        description = lib.mdDoc ''
+          This will contain the contents of {file}`cups-pdf.conf` for this instance, derived from {option}`settings`.
+          You can use this option to append text to the file.
+        '';
+      };
+      settings = lib.mkOption {
+        type = lib.types.submodule (instanceSettings name);
+        default = {};
+        example = {
+          Out = "\${HOME}/cups-pdf";
+          UserUMask = "0033";
+        };
+        description = lib.mdDoc ''
+          Settings for a cups-pdf instance, see the descriptions in the template config file in the cups-pdf package.
+          The key value pairs declared here will be translated into proper key value pairs for {file}`cups-pdf.conf`.
+          Setting a value to `null` disables the option and removes it from the file.
+        '';
+      };
+    };
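+    # Render the instance settings into "Key value" lines, dropping keys set to null.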
+    config.confFileText = lib.pipe config.settings [
+      (lib.filterAttrs (key: value: value != null))
+      (lib.mapAttrs (key: builtins.toString))
+      (lib.mapAttrsToList (key: value: "${key} ${value}\n"))
+      lib.concatStrings
+    ];
+  };
+
+  cupsPdfCfg = config.services.printing.cups-pdf;
+
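+  # For each enabled instance, write its config to the store and symlink it
+  # into /var/lib/cups as cups-pdf-<name>.conf (done in the cups preStart below).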
+  copyConfigFileCmds = lib.pipe cupsPdfCfg.instances [
+    (lib.filterAttrs (name: lib.getAttr "enable"))
+    (lib.mapAttrs (name: lib.getAttr "confFileText"))
+    (lib.mapAttrs (name: pkgs.writeText "cups-pdf-${name}.conf"))
+    (lib.mapAttrsToList (name: confFile: "ln --symbolic --no-target-directory ${confFile} /var/lib/cups/cups-pdf-${name}.conf\n"))
+    lib.concatStrings
+  ];
+
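+  # hardware.printers.ensurePrinters entries for instances that request an
+  # automatically installed printer queue.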
+  printerSettings = lib.pipe cupsPdfCfg.instances [
+    (lib.filterAttrs (name: lib.getAttr "enable"))
+    (lib.filterAttrs (name: lib.getAttr "installPrinter"))
+    (lib.mapAttrsToList (name: instance: (lib.mapAttrs (key: lib.mkDefault) {
+      inherit name;
+      model = "CUPS-PDF_opt.ppd";
+      deviceUri = "cups-pdf:/${name}";
+      description = "virtual printer for cups-pdf instance ${name}";
+      location = instance.settings.Out;
+    })))
+  ];
+
+in
+
+{
+
+  options.services.printing.cups-pdf = {
+    enable = lib.mkEnableOption (lib.mdDoc ''
+      the cups-pdf virtual pdf printer backend.
+      By default, this will install a single printer `pdf`,
+      but this can be changed or extended with {option}`services.printing.cups-pdf.instances`
+    '');
+    instances = lib.mkOption {
+      type = lib.types.attrsOf (lib.types.submodule instanceConfig);
+      default.pdf = {};
+      example.pdf.settings = {
+        Out = "\${HOME}/cups-pdf";
+        UserUMask = "0033";
+      };
+      description = lib.mdDoc ''
+        Allows defining one or more cups-pdf instances.
+        Each instance is named by its attribute name, and the attribute's value controls that instance's configuration.
+      '';
+    };
+  };
+
+  config = lib.mkIf cupsPdfCfg.enable {
+    services.printing.enable = true;
+    services.printing.drivers = [ cups-pdf-wrapped ];
+    hardware.printers.ensurePrinters = printerSettings;
+    # the cups module will install the default config file,
+    # but we don't need it and it would confuse cups-pdf
+    systemd.services.cups.preStart = lib.mkAfter ''
+      rm -f /var/lib/cups/cups-pdf.conf
+      ${copyConfigFileCmds}
+    '';
+    security.wrappers.cups-pdf = {
+      group = "lp";
+      owner = "root";
+      permissions = "+r,ug+x";
+      setuid = true;
+      source = "${pkgs.cups-pdf-to-pdf}/lib/cups/backend/cups-pdf";
+    };
+  };
+
+  meta.maintainers = [ lib.maintainers.yarny ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/printing/cupsd.nix b/nixpkgs/nixos/modules/services/printing/cupsd.nix
new file mode 100644
index 000000000000..3a2744303474
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/printing/cupsd.nix
@@ -0,0 +1,492 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) cups cups-pk-helper cups-filters xdg-utils;
+
+  cfg = config.services.printing;
+
+  avahiEnabled = config.services.avahi.enable;
+  polkitEnabled = config.security.polkit.enable;
+
+  additionalBackends = pkgs.runCommand "additional-cups-backends" {
+      preferLocalBuild = true;
+    } ''
+      mkdir -p $out
+      if [ ! -e ${cups.out}/lib/cups/backend/smb ]; then
+        mkdir -p $out/lib/cups/backend
+        ln -sv ${pkgs.samba}/bin/smbspool $out/lib/cups/backend/smb
+      fi
+
+      # Provide support for printing via HTTPS.
+      if [ ! -e ${cups.out}/lib/cups/backend/https ]; then
+        mkdir -p $out/lib/cups/backend
+        ln -sv ${cups.out}/lib/cups/backend/ipp $out/lib/cups/backend/https
+      fi
+    '';
+
+  # Here we can enable additional backends, filters, etc. that are not
+  # part of CUPS itself, e.g. the SMB backend is part of Samba.  Since
+  # we can't update ${cups.out}/lib/cups itself, we create a symlink tree
+  # here and add the additional programs.  The ServerBin directive in
+  # cups-files.conf tells cupsd to use this tree.
+  bindir = pkgs.buildEnv {
+    name = "cups-progs";
+    paths =
+      [ cups.out additionalBackends cups-filters pkgs.ghostscript ]
+      ++ cfg.drivers;
+    pathsToLink = [ "/lib" "/share/cups" "/bin" ];
+    postBuild = cfg.bindirCmds;
+    ignoreCollisions = true;
+  };
+
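+  # Helper to place a CUPS configuration fragment at etc/cups/<name> in its own store path.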
+  writeConf = name: text: pkgs.writeTextFile {
+    inherit name text;
+    destination = "/etc/cups/${name}";
+  };
+
+  cupsFilesFile = writeConf "cups-files.conf" ''
+    SystemGroup root wheel
+
+    ServerBin ${bindir}/lib/cups
+    DataDir ${bindir}/share/cups
+    DocumentRoot ${cups.out}/share/doc/cups
+
+    AccessLog syslog
+    ErrorLog syslog
+    PageLog syslog
+
+    TempDir ${cfg.tempDir}
+
+    SetEnv PATH /var/lib/cups/path/lib/cups/filter:/var/lib/cups/path/bin
+
+    # User and group used to run external programs, including
+    # those that actually send the job to the printer.  Note that
+    # Udev sets the group of printer devices to `lp', so we want
+    # these programs to run as `lp' as well.
+    User cups
+    Group lp
+
+    ${cfg.extraFilesConf}
+  '';
+
+  cupsdFile = writeConf "cupsd.conf" ''
+    ${concatMapStrings (addr: ''
+      Listen ${addr}
+    '') cfg.listenAddresses}
+    Listen /run/cups/cups.sock
+
+    DefaultShared ${if cfg.defaultShared then "Yes" else "No"}
+
+    Browsing ${if cfg.browsing then "Yes" else "No"}
+
+    WebInterface ${if cfg.webInterface then "Yes" else "No"}
+
+    LogLevel ${cfg.logLevel}
+
+    ${cfg.extraConf}
+  '';
+
+  browsedFile = writeConf "cups-browsed.conf" cfg.browsedConf;
+
+  rootdir = pkgs.buildEnv {
+    name = "cups-progs";
+    paths = [
+      cupsFilesFile
+      cupsdFile
+      (writeConf "client.conf" cfg.clientConf)
+      (writeConf "snmp.conf" cfg.snmpConf)
+    ] ++ optional avahiEnabled browsedFile
+      ++ cfg.drivers;
+    pathsToLink = [ "/etc/cups" ];
+    ignoreCollisions = true;
+  };
+
+  filterGutenprint = filter (pkg: pkg.meta.isGutenprint or false == true);
+  containsGutenprint = pkgs: length (filterGutenprint pkgs) > 0;
+  getGutenprint = pkgs: head (filterGutenprint pkgs);
+
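+  # Extract the port number from each "host:port" entry in listenAddresses;
+  # used to open the firewall when openFirewall is set.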
+  parsePorts = addresses: let
+    splitAddress = addr: strings.splitString ":" addr;
+    extractPort = addr: builtins.foldl' (a: b: b) "" (splitAddress addr);
+  in
+    builtins.map (address: strings.toInt (extractPort address)) addresses;
+
+in
+
+{
+
+  imports = [
+    (mkChangedOptionModule [ "services" "printing" "gutenprint" ] [ "services" "printing" "drivers" ]
+      (config:
+        let enabled = getAttrFromPath [ "services" "printing" "gutenprint" ] config;
+        in if enabled then [ pkgs.gutenprint ] else [ ]))
+    (mkRemovedOptionModule [ "services" "printing" "cupsFilesConf" ] "")
+    (mkRemovedOptionModule [ "services" "printing" "cupsdConf" ] "")
+  ];
+
+  ###### interface
+
+  options = {
+    services.printing = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable printing support through the CUPS daemon.
+        '';
+      };
+
+      stateless = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If set, all state directories relating to CUPS will be removed on
+          startup of the service.
+        '';
+      };
+
+      startWhenNeeded = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          If set, CUPS is socket-activated; that is,
+          instead of having it permanently running as a daemon,
+          systemd will start it on the first incoming connection.
+        '';
+      };
+
+      listenAddresses = mkOption {
+        type = types.listOf types.str;
+        default = [ "localhost:631" ];
+        example = [ "*:631" ];
+        description = lib.mdDoc ''
+          A list of addresses and ports on which to listen.
+        '';
+      };
+
+      allowFrom = mkOption {
+        type = types.listOf types.str;
+        default = [ "localhost" ];
+        example = [ "all" ];
+        apply = concatMapStringsSep "\n" (x: "Allow ${x}");
+        description = lib.mdDoc ''
+          From which hosts to allow unconditional access.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to open the firewall for the TCP/UDP ports specified in
+          the listenAddresses option.
+        '';
+      };
+
+      bindirCmds = mkOption {
+        type = types.lines;
+        internal = true;
+        default = "";
+        description = lib.mdDoc ''
+          Additional commands executed while creating the directory
+          containing the CUPS server binaries.
+        '';
+      };
+
+      defaultShared = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Specifies whether local printers are shared by default.
+        '';
+      };
+
+      browsing = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Specifies whether shared printers are advertised.
+        '';
+      };
+
+      webInterface = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Specifies whether the web interface is enabled.
+        '';
+      };
+
+      logLevel = mkOption {
+        type = types.str;
+        default = "info";
+        example = "debug";
+        description = lib.mdDoc ''
+          Specifies the cupsd logging verbosity.
+        '';
+      };
+
+      extraFilesConf = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra contents of the configuration file of the CUPS daemon
+          ({file}`cups-files.conf`).
+        '';
+      };
+
+      extraConf = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          ''
+            BrowsePoll cups.example.com
+            MaxCopies 42
+          '';
+        description = lib.mdDoc ''
+          Extra contents of the configuration file of the CUPS daemon
+          ({file}`cupsd.conf`).
+        '';
+      };
+
+      clientConf = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          ''
+            ServerName server.example.com
+            Encryption Never
+          '';
+        description = lib.mdDoc ''
+          The contents of the client configuration.
+          ({file}`client.conf`)
+        '';
+      };
+
+      browsedConf = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          ''
+            BrowsePoll cups.example.com
+          '';
+        description = lib.mdDoc ''
+          The contents of the configuration file of the CUPS Browsed daemon
+          ({file}`cups-browsed.conf`)
+        '';
+      };
+
+      snmpConf = mkOption {
+        type = types.lines;
+        default = ''
+          Address @LOCAL
+        '';
+        description = lib.mdDoc ''
+          The contents of {file}`/etc/cups/snmp.conf`. See "man
+          cups-snmp.conf" for a complete description.
+        '';
+      };
+
+      drivers = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        example = literalExpression "with pkgs; [ gutenprint hplip splix ]";
+        description = lib.mdDoc ''
+          CUPS drivers to use. Drivers provided by CUPS, cups-filters,
+          Ghostscript and Samba are added unconditionally. If this list contains
+          Gutenprint (i.e. a derivation with
+          `meta.isGutenprint = true`) the PPD files in
+          {file}`/var/lib/cups/ppd` will be updated automatically
+          to avoid errors due to incompatible versions.
+        '';
+      };
+
+      tempDir = mkOption {
+        type = types.path;
+        default = "/tmp";
+        example = "/tmp/cups";
+        description = lib.mdDoc ''
+          CUPSd temporary directory.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.printing.enable {
+
+    users.users.cups =
+      { uid = config.ids.uids.cups;
+        group = "lp";
+        description = "CUPS printing services";
+      };
+
+    # We need xdg-open (part of xdg-utils) for the desktop file to properly open the user's default browser when opening "Manage Printing"
+    # https://github.com/NixOS/nixpkgs/pull/237994#issuecomment-1597510969
+    environment.systemPackages = [ cups.out xdg-utils ] ++ optional polkitEnabled cups-pk-helper;
+    environment.etc.cups.source = "/var/lib/cups";
+
+    services.dbus.packages = [ cups.out ] ++ optional polkitEnabled cups-pk-helper;
+    services.udev.packages = cfg.drivers;
+
+    # Allow passwordless printer admin for members of the wheel group
+    security.polkit.extraConfig = mkIf polkitEnabled ''
+      polkit.addRule(function(action, subject) {
+          if (action.id == "org.opensuse.cupspkhelper.mechanism.all-edit" &&
+              subject.isInGroup("wheel")){
+              return polkit.Result.YES;
+          }
+      });
+    '';
+
+    # CUPS uses libusb to talk to printers and does not use the Linux
+    # kernel usblp driver. If that driver is not blacklisted, it gets
+    # loaded and CUPS can no longer access the printers.
+    boot.blacklistedKernelModules = [ "usblp" ];
+
+    # Some programs like print-manager rely on this value to get
+    # printer test pages.
+    environment.sessionVariables.CUPS_DATADIR = "${bindir}/share/cups";
+
+    systemd.packages = [ cups.out ];
+
+    systemd.sockets.cups = mkIf cfg.startWhenNeeded {
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "" "/run/cups/cups.sock" ]
+        ++ map (x: replaceStrings ["localhost"] ["127.0.0.1"] (removePrefix "*:" x)) cfg.listenAddresses;
+    };
+
+    systemd.services.cups =
+      { wantedBy = optionals (!cfg.startWhenNeeded) [ "multi-user.target" ];
+        wants = [ "network.target" ];
+        after = [ "network.target" ];
+
+        path = [ cups.out ];
+
+        preStart = lib.optionalString cfg.stateless ''
+          rm -rf /var/cache/cups /var/lib/cups /var/spool/cups
+        '' + ''
+            mkdir -m 0700 -p /var/cache/cups
+            mkdir -m 0700 -p /var/spool/cups
+            mkdir -m 0755 -p ${cfg.tempDir}
+
+            mkdir -m 0755 -p /var/lib/cups
+            # While cups will automatically create self-signed certificates if accessed via TLS,
+            # this directory to store the certificates needs to be created manually.
+            mkdir -m 0700 -p /var/lib/cups/ssl
+
+            # Backwards compatibility
+            if [ ! -L /etc/cups ]; then
+              mv /etc/cups/* /var/lib/cups
+              rmdir /etc/cups
+              ln -s /var/lib/cups /etc/cups
+            fi
+            # First, clean existing symlinks
+            if [ -n "$(ls /var/lib/cups)" ]; then
+              for i in /var/lib/cups/*; do
+                [ -L "$i" ] && rm "$i"
+              done
+            fi
+            # Then, populate it with static files
+            cd ${rootdir}/etc/cups
+            for i in *; do
+              [ ! -e "/var/lib/cups/$i" ] && ln -s "${rootdir}/etc/cups/$i" "/var/lib/cups/$i"
+            done
+
+            #update path reference
+            [ -L /var/lib/cups/path ] && \
+              rm /var/lib/cups/path
+            [ ! -e /var/lib/cups/path ] && \
+              ln -s ${bindir} /var/lib/cups/path
+
+            ${optionalString (containsGutenprint cfg.drivers) ''
+              if [ -d /var/lib/cups/ppd ]; then
+                ${getGutenprint cfg.drivers}/bin/cups-genppdupdate -p /var/lib/cups/ppd
+              fi
+            ''}
+          '';
+
+          serviceConfig.PrivateTmp = true;
+      };
+
+    systemd.services.cups-browsed = mkIf avahiEnabled
+      { description = "CUPS Remote Printer Discovery";
+
+        wantedBy = [ "multi-user.target" ];
+        wants = [ "avahi-daemon.service" ] ++ optional (!cfg.startWhenNeeded) "cups.service";
+        bindsTo = [ "avahi-daemon.service" ] ++ optional (!cfg.startWhenNeeded) "cups.service";
+        partOf = [ "avahi-daemon.service" ] ++ optional (!cfg.startWhenNeeded) "cups.service";
+        after = [ "avahi-daemon.service" ] ++ optional (!cfg.startWhenNeeded) "cups.service";
+
+        path = [ cups ];
+
+        serviceConfig.ExecStart = "${cups-filters}/bin/cups-browsed";
+
+        restartTriggers = [ browsedFile ];
+      };
+
+    services.printing.extraConf =
+      ''
+        DefaultAuthType Basic
+
+        <Location />
+          Order allow,deny
+          ${cfg.allowFrom}
+        </Location>
+
+        <Location /admin>
+          Order allow,deny
+          ${cfg.allowFrom}
+        </Location>
+
+        <Location /admin/conf>
+          AuthType Basic
+          Require user @SYSTEM
+          Order allow,deny
+          ${cfg.allowFrom}
+        </Location>
+
+        <Policy default>
+          <Limit Send-Document Send-URI Hold-Job Release-Job Restart-Job Purge-Jobs Set-Job-Attributes Create-Job-Subscription Renew-Subscription Cancel-Subscription Get-Notifications Reprocess-Job Cancel-Current-Job Suspend-Current-Job Resume-Job CUPS-Move-Job>
+            Require user @OWNER @SYSTEM
+            Order deny,allow
+          </Limit>
+
+          <Limit Pause-Printer Resume-Printer Set-Printer-Attributes Enable-Printer Disable-Printer Pause-Printer-After-Current-Job Hold-New-Jobs Release-Held-New-Jobs Deactivate-Printer Activate-Printer Restart-Printer Shutdown-Printer Startup-Printer Promote-Job Schedule-Job-After CUPS-Add-Printer CUPS-Delete-Printer CUPS-Add-Class CUPS-Delete-Class CUPS-Accept-Jobs CUPS-Reject-Jobs CUPS-Set-Default>
+            AuthType Basic
+            Require user @SYSTEM
+            Order deny,allow
+          </Limit>
+
+          <Limit Cancel-Job CUPS-Authenticate-Job>
+            Require user @OWNER @SYSTEM
+            Order deny,allow
+          </Limit>
+
+          <Limit All>
+            Order deny,allow
+          </Limit>
+        </Policy>
+      '';
+
+    security.pam.services.cups = {};
+
+    networking.firewall = let
+      listenPorts = parsePorts cfg.listenAddresses;
+    in mkIf cfg.openFirewall {
+      allowedTCPPorts = listenPorts;
+      allowedUDPPorts = listenPorts;
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ matthewbauer ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/printing/ipp-usb.nix b/nixpkgs/nixos/modules/services/printing/ipp-usb.nix
new file mode 100644
index 000000000000..8ed2ff826871
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/printing/ipp-usb.nix
@@ -0,0 +1,63 @@
+{ config, lib, pkgs, ... }: {
+  options = {
+    services.ipp-usb = {
+      enable = lib.mkEnableOption (lib.mdDoc "ipp-usb, a daemon to turn an USB printer/scanner supporting IPP everywhere (aka AirPrint, WSD, AirScan) into a locally accessible network printer/scanner");
+    };
+  };
+  config = lib.mkIf config.services.ipp-usb.enable {
+    systemd.services.ipp-usb = {
+      description = "Daemon for IPP over USB printer support";
+      after = [ "cups.service" "avahi-daemon.service" ];
+      wants = [ "avahi-daemon.service" ];
+      serviceConfig = {
+        ExecStart = [ "${pkgs.ipp-usb}/bin/ipp-usb" ];
+        Type = "simple";
+        Restart = "on-failure";
+        StateDirectory = "ipp-usb";
+        LogsDirectory = "ipp-usb";
+
+        # hardening.
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectControlGroups = true;
+        MemoryDenyWriteExecute = true;
+        # breaks the daemon, presumably because it messes with DeviceAllow
+        ProtectClock = false;
+        ProtectKernelTunables = true;
+        ProtectKernelLogs = true;
+        ProtectSystem = "strict";
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        PrivateMounts = true;
+        ProtectHostname = true;
+        ProtectKernelModules = true;
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_NETLINK" "AF_INET" "AF_INET6" ];
+        ProtectProc = "noaccess";
+      };
+    };
+
+    # starts the systemd service
+    services.udev.packages = [ pkgs.ipp-usb ];
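+    # ipp-usb advertises the emulated printer/scanner via DNS-SD, so Avahi
+    # must be running with service publishing enabled.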
+    services.avahi = {
+      enable = true;
+      publish = {
+        enable = true;
+        userServices = true;
+      };
+    };
+    # enable printing and scanning by default, but not required.
+    services.printing.enable = lib.mkDefault true;
+    hardware.sane.enable = lib.mkDefault true;
+    # so that sane discovers scanners
+    hardware.sane.extraBackends = [ pkgs.sane-airscan ];
+  };
+}
+
+
diff --git a/nixpkgs/nixos/modules/services/scheduling/atd.nix b/nixpkgs/nixos/modules/services/scheduling/atd.nix
new file mode 100644
index 000000000000..235d4f348e5e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/scheduling/atd.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.atd;
+
+  inherit (pkgs) at;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.atd.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the {command}`at` daemon, a command scheduler.
+      '';
+    };
+
+    services.atd.allowEveryone = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to make {file}`/var/spool/at{jobs,spool}`
+        writeable by everyone (and sticky).  This is normally not
+        needed since the {command}`at` commands are
+        setuid/setgid `atd`.
+     '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    # Not wrapping "batch" because it's a shell script (kernel drops perms
+    # anyway) and it's patched to invoke the "at" setuid wrapper.
+    security.wrappers = builtins.listToAttrs (
+      map (program: { name = "${program}"; value = {
+      source = "${at}/bin/${program}";
+      owner = "atd";
+      group = "atd";
+      setuid = true;
+      setgid = true;
+    };}) [ "at" "atq" "atrm" ]);
+
+    environment.systemPackages = [ at ];
+
+    security.pam.services.atd = {};
+
+    users.users.atd =
+      {
+        uid = config.ids.uids.atd;
+        group = "atd";
+        description = "atd user";
+        home = "/var/empty";
+      };
+
+    users.groups.atd.gid = config.ids.gids.atd;
+
+    systemd.services.atd = {
+      description = "Job Execution Daemon (atd)";
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ at ];
+
+      preStart = ''
+        # Snippets taken and adapted from the original `install' rule of
+        # the makefile.
+
+        # We assume these values are those actually used in Nixpkgs for
+        # `at'.
+        spooldir=/var/spool/atspool
+        jobdir=/var/spool/atjobs
+        etcdir=/etc/at
+
+        install -dm755 -o atd -g atd "$etcdir"
+        spool_and_job_dir_perms=${if cfg.allowEveryone then "1777" else "1770"}
+        install -dm"$spool_and_job_dir_perms" -o atd -g atd "$spooldir" "$jobdir"
+        if [ ! -f "$etcdir"/at.deny ]; then
+            touch "$etcdir"/at.deny
+            chown root:atd "$etcdir"/at.deny
+            chmod 640 "$etcdir"/at.deny
+        fi
+        if [ ! -f "$jobdir"/.SEQ ]; then
+            touch "$jobdir"/.SEQ
+            chown atd:atd "$jobdir"/.SEQ
+            chmod 600 "$jobdir"/.SEQ
+        fi
+      '';
+
+      script = "atd";
+
+      serviceConfig.Type = "forking";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/scheduling/cron.nix b/nixpkgs/nixos/modules/services/scheduling/cron.nix
new file mode 100644
index 000000000000..6e8fe5d9d031
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/scheduling/cron.nix
@@ -0,0 +1,138 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  # Put all the system cronjobs together.
+  systemCronJobsFile = pkgs.writeText "system-crontab"
+    ''
+      SHELL=${pkgs.bash}/bin/bash
+      PATH=${config.system.path}/bin:${config.system.path}/sbin
+      ${optionalString (config.services.cron.mailto != null) ''
+        MAILTO="${config.services.cron.mailto}"
+      ''}
+      NIX_CONF_DIR=/etc/nix
+      ${lib.concatStrings (map (job: job + "\n") config.services.cron.systemCronJobs)}
+    '';
+
+  # Vixie cron requires build-time configuration for the sendmail path.
+  cronNixosPkg = pkgs.cron.override {
+    # The mail.nix nixos module, if there is any local mail system enabled,
+    # should have sendmail in this path.
+    sendmailPath = "/run/wrappers/bin/sendmail";
+  };
+
+  allFiles =
+    optional (config.services.cron.systemCronJobs != []) systemCronJobsFile
+    ++ config.services.cron.cronFiles;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.cron = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the Vixie cron daemon.";
+      };
+
+      mailto = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "Email address to which job output will be mailed.";
+      };
+
+      systemCronJobs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = literalExpression ''
+          [ "* * * * *  test   ls -l / > /tmp/cronout 2>&1"
+            "* * * * *  eelco  echo Hello World > /home/eelco/cronout"
+          ]
+        '';
+        description = lib.mdDoc ''
+          A list of Cron jobs to be appended to the system-wide
+          crontab.  See the manual page for crontab for the expected
+          format. If you want job output to be mailed, sendmail must be
+          setuid; see {option}`security.wrappers`.
+
+          If neither /var/cron/cron.deny nor /var/cron/cron.allow exists, only root
+          is allowed to have its own crontab file. The /var/cron/cron.deny file
+          is created automatically for you, so every user can use a crontab.
+
+          Many NixOS modules set systemCronJobs, so if you decide to disable Vixie cron
+          and enable another cron daemon, you may want it to get its system crontab
+          based on systemCronJobs.
+        '';
+      };
+
+      cronFiles = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          A list of extra crontab files that will be read and appended to the main
+          crontab file when the cron service starts.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge [
+
+    { services.cron.enable = mkDefault (allFiles != []); }
+    (mkIf (config.services.cron.enable) {
+      security.wrappers.crontab =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${cronNixosPkg}/bin/crontab";
+        };
+      environment.systemPackages = [ cronNixosPkg ];
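+      # Concatenate the generated system crontab and any extra cronFiles into a single /etc/crontab.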
+      environment.etc.crontab =
+        { source = pkgs.runCommand "crontabs" { inherit allFiles; preferLocalBuild = true; }
+            ''
+              touch $out
+              for i in $allFiles; do
+                cat "$i" >> $out
+              done
+            '';
+          mode = "0600"; # Cron requires this.
+        };
+
+      systemd.services.cron =
+        { description = "Cron Daemon";
+
+          wantedBy = [ "multi-user.target" ];
+
+          preStart =
+            ''
+              mkdir -m 710 -p /var/cron
+
+              # By default, allow all users to create a crontab.  This
+              # is denoted by the existence of an empty cron.deny file.
+              if ! test -e /var/cron/cron.allow -o -e /var/cron/cron.deny; then
+                  touch /var/cron/cron.deny
+              fi
+            '';
+
+          restartTriggers = [ config.time.timeZone ];
+          serviceConfig.ExecStart = "${cronNixosPkg}/bin/cron -n";
+        };
+
+    })
+
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/scheduling/fcron.nix b/nixpkgs/nixos/modules/services/scheduling/fcron.nix
new file mode 100644
index 000000000000..47bd358f979d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/scheduling/fcron.nix
@@ -0,0 +1,170 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.fcron;
+
+  queuelen = optionalString (cfg.queuelen != null) "-q ${toString cfg.queuelen}";
+
+  # Duplicate code, also found in cron.nix. Needs deduplication.
+  systemCronJobs =
+    ''
+      SHELL=${pkgs.bash}/bin/bash
+      PATH=${config.system.path}/bin:${config.system.path}/sbin
+      ${optionalString (config.services.cron.mailto != null) ''
+        MAILTO="${config.services.cron.mailto}"
+      ''}
+      NIX_CONF_DIR=/etc/nix
+      ${lib.concatStrings (map (job: job + "\n") config.services.cron.systemCronJobs)}
+    '';
+
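+  # Build an /etc entry for fcron.allow or fcron.deny with one user name per line.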
+  allowdeny = target: users:
+    { source = pkgs.writeText "fcron.${target}" (concatStringsSep "\n" users);
+      target = "fcron.${target}";
+      mode = "644";
+      gid = config.ids.gids.fcron;
+    };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.fcron = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the {command}`fcron` daemon.";
+      };
+
+      allow = mkOption {
+        type = types.listOf types.str;
+        default = [ "all" ];
+        description = lib.mdDoc ''
+          Users allowed to use fcrontab and fcrondyn (one name per
+          line, `all` for everyone).
+        '';
+      };
+
+      deny = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Users forbidden from using fcron.";
+      };
+
+      maxSerialJobs = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc "Maximum number of serial jobs which can run simultaneously.";
+      };
+
+      queuelen = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc "Number of jobs the serial queue and the lavg queue can contain.";
+      };
+
+      systab = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''The "system" crontab contents.'';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.fcron.systab = systemCronJobs;
+
+    environment.etc = listToAttrs
+      (map (x: { name = x.target; value = x; })
+      [ (allowdeny "allow" (cfg.allow))
+        (allowdeny "deny" cfg.deny)
+        # see man 5 fcron.conf
+        { source =
+            let
+              isSendmailWrapped =
+                lib.hasAttr "sendmail" config.security.wrappers;
+              sendmailPath =
+                if isSendmailWrapped then "/run/wrappers/bin/sendmail"
+                else "${config.system.path}/bin/sendmail";
+            in
+            pkgs.writeText "fcron.conf" ''
+              fcrontabs   =       /var/spool/fcron
+              pidfile     =       /run/fcron.pid
+              fifofile    =       /run/fcron.fifo
+              fcronallow  =       /etc/fcron.allow
+              fcrondeny   =       /etc/fcron.deny
+              shell       =       /bin/sh
+              sendmail    =       ${sendmailPath}
+              editor      =       ${pkgs.vim}/bin/vim
+            '';
+          target = "fcron.conf";
+          gid = config.ids.gids.fcron;
+          mode = "0644";
+        }
+      ]);
+
+    environment.systemPackages = [ pkgs.fcron ];
+    users.users.fcron = {
+      uid = config.ids.uids.fcron;
+      home = "/var/spool/fcron";
+      group = "fcron";
+    };
+    users.groups.fcron.gid = config.ids.gids.fcron;
+
+    security.wrappers = {
+      fcrontab = {
+        source = "${pkgs.fcron}/bin/fcrontab";
+        owner = "fcron";
+        group = "fcron";
+        setgid = true;
+        setuid = true;
+      };
+      fcrondyn = {
+        source = "${pkgs.fcron}/bin/fcrondyn";
+        owner = "fcron";
+        group = "fcron";
+        setgid = true;
+        setuid = false;
+      };
+      fcronsighup = {
+        source = "${pkgs.fcron}/bin/fcronsighup";
+        owner = "root";
+        group = "fcron";
+        setuid = true;
+      };
+    };
+    systemd.services.fcron = {
+      description = "fcron daemon";
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ pkgs.fcron ];
+
+      preStart = ''
+        install \
+          --mode 0770 \
+          --owner fcron \
+          --group fcron \
+          --directory /var/spool/fcron
+        # load system crontab file
+        /run/wrappers/bin/fcrontab -u systab - < ${pkgs.writeText "systab" cfg.systab}
+      '';
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.fcron}/sbin/fcron -m ${toString cfg.maxSerialJobs} ${queuelen}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/search/elasticsearch-curator.nix b/nixpkgs/nixos/modules/services/search/elasticsearch-curator.nix
new file mode 100644
index 000000000000..0a21d705ef87
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/search/elasticsearch-curator.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+    cfg = config.services.elasticsearch-curator;
+    curatorConfig = pkgs.writeTextFile {
+      name = "config.yaml";
+      text = ''
+        ---
+        # Remember, leave a key empty if there is no value.  None will be a string,
+        # not a Python "NoneType"
+        client:
+          hosts: ${builtins.toJSON cfg.hosts}
+          port: ${toString cfg.port}
+          url_prefix:
+          use_ssl: False
+          certificate:
+          client_cert:
+          client_key:
+          ssl_no_validate: False
+          http_auth:
+          timeout: 30
+          master_only: False
+        logging:
+          loglevel: INFO
+          logfile:
+          logformat: default
+          blacklist: ['elasticsearch', 'urllib3']
+        '';
+    };
+    curatorAction = pkgs.writeTextFile {
+      name = "action.yaml";
+      text = cfg.actionYAML;
+    };
+in {
+
+  options.services.elasticsearch-curator = {
+
+    enable = mkEnableOption (lib.mdDoc "elasticsearch curator");
+    interval = mkOption {
+      description = lib.mdDoc "How often to run curator, as a systemd.time expression such as 'hourly'.";
+      default = "hourly";
+      type = types.str;
+    };
+    hosts = mkOption {
+      description = lib.mdDoc "a list of elasticsearch hosts to connect to";
+      type = types.listOf types.str;
+      default = ["localhost"];
+    };
+    port = mkOption {
+      description = lib.mdDoc "the port that elasticsearch is listening on";
+      type = types.port;
+      default = 9200;
+    };
+    actionYAML = mkOption {
+      description = lib.mdDoc "Contents of the curator action.yaml file. Alternatively, use curator-cli, which takes a simple action command.";
+      type = types.lines;
+      example = ''
+        ---
+        actions:
+          1:
+            action: delete_indices
+            description: >-
+              Delete indices older than 45 days (based on index name), for logstash-
+              prefixed indices. Ignore the error if the filter does not result in an
+              actionable list of indices (ignore_empty_list) and exit cleanly.
+            options:
+              ignore_empty_list: True
+              disable_action: False
+            filters:
+            - filtertype: pattern
+              kind: prefix
+              value: logstash-
+            - filtertype: age
+              source: name
+              direction: older
+              timestring: '%Y.%m.%d'
+              unit: days
+              unit_count: 45
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.elasticsearch-curator = {
+      startAt = cfg.interval;
+      serviceConfig = {
+        ExecStart =
+          "${pkgs.elasticsearch-curator}/bin/curator" +
+          " --config ${curatorConfig} ${curatorAction}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/search/elasticsearch.nix b/nixpkgs/nixos/modules/services/search/elasticsearch.nix
new file mode 100644
index 000000000000..fa1627566ebe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/search/elasticsearch.nix
@@ -0,0 +1,239 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.elasticsearch;
+
+  es7 = builtins.compareVersions cfg.package.version "7" >= 0;
+
+  esConfig = ''
+    network.host: ${cfg.listenAddress}
+    cluster.name: ${cfg.cluster_name}
+    ${lib.optionalString cfg.single_node "discovery.type: single-node"}
+    ${lib.optionalString (cfg.single_node && es7) "gateway.auto_import_dangling_indices: true"}
+
+    http.port: ${toString cfg.port}
+    transport.port: ${toString cfg.tcp_port}
+
+    ${cfg.extraConf}
+  '';
+
+  configDir = cfg.dataDir + "/config";
+
+  elasticsearchYml = pkgs.writeTextFile {
+    name = "elasticsearch.yml";
+    text = esConfig;
+  };
+
+  loggingConfigFilename = "log4j2.properties";
+  loggingConfigFile = pkgs.writeTextFile {
+    name = loggingConfigFilename;
+    text = cfg.logging;
+  };
+
+  esPlugins = pkgs.buildEnv {
+    name = "elasticsearch-plugins";
+    paths = cfg.plugins;
+    postBuild = "${pkgs.coreutils}/bin/mkdir -p $out/plugins";
+  };
+
+in
+{
+
+  ###### interface
+
+  options.services.elasticsearch = {
+    enable = mkOption {
+      description = lib.mdDoc "Whether to enable elasticsearch.";
+      default = false;
+      type = types.bool;
+    };
+
+    package = mkOption {
+      description = lib.mdDoc "Elasticsearch package to use.";
+      default = pkgs.elasticsearch;
+      defaultText = literalExpression "pkgs.elasticsearch";
+      type = types.package;
+    };
+
+    listenAddress = mkOption {
+      description = lib.mdDoc "Elasticsearch listen address.";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    port = mkOption {
+      description = lib.mdDoc "Elasticsearch port to listen for HTTP traffic.";
+      default = 9200;
+      type = types.port;
+    };
+
+    tcp_port = mkOption {
+      description = lib.mdDoc "Elasticsearch port for node-to-node communication.";
+      default = 9300;
+      type = types.int;
+    };
+
+    cluster_name = mkOption {
+      description = lib.mdDoc "Elasticsearch name that identifies your cluster for auto-discovery.";
+      default = "elasticsearch";
+      type = types.str;
+    };
+
+    single_node = mkOption {
+      description = lib.mdDoc "Start a single-node cluster";
+      default = true;
+      type = types.bool;
+    };
+
+    extraConf = mkOption {
+      description = lib.mdDoc "Extra configuration for elasticsearch.";
+      default = "";
+      type = types.str;
+      example = ''
+        node.name: "elasticsearch"
+        node.master: true
+        node.data: false
+      '';
+    };
+
+    logging = mkOption {
+      description = lib.mdDoc "Elasticsearch logging configuration.";
+      default = ''
+        logger.action.name = org.elasticsearch.action
+        logger.action.level = info
+
+        appender.console.type = Console
+        appender.console.name = console
+        appender.console.layout.type = PatternLayout
+        appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+
+        rootLogger.level = info
+        rootLogger.appenderRef.console.ref = console
+      '';
+      type = types.str;
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/elasticsearch";
+      description = lib.mdDoc ''
+        Data directory for elasticsearch.
+      '';
+    };
+
+    extraCmdLineOptions = mkOption {
+      description = lib.mdDoc "Extra command line options for the elasticsearch launcher.";
+      default = [ ];
+      type = types.listOf types.str;
+    };
+
+    extraJavaOptions = mkOption {
+      description = lib.mdDoc "Extra command line options for Java.";
+      default = [ ];
+      type = types.listOf types.str;
+      example = [ "-Djava.net.preferIPv4Stack=true" ];
+    };
+
+    plugins = mkOption {
+      description = lib.mdDoc "Extra elasticsearch plugins";
+      default = [ ];
+      type = types.listOf types.package;
+      example = lib.literalExpression "[ pkgs.elasticsearchPlugins.discovery-ec2 ]";
+    };
+
+    restartIfChanged  = mkOption {
+      type = types.bool;
+      description = lib.mdDoc ''
+        Automatically restart the service on config change.
+        This can be set to false to defer restarts on a server or cluster.
+        Please consider the security implications of inadvertently running an older version,
+        and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+      '';
+      default = true;
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.elasticsearch = {
+      description = "Elasticsearch Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ pkgs.inetutils ];
+      inherit (cfg) restartIfChanged;
+      environment = {
+        ES_HOME = cfg.dataDir;
+        ES_JAVA_OPTS = toString cfg.extraJavaOptions;
+        ES_PATH_CONF = configDir;
+      };
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/elasticsearch ${toString cfg.extraCmdLineOptions}";
+        User = "elasticsearch";
+        PermissionsStartOnly = true;
+        LimitNOFILE = "1024000";
+        Restart = "always";
+        TimeoutStartSec = "infinity";
+      };
+      preStart = ''
+        ${optionalString (!config.boot.isContainer) ''
+          # Only set vm.max_map_count if lower than ES required minimum
+          # This avoids conflict if configured via boot.kernel.sysctl
+          if [ `${pkgs.procps}/bin/sysctl -n vm.max_map_count` -lt 262144 ]; then
+            ${pkgs.procps}/bin/sysctl -w vm.max_map_count=262144
+          fi
+        ''}
+
+        mkdir -m 0700 -p ${cfg.dataDir}
+
+        # Install plugins
+        ln -sfT ${esPlugins}/plugins ${cfg.dataDir}/plugins
+        ln -sfT ${cfg.package}/lib ${cfg.dataDir}/lib
+        ln -sfT ${cfg.package}/modules ${cfg.dataDir}/modules
+
+        # elasticsearch needs to create the elasticsearch.keystore in the config directory
+        # so this directory needs to be writable.
+        mkdir -m 0700 -p ${configDir}
+
+        # Note that we copy config files from the nix store instead of symbolically linking them
+        # because otherwise X-Pack Security will raise the following exception:
+        # java.security.AccessControlException:
+        # access denied ("java.io.FilePermission" "/var/lib/elasticsearch/config/elasticsearch.yml" "read")
+
+        cp ${elasticsearchYml} ${configDir}/elasticsearch.yml
+        # Make sure the logging configuration for old elasticsearch versions is removed:
+        rm -f "${configDir}/logging.yml"
+        cp ${loggingConfigFile} ${configDir}/${loggingConfigFilename}
+        mkdir -p ${configDir}/scripts
+        cp ${cfg.package}/config/jvm.options ${configDir}/jvm.options
+        # redirect jvm logs to the data directory
+        mkdir -m 0700 -p ${cfg.dataDir}/logs
+        ${pkgs.sd}/bin/sd 'logs/gc.log' '${cfg.dataDir}/logs/gc.log' ${configDir}/jvm.options
+
+        if [ "$(id -u)" = 0 ]; then chown -R elasticsearch:elasticsearch ${cfg.dataDir}; fi
+      '';
+      postStart = ''
+        # Make sure elasticsearch is up and running before dependents
+        # are started
+        while ! ${pkgs.curl}/bin/curl -sS -f http://${cfg.listenAddress}:${toString cfg.port} 2>/dev/null; do
+          sleep 1
+        done
+      '';
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    users = {
+      groups.elasticsearch.gid = config.ids.gids.elasticsearch;
+      users.elasticsearch = {
+        uid = config.ids.uids.elasticsearch;
+        description = "Elasticsearch daemon user";
+        home = cfg.dataDir;
+        group = "elasticsearch";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/search/hound.nix b/nixpkgs/nixos/modules/services/search/hound.nix
new file mode 100644
index 000000000000..b41a2e2bae1f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/search/hound.nix
@@ -0,0 +1,126 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.hound;
+in {
+  options = {
+    services.hound = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the hound code search daemon.
+        '';
+      };
+
+      user = mkOption {
+        default = "hound";
+        type = types.str;
+        description = lib.mdDoc ''
+          User the hound daemon should execute under.
+        '';
+      };
+
+      group = mkOption {
+        default = "hound";
+        type = types.str;
+        description = lib.mdDoc ''
+          Group the hound daemon should execute under.
+        '';
+      };
+
+      extraGroups = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "dialout" ];
+        description = lib.mdDoc ''
+          List of extra groups that the "hound" user should be a part of.
+        '';
+      };
+
+      home = mkOption {
+        default = "/var/lib/hound";
+        type = types.path;
+        description = lib.mdDoc ''
+          The path to use as hound's $HOME. If the default user
+          "hound" is configured then this is the home of the "hound"
+          user.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.hound;
+        defaultText = literalExpression "pkgs.hound";
+        type = types.package;
+        description = lib.mdDoc ''
+          Package for running hound.
+        '';
+      };
+
+      config = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The full configuration of the Hound daemon. Note the dbpath
+          should be an absolute path to a writable location on disk.
+        '';
+        example = literalExpression ''
+          '''
+            {
+              "max-concurrent-indexers" : 2,
+              "dbpath" : "''${services.hound.home}/data",
+              "repos" : {
+                  "nixpkgs": {
+                    "url" : "https://www.github.com/NixOS/nixpkgs.git"
+                  }
+              }
+            }
+          '''
+        '';
+      };
+
+      listen = mkOption {
+        type = types.str;
+        default = "0.0.0.0:6080";
+        example = "127.0.0.1:6080 or just :6080";
+        description = lib.mdDoc ''
+          Listen on this IP:port / :port
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups = optionalAttrs (cfg.group == "hound") {
+      hound.gid = config.ids.gids.hound;
+    };
+
+    users.users = optionalAttrs (cfg.user == "hound") {
+      hound = {
+        description = "hound code search";
+        createHome = true;
+        home = cfg.home;
+        group = cfg.group;
+        extraGroups = cfg.extraGroups;
+        uid = config.ids.uids.hound;
+      };
+    };
+
+    systemd.services.hound = {
+      description = "Hound Code Search";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.home;
+        ExecStartPre = "${pkgs.git}/bin/git config --global --replace-all http.sslCAinfo /etc/ssl/certs/ca-certificates.crt";
+        ExecStart = "${cfg.package}/bin/houndd" +
+                    " -addr ${cfg.listen}" +
+                    " -conf ${pkgs.writeText "hound.json" cfg.config}";
+
+      };
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/search/meilisearch.md b/nixpkgs/nixos/modules/services/search/meilisearch.md
new file mode 100644
index 000000000000..299f56bf8293
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/search/meilisearch.md
@@ -0,0 +1,39 @@
+# Meilisearch {#module-services-meilisearch}
+
+Meilisearch is a lightweight, fast and powerful search engine. Think elastic search with a much smaller footprint.
+
+## Quickstart {#module-services-meilisearch-quickstart}
+
+The minimum configuration to start MeiliSearch is:
+
+```nix
+services.meilisearch.enable = true;
+```
+
+This will start the HTTP server included with MeiliSearch on port 7700.
+
+Test it with `curl -X GET 'http://localhost:7700/health'`.
+
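+A slightly fuller configuration, using the options defined by this module in `meilisearch.nix`, might look like this (all values shown are the defaults):
+
+```nix
+services.meilisearch = {
+  enable = true;
+  # address and port the bundled HTTP server listens on
+  listenAddress = "127.0.0.1";
+  listenPort = 7700;
+  # one of "development" or "production"
+  environment = "development";
+};
+```
+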
+## Usage {#module-services-meilisearch-usage}
+
+You first need to add documents to an index before you can search for them.
+
+### Add documents to the `movies` index {#module-services-meilisearch-quickstart-add}
+
+`curl -X POST 'http://127.0.0.1:7700/indexes/movies/documents' --data '[{"id": "123", "title": "Superman"}, {"id": 234, "title": "Batman"}]'`
+
+### Search documents in the `movies` index {#module-services-meilisearch-quickstart-search}
+
+`curl 'http://127.0.0.1:7700/indexes/movies/search' --data '{ "q": "botman" }'` (note: the typo is intentional, to demonstrate the typo-tolerant search capabilities)
+
+## Defaults {#module-services-meilisearch-defaults}
+
+- The default NixOS package doesn't come with the [dashboard](https://docs.meilisearch.com/learn/getting_started/quick_start.html#search), since building the dashboard requires downloading assets at compile time.
+
+- Anonymized analytics sent to MeiliSearch are disabled by default.
+
+- The default deployment runs in development mode. It doesn't require a secret master key, and all routes are unprotected and publicly accessible (see the production sketch below).
+
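+To run in production mode with all routes protected by a master key, a minimal sketch using the module's `environment` and `masterKeyEnvironmentFile` options (the secret file path below is only a placeholder) is:
+
+```nix
+services.meilisearch = {
+  enable = true;
+  environment = "production";
+  # file containing a single line of the form:
+  # MEILI_MASTER_KEY=my_secret_key
+  masterKeyEnvironmentFile = "/run/keys/meilisearch-master-key";
+};
+```
+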
+## Missing {#module-services-meilisearch-missing}
+
+- The snapshot feature is not yet configurable from the module; it is just a matter of adding the relevant environment variables (see the sketch below).
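+
+Until then, extra environment variables can be passed to the service unit directly. A sketch (the `MEILI_*` variable names are taken from the upstream MeiliSearch documentation and should be verified against the version you run):
+
+```nix
+# hypothetical example: enable scheduled snapshots by extending the unit's environment
+systemd.services.meilisearch.environment = {
+  MEILI_SCHEDULE_SNAPSHOT = "true";
+  MEILI_SNAPSHOT_DIR = "/var/lib/meilisearch/snapshots";
+};
+```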
diff --git a/nixpkgs/nixos/modules/services/search/meilisearch.nix b/nixpkgs/nixos/modules/services/search/meilisearch.nix
new file mode 100644
index 000000000000..7c9fa62ae954
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/search/meilisearch.nix
@@ -0,0 +1,130 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.meilisearch;
+
+in
+{
+
+  meta.maintainers = with maintainers; [ Br1ght0ne happysalada ];
+  meta.doc = ./meilisearch.md;
+
+  ###### interface
+
+  options.services.meilisearch = {
+    enable = mkEnableOption (lib.mdDoc "MeiliSearch - a RESTful search API");
+
+    package = mkOption {
+      description = lib.mdDoc "The package to use for meilisearch. Use this if you require specific features to be enabled. The default package has no features.";
+      default = pkgs.meilisearch;
+      defaultText = lib.literalExpression "pkgs.meilisearch";
+      type = types.package;
+    };
+
+    listenAddress = mkOption {
+      description = lib.mdDoc "MeiliSearch listen address.";
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    listenPort = mkOption {
+      description = lib.mdDoc "MeiliSearch port to listen on.";
+      default = 7700;
+      type = types.port;
+    };
+
+    environment = mkOption {
+      description = lib.mdDoc "Defines the running environment of MeiliSearch.";
+      default = "development";
+      type = types.enum [ "development" "production" ];
+    };
+
+    # TODO change this to LoadCredentials once possible
+    masterKeyEnvironmentFile = mkOption {
+      description = lib.mdDoc ''
+        Path to file which contains the master key.
+        By doing so, all routes will be protected and will require a key to be accessed.
+        If no master key is provided, all routes can be accessed without requiring any key.
+        The format is the following:
+        MEILI_MASTER_KEY=my_secret_key
+      '';
+      default = null;
+      type = with types; nullOr path;
+    };
+
+    noAnalytics = mkOption {
+      description = lib.mdDoc ''
+        Deactivates analytics.
+        Analytics allow MeiliSearch to know how many users are using MeiliSearch,
+        which versions and which platforms are used.
+        This process is entirely anonymous.
+      '';
+      default = true;
+      type = types.bool;
+    };
+
+    logLevel = mkOption {
+      description = lib.mdDoc ''
+        Defines how much detail should be present in MeiliSearch's logs.
+        MeiliSearch currently supports four log levels, listed in order of increasing verbosity:
+        - 'ERROR': only log unexpected events indicating MeiliSearch is not functioning as expected
+        - 'WARN': log all unexpected events, regardless of their severity
+        - 'INFO': log all events. This is the default value
+        - 'DEBUG': log all events, including detailed information on MeiliSearch's internal processes.
+          Useful when diagnosing issues and debugging
+      '';
+      default = "INFO";
+      type = types.str;
+    };
+
+    maxIndexSize = mkOption {
+      description = lib.mdDoc ''
+        Sets the maximum size of the index.
+        Value must be given in bytes or with an explicit base unit.
+        For example, the default value can be written as 107374182400, '107.4 Gb', or '107374 Mb'.
+        Default is 100 GiB.
+      '';
+      default = "107374182400";
+      type = types.str;
+    };
+
+    payloadSizeLimit = mkOption {
+      description = lib.mdDoc ''
+        Sets the maximum size of accepted JSON payloads.
+        Value must be given in bytes or with an explicit base unit.
+        For example, the default value can be written as 104857600 or '104.8 Mb'.
+        Default is 100 MiB (~105 MB).
+      '';
+      default = "104857600";
+      type = types.str;
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.services.meilisearch = {
+      description = "MeiliSearch daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment = {
+        MEILI_DB_PATH = "/var/lib/meilisearch";
+        MEILI_HTTP_ADDR = "${cfg.listenAddress}:${toString cfg.listenPort}";
+        MEILI_NO_ANALYTICS = toString cfg.noAnalytics;
+        MEILI_ENV = cfg.environment;
+        MEILI_DUMP_DIR = "/var/lib/meilisearch/dumps";
+        MEILI_LOG_LEVEL = cfg.logLevel;
+        MEILI_MAX_INDEX_SIZE = cfg.maxIndexSize;
+      };
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/meilisearch";
+        DynamicUser = true;
+        StateDirectory = "meilisearch";
+        EnvironmentFile = mkIf (cfg.masterKeyEnvironmentFile != null) cfg.masterKeyEnvironmentFile;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/search/opensearch.nix b/nixpkgs/nixos/modules/services/search/opensearch.nix
new file mode 100644
index 000000000000..ae79d5545fd7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/search/opensearch.nix
@@ -0,0 +1,267 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.opensearch;
+
+  settingsFormat = pkgs.formats.yaml {};
+
+  configDir = cfg.dataDir + "/config";
+
+  usingDefaultDataDir = cfg.dataDir == "/var/lib/opensearch";
+  usingDefaultUserAndGroup = cfg.user == "opensearch" && cfg.group == "opensearch";
+
+  opensearchYml = settingsFormat.generate "opensearch.yml" cfg.settings;
+
+  loggingConfigFilename = "log4j2.properties";
+  loggingConfigFile = pkgs.writeTextFile {
+    name = loggingConfigFilename;
+    text = cfg.logging;
+  };
+in
+{
+
+  options.services.opensearch = {
+    enable = mkEnableOption (lib.mdDoc "OpenSearch");
+
+    package = lib.mkPackageOptionMD pkgs "OpenSearch" {
+      default = [ "opensearch" ];
+    };
+
+    settings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options."network.host" = lib.mkOption {
+          type = lib.types.str;
+          default = "127.0.0.1";
+          description = lib.mdDoc ''
+            The network address this service should bind to.
+          '';
+        };
+
+        options."cluster.name" = lib.mkOption {
+          type = lib.types.str;
+          default = "opensearch";
+          description = lib.mdDoc ''
+            The name of the cluster.
+          '';
+        };
+
+        options."discovery.type" = lib.mkOption {
+          type = lib.types.str;
+          default = "single-node";
+          description = lib.mdDoc ''
+            The type of discovery to use.
+          '';
+        };
+
+        options."http.port" = lib.mkOption {
+          type = lib.types.port;
+          default = 9200;
+          description = lib.mdDoc ''
+            The port to listen on for HTTP traffic.
+          '';
+        };
+
+        options."transport.port" = lib.mkOption {
+          type = lib.types.port;
+          default = 9300;
+          description = lib.mdDoc ''
+            The port to listen on for transport traffic.
+          '';
+        };
+
+        options."plugins.security.disabled" = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to disable the security plugin. If the plugin is enabled,
+            `plugins.security.ssl.transport.keystore_filepath`, or both
+            `plugins.security.ssl.transport.server.pemcert_filepath` and
+            `plugins.security.ssl.transport.client.pemcert_filepath`,
+            must be set.
+          '';
+        };
+      };
+
+      default = {};
+
+      description = lib.mdDoc ''
+        OpenSearch configuration.
+      '';
+    };
+
+    logging = lib.mkOption {
+      description = lib.mdDoc "opensearch logging configuration.";
+
+      default = ''
+        logger.action.name = org.opensearch.action
+        logger.action.level = info
+
+        appender.console.type = Console
+        appender.console.name = console
+        appender.console.layout.type = PatternLayout
+        appender.console.layout.pattern = [%d{ISO8601}][%-5p][%-25c{1.}] %marker%m%n
+
+        rootLogger.level = info
+        rootLogger.appenderRef.console.ref = console
+      '';
+      type = types.str;
+    };
+
+    dataDir = lib.mkOption {
+      type = lib.types.path;
+      default = "/var/lib/opensearch";
+      apply = converge (removeSuffix "/");
+      description = lib.mdDoc ''
+        Data directory for OpenSearch. If you change this, you need to
+        manually create the directory. You also need to create the
+        `opensearch` user and group, or change
+        [](#opt-services.opensearch.user) and
+        [](#opt-services.opensearch.group) to existing ones with
+        access to the directory.
+      '';
+    };
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      default = "opensearch";
+      description = lib.mdDoc ''
+        The user OpenSearch runs as. Should be left at default unless
+        you have very specific needs.
+      '';
+    };
+
+    group = lib.mkOption {
+      type = lib.types.str;
+      default = "opensearch";
+      description = lib.mdDoc ''
+        The group OpenSearch runs as. Should be left at default unless
+        you have very specific needs.
+      '';
+    };
+
+    extraCmdLineOptions = lib.mkOption {
+      description = lib.mdDoc "Extra command line options for the OpenSearch launcher.";
+      default = [ ];
+      type = lib.types.listOf lib.types.str;
+    };
+
+    extraJavaOptions = lib.mkOption {
+      description = lib.mdDoc "Extra command line options for Java.";
+      default = [ ];
+      type = lib.types.listOf lib.types.str;
+      example = [ "-Djava.net.preferIPv4Stack=true" ];
+    };
+
+    restartIfChanged = lib.mkOption {
+      type = lib.types.bool;
+      description = lib.mdDoc ''
+        Automatically restart the service on config change.
+        This can be set to false to defer restarts on a server or cluster.
+        Please consider the security implications of inadvertently running an older version,
+        and the possibility of unexpected behavior caused by inconsistent versions across a cluster when disabling this option.
+      '';
+      default = true;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.opensearch = {
+      description = "OpenSearch Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ pkgs.inetutils ];
+      inherit (cfg) restartIfChanged;
+      environment = {
+        OPENSEARCH_HOME = cfg.dataDir;
+        OPENSEARCH_JAVA_OPTS = toString cfg.extraJavaOptions;
+        OPENSEARCH_PATH_CONF = configDir;
+      };
+      serviceConfig = {
+        ExecStartPre =
+          let
+            startPreFullPrivileges = ''
+              set -o errexit -o pipefail -o nounset -o errtrace
+              shopt -s inherit_errexit
+            '' + (optionalString (!config.boot.isContainer) ''
+              # Only set vm.max_map_count if lower than ES required minimum
+              # This avoids conflict if configured via boot.kernel.sysctl
+              if [ $(${pkgs.procps}/bin/sysctl -n vm.max_map_count) -lt 262144 ]; then
+                ${pkgs.procps}/bin/sysctl -w vm.max_map_count=262144
+              fi
+            '');
+            startPreUnprivileged = ''
+              set -o errexit -o pipefail -o nounset -o errtrace
+              shopt -s inherit_errexit
+
+              # Install plugins
+
+              # remove plugins directory if it is empty.
+              if [ -z "$(ls -A ${cfg.dataDir}/plugins)" ]; then
+                rm -r "${cfg.dataDir}/plugins"
+              fi
+
+              ln -sfT "${cfg.package}/plugins" "${cfg.dataDir}/plugins"
+              ln -sfT ${cfg.package}/lib ${cfg.dataDir}/lib
+              ln -sfT ${cfg.package}/modules ${cfg.dataDir}/modules
+
+              # opensearch needs to create the opensearch.keystore in the config directory
+              # so this directory needs to be writable.
+              mkdir -p ${configDir}
+              chmod 0700 ${configDir}
+
+              # Note that we copy config files from the nix store instead of symbolically linking them
+              # because otherwise X-Pack Security will raise the following exception:
+              # java.security.AccessControlException:
+              # access denied ("java.io.FilePermission" "/var/lib/opensearch/config/opensearch.yml" "read")
+
+              rm -f ${configDir}/opensearch.yml
+              cp ${opensearchYml} ${configDir}/opensearch.yml
+
+              # Make sure the logging configuration for old OpenSearch versions is removed:
+              rm -f "${configDir}/logging.yml"
+              rm -f ${configDir}/${loggingConfigFilename}
+              cp ${loggingConfigFile} ${configDir}/${loggingConfigFilename}
+              mkdir -p ${configDir}/scripts
+
+              rm -f ${configDir}/jvm.options
+              cp ${cfg.package}/config/jvm.options ${configDir}/jvm.options
+
+              # redirect jvm logs to the data directory
+              mkdir -p ${cfg.dataDir}/logs
+              chmod 0700 ${cfg.dataDir}/logs
+              sed -e 's#logs/gc.log#${cfg.dataDir}/logs/gc.log#' -i ${configDir}/jvm.options
+            '';
+          in [
+            "+${pkgs.writeShellScript "opensearch-start-pre-full-privileges" startPreFullPrivileges}"
+            "${pkgs.writeShellScript "opensearch-start-pre-unprivileged" startPreUnprivileged}"
+          ];
+        ExecStartPost = pkgs.writeShellScript "opensearch-start-post" ''
+          set -o errexit -o pipefail -o nounset -o errtrace
+          shopt -s inherit_errexit
+
+          # Make sure opensearch is up and running before dependents
+          # are started
+          while ! ${pkgs.curl}/bin/curl -sS -f http://${cfg.settings."network.host"}:${toString cfg.settings."http.port"} 2>/dev/null; do
+            sleep 1
+          done
+        '';
+        ExecStart = "${cfg.package}/bin/opensearch ${toString cfg.extraCmdLineOptions}";
+        User = cfg.user;
+        Group = cfg.group;
+        LimitNOFILE = "1024000";
+        Restart = "always";
+        TimeoutStartSec = "infinity";
+        DynamicUser = usingDefaultUserAndGroup && usingDefaultDataDir;
+      } // (optionalAttrs (usingDefaultDataDir) {
+        StateDirectory = "opensearch";
+        StateDirectoryMode = "0700";
+      });
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/search/qdrant.nix b/nixpkgs/nixos/modules/services/search/qdrant.nix
new file mode 100644
index 000000000000..e1f7365d951a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/search/qdrant.nix
@@ -0,0 +1,129 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+
+  cfg = config.services.qdrant;
+
+  settingsFormat = pkgs.formats.yaml { };
+  configFile = settingsFormat.generate "config.yaml" cfg.settings;
+in {
+
+  options = {
+    services.qdrant = {
+      enable = mkEnableOption (lib.mdDoc "Vector Search Engine for the next generation of AI applications");
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Configuration for Qdrant.
+          Refer to <https://github.com/qdrant/qdrant/blob/master/config/config.yaml> for details on supported values.
+        '';
+
+        type = settingsFormat.type;
+
+        example = {
+          storage = {
+            storage_path = "/var/lib/qdrant/storage";
+            snapshots_path = "/var/lib/qdrant/snapshots";
+          };
+          hnsw_index = {
+            on_disk = true;
+          };
+          service = {
+            host = "127.0.0.1";
+            http_port = 6333;
+            grpc_port = 6334;
+          };
+          telemetry_disabled = true;
+        };
+
+        defaultText = literalExpression ''
+          {
+            storage = {
+              storage_path = "/var/lib/qdrant/storage";
+              snapshots_path = "/var/lib/qdrant/snapshots";
+            };
+            hnsw_index = {
+              on_disk = true;
+            };
+            service = {
+              host = "127.0.0.1";
+              http_port = 6333;
+              grpc_port = 6334;
+            };
+            telemetry_disabled = true;
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.qdrant.settings = {
+      storage.storage_path = mkDefault "/var/lib/qdrant/storage";
+      storage.snapshots_path = mkDefault "/var/lib/qdrant/snapshots";
+      # The following default values are the same as in the default config,
+      # they are just written here for convenience.
+      storage.on_disk_payload = mkDefault true;
+      storage.wal.wal_capacity_mb = mkDefault 32;
+      storage.wal.wal_segments_ahead = mkDefault 0;
+      storage.performance.max_search_threads = mkDefault 0;
+      storage.performance.max_optimization_threads = mkDefault 1;
+      storage.optimizers.deleted_threshold = mkDefault 0.2;
+      storage.optimizers.vacuum_min_vector_number = mkDefault 1000;
+      storage.optimizers.default_segment_number = mkDefault 0;
+      storage.optimizers.max_segment_size_kb = mkDefault null;
+      storage.optimizers.memmap_threshold_kb = mkDefault null;
+      storage.optimizers.indexing_threshold_kb = mkDefault 20000;
+      storage.optimizers.flush_interval_sec = mkDefault 5;
+      storage.optimizers.max_optimization_threads = mkDefault 1;
+      storage.hnsw_index.m = mkDefault 16;
+      storage.hnsw_index.ef_construct = mkDefault 100;
+      storage.hnsw_index.full_scan_threshold_kb = mkDefault 10000;
+      storage.hnsw_index.max_indexing_threads = mkDefault 0;
+      storage.hnsw_index.on_disk = mkDefault false;
+      storage.hnsw_index.payload_m = mkDefault null;
+      service.max_request_size_mb = mkDefault 32;
+      service.max_workers = mkDefault 0;
+      service.http_port = mkDefault 6333;
+      service.grpc_port = mkDefault 6334;
+      service.enable_cors = mkDefault true;
+      cluster.enabled = mkDefault false;
+      # the following have been altered for security
+      service.host = mkDefault "127.0.0.1";
+      telemetry_disabled = mkDefault true;
+    };
+
+    systemd.services.qdrant = {
+      description = "Vector Search Engine for the next generation of AI applications";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        LimitNOFILE=65536;
+        ExecStart = "${pkgs.qdrant}/bin/qdrant --config-path ${configFile}";
+        DynamicUser = true;
+        Restart = "on-failure";
+        StateDirectory = "qdrant";
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        ProtectHome = true;
+        ProtectClock = true;
+        ProtectProc = "noaccess";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        RestrictSUIDSGID = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/search/typesense.nix b/nixpkgs/nixos/modules/services/search/typesense.nix
new file mode 100644
index 000000000000..c158d04fea23
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/search/typesense.nix
@@ -0,0 +1,125 @@
+{ config, lib, pkgs, ... }: let
+  inherit
+    (lib)
+    concatMapStringsSep
+    generators
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOption
+    optionalString
+    types
+    ;
+
+  cfg = config.services.typesense;
+  settingsFormatIni = pkgs.formats.ini {
+    listToValue = concatMapStringsSep " " (generators.mkValueStringDefault { });
+    mkKeyValue = generators.mkKeyValueDefault
+      {
+        mkValueString = v:
+          if v == null then ""
+          else generators.mkValueStringDefault { } v;
+      }
+      "=";
+  };
+  configFile = settingsFormatIni.generate "typesense.ini" cfg.settings;
+in {
+  options.services.typesense = {
+    enable = mkEnableOption "typesense";
+    package = mkPackageOption pkgs "typesense" {};
+
+    apiKeyFile = mkOption {
+      type = types.path;
+      description = ''
+        Sets the admin api key for typesense. Always use this option
+        instead of {option}`settings.server.api-key` to prevent the key
+        from being written to the world-readable nix store.
+      '';
+    };
+
+    settings = mkOption {
+      description = mdDoc "Typesense configuration. Refer to [the documentation](https://typesense.org/docs/0.24.1/api/server-configuration.html) for supported values.";
+      default = {};
+      type = types.submodule {
+        freeformType = settingsFormatIni.type;
+        options.server = {
+          data-dir = mkOption {
+            type = types.str;
+            default = "/var/lib/typesense";
+            description = mdDoc "Path to the directory where data will be stored on disk.";
+          };
+
+          api-address = mkOption {
+            type = types.str;
+            description = mdDoc "Address to which Typesense API service binds.";
+          };
+
+          api-port = mkOption {
+            type = types.port;
+            default = 8108;
+            description = mdDoc "Port on which the Typesense API service listens.";
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.typesense = {
+      description = "Typesense search engine";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      script = ''
+        export TYPESENSE_API_KEY=$(cat ${cfg.apiKeyFile})
+        exec ${cfg.package}/bin/typesense-server --config ${configFile}
+      '';
+
+      serviceConfig = {
+        Restart = "on-failure";
+        DynamicUser = true;
+        User = "typesense";
+        Group = "typesense";
+
+        StateDirectory = "typesense";
+        StateDirectoryMode = "0750";
+
+        # Hardening
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        # MemoryDenyWriteExecute = true; needed since 0.25.1
+        NoNewPrivileges = true;
+        PrivateUsers = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProcSubset = "pid";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          "AF_UNIX"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        UMask = "0077";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/aesmd.nix b/nixpkgs/nixos/modules/services/security/aesmd.nix
new file mode 100644
index 000000000000..8b3f010d7c4d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/aesmd.nix
@@ -0,0 +1,251 @@
+{ config, options, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.services.aesmd;
+  opt = options.services.aesmd;
+
+  sgx-psw = pkgs.sgx-psw.override { inherit (cfg) debug; };
+
+  configFile = with cfg.settings; pkgs.writeText "aesmd.conf" (
+    concatStringsSep "\n" (
+      optional (whitelistUrl != null) "whitelist url = ${whitelistUrl}" ++
+      optional (proxy != null) "aesm proxy = ${proxy}" ++
+      optional (proxyType != null) "proxy type = ${proxyType}" ++
+      optional (defaultQuotingType != null) "default quoting type = ${defaultQuotingType}" ++
+      # Newline at end of file
+      [ "" ]
+    )
+  );
+in
+{
+  options.services.aesmd = {
+    enable = mkEnableOption (lib.mdDoc "Intel's Architectural Enclave Service Manager (AESM) for Intel SGX");
+    debug = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Whether to build the PSW package in debug mode.";
+    };
+    environment = mkOption {
+      type = with types; attrsOf str;
+      default = { };
+      description = mdDoc "Additional environment variables to pass to the AESM service.";
+      # Example environment variable for `sgx-azure-dcap-client` provider library
+      example = {
+        AZDCAP_COLLATERAL_VERSION = "v2";
+        AZDCAP_DEBUG_LOG_LEVEL = "INFO";
+      };
+    };
+    quoteProviderLibrary = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      example = literalExpression "pkgs.sgx-azure-dcap-client";
+      description = lib.mdDoc "Custom quote provider library to use.";
+    };
+    settings = mkOption {
+      description = lib.mdDoc "AESM configuration";
+      default = { };
+      type = types.submodule {
+        options.whitelistUrl = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          example = "http://whitelist.trustedservices.intel.com/SGX/LCWL/Linux/sgx_white_list_cert.bin";
+          description = lib.mdDoc "URL to retrieve authorized Intel SGX enclave signers.";
+        };
+        options.proxy = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          example = "http://proxy_url:1234";
+          description = lib.mdDoc "HTTP network proxy.";
+        };
+        options.proxyType = mkOption {
+          type = with types; nullOr (enum [ "default" "direct" "manual" ]);
+          default = if (cfg.settings.proxy != null) then "manual" else null;
+          defaultText = literalExpression ''
+            if (config.${opt.settings}.proxy != null) then "manual" else null
+          '';
+          example = "default";
+          description = lib.mdDoc ''
+            Type of proxy to use. The `default` uses the system's default proxy.
+            If `direct` is given, uses no proxy.
+            A value of `manual` uses the proxy from
+            {option}`services.aesmd.settings.proxy`.
+          '';
+        };
+        options.defaultQuotingType = mkOption {
+          type = with types; nullOr (enum [ "ecdsa_256" "epid_linkable" "epid_unlinkable" ]);
+          default = null;
+          example = "ecdsa_256";
+          description = lib.mdDoc "Attestation quote type.";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [{
+      assertion = !(config.boot.specialFileSystems."/dev".options ? "noexec");
+      message = "SGX requires exec permission for /dev";
+    }];
+
+    hardware.cpu.intel.sgx.provision.enable = true;
+
+    # Make sure the AESM service can find the SGX devices until
+    # https://github.com/intel/linux-sgx/issues/772 is resolved
+    # and updated in nixpkgs.
+    hardware.cpu.intel.sgx.enableDcapCompat = mkForce true;
+
+    systemd.services.aesmd =
+      let
+        storeAesmFolder = "${sgx-psw}/aesm";
+        # Hardcoded path AESM_DATA_FOLDER in psw/ae/aesm_service/source/oal/linux/aesm_util.cpp
+        aesmDataFolder = "/var/opt/aesmd/data";
+      in
+      {
+        description = "Intel Architectural Enclave Service Manager";
+        wantedBy = [ "multi-user.target" ];
+
+        after = [
+          "auditd.service"
+          "network.target"
+          "syslog.target"
+        ];
+
+        environment = {
+          NAME = "aesm_service";
+          AESM_PATH = storeAesmFolder;
+          LD_LIBRARY_PATH = makeLibraryPath [ cfg.quoteProviderLibrary ];
+        } // cfg.environment;
+
+        # Make sure any of the SGX application enclave devices is available
+        unitConfig.AssertPathExists = [
+          # legacy out-of-tree driver
+          "|/dev/isgx"
+          # DCAP driver
+          "|/dev/sgx/enclave"
+          # in-tree driver
+          "|/dev/sgx_enclave"
+        ];
+
+        serviceConfig = rec {
+          ExecStartPre = pkgs.writeShellScript "copy-aesmd-data-files.sh" ''
+            set -euo pipefail
+            whiteListFile="${aesmDataFolder}/white_list_cert_to_be_verify.bin"
+            if [[ ! -f "$whiteListFile" ]]; then
+              ${pkgs.coreutils}/bin/install -m 644 -D \
+                "${storeAesmFolder}/data/white_list_cert_to_be_verify.bin" \
+                "$whiteListFile"
+            fi
+          '';
+          ExecStart = "${sgx-psw}/bin/aesm_service --no-daemon";
+          ExecReload = ''${pkgs.coreutils}/bin/kill -SIGHUP "$MAINPID"'';
+
+          Restart = "on-failure";
+          RestartSec = "15s";
+
+          DynamicUser = true;
+          Group = "sgx";
+          SupplementaryGroups = [
+            config.hardware.cpu.intel.sgx.provision.group
+          ];
+
+          Type = "simple";
+
+          WorkingDirectory = storeAesmFolder;
+          StateDirectory = "aesmd";
+          StateDirectoryMode = "0700";
+          RuntimeDirectory = "aesmd";
+          RuntimeDirectoryMode = "0750";
+
+          # Hardening
+
+          # chroot into the runtime directory
+          RootDirectory = "%t/aesmd";
+          BindReadOnlyPaths = [
+            builtins.storeDir
+            # Hardcoded path AESM_CONFIG_FILE in psw/ae/aesm_service/source/utils/aesm_config.cpp
+            "${configFile}:/etc/aesmd.conf"
+          ];
+          BindPaths = [
+            # Hardcoded path CONFIG_SOCKET_PATH in psw/ae/aesm_service/source/core/ipc/SocketConfig.h
+            "%t/aesmd:/var/run/aesmd"
+            "%S/aesmd:/var/opt/aesmd"
+          ];
+
+          # PrivateDevices=true will mount /dev noexec which breaks AESM
+          PrivateDevices = false;
+          DevicePolicy = "closed";
+          DeviceAllow = [
+            # legacy out-of-tree driver
+            "/dev/isgx rw"
+            # DCAP driver
+            "/dev/sgx rw"
+            # in-tree driver
+            "/dev/sgx_enclave rw"
+            "/dev/sgx_provision rw"
+          ];
+
+          # Requires Internet access for attestation
+          PrivateNetwork = false;
+
+          RestrictAddressFamilies = [
+            # Allocates the socket /var/run/aesmd/aesm.socket
+            "AF_UNIX"
+            # Uses the HTTP protocol to initialize some services
+            "AF_INET"
+            "AF_INET6"
+          ];
+
+          # True breaks stuff
+          MemoryDenyWriteExecute = false;
+
+          # needs the ipc syscall in order to run
+          SystemCallFilter = [
+            "@system-service"
+            "~@aio"
+            "~@chown"
+            "~@clock"
+            "~@cpu-emulation"
+            "~@debug"
+            "~@keyring"
+            "~@memlock"
+            "~@module"
+            "~@mount"
+            "~@privileged"
+            "~@raw-io"
+            "~@reboot"
+            "~@resources"
+            "~@setuid"
+            "~@swap"
+            "~@sync"
+            "~@timer"
+          ];
+          SystemCallArchitectures = "native";
+          SystemCallErrorNumber = "EPERM";
+
+          CapabilityBoundingSet = "";
+          KeyringMode = "private";
+          LockPersonality = true;
+          NoNewPrivileges = true;
+          NotifyAccess = "none";
+          PrivateMounts = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProcSubset = "pid";
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          ProtectSystem = "strict";
+          RemoveIPC = true;
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          UMask = "0066";
+        };
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/authelia.nix b/nixpkgs/nixos/modules/services/security/authelia.nix
new file mode 100644
index 000000000000..cc55260e20f8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/authelia.nix
@@ -0,0 +1,401 @@
+{ lib
+, pkgs
+, config
+, ...
+}:
+
+let
+  cfg = config.services.authelia;
+
+  format = pkgs.formats.yaml { };
+  configFile = format.generate "config.yml" cfg.settings;
+
+  autheliaOpts = with lib; { name, ... }: {
+    options = {
+      enable = mkEnableOption (mdDoc "Authelia instance");
+
+      name = mkOption {
+        type = types.str;
+        default = name;
+        description = mdDoc ''
+          The name is used as a suffix for the service name, user, and group.
+          By default it takes the value you use for `<instance>` in
+          {option}`services.authelia.instances.<instance>`.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.authelia;
+        type = types.package;
+        defaultText = literalExpression "pkgs.authelia";
+        description = mdDoc "Authelia derivation to use.";
+      };
+
+      user = mkOption {
+        default = "authelia-${name}";
+        type = types.str;
+        description = mdDoc "The name of the user for this authelia instance.";
+      };
+
+      group = mkOption {
+        default = "authelia-${name}";
+        type = types.str;
+        description = mdDoc "The name of the group for this authelia instance.";
+      };
+
+      secrets = mkOption {
+        description = mdDoc ''
+          It is recommended you keep your secrets separate from the configuration.
+          It's especially important to keep the raw secrets out of your nix configuration,
+          as the values will be preserved in your nix store.
+          This attribute allows you to configure the location of secret files to be loaded at runtime.
+
+          https://www.authelia.com/configuration/methods/secrets/
+        '';
+        default = { };
+        type = types.submodule {
+          options = {
+            manual = mkOption {
+              default = false;
+              example = true;
+              description = mdDoc ''
+                Configuring authelia's secret files via the secrets attribute set
+                is intended to be convenient and to help catch cases where required
+                values are missing.
+                If you want to set these values yourself and bypass the validation, set this option to true.
+              '';
+              type = types.bool;
+            };
+
+            # required
+            jwtSecretFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc ''
+                Path to your JWT secret used during identity verification.
+              '';
+            };
+
+            oidcIssuerPrivateKeyFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc ''
+                Path to your private key file used to encrypt OIDC JWTs.
+              '';
+            };
+
+            oidcHmacSecretFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc ''
+                Path to your HMAC secret used to sign OIDC JWTs.
+              '';
+            };
+
+            sessionSecretFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc ''
+                Path to your session secret. Only used when redis is used as session storage.
+              '';
+            };
+
+            # required
+            storageEncryptionKeyFile = mkOption {
+              type = types.nullOr types.path;
+              default = null;
+              description = mdDoc ''
+                Path to your storage encryption key.
+              '';
+            };
+          };
+        };
+      };
+
+      environmentVariables = mkOption {
+        type = types.attrsOf types.str;
+        description = mdDoc ''
+          Additional environment variables to provide to authelia.
+          If you are providing secrets, please consider the options under {option}`services.authelia.instances.<instance>.secrets`
+          or make sure you use the `_FILE` suffix.
+          If you provide a raw secret rather than the location of a secret file, that secret will be preserved in the nix store.
+          For more details: https://www.authelia.com/configuration/methods/secrets/
+        '';
+        default = { };
+      };
+
+      settings = mkOption {
+        description = mdDoc ''
+          Your Authelia config.yml as a Nix attribute set.
+          There are several values that are defined and documented in nix such as `default_2fa_method`,
+          but additional items can also be included.
+
+          https://github.com/authelia/authelia/blob/master/config.template.yml
+        '';
+        default = { };
+        example = ''
+          {
+            theme = "light";
+            default_2fa_method = "totp";
+            log.level = "debug";
+            server.disable_healthcheck = true;
+          }
+        '';
+        type = types.submodule {
+          freeformType = format.type;
+          options = {
+            theme = mkOption {
+              type = types.enum [ "light" "dark" "grey" "auto" ];
+              default = "light";
+              example = "dark";
+              description = mdDoc "The theme to display.";
+            };
+
+            default_2fa_method = mkOption {
+              type = types.enum [ "" "totp" "webauthn" "mobile_push" ];
+              default = "";
+              example = "webauthn";
+              description = mdDoc ''
+                Default 2FA method for new users and fallback for preferred but disabled methods.
+              '';
+            };
+
+            server = {
+              host = mkOption {
+                type = types.str;
+                default = "localhost";
+                example = "0.0.0.0";
+                description = mdDoc "The address to listen on.";
+              };
+
+              port = mkOption {
+                type = types.port;
+                default = 9091;
+                description = mdDoc "The port to listen on.";
+              };
+            };
+
+            log = {
+              level = mkOption {
+                type = types.enum [ "info" "debug" "trace" ];
+                default = "debug";
+                example = "info";
+                description = mdDoc "Level of verbosity for logs: info, debug, trace.";
+              };
+
+              format = mkOption {
+                type = types.enum [ "json" "text" ];
+                default = "json";
+                example = "text";
+                description = mdDoc "Format the logs are written as.";
+              };
+
+              file_path = mkOption {
+                type = types.nullOr types.path;
+                default = null;
+                example = "/var/log/authelia/authelia.log";
+                description = mdDoc "File path where the logs will be written. If not set logs are written to stdout.";
+              };
+
+              keep_stdout = mkOption {
+                type = types.bool;
+                default = false;
+                example = true;
+                description = mdDoc "Whether to also log to stdout when a `file_path` is defined.";
+              };
+            };
+
+            telemetry = {
+              metrics = {
+                enabled = mkOption {
+                  type = types.bool;
+                  default = false;
+                  example = true;
+                  description = mdDoc "Enable Metrics.";
+                };
+
+                address = mkOption {
+                  type = types.str;
+                  default = "tcp://127.0.0.1:9959";
+                  example = "tcp://0.0.0.0:8888";
+                  description = mdDoc "The address to listen on for metrics. This should be on a different port to the main `server.port` value.";
+                };
+              };
+            };
+          };
+        };
+      };
+
+      settingsFiles = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        example = [ "/etc/authelia/config.yml" "/etc/authelia/access-control.yml" "/etc/authelia/config/" ];
+        description = mdDoc ''
+          Additional configuration files or directories to pass to Authelia.
+          Multiple files can be given alongside the Nix-generated configuration
+          set via {option}`services.authelia.<instance>.settings`.
+        '';
+      };
+    };
+  };
+in
+{
+  options.services.authelia.instances = with lib; mkOption {
+    default = { };
+    type = types.attrsOf (types.submodule autheliaOpts);
+    description = mdDoc ''
+      Multi-domain protection currently requires multiple instances of Authelia.
+      If you don't require multiple instances, you can define just one.
+
+      https://www.authelia.com/roadmap/active/multi-domain-protection/
+    '';
+    example = ''
+      {
+        main = {
+          enable = true;
+          secrets.storageEncryptionKeyFile = "/etc/authelia/storageEncryptionKeyFile";
+          secrets.jwtSecretFile = "/etc/authelia/jwtSecretFile";
+          settings = {
+            theme = "light";
+            default_2fa_method = "totp";
+            log.level = "debug";
+            server.disable_healthcheck = true;
+          };
+        };
+        preprod = {
+          enable = false;
+          secrets.storageEncryptionKeyFile = "/mnt/pre-prod/authelia/storageEncryptionKeyFile";
+          secrets.jwtSecretFile = "/mnt/pre-prod/jwtSecretFile";
+          settings = {
+            theme = "dark";
+            default_2fa_method = "webauthn";
+            server.host = "0.0.0.0";
+          };
+        };
+        test.enable = true;
+        test.secrets.manual = true;
+        test.settings.theme = "grey";
+        test.settings.server.disable_healthcheck = true;
+        test.settingsFiles = [ "/mnt/test/authelia" "/mnt/test-authelia.conf" ];
+      }
+    '';
+  };
+
+  config =
+    let
+      mkInstanceServiceConfig = instance:
+        let
+          execCommand = "${instance.package}/bin/authelia";
+          configFile = format.generate "config.yml" instance.settings;
+          configArg = "--config ${builtins.concatStringsSep "," (lib.concatLists [[configFile] instance.settingsFiles])}";
+        in
+        {
+          description = "Authelia authentication and authorization server";
+          wantedBy = [ "multi-user.target" ];
+          after = [ "network.target" ];
+          environment =
+            (lib.filterAttrs (_: v: v != null) {
+              AUTHELIA_JWT_SECRET_FILE = instance.secrets.jwtSecretFile;
+              AUTHELIA_STORAGE_ENCRYPTION_KEY_FILE = instance.secrets.storageEncryptionKeyFile;
+              AUTHELIA_SESSION_SECRET_FILE = instance.secrets.sessionSecretFile;
+              AUTHELIA_IDENTITY_PROVIDERS_OIDC_ISSUER_PRIVATE_KEY_FILE = instance.secrets.oidcIssuerPrivateKeyFile;
+              AUTHELIA_IDENTITY_PROVIDERS_OIDC_HMAC_SECRET_FILE = instance.secrets.oidcHmacSecretFile;
+            })
+            // instance.environmentVariables;
+
+          preStart = "${execCommand} ${configArg} validate-config";
+          serviceConfig = {
+            User = instance.user;
+            Group = instance.group;
+            ExecStart = "${execCommand} ${configArg}";
+            Restart = "always";
+            RestartSec = "5s";
+            StateDirectory = "authelia-${instance.name}";
+            StateDirectoryMode = "0700";
+
+            # Security options:
+            AmbientCapabilities = "";
+            CapabilityBoundingSet = "";
+            DeviceAllow = "";
+            LockPersonality = true;
+            MemoryDenyWriteExecute = true;
+            NoNewPrivileges = true;
+
+            PrivateTmp = true;
+            PrivateDevices = true;
+            PrivateUsers = true;
+
+            ProtectClock = true;
+            ProtectControlGroups = true;
+            ProtectHome = "read-only";
+            ProtectHostname = true;
+            ProtectKernelLogs = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            ProtectProc = "noaccess";
+            ProtectSystem = "strict";
+
+            RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+            RestrictNamespaces = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+
+            SystemCallArchitectures = "native";
+            SystemCallErrorNumber = "EPERM";
+            SystemCallFilter = [
+              "@system-service"
+              "~@cpu-emulation"
+              "~@debug"
+              "~@keyring"
+              "~@memlock"
+              "~@obsolete"
+              "~@privileged"
+              "~@setuid"
+            ];
+          };
+        };
+      mkInstanceUsersConfig = instance: {
+        groups."authelia-${instance.name}" =
+          lib.mkIf (instance.group == "authelia-${instance.name}") {
+            name = "authelia-${instance.name}";
+          };
+        users."authelia-${instance.name}" =
+          lib.mkIf (instance.user == "authelia-${instance.name}") {
+            name = "authelia-${instance.name}";
+            isSystemUser = true;
+            group = instance.group;
+          };
+      };
+      instances = lib.attrValues cfg.instances;
+    in
+    {
+      assertions = lib.flatten (lib.flip lib.mapAttrsToList cfg.instances (name: instance:
+        [
+          {
+            assertion = instance.secrets.manual || (instance.secrets.jwtSecretFile != null && instance.secrets.storageEncryptionKeyFile != null);
+            message = ''
+              Authelia requires a JWT Secret and a Storage Encryption Key to work.
+              Either set them like so:
+              services.authelia.instances.${name}.secrets.jwtSecretFile = /my/path/to/jwtsecret;
+              services.authelia.instances.${name}.secrets.storageEncryptionKeyFile = /my/path/to/encryptionkey;
+              Or set services.authelia.instances.${name}.secrets.manual = true and provide them yourself via
+              environmentVariables or settingsFiles.
+              Do not include raw secrets in nix settings.
+            '';
+          }
+        ]
+      ));
+
+      systemd.services = lib.mkMerge
+        (map
+          (instance: lib.mkIf instance.enable {
+            "authelia-${instance.name}" = mkInstanceServiceConfig instance;
+          })
+          instances);
+      users = lib.mkMerge
+        (map
+          (instance: lib.mkIf instance.enable (mkInstanceUsersConfig instance))
+          instances);
+    };
+}
diff --git a/nixpkgs/nixos/modules/services/security/certmgr.nix b/nixpkgs/nixos/modules/services/security/certmgr.nix
new file mode 100644
index 000000000000..ca4cf5084722
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/certmgr.nix
@@ -0,0 +1,201 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.certmgr;
+
+  specs = mapAttrsToList (n: v: rec {
+    name = n + ".json";
+    path = if isAttrs v then pkgs.writeText name (builtins.toJSON v) else v;
+  }) cfg.specs;
+
+  allSpecs = pkgs.linkFarm "certmgr.d" specs;
+
+  certmgrYaml = pkgs.writeText "certmgr.yaml" (builtins.toJSON {
+    dir = allSpecs;
+    default_remote = cfg.defaultRemote;
+    svcmgr = cfg.svcManager;
+    before = cfg.validMin;
+    interval = cfg.renewInterval;
+    inherit (cfg) metricsPort metricsAddress;
+  });
+
+  specPaths = map dirOf (concatMap (spec:
+    if isAttrs spec then
+      collect isString (filterAttrsRecursive (n: v: isAttrs v || n == "path") spec)
+    else
+      [ spec ]
+  ) (attrValues cfg.specs));
+
+  preStart = ''
+    ${concatStringsSep " \\\n" (["mkdir -p"] ++ map escapeShellArg specPaths)}
+    ${cfg.package}/bin/certmgr -f ${certmgrYaml} check
+  '';
+in
+{
+  options.services.certmgr = {
+    enable = mkEnableOption (lib.mdDoc "certmgr");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.certmgr;
+      defaultText = literalExpression "pkgs.certmgr";
+      description = lib.mdDoc "Which certmgr package to use in the service.";
+    };
+
+    defaultRemote = mkOption {
+      type = types.str;
+      default = "127.0.0.1:8888";
+      description = lib.mdDoc "The default CA host:port to use.";
+    };
+
+    validMin = mkOption {
+      default = "72h";
+      type = types.str;
+      description = lib.mdDoc "The interval before a certificate expires to start attempting to renew it.";
+    };
+
+    renewInterval = mkOption {
+      default = "30m";
+      type = types.str;
+      description = lib.mdDoc "How often to check certificate expirations and how often to update the cert_next_expires metric.";
+    };
+
+    metricsAddress = mkOption {
+      default = "127.0.0.1";
+      type = types.str;
+      description = lib.mdDoc "The address for the Prometheus HTTP endpoint.";
+    };
+
+    metricsPort = mkOption {
+      default = 9488;
+      type = types.ints.u16;
+      description = lib.mdDoc "The port for the Prometheus HTTP endpoint.";
+    };
+
+    specs = mkOption {
+      default = {};
+      example = literalExpression ''
+      {
+        exampleCert =
+        let
+          domain = "example.com";
+          secret = name: "/var/lib/secrets/''${name}.pem";
+        in {
+          service = "nginx";
+          action = "reload";
+          authority = {
+            file.path = secret "ca";
+          };
+          certificate = {
+            path = secret domain;
+          };
+          private_key = {
+            owner = "root";
+            group = "root";
+            mode = "0600";
+            path = secret "''${domain}-key";
+          };
+          request = {
+            CN = domain;
+            hosts = [ "mail.''${domain}" "www.''${domain}" ];
+            key = {
+              algo = "rsa";
+              size = 2048;
+            };
+            names = {
+              O = "Example Organization";
+              C = "USA";
+            };
+          };
+        };
+        otherCert = "/var/certmgr/specs/other-cert.json";
+      }
+      '';
+      type = with types; attrsOf (either path (submodule {
+        options = {
+          service = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "The service on which to perform \<action\> after fetching.";
+          };
+
+          action = mkOption {
+            type = addCheck str (x: cfg.svcManager == "command" || elem x ["restart" "reload" "nop"]);
+            default = "nop";
+            description = lib.mdDoc "The action to take after fetching.";
+          };
+
+          # These ought all to be specified according to certmgr spec def.
+          authority = mkOption {
+            type = attrs;
+            description = lib.mdDoc "certmgr spec authority object.";
+          };
+
+          certificate = mkOption {
+            type = nullOr attrs;
+            description = lib.mdDoc "certmgr spec certificate object.";
+          };
+
+          private_key = mkOption {
+            type = nullOr attrs;
+            description = lib.mdDoc "certmgr spec private_key object.";
+          };
+
+          request = mkOption {
+            type = nullOr attrs;
+            description = lib.mdDoc "certmgr spec request object.";
+          };
+        };
+    }));
+      description = lib.mdDoc ''
+        Certificate specs as described by:
+        <https://github.com/cloudflare/certmgr#certificate-specs>
+        These will be added to the Nix store, so they will be world readable.
+      '';
+    };
+
+    svcManager = mkOption {
+      default = "systemd";
+      type = types.enum [ "circus" "command" "dummy" "openrc" "systemd" "sysv" ];
+      description = lib.mdDoc ''
+        This specifies the service manager to use for restarting or reloading services.
+        See: <https://github.com/cloudflare/certmgr#certmgryaml>.
+        For how to use the "command" service manager in particular,
+        see: <https://github.com/cloudflare/certmgr#command-svcmgr-and-how-to-use-it>.
+      '';
+    };
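+    # Illustrative sketch (an assumption, not part of the module): with
+    # svcManager = "command" the spec's `action` is run as a shell command after a
+    # certificate is (re)issued, so a spec might look like:
+    #
+    #   services.certmgr = {
+    #     svcManager = "command";
+    #     specs.exampleCert = {
+    #       action = "systemctl reload nginx.service";
+    #       authority.file.path = "/var/lib/secrets/ca.pem";
+    #       certificate.path = "/var/lib/secrets/example.pem";
+    #       private_key.path = "/var/lib/secrets/example-key.pem";
+    #       request.CN = "example.com";
+    #     };
+    #   };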
+
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.specs != {};
+        message = "Certmgr specs cannot be empty.";
+      }
+      {
+        assertion = !any (hasAttrByPath [ "authority" "auth_key" ]) (attrValues cfg.specs);
+        message = ''
+          Inline services.certmgr.specs are added to the Nix store rendering them world readable.
+          Specify paths as specs if you want to include auth_key, or use the auth_key_file option.
+        '';
+      }
+    ];
+
+    systemd.services.certmgr = {
+      description = "certmgr";
+      path = mkIf (cfg.svcManager == "command") [ pkgs.bash ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      inherit preStart;
+
+      serviceConfig = {
+        Restart = "always";
+        RestartSec = "10s";
+        ExecStart = "${cfg.package}/bin/certmgr -f ${certmgrYaml}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/cfssl.nix b/nixpkgs/nixos/modules/services/security/cfssl.nix
new file mode 100644
index 000000000000..202db98e222c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/cfssl.nix
@@ -0,0 +1,222 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cfssl;
+in {
+  options.services.cfssl = {
+    enable = mkEnableOption (lib.mdDoc "the CFSSL CA api-server");
+
+    dataDir = mkOption {
+      default = "/var/lib/cfssl";
+      type = types.path;
+      description = lib.mdDoc ''
+        The work directory for CFSSL.
+
+        ::: {.note}
+        If left as the default value this directory will automatically be
+        created before the CFSSL server starts, otherwise you are
+        responsible for ensuring the directory exists with appropriate
+        ownership and permissions.
+        :::
+      '';
+    };
+
+    address = mkOption {
+      default = "127.0.0.1";
+      type = types.str;
+      description = lib.mdDoc "Address to bind.";
+    };
+
+    port = mkOption {
+      default = 8888;
+      type = types.port;
+      description = lib.mdDoc "Port to bind.";
+    };
+
+    ca = mkOption {
+      defaultText = literalExpression ''"''${cfg.dataDir}/ca.pem"'';
+      type = types.str;
+      description = lib.mdDoc "CA used to sign the new certificate -- accepts '[file:]fname' or 'env:varname'.";
+    };
+
+    caKey = mkOption {
+      defaultText = literalExpression ''"file:''${cfg.dataDir}/ca-key.pem"'';
+      type = types.str;
+      description = lib.mdDoc "CA private key -- accepts '[file:]fname' or 'env:varname'.";
+    };
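+    # Illustrative values: both options accept a file reference or an environment
+    # variable, e.g.
+    #
+    #   services.cfssl.ca    = "file:/var/lib/cfssl/ca.pem";
+    #   services.cfssl.caKey = "env:CFSSL_CA_KEY";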
+
+    caBundle = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Path to root certificate store.";
+    };
+
+    intBundle = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Path to intermediate certificate store.";
+    };
+
+    intDir = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Intermediates directory.";
+    };
+
+    metadata = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc ''
+        Metadata file for root certificate presence.
+        The content of the file is a json dictionary (k,v): each key k is
+        a SHA-1 digest of a root certificate while value v is a list of key
+        store filenames.
+      '';
+    };
+
+    remote = mkOption {
+      default = null;
+      type = types.nullOr types.str;
+      description = lib.mdDoc "Remote CFSSL server.";
+    };
+
+    configFile = mkOption {
+      default = null;
+      type = types.nullOr types.str;
+      description = lib.mdDoc "Path to configuration file. Do not put this in nix-store as it might contain secrets.";
+    };
+
+    responder = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Certificate for OCSP responder.";
+    };
+
+    responderKey = mkOption {
+      default = null;
+      type = types.nullOr types.str;
+      description = lib.mdDoc "Private key for OCSP responder certificate. Do not put this in nix-store.";
+    };
+
+    tlsKey = mkOption {
+      default = null;
+      type = types.nullOr types.str;
+      description = lib.mdDoc "Other endpoint's CA private key. Do not put this in nix-store.";
+    };
+
+    tlsCert = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Other endpoint's CA to set up TLS protocol.";
+    };
+
+    mutualTlsCa = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Mutual TLS - require clients be signed by this CA.";
+    };
+
+    mutualTlsCn = mkOption {
+      default = null;
+      type = types.nullOr types.str;
+      description = lib.mdDoc "Mutual TLS - regex for whitelist of allowed client CNs.";
+    };
+
+    tlsRemoteCa = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "CAs to trust for remote TLS requests.";
+    };
+
+    mutualTlsClientCert = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Mutual TLS - client certificate to call remote instance requiring client certs.";
+    };
+
+    mutualTlsClientKey = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Mutual TLS - client key to call remote instance requiring client certs. Do not put this in nix-store.";
+    };
+
+    dbConfig = mkOption {
+      default = null;
+      type = types.nullOr types.path;
+      description = lib.mdDoc "Certificate db configuration file. Path must be writeable.";
+    };
+
+    logLevel = mkOption {
+      default = 1;
+      type = types.enum [ 0 1 2 3 4 5 ];
+      description = lib.mdDoc "Log level (0 = DEBUG, 5 = FATAL).";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.groups.cfssl = {
+      gid = config.ids.gids.cfssl;
+    };
+
+    users.users.cfssl = {
+      description = "cfssl user";
+      home = cfg.dataDir;
+      group = "cfssl";
+      uid = config.ids.uids.cfssl;
+    };
+
+    systemd.services.cfssl = {
+      description = "CFSSL CA API server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = lib.mkMerge [
+        {
+          WorkingDirectory = cfg.dataDir;
+          Restart = "always";
+          User = "cfssl";
+          Group = "cfssl";
+
+          ExecStart = with cfg; let
+            opt = n: v: optionalString (v != null) ''-${n}="${v}"'';
+          in
+            lib.concatStringsSep " \\\n" [
+              "${pkgs.cfssl}/bin/cfssl serve"
+              (opt "address" address)
+              (opt "port" (toString port))
+              (opt "ca" ca)
+              (opt "ca-key" caKey)
+              (opt "ca-bundle" caBundle)
+              (opt "int-bundle" intBundle)
+              (opt "int-dir" intDir)
+              (opt "metadata" metadata)
+              (opt "remote" remote)
+              (opt "config" configFile)
+              (opt "responder" responder)
+              (opt "responder-key" responderKey)
+              (opt "tls-key" tlsKey)
+              (opt "tls-cert" tlsCert)
+              (opt "mutual-tls-ca" mutualTlsCa)
+              (opt "mutual-tls-cn" mutualTlsCn)
+              (opt "mutual-tls-client-key" mutualTlsClientKey)
+              (opt "mutual-tls-client-cert" mutualTlsClientCert)
+              (opt "tls-remote-ca" tlsRemoteCa)
+              (opt "db-config" dbConfig)
+              (opt "loglevel" (toString logLevel))
+            ];
+        }
+        (mkIf (cfg.dataDir == options.services.cfssl.dataDir.default) {
+          StateDirectory = baseNameOf cfg.dataDir;
+          StateDirectoryMode = 700;
+        })
+      ];
+    };
+
+    services.cfssl = {
+      ca = mkDefault "${cfg.dataDir}/ca.pem";
+      caKey = mkDefault "${cfg.dataDir}/ca-key.pem";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/clamav.nix b/nixpkgs/nixos/modules/services/security/clamav.nix
new file mode 100644
index 000000000000..34897a9ac7db
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/clamav.nix
@@ -0,0 +1,151 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  clamavUser = "clamav";
+  stateDir = "/var/lib/clamav";
+  runDir = "/run/clamav";
+  clamavGroup = clamavUser;
+  cfg = config.services.clamav;
+  pkg = pkgs.clamav;
+
+  toKeyValue = generators.toKeyValue {
+    mkKeyValue = generators.mkKeyValueDefault { } " ";
+    listsAsDuplicateKeys = true;
+  };
+
+  clamdConfigFile = pkgs.writeText "clamd.conf" (toKeyValue cfg.daemon.settings);
+  freshclamConfigFile = pkgs.writeText "freshclam.conf" (toKeyValue cfg.updater.settings);
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "clamav" "updater" "config" ] "Use services.clamav.updater.settings instead.")
+    (mkRemovedOptionModule [ "services" "clamav" "updater" "extraConfig" ] "Use services.clamav.updater.settings instead.")
+    (mkRemovedOptionModule [ "services" "clamav" "daemon" "extraConfig" ] "Use services.clamav.daemon.settings instead.")
+  ];
+
+  options = {
+    services.clamav = {
+      daemon = {
+        enable = mkEnableOption (lib.mdDoc "ClamAV clamd daemon");
+
+        settings = mkOption {
+          type = with types; attrsOf (oneOf [ bool int str (listOf str) ]);
+          default = { };
+          description = lib.mdDoc ''
+            ClamAV configuration. Refer to <https://linux.die.net/man/5/clamd.conf>
+            for details on supported values.
+          '';
+        };
+      };
+      updater = {
+        enable = mkEnableOption (lib.mdDoc "ClamAV freshclam updater");
+
+        frequency = mkOption {
+          type = types.int;
+          default = 12;
+          description = lib.mdDoc ''
+            Number of database checks per day.
+          '';
+        };
+
+        interval = mkOption {
+          type = types.str;
+          default = "hourly";
+          description = lib.mdDoc ''
+            How often freshclam is invoked. See systemd.time(7) for more
+            information about the format.
+          '';
+        };
+
+        settings = mkOption {
+          type = with types; attrsOf (oneOf [ bool int str (listOf str) ]);
+          default = { };
+          description = lib.mdDoc ''
+            freshclam configuration. Refer to <https://linux.die.net/man/5/freshclam.conf>
+            for details on supported values.
+          '';
+        };
+      };
+    };
+  };
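+  # Illustrative sketch: a typical deployment enables both the scanner daemon and
+  # the signature updater, optionally tuning how often freshclam checks for updates.
+  #
+  #   services.clamav.daemon.enable  = true;
+  #   services.clamav.updater.enable = true;
+  #   services.clamav.updater.frequency = 24;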
+
+  config = mkIf (cfg.updater.enable || cfg.daemon.enable) {
+    environment.systemPackages = [ pkg ];
+
+    users.users.${clamavUser} = {
+      uid = config.ids.uids.clamav;
+      group = clamavGroup;
+      description = "ClamAV daemon user";
+      home = stateDir;
+    };
+
+    users.groups.${clamavGroup} =
+      { gid = config.ids.gids.clamav; };
+
+    services.clamav.daemon.settings = {
+      DatabaseDirectory = stateDir;
+      LocalSocket = "${runDir}/clamd.ctl";
+      PidFile = "${runDir}/clamd.pid";
+      TemporaryDirectory = "/tmp";
+      User = "clamav";
+      Foreground = true;
+    };
+
+    services.clamav.updater.settings = {
+      DatabaseDirectory = stateDir;
+      Foreground = true;
+      Checks = cfg.updater.frequency;
+      DatabaseMirror = [ "database.clamav.net" ];
+    };
+
+    environment.etc."clamav/freshclam.conf".source = freshclamConfigFile;
+    environment.etc."clamav/clamd.conf".source = clamdConfigFile;
+
+    systemd.services.clamav-daemon = mkIf cfg.daemon.enable {
+      description = "ClamAV daemon (clamd)";
+      after = optional cfg.updater.enable "clamav-freshclam.service";
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ clamdConfigFile ];
+
+      preStart = ''
+        mkdir -m 0755 -p ${runDir}
+        chown ${clamavUser}:${clamavGroup} ${runDir}
+      '';
+
+      serviceConfig = {
+        ExecStart = "${pkg}/bin/clamd";
+        ExecReload = "${pkgs.coreutils}/bin/kill -USR2 $MAINPID";
+        PrivateTmp = "yes";
+        PrivateDevices = "yes";
+        PrivateNetwork = "yes";
+      };
+    };
+
+    systemd.timers.clamav-freshclam = mkIf cfg.updater.enable {
+      description = "Timer for ClamAV virus database updater (freshclam)";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = cfg.updater.interval;
+        Unit = "clamav-freshclam.service";
+      };
+    };
+
+    systemd.services.clamav-freshclam = mkIf cfg.updater.enable {
+      description = "ClamAV virus database updater (freshclam)";
+      restartTriggers = [ freshclamConfigFile ];
+      after = [ "network-online.target" ];
+      preStart = ''
+        mkdir -m 0755 -p ${stateDir}
+        chown ${clamavUser}:${clamavGroup} ${stateDir}
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkg}/bin/freshclam";
+        SuccessExitStatus = "1"; # if databases are up to date
+        PrivateTmp = "yes";
+        PrivateDevices = "yes";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/endlessh-go.nix b/nixpkgs/nixos/modules/services/security/endlessh-go.nix
new file mode 100644
index 000000000000..6557ec953cd8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/endlessh-go.nix
@@ -0,0 +1,138 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.endlessh-go;
+in
+{
+  options.services.endlessh-go = {
+    enable = mkEnableOption (mdDoc "endlessh-go service");
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      example = "[::]";
+      description = mdDoc ''
+        Interface address the endlessh-go daemon binds to for accepting SSH connections.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 2222;
+      example = 22;
+      description = mdDoc ''
+        Specifies on which port the endlessh-go daemon listens for SSH
+        connections.
+
+        Setting this to `22` may conflict with {option}`services.openssh`.
+      '';
+    };
+
+    prometheus = {
+      enable = mkEnableOption (mdDoc "Prometheus integration");
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        example = "[::]";
+        description = mdDoc ''
+          Interface address the endlessh-go daemon binds to for answering
+          Prometheus queries.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 2112;
+        example = 9119;
+        description = mdDoc ''
+          Specifies on which port the endlessh-go daemon listens for Prometheus
+          queries.
+        '';
+      };
+    };
+
+    extraOptions = mkOption {
+      type = with types; listOf str;
+      default = [ ];
+      example = [ "-conn_type=tcp4" "-max_clients=8192" ];
+      description = mdDoc ''
+        Additional command line options to pass to the endlessh-go daemon.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open a firewall port for the SSH listener.
+      '';
+    };
+  };
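+  # Illustrative sketch: run the tarpit on the real SSH port while sshd listens
+  # elsewhere (port numbers are examples only):
+  #
+  #   services.openssh.ports = [ 2022 ];
+  #   services.endlessh-go = {
+  #     enable = true;
+  #     port = 22;
+  #     prometheus.enable = true;
+  #     openFirewall = true;
+  #   };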
+
+  config = mkIf cfg.enable {
+    systemd.services.endlessh-go = {
+      description = "SSH tarpit";
+      requires = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig =
+        let
+          needsPrivileges = cfg.port < 1024 || cfg.prometheus.port < 1024;
+          capabilities = [ "" ] ++ optionals needsPrivileges [ "CAP_NET_BIND_SERVICE" ];
+          rootDirectory = "/run/endlessh-go";
+        in
+        {
+          Restart = "always";
+          ExecStart = with cfg; concatStringsSep " " ([
+            "${pkgs.endlessh-go}/bin/endlessh-go"
+            "-logtostderr"
+            "-host=${listenAddress}"
+            "-port=${toString port}"
+          ] ++ optionals prometheus.enable [
+            "-enable_prometheus"
+            "-prometheus_host=${prometheus.listenAddress}"
+            "-prometheus_port=${toString prometheus.port}"
+          ] ++ extraOptions);
+          DynamicUser = true;
+          RootDirectory = rootDirectory;
+          BindReadOnlyPaths = [ builtins.storeDir ];
+          InaccessiblePaths = [ "-+${rootDirectory}" ];
+          RuntimeDirectory = baseNameOf rootDirectory;
+          RuntimeDirectoryMode = "700";
+          AmbientCapabilities = capabilities;
+          CapabilityBoundingSet = capabilities;
+          UMask = "0077";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          PrivateUsers = !needsPrivileges;
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectSystem = "strict";
+          ProtectProc = "noaccess";
+          ProcSubset = "pid";
+          RemoveIPC = true;
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@privileged" ];
+        };
+    };
+
+    networking.firewall.allowedTCPPorts = with cfg;
+      optionals openFirewall [ port prometheus.port ];
+  };
+
+  meta.maintainers = with maintainers; [ azahi ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/endlessh.nix b/nixpkgs/nixos/modules/services/security/endlessh.nix
new file mode 100644
index 000000000000..e99b4dadcd58
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/endlessh.nix
@@ -0,0 +1,99 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.endlessh;
+in
+{
+  options.services.endlessh = {
+    enable = mkEnableOption (mdDoc "endlessh service");
+
+    port = mkOption {
+      type = types.port;
+      default = 2222;
+      example = 22;
+      description = mdDoc ''
+        Specifies on which port the endlessh daemon listens for SSH
+        connections.
+
+        Setting this to `22` may conflict with {option}`services.openssh`.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = with types; listOf str;
+      default = [ ];
+      example = [ "-6" "-d 9000" "-v" ];
+      description = mdDoc ''
+        Additional command line options to pass to the endlessh daemon.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open a firewall port for the SSH listener.
+      '';
+    };
+  };
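+  # Illustrative sketch, analogous to endlessh-go:
+  #
+  #   services.endlessh = { enable = true; port = 22; openFirewall = true; };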
+
+  config = mkIf cfg.enable {
+    systemd.services.endlessh = {
+      description = "SSH tarpit";
+      requires = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig =
+        let
+          needsPrivileges = cfg.port < 1024;
+          capabilities = [ "" ] ++ optionals needsPrivileges [ "CAP_NET_BIND_SERVICE" ];
+          rootDirectory = "/run/endlessh";
+        in
+        {
+          Restart = "always";
+          ExecStart = with cfg; concatStringsSep " " ([
+            "${pkgs.endlessh}/bin/endlessh"
+            "-p ${toString port}"
+          ] ++ extraOptions);
+          DynamicUser = true;
+          RootDirectory = rootDirectory;
+          BindReadOnlyPaths = [ builtins.storeDir ];
+          InaccessiblePaths = [ "-+${rootDirectory}" ];
+          RuntimeDirectory = baseNameOf rootDirectory;
+          RuntimeDirectoryMode = "700";
+          AmbientCapabilities = capabilities;
+          CapabilityBoundingSet = capabilities;
+          UMask = "0077";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          PrivateUsers = !needsPrivileges;
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectSystem = "strict";
+          ProtectProc = "noaccess";
+          ProcSubset = "pid";
+          RemoveIPC = true;
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@resources" "~@privileged" ];
+        };
+    };
+
+    networking.firewall.allowedTCPPorts = with cfg;
+      optionals openFirewall [ port ];
+  };
+
+  meta.maintainers = with maintainers; [ azahi ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/esdm.nix b/nixpkgs/nixos/modules/services/security/esdm.nix
new file mode 100644
index 000000000000..2b246fff7e96
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/esdm.nix
@@ -0,0 +1,102 @@
+{ lib, config, pkgs, ... }:
+
+let
+  cfg = config.services.esdm;
+in
+{
+  options.services.esdm = {
+    enable = lib.mkEnableOption (lib.mdDoc "ESDM service configuration");
+    package = lib.mkPackageOptionMD pkgs "esdm" { };
+    serverEnable = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Enable option for the ESDM server service. If serverEnable == false, the esdm-server
+        will not start. The dependent services esdm-cuse-random, esdm-cuse-urandom
+        and esdm-proc will not start either, as they carry the entry Wants=esdm-server.service.
+      '';
+    };
+    cuseRandomEnable = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Enable option for ESDM cuse-random service. Determines if the esdm-cuse-random.service
+        is started.
+      '';
+    };
+    cuseUrandomEnable = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Enable option for ESDM cuse-urandom service. Determines if the esdm-cuse-urandom.service
+        is started.
+      '';
+    };
+    procEnable = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Enable option for ESDM proc service. Determines if the esdm-proc.service
+        is started.
+      '';
+    };
+    verbose = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable verbose ExecStart for ESDM. If verbose == true, the "ExecStart"
+        values of the four services above are overridden with the option
+        for the highest verbosity.
+      '';
+    };
+  };
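+  # Illustrative sketch: enable ESDM but skip the CUSE device emulation, keeping
+  # only the server and the /proc emulation:
+  #
+  #   services.esdm = {
+  #     enable = true;
+  #     cuseRandomEnable = false;
+  #     cuseUrandomEnable = false;
+  #   };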
+
+  config = lib.mkIf cfg.enable (
+    lib.mkMerge [
+      ({
+        systemd.packages = [ cfg.package ];
+      })
+      # It is necessary to set those options for these services to be started by systemd in NixOS
+      (lib.mkIf cfg.serverEnable {
+        systemd.services."esdm-server".wantedBy = [ "basic.target" ];
+        systemd.services."esdm-server".serviceConfig = lib.mkIf cfg.verbose {
+          ExecStart = [
+            " " # unset previous value defined in 'esdm-server.service'
+            "${cfg.package}/bin/esdm-server -f -vvvvvv"
+          ];
+        };
+      })
+
+      (lib.mkIf cfg.cuseRandomEnable {
+        systemd.services."esdm-cuse-random".wantedBy = [ "basic.target" ];
+        systemd.services."esdm-cuse-random".serviceConfig = lib.mkIf cfg.verbose {
+          ExecStart = [
+            " " # unset previous value defined in 'esdm-cuse-random.service'
+            "${cfg.package}/bin/esdm-cuse-random -f -v 6"
+          ];
+        };
+      })
+
+      (lib.mkIf cfg.cuseUrandomEnable {
+        systemd.services."esdm-cuse-urandom".wantedBy = [ "basic.target" ];
+        systemd.services."esdm-cuse-urandom".serviceConfig = lib.mkIf cfg.verbose {
+          ExecStart = [
+            " " # unset previous value defined in 'esdm-cuse-urandom.service'
+            "${config.services.esdm.package}/bin/esdm-cuse-urandom -f -v 6"
+          ];
+        };
+      })
+
+      (lib.mkIf cfg.procEnable {
+        systemd.services."esdm-proc".wantedBy = [ "basic.target" ];
+        systemd.services."esdm-proc".serviceConfig = lib.mkIf cfg.verbose {
+          ExecStart = [
+            " " # unset previous value defined in 'esdm-proc.service'
+            "${cfg.package}/bin/esdm-proc --relabel -f -o allow_other /proc/sys/kernel/random -v 6"
+          ];
+        };
+      })
+    ]);
+
+  meta.maintainers = with lib.maintainers; [ orichter thillux ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/fail2ban.nix b/nixpkgs/nixos/modules/services/security/fail2ban.nix
new file mode 100644
index 000000000000..235f29ab8a6a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/fail2ban.nix
@@ -0,0 +1,414 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.fail2ban;
+
+  settingsFormat = pkgs.formats.keyValue { };
+
+  configFormat = pkgs.formats.ini {
+    mkKeyValue = generators.mkKeyValueDefault { } " = ";
+  };
+
+  mkJailConfig = name: attrs:
+    optionalAttrs (name != "DEFAULT") { inherit (attrs) enabled; } //
+    optionalAttrs (attrs.filter != null) { filter = if (builtins.isString attrs.filter) then attrs.filter else name; } //
+    attrs.settings;
+
+  mkFilter = name: attrs: nameValuePair "fail2ban/filter.d/${name}.conf" {
+    source = configFormat.generate "filter.d/${name}.conf" attrs.filter;
+  };
+
+  fail2banConf = configFormat.generate "fail2ban.local" cfg.daemonSettings;
+
+  strJails = filterAttrs (_: builtins.isString) cfg.jails;
+  attrsJails = filterAttrs (_: builtins.isAttrs) cfg.jails;
+
+  jailConf =
+    let
+      configFile = configFormat.generate "jail.local" (
+        { INCLUDES.before = "paths-nixos.conf"; } // (mapAttrs mkJailConfig attrsJails)
+      );
+      extraConfig = concatStringsSep "\n" (attrValues (mapAttrs
+        (name: def:
+          optionalString (def != "")
+            ''
+              [${name}]
+              ${def}
+            '')
+        strJails));
+
+    in
+    pkgs.concatText "jail.local" [ configFile (pkgs.writeText "extra-jail.local" extraConfig) ];
+
+  pathsConf = pkgs.writeText "paths-nixos.conf" ''
+    # NixOS
+
+    [INCLUDES]
+
+    before = paths-common.conf
+
+    after  = paths-overrides.local
+
+    [DEFAULT]
+  '';
+in
+
+{
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "fail2ban" "daemonConfig" ] "The daemon is now configured through the attribute set `services.fail2ban.daemonSettings`.")
+    (mkRemovedOptionModule [ "services" "fail2ban" "extraSettings" ] "The extra default configuration can now be set using `services.fail2ban.jails.DEFAULT.settings`.")
+  ];
+
+  ###### interface
+
+  options = {
+    services.fail2ban = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable the fail2ban service.
+
+          See the documentation of {option}`services.fail2ban.jails`
+          for what jails are enabled by default.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.fail2ban;
+        defaultText = literalExpression "pkgs.fail2ban";
+        type = types.package;
+        example = literalExpression "pkgs.fail2ban_0_11";
+        description = lib.mdDoc "The fail2ban package to use for running the fail2ban service.";
+      };
+
+      packageFirewall = mkOption {
+        default = config.networking.firewall.package;
+        defaultText = literalExpression "config.networking.firewall.package";
+        type = types.package;
+        description = lib.mdDoc "The firewall package used by fail2ban service. Defaults to the package for your firewall (iptables or nftables).";
+      };
+
+      extraPackages = mkOption {
+        default = [ ];
+        type = types.listOf types.package;
+        example = lib.literalExpression "[ pkgs.ipset ]";
+        description = lib.mdDoc ''
+          Extra packages to be made available to the fail2ban service. The example contains
+          the packages needed by the `iptables-ipset-proto6` action.
+        '';
+      };
+
+      bantime = mkOption {
+        default = "10m";
+        type = types.str;
+        example = "1h";
+        description = lib.mdDoc "Number of seconds that a host is banned.";
+      };
+
+      maxretry = mkOption {
+        default = 3;
+        type = types.ints.unsigned;
+        description = lib.mdDoc "Number of failures before a host gets banned.";
+      };
+
+      banaction = mkOption {
+        default = if config.networking.nftables.enable then "nftables-multiport" else "iptables-multiport";
+        defaultText = literalExpression ''if config.networking.nftables.enable then "nftables-multiport" else "iptables-multiport"'';
+        type = types.str;
+        description = lib.mdDoc ''
+          Default banning action (e.g. iptables, iptables-new, iptables-multiport,
+          iptables-ipset-proto6-allports, shorewall, etc). It is used to
+          define action_* variables. Can be overridden globally or per
+          section within jail.local file
+        '';
+      };
+
+      banaction-allports = mkOption {
+        default = if config.networking.nftables.enable then "nftables-allport" else "iptables-allport";
+        defaultText = literalExpression ''if config.networking.nftables.enable then "nftables-allport" else "iptables-allport"'';
+        type = types.str;
+        description = lib.mdDoc ''
+          Default banning action (e.g. iptables, iptables-new, iptables-multiport,
+          shorewall, etc) for "allports" jails. It is used to define action_* variables. Can be overridden
+          globally or per section within the jail.local file.
+        '';
+      };
+
+      bantime-increment.enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          "bantime.increment" allows to use database for searching of previously banned ip's to increase
+          a default ban time using special formula, default it is banTime * 1, 2, 4, 8, 16, 32 ...
+        '';
+      };
+
+      bantime-increment.rndtime = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "8m";
+        description = lib.mdDoc ''
+          "bantime.rndtime" is the max number of seconds using for mixing with random time
+          to prevent "clever" botnets calculate exact time IP can be unbanned again
+        '';
+      };
+
+      bantime-increment.maxtime = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "48h";
+        description = lib.mdDoc ''
+          "bantime.maxtime" is the max number of seconds using the ban time can reach (don't grows further)
+        '';
+      };
+
+      bantime-increment.factor = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "4";
+        description = lib.mdDoc ''
+          "bantime.factor" is a coefficient to calculate exponent growing of the formula or common multiplier,
+          default value of factor is 1 and with default value of formula, the ban time grows by 1, 2, 4, 8, 16 ...
+        '';
+      };
+
+      bantime-increment.formula = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "ban.Time * math.exp(float(ban.Count+1)*banFactor)/math.exp(1*banFactor)";
+        description = lib.mdDoc ''
+          "bantime.formula" used by default to calculate next value of ban time, default value bellow,
+          the same ban time growing will be reached by multipliers 1, 2, 4, 8, 16, 32 ...
+        '';
+      };
+
+      bantime-increment.multipliers = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "1 2 4 8 16 32 64";
+        description = lib.mdDoc ''
+          "bantime.multipliers" used to calculate next value of ban time instead of formula, corresponding
+          previously ban count and given "bantime.factor" (for multipliers default is 1);
+          following example grows ban time by 1, 2, 4, 8, 16 ... and if last ban count greater as multipliers count,
+          always used last multiplier (64 in example), for factor '1' and original ban time 600 - 10.6 hours
+        '';
+      };
+
+      bantime-increment.overalljails = mkOption {
+        default = null;
+        type = types.nullOr types.bool;
+        example = true;
+        description = lib.mdDoc ''
+          "bantime.overalljails" (if true) specifies the search of IP in the database will be executed
+          cross over all jails, if false (default), only current jail of the ban IP will be searched.
+        '';
+      };
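+      # Illustrative sketch: exponentially growing bans capped at 48 hours could be
+      # configured as
+      #
+      #   services.fail2ban.bantime-increment = {
+      #     enable = true;
+      #     factor = "2";
+      #     maxtime = "48h";
+      #   };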
+
+      ignoreIP = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        example = [ "192.168.0.0/16" "2001:DB8::42" ];
+        description = lib.mdDoc ''
+          "ignoreIP" can be a list of IP addresses, CIDR masks or DNS hosts. Fail2ban will not ban a host which
+          matches an address in this list. Several addresses can be defined using space (and/or comma) separator.
+        '';
+      };
+
+      daemonSettings = mkOption {
+        inherit (configFormat) type;
+
+        defaultText = literalExpression ''
+          {
+            Definition = {
+              logtarget = "SYSLOG";
+              socket = "/run/fail2ban/fail2ban.sock";
+              pidfile = "/run/fail2ban/fail2ban.pid";
+              dbfile = "/var/lib/fail2ban/fail2ban.sqlite3";
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          The contents of Fail2ban's main configuration file.
+          It's generally not necessary to change it.
+        '';
+      };
+
+      jails = mkOption {
+        default = { };
+        example = literalExpression ''
+          {
+            apache-nohome-iptables = {
+              settings = {
+                # Block an IP address if it accesses a non-existent
+                # home directory more than 5 times in 10 minutes,
+                # since that indicates that it's scanning.
+                filter = "apache-nohome";
+                action = '''iptables-multiport[name=HTTP, port="http,https"]''';
+                logpath = "/var/log/httpd/error_log*";
+                backend = "auto";
+                findtime = 600;
+                bantime = 600;
+                maxretry = 5;
+              };
+            };
+            dovecot = {
+              settings = {
+                # block IPs which failed to log-in
+                # aggressive mode add blocking for aborted connections
+                filter = "dovecot[mode=aggressive]";
+                maxretry = 3;
+              };
+            };
+          };
+        '';
+        type = with types; attrsOf (either lines (submodule ({ name, ... }: {
+          options = {
+            enabled = mkEnableOption "this jail." // {
+              default = true;
+              readOnly = name == "DEFAULT";
+            };
+
+            filter = mkOption {
+              type = nullOr (either str configFormat.type);
+
+              default = null;
+              description = lib.mdDoc "Content of the filter used for this jail.";
+            };
+
+            settings = mkOption {
+              inherit (settingsFormat) type;
+
+              default = { };
+              description = lib.mdDoc "Additional settings for this jail.";
+            };
+          };
+        })));
+        description = lib.mdDoc ''
+          The configuration of each Fail2ban “jail”.  A jail
+          consists of an action (such as blocking a port using
+          {command}`iptables`) that is triggered when a
+          filter applied to a log file triggers more than a certain
+          number of times in a certain time period.  Actions are
+          defined in {file}`/etc/fail2ban/action.d`,
+          while filters are defined in
+          {file}`/etc/fail2ban/filter.d`.
+
+          NixOS comes with a default `sshd` jail;
+          for it to work well,
+          {option}`services.openssh.logLevel` should be set to
+          `"VERBOSE"` or higher so that fail2ban
+          can observe failed login attempts.
+          This module sets it to `"VERBOSE"` if
+          not set otherwise, so enabling fail2ban can make SSH logs
+          more verbose.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.bantime-increment.formula == null || cfg.bantime-increment.multipliers == null;
+        message = ''
+          Options `services.fail2ban.bantime-increment.formula` and `services.fail2ban.bantime-increment.multipliers` cannot be both specified.
+        '';
+      }
+    ];
+
+    warnings = mkIf (!config.networking.firewall.enable && !config.networking.nftables.enable) [
+      "fail2ban can not be used without a firewall"
+    ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc = {
+      "fail2ban/fail2ban.local".source = fail2banConf;
+      "fail2ban/jail.local".source = jailConf;
+      "fail2ban/fail2ban.conf".source = "${cfg.package}/etc/fail2ban/fail2ban.conf";
+      "fail2ban/jail.conf".source = "${cfg.package}/etc/fail2ban/jail.conf";
+      "fail2ban/paths-common.conf".source = "${cfg.package}/etc/fail2ban/paths-common.conf";
+      "fail2ban/paths-nixos.conf".source = pathsConf;
+      "fail2ban/action.d".source = "${cfg.package}/etc/fail2ban/action.d/*.conf";
+      "fail2ban/filter.d".source = "${cfg.package}/etc/fail2ban/filter.d/*.conf";
+    } // (mapAttrs' mkFilter (filterAttrs (_: v: v.filter != null && !builtins.isString v.filter) attrsJails));
+
+    systemd.packages = [ cfg.package ];
+    systemd.services.fail2ban = {
+      wantedBy = [ "multi-user.target" ];
+      partOf = optional config.networking.firewall.enable "firewall.service";
+
+      restartTriggers = [ fail2banConf jailConf pathsConf ];
+
+      path = [ cfg.package cfg.packageFirewall pkgs.iproute2 ] ++ cfg.extraPackages;
+
+      serviceConfig = {
+        # Capabilities
+        CapabilityBoundingSet = [ "CAP_AUDIT_READ" "CAP_DAC_READ_SEARCH" "CAP_NET_ADMIN" "CAP_NET_RAW" ];
+        # Security
+        NoNewPrivileges = true;
+        # Directory
+        RuntimeDirectory = "fail2ban";
+        RuntimeDirectoryMode = "0750";
+        StateDirectory = "fail2ban";
+        StateDirectoryMode = "0750";
+        LogsDirectory = "fail2ban";
+        LogsDirectoryMode = "0750";
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+      };
+    };
+
+    # Defaults for the daemon settings
+    services.fail2ban.daemonSettings.Definition = {
+      logtarget = mkDefault "SYSLOG";
+      socket = mkDefault "/run/fail2ban/fail2ban.sock";
+      pidfile = mkDefault "/run/fail2ban/fail2ban.pid";
+      dbfile = mkDefault "/var/lib/fail2ban/fail2ban.sqlite3";
+    };
+
+    # Add some reasonable default jails.  The special "DEFAULT" jail
+    # sets default values for all other jails.
+    services.fail2ban.jails = mkMerge [
+      {
+        DEFAULT.settings = (optionalAttrs cfg.bantime-increment.enable
+          ({ "bantime.increment" = cfg.bantime-increment.enable; } // (mapAttrs'
+            (name: nameValuePair "bantime.${name}")
+            (filterAttrs (n: v: v != null && n != "enable") cfg.bantime-increment))
+          )
+        ) // {
+          # Miscellaneous options
+          inherit (cfg) banaction maxretry bantime;
+          ignoreip = ''127.0.0.1/8 ${optionalString config.networking.enableIPv6 "::1"} ${concatStringsSep " " cfg.ignoreIP}'';
+          backend = "systemd";
+          # Actions
+          banaction_allports = cfg.banaction-allports;
+        };
+      }
+
+      # Block SSH if there are too many failing connection attempts.
+      (mkIf config.services.openssh.enable {
+        sshd.settings.port = mkDefault (concatMapStringsSep "," builtins.toString config.services.openssh.ports);
+      })
+    ];
+
+    # Benefits from verbose sshd logging to observe failed login attempts,
+    # so we set that here unless the user overrode it.
+    services.openssh.settings.LogLevel = mkDefault "VERBOSE";
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/fprintd.nix b/nixpkgs/nixos/modules/services/security/fprintd.nix
new file mode 100644
index 000000000000..28f9b5908b53
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/fprintd.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.fprintd;
+  fprintdPkg = if cfg.tod.enable then pkgs.fprintd-tod else pkgs.fprintd;
+
+in
+
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.fprintd = {
+
+      enable = mkEnableOption (lib.mdDoc "fprintd daemon and PAM module for fingerprint readers handling");
+
+      package = mkOption {
+        type = types.package;
+        default = fprintdPkg;
+        defaultText = literalExpression "if config.services.fprintd.tod.enable then pkgs.fprintd-tod else pkgs.fprintd";
+        description = lib.mdDoc ''
+          fprintd package to use.
+        '';
+      };
+
+      tod = {
+
+        enable = mkEnableOption (lib.mdDoc "Touch OEM Drivers library support");
+
+        driver = mkOption {
+          type = types.package;
+          example = literalExpression "pkgs.libfprint-2-tod1-goodix";
+          description = lib.mdDoc ''
+            Touch OEM Drivers (TOD) package to use.
+          '';
+        };
+      };
+    };
+  };
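+  # Illustrative sketch: a reader that needs a Touch OEM Driver is enabled with
+  #
+  #   services.fprintd = {
+  #     enable = true;
+  #     tod.enable = true;
+  #     tod.driver = pkgs.libfprint-2-tod1-goodix;
+  #   };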
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.dbus.packages = [ cfg.package ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.services.fprintd.environment = mkIf cfg.tod.enable {
+      FP_TOD_DRIVERS_DIR = "${cfg.tod.driver}${cfg.tod.driver.driverPath}";
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/security/haka.nix b/nixpkgs/nixos/modules/services/security/haka.nix
new file mode 100644
index 000000000000..c93638f44d60
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/haka.nix
@@ -0,0 +1,156 @@
+# This module defines global configuration for Haka.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.haka;
+
+  haka = cfg.package;
+
+  hakaConf = pkgs.writeText "haka.conf"
+  ''
+    [general]
+    configuration = ${if lib.strings.hasPrefix "/" cfg.configFile
+      then "${cfg.configFile}"
+      else "${haka}/share/haka/sample/${cfg.configFile}"}
+    ${optionalString (builtins.lessThan 0 cfg.threads) "thread = ${toString cfg.threads}"}
+
+    [packet]
+    ${optionalString cfg.pcap ''module = "packet/pcap"''}
+    ${optionalString cfg.nfqueue ''module = "packet/nfqueue"''}
+    ${optionalString cfg.dump.enable ''dump = "yes"''}
+    ${optionalString cfg.dump.enable ''dump_input = "${cfg.dump.input}"''}
+    ${optionalString cfg.dump.enable ''dump_output = "${cfg.dump.output}"''}
+
+    interfaces = "${lib.strings.concatStringsSep "," cfg.interfaces}"
+
+    [log]
+    # Select the log module
+    module = "log/syslog"
+
+    # Set the default logging level
+    #level = "info,packet=debug"
+
+    [alert]
+    # Select the alert module
+    module = "alert/syslog"
+
+    # Disable alert on standard output
+    #alert_on_stdout = no
+
+    # alert/file module option
+    #file = "/dev/null"
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.haka = {
+
+      enable = mkEnableOption (lib.mdDoc "Haka");
+
+      package = mkOption {
+        default = pkgs.haka;
+        defaultText = literalExpression "pkgs.haka";
+        type = types.package;
+        description = lib.mdDoc ''
+          Which Haka derivation to use.
+        '';
+      };
+
+      configFile = mkOption {
+        default = "empty.lua";
+        example = "/srv/haka/myfilter.lua";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specify which configuration file Haka uses.
+          It can be absolute path or a path relative to the sample directory of
+          the haka git repo.
+        '';
+      };
+
+      interfaces = mkOption {
+        default = [ "eth0" ];
+        example = [ "any" ];
+        type = with types; listOf str;
+        description = lib.mdDoc ''
+          Specify which interface(s) Haka listens to.
+          Use 'any' to listen to all interfaces.
+        '';
+      };
+
+      threads = mkOption {
+        default = 0;
+        example = 4;
+        type = types.int;
+        description = lib.mdDoc ''
+          The number of threads that will be used.
+          All system threads are used by default.
+        '';
+      };
+
+      pcap = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc "Whether to enable pcap";
+      };
+
+      nfqueue = mkEnableOption (lib.mdDoc "nfqueue");
+
+      dump.enable = mkEnableOption (lib.mdDoc "dump");
+      dump.input  = mkOption {
+        default = "/tmp/input.pcap";
+        example = "/path/to/file.pcap";
+        type = types.path;
+        description = lib.mdDoc "Path to file where incoming packets are dumped";
+      };
+
+      dump.output  = mkOption {
+        default = "/tmp/output.pcap";
+        example = "/path/to/file.pcap";
+        type = types.path;
+        description = lib.mdDoc "Path to file where outgoing packets are dumped";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.pcap != cfg.nfqueue;
+        message = "either pcap or nfqueue can be enabled, not both.";
+      }
+      { assertion = cfg.dump.enable -> cfg.nfqueue;
+        message = "dump can only be used with nfqueue.";
+      }
+      { assertion = cfg.interfaces != [];
+        message = "at least one interface must be specified.";
+      }];
+
+
+    environment.systemPackages = [ haka ];
+
+    systemd.services.haka = {
+      description = "Haka";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        ExecStart = "${haka}/bin/haka -c ${hakaConf}";
+        ExecStop = "${haka}/bin/hakactl stop";
+        User = "root";
+        Type = "forking";
+      };
+    };
+  };
+}
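
A minimal sketch of a pcap-based setup built from the options above; the filter path and interface names are placeholders:

```
{
  services.haka = {
    enable = true;
    configFile = "/srv/haka/myfilter.lua";
    interfaces = [ "eth0" "wlan0" ];
    threads = 4;
    # pcap defaults to true; switching to nfqueue requires pcap = false,
    # because the assertion above forbids enabling both.
  };
}
```
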
diff --git a/nixpkgs/nixos/modules/services/security/haveged.nix b/nixpkgs/nixos/modules/services/security/haveged.nix
new file mode 100644
index 000000000000..db12a28a7d0b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/haveged.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.haveged;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.haveged = {
+
+      enable = mkEnableOption (lib.mdDoc ''
+        haveged entropy daemon, which refills /dev/random when it runs low.
+        NOTE: does nothing on kernels 5.6 and newer, where the unit is skipped.
+      '');
+      # source for the note https://github.com/jirka-h/haveged/issues/57
+
+      refill_threshold = mkOption {
+        type = types.int;
+        default = 1024;
+        description = lib.mdDoc ''
+          The number of bits of available entropy beneath which
+          haveged should refill the entropy pool.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    # https://github.com/jirka-h/haveged/blob/a4b69d65a8dfc5a9f52ff8505c7f58dcf8b9234f/contrib/Fedora/haveged.service
+    systemd.services.haveged = {
+      description = "Entropy Daemon based on the HAVEGE algorithm";
+      unitConfig = {
+        Documentation = "man:haveged(8)";
+        DefaultDependencies = false;
+        ConditionKernelVersion = "<5.6";
+      };
+      wantedBy = [ "sysinit.target" ];
+      after = [ "systemd-tmpfiles-setup-dev.service" ];
+      before = [ "sysinit.target" "shutdown.target" "systemd-journald.service" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.haveged}/bin/haveged -w ${toString cfg.refill_threshold} --Foreground -v 1";
+        Restart = "always";
+        SuccessExitStatus = "137 143";
+        SecureBits = "noroot-locked";
+        CapabilityBoundingSet = [ "CAP_SYS_ADMIN" "CAP_SYS_CHROOT" ];
+        # We can *not* set PrivateTmp=true as it can cause an ordering cycle.
+        PrivateTmp = false;
+        PrivateDevices = true;
+        ProtectSystem = "full";
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "newuname" "~@mount" ];
+        SystemCallErrorNumber = "EPERM";
+      };
+
+    };
+  };
+
+}
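
A minimal sketch; the threshold value is arbitrary, and the unit condition above means this only has an effect on kernels older than 5.6:

```
{
  services.haveged.enable = true;
  services.haveged.refill_threshold = 2048;
}
```
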
diff --git a/nixpkgs/nixos/modules/services/security/hockeypuck.nix b/nixpkgs/nixos/modules/services/security/hockeypuck.nix
new file mode 100644
index 000000000000..56c13d791920
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/hockeypuck.nix
@@ -0,0 +1,106 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.hockeypuck;
+  settingsFormat = pkgs.formats.toml { };
+in {
+  meta.maintainers = with lib.maintainers; [ etu ];
+
+  options.services.hockeypuck = {
+    enable = lib.mkEnableOption (lib.mdDoc "Hockeypuck OpenPGP Key Server");
+
+    port = lib.mkOption {
+      default = 11371;
+      type = lib.types.port;
+      description = lib.mdDoc "HKP port to listen on.";
+    };
+
+    settings = lib.mkOption {
+      type = settingsFormat.type;
+      default = { };
+      example = lib.literalExpression ''
+        {
+          hockeypuck = {
+            loglevel = "INFO";
+            logfile = "/var/log/hockeypuck/hockeypuck.log";
+            indexTemplate = "''${pkgs.hockeypuck-web}/share/templates/index.html.tmpl";
+            vindexTemplate = "''${pkgs.hockeypuck-web}/share/templates/index.html.tmpl";
+            statsTemplate = "''${pkgs.hockeypuck-web}/share/templates/stats.html.tmpl";
+            webroot = "''${pkgs.hockeypuck-web}/share/webroot";
+
+            hkp.bind = ":''${toString cfg.port}";
+
+            openpgp.db = {
+              driver = "postgres-jsonb";
+              dsn = "database=hockeypuck host=/var/run/postgresql sslmode=disable";
+            };
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Configuration file for Hockeypuck. The settings `loglevel` and
+        `openpgp.db.dsn` can be overridden simply by setting them here;
+        all other settings require `lib.mkForce` to override.
+
+        This service doesn't provision or enable PostgreSQL on your
+        system; rather, it assumes that you enable PostgreSQL and create
+        the database yourself.
+
+        Example:
+        ```
+          services.postgresql = {
+            enable = true;
+            ensureDatabases = [ "hockeypuck" ];
+            ensureUsers = [{
+              name = "hockeypuck";
+              ensureDBOwnership = true;
+            }];
+          };
+        ```
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.hockeypuck.settings.hockeypuck = {
+      loglevel = lib.mkDefault "INFO";
+      logfile = "/var/log/hockeypuck/hockeypuck.log";
+      indexTemplate = "${pkgs.hockeypuck-web}/share/templates/index.html.tmpl";
+      vindexTemplate = "${pkgs.hockeypuck-web}/share/templates/index.html.tmpl";
+      statsTemplate = "${pkgs.hockeypuck-web}/share/templates/stats.html.tmpl";
+      webroot = "${pkgs.hockeypuck-web}/share/webroot";
+
+      hkp.bind = ":${toString cfg.port}";
+
+      openpgp.db = {
+        driver = "postgres-jsonb";
+        dsn = lib.mkDefault "database=hockeypuck host=/var/run/postgresql sslmode=disable";
+      };
+    };
+
+    users.users.hockeypuck = {
+      isSystemUser = true;
+      group = "hockeypuck";
+      description = "Hockeypuck user";
+    };
+    users.groups.hockeypuck = {};
+
+    systemd.services.hockeypuck = {
+      description = "Hockeypuck OpenPGP Key Server";
+      after = [ "network.target" "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        WorkingDirectory = "/var/lib/hockeypuck";
+        User = "hockeypuck";
+        ExecStart = "${pkgs.hockeypuck}/bin/hockeypuck -config ${settingsFormat.generate "config.toml" cfg.settings}";
+        Restart = "always";
+        RestartSec = "5s";
+        LogsDirectory = "hockeypuck";
+        LogsDirectoryMode = "0755";
+        StateDirectory = "hockeypuck";
+      };
+    };
+  };
+}
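
A minimal sketch combining this module with the PostgreSQL setup suggested in the option description above; the port shown is the module default:

```
{
  services.hockeypuck.enable = true;
  services.hockeypuck.port = 11371;

  # The database is not provisioned by the module itself.
  services.postgresql = {
    enable = true;
    ensureDatabases = [ "hockeypuck" ];
    ensureUsers = [{
      name = "hockeypuck";
      ensureDBOwnership = true;
    }];
  };
}
```
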
diff --git a/nixpkgs/nixos/modules/services/security/hologram-agent.nix b/nixpkgs/nixos/modules/services/security/hologram-agent.nix
new file mode 100644
index 000000000000..666d95b9b94a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/hologram-agent.nix
@@ -0,0 +1,58 @@
+{pkgs, config, lib, ...}:
+
+with lib;
+
+let
+  cfg = config.services.hologram-agent;
+
+  cfgFile = pkgs.writeText "hologram-agent.json" (builtins.toJSON {
+    host = cfg.dialAddress;
+  });
+in {
+  options = {
+    services.hologram-agent = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the Hologram agent for AWS instance credentials";
+      };
+
+      dialAddress = mkOption {
+        type        = types.str;
+        default     = "localhost:3100";
+        description = lib.mdDoc "Hologram server and port.";
+      };
+
+      httpPort = mkOption {
+        type        = types.str;
+        default     = "80";
+        description = lib.mdDoc "Port for metadata service to listen on.";
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.kernelModules = [ "dummy" ];
+
+    networking.interfaces.dummy0.ipv4.addresses = [
+      { address = "169.254.169.254"; prefixLength = 32; }
+    ];
+
+    systemd.services.hologram-agent = {
+      description = "Provide EC2 instance credentials to machines outside of EC2";
+      after       = [ "network.target" ];
+      wantedBy    = [ "multi-user.target" ];
+      requires    = [ "network-link-dummy0.service" "network-addresses-dummy0.service" ];
+      preStart = ''
+        /run/current-system/sw/bin/rm -fv /run/hologram.sock
+      '';
+      serviceConfig = {
+        ExecStart = "${pkgs.hologram}/bin/hologram-agent -debug -conf ${cfgFile} -port ${cfg.httpPort}";
+      };
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ ];
+}
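
A minimal sketch of pointing the agent at a Hologram server; the host name is a placeholder:

```
{
  services.hologram-agent = {
    enable = true;
    dialAddress = "hologram.example.com:3100";
  };
}
```
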
diff --git a/nixpkgs/nixos/modules/services/security/hologram-server.nix b/nixpkgs/nixos/modules/services/security/hologram-server.nix
new file mode 100644
index 000000000000..e995bc79b112
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/hologram-server.nix
@@ -0,0 +1,130 @@
+{pkgs, config, lib, ...}:
+
+with lib;
+
+let
+  cfg = config.services.hologram-server;
+
+  cfgFile = pkgs.writeText "hologram-server.json" (builtins.toJSON {
+    ldap = {
+      host = cfg.ldapHost;
+      bind = {
+        dn       = cfg.ldapBindDN;
+        password = cfg.ldapBindPassword;
+      };
+      insecureldap    = cfg.ldapInsecure;
+      userattr        = cfg.ldapUserAttr;
+      baseDN          = cfg.ldapBaseDN;
+      enableldapRoles = cfg.enableLdapRoles;
+      roleAttr        = cfg.roleAttr;
+      groupClassAttr  = cfg.groupClassAttr;
+    };
+    aws = {
+      account     = cfg.awsAccount;
+      defaultrole = cfg.awsDefaultRole;
+    };
+    stats        = cfg.statsAddress;
+    listen       = cfg.listenAddress;
+    cachetimeout = cfg.cacheTimeoutSeconds;
+  });
+in {
+  options = {
+    services.hologram-server = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the Hologram server for AWS instance credentials";
+      };
+
+      listenAddress = mkOption {
+        type        = types.str;
+        default     = "0.0.0.0:3100";
+        description = lib.mdDoc "Address and port to listen on";
+      };
+
+      ldapHost = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "Address of the LDAP server to use";
+      };
+
+      ldapInsecure = mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc "Whether to connect to the LDAP server without SSL/TLS.";
+      };
+
+      ldapUserAttr = mkOption {
+        type        = types.str;
+        default     = "cn";
+        description = lib.mdDoc "The LDAP attribute for usernames";
+      };
+
+      ldapBaseDN = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "The base DN for your Hologram users";
+      };
+
+      ldapBindDN = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "DN of account to use to query the LDAP server";
+      };
+
+      ldapBindPassword = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "Password of account to use to query the LDAP server";
+      };
+
+      enableLdapRoles = mkOption {
+        type        = types.bool;
+        default     = false;
+        description = lib.mdDoc "Whether to assign user roles based on the user's LDAP group memberships";
+      };
+
+      groupClassAttr = mkOption {
+        type = types.str;
+        default = "groupOfNames";
+        description = lib.mdDoc "The objectclass attribute to search for groups when enableLdapRoles is true";
+      };
+
+      roleAttr = mkOption {
+        type        = types.str;
+        default     = "businessCategory";
+        description = lib.mdDoc "Which LDAP group attribute to search for authorized role ARNs";
+      };
+
+      awsAccount = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "AWS account number";
+      };
+
+      awsDefaultRole = mkOption {
+        type        = types.str;
+        description = lib.mdDoc "AWS default role";
+      };
+
+      statsAddress = mkOption {
+        type        = types.str;
+        default     = "";
+        description = lib.mdDoc "Address of statsd server";
+      };
+
+      cacheTimeoutSeconds = mkOption {
+        type        = types.int;
+        default     = 3600;
+        description = lib.mdDoc "How often (in seconds) to refresh the LDAP cache";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.hologram-server = {
+      description = "Provide EC2 instance credentials to machines outside of EC2";
+      after       = [ "network.target" ];
+      wantedBy    = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.hologram}/bin/hologram-server --debug --conf ${cfgFile}";
+      };
+    };
+  };
+}
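
A minimal sketch using the options above; every host name, DN and account value is a placeholder, and in practice the bind password should come from a secrets mechanism rather than being written into the configuration:

```
{
  services.hologram-server = {
    enable = true;
    ldapHost = "ldap.example.com";
    ldapBaseDN = "ou=people,dc=example,dc=com";
    ldapBindDN = "cn=hologram,ou=services,dc=example,dc=com";
    ldapBindPassword = "changeme";
    awsAccount = "123456789012";
    awsDefaultRole = "developer";
  };
}
```
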
diff --git a/nixpkgs/nixos/modules/services/security/infnoise.nix b/nixpkgs/nixos/modules/services/security/infnoise.nix
new file mode 100644
index 000000000000..739a0a84d90b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/infnoise.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.infnoise;
+in {
+  options = {
+    services.infnoise = {
+      enable = mkEnableOption (lib.mdDoc "the Infinite Noise TRNG driver");
+
+      fillDevRandom = mkOption {
+        description = lib.mdDoc ''
+          Whether to run the infnoise driver as a daemon to refill /dev/random.
+
+          If disabled, you can use the `infnoise` command-line tool to
+          manually obtain randomness.
+        '';
+        type = types.bool;
+        default = true;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.infnoise ];
+
+    services.udev.extraRules = ''
+      SUBSYSTEM=="usb", ATTRS{idVendor}=="0403", ATTRS{idProduct}=="6015", SYMLINK+="infnoise", TAG+="systemd", GROUP="dialout", MODE="0664", ENV{SYSTEMD_WANTS}="infnoise.service"
+    '';
+
+    systemd.services.infnoise = mkIf cfg.fillDevRandom {
+      description = "Infinite Noise TRNG driver";
+
+      bindsTo = [ "dev-infnoise.device" ];
+      after = [ "dev-infnoise.device" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.infnoise}/bin/infnoise --dev-random --debug";
+        Restart = "always";
+        User = "infnoise";
+        DynamicUser = true;
+        SupplementaryGroups = [ "dialout" ];
+        DeviceAllow = [ "/dev/infnoise" ];
+        DevicePolicy = "closed";
+        PrivateNetwork = true;
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true; # only reads entropy pool size and watermark
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+      };
+    };
+  };
+}
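
A minimal sketch of the two ways the module can be used:

```
{
  # Run the driver as a daemon that feeds /dev/random (the default) ...
  services.infnoise.enable = true;

  # ... or keep only the udev rule and the `infnoise` CLI tool:
  # services.infnoise.fillDevRandom = false;
}
```
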
diff --git a/nixpkgs/nixos/modules/services/security/jitterentropy-rngd.nix b/nixpkgs/nixos/modules/services/security/jitterentropy-rngd.nix
new file mode 100644
index 000000000000..7bfacb5ddc5d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/jitterentropy-rngd.nix
@@ -0,0 +1,18 @@
+{ lib, config, pkgs, ... }:
+let
+  cfg = config.services.jitterentropy-rngd;
+in
+{
+  options.services.jitterentropy-rngd = {
+    enable =
+      lib.mkEnableOption (lib.mdDoc "jitterentropy-rngd service configuration");
+    package = lib.mkPackageOptionMD pkgs "jitterentropy-rngd" { };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.packages = [ cfg.package ];
+    systemd.services."jitterentropy".wantedBy = [ "basic.target" ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ thillux ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/kanidm.nix b/nixpkgs/nixos/modules/services/security/kanidm.nix
new file mode 100644
index 000000000000..6f4d1dc382ab
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/kanidm.nix
@@ -0,0 +1,385 @@
+{ config, lib, options, pkgs, ... }:
+let
+  cfg = config.services.kanidm;
+  settingsFormat = pkgs.formats.toml { };
+  # Remove null values, so we can document optional values that don't end up in the generated TOML file.
+  filterConfig = lib.converge (lib.filterAttrsRecursive (_: v: v != null));
+  serverConfigFile = settingsFormat.generate "server.toml" (filterConfig cfg.serverSettings);
+  clientConfigFile = settingsFormat.generate "kanidm-config.toml" (filterConfig cfg.clientSettings);
+  unixConfigFile = settingsFormat.generate "kanidm-unixd.toml" (filterConfig cfg.unixSettings);
+  certPaths = builtins.map builtins.dirOf [ cfg.serverSettings.tls_chain cfg.serverSettings.tls_key ];
+
+  # Merge bind mount paths and remove paths where a prefix is already mounted.
+  # This makes sure that if e.g. the tls_chain is in the nix store and /nix/store is already in the mount
+  # paths, no new bind mount is added. Adding subpaths caused problems on ofborg.
+  hasPrefixInList = list: newPath: lib.any (path: lib.hasPrefix (builtins.toString path) (builtins.toString newPath)) list;
+  mergePaths = lib.foldl' (merged: newPath: let
+      # If the new path is a prefix to some existing path, we need to filter it out
+      filteredPaths = lib.filter (p: !lib.hasPrefix (builtins.toString newPath) (builtins.toString p)) merged;
+      # If a prefix of the new path is already in the list, do not add it
+      filteredNew = lib.optional (!hasPrefixInList filteredPaths newPath) newPath;
+    in filteredPaths ++ filteredNew) [];
+
+  defaultServiceConfig = {
+    BindReadOnlyPaths = [
+      "/nix/store"
+      "-/etc/resolv.conf"
+      "-/etc/nsswitch.conf"
+      "-/etc/hosts"
+      "-/etc/localtime"
+    ];
+    CapabilityBoundingSet = [];
+    # ProtectClock= adds DeviceAllow=char-rtc r
+    DeviceAllow = "";
+    # Implies ProtectSystem=strict, which re-mounts all paths
+    # DynamicUser = true;
+    LockPersonality = true;
+    MemoryDenyWriteExecute = true;
+    NoNewPrivileges = true;
+    PrivateDevices = true;
+    PrivateMounts = true;
+    PrivateNetwork = true;
+    PrivateTmp = true;
+    PrivateUsers = true;
+    ProcSubset = "pid";
+    ProtectClock = true;
+    ProtectHome = true;
+    ProtectHostname = true;
+    # Would re-mount paths ignored by temporary root
+    #ProtectSystem = "strict";
+    ProtectControlGroups = true;
+    ProtectKernelLogs = true;
+    ProtectKernelModules = true;
+    ProtectKernelTunables = true;
+    ProtectProc = "invisible";
+    RestrictAddressFamilies = [ ];
+    RestrictNamespaces = true;
+    RestrictRealtime = true;
+    RestrictSUIDSGID = true;
+    SystemCallArchitectures = "native";
+    SystemCallFilter = [ "@system-service" "~@privileged @resources @setuid @keyring" ];
+    # Does not work well with the temporary root
+    #UMask = "0066";
+  };
+
+in
+{
+  options.services.kanidm = {
+    enableClient = lib.mkEnableOption (lib.mdDoc "the Kanidm client");
+    enableServer = lib.mkEnableOption (lib.mdDoc "the Kanidm server");
+    enablePam = lib.mkEnableOption (lib.mdDoc "the Kanidm PAM and NSS integration");
+
+    package = lib.mkPackageOptionMD pkgs "kanidm" {};
+
+    serverSettings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options = {
+          bindaddress = lib.mkOption {
+            description = lib.mdDoc "Address/port combination the webserver binds to.";
+            example = "[::1]:8443";
+            type = lib.types.str;
+          };
+          # Should be optional but toml does not accept null
+          ldapbindaddress = lib.mkOption {
+            description = lib.mdDoc ''
+              Address and port the LDAP server is bound to. Setting this to `null` disables the LDAP interface.
+            '';
+            example = "[::1]:636";
+            default = null;
+            type = lib.types.nullOr lib.types.str;
+          };
+          origin = lib.mkOption {
+            description = lib.mdDoc "The origin of your Kanidm instance. Must have https as protocol.";
+            example = "https://idm.example.org";
+            type = lib.types.strMatching "^https://.*";
+          };
+          domain = lib.mkOption {
+            description = lib.mdDoc ''
+              The `domain` that Kanidm manages. Must be below or equal to the domain
+              specified in `serverSettings.origin`.
+              This can be left at `null` only if your instance has the role `ReadOnlyReplica`.
+              While it is possible to change the domain later on, it requires extra steps!
+              Please consider the warnings and execute the steps described
+              [in the documentation](https://kanidm.github.io/kanidm/stable/administrivia.html#rename-the-domain).
+            '';
+            example = "example.org";
+            default = null;
+            type = lib.types.nullOr lib.types.str;
+          };
+          db_path = lib.mkOption {
+            description = lib.mdDoc "Path to Kanidm database.";
+            default = "/var/lib/kanidm/kanidm.db";
+            readOnly = true;
+            type = lib.types.path;
+          };
+          tls_chain = lib.mkOption {
+            description = lib.mdDoc "TLS chain in pem format.";
+            type = lib.types.path;
+          };
+          tls_key = lib.mkOption {
+            description = lib.mdDoc "TLS key in pem format.";
+            type = lib.types.path;
+          };
+          log_level = lib.mkOption {
+            description = lib.mdDoc "Log level of the server.";
+            default = "info";
+            type = lib.types.enum [ "info" "debug" "trace" ];
+          };
+          role = lib.mkOption {
+            description = lib.mdDoc "The role of this server. This affects the replication relationship and thereby available features.";
+            default = "WriteReplica";
+            type = lib.types.enum [ "WriteReplica" "WriteReplicaNoUI" "ReadOnlyReplica" ];
+          };
+        };
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Settings for Kanidm, see
+        [the documentation](https://kanidm.github.io/kanidm/stable/server_configuration.html)
+        and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/server.toml)
+        for possible values.
+      '';
+    };
+
+    clientSettings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options.uri = lib.mkOption {
+          description = lib.mdDoc "Address of the Kanidm server.";
+          example = "http://127.0.0.1:8080";
+          type = lib.types.str;
+        };
+      };
+      description = lib.mdDoc ''
+        Configure Kanidm clients, needed for the PAM daemon. See
+        [the documentation](https://kanidm.github.io/kanidm/stable/client_tools.html#kanidm-configuration)
+        and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/config)
+        for possible values.
+      '';
+    };
+
+    unixSettings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+
+        options.pam_allowed_login_groups = lib.mkOption {
+          description = lib.mdDoc "Kanidm groups that are allowed to login using PAM.";
+          example = "my_pam_group";
+          type = lib.types.listOf lib.types.str;
+        };
+      };
+      description = lib.mdDoc ''
+        Configure Kanidm unix daemon.
+        See [the documentation](https://kanidm.github.io/kanidm/stable/integrations/pam_and_nsswitch.html#the-unix-daemon)
+        and [example configuration](https://github.com/kanidm/kanidm/blob/master/examples/unixd)
+        for possible values.
+      '';
+    };
+  };
+
+  config = lib.mkIf (cfg.enableClient || cfg.enableServer || cfg.enablePam) {
+    assertions =
+      [
+        {
+          assertion = !cfg.enableServer || ((cfg.serverSettings.tls_chain or null) == null) || (!lib.isStorePath cfg.serverSettings.tls_chain);
+          message = ''
+            <option>services.kanidm.serverSettings.tls_chain</option> points to
+            a file in the Nix store. You should use a quoted absolute path to
+            prevent this.
+          '';
+        }
+        {
+          assertion = !cfg.enableServer || ((cfg.serverSettings.tls_key or null) == null) || (!lib.isStorePath cfg.serverSettings.tls_key);
+          message = ''
+            <option>services.kanidm.serverSettings.tls_key</option> points to
+            a file in the Nix store. You should use a quoted absolute path to
+            prevent this.
+          '';
+        }
+        {
+          assertion = !cfg.enableClient || options.services.kanidm.clientSettings.isDefined;
+          message = ''
+            <option>services.kanidm.clientSettings</option> needs to be configured
+            if the client is enabled.
+          '';
+        }
+        {
+          assertion = !cfg.enablePam || options.services.kanidm.clientSettings.isDefined;
+          message = ''
+            <option>services.kanidm.clientSettings</option> needs to be configured
+            for the PAM daemon to connect to the Kanidm server.
+          '';
+        }
+        {
+          assertion = !cfg.enableServer || (cfg.serverSettings.domain == null
+            -> cfg.serverSettings.role == "WriteReplica" || cfg.serverSettings.role == "WriteReplicaNoUI");
+          message = ''
+            <option>services.kanidm.serverSettings.domain</option> can only be set if this instance
+            is not a ReadOnlyReplica. Otherwise the db would inherit it from
+            the instance it follows.
+          '';
+        }
+      ];
+
+    environment.systemPackages = lib.mkIf cfg.enableClient [ cfg.package ];
+
+    systemd.services.kanidm = lib.mkIf cfg.enableServer {
+      description = "kanidm identity management daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      serviceConfig = lib.mkMerge [
+        # Merge paths and ignore existing prefixes needs to sidestep mkMerge
+        (defaultServiceConfig // {
+          BindReadOnlyPaths = mergePaths (defaultServiceConfig.BindReadOnlyPaths ++ certPaths);
+        })
+        {
+          StateDirectory = "kanidm";
+          StateDirectoryMode = "0700";
+          RuntimeDirectory = "kanidmd";
+          ExecStart = "${cfg.package}/bin/kanidmd server -c ${serverConfigFile}";
+          User = "kanidm";
+          Group = "kanidm";
+
+          BindPaths = [
+            # To create the socket
+            "/run/kanidmd:/run/kanidmd"
+          ];
+
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+          CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+          # This would otherwise override the CAP_NET_BIND_SERVICE capability.
+          PrivateUsers = lib.mkForce false;
+          # Port needs to be exposed to the host network
+          PrivateNetwork = lib.mkForce false;
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+          TemporaryFileSystem = "/:ro";
+        }
+      ];
+      environment.RUST_LOG = "info";
+    };
+
+    systemd.services.kanidm-unixd = lib.mkIf cfg.enablePam {
+      description = "Kanidm PAM daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      restartTriggers = [ unixConfigFile clientConfigFile ];
+      serviceConfig = lib.mkMerge [
+        defaultServiceConfig
+        {
+          CacheDirectory = "kanidm-unixd";
+          CacheDirectoryMode = "0700";
+          RuntimeDirectory = "kanidm-unixd";
+          ExecStart = "${cfg.package}/bin/kanidm_unixd";
+          User = "kanidm-unixd";
+          Group = "kanidm-unixd";
+
+          BindReadOnlyPaths = [
+            "-/etc/kanidm"
+            "-/etc/static/kanidm"
+            "-/etc/ssl"
+            "-/etc/static/ssl"
+            "-/etc/passwd"
+            "-/etc/group"
+          ];
+          BindPaths = [
+            # To create the socket
+            "/run/kanidm-unixd:/var/run/kanidm-unixd"
+          ];
+          # Needs to connect to kanidmd
+          PrivateNetwork = lib.mkForce false;
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+          TemporaryFileSystem = "/:ro";
+        }
+      ];
+      environment.RUST_LOG = "info";
+    };
+
+    systemd.services.kanidm-unixd-tasks = lib.mkIf cfg.enablePam {
+      description = "Kanidm PAM home management daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "kanidm-unixd.service" ];
+      partOf = [ "kanidm-unixd.service" ];
+      restartTriggers = [ unixConfigFile clientConfigFile ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/kanidm_unixd_tasks";
+
+        BindReadOnlyPaths = [
+          "/nix/store"
+          "-/etc/resolv.conf"
+          "-/etc/nsswitch.conf"
+          "-/etc/hosts"
+          "-/etc/localtime"
+          "-/etc/kanidm"
+          "-/etc/static/kanidm"
+        ];
+        BindPaths = [
+          # To manage home directories
+          "/home"
+          # To connect to kanidm-unixd
+          "/run/kanidm-unixd:/var/run/kanidm-unixd"
+        ];
+        # CAP_DAC_OVERRIDE is needed to ignore ownership of unixd socket
+        CapabilityBoundingSet = [ "CAP_CHOWN" "CAP_FOWNER" "CAP_DAC_OVERRIDE" "CAP_DAC_READ_SEARCH" ];
+        IPAddressDeny = "any";
+        # Need access to users
+        PrivateUsers = false;
+        # Need access to home directories
+        ProtectHome = false;
+        RestrictAddressFamilies = [ "AF_UNIX" ];
+        TemporaryFileSystem = "/:ro";
+        Restart = "on-failure";
+      };
+      environment.RUST_LOG = "info";
+    };
+
+    # These paths are hardcoded
+    environment.etc = lib.mkMerge [
+      (lib.mkIf cfg.enableServer {
+        "kanidm/server.toml".source = serverConfigFile;
+      })
+      (lib.mkIf options.services.kanidm.clientSettings.isDefined {
+        "kanidm/config".source = clientConfigFile;
+      })
+      (lib.mkIf cfg.enablePam {
+        "kanidm/unixd".source = unixConfigFile;
+      })
+    ];
+
+    system.nssModules = lib.mkIf cfg.enablePam [ cfg.package ];
+
+    system.nssDatabases.group = lib.optional cfg.enablePam "kanidm";
+    system.nssDatabases.passwd = lib.optional cfg.enablePam "kanidm";
+
+    users.groups = lib.mkMerge [
+      (lib.mkIf cfg.enableServer {
+        kanidm = { };
+      })
+      (lib.mkIf cfg.enablePam {
+        kanidm-unixd = { };
+      })
+    ];
+    users.users = lib.mkMerge [
+      (lib.mkIf cfg.enableServer {
+        kanidm = {
+          description = "Kanidm server";
+          isSystemUser = true;
+          group = "kanidm";
+          packages = [ cfg.package ];
+        };
+      })
+      (lib.mkIf cfg.enablePam {
+        kanidm-unixd = {
+          description = "Kanidm PAM daemon";
+          isSystemUser = true;
+          group = "kanidm-unixd";
+        };
+      })
+    ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ erictapen Flakebi ];
+  meta.buildDocsInSandbox = false;
+}
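
A minimal sketch of a combined server/PAM deployment built from the options above; the origin, domain, certificate paths and group name are placeholders, and the TLS files must live outside the Nix store (the assertions above reject store paths):

```
{
  services.kanidm = {
    enableServer = true;
    serverSettings = {
      origin = "https://idm.example.org";
      domain = "example.org";
      bindaddress = "[::]:8443";
      tls_chain = "/var/lib/kanidm-certs/fullchain.pem";
      tls_key = "/var/lib/kanidm-certs/key.pem";
    };

    enablePam = true;
    clientSettings.uri = "https://idm.example.org";
    unixSettings.pam_allowed_login_groups = [ "posix_login" ];
  };
}
```
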
diff --git a/nixpkgs/nixos/modules/services/security/munge.nix b/nixpkgs/nixos/modules/services/security/munge.nix
new file mode 100644
index 000000000000..4d6fe33f697b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/munge.nix
@@ -0,0 +1,68 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.munge;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.munge = {
+      enable = mkEnableOption (lib.mdDoc "munge service");
+
+      password = mkOption {
+        default = "/etc/munge/munge.key";
+        type = types.path;
+        description = lib.mdDoc ''
+          The path to a daemon's secret key.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.munge ];
+
+    users.users.munge = {
+      description   = "Munge daemon user";
+      isSystemUser  = true;
+      group         = "munge";
+    };
+
+    users.groups.munge = {};
+
+    systemd.services.munged = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      path = [ pkgs.munge pkgs.coreutils ];
+
+      serviceConfig = {
+        ExecStartPre = "+${pkgs.coreutils}/bin/chmod 0400 ${cfg.password}";
+        ExecStart = "${pkgs.munge}/bin/munged --syslog --key-file ${cfg.password}";
+        PIDFile = "/run/munge/munged.pid";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        User = "munge";
+        Group = "munge";
+        StateDirectory = "munge";
+        StateDirectoryMode = "0711";
+        RuntimeDirectory = "munge";
+      };
+
+    };
+
+  };
+
+}
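
A minimal sketch; the key path is the module default and the key itself must exist before the service starts (the unit only tightens its permissions to 0400):

```
{
  services.munge.enable = true;
  services.munge.password = "/etc/munge/munge.key";
}
```
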
diff --git a/nixpkgs/nixos/modules/services/security/nginx-sso.nix b/nixpkgs/nixos/modules/services/security/nginx-sso.nix
new file mode 100644
index 000000000000..971f22ed3476
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/nginx-sso.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nginx.sso;
+  pkg = getBin cfg.package;
+  configYml = pkgs.writeText "nginx-sso.yml" (builtins.toJSON cfg.configuration);
+in {
+  options.services.nginx.sso = {
+    enable = mkEnableOption (lib.mdDoc "nginx-sso service");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.nginx-sso;
+      defaultText = literalExpression "pkgs.nginx-sso";
+      description = lib.mdDoc ''
+        The nginx-sso package that should be used.
+      '';
+    };
+
+    configuration = mkOption {
+      type = types.attrsOf types.unspecified;
+      default = {};
+      example = literalExpression ''
+        {
+          listen = { addr = "127.0.0.1"; port = 8080; };
+
+          providers.token.tokens = {
+            myuser = "MyToken";
+          };
+
+          acl = {
+            rule_sets = [
+              {
+                rules = [ { field = "x-application"; equals = "MyApp"; } ];
+                allow = [ "myuser" ];
+              }
+            ];
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        nginx-sso configuration
+        ([documentation](https://github.com/Luzifer/nginx-sso/wiki/Main-Configuration))
+        as a Nix attribute set.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.nginx-sso = {
+      description = "Nginx SSO Backend";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkg}/bin/nginx-sso \
+            --config ${configYml} \
+            --frontend-dir ${pkg}/share/frontend
+        '';
+        Restart = "always";
+        DynamicUser = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/oauth2_proxy.nix b/nixpkgs/nixos/modules/services/security/oauth2_proxy.nix
new file mode 100644
index 000000000000..718c3d2498ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/oauth2_proxy.nix
@@ -0,0 +1,593 @@
+# NixOS module for oauth2_proxy.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.oauth2_proxy;
+
+  # oauth2_proxy provides many options that are only relevant if you are using
+  # a certain provider. This set maps from provider name to a function that
+  # takes the configuration and returns a string that can be inserted into the
+  # command-line to launch oauth2_proxy.
+  providerSpecificOptions = {
+    azure = cfg: {
+      azure-tenant = cfg.azure.tenant;
+      resource = cfg.azure.resource;
+    };
+
+    github = cfg: { github = {
+      inherit (cfg.github) org team;
+    }; };
+
+    google = cfg: { google = with cfg.google; optionalAttrs (groups != []) {
+      admin-email = adminEmail;
+      service-account = serviceAccountJSON;
+      group = groups;
+    }; };
+  };
+
+  authenticatedEmailsFile = pkgs.writeText "authenticated-emails" cfg.email.addresses;
+
+  getProviderOptions = cfg: provider: providerSpecificOptions.${provider} or (_: {}) cfg;
+
+  allConfig = with cfg; {
+    inherit (cfg) provider scope upstream;
+    approval-prompt = approvalPrompt;
+    basic-auth-password = basicAuthPassword;
+    client-id = clientID;
+    client-secret = clientSecret;
+    custom-templates-dir = customTemplatesDir;
+    email-domain = email.domains;
+    http-address = httpAddress;
+    login-url = loginURL;
+    pass-access-token = passAccessToken;
+    pass-basic-auth = passBasicAuth;
+    pass-host-header = passHostHeader;
+    reverse-proxy = reverseProxy;
+    proxy-prefix = proxyPrefix;
+    profile-url = profileURL;
+    redeem-url = redeemURL;
+    redirect-url = redirectURL;
+    request-logging = requestLogging;
+    skip-auth-regex = skipAuthRegexes;
+    signature-key = signatureKey;
+    validate-url = validateURL;
+    htpasswd-file = htpasswd.file;
+    cookie = {
+      inherit (cookie) domain secure expire name secret refresh;
+      httponly = cookie.httpOnly;
+    };
+    set-xauthrequest = setXauthrequest;
+  } // lib.optionalAttrs (cfg.email.addresses != null) {
+    authenticated-emails-file = authenticatedEmailsFile;
+  } // lib.optionalAttrs (cfg.passBasicAuth) {
+    basic-auth-password = cfg.basicAuthPassword;
+  } // lib.optionalAttrs (cfg.htpasswd.file != null) {
+    display-htpasswd-file = cfg.htpasswd.displayForm;
+  } // lib.optionalAttrs tls.enable {
+    tls-cert-file = tls.certificate;
+    tls-key-file = tls.key;
+    https-address = tls.httpsAddress;
+  } // (getProviderOptions cfg cfg.provider) // cfg.extraConfig;
+
+  mapConfig = key: attr:
+  optionalString (attr != null && attr != []) (
+    if isDerivation attr then mapConfig key (toString attr) else
+    if (builtins.typeOf attr) == "set" then concatStringsSep " "
+      (mapAttrsToList (name: value: mapConfig (key + "-" + name) value) attr) else
+    if (builtins.typeOf attr) == "list" then concatMapStringsSep " " (mapConfig key) attr else
+    if (builtins.typeOf attr) == "bool" then "--${key}=${boolToString attr}" else
+    if (builtins.typeOf attr) == "string" then "--${key}='${attr}'" else
+    "--${key}=${toString attr}");
+
+  configString = concatStringsSep " " (mapAttrsToList mapConfig allConfig);
+in
+{
+  options.services.oauth2_proxy = {
+    enable = mkEnableOption (lib.mdDoc "oauth2_proxy");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.oauth2-proxy;
+      defaultText = literalExpression "pkgs.oauth2-proxy";
+      description = lib.mdDoc ''
+        The package that provides oauth2-proxy.
+      '';
+    };
+
+    ##############################################
+    # PROVIDER configuration
+    # Taken from: https://github.com/oauth2-proxy/oauth2-proxy/blob/master/providers/providers.go
+    provider = mkOption {
+      type = types.enum [
+        "adfs"
+        "azure"
+        "bitbucket"
+        "digitalocean"
+        "facebook"
+        "github"
+        "gitlab"
+        "google"
+        "keycloak"
+        "keycloak-oidc"
+        "linkedin"
+        "login.gov"
+        "nextcloud"
+        "oidc"
+      ];
+      default = "google";
+      description = lib.mdDoc ''
+        OAuth provider.
+      '';
+    };
+
+    approvalPrompt = mkOption {
+      type = types.enum ["force" "auto"];
+      default = "force";
+      description = lib.mdDoc ''
+        OAuth approval_prompt.
+      '';
+    };
+
+    clientID = mkOption {
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        The OAuth Client ID.
+      '';
+      example = "123456.apps.googleusercontent.com";
+    };
+
+    clientSecret = mkOption {
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        The OAuth Client Secret.
+      '';
+    };
+
+    skipAuthRegexes = mkOption {
+     type = types.listOf types.str;
+     default = [];
+     description = lib.mdDoc ''
+       Skip authentication for requests matching any of these regular
+       expressions.
+     '';
+    };
+
+    # XXX: Not clear whether these two options are mutually exclusive or not.
+    email = {
+      domains = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Authenticate only users whose email address belongs to one of the
+          specified domains. Use `*` to authenticate any email address.
+        '';
+      };
+
+      addresses = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          Line-separated email addresses that are allowed to authenticate.
+        '';
+      };
+    };
+
+    loginURL = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Authentication endpoint.
+
+        You only need to set this if you are using a self-hosted provider (e.g.
+        GitHub Enterprise). If you're using a publicly hosted provider
+        (e.g. github.com), then the default works.
+      '';
+      example = "https://provider.example.com/oauth/authorize";
+    };
+
+    redeemURL = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Token redemption endpoint.
+
+        You only need to set this if you are using a self-hosted provider (e.g.
+        GitHub Enterprise). If you're using a publicly hosted provider
+        (e.g. github.com), then the default works.
+      '';
+      example = "https://provider.example.com/oauth/token";
+    };
+
+    validateURL = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Access token validation endpoint.
+
+        You only need to set this if you are using a self-hosted provider (e.g.
+        GitHub Enterprise). If you're using a publicly hosted provider
+        (e.g. github.com), then the default works.
+      '';
+      example = "https://provider.example.com/user/emails";
+    };
+
+    redirectURL = mkOption {
+      # XXX: jml suspects this is always necessary, but the command-line
+      # doesn't require it so making it optional.
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The OAuth2 redirect URL.
+      '';
+      example = "https://internalapp.yourcompany.com/oauth2/callback";
+    };
+
+    azure = {
+      tenant = mkOption {
+        type = types.str;
+        default = "common";
+        description = lib.mdDoc ''
+          Go to a tenant-specific or common (tenant-independent) endpoint.
+        '';
+      };
+
+      resource = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The resource that is protected.
+        '';
+      };
+    };
+
+    google = {
+      adminEmail = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The Google Admin to impersonate for API calls.
+
+          Only users with access to the Admin APIs can access the Admin SDK
+          Directory API, thus the service account needs to impersonate one of
+          those users to access the Admin SDK Directory API.
+
+          See <https://developers.google.com/admin-sdk/directory/v1/guides/delegation#delegate_domain-wide_authority_to_your_service_account>.
+        '';
+      };
+
+      groups = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Restrict logins to members of these Google groups.
+        '';
+      };
+
+      serviceAccountJSON = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          The path to the service account JSON credentials.
+        '';
+      };
+    };
+
+    github = {
+      org = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Restrict logins to members of this organisation.
+        '';
+      };
+
+      team = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Restrict logins to members of this team.
+        '';
+      };
+    };
+
+
+    ####################################################
+    # UPSTREAM Configuration
+    upstream = mkOption {
+      type = with types; coercedTo str (x: [x]) (listOf str);
+      default = [];
+      description = lib.mdDoc ''
+        The http url(s) of the upstream endpoint or `file://`
+        paths for static files. Routing is based on the path.
+      '';
+    };
+
+    passAccessToken = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Pass OAuth access_token to upstream via X-Forwarded-Access-Token header.
+      '';
+    };
+
+    passBasicAuth = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Pass HTTP Basic Auth, X-Forwarded-User and X-Forwarded-Email information to upstream.
+      '';
+    };
+
+    basicAuthPassword = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The password to set when passing the HTTP Basic Auth header.
+      '';
+    };
+
+    passHostHeader = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Pass the request Host Header to upstream.
+      '';
+    };
+
+    signatureKey = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        GAP-Signature request signature key.
+      '';
+      example = "sha1:secret0";
+    };
+
+    cookie = {
+      domain = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Optional cookie domains to force cookies to (ie: `.yourcompany.com`).
+          The longest domain matching the request's host will be used (or the shortest
+          cookie domain if there is no match).
+        '';
+        example = ".yourcompany.com";
+      };
+
+      expire = mkOption {
+        type = types.str;
+        default = "168h0m0s";
+        description = lib.mdDoc ''
+          Expire timeframe for cookie.
+        '';
+      };
+
+      httpOnly = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Set HttpOnly cookie flag.
+        '';
+      };
+
+      name = mkOption {
+        type = types.str;
+        default = "_oauth2_proxy";
+        description = lib.mdDoc ''
+          The name of the cookie that the oauth_proxy creates.
+        '';
+      };
+
+      refresh = mkOption {
+        # XXX: Unclear what the behavior is when this is not specified.
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Refresh the cookie after this duration; 0 to disable.
+        '';
+        example = "168h0m0s";
+      };
+
+      secret = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The seed string for secure cookies.
+        '';
+      };
+
+      secure = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Set secure (HTTPS) cookie flag.
+        '';
+      };
+    };
+
+    ####################################################
+    # OAUTH2 PROXY configuration
+
+    httpAddress = mkOption {
+      type = types.str;
+      default = "http://127.0.0.1:4180";
+      description = lib.mdDoc ''
+        HTTP listening address.  This module does not expose the port by
+        default. If you want this URL to be accessible to other machines, please
+        add the port to `networking.firewall.allowedTCPPorts`.
+      '';
+    };
+
+    htpasswd = {
+      file = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Additionally authenticate against a htpasswd file. Entries must be
+          created with `htpasswd -s` for SHA encryption.
+        '';
+      };
+
+      displayForm = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Display username / password login form if an htpasswd file is provided.
+        '';
+      };
+    };
+
+    customTemplatesDir = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to custom HTML templates.
+      '';
+    };
+
+    reverseProxy = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        When running behind a reverse proxy, controls whether headers
+        like `X-Real-Ip` are accepted. Usage behind a reverse proxy
+        requires this flag to be set to avoid logging the reverse
+        proxy's IP address.
+      '';
+    };
+
+    proxyPrefix = mkOption {
+      type = types.str;
+      default = "/oauth2";
+      description = lib.mdDoc ''
+        The url root path that this proxy should be nested under.
+      '';
+    };
+
+    tls = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to serve over TLS.
+        '';
+      };
+
+      certificate = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to certificate file.
+        '';
+      };
+
+      key = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to private key file.
+        '';
+      };
+
+      httpsAddress = mkOption {
+        type = types.str;
+        default = ":443";
+        description = lib.mdDoc ''
+          `addr:port` to listen on for HTTPS clients.
+
+          Remember to add `port` to
+          `allowedTCPPorts` if you want other machines to be
+          able to connect to it.
+        '';
+      };
+    };
+
+    requestLogging = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Log requests to stdout.
+      '';
+    };
+
+    ####################################################
+    # UNKNOWN
+
+    # XXX: Is this mandatory? Is it part of another group? Is it part of the provider specification?
+    scope = mkOption {
+      # XXX: jml suspects this is always necessary, but the command-line
+      # doesn't require it so making it optional.
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        OAuth scope specification.
+      '';
+    };
+
+    profileURL = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Profile access endpoint.
+      '';
+    };
+
+    setXauthrequest = mkOption {
+      type = types.nullOr types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Set X-Auth-Request-User and X-Auth-Request-Email response headers (useful in Nginx auth_request mode). Setting this to 'null' means using the upstream default (false).
+      '';
+    };
+
+    extraConfig = mkOption {
+      default = {};
+      type = types.attrsOf types.anything;
+      description = lib.mdDoc ''
+        Extra config to pass to oauth2-proxy.
+      '';
+    };
+
+    keyFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        oauth2-proxy allows passing sensitive configuration via environment variables.
+        Make a file that contains lines like
+        OAUTH2_PROXY_CLIENT_SECRET=asdfasdfasdf.apps.googleuserscontent.com
+        and specify the path here.
+      '';
+      example = "/run/keys/oauth2_proxy";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    services.oauth2_proxy = mkIf (cfg.keyFile != null) {
+      clientID = mkDefault null;
+      clientSecret = mkDefault null;
+      cookie.secret = mkDefault null;
+    };
+
+    users.users.oauth2_proxy = {
+      description = "OAuth2 Proxy";
+      isSystemUser = true;
+      group = "oauth2_proxy";
+    };
+
+    users.groups.oauth2_proxy = {};
+
+    systemd.services.oauth2_proxy = {
+      description = "OAuth2 Proxy";
+      path = [ cfg.package ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        User = "oauth2_proxy";
+        Restart = "always";
+        ExecStart = "${cfg.package}/bin/oauth2-proxy ${configString}";
+        EnvironmentFile = mkIf (cfg.keyFile != null) cfg.keyFile;
+      };
+    };
+
+  };
+}
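
A minimal sketch of fronting an internal application with GitHub authentication; the client ID, organisation, upstream URL, redirect URL and key file path are placeholders, and the key file is assumed to contain the `OAUTH2_PROXY_CLIENT_SECRET=...` line described above:

```
{
  services.oauth2_proxy = {
    enable = true;
    provider = "github";
    clientID = "0123456789abcdef0123";
    # Supplies the client secret (and optionally the cookie secret)
    # via environment variables.
    keyFile = "/run/keys/oauth2_proxy";
    github.org = "my-org";
    email.domains = [ "example.com" ];
    upstream = "http://127.0.0.1:3000";
    redirectURL = "https://app.example.com/oauth2/callback";
    setXauthrequest = true;
  };
}
```
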
diff --git a/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix b/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix
new file mode 100644
index 000000000000..b8e45f67cf78
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/oauth2_proxy_nginx.nix
@@ -0,0 +1,66 @@
+{ config, lib, ... }:
+with lib;
+let
+  cfg = config.services.oauth2_proxy.nginx;
+in
+{
+  options.services.oauth2_proxy.nginx = {
+    proxy = mkOption {
+      type = types.str;
+      default = config.services.oauth2_proxy.httpAddress;
+      defaultText = literalExpression "config.services.oauth2_proxy.httpAddress";
+      description = lib.mdDoc ''
+        The address of the reverse proxy endpoint for oauth2_proxy
+      '';
+    };
+    virtualHosts = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        A list of nginx virtual hosts to put behind the oauth2 proxy
+      '';
+    };
+  };
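+  # A minimal usage sketch of the options above, assuming an nginx virtual host
+  # "example.com" that should sit behind the oauth2 proxy (the host name and
+  # upstream address are placeholders):
+  #   services.oauth2_proxy.nginx.virtualHosts = [ "example.com" ];
+  #   services.nginx.virtualHosts."example.com".locations."/".proxyPass =
+  #     "http://127.0.0.1:8000";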
+  config.services.oauth2_proxy = mkIf (cfg.virtualHosts != [] && (hasPrefix "127.0.0.1:" cfg.proxy)) {
+    enable = true;
+  };
+  config.services.nginx = mkIf config.services.oauth2_proxy.enable (mkMerge
+  ((optional (cfg.virtualHosts != []) {
+    recommendedProxySettings = true; # needed because of duplicate headers
+  }) ++ (map (vhost: {
+    virtualHosts.${vhost} = {
+      locations."/oauth2/" = {
+        proxyPass = cfg.proxy;
+        extraConfig = ''
+          proxy_set_header X-Scheme                $scheme;
+          proxy_set_header X-Auth-Request-Redirect $scheme://$host$request_uri;
+        '';
+      };
+      locations."/oauth2/auth" = {
+        proxyPass = cfg.proxy;
+        extraConfig = ''
+          proxy_set_header X-Scheme         $scheme;
+          # nginx auth_request includes headers but not body
+          proxy_set_header Content-Length   "";
+          proxy_pass_request_body           off;
+        '';
+      };
+      locations."/".extraConfig = ''
+        auth_request /oauth2/auth;
+        error_page 401 = /oauth2/sign_in;
+
+        # pass information via X-User and X-Email headers to backend,
+        # requires running with --set-xauthrequest flag
+        auth_request_set $user   $upstream_http_x_auth_request_user;
+        auth_request_set $email  $upstream_http_x_auth_request_email;
+        proxy_set_header X-User  $user;
+        proxy_set_header X-Email $email;
+
+        # if you enabled --cookie-refresh, this is needed for it to work with auth_request
+        auth_request_set $auth_cookie $upstream_http_set_cookie;
+        add_header Set-Cookie $auth_cookie;
+      '';
+
+    };
+  }) cfg.virtualHosts)));
+}
diff --git a/nixpkgs/nixos/modules/services/security/opensnitch.nix b/nixpkgs/nixos/modules/services/security/opensnitch.nix
new file mode 100644
index 000000000000..97ac3a72804c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/opensnitch.nix
@@ -0,0 +1,190 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.opensnitch;
+  format = pkgs.formats.json {};
+
+  predefinedRules = flip mapAttrs cfg.rules (name: cfg: {
+    file = pkgs.writeText "rule" (builtins.toJSON cfg);
+  });
+
+in {
+  options = {
+    services.opensnitch = {
+      enable = mkEnableOption (mdDoc "Opensnitch application firewall");
+
+      rules = mkOption {
+        default = {};
+        example = literalExpression ''
+          {
+            "tor" = {
+              "name" = "tor";
+              "enabled" = true;
+              "action" = "allow";
+              "duration" = "always";
+              "operator" = {
+                "type" ="simple";
+                "sensitive" = false;
+                "operand" = "process.path";
+                "data" = "''${lib.getBin pkgs.tor}/bin/tor";
+              };
+            };
+          };
+        '';
+
+        description = mdDoc ''
+          Declarative configuration of firewall rules.
+          All rules will be stored in `/var/lib/opensnitch/rules`.
+          See [upstream documentation](https://github.com/evilsocket/opensnitch/wiki/Rules)
+          for available options.
+        '';
+
+        type = types.submodule {
+          freeformType = format.type;
+        };
+      };
+
+      settings = mkOption {
+        type = types.submodule {
+          freeformType = format.type;
+
+          options = {
+            Server = {
+
+              Address = mkOption {
+                type = types.str;
+                description = mdDoc ''
+                  Unix socket path (unix:///tmp/osui.sock, the "unix:///" part is
+                  mandatory) or TCP socket (192.168.1.100:50051).
+                '';
+              };
+
+              LogFile = mkOption {
+                type = types.path;
+                description = mdDoc ''
+                  File to write logs to (use /dev/stdout to write logs to standard
+                  output).
+                '';
+              };
+
+            };
+
+            DefaultAction = mkOption {
+              type = types.enum [ "allow" "deny" ];
+              description = mdDoc ''
+                Default action whether to block or allow application internet
+                access.
+              '';
+            };
+
+            DefaultDuration = mkOption {
+              type = types.enum [
+                "once" "always" "until restart" "30s" "5m" "15m" "30m" "1h"
+              ];
+              description = mdDoc ''
+                Default duration of firewall rule.
+              '';
+            };
+
+            InterceptUnknown = mkOption {
+              type = types.bool;
+              description = mdDoc ''
+                Whether to intercept connections that cannot be attributed to a known process.
+              '';
+            };
+
+            ProcMonitorMethod = mkOption {
+              type = types.enum [ "ebpf" "proc" "ftrace" "audit" ];
+              description = mdDoc ''
+                Which process monitoring method to use.
+              '';
+            };
+
+            LogLevel = mkOption {
+              type = types.enum [ 0 1 2 3 4 ];
+              description = mdDoc ''
+                Default log level from 0 to 4 (debug, info, important, warning,
+                error).
+              '';
+            };
+
+            Firewall = mkOption {
+              type = types.enum [ "iptables" "nftables" ];
+              description = mdDoc ''
+                Which firewall backend to use.
+              '';
+            };
+
+            Stats = {
+
+              MaxEvents = mkOption {
+                type = types.int;
+                description = mdDoc ''
+                  Max events to send to the GUI.
+                '';
+              };
+
+              MaxStats = mkOption {
+                type = types.int;
+                description = mdDoc ''
+                  Max stats per item to keep in backlog.
+                '';
+              };
+
+            };
+          };
+        };
+        description = mdDoc ''
+          opensnitchd configuration. Refer to [upstream documentation](https://github.com/evilsocket/opensnitch/wiki/Configurations)
+          for details on supported values.
+        '';
+      };
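+      # A minimal sketch of declarative settings using the options above
+      # (the values shown are illustrative, not recommendations):
+      #   services.opensnitch.settings = {
+      #     DefaultAction = "deny";
+      #     DefaultDuration = "until restart";
+      #     ProcMonitorMethod = "ebpf";
+      #     LogLevel = 2;
+      #     Firewall = "nftables";
+      #   };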
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    # pkgs.opensnitch is referenced elsewhere in the module, so we don't need to worry about it being garbage collected
+    services.opensnitch.settings = mapAttrs (_: v: mkDefault v) (builtins.fromJSON (builtins.unsafeDiscardStringContext (builtins.readFile "${pkgs.opensnitch}/etc/opensnitchd/default-config.json")));
+
+    systemd = {
+      packages = [ pkgs.opensnitch ];
+      services.opensnitchd.wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.services.opensnitchd.preStart = mkIf (cfg.rules != {}) (let
+      rules = flip mapAttrsToList predefinedRules (file: content: {
+        inherit (content) file;
+        local = "/var/lib/opensnitch/rules/${file}.json";
+      });
+    in ''
+      # Remove all firewall rules from `/var/lib/opensnitch/rules` that are symlinks to a store path,
+      # but aren't declared in `cfg.rules` (i.e. all rules that were "removed" from
+      # `cfg.rules`).
+      find /var/lib/opensnitch/rules -type l -lname '${builtins.storeDir}/*' ${optionalString (rules != []) ''
+        -not \( ${concatMapStringsSep " -o " ({ local, ... }:
+          "-name '${baseNameOf local}*'")
+        rules} \) \
+      ''} -delete
+      ${concatMapStrings ({ file, local }: ''
+        ln -sf '${file}' "${local}"
+      '') rules}
+
+      if [ ! -f /etc/opensnitchd/system-fw.json ]; then
+        cp "${pkgs.opensnitch}/etc/opensnitchd/system-fw.json" "/etc/opensnitchd/system-fw.json"
+      fi
+    '');
+
+    environment.etc = mkMerge [ ({
+      "opensnitchd/default-config.json".source = format.generate "default-config.json" cfg.settings;
+    }) (mkIf (cfg.settings.ProcMonitorMethod == "ebpf") {
+      "opensnitchd/opensnitch.o".source = "${config.boot.kernelPackages.opensnitch-ebpf}/etc/opensnitchd/opensnitch.o";
+      "opensnitchd/opensnitch-dns.o".source = "${config.boot.kernelPackages.opensnitch-ebpf}/etc/opensnitchd/opensnitch-dns.o";
+      "opensnitchd/opensnitch-procs.o".source = "${config.boot.kernelPackages.opensnitch-ebpf}/etc/opensnitchd/opensnitch-procs.o";
+    })];
+
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/services/security/pass-secret-service.nix b/nixpkgs/nixos/modules/services/security/pass-secret-service.nix
new file mode 100644
index 000000000000..c3c70d97ff59
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/pass-secret-service.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.passSecretService;
+in
+{
+  options.services.passSecretService = {
+    enable = mkEnableOption (lib.mdDoc "pass secret service");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.pass-secret-service;
+      defaultText = literalExpression "pkgs.pass-secret-service";
+      description = lib.mdDoc "Which pass-secret-service package to use.";
+      example = literalExpression "pkgs.pass-secret-service.override { python3 = pkgs.python310 }";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.packages = [ cfg.package ];
+    services.dbus.packages = [ cfg.package ];
+  };
+
+  meta.maintainers = with maintainers; [ aidalgol ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/physlock.nix b/nixpkgs/nixos/modules/services/security/physlock.nix
new file mode 100644
index 000000000000..cd7747659152
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/physlock.nix
@@ -0,0 +1,147 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.physlock;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.physlock = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the {command}`physlock` screen locking mechanism.
+
+          Enable this and then run {command}`systemctl start physlock`
+          to securely lock the screen.
+
+          This will switch to a new virtual terminal, turn off console
+          switching and disable SysRq mechanism (when
+          {option}`services.physlock.disableSysRq` is set)
+          until the root or user password is given.
+        '';
+      };
+
+      allowAnyUser = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to allow any user to lock the screen. This will install a
+          setuid wrapper to allow any user to start physlock as root, which
+          is a minor security risk. To make use of this, call the physlock
+          binary directly instead of using the systemd service.
+        '';
+      };
+
+      disableSysRq = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to disable SysRq when locked with physlock.
+        '';
+      };
+
+      lockMessage = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Message to show on physlock login terminal.
+        '';
+      };
+
+      muteKernelMessages = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Disable kernel messages on console while physlock is running.
+        '';
+      };
+
+      lockOn = {
+
+        suspend = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to lock screen with physlock just before suspend.
+          '';
+        };
+
+        hibernate = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether to lock screen with physlock just before hibernate.
+          '';
+        };
+
+        extraTargets = mkOption {
+          type = types.listOf types.str;
+          default = [];
+          example = [ "display-manager.service" ];
+          description = lib.mdDoc ''
+            Other targets to lock the screen just before.
+
+            Useful if you e.g. want to autologin to X11 so that
+            your {file}`~/.xsession` gets executed while still having
+            the screen locked, so that the system can be
+            booted relatively unattended.
+          '';
+        };
+
+      };
+
+    };
+
+  };
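+  # A minimal usage sketch of the options above:
+  #   services.physlock = {
+  #     enable = true;
+  #     allowAnyUser = true;
+  #     lockOn.suspend = true;
+  #   };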
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [
+    {
+
+      # for physlock -l and physlock -L
+      environment.systemPackages = [ pkgs.physlock ];
+
+      systemd.services.physlock = {
+        enable = true;
+        description = "Physlock";
+        wantedBy = optional cfg.lockOn.suspend   "suspend.target"
+                ++ optional cfg.lockOn.hibernate "hibernate.target"
+                ++ cfg.lockOn.extraTargets;
+        before   = optional cfg.lockOn.suspend   "systemd-suspend.service"
+                ++ optional cfg.lockOn.hibernate "systemd-hibernate.service"
+                ++ optional (cfg.lockOn.hibernate || cfg.lockOn.suspend) "systemd-suspend-then-hibernate.service"
+                ++ cfg.lockOn.extraTargets;
+        serviceConfig = {
+          Type = "forking";
+          ExecStart = "${pkgs.physlock}/bin/physlock -d${optionalString cfg.muteKernelMessages "m"}${optionalString cfg.disableSysRq "s"}${optionalString (cfg.lockMessage != "") " -p \"${cfg.lockMessage}\""}";
+        };
+      };
+
+      security.pam.services.physlock = {};
+
+    }
+
+    (mkIf cfg.allowAnyUser {
+
+      security.wrappers.physlock =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.physlock}/bin/physlock";
+        };
+
+    })
+  ]);
+
+}
diff --git a/nixpkgs/nixos/modules/services/security/shibboleth-sp.nix b/nixpkgs/nixos/modules/services/security/shibboleth-sp.nix
new file mode 100644
index 000000000000..e7897c3324cf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/shibboleth-sp.nix
@@ -0,0 +1,75 @@
+{pkgs, config, lib, ...}:
+
+with lib;
+let
+  cfg = config.services.shibboleth-sp;
+in {
+  options = {
+    services.shibboleth-sp = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the shibboleth service";
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        example = literalExpression ''"''${pkgs.shibboleth-sp}/etc/shibboleth/shibboleth2.xml"'';
+        description = lib.mdDoc "Path to shibboleth config file";
+      };
+
+      fastcgi.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to include the shibauthorizer and shibresponder FastCGI processes";
+      };
+
+      fastcgi.shibAuthorizerPort = mkOption {
+        type = types.int;
+        default = 9100;
+        description = lib.mdDoc "Port for shibauthorizer FastCGI process to bind to";
+      };
+
+      fastcgi.shibResponderPort = mkOption {
+        type = types.int;
+        default = 9101;
+        description = lib.mdDoc "Port for shibauthorizer FastCGI process to bind to";
+      };
+    };
+  };
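+  # A minimal usage sketch of the options above (the configFile path is a placeholder):
+  #   services.shibboleth-sp = {
+  #     enable = true;
+  #     configFile = "/etc/shibboleth/shibboleth2.xml";
+  #     fastcgi.enable = true;
+  #   };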
+
+  config = mkIf cfg.enable {
+    systemd.services.shibboleth-sp = {
+      description = "Provides SSO and federation for web applications";
+      after       = lib.optionals cfg.fastcgi.enable [ "shibresponder.service" "shibauthorizer.service" ];
+      wantedBy    = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.shibboleth-sp}/bin/shibd -F -d ${pkgs.shibboleth-sp} -c ${cfg.configFile}";
+      };
+    };
+
+    systemd.services.shibresponder = mkIf cfg.fastcgi.enable {
+      description = "Provides SSO through Shibboleth via FastCGI";
+      after       = [ "network.target" ];
+      wantedBy    = [ "multi-user.target" ];
+      path    	  = [ "${pkgs.spawn_fcgi}" ];
+      environment.SHIBSP_CONFIG = "${cfg.configFile}";
+      serviceConfig = {
+        ExecStart = "${pkgs.spawn_fcgi}/bin/spawn-fcgi -n -p ${toString cfg.fastcgi.shibResponderPort} ${pkgs.shibboleth-sp}/lib/shibboleth/shibresponder";
+      };
+    };
+
+    systemd.services.shibauthorizer = mkIf cfg.fastcgi.enable {
+      description = "Provides SSO through Shibboleth via FastCGI";
+      after       = [ "network.target" ];
+      wantedBy    = [ "multi-user.target" ];
+      path    	  = [ "${pkgs.spawn_fcgi}" ];
+      environment.SHIBSP_CONFIG = "${cfg.configFile}";
+      serviceConfig = {
+        ExecStart = "${pkgs.spawn_fcgi}/bin/spawn-fcgi -n -p ${toString cfg.fastcgi.shibAuthorizerPort} ${pkgs.shibboleth-sp}/lib/shibboleth/shibauthorizer";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ jammerful ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/sks.nix b/nixpkgs/nixos/modules/services/security/sks.nix
new file mode 100644
index 000000000000..550b61916a22
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/sks.nix
@@ -0,0 +1,146 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sks;
+  sksPkg = cfg.package;
+  dbConfig = pkgs.writeText "DB_CONFIG" ''
+    ${cfg.extraDbConfig}
+  '';
+
+in {
+  meta.maintainers = with maintainers; [ primeos calbrecht jcumming ];
+
+  options = {
+
+    services.sks = {
+
+      enable = mkEnableOption (lib.mdDoc ''
+        SKS (synchronizing key server for OpenPGP) and start the database
+        server. You need to create "''${dataDir}/dump/*.gpg" for the initial
+        import'');
+
+      package = mkOption {
+        default = pkgs.sks;
+        defaultText = literalExpression "pkgs.sks";
+        type = types.package;
+        description = lib.mdDoc "Which SKS derivation to use.";
+      };
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/db/sks";
+        example = "/var/lib/sks";
+        # TODO: The default might change to "/var/lib/sks" as this is more
+        # common. There's also https://github.com/NixOS/nixpkgs/issues/26256
+        # and "/var/db" is not FHS compliant (seems to come from BSD).
+        description = lib.mdDoc ''
+          Data directory (-basedir) for SKS, where the database and all
+          configuration files are located (e.g. KDB, PTree, membership and
+          sksconf).
+        '';
+      };
+
+      extraDbConfig = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Set contents of the files "KDB/DB_CONFIG" and "PTree/DB_CONFIG" within
+          the ''${dataDir} directory. This is used to configure options for the
+          SKS key server's database.
+
+          Documentation of the available options is available in the file named
+          "sampleConfig/DB_CONFIG" in the following repository:
+          https://bitbucket.org/skskeyserver/sks-keyserver/src
+        '';
+      };
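+      # An illustrative sketch of Berkeley DB tuning via this option (the values
+      # are placeholders, not recommendations; see sampleConfig/DB_CONFIG upstream):
+      #   services.sks.extraDbConfig = ''
+      #     set_mp_mmapsize 268435456
+      #     set_cachesize   0 134217728 1
+      #   '';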
+
+      hkpAddress = mkOption {
+        default = [ "127.0.0.1" "::1" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Domain names, IPv4 and/or IPv6 addresses to listen on for HKP
+          requests.
+        '';
+      };
+
+      hkpPort = mkOption {
+        default = 11371;
+        type = types.ints.u16;
+        description = lib.mdDoc "HKP port to listen on.";
+      };
+
+      webroot = mkOption {
+        type = types.nullOr types.path;
+        default = "${sksPkg.webSamples}/OpenPKG";
+        defaultText = literalExpression ''"''${package.webSamples}/OpenPKG"'';
+        description = lib.mdDoc ''
+          Source directory (will be symlinked, if not null) for the files the
+          built-in webserver should serve. SKS (''${pkgs.sks.webSamples})
+          provides the following examples: "HTML5", "OpenPKG", and "XHTML+ES".
+          The index file can be named index.html, index.htm, index.xhtm, or
+          index.xhtml. Files with the extensions .css, .es, .js, .jpg, .jpeg,
+          .png, or .gif are supported. Subdirectories and filenames with
+          anything other than alphanumeric characters and the '.' character
+          will be ignored.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    users = {
+      users.sks = {
+        isSystemUser = true;
+        description = "SKS user";
+        home = cfg.dataDir;
+        createHome = true;
+        group = "sks";
+        useDefaultShell = true;
+        packages = [ sksPkg pkgs.db ];
+      };
+      groups.sks = { };
+    };
+
+    systemd.services = let
+      hkpAddress = "'" + (builtins.concatStringsSep " " cfg.hkpAddress) + "'" ;
+      hkpPort = builtins.toString cfg.hkpPort;
+    in {
+      sks-db = {
+        description = "SKS database server";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        preStart = ''
+          ${lib.optionalString (cfg.webroot != null)
+            "ln -sfT \"${cfg.webroot}\" web"}
+          mkdir -p dump
+          ${sksPkg}/bin/sks build dump/*.gpg -n 10 -cache 100 || true #*/
+          ${sksPkg}/bin/sks cleandb || true
+          ${sksPkg}/bin/sks pbuild -cache 20 -ptree_cache 70 || true
+          # Check that both database configs are symlinks before overwriting them
+          # TODO: The initial build will be without DB_CONFIG, but this will
+          # hopefully not cause any significant problems. It might be better to
+          # create both directories manually but we have to check that this does
+          # not affect the initial build of the DB.
+          for CONFIG_FILE in KDB/DB_CONFIG PTree/DB_CONFIG; do
+            if [ -e $CONFIG_FILE ] && [ ! -L $CONFIG_FILE ]; then
+              echo "$CONFIG_FILE exists but is not a symlink." >&2
+              echo "Please remove $PWD/$CONFIG_FILE manually to continue." >&2
+              exit 1
+            fi
+            ln -sf ${dbConfig} $CONFIG_FILE
+          done
+        '';
+        serviceConfig = {
+          WorkingDirectory = "~";
+          User = "sks";
+          Group = "sks";
+          Restart = "always";
+          ExecStart = "${sksPkg}/bin/sks db -hkp_address ${hkpAddress} -hkp_port ${hkpPort}";
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/sshguard.nix b/nixpkgs/nixos/modules/services/security/sshguard.nix
new file mode 100644
index 000000000000..4e9d9571de5e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/sshguard.nix
@@ -0,0 +1,161 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sshguard;
+
+  configFile = let
+    args = lib.concatStringsSep " " ([
+      "-afb"
+      "-p info"
+      "-o cat"
+      "-n1"
+    ] ++ (map (name: "-t ${escapeShellArg name}") cfg.services));
+    backend = if config.networking.nftables.enable
+      then "sshg-fw-nft-sets"
+      else "sshg-fw-ipset";
+  in pkgs.writeText "sshguard.conf" ''
+    BACKEND="${pkgs.sshguard}/libexec/${backend}"
+    LOGREADER="LANG=C ${config.systemd.package}/bin/journalctl ${args}"
+  '';
+
+in {
+
+  ###### interface
+
+  options = {
+
+    services.sshguard = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether to enable the sshguard service.";
+      };
+
+      attack_threshold = mkOption {
+        default = 30;
+        type = types.int;
+        description = lib.mdDoc ''
+            Block attackers when their cumulative attack score exceeds threshold. Most attacks have a score of 10.
+          '';
+      };
+
+      blacklist_threshold = mkOption {
+        default = null;
+        example = 120;
+        type = types.nullOr types.int;
+        description = lib.mdDoc ''
+            Blacklist an attacker when its score exceeds threshold. Blacklisted addresses are loaded from and added to blacklist-file.
+          '';
+      };
+
+      blacklist_file = mkOption {
+        default = "/var/lib/sshguard/blacklist.db";
+        type = types.path;
+        description = lib.mdDoc ''
+            Path to the file that blacklisted addresses are loaded from and added to.
+          '';
+      };
+
+      blocktime = mkOption {
+        default = 120;
+        type = types.int;
+        description = lib.mdDoc ''
+            Block attackers for initially blocktime seconds after exceeding threshold. Subsequent blocks increase by a factor of 1.5.
+
+            sshguard unblocks attacks at random intervals, so actual block times will be longer.
+          '';
+      };
+
+      detection_time = mkOption {
+        default = 1800;
+        type = types.int;
+        description = lib.mdDoc ''
+            Remember potential attackers for up to detection_time seconds before resetting their score.
+          '';
+      };
+
+      whitelist = mkOption {
+        default = [ ];
+        example = [ "198.51.100.56" "198.51.100.2" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+            Whitelist a list of addresses, hostnames, or address blocks.
+          '';
+      };
+
+      services = mkOption {
+        default = [ "sshd" ];
+        example = [ "sshd" "exim" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+            Systemd services whose logs sshguard should monitor.
+          '';
+      };
+    };
+  };
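+  # A minimal usage sketch combining the options above (addresses are placeholders):
+  #   services.sshguard = {
+  #     enable = true;
+  #     whitelist = [ "198.51.100.56" ];
+  #     services = [ "sshd" ];
+  #     blocktime = 300;
+  #   };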
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.etc."sshguard.conf".source = configFile;
+
+    systemd.services.sshguard = {
+      description = "SSHGuard brute-force attacks protection system";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      partOf = optional config.networking.firewall.enable "firewall.service";
+
+      restartTriggers = [ configFile ];
+
+      path = with pkgs; if config.networking.nftables.enable
+        then [ nftables iproute2 systemd ]
+        else [ iptables ipset iproute2 systemd ];
+
+      # The sshguard ipsets must exist before we invoke
+      # iptables. sshguard creates the ipsets after startup if
+      # necessary, but if we let sshguard do it, we can't reliably add
+      # the iptables rules because postStart races with the creation
+      # of the ipsets. So instead, we create both the ipsets and
+      # firewall rules before sshguard starts.
+      preStart = optionalString config.networking.firewall.enable ''
+        ${pkgs.ipset}/bin/ipset -quiet create -exist sshguard4 hash:net family inet
+        ${pkgs.iptables}/bin/iptables  -I INPUT -m set --match-set sshguard4 src -j DROP
+      '' + optionalString (config.networking.firewall.enable && config.networking.enableIPv6) ''
+        ${pkgs.ipset}/bin/ipset -quiet create -exist sshguard6 hash:net family inet6
+        ${pkgs.iptables}/bin/ip6tables -I INPUT -m set --match-set sshguard6 src -j DROP
+      '';
+
+      postStop = optionalString config.networking.firewall.enable ''
+        ${pkgs.iptables}/bin/iptables  -D INPUT -m set --match-set sshguard4 src -j DROP
+        ${pkgs.ipset}/bin/ipset -quiet destroy sshguard4
+      '' + optionalString (config.networking.firewall.enable && config.networking.enableIPv6) ''
+        ${pkgs.iptables}/bin/ip6tables -D INPUT -m set --match-set sshguard6 src -j DROP
+        ${pkgs.ipset}/bin/ipset -quiet destroy sshguard6
+      '';
+
+      unitConfig.Documentation = "man:sshguard(8)";
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = let
+          args = lib.concatStringsSep " " ([
+            "-a ${toString cfg.attack_threshold}"
+            "-p ${toString cfg.blocktime}"
+            "-s ${toString cfg.detection_time}"
+            (optionalString (cfg.blacklist_threshold != null) "-b ${toString cfg.blacklist_threshold}:${cfg.blacklist_file}")
+          ] ++ (map (name: "-w ${escapeShellArg name}") cfg.whitelist));
+        in "${pkgs.sshguard}/bin/sshguard ${args}";
+        Restart = "always";
+        ProtectSystem = "strict";
+        ProtectHome = "tmpfs";
+        RuntimeDirectory = "sshguard";
+        StateDirectory = "sshguard";
+        CapabilityBoundingSet = "CAP_NET_ADMIN CAP_NET_RAW";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/sslmate-agent.nix b/nixpkgs/nixos/modules/services/security/sslmate-agent.nix
new file mode 100644
index 000000000000..2d72406f0db8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/sslmate-agent.nix
@@ -0,0 +1,32 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sslmate-agent;
+
+in {
+  meta.maintainers = with maintainers; [ wolfangaukang ];
+
+  options = {
+    services.sslmate-agent = {
+      enable = mkEnableOption (lib.mdDoc "sslmate-agent, a daemon for managing SSL/TLS certificates on a server");
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = with pkgs; [ sslmate-agent ];
+
+    systemd = {
+      packages = [ pkgs.sslmate-agent ];
+      services.sslmate-agent = {
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ConfigurationDirectory = "sslmate-agent";
+          LogsDirectory = "sslmate-agent";
+          StateDirectory = "sslmate-agent";
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/step-ca.nix b/nixpkgs/nixos/modules/services/security/step-ca.nix
new file mode 100644
index 000000000000..433f162ecb86
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/step-ca.nix
@@ -0,0 +1,142 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.step-ca;
+  settingsFormat = (pkgs.formats.json { });
+in
+{
+  meta.maintainers = with lib.maintainers; [ mohe2015 ];
+
+  options = {
+    services.step-ca = {
+      enable = lib.mkEnableOption (lib.mdDoc "the smallstep certificate authority server");
+      openFirewall = lib.mkEnableOption (lib.mdDoc "opening the certificate authority server port");
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.step-ca;
+        defaultText = lib.literalExpression "pkgs.step-ca";
+        description = lib.mdDoc "Which step-ca package to use.";
+      };
+      address = lib.mkOption {
+        type = lib.types.str;
+        example = "127.0.0.1";
+        description = lib.mdDoc ''
+          The address (without port) the certificate authority should listen on.
+          This combined with {option}`services.step-ca.port` overrides {option}`services.step-ca.settings.address`.
+        '';
+      };
+      port = lib.mkOption {
+        type = lib.types.port;
+        example = 8443;
+        description = lib.mdDoc ''
+          The port the certificate authority should listen on.
+          This combined with {option}`services.step-ca.address` overrides {option}`services.step-ca.settings.address`.
+        '';
+      };
+      settings = lib.mkOption {
+        type = with lib.types; attrsOf anything;
+        description = lib.mdDoc ''
+          Settings that go into {file}`ca.json`. See
+          [the step-ca manual](https://smallstep.com/docs/step-ca/configuration)
+          for more information. The easiest way to
+          configure this module would be to run `step ca init`
+          to generate {file}`ca.json` and then import it using
+          `builtins.fromJSON`.
+          [This article](https://smallstep.com/docs/step-cli/basic-crypto-operations#run-an-offline-x509-certificate-authority)
+          may also be useful if you want to customize certain aspects of
+          certificate generation for your CA.
+          You need to change the database storage path to {file}`/var/lib/step-ca/db`.
+
+          ::: {.warning}
+          The {option}`services.step-ca.settings.address` option
+          will be ignored and overwritten by
+          {option}`services.step-ca.address` and
+          {option}`services.step-ca.port`.
+          :::
+        '';
+      };
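+      # A minimal sketch of the workflow described above; ./ca.json is a hypothetical
+      # file produced by `step ca init`, and db.dataSource is assumed to be the
+      # database storage path key in that file:
+      #   services.step-ca.settings = lib.recursiveUpdate
+      #     (builtins.fromJSON (builtins.readFile ./ca.json))
+      #     { db.dataSource = "/var/lib/step-ca/db"; };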
+      intermediatePasswordFile = lib.mkOption {
+        type = lib.types.path;
+        example = "/run/keys/smallstep-password";
+        description = lib.mdDoc ''
+          Path to the file containing the password for the intermediate
+          certificate private key.
+
+          ::: {.warning}
+          Make sure to use a quoted absolute path instead of a path literal
+          to prevent it from being copied to the globally readable Nix
+          store.
+          :::
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf config.services.step-ca.enable (
+    let
+      configFile = settingsFormat.generate "ca.json" (cfg.settings // {
+        address = cfg.address + ":" + toString cfg.port;
+      });
+    in
+    {
+      assertions =
+        [
+          {
+            assertion = !lib.isStorePath cfg.intermediatePasswordFile;
+            message = ''
+              <option>services.step-ca.intermediatePasswordFile</option> points to
+              a file in the Nix store. You should use a quoted absolute path to
+              prevent this.
+            '';
+          }
+        ];
+
+      systemd.packages = [ cfg.package ];
+
+      # configuration file indirection is needed to support reloading
+      environment.etc."smallstep/ca.json".source = configFile;
+
+      systemd.services."step-ca" = {
+        wantedBy = [ "multi-user.target" ];
+        restartTriggers = [ configFile ];
+        unitConfig = {
+          ConditionFileNotEmpty = ""; # override upstream
+        };
+        serviceConfig = {
+          User = "step-ca";
+          Group = "step-ca";
+          UMask = "0077";
+          Environment = "HOME=%S/step-ca";
+          WorkingDirectory = ""; # override upstream
+          ReadWriteDirectories = ""; # override upstream
+
+          # LocalCredential handles file permission problems arising from the use of DynamicUser.
+          LoadCredential = "intermediate_password:${cfg.intermediatePasswordFile}";
+
+          ExecStart = [
+            "" # override upstream
+            "${cfg.package}/bin/step-ca /etc/smallstep/ca.json --password-file \${CREDENTIALS_DIRECTORY}/intermediate_password"
+          ];
+
+          # ProtectProc = "invisible"; # not supported by upstream yet
+          # ProcSubset = "pid"; # not supported by upstream yet
+          # PrivateUsers = true; # doesn't work with privileged ports therefore not supported by upstream
+
+          DynamicUser = true;
+          StateDirectory = "step-ca";
+        };
+      };
+
+      users.users.step-ca = {
+        home = "/var/lib/step-ca";
+        group = "step-ca";
+        isSystemUser = true;
+      };
+
+      users.groups.step-ca = {};
+
+      networking.firewall = lib.mkIf cfg.openFirewall {
+        allowedTCPPorts = [ cfg.port ];
+      };
+    }
+  );
+}
diff --git a/nixpkgs/nixos/modules/services/security/tang.nix b/nixpkgs/nixos/modules/services/security/tang.nix
new file mode 100644
index 000000000000..9cb0a22fca42
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/tang.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.tang;
+in
+{
+  options.services.tang = {
+    enable = mkEnableOption "tang";
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.tang;
+      defaultText = literalExpression "pkgs.tang";
+      description = mdDoc "The tang package to use.";
+    };
+
+    listenStream = mkOption {
+      type = with types; listOf str;
+      default = [ "7654" ];
+      example = [ "198.168.100.1:7654" "[2001:db8::1]:7654" "7654" ];
+      description = mdDoc ''
+        Addresses and/or ports on which tang should listen.
+        For detailed syntax see ListenStream in {manpage}`systemd.socket(5)`.
+      '';
+    };
+
+    ipAddressAllow = mkOption {
+      example = [ "192.168.1.0/24" ];
+      type = types.listOf types.str;
+      description = ''
+        Whitelist a list of address prefixes.
+        Preferably, internal addresses should be used.
+      '';
+    };
+
+  };
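+  # A minimal usage sketch of the options above (the prefix is a placeholder):
+  #   services.tang = {
+  #     enable = true;
+  #     listenStream = [ "7654" ];
+  #     ipAddressAllow = [ "192.168.1.0/24" ];
+  #   };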
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services."tangd@" = {
+      description = "Tang server";
+      path = [ cfg.package ];
+      serviceConfig = {
+        StandardInput = "socket";
+        StandardOutput = "socket";
+        StandardError = "journal";
+        DynamicUser = true;
+        StateDirectory = "tang";
+        RuntimeDirectory = "tang";
+        StateDirectoryMode = "700";
+        UMask = "0077";
+        CapabilityBoundingSet = [ "" ];
+        ExecStart = "${cfg.package}/libexec/tangd %S/tang";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        DeviceAllow = [ "/dev/stdin" ];
+        RestrictAddressFamilies = [ "AF_UNIX" ];
+        DevicePolicy = "strict";
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+        IPAddressDeny = "any";
+        IPAddressAllow = cfg.ipAddressAllow;
+      };
+    };
+
+    systemd.sockets.tangd = {
+      description = "Tang server";
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream = cfg.listenStream;
+        Accept = "yes";
+        IPAddressDeny = "any";
+        IPAddressAllow = cfg.ipAddressAllow;
+      };
+    };
+  };
+  meta.maintainers = with lib.maintainers; [ jfroche julienmalka ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/tor.nix b/nixpkgs/nixos/modules/services/security/tor.nix
new file mode 100644
index 000000000000..9e786eb2bf06
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/tor.nix
@@ -0,0 +1,1031 @@
+{ config, lib, options, pkgs, ... }:
+
+with builtins;
+with lib;
+
+let
+  cfg = config.services.tor;
+  opt = options.services.tor;
+  stateDir = "/var/lib/tor";
+  runDir = "/run/tor";
+  descriptionGeneric = option: ''
+    See [torrc manual](https://2019.www.torproject.org/docs/tor-manual.html.en#${option}).
+  '';
+  bindsPrivilegedPort =
+    any (p0:
+      let p1 = if p0 ? "port" then p0.port else p0; in
+      if p1 == "auto" then false
+      else let p2 = if isInt p1 then p1 else toInt p1; in
+        p1 != null && 0 < p2 && p2 < 1024)
+    (flatten [
+      cfg.settings.ORPort
+      cfg.settings.DirPort
+      cfg.settings.DNSPort
+      cfg.settings.ExtORPort
+      cfg.settings.HTTPTunnelPort
+      cfg.settings.NATDPort
+      cfg.settings.SOCKSPort
+      cfg.settings.TransPort
+    ]);
+  optionBool = optionName: mkOption {
+    type = with types; nullOr bool;
+    default = null;
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+  optionInt = optionName: mkOption {
+    type = with types; nullOr int;
+    default = null;
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+  optionString = optionName: mkOption {
+    type = with types; nullOr str;
+    default = null;
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+  optionStrings = optionName: mkOption {
+    type = with types; listOf str;
+    default = [];
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+  optionAddress = mkOption {
+    type = with types; nullOr str;
+    default = null;
+    example = "0.0.0.0";
+    description = lib.mdDoc ''
+      IPv4 address, or IPv6 address enclosed in brackets.
+    '';
+  };
+  optionUnix = mkOption {
+    type = with types; nullOr path;
+    default = null;
+    description = lib.mdDoc ''
+      Unix domain socket path to use.
+    '';
+  };
+  optionPort = mkOption {
+    type = with types; nullOr (oneOf [port (enum ["auto"])]);
+    default = null;
+  };
+  optionPorts = optionName: mkOption {
+    type = with types; listOf port;
+    default = [];
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+  optionIsolablePort = with types; oneOf [
+    port (enum ["auto"])
+    (submodule ({config, ...}: {
+      options = {
+        addr = optionAddress;
+        port = optionPort;
+        flags = optionFlags;
+        SessionGroup = mkOption { type = nullOr int; default = null; };
+      } // genAttrs isolateFlags (name: mkOption { type = types.bool; default = false; });
+      config = {
+        flags = filter (name: config.${name} == true) isolateFlags ++
+                optional (config.SessionGroup != null) "SessionGroup=${toString config.SessionGroup}";
+      };
+    }))
+  ];
+  optionIsolablePorts = optionName: mkOption {
+    default = [];
+    type = with types; either optionIsolablePort (listOf optionIsolablePort);
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+  isolateFlags = [
+    "IsolateClientAddr"
+    "IsolateClientProtocol"
+    "IsolateDestAddr"
+    "IsolateDestPort"
+    "IsolateSOCKSAuth"
+    "KeepAliveIsolateSOCKSAuth"
+  ];
+  optionSOCKSPort = doConfig: let
+    flags = [
+      "CacheDNS" "CacheIPv4DNS" "CacheIPv6DNS" "GroupWritable" "IPv6Traffic"
+      "NoDNSRequest" "NoIPv4Traffic" "NoOnionTraffic" "OnionTrafficOnly"
+      "PreferIPv6" "PreferIPv6Automap" "PreferSOCKSNoAuth" "UseDNSCache"
+      "UseIPv4Cache" "UseIPv6Cache" "WorldWritable"
+    ] ++ isolateFlags;
+    in with types; oneOf [
+      port (submodule ({config, ...}: {
+        options = {
+          unix = optionUnix;
+          addr = optionAddress;
+          port = optionPort;
+          flags = optionFlags;
+          SessionGroup = mkOption { type = nullOr int; default = null; };
+        } // genAttrs flags (name: mkOption { type = types.bool; default = false; });
+        config = mkIf doConfig { # Only add flags in SOCKSPort to avoid duplicates
+          flags = filter (name: config.${name} == true) flags ++
+                  optional (config.SessionGroup != null) "SessionGroup=${toString config.SessionGroup}";
+        };
+      }))
+    ];
+  optionFlags = mkOption {
+    type = with types; listOf str;
+    default = [];
+  };
+  optionORPort = optionName: mkOption {
+    default = [];
+    example = 443;
+    type = with types; oneOf [port (enum ["auto"]) (listOf (oneOf [
+      port
+      (enum ["auto"])
+      (submodule ({config, ...}:
+        let flags = [ "IPv4Only" "IPv6Only" "NoAdvertise" "NoListen" ];
+        in {
+        options = {
+          addr = optionAddress;
+          port = optionPort;
+          flags = optionFlags;
+        } // genAttrs flags (name: mkOption { type = types.bool; default = false; });
+        config = {
+          flags = filter (name: config.${name} == true) flags;
+        };
+      }))
+    ]))];
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+  optionBandwidth = optionName: mkOption {
+    type = with types; nullOr (either int str);
+    default = null;
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+  optionPath = optionName: mkOption {
+    type = with types; nullOr path;
+    default = null;
+    description = lib.mdDoc (descriptionGeneric optionName);
+  };
+
+  mkValueString = k: v:
+    if v == null then ""
+    else if isBool v then
+      (if v then "1" else "0")
+    else if v ? "unix" && v.unix != null then
+      "unix:"+v.unix +
+      optionalString (v ? "flags") (" " + concatStringsSep " " v.flags)
+    else if v ? "port" && v.port != null then
+      optionalString (v ? "addr" && v.addr != null) "${v.addr}:" +
+      toString v.port +
+      optionalString (v ? "flags") (" " + concatStringsSep " " v.flags)
+    else if k == "ServerTransportPlugin" then
+      optionalString (v.transports != []) "${concatStringsSep "," v.transports} exec ${v.exec}"
+    else if k == "HidServAuth" then
+      v.onion + " " + v.auth
+    else generators.mkValueStringDefault {} v;
+  genTorrc = settings:
+    generators.toKeyValue {
+      listsAsDuplicateKeys = true;
+      mkKeyValue = k: generators.mkKeyValueDefault { mkValueString = mkValueString k; } " " k;
+    }
+    (lib.mapAttrs (k: v:
+      # Not necessary, but prettier rendering
+      if elem k [ "AutomapHostsSuffixes" "DirPolicy" "ExitPolicy" "SocksPolicy" ]
+      && v != []
+      then concatStringsSep "," v
+      else v)
+    (lib.filterAttrs (k: v: !(v == null || v == ""))
+    settings));
+  torrc = pkgs.writeText "torrc" (
+    genTorrc cfg.settings +
+    concatStrings (mapAttrsToList (name: onion:
+      "HiddenServiceDir ${onion.path}\n" +
+      genTorrc onion.settings) cfg.relay.onionServices)
+  );
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "tor" "client" "dns" "automapHostsSuffixes" ] [ "services" "tor" "settings" "AutomapHostsSuffixes" ])
+    (mkRemovedOptionModule [ "services" "tor" "client" "dns" "isolationOptions" ] "Use services.tor.settings.DNSPort instead.")
+    (mkRemovedOptionModule [ "services" "tor" "client" "dns" "listenAddress" ] "Use services.tor.settings.DNSPort instead.")
+    (mkRemovedOptionModule [ "services" "tor" "client" "privoxy" "enable" ] "Use services.privoxy.enable and services.privoxy.enableTor instead.")
+    (mkRemovedOptionModule [ "services" "tor" "client" "socksIsolationOptions" ] "Use services.tor.settings.SOCKSPort instead.")
+    (mkRemovedOptionModule [ "services" "tor" "client" "socksListenAddressFaster" ] "Use services.tor.settings.SOCKSPort instead.")
+    (mkRenamedOptionModule [ "services" "tor" "client" "socksPolicy" ] [ "services" "tor" "settings" "SocksPolicy" ])
+    (mkRemovedOptionModule [ "services" "tor" "client" "transparentProxy" "isolationOptions" ] "Use services.tor.settings.TransPort instead.")
+    (mkRemovedOptionModule [ "services" "tor" "client" "transparentProxy" "listenAddress" ] "Use services.tor.settings.TransPort instead.")
+    (mkRenamedOptionModule [ "services" "tor" "controlPort" ] [ "services" "tor" "settings" "ControlPort" ])
+    (mkRemovedOptionModule [ "services" "tor" "extraConfig" ] "Please use services.tor.settings instead.")
+    (mkRenamedOptionModule [ "services" "tor" "hiddenServices" ] [ "services" "tor" "relay" "onionServices" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "accountingMax" ] [ "services" "tor" "settings" "AccountingMax" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "accountingStart" ] [ "services" "tor" "settings" "AccountingStart" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "address" ] [ "services" "tor" "settings" "Address" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "bandwidthBurst" ] [ "services" "tor" "settings" "BandwidthBurst" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "bandwidthRate" ] [ "services" "tor" "settings" "BandwidthRate" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "bridgeTransports" ] [ "services" "tor" "settings" "ServerTransportPlugin" "transports" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "contactInfo" ] [ "services" "tor" "settings" "ContactInfo" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "exitPolicy" ] [ "services" "tor" "settings" "ExitPolicy" ])
+    (mkRemovedOptionModule [ "services" "tor" "relay" "isBridge" ] "Use services.tor.relay.role instead.")
+    (mkRemovedOptionModule [ "services" "tor" "relay" "isExit" ] "Use services.tor.relay.role instead.")
+    (mkRenamedOptionModule [ "services" "tor" "relay" "nickname" ] [ "services" "tor" "settings" "Nickname" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "port" ] [ "services" "tor" "settings" "ORPort" ])
+    (mkRenamedOptionModule [ "services" "tor" "relay" "portSpec" ] [ "services" "tor" "settings" "ORPort" ])
+  ];
+
+  options = {
+    services.tor = {
+      enable = mkEnableOption (lib.mdDoc ''Tor daemon.
+        By default, the daemon is run without
+        relay, exit, bridge or client connectivity'');
+
+      openFirewall = mkEnableOption (lib.mdDoc "opening of the relay port(s) in the firewall");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.tor;
+        defaultText = literalExpression "pkgs.tor";
+        description = lib.mdDoc "Tor package to use.";
+      };
+
+      enableGeoIP = mkEnableOption (lib.mdDoc ''use of GeoIP databases.
+        Disabling this will disable by-country statistics for bridges and relays
+        and some client and third-party software functionality'') // { default = true; };
+
+      controlSocket.enable = mkEnableOption (lib.mdDoc ''control socket,
+        created in `${runDir}/control`'');
+
+      client = {
+        enable = mkEnableOption (lib.mdDoc ''the routing of application connections.
+          You might want to disable this if you plan running a dedicated Tor relay'');
+
+        transparentProxy.enable = mkEnableOption (lib.mdDoc "transparent proxy");
+        dns.enable = mkEnableOption (lib.mdDoc "DNS resolver");
+
+        socksListenAddress = mkOption {
+          type = optionSOCKSPort false;
+          default = {addr = "127.0.0.1"; port = 9050; IsolateDestAddr = true;};
+          example = {addr = "192.168.0.1"; port = 9090; IsolateDestAddr = true;};
+          description = lib.mdDoc ''
+            Bind to this address to listen for connections from
+            Socks-speaking applications.
+          '';
+        };
+
+        onionServices = mkOption {
+          description = lib.mdDoc (descriptionGeneric "HiddenServiceDir");
+          default = {};
+          example = {
+            "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" = {
+              clientAuthorizations = ["/run/keys/tor/alice.prv.x25519"];
+            };
+          };
+          type = types.attrsOf (types.submodule ({name, config, ...}: {
+            options.clientAuthorizations = mkOption {
+              description = lib.mdDoc ''
+                Clients' authorizations for a v3 onion service,
+                as a list of files, each containing one private key, in the format:
+                ```
+                descriptor:x25519:<base32-private-key>
+                ```
+                ${descriptionGeneric "_client_authorization"}
+              '';
+              type = with types; listOf path;
+              default = [];
+              example = ["/run/keys/tor/alice.prv.x25519"];
+            };
+          }));
+        };
+      };
+
+      relay = {
+        enable = mkEnableOption (lib.mdDoc "tor relaying") // {
+          description = lib.mdDoc ''
+            Whether to enable relaying of Tor traffic for others.
+
+            See <https://www.torproject.org/docs/tor-doc-relay>
+            for details.
+
+            Setting this to true requires setting
+            {option}`services.tor.relay.role`
+            and
+            {option}`services.tor.settings.ORPort`
+            options.
+          '';
+        };
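+        # A minimal relay sketch using the options above (the port is illustrative):
+        #   services.tor = {
+        #     enable = true;
+        #     relay.enable = true;
+        #     relay.role = "relay";
+        #     settings.ORPort = 9001;
+        #     openFirewall = true;
+        #   };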
+
+        role = mkOption {
+          type = types.enum [ "exit" "relay" "bridge" "private-bridge" ];
+          description = lib.mdDoc ''
+            Your role in the Tor network. There are several options:
+
+            - `exit`:
+              An exit relay. This allows Tor users to access regular
+              Internet services through your public IP.
+
+              You can specify which services Tor users may access via
+              your exit relay using {option}`settings.ExitPolicy` option.
+
+            - `relay`:
+              Regular relay. This allows Tor users to relay onion
+              traffic to other Tor nodes, but not to public
+              Internet.
+
+              See
+              <https://www.torproject.org/docs/tor-doc-relay.html.en>
+              for more info.
+
+            - `bridge`:
+              Regular bridge. Works like a regular relay, but
+              doesn't list you in the public relay directory and
+              hides your Tor node behind obfs4proxy.
+
+              Using this option will make Tor advertise your bridge
+              to users through various mechanisms like
+              <https://bridges.torproject.org/>, though.
+
+              See <https://www.torproject.org/docs/bridges.html.en>
+              for more info.
+
+            - `private-bridge`:
+              Private bridge. Works like a regular bridge, but does
+              not advertise your node in any way.
+
+              Using this role means that you won't contribute to the Tor
+              network in any way unless you advertise your node
+              yourself in some way.
+
+              Use this if you want to run a private bridge, for
+              example because you'll give out your bridge addr
+              manually to your friends.
+
+              Switching to this role after a measurable time in the
+              "bridge" role is pretty useless, as some Tor users
+              will have learned about your node already. In that
+              case you can still change the
+              {option}`port` option.
+
+              See <https://www.torproject.org/docs/bridges.html.en>
+              for more info.
+
+            ::: {.important}
+            Running an exit relay may expose you to abuse
+            complaints. See
+            <https://www.torproject.org/faq.html.en#ExitPolicies>
+            for more info.
+            :::
+
+            ::: {.important}
+            Note that some sites that are misconfigured and/or
+            disrespectful towards privacy will block you even if your
+            relay is not an exit relay. That is, just being listed
+            in a public relay directory can have unwanted
+            consequences.
+
+            This means you might not want to use
+            this role if you browse the public Internet from the same
+            network as your relay, unless you want to write
+            e-mails to those sites (you should!).
+            :::
+
+            ::: {.important}
+            WARNING: THE FOLLOWING PARAGRAPH IS NOT LEGAL ADVICE.
+            Consult with your lawyer when in doubt.
+
+            The `bridge` role should be safe to use in most situations
+            (unless the act of forwarding traffic for others is
+            a punishable offence under your local laws, which
+            would be pretty insane as it would make ISPs illegal).
+            :::
+          '';
+        };
+
+        onionServices = mkOption {
+          description = lib.mdDoc (descriptionGeneric "HiddenServiceDir");
+          default = {};
+          example = {
+            "example.org/www" = {
+              map = [ 80 ];
+              authorizedClients = [
+                "descriptor:x25519:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
+              ];
+            };
+          };
+          type = types.attrsOf (types.submodule ({name, config, ...}: {
+            options.path = mkOption {
+              type = types.path;
+              description = lib.mdDoc ''
+                Path where to store the data files of the hidden service.
+                If the {option}`secretKey` is null
+                this defaults to `${stateDir}/onion/$onion`,
+                otherwise to `${runDir}/onion/$onion`.
+              '';
+            };
+            options.secretKey = mkOption {
+              type = with types; nullOr path;
+              default = null;
+              example = "/run/keys/tor/onion/expyuzz4wqqyqhjn/hs_ed25519_secret_key";
+              description = lib.mdDoc ''
+                Secret key of the onion service.
+                If null, Tor reuses any preexisting secret key (in {option}`path`)
+                or generates a new one.
+                The associated public key and hostname are deterministically regenerated
+                from this file if they do not exist.
+              '';
+            };
+            options.authorizeClient = mkOption {
+              description = lib.mdDoc (descriptionGeneric "HiddenServiceAuthorizeClient");
+              default = null;
+              type = types.nullOr (types.submodule ({...}: {
+                options = {
+                  authType = mkOption {
+                    type = types.enum [ "basic" "stealth" ];
+                    description = lib.mdDoc ''
+                      Either `"basic"` for a general-purpose authorization protocol
+                      or `"stealth"` for a less scalable protocol
+                      that also hides service activity from unauthorized clients.
+                    '';
+                  };
+                  clientNames = mkOption {
+                    type = with types; nonEmptyListOf (strMatching "[A-Za-z0-9+-_]+");
+                    description = lib.mdDoc ''
+                      Only clients that are listed here are authorized to access the hidden service.
+                      Generated authorization data can be found in {file}`${stateDir}/onion/$name/hostname`.
+                      Clients need to put this authorization data in their configuration file using
+                      [](#opt-services.tor.settings.HidServAuth).
+                    '';
+                  };
+                };
+              }));
+            };
+            options.authorizedClients = mkOption {
+              description = lib.mdDoc ''
+                Authorized clients for a v3 onion service,
+                as a list of public keys, in the format:
+                ```
+                descriptor:x25519:<base32-public-key>
+                ```
+                ${descriptionGeneric "_client_authorization"}
+              '';
+              type = with types; listOf str;
+              default = [];
+              example = ["descriptor:x25519:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"];
+            };
+            options.map = mkOption {
+              description = lib.mdDoc (descriptionGeneric "HiddenServicePort");
+              type = with types; listOf (oneOf [
+                port (submodule ({...}: {
+                  options = {
+                    port = optionPort;
+                    target = mkOption {
+                      default = null;
+                      type = nullOr (submodule ({...}: {
+                        options = {
+                          unix = optionUnix;
+                          addr = optionAddress;
+                          port = optionPort;
+                        };
+                      }));
+                    };
+                  };
+                }))
+              ]);
+              apply = map (v: if isInt v then {port=v; target=null;} else v);
+            };
+            options.version = mkOption {
+              description = lib.mdDoc (descriptionGeneric "HiddenServiceVersion");
+              type = with types; nullOr (enum [2 3]);
+              default = null;
+            };
+            options.settings = mkOption {
+              description = lib.mdDoc ''
+                Settings of the onion service.
+                ${descriptionGeneric "_hidden_service_options"}
+              '';
+              default = {};
+              type = types.submodule {
+                freeformType = with types;
+                  (attrsOf (nullOr (oneOf [str int bool (listOf str)]))) // {
+                    description = "settings option";
+                  };
+                options.HiddenServiceAllowUnknownPorts = optionBool "HiddenServiceAllowUnknownPorts";
+                options.HiddenServiceDirGroupReadable = optionBool "HiddenServiceDirGroupReadable";
+                options.HiddenServiceExportCircuitID = mkOption {
+                  description = lib.mdDoc (descriptionGeneric "HiddenServiceExportCircuitID");
+                  type = with types; nullOr (enum ["haproxy"]);
+                  default = null;
+                };
+                options.HiddenServiceMaxStreams = mkOption {
+                  description = lib.mdDoc (descriptionGeneric "HiddenServiceMaxStreams");
+                  type = with types; nullOr (ints.between 0 65535);
+                  default = null;
+                };
+                options.HiddenServiceMaxStreamsCloseCircuit = optionBool "HiddenServiceMaxStreamsCloseCircuit";
+                options.HiddenServiceNumIntroductionPoints = mkOption {
+                  description = lib.mdDoc (descriptionGeneric "HiddenServiceNumIntroductionPoints");
+                  type = with types; nullOr (ints.between 0 20);
+                  default = null;
+                };
+                options.HiddenServiceSingleHopMode = optionBool "HiddenServiceSingleHopMode";
+                options.RendPostPeriod = optionString "RendPostPeriod";
+              };
+            };
+            config = {
+              path = mkDefault ((if config.secretKey == null then stateDir else runDir) + "/onion/${name}");
+              settings.HiddenServiceVersion = config.version;
+              settings.HiddenServiceAuthorizeClient =
+                if config.authorizeClient != null then
+                  config.authorizeClient.authType + " " +
+                  concatStringsSep "," config.authorizeClient.clientNames
+                else null;
+              settings.HiddenServicePort = map (p: mkValueString "" p.port + " " + mkValueString "" p.target) config.map;
+            };
+          }));
+        };
+      };
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          See [torrc manual](https://2019.www.torproject.org/docs/tor-manual.html.en)
+          for documentation.
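+
+          For instance, a minimal sketch showing how a couple of torrc
+          options map onto this attribute set (the nickname and contact
+          address below are only placeholders):
+
+          ```
+          services.tor.settings = {
+            Nickname = "myNixosRelay";
+            ContactInfo = "tor-admin@example.org";
+          };
+          ```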
+        '';
+        default = {};
+        type = types.submodule {
+          freeformType = with types;
+            (attrsOf (nullOr (oneOf [str int bool (listOf str)]))) // {
+              description = "settings option";
+            };
+          options.Address = optionString "Address";
+          options.AssumeReachable = optionBool "AssumeReachable";
+          options.AccountingMax = optionBandwidth "AccountingMax";
+          options.AccountingStart = optionString "AccountingStart";
+          options.AuthDirHasIPv6Connectivity = optionBool "AuthDirHasIPv6Connectivity";
+          options.AuthDirListBadExits = optionBool "AuthDirListBadExits";
+          options.AuthDirPinKeys = optionBool "AuthDirPinKeys";
+          options.AuthDirSharedRandomness = optionBool "AuthDirSharedRandomness";
+          options.AuthDirTestEd25519LinkKeys = optionBool "AuthDirTestEd25519LinkKeys";
+          options.AuthoritativeDirectory = optionBool "AuthoritativeDirectory";
+          options.AutomapHostsOnResolve = optionBool "AutomapHostsOnResolve";
+          options.AutomapHostsSuffixes = optionStrings "AutomapHostsSuffixes" // {
+            default = [".onion" ".exit"];
+            example = [".onion"];
+          };
+          options.BandwidthBurst = optionBandwidth "BandwidthBurst";
+          options.BandwidthRate = optionBandwidth "BandwidthRate";
+          options.BridgeAuthoritativeDir = optionBool "BridgeAuthoritativeDir";
+          options.BridgeRecordUsageByCountry = optionBool "BridgeRecordUsageByCountry";
+          options.BridgeRelay = optionBool "BridgeRelay" // { default = false; };
+          options.CacheDirectory = optionPath "CacheDirectory";
+          options.CacheDirectoryGroupReadable = optionBool "CacheDirectoryGroupReadable"; # default is null and like "auto"
+          options.CellStatistics = optionBool "CellStatistics";
+          options.ClientAutoIPv6ORPort = optionBool "ClientAutoIPv6ORPort";
+          options.ClientDNSRejectInternalAddresses = optionBool "ClientDNSRejectInternalAddresses";
+          options.ClientOnionAuthDir = mkOption {
+            description = lib.mdDoc (descriptionGeneric "ClientOnionAuthDir");
+            default = null;
+            type = with types; nullOr path;
+          };
+          options.ClientPreferIPv6DirPort = optionBool "ClientPreferIPv6DirPort"; # default is null and like "auto"
+          options.ClientPreferIPv6ORPort = optionBool "ClientPreferIPv6ORPort"; # default is null and like "auto"
+          options.ClientRejectInternalAddresses = optionBool "ClientRejectInternalAddresses";
+          options.ClientUseIPv4 = optionBool "ClientUseIPv4";
+          options.ClientUseIPv6 = optionBool "ClientUseIPv6";
+          options.ConnDirectionStatistics = optionBool "ConnDirectionStatistics";
+          options.ConstrainedSockets = optionBool "ConstrainedSockets";
+          options.ContactInfo = optionString "ContactInfo";
+          options.ControlPort = mkOption rec {
+            description = lib.mdDoc (descriptionGeneric "ControlPort");
+            default = [];
+            example = [{port = 9051;}];
+            type = with types; oneOf [port (enum ["auto"]) (listOf (oneOf [
+              port (enum ["auto"]) (submodule ({config, ...}: let
+                flags = ["GroupWritable" "RelaxDirModeCheck" "WorldWritable"];
+                in {
+                options = {
+                  unix = optionUnix;
+                  flags = optionFlags;
+                  addr = optionAddress;
+                  port = optionPort;
+                } // genAttrs flags (name: mkOption { type = types.bool; default = false; });
+                config = {
+                  flags = filter (name: config.${name} == true) flags;
+                };
+              }))
+            ]))];
+          };
+          options.ControlPortFileGroupReadable = optionBool "ControlPortFileGroupReadable";
+          options.ControlPortWriteToFile = optionPath "ControlPortWriteToFile";
+          options.ControlSocket = optionPath "ControlSocket";
+          options.ControlSocketsGroupWritable = optionBool "ControlSocketsGroupWritable";
+          options.CookieAuthFile = optionPath "CookieAuthFile";
+          options.CookieAuthFileGroupReadable = optionBool "CookieAuthFileGroupReadable";
+          options.CookieAuthentication = optionBool "CookieAuthentication";
+          options.DataDirectory = optionPath "DataDirectory" // { default = stateDir; };
+          options.DataDirectoryGroupReadable = optionBool "DataDirectoryGroupReadable";
+          options.DirPortFrontPage = optionPath "DirPortFrontPage";
+          options.DirAllowPrivateAddresses = optionBool "DirAllowPrivateAddresses";
+          options.DormantCanceledByStartup = optionBool "DormantCanceledByStartup";
+          options.DormantOnFirstStartup = optionBool "DormantOnFirstStartup";
+          options.DormantTimeoutDisabledByIdleStreams = optionBool "DormantTimeoutDisabledByIdleStreams";
+          options.DirCache = optionBool "DirCache";
+          options.DirPolicy = mkOption {
+            description = lib.mdDoc (descriptionGeneric "DirPolicy");
+            type = with types; listOf str;
+            default = [];
+            example = ["accept *:*"];
+          };
+          options.DirPort = optionORPort "DirPort";
+          options.DirReqStatistics = optionBool "DirReqStatistics";
+          options.DisableAllSwap = optionBool "DisableAllSwap";
+          options.DisableDebuggerAttachment = optionBool "DisableDebuggerAttachment";
+          options.DisableNetwork = optionBool "DisableNetwork";
+          options.DisableOOSCheck = optionBool "DisableOOSCheck";
+          options.DNSPort = optionIsolablePorts "DNSPort";
+          options.DoSCircuitCreationEnabled = optionBool "DoSCircuitCreationEnabled";
+          options.DoSConnectionEnabled = optionBool "DoSConnectionEnabled"; # default is null and like "auto"
+          options.DoSRefuseSingleHopClientRendezvous = optionBool "DoSRefuseSingleHopClientRendezvous";
+          options.DownloadExtraInfo = optionBool "DownloadExtraInfo";
+          options.EnforceDistinctSubnets = optionBool "EnforceDistinctSubnets";
+          options.EntryStatistics = optionBool "EntryStatistics";
+          options.ExitPolicy = optionStrings "ExitPolicy" // {
+            default = ["reject *:*"];
+            example = ["accept *:*"];
+          };
+          options.ExitPolicyRejectLocalInterfaces = optionBool "ExitPolicyRejectLocalInterfaces";
+          options.ExitPolicyRejectPrivate = optionBool "ExitPolicyRejectPrivate";
+          options.ExitPortStatistics = optionBool "ExitPortStatistics";
+          options.ExitRelay = optionBool "ExitRelay"; # default is null and like "auto"
+          options.ExtORPort = mkOption {
+            description = lib.mdDoc (descriptionGeneric "ExtORPort");
+            default = null;
+            type = with types; nullOr (oneOf [
+              port (enum ["auto"]) (submodule ({...}: {
+                options = {
+                  addr = optionAddress;
+                  port = optionPort;
+                };
+              }))
+            ]);
+            apply = p: if isInt p || isString p then { port = p; } else p;
+          };
+          options.ExtORPortCookieAuthFile = optionPath "ExtORPortCookieAuthFile";
+          options.ExtORPortCookieAuthFileGroupReadable = optionBool "ExtORPortCookieAuthFileGroupReadable";
+          options.ExtendAllowPrivateAddresses = optionBool "ExtendAllowPrivateAddresses";
+          options.ExtraInfoStatistics = optionBool "ExtraInfoStatistics";
+          options.FascistFirewall = optionBool "FascistFirewall";
+          options.FetchDirInfoEarly = optionBool "FetchDirInfoEarly";
+          options.FetchDirInfoExtraEarly = optionBool "FetchDirInfoExtraEarly";
+          options.FetchHidServDescriptors = optionBool "FetchHidServDescriptors";
+          options.FetchServerDescriptors = optionBool "FetchServerDescriptors";
+          options.FetchUselessDescriptors = optionBool "FetchUselessDescriptors";
+          options.ReachableAddresses = optionStrings "ReachableAddresses";
+          options.ReachableDirAddresses = optionStrings "ReachableDirAddresses";
+          options.ReachableORAddresses = optionStrings "ReachableORAddresses";
+          options.GeoIPFile = optionPath "GeoIPFile";
+          options.GeoIPv6File = optionPath "GeoIPv6File";
+          options.GuardfractionFile = optionPath "GuardfractionFile";
+          options.HidServAuth = mkOption {
+            description = lib.mdDoc (descriptionGeneric "HidServAuth");
+            default = [];
+            type = with types; listOf (oneOf [
+              (submodule {
+                options = {
+                  onion = mkOption {
+                    type = strMatching "[a-z2-7]{16}\\.onion";
+                    description = lib.mdDoc "Onion address.";
+                    example = "xxxxxxxxxxxxxxxx.onion";
+                  };
+                  auth = mkOption {
+                    type = strMatching "[A-Za-z0-9+/]{22}";
+                    description = lib.mdDoc "Authentication cookie.";
+                  };
+                };
+              })
+            ]);
+            example = [
+              {
+                onion = "xxxxxxxxxxxxxxxx.onion";
+                auth = "xxxxxxxxxxxxxxxxxxxxxx";
+              }
+            ];
+          };
+          options.HiddenServiceNonAnonymousMode = optionBool "HiddenServiceNonAnonymousMode";
+          options.HiddenServiceStatistics = optionBool "HiddenServiceStatistics";
+          options.HSLayer2Nodes = optionStrings "HSLayer2Nodes";
+          options.HSLayer3Nodes = optionStrings "HSLayer3Nodes";
+          options.HTTPTunnelPort = optionIsolablePorts "HTTPTunnelPort";
+          options.IPv6Exit = optionBool "IPv6Exit";
+          options.KeyDirectory = optionPath "KeyDirectory";
+          options.KeyDirectoryGroupReadable = optionBool "KeyDirectoryGroupReadable";
+          options.LogMessageDomains = optionBool "LogMessageDomains";
+          options.LongLivedPorts = optionPorts "LongLivedPorts";
+          options.MainloopStats = optionBool "MainloopStats";
+          options.MaxAdvertisedBandwidth = optionBandwidth "MaxAdvertisedBandwidth";
+          options.MaxCircuitDirtiness = optionInt "MaxCircuitDirtiness";
+          options.MaxClientCircuitsPending = optionInt "MaxClientCircuitsPending";
+          options.NATDPort = optionIsolablePorts "NATDPort";
+          options.NewCircuitPeriod = optionInt "NewCircuitPeriod";
+          options.Nickname = optionString "Nickname";
+          options.ORPort = optionORPort "ORPort";
+          options.OfflineMasterKey = optionBool "OfflineMasterKey";
+          options.OptimisticData = optionBool "OptimisticData"; # default is null and like "auto"
+          options.PaddingStatistics = optionBool "PaddingStatistics";
+          options.PerConnBWBurst = optionBandwidth "PerConnBWBurst";
+          options.PerConnBWRate = optionBandwidth "PerConnBWRate";
+          options.PidFile = optionPath "PidFile";
+          options.ProtocolWarnings = optionBool "ProtocolWarnings";
+          options.PublishHidServDescriptors = optionBool "PublishHidServDescriptors";
+          options.PublishServerDescriptor = mkOption {
+            description = lib.mdDoc (descriptionGeneric "PublishServerDescriptor");
+            type = with types; nullOr (enum [false true 0 1 "0" "1" "v3" "bridge"]);
+            default = null;
+          };
+          options.ReducedExitPolicy = optionBool "ReducedExitPolicy";
+          options.RefuseUnknownExits = optionBool "RefuseUnknownExits"; # default is null and like "auto"
+          options.RejectPlaintextPorts = optionPorts "RejectPlaintextPorts";
+          options.RelayBandwidthBurst = optionBandwidth "RelayBandwidthBurst";
+          options.RelayBandwidthRate = optionBandwidth "RelayBandwidthRate";
+          #options.RunAsDaemon
+          options.Sandbox = optionBool "Sandbox";
+          options.ServerDNSAllowBrokenConfig = optionBool "ServerDNSAllowBrokenConfig";
+          options.ServerDNSAllowNonRFC953Hostnames = optionBool "ServerDNSAllowNonRFC953Hostnames";
+          options.ServerDNSDetectHijacking = optionBool "ServerDNSDetectHijacking";
+          options.ServerDNSRandomizeCase = optionBool "ServerDNSRandomizeCase";
+          options.ServerDNSResolvConfFile = optionPath "ServerDNSResolvConfFile";
+          options.ServerDNSSearchDomains = optionBool "ServerDNSSearchDomains";
+          options.ServerTransportPlugin = mkOption {
+            description = lib.mdDoc (descriptionGeneric "ServerTransportPlugin");
+            default = null;
+            type = with types; nullOr (submodule ({...}: {
+              options = {
+                transports = mkOption {
+                  description = lib.mdDoc "List of pluggable transports.";
+                  type = listOf str;
+                  example = ["obfs2" "obfs3" "obfs4" "scramblesuit"];
+                };
+                exec = mkOption {
+                  type = types.str;
+                  description = lib.mdDoc "Command of pluggable transport.";
+                };
+              };
+            }));
+          };
+          options.ShutdownWaitLength = mkOption {
+            type = types.int;
+            default = 30;
+            description = lib.mdDoc (descriptionGeneric "ShutdownWaitLength");
+          };
+          options.SocksPolicy = optionStrings "SocksPolicy" // {
+            example = ["accept *:*"];
+          };
+          options.SOCKSPort = mkOption {
+            description = lib.mdDoc (descriptionGeneric "SOCKSPort");
+            default = lib.optionals cfg.settings.HiddenServiceNonAnonymousMode [{port = 0;}];
+            defaultText = literalExpression ''
+              if config.${opt.settings}.HiddenServiceNonAnonymousMode == true
+              then [ { port = 0; } ]
+              else [ ]
+            '';
+            example = [{port = 9090;}];
+            type = types.listOf (optionSOCKSPort true);
+          };
+          options.TestingTorNetwork = optionBool "TestingTorNetwork";
+          options.TransPort = optionIsolablePorts "TransPort";
+          options.TransProxyType = mkOption {
+            description = lib.mdDoc (descriptionGeneric "TransProxyType");
+            type = with types; nullOr (enum ["default" "TPROXY" "ipfw" "pf-divert"]);
+            default = null;
+          };
+          #options.TruncateLogFile
+          options.UnixSocksGroupWritable = optionBool "UnixSocksGroupWritable";
+          options.UseDefaultFallbackDirs = optionBool "UseDefaultFallbackDirs";
+          options.UseMicrodescriptors = optionBool "UseMicrodescriptors";
+          options.V3AuthUseLegacyKey = optionBool "V3AuthUseLegacyKey";
+          options.V3AuthoritativeDirectory = optionBool "V3AuthoritativeDirectory";
+          options.VersioningAuthoritativeDirectory = optionBool "VersioningAuthoritativeDirectory";
+          options.VirtualAddrNetworkIPv4 = optionString "VirtualAddrNetworkIPv4";
+          options.VirtualAddrNetworkIPv6 = optionString "VirtualAddrNetworkIPv6";
+          options.WarnPlaintextPorts = optionPorts "WarnPlaintextPorts";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Not sure if `cfg.relay.role == "private-bridge"` helps as tor
+    # sends a lot of stats
+    warnings = optional (cfg.settings.BridgeRelay &&
+      flatten (mapAttrsToList (n: o: o.map) cfg.relay.onionServices) != [])
+      ''
+        Running Tor hidden services on a public relay makes the
+        presence of hidden services visible through simple statistical
+        analysis of publicly available data.
+        See https://trac.torproject.org/projects/tor/ticket/8742
+
+        You can safely ignore this warning if you don't intend to
+        actually hide your hidden services. In either case, you can
+        always create a container/VM with a separate Tor daemon instance.
+      '' ++
+      flatten (mapAttrsToList (n: o:
+        optionals (o.settings.HiddenServiceVersion == 2) [
+          (optional (o.settings.HiddenServiceExportCircuitID != null) ''
+            HiddenServiceExportCircuitID is used in the HiddenService: ${n}
+            but this option is only for v3 hidden services.
+          '')
+        ] ++
+        optionals (o.settings.HiddenServiceVersion != 2) [
+          (optional (o.settings.HiddenServiceAuthorizeClient != null) ''
+            HiddenServiceAuthorizeClient is used in the HiddenService: ${n}
+            but this option is only for v2 hidden services.
+          '')
+          (optional (o.settings.RendPostPeriod != null) ''
+            RendPostPeriod is used in the HiddenService: ${n}
+            but this option is only for v2 hidden services.
+          '')
+        ]
+      ) cfg.relay.onionServices);
+
+    users.groups.tor.gid = config.ids.gids.tor;
+    users.users.tor =
+      { description = "Tor Daemon User";
+        createHome  = true;
+        home        = stateDir;
+        group       = "tor";
+        uid         = config.ids.uids.tor;
+      };
+
+    services.tor.settings = mkMerge [
+      (mkIf cfg.enableGeoIP {
+        GeoIPFile = "${cfg.package.geoip}/share/tor/geoip";
+        GeoIPv6File = "${cfg.package.geoip}/share/tor/geoip6";
+      })
+      (mkIf cfg.controlSocket.enable {
+        ControlPort = [ { unix = runDir + "/control"; GroupWritable=true; RelaxDirModeCheck=true; } ];
+      })
+      (mkIf cfg.relay.enable (
+        optionalAttrs (cfg.relay.role != "exit") {
+          ExitPolicy = mkForce ["reject *:*"];
+        } //
+        optionalAttrs (elem cfg.relay.role ["bridge" "private-bridge"]) {
+          BridgeRelay = true;
+          ExtORPort.port = mkDefault "auto";
+          ServerTransportPlugin.transports = mkDefault ["obfs4"];
+          ServerTransportPlugin.exec = mkDefault "${pkgs.obfs4}/bin/obfs4proxy managed";
+        } // optionalAttrs (cfg.relay.role == "private-bridge") {
+          ExtraInfoStatistics = false;
+          PublishServerDescriptor = false;
+        }
+      ))
+      (mkIf (!cfg.relay.enable) {
+        # Avoid surprises when leaving ORPort/DirPort configurations in cfg.settings,
+        # because it would still enable Tor as a relay,
+        # which can trigger all sorts of problems when not done carefully,
+        # like the blocklisting of the machine's IP addresses
+        # by some hosting providers...
+        DirPort = mkForce [];
+        ORPort = mkForce [];
+        PublishServerDescriptor = mkForce false;
+      })
+      (mkIf (!cfg.client.enable) {
+        # Make sure application connections via SOCKS are disabled
+        # when services.tor.client.enable is false
+        SOCKSPort = mkForce [ 0 ];
+      })
+      (mkIf cfg.client.enable (
+        { SOCKSPort = [ cfg.client.socksListenAddress ];
+        } // optionalAttrs cfg.client.transparentProxy.enable {
+          TransPort = [{ addr = "127.0.0.1"; port = 9040; }];
+        } // optionalAttrs cfg.client.dns.enable {
+          DNSPort = [{ addr = "127.0.0.1"; port = 9053; }];
+          AutomapHostsOnResolve = true;
+        } // optionalAttrs (flatten (mapAttrsToList (n: o: o.clientAuthorizations) cfg.client.onionServices) != []) {
+          ClientOnionAuthDir = runDir + "/ClientOnionAuthDir";
+        }
+      ))
+    ];
+
+    networking.firewall = mkIf cfg.openFirewall {
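+      # Only open TCP ports that are statically known at evaluation time
+      # (plain integers or { port = ...; } entries greater than 0), taken
+      # from the ORPort and DirPort settings.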
+      allowedTCPPorts =
+        concatMap (o:
+          if isInt o && o > 0 then [o]
+          else optionals (o ? "port" && isInt o.port && o.port > 0) [o.port]
+        ) (flatten [
+          cfg.settings.ORPort
+          cfg.settings.DirPort
+        ]);
+    };
+
+    systemd.services.tor = {
+      description = "Tor Daemon";
+      path = [ pkgs.tor ];
+
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" ];
+      restartTriggers = [ torrc ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = "tor";
+        Group = "tor";
+        ExecStartPre = [
+          "${cfg.package}/bin/tor -f ${torrc} --verify-config"
+          # DOC: Appendix G of https://spec.torproject.org/rend-spec-v3
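+          # The script below installs, before Tor starts, the authorized_clients
+          # files and secret keys of the configured relay onion services, and the
+          # .auth_private files of the configured client onion services.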
+          ("+" + pkgs.writeShellScript "ExecStartPre" (concatStringsSep "\n" (flatten (["set -eu"] ++
+            mapAttrsToList (name: onion:
+              optional (onion.authorizedClients != []) ''
+                rm -rf ${escapeShellArg onion.path}/authorized_clients
+                install -d -o tor -g tor -m 0700 ${escapeShellArg onion.path} ${escapeShellArg onion.path}/authorized_clients
+              '' ++
+              imap0 (i: pubKey: ''
+                echo ${pubKey} |
+                install -o tor -g tor -m 0400 /dev/stdin ${escapeShellArg onion.path}/authorized_clients/${toString i}.auth
+              '') onion.authorizedClients ++
+              optional (onion.secretKey != null) ''
+                install -d -o tor -g tor -m 0700 ${escapeShellArg onion.path}
+                key="$(cut -f1 -d: ${escapeShellArg onion.secretKey} | head -1)"
+                case "$key" in
+                 ("== ed25519v"*"-secret")
+                  install -o tor -g tor -m 0400 ${escapeShellArg onion.secretKey} ${escapeShellArg onion.path}/hs_ed25519_secret_key;;
+                 (*) echo >&2 "NixOS does not (yet) support secret key type for onion: ${name}"; exit 1;;
+                esac
+              ''
+            ) cfg.relay.onionServices ++
+            mapAttrsToList (name: onion: imap0 (i: prvKeyPath:
+              let hostname = removeSuffix ".onion" name; in ''
+              printf "%s:" ${escapeShellArg hostname} | cat - ${escapeShellArg prvKeyPath} |
+              install -o tor -g tor -m 0700 /dev/stdin \
+               ${runDir}/ClientOnionAuthDir/${escapeShellArg hostname}.${toString i}.auth_private
+            '') onion.clientAuthorizations)
+            cfg.client.onionServices
+          ))))
+        ];
+        ExecStart = "${cfg.package}/bin/tor -f ${torrc}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        KillSignal = "SIGINT";
+        TimeoutSec = cfg.settings.ShutdownWaitLength + 30; # Wait a bit longer than ShutdownWaitLength before actually timing out
+        Restart = "on-failure";
+        LimitNOFILE = 32768;
+        RuntimeDirectory = [
+          # g+x allows access to the control socket
+          "tor"
+          "tor/root"
+          # g+x can't be removed in ExecStart=, but will be removed by Tor
+          "tor/ClientOnionAuthDir"
+        ];
+        RuntimeDirectoryMode = "0710";
+        StateDirectoryMode = "0700";
+        StateDirectory = [
+            "tor"
+            "tor/onion"
+          ] ++
+          flatten (mapAttrsToList (name: onion:
+            optional (onion.secretKey == null) "tor/onion/${name}"
+          ) cfg.relay.onionServices);
+        # The following options are only to optimize:
+        # systemd-analyze security tor
+        RootDirectory = runDir + "/root";
+        RootDirectoryStartOnly = true;
+        #InaccessiblePaths = [ "-+${runDir}/root" ];
+        UMask = "0066";
+        BindPaths = [ stateDir ];
+        BindReadOnlyPaths = [ storeDir "/etc" ] ++
+          optionals config.services.resolved.enable [
+            "/run/systemd/resolve/stub-resolv.conf"
+            "/run/systemd/resolve/resolv.conf"
+          ];
+        AmbientCapabilities   = [""] ++ lib.optional bindsPrivilegedPort "CAP_NET_BIND_SERVICE";
+        CapabilityBoundingSet = [""] ++ lib.optional bindsPrivilegedPort "CAP_NET_BIND_SERVICE";
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateNetwork = mkDefault false;
+        PrivateTmp = true;
+        # Tor cannot currently bind privileged port when PrivateUsers=true,
+        # see https://gitlab.torproject.org/legacy/trac/-/issues/20930
+        PrivateUsers = !bindsPrivilegedPort;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        # See also the finer but experimental option settings.Sandbox
+        SystemCallFilter = [
+          "@system-service"
+          # Groups in @system-service which do not contain a syscall listed by:
+          # perf stat -x, 2>perf.log -e 'syscalls:sys_enter_*' tor
+          # in tests, and seem likely not necessary for tor.
+          "~@aio" "~@chown" "~@keyring" "~@memlock" "~@resources" "~@setuid" "~@timer"
+        ];
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ julm ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/torify.nix b/nixpkgs/nixos/modules/services/security/torify.nix
new file mode 100644
index 000000000000..4d311adebcae
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/torify.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+
+  cfg = config.services.tor;
+
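+  # Wrapper script that runs a command with libtsocks preloaded and pointed at
+  # the generated tsocks.conf; it is installed as the `tsocks` command.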
+  torify = pkgs.writeTextFile {
+    name = "tsocks";
+    text = ''
+        #!${pkgs.runtimeShell}
+        TSOCKS_CONF_FILE=${pkgs.writeText "tsocks.conf" cfg.tsocks.config} LD_PRELOAD="${pkgs.tsocks}/lib/libtsocks.so $LD_PRELOAD" "$@"
+    '';
+    executable = true;
+    destination = "/bin/tsocks";
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.tor.tsocks = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to build the tsocks wrapper script to relay application traffic via Tor.
+
+          ::: {.important}
+          You shouldn't use this unless you know what you're
+          doing, because your installation of Tor already comes with
+          its own superior `torsocks` wrapper (which does not
+          leak DNS queries) that does pretty much
+          exactly the same thing as this.
+          :::
+        '';
+      };
+
+      server = mkOption {
+        type = types.str;
+        default = "localhost:9050";
+        example = "192.168.0.20";
+        description = lib.mdDoc ''
+          IP address of the Tor client to use.
+        '';
+      };
+
+      config = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration. Contents will be added verbatim to the TSocks
+          configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.tsocks.enable {
+
+    environment.systemPackages = [ torify ];  # expose it to the users
+
+    services.tor.tsocks.config = ''
+      server = ${toString(head (splitString ":" cfg.tsocks.server))}
+      server_port = ${toString(tail (splitString ":" cfg.tsocks.server))}
+
+      local = 127.0.0.0/255.128.0.0
+      local = 127.128.0.0/255.192.0.0
+    '';
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/security/torsocks.nix b/nixpkgs/nixos/modules/services/security/torsocks.nix
new file mode 100644
index 000000000000..0647d7eb49bc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/torsocks.nix
@@ -0,0 +1,121 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.tor.torsocks;
+  optionalNullStr = b: v: optionalString (b != null) v;
+
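+  # Render a torsocks.conf for the given "ip:port" SOCKS server string.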
+  configFile = server: ''
+    TorAddress ${toString (head (splitString ":" server))}
+    TorPort    ${toString (tail (splitString ":" server))}
+
+    OnionAddrRange ${cfg.onionAddrRange}
+
+    ${optionalNullStr cfg.socks5Username
+        "SOCKS5Username ${cfg.socks5Username}"}
+    ${optionalNullStr cfg.socks5Password
+        "SOCKS5Password ${cfg.socks5Password}"}
+
+    AllowInbound ${if cfg.allowInbound then "1" else "0"}
+  '';
+
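+  # Build a wrapper command around torsocks that uses a configuration pointing
+  # at the given SOCKS server, e.g. (wrapTorsocks "torsocks-faster" cfg.fasterServer).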
+  wrapTorsocks = name: server: pkgs.writeTextFile {
+    name = name;
+    text = ''
+        #!${pkgs.runtimeShell}
+        TORSOCKS_CONF_FILE=${pkgs.writeText "torsocks.conf" (configFile server)} ${pkgs.torsocks}/bin/torsocks "$@"
+    '';
+    executable = true;
+    destination = "/bin/${name}";
+  };
+
+in
+{
+  options = {
+    services.tor.torsocks = {
+      enable = mkOption {
+        type        = types.bool;
+        default     = config.services.tor.enable && config.services.tor.client.enable;
+        defaultText = literalExpression "config.services.tor.enable && config.services.tor.client.enable";
+        description = lib.mdDoc ''
+          Whether to build `/etc/tor/torsocks.conf`
+          containing the specified global torsocks configuration.
+        '';
+      };
+
+      server = mkOption {
+        type    = types.str;
+        default = "127.0.0.1:9050";
+        example = "192.168.0.20:1234";
+        description = lib.mdDoc ''
+          IP/Port of the Tor SOCKS server. Currently, hostnames are
+          NOT supported by torsocks.
+        '';
+      };
+
+      fasterServer = mkOption {
+        type    = types.str;
+        default = "127.0.0.1:9063";
+        example = "192.168.0.20:1234";
+        description = lib.mdDoc ''
+          IP/Port of the Tor SOCKS server for torsocks-faster wrapper suitable for HTTP.
+          Currently, hostnames are NOT supported by torsocks.
+        '';
+      };
+
+      onionAddrRange = mkOption {
+        type    = types.str;
+        default = "127.42.42.0/24";
+        description = lib.mdDoc ''
+          Tor hidden sites do not have real IP addresses. This
+          specifies what range of IP addresses will be handed to the
+          application as "cookies" for .onion names. Of course, you
+          should pick a block of addresses which you aren't ever going
+          to need to actually connect to. This is similar to the
+          MapAddress feature of the main tor daemon.
+        '';
+      };
+
+      socks5Username = mkOption {
+        type    = types.nullOr types.str;
+        default = null;
+        example = "bob";
+        description = lib.mdDoc ''
+          SOCKS5 username. The `TORSOCKS_USERNAME`
+          environment variable overrides this option if it is set.
+        '';
+      };
+
+      socks5Password = mkOption {
+        type    = types.nullOr types.str;
+        default = null;
+        example = "sekret";
+        description = lib.mdDoc ''
+          SOCKS5 password. The `TORSOCKS_PASSWORD`
+          environment variable overrides this option if it is set.
+        '';
+      };
+
+      allowInbound = mkOption {
+        type    = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Set Torsocks to accept inbound connections. If set to
+          `true`, listen() and accept() will be
+          allowed to be used with non-localhost addresses.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.torsocks (wrapTorsocks "torsocks-faster" cfg.fasterServer) ];
+
+    environment.etc."tor/torsocks.conf" =
+      {
+        source = pkgs.writeText "torsocks.conf" (configFile cfg.server);
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/security/usbguard.nix b/nixpkgs/nixos/modules/services/security/usbguard.nix
new file mode 100644
index 000000000000..071e69975143
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/usbguard.nix
@@ -0,0 +1,265 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.usbguard;
+
+  # valid policy options
+  policy = (types.enum [ "allow" "block" "reject" "keep" "apply-policy" ]);
+
+  # decide what file to use for rules
+  ruleFile = if cfg.rules != null then pkgs.writeText "usbguard-rules" cfg.rules else cfg.ruleFile;
+
+  daemonConf = ''
+    # generated by nixos/modules/services/security/usbguard.nix
+    RuleFile=${ruleFile}
+    ImplicitPolicyTarget=${cfg.implicitPolicyTarget}
+    PresentDevicePolicy=${cfg.presentDevicePolicy}
+    PresentControllerPolicy=${cfg.presentControllerPolicy}
+    InsertedDevicePolicy=${cfg.insertedDevicePolicy}
+    RestoreControllerDeviceState=${boolToString cfg.restoreControllerDeviceState}
+    # this does not seem useful for end users to change
+    DeviceManagerBackend=uevent
+    IPCAllowedUsers=${concatStringsSep " " cfg.IPCAllowedUsers}
+    IPCAllowedGroups=${concatStringsSep " " cfg.IPCAllowedGroups}
+    IPCAccessControlFiles=/var/lib/usbguard/IPCAccessControl.d/
+    DeviceRulesWithPort=${boolToString cfg.deviceRulesWithPort}
+    # HACK: that way audit logs still land in the journal
+    AuditFilePath=/dev/null
+  '';
+
+  daemonConfFile = pkgs.writeText "usbguard-daemon-conf" daemonConf;
+
+in
+{
+
+  ###### interface
+
+  options = {
+    services.usbguard = {
+      enable = mkEnableOption (lib.mdDoc "USBGuard daemon");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.usbguard;
+        defaultText = literalExpression "pkgs.usbguard";
+        description = lib.mdDoc ''
+          The usbguard package to use. If you do not need the Qt GUI, use
+          `pkgs.usbguard-nox` to save disk space.
+        '';
+      };
+
+      ruleFile = mkOption {
+        type = types.nullOr types.path;
+        default = "/var/lib/usbguard/rules.conf";
+        example = "/run/secrets/usbguard-rules";
+        description = lib.mdDoc ''
+          This tells the USBGuard daemon which file to load as policy rule set.
+
+          The file can be changed manually or via the IPC interface assuming it has the right file permissions.
+
+          For more details see {manpage}`usbguard-rules.conf(5)`.
+        '';
+
+      };
+      rules = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        example = ''
+          allow with-interface equals { 08:*:* }
+        '';
+        description = lib.mdDoc ''
+          The USBGuard daemon will load this as the policy rule set.
+          As these rules are NixOS-managed, they are immutable and cannot
+          be changed via the IPC interface.
+
+          If you do not set this option, the USBGuard daemon will load
+          its policy rule set from the file configured in `services.usbguard.ruleFile`.
+
+          Running `usbguard generate-policy` as root will
+          generate a policy for the currently plugged-in devices.
+
+          For more details see {manpage}`usbguard-rules.conf(5)`.
+        '';
+      };
+
+      implicitPolicyTarget = mkOption {
+        type = policy;
+        default = "block";
+        description = lib.mdDoc ''
+          How to treat USB devices that don't match any rule in the policy.
+          Target should be one of allow, block or reject (logically remove the
+          device node from the system).
+        '';
+      };
+
+      presentDevicePolicy = mkOption {
+        type = policy;
+        default = "apply-policy";
+        description = lib.mdDoc ''
+          How to treat USB devices that are already connected when the daemon
+          starts. Policy should be one of allow, block, reject, keep (keep
+          whatever state the device is currently in) or apply-policy (evaluate
+          the rule set for every present device).
+        '';
+      };
+
+      presentControllerPolicy = mkOption {
+        type = policy;
+        default = "keep";
+        description = lib.mdDoc ''
+          How to treat USB controller devices that are already connected when
+          the daemon starts. One of allow, block, reject, keep or apply-policy.
+        '';
+      };
+
+      insertedDevicePolicy = mkOption {
+        type = policy;
+        default = "apply-policy";
+        description = lib.mdDoc ''
+          How to treat USB devices that are connected after the daemon
+          starts. One of block, reject or apply-policy.
+        '';
+      };
+
+      restoreControllerDeviceState = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          The USBGuard daemon modifies some attributes of controller
+          devices like the default authorization state of new child device
+          instances. Using this setting, you can control whether the daemon
+          will try to restore the attribute values to the state before
+          modification on shutdown.
+        '';
+      };
+
+      IPCAllowedUsers = mkOption {
+        type = types.listOf types.str;
+        default = [ "root" ];
+        example = [ "root" "yourusername" ];
+        description = lib.mdDoc ''
+          A list of usernames that the daemon will accept IPC connections from.
+        '';
+      };
+
+      IPCAllowedGroups = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "wheel" ];
+        description = lib.mdDoc ''
+          A list of groupnames that the daemon will accept IPC connections
+          from.
+        '';
+      };
+
+      deviceRulesWithPort = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Generate device-specific rules including the "via-port" attribute.
+        '';
+      };
+
+      dbus.enable = mkEnableOption (lib.mdDoc "USBGuard dbus daemon");
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services = {
+      usbguard = {
+        description = "USBGuard daemon";
+
+        wantedBy = [ "basic.target" ];
+        wants = [ "systemd-udevd.service" ];
+
+        # make sure an empty rule file exists
+        preStart = ''[ -f "${ruleFile}" ] || touch ${ruleFile}'';
+
+        serviceConfig = {
+          Type = "simple";
+          ExecStart = "${cfg.package}/bin/usbguard-daemon -P -k -c ${daemonConfFile}";
+          Restart = "on-failure";
+
+          StateDirectory = [
+            "usbguard"
+            "usbguard/IPCAccessControl.d"
+          ];
+
+          AmbientCapabilities = "";
+          CapabilityBoundingSet = "CAP_CHOWN CAP_FOWNER";
+          DeviceAllow = "/dev/null rw";
+          DevicePolicy = "strict";
+          IPAddressDeny = "any";
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectKernelModules = true;
+          ProtectSystem = true;
+          ReadOnlyPaths = "-/";
+          ReadWritePaths = "-/dev/shm -/tmp";
+          RestrictAddressFamilies = [ "AF_UNIX" "AF_NETLINK" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = "@system-service";
+          UMask = "0077";
+        };
+      };
+
+      usbguard-dbus = mkIf cfg.dbus.enable {
+        description = "USBGuard D-Bus Service";
+
+        wantedBy = [ "multi-user.target" ];
+        requires = [ "usbguard.service" ];
+
+        serviceConfig = {
+          Type = "dbus";
+          BusName = "org.usbguard1";
+          ExecStart = "${cfg.package}/bin/usbguard-dbus --system";
+          Restart = "on-failure";
+        };
+
+        aliases = [ "dbus-org.usbguard.service" ];
+      };
+    };
+
+    security.polkit.extraConfig =
+      let
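+        # Build a JavaScript condition that is true when the subject belongs to
+        # any of the configured IPC groups, e.g. `subject.isInGroup("wheel") || false`.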
+        groupCheck = (lib.concatStrings (map
+          (g: "subject.isInGroup(\"${g}\") || ")
+          cfg.IPCAllowedGroups))
+        + "false";
+      in
+      optionalString cfg.dbus.enable ''
+        polkit.addRule(function(action, subject) {
+            if ((action.id == "org.usbguard.Policy1.listRules" ||
+                 action.id == "org.usbguard.Policy1.appendRule" ||
+                 action.id == "org.usbguard.Policy1.removeRule" ||
+                 action.id == "org.usbguard.Devices1.applyDevicePolicy" ||
+                 action.id == "org.usbguard.Devices1.listDevices" ||
+                 action.id == "org.usbguard1.getParameter" ||
+                 action.id == "org.usbguard1.setParameter") &&
+                subject.active == true && subject.local == true &&
+                (${groupCheck})) {
+                    return polkit.Result.YES;
+            }
+        });
+      '';
+  };
+  imports = [
+    (mkRemovedOptionModule [ "services" "usbguard" "IPCAccessControlFiles" ] "The usbguard module now hardcodes IPCAccessControlFiles to /var/lib/usbguard/IPCAccessControl.d.")
+    (mkRemovedOptionModule [ "services" "usbguard" "auditFilePath" ] "Removed usbguard module audit log files. Audit logs can be found in the systemd journal.")
+    (mkRenamedOptionModule [ "services" "usbguard" "implictPolicyTarget" ] [ "services" "usbguard" "implicitPolicyTarget" ])
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/security/vault-agent.nix b/nixpkgs/nixos/modules/services/security/vault-agent.nix
new file mode 100644
index 000000000000..17b8ff83592e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/vault-agent.nix
@@ -0,0 +1,128 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  format = pkgs.formats.json { };
+  commonOptions = { pkgName, flavour ? pkgName }: mkOption {
+    default = { };
+    description = mdDoc ''
+      Attribute set of ${flavour} instances.
+      Creates independent `${flavour}-''${name}.service` systemd units for each instance defined here.
+    '';
+    type = with types; attrsOf (submodule ({ name, ... }: {
+      options = {
+        enable = mkEnableOption (mdDoc "this ${flavour} instance") // { default = true; };
+
+        package = mkPackageOptionMD pkgs pkgName { };
+
+        user = mkOption {
+          type = types.str;
+          default = "root";
+          description = mdDoc ''
+            User under which this instance runs.
+          '';
+        };
+
+        group = mkOption {
+          type = types.str;
+          default = "root";
+          description = mdDoc ''
+            Group under which this instance runs.
+          '';
+        };
+
+        settings = mkOption {
+          type = types.submodule {
+            freeformType = format.type;
+
+            options = {
+              pid_file = mkOption {
+                default = "/run/${flavour}/${name}.pid";
+                type = types.str;
+                description = mdDoc ''
+                  Path to use for the pid file.
+                '';
+              };
+
+              template = mkOption {
+                default = [ ];
+                type = with types; listOf (attrsOf anything);
+                description =
+                  let upstreamDocs =
+                    if flavour == "vault-agent"
+                    then "https://developer.hashicorp.com/vault/docs/agent/template"
+                    else "https://github.com/hashicorp/consul-template/blob/main/docs/configuration.md#templates";
+                  in
+                  mdDoc ''
+                    Template section of ${flavour}.
+                    Refer to <${upstreamDocs}> for supported values.
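+
+                    A minimal sketch of one template entry, using the upstream
+                    `source` and `destination` keys (the paths below are only
+                    placeholders):
+
+                    ```
+                    template = [ {
+                      source      = "/etc/vault-agent/example.ctmpl";
+                      destination = "/run/example.rendered";
+                    } ];
+                    ```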
+                  '';
+              };
+            };
+          };
+
+          default = { };
+
+          description =
+            let upstreamDocs =
+              if flavour == "vault-agent"
+              then "https://developer.hashicorp.com/vault/docs/agent#configuration-file-options"
+              else "https://github.com/hashicorp/consul-template/blob/main/docs/configuration.md#configuration-file";
+            in
+            mdDoc ''
+              Free-form settings written directly to the `config.json` file.
+              Refer to <${upstreamDocs}> for supported values.
+
+              ::: {.note}
+              The resulting format is JSON, not HCL.
+              Refer to <https://www.hcl2json.com/> if you are unsure how to convert HCL options to JSON.
+              :::
+            '';
+        };
+      };
+    }));
+  };
+
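+  # Build the systemd service definition for one agent instance; the unit is
+  # only generated when the instance is enabled.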
+  createAgentInstance = { instance, name, flavour }:
+    let
+      configFile = format.generate "${name}.json" instance.settings;
+    in
+    mkIf (instance.enable) {
+      description = "${flavour} daemon - ${name}";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ pkgs.getent ];
+      startLimitIntervalSec = 60;
+      startLimitBurst = 3;
+      serviceConfig = {
+        User = instance.user;
+        Group = instance.group;
+        RuntimeDirectory = flavour;
+        ExecStart = "${getExe instance.package} ${optionalString ((getName instance.package) == "vault") "agent"} -config ${configFile}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
+        KillSignal = "SIGINT";
+        TimeoutStopSec = "30s";
+        Restart = "on-failure";
+      };
+    };
+in
+{
+  options = {
+    services.consul-template.instances = commonOptions { pkgName = "consul-template"; };
+    services.vault-agent.instances = commonOptions { pkgName = "vault"; flavour = "vault-agent"; };
+  };
+
+  config = mkMerge (map
+    (flavour:
+      let cfg = config.services.${flavour}; in
+      mkIf (cfg.instances != { }) {
+        systemd.services = mapAttrs'
+          (name: instance: nameValuePair "${flavour}-${name}" (createAgentInstance { inherit name instance flavour; }))
+          cfg.instances;
+      })
+    [ "consul-template" "vault-agent" ]);
+
+  meta.maintainers = with maintainers; [ emilylange tcheronneau ];
+}
+
diff --git a/nixpkgs/nixos/modules/services/security/vault.nix b/nixpkgs/nixos/modules/services/security/vault.nix
new file mode 100644
index 000000000000..18d981cdb0d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/vault.nix
@@ -0,0 +1,234 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.vault;
+  opt = options.services.vault;
+
+  configFile = pkgs.writeText "vault.hcl" ''
+    # vault in dev mode will refuse to start if its configuration sets listener
+    ${lib.optionalString (!cfg.dev) ''
+    listener "tcp" {
+      address = "${cfg.address}"
+      ${if (cfg.tlsCertFile == null || cfg.tlsKeyFile == null) then ''
+          tls_disable = "true"
+        '' else ''
+          tls_cert_file = "${cfg.tlsCertFile}"
+          tls_key_file = "${cfg.tlsKeyFile}"
+        ''}
+      ${cfg.listenerExtraConfig}
+    }
+    ''}
+    storage "${cfg.storageBackend}" {
+      ${optionalString (cfg.storagePath   != null) ''path = "${cfg.storagePath}"''}
+      ${optionalString (cfg.storageConfig != null) cfg.storageConfig}
+    }
+    ${optionalString (cfg.telemetryConfig != "") ''
+        telemetry {
+          ${cfg.telemetryConfig}
+        }
+      ''}
+    ${cfg.extraConfig}
+  '';
+
+  allConfigPaths = [configFile] ++ cfg.extraSettingsPaths;
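+  # Command-line arguments for the daemon: optional dev-mode flags plus one
+  # `-config` argument per configuration path.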
+  configOptions = escapeShellArgs
+    (lib.optional cfg.dev "-dev" ++
+     lib.optional (cfg.dev && cfg.devRootTokenID != null) "-dev-root-token-id=${cfg.devRootTokenID}"
+      ++ (concatMap (p: ["-config" p]) allConfigPaths));
+
+in
+
+{
+  options = {
+    services.vault = {
+      enable = mkEnableOption (lib.mdDoc "Vault daemon");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.vault;
+        defaultText = literalExpression "pkgs.vault";
+        description = lib.mdDoc "This option specifies the vault package to use.";
+      };
+
+      dev = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          In this mode, Vault runs in-memory and starts unsealed. This option is not meant for production but for development and testing, i.e. for NixOS tests.
+        '';
+      };
+
+      devRootTokenID = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Initial root token. This only applies when {option}`services.vault.dev` is true.
+        '';
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "127.0.0.1:8200";
+        description = lib.mdDoc "The name of the ip interface to listen to";
+      };
+
+      tlsCertFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/path/to/your/cert.pem";
+        description = lib.mdDoc "TLS certificate file. TLS will be disabled unless this option is set";
+      };
+
+      tlsKeyFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/path/to/your/key.pem";
+        description = lib.mdDoc "TLS private key file. TLS will be disabled unless this option is set";
+      };
+
+      listenerExtraConfig = mkOption {
+        type = types.lines;
+        default = ''
+          tls_min_version = "tls12"
+        '';
+        description = lib.mdDoc "Extra text appended to the listener section.";
+      };
+
+      storageBackend = mkOption {
+        type = types.enum [ "inmem" "file" "consul" "zookeeper" "s3" "azure" "dynamodb" "etcd" "mssql" "mysql" "postgresql" "swift" "gcs" "raft" ];
+        default = "inmem";
+        description = lib.mdDoc "The name of the type of storage backend";
+      };
+
+      storagePath = mkOption {
+        type = types.nullOr types.path;
+        default = if cfg.storageBackend == "file" || cfg.storageBackend == "raft" then "/var/lib/vault" else null;
+        defaultText = literalExpression ''
+          if config.${opt.storageBackend} == "file" || config.${opt.storageBackend} == "raft"
+          then "/var/lib/vault"
+          else null
+        '';
+        description = lib.mdDoc "Data directory for file backend";
+      };
+
+      storageConfig = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        description = lib.mdDoc ''
+          HCL configuration to insert in the storageBackend section.
+
+          Confidential values should not be specified here because this option's
+          value is written to the Nix store, which is publicly readable.
+          Provide credentials and such in a separate file using
+          [](#opt-services.vault.extraSettingsPaths).
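+
+          For example, a non-confidential sketch for the `consul` backend
+          (the address and path below are only placeholders):
+
+          ```
+          address = "127.0.0.1:8500"
+          path    = "vault/"
+          ```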
+        '';
+      };
+
+      telemetryConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Telemetry configuration";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Extra text appended to {file}`vault.hcl`.";
+      };
+
+      extraSettingsPaths = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc ''
+          Configuration files to load besides the immutable one defined by the NixOS module.
+          This can be used to avoid putting credentials in the Nix store, which can be read by any user.
+
+          Each path can point to a JSON- or HCL-formatted file, or a directory
+          to be scanned for files with `.hcl` or
+          `.json` extensions.
+
+          To upload the confidential file with NixOps, use for example:
+
+          ```
+          # https://releases.nixos.org/nixops/latest/manual/manual.html#opt-deployment.keys
+          deployment.keys."vault.hcl" = let db = import ./db-credentials.nix; in {
+            text = ${"''"}
+              storage "postgresql" {
+                connection_url = "postgres://''${db.username}:''${db.password}@host.example.com/exampledb?sslmode=verify-ca"
+              }
+            ${"''"};
+            user = "vault";
+          };
+          services.vault.extraSettingsPaths = ["/run/keys/vault.hcl"];
+          services.vault.storageBackend = "postgresql";
+          users.users.vault.extraGroups = ["keys"];
+          ```
+        '';
+      };
+    };
+  };
+
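+  # A minimal usage sketch (hypothetical certificate paths; adjust to your
+  # deployment). With the "file" backend, storagePath defaults to
+  # /var/lib/vault and the assertions in the config block below are satisfied:
+  #
+  #   services.vault = {
+  #     enable = true;
+  #     storageBackend = "file";
+  #     tlsCertFile = "/var/lib/vault/cert.pem";
+  #     tlsKeyFile = "/var/lib/vault/key.pem";
+  #   };
+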
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.storageBackend == "inmem" -> (cfg.storagePath == null && cfg.storageConfig == null);
+        message = ''The "inmem" storage expects no services.vault.storagePath nor services.vault.storageConfig'';
+      }
+      {
+        assertion = (
+          (cfg.storageBackend == "file" -> (cfg.storagePath != null && cfg.storageConfig == null)) &&
+          (cfg.storagePath != null -> (cfg.storageBackend == "file" || cfg.storageBackend == "raft"))
+        );
+        message = ''services.vault.storagePath must be set when using the "file" backend, and may only be set with the "file" or "raft" backends'';
+      }
+    ];
+
+    users.users.vault = {
+      name = "vault";
+      group = "vault";
+      uid = config.ids.uids.vault;
+      description = "Vault daemon user";
+    };
+    users.groups.vault.gid = config.ids.gids.vault;
+
+    systemd.tmpfiles.rules = optional (cfg.storagePath != null)
+      "d '${cfg.storagePath}' 0700 vault vault - -";
+
+    systemd.services.vault = {
+      description = "Vault server daemon";
+
+      wantedBy = ["multi-user.target"];
+      after = [ "network.target" ]
+           ++ optional (config.services.consul.enable && cfg.storageBackend == "consul") "consul.service";
+
+      restartIfChanged = false; # do not restart on "nixos-rebuild switch". It would seal the storage and disrupt the clients.
+
+      startLimitIntervalSec = 60;
+      startLimitBurst = 3;
+      serviceConfig = {
+        User = "vault";
+        Group = "vault";
+        ExecStart = "${cfg.package}/bin/vault server ${configOptions}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
+        StateDirectory = "vault";
+        # In `dev` mode vault will put its token here
+        Environment = lib.optional (cfg.dev) "HOME=/var/lib/vault";
+        PrivateDevices = true;
+        PrivateTmp = true;
+        ProtectSystem = "full";
+        ProtectHome = "read-only";
+        AmbientCapabilities = "cap_ipc_lock";
+        NoNewPrivileges = true;
+        LimitCORE = 0;
+        KillSignal = "SIGINT";
+        TimeoutStopSec = "30s";
+        Restart = "on-failure";
+      };
+
+      unitConfig.RequiresMountsFor = optional (cfg.storagePath != null) cfg.storagePath;
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/security/vaultwarden/backup.sh b/nixpkgs/nixos/modules/services/security/vaultwarden/backup.sh
new file mode 100644
index 000000000000..2a3de0ab1dee
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/vaultwarden/backup.sh
@@ -0,0 +1,17 @@
+#!/usr/bin/env bash
+
+# Based on: https://github.com/dani-garcia/vaultwarden/wiki/Backing-up-your-vault
+if ! mkdir -p "$BACKUP_FOLDER"; then
+  echo "Could not create backup folder '$BACKUP_FOLDER'" >&2
+  exit 1
+fi
+
+if [[ ! -f "$DATA_FOLDER"/db.sqlite3 ]]; then
+  echo "Could not find SQLite database file '$DATA_FOLDER/db.sqlite3'" >&2
+  exit 1
+fi
+
+sqlite3 "$DATA_FOLDER"/db.sqlite3 ".backup '$BACKUP_FOLDER/db.sqlite3'"
+cp "$DATA_FOLDER"/rsa_key.{der,pem,pub.der} "$BACKUP_FOLDER"
+cp -r "$DATA_FOLDER"/attachments "$BACKUP_FOLDER"
+cp -r "$DATA_FOLDER"/icon_cache "$BACKUP_FOLDER"
diff --git a/nixpkgs/nixos/modules/services/security/vaultwarden/default.nix b/nixpkgs/nixos/modules/services/security/vaultwarden/default.nix
new file mode 100644
index 000000000000..0517615a4c6a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/vaultwarden/default.nix
@@ -0,0 +1,242 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.vaultwarden;
+  user = config.users.users.vaultwarden.name;
+  group = config.users.groups.vaultwarden.name;
+
+  # Convert name from camel case (e.g. disable2FARemember) to upper case snake case (e.g. DISABLE_2FA_REMEMBER).
+  nameToEnvVar = name:
+    let
+      parts = builtins.split "([A-Z0-9]+)" name;
+      partsToEnvVar = parts: foldl' (key: x: let last = stringLength key - 1; in
+        if isList x then key + optionalString (key != "" && substring last 1 key != "_") "_" + head x
+        else if key != "" && elem (substring 0 1 x) lowerChars then # to handle e.g. [ "disable" [ "2FAR" ] "emember" ]
+          substring 0 last key + optionalString (substring (last - 1) 1 key != "_") "_" + substring last 1 key + toUpper x
+        else key + toUpper x) "" parts;
+    in if builtins.match "[A-Z0-9_]+" name != null then name else partsToEnvVar parts;
+
+  # Due to the different naming schemes allowed for config keys,
+  # we can only check for values consistently after converting them to their corresponding environment variable name.
+  configEnv =
+    let
+      configEnv = concatMapAttrs (name: value: optionalAttrs (value != null) {
+        ${nameToEnvVar name} = if isBool value then boolToString value else toString value;
+      }) cfg.config;
+    in { DATA_FOLDER = "/var/lib/bitwarden_rs"; } // optionalAttrs (!(configEnv ? WEB_VAULT_ENABLED) || configEnv.WEB_VAULT_ENABLED == "true") {
+      WEB_VAULT_FOLDER = "${cfg.webVaultPackage}/share/vaultwarden/vault";
+    } // configEnv;
+
+  configFile = pkgs.writeText "vaultwarden.env" (concatStrings (mapAttrsToList (name: value: "${name}=${value}\n") configEnv));
+
+  vaultwarden = cfg.package.override { inherit (cfg) dbBackend; };
+
+in {
+  imports = [
+    (mkRenamedOptionModule [ "services" "bitwarden_rs" ] [ "services" "vaultwarden" ])
+  ];
+
+  options.services.vaultwarden = with types; {
+    enable = mkEnableOption (lib.mdDoc "vaultwarden");
+
+    dbBackend = mkOption {
+      type = enum [ "sqlite" "mysql" "postgresql" ];
+      default = "sqlite";
+      description = lib.mdDoc ''
+        Which database backend vaultwarden will be using.
+      '';
+    };
+
+    backupDir = mkOption {
+      type = nullOr str;
+      default = null;
+      description = lib.mdDoc ''
+        The directory under which vaultwarden will back up its persistent data.
+      '';
+    };
+
+    config = mkOption {
+      type = attrsOf (nullOr (oneOf [ bool int str ]));
+      default = {
+        ROCKET_ADDRESS = "::1"; # default to localhost
+        ROCKET_PORT = 8222;
+      };
+      example = literalExpression ''
+        {
+          DOMAIN = "https://bitwarden.example.com";
+          SIGNUPS_ALLOWED = false;
+
+          # Vaultwarden currently recommends running behind a reverse proxy
+          # (nginx or similar) for TLS termination, see
+          # https://github.com/dani-garcia/vaultwarden/wiki/Hardening-Guide#reverse-proxying
+          # > you should avoid enabling HTTPS via vaultwarden's built-in Rocket TLS support,
+          # > especially if your instance is publicly accessible.
+          #
+          # A suitable NixOS nginx reverse proxy example config might be:
+          #
+          #     services.nginx.virtualHosts."bitwarden.example.com" = {
+          #       enableACME = true;
+          #       forceSSL = true;
+          #       locations."/" = {
+          #         proxyPass = "http://127.0.0.1:''${toString config.services.vaultwarden.config.ROCKET_PORT}";
+          #       };
+          #     };
+          ROCKET_ADDRESS = "127.0.0.1";
+          ROCKET_PORT = 8222;
+
+          ROCKET_LOG = "critical";
+
+          # This example assumes a mailserver running on localhost,
+          # thus without transport encryption.
+          # If you use an external mail server, follow:
+          #   https://github.com/dani-garcia/vaultwarden/wiki/SMTP-configuration
+          SMTP_HOST = "127.0.0.1";
+          SMTP_PORT = 25;
+          SMTP_SSL = false;
+
+          SMTP_FROM = "admin@bitwarden.example.com";
+          SMTP_FROM_NAME = "example.com Bitwarden server";
+        }
+      '';
+      description = lib.mdDoc ''
+        The configuration of vaultwarden is done through environment variables,
+        therefore it is recommended to use upper snake case (e.g. {env}`DISABLE_2FA_REMEMBER`).
+
+        However, camel case (e.g. `disable2FARemember`) is also supported:
+        The NixOS module will convert it automatically to
+        upper case snake case (e.g. {env}`DISABLE_2FA_REMEMBER`).
+        In this conversion digits (0-9) are handled just like upper case characters,
+        so `foo2` would be converted to {env}`FOO_2`.
+        Names already in this format remain unchanged, so `FOO2` remains `FOO2` if passed as such,
+        even though `foo2` would have been converted to {env}`FOO_2`.
+        This allows working around any potential future conflicting naming conventions.
+
+        Based on the attributes passed to this config option an environment file will be generated
+        that is passed to vaultwarden's systemd service.
+
+        The available configuration options can be found in
+        [the environment template file](https://github.com/dani-garcia/vaultwarden/blob/${vaultwarden.version}/.env.template).
+
+        See [](#opt-services.vaultwarden.environmentFile) for how
+        to set up access to the Admin UI to invite initial users.
+      '';
+    };
+
+    environmentFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      example = "/var/lib/vaultwarden.env";
+      description = lib.mdDoc ''
+        Additional environment file as defined in {manpage}`systemd.exec(5)`.
+
+        Secrets like {env}`ADMIN_TOKEN` and {env}`SMTP_PASSWORD`
+        may be passed to the service without adding them to the world-readable Nix store.
+
+        Note that this file needs to be available on the host on which
+        `vaultwarden` is running.
+
+        As a concrete example, to make the Admin UI available
+        (from which new users can be invited initially),
+        the secret {env}`ADMIN_TOKEN` needs to be defined as described
+        [here](https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page).
+        Set `environmentFile` to `/var/lib/vaultwarden.env`,
+        restrict its permissions with e.g.
+        `chown vaultwarden:vaultwarden /var/lib/vaultwarden.env`
+        (the `vaultwarden` user only exists after activating with
+        `enable = true;`), and give the file contents such as:
+
+        ```
+        # Admin secret token, see
+        # https://github.com/dani-garcia/vaultwarden/wiki/Enabling-admin-page
+        ADMIN_TOKEN=...copy-paste a unique generated secret token here...
+        ```
+      '';
+    };
+
+    package = mkOption {
+      type = package;
+      default = pkgs.vaultwarden;
+      defaultText = literalExpression "pkgs.vaultwarden";
+      description = lib.mdDoc "Vaultwarden package to use.";
+    };
+
+    webVaultPackage = mkOption {
+      type = package;
+      default = pkgs.vaultwarden.webvault;
+      defaultText = literalExpression "pkgs.vaultwarden.webvault";
+      description = lib.mdDoc "Web vault package to use.";
+    };
+  };
+
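+  # A minimal usage sketch (hypothetical domain and paths) combining the
+  # options above; the environment file is expected to hold secrets such as
+  # ADMIN_TOKEN:
+  #
+  #   services.vaultwarden = {
+  #     enable = true;
+  #     backupDir = "/var/backup/vaultwarden";
+  #     environmentFile = "/var/lib/vaultwarden.env";
+  #     config = {
+  #       DOMAIN = "https://bitwarden.example.com";
+  #       ROCKET_ADDRESS = "127.0.0.1";
+  #       ROCKET_PORT = 8222;
+  #       SIGNUPS_ALLOWED = false;
+  #     };
+  #   };
+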
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = cfg.backupDir != null -> cfg.dbBackend == "sqlite";
+      message = "Backups for database backends other than sqlite will need customization";
+    } ];
+
+    users.users.vaultwarden = {
+      inherit group;
+      isSystemUser = true;
+    };
+    users.groups.vaultwarden = { };
+
+    systemd.services.vaultwarden = {
+      aliases = [ "bitwarden_rs.service" ];
+      after = [ "network.target" ];
+      path = with pkgs; [ openssl ];
+      serviceConfig = {
+        User = user;
+        Group = group;
+        EnvironmentFile = [ configFile ] ++ optional (cfg.environmentFile != null) cfg.environmentFile;
+        ExecStart = "${vaultwarden}/bin/vaultwarden";
+        LimitNOFILE = "1048576";
+        PrivateTmp = "true";
+        PrivateDevices = "true";
+        ProtectHome = "true";
+        ProtectSystem = "strict";
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        StateDirectory = "bitwarden_rs";
+        StateDirectoryMode = "0700";
+        Restart = "always";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.services.backup-vaultwarden = mkIf (cfg.backupDir != null) {
+      aliases = [ "backup-bitwarden_rs.service" ];
+      description = "Backup vaultwarden";
+      environment = {
+        DATA_FOLDER = "/var/lib/bitwarden_rs";
+        BACKUP_FOLDER = cfg.backupDir;
+      };
+      path = with pkgs; [ sqlite ];
+      # if both services are started at the same time, vaultwarden fails with "database is locked"
+      before = [ "vaultwarden.service" ];
+      serviceConfig = {
+        SyslogIdentifier = "backup-vaultwarden";
+        Type = "oneshot";
+        User = mkDefault user;
+        Group = mkDefault group;
+        ExecStart = "${pkgs.bash}/bin/bash ${./backup.sh}";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.timers.backup-vaultwarden = mkIf (cfg.backupDir != null) {
+      aliases = [ "backup-bitwarden_rs.timer" ];
+      description = "Backup vaultwarden on time";
+      timerConfig = {
+        OnCalendar = mkDefault "23:00";
+        Persistent = "true";
+        Unit = "backup-vaultwarden.service";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/security/yubikey-agent.nix b/nixpkgs/nixos/modules/services/security/yubikey-agent.nix
new file mode 100644
index 000000000000..ee57ec8bf812
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/security/yubikey-agent.nix
@@ -0,0 +1,69 @@
+# Global configuration for yubikey-agent.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.yubikey-agent;
+
+  # reuse the pinentryFlavor option from the gnupg module
+  pinentryFlavor = config.programs.gnupg.agent.pinentryFlavor;
+in
+{
+  ###### interface
+
+  meta.maintainers = with maintainers; [ philandstuff rawkode jwoudenberg ];
+
+  options = {
+
+    services.yubikey-agent = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to start yubikey-agent when you log in.  Also sets
+          SSH_AUTH_SOCK to point at yubikey-agent.
+
+          Note that yubikey-agent will use whatever pinentry is
+          specified in programs.gnupg.agent.pinentryFlavor.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.yubikey-agent;
+        defaultText = literalExpression "pkgs.yubikey-agent";
+        description = lib.mdDoc ''
+          The package used for the yubikey-agent daemon.
+        '';
+      };
+    };
+  };
+
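+  # A minimal usage sketch; the pinentry flavour is a hypothetical choice and
+  # is read from the gnupg module as described above:
+  #
+  #   services.yubikey-agent.enable = true;
+  #   programs.gnupg.agent.pinentryFlavor = "curses";
+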
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+    systemd.packages = [ cfg.package ];
+
+    # This overrides the systemd user unit shipped with the
+    # yubikey-agent package
+    systemd.user.services.yubikey-agent = mkIf (pinentryFlavor != null) {
+      path = [ pkgs.pinentry.${pinentryFlavor} ];
+      wantedBy = [
+        (if pinentryFlavor == "tty" || pinentryFlavor == "curses" then
+          "default.target"
+        else
+          "graphical-session.target")
+      ];
+    };
+
+    # Yubikey-agent expects pcsd to be running in order to function.
+    services.pcscd.enable = true;
+
+    environment.extraInit = ''
+      if [ -z "$SSH_AUTH_SOCK" -a -n "$XDG_RUNTIME_DIR" ]; then
+        export SSH_AUTH_SOCK="$XDG_RUNTIME_DIR/yubikey-agent/yubikey-agent.sock"
+      fi
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/automatic-timezoned.nix b/nixpkgs/nixos/modules/services/system/automatic-timezoned.nix
new file mode 100644
index 000000000000..9bdd64dd33a3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/automatic-timezoned.nix
@@ -0,0 +1,92 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.automatic-timezoned;
+in
+{
+  options = {
+    services.automatic-timezoned = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Enable `automatic-timezoned`, a simple daemon for keeping the system
+          timezone up-to-date based on the current location. It uses geoclue2 to
+          determine the current location and systemd-timedated to actually set
+          the timezone.
+        '';
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.automatic-timezoned;
+        defaultText = literalExpression "pkgs.automatic-timezoned";
+        description = mdDoc ''
+          Which `automatic-timezoned` package to use.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    security.polkit.extraConfig = ''
+      polkit.addRule(function(action, subject) {
+        if (action.id == "org.freedesktop.timedate1.set-timezone"
+            && subject.user == "automatic-timezoned") {
+          return polkit.Result.YES;
+        }
+      });
+    '';
+
+    services.geoclue2 = {
+      enable = true;
+      appConfig.automatic-timezoned = {
+        isAllowed = true;
+        isSystem = true;
+        users = [ (toString config.ids.uids.automatic-timezoned) ];
+      };
+    };
+
+    systemd.services = {
+
+      automatic-timezoned = {
+        description = "Automatically update system timezone based on location";
+        requires = [ "automatic-timezoned-geoclue-agent.service" ];
+        after = [ "automatic-timezoned-geoclue-agent.service" ];
+        serviceConfig = {
+          Type = "exec";
+          User = "automatic-timezoned";
+          ExecStart = "${cfg.package}/bin/automatic-timezoned --zoneinfo-path=${pkgs.tzdata}/share/zoneinfo/zone1970.tab";
+        };
+        wantedBy = [ "default.target" ];
+      };
+
+      automatic-timezoned-geoclue-agent = {
+        description = "Geoclue agent for automatic-timezoned";
+        requires = [ "geoclue.service" ];
+        after = [ "geoclue.service" ];
+        serviceConfig = {
+          Type = "exec";
+          User = "automatic-timezoned";
+          ExecStart = "${pkgs.geoclue2-with-demo-agent}/libexec/geoclue-2.0/demos/agent";
+          Restart = "on-failure";
+          PrivateTmp = true;
+        };
+        wantedBy = [ "default.target" ];
+      };
+
+    };
+
+    users = {
+      users.automatic-timezoned = {
+        description = "automatic-timezoned";
+        uid = config.ids.uids.automatic-timezoned;
+        group = "automatic-timezoned";
+      };
+      groups.automatic-timezoned = {
+        gid = config.ids.gids.automatic-timezoned;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/bpftune.nix b/nixpkgs/nixos/modules/services/system/bpftune.nix
new file mode 100644
index 000000000000..d656a19c0ad1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/bpftune.nix
@@ -0,0 +1,22 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.bpftune;
+in
+{
+  meta = {
+    maintainers = with lib.maintainers; [ nickcao ];
+  };
+
+  options = {
+    services.bpftune = {
+      enable = lib.mkEnableOption (lib.mdDoc "bpftune BPF driven auto-tuning");
+
+      package = lib.mkPackageOptionMD pkgs "bpftune" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.packages = [ cfg.package ];
+    systemd.services.bpftune.wantedBy = [ "multi-user.target" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/cachix-agent/default.nix b/nixpkgs/nixos/modules/services/system/cachix-agent/default.nix
new file mode 100644
index 000000000000..06494ddb631a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/cachix-agent/default.nix
@@ -0,0 +1,80 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cachix-agent;
+in {
+  meta.maintainers = [ lib.maintainers.domenkozar ];
+
+  options.services.cachix-agent = {
+    enable = mkEnableOption (lib.mdDoc "Cachix Deploy Agent: https://docs.cachix.org/deploy/");
+
+    name = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Agent name, usually same as the hostname";
+      default = config.networking.hostName;
+      defaultText = "config.networking.hostName";
+    };
+
+    verbose = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "Enable verbose output";
+      default = false;
+    };
+
+    profile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Profile name, defaults to 'system' (NixOS).";
+    };
+
+    host = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Cachix uri to use.";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.cachix;
+      defaultText = literalExpression "pkgs.cachix";
+      description = lib.mdDoc "Cachix Client package to use.";
+    };
+
+    credentialsFile = mkOption {
+      type = types.path;
+      default = "/etc/cachix-agent.token";
+      description = lib.mdDoc ''
+        Required file that must contain a line of the form `CACHIX_AGENT_TOKEN=...`.
+      '';
+    };
+  };
+
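+  # A minimal usage sketch (hypothetical agent name); /etc/cachix-agent.token
+  # must contain a line of the form CACHIX_AGENT_TOKEN=<token>:
+  #
+  #   services.cachix-agent = {
+  #     enable = true;
+  #     name = "my-host";
+  #   };
+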
+  config = mkIf cfg.enable {
+    systemd.services.cachix-agent = {
+      description = "Cachix Deploy Agent";
+      after = ["network-online.target"];
+      path = [ config.nix.package ];
+      wantedBy = [ "multi-user.target" ];
+
+      # Cachix requires $USER to be set
+      environment.USER = "root";
+
+      # don't stop the service if the unit disappears
+      unitConfig.X-StopOnRemoval = false;
+
+      serviceConfig = {
+        # we don't want to kill children processes as those are deployments
+        KillMode = "process";
+        Restart = "always";
+        RestartSec = 5;
+        EnvironmentFile = cfg.credentialsFile;
+        ExecStart = ''
+          ${cfg.package}/bin/cachix ${lib.optionalString cfg.verbose "--verbose"} ${lib.optionalString (cfg.host != null) "--host ${cfg.host}"} \
+            deploy agent ${cfg.name} ${optionalString (cfg.profile != null) cfg.profile}
+        '';
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/cachix-watch-store.nix b/nixpkgs/nixos/modules/services/system/cachix-watch-store.nix
new file mode 100644
index 000000000000..89157b460b9a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/cachix-watch-store.nix
@@ -0,0 +1,93 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cachix-watch-store;
+in
+{
+  meta.maintainers = [ lib.maintainers.jfroche lib.maintainers.domenkozar ];
+
+  options.services.cachix-watch-store = {
+    enable = mkEnableOption (lib.mdDoc "Cachix Watch Store: https://docs.cachix.org");
+
+    cacheName = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Cachix binary cache name";
+    };
+
+    cachixTokenFile = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Required file that must contain the Cachix authentication token.
+      '';
+    };
+
+    compressionLevel = mkOption {
+      type = types.nullOr types.int;
+      description = lib.mdDoc "The compression level for ZSTD compression (between 0 and 16)";
+      default = null;
+    };
+
+    jobs = mkOption {
+      type = types.nullOr types.int;
+      description = lib.mdDoc "Number of threads used for pushing store paths";
+      default = null;
+    };
+
+    host = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Cachix host to connect to";
+    };
+
+    verbose = mkOption {
+      type = types.bool;
+      description = lib.mdDoc "Enable verbose output";
+      default = false;
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.cachix;
+      defaultText = literalExpression "pkgs.cachix";
+      description = lib.mdDoc "Cachix Client package to use.";
+    };
+
+  };
+
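+  # A minimal usage sketch (hypothetical cache name and token path):
+  #
+  #   services.cachix-watch-store = {
+  #     enable = true;
+  #     cacheName = "my-cache";
+  #     cachixTokenFile = "/run/keys/cachix-token";
+  #     compressionLevel = 8;
+  #   };
+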
+  config = mkIf cfg.enable {
+    systemd.services.cachix-watch-store-agent = {
+      description = "Cachix watch store Agent";
+      after = [ "network-online.target" ];
+      path = [ config.nix.package ];
+      wantedBy = [ "multi-user.target" ];
+      unitConfig = {
+        # allow to restart indefinitely
+        StartLimitIntervalSec = 0;
+      };
+      serviceConfig = {
+        # don't put too much stress on the machine when restarting
+        RestartSec = 1;
+        # we don't want to kill children processes as those are deployments
+        KillMode = "process";
+        Restart = "on-failure";
+        DynamicUser = true;
+        LoadCredential = [
+          "cachix-token:${toString cfg.cachixTokenFile}"
+        ];
+      };
+      script =
+        let
+          command = [ "${cfg.package}/bin/cachix" ]
+            ++ (lib.optional cfg.verbose "--verbose") ++ (lib.optionals (cfg.host != null) [ "--host" cfg.host ])
+            ++ [ "watch-store" ] ++ (lib.optionals (cfg.compressionLevel != null) [ "--compression-level" (toString cfg.compressionLevel) ])
+            ++ (lib.optionals (cfg.jobs != null) [ "--jobs" (toString cfg.jobs) ]) ++ [ cfg.cacheName ];
+        in
+        ''
+          export CACHIX_AUTH_TOKEN="$(<"$CREDENTIALS_DIRECTORY/cachix-token")"
+          ${lib.escapeShellArgs command}
+        '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/cloud-init.nix b/nixpkgs/nixos/modules/services/system/cloud-init.nix
new file mode 100644
index 000000000000..d782bb1a3666
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/cloud-init.nix
@@ -0,0 +1,239 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cloud-init;
+  path = with pkgs; [
+    cloud-init
+    iproute2
+    nettools
+    openssh
+    shadow
+    util-linux
+    busybox
+  ]
+  ++ optional cfg.btrfs.enable btrfs-progs
+  ++ optional cfg.ext4.enable e2fsprogs
+  ++ optional cfg.xfs.enable xfsprogs
+  ;
+  settingsFormat = pkgs.formats.yaml { };
+  cfgfile = settingsFormat.generate "cloud.cfg" cfg.settings;
+in
+{
+  options = {
+    services.cloud-init = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Enable the cloud-init service. This service reads
+          configuration metadata in a cloud environment and configures
+          the machine according to this metadata.
+
+          This configuration is not completely compatible with the
+          NixOS way of doing configuration, as configuration done by
+          cloud-init might be overridden by a subsequent nixos-rebuild
+          call. However, some parts of cloud-init fall outside of
+          NixOS's responsibility, like filesystem resizing and ssh
+          public key provisioning, and cloud-init is useful for those
+          parts. Be aware, though, that using cloud-init in NixOS may
+          come at some cost.
+        '';
+      };
+
+      btrfs.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Allow the cloud-init service to operate on `btrfs` filesystems.
+        '';
+      };
+
+      ext4.enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc ''
+          Allow the cloud-init service to operate on `ext4` filesystems.
+        '';
+      };
+
+      xfs.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Allow the cloud-init service to operate on `xfs` filesystems.
+        '';
+      };
+
+      network.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc ''
+          Allow the cloud-init service to configure network interfaces
+          through systemd-networkd.
+        '';
+      };
+
+      settings = mkOption {
+        description = mdDoc ''
+          Structured cloud-init configuration.
+        '';
+        type = types.submodule {
+          freeformType = settingsFormat.type;
+        };
+        default = { };
+      };
+
+      config = mkOption {
+        type = types.str;
+        default = "";
+        description = mdDoc ''
+          Raw cloud-init configuration.
+
+          Takes precedence over the `settings` option if set.
+        '';
+      };
+
+    };
+
+  };
+
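+  # A minimal usage sketch; datasource_list is a hypothetical freeform key
+  # passed through to cloud.cfg via the settings option above:
+  #
+  #   services.cloud-init = {
+  #     enable = true;
+  #     network.enable = true;   # let cloud-init drive systemd-networkd
+  #     settings.datasource_list = [ "Ec2" ];
+  #   };
+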
+  config = mkIf cfg.enable {
+    services.cloud-init.settings = {
+      system_info = mkDefault {
+        distro = "nixos";
+        network = {
+          renderers = [ "networkd" ];
+        };
+      };
+
+      users = mkDefault [ "root" ];
+      disable_root = mkDefault false;
+      preserve_hostname = mkDefault false;
+
+      cloud_init_modules = mkDefault [
+        "migrator"
+        "seed_random"
+        "bootcmd"
+        "write-files"
+        "growpart"
+        "resizefs"
+        "update_hostname"
+        "resolv_conf"
+        "ca-certs"
+        "rsyslog"
+        "users-groups"
+      ];
+
+      cloud_config_modules = mkDefault [
+        "disk_setup"
+        "mounts"
+        "ssh-import-id"
+        "set-passwords"
+        "timezone"
+        "disable-ec2-metadata"
+        "runcmd"
+        "ssh"
+      ];
+
+      cloud_final_modules = mkDefault [
+        "rightscale_userdata"
+        "scripts-vendor"
+        "scripts-per-once"
+        "scripts-per-boot"
+        "scripts-per-instance"
+        "scripts-user"
+        "ssh-authkey-fingerprints"
+        "keys-to-console"
+        "phone-home"
+        "final-message"
+        "power-state-change"
+      ];
+    };
+
+    environment.etc."cloud/cloud.cfg" =
+      if cfg.config == "" then
+        { source = cfgfile; }
+      else
+        { text = cfg.config; }
+    ;
+
+    systemd.network.enable = cfg.network.enable;
+
+    systemd.services.cloud-init-local = {
+      description = "Initial cloud-init job (pre-networking)";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "systemd-networkd.service" ];
+      path = path;
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.cloud-init}/bin/cloud-init init --local";
+        RemainAfterExit = "yes";
+        TimeoutSec = "infinity";
+        StandardOutput = "journal+console";
+      };
+    };
+
+    systemd.services.cloud-init = {
+      description = "Initial cloud-init job (metadata service crawler)";
+      wantedBy = [ "multi-user.target" ];
+      wants = [
+        "network-online.target"
+        "cloud-init-local.service"
+        "sshd.service"
+        "sshd-keygen.service"
+      ];
+      after = [ "network-online.target" "cloud-init-local.service" ];
+      before = [ "sshd.service" "sshd-keygen.service" ];
+      requires = [ "network.target" ];
+      path = path;
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.cloud-init}/bin/cloud-init init";
+        RemainAfterExit = "yes";
+        TimeoutSec = "infinity";
+        StandardOutput = "journal+console";
+      };
+    };
+
+    systemd.services.cloud-config = {
+      description = "Apply the settings specified in cloud-config";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" "syslog.target" "cloud-config.target" ];
+
+      path = path;
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.cloud-init}/bin/cloud-init modules --mode=config";
+        RemainAfterExit = "yes";
+        TimeoutSec = "infinity";
+        StandardOutput = "journal+console";
+      };
+    };
+
+    systemd.services.cloud-final = {
+      description = "Execute cloud user/final scripts";
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" "syslog.target" "cloud-config.service" "rc-local.service" ];
+      requires = [ "cloud-config.target" ];
+      path = path;
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.cloud-init}/bin/cloud-init modules --mode=final";
+        RemainAfterExit = "yes";
+        TimeoutSec = "infinity";
+        StandardOutput = "journal+console";
+      };
+    };
+
+    systemd.targets.cloud-config = {
+      description = "Cloud-config availability";
+      requires = [ "cloud-init-local.service" "cloud-init.service" ];
+    };
+  };
+
+  meta.maintainers = [ maintainers.zimbatm ];
+}
diff --git a/nixpkgs/nixos/modules/services/system/dbus.nix b/nixpkgs/nixos/modules/services/system/dbus.nix
new file mode 100644
index 000000000000..8d5b25e61762
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/dbus.nix
@@ -0,0 +1,210 @@
+# D-Bus configuration and system bus daemon.
+
+{ config, lib, pkgs, ... }:
+
+let
+
+  cfg = config.services.dbus;
+
+  homeDir = "/run/dbus";
+
+  configDir = pkgs.makeDBusConf {
+    inherit (cfg) apparmor;
+    suidHelper = "${config.security.wrapperDir}/dbus-daemon-launch-helper";
+    serviceDirectories = cfg.packages;
+  };
+
+  inherit (lib) mkOption mkEnableOption mkIf mkMerge types;
+
+in
+
+{
+  options = {
+
+    boot.initrd.systemd.dbus = {
+      enable = mkEnableOption (lib.mdDoc "dbus in stage 1");
+    };
+
+    services.dbus = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        internal = true;
+        description = lib.mdDoc ''
+          Whether to start the D-Bus message bus daemon, which is
+          required by many other system services and applications.
+        '';
+      };
+
+      implementation = mkOption {
+        type = types.enum [ "dbus" "broker" ];
+        default = "dbus";
+        description = lib.mdDoc ''
+          The implementation to use for the message bus defined by the D-Bus specification.
+          Can be either the classic dbus daemon or dbus-broker, which aims to provide high
+          performance and reliability, while keeping compatibility with the D-Bus
+          reference implementation.
+        '';
+
+      };
+
+      packages = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        description = lib.mdDoc ''
+          Packages whose D-Bus configuration files should be included in
+          the configuration of the D-Bus system-wide or session-wide
+          message bus.  Specifically, files in the following directories
+          will be included into their respective DBus configuration paths:
+          {file}`«pkg»/etc/dbus-1/system.d`
+          {file}`«pkg»/share/dbus-1/system.d`
+          {file}`«pkg»/share/dbus-1/system-services`
+          {file}`«pkg»/etc/dbus-1/session.d`
+          {file}`«pkg»/share/dbus-1/session.d`
+          {file}`«pkg»/share/dbus-1/services`
+        '';
+      };
+
+      apparmor = mkOption {
+        type = types.enum [ "enabled" "disabled" "required" ];
+        description = lib.mdDoc ''
+          AppArmor mode for dbus.
+
+          `enabled` enables mediation when it's
+          supported in the kernel, `disabled`
+          always disables AppArmor even with kernel support, and
+          `required` fails when AppArmor was not found
+          in the kernel.
+        '';
+        default = "disabled";
+      };
+    };
+  };
+
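+  # A usage sketch: switching the message bus implementation to dbus-broker
+  # and enabling AppArmor mediation when the kernel supports it (the enable
+  # option itself is internal and set by other modules):
+  #
+  #   services.dbus.implementation = "broker";
+  #   services.dbus.apparmor = "enabled";
+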
+  config = mkIf cfg.enable (mkMerge [
+    {
+      environment.etc."dbus-1".source = configDir;
+
+      environment.pathsToLink = [
+        "/etc/dbus-1"
+        "/share/dbus-1"
+      ];
+
+      users.users.messagebus = {
+        uid = config.ids.uids.messagebus;
+        description = "D-Bus system message bus daemon user";
+        home = homeDir;
+        group = "messagebus";
+      };
+
+      users.groups.messagebus.gid = config.ids.gids.messagebus;
+
+      # You still need the dbus reference implementation installed to use dbus-broker
+      systemd.packages = [
+        pkgs.dbus
+      ];
+
+      services.dbus.packages = [
+        pkgs.dbus
+        config.system.path
+      ];
+
+      systemd.user.sockets.dbus.wantedBy = [
+        "sockets.target"
+      ];
+    }
+
+    (mkIf config.boot.initrd.systemd.dbus.enable {
+      boot.initrd.systemd = {
+        users.messagebus = { };
+        groups.messagebus = { };
+        contents."/etc/dbus-1".source = pkgs.makeDBusConf {
+          inherit (cfg) apparmor;
+          suidHelper = "/bin/false";
+          serviceDirectories = [ pkgs.dbus ];
+        };
+        packages = [ pkgs.dbus ];
+        storePaths = [ "${pkgs.dbus}/bin/dbus-daemon" ];
+        targets.sockets.wants = [ "dbus.socket" ];
+      };
+    })
+
+    (mkIf (cfg.implementation == "dbus") {
+      environment.systemPackages = [
+        pkgs.dbus
+      ];
+
+      security.wrappers.dbus-daemon-launch-helper = {
+        source = "${pkgs.dbus}/libexec/dbus-daemon-launch-helper";
+        owner = "root";
+        group = "messagebus";
+        setuid = true;
+        setgid = false;
+        permissions = "u+rx,g+rx,o-rx";
+      };
+
+      systemd.services.dbus = {
+        # Don't restart dbus-daemon. Bad things tend to happen if we do.
+        reloadIfChanged = true;
+        restartTriggers = [
+          configDir
+        ];
+        environment = {
+          LD_LIBRARY_PATH = config.system.nssModules.path;
+        };
+      };
+
+      systemd.user.services.dbus = {
+        # Don't restart dbus-daemon. Bad things tend to happen if we do.
+        reloadIfChanged = true;
+        restartTriggers = [
+          configDir
+        ];
+      };
+
+    })
+
+    (mkIf (cfg.implementation == "broker") {
+      environment.systemPackages = [
+        pkgs.dbus-broker
+      ];
+
+      systemd.packages = [
+        pkgs.dbus-broker
+      ];
+
+      # Just to be sure we don't restart through the unit alias
+      systemd.services.dbus.reloadIfChanged = true;
+      systemd.user.services.dbus.reloadIfChanged = true;
+
+      # NixOS Systemd Module doesn't respect 'Install'
+      # https://github.com/NixOS/nixpkgs/issues/108643
+      systemd.services.dbus-broker = {
+        aliases = [
+          "dbus.service"
+        ];
+        # Don't restart dbus. Bad things tend to happen if we do.
+        reloadIfChanged = true;
+        restartTriggers = [
+          configDir
+        ];
+        environment = {
+          LD_LIBRARY_PATH = config.system.nssModules.path;
+        };
+      };
+
+      systemd.user.services.dbus-broker = {
+        aliases = [
+          "dbus.service"
+        ];
+        # Don't restart dbus. Bad things tend to happen if we do.
+        reloadIfChanged = true;
+        restartTriggers = [
+          configDir
+        ];
+      };
+    })
+
+  ]);
+}
diff --git a/nixpkgs/nixos/modules/services/system/earlyoom.nix b/nixpkgs/nixos/modules/services/system/earlyoom.nix
new file mode 100644
index 000000000000..38805eba2ca1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/earlyoom.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.earlyoom;
+
+  inherit (lib)
+    mkDefault mkEnableOption mkIf mkOption types
+    mkRemovedOptionModule literalExpression
+    escapeShellArg concatStringsSep optional optionalString;
+
+in
+{
+  options.services.earlyoom = {
+    enable = mkEnableOption (lib.mdDoc "early out of memory killing");
+
+    freeMemThreshold = mkOption {
+      type = types.ints.between 1 100;
+      default = 10;
+      description = lib.mdDoc ''
+        Minimum available memory (in percent).
+
+        If the available memory falls below this threshold (and the analog is true for
+        {option}`freeSwapThreshold`) the killing begins.
+        SIGTERM is sent first to the process that uses the most memory; then, if the available
+        memory falls below {option}`freeMemKillThreshold` (and the analog is true for
+        {option}`freeSwapKillThreshold`), SIGKILL is sent.
+
+        See [README](https://github.com/rfjakob/earlyoom#command-line-options) for details.
+      '';
+    };
+
+    freeMemKillThreshold = mkOption {
+      type = types.nullOr (types.ints.between 1 100);
+      default = null;
+      description = lib.mdDoc ''
+        Minimum available memory (in percent) before sending SIGKILL.
+        If unset, this defaults to half of {option}`freeMemThreshold`.
+
+        See the description of [](#opt-services.earlyoom.freeMemThreshold).
+      '';
+    };
+
+    freeSwapThreshold = mkOption {
+      type = types.ints.between 1 100;
+      default = 10;
+      description = lib.mdDoc ''
+        Minimum free swap space (in percent) before sending SIGTERM.
+
+        See the description of [](#opt-services.earlyoom.freeMemThreshold).
+      '';
+    };
+
+    freeSwapKillThreshold = mkOption {
+      type = types.nullOr (types.ints.between 1 100);
+      default = null;
+      description = lib.mdDoc ''
+        Minimum free swap space (in percent) before sending SIGKILL.
+        If unset, this defaults to half of {option}`freeSwapThreshold`.
+
+        See the description of [](#opt-services.earlyoom.freeMemThreshold).
+      '';
+    };
+
+    enableDebugInfo = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable debugging messages.
+      '';
+    };
+
+    enableNotifications = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Send notifications about killed processes via the system D-Bus.
+
+        WARNING: enabling this option (while convenient) should *not* be done on a
+        machine where you do not trust the other users as it allows any other
+        local user to DoS your session by spamming notifications.
+
+        To actually see the notifications in your GUI session, you need to have
+        `systembus-notify` running as your user, which this
+        option handles by enabling {option}`services.systembus-notify`.
+
+        See [README](https://github.com/rfjakob/earlyoom#notifications) for details.
+      '';
+    };
+
+    killHook = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = literalExpression ''
+        pkgs.writeShellScript "earlyoom-kill-hook" '''
+          echo "Process $EARLYOOM_NAME ($EARLYOOM_PID) was killed" >> /path/to/log
+        '''
+      '';
+      description = lib.mdDoc ''
+        An absolute path to an executable to be run for each process killed.
+        Some environment variables are available, see
+        [README](https://github.com/rfjakob/earlyoom#notifications) and
+        [the man page](https://github.com/rfjakob/earlyoom/blob/master/MANPAGE.md#-n-pathtoscript)
+        for details.
+      '';
+    };
+
+    reportInterval = mkOption {
+      type = types.int;
+      default = 3600;
+      example = 0;
+      description = lib.mdDoc "Interval (in seconds) at which a memory report is printed (set to 0 to disable).";
+    };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "-g" "--prefer '(^|/)(java|chromium)$'" ];
+      description = lib.mdDoc "Extra command-line arguments to be passed to earlyoom.";
+    };
+  };
+
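+  # A usage sketch with hypothetical thresholds, following the SIGTERM/SIGKILL
+  # behaviour described in the option docs above:
+  #
+  #   services.earlyoom = {
+  #     enable = true;
+  #     freeMemThreshold = 5;        # SIGTERM below 5 % free RAM
+  #     freeMemKillThreshold = 2;    # SIGKILL below 2 % free RAM
+  #     enableNotifications = true;  # also enables services.systembus-notify
+  #   };
+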
+  imports = [
+    (mkRemovedOptionModule [ "services" "earlyoom" "useKernelOOMKiller" ] ''
+      This option is deprecated and ignored by earlyoom since 1.2.
+    '')
+    (mkRemovedOptionModule [ "services" "earlyoom" "notificationsCommand" ] ''
+      This option was removed in earlyoom 1.6, but was reimplemented in 1.7
+      and is available as the new option `services.earlyoom.killHook`.
+    '')
+    (mkRemovedOptionModule [ "services" "earlyoom" "ignoreOOMScoreAdjust" ] ''
+      This option is deprecated and ignored by earlyoom since 1.7.
+    '')
+  ];
+
+  config = mkIf cfg.enable {
+    services.systembus-notify.enable = mkDefault cfg.enableNotifications;
+
+    systemd.services.earlyoom = {
+      description = "Early OOM Daemon for Linux";
+      wantedBy = [ "multi-user.target" ];
+      path = optional cfg.enableNotifications pkgs.dbus;
+      serviceConfig = {
+        StandardError = "journal";
+        ExecStart = concatStringsSep " " ([
+          "${pkgs.earlyoom}/bin/earlyoom"
+          ("-m ${toString cfg.freeMemThreshold}"
+            + optionalString (cfg.freeMemKillThreshold != null) ",${toString cfg.freeMemKillThreshold}")
+          ("-s ${toString cfg.freeSwapThreshold}"
+            + optionalString (cfg.freeSwapKillThreshold != null) ",${toString cfg.freeSwapKillThreshold}")
+          "-r ${toString cfg.reportInterval}"
+        ]
+        ++ optional cfg.enableDebugInfo "-d"
+        ++ optional cfg.enableNotifications "-n"
+        ++ optional (cfg.killHook != null) "-N ${escapeShellArg cfg.killHook}"
+        ++ cfg.extraArgs
+        );
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/kerberos/default.nix b/nixpkgs/nixos/modules/services/system/kerberos/default.nix
new file mode 100644
index 000000000000..4ed48e463741
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/kerberos/default.nix
@@ -0,0 +1,75 @@
+{config, lib, ...}:
+
+let
+  inherit (lib) mkOption mkIf types length attrNames;
+  cfg = config.services.kerberos_server;
+  kerberos = config.krb5.kerberos;
+
+  aclEntry = {
+    options = {
+      principal = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Which principal the rule applies to";
+      };
+      access = mkOption {
+        type = types.either
+          (types.listOf (types.enum ["add" "cpw" "delete" "get" "list" "modify"]))
+          (types.enum ["all"]);
+        default = "all";
+        description = lib.mdDoc "The changes the principal is allowed to make.";
+      };
+      target = mkOption {
+        type = types.str;
+        default = "*";
+        description = lib.mdDoc "The principals that 'access' applies to.";
+      };
+    };
+  };
+
+  realm = {
+    options = {
+      acl = mkOption {
+        type = types.listOf (types.submodule aclEntry);
+        default = [
+          { principal = "*/admin"; access = "all"; }
+          { principal = "admin"; access = "all"; }
+        ];
+        description = lib.mdDoc ''
+          The privileges granted to a user.
+        '';
+      };
+    };
+  };
+in
+
+{
+  imports = [
+    ./mit.nix
+    ./heimdal.nix
+  ];
+
+  ###### interface
+  options = {
+    services.kerberos_server = {
+      enable = lib.mkEnableOption (lib.mdDoc "the kerberos authentication server");
+
+      realms = mkOption {
+        type = types.attrsOf (types.submodule realm);
+        description = lib.mdDoc ''
+          The realm(s) to serve keys for.
+        '';
+      };
+    };
+  };
+
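+  # A usage sketch with a hypothetical realm, granting full access to admin
+  # principals and read-only access to a "backup" principal:
+  #
+  #   services.kerberos_server = {
+  #     enable = true;
+  #     realms."EXAMPLE.COM" = {
+  #       acl = [
+  #         { principal = "*/admin"; access = "all"; }
+  #         { principal = "backup"; access = [ "get" "list" ]; }
+  #       ];
+  #     };
+  #   };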
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ kerberos ];
+    assertions = [{
+      assertion = length (attrNames cfg.realms) <= 1;
+      message = "Only one realm per server is currently supported.";
+    }];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/kerberos/heimdal.nix b/nixpkgs/nixos/modules/services/system/kerberos/heimdal.nix
new file mode 100644
index 000000000000..837c59caa562
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/kerberos/heimdal.nix
@@ -0,0 +1,68 @@
+{ pkgs, config, lib, ... } :
+
+let
+  inherit (lib) mkIf concatStringsSep concatMapStrings toList mapAttrs
+    mapAttrsToList;
+  cfg = config.services.kerberos_server;
+  kerberos = config.krb5.kerberos;
+  stateDir = "/var/heimdal";
+  aclFiles = mapAttrs
+    (name: {acl, ...}: pkgs.writeText "${name}.acl" (concatMapStrings ((
+      {principal, access, target, ...} :
+      "${principal}\t${concatStringsSep "," (toList access)}\t${target}\n"
+    )) acl)) cfg.realms;
+
+  kdcConfigs = mapAttrsToList (name: value: ''
+    database = {
+      dbname = ${stateDir}/heimdal
+      acl_file = ${value}
+    }
+  '') aclFiles;
+  kdcConfFile = pkgs.writeText "kdc.conf" ''
+    [kdc]
+    ${concatStringsSep "\n" kdcConfigs}
+  '';
+in
+
+{
+  # No documentation about correct triggers, so guessing at them.
+
+  config = mkIf (cfg.enable && kerberos == pkgs.heimdal) {
+    systemd.services.kadmind = {
+      description = "Kerberos Administration Daemon";
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        mkdir -m 0755 -p ${stateDir}
+      '';
+      serviceConfig.ExecStart =
+        "${kerberos}/libexec/heimdal/kadmind --config-file=/etc/heimdal-kdc/kdc.conf";
+      restartTriggers = [ kdcConfFile ];
+    };
+
+    systemd.services.kdc = {
+      description = "Key Distribution Center daemon";
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        mkdir -m 0755 -p ${stateDir}
+      '';
+      serviceConfig.ExecStart =
+        "${kerberos}/libexec/heimdal/kdc --config-file=/etc/heimdal-kdc/kdc.conf";
+      restartTriggers = [ kdcConfFile ];
+    };
+
+    systemd.services.kpasswdd = {
+      description = "Kerberos Password Changing daemon";
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        mkdir -m 0755 -p ${stateDir}
+      '';
+      serviceConfig.ExecStart = "${kerberos}/libexec/heimdal/kpasswdd";
+      restartTriggers = [ kdcConfFile ];
+    };
+
+    environment.etc = {
+      # Can be set via the --config-file option to KDC
+      "heimdal-kdc/kdc.conf".source = kdcConfFile;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/kerberos/mit.nix b/nixpkgs/nixos/modules/services/system/kerberos/mit.nix
new file mode 100644
index 000000000000..112000140453
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/kerberos/mit.nix
@@ -0,0 +1,68 @@
+{ pkgs, config, lib, ... } :
+
+let
+  inherit (lib) mkIf concatStrings concatStringsSep concatMapStrings toList
+    mapAttrs mapAttrsToList;
+  cfg = config.services.kerberos_server;
+  kerberos = config.krb5.kerberos;
+  stateDir = "/var/lib/krb5kdc";
+  PIDFile = "/run/kdc.pid";
+  aclMap = {
+    add = "a"; cpw = "c"; delete = "d"; get = "i"; list = "l"; modify = "m";
+    all = "*";
+  };
+  aclFiles = mapAttrs
+    (name: {acl, ...}: (pkgs.writeText "${name}.acl" (concatMapStrings (
+      {principal, access, target, ...} :
+      let access_code = map (a: aclMap.${a}) (toList access); in
+      "${principal} ${concatStrings access_code} ${target}\n"
+    ) acl))) cfg.realms;
+  kdcConfigs = mapAttrsToList (name: value: ''
+    ${name} = {
+      acl_file = ${value}
+    }
+  '') aclFiles;
+  kdcConfFile = pkgs.writeText "kdc.conf" ''
+    [realms]
+    ${concatStringsSep "\n" kdcConfigs}
+  '';
+  env = {
+    # What Debian uses, could possibly link directly to Nix store?
+    KRB5_KDC_PROFILE = "/etc/krb5kdc/kdc.conf";
+  };
+in
+
+{
+  config = mkIf (cfg.enable && kerberos == pkgs.krb5) {
+    systemd.services.kadmind = {
+      description = "Kerberos Administration Daemon";
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        mkdir -m 0755 -p ${stateDir}
+      '';
+      serviceConfig.ExecStart = "${kerberos}/bin/kadmind -nofork";
+      restartTriggers = [ kdcConfFile ];
+      environment = env;
+    };
+
+    systemd.services.kdc = {
+      description = "Key Distribution Center daemon";
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        mkdir -m 0755 -p ${stateDir}
+      '';
+      serviceConfig = {
+        Type = "forking";
+        PIDFile = PIDFile;
+        ExecStart = "${kerberos}/bin/krb5kdc -P ${PIDFile}";
+      };
+      restartTriggers = [ kdcConfFile ];
+      environment = env;
+    };
+
+    environment.etc = {
+      "krb5kdc/kdc.conf".source = kdcConfFile;
+    };
+    environment.variables = env;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/localtimed.nix b/nixpkgs/nixos/modules/services/system/localtimed.nix
new file mode 100644
index 000000000000..345bdbd8dda0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/localtimed.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.localtimed;
+in {
+  imports = [ (lib.mkRenamedOptionModule [ "services" "localtime" ] [ "services" "localtimed" ]) ];
+
+  options = {
+    services.localtimed = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable `localtimed`, a simple daemon for keeping the
+          system timezone up-to-date based on the current location. It uses
+          geoclue2 to determine the current location.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.geoclue2.appConfig.localtimed = {
+      isAllowed = true;
+      isSystem = true;
+      users = [ (toString config.ids.uids.localtimed) ];
+    };
+
+    # Install the polkit rules.
+    environment.systemPackages = [ pkgs.localtime ];
+
+    systemd.services.localtimed = {
+      wantedBy = [ "multi-user.target" ];
+      partOf = [ "localtimed-geoclue-agent.service" ];
+      after = [ "localtimed-geoclue-agent.service" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.localtime}/bin/localtimed";
+        Restart = "on-failure";
+        Type = "exec";
+        User = "localtimed";
+      };
+    };
+
+    systemd.services.localtimed-geoclue-agent = {
+      wantedBy = [ "multi-user.target" ];
+      partOf = [ "geoclue.service" ];
+      after = [ "geoclue.service" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.geoclue2-with-demo-agent}/libexec/geoclue-2.0/demos/agent";
+        Restart = "on-failure";
+        Type = "exec";
+        User = "localtimed";
+      };
+    };
+
+    users = {
+      users.localtimed = {
+        uid = config.ids.uids.localtimed;
+        group = "localtimed";
+      };
+      groups.localtimed.gid = config.ids.gids.localtimed;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/nix-daemon.nix b/nixpkgs/nixos/modules/services/system/nix-daemon.nix
new file mode 100644
index 000000000000..ce255cd8d0a4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/nix-daemon.nix
@@ -0,0 +1,259 @@
+/*
+  Declares what makes the nix-daemon work on systemd.
+
+  See also
+   - nixos/modules/config/nix.nix: the nix.conf
+   - nixos/modules/config/nix-remote-build.nix: the nix.conf
+*/
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.nix;
+
+  nixPackage = cfg.package.out;
+
+  isNixAtLeast = versionAtLeast (getVersion nixPackage);
+
+  makeNixBuildUser = nr: {
+    name = "nixbld${toString nr}";
+    value = {
+      description = "Nix build user ${toString nr}";
+
+      /*
+        For consistency with the setgid(2), setuid(2), and setgroups(2)
+        calls in `libstore/build.cc', don't add any supplementary group
+        here except "nixbld".
+      */
+      uid = builtins.add config.ids.uids.nixbld nr;
+      isSystemUser = true;
+      group = "nixbld";
+      extraGroups = [ "nixbld" ];
+    };
+  };
+
+  nixbldUsers = listToAttrs (map makeNixBuildUser (range 1 cfg.nrBuildUsers));
+
+in
+
+{
+  imports = [
+    (mkRenamedOptionModuleWith { sinceRelease = 2205; from = [ "nix" "daemonIONiceLevel" ]; to = [ "nix" "daemonIOSchedPriority" ]; })
+    (mkRenamedOptionModuleWith { sinceRelease = 2211; from = [ "nix" "readOnlyStore" ]; to = [ "boot" "readOnlyNixStore" ]; })
+    (mkRemovedOptionModule [ "nix" "daemonNiceLevel" ] "Consider nix.daemonCPUSchedPolicy instead.")
+  ];
+
+  ###### interface
+
+  options = {
+
+    nix = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable Nix.
+          Disabling Nix makes the system hard to modify and the Nix programs and configuration will not be made available by NixOS itself.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.nix;
+        defaultText = literalExpression "pkgs.nix";
+        description = lib.mdDoc ''
+          This option specifies the Nix package instance to use throughout the system.
+        '';
+      };
+
+      daemonCPUSchedPolicy = mkOption {
+        type = types.enum [ "other" "batch" "idle" ];
+        default = "other";
+        example = "batch";
+        description = lib.mdDoc ''
+          Nix daemon process CPU scheduling policy. This policy propagates to
+          build processes. `other` is the default scheduling
+          policy for regular tasks. The `batch` policy is
+          similar to `other`, but optimised for
+          non-interactive tasks. `idle` is for extremely
+          low-priority tasks that should only be run when no other task
+          requires CPU time.
+
+          Please note that while using the `idle` policy may
+          greatly improve responsiveness of a system performing expensive
+          builds, it may also slow down and potentially starve crucial
+          configuration updates during load.
+
+          `idle` may therefore be a sensible policy for
+          systems that experience only intermittent phases of high CPU load,
+          such as desktop or portable computers used interactively. Other
+          systems should use the `other` or
+          `batch` policy instead.
+
+          For more fine-grained resource control, please refer to
+          {manpage}`systemd.resource-control(5)` and adjust
+          {option}`systemd.services.nix-daemon` directly.
+        '';
+      };
+
+      daemonIOSchedClass = mkOption {
+        type = types.enum [ "best-effort" "idle" ];
+        default = "best-effort";
+        example = "idle";
+        description = lib.mdDoc ''
+          Nix daemon process I/O scheduling class. This class propagates to
+          build processes. `best-effort` is the default
+          class for regular tasks. The `idle` class is for
+          extremely low-priority tasks that should only perform I/O when no
+          other task does.
+
+          Please note that while using the `idle` scheduling
+          class can improve responsiveness of a system performing expensive
+          builds, it might also slow down or starve crucial configuration
+          updates during load.
+
+          `idle` may therefore be a sensible class for
+          systems that experience only intermittent phases of high I/O load,
+          such as desktop or portable computers used interactively. Other
+          systems should use the `best-effort` class.
+        '';
+      };
+
+      daemonIOSchedPriority = mkOption {
+        type = types.int;
+        default = 4;
+        example = 1;
+        description = lib.mdDoc ''
+          Nix daemon process I/O scheduling priority. This priority propagates
+          to build processes. The supported priorities depend on the
+          scheduling policy: With idle, priorities are not used in scheduling
+          decisions. best-effort supports values in the range 0 (high) to 7
+          (low).
+        '';
+      };
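+
+      # Illustrative sketch (not part of the option declarations above): on an
+      # interactively used machine one might lower the daemon's scheduling
+      # impact with something like the following; the concrete values are
+      # assumptions, not recommendations.
+      #
+      #   nix.daemonCPUSchedPolicy = "idle";
+      #   nix.daemonIOSchedClass = "best-effort";
+      #   nix.daemonIOSchedPriority = 7;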
+
+      # Environment variables for running Nix.
+      envVars = mkOption {
+        type = types.attrs;
+        internal = true;
+        default = { };
+        description = lib.mdDoc "Environment variables used by Nix.";
+      };
+
+      nrBuildUsers = mkOption {
+        type = types.int;
+        description = lib.mdDoc ''
+          Number of `nixbld` user accounts created to
+          perform secure concurrent builds.  If you receive an error
+          message saying that “all build users are currently in use”,
+          you should increase this value.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages =
+      [
+        nixPackage
+        pkgs.nix-info
+      ]
+      ++ optional (config.programs.bash.enableCompletion) pkgs.nix-bash-completions;
+
+    systemd.packages = [ nixPackage ];
+
+    systemd.tmpfiles = mkMerge [
+      (mkIf (isNixAtLeast "2.8") {
+        packages = [ nixPackage ];
+      })
+      (mkIf (!isNixAtLeast "2.8") {
+        rules = [
+          "d /nix/var/nix/daemon-socket 0755 root root - -"
+        ];
+      })
+    ];
+
+    systemd.sockets.nix-daemon.wantedBy = [ "sockets.target" ];
+
+    systemd.services.nix-daemon =
+      {
+        path = [ nixPackage pkgs.util-linux config.programs.ssh.package ]
+          ++ optionals cfg.distributedBuilds [ pkgs.gzip ];
+
+        environment = cfg.envVars
+          // { CURL_CA_BUNDLE = "/etc/ssl/certs/ca-certificates.crt"; }
+          // config.networking.proxy.envVars;
+
+        unitConfig.RequiresMountsFor = "/nix/store";
+
+        serviceConfig =
+          {
+            CPUSchedulingPolicy = cfg.daemonCPUSchedPolicy;
+            IOSchedulingClass = cfg.daemonIOSchedClass;
+            IOSchedulingPriority = cfg.daemonIOSchedPriority;
+            LimitNOFILE = 1048576;
+          };
+
+        restartTriggers = [ config.environment.etc."nix/nix.conf".source ];
+
+        # `stopIfChanged = false` changes to switch behavior
+        # from   stop -> update units -> start
+        #   to   update units -> restart
+        #
+        # The `stopIfChanged` setting therefore controls a trade-off between a
+        # more predictable lifecycle, which runs the correct "version" of
+        # the `ExecStop` line, and on the other hand the availability of
+        # sockets during the switch, as the effectiveness of the stop operation
+        # depends on the socket being stopped as well.
+        #
+        # As `nix-daemon.service` does not make use of `ExecStop`, we prefer
+        # to keep the socket up and available. This is important for machines
+        # that run Nix-based services, such as automated build, test, and deploy
+        # services, that expect the daemon socket to be available at all times.
+        #
+        # Notably, the Nix client does not retry on failure to connect to the
+        # daemon socket, and the in-process RemoteStore instance will disable
+        # itself. This makes retries infeasible even for services that are
+        # aware of the issue. Failure to connect can affect not only new client
+        # processes, but also new RemoteStore instances in existing processes,
+        # as well as existing RemoteStore instances that have not saturated
+        # their connection pool.
+        #
+        # Also note that `stopIfChanged = true` does not kill existing
+        # connection handling daemons, as one might wish to happen before a
+        # breaking Nix upgrade (which is rare). The daemon forks that handle
+        # the individual connections split off into their own sessions, causing
+        # them not to be stopped by systemd.
+        # If a Nix upgrade does require all existing daemon processes to stop,
+        # nix-daemon must do so on its own accord, and only when the new version
+        # starts and detects that Nix's persistent state needs an upgrade.
+        stopIfChanged = false;
+
+      };
+
+    # Set up the environment variables for running Nix.
+    environment.sessionVariables = cfg.envVars;
+
+    nix.nrBuildUsers = mkDefault (
+      if cfg.settings.auto-allocate-uids or false then 0
+      else max 32 (if cfg.settings.max-jobs == "auto" then 0 else cfg.settings.max-jobs)
+    );
+
+    users.users = nixbldUsers;
+
+    services.xserver.displayManager.hiddenUsers = attrNames nixbldUsers;
+
+    # Legacy configuration conversion.
+    nix.settings = mkMerge [
+      (mkIf (isNixAtLeast "2.3pre") { sandbox-fallback = false; })
+    ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/system/nscd.conf b/nixpkgs/nixos/modules/services/system/nscd.conf
new file mode 100644
index 000000000000..722b883ba420
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/nscd.conf
@@ -0,0 +1,34 @@
+# We basically use nscd as a proxy that forwards NSS requests to the
+# appropriate NSS modules, as we run nscd with LD_LIBRARY_PATH set to the
+# directory containing all such modules.
+# Note that we cannot use `enable-cache no`, as this would cause nscd to
+# reject the NSS requests it receives, which in turn makes glibc fall back
+# to handling the requests by itself. That does not work, because glibc is
+# not aware of the path in which the NSS modules live. As a workaround, we
+# use `enable-cache yes` with an explicit TTL of 0.
+server-user             nscd
+
+enable-cache            passwd          yes
+positive-time-to-live   passwd          0
+negative-time-to-live   passwd          0
+shared                  passwd          yes
+
+enable-cache            group           yes
+positive-time-to-live   group           0
+negative-time-to-live   group           0
+shared                  group           yes
+
+enable-cache            netgroup        yes
+positive-time-to-live   netgroup        0
+negative-time-to-live   netgroup        0
+shared                  netgroup        yes
+
+enable-cache            hosts           yes
+positive-time-to-live   hosts           0
+negative-time-to-live   hosts           0
+shared                  hosts           yes
+
+enable-cache            services        yes
+positive-time-to-live   services        0
+negative-time-to-live   services        0
+shared                  services        yes
diff --git a/nixpkgs/nixos/modules/services/system/nscd.nix b/nixpkgs/nixos/modules/services/system/nscd.nix
new file mode 100644
index 000000000000..971dffbadc13
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/nscd.nix
@@ -0,0 +1,153 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  nssModulesPath = config.system.nssModules.path;
+  cfg = config.services.nscd;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.nscd = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the Name Service Cache Daemon.
+          Disabling this is strongly discouraged, as this effectively disables NSS lookups
+          from all non-glibc NSS modules, including the ones provided by systemd.
+        '';
+      };
+
+      enableNsncd = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to use nsncd instead of nscd from glibc.
+          This is an nscd-compatible daemon that proxies lookups, without any caching.
+          Using nscd from glibc is discouraged.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "nscd";
+        description = lib.mdDoc ''
+          User account under which nscd runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nscd";
+        description = lib.mdDoc ''
+          User group under which nscd runs.
+        '';
+      };
+
+      config = mkOption {
+        type = types.lines;
+        default = builtins.readFile ./nscd.conf;
+        description = lib.mdDoc ''
+          Configuration to use for Name Service Cache Daemon.
+          Only used in case glibc-nscd is used.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default =
+          if pkgs.stdenv.hostPlatform.libc == "glibc"
+          then pkgs.stdenv.cc.libc.bin
+          else pkgs.glibc.bin;
+        defaultText = lib.literalExpression ''
+          if pkgs.stdenv.hostPlatform.libc == "glibc"
+            then pkgs.stdenv.cc.libc.bin
+            else pkgs.glibc.bin;
+        '';
+        description = lib.mdDoc ''
+          Package containing the nscd binary to be used by the service.
+          Ignored when enableNsncd is set to true.
+        '';
+      };
+
+    };
+
+  };
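+
+  # Illustrative override sketch for a system that deliberately wants glibc's
+  # caching nscd instead of nsncd; the referenced ./my-nscd.conf file is a
+  # hypothetical example, not something this module provides:
+  #
+  #   services.nscd = {
+  #     enableNsncd = false;
+  #     config = builtins.readFile ./my-nscd.conf;
+  #   };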
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.etc."nscd.conf".text = cfg.config;
+
+    users.users.${cfg.user} = {
+      isSystemUser = true;
+      group = cfg.group;
+    };
+
+    users.groups.${cfg.group} = { };
+
+    systemd.services.nscd =
+      {
+        description = "Name Service Cache Daemon"
+          + lib.optionalString cfg.enableNsncd " (nsncd)";
+
+        before = [ "nss-lookup.target" "nss-user-lookup.target" ];
+        wants = [ "nss-lookup.target" "nss-user-lookup.target" ];
+        wantedBy = [ "multi-user.target" ];
+        requiredBy = [ "nss-lookup.target" "nss-user-lookup.target" ];
+
+        environment = { LD_LIBRARY_PATH = nssModulesPath; };
+
+        restartTriggers = lib.optionals (!cfg.enableNsncd) ([
+          config.environment.etc.hosts.source
+          config.environment.etc."nsswitch.conf".source
+          config.environment.etc."nscd.conf".source
+        ] ++ optionals config.users.mysql.enable [
+          config.environment.etc."libnss-mysql.cfg".source
+          config.environment.etc."libnss-mysql-root.cfg".source
+        ]);
+
+        # In some configurations, nscd needs to be started as root; it will
+        # drop privileges after all the NSS modules have read their
+        # configuration files. So prefix the ExecStart command with "!" to
+        # prevent systemd from dropping privileges early. See ExecStart in
+        # systemd.service(5). We use a static user, because some NSS modules
+        # still want to read their configuration files after the privilege drop
+        # and so users can set the owner of those files to the nscd user.
+        serviceConfig =
+          {
+            ExecStart =
+              if cfg.enableNsncd then "${pkgs.nsncd}/bin/nsncd"
+              else "!@${cfg.package}/bin/nscd nscd";
+            Type = if cfg.enableNsncd then "notify" else "forking";
+            User = cfg.user;
+            Group = cfg.group;
+            RemoveIPC = true;
+            PrivateTmp = true;
+            NoNewPrivileges = true;
+            RestrictSUIDSGID = true;
+            ProtectSystem = "strict";
+            ProtectHome = "read-only";
+            RuntimeDirectory = "nscd";
+            PIDFile = "/run/nscd/nscd.pid";
+            Restart = "always";
+            ExecReload =
+              lib.optionals (!cfg.enableNsncd) [
+                "${cfg.package}/bin/nscd --invalidate passwd"
+                "${cfg.package}/bin/nscd --invalidate group"
+                "${cfg.package}/bin/nscd --invalidate hosts"
+              ];
+          };
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/saslauthd.nix b/nixpkgs/nixos/modules/services/system/saslauthd.nix
new file mode 100644
index 000000000000..09720146aaa9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/saslauthd.nix
@@ -0,0 +1,62 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.saslauthd;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.saslauthd = {
+
+      enable = mkEnableOption (lib.mdDoc "saslauthd, the Cyrus SASL authentication daemon");
+
+      package = mkOption {
+        default = pkgs.cyrus_sasl.bin;
+        defaultText = literalExpression "pkgs.cyrus_sasl.bin";
+        type = types.package;
+        description = lib.mdDoc "Cyrus SASL package to use.";
+      };
+
+      mechanism = mkOption {
+        type = types.str;
+        default = "pam";
+        description = lib.mdDoc "Auth mechanism to use";
+      };
+
+      config = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Configuration to use for Cyrus SASL authentication daemon.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.saslauthd = {
+      description = "Cyrus SASL authentication daemon";
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/sbin/saslauthd saslauthd -a ${cfg.mechanism} -O ${pkgs.writeText "saslauthd.conf" cfg.config}";
+        Type = "forking";
+        PIDFile = "/run/saslauthd/saslauthd.pid";
+        Restart = "always";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/self-deploy.nix b/nixpkgs/nixos/modules/services/system/self-deploy.nix
new file mode 100644
index 000000000000..b5d8ea3f56e7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/self-deploy.nix
@@ -0,0 +1,177 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.self-deploy;
+
+  workingDirectory = "/var/lib/nixos-self-deploy";
+  repositoryDirectory = "${workingDirectory}/repo";
+  outPath = "${workingDirectory}/system";
+
+  gitWithRepo = "git -C ${repositoryDirectory}";
+
+  renderNixArgs = args:
+    let
+      toArg = key: value:
+        if builtins.isString value
+        then " --argstr ${lib.escapeShellArg key} ${lib.escapeShellArg value}"
+        else " --arg ${lib.escapeShellArg key} ${lib.escapeShellArg (toString value)}";
+    in
+    lib.concatStrings (lib.mapAttrsToList toArg args);
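+
+  # For illustration (hypothetical argument set): renderNixArgs { jobs = 4; system = "x86_64-linux"; }
+  # renders roughly to " --arg jobs 4 --argstr system x86_64-linux", with shell
+  # quoting applied by escapeShellArg; attributes are visited in name order.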
+
+  isPathType = x: lib.types.path.check x;
+
+in
+{
+  options.services.self-deploy = {
+    enable = lib.mkEnableOption (lib.mdDoc "self-deploy");
+
+    nixFile = lib.mkOption {
+      type = lib.types.path;
+
+      default = "/default.nix";
+
+      description = lib.mdDoc ''
+        Path to the Nix file in the repository. A leading '/' refers to the
+        root of the git repository.
+      '';
+    };
+
+    nixAttribute = lib.mkOption {
+      type = with lib.types; nullOr str;
+
+      default = null;
+
+      description = lib.mdDoc ''
+        Attribute of `nixFile` that builds the current system.
+      '';
+    };
+
+    nixArgs = lib.mkOption {
+      type = lib.types.attrs;
+
+      default = { };
+
+      description = lib.mdDoc ''
+        Arguments to `nix-build` passed as `--argstr` or `--arg` depending on
+        the type.
+      '';
+    };
+
+    switchCommand = lib.mkOption {
+      type = lib.types.enum [ "boot" "switch" "dry-activate" "test" ];
+
+      default = "switch";
+
+      description = lib.mdDoc ''
+        The `switch-to-configuration` subcommand used.
+      '';
+    };
+
+    repository = lib.mkOption {
+      type = with lib.types; oneOf [ path str ];
+
+      description = lib.mdDoc ''
+        The repository to fetch from. Must be properly formatted for git.
+
+        If this value is set to a path (must begin with `/`) then it's
+        assumed that the repository is local and the resulting service
+        won't wait for the network to be up.
+
+        If the repository will be fetched over SSH, you must add an
+        entry to `programs.ssh.knownHosts` for the SSH host for the fetch
+        to be successful.
+      '';
+    };
+
+    sshKeyFile = lib.mkOption {
+      type = with lib.types; nullOr path;
+
+      default = null;
+
+      description = lib.mdDoc ''
+        Path to SSH private key used to fetch private repositories over
+        SSH.
+      '';
+    };
+
+    branch = lib.mkOption {
+      type = lib.types.str;
+
+      default = "master";
+
+      description = lib.mdDoc ''
+        Branch to track.
+
+        Technically speaking any ref can be specified here, as this is
+        passed directly to a `git fetch`, but for the use-case of
+        continuous deployment you're likely to want to specify a branch.
+      '';
+    };
+
+    startAt = lib.mkOption {
+      type = with lib.types; either str (listOf str);
+
+      default = "hourly";
+
+      description = lib.mdDoc ''
+        The schedule on which to run the `self-deploy` service. Format
+        specified by `systemd.time(7)`.
+
+        This value can also be a list of `systemd.time(7)` formatted
+        strings, in which case the service will be started on multiple
+        schedules.
+      '';
+    };
+  };
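+
+  # Illustrative usage sketch; the repository URL, key path and branch below
+  # are assumptions, not defaults of this module (an SSH repository also needs
+  # a matching programs.ssh.knownHosts entry, as noted above):
+  #
+  #   services.self-deploy = {
+  #     enable = true;
+  #     repository = "git@example.com:infra/nixos-config.git";
+  #     sshKeyFile = "/var/keys/self-deploy-ed25519";
+  #     branch = "main";
+  #     startAt = "hourly";
+  #   };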
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.self-deploy = rec {
+      inherit (cfg) startAt;
+
+      serviceConfig.Type = "oneshot";
+
+      requires = lib.mkIf (!(isPathType cfg.repository)) [ "network-online.target" ];
+
+      after = requires;
+
+      environment.GIT_SSH_COMMAND = lib.mkIf (cfg.sshKeyFile != null)
+        "${pkgs.openssh}/bin/ssh -i ${lib.escapeShellArg cfg.sshKeyFile}";
+
+      restartIfChanged = false;
+
+      path = with pkgs; [
+        git
+        gnutar
+        gzip
+        nix
+      ] ++ lib.optionals (cfg.switchCommand == "boot") [ systemd ];
+
+      script = ''
+        if [ ! -e ${repositoryDirectory} ]; then
+          mkdir --parents ${repositoryDirectory}
+          git init ${repositoryDirectory}
+        fi
+
+        ${gitWithRepo} fetch ${lib.escapeShellArg cfg.repository} ${lib.escapeShellArg cfg.branch}
+
+        ${gitWithRepo} checkout FETCH_HEAD
+
+        nix-build${renderNixArgs cfg.nixArgs} ${lib.cli.toGNUCommandLineShell { } {
+          attr = cfg.nixAttribute;
+          out-link = outPath;
+        }} ${lib.escapeShellArg "${repositoryDirectory}${cfg.nixFile}"}
+
+        ${lib.optionalString (cfg.switchCommand != "test")
+          "nix-env --profile /nix/var/nix/profiles/system --set ${outPath}"}
+
+        ${outPath}/bin/switch-to-configuration ${cfg.switchCommand}
+
+        rm ${outPath}
+
+        ${gitWithRepo} gc --prune=all
+
+        ${lib.optionalString (cfg.switchCommand == "boot") "systemctl reboot"}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/systembus-notify.nix b/nixpkgs/nixos/modules/services/system/systembus-notify.nix
new file mode 100644
index 000000000000..f79879fa1360
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/systembus-notify.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.systembus-notify;
+
+  inherit (lib) mkEnableOption mkIf;
+
+in
+{
+  options.services.systembus-notify = {
+    enable = mkEnableOption (lib.mdDoc ''
+      System bus notification support
+
+      WARNING: enabling this option (while convenient) should *not* be done on a
+      machine where you do not trust the other users, as it allows any other
+      local user to DoS your session by spamming notifications.
+    '');
+  };
+
+  config = mkIf cfg.enable {
+    systemd = {
+      packages = with pkgs; [ systembus-notify ];
+
+      user.services.systembus-notify.wantedBy = [ "graphical-session.target" ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/uptimed.nix b/nixpkgs/nixos/modules/services/system/uptimed.nix
new file mode 100644
index 000000000000..df08c0f26e98
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/uptimed.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.uptimed;
+  stateDir = "/var/lib/uptimed";
+in
+{
+  options = {
+    services.uptimed = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable `uptimed`, allowing you to track
+          your highest uptimes.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.uptimed ];
+
+    users.users.uptimed = {
+      description = "Uptimed daemon user";
+      home        = stateDir;
+      uid         = config.ids.uids.uptimed;
+      group       = "uptimed";
+    };
+    users.groups.uptimed = {};
+
+    systemd.services.uptimed = {
+      unitConfig.Documentation = "man:uptimed(8) man:uprecords(1)";
+      description = "uptimed service";
+      wantedBy    = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Restart                 = "on-failure";
+        User                    = "uptimed";
+        Nice                    = 19;
+        IOSchedulingClass       = "idle";
+        PrivateTmp              = "yes";
+        PrivateNetwork          = "yes";
+        NoNewPrivileges         = "yes";
+        StateDirectory          = [ "uptimed" ];
+        InaccessibleDirectories = "/home";
+        ExecStart               = "${pkgs.uptimed}/sbin/uptimed -f -p ${stateDir}/pid";
+      };
+
+      preStart = ''
+        if ! test -f ${stateDir}/bootid ; then
+          ${pkgs.uptimed}/sbin/uptimed -b
+        fi
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/system/zram-generator.nix b/nixpkgs/nixos/modules/services/system/zram-generator.nix
new file mode 100644
index 000000000000..5902eda55696
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/system/zram-generator.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.zram-generator;
+  settingsFormat = pkgs.formats.ini { };
+in
+{
+  meta = {
+    maintainers = with lib.maintainers; [ nickcao ];
+  };
+
+  options.services.zram-generator = {
+    enable = lib.mkEnableOption (lib.mdDoc "Systemd unit generator for zram devices");
+
+    package = lib.mkPackageOptionMD pkgs "zram-generator" { };
+
+    settings = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = settingsFormat.type;
+      };
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for zram-generator,
+        see https://github.com/systemd/zram-generator for documentation.
+      '';
+    };
+  };
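+
+  # Illustrative usage sketch; the device name and settings values are
+  # assumptions, see the upstream documentation linked above:
+  #
+  #   services.zram-generator = {
+  #     enable = true;
+  #     settings.zram0 = {
+  #       zram-size = "ram / 2";
+  #       compression-algorithm = "zstd";
+  #     };
+  #   };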
+
+  config = lib.mkIf cfg.enable {
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isModule "ZRAM")
+    ];
+
+    systemd.packages = [ cfg.package ];
+    systemd.services."systemd-zram-setup@".path = [ pkgs.util-linux ]; # for mkswap
+
+    environment.etc."systemd/zram-generator.conf".source = settingsFormat.generate "zram-generator.conf" cfg.settings;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/torrent/deluge.nix b/nixpkgs/nixos/modules/services/torrent/deluge.nix
new file mode 100644
index 000000000000..003f7b2613b7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/torrent/deluge.nix
@@ -0,0 +1,279 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.deluge;
+  cfg_web = config.services.deluge.web;
+  isDeluge1 = versionOlder cfg.package.version "2.0.0";
+
+  openFilesLimit = 4096;
+  listenPortsDefault = [ 6881 6889 ];
+
+  listToRange = x: { from = elemAt x 0; to = elemAt x 1; };
+
+  configDir = "${cfg.dataDir}/.config/deluge";
+  configFile = pkgs.writeText "core.conf" (builtins.toJSON cfg.config);
+  declarativeLockFile = "${configDir}/.declarative";
+
+  preStart = if cfg.declarative then ''
+    if [ -e ${declarativeLockFile} ]; then
+      # Was declarative before, no need to back up anything
+      ${if isDeluge1 then "ln -sf" else "cp"} ${configFile} ${configDir}/core.conf
+      ln -sf ${cfg.authFile} ${configDir}/auth
+    else
+      # Declarative for the first time, backup stateful files
+      ${if isDeluge1 then "ln -s" else "cp"} -b --suffix=.stateful ${configFile} ${configDir}/core.conf
+      ln -sb --suffix=.stateful ${cfg.authFile} ${configDir}/auth
+      echo "Autogenerated file that signifies that this server configuration is managed declaratively by NixOS" \
+        > ${declarativeLockFile}
+    fi
+  '' else ''
+    if [ -e ${declarativeLockFile} ]; then
+      rm ${declarativeLockFile}
+    fi
+  '';
+in {
+  options = {
+    services = {
+      deluge = {
+        enable = mkEnableOption (lib.mdDoc "Deluge daemon");
+
+        openFilesLimit = mkOption {
+          default = openFilesLimit;
+          type = types.either types.int types.str;
+          description = lib.mdDoc ''
+            Number of files to allow deluged to open.
+          '';
+        };
+
+        config = mkOption {
+          type = types.attrs;
+          default = {};
+          example = literalExpression ''
+            {
+              download_location = "/srv/torrents/";
+              max_upload_speed = "1000.0";
+              share_ratio_limit = "2.0";
+              allow_remote = true;
+              daemon_port = 58846;
+              listen_ports = [ ${toString listenPortsDefault} ];
+            }
+          '';
+          description = lib.mdDoc ''
+            Deluge core configuration for the core.conf file. Only has an effect
+            when {option}`services.deluge.declarative` is set to
+            `true`. String values must be quoted, integer and
+            boolean values must not. See
+            <https://git.deluge-torrent.org/deluge/tree/deluge/core/preferencesmanager.py#n41>
+            for the available options.
+          '';
+        };
+
+        declarative = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to use a declarative deluge configuration.
+            Only if set to `true`, the options
+            {option}`services.deluge.config`,
+            {option}`services.deluge.openFirewall` and
+            {option}`services.deluge.authFile` will be
+            applied.
+          '';
+        };
+
+        openFirewall = mkOption {
+          default = false;
+          type = types.bool;
+          description = lib.mdDoc ''
+            Whether to open the firewall for the ports in
+            {option}`services.deluge.config.listen_ports`. It only takes effect if
+            {option}`services.deluge.declarative` is set to
+            `true`.
+
+            It does NOT apply to the daemon port nor the web UI port. To access those
+            ports securely check the documentation
+            <https://dev.deluge-torrent.org/wiki/UserGuide/ThinClient#CreateSSHTunnel>
+            or use a VPN or configure certificates for deluge.
+          '';
+        };
+
+        dataDir = mkOption {
+          type = types.path;
+          default = "/var/lib/deluge";
+          description = lib.mdDoc ''
+            The directory where deluge will create files.
+          '';
+        };
+
+        authFile = mkOption {
+          type = types.path;
+          example = "/run/keys/deluge-auth";
+          description = lib.mdDoc ''
+            The file managing the authentication for deluge. The format of this
+            file is straightforward: each line contains a
+            username:password:level tuple in plaintext. It only has an effect
+            when {option}`services.deluge.declarative` is set to
+            `true`.
+            See <https://dev.deluge-torrent.org/wiki/UserGuide/Authentication> for
+            more information.
+          '';
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "deluge";
+          description = lib.mdDoc ''
+            User account under which deluge runs.
+          '';
+        };
+
+        group = mkOption {
+          type = types.str;
+          default = "deluge";
+          description = lib.mdDoc ''
+            Group under which deluge runs.
+          '';
+        };
+
+        extraPackages = mkOption {
+          type = types.listOf types.package;
+          default = [];
+          description = lib.mdDoc ''
+            Extra packages available at runtime to enable Deluge's plugins. For example,
+            extraction utilities are required for the built-in "Extractor" plugin.
+            This always contains unzip, gnutar, xz and bzip2.
+          '';
+        };
+
+        package = mkOption {
+          type = types.package;
+          example = literalExpression "pkgs.deluge-2_x";
+          description = lib.mdDoc ''
+            Deluge package to use.
+          '';
+        };
+      };
+
+      deluge.web = {
+        enable = mkEnableOption (lib.mdDoc "Deluge Web daemon");
+
+        port = mkOption {
+          type = types.port;
+          default = 8112;
+          description = lib.mdDoc ''
+            Deluge web UI port.
+          '';
+        };
+
+        openFirewall = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Open ports in the firewall for the deluge web daemon.
+          '';
+        };
+      };
+    };
+  };
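+
+  # Illustrative declarative setup sketch; the paths, ports and auth file
+  # below are assumptions, not module defaults:
+  #
+  #   services.deluge = {
+  #     enable = true;
+  #     declarative = true;
+  #     openFirewall = true;
+  #     authFile = "/run/keys/deluge-auth";
+  #     config = {
+  #       download_location = "/srv/torrents";
+  #       listen_ports = [ 6881 6889 ];
+  #       random_port = false;
+  #     };
+  #     web.enable = true;
+  #   };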
+
+  config = mkIf cfg.enable {
+
+    services.deluge.package = mkDefault (
+      if versionAtLeast config.system.stateVersion "20.09" then
+        pkgs.deluge-2_x
+      else
+        # deluge-1_x is no longer packaged and this will resolve to an error
+        # thanks to the alias for this name.  This is left here so that anyone
+        # using NixOS older than 20.09 receives that error when they upgrade
+        # and is forced to make an intentional choice to switch to deluge-2_x.
+        # That might be slightly inconvenient but there is no path to
+        # downgrade from 2.x to 1.x so NixOS should not automatically perform
+        # this state migration.
+        pkgs.deluge-1_x
+    );
+
+    # Provide a default set of `extraPackages`.
+    services.deluge.extraPackages = with pkgs; [ unzip gnutar xz bzip2 ];
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0770 ${cfg.user} ${cfg.group}"
+      "d '${cfg.dataDir}/.config' 0770 ${cfg.user} ${cfg.group}"
+      "d '${cfg.dataDir}/.config/deluge' 0770 ${cfg.user} ${cfg.group}"
+    ]
+    ++ optional (cfg.config ? download_location)
+      "d '${cfg.config.download_location}' 0770 ${cfg.user} ${cfg.group}"
+    ++ optional (cfg.config ? torrentfiles_location)
+      "d '${cfg.config.torrentfiles_location}' 0770 ${cfg.user} ${cfg.group}"
+    ++ optional (cfg.config ? move_completed_path)
+      "d '${cfg.config.move_completed_path}' 0770 ${cfg.user} ${cfg.group}";
+
+    systemd.services.deluged = {
+      after = [ "network.target" ];
+      description = "Deluge BitTorrent Daemon";
+      wantedBy = [ "multi-user.target" ];
+      path = [ cfg.package ] ++ cfg.extraPackages;
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/deluged \
+            --do-not-daemonize \
+            --config ${configDir}
+        '';
+        # To prevent "Quit & shutdown daemon" from working; we want systemd to
+        # manage it!
+        Restart = "on-success";
+        User = cfg.user;
+        Group = cfg.group;
+        UMask = "0002";
+        LimitNOFILE = cfg.openFilesLimit;
+      };
+      preStart = preStart;
+    };
+
+    systemd.services.delugeweb = mkIf cfg_web.enable {
+      after = [ "network.target" "deluged.service"];
+      requires = [ "deluged.service" ];
+      description = "Deluge BitTorrent WebUI";
+      wantedBy = [ "multi-user.target" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/deluge-web \
+            ${optionalString (!isDeluge1) "--do-not-daemonize"} \
+            --config ${configDir} \
+            --port ${toString cfg.web.port}
+        '';
+        User = cfg.user;
+        Group = cfg.group;
+      };
+    };
+
+    networking.firewall = mkMerge [
+      (mkIf (cfg.declarative && cfg.openFirewall && !(cfg.config.random_port or true)) {
+        allowedTCPPortRanges = singleton (listToRange (cfg.config.listen_ports or listenPortsDefault));
+        allowedUDPPortRanges = singleton (listToRange (cfg.config.listen_ports or listenPortsDefault));
+      })
+      (mkIf (cfg.web.openFirewall) {
+        allowedTCPPorts = [ cfg.web.port ];
+      })
+    ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    users.users = mkIf (cfg.user == "deluge") {
+      deluge = {
+        group = cfg.group;
+        uid = config.ids.uids.deluge;
+        home = cfg.dataDir;
+        description = "Deluge Daemon user";
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "deluge") {
+      deluge = {
+        gid = config.ids.gids.deluge;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/torrent/flexget.nix b/nixpkgs/nixos/modules/services/torrent/flexget.nix
new file mode 100644
index 000000000000..58a4b7001497
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/torrent/flexget.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.flexget;
+  pkg = cfg.package;
+  ymlFile = pkgs.writeText "flexget.yml" ''
+    ${cfg.config}
+
+    ${optionalString cfg.systemScheduler "schedules: no"}
+'';
+  configFile = "${toString cfg.homeDir}/flexget.yml";
+in {
+  options = {
+    services.flexget = {
+      enable = mkEnableOption (lib.mdDoc "FlexGet daemon");
+
+      package = mkPackageOptionMD pkgs "flexget" {};
+
+      user = mkOption {
+        default = "deluge";
+        example = "some_user";
+        type = types.str;
+        description = lib.mdDoc "The user under which to run flexget.";
+      };
+
+      homeDir = mkOption {
+        default = "/var/lib/deluge";
+        example = "/home/flexget";
+        type = types.path;
+        description = lib.mdDoc "Where files live.";
+      };
+
+      interval = mkOption {
+        default = "10m";
+        example = "1h";
+        type = types.str;
+        description = lib.mdDoc "When to perform a {command}`flexget` run. See {command}`man 7 systemd.time` for the format.";
+      };
+
+      systemScheduler = mkOption {
+        default = true;
+        example = false;
+        type = types.bool;
+        description = lib.mdDoc "When true, execute the runs via the flexget-runner.timer. If false, you have to specify the settings yourself in the YML file.";
+      };
+
+      config = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc "The YAML configuration for FlexGet.";
+      };
+    };
+  };
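+
+  # Illustrative usage sketch; the user, directory, interval and YAML task
+  # below are assumptions, not defaults of this module:
+  #
+  #   services.flexget = {
+  #     enable = true;
+  #     user = "media";
+  #     homeDir = "/var/lib/flexget";
+  #     interval = "30m";
+  #     config = ''
+  #       tasks:
+  #         example:
+  #           rss: https://example.org/feed.xml
+  #           accept_all: yes
+  #     '';
+  #   };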
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkg ];
+
+    systemd.services = {
+      flexget = {
+        description = "FlexGet Daemon";
+        path = [ pkg ];
+        serviceConfig = {
+          User = cfg.user;
+          ExecStartPre = "${pkgs.coreutils}/bin/install -m644 ${ymlFile} ${configFile}";
+          ExecStart = "${pkg}/bin/flexget -c ${configFile} daemon start";
+          ExecStop = "${pkg}/bin/flexget -c ${configFile} daemon stop";
+          ExecReload = "${pkg}/bin/flexget -c ${configFile} daemon reload";
+          Restart = "on-failure";
+          PrivateTmp = true;
+          WorkingDirectory = toString cfg.homeDir;
+        };
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      flexget-runner = mkIf cfg.systemScheduler {
+        description = "FlexGet Runner";
+        after = [ "flexget.service" ];
+        wants = [ "flexget.service" ];
+        serviceConfig = {
+          User = cfg.user;
+          ExecStart = "${pkg}/bin/flexget -c ${configFile} execute";
+          PrivateTmp = true;
+          WorkingDirectory = toString cfg.homeDir;
+        };
+      };
+    };
+
+    systemd.timers.flexget-runner = mkIf cfg.systemScheduler {
+      description = "Run FlexGet every ${cfg.interval}";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnBootSec = "5m";
+        OnUnitInactiveSec = cfg.interval;
+        Unit = "flexget-runner.service";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/torrent/magnetico.nix b/nixpkgs/nixos/modules/services/torrent/magnetico.nix
new file mode 100644
index 000000000000..dc6b4e9aa734
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/torrent/magnetico.nix
@@ -0,0 +1,218 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.magnetico;
+
+  dataDir = "/var/lib/magnetico";
+
+  credFile = with cfg.web;
+    if credentialsFile != null
+      then credentialsFile
+      else pkgs.writeText "magnetico-credentials"
+        (concatStrings (mapAttrsToList
+          (user: hash: "${user}:${hash}\n")
+          cfg.web.credentials));
+
+  # default options in magneticod/main.go
+  dbURI = concatStrings
+    [ "sqlite3://${dataDir}/database.sqlite3"
+      "?_journal_mode=WAL"
+      "&_busy_timeout=3000"
+      "&_foreign_keys=true"
+    ];
+
+  crawlerArgs = with cfg.crawler; escapeShellArgs
+    ([ "--database=${dbURI}"
+       "--indexer-addr=${address}:${toString port}"
+       "--indexer-max-neighbors=${toString maxNeighbors}"
+       "--leech-max-n=${toString maxLeeches}"
+     ] ++ extraOptions);
+
+  webArgs = with cfg.web; escapeShellArgs
+    ([ "--database=${dbURI}"
+       (if (cfg.web.credentialsFile != null || cfg.web.credentials != { })
+         then "--credentials=${toString credFile}"
+         else "--no-auth")
+       "--addr=${address}:${toString port}"
+     ] ++ extraOptions);
+
+in {
+
+  ###### interface
+
+  options.services.magnetico = {
+    enable = mkEnableOption (lib.mdDoc "Magnetico, Bittorrent DHT crawler");
+
+    crawler.address = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      example = "1.2.3.4";
+      description = lib.mdDoc ''
+        Address to be used for indexing DHT nodes.
+      '';
+    };
+
+    crawler.port = mkOption {
+      type = types.port;
+      default = 0;
+      description = lib.mdDoc ''
+        Port to be used for indexing DHT nodes.
+        This port should be added to
+        {option}`networking.firewall.allowedTCPPorts`.
+      '';
+    };
+
+    crawler.maxNeighbors = mkOption {
+      type = types.ints.positive;
+      default = 1000;
+      description = lib.mdDoc ''
+        Maximum number of simultaneous neighbors of an indexer.
+        Be careful changing this number: high values can very
+        easily cause your network to be congested or even crash
+        your router.
+      '';
+    };
+
+    crawler.maxLeeches = mkOption {
+      type = types.ints.positive;
+      default = 200;
+      description = lib.mdDoc ''
+        Maximum number of simultaneous leeches.
+      '';
+    };
+
+    crawler.extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra command line arguments to pass to magneticod.
+      '';
+    };
+
+    web.address = mkOption {
+      type = types.str;
+      default = "localhost";
+      example = "1.2.3.4";
+      description = lib.mdDoc ''
+        Address the web interface will listen to.
+      '';
+    };
+
+    web.port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc ''
+        Port the web interface will listen to.
+      '';
+    };
+
+    web.credentials = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = lib.literalExpression ''
+        {
+          myuser = "$2y$12$YE01LZ8jrbQbx6c0s2hdZO71dSjn2p/O9XsYJpz.5968yCysUgiaG";
+        }
+      '';
+      description = lib.mdDoc ''
+        The credentials to access the web interface, in case authentication is
+        enabled, in the format `username:hash`. If unset no
+        authentication will be required.
+
+        Usernames must start with a lowercase ([a-z]) ASCII character, may
+        contain non-consecutive underscores except at the end, and consist of
+        lowercase a-z characters and digits 0-9.  The
+        {command}`htpasswd` tool from the `apacheHttpd`
+        package may be used to generate the hash:
+        {command}`htpasswd -bnBC 12 username password`
+
+        ::: {.warning}
+        The hashes will be stored world-readable in the nix store.
+        Consider using the `credentialsFile` option if you
+        don't want this.
+        :::
+      '';
+    };
+
+    web.credentialsFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        The path to the file holding the credentials to access the web
+        interface. If unset no authentication will be required.
+
+        The file must contain user names and password hashes in the format
+        `username:hash`, one per line.  Usernames must
+        start with a lowercase ([a-z]) ASCII character, may contain
+        non-consecutive underscores except at the end, and consist of
+        lowercase a-z characters and digits 0-9.
+        The {command}`htpasswd` tool from the `apacheHttpd`
+        package may be used to generate the hash:
+        {command}`htpasswd -bnBC 12 username password`
+      '';
+    };
+
+    web.extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        Extra command line arguments to pass to magneticow.
+      '';
+    };
+
+  };
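+
+  # Illustrative usage sketch; the port, address and credentials path are
+  # assumptions (a hash for the credentials file can be generated with
+  # `htpasswd -bnBC 12 username password`, as described above):
+  #
+  #   services.magnetico = {
+  #     enable = true;
+  #     crawler.port = 61000;
+  #     web.address = "127.0.0.1";
+  #     web.credentialsFile = "/run/keys/magnetico-credentials";
+  #   };
+  #   networking.firewall.allowedTCPPorts = [ 61000 ];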
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    users.users.magnetico = {
+      description = "Magnetico daemons user";
+      group = "magnetico";
+      isSystemUser = true;
+    };
+    users.groups.magnetico = {};
+
+    systemd.services.magneticod = {
+      description = "Magnetico DHT crawler";
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" ];
+
+      serviceConfig = {
+        User      = "magnetico";
+        Restart   = "on-failure";
+        ExecStart = "${pkgs.magnetico}/bin/magneticod ${crawlerArgs}";
+      };
+    };
+
+    systemd.services.magneticow = {
+      description = "Magnetico web interface";
+      wantedBy = [ "multi-user.target" ];
+      after    = [ "network.target" "magneticod.service"];
+
+      serviceConfig = {
+        User           = "magnetico";
+        StateDirectory = "magnetico";
+        Restart        = "on-failure";
+        ExecStart      = "${pkgs.magnetico}/bin/magneticow ${webArgs}";
+      };
+    };
+
+    assertions =
+    [
+      {
+        assertion = cfg.web.credentialsFile == null || cfg.web.credentials == { };
+        message = ''
+          The options services.magnetico.web.credentialsFile and
+          services.magnetico.web.credentials are mutually exclusive.
+        '';
+      }
+    ];
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/torrent/opentracker.nix b/nixpkgs/nixos/modules/services/torrent/opentracker.nix
new file mode 100644
index 000000000000..7d67491c1191
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/torrent/opentracker.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.opentracker;
+in {
+  options.services.opentracker = {
+    enable = mkEnableOption (lib.mdDoc "opentracker");
+
+    package = mkOption {
+      type = types.package;
+      description = lib.mdDoc ''
+        opentracker package to use.
+      '';
+      default = pkgs.opentracker;
+      defaultText = literalExpression "pkgs.opentracker";
+    };
+
+    extraOptions = mkOption {
+      type = types.separatedString " ";
+      description = lib.mdDoc ''
+        Configuration arguments for opentracker.
+        See https://erdgeist.org/arts/software/opentracker/ for all parameters.
+      '';
+      default = "";
+    };
+  };
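+
+  # Illustrative usage sketch; the bind address and port flags are assumptions,
+  # see the upstream page referenced above for the full list of flags:
+  #
+  #   services.opentracker = {
+  #     enable = true;
+  #     extraOptions = "-i 127.0.0.1 -p 6969";
+  #   };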
+
+  config = lib.mkIf cfg.enable {
+
+    systemd.services.opentracker = {
+      description = "opentracker server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartIfChanged = true;
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/opentracker ${cfg.extraOptions}";
+        PrivateTmp = true;
+        WorkingDirectory = "/var/empty";
+        # By default opentracker drops all privileges and runs in chroot after starting up as root.
+      };
+    };
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/services/torrent/peerflix.nix b/nixpkgs/nixos/modules/services/torrent/peerflix.nix
new file mode 100644
index 000000000000..ea74d0f8b9c4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/torrent/peerflix.nix
@@ -0,0 +1,71 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.peerflix;
+  opt = options.services.peerflix;
+
+  configFile = pkgs.writeText "peerflix-config.json" ''
+    {
+      "connections": 50,
+      "tmp": "${cfg.downloadDir}"
+    }
+  '';
+
+in {
+
+  ###### interface
+
+  options.services.peerflix = {
+    enable = mkOption {
+      description = lib.mdDoc "Whether to enable peerflix service.";
+      default = false;
+      type = types.bool;
+    };
+
+    stateDir = mkOption {
+      description = lib.mdDoc "Peerflix state directory.";
+      default = "/var/lib/peerflix";
+      type = types.path;
+    };
+
+    downloadDir = mkOption {
+      description = lib.mdDoc "Peerflix temporary download directory.";
+      default = "${cfg.stateDir}/torrents";
+      defaultText = literalExpression ''"''${config.${opt.stateDir}}/torrents"'';
+      type = types.path;
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}' - peerflix - - -"
+    ];
+
+    systemd.services.peerflix = {
+      description = "Peerflix Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment.HOME = cfg.stateDir;
+
+      preStart = ''
+        mkdir -p "${cfg.stateDir}"/{torrents,.config/peerflix-server}
+        ln -fs "${configFile}" "${cfg.stateDir}/.config/peerflix-server/config.json"
+      '';
+
+      serviceConfig = {
+        ExecStart = "${pkgs.nodePackages.peerflix-server}/bin/peerflix-server";
+        User = "peerflix";
+      };
+    };
+
+    users.users.peerflix = {
+      isSystemUser = true;
+      group = "peerflix";
+    };
+    users.groups.peerflix = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/torrent/rtorrent.nix b/nixpkgs/nixos/modules/services/torrent/rtorrent.nix
new file mode 100644
index 000000000000..64cda7fb675f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/torrent/rtorrent.nix
@@ -0,0 +1,220 @@
+{ config, options, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.rtorrent;
+  opt = options.services.rtorrent;
+
+in {
+  options.services.rtorrent = {
+    enable = mkEnableOption (lib.mdDoc "rtorrent");
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/rtorrent";
+      description = lib.mdDoc ''
+        The directory where rtorrent stores its data files.
+      '';
+    };
+
+    dataPermissions = mkOption {
+      type = types.str;
+      default = "0750";
+      example = "0755";
+      description = lib.mdDoc ''
+        Unix Permissions in octal on the rtorrent directory.
+      '';
+    };
+
+    downloadDir = mkOption {
+      type = types.str;
+      default = "${cfg.dataDir}/download";
+      defaultText = literalExpression ''"''${config.${opt.dataDir}}/download"'';
+      description = lib.mdDoc ''
+        Where to put downloaded files.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "rtorrent";
+      description = lib.mdDoc ''
+        User account under which rtorrent runs.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "rtorrent";
+      description = lib.mdDoc ''
+        Group under which rtorrent runs.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.rtorrent;
+      defaultText = literalExpression "pkgs.rtorrent";
+      description = lib.mdDoc ''
+        The rtorrent package to use.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 50000;
+      description = lib.mdDoc ''
+        The rtorrent port.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open the firewall for the port in {option}`services.rtorrent.port`.
+      '';
+    };
+
+    rpcSocket = mkOption {
+      type = types.str;
+      readOnly = true;
+      default = "/run/rtorrent/rpc.sock";
+      description = lib.mdDoc ''
+        RPC socket path.
+      '';
+    };
+
+    configText = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        The content of {file}`rtorrent.rc`. The [modernized configuration template](https://rtorrent-docs.readthedocs.io/en/latest/cookbook.html#modernized-configuration-template) with the values specified in this module will be prepended using mkBefore. You can use mkForce to overwrite the config completely.
+      '';
+    };
+  };
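+
+  # Illustrative usage sketch; the extra watch-directory line is an assumption
+  # and is appended after the template that this module prepends with mkBefore:
+  #
+  #   services.rtorrent = {
+  #     enable = true;
+  #     openFirewall = true;
+  #     configText = ''
+  #       schedule2 = watch_load, 11, 10, ((load.normal, (cat, (cfg.watch), "load/*.torrent")))
+  #     '';
+  #   };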
+
+  config = mkIf cfg.enable {
+
+    users.groups = mkIf (cfg.group == "rtorrent") {
+      rtorrent = {};
+    };
+
+    users.users = mkIf (cfg.user == "rtorrent") {
+      rtorrent = {
+        group = cfg.group;
+        shell = pkgs.bashInteractive;
+        home = cfg.dataDir;
+        description = "rtorrent Daemon user";
+        isSystemUser = true;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = mkIf (cfg.openFirewall) [ cfg.port ];
+
+    services.rtorrent.configText = mkBefore ''
+      # Instance layout (base paths)
+      method.insert = cfg.basedir, private|const|string, (cat,"${cfg.dataDir}/")
+      method.insert = cfg.watch,   private|const|string, (cat,(cfg.basedir),"watch/")
+      method.insert = cfg.logs,    private|const|string, (cat,(cfg.basedir),"log/")
+      method.insert = cfg.logfile, private|const|string, (cat,(cfg.logs),(system.time),".log")
+      method.insert = cfg.rpcsock, private|const|string, (cat,"${cfg.rpcSocket}")
+
+      # Create instance directories
+      execute.throw = sh, -c, (cat, "mkdir -p ", (cfg.basedir), "/session ", (cfg.watch), " ", (cfg.logs))
+
+      # Listening port for incoming peer traffic (fixed; you can also randomize it)
+      network.port_range.set = ${toString cfg.port}-${toString cfg.port}
+      network.port_random.set = no
+
+      # Tracker-less torrent and UDP tracker support
+      # (conservative settings for 'private' trackers, change for 'public')
+      dht.mode.set = disable
+      protocol.pex.set = no
+      trackers.use_udp.set = no
+
+      # Peer settings
+      throttle.max_uploads.set = 100
+      throttle.max_uploads.global.set = 250
+
+      throttle.min_peers.normal.set = 20
+      throttle.max_peers.normal.set = 60
+      throttle.min_peers.seed.set = 30
+      throttle.max_peers.seed.set = 80
+      trackers.numwant.set = 80
+
+      protocol.encryption.set = allow_incoming,try_outgoing,enable_retry
+
+      # Limits for file handle resources, this is optimized for
+      # an `ulimit` of 1024 (a common default). You MUST leave
+      # a ceiling of handles reserved for rTorrent's internal needs!
+      network.http.max_open.set = 50
+      network.max_open_files.set = 600
+      network.max_open_sockets.set = 3000
+
+      # Memory resource usage (increase if you have a large number of items loaded,
+      # and/or the available resources to spend)
+      pieces.memory.max.set = 1800M
+      network.xmlrpc.size_limit.set = 4M
+
+      # Basic operational settings (no need to change these)
+      session.path.set = (cat, (cfg.basedir), "session/")
+      directory.default.set = "${cfg.downloadDir}"
+      log.execute = (cat, (cfg.logs), "execute.log")
+      ##log.xmlrpc = (cat, (cfg.logs), "xmlrpc.log")
+      execute.nothrow = sh, -c, (cat, "echo >", (session.path), "rtorrent.pid", " ", (system.pid))
+
+      # Other operational settings (check & adapt)
+      encoding.add = utf8
+      system.umask.set = 0027
+      system.cwd.set = (cfg.basedir)
+      network.http.dns_cache_timeout.set = 25
+      schedule2 = monitor_diskspace, 15, 60, ((close_low_diskspace, 1000M))
+
+      # Watch directories (add more as you like, but use unique schedule names)
+      #schedule2 = watch_start, 10, 10, ((load.start, (cat, (cfg.watch), "start/*.torrent")))
+      #schedule2 = watch_load, 11, 10, ((load.normal, (cat, (cfg.watch), "load/*.torrent")))
+
+      # Logging:
+      #   Levels = critical error warn notice info debug
+      #   Groups = connection_* dht_* peer_* rpc_* storage_* thread_* tracker_* torrent_*
+      print = (cat, "Logging to ", (cfg.logfile))
+      log.open_file = "log", (cfg.logfile)
+      log.add_output = "info", "log"
+      ##log.add_output = "tracker_debug", "log"
+
+      # XMLRPC
+      scgi_local = (cfg.rpcsock)
+      schedule = scgi_group,0,0,"execute.nothrow=chown,\":rtorrent\",(cfg.rpcsock)"
+      schedule = scgi_permission,0,0,"execute.nothrow=chmod,\"g+w,o=\",(cfg.rpcsock)"
+    '';
+
+    systemd = {
+      services = {
+        rtorrent = let
+          rtorrentConfigFile = pkgs.writeText "rtorrent.rc" cfg.configText;
+        in {
+          description = "rTorrent system service";
+          after = [ "network.target" ];
+          path = [ cfg.package pkgs.bash ];
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            User = cfg.user;
+            Group = cfg.group;
+            Type = "simple";
+            Restart = "on-failure";
+            WorkingDirectory = cfg.dataDir;
+            ExecStartPre = ''${pkgs.bash}/bin/bash -c "if test -e ${cfg.dataDir}/session/rtorrent.lock && test -z $(${pkgs.procps}/bin/pidof rtorrent); then rm -f ${cfg.dataDir}/session/rtorrent.lock; fi"'';
+            ExecStart = "${cfg.package}/bin/rtorrent -n -o system.daemon.set=true -o import=${rtorrentConfigFile}";
+            RuntimeDirectory = "rtorrent";
+            RuntimeDirectoryMode = 755;
+          };
+        };
+      };
+
+      tmpfiles.rules = [ "d '${cfg.dataDir}' ${cfg.dataPermissions} ${cfg.user} ${cfg.group} -" ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/torrent/transmission.nix b/nixpkgs/nixos/modules/services/torrent/transmission.nix
new file mode 100644
index 000000000000..5efb9334ea03
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/torrent/transmission.nix
@@ -0,0 +1,497 @@
+{ config, lib, pkgs, options, ... }:
+
+with lib;
+
+let
+  cfg = config.services.transmission;
+  opt = options.services.transmission;
+  inherit (config.environment) etc;
+  apparmor = config.security.apparmor;
+  rootDir = "/run/transmission";
+  settingsDir = ".config/transmission-daemon";
+  downloadsDir = "Downloads";
+  incompleteDir = ".incomplete";
+  watchDir = "watchdir";
+  settingsFormat = pkgs.formats.json {};
+  settingsFile = settingsFormat.generate "settings.json" cfg.settings;
+in
+{
+  imports = [
+    (mkRenamedOptionModule ["services" "transmission" "port"]
+                           ["services" "transmission" "settings" "rpc-port"])
+    (mkAliasOptionModuleMD ["services" "transmission" "openFirewall"]
+                           ["services" "transmission" "openPeerPorts"])
+  ];
+  options = {
+    services.transmission = {
+      enable = mkEnableOption (lib.mdDoc "transmission") // {
+        description = lib.mdDoc ''
+          Whether to enable the headless Transmission BitTorrent daemon.
+
+          Transmission daemon can be controlled via the RPC interface using
+          transmission-remote, the WebUI (http://127.0.0.1:9091/ by default),
+          or other clients like stig or tremc.
+
+          Torrents are downloaded to [](#opt-services.transmission.home)/${downloadsDir} by default and are
+          accessible to users in the "transmission" group.
+        '';
+      };
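+      # A minimal usage sketch (illustrative only, not part of this module):
+      # enabling the daemon and reaching the RPC interface from the local
+      # network could look roughly like
+      #   services.transmission = {
+      #     enable = true;
+      #     openRPCPort = true;
+      #     settings.rpc-bind-address = "0.0.0.0";
+      #   };
+      # adjust the RPC exposure to your own security requirements.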
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Settings whose options overwrite fields in
+          `.config/transmission-daemon/settings.json`
+          (each time the service starts).
+
+          See [Transmission's Wiki](https://github.com/transmission/transmission/wiki/Editing-Configuration-Files)
+          for documentation of settings not explicitly covered by this module.
+        '';
+        default = {};
+        type = types.submodule {
+          freeformType = settingsFormat.type;
+          options.download-dir = mkOption {
+            type = types.path;
+            default = "${cfg.home}/${downloadsDir}";
+            defaultText = literalExpression ''"''${config.${opt.home}}/${downloadsDir}"'';
+            description = lib.mdDoc "Directory where to download torrents.";
+          };
+          options.incomplete-dir = mkOption {
+            type = types.path;
+            default = "${cfg.home}/${incompleteDir}";
+            defaultText = literalExpression ''"''${config.${opt.home}}/${incompleteDir}"'';
+            description = lib.mdDoc ''
+              When enabled with
+              [](#opt-services.transmission.settings.incomplete-dir-enabled),
+              new torrents will download their files to this directory.
+              When complete, the files will be moved to
+              [](#opt-services.transmission.settings.download-dir).
+            '';
+          };
+          options.incomplete-dir-enabled = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc "";
+          };
+          options.message-level = mkOption {
+            type = types.ints.between 0 3;
+            default = 2;
+            description = lib.mdDoc "Set verbosity of transmission messages.";
+          };
+          options.peer-port = mkOption {
+            type = types.port;
+            default = 51413;
+            description = lib.mdDoc "The peer port to listen for incoming connections.";
+          };
+          options.peer-port-random-high = mkOption {
+            type = types.port;
+            default = 65535;
+            description = lib.mdDoc ''
+              The maximum peer port to listen to for incoming connections
+              when [](#opt-services.transmission.settings.peer-port-random-on-start) is enabled.
+            '';
+          };
+          options.peer-port-random-low = mkOption {
+            type = types.port;
+            default = 65535;
+            description = lib.mdDoc ''
+              The minimum peer port to listen to for incoming connections
+              when [](#opt-services.transmission.settings.peer-port-random-on-start) is enabled.
+            '';
+          };
+          options.peer-port-random-on-start = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Randomize the peer port.";
+          };
+          options.rpc-bind-address = mkOption {
+            type = types.str;
+            default = "127.0.0.1";
+            example = "0.0.0.0";
+            description = lib.mdDoc ''
+              Where to listen for RPC connections.
+              Use `0.0.0.0` to listen on all interfaces.
+            '';
+          };
+          options.rpc-port = mkOption {
+            type = types.port;
+            default = 9091;
+            description = lib.mdDoc "The RPC port to listen to.";
+          };
+          options.script-torrent-done-enabled = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether to run
+              [](#opt-services.transmission.settings.script-torrent-done-filename)
+              at torrent completion.
+            '';
+          };
+          options.script-torrent-done-filename = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            description = lib.mdDoc "Executable to be run at torrent completion.";
+          };
+          options.umask = mkOption {
+            type = types.int;
+            default = 2;
+            description = lib.mdDoc ''
+              Sets transmission's file mode creation mask.
+              See the umask(2) manpage for more information.
+              Users who want their saved torrents to be world-writable
+              may want to set this value to 0.
+              Bear in mind that JSON only accepts numbers in base 10,
+              so the standard umask(2) octal notation "022" is written in settings.json as 18.
+            '';
+          };
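+          # Worked example for the note above: the octal umask "022" is
+          # 0*64 + 2*8 + 2 = 18 in decimal, and "002" is simply 2, which is what
+          # the default of this option corresponds to.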
+          options.utp-enabled = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc ''
+              Whether to enable [Micro Transport Protocol (µTP)](https://en.wikipedia.org/wiki/Micro_Transport_Protocol).
+            '';
+          };
+          options.watch-dir = mkOption {
+            type = types.path;
+            default = "${cfg.home}/${watchDir}";
+            defaultText = literalExpression ''"''${config.${opt.home}}/${watchDir}"'';
+            description = lib.mdDoc "Watch a directory for torrent files and add them to transmission.";
+          };
+          options.watch-dir-enabled = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''Whether to enable the
+              [](#opt-services.transmission.settings.watch-dir).
+            '';
+          };
+          options.trash-original-torrent-files = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''Whether to delete torrents added from the
+              [](#opt-services.transmission.settings.watch-dir).
+            '';
+          };
+        };
+      };
+
+      package = mkPackageOptionMD pkgs "transmission" {};
+
+      downloadDirPermissions = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "770";
+        description = lib.mdDoc ''
+          If not `null`, this value is used as the permissions
+          set by `system.activationScripts.transmission-daemon`
+          on the directories [](#opt-services.transmission.settings.download-dir),
+          [](#opt-services.transmission.settings.incomplete-dir),
+          and [](#opt-services.transmission.settings.watch-dir).
+          Note that you may also want to change
+          [](#opt-services.transmission.settings.umask).
+        '';
+      };
+
+      home = mkOption {
+        type = types.path;
+        default = "/var/lib/transmission";
+        description = lib.mdDoc ''
+          The directory where Transmission will create `${settingsDir}`,
+          as well as `${downloadsDir}/` unless
+          [](#opt-services.transmission.settings.download-dir) is changed,
+          and `${incompleteDir}/` unless
+          [](#opt-services.transmission.settings.incomplete-dir) is changed.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "transmission";
+        description = lib.mdDoc "User account under which Transmission runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "transmission";
+        description = lib.mdDoc "Group account under which Transmission runs.";
+      };
+
+      credentialsFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to a JSON file to be merged with the settings.
+          Useful to merge a file which is better kept out of the Nix store
+          to set secret config parameters like `rpc-password`.
+        '';
+        default = "/dev/null";
+        example = "/var/lib/secrets/transmission/settings.json";
+      };
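+      # A hypothetical credentials file for the option above could contain just
+      # the secret fields to merge into settings.json, e.g.:
+      #   { "rpc-authentication-required": true,
+      #     "rpc-username": "alice",
+      #     "rpc-password": "..." }
+      # (keys follow Transmission's settings.json; the values here are made up).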
+
+      extraFlags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--log-debug" ];
+        description = lib.mdDoc ''
+          Extra flags passed to the transmission command in the service definition.
+        '';
+      };
+
+      openPeerPorts = mkEnableOption (lib.mdDoc "opening of the peer port(s) in the firewall");
+
+      openRPCPort = mkEnableOption (lib.mdDoc "opening of the RPC port in the firewall");
+
+      performanceNetParameters = mkEnableOption (lib.mdDoc "performance tweaks") // {
+        description = lib.mdDoc ''
+          Whether to enable tweaking of kernel parameters
+          to open many more connections at the same time.
+
+          Note that you may also want to increase
+          `peer-limit-global`.
+          Be aware that these settings are quite aggressive
+          and might not suit your regular desktop use.
+          For instance, SSH sessions may time out more easily.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Note that using systemd.tmpfiles would not work here
+    # because it would fail when creating a directory
+    # with a different owner than its parent directory, by saying:
+    # Detected unsafe path transition /home/foo → /home/foo/Downloads during canonicalization of /home/foo/Downloads
+    # when /home/foo is not owned by cfg.user.
+    # Note also that using an ExecStartPre= wouldn't work either
+    # because BindPaths= needs these directories before.
+    system.activationScripts = mkIf (cfg.downloadDirPermissions != null)
+      { transmission-daemon = ''
+        install -d -m 700 '${cfg.home}/${settingsDir}'
+        chown -R '${cfg.user}:${cfg.group}' '${cfg.home}/${settingsDir}'
+        install -d -m '${cfg.downloadDirPermissions}' -o '${cfg.user}' -g '${cfg.group}' '${cfg.settings.download-dir}'
+        '' + optionalString cfg.settings.incomplete-dir-enabled ''
+        install -d -m '${cfg.downloadDirPermissions}' -o '${cfg.user}' -g '${cfg.group}' '${cfg.settings.incomplete-dir}'
+        '' + optionalString cfg.settings.watch-dir-enabled ''
+        install -d -m '${cfg.downloadDirPermissions}' -o '${cfg.user}' -g '${cfg.group}' '${cfg.settings.watch-dir}'
+        '';
+      };
+
+    systemd.services.transmission = {
+      description = "Transmission BitTorrent Service";
+      after = [ "network.target" ] ++ optional apparmor.enable "apparmor.service";
+      requires = optional apparmor.enable "apparmor.service";
+      wantedBy = [ "multi-user.target" ];
+      environment.CURL_CA_BUNDLE = etc."ssl/certs/ca-certificates.crt".source;
+
+      serviceConfig = {
+        # Use "+" because credentialsFile may not be accessible to User= or Group=.
+        ExecStartPre = [("+" + pkgs.writeShellScript "transmission-prestart" ''
+          set -eu${lib.optionalString (cfg.settings.message-level >= 3) "x"}
+          ${pkgs.jq}/bin/jq --slurp add ${settingsFile} '${cfg.credentialsFile}' |
+          install -D -m 600 -o '${cfg.user}' -g '${cfg.group}' /dev/stdin \
+           '${cfg.home}/${settingsDir}/settings.json'
+        '')];
+        ExecStart="${cfg.package}/bin/transmission-daemon -f -g ${cfg.home}/${settingsDir} ${escapeShellArgs cfg.extraFlags}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        User = cfg.user;
+        Group = cfg.group;
+        # Create rootDir in the host's mount namespace.
+        RuntimeDirectory = [(baseNameOf rootDir)];
+        RuntimeDirectoryMode = "755";
+        # This is for BindPaths= and BindReadOnlyPaths=
+        # to allow traversal of directories they create in RootDirectory=.
+        UMask = "0066";
+        # Using RootDirectory= makes it possible
+        # to use the same paths download-dir/incomplete-dir
+        # (which appear in user's interfaces) without requiring cfg.user
+        # to have access to their parent directories,
+        # by using BindPaths=/BindReadOnlyPaths=.
+        # Note that TemporaryFileSystem= could have been used instead
+        # but not without adding some BindPaths=/BindReadOnlyPaths=
+        # that would only be needed for ExecStartPre=,
+        # because RootDirectoryStartOnly=true would not help.
+        RootDirectory = rootDir;
+        RootDirectoryStartOnly = true;
+        MountAPIVFS = true;
+        BindPaths =
+          [ "${cfg.home}/${settingsDir}"
+            cfg.settings.download-dir
+          ] ++
+          optional cfg.settings.incomplete-dir-enabled
+            cfg.settings.incomplete-dir ++
+          optional (cfg.settings.watch-dir-enabled && cfg.settings.trash-original-torrent-files)
+            cfg.settings.watch-dir;
+        BindReadOnlyPaths = [
+          # Unlike systemd-confinement.nix, /nix/store is not confined here;
+          # an AppArmor profile is provided to get a confinement based upon paths and rights.
+          builtins.storeDir
+          "/etc"
+          "/run"
+          ] ++
+          optional (cfg.settings.script-torrent-done-enabled &&
+                    cfg.settings.script-torrent-done-filename != null)
+            cfg.settings.script-torrent-done-filename ++
+          optional (cfg.settings.watch-dir-enabled && !cfg.settings.trash-original-torrent-files)
+            cfg.settings.watch-dir;
+        StateDirectory = [
+          "transmission"
+          "transmission/.config/transmission-daemon"
+          "transmission/.incomplete"
+          "transmission/Downloads"
+          "transmission/watch-dir"
+        ];
+        StateDirectoryMode = mkDefault 750;
+        # The following options are only for optimizing:
+        # systemd-analyze security transmission
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateNetwork = mkDefault false;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        # ProtectHome=true would not allow BindPaths= to work across /home,
+        # and ProtectHome=tmpfs would break statfs(),
+        # preventing transmission-daemon to report the available free space.
+        # However, RootDirectory= is used, so this is not a security concern
+        # since there would be nothing in /home but any BindPaths= wanted by the user.
+        ProtectHome = "read-only";
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        # AF_UNIX may become usable one day:
+        # https://github.com/transmission/transmission/issues/441
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallFilter = [
+          "@system-service"
+          # Groups in @system-service which do not contain a syscall
+          # listed by perf stat -e 'syscalls:sys_enter_*' transmission-daemon -f
+          # in tests, and seem likely not necessary for transmission-daemon.
+          "~@aio" "~@chown" "~@keyring" "~@memlock" "~@resources" "~@setuid" "~@timer"
+          # In the @privileged group, but reached when querying infos through RPC (eg. with stig).
+          "quotactl"
+        ];
+        SystemCallArchitectures = "native";
+      };
+    };
+
+    # It's useful to have transmission in path, e.g. for remote control
+    environment.systemPackages = [ cfg.package ];
+
+    users.users = optionalAttrs (cfg.user == "transmission") ({
+      transmission = {
+        group = cfg.group;
+        uid = config.ids.uids.transmission;
+        description = "Transmission BitTorrent user";
+        home = cfg.home;
+      };
+    });
+
+    users.groups = optionalAttrs (cfg.group == "transmission") ({
+      transmission = {
+        gid = config.ids.gids.transmission;
+      };
+    });
+
+    networking.firewall = mkMerge [
+      (mkIf cfg.openPeerPorts (
+        if cfg.settings.peer-port-random-on-start
+        then
+          { allowedTCPPortRanges =
+              [ { from = cfg.settings.peer-port-random-low;
+                  to   = cfg.settings.peer-port-random-high;
+                }
+              ];
+            allowedUDPPortRanges =
+              [ { from = cfg.settings.peer-port-random-low;
+                  to   = cfg.settings.peer-port-random-high;
+                }
+              ];
+          }
+        else
+          { allowedTCPPorts = [ cfg.settings.peer-port ];
+            allowedUDPPorts = [ cfg.settings.peer-port ];
+          }
+      ))
+      (mkIf cfg.openRPCPort { allowedTCPPorts = [ cfg.settings.rpc-port ]; })
+    ];
+
+    boot.kernel.sysctl = mkMerge [
+      # Transmission uses a single UDP socket in order to implement multiple uTP sockets,
+      # and thus expects large kernel buffers for the UDP socket,
+      # https://trac.transmissionbt.com/browser/trunk/libtransmission/tr-udp.c?rev=11956.
+      # at least up to the values hardcoded here:
+      (mkIf cfg.settings.utp-enabled {
+        "net.core.rmem_max" = mkDefault 4194304; # 4MB
+        "net.core.wmem_max" = mkDefault "1048576"; # 1MB
+      })
+      (mkIf cfg.performanceNetParameters {
+        # Increase the number of available source (local) TCP and UDP ports to 49152.
+        # Usual default is 32768 60999, i.e. 28232 ports.
+        # Find out your current usage with: ss -s
+        "net.ipv4.ip_local_port_range" = mkDefault "16384 65535";
+        # Time out generic TCP states faster.
+        # Usual default is 600.
+        # Find out your current usage with: watch -n 1 netstat -nptuo
+        "net.netfilter.nf_conntrack_generic_timeout" = mkDefault 60;
+        # Time out established but inactive connections faster.
+        # Usual default is 432000.
+        "net.netfilter.nf_conntrack_tcp_timeout_established" = mkDefault 600;
+        # Clear TCP states immediately after timeout.
+        # Usual default is 120.
+        "net.netfilter.nf_conntrack_tcp_timeout_time_wait" = mkDefault 1;
+        # Increase the number of trackable connections.
+        # Usual default is 262144.
+        # Find out your current usage with: conntrack -C
+        "net.netfilter.nf_conntrack_max" = mkDefault 1048576;
+      })
+    ];
+
+    security.apparmor.policies."bin.transmission-daemon".profile = ''
+      include "${cfg.package.apparmor}/bin.transmission-daemon"
+    '';
+    security.apparmor.includes."local/bin.transmission-daemon" = ''
+      r ${config.systemd.services.transmission.environment.CURL_CA_BUNDLE},
+
+      owner rw ${cfg.home}/${settingsDir}/**,
+      rw ${cfg.settings.download-dir}/**,
+      ${optionalString cfg.settings.incomplete-dir-enabled ''
+        rw ${cfg.settings.incomplete-dir}/**,
+      ''}
+      ${optionalString cfg.settings.watch-dir-enabled ''
+        r${optionalString cfg.settings.trash-original-torrent-files "w"} ${cfg.settings.watch-dir}/**,
+      ''}
+      profile dirs {
+        rw ${cfg.settings.download-dir}/**,
+        ${optionalString cfg.settings.incomplete-dir-enabled ''
+          rw ${cfg.settings.incomplete-dir}/**,
+        ''}
+        ${optionalString cfg.settings.watch-dir-enabled ''
+          r${optionalString cfg.settings.trash-original-torrent-files "w"} ${cfg.settings.watch-dir}/**,
+        ''}
+      }
+
+      ${optionalString (cfg.settings.script-torrent-done-enabled &&
+                        cfg.settings.script-torrent-done-filename != null) ''
+        # Stack transmission_directories profile on top of
+        # any existing profile for script-torrent-done-filename
+        # FIXME: to be tested as I'm not sure it works well with NoNewPrivileges=
+        # https://gitlab.com/apparmor/apparmor/-/wikis/AppArmorStacking#seccomp-and-no_new_privs
+        px ${cfg.settings.script-torrent-done-filename} -> &@{dirs},
+      ''}
+    '';
+  };
+
+  meta.maintainers = with lib.maintainers; [ julm ];
+}
diff --git a/nixpkgs/nixos/modules/services/tracing/tempo.nix b/nixpkgs/nixos/modules/services/tracing/tempo.nix
new file mode 100644
index 000000000000..0b9ca2398b16
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/tracing/tempo.nix
@@ -0,0 +1,80 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption types;
+
+  cfg = config.services.tempo;
+
+  settingsFormat = pkgs.formats.yaml {};
+in {
+  options.services.tempo = {
+    enable = mkEnableOption (lib.mdDoc "Grafana Tempo");
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+      description = lib.mdDoc ''
+        Specify the configuration for Tempo in Nix.
+
+        See <https://grafana.com/docs/tempo/latest/configuration/> for available options.
+      '';
+    };
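+    # A minimal sketch of such a configuration (keys follow Tempo's documented
+    # schema; the listener port and storage path are illustrative assumptions):
+    #   services.tempo.settings = {
+    #     server.http_listen_port = 3200;
+    #     storage.trace = { backend = "local"; local.path = "/var/lib/tempo/traces"; };
+    #   };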
+
+    configFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Specify a path to a configuration file that Tempo should use.
+      '';
+    };
+
+    extraFlags = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = lib.literalExpression
+        ''
+          [ "-config.expand-env=true" ]
+        '';
+      description = lib.mdDoc ''
+        Additional flags to pass to the `ExecStart=` in `tempo.service`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # for tempo-cli and friends
+    environment.systemPackages = [ pkgs.tempo ];
+
+    assertions = [{
+      assertion = (
+        (cfg.settings == {}) != (cfg.configFile == null)
+      );
+      message = ''
+        Please specify a configuration for Tempo with either
+        'services.tempo.settings' or
+        'services.tempo.configFile'.
+      '';
+    }];
+
+    systemd.services.tempo = {
+      description = "Grafana Tempo Service Daemon";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = let
+        conf = if cfg.configFile == null
+               then settingsFormat.generate "config.yaml" cfg.settings
+               else cfg.configFile;
+      in
+      {
+        ExecStart = "${pkgs.tempo}/bin/tempo --config.file=${conf} ${lib.escapeShellArgs cfg.extraFlags}";
+        DynamicUser = true;
+        Restart = "always";
+        ProtectSystem = "full";
+        DevicePolicy = "closed";
+        NoNewPrivileges = true;
+        WorkingDirectory = "/var/lib/tempo";
+        StateDirectory = "tempo";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/ttys/getty.nix b/nixpkgs/nixos/modules/services/ttys/getty.nix
new file mode 100644
index 000000000000..22ae9c27e5bc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/ttys/getty.nix
@@ -0,0 +1,161 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.getty;
+
+  baseArgs = [
+    "--login-program" "${cfg.loginProgram}"
+  ] ++ optionals (cfg.autologinUser != null) [
+    "--autologin" cfg.autologinUser
+  ] ++ optionals (cfg.loginOptions != null) [
+    "--login-options" cfg.loginOptions
+  ] ++ cfg.extraArgs;
+
+  gettyCmd = args:
+    "@${pkgs.util-linux}/sbin/agetty agetty ${escapeShellArgs baseArgs} ${args}";
+
+in
+
+{
+
+  ###### interface
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "mingetty" ] [ "services" "getty" ])
+    (mkRemovedOptionModule [ "services" "getty" "serialSpeed" ] ''set non-standard baudrates with `boot.kernelParams`, e.g. boot.kernelParams = ["console=ttyS2,1500000"];'')
+  ];
+
+  options = {
+
+    services.getty = {
+
+      autologinUser = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Username of the account that will be automatically logged in at the console.
+          If unspecified, a login prompt is shown as usual.
+        '';
+      };
+
+      loginProgram = mkOption {
+        type = types.path;
+        default = "${pkgs.shadow}/bin/login";
+        defaultText = literalExpression ''"''${pkgs.shadow}/bin/login"'';
+        description = lib.mdDoc ''
+          Path to the login binary executed by agetty.
+        '';
+      };
+
+      loginOptions = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Template for arguments to be passed to
+          {manpage}`login(1)`.
+
+          See {manpage}`agetty(1)` for details,
+          including security considerations.  If unspecified, agetty
+          will not be invoked with a {option}`--login-options`
+          option.
+        '';
+        example = "-h darkstar -- \\u";
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional arguments passed to agetty.
+        '';
+        example = [ "--nohostname" ];
+      };
+
+      greetingLine = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Welcome line printed by agetty.
+          The default shows the current NixOS version label, machine type and tty.
+        '';
+      };
+
+      helpLine = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Help line printed by agetty below the welcome line.
+          Used by the installation CD to give some hints on
+          how to proceed.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+    # Note: greetingLine is set here (in `config`) rather than as the option's
+    # default so that changing system.nixos.label does not rebuild the manual pages.
+    services.getty.greetingLine = mkDefault ''<<< Welcome to NixOS ${config.system.nixos.label} (\m) - \l >>>'';
+    services.getty.helpLine = mkIf (config.documentation.nixos.enable && config.documentation.doc.enable) "\nRun 'nixos-help' for the NixOS manual.";
+
+    systemd.services."getty@" =
+      { serviceConfig.ExecStart = [
+          "" # override upstream default with an empty ExecStart
+          (gettyCmd "--noclear --keep-baud %I 115200,38400,9600 $TERM")
+        ];
+        restartIfChanged = false;
+      };
+
+    systemd.services."serial-getty@" =
+      { serviceConfig.ExecStart = [
+          "" # override upstream default with an empty ExecStart
+          (gettyCmd "%I --keep-baud $TERM")
+        ];
+        restartIfChanged = false;
+      };
+
+    systemd.services."autovt@" =
+      { serviceConfig.ExecStart = [
+          "" # override upstream default with an empty ExecStart
+          (gettyCmd "--noclear %I $TERM")
+        ];
+        restartIfChanged = false;
+      };
+
+    systemd.services."container-getty@" =
+      { serviceConfig.ExecStart = [
+          "" # override upstream default with an empty ExecStart
+          (gettyCmd "--noclear --keep-baud pts/%I 115200,38400,9600 $TERM")
+        ];
+        restartIfChanged = false;
+      };
+
+    systemd.services.console-getty =
+      { serviceConfig.ExecStart = [
+          "" # override upstream default with an empty ExecStart
+          (gettyCmd "--noclear --keep-baud console 115200,38400,9600 $TERM")
+        ];
+        serviceConfig.Restart = "always";
+        restartIfChanged = false;
+        enable = mkDefault config.boot.isContainer;
+      };
+
+    environment.etc.issue = mkDefault
+      { # Friendly greeting on the virtual consoles.
+        source = pkgs.writeText "issue" ''
+
+          ${config.services.getty.greetingLine}
+          ${config.services.getty.helpLine}
+
+        '';
+      };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/ttys/gpm.nix b/nixpkgs/nixos/modules/services/ttys/gpm.nix
new file mode 100644
index 000000000000..378f6b17732f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/ttys/gpm.nix
@@ -0,0 +1,57 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.gpm;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.gpm = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable GPM, the General Purpose Mouse daemon,
+          which enables mouse support in virtual consoles.
+        '';
+      };
+
+      protocol = mkOption {
+        type = types.str;
+        default = "ps/2";
+        description = lib.mdDoc "Mouse protocol to use.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    systemd.services.gpm =
+      { description = "Console Mouse Daemon";
+
+        wantedBy = [ "multi-user.target" ];
+        requires = [ "dev-input-mice.device" ];
+        after = [ "dev-input-mice.device" ];
+
+        serviceConfig.ExecStart = "@${pkgs.gpm}/sbin/gpm gpm -m /dev/input/mice -t ${cfg.protocol}";
+        serviceConfig.Type = "forking";
+        serviceConfig.PIDFile = "/run/gpm.pid";
+      };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/ttys/kmscon.nix b/nixpkgs/nixos/modules/services/ttys/kmscon.nix
new file mode 100644
index 000000000000..0a12ef48d084
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/ttys/kmscon.nix
@@ -0,0 +1,117 @@
+{ config, pkgs, lib, ... }:
+let
+  inherit (lib) mapAttrs mkIf mkOption optional optionals types;
+
+  cfg = config.services.kmscon;
+
+  autologinArg = lib.optionalString (cfg.autologinUser != null) "-f ${cfg.autologinUser}";
+
+  configDir = pkgs.writeTextFile { name = "kmscon-config"; destination = "/kmscon.conf"; text = cfg.extraConfig; };
+in {
+  options = {
+    services.kmscon = {
+      enable = mkOption {
+        description = lib.mdDoc ''
+          Use kmscon as the virtual console instead of gettys.
+          kmscon is a KMS/DRI-based userspace virtual terminal implementation.
+          It supports a richer feature set than the standard Linux console VT,
+          including full Unicode support, and should be much faster when the
+          video card supports DRM.
+        '';
+        type = types.bool;
+        default = false;
+      };
+
+      hwRender = mkOption {
+        description = lib.mdDoc "Whether to use 3D hardware acceleration to render the console.";
+        type = types.bool;
+        default = false;
+      };
+
+      fonts = mkOption {
+        description = lib.mdDoc "Fonts used by kmscon, in order of priority.";
+        default = null;
+        example = lib.literalExpression ''[ { name = "Source Code Pro"; package = pkgs.source-code-pro; } ]'';
+        type = with types;
+          let fontType = submodule {
+                options = {
+                  name = mkOption { type = str; description = lib.mdDoc "Font name, as used by fontconfig."; };
+                  package = mkOption { type = package; description = lib.mdDoc "Package providing the font."; };
+                };
+          }; in nullOr (nonEmptyListOf fontType);
+      };
+
+      extraConfig = mkOption {
+        description = lib.mdDoc "Extra contents of the kmscon.conf file.";
+        type = types.lines;
+        default = "";
+        example = "font-size=14";
+      };
+
+      extraOptions = mkOption {
+        description = lib.mdDoc "Extra flags to pass to kmscon.";
+        type = types.separatedString " ";
+        default = "";
+        example = "--term xterm-256color";
+      };
+
+      autologinUser = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          Username of the account that will be automatically logged in at the console.
+          If unspecified, a login prompt is shown as usual.
+        '';
+      };
+    };
+  };
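+  # A usage sketch built from the examples above (values are illustrative):
+  #   services.kmscon = {
+  #     enable = true;
+  #     hwRender = true;
+  #     fonts = [ { name = "Source Code Pro"; package = pkgs.source-code-pro; } ];
+  #     extraConfig = "font-size=14";
+  #   };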
+
+  config = mkIf cfg.enable {
+    # Largely copied from unit provided with kmscon source
+    systemd.units."kmsconvt@.service".text = ''
+      [Unit]
+      Description=KMS System Console on %I
+      Documentation=man:kmscon(1)
+      After=systemd-user-sessions.service
+      After=plymouth-quit-wait.service
+      After=systemd-logind.service
+      After=systemd-vconsole-setup.service
+      Requires=systemd-logind.service
+      Before=getty.target
+      Conflicts=getty@%i.service
+      OnFailure=getty@%i.service
+      IgnoreOnIsolate=yes
+      ConditionPathExists=/dev/tty0
+
+      [Service]
+      ExecStart=
+      ExecStart=${pkgs.kmscon}/bin/kmscon "--vt=%I" ${cfg.extraOptions} --seats=seat0 --no-switchvt --configdir ${configDir} --login -- ${pkgs.shadow}/bin/login -p ${autologinArg}
+      UtmpIdentifier=%I
+      TTYPath=/dev/%I
+      TTYReset=yes
+      TTYVHangup=yes
+      TTYVTDisallocate=yes
+
+      X-RestartIfChanged=false
+    '';
+
+    systemd.suppressedSystemUnits = [ "autovt@.service" ];
+    systemd.units."kmsconvt@.service".aliases = [ "autovt@.service" ];
+
+    systemd.services.systemd-vconsole-setup.enable = false;
+    systemd.services.reload-systemd-vconsole-setup.enable = false;
+
+    services.kmscon.extraConfig =
+      let
+        render = optionals cfg.hwRender [ "drm" "hwaccel" ];
+        fonts = optional (cfg.fonts != null) "font-name=${lib.concatMapStringsSep ", " (f: f.name) cfg.fonts}";
+      in lib.concatStringsSep "\n" (render ++ fonts);
+
+    hardware.opengl.enable = mkIf cfg.hwRender true;
+
+    fonts = mkIf (cfg.fonts != null) {
+      fontconfig.enable = true;
+      packages = map (f: f.package) cfg.fonts;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/video/epgstation/default.nix b/nixpkgs/nixos/modules/services/video/epgstation/default.nix
new file mode 100644
index 000000000000..fca483b0dbd7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/epgstation/default.nix
@@ -0,0 +1,346 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+  cfg = config.services.epgstation;
+  opt = options.services.epgstation;
+
+  description = "EPGStation: DVR system for Mirakurun-managed TV tuners";
+
+  username = config.users.users.epgstation.name;
+  groupname = config.users.users.epgstation.group;
+  mirakurun = {
+    sock = config.services.mirakurun.unixSocket;
+    option = options.services.mirakurun.unixSocket;
+  };
+
+  yaml = pkgs.formats.yaml { };
+  settingsTemplate = yaml.generate "config.yml" cfg.settings;
+  preStartScript = pkgs.writeScript "epgstation-prestart" ''
+    #!${pkgs.runtimeShell}
+
+    DB_PASSWORD_FILE=${lib.escapeShellArg cfg.database.passwordFile}
+
+    if [[ ! -f "$DB_PASSWORD_FILE" ]]; then
+      printf "[FATAL] File containing the DB password was not found in '%s'. Double check the NixOS option '%s'." \
+        "$DB_PASSWORD_FILE" ${lib.escapeShellArg opt.database.passwordFile} >&2
+      exit 1
+    fi
+
+    DB_PASSWORD="$(head -n1 ${lib.escapeShellArg cfg.database.passwordFile})"
+
+    # setup configuration
+    touch /etc/epgstation/config.yml
+    chmod 640 /etc/epgstation/config.yml
+    sed \
+      -e "s,@dbPassword@,$DB_PASSWORD,g" \
+      ${settingsTemplate} > /etc/epgstation/config.yml
+    chown "${username}:${groupname}" /etc/epgstation/config.yml
+
+    # NOTE: Use password authentication, since mysqljs does not yet support auth_socket
+    if [ ! -e /var/lib/epgstation/db-created ]; then
+      ${pkgs.mariadb}/bin/mysql -e \
+        "GRANT ALL ON \`${cfg.database.name}\`.* TO '${username}'@'localhost' IDENTIFIED by '$DB_PASSWORD';"
+      touch /var/lib/epgstation/db-created
+    fi
+  '';
+
+  streamingConfig = lib.importJSON ./streaming.json;
+  logConfig = yaml.generate "logConfig.yml" {
+    appenders.stdout.type = "stdout";
+    categories = {
+      default = { appenders = [ "stdout" ]; level = "info"; };
+      system = { appenders = [ "stdout" ]; level = "info"; };
+      access = { appenders = [ "stdout" ]; level = "info"; };
+      stream = { appenders = [ "stdout" ]; level = "info"; };
+    };
+  };
+
+  # Deprecate top level options that are redundant.
+  deprecateTopLevelOption = config:
+    lib.mkRenamedOptionModule
+      ([ "services" "epgstation" ] ++ config)
+      ([ "services" "epgstation" "settings" ] ++ config);
+
+  removeOption = config: instruction:
+    lib.mkRemovedOptionModule
+      ([ "services" "epgstation" ] ++ config)
+      instruction;
+in
+{
+  meta.maintainers = with lib.maintainers; [ midchildan ];
+
+  imports = [
+    (deprecateTopLevelOption [ "port" ])
+    (deprecateTopLevelOption [ "socketioPort" ])
+    (deprecateTopLevelOption [ "clientSocketioPort" ])
+    (removeOption [ "basicAuth" ]
+      "Use a TLS-terminated reverse proxy with authentication instead.")
+  ];
+
+  options.services.epgstation = {
+    enable = lib.mkEnableOption (lib.mdDoc description);
+
+    package = lib.mkPackageOptionMD pkgs "epgstation" { };
+
+    ffmpeg = lib.mkPackageOptionMD pkgs "ffmpeg" {
+      default = [ "ffmpeg-headless" ];
+      example = "pkgs.ffmpeg-full";
+    };
+
+    usePreconfiguredStreaming = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Use preconfigured default streaming options.
+
+        Upstream defaults:
+        <https://github.com/l3tnun/EPGStation/blob/master/config/config.yml.template>
+      '';
+    };
+
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open ports in the firewall for the EPGStation web interface.
+
+        ::: {.warning}
+        Exposing EPGStation to the open internet is generally advised
+        against. Only use it inside a trusted local network, or consider
+        putting it behind a VPN if you want remote access.
+        :::
+      '';
+    };
+
+    database = {
+      name = lib.mkOption {
+        type = lib.types.str;
+        default = "epgstation";
+        description = lib.mdDoc ''
+          Name of the MySQL database that holds EPGStation's data.
+        '';
+      };
+
+      passwordFile = lib.mkOption {
+        type = lib.types.path;
+        example = "/run/keys/epgstation-db-password";
+        description = lib.mdDoc ''
+          A file containing the password for the database named
+          {option}`database.name`.
+        '';
+      };
+    };
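+      # The password file is read by the pre-start script above; a hypothetical
+      # way to provision it outside the Nix store would be, for example:
+      #   install -d -m 700 /run/keys
+      #   printf '%s' 'changeme' > /run/keys/epgstation-db-password
+      # or any other secrets-management mechanism you already use.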
+
+    # The defaults for some options come from the upstream template
+    # configuration, which is the one that users would get if they follow the
+    # upstream instructions. This is, in some cases, different from the
+    # application defaults. Some options like encodeProcessNum and
+    # concurrentEncodeNum don't have an optimal default value that works for
+    # all hardware setups and/or performance requirements. For those kinds of
+    # options, the application default wouldn't always result in the expected
+    # out-of-the-box behavior because it's the responsibility of the user to
+    # configure them according to their needs. In these cases, the value in the
+    # upstream template configuration should serve as a "good enough" default.
+    settings = lib.mkOption {
+      description = lib.mdDoc ''
+        Options to add to config.yml.
+
+        Documentation:
+        <https://github.com/l3tnun/EPGStation/blob/master/doc/conf-manual.md>
+      '';
+
+      default = { };
+      example = {
+        recPriority = 20;
+        conflictPriority = 10;
+      };
+
+      type = lib.types.submodule {
+        freeformType = yaml.type;
+
+        options.port = lib.mkOption {
+          type = lib.types.port;
+          default = 20772;
+          description = lib.mdDoc ''
+            HTTP port for EPGStation to listen on.
+          '';
+        };
+
+        options.socketioPort = lib.mkOption {
+          type = lib.types.port;
+          default = cfg.settings.port + 1;
+          defaultText = lib.literalExpression "config.${opt.settings}.port + 1";
+          description = lib.mdDoc ''
+            Socket.io port for EPGStation to listen on. It is valid to share
+            ports with {option}`${opt.settings}.port`.
+          '';
+        };
+
+        options.clientSocketioPort = lib.mkOption {
+          type = lib.types.port;
+          default = cfg.settings.socketioPort;
+          defaultText = lib.literalExpression "config.${opt.settings}.socketioPort";
+          description = lib.mdDoc ''
+            Socket.io port that the web client is going to connect to. This may
+            be different from {option}`${opt.settings}.socketioPort` if
+            EPGStation is hidden behind a reverse proxy.
+          '';
+        };
+
+        options.mirakurunPath = with mirakurun; lib.mkOption {
+          type = lib.types.str;
+          default = "http+unix://${lib.replaceStrings ["/"] ["%2F"] sock}";
+          defaultText = lib.literalExpression ''
+            "http+unix://''${lib.replaceStrings ["/"] ["%2F"] config.${option}}"
+          '';
+          example = "http://localhost:40772";
+          description = lib.mdDoc "URL to connect to Mirakurun.";
+        };
+
+        options.encodeProcessNum = lib.mkOption {
+          type = lib.types.ints.positive;
+          default = 4;
+          description = lib.mdDoc ''
+            The maximum number of processes that EPGStation would allow to run
+            at the same time for encoding or streaming videos.
+          '';
+        };
+
+        options.concurrentEncodeNum = lib.mkOption {
+          type = lib.types.ints.positive;
+          default = 1;
+          description = lib.mdDoc ''
+            The maximum number of encoding jobs that EPGStation would run at the
+            same time.
+          '';
+        };
+
+        options.encode = lib.mkOption {
+          type = with lib.types; listOf attrs;
+          description = lib.mdDoc "Encoding presets for recorded videos.";
+          default = [
+            {
+              name = "H.264";
+              cmd = "%NODE% ${cfg.package}/libexec/enc.js";
+              suffix = ".mp4";
+            }
+          ];
+          defaultText = lib.literalExpression ''
+            [
+              {
+                name = "H.264";
+                cmd = "%NODE% config.${opt.package}/libexec/enc.js";
+                suffix = ".mp4";
+              }
+            ]
+          '';
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !(lib.hasAttr "readOnlyOnce" cfg.settings);
+        message = ''
+          The option config.${opt.settings}.readOnlyOnce can no longer be used
+          since it's been removed. No replacements are available.
+        '';
+      }
+    ];
+
+    environment.etc = {
+      "epgstation/epgUpdaterLogConfig.yml".source = logConfig;
+      "epgstation/operatorLogConfig.yml".source = logConfig;
+      "epgstation/serviceLogConfig.yml".source = logConfig;
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = with cfg.settings; [ port socketioPort ];
+    };
+
+    users.users.epgstation = {
+      description = "EPGStation user";
+      group = config.users.groups.epgstation.name;
+      isSystemUser = true;
+
+      # NPM insists on creating ~/.npm
+      home = "/var/cache/epgstation";
+    };
+
+    users.groups.epgstation = { };
+
+    services.mirakurun.enable = lib.mkDefault true;
+
+    services.mysql = {
+      enable = lib.mkDefault true;
+      package = lib.mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      # FIXME: enable once mysqljs supports auth_socket
+      # https://github.com/mysqljs/mysql/issues/1507
+      #
+      # ensureUsers = [ {
+      #   name = username;
+      #   ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+      # } ];
+    };
+
+    services.epgstation.settings =
+      let
+        defaultSettings = {
+          dbtype = lib.mkDefault "mysql";
+          mysql = {
+            socketPath = lib.mkDefault "/run/mysqld/mysqld.sock";
+            user = username;
+            password = lib.mkDefault "@dbPassword@";
+            database = cfg.database.name;
+          };
+
+          ffmpeg = lib.mkDefault "${cfg.ffmpeg}/bin/ffmpeg";
+          ffprobe = lib.mkDefault "${cfg.ffmpeg}/bin/ffprobe";
+
+          # for disambiguation with TypeScript files
+          recordedFileExtension = lib.mkDefault ".m2ts";
+        };
+      in
+      lib.mkMerge [
+        defaultSettings
+        (lib.mkIf cfg.usePreconfiguredStreaming streamingConfig)
+      ];
+
+    systemd.tmpfiles.rules = [
+      "d '/var/lib/epgstation/key' - ${username} ${groupname} - -"
+      "d '/var/lib/epgstation/streamfiles' - ${username} ${groupname} - -"
+      "d '/var/lib/epgstation/drop' - ${username} ${groupname} - -"
+      "d '/var/lib/epgstation/recorded' - ${username} ${groupname} - -"
+      "d '/var/lib/epgstation/thumbnail' - ${username} ${groupname} - -"
+      "d '/var/lib/epgstation/db/subscribers' - ${username} ${groupname} - -"
+      "d '/var/lib/epgstation/db/migrations/mysql' - ${username} ${groupname} - -"
+      "d '/var/lib/epgstation/db/migrations/postgres' - ${username} ${groupname} - -"
+      "d '/var/lib/epgstation/db/migrations/sqlite' - ${username} ${groupname} - -"
+    ];
+
+    systemd.services.epgstation = {
+      inherit description;
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ]
+        ++ lib.optional config.services.mirakurun.enable "mirakurun.service"
+        ++ lib.optional config.services.mysql.enable "mysql.service";
+
+      environment.NODE_ENV = "production";
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/epgstation start";
+        ExecStartPre = "+${preStartScript}";
+        User = username;
+        Group = groupname;
+        CacheDirectory = "epgstation";
+        StateDirectory = "epgstation";
+        LogsDirectory = "epgstation";
+        ConfigurationDirectory = "epgstation";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/video/epgstation/streaming.json b/nixpkgs/nixos/modules/services/video/epgstation/streaming.json
new file mode 100644
index 000000000000..7f8df0817fc3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/epgstation/streaming.json
@@ -0,0 +1,140 @@
+{
+  "urlscheme": {
+    "m2ts": {
+      "ios": "vlc-x-callback://x-callback-url/stream?url=PROTOCOL://ADDRESS",
+      "android": "intent://ADDRESS#Intent;package=org.videolan.vlc;type=video;scheme=PROTOCOL;end"
+    },
+    "video": {
+      "ios": "infuse://x-callback-url/play?url=PROTOCOL://ADDRESS",
+      "android": "intent://ADDRESS#Intent;package=com.mxtech.videoplayer.ad;type=video;scheme=PROTOCOL;end"
+    },
+    "download": {
+      "ios": "vlc-x-callback://x-callback-url/download?url=PROTOCOL://ADDRESS&filename=FILENAME"
+    }
+  },
+  "stream": {
+    "live": {
+      "ts": {
+        "m2ts": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -y -f mpegts pipe:1"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -y -f mpegts pipe:1"
+          },
+          {
+            "name": "無変æ›"
+          }
+        ],
+        "m2tsll": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -f mpegts -analyzeduration 500000 -i pipe:0 -map 0 -c:s copy -c:d copy -ignore_unknown -fflags nobuffer -flags low_delay -max_delay 250000 -max_interleave_delta 1 -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -flags +cgop -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -y -f mpegts pipe:1"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -f mpegts -analyzeduration 500000 -i pipe:0 -map 0 -c:s copy -c:d copy -ignore_unknown -fflags nobuffer -flags low_delay -max_delay 250000 -max_interleave_delta 1 -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -flags +cgop -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -y -f mpegts pipe:1"
+          }
+        ],
+        "webm": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 192k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:720 -b:v 3000k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 2 -c:a libvorbis -ar 48000 -b:a 128k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:480 -b:v 1500k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
+          }
+        ],
+        "mp4": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
+          }
+        ],
+        "hls": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -map 0 -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 17 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -re -dual_mono_mode main -i pipe:0 -sn -map 0 -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 17 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -flags +loop-global_header %OUTPUT%"
+          }
+        ]
+      }
+    },
+    "recorded": {
+      "ts": {
+        "webm": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 192k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:720 -b:v 3000k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 128k -ac 2 -c:v libvpx-vp9 -vf yadif,scale=-2:480 -b:v 1500k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
+          }
+        ],
+        "mp4": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
+          }
+        ],
+        "hls": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -map 0 -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf yadif,scale=-2:720 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -i pipe:0 -sn -map 0 -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf yadif,scale=-2:480 -b:v 1500k -preset veryfast -flags +loop-global_header %OUTPUT%"
+          }
+        ]
+      },
+      "encoded": {
+        "webm": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 192k -ac 2 -c:v libvpx-vp9 -vf scale=-2:720 -b:v 3000k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 3 -c:a libvorbis -ar 48000 -b:a 128k -ac 2 -c:v libvpx-vp9 -vf scale=-2:480 -b:v 1500k -deadline realtime -speed 4 -cpu-used -8 -y -f webm pipe:1"
+          }
+        ],
+        "mp4": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 0 -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf scale=-2:720 -b:v 3000k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 0 -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf scale=-2:480 -b:v 1500k -profile:v baseline -preset veryfast -tune fastdecode,zerolatency -movflags frag_keyframe+empty_moov+faststart+default_base_moof -y -f mp4 pipe:1"
+          }
+        ],
+        "hls": [
+          {
+            "name": "720p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 192k -ac 2 -c:v libx264 -vf scale=-2:720 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
+          },
+          {
+            "name": "480p",
+            "cmd": "%FFMPEG% -dual_mono_mode main -ss %SS% -i %INPUT% -sn -threads 0 -ignore_unknown -max_muxing_queue_size 1024 -f hls -hls_time 3 -hls_list_size 0 -hls_allow_cache 1 -hls_segment_filename %streamFileDir%/stream%streamNum%-%09d.ts -hls_flags delete_segments -c:a aac -ar 48000 -b:a 128k -ac 2 -c:v libx264 -vf scale=-2:480 -b:v 3000k -preset veryfast -flags +loop-global_header %OUTPUT%"
+          }
+        ]
+      }
+    }
+  }
+}
diff --git a/nixpkgs/nixos/modules/services/video/frigate.nix b/nixpkgs/nixos/modules/services/video/frigate.nix
new file mode 100644
index 000000000000..8db2bfae80ac
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/frigate.nix
@@ -0,0 +1,381 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  inherit (lib)
+    literalExpression
+    mkDefault
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    types;
+
+  cfg = config.services.frigate;
+
+  format = pkgs.formats.yaml {};
+
+  filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! lib.elem v [ null ])) cfg.settings;
+
+  cameraFormat = with types; submodule {
+    freeformType = format.type;
+    options = {
+      ffmpeg = {
+        inputs = mkOption {
+          description = mdDoc ''
+            List of inputs for this camera.
+          '';
+          type = listOf (submodule {
+            freeformType = format.type;
+            options = {
+              path = mkOption {
+                type = str;
+                example = "rtsp://192.0.2.1:554/rtsp";
+                description = mdDoc ''
+                  Stream URL
+                '';
+              };
+              roles = mkOption {
+                type = listOf (enum [ "detect" "record" "rtmp" ]);
+                example = literalExpression ''
+                  [ "detect" "rtmp" ]
+                '';
+                description = mdDoc ''
+                  List of roles for this stream
+                '';
+              };
+            };
+          });
+        };
+      };
+    };
+  };
+
+in
+
+{
+  meta.buildDocsInSandbox = false;
+
+  options.services.frigate = with types; {
+    enable = mkEnableOption (mdDoc "Frigate NVR");
+
+    package = mkOption {
+      type = package;
+      default = pkgs.frigate;
+      description = mdDoc ''
+        The frigate package to use.
+      '';
+    };
+
+    hostname = mkOption {
+      type = str;
+      example = "frigate.exampe.com";
+      description = mdDoc ''
+        Hostname of the nginx vhost to configure.
+
+        Only nginx is supported by upstream for direct reverse proxying.
+      '';
+    };
+
+    settings = mkOption {
+      type = submodule {
+        freeformType = format.type;
+        options = {
+          cameras = mkOption {
+            type = attrsOf cameraFormat;
+            description = mdDoc ''
+              Attribute set of camera configurations.
+
+              https://docs.frigate.video/configuration/cameras
+            '';
+          };
+
+          database = {
+            path = mkOption {
+              type = path;
+              default = "/var/lib/frigate/frigate.db";
+              description = mdDoc ''
+                Path to the SQLite database used
+              '';
+            };
+          };
+
+          mqtt = {
+            enabled = mkEnableOption (mdDoc "MQTT support");
+
+            host = mkOption {
+              type = nullOr str;
+              default = null;
+              example = "mqtt.example.com";
+              description = mdDoc ''
+                MQTT server hostname
+              '';
+            };
+          };
+        };
+      };
+      default = {};
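+      # Illustrative example only; the camera name and RTSP URL are placeholders.
+      example = literalExpression ''
+        {
+          cameras.driveway.ffmpeg.inputs = [
+            {
+              path = "rtsp://192.0.2.1:554/rtsp";
+              roles = [ "detect" "rtmp" ];
+            }
+          ];
+        }
+      '';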
+      description = mdDoc ''
+        Frigate configuration as a nix attribute set.
+
+        See the project documentation for how to configure frigate.
+        - [Creating a config file](https://docs.frigate.video/guides/getting_started)
+        - [Configuration reference](https://docs.frigate.video/configuration/index)
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.nginx = {
+      enable = true;
+      additionalModules = with pkgs.nginxModules; [
+        secure-token
+        rtmp
+        vod
+      ];
+      recommendedProxySettings = mkDefault true;
+      recommendedGzipSettings = mkDefault true;
+      upstreams = {
+        frigate-api.servers = {
+          "127.0.0.1:5001" = {};
+        };
+        frigate-mqtt-ws.servers = {
+          "127.0.0.1:5002" = {};
+        };
+        frigate-jsmpeg.servers = {
+          "127.0.0.1:8082" = {};
+        };
+        frigate-go2rtc.servers = {
+          "127.0.0.1:1984" = {};
+        };
+      };
+      # Based on https://github.com/blakeblackshear/frigate/blob/v0.12.0/docker/rootfs/usr/local/nginx/conf/nginx.conf
+      virtualHosts."${cfg.hostname}" = {
+        locations = {
+          "/api/" = {
+            proxyPass = "http://frigate-api/";
+          };
+          "~* /api/.*\.(jpg|jpeg|png)$" = {
+            proxyPass = "http://frigate-api";
+            extraConfig = ''
+              add_header 'Access-Control-Allow-Origin' '*';
+              add_header 'Access-Control-Allow-Methods' 'GET, POST, PUT, DELETE, OPTIONS';
+              rewrite ^/api/(.*)$ $1 break;
+            '';
+          };
+          "/vod/" = {
+            extraConfig = ''
+              aio threads;
+              vod hls;
+
+              secure_token $args;
+              secure_token_types application/vnd.apple.mpegurl;
+
+              add_header Access-Control-Allow-Headers '*';
+              add_header Access-Control-Expose-Headers 'Server,range,Content-Length,Content-Range';
+              add_header Access-Control-Allow-Methods 'GET, HEAD, OPTIONS';
+              add_header Access-Control-Allow-Origin '*';
+              add_header Cache-Control "no-store";
+              expires off;
+            '';
+          };
+          "/stream/" = {
+            # TODO
+          };
+          "/ws" = {
+            proxyPass = "http://frigate-mqtt-ws/";
+            proxyWebsockets = true;
+          };
+          "/live/jsmpeg" = {
+            proxyPass = "http://frigate-jsmpeg/";
+            proxyWebsockets = true;
+          };
+          "/live/mse/" = {
+            proxyPass = "http://frigate-go2rtc/";
+            proxyWebsockets = true;
+          };
+          "/live/webrtc/" = {
+            proxyPass = "http://frigate-go2rtc/";
+            proxyWebsockets = true;
+          };
+          "/cache/" = {
+            alias = "/var/cache/frigate/";
+          };
+          "/clips/" = {
+            root = "/var/lib/frigate";
+            extraConfig = ''
+              add_header 'Access-Control-Allow-Origin' "$http_origin" always;
+              add_header 'Access-Control-Allow-Credentials' 'true';
+              add_header 'Access-Control-Expose-Headers' 'Content-Length';
+              if ($request_method = 'OPTIONS') {
+                  add_header 'Access-Control-Allow-Origin' "$http_origin";
+                  add_header 'Access-Control-Max-Age' 1728000;
+                  add_header 'Content-Type' 'text/plain charset=UTF-8';
+                  add_header 'Content-Length' 0;
+                  return 204;
+              }
+
+              types {
+                  video/mp4 mp4;
+                  image/jpeg jpg;
+              }
+
+              autoindex on;
+            '';
+          };
+          "/recordings/" = {
+            root = "/var/lib/frigate";
+            extraConfig = ''
+              add_header 'Access-Control-Allow-Origin' "$http_origin" always;
+              add_header 'Access-Control-Allow-Credentials' 'true';
+              add_header 'Access-Control-Expose-Headers' 'Content-Length';
+              if ($request_method = 'OPTIONS') {
+                  add_header 'Access-Control-Allow-Origin' "$http_origin";
+                  add_header 'Access-Control-Max-Age' 1728000;
+                  add_header 'Content-Type' 'text/plain charset=UTF-8';
+                  add_header 'Content-Length' 0;
+                  return 204;
+              }
+
+              types {
+                  video/mp4 mp4;
+              }
+
+              autoindex on;
+              autoindex_format json;
+            '';
+          };
+          "/assets/" = {
+            root = cfg.package.web;
+            extraConfig = ''
+              access_log off;
+              expires 1y;
+              add_header Cache-Control "public";
+            '';
+          };
+          "/" = {
+            root = cfg.package.web;
+            tryFiles = "$uri $uri/ /index.html";
+            extraConfig = ''
+              add_header Cache-Control "no-store";
+              expires off;
+
+              sub_filter 'href="/BASE_PATH/' 'href="$http_x_ingress_path/';
+              sub_filter 'url(/BASE_PATH/' 'url($http_x_ingress_path/';
+              sub_filter '"/BASE_PATH/dist/' '"$http_x_ingress_path/dist/';
+              sub_filter '"/BASE_PATH/js/' '"$http_x_ingress_path/js/';
+              sub_filter '"/BASE_PATH/assets/' '"$http_x_ingress_path/assets/';
+              sub_filter '"/BASE_PATH/monacoeditorwork/' '"$http_x_ingress_path/assets/';
+              sub_filter 'return"/BASE_PATH/"' 'return window.baseUrl';
+              sub_filter '<body>' '<body><script>window.baseUrl="$http_x_ingress_path/";</script>';
+              sub_filter_types text/css application/javascript;
+              sub_filter_once off;
+            '';
+          };
+        };
+        extraConfig = ''
+          # vod settings
+          vod_base_url "";
+          vod_segments_base_url "";
+          vod_mode mapped;
+          vod_max_mapping_response_size 1m;
+          vod_upstream_location /api;
+          vod_align_segments_to_key_frames on;
+          vod_manifest_segment_durations_mode accurate;
+          vod_ignore_edit_list on;
+          vod_segment_duration 10000;
+          vod_hls_mpegts_align_frames off;
+          vod_hls_mpegts_interleave_frames on;
+          # file handle caching / aio
+          open_file_cache max=1000 inactive=5m;
+          open_file_cache_valid 2m;
+          open_file_cache_min_uses 1;
+          open_file_cache_errors on;
+          aio on;
+          # https://github.com/kaltura/nginx-vod-module#vod_open_file_thread_pool
+          vod_open_file_thread_pool default;
+          # vod caches
+          vod_metadata_cache metadata_cache 512m;
+          vod_mapping_cache mapping_cache 5m 10m;
+          # gzip manifest
+          gzip_types application/vnd.apple.mpegurl;
+        '';
+      };
+      appendConfig = ''
+        rtmp {
+            server {
+                listen 1935;
+                chunk_size 4096;
+                allow publish 127.0.0.1;
+                deny publish all;
+                allow play all;
+                application live {
+                    live on;
+                    record off;
+                    meta copy;
+                }
+            }
+        }
+      '';
+    };
+
+    systemd.services.nginx.serviceConfig.SupplementaryGroups = [
+      "frigate"
+    ];
+
+    users.users.frigate = {
+      isSystemUser = true;
+      group = "frigate";
+    };
+    users.groups.frigate = {};
+
+    systemd.services.frigate = {
+      after = [
+        "go2rtc.service"
+        "network.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+      environment = {
+        CONFIG_FILE = format.generate "frigate.yml" filteredConfig;
+        HOME = "/var/lib/frigate";
+        PYTHONPATH = cfg.package.pythonPath;
+      };
+      path = with pkgs; [
+        # unfree:
+        # config.boot.kernelPackages.nvidiaPackages.latest.bin
+        ffmpeg_5-headless
+        libva-utils
+        procps
+        radeontop
+      ] ++ lib.optionals (!stdenv.isAarch64) [
+        # not available on aarch64-linux
+        intel-gpu-tools
+      ];
+      serviceConfig = {
+        ExecStart = "${cfg.package.python.interpreter} -m frigate";
+
+        User = "frigate";
+        Group = "frigate";
+
+        UMask = "0027";
+
+        StateDirectory = "frigate";
+        StateDirectoryMode = "0750";
+
+        # Caches
+        PrivateTmp = true;
+        CacheDirectory = "frigate";
+        CacheDirectoryMode = "0750";
+
+        BindPaths = [
+          "/migrations:${cfg.package}/share/frigate/migrations:ro"
+        ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/video/go2rtc/default.nix b/nixpkgs/nixos/modules/services/video/go2rtc/default.nix
new file mode 100644
index 000000000000..1151d31b68e6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/go2rtc/default.nix
@@ -0,0 +1,115 @@
+{ lib
+, config
+, options
+, pkgs
+, ...
+}:
+
+let
+  inherit (lib)
+    literalExpression
+    mdDoc
+    mkEnableOption
+    mkOption
+    mkPackageOptionMD
+    types
+    ;
+
+  cfg = config.services.go2rtc;
+  opt = options.services.go2rtc;
+
+  format = pkgs.formats.yaml {};
+  configFile = format.generate "go2rtc.yaml" cfg.settings;
+in
+
+{
+  meta.buildDocsInSandbox = false;
+
+  options.services.go2rtc = with types; {
+    enable = mkEnableOption (mdDoc "go2rtc streaming server");
+
+    package = mkPackageOptionMD pkgs "go2rtc" { };
+
+    settings = mkOption {
+      default = {};
+      description = mdDoc ''
+        go2rtc configuration as a Nix attribute set.
+
+        See the [wiki](https://github.com/AlexxIT/go2rtc/wiki/Configuration) for possible configuration options.
+      '';
+      type = submodule {
+        freeformType = format.type;
+        options = {
+          # https://github.com/AlexxIT/go2rtc/blob/v1.5.0/README.md#module-api
+          api = {
+            listen = mkOption {
+              type = str;
+              default = ":1984";
+              example = "127.0.0.1:1984";
+              description = mdDoc ''
+                API listen address, conforming to a Go address string.
+              '';
+            };
+          };
+
+          # https://github.com/AlexxIT/go2rtc/blob/v1.5.0/README.md#source-ffmpeg
+          ffmpeg = {
+            bin = mkOption {
+              type = path;
+              default = "${lib.getBin pkgs.ffmpeg_6-headless}/bin/ffmpeg";
+              defaultText = literalExpression "\${lib.getBin pkgs.ffmpeg_6-headless}/bin/ffmpeg";
+              description = mdDoc ''
+                Path to the ffmpeg binary used for transcoding.
+              '';
+            };
+          };
+
+          # TODO: https://github.com/AlexxIT/go2rtc/blob/v1.5.0/README.md#module-rtsp
+          rtsp = {
+          };
+
+          streams = mkOption {
+            type = attrsOf (either str (listOf str));
+            default = {};
+            example = literalExpression ''
+              {
+                cam1 = "onvif://admin:password@192.168.1.123:2020";
+                cam2 = "tcp://192.168.1.123:12345";
+              }
+            '';
+            description = mdDoc ''
+              Stream source configuration. Multiple source types are supported.
+
+              Check the [configuration reference](https://github.com/AlexxIT/go2rtc/blob/v${cfg.package.version}/README.md#module-streams) for possible options.
+            '';
+          };
+
+          # TODO: https://github.com/AlexxIT/go2rtc/blob/v1.5.0/README.md#module-webrtc
+          webrtc = {
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.go2rtc = {
+      after = [
+        "network-online.target"
+      ];
+      wantedBy = [
+        "multi-user.target"
+      ];
+      serviceConfig = {
+        DynamicUser = true;
+        User = "go2rtc";
+        SupplementaryGroups = [
+          # for v4l2 devices
+          "video"
+        ];
+        StateDirectory = "go2rtc";
+        ExecStart = "${cfg.package}/bin/go2rtc -config ${configFile}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/video/mediamtx.nix b/nixpkgs/nixos/modules/services/video/mediamtx.nix
new file mode 100644
index 000000000000..50f8e8810278
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/mediamtx.nix
@@ -0,0 +1,67 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.mediamtx;
+  format = pkgs.formats.yaml {};
+in
+{
+  meta.maintainers = with lib.maintainers; [ fpletz ];
+
+  options = {
+    services.mediamtx = {
+      enable = lib.mkEnableOption (lib.mdDoc "MediaMTX");
+
+      package = lib.mkPackageOptionMD pkgs "mediamtx" { };
+
+      settings = lib.mkOption {
+        description = lib.mdDoc ''
+          Settings for MediaMTX. Refer to the defaults at
+          <https://github.com/bluenviron/mediamtx/blob/main/mediamtx.yml>.
+        '';
+        type = format.type;
+        default = {};
+        example = {
+          paths = {
+            cam = {
+              runOnInit = "\${lib.getExe pkgs.ffmpeg} -f v4l2 -i /dev/video0 -f rtsp rtsp://localhost:$RTSP_PORT/$RTSP_PATH";
+              runOnInitRestart = true;
+            };
+          };
+        };
+      };
+
+      env = lib.mkOption {
+        type = with lib.types; attrsOf anything;
+        description = lib.mdDoc "Extra environment variables for MediaMTX";
+        default = {};
+        example = {
+          MTX_CONFKEY = "mykey";
+        };
+      };
+
+      allowVideoAccess = lib.mkEnableOption (lib.mdDoc ''
+        access to video devices like cameras on the system
+      '');
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # NOTE: mediamtx watches this file and automatically reloads if it changes
+    environment.etc."mediamtx.yaml".source = format.generate "mediamtx.yaml" cfg.settings;
+
+    systemd.services.mediamtx = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = cfg.env;
+
+      serviceConfig = {
+        DynamicUser = true;
+        User = "mediamtx";
+        Group = "mediamtx";
+        SupplementaryGroups = lib.mkIf cfg.allowVideoAccess "video";
+        ExecStart = "${cfg.package}/bin/mediamtx /etc/mediamtx.yaml";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/video/mirakurun.nix b/nixpkgs/nixos/modules/services/video/mirakurun.nix
new file mode 100644
index 000000000000..31f90650ba9a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/mirakurun.nix
@@ -0,0 +1,207 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mirakurun;
+  mirakurun = pkgs.mirakurun;
+  username = config.users.users.mirakurun.name;
+  groupname = config.users.users.mirakurun.group;
+  settingsFmt = pkgs.formats.yaml {};
+
+  polkitRule = pkgs.writeTextDir "share/polkit-1/rules.d/10-mirakurun.rules" ''
+    polkit.addRule(function (action, subject) {
+      if (
+        (action.id == "org.debian.pcsc-lite.access_pcsc" ||
+          action.id == "org.debian.pcsc-lite.access_card") &&
+        subject.user == "${username}"
+      ) {
+        return polkit.Result.YES;
+      }
+    });
+  '';
+in
+  {
+    options = {
+      services.mirakurun = {
+        enable = mkEnableOption (lib.mdDoc "the Mirakurun DVR Tuner Server");
+
+        port = mkOption {
+          type = with types; nullOr port;
+          default = 40772;
+          description = lib.mdDoc ''
+            Port to listen on. If `null`, it won't listen on
+            any port.
+          '';
+        };
+
+        openFirewall = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Open ports in the firewall for Mirakurun.
+
+            ::: {.warning}
+            Exposing Mirakurun to the open internet is generally advised
+            against. Only use it inside a trusted local network, or
+            consider putting it behind a VPN if you want remote access.
+            :::
+          '';
+        };
+
+        unixSocket = mkOption {
+          type = with types; nullOr path;
+          default = "/var/run/mirakurun/mirakurun.sock";
+          description = lib.mdDoc ''
+            Path to unix socket to listen on. If `null`, it
+            won't listen on any unix sockets.
+          '';
+        };
+
+        allowSmartCardAccess = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Install polkit rules to allow Mirakurun to access smart card readers,
+            which are commonly used along with tuner devices.
+          '';
+        };
+
+        serverSettings = mkOption {
+          type = settingsFmt.type;
+          default = {};
+          example = literalExpression ''
+            {
+              highWaterMark = 25165824;
+              overflowTimeLimit = 30000;
+            };
+          '';
+          description = lib.mdDoc ''
+            Options for server.yml.
+
+            Documentation:
+            <https://github.com/Chinachu/Mirakurun/blob/master/doc/Configuration.md>
+          '';
+        };
+
+        tunerSettings = mkOption {
+          type = with types; nullOr settingsFmt.type;
+          default = null;
+          example = literalExpression ''
+            [
+              {
+                name = "tuner-name";
+                types = [ "GR" "BS" "CS" "SKY" ];
+                dvbDevicePath = "/dev/dvb/adapterX/dvrX";
+              }
+            ];
+          '';
+          description = lib.mdDoc ''
+            Options which are added to tuners.yml. If none is specified, it will
+            automatically be generated at runtime.
+
+            Documentation:
+            <https://github.com/Chinachu/Mirakurun/blob/master/doc/Configuration.md>
+          '';
+        };
+
+        channelSettings = mkOption {
+          type = with types; nullOr settingsFmt.type;
+          default = null;
+          example = literalExpression ''
+            [
+              {
+                name = "channel";
+                types = "GR";
+                channel = "0";
+              }
+            ];
+          '';
+          description = lib.mdDoc ''
+            Options which are added to channels.yml. If none is specified, it
+            will automatically be generated at runtime.
+
+            Documentation:
+            <https://github.com/Chinachu/Mirakurun/blob/master/doc/Configuration.md>
+          '';
+        };
+      };
+    };
+
+    config = mkIf cfg.enable {
+      environment.systemPackages = [ mirakurun ] ++ optional cfg.allowSmartCardAccess polkitRule;
+      environment.etc = {
+        "mirakurun/server.yml".source = settingsFmt.generate "server.yml" cfg.serverSettings;
+        "mirakurun/tuners.yml" = mkIf (cfg.tunerSettings != null) {
+          source = settingsFmt.generate "tuners.yml" cfg.tunerSettings;
+          mode = "0644";
+          user = username;
+          group = groupname;
+        };
+        "mirakurun/channels.yml" = mkIf (cfg.channelSettings != null) {
+          source = settingsFmt.generate "channels.yml" cfg.channelSettings;
+          mode = "0644";
+          user = username;
+          group = groupname;
+        };
+      };
+
+      networking.firewall = mkIf cfg.openFirewall {
+        allowedTCPPorts = mkIf (cfg.port != null) [ cfg.port ];
+      };
+
+      users.users.mirakurun = {
+        description = "Mirakurun user";
+        group = "video";
+        isSystemUser = true;
+
+        # NPM insists on creating ~/.npm
+        home = "/var/cache/mirakurun";
+      };
+
+      services.mirakurun.serverSettings = {
+        logLevel = mkDefault 2;
+        path = mkIf (cfg.unixSocket != null) cfg.unixSocket;
+        port = mkIf (cfg.port != null) cfg.port;
+      };
+
+      systemd.tmpfiles.rules = [
+        "d '/etc/mirakurun' - ${username} ${groupname} - -"
+      ];
+
+      systemd.services.mirakurun = {
+        description = mirakurun.meta.description;
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        serviceConfig = {
+          ExecStart = "${mirakurun}/bin/mirakurun start";
+          User = username;
+          Group = groupname;
+          CacheDirectory = "mirakurun";
+          RuntimeDirectory="mirakurun";
+          StateDirectory="mirakurun";
+          Nice = -10;
+          IOSchedulingClass = "realtime";
+          IOSchedulingPriority = 7;
+        };
+
+        environment = {
+          SERVER_CONFIG_PATH = "/etc/mirakurun/server.yml";
+          TUNERS_CONFIG_PATH = "/etc/mirakurun/tuners.yml";
+          CHANNELS_CONFIG_PATH = "/etc/mirakurun/channels.yml";
+          SERVICES_DB_PATH = "/var/lib/mirakurun/services.json";
+          PROGRAMS_DB_PATH = "/var/lib/mirakurun/programs.json";
+          LOGO_DATA_DIR_PATH = "/var/lib/mirakurun/logos";
+          NODE_ENV = "production";
+        };
+
+        restartTriggers = let
+          getconf = target: config.environment.etc."mirakurun/${target}.yml".source;
+          targets = [
+            "server"
+          ] ++ optional (cfg.tunerSettings != null) "tuners"
+            ++ optional (cfg.channelSettings != null) "channels";
+        in (map getconf targets);
+      };
+    };
+  }
diff --git a/nixpkgs/nixos/modules/services/video/replay-sorcery.nix b/nixpkgs/nixos/modules/services/video/replay-sorcery.nix
new file mode 100644
index 000000000000..1be02f4d6da5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/replay-sorcery.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.replay-sorcery;
+  configFile = generators.toKeyValue {} cfg.settings;
+in
+{
+  options = with types; {
+    services.replay-sorcery = {
+      enable = mkEnableOption (lib.mdDoc "the ReplaySorcery service for instant-replays");
+
+      enableSysAdminCapability = mkEnableOption (lib.mdDoc ''
+        the system admin capability to support hardware accelerated
+        video capture. This is equivalent to running ReplaySorcery as
+        root, so use with caution'');
+
+      autoStart = mkOption {
+        type = bool;
+        default = false;
+        description = lib.mdDoc "Automatically start ReplaySorcery when graphical-session.target starts.";
+      };
+
+      settings = mkOption {
+        type = attrsOf (oneOf [ str int ]);
+        default = {};
+        description = lib.mdDoc "System-wide configuration for ReplaySorcery (/etc/replay-sorcery.conf).";
+        example = literalExpression ''
+          {
+            videoInput = "hwaccel"; # requires `services.replay-sorcery.enableSysAdminCapability = true`
+            videoFramerate = 60;
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment = {
+      systemPackages = [ pkgs.replay-sorcery ];
+      etc."replay-sorcery.conf".text = configFile;
+    };
+
+    security.wrappers = mkIf cfg.enableSysAdminCapability {
+      replay-sorcery = {
+        owner = "root";
+        group = "root";
+        capabilities = "cap_sys_admin+ep";
+        source = "${pkgs.replay-sorcery}/bin/replay-sorcery";
+      };
+    };
+
+    systemd = {
+      packages = [ pkgs.replay-sorcery ];
+      user.services.replay-sorcery = {
+        wantedBy = mkIf cfg.autoStart [ "graphical-session.target" ];
+        partOf = mkIf cfg.autoStart [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = mkIf cfg.enableSysAdminCapability [
+            "" # Tell systemd to clear the existing ExecStart list, to prevent appending to it.
+            "${config.security.wrapperDir}/replay-sorcery"
+          ];
+        };
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with maintainers; [ kira-bruneau ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/video/unifi-video.nix b/nixpkgs/nixos/modules/services/video/unifi-video.nix
new file mode 100644
index 000000000000..5c93f60cbd79
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/unifi-video.nix
@@ -0,0 +1,271 @@
+{ config, lib, options, pkgs, utils, ... }:
+with lib;
+let
+  cfg = config.services.unifi-video;
+  opt = options.services.unifi-video;
+  mainClass = "com.ubnt.airvision.Main";
+  cmd = ''
+    ${pkgs.jsvc}/bin/jsvc \
+    -cwd ${stateDir} \
+    -debug \
+    -verbose:class \
+    -nodetach \
+    -user unifi-video \
+    -home ${cfg.jrePackage}/lib/openjdk \
+    -cp ${pkgs.commonsDaemon}/share/java/commons-daemon-1.2.4.jar:${stateDir}/lib/airvision.jar \
+    -pidfile ${cfg.pidFile} \
+    -procname unifi-video \
+    -Djava.security.egd=file:/dev/./urandom \
+    -Xmx${toString cfg.maximumJavaHeapSize}M \
+    -Xss512K \
+    -XX:+UseG1GC \
+    -XX:+UseStringDeduplication \
+    -XX:MaxMetaspaceSize=768M \
+    -Djava.library.path=${stateDir}/lib \
+    -Djava.awt.headless=true \
+    -Djavax.net.ssl.trustStore=${stateDir}/etc/ufv-truststore \
+    -Dfile.encoding=UTF-8 \
+    -Dav.tempdir=/var/cache/unifi-video
+  '';
+
+  mongoConf = pkgs.writeTextFile {
+    name = "mongo.conf";
+    executable = false;
+    text = ''
+      # for documentation of all options, see https://www.mongodb.com/docs/manual/reference/configuration-options/
+
+      storage:
+         dbPath: ${cfg.dataDir}/db
+         journal:
+            enabled: true
+         syncPeriodSecs: 60
+
+      systemLog:
+         destination: file
+         logAppend: true
+         path: ${stateDir}/logs/mongod.log
+
+      net:
+         port: 7441
+         bindIp: 127.0.0.1
+         http:
+            enabled: false
+
+      operationProfiling:
+         slowOpThresholdMs: 500
+         mode: off
+    '';
+  };
+
+
+  mongoWtConf = pkgs.writeTextFile {
+    name = "mongowt.conf";
+    executable = false;
+    text = ''
+      # for documentation of all options, see:
+      #   https://www.mongodb.com/docs/manual/reference/configuration-options/
+
+      storage:
+         dbPath: ${cfg.dataDir}/db-wt
+         journal:
+            enabled: true
+         wiredTiger:
+            engineConfig:
+               cacheSizeGB: 1
+
+      systemLog:
+         destination: file
+         logAppend: true
+         path: logs/mongod.log
+
+      net:
+         port: 7441
+         bindIp: 127.0.0.1
+
+      operationProfiling:
+         slowOpThresholdMs: 500
+         mode: off
+    '';
+  };
+
+  stateDir = "/var/lib/unifi-video";
+
+in
+{
+
+  options.services.unifi-video = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether or not to enable the unifi-video service.
+      '';
+    };
+
+    jrePackage = mkOption {
+      type = types.package;
+      default = pkgs.jre8;
+      defaultText = literalExpression "pkgs.jre8";
+      description = lib.mdDoc ''
+        The JRE package to use. Check the release notes to ensure it is supported.
+      '';
+    };
+
+    unifiVideoPackage = mkOption {
+      type = types.package;
+      default = pkgs.unifi-video;
+      defaultText = literalExpression "pkgs.unifi-video";
+      description = lib.mdDoc ''
+        The unifi-video package to use.
+      '';
+    };
+
+    mongodbPackage = mkOption {
+      type = types.package;
+      default = pkgs.mongodb-4_4;
+      defaultText = literalExpression "pkgs.mongodb";
+      description = lib.mdDoc ''
+        The mongodb package to use.
+      '';
+    };
+
+    logDir = mkOption {
+      type = types.str;
+      default = "${stateDir}/logs";
+      description = lib.mdDoc ''
+        Where to store the logs.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "${stateDir}/data";
+      description = lib.mdDoc ''
+        Where to store the database and other data.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether or not to open the required ports on the firewall.
+      '';
+    };
+
+    maximumJavaHeapSize = mkOption {
+      type = types.nullOr types.int;
+      default = 1024;
+      example = 4096;
+      description = lib.mdDoc ''
+        Set the maximum heap size for the JVM in MB.
+      '';
+    };
+
+    pidFile = mkOption {
+      type = types.path;
+      default = "${cfg.dataDir}/unifi-video.pid";
+      defaultText = literalExpression ''"''${config.${opt.dataDir}}/unifi-video.pid"'';
+      description = lib.mdDoc "Location of unifi-video pid file.";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    warnings = optional
+      (options.services.unifi-video.openFirewall.highestPrio >= (mkOptionDefault null).priority)
+      "The current services.unifi-video.openFirewall = true default is deprecated and will change to false in 22.11. Set it explicitly to silence this warning.";
+
+    users.users.unifi-video = {
+      description = "UniFi Video controller daemon user";
+      home = stateDir;
+      group = "unifi-video";
+      isSystemUser = true;
+    };
+    users.groups.unifi-video = {};
+
+    networking.firewall = mkIf cfg.openFirewall {
+      # https://help.ui.com/hc/en-us/articles/217875218-UniFi-Video-Ports-Used
+      allowedTCPPorts = [
+        7080 # HTTP portal
+        7443 # HTTPS portal
+        7445 # Video over HTTP (mobile app)
+        7446 # Video over HTTPS (mobile app)
+        7447 # RTSP via the controller
+        7442 # Camera management from cameras to NVR over WAN
+      ];
+      allowedUDPPorts = [
+        6666 # Inbound camera streams sent over WAN
+      ];
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${stateDir}' 0700 unifi-video unifi-video - -"
+      "d '/var/cache/unifi-video' 0700 unifi-video unifi-video - -"
+
+      "d '${stateDir}/logs' 0700 unifi-video unifi-video - -"
+      "C '${stateDir}/etc' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/etc"
+      "C '${stateDir}/webapps' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/webapps"
+      "C '${stateDir}/email' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/email"
+      "C '${stateDir}/fw' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/fw"
+      "C '${stateDir}/lib' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/lib"
+
+      "d '${stateDir}/data' 0700 unifi-video unifi-video - -"
+      "d '${stateDir}/data/db' 0700 unifi-video unifi-video - -"
+      "C '${stateDir}/data/system.properties' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/etc/system.properties"
+
+      "d '${stateDir}/bin' 0700 unifi-video unifi-video - -"
+      "f '${stateDir}/bin/evostreamms' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/evostreamms"
+      "f '${stateDir}/bin/libavcodec.so.54' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/libavcodec.so.54"
+      "f '${stateDir}/bin/libavformat.so.54' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/libavformat.so.54"
+      "f '${stateDir}/bin/libavutil.so.52' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/libavutil.so.52"
+      "f '${stateDir}/bin/ubnt.avtool' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/ubnt.avtool"
+      "f '${stateDir}/bin/ubnt.updater' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/bin/ubnt.updater"
+      "C '${stateDir}/bin/mongo' 0700 unifi-video unifi-video - ${cfg.mongodbPackage}/bin/mongo"
+      "C '${stateDir}/bin/mongod' 0700 unifi-video unifi-video - ${cfg.mongodbPackage}/bin/mongod"
+      "C '${stateDir}/bin/mongoperf' 0700 unifi-video unifi-video - ${cfg.mongodbPackage}/bin/mongoperf"
+      "C '${stateDir}/bin/mongos' 0700 unifi-video unifi-video - ${cfg.mongodbPackage}/bin/mongos"
+
+      "d '${stateDir}/conf' 0700 unifi-video unifi-video - -"
+      "C '${stateDir}/conf/evostream' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/evostream"
+      "Z '${stateDir}/conf/evostream' 0700 unifi-video unifi-video - -"
+      "L+ '${stateDir}/conf/mongodv3.0+.conf' 0700 unifi-video unifi-video - ${mongoConf}"
+      "L+ '${stateDir}/conf/mongodv3.6+.conf' 0700 unifi-video unifi-video - ${mongoConf}"
+      "L+ '${stateDir}/conf/mongod-wt.conf' 0700 unifi-video unifi-video - ${mongoWtConf}"
+      "L+ '${stateDir}/conf/catalina.policy' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/catalina.policy"
+      "L+ '${stateDir}/conf/catalina.properties' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/catalina.properties"
+      "L+ '${stateDir}/conf/context.xml' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/context.xml"
+      "L+ '${stateDir}/conf/logging.properties' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/logging.properties"
+      "L+ '${stateDir}/conf/server.xml' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/server.xml"
+      "L+ '${stateDir}/conf/tomcat-users.xml' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/tomcat-users.xml"
+      "L+ '${stateDir}/conf/web.xml' 0700 unifi-video unifi-video - ${pkgs.unifi-video}/lib/unifi-video/conf/web.xml"
+    ];
+
+    systemd.services.unifi-video = {
+      description = "UniFi Video NVR daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ] ;
+      unitConfig.RequiresMountsFor = stateDir;
+      # Make sure package upgrades trigger a service restart
+      restartTriggers = [ cfg.unifiVideoPackage cfg.mongodbPackage ];
+      path = with pkgs; [ gawk coreutils busybox which jre8 lsb-release libcap util-linux ];
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${(removeSuffix "\n" cmd)} ${mainClass} start";
+        ExecStop = "${(removeSuffix "\n" cmd)} stop ${mainClass} stop";
+        Restart = "on-failure";
+        UMask = "0077";
+        User = "unifi-video";
+        WorkingDirectory = "${stateDir}";
+      };
+    };
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "unifi-video" "openPorts" ] [ "services" "unifi-video" "openFirewall" ])
+  ];
+
+  meta.maintainers = with lib.maintainers; [ rsynnest ];
+}
diff --git a/nixpkgs/nixos/modules/services/video/v4l2-relayd.nix b/nixpkgs/nixos/modules/services/video/v4l2-relayd.nix
new file mode 100644
index 000000000000..2a9dbe00158f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/video/v4l2-relayd.nix
@@ -0,0 +1,199 @@
+{ config, lib, pkgs, utils, ... }:
+let
+
+  inherit (lib) attrValues concatStringsSep filterAttrs length listToAttrs literalExpression
+    makeSearchPathOutput mkEnableOption mkIf mkOption nameValuePair optionals types;
+  inherit (utils) escapeSystemdPath;
+
+  cfg = config.services.v4l2-relayd;
+
+  kernelPackages = config.boot.kernelPackages;
+
+  gst = (with pkgs.gst_all_1; [
+    gst-plugins-bad
+    gst-plugins-base
+    gst-plugins-good
+    gstreamer.out
+  ]);
+
+  instanceOpts = { name, ... }: {
+    options = {
+      enable = mkEnableOption (lib.mdDoc "this v4l2-relayd instance");
+
+      name = mkOption {
+        type = types.str;
+        default = name;
+        description = lib.mdDoc ''
+          The name of the instance.
+        '';
+      };
+
+      cardLabel = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The name the camera will show up as.
+        '';
+      };
+
+      extraPackages = mkOption {
+        type = with types; listOf package;
+        default = [ ];
+        description = lib.mdDoc ''
+          Extra packages to add to {env}`GST_PLUGIN_PATH` for the instance.
+        '';
+      };
+
+      input = {
+        pipeline = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            The gstreamer-pipeline to use for the input-stream.
+          '';
+        };
+
+        format = mkOption {
+          type = types.str;
+          default = "YUY2";
+          description = lib.mdDoc ''
+            The video-format to read from input-stream.
+          '';
+        };
+
+        width = mkOption {
+          type = types.ints.positive;
+          default = 1280;
+          description = lib.mdDoc ''
+            The width to read from input-stream.
+          '';
+        };
+
+        height = mkOption {
+          type = types.ints.positive;
+          default = 720;
+          description = lib.mdDoc ''
+            The height to read from input-stream.
+          '';
+        };
+
+        framerate = mkOption {
+          type = types.ints.positive;
+          default = 30;
+          description = lib.mdDoc ''
+            The framerate to read from input-stream.
+          '';
+        };
+      };
+
+      output = {
+        format = mkOption {
+          type = types.str;
+          default = "YUY2";
+          description = lib.mdDoc ''
+            The video-format to write to output-stream.
+          '';
+        };
+      };
+
+    };
+  };
+
+in
+{
+
+  options.services.v4l2-relayd = {
+
+    instances = mkOption {
+      type = with types; attrsOf (submodule instanceOpts);
+      default = { };
+      example = literalExpression ''
+        {
+          example = {
+            cardLabel = "Example card";
+            input.pipeline = "videotestsrc";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        v4l2-relayd instances to be created.
+      '';
+    };
+
+  };
+
+  config =
+    let
+
+      mkInstanceService = instance: {
+        description = "Streaming relay for v4l2loopback using GStreamer";
+
+        after = [ "modprobe@v4l2loopback.service" "systemd-logind.service" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          Type = "simple";
+          Restart = "always";
+          PrivateNetwork = true;
+          PrivateTmp = true;
+          LimitNPROC = 1;
+        };
+
+        environment = {
+          GST_PLUGIN_PATH = makeSearchPathOutput "lib" "lib/gstreamer-1.0" (gst ++ instance.extraPackages);
+          V4L2_DEVICE_FILE = "/run/v4l2-relayd-${instance.name}/device";
+        };
+
+        script =
+          let
+            appsrcOptions = concatStringsSep "," [
+              "caps=video/x-raw"
+              "format=${instance.input.format}"
+              "width=${toString instance.input.width}"
+              "height=${toString instance.input.height}"
+              "framerate=${toString instance.input.framerate}/1"
+            ];
+
+            outputPipeline = [
+              "appsrc name=appsrc ${appsrcOptions}"
+              "videoconvert"
+            ] ++ optionals (instance.input.format != instance.output.format) [
+              "video/x-raw,format=${instance.output.format}"
+              "queue"
+            ] ++ [ "v4l2sink name=v4l2sink device=$(cat $V4L2_DEVICE_FILE)" ];
+          in
+          ''
+            exec ${pkgs.v4l2-relayd}/bin/v4l2-relayd -i "${instance.input.pipeline}" -o "${concatStringsSep " ! " outputPipeline}"
+          '';
+
+        preStart = ''
+          mkdir -p $(dirname $V4L2_DEVICE_FILE)
+          ${kernelPackages.v4l2loopback.bin}/bin/v4l2loopback-ctl add -x 1 -n "${instance.cardLabel}" > $V4L2_DEVICE_FILE
+        '';
+
+        postStop = ''
+          ${kernelPackages.v4l2loopback.bin}/bin/v4l2loopback-ctl delete $(cat $V4L2_DEVICE_FILE)
+          rm -rf $(dirname $V4L2_DEVICE_FILE)
+        '';
+      };
+
+      mkInstanceServices = instances: listToAttrs (map
+        (instance:
+          nameValuePair "v4l2-relayd-${escapeSystemdPath instance.name}" (mkInstanceService instance)
+        )
+        instances);
+
+      enabledInstances = attrValues (filterAttrs (n: v: v.enable) cfg.instances);
+
+    in
+    {
+
+      boot = mkIf ((length enabledInstances) > 0) {
+        extraModulePackages = [ kernelPackages.v4l2loopback ];
+        kernelModules = [ "v4l2loopback" ];
+      };
+
+      systemd.services = mkInstanceServices enabledInstances;
+
+    };
+
+  meta.maintainers = with lib.maintainers; [ betaboon ];
+}
diff --git a/nixpkgs/nixos/modules/services/wayland/cage.nix b/nixpkgs/nixos/modules/services/wayland/cage.nix
new file mode 100644
index 000000000000..cf4c0798cd48
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/wayland/cage.nix
@@ -0,0 +1,113 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cage;
+in {
+  options.services.cage.enable = mkEnableOption (lib.mdDoc "cage kiosk service");
+
+  options.services.cage.user = mkOption {
+    type = types.str;
+    default = "demo";
+    description = lib.mdDoc ''
+      User to log-in as.
+    '';
+  };
+
+  options.services.cage.extraArguments = mkOption {
+    type = types.listOf types.str;
+    default = [];
+    defaultText = literalExpression "[]";
+    description = lib.mdDoc "Additional command line arguments to pass to Cage.";
+    example = ["-d"];
+  };
+
+  options.services.cage.environment = mkOption {
+    type = types.attrsOf types.str;
+    default = {};
+    example = {
+      WLR_LIBINPUT_NO_DEVICES = "1";
+    };
+    description = lib.mdDoc "Additional environment variables to pass to Cage.";
+  };
+
+  options.services.cage.program = mkOption {
+    type = types.path;
+    default = "${pkgs.xterm}/bin/xterm";
+    defaultText = literalExpression ''"''${pkgs.xterm}/bin/xterm"'';
+    description = lib.mdDoc ''
+      Program to run in cage.
+    '';
+  };
+
+  config = mkIf cfg.enable {
+
+    # The service is partially based off of the one provided in the
+    # cage wiki at
+    # https://github.com/Hjdskes/cage/wiki/Starting-Cage-on-boot-with-systemd.
+    systemd.services."cage-tty1" = {
+      enable = true;
+      after = [
+        "systemd-user-sessions.service"
+        "plymouth-start.service"
+        "plymouth-quit.service"
+        "systemd-logind.service"
+        "getty@tty1.service"
+      ];
+      before = [ "graphical.target" ];
+      wants = [ "dbus.socket" "systemd-logind.service" "plymouth-quit.service"];
+      wantedBy = [ "graphical.target" ];
+      conflicts = [ "getty@tty1.service" ];
+
+      restartIfChanged = false;
+      unitConfig.ConditionPathExists = "/dev/tty1";
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.cage}/bin/cage \
+            ${escapeShellArgs cfg.extraArguments} \
+            -- ${cfg.program}
+        '';
+        User = cfg.user;
+
+        IgnoreSIGPIPE = "no";
+
+        # Log this user with utmp, letting it show up with commands 'w' and
+        # 'who'. This is needed since we replace (a)getty.
+        UtmpIdentifier = "%n";
+        UtmpMode = "user";
+        # A virtual terminal is needed.
+        TTYPath = "/dev/tty1";
+        TTYReset = "yes";
+        TTYVHangup = "yes";
+        TTYVTDisallocate = "yes";
+        # Fail to start if not controlling the virtual terminal.
+        StandardInput = "tty-fail";
+        StandardOutput = "journal";
+        StandardError = "journal";
+        # Set up a full (custom) user session for the user, required by Cage.
+        PAMName = "cage";
+      };
+      environment = cfg.environment;
+    };
+
+    security.polkit.enable = true;
+
+    security.pam.services.cage.text = ''
+      auth    required pam_unix.so nullok
+      account required pam_unix.so
+      session required pam_unix.so
+      session required pam_env.so conffile=/etc/pam/environment readenv=0
+      session required ${config.systemd.package}/lib/security/pam_systemd.so
+    '';
+
+    hardware.opengl.enable = mkDefault true;
+
+    systemd.targets.graphical.wants = [ "cage-tty1.service" ];
+
+    systemd.defaultUnit = "graphical.target";
+  };
+
+  meta.maintainers = with lib.maintainers; [ matthewbauer ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/akkoma.md b/nixpkgs/nixos/modules/services/web-apps/akkoma.md
new file mode 100644
index 000000000000..83dd1a8b35f2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/akkoma.md
@@ -0,0 +1,332 @@
+# Akkoma {#module-services-akkoma}
+
+[Akkoma](https://akkoma.dev/) is a lightweight ActivityPub microblogging server forked from Pleroma.
+
+## Service configuration {#modules-services-akkoma-service-configuration}
+
+The Elixir configuration file required by Akkoma is generated automatically from
+[{option}`services.akkoma.config`](options.html#opt-services.akkoma.config). Secrets must be
+included from external files outside of the Nix store by setting the configuration option to
+an attribute set containing the attribute {option}`_secret` – a string pointing to the file
+containing the actual value of the option.
+
+For the mandatory configuration settings these secrets will be generated automatically if the
+referenced file does not exist during startup, unless disabled through
+[{option}`services.akkoma.initSecrets`](options.html#opt-services.akkoma.initSecrets).
+
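+As an illustration, a secret can be supplied from a file like this (the path is only a
+placeholder and must point outside of the Nix store):
+
+```nix
+services.akkoma.config.":pleroma"."Pleroma.Web.Endpoint".secret_key_base = {
+  _secret = "/var/lib/secrets/akkoma/key-base";
+};
+```
+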
+The following configuration binds Akkoma to the Unix socket `/run/akkoma/socket`, expecting to
+be run behind an HTTP proxy on `fediverse.example.com`.
+
+
+```nix
+services.akkoma.enable = true;
+services.akkoma.config = {
+  ":pleroma" = {
+    ":instance" = {
+      name = "My Akkoma instance";
+      description = "More detailed description";
+      email = "admin@example.com";
+      registration_open = false;
+    };
+
+    "Pleroma.Web.Endpoint" = {
+      url.host = "fediverse.example.com";
+    };
+  };
+};
+```
+
+Please refer to the [configuration cheat sheet](https://docs.akkoma.dev/stable/configuration/cheatsheet/)
+for additional configuration options.
+
+## User management {#modules-services-akkoma-user-management}
+
+After the Akkoma service is running, the administration utility can be used to
+[manage users](https://docs.akkoma.dev/stable/administration/CLI_tasks/user/). In particular, an
+administrative user can be created with
+
+```ShellSession
+$ pleroma_ctl user new <nickname> <email> --admin --moderator --password <password>
+```
+
+## Proxy configuration {#modules-services-akkoma-proxy-configuration}
+
+Although it is possible to expose Akkoma directly, it is common practice to operate it behind an
+HTTP reverse proxy such as nginx.
+
+```nix
+services.akkoma.nginx = {
+  enableACME = true;
+  forceSSL = true;
+};
+
+services.nginx = {
+  enable = true;
+
+  clientMaxBodySize = "16m";
+  recommendedTlsSettings = true;
+  recommendedOptimisation = true;
+  recommendedGzipSettings = true;
+};
+```
+
+Please refer to [](#module-security-acme) for details on how to provision an SSL/TLS certificate.
+
+### Media proxy {#modules-services-akkoma-media-proxy}
+
+Without the media proxy function, Akkoma does not store any remote media like pictures or video
+locally, and clients have to fetch them directly from the source server. The following
+configuration enables the media proxy and sets up an nginx cache for the proxied media.
+
+```nix
+# Enable nginx slice module distributed with Tengine
+services.nginx.package = pkgs.tengine;
+
+# Enable media proxy
+services.akkoma.config.":pleroma".":media_proxy" = {
+  enabled = true;
+  proxy_opts.redirect_on_failure = true;
+};
+
+# Adjust the persistent cache size as needed:
+#  Assuming an average object size of 128 KiB, around 1 MiB
+#  of memory is required for the key zone per GiB of cache.
+# Ensure that the cache directory exists and is writable by nginx.
+services.nginx.commonHttpConfig = ''
+  proxy_cache_path /var/cache/nginx/cache/akkoma-media-cache
+    levels= keys_zone=akkoma_media_cache:16m max_size=16g
+    inactive=1y use_temp_path=off;
+'';
+
+services.akkoma.nginx = {
+  locations."/proxy" = {
+    proxyPass = "http://unix:/run/akkoma/socket";
+
+    extraConfig = ''
+      proxy_cache akkoma_media_cache;
+
+      # Cache objects in slices of 1 MiB
+      slice 1m;
+      proxy_cache_key $host$uri$is_args$args$slice_range;
+      proxy_set_header Range $slice_range;
+
+      # Decouple proxy and upstream responses
+      proxy_buffering on;
+      proxy_cache_lock on;
+      proxy_ignore_client_abort on;
+
+      # Default cache times for various responses
+      proxy_cache_valid 200 1y;
+      proxy_cache_valid 206 301 304 1h;
+
+      # Allow serving of stale items
+      proxy_cache_use_stale error timeout invalid_header updating;
+    '';
+  };
+};
+```
+
+#### Prefetch remote media {#modules-services-akkoma-prefetch-remote-media}
+
+The following example enables the `MediaProxyWarmingPolicy` MRF policy which automatically
+fetches all media associated with a post through the media proxy, as soon as the post is
+received by the instance.
+
+```nix
+services.akkoma.config.":pleroma".":mrf".policies =
+  map (pkgs.formats.elixirConf { }).lib.mkRaw [
+    "Pleroma.Web.ActivityPub.MRF.MediaProxyWarmingPolicy"
+];
+```
+
+#### Media previews {#modules-services-akkoma-media-previews}
+
+Akkoma can generate previews for media.
+
+```nix
+services.akkoma.config.":pleroma".":media_preview_proxy" = {
+  enabled = true;
+  thumbnail_max_width = 1920;
+  thumbnail_max_height = 1080;
+};
+```
+
+## Frontend management {#modules-services-akkoma-frontend-management}
+
+Akkoma will be deployed with the `akkoma-fe` and `admin-fe` frontends by default. These can be
+modified by setting
+[{option}`services.akkoma.frontends`](options.html#opt-services.akkoma.frontends).
+
+The following example overrides the primary frontend’s default configuration using a custom
+derivation.
+
+```nix
+services.akkoma.frontends.primary.package = pkgs.runCommand "akkoma-fe" {
+  config = builtins.toJSON {
+    expertLevel = 1;
+    collapseMessageWithSubject = false;
+    stopGifs = false;
+    replyVisibility = "following";
+    webPushHideIfCW = true;
+    hideScopeNotice = true;
+    renderMisskeyMarkdown = false;
+    hideSiteFavicon = true;
+    postContentType = "text/markdown";
+    showNavShortcuts = false;
+  };
+  nativeBuildInputs = with pkgs; [ jq xorg.lndir ];
+  passAsFile = [ "config" ];
+} ''
+  mkdir $out
+  lndir ${pkgs.akkoma-frontends.akkoma-fe} $out
+
+  rm $out/static/config.json
+  jq -s add ${pkgs.akkoma-frontends.akkoma-fe}/static/config.json $configPath \
+    >$out/static/config.json
+'';
+```
+
+## Federation policies {#modules-services-akkoma-federation-policies}
+
+Akkoma comes with a number of modules to police federation with other ActivityPub instances.
+The most valuable for typical users is the
+[`:mrf_simple`](https://docs.akkoma.dev/stable/configuration/cheatsheet/#mrf_simple) module
+which allows limiting federation based on instance hostnames.
+
+This configuration snippet provides an example on how these can be used. Choosing an adequate
+federation policy is not trivial and entails finding a balance between connectivity to the rest
+of the fediverse and providing a pleasant experience to the users of an instance.
+
+
+```nix
+services.akkoma.config.":pleroma" = with (pkgs.formats.elixirConf { }).lib; {
+  ":mrf".policies = map mkRaw [
+    "Pleroma.Web.ActivityPub.MRF.SimplePolicy"
+  ];
+
+  ":mrf_simple" = {
+    # Tag all media as sensitive
+    media_nsfw = mkMap {
+      "nsfw.weird.kinky" = "Untagged NSFW content";
+    };
+
+    # Reject all activities except deletes
+    reject = mkMap {
+      "kiwifarms.cc" = "Persistent harassment of users, no moderation";
+    };
+
+    # Force posts to be visible by followers only
+    followers_only = mkMap {
+      "beta.birdsite.live" = "Avoid polluting timelines with Twitter posts";
+    };
+  };
+};
+```
+
+## Upload filters {#modules-services-akkoma-upload-filters}
+
+This example strips GPS and location metadata from uploads, deduplicates them and anonymises
+the file name.
+
+```nix
+services.akkoma.config.":pleroma"."Pleroma.Upload".filters =
+  map (pkgs.formats.elixirConf { }).lib.mkRaw [
+    "Pleroma.Upload.Filter.Exiftool"
+    "Pleroma.Upload.Filter.Dedupe"
+    "Pleroma.Upload.Filter.AnonymizeFilename"
+  ];
+```
+
+## Migration from Pleroma {#modules-services-akkoma-migration-pleroma}
+
+Pleroma instances can be migrated to Akkoma either by copying the database and upload data or by
+pointing Akkoma to the existing data. The necessary database migrations are run automatically
+during startup of the service.
+
+The configuration has to be copy-edited manually.
+
+Depending on the size of the database, the initial migration may take a long time and exceed the
+startup timeout of the system manager. To work around this issue one may adjust the startup timeout
+{option}`systemd.services.akkoma.serviceConfig.TimeoutStartSec` or simply run the migrations
+manually:
+
+```ShellSession
+pleroma_ctl migrate
+```
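+
+A longer timeout can also be set declaratively; the value below is only an illustrative figure:
+
+```nix
+systemd.services.akkoma.serviceConfig.TimeoutStartSec = "1h";
+```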
+
+### Copying data {#modules-services-akkoma-migration-pleroma-copy}
+
+Copying the Pleroma data instead of re-using it in place may permit easier reversion to Pleroma,
+but allows the two data sets to diverge.
+
+First disable Pleroma and then copy its database and upload data:
+
+```ShellSession
+# Create a copy of the database
+nix-shell -p postgresql --run 'createdb -T pleroma akkoma'
+
+# Copy upload data
+mkdir /var/lib/akkoma
+cp -R --reflink=auto /var/lib/pleroma/uploads /var/lib/akkoma/
+```
+
+After the data has been copied, enable the Akkoma service and verify that the migration has been
+successful. If no longer required, the original data may then be deleted:
+
+```ShellSession
+# Delete original database
+nix-shell -p postgresql --run 'dropdb pleroma'
+
+# Delete original Pleroma state
+rm -r /var/lib/pleroma
+```
+
+### Re-using data {#modules-services-akkoma-migration-pleroma-reuse}
+
+To re-use the Pleroma data in place, disable Pleroma and enable Akkoma, pointing it to the
+Pleroma database and upload directory.
+
+```nix
+# Adjust these settings according to the database name and upload directory path used by Pleroma
+services.akkoma.config.":pleroma"."Pleroma.Repo".database = "pleroma";
+services.akkoma.config.":pleroma".":instance".upload_dir = "/var/lib/pleroma/uploads";
+```
+
+Please keep in mind that after the Akkoma service has been started, any migrations applied by
+Akkoma have to be rolled back before the database can be used again with Pleroma. This can be
+achieved through `pleroma_ctl ecto.rollback`. Refer to the
+[Ecto SQL documentation](https://hexdocs.pm/ecto_sql/Mix.Tasks.Ecto.Rollback.html) for
+details.
+
+## Advanced deployment options {#modules-services-akkoma-advanced-deployment}
+
+### Confinement {#modules-services-akkoma-confinement}
+
+The Akkoma systemd service may be confined to a chroot with
+
+```nix
+systemd.services.akkoma.confinement.enable = true;
+```
+
+Confinement of services is not generally supported in NixOS and therefore disabled by default.
+Depending on the Akkoma configuration, the default confinement settings may be insufficient and
+lead to subtle errors at run time, requiring adjustment:
+
+Use
+[{option}`systemd.services.akkoma.confinement.packages`](options.html#opt-systemd.services._name_.confinement.packages)
+to make packages available in the chroot.
+
+{option}`systemd.services.akkoma.serviceConfig.BindPaths` and
+{option}`systemd.services.akkoma.serviceConfig.BindReadOnlyPaths` permit access to outside paths
+through bind mounts. Refer to
+[`BindPaths=`](https://www.freedesktop.org/software/systemd/man/systemd.exec.html#BindPaths=)
+of {manpage}`systemd.exec(5)` for details.
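+
+As a hypothetical sketch (the package and path here are placeholders, not requirements of
+Akkoma):
+
+```nix
+systemd.services.akkoma = {
+  confinement.packages = with pkgs; [ exiftool ];
+  serviceConfig.BindReadOnlyPaths = [ "/etc/ssl/certs" ];
+};
+```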
+
+### Distributed deployment {#modules-services-akkoma-distributed-deployment}
+
+Being an Elixir application, Akkoma can be deployed in a distributed fashion.
+
+This requires setting
+[{option}`services.akkoma.dist.address`](options.html#opt-services.akkoma.dist.address) and
+[{option}`services.akkoma.dist.cookie`](options.html#opt-services.akkoma.dist.cookie). The
+specifics depend strongly on the deployment environment. For more information please check the
+relevant [Erlang documentation](https://www.erlang.org/doc/reference_manual/distributed.html).
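+
+A minimal sketch of these settings (the address and the secret path are placeholders):
+
+```nix
+services.akkoma.dist = {
+  address = "192.0.2.10";
+  cookie = { _secret = "/var/lib/secrets/akkoma/releaseCookie"; };
+};
+```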
diff --git a/nixpkgs/nixos/modules/services/web-apps/akkoma.nix b/nixpkgs/nixos/modules/services/web-apps/akkoma.nix
new file mode 100644
index 000000000000..8a8ed49115fd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/akkoma.nix
@@ -0,0 +1,1093 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.akkoma;
+  ex = cfg.config;
+  db = ex.":pleroma"."Pleroma.Repo";
+  web = ex.":pleroma"."Pleroma.Web.Endpoint";
+
+  isConfined = config.systemd.services.akkoma.confinement.enable;
+  hasSmtp = (attrByPath [ ":pleroma" "Pleroma.Emails.Mailer" "adapter" "value" ] null ex) == "Swoosh.Adapters.SMTP";
+
+  isAbsolutePath = v: isString v && substring 0 1 v == "/";
+  isSecret = v: isAttrs v && v ? _secret && isAbsolutePath v._secret;
+
+  absolutePath = with types; mkOptionType {
+    name = "absolutePath";
+    description = "absolute path";
+    descriptionClass = "noun";
+    check = isAbsolutePath;
+    inherit (str) merge;
+  };
+
+  secret = mkOptionType {
+    name = "secret";
+    description = "secret value";
+    descriptionClass = "noun";
+    check = isSecret;
+    nestedTypes = {
+      _secret = absolutePath;
+    };
+  };
+
+  ipAddress = with types; mkOptionType {
+    name = "ipAddress";
+    description = "IPv4 or IPv6 address";
+    descriptionClass = "conjunction";
+    check = x: str.check x && builtins.match "[.0-9:A-Fa-f]+" x != null;
+    inherit (str) merge;
+  };
+
+  elixirValue = let
+    elixirValue' = with types;
+      nullOr (oneOf [ bool int float str (attrsOf elixirValue') (listOf elixirValue') ]) // {
+        description = "Elixir value";
+      };
+  in elixirValue';
+
+  frontend = {
+    options = {
+      package = mkOption {
+        type = types.package;
+        description = mdDoc "Akkoma frontend package.";
+        example = literalExpression "pkgs.akkoma-frontends.akkoma-fe";
+      };
+
+      name = mkOption {
+        type = types.nonEmptyStr;
+        description = mdDoc "Akkoma frontend name.";
+        example = "akkoma-fe";
+      };
+
+      ref = mkOption {
+        type = types.nonEmptyStr;
+        description = mdDoc "Akkoma frontend reference.";
+        example = "stable";
+      };
+    };
+  };
+
+  sha256 = builtins.hashString "sha256";
+
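+  # Replace every secret reference ({ _secret = <path>; }) with a placeholder string,
+  # the SHA-256 hash of the secret path. The placeholders are substituted with the
+  # actual file contents at runtime by configScript (using replace-secret).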
+  replaceSec = let
+    replaceSec' = { }@args: v:
+      if isAttrs v
+        then if v ? _secret
+          then if isAbsolutePath v._secret
+            then sha256 v._secret
+            else abort "Invalid secret path (_secret = ${v._secret})"
+          else mapAttrs (_: val: replaceSec' args val) v
+        else if isList v
+          then map (replaceSec' args) v
+          else v;
+    in replaceSec' { };
+
+  # Erlang/Elixir uses a somewhat special format for IP addresses
+  erlAddr = addr: fileContents
+    (pkgs.runCommand addr {
+      nativeBuildInputs = [ cfg.package.elixirPackage ];
+      code = ''
+        case :inet.parse_address('${addr}') do
+          {:ok, addr} -> IO.inspect addr
+          {:error, _} -> System.halt(65)
+        end
+      '';
+      passAsFile = [ "code" ];
+    } ''elixir "$codePath" >"$out"'');
+
+  format = pkgs.formats.elixirConf { elixir = cfg.package.elixirPackage; };
+  configFile = format.generate "config.exs"
+    (replaceSec
+      (attrsets.updateManyAttrsByPath [{
+        path = [ ":pleroma" "Pleroma.Web.Endpoint" "http" "ip" ];
+        update = addr:
+          if isAbsolutePath addr
+            then format.lib.mkTuple
+              [ (format.lib.mkAtom ":local") addr ]
+            else format.lib.mkRaw (erlAddr addr);
+      }] cfg.config));
+
+  writeShell = { name, text, runtimeInputs ? [ ] }:
+    pkgs.writeShellApplication { inherit name text runtimeInputs; } + "/bin/${name}";
+
+  genScript = writeShell {
+    name = "akkoma-gen-cookie";
+    runtimeInputs = with pkgs; [ coreutils util-linux ];
+    text = ''
+      install -m 0400 \
+        -o ${escapeShellArg cfg.user } \
+        -g ${escapeShellArg cfg.group} \
+        <(hexdump -n 16 -e '"%02x"' /dev/urandom) \
+        "$RUNTIME_DIRECTORY/cookie"
+    '';
+  };
+
+  copyScript = writeShell {
+    name = "akkoma-copy-cookie";
+    runtimeInputs = with pkgs; [ coreutils ];
+    text = ''
+      install -m 0400 \
+        -o ${escapeShellArg cfg.user} \
+        -g ${escapeShellArg cfg.group} \
+        ${escapeShellArg cfg.dist.cookie._secret} \
+        "$RUNTIME_DIRECTORY/cookie"
+    '';
+  };
+
+  secretPaths = catAttrs "_secret" (collect isSecret cfg.config);
+
+  vapidKeygen = pkgs.writeText "vapidKeygen.exs" ''
+    [public_path, private_path] = System.argv()
+    {public_key, private_key} = :crypto.generate_key :ecdh, :prime256v1
+    File.write! public_path, Base.url_encode64(public_key, padding: false)
+    File.write! private_path, Base.url_encode64(private_key, padding: false)
+  '';
+
+  initSecretsScript = writeShell {
+    name = "akkoma-init-secrets";
+    runtimeInputs = with pkgs; [ coreutils cfg.package.elixirPackage ];
+    text = let
+      key-base = web.secret_key_base;
+      jwt-signer = ex.":joken".":default_signer";
+      signing-salt = web.signing_salt;
+      liveview-salt = web.live_view.signing_salt;
+      vapid-private = ex.":web_push_encryption".":vapid_details".private_key;
+      vapid-public = ex.":web_push_encryption".":vapid_details".public_key;
+    in ''
+      secret() {
+        # Generate default secret if non-existent
+        test -e "$2" || install -D -m 0600 <(tr -dc 'A-Za-z-._~' </dev/urandom | head -c "$1") "$2"
+        if [ "$(stat --dereference --format='%s' "$2")" -lt "$1" ]; then
+          echo "Secret '$2' is smaller than minimum size of $1 bytes." >&2
+          exit 65
+        fi
+      }
+
+      secret 64 ${escapeShellArg key-base._secret}
+      secret 64 ${escapeShellArg jwt-signer._secret}
+      secret 8 ${escapeShellArg signing-salt._secret}
+      secret 8 ${escapeShellArg liveview-salt._secret}
+
+      ${optionalString (isSecret vapid-public) ''
+        { test -e ${escapeShellArg vapid-private._secret} && \
+          test -e ${escapeShellArg vapid-public._secret}; } || \
+            elixir ${escapeShellArgs [ vapidKeygen vapid-public._secret vapid-private._secret ]}
+      ''}
+    '';
+  };
+
+  configScript = writeShell {
+    name = "akkoma-config";
+    runtimeInputs = with pkgs; [ coreutils replace-secret ];
+    text = ''
+      cd "$RUNTIME_DIRECTORY"
+      tmp="$(mktemp config.exs.XXXXXXXXXX)"
+      trap 'rm -f "$tmp"' EXIT TERM
+
+      cat ${escapeShellArg configFile} >"$tmp"
+      ${concatMapStrings (file: ''
+        replace-secret ${escapeShellArgs [ (sha256 file) file ]} "$tmp"
+      '') secretPaths}
+
+      chown ${escapeShellArg cfg.user}:${escapeShellArg cfg.group} "$tmp"
+      chmod 0400 "$tmp"
+      mv -f "$tmp" config.exs
+    '';
+  };
+
+  pgpass = let
+    esc = escape [ ":" ''\'' ];
+  in if (cfg.initDb.password != null)
+    then pkgs.writeText "pgpass.conf" ''
+      *:*:*${esc cfg.initDb.username}:${esc (sha256 cfg.initDb.password._secret)}
+    ''
+    else null;
+
+  escapeSqlId = x: ''"${replaceStrings [ ''"'' ] [ ''""'' ] x}"'';
+  escapeSqlStr = x: "'${replaceStrings [ "'" ] [ "''" ] x}'";
+
+  setupSql = pkgs.writeText "setup.psql" ''
+    \set ON_ERROR_STOP on
+
+    ALTER ROLE ${escapeSqlId db.username}
+      LOGIN PASSWORD ${if db ? password
+        then "${escapeSqlStr (sha256 db.password._secret)}"
+        else "NULL"};
+
+    ALTER DATABASE ${escapeSqlId db.database}
+      OWNER TO ${escapeSqlId db.username};
+
+    \connect ${escapeSqlId db.database}
+    CREATE EXTENSION IF NOT EXISTS citext;
+    CREATE EXTENSION IF NOT EXISTS pg_trgm;
+    CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
+  '';
+
+  dbHost = if db ? socket_dir then db.socket_dir
+    else if db ? socket then db.socket
+      else if db ? hostname then db.hostname
+        else null;
+
+  initDbScript = writeShell {
+    name = "akkoma-initdb";
+    runtimeInputs = with pkgs; [ coreutils replace-secret config.services.postgresql.package ];
+    text = ''
+      pgpass="$(mktemp -t pgpass-XXXXXXXXXX.conf)"
+      setupSql="$(mktemp -t setup-XXXXXXXXXX.psql)"
+      trap 'rm -f "$pgpass" "$setupSql"' EXIT TERM
+
+      ${optionalString (dbHost != null) ''
+        export PGHOST=${escapeShellArg dbHost}
+      ''}
+      export PGUSER=${escapeShellArg cfg.initDb.username}
+      ${optionalString (pgpass != null) ''
+        cat ${escapeShellArg pgpass} >"$pgpass"
+        replace-secret ${escapeShellArgs [
+          (sha256 cfg.initDb.password._secret) cfg.initDb.password._secret ]} "$pgpass"
+        export PGPASSFILE="$pgpass"
+      ''}
+
+      cat ${escapeShellArg setupSql} >"$setupSql"
+      ${optionalString (db ? password) ''
+        replace-secret ${escapeShellArgs [
+         (sha256 db.password._secret) db.password._secret ]} "$setupSql"
+      ''}
+
+      # Create role if non-existent
+      psql -tAc "SELECT 1 FROM pg_roles
+        WHERE rolname = "${escapeShellArg (escapeSqlStr db.username)} | grep -F -q 1 || \
+        psql -tAc "CREATE ROLE "${escapeShellArg (escapeSqlId db.username)}
+
+      # Create database if non-existent
+      psql -tAc "SELECT 1 FROM pg_database
+        WHERE datname = "${escapeShellArg (escapeSqlStr db.database)} | grep -F -q 1 || \
+        psql -tAc "CREATE DATABASE "${escapeShellArg (escapeSqlId db.database)}"
+          OWNER "${escapeShellArg (escapeSqlId db.username)}"
+          TEMPLATE template0
+          ENCODING 'utf8'
+          LOCALE 'C'"
+
+      psql -f "$setupSql"
+    '';
+  };
+
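+  # Wrappers around the release binaries that point them at the generated runtime
+  # configuration and set up the Erlang distribution flags and release cookie.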
+  envWrapper = let
+    script = writeShell {
+      name = "akkoma-env";
+      text = ''
+        cd "${cfg.package}"
+
+        RUNTIME_DIRECTORY="''${RUNTIME_DIRECTORY:-/run/akkoma}"
+        AKKOMA_CONFIG_PATH="$RUNTIME_DIRECTORY/config.exs" \
+        ERL_EPMD_ADDRESS="${cfg.dist.address}" \
+        ERL_EPMD_PORT="${toString cfg.dist.epmdPort}" \
+        ERL_FLAGS=${lib.escapeShellArg (lib.escapeShellArgs ([
+          "-kernel" "inet_dist_use_interface" (erlAddr cfg.dist.address)
+          "-kernel" "inet_dist_listen_min" (toString cfg.dist.portMin)
+          "-kernel" "inet_dist_listen_max" (toString cfg.dist.portMax)
+        ] ++ cfg.dist.extraFlags))} \
+        RELEASE_COOKIE="$(<"$RUNTIME_DIRECTORY/cookie")" \
+        RELEASE_NAME="akkoma" \
+          exec "${cfg.package}/bin/$(basename "$0")" "$@"
+      '';
+    };
+  in pkgs.runCommandLocal "akkoma-env" { } ''
+    mkdir -p "$out/bin"
+
+    ln -r -s ${escapeShellArg script} "$out/bin/pleroma"
+    ln -r -s ${escapeShellArg script} "$out/bin/pleroma_ctl"
+  '';
+
+  userWrapper = pkgs.writeShellApplication {
+    name = "pleroma_ctl";
+    text = ''
+      if [ "''${1-}" == "update" ]; then
+        echo "OTP releases are not supported on NixOS." >&2
+        exit 64
+      fi
+
+      exec sudo -u ${escapeShellArg cfg.user} \
+        "${envWrapper}/bin/pleroma_ctl" "$@"
+    '';
+  };
+
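+  # When listening on a Unix socket, wait (via inotify) until Akkoma has created the
+  # socket, then relax its permissions so that the reverse proxy can connect to it.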
+  socketScript = if isAbsolutePath web.http.ip
+    then writeShell {
+      name = "akkoma-socket";
+      runtimeInputs = with pkgs; [ coreutils inotify-tools ];
+      text = ''
+        coproc {
+          inotifywait -q -m -e create ${escapeShellArg (dirOf web.http.ip)}
+        }
+
+        trap 'kill "$COPROC_PID"' EXIT TERM
+
+        until test -S ${escapeShellArg web.http.ip}
+          do read -r -u "''${COPROC[0]}"
+        done
+
+        chmod 0666 ${escapeShellArg web.http.ip}
+      '';
+    }
+    else null;
+
+  staticDir = ex.":pleroma".":instance".static_dir;
+  uploadDir = ex.":pleroma".":instance".upload_dir;
+
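+  # Gather the configured frontends and any extra static files into a single
+  # derivation, used as the default static_dir.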
+  staticFiles = pkgs.runCommandLocal "akkoma-static" { } ''
+    ${concatStringsSep "\n" (mapAttrsToList (key: val: ''
+      mkdir -p $out/frontends/${escapeShellArg val.name}/
+      ln -s ${escapeShellArg val.package} $out/frontends/${escapeShellArg val.name}/${escapeShellArg val.ref}
+    '') cfg.frontends)}
+
+    ${optionalString (cfg.extraStatic != null)
+      (concatStringsSep "\n" (mapAttrsToList (key: val: ''
+        mkdir -p "$out/$(dirname ${escapeShellArg key})"
+        ln -s ${escapeShellArg val} $out/${escapeShellArg key}
+      '') cfg.extraStatic))}
+  '';
+in {
+  options = {
+    services.akkoma = {
+      enable = mkEnableOption (mdDoc "Akkoma");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.akkoma;
+        defaultText = literalExpression "pkgs.akkoma";
+        description = mdDoc "Akkoma package to use.";
+      };
+
+      user = mkOption {
+        type = types.nonEmptyStr;
+        default = "akkoma";
+        description = mdDoc "User account under which Akkoma runs.";
+      };
+
+      group = mkOption {
+        type = types.nonEmptyStr;
+        default = "akkoma";
+        description = mdDoc "Group account under which Akkoma runs.";
+      };
+
+      initDb = {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = mdDoc ''
+            Whether to automatically initialise the database on startup. This will create a
+            database role and database if they do not already exist, and (re)set the role password
+            and the ownership of the database.
+
+            This setting can be used safely even if the database already exists and contains data.
+
+            The database settings are configured through
+            [{option}`config.services.akkoma.config.":pleroma"."Pleroma.Repo"`](#opt-services.akkoma.config.__pleroma_._Pleroma.Repo_).
+
+            If disabled, the database has to be set up manually:
+
+            ```SQL
+            CREATE ROLE akkoma LOGIN;
+
+            CREATE DATABASE akkoma
+              OWNER akkoma
+              TEMPLATE template0
+              ENCODING 'utf8'
+              LOCALE 'C';
+
+            \connect akkoma
+            CREATE EXTENSION IF NOT EXISTS citext;
+            CREATE EXTENSION IF NOT EXISTS pg_trgm;
+            CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
+            ```
+          '';
+        };
+
+        username = mkOption {
+          type = types.nonEmptyStr;
+          default = config.services.postgresql.superUser;
+          defaultText = literalExpression "config.services.postgresql.superUser";
+          description = mdDoc ''
+            Name of the database user to initialise the database with.
+
+            This user is required to have the `CREATEROLE` and `CREATEDB` capabilities.
+          '';
+        };
+
+        password = mkOption {
+          type = types.nullOr secret;
+          default = null;
+          description = mdDoc ''
+            Password of the database user to initialise the database with.
+
+            If set to `null`, no password will be used.
+
+            The attribute `_secret` should point to a file containing the secret.
+          '';
+        };
+      };
+
+      initSecrets = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc ''
+          Whether to initialise non-existent secrets with random values.
+
+          If enabled, appropriate secrets for the following options will be created automatically
+          if the files referenced in the `_secret` attribute do not exist during startup.
+
+          - {option}`config.":pleroma"."Pleroma.Web.Endpoint".secret_key_base`
+          - {option}`config.":pleroma"."Pleroma.Web.Endpoint".signing_salt`
+          - {option}`config.":pleroma"."Pleroma.Web.Endpoint".live_view.signing_salt`
+          - {option}`config.":web_push_encryption".":vapid_details".private_key`
+          - {option}`config.":web_push_encryption".":vapid_details".public_key`
+          - {option}`config.":joken".":default_signer"`
+        '';
+      };
+
+      installWrapper = mkOption {
+        type = types.bool;
+        default = true;
+        description = mdDoc ''
+          Whether to install a wrapper around `pleroma_ctl` to simplify administration of the
+          Akkoma instance.
+        '';
+      };
+
+      extraPackages = mkOption {
+        type = with types; listOf package;
+        default = with pkgs; [ exiftool ffmpeg_5-headless graphicsmagick-imagemagick-compat ];
+        defaultText = literalExpression "with pkgs; [ exiftool ffmpeg_5-headless graphicsmagick-imagemagick-compat ]";
+        example = literalExpression "with pkgs; [ exiftool imagemagick ffmpeg_5-full ]";
+        description = mdDoc ''
+          List of extra packages to include in the executable search path of the service unit.
+          These are needed by various configurable components such as:
+
+          - ExifTool for the `Pleroma.Upload.Filter.Exiftool` upload filter,
+          - ImageMagick for still image previews in the media proxy as well as for the
+            `Pleroma.Upload.Filter.Mogrify` upload filter, and
+          - ffmpeg for video previews in the media proxy.
+        '';
+      };
+
+      frontends = mkOption {
+        description = mdDoc "Akkoma frontends.";
+        type = with types; attrsOf (submodule frontend);
+        default = {
+          primary = {
+            package = pkgs.akkoma-frontends.akkoma-fe;
+            name = "akkoma-fe";
+            ref = "stable";
+          };
+          admin = {
+            package = pkgs.akkoma-frontends.admin-fe;
+            name = "admin-fe";
+            ref = "stable";
+          };
+        };
+        defaultText = literalExpression ''
+          {
+            primary = {
+              package = pkgs.akkoma-frontends.akkoma-fe;
+              name = "akkoma-fe";
+              ref = "stable";
+            };
+            admin = {
+              package = pkgs.akkoma-frontends.admin-fe;
+              name = "admin-fe";
+              ref = "stable";
+            };
+          }
+        '';
+      };
+
+      extraStatic = mkOption {
+        type = with types; nullOr (attrsOf package);
+        description = mdDoc ''
+          Attribute set of extra packages to add to the static files directory.
+
+          Do not add frontends here. These should be configured through
+          [{option}`services.akkoma.frontends`](#opt-services.akkoma.frontends).
+        '';
+        default = null;
+        example = literalExpression ''
+          {
+            "emoji/blobs.gg" = pkgs.akkoma-emoji.blobs_gg;
+            "static/terms-of-service.html" = pkgs.writeText "terms-of-service.html" '''
+              …
+            ''';
+            "favicon.png" = let
+              rev = "697a8211b0f427a921e7935a35d14bb3e32d0a2c";
+            in pkgs.stdenvNoCC.mkDerivation {
+              name = "favicon.png";
+
+              src = pkgs.fetchurl {
+                url = "https://raw.githubusercontent.com/TilCreator/NixOwO/''${rev}/NixOwO_plain.svg";
+                hash = "sha256-tWhHMfJ3Od58N9H5yOKPMfM56hYWSOnr/TGCBi8bo9E=";
+              };
+
+              nativeBuildInputs = with pkgs; [ librsvg ];
+
+              dontUnpack = true;
+              installPhase = '''
+                rsvg-convert -o $out -w 96 -h 96 $src
+              ''';
+            };
+          }
+        '';
+      };
+
+      dist = {
+        address = mkOption {
+          type = ipAddress;
+          default = "127.0.0.1";
+          description = mdDoc ''
+            Listen address for Erlang distribution protocol and Port Mapper Daemon (epmd).
+          '';
+        };
+
+        epmdPort = mkOption {
+          type = types.port;
+          default = 4369;
+          description = mdDoc "TCP port to bind Erlang Port Mapper Daemon to.";
+        };
+
+        extraFlags = mkOption {
+          type = with types; listOf str;
+          default = [ ];
+          description = mdDoc "Extra flags to pass to Erlang";
+          example = [ "+sbwt" "none" "+sbwtdcpu" "none" "+sbwtdio" "none" ];
+        };
+
+        portMin = mkOption {
+          type = types.port;
+          default = 49152;
+          description = mdDoc "Lower bound for Erlang distribution protocol TCP port.";
+        };
+
+        portMax = mkOption {
+          type = types.port;
+          default = 65535;
+          description = mdDoc "Upper bound for Erlang distribution protocol TCP port.";
+        };
+
+        cookie = mkOption {
+          type = types.nullOr secret;
+          default = null;
+          example = { _secret = "/var/lib/secrets/akkoma/releaseCookie"; };
+          description = mdDoc ''
+            Erlang release cookie.
+
+            If set to `null`, a temporary random cookie will be generated.
+          '';
+        };
+      };
+
+      config = mkOption {
+        description = mdDoc ''
+          Configuration for Akkoma. The attributes are serialised to Elixir DSL.
+
+          Refer to <https://docs.akkoma.dev/stable/configuration/cheatsheet/> for
+          configuration options.
+
+          Settings containing secret data should be set to an attribute set containing the
+          attribute `_secret` - a string pointing to a file containing the value the option
+          should be set to.
+        '';
+        type = types.submodule {
+          freeformType = format.type;
+          options = {
+            ":pleroma" = {
+              ":instance" = {
+                name = mkOption {
+                  type = types.nonEmptyStr;
+                  description = mdDoc "Instance name.";
+                };
+
+                email = mkOption {
+                  type = types.nonEmptyStr;
+                  description = mdDoc "Instance administrator email.";
+                };
+
+                description = mkOption {
+                  type = types.nonEmptyStr;
+                  description = mdDoc "Instance description.";
+                };
+
+                static_dir = mkOption {
+                  type = types.path;
+                  default = toString staticFiles;
+                  defaultText = literalMD ''
+                    Derivation gathering the following paths into a directory:
+
+                    - [{option}`services.akkoma.frontends`](#opt-services.akkoma.frontends)
+                    - [{option}`services.akkoma.extraStatic`](#opt-services.akkoma.extraStatic)
+                  '';
+                  description = mdDoc ''
+                    Directory of static files.
+
+                    This directory can be built using a derivation, or it can be managed as mutable
+                    state by setting the option to an absolute path.
+                  '';
+                };
+
+                upload_dir = mkOption {
+                  type = absolutePath;
+                  default = "/var/lib/akkoma/uploads";
+                  description = mdDoc ''
+                    Directory where Akkoma will put uploaded files.
+                  '';
+                };
+              };
+
+              "Pleroma.Repo" = mkOption {
+                type = elixirValue;
+                default = {
+                  adapter = format.lib.mkRaw "Ecto.Adapters.Postgres";
+                  socket_dir = "/run/postgresql";
+                  username = cfg.user;
+                  database = "akkoma";
+                };
+                defaultText = literalExpression ''
+                  {
+                    adapter = (pkgs.formats.elixirConf { }).lib.mkRaw "Ecto.Adapters.Postgres";
+                    socket_dir = "/run/postgresql";
+                    username = config.services.akkoma.user;
+                    database = "akkoma";
+                  }
+                '';
+                description = mdDoc ''
+                  Database configuration.
+
+                  Refer to
+                  <https://hexdocs.pm/ecto_sql/Ecto.Adapters.Postgres.html#module-connection-options>
+                  for options.
+                '';
+              };
+
+              "Pleroma.Web.Endpoint" = {
+                url = {
+                  host = mkOption {
+                    type = types.nonEmptyStr;
+                    default = config.networking.fqdn;
+                    defaultText = literalExpression "config.networking.fqdn";
+                    description = mdDoc "Domain name of the instance.";
+                  };
+
+                  scheme = mkOption {
+                    type = types.nonEmptyStr;
+                    default = "https";
+                    description = mdDoc "URL scheme.";
+                  };
+
+                  port = mkOption {
+                    type = types.port;
+                    default = 443;
+                    description = mdDoc "External port number.";
+                  };
+                };
+
+                http = {
+                  ip = mkOption {
+                    type = types.either absolutePath ipAddress;
+                    default = "/run/akkoma/socket";
+                    example = "::1";
+                    description = mdDoc ''
+                      Listener IP address or Unix socket path.
+
+                      The value is automatically converted to Elixir’s internal address
+                      representation during serialisation.
+                    '';
+                  };
+
+                  port = mkOption {
+                    type = types.port;
+                    default = if isAbsolutePath web.http.ip then 0 else 4000;
+                    defaultText = literalExpression ''
+                      if isAbsolutePath config.services.akkoma.config.":pleroma"."Pleroma.Web.Endpoint".http.ip
+                        then 0
+                        else 4000;
+                    '';
+                    description = mdDoc ''
+                      Listener port number.
+
+                      Must be 0 if using a Unix socket.
+                    '';
+                  };
+                };
+
+                secret_key_base = mkOption {
+                  type = secret;
+                  default = { _secret = "/var/lib/secrets/akkoma/key-base"; };
+                  description = mdDoc ''
+                    Secret key used as a base to generate further secrets for encrypting and
+                    signing data.
+
+                    The attribute `_secret` should point to a file containing the secret.
+
+                    This key can be generated as follows:
+
+                    ```ShellSession
+                    $ tr -dc 'A-Za-z-._~' </dev/urandom | head -c 64
+                    ```
+                  '';
+                };
+
+                live_view = {
+                  signing_salt = mkOption {
+                    type = secret;
+                    default = { _secret = "/var/lib/secrets/akkoma/liveview-salt"; };
+                    description = mdDoc ''
+                      LiveView signing salt.
+
+                      The attribute `_secret` should point to a file containing the secret.
+
+                      This salt can be generated as follows:
+
+                      ```ShellSession
+                      $ tr -dc 'A-Za-z0-9-._~' </dev/urandom | head -c 8
+                      ```
+                    '';
+                  };
+                };
+
+                signing_salt = mkOption {
+                  type = secret;
+                  default = { _secret = "/var/lib/secrets/akkoma/signing-salt"; };
+                  description = mdDoc ''
+                    Signing salt.
+
+                    The attribute `_secret` should point to a file containing the secret.
+
+                    This salt can be generated as follows:
+
+                    ```ShellSession
+                    $ tr -dc 'A-Za-z0-9-._~' </dev/urandom | head -c 8
+                    ```
+                  '';
+                };
+              };
+
+              ":frontends" = mkOption {
+                type = elixirValue;
+                default = mapAttrs
+                  (key: val: format.lib.mkMap { name = val.name; ref = val.ref; })
+                  cfg.frontends;
+                defaultText = literalExpression ''
+                  lib.mapAttrs (key: val:
+                    (pkgs.formats.elixirConf { }).lib.mkMap { name = val.name; ref = val.ref; })
+                    config.services.akkoma.frontends;
+                '';
+                description = mdDoc ''
+                  Frontend configuration.
+
+                  Users should rely on the default value and prefer to configure frontends through
+                  [{option}`config.services.akkoma.frontends`](#opt-services.akkoma.frontends).
+                '';
+              };
+            };
+
+            ":web_push_encryption" = mkOption {
+              default = { };
+              description = mdDoc ''
+                Web Push Notifications configuration.
+
+                The necessary key pair can be generated as follows:
+
+                ```ShellSession
+                $ nix-shell -p nodejs --run 'npx web-push generate-vapid-keys'
+                ```
+              '';
+              type = types.submodule {
+                freeformType = elixirValue;
+                options = {
+                  ":vapid_details" = {
+                    subject = mkOption {
+                      type = types.nonEmptyStr;
+                      default = "mailto:${ex.":pleroma".":instance".email}";
+                      defaultText = literalExpression ''
+                        "mailto:''${config.services.akkoma.config.":pleroma".":instance".email}"
+                      '';
+                      description = mdDoc "mailto URI for administrative contact.";
+                    };
+
+                    public_key = mkOption {
+                      type = with types; either nonEmptyStr secret;
+                      default = { _secret = "/var/lib/secrets/akkoma/vapid-public"; };
+                      description = mdDoc "base64-encoded public ECDH key.";
+                    };
+
+                    private_key = mkOption {
+                      type = secret;
+                      default = { _secret = "/var/lib/secrets/akkoma/vapid-private"; };
+                      description = mdDoc ''
+                        base64-encoded private ECDH key.
+
+                        The attribute `_secret` should point to a file containing the secret.
+                      '';
+                    };
+                  };
+                };
+              };
+            };
+
+            ":joken" = {
+              ":default_signer" = mkOption {
+                type = secret;
+                default = { _secret = "/var/lib/secrets/akkoma/jwt-signer"; };
+                description = mdDoc ''
+                  JWT signing secret.
+
+                  The attribute `_secret` should point to a file containing the secret.
+
+                  This secret can be generated as follows:
+
+                  ```ShellSession
+                  $ tr -dc 'A-Za-z0-9-._~' </dev/urandom | head -c 64
+                  ```
+                '';
+              };
+            };
+
+            ":logger" = {
+              ":backends" = mkOption {
+                type = types.listOf elixirValue;
+                visible = false;
+                default = with format.lib; [
+                  (mkTuple [ (mkRaw "ExSyslogger") (mkAtom ":ex_syslogger") ])
+                ];
+              };
+
+              ":ex_syslogger" = {
+                ident = mkOption {
+                  type = types.str;
+                  visible = false;
+                  default = "akkoma";
+                };
+
+                level = mkOption {
+                  type = types.nonEmptyStr;
+                  apply = format.lib.mkAtom;
+                  default = ":info";
+                  example = ":warning";
+                  description = mdDoc ''
+                    Log level.
+
+                    Refer to
+                    <https://hexdocs.pm/logger/Logger.html#module-levels>
+                    for options.
+                  '';
+                };
+              };
+            };
+
+            ":tzdata" = {
+              ":data_dir" = mkOption {
+                type = elixirValue;
+                internal = true;
+                default = format.lib.mkRaw ''
+                  Path.join(System.fetch_env!("CACHE_DIRECTORY"), "tzdata")
+                '';
+              };
+            };
+          };
+        };
+      };
+
+      nginx = mkOption {
+        type = with types; nullOr (submodule
+          (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }));
+        default = null;
+        description = mdDoc ''
+          Extra configuration for the nginx virtual host of Akkoma.
+
+          If set to `null`, no virtual host will be added to the nginx configuration.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = optionals (with config.security; (!sudo.enable) && (!sudo-rs.enable)) [''
+      The pleroma_ctl wrapper enabled by the installWrapper option relies on
+      sudo, which appears to have been disabled through security.sudo.enable
+      or security.sudo-rs.enable.
+    ''];
+
+    users = {
+      users."${cfg.user}" = {
+        description = "Akkoma user";
+        group = cfg.group;
+        isSystemUser = true;
+      };
+      groups."${cfg.group}" = { };
+    };
+
+    # Confinement of the main service unit requires separation of the
+    # configuration generation into a separate unit to permit access to secrets
+    # residing outside of the chroot.
+    systemd.services.akkoma-config = {
+      description = "Akkoma social network configuration";
+      reloadTriggers = [ configFile ] ++ secretPaths;
+
+      unitConfig.PropagatesReloadTo = [ "akkoma.service" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        UMask = "0077";
+
+        RuntimeDirectory = "akkoma";
+
+        ExecStart = mkMerge [
+          (mkIf (cfg.dist.cookie == null) [ genScript ])
+          (mkIf (cfg.dist.cookie != null) [ copyScript ])
+          (mkIf cfg.initSecrets [ initSecretsScript ])
+          [ configScript ]
+        ];
+
+        ExecReload = mkMerge [
+          (mkIf cfg.initSecrets [ initSecretsScript ])
+          [ configScript ]
+        ];
+      };
+    };
+
+    systemd.services.akkoma-initdb = mkIf cfg.initDb.enable {
+      description = "Akkoma social network database setup";
+      requires = [ "akkoma-config.service" ];
+      requiredBy = [ "akkoma.service" ];
+      after = [ "akkoma-config.service" "postgresql.service" ];
+      before = [ "akkoma.service" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = mkIf (db ? socket_dir || db ? socket)
+          cfg.initDb.username;
+        RemainAfterExit = true;
+        UMask = "0077";
+        ExecStart = initDbScript;
+        PrivateTmp = true;
+      };
+    };
+
+    systemd.services.akkoma = let
+      runtimeInputs = with pkgs; [ coreutils gawk gnused ] ++ cfg.extraPackages;
+    in {
+      description = "Akkoma social network";
+      documentation = [ "https://docs.akkoma.dev/stable/" ];
+
+      # This service depends on network-online.target and is sequenced after
+      # it because it requires access to the Internet to function properly.
+      bindsTo = [ "akkoma-config.service" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [
+        "akkoma-config.service"
+        "network.target"
+        "network-online.target"
+        "postgresql.service"
+      ];
+
+      confinement.packages = mkIf isConfined runtimeInputs;
+      path = runtimeInputs;
+
+      serviceConfig = {
+        Type = "exec";
+        User = cfg.user;
+        Group = cfg.group;
+        UMask = "0077";
+
+        # The run-time directory is preserved as it is managed by the akkoma-config.service unit.
+        RuntimeDirectory = "akkoma";
+        RuntimeDirectoryPreserve = true;
+
+        CacheDirectory = "akkoma";
+
+        BindPaths = [ "${uploadDir}:${uploadDir}:norbind" ];
+        BindReadOnlyPaths = mkMerge [
+          (mkIf (!isStorePath staticDir) [ "${staticDir}:${staticDir}:norbind" ])
+          (mkIf isConfined (mkMerge [
+            [ "/etc/hosts" "/etc/resolv.conf" ]
+            (mkIf (isStorePath staticDir) (map (dir: "${dir}:${dir}:norbind")
+              (splitString "\n" (readFile ((pkgs.closureInfo { rootPaths = staticDir; }) + "/store-paths")))))
+            (mkIf (db ? socket_dir) [ "${db.socket_dir}:${db.socket_dir}:norbind" ])
+            (mkIf (db ? socket) [ "${db.socket}:${db.socket}:norbind" ])
+          ]))
+        ];
+
+        ExecStartPre = "${envWrapper}/bin/pleroma_ctl migrate";
+        ExecStart = "${envWrapper}/bin/pleroma start";
+        ExecStartPost = socketScript;
+        ExecStop = "${envWrapper}/bin/pleroma stop";
+        ExecStopPost = mkIf (isAbsolutePath web.http.ip)
+          "${pkgs.coreutils}/bin/rm -f '${web.http.ip}'";
+
+        ProtectProc = "noaccess";
+        ProcSubset = "pid";
+        ProtectSystem = mkIf (!isConfined) "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateIPC = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+
+        CapabilityBoundingSet = mkIf
+          (any (port: port > 0 && port < 1024)
+            [ web.http.port cfg.dist.epmdPort cfg.dist.portMin ])
+          [ "CAP_NET_BIND_SERVICE" ];
+
+        NoNewPrivileges = true;
+        SystemCallFilter = [ "@system-service" "~@privileged" "@chown" ];
+        SystemCallArchitectures = "native";
+
+        DeviceAllow = null;
+        DevicePolicy = "closed";
+
+        # SMTP adapter uses dynamic port 0 binding, which is incompatible with bind address filtering
+        SocketBindAllow = mkIf (!hasSmtp) (mkMerge [
+          [ "tcp:${toString cfg.dist.epmdPort}" "tcp:${toString cfg.dist.portMin}-${toString cfg.dist.portMax}" ]
+          (mkIf (web.http.port != 0) [ "tcp:${toString web.http.port}" ])
+        ]);
+        SocketBindDeny = mkIf (!hasSmtp) "any";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${uploadDir}  0700 ${cfg.user} ${cfg.group} - -"
+      "Z ${uploadDir} ~0700 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    environment.systemPackages = mkIf (cfg.installWrapper) [ userWrapper ];
+
+    services.nginx.virtualHosts = mkIf (cfg.nginx != null) {
+      ${web.url.host} = mkMerge [ cfg.nginx {
+        locations."/" = {
+          proxyPass =
+            if isAbsolutePath web.http.ip
+              then "http://unix:${web.http.ip}"
+              else if hasInfix ":" web.http.ip
+                then "http://[${web.http.ip}]:${toString web.http.port}"
+                else "http://${web.http.ip}:${toString web.http.port}";
+
+          proxyWebsockets = true;
+          recommendedProxySettings = true;
+        };
+      }];
+    };
+  };
+
+  meta.maintainers = with maintainers; [ mvs ];
+  meta.doc = ./akkoma.md;
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/alps.nix b/nixpkgs/nixos/modules/services/web-apps/alps.nix
new file mode 100644
index 000000000000..05fb676102df
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/alps.nix
@@ -0,0 +1,132 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.alps;
+in {
+  options.services.alps = {
+    enable = mkEnableOption (lib.mdDoc "alps");
+
+    port = mkOption {
+      type = types.port;
+      default = 1323;
+      description = lib.mdDoc ''
+        TCP port the service should listen on.
+      '';
+    };
+
+    bindIP = mkOption {
+      default = "[::]";
+      type = types.str;
+      description = lib.mdDoc ''
+        The IP the service should listen on.
+      '';
+    };
+
+    theme = mkOption {
+      type = types.enum [ "alps" "sourcehut" ];
+      default = "sourcehut";
+      description = lib.mdDoc ''
+        The frontend's theme to use.
+      '';
+    };
+
+    imaps = {
+      port = mkOption {
+        type = types.port;
+        default = 993;
+        description = lib.mdDoc ''
+          The IMAPS server port.
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "[::1]";
+        example = "mail.example.org";
+        description = lib.mdDoc ''
+          The IMAPS server address.
+        '';
+      };
+    };
+
+    smtps = {
+      port = mkOption {
+        type = types.port;
+        default = 465;
+        description = lib.mdDoc ''
+          The SMTPS server port.
+        '';
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = cfg.imaps.host;
+        defaultText = "services.alps.imaps.host";
+        example = "mail.example.org";
+        description = lib.mdDoc ''
+          The SMTPS server address.
+        '';
+      };
+    };
+
+    package = mkOption {
+      internal = true;
+      type = types.package;
+      default = pkgs.alps;
+    };
+
+    args = mkOption {
+      internal = true;
+      type = types.listOf types.str;
+      default = [
+        "-addr" "${cfg.bindIP}:${toString cfg.port}"
+        "-theme" "${cfg.theme}"
+        "imaps://${cfg.imaps.host}:${toString cfg.imaps.port}"
+        "smtps://${cfg.smtps.host}:${toString cfg.smtps.port}"
+      ];
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.alps = {
+      description = "alps is a simple and extensible webmail.";
+      documentation = [ "https://git.sr.ht/~migadu/alps" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "network-online.target" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/alps ${escapeShellArgs cfg.args}";
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateIPC = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SocketBindAllow = cfg.port;
+        SocketBindDeny = "any";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged @obsolete" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/anuko-time-tracker.nix b/nixpkgs/nixos/modules/services/web-apps/anuko-time-tracker.nix
new file mode 100644
index 000000000000..f43cbc40ec7a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/anuko-time-tracker.nix
@@ -0,0 +1,388 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.anuko-time-tracker;
+  configFile = let
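+    # Read the SMTP password from the configured file at PHP run time, so that the
+    # secret itself does not end up in the generated config.php in the Nix store.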
+    smtpPassword = if cfg.settings.email.smtpPasswordFile == null
+                   then "''"
+                   else "trim(file_get_contents('${cfg.settings.email.smtpPasswordFile}'))";
+
+  in pkgs.writeText "config.php" ''
+    <?php
+    // Set include path for PEAR and its modules, which we include in the distribution.
+    // Updated for the correct location in the nix store.
+    set_include_path('${cfg.package}/WEB-INF/lib/pear' . PATH_SEPARATOR . get_include_path());
+    define('DSN', 'mysqli://${cfg.database.user}@${cfg.database.host}/${cfg.database.name}?charset=utf8mb4');
+    define('MULTIORG_MODE', ${lib.boolToString cfg.settings.multiorgMode});
+    define('EMAIL_REQUIRED', ${lib.boolToString cfg.settings.emailRequired});
+    define('WEEKEND_START_DAY', ${toString cfg.settings.weekendStartDay});
+    define('FORUM_LINK', '${cfg.settings.forumLink}');
+    define('HELP_LINK', '${cfg.settings.helpLink}');
+    define('SENDER', '${cfg.settings.email.sender}');
+    define('MAIL_MODE', '${cfg.settings.email.mode}');
+    define('MAIL_SMTP_HOST', '${toString cfg.settings.email.smtpHost}');
+    define('MAIL_SMTP_PORT', '${toString cfg.settings.email.smtpPort}');
+    define('MAIL_SMTP_USER', '${cfg.settings.email.smtpUser}');
+    define('MAIL_SMTP_PASSWORD', ${smtpPassword});
+    define('MAIL_SMTP_AUTH', ${lib.boolToString cfg.settings.email.smtpAuth});
+    define('MAIL_SMTP_DEBUG', ${lib.boolToString cfg.settings.email.smtpDebug});
+    define('DEFAULT_CSS', 'default.css');
+    define('RTL_CSS', 'rtl.css'); // For right to left languages.
+    define('LANG_DEFAULT', '${cfg.settings.defaultLanguage}');
+    define('CURRENCY_DEFAULT', '${cfg.settings.defaultCurrency}');
+    define('EXPORT_DECIMAL_DURATION', ${lib.boolToString cfg.settings.exportDecimalDuration});
+    define('REPORT_FOOTER', ${lib.boolToString cfg.settings.reportFooter});
+    define('AUTH_MODULE', 'db');
+  '';
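+  # Repackage the upstream sources with the generated config.php linked in, the
+  # templates_c directory redirected to the writable dataDir and the unsafe
+  # dbinstall.php removed.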
+  package = pkgs.stdenv.mkDerivation rec {
+    pname = "anuko-time-tracker";
+    inherit (src) version;
+    src = cfg.package;
+    installPhase = ''
+      mkdir -p $out
+      cp -r * $out/
+
+      # Link config file
+      ln -s ${configFile} $out/WEB-INF/config.php
+
+      # Link writable templates_c directory
+      rm -rf $out/WEB-INF/templates_c
+      ln -s ${cfg.dataDir}/templates_c $out/WEB-INF/templates_c
+
+      # Remove unsafe dbinstall.php
+      rm -f $out/dbinstall.php
+    '';
+  };
+in
+{
+  options.services.anuko-time-tracker = {
+    enable = lib.mkEnableOption (lib.mdDoc "Anuko Time Tracker");
+
+    package = lib.mkPackageOptionMD pkgs "anuko-time-tracker" {};
+
+    database = {
+      createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc "Create the database and database user locally.";
+      };
+
+      host = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc "Database host.";
+        default = "localhost";
+      };
+
+      name = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc "Database name.";
+        default = "anuko_time_tracker";
+      };
+
+      user = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc "Database username.";
+        default = "anuko_time_tracker";
+      };
+
+      passwordFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        description = lib.mdDoc "Database user password file.";
+        default = null;
+      };
+    };
+
+    poolConfig = lib.mkOption {
+      type = lib.types.attrsOf (lib.types.oneOf [ lib.types.str lib.types.int lib.types.bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = lib.mdDoc ''
+        Options for Anuko Time Tracker's PHP-FPM pool.
+      '';
+    };
+
+    hostname = lib.mkOption {
+      type = lib.types.str;
+      default =
+        if config.networking.domain != null
+        then config.networking.fqdn
+        else config.networking.hostName;
+      defaultText = lib.literalExpression "config.networking.fqdn";
+      example = "anuko.example.com";
+      description = lib.mdDoc ''
+        The hostname to serve Anuko Time Tracker on.
+      '';
+    };
+
+    nginx = lib.mkOption {
+      type = lib.types.submodule (
+        lib.recursiveUpdate
+          (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {}
+      );
+      default = {};
+      example = lib.literalExpression ''
+        {
+          serverAliases = [
+            "anuko.''${config.networking.domain}"
+          ];
+
+          # To enable encryption and let Let's Encrypt take care of the certificate
+          forceSSL = true;
+          enableACME = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        With this option, you can customize the Nginx virtualHost settings.
+      '';
+    };
+
+    dataDir = lib.mkOption {
+      type = lib.types.str;
+      default = "/var/lib/anuko-time-tracker";
+      description = lib.mdDoc "Default data folder for Anuko Time Tracker.";
+      example = "/mnt/anuko-time-tracker";
+    };
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      default = "anuko_time_tracker";
+      description = lib.mdDoc "User under which Anuko Time Tracker runs.";
+    };
+
+    settings = {
+      multiorgMode = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Defines whether users see the Register option in the menu of Time Tracker that allows them
+          to self-register and create new organizations (top groups).
+        '';
+      };
+
+      emailRequired = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Defines whether an email is required for new registrations.";
+      };
+
+      weekendStartDay = lib.mkOption {
+        type = lib.types.int;
+        default = 6;
+        description = lib.mdDoc ''
+          This option defines which days are highlighted with weekend color.
+          6 means Saturday. For countries such as Saudi Arabia, set it to 4 so that Thursday
+          and Friday are treated as weekend days.
+        '';
+      };
+
+      forumLink = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc "Forum link from the main menu.";
+        default = "https://www.anuko.com/forum/viewforum.php?f=4";
+      };
+
+      helpLink = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc "Help link from the main menu.";
+        default = "https://www.anuko.com/time-tracker/user-guide/index.htm";
+      };
+
+      email = {
+        sender = lib.mkOption {
+          type = lib.types.str;
+          description = lib.mdDoc "Default sender for mail.";
+          default = "Anuko Time Tracker <bounces@example.com>";
+        };
+
+        mode = lib.mkOption {
+          type = lib.types.str;
+          description = lib.mdDoc "Mail sending mode. Can be 'mail' or 'smtp'.";
+          default = "smtp";
+        };
+
+        smtpHost = lib.mkOption {
+          type = lib.types.str;
+          description = lib.mdDoc "MTA hostname.";
+          default = "localhost";
+        };
+
+        smtpPort = lib.mkOption {
+          type = lib.types.int;
+          description = lib.mdDoc "MTA port.";
+          default = 25;
+        };
+
+        smtpUser = lib.mkOption {
+          type = lib.types.str;
+          description = lib.mdDoc "MTA authentication username.";
+          default = "";
+        };
+
+        smtpAuth = lib.mkOption {
+          type = lib.types.bool;
+          default = false;
+          description = lib.mdDoc "MTA requires authentication.";
+        };
+
+        smtpPasswordFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/anuko-time-tracker/secrets/smtp-password";
+          description = lib.mdDoc ''
+            Path to file containing the MTA authentication password.
+          '';
+        };
+
+        smtpDebug = lib.mkOption {
+          type = lib.types.bool;
+          default = false;
+          description = lib.mdDoc "Debug mail sending.";
+        };
+      };
+
+      defaultLanguage = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc ''
+          Defines the default language of Anuko Time Tracker. It is used on the Time Tracker
+          login page; after login, the language configured for the user group is used.
+          An empty string means the language is determined by the user's browser.
+        '';
+        default = "";
+        example = "nl";
+      };
+
+      defaultCurrency = lib.mkOption {
+        type = lib.types.str;
+        description = lib.mdDoc ''
+          Defines a default currency symbol for new groups.
+          Use €, £, or a more specific dollar such as US$ or CAD.
+        '';
+        default = "$";
+        example = "€";
+      };
+
+      exportDecimalDuration = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Defines whether time duration values are decimal in CSV and XML data
+          exports (1.25 vs 1:15).
+        '';
+      };
+
+      reportFooter = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc "Defines whether to use a footer on reports.";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = ''
+          <option>services.anuko-time-tracker.database.passwordFile</option> cannot be specified if
+          <option>services.anuko-time-tracker.database.createLocally</option> is set to true.
+        '';
+      }
+      {
+        assertion = cfg.settings.email.smtpAuth -> (cfg.settings.email.smtpPasswordFile != null);
+        message = ''
+          <option>services.anuko-time-tracker.settings.email.smtpPasswordFile</option> needs to be set if
+          <option>services.anuko-time-tracker.settings.email.smtpAuth</option> is enabled.
+        '';
+      }
+    ];
+
+    services.phpfpm = {
+      pools.anuko-time-tracker = {
+        inherit (cfg) user;
+        group = config.services.nginx.group;
+        settings = {
+          "listen.owner" = config.services.nginx.user;
+          "listen.group" = config.services.nginx.group;
+        } // cfg.poolConfig;
+      };
+    };
+
+    services.nginx = {
+      enable = lib.mkDefault true;
+      recommendedTlsSettings = true;
+      recommendedOptimisation = true;
+      recommendedGzipSettings = true;
+      virtualHosts."${cfg.hostname}" = lib.mkMerge [
+        cfg.nginx
+        {
+          root = lib.mkForce "${package}";
+          locations = {
+            "/".index = "index.php";
+            "~ [^/]\\.php(/|$)" = {
+              extraConfig = ''
+                fastcgi_split_path_info ^(.+?\.php)(/.*)$;
+                fastcgi_pass unix:${config.services.phpfpm.pools.anuko-time-tracker.socket};
+              '';
+            };
+          };
+        }
+      ];
+    };
+
+    services.mysql = lib.mkIf cfg.database.createLocally {
+      enable = lib.mkDefault true;
+      package = lib.mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [{
+        name = cfg.database.user;
+        ensurePermissions = {
+          "${cfg.database.name}.*" = "ALL PRIVILEGES";
+        };
+      }];
+    };
+
+    systemd = {
+      services = {
+        anuko-time-tracker-setup-database = lib.mkIf cfg.database.createLocally {
+          description = "Set up Anuko Time Tracker database";
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+          };
+          wantedBy = [ "phpfpm-anuko-time-tracker.service" ];
+          after = [ "mysql.service" ];
+          script =
+            let
+              mysql = "${config.services.mysql.package}/bin/mysql";
+            in
+            ''
+              if [ ! -f ${cfg.dataDir}/.dbexists ]; then
+                # Load database schema provided with package
+                ${mysql} ${cfg.database.name} < ${cfg.package}/mysql.sql
+
+                touch ${cfg.dataDir}/.dbexists
+              fi
+            '';
+        };
+      };
+      tmpfiles.rules = [
+        "d ${cfg.dataDir} 0750 ${cfg.user} ${config.services.nginx.group} -"
+        "d ${cfg.dataDir}/templates_c 0750 ${cfg.user} ${config.services.nginx.group} -"
+      ];
+    };
+
+    users.users."${cfg.user}" = {
+      isSystemUser = true;
+      group = config.services.nginx.group;
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ michaelshmitty ];
+}
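A minimal usage sketch for the Anuko Time Tracker module above, using only options it declares; `database.createLocally` provisions the local MariaDB database set up in the config section (values shown are illustrative assumptions):

```
{
  services.anuko-time-tracker = {
    enable = true;
    # Let the module create and initialise the local MariaDB database.
    database.createLocally = true;
  };
}
```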
diff --git a/nixpkgs/nixos/modules/services/web-apps/atlassian/confluence.nix b/nixpkgs/nixos/modules/services/web-apps/atlassian/confluence.nix
new file mode 100644
index 000000000000..fe98c1777ea0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/atlassian/confluence.nix
@@ -0,0 +1,228 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.confluence;
+
+  pkg = cfg.package.override (optionalAttrs cfg.sso.enable {
+    enableSSO = cfg.sso.enable;
+  });
+
+  crowdProperties = pkgs.writeText "crowd.properties" ''
+    application.name                        ${cfg.sso.applicationName}
+    application.password                    ${if cfg.sso.applicationPassword != null then cfg.sso.applicationPassword else "@NIXOS_CONFLUENCE_CROWD_SSO_PWD@"}
+    application.login.url                   ${cfg.sso.crowd}/console/
+
+    crowd.server.url                        ${cfg.sso.crowd}/services/
+    crowd.base.url                          ${cfg.sso.crowd}/
+
+    session.isauthenticated                 session.isauthenticated
+    session.tokenkey                        session.tokenkey
+    session.validationinterval              ${toString cfg.sso.validationInterval}
+    session.lastvalidation                  session.lastvalidation
+  '';
+
+in
+
+{
+  options = {
+    services.confluence = {
+      enable = mkEnableOption (lib.mdDoc "Atlassian Confluence service");
+
+      user = mkOption {
+        type = types.str;
+        default = "confluence";
+        description = lib.mdDoc "User which runs confluence.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "confluence";
+        description = lib.mdDoc "Group which runs confluence.";
+      };
+
+      home = mkOption {
+        type = types.str;
+        default = "/var/lib/confluence";
+        description = lib.mdDoc "Home directory of the confluence instance.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "Address to listen on.";
+      };
+
+      listenPort = mkOption {
+        type = types.port;
+        default = 8090;
+        description = lib.mdDoc "Port to listen on.";
+      };
+
+      catalinaOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-Xms1024m" "-Xmx2048m" "-Dconfluence.disable.peopledirectory.all=true" ];
+        description = lib.mdDoc "Java options to pass to catalina/tomcat.";
+      };
+
+      proxy = {
+        enable = mkEnableOption (lib.mdDoc "proxy support");
+
+        name = mkOption {
+          type = types.str;
+          example = "confluence.example.com";
+          description = lib.mdDoc "Virtual hostname at the proxy";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 443;
+          example = 80;
+          description = lib.mdDoc "Port used at the proxy";
+        };
+
+        scheme = mkOption {
+          type = types.str;
+          default = "https";
+          example = "http";
+          description = lib.mdDoc "Protocol used at the proxy.";
+        };
+      };
+
+      sso = {
+        enable = mkEnableOption (lib.mdDoc "SSO with Atlassian Crowd");
+
+        crowd = mkOption {
+          type = types.str;
+          example = "http://localhost:8095/crowd";
+          description = lib.mdDoc "Crowd Base URL without trailing slash";
+        };
+
+        applicationName = mkOption {
+          type = types.str;
+          example = "jira";
+          description = lib.mdDoc "Exact name of this Confluence instance in Crowd";
+        };
+
+        applicationPassword = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc "Application password of this Confluence instance in Crowd";
+        };
+
+        applicationPasswordFile = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc "Path to a file containing the application password of this Confluence instance in Crowd.";
+        };
+
+        validationInterval = mkOption {
+          type = types.int;
+          default = 2;
+          example = 0;
+          description = lib.mdDoc ''
+            Set to 0 if you want authentication checks to occur on each
+            request. Otherwise, set to the number of minutes between requests
+            to validate whether the user is logged in or out of the Crowd SSO
+            server. Setting this value to 1 or higher will increase the
+            performance of Crowd's integration.
+          '';
+        };
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.atlassian-confluence;
+        defaultText = literalExpression "pkgs.atlassian-confluence";
+        description = lib.mdDoc "Atlassian Confluence package to use.";
+      };
+
+      jrePackage = mkOption {
+        type = types.package;
+        default = pkgs.oraclejre8;
+        defaultText = literalExpression "pkgs.oraclejre8";
+        description = lib.mdDoc "The JRE package to use. Note that Atlassian only supports the Oracle JRE (JRASERVER-46152).";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.${cfg.user} = {
+      isSystemUser = true;
+      group = cfg.group;
+    };
+
+    assertions = [
+      { assertion = cfg.sso.enable -> ((cfg.sso.applicationPassword == null) != (cfg.sso.applicationPasswordFile == null));
+        message = "Please set either applicationPassword or applicationPasswordFile";
+      }
+    ];
+
+    warnings = mkIf (cfg.sso.enable && cfg.sso.applicationPassword != null) [
+      "Using `services.confluence.sso.applicationPassword` is deprecated! Use `applicationPasswordFile` instead!"
+    ];
+
+    users.groups.${cfg.group} = {};
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.home}' - ${cfg.user} - - -"
+      "d /run/confluence - - - - -"
+
+      "L+ /run/confluence/home - - - - ${cfg.home}"
+      "L+ /run/confluence/logs - - - - ${cfg.home}/logs"
+      "L+ /run/confluence/temp - - - - ${cfg.home}/temp"
+      "L+ /run/confluence/work - - - - ${cfg.home}/work"
+      "L+ /run/confluence/server.xml - - - - ${cfg.home}/server.xml"
+    ];
+
+    systemd.services.confluence = {
+      description = "Atlassian Confluence";
+
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "postgresql.service" ];
+      after = [ "postgresql.service" ];
+
+      path = [ cfg.jrePackage pkgs.bash ];
+
+      environment = {
+        CONF_USER = cfg.user;
+        JAVA_HOME = "${cfg.jrePackage}";
+        CATALINA_OPTS = concatStringsSep " " cfg.catalinaOptions;
+        JAVA_OPTS = mkIf cfg.sso.enable "-Dcrowd.properties=${cfg.home}/crowd.properties";
+      };
+
+      preStart = ''
+        mkdir -p ${cfg.home}/{logs,work,temp,deploy}
+
+        sed -e 's,port="8090",port="${toString cfg.listenPort}" address="${cfg.listenAddress}",' \
+        '' + (lib.optionalString cfg.proxy.enable ''
+          -e 's,protocol="org.apache.coyote.http11.Http11NioProtocol",protocol="org.apache.coyote.http11.Http11NioProtocol" proxyName="${cfg.proxy.name}" proxyPort="${toString cfg.proxy.port}" scheme="${cfg.proxy.scheme}",' \
+        '') + ''
+          ${pkg}/conf/server.xml.dist > ${cfg.home}/server.xml
+
+        ${optionalString cfg.sso.enable ''
+          install -m660 ${crowdProperties} ${cfg.home}/crowd.properties
+          ${optionalString (cfg.sso.applicationPasswordFile != null) ''
+            ${pkgs.replace-secret}/bin/replace-secret \
+              '@NIXOS_CONFLUENCE_CROWD_SSO_PWD@' \
+              ${cfg.sso.applicationPasswordFile} \
+              ${cfg.home}/crowd.properties
+          ''}
+        ''}
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        PrivateTmp = true;
+        Restart = "on-failure";
+        RestartSec = "10";
+        ExecStart = "${pkg}/bin/start-confluence.sh -fg";
+        ExecStop = "${pkg}/bin/stop-confluence.sh";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/atlassian/crowd.nix b/nixpkgs/nixos/modules/services/web-apps/atlassian/crowd.nix
new file mode 100644
index 000000000000..c8d1eaef31d8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/atlassian/crowd.nix
@@ -0,0 +1,197 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.crowd;
+
+  pkg = cfg.package.override {
+    home = cfg.home;
+    port = cfg.listenPort;
+    openidPassword = cfg.openidPassword;
+  } // (optionalAttrs cfg.proxy.enable {
+    proxyUrl = "${cfg.proxy.scheme}://${cfg.proxy.name}:${toString cfg.proxy.port}";
+  });
+
+  crowdPropertiesFile = pkgs.writeText "crowd.properties" ''
+    application.name                        crowd-openid-server
+    application.password @NIXOS_CROWD_OPENID_PW@
+    application.base.url                    http://localhost:${toString cfg.listenPort}/openidserver
+    application.login.url                   http://localhost:${toString cfg.listenPort}/openidserver
+    application.login.url.template          http://localhost:${toString cfg.listenPort}/openidserver?returnToUrl=''${RETURN_TO_URL}
+
+    crowd.server.url                        http://localhost:${toString cfg.listenPort}/crowd/services/
+
+    session.isauthenticated                 session.isauthenticated
+    session.tokenkey                        session.tokenkey
+    session.validationinterval              0
+    session.lastvalidation                  session.lastvalidation
+  '';
+
+in
+
+{
+  options = {
+    services.crowd = {
+      enable = mkEnableOption (lib.mdDoc "Atlassian Crowd service");
+
+      user = mkOption {
+        type = types.str;
+        default = "crowd";
+        description = lib.mdDoc "User which runs Crowd.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "crowd";
+        description = lib.mdDoc "Group which runs Crowd.";
+      };
+
+      home = mkOption {
+        type = types.str;
+        default = "/var/lib/crowd";
+        description = lib.mdDoc "Home directory of the Crowd instance.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "Address to listen on.";
+      };
+
+      listenPort = mkOption {
+        type = types.port;
+        default = 8092;
+        description = lib.mdDoc "Port to listen on.";
+      };
+
+      openidPassword = mkOption {
+        type = types.str;
+        default = "WILL_NEVER_BE_SET";
+        description = lib.mdDoc "Application password for OpenID server.";
+      };
+
+      openidPasswordFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "Path to the file containing the application password for OpenID server.";
+      };
+
+      catalinaOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-Xms1024m" "-Xmx2048m" ];
+        description = lib.mdDoc "Java options to pass to catalina/tomcat.";
+      };
+
+      proxy = {
+        enable = mkEnableOption (lib.mdDoc "reverse proxy support");
+
+        name = mkOption {
+          type = types.str;
+          example = "crowd.example.com";
+          description = lib.mdDoc "Virtual hostname at the proxy";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 443;
+          example = 80;
+          description = lib.mdDoc "Port used at the proxy";
+        };
+
+        scheme = mkOption {
+          type = types.str;
+          default = "https";
+          example = "http";
+          description = lib.mdDoc "Protocol used at the proxy.";
+        };
+
+        secure = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether the connections to the proxy should be considered secure.";
+        };
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.atlassian-crowd;
+        defaultText = literalExpression "pkgs.atlassian-crowd";
+        description = lib.mdDoc "Atlassian Crowd package to use.";
+      };
+
+      jrePackage = mkOption {
+        type = types.package;
+        default = pkgs.oraclejre8;
+        defaultText = literalExpression "pkgs.oraclejre8";
+        description = lib.mdDoc "The JRE package to use. Note that Atlassian only supports the Oracle JRE (JRASERVER-46152).";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.${cfg.user} = {
+      isSystemUser = true;
+      group = cfg.group;
+    };
+
+    users.groups.${cfg.group} = {};
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.home}' - ${cfg.user} ${cfg.group} - -"
+      "d /run/atlassian-crowd - - - - -"
+
+      "L+ /run/atlassian-crowd/database - - - - ${cfg.home}/database"
+      "L+ /run/atlassian-crowd/logs - - - - ${cfg.home}/logs"
+      "L+ /run/atlassian-crowd/work - - - - ${cfg.home}/work"
+      "L+ /run/atlassian-crowd/server.xml - - - - ${cfg.home}/server.xml"
+    ];
+
+    systemd.services.atlassian-crowd = {
+      description = "Atlassian Crowd";
+
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "postgresql.service" ];
+      after = [ "postgresql.service" ];
+
+      path = [ cfg.jrePackage ];
+
+      environment = {
+        JAVA_HOME = "${cfg.jrePackage}";
+        CATALINA_OPTS = concatStringsSep " " cfg.catalinaOptions;
+        CATALINA_TMPDIR = "/tmp";
+        JAVA_OPTS = mkIf (cfg.openidPasswordFile != null) "-Dcrowd.properties=${cfg.home}/crowd.properties";
+      };
+
+      preStart = ''
+        rm -rf ${cfg.home}/work
+        mkdir -p ${cfg.home}/{logs,database,work}
+
+        sed -e 's,port="8095",port="${toString cfg.listenPort}" address="${cfg.listenAddress}",' \
+        '' + (lib.optionalString cfg.proxy.enable ''
+          -e 's,compression="on",compression="off" protocol="HTTP/1.1" proxyName="${cfg.proxy.name}" proxyPort="${toString cfg.proxy.port}" scheme="${cfg.proxy.scheme}" secure="${boolToString cfg.proxy.secure}",' \
+        '') + ''
+          ${pkg}/apache-tomcat/conf/server.xml.dist > ${cfg.home}/server.xml
+
+        ${optionalString (cfg.openidPasswordFile != null) ''
+          install -m660 ${crowdPropertiesFile} ${cfg.home}/crowd.properties
+          ${pkgs.replace-secret}/bin/replace-secret \
+            '@NIXOS_CROWD_OPENID_PW@' \
+            ${cfg.openidPasswordFile} \
+            ${cfg.home}/crowd.properties
+        ''}
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        PrivateTmp = true;
+        Restart = "on-failure";
+        RestartSec = "10";
+        ExecStart = "${pkg}/start_crowd.sh -fg";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/atlassian/jira.nix b/nixpkgs/nixos/modules/services/web-apps/atlassian/jira.nix
new file mode 100644
index 000000000000..4cc858216944
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/atlassian/jira.nix
@@ -0,0 +1,223 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.jira;
+
+  pkg = cfg.package.override (optionalAttrs cfg.sso.enable {
+    enableSSO = cfg.sso.enable;
+  });
+
+  crowdProperties = pkgs.writeText "crowd.properties" ''
+    application.name                        ${cfg.sso.applicationName}
+    application.password                    @NIXOS_JIRA_CROWD_SSO_PWD@
+    application.login.url                   ${cfg.sso.crowd}/console/
+
+    crowd.server.url                        ${cfg.sso.crowd}/services/
+    crowd.base.url                          ${cfg.sso.crowd}/
+
+    session.isauthenticated                 session.isauthenticated
+    session.tokenkey                        session.tokenkey
+    session.validationinterval              ${toString cfg.sso.validationInterval}
+    session.lastvalidation                  session.lastvalidation
+  '';
+
+in
+
+{
+  options = {
+    services.jira = {
+      enable = mkEnableOption (lib.mdDoc "Atlassian JIRA service");
+
+      user = mkOption {
+        type = types.str;
+        default = "jira";
+        description = lib.mdDoc "User which runs JIRA.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "jira";
+        description = lib.mdDoc "Group which runs JIRA.";
+      };
+
+      home = mkOption {
+        type = types.str;
+        default = "/var/lib/jira";
+        description = lib.mdDoc "Home directory of the JIRA instance.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "Address to listen on.";
+      };
+
+      listenPort = mkOption {
+        type = types.port;
+        default = 8091;
+        description = lib.mdDoc "Port to listen on.";
+      };
+
+      catalinaOptions = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-Xms1024m" "-Xmx2048m" ];
+        description = lib.mdDoc "Java options to pass to catalina/tomcat.";
+      };
+
+      proxy = {
+        enable = mkEnableOption (lib.mdDoc "reverse proxy support");
+
+        name = mkOption {
+          type = types.str;
+          example = "jira.example.com";
+          description = lib.mdDoc "Virtual hostname at the proxy";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 443;
+          example = 80;
+          description = lib.mdDoc "Port used at the proxy";
+        };
+
+        scheme = mkOption {
+          type = types.str;
+          default = "https";
+          example = "http";
+          description = lib.mdDoc "Protocol used at the proxy.";
+        };
+
+        secure = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether the connections to the proxy should be considered secure.";
+        };
+      };
+
+      sso = {
+        enable = mkEnableOption (lib.mdDoc "SSO with Atlassian Crowd");
+
+        crowd = mkOption {
+          type = types.str;
+          example = "http://localhost:8095/crowd";
+          description = lib.mdDoc "Crowd Base URL without trailing slash";
+        };
+
+        applicationName = mkOption {
+          type = types.str;
+          example = "jira";
+          description = lib.mdDoc "Exact name of this JIRA instance in Crowd";
+        };
+
+        applicationPasswordFile = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Path to the file containing the application password of this JIRA instance in Crowd";
+        };
+
+        validationInterval = mkOption {
+          type = types.int;
+          default = 2;
+          example = 0;
+          description = lib.mdDoc ''
+            Set to 0 if you want authentication checks to occur on each
+            request. Otherwise, set to the number of minutes between requests
+            to validate whether the user is logged in or out of the Crowd SSO
+            server. Setting this value to 1 or higher will increase the
+            performance of Crowd's integration.
+          '';
+        };
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.atlassian-jira;
+        defaultText = literalExpression "pkgs.atlassian-jira";
+        description = lib.mdDoc "Atlassian JIRA package to use.";
+      };
+
+      jrePackage = mkOption {
+        type = types.package;
+        default = pkgs.oraclejre8;
+        defaultText = literalExpression "pkgs.oraclejre8";
+        description = lib.mdDoc "The JRE package to use. Note that Atlassian only supports the Oracle JRE (JRASERVER-46152).";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.${cfg.user} = {
+      isSystemUser = true;
+      group = cfg.group;
+      home = cfg.home;
+    };
+
+    users.groups.${cfg.group} = {};
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.home}' - ${cfg.user} - - -"
+      "d /run/atlassian-jira - - - - -"
+
+      "L+ /run/atlassian-jira/home - - - - ${cfg.home}"
+      "L+ /run/atlassian-jira/logs - - - - ${cfg.home}/logs"
+      "L+ /run/atlassian-jira/work - - - - ${cfg.home}/work"
+      "L+ /run/atlassian-jira/temp - - - - ${cfg.home}/temp"
+      "L+ /run/atlassian-jira/server.xml - - - - ${cfg.home}/server.xml"
+    ];
+
+    systemd.services.atlassian-jira = {
+      description = "Atlassian JIRA";
+
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "postgresql.service" ];
+      after = [ "postgresql.service" ];
+
+      path = [ cfg.jrePackage pkgs.bash ];
+
+      environment = {
+        JIRA_USER = cfg.user;
+        JIRA_HOME = cfg.home;
+        JAVA_HOME = "${cfg.jrePackage}";
+        CATALINA_OPTS = concatStringsSep " " cfg.catalinaOptions;
+        JAVA_OPTS = mkIf cfg.sso.enable "-Dcrowd.properties=${cfg.home}/crowd.properties";
+      };
+
+      preStart = ''
+        mkdir -p ${cfg.home}/{logs,work,temp,deploy}
+
+        sed -e 's,port="8080",port="${toString cfg.listenPort}" address="${cfg.listenAddress}",' \
+        '' + (lib.optionalString cfg.proxy.enable ''
+          -e 's,protocol="HTTP/1.1",protocol="HTTP/1.1" proxyName="${cfg.proxy.name}" proxyPort="${toString cfg.proxy.port}" scheme="${cfg.proxy.scheme}" secure="${boolToString cfg.proxy.secure}",' \
+        '') + ''
+          ${pkg}/conf/server.xml.dist > ${cfg.home}/server.xml
+
+        ${optionalString cfg.sso.enable ''
+          install -m660 ${crowdProperties} ${cfg.home}/crowd.properties
+          ${pkgs.replace-secret}/bin/replace-secret \
+            '@NIXOS_JIRA_CROWD_SSO_PWD@' \
+            ${cfg.sso.applicationPasswordFile} \
+            ${cfg.home}/crowd.properties
+        ''}
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        PrivateTmp = true;
+        Restart = "on-failure";
+        RestartSec = "10";
+        ExecStart = "${pkg}/bin/start-jira.sh -fg";
+        ExecStop = "${pkg}/bin/stop-jira.sh";
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "jira" "sso" "applicationPassword" ] ''
+      Use `applicationPasswordFile` instead!
+    '')
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/audiobookshelf.nix b/nixpkgs/nixos/modules/services/web-apps/audiobookshelf.nix
new file mode 100644
index 000000000000..84dffc5f9d3c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/audiobookshelf.nix
@@ -0,0 +1,90 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.audiobookshelf;
+in
+{
+  options = {
+    services.audiobookshelf = {
+      enable = mkEnableOption "Audiobookshelf, a self-hosted audiobook and podcast server";
+
+      package = mkPackageOption pkgs "audiobookshelf" { };
+
+      dataDir = mkOption {
+        description = "Path to Audiobookshelf config and metadata inside of /var/lib.";
+        default = "audiobookshelf";
+        type = types.str;
+      };
+
+      host = mkOption {
+        description = "The host Audiobookshelf binds to.";
+        default = "127.0.0.1";
+        example = "0.0.0.0";
+        type = types.str;
+      };
+
+      port = mkOption {
+        description = "The TCP port Audiobookshelf will listen on.";
+        default = 8000;
+        type = types.port;
+      };
+
+      user = mkOption {
+        description = "User account under which Audiobookshelf runs.";
+        default = "audiobookshelf";
+        type = types.str;
+      };
+
+      group = mkOption {
+        description = "Group under which Audiobookshelf runs.";
+        default = "audiobookshelf";
+        type = types.str;
+      };
+
+      openFirewall = mkOption {
+        description = "Open ports in the firewall for the Audiobookshelf web interface.";
+        default = false;
+        type = types.bool;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.audiobookshelf = {
+      description = "Audiobookshelf is a self-hosted audiobook and podcast server";
+
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        StateDirectory = cfg.dataDir;
+        WorkingDirectory = "/var/lib/${cfg.dataDir}";
+        ExecStart = "${cfg.package}/bin/audiobookshelf --host ${cfg.host} --port ${toString cfg.port}";
+        Restart = "on-failure";
+      };
+    };
+
+    users.users = mkIf (cfg.user == "audiobookshelf") {
+      audiobookshelf = {
+        isSystemUser = true;
+        group = cfg.group;
+        home = "/var/lib/${cfg.dataDir}";
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "audiobookshelf") {
+      audiobookshelf = { };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+  };
+
+  meta.maintainers = with maintainers; [ wietsedv ];
+}
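A minimal usage sketch for the Audiobookshelf module above, using only options it declares; the host and openFirewall values are illustrative assumptions for exposing the web interface beyond localhost:

```
{
  services.audiobookshelf = {
    enable = true;
    # Illustrative: listen on all interfaces and open the configured port.
    host = "0.0.0.0";
    port = 8000;
    openFirewall = true;
  };
}
```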
diff --git a/nixpkgs/nixos/modules/services/web-apps/bookstack.nix b/nixpkgs/nixos/modules/services/web-apps/bookstack.nix
new file mode 100644
index 000000000000..d846c98577c8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/bookstack.nix
@@ -0,0 +1,446 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.bookstack;
+  bookstack = pkgs.bookstack.override {
+    dataDir = cfg.dataDir;
+  };
+  db = cfg.database;
+  mail = cfg.mail;
+
+  user = cfg.user;
+  group = cfg.group;
+
+  # shell script for local administration
+  artisan = pkgs.writeScriptBin "bookstack" ''
+    #! ${pkgs.runtimeShell}
+    cd ${bookstack}
+    sudo=exec
+    if [[ "$USER" != ${user} ]]; then
+      sudo='exec /run/wrappers/bin/sudo -u ${user}'
+    fi
+    $sudo ${pkgs.php}/bin/php artisan $*
+  '';
+
+  tlsEnabled = cfg.nginx.addSSL || cfg.nginx.forceSSL || cfg.nginx.onlySSL || cfg.nginx.enableACME;
+
+in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "bookstack" "extraConfig" ] "Use services.bookstack.config instead.")
+    (mkRemovedOptionModule [ "services" "bookstack" "cacheDir" ] "The cache directory is now handled automatically.")
+  ];
+
+  options.services.bookstack = {
+
+    enable = mkEnableOption (lib.mdDoc "BookStack");
+
+    user = mkOption {
+      default = "bookstack";
+      description = lib.mdDoc "User bookstack runs as.";
+      type = types.str;
+    };
+
+    group = mkOption {
+      default = "bookstack";
+      description = lib.mdDoc "Group bookstack runs as.";
+      type = types.str;
+    };
+
+    appKeyFile = mkOption {
+      description = lib.mdDoc ''
+        A file containing the Laravel APP_KEY - a 32 character long,
+        base64 encoded key used for encryption where needed. Can be
+        generated with `head -c 32 /dev/urandom | base64`.
+      '';
+      example = "/run/keys/bookstack-appkey";
+      type = types.path;
+    };
+
+    hostname = lib.mkOption {
+      type = lib.types.str;
+      default = config.networking.fqdnOrHostName;
+      defaultText = lib.literalExpression "config.networking.fqdnOrHostName";
+      example = "bookstack.example.com";
+      description = lib.mdDoc ''
+        The hostname to serve BookStack on.
+      '';
+    };
+
+    appURL = mkOption {
+      description = lib.mdDoc ''
+        The root URL that you want to host BookStack on. All URLs in BookStack will be generated using this value.
+        If you change this in the future you may need to run a command to update stored URLs in the database. Command example: `php artisan bookstack:update-url https://old.example.com https://new.example.com`
+      '';
+      default = "http${lib.optionalString tlsEnabled "s"}://${cfg.hostname}";
+      defaultText = ''http''${lib.optionalString tlsEnabled "s"}://''${cfg.hostname}'';
+      example = "https://example.com";
+      type = types.str;
+    };
+
+    dataDir = mkOption {
+      description = lib.mdDoc "BookStack data directory";
+      default = "/var/lib/bookstack";
+      type = types.path;
+    };
+
+    database = {
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Database host address.";
+      };
+      port = mkOption {
+        type = types.port;
+        default = 3306;
+        description = lib.mdDoc "Database host port.";
+      };
+      name = mkOption {
+        type = types.str;
+        default = "bookstack";
+        description = lib.mdDoc "Database name.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = user;
+        defaultText = literalExpression "user";
+        description = lib.mdDoc "Database username.";
+      };
+      passwordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "/run/keys/bookstack-dbpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to
+          {option}`database.user`.
+        '';
+      };
+      createLocally = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Create the database and database user locally.";
+      };
+    };
+
+    mail = {
+      driver = mkOption {
+        type = types.enum [ "smtp" "sendmail" ];
+        default = "smtp";
+        description = lib.mdDoc "Mail driver to use.";
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Mail host address.";
+      };
+      port = mkOption {
+        type = types.port;
+        default = 1025;
+        description = lib.mdDoc "Mail host port.";
+      };
+      fromName = mkOption {
+        type = types.str;
+        default = "BookStack";
+        description = lib.mdDoc "Mail \"from\" name.";
+      };
+      from = mkOption {
+        type = types.str;
+        default = "mail@bookstackapp.com";
+        description = lib.mdDoc "Mail \"from\" email.";
+      };
+      user = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "bookstack";
+        description = lib.mdDoc "Mail username.";
+      };
+      passwordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "/run/keys/bookstack-mailpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to
+          {option}`mail.user`.
+        '';
+      };
+      encryption = mkOption {
+        type = with types; nullOr (enum [ "tls" ]);
+        default = null;
+        description = lib.mdDoc "SMTP encryption mechanism to use.";
+      };
+    };
+
+    maxUploadSize = mkOption {
+      type = types.str;
+      default = "18M";
+      example = "1G";
+      description = lib.mdDoc "The maximum size for uploads (e.g. images).";
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = lib.mdDoc ''
+        Options for the bookstack PHP pool. See the documentation on `php-fpm.conf`
+        for details on configuration directives.
+      '';
+    };
+
+    nginx = mkOption {
+      type = types.submodule (
+        recursiveUpdate
+          (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {}
+      );
+      default = {};
+      example = literalExpression ''
+        {
+          serverAliases = [
+            "bookstack.''${config.networking.domain}"
+          ];
+          # To enable encryption and let Let's Encrypt take care of the certificate
+          forceSSL = true;
+          enableACME = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        With this option, you can customize the nginx virtualHost settings.
+      '';
+    };
+
+    config = mkOption {
+      type = with types;
+        attrsOf
+          (nullOr
+            (either
+              (oneOf [
+                bool
+                int
+                port
+                path
+                str
+              ])
+              (submodule {
+                options = {
+                  _secret = mkOption {
+                    type = nullOr str;
+                    description = lib.mdDoc ''
+                      The path to a file containing the value the
+                      option should be set to in the final
+                      configuration file.
+                    '';
+                  };
+                };
+              })));
+      default = {};
+      example = literalExpression ''
+        {
+          ALLOWED_IFRAME_HOSTS = "https://example.com";
+          WKHTMLTOPDF = "/home/user/bins/wkhtmltopdf";
+          AUTH_METHOD = "oidc";
+          OIDC_NAME = "MyLogin";
+          OIDC_DISPLAY_NAME_CLAIMS = "name";
+          OIDC_CLIENT_ID = "bookstack";
+          OIDC_CLIENT_SECRET = {_secret = "/run/keys/oidc_secret"};
+          OIDC_ISSUER = "https://keycloak.example.com/auth/realms/My%20Realm";
+          OIDC_ISSUER_DISCOVER = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        BookStack configuration options to set in the
+        {file}`.env` file.
+
+        Refer to <https://www.bookstackapp.com/docs/>
+        for details on supported values.
+
+        Settings containing secret data should be set to an attribute
+        set containing the attribute `_secret` - a
+        string pointing to a file containing the value the option
+        should be set to. See the example to get a better picture of
+        this: in the resulting {file}`.env` file, the
+        `OIDC_CLIENT_SECRET` key will be set to the
+        contents of the {file}`/run/keys/oidc_secret`
+        file.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = db.createLocally -> db.user == user;
+        message = "services.bookstack.database.user must be set to ${user} if services.bookstack.database.createLocally is set to true.";
+      }
+      { assertion = db.createLocally -> db.passwordFile == null;
+        message = "services.bookstack.database.passwordFile cannot be specified if services.bookstack.database.createLocally is set to true.";
+      }
+    ];
+
+    services.bookstack.config = {
+      APP_KEY._secret = cfg.appKeyFile;
+      APP_URL = cfg.appURL;
+      DB_HOST = db.host;
+      DB_PORT = db.port;
+      DB_DATABASE = db.name;
+      DB_USERNAME = db.user;
+      MAIL_DRIVER = mail.driver;
+      MAIL_FROM_NAME = mail.fromName;
+      MAIL_FROM = mail.from;
+      MAIL_HOST = mail.host;
+      MAIL_PORT = mail.port;
+      MAIL_USERNAME = mail.user;
+      MAIL_ENCRYPTION = mail.encryption;
+      DB_PASSWORD._secret = db.passwordFile;
+      MAIL_PASSWORD._secret = mail.passwordFile;
+      APP_SERVICES_CACHE = "/run/bookstack/cache/services.php";
+      APP_PACKAGES_CACHE = "/run/bookstack/cache/packages.php";
+      APP_CONFIG_CACHE = "/run/bookstack/cache/config.php";
+      APP_ROUTES_CACHE = "/run/bookstack/cache/routes-v7.php";
+      APP_EVENTS_CACHE = "/run/bookstack/cache/events.php";
+      SESSION_SECURE_COOKIE = tlsEnabled;
+    };
+
+    environment.systemPackages = [ artisan ];
+
+    services.mysql = mkIf db.createLocally {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ db.name ];
+      ensureUsers = [
+        { name = db.user;
+          ensurePermissions = { "${db.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.phpfpm.pools.bookstack = {
+      inherit user;
+      inherit group;
+      phpOptions = ''
+        log_errors = on
+        post_max_size = ${cfg.maxUploadSize}
+        upload_max_filesize = ${cfg.maxUploadSize}
+      '';
+      settings = {
+        "listen.mode" = "0660";
+        "listen.owner" = user;
+        "listen.group" = group;
+      } // cfg.poolConfig;
+    };
+
+    services.nginx = {
+      enable = mkDefault true;
+      recommendedTlsSettings = true;
+      recommendedOptimisation = true;
+      recommendedGzipSettings = true;
+      virtualHosts.${cfg.hostname} = mkMerge [ cfg.nginx {
+        root = mkForce "${bookstack}/public";
+        locations = {
+          "/" = {
+            index = "index.php";
+            tryFiles = "$uri $uri/ /index.php?$query_string";
+          };
+          "~ \\.php$".extraConfig = ''
+            fastcgi_pass unix:${config.services.phpfpm.pools."bookstack".socket};
+          '';
+          "~ \\.(js|css|gif|png|ico|jpg|jpeg)$" = {
+            extraConfig = "expires 365d;";
+          };
+        };
+      }];
+    };
+
+    systemd.services.bookstack-setup = {
+      description = "Preparation tasks for BookStack";
+      before = [ "phpfpm-bookstack.service" ];
+      after = optional db.createLocally "mysql.service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        User = user;
+        WorkingDirectory = "${bookstack}";
+        RuntimeDirectory = "bookstack/cache";
+        RuntimeDirectoryMode = "0700";
+      };
+      path = [ pkgs.replace-secret ];
+      script =
+        let
+          isSecret = v: isAttrs v && v ? _secret && isString v._secret;
+          bookstackEnvVars = lib.generators.toKeyValue {
+            mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" {
+              mkValueString = v: with builtins;
+                if isInt         v then toString v
+                else if isString v then v
+                else if true  == v then "true"
+                else if false == v then "false"
+                else if isSecret v then hashString "sha256" v._secret
+                else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
+            };
+          };
+          secretPaths = lib.mapAttrsToList (_: v: v._secret) (lib.filterAttrs (_: isSecret) cfg.config);
+          mkSecretReplacement = file: ''
+            replace-secret ${escapeShellArgs [ (builtins.hashString "sha256" file) file "${cfg.dataDir}/.env" ]}
+          '';
+          secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
+          filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ {} null ])) cfg.config;
+          bookstackEnv = pkgs.writeText "bookstack.env" (bookstackEnvVars filteredConfig);
+        in ''
+        # error handling
+        set -euo pipefail
+
+        # set permissions
+        umask 077
+
+        # create .env file
+        install -T -m 0600 -o ${user} ${bookstackEnv} "${cfg.dataDir}/.env"
+        ${secretReplacements}
+        if ! grep 'APP_KEY=base64:' "${cfg.dataDir}/.env" >/dev/null; then
+            sed -i 's/APP_KEY=/APP_KEY=base64:/' "${cfg.dataDir}/.env"
+        fi
+
+        # migrate db
+        ${pkgs.php}/bin/php artisan migrate --force
+      '';
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${cfg.dataDir}                            0710 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public                     0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads             0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage                    0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/app                0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/fonts              0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework          0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/cache    0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/sessions 0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/views    0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/logs               0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/uploads            0700 ${user} ${group} - -"
+    ];
+
+    users = {
+      users = mkIf (user == "bookstack") {
+        bookstack = {
+          inherit group;
+          isSystemUser = true;
+        };
+        "${config.services.nginx.user}".extraGroups = [ group ];
+      };
+      groups = mkIf (group == "bookstack") {
+        bookstack = {};
+      };
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ ymarkus ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md b/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md
new file mode 100644
index 000000000000..236953bd4ff7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.md
@@ -0,0 +1,42 @@
+# c2FmZQ {#module-services-c2fmzq}
+
+c2FmZQ is an application that can securely encrypt, store, and share files,
+including but not limited to pictures and videos.
+
+The service `c2fmzq-server` can be enabled by setting
+```
+{
+  services.c2fmzq-server.enable = true;
+}
+```
+This will spin up an instance of the server which is API-compatible with
+[Stingle Photos](https://stingle.org) and an experimental Progressive Web App
+(PWA) to interact with the storage via the browser.
+
+In principle the server can be exposed directly on a public interface and there
+are command line options to manage HTTPS certificates directly, but the module
+is designed to be served behind a reverse proxy or only accessed via localhost.
+
+```
+{
+  services.c2fmzq-server = {
+    enable = true;
+    bindIP = "127.0.0.1"; # default
+    port = 8080; # default
+  };
+
+  services.nginx = {
+    enable = true;
+    recommendedProxySettings = true;
+    virtualHosts."example.com" = {
+      enableACME = true;
+      forceSSL = true;
+      locations."/" = {
+        proxyPass = "http://127.0.0.1:8080";
+      };
+    };
+  };
+}
+```
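+
+Note that `services.c2fmzq-server.passphraseFile` has no default and must point to a
+file containing the passphrase protecting the database; the path below is only an
+illustration:
+
+```
+{
+  services.c2fmzq-server = {
+    enable = true;
+    passphraseFile = "/run/secrets/c2fmzq/pwfile";
+  };
+}
+```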
+
+For more information, see <https://github.com/c2FmZQ/c2FmZQ/>.
diff --git a/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.nix b/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.nix
new file mode 100644
index 000000000000..2749c2a5a87a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/c2fmzq-server.nix
@@ -0,0 +1,125 @@
+{ lib, pkgs, config, ... }:
+
+let
+  inherit (lib) mkEnableOption mkPackageOption mkOption types;
+
+  cfg = config.services.c2fmzq-server;
+
+  argsFormat = {
+    type = with lib.types; nullOr (oneOf [ bool int str ]);
+    generate = lib.cli.toGNUCommandLineShell { };
+  };
+in {
+  options.services.c2fmzq-server = {
+    enable = mkEnableOption "c2fmzq-server";
+
+    bindIP = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = "The local address to use.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = "The local port to use.";
+    };
+
+    passphraseFile = mkOption {
+      type = types.str;
+      example = "/run/secrets/c2fmzq/pwfile";
+      description = "Path to a file containing the database passphrase.";
+    };
+
+    package = mkPackageOption pkgs "c2fmzq" { };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = argsFormat.type;
+
+        options = {
+          address = mkOption {
+            internal = true;
+            type = types.str;
+            default = "${cfg.bindIP}:${toString cfg.port}";
+          };
+
+          database = mkOption {
+            type = types.str;
+            default = "%S/c2fmzq-server/data";
+            description = "Path of the database";
+          };
+
+          verbose = mkOption {
+            type = types.ints.between 1 3;
+            default = 2;
+            description = "The level of logging verbosity: 1:Error 2:Info 3:Debug";
+          };
+        };
+      };
+      description = ''
+        Configuration for c2FmZQ-server passed as CLI arguments.
+        Run {command}`c2FmZQ-server help` for supported values.
+      '';
+      example = {
+        verbose = 3;
+        allow-new-accounts = true;
+        auto-approve-new-accounts = true;
+        encrypt-metadata = true;
+        enable-webapp = true;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.c2fmzq-server = {
+      description = "c2FmZQ-server";
+      documentation = [ "https://github.com/c2FmZQ/c2FmZQ/blob/main/README.md" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "network-online.target" ];
+
+      serviceConfig = {
+        ExecStart = "${lib.getExe cfg.package} ${argsFormat.generate cfg.settings}";
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        DynamicUser = true;
+        Environment = "C2FMZQ_PASSPHRASE_FILE=%d/passphrase-file";
+        IPAccounting = true;
+        IPAddressAllow = cfg.bindIP;
+        IPAddressDeny = "any";
+        LoadCredential = "passphrase-file:${cfg.passphraseFile}";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateIPC = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SocketBindAllow = cfg.port;
+        SocketBindDeny = "any";
+        StateDirectory = "c2fmzq-server";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged @obsolete" ];
+      };
+    };
+  };
+
+  meta = {
+    doc = ./c2fmzq-server.md;
+    maintainers = with lib.maintainers; [ hmenke ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/calibre-web.nix b/nixpkgs/nixos/modules/services/web-apps/calibre-web.nix
new file mode 100644
index 000000000000..80567db10c97
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/calibre-web.nix
@@ -0,0 +1,170 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.calibre-web;
+
+  inherit (lib) concatStringsSep mkEnableOption mkIf mkOption optional optionalString types;
+in
+{
+  options = {
+    services.calibre-web = {
+      enable = mkEnableOption (lib.mdDoc "Calibre-Web");
+
+      package = lib.mkPackageOption pkgs "calibre-web" { };
+
+      listen = {
+        ip = mkOption {
+          type = types.str;
+          default = "::1";
+          description = lib.mdDoc ''
+            IP address that Calibre-Web should listen on.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 8083;
+          description = lib.mdDoc ''
+            Listen port for Calibre-Web.
+          '';
+        };
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "calibre-web";
+        description = lib.mdDoc ''
+          The directory below {file}`/var/lib` where Calibre-Web stores its data.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "calibre-web";
+        description = lib.mdDoc "User account under which Calibre-Web runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "calibre-web";
+        description = lib.mdDoc "Group account under which Calibre-Web runs.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Open ports in the firewall for the server.
+        '';
+      };
+
+      options = {
+        calibreLibrary = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          description = lib.mdDoc ''
+            Path to Calibre library.
+          '';
+        };
+
+        enableBookConversion = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Configure the path to Calibre's ebook-convert in the DB.
+          '';
+        };
+
+        enableKepubify = mkEnableOption (lib.mdDoc "kepub conversion support");
+
+        enableBookUploading = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Allow books to be uploaded via Calibre-Web UI.
+          '';
+        };
+
+        reverseProxyAuth = {
+          enable = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Enable authorization using auth proxy.
+            '';
+          };
+
+          header = mkOption {
+            type = types.str;
+            default = "";
+            description = lib.mdDoc ''
+              Auth proxy header name.
+            '';
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.calibre-web = let
+      appDb = "/var/lib/${cfg.dataDir}/app.db";
+      gdriveDb = "/var/lib/${cfg.dataDir}/gdrive.db";
+      calibreWebCmd = "${cfg.package}/bin/calibre-web -p ${appDb} -g ${gdriveDb}";
+
+      settings = concatStringsSep ", " (
+        [
+          "config_port = ${toString cfg.listen.port}"
+          "config_uploading = ${if cfg.options.enableBookUploading then "1" else "0"}"
+          "config_allow_reverse_proxy_header_login = ${if cfg.options.reverseProxyAuth.enable then "1" else "0"}"
+          "config_reverse_proxy_login_header_name = '${cfg.options.reverseProxyAuth.header}'"
+        ]
+        ++ optional (cfg.options.calibreLibrary != null) "config_calibre_dir = '${cfg.options.calibreLibrary}'"
+        ++ optional cfg.options.enableBookConversion "config_converterpath = '${pkgs.calibre}/bin/ebook-convert'"
+        ++ optional cfg.options.enableKepubify "config_kepubifypath = '${pkgs.kepubify}/bin/kepubify'"
+      );
+    in
+      {
+        description = "Web app for browsing, reading and downloading eBooks stored in a Calibre database";
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+
+        serviceConfig = {
+          Type = "simple";
+          User = cfg.user;
+          Group = cfg.group;
+
+          StateDirectory = cfg.dataDir;
+          ExecStartPre = pkgs.writeShellScript "calibre-web-pre-start" (
+            ''
+              __RUN_MIGRATIONS_AND_EXIT=1 ${calibreWebCmd}
+
+              ${pkgs.sqlite}/bin/sqlite3 ${appDb} "update settings set ${settings}"
+            '' + optionalString (cfg.options.calibreLibrary != null) ''
+              test -f "${cfg.options.calibreLibrary}/metadata.db" || { echo "Invalid Calibre library"; exit 1; }
+            ''
+          );
+
+          ExecStart = "${calibreWebCmd} -i ${cfg.listen.ip}";
+          Restart = "on-failure";
+        };
+      };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.listen.port ];
+    };
+
+    users.users = mkIf (cfg.user == "calibre-web") {
+      calibre-web = {
+        isSystemUser = true;
+        group = cfg.group;
+      };
+    };
+
+    users.groups = mkIf (cfg.group == "calibre-web") {
+      calibre-web = {};
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ pborzenkov ];
+}
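A minimal usage sketch for the Calibre-Web module above; the library path is an illustrative assumption, and the pre-start script above requires that `metadata.db` already exists there:

```
{
  services.calibre-web = {
    enable = true;
    listen.port = 8083;
    options = {
      # Illustrative path; must contain an existing Calibre library (metadata.db).
      calibreLibrary = "/srv/calibre-library";
      enableBookUploading = true;
    };
    openFirewall = true;
  };
}
```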
diff --git a/nixpkgs/nixos/modules/services/web-apps/changedetection-io.nix b/nixpkgs/nixos/modules/services/web-apps/changedetection-io.nix
new file mode 100644
index 000000000000..bbf4c2aed186
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/changedetection-io.nix
@@ -0,0 +1,220 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.changedetection-io;
+in
+{
+  options.services.changedetection-io = {
+    enable = mkEnableOption (lib.mdDoc "changedetection-io");
+
+    user = mkOption {
+      default = "changedetection-io";
+      type = types.str;
+      description = lib.mdDoc ''
+        User account under which changedetection-io runs.
+      '';
+    };
+
+    group = mkOption {
+      default = "changedetection-io";
+      type = types.str;
+      description = lib.mdDoc ''
+        Group account under which changedetection-io runs.
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Address the server will listen on.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 5000;
+      description = lib.mdDoc "Port the server will listen on.";
+    };
+
+    datastorePath = mkOption {
+      type = types.str;
+      default = "/var/lib/changedetection-io";
+      description = lib.mdDoc ''
+        The directory used to store all data for changedetection-io.
+      '';
+    };
+
+    baseURL = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "https://changedetection-io.example";
+      description = lib.mdDoc ''
+        The base url used in notifications and `{base_url}` token.
+      '';
+    };
+
+    behindProxy = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable this option when changedetection-io runs behind a reverse proxy, so that it trusts X-* headers.
+        It is recommended to run changedetection-io behind a TLS reverse proxy.
+      '';
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/run/secrets/changedetection-io.env";
+      description = lib.mdDoc ''
+        Securely pass environment variables to changedetection-io.
+
+        This can be used, for example, to set a reproducible frontend password via `SALTED_PASS`,
+        which conveniently also deactivates nags about the hosted version.
+        `SALTED_PASS` should be 64 characters long, where the first 32 characters are the salt
+        and the last 32 the frontend password.
+        It can easily be retrieved from the settings file when first set via the frontend with the following command:
+        `jq -r .settings.application.password /var/lib/changedetection-io/url-watches.json`
+      '';
+    };
+
+    webDriverSupport = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable support for fetching web pages using WebDriver and Chromium.
+        This starts a headless chromium controlled by puppeteer in an oci container.
+
+        ::: {.note}
+        Playwright can currently leak memory.
+        See https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher#playwright-memory-leak
+        :::
+      '';
+    };
+
+    playwrightSupport = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable support for fetching web pages using playwright and Chromium.
+        This starts a headless Chromium controlled by puppeteer in an oci container.
+
+        ::: {.note}
+        Playwright can currently leak memory.
+        See https://github.com/dgtlmoon/changedetection.io/wiki/Playwright-content-fetcher#playwright-memory-leak
+        :::
+      '';
+    };
+
+    chromePort = mkOption {
+      type = types.port;
+      default = 4444;
+      description = lib.mdDoc ''
+        A free localhost port for webDriverSupport or playwrightSupport to listen on.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = !((cfg.webDriverSupport == true) && (cfg.playwrightSupport == true));
+        message = "'services.changedetection-io.webDriverSupport' and 'services.changedetection-io.playwrightSupport' cannot be used together.";
+      }
+    ];
+
+    systemd = let
+      defaultStateDir = cfg.datastorePath == "/var/lib/changedetection-io";
+    in {
+      services.changedetection-io = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" ];
+        preStart = ''
+          mkdir -p ${cfg.datastorePath}
+        '';
+        serviceConfig = {
+          User = cfg.user;
+          Group = cfg.group;
+          StateDirectory = mkIf defaultStateDir "changedetection-io";
+          StateDirectoryMode = mkIf defaultStateDir "0750";
+          WorkingDirectory = cfg.datastorePath;
+          Environment = [ "HIDE_REFERER=true" ]
+            ++ lib.optional (cfg.baseURL != null) "BASE_URL=${cfg.baseURL}"
+            ++ lib.optional cfg.behindProxy "USE_X_SETTINGS=1"
+            ++ lib.optional cfg.webDriverSupport "WEBDRIVER_URL=http://127.0.0.1:${toString cfg.chromePort}/wd/hub"
+            ++ lib.optional cfg.playwrightSupport "PLAYWRIGHT_DRIVER_URL=ws://127.0.0.1:${toString cfg.chromePort}/?stealth=1&--disable-web-security=true";
+          EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
+          ExecStart = ''
+            ${pkgs.changedetection-io}/bin/changedetection.py \
+              -h ${cfg.listenAddress} -p ${toString cfg.port} -d ${cfg.datastorePath}
+          '';
+          ProtectHome = true;
+          ProtectSystem = true;
+          Restart = "on-failure";
+        };
+      };
+      tmpfiles.rules = mkIf defaultStateDir [
+        "d ${cfg.datastorePath} 0750 ${cfg.user} ${cfg.group} - -"
+      ];
+    };
+
+    users = {
+      users = optionalAttrs (cfg.user == "changedetection-io") {
+        "changedetection-io" = {
+          isSystemUser = true;
+          group = "changedetection-io";
+        };
+      };
+
+      groups = optionalAttrs (cfg.group == "changedetection-io") {
+        "changedetection-io" = { };
+      };
+    };
+
+    virtualisation = {
+      oci-containers.containers = lib.mkMerge [
+        (mkIf cfg.webDriverSupport {
+          changedetection-io-webdriver = {
+            image = "selenium/standalone-chrome";
+            environment = {
+              VNC_NO_PASSWORD = "1";
+              SCREEN_WIDTH = "1920";
+              SCREEN_HEIGHT = "1080";
+              SCREEN_DEPTH = "24";
+            };
+            ports = [
+              "127.0.0.1:${toString cfg.chromePort}:4444"
+            ];
+            volumes = [
+              "/dev/shm:/dev/shm"
+            ];
+            extraOptions = [ "--network=bridge" ];
+          };
+        })
+
+        (mkIf cfg.playwrightSupport {
+          changedetection-io-playwright = {
+            image = "browserless/chrome";
+            environment = {
+              SCREEN_WIDTH = "1920";
+              SCREEN_HEIGHT = "1024";
+              SCREEN_DEPTH = "16";
+              ENABLE_DEBUGGER = "false";
+              PREBOOT_CHROME = "true";
+              CONNECTION_TIMEOUT = "300000";
+              MAX_CONCURRENT_SESSIONS = "10";
+              CHROME_REFRESH_TIME = "600000";
+              DEFAULT_BLOCK_ADS = "true";
+              DEFAULT_STEALTH = "true";
+            };
+            ports = [
+              "127.0.0.1:${toString cfg.chromePort}:3000"
+            ];
+            extraOptions = [ "--network=bridge" ];
+          };
+        })
+      ];
+      podman.defaultNetwork.settings.dns_enabled = true;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/chatgpt-retrieval-plugin.nix b/nixpkgs/nixos/modules/services/web-apps/chatgpt-retrieval-plugin.nix
new file mode 100644
index 000000000000..f29d095bc10b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/chatgpt-retrieval-plugin.nix
@@ -0,0 +1,106 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.chatgpt-retrieval-plugin;
+in
+{
+  options.services.chatgpt-retrieval-plugin = {
+    enable = mkEnableOption (lib.mdDoc "chatgpt-retrieval-plugin service");
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc "Port the chatgpt-retrieval-plugin service listens on.";
+    };
+
+    host = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      example = "0.0.0.0";
+      description = lib.mdDoc "The hostname or IP address for chatgpt-retrieval-plugin to bind to.";
+    };
+
+    bearerTokenPath = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to the secret bearer token used for HTTP API authentication.
+      '';
+      default = "";
+      example = "config.age.secrets.CHATGPT_RETRIEVAL_PLUGIN_BEARER_TOKEN.path";
+    };
+
+    openaiApiKeyPath = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to the secret OpenAI API key used for embeddings.
+      '';
+      default = "";
+      example = "config.age.secrets.CHATGPT_RETRIEVAL_PLUGIN_OPENAI_API_KEY.path";
+    };
+
+    datastore = mkOption {
+      type = types.enum [ "pinecone" "weaviate" "zilliz" "milvus" "qdrant" "redis" ];
+      default = "qdrant";
+      description = lib.mdDoc "This specifies the vector database provider you want to use to store and query embeddings.";
+    };
+
+    qdrantCollection = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Name of the Qdrant collection used to store documents.
+      '';
+      default = "document_chunks";
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.bearerTokenPath != "";
+        message = "services.chatgpt-retrieval-plugin.bearerTokenPath should not be an empty string.";
+      }
+      {
+        assertion = cfg.openaiApiKeyPath != "";
+        message = "services.chatgpt-retrieval-plugin.openaiApiKeyPath should not be an empty string.";
+      }
+    ];
+
+    systemd.services.chatgpt-retrieval-plugin = {
+      description = "ChatGPT Retrieval Plugin";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        DynamicUser = true;
+        Restart = "always";
+        LoadCredential = [
+          "BEARER_TOKEN:${cfg.bearerTokenPath}"
+          "OPENAI_API_KEY:${cfg.openaiApiKeyPath}"
+        ];
+        StateDirectory = "chatgpt-retrieval-plugin";
+        StateDirectoryMode = "0755";
+      };
+
+      # it doesn't make sense to pass secrets as env vars, this is a hack until
+      # upstream has proper secret management.
+      script = ''
+        export BEARER_TOKEN=$(${pkgs.systemd}/bin/systemd-creds cat BEARER_TOKEN)
+        export OPENAI_API_KEY=$(${pkgs.systemd}/bin/systemd-creds cat OPENAI_API_KEY)
+        exec ${pkgs.chatgpt-retrieval-plugin}/bin/start --host ${cfg.host} --port ${toString cfg.port}
+      '';
+
+      environment = {
+        DATASTORE = cfg.datastore;
+        QDRANT_COLLECTION = mkIf (cfg.datastore == "qdrant") cfg.qdrantCollection;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      # create the directory for static files for fastapi
+      "C /var/lib/chatgpt-retrieval-plugin/.well-known - - - - ${pkgs.chatgpt-retrieval-plugin}/${pkgs.python3Packages.python.sitePackages}/.well-known"
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/cloudlog.nix b/nixpkgs/nixos/modules/services/web-apps/cloudlog.nix
new file mode 100644
index 000000000000..5519d6967a12
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/cloudlog.nix
@@ -0,0 +1,503 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.cloudlog;
+  dbFile = let
+    password = if cfg.database.createLocally
+               then "''"
+               else "trim(file_get_contents('${cfg.database.passwordFile}'))";
+  in pkgs.writeText "database.php" ''
+    <?php
+    defined('BASEPATH') OR exit('No direct script access allowed');
+    $active_group = 'default';
+    $query_builder = TRUE;
+    $db['default'] = array(
+      'dsn' => "",
+      'hostname' => '${cfg.database.host}',
+      'username' => '${cfg.database.user}',
+      'password' => ${password},
+      'database' => '${cfg.database.name}',
+      'dbdriver' => 'mysqli',
+      'dbprefix' => "",
+      'pconnect' => TRUE,
+      'db_debug' => (ENVIRONMENT !== 'production'),
+      'cache_on' => FALSE,
+      'cachedir' => "",
+      'char_set' => 'utf8mb4',
+      'dbcollat' => 'utf8mb4_general_ci',
+      'swap_pre' => "",
+      'encrypt' => FALSE,
+      'compress' => FALSE,
+      'stricton' => FALSE,
+      'failover' => array(),
+      'save_queries' => TRUE
+    );
+  '';
+  configFile = pkgs.writeText "config.php" ''
+    <?php
+    include('${pkgs.cloudlog}/install/config/config.php');
+    $config['datadir'] = "${cfg.dataDir}/";
+    $config['base_url'] = "${cfg.baseUrl}";
+    ${cfg.extraConfig}
+  '';
+  package = pkgs.stdenv.mkDerivation rec {
+    pname = "cloudlog";
+    version = src.version;
+    src = pkgs.cloudlog;
+    installPhase = ''
+      mkdir -p $out
+      cp -r * $out/
+
+      ln -s ${configFile} $out/application/config/config.php
+      ln -s ${dbFile} $out/application/config/database.php
+
+      # link writable directories
+      for directory in updates uploads backup logbook; do
+        rm -rf $out/$directory
+        ln -s ${cfg.dataDir}/$directory $out/$directory
+      done
+
+      # link writable asset files
+      for asset in dok sota wwff; do
+        rm -rf $out/assets/json/$asset.txt
+        ln -s ${cfg.dataDir}/assets/json/$asset.txt $out/assets/json/$asset.txt
+      done
+    '';
+  };
+in
+{
+  options.services.cloudlog = with types; {
+    enable = mkEnableOption (mdDoc "Cloudlog");
+    dataDir = mkOption {
+      type = str;
+      default = "/var/lib/cloudlog";
+      description = mdDoc "Cloudlog data directory.";
+    };
+    baseUrl = mkOption {
+      type = str;
+      default = "http://localhost";
+      description = mdDoc "Cloudlog base URL";
+    };
+    user = mkOption {
+      type = str;
+      default = "cloudlog";
+      description = mdDoc "User account under which Cloudlog runs.";
+    };
+    database = {
+      createLocally = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Create the database and database user locally.";
+      };
+      host = mkOption {
+        type = str;
+        description = mdDoc "MySQL database host";
+        default = "localhost";
+      };
+      name = mkOption {
+        type = str;
+        description = mdDoc "MySQL database name.";
+        default = "cloudlog";
+      };
+      user = mkOption {
+        type = str;
+        description = mdDoc "MySQL user name.";
+        default = "cloudlog";
+      };
+      passwordFile = mkOption {
+        type = nullOr str;
+        description = mdDoc "MySQL user password file.";
+        default = null;
+      };
+    };
+    poolConfig = mkOption {
+      type = attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = mdDoc ''
+        Options for Cloudlog's PHP-FPM pool.
+      '';
+    };
+    virtualHost = mkOption {
+      type = nullOr str;
+      default = "localhost";
+      description = mdDoc ''
+        Name of the nginx virtual host to use and set up. If null, do not set
+        up any virtual host.
+      '';
+    };
+    extraConfig = mkOption {
+      description = mdDoc ''
+        Any additional text to be appended to the config.php
+        configuration file. This is a PHP script. For configuration
+        settings, see <https://github.com/magicbug/Cloudlog/wiki/Cloudlog.php-Configuration-File>.
+      '';
+      default = "";
+      type = str;
+      example = ''
+        $config['show_time'] = TRUE;
+      '';
+    };
+    upload-lotw = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Whether to periodically upload logs to LoTW. If enabled, a systemd
+          timer will run the log upload task as specified by the interval
+          option.
+        '';
+      };
+      interval = mkOption {
+        type = str;
+        default = "daily";
+        description = mdDoc ''
+          Specification (in the format described by systemd.time(7)) of the
+          time at which the LoTW upload will occur.
+        '';
+      };
+    };
+    upload-clublog = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Whether to periodically upload logs to Clublog. If enabled, a systemd
+          timer will run the log upload task as specified by the interval option.
+        '';
+      };
+      interval = mkOption {
+        type = str;
+        default = "daily";
+        description = mdDoc ''
+          Specification (in the format described by systemd.time(7)) of the time
+          at which the Clublog upload will occur.
+        '';
+      };
+    };
+    update-lotw-users = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Whether to periodically update the list of LoTW users. If enabled, a
+          systemd timer will run the update task as specified by the interval
+          option.
+        '';
+      };
+      interval = mkOption {
+        type = str;
+        default = "weekly";
+        description = mdDoc ''
+          Specification (in the format described by systemd.time(7)) of the
+          time at which the LoTW user update will occur.
+        '';
+      };
+    };
+    update-dok = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Whether to periodically update the DOK resource file. If enabled, a
+          systemd timer will run the update task as specified by the interval option.
+        '';
+      };
+      interval = mkOption {
+        type = str;
+        default = "monthly";
+        description = mdDoc ''
+          Specification (in the format described by systemd.time(7)) of the
+          time at which the DOK update will occur.
+        '';
+      };
+    };
+    update-clublog-scp = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Whether to periodically update the Clublog SCP database. If enabled,
+          a systemd timer will run the update task as specified by the interval
+          option.
+        '';
+      };
+      interval = mkOption {
+        type = str;
+        default = "monthly";
+        description = mdDoc ''
+          Specification (in the format described by systemd.time(7)) of the time
+          at which the Clublog SCP update will occur.
+        '';
+      };
+    };
+    update-wwff = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Whether to periodically update the WWFF database. If enabled, a
+          systemd timer will run the update task as specified by the interval
+          option.
+        '';
+      };
+      interval = mkOption {
+        type = str;
+        default = "monthly";
+        description = mdDoc ''
+          Specification (in the format described by systemd.time(7)) of the time
+          at which the WWFF update will occur.
+        '';
+      };
+    };
+    upload-qrz = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Whether to periodically upload logs to QRZ. If enabled, a systemd
+          timer will run the upload task as specified by the interval option.
+        '';
+      };
+      interval = mkOption {
+        type = str;
+        default = "daily";
+        description = mdDoc ''
+          Specification (in the format described by systemd.time(7)) of the
+          time at which the QRZ upload will occur.
+        '';
+      };
+    };
+    update-sota = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = mdDoc ''
+          Whether to periodically update the SOTA database. If enabled, a
+          systemd timer will run the update task as specified by the interval option.
+        '';
+      };
+      interval = mkOption {
+        type = str;
+        default = "monthly";
+        description = mdDoc ''
+          Specification (in the format described by systemd.time(7)) of the time
+          at which the SOTA update will occur.
+        '';
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "services.cloudlog.database.passwordFile cannot be specified if services.cloudlog.database.createLocally is set to true.";
+      }
+    ];
+
+    services.phpfpm = {
+      pools.cloudlog = {
+        inherit (cfg) user;
+        group = config.services.nginx.group;
+        settings =  {
+          "listen.owner" = config.services.nginx.user;
+          "listen.group" = config.services.nginx.group;
+        } // cfg.poolConfig;
+      };
+    };
+
+    services.nginx = mkIf (cfg.virtualHost != null) {
+      enable = true;
+      virtualHosts = {
+        "${cfg.virtualHost}" = {
+          root = "${package}";
+          locations."/".tryFiles = "$uri /index.php$is_args$args";
+          locations."~ ^/index.php(/|$)".extraConfig = ''
+              include ${config.services.nginx.package}/conf/fastcgi_params;
+              include ${pkgs.nginx}/conf/fastcgi.conf;
+              fastcgi_split_path_info ^(.+\.php)(.+)$;
+              fastcgi_pass unix:${config.services.phpfpm.pools.cloudlog.socket};
+              fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+            '';
+        };
+      };
+    };
+
+    services.mysql = mkIf cfg.database.createLocally {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [{
+        name = cfg.database.user;
+        ensurePermissions = {
+          "${cfg.database.name}.*" = "ALL PRIVILEGES";
+        };
+      }];
+    };
+
+    systemd = {
+      services = {
+        cloudlog-setup-database = mkIf cfg.database.createLocally {
+          description = "Set up cloudlog database";
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+          };
+          wantedBy = [ "phpfpm-cloudlog.service" ];
+          after = [ "mysql.service" ];
+          script = let
+            mysql = "${config.services.mysql.package}/bin/mysql";
+          in ''
+            if [ ! -f ${cfg.dataDir}/.dbexists ]; then
+              ${mysql} ${cfg.database.name} < ${pkgs.cloudlog}/install/assets/install.sql
+              touch ${cfg.dataDir}/.dbexists
+            fi
+          '';
+        };
+        cloudlog-upload-lotw = {
+          description = "Upload QSOs to LoTW if certs have been provided";
+          enable = cfg.upload-lotw.enable;
+          script = "${pkgs.curl}/bin/curl -s ${cfg.baseUrl}/lotw/lotw_upload";
+        };
+        cloudlog-update-lotw-users = {
+          description = "Update LOTW Users Database";
+          enable = cfg.update-lotw-users.enable;
+          script = "${pkgs.curl}/bin/curl -s ${cfg.baseUrl}/lotw/load_users";
+        };
+        cloudlog-update-dok = {
+          description = "Update DOK File for autocomplete";
+          enable = cfg.update-dok.enable;
+          script = "${pkgs.curl}/bin/curl -s ${cfg.baseUrl}/update/update_dok";
+        };
+        cloudlog-update-clublog-scp = {
+          description = "Update Clublog SCP Database File";
+          enable = cfg.update-clublog-scp.enable;
+          script = "${pkgs.curl}/bin/curl -s ${cfg.baseUrl}/update/update_clublog_scp";
+        };
+        cloudlog-update-wwff = {
+          description = "Update WWFF File for autocomplete";
+          enable = cfg.update-wwff.enable;
+          script = "${pkgs.curl}/bin/curl -s ${cfg.baseUrl}/update/update_wwff";
+        };
+        cloudlog-upload-qrz = {
+          description = "Upload QSOs to QRZ Logbook";
+          enable = cfg.upload-qrz.enable;
+          script = "${pkgs.curl}/bin/curl -s ${cfg.baseUrl}/qrz/upload";
+        };
+        cloudlog-update-sota = {
+          description = "Update SOTA File for autocomplete";
+          enable = cfg.update-sota.enable;
+          script = "${pkgs.curl}/bin/curl -s ${cfg.baseUrl}/update/update_sota";
+        };
+      };
+      timers = {
+        cloudlog-upload-lotw = {
+          enable = cfg.upload-lotw.enable;
+          wantedBy = [ "timers.target" ];
+          partOf = [ "cloudlog-upload-lotw.service" ];
+          after = [ "phpfpm-cloudlog.service" ];
+          timerConfig = {
+            OnCalendar = cfg.upload-lotw.interval;
+            Persistent = true;
+          };
+        };
+        cloudlog-upload-clublog = {
+          enable = cfg.upload-clublog.enable;
+          wantedBy = [ "timers.target" ];
+          partOf = [ "cloudlog-upload-clublog.service" ];
+          after = [ "phpfpm-cloudlog.service" ];
+          timerConfig = {
+            OnCalendar = cfg.upload-clublog.interval;
+            Persistent = true;
+          };
+        };
+        cloudlog-update-lotw-users = {
+          enable = cfg.update-lotw-users.enable;
+          wantedBy = [ "timers.target" ];
+          partOf = [ "cloudlog-update-lotw-users.service" ];
+          after = [ "phpfpm-cloudlog.service" ];
+          timerConfig = {
+            OnCalendar = cfg.update-lotw-users.interval;
+            Persistent = true;
+          };
+        };
+        cloudlog-update-dok = {
+          enable = cfg.update-dok.enable;
+          wantedBy = [ "timers.target" ];
+          partOf = [ "cloudlog-update-dok.service" ];
+          after = [ "phpfpm-cloudlog.service" ];
+          timerConfig = {
+            OnCalendar = cfg.update-dok.interval;
+            Persistent = true;
+          };
+        };
+        cloudlog-update-clublog-scp = {
+          enable = cfg.update-clublog-scp.enable;
+          wantedBy = [ "timers.target" ];
+          partOf = [ "cloudlog-update-clublog-scp.service" ];
+          after = [ "phpfpm-cloudlog.service" ];
+          timerConfig = {
+            OnCalendar = cfg.update-clublog-scp.interval;
+            Persistent = true;
+          };
+        };
+        cloudlog-update-wwff =  {
+          enable = cfg.update-wwff.enable;
+          wantedBy = [ "timers.target" ];
+          partOf = [ "cloudlog-update-wwff.service" ];
+          after = [ "phpfpm-cloudlog.service" ];
+          timerConfig = {
+            OnCalendar = cfg.update-wwff.interval;
+            Persistent = true;
+          };
+        };
+        cloudlog-upload-qrz = {
+          enable = cfg.upload-qrz.enable;
+          wantedBy = [ "timers.target" ];
+          partOf = [ "cloudlog-upload-qrz.service" ];
+          after = [ "phpfpm-cloudlog.service" ];
+          timerConfig = {
+            OnCalendar = cfg.upload-qrz.interval;
+            Persistent = true;
+          };
+        };
+        cloudlog-update-sota = {
+          enable = cfg.update-sota.enable;
+          wantedBy = [ "timers.target" ];
+          partOf = [ "cloudlog-update-sota.service" ];
+          after = [ "phpfpm-cloudlog.service" ];
+          timerConfig = {
+            OnCalendar = cfg.update-sota.interval;
+            Persistent = true;
+          };
+        };
+      };
+      tmpfiles.rules = let
+        group = config.services.nginx.group;
+      in [
+        "d ${cfg.dataDir}                0750 ${cfg.user} ${group} - -"
+        "d ${cfg.dataDir}/updates        0750 ${cfg.user} ${group} - -"
+        "d ${cfg.dataDir}/uploads        0750 ${cfg.user} ${group} - -"
+        "d ${cfg.dataDir}/backup         0750 ${cfg.user} ${group} - -"
+        "d ${cfg.dataDir}/logbook        0750 ${cfg.user} ${group} - -"
+        "d ${cfg.dataDir}/assets/json    0750 ${cfg.user} ${group} - -"
+        "d ${cfg.dataDir}/assets/qslcard 0750 ${cfg.user} ${group} - -"
+      ];
+    };
+
+    users.users."${cfg.user}" = {
+      isSystemUser = true;
+      group = config.services.nginx.group;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ melling ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/coder.nix b/nixpkgs/nixos/modules/services/web-apps/coder.nix
new file mode 100644
index 000000000000..f65211308c40
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/coder.nix
@@ -0,0 +1,215 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.coder;
+  name = "coder";
+in {
+  options = {
+    services.coder = {
+      enable = mkEnableOption (lib.mdDoc "Coder service");
+
+      user = mkOption {
+        type = types.str;
+        default = "coder";
+        description = lib.mdDoc ''
+          User under which the coder service runs.
+
+          ::: {.note}
+          If left as the default value this user will automatically be created
+          on system activation, otherwise it needs to be configured manually.
+          :::
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "coder";
+        description = lib.mdDoc ''
+          Group under which the coder service runs.
+
+          ::: {.note}
+          If left as the default value this group will automatically be created
+          on system activation, otherwise it needs to be configured manually.
+          :::
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.coder;
+        description = lib.mdDoc ''
+          Package to use for the service.
+        '';
+        defaultText = literalExpression "pkgs.coder";
+      };
+
+      homeDir = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Home directory for coder user.
+        '';
+        default = "/var/lib/coder";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Listen address.
+        '';
+        default = "127.0.0.1:3000";
+      };
+
+      accessUrl = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          The access URL should be an external IP address or domain name with DNS records pointing to Coder.
+        '';
+        default = null;
+        example = "https://coder.example.com";
+      };
+
+      wildcardAccessUrl = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          Wildcard access URL, used for accessing workspace applications. If you are providing TLS certificates directly to the Coder server, you must use a single certificate for the root and wildcard domains.
+        '';
+        default = null;
+        example = "*.coder.example.com";
+      };
+
+      database = {
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Create the database and database user locally.
+          '';
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "/run/postgresql";
+          description = lib.mdDoc ''
+            Hostname hosting the database.
+          '';
+        };
+
+        database = mkOption {
+          type = types.str;
+          default = "coder";
+          description = lib.mdDoc ''
+            Name of database.
+          '';
+        };
+
+        username = mkOption {
+          type = types.str;
+          default = "coder";
+          description = lib.mdDoc ''
+            Username for accessing the database.
+          '';
+        };
+
+        password = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Password for accessing the database.
+          '';
+        };
+
+        sslmode = mkOption {
+          type = types.nullOr types.str;
+          default = "disable";
+          description = lib.mdDoc ''
+            SSL mode to use when connecting to the database.
+          '';
+        };
+      };
+
+      tlsCert = mkOption {
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          The path to the TLS certificate.
+        '';
+        default = null;
+      };
+
+      tlsKey = mkOption {
+        type = types.nullOr types.path;
+        description = lib.mdDoc ''
+          The path to the TLS key.
+        '';
+        default = null;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.database.createLocally -> cfg.database.username == name && cfg.database.database == cfg.database.username;
+        message = "services.coder.database.username must be set to ${name} if services.coder.database.createLocally is set true";
+      }
+    ];
+
+    systemd.services.coder = {
+      description = "Coder - Self-hosted developer workspaces on your infra";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        CODER_ACCESS_URL = cfg.accessUrl;
+        CODER_WILDCARD_ACCESS_URL = cfg.wildcardAccessUrl;
+        CODER_PG_CONNECTION_URL = "user=${cfg.database.username} ${optionalString (cfg.database.password != null) "password=${cfg.database.password}"} database=${cfg.database.database} host=${cfg.database.host} ${optionalString (cfg.database.sslmode != null) "sslmode=${cfg.database.sslmode}"}";
+        CODER_ADDRESS = cfg.listenAddress;
+        CODER_TLS_ENABLE = optionalString (cfg.tlsCert != null) "1";
+        CODER_TLS_CERT_FILE = cfg.tlsCert;
+        CODER_TLS_KEY_FILE = cfg.tlsKey;
+      };
+
+      serviceConfig = {
+        ProtectSystem = "full";
+        PrivateTmp = "yes";
+        PrivateDevices = "yes";
+        SecureBits = "keep-caps";
+        AmbientCapabilities = "CAP_IPC_LOCK CAP_NET_BIND_SERVICE";
+        CacheDirectory = "coder";
+        CapabilityBoundingSet = "CAP_SYSLOG CAP_IPC_LOCK CAP_NET_BIND_SERVICE";
+        KillSignal = "SIGINT";
+        KillMode = "mixed";
+        NoNewPrivileges = "yes";
+        Restart = "on-failure";
+        ExecStart = "${cfg.package}/bin/coder server";
+        User = cfg.user;
+        Group = cfg.group;
+      };
+    };
+
+    services.postgresql = lib.mkIf cfg.database.createLocally {
+      enable = true;
+      ensureDatabases = [
+        cfg.database.database
+      ];
+      ensureUsers = [{
+        name = cfg.user;
+        ensureDBOwnership = true;
+      }];
+    };
+
+    users.groups = optionalAttrs (cfg.group == name) {
+      "${cfg.group}" = {};
+    };
+    users.users = optionalAttrs (cfg.user == name) {
+      ${name} = {
+        description = "Coder service user";
+        group = cfg.group;
+        home = cfg.homeDir;
+        createHome = true;
+        isSystemUser = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/convos.nix b/nixpkgs/nixos/modules/services/web-apps/convos.nix
new file mode 100644
index 000000000000..cd9f9d885d69
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/convos.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.convos;
+in
+{
+  options.services.convos = {
+    enable = mkEnableOption (lib.mdDoc "Convos");
+    listenPort = mkOption {
+      type = types.port;
+      default = 3000;
+      example = 8080;
+      description = lib.mdDoc "Port the web interface should listen on";
+    };
+    listenAddress = mkOption {
+      type = types.str;
+      default = "*";
+      example = "127.0.0.1";
+      description = lib.mdDoc "Address or host the web interface should listen on";
+    };
+    reverseProxy = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enables reverse proxy support. This will allow Convos to automatically
+        pick up the `X-Forwarded-For` and
+        `X-Request-Base` HTTP headers set in your reverse proxy
+        web server. Note that enabling this option without a reverse proxy in
+        front will be a security issue.
+      '';
+    };
+  };
+  config = mkIf cfg.enable {
+    systemd.services.convos = {
+      description = "Convos Service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      environment = {
+        CONVOS_HOME = "%S/convos";
+        CONVOS_REVERSE_PROXY = if cfg.reverseProxy then "1" else "0";
+        MOJO_LISTEN = "http://${toString cfg.listenAddress}:${toString cfg.listenPort}";
+      };
+      serviceConfig = {
+        ExecStart = "${pkgs.convos}/bin/convos daemon";
+        Restart = "on-failure";
+        StateDirectory = "convos";
+        WorkingDirectory = "%S/convos";
+        DynamicUser = true;
+        MemoryDenyWriteExecute = true;
+        ProtectHome = true;
+        ProtectClock = true;
+        ProtectHostname = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateUsers = true;
+        LockPersonality = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6"];
+        SystemCallFilter = "@system-service";
+        SystemCallArchitectures = "native";
+        CapabilityBoundingSet = "";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/dex.nix b/nixpkgs/nixos/modules/services/web-apps/dex.nix
new file mode 100644
index 000000000000..0c4a71c6dfe4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/dex.nix
@@ -0,0 +1,132 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.dex;
+  fixClient = client: if client ? secretFile then ((builtins.removeAttrs client [ "secretFile" ]) // { secret = client.secretFile; }) else client;
+  filteredSettings = mapAttrs (n: v: if n == "staticClients" then (builtins.map fixClient v) else v) cfg.settings;
+  secretFiles = flatten (builtins.map (c: optional (c ? secretFile) c.secretFile) (cfg.settings.staticClients or []));
+
+  settingsFormat = pkgs.formats.yaml {};
+  configFile = settingsFormat.generate "config.yaml" filteredSettings;
+
+  startPreScript = pkgs.writeShellScript "dex-start-pre"
+    (concatStringsSep "\n" (map (file: ''
+      replace-secret '${file}' '${file}' /run/dex/config.yaml
+    '')
+    secretFiles));
+in
+{
+  options.services.dex = {
+    enable = mkEnableOption (lib.mdDoc "the OpenID Connect and OAuth2 identity provider");
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Environment file (see `systemd.exec(5)`
+        "EnvironmentFile=" section for the syntax) to define variables for dex.
+        This option can be used to safely include secret keys into the dex configuration.
+      '';
+    };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+      example = literalExpression ''
+        {
+          # External url
+          issuer = "http://127.0.0.1:5556/dex";
+          storage = {
+            type = "postgres";
+            config.host = "/var/run/postgres";
+          };
+          web = {
+            http = "127.0.0.1:5556";
+          };
+          enablePasswordDB = true;
+          staticClients = [
+            {
+              id = "oidcclient";
+              name = "Client";
+              redirectURIs = [ "https://example.com/callback" ];
+              secretFile = "/etc/dex/oidcclient"; # The content of `secretFile` will be written into the config as `secret`.
+            }
+          ];
+        }
+      '';
+      description = lib.mdDoc ''
+        The available options can be found in
+        [the example configuration](https://github.com/dexidp/dex/blob/v${pkgs.dex-oidc.version}/config.yaml.dist).
+
+        It's also possible to refer to environment variables (defined in [services.dex.environmentFile](#opt-services.dex.environmentFile))
+        using the syntax `$VARIABLE_NAME`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.dex = {
+      description = "dex identity provider";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ] ++ (optional (cfg.settings.storage.type == "postgres") "postgresql.service");
+      path = with pkgs; [ replace-secret ];
+      serviceConfig = {
+        ExecStart = "${pkgs.dex-oidc}/bin/dex serve /run/dex/config.yaml";
+        ExecStartPre = [
+          "${pkgs.coreutils}/bin/install -m 600 ${configFile} /run/dex/config.yaml"
+          "+${startPreScript}"
+        ];
+
+        RuntimeDirectory = "dex";
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        BindReadOnlyPaths = [
+          "/nix/store"
+          "-/etc/dex"
+          "-/etc/hosts"
+          "-/etc/localtime"
+          "-/etc/nsswitch.conf"
+          "-/etc/resolv.conf"
+          "-/etc/ssl/certs/ca-certificates.crt"
+        ];
+        BindPaths = optional (cfg.settings.storage.type == "postgres") "/var/run/postgresql";
+        CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        # Port needs to be exposed to the host network
+        #PrivateNetwork = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectSystem = "strict";
+        ProtectControlGroups = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged @setuid @keyring" ];
+        UMask = "0066";
+      } // optionalAttrs (cfg.environmentFile != null) {
+        EnvironmentFile = cfg.environmentFile;
+      };
+    };
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/discourse.md b/nixpkgs/nixos/modules/services/web-apps/discourse.md
new file mode 100644
index 000000000000..35180bea87d9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/discourse.md
@@ -0,0 +1,286 @@
+# Discourse {#module-services-discourse}
+
+[Discourse](https://www.discourse.org/) is a
+modern and open source discussion platform.
+
+## Basic usage {#module-services-discourse-basic-usage}
+
+A minimal configuration using Let's Encrypt for TLS certificates looks like this:
+```
+services.discourse = {
+  enable = true;
+  hostname = "discourse.example.com";
+  admin = {
+    email = "admin@example.com";
+    username = "admin";
+    fullName = "Administrator";
+    passwordFile = "/path/to/password_file";
+  };
+  secretKeyBaseFile = "/path/to/secret_key_base_file";
+};
+security.acme.email = "me@example.com";
+security.acme.acceptTerms = true;
+```
+
+Provided a proper DNS setup, you'll be able to connect to the
+instance at `discourse.example.com` and log in
+using the credentials provided in
+`services.discourse.admin`.
+
+## Using a regular TLS certificate {#module-services-discourse-tls}
+
+To set up TLS using a regular certificate and key on file, use
+the [](#opt-services.discourse.sslCertificate)
+and [](#opt-services.discourse.sslCertificateKey)
+options:
+
+```
+services.discourse = {
+  enable = true;
+  hostname = "discourse.example.com";
+  sslCertificate = "/path/to/ssl_certificate";
+  sslCertificateKey = "/path/to/ssl_certificate_key";
+  admin = {
+    email = "admin@example.com";
+    username = "admin";
+    fullName = "Administrator";
+    passwordFile = "/path/to/password_file";
+  };
+  secretKeyBaseFile = "/path/to/secret_key_base_file";
+};
+```
+
+## Database access {#module-services-discourse-database}
+
+Discourse uses PostgreSQL to store most of its
+data. A local PostgreSQL server will automatically be enabled, and a
+database and role created, unless [](#opt-services.discourse.database.host) is changed from
+its default of `null` or [](#opt-services.discourse.database.createLocally) is set
+to `false`.
+
+External database access can also be configured by setting
+[](#opt-services.discourse.database.host),
+[](#opt-services.discourse.database.username) and
+[](#opt-services.discourse.database.passwordFile) as
+appropriate. Note that you need to manually create a database
+called `discourse` (or the name you chose in
+[](#opt-services.discourse.database.name)) and
+allow the configured database user full access to it.
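+
+For instance, a minimal sketch of such an external database setup (the
+hostname and file path below are placeholders) could look like:
+
+```
+services.discourse.database = {
+  host = "database.example.com";
+  username = "discourse";
+  passwordFile = "/path/to/db_password_file";
+};
+```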
+
+## Email {#module-services-discourse-mail}
+
+In addition to the basic setup, you'll want to configure an SMTP
+server Discourse can use to send user
+registration and password reset emails, among others. You can
+also optionally let Discourse receive
+email, which enables people to reply to threads and conversations
+via email.
+
+A basic setup which assumes you want to use your configured
+[hostname](#opt-services.discourse.hostname) as
+email domain can be done like this:
+
+```
+services.discourse = {
+  enable = true;
+  hostname = "discourse.example.com";
+  sslCertificate = "/path/to/ssl_certificate";
+  sslCertificateKey = "/path/to/ssl_certificate_key";
+  admin = {
+    email = "admin@example.com";
+    username = "admin";
+    fullName = "Administrator";
+    passwordFile = "/path/to/password_file";
+  };
+  mail.outgoing = {
+    serverAddress = "smtp.emailprovider.com";
+    port = 587;
+    username = "user@emailprovider.com";
+    passwordFile = "/path/to/smtp_password_file";
+  };
+  mail.incoming.enable = true;
+  secretKeyBaseFile = "/path/to/secret_key_base_file";
+};
+```
+
+This assumes you have set up an MX record for the address you've
+set in [hostname](#opt-services.discourse.hostname) and
+requires proper SPF, DKIM and DMARC configuration to be done for
+the domain you're sending from, in order for email to be reliably delivered.
+
+If you want to use a different domain for your outgoing email
+(for example `example.com` instead of
+`discourse.example.com`) you should set
+[](#opt-services.discourse.mail.notificationEmailAddress) and
+[](#opt-services.discourse.mail.contactEmailAddress) manually.
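+
+A sketch of such a setup (the addresses below are placeholders) might be:
+
+```
+services.discourse.mail = {
+  notificationEmailAddress = "notifications@example.com";
+  contactEmailAddress = "contact@example.com";
+};
+```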
+
+::: {.note}
+Setup of TLS for incoming email is currently only configured
+automatically when a regular TLS certificate is used, i.e. when
+[](#opt-services.discourse.sslCertificate) and
+[](#opt-services.discourse.sslCertificateKey) are
+set.
+:::
+
+## Additional settings {#module-services-discourse-settings}
+
+Additional site settings and backend settings, for which no
+explicit NixOS options are provided,
+can be set in [](#opt-services.discourse.siteSettings) and
+[](#opt-services.discourse.backendSettings) respectively.
+
+### Site settings {#module-services-discourse-site-settings}
+
+"Site settings" are the settings that can be
+changed through the Discourse
+UI. Their *default* values can be set using
+[](#opt-services.discourse.siteSettings).
+
+Settings are expressed as a Nix attribute set which matches the
+structure of the configuration in
+[config/site_settings.yml](https://github.com/discourse/discourse/blob/master/config/site_settings.yml).
+To find a setting's path, you only need to care about the first
+two levels; i.e. its category (e.g. `login`)
+and name (e.g. `invite_only`).
+
+Settings containing secret data should be set to an attribute
+set containing the attribute `_secret` - a
+string pointing to a file containing the value the option
+should be set to. See the example.
+
+### Backend settings {#module-services-discourse-backend-settings}
+
+Settings are expressed as a Nix attribute set which matches the
+structure of the configuration in
+[config/discourse.conf](https://github.com/discourse/discourse/blob/stable/config/discourse_defaults.conf).
+Empty parameters can be defined by setting them to
+`null`.
+
+### Example {#module-services-discourse-settings-example}
+
+The following example sets the title and description of the
+Discourse instance and enables
+GitHub login in the site settings,
+and changes a few request limits in the backend settings:
+```
+services.discourse = {
+  enable = true;
+  hostname = "discourse.example.com";
+  sslCertificate = "/path/to/ssl_certificate";
+  sslCertificateKey = "/path/to/ssl_certificate_key";
+  admin = {
+    email = "admin@example.com";
+    username = "admin";
+    fullName = "Administrator";
+    passwordFile = "/path/to/password_file";
+  };
+  mail.outgoing = {
+    serverAddress = "smtp.emailprovider.com";
+    port = 587;
+    username = "user@emailprovider.com";
+    passwordFile = "/path/to/smtp_password_file";
+  };
+  mail.incoming.enable = true;
+  siteSettings = {
+    required = {
+      title = "My Cats";
+      site_description = "Discuss My Cats (and be nice plz)";
+    };
+    login = {
+      enable_github_logins = true;
+      github_client_id = "a2f6dfe838cb3206ce20";
+      github_client_secret._secret = /run/keys/discourse_github_client_secret;
+    };
+  };
+  backendSettings = {
+    max_reqs_per_ip_per_minute = 300;
+    max_reqs_per_ip_per_10_seconds = 60;
+    max_asset_reqs_per_ip_per_10_seconds = 250;
+    max_reqs_per_ip_mode = "warn+block";
+  };
+  secretKeyBaseFile = "/path/to/secret_key_base_file";
+};
+```
+
+In the resulting site settings file, the
+`login.github_client_secret` key will be set
+to the contents of the
+{file}`/run/keys/discourse_github_client_secret`
+file.
+
+## Plugins {#module-services-discourse-plugins}
+
+You can install Discourse plugins
+using the [](#opt-services.discourse.plugins)
+option. Pre-packaged plugins are provided in
+`<your_discourse_package_here>.plugins`. If
+you want the full suite of plugins provided through
+`nixpkgs`, you can also set the [](#opt-services.discourse.package) option to
+`pkgs.discourseAllPlugins`.
+
+Plugins can be built with the
+`<your_discourse_package_here>.mkDiscoursePlugin`
+function. Normally, it should suffice to provide a
+`name` and `src` attribute. If
+the plugin has Ruby dependencies, however, they need to be
+packaged in accordance with the [Developing with Ruby](https://nixos.org/manual/nixpkgs/stable/#developing-with-ruby)
+section of the Nixpkgs manual and the
+appropriate gem options set in `bundlerEnvArgs`
+(normally `gemdir` is sufficient). A plugin's
+Ruby dependencies are listed in its
+{file}`plugin.rb` file as function calls to
+`gem`. To construct the corresponding
+{file}`Gemfile` manually, run {command}`bundle init`, then add the `gem` lines to it
+verbatim.
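+
+As a rough sketch (the plugin name, repository and hashes below are
+placeholders), a simple plugin without Ruby dependencies could be packaged
+and enabled like this:
+
+```
+services.discourse.plugins = [
+  (config.services.discourse.package.mkDiscoursePlugin {
+    name = "my-discourse-plugin";
+    src = pkgs.fetchFromGitHub {
+      owner = "someone";
+      repo = "my-discourse-plugin";
+      rev = "...";
+      sha256 = "...";
+    };
+    # If the plugin had Ruby dependencies, bundlerEnvArgs (normally just
+    # gemdir) would also need to be set, as described above.
+  })
+];
+```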
+
+Much of the packaging can be done automatically by the
+{file}`nixpkgs/pkgs/servers/web-apps/discourse/update.py`
+script - just add the plugin to the `plugins`
+list in the `update_plugins` function and run
+the script:
+```bash
+./update.py update-plugins
+```
+
+Some plugins provide [site settings](#module-services-discourse-site-settings).
+Their defaults can be configured using [](#opt-services.discourse.siteSettings), just like
+regular site settings. To find the names of these settings, look
+in the `config/settings.yml` file of the plugin
+repo.
+
+For example, to add the [discourse-spoiler-alert](https://github.com/discourse/discourse-spoiler-alert)
+and [discourse-solved](https://github.com/discourse/discourse-solved)
+plugins, and disable `discourse-spoiler-alert`
+by default:
+
+```
+services.discourse = {
+  enable = true;
+  hostname = "discourse.example.com";
+  sslCertificate = "/path/to/ssl_certificate";
+  sslCertificateKey = "/path/to/ssl_certificate_key";
+  admin = {
+    email = "admin@example.com";
+    username = "admin";
+    fullName = "Administrator";
+    passwordFile = "/path/to/password_file";
+  };
+  mail.outgoing = {
+    serverAddress = "smtp.emailprovider.com";
+    port = 587;
+    username = "user@emailprovider.com";
+    passwordFile = "/path/to/smtp_password_file";
+  };
+  mail.incoming.enable = true;
+  plugins = with config.services.discourse.package.plugins; [
+    discourse-spoiler-alert
+    discourse-solved
+  ];
+  siteSettings = {
+    plugins = {
+      spoiler_enabled = false;
+    };
+  };
+  secretKeyBaseFile = "/path/to/secret_key_base_file";
+};
+```
diff --git a/nixpkgs/nixos/modules/services/web-apps/discourse.nix b/nixpkgs/nixos/modules/services/web-apps/discourse.nix
new file mode 100644
index 000000000000..da1dba7d940b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/discourse.nix
@@ -0,0 +1,1093 @@
+{ config, options, lib, pkgs, utils, ... }:
+
+let
+  json = pkgs.formats.json {};
+
+  cfg = config.services.discourse;
+  opt = options.services.discourse;
+
+  # Keep in sync with https://github.com/discourse/discourse_docker/blob/main/image/base/slim.Dockerfile#L5
+  upstreamPostgresqlVersion = lib.getVersion pkgs.postgresql_13;
+
+  postgresqlPackage = if config.services.postgresql.enable then
+                        config.services.postgresql.package
+                      else
+                        pkgs.postgresql;
+
+  postgresqlVersion = lib.getVersion postgresqlPackage;
+
+  # We only want to create a database if we're actually going to connect to it.
+  databaseActuallyCreateLocally = cfg.database.createLocally && cfg.database.host == null;
+
+  tlsEnabled = cfg.enableACME
+                || cfg.sslCertificate != null
+                || cfg.sslCertificateKey != null;
+in
+{
+  options = {
+    services.discourse = {
+      enable = lib.mkEnableOption (lib.mdDoc "Discourse, an open source discussion platform");
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.discourse;
+        apply = p: p.override {
+          plugins = lib.unique (p.enabledPlugins ++ cfg.plugins);
+        };
+        defaultText = lib.literalExpression "pkgs.discourse";
+        description = lib.mdDoc ''
+          The discourse package to use.
+        '';
+      };
+
+      hostname = lib.mkOption {
+        type = lib.types.str;
+        default = config.networking.fqdnOrHostName;
+        defaultText = lib.literalExpression "config.networking.fqdnOrHostName";
+        example = "discourse.example.com";
+        description = lib.mdDoc ''
+          The hostname to serve Discourse on.
+        '';
+      };
+
+      secretKeyBaseFile = lib.mkOption {
+        type = with lib.types; nullOr path;
+        default = null;
+        example = "/run/keys/secret_key_base";
+        description = lib.mdDoc ''
+          The path to a file containing the
+          `secret_key_base` secret.
+
+          Discourse uses `secret_key_base` to encrypt
+          the cookie store, which contains session data, and to digest
+          user auth tokens.
+
+          Needs to be 64 bytes of random data, hex-encoded into a
+          128 character string. You can generate one by running
+
+          ```
+          openssl rand -hex 64 >/path/to/secret_key_base_file
+          ```
+
+          This should be a string, not a nix path, since nix paths are
+          copied into the world-readable nix store.
+        '';
+      };
+
+      sslCertificate = lib.mkOption {
+        type = with lib.types; nullOr path;
+        default = null;
+        example = "/run/keys/ssl.cert";
+        description = lib.mdDoc ''
+          The path to the server SSL certificate. Set this to enable
+          SSL.
+        '';
+      };
+
+      sslCertificateKey = lib.mkOption {
+        type = with lib.types; nullOr path;
+        default = null;
+        example = "/run/keys/ssl.key";
+        description = lib.mdDoc ''
+          The path to the server SSL certificate key. Set this to
+          enable SSL.
+        '';
+      };
+
+      enableACME = lib.mkOption {
+        type = lib.types.bool;
+        default = cfg.sslCertificate == null && cfg.sslCertificateKey == null;
+        defaultText = lib.literalMD ''
+          `true`, unless {option}`services.discourse.sslCertificate`
+          and {option}`services.discourse.sslCertificateKey` are set.
+        '';
+        description = lib.mdDoc ''
+          Whether an ACME certificate should be used to secure
+          connections to the server.
+        '';
+      };
+
+      backendSettings = lib.mkOption {
+        type = with lib.types; attrsOf (nullOr (oneOf [ str int bool float ]));
+        default = {};
+        example = lib.literalExpression ''
+          {
+            max_reqs_per_ip_per_minute = 300;
+            max_reqs_per_ip_per_10_seconds = 60;
+            max_asset_reqs_per_ip_per_10_seconds = 250;
+            max_reqs_per_ip_mode = "warn+block";
+          };
+        '';
+        description = lib.mdDoc ''
+          Additional settings to put in the
+          {file}`discourse.conf` file.
+
+          Look in the
+          [discourse_defaults.conf](https://github.com/discourse/discourse/blob/master/config/discourse_defaults.conf)
+          file in the upstream distribution to find available options.
+
+          Setting an option to `null` means
+          “define variable, but leave right-hand side empty”.
+        '';
+      };
+
+      siteSettings = lib.mkOption {
+        type = json.type;
+        default = {};
+        example = lib.literalExpression ''
+          {
+            required = {
+              title = "My Cats";
+              site_description = "Discuss My Cats (and be nice plz)";
+            };
+            login = {
+              enable_github_logins = true;
+              github_client_id = "a2f6dfe838cb3206ce20";
+              github_client_secret._secret = /run/keys/discourse_github_client_secret;
+            };
+          };
+        '';
+        description = lib.mdDoc ''
+          Discourse site settings. These are the settings that can be
+          changed from the UI. This only defines their default values:
+          they can still be overridden from the UI.
+
+          Available settings can be found by looking in the
+          [site_settings.yml](https://github.com/discourse/discourse/blob/master/config/site_settings.yml)
+          file of the upstream distribution. To find a setting's path,
+          you only need to care about the first two levels; i.e. its
+          category and name. See the example.
+
+          Settings containing secret data should be set to an
+          attribute set containing the attribute
+          `_secret` - a string pointing to a file
+          containing the value the option should be set to. See the
+          example to get a better picture of this: in the resulting
+          {file}`config/nixos_site_settings.json` file,
+          the `login.github_client_secret` key will
+          be set to the contents of the
+          {file}`/run/keys/discourse_github_client_secret`
+          file.
+        '';
+      };
+
+      admin = {
+        skipCreate = lib.mkOption {
+          type = lib.types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Do not create the admin account, instead rely on other
+            existing admin accounts.
+          '';
+        };
+
+        email = lib.mkOption {
+          type = lib.types.str;
+          example = "admin@example.com";
+          description = lib.mdDoc ''
+            The admin user email address.
+          '';
+        };
+
+        username = lib.mkOption {
+          type = lib.types.str;
+          example = "admin";
+          description = lib.mdDoc ''
+            The admin user username.
+          '';
+        };
+
+        fullName = lib.mkOption {
+          type = lib.types.str;
+          description = lib.mdDoc ''
+            The admin user's full name.
+          '';
+        };
+
+        passwordFile = lib.mkOption {
+          type = lib.types.path;
+          description = lib.mdDoc ''
+            A path to a file containing the admin user's password.
+
+            This should be a string, not a nix path, since nix paths are
+            copied into the world-readable nix store.
+          '';
+        };
+      };
+
+      nginx.enable = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether an `nginx` virtual host should be
+          set up to serve Discourse. Only disable if you're planning
+          to use a different web server, which is not recommended.
+        '';
+      };
+
+      database = {
+        pool = lib.mkOption {
+          type = lib.types.int;
+          default = 8;
+          description = lib.mdDoc ''
+            Database connection pool size.
+          '';
+        };
+
+        host = lib.mkOption {
+          type = with lib.types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            Discourse database hostname. `null` means
+            “prefer local unix socket connection”.
+          '';
+        };
+
+        passwordFile = lib.mkOption {
+          type = with lib.types; nullOr path;
+          default = null;
+          description = lib.mdDoc ''
+            File containing the Discourse database user password.
+
+            This should be a string, not a nix path, since nix paths are
+            copied into the world-readable nix store.
+          '';
+        };
+
+        createLocally = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether a database should be automatically created on the
+            local host. Set this to `false` if you plan
+            on provisioning a local database yourself. This has no effect
+            if {option}`services.discourse.database.host` is customized.
+          '';
+        };
+
+        name = lib.mkOption {
+          type = lib.types.str;
+          default = "discourse";
+          description = lib.mdDoc ''
+            Discourse database name.
+          '';
+        };
+
+        username = lib.mkOption {
+          type = lib.types.str;
+          default = "discourse";
+          description = lib.mdDoc ''
+            Discourse database user.
+          '';
+        };
+
+        ignorePostgresqlVersion = lib.mkOption {
+          type = lib.types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether to allow other versions of PostgreSQL than the
+            recommended one. Only effective when
+            {option}`services.discourse.database.createLocally`
+            is enabled.
+          '';
+        };
+      };
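+      # Sketch of a remote-database setup (hostname and key path are assumptions):
+      #   services.discourse.database = {
+      #     host = "db.example.com";
+      #     passwordFile = "/run/keys/discourse_db_password";
+      #   };
+      # When host is non-null, passwordFile must also be set (enforced by an assertion below).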
+
+      redis = {
+        host = lib.mkOption {
+          type = lib.types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            Redis server hostname.
+          '';
+        };
+
+        passwordFile = lib.mkOption {
+          type = with lib.types; nullOr path;
+          default = null;
+          description = lib.mdDoc ''
+            File containing the Redis password.
+
+            This should be a string, not a nix path, since nix paths are
+            copied into the world-readable nix store.
+          '';
+        };
+
+        dbNumber = lib.mkOption {
+          type = lib.types.int;
+          default = 0;
+          description = lib.mdDoc ''
+            Redis database number.
+          '';
+        };
+
+        useSSL = lib.mkOption {
+          type = lib.types.bool;
+          default = cfg.redis.host != "localhost";
+          defaultText = lib.literalExpression ''config.${opt.redis.host} != "localhost"'';
+          description = lib.mdDoc ''
+            Connect to Redis with SSL.
+          '';
+        };
+      };
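+      # Sketch of a remote Redis setup (hostname and key path are assumptions); note that
+      # a non-"localhost" host makes useSSL default to true:
+      #   services.discourse.redis = {
+      #     host = "redis.example.com";
+      #     passwordFile = "/run/keys/discourse_redis_password";
+      #   };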
+
+      mail = {
+        notificationEmailAddress = lib.mkOption {
+          type = lib.types.str;
+          default = "${if cfg.mail.incoming.enable then "notifications" else "noreply"}@${cfg.hostname}";
+          defaultText = lib.literalExpression ''
+            "''${if config.services.discourse.mail.incoming.enable then "notifications" else "noreply"}@''${config.services.discourse.hostname}"
+          '';
+          description = lib.mdDoc ''
+            The `from:` email address used when
+            sending all essential system emails. The domain specified
+            here must have SPF, DKIM and reverse PTR records set
+            correctly for email to arrive.
+          '';
+        };
+
+        contactEmailAddress = lib.mkOption {
+          type = lib.types.str;
+          default = "";
+          description = lib.mdDoc ''
+            Email address of key contact responsible for this
+            site. Used for critical notifications, as well as on the
+            `/about` contact form for urgent matters.
+          '';
+        };
+
+        outgoing = {
+          serverAddress = lib.mkOption {
+            type = lib.types.str;
+            default = "localhost";
+            description = lib.mdDoc ''
+              The address of the SMTP server Discourse should use to
+              send email.
+            '';
+          };
+
+          port = lib.mkOption {
+            type = lib.types.port;
+            default = 25;
+            description = lib.mdDoc ''
+              The port of the SMTP server Discourse should use to
+              send email.
+            '';
+          };
+
+          username = lib.mkOption {
+            type = with lib.types; nullOr str;
+            default = null;
+            description = lib.mdDoc ''
+              The username of the SMTP server.
+            '';
+          };
+
+          passwordFile = lib.mkOption {
+            type = lib.types.nullOr lib.types.path;
+            default = null;
+            description = lib.mdDoc ''
+              A file containing the password of the SMTP server account.
+
+              This should be a string, not a nix path, since nix paths
+              are copied into the world-readable nix store.
+            '';
+          };
+
+          domain = lib.mkOption {
+            type = lib.types.str;
+            default = cfg.hostname;
+            defaultText = lib.literalExpression "config.${opt.hostname}";
+            description = lib.mdDoc ''
+              HELO domain to use for outgoing mail.
+            '';
+          };
+
+          authentication = lib.mkOption {
+            type = with lib.types; nullOr (enum ["plain" "login" "cram_md5"]);
+            default = null;
+            description = lib.mdDoc ''
+              Authentication type to use, see https://api.rubyonrails.org/classes/ActionMailer/Base.html
+            '';
+          };
+
+          enableStartTLSAuto = lib.mkOption {
+            type = lib.types.bool;
+            default = true;
+            description = lib.mdDoc ''
+              Whether to try to use StartTLS.
+            '';
+          };
+
+          opensslVerifyMode = lib.mkOption {
+            type = lib.types.str;
+            default = "peer";
+            description = lib.mdDoc ''
+              How OpenSSL checks the certificate, see https://api.rubyonrails.org/classes/ActionMailer/Base.html
+            '';
+          };
+
+          forceTLS = lib.mkOption {
+            type = lib.types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Force implicit TLS as per RFC 8314 3.3.
+            '';
+          };
+        };
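+        # Sketch of an authenticated SMTP relay (server name and key path are assumptions):
+        #   services.discourse.mail.outgoing = {
+        #     serverAddress = "smtp.example.com";
+        #     port = 587;
+        #     username = "discourse@example.com";
+        #     passwordFile = "/run/keys/discourse_smtp_password";
+        #     authentication = "plain";
+        #   };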
+
+        incoming = {
+          enable = lib.mkOption {
+            type = lib.types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether to set up Postfix to receive incoming mail.
+            '';
+          };
+
+          replyEmailAddress = lib.mkOption {
+            type = lib.types.str;
+            default = "%{reply_key}@${cfg.hostname}";
+            defaultText = lib.literalExpression ''"%{reply_key}@''${config.services.discourse.hostname}"'';
+            description = lib.mdDoc ''
+              Template for the incoming reply-by-email address, for
+              example `%{reply_key}@reply.example.com` or
+              `replies+%{reply_key}@example.com`.
+            '';
+          };
+
+          mailReceiverPackage = lib.mkOption {
+            type = lib.types.package;
+            default = pkgs.discourse-mail-receiver;
+            defaultText = lib.literalExpression "pkgs.discourse-mail-receiver";
+            description = lib.mdDoc ''
+              The discourse-mail-receiver package to use.
+            '';
+          };
+
+          apiKeyFile = lib.mkOption {
+            type = lib.types.nullOr lib.types.path;
+            default = null;
+            description = lib.mdDoc ''
+              A file containing the Discourse API key used to add
+              posts and messages from mail. If left at its default
+              value `null`, one will be automatically
+              generated.
+
+              This should be a string, not a nix path, since nix paths
+              are copied into the world-readable nix store.
+            '';
+          };
+        };
+      };
+
+      plugins = lib.mkOption {
+        type = lib.types.listOf lib.types.package;
+        default = [];
+        example = lib.literalExpression ''
+          with config.services.discourse.package.plugins; [
+            discourse-canned-replies
+            discourse-github
+          ];
+        '';
+        description = lib.mdDoc ''
+          Plugins to install as part of Discourse, expressed as a list of derivations.
+        '';
+      };
+
+      sidekiqProcesses = lib.mkOption {
+        type = lib.types.int;
+        default = 1;
+        description = lib.mdDoc ''
+          How many Sidekiq processes should be spawned.
+        '';
+      };
+
+      unicornTimeout = lib.mkOption {
+        type = lib.types.int;
+        default = 30;
+        description = lib.mdDoc ''
+          Time in seconds before a request to Unicorn times out.
+
+          This can be raised if the system Discourse is running on is
+          too slow to handle many requests within 30 seconds.
+        '';
+      };
+    };
+  };
+
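+  # Putting the pieces together, a minimal configuration could look roughly like this
+  # (hostname and key paths are illustrative assumptions):
+  #   services.discourse = {
+  #     enable = true;
+  #     hostname = "forum.example.com";
+  #     admin = { email = "admin@example.com"; username = "admin";
+  #               fullName = "Site Administrator";
+  #               passwordFile = "/run/keys/discourse_admin_password"; };
+  #     secretKeyBaseFile = "/run/keys/discourse_secret_key_base";
+  #   };
+  # Everything else has workable defaults: local PostgreSQL and Redis, an nginx virtual
+  # host, and outgoing mail via a local SMTP server on port 25.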
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (cfg.database.host != null) -> (cfg.database.passwordFile != null);
+        message = "When services.discourse.database.host is customized, services.discourse.database.passwordFile must be set!";
+      }
+      {
+        assertion = cfg.hostname != "";
+        message = "Could not automatically determine hostname; set services.discourse.hostname manually.";
+      }
+      {
+        assertion = cfg.database.ignorePostgresqlVersion || (databaseActuallyCreateLocally -> upstreamPostgresqlVersion == postgresqlVersion);
+        message = "The PostgreSQL version recommended for use with Discourse is ${upstreamPostgresqlVersion}, you're using ${postgresqlVersion}. "
+                  + "Either update your PostgreSQL package to the correct version or set services.discourse.database.ignorePostgresqlVersion. "
+                  + "See https://nixos.org/manual/nixos/stable/index.html#module-postgresql for details on how to upgrade PostgreSQL.";
+      }
+    ];
+
+
+    # Default config values are from `config/discourse_defaults.conf`
+    # upstream.
+    services.discourse.backendSettings = lib.mapAttrs (_: lib.mkDefault) {
+      db_pool = cfg.database.pool;
+      db_timeout = 5000;
+      db_connect_timeout = 5;
+      db_socket = null;
+      db_host = cfg.database.host;
+      db_backup_host = null;
+      db_port = null;
+      db_backup_port = 5432;
+      db_name = cfg.database.name;
+      db_username = if databaseActuallyCreateLocally then "discourse" else cfg.database.username;
+      db_password = cfg.database.passwordFile;
+      db_prepared_statements = false;
+      db_replica_host = null;
+      db_replica_port = null;
+      db_advisory_locks = true;
+
+      inherit (cfg) hostname;
+      backup_hostname = null;
+
+      smtp_address = cfg.mail.outgoing.serverAddress;
+      smtp_port = cfg.mail.outgoing.port;
+      smtp_domain = cfg.mail.outgoing.domain;
+      smtp_user_name = cfg.mail.outgoing.username;
+      smtp_password = cfg.mail.outgoing.passwordFile;
+      smtp_authentication = cfg.mail.outgoing.authentication;
+      smtp_enable_start_tls = cfg.mail.outgoing.enableStartTLSAuto;
+      smtp_openssl_verify_mode = cfg.mail.outgoing.opensslVerifyMode;
+      smtp_force_tls = cfg.mail.outgoing.forceTLS;
+
+      load_mini_profiler = true;
+      mini_profiler_snapshots_period = 0;
+      mini_profiler_snapshots_transport_url = null;
+      mini_profiler_snapshots_transport_auth_key = null;
+
+      cdn_url = null;
+      cdn_origin_hostname = null;
+      developer_emails = null;
+
+      redis_host = cfg.redis.host;
+      redis_port = 6379;
+      redis_replica_host = null;
+      redis_replica_port = 6379;
+      redis_db = cfg.redis.dbNumber;
+      redis_password = cfg.redis.passwordFile;
+      redis_skip_client_commands = false;
+      redis_use_ssl = cfg.redis.useSSL;
+
+      message_bus_redis_enabled = false;
+      message_bus_redis_host = "localhost";
+      message_bus_redis_port = 6379;
+      message_bus_redis_replica_host = null;
+      message_bus_redis_replica_port = 6379;
+      message_bus_redis_db = 0;
+      message_bus_redis_password = null;
+      message_bus_redis_skip_client_commands = false;
+
+      enable_cors = false;
+      cors_origin = "";
+      serve_static_assets = false;
+      sidekiq_workers = 5;
+      connection_reaper_age = 30;
+      connection_reaper_interval = 30;
+      relative_url_root = null;
+      message_bus_max_backlog_size = 100;
+      message_bus_clear_every = 50;
+      secret_key_base = cfg.secretKeyBaseFile;
+      fallback_assets_path = null;
+
+      s3_bucket = null;
+      s3_region = null;
+      s3_access_key_id = null;
+      s3_secret_access_key = null;
+      s3_use_iam_profile = null;
+      s3_cdn_url = null;
+      s3_endpoint = null;
+      s3_http_continue_timeout = null;
+      s3_install_cors_rule = null;
+      s3_asset_cdn_url = null;
+
+      max_user_api_reqs_per_minute = 20;
+      max_user_api_reqs_per_day = 2880;
+      max_admin_api_reqs_per_minute = 60;
+      max_reqs_per_ip_per_minute = 200;
+      max_reqs_per_ip_per_10_seconds = 50;
+      max_asset_reqs_per_ip_per_10_seconds = 200;
+      max_reqs_per_ip_mode = "block";
+      max_reqs_rate_limit_on_private = false;
+      skip_per_ip_rate_limit_trust_level = 1;
+      force_anonymous_min_queue_seconds = 1;
+      force_anonymous_min_per_10_seconds = 3;
+      background_requests_max_queue_length = 0.5;
+      reject_message_bus_queue_seconds = 0.1;
+      disable_search_queue_threshold = 1;
+      max_old_rebakes_per_15_minutes = 300;
+      max_logster_logs = 1000;
+      refresh_maxmind_db_during_precompile_days = 2;
+      maxmind_backup_path = null;
+      maxmind_license_key = null;
+      enable_performance_http_headers = false;
+      enable_js_error_reporting = true;
+      mini_scheduler_workers = 5;
+      compress_anon_cache = false;
+      anon_cache_store_threshold = 2;
+      allowed_theme_repos = null;
+      enable_email_sync_demon = false;
+      max_digests_enqueued_per_30_mins_per_site = 10000;
+      cluster_name = null;
+      multisite_config_path = "config/multisite.yml";
+      enable_long_polling = null;
+      long_polling_interval = null;
+      preload_link_header = false;
+      redirect_avatar_requests = false;
+      pg_force_readonly_mode = false;
+      dns_query_timeout_secs = null;
+      regex_timeout_seconds = 2;
+      allow_impersonation = true;
+    };
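+    # Every entry above is wrapped in lib.mkDefault, so individual backend settings can
+    # still be overridden from user configuration, for example (illustrative):
+    #   services.discourse.backendSettings.db_timeout = 10000;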
+
+    services.redis.servers.discourse =
+      lib.mkIf (lib.elem cfg.redis.host [ "localhost" "127.0.0.1" ]) {
+        enable = true;
+        bind = cfg.redis.host;
+        port = cfg.backendSettings.redis_port;
+      };
+
+    services.postgresql = lib.mkIf databaseActuallyCreateLocally {
+      enable = true;
+      ensureUsers = [{ name = "discourse"; }];
+    };
+
+    # The postgresql module doesn't currently support concepts like
+    # objects owners and extensions; for now we tack on what's needed
+    # here.
+    systemd.services.discourse-postgresql =
+      let
+        pgsql = config.services.postgresql;
+      in
+        lib.mkIf databaseActuallyCreateLocally {
+          after = [ "postgresql.service" ];
+          bindsTo = [ "postgresql.service" ];
+          wantedBy = [ "discourse.service" ];
+          partOf = [ "discourse.service" ];
+          path = [
+            pgsql.package
+          ];
+          script = ''
+            set -o errexit -o pipefail -o nounset -o errtrace
+            shopt -s inherit_errexit
+
+            psql -tAc "SELECT 1 FROM pg_database WHERE datname = 'discourse'" | grep -q 1 || psql -tAc 'CREATE DATABASE "discourse" OWNER "discourse"'
+            psql '${cfg.database.name}' -tAc "CREATE EXTENSION IF NOT EXISTS pg_trgm"
+            psql '${cfg.database.name}' -tAc "CREATE EXTENSION IF NOT EXISTS hstore"
+          '';
+
+          serviceConfig = {
+            User = pgsql.superUser;
+            Type = "oneshot";
+            RemainAfterExit = true;
+          };
+        };
+
+    systemd.services.discourse = {
+      wantedBy = [ "multi-user.target" ];
+      after = [
+        "redis-discourse.service"
+        "postgresql.service"
+        "discourse-postgresql.service"
+      ];
+      bindsTo = [
+        "redis-discourse.service"
+      ] ++ lib.optionals (cfg.database.host == null) [
+        "postgresql.service"
+        "discourse-postgresql.service"
+      ];
+      path = cfg.package.runtimeDeps ++ [
+        postgresqlPackage
+        pkgs.replace-secret
+        cfg.package.rake
+      ];
+      environment = cfg.package.runtimeEnv // {
+        UNICORN_TIMEOUT = builtins.toString cfg.unicornTimeout;
+        UNICORN_SIDEKIQS = builtins.toString cfg.sidekiqProcesses;
+        MALLOC_ARENA_MAX = "2";
+      };
+
+      preStart =
+        let
+          discourseKeyValue = lib.generators.toKeyValue {
+            mkKeyValue = lib.flip lib.generators.mkKeyValueDefault " = " {
+              mkValueString = v: with builtins;
+                if isInt           v then toString v
+                else if isString   v then ''"${v}"''
+                else if true  ==   v then "true"
+                else if false ==   v then "false"
+                else if null  ==   v then ""
+                else if isFloat    v then lib.strings.floatToString v
+                else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
+            };
+          };
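+          # For illustration: discourseKeyValue renders an attrset such as
+          #   { db_host = null; db_pool = 8; db_prepared_statements = false; }
+          # as the lines
+          #   db_host =
+          #   db_pool = 8
+          #   db_prepared_statements = false
+          # i.e. `key = value` pairs with null rendered as an empty value.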
+
+          discourseConf = pkgs.writeText "discourse.conf" (discourseKeyValue cfg.backendSettings);
+
+          mkSecretReplacement = file:
+            lib.optionalString (file != null) ''
+              replace-secret '${file}' '${file}' /run/discourse/config/discourse.conf
+            '';
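+          # (replace-secret substitutes the literal file path written into discourse.conf
+          # with that file's contents at service start, so the secret value itself never
+          # ends up in the Nix store.)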
+
+          mkAdmin = ''
+            export ADMIN_EMAIL="${cfg.admin.email}"
+            export ADMIN_NAME="${cfg.admin.fullName}"
+            export ADMIN_USERNAME="${cfg.admin.username}"
+            ADMIN_PASSWORD="$(<${cfg.admin.passwordFile})"
+            export ADMIN_PASSWORD
+            discourse-rake admin:create_noninteractively
+          '';
+
+        in ''
+          set -o errexit -o pipefail -o nounset -o errtrace
+          shopt -s inherit_errexit
+
+          umask u=rwx,g=rx,o=
+
+          rm -rf /var/lib/discourse/tmp/*
+
+          cp -r ${cfg.package}/share/discourse/config.dist/* /run/discourse/config/
+          cp -r ${cfg.package}/share/discourse/public.dist/* /run/discourse/public/
+          ln -sf /var/lib/discourse/uploads /run/discourse/public/uploads
+          ln -sf /var/lib/discourse/backups /run/discourse/public/backups
+
+          (
+              umask u=rwx,g=,o=
+
+              ${utils.genJqSecretsReplacementSnippet
+                  cfg.siteSettings
+                  "/run/discourse/config/nixos_site_settings.json"
+              }
+              install -T -m 0600 -o discourse ${discourseConf} /run/discourse/config/discourse.conf
+              ${mkSecretReplacement cfg.database.passwordFile}
+              ${mkSecretReplacement cfg.mail.outgoing.passwordFile}
+              ${mkSecretReplacement cfg.redis.passwordFile}
+              ${mkSecretReplacement cfg.secretKeyBaseFile}
+              chmod 0400 /run/discourse/config/discourse.conf
+          )
+
+          discourse-rake db:migrate >>/var/log/discourse/db_migration.log
+          chmod -R u+w /var/lib/discourse/tmp/
+
+          ${lib.optionalString (!cfg.admin.skipCreate) mkAdmin}
+
+          discourse-rake themes:update
+          discourse-rake uploads:regenerate_missing_optimized
+        '';
+
+      serviceConfig = {
+        Type = "simple";
+        User = "discourse";
+        Group = "discourse";
+        RuntimeDirectory = map (p: "discourse/" + p) [
+          "config"
+          "home"
+          "assets/javascripts/plugins"
+          "public"
+          "sockets"
+        ];
+        RuntimeDirectoryMode = "0750";
+        StateDirectory = map (p: "discourse/" + p) [
+          "uploads"
+          "backups"
+          "tmp"
+        ];
+        StateDirectoryMode = "0750";
+        LogsDirectory = "discourse";
+        TimeoutSec = "infinity";
+        Restart = "on-failure";
+        WorkingDirectory = "${cfg.package}/share/discourse";
+
+        RemoveIPC = true;
+        PrivateTmp = true;
+        NoNewPrivileges = true;
+        RestrictSUIDSGID = true;
+        ProtectSystem = "strict";
+        ProtectHome = "read-only";
+
+        ExecStart = "${cfg.package.rubyEnv}/bin/bundle exec config/unicorn_launcher -E production -c config/unicorn.conf.rb";
+      };
+    };
+
+    services.nginx = lib.mkIf cfg.nginx.enable {
+      enable = true;
+
+      recommendedTlsSettings = true;
+      recommendedOptimisation = true;
+      recommendedBrotliSettings = true;
+      recommendedGzipSettings = true;
+      recommendedProxySettings = true;
+
+      upstreams.discourse.servers."unix:/run/discourse/sockets/unicorn.sock" = {};
+
+      appendHttpConfig = ''
+        # inactive means cached entries are kept for 1440 minutes (one day) regardless of last access
+        # levels means a two-level directory hierarchy, since we can have lots of files
+        # max_size limits the size of the cache
+        proxy_cache_path /var/cache/nginx inactive=1440m levels=1:2 keys_zone=discourse:10m max_size=600m;
+
+        # see: https://meta.discourse.org/t/x/74060
+        proxy_buffer_size 8k;
+      '';
+
+      virtualHosts.${cfg.hostname} = {
+        inherit (cfg) sslCertificate sslCertificateKey enableACME;
+        forceSSL = lib.mkDefault tlsEnabled;
+
+        root = "${cfg.package}/share/discourse/public";
+
+        locations =
+          let
+            proxy = { extraConfig ? "" }: {
+              proxyPass = "http://discourse";
+              extraConfig = extraConfig + ''
+                proxy_set_header X-Request-Start "t=''${msec}";
+              '';
+            };
+            cache = time: ''
+              expires ${time};
+              add_header Cache-Control public,immutable;
+            '';
+            cache_1y = cache "1y";
+            cache_1d = cache "1d";
+          in
+            {
+              "/".tryFiles = "$uri @discourse";
+              "@discourse" = proxy {};
+              "^~ /backups/".extraConfig = ''
+                internal;
+              '';
+              "/favicon.ico" = {
+                return = "204";
+                extraConfig = ''
+                  access_log off;
+                  log_not_found off;
+                '';
+              };
+              "~ ^/uploads/short-url/" = proxy {};
+              "~ ^/secure-media-uploads/" = proxy {};
+              "~* (fonts|assets|plugins|uploads)/.*\.(eot|ttf|woff|woff2|ico|otf)$".extraConfig = cache_1y + ''
+                add_header Access-Control-Allow-Origin *;
+              '';
+              "/srv/status" = proxy {
+                extraConfig = ''
+                  access_log off;
+                  log_not_found off;
+                '';
+              };
+              "~ ^/javascripts/".extraConfig = cache_1d;
+              "~ ^/assets/(?<asset_path>.+)$".extraConfig = cache_1y + ''
+                # asset pipeline enables this
+                brotli_static on;
+                gzip_static on;
+              '';
+              "~ ^/plugins/".extraConfig = cache_1y;
+              "~ /images/emoji/".extraConfig = cache_1y;
+              "~ ^/uploads/" = proxy {
+                extraConfig = cache_1y + ''
+                  proxy_set_header X-Sendfile-Type X-Accel-Redirect;
+                  proxy_set_header X-Accel-Mapping ${cfg.package}/share/discourse/public/=/downloads/;
+
+                  # custom CSS
+                  location ~ /stylesheet-cache/ {
+                      try_files $uri =404;
+                  }
+                  # this allows us to bypass rails
+                  location ~* \.(gif|png|jpg|jpeg|bmp|tif|tiff|ico|webp)$ {
+                      try_files $uri =404;
+                  }
+                  # SVG needs an extra header attached
+                  location ~* \.(svg)$ {
+                  }
+                  # thumbnails & optimized images
+                  location ~ /_?optimized/ {
+                      try_files $uri =404;
+                  }
+                '';
+              };
+              "~ ^/admin/backups/" = proxy {
+                extraConfig = ''
+                  proxy_set_header X-Sendfile-Type X-Accel-Redirect;
+                  proxy_set_header X-Accel-Mapping ${cfg.package}/share/discourse/public/=/downloads/;
+                '';
+              };
+              "~ ^/(svg-sprite/|letter_avatar/|letter_avatar_proxy/|user_avatar|highlight-js|stylesheets|theme-javascripts|favicon/proxied|service-worker)" = proxy {
+                extraConfig = ''
+                  # if Set-Cookie is in the response, nothing gets cached
+                  # this is doubly bad because we are not passing Last-Modified in either
+                  proxy_ignore_headers "Set-Cookie";
+                  proxy_hide_header "Set-Cookie";
+                  proxy_hide_header "X-Discourse-Username";
+                  proxy_hide_header "X-Runtime";
+
+                  # note x-accel-redirect can not be used with proxy_cache
+                  proxy_cache discourse;
+                  proxy_cache_key "$scheme,$host,$request_uri";
+                  proxy_cache_valid 200 301 302 7d;
+                '';
+              };
+              "/message-bus/" = proxy {
+                extraConfig = ''
+                  proxy_http_version 1.1;
+                  proxy_buffering off;
+                '';
+              };
+              "/downloads/".extraConfig = ''
+                internal;
+                alias ${cfg.package}/share/discourse/public/;
+              '';
+            };
+      };
+    };
+
+    systemd.services.discourse-mail-receiver-setup = lib.mkIf cfg.mail.incoming.enable (
+      let
+        mail-receiver-environment = {
+          MAIL_DOMAIN = cfg.hostname;
+          DISCOURSE_BASE_URL = "http${lib.optionalString tlsEnabled "s"}://${cfg.hostname}";
+          DISCOURSE_API_KEY = "@api-key@";
+          DISCOURSE_API_USERNAME = "system";
+        };
+        mail-receiver-json = json.generate "mail-receiver.json" mail-receiver-environment;
+      in
+        {
+          before = [ "postfix.service" ];
+          after = [ "discourse.service" ];
+          wantedBy = [ "discourse.service" ];
+          partOf = [ "discourse.service" ];
+          path = [
+            cfg.package.rake
+            pkgs.jq
+          ];
+          preStart = lib.optionalString (cfg.mail.incoming.apiKeyFile == null) ''
+            set -o errexit -o pipefail -o nounset -o errtrace
+            shopt -s inherit_errexit
+
+            if [[ ! -e /var/lib/discourse-mail-receiver/api_key ]]; then
+                discourse-rake api_key:create_master[email-receiver] >/var/lib/discourse-mail-receiver/api_key
+            fi
+          '';
+          script =
+            let
+              apiKeyPath =
+                if cfg.mail.incoming.apiKeyFile == null then
+                  "/var/lib/discourse-mail-receiver/api_key"
+                else
+                  cfg.mail.incoming.apiKeyFile;
+            in ''
+              set -o errexit -o pipefail -o nounset -o errtrace
+              shopt -s inherit_errexit
+
+              api_key=$(<'${apiKeyPath}')
+              export api_key
+
+              jq <${mail-receiver-json} \
+                 '.DISCOURSE_API_KEY = $ENV.api_key' \
+                 >'/run/discourse-mail-receiver/mail-receiver-environment.json'
+            '';
+
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+            RuntimeDirectory = "discourse-mail-receiver";
+            RuntimeDirectoryMode = "0700";
+            StateDirectory = "discourse-mail-receiver";
+            User = "discourse";
+            Group = "discourse";
+          };
+        });
+
+    services.discourse.siteSettings = {
+      required = {
+        notification_email = cfg.mail.notificationEmailAddress;
+        contact_email = cfg.mail.contactEmailAddress;
+      };
+      security.force_https = tlsEnabled;
+      email = {
+        manual_polling_enabled = cfg.mail.incoming.enable;
+        reply_by_email_enabled = cfg.mail.incoming.enable;
+        reply_by_email_address = cfg.mail.incoming.replyEmailAddress;
+      };
+    };
+
+    services.postfix = lib.mkIf cfg.mail.incoming.enable {
+      enable = true;
+      sslCert = lib.optionalString (cfg.sslCertificate != null) cfg.sslCertificate;
+      sslKey = lib.optionalString (cfg.sslCertificateKey != null) cfg.sslCertificateKey;
+
+      origin = cfg.hostname;
+      relayDomains = [ cfg.hostname ];
+      config = {
+        smtpd_recipient_restrictions = "check_policy_service unix:private/discourse-policy";
+        append_dot_mydomain = lib.mkDefault false;
+        compatibility_level = "2";
+        smtputf8_enable = false;
+        smtpd_banner = lib.mkDefault "ESMTP server";
+        myhostname = lib.mkDefault cfg.hostname;
+        mydestination = lib.mkDefault "localhost";
+      };
+      transport = ''
+        ${cfg.hostname} discourse-mail-receiver:
+      '';
+      masterConfig = {
+        "discourse-mail-receiver" = {
+          type = "unix";
+          privileged = true;
+          chroot = false;
+          command = "pipe";
+          args = [
+            "user=discourse"
+            "argv=${cfg.mail.incoming.mailReceiverPackage}/bin/receive-mail"
+            "\${recipient}"
+          ];
+        };
+        "discourse-policy" = {
+          type = "unix";
+          privileged = true;
+          chroot = false;
+          command = "spawn";
+          args = [
+            "user=discourse"
+            "argv=${cfg.mail.incoming.mailReceiverPackage}/bin/discourse-smtp-fast-rejection"
+          ];
+        };
+      };
+    };
+
+    users.users = {
+      discourse = {
+        group = "discourse";
+        isSystemUser = true;
+      };
+    } // (lib.optionalAttrs cfg.nginx.enable {
+      ${config.services.nginx.user}.extraGroups = [ "discourse" ];
+    });
+
+    users.groups = {
+      discourse = {};
+    };
+
+    environment.systemPackages = [
+      cfg.package.rake
+    ];
+  };
+
+  meta.doc = ./discourse.md;
+  meta.maintainers = [ lib.maintainers.talyz ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/documize.nix b/nixpkgs/nixos/modules/services/web-apps/documize.nix
new file mode 100644
index 000000000000..f70da0829f44
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/documize.nix
@@ -0,0 +1,137 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.documize;
+
+  mkParams = optional: concatMapStrings (name: let
+    predicate = optional -> cfg.${name} != null;
+    template = " -${name} '${toString cfg.${name}}'";
+  in optionalString predicate template);
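+  # For illustration: with cfg.db and cfg.dbtype set, `mkParams false [ "db" "dbtype" ]`
+  # yields " -db '<connection string>' -dbtype 'postgresql'"; with optional = true,
+  # options whose value is null are omitted entirely.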
+
+in {
+  options.services.documize = {
+    enable = mkEnableOption (lib.mdDoc "Documize Wiki");
+
+    stateDirectoryName = mkOption {
+      type = types.str;
+      default = "documize";
+      description = lib.mdDoc ''
+        The name of the directory below {file}`/var/lib/private` in which
+        Documize runs and stores data, for example backups.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.documize-community;
+      defaultText = literalExpression "pkgs.documize-community";
+      description = lib.mdDoc ''
+        Which package to use for documize.
+      '';
+    };
+
+    salt = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "3edIYV6c8B28b19fh";
+      description = lib.mdDoc ''
+        The salt string used to encode JWT tokens. If not set, a random value will be generated.
+      '';
+    };
+
+    cert = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The {file}`cert.pem` file used for https.
+      '';
+    };
+
+    key = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The {file}`key.pem` file used for https.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 5001;
+      description = lib.mdDoc ''
+        The http/https port number.
+      '';
+    };
+
+    forcesslport = mkOption {
+      type = types.nullOr types.port;
+      default = null;
+      description = lib.mdDoc ''
+        Redirect given http port number to TLS.
+      '';
+    };
+
+    offline = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Set `true` for offline mode.
+      '';
+      apply = v: if true == v then 1 else 0;
+    };
+
+    dbtype = mkOption {
+      type = types.enum [ "mysql" "percona" "mariadb" "postgresql" "sqlserver" ];
+      default = "postgresql";
+      description = lib.mdDoc ''
+        Specify the database provider: `mysql`, `percona`, `mariadb`, `postgresql`, or `sqlserver`.
+      '';
+    };
+
+    db = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Database specific connection string for example:
+        - MySQL/Percona/MariaDB:
+          `user:password@tcp(host:3306)/documize`
+        - MySQLv8+:
+          `user:password@tcp(host:3306)/documize?allowNativePasswords=true`
+        - PostgreSQL:
+          `host=localhost port=5432 dbname=documize user=admin password=secret sslmode=disable`
+        - MSSQL:
+          `sqlserver://username:password@localhost:1433?database=Documize` or
+          `sqlserver://sa@localhost/SQLExpress?database=Documize`
+      '';
+    };
+
+    location = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        reserved
+      '';
+    };
+  };
+
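+  # A minimal usage sketch with a local PostgreSQL database (credentials are assumptions):
+  #   services.documize = {
+  #     enable = true;
+  #     dbtype = "postgresql";
+  #     db = "host=localhost port=5432 dbname=documize user=documize password=secret sslmode=disable";
+  #   };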
+  config = mkIf cfg.enable {
+    systemd.services.documize-server = {
+      description = "Documize Wiki";
+      documentation = [ "https://documize.com/" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = concatStringsSep " " [
+          "${cfg.package}/bin/documize"
+          (mkParams false [ "db" "dbtype" "port" ])
+          (mkParams true [ "offline" "location" "forcesslport" "key" "cert" "salt" ])
+        ];
+        Restart = "always";
+        DynamicUser = "yes";
+        StateDirectory = cfg.stateDirectoryName;
+        WorkingDirectory = "/var/lib/${cfg.stateDirectoryName}";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix b/nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix
new file mode 100644
index 000000000000..9e9bfb1bfd83
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/dokuwiki.nix
@@ -0,0 +1,602 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  inherit (lib.options) showOption showFiles;
+
+  cfg = config.services.dokuwiki;
+  eachSite = cfg.sites;
+  user = "dokuwiki";
+  webserver = config.services.${cfg.webserver};
+
+  mkPhpIni = generators.toKeyValue {
+    mkKeyValue = generators.mkKeyValueDefault {} " = ";
+  };
+  mkPhpPackage = cfg: cfg.phpPackage.buildEnv {
+    extraConfig = mkPhpIni cfg.phpOptions;
+  };
+
+  dokuwikiAclAuthConfig = hostName: cfg: let
+    inherit (cfg) acl;
+    acl_gen = concatMapStringsSep "\n" (l: "${l.page} \t ${l.actor} \t ${toString l.level}");
+  in pkgs.writeText "acl.auth-${hostName}.php" ''
+    # acl.auth.php
+    # <?php exit()?>
+    #
+    # Access Control Lists
+    #
+    ${if isString acl then acl else acl_gen acl}
+  '';
+
+  mergeConfig = cfg: {
+    useacl = false; # Dokuwiki default
+    savedir = cfg.stateDir;
+  } // cfg.settings;
+
+  writePhpFile = name: text: pkgs.writeTextFile {
+    inherit name;
+    text = "<?php\n${text}";
+    checkPhase = "${pkgs.php81}/bin/php --syntax-check $target";
+  };
+
+  mkPhpValue = v: let
+    isHasAttr = s: isAttrs v && hasAttr s v;
+  in
+    if isString v then escapeShellArg v
+    # NOTE: If any value contains a , (comma) this will not get escaped
+    else if isList v && any lib.strings.isCoercibleToString v then escapeShellArg (concatMapStringsSep "," toString v)
+    else if isInt v then toString v
+    else if isBool v then toString (if v then 1 else 0)
+    else if isHasAttr "_file" then "trim(file_get_contents(${lib.escapeShellArg v._file}))"
+    else if isHasAttr "_raw" then v._raw
+    else abort "The dokuwiki localConf value ${lib.generators.toPretty {} v} can not be encoded."
+  ;
+
+  mkPhpAttrVals = v: flatten (mapAttrsToList mkPhpKeyVal v);
+  mkPhpKeyVal = k: v: let
+    values = if (isAttrs v && (hasAttr "_file" v || hasAttr "_raw" v )) || !isAttrs v then
+      [" = ${mkPhpValue v};"]
+    else
+      mkPhpAttrVals v;
+  in map (e: "[${escapeShellArg k}]${e}") (flatten values);
+
+  dokuwikiLocalConfig = hostName: cfg: let
+    conf_gen = c: map (v: "$conf${v}") (mkPhpAttrVals c);
+  in writePhpFile "local-${hostName}.php" ''
+    ${concatStringsSep "\n" (conf_gen cfg.mergedConfig)}
+  '';
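+  # Illustrative example: a settings attrset such as
+  #   { title = "My Wiki"; plugin.smtp.smtp_user._raw = "getenv('DOKUWIKI_SMTP_USER')"; }
+  # is rendered by dokuwikiLocalConfig into PHP assignments like
+  #   $conf['title'] = 'My Wiki';
+  #   $conf['plugin']['smtp']['smtp_user'] = getenv('DOKUWIKI_SMTP_USER');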
+
+  dokuwikiPluginsLocalConfig = hostName: cfg: let
+    pc = cfg.pluginsConfig;
+    pc_gen = pc: concatStringsSep "\n" (mapAttrsToList (n: v: "$plugins['${n}'] = ${boolToString v};") pc);
+  in writePhpFile "plugins.local-${hostName}.php" ''
+    ${if isString pc then pc else pc_gen pc}
+  '';
+
+
+  pkg = hostName: cfg: cfg.package.combine {
+    inherit (cfg) plugins templates;
+
+    pname = p: "${p.pname}-${hostName}";
+
+    basePackage = cfg.package;
+    localConfig = dokuwikiLocalConfig hostName cfg;
+    pluginsConfig = dokuwikiPluginsLocalConfig hostName cfg;
+    aclConfig = if cfg.settings.useacl && cfg.acl != null then dokuwikiAclAuthConfig hostName cfg else null;
+  };
+
+  aclOpts = { ... }: {
+    options = {
+
+      page = mkOption {
+        type = types.str;
+        description = lib.mdDoc "Page or namespace to restrict";
+        example = "start";
+      };
+
+      actor = mkOption {
+        type = types.str;
+        description = lib.mdDoc "User or group to restrict";
+        example = "@external";
+      };
+
+      level = let
+        available = {
+          "none" = 0;
+          "read" = 1;
+          "edit" = 2;
+          "create" = 4;
+          "upload" = 8;
+          "delete" = 16;
+        };
+      in mkOption {
+        type = types.enum ((attrValues available) ++ (attrNames available));
+        apply = x: if isInt x then x else available.${x};
+        description = lib.mdDoc ''
+          Permission level to restrict the actor(s) to.
+          See <https://www.dokuwiki.org/acl#background_info> for explanation
+        '';
+        example = "read";
+      };
+    };
+  };
+
+  # The current implementations of `doRename` and `mkRenamedOptionModule` do not provide the full option path when used with submodules.
+  # They would only show `settings.useacl' instead of `services.dokuwiki.sites."site1.local".settings.useacl'.
+  # These functions are partially re-implemented here to help users debug by showing the full path.
+  mkRenamed = from: to: { config, options, name, ... }: let
+    pathPrefix = [ "services" "dokuwiki" "sites" name ];
+    fromPath = pathPrefix  ++ from;
+    fromOpt = getAttrFromPath from options;
+    toOp = getAttrFromPath to config;
+    toPath = pathPrefix ++ to;
+  in {
+    options = setAttrByPath from (mkOption {
+      visible = false;
+      description = lib.mdDoc "Alias of {option}`${showOption toPath}`";
+      apply = x: builtins.trace "Obsolete option `${showOption fromPath}' is used. It was renamed to ${showOption toPath}" toOp;
+    });
+    config = mkMerge [
+      {
+        warnings = optional fromOpt.isDefined
+          "The option `${showOption fromPath}' defined in ${showFiles fromOpt.files} has been renamed to `${showOption toPath}'.";
+      }
+      (lib.modules.mkAliasAndWrapDefsWithPriority (setAttrByPath to) fromOpt)
+    ];
+  };
+
+  siteOpts = { options, config, lib, name, ... }:
+    {
+      imports = [
+        (mkRenamed [ "aclUse" ] [ "settings" "useacl" ])
+        (mkRenamed [ "superUser" ] [ "settings" "superuser" ])
+        (mkRenamed [ "disableActions" ] [ "settings"  "disableactions" ])
+        ({ config, options, ... }: let
+          showPath = suffix: lib.options.showOption ([ "services" "dokuwiki" "sites" name ] ++ suffix);
+          replaceExtraConfig = "Please use `${showPath ["settings"]}' to pass structured settings instead.";
+          ecOpt = options.extraConfig;
+          ecPath = showPath [ "extraConfig" ];
+        in {
+          options.extraConfig = mkOption {
+            visible = false;
+            apply = x: throw "The option ${ecPath} can no longer be used since it's been removed.\n${replaceExtraConfig}";
+          };
+          config.assertions = [
+            {
+              assertion = !ecOpt.isDefined;
+              message = "The option definition `${ecPath}' in ${showFiles ecOpt.files} no longer has any effect; please remove it.\n${replaceExtraConfig}";
+            }
+            {
+              assertion = config.mergedConfig.useacl -> (config.acl != null || config.aclFile != null);
+              message = "Either ${showPath [ "acl" ]} or ${showPath [ "aclFile" ]} is mandatory if ${showPath [ "settings" "useacl" ]} is true";
+            }
+            {
+              assertion = config.usersFile != null -> config.mergedConfig.useacl != false;
+              message = "${showPath [ "settings" "useacl" ]} is required when ${showPath [ "usersFile" ]} is set (Currently defined as `${config.usersFile}' in ${showFiles options.usersFile.files}).";
+            }
+          ];
+        })
+      ];
+
+      options = {
+        enable = mkEnableOption (lib.mdDoc "DokuWiki web application");
+
+        package = mkOption {
+          type = types.package;
+          default = pkgs.dokuwiki;
+          defaultText = literalExpression "pkgs.dokuwiki";
+          description = lib.mdDoc "Which DokuWiki package to use.";
+        };
+
+        stateDir = mkOption {
+          type = types.path;
+          default = "/var/lib/dokuwiki/${name}/data";
+          description = lib.mdDoc "Location of the DokuWiki state directory.";
+        };
+
+        acl = mkOption {
+          type = with types; nullOr (listOf (submodule aclOpts));
+          default = null;
+          example = literalExpression ''
+            [
+              {
+                page = "start";
+                actor = "@external";
+                level = "read";
+              }
+              {
+                page = "*";
+                actor = "@users";
+                level = "upload";
+              }
+            ]
+          '';
+          description = lib.mdDoc ''
+            Access Control Lists: see <https://www.dokuwiki.org/acl>
+            Mutually exclusive with services.dokuwiki.aclFile
+            Set this to a value other than null to take precedence over aclFile option.
+
+            Warning: Consider using aclFile instead if you do not
+            want to store the ACL in the world-readable Nix store.
+          '';
+        };
+
+        aclFile = mkOption {
+          type = with types; nullOr str;
+          default = if (config.mergedConfig.useacl && config.acl == null) then "/var/lib/dokuwiki/${name}/acl.auth.php" else null;
+          description = lib.mdDoc ''
+            Location of the dokuwiki acl rules. Mutually exclusive with
+            services.dokuwiki.acl, which takes precedence when set.
+            Consult documentation <https://www.dokuwiki.org/acl> for further instructions.
+            Example: <https://github.com/splitbrain/dokuwiki/blob/master/conf/acl.auth.php.dist>
+          '';
+          example = "/var/lib/dokuwiki/${name}/acl.auth.php";
+        };
+
+        pluginsConfig = mkOption {
+          type = with types; attrsOf bool;
+          default = {
+            authad = false;
+            authldap = false;
+            authmysql = false;
+            authpgsql = false;
+          };
+          description = lib.mdDoc ''
+            List of the dokuwiki (un)loaded plugins.
+          '';
+        };
+
+        usersFile = mkOption {
+          type = with types; nullOr str;
+          default = if config.mergedConfig.useacl then "/var/lib/dokuwiki/${name}/users.auth.php" else null;
+          description = lib.mdDoc ''
+            Location of the dokuwiki users file. List of users. Format:
+
+                login:passwordhash:Real Name:email:groups,comma,separated
+
+            Create passwordHash easily by using:
+
+                mkpasswd -5 password `pwgen 8 1`
+
+            Example: <https://github.com/splitbrain/dokuwiki/blob/master/conf/users.auth.php.dist>
+            '';
+          example = "/var/lib/dokuwiki/${name}/users.auth.php";
+        };
+
+        plugins = mkOption {
+          type = types.listOf types.path;
+          default = [];
+          description = lib.mdDoc ''
+                List of path(s) to respective plugin(s) which are copied from the 'plugin' directory.
+
+                ::: {.note}
+                These plugins need to be packaged before use, see example.
+                :::
+          '';
+          example = literalExpression ''
+                let
+                  plugin-icalevents = pkgs.stdenv.mkDerivation rec {
+                    name = "icalevents";
+                    version = "2017-06-16";
+                    src = pkgs.fetchzip {
+                      stripRoot = false;
+                      url = "https://github.com/real-or-random/dokuwiki-plugin-icalevents/releases/download/''${version}/dokuwiki-plugin-icalevents-''${version}.zip";
+                      hash = "sha256-IPs4+qgEfe8AAWevbcCM9PnyI0uoyamtWeg4rEb+9Wc=";
+                    };
+                    installPhase = "mkdir -p $out; cp -R * $out/";
+                  };
+                # And then pass this theme to the plugin list like this:
+                in [ plugin-icalevents ]
+          '';
+        };
+
+        templates = mkOption {
+          type = types.listOf types.path;
+          default = [];
+          description = lib.mdDoc ''
+                List of path(s) to respective template(s) which are copied from the 'tpl' directory.
+
+                ::: {.note}
+                These templates need to be packaged before use, see example.
+                :::
+          '';
+          example = literalExpression ''
+                let
+                  template-bootstrap3 = pkgs.stdenv.mkDerivation rec {
+                  name = "bootstrap3";
+                  version = "2022-07-27";
+                  src = pkgs.fetchFromGitHub {
+                    owner = "giterlizzi";
+                    repo = "dokuwiki-template-bootstrap3";
+                    rev = "v''${version}";
+                    hash = "sha256-B3Yd4lxdwqfCnfmZdp+i/Mzwn/aEuZ0ovagDxuR6lxo=";
+                  };
+                  installPhase = "mkdir -p $out; cp -R * $out/";
+                };
+                # And then pass this theme to the template list like this:
+                in [ template-bootstrap3 ]
+          '';
+        };
+
+        poolConfig = mkOption {
+          type = with types; attrsOf (oneOf [ str int bool ]);
+          default = {
+            "pm" = "dynamic";
+            "pm.max_children" = 32;
+            "pm.start_servers" = 2;
+            "pm.min_spare_servers" = 2;
+            "pm.max_spare_servers" = 4;
+            "pm.max_requests" = 500;
+          };
+          description = lib.mdDoc ''
+            Options for the DokuWiki PHP pool. See the documentation on `php-fpm.conf`
+            for details on configuration directives.
+          '';
+        };
+
+        phpPackage = mkOption {
+          type = types.package;
+          relatedPackages = [ "php81" "php82" ];
+          default = pkgs.php81;
+          defaultText = "pkgs.php81";
+          description = lib.mdDoc ''
+            PHP package to use for this dokuwiki site.
+          '';
+        };
+
+        phpOptions = mkOption {
+          type = types.attrsOf types.str;
+          default = {};
+          description = lib.mdDoc ''
+            Options for PHP's php.ini file for this dokuwiki site.
+          '';
+          example = literalExpression ''
+          {
+            "opcache.interned_strings_buffer" = "8";
+            "opcache.max_accelerated_files" = "10000";
+            "opcache.memory_consumption" = "128";
+            "opcache.revalidate_freq" = "15";
+            "opcache.fast_shutdown" = "1";
+          }
+          '';
+        };
+
+        settings = mkOption {
+          type = types.attrsOf types.anything;
+          default = {
+            useacl = true;
+            superuser = "admin";
+          };
+          description = lib.mdDoc ''
+            Structural DokuWiki configuration.
+            Refer to <https://www.dokuwiki.org/config>
+            for details and supported values.
+            Settings can either be directly set from nix,
+            loaded from a file using `._file` or obtained from any
+            PHP function calls using `._raw`.
+          '';
+          example = literalExpression ''
+            {
+              title = "My Wiki";
+              userewrite = 1;
+              disableactions = [ "register" ]; # Will be concatenated with commas
+              plugin.smtp = {
+                smtp_pass._file = "/var/run/secrets/dokuwiki/smtp_pass";
+                smtp_user._raw = "getenv('DOKUWIKI_SMTP_USER')";
+              };
+            }
+          '';
+        };
+
+        mergedConfig = mkOption {
+          readOnly = true;
+          default = mergeConfig config;
+          defaultText = literalExpression ''
+            {
+              useacl = true;
+            }
+          '';
+          description = lib.mdDoc ''
+            Read-only representation of the final configuration.
+          '';
+        };
+
+      # Required for the mkRenamedOptionModule
+      # TODO: Remove me once https://github.com/NixOS/nixpkgs/issues/96006 is fixed
+      # or we don't have any more notes about the removal of extraConfig, ...
+      warnings = mkOption {
+        type = types.listOf types.unspecified;
+        default = [ ];
+        visible = false;
+        internal = true;
+      };
+      assertions = mkOption {
+        type = types.listOf types.unspecified;
+        default = [ ];
+        visible = false;
+        internal = true;
+      };
+    };
+  };
+in
+{
+  options = {
+    services.dokuwiki = {
+
+      sites = mkOption {
+        type = types.attrsOf (types.submodule siteOpts);
+        default = {};
+        description = lib.mdDoc "Specification of one or more DokuWiki sites to serve";
+      };
+
+      webserver = mkOption {
+        type = types.enum [ "nginx" "caddy" ];
+        default = "nginx";
+        description = lib.mdDoc ''
+          Whether to use nginx or caddy for virtual host management.
+
+          Further nginx configuration can be done by adapting `services.nginx.virtualHosts.<name>`.
+          See [](#opt-services.nginx.virtualHosts) for further information.
+
+          Further caddy configuration can be done by adapting `services.caddy.virtualHosts.<name>`.
+          See [](#opt-services.caddy.virtualHosts) for further information.
+        '';
+      };
+
+    };
+  };
+
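+  # A minimal usage sketch (host name and settings are illustrative assumptions):
+  #   services.dokuwiki.sites."wiki.example.com" = {
+  #     enable = true;
+  #     settings = {
+  #       useacl = true;
+  #       superuser = "admin";
+  #       title = "Example Wiki";
+  #     };
+  #   };
+  # nginx is used by default; set services.dokuwiki.webserver = "caddy" to serve via caddy.
+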
+  # implementation
+  config = mkIf (eachSite != {}) (mkMerge [{
+
+    warnings = flatten (mapAttrsToList (_: cfg: cfg.warnings) eachSite);
+
+    assertions = flatten (mapAttrsToList (_: cfg: cfg.assertions) eachSite);
+
+    services.phpfpm.pools = mapAttrs' (hostName: cfg: (
+      nameValuePair "dokuwiki-${hostName}" {
+        inherit user;
+        group = webserver.group;
+
+        phpPackage = mkPhpPackage cfg;
+        phpEnv = optionalAttrs (cfg.usersFile != null) {
+          DOKUWIKI_USERS_AUTH_CONFIG = "${cfg.usersFile}";
+        } // optionalAttrs (cfg.mergedConfig.useacl) {
+          DOKUWIKI_ACL_AUTH_CONFIG = if (cfg.acl != null) then "${dokuwikiAclAuthConfig hostName cfg}" else "${toString cfg.aclFile}";
+        };
+
+        settings = {
+          "listen.owner" = webserver.user;
+          "listen.group" = webserver.group;
+        } // cfg.poolConfig;
+      }
+    )) eachSite;
+
+  }
+
+  {
+    systemd.tmpfiles.rules = flatten (mapAttrsToList (hostName: cfg: [
+      "d ${cfg.stateDir}/attic 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/cache 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/index 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/locks 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/log 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/media 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/media_attic 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/media_meta 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/meta 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/pages 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/tmp 0750 ${user} ${webserver.group} - -"
+    ] ++ lib.optional (cfg.aclFile != null) "C ${cfg.aclFile} 0640 ${user} ${webserver.group} - ${pkg hostName cfg}/share/dokuwiki/conf/acl.auth.php.dist"
+    ++ lib.optional (cfg.usersFile != null) "C ${cfg.usersFile} 0640 ${user} ${webserver.group} - ${pkg hostName cfg}/share/dokuwiki/conf/users.auth.php.dist"
+    ) eachSite);
+
+    users.users.${user} = {
+      group = webserver.group;
+      isSystemUser = true;
+    };
+  }
+
+  (mkIf (cfg.webserver == "nginx") {
+    services.nginx = {
+      enable = true;
+      virtualHosts = mapAttrs (hostName: cfg: {
+        serverName = mkDefault hostName;
+        root = "${pkg hostName cfg}/share/dokuwiki";
+
+        locations = {
+          "~ /(conf/|bin/|inc/|install.php)" = {
+            extraConfig = "deny all;";
+          };
+
+          "~ ^/data/" = {
+            root = "${cfg.stateDir}";
+            extraConfig = "internal;";
+          };
+
+          "~ ^/lib.*\.(js|css|gif|png|ico|jpg|jpeg)$" = {
+            extraConfig = "expires 365d;";
+          };
+
+          "/" = {
+            priority = 1;
+            index = "doku.php";
+            extraConfig = ''try_files $uri $uri/ @dokuwiki;'';
+          };
+
+          "@dokuwiki" = {
+            extraConfig = ''
+              # rewrites "doku.php/" out of the URLs if you set the userewrite setting to .htaccess on the dokuwiki config page
+              rewrite ^/_media/(.*) /lib/exe/fetch.php?media=$1 last;
+              rewrite ^/_detail/(.*) /lib/exe/detail.php?media=$1 last;
+              rewrite ^/_export/([^/]+)/(.*) /doku.php?do=export_$1&id=$2 last;
+              rewrite ^/(.*) /doku.php?id=$1&$args last;
+            '';
+          };
+
+          "~ \\.php$" = {
+            extraConfig = ''
+              try_files $uri $uri/ /doku.php;
+              include ${config.services.nginx.package}/conf/fastcgi_params;
+              fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+              fastcgi_param REDIRECT_STATUS 200;
+              fastcgi_pass unix:${config.services.phpfpm.pools."dokuwiki-${hostName}".socket};
+              '';
+          };
+
+        };
+      }) eachSite;
+    };
+  })
+
+  (mkIf (cfg.webserver == "caddy") {
+    services.caddy = {
+      enable = true;
+      virtualHosts = mapAttrs' (hostName: cfg: (
+        nameValuePair "http://${hostName}" {
+          extraConfig = ''
+            root * ${pkg hostName cfg}/share/dokuwiki
+            file_server
+
+            encode zstd gzip
+            php_fastcgi unix/${config.services.phpfpm.pools."dokuwiki-${hostName}".socket}
+
+            @restrict_files {
+              path /data/* /conf/* /bin/* /inc/* /vendor/* /install.php
+            }
+
+            respond @restrict_files 404
+
+            @allow_media {
+              path_regexp path ^/_media/(.*)$
+            }
+            rewrite @allow_media /lib/exe/fetch.php?media=/{http.regexp.path.1}
+
+            @allow_detail   {
+              path /_detail*
+            }
+            rewrite @allow_detail /lib/exe/detail.php?media={path}
+
+            @allow_export   {
+              path /_export*
+              path_regexp export /([^/]+)/(.*)
+            }
+            rewrite @allow_export /doku.php?do=export_{http.regexp.export.1}&id={http.regexp.export.2}
+
+            try_files {path} {path}/ /doku.php?id={path}&{query}
+          '';
+        }
+      )) eachSite;
+    };
+  })
+
+  ]);
+
+  meta.maintainers = with maintainers; [
+    _1000101
+    onny
+    dandellion
+    e1mo
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/dolibarr.nix b/nixpkgs/nixos/modules/services/web-apps/dolibarr.nix
new file mode 100644
index 000000000000..453229c130c2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/dolibarr.nix
@@ -0,0 +1,323 @@
+{ config, pkgs, lib, ... }:
+let
+  inherit (lib) any boolToString concatStringsSep isBool isString mapAttrsToList mkDefault mkEnableOption mkIf mkMerge mkOption optionalAttrs types;
+
+  package = pkgs.dolibarr.override { inherit (cfg) stateDir; };
+
+  cfg = config.services.dolibarr;
+  vhostCfg = lib.optionalAttrs (cfg.nginx != null) config.services.nginx.virtualHosts."${cfg.domain}";
+
+  mkConfigFile = filename: settings:
+    let
+      # hack in special logic for secrets so we read them from a separate file avoiding the nix store
+      secretKeys = [ "force_install_databasepass" "dolibarr_main_db_pass" "dolibarr_main_instance_unique_id" ];
+
+      toStr = k: v:
+        if (any (str: k == str) secretKeys) then v
+        else if isString v then "'${v}'"
+        else if isBool v then boolToString v
+        else if v == null then "null"
+        else toString v
+      ;
+    in
+      pkgs.writeText filename ''
+        <?php
+        ${concatStringsSep "\n" (mapAttrsToList (k: v: "\$${k} = ${toStr k v};") settings)}
+      '';
+
+  # see https://github.com/Dolibarr/dolibarr/blob/develop/htdocs/install/install.forced.sample.php for all possible values
+  install = {
+    force_install_noedit = 2;
+    force_install_main_data_root = "${cfg.stateDir}/documents";
+    force_install_nophpinfo = true;
+    force_install_lockinstall = "444";
+    force_install_distrib = "nixos";
+    force_install_type = "mysqli";
+    force_install_dbserver = cfg.database.host;
+    force_install_port = toString cfg.database.port;
+    force_install_database = cfg.database.name;
+    force_install_databaselogin = cfg.database.user;
+
+    force_install_mainforcehttps = vhostCfg.forceSSL or false;
+    force_install_createuser = false;
+    force_install_dolibarrlogin = null;
+  } // optionalAttrs (cfg.database.passwordFile != null) {
+    force_install_databasepass = ''file_get_contents("${cfg.database.passwordFile}")'';
+  };
+in
+{
+  # interface
+  options.services.dolibarr = {
+    enable = mkEnableOption (lib.mdDoc "dolibarr");
+
+    domain = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc ''
+        Domain name of your server.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "dolibarr";
+      description = lib.mdDoc ''
+        User account under which dolibarr runs.
+
+        ::: {.note}
+        If left as the default value, this user will automatically be created
+        on system activation; otherwise, you are responsible for
+        ensuring the user exists before the dolibarr application starts.
+        :::
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "dolibarr";
+      description = lib.mdDoc ''
+        Group account under which dolibarr runs.
+
+        ::: {.note}
+        If left as the default value, this group will automatically be created
+        on system activation; otherwise, you are responsible for
+        ensuring the group exists before the dolibarr application starts.
+        :::
+      '';
+    };
+
+    stateDir = mkOption {
+      type = types.str;
+      default = "/var/lib/dolibarr";
+      description = lib.mdDoc ''
+        State and configuration directory dolibarr will use.
+      '';
+    };
+
+    database = {
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Database host address.";
+      };
+      port = mkOption {
+        type = types.port;
+        default = 3306;
+        description = lib.mdDoc "Database host port.";
+      };
+      name = mkOption {
+        type = types.str;
+        default = "dolibarr";
+        description = lib.mdDoc "Database name.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = "dolibarr";
+        description = lib.mdDoc "Database username.";
+      };
+      passwordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "/run/keys/dolibarr-dbpassword";
+        description = lib.mdDoc "Database password file.";
+      };
+      createLocally = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Create the database and database user locally.";
+      };
+    };
+
+    settings = mkOption {
+      type = with types; (attrsOf (oneOf [ bool int str ]));
+      default = { };
+      description = lib.mdDoc "Dolibarr settings, see <https://github.com/Dolibarr/dolibarr/blob/develop/htdocs/conf/conf.php.example> for details.";
+    };
+
+    nginx = mkOption {
+      type = types.nullOr (types.submodule (
+        lib.recursiveUpdate
+          (import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
+          {
+            # enable encryption by default,
+            # as sensitive login and Dolibarr (ERP) data should not be transmitted in clear text.
+            options.forceSSL.default = true;
+            options.enableACME.default = true;
+          }
+      ));
+      default = null;
+      example = lib.literalExpression ''
+        {
+          serverAliases = [
+            "dolibarr.''${config.networking.domain}"
+            "erp.''${config.networking.domain}"
+          ];
+          enableACME = false;
+        }
+      '';
+      description = lib.mdDoc ''
+          With this option, you can customize an nginx virtual host which already has sensible defaults for Dolibarr.
+          Set to {} if you do not need any customization to the virtual host.
+          If enabled, then by default, the {option}`serverName` is
+          `''${domain}`,
+          SSL is active, and certificates are acquired via ACME.
+          If this is set to null (the default), no nginx virtualHost will be configured.
+      '';
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = lib.mdDoc ''
+        Options for the Dolibarr PHP pool. See the documentation on [`php-fpm.conf`](https://www.php.net/manual/en/install.fpm.configuration.php)
+        for details on configuration directives.
+      '';
+    };
+  };
+
+  # implementation
+  config = mkIf cfg.enable (mkMerge [
+    {
+
+    assertions = [
+      { assertion = cfg.database.createLocally -> cfg.database.user == cfg.user;
+        message = "services.dolibarr.database.user must match services.dolibarr.user if the database is to be automatically provisioned";
+      }
+    ];
+
+    services.dolibarr.settings = {
+      dolibarr_main_url_root = "https://${cfg.domain}";
+      dolibarr_main_document_root = "${package}/htdocs";
+      dolibarr_main_url_root_alt = "/custom";
+      dolibarr_main_data_root = "${cfg.stateDir}/documents";
+
+      dolibarr_main_db_host = cfg.database.host;
+      dolibarr_main_db_port = toString cfg.database.port;
+      dolibarr_main_db_name = cfg.database.name;
+      dolibarr_main_db_prefix = "llx_";
+      dolibarr_main_db_user = cfg.database.user;
+      dolibarr_main_db_pass = mkIf (cfg.database.passwordFile != null) ''
+        file_get_contents("${cfg.database.passwordFile}")
+      '';
+      dolibarr_main_db_type = "mysqli";
+      dolibarr_main_db_character_set = mkDefault "utf8";
+      dolibarr_main_db_collation = mkDefault "utf8_unicode_ci";
+
+      # Authentication settings
+      dolibarr_main_authentication = mkDefault "dolibarr";
+
+      # Security settings
+      dolibarr_main_prod = true;
+      dolibarr_main_force_https = vhostCfg.forceSSL or false;
+      dolibarr_main_restrict_os_commands = "${pkgs.mariadb}/bin/mysqldump, ${pkgs.mariadb}/bin/mysql";
+      dolibarr_nocsrfcheck = false;
+      dolibarr_main_instance_unique_id = ''
+        file_get_contents("${cfg.stateDir}/dolibarr_main_instance_unique_id")
+      '';
+      dolibarr_mailing_limit_sendbyweb = false;
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group}"
+      "d '${cfg.stateDir}/documents' 0750 ${cfg.user} ${cfg.group}"
+      "f '${cfg.stateDir}/conf.php' 0660 ${cfg.user} ${cfg.group}"
+      "L '${cfg.stateDir}/install.forced.php' - ${cfg.user} ${cfg.group} - ${mkConfigFile "install.forced.php" install}"
+    ];
+
+    services.mysql = mkIf cfg.database.createLocally {
+      enable = mkDefault true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.nginx.enable = mkIf (cfg.nginx != null) true;
+    services.nginx.virtualHosts."${cfg.domain}" = mkIf (cfg.nginx != null) (lib.mkMerge [
+      cfg.nginx
+      ({
+        root = lib.mkForce "${package}/htdocs";
+        locations."/".index = "index.php";
+        locations."~ [^/]\\.php(/|$)" = {
+          extraConfig = ''
+            fastcgi_split_path_info ^(.+?\.php)(/.*)$;
+            fastcgi_pass unix:${config.services.phpfpm.pools.dolibarr.socket};
+          '';
+        };
+      })
+    ]);
+
+    systemd.services."phpfpm-dolibarr".after = mkIf cfg.database.createLocally [ "mysql.service" ];
+    services.phpfpm.pools.dolibarr = {
+      inherit (cfg) user group;
+      phpPackage = pkgs.php.buildEnv {
+        extensions = { enabled, all }: enabled ++ [ all.calendar ];
+        # recommended by dolibarr web application
+        extraConfig = ''
+          session.use_strict_mode = 1
+          session.cookie_samesite = "Lax"
+          ; open_basedir = "${package}/htdocs, ${cfg.stateDir}"
+          allow_url_fopen = 0
+          disable_functions = "pcntl_alarm, pcntl_fork, pcntl_waitpid, pcntl_wait, pcntl_wifexited, pcntl_wifstopped, pcntl_wifsignaled, pcntl_wifcontinued, pcntl_wexitstatus, pcntl_wtermsig, pcntl_wstopsig, pcntl_signal, pcntl_signal_get_handler, pcntl_signal_dispatch, pcntl_get_last_error, pcntl_strerror, pcntl_sigprocmask, pcntl_sigwaitinfo, pcntl_sigtimedwait, pcntl_exec, pcntl_getpriority, pcntl_setpriority, pcntl_async_signals"
+        '';
+      };
+
+      settings = {
+        "listen.mode" = "0660";
+        "listen.owner" = cfg.user;
+        "listen.group" = cfg.group;
+      } // cfg.poolConfig;
+    };
+
+    # there are several challenges with dolibarr and NixOS which we can address here
+    # - the dolibarr installer cannot be entirely automated, though it can partially be by including a file called install.forced.php
+    # - the dolibarr installer requires write access to its config file during installation, though not afterwards
+    # - the dolibarr config file generally holds secrets generated by the installer, though the config file is a php file so we can read and write these secrets from an external file
+    systemd.services.dolibarr-config = {
+      description = "dolibarr configuration file management via NixOS";
+      wantedBy = [ "multi-user.target" ];
+
+      script = ''
+        # extract the 'main instance unique id' secret that the dolibarr installer generated for us, store it in a file for use by our own NixOS generated configuration file
+        ${pkgs.php}/bin/php -r "include '${cfg.stateDir}/conf.php'; file_put_contents('${cfg.stateDir}/dolibarr_main_instance_unique_id', \$dolibarr_main_instance_unique_id);"
+
+        # replace configuration file generated by installer with the NixOS generated configuration file
+        install -m 644 ${mkConfigFile "conf.php" cfg.settings} '${cfg.stateDir}/conf.php'
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = cfg.user;
+        Group = cfg.group;
+        RemainAfterExit = "yes";
+      };
+
+      unitConfig = {
+        ConditionFileNotEmpty = "${cfg.stateDir}/conf.php";
+      };
+    };
+
+    users.users.dolibarr = mkIf (cfg.user == "dolibarr") {
+      isSystemUser = true;
+      group = cfg.group;
+    };
+
+    users.groups = optionalAttrs (cfg.group == "dolibarr") {
+      dolibarr = { };
+    };
+  }
+  (mkIf (cfg.nginx != null) {
+    users.users."${config.services.nginx.group}".extraGroups = mkIf (cfg.nginx != null) [ cfg.group ];
+  })
+]);
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/engelsystem.nix b/nixpkgs/nixos/modules/services/web-apps/engelsystem.nix
new file mode 100644
index 000000000000..138e2f3f1b90
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/engelsystem.nix
@@ -0,0 +1,187 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+  inherit (lib) mkDefault mkEnableOption mkIf mkOption types literalExpression;
+  cfg = config.services.engelsystem;
+in {
+  options = {
+    services.engelsystem = {
+      enable = mkOption {
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Whether to enable engelsystem, an online tool for coordinating volunteers
+          and shifts on large events.
+        '';
+        type = lib.types.bool;
+      };
+
+      domain = mkOption {
+        type = types.str;
+        example = "engelsystem.example.com";
+        description = lib.mdDoc "Domain to serve on.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        description = lib.mdDoc "Engelsystem package used for the service.";
+        default = pkgs.engelsystem;
+        defaultText = literalExpression "pkgs.engelsystem";
+      };
+
+      createDatabase = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to create a local database automatically.
+          This will override every database setting in {option}`services.engelsystem.config`.
+        '';
+      };
+    };
+
+    services.engelsystem.config = mkOption {
+      type = types.attrs;
+      default = {
+        database = {
+          host = "localhost";
+          database = "engelsystem";
+          username = "engelsystem";
+        };
+      };
+      example = {
+        maintenance = false;
+        database = {
+          host = "database.example.com";
+          database = "engelsystem";
+          username = "engelsystem";
+          password._secret = "/var/keys/engelsystem/database";
+        };
+        email = {
+          driver = "smtp";
+          host = "smtp.example.com";
+          port = 587;
+          from.address = "engelsystem@example.com";
+          from.name = "example engelsystem";
+          encryption = "tls";
+          username = "engelsystem@example.com";
+          password._secret = "/var/keys/engelsystem/mail";
+        };
+        autoarrive = true;
+        min_password_length = 6;
+        default_locale = "de_DE";
+      };
+      description = lib.mdDoc ''
+        Options to be added to config.php, as a Nix attribute set. Options containing secret data
+        should be set to an attribute set containing the attribute `_secret` - a string pointing to a
+        file containing the value the option should be set to. See the example to get a better
+        picture of this: in the resulting config.php file, the `email.password` key will be set to
+        the contents of the {file}`/var/keys/engelsystem/mail` file.
+
+        See <https://engelsystem.de/doc/admin/configuration/> for available options.
+
+        Note that the admin user login credentials cannot be set here - they always default to
+        admin:asdfasdf. Log in and change them immediately.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # create database
+    services.mysql = mkIf cfg.createDatabase {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureUsers = [{
+        name = "engelsystem";
+        ensurePermissions = { "engelsystem.*" = "ALL PRIVILEGES"; };
+      }];
+      ensureDatabases = [ "engelsystem" ];
+    };
+
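+    # /etc/engelsystem/config.php only loads the JSON file that engelsystem-init
+    # generates at runtime, so values supplied via `_secret` never enter the Nix store.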
+    environment.etc."engelsystem/config.php".source =
+      pkgs.writeText "config.php" ''
+        <?php
+        return json_decode(file_get_contents("/var/lib/engelsystem/config.json"), true);
+      '';
+
+    services.phpfpm.pools.engelsystem = {
+      phpPackage = pkgs.php81;
+      user = "engelsystem";
+      settings = {
+        "listen.owner" = config.services.nginx.user;
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.max_requests" = 500;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 5;
+        "php_admin_value[error_log]" = "stderr";
+        "php_admin_flag[log_errors]" = true;
+        "catch_workers_output" = true;
+      };
+    };
+
+    services.nginx = {
+      enable = true;
+      virtualHosts."${cfg.domain}".locations = {
+        "/" = {
+          root = "${cfg.package}/share/engelsystem/public";
+          extraConfig = ''
+            index index.php;
+            try_files $uri $uri/ /index.php?$args;
+            autoindex off;
+          '';
+        };
+        "~ \\.php$" = {
+          root = "${cfg.package}/share/engelsystem/public";
+          extraConfig = ''
+            fastcgi_pass unix:${config.services.phpfpm.pools.engelsystem.socket};
+            fastcgi_index index.php;
+            fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+            include ${config.services.nginx.package}/conf/fastcgi_params;
+            include ${config.services.nginx.package}/conf/fastcgi.conf;
+          '';
+        };
+      };
+    };
+
+    systemd.services."engelsystem-init" = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = { Type = "oneshot"; };
+      script =
+        let
+          genConfigScript = pkgs.writeScript "engelsystem-gen-config.sh"
+            (utils.genJqSecretsReplacementSnippet cfg.config "config.json");
+        in ''
+          umask 077
+          mkdir -p /var/lib/engelsystem/storage/app
+          mkdir -p /var/lib/engelsystem/storage/cache/views
+          cd /var/lib/engelsystem
+          ${genConfigScript}
+          chmod 400 config.json
+          chown -R engelsystem .
+      '';
+    };
+    systemd.services."engelsystem-migrate" = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        User = "engelsystem";
+        Group = "engelsystem";
+      };
+      script = ''
+        ${cfg.package}/bin/migrate
+      '';
+      after = [ "engelsystem-init.service" "mysql.service" ];
+    };
+    systemd.services."phpfpm-engelsystem".after =
+      [ "engelsystem-migrate.service" ];
+
+    users.users.engelsystem = {
+      isSystemUser = true;
+      createHome = true;
+      home = "/var/lib/engelsystem/storage";
+      group = "engelsystem";
+    };
+    users.groups.engelsystem = { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/ethercalc.nix b/nixpkgs/nixos/modules/services/web-apps/ethercalc.nix
new file mode 100644
index 000000000000..a5be86a34aa6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/ethercalc.nix
@@ -0,0 +1,62 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ethercalc;
+in {
+  options = {
+    services.ethercalc = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable ethercalc, an online collaborative spreadsheet server.
+
+          Persistent state will be maintained under
+          {file}`/var/lib/ethercalc`. Upstream supports using a
+          Redis server for storage and recommends the Redis backend for
+          intensive use; however, this NixOS module doesn't currently support
+          Redis.
+
+          Note that while ethercalc is a good and robust project with an active
+          issue tracker, there haven't been new commits since the end of 2020.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.ethercalc;
+        defaultText = literalExpression "pkgs.ethercalc";
+        type = types.package;
+        description = lib.mdDoc "Ethercalc package to use.";
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc "Address to listen on (use 0.0.0.0 to allow access from any address).";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8000;
+        description = lib.mdDoc "Port to bind to.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.ethercalc = {
+      description = "Ethercalc service";
+      wantedBy    = [ "multi-user.target" ];
+      after       = [ "network.target" ];
+      serviceConfig = {
+        DynamicUser      = true;
+        ExecStart        = "${cfg.package}/bin/ethercalc --host ${cfg.host} --port ${toString cfg.port}";
+        Restart          = "always";
+        StateDirectory   = "ethercalc";
+        WorkingDirectory = "/var/lib/ethercalc";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/fluidd.nix b/nixpkgs/nixos/modules/services/web-apps/fluidd.nix
new file mode 100644
index 000000000000..d4b86b9dfb39
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/fluidd.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.fluidd;
+  moonraker = config.services.moonraker;
+in
+{
+  options.services.fluidd = {
+    enable = mkEnableOption (lib.mdDoc "Fluidd, a Klipper web interface for managing your 3D printer");
+
+    package = mkOption {
+      type = types.package;
+      description = lib.mdDoc "Fluidd package to be used in the module";
+      default = pkgs.fluidd;
+      defaultText = literalExpression "pkgs.fluidd";
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Hostname to serve fluidd on";
+    };
+
+    nginx = mkOption {
+      type = types.submodule
+        (import ../web-servers/nginx/vhost-options.nix { inherit config lib; });
+      default = { };
+      example = literalExpression ''
+        {
+          serverAliases = [ "fluidd.''${config.networking.domain}" ];
+        }
+      '';
+      description = lib.mdDoc "Extra configuration for the nginx virtual host of fluidd.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.nginx = {
+      enable = true;
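+      # all printer/API traffic is proxied to the local Moonraker instance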
+      upstreams.fluidd-apiserver.servers."${moonraker.address}:${toString moonraker.port}" = { };
+      virtualHosts."${cfg.hostName}" = mkMerge [
+        cfg.nginx
+        {
+          root = mkForce "${cfg.package}/share/fluidd/htdocs";
+          locations = {
+            "/" = {
+              index = "index.html";
+              tryFiles = "$uri $uri/ /index.html";
+            };
+            "/index.html".extraConfig = ''
+              add_header Cache-Control "no-store, no-cache, must-revalidate";
+            '';
+            "/websocket" = {
+              proxyWebsockets = true;
+              proxyPass = "http://fluidd-apiserver/websocket";
+            };
+            "~ ^/(printer|api|access|machine|server)/" = {
+              proxyWebsockets = true;
+              proxyPass = "http://fluidd-apiserver$request_uri";
+            };
+          };
+        }
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/freshrss.nix b/nixpkgs/nixos/modules/services/web-apps/freshrss.nix
new file mode 100644
index 000000000000..8b4ea2aa53c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/freshrss.nix
@@ -0,0 +1,312 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.freshrss;
+
+  poolName = "freshrss";
+in
+{
+  meta.maintainers = with maintainers; [ etu stunkymonkey mattchrist ];
+
+  options.services.freshrss = {
+    enable = mkEnableOption (mdDoc "FreshRSS feed reader");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.freshrss;
+      defaultText = lib.literalExpression "pkgs.freshrss";
+      description = mdDoc "Which FreshRSS package to use.";
+    };
+
+    defaultUser = mkOption {
+      type = types.str;
+      default = "admin";
+      description = mdDoc "Default username for FreshRSS.";
+      example = "eva";
+    };
+
+    passwordFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = mdDoc "Path to a file containing the password for the defaultUser of FreshRSS.";
+      example = "/run/secrets/freshrss";
+    };
+
+    baseUrl = mkOption {
+      type = types.str;
+      description = mdDoc "Base URL for FreshRSS.";
+      example = "https://freshrss.example.com";
+    };
+
+    language = mkOption {
+      type = types.str;
+      default = "en";
+      description = mdDoc "Default language for FreshRSS.";
+      example = "de";
+    };
+
+    database = {
+      type = mkOption {
+        type = types.enum [ "sqlite" "pgsql" "mysql" ];
+        default = "sqlite";
+        description = mdDoc "Database type.";
+        example = "pgsql";
+      };
+
+      host = mkOption {
+        type = types.nullOr types.str;
+        default = "localhost";
+        description = mdDoc "Database host for FreshRSS.";
+      };
+
+      port = mkOption {
+        type = types.nullOr types.port;
+        default = null;
+        description = mdDoc "Database port for FreshRSS.";
+        example = 3306;
+      };
+
+      user = mkOption {
+        type = types.nullOr types.str;
+        default = "freshrss";
+        description = mdDoc "Database user for FreshRSS.";
+      };
+
+      passFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = mdDoc "Database password file for FreshRSS.";
+        example = "/run/secrets/freshrss";
+      };
+
+      name = mkOption {
+        type = types.nullOr types.str;
+        default = "freshrss";
+        description = mdDoc "Database name for FreshRSS.";
+      };
+
+      tableprefix = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = mdDoc "Database table prefix for FreshRSS.";
+        example = "freshrss";
+      };
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/freshrss";
+      description = mdDoc "Default data folder for FreshRSS.";
+      example = "/mnt/freshrss";
+    };
+
+    virtualHost = mkOption {
+      type = types.nullOr types.str;
+      default = "freshrss";
+      description = mdDoc ''
+        Name of the nginx virtual host to use and set up. If null, no virtual host is set up.
+      '';
+    };
+
+    pool = mkOption {
+      type = types.str;
+      default = poolName;
+      description = mdDoc ''
+        Name of the phpfpm pool to use and set up. If not specified, a pool will be created
+        with default values.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "freshrss";
+      description = lib.mdDoc "User under which FreshRSS runs.";
+    };
+
+    authType = mkOption {
+      type = types.enum [ "form" "http_auth" "none" ];
+      default = "form";
+      description = mdDoc "Authentication type for FreshRSS.";
+    };
+  };
+
+  config =
+    let
+      defaultServiceConfig = {
+        ReadWritePaths = "${cfg.dataDir}";
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+        DeviceAllow = "";
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@resources" "~@privileged" ];
+        UMask = "0007";
+        Type = "oneshot";
+        User = cfg.user;
+        Group = config.users.users.${cfg.user}.group;
+        StateDirectory = "freshrss";
+        WorkingDirectory = cfg.package;
+      };
+    in
+    mkIf cfg.enable {
+      assertions = mkIf (cfg.authType == "form") [
+        {
+          assertion = cfg.passwordFile != null;
+          message = ''
+            `passwordFile` must be supplied when using "form" authentication!
+          '';
+        }
+      ];
+      # Set up an nginx virtual host.
+      services.nginx = mkIf (cfg.virtualHost != null) {
+        enable = true;
+        virtualHosts.${cfg.virtualHost} = {
+          root = "${cfg.package}/p";
+
+          # php files handling
+          # this regex is mandatory because of the API
+          locations."~ ^.+?\\.php(/.*)?$".extraConfig = ''
+            fastcgi_pass unix:${config.services.phpfpm.pools.${cfg.pool}.socket};
+            fastcgi_split_path_info ^(.+\.php)(/.*)$;
+            # By default, the variable PATH_INFO is not set under PHP-FPM,
+            # but the FreshRSS API (greader.php) needs it. If you get a "Bad Request" error, double-check this variable!
+            # NOTE: the separate $path_info variable is required. For more details, see:
+            # https://trac.nginx.org/nginx/ticket/321
+            set $path_info $fastcgi_path_info;
+            fastcgi_param PATH_INFO $path_info;
+            include ${pkgs.nginx}/conf/fastcgi_params;
+            include ${pkgs.nginx}/conf/fastcgi.conf;
+          '';
+
+          locations."/" = {
+            tryFiles = "$uri $uri/ index.php";
+            index = "index.php index.html index.htm";
+          };
+        };
+      };
+
+      # Set up phpfpm pool
+      services.phpfpm.pools = mkIf (cfg.pool == poolName) {
+        ${poolName} = {
+          user = "freshrss";
+          settings = {
+            "listen.owner" = "nginx";
+            "listen.group" = "nginx";
+            "listen.mode" = "0600";
+            "pm" = "dynamic";
+            "pm.max_children" = 32;
+            "pm.max_requests" = 500;
+            "pm.start_servers" = 2;
+            "pm.min_spare_servers" = 2;
+            "pm.max_spare_servers" = 5;
+            "catch_workers_output" = true;
+          };
+          phpEnv = {
+            DATA_PATH = "${cfg.dataDir}";
+          };
+        };
+      };
+
+      users.users."${cfg.user}" = {
+        description = "FreshRSS service user";
+        isSystemUser = true;
+        group = "${cfg.user}";
+        home = cfg.dataDir;
+      };
+      users.groups."${cfg.user}" = { };
+
+      systemd.tmpfiles.rules = [
+        "d '${cfg.dataDir}' - ${cfg.user} ${config.users.users.${cfg.user}.group} - -"
+      ];
+
+      systemd.services.freshrss-config =
+        let
+          settingsFlags = concatStringsSep " \\\n    "
+            (mapAttrsToList (k: v: "${k} ${toString v}") {
+              "--default_user" = ''"${cfg.defaultUser}"'';
+              "--auth_type" = ''"${cfg.authType}"'';
+              "--base_url" = ''"${cfg.baseUrl}"'';
+              "--language" = ''"${cfg.language}"'';
+              "--db-type" = ''"${cfg.database.type}"'';
+              # The following attributes are optional depending on the type of
+              # database.  Those that evaluate to null on the left hand side
+              # will be omitted.
+              ${if cfg.database.name != null then "--db-base" else null} = ''"${cfg.database.name}"'';
+              ${if cfg.database.passFile != null then "--db-password" else null} = ''"$(cat ${cfg.database.passFile})"'';
+              ${if cfg.database.user != null then "--db-user" else null} = ''"${cfg.database.user}"'';
+              ${if cfg.database.tableprefix != null then "--db-prefix" else null} = ''"${cfg.database.tableprefix}"'';
+              ${if cfg.database.host != null && cfg.database.port != null then "--db-host" else null} = ''"${cfg.database.host}:${toString cfg.database.port}"'';
+            });
+        in
+        {
+          description = "Set up the state directory for FreshRSS before use";
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = defaultServiceConfig // {
+            Type = "oneshot";
+            User = "freshrss";
+            Group = "freshrss";
+            StateDirectory = "freshrss";
+            WorkingDirectory = cfg.package;
+          };
+          environment = {
+            DATA_PATH = cfg.dataDir;
+          };
+
+          script =
+            let
+              userScriptArgs = ''--user ${cfg.defaultUser} --password "$(cat ${cfg.passwordFile})"'';
+              updateUserScript = optionalString (cfg.authType == "form") ''
+                ./cli/update-user.php ${userScriptArgs}
+              '';
+              createUserScript = optionalString (cfg.authType == "form") ''
+                ./cli/create-user.php ${userScriptArgs}
+              '';
+            in
+            ''
+              # do installation or reconfigure
+              if test -f ${cfg.dataDir}/config.php; then
+                # reconfigure with settings
+                ./cli/reconfigure.php ${settingsFlags}
+                ${updateUserScript}
+              else
+                # check correct folders in data folder
+                ./cli/prepare.php
+                # install with settings
+                ./cli/do-install.php ${settingsFlags}
+                ${createUserScript}
+              fi
+            '';
+        };
+
+      systemd.services.freshrss-updater = {
+        description = "FreshRSS feed updater";
+        after = [ "freshrss-config.service" ];
+        wantedBy = [ "multi-user.target" ];
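+        # run the feed updater every five minutes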
+        startAt = "*:0/5";
+        environment = {
+          DATA_PATH = cfg.dataDir;
+        };
+        serviceConfig = defaultServiceConfig // {
+          ExecStart = "${cfg.package}/app/actualize_script.php";
+        };
+      };
+    };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/galene.nix b/nixpkgs/nixos/modules/services/web-apps/galene.nix
new file mode 100644
index 000000000000..81fed8a0b99a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/galene.nix
@@ -0,0 +1,214 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.galene;
+  opt = options.services.galene;
+  defaultstateDir = "/var/lib/galene";
+  defaultrecordingsDir = "${cfg.stateDir}/recordings";
+  defaultgroupsDir = "${cfg.stateDir}/groups";
+  defaultdataDir = "${cfg.stateDir}/data";
+in
+{
+  options = {
+    services.galene = {
+      enable = mkEnableOption (lib.mdDoc "Galene Service");
+
+      stateDir = mkOption {
+        default = defaultstateDir;
+        type = types.str;
+        description = lib.mdDoc ''
+          The directory where Galene stores its internal state. If left as the default
+          value, this directory will automatically be created before the Galene server
+          starts; otherwise, the sysadmin is responsible for ensuring the directory
+          exists with appropriate ownership and permissions.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "galene";
+        description = lib.mdDoc "User account under which galene runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "galene";
+        description = lib.mdDoc "Group under which galene runs.";
+      };
+
+      insecure = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether Galene should listen over HTTP or HTTPS. If left as the default
+          value (false), Galene needs to be provided with a private key and a certificate.
+        '';
+      };
+
+      certFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/path/to/your/cert.pem";
+        description = lib.mdDoc ''
+          Path to the server's certificate. The file is copied at runtime to
+          Galene's data directory where it needs to reside.
+        '';
+      };
+
+      keyFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/path/to/your/key.pem";
+        description = lib.mdDoc ''
+          Path to the server's private key. The file is copied at runtime to
+          Galene's data directory where it needs to reside.
+        '';
+      };
+
+      httpAddress = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc "HTTP listen address for galene.";
+      };
+
+      httpPort = mkOption {
+        type = types.port;
+        default = 8443;
+        description = lib.mdDoc "HTTP listen port.";
+      };
+
+      staticDir = mkOption {
+        type = types.str;
+        default = "${cfg.package.static}/static";
+        defaultText = literalExpression ''"''${package.static}/static"'';
+        example = "/var/lib/galene/static";
+        description = lib.mdDoc "Web server directory.";
+      };
+
+      recordingsDir = mkOption {
+        type = types.str;
+        default = defaultrecordingsDir;
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/recordings"'';
+        example = "/var/lib/galene/recordings";
+        description = lib.mdDoc "Recordings directory.";
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = defaultdataDir;
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/data"'';
+        example = "/var/lib/galene/data";
+        description = lib.mdDoc "Data directory.";
+      };
+
+      groupsDir = mkOption {
+        type = types.str;
+        default = defaultgroupsDir;
+        defaultText = literalExpression ''"''${config.${opt.stateDir}}/groups"'';
+        example = "/var/lib/galene/groups";
+        description = lib.mdDoc "Groups directory.";
+      };
+
+      package = mkOption {
+        default = pkgs.galene;
+        defaultText = literalExpression "pkgs.galene";
+        type = types.package;
+        description = lib.mdDoc ''
+          Package for running Galene.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.insecure || (cfg.certFile != null && cfg.keyFile != null);
+        message = ''
+          Galene needs both certFile and keyFile defined for encryption, or
+          the insecure flag.
+        '';
+      }
+    ];
+
+    systemd.services.galene = {
+      description = "galene";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = ''
+        ${optionalString (cfg.insecure != true) ''
+           install -m 700 -o '${cfg.user}' -g '${cfg.group}' ${cfg.certFile} ${cfg.dataDir}/cert.pem
+           install -m 700 -o '${cfg.user}' -g '${cfg.group}' ${cfg.keyFile} ${cfg.dataDir}/key.pem
+        ''}
+      '';
+
+      serviceConfig = mkMerge [
+        {
+          Type = "simple";
+          User = cfg.user;
+          Group = cfg.group;
+          WorkingDirectory = cfg.stateDir;
+          ExecStart = ''${cfg.package}/bin/galene \
+          ${optionalString (cfg.insecure) "-insecure"} \
+          -data ${cfg.dataDir} \
+          -groups ${cfg.groupsDir} \
+          -recordings ${cfg.recordingsDir} \
+          -static ${cfg.staticDir}'';
+          Restart = "always";
+          # Upstream Requirements
+          LimitNOFILE = 65536;
+          StateDirectory = [ ] ++
+            optional (cfg.stateDir == defaultstateDir) "galene" ++
+            optional (cfg.dataDir == defaultdataDir) "galene/data" ++
+            optional (cfg.groupsDir == defaultgroupsDir) "galene/groups" ++
+            optional (cfg.recordingsDir == defaultrecordingsDir) "galene/recordings";
+
+          # Hardening
+          CapabilityBoundingSet = [ "" ];
+          DeviceAllow = [ "" ];
+          LockPersonality = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          PrivateUsers = true;
+          ProcSubset = "pid";
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          ProtectSystem = "strict";
+          ReadWritePaths = cfg.recordingsDir;
+          RemoveIPC = true;
+          RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_NETLINK" ];
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@privileged" ];
+          UMask = "0077";
+        }
+      ];
+    };
+
+    users.users = mkIf (cfg.user == "galene")
+      {
+        galene = {
+          description = "galene Service";
+          group = cfg.group;
+          isSystemUser = true;
+        };
+      };
+
+    users.groups = mkIf (cfg.group == "galene") {
+      galene = { };
+    };
+  };
+  meta.maintainers = with lib.maintainers; [ rgrunbla ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/gerrit.nix b/nixpkgs/nixos/modules/services/web-apps/gerrit.nix
new file mode 100644
index 000000000000..ab2eeea09bdc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/gerrit.nix
@@ -0,0 +1,242 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.gerrit;
+
+  # NixOS option type for git-like configs
+  gitIniType = with types;
+    let
+      primitiveType = either str (either bool int);
+      multipleType = either primitiveType (listOf primitiveType);
+      sectionType = lazyAttrsOf multipleType;
+      supersectionType = lazyAttrsOf (either multipleType sectionType);
+    in lazyAttrsOf supersectionType;
+
+  gerritConfig = pkgs.writeText "gerrit.conf" (
+    lib.generators.toGitINI cfg.settings
+  );
+
+  replicationConfig = pkgs.writeText "replication.conf" (
+    lib.generators.toGitINI cfg.replicationSettings
+  );
+
+  # Wrap the gerrit java with all the java options so it can be called
+  # like a normal CLI app
+  gerrit-cli = pkgs.writeShellScriptBin "gerrit" ''
+    set -euo pipefail
+    jvmOpts=(
+      ${lib.escapeShellArgs cfg.jvmOpts}
+      -Xmx${cfg.jvmHeapLimit}
+    )
+    exec ${cfg.jvmPackage}/bin/java \
+      "''${jvmOpts[@]}" \
+      -jar ${cfg.package}/webapps/${cfg.package.name}.war \
+      "$@"
+  '';
+
+  gerrit-plugins = pkgs.runCommand
+    "gerrit-plugins"
+    {
+      buildInputs = [ gerrit-cli ];
+    }
+    ''
+      shopt -s nullglob
+      mkdir $out
+
+      for name in ${toString cfg.builtinPlugins}; do
+        echo "Installing builtin plugin $name.jar"
+        gerrit cat plugins/$name.jar > $out/$name.jar
+      done
+
+      for file in ${toString cfg.plugins}; do
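+        # plugin derivations are store paths (/nix/store/<hash>-<name>); strip everything up to the first dash to recover the plugin file name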
+        name=$(echo "$file" | cut -d - -f 2-)
+        echo "Installing plugin $name"
+        ln -sf "$file" $out/$name
+      done
+    '';
+in
+{
+  options = {
+    services.gerrit = {
+      enable = mkEnableOption (lib.mdDoc "Gerrit service");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.gerrit;
+        defaultText = literalExpression "pkgs.gerrit";
+        description = lib.mdDoc "Gerrit package to use";
+      };
+
+      jvmPackage = mkOption {
+        type = types.package;
+        default = pkgs.jre_headless;
+        defaultText = literalExpression "pkgs.jre_headless";
+        description = lib.mdDoc "Java Runtime Environment package to use";
+      };
+
+      jvmOpts = mkOption {
+        type = types.listOf types.str;
+        default = [
+          "-Dflogger.backend_factory=com.google.common.flogger.backend.log4j.Log4jBackendFactory#getInstance"
+          "-Dflogger.logging_context=com.google.gerrit.server.logging.LoggingContext#getInstance"
+        ];
+        description = lib.mdDoc "A list of JVM options to start gerrit with.";
+      };
+
+      jvmHeapLimit = mkOption {
+        type = types.str;
+        default = "1024m";
+        description = lib.mdDoc ''
+          How much memory to allocate to the JVM heap
+        '';
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "[::]:8080";
+        description = lib.mdDoc ''
+          `hostname:port` to listen for HTTP traffic.
+
+          This is bound using systemd socket activation.
+        '';
+      };
+
+      settings = mkOption {
+        type = gitIniType;
+        default = {};
+        description = lib.mdDoc ''
+          Gerrit configuration. This will be generated to the
+          `etc/gerrit.config` file.
+        '';
+      };
+
+      replicationSettings = mkOption {
+        type = gitIniType;
+        default = {};
+        description = lib.mdDoc ''
+          Replication configuration. This will be generated to the
+          `etc/replication.config` file.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = lib.mdDoc ''
+          List of plugins to add to Gerrit. Each derivation is a jar file
+          itself, where the name of the derivation is the name of the plugin.
+        '';
+      };
+
+      builtinPlugins = mkOption {
+        type = types.listOf (types.enum cfg.package.passthru.plugins);
+        default = [];
+        description = lib.mdDoc ''
+          List of built-in plugins to install. These are shipped in the
+          `gerrit.war` file.
+        '';
+      };
+
+      serverId = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Set a UUID that uniquely identifies the server.
+
+          This can be generated with
+          `nix-shell -p util-linux --run uuidgen`.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.replicationSettings != {} -> elem "replication" cfg.builtinPlugins;
+        message = "Gerrit replicationSettings require enabling the replication plugin";
+      }
+    ];
+
+    services.gerrit.settings = {
+      cache.directory = "/var/cache/gerrit";
+      container.heapLimit = cfg.jvmHeapLimit;
+      gerrit.basePath = lib.mkDefault "git";
+      gerrit.serverId = cfg.serverId;
+      httpd.inheritChannel = "true";
+      httpd.listenUrl = lib.mkDefault "http://${cfg.listenAddress}";
+      index.type = lib.mkDefault "lucene";
+    };
+
+    # Add the gerrit CLI to the system to run `gerrit init` and friends.
+    environment.systemPackages = [ gerrit-cli ];
+
+    systemd.sockets.gerrit = {
+      unitConfig.Description = "Gerrit HTTP socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ cfg.listenAddress ];
+    };
+
+    systemd.services.gerrit = {
+      description = "Gerrit";
+
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "gerrit.socket" ];
+      after = [ "gerrit.socket" "network.target" ];
+
+      path = [
+        gerrit-cli
+        pkgs.bash
+        pkgs.coreutils
+        pkgs.git
+        pkgs.openssh
+      ];
+
+      environment = {
+        GERRIT_HOME = "%S/gerrit";
+        GERRIT_TMP = "%T";
+        HOME = "%S/gerrit";
+        XDG_CONFIG_HOME = "%S/gerrit/.config";
+      };
+
+      preStart = ''
+        set -euo pipefail
+
+        # bootstrap if nothing exists
+        if [[ ! -d git ]]; then
+          gerrit init --batch --no-auto-start
+        fi
+
+        # install gerrit.war for the plugin manager
+        rm -rf bin
+        mkdir bin
+        ln -sfv ${cfg.package}/webapps/${cfg.package.name}.war bin/gerrit.war
+
+        # link in the generated configuration files; Gerrit reads them from etc/
+        ln -sfv ${gerritConfig} etc/gerrit.config
+        ln -sfv ${replicationConfig} etc/replication.config
+
+        # install the plugins
+        rm -rf plugins
+        ln -sv ${gerrit-plugins} plugins
+      ''
+      ;
+
+      serviceConfig = {
+        CacheDirectory = "gerrit";
+        DynamicUser = true;
+        ExecStart = "${gerrit-cli}/bin/gerrit daemon --console-log";
+        LimitNOFILE = 4096;
+        StandardInput = "socket";
+        StandardOutput = "journal";
+        StateDirectory = "gerrit";
+        WorkingDirectory = "%S/gerrit";
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ edef zimbatm ];
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/gotify-server.nix b/nixpkgs/nixos/modules/services/web-apps/gotify-server.nix
new file mode 100644
index 000000000000..8db3a8ef3e81
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/gotify-server.nix
@@ -0,0 +1,49 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gotify;
+in {
+  options = {
+    services.gotify = {
+      enable = mkEnableOption (lib.mdDoc "Gotify webserver");
+
+      port = mkOption {
+        type = types.port;
+        description = lib.mdDoc ''
+          Port the server listens to.
+        '';
+      };
+
+      stateDirectoryName = mkOption {
+        type = types.str;
+        default = "gotify-server";
+        description = lib.mdDoc ''
+          The name of the directory below {file}`/var/lib` where
+          gotify stores its runtime data.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.gotify-server = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      description = "Simple server for sending and receiving messages";
+
+      environment = {
+        GOTIFY_SERVER_PORT = toString cfg.port;
+      };
+
+      serviceConfig = {
+        WorkingDirectory = "/var/lib/${cfg.stateDirectoryName}";
+        StateDirectory = cfg.stateDirectoryName;
+        Restart = "always";
+        DynamicUser = "yes";
+        ExecStart = "${pkgs.gotify-server}/bin/server";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/gotosocial.md b/nixpkgs/nixos/modules/services/web-apps/gotosocial.md
new file mode 100644
index 000000000000..a290d7d1893a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/gotosocial.md
@@ -0,0 +1,64 @@
+# GoToSocial {#module-services-gotosocial}
+
+[GoToSocial](https://gotosocial.org/) is an ActivityPub social network server, written in Golang.
+
+## Service configuration {#modules-services-gotosocial-service-configuration}
+
+The following configuration sets up PostgreSQL as the database backend and binds
+GoToSocial to `127.0.0.1:8080`, expecting to be run behind an HTTP reverse proxy on `gotosocial.example.com`.
+
+```nix
+services.gotosocial = {
+  enable = true;
+  setupPostgresqlDB = true;
+  settings = {
+    application-name = "My GoToSocial";
+    host = "gotosocial.example.com";
+    protocol = "https";
+    bind-address = "127.0.0.1";
+    port = 8080;
+  };
+};
+```
+
+Please refer to the [GoToSocial Documentation](https://docs.gotosocial.org/en/latest/configuration/general/)
+for additional configuration options.
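+
+Secrets such as the database or SMTP password do not have to be written into
+`services.gotosocial.settings` (and therefore into the world-readable Nix store). The module's
+`environmentFile` option can instead point to a root-owned file that sets the corresponding
+environment variables. A minimal sketch, assuming a secret file at `/run/secrets/gotosocial.env`
+(the path and variable below are placeholders; see the upstream documentation for the exact
+`GTS_*` variable names):
+
+```nix
+services.gotosocial.environmentFile = "/run/secrets/gotosocial.env";
+# /run/secrets/gotosocial.env, readable by root only, e.g.:
+#   GTS_DB_PASSWORD=examplepassword
+```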
+
+## Proxy configuration {#modules-services-gotosocial-proxy-configuration}
+
+Although it is possible to expose GoToSocial directly, it is common practice to operate it behind an
+HTTP reverse proxy such as nginx.
+
+```nix
+networking.firewall.allowedTCPPorts = [ 80 443 ];
+services.nginx = {
+  enable = true;
+  clientMaxBodySize = "40M";
+  virtualHosts = with config.services.gotosocial.settings; {
+    "${host}" = {
+      enableACME = true;
+      forceSSL = true;
+      locations = {
+        "/" = {
+          recommendedProxySettings = true;
+          proxyWebsockets = true;
+          proxyPass = "http://${bind-address}:${toString port}";
+        };
+      };
+    };
+  };
+};
+```
+
+Please refer to [](#module-security-acme) for details on how to provision an SSL/TLS certificate.
+
+## User management {#modules-services-gotosocial-user-management}
+
+After the GoToSocial service is running, the `gotosocial-admin` utility can be used to manage users. In particular, an
+administrative user can be created with:
+
+```ShellSession
+$ sudo gotosocial-admin account create --username <nickname> --email <email> --password <password>
+$ sudo gotosocial-admin account confirm --username <nickname>
+$ sudo gotosocial-admin account promote --username <nickname>
+```
diff --git a/nixpkgs/nixos/modules/services/web-apps/gotosocial.nix b/nixpkgs/nixos/modules/services/web-apps/gotosocial.nix
new file mode 100644
index 000000000000..9c21719a5759
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/gotosocial.nix
@@ -0,0 +1,171 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.gotosocial;
+  settingsFormat = pkgs.formats.yaml { };
+  configFile = settingsFormat.generate "config.yml" cfg.settings;
+  defaultSettings = {
+    application-name = "gotosocial";
+
+    protocol = "https";
+
+    bind-address = "127.0.0.1";
+    port = 8080;
+
+    storage-local-base-path = "/var/lib/gotosocial/storage";
+
+    db-type = "sqlite";
+    db-address = "/var/lib/gotosocial/database.sqlite";
+  };
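+  # run the admin CLI as the gotosocial user in a transient systemd unit, so it
+  # sees the same configuration and state directory as the main service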
+  gotosocial-admin = pkgs.writeShellScriptBin "gotosocial-admin" ''
+    exec systemd-run \
+      -u gotosocial-admin.service \
+      -p Group=gotosocial \
+      -p User=gotosocial \
+      -q -t -G --wait --service-type=exec \
+      ${cfg.package}/bin/gotosocial --config-path ${configFile} admin "$@"
+  '';
+in
+{
+  meta.doc = ./gotosocial.md;
+  meta.maintainers = with lib.maintainers; [ misuzu ];
+
+  options.services.gotosocial = {
+    enable = lib.mkEnableOption (lib.mdDoc "ActivityPub social network server");
+
+    package = lib.mkPackageOptionMD pkgs "gotosocial" { };
+
+    openFirewall = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open the configured port in the firewall.
+        Using a reverse proxy instead is highly recommended.
+      '';
+    };
+
+    setupPostgresqlDB = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to set up a local PostgreSQL database and populate the
+        corresponding `db-*` settings in `services.gotosocial.settings`.
+      '';
+    };
+
+    settings = lib.mkOption {
+      type = settingsFormat.type;
+      default = defaultSettings;
+      example = {
+        application-name = "My GoToSocial";
+        host = "gotosocial.example.com";
+      };
+      description = lib.mdDoc ''
+        Contents of the GoToSocial YAML config.
+
+        Please refer to the
+        [documentation](https://docs.gotosocial.org/en/latest/configuration/)
+        and
+        [example config](https://github.com/superseriousbusiness/gotosocial/blob/main/example/config.yaml).
+
+        Please note that the `host` option cannot be changed later so it is important to configure this correctly before you start GoToSocial.
+      '';
+    };
+
+    environmentFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      description = lib.mdDoc ''
+        File path containing environment variables for configuring the GoToSocial service
+        in the format of an EnvironmentFile as described by systemd.exec(5).
+
+        This option could be used to pass sensitive configuration to the GoToSocial daemon.
+
+        Please refer to the Environment Variables section in the
+        [documentation](https://docs.gotosocial.org/en/latest/configuration/).
+      '';
+      default = null;
+      example = "/root/nixos/secrets/gotosocial.env";
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.settings.host or null != null;
+        message = ''
+          You have to define a hostname for GoToSocial (`services.gotosocial.settings.host`), it cannot be changed later without starting over!
+        '';
+      }
+    ];
+
+    services.gotosocial.settings = (lib.mapAttrs (name: lib.mkDefault) (
+      defaultSettings // {
+        web-asset-base-dir = "${cfg.package}/share/gotosocial/web/assets/";
+        web-template-base-dir = "${cfg.package}/share/gotosocial/web/template/";
+      }
+    )) // (lib.optionalAttrs cfg.setupPostgresqlDB {
+      db-type = "postgres";
+      db-address = "/run/postgresql";
+      db-database = "gotosocial";
+      db-user = "gotosocial";
+    });
+
+    environment.systemPackages = [ gotosocial-admin ];
+
+    users.groups.gotosocial = { };
+    users.users.gotosocial = {
+      group = "gotosocial";
+      isSystemUser = true;
+    };
+
+    networking.firewall = lib.mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.settings.port ];
+    };
+
+    services.postgresql = lib.mkIf cfg.setupPostgresqlDB {
+      enable = true;
+      ensureDatabases = [ "gotosocial" ];
+      ensureUsers = [
+        {
+          name = "gotosocial";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    systemd.services.gotosocial = {
+      description = "ActivityPub social network server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ]
+        ++ lib.optional cfg.setupPostgresqlDB "postgresql.service";
+      requires = lib.optional cfg.setupPostgresqlDB "postgresql.service";
+      restartTriggers = [ configFile ];
+
+      serviceConfig = {
+        EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
+        ExecStart = "${cfg.package}/bin/gotosocial --config-path ${configFile} server start";
+        Restart = "on-failure";
+        Group = "gotosocial";
+        User = "gotosocial";
+        StateDirectory = "gotosocial";
+        WorkingDirectory = "/var/lib/gotosocial";
+
+        # Security options:
+        # Based on https://github.com/superseriousbusiness/gotosocial/blob/v0.8.1/example/gotosocial.service
+        AmbientCapabilities = lib.optional (cfg.settings.port < 1024) "CAP_NET_BIND_SERVICE";
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        DevicePolicy = "closed";
+        ProtectSystem = "full";
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        LockPersonality = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/grocy.md b/nixpkgs/nixos/modules/services/web-apps/grocy.md
new file mode 100644
index 000000000000..62aad4b103df
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/grocy.md
@@ -0,0 +1,66 @@
+# Grocy {#module-services-grocy}
+
+[Grocy](https://grocy.info/) is a web-based self-hosted groceries
+& household management solution for your home.
+
+## Basic usage {#module-services-grocy-basic-usage}
+
+A very basic configuration may look like this:
+```
+{ pkgs, ... }:
+{
+  services.grocy = {
+    enable = true;
+    hostName = "grocy.tld";
+  };
+}
+```
+This configures a simple vhost using [nginx](#opt-services.nginx.enable)
+which serves `grocy.tld` with fully configured ACME/Let's Encrypt (this can be
+disabled by setting [services.grocy.nginx.enableSSL](#opt-services.grocy.nginx.enableSSL)
+to `false`). After the initial setup, the credentials `admin:admin`
+can be used to log in.
+
+The application's state is persisted at `/var/lib/grocy/grocy.db` in an
+`sqlite3` database. Database migrations are applied when the `/` route of
+the application is requested.
+
+## Settings {#module-services-grocy-settings}
+
+The configuration for `grocy` is located at `/etc/grocy/config.php`.
+By default, the following settings can be defined in the NixOS configuration:
+```
+{ pkgs, ... }:
+{
+  services.grocy.settings = {
+    # The default currency in the system for invoices etc.
+    # Please note that exchange rates aren't taken into account, this
+    # is just the setting for what's shown in the frontend.
+    currency = "EUR";
+
+    # The display language (and locale configuration) for grocy.
+    culture = "de";
+
+    calendar = {
+      # Whether or not to show the week-numbers
+      # in the calendar.
+      showWeekNumber = true;
+
+      # Index of the first day to be shown in the calendar (0=Sunday, 1=Monday,
+      # 2=Tuesday and so on).
+      firstDayOfWeek = 2;
+    };
+  };
+}
+```
+
+If you want to alter the configuration file on your own, you can do so manually with
+an expression like this:
+```
+{ lib, ... }:
+{
+  environment.etc."grocy/config.php".text = lib.mkAfter ''
+    // Arbitrary PHP code in grocy's configuration file
+  '';
+}
+```
diff --git a/nixpkgs/nixos/modules/services/web-apps/grocy.nix b/nixpkgs/nixos/modules/services/web-apps/grocy.nix
new file mode 100644
index 000000000000..fe40a3c20941
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/grocy.nix
@@ -0,0 +1,184 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.grocy;
+in {
+  options.services.grocy = {
+    enable = mkEnableOption (lib.mdDoc "grocy");
+
+    package = mkPackageOptionMD pkgs "grocy" { };
+
+    hostName = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        FQDN for the grocy instance.
+      '';
+    };
+
+    nginx.enableSSL = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable SSL (with ACME and Let's Encrypt)
+        for the grocy vhost.
+      '';
+    };
+
+    phpfpm.settings = mkOption {
+      type = with types; attrsOf (oneOf [ int str bool ]);
+      default = {
+        "pm" = "dynamic";
+        "php_admin_value[error_log]" = "stderr";
+        "php_admin_flag[log_errors]" = true;
+        "listen.owner" = "nginx";
+        "catch_workers_output" = true;
+        "pm.max_children" = "32";
+        "pm.start_servers" = "2";
+        "pm.min_spare_servers" = "2";
+        "pm.max_spare_servers" = "4";
+        "pm.max_requests" = "500";
+      };
+
+      description = lib.mdDoc ''
+        Options for grocy's PHPFPM pool.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/grocy";
+      description = lib.mdDoc ''
+        Home directory of the `grocy` user which contains
+        the application's state.
+      '';
+    };
+
+    settings = {
+      currency = mkOption {
+        type = types.str;
+        default = "USD";
+        example = "EUR";
+        description = lib.mdDoc ''
+          ISO 4217 code for the currency to display.
+        '';
+      };
+
+      culture = mkOption {
+        type = types.enum [ "de" "en" "da" "en_GB" "es" "fr" "hu" "it" "nl" "no" "pl" "pt_BR" "ru" "sk_SK" "sv_SE" "tr" ];
+        default = "en";
+        description = lib.mdDoc ''
+          Display language of the frontend.
+        '';
+      };
+
+      calendar = {
+        showWeekNumber = mkOption {
+          default = true;
+          type = types.bool;
+          description = lib.mdDoc ''
+            Show week numbers in the calendar views.
+          '';
+        };
+        firstDayOfWeek = mkOption {
+          default = null;
+          type = types.nullOr (types.enum (range 0 6));
+          description = lib.mdDoc ''
+            Which day of the week (0=Sunday, 1=Monday etc.) should be the
+            first day.
+          '';
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."grocy/config.php".text = ''
+      <?php
+      Setting('CULTURE', '${cfg.settings.culture}');
+      Setting('CURRENCY', '${cfg.settings.currency}');
+      Setting('CALENDAR_FIRST_DAY_OF_WEEK', '${toString cfg.settings.calendar.firstDayOfWeek}');
+      Setting('CALENDAR_SHOW_WEEK_OF_YEAR', ${boolToString cfg.settings.calendar.showWeekNumber});
+    '';
+
+    users.users.grocy = {
+      isSystemUser = true;
+      createHome = true;
+      home = cfg.dataDir;
+      group = "nginx";
+    };
+
+    systemd.tmpfiles.rules = map (
+      dirName: "d '${cfg.dataDir}/${dirName}' - grocy nginx - -"
+    ) [ "viewcache" "plugins" "settingoverrides" "storage" ];
+
+    services.phpfpm.pools.grocy = {
+      user = "grocy";
+      group = "nginx";
+
+      # PHP 8.1 and 8.2 are the only versions supported and tested by upstream:
+      # https://github.com/grocy/grocy/blob/v4.0.2/README.md#platform-support
+      phpPackage = pkgs.php82;
+
+      inherit (cfg.phpfpm) settings;
+
+      phpEnv = {
+        GROCY_CONFIG_FILE = "/etc/grocy/config.php";
+        GROCY_DB_FILE = "${cfg.dataDir}/grocy.db";
+        GROCY_STORAGE_DIR = "${cfg.dataDir}/storage";
+        GROCY_PLUGIN_DIR = "${cfg.dataDir}/plugins";
+        GROCY_CACHE_DIR = "${cfg.dataDir}/viewcache";
+      };
+    };
+
+    # After an update of grocy, the viewcache needs to be deleted, otherwise grocy will not work.
+    # https://github.com/grocy/grocy#how-to-update
+    systemd.services.grocy-setup = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "phpfpm-grocy.service" ];
+      script = ''
+        rm -rf ${cfg.dataDir}/viewcache/*
+      '';
+    };
+
+    services.nginx = {
+      enable = true;
+      virtualHosts."${cfg.hostName}" = mkMerge [
+        { root = "${cfg.package}/public";
+          locations."/".extraConfig = ''
+            rewrite ^ /index.php;
+          '';
+          locations."~ \\.php$".extraConfig = ''
+            fastcgi_split_path_info ^(.+\.php)(/.+)$;
+            fastcgi_pass unix:${config.services.phpfpm.pools.grocy.socket};
+            include ${config.services.nginx.package}/conf/fastcgi.conf;
+            include ${config.services.nginx.package}/conf/fastcgi_params;
+          '';
+          locations."~ \\.(js|css|ttf|woff2?|png|jpe?g|svg)$".extraConfig = ''
+            add_header Cache-Control "public, max-age=15778463";
+            add_header X-Content-Type-Options nosniff;
+            add_header X-XSS-Protection "1; mode=block";
+            add_header X-Robots-Tag none;
+            add_header X-Download-Options noopen;
+            add_header X-Permitted-Cross-Domain-Policies none;
+            add_header Referrer-Policy no-referrer;
+            access_log off;
+          '';
+          extraConfig = ''
+            try_files $uri /index.php;
+          '';
+        }
+        (mkIf cfg.nginx.enableSSL {
+          enableACME = true;
+          forceSSL = true;
+        })
+      ];
+    };
+  };
+
+  meta = {
+    maintainers = with maintainers; [ n0emis ];
+    doc = ./grocy.md;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/guacamole-client.nix b/nixpkgs/nixos/modules/services/web-apps/guacamole-client.nix
new file mode 100644
index 000000000000..c12f6582468c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/guacamole-client.nix
@@ -0,0 +1,60 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+let
+  cfg = config.services.guacamole-client;
+  settingsFormat = pkgs.formats.javaProperties { };
+in
+{
+  options = {
+    services.guacamole-client = {
+      enable = lib.mkEnableOption (lib.mdDoc "Apache Guacamole Client (Tomcat)");
+      package = lib.mkPackageOptionMD pkgs "guacamole-client" { };
+
+      settings = lib.mkOption {
+        type = lib.types.submodule {
+          freeformType = settingsFormat.type;
+        };
+        default = {
+          guacd-hostname = "localhost";
+          guacd-port = 4822;
+        };
+        description = lib.mdDoc ''
+          Configuration written to `guacamole.properties`.
+
+          ::: {.note}
+          The Guacamole web application uses one main configuration file called
+          `guacamole.properties`. This file is the common location for all
+          configuration properties read by Guacamole or any extension of
+          Guacamole, including authentication providers.
+          :::
+        '';
+      };
+
+      enableWebserver = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable the Guacamole web application in a Tomcat webserver.
+        '';
+      };
+    };
+  };
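+
+  # A sketch of how this module is typically combined with the guacd daemon
+  # provided by services.guacamole-server (values are illustrative):
+  #
+  #   services.guacamole-server.enable = true;
+  #   services.guacamole-client = {
+  #     enable = true;
+  #     settings.guacd-port = 4822;
+  #   };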
+
+  config = lib.mkIf cfg.enable {
+    environment.etc."guacamole/guacamole.properties" = lib.mkIf
+      (cfg.settings != {})
+      { source = (settingsFormat.generate "guacamole.properties" cfg.settings); };
+
+    services = lib.mkIf cfg.enableWebserver {
+      tomcat = {
+        enable = true;
+        webapps = [
+          cfg.package
+        ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/guacamole-server.nix b/nixpkgs/nixos/modules/services/web-apps/guacamole-server.nix
new file mode 100644
index 000000000000..0cffdce83d83
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/guacamole-server.nix
@@ -0,0 +1,83 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+let
+  cfg = config.services.guacamole-server;
+in
+{
+  options = {
+    services.guacamole-server = {
+      enable = lib.mkEnableOption (lib.mdDoc "Apache Guacamole Server (guacd)");
+      package = lib.mkPackageOptionMD pkgs "guacamole-server" { };
+
+      extraEnvironment = lib.mkOption {
+        type = lib.types.attrsOf lib.types.str;
+        default = { };
+        example = lib.literalExpression ''
+          {
+            ENVIRONMENT = "production";
+          }
+        '';
+        description = lib.mdDoc "Environment variables to pass to guacd.";
+      };
+
+      host = lib.mkOption {
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          The host name or IP address the server should listen on.
+        '';
+        type = lib.types.str;
+      };
+
+      port = lib.mkOption {
+        default = 4822;
+        description = lib.mdDoc ''
+          The port the guacd server should listen on.
+        '';
+        type = lib.types.port;
+      };
+
+      logbackXml = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/path/to/logback.xml";
+        description = lib.mdDoc ''
+          Configuration file that corresponds to `logback.xml`.
+        '';
+      };
+
+      userMappingXml = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/path/to/user-mapping.xml";
+        description = lib.mdDoc ''
+          Configuration file that corresponds to `user-mapping.xml`.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # Setup configuration files.
+    environment.etc."guacamole/logback.xml" = lib.mkIf (cfg.logbackXml != null) { source = cfg.logbackXml; };
+    environment.etc."guacamole/user-mapping.xml" = lib.mkIf (cfg.userMappingXml != null) { source = cfg.userMappingXml; };
+
+    systemd.services.guacamole-server = {
+      description = "Apache Guacamole server (guacd)";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment = {
+        HOME = "/run/guacamole-server";
+      } // cfg.extraEnvironment;
+      serviceConfig = {
+        ExecStart = "${lib.getExe cfg.package} -f -b ${cfg.host} -l ${toString cfg.port}";
+        RuntimeDirectory = "guacamole-server";
+        DynamicUser = true;
+        PrivateTmp = "yes";
+        Restart = "on-failure";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/healthchecks.nix b/nixpkgs/nixos/modules/services/web-apps/healthchecks.nix
new file mode 100644
index 000000000000..b92525075541
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/healthchecks.nix
@@ -0,0 +1,276 @@
+{ config, lib, options, pkgs, buildEnv, ... }:
+
+with lib;
+
+let
+  defaultUser = "healthchecks";
+  cfg = config.services.healthchecks;
+  opt = options.services.healthchecks;
+  pkg = cfg.package;
+  boolToPython = b: if b then "True" else "False";
+  environment = {
+    PYTHONPATH = pkg.pythonPath;
+    STATIC_ROOT = cfg.dataDir + "/static";
+  } // cfg.settings;
+
+  environmentFile = pkgs.writeText "healthchecks-environment" (lib.generators.toKeyValue { } environment);
+
+  healthchecksManageScript = pkgs.writeShellScriptBin "healthchecks-manage" ''
+    sudo=exec
+    if [[ "$USER" != "${cfg.user}" ]]; then
+      sudo='exec /run/wrappers/bin/sudo -u ${cfg.user} --preserve-env --preserve-env=PYTHONPATH'
+    fi
+    export $(cat ${environmentFile} | xargs)
+    $sudo ${pkg}/opt/healthchecks/manage.py "$@"
+  '';
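+
+  # A usage sketch for the wrapper above; the subcommands come from Django and
+  # Healthchecks itself, e.g.:
+  #
+  #   healthchecks-manage createsuperuser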
+in
+{
+  options.services.healthchecks = {
+    enable = mkEnableOption (lib.mdDoc "healthchecks") // {
+      description = lib.mdDoc ''
+        Enable healthchecks.
+        It is expected to be run behind an HTTP reverse proxy.
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.healthchecks;
+      defaultText = literalExpression "pkgs.healthchecks";
+      type = types.package;
+      description = lib.mdDoc "healthchecks package to use.";
+    };
+
+    user = mkOption {
+      default = defaultUser;
+      type = types.str;
+      description = lib.mdDoc ''
+        User account under which healthchecks runs.
+
+        ::: {.note}
+        If left as the default value this user will automatically be created
+        on system activation, otherwise you are responsible for
+        ensuring the user exists before the healthchecks service starts.
+        :::
+      '';
+    };
+
+    group = mkOption {
+      default = defaultUser;
+      type = types.str;
+      description = lib.mdDoc ''
+        Group account under which healthchecks runs.
+
+        ::: {.note}
+        If left as the default value this group will automatically be created
+        on system activation, otherwise you are responsible for
+        ensuring the group exists before the healthchecks service starts.
+        :::
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Address the server will listen on.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8000;
+      description = lib.mdDoc "Port the server will listen on.";
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/healthchecks";
+      description = lib.mdDoc ''
+        The directory used to store all data for healthchecks.
+
+        ::: {.note}
+        If left as the default value this directory will automatically be created before
+        the healthchecks server starts, otherwise you are responsible for ensuring the
+        directory exists with appropriate ownership and permissions.
+        :::
+      '';
+    };
+
+    settings = lib.mkOption {
+      description = lib.mdDoc ''
+        Environment variables which are read by healthchecks' `(local)_settings.py`.
+
+        Settings which are explicitly covered in the options below are type-checked and/or transformed
+        before being added to the environment; everything else is passed as a string.
+
+        See <https://healthchecks.io/docs/self_hosted_configuration/>
+        for full documentation of the settings.
+
+        We add additional variables to this list inside the package's `local_settings.py`:
+        - `STATIC_ROOT` to set a state directory for dynamically generated static files.
+        - `SECRET_KEY_FILE` to read `SECRET_KEY` from a file at runtime and keep it out of
+          /nix/store.
+        - `_FILE` variants for several values that hold sensitive information in
+          [Healthchecks configuration](https://healthchecks.io/docs/self_hosted_configuration/) so
+          that they also can be read from a file and kept out of /nix/store. To see which values
+          have support for a `_FILE` variant, run:
+          - `nix-instantiate --eval --expr '(import <nixpkgs> {}).healthchecks.secrets'`
+          - or `nix eval 'nixpkgs#healthchecks.secrets'` if flake support has been enabled.
+      '';
+      type = types.submodule (settings: {
+        freeformType = types.attrsOf types.str;
+        options = {
+          ALLOWED_HOSTS = lib.mkOption {
+            type = types.listOf types.str;
+            default = [ "*" ];
+            description = lib.mdDoc "The host/domain names that this site can serve.";
+            apply = lib.concatStringsSep ",";
+          };
+
+          SECRET_KEY_FILE = mkOption {
+            type = types.path;
+            description = lib.mdDoc "Path to a file containing the secret key.";
+          };
+
+          DEBUG = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Enable debug mode.";
+            apply = boolToPython;
+          };
+
+          REGISTRATION_OPEN = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              A boolean that controls whether site visitors can create new accounts.
+              Set it to false if you are setting up a private Healthchecks instance,
+              but it needs to be publicly accessible (so, for example, your cloud
+              services can send pings to it).
+              If you close new user registration, you can still selectively invite
+              users to your team account.
+            '';
+            apply = boolToPython;
+          };
+
+          DB = mkOption {
+            type = types.enum [ "sqlite" "postgres" "mysql" ];
+            default = "sqlite";
+            description = lib.mdDoc "Database engine to use.";
+          };
+
+          DB_NAME = mkOption {
+            type = types.str;
+            default =
+              if settings.config.DB == "sqlite"
+              then "${cfg.dataDir}/healthchecks.sqlite"
+              else "hc";
+            defaultText = lib.literalExpression ''
+              if config.${settings.options.DB} == "sqlite"
+              then "''${config.${opt.dataDir}}/healthchecks.sqlite"
+              else "hc"
+            '';
+            description = lib.mdDoc "Database name.";
+          };
+        };
+      });
+    };
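+
+    # A minimal sketch of how these settings compose (the site URL and key path
+    # are placeholders, not defaults provided by this module):
+    #
+    #   services.healthchecks = {
+    #     enable = true;
+    #     settings = {
+    #       SITE_ROOT = "https://hc.example.com";
+    #       SECRET_KEY_FILE = "/run/keys/healthchecks-secret";
+    #     };
+    #   };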
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ healthchecksManageScript ];
+
+    systemd.targets.healthchecks = {
+      description = "Target for all Healthchecks services";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "network-online.target" ];
+    };
+
+    systemd.services =
+      let
+        commonConfig = {
+          WorkingDirectory = cfg.dataDir;
+          User = cfg.user;
+          Group = cfg.group;
+          EnvironmentFile = [ environmentFile ];
+          StateDirectory = mkIf (cfg.dataDir == "/var/lib/healthchecks") "healthchecks";
+          StateDirectoryMode = mkIf (cfg.dataDir == "/var/lib/healthchecks") "0750";
+        };
+      in
+      {
+        healthchecks-migration = {
+          description = "Healthchecks migrations";
+          wantedBy = [ "healthchecks.target" ];
+
+          serviceConfig = commonConfig // {
+            Restart = "on-failure";
+            Type = "oneshot";
+            ExecStart = ''
+              ${pkg}/opt/healthchecks/manage.py migrate
+            '';
+          };
+        };
+
+        healthchecks = {
+          description = "Healthchecks WSGI Service";
+          wantedBy = [ "healthchecks.target" ];
+          after = [ "healthchecks-migration.service" ];
+
+          preStart = ''
+            ${pkg}/opt/healthchecks/manage.py collectstatic --no-input
+            ${pkg}/opt/healthchecks/manage.py remove_stale_contenttypes --no-input
+            ${pkg}/opt/healthchecks/manage.py compress
+          '';
+
+          serviceConfig = commonConfig // {
+            Restart = "always";
+            ExecStart = ''
+              ${pkgs.python3Packages.gunicorn}/bin/gunicorn hc.wsgi \
+                --bind ${cfg.listenAddress}:${toString cfg.port} \
+                --pythonpath ${pkg}/opt/healthchecks
+            '';
+          };
+        };
+
+        healthchecks-sendalerts = {
+          description = "Healthchecks Alert Service";
+          wantedBy = [ "healthchecks.target" ];
+          after = [ "healthchecks.service" ];
+
+          serviceConfig = commonConfig // {
+            Restart = "always";
+            ExecStart = ''
+              ${pkg}/opt/healthchecks/manage.py sendalerts
+            '';
+          };
+        };
+
+        healthchecks-sendreports = {
+          description = "Healthchecks Reporting Service";
+          wantedBy = [ "healthchecks.target" ];
+          after = [ "healthchecks.service" ];
+
+          serviceConfig = commonConfig // {
+            Restart = "always";
+            ExecStart = ''
+              ${pkg}/opt/healthchecks/manage.py sendreports --loop
+            '';
+          };
+        };
+      };
+
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} =
+        {
+          description = "healthchecks service owner";
+          isSystemUser = true;
+          group = defaultUser;
+        };
+    };
+
+    users.groups = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} =
+        {
+          members = [ defaultUser ];
+        };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/hedgedoc.nix b/nixpkgs/nixos/modules/services/web-apps/hedgedoc.nix
new file mode 100644
index 000000000000..1a66f077b09d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/hedgedoc.nix
@@ -0,0 +1,321 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkOption types mdDoc literalExpression;
+
+  cfg = config.services.hedgedoc;
+
+  # 21.03 will not be an official release - it was instead 21.05.  This
+  # versionAtLeast statement remains set to 21.03 for backwards compatibility.
+  # See https://github.com/NixOS/nixpkgs/pull/108899 and
+  # https://github.com/NixOS/rfcs/blob/master/rfcs/0080-nixos-release-schedule.md.
+  name = if lib.versionAtLeast config.system.stateVersion "21.03" then
+    "hedgedoc"
+  else
+    "codimd";
+
+  settingsFormat = pkgs.formats.json { };
+in
+{
+  meta.maintainers = with lib.maintainers; [ SuperSandro2000 h7x4 ];
+
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "codimd" ] [ "services" "hedgedoc" ])
+    (lib.mkRenamedOptionModule [ "services" "hedgedoc" "configuration" ] [ "services" "hedgedoc" "settings" ])
+    (lib.mkRenamedOptionModule [ "services" "hedgedoc" "groups" ] [ "users" "users" "hedgedoc" "extraGroups" ])
+    (lib.mkRemovedOptionModule [ "services" "hedgedoc" "workDir" ] ''
+      This option has been removed in favor of systemd managing the state directory.
+
+      If you have set this option without specifying `services.hedgedoc.settings.uploadsPath`,
+      please move these files to `/var/lib/hedgedoc/uploads`, or set the option to point
+      at the correct location.
+    '')
+  ];
+
+  options.services.hedgedoc = {
+    package = lib.mkPackageOptionMD pkgs "hedgedoc" { };
+    enable = lib.mkEnableOption (mdDoc "the HedgeDoc Markdown Editor");
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+        options = {
+          domain = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            example = "hedgedoc.org";
+            description = mdDoc ''
+              Domain to use for website.
+
+              This is useful if you are trying to run hedgedoc behind
+              a reverse proxy.
+            '';
+          };
+          urlPath = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            example = "hedgedoc";
+            description = mdDoc ''
+              URL path for the website.
+
+              This is useful if you are hosting hedgedoc on a path like
+              `www.example.com/hedgedoc`.
+            '';
+          };
+          host = mkOption {
+            type = with types; nullOr str;
+            default = "localhost";
+            description = mdDoc ''
+              Address to listen on.
+            '';
+          };
+          port = mkOption {
+            type = types.port;
+            default = 3000;
+            example = 80;
+            description = mdDoc ''
+              Port to listen on.
+            '';
+          };
+          path = mkOption {
+            type = with types; nullOr path;
+            default = null;
+            example = "/run/hedgedoc/hedgedoc.sock";
+            description = mdDoc ''
+              Path to the UNIX domain socket to listen on.
+
+              ::: {.note}
+                If specified, {option}`host` and {option}`port` will be ignored.
+              :::
+            '';
+          };
+          protocolUseSSL = mkOption {
+            type = types.bool;
+            default = false;
+            example = true;
+            description = mdDoc ''
+              Use `https://` for all links.
+
+              This is useful if you are trying to run hedgedoc behind
+              a reverse proxy.
+
+              ::: {.note}
+                Only applied if {option}`domain` is set.
+              :::
+            '';
+          };
+          allowOrigin = mkOption {
+            type = with types; listOf str;
+            default = with cfg.settings; [ host ] ++ lib.optionals (domain != null) [ domain ];
+            defaultText = literalExpression ''
+              with config.services.hedgedoc.settings; [ host ] ++ lib.optionals (domain != null) [ domain ]
+            '';
+            example = [ "localhost" "hedgedoc.org" ];
+            description = mdDoc ''
+              List of domains to whitelist.
+            '';
+          };
+          db = mkOption {
+            type = types.attrs;
+            default = {
+              dialect = "sqlite";
+              storage = "/var/lib/${name}/db.sqlite";
+            };
+            defaultText = literalExpression ''
+              {
+                dialect = "sqlite";
+                storage = "/var/lib/hedgedoc/db.sqlite";
+              }
+            '';
+            example = literalExpression ''
+              db = {
+                username = "hedgedoc";
+                database = "hedgedoc";
+                host = "localhost:5432";
+                # or via socket
+                # host = "/run/postgresql";
+                dialect = "postgresql";
+              };
+            '';
+            description = mdDoc ''
+              Specify the configuration for sequelize.
+              HedgeDoc supports `mysql`, `postgres`, `sqlite` and `mssql`.
+              See <https://sequelize.readthedocs.io/en/v3/>
+              for more information.
+
+              ::: {.note}
+                The relevant parts will be overridden if you set {option}`dbURL`.
+              :::
+            '';
+          };
+          useSSL = mkOption {
+            type = types.bool;
+            default = false;
+            description = mdDoc ''
+              Enable to use SSL server.
+
+              ::: {.note}
+                This will also enable {option}`protocolUseSSL`.
+
+                It will also require you to set the following:
+
+                - {option}`sslKeyPath`
+                - {option}`sslCertPath`
+                - {option}`sslCAPath`
+                - {option}`dhParamPath`
+              :::
+            '';
+          };
+          uploadsPath = mkOption {
+            type = types.path;
+            default = "/var/lib/${name}/uploads";
+            defaultText = "/var/lib/hedgedoc/uploads";
+            description = mdDoc ''
+              Directory for storing uploaded images.
+            '';
+          };
+
+          # Declared because we change the default to false.
+          allowGravatar = mkOption {
+            type = types.bool;
+            default = false;
+            example = true;
+            description = mdDoc ''
+              Whether to enable [Libravatar](https://wiki.libravatar.org/) as
+              profile picture source on your instance.
+
+              Despite the name of the setting, HedgeDoc replaced Gravatar
+              with Libravatar in [CodiMD 1.4.0](https://hedgedoc.org/releases/1.4.0/).
+            '';
+          };
+        };
+      };
+
+      description = mdDoc ''
+        HedgeDoc configuration, see
+        <https://docs.hedgedoc.org/configuration/>
+        for documentation.
+      '';
+    };
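+
+    # A sketch of a typical reverse-proxied instance; the domain is a placeholder
+    # and any further keys accepted by HedgeDoc can be set through the freeform
+    # settings:
+    #
+    #   services.hedgedoc = {
+    #     enable = true;
+    #     settings = {
+    #       domain = "pad.example.com";
+    #       protocolUseSSL = true;
+    #       port = 3000;
+    #     };
+    #   };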
+
+    environmentFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      example = "/var/lib/hedgedoc/hedgedoc.env";
+      description = mdDoc ''
+        Environment file as defined in {manpage}`systemd.exec(5)`.
+
+        Secrets may be passed to the service without adding them to the world-readable
+        Nix store, by specifying placeholder variables as the option value in Nix and
+        setting these variables accordingly in the environment file.
+
+        ```
+          # snippet of HedgeDoc-related config
+          services.hedgedoc.settings.dbURL = "postgres://hedgedoc:\''${DB_PASSWORD}@db-host:5432/hedgedocdb";
+          services.hedgedoc.settings.minio.secretKey = "$MINIO_SECRET_KEY";
+        ```
+
+        ```
+          # content of the environment file
+          DB_PASSWORD=verysecretdbpassword
+          MINIO_SECRET_KEY=verysecretminiokey
+        ```
+
+        Note that this file needs to be available on the host on which
+        `HedgeDoc` is running.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    users.groups.${name} = { };
+    users.users.${name} = {
+      description = "HedgeDoc service user";
+      group = name;
+      isSystemUser = true;
+    };
+
+    services.hedgedoc.settings = {
+      defaultNotePath = lib.mkDefault "${cfg.package}/public/default.md";
+      docsPath = lib.mkDefault "${cfg.package}/public/docs";
+      viewPath = lib.mkDefault "${cfg.package}/public/views";
+    };
+
+    systemd.services.hedgedoc = {
+      description = "HedgeDoc Service";
+      documentation = [ "https://docs.hedgedoc.org/" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      preStart =
+        let
+          configFile = settingsFormat.generate "hedgedoc-config.json" {
+            production = cfg.settings;
+          };
+        in
+        ''
+          ${pkgs.envsubst}/bin/envsubst \
+            -o /run/${name}/config.json \
+            -i ${configFile}
+          ${pkgs.coreutils}/bin/mkdir -p ${cfg.settings.uploadsPath}
+        '';
+      serviceConfig = {
+        User = name;
+        Group = name;
+
+        Restart = "always";
+        ExecStart = "${cfg.package}/bin/hedgedoc";
+        RuntimeDirectory = [ name ];
+        StateDirectory = [ name ];
+        WorkingDirectory = "/run/${name}";
+        ReadWritePaths = [
+          "-${cfg.settings.uploadsPath}"
+        ] ++ lib.optionals (cfg.settings.db ? "storage") [ "-${cfg.settings.db.storage}" ];
+        EnvironmentFile = lib.mkIf (cfg.environmentFile != null) [ cfg.environmentFile ];
+        Environment = [
+          "CMD_CONFIG_FILE=/run/${name}/config.json"
+          "NODE_ENV=production"
+        ];
+
+        # Hardening
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+          # Required for connecting to database sockets,
+          # and listening to unix socket at `cfg.settings.path`
+          "AF_UNIX"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SocketBindAllow = lib.mkIf (cfg.settings.path == null) cfg.settings.port;
+        SocketBindDeny = "any";
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged @obsolete"
+          "@pkey"
+        ];
+        UMask = "0007";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/hledger-web.nix b/nixpkgs/nixos/modules/services/web-apps/hledger-web.nix
new file mode 100644
index 000000000000..be8ecc645e59
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/hledger-web.nix
@@ -0,0 +1,142 @@
+{ lib, pkgs, config, ... }:
+with lib;
+let
+  cfg = config.services.hledger-web;
+in {
+  options.services.hledger-web = {
+
+    enable = mkEnableOption (lib.mdDoc "hledger-web service");
+
+    serveApi = mkEnableOption (lib.mdDoc "serving only the JSON web API, without the web UI");
+
+    host = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        Address to listen on.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 5000;
+      example = 80;
+      description = lib.mdDoc ''
+        Port to listen on.
+      '';
+    };
+
+    capabilities = {
+      view = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable the view capability.
+        '';
+      };
+      add = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the add capability.
+        '';
+      };
+      manage = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable the manage capability.
+        '';
+      };
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = "/var/lib/hledger-web";
+      description = lib.mdDoc ''
+        Path the service has access to. If left as the default value this
+        directory will automatically be created before the hledger-web server
+        starts, otherwise the sysadmin is responsible for ensuring the
+        directory exists with appropriate ownership and permissions.
+      '';
+    };
+
+    journalFiles = mkOption {
+      type = types.listOf types.str;
+      default = [ ".hledger.journal" ];
+      description = lib.mdDoc ''
+        Paths to journal files relative to {option}`services.hledger-web.stateDir`.
+      '';
+    };
+
+    baseUrl = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "https://example.org";
+      description = lib.mdDoc ''
+        Base URL, when sharing over a network.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "--forecast" ];
+      description = lib.mdDoc ''
+        Extra command line arguments to pass to hledger-web.
+      '';
+    };
+
+  };
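+
+  # A minimal sketch of a configuration using this module (the journal file name
+  # shown is the module default and only listed for illustration):
+  #
+  #   services.hledger-web = {
+  #     enable = true;
+  #     capabilities.add = true;
+  #     journalFiles = [ ".hledger.journal" ];
+  #   };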
+
+  config = mkIf cfg.enable {
+
+    users.users.hledger = {
+      name = "hledger";
+      group = "hledger";
+      isSystemUser = true;
+      home = cfg.stateDir;
+      useDefaultShell = true;
+    };
+
+    users.groups.hledger = {};
+
+    systemd.services.hledger-web = let
+      capabilityString = with cfg.capabilities; concatStringsSep "," (
+        (optional view "view")
+        ++ (optional add "add")
+        ++ (optional manage "manage")
+      );
+      serverArgs = with cfg; escapeShellArgs ([
+        "--serve"
+        "--host=${host}"
+        "--port=${toString port}"
+        "--capabilities=${capabilityString}"
+        (optionalString (cfg.baseUrl != null) "--base-url=${cfg.baseUrl}")
+        (optionalString (cfg.serveApi) "--serve-api")
+      ] ++ (map (f: "--file=${stateDir}/${f}") cfg.journalFiles)
+        ++ extraOptions);
+    in {
+      description = "hledger-web - web-app for the hledger accounting tool.";
+      documentation = [ "https://hledger.org/hledger-web.html" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      serviceConfig = mkMerge [
+        {
+          ExecStart = "${pkgs.hledger-web}/bin/hledger-web ${serverArgs}";
+          Restart = "always";
+          WorkingDirectory = cfg.stateDir;
+          User = "hledger";
+          Group = "hledger";
+          PrivateTmp = true;
+        }
+        (mkIf (cfg.stateDir == "/var/lib/hledger-web") {
+          StateDirectory = "hledger-web";
+        })
+      ];
+    };
+
+  };
+
+  meta.maintainers = with lib.maintainers; [ marijanp erictapen ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/honk.md b/nixpkgs/nixos/modules/services/web-apps/honk.md
new file mode 100644
index 000000000000..f34085f7dc52
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/honk.md
@@ -0,0 +1,23 @@
+# Honk {#module-services-honk}
+
+With Honk on NixOS you can quickly configure a complete ActivityPub server with
+minimal setup and support costs.
+
+## Basic usage {#module-services-honk-basic-usage}
+
+A minimal configuration looks like this:
+
+```nix
+{
+  services.honk = {
+    enable = true;
+    host = "0.0.0.0";
+    port = 8080;
+    username = "username";
+    passwordFile = "/etc/honk/password.txt";
+    servername = "honk.example.com";
+  };
+
+  networking.firewall.allowedTCPPorts = [ 8080 ];
+}
+```
diff --git a/nixpkgs/nixos/modules/services/web-apps/honk.nix b/nixpkgs/nixos/modules/services/web-apps/honk.nix
new file mode 100644
index 000000000000..d47b17e54ffb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/honk.nix
@@ -0,0 +1,153 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+let
+  cfg = config.services.honk;
+
+  honk-initdb-script = cfg: pkgs.writeShellApplication {
+    name = "honk-initdb-script";
+
+    runtimeInputs = with pkgs; [ coreutils ];
+
+    text = ''
+      PW=$(cat "$CREDENTIALS_DIRECTORY/honk_passwordFile")
+
+      echo -e "${cfg.username}\n''$PW\n${cfg.host}:${toString cfg.port}\n${cfg.servername}" | ${lib.getExe cfg.package} -datadir "$STATE_DIRECTORY" init
+    '';
+  };
+in
+{
+  options = {
+    services.honk = {
+      enable = lib.mkEnableOption (lib.mdDoc "the Honk server");
+      package = lib.mkPackageOptionMD pkgs "honk" { };
+
+      host = lib.mkOption {
+        default = "127.0.0.1";
+        description = lib.mdDoc ''
+          The host name or IP address the server should listen on.
+        '';
+        type = lib.types.str;
+      };
+
+      port = lib.mkOption {
+        default = 8080;
+        description = lib.mdDoc ''
+          The port the server should listen on.
+        '';
+        type = lib.types.port;
+      };
+
+      username = lib.mkOption {
+        description = lib.mdDoc ''
+          The admin account username.
+        '';
+        type = lib.types.str;
+      };
+
+      passwordFile = lib.mkOption {
+        description = lib.mdDoc ''
+          Password for the admin account.
+          NOTE: This should be a string, not a Nix store path, to prevent the password from being world-readable.
+        '';
+        type = lib.types.path;
+      };
+
+      servername = lib.mkOption {
+        description = lib.mdDoc ''
+          The server name.
+        '';
+        type = lib.types.str;
+      };
+
+      extraJS = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          An extra JavaScript file to be loaded by the client.
+        '';
+        type = lib.types.nullOr lib.types.path;
+      };
+
+      extraCSS = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          An extra CSS file to be loaded by the client.
+        '';
+        type = lib.types.nullOr lib.types.path;
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.username or "" != "";
+        message = ''
+          You have to define a username for Honk (`services.honk.username`).
+        '';
+      }
+      {
+        assertion = cfg.servername or "" != "";
+        message = ''
+          You have to define a servername for Honk (`services.honk.servername`).
+        '';
+      }
+    ];
+
+    systemd.services.honk-initdb = {
+      description = "Honk server database setup";
+      requiredBy = [ "honk.service" ];
+      before = [ "honk.service" ];
+
+      serviceConfig = {
+        LoadCredential = [
+          "honk_passwordFile:${cfg.passwordFile}"
+        ];
+        Type = "oneshot";
+        StateDirectory = "honk";
+        DynamicUser = true;
+        RemainAfterExit = true;
+        ExecStart = lib.getExe (honk-initdb-script cfg);
+        PrivateTmp = true;
+      };
+
+      unitConfig = {
+        ConditionPathExists = [
+          # Skip this service if the database already exists
+          "!%S/honk/honk.db"
+        ];
+      };
+    };
+
+    systemd.services.honk = {
+      description = "Honk server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      bindsTo = [ "honk-initdb.service" ];
+      preStart = ''
+        mkdir -p $STATE_DIRECTORY/views
+        ${lib.optionalString (cfg.extraJS != null) "ln -fs ${cfg.extraJS} $STATE_DIRECTORY/views/local.js"}
+        ${lib.optionalString (cfg.extraCSS != null) "ln -fs ${cfg.extraCSS} $STATE_DIRECTORY/views/local.css"}
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk backup $STATE_DIRECTORY/backup
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk upgrade
+        ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk cleanup
+      '';
+      serviceConfig = {
+        ExecStart = ''
+          ${lib.getExe cfg.package} -datadir $STATE_DIRECTORY -viewdir ${cfg.package}/share/honk
+        '';
+        StateDirectory = "honk";
+        DynamicUser = true;
+        PrivateTmp = "yes";
+        Restart = "on-failure";
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ drupol ];
+    doc = ./honk.md;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix b/nixpkgs/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix
new file mode 100644
index 000000000000..67d235ab4475
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/icingaweb2/icingaweb2.nix
@@ -0,0 +1,262 @@
+{ config, lib, pkgs, ... }: with lib; let
+  cfg = config.services.icingaweb2;
+  fpm = config.services.phpfpm.pools.${poolName};
+  poolName = "icingaweb2";
+
+  defaultConfig = {
+    global = {
+      module_path = "${pkgs.icingaweb2}/modules";
+    };
+  };
+in {
+  meta.maintainers = with maintainers; [ das_j ];
+
+  options.services.icingaweb2 = with types; {
+    enable = mkEnableOption (lib.mdDoc "the icingaweb2 web interface");
+
+    pool = mkOption {
+      type = str;
+      default = poolName;
+      description = lib.mdDoc ''
+        Name of an existing PHP-FPM pool that is used to run Icingaweb2.
+        If not specified, a pool will automatically be created with default values.
+      '';
+    };
+
+    libraryPaths = mkOption {
+      type = attrsOf package;
+      default = { };
+      description = lib.mdDoc ''
+        Libraries to add to the Icingaweb2 library path.
+        The name of the attribute is the name of the library, the value
+        is the package to add.
+      '';
+    };
+
+    virtualHost = mkOption {
+      type = nullOr str;
+      default = "icingaweb2";
+      description = lib.mdDoc ''
+        Name of the nginx virtual host to use and set up. If null, no virtual host is set up.
+      '';
+    };
+
+    timezone = mkOption {
+      type = str;
+      default = "UTC";
+      example = "Europe/Berlin";
+      description = lib.mdDoc "PHP-compliant timezone specification";
+    };
+
+    modules = {
+      doc.enable = mkEnableOption (lib.mdDoc "the icingaweb2 doc module");
+      migrate.enable = mkEnableOption (lib.mdDoc "the icingaweb2 migrate module");
+      setup.enable = mkEnableOption (lib.mdDoc "the icingaweb2 setup module");
+      test.enable = mkEnableOption (lib.mdDoc "the icingaweb2 test module");
+      translation.enable = mkEnableOption (lib.mdDoc "the icingaweb2 translation module");
+    };
+
+    modulePackages = mkOption {
+      type = attrsOf package;
+      default = {};
+      example = literalExpression ''
+        {
+          "snow" = icingaweb2Modules.theme-snow;
+        }
+      '';
+      description = lib.mdDoc ''
+        Name-package attrset of Icingaweb 2 module packages to enable.
+
+        If you enable modules manually (e.g. via the web UI), they will not be touched.
+      '';
+    };
+
+    generalConfig = mkOption {
+      type = nullOr attrs;
+      default = null;
+      example = {
+        general = {
+          showStacktraces = 1;
+          config_resource = "icingaweb_db";
+        };
+        logging = {
+          log = "syslog";
+          level = "CRITICAL";
+        };
+      };
+      description = lib.mdDoc ''
+        config.ini contents.
+        Will automatically be converted to a .ini file.
+        If you don't set global.module_path, the module will take care of it.
+
+        If the value is null, no config.ini is created and you can
+        modify it manually (e.g. via the web interface).
+        Note that you need to update module_path manually.
+      '';
+    };
+
+    resources = mkOption {
+      type = nullOr attrs;
+      default = null;
+      example = {
+        icingaweb_db = {
+          type = "db";
+          db = "mysql";
+          host = "localhost";
+          username = "icingaweb2";
+          password = "icingaweb2";
+          dbname = "icingaweb2";
+        };
+      };
+      description = lib.mdDoc ''
+        resources.ini contents.
+        Will automatically be converted to a .ini file.
+
+        If the value is null, no resources.ini is created and you can
+        modify it manually (e.g. via the web interface).
+        Note that if you set passwords here, they will go into the nix store.
+      '';
+    };
+
+    authentications = mkOption {
+      type = nullOr attrs;
+      default = null;
+      example = {
+        icingaweb = {
+          backend = "db";
+          resource = "icingaweb_db";
+        };
+      };
+      description = lib.mdDoc ''
+        authentication.ini contents.
+        Will automatically be converted to a .ini file.
+
+        If the value is null, no authentication.ini is created and you can
+        modify it manually (e.g. via the web interface).
+      '';
+    };
+
+    groupBackends = mkOption {
+      type = nullOr attrs;
+      default = null;
+      example = {
+        icingaweb = {
+          backend = "db";
+          resource = "icingaweb_db";
+        };
+      };
+      description = lib.mdDoc ''
+        groups.ini contents.
+        Will automatically be converted to a .ini file.
+
+        If the value is null, no groups.ini is created and you can
+        modify it manually (e.g. via the web interface).
+      '';
+    };
+
+    roles = mkOption {
+      type = nullOr attrs;
+      default = null;
+      example = {
+        Administrators = {
+          users = "admin";
+          permissions = "*";
+        };
+      };
+      description = lib.mdDoc ''
+        roles.ini contents.
+        Will automatically be converted to a .ini file.
+
+        If the value is null, no roles.ini is created and you can
+        modify it manually (e.g. via the web interface).
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.phpfpm.pools = mkIf (cfg.pool == "${poolName}") {
+      ${poolName} = {
+        user = "icingaweb2";
+        phpEnv = {
+          ICINGAWEB_LIBDIR = toString (pkgs.linkFarm "icingaweb2-libdir" (mapAttrsToList (name: path: { inherit name path; }) cfg.libraryPaths));
+        };
+        phpPackage = pkgs.php.withExtensions ({ enabled, all }: [ all.imagick ] ++ enabled);
+        phpOptions = ''
+          date.timezone = "${cfg.timezone}"
+        '';
+        settings = mapAttrs (name: mkDefault) {
+          "listen.owner" = "nginx";
+          "listen.group" = "nginx";
+          "listen.mode" = "0600";
+          "pm" = "dynamic";
+          "pm.max_children" = 75;
+          "pm.start_servers" = 2;
+          "pm.min_spare_servers" = 2;
+          "pm.max_spare_servers" = 10;
+        };
+      };
+    };
+
+    services.icingaweb2.libraryPaths = {
+      ipl = pkgs.icingaweb2-ipl;
+      thirdparty = pkgs.icingaweb2-thirdparty;
+    };
+
+    systemd.services."phpfpm-${poolName}".serviceConfig.ReadWritePaths = [ "/etc/icingaweb2" ];
+
+    services.nginx = {
+      enable = true;
+      virtualHosts = mkIf (cfg.virtualHost != null) {
+        ${cfg.virtualHost} = {
+          root = "${pkgs.icingaweb2}/public";
+
+          extraConfig = ''
+            index index.php;
+            try_files $1 $uri $uri/ /index.php$is_args$args;
+          '';
+
+          locations."~ ..*/.*.php$".extraConfig = ''
+            return 403;
+          '';
+
+          locations."~ ^/index.php(.*)$".extraConfig = ''
+            fastcgi_intercept_errors on;
+            fastcgi_index index.php;
+            include ${config.services.nginx.package}/conf/fastcgi.conf;
+            try_files $uri =404;
+            fastcgi_split_path_info ^(.+\.php)(/.+)$;
+            fastcgi_pass unix:${fpm.socket};
+            fastcgi_param SCRIPT_FILENAME ${pkgs.icingaweb2}/public/index.php;
+          '';
+        };
+      };
+    };
+
+    # /etc/icingaweb2
+    environment.etc = let
+      doModule = name: optionalAttrs (cfg.modules.${name}.enable) { "icingaweb2/enabledModules/${name}".source = "${pkgs.icingaweb2}/modules/${name}"; };
+    in {}
+      # Module packages
+      // (mapAttrs' (k: v: nameValuePair "icingaweb2/enabledModules/${k}" { source = v; }) cfg.modulePackages)
+      # Built-in modules
+      // doModule "doc"
+      // doModule "migrate"
+      // doModule "setup"
+      // doModule "test"
+      // doModule "translation"
+      # Configs
+      // optionalAttrs (cfg.generalConfig != null) { "icingaweb2/config.ini".text = generators.toINI {} (defaultConfig // cfg.generalConfig); }
+      // optionalAttrs (cfg.resources != null) { "icingaweb2/resources.ini".text = generators.toINI {} cfg.resources; }
+      // optionalAttrs (cfg.authentications != null) { "icingaweb2/authentication.ini".text = generators.toINI {} cfg.authentications; }
+      // optionalAttrs (cfg.groupBackends != null) { "icingaweb2/groups.ini".text = generators.toINI {} cfg.groupBackends; }
+      // optionalAttrs (cfg.roles != null) { "icingaweb2/roles.ini".text = generators.toINI {} cfg.roles; };
+
+    # User and group
+    users.groups.icingaweb2 = {};
+    users.users.icingaweb2 = {
+      description = "Icingaweb2 service user";
+      group = "icingaweb2";
+      isSystemUser = true;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix b/nixpkgs/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix
new file mode 100644
index 000000000000..9a848870e9da
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/icingaweb2/module-monitoring.nix
@@ -0,0 +1,157 @@
+{ config, lib, pkgs, ... }: with lib; let
+  cfg = config.services.icingaweb2.modules.monitoring;
+
+  configIni = ''
+    [security]
+    protected_customvars = "${concatStringsSep "," cfg.generalConfig.protectedVars}"
+  '';
+
+  backendsIni = let
+    formatBool = b: if b then "1" else "0";
+  in concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    type = "ido"
+    resource = "${config.resource}"
+    disabled = "${formatBool config.disabled}"
+  '') cfg.backends);
+
+  transportsIni = concatStringsSep "\n" (mapAttrsToList (name: config: ''
+    [${name}]
+    type = "${config.type}"
+    ${optionalString (config.instance != null) ''instance = "${config.instance}"''}
+    ${optionalString (config.type == "local" || config.type == "remote") ''path = "${config.path}"''}
+    ${optionalString (config.type != "local") ''
+      host = "${config.host}"
+      ${optionalString (config.port != null) ''port = "${toString config.port}"''}
+      user${optionalString (config.type == "api") "name"} = "${config.username}"
+    ''}
+    ${optionalString (config.type == "api") ''password = "${config.password}"''}
+    ${optionalString (config.type == "remote") ''resource = "${config.resource}"''}
+  '') cfg.transports);
+
+in {
+  options.services.icingaweb2.modules.monitoring = with types; {
+    enable = mkOption {
+      type = bool;
+      default = true;
+      description = lib.mdDoc "Whether to enable the icingaweb2 monitoring module.";
+    };
+
+    generalConfig = {
+      mutable = mkOption {
+        type = bool;
+        default = false;
+        description = lib.mdDoc "Make config.ini of the monitoring module mutable (e.g. via the web interface).";
+      };
+
+      protectedVars = mkOption {
+        type = listOf str;
+        default = [ "*pw*" "*pass*" "community" ];
+        description = lib.mdDoc "List of string patterns for custom variables which should be excluded from user’s view.";
+      };
+    };
+
+    mutableBackends = mkOption {
+      type = bool;
+      default = false;
+      description = lib.mdDoc "Make backends.ini of the monitoring module mutable (e.g. via the web interface).";
+    };
+
+    backends = mkOption {
+      default = { icinga = { resource = "icinga_ido"; }; };
+      description = lib.mdDoc "Monitoring backends to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = lib.mdDoc "Name of this backend";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = lib.mdDoc "Name of the IDO resource";
+          };
+
+          disabled = mkOption {
+            type = bool;
+            default = false;
+            description = lib.mdDoc "Disable this backend";
+          };
+        };
+      }));
+    };
+
+    mutableTransports = mkOption {
+      type = bool;
+      default = true;
+      description = lib.mdDoc "Make commandtransports.ini of the monitoring module mutable (e.g. via the web interface).";
+    };
+
+    transports = mkOption {
+      default = {};
+      description = lib.mdDoc "Command transports to define";
+      type = attrsOf (submodule ({ name, ... }: {
+        options = {
+          name = mkOption {
+            visible = false;
+            default = name;
+            type = str;
+            description = lib.mdDoc "Name of this transport";
+          };
+
+          type = mkOption {
+            type = enum [ "api" "local" "remote" ];
+            default = "api";
+            description = lib.mdDoc "Type of  this transport";
+          };
+
+          instance = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "Assign a icinga instance to this transport";
+          };
+
+          path = mkOption {
+            type = str;
+            description = lib.mdDoc "Path to the socket for local or remote transports";
+          };
+
+          host = mkOption {
+            type = str;
+            description = lib.mdDoc "Host for the api or remote transport";
+          };
+
+          port = mkOption {
+            type = nullOr str;
+            default = null;
+            description = lib.mdDoc "Port to connect to for the api or remote transport";
+          };
+
+          username = mkOption {
+            type = str;
+            description = lib.mdDoc "Username for the api or remote transport";
+          };
+
+          password = mkOption {
+            type = str;
+            description = lib.mdDoc "Password for the api transport";
+          };
+
+          resource = mkOption {
+            type = str;
+            description = lib.mdDoc "SSH identity resource for the remote transport";
+          };
+        };
+      }));
+    };
+  };
+
+  config = mkIf (config.services.icingaweb2.enable && cfg.enable) {
+    environment.etc = { "icingaweb2/enabledModules/monitoring" = { source = "${pkgs.icingaweb2}/modules/monitoring"; }; }
+      // optionalAttrs (!cfg.generalConfig.mutable) { "icingaweb2/modules/monitoring/config.ini".text = configIni; }
+      // optionalAttrs (!cfg.mutableBackends) { "icingaweb2/modules/monitoring/backends.ini".text = backendsIni; }
+      // optionalAttrs (!cfg.mutableTransports) { "icingaweb2/modules/monitoring/commandtransports.ini".text = transportsIni; };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/invidious.nix b/nixpkgs/nixos/modules/services/web-apps/invidious.nix
new file mode 100644
index 000000000000..e4fbc6fd9368
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/invidious.nix
@@ -0,0 +1,313 @@
+{ lib, config, pkgs, options, ... }:
+let
+  cfg = config.services.invidious;
+  # To allow injecting secrets with jq, json (instead of yaml) is used
+  settingsFormat = pkgs.formats.json { };
+  inherit (lib) types;
+
+  settingsFile = settingsFormat.generate "invidious-settings" cfg.settings;
+
+  generatedHmacKeyFile = "/var/lib/invidious/hmac_key";
+  generateHmac = cfg.hmacKeyFile == null;
+
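+  # Base systemd unit and settings; merged with localDatabaseConfig and nginxConfig in the final config below.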
+  serviceConfig = {
+    systemd.services.invidious = {
+      description = "Invidious (An alternative YouTube front-end)";
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      preStart = lib.optionalString generateHmac ''
+        if [[ ! -e "${generatedHmacKeyFile}" ]]; then
+          ${pkgs.pwgen}/bin/pwgen 20 1 > "${generatedHmacKeyFile}"
+          chmod 0600 "${generatedHmacKeyFile}"
+        fi
+      '';
+
+      script = ''
+        configParts=()
+      ''
+      # autogenerated hmac_key
+      + lib.optionalString generateHmac ''
+        configParts+=("$(${pkgs.jq}/bin/jq -R '{"hmac_key":.}' <"${generatedHmacKeyFile}")")
+      ''
+      # generated settings file
+      + ''
+        configParts+=("$(< ${lib.escapeShellArg settingsFile})")
+      ''
+      # optional database password file
+      + lib.optionalString (cfg.database.host != null) ''
+        configParts+=("$(${pkgs.jq}/bin/jq -R '{"db":{"password":.}}' ${lib.escapeShellArg cfg.database.passwordFile})")
+      ''
+      # optional extra settings file
+      + lib.optionalString (cfg.extraSettingsFile != null) ''
+        configParts+=("$(< ${lib.escapeShellArg cfg.extraSettingsFile})")
+      ''
+      # explicitly specified hmac key file
+      + lib.optionalString (cfg.hmacKeyFile != null) ''
+        configParts+=("$(< ${lib.escapeShellArg cfg.hmacKeyFile})")
+      ''
+      # merge all parts into a single configuration with later elements overriding previous elements
+      + ''
+        export INVIDIOUS_CONFIG="$(${pkgs.jq}/bin/jq -s 'reduce .[] as $item ({}; . * $item)' <<<"''${configParts[*]}")"
+        exec ${cfg.package}/bin/invidious
+      '';
+
+      serviceConfig = {
+        RestartSec = "2s";
+        DynamicUser = true;
+        StateDirectory = "invidious";
+        StateDirectoryMode = "0750";
+
+        CapabilityBoundingSet = "";
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectHome = true;
+        ProtectKernelLogs = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+
+        # Because of various issues Invidious must be restarted often, at least once a day, ideally
+        # every hour.
+        # This option enables the automatic restarting of the Invidious instance.
+        Restart = lib.mkDefault "always";
+        RuntimeMaxSec = lib.mkDefault "1h";
+      };
+    };
+
+    services.invidious.settings = {
+      inherit (cfg) port;
+
+      # Automatically initialises and migrates the database if necessary
+      check_tables = true;
+
+      db = {
+        user = lib.mkDefault "kemal";
+        dbname = lib.mkDefault "invidious";
+        port = cfg.database.port;
+        # Blank for unix sockets, see
+        # https://github.com/will/crystal-pg/blob/1548bb255210/src/pq/conninfo.cr#L100-L108
+        host = lib.optionalString (cfg.database.host != null) cfg.database.host;
+        # Not needed because peer authentication is enabled
+        password = lib.mkIf (cfg.database.host == null) "";
+      };
+    } // (lib.optionalAttrs (cfg.domain != null) {
+      inherit (cfg) domain;
+    });
+
+    assertions = [{
+      assertion = cfg.database.host != null -> cfg.database.passwordFile != null;
+      message = "If database host isn't null, database password needs to be set";
+    }];
+  };
+
+  # Settings necessary for running with an automatically managed local database
+  localDatabaseConfig = lib.mkIf cfg.database.createLocally {
+    # Default to using the local database if we create it
+    services.invidious.database.host = lib.mkDefault null;
+
+    # TODO(raitobezarius to maintainers of invidious): I strongly advise to clean up the kemal specific
+    # thing for 24.05 and use `ensureDBOwnership`.
+    # See https://github.com/NixOS/nixpkgs/issues/216989
+    systemd.services.postgresql.postStart = lib.mkAfter ''
+      $PSQL -tAc 'ALTER DATABASE "${cfg.settings.db.dbname}" OWNER TO "${cfg.settings.db.user}";'
+    '';
+    services.postgresql = {
+      enable = true;
+      ensureUsers = lib.singleton { name = cfg.settings.db.user; ensureDBOwnership = false; };
+      ensureDatabases = lib.singleton cfg.settings.db.dbname;
+      # This is only needed because the unix user invidious isn't the same as
+      # the database user. This tells postgres to map one to the other.
+      identMap = ''
+        invidious invidious ${cfg.settings.db.user}
+      '';
+      # And this specifically enables peer authentication for only this
+      # database, which allows passwordless authentication over the postgres
+      # unix socket for the user map given above.
+      authentication = ''
+        local ${cfg.settings.db.dbname} ${cfg.settings.db.user} peer map=invidious
+      '';
+    };
+
+    systemd.services.invidious-db-clean = {
+      description = "Invidious database cleanup";
+      documentation = [ "https://docs.invidious.io/Database-Information-and-Maintenance.md" ];
+      startAt = lib.mkDefault "weekly";
+      path = [ config.services.postgresql.package ];
+      after = [ "postgresql.service" ];
+      script = ''
+        psql ${cfg.settings.db.dbname} ${cfg.settings.db.user} -c "DELETE FROM nonces * WHERE expire < current_timestamp"
+        psql ${cfg.settings.db.dbname} ${cfg.settings.db.user} -c "TRUNCATE TABLE videos"
+      '';
+      serviceConfig = {
+        DynamicUser = true;
+        User = "invidious";
+      };
+    };
+
+    systemd.services.invidious = {
+      requires = [ "postgresql.service" ];
+      after = [ "postgresql.service" ];
+
+      serviceConfig = {
+        User = "invidious";
+      };
+    };
+  };
+
+  nginxConfig = lib.mkIf cfg.nginx.enable {
+    services.invidious.settings = {
+      https_only = config.services.nginx.virtualHosts.${cfg.domain}.forceSSL;
+      external_port = 80;
+    };
+
+    services.nginx = {
+      enable = true;
+      virtualHosts.${cfg.domain} = {
+        locations."/".proxyPass = "http://127.0.0.1:${toString cfg.port}";
+
+        enableACME = lib.mkDefault true;
+        forceSSL = lib.mkDefault true;
+      };
+    };
+
+    assertions = [{
+      assertion = cfg.domain != null;
+      message = "To use services.invidious.nginx, you need to set services.invidious.domain";
+    }];
+  };
+in
+{
+  options.services.invidious = {
+    enable = lib.mkEnableOption (lib.mdDoc "Invidious");
+
+    package = lib.mkOption {
+      type = types.package;
+      default = pkgs.invidious;
+      defaultText = lib.literalExpression "pkgs.invidious";
+      description = lib.mdDoc "The Invidious package to use.";
+    };
+
+    settings = lib.mkOption {
+      type = settingsFormat.type;
+      default = { };
+      description = lib.mdDoc ''
+        The settings Invidious should use.
+
+        See [config.example.yml](https://github.com/iv-org/invidious/blob/master/config/config.example.yml) for a list of all possible options.
+      '';
+    };
+
+    hmacKeyFile = lib.mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        A path to a file containing the `hmac_key`. If `null`, a key will be generated automatically on first
+        start.
+
+        If non-`null`, this option overrides any `hmac_key` specified in {option}`services.invidious.settings` or
+        via {option}`services.invidious.extraSettingsFile`.
+      '';
+    };
+
+    extraSettingsFile = lib.mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Path to a file containing additional Invidious settings.
+
+        It gets merged with the settings specified in {option}`services.invidious.settings`
+        and can be used to store secrets like `hmac_key` outside of the nix store.
+      '';
+    };
+
+    # This needs to be outside of settings to avoid infinite recursion
+    # (determining if nginx should be enabled and therefore the settings
+    # modified).
+    domain = lib.mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The FQDN Invidious is reachable on.
+
+        This is used to configure nginx and for building absolute URLs.
+      '';
+    };
+
+    port = lib.mkOption {
+      type = types.port;
+      # Default from https://docs.invidious.io/Configuration.md
+      default = 3000;
+      description = lib.mdDoc ''
+        The port Invidious should listen on.
+
+        To allow access from outside,
+        you can use either {option}`services.invidious.nginx`
+        or add `config.services.invidious.port` to {option}`networking.firewall.allowedTCPPorts`.
+      '';
+    };
+
+    database = {
+      createLocally = lib.mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to create a local database with PostgreSQL.
+        '';
+      };
+
+      host = lib.mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The database host Invidious should use.
+
+          If `null`, the local unix socket is used. Otherwise
+          TCP is used.
+        '';
+      };
+
+      port = lib.mkOption {
+        type = types.port;
+        default = options.services.postgresql.port.default;
+        defaultText = lib.literalExpression "options.services.postgresql.port.default";
+        description = lib.mdDoc ''
+          The port of the database Invidious should use.
+
+          Defaults to the default PostgreSQL port.
+        '';
+      };
+
+      passwordFile = lib.mkOption {
+        type = types.nullOr types.str;
+        apply = lib.mapNullable toString;
+        default = null;
+        description = lib.mdDoc ''
+          Path to file containing the database password.
+        '';
+      };
+    };
+
+    nginx.enable = lib.mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to configure nginx as a reverse proxy for Invidious.
+
+        Invidious is served under the domain specified in {option}`services.invidious.settings.domain` with TLS and ACME enabled.
+        Further configuration can be done through {option}`services.nginx.virtualHosts.''${config.services.invidious.settings.domain}.*`,
+        which can also be used to disable ACME and TLS.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [
+    serviceConfig
+    localDatabaseConfig
+    nginxConfig
+  ]);
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/invoiceplane.nix b/nixpkgs/nixos/modules/services/web-apps/invoiceplane.nix
new file mode 100644
index 000000000000..f419b75cf70f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/invoiceplane.nix
@@ -0,0 +1,358 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.invoiceplane;
+  eachSite = cfg.sites;
+  user = "invoiceplane";
+  webserver = config.services.${cfg.webserver};
+
+  invoiceplane-config = hostName: cfg: pkgs.writeText "ipconfig.php" ''
+    IP_URL=http://${hostName}
+    ENABLE_DEBUG=false
+    DISABLE_SETUP=false
+    REMOVE_INDEXPHP=false
+    DB_HOSTNAME=${cfg.database.host}
+    DB_USERNAME=${cfg.database.user}
+    # NOTE: file_get_contents adds a newline at the end of the returned string
+    DB_PASSWORD=${optionalString (cfg.database.passwordFile != null) "trim(file_get_contents('${cfg.database.passwordFile}'), \"\\r\\n\")"}
+    DB_DATABASE=${cfg.database.name}
+    DB_PORT=${toString cfg.database.port}
+    SESS_EXPIRATION=864000
+    ENABLE_INVOICE_DELETION=false
+    DISABLE_READ_ONLY=false
+    ENCRYPTION_KEY=
+    ENCRYPTION_CIPHER=AES-256
+    SETUP_COMPLETED=false
+    REMOVE_INDEXPHP=true
+  '';
+
+  extraConfig = hostName: cfg: pkgs.writeText "extraConfig.php" ''
+    ${toString cfg.extraConfig}
+  '';
+
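+  # Build a per-site copy of the InvoicePlane sources, replacing mutable paths (uploads,
+  # logs, mpdf tmp, ipconfig.php) with symlinks into the site's stateDir.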
+  pkg = hostName: cfg: pkgs.stdenv.mkDerivation rec {
+    pname = "invoiceplane-${hostName}";
+    version = src.version;
+    src = pkgs.invoiceplane;
+
+    postPhase = ''
+      # Patch index.php file to load additional config file
+      substituteInPlace index.php \
+        --replace "require('vendor/autoload.php');" "require('vendor/autoload.php'); \$dotenv = Dotenv\Dotenv::createImmutable(__DIR__, 'extraConfig.php'); \$dotenv->load();";
+    '';
+
+    installPhase = ''
+      mkdir -p $out
+      cp -r * $out/
+
+      # symlink uploads and log directories
+      rm -r $out/uploads $out/application/logs $out/vendor/mpdf/mpdf/tmp
+      ln -sf ${cfg.stateDir}/uploads $out/
+      ln -sf ${cfg.stateDir}/logs $out/application/
+      ln -sf ${cfg.stateDir}/tmp $out/vendor/mpdf/mpdf/
+
+      # symlink the InvoicePlane config
+      ln -s ${cfg.stateDir}/ipconfig.php $out/ipconfig.php
+
+      # symlink the extraConfig file
+      ln -s ${extraConfig hostName cfg} $out/extraConfig.php
+
+      # symlink additional templates
+      ${concatMapStringsSep "\n" (template: "cp -r ${template}/. $out/application/views/invoice_templates/pdf/") cfg.invoiceTemplates}
+    '';
+  };
+
+  siteOpts = { lib, name, ... }:
+    {
+      options = {
+
+        enable = mkEnableOption (lib.mdDoc "InvoicePlane web application");
+
+        stateDir = mkOption {
+          type = types.path;
+          default = "/var/lib/invoiceplane/${name}";
+          description = lib.mdDoc ''
+            This directory is used for uploads of attachments and cache.
+            The directory passed here is automatically created and permissions
+            adjusted as required.
+          '';
+        };
+
+        database = {
+          host = mkOption {
+            type = types.str;
+            default = "localhost";
+            description = lib.mdDoc "Database host address.";
+          };
+
+          port = mkOption {
+            type = types.port;
+            default = 3306;
+            description = lib.mdDoc "Database host port.";
+          };
+
+          name = mkOption {
+            type = types.str;
+            default = "invoiceplane";
+            description = lib.mdDoc "Database name.";
+          };
+
+          user = mkOption {
+            type = types.str;
+            default = "invoiceplane";
+            description = lib.mdDoc "Database user.";
+          };
+
+          passwordFile = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            example = "/run/keys/invoiceplane-dbpassword";
+            description = lib.mdDoc ''
+              A file containing the password corresponding to
+              {option}`database.user`.
+            '';
+          };
+
+          createLocally = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc "Create the database and database user locally.";
+          };
+        };
+
+        invoiceTemplates = mkOption {
+          type = types.listOf types.path;
+          default = [];
+          description = lib.mdDoc ''
+            List of paths to additional invoice templates, which are copied into the application's 'invoice_templates/pdf' directory.
+
+            ::: {.note}
+            These templates need to be packaged before use, see example.
+            :::
+          '';
+          example = literalExpression ''
+            let
+              # Let's package an example template
+              template-vtdirektmarketing = pkgs.stdenv.mkDerivation {
+                name = "vtdirektmarketing";
+                # Download the template from a public repository
+                src = pkgs.fetchgit {
+                  url = "https://git.project-insanity.org/onny/invoiceplane-vtdirektmarketing.git";
+                  sha256 = "1hh0q7wzsh8v8x03i82p6qrgbxr4v5fb05xylyrpp975l8axyg2z";
+                };
+                sourceRoot = ".";
+                # Installing simply means copying template php file to the output directory
+                installPhase = ""
+                  mkdir -p $out
+                  cp invoiceplane-vtdirektmarketing/vtdirektmarketing.php $out/
+                "";
+              };
+            # And then pass this package to the template list like this:
+            in [ template-vtdirektmarketing ]
+          '';
+        };
+
+        poolConfig = mkOption {
+          type = with types; attrsOf (oneOf [ str int bool ]);
+          default = {
+            "pm" = "dynamic";
+            "pm.max_children" = 32;
+            "pm.start_servers" = 2;
+            "pm.min_spare_servers" = 2;
+            "pm.max_spare_servers" = 4;
+            "pm.max_requests" = 500;
+          };
+          description = lib.mdDoc ''
+            Options for the InvoicePlane PHP pool. See the documentation on `php-fpm.conf`
+            for details on configuration directives.
+          '';
+        };
+
+        extraConfig = mkOption {
+          type = types.nullOr types.lines;
+          default = null;
+          example = ''
+            SETUP_COMPLETED=true
+            DISABLE_SETUP=true
+            IP_URL=https://invoice.example.com
+          '';
+          description = lib.mdDoc ''
+            InvoicePlane configuration. Refer to
+            <https://github.com/InvoicePlane/InvoicePlane/blob/master/ipconfig.php.example>
+            for details on supported values.
+          '';
+        };
+
+        cron = {
+
+          enable = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Enable the cron service, which periodically runs InvoicePlane tasks.
+              Requires the key taken from the administration page. Refer to
+              <https://wiki.invoiceplane.com/en/1.0/modules/recurring-invoices>
+              on how to configure it.
+            '';
+          };
+
+          key = mkOption {
+            type = types.str;
+            description = lib.mdDoc "Cron key taken from the administration page.";
+          };
+
+        };
+
+      };
+
+    };
+in
+{
+  # interface
+  options = {
+    services.invoiceplane = mkOption {
+      type = types.submodule {
+
+        options.sites = mkOption {
+          type = types.attrsOf (types.submodule siteOpts);
+          default = {};
+          description = lib.mdDoc "Specification of one or more WordPress sites to serve";
+        };
+
+        options.webserver = mkOption {
+          type = types.enum [ "caddy" ];
+          default = "caddy";
+          description = lib.mdDoc ''
+            Which webserver to use for virtual host management. Currently only
+            caddy is supported.
+          '';
+        };
+      };
+      default = {};
+      description = lib.mdDoc "InvoicePlane configuration.";
+    };
+
+  };
+
+  # implementation
+  config = mkIf (eachSite != {}) (mkMerge [{
+
+    assertions = flatten (mapAttrsToList (hostName: cfg:
+      [{ assertion = cfg.database.createLocally -> cfg.database.user == user;
+        message = ''services.invoiceplane.sites."${hostName}".database.user must be ${user} if the database is to be automatically provisioned'';
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = ''services.invoiceplane.sites."${hostName}".database.passwordFile cannot be specified if services.invoiceplane.sites."${hostName}".database.createLocally is set to true.'';
+      }
+      { assertion = cfg.cron.enable -> cfg.cron.key != null;
+        message = ''services.invoiceplane.sites."${hostName}".cron.key must be set in order to use cron service.'';
+      }
+    ]) eachSite);
+
+    services.mysql = mkIf (any (v: v.database.createLocally) (attrValues eachSite)) {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = mapAttrsToList (hostName: cfg: cfg.database.name) eachSite;
+      ensureUsers = mapAttrsToList (hostName: cfg:
+        { name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ) eachSite;
+    };
+
+    services.phpfpm = {
+      phpPackage = pkgs.php81;
+      pools = mapAttrs' (hostName: cfg: (
+        nameValuePair "invoiceplane-${hostName}" {
+          inherit user;
+          group = webserver.group;
+          settings = {
+            "listen.owner" = webserver.user;
+            "listen.group" = webserver.group;
+          } // cfg.poolConfig;
+        }
+      )) eachSite;
+    };
+
+  }
+
+  {
+
+    systemd.tmpfiles.rules = flatten (mapAttrsToList (hostName: cfg: [
+      "d ${cfg.stateDir} 0750 ${user} ${webserver.group} - -"
+      "f ${cfg.stateDir}/ipconfig.php 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/logs 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/uploads 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/uploads/archive 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/uploads/customer_files 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/uploads/temp 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/uploads/temp/mpdf 0750 ${user} ${webserver.group} - -"
+      "d ${cfg.stateDir}/tmp 0750 ${user} ${webserver.group} - -"
+    ]) eachSite);
+
+    systemd.services.invoiceplane-config = {
+      serviceConfig.Type = "oneshot";
+      script = concatStrings (mapAttrsToList (hostName: cfg:
+        ''
+          mkdir -p ${cfg.stateDir}/logs \
+                   ${cfg.stateDir}/uploads
+          if ! grep -q IP_URL "${cfg.stateDir}/ipconfig.php"; then
+            cp "${invoiceplane-config hostName cfg}" "${cfg.stateDir}/ipconfig.php"
+          fi
+        '') eachSite);
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    users.users.${user} = {
+      group = webserver.group;
+      isSystemUser = true;
+    };
+
+  }
+  {
+
+    # Cron service implementation
+
+    systemd.timers = mapAttrs' (hostName: cfg: (
+      nameValuePair "invoiceplane-cron-${hostName}" (mkIf cfg.cron.enable {
+        wantedBy = [ "timers.target" ];
+        timerConfig = {
+          OnBootSec = "5m";
+          OnUnitActiveSec = "5m";
+          Unit = "invoiceplane-cron-${hostName}.service";
+        };
+      })
+    )) eachSite;
+
+    systemd.services =
+      mapAttrs' (hostName: cfg: (
+        nameValuePair "invoiceplane-cron-${hostName}" (mkIf cfg.cron.enable {
+          serviceConfig = {
+            Type = "oneshot";
+            User = user;
+            ExecStart = "${pkgs.curl}/bin/curl --header 'Host: ${hostName}' http://localhost/invoices/cron/recur/${cfg.cron.key}";
+          };
+        })
+    )) eachSite;
+
+  }
+
+  (mkIf (cfg.webserver == "caddy") {
+    services.caddy = {
+      enable = true;
+      virtualHosts = mapAttrs' (hostName: cfg: (
+        nameValuePair "http://${hostName}" {
+          extraConfig = ''
+            root * ${pkg hostName cfg}
+            file_server
+            php_fastcgi unix/${config.services.phpfpm.pools."invoiceplane-${hostName}".socket}
+          '';
+        }
+      )) eachSite;
+    };
+  })
+
+  ]);
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/isso.nix b/nixpkgs/nixos/modules/services/web-apps/isso.nix
new file mode 100644
index 000000000000..6cb2d9ec785e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/isso.nix
@@ -0,0 +1,91 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption types literalExpression;
+
+  cfg = config.services.isso;
+
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "isso.conf" cfg.settings;
+in {
+
+  options = {
+    services.isso = {
+      enable = mkEnableOption (lib.mdDoc ''
+        isso, a commenting server similar to Disqus.
+
+        Note: The application's author recommends running isso behind a reverse proxy.
+        The embedded solution offered by NixOS is only suitable for small installations
+        handling fewer than 20 requests per second.
+      '');
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Configuration for `isso`.
+
+          See [Isso Server Configuration](https://posativ.org/isso/docs/configuration/server/)
+          for supported values.
+        '';
+
+        type = types.submodule {
+          freeformType = settingsFormat.type;
+        };
+
+        example = literalExpression ''
+          {
+            general = {
+              host = "http://localhost";
+            };
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.isso.settings.general.dbpath = lib.mkDefault "/var/lib/isso/comments.db";
+
+    systemd.services.isso = {
+      description = "isso, a commenting server similar to Disqus";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = "isso";
+        Group = "isso";
+
+        DynamicUser = true;
+
+        StateDirectory = "isso";
+
+        ExecStart = ''
+          ${pkgs.isso}/bin/isso -c ${configFile}
+        '';
+
+        Restart = "on-failure";
+        RestartSec = 1;
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+        UMask = "0077";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/jirafeau.nix b/nixpkgs/nixos/modules/services/web-apps/jirafeau.nix
new file mode 100644
index 000000000000..b2e274167164
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/jirafeau.nix
@@ -0,0 +1,173 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.jirafeau;
+
+  group = config.services.nginx.group;
+  user = config.services.nginx.user;
+
+  withTrailingSlash = str: if hasSuffix "/" str then str else "${str}/";
+
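+  # Generated config.local.php; passed to the PHP pool below via the JIRAFEAU_CONFIG environment variable.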
+  localConfig = pkgs.writeText "config.local.php" ''
+    <?php
+      $cfg['admin_password'] = '${cfg.adminPasswordSha256}';
+      $cfg['web_root'] = 'http://${withTrailingSlash cfg.hostName}';
+      $cfg['var_root'] = '${withTrailingSlash cfg.dataDir}';
+      $cfg['maximal_upload_size'] = ${builtins.toString cfg.maxUploadSizeMegabytes};
+      $cfg['installation_done'] = true;
+
+      ${cfg.extraConfig}
+  '';
+in
+{
+  options.services.jirafeau = {
+    adminPasswordSha256 = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc ''
+        SHA-256 of the desired administration password. Leave blank/unset for no password.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/jirafeau/data/";
+      description = lib.mdDoc "Location of Jirafeau storage directory.";
+    };
+
+    enable = mkEnableOption (lib.mdDoc "Jirafeau file upload application");
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        $cfg['style'] = 'courgette';
+        $cfg['organisation'] = 'ACME';
+      '';
+      description =  let
+        documentationLink =
+          "https://gitlab.com/mojo42/Jirafeau/-/blob/${cfg.package.version}/lib/config.original.php";
+      in
+        lib.mdDoc ''
+          Jirafeau configuration. Refer to <${documentationLink}> for supported
+          values.
+        '';
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "URL of instance. Must have trailing slash.";
+    };
+
+    maxUploadSizeMegabytes = mkOption {
+      type = types.int;
+      default = 0;
+      description = lib.mdDoc "Maximum upload size of accepted files.";
+    };
+
+    maxUploadTimeout = mkOption {
+      type = types.str;
+      default = "30m";
+      description = let
+        nginxCoreDocumentation = "http://nginx.org/en/docs/http/ngx_http_core_module.html";
+      in
+        lib.mdDoc ''
+          Timeout for reading client request bodies and headers. Refer to
+          <${nginxCoreDocumentation}#client_body_timeout> and
+          <${nginxCoreDocumentation}#client_header_timeout> for accepted values.
+        '';
+    };
+
+    nginxConfig = mkOption {
+      type = types.submodule
+        (import ../web-servers/nginx/vhost-options.nix { inherit config lib; });
+      default = {};
+      example = literalExpression ''
+        {
+          serverAliases = [ "wiki.''${config.networking.domain}" ];
+        }
+      '';
+      description = lib.mdDoc "Extra configuration for the nginx virtual host of Jirafeau.";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.jirafeau;
+      defaultText = literalExpression "pkgs.jirafeau";
+      description = lib.mdDoc "Jirafeau package to use";
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = lib.mdDoc ''
+        Options for Jirafeau PHP pool. See documentation on `php-fpm.conf` for
+        details on configuration directives.
+      '';
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+    services = {
+      nginx = {
+        enable = true;
+        virtualHosts."${cfg.hostName}" = mkMerge [
+          cfg.nginxConfig
+          {
+            extraConfig = let
+              clientMaxBodySize =
+                if cfg.maxUploadSizeMegabytes == 0 then "0" else "${toString cfg.maxUploadSizeMegabytes}m";
+            in
+              ''
+                index index.php;
+                client_max_body_size ${clientMaxBodySize};
+                client_body_timeout ${cfg.maxUploadTimeout};
+                client_header_timeout ${cfg.maxUploadTimeout};
+              '';
+            locations = {
+              "~ \\.php$".extraConfig = ''
+                include ${config.services.nginx.package}/conf/fastcgi_params;
+                fastcgi_split_path_info ^(.+\.php)(/.+)$;
+                fastcgi_index index.php;
+                fastcgi_pass unix:${config.services.phpfpm.pools.jirafeau.socket};
+                fastcgi_param PATH_INFO $fastcgi_path_info;
+                fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+              '';
+            };
+            root = mkForce "${cfg.package}";
+          }
+        ];
+      };
+
+      phpfpm.pools.jirafeau = {
+        inherit group user;
+        phpEnv."JIRAFEAU_CONFIG" = "${localConfig}";
+        settings = {
+          "listen.mode" = "0660";
+          "listen.owner" = user;
+          "listen.group" = group;
+        } // cfg.poolConfig;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${cfg.dataDir} 0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/files/ 0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/links/ 0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/async/ 0750 ${user} ${group} - -"
+    ];
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md b/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md
new file mode 100644
index 000000000000..060ef9752650
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.md
@@ -0,0 +1,45 @@
+# Jitsi Meet {#module-services-jitsi-meet}
+
+With Jitsi Meet on NixOS you can quickly configure a complete,
+private, self-hosted video conferencing solution.
+
+## Basic usage {#module-services-jitsi-basic-usage}
+
+A minimal configuration using Let's Encrypt for TLS certificates looks like this:
+```
+{
+  services.jitsi-meet = {
+    enable = true;
+    hostName = "jitsi.example.com";
+  };
+  services.jitsi-videobridge.openFirewall = true;
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+  security.acme.email = "me@example.com";
+  security.acme.acceptTerms = true;
+}
+```
+
+## Configuration {#module-services-jitsi-configuration}
+
+Here is the same minimal configuration with some additional settings:
+```
+{
+  services.jitsi-meet = {
+    enable = true;
+    hostName = "jitsi.example.com";
+    config = {
+      enableWelcomePage = false;
+      prejoinPageEnabled = true;
+      defaultLang = "fi";
+    };
+    interfaceConfig = {
+      SHOW_JITSI_WATERMARK = false;
+      SHOW_WATERMARK_FOR_GUESTS = false;
+    };
+  };
+  services.jitsi-videobridge.openFirewall = true;
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+  security.acme.email = "me@example.com";
+  security.acme.acceptTerms = true;
+}
+```
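+
+## Secured rooms {#module-services-jitsi-secured-rooms}
+
+The module also provides `services.jitsi-meet.secureDomain.enable`, which switches room
+creation to authenticated ("secure domain") mode; guests can still join existing rooms
+through the anonymous `guest.` domain the module sets up. A minimal sketch, building on
+the basic configuration above:
+```
+{
+  services.jitsi-meet = {
+    enable = true;
+    hostName = "jitsi.example.com";
+    secureDomain.enable = true;
+  };
+}
+```
+With this enabled, accounts allowed to create rooms have to exist on the Prosody server,
+for example by registering them with `prosodyctl register <user> jitsi.example.com <password>`.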
diff --git a/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.nix b/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.nix
new file mode 100644
index 000000000000..c0f9d785eea2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/jitsi-meet.nix
@@ -0,0 +1,599 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.jitsi-meet;
+
+  # The configuration files are JS of format "var <<string>> = <<JSON>>;". In order to
+  # override only some settings, we need to extract the JSON, use jq to merge it with
+  # the config provided by user, and then reconstruct the file.
+  overrideJs =
+    source: varName: userCfg: appendExtra:
+    let
+      extractor = pkgs.writeText "extractor.js" ''
+        var fs = require("fs");
+        eval(fs.readFileSync(process.argv[2], 'utf8'));
+        process.stdout.write(JSON.stringify(eval(process.argv[3])));
+      '';
+      userJson = pkgs.writeText "user.json" (builtins.toJSON userCfg);
+    in (pkgs.runCommand "${varName}.js" { } ''
+      ${pkgs.nodejs}/bin/node ${extractor} ${source} ${varName} > default.json
+      (
+        echo "var ${varName} = "
+        ${pkgs.jq}/bin/jq -s '.[0] * .[1]' default.json ${userJson}
+        echo ";"
+        echo ${escapeShellArg appendExtra}
+      ) > $out
+    '');
+
+  # Essential config - it's probably not good to have these as option default because
+  # types.attrs doesn't do merging. Let's merge explicitly, can still be overridden if
+  # user desires.
+  defaultCfg = {
+    hosts = {
+      domain = cfg.hostName;
+      muc = "conference.${cfg.hostName}";
+      focus = "focus.${cfg.hostName}";
+    };
+    bosh = "//${cfg.hostName}/http-bind";
+    websocket = "wss://${cfg.hostName}/xmpp-websocket";
+
+    fileRecordingsEnabled = true;
+    liveStreamingEnabled = true;
+    hiddenDomain = "recorder.${cfg.hostName}";
+  };
+in
+{
+  options.services.jitsi-meet = with types; {
+    enable = mkEnableOption (lib.mdDoc "Jitsi Meet - Secure, Simple and Scalable Video Conferences");
+
+    hostName = mkOption {
+      type = str;
+      example = "meet.example.org";
+      description = lib.mdDoc ''
+        FQDN of the Jitsi Meet instance.
+      '';
+    };
+
+    config = mkOption {
+      type = attrs;
+      default = { };
+      example = literalExpression ''
+        {
+          enableWelcomePage = false;
+          defaultLang = "fi";
+        }
+      '';
+      description = lib.mdDoc ''
+        Client-side web application settings that override the defaults in {file}`config.js`.
+
+        See <https://github.com/jitsi/jitsi-meet/blob/master/config.js> for default
+        configuration with comments.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = lines;
+      default = "";
+      description = lib.mdDoc ''
+        Text to append to {file}`config.js` web application config file.
+
+        Can be used to insert JavaScript logic to determine user's region in cascading bridges setup.
+      '';
+    };
+
+    interfaceConfig = mkOption {
+      type = attrs;
+      default = { };
+      example = literalExpression ''
+        {
+          SHOW_JITSI_WATERMARK = false;
+          SHOW_WATERMARK_FOR_GUESTS = false;
+        }
+      '';
+      description = lib.mdDoc ''
+        Client-side web-app interface settings that override the defaults in {file}`interface_config.js`.
+
+        See <https://github.com/jitsi/jitsi-meet/blob/master/interface_config.js> for
+        default configuration with comments.
+      '';
+    };
+
+    videobridge = {
+      enable = mkOption {
+        type = bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable the Jitsi Videobridge instance and configure it to connect to Prosody.
+
+          Additional configuration is possible with {option}`services.jitsi-videobridge`
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = nullOr str;
+        default = null;
+        example = "/run/keys/videobridge";
+        description = lib.mdDoc ''
+          File containing password to the Prosody account for videobridge.
+
+          If `null`, a file with password will be generated automatically. Setting
+          this option is useful if you plan to connect additional videobridges to the XMPP server.
+        '';
+      };
+    };
+
+    jicofo.enable = mkOption {
+      type = bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable JiCoFo instance and configure it to connect to Prosody.
+
+        Additional configuration is possible with {option}`services.jicofo`.
+      '';
+    };
+
+    jibri.enable = mkOption {
+      type = bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable a Jibri instance and configure it to connect to Prosody.
+
+        Additional configuration is possible with {option}`services.jibri`, and
+        {option}`services.jibri.finalizeScript` is especially useful.
+      '';
+    };
+
+    nginx.enable = mkOption {
+      type = bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable nginx virtual host that will serve the javascript application and act as
+        a proxy for the XMPP server. Further nginx configuration can be done by adapting
+        {option}`services.nginx.virtualHosts.<hostName>`.
+        When this is enabled, ACME will be used to retrieve a TLS certificate by default. To disable
+        this, set the {option}`services.nginx.virtualHosts.<hostName>.enableACME` to
+        `false` and if appropriate do the same for
+        {option}`services.nginx.virtualHosts.<hostName>.forceSSL`.
+      '';
+    };
+
+    caddy.enable = mkEnableOption (lib.mdDoc "a Caddy reverse proxy to expose Jitsi Meet");
+
+    prosody.enable = mkOption {
+      type = bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to configure Prosody to relay XMPP messages between Jitsi Meet components. Turn this
+        off if you want to configure it manually.
+      '';
+    };
+
+    excalidraw.enable = mkEnableOption (lib.mdDoc "Excalidraw collaboration backend for Jitsi");
+    excalidraw.port = mkOption {
+      type = types.port;
+      default = 3002;
+      description = lib.mdDoc ''The port on which the Excalidraw backend for Jitsi should listen.'';
+    };
+
+    secureDomain.enable = mkEnableOption (lib.mdDoc "Authenticated room creation");
+  };
+
+  config = mkIf cfg.enable {
+    services.prosody = mkIf cfg.prosody.enable {
+      enable = mkDefault true;
+      xmppComplianceSuite = mkDefault false;
+      modules = {
+        admin_adhoc = mkDefault false;
+        bosh = mkDefault true;
+        ping = mkDefault true;
+        roster = mkDefault true;
+        saslauth = mkDefault true;
+        smacks = mkDefault true;
+        tls = mkDefault true;
+        websocket = mkDefault true;
+      };
+      muc = [
+        {
+          domain = "conference.${cfg.hostName}";
+          name = "Jitsi Meet MUC";
+          roomLocking = false;
+          roomDefaultPublicJids = true;
+          extraConfig = ''
+            restrict_room_creation = true
+            storage = "memory"
+            admins = { "focus@auth.${cfg.hostName}" }
+          '';
+        }
+        {
+          domain = "breakout.${cfg.hostName}";
+          name = "Jitsi Meet Breakout MUC";
+          roomLocking = false;
+          roomDefaultPublicJids = true;
+          extraConfig = ''
+            restrict_room_creation = true
+            storage = "memory"
+            admins = { "focus@auth.${cfg.hostName}" }
+          '';
+        }
+        {
+          domain = "internal.auth.${cfg.hostName}";
+          name = "Jitsi Meet Videobridge MUC";
+          roomLocking = false;
+          roomDefaultPublicJids = true;
+          extraConfig = ''
+            storage = "memory"
+            admins = { "focus@auth.${cfg.hostName}", "jvb@auth.${cfg.hostName}" }
+          '';
+          #-- muc_room_cache_size = 1000
+        }
+        {
+          domain = "lobby.${cfg.hostName}";
+          name = "Jitsi Meet Lobby MUC";
+          roomLocking = false;
+          roomDefaultPublicJids = true;
+          extraConfig = ''
+            restrict_room_creation = true
+            storage = "memory"
+          '';
+        }
+      ];
+      extraModules = [
+        "pubsub"
+        "smacks"
+        "speakerstats"
+        "external_services"
+        "conference_duration"
+        "end_conference"
+        "muc_lobby_rooms"
+        "muc_breakout_rooms"
+        "av_moderation"
+        "muc_hide_all"
+        "muc_meeting_id"
+        "muc_domain_mapper"
+        "muc_rate_limit"
+        "limits_exception"
+        "persistent_lobby"
+        "room_metadata"
+      ];
+      extraPluginPaths = [ "${pkgs.jitsi-meet-prosody}/share/prosody-plugins" ];
+      extraConfig = lib.mkMerge [
+        (mkAfter ''
+          Component "focus.${cfg.hostName}" "client_proxy"
+            target_address = "focus@auth.${cfg.hostName}"
+
+          Component "speakerstats.${cfg.hostName}" "speakerstats_component"
+            muc_component = "conference.${cfg.hostName}"
+
+          Component "conferenceduration.${cfg.hostName}" "conference_duration_component"
+            muc_component = "conference.${cfg.hostName}"
+
+          Component "endconference.${cfg.hostName}" "end_conference"
+            muc_component = "conference.${cfg.hostName}"
+
+          Component "avmoderation.${cfg.hostName}" "av_moderation_component"
+            muc_component = "conference.${cfg.hostName}"
+
+          Component "metadata.${cfg.hostName}" "room_metadata_component"
+            muc_component = "conference.${cfg.hostName}"
+            breakout_rooms_component = "breakout.${cfg.hostName}"
+        '')
+        (mkBefore ''
+          muc_mapper_domain_base = "${cfg.hostName}"
+
+          cross_domain_websocket = true;
+          consider_websocket_secure = true;
+
+          unlimited_jids = {
+            "focus@auth.${cfg.hostName}",
+            "jvb@auth.${cfg.hostName}"
+          }
+        '')
+      ];
+      virtualHosts.${cfg.hostName} = {
+        enabled = true;
+        domain = cfg.hostName;
+        extraConfig = ''
+          authentication = ${if cfg.secureDomain.enable then "\"internal_hashed\"" else "\"jitsi-anonymous\""}
+          c2s_require_encryption = false
+          admins = { "focus@auth.${cfg.hostName}" }
+          smacks_max_unacked_stanzas = 5
+          smacks_hibernation_time = 60
+          smacks_max_hibernated_sessions = 1
+          smacks_max_old_sessions = 1
+
+          av_moderation_component = "avmoderation.${cfg.hostName}"
+          speakerstats_component = "speakerstats.${cfg.hostName}"
+          conference_duration_component = "conferenceduration.${cfg.hostName}"
+          end_conference_component = "endconference.${cfg.hostName}"
+
+          c2s_require_encryption = false
+          lobby_muc = "lobby.${cfg.hostName}"
+          breakout_rooms_muc = "breakout.${cfg.hostName}"
+          room_metadata_component = "metadata.${cfg.hostName}"
+          main_muc = "conference.${cfg.hostName}"
+        '';
+        ssl = {
+          cert = "/var/lib/jitsi-meet/jitsi-meet.crt";
+          key = "/var/lib/jitsi-meet/jitsi-meet.key";
+        };
+      };
+      virtualHosts."auth.${cfg.hostName}" = {
+        enabled = true;
+        domain = "auth.${cfg.hostName}";
+        extraConfig = ''
+          authentication = "internal_hashed"
+        '';
+        ssl = {
+          cert = "/var/lib/jitsi-meet/jitsi-meet.crt";
+          key = "/var/lib/jitsi-meet/jitsi-meet.key";
+        };
+      };
+      virtualHosts."recorder.${cfg.hostName}" = {
+        enabled = true;
+        domain = "recorder.${cfg.hostName}";
+        extraConfig = ''
+          authentication = "internal_plain"
+          c2s_require_encryption = false
+        '';
+      };
+      virtualHosts."guest.${cfg.hostName}" = {
+        enabled = true;
+        domain = "guest.${cfg.hostName}";
+        extraConfig = ''
+          authentication = "anonymous"
+          c2s_require_encryption = false
+        '';
+      };
+    };
+    systemd.services.prosody = mkIf cfg.prosody.enable {
+      preStart = let
+        videobridgeSecret = if cfg.videobridge.passwordFile != null then cfg.videobridge.passwordFile else "/var/lib/jitsi-meet/videobridge-secret";
+      in ''
+        ${config.services.prosody.package}/bin/prosodyctl register focus auth.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jicofo-user-secret)"
+        ${config.services.prosody.package}/bin/prosodyctl register jvb auth.${cfg.hostName} "$(cat ${videobridgeSecret})"
+        ${config.services.prosody.package}/bin/prosodyctl mod_roster_command subscribe focus.${cfg.hostName} focus@auth.${cfg.hostName}
+        ${config.services.prosody.package}/bin/prosodyctl register jibri auth.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jibri-auth-secret)"
+        ${config.services.prosody.package}/bin/prosodyctl register recorder recorder.${cfg.hostName} "$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"
+      '';
+      serviceConfig = {
+        EnvironmentFile = [ "/var/lib/jitsi-meet/secrets-env" ];
+        SupplementaryGroups = [ "jitsi-meet" ];
+      };
+      reloadIfChanged = true;
+    };
+
+    users.groups.jitsi-meet = { };
+    systemd.tmpfiles.rules = [
+      "d '/var/lib/jitsi-meet' 0750 root jitsi-meet - -"
+    ];
+
+    systemd.services.jitsi-meet-init-secrets = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "jicofo.service" "jitsi-videobridge2.service" ] ++ (optional cfg.prosody.enable "prosody.service");
+      serviceConfig = {
+        Type = "oneshot";
+      };
+
+      script = let
+        secrets = [ "jicofo-component-secret" "jicofo-user-secret" "jibri-auth-secret" "jibri-recorder-secret" ] ++ (optional (cfg.videobridge.passwordFile == null) "videobridge-secret");
+      in
+      ''
+        cd /var/lib/jitsi-meet
+        ${concatMapStringsSep "\n" (s: ''
+          if [ ! -f ${s} ]; then
+            tr -dc a-zA-Z0-9 </dev/urandom | head -c 64 > ${s}
+            chown root:jitsi-meet ${s}
+            chmod 640 ${s}
+          fi
+        '') secrets}
+
+        # for easy access in prosody
+        echo "JICOFO_COMPONENT_SECRET=$(cat jicofo-component-secret)" > secrets-env
+        chown root:jitsi-meet secrets-env
+        chmod 640 secrets-env
+      ''
+      + optionalString cfg.prosody.enable ''
+        # generate self-signed certificates
+        if [ ! -f /var/lib/jitsi-meet/jitsi-meet.crt ]; then
+          ${getBin pkgs.openssl}/bin/openssl req \
+            -x509 \
+            -newkey rsa:4096 \
+            -keyout /var/lib/jitsi-meet/jitsi-meet.key \
+            -out /var/lib/jitsi-meet/jitsi-meet.crt \
+            -days 36500 \
+            -nodes \
+            -subj '/CN=${cfg.hostName}/CN=auth.${cfg.hostName}'
+          chmod 640 /var/lib/jitsi-meet/jitsi-meet.{crt,key}
+          chown root:jitsi-meet /var/lib/jitsi-meet/jitsi-meet.{crt,key}
+        fi
+      '';
+    };
+
+    systemd.services.jitsi-excalidraw = mkIf cfg.excalidraw.enable {
+      description = "Excalidraw collaboration backend for Jitsi";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment.PORT = toString cfg.excalidraw.port;
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pkgs.jitsi-excalidraw}/bin/jitsi-excalidraw-backend";
+        Restart = "on-failure";
+        Group = "jitsi-meet";
+      };
+    };
+
+    services.nginx = mkIf cfg.nginx.enable {
+      enable = mkDefault true;
+      virtualHosts.${cfg.hostName} = {
+        enableACME = mkDefault true;
+        forceSSL = mkDefault true;
+        root = pkgs.jitsi-meet;
+        extraConfig = ''
+          ssi on;
+        '';
+        locations."@root_path".extraConfig = ''
+          rewrite ^/(.*)$ / break;
+        '';
+        locations."~ ^/([^/\\?&:'\"]+)$".tryFiles = "$uri @root_path";
+        locations."^~ /xmpp-websocket" = {
+          priority = 100;
+          proxyPass = "http://localhost:5280/xmpp-websocket";
+          proxyWebsockets = true;
+        };
+        locations."=/http-bind" = {
+          proxyPass = "http://localhost:5280/http-bind";
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+          '';
+        };
+        locations."=/external_api.js" = mkDefault {
+          alias = "${pkgs.jitsi-meet}/libs/external_api.min.js";
+        };
+        locations."=/_api/room-info" = {
+          proxyPass = "http://localhost:5280/room-info";
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header Host $host;
+          '';
+        };
+        locations."=/config.js" = mkDefault {
+          alias = overrideJs "${pkgs.jitsi-meet}/config.js" "config" (recursiveUpdate defaultCfg cfg.config) cfg.extraConfig;
+        };
+        locations."=/interface_config.js" = mkDefault {
+          alias = overrideJs "${pkgs.jitsi-meet}/interface_config.js" "interfaceConfig" cfg.interfaceConfig "";
+        };
+        locations."/socket.io/" = mkIf cfg.excalidraw.enable {
+          proxyPass = "http://127.0.0.1:${toString cfg.excalidraw.port}";
+          proxyWebsockets = true;
+        };
+      };
+    };
+
+    services.caddy = mkIf cfg.caddy.enable {
+      enable = mkDefault true;
+      virtualHosts.${cfg.hostName} = {
+        extraConfig =
+        let
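+          # Rewrite the nginx-style SSI includes in the upstream HTML into Caddy template
+          # directives so that Caddy's `templates` handler can resolve them.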
+          templatedJitsiMeet = pkgs.runCommand "templated-jitsi-meet" { } ''
+            cp -R ${pkgs.jitsi-meet}/* .
+            for file in *.html **/*.html ; do
+              ${pkgs.sd}/bin/sd '<!--#include virtual="(.*)" -->' '{{ include "$1" }}' $file
+            done
+            rm config.js
+            rm interface_config.js
+            cp -R . $out
+            cp ${overrideJs "${pkgs.jitsi-meet}/config.js" "config" (recursiveUpdate defaultCfg cfg.config) cfg.extraConfig} $out/config.js
+            cp ${overrideJs "${pkgs.jitsi-meet}/interface_config.js" "interfaceConfig" cfg.interfaceConfig ""} $out/interface_config.js
+            cp ./libs/external_api.min.js $out/external_api.js
+          '';
+        in ''
+          handle /http-bind {
+            header Host ${cfg.hostName}
+            reverse_proxy 127.0.0.1:5280
+          }
+          handle /xmpp-websocket {
+            reverse_proxy 127.0.0.1:5280
+          }
+          handle {
+            templates
+            root * ${templatedJitsiMeet}
+            try_files {path} {path}
+            try_files {path} /index.html
+            file_server
+          }
+        '';
+      };
+    };
+
+    services.jitsi-meet.config = recursiveUpdate
+      (mkIf cfg.excalidraw.enable {
+        whiteboard = {
+          enabled = true;
+          collabServerBaseUrl = "https://${cfg.hostName}";
+        };
+      })
+      (mkIf cfg.secureDomain.enable {
+        hosts.anonymousdomain = "guest.${cfg.hostName}";
+      });
+
+    services.jitsi-videobridge = mkIf cfg.videobridge.enable {
+      enable = true;
+      xmppConfigs."localhost" = {
+        userName = "jvb";
+        domain = "auth.${cfg.hostName}";
+        passwordFile = "/var/lib/jitsi-meet/videobridge-secret";
+        mucJids = "jvbbrewery@internal.auth.${cfg.hostName}";
+        disableCertificateVerification = true;
+      };
+    };
+
+    services.jicofo = mkIf cfg.jicofo.enable {
+      enable = true;
+      xmppHost = "localhost";
+      xmppDomain = cfg.hostName;
+      userDomain = "auth.${cfg.hostName}";
+      userName = "focus";
+      userPasswordFile = "/var/lib/jitsi-meet/jicofo-user-secret";
+      componentPasswordFile = "/var/lib/jitsi-meet/jicofo-component-secret";
+      bridgeMuc = "jvbbrewery@internal.auth.${cfg.hostName}";
+      config = mkMerge [{
+        jicofo.xmpp.service.disable-certificate-verification = true;
+        jicofo.xmpp.client.disable-certificate-verification = true;
+      }
+        (lib.mkIf (config.services.jibri.enable || cfg.jibri.enable) {
+          jicofo.jibri = {
+            brewery-jid = "JibriBrewery@internal.auth.${cfg.hostName}";
+            pending-timeout = "90";
+          };
+        })
+        (lib.mkIf cfg.secureDomain.enable {
+          jicofo = {
+            authentication = {
+              enabled = "true";
+              type = "XMPP";
+              login-url = cfg.hostName;
+            };
+            xmpp.client.client-proxy = "focus.${cfg.hostName}";
+          };
+        })];
+    };
+
+    services.jibri = mkIf cfg.jibri.enable {
+      enable = true;
+
+      xmppEnvironments."jitsi-meet" = {
+        xmppServerHosts = [ "localhost" ];
+        xmppDomain = cfg.hostName;
+
+        control.muc = {
+          domain = "internal.auth.${cfg.hostName}";
+          roomName = "JibriBrewery";
+          nickname = "jibri";
+        };
+
+        control.login = {
+          domain = "auth.${cfg.hostName}";
+          username = "jibri";
+          passwordFile = "/var/lib/jitsi-meet/jibri-auth-secret";
+        };
+
+        call.login = {
+          domain = "recorder.${cfg.hostName}";
+          username = "recorder";
+          passwordFile = "/var/lib/jitsi-meet/jibri-recorder-secret";
+        };
+
+        usageTimeout = "0";
+        disableCertificateVerification = true;
+        stripFromRoomDomain = "conference.";
+      };
+    };
+  };
+
+  meta.doc = ./jitsi-meet.md;
+  meta.maintainers = lib.teams.jitsi.members;
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/kasmweb/default.nix b/nixpkgs/nixos/modules/services/web-apps/kasmweb/default.nix
new file mode 100644
index 000000000000..0d78025ecf0f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/kasmweb/default.nix
@@ -0,0 +1,275 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.kasmweb;
+in
+{
+  options.services.kasmweb = {
+    enable = lib.mkEnableOption (lib.mdDoc "kasmweb");
+
+    networkSubnet = lib.mkOption {
+      default = "172.20.0.0/16";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        The network subnet to use for the containers.
+      '';
+    };
+
+    postgres = {
+      user = lib.mkOption {
+        default = "kasmweb";
+        type = lib.types.str;
+        description = lib.mdDoc ''
+          Username to use for the postgres database.
+        '';
+      };
+      password = lib.mkOption {
+        default = "kasmweb";
+        type = lib.types.str;
+        description = lib.mdDoc ''
+          Password to use for the postgres database.
+        '';
+      };
+    };
+
+    redisPassword = lib.mkOption {
+      default = "kasmweb";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Password to use for the Redis cache.
+      '';
+    };
+
+    defaultAdminPassword = lib.mkOption {
+      default = "kasmweb";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Default admin password to use.
+      '';
+    };
+
+    defaultUserPassword = lib.mkOption {
+      default = "kasmweb";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Default user password to use.
+      '';
+    };
+
+    defaultManagerToken = lib.mkOption {
+      default = "kasmweb";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Default manager token to use.
+      '';
+    };
+
+    defaultGuacToken = lib.mkOption {
+      default = "kasmweb";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Default guac token to use.
+      '';
+    };
+
+    defaultRegistrationToken = lib.mkOption {
+      default = "kasmweb";
+      type = lib.types.str;
+      description = lib.mdDoc ''
+        Default registration token to use.
+      '';
+    };
+
+    datastorePath = lib.mkOption {
+      type = lib.types.str;
+      default = "/var/lib/kasmweb";
+      description = lib.mdDoc ''
+        The directory used to store all data for kasmweb.
+      '';
+    };
+
+    listenAddress = lib.mkOption {
+      type = lib.types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc ''
+        The address on which kasmweb should listen.
+      '';
+    };
+
+    listenPort = lib.mkOption {
+      type = lib.types.int;
+      default = 443;
+      description = lib.mdDoc ''
+        The port on which kasmweb should listen.
+      '';
+    };
+
+    sslCertificate = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = lib.mdDoc ''
+        The SSL certificate to be used for kasmweb.
+      '';
+    };
+
+    sslCertificateKey = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = lib.mdDoc ''
+        The SSL certificate's key to be used for kasmweb. Make sure to specify
+        this as a string and not a literal path, so that it is not accidentally
+        included in your Nix store.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    systemd.services = {
+      "init-kasmweb" = {
+        wantedBy = [
+          "docker-kasm_db.service"
+        ];
+        before = [
+          "docker-kasm_db.service"
+          "docker-kasm_redis.service"
+          "docker-kasm_db_init.service"
+          "docker-kasm_api.service"
+          "docker-kasm_agent.service"
+          "docker-kasm_manager.service"
+          "docker-kasm_share.service"
+          "docker-kasm_guac.service"
+          "docker-kasm_proxy.service"
+        ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = pkgs.substituteAll {
+            src = ./initialize_kasmweb.sh;
+            isExecutable = true;
+            binPath = lib.makeBinPath [ pkgs.docker pkgs.openssl pkgs.gnused ];
+            runtimeShell = pkgs.runtimeShell;
+            kasmweb = pkgs.kasmweb;
+            postgresUser = cfg.postgres.user;
+            postgresPassword = cfg.postgres.password;
+            inherit (cfg)
+              datastorePath
+              sslCertificate
+              sslCertificateKey
+              redisPassword
+              defaultUserPassword
+              defaultAdminPassword
+              defaultManagerToken
+              defaultRegistrationToken
+              defaultGuacToken;
+          };
+        };
+      };
+    };
+
+    virtualisation = {
+      oci-containers.containers = {
+        kasm_db = {
+          image = "postgres:12-alpine";
+          environment = {
+            POSTGRES_PASSWORD = cfg.postgres.password;
+            POSTGRES_USER = cfg.postgres.user;
+            POSTGRES_DB = "kasm";
+          };
+          volumes = [
+            "${cfg.datastorePath}/conf/database/data.sql:/docker-entrypoint-initdb.d/data.sql"
+            "${cfg.datastorePath}/conf/database/:/tmp/"
+            "kasmweb_db:/var/lib/postgresql/data"
+          ];
+          extraOptions = [ "--network=kasm_default_network" ];
+        };
+        kasm_db_init = {
+          image = "kasmweb/api:${pkgs.kasmweb.version}";
+          user = "root:root";
+          volumes = [
+            "${cfg.datastorePath}/:/opt/kasm/current/"
+            "kasmweb_api_data:/tmp"
+          ];
+          dependsOn = [ "kasm_db" ];
+          entrypoint = "/bin/bash";
+          cmd = [ "/opt/kasm/current/init_seeds.sh" ];
+          extraOptions = [ "--network=kasm_default_network" "--userns=host" ];
+        };
+        kasm_redis = {
+          image = "redis:5-alpine";
+          entrypoint = "/bin/sh";
+          cmd = [
+            "-c"
+            "redis-server --requirepass ${cfg.redisPassword}"
+          ];
+          extraOptions = [ "--network=kasm_default_network" "--userns=host" ];
+        };
+        kasm_api = {
+          image = "kasmweb/api:${pkgs.kasmweb.version}";
+          user = "root:root";
+          volumes = [
+            "${cfg.datastorePath}/:/opt/kasm/current/"
+            "kasmweb_api_data:/tmp"
+          ];
+          dependsOn = [ "kasm_db_init" ];
+          extraOptions = [ "--network=kasm_default_network" "--userns=host"  ];
+        };
+        kasm_manager = {
+          image = "kasmweb/manager:${pkgs.kasmweb.version}";
+          user = "root:root";
+          volumes = [
+            "${cfg.datastorePath}/:/opt/kasm/current/"
+          ];
+          dependsOn = [ "kasm_db" "kasm_api" ];
+          extraOptions = [ "--network=kasm_default_network" "--userns=host" "--read-only"];
+        };
+        kasm_agent = {
+          image = "kasmweb/agent:${pkgs.kasmweb.version}";
+          user = "root:root";
+          volumes = [
+            "${cfg.datastorePath}/:/opt/kasm/current/"
+            "/var/run/docker.sock:/var/run/docker.sock"
+            "${pkgs.docker}/bin/docker:/usr/bin/docker"
+            "${cfg.datastorePath}/conf/nginx:/etc/nginx/conf.d"
+          ];
+          dependsOn = [ "kasm_manager" ];
+          extraOptions = [ "--network=kasm_default_network" "--userns=host" "--read-only" ];
+        };
+        kasm_share = {
+          image = "kasmweb/share:${pkgs.kasmweb.version}";
+          user = "root:root";
+          volumes = [
+            "${cfg.datastorePath}/:/opt/kasm/current/"
+          ];
+          dependsOn = [ "kasm_db" "kasm_redis" ];
+          extraOptions = [ "--network=kasm_default_network" "--userns=host" "--read-only" ];
+        };
+        kasm_guac = {
+          image = "kasmweb/kasm-guac:${pkgs.kasmweb.version}";
+          user = "root:root";
+          volumes = [
+            "${cfg.datastorePath}/:/opt/kasm/current/"
+          ];
+          dependsOn = [ "kasm_db" "kasm_redis" ];
+          extraOptions = [ "--network=kasm_default_network" "--userns=host" "--read-only" ];
+        };
+        kasm_proxy = {
+          image = "kasmweb/nginx:latest";
+          ports = [ "${cfg.listenAddress}:${toString cfg.listenPort}:443" ];
+          user = "root:root";
+          volumes = [
+            "${cfg.datastorePath}/conf/nginx:/etc/nginx/conf.d:ro"
+            "${cfg.datastorePath}/certs/kasm_nginx.key:/etc/ssl/private/kasm_nginx.key"
+            "${cfg.datastorePath}/certs/kasm_nginx.crt:/etc/ssl/certs/kasm_nginx.crt"
+            "${cfg.datastorePath}/www:/srv/www:ro"
+            "${cfg.datastorePath}/log/nginx:/var/log/external/nginx"
+            "${cfg.datastorePath}/log/logrotate:/var/log/external/logrotate"
+          ];
+          dependsOn = [ "kasm_manager" "kasm_api" "kasm_agent" "kasm_share"
+          "kasm_guac" ];
+          extraOptions = [ "--network=kasm_default_network" "--userns=host"
+          "--network-alias=proxy"];
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/kasmweb/initialize_kasmweb.sh b/nixpkgs/nixos/modules/services/web-apps/kasmweb/initialize_kasmweb.sh
new file mode 100644
index 000000000000..dbf043b98693
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/kasmweb/initialize_kasmweb.sh
@@ -0,0 +1,114 @@
+#! @runtimeShell@
+export PATH=@binPath@:$PATH
+
+mkdir -p @datastorePath@/log
+chmod -R a+rw @datastorePath@
+
+ln -sf @kasmweb@/bin @datastorePath@
+rm -r @datastorePath@/conf
+cp -r @kasmweb@/conf @datastorePath@
+mkdir -p @datastorePath@/conf/nginx/containers.d
+chmod -R a+rw @datastorePath@/conf
+ln -sf @kasmweb@/www @datastorePath@
+
+
+docker network inspect kasm_default_network >/dev/null || docker network create kasm_default_network --subnet @networkSubnet@
+if docker volume inspect kasmweb_db >/dev/null; then
+    source @datastorePath@/ids.env
+    echo 'echo "skipping database init"' > @datastorePath@/init_seeds.sh
+    echo 'while true; do sleep 10 ; done' >> @datastorePath@/init_seeds.sh
+else
+    API_SERVER_ID=$(cat /proc/sys/kernel/random/uuid)
+    MANAGER_ID=$(cat /proc/sys/kernel/random/uuid)
+    SHARE_ID=$(cat /proc/sys/kernel/random/uuid)
+    SERVER_ID=$(cat /proc/sys/kernel/random/uuid)
+    echo "export API_SERVER_ID=$API_SERVER_ID" > @datastorePath@/ids.env
+    echo "export MANAGER_ID=$MANAGER_ID" >> @datastorePath@/ids.env
+    echo "export SHARE_ID=$SHARE_ID" >> @datastorePath@/ids.env
+    echo "export SERVER_ID=$SERVER_ID" >> @datastorePath@/ids.env
+
+    mkdir -p @datastorePath@/certs
+    openssl req -x509 -nodes -days 1825 -newkey rsa:2048 -keyout @datastorePath@/certs/kasm_nginx.key -out @datastorePath@/certs/kasm_nginx.crt -subj "/C=US/ST=VA/L=None/O=None/OU=DoFu/CN=$(hostname)/emailAddress=none@none.none" 2> /dev/null
+
+    docker volume create kasmweb_db
+    rm @datastorePath@/.done_initing_data
+    cat >@datastorePath@/init_seeds.sh <<EOF
+#!/bin/bash
+if [ ! -e /opt/kasm/current/.done_initing_data ]; then
+  sleep 4
+  /usr/bin/kasm_server.so --initialize-database --cfg \
+    /opt/kasm/current/conf/app/api.app.config.yaml \
+    --populate-production \
+    --seed-file \
+    /opt/kasm/current/conf/database/seed_data/default_properties.yaml \
+    2>&1 | grep -v UserWarning
+  /usr/bin/kasm_server.so --cfg \
+    /opt/kasm/current/conf/app/api.app.config.yaml \
+    --populate-production \
+    --seed-file \
+    /opt/kasm/current/conf/database/seed_data/default_agents.yaml \
+    2>&1 | grep -v UserWarning
+  /usr/bin/kasm_server.so --cfg \
+    /opt/kasm/current/conf/app/api.app.config.yaml \
+    --populate-production \
+    --seed-file \
+    /opt/kasm/current/conf/database/seed_data/default_connection_proxies.yaml \
+    2>&1 | grep -v UserWarning
+  /usr/bin/kasm_server.so --cfg \
+    /opt/kasm/current/conf/app/api.app.config.yaml \
+    --populate-production \
+    --seed-file \
+    /opt/kasm/current/conf/database/seed_data/default_images_amd64.yaml \
+    2>&1 | grep -v UserWarning
+  touch /opt/kasm/current/.done_initing_data
+  while true; do sleep 10 ; done
+else
+ echo "skipping database init"
+  while true; do sleep 10 ; done
+fi
+EOF
+fi
+
+chmod +x @datastorePath@/init_seeds.sh
+chmod a+w @datastorePath@/init_seeds.sh
+
+if [ -e @sslCertificate@ ]; then
+    cp @sslCertificate@ @datastorePath@/certs/kasm_nginx.crt
+    cp @sslCertificateKey@ @datastorePath@/certs/kasm_nginx.key
+fi
+
+sed -i -e "s/username.*/username: @postgresUser@/g" \
+    -e "s/password.*/password: @postgresPassword@/g" \
+    -e "s/host.*db/host: kasm_db/g" \
+    -e "s/ssl: true/ssl: false/g" \
+    -e "s/redisPassword.*/redisPassword: @redisPassword@/g" \
+    -e "s/server_hostname.*/server_hostname: kasm_api/g" \
+    -e "s/server_id.*/server_id: $API_SERVER_ID/g" \
+    -e "s/manager_id.*/manager_id: $MANAGER_ID/g" \
+    -e "s/share_id.*/share_id: $SHARE_ID/g" \
+    @datastorePath@/conf/app/api.app.config.yaml
+
+sed -i -e "s/ token:.*/ token: \"@defaultManagerToken@\"/g" \
+    -e "s/hostnames: \['proxy.*/hostnames: \['kasm_proxy'\]/g" \
+    -e "s/server_id.*/server_id: $SERVER_ID/g" \
+    @datastorePath@/conf/app/agent.app.config.yaml
+
+
+sed -i -e "s/password: admin.*/password: \"@defaultAdminPassword@\"/g" \
+    -e "s/password: user.*/password: \"@defaultUserPassword@\"/g" \
+    -e "s/default-manager-token/@defaultManagerToken@/g" \
+    -e "s/default-registration-token/@defaultRegistrationToken@/g" \
+    -e "s/upstream_auth_address:.*/upstream_auth_address: 'proxy'/g" \
+    @datastorePath@/conf/database/seed_data/default_properties.yaml
+
+sed -i -e "s/GUACTOKEN/@defaultGuacToken@/g" \
+    -e "s/APIHOSTNAME/proxy/g" \
+    @datastorePath@/conf/app/kasmguac.app.config.yaml
+
+sed -i -e "s/GUACTOKEN/@defaultGuacToken@/g" \
+    -e "s/APIHOSTNAME/proxy/g" \
+    @datastorePath@/conf/database/seed_data/default_connection_proxies.yaml
+
+sed -i "s/00000000-0000-0000-0000-000000000000/$SERVER_ID/g" \
+    @datastorePath@/conf/database/seed_data/default_agents.yaml
+
diff --git a/nixpkgs/nixos/modules/services/web-apps/kavita.nix b/nixpkgs/nixos/modules/services/web-apps/kavita.nix
new file mode 100644
index 000000000000..ca9cd01d403d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/kavita.nix
@@ -0,0 +1,83 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.kavita;
+in {
+  options.services.kavita = {
+    enable = lib.mkEnableOption (lib.mdDoc "Kavita reading server");
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      default = "kavita";
+      description = lib.mdDoc "User account under which Kavita runs.";
+    };
+
+    package = lib.mkPackageOptionMD pkgs "kavita" { };
+
+    dataDir = lib.mkOption {
+      default = "/var/lib/kavita";
+      type = lib.types.str;
+      description = lib.mdDoc "The directory where Kavita stores its state.";
+    };
+
+    tokenKeyFile = lib.mkOption {
+      type = lib.types.path;
+      description = lib.mdDoc ''
+        A file containing the TokenKey, a secret of at least 128 bits.
+        It can be generated with `head -c 32 /dev/urandom | base64`.
+      '';
+    };
+    port = lib.mkOption {
+      default = 5000;
+      type = lib.types.port;
+      description = lib.mdDoc "Port to bind to.";
+    };
+    ipAdresses = lib.mkOption {
+      default = ["0.0.0.0" "::"];
+      type = lib.types.listOf lib.types.str;
+      description = lib.mdDoc "IP Addresses to bind to. The default is to bind
+      to all IPv4 and IPv6 addresses.";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.kavita = {
+      description = "Kavita";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      preStart = ''
+        umask u=rwx,g=rx,o=
+        cat > "${cfg.dataDir}/config/appsettings.json" <<EOF
+        {
+          "TokenKey": "$(cat ${cfg.tokenKeyFile})",
+          "Port": ${toString cfg.port},
+          "IpAddresses": "${lib.concatStringsSep "," cfg.ipAdresses}"
+        }
+        EOF
+      '';
+      serviceConfig = {
+        WorkingDirectory = cfg.dataDir;
+        ExecStart = "${lib.getExe cfg.package}";
+        Restart = "always";
+        User = cfg.user;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}'        0750 ${cfg.user} ${cfg.user} - -"
+      "d '${cfg.dataDir}/config' 0750 ${cfg.user} ${cfg.user} - -"
+    ];
+
+    users = {
+      users.${cfg.user} = {
+        description = "kavita service user";
+        isSystemUser = true;
+        group = cfg.user;
+        home = cfg.dataDir;
+      };
+      groups.${cfg.user} = { };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ misterio77 ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/keycloak.md b/nixpkgs/nixos/modules/services/web-apps/keycloak.md
new file mode 100644
index 000000000000..aa8de40d642b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/keycloak.md
@@ -0,0 +1,141 @@
+# Keycloak {#module-services-keycloak}
+
+[Keycloak](https://www.keycloak.org/) is an
+open source identity and access management server with support for
+[OpenID Connect](https://openid.net/connect/),
+[OAUTH 2.0](https://oauth.net/2/) and
+[SAML 2.0](https://en.wikipedia.org/wiki/SAML_2.0).
+
+## Administration {#module-services-keycloak-admin}
+
+An administrative user with the username
+`admin` is automatically created in the
+`master` realm. Its initial password can be
+configured by setting [](#opt-services.keycloak.initialAdminPassword)
+and defaults to `changeme`. The password is
+not stored safely and should be changed immediately in the
+admin panel.
+
+Refer to the [Keycloak Server Administration Guide](
+  https://www.keycloak.org/docs/latest/server_admin/index.html
+) for information on
+how to administer your Keycloak
+instance.
+
+## Database access {#module-services-keycloak-database}
+
+Keycloak can be used with PostgreSQL, MariaDB or
+MySQL. Which one is used can be
+configured in [](#opt-services.keycloak.database.type). The selected
+database will automatically be enabled and a database and role
+created unless [](#opt-services.keycloak.database.host) is changed
+from its default of `localhost` or
+[](#opt-services.keycloak.database.createLocally) is set to `false`.
+
+External database access can also be configured by setting
+[](#opt-services.keycloak.database.host),
+[](#opt-services.keycloak.database.name),
+[](#opt-services.keycloak.database.username),
+[](#opt-services.keycloak.database.useSSL) and
+[](#opt-services.keycloak.database.caCert) as
+appropriate. Note that you need to manually create the database
+and allow the configured database user full access to it.
+
+[](#opt-services.keycloak.database.passwordFile)
+must be set to the path to a file containing the password used
+to log in to the database. If [](#opt-services.keycloak.database.host)
+and [](#opt-services.keycloak.database.createLocally)
+are kept at their defaults, the database role
+`keycloak` with that password is provisioned
+on the local database instance.
+
+::: {.warning}
+The path should be provided as a string, not a Nix path, since Nix
+paths are copied into the world-readable Nix store.
+:::
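+
+For example, an external PostgreSQL server could be configured roughly
+like this (the hostname and file paths are only placeholders):
+
+```
+services.keycloak.database = {
+  type = "postgresql";
+  host = "db.example.com";
+  name = "keycloak";
+  username = "keycloak";
+  useSSL = true;
+  caCert = "/etc/ssl/certs/db-ca.pem";
+  passwordFile = "/run/keys/db_password";
+};
+```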
+
+## Hostname {#module-services-keycloak-hostname}
+
+The hostname is used to build the public URL used as base for
+all frontend requests and must be configured through
+[](#opt-services.keycloak.settings.hostname).
+
+::: {.note}
+If you're migrating an old Wildfly based Keycloak instance
+and want to keep compatibility with your current clients,
+you'll likely want to set [](#opt-services.keycloak.settings.http-relative-path)
+to `/auth`. See the option description
+for more details.
+:::
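+
+For such a migration, the relevant settings might look like this
+(the hostname is a placeholder):
+
+```
+services.keycloak.settings = {
+  hostname = "keycloak.example.com";
+  http-relative-path = "/auth";
+};
+```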
+
+[](#opt-services.keycloak.settings.hostname-strict-backchannel)
+determines whether Keycloak should force all requests to go
+through the frontend URL. By default,
+Keycloak allows backend requests to
+instead use its local hostname or IP address and may also
+advertise it to clients through its OpenID Connect Discovery
+endpoint.
+
+For more information on hostname configuration, see the [Hostname
+section of the Keycloak Server Installation and Configuration
+Guide](https://www.keycloak.org/server/hostname).
+
+## Setting up TLS/SSL {#module-services-keycloak-tls}
+
+By default, Keycloak won't accept
+unsecured HTTP connections originating from outside its local
+network.
+
+HTTPS support requires a TLS/SSL certificate and a private key,
+both [PEM formatted](https://en.wikipedia.org/wiki/Privacy-Enhanced_Mail).
+Their paths should be set through
+[](#opt-services.keycloak.sslCertificate) and
+[](#opt-services.keycloak.sslCertificateKey).
+
+::: {.warning}
+The paths should be provided as strings, not Nix paths,
+since Nix paths are copied into the world-readable Nix store.
+:::
+
+## Themes {#module-services-keycloak-themes}
+
+You can package custom themes and make them visible to
+Keycloak through [](#opt-services.keycloak.themes). See the
+[Themes section of the Keycloak Server Development Guide](
+  https://www.keycloak.org/docs/latest/server_development/#_themes
+) and the description of the aforementioned NixOS option for
+more information.
+
+## Configuration file settings {#module-services-keycloak-settings}
+
+Keycloak server configuration parameters can be set in
+[](#opt-services.keycloak.settings). These correspond
+directly to options in
+{file}`conf/keycloak.conf`. Some of the most
+important parameters are documented as suboptions, the rest can
+be found in the [All
+configuration section of the Keycloak Server Installation and
+Configuration Guide](https://www.keycloak.org/server/all-config).
+
+Options containing secret data should be set to an attribute
+set containing the attribute `_secret` - a
+string pointing to a file containing the value the option
+should be set to. See the description of
+[](#opt-services.keycloak.settings) for an example.
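+
+As a quick illustration, a secret setting can be given as a file
+reference like this (the file path is a placeholder):
+
+```
+services.keycloak.settings.https-key-store-password = {
+  _secret = "/run/keys/store_password";
+};
+```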
+
+## Example configuration {#module-services-keycloak-example-config}
+
+A basic configuration with some custom settings could look like this:
+```
+services.keycloak = {
+  enable = true;
+  settings = {
+    hostname = "keycloak.example.com";
+    hostname-strict-backchannel = true;
+  };
+  initialAdminPassword = "e6Wcm0RrtegMEHl";  # change on first login
+  sslCertificate = "/run/keys/ssl_cert";
+  sslCertificateKey = "/run/keys/ssl_key";
+  database.passwordFile = "/run/keys/db_password";
+};
+```
diff --git a/nixpkgs/nixos/modules/services/web-apps/keycloak.nix b/nixpkgs/nixos/modules/services/web-apps/keycloak.nix
new file mode 100644
index 000000000000..a7e4fab8ea28
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/keycloak.nix
@@ -0,0 +1,679 @@
+{ config, options, pkgs, lib, ... }:
+
+let
+  cfg = config.services.keycloak;
+  opt = options.services.keycloak;
+
+  inherit (lib)
+    types
+    mkMerge
+    mkOption
+    mkChangedOptionModule
+    mkRenamedOptionModule
+    mkRemovedOptionModule
+    concatStringsSep
+    mapAttrsToList
+    escapeShellArg
+    mkIf
+    optionalString
+    optionals
+    mkDefault
+    literalExpression
+    isAttrs
+    literalMD
+    maintainers
+    catAttrs
+    collect
+    splitString
+    hasPrefix
+    ;
+
+  inherit (builtins)
+    elem
+    typeOf
+    isInt
+    isString
+    hashString
+    isPath
+    ;
+
+  prefixUnlessEmpty = prefix: string: optionalString (string != "") "${prefix}${string}";
+in
+{
+  imports =
+    [
+      (mkRenamedOptionModule
+        [ "services" "keycloak" "bindAddress" ]
+        [ "services" "keycloak" "settings" "http-host" ])
+      (mkRenamedOptionModule
+        [ "services" "keycloak" "forceBackendUrlToFrontendUrl"]
+        [ "services" "keycloak" "settings" "hostname-strict-backchannel"])
+      (mkChangedOptionModule
+        [ "services" "keycloak" "httpPort" ]
+        [ "services" "keycloak" "settings" "http-port" ]
+        (config:
+          builtins.fromJSON config.services.keycloak.httpPort))
+      (mkChangedOptionModule
+        [ "services" "keycloak" "httpsPort" ]
+        [ "services" "keycloak" "settings" "https-port" ]
+        (config:
+          builtins.fromJSON config.services.keycloak.httpsPort))
+      (mkRemovedOptionModule
+        [ "services" "keycloak" "frontendUrl" ]
+        ''
+          Set `services.keycloak.settings.hostname' and `services.keycloak.settings.http-relative-path' instead.
+          NOTE: You likely want to set 'http-relative-path' to '/auth' to keep compatibility with your clients.
+                See its description for more information.
+        '')
+      (mkRemovedOptionModule
+        [ "services" "keycloak" "extraConfig" ]
+        "Use `services.keycloak.settings' instead.")
+    ];
+
+  options.services.keycloak =
+    let
+      inherit (types)
+        bool
+        str
+        int
+        nullOr
+        attrsOf
+        oneOf
+        path
+        enum
+        package
+        port;
+
+      assertStringPath = optionName: value:
+        if isPath value then
+          throw ''
+            services.keycloak.${optionName}:
+              ${toString value}
+              is a Nix path, but should be a string, since Nix
+              paths are copied into the world-readable Nix store.
+          ''
+        else value;
+    in
+    {
+      enable = mkOption {
+        type = bool;
+        default = false;
+        example = true;
+        description = lib.mdDoc ''
+          Whether to enable the Keycloak identity and access management
+          server.
+        '';
+      };
+
+      sslCertificate = mkOption {
+        type = nullOr path;
+        default = null;
+        example = "/run/keys/ssl_cert";
+        apply = assertStringPath "sslCertificate";
+        description = lib.mdDoc ''
+          The path to a PEM formatted certificate to use for TLS/SSL
+          connections.
+        '';
+      };
+
+      sslCertificateKey = mkOption {
+        type = nullOr path;
+        default = null;
+        example = "/run/keys/ssl_key";
+        apply = assertStringPath "sslCertificateKey";
+        description = lib.mdDoc ''
+          The path to a PEM formatted private key to use for TLS/SSL
+          connections.
+        '';
+      };
+
+      plugins = lib.mkOption {
+        type = lib.types.listOf lib.types.path;
+        default = [ ];
+        description = lib.mdDoc ''
+          Keycloak plugin jar, ear files or derivations containing
+          them. Packaged plugins are available through
+          `pkgs.keycloak.plugins`.
+        '';
+      };
+
+      database = {
+        type = mkOption {
+          type = enum [ "mysql" "mariadb" "postgresql" ];
+          default = "postgresql";
+          example = "mariadb";
+          description = lib.mdDoc ''
+            The type of database Keycloak should connect to.
+          '';
+        };
+
+        host = mkOption {
+          type = str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            Hostname of the database to connect to.
+          '';
+        };
+
+        port =
+          let
+            dbPorts = {
+              postgresql = 5432;
+              mariadb = 3306;
+              mysql = 3306;
+            };
+          in
+          mkOption {
+            type = port;
+            default = dbPorts.${cfg.database.type};
+            defaultText = literalMD "default port of selected database";
+            description = lib.mdDoc ''
+              Port of the database to connect to.
+            '';
+          };
+
+        useSSL = mkOption {
+          type = bool;
+          default = cfg.database.host != "localhost";
+          defaultText = literalExpression ''config.${opt.database.host} != "localhost"'';
+          description = lib.mdDoc ''
+            Whether the database connection should be secured by SSL /
+            TLS.
+          '';
+        };
+
+        caCert = mkOption {
+          type = nullOr path;
+          default = null;
+          description = lib.mdDoc ''
+            The SSL / TLS CA certificate that verifies the identity of the
+            database server.
+
+            Required when PostgreSQL is used and SSL is turned on.
+
+            For MySQL, if left at `null`, the default
+            Java keystore is used, which should suffice if the server
+            certificate is issued by an official CA.
+          '';
+        };
+
+        createLocally = mkOption {
+          type = bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether a database should be automatically created on the
+            local host. Set this to false if you plan on provisioning a
+            local database yourself. This has no effect if
+            services.keycloak.database.host is customized.
+          '';
+        };
+
+        name = mkOption {
+          type = str;
+          default = "keycloak";
+          description = lib.mdDoc ''
+            Database name to use when connecting to an external or
+            manually provisioned database; has no effect when a local
+            database is automatically provisioned.
+
+            To use this with a local database, set [](#opt-services.keycloak.database.createLocally) to
+            `false` and create the database and user
+            manually.
+          '';
+        };
+
+        username = mkOption {
+          type = str;
+          default = "keycloak";
+          description = lib.mdDoc ''
+            Username to use when connecting to an external or manually
+            provisioned database; has no effect when a local database is
+            automatically provisioned.
+
+            To use this with a local database, set [](#opt-services.keycloak.database.createLocally) to
+            `false` and create the database and user
+            manually.
+          '';
+        };
+
+        passwordFile = mkOption {
+          type = path;
+          example = "/run/keys/db_password";
+          apply = assertStringPath "passwordFile";
+          description = lib.mdDoc ''
+            The path to a file containing the database password.
+          '';
+        };
+      };
+
+      package = mkOption {
+        type = package;
+        default = pkgs.keycloak;
+        defaultText = literalExpression "pkgs.keycloak";
+        description = lib.mdDoc ''
+          Keycloak package to use.
+        '';
+      };
+
+      initialAdminPassword = mkOption {
+        type = str;
+        default = "changeme";
+        description = lib.mdDoc ''
+          Initial password set for the `admin`
+          user. The password is not stored safely and should be changed
+          immediately in the admin panel.
+        '';
+      };
+
+      themes = mkOption {
+        type = attrsOf package;
+        default = { };
+        description = lib.mdDoc ''
+          Additional theme packages for Keycloak. Each theme is linked into
+          a subdirectory with a corresponding attribute name.
+
+          Theme packages consist of several subdirectories which provide
+          different theme types: for example, `account`,
+          `login` etc. After adding a theme to this option you
+          can select it by its name in the Keycloak administration console.
+        '';
+      };
+
+      settings = mkOption {
+        type = lib.types.submodule {
+          freeformType = attrsOf (nullOr (oneOf [ str int bool (attrsOf path) ]));
+
+          options = {
+            http-host = mkOption {
+              type = str;
+              default = "0.0.0.0";
+              example = "127.0.0.1";
+              description = lib.mdDoc ''
+                On which address Keycloak should accept new connections.
+              '';
+            };
+
+            http-port = mkOption {
+              type = port;
+              default = 80;
+              example = 8080;
+              description = lib.mdDoc ''
+                On which port Keycloak should listen for new HTTP connections.
+              '';
+            };
+
+            https-port = mkOption {
+              type = port;
+              default = 443;
+              example = 8443;
+              description = lib.mdDoc ''
+                On which port Keycloak should listen for new HTTPS connections.
+              '';
+            };
+
+            http-relative-path = mkOption {
+              type = str;
+              default = "/";
+              example = "/auth";
+              apply = x: if !(hasPrefix "/" x) then "/" + x else x;
+              description = lib.mdDoc ''
+                The path relative to `/` for serving
+                resources.
+
+                ::: {.note}
+                In versions of Keycloak using Wildfly (&lt;17),
+                this defaulted to `/auth`. If
+                upgrading from the Wildfly version of Keycloak,
+                i.e. a NixOS version before 22.05, you'll likely
+                want to set this to `/auth` to
+                keep compatibility with your clients.
+
+                See <https://www.keycloak.org/migration/migrating-to-quarkus>
+                for more information on migrating from Wildfly to Quarkus.
+                :::
+              '';
+            };
+
+            hostname = mkOption {
+              type = str;
+              example = "keycloak.example.com";
+              description = lib.mdDoc ''
+                The hostname part of the public URL used as base for
+                all frontend requests.
+
+                See <https://www.keycloak.org/server/hostname>
+                for more information about hostname configuration.
+              '';
+            };
+
+            hostname-strict-backchannel = mkOption {
+              type = bool;
+              default = false;
+              example = true;
+              description = lib.mdDoc ''
+                Whether Keycloak should force all requests to go
+                through the frontend URL. By default, Keycloak allows
+                backend requests to instead use its local hostname or
+                IP address and may also advertise it to clients
+                through its OpenID Connect Discovery endpoint.
+
+                See <https://www.keycloak.org/server/hostname>
+                for more information about hostname configuration.
+              '';
+            };
+
+            proxy = mkOption {
+              type = enum [ "edge" "reencrypt" "passthrough" "none" ];
+              default = "none";
+              example = "edge";
+              description = lib.mdDoc ''
+                The proxy address forwarding mode if the server is
+                behind a reverse proxy.
+
+                - `edge`:
+                  Enables communication through HTTP between the
+                  proxy and Keycloak.
+                - `reencrypt`:
+                  Requires communication through HTTPS between the
+                  proxy and Keycloak.
+                - `passthrough`:
+                  Enables communication through HTTP or HTTPS between
+                  the proxy and Keycloak.
+
+                See <https://www.keycloak.org/server/reverseproxy> for more information.
+              '';
+            };
+          };
+        };
+
+        example = literalExpression ''
+          {
+            hostname = "keycloak.example.com";
+            proxy = "reencrypt";
+            https-key-store-file = "/path/to/file";
+            https-key-store-password = { _secret = "/run/keys/store_password"; };
+          }
+        '';
+
+        description = lib.mdDoc ''
+          Configuration options corresponding to parameters set in
+          {file}`conf/keycloak.conf`.
+
+          Most available options are documented at <https://www.keycloak.org/server/all-config>.
+
+          Options containing secret data should be set to an attribute
+          set containing the attribute `_secret` - a
+          string pointing to a file containing the value the option
+          should be set to. See the example to get a better picture of
+          this: in the resulting
+          {file}`conf/keycloak.conf` file, the
+          `https-key-store-password` key will be set
+          to the contents of the
+          {file}`/run/keys/store_password` file.
+        '';
+      };
+    };
+
+  config =
+    let
+      # We only want to create a database if we're actually going to
+      # connect to it.
+      databaseActuallyCreateLocally = cfg.database.createLocally && cfg.database.host == "localhost";
+      createLocalPostgreSQL = databaseActuallyCreateLocally && cfg.database.type == "postgresql";
+      createLocalMySQL = databaseActuallyCreateLocally && elem cfg.database.type [ "mysql" "mariadb" ];
+
+      mySqlCaKeystore = pkgs.runCommand "mysql-ca-keystore" { } ''
+        ${pkgs.jre}/bin/keytool -importcert -trustcacerts -alias MySQLCACert -file ${cfg.database.caCert} -keystore $out -storepass notsosecretpassword -noprompt
+      '';
+
+      # Both theme and theme type directories need to be actual
+      # directories in one hierarchy to pass Keycloak checks.
+      themesBundle = pkgs.runCommand "keycloak-themes" { } ''
+        linkTheme() {
+          theme="$1"
+          name="$2"
+
+          mkdir "$out/$name"
+          for typeDir in "$theme"/*; do
+            if [ -d "$typeDir" ]; then
+              type="$(basename "$typeDir")"
+              mkdir "$out/$name/$type"
+              for file in "$typeDir"/*; do
+                ln -sn "$file" "$out/$name/$type/$(basename "$file")"
+              done
+            fi
+          done
+        }
+
+        mkdir -p "$out"
+        for theme in ${keycloakBuild}/themes/*; do
+          if [ -d "$theme" ]; then
+            linkTheme "$theme" "$(basename "$theme")"
+          fi
+        done
+
+        ${concatStringsSep "\n" (mapAttrsToList (name: theme: "linkTheme ${theme} ${escapeShellArg name}") cfg.themes)}
+      '';
+
+      keycloakConfig = lib.generators.toKeyValue {
+        mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" {
+          mkValueString = v: with builtins;
+            if isInt v then toString v
+            else if isString v then v
+            else if true == v then "true"
+            else if false == v then "false"
+            else if isSecret v then hashString "sha256" v._secret
+            else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
+        };
+      };
+
+      isSecret = v: isAttrs v && v ? _secret && isString v._secret;
+      filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [{ } null])) cfg.settings;
+      confFile = pkgs.writeText "keycloak.conf" (keycloakConfig filteredConfig);
+      keycloakBuild = cfg.package.override {
+        inherit confFile;
+        plugins = cfg.package.enabledPlugins ++ cfg.plugins;
+      };
+    in
+    mkIf cfg.enable
+      {
+        assertions = [
+          {
+            assertion = (cfg.database.useSSL && cfg.database.type == "postgresql") -> (cfg.database.caCert != null);
+            message = "A CA certificate must be specified (in 'services.keycloak.database.caCert') when PostgreSQL is used with SSL";
+          }
+          {
+            assertion = createLocalPostgreSQL -> config.services.postgresql.settings.standard_conforming_strings or true;
+            message = "Setting up a local PostgreSQL db for Keycloak requires `standard_conforming_strings` turned on to work reliably";
+          }
+        ];
+
+        environment.systemPackages = [ keycloakBuild ];
+
+        services.keycloak.settings =
+          let
+            postgresParams = concatStringsSep "&" (
+              optionals cfg.database.useSSL [
+                "ssl=true"
+              ] ++ optionals (cfg.database.caCert != null) [
+                "sslrootcert=${cfg.database.caCert}"
+                "sslmode=verify-ca"
+              ]
+            );
+            mariadbParams = concatStringsSep "&" ([
+              "characterEncoding=UTF-8"
+            ] ++ optionals cfg.database.useSSL [
+              "useSSL=true"
+              "requireSSL=true"
+              "verifyServerCertificate=true"
+            ] ++ optionals (cfg.database.caCert != null) [
+              "trustCertificateKeyStoreUrl=file:${mySqlCaKeystore}"
+              "trustCertificateKeyStorePassword=notsosecretpassword"
+            ]);
+            dbProps = if cfg.database.type == "postgresql" then postgresParams else mariadbParams;
+          in
+          mkMerge [
+            {
+              db = if cfg.database.type == "postgresql" then "postgres" else cfg.database.type;
+              db-username = if databaseActuallyCreateLocally then "keycloak" else cfg.database.username;
+              db-password._secret = cfg.database.passwordFile;
+              db-url-host = cfg.database.host;
+              db-url-port = toString cfg.database.port;
+              db-url-database = if databaseActuallyCreateLocally then "keycloak" else cfg.database.name;
+              db-url-properties = prefixUnlessEmpty "?" dbProps;
+              db-url = null;
+            }
+            (mkIf (cfg.sslCertificate != null && cfg.sslCertificateKey != null) {
+              https-certificate-file = "/run/keycloak/ssl/ssl_cert";
+              https-certificate-key-file = "/run/keycloak/ssl/ssl_key";
+            })
+          ];
+
+        systemd.services.keycloakPostgreSQLInit = mkIf createLocalPostgreSQL {
+          after = [ "postgresql.service" ];
+          before = [ "keycloak.service" ];
+          bindsTo = [ "postgresql.service" ];
+          path = [ config.services.postgresql.package ];
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+            User = "postgres";
+            Group = "postgres";
+            LoadCredential = [ "db_password:${cfg.database.passwordFile}" ];
+          };
+          script = ''
+            set -o errexit -o pipefail -o nounset -o errtrace
+            shopt -s inherit_errexit
+
+            create_role="$(mktemp)"
+            trap 'rm -f "$create_role"' EXIT
+
+            # Read the password from the credentials directory and
+            # escape any single quotes by adding additional single
+            # quotes after them, following the rules laid out here:
+            # https://www.postgresql.org/docs/current/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS
+            db_password="$(<"$CREDENTIALS_DIRECTORY/db_password")"
+            db_password="''${db_password//\'/\'\'}"
+
+            echo "CREATE ROLE keycloak WITH LOGIN PASSWORD '$db_password' CREATEDB" > "$create_role"
+            psql -tAc "SELECT 1 FROM pg_roles WHERE rolname='keycloak'" | grep -q 1 || psql -tA --file="$create_role"
+            psql -tAc "SELECT 1 FROM pg_database WHERE datname = 'keycloak'" | grep -q 1 || psql -tAc 'CREATE DATABASE "keycloak" OWNER "keycloak"'
+          '';
+        };
+
+        systemd.services.keycloakMySQLInit = mkIf createLocalMySQL {
+          after = [ "mysql.service" ];
+          before = [ "keycloak.service" ];
+          bindsTo = [ "mysql.service" ];
+          path = [ config.services.mysql.package ];
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+            User = config.services.mysql.user;
+            Group = config.services.mysql.group;
+            LoadCredential = [ "db_password:${cfg.database.passwordFile}" ];
+          };
+          script = ''
+            set -o errexit -o pipefail -o nounset -o errtrace
+            shopt -s inherit_errexit
+
+            # Read the password from the credentials directory and
+            # escape any single quotes by adding additional single
+            # quotes after them, following the rules laid out here:
+            # https://dev.mysql.com/doc/refman/8.0/en/string-literals.html
+            db_password="$(<"$CREDENTIALS_DIRECTORY/db_password")"
+            db_password="''${db_password//\'/\'\'}"
+
+            ( echo "SET sql_mode = 'NO_BACKSLASH_ESCAPES';"
+              echo "CREATE USER IF NOT EXISTS 'keycloak'@'localhost' IDENTIFIED BY '$db_password';"
+              echo "CREATE DATABASE IF NOT EXISTS keycloak CHARACTER SET utf8 COLLATE utf8_unicode_ci;"
+              echo "GRANT ALL PRIVILEGES ON keycloak.* TO 'keycloak'@'localhost';"
+            ) | mysql -N
+          '';
+        };
+
+        systemd.services.keycloak =
+          let
+            databaseServices =
+              if createLocalPostgreSQL then [
+                "keycloakPostgreSQLInit.service"
+                "postgresql.service"
+              ]
+              else if createLocalMySQL then [
+                "keycloakMySQLInit.service"
+                "mysql.service"
+              ]
+              else [ ];
+            secretPaths = catAttrs "_secret" (collect isSecret cfg.settings);
+            mkSecretReplacement = file: ''
+              replace-secret ${hashString "sha256" file} $CREDENTIALS_DIRECTORY/${baseNameOf file} /run/keycloak/conf/keycloak.conf
+            '';
+            secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
+          in
+          {
+            after = databaseServices;
+            bindsTo = databaseServices;
+            wantedBy = [ "multi-user.target" ];
+            path = with pkgs; [
+              keycloakBuild
+              openssl
+              replace-secret
+            ];
+            environment = {
+              KC_HOME_DIR = "/run/keycloak";
+              KC_CONF_DIR = "/run/keycloak/conf";
+            };
+            serviceConfig = {
+              LoadCredential =
+                map (p: "${baseNameOf p}:${p}") secretPaths
+                ++ optionals (cfg.sslCertificate != null && cfg.sslCertificateKey != null) [
+                  "ssl_cert:${cfg.sslCertificate}"
+                  "ssl_key:${cfg.sslCertificateKey}"
+                ];
+              User = "keycloak";
+              Group = "keycloak";
+              DynamicUser = true;
+              RuntimeDirectory = "keycloak";
+              RuntimeDirectoryMode = "0700";
+              AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+            };
+            script = ''
+              set -o errexit -o pipefail -o nounset -o errtrace
+              shopt -s inherit_errexit
+
+              umask u=rwx,g=,o=
+
+              ln -s ${themesBundle} /run/keycloak/themes
+              ln -s ${keycloakBuild}/providers /run/keycloak/
+
+              install -D -m 0600 ${confFile} /run/keycloak/conf/keycloak.conf
+
+              ${secretReplacements}
+
+              # Escape any backslashes in the db parameters, since
+              # they're otherwise unexpectedly read as escape
+              # sequences.
+              sed -i '/db-/ s|\\|\\\\|g' /run/keycloak/conf/keycloak.conf
+
+            '' + optionalString (cfg.sslCertificate != null && cfg.sslCertificateKey != null) ''
+              mkdir -p /run/keycloak/ssl
+              cp $CREDENTIALS_DIRECTORY/ssl_{cert,key} /run/keycloak/ssl/
+            '' + ''
+              export KEYCLOAK_ADMIN=admin
+              export KEYCLOAK_ADMIN_PASSWORD=${escapeShellArg cfg.initialAdminPassword}
+              kc.sh start --optimized
+            '';
+          };
+
+        services.postgresql.enable = mkDefault createLocalPostgreSQL;
+        services.mysql.enable = mkDefault createLocalMySQL;
+        services.mysql.package =
+          let
+            dbPkg = if cfg.database.type == "mariadb" then pkgs.mariadb else pkgs.mysql80;
+          in
+          mkIf createLocalMySQL (mkDefault dbPkg);
+      };
+
+  meta.doc = ./keycloak.md;
+  meta.maintainers = [ maintainers.talyz ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/komga.nix b/nixpkgs/nixos/modules/services/web-apps/komga.nix
new file mode 100644
index 000000000000..31f475fc7b04
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/komga.nix
@@ -0,0 +1,99 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.komga;
+
+in {
+  options = {
+    services.komga = {
+      enable = mkEnableOption (lib.mdDoc "Komga, a free and open source comics/mangas media server");
+
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = lib.mdDoc ''
+          The port that Komga will listen on.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "komga";
+        description = lib.mdDoc ''
+          User account under which Komga runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "komga";
+        description = lib.mdDoc ''
+          Group under which Komga runs.
+        '';
+      };
+
+      stateDir = mkOption {
+        type = types.str;
+        default = "/var/lib/komga";
+        description = lib.mdDoc ''
+          State and configuration directory Komga will use.
+        '';
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to open the firewall for the port in {option}`services.komga.port`.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    networking.firewall.allowedTCPPorts = mkIf cfg.openFirewall [ cfg.port ];
+
+    users.groups = mkIf (cfg.group == "komga") {
+      komga = {};
+    };
+
+    users.users = mkIf (cfg.user == "komga") {
+      komga = {
+        group = cfg.group;
+        home = cfg.stateDir;
+        description = "Komga Daemon user";
+        isSystemUser = true;
+      };
+    };
+
+    systemd.services.komga = {
+      environment = {
+        SERVER_PORT = builtins.toString cfg.port;
+        KOMGA_CONFIGDIR = cfg.stateDir;
+      };
+
+      description = "Komga is a free and open source comics/mangas media server";
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+
+        Type = "simple";
+        Restart = "on-failure";
+        ExecStart = "${pkgs.komga}/bin/komga";
+
+        StateDirectory = mkIf (cfg.stateDir == "/var/lib/komga") "komga";
+      };
+
+    };
+  };
+
+  meta.maintainers = with maintainers; [ govanify ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/lanraragi.nix b/nixpkgs/nixos/modules/services/web-apps/lanraragi.nix
new file mode 100644
index 000000000000..f1ab8b8b4eb4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/lanraragi.nix
@@ -0,0 +1,100 @@
+{ pkgs, lib, config, ... }:
+
+let
+  cfg = config.services.lanraragi;
+in
+{
+  meta.maintainers = with lib.maintainers; [ tomasajt ];
+
+  options.services = {
+    lanraragi = {
+      enable = lib.mkEnableOption (lib.mdDoc "LANraragi");
+      package = lib.mkPackageOptionMD pkgs "lanraragi" { };
+
+      port = lib.mkOption {
+        type = lib.types.port;
+        default = 3000;
+        description = lib.mdDoc "Port for LANraragi's web interface.";
+      };
+
+      passwordFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/keys/lanraragi-password";
+        description = lib.mdDoc ''
+          A file containing the password for LANraragi's admin interface.
+        '';
+      };
+
+      redis = {
+        port = lib.mkOption {
+          type = lib.types.port;
+          default = 6379;
+          description = lib.mdDoc "Port for LANraragi's Redis server.";
+        };
+        passwordFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/run/keys/redis-lanraragi-password";
+          description = lib.mdDoc ''
+            A file containing the password for LANraragi's Redis server.
+          '';
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.redis.servers.lanraragi = {
+      enable = true;
+      port = cfg.redis.port;
+      requirePassFile = cfg.redis.passwordFile;
+    };
+
+    systemd.services.lanraragi = {
+      description = "LANraragi main service";
+      after = [ "network.target" "redis-lanraragi.service" ];
+      requires = [ "redis-lanraragi.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = lib.getExe cfg.package;
+        DynamicUser = true;
+        StateDirectory = "lanraragi";
+        RuntimeDirectory = "lanraragi";
+        LogsDirectory = "lanraragi";
+        Restart = "on-failure";
+        WorkingDirectory = "/var/lib/lanraragi";
+      };
+      environment = {
+        "LRR_TEMP_DIRECTORY" = "/run/lanraragi";
+        "LRR_LOG_DIRECTORY" = "/var/log/lanraragi";
+        "LRR_NETWORK" = "http://*:${toString cfg.port}";
+        "HOME" = "/var/lib/lanraragi";
+      };
+      preStart = ''
+        REDIS_PASS=${lib.optionalString (cfg.redis.passwordFile != null) "$(head -n1 ${cfg.redis.passwordFile})"}
+        cat > lrr.conf <<EOF
+        {
+          redis_address => "127.0.0.1:${toString cfg.redis.port}",
+          redis_password => "$REDIS_PASS",
+          redis_database => "0",
+          redis_database_minion => "1",
+          redis_database_config => "2",
+          redis_database_search => "3",
+        }
+        EOF
+      '' + lib.optionalString (cfg.passwordFile != null) ''
+        PASS_HASH=$(
+          PASS=$(head -n1 ${cfg.passwordFile}) ${cfg.package.perlEnv}/bin/perl -I${cfg.package}/share/lanraragi/lib -e \
+            'use LANraragi::Controller::Config; print LANraragi::Controller::Config::make_password_hash($ENV{PASS})' \
+            2>/dev/null
+        )
+
+        ${lib.getExe pkgs.redis} -h 127.0.0.1 -p ${toString cfg.redis.port} -a "$REDIS_PASS" <<EOF
+          SELECT 2
+          HSET LRR_CONFIG password $PASS_HASH
+        EOF
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/lemmy.md b/nixpkgs/nixos/modules/services/web-apps/lemmy.md
new file mode 100644
index 000000000000..faafe096d138
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/lemmy.md
@@ -0,0 +1,31 @@
+# Lemmy {#module-services-lemmy}
+
+Lemmy is a federated alternative to Reddit, written in Rust.
+
+## Quickstart {#module-services-lemmy-quickstart}
+
+The minimal configuration to start Lemmy is:
+
+```nix
+services.lemmy = {
+  enable = true;
+  settings = {
+    hostname = "lemmy.union.rocks";
+    database.createLocally = true;
+  };
+  caddy.enable = true;
+};
+```
+
+This will start the backend on port 8536 and the frontend on port 1234.
+It will expose your instance through a Caddy reverse proxy at the hostname you've provided.
+PostgreSQL will be initialized on the same host automatically.
+
+## Usage {#module-services-lemmy-usage}
+
+On first connection you will be asked to define an admin user.
+
+## Missing {#module-services-lemmy-missing}
+
+- Exposing with nginx is not implemented yet.
+- This has been tested using a local database with a Unix socket connection. Using different database settings will likely require modifications.
diff --git a/nixpkgs/nixos/modules/services/web-apps/lemmy.nix b/nixpkgs/nixos/modules/services/web-apps/lemmy.nix
new file mode 100644
index 000000000000..32389f7a59dd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/lemmy.nix
@@ -0,0 +1,314 @@
+{ lib, pkgs, config, utils, ... }:
+with lib;
+let
+  cfg = config.services.lemmy;
+  settingsFormat = pkgs.formats.json { };
+in
+{
+  meta.maintainers = with maintainers; [ happysalada ];
+  meta.doc = ./lemmy.md;
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "lemmy" "jwtSecretPath" ] "As of v0.13.0, Lemmy auto-generates the JWT secret.")
+  ];
+
+  options.services.lemmy = {
+
+    enable = mkEnableOption (lib.mdDoc "Lemmy, a federated alternative to Reddit written in Rust");
+
+    server = {
+      package = mkPackageOptionMD pkgs "lemmy-server" {};
+    };
+
+    ui = {
+      package = mkPackageOptionMD pkgs "lemmy-ui" {};
+
+      port = mkOption {
+        type = types.port;
+        default = 1234;
+        description = lib.mdDoc "Port where lemmy-ui should listen for incoming requests.";
+      };
+    };
+
+    caddy.enable = mkEnableOption (lib.mdDoc "exposing lemmy with the caddy reverse proxy");
+    nginx.enable = mkEnableOption (lib.mdDoc "exposing lemmy with the nginx reverse proxy");
+
+    database = {
+      createLocally = mkEnableOption (lib.mdDoc "creation of database on the instance");
+
+      uri = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        description = lib.mdDoc "The connection URI to use. Takes priority over the configuration file if set.";
+      };
+
+      uriFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        description = lib.mdDoc "File which contains the database uri.";
+      };
+    };
+
+    pictrsApiKeyFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = lib.mdDoc "File which contains the value of `pictrs.api_key`.";
+    };
+
+    smtpPasswordFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = lib.mdDoc "File which contains the value of `email.smtp_password`.";
+    };
+
+    adminPasswordFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = lib.mdDoc "File which contains the value of `setup.admin_password`.";
+    };
+
+    settings = mkOption {
+      default = { };
+      description = lib.mdDoc "Lemmy configuration";
+
+      type = types.submodule {
+        freeformType = settingsFormat.type;
+
+        options.hostname = mkOption {
+          type = types.str;
+          default = null;
+          description = lib.mdDoc "The domain name of your instance (eg 'lemmy.ml').";
+        };
+
+        options.port = mkOption {
+          type = types.port;
+          default = 8536;
+          description = lib.mdDoc "Port where lemmy should listen for incoming requests.";
+        };
+
+        options.captcha = {
+          enabled = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc "Enable Captcha.";
+          };
+          difficulty = mkOption {
+            type = types.enum [ "easy" "medium" "hard" ];
+            default = "medium";
+            description = lib.mdDoc "The difficultly of the captcha to solve.";
+          };
+        };
+      };
+    };
+  };
+
+  config =
+    let
+      secretOptions = {
+        pictrsApiKeyFile = { setting = [ "pictrs" "api_key" ]; path = cfg.pictrsApiKeyFile; };
+        smtpPasswordFile = { setting = [ "email" "smtp_password" ]; path = cfg.smtpPasswordFile; };
+        adminPasswordFile = { setting = [ "setup" "admin_password" ]; path = cfg.adminPasswordFile; };
+        uriFile = { setting = [ "database" "uri" ]; path = cfg.database.uriFile; };
+      };
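+      # Keep only the secrets whose file path is set; each one is loaded as a systemd
+      # credential below and substituted into the settings file at runtime.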
+      secrets = lib.filterAttrs (option: data: data.path != null) secretOptions;
+    in
+    lib.mkIf cfg.enable {
+      services.lemmy.settings = lib.attrsets.recursiveUpdate (mapAttrs (name: mkDefault)
+        {
+          bind = "127.0.0.1";
+          tls_enabled = true;
+          pictrs = {
+            url = with config.services.pict-rs; "http://${address}:${toString port}";
+          };
+          actor_name_max_length = 20;
+
+          rate_limit.message = 180;
+          rate_limit.message_per_second = 60;
+          rate_limit.post = 6;
+          rate_limit.post_per_second = 600;
+          rate_limit.register = 3;
+          rate_limit.register_per_second = 3600;
+          rate_limit.image = 6;
+          rate_limit.image_per_second = 3600;
+        } // {
+          database = mapAttrs (name: mkDefault) {
+            user = "lemmy";
+            host = "/run/postgresql";
+            port = 5432;
+            database = "lemmy";
+            pool_size = 5;
+          };
+        }) (lib.foldlAttrs (acc: option: data: acc // lib.setAttrByPath data.setting { _secret = option; }) {} secrets);
+        # the option name is the id of the credential loaded by LoadCredential
+
+      services.postgresql = mkIf cfg.database.createLocally {
+        enable = true;
+        ensureDatabases = [ cfg.settings.database.database ];
+        ensureUsers = [{
+          name = cfg.settings.database.user;
+          ensureDBOwnership = true;
+        }];
+      };
+
+      services.pict-rs.enable = true;
+
+      services.caddy = mkIf cfg.caddy.enable {
+        enable = mkDefault true;
+        virtualHosts."${cfg.settings.hostname}" = {
+          extraConfig = ''
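+            # Static assets come straight from the lemmy-ui package; API, POST and
+            # ActivityPub requests go to the backend, everything else to the UI.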
+            handle_path /static/* {
+              root * ${cfg.ui.package}/dist
+              file_server
+            }
+            handle_path /static/${cfg.ui.package.passthru.commit_sha}/* {
+              root * ${cfg.ui.package}/dist
+              file_server
+            }
+            @for_backend {
+              path /api/* /pictrs/* /feeds/* /nodeinfo/*
+            }
+            handle @for_backend {
+              reverse_proxy 127.0.0.1:${toString cfg.settings.port}
+            }
+            @post {
+              method POST
+            }
+            handle @post {
+              reverse_proxy 127.0.0.1:${toString cfg.settings.port}
+            }
+            @jsonld {
+              header Accept "application/activity+json"
+              header Accept "application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\""
+            }
+            handle @jsonld {
+              reverse_proxy 127.0.0.1:${toString cfg.settings.port}
+            }
+            handle {
+              reverse_proxy 127.0.0.1:${toString cfg.ui.port}
+            }
+          '';
+        };
+      };
+
+      services.nginx = mkIf cfg.nginx.enable {
+        enable = mkDefault true;
+        virtualHosts."${cfg.settings.hostname}".locations = let
+          ui = "http://127.0.0.1:${toString cfg.ui.port}";
+          backend = "http://127.0.0.1:${toString cfg.settings.port}";
+        in {
+          "~ ^/(api|pictrs|feeds|nodeinfo|.well-known)" = {
+            # backend requests
+            proxyPass = backend;
+            proxyWebsockets = true;
+            recommendedProxySettings = true;
+          };
+          "/" = {
+            # mixed frontend and backend requests, based on the request headers
+            proxyPass = "$proxpass";
+            recommendedProxySettings = true;
+            extraConfig = ''
+              set $proxpass "${ui}";
+              if ($http_accept = "application/activity+json") {
+                set $proxpass "${backend}";
+              }
+              if ($http_accept = "application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"") {
+                set $proxpass "${backend}";
+              }
+              if ($request_method = POST) {
+                set $proxpass "${backend}";
+              }
+
+              # Cuts off the trailing slash on URLs to make them valid
+              rewrite ^(.+)/+$ $1 permanent;
+            '';
+          };
+        };
+      };
+
+      assertions = [
+        {
+          assertion = cfg.database.createLocally -> cfg.settings.database.host == "localhost" || cfg.settings.database.host == "/run/postgresql";
+          message = "if you want to create the database locally, you need to use a local database";
+        }
+        {
+          assertion = (!(hasAttrByPath ["federation"] cfg.settings)) && (!(hasAttrByPath ["federation" "enabled"] cfg.settings));
+          message = "`services.lemmy.settings.federation` was removed in 0.17.0 and no longer has any effect";
+        }
+        {
+          assertion = cfg.database.uriFile != null -> cfg.database.uri == null && !cfg.database.createLocally;
+          message = "specifying a database uri while also specifying a database uri file is not allowed";
+        }
+      ];
+
+      systemd.services.lemmy = let
+        substitutedConfig = "/run/lemmy/config.hjson";
+      in {
+        description = "Lemmy server";
+
+        environment = {
+          LEMMY_CONFIG_LOCATION = if secrets == {} then settingsFormat.generate "config.hjson" cfg.settings else substitutedConfig;
+          LEMMY_DATABASE_URL = if cfg.database.uri != null then cfg.database.uri else (mkIf (cfg.database.createLocally) "postgres:///lemmy?host=/run/postgresql&user=lemmy");
+        };
+
+        documentation = [
+          "https://join-lemmy.org/docs/en/admins/from_scratch.html"
+          "https://join-lemmy.org/docs/en/"
+        ];
+
+        wantedBy = [ "multi-user.target" ];
+
+        after = [ "pict-rs.service" ] ++ lib.optionals cfg.database.createLocally [ "postgresql.service" ];
+
+        requires = lib.optionals cfg.database.createLocally [ "postgresql.service" ];
+
+        # substitute secrets and prevent others from reading the result
+        # if somehow $CREDENTIALS_DIRECTORY is not set we fail
+        preStart = mkIf (secrets != {}) ''
+          set -u
+          umask u=rw,g=,o=
+          cd "$CREDENTIALS_DIRECTORY"
+          ${utils.genJqSecretsReplacementSnippet cfg.settings substitutedConfig}
+        '';
+
+        serviceConfig = {
+          DynamicUser = true;
+          RuntimeDirectory = "lemmy";
+          ExecStart = "${cfg.server.package}/bin/lemmy_server";
+          LoadCredential = lib.foldlAttrs (acc: option: data: acc ++ [ "${option}:${toString data.path}" ]) [] secrets;
+          PrivateTmp = true;
+          MemoryDenyWriteExecute = true;
+          NoNewPrivileges = true;
+        };
+      };
+
+      systemd.services.lemmy-ui = {
+        description = "Lemmy ui";
+
+        environment = {
+          LEMMY_UI_HOST = "127.0.0.1:${toString cfg.ui.port}";
+          LEMMY_UI_LEMMY_INTERNAL_HOST = "127.0.0.1:${toString cfg.settings.port}";
+          LEMMY_UI_LEMMY_EXTERNAL_HOST = cfg.settings.hostname;
+          LEMMY_UI_HTTPS = "false";
+          NODE_ENV = "production";
+        };
+
+        documentation = [
+          "https://join-lemmy.org/docs/en/admins/from_scratch.html"
+          "https://join-lemmy.org/docs/en/"
+        ];
+
+        wantedBy = [ "multi-user.target" ];
+
+        after = [ "lemmy.service" ];
+
+        requires = [ "lemmy.service" ];
+
+        serviceConfig = {
+          DynamicUser = true;
+          WorkingDirectory = "${cfg.ui.package}";
+          ExecStart = "${pkgs.nodejs}/bin/node ${cfg.ui.package}/dist/js/server.js";
+        };
+      };
+    };
+
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/limesurvey.nix b/nixpkgs/nixos/modules/services/web-apps/limesurvey.nix
new file mode 100644
index 000000000000..920e6928ef5c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/limesurvey.nix
@@ -0,0 +1,309 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  inherit (lib) mkDefault mkEnableOption mkForce mkIf mkMerge mkOption;
+  inherit (lib) literalExpression mapAttrs optional optionalString types;
+
+  cfg = config.services.limesurvey;
+  fpm = config.services.phpfpm.pools.limesurvey;
+
+  user = "limesurvey";
+  group = config.services.httpd.group;
+  stateDir = "/var/lib/limesurvey";
+
+  pkg = pkgs.limesurvey;
+
+  configType = with types; oneOf [ (attrsOf configType) str int bool ] // {
+    description = "limesurvey config type (str, int, bool or attribute set thereof)";
+  };
+
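+  # Generated config.php; it is handed to PHP through the LIMESURVEY_CONFIG
+  # environment variable set on the php-fpm pool and the init service below.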
+  limesurveyConfig = pkgs.writeText "config.php" ''
+    <?php
+      return json_decode('${builtins.toJSON cfg.config}', true);
+    ?>
+  '';
+
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
+
+in
+{
+  # interface
+
+  options.services.limesurvey = {
+    enable = mkEnableOption (lib.mdDoc "Limesurvey web application");
+
+    encryptionKey = mkOption {
+      type = types.str;
+      default = "E17687FC77CEE247F0E22BB3ECF27FDE8BEC310A892347EC13013ABA11AA7EB5";
+      description = lib.mdDoc ''
+        This is a 32-byte key used to encrypt variables in the database.
+        You _must_ change this from the default value.
+      '';
+    };
+
+    encryptionNonce = mkOption {
+      type = types.str;
+      default = "1ACC8555619929DB91310BE848025A427B0F364A884FFA77";
+      description = lib.mdDoc ''
+        This is a 24-byte nonce used to encrypt variables in the database.
+        You _must_ change this from the default value.
+      '';
+    };
+
+    database = {
+      type = mkOption {
+        type = types.enum [ "mysql" "pgsql" "odbc" "mssql" ];
+        example = "pgsql";
+        default = "mysql";
+        description = lib.mdDoc "Database engine to use.";
+      };
+
+      dbEngine = mkOption {
+        type = types.enum [ "MyISAM" "InnoDB" ];
+        default = "InnoDB";
+        description = lib.mdDoc "Database storage engine to use.";
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Database host address.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = if cfg.database.type == "pgsql" then 5432 else 3306;
+        defaultText = literalExpression ''if config.services.limesurvey.database.type == "pgsql" then 5432 else 3306'';
+        description = lib.mdDoc "Database host port.";
+      };
+
+      name = mkOption {
+        type = types.str;
+        default = "limesurvey";
+        description = lib.mdDoc "Database name.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "limesurvey";
+        description = lib.mdDoc "Database user.";
+      };
+
+      passwordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/limesurvey-dbpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to
+          {option}`database.user`.
+        '';
+      };
+
+      socket = mkOption {
+        type = types.nullOr types.path;
+        default =
+          if mysqlLocal then "/run/mysqld/mysqld.sock"
+          else if pgsqlLocal then "/run/postgresql"
+          else null
+        ;
+        defaultText = literalExpression "/run/mysqld/mysqld.sock";
+        description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+      };
+
+      createLocally = mkOption {
+        type = types.bool;
+        default = cfg.database.type == "mysql";
+        defaultText = literalExpression "true";
+        description = lib.mdDoc ''
+          Create the database and database user locally.
+          This currently only applies if database type "mysql" is selected.
+        '';
+      };
+    };
+
+    virtualHost = mkOption {
+      type = types.submodule (import ../web-servers/apache-httpd/vhost-options.nix);
+      example = literalExpression ''
+        {
+          hostName = "survey.example.org";
+          adminAddr = "webmaster@example.org";
+          forceSSL = true;
+          enableACME = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        Apache configuration can be done by adapting `services.httpd.virtualHosts.<name>`.
+        See [](#opt-services.httpd.virtualHosts) for further information.
+      '';
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = lib.mdDoc ''
+        Options for the LimeSurvey PHP pool. See the documentation on `php-fpm.conf`
+        for details on configuration directives.
+      '';
+    };
+
+    config = mkOption {
+      type = configType;
+      default = {};
+      description = lib.mdDoc ''
+        LimeSurvey configuration. Refer to
+        <https://manual.limesurvey.org/Optional_settings>
+        for details on supported values.
+      '';
+    };
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.database.createLocally -> cfg.database.type == "mysql";
+        message = "services.limesurvey.createLocally is currently only supported for database type 'mysql'";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.user == user;
+        message = "services.limesurvey.database.user must be set to ${user} if services.limesurvey.database.createLocally is set true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.socket != null;
+        message = "services.limesurvey.database.socket must be set if services.limesurvey.database.createLocally is set to true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.limesurvey.database.createLocally is set to true";
+      }
+    ];
+
+    services.limesurvey.config = mapAttrs (name: mkDefault) {
+      runtimePath = "${stateDir}/tmp/runtime";
+      components = {
+        db = {
+          connectionString = "${cfg.database.type}:dbname=${cfg.database.name};host=${if pgsqlLocal then cfg.database.socket else cfg.database.host};port=${toString cfg.database.port}" +
+            optionalString mysqlLocal ";socket=${cfg.database.socket}";
+          username = cfg.database.user;
+          password = mkIf (cfg.database.passwordFile != null) "file_get_contents(\"${toString cfg.database.passwordFile}\");";
+          tablePrefix = "limesurvey_";
+        };
+        assetManager.basePath = "${stateDir}/tmp/assets";
+        urlManager = {
+          urlFormat = "path";
+          showScriptName = false;
+        };
+      };
+      config = {
+        tempdir = "${stateDir}/tmp";
+        uploaddir = "${stateDir}/upload";
+        encryptionnonce = cfg.encryptionNonce;
+        encryptionsecretboxkey = cfg.encryptionKey;
+        force_ssl = mkIf (cfg.virtualHost.addSSL || cfg.virtualHost.forceSSL || cfg.virtualHost.onlySSL) "on";
+        config.defaultlang = "en";
+      };
+    };
+
+    services.mysql = mkIf mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = {
+            "${cfg.database.name}.*" = "SELECT, CREATE, INSERT, UPDATE, DELETE, ALTER, DROP, INDEX";
+          };
+        }
+      ];
+    };
+
+    services.phpfpm.pools.limesurvey = {
+      inherit user group;
+      phpPackage = pkgs.php81;
+      phpEnv.DBENGINE = "${cfg.database.dbEngine}";
+      phpEnv.LIMESURVEY_CONFIG = "${limesurveyConfig}";
+      settings = {
+        "listen.owner" = config.services.httpd.user;
+        "listen.group" = config.services.httpd.group;
+      } // cfg.poolConfig;
+    };
+
+    services.httpd = {
+      enable = true;
+      adminAddr = mkDefault cfg.virtualHost.adminAddr;
+      extraModules = [ "proxy_fcgi" ];
+      virtualHosts.${cfg.virtualHost.hostName} = mkMerge [ cfg.virtualHost {
+        documentRoot = mkForce "${pkg}/share/limesurvey";
+        extraConfig = ''
+          Alias "/tmp" "${stateDir}/tmp"
+          <Directory "${stateDir}">
+            AllowOverride all
+            Require all granted
+            Options -Indexes +FollowSymlinks
+          </Directory>
+
+          Alias "/upload" "${stateDir}/upload"
+          <Directory "${stateDir}/upload">
+            AllowOverride all
+            Require all granted
+            Options -Indexes
+          </Directory>
+
+          <Directory "${pkg}/share/limesurvey">
+            <FilesMatch "\.php$">
+              <If "-f %{REQUEST_FILENAME}">
+                SetHandler "proxy:unix:${fpm.socket}|fcgi://localhost/"
+              </If>
+            </FilesMatch>
+
+            AllowOverride all
+            Options -Indexes
+            DirectoryIndex index.php
+          </Directory>
+        '';
+      } ];
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${stateDir} 0750 ${user} ${group} - -"
+      "d ${stateDir}/tmp 0750 ${user} ${group} - -"
+      "d ${stateDir}/tmp/assets 0750 ${user} ${group} - -"
+      "d ${stateDir}/tmp/runtime 0750 ${user} ${group} - -"
+      "d ${stateDir}/tmp/upload 0750 ${user} ${group} - -"
+      "C ${stateDir}/upload 0750 ${user} ${group} - ${pkg}/share/limesurvey/upload"
+    ];
+
+    systemd.services.limesurvey-init = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "phpfpm-limesurvey.service" ];
+      after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+      environment.DBENGINE = "${cfg.database.dbEngine}";
+      environment.LIMESURVEY_CONFIG = limesurveyConfig;
+      script = ''
+        # update or install the database as required
+        ${pkgs.php81}/bin/php ${pkg}/share/limesurvey/application/commands/console.php updatedb || \
+        ${pkgs.php81}/bin/php ${pkg}/share/limesurvey/application/commands/console.php install admin password admin admin@example.com verbose
+      '';
+      serviceConfig = {
+        User = user;
+        Group = group;
+        Type = "oneshot";
+      };
+    };
+
+    systemd.services.httpd.after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/mainsail.nix b/nixpkgs/nixos/modules/services/web-apps/mainsail.nix
new file mode 100644
index 000000000000..f335d9b015d4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/mainsail.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.mainsail;
+  moonraker = config.services.moonraker;
+in
+{
+  options.services.mainsail = {
+    enable = mkEnableOption (lib.mdDoc "a modern and responsive user interface for Klipper");
+
+    package = mkOption {
+      type = types.package;
+      description = lib.mdDoc "Mainsail package to be used in the module";
+      default = pkgs.mainsail;
+      defaultText = literalExpression "pkgs.mainsail";
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Hostname to serve mainsail on";
+    };
+
+    nginx = mkOption {
+      type = types.submodule
+        (import ../web-servers/nginx/vhost-options.nix { inherit config lib; });
+      default = { };
+      example = literalExpression ''
+        {
+          serverAliases = [ "mainsail.''${config.networking.domain}" ];
+        }
+      '';
+      description = lib.mdDoc "Extra configuration for the nginx virtual host of mainsail.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.nginx = {
+      enable = true;
+      upstreams.mainsail-apiserver.servers."${moonraker.address}:${toString moonraker.port}" = { };
+      virtualHosts."${cfg.hostName}" = mkMerge [
+        cfg.nginx
+        {
+          root = mkForce "${cfg.package}/share/mainsail";
+          locations = {
+            "/" = {
+              index = "index.html";
+              tryFiles = "$uri $uri/ /index.html";
+            };
+            "/index.html".extraConfig = ''
+              add_header Cache-Control "no-store, no-cache, must-revalidate";
+            '';
+            "/websocket" = {
+              proxyWebsockets = true;
+              proxyPass = "http://mainsail-apiserver/websocket";
+            };
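+            # Forward Moonraker API endpoints to the moonraker upstream defined above.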
+            "~ ^/(printer|api|access|machine|server)/" = {
+              proxyWebsockets = true;
+              proxyPass = "http://mainsail-apiserver$request_uri";
+            };
+          };
+        }
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/mastodon.nix b/nixpkgs/nixos/modules/services/web-apps/mastodon.nix
new file mode 100644
index 000000000000..8686506b1c28
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/mastodon.nix
@@ -0,0 +1,873 @@
+{ lib, pkgs, config, options, ... }:
+
+let
+  cfg = config.services.mastodon;
+  opt = options.services.mastodon;
+
+  # We only want to create a database if we're actually going to connect to it.
+  databaseActuallyCreateLocally = cfg.database.createLocally && cfg.database.host == "/run/postgresql";
+
+  env = {
+    RAILS_ENV = "production";
+    NODE_ENV = "production";
+
+    LD_PRELOAD = "${pkgs.jemalloc}/lib/libjemalloc.so";
+
+    # mastodon-web concurrency.
+    WEB_CONCURRENCY = toString cfg.webProcesses;
+    MAX_THREADS = toString cfg.webThreads;
+
+    DB_USER = cfg.database.user;
+
+    REDIS_HOST = cfg.redis.host;
+    REDIS_PORT = toString(cfg.redis.port);
+    DB_HOST = cfg.database.host;
+    DB_NAME = cfg.database.name;
+    LOCAL_DOMAIN = cfg.localDomain;
+    SMTP_SERVER = cfg.smtp.host;
+    SMTP_PORT = toString(cfg.smtp.port);
+    SMTP_FROM_ADDRESS = cfg.smtp.fromAddress;
+    PAPERCLIP_ROOT_PATH = "/var/lib/mastodon/public-system";
+    PAPERCLIP_ROOT_URL = "/system";
+    ES_ENABLED = if (cfg.elasticsearch.host != null) then "true" else "false";
+
+    TRUSTED_PROXY_IP = cfg.trustedProxy;
+  }
+  // lib.optionalAttrs (cfg.database.host != "/run/postgresql" && cfg.database.port != null) { DB_PORT = toString cfg.database.port; }
+  // lib.optionalAttrs cfg.smtp.authenticate { SMTP_LOGIN  = cfg.smtp.user; }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_HOST = cfg.elasticsearch.host; }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PORT = toString(cfg.elasticsearch.port); }
+  // lib.optionalAttrs (cfg.elasticsearch.host != null) { ES_PRESET = cfg.elasticsearch.preset; }
+  // lib.optionalAttrs (cfg.elasticsearch.user != null) { ES_USER = cfg.elasticsearch.user; }
+  // cfg.extraConfig;
+
+  systemCallsList = [ "@cpu-emulation" "@debug" "@keyring" "@ipc" "@mount" "@obsolete" "@privileged" "@setuid" ];
+
+  cfgService = {
+    # User and group
+    User = cfg.user;
+    Group = cfg.group;
+    # Working directory
+    WorkingDirectory = cfg.package;
+    # State directory and mode
+    StateDirectory = "mastodon";
+    StateDirectoryMode = "0750";
+    # Logs directory and mode
+    LogsDirectory = "mastodon";
+    LogsDirectoryMode = "0750";
+    # Proc filesystem
+    ProcSubset = "pid";
+    ProtectProc = "invisible";
+    # Access write directories
+    UMask = "0027";
+    # Capabilities
+    CapabilityBoundingSet = "";
+    # Security
+    NoNewPrivileges = true;
+    # Sandboxing
+    ProtectSystem = "strict";
+    ProtectHome = true;
+    PrivateTmp = true;
+    PrivateDevices = true;
+    PrivateUsers = true;
+    ProtectClock = true;
+    ProtectHostname = true;
+    ProtectKernelLogs = true;
+    ProtectKernelModules = true;
+    ProtectKernelTunables = true;
+    ProtectControlGroups = true;
+    RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+    RestrictNamespaces = true;
+    LockPersonality = true;
+    MemoryDenyWriteExecute = false;
+    RestrictRealtime = true;
+    RestrictSUIDSGID = true;
+    RemoveIPC = true;
+    PrivateMounts = true;
+    # System Call Filtering
+    SystemCallArchitectures = "native";
+  };
+
+  envFile = pkgs.writeText "mastodon.env" (lib.concatMapStrings (s: s + "\n") (
+    (lib.concatLists (lib.mapAttrsToList (name: value:
+      lib.optional (value != null) ''${name}="${toString value}"''
+    ) env))));
+
+  mastodonTootctl = let
+    sourceExtraEnv = lib.concatMapStrings (p: "source ${p}\n") cfg.extraEnvFiles;
+  in pkgs.writeShellScriptBin "mastodon-tootctl" ''
+    set -a
+    export RAILS_ROOT="${cfg.package}"
+    source "${envFile}"
+    source /var/lib/mastodon/.secrets_env
+    ${sourceExtraEnv}
+
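+    # Re-exec tootctl through sudo when the caller is not already the configured mastodon user.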
+    sudo=exec
+    if [[ "$USER" != ${cfg.user} ]]; then
+      sudo='exec /run/wrappers/bin/sudo -u ${cfg.user} --preserve-env'
+    fi
+    $sudo ${cfg.package}/bin/tootctl "$@"
+  '';
+
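+  # One systemd service per configured Sidekiq process, named mastodon-sidekiq-<name>.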
+  sidekiqUnits = lib.attrsets.mapAttrs' (name: processCfg:
+    lib.nameValuePair "mastodon-sidekiq-${name}" (let
+      jobClassArgs = toString (builtins.map (c: "-q ${c}") processCfg.jobClasses);
+      jobClassLabel = toString ([""] ++ processCfg.jobClasses);
+      threads = toString (if processCfg.threads == null then cfg.sidekiqThreads else processCfg.threads);
+    in {
+      after = [ "network.target" "mastodon-init-dirs.service" ]
+        ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+        ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+      requires = [ "mastodon-init-dirs.service" ]
+        ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+        ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+      description = "Mastodon sidekiq${jobClassLabel}";
+      wantedBy = [ "mastodon.target" ];
+      environment = env // {
+        PORT = toString(cfg.sidekiqPort);
+        DB_POOL = threads;
+      };
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/sidekiq ${jobClassArgs} -c ${threads} -r ${cfg.package}";
+        Restart = "always";
+        RestartSec = 20;
+        EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
+        WorkingDirectory = cfg.package;
+        # System Call Filtering
+        SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
+      } // cfgService;
+      path = with pkgs; [ file imagemagick ffmpeg ];
+    })
+  ) cfg.sidekiqProcesses;
+
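+  # One systemd service per streaming process, each listening on its own unix
+  # socket under /run/mastodon-streaming/.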
+  streamingUnits = builtins.listToAttrs
+      (map (i: {
+        name = "mastodon-streaming-${toString i}";
+        value = {
+          after = [ "network.target" "mastodon-init-dirs.service" ]
+            ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+            ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+          requires = [ "mastodon-init-dirs.service" ]
+            ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+            ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+          wantedBy = [ "mastodon.target" "mastodon-streaming.target" ];
+          description = "Mastodon streaming ${toString i}";
+          environment = env // { SOCKET = "/run/mastodon-streaming/streaming-${toString i}.socket"; };
+          serviceConfig = {
+            ExecStart = "${cfg.package}/run-streaming.sh";
+            Restart = "always";
+            RestartSec = 20;
+            EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
+            WorkingDirectory = cfg.package;
+            # Runtime directory and mode
+            RuntimeDirectory = "mastodon-streaming";
+            RuntimeDirectoryMode = "0750";
+            # System Call Filtering
+            SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@memlock" "@resources" ])) "pipe" "pipe2" ];
+          } // cfgService;
+        };
+      })
+      (lib.range 1 cfg.streamingProcesses));
+
+in {
+
+  imports = [
+    (lib.mkRemovedOptionModule
+      [ "services" "mastodon" "streamingPort" ]
+      "Mastodon currently doesn't support streaming via TCP ports. Please open a PR if you need this."
+    )
+  ];
+
+  options = {
+    services.mastodon = {
+      enable = lib.mkEnableOption (lib.mdDoc "Mastodon, a federated social network server");
+
+      configureNginx = lib.mkOption {
+        description = lib.mdDoc ''
+          Configure nginx as a reverse proxy for mastodon.
+          Note that this makes some assumptions on your setup, and sets settings that will
+          affect other virtualHosts running on your nginx instance, if any.
+          Alternatively you can configure a reverse-proxy of your choice to serve these paths:
+
+          `/ -> $(nix-instantiate --eval '<nixpkgs>' -A mastodon.outPath)/public`
+
+          `/ -> 127.0.0.1:{{ webPort }} `(If there was no file in the directory above.)
+
+          `/system/ -> /var/lib/mastodon/public-system/`
+
+          `/api/v1/streaming/ -> 127.0.0.1:{{ streamingPort }}`
+
+          Make sure that websockets are forwarded properly. You might want to set up caching
+          of some requests. Take a look at mastodon's provided nginx configuration at
+          `https://github.com/mastodon/mastodon/blob/master/dist/nginx.conf`.
+        '';
+        type = lib.types.bool;
+        default = false;
+      };
+
+      user = lib.mkOption {
+        description = lib.mdDoc ''
+          User under which mastodon runs. If it is set to "mastodon",
+          that user will be created, otherwise it should be set to the
+          name of a user created elsewhere.
+          In both cases, the `mastodon` package will be added to the user's
+          package set, and a tootctl wrapper that switches to the configured
+          account and loads the right environment will be added to the
+          system packages.
+        '';
+        type = lib.types.str;
+        default = "mastodon";
+      };
+
+      group = lib.mkOption {
+        description = lib.mdDoc ''
+          Group under which mastodon runs.
+        '';
+        type = lib.types.str;
+        default = "mastodon";
+      };
+
+      streamingProcesses = lib.mkOption {
+        description = lib.mdDoc ''
+          Number of processes used by the mastodon-streaming service.
+          The recommended value is the number of CPU cores minus one.
+        '';
+        type = lib.types.ints.positive;
+        example = 3;
+      };
+
+      webPort = lib.mkOption {
+        description = lib.mdDoc "TCP port used by the mastodon-web service.";
+        type = lib.types.port;
+        default = 55001;
+      };
+      webProcesses = lib.mkOption {
+        description = lib.mdDoc "Processes used by the mastodon-web service.";
+        type = lib.types.int;
+        default = 2;
+      };
+      webThreads = lib.mkOption {
+        description = lib.mdDoc "Threads per process used by the mastodon-web service.";
+        type = lib.types.int;
+        default = 5;
+      };
+
+      sidekiqPort = lib.mkOption {
+        description = lib.mdDoc "TCP port used by the mastodon-sidekiq service.";
+        type = lib.types.port;
+        default = 55002;
+      };
+
+      sidekiqThreads = lib.mkOption {
+        description = lib.mdDoc "Worker threads used by the mastodon-sidekiq-all service. If `sidekiqProcesses` is configured and any processes specify null `threads`, this value is used.";
+        type = lib.types.int;
+        default = 25;
+      };
+
+      sidekiqProcesses = lib.mkOption {
+        description = lib.mdDoc "How many Sidekiq processes should be used to handle background jobs, and which job classes they handle. *Read the [upstream documentation](https://docs.joinmastodon.org/admin/scaling/#sidekiq) before configuring this!*";
+        type = with lib.types; attrsOf (submodule {
+          options = {
+            jobClasses = lib.mkOption {
+              type = listOf (enum [ "default" "push" "pull" "mailers" "scheduler" "ingress" ]);
+              description = lib.mdDoc "If not empty, which job classes should be executed by this process. *Only one process should handle the 'scheduler' class. If left empty, this process will handle the 'scheduler' class.*";
+            };
+            threads = lib.mkOption {
+              type = nullOr int;
+              description = lib.mdDoc "Number of threads this process should use for executing jobs. If null, the configured `sidekiqThreads` are used.";
+            };
+          };
+        });
+        default = {
+          all = {
+            jobClasses = [ ];
+            threads = null;
+          };
+        };
+        example = {
+          all = {
+            jobClasses = [ ];
+            threads = null;
+          };
+          ingress = {
+            jobClasses = [ "ingress" ];
+            threads = 5;
+          };
+          default = {
+            jobClasses = [ "default" ];
+            threads = 10;
+          };
+          push-pull = {
+            jobClasses = [ "push" "pull" ];
+            threads = 5;
+          };
+        };
+      };
+
+      vapidPublicKeyFile = lib.mkOption {
+        description = lib.mdDoc ''
+          Path to file containing the public key used for Web Push
+          Voluntary Application Server Identification.  A new keypair can
+          be generated by running:
+
+          `nix build -f '<nixpkgs>' mastodon; cd result; bin/rake webpush:generate_keys`
+
+          If {option}`mastodon.vapidPrivateKeyFile` does not
+          exist, it and this file will be created with a new keypair.
+        '';
+        default = "/var/lib/mastodon/secrets/vapid-public-key";
+        type = lib.types.str;
+      };
+
+      localDomain = lib.mkOption {
+        description = lib.mdDoc "The domain serving your Mastodon instance.";
+        example = "social.example.org";
+        type = lib.types.str;
+      };
+
+      secretKeyBaseFile = lib.mkOption {
+        description = lib.mdDoc ''
+          Path to file containing the secret key base.
+          A new secret key base can be generated by running:
+
+          `nix build -f '<nixpkgs>' mastodon; cd result; bin/rake secret`
+
+          If this file does not exist, it will be created with a new secret key base.
+        '';
+        default = "/var/lib/mastodon/secrets/secret-key-base";
+        type = lib.types.str;
+      };
+
+      otpSecretFile = lib.mkOption {
+        description = lib.mdDoc ''
+          Path to file containing the OTP secret.
+          A new OTP secret can be generated by running:
+
+          `nix build -f '<nixpkgs>' mastodon; cd result; bin/rake secret`
+
+          If this file does not exist, it will be created with a new OTP secret.
+        '';
+        default = "/var/lib/mastodon/secrets/otp-secret";
+        type = lib.types.str;
+      };
+
+      vapidPrivateKeyFile = lib.mkOption {
+        description = lib.mdDoc ''
+          Path to file containing the private key used for Web Push
+          Voluntary Application Server Identification.  A new keypair can
+          be generated by running:
+
+          `nix build -f '<nixpkgs>' mastodon; cd result; bin/rake webpush:generate_keys`
+
+          If this file does not exist, it will be created with a new
+          private key.
+        '';
+        default = "/var/lib/mastodon/secrets/vapid-private-key";
+        type = lib.types.str;
+      };
+
+      trustedProxy = lib.mkOption {
+        description = lib.mdDoc ''
+          Set this to the IP address from which your reverse proxy sends requests to Mastodon's web process.
+          Otherwise Mastodon will record the reverse proxy's own IP as the IP of every request, which is a problem
+          because IP addresses are used for important rate limits and security functions.
+        '';
+        type = lib.types.str;
+        default = "127.0.0.1";
+      };
+
+      enableUnixSocket = lib.mkOption {
+        description = lib.mdDoc ''
+          Instead of binding to an IP address like 127.0.0.1, you may bind to a Unix socket. This variable
+          is process-specific, i.e. you need a different value for every process, and it works for both web (Puma)
+          processes and streaming API (Node.js) processes.
+        '';
+        type = lib.types.bool;
+        default = true;
+      };
+
+      redis = {
+        createLocally = lib.mkOption {
+          description = lib.mdDoc "Configure local Redis server for Mastodon.";
+          type = lib.types.bool;
+          default = true;
+        };
+
+        host = lib.mkOption {
+          description = lib.mdDoc "Redis host.";
+          type = lib.types.str;
+          default = "127.0.0.1";
+        };
+
+        port = lib.mkOption {
+          description = lib.mdDoc "Redis port.";
+          type = lib.types.port;
+          default = 31637;
+        };
+      };
+
+      database = {
+        createLocally = lib.mkOption {
+          description = lib.mdDoc "Configure local PostgreSQL database server for Mastodon.";
+          type = lib.types.bool;
+          default = true;
+        };
+
+        host = lib.mkOption {
+          type = lib.types.str;
+          default = "/run/postgresql";
+          example = "192.168.23.42";
+          description = lib.mdDoc "Database host address or unix socket.";
+        };
+
+        port = lib.mkOption {
+          type = lib.types.nullOr lib.types.port;
+          default = if cfg.database.createLocally then null else 5432;
+          defaultText = lib.literalExpression ''
+            if config.${opt.database.createLocally}
+            then null
+            else 5432
+          '';
+          description = lib.mdDoc "Database host port.";
+        };
+
+        name = lib.mkOption {
+          type = lib.types.str;
+          default = "mastodon";
+          description = lib.mdDoc "Database name.";
+        };
+
+        user = lib.mkOption {
+          type = lib.types.str;
+          default = "mastodon";
+          description = lib.mdDoc "Database user.";
+        };
+
+        passwordFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/mastodon/secrets/db-password";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            {option}`database.user`.
+          '';
+        };
+      };
+
+      smtp = {
+        createLocally = lib.mkOption {
+          description = lib.mdDoc "Configure local Postfix SMTP server for Mastodon.";
+          type = lib.types.bool;
+          default = true;
+        };
+
+        authenticate = lib.mkOption {
+          description = lib.mdDoc "Authenticate with the SMTP server using username and password.";
+          type = lib.types.bool;
+          default = false;
+        };
+
+        host = lib.mkOption {
+          description = lib.mdDoc "SMTP host used when sending emails to users.";
+          type = lib.types.str;
+          default = "127.0.0.1";
+        };
+
+        port = lib.mkOption {
+          description = lib.mdDoc "SMTP port used when sending emails to users.";
+          type = lib.types.port;
+          default = 25;
+        };
+
+        fromAddress = lib.mkOption {
+          description = lib.mdDoc ''"From" address used when sending Emails to users.'';
+          type = lib.types.str;
+        };
+
+        user = lib.mkOption {
+          type = lib.types.nullOr lib.types.str;
+          default = null;
+          example = "mastodon@example.com";
+          description = lib.mdDoc "SMTP login name.";
+        };
+
+        passwordFile = lib.mkOption {
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/mastodon/secrets/smtp-password";
+          description = lib.mdDoc ''
+            Path to file containing the SMTP password.
+          '';
+        };
+      };
+
+      elasticsearch = {
+        host = lib.mkOption {
+          description = lib.mdDoc ''
+            Elasticsearch host.
+            If it is not null, Elasticsearch full text search will be enabled.
+          '';
+          type = lib.types.nullOr lib.types.str;
+          default = null;
+        };
+
+        port = lib.mkOption {
+          description = lib.mdDoc "Elasticsearch port.";
+          type = lib.types.port;
+          default = 9200;
+        };
+
+        preset = lib.mkOption {
+          description = lib.mdDoc ''
+            Controls the Elasticsearch indices configuration (number of shards and replicas).
+          '';
+          type = lib.types.enum [ "single_node_cluster" "small_cluster" "large_cluster" ];
+          default = "single_node_cluster";
+          example = "large_cluster";
+        };
+
+        user = lib.mkOption {
+          description = lib.mdDoc "Used for optionally authenticating with Elasticsearch.";
+          type = lib.types.nullOr lib.types.str;
+          default = null;
+          example = "elasticsearch-mastodon";
+        };
+
+        passwordFile = lib.mkOption {
+          description = lib.mdDoc ''
+            Path to file containing password for optionally authenticating with Elasticsearch.
+          '';
+          type = lib.types.nullOr lib.types.path;
+          default = null;
+          example = "/var/lib/mastodon/secrets/elasticsearch-password";
+        };
+      };
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.mastodon;
+        defaultText = lib.literalExpression "pkgs.mastodon";
+        description = lib.mdDoc "Mastodon package to use.";
+      };
+
+      extraConfig = lib.mkOption {
+        type = lib.types.attrs;
+        default = {};
+        description = lib.mdDoc ''
+          Extra environment variables to pass to all mastodon services.
+        '';
+      };
+
+      extraEnvFiles = lib.mkOption {
+        type = with lib.types; listOf path;
+        default = [];
+        description = lib.mdDoc ''
+          Extra environment files to pass to all mastodon services. Useful for passing down environmental secrets.
+        '';
+        example = [ "/etc/mastodon/s3config.env" ];
+      };
+
+      automaticMigrations = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Do automatic database migrations.
+        '';
+      };
+
+      mediaAutoRemove = {
+        enable = lib.mkOption {
+          type = lib.types.bool;
+          default = true;
+          example = false;
+          description = lib.mdDoc ''
+            Automatically remove remote media attachments and preview cards older than the configured amount of days.
+
+            Recommended in https://docs.joinmastodon.org/admin/setup/.
+          '';
+        };
+
+        startAt = lib.mkOption {
+          type = lib.types.str;
+          default = "daily";
+          example = "hourly";
+          description = lib.mdDoc ''
+            How often to remove remote media.
+
+            The format is described in {manpage}`systemd.time(7)`.
+          '';
+        };
+
+        olderThanDays = lib.mkOption {
+          type = lib.types.int;
+          default = 30;
+          example = 14;
+          description = lib.mdDoc ''
+            How old remote media needs to be in order to be removed.
+          '';
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [{
+    assertions = [
+      {
+        assertion = databaseActuallyCreateLocally -> (cfg.user == cfg.database.user && cfg.database.user == cfg.database.name);
+        message = ''
+          For local automatic database provisioning (services.mastodon.database.createLocally == true) with peer
+            authentication (services.mastodon.database.host == "/run/postgresql") to work services.mastodon.user
+            and services.mastodon.database.user must be identical.
+        '';
+      }
+      {
+        assertion = !databaseActuallyCreateLocally -> (cfg.database.host != "/run/postgresql");
+        message = ''
+          <option>services.mastodon.database.host</option> needs to be set if
+            <option>services.mastodon.database.createLocally</option> is not enabled.
+        '';
+      }
+      {
+        assertion = cfg.smtp.authenticate -> (cfg.smtp.user != null);
+        message = ''
+          <option>services.mastodon.smtp.user</option> needs to be set if
+            <option>services.mastodon.smtp.authenticate</option> is enabled.
+        '';
+      }
+      {
+        assertion = cfg.smtp.authenticate -> (cfg.smtp.passwordFile != null);
+        message = ''
+          <option>services.mastodon.smtp.passwordFile</option> needs to be set if
+            <option>services.mastodon.smtp.authenticate</option> is enabled.
+        '';
+      }
+      {
+        assertion = 1 ==
+          (lib.count (x: x)
+            (lib.mapAttrsToList
+              (_: v: builtins.elem "scheduler" v.jobClasses || v.jobClasses == [ ])
+              cfg.sidekiqProcesses));
+        message = "There must be exactly one Sidekiq queue in services.mastodon.sidekiqProcesses with jobClass \"scheduler\".";
+      }
+    ];
+
+    environment.systemPackages = [ mastodonTootctl ];
+
+    systemd.targets.mastodon = {
+      description = "Target for all Mastodon services";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+    };
+
+    systemd.targets.mastodon-streaming = {
+      description = "Target for all Mastodon streaming services";
+      wantedBy = [ "multi-user.target" "mastodon.target" ];
+      after = [ "network.target" ];
+    };
+
+    systemd.services.mastodon-init-dirs = {
+      script = ''
+        umask 077
+
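+        # Generate any missing secrets and collect them into /var/lib/mastodon/.secrets_env for the other units.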
+        if ! test -f ${cfg.secretKeyBaseFile}; then
+          mkdir -p $(dirname ${cfg.secretKeyBaseFile})
+          bin/rake secret > ${cfg.secretKeyBaseFile}
+        fi
+        if ! test -f ${cfg.otpSecretFile}; then
+          mkdir -p $(dirname ${cfg.otpSecretFile})
+          bin/rake secret > ${cfg.otpSecretFile}
+        fi
+        if ! test -f ${cfg.vapidPrivateKeyFile}; then
+          mkdir -p $(dirname ${cfg.vapidPrivateKeyFile}) $(dirname ${cfg.vapidPublicKeyFile})
+          keypair=$(bin/rake webpush:generate_keys)
+          echo $keypair | grep --only-matching "Private -> [^ ]\+" | sed 's/^Private -> //' > ${cfg.vapidPrivateKeyFile}
+          echo $keypair | grep --only-matching "Public -> [^ ]\+" | sed 's/^Public -> //' > ${cfg.vapidPublicKeyFile}
+        fi
+
+        cat > /var/lib/mastodon/.secrets_env <<EOF
+        SECRET_KEY_BASE="$(cat ${cfg.secretKeyBaseFile})"
+        OTP_SECRET="$(cat ${cfg.otpSecretFile})"
+        VAPID_PRIVATE_KEY="$(cat ${cfg.vapidPrivateKeyFile})"
+        VAPID_PUBLIC_KEY="$(cat ${cfg.vapidPublicKeyFile})"
+      '' + lib.optionalString (cfg.database.passwordFile != null) ''
+        DB_PASS="$(cat ${cfg.database.passwordFile})"
+      '' + lib.optionalString cfg.smtp.authenticate ''
+        SMTP_PASSWORD="$(cat ${cfg.smtp.passwordFile})"
+      '' + lib.optionalString (cfg.elasticsearch.passwordFile != null) ''
+        ES_PASS="$(cat ${cfg.elasticsearch.passwordFile})"
+      '' + ''
+        EOF
+      '';
+      environment = env;
+      serviceConfig = {
+        Type = "oneshot";
+        SyslogIdentifier = "mastodon-init-dirs";
+        # System Call Filtering
+        SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@resources" ])) "@chown" "pipe" "pipe2" ];
+      } // cfgService;
+
+      after = [ "network.target" ];
+    };
+
+    systemd.services.mastodon-init-db = lib.mkIf cfg.automaticMigrations {
+      script = lib.optionalString (!databaseActuallyCreateLocally) ''
+        umask 077
+
+        export PGPASSFILE
+        PGPASSFILE=$(mktemp)
+        cat > $PGPASSFILE <<EOF
+        ${cfg.database.host}:${toString cfg.database.port}:${cfg.database.name}:${cfg.database.user}:$(cat ${cfg.database.passwordFile})
+        EOF
+
+      '' + ''
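+        # If the database contains no user tables yet, load the schema and seed it; otherwise run pending migrations.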
+        if [ `psql ${cfg.database.name} -c \
+                "select count(*) from pg_class c \
+                join pg_namespace s on s.oid = c.relnamespace \
+                where s.nspname not in ('pg_catalog', 'pg_toast', 'information_schema') \
+                and s.nspname not like 'pg_temp%';" | sed -n 3p` -eq 0 ]; then
+          SAFETY_ASSURED=1 rails db:schema:load
+          rails db:seed
+        else
+          rails db:migrate
+        fi
+      '' +  lib.optionalString (!databaseActuallyCreateLocally) ''
+        rm $PGPASSFILE
+        unset PGPASSFILE
+      '';
+      path = [ cfg.package pkgs.postgresql ];
+      environment = env // lib.optionalAttrs (!databaseActuallyCreateLocally) {
+        PGHOST = cfg.database.host;
+        PGUSER = cfg.database.user;
+      };
+      serviceConfig = {
+        Type = "oneshot";
+        EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
+        WorkingDirectory = cfg.package;
+        # System Call Filtering
+        SystemCallFilter = [ ("~" + lib.concatStringsSep " " (systemCallsList ++ [ "@resources" ])) "@chown" "pipe" "pipe2" ];
+      } // cfgService;
+      after = [ "network.target" "mastodon-init-dirs.service" ]
+        ++ lib.optional databaseActuallyCreateLocally "postgresql.service";
+      requires = [ "mastodon-init-dirs.service" ]
+        ++ lib.optional databaseActuallyCreateLocally "postgresql.service";
+    };
+
+    systemd.services.mastodon-web = {
+      after = [ "network.target" "mastodon-init-dirs.service" ]
+        ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+        ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+      requires = [ "mastodon-init-dirs.service" ]
+        ++ lib.optional databaseActuallyCreateLocally "postgresql.service"
+        ++ lib.optional cfg.automaticMigrations "mastodon-init-db.service";
+      wantedBy = [ "mastodon.target" ];
+      description = "Mastodon web";
+      environment = env // (if cfg.enableUnixSocket
+        then { SOCKET = "/run/mastodon-web/web.socket"; }
+        else { PORT = toString(cfg.webPort); }
+      );
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/puma -C config/puma.rb";
+        Restart = "always";
+        RestartSec = 20;
+        EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
+        WorkingDirectory = cfg.package;
+        # Runtime directory and mode
+        RuntimeDirectory = "mastodon-web";
+        RuntimeDirectoryMode = "0750";
+        # System Call Filtering
+        SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "@chown" "pipe" "pipe2" ];
+      } // cfgService;
+      path = with pkgs; [ file imagemagick ffmpeg ];
+    };
+
+    systemd.services.mastodon-media-auto-remove = lib.mkIf cfg.mediaAutoRemove.enable {
+      description = "Mastodon media auto remove";
+      environment = env;
+      serviceConfig = {
+        Type = "oneshot";
+        EnvironmentFile = [ "/var/lib/mastodon/.secrets_env" ] ++ cfg.extraEnvFiles;
+      } // cfgService;
+      script = let
+        olderThanDays = toString cfg.mediaAutoRemove.olderThanDays;
+      in ''
+        ${cfg.package}/bin/tootctl media remove --days=${olderThanDays}
+        ${cfg.package}/bin/tootctl preview_cards remove --days=${olderThanDays}
+      '';
+      startAt = cfg.mediaAutoRemove.startAt;
+    };
+
+    services.nginx = lib.mkIf cfg.configureNginx {
+      enable = true;
+      recommendedProxySettings = true; # required for redirections to work
+      virtualHosts."${cfg.localDomain}" = {
+        root = "${cfg.package}/public/";
+        # mastodon only supports https, but you can override this if you offload tls elsewhere.
+        forceSSL = lib.mkDefault true;
+        enableACME = lib.mkDefault true;
+
+        locations."/system/".alias = "/var/lib/mastodon/public-system/";
+
+        locations."/" = {
+          tryFiles = "$uri @proxy";
+        };
+
+        locations."@proxy" = {
+          proxyPass = (if cfg.enableUnixSocket then "http://unix:/run/mastodon-web/web.socket" else "http://127.0.0.1:${toString(cfg.webPort)}");
+          proxyWebsockets = true;
+        };
+
+        locations."/api/v1/streaming/" = {
+          proxyPass = "http://mastodon-streaming";
+          proxyWebsockets = true;
+        };
+      };
+      upstreams.mastodon-streaming = {
+        extraConfig = ''
+          least_conn;
+        '';
+        servers = builtins.listToAttrs
+          (map (i: {
+            name = "unix:/run/mastodon-streaming/streaming-${toString i}.socket";
+            value = { };
+          }) (lib.range 1 cfg.streamingProcesses));
+      };
+    };
+
+    services.postfix = lib.mkIf (cfg.smtp.createLocally && cfg.smtp.host == "127.0.0.1") {
+      enable = true;
+      hostname = lib.mkDefault "${cfg.localDomain}";
+    };
+    services.redis.servers.mastodon = lib.mkIf (cfg.redis.createLocally && cfg.redis.host == "127.0.0.1") {
+      enable = true;
+      port = cfg.redis.port;
+      bind = "127.0.0.1";
+    };
+    services.postgresql = lib.mkIf databaseActuallyCreateLocally {
+      enable = true;
+      ensureUsers = [
+        {
+          name = cfg.database.name;
+          ensureDBOwnership = true;
+        }
+      ];
+      ensureDatabases = [ cfg.database.name ];
+    };
+
+    users.users = lib.mkMerge [
+      (lib.mkIf (cfg.user == "mastodon") {
+        mastodon = {
+          isSystemUser = true;
+          home = cfg.package;
+          inherit (cfg) group;
+        };
+      })
+      (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ cfg.package pkgs.imagemagick ])
+    ];
+
+    users.groups.${cfg.group}.members = lib.optional cfg.configureNginx config.services.nginx.user;
+  }
+  { systemd.services = lib.mkMerge [ sidekiqUnits streamingUnits ]; }
+  ]);
+
+  meta.maintainers = with lib.maintainers; [ happy-river erictapen ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/matomo.md b/nixpkgs/nixos/modules/services/web-apps/matomo.md
new file mode 100644
index 000000000000..e750c0c14775
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/matomo.md
@@ -0,0 +1,77 @@
+# Matomo {#module-services-matomo}
+
+Matomo is a real-time web analytics application. This module configures
+php-fpm as backend for Matomo, optionally configuring an nginx vhost as well.
+
+An automatic setup is not supported by Matomo, so you need to configure Matomo
+itself in the browser-based Matomo setup.
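+
+As a starting point, a minimal NixOS configuration could look like the
+following sketch (the host name is a placeholder; by default the generated
+nginx virtual host forces SSL and requests ACME certificates):
+```
+{
+  services.matomo = {
+    enable = true;
+    hostname = "matomo.example.org";
+    # an empty attribute set enables the nginx virtual host with its defaults
+    nginx = { };
+  };
+}
+```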
+
+## Database Setup {#module-services-matomo-database-setup}
+
+You also need to configure a MariaDB or MySQL database and a database user
+for Matomo yourself, and enter those credentials in your browser. You can use
+passwordless database authentication via the UNIX_SOCKET authentication
+plugin with the following SQL commands:
+```
+# For MariaDB
+INSTALL PLUGIN unix_socket SONAME 'auth_socket';
+CREATE DATABASE matomo;
+CREATE USER 'matomo'@'localhost' IDENTIFIED WITH unix_socket;
+GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
+
+# For MySQL
+INSTALL PLUGIN auth_socket SONAME 'auth_socket.so';
+CREATE DATABASE matomo;
+CREATE USER 'matomo'@'localhost' IDENTIFIED WITH auth_socket;
+GRANT ALL PRIVILEGES ON matomo.* TO 'matomo'@'localhost';
+```
+Then fill in `matomo` as database user and database name,
+and leave the password field blank. This authentication works by allowing
+only the `matomo` unix user to authenticate as the
+`matomo` database user (without needing a password), but no
+other users. For more information on passwordless login, see
+<https://mariadb.com/kb/en/mariadb/unix_socket-authentication-plugin/>.
+
+Of course, you can use password-based authentication as well, e.g. when the
+database is not on the same host.
+
+## Archive Processing {#module-services-matomo-archive-processing}
+
+This module comes with the systemd service
+`matomo-archive-processing.service` and a timer that
+automatically triggers archive processing every hour. This means that you
+can safely
+[disable browser triggers for Matomo archiving](
+https://matomo.org/docs/setup-auto-archiving/#disable-browser-triggers-for-matomo-archiving-and-limit-matomo-reports-to-updating-every-hour
+) at
+`Administration > System > General Settings`.
+
+With automatic archive processing, you can now also enable the option to
+[delete old visitor logs](https://matomo.org/docs/privacy/#step-2-delete-old-visitors-logs)
+at `Administration > System > Privacy`. If you have already collected data
+before, make sure to run `systemctl start matomo-archive-processing.service`
+at least once without errors, so that the reports get archived
+before the source data gets deleted.
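+
+Periodic archive processing is controlled by the
+`services.matomo.periodicArchiveProcessing` option and is enabled by default.
+If you prefer browser-triggered archiving instead, a sketch for opting out of
+the hourly timer looks like this:
+```
+{
+  # disables the hourly matomo-archive-processing timer
+  services.matomo.periodicArchiveProcessing = false;
+}
+```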
+
+## Backup {#module-services-matomo-backups}
+
+You only need to take backups of your MySQL database and the
+{file}`/var/lib/matomo/config/config.ini.php` file. Use a user
+in the `matomo` group or root to access the file. For more
+information, see
+<https://matomo.org/faq/how-to-install/faq_138/>.
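+
+One possible way to automate the database part of the backup is the NixOS
+`services.mysqlBackup` module; the following is only a sketch and assumes
+that module fits your setup. The {file}`config.ini.php` file still has to be
+backed up separately:
+```
+{
+  services.mysqlBackup = {
+    enable = true;
+    # assumes the database is named "matomo" as in the setup above
+    databases = [ "matomo" ];
+  };
+}
+```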
+
+## Issues {#module-services-matomo-issues}
+
+  - Matomo will warn you that the JavaScript tracker is not writable. This is
+    because it's located in the read-only Nix store. You can safely ignore
+    this, unless you use a plugin that requires write access to the JavaScript tracker.
+
+## Using other Web Servers than nginx {#module-services-matomo-other-web-servers}
+
+You can use other web servers by forwarding calls for
+{file}`index.php` and {file}`piwik.php` to the
+[`services.phpfpm.pools.<name>.socket`](#opt-services.phpfpm.pools._name_.socket)
+fastcgi unix socket. You can use
+the nginx configuration in the module code as a reference to what else
+should be configured.
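+
+For example, with the `webServerUser` option the module only sets up the
+php-fpm pool and leaves the web server configuration to you. A sketch, where
+`lighttpd` is a placeholder for whatever user your web server runs as:
+```
+{
+  services.matomo = {
+    enable = true;
+    webServerUser = "lighttpd";
+  };
+  # Then forward fastcgi requests for index.php, matomo.php and piwik.php
+  # to config.services.phpfpm.pools.matomo.socket in that web server's
+  # configuration.
+}
+```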
diff --git a/nixpkgs/nixos/modules/services/web-apps/matomo.nix b/nixpkgs/nixos/modules/services/web-apps/matomo.nix
new file mode 100644
index 000000000000..eadf8b62b977
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/matomo.nix
@@ -0,0 +1,331 @@
+{ config, lib, options, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.matomo;
+  fpm = config.services.phpfpm.pools.${pool};
+
+  user = "matomo";
+  dataDir = "/var/lib/${user}";
+  deprecatedDataDir = "/var/lib/piwik";
+
+  pool = user;
+  phpExecutionUnit = "phpfpm-${pool}";
+  databaseService = "mysql.service";
+
+in {
+  imports = [
+    (mkRenamedOptionModule [ "services" "piwik" "enable" ] [ "services" "matomo" "enable" ])
+    (mkRenamedOptionModule [ "services" "piwik" "webServerUser" ] [ "services" "matomo" "webServerUser" ])
+    (mkRemovedOptionModule [ "services" "piwik" "phpfpmProcessManagerConfig" ] "Use services.phpfpm.pools.<name>.settings")
+    (mkRemovedOptionModule [ "services" "matomo" "phpfpmProcessManagerConfig" ] "Use services.phpfpm.pools.<name>.settings")
+    (mkRenamedOptionModule [ "services" "piwik" "nginx" ] [ "services" "matomo" "nginx" ])
+    (mkRenamedOptionModule [ "services" "matomo" "periodicArchiveProcessingUrl" ] [ "services" "matomo" "hostname" ])
+  ];
+
+  options = {
+    services.matomo = {
+      # NixOS PR for database setup: https://github.com/NixOS/nixpkgs/pull/6963
+      # Matomo issue for automatic Matomo setup: https://github.com/matomo-org/matomo/issues/10257
+      # TODO: find a nice way to do this when more NixOS MySQL and / or Matomo automatic setup stuff is implemented.
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable Matomo web analytics with php-fpm backend.
+          Either the nginx option or the webServerUser option is mandatory.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        description = lib.mdDoc ''
+          Matomo package for the service to use.
+          This can be used to point to newer releases from nixos-unstable,
+          as they don't get backported if they are not security-relevant.
+        '';
+        default = pkgs.matomo;
+        defaultText = literalExpression "pkgs.matomo";
+      };
+
+      webServerUser = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "lighttpd";
+        description = lib.mdDoc ''
+          Name of the web server user that forwards requests to {option}`services.phpfpm.pools.<name>.socket`, the fastcgi socket for Matomo, if the nginx
+          option is not used. Either this option or the nginx option is mandatory.
+          If you want to use a web server other than nginx, you need to set this to that server's user
+          and pass fastcgi requests for `index.php`, `matomo.php` and `piwik.php` (legacy name) to this socket.
+        '';
+      };
+
+      periodicArchiveProcessing = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Enable periodic archive processing, which generates aggregated reports from the visits.
+
+          This means that you can safely disable browser triggers for Matomo archiving,
+          and safely enable the deletion of old visitor logs.
+          Before deleting visitor logs,
+          make sure that you run `systemctl start matomo-archive-processing.service`
+          at least once without errors if you have already collected data before.
+        '';
+      };
+
+      hostname = mkOption {
+        type = types.str;
+        default = "${user}.${config.networking.fqdnOrHostName}";
+        defaultText = literalExpression ''
+          "${user}.''${config.${options.networking.fqdnOrHostName}}"
+        '';
+        example = "matomo.yourdomain.org";
+        description = lib.mdDoc ''
+          URL of the host, without https prefix. You may want to change it if you
+          run Matomo on a different URL than matomo.yourdomain.
+        '';
+      };
+
+      nginx = mkOption {
+        type = types.nullOr (types.submodule (
+          recursiveUpdate
+            (import ../web-servers/nginx/vhost-options.nix { inherit config lib; })
+            {
+              # enable encryption by default,
+              # as sensitive login and Matomo data should not be transmitted in clear text.
+              options.forceSSL.default = true;
+              options.enableACME.default = true;
+            }
+        )
+        );
+        default = null;
+        example = literalExpression ''
+          {
+            serverAliases = [
+              "matomo.''${config.networking.domain}"
+              "stats.''${config.networking.domain}"
+            ];
+            enableACME = false;
+          }
+        '';
+        description = lib.mdDoc ''
+            With this option, you can customize an nginx virtualHost which already has sensible defaults for Matomo.
+            Either this option or the webServerUser option is mandatory.
+            Set this to {} to just enable the virtualHost if you don't need any customization.
+            If enabled, then by default, the {option}`serverName` is
+            `''${user}.''${config.networking.hostName}.''${config.networking.domain}`,
+            SSL is active, and certificates are acquired via ACME.
+            If this is set to null (the default), no nginx virtualHost will be configured.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = mkIf (cfg.nginx != null && cfg.webServerUser != null) [
+      "If services.matomo.nginx is set, services.matomo.nginx.webServerUser is ignored and should be removed."
+    ];
+
+    assertions = [ {
+        assertion = cfg.nginx != null || cfg.webServerUser != null;
+        message = "Either services.matomo.nginx or services.matomo.nginx.webServerUser is mandatory";
+    }];
+
+    users.users.${user} = {
+      isSystemUser = true;
+      createHome = true;
+      home = dataDir;
+      group  = user;
+    };
+    users.groups.${user} = {};
+
+    systemd.services.matomo-setup-update = {
+      # everything needs to set up and up to date before Matomo php files are executed
+      requiredBy = [ "${phpExecutionUnit}.service" ];
+      before = [ "${phpExecutionUnit}.service" ];
+      # the update part of the script can only work if the database is already up and running
+      requires = [ databaseService ];
+      after = [ databaseService ];
+      path = [ cfg.package ];
+      environment.PIWIK_USER_PATH = dataDir;
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        # hide especially config.ini.php from other users
+        UMask = "0007";
+        # TODO: might get renamed to MATOMO_USER_PATH in future versions
+        # chown + chmod in preStart needs root
+        PermissionsStartOnly = true;
+      };
+
+      # correct ownership and permissions in case they're not correct anymore,
+      # e.g. after restoring from backup or moving from another system.
+      # Note that ${dataDir}/config/config.ini.php might contain the MySQL password.
+      preStart = ''
+        # migrate data from piwik to Matomo folder
+        if [ -d ${deprecatedDataDir} ]; then
+          echo "Migrating from ${deprecatedDataDir} to ${dataDir}"
+          mv -T ${deprecatedDataDir} ${dataDir}
+        fi
+        chown -R ${user}:${user} ${dataDir}
+        chmod -R ug+rwX,o-rwx ${dataDir}
+
+        if [ -e ${dataDir}/current-package ]; then
+          CURRENT_PACKAGE=$(readlink ${dataDir}/current-package)
+          NEW_PACKAGE=${cfg.package}
+          if [ "$CURRENT_PACKAGE" != "$NEW_PACKAGE" ]; then
+            # keeping tmp around between upgrades seems to bork stuff, so delete it
+            rm -rf ${dataDir}/tmp
+          fi
+        elif [ -e ${dataDir}/tmp ]; then
+          # upgrade from 4.4.1
+          rm -rf ${dataDir}/tmp
+        fi
+        ln -sfT ${cfg.package} ${dataDir}/current-package
+      '';
+      script = ''
+        # Use User-Private Group scheme to protect Matomo data, but allow administration / backup via 'matomo' group
+        # Copy config folder
+        chmod g+s "${dataDir}"
+        cp -r "${cfg.package}/share/config" "${dataDir}/"
+        mkdir -p "${dataDir}/misc"
+        chmod -R u+rwX,g+rwX,o-rwx "${dataDir}"
+
+        # check whether user setup has already been done
+        if test -f "${dataDir}/config/config.ini.php"; then
+          # then execute possibly pending database upgrade
+          matomo-console core:update --yes
+        fi
+      '';
+    };
+
+    # If this is run regularly via the timer,
+    # 'Browser trigger archiving' can be disabled in Matomo UI > Settings > General Settings.
+    systemd.services.matomo-archive-processing = {
+      description = "Archive Matomo reports";
+      # the archiving can only work if the database is already up and running
+      requires = [ databaseService ];
+      after = [ databaseService ];
+
+      # TODO: might get renamed to MATOMO_USER_PATH in future versions
+      environment.PIWIK_USER_PATH = dataDir;
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        UMask = "0007";
+        CPUSchedulingPolicy = "idle";
+        IOSchedulingClass = "idle";
+        ExecStart = "${cfg.package}/bin/matomo-console core:archive --url=https://${cfg.hostname}";
+      };
+    };
+
+    systemd.timers.matomo-archive-processing = mkIf cfg.periodicArchiveProcessing {
+      description = "Automatically archive Matomo reports every hour";
+
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = "hourly";
+        Persistent = "yes";
+        AccuracySec = "10m";
+      };
+    };
+
+    systemd.services.${phpExecutionUnit} = {
+      # stop phpfpm on package upgrade, do database upgrade via matomo-setup-update, and then restart
+      restartTriggers = [ cfg.package ];
+      # stop config.ini.php from getting written with read permission for others
+      serviceConfig.UMask = "0007";
+    };
+
+    services.phpfpm.pools = let
+      # workaround for when both are null and need to generate a string,
+      # which is illegal, but as assertions apparently are being triggered *after* config generation,
+      # we have to avoid already throwing errors at this previous stage.
+      socketOwner = if (cfg.nginx != null) then config.services.nginx.user
+      else if (cfg.webServerUser != null) then cfg.webServerUser else "";
+    in {
+      ${pool} = {
+        inherit user;
+        phpOptions = ''
+          error_log = 'stderr'
+          log_errors = on
+        '';
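+        # wrap every value in mkDefault so individual settings remain overridable by the user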
+        settings = mapAttrs (name: mkDefault) {
+          "listen.owner" = socketOwner;
+          "listen.group" = "root";
+          "listen.mode" = "0660";
+          "pm" = "dynamic";
+          "pm.max_children" = 75;
+          "pm.start_servers" = 10;
+          "pm.min_spare_servers" = 5;
+          "pm.max_spare_servers" = 20;
+          "pm.max_requests" = 500;
+          "catch_workers_output" = true;
+        };
+        phpEnv.PIWIK_USER_PATH = dataDir;
+      };
+    };
+
+
+    services.nginx.virtualHosts = mkIf (cfg.nginx != null) {
+      # References:
+      # https://fralef.me/piwik-hardening-with-nginx-and-php-fpm.html
+      # https://github.com/perusio/piwik-nginx
+      "${cfg.hostname}" = mkMerge [ cfg.nginx {
+        # don't allow to override the root easily, as it will almost certainly break Matomo.
+        # disadvantage: not shown as default in docs.
+        root = mkForce "${cfg.package}/share";
+
+        # define locations here instead of as the submodule option's default
+        # so that they can easily be extended with additional locations if required
+        # without needing to redefine the Matomo ones.
+        # disadvantage: not shown as default in docs.
+        locations."/" = {
+          index = "index.php";
+        };
+        # allow index.php for webinterface
+        locations."= /index.php".extraConfig = ''
+          fastcgi_pass unix:${fpm.socket};
+        '';
+        # allow matomo.php for tracking
+        locations."= /matomo.php".extraConfig = ''
+          fastcgi_pass unix:${fpm.socket};
+        '';
+        # allow piwik.php for tracking (deprecated name)
+        locations."= /piwik.php".extraConfig = ''
+          fastcgi_pass unix:${fpm.socket};
+        '';
+        # Any other attempt to access any php files is forbidden
+        locations."~* ^.+\\.php$".extraConfig = ''
+          return 403;
+        '';
+        # Disallow access to unneeded directories
+        # config and tmp are already removed
+        locations."~ ^/(?:core|lang|misc)/".extraConfig = ''
+          return 403;
+        '';
+        # Disallow access to several helper files
+        locations."~* \\.(?:bat|git|ini|sh|txt|tpl|xml|md)$".extraConfig = ''
+          return 403;
+        '';
+        # No crawling of this site for bots that obey robots.txt - no useful information here.
+        locations."= /robots.txt".extraConfig = ''
+          return 200 "User-agent: *\nDisallow: /\n";
+        '';
+        # let browsers cache matomo.js
+        locations."= /matomo.js".extraConfig = ''
+          expires 1M;
+        '';
+        # let browsers cache piwik.js (deprecated name)
+        locations."= /piwik.js".extraConfig = ''
+          expires 1M;
+        '';
+      }];
+    };
+  };
+
+  meta = {
+    doc = ./matomo.md;
+    maintainers = with lib.maintainers; [ florianjacob ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/mattermost.nix b/nixpkgs/nixos/modules/services/web-apps/mattermost.nix
new file mode 100644
index 000000000000..24f3b3331845
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/mattermost.nix
@@ -0,0 +1,360 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.mattermost;
+
+  database = "postgres://${cfg.localDatabaseUser}:${cfg.localDatabasePassword}@localhost:5432/${cfg.localDatabaseName}?sslmode=disable&connect_timeout=10";
+
+  postgresPackage = config.services.postgresql.package;
+
+  createDb = {
+    statePath ? cfg.statePath,
+    localDatabaseUser ? cfg.localDatabaseUser,
+    localDatabasePassword ? cfg.localDatabasePassword,
+    localDatabaseName ? cfg.localDatabaseName,
+    useSudo ? true
+  }: ''
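+    # only create the role and database once, tracked via a marker file in the state directory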
+    if ! test -e ${escapeShellArg "${statePath}/.db-created"}; then
+      ${lib.optionalString useSudo "${pkgs.sudo}/bin/sudo -u ${escapeShellArg config.services.postgresql.superUser} \\"}
+        ${postgresPackage}/bin/psql postgres -c \
+          "CREATE ROLE ${localDatabaseUser} WITH LOGIN NOCREATEDB NOCREATEROLE ENCRYPTED PASSWORD '${localDatabasePassword}'"
+      ${lib.optionalString useSudo "${pkgs.sudo}/bin/sudo -u ${escapeShellArg config.services.postgresql.superUser} \\"}
+        ${postgresPackage}/bin/createdb \
+          --owner ${escapeShellArg localDatabaseUser} ${escapeShellArg localDatabaseName}
+      touch ${escapeShellArg "${statePath}/.db-created"}
+    fi
+  '';
+
+  mattermostPluginDerivations = with pkgs;
+    map (plugin: stdenv.mkDerivation {
+      name = "mattermost-plugin";
+      installPhase = ''
+        mkdir -p $out/share
+        cp ${plugin} $out/share/plugin.tar.gz
+      '';
+      dontUnpack = true;
+      dontPatch = true;
+      dontConfigure = true;
+      dontBuild = true;
+      preferLocalBuild = true;
+    }) cfg.plugins;
+
+  mattermostPlugins = with pkgs;
+    if mattermostPluginDerivations == [] then null
+    else stdenv.mkDerivation {
+      name = "${cfg.package.name}-plugins";
+      nativeBuildInputs = [
+        autoPatchelfHook
+      ] ++ mattermostPluginDerivations;
+      buildInputs = [
+        cfg.package
+      ];
+      installPhase = ''
+        mkdir -p $out/data/plugins
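+        # repack each plugin tarball under its sha256 hash and patch any bundled ELF binaries for NixOS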
+        plugins=(${escapeShellArgs (map (plugin: "${plugin}/share/plugin.tar.gz") mattermostPluginDerivations)})
+        for plugin in "''${plugins[@]}"; do
+          hash="$(sha256sum "$plugin" | cut -d' ' -f1)"
+          mkdir -p "$hash"
+          tar -C "$hash" -xzf "$plugin"
+          autoPatchelf "$hash"
+          GZIP_OPT=-9 tar -C "$hash" -cvzf "$out/data/plugins/$hash.tar.gz" .
+          rm -rf "$hash"
+        done
+      '';
+
+      dontUnpack = true;
+      dontPatch = true;
+      dontConfigure = true;
+      dontBuild = true;
+      preferLocalBuild = true;
+    };
+
+  mattermostConfWithoutPlugins = recursiveUpdate
+    { ServiceSettings.SiteURL = cfg.siteUrl;
+      ServiceSettings.ListenAddress = cfg.listenAddress;
+      TeamSettings.SiteName = cfg.siteName;
+      SqlSettings.DriverName = "postgres";
+      SqlSettings.DataSource = database;
+      PluginSettings.Directory = "${cfg.statePath}/plugins/server";
+      PluginSettings.ClientDirectory = "${cfg.statePath}/plugins/client";
+    }
+    cfg.extraConfig;
+
+  mattermostConf = recursiveUpdate
+    mattermostConfWithoutPlugins
+    (
+      lib.optionalAttrs (mattermostPlugins != null) {
+        PluginSettings = {
+          Enable = true;
+        };
+      }
+    );
+
+  mattermostConfJSON = pkgs.writeText "mattermost-config.json" (builtins.toJSON mattermostConf);
+
+in
+
+{
+  options = {
+    services.mattermost = {
+      enable = mkEnableOption (lib.mdDoc "Mattermost chat server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.mattermost;
+        defaultText = lib.literalExpression "pkgs.mattermost";
+        description = lib.mdDoc "Mattermost derivation to use.";
+      };
+
+      statePath = mkOption {
+        type = types.str;
+        default = "/var/lib/mattermost";
+        description = lib.mdDoc "Mattermost working directory";
+      };
+
+      siteUrl = mkOption {
+        type = types.str;
+        example = "https://chat.example.com";
+        description = lib.mdDoc ''
+          URL this Mattermost instance is reachable under, without trailing slash.
+        '';
+      };
+
+      siteName = mkOption {
+        type = types.str;
+        default = "Mattermost";
+        description = lib.mdDoc "Name of this Mattermost site.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = ":8065";
+        example = "[::1]:8065";
+        description = lib.mdDoc ''
+          Address and port this Mattermost instance listens to.
+        '';
+      };
+
+      mutableConfig = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether the Mattermost config.json is writeable by Mattermost.
+
+          Most of the settings can be edited in the system console of
+          Mattermost if this option is enabled. A template config using
+          the options specified in services.mattermost will be generated
+          but won't be overwritten on changes or rebuilds.
+
+          If this option is disabled, changes in the system console won't
+          be possible (default). If a config.json is present, it will be
+          overwritten!
+        '';
+      };
+
+      preferNixConfig = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          If both mutableConfig and this option are set, the Nix configuration
+          will take precedence over any settings configured in the server
+          console.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.attrs;
+        default = { };
+        description = lib.mdDoc ''
+          Additional configuration options as Nix attribute set in config.json schema.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf (types.oneOf [types.path types.package]);
+        default = [];
+        example = "[ ./com.github.moussetc.mattermost.plugin.giphy-2.0.0.tar.gz ]";
+        description = lib.mdDoc ''
+          Plugins to add to the configuration. If non-empty, this overrides any manually installed plugins.
+          This is a list of paths to .tar.gz files or derivations evaluating to
+          .tar.gz files.
+        '';
+      };
+      environmentFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          Environment file (see {manpage}`systemd.exec(5)`
+          "EnvironmentFile=" section for the syntax) which sets config options
+          for mattermost (see [the mattermost documentation](https://docs.mattermost.com/configure/configuration-settings.html#environment-variables)).
+
+          Settings defined in the environment file will overwrite settings
+          set via nix or via the {option}`services.mattermost.extraConfig`
+          option.
+
+          Useful for setting config options without their value ending up in the
+          (world-readable) nix store, e.g. for a database password.
+        '';
+      };
+
+      localDatabaseCreate = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Create a local PostgreSQL database for Mattermost automatically.
+        '';
+      };
+
+      localDatabaseName = mkOption {
+        type = types.str;
+        default = "mattermost";
+        description = lib.mdDoc ''
+          Local Mattermost database name.
+        '';
+      };
+
+      localDatabaseUser = mkOption {
+        type = types.str;
+        default = "mattermost";
+        description = lib.mdDoc ''
+          Local Mattermost database username.
+        '';
+      };
+
+      localDatabasePassword = mkOption {
+        type = types.str;
+        default = "mmpgsecret";
+        description = lib.mdDoc ''
+          Password for local Mattermost database user.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "mattermost";
+        description = lib.mdDoc ''
+          User which runs the Mattermost service.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "mattermost";
+        description = lib.mdDoc ''
+          Group which runs the Mattermost service.
+        '';
+      };
+
+      matterircd = {
+        enable = mkEnableOption (lib.mdDoc "Mattermost IRC bridge");
+        package = mkOption {
+          type = types.package;
+          default = pkgs.matterircd;
+          defaultText = lib.literalExpression "pkgs.matterircd";
+          description = lib.mdDoc "matterircd derivation to use.";
+        };
+        parameters = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          example = [ "-mmserver chat.example.com" "-bind [::]:6667" ];
+          description = lib.mdDoc ''
+            Set commandline parameters to pass to matterircd. See
+            https://github.com/42wim/matterircd#usage for more information.
+          '';
+        };
+      };
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      users.users = optionalAttrs (cfg.user == "mattermost") {
+        mattermost = {
+          group = cfg.group;
+          uid = config.ids.uids.mattermost;
+          home = cfg.statePath;
+        };
+      };
+
+      users.groups = optionalAttrs (cfg.group == "mattermost") {
+        mattermost.gid = config.ids.gids.mattermost;
+      };
+
+      services.postgresql.enable = cfg.localDatabaseCreate;
+
+      # The systemd service will fail to execute the preStart hook
+      # if the WorkingDirectory does not exist
+      systemd.tmpfiles.rules = [
+        ''d "${cfg.statePath}" -''
+      ];
+
+      systemd.services.mattermost = {
+        description = "Mattermost chat service";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "network.target" "postgresql.service" ];
+
+        preStart = ''
+          mkdir -p "${cfg.statePath}"/{data,config,logs,plugins}
+          mkdir -p "${cfg.statePath}/plugins"/{client,server}
+          ln -sf ${cfg.package}/{bin,fonts,i18n,templates,client} "${cfg.statePath}"
+        '' + lib.optionalString (mattermostPlugins != null) ''
+          rm -rf "${cfg.statePath}/data/plugins"
+          ln -sf ${mattermostPlugins}/data/plugins "${cfg.statePath}/data"
+        '' + lib.optionalString (!cfg.mutableConfig) ''
+          rm -f "${cfg.statePath}/config/config.json"
+          ${pkgs.jq}/bin/jq -s '.[0] * .[1]' ${cfg.package}/config/config.json ${mattermostConfJSON} > "${cfg.statePath}/config/config.json"
+        '' + lib.optionalString cfg.mutableConfig ''
+          if ! test -e "${cfg.statePath}/config/.initial-created"; then
+            rm -f ${cfg.statePath}/config/config.json
+            ${pkgs.jq}/bin/jq -s '.[0] * .[1]' ${cfg.package}/config/config.json ${mattermostConfJSON} > "${cfg.statePath}/config/config.json"
+            touch "${cfg.statePath}/config/.initial-created"
+          fi
+        '' + lib.optionalString (cfg.mutableConfig && cfg.preferNixConfig) ''
+          new_config="$(${pkgs.jq}/bin/jq -s '.[0] * .[1]' "${cfg.statePath}/config/config.json" ${mattermostConfJSON})"
+
+          rm -f "${cfg.statePath}/config/config.json"
+          echo "$new_config" > "${cfg.statePath}/config/config.json"
+        '' + lib.optionalString cfg.localDatabaseCreate (createDb {}) + ''
+          # Don't change permissions recursively on the data, current, and symlinked directories (see ln -sf command above).
+          # This dramatically decreases startup times for installations with a lot of files.
+          find . -maxdepth 1 -not -name data -not -name client -not -name templates -not -name i18n -not -name fonts -not -name bin -not -name . \
+            -exec chown "${cfg.user}:${cfg.group}" -R {} \; -exec chmod u+rw,g+r,o-rwx -R {} \;
+
+          chown "${cfg.user}:${cfg.group}" "${cfg.statePath}/data" .
+          chmod u+rw,g+r,o-rwx "${cfg.statePath}/data" .
+        '';
+
+        serviceConfig = {
+          PermissionsStartOnly = true;
+          User = cfg.user;
+          Group = cfg.group;
+          ExecStart = "${cfg.package}/bin/mattermost";
+          WorkingDirectory = "${cfg.statePath}";
+          Restart = "always";
+          RestartSec = "10";
+          LimitNOFILE = "49152";
+          EnvironmentFile = cfg.environmentFile;
+        };
+        unitConfig.JoinsNamespaceOf = mkIf cfg.localDatabaseCreate "postgresql.service";
+      };
+    })
+    (mkIf cfg.matterircd.enable {
+      systemd.services.matterircd = {
+        description = "Mattermost IRC bridge service";
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          User = "nobody";
+          Group = "nogroup";
+          ExecStart = "${cfg.matterircd.package}/bin/matterircd ${escapeShellArgs cfg.matterircd.parameters}";
+          WorkingDirectory = "/tmp";
+          PrivateTmp = true;
+          Restart = "always";
+          RestartSec = "5";
+        };
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/mediawiki.nix b/nixpkgs/nixos/modules/services/web-apps/mediawiki.nix
new file mode 100644
index 000000000000..ce7bcd94b3f0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/mediawiki.nix
@@ -0,0 +1,647 @@
+{ config, pkgs, lib, ... }:
+
+let
+
+  inherit (lib) mkDefault mkEnableOption mkForce mkIf mkMerge mkOption;
+  inherit (lib) concatStringsSep literalExpression mapAttrsToList optional optionals optionalString types;
+
+  cfg = config.services.mediawiki;
+  fpm = config.services.phpfpm.pools.mediawiki;
+  user = "mediawiki";
+  group =
+    if cfg.webserver == "apache" then
+      config.services.httpd.group
+    else if cfg.webserver == "nginx" then
+      config.services.nginx.group
+    else "mediawiki";
+
+  cacheDir = "/var/cache/mediawiki";
+  stateDir = "/var/lib/mediawiki";
+
+  pkg = pkgs.stdenv.mkDerivation rec {
+    pname = "mediawiki-full";
+    version = src.version;
+    src = cfg.package;
+
+    installPhase = ''
+      mkdir -p $out
+      cp -r * $out/
+
+      rm -rf $out/share/mediawiki/skins/*
+      rm -rf $out/share/mediawiki/extensions/*
+
+      ${concatStringsSep "\n" (mapAttrsToList (k: v: ''
+        ln -s ${v} $out/share/mediawiki/skins/${k}
+      '') cfg.skins)}
+
+      ${concatStringsSep "\n" (mapAttrsToList (k: v: ''
+        ln -s ${if v != null then v else "$src/share/mediawiki/extensions/${k}"} $out/share/mediawiki/extensions/${k}
+      '') cfg.extensions)}
+    '';
+  };
+
+  mediawikiScripts = pkgs.runCommand "mediawiki-scripts" {
+    nativeBuildInputs = [ pkgs.makeWrapper ];
+    preferLocalBuild = true;
+  } ''
+    mkdir -p $out/bin
+    for i in changePassword.php createAndPromote.php userOptions.php edit.php nukePage.php update.php; do
+      makeWrapper ${pkgs.php}/bin/php $out/bin/mediawiki-$(basename $i .php) \
+        --set MEDIAWIKI_CONFIG ${mediawikiConfig} \
+        --add-flags ${pkg}/share/mediawiki/maintenance/$i
+    done
+  '';
+
+  dbAddr = if cfg.database.socket == null then
+    "${cfg.database.host}:${toString cfg.database.port}"
+  else if cfg.database.type == "mysql" then
+    "${cfg.database.host}:${cfg.database.socket}"
+  else if cfg.database.type == "postgres" then
+    "${cfg.database.socket}"
+  else
+    throw "Unsupported database type: ${cfg.database.type} for socket: ${cfg.database.socket}";
+
+  mediawikiConfig = pkgs.writeText "LocalSettings.php" ''
+    <?php
+      # Protect against web entry
+      if ( !defined( 'MEDIAWIKI' ) ) {
+        exit;
+      }
+
+      $wgSitename = "${cfg.name}";
+      $wgMetaNamespace = false;
+
+      ## The URL base path to the directory containing the wiki;
+      ## defaults for all runtime URL paths are based off of this.
+      ## For more information on customizing the URLs
+      ## (like /w/index.php/Page_title to /wiki/Page_title) please see:
+      ## https://www.mediawiki.org/wiki/Manual:Short_URL
+      $wgScriptPath = "${lib.optionalString (cfg.webserver == "nginx") "/w"}";
+
+      ## The protocol and server name to use in fully-qualified URLs
+      $wgServer = "${cfg.url}";
+
+      ## The URL path to static resources (images, scripts, etc.)
+      $wgResourceBasePath = $wgScriptPath;
+
+      ${lib.optionalString (cfg.webserver == "nginx") ''
+        $wgArticlePath = "/wiki/$1";
+        $wgUsePathInfo = true;
+      ''}
+
+      ## The URL path to the logo.  Make sure you change this from the default,
+      ## or else you'll overwrite your logo when you upgrade!
+      $wgLogo = "$wgResourceBasePath/resources/assets/wiki.png";
+
+      ## UPO means: this is also a user preference option
+
+      $wgEnableEmail = true;
+      $wgEnableUserEmail = true; # UPO
+
+      $wgPasswordSender = "${cfg.passwordSender}";
+
+      $wgEnotifUserTalk = false; # UPO
+      $wgEnotifWatchlist = false; # UPO
+      $wgEmailAuthentication = true;
+
+      ## Database settings
+      $wgDBtype = "${cfg.database.type}";
+      $wgDBserver = "${dbAddr}";
+      $wgDBport = "${toString cfg.database.port}";
+      $wgDBname = "${cfg.database.name}";
+      $wgDBuser = "${cfg.database.user}";
+      ${optionalString (cfg.database.passwordFile != null) "$wgDBpassword = file_get_contents(\"${cfg.database.passwordFile}\");"}
+
+      ${optionalString (cfg.database.type == "mysql" && cfg.database.tablePrefix != null) ''
+        # MySQL specific settings
+        $wgDBprefix = "${cfg.database.tablePrefix}";
+      ''}
+
+      ${optionalString (cfg.database.type == "mysql") ''
+        # MySQL table options to use during installation or update
+        $wgDBTableOptions = "ENGINE=InnoDB, DEFAULT CHARSET=binary";
+      ''}
+
+      ## Shared memory settings
+      $wgMainCacheType = CACHE_NONE;
+      $wgMemCachedServers = [];
+
+      ${optionalString (cfg.uploadsDir != null) ''
+        $wgEnableUploads = true;
+        $wgUploadDirectory = "${cfg.uploadsDir}";
+      ''}
+
+      $wgUseImageMagick = true;
+      $wgImageMagickConvertCommand = "${pkgs.imagemagick}/bin/convert";
+
+      # InstantCommons allows wiki to use images from https://commons.wikimedia.org
+      $wgUseInstantCommons = false;
+
+      # Periodically send a pingback to https://www.mediawiki.org/ with basic data
+      # about this MediaWiki instance. The Wikimedia Foundation shares this data
+      # with MediaWiki developers to help guide future development efforts.
+      $wgPingback = true;
+
+      ## If you use ImageMagick (or any other shell command) on a
+      ## Linux server, this will need to be set to the name of an
+      ## available UTF-8 locale
+      $wgShellLocale = "C.UTF-8";
+
+      ## Set $wgCacheDirectory to a writable directory on the web server
+      ## to make your wiki go slightly faster. The directory should not
+      ## be publicly accessible from the web.
+      $wgCacheDirectory = "${cacheDir}";
+
+      # Site language code, should be one of the list in ./languages/data/Names.php
+      $wgLanguageCode = "en";
+
+      $wgSecretKey = file_get_contents("${stateDir}/secret.key");
+
+      # Changing this will log out all existing sessions.
+      $wgAuthenticationTokenVersion = "";
+
+      ## For attaching licensing metadata to pages, and displaying an
+      ## appropriate copyright notice / icon. GNU Free Documentation
+      ## License and Creative Commons licenses are supported so far.
+      $wgRightsPage = ""; # Set to the title of a wiki page that describes your license/copyright
+      $wgRightsUrl = "";
+      $wgRightsText = "";
+      $wgRightsIcon = "";
+
+      # Path to the GNU diff3 utility. Used for conflict resolution.
+      $wgDiff = "${pkgs.diffutils}/bin/diff";
+      $wgDiff3 = "${pkgs.diffutils}/bin/diff3";
+
+      # Enabled skins.
+      ${concatStringsSep "\n" (mapAttrsToList (k: v: "wfLoadSkin('${k}');") cfg.skins)}
+
+      # Enabled extensions.
+      ${concatStringsSep "\n" (mapAttrsToList (k: v: "wfLoadExtension('${k}');") cfg.extensions)}
+
+
+      # End of automatically generated settings.
+      # Add more configuration options below.
+
+      ${cfg.extraConfig}
+  '';
+
+  withTrailingSlash = str: if lib.hasSuffix "/" str then str else "${str}/";
+in
+{
+  # interface
+  options = {
+    services.mediawiki = {
+
+      enable = mkEnableOption (lib.mdDoc "MediaWiki");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.mediawiki;
+        defaultText = literalExpression "pkgs.mediawiki";
+        description = lib.mdDoc "Which MediaWiki package to use.";
+      };
+
+      finalPackage = mkOption {
+        type = types.package;
+        readOnly = true;
+        default = pkg;
+        defaultText = literalExpression "pkg";
+        description = lib.mdDoc ''
+          The final package used by the module. This is the package that will have extensions and skins installed.
+        '';
+      };
+
+      name = mkOption {
+        type = types.str;
+        default = "MediaWiki";
+        example = "Foobar Wiki";
+        description = lib.mdDoc "Name of the wiki.";
+      };
+
+      url = mkOption {
+        type = types.str;
+        default =
+          if cfg.webserver == "apache" then
+            "${if cfg.httpd.virtualHost.addSSL || cfg.httpd.virtualHost.forceSSL || cfg.httpd.virtualHost.onlySSL then "https" else "http"}://${cfg.httpd.virtualHost.hostName}"
+          else if cfg.webserver == "nginx" then
+            let
+              hasSSL = host: host.forceSSL || host.addSSL;
+            in
+            "${if hasSSL config.services.nginx.virtualHosts.${cfg.nginx.hostName} then "https" else "http"}://${cfg.nginx.hostName}"
+          else
+            "http://localhost";
+        defaultText = literalExpression ''
+          if cfg.webserver == "apache" then
+            "''${if cfg.httpd.virtualHost.addSSL || cfg.httpd.virtualHost.forceSSL || cfg.httpd.virtualHost.onlySSL then "https" else "http"}://''${cfg.httpd.virtualHost.hostName}"
+          else
+            "http://localhost";
+        '';
+        example = "https://wiki.example.org";
+        description = lib.mdDoc "URL of the wiki.";
+      };
+
+      uploadsDir = mkOption {
+        type = types.nullOr types.path;
+        default = "${stateDir}/uploads";
+        description = lib.mdDoc ''
+          This directory is used for uploads of pictures. The directory passed here is automatically
+          created and permissions adjusted as required.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc "A file containing the initial password for the admin user.";
+        example = "/run/keys/mediawiki-password";
+      };
+
+      passwordSender = mkOption {
+        type = types.str;
+        default =
+          if cfg.webserver == "apache" then
+            if cfg.httpd.virtualHost.adminAddr != null then
+              cfg.httpd.virtualHost.adminAddr
+            else
+              config.services.httpd.adminAddr else "root@localhost";
+        defaultText = literalExpression ''
+          if cfg.webserver == "apache" then
+            if cfg.httpd.virtualHost.adminAddr != null then
+              cfg.httpd.virtualHost.adminAddr
+            else
+              config.services.httpd.adminAddr else "root@localhost"
+        '';
+        description = lib.mdDoc "Contact address for password reset.";
+      };
+
+      skins = mkOption {
+        default = {};
+        type = types.attrsOf types.path;
+        description = lib.mdDoc ''
+          Attribute set of paths whose content is copied to the {file}`skins`
+          subdirectory of the MediaWiki installation in addition to the default skins.
+        '';
+      };
+
+      extensions = mkOption {
+        default = {};
+        type = types.attrsOf (types.nullOr types.path);
+        description = lib.mdDoc ''
+          Attribute set of paths whose content is copied to the {file}`extensions`
+          subdirectory of the MediaWiki installation and enabled in configuration.
+
+          Use `null` instead of path to enable extensions that are part of MediaWiki.
+        '';
+        example = literalExpression ''
+          {
+            Matomo = pkgs.fetchzip {
+              url = "https://github.com/DaSchTour/matomo-mediawiki-extension/archive/v4.0.1.tar.gz";
+              sha256 = "0g5rd3zp0avwlmqagc59cg9bbkn3r7wx7p6yr80s644mj6dlvs1b";
+            };
+            ParserFunctions = null;
+          }
+        '';
+      };
+
+      webserver = mkOption {
+        type = types.enum [ "apache" "none" "nginx" ];
+        default = "apache";
+        description = lib.mdDoc "Webserver to use.";
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "mysql" "postgres" "sqlite" "mssql" "oracle" ];
+          default = "mysql";
+          description = lib.mdDoc "Database engine to use. MySQL/MariaDB is the database of choice by MediaWiki developers.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = if cfg.database.type == "mysql" then 3306 else 5432;
+          defaultText = literalExpression "3306";
+          description = lib.mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "mediawiki";
+          description = lib.mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "mediawiki";
+          description = lib.mdDoc "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/mediawiki-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            {option}`database.user`.
+          '';
+        };
+
+        tablePrefix = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            If you only have access to a single database and wish to install more than
+            one version of MediaWiki, or have other applications that also use the
+            database, you can give the table names a unique prefix to stop any naming
+            conflicts or confusion.
+            See <https://www.mediawiki.org/wiki/Manual:$wgDBprefix>.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = if (cfg.database.type == "mysql" && cfg.database.createLocally) then
+              "/run/mysqld/mysqld.sock"
+            else if (cfg.database.type == "postgres" && cfg.database.createLocally) then
+              "/run/postgresql"
+            else
+              null;
+          defaultText = literalExpression "/run/mysqld/mysqld.sock";
+          description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+        };
+
+        createLocally = mkOption {
+          type = types.bool;
+          default = cfg.database.type == "mysql" || cfg.database.type == "postgres";
+          defaultText = literalExpression "true";
+          description = lib.mdDoc ''
+            Create the database and database user locally.
+            This currently only applies if database type "mysql" or "postgres" is selected.
+          '';
+        };
+      };
+
+      nginx.hostName = mkOption {
+        type = types.str;
+        example = "wiki.example.com";
+        default = "localhost";
+        description = lib.mdDoc ''
+          The hostname to use for the nginx virtual host.
+          This is used to generate the nginx configuration.
+        '';
+      };
+
+      httpd.virtualHost = mkOption {
+        type = types.submodule (import ../web-servers/apache-httpd/vhost-options.nix);
+        example = literalExpression ''
+          {
+            hostName = "mediawiki.example.org";
+            adminAddr = "webmaster@example.org";
+            forceSSL = true;
+            enableACME = true;
+          }
+        '';
+        description = lib.mdDoc ''
+          Apache configuration can be done by adapting {option}`services.httpd.virtualHosts`.
+          See [](#opt-services.httpd.virtualHosts) for further information.
+        '';
+      };
+
+      poolConfig = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool ]);
+        default = {
+          "pm" = "dynamic";
+          "pm.max_children" = 32;
+          "pm.start_servers" = 2;
+          "pm.min_spare_servers" = 2;
+          "pm.max_spare_servers" = 4;
+          "pm.max_requests" = 500;
+        };
+        description = lib.mdDoc ''
+          Options for the MediaWiki PHP pool. See the documentation on `php-fpm.conf`
+          for details on configuration directives.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        description = lib.mdDoc ''
+          Any additional text to be appended to MediaWiki's
+          LocalSettings.php configuration file. For configuration
+          settings, see <https://www.mediawiki.org/wiki/Manual:Configuration_settings>.
+        '';
+        default = "";
+        example = ''
+          $wgEnableEmail = false;
+        '';
+      };
+
+    };
+  };
+
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "mediawiki" "virtualHost" ] [ "services" "mediawiki" "httpd" "virtualHost" ])
+  ];
+
+  # implementation
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.database.createLocally -> (cfg.database.type == "mysql" || cfg.database.type == "postgres");
+        message = "services.mediawiki.createLocally is currently only supported for database type 'mysql' and 'postgres'";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.name == cfg.database.user;
+        message = "services.mediawiki.database.user must be set to ${user} if services.mediawiki.database.createLocally is set true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.socket != null;
+        message = "services.mediawiki.database.socket must be set if services.mediawiki.database.createLocally is set to true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.mediawiki.database.createLocally is set to true";
+      }
+    ];
+
+    services.mediawiki.skins = {
+      MonoBook = "${cfg.package}/share/mediawiki/skins/MonoBook";
+      Timeless = "${cfg.package}/share/mediawiki/skins/Timeless";
+      Vector = "${cfg.package}/share/mediawiki/skins/Vector";
+    };
+
+    services.mysql = mkIf (cfg.database.type == "mysql" && cfg.database.createLocally) {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [{
+        name = cfg.database.user;
+        ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+      }];
+    };
+
+    services.postgresql = mkIf (cfg.database.type == "postgres" && cfg.database.createLocally) {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [{
+        name = cfg.database.user;
+        ensureDBOwnership = true;
+      }];
+    };
+
+    services.phpfpm.pools.mediawiki = {
+      inherit user group;
+      phpEnv.MEDIAWIKI_CONFIG = "${mediawikiConfig}";
+      # https://www.mediawiki.org/wiki/Compatibility
+      phpPackage = pkgs.php81;
+      settings = (if (cfg.webserver == "apache") then {
+        "listen.owner" = config.services.httpd.user;
+        "listen.group" = config.services.httpd.group;
+      } else if (cfg.webserver == "nginx") then {
+        "listen.owner" = config.services.nginx.user;
+        "listen.group" = config.services.nginx.group;
+      } else {
+        "listen.owner" = user;
+        "listen.group" = group;
+      }) // cfg.poolConfig;
+    };
+
+    services.httpd = lib.mkIf (cfg.webserver == "apache") {
+      enable = true;
+      extraModules = [ "proxy_fcgi" ];
+      virtualHosts.${cfg.httpd.virtualHost.hostName} = mkMerge [
+        cfg.httpd.virtualHost
+        {
+          documentRoot = mkForce "${pkg}/share/mediawiki";
+          extraConfig = ''
+            <Directory "${pkg}/share/mediawiki">
+              <FilesMatch "\.php$">
+                <If "-f %{REQUEST_FILENAME}">
+                  SetHandler "proxy:unix:${fpm.socket}|fcgi://localhost/"
+                </If>
+              </FilesMatch>
+
+              Require all granted
+              DirectoryIndex index.php
+              AllowOverride All
+            </Directory>
+          '' + optionalString (cfg.uploadsDir != null) ''
+            Alias "/images" "${cfg.uploadsDir}"
+            <Directory "${cfg.uploadsDir}">
+              Require all granted
+            </Directory>
+          '';
+        }
+      ];
+    };
+    # inspired by https://www.mediawiki.org/wiki/Manual:Short_URL/Nginx
+    services.nginx = lib.mkIf (cfg.webserver == "nginx") {
+      enable = true;
+      virtualHosts.${config.services.mediawiki.nginx.hostName} = {
+        root = "${pkg}/share/mediawiki";
+        locations = {
+          "~ ^/w/(index|load|api|thumb|opensearch_desc|rest|img_auth)\\.php$".extraConfig = ''
+            rewrite ^/w/(.*) /$1 break;
+            include ${config.services.nginx.package}/conf/fastcgi_params;
+            fastcgi_index index.php;
+            fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+            fastcgi_pass unix:${config.services.phpfpm.pools.mediawiki.socket};
+          '';
+          "/w/images/".alias = withTrailingSlash cfg.uploadsDir;
+          # Deny access to deleted images folder
+          "/w/images/deleted".extraConfig = ''
+            deny all;
+          '';
+          # MediaWiki assets (usually images)
+          "~ ^/w/resources/(assets|lib|src)".extraConfig = ''
+            rewrite ^/w(/.*) $1 break;
+            add_header Cache-Control "public";
+            expires 7d;
+          '';
+          # Assets, scripts and styles from skins and extensions
+          "~ ^/w/(skins|extensions)/.+\\.(css|js|gif|jpg|jpeg|png|svg|wasm|ttf|woff|woff2)$".extraConfig = ''
+            rewrite ^/w(/.*) $1 break;
+            add_header Cache-Control "public";
+            expires 7d;
+          '';
+
+          # Handling for Mediawiki REST API, see [[mw:API:REST_API]]
+          "/w/rest.php/".tryFiles = "$uri $uri/ /w/rest.php?$query_string";
+
+          # Handling for the article path (pretty URLs)
+          "/wiki/".extraConfig = ''
+            rewrite ^/wiki/(?<pagename>.*)$ /w/index.php;
+          '';
+
+          # Explicit access to the root website, redirect to main page (adapt as needed)
+          "= /".extraConfig = ''
+            return 301 /wiki/Main_Page;
+          '';
+
+          # Every other entry point will be disallowed.
+          # Add specific rules for other entry points/images as needed above this
+          "/".extraConfig = ''
+             return 404;
+          '';
+        };
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${stateDir}' 0750 ${user} ${group} - -"
+      "d '${cacheDir}' 0750 ${user} ${group} - -"
+    ] ++ optionals (cfg.uploadsDir != null) [
+      "d '${cfg.uploadsDir}' 0750 ${user} ${group} - -"
+      "Z '${cfg.uploadsDir}' 0750 ${user} ${group} - -"
+    ];
+
+    systemd.services.mediawiki-init = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "phpfpm-mediawiki.service" ];
+      after = optional (cfg.database.type == "mysql" && cfg.database.createLocally) "mysql.service"
+              ++ optional (cfg.database.type == "postgres" && cfg.database.createLocally) "postgresql.service";
+      script = ''
+        if ! test -e "${stateDir}/secret.key"; then
+          tr -dc A-Za-z0-9 </dev/urandom 2>/dev/null | head -c 64 > ${stateDir}/secret.key
+        fi
+
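+        # install.php only runs if the wiki has not been initialized yet:
+        # eval.php exits non-zero when the 'user' table already exists, short-circuiting the && chain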
+        echo "exit( wfGetDB( DB_MASTER )->tableExists( 'user' ) ? 1 : 0 );" | \
+        ${pkgs.php}/bin/php ${pkg}/share/mediawiki/maintenance/eval.php --conf ${mediawikiConfig} && \
+        ${pkgs.php}/bin/php ${pkg}/share/mediawiki/maintenance/install.php \
+          --confpath /tmp \
+          --scriptpath / \
+          --dbserver "${dbAddr}" \
+          --dbport ${toString cfg.database.port} \
+          --dbname ${cfg.database.name} \
+          ${optionalString (cfg.database.tablePrefix != null) "--dbprefix ${cfg.database.tablePrefix}"} \
+          --dbuser ${cfg.database.user} \
+          ${optionalString (cfg.database.passwordFile != null) "--dbpassfile ${cfg.database.passwordFile}"} \
+          --passfile ${cfg.passwordFile} \
+          --dbtype ${cfg.database.type} \
+          ${cfg.name} \
+          admin
+
+        ${pkgs.php}/bin/php ${pkg}/share/mediawiki/maintenance/update.php --conf ${mediawikiConfig} --quick
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        Group = group;
+        PrivateTmp = true;
+      };
+    };
+
+    systemd.services.httpd.after = optional (cfg.webserver == "apache" && cfg.database.createLocally && cfg.database.type == "mysql") "mysql.service"
+      ++ optional (cfg.webserver == "apache" && cfg.database.createLocally && cfg.database.type == "postgres") "postgresql.service";
+
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
+    users.groups.${group} = {};
+
+    environment.systemPackages = [ mediawikiScripts ];
+  };
+}
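As a rough illustration of how the options referenced in this config section fit together, a minimal configuration enabling the module might look like the sketch below; the option declarations themselves live earlier in this file, and the wiki name and key path are placeholders:

    services.mediawiki = {
      enable = true;
      name = "Example Wiki";
      webserver = "nginx";                      # the module also supports "apache", see the httpd handling above
      passwordFile = "/run/keys/mediawiki-initial-admin-password";
      database.createLocally = true;            # lets mediawiki-init order itself after mysql/postgresql
    };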
diff --git a/nixpkgs/nixos/modules/services/web-apps/meme-bingo-web.nix b/nixpkgs/nixos/modules/services/web-apps/meme-bingo-web.nix
new file mode 100644
index 000000000000..652dc8840252
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/meme-bingo-web.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkEnableOption mkIf mkOption mdDoc types literalExpression;
+
+  cfg = config.services.meme-bingo-web;
+in {
+  options = {
+    services.meme-bingo-web = {
+      enable = mkEnableOption (mdDoc ''
+        a web app for playing meme bingo, rendered entirely on the web server and made interactive with forms.
+
+        Note: The application's author recommends running meme-bingo-web behind a reverse proxy for SSL and HTTP/3.
+      '');
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.meme-bingo-web;
+        defaultText = literalExpression "pkgs.meme-bingo-web";
+        description = mdDoc "meme-bingo-web package to use.";
+      };
+
+      baseUrl = mkOption {
+        description = mdDoc ''
+          URL to be used for the HTML <base> element on all HTML routes.
+        '';
+        type = types.str;
+        default = "http://localhost:41678/";
+        example = "https://bingo.example.com/";
+      };
+      port = mkOption {
+        description = mdDoc ''
+          Port to be used for the web server.
+        '';
+        type = types.port;
+        default = 41678;
+        example = 21035;
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.meme-bingo-web = {
+      description = "A web app for playing meme bingos.";
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        MEME_BINGO_BASE = cfg.baseUrl;
+        MEME_BINGO_PORT = toString cfg.port;
+      };
+      path = [ cfg.package ];
+
+      serviceConfig = {
+        User = "meme-bingo-web";
+        Group = "meme-bingo-web";
+
+        DynamicUser = true;
+
+        ExecStart = "${cfg.package}/bin/meme-bingo-web";
+
+        Restart = "always";
+        RestartSec = 1;
+
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "/dev/random" ];
+        LockPersonality = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectSystem = "strict";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" "~@resources" ];
+        UMask = "0077";
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        NoNewPrivileges = true;
+        MemoryDenyWriteExecute = true;
+      };
+    };
+  };
+}
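A minimal sketch of enabling this module from a system configuration; the public hostname is a placeholder, and TLS is expected to come from a separate reverse proxy as noted in the enable option:

    services.meme-bingo-web = {
      enable = true;
      baseUrl = "https://bingo.example.com/";  # used for the HTML <base> element
      port = 41678;                            # the default, shown here for clarity
    };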
diff --git a/nixpkgs/nixos/modules/services/web-apps/microbin.nix b/nixpkgs/nixos/modules/services/web-apps/microbin.nix
new file mode 100644
index 000000000000..233bfac6e699
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/microbin.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.microbin;
+in
+{
+  options.services.microbin = {
+    enable = lib.mkEnableOption (lib.mdDoc "MicroBin, a super tiny, feature-rich, configurable pastebin web application");
+
+    package = lib.mkPackageOption pkgs "microbin" { };
+
+    settings = lib.mkOption {
+      type = lib.types.submodule { freeformType = with lib.types; attrsOf (oneOf [ bool int str ]); };
+      default = { };
+      example = {
+        MICROBIN_PORT = 8080;
+        MICROBIN_HIDE_LOGO = false;
+      };
+      description = lib.mdDoc ''
+        Additional configuration for MicroBin, see
+        <https://microbin.eu/docs/installation-and-configuration/configuration/>
+        for supported values.
+
+        For secrets, use the passwordFile option instead.
+      '';
+    };
+
+    dataDir = lib.mkOption {
+      type = lib.types.str;
+      default = "/var/lib/microbin";
+      description = lib.mdDoc "Default data folder for MicroBin.";
+    };
+
+    passwordFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      example = "/run/secrets/microbin.env";
+      description = lib.mdDoc ''
+        Path to file containing environment variables.
+        Useful for passing down secrets.
+        Variables that can be considered secrets are:
+         - MICROBIN_BASIC_AUTH_USERNAME
+         - MICROBIN_BASIC_AUTH_PASSWORD
+         - MICROBIN_ADMIN_USERNAME
+         - MICROBIN_ADMIN_PASSWORD
+         - MICROBIN_UPLOADER_PASSWORD
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.microbin.settings = with lib; {
+      MICROBIN_BIND = mkDefault "0.0.0.0";
+      MICROBIN_DISABLE_TELEMETRY = mkDefault true;
+      MICROBIN_LIST_SERVER = mkDefault false;
+      MICROBIN_PORT = mkDefault "8080";
+    };
+
+    systemd.services.microbin = {
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      environment = lib.mapAttrs (_: v: if lib.isBool v then lib.boolToString v else toString v) cfg.settings;
+      serviceConfig = {
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+        DevicePolicy = "closed";
+        DynamicUser = true;
+        EnvironmentFile = lib.optional (cfg.passwordFile != null) cfg.passwordFile;
+        ExecStart = "${cfg.package}/bin/microbin";
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ReadWritePaths = cfg.dataDir;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        StateDirectory = "microbin";
+        SystemCallArchitectures = [ "native" ];
+        SystemCallFilter = [ "@system-service" ];
+        WorkingDirectory = cfg.dataDir;
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ surfaceflinger ];
+}
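For orientation, a minimal sketch of enabling MicroBin, reusing the settings shown in the option examples above; the secrets path is a placeholder:

    services.microbin = {
      enable = true;
      settings = {
        MICROBIN_PORT = 8080;
        MICROBIN_HIDE_LOGO = true;
      };
      # Environment file holding secrets such as MICROBIN_ADMIN_USERNAME / MICROBIN_ADMIN_PASSWORD.
      passwordFile = "/run/secrets/microbin.env";
    };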
diff --git a/nixpkgs/nixos/modules/services/web-apps/miniflux.nix b/nixpkgs/nixos/modules/services/web-apps/miniflux.nix
new file mode 100644
index 000000000000..5c8c93c13c43
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/miniflux.nix
@@ -0,0 +1,141 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.miniflux;
+
+  defaultAddress = "localhost:8080";
+
+  pgbin = "${config.services.postgresql.package}/bin";
+  preStart = pkgs.writeScript "miniflux-pre-start" ''
+    #!${pkgs.runtimeShell}
+    ${pgbin}/psql "miniflux" -c "CREATE EXTENSION IF NOT EXISTS hstore"
+  '';
+in
+
+{
+  options = {
+    services.miniflux = {
+      enable = mkEnableOption (lib.mdDoc "miniflux and create a local PostgreSQL database for it");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.miniflux;
+        defaultText = literalExpression "pkgs.miniflux";
+        description = lib.mdDoc "Miniflux package to use.";
+      };
+
+      config = mkOption {
+        type = types.attrsOf types.str;
+        example = literalExpression ''
+          {
+            CLEANUP_FREQUENCY = "48";
+            LISTEN_ADDR = "localhost:8080";
+          }
+        '';
+        description = lib.mdDoc ''
+          Configuration for Miniflux, refer to
+          <https://miniflux.app/docs/configuration.html>
+          for documentation on the supported values.
+
+          The correct database configuration is already provided by this module.
+          By default, it listens on ${defaultAddress}.
+        '';
+      };
+
+      adminCredentialsFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          File containing the ADMIN_USERNAME and
+          ADMIN_PASSWORD (length >= 6) in the format of
+          an EnvironmentFile=, as described by systemd.exec(5).
+        '';
+        example = "/etc/nixos/miniflux-admin-credentials";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.miniflux.config = {
+      LISTEN_ADDR = mkDefault defaultAddress;
+      DATABASE_URL = "user=miniflux host=/run/postgresql dbname=miniflux";
+      RUN_MIGRATIONS = "1";
+      CREATE_ADMIN = "1";
+    };
+
+    services.postgresql = {
+      enable = true;
+      ensureUsers = [ {
+        name = "miniflux";
+        ensureDBOwnership = true;
+      } ];
+      ensureDatabases = [ "miniflux" ];
+    };
+
+    systemd.services.miniflux-dbsetup = {
+      description = "Miniflux database setup";
+      requires = [ "postgresql.service" ];
+      after = [ "network.target" "postgresql.service" ];
+      serviceConfig = {
+        Type = "oneshot";
+        User = config.services.postgresql.superUser;
+        ExecStart = preStart;
+      };
+    };
+
+    systemd.services.miniflux = {
+      description = "Miniflux service";
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "miniflux-dbsetup.service" ];
+      after = [ "network.target" "postgresql.service" "miniflux-dbsetup.service" ];
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/miniflux";
+        User = "miniflux";
+        DynamicUser = true;
+        RuntimeDirectory = "miniflux";
+        RuntimeDirectoryMode = "0700";
+        EnvironmentFile = cfg.adminCredentialsFile;
+        # Hardening
+        CapabilityBoundingSet = [ "" ];
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+        UMask = "0077";
+      };
+
+      environment = cfg.config;
+    };
+    environment.systemPackages = [ cfg.package ];
+
+    security.apparmor.policies."bin.miniflux".profile = ''
+      include <tunables/global>
+      ${cfg.package}/bin/miniflux {
+        include <abstractions/base>
+        include <abstractions/nameservice>
+        include <abstractions/ssl_certs>
+        include "${pkgs.apparmorRulesFromClosure { name = "miniflux"; } cfg.package}"
+        r ${cfg.package}/bin/miniflux,
+        r @{sys}/kernel/mm/transparent_hugepage/hpage_pmd_size,
+      }
+    '';
+  };
+}
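A minimal sketch of enabling Miniflux, reusing the values from the option examples above; the credentials path is a placeholder:

    services.miniflux = {
      enable = true;
      # EnvironmentFile-format file providing ADMIN_USERNAME and ADMIN_PASSWORD.
      adminCredentialsFile = "/etc/nixos/miniflux-admin-credentials";
      config = {
        LISTEN_ADDR = "localhost:8080";
        CLEANUP_FREQUENCY = "48";
      };
    };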
diff --git a/nixpkgs/nixos/modules/services/web-apps/mobilizon.nix b/nixpkgs/nixos/modules/services/web-apps/mobilizon.nix
new file mode 100644
index 000000000000..bb4319b51a2f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/mobilizon.nix
@@ -0,0 +1,449 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mobilizon;
+
+  user = "mobilizon";
+  group = "mobilizon";
+
+  settingsFormat = pkgs.formats.elixirConf { elixir = cfg.package.elixirPackage; };
+
+  configFile = settingsFormat.generate "mobilizon-config.exs" cfg.settings;
+
+  # Make a package containing launchers with the correct environment, instead of
+  # setting it in the systemd service, so that the user can also run them directly
+  # without trouble
+  launchers = pkgs.stdenv.mkDerivation rec {
+    pname = "${cfg.package.pname}-launchers";
+    inherit (cfg.package) version;
+
+    src = cfg.package;
+
+    nativeBuildInputs = with pkgs; [ makeWrapper ];
+
+    dontBuild = true;
+
+    installPhase = ''
+      mkdir -p $out/bin
+
+      makeWrapper \
+        $src/bin/mobilizon \
+        $out/bin/mobilizon \
+        --run '. ${secretEnvFile}' \
+        --set MOBILIZON_CONFIG_PATH "${configFile}" \
+        --set-default RELEASE_TMP "/tmp"
+
+      makeWrapper \
+        $src/bin/mobilizon_ctl \
+        $out/bin/mobilizon_ctl \
+        --run '. ${secretEnvFile}' \
+        --set MOBILIZON_CONFIG_PATH "${configFile}" \
+        --set-default RELEASE_TMP "/tmp"
+    '';
+  };
+
+  repoSettings = cfg.settings.":mobilizon"."Mobilizon.Storage.Repo";
+  instanceSettings = cfg.settings.":mobilizon".":instance";
+
+  isLocalPostgres = repoSettings.socket_dir != null;
+
+  dbUser = if repoSettings.username != null then repoSettings.username else "mobilizon";
+
+  postgresql = config.services.postgresql.package;
+  postgresqlSocketDir = "/var/run/postgresql";
+
+  secretEnvFile = "/var/lib/mobilizon/secret-env.sh";
+in
+{
+  options = {
+    services.mobilizon = {
+      enable = mkEnableOption
+        (lib.mdDoc "Mobilizon federated organization and mobilization platform");
+
+      nginx.enable = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether an Nginx virtual host should be
+          set up to serve Mobilizon.
+        '';
+      };
+
+      package = mkPackageOptionMD pkgs "mobilizon" { };
+
+      settings = mkOption {
+        type =
+          let
+            elixirTypes = settingsFormat.lib.types;
+          in
+          types.submodule {
+            freeformType = settingsFormat.type;
+
+            options = {
+              ":mobilizon" = {
+
+                "Mobilizon.Web.Endpoint" = {
+                  url.host = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = lib.literalMD ''
+                      ''${settings.":mobilizon".":instance".hostname}
+                    '';
+                    description = lib.mdDoc ''
+                      Your instance's hostname for generating URLs throughout the app
+                    '';
+                  };
+
+                  http = {
+                    port = mkOption {
+                      type = elixirTypes.port;
+                      default = 4000;
+                      description = lib.mdDoc ''
+                        The port to run the server on
+                      '';
+                    };
+                    ip = mkOption {
+                      type = elixirTypes.tuple;
+                      default = settingsFormat.lib.mkTuple [ 0 0 0 0 0 0 0 1 ];
+                      description = lib.mdDoc ''
+                        The IP address to listen on. Defaults to [::1] notated as a byte tuple.
+                      '';
+                    };
+                  };
+
+                  has_reverse_proxy = mkOption {
+                    type = elixirTypes.bool;
+                    default = true;
+                    description = lib.mdDoc ''
+                      Whether you use a reverse proxy
+                    '';
+                  };
+                };
+
+                ":instance" = {
+                  name = mkOption {
+                    type = elixirTypes.str;
+                    description = lib.mdDoc ''
+                      The fallback instance name if it has not been configured in the admin UI
+                    '';
+                  };
+
+                  hostname = mkOption {
+                    type = elixirTypes.str;
+                    description = lib.mdDoc ''
+                      Your instance's hostname
+                    '';
+                  };
+
+                  email_from = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = literalExpression ''
+                      noreply@''${settings.":mobilizon".":instance".hostname}
+                    '';
+                    description = lib.mdDoc ''
+                      The email for the From: header in emails
+                    '';
+                  };
+
+                  email_reply_to = mkOption {
+                    type = elixirTypes.str;
+                    defaultText = literalExpression ''
+                      ''${email_from}
+                    '';
+                    description = lib.mdDoc ''
+                      The email for the Reply-To: header in emails
+                    '';
+                  };
+                };
+
+                "Mobilizon.Storage.Repo" = {
+                  socket_dir = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = postgresqlSocketDir;
+                    description = lib.mdDoc ''
+                      Path to the postgres socket directory.
+
+                      Set this to null if you want to connect to a remote database.
+
+                      If non-null, the local PostgreSQL server will be configured with
+                      the configured database, permissions, and required extensions.
+
+                      If connecting to a remote database, please follow the
+                      instructions on how to set up your database:
+                      <https://docs.joinmobilizon.org/administration/install/release/#database-setup>
+                    '';
+                  };
+
+                  username = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = user;
+                    description = lib.mdDoc ''
+                      User used to connect to the database
+                    '';
+                  };
+
+                  database = mkOption {
+                    type = types.nullOr elixirTypes.str;
+                    default = "mobilizon_prod";
+                    description = lib.mdDoc ''
+                      Name of the database
+                    '';
+                  };
+                };
+              };
+            };
+          };
+        default = { };
+
+        description = lib.mdDoc ''
+          Mobilizon Elixir configuration; see
+          <https://docs.joinmobilizon.org/administration/configure/reference/>
+          for supported values.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.nginx.enable -> (cfg.settings.":mobilizon"."Mobilizon.Web.Endpoint".http.ip == settingsFormat.lib.mkTuple [ 0 0 0 0 0 0 0 1 ]);
+        message = "Setting the IP mobilizon listens on is only possible when the nginx config is not used, as it is hardcoded there.";
+      }
+    ];
+
+    services.mobilizon.settings = {
+      ":mobilizon" = {
+        "Mobilizon.Web.Endpoint" = {
+          server = true;
+          url.host = mkDefault instanceSettings.hostname;
+          secret_key_base =
+            settingsFormat.lib.mkGetEnv { envVariable = "MOBILIZON_INSTANCE_SECRET"; };
+        };
+
+        "Mobilizon.Web.Auth.Guardian".secret_key =
+          settingsFormat.lib.mkGetEnv { envVariable = "MOBILIZON_AUTH_SECRET"; };
+
+        ":instance" = {
+          registrations_open = mkDefault false;
+          demo = mkDefault false;
+          email_from = mkDefault "noreply@${instanceSettings.hostname}";
+          email_reply_to = mkDefault instanceSettings.email_from;
+        };
+
+        "Mobilizon.Storage.Repo" = {
+          # Forced by upstream since it uses PostgreSQL-specific extensions
+          adapter = settingsFormat.lib.mkAtom "Ecto.Adapters.Postgres";
+          pool_size = mkDefault 10;
+        };
+      };
+
+      ":tzdata".":data_dir" = "/var/lib/mobilizon/tzdata/";
+    };
+
+    # This somewhat follows upstream's systemd service here:
+    # https://framagit.org/framasoft/mobilizon/-/blob/master/support/systemd/mobilizon.service
+    systemd.services.mobilizon = {
+      description = "Mobilizon federated organization and mobilization platform";
+
+      wantedBy = [ "multi-user.target" ];
+
+      path = with pkgs; [
+        gawk
+        imagemagick
+        libwebp
+        file
+
+        # Optional:
+        gifsicle
+        jpegoptim
+        optipng
+        pngquant
+      ];
+
+      serviceConfig = {
+        ExecStartPre = "${launchers}/bin/mobilizon_ctl migrate";
+        ExecStart = "${launchers}/bin/mobilizon start";
+        ExecStop = "${launchers}/bin/mobilizon stop";
+
+        User = user;
+        Group = group;
+
+        StateDirectory = "mobilizon";
+
+        Restart = "on-failure";
+
+        PrivateTmp = true;
+        ProtectSystem = "full";
+        NoNewPrivileges = true;
+
+        ReadWritePaths = mkIf isLocalPostgres postgresqlSocketDir;
+      };
+    };
+
+    # Create the needed secrets before running Mobilizon, so that they are not
+    # in the nix store
+    #
+    # Since some of these tasks are quite common for Elixir projects (COOKIE for
+    # every BEAM project, Phoenix and Guardian are also quite common), this
+    # service could be abstracted in the future, and used by other Elixir
+    # projects.
+    systemd.services.mobilizon-setup-secrets = {
+      description = "Mobilizon setup secrets";
+      before = [ "mobilizon.service" ];
+      wantedBy = [ "mobilizon.service" ];
+
+      script =
+        let
+          # Taken from here:
+          # https://framagit.org/framasoft/mobilizon/-/blob/1.0.7/lib/mix/tasks/mobilizon/instance.ex#L132-133
+          genSecret =
+            "IO.puts(:crypto.strong_rand_bytes(64)" +
+            "|> Base.encode64()" +
+            "|> binary_part(0, 64))";
+
+          # Taken from here:
+          # https://github.com/elixir-lang/elixir/blob/v1.11.3/lib/mix/lib/mix/release.ex#L499
+          genCookie = "IO.puts(Base.encode32(:crypto.strong_rand_bytes(32)))";
+
+          evalElixir = str: ''
+            ${cfg.package.elixirPackage}/bin/elixir --eval '${str}'
+          '';
+        in
+        ''
+          set -euxo pipefail
+
+          if [ ! -f "${secretEnvFile}" ]; then
+            install -m 600 /dev/null "${secretEnvFile}"
+            cat > "${secretEnvFile}" <<EOF
+          # This file was automatically generated by mobilizon-setup-secrets.service
+          export MOBILIZON_AUTH_SECRET='$(${evalElixir genSecret})'
+          export MOBILIZON_INSTANCE_SECRET='$(${evalElixir genSecret})'
+          export RELEASE_COOKIE='$(${evalElixir genCookie})'
+          EOF
+          fi
+        '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        Group = group;
+        StateDirectory = "mobilizon";
+      };
+    };
+
+    # Add the required PostgreSQL extensions to the local PostgreSQL server,
+    # if local PostgreSQL is configured.
+    systemd.services.mobilizon-postgresql = mkIf isLocalPostgres {
+      description = "Mobilizon PostgreSQL setup";
+
+      after = [ "postgresql.service" ];
+      before = [ "mobilizon.service" "mobilizon-setup-secrets.service" ];
+      wantedBy = [ "mobilizon.service" ];
+
+      path = [ postgresql ];
+
+      # Taken from here:
+      # https://framagit.org/framasoft/mobilizon/-/blob/1.1.0/priv/templates/setup_db.eex
+      # TODO(to maintainers of mobilizon): altering the database owner is necessary
+      # because PostgreSQL 15 changed its default behavior w.r.t. privileges.
+      # See https://github.com/NixOS/nixpkgs/issues/216989 to get rid
+      # of that workaround.
+      script =
+        ''
+          psql "${repoSettings.database}" -c "\
+            CREATE EXTENSION IF NOT EXISTS postgis; \
+            CREATE EXTENSION IF NOT EXISTS pg_trgm; \
+            CREATE EXTENSION IF NOT EXISTS unaccent;"
+          psql -tAc 'ALTER DATABASE "${repoSettings.database}" OWNER TO "${dbUser}";'
+
+        '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = config.services.postgresql.superUser;
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /var/lib/mobilizon/uploads/exports/csv 700 mobilizon mobilizon - -"
+      "Z /var/lib/mobilizon 700 mobilizon mobilizon - -"
+    ];
+
+    services.postgresql = mkIf isLocalPostgres {
+      enable = true;
+      ensureDatabases = [ repoSettings.database ];
+      ensureUsers = [
+        {
+          name = dbUser;
+          # Given that `dbUser` is potentially arbitrarily custom, we will perform
+          # manual fixups in mobilizon-postgresql.
+          # TODO(to maintainers of mobilizon): Feel free to simplify your setup by using `ensureDBOwnership`.
+          ensureDBOwnership = false;
+        }
+      ];
+      extraPlugins = with postgresql.pkgs; [ postgis ];
+    };
+
+    # Nginx config taken from support/nginx/mobilizon-release.conf
+    services.nginx =
+      let
+        inherit (cfg.settings.":mobilizon".":instance") hostname;
+        proxyPass = "http://[::1]:"
+          + toString cfg.settings.":mobilizon"."Mobilizon.Web.Endpoint".http.port;
+      in
+      lib.mkIf cfg.nginx.enable {
+        enable = true;
+        virtualHosts."${hostname}" = {
+          enableACME = lib.mkDefault true;
+          forceSSL = lib.mkDefault true;
+          extraConfig = ''
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection "upgrade";
+            proxy_set_header Host $host;
+            proxy_set_header X-Real-IP $remote_addr;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+          '';
+          locations."/" = {
+            inherit proxyPass;
+          };
+          locations."~ ^/(js|css|img)" = {
+            root = "${cfg.package}/lib/mobilizon-${cfg.package.version}/priv/static";
+            extraConfig = ''
+              etag off;
+              access_log off;
+              add_header Cache-Control "public, max-age=31536000, immutable";
+            '';
+          };
+          locations."~ ^/(media|proxy)" = {
+            inherit proxyPass;
+            extraConfig = ''
+              etag off;
+              access_log off;
+              add_header Cache-Control "public, max-age=31536000, immutable";
+            '';
+          };
+        };
+      };
+
+    users.users.${user} = {
+      description = "Mobilizon daemon user";
+      group = group;
+      isSystemUser = true;
+    };
+
+    users.groups.${group} = { };
+
+    # So that we have the `mobilizon` and `mobilizon_ctl` commands.
+    # The `mobilizon remote` command is useful for dropping a shell into the
+    # running Mobilizon instance, and `mobilizon_ctl` is used for common
+    # management tasks (e.g. adding users).
+    environment.systemPackages = [ launchers ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ minijackson erictapen ];
+}
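To make the required settings concrete, a minimal sketch of enabling Mobilizon; the instance name and hostname are placeholders, and the nginx virtual host (with ACME) is set up automatically because nginx.enable defaults to true:

    services.mobilizon = {
      enable = true;
      settings.":mobilizon".":instance" = {
        name = "My Mobilizon";            # fallback instance name
        hostname = "events.example.com";  # used for the nginx vhost and generated URLs
      };
    };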
diff --git a/nixpkgs/nixos/modules/services/web-apps/monica.nix b/nixpkgs/nixos/modules/services/web-apps/monica.nix
new file mode 100644
index 000000000000..2bff42f7ffa4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/monica.nix
@@ -0,0 +1,468 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+with lib; let
+  cfg = config.services.monica;
+  monica = pkgs.monica.override {
+    dataDir = cfg.dataDir;
+  };
+  db = cfg.database;
+  mail = cfg.mail;
+
+  user = cfg.user;
+  group = cfg.group;
+
+  # shell script for local administration
+  artisan = pkgs.writeScriptBin "monica" ''
+    #! ${pkgs.runtimeShell}
+    cd ${monica}
+    sudo() {
+      if [[ "$USER" != ${user} ]]; then
+        exec /run/wrappers/bin/sudo -u ${user} "$@"
+      else
+        exec "$@"
+      fi
+    }
+    sudo ${pkgs.php}/bin/php artisan "$@"
+  '';
+
+  tlsEnabled = cfg.nginx.addSSL || cfg.nginx.forceSSL || cfg.nginx.onlySSL || cfg.nginx.enableACME;
+in {
+  options.services.monica = {
+    enable = mkEnableOption (lib.mdDoc "monica");
+
+    user = mkOption {
+      default = "monica";
+      description = lib.mdDoc "User monica runs as.";
+      type = types.str;
+    };
+
+    group = mkOption {
+      default = "monica";
+      description = lib.mdDoc "Group monica runs as.";
+      type = types.str;
+    };
+
+    appKeyFile = mkOption {
+      description = lib.mdDoc ''
+        A file containing the Laravel APP_KEY - a 32 character long,
+        base64 encoded key used for encryption where needed. Can be
+        generated with `head -c 32 /dev/urandom | base64`.
+      '';
+      example = "/run/keys/monica-appkey";
+      type = types.path;
+    };
+
+    hostname = lib.mkOption {
+      type = lib.types.str;
+      default =
+        if config.networking.domain != null
+        then config.networking.fqdn
+        else config.networking.hostName;
+      defaultText = lib.literalExpression "config.networking.fqdn";
+      example = "monica.example.com";
+      description = lib.mdDoc ''
+        The hostname to serve monica on.
+      '';
+    };
+
+    appURL = mkOption {
+      description = lib.mdDoc ''
+        The root URL that you want to host monica on. All URLs in monica will be generated using this value.
+        If you change this in the future you may need to run a command to update stored URLs in the database.
+        Command example: `php artisan monica:update-url https://old.example.com https://new.example.com`
+      '';
+      default = "http${lib.optionalString tlsEnabled "s"}://${cfg.hostname}";
+      defaultText = ''http''${lib.optionalString tlsEnabled "s"}://''${cfg.hostname}'';
+      example = "https://example.com";
+      type = types.str;
+    };
+
+    dataDir = mkOption {
+      description = lib.mdDoc "monica data directory";
+      default = "/var/lib/monica";
+      type = types.path;
+    };
+
+    database = {
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Database host address.";
+      };
+      port = mkOption {
+        type = types.port;
+        default = 3306;
+        description = lib.mdDoc "Database host port.";
+      };
+      name = mkOption {
+        type = types.str;
+        default = "monica";
+        description = lib.mdDoc "Database name.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = user;
+        defaultText = lib.literalExpression "user";
+        description = lib.mdDoc "Database username.";
+      };
+      passwordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "/run/keys/monica-dbpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to
+          {option}`database.user`.
+        '';
+      };
+      createLocally = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Create the database and database user locally.";
+      };
+    };
+
+    mail = {
+      driver = mkOption {
+        type = types.enum ["smtp" "sendmail"];
+        default = "smtp";
+        description = lib.mdDoc "Mail driver to use.";
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Mail host address.";
+      };
+      port = mkOption {
+        type = types.port;
+        default = 1025;
+        description = lib.mdDoc "Mail host port.";
+      };
+      fromName = mkOption {
+        type = types.str;
+        default = "monica";
+        description = lib.mdDoc "Mail \"from\" name.";
+      };
+      from = mkOption {
+        type = types.str;
+        default = "mail@monica.com";
+        description = lib.mdDoc "Mail \"from\" email.";
+      };
+      user = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "monica";
+        description = lib.mdDoc "Mail username.";
+      };
+      passwordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "/run/keys/monica-mailpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to
+          {option}`mail.user`.
+        '';
+      };
+      encryption = mkOption {
+        type = with types; nullOr (enum ["tls"]);
+        default = null;
+        description = lib.mdDoc "SMTP encryption mechanism to use.";
+      };
+    };
+
+    maxUploadSize = mkOption {
+      type = types.str;
+      default = "18M";
+      example = "1G";
+      description = lib.mdDoc "The maximum size for uploads (e.g. images).";
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [str int bool]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = lib.mdDoc ''
+        Options for the monica PHP pool. See the documentation on `php-fpm.conf`
+        for details on configuration directives.
+      '';
+    };
+
+    nginx = mkOption {
+      type = types.submodule (
+        recursiveUpdate
+        (import ../web-servers/nginx/vhost-options.nix {inherit config lib;}) {}
+      );
+      default = {};
+      example = ''
+        {
+          serverAliases = [
+            "monica.''${config.networking.domain}"
+          ];
+          # To enable encryption and let Let's Encrypt take care of the certificate
+          forceSSL = true;
+          enableACME = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        With this option, you can customize the nginx virtualHost settings.
+      '';
+    };
+
+    config = mkOption {
+      type = with types;
+        attrsOf
+        (nullOr
+          (either
+            (oneOf [
+              bool
+              int
+              port
+              path
+              str
+            ])
+            (submodule {
+              options = {
+                _secret = mkOption {
+                  type = nullOr str;
+                  description = lib.mdDoc ''
+                    The path to a file containing the value the
+                    option should be set to in the final
+                    configuration file.
+                  '';
+                };
+              };
+            })));
+      default = {};
+      example = ''
+        {
+          ALLOWED_IFRAME_HOSTS = "https://example.com";
+          WKHTMLTOPDF = "/home/user/bins/wkhtmltopdf";
+          AUTH_METHOD = "oidc";
+          OIDC_NAME = "MyLogin";
+          OIDC_DISPLAY_NAME_CLAIMS = "name";
+          OIDC_CLIENT_ID = "monica";
+          OIDC_CLIENT_SECRET = { _secret = "/run/keys/oidc_secret"; };
+          OIDC_ISSUER = "https://keycloak.example.com/auth/realms/My%20Realm";
+          OIDC_ISSUER_DISCOVER = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        monica configuration options to set in the
+        <filename>.env</filename> file.
+
+        Refer to <link xlink:href="https://github.com/monicahq/monica"/>
+        for details on supported values.
+
+        Settings containing secret data should be set to an attribute
+        set containing the attribute <literal>_secret</literal> - a
+        string pointing to a file containing the value the option
+        should be set to. See the example to get a better picture of
+        this: in the resulting <filename>.env</filename> file, the
+        <literal>OIDC_CLIENT_SECRET</literal> key will be set to the
+        contents of the <filename>/run/keys/oidc_secret</filename>
+        file.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = db.createLocally -> db.user == user;
+        message = "services.monica.database.user must be set to ${user} if services.monica.database.createLocally is set true.";
+      }
+      {
+        assertion = db.createLocally -> db.passwordFile == null;
+        message = "services.monica.database.passwordFile cannot be specified if services.monica.database.createLocally is set to true.";
+      }
+    ];
+
+    services.monica.config = {
+      APP_ENV = "production";
+      APP_KEY._secret = cfg.appKeyFile;
+      APP_URL = cfg.appURL;
+      DB_HOST = db.host;
+      DB_PORT = db.port;
+      DB_DATABASE = db.name;
+      DB_USERNAME = db.user;
+      MAIL_DRIVER = mail.driver;
+      MAIL_FROM_NAME = mail.fromName;
+      MAIL_FROM = mail.from;
+      MAIL_HOST = mail.host;
+      MAIL_PORT = mail.port;
+      MAIL_USERNAME = mail.user;
+      MAIL_ENCRYPTION = mail.encryption;
+      DB_PASSWORD._secret = db.passwordFile;
+      MAIL_PASSWORD._secret = mail.passwordFile;
+      APP_SERVICES_CACHE = "/run/monica/cache/services.php";
+      APP_PACKAGES_CACHE = "/run/monica/cache/packages.php";
+      APP_CONFIG_CACHE = "/run/monica/cache/config.php";
+      APP_ROUTES_CACHE = "/run/monica/cache/routes-v7.php";
+      APP_EVENTS_CACHE = "/run/monica/cache/events.php";
+      SESSION_SECURE_COOKIE = tlsEnabled;
+    };
+
+    environment.systemPackages = [artisan];
+
+    services.mysql = mkIf db.createLocally {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [db.name];
+      ensureUsers = [
+        {
+          name = db.user;
+          ensurePermissions = {"${db.name}.*" = "ALL PRIVILEGES";};
+        }
+      ];
+    };
+
+    services.phpfpm.pools.monica = {
+      inherit user group;
+      phpOptions = ''
+        log_errors = on
+        post_max_size = ${cfg.maxUploadSize}
+        upload_max_filesize = ${cfg.maxUploadSize}
+      '';
+      settings = {
+        "listen.mode" = "0660";
+        "listen.owner" = user;
+        "listen.group" = group;
+      } // cfg.poolConfig;
+    };
+
+    services.nginx = {
+      enable = mkDefault true;
+      recommendedTlsSettings = true;
+      recommendedOptimisation = true;
+      recommendedGzipSettings = true;
+      recommendedBrotliSettings = true;
+      recommendedProxySettings = true;
+      virtualHosts.${cfg.hostname} = mkMerge [
+        cfg.nginx
+        {
+          root = mkForce "${monica}/public";
+          locations = {
+            "/" = {
+              index = "index.php";
+              tryFiles = "$uri $uri/ /index.php?$query_string";
+            };
+            "~ \.php$".extraConfig = ''
+              fastcgi_pass unix:${config.services.phpfpm.pools."monica".socket};
+            '';
+            "~ \.(js|css|gif|png|ico|jpg|jpeg)$" = {
+              extraConfig = "expires 365d;";
+            };
+          };
+        }
+      ];
+    };
+
+    systemd.services.monica-setup = {
+      description = "Preparation tasks for monica";
+      before = ["phpfpm-monica.service"];
+      after = optional db.createLocally "mysql.service";
+      wantedBy = ["multi-user.target"];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        User = user;
+        UMask = "0077";
+        WorkingDirectory = "${monica}";
+        RuntimeDirectory = "monica/cache";
+        RuntimeDirectoryMode = "0700";
+      };
+      path = [pkgs.replace-secret];
+      script = let
+        isSecret = v: isAttrs v && v ? _secret && isString v._secret;
+        monicaEnvVars = lib.generators.toKeyValue {
+          mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" {
+            mkValueString = v:
+              with builtins;
+                if isInt v
+                then toString v
+                else if isString v
+                then v
+                else if true == v
+                then "true"
+                else if false == v
+                then "false"
+                else if isSecret v
+                then hashString "sha256" v._secret
+                else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
+          };
+        };
+        secretPaths = lib.mapAttrsToList (_: v: v._secret) (lib.filterAttrs (_: isSecret) cfg.config);
+        mkSecretReplacement = file: ''
+          replace-secret ${escapeShellArgs [(builtins.hashString "sha256" file) file "${cfg.dataDir}/.env"]}
+        '';
+        secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
+        filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [{} null])) cfg.config;
+        monicaEnv = pkgs.writeText "monica.env" (monicaEnvVars filteredConfig);
+      in ''
+        # error handling
+        set -euo pipefail
+
+        # create .env file
+        install -T -m 0600 -o ${user} ${monicaEnv} "${cfg.dataDir}/.env"
+        ${secretReplacements}
+        if ! grep 'APP_KEY=base64:' "${cfg.dataDir}/.env" >/dev/null; then
+          sed -i 's/APP_KEY=/APP_KEY=base64:/' "${cfg.dataDir}/.env"
+        fi
+
+        # migrate & seed db
+        ${pkgs.php}/bin/php artisan key:generate --force
+        ${pkgs.php}/bin/php artisan setup:production -v --force
+      '';
+    };
+
+    systemd.services.monica-scheduler = {
+      description = "Background tasks for monica";
+      startAt = "minutely";
+      after = ["monica-setup.service"];
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        WorkingDirectory = "${monica}";
+        ExecStart = "${pkgs.php}/bin/php ${monica}/artisan schedule:run -v";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${cfg.dataDir}                            0710 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public                     0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads             0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage                    0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/app                0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/fonts              0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework          0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/cache    0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/sessions 0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/views    0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/logs               0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/uploads            0700 ${user} ${group} - -"
+    ];
+
+    users = {
+      users = mkIf (user == "monica") {
+        monica = {
+          inherit group;
+          isSystemUser = true;
+        };
+        "${config.services.nginx.user}".extraGroups = [group];
+      };
+      groups = mkIf (group == "monica") {
+        monica = {};
+      };
+    };
+  };
+}
+
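A minimal sketch of enabling monica; the hostname and key path are placeholders, and the nginx submodule values mirror the example given for the nginx option above:

    services.monica = {
      enable = true;
      hostname = "monica.example.com";
      appKeyFile = "/run/keys/monica-appkey";  # 32 bytes, base64 encoded
      nginx = {
        forceSSL = true;
        enableACME = true;
      };
    };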
diff --git a/nixpkgs/nixos/modules/services/web-apps/moodle.nix b/nixpkgs/nixos/modules/services/web-apps/moodle.nix
new file mode 100644
index 000000000000..04ae6bd7f175
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/moodle.nix
@@ -0,0 +1,318 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mkDefault mkEnableOption mkForce mkIf mkMerge mkOption types;
+  inherit (lib) concatStringsSep literalExpression mapAttrsToList optional optionalString;
+
+  cfg = config.services.moodle;
+  fpm = config.services.phpfpm.pools.moodle;
+
+  user = "moodle";
+  group = config.services.httpd.group;
+  stateDir = "/var/lib/moodle";
+
+  moodleConfig = pkgs.writeText "config.php" ''
+  <?php  // Moodle configuration file
+
+  unset($CFG);
+  global $CFG;
+  $CFG = new stdClass();
+
+  $CFG->dbtype    = '${ { mysql = "mariadb"; pgsql = "pgsql"; }.${cfg.database.type} }';
+  $CFG->dblibrary = 'native';
+  $CFG->dbhost    = '${cfg.database.host}';
+  $CFG->dbname    = '${cfg.database.name}';
+  $CFG->dbuser    = '${cfg.database.user}';
+  ${optionalString (cfg.database.passwordFile != null) "$CFG->dbpass = file_get_contents('${cfg.database.passwordFile}');"}
+  $CFG->prefix    = 'mdl_';
+  $CFG->dboptions = array (
+    'dbpersist' => 0,
+    'dbport' => '${toString cfg.database.port}',
+    ${optionalString (cfg.database.socket != null) "'dbsocket' => '${cfg.database.socket}',"}
+    'dbcollation' => 'utf8mb4_unicode_ci',
+  );
+
+  $CFG->wwwroot   = '${if cfg.virtualHost.addSSL || cfg.virtualHost.forceSSL || cfg.virtualHost.onlySSL then "https" else "http"}://${cfg.virtualHost.hostName}';
+  $CFG->dataroot  = '${stateDir}';
+  $CFG->admin     = 'admin';
+
+  $CFG->directorypermissions = 02777;
+  $CFG->disableupdateautodeploy = true;
+
+  $CFG->pathtogs = '${pkgs.ghostscript}/bin/gs';
+  $CFG->pathtophp = '${phpExt}/bin/php';
+  $CFG->pathtodu = '${pkgs.coreutils}/bin/du';
+  $CFG->aspellpath = '${pkgs.aspell}/bin/aspell';
+  $CFG->pathtodot = '${pkgs.graphviz}/bin/dot';
+
+  ${cfg.extraConfig}
+
+  require_once('${cfg.package}/share/moodle/lib/setup.php');
+
+  // There is no php closing tag in this file,
+  // it is intentional because it prevents trailing whitespace problems!
+  '';
+
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
+
+  phpExt = pkgs.php81.buildEnv {
+    extensions = { all, ... }: with all; [ iconv mbstring curl openssl tokenizer soap ctype zip gd simplexml dom intl sqlite3 pgsql pdo_sqlite pdo_pgsql pdo_odbc pdo_mysql pdo mysqli session zlib xmlreader fileinfo filter opcache exif sodium ];
+    extraConfig = "max_input_vars = 5000";
+  };
+in
+{
+  # interface
+  options.services.moodle = {
+    enable = mkEnableOption (lib.mdDoc "Moodle web application");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.moodle;
+      defaultText = literalExpression "pkgs.moodle";
+      description = lib.mdDoc "The Moodle package to use.";
+    };
+
+    initialPassword = mkOption {
+      type = types.str;
+      example = "correcthorsebatterystaple";
+      description = lib.mdDoc ''
+        Specifies the initial password for the admin, i.e. the password assigned if the user does not already exist.
+        The password specified here is world-readable in the Nix store, so it should be changed promptly.
+      '';
+    };
+
+    database = {
+      type = mkOption {
+        type = types.enum [ "mysql" "pgsql" ];
+        default = "mysql";
+        description = lib.mdDoc "Database engine to use.";
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Database host address.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        description = lib.mdDoc "Database host port.";
+        default = {
+          mysql = 3306;
+          pgsql = 5432;
+        }.${cfg.database.type};
+        defaultText = literalExpression "3306";
+      };
+
+      name = mkOption {
+        type = types.str;
+        default = "moodle";
+        description = lib.mdDoc "Database name.";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "moodle";
+        description = lib.mdDoc "Database user.";
+      };
+
+      passwordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/run/keys/moodle-dbpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to
+          {option}`database.user`.
+        '';
+      };
+
+      socket = mkOption {
+        type = types.nullOr types.path;
+        default =
+          if mysqlLocal then "/run/mysqld/mysqld.sock"
+          else if pgsqlLocal then "/run/postgresql"
+          else null;
+        defaultText = literalExpression "/run/mysqld/mysqld.sock";
+        description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+      };
+
+      createLocally = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Create the database and database user locally.";
+      };
+    };
+
+    virtualHost = mkOption {
+      type = types.submodule (import ../web-servers/apache-httpd/vhost-options.nix);
+      example = literalExpression ''
+        {
+          hostName = "moodle.example.org";
+          adminAddr = "webmaster@example.org";
+          forceSSL = true;
+          enableACME = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        Apache configuration can be done by adapting {option}`services.httpd.virtualHosts`.
+        See [](#opt-services.httpd.virtualHosts) for further information.
+      '';
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = lib.mdDoc ''
+        Options for the Moodle PHP pool. See the documentation on `php-fpm.conf`
+        for details on configuration directives.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Any additional text to be appended to the config.php
+        configuration file. This is a PHP script. For configuration
+        details, see <https://docs.moodle.org/37/en/Configuration_file>.
+      '';
+      example = ''
+        $CFG->disableupdatenotifications = true;
+      '';
+    };
+  };
+
+  # implementation
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.database.createLocally -> cfg.database.user == user && cfg.database.user == cfg.database.name;
+        message = "services.moodle.database.user must be set to ${user} if services.moodle.database.createLocally is set true";
+      }
+      { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+        message = "a password cannot be specified if services.moodle.database.createLocally is set to true";
+      }
+    ];
+
+    services.mysql = mkIf mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensurePermissions = {
+            "${cfg.database.name}.*" = "SELECT, INSERT, UPDATE, DELETE, CREATE, CREATE TEMPORARY TABLES, DROP, INDEX, ALTER";
+          };
+        }
+      ];
+    };
+
+    services.postgresql = mkIf pgsqlLocal {
+      enable = true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    services.phpfpm.pools.moodle = {
+      inherit user group;
+      phpPackage = phpExt;
+      phpEnv.MOODLE_CONFIG = "${moodleConfig}";
+      phpOptions = ''
+        zend_extension = opcache.so
+        opcache.enable = 1
+        max_input_vars = 5000
+      '';
+      settings = {
+        "listen.owner" = config.services.httpd.user;
+        "listen.group" = config.services.httpd.group;
+      } // cfg.poolConfig;
+    };
+
+    services.httpd = {
+      enable = true;
+      adminAddr = mkDefault cfg.virtualHost.adminAddr;
+      extraModules = [ "proxy_fcgi" ];
+      virtualHosts.${cfg.virtualHost.hostName} = mkMerge [ cfg.virtualHost {
+        documentRoot = mkForce "${cfg.package}/share/moodle";
+        extraConfig = ''
+          <Directory "${cfg.package}/share/moodle">
+            <FilesMatch "\.php$">
+              <If "-f %{REQUEST_FILENAME}">
+                SetHandler "proxy:unix:${fpm.socket}|fcgi://localhost/"
+              </If>
+            </FilesMatch>
+            Options -Indexes
+            DirectoryIndex index.php
+          </Directory>
+        '';
+      } ];
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${stateDir}' 0750 ${user} ${group} - -"
+    ];
+
+    systemd.services.moodle-init = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "phpfpm-moodle.service" ];
+      after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+      environment.MOODLE_CONFIG = moodleConfig;
+      script = ''
+        ${phpExt}/bin/php ${cfg.package}/share/moodle/admin/cli/check_database_schema.php && rc=$? || rc=$?
+
+        [ "$rc" == 1 ] && ${phpExt}/bin/php ${cfg.package}/share/moodle/admin/cli/upgrade.php \
+          --non-interactive \
+          --allow-unstable
+
+        [ "$rc" == 2 ] && ${phpExt}/bin/php ${cfg.package}/share/moodle/admin/cli/install_database.php \
+          --agree-license \
+          --adminpass=${cfg.initialPassword}
+
+        true
+      '';
+      serviceConfig = {
+        User = user;
+        Group = group;
+        Type = "oneshot";
+      };
+    };
+
+    systemd.services.moodle-cron = {
+      description = "Moodle cron service";
+      after = [ "moodle-init.service" ];
+      environment.MOODLE_CONFIG = moodleConfig;
+      serviceConfig = {
+        User = user;
+        Group = group;
+        ExecStart = "${phpExt}/bin/php ${cfg.package}/share/moodle/admin/cli/cron.php";
+      };
+    };
+
+    systemd.timers.moodle-cron = {
+      description = "Moodle cron timer";
+      wantedBy = [ "timers.target" ];
+      timerConfig = {
+        OnCalendar = "minutely";
+      };
+    };
+
+    systemd.services.httpd.after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+
+    users.users.${user} = {
+      group = group;
+      isSystemUser = true;
+    };
+  };
+}
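A minimal sketch of enabling Moodle with the Apache virtual host from the option example above; the hostname and admin address are placeholders, and the initial password should be changed promptly since it ends up world-readable in the Nix store:

    services.moodle = {
      enable = true;
      initialPassword = "correcthorsebatterystaple";
      virtualHost = {
        hostName = "moodle.example.org";
        adminAddr = "webmaster@example.org";
        forceSSL = true;
        enableACME = true;
      };
    };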
diff --git a/nixpkgs/nixos/modules/services/web-apps/netbox.nix b/nixpkgs/nixos/modules/services/web-apps/netbox.nix
new file mode 100644
index 000000000000..3b9434e3d345
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/netbox.nix
@@ -0,0 +1,389 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.netbox;
+  pythonFmt = pkgs.formats.pythonVars {};
+  staticDir = cfg.dataDir + "/static";
+
+  settingsFile = pythonFmt.generate "netbox-settings.py" cfg.settings;
+  extraConfigFile = pkgs.writeTextFile {
+    name = "netbox-extraConfig.py";
+    text = cfg.extraConfig;
+  };
+  configFile = pkgs.concatText "configuration.py" [ settingsFile extraConfigFile ];
+
+  pkg = (cfg.package.overrideAttrs (old: {
+    installPhase = old.installPhase + ''
+      ln -s ${configFile} $out/opt/netbox/netbox/netbox/configuration.py
+    '' + lib.optionalString cfg.enableLdap ''
+      ln -s ${cfg.ldapConfigPath} $out/opt/netbox/netbox/netbox/ldap_config.py
+    '';
+  })).override {
+    inherit (cfg) plugins;
+  };
+  netboxManageScript = with pkgs; (writeScriptBin "netbox-manage" ''
+    #!${stdenv.shell}
+    export PYTHONPATH=${pkg.pythonPath}
+    sudo -u netbox ${pkg}/bin/netbox "$@"
+  '');
+
+in {
+  options.services.netbox = {
+    enable = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable Netbox.
+
+        This module requires a reverse proxy that serves `/static` separately.
+        See this [example](https://github.com/netbox-community/netbox/blob/develop/contrib/nginx.conf/) for how to configure this.
+      '';
+    };
+
+    settings = lib.mkOption {
+      description = lib.mdDoc ''
+        Configuration options to set in `configuration.py`.
+        See the [documentation](https://docs.netbox.dev/en/stable/configuration/) for more possible options.
+      '';
+
+      default = { };
+
+      type = lib.types.submodule {
+        freeformType = pythonFmt.type;
+
+        options = {
+          ALLOWED_HOSTS = lib.mkOption {
+            type = with lib.types; listOf str;
+            default = ["*"];
+            description = lib.mdDoc ''
+              A list of valid fully-qualified domain names (FQDNs) and/or IP
+              addresses that can be used to reach the NetBox service.
+            '';
+          };
+        };
+      };
+    };
+
+    listenAddress = lib.mkOption {
+      type = lib.types.str;
+      default = "[::1]";
+      description = lib.mdDoc ''
+        Address the server will listen on.
+      '';
+    };
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default =
+        if lib.versionAtLeast config.system.stateVersion "23.11"
+        then pkgs.netbox_3_6
+        else if lib.versionAtLeast config.system.stateVersion "23.05"
+        then pkgs.netbox_3_5
+        else pkgs.netbox_3_3;
+      defaultText = lib.literalExpression ''
+        if lib.versionAtLeast config.system.stateVersion "23.11"
+        then pkgs.netbox_3_6
+        else if lib.versionAtLeast config.system.stateVersion "23.05"
+        then pkgs.netbox_3_5
+        else pkgs.netbox_3_3;
+      '';
+      description = lib.mdDoc ''
+        NetBox package to use.
+      '';
+    };
+
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 8001;
+      description = lib.mdDoc ''
+        Port the server will listen on.
+      '';
+    };
+
+    plugins = lib.mkOption {
+      type = with lib.types; functionTo (listOf package);
+      default = _: [];
+      defaultText = lib.literalExpression ''
+        python3Packages: with python3Packages; [];
+      '';
+      description = lib.mdDoc ''
+        Function that takes the NetBox Python package set and returns the list of plugin packages to install.
+      '';
+    };
+
+    dataDir = lib.mkOption {
+      type = lib.types.str;
+      default = "/var/lib/netbox";
+      description = lib.mdDoc ''
+        Storage path of NetBox.
+      '';
+    };
+
+    secretKeyFile = lib.mkOption {
+      type = lib.types.path;
+      description = lib.mdDoc ''
+        Path to a file containing the secret key.
+      '';
+    };
+
+    extraConfig = lib.mkOption {
+      type = lib.types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Additional lines of configuration appended to the `configuration.py`.
+        See the [documentation](https://docs.netbox.dev/en/stable/configuration/) for more possible options.
+      '';
+    };
+
+    enableLdap = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable LDAP authentication for NetBox.
+
+        This requires a configuration file to be passed via `ldapConfigPath`.
+      '';
+    };
+
+    ldapConfigPath = lib.mkOption {
+      type = lib.types.path;
+      default = "";
+      description = lib.mdDoc ''
+        Path to the configuration file for LDAP authentication; it will be loaded as `ldap_config.py`.
+        See the [documentation](https://netbox.readthedocs.io/en/stable/installation/6-ldap/#configuration) for possible options.
+      '';
+      example = ''
+        import ldap
+        from django_auth_ldap.config import LDAPSearch, PosixGroupType
+
+        AUTH_LDAP_SERVER_URI = "ldaps://ldap.example.com/"
+
+        AUTH_LDAP_USER_SEARCH = LDAPSearch(
+            "ou=accounts,ou=posix,dc=example,dc=com",
+            ldap.SCOPE_SUBTREE,
+            "(uid=%(user)s)",
+        )
+
+        AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
+            "ou=groups,ou=posix,dc=example,dc=com",
+            ldap.SCOPE_SUBTREE,
+            "(objectClass=posixGroup)",
+        )
+        AUTH_LDAP_GROUP_TYPE = PosixGroupType()
+
+        # Mirror LDAP group assignments.
+        AUTH_LDAP_MIRROR_GROUPS = True
+
+        # For more granular permissions, we can map LDAP groups to Django groups.
+        AUTH_LDAP_FIND_GROUP_PERMS = True
+      '';
+    };
+    keycloakClientSecret = lib.mkOption {
+      type = with lib.types; nullOr path;
+      default = null;
+      description = lib.mdDoc ''
+        File that contains the keycloak client secret.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.netbox = {
+      plugins = lib.mkIf cfg.enableLdap (ps: [ ps.django-auth-ldap ]);
+      settings = {
+        STATIC_ROOT = staticDir;
+        MEDIA_ROOT = "${cfg.dataDir}/media";
+        REPORTS_ROOT = "${cfg.dataDir}/reports";
+        SCRIPTS_ROOT = "${cfg.dataDir}/scripts";
+
+        GIT_PATH = "${pkgs.gitMinimal}/bin/git";
+
+        DATABASE = {
+          NAME = "netbox";
+          USER = "netbox";
+          HOST = "/run/postgresql";
+        };
+
+        # Redis database settings. Redis is used for caching and for queuing
+        # background tasks such as webhook events. A separate configuration
+        # exists for each. Full connection details are required in both
+        # sections, and it is strongly recommended to use two separate database
+        # IDs.
+        REDIS = {
+            tasks = {
+                URL = "unix://${config.services.redis.servers.netbox.unixSocket}?db=0";
+                SSL = false;
+            };
+            caching =  {
+                URL = "unix://${config.services.redis.servers.netbox.unixSocket}?db=1";
+                SSL = false;
+            };
+        };
+
+        REMOTE_AUTH_BACKEND = lib.mkIf cfg.enableLdap "netbox.authentication.LDAPBackend";
+
+        LOGGING = lib.mkDefault {
+          version = 1;
+
+          formatters.precise.format = "[%(levelname)s@%(name)s] %(message)s";
+
+          handlers.console = {
+            class = "logging.StreamHandler";
+            formatter = "precise";
+          };
+
+          # log to console/systemd instead of file
+          root = {
+            level = "INFO";
+            handlers = [ "console" ];
+          };
+        };
+      };
+
+      extraConfig = ''
+        with open("${cfg.secretKeyFile}", "r") as file:
+            SECRET_KEY = file.readline()
+      '' + (lib.optionalString (cfg.keycloakClientSecret != null) ''
+        with open("${cfg.keycloakClientSecret}", "r") as file:
+            SOCIAL_AUTH_KEYCLOAK_SECRET = file.readline()
+      '');
+    };
+
+    services.redis.servers.netbox.enable = true;
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "netbox" ];
+      ensureUsers = [
+        {
+          name = "netbox";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    environment.systemPackages = [ netboxManageScript ];
+
+    systemd.targets.netbox = {
+      description = "Target for all NetBox services";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "redis-netbox.service" ];
+    };
+
+    systemd.services = let
+      defaultServiceConfig = {
+        WorkingDirectory = "${cfg.dataDir}";
+        User = "netbox";
+        Group = "netbox";
+        StateDirectory = "netbox";
+        StateDirectoryMode = "0750";
+        Restart = "on-failure";
+        RestartSec = 30;
+      };
+    in {
+      netbox = {
+        description = "NetBox WSGI Service";
+        documentation = [ "https://docs.netbox.dev/" ];
+
+        wantedBy = [ "netbox.target" ];
+
+        after = [ "network-online.target" ];
+        wants = [ "network-online.target" ];
+
+        environment.PYTHONPATH = pkg.pythonPath;
+
+        preStart = ''
+          # On the first run, or on upgrade / downgrade, run migrations and related tasks.
+          # This mostly corresponds to upstream NetBox's 'upgrade.sh' script.
+          versionFile="${cfg.dataDir}/version"
+
+          if [[ -e "$versionFile" && "$(cat "$versionFile")" == "${cfg.package.version}" ]]; then
+            exit 0
+          fi
+
+          ${pkg}/bin/netbox migrate
+          ${pkg}/bin/netbox trace_paths --no-input
+          ${pkg}/bin/netbox collectstatic --no-input
+          ${pkg}/bin/netbox remove_stale_contenttypes --no-input
+          # TODO: remove the condition when we remove netbox_3_3
+          ${lib.optionalString
+            (lib.versionAtLeast cfg.package.version "3.5.0")
+            "${pkg}/bin/netbox reindex --lazy"}
+          ${pkg}/bin/netbox clearsessions
+          ${pkg}/bin/netbox clearcache
+
+          echo "${cfg.package.version}" > "$versionFile"
+        '';
+
+        serviceConfig = defaultServiceConfig // {
+          ExecStart = ''
+            ${pkgs.python3Packages.gunicorn}/bin/gunicorn netbox.wsgi \
+              --bind ${cfg.listenAddress}:${toString cfg.port} \
+              --pythonpath ${pkg}/opt/netbox/netbox
+          '';
+          PrivateTmp = true;
+        };
+      };
+
+      netbox-rq = {
+        description = "NetBox Request Queue Worker";
+        documentation = [ "https://docs.netbox.dev/" ];
+
+        wantedBy = [ "netbox.target" ];
+        after = [ "netbox.service" ];
+
+        environment.PYTHONPATH = pkg.pythonPath;
+
+        serviceConfig = defaultServiceConfig // {
+          ExecStart = ''
+            ${pkg}/bin/netbox rqworker high default low
+          '';
+          PrivateTmp = true;
+        };
+      };
+
+      netbox-housekeeping = {
+        description = "NetBox housekeeping job";
+        documentation = [ "https://docs.netbox.dev/" ];
+
+        wantedBy = [ "multi-user.target" ];
+
+        after = [ "network-online.target" "netbox.service" ];
+        wants = [ "network-online.target" ];
+
+        environment.PYTHONPATH = pkg.pythonPath;
+
+        serviceConfig = defaultServiceConfig // {
+          Type = "oneshot";
+          ExecStart = ''
+            ${pkg}/bin/netbox housekeeping
+          '';
+        };
+      };
+    };
+
+    systemd.timers.netbox-housekeeping = {
+      description = "Run NetBox housekeeping job";
+      documentation = [ "https://docs.netbox.dev/" ];
+
+      wantedBy = [ "multi-user.target" ];
+
+      after = [ "network-online.target" "netbox.service" ];
+      wants = [ "network-online.target" ];
+
+      timerConfig = {
+        OnCalendar = "daily";
+        AccuracySec = "1h";
+        Persistent = true;
+      };
+    };
+
+    users.users.netbox = {
+      home = "${cfg.dataDir}";
+      isSystemUser = true;
+      group = "netbox";
+    };
+    users.groups.netbox = {};
+    users.groups."${config.services.redis.servers.netbox.user}".members = [ "netbox" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/nextcloud-notify_push.nix b/nixpkgs/nixos/modules/services/web-apps/nextcloud-notify_push.nix
new file mode 100644
index 000000000000..759daa0c50dc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/nextcloud-notify_push.nix
@@ -0,0 +1,123 @@
+{ config, options, lib, pkgs, ... }:
+
+let
+  cfg = config.services.nextcloud.notify_push;
+  cfgN = config.services.nextcloud;
+in
+{
+  options.services.nextcloud.notify_push = {
+    enable = lib.mkEnableOption (lib.mdDoc "Notify push");
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.nextcloud-notify_push;
+      defaultText = lib.literalMD "pkgs.nextcloud-notify_push";
+      description = lib.mdDoc "Which package to use for notify_push";
+    };
+
+    socketPath = lib.mkOption {
+      type = lib.types.str;
+      default = "/run/nextcloud-notify_push/sock";
+      description = lib.mdDoc "Socket path to use for notify_push";
+    };
+
+    logLevel = lib.mkOption {
+      type = lib.types.enum [ "error" "warn" "info" "debug" "trace" ];
+      default = "error";
+      description = lib.mdDoc "Log level";
+    };
+
+    bendDomainToLocalhost = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to add an entry to `/etc/hosts` for the configured Nextcloud domain to point to `localhost` and add `localhost` to Nextcloud's `trusted_proxies` config option.
+
+        This is useful when nextcloud's domain is not a static IP address and when the reverse proxy cannot be bypassed because the backend connection is done via unix socket.
+      '';
+    };
+  } // (
+    lib.genAttrs [
+      "dbtype"
+      "dbname"
+      "dbuser"
+      "dbpassFile"
+      "dbhost"
+      "dbport"
+      "dbtableprefix"
+    ] (
+      opt: options.services.nextcloud.config.${opt} // {
+        default = config.services.nextcloud.config.${opt};
+        defaultText = "config.services.nextcloud.config.${opt}";
+      }
+    )
+  );
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.nextcloud-notify_push = let
+      nextcloudUrl = "http${lib.optionalString cfgN.https "s"}://${cfgN.hostName}";
+    in {
+      description = "Push daemon for Nextcloud clients";
+      documentation = [ "https://github.com/nextcloud/notify_push" ];
+      after = [
+        "phpfpm-nextcloud.service"
+        "redis-nextcloud.service"
+      ];
+      wantedBy = [ "multi-user.target" ];
+      environment = {
+        NEXTCLOUD_URL = nextcloudUrl;
+        SOCKET_PATH = cfg.socketPath;
+        DATABASE_PREFIX = cfg.dbtableprefix;
+        LOG = cfg.logLevel;
+      };
+      postStart = ''
+        ${cfgN.occ}/bin/nextcloud-occ notify_push:setup ${nextcloudUrl}/push
+      '';
+      script = let
+        dbType = if cfg.dbtype == "pgsql" then "postgresql" else cfg.dbtype;
+        dbUser = lib.optionalString (cfg.dbuser != null) cfg.dbuser;
+        dbPass = lib.optionalString (cfg.dbpassFile != null) ":$DATABASE_PASSWORD";
+        isSocket = lib.hasPrefix "/" (toString cfg.dbhost);
+        dbHost = lib.optionalString (cfg.dbhost != null) (if
+          isSocket then
+            if dbType == "postgresql" then "?host=${cfg.dbhost}" else
+            if dbType == "mysql" then "?socket=${cfg.dbhost}" else throw "unsupported dbtype"
+          else
+            "@${cfg.dbhost}");
+        dbName = lib.optionalString (cfg.dbname != null) "/${cfg.dbname}";
+        dbUrl = "${dbType}://${dbUser}${dbPass}${lib.optionalString (!isSocket) dbHost}${dbName}${lib.optionalString isSocket dbHost}";
+      in lib.optionalString (dbPass != "") ''
+        export DATABASE_PASSWORD="$(<"${cfg.dbpassFile}")"
+      '' + ''
+        export DATABASE_URL="${dbUrl}"
+        ${cfg.package}/bin/notify_push '${cfgN.datadir}/config/config.php'
+      '';
+      serviceConfig = {
+        User = "nextcloud";
+        Group = "nextcloud";
+        RuntimeDirectory = [ "nextcloud-notify_push" ];
+        Restart = "on-failure";
+        RestartSec = "5s";
+      };
+    };
+
+    networking.hosts = lib.mkIf cfg.bendDomainToLocalhost {
+      "127.0.0.1" = [ cfgN.hostName ];
+      "::1" = [ cfgN.hostName ];
+    };
+
+    services = lib.mkMerge [
+      {
+        nginx.virtualHosts.${cfgN.hostName}.locations."^~ /push/" = {
+          proxyPass = "http://unix:${cfg.socketPath}";
+          proxyWebsockets = true;
+          recommendedProxySettings = true;
+        };
+      }
+
+      (lib.mkIf cfg.bendDomainToLocalhost {
+        nextcloud.extraOptions.trusted_proxies = [ "127.0.0.1" "::1" ];
+      })
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/nextcloud.md b/nixpkgs/nixos/modules/services/web-apps/nextcloud.md
new file mode 100644
index 000000000000..ecc7f380592a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/nextcloud.md
@@ -0,0 +1,221 @@
+# Nextcloud {#module-services-nextcloud}
+
+[Nextcloud](https://nextcloud.com/) is an open-source,
+self-hostable cloud platform. The server setup can be automated using
+[services.nextcloud](#opt-services.nextcloud.enable). A
+desktop client is packaged at `pkgs.nextcloud-client`.
+
+The current default used by NixOS is `nextcloud27`, which is also the latest
+major version available.
+
+## Basic usage {#module-services-nextcloud-basic-usage}
+
+Nextcloud is a PHP-based application which requires an HTTP server
+(the [`services.nextcloud`](#opt-services.nextcloud.enable) module
+optionally supports
+[`services.nginx`](#opt-services.nginx.enable) for this).
+
+For the database, you can set
+[`services.nextcloud.config.dbtype`](#opt-services.nextcloud.config.dbtype) to
+either `sqlite` (the default), `mysql`, or `pgsql`. The simplest is `sqlite`,
+which will be automatically created and managed by the application. For the
+last two, you can easily create a local database by setting
+[`services.nextcloud.database.createLocally`](#opt-services.nextcloud.database.createLocally)
+to `true`; Nextcloud will then automatically be configured to connect to it through
+a socket.
+
+A very basic configuration may look like this:
+```
+{ pkgs, ... }:
+{
+  services.nextcloud = {
+    enable = true;
+    hostName = "nextcloud.tld";
+    database.createLocally = true;
+    config = {
+      dbtype = "pgsql";
+      adminpassFile = "/path/to/admin-pass-file";
+    };
+  };
+
+  networking.firewall.allowedTCPPorts = [ 80 443 ];
+}
+```
+
+The `hostName` option is used internally to configure an HTTP
+server using [`PHP-FPM`](https://php-fpm.org/)
+and `nginx`. The `config` attribute set is
+used by the imperative installer and all values are written to an additional file
+to ensure that changes can be applied by changing the module's options.
+
+If the application serves multiple domains (they are checked against
+[`$_SERVER['HTTP_HOST']`](https://www.php.net/manual/en/reserved.variables.server.php)),
+they need to be added to
+[`services.nextcloud.config.extraTrustedDomains`](#opt-services.nextcloud.config.extraTrustedDomains).
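+
+A minimal sketch (the domains are placeholders):
+```
+{
+  services.nextcloud.config.extraTrustedDomains = [
+    "cloud.example.org"
+    "cloud.example.net"
+  ];
+}
+```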
+
+Auto updates for Nextcloud apps can be enabled using
+[`services.nextcloud.autoUpdateApps`](#opt-services.nextcloud.autoUpdateApps.enable).
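+
+For example, to check for app updates daily at 05:00 (the module's default `startAt`):
+```
+{
+  services.nextcloud.autoUpdateApps = {
+    enable = true;
+    startAt = "05:00:00";
+  };
+}
+```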
+
+## Common problems {#module-services-nextcloud-pitfalls-during-upgrade}
+
+  - **General notes.**
+    Unfortunately Nextcloud appears to be very stateful when it comes to
+    managing its own configuration. The config file lives in the home directory
+    of the `nextcloud` user (by default
+    `/var/lib/nextcloud/config/config.php`) and is also used to
+    track several states of the application (e.g., whether installed or not).
+
+    All configuration parameters are also stored in
+    {file}`/var/lib/nextcloud/config/override.config.php` which is generated by
+    the module and linked from the store to ensure that all values from
+    {file}`config.php` can be modified by the module.
+    However {file}`config.php` manages the application's state and shouldn't be
+    touched manually because of that.
+
+    ::: {.warning}
+    Don't delete {file}`config.php`! This file
+    tracks the application's state and a deletion can cause unwanted
+    side-effects!
+    :::
+
+    ::: {.warning}
+    Don't rerun `nextcloud-occ maintenance:install`!
+    This command tries to install the application
+    and can cause unwanted side-effects!
+    :::
+  - **Multiple version upgrades.**
+    Nextcloud doesn't allow moving more than one major version forward. E.g., if you're on
+    `v16`, you cannot upgrade to `v18` directly; you need to upgrade to
+    `v17` first. This is ensured automatically as long as the
+    [stateVersion](#opt-system.stateVersion) is declared properly. In that case
+    the oldest version available (one major behind the one from the previous NixOS
+    release) will be selected by default and the module will generate a warning that reminds
+    the user to upgrade to the latest Nextcloud *after* that deploy. The package can also be
+    pinned explicitly; see the sketch after this list.
+  - **`Error: Command "upgrade" is not defined.`**
+    This error usually occurs if the initial installation
+    ({command}`nextcloud-occ maintenance:install`) has failed. After that, the application
+    is not installed, but an upgrade is attempted anyway. Further context can
+    be found in [NixOS/nixpkgs#111175](https://github.com/NixOS/nixpkgs/issues/111175).
+
+    First of all, it makes sense to find out what went wrong by looking at the logs
+    of the installation via {command}`journalctl -u nextcloud-setup` and try to fix
+    the underlying issue.
+
+    - If this occurs on an *existing* setup, this is most likely because
+      the maintenance mode is active. It can be deactivated by running
+      {command}`nextcloud-occ maintenance:mode --off`. It's advisable though to
+      check the logs first on why the maintenance mode was activated.
+    - ::: {.warning}
+      Only perform the following measures on
+      *freshly installed instances!*
+      :::
+
+      A re-run of the installer can be forced by *deleting*
+      {file}`/var/lib/nextcloud/config/config.php`. This is the only case in which deleting it
+      is advisable, because a fresh install doesn't have any state that can be lost.
+      In case that doesn't help, an entire re-creation can be forced via
+      {command}`rm -rf ~nextcloud/`.
+
+  - **Server-side encryption.**
+    Nextcloud supports [server-side encryption (SSE)](https://docs.nextcloud.com/server/latest/admin_manual/configuration_files/encryption_configuration.html).
+    This is not end-to-end encryption, but it can be used to encrypt files that will be persisted
+    to external storage such as S3.
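+
+A sketch of pinning the package explicitly, as referenced above (`nextcloud27` is only an
+example; pick the major version that is exactly one ahead of your current install):
+```
+{ pkgs, ... }:
+{
+  services.nextcloud.package = pkgs.nextcloud27;
+}
+```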
+
+## Using an alternative webserver as reverse-proxy (e.g. `httpd`) {#module-services-nextcloud-httpd}
+
+By default, `nginx` is used as reverse-proxy for `nextcloud`.
+However, it's possible to use e.g. `httpd` by explicitly disabling
+`nginx` using [](#opt-services.nginx.enable) and fixing the
+settings `listen.owner` & `listen.group` in the
+[corresponding `phpfpm` pool](#opt-services.phpfpm.pools).
+
+An example configuration may look like this:
+```
+{ config, lib, pkgs, ... }: {
+  services.nginx.enable = false;
+  services.nextcloud = {
+    enable = true;
+    hostName = "localhost";
+
+    /* further, required options */
+  };
+  services.phpfpm.pools.nextcloud.settings = {
+    "listen.owner" = config.services.httpd.user;
+    "listen.group" = config.services.httpd.group;
+  };
+  services.httpd = {
+    enable = true;
+    adminAddr = "webmaster@localhost";
+    extraModules = [ "proxy_fcgi" ];
+    virtualHosts."localhost" = {
+      documentRoot = config.services.nextcloud.package;
+      extraConfig = ''
+        <Directory "${config.services.nextcloud.package}">
+          <FilesMatch "\.php$">
+            <If "-f %{REQUEST_FILENAME}">
+              SetHandler "proxy:unix:${config.services.phpfpm.pools.nextcloud.socket}|fcgi://localhost/"
+            </If>
+          </FilesMatch>
+          <IfModule mod_rewrite.c>
+            RewriteEngine On
+            RewriteBase /
+            RewriteRule ^index\.php$ - [L]
+            RewriteCond %{REQUEST_FILENAME} !-f
+            RewriteCond %{REQUEST_FILENAME} !-d
+            RewriteRule . /index.php [L]
+          </IfModule>
+          DirectoryIndex index.php
+          Require all granted
+          Options +FollowSymLinks
+        </Directory>
+      '';
+    };
+  };
+}
+```
+
+## Installing Apps and PHP extensions {#installing-apps-php-extensions-nextcloud}
+
+Nextcloud apps are installed statefully through the web interface.
+Some apps may require extra PHP extensions to be installed.
+This can be configured with the [](#opt-services.nextcloud.phpExtraExtensions) setting.
+
+Alternatively, extra apps can also be declared with the [](#opt-services.nextcloud.extraApps) setting.
+When using this setting, apps can no longer be managed statefully, because that could lead to Nextcloud updating apps
+that are managed by Nix. If you want automatic updates, it is recommended that you use the web interface to install apps.
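+
+A sketch combining both options (the app hash, URL and version are copied from the module's
+own `extraApps` example and may be outdated):
+```
+{ pkgs, ... }:
+{
+  services.nextcloud = {
+    phpExtraExtensions = all: [ all.pdlib all.bz2 ];
+    extraApps = {
+      maps = pkgs.fetchNextcloudApp {
+        name = "maps";
+        sha256 = "007y80idqg6b6zk6kjxg4vgw0z8fsxs9lajnv49vv1zjy6jx2i1i";
+        url = "https://github.com/nextcloud/maps/releases/download/v0.1.9/maps-0.1.9.tar.gz";
+        version = "0.1.9";
+      };
+    };
+  };
+}
+```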
+
+## Maintainer information {#module-services-nextcloud-maintainer-info}
+
+As stated in the previous paragraph, we must provide a clean upgrade-path for Nextcloud
+since it cannot move more than one major version forward on a single upgrade. This chapter
+adds some notes on how Nextcloud updates should be rolled out in the future.
+
+While minor and patch-level updates are no problem and can be done directly in the
+package-expression (and should be backported to supported stable branches after that),
+major-releases should be added in a new attribute (e.g. Nextcloud `v19.0.0`
+should be available in `nixpkgs` as `pkgs.nextcloud19`).
+To provide simple upgrade paths it's generally useful to backport those as well to stable
+branches. As long as the package-default isn't altered, this won't break existing setups.
+After that, the versioning-warning in the `nextcloud`-module should be
+updated to make sure that the
+[package](#opt-services.nextcloud.package)-option selects the latest version
+on fresh setups.
+
+If major releases are abandoned by upstream, we should first check whether they are needed
+in NixOS for a safe upgrade path before removing them. If they are, we should keep those
+packages, but mark them as insecure in an expression like this (in
+`<nixpkgs/pkgs/servers/nextcloud/default.nix>`):
+```
+/* ... */
+{
+  nextcloud17 = generic {
+    version = "17.0.x";
+    sha256 = "0000000000000000000000000000000000000000000000000000";
+    eol = true;
+  };
+}
+```
+
+Ideally we should make sure that it's possible to jump two NixOS versions forward:
+i.e. the warnings and the logic in the module should guide a user upgrading from a
+Nextcloud on e.g. NixOS 19.09 to a Nextcloud on NixOS 20.09.
diff --git a/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix b/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
new file mode 100644
index 000000000000..f1ac3770d403
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/nextcloud.nix
@@ -0,0 +1,1201 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nextcloud;
+  fpm = config.services.phpfpm.pools.nextcloud;
+
+  jsonFormat = pkgs.formats.json {};
+
+  defaultPHPSettings = {
+    short_open_tag = "Off";
+    expose_php = "Off";
+    error_reporting = "E_ALL & ~E_DEPRECATED & ~E_STRICT";
+    display_errors = "stderr";
+    "opcache.enable_cli" = "1";
+    "opcache.interned_strings_buffer" = "8";
+    "opcache.max_accelerated_files" = "10000";
+    "opcache.memory_consumption" = "128";
+    "opcache.revalidate_freq" = "1";
+    "opcache.fast_shutdown" = "1";
+    "openssl.cafile" = "/etc/ssl/certs/ca-certificates.crt";
+    catch_workers_output = "yes";
+  };
+
+  inherit (cfg) datadir;
+
+  phpPackage = cfg.phpPackage.buildEnv {
+    extensions = { enabled, all }:
+      (with all; enabled
+        ++ optional cfg.enableImagemagick imagick
+        # Optionally enabled depending on caching settings
+        ++ optional cfg.caching.apcu apcu
+        ++ optional cfg.caching.redis redis
+        ++ optional cfg.caching.memcached memcached
+      )
+      ++ cfg.phpExtraExtensions all; # Enabled by user
+    extraConfig = toKeyValue cfg.phpOptions;
+  };
+
+  toKeyValue = generators.toKeyValue {
+    mkKeyValue = generators.mkKeyValueDefault {} " = ";
+  };
+
+  occ = pkgs.writeScriptBin "nextcloud-occ" ''
+    #! ${pkgs.runtimeShell}
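+    # Run occ as the nextcloud user (via sudo when invoked by anyone else),
+    # pointing it at this instance's config directory.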
+    cd ${cfg.package}
+    sudo=exec
+    if [[ "$USER" != nextcloud ]]; then
+      sudo='exec /run/wrappers/bin/sudo -u nextcloud --preserve-env=NEXTCLOUD_CONFIG_DIR --preserve-env=OC_PASS'
+    fi
+    export NEXTCLOUD_CONFIG_DIR="${datadir}/config"
+    $sudo \
+      ${phpPackage}/bin/php \
+      occ "$@"
+  '';
+
+  inherit (config.system) stateVersion;
+
+  mysqlLocal = cfg.database.createLocally && cfg.config.dbtype == "mysql";
+  pgsqlLocal = cfg.database.createLocally && cfg.config.dbtype == "pgsql";
+
+  # https://github.com/nextcloud/documentation/pull/11179
+  ocmProviderIsNotAStaticDirAnymore = versionAtLeast cfg.package.version "27.1.2";
+
+in {
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "nextcloud" "config" "adminpass" ] ''
+      Please use `services.nextcloud.config.adminpassFile' instead!
+    '')
+    (mkRemovedOptionModule [ "services" "nextcloud" "config" "dbpass" ] ''
+      Please use `services.nextcloud.config.dbpassFile' instead!
+    '')
+    (mkRemovedOptionModule [ "services" "nextcloud" "nginx" "enable" ] ''
+      The nextcloud module supports `nginx` as reverse-proxy by default and doesn't
+      support other reverse-proxies officially.
+
+      However it's possible to use an alternative reverse-proxy by
+
+        * disabling nginx
+        * setting `listen.owner` & `listen.group` in the phpfpm-pool to a different value
+
+      Further details about this can be found in the `Nextcloud`-section of the NixOS-manual
+      (which can be opened e.g. by running `nixos-help`).
+    '')
+    (mkRemovedOptionModule [ "services" "nextcloud" "enableBrokenCiphersForSSE" ] ''
+      This option has no effect since there's no supported Nextcloud version packaged here
+      using OpenSSL for RC4 SSE.
+    '')
+    (mkRemovedOptionModule [ "services" "nextcloud" "disableImagemagick" ] ''
+      Use services.nextcloud.enableImagemagick instead.
+    '')
+  ];
+
+  options.services.nextcloud = {
+    enable = mkEnableOption (lib.mdDoc "nextcloud");
+
+    hostName = mkOption {
+      type = types.str;
+      description = lib.mdDoc "FQDN for the nextcloud instance.";
+    };
+    home = mkOption {
+      type = types.str;
+      default = "/var/lib/nextcloud";
+      description = lib.mdDoc "Storage path of nextcloud.";
+    };
+    datadir = mkOption {
+      type = types.str;
+      default = config.services.nextcloud.home;
+      defaultText = literalExpression "config.services.nextcloud.home";
+      description = lib.mdDoc ''
+        Nextcloud's data storage path. Will be [](#opt-services.nextcloud.home) by default.
+        This folder will be populated with a config.php file and a data folder which contains the state of the instance (excluding the database).
+      '';
+      example = "/mnt/nextcloud-file";
+    };
+    extraApps = mkOption {
+      type = types.attrsOf types.package;
+      default = { };
+      description = lib.mdDoc ''
+        Extra apps to install. Should be an attrSet of appid to packages generated by fetchNextcloudApp.
+        The appid must be identical to the "id" value in the app's appinfo/info.xml.
+        Using this will disable the appstore to prevent Nextcloud from updating these apps (see [](#opt-services.nextcloud.appstoreEnable)).
+      '';
+      example = literalExpression ''
+        {
+          maps = pkgs.fetchNextcloudApp {
+            name = "maps";
+            sha256 = "007y80idqg6b6zk6kjxg4vgw0z8fsxs9lajnv49vv1zjy6jx2i1i";
+            url = "https://github.com/nextcloud/maps/releases/download/v0.1.9/maps-0.1.9.tar.gz";
+            version = "0.1.9";
+          };
+          phonetrack = pkgs.fetchNextcloudApp {
+            name = "phonetrack";
+            sha256 = "0qf366vbahyl27p9mshfma1as4nvql6w75zy2zk5xwwbp343vsbc";
+            url = "https://gitlab.com/eneiluj/phonetrack-oc/-/wikis/uploads/931aaaf8dca24bf31a7e169a83c17235/phonetrack-0.6.9.tar.gz";
+            version = "0.6.9";
+          };
+        }
+        '';
+    };
+    extraAppsEnable = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Automatically enable the apps in [](#opt-services.nextcloud.extraApps) every time Nextcloud starts.
+        If set to false, apps need to be enabled in the Nextcloud web user interface or with `nextcloud-occ app:enable`.
+      '';
+    };
+    appstoreEnable = mkOption {
+      type = types.nullOr types.bool;
+      default = null;
+      example = true;
+      description = lib.mdDoc ''
+        Allow the installation and updating of apps from the Nextcloud appstore.
+        Enabled by default unless there are packages in [](#opt-services.nextcloud.extraApps).
+        Set this to true to force enable the store even if [](#opt-services.nextcloud.extraApps) is used.
+        Set this to false to disable the installation of apps from the global appstore. App management is always enabled regardless of this setting.
+      '';
+    };
+    logLevel = mkOption {
+      type = types.ints.between 0 4;
+      default = 2;
+      description = lib.mdDoc ''
+        Log level value between 0 (DEBUG) and 4 (FATAL).
+
+        - 0 (debug): Log all activity.
+
+        - 1 (info): Log activity such as user logins and file activities, plus warnings, errors, and fatal errors.
+
+        - 2 (warn): Log successful operations, as well as warnings of potential problems, errors and fatal errors.
+
+        - 3 (error): Log failed operations and fatal errors.
+
+        - 4 (fatal): Log only fatal errors that cause the server to stop.
+      '';
+    };
+    logType = mkOption {
+      type = types.enum [ "errorlog" "file" "syslog" "systemd" ];
+      default = "syslog";
+      description = lib.mdDoc ''
+        Logging backend to use.
+        systemd requires the php-systemd package to be added to services.nextcloud.phpExtraExtensions.
+        See the [nextcloud documentation](https://docs.nextcloud.com/server/latest/admin_manual/configuration_server/logging_configuration.html) for details.
+      '';
+    };
+    https = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Use HTTPS for generated links.";
+    };
+    package = mkOption {
+      type = types.package;
+      description = lib.mdDoc "Which package to use for the Nextcloud instance.";
+      relatedPackages = [ "nextcloud26" "nextcloud27" ];
+    };
+    phpPackage = mkOption {
+      type = types.package;
+      relatedPackages = [ "php81" "php82" ];
+      defaultText = "pkgs.php";
+      description = lib.mdDoc ''
+        PHP package to use for Nextcloud.
+      '';
+    };
+
+    maxUploadSize = mkOption {
+      default = "512M";
+      type = types.str;
+      description = lib.mdDoc ''
+        The upload limit for files. This changes the relevant options
+        in php.ini and nginx if enabled.
+      '';
+    };
+
+    skeletonDirectory = mkOption {
+      default = "";
+      type = types.str;
+      description = lib.mdDoc ''
+        The directory where the skeleton files are located. These files will be
+        copied to the data directory of new users. Leave empty to not copy any
+        skeleton files.
+      '';
+    };
+
+    webfinger = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable this option if you plan on using the webfinger plugin.
+        The appropriate nginx rewrite rules will be added to your configuration.
+      '';
+    };
+
+    phpExtraExtensions = mkOption {
+      type = with types; functionTo (listOf package);
+      default = all: [];
+      defaultText = literalExpression "all: []";
+      description = lib.mdDoc ''
+        Additional PHP extensions to use for Nextcloud.
+        By default, only extensions necessary for a vanilla Nextcloud installation are enabled,
+        but you may choose from the list of available extensions and add further ones.
+        This is sometimes necessary to be able to install a certain Nextcloud app that has additional requirements.
+      '';
+      example = literalExpression ''
+        all: [ all.pdlib all.bz2 ]
+      '';
+    };
+
+    phpOptions = mkOption {
+      type = types.attrsOf types.str;
+      defaultText = literalExpression (generators.toPretty { } defaultPHPSettings);
+      description = lib.mdDoc ''
+        Options for PHP's php.ini file for nextcloud.
+
+        Please note that this option is _additive_ on purpose while the
+        attribute values inside the default are option defaults: that means that
+
+        ```nix
+        {
+          services.nextcloud.phpOptions."opcache.interned_strings_buffer" = "23";
+        }
+        ```
+
+        will override the `php.ini` option `opcache.interned_strings_buffer` without
+        discarding the rest of the defaults.
+
+        Overriding all of `phpOptions` (including `upload_max_filesize`, `post_max_size`
+        and `memory_limit` which all point to [](#opt-services.nextcloud.maxUploadSize)
+        by default) can be done like this:
+
+        ```nix
+        {
+          services.nextcloud.phpOptions = lib.mkForce {
+            /* ... */
+          };
+        }
+        ```
+      '';
+    };
+
+    poolSettings = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = "32";
+        "pm.start_servers" = "2";
+        "pm.min_spare_servers" = "2";
+        "pm.max_spare_servers" = "4";
+        "pm.max_requests" = "500";
+      };
+      description = lib.mdDoc ''
+        Options for nextcloud's PHP pool. See the documentation on `php-fpm.conf` for details on configuration directives.
+      '';
+    };
+
+    poolConfig = mkOption {
+      type = types.nullOr types.lines;
+      default = null;
+      description = lib.mdDoc ''
+        Options for Nextcloud's PHP pool. See the documentation on `php-fpm.conf` for details on configuration directives.
+      '';
+    };
+
+    fastcgiTimeout = mkOption {
+      type = types.int;
+      default = 120;
+      description = lib.mdDoc ''
+        FastCGI timeout for database connection in seconds.
+      '';
+    };
+
+    database = {
+
+      createLocally = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to create the database and database user locally.
+        '';
+      };
+
+    };
+
+
+    config = {
+      dbtype = mkOption {
+        type = types.enum [ "sqlite" "pgsql" "mysql" ];
+        default = "sqlite";
+        description = lib.mdDoc "Database type.";
+      };
+      dbname = mkOption {
+        type = types.nullOr types.str;
+        default = "nextcloud";
+        description = lib.mdDoc "Database name.";
+      };
+      dbuser = mkOption {
+        type = types.nullOr types.str;
+        default = "nextcloud";
+        description = lib.mdDoc "Database user.";
+      };
+      dbpassFile = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc ''
+          The full path to a file that contains the database password.
+        '';
+      };
+      dbhost = mkOption {
+        type = types.nullOr types.str;
+        default =
+          if pgsqlLocal then "/run/postgresql"
+          else if mysqlLocal then "localhost:/run/mysqld/mysqld.sock"
+          else "localhost";
+        defaultText = "localhost";
+        description = lib.mdDoc ''
+          Database host or socket path.
+          If [](#opt-services.nextcloud.database.createLocally) is true and
+          [](#opt-services.nextcloud.config.dbtype) is either `pgsql` or `mysql`,
+          defaults to the correct Unix socket instead.
+        '';
+      };
+      dbport = mkOption {
+        type = with types; nullOr (either int str);
+        default = null;
+        description = lib.mdDoc "Database port.";
+      };
+      dbtableprefix = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "Table prefix in Nextcloud's database.";
+      };
+      adminuser = mkOption {
+        type = types.str;
+        default = "root";
+        description = lib.mdDoc ''
+          Username for the admin account. The username is only set during the
+          initial setup of Nextcloud! Since the username also acts as unique
+          ID internally, it cannot be changed later!
+        '';
+      };
+      adminpassFile = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The full path to a file that contains the admin's password. Must be
+          readable by user `nextcloud`. The password is set only in the initial
+          setup of Nextcloud by the systemd service `nextcloud-setup.service`.
+        '';
+      };
+
+      extraTrustedDomains = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Trusted domains from which the Nextcloud installation will be
+          accessible. You don't need to add
+          `services.nextcloud.hostName` here.
+        '';
+      };
+
+      trustedProxies = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc ''
+          Trusted proxies to provide if the Nextcloud installation is being
+          proxied, to secure against e.g. spoofing.
+        '';
+      };
+
+      overwriteProtocol = mkOption {
+        type = types.nullOr (types.enum [ "http" "https" ]);
+        default = null;
+        example = "https";
+
+        description = lib.mdDoc ''
+          Force Nextcloud to always use HTTP or HTTPS, e.g. for link generation.
+          By default Nextcloud uses the protocol of the current request, but when
+          behind a reverse-proxy it may see `http` for everything although
+          Nextcloud is actually served via HTTPS.
+        '';
+      };
+
+      defaultPhoneRegion = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        example = "DE";
+        description = lib.mdDoc ''
+          An [ISO 3166-1](https://www.iso.org/iso-3166-country-codes.html)
+          country code which replaces automatic phone-number detection
+          without a country code.
+
+          As an example, with `DE` set as the default phone region,
+          the `+49` prefix can be omitted for phone numbers.
+        '';
+      };
+
+      objectstore = {
+        s3 = {
+          enable = mkEnableOption (lib.mdDoc ''
+            S3 object storage as primary storage.
+
+            This mounts a bucket on an Amazon S3 object storage or compatible
+            implementation into the virtual filesystem.
+
+            Further details about this feature can be found in the
+            [upstream documentation](https://docs.nextcloud.com/server/22/admin_manual/configuration_files/primary_storage.html).
+          '');
+          bucket = mkOption {
+            type = types.str;
+            example = "nextcloud";
+            description = lib.mdDoc ''
+              The name of the S3 bucket.
+            '';
+          };
+          autocreate = mkOption {
+            type = types.bool;
+            description = lib.mdDoc ''
+              Create the objectstore if it does not exist.
+            '';
+          };
+          key = mkOption {
+            type = types.str;
+            example = "EJ39ITYZEUH5BGWDRUFY";
+            description = lib.mdDoc ''
+              The access key for the S3 bucket.
+            '';
+          };
+          secretFile = mkOption {
+            type = types.str;
+            example = "/var/nextcloud-objectstore-s3-secret";
+            description = lib.mdDoc ''
+              The full path to a file that contains the access secret. Must be
+              readable by user `nextcloud`.
+            '';
+          };
+          hostname = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "example.com";
+            description = lib.mdDoc ''
+              Required for some non-Amazon implementations.
+            '';
+          };
+          port = mkOption {
+            type = types.nullOr types.port;
+            default = null;
+            description = lib.mdDoc ''
+              Required for some non-Amazon implementations.
+            '';
+          };
+          useSsl = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc ''
+              Use SSL for objectstore access.
+            '';
+          };
+          region = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "REGION";
+            description = lib.mdDoc ''
+              Required for some non-Amazon implementations.
+            '';
+          };
+          usePathStyle = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Required for some non-Amazon S3 implementations.
+
+              Ordinarily, requests will be made with
+              `http://bucket.hostname.domain/`, but with path style
+              enabled requests are made with
+              `http://hostname.domain/bucket` instead.
+            '';
+          };
+          sseCKeyFile = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            example = "/var/nextcloud-objectstore-s3-sse-c-key";
+            description = lib.mdDoc ''
+              If provided this is the full path to a file that contains the key
+              to enable [server-side encryption with customer-provided keys][1]
+              (SSE-C).
+
+              The file must contain a random 32-byte key encoded as a base64
+              string, e.g. generated with the command
+
+              ```
+              openssl rand 32 | base64
+              ```
+
+              Must be readable by user `nextcloud`.
+
+              [1]: https://docs.aws.amazon.com/AmazonS3/latest/userguide/ServerSideEncryptionCustomerKeys.html
+            '';
+          };
+        };
+      };
+    };
+
+    enableImagemagick = mkEnableOption (lib.mdDoc ''
+        the ImageMagick module for PHP.
+        This is used by the theming app and for generating previews of certain images (e.g. SVG and HEIF).
+        You may want to disable it for increased security. In that case, previews will still be available
+        for some images (e.g. JPEG and PNG).
+        See <https://github.com/nextcloud/server/issues/13099>.
+    '') // {
+      default = true;
+    };
+
+    configureRedis = lib.mkOption {
+      type = lib.types.bool;
+      default = config.services.nextcloud.notify_push.enable;
+      defaultText = literalExpression "config.services.nextcloud.notify_push.enable";
+      description = lib.mdDoc ''
+        Whether to configure Nextcloud to use the recommended Redis settings for small instances.
+
+        ::: {.note}
+        The `notify_push` app requires Redis to be configured. If this option is turned off, this must be configured manually.
+        :::
+      '';
+    };
+
+    caching = {
+      apcu = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to load the APCu module into PHP.
+        '';
+      };
+      redis = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to load the Redis module into PHP.
+          You still need to enable Redis in your config.php.
+          See https://docs.nextcloud.com/server/14/admin_manual/configuration_server/caching_configuration.html
+        '';
+      };
+      memcached = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to load the Memcached module into PHP.
+          You still need to enable Memcached in your config.php.
+          See https://docs.nextcloud.com/server/14/admin_manual/configuration_server/caching_configuration.html
+        '';
+      };
+    };
+    autoUpdateApps = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Run a regular auto-update of all apps installed from the Nextcloud app store.
+        '';
+      };
+      startAt = mkOption {
+        type = with types; either str (listOf str);
+        default = "05:00:00";
+        example = "Sun 14:00:00";
+        description = lib.mdDoc ''
+          When to run the update. See `systemd.services.<name>.startAt`.
+        '';
+      };
+    };
+    occ = mkOption {
+      type = types.package;
+      default = occ;
+      defaultText = literalMD "generated script";
+      internal = true;
+      description = lib.mdDoc ''
+        The nextcloud-occ program preconfigured to target this Nextcloud instance.
+      '';
+    };
+    globalProfiles = mkEnableOption (lib.mdDoc "global profiles") // {
+      description = lib.mdDoc ''
+        Makes user-profiles globally available under `nextcloud.tld/u/user.name`.
+        Even though it's enabled by default in Nextcloud, it must be explicitly enabled
+        here because it has the side effect that personal information is accessible even to
+        unauthenticated users by default.
+
+        By default, the following properties are set to “Show to everyone”
+        if this flag is enabled:
+        - About
+        - Full name
+        - Headline
+        - Organisation
+        - Profile picture
+        - Role
+        - Twitter
+        - Website
+
+        Only has an effect in Nextcloud 23 and later.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = jsonFormat.type;
+      default = {};
+      description = lib.mdDoc ''
+        Extra options which should be appended to Nextcloud's config.php file.
+      '';
+      example = literalExpression '' {
+        redis = {
+          host = "/run/redis/redis.sock";
+          port = 0;
+          dbindex = 0;
+          password = "secret";
+          timeout = 1.5;
+        };
+      } '';
+    };
+
+    secretFile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Secret options which will be appended to Nextcloud's config.php file (written as JSON, in the same
+        form as the [](#opt-services.nextcloud.extraOptions) option), for example
+        `{"redis":{"password":"secret"}}`.
+      '';
+    };
+
+    nginx = {
+      recommendedHttpHeaders = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Enable additional recommended HTTP response headers";
+      };
+      hstsMaxAge = mkOption {
+        type = types.ints.positive;
+        default = 15552000;
+        description = lib.mdDoc ''
+          Value for the `max-age` directive of the HTTP
+          `Strict-Transport-Security` header.
+
+          See section 6.1.1 of IETF RFC 6797 for detailed information on this
+          directive and header.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable (mkMerge [
+    { warnings = let
+        latest = 27;
+        upgradeWarning = major: nixos:
+          ''
+            A legacy Nextcloud install (from before NixOS ${nixos}) may be installed.
+
+            After nextcloud${toString major} is installed successfully, you can safely upgrade
+            to ${toString (major + 1)}. The latest version available is Nextcloud${toString latest}.
+
+            Please note that Nextcloud doesn't support upgrades across multiple major versions
+            (i.e. an upgrade from 16 to 17 is possible, but not from 16 to 18).
+
+            The package can be upgraded by explicitly declaring the service-option
+            `services.nextcloud.package`.
+          '';
+
+      in (optional (cfg.poolConfig != null) ''
+          Using config.services.nextcloud.poolConfig is deprecated and will become unsupported in a future release.
+          Please migrate your configuration to config.services.nextcloud.poolSettings.
+        '')
+        ++ (optional (versionOlder cfg.package.version "25") (upgradeWarning 24 "22.11"))
+        ++ (optional (versionOlder cfg.package.version "26") (upgradeWarning 25 "23.05"))
+        ++ (optional (versionOlder cfg.package.version "27") (upgradeWarning 26 "23.11"));
+
+      services.nextcloud.package = with pkgs;
+        mkDefault (
+          if pkgs ? nextcloud
+            then throw ''
+              The `pkgs.nextcloud`-attribute has been removed. If it's supposed to be the default
+              nextcloud defined in an overlay, please set `services.nextcloud.package` to
+              `pkgs.nextcloud`.
+            ''
+          else if versionOlder stateVersion "22.11" then nextcloud24
+          else if versionOlder stateVersion "23.05" then nextcloud25
+          else if versionOlder stateVersion "23.11" then nextcloud26
+          else nextcloud27
+        );
+
+      services.nextcloud.phpPackage =
+        if versionOlder cfg.package.version "26" then pkgs.php81
+        else pkgs.php82;
+
+      services.nextcloud.phpOptions = mkMerge [
+        (mapAttrs (const mkOptionDefault) defaultPHPSettings)
+        {
+          upload_max_filesize = cfg.maxUploadSize;
+          post_max_size = cfg.maxUploadSize;
+          memory_limit = cfg.maxUploadSize;
+        }
+        (mkIf cfg.caching.apcu {
+          "apc.enable_cli" = "1";
+        })
+      ];
+    }
+
+    { assertions = [
+      { assertion = cfg.database.createLocally -> cfg.config.dbpassFile == null;
+        message = ''
+          Using `services.nextcloud.database.createLocally` with database
+          password authentication is no longer supported.
+
+          If you use an external database (or want to use password auth for any
+          other reason), set `services.nextcloud.database.createLocally` to
+          `false`. The database won't be managed for you (use `services.mysql`
+          if you want to set it up).
+
+          If you want this module to manage your nextcloud database for you,
+          unset `services.nextcloud.config.dbpassFile` and
+          `services.nextcloud.config.dbhost` to use socket authentication
+          instead of password.
+        '';
+      }
+    ]; }
+
+    { systemd.timers.nextcloud-cron = {
+        wantedBy = [ "timers.target" ];
+        after = [ "nextcloud-setup.service" ];
+        timerConfig.OnBootSec = "5m";
+        timerConfig.OnUnitActiveSec = "5m";
+        timerConfig.Unit = "nextcloud-cron.service";
+      };
+
+      systemd.tmpfiles.rules = ["d ${cfg.home} 0750 nextcloud nextcloud"];
+
+      systemd.services = {
+        # When upgrading the Nextcloud package, Nextcloud can report errors such as
+        # "The files of the app [all apps in /var/lib/nextcloud/apps] were not replaced correctly"
+        # Restarting phpfpm on Nextcloud package update fixes these issues (but this is a workaround).
+        phpfpm-nextcloud.restartTriggers = [ cfg.package ];
+
+        nextcloud-setup = let
+          c = cfg.config;
+          writePhpArray = a: "[${concatMapStringsSep "," (val: ''"${toString val}"'') a}]";
+          requiresReadSecretFunction = c.dbpassFile != null || c.objectstore.s3.enable;
+          objectstoreConfig = let s3 = c.objectstore.s3; in optionalString s3.enable ''
+            'objectstore' => [
+              'class' => '\\OC\\Files\\ObjectStore\\S3',
+              'arguments' => [
+                'bucket' => '${s3.bucket}',
+                'autocreate' => ${boolToString s3.autocreate},
+                'key' => '${s3.key}',
+                'secret' => nix_read_secret('${s3.secretFile}'),
+                ${optionalString (s3.hostname != null) "'hostname' => '${s3.hostname}',"}
+                ${optionalString (s3.port != null) "'port' => ${toString s3.port},"}
+                'use_ssl' => ${boolToString s3.useSsl},
+                ${optionalString (s3.region != null) "'region' => '${s3.region}',"}
+                'use_path_style' => ${boolToString s3.usePathStyle},
+                ${optionalString (s3.sseCKeyFile != null) "'sse_c_key' => nix_read_secret('${s3.sseCKeyFile}'),"}
+              ],
+            ]
+          '';
+
+          showAppStoreSetting = cfg.appstoreEnable != null || cfg.extraApps != {};
+          renderedAppStoreSetting =
+            let
+              x = cfg.appstoreEnable;
+            in
+              if x == null then "false"
+              else boolToString x;
+
+          nextcloudGreaterOrEqualThan = req: versionAtLeast cfg.package.version req;
+
+          overrideConfig = pkgs.writeText "nextcloud-config.php" ''
+            <?php
+            ${optionalString requiresReadSecretFunction ''
+              function nix_read_secret($file) {
+                if (!file_exists($file)) {
+                  throw new \RuntimeException(sprintf(
+                    "Cannot start Nextcloud, secret file %s set by NixOS doesn't seem to "
+                    . "exist! Please make sure that the file exists and has appropriate "
+                    . "permissions for user & group 'nextcloud'!",
+                    $file
+                  ));
+                }
+                return trim(file_get_contents($file));
+              }''}
+            function nix_decode_json_file($file, $error) {
+              if (!file_exists($file)) {
+                throw new \RuntimeException(sprintf($error, $file));
+              }
+              $decoded = json_decode(file_get_contents($file), true);
+
+              if (json_last_error() !== JSON_ERROR_NONE) {
+                throw new \RuntimeException(sprintf("Cannot decode %s, because: %s", $file, json_last_error_msg()));
+              }
+
+              return $decoded;
+            }
+            $CONFIG = [
+              'apps_paths' => [
+                ${optionalString (cfg.extraApps != { }) "[ 'path' => '${cfg.home}/nix-apps', 'url' => '/nix-apps', 'writable' => false ],"}
+                [ 'path' => '${cfg.home}/apps', 'url' => '/apps', 'writable' => false ],
+                [ 'path' => '${cfg.home}/store-apps', 'url' => '/store-apps', 'writable' => true ],
+              ],
+              ${optionalString (showAppStoreSetting) "'appstoreenabled' => ${renderedAppStoreSetting},"}
+              'datadirectory' => '${datadir}/data',
+              'skeletondirectory' => '${cfg.skeletonDirectory}',
+              ${optionalString cfg.caching.apcu "'memcache.local' => '\\OC\\Memcache\\APCu',"}
+              'log_type' => '${cfg.logType}',
+              'loglevel' => '${builtins.toString cfg.logLevel}',
+              ${optionalString (c.overwriteProtocol != null) "'overwriteprotocol' => '${c.overwriteProtocol}',"}
+              ${optionalString (c.dbname != null) "'dbname' => '${c.dbname}',"}
+              ${optionalString (c.dbhost != null) "'dbhost' => '${c.dbhost}',"}
+              ${optionalString (c.dbport != null) "'dbport' => '${toString c.dbport}',"}
+              ${optionalString (c.dbuser != null) "'dbuser' => '${c.dbuser}',"}
+              ${optionalString (c.dbtableprefix != null) "'dbtableprefix' => '${toString c.dbtableprefix}',"}
+              ${optionalString (c.dbpassFile != null) ''
+                  'dbpassword' => nix_read_secret(
+                    "${c.dbpassFile}"
+                  ),
+                ''
+              }
+              'dbtype' => '${c.dbtype}',
+              'trusted_domains' => ${writePhpArray ([ cfg.hostName ] ++ c.extraTrustedDomains)},
+              'trusted_proxies' => ${writePhpArray (c.trustedProxies)},
+              ${optionalString (c.defaultPhoneRegion != null) "'default_phone_region' => '${c.defaultPhoneRegion}',"}
+              ${optionalString (nextcloudGreaterOrEqualThan "23") "'profile.enabled' => ${boolToString cfg.globalProfiles},"}
+              ${objectstoreConfig}
+            ];
+
+            $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
+              "${jsonFormat.generate "nextcloud-extraOptions.json" cfg.extraOptions}",
+              "impossible: this should never happen (decoding generated extraOptions file %s failed)"
+            ));
+
+            ${optionalString (cfg.secretFile != null) ''
+              $CONFIG = array_replace_recursive($CONFIG, nix_decode_json_file(
+                "${cfg.secretFile}",
+                "Cannot start Nextcloud, secrets file %s set by NixOS doesn't exist!"
+              ));
+            ''}
+          '';
+          occInstallCmd = let
+            mkExport = { arg, value }: "export ${arg}=${value}";
+            dbpass = {
+              arg = "DBPASS";
+              value = if c.dbpassFile != null
+                then ''"$(<"${toString c.dbpassFile}")"''
+                else ''""'';
+            };
+            adminpass = {
+              arg = "ADMINPASS";
+              value = ''"$(<"${toString c.adminpassFile}")"'';
+            };
+            installFlags = concatStringsSep " \\\n    "
+              (mapAttrsToList (k: v: "${k} ${toString v}") {
+              "--database" = ''"${c.dbtype}"'';
+              # The following attributes are optional depending on the type of
+              # database.  Those that evaluate to null on the left hand side
+              # will be omitted.
+              ${if c.dbname != null then "--database-name" else null} = ''"${c.dbname}"'';
+              ${if c.dbhost != null then "--database-host" else null} = ''"${c.dbhost}"'';
+              ${if c.dbport != null then "--database-port" else null} = ''"${toString c.dbport}"'';
+              ${if c.dbuser != null then "--database-user" else null} = ''"${c.dbuser}"'';
+              "--database-pass" = "\"\$${dbpass.arg}\"";
+              "--admin-user" = ''"${c.adminuser}"'';
+              "--admin-pass" = "\"\$${adminpass.arg}\"";
+              "--data-dir" = ''"${datadir}/data"'';
+            });
+          in ''
+            ${mkExport dbpass}
+            ${mkExport adminpass}
+            ${occ}/bin/nextcloud-occ maintenance:install \
+                ${installFlags}
+          '';
+          occSetTrustedDomainsCmd = concatStringsSep "\n" (imap0
+            (i: v: ''
+              ${occ}/bin/nextcloud-occ config:system:set trusted_domains \
+                ${toString i} --value="${toString v}"
+            '') ([ cfg.hostName ] ++ cfg.config.extraTrustedDomains));
+
+        in {
+          wantedBy = [ "multi-user.target" ];
+          before = [ "phpfpm-nextcloud.service" ];
+          after = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+          requires = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+          path = [ occ ];
+          script = ''
+            ${optionalString (c.dbpassFile != null) ''
+              if [ ! -r "${c.dbpassFile}" ]; then
+                echo "dbpassFile ${c.dbpassFile} is not readable by nextcloud:nextcloud! Aborting..."
+                exit 1
+              fi
+              if [ -z "$(<${c.dbpassFile})" ]; then
+                echo "dbpassFile ${c.dbpassFile} is empty!"
+                exit 1
+              fi
+            ''}
+            if [ ! -r "${c.adminpassFile}" ]; then
+              echo "adminpassFile ${c.adminpassFile} is not readable by nextcloud:nextcloud! Aborting..."
+              exit 1
+            fi
+            if [ -z "$(<${c.adminpassFile})" ]; then
+              echo "adminpassFile ${c.adminpassFile} is empty!"
+              exit 1
+            fi
+
+            ln -sf ${cfg.package}/apps ${cfg.home}/
+
+            # Install extra apps
+            ln -sfT \
+              ${pkgs.linkFarm "nix-apps"
+                (mapAttrsToList (name: path: { inherit name path; }) cfg.extraApps)} \
+              ${cfg.home}/nix-apps
+
+            # Create Nextcloud directories.
+            # If the directories already exist with the wrong permissions, fix that.
+            for dir in ${datadir}/config ${datadir}/data ${cfg.home}/store-apps ${cfg.home}/nix-apps; do
+              if [ ! -e $dir ]; then
+                install -o nextcloud -g nextcloud -d $dir
+              elif [ $(stat -c "%G" $dir) != "nextcloud" ]; then
+                chgrp -R nextcloud $dir
+              fi
+            done
+
+            ln -sf ${overrideConfig} ${datadir}/config/override.config.php
+
+            # Do not install if already installed
+            if [[ ! -e ${datadir}/config/config.php ]]; then
+              ${occInstallCmd}
+            fi
+
+            ${occ}/bin/nextcloud-occ upgrade
+
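+            # Rebuild the trusted_domains list from the NixOS configuration:
+            # delete the existing entries, then re-add hostName and
+            # extraTrustedDomains further below.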
+            ${occ}/bin/nextcloud-occ config:system:delete trusted_domains
+
+            ${optionalString (cfg.extraAppsEnable && cfg.extraApps != { }) ''
+                # Try to enable apps
+                ${occ}/bin/nextcloud-occ app:enable ${concatStringsSep " " (attrNames cfg.extraApps)}
+            ''}
+
+            ${occSetTrustedDomainsCmd}
+          '';
+          serviceConfig.Type = "oneshot";
+          serviceConfig.User = "nextcloud";
+          # On Nextcloud ≥ 26, it is not necessary to patch the database files to prevent
+          # an automatic creation of the database user.
+          environment.NC_setup_create_db_user = lib.mkIf (nextcloudGreaterOrEqualThan "26") "false";
+        };
+        nextcloud-cron = {
+          after = [ "nextcloud-setup.service" ];
+          environment.NEXTCLOUD_CONFIG_DIR = "${datadir}/config";
+          serviceConfig.Type = "oneshot";
+          serviceConfig.User = "nextcloud";
+          serviceConfig.ExecStart = "${phpPackage}/bin/php -f ${cfg.package}/cron.php";
+        };
+        nextcloud-update-plugins = mkIf cfg.autoUpdateApps.enable {
+          after = [ "nextcloud-setup.service" ];
+          serviceConfig.Type = "oneshot";
+          serviceConfig.ExecStart = "${occ}/bin/nextcloud-occ app:update --all";
+          serviceConfig.User = "nextcloud";
+          startAt = cfg.autoUpdateApps.startAt;
+        };
+      };
+
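+      # The PHP-FPM pool runs as the nextcloud user; its listening socket is
+      # owned by the nginx user so nginx can pass PHP requests to it via
+      # fastcgi_pass below.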
+      services.phpfpm = {
+        pools.nextcloud = {
+          user = "nextcloud";
+          group = "nextcloud";
+          phpPackage = phpPackage;
+          phpEnv = {
+            NEXTCLOUD_CONFIG_DIR = "${datadir}/config";
+            PATH = "/run/wrappers/bin:/nix/var/nix/profiles/default/bin:/run/current-system/sw/bin:/usr/bin:/bin";
+          };
+          settings = mapAttrs (name: mkDefault) {
+            "listen.owner" = config.services.nginx.user;
+            "listen.group" = config.services.nginx.group;
+          } // cfg.poolSettings;
+          extraConfig = cfg.poolConfig;
+        };
+      };
+
+      users.users.nextcloud = {
+        home = "${cfg.home}";
+        group = "nextcloud";
+        isSystemUser = true;
+      };
+      users.groups.nextcloud.members = [ "nextcloud" config.services.nginx.user ];
+
+      environment.systemPackages = [ occ ];
+
+      services.mysql = lib.mkIf mysqlLocal {
+        enable = true;
+        package = lib.mkDefault pkgs.mariadb;
+        ensureDatabases = [ cfg.config.dbname ];
+        ensureUsers = [{
+          name = cfg.config.dbuser;
+          ensurePermissions = { "${cfg.config.dbname}.*" = "ALL PRIVILEGES"; };
+        }];
+      };
+
+      services.postgresql = mkIf pgsqlLocal {
+        enable = true;
+        ensureDatabases = [ cfg.config.dbname ];
+        ensureUsers = [{
+          name = cfg.config.dbuser;
+          ensureDBOwnership = true;
+        }];
+      };
+
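+      # When configureRedis is enabled, a dedicated local Redis instance is
+      # started and Nextcloud's distributed and locking memcache are pointed at
+      # its unix socket (port 0, since no TCP port is used).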
+      services.redis.servers.nextcloud = lib.mkIf cfg.configureRedis {
+        enable = true;
+        user = "nextcloud";
+      };
+
+      services.nextcloud = lib.mkIf cfg.configureRedis {
+        caching.redis = true;
+        extraOptions = {
+          "memcache.distributed" = ''\OC\Memcache\Redis'';
+          "memcache.locking" = ''\OC\Memcache\Redis'';
+          redis = {
+            host = config.services.redis.servers.nextcloud.unixSocket;
+            port = 0;
+          };
+        };
+      };
+
+      services.nginx.enable = mkDefault true;
+
+      services.nginx.virtualHosts.${cfg.hostName} = {
+        root = cfg.package;
+        locations = {
+          "= /robots.txt" = {
+            priority = 100;
+            extraConfig = ''
+              allow all;
+              access_log off;
+            '';
+          };
+          "= /" = {
+            priority = 100;
+            extraConfig = ''
+              if ( $http_user_agent ~ ^DavClnt ) {
+                return 302 /remote.php/webdav/$is_args$args;
+              }
+            '';
+          };
+          "~ ^/store-apps" = {
+            priority = 201;
+            extraConfig = "root ${cfg.home};";
+          };
+          "~ ^/nix-apps" = {
+            priority = 201;
+            extraConfig = "root ${cfg.home};";
+          };
+          "^~ /.well-known" = {
+            priority = 210;
+            extraConfig = ''
+              absolute_redirect off;
+              location = /.well-known/carddav {
+                return 301 /remote.php/dav;
+              }
+              location = /.well-known/caldav {
+                return 301 /remote.php/dav;
+              }
+              location ~ ^/\.well-known/(?!acme-challenge|pki-validation) {
+                return 301 /index.php$request_uri;
+              }
+              try_files $uri $uri/ =404;
+            '';
+          };
+          "~ ^/(?:build|tests|config|lib|3rdparty|templates|data)(?:$|/)" = {
+            priority = 450;
+            extraConfig = ''
+              return 404;
+            '';
+          };
+          "~ ^/(?:\\.|autotest|occ|issue|indie|db_|console)" = {
+            priority = 450;
+            extraConfig = ''
+              return 404;
+            '';
+          };
+          "~ \\.php(?:$|/)" = {
+            priority = 500;
+            extraConfig = ''
+              # legacy support (i.e. static files and directories in cfg.package)
+              rewrite ^/(?!index|remote|public|cron|core\/ajax\/update|status|ocs\/v[12]|updater\/.+|oc[s${optionalString (!ocmProviderIsNotAStaticDirAnymore) "m"}]-provider\/.+|.+\/richdocumentscode\/proxy) /index.php$request_uri;
+              include ${config.services.nginx.package}/conf/fastcgi.conf;
+              fastcgi_split_path_info ^(.+?\.php)(\\/.*)$;
+              set $path_info $fastcgi_path_info;
+              try_files $fastcgi_script_name =404;
+              fastcgi_param PATH_INFO $path_info;
+              fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+              fastcgi_param HTTPS ${if cfg.https then "on" else "off"};
+              fastcgi_param modHeadersAvailable true;
+              fastcgi_param front_controller_active true;
+              fastcgi_pass unix:${fpm.socket};
+              fastcgi_intercept_errors on;
+              fastcgi_request_buffering off;
+              fastcgi_read_timeout ${builtins.toString cfg.fastcgiTimeout}s;
+            '';
+          };
+          "~ \\.(?:css|js|mjs|svg|gif|png|jpg|jpeg|ico|wasm|tflite|map|html|ttf|bcmap|mp4|webm)$".extraConfig = ''
+            try_files $uri /index.php$request_uri;
+            expires 6M;
+            access_log off;
+            location ~ \.wasm$ {
+              default_type application/wasm;
+            }
+          '';
+          "~ ^\\/(?:updater|ocs-provider${optionalString (!ocmProviderIsNotAStaticDirAnymore) "|ocm-provider"})(?:$|\\/)".extraConfig = ''
+            try_files $uri/ =404;
+            index index.php;
+          '';
+          "/remote" = {
+            priority = 1500;
+            extraConfig = ''
+              return 301 /remote.php$request_uri;
+            '';
+          };
+          "/" = {
+            priority = 1600;
+            extraConfig = ''
+              try_files $uri $uri/ /index.php$request_uri;
+            '';
+          };
+        };
+        extraConfig = ''
+          index index.php index.html /index.php$request_uri;
+          ${optionalString (cfg.nginx.recommendedHttpHeaders) ''
+            add_header X-Content-Type-Options nosniff;
+            add_header X-XSS-Protection "1; mode=block";
+            add_header X-Robots-Tag "noindex, nofollow";
+            add_header X-Download-Options noopen;
+            add_header X-Permitted-Cross-Domain-Policies none;
+            add_header X-Frame-Options sameorigin;
+            add_header Referrer-Policy no-referrer;
+          ''}
+          ${optionalString (cfg.https) ''
+            add_header Strict-Transport-Security "max-age=${toString cfg.nginx.hstsMaxAge}; includeSubDomains" always;
+          ''}
+          client_max_body_size ${cfg.maxUploadSize};
+          fastcgi_buffers 64 4K;
+          fastcgi_hide_header X-Powered-By;
+          gzip on;
+          gzip_vary on;
+          gzip_comp_level 4;
+          gzip_min_length 256;
+          gzip_proxied expired no-cache no-store private no_last_modified no_etag auth;
+          gzip_types application/atom+xml application/javascript application/json application/ld+json application/manifest+json application/rss+xml application/vnd.geo+json application/vnd.ms-fontobject application/x-font-ttf application/x-web-app-manifest+json application/xhtml+xml application/xml font/opentype image/bmp image/svg+xml image/x-icon text/cache-manifest text/css text/plain text/vcard text/vnd.rim.location.xloc text/vtt text/x-component text/x-cross-domain-policy;
+
+          ${optionalString cfg.webfinger ''
+            rewrite ^/.well-known/host-meta /public.php?service=host-meta last;
+            rewrite ^/.well-known/host-meta.json /public.php?service=host-meta-json last;
+          ''}
+        '';
+      };
+    }
+  ]);
+
+  meta.doc = ./nextcloud.md;
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/nexus.nix b/nixpkgs/nixos/modules/services/web-apps/nexus.nix
new file mode 100644
index 000000000000..c67562d38992
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/nexus.nix
@@ -0,0 +1,152 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.nexus;
+
+in
+{
+  options = {
+    services.nexus = {
+      enable = mkEnableOption (lib.mdDoc "Sonatype Nexus3 OSS service");
+
+      package = lib.mkPackageOption pkgs "nexus" { };
+
+      jdkPackage = lib.mkPackageOption pkgs "openjdk8" { };
+
+      user = mkOption {
+        type = types.str;
+        default = "nexus";
+        description = lib.mdDoc "User which runs Nexus3.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nexus";
+        description = lib.mdDoc "Group which runs Nexus3.";
+      };
+
+      home = mkOption {
+        type = types.str;
+        default = "/var/lib/sonatype-work";
+        description = lib.mdDoc "Home directory of the Nexus3 instance.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = lib.mdDoc "Address to listen on.";
+      };
+
+      listenPort = mkOption {
+        type = types.int;
+        default = 8081;
+        description = lib.mdDoc "Port to listen on.";
+      };
+
+      jvmOpts = mkOption {
+        type = types.lines;
+        default = ''
+          -Xms1200M
+          -Xmx1200M
+          -XX:MaxDirectMemorySize=2G
+          -XX:+UnlockDiagnosticVMOptions
+          -XX:+UnsyncloadClass
+          -XX:+LogVMOutput
+          -XX:LogFile=${cfg.home}/nexus3/log/jvm.log
+          -XX:-OmitStackTraceInFastThrow
+          -Djava.net.preferIPv4Stack=true
+          -Dkaraf.home=${cfg.package}
+          -Dkaraf.base=${cfg.package}
+          -Dkaraf.etc=${cfg.package}/etc/karaf
+          -Djava.util.logging.config.file=${cfg.package}/etc/karaf/java.util.logging.properties
+          -Dkaraf.data=${cfg.home}/nexus3
+          -Djava.io.tmpdir=${cfg.home}/nexus3/tmp
+          -Dkaraf.startLocalConsole=false
+          -Djava.endorsed.dirs=${cfg.package}/lib/endorsed
+        '';
+        defaultText = literalExpression ''
+          '''
+            -Xms1200M
+            -Xmx1200M
+            -XX:MaxDirectMemorySize=2G
+            -XX:+UnlockDiagnosticVMOptions
+            -XX:+UnsyncloadClass
+            -XX:+LogVMOutput
+            -XX:LogFile=''${home}/nexus3/log/jvm.log
+            -XX:-OmitStackTraceInFastThrow
+            -Djava.net.preferIPv4Stack=true
+            -Dkaraf.home=''${package}
+            -Dkaraf.base=''${package}
+            -Dkaraf.etc=''${package}/etc/karaf
+            -Djava.util.logging.config.file=''${package}/etc/karaf/java.util.logging.properties
+            -Dkaraf.data=''${home}/nexus3
+            -Djava.io.tmpdir=''${home}/nexus3/tmp
+            -Dkaraf.startLocalConsole=false
+            -Djava.endorsed.dirs=''${package}/lib/endorsed
+          '''
+        '';
+
+        description = lib.mdDoc ''
+          Options for the JVM written to `nexus.jvmopts`.
+          Please refer to the docs (https://help.sonatype.com/repomanager3/installation/configuring-the-runtime-environment)
+          for further information.
+        '';
+      };
+    };
+  };
+
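+  # A minimal usage sketch (illustrative only; the values below are simply the
+  # module defaults, not additional configuration required by this module):
+  #
+  #   services.nexus = {
+  #     enable = true;
+  #     listenAddress = "127.0.0.1";
+  #     listenPort = 8081;
+  #   };
+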
+  config = mkIf cfg.enable {
+    users.users.${cfg.user} = {
+      isSystemUser = true;
+      inherit (cfg) group home;
+      createHome = true;
+    };
+
+    users.groups.${cfg.group} = { };
+
+    systemd.services.nexus = {
+      description = "Sonatype Nexus3";
+
+      wantedBy = [ "multi-user.target" ];
+
+      path = [ cfg.home ];
+
+      environment = {
+        NEXUS_USER = cfg.user;
+        NEXUS_HOME = cfg.home;
+
+        INSTALL4J_JAVA_HOME = cfg.jdkPackage;
+        VM_OPTS_FILE = pkgs.writeText "nexus.vmoptions" cfg.jvmOpts;
+      };
+
+      preStart = ''
+        mkdir -p ${cfg.home}/nexus3/etc
+
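+        # On first start, seed nexus.properties with the configured listen
+        # address and port; on later starts, rewrite existing (possibly
+        # commented-out) entries so changes to these options take effect.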
+        if [ ! -f ${cfg.home}/nexus3/etc/nexus.properties ]; then
+          echo "# Jetty section" > ${cfg.home}/nexus3/etc/nexus.properties
+          echo "application-port=${toString cfg.listenPort}" >> ${cfg.home}/nexus3/etc/nexus.properties
+          echo "application-host=${toString cfg.listenAddress}" >> ${cfg.home}/nexus3/etc/nexus.properties
+        else
+          sed 's/^application-port=.*/application-port=${toString cfg.listenPort}/' -i ${cfg.home}/nexus3/etc/nexus.properties
+          sed 's/^# application-port=.*/application-port=${toString cfg.listenPort}/' -i ${cfg.home}/nexus3/etc/nexus.properties
+          sed 's/^application-host=.*/application-host=${toString cfg.listenAddress}/' -i ${cfg.home}/nexus3/etc/nexus.properties
+          sed 's/^# application-host=.*/application-host=${toString cfg.listenAddress}/' -i ${cfg.home}/nexus3/etc/nexus.properties
+        fi
+      '';
+
+      script = "${cfg.package}/bin/nexus run";
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        PrivateTmp = true;
+        LimitNOFILE = 102642;
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ ironpinguin ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/nifi.nix b/nixpkgs/nixos/modules/services/web-apps/nifi.nix
new file mode 100644
index 000000000000..5ce561077836
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/nifi.nix
@@ -0,0 +1,316 @@
+{ lib, pkgs, config, options, ... }:
+
+let
+  cfg = config.services.nifi;
+  opt = options.services.nifi;
+
+  env = {
+    NIFI_OVERRIDE_NIFIENV = "true";
+    NIFI_HOME = "/var/lib/nifi";
+    NIFI_PID_DIR = "/run/nifi";
+    NIFI_LOG_DIR = "/var/log/nifi";
+  };
+
+  envFile = pkgs.writeText "nifi.env" (lib.concatMapStrings (s: s + "\n") (
+    (lib.concatLists (lib.mapAttrsToList (name: value:
+      lib.optional (value != null) ''${name}="${toString value}"''
+    ) env))));
+
+  nifiEnv = pkgs.writeShellScriptBin "nifi-env" ''
+    set -a
+    source "${envFile}"
+    eval -- "\$@"
+  '';
+
+in {
+  options = {
+    services.nifi = {
+      enable = lib.mkEnableOption (lib.mdDoc "Apache NiFi");
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.nifi;
+        defaultText = lib.literalExpression "pkgs.nifi";
+        description = lib.mdDoc "Apache NiFi package to use.";
+      };
+
+      user = lib.mkOption {
+        type = lib.types.str;
+        default = "nifi";
+        description = lib.mdDoc "User account where Apache NiFi runs.";
+      };
+
+      group = lib.mkOption {
+        type = lib.types.str;
+        default = "nifi";
+        description = lib.mdDoc "Group account where Apache NiFi runs.";
+      };
+
+      enableHTTPS = lib.mkOption {
+        type = lib.types.bool;
+        default = true;
+        description = lib.mdDoc "Enable HTTPS protocol. Don`t use in production.";
+      };
+
+      listenHost = lib.mkOption {
+        type = lib.types.str;
+        default = if cfg.enableHTTPS then "0.0.0.0" else "127.0.0.1";
+        defaultText = lib.literalExpression ''
+          if config.${opt.enableHTTPS}
+          then "0.0.0.0"
+          else "127.0.0.1"
+        '';
+        description = lib.mdDoc "Bind to an ip for Apache NiFi web-ui.";
+      };
+
+      listenPort = lib.mkOption {
+        type = lib.types.int;
+        default = if cfg.enableHTTPS then 8443 else 8080;
+        defaultText = lib.literalExpression ''
+          if config.${opt.enableHTTPS}
+          then "8443"
+          else "8000"
+        '';
+        description = lib.mdDoc "Bind to a port for Apache NiFi web-ui.";
+      };
+
+      proxyHost = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = if cfg.enableHTTPS then "0.0.0.0" else null;
+        defaultText = lib.literalExpression ''
+          if config.${opt.enableHTTPS}
+          then "0.0.0.0"
+          else null
+        '';
+        description = lib.mdDoc "Allow requests from a specific host.";
+      };
+
+      proxyPort = lib.mkOption {
+        type = lib.types.nullOr lib.types.int;
+        default = if cfg.enableHTTPS then 8443 else null;
+        defaultText = lib.literalExpression ''
+          if config.${opt.enableHTTPS}
+          then "8443"
+          else null
+        '';
+        description = lib.mdDoc "Allow requests from a specific port.";
+      };
+
+      initUser = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = null;
+        description = lib.mdDoc "Initial user account for Apache NiFi. Username must be at least 4 characters.";
+      };
+
+      initPasswordFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/keys/nifi/password-nifi";
+        description = lib.mdDoc "nitial password for Apache NiFi. Password must be at least 12 characters.";
+      };
+
+      initJavaHeapSize = lib.mkOption {
+        type = lib.types.nullOr lib.types.int;
+        default = null;
+        example = 1024;
+        description = lib.mdDoc "Set the initial heap size for the JVM in MB.";
+      };
+
+      maxJavaHeapSize = lib.mkOption {
+        type = lib.types.nullOr lib.types.int;
+        default = null;
+        example = 2048;
+        description = lib.mdDoc "Set the initial heap size for the JVM in MB.";
+      };
+    };
+  };
+
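+  # A minimal usage sketch (illustrative; the user name and password file path
+  # are assumptions taken from the option examples, not values shipped with
+  # this module):
+  #
+  #   services.nifi = {
+  #     enable = true;
+  #     initUser = "admin";
+  #     initPasswordFile = "/run/keys/nifi/password-nifi";
+  #   };
+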
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.initUser!=null || cfg.initPasswordFile==null;
+          message = ''
+            <option>services.nifi.initUser</option> needs to be set if <option>services.nifi.initPasswordFile</option> is set.
+          '';
+      }
+      { assertion = cfg.initUser==null || cfg.initPasswordFile!=null;
+          message = ''
+            <option>services.nifi.initPasswordFile</option> needs to be set if <option>services.nifi.initUser</option> is set.
+          '';
+      }
+      { assertion = cfg.proxyHost==null || cfg.proxyPort!=null;
+          message = ''
+            <option>services.nifi.proxyPort</option> needs to be set if <option>services.nifi.proxyHost</option> is set.
+          '';
+      }
+      { assertion = cfg.proxyHost!=null || cfg.proxyPort==null;
+          message = ''
+            <option>services.nifi.proxyHost</option> needs to be set if <option>services.nifi.proxyPort</option> is set.
+          '';
+      }
+      { assertion = cfg.initJavaHeapSize==null || cfg.maxJavaHeapSize!=null;
+          message = ''
+            <option>services.nifi.maxJavaHeapSize</option> needs to be set if <option>services.nifi.initJavaHeapSize</option> is set.
+          '';
+      }
+      { assertion = cfg.initJavaHeapSize!=null || cfg.maxJavaHeapSize==null;
+          message = ''
+            <option>services.nifi.initJavaHeapSize</option> needs to be set if <option>services.nifi.maxJavaHeapSize</option> is set.
+          '';
+      }
+    ];
+
+    warnings = lib.optional (cfg.enableHTTPS==false) ''
+      Please do not disable HTTPS in production. With HTTPS disabled, NiFi is accessible without authentication.
+    '';
+
+    systemd.tmpfiles.rules = [
+      "d '/var/lib/nifi/conf' 0750 ${cfg.user} ${cfg.group}"
+      "L+ '/var/lib/nifi/lib' - - - - ${cfg.package}/lib"
+    ];
+
+
+    systemd.services.nifi = {
+      description = "Apache NiFi";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = env;
+      path = [ pkgs.gawk ];
+
+      serviceConfig = {
+        Type = "forking";
+        PIDFile = "/run/nifi/nifi.pid";
+        ExecStartPre = pkgs.writeScript "nifi-pre-start.sh" ''
+          #!/bin/sh
+          umask 077
+          test -f '/var/lib/nifi/conf/authorizers.xml'                      || (cp '${cfg.package}/share/nifi/conf/authorizers.xml' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/authorizers.xml')
+          test -f '/var/lib/nifi/conf/bootstrap.conf'                       || (cp '${cfg.package}/share/nifi/conf/bootstrap.conf' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/bootstrap.conf')
+          test -f '/var/lib/nifi/conf/bootstrap-hashicorp-vault.conf'       || (cp '${cfg.package}/share/nifi/conf/bootstrap-hashicorp-vault.conf' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/bootstrap-hashicorp-vault.conf')
+          test -f '/var/lib/nifi/conf/bootstrap-notification-services.xml'  || (cp '${cfg.package}/share/nifi/conf/bootstrap-notification-services.xml' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/bootstrap-notification-services.xml')
+          test -f '/var/lib/nifi/conf/logback.xml'                          || (cp '${cfg.package}/share/nifi/conf/logback.xml' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/logback.xml')
+          test -f '/var/lib/nifi/conf/login-identity-providers.xml'         || (cp '${cfg.package}/share/nifi/conf/login-identity-providers.xml' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/login-identity-providers.xml')
+          test -f '/var/lib/nifi/conf/nifi.properties'                      || (cp '${cfg.package}/share/nifi/conf/nifi.properties' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/nifi.properties')
+          test -f '/var/lib/nifi/conf/stateless-logback.xml'                || (cp '${cfg.package}/share/nifi/conf/stateless-logback.xml' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/stateless-logback.xml')
+          test -f '/var/lib/nifi/conf/stateless.properties'                 || (cp '${cfg.package}/share/nifi/conf/stateless.properties' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/stateless.properties')
+          test -f '/var/lib/nifi/conf/state-management.xml'                 || (cp '${cfg.package}/share/nifi/conf/state-management.xml' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/state-management.xml')
+          test -f '/var/lib/nifi/conf/zookeeper.properties'                 || (cp '${cfg.package}/share/nifi/conf/zookeeper.properties' '/var/lib/nifi/conf/' && chmod 0640 '/var/lib/nifi/conf/zookeeper.properties')
+          test -d '/var/lib/nifi/docs/html'                                 || (mkdir -p /var/lib/nifi/docs && cp -r '${cfg.package}/share/nifi/docs/html' '/var/lib/nifi/docs/html')
+          ${lib.optionalString ((cfg.initUser != null) && (cfg.initPasswordFile != null)) ''
+            awk -F'[<|>]' '/property name="Username"/ {if ($3!="") f=1} END{exit !f}' /var/lib/nifi/conf/login-identity-providers.xml || ${cfg.package}/bin/nifi.sh set-single-user-credentials ${cfg.initUser} $(cat ${cfg.initPasswordFile})
+          ''}
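+          # Rewrite nifi.properties for plain-HTTP or HTTPS operation: the sed
+          # expressions below clear whichever listener is unused and toggle the
+          # keystore/truststore settings to match.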
+          ${lib.optionalString (cfg.enableHTTPS == false) ''
+            sed -i /var/lib/nifi/conf/nifi.properties \
+              -e 's|nifi.remote.input.secure=.*|nifi.remote.input.secure=false|g' \
+              -e 's|nifi.web.http.host=.*|nifi.web.http.host=${cfg.listenHost}|g' \
+              -e 's|nifi.web.http.port=.*|nifi.web.http.port=${(toString cfg.listenPort)}|g' \
+              -e 's|nifi.web.https.host=.*|nifi.web.https.host=|g' \
+              -e 's|nifi.web.https.port=.*|nifi.web.https.port=|g' \
+              -e 's|nifi.security.keystore=.*|nifi.security.keystore=|g' \
+              -e 's|nifi.security.keystoreType=.*|nifi.security.keystoreType=|g' \
+              -e 's|nifi.security.truststore=.*|nifi.security.truststore=|g' \
+              -e 's|nifi.security.truststoreType=.*|nifi.security.truststoreType=|g' \
+              -e '/nifi.security.keystorePasswd/s|^|#|' \
+              -e '/nifi.security.keyPasswd/s|^|#|' \
+              -e '/nifi.security.truststorePasswd/s|^|#|'
+          ''}
+          ${lib.optionalString (cfg.enableHTTPS == true) ''
+            sed -i /var/lib/nifi/conf/nifi.properties \
+              -e 's|nifi.remote.input.secure=.*|nifi.remote.input.secure=true|g' \
+              -e 's|nifi.web.http.host=.*|nifi.web.http.host=|g' \
+              -e 's|nifi.web.http.port=.*|nifi.web.http.port=|g' \
+              -e 's|nifi.web.https.host=.*|nifi.web.https.host=${cfg.listenHost}|g' \
+              -e 's|nifi.web.https.port=.*|nifi.web.https.port=${(toString cfg.listenPort)}|g' \
+              -e 's|nifi.security.keystore=.*|nifi.security.keystore=./conf/keystore.p12|g' \
+              -e 's|nifi.security.keystoreType=.*|nifi.security.keystoreType=PKCS12|g' \
+              -e 's|nifi.security.truststore=.*|nifi.security.truststore=./conf/truststore.p12|g' \
+              -e 's|nifi.security.truststoreType=.*|nifi.security.truststoreType=PKCS12|g' \
+              -e '/nifi.security.keystorePasswd/s|^#\+||' \
+              -e '/nifi.security.keyPasswd/s|^#\+||' \
+              -e '/nifi.security.truststorePasswd/s|^#\+||'
+          ''}
+          ${lib.optionalString ((cfg.enableHTTPS == true) && (cfg.proxyHost != null) && (cfg.proxyPort != null)) ''
+            sed -i /var/lib/nifi/conf/nifi.properties \
+              -e 's|nifi.web.proxy.host=.*|nifi.web.proxy.host=${cfg.proxyHost}:${(toString cfg.proxyPort)}|g'
+          ''}
+          ${lib.optionalString ((cfg.enableHTTPS == false) || (cfg.proxyHost == null) && (cfg.proxyPort == null)) ''
+            sed -i /var/lib/nifi/conf/nifi.properties \
+              -e 's|nifi.web.proxy.host=.*|nifi.web.proxy.host=|g'
+          ''}
+          ${lib.optionalString ((cfg.initJavaHeapSize != null) && (cfg.maxJavaHeapSize != null))''
+            sed -i /var/lib/nifi/conf/bootstrap.conf \
+              -e 's|java.arg.2=.*|java.arg.2=-Xms${(toString cfg.initJavaHeapSize)}m|g' \
+              -e 's|java.arg.3=.*|java.arg.3=-Xmx${(toString cfg.maxJavaHeapSize)}m|g'
+          ''}
+          ${lib.optionalString ((cfg.initJavaHeapSize == null) && (cfg.maxJavaHeapSize == null))''
+            sed -i /var/lib/nifi/conf/bootstrap.conf \
+              -e 's|java.arg.2=.*|java.arg.2=-Xms512m|g' \
+              -e 's|java.arg.3=.*|java.arg.3=-Xmx512m|g'
+          ''}
+        '';
+        ExecStart = "${cfg.package}/bin/nifi.sh start";
+        ExecStop = "${cfg.package}/bin/nifi.sh stop";
+        # User and group
+        User = cfg.user;
+        Group = cfg.group;
+        # Runtime directory and mode
+        RuntimeDirectory = "nifi";
+        RuntimeDirectoryMode = "0750";
+        # State directory and mode
+        StateDirectory = "nifi";
+        StateDirectoryMode = "0750";
+        # Logs directory and mode
+        LogsDirectory = "nifi";
+        LogsDirectoryMode = "0750";
+        # Proc filesystem
+        ProcSubset = "pid";
+        ProtectProc = "invisible";
+        # Access write directories
+        ReadWritePaths = [ cfg.initPasswordFile ];
+        UMask = "0027";
+        # Capabilities
+        CapabilityBoundingSet = "";
+        # Security
+        NoNewPrivileges = true;
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateIPC = true;
+        PrivateUsers = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_INET AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute  = false;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "~@cpu-emulation @debug @keyring @memlock @mount @obsolete @resources @privileged @setuid" "@chown" ];
+      };
+    };
+
+    users.users = lib.mkMerge [
+      (lib.mkIf (cfg.user == "nifi") {
+        nifi = {
+          group = cfg.group;
+          isSystemUser = true;
+          home = cfg.package;
+        };
+      })
+      (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ cfg.package nifiEnv ])
+    ];
+
+    users.groups = lib.optionalAttrs (cfg.group == "nifi") {
+      nifi = { };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/node-red.nix b/nixpkgs/nixos/modules/services/web-apps/node-red.nix
new file mode 100644
index 000000000000..f4d4ad9681a6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/node-red.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.node-red;
+  defaultUser = "node-red";
+  finalPackage = if cfg.withNpmAndGcc then node-red_withNpmAndGcc else cfg.package;
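+  # Wrapper that puts npm and gcc on PATH at runtime so nodes can be installed
+  # imperatively from the Palette Manager (used when withNpmAndGcc is set).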
+  node-red_withNpmAndGcc = pkgs.runCommand "node-red" {
+    nativeBuildInputs = [ pkgs.makeWrapper ];
+  }
+  ''
+    mkdir -p $out/bin
+    makeWrapper ${pkgs.nodePackages.node-red}/bin/node-red $out/bin/node-red \
+      --prefix PATH : '${lib.makeBinPath [ pkgs.nodePackages.npm pkgs.gcc ]}'
+  '';
+in
+{
+  options.services.node-red = {
+    enable = mkEnableOption (lib.mdDoc "the Node-RED service");
+
+    package = mkOption {
+      default = pkgs.nodePackages.node-red;
+      defaultText = literalExpression "pkgs.nodePackages.node-red";
+      type = types.package;
+      description = lib.mdDoc "Node-RED package to use.";
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Open ports in the firewall for the server.
+      '';
+    };
+
+    withNpmAndGcc = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Give Node-RED access to NPM and GCC at runtime, so 'Nodes' can be
+        downloaded and managed imperatively via the 'Palette Manager'.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      default = "${cfg.package}/lib/node_modules/node-red/settings.js";
+      defaultText = literalExpression ''"''${package}/lib/node_modules/node-red/settings.js"'';
+      description = lib.mdDoc ''
+        Path to the JavaScript configuration file.
+        See <https://github.com/node-red/node-red/blob/master/packages/node_modules/node-red/settings.js>
+        for a configuration example.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 1880;
+      description = lib.mdDoc "Listening port.";
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = lib.mdDoc ''
+        User under which Node-RED runs. If left as the default value this user
+        will automatically be created on system activation, otherwise the
+        sysadmin is responsible for ensuring the user exists.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = lib.mdDoc ''
+        Group under which Node-RED runs. If left as the default value this group
+        will automatically be created on system activation, otherwise the
+        sysadmin is responsible for ensuring the group exists.
+      '';
+    };
+
+    userDir = mkOption {
+      type = types.path;
+      default = "/var/lib/node-red";
+      description = lib.mdDoc ''
+        The directory to store all user data, such as flow and credential files and all library data. If left
+        as the default value this directory will automatically be created before the node-red service starts,
+        otherwise the sysadmin is responsible for ensuring the directory exists with appropriate ownership
+        and permissions.
+      '';
+    };
+
+    safe = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Whether to launch Node-RED in --safe mode.";
+    };
+
+    define = mkOption {
+      type = types.attrs;
+      default = {};
+      description = lib.mdDoc "List of settings.js overrides to pass via -D to Node-RED.";
+      example = literalExpression ''
+        {
+          "logging.console.level" = "trace";
+        }
+      '';
+    };
+  };
+
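+  # A minimal usage sketch (illustrative; enabling withNpmAndGcc and opening
+  # the firewall are choices made for the example, not module defaults):
+  #
+  #   services.node-red = {
+  #     enable = true;
+  #     openFirewall = true;
+  #     withNpmAndGcc = true;
+  #   };
+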
+  config = mkIf cfg.enable {
+    users.users = optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} = {
+        isSystemUser = true;
+        group = defaultUser;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == defaultUser) {
+      ${defaultUser} = { };
+    };
+
+    networking.firewall = mkIf cfg.openFirewall {
+      allowedTCPPorts = [ cfg.port ];
+    };
+
+    systemd.services.node-red = {
+      description = "Node-RED Service";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+      environment = {
+        HOME = cfg.userDir;
+      };
+      serviceConfig = mkMerge [
+        {
+          User = cfg.user;
+          Group = cfg.group;
+          ExecStart = "${finalPackage}/bin/node-red ${pkgs.lib.optionalString cfg.safe "--safe"} --settings ${cfg.configFile} --port ${toString cfg.port} --userDir ${cfg.userDir} ${concatStringsSep " " (mapAttrsToList (name: value: "-D ${name}=${value}") cfg.define)}";
+          PrivateTmp = true;
+          Restart = "always";
+          WorkingDirectory = cfg.userDir;
+        }
+        (mkIf (cfg.userDir == "/var/lib/node-red") { StateDirectory = "node-red"; })
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/onlyoffice.nix b/nixpkgs/nixos/modules/services/web-apps/onlyoffice.nix
new file mode 100644
index 000000000000..f958566b91f0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/onlyoffice.nix
@@ -0,0 +1,296 @@
+{ lib, config, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.onlyoffice;
+in
+{
+  options.services.onlyoffice = {
+    enable = mkEnableOption (lib.mdDoc "OnlyOffice DocumentServer");
+
+    enableExampleServer = mkEnableOption (lib.mdDoc "OnlyOffice example server");
+
+    hostname = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "FQDN for the onlyoffice instance.";
+    };
+
+    jwtSecretFile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Path to a file that contains the secret to sign web requests using JSON Web Tokens.
+        If left at the default value (null), signing is disabled.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.onlyoffice-documentserver;
+      defaultText = lib.literalExpression "pkgs.onlyoffice-documentserver";
+      description = lib.mdDoc "Which package to use for the OnlyOffice instance.";
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8000;
+      description = lib.mdDoc "Port the OnlyOffice DocumentServer should listens on.";
+    };
+
+    examplePort = mkOption {
+      type = types.nullOr types.port;
+      default = null;
+      description = lib.mdDoc "Port the OnlyOffice Example server should listen on.";
+    };
+
+    postgresHost = mkOption {
+      type = types.str;
+      default = "/run/postgresql";
+      description = lib.mdDoc "The Postgresql hostname or socket path OnlyOffice should connect to.";
+    };
+
+    postgresName = mkOption {
+      type = types.str;
+      default = "onlyoffice";
+      description = lib.mdDoc "The name of database OnlyOffice should user.";
+    };
+
+    postgresPasswordFile = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Path to a file that contains the password OnlyOffice should use to connect to Postgresql.
+        Unused when using socket authentication.
+      '';
+    };
+
+    postgresUser = mkOption {
+      type = types.str;
+      default = "onlyoffice";
+      description = lib.mdDoc ''
+        The username OnlyOffice should use to connect to Postgresql.
+        Unused when using socket authentication.
+      '';
+    };
+
+    rabbitmqUrl = mkOption {
+      type = types.str;
+      default = "amqp://guest:guest@localhost:5672";
+      description = lib.mdDoc "The Rabbitmq in amqp URI style OnlyOffice should connect to.";
+    };
+  };
+
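+  # A minimal usage sketch (illustrative; the host name and secret file path
+  # are assumptions, not values provided by this module):
+  #
+  #   services.onlyoffice = {
+  #     enable = true;
+  #     hostname = "office.example.com";
+  #     jwtSecretFile = "/run/keys/onlyoffice-jwt";
+  #   };
+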
+  config = lib.mkIf cfg.enable {
+    services = {
+      nginx = {
+        enable = mkDefault true;
+        # misses text/csv, font/ttf, application/x-font-ttf, application/rtf, application/wasm
+        recommendedGzipSettings = mkDefault true;
+        recommendedProxySettings = mkDefault true;
+
+        upstreams = {
+          # /etc/nginx/includes/http-common.conf
+          onlyoffice-docservice = {
+            servers = { "localhost:${toString cfg.port}" = { }; };
+          };
+          onlyoffice-example = lib.mkIf cfg.enableExampleServer {
+            servers = { "localhost:${toString cfg.examplePort}" = { }; };
+          };
+        };
+
+        virtualHosts.${cfg.hostname} = {
+          locations = {
+            # /etc/nginx/includes/ds-docservice.conf
+            "~ ^(\/[\d]+\.[\d]+\.[\d]+[\.|-][\d]+)?\/(web-apps\/apps\/api\/documents\/api\.js)$".extraConfig = ''
+              expires -1;
+              alias ${cfg.package}/var/www/onlyoffice/documentserver/$2;
+            '';
+            "~ ^(\/[\d]+\.[\d]+\.[\d]+[\.|-][\d]+)?\/(web-apps)(\/.*\.json)$".extraConfig = ''
+              expires 365d;
+              error_log /dev/null crit;
+              alias ${cfg.package}/var/www/onlyoffice/documentserver/$2$3;
+            '';
+            "~ ^(\/[\d]+\.[\d]+\.[\d]+[\.|-][\d]+)?\/(sdkjs-plugins)(\/.*\.json)$".extraConfig = ''
+              expires 365d;
+              error_log /dev/null crit;
+              alias ${cfg.package}/var/www/onlyoffice/documentserver/$2$3;
+            '';
+            "~ ^(\/[\d]+\.[\d]+\.[\d]+[\.|-][\d]+)?\/(web-apps|sdkjs|sdkjs-plugins|fonts)(\/.*)$".extraConfig = ''
+              expires 365d;
+              alias ${cfg.package}/var/www/onlyoffice/documentserver/$2$3;
+            '';
+            "~* ^(\/cache\/files.*)(\/.*)".extraConfig = ''
+              alias /var/lib/onlyoffice/documentserver/App_Data$1;
+              add_header Content-Disposition "attachment; filename*=UTF-8''$arg_filename";
+
+              set $secret_string verysecretstring;
+              secure_link $arg_md5,$arg_expires;
+              secure_link_md5 "$secure_link_expires$uri$secret_string";
+
+              if ($secure_link = "") {
+                return 403;
+              }
+
+              if ($secure_link = "0") {
+                return 410;
+              }
+            '';
+            "~* ^(\/[\d]+\.[\d]+\.[\d]+[\.|-][\d]+)?\/(internal)(\/.*)$".extraConfig = ''
+              allow 127.0.0.1;
+              deny all;
+              proxy_pass http://onlyoffice-docservice/$2$3;
+            '';
+            "~* ^(\/[\d]+\.[\d]+\.[\d]+[\.|-][\d]+)?\/(info)(\/.*)$".extraConfig = ''
+              allow 127.0.0.1;
+              deny all;
+              proxy_pass http://onlyoffice-docservice/$2$3;
+            '';
+            "/".extraConfig = ''
+              proxy_pass http://onlyoffice-docservice;
+            '';
+            "~ ^(\/[\d]+\.[\d]+\.[\d]+[\.|-][\d]+)?(\/doc\/.*)".extraConfig = ''
+              proxy_pass http://onlyoffice-docservice$2;
+              proxy_http_version 1.1;
+            '';
+            "/${cfg.package.version}/".extraConfig = ''
+              proxy_pass http://onlyoffice-docservice/;
+            '';
+            "~ ^(\/[\d]+\.[\d]+\.[\d]+[\.|-][\d]+)?\/(dictionaries)(\/.*)$".extraConfig = ''
+              expires 365d;
+              alias ${cfg.package}/var/www/onlyoffice/documentserver/$2$3;
+            '';
+            # /etc/nginx/includes/ds-example.conf
+            "~ ^(\/welcome\/.*)$".extraConfig = ''
+              expires 365d;
+              alias ${cfg.package}/var/www/onlyoffice/documentserver-example$1;
+              index docker.html;
+            '';
+            "/example/".extraConfig = lib.mkIf cfg.enableExampleServer ''
+              proxy_pass http://onlyoffice-example/;
+              proxy_set_header X-Forwarded-Path /example;
+            '';
+          };
+          extraConfig = ''
+            rewrite ^/$ /welcome/ redirect;
+            rewrite ^\/OfficeWeb(\/apps\/.*)$ /${cfg.package.version}/web-apps$1 redirect;
+            rewrite ^(\/web-apps\/apps\/(?!api\/).*)$ /${cfg.package.version}$1 redirect;
+
+            # based on https://github.com/ONLYOFFICE/document-server-package/blob/master/common/documentserver/nginx/includes/http-common.conf.m4#L29-L34
+            # without variable indirection and correct variable names
+            proxy_set_header Host $host;
+            proxy_set_header X-Forwarded-Host $host;
+            proxy_set_header X-Forwarded-Proto $scheme;
+            # required for CSP to take effect
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            # required for websocket
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection $connection_upgrade;
+          '';
+        };
+      };
+
+      rabbitmq.enable = lib.mkDefault true;
+
+      postgresql = {
+        enable = lib.mkDefault true;
+        ensureDatabases = [ "onlyoffice" ];
+        ensureUsers = [{
+          name = "onlyoffice";
+          ensureDBOwnership = true;
+        }];
+      };
+    };
+
+    systemd.services = {
+      onlyoffice-converter = {
+        description = "onlyoffice converter";
+        after = [ "network.target" "onlyoffice-docservice.service" "postgresql.service" ];
+        requires = [ "network.target" "onlyoffice-docservice.service" "postgresql.service" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ExecStart = "${cfg.package.fhs}/bin/onlyoffice-wrapper FileConverter/converter /run/onlyoffice/config";
+          Group = "onlyoffice";
+          Restart = "always";
+          RuntimeDirectory = "onlyoffice";
+          StateDirectory = "onlyoffice";
+          Type = "simple";
+          User = "onlyoffice";
+        };
+      };
+
+      onlyoffice-docservice =
+        let
+          onlyoffice-prestart = pkgs.writeShellScript "onlyoffice-prestart" ''
+            PATH=$PATH:${lib.makeBinPath (with pkgs; [ jq moreutils config.services.postgresql.package ])}
+            umask 077
+            mkdir -p /run/onlyoffice/config/ /var/lib/onlyoffice/documentserver/sdkjs/{slide/themes,common}/ /var/lib/onlyoffice/documentserver/{fonts,server/FileConverter/bin}/
+            cp -r ${cfg.package}/etc/onlyoffice/documentserver/* /run/onlyoffice/config/
+            chmod u+w /run/onlyoffice/config/default.json
+
+            # Allow members of the onlyoffice group to serve files under /var/lib/onlyoffice/documentserver/App_Data
+            chmod g+x /var/lib/onlyoffice/documentserver
+
+            cp /run/onlyoffice/config/default.json{,.orig}
+
+            # for a mapping of environment variables from the docker container to json options see
+            # https://github.com/ONLYOFFICE/Docker-DocumentServer/blob/master/run-document-server.sh
+            jq '
+              .services.CoAuthoring.server.port = ${toString cfg.port} |
+              .services.CoAuthoring.sql.dbHost = "${cfg.postgresHost}" |
+              .services.CoAuthoring.sql.dbName = "${cfg.postgresName}" |
+            ${lib.optionalString (cfg.postgresPasswordFile != null) ''
+              .services.CoAuthoring.sql.dbPass = "'"$(cat ${cfg.postgresPasswordFile})"'" |
+            ''}
+              .services.CoAuthoring.sql.dbUser = "${cfg.postgresUser}" |
+            ${lib.optionalString (cfg.jwtSecretFile != null) ''
+              .services.CoAuthoring.token.enable.browser = true |
+              .services.CoAuthoring.token.enable.request.inbox = true |
+              .services.CoAuthoring.token.enable.request.outbox = true |
+              .services.CoAuthoring.secret.inbox.string = "'"$(cat ${cfg.jwtSecretFile})"'" |
+              .services.CoAuthoring.secret.outbox.string = "'"$(cat ${cfg.jwtSecretFile})"'" |
+              .services.CoAuthoring.secret.session.string = "'"$(cat ${cfg.jwtSecretFile})"'" |
+            ''}
+              .rabbitmq.url = "${cfg.rabbitmqUrl}"
+              ' /run/onlyoffice/config/default.json | sponge /run/onlyoffice/config/default.json
+
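+            # If the schema already exists (the task_result table is present),
+            # drop it and recreate it; otherwise create it from scratch.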
+            if psql -d onlyoffice -c "SELECT 'task_result'::regclass;" >/dev/null; then
+              psql -f ${cfg.package}/var/www/onlyoffice/documentserver/server/schema/postgresql/removetbl.sql
+              psql -f ${cfg.package}/var/www/onlyoffice/documentserver/server/schema/postgresql/createdb.sql
+            else
+              psql -f ${cfg.package}/var/www/onlyoffice/documentserver/server/schema/postgresql/createdb.sql
+            fi
+          '';
+        in
+        {
+          description = "onlyoffice documentserver";
+          after = [ "network.target" "postgresql.service" ];
+          requires = [ "postgresql.service" ];
+          wantedBy = [ "multi-user.target" ];
+          serviceConfig = {
+            ExecStart = "${cfg.package.fhs}/bin/onlyoffice-wrapper DocService/docservice /run/onlyoffice/config";
+            ExecStartPre = [ onlyoffice-prestart ];
+            Group = "onlyoffice";
+            Restart = "always";
+            RuntimeDirectory = "onlyoffice";
+            StateDirectory = "onlyoffice";
+            Type = "simple";
+            User = "onlyoffice";
+          };
+        };
+    };
+
+    users.users = {
+      onlyoffice = {
+        description = "OnlyOffice Service";
+        group = "onlyoffice";
+        isSystemUser = true;
+      };
+
+      nginx.extraGroups = [ "onlyoffice" ];
+    };
+
+    users.groups.onlyoffice = { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/openvscode-server.nix b/nixpkgs/nixos/modules/services/web-apps/openvscode-server.nix
new file mode 100644
index 000000000000..3daf238c57e1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/openvscode-server.nix
@@ -0,0 +1,212 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.openvscode-server;
+  defaultUser = "openvscode-server";
+  defaultGroup = defaultUser;
+in
+{
+  options = {
+    services.openvscode-server = {
+      enable = lib.mkEnableOption (lib.mdDoc "openvscode-server");
+
+      package = lib.mkPackageOptionMD pkgs "openvscode-server" { };
+
+      extraPackages = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional packages to add to the openvscode-server {env}`PATH`.
+        '';
+        example = lib.literalExpression "[ pkgs.go ]";
+        type = lib.types.listOf lib.types.package;
+      };
+
+      extraEnvironment = lib.mkOption {
+        type = lib.types.attrsOf lib.types.str;
+        description = lib.mdDoc ''
+          Additional environment variables to pass to openvscode-server.
+        '';
+        default = { };
+        example = { PKG_CONFIG_PATH = "/run/current-system/sw/lib/pkgconfig"; };
+      };
+
+      extraArguments = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          Additional arguments to pass to openvscode-server.
+        '';
+        example = lib.literalExpression ''[ "--log=info" ]'';
+        type = lib.types.listOf lib.types.str;
+      };
+
+      host = lib.mkOption {
+        default = "localhost";
+        description = lib.mdDoc ''
+          The host name or IP address the server should listen to.
+        '';
+        type = lib.types.str;
+      };
+
+      port = lib.mkOption {
+        default = 3000;
+        description = lib.mdDoc ''
+          The port the server should listen to. If 0 is passed a random free port is picked. If a range in the format num-num is passed, a free port from the range (end inclusive) is selected.
+        '';
+        type = lib.types.port;
+      };
+
+      user = lib.mkOption {
+        default = defaultUser;
+        example = "yourUser";
+        description = lib.mdDoc ''
+          The user to run openvscode-server as.
+          By default, a user named `${defaultUser}` will be created.
+        '';
+        type = lib.types.str;
+      };
+
+      group = lib.mkOption {
+        default = defaultGroup;
+        example = "yourGroup";
+        description = lib.mdDoc ''
+          The group to run openvscode-server under.
+          By default, a group named `${defaultGroup}` will be created.
+        '';
+        type = lib.types.str;
+      };
+
+      extraGroups = lib.mkOption {
+        default = [ ];
+        description = lib.mdDoc ''
+          An array of additional groups for the `${defaultUser}` user.
+        '';
+        example = [ "docker" ];
+        type = lib.types.listOf lib.types.str;
+      };
+
+      withoutConnectionToken = lib.mkOption {
+        default = false;
+        description = lib.mdDoc ''
+          Run without a connection token. Only use this if the connection is secured by other means.
+        '';
+        example = true;
+        type = lib.types.bool;
+      };
+
+      socketPath = lib.mkOption {
+        default = null;
+        example = "/run/openvscode/socket";
+        description = lib.mdDoc ''
+          The path to a socket file for the server to listen to.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      userDataDir = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          Specifies the directory that user data is kept in. Can be used to open multiple distinct instances of Code.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      serverDataDir = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          Specifies the directory that server data is kept in.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      extensionsDir = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          Set the root path for extensions.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      telemetryLevel = lib.mkOption {
+        default = null;
+        example = "crash";
+        description = lib.mdDoc ''
+          Sets the initial telemetry level. Valid levels are: 'off', 'crash', 'error' and 'all'.
+        '';
+        type = lib.types.nullOr (lib.types.enum [ "off" "crash" "error" "all" ]);
+      };
+
+      connectionToken = lib.mkOption {
+        default = null;
+        example = "secret-token";
+        description = lib.mdDoc ''
+          A secret that must be included with all requests.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+      connectionTokenFile = lib.mkOption {
+        default = null;
+        description = lib.mdDoc ''
+          Path to a file that contains the connection token.
+        '';
+        type = lib.types.nullOr lib.types.str;
+      };
+
+    };
+  };
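+
+  # A minimal usage sketch (values are illustrative, not enforced by this
+  # module): bind to all interfaces, keep the connection token in a file
+  # outside the Nix store, and expose a Go toolchain to the remote terminal.
+  #
+  #   services.openvscode-server = {
+  #     enable = true;
+  #     host = "0.0.0.0";
+  #     port = 3000;
+  #     connectionTokenFile = "/run/keys/openvscode-token";  # hypothetical path
+  #     extraPackages = [ pkgs.go ];
+  #   };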
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.openvscode-server = {
+      description = "OpenVSCode server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" ];
+      path = cfg.extraPackages;
+      environment = cfg.extraEnvironment;
+      serviceConfig = {
+        ExecStart = ''
+          ${lib.getExe cfg.package} \
+            --accept-server-license-terms \
+            --host=${cfg.host} \
+            --port=${toString cfg.port} \
+        '' + lib.optionalString (cfg.telemetryLevel != null) ''
+          --telemetry-level=${cfg.telemetryLevel} \
+        '' + lib.optionalString (cfg.withoutConnectionToken) ''
+          --without-connection-token \
+        '' + lib.optionalString (cfg.socketPath != null) ''
+          --socket-path=${cfg.socketPath} \
+        '' + lib.optionalString (cfg.userDataDir != null) ''
+          --user-data-dir=${cfg.userDataDir} \
+        '' + lib.optionalString (cfg.serverDataDir != null) ''
+          --server-data-dir=${cfg.serverDataDir} \
+        '' + lib.optionalString (cfg.extensionsDir != null) ''
+          --extensions-dir=${cfg.extensionsDir} \
+        '' + lib.optionalString (cfg.connectionToken != null) ''
+          --connection-token=${cfg.connectionToken} \
+        '' + lib.optionalString (cfg.connectionTokenFile != null) ''
+          --connection-token-file=${cfg.connectionTokenFile} \
+        '' + lib.escapeShellArgs cfg.extraArguments;
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        RuntimeDirectory = cfg.user;
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "on-failure";
+      };
+    };
+
+    users.users."${cfg.user}" = lib.mkMerge [
+      (lib.mkIf (cfg.user == defaultUser) {
+        isNormalUser = true;
+        description = "openvscode-server user";
+        inherit (cfg) group;
+      })
+      {
+        packages = cfg.extraPackages;
+        inherit (cfg) extraGroups;
+      }
+    ];
+
+    users.groups."${defaultGroup}" = lib.mkIf (cfg.group == defaultGroup) { };
+  };
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/openwebrx.nix b/nixpkgs/nixos/modules/services/web-apps/openwebrx.nix
new file mode 100644
index 000000000000..72c5d6c7818c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/openwebrx.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.services.openwebrx;
+in
+{
+  options.services.openwebrx = with lib; {
+    enable = mkEnableOption (lib.mdDoc "OpenWebRX Web interface for Software-Defined Radios on http://localhost:8073");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.openwebrx;
+      defaultText = literalExpression "pkgs.openwebrx";
+      description = lib.mdDoc "OpenWebRX package to use for the service";
+    };
+  };
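+
+  # Usage sketch: enabling the service is all that is needed; the web UI is
+  # then reachable on http://localhost:8073 as noted in the enable option.
+  #
+  #   services.openwebrx.enable = true;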
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.openwebrx = {
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [
+        csdr
+        digiham
+        codec2
+        js8call
+        m17-cxx-demod
+        alsaUtils
+        netcat
+      ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/openwebrx";
+        Restart = "always";
+        DynamicUser = true;
+        # openwebrx uses /var/lib/openwebrx by default
+        StateDirectory = [ "openwebrx" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/outline.nix b/nixpkgs/nixos/modules/services/web-apps/outline.nix
new file mode 100644
index 000000000000..d97b45d62418
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/outline.nix
@@ -0,0 +1,758 @@
+{ config, lib, pkgs, ...}:
+
+let
+  defaultUser = "outline";
+  cfg = config.services.outline;
+  inherit (lib) mkRemovedOptionModule;
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "outline" "sequelizeArguments" ] "Database migrations are run against the configured database by Outline directly.")
+  ];
+  # See here for a reference of all the options:
+  #   https://github.com/outline/outline/blob/v0.67.0/.env.sample
+  #   https://github.com/outline/outline/blob/v0.67.0/app.json
+  #   https://github.com/outline/outline/blob/v0.67.0/server/env.ts
+  #   https://github.com/outline/outline/blob/v0.67.0/shared/types.ts
+  # The order is kept the same here to make updating easier.
+  options.services.outline = {
+    enable = lib.mkEnableOption (lib.mdDoc "outline");
+
+    package = lib.mkOption {
+      default = pkgs.outline;
+      defaultText = lib.literalExpression "pkgs.outline";
+      type = lib.types.package;
+      example = lib.literalExpression ''
+        pkgs.outline.overrideAttrs (super: {
+          # Ignore the domain part in emails that come from OIDC. This might be
+          # helpful if you want multiple users with different email providers
+          # to still land in the same team. Note that this effectively makes
+          # Outline a single-team instance.
+          patchPhase = ${"''"}
+            sed -i 's/const domain = parts\.length && parts\[1\];/const domain = "example.com";/g' plugins/oidc/server/auth/oidc.ts
+          ${"''"};
+        })
+      '';
+      description = lib.mdDoc "Outline package to use.";
+    };
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      default = defaultUser;
+      description = lib.mdDoc ''
+        User under which the service should run. If this is the default value,
+        the user will be created, with the specified group as the primary
+        group.
+      '';
+    };
+
+    group = lib.mkOption {
+      type = lib.types.str;
+      default = defaultUser;
+      description = lib.mdDoc ''
+        Group under which the service should run. If this is the default value,
+        the group will be created.
+      '';
+    };
+
+    #
+    # Required options
+    #
+
+    secretKeyFile = lib.mkOption {
+      type = lib.types.str;
+      default = "/var/lib/outline/secret_key";
+      description = lib.mdDoc ''
+        File path that contains the application secret key. It must be 32
+        bytes long and hex-encoded. If the file does not exist, a new key will
+        be generated and saved here.
+      '';
+    };
+
+    utilsSecretFile = lib.mkOption {
+      type = lib.types.str;
+      default = "/var/lib/outline/utils_secret";
+      description = lib.mdDoc ''
+        File path that contains the utility secret key. If the file does not
+        exist, a new key will be generated and saved here.
+      '';
+    };
+
+    databaseUrl = lib.mkOption {
+      type = lib.types.str;
+      default = "local";
+      description = lib.mdDoc ''
+        URI to use for the main PostgreSQL database. If this needs to include
+        credentials that shouldn't be world-readable in the Nix store, set an
+        environment file on the systemd service and override the
+        `DATABASE_URL` entry. Pass the string
+        `local` to set up a database on the local server.
+      '';
+    };
+
+    redisUrl = lib.mkOption {
+      type = lib.types.str;
+      default = "local";
+      description = lib.mdDoc ''
+        Connection to a Redis server. If this needs to include credentials
+        that shouldn't be world-readable in the Nix store, set an environment
+        file on the systemd service and override the
+        `REDIS_URL` entry. Pass the string
+        `local` to set up a local Redis database.
+      '';
+    };
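+
+    # Sketch of the connection styles described above (values illustrative):
+    # either let the module provision PostgreSQL and Redis locally, or point
+    # at external servers. For credentials that must stay out of the Nix
+    # store, use the environment-file approach mentioned in the descriptions.
+    #
+    #   services.outline = {
+    #     enable = true;
+    #     databaseUrl = "local";
+    #     redisUrl = "local";
+    #   };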
+
+    publicUrl = lib.mkOption {
+      type = lib.types.str;
+      default = "http://localhost:3000";
+      description = lib.mdDoc "The fully qualified, publicly accessible URL";
+    };
+
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 3000;
+      description = lib.mdDoc "Listening port.";
+    };
+
+    storage = lib.mkOption {
+      description = lib.mdDoc ''
+        To support uploading images for avatars and document attachments, an
+        S3-compatible object store can be provided. AWS S3 is recommended for
+        redundancy; if you want to keep all file storage local, an alternative
+        such as [minio](https://github.com/minio/minio) can be used.
+        Plain local filesystem storage is also supported.
+
+        A more detailed guide on setting up storage is available
+        [here](https://docs.getoutline.com/s/hosting/doc/file-storage-N4M0T6Ypu7).
+      '';
+      example = lib.literalExpression ''
+        {
+          accessKey = "...";
+          secretKeyFile = "/somewhere";
+          uploadBucketUrl = "https://minio.example.com";
+          uploadBucketName = "outline";
+          region = "us-east-1";
+        }
+      '';
+      type = lib.types.submodule {
+        options = {
+          storageType = lib.mkOption {
+            type = lib.types.enum [ "local" "s3" ];
+            description = lib.mdDoc "File storage type, it can be local or s3.";
+            default = "s3";
+          };
+          localRootDir = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc ''
+              If `storageType` is `local`, this sets the parent directory
+              under which all attachments/images go.
+            '';
+            default = "/var/lib/outline/data";
+          };
+          accessKey = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "S3 access key.";
+          };
+          secretKeyFile = lib.mkOption {
+            type = lib.types.path;
+            description = lib.mdDoc "File path that contains the S3 secret key.";
+          };
+          region = lib.mkOption {
+            type = lib.types.str;
+            default = "xx-xxxx-x";
+            description = lib.mdDoc "AWS S3 region name.";
+          };
+          uploadBucketUrl = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc ''
+              URL endpoint of an S3-compatible API where uploads should be
+              stored.
+            '';
+          };
+          uploadBucketName = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Name of the bucket where uploads should be stored.";
+          };
+          uploadMaxSize = lib.mkOption {
+            type = lib.types.int;
+            default = 26214400;
+            description = lib.mdDoc "Maxmium file size for uploads.";
+          };
+          forcePathStyle = lib.mkOption {
+            type = lib.types.bool;
+            default = true;
+            description = lib.mdDoc "Force S3 path style.";
+          };
+          acl = lib.mkOption {
+            type = lib.types.str;
+            default = "private";
+            description = lib.mdDoc "ACL setting.";
+          };
+        };
+      };
+    };
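+
+    # Sketch of the local-filesystem variant mentioned above; the directory
+    # shown is simply the module default, spelled out for clarity.
+    #
+    #   services.outline.storage = {
+    #     storageType = "local";
+    #     localRootDir = "/var/lib/outline/data";
+    #   };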
+
+    #
+    # Authentication
+    #
+
+    slackAuthentication = lib.mkOption {
+      description = lib.mdDoc ''
+        To configure Slack auth, you'll need to create an Application at
+        https://api.slack.com/apps
+
+        When configuring the Client ID, add a redirect URL under "OAuth & Permissions"
+        to `https://[publicUrl]/auth/slack.callback`.
+      '';
+      default = null;
+      type = lib.types.nullOr (lib.types.submodule {
+        options = {
+          clientId = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Authentication key.";
+          };
+          secretFile = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "File path containing the authentication secret.";
+          };
+        };
+      });
+    };
+
+    googleAuthentication = lib.mkOption {
+      description = lib.mdDoc ''
+        To configure Google auth, you'll need to create an OAuth Client ID at
+        https://console.cloud.google.com/apis/credentials
+
+        When configuring the Client ID, add an Authorized redirect URI to
+        `https://[publicUrl]/auth/google.callback`.
+      '';
+      default = null;
+      type = lib.types.nullOr (lib.types.submodule {
+        options = {
+          clientId = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Authentication client identifier.";
+          };
+          clientSecretFile = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "File path containing the authentication secret.";
+          };
+        };
+      });
+    };
+
+    azureAuthentication = lib.mkOption {
+      description = lib.mdDoc ''
+        To configure Microsoft/Azure auth, you'll need to create an OAuth
+        Client. See
+        [the guide](https://wiki.generaloutline.com/share/dfa77e56-d4d2-4b51-8ff8-84ea6608faa4)
+        for details on setting up your Azure App.
+      '';
+      default = null;
+      type = lib.types.nullOr (lib.types.submodule {
+        options = {
+          clientId = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Authentication client identifier.";
+          };
+          clientSecretFile = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "File path containing the authentication secret.";
+          };
+          resourceAppId = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Authentication application resource ID.";
+          };
+        };
+      });
+    };
+
+    oidcAuthentication = lib.mkOption {
+      description = lib.mdDoc ''
+        To configure generic OIDC auth, you'll need some kind of identity
+        provider. See the documentation for whichever IdP you use to fill out
+        all the fields. The redirect URL is
+        `https://[publicUrl]/auth/oidc.callback`.
+      '';
+      default = null;
+      type = lib.types.nullOr (lib.types.submodule {
+        options = {
+          clientId = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Authentication client identifier.";
+          };
+          clientSecretFile = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "File path containing the authentication secret.";
+          };
+          authUrl = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "OIDC authentication URL endpoint.";
+          };
+          tokenUrl = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "OIDC token URL endpoint.";
+          };
+          userinfoUrl = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "OIDC userinfo URL endpoint.";
+          };
+          usernameClaim = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc ''
+              Specify which claims to derive user information from. Supports any
+              valid JSON path within the JWT payload.
+            '';
+            default = "preferred_username";
+          };
+          displayName = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Display name for OIDC authentication.";
+            default = "OpenID";
+          };
+          scopes = lib.mkOption {
+            type = lib.types.listOf lib.types.str;
+            description = lib.mdDoc "OpenID authentication scopes.";
+            default = [ "openid" "profile" "email" ];
+          };
+        };
+      });
+    };
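+
+    # Sketch of a generic OIDC setup; the endpoints below are hypothetical and
+    # must be taken from your identity provider's discovery document.
+    #
+    #   services.outline.oidcAuthentication = {
+    #     clientId = "outline";
+    #     clientSecretFile = "/run/keys/outline-oidc-secret";
+    #     authUrl = "https://idp.example.com/oauth2/authorize";
+    #     tokenUrl = "https://idp.example.com/oauth2/token";
+    #     userinfoUrl = "https://idp.example.com/oauth2/userinfo";
+    #   };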
+
+    #
+    # Optional configuration
+    #
+
+    sslKeyFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc ''
+        File path that contains the Base64-encoded private key for HTTPS
+        termination. This is only required if you do not use an external reverse
+        proxy. See
+        [the documentation](https://wiki.generaloutline.com/share/dfa77e56-d4d2-4b51-8ff8-84ea6608faa4).
+      '';
+    };
+    sslCertFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc ''
+        File path that contains the Base64-encoded certificate for HTTPS
+        termination. This is only required if you do not use an external reverse
+        proxy. See
+        [the documentation](https://wiki.generaloutline.com/share/dfa77e56-d4d2-4b51-8ff8-84ea6608faa4).
+      '';
+    };
+
+    cdnUrl = lib.mkOption {
+      type = lib.types.str;
+      default = "";
+      description = lib.mdDoc ''
+        If using a Cloudfront/Cloudflare distribution or similar it can be set
+        using this option. This will cause paths to JavaScript files,
+        stylesheets and images to be updated to the hostname defined here. In
+        your CDN configuration the origin server should be set to the public URL.
+      '';
+    };
+
+    forceHttps = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Auto-redirect to HTTPS in production. The default is
+        `true` but you may set this to `false`
+        if you can be sure that SSL is terminated at an external load balancer.
+      '';
+    };
+
+    enableUpdateCheck = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Have the installation check for updates by sending anonymized statistics
+        to the maintainers.
+      '';
+    };
+
+    concurrency = lib.mkOption {
+      type = lib.types.int;
+      default = 1;
+      description = lib.mdDoc ''
+        How many processes should be spawned. For a rough estimate, divide your
+        server's available memory by 512.
+      '';
+    };
+
+    maximumImportSize = lib.mkOption {
+      type = lib.types.int;
+      default = 5120000;
+      description = lib.mdDoc ''
+        The maximum size of document imports. Overriding this could be required
+        if you have especially large Word documents with embedded imagery.
+      '';
+    };
+
+    debugOutput = lib.mkOption {
+      type = lib.types.nullOr (lib.types.enum [ "http" ]);
+      default = null;
+      description = lib.mdDoc "Set this to `http` log HTTP requests.";
+    };
+
+    slackIntegration = lib.mkOption {
+      description = lib.mdDoc ''
+        For a complete Slack integration with search and posting to channels
+        this configuration is also needed. See here for details:
+        https://wiki.generaloutline.com/share/be25efd1-b3ef-4450-b8e5-c4a4fc11e02a
+      '';
+      default = null;
+      type = lib.types.nullOr (lib.types.submodule {
+        options = {
+          verificationTokenFile = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "File path containing the verification token.";
+          };
+          appId = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Application ID.";
+          };
+          messageActions = lib.mkOption {
+            type = lib.types.bool;
+            default = true;
+            description = lib.mdDoc "Whether to enable message actions.";
+          };
+        };
+      });
+    };
+
+    googleAnalyticsId = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Optionally enable Google Analytics to track page views in the knowledge
+        base.
+      '';
+    };
+
+    sentryDsn = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Optionally enable [Sentry](https://sentry.io/) to
+        track errors and performance.
+      '';
+    };
+
+    sentryTunnel = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Optionally add a
+        [Sentry proxy tunnel](https://docs.sentry.io/platforms/javascript/troubleshooting/#using-the-tunnel-option)
+        for bypassing ad blockers in the UI.
+      '';
+    };
+
+    logo = lib.mkOption {
+      type = lib.types.nullOr lib.types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Custom logo displayed on the authentication screen. This will be scaled
+        to a height of 60px.
+      '';
+    };
+
+    smtp = lib.mkOption {
+      description = lib.mdDoc ''
+        To support sending outgoing transactional emails such as
+        "document updated" or "you've been invited" you'll need to provide
+        authentication for an SMTP server.
+      '';
+      default = null;
+      type = lib.types.nullOr (lib.types.submodule {
+        options = {
+          host = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Host name or IP address of the SMTP server.";
+          };
+          port = lib.mkOption {
+            type = lib.types.port;
+            description = lib.mdDoc "TCP port of the SMTP server.";
+          };
+          username = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Username to authenticate with.";
+          };
+          passwordFile = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc ''
+              File path containing the password to authenticate with.
+            '';
+          };
+          fromEmail = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Sender email in outgoing mail.";
+          };
+          replyEmail = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Reply address in outgoing mail.";
+          };
+          tlsCiphers = lib.mkOption {
+            type = lib.types.str;
+            default = "";
+            description = lib.mdDoc "Override SMTP cipher configuration.";
+          };
+          secure = lib.mkOption {
+            type = lib.types.bool;
+            default = true;
+            description = lib.mdDoc "Use a secure SMTP connection.";
+          };
+        };
+      });
+    };
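+
+    # Sketch of an SMTP configuration; host, addresses and the password file
+    # path are placeholders.
+    #
+    #   services.outline.smtp = {
+    #     host = "mail.example.com";
+    #     port = 465;
+    #     username = "outline@example.com";
+    #     passwordFile = "/run/keys/outline-smtp-password";
+    #     fromEmail = "outline@example.com";
+    #     replyEmail = "outline@example.com";
+    #   };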
+
+    defaultLanguage = lib.mkOption {
+      type = lib.types.enum [
+         "da_DK"
+         "de_DE"
+         "en_US"
+         "es_ES"
+         "fa_IR"
+         "fr_FR"
+         "it_IT"
+         "ja_JP"
+         "ko_KR"
+         "nl_NL"
+         "pl_PL"
+         "pt_BR"
+         "pt_PT"
+         "ru_RU"
+         "sv_SE"
+         "th_TH"
+         "vi_VN"
+         "zh_CN"
+         "zh_TW"
+      ];
+      default = "en_US";
+      description = lib.mdDoc ''
+        The default interface language. See
+        [translate.getoutline.com](https://translate.getoutline.com/)
+        for a list of available language codes and their rough percentage
+        translated.
+      '';
+    };
+
+    rateLimiter.enable = lib.mkEnableOption (lib.mdDoc "rate limiter for the application web server");
+    rateLimiter.requests = lib.mkOption {
+      type = lib.types.int;
+      default = 5000;
+      description = lib.mdDoc "Maximum number of requests in a throttling window.";
+    };
+    rateLimiter.durationWindow = lib.mkOption {
+      type = lib.types.int;
+      default = 60;
+      description = lib.mdDoc "Length of a throttling window.";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    users.users = lib.optionalAttrs (cfg.user == defaultUser) {
+      ${defaultUser} = {
+        isSystemUser = true;
+        group = cfg.group;
+      };
+    };
+
+    users.groups = lib.optionalAttrs (cfg.group == defaultUser) {
+      ${defaultUser} = { };
+    };
+
+    systemd.tmpfiles.rules = [
+      "f ${cfg.secretKeyFile} 0600 ${cfg.user} ${cfg.group} -"
+      "f ${cfg.utilsSecretFile} 0600 ${cfg.user} ${cfg.group} -"
+      (if (cfg.storage.storageType == "s3") then
+        "f ${cfg.storage.secretKeyFile} 0600 ${cfg.user} ${cfg.group} -"
+      else
+        "d ${cfg.storage.localRootDir} 0700 ${cfg.user} ${cfg.group} - -")
+    ];
+
+    services.postgresql = lib.mkIf (cfg.databaseUrl == "local") {
+      enable = true;
+      ensureUsers = [{
+        name = "outline";
+        ensureDBOwnership = true;
+      }];
+      ensureDatabases = [ "outline" ];
+    };
+
+    services.redis.servers.outline = lib.mkIf (cfg.redisUrl == "local") {
+      enable = true;
+      user = config.services.outline.user;
+      port = 0; # Disable the TCP listener
+    };
+
+    systemd.services.outline = let
+      localRedisUrl = "redis+unix:///run/redis-outline/redis.sock";
+      localPostgresqlUrl = "postgres://localhost/outline?host=/run/postgresql";
+    in {
+      description = "Outline wiki and knowledge base";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ]
+        ++ lib.optional (cfg.databaseUrl == "local") "postgresql.service"
+        ++ lib.optional (cfg.redisUrl == "local") "redis-outline.service";
+      requires = lib.optional (cfg.databaseUrl == "local") "postgresql.service"
+        ++ lib.optional (cfg.redisUrl == "local") "redis-outline.service";
+      path = [
+        pkgs.openssl # Required by the preStart script
+      ];
+
+
+      environment = lib.mkMerge [
+        {
+          NODE_ENV = "production";
+
+          REDIS_URL = if cfg.redisUrl == "local" then localRedisUrl else cfg.redisUrl;
+          URL = cfg.publicUrl;
+          PORT = builtins.toString cfg.port;
+
+          CDN_URL = cfg.cdnUrl;
+          FORCE_HTTPS = builtins.toString cfg.forceHttps;
+          ENABLE_UPDATES = builtins.toString cfg.enableUpdateCheck;
+          WEB_CONCURRENCY = builtins.toString cfg.concurrency;
+          MAXIMUM_IMPORT_SIZE = builtins.toString cfg.maximumImportSize;
+          DEBUG = cfg.debugOutput;
+          GOOGLE_ANALYTICS_ID = lib.optionalString (cfg.googleAnalyticsId != null) cfg.googleAnalyticsId;
+          SENTRY_DSN = lib.optionalString (cfg.sentryDsn != null) cfg.sentryDsn;
+          SENTRY_TUNNEL = lib.optionalString (cfg.sentryTunnel != null) cfg.sentryTunnel;
+          TEAM_LOGO = lib.optionalString (cfg.logo != null) cfg.logo;
+          DEFAULT_LANGUAGE = cfg.defaultLanguage;
+
+          RATE_LIMITER_ENABLED = builtins.toString cfg.rateLimiter.enable;
+          RATE_LIMITER_REQUESTS = builtins.toString cfg.rateLimiter.requests;
+          RATE_LIMITER_DURATION_WINDOW = builtins.toString cfg.rateLimiter.durationWindow;
+
+          FILE_STORAGE = cfg.storage.storageType;
+          FILE_STORAGE_UPLOAD_MAX_SIZE = builtins.toString cfg.storage.uploadMaxSize;
+          FILE_STORAGE_LOCAL_ROOT_DIR = cfg.storage.localRootDir;
+        }
+
+        (lib.mkIf (cfg.storage.storageType == "s3") {
+          AWS_ACCESS_KEY_ID = cfg.storage.accessKey;
+          AWS_REGION = cfg.storage.region;
+          AWS_S3_UPLOAD_BUCKET_URL = cfg.storage.uploadBucketUrl;
+          AWS_S3_UPLOAD_BUCKET_NAME = cfg.storage.uploadBucketName;
+          AWS_S3_FORCE_PATH_STYLE = builtins.toString cfg.storage.forcePathStyle;
+          AWS_S3_ACL = cfg.storage.acl;
+        })
+
+        (lib.mkIf (cfg.slackAuthentication != null) {
+          SLACK_CLIENT_ID = cfg.slackAuthentication.clientId;
+        })
+
+        (lib.mkIf (cfg.googleAuthentication != null) {
+          GOOGLE_CLIENT_ID = cfg.googleAuthentication.clientId;
+        })
+
+        (lib.mkIf (cfg.azureAuthentication != null) {
+          AZURE_CLIENT_ID = cfg.azureAuthentication.clientId;
+          AZURE_RESOURCE_APP_ID = cfg.azureAuthentication.resourceAppId;
+        })
+
+        (lib.mkIf (cfg.oidcAuthentication != null) {
+          OIDC_CLIENT_ID = cfg.oidcAuthentication.clientId;
+          OIDC_AUTH_URI = cfg.oidcAuthentication.authUrl;
+          OIDC_TOKEN_URI = cfg.oidcAuthentication.tokenUrl;
+          OIDC_USERINFO_URI = cfg.oidcAuthentication.userinfoUrl;
+          OIDC_USERNAME_CLAIM = cfg.oidcAuthentication.usernameClaim;
+          OIDC_DISPLAY_NAME = cfg.oidcAuthentication.displayName;
+          OIDC_SCOPES = lib.concatStringsSep " " cfg.oidcAuthentication.scopes;
+        })
+
+        (lib.mkIf (cfg.slackIntegration != null) {
+          SLACK_APP_ID = cfg.slackIntegration.appId;
+          SLACK_MESSAGE_ACTIONS = builtins.toString cfg.slackIntegration.messageActions;
+        })
+
+        (lib.mkIf (cfg.smtp != null) {
+          SMTP_HOST = cfg.smtp.host;
+          SMTP_PORT = builtins.toString cfg.smtp.port;
+          SMTP_USERNAME = cfg.smtp.username;
+          SMTP_FROM_EMAIL = cfg.smtp.fromEmail;
+          SMTP_REPLY_EMAIL = cfg.smtp.replyEmail;
+          SMTP_TLS_CIPHERS = cfg.smtp.tlsCiphers;
+          SMTP_SECURE = builtins.toString cfg.smtp.secure;
+        })
+      ];
+
+      preStart = ''
+        if [ ! -s ${lib.escapeShellArg cfg.secretKeyFile} ]; then
+          openssl rand -hex 32 > ${lib.escapeShellArg cfg.secretKeyFile}
+        fi
+        if [ ! -s ${lib.escapeShellArg cfg.utilsSecretFile} ]; then
+          openssl rand -hex 32 > ${lib.escapeShellArg cfg.utilsSecretFile}
+        fi
+
+      '';
+
+      script = ''
+        export SECRET_KEY="$(head -n1 ${lib.escapeShellArg cfg.secretKeyFile})"
+        export UTILS_SECRET="$(head -n1 ${lib.escapeShellArg cfg.utilsSecretFile})"
+        ${lib.optionalString (cfg.storage.storageType == "s3") ''
+          export AWS_SECRET_ACCESS_KEY="$(head -n1 ${lib.escapeShellArg cfg.storage.secretKeyFile})"
+        ''}
+        ${lib.optionalString (cfg.slackAuthentication != null) ''
+          export SLACK_CLIENT_SECRET="$(head -n1 ${lib.escapeShellArg cfg.slackAuthentication.secretFile})"
+        ''}
+        ${lib.optionalString (cfg.googleAuthentication != null) ''
+          export GOOGLE_CLIENT_SECRET="$(head -n1 ${lib.escapeShellArg cfg.googleAuthentication.clientSecretFile})"
+        ''}
+        ${lib.optionalString (cfg.azureAuthentication != null) ''
+          export AZURE_CLIENT_SECRET="$(head -n1 ${lib.escapeShellArg cfg.azureAuthentication.clientSecretFile})"
+        ''}
+        ${lib.optionalString (cfg.oidcAuthentication != null) ''
+          export OIDC_CLIENT_SECRET="$(head -n1 ${lib.escapeShellArg cfg.oidcAuthentication.clientSecretFile})"
+        ''}
+        ${lib.optionalString (cfg.sslKeyFile != null) ''
+          export SSL_KEY="$(head -n1 ${lib.escapeShellArg cfg.sslKeyFile})"
+        ''}
+        ${lib.optionalString (cfg.sslCertFile != null) ''
+          export SSL_CERT="$(head -n1 ${lib.escapeShellArg cfg.sslCertFile})"
+        ''}
+        ${lib.optionalString (cfg.slackIntegration != null) ''
+          export SLACK_VERIFICATION_TOKEN="$(head -n1 ${lib.escapeShellArg cfg.slackIntegration.verificationTokenFile})"
+        ''}
+        ${lib.optionalString (cfg.smtp != null) ''
+          export SMTP_PASSWORD="$(head -n1 ${lib.escapeShellArg cfg.smtp.passwordFile})"
+        ''}
+
+        ${if (cfg.databaseUrl == "local") then ''
+          export DATABASE_URL=${lib.escapeShellArg localPostgresqlUrl}
+          export PGSSLMODE=disable
+        '' else ''
+          export DATABASE_URL=${lib.escapeShellArg cfg.databaseUrl}
+        ''}
+
+        ${cfg.package}/bin/outline-server
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Restart = "always";
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        UMask = "0007";
+
+        StateDirectory = "outline";
+        StateDirectoryMode = "0750";
+        RuntimeDirectory = "outline";
+        RuntimeDirectoryMode = "0750";
+        # This working directory is required to find stuff like the set of
+        # onboarding files:
+        WorkingDirectory = "${cfg.package}/share/outline";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/peering-manager.nix b/nixpkgs/nixos/modules/services/web-apps/peering-manager.nix
new file mode 100644
index 000000000000..d6f6077268d4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/peering-manager.nix
@@ -0,0 +1,343 @@
+{ config, lib, pkgs, buildEnv, ... }:
+
+let
+  cfg = config.services.peering-manager;
+
+  pythonFmt = pkgs.formats.pythonVars {};
+  settingsFile = pythonFmt.generate "peering-manager-settings.py" cfg.settings;
+  extraConfigFile = pkgs.writeTextFile {
+    name = "peering-manager-extraConfig.py";
+    text = cfg.extraConfig;
+  };
+  configFile = pkgs.concatText "configuration.py" [ settingsFile extraConfigFile ];
+
+  pkg = (pkgs.peering-manager.overrideAttrs (old: {
+    postInstall = ''
+      ln -s ${configFile} $out/opt/peering-manager/peering_manager/configuration.py
+    '' + lib.optionalString cfg.enableLdap ''
+      ln -s ${cfg.ldapConfigPath} $out/opt/peering-manager/peering_manager/ldap_config.py
+    '';
+  })).override {
+    inherit (cfg) plugins;
+  };
+  peeringManagerManageScript = pkgs.writeScriptBin "peering-manager-manage" ''
+    #!${pkgs.stdenv.shell}
+    export PYTHONPATH=${pkg.pythonPath}
+    sudo -u peering-manager ${pkg}/bin/peering-manager "$@"
+  '';
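+
+  # The wrapper above runs Django management commands as the service user and
+  # is installed system-wide below (environment.systemPackages), so standard
+  # administrative tasks can be run from a privileged shell, for example:
+  #   peering-manager-manage createsuperuser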
+
+in {
+  options.services.peering-manager = with lib; {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Enable Peering Manager.
+
+        This module requires a reverse proxy that serves `/static` separately.
+        See this [example](https://github.com/peering-manager/contrib/blob/main/nginx.conf) for how to configure this.
+      '';
+    };
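+
+    # Sketch of a matching reverse proxy, assuming the default listenAddress
+    # and port below; `/static` still has to be wired up separately as shown
+    # in the upstream contrib nginx.conf linked above.
+    #
+    #   services.nginx.virtualHosts."peering.example.com" = {
+    #     locations."/".proxyPass = "http://[::1]:8001";
+    #   };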
+
+    enableScheduledTasks = mkOption {
+      type = types.bool;
+      default = true;
+      description = mdDoc ''
+        Set up [scheduled tasks](https://peering-manager.readthedocs.io/en/stable/setup/8-scheduled-tasks/).
+      '';
+    };
+
+    listenAddress = mkOption {
+      type = types.str;
+      default = "[::1]";
+      description = mdDoc ''
+        Address the server will listen on.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8001;
+      description = mdDoc ''
+        Port the server will listen on.
+      '';
+    };
+
+    plugins = mkOption {
+      type = types.functionTo (types.listOf types.package);
+      default = _: [];
+      defaultText = literalExpression ''
+        python3Packages: with python3Packages; [];
+      '';
+      description = mdDoc ''
+        List of plugin packages to install.
+      '';
+    };
+
+    secretKeyFile = mkOption {
+      type = types.path;
+      description = mdDoc ''
+        Path to a file containing the secret key.
+      '';
+    };
+
+    peeringdbApiKeyFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = mdDoc ''
+        Path to a file containing the PeeringDB API key.
+      '';
+    };
+
+    settings = lib.mkOption {
+      description = lib.mdDoc ''
+        Configuration options to set in `configuration.py`.
+        See the [documentation](https://peering-manager.readthedocs.io/en/stable/configuration/optional-settings/) for more possible options.
+      '';
+
+      default = { };
+
+      type = lib.types.submodule {
+        freeformType = pythonFmt.type;
+
+        options = {
+          ALLOWED_HOSTS = lib.mkOption {
+            type = with lib.types; listOf str;
+            default = ["*"];
+            description = lib.mdDoc ''
+              A list of valid fully-qualified domain names (FQDNs) and/or IP
+              addresses that can be used to reach the Peering Manager service.
+            '';
+          };
+        };
+      };
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = mdDoc ''
+        Additional lines of configuration appended to the `configuration.py`.
+        See the [documentation](https://peering-manager.readthedocs.io/en/stable/configuration/optional-settings/) for more possible options.
+      '';
+    };
+
+    enableLdap = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc ''
+        Enable LDAP authentication for Peering Manager.
+
+        This requires a configuration file to be passed via `ldapConfigPath`.
+      '';
+    };
+
+    ldapConfigPath = mkOption {
+      type = types.path;
+      description = mdDoc ''
+        Path to the configuration file for LDAP authentication; it will be loaded as `ldap_config.py`.
+        See the [documentation](https://peering-manager.readthedocs.io/en/stable/setup/6-ldap/#configuration) for possible options.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.peering-manager = {
+      settings = {
+        DATABASE = {
+          NAME = "peering-manager";
+          USER = "peering-manager";
+          HOST = "/run/postgresql";
+        };
+
+        # Redis database settings. Redis is used for caching and for queuing background tasks such as webhook events. A separate
+        # configuration exists for each. Full connection details are required in both sections, and it is strongly recommended
+        # to use two separate database IDs.
+        REDIS = {
+          tasks = {
+            UNIX_SOCKET_PATH = config.services.redis.servers.peering-manager.unixSocket;
+            DATABASE = 0;
+          };
+          caching = {
+            UNIX_SOCKET_PATH = config.services.redis.servers.peering-manager.unixSocket;
+            DATABASE = 1;
+          };
+        };
+      };
+
+      extraConfig = ''
+        with open("${cfg.secretKeyFile}", "r") as file:
+          SECRET_KEY = file.readline()
+      '' + lib.optionalString (cfg.peeringdbApiKeyFile != null) ''
+        with open("${cfg.peeringdbApiKeyFile}", "r") as file:
+          PEERINGDB_API_KEY = file.readline()
+      '';
+
+      plugins = lib.mkIf cfg.enableLdap (ps: [ ps.django-auth-ldap ]);
+    };
+
+    system.build.peeringManagerPkg = pkg;
+
+    services.redis.servers.peering-manager.enable = true;
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "peering-manager" ];
+      ensureUsers = [
+        {
+          name = "peering-manager";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    environment.systemPackages = [ peeringManagerManageScript ];
+
+    systemd.targets.peering-manager = {
+      description = "Target for all Peering Manager services";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "redis-peering-manager.service" ];
+    };
+
+    systemd.services = let
+      defaults = {
+        environment = {
+          PYTHONPATH = pkg.pythonPath;
+        };
+        serviceConfig = {
+          WorkingDirectory = "/var/lib/peering-manager";
+          User = "peering-manager";
+          Group = "peering-manager";
+          StateDirectory = "peering-manager";
+          StateDirectoryMode = "0750";
+          Restart = "on-failure";
+        };
+      };
+    in {
+      peering-manager-migration = lib.recursiveUpdate defaults {
+        description = "Peering Manager migrations";
+        wantedBy = [ "peering-manager.target" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkg}/bin/peering-manager migrate";
+        };
+      };
+
+      peering-manager = lib.recursiveUpdate defaults {
+        description = "Peering Manager WSGI Service";
+        wantedBy = [ "peering-manager.target" ];
+        after = [ "peering-manager-migration.service" ];
+
+        preStart = ''
+          ${pkg}/bin/peering-manager remove_stale_contenttypes --no-input
+        '';
+
+        serviceConfig = {
+          ExecStart = ''
+            ${pkg.python.pkgs.gunicorn}/bin/gunicorn peering_manager.wsgi \
+              --bind ${cfg.listenAddress}:${toString cfg.port} \
+              --pythonpath ${pkg}/opt/peering-manager
+          '';
+        };
+      };
+
+      peering-manager-rq = lib.recursiveUpdate defaults {
+        description = "Peering Manager Request Queue Worker";
+        wantedBy = [ "peering-manager.target" ];
+        after = [ "peering-manager.service" ];
+        serviceConfig.ExecStart = "${pkg}/bin/peering-manager rqworker high default low";
+      };
+
+      peering-manager-housekeeping = lib.recursiveUpdate defaults {
+        description = "Peering Manager housekeeping job";
+        after = [ "peering-manager.service" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkg}/bin/peering-manager housekeeping";
+        };
+      };
+
+      peering-manager-peeringdb-sync = lib.recursiveUpdate defaults {
+        description = "PeeringDB sync";
+        after = [ "peering-manager.service" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkg}/bin/peering-manager peeringdb_sync";
+        };
+      };
+
+      peering-manager-prefix-fetch = lib.recursiveUpdate defaults {
+        description = "Fetch IRR AS-SET prefixes";
+        after = [ "peering-manager.service" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkg}/bin/peering-manager grab_prefixes";
+        };
+      };
+
+      peering-manager-configuration-deployment = lib.recursiveUpdate defaults {
+        description = "Push configuration to routers";
+        after = [ "peering-manager.service" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkg}/bin/peering-manager configure_routers";
+        };
+      };
+
+      peering-manager-session-poll = lib.recursiveUpdate defaults {
+        description = "Poll peering sessions from routers";
+        after = [ "peering-manager.service" ];
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "${pkg}/bin/peering-manager poll_bgp_sessions --all";
+        };
+      };
+    };
+
+    systemd.timers = {
+      peering-manager-housekeeping = {
+        description = "Run Peering Manager housekeeping job";
+        wantedBy = [ "timers.target" ];
+        timerConfig.OnCalendar = "daily";
+      };
+
+      peering-manager-peeringdb-sync = {
+        enable = lib.mkDefault cfg.enableScheduledTasks;
+        description = "Sync PeeringDB at 2:30";
+        wantedBy = [ "timers.target" ];
+        timerConfig.OnCalendar = "02:30:00";
+      };
+
+      peering-manager-prefix-fetch = {
+        enable = lib.mkDefault cfg.enableScheduledTasks;
+        description = "Fetch IRR AS-SET prefixes at 4:30";
+        wantedBy = [ "timers.target" ];
+        timerConfig.OnCalendar = "04:30:00";
+      };
+
+      peering-manager-configuration-deployment = {
+        enable = lib.mkDefault cfg.enableScheduledTasks;
+        description = "Push router configuration every hour 5 minutes before full hour";
+        wantedBy = [ "timers.target" ];
+        timerConfig.OnCalendar = "*:55:00";
+      };
+
+      peering-manager-session-poll = {
+        enable = lib.mkDefault cfg.enableScheduledTasks;
+        description = "Poll peering sessions from routers every hour";
+        wantedBy = [ "timers.target" ];
+        timerConfig.OnCalendar = "*:00:00";
+      };
+    };
+
+    users.users.peering-manager = {
+      home = "/var/lib/peering-manager";
+      isSystemUser = true;
+      group = "peering-manager";
+    };
+    users.groups.peering-manager = {};
+    users.groups."${config.services.redis.servers.peering-manager.user}".members = [ "peering-manager" ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ yuka ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/peertube.nix b/nixpkgs/nixos/modules/services/web-apps/peertube.nix
new file mode 100644
index 000000000000..a22467611410
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/peertube.nix
@@ -0,0 +1,861 @@
+{ lib, pkgs, config, options, ... }:
+
+let
+  cfg = config.services.peertube;
+  opt = options.services.peertube;
+
+  settingsFormat = pkgs.formats.json {};
+  configFile = settingsFormat.generate "production.json" cfg.settings;
+
+  env = {
+    NODE_CONFIG_DIR = "/var/lib/peertube/config";
+    NODE_ENV = "production";
+    NODE_EXTRA_CA_CERTS = "/etc/ssl/certs/ca-certificates.crt";
+    NPM_CONFIG_CACHE = "/var/cache/peertube/.npm";
+    NPM_CONFIG_PREFIX = cfg.package;
+    HOME = cfg.package;
+  };
+
+  systemCallsList = [ "@cpu-emulation" "@debug" "@keyring" "@ipc" "@memlock" "@mount" "@obsolete" "@privileged" "@setuid" ];
+
+  cfgService = {
+    # Proc filesystem
+    ProcSubset = "pid";
+    ProtectProc = "invisible";
+    # Access write directories
+    UMask = "0027";
+    # Capabilities
+    CapabilityBoundingSet = "";
+    # Security
+    NoNewPrivileges = true;
+    # Sandboxing
+    ProtectSystem = "strict";
+    ProtectHome = true;
+    PrivateTmp = true;
+    PrivateDevices = true;
+    PrivateUsers = true;
+    ProtectClock = true;
+    ProtectHostname = true;
+    ProtectKernelLogs = true;
+    ProtectKernelModules = true;
+    ProtectKernelTunables = true;
+    ProtectControlGroups = true;
+    RestrictNamespaces = true;
+    LockPersonality = true;
+    RestrictRealtime = true;
+    RestrictSUIDSGID = true;
+    RemoveIPC = true;
+    PrivateMounts = true;
+    # System Call Filtering
+    SystemCallArchitectures = "native";
+  };
+
+  envFile = pkgs.writeText "peertube.env" (lib.concatMapStrings (s: s + "\n") (
+    (lib.concatLists (lib.mapAttrsToList (name: value:
+      lib.optional (value != null) ''${name}="${toString value}"''
+    ) env))));
+
+  peertubeEnv = pkgs.writeShellScriptBin "peertube-env" ''
+    set -a
+    source "${envFile}"
+    eval -- "\$@"
+  '';
+
+  peertubeCli = pkgs.writeShellScriptBin "peertube" ''
+    node ~/dist/server/tools/peertube.js "$@"
+  '';
+
+  nginxCommonHeaders = lib.optionalString cfg.enableWebHttps ''
+    add_header Strict-Transport-Security      'max-age=63072000; includeSubDomains';
+  '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
+    add_header Alt-Svc                        'h3=":443"; ma=86400';
+  '' + ''
+    add_header Access-Control-Allow-Origin    '*';
+    add_header Access-Control-Allow-Methods   'GET, OPTIONS';
+    add_header Access-Control-Allow-Headers   'Range,DNT,X-CustomHeader,Keep-Alive,User-Agent,X-Requested-With,If-Modified-Since,Cache-Control,Content-Type';
+  '';
+
+in {
+  options.services.peertube = {
+    enable = lib.mkEnableOption (lib.mdDoc "Peertube");
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      default = "peertube";
+      description = lib.mdDoc "User account under which Peertube runs.";
+    };
+
+    group = lib.mkOption {
+      type = lib.types.str;
+      default = "peertube";
+      description = lib.mdDoc "Group under which Peertube runs.";
+    };
+
+    localDomain = lib.mkOption {
+      type = lib.types.str;
+      example = "peertube.example.com";
+      description = lib.mdDoc "The domain serving your PeerTube instance.";
+    };
+
+    listenHttp = lib.mkOption {
+      type = lib.types.port;
+      default = 9000;
+      description = lib.mdDoc "listen port for HTTP server.";
+    };
+
+    listenWeb = lib.mkOption {
+      type = lib.types.port;
+      default = 9000;
+      description = lib.mdDoc "listen port for WEB server.";
+    };
+
+    enableWebHttps = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "Enable or disable HTTPS protocol.";
+    };
+
+    dataDirs = lib.mkOption {
+      type = lib.types.listOf lib.types.path;
+      default = [ ];
+      example = [ "/opt/peertube/storage" "/var/cache/peertube" ];
+      description = lib.mdDoc "Allow access to custom data locations.";
+    };
+
+    serviceEnvironmentFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      example = "/run/keys/peertube/password-init-root";
+      description = lib.mdDoc ''
+        Set environment variables for the service. Mainly useful for setting the initial root password.
+        For example, the file could contain:
+        `PT_INITIAL_ROOT_PASSWORD=changeme`
+      '';
+    };
+
+    settings = lib.mkOption {
+      type = settingsFormat.type;
+      example = lib.literalExpression ''
+        {
+          listen = {
+            hostname = "0.0.0.0";
+          };
+          log = {
+            level = "debug";
+          };
+          storage = {
+            tmp = "/opt/data/peertube/storage/tmp/";
+            logs = "/opt/data/peertube/storage/logs/";
+            cache = "/opt/data/peertube/storage/cache/";
+          };
+        }
+      '';
+      description = lib.mdDoc "Configuration for peertube.";
+    };
+
+    configureNginx = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "Configure nginx as a reverse proxy for peertube.";
+    };
+
+    secrets = {
+      secretsFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/secrets/peertube";
+        description = lib.mdDoc ''
+          Path to a file containing the secrets needed to run PeerTube.
+          Generate one using `openssl rand -hex 32`.
+        '';
+      };
+    };
+
+    database = {
+      createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Configure local PostgreSQL database server for PeerTube.";
+      };
+
+      host = lib.mkOption {
+        type = lib.types.str;
+        default = if cfg.database.createLocally then "/run/postgresql" else null;
+        defaultText = lib.literalExpression ''
+          if config.${opt.database.createLocally}
+          then "/run/postgresql"
+          else null
+        '';
+        example = "192.168.15.47";
+        description = lib.mdDoc "Database host address or unix socket.";
+      };
+
+      port = lib.mkOption {
+        type = lib.types.port;
+        default = 5432;
+        description = lib.mdDoc "Database host port.";
+      };
+
+      name = lib.mkOption {
+        type = lib.types.str;
+        default = "peertube";
+        description = lib.mdDoc "Database name.";
+      };
+
+      user = lib.mkOption {
+        type = lib.types.str;
+        default = "peertube";
+        description = lib.mdDoc "Database user.";
+      };
+
+      passwordFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/keys/peertube/password-postgresql";
+        description = lib.mdDoc "Password for PostgreSQL database.";
+      };
+    };
+
+    redis = {
+      createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Configure local Redis server for PeerTube.";
+      };
+
+      host = lib.mkOption {
+        type = lib.types.nullOr lib.types.str;
+        default = if cfg.redis.createLocally && !cfg.redis.enableUnixSocket then "127.0.0.1" else null;
+        defaultText = lib.literalExpression ''
+          if config.${opt.redis.createLocally} && !config.${opt.redis.enableUnixSocket}
+          then "127.0.0.1"
+          else null
+        '';
+        description = lib.mdDoc "Redis host.";
+      };
+
+      port = lib.mkOption {
+        type = lib.types.nullOr lib.types.port;
+        default = if cfg.redis.createLocally && cfg.redis.enableUnixSocket then null else 31638;
+        defaultText = lib.literalExpression ''
+          if config.${opt.redis.createLocally} && config.${opt.redis.enableUnixSocket}
+          then null
+          else 31638
+        '';
+        description = lib.mdDoc "Redis port.";
+      };
+
+      passwordFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/keys/peertube/password-redis-db";
+        description = lib.mdDoc "Password for redis database.";
+      };
+
+      enableUnixSocket = lib.mkOption {
+        type = lib.types.bool;
+        default = cfg.redis.createLocally;
+        defaultText = lib.literalExpression "config.${opt.redis.createLocally}";
+        description = lib.mdDoc "Use Unix socket.";
+      };
+    };
+
+    smtp = {
+      createLocally = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Configure local Postfix SMTP server for PeerTube.";
+      };
+
+      passwordFile = lib.mkOption {
+        type = lib.types.nullOr lib.types.path;
+        default = null;
+        example = "/run/keys/peertube/password-smtp";
+        description = lib.mdDoc "Password for smtp server.";
+      };
+    };
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.peertube;
+      defaultText = lib.literalExpression "pkgs.peertube";
+      description = lib.mdDoc "Peertube package to use.";
+    };
+  };
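+
+  # Minimal usage sketch (domain and secret path are placeholders): provision
+  # PostgreSQL and Redis locally, let the module configure nginx, and point
+  # secretsFile at a secret generated with `openssl rand -hex 32`.
+  #
+  #   services.peertube = {
+  #     enable = true;
+  #     localDomain = "peertube.example.com";
+  #     configureNginx = true;
+  #     secrets.secretsFile = "/run/keys/peertube-secret";
+  #     database.createLocally = true;
+  #     redis.createLocally = true;
+  #   };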
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.serviceEnvironmentFile == null || !lib.hasPrefix builtins.storeDir cfg.serviceEnvironmentFile;
+          message = ''
+            <option>services.peertube.serviceEnvironmentFile</option> points to
+            a file in the Nix store. You should use a quoted absolute path to
+            prevent this.
+          '';
+      }
+      { assertion = cfg.secrets.secretsFile != null;
+          message = ''
+            <option>services.peertube.secrets.secretsFile</option> needs to be set.
+          '';
+      }
+      { assertion = !(cfg.redis.enableUnixSocket && (cfg.redis.host != null || cfg.redis.port != null));
+          message = ''
+            <option>services.peertube.redis.createLocally</option> and a Redis network connection (<option>services.peertube.redis.host</option> or <option>services.peertube.redis.port</option>) are enabled at the same time. Disable one of them.
+        '';
+      }
+      { assertion = cfg.redis.enableUnixSocket || (cfg.redis.host != null && cfg.redis.port != null);
+          message = ''
+            <option>services.peertube.redis.host</option> and <option>services.peertube.redis.port</option> need to be set if <option>services.peertube.redis.enableUnixSocket</option> is not enabled.
+        '';
+      }
+      { assertion = cfg.redis.passwordFile == null || !lib.hasPrefix builtins.storeDir cfg.redis.passwordFile;
+          message = ''
+            <option>services.peertube.redis.passwordFile</option> points to
+            a file in the Nix store. You should use a quoted absolute path to
+            prevent this.
+          '';
+      }
+      { assertion = cfg.database.passwordFile == null || !lib.hasPrefix builtins.storeDir cfg.database.passwordFile;
+          message = ''
+            <option>services.peertube.database.passwordFile</option> points to
+            a file in the Nix store. You should use a quoted absolute path to
+            prevent this.
+          '';
+      }
+      { assertion = cfg.smtp.passwordFile == null || !lib.hasPrefix builtins.storeDir cfg.smtp.passwordFile;
+          message = ''
+            <option>services.peertube.smtp.passwordFile</option> points to
+            a file in the Nix store. You should use a quoted absolute path to
+            prevent this.
+          '';
+      }
+    ];
+
+    services.peertube.settings = lib.mkMerge [
+      {
+        listen = {
+          port = cfg.listenHttp;
+        };
+        webserver = {
+          https = cfg.enableWebHttps;
+          hostname = "${cfg.localDomain}";
+          port = cfg.listenWeb;
+        };
+        database = {
+          hostname = "${cfg.database.host}";
+          port = cfg.database.port;
+          name = "${cfg.database.name}";
+          username = "${cfg.database.user}";
+        };
+        redis = {
+          hostname = "${toString cfg.redis.host}";
+          port = (lib.optionalString (cfg.redis.port != null) cfg.redis.port);
+        };
+        storage = {
+          tmp = lib.mkDefault "/var/lib/peertube/storage/tmp/";
+          tmp_persistent = lib.mkDefault "/var/lib/peertube/storage/tmp_persistent/";
+          bin = lib.mkDefault "/var/lib/peertube/storage/bin/";
+          avatars = lib.mkDefault "/var/lib/peertube/storage/avatars/";
+          videos = lib.mkDefault "/var/lib/peertube/storage/videos/";
+          streaming_playlists = lib.mkDefault "/var/lib/peertube/storage/streaming-playlists/";
+          redundancy = lib.mkDefault "/var/lib/peertube/storage/redundancy/";
+          logs = lib.mkDefault "/var/lib/peertube/storage/logs/";
+          previews = lib.mkDefault "/var/lib/peertube/storage/previews/";
+          thumbnails = lib.mkDefault "/var/lib/peertube/storage/thumbnails/";
+          torrents = lib.mkDefault "/var/lib/peertube/storage/torrents/";
+          captions = lib.mkDefault "/var/lib/peertube/storage/captions/";
+          cache = lib.mkDefault "/var/lib/peertube/storage/cache/";
+          plugins = lib.mkDefault "/var/lib/peertube/storage/plugins/";
+          well_known = lib.mkDefault "/var/lib/peertube/storage/well_known/";
+          client_overrides = lib.mkDefault "/var/lib/peertube/storage/client-overrides/";
+        };
+        import = {
+          videos = {
+            http = {
+              youtube_dl_release = {
+                python_path = "${pkgs.python3}/bin/python";
+              };
+            };
+          };
+        };
+      }
+      (lib.mkIf cfg.redis.enableUnixSocket { redis = { socket = "/run/redis-peertube/redis.sock"; }; })
+    ];
+
+    systemd.tmpfiles.rules = [
+      "d '/var/lib/peertube/config' 0700 ${cfg.user} ${cfg.group} - -"
+      "z '/var/lib/peertube/config' 0700 ${cfg.user} ${cfg.group} - -"
+      "d '/var/lib/peertube/www' 0750 ${cfg.user} ${cfg.group} - -"
+      "z '/var/lib/peertube/www' 0750 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.peertube-init-db = lib.mkIf cfg.database.createLocally {
+      description = "Initialization database for PeerTube daemon";
+      after = [ "network.target" "postgresql.service" ];
+      requires = [ "postgresql.service" ];
+
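+      # The SQL below only creates the role and database when they are missing (via \gexec) and installs the required extensions, so this oneshot unit is safe to re-run.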
+      script = let
+        psqlSetupCommands = pkgs.writeText "peertube-init.sql" ''
+          SELECT 'CREATE USER "${cfg.database.user}"' WHERE NOT EXISTS (SELECT FROM pg_roles WHERE rolname = '${cfg.database.user}')\gexec
+          SELECT 'CREATE DATABASE "${cfg.database.name}" OWNER "${cfg.database.user}" TEMPLATE template0 ENCODING UTF8' WHERE NOT EXISTS (SELECT FROM pg_database WHERE datname = '${cfg.database.name}')\gexec
+          \c '${cfg.database.name}'
+          CREATE EXTENSION IF NOT EXISTS pg_trgm;
+          CREATE EXTENSION IF NOT EXISTS unaccent;
+        '';
+      in "${config.services.postgresql.package}/bin/psql -f ${psqlSetupCommands}";
+
+      serviceConfig = {
+        Type = "oneshot";
+        WorkingDirectory = cfg.package;
+        # User and group
+        User = "postgres";
+        Group = "postgres";
+        # Sandboxing
+        RestrictAddressFamilies = [ "AF_UNIX" ];
+        MemoryDenyWriteExecute = true;
+        # System Call Filtering
+        SystemCallFilter = "~" + lib.concatStringsSep " " (systemCallsList ++ [ "@resources" ]);
+      } // cfgService;
+    };
+
+    systemd.services.peertube = {
+      description = "PeerTube daemon";
+      after = [ "network.target" ]
+        ++ lib.optional cfg.redis.createLocally "redis-peertube.service"
+        ++ lib.optionals cfg.database.createLocally [ "postgresql.service" "peertube-init-db.service" ];
+      requires = lib.optional cfg.redis.createLocally "redis-peertube.service"
+        ++ lib.optionals cfg.database.createLocally [ "postgresql.service" "peertube-init-db.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = env;
+
+      path = with pkgs; [ bashInteractive ffmpeg nodejs_18 openssl yarn python3 ];
+
+      script = ''
+        #!/bin/sh
+        umask 077
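+        # Assemble local.yaml at runtime so that secrets read from the various *File options end up in a file with 0600 permissions instead of the world-readable Nix store.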
+        cat > /var/lib/peertube/config/local.yaml <<EOF
+        ${lib.optionalString (cfg.secrets.secretsFile != null) ''
+        secrets:
+          peertube: '$(cat ${cfg.secrets.secretsFile})'
+        ''}
+        ${lib.optionalString ((!cfg.database.createLocally) && (cfg.database.passwordFile != null)) ''
+        database:
+          password: '$(cat ${cfg.database.passwordFile})'
+        ''}
+        ${lib.optionalString (cfg.redis.passwordFile != null) ''
+        redis:
+          auth: '$(cat ${cfg.redis.passwordFile})'
+        ''}
+        ${lib.optionalString (cfg.smtp.passwordFile != null) ''
+        smtp:
+          password: '$(cat ${cfg.smtp.passwordFile})'
+        ''}
+        EOF
+        umask 027
+        ln -sf ${configFile} /var/lib/peertube/config/production.json
+        ln -sf ${cfg.package}/config/default.yaml /var/lib/peertube/config/default.yaml
+        ln -sf ${cfg.package}/client/dist -T /var/lib/peertube/www/client
+        ln -sf ${cfg.settings.storage.client_overrides} -T /var/lib/peertube/www/client-overrides
+        npm start
+      '';
+      serviceConfig = {
+        Type = "simple";
+        Restart = "always";
+        RestartSec = 20;
+        TimeoutSec = 60;
+        WorkingDirectory = cfg.package;
+        SyslogIdentifier = "peertube";
+        # User and group
+        User = cfg.user;
+        Group = cfg.group;
+        # State directory and mode
+        StateDirectory = "peertube";
+        StateDirectoryMode = "0750";
+        # Cache directory and mode
+        CacheDirectory = "peertube";
+        CacheDirectoryMode = "0750";
+        # Access write directories
+        ReadWritePaths = cfg.dataDirs;
+        # Environment
+        EnvironmentFile = cfg.serviceEnvironmentFile;
+        # Sandboxing
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" "AF_NETLINK" ];
+        MemoryDenyWriteExecute = false;
+        # System Call Filtering
+        SystemCallFilter = [ ("~" + lib.concatStringsSep " " systemCallsList) "pipe" "pipe2" ];
+      } // cfgService;
+    };
+
+    services.nginx = lib.mkIf cfg.configureNginx {
+      enable = true;
+      virtualHosts."${cfg.localDomain}" = {
+        root = "/var/lib/peertube/www";
+
+        # Application
+        locations."/" = {
+          tryFiles = "/dev/null @api";
+          priority = 1110;
+        };
+
+        locations."= /api/v1/videos/upload-resumable" = {
+          tryFiles = "/dev/null @api";
+          priority = 1120;
+
+          extraConfig = ''
+            client_max_body_size                        0;
+            proxy_request_buffering                     off;
+          '';
+        };
+
+        locations."~ ^/api/v1/videos/(upload|([^/]+/studio/edit))$" = {
+          tryFiles = "/dev/null @api";
+          root = cfg.settings.storage.tmp;
+          priority = 1130;
+
+          extraConfig = ''
+            client_max_body_size                        12G;
+            add_header X-File-Maximum-Size              8G always;
+          '' + lib.optionalString cfg.enableWebHttps ''
+            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
+          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
+            add_header Alt-Svc                          'h3=":443"; ma=86400';
+          '';
+        };
+
+        locations."~ ^/api/v1/runners/jobs/[^/]+/(update|success)$" = {
+          tryFiles = "/dev/null @api";
+          root = cfg.settings.storage.tmp;
+          priority = 1135;
+
+          extraConfig = ''
+            client_max_body_size                        12G;
+            add_header X-File-Maximum-Size              8G always;
+          '' + lib.optionalString cfg.enableWebHttps ''
+            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
+          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
+            add_header Alt-Svc                          'h3=":443"; ma=86400';
+          '';
+        };
+
+        locations."~ ^/api/v1/(videos|video-playlists|video-channels|users/me)" = {
+          tryFiles = "/dev/null @api";
+          priority = 1140;
+
+          extraConfig = ''
+            client_max_body_size                        6M;
+            add_header X-File-Maximum-Size              4M always;
+          '' + lib.optionalString cfg.enableWebHttps ''
+            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
+          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
+            add_header Alt-Svc                          'h3=":443"; ma=86400';
+          '';
+        };
+
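+        # Catch-all named location: proxy remaining API and HTML requests to the PeerTube Node.js process.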
+        locations."@api" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          priority = 1150;
+
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
+            proxy_set_header Host                       $host;
+            proxy_set_header X-Real-IP                  $remote_addr;
+
+            proxy_connect_timeout                       10m;
+
+            proxy_send_timeout                          10m;
+            proxy_read_timeout                          10m;
+
+            client_max_body_size                        100k;
+            send_timeout                                10m;
+          '';
+        };
+
+        # Websocket
+        locations."/socket.io" = {
+          tryFiles = "/dev/null @api_websocket";
+          priority = 1210;
+        };
+
+        locations."/tracker/socket" = {
+          tryFiles = "/dev/null @api_websocket";
+          priority = 1220;
+
+          extraConfig = ''
+            proxy_read_timeout                          15m;
+          '';
+        };
+
+        locations."~ ^/plugins/[^/]+(/[^/]+)?/ws/" = {
+          tryFiles = "/dev/null @api_websocket";
+          priority = 1230;
+        };
+
+        locations."@api_websocket" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          priority = 1240;
+
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
+            proxy_set_header Host                       $host;
+            proxy_set_header X-Real-IP                  $remote_addr;
+            proxy_set_header Upgrade                    $http_upgrade;
+            proxy_set_header Connection                 'upgrade';
+
+            proxy_http_version                          1.1;
+          '';
+        };
+
+        # Bypass PeerTube for performance reasons.
+        locations."~ ^/client/(assets/images/(icons/icon-36x36\.png|icons/icon-48x48\.png|icons/icon-72x72\.png|icons/icon-96x96\.png|icons/icon-144x144\.png|icons/icon-192x192\.png|icons/icon-512x512\.png|logo\.svg|favicon\.png|default-playlist\.jpg|default-avatar-account\.png|default-avatar-account-48x48\.png|default-avatar-video-channel\.png|default-avatar-video-channel-48x48\.png))$" = {
+          tryFiles = "/client-overrides/$1 /client/$1 $1";
+          priority = 1310;
+        };
+
+        locations."~ ^/client/(.*\.(js|css|png|svg|woff2|otf|ttf|woff|eot))$" = {
+          alias = "${cfg.package}/client/dist/$1";
+          priority = 1320;
+          extraConfig = ''
+            add_header Cache-Control                    'public, max-age=604800, immutable';
+          '' + lib.optionalString cfg.enableWebHttps ''
+            add_header Strict-Transport-Security        'max-age=63072000; includeSubDomains';
+          '' + lib.optionalString config.services.nginx.virtualHosts.${cfg.localDomain}.http3 ''
+            add_header Alt-Svc                          'h3=":443"; ma=86400';
+          '';
+        };
+
+        locations."^~ /download/" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          priority = 1410;
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
+            proxy_set_header Host                       $host;
+            proxy_set_header X-Real-IP                  $remote_addr;
+
+            proxy_limit_rate                            5M;
+          '';
+        };
+
+        locations."^~ /static/streaming-playlists/private/" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          priority = 1420;
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
+            proxy_set_header Host                       $host;
+            proxy_set_header X-Real-IP                  $remote_addr;
+
+            proxy_limit_rate                            5M;
+          '';
+        };
+
+        locations."^~ /static/web-videos/private/" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          priority = 1430;
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
+            proxy_set_header Host                       $host;
+            proxy_set_header X-Real-IP                  $remote_addr;
+
+            proxy_limit_rate                            5M;
+          '';
+        };
+
+        locations."^~ /static/webseed/private/" = {
+          proxyPass = "http://127.0.0.1:${toString cfg.listenHttp}";
+          priority = 1440;
+          extraConfig = ''
+            proxy_set_header X-Forwarded-For            $proxy_add_x_forwarded_for;
+            proxy_set_header Host                       $host;
+            proxy_set_header X-Real-IP                  $remote_addr;
+
+            proxy_limit_rate                            5M;
+          '';
+        };
+
+        locations."^~ /static/redundancy/" = {
+          tryFiles = "$uri @api";
+          root = cfg.settings.storage.redundancy;
+          priority = 1450;
+          extraConfig = ''
+            set $peertube_limit_rate                    800k;
+
+            if ($request_uri ~ -fragmented.mp4$) {
+              set $peertube_limit_rate                  5M;
+            }
+
+            if ($request_method = 'OPTIONS') {
+              ${nginxCommonHeaders}
+              add_header Access-Control-Max-Age         1728000;
+              add_header Content-Type                   'text/plain charset=UTF-8';
+              add_header Content-Length                 0;
+              return                                    204;
+            }
+            if ($request_method = 'GET') {
+              ${nginxCommonHeaders}
+
+              access_log                                off;
+            }
+
+            aio                                         threads;
+            sendfile                                    on;
+            sendfile_max_chunk                          1M;
+
+            limit_rate                                  $peertube_limit_rate;
+            limit_rate_after                            5M;
+
+            rewrite ^/static/redundancy/(.*)$           /$1 break;
+          '';
+        };
+
+        locations."^~ /static/streaming-playlists/" = {
+          tryFiles = "$uri @api";
+          root = cfg.settings.storage.streaming_playlists;
+          priority = 1460;
+          extraConfig = ''
+            set $peertube_limit_rate                    800k;
+
+            if ($request_uri ~ -fragmented.mp4$) {
+              set $peertube_limit_rate                  5M;
+            }
+
+            if ($request_method = 'OPTIONS') {
+              ${nginxCommonHeaders}
+              add_header Access-Control-Max-Age         1728000;
+              add_header Content-Type                   'text/plain charset=UTF-8';
+              add_header Content-Length                 0;
+              return                                    204;
+            }
+            if ($request_method = 'GET') {
+              ${nginxCommonHeaders}
+
+              access_log                                off;
+            }
+
+            aio                                         threads;
+            sendfile                                    on;
+            sendfile_max_chunk                          1M;
+
+            limit_rate                                  $peertube_limit_rate;
+            limit_rate_after                            5M;
+
+            rewrite ^/static/streaming-playlists/(.*)$  /$1 break;
+          '';
+        };
+
+        locations."^~ /static/web-videos/" = {
+          tryFiles = "$uri @api";
+          root = cfg.settings.storage.streaming_playlists;
+          priority = 1470;
+          extraConfig = ''
+            set $peertube_limit_rate                    800k;
+
+            if ($request_uri ~ -fragmented.mp4$) {
+              set $peertube_limit_rate                  5M;
+            }
+
+            if ($request_method = 'OPTIONS') {
+              ${nginxCommonHeaders}
+              add_header Access-Control-Max-Age         1728000;
+              add_header Content-Type                   'text/plain charset=UTF-8';
+              add_header Content-Length                 0;
+              return                                    204;
+            }
+            if ($request_method = 'GET') {
+              ${nginxCommonHeaders}
+
+              access_log                                off;
+            }
+
+            aio                                         threads;
+            sendfile                                    on;
+            sendfile_max_chunk                          1M;
+
+            limit_rate                                  $peertube_limit_rate;
+            limit_rate_after                            5M;
+
+            rewrite ^/static/web-videos/(.*)$           /$1 break;
+          '';
+        };
+
+        locations."^~ /static/webseed/" = {
+          tryFiles = "$uri @api";
+          root = cfg.settings.storage.videos;
+          priority = 1480;
+          extraConfig = ''
+            set $peertube_limit_rate                    800k;
+
+            if ($request_uri ~ -fragmented.mp4$) {
+              set $peertube_limit_rate                  5M;
+            }
+
+            if ($request_method = 'OPTIONS') {
+              ${nginxCommonHeaders}
+              add_header Access-Control-Max-Age         1728000;
+              add_header Content-Type                   'text/plain charset=UTF-8';
+              add_header Content-Length                 0;
+              return                                    204;
+            }
+            if ($request_method = 'GET') {
+              ${nginxCommonHeaders}
+
+              access_log                                off;
+            }
+
+            aio                                         threads;
+            sendfile                                    on;
+            sendfile_max_chunk                          1M;
+
+            limit_rate                                  $peertube_limit_rate;
+            limit_rate_after                            5M;
+
+            rewrite ^/static/webseed/(.*)$              /$1 break;
+          '';
+        };
+
+        extraConfig = lib.optionalString cfg.enableWebHttps ''
+          add_header Strict-Transport-Security          'max-age=63072000; includeSubDomains';
+        '';
+      };
+    };
+
+    services.postgresql = lib.mkIf cfg.database.createLocally {
+      enable = true;
+    };
+
+    services.redis.servers.peertube = lib.mkMerge [
+      (lib.mkIf cfg.redis.createLocally {
+        enable = true;
+      })
+      (lib.mkIf (cfg.redis.createLocally && !cfg.redis.enableUnixSocket) {
+        bind = "127.0.0.1";
+        port = cfg.redis.port;
+      })
+      (lib.mkIf (cfg.redis.createLocally && cfg.redis.enableUnixSocket) {
+        unixSocket = "/run/redis-peertube/redis.sock";
+        unixSocketPerm = 660;
+      })
+    ];
+
+    services.postfix = lib.mkIf cfg.smtp.createLocally {
+      enable = true;
+      hostname = lib.mkDefault "${cfg.localDomain}";
+    };
+
+    users.users = lib.mkMerge [
+      (lib.mkIf (cfg.user == "peertube") {
+        peertube = {
+          isSystemUser = true;
+          group = cfg.group;
+          home = cfg.package;
+        };
+      })
+      (lib.attrsets.setAttrByPath [ cfg.user "packages" ] [ cfg.package peertubeEnv peertubeCli pkgs.ffmpeg pkgs.nodejs_18 pkgs.yarn ])
+      (lib.mkIf cfg.redis.enableUnixSocket {${config.services.peertube.user}.extraGroups = [ "redis-peertube" ];})
+    ];
+
+    users.groups = {
+      ${cfg.group} = {
+        members = lib.optional cfg.configureNginx config.services.nginx.user;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/pgpkeyserver-lite.nix b/nixpkgs/nixos/modules/services/web-apps/pgpkeyserver-lite.nix
new file mode 100644
index 000000000000..dd51bacd75ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/pgpkeyserver-lite.nix
@@ -0,0 +1,78 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.pgpkeyserver-lite;
+  sksCfg = config.services.sks;
+  sksOpt = options.services.sks;
+
+  webPkg = cfg.package;
+
+in
+
+{
+
+  options = {
+
+    services.pgpkeyserver-lite = {
+
+      enable = mkEnableOption (lib.mdDoc "pgpkeyserver-lite on an nginx vHost proxying to a GPG keyserver");
+
+      package = mkOption {
+        default = pkgs.pgpkeyserver-lite;
+        defaultText = literalExpression "pkgs.pgpkeyserver-lite";
+        type = types.package;
+        description = lib.mdDoc ''
+          Which webgui derivation to use.
+        '';
+      };
+
+      hostname = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The hostname to use for the nginx vHost that proxies to sks.
+        '';
+      };
+
+      hkpAddress = mkOption {
+        default = builtins.head sksCfg.hkpAddress;
+        defaultText = literalExpression "head config.${sksOpt.hkpAddress}";
+        type = types.str;
+        description = lib.mdDoc ''
+          Which IP address the sks-keyserver is listening on.
+        '';
+      };
+
+      hkpPort = mkOption {
+        default = sksCfg.hkpPort;
+        defaultText = literalExpression "config.${sksOpt.hkpPort}";
+        type = types.int;
+        description = lib.mdDoc ''
+          Which port the sks-keyserver is listening on.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.nginx.enable = true;
+
+    services.nginx.virtualHosts = let
+      hkpPort = builtins.toString cfg.hkpPort;
+    in {
+      ${cfg.hostname} = {
+        root = webPkg;
+        locations = {
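+          # Serve the static webgui from the package root and proxy HKP requests under /pks to the sks keyserver.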
+          "/pks".extraConfig = ''
+            proxy_pass         http://${cfg.hkpAddress}:${hkpPort};
+            proxy_pass_header  Server;
+            add_header         Via "1.1 ${cfg.hostname}";
+          '';
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/photoprism.nix b/nixpkgs/nixos/modules/services/web-apps/photoprism.nix
new file mode 100644
index 000000000000..423ad5375baa
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/photoprism.nix
@@ -0,0 +1,155 @@
+{ config, pkgs, lib, ... }:
+let
+  cfg = config.services.photoprism;
+
+  env = {
+    PHOTOPRISM_ORIGINALS_PATH = cfg.originalsPath;
+    PHOTOPRISM_STORAGE_PATH = cfg.storagePath;
+    PHOTOPRISM_IMPORT_PATH = cfg.importPath;
+    PHOTOPRISM_HTTP_HOST = cfg.address;
+    PHOTOPRISM_HTTP_PORT = toString cfg.port;
+  } // (
+    lib.mapAttrs (_: toString) cfg.settings
+  );
+
+  manage =
+    let
+      setupEnv = lib.concatStringsSep "\n" (lib.mapAttrsToList (name: val: "export ${name}=${lib.escapeShellArg val}") env);
+    in
+    pkgs.writeShellScript "manage" ''
+      ${setupEnv}
+      exec ${cfg.package}/bin/photoprism "$@"
+    '';
+in
+{
+  meta.maintainers = with lib.maintainers; [ stunkymonkey ];
+
+  options.services.photoprism = {
+
+    enable = lib.mkEnableOption (lib.mdDoc "Photoprism web server");
+
+    passwordFile = lib.mkOption {
+      type = lib.types.nullOr lib.types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Admin password file.
+      '';
+    };
+
+    address = lib.mkOption {
+      type = lib.types.str;
+      default = "localhost";
+      description = lib.mdDoc ''
+        Web interface address.
+      '';
+    };
+
+    port = lib.mkOption {
+      type = lib.types.port;
+      default = 2342;
+      description = lib.mdDoc ''
+        Web interface port.
+      '';
+    };
+
+    originalsPath = lib.mkOption {
+      type = lib.types.path;
+      default = null;
+      example = "/data/photos";
+      description = lib.mdDoc ''
+        Storage path of your original media files (photos and videos).
+      '';
+    };
+
+    importPath = lib.mkOption {
+      type = lib.types.str;
+      default = "import";
+      description = lib.mdDoc ''
+        Path from which files should be imported, either absolute or relative to `originalsPath`.
+      '';
+    };
+
+    storagePath = lib.mkOption {
+      type = lib.types.path;
+      default = "/var/lib/photoprism";
+      description = lib.mdDoc ''
+        Location for sidecar, cache, and database files.
+      '';
+    };
+
+    package = lib.mkPackageOptionMD pkgs "photoprism" { };
+
+    settings = lib.mkOption {
+      type = lib.types.attrsOf lib.types.str;
+      default = { };
+      description = lib.mdDoc ''
+        See [the getting-started guide](https://docs.photoprism.app/getting-started/config-options/) for available options.
+      '';
+      example = {
+        PHOTOPRISM_DEFAULT_LOCALE = "de";
+        PHOTOPRISM_ADMIN_USER = "root";
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.photoprism = {
+      description = "Photoprism server";
+
+      serviceConfig = {
+        Restart = "on-failure";
+        User = "photoprism";
+        Group = "photoprism";
+        DynamicUser = true;
+        StateDirectory = "photoprism";
+        WorkingDirectory = "/var/lib/photoprism";
+        RuntimeDirectory = "photoprism";
+
+        LoadCredential = lib.optionalString (cfg.passwordFile != null)
+          "PHOTOPRISM_ADMIN_PASSWORD:${cfg.passwordFile}";
+
+        CapabilityBoundingSet = "";
+        LockPersonality = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" "~@setuid @keyring" ];
+        UMask = "0066";
+      } // lib.optionalAttrs (cfg.port < 1024) {
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+      };
+
+      wantedBy = [ "multi-user.target" ];
+      environment = env;
+
+      # reminder: easier password configuration will come in https://github.com/photoprism/photoprism/pull/2302
+      preStart = ''
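+        # Expose the wrapper script in the state directory and run pending database migrations before the server starts.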
+        ln -sf ${manage} photoprism-manage
+
+        ${lib.optionalString (cfg.passwordFile != null) ''
+          export PHOTOPRISM_ADMIN_PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/PHOTOPRISM_ADMIN_PASSWORD")
+        ''}
+        exec ${cfg.package}/bin/photoprism migrations run -f
+      '';
+
+      script = ''
+        ${lib.optionalString (cfg.passwordFile != null) ''
+          export PHOTOPRISM_ADMIN_PASSWORD=$(cat "$CREDENTIALS_DIRECTORY/PHOTOPRISM_ADMIN_PASSWORD")
+        ''}
+        exec ${cfg.package}/bin/photoprism start
+      '';
+    };
+  };
+}
+
diff --git a/nixpkgs/nixos/modules/services/web-apps/phylactery.nix b/nixpkgs/nixos/modules/services/web-apps/phylactery.nix
new file mode 100644
index 000000000000..723b38ee75d9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/phylactery.nix
@@ -0,0 +1,51 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let cfg = config.services.phylactery;
+in {
+  options.services.phylactery = {
+    enable = mkEnableOption (lib.mdDoc "Phylactery server");
+
+    host = mkOption {
+      type = types.str;
+      default = "localhost";
+      description = lib.mdDoc "Listen host for Phylactery";
+    };
+
+    port = mkOption {
+      type = types.port;
+      description = lib.mdDoc "Listen port for Phylactery";
+    };
+
+    library = mkOption {
+      type = types.path;
+      description = lib.mdDoc "Path to CBZ library";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.phylactery;
+      defaultText = literalExpression "pkgs.phylactery";
+      description = lib.mdDoc "The Phylactery package to use";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.phylactery = {
+      environment = {
+        PHYLACTERY_ADDRESS = "${cfg.host}:${toString cfg.port}";
+        PHYLACTERY_LIBRARY = "${cfg.library}";
+      };
+
+      wantedBy = [ "multi-user.target" ];
+
+      unitConfig.ConditionPathExists = cfg.library;
+
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${cfg.package}/bin/phylactery";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ McSinyx ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/pict-rs.md b/nixpkgs/nixos/modules/services/web-apps/pict-rs.md
new file mode 100644
index 000000000000..2fa6bb3aebce
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/pict-rs.md
@@ -0,0 +1,89 @@
+# Pict-rs {#module-services-pict-rs}
+
+pict-rs is a simple image hosting service.
+
+## Quickstart {#module-services-pict-rs-quickstart}
+
+The minimal configuration to start pict-rs is:
+
+```nix
+services.pict-rs.enable = true;
+```
+
+This will start the HTTP server on port 8080 by default.
+
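+The module also exposes `address`, `port`, and `dataDir` options (see the
+accompanying `pict-rs.nix`). A sketch of a configuration that sets them
+explicitly, using example values, could look like:
+
+```nix
+services.pict-rs = {
+  enable = true;
+  # Example values; adjust to your deployment.
+  address = "127.0.0.1";
+  port = 8080;
+  dataDir = "/var/lib/pict-rs";
+};
+```
+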
+## Usage {#module-services-pict-rs-usage}
+
+pict-rs offers the following endpoints:
+
+- `POST /image` for uploading an image. Uploaded content must be valid multipart/form-data with an
+    image array located within the `images[]` key
+
+    This endpoint returns the following JSON structure on success with a 201 Created status
+    ```json
+    {
+        "files": [
+            {
+                "delete_token": "JFvFhqJA98",
+                "file": "lkWZDRvugm.jpg"
+            },
+            {
+                "delete_token": "kAYy9nk2WK",
+                "file": "8qFS0QooAn.jpg"
+            },
+            {
+                "delete_token": "OxRpM3sf0Y",
+                "file": "1hJaYfGE01.jpg"
+            }
+        ],
+        "msg": "ok"
+    }
+    ```
+- `GET /image/download?url=...` Download an image from a remote server, returning the same JSON
+    payload as the `POST` endpoint
+- `GET /image/original/{file}` for getting a full-resolution image. `file` here is the `file` key from the
+    `/image` endpoint's JSON
+- `GET /image/details/original/{file}` for getting the details of a full-resolution image.
+    The returned JSON is structured like so:
+    ```json
+    {
+        "width": 800,
+        "height": 537,
+        "content_type": "image/webp",
+        "created_at": [
+            2020,
+            345,
+            67376,
+            394363487
+        ]
+    }
+    ```
+- `GET /image/process.{ext}?src={file}&...` get a file with transformations applied.
+    Existing transformations include:
+    - `identity=true`: apply no changes
+    - `blur={float}`: apply a gaussian blur to the file
+    - `thumbnail={int}`: produce a thumbnail of the image fitting inside an `{int}` by `{int}`
+        square using raw pixel sampling
+    - `resize={int}`: produce a thumbnail of the image fitting inside an `{int}` by `{int}` square
+        using a Lanczos2 filter. This is slower than sampling but looks a bit better in some cases
+    - `crop={int-w}x{int-h}`: produce a cropped version of the image with an `{int-w}` by `{int-h}`
+        aspect ratio. The resulting crop will be centered on the image. Either the width or height
+        of the image will remain full-size, depending on the image's aspect ratio and the requested
+        aspect ratio. For example, a 1600x900 image cropped with a 1x1 aspect ratio will become 900x900. A
+        1600x1100 image cropped with a 16x9 aspect ratio will become 1600x900.
+
+    Supported `ext` file extensions include `png`, `jpg`, and `webp`
+
+    An example of usage could be
+    ```
+    GET /image/process.jpg?src=asdf.png&thumbnail=256&blur=3.0
+    ```
+    which would create a 256x256px JPEG thumbnail and blur it
+- `GET /image/details/process.{ext}?src={file}&...` for getting the details of a processed image.
+    The returned JSON is the same format as listed for the full-resolution details endpoint.
+- `DELETE /image/delete/{delete_token}/{file}` or `GET /image/delete/{delete_token}/{file}` to
+    delete a file, where `delete_token` and `file` are from the `/image` endpoint's JSON
+
+## Missing {#module-services-pict-rs-missing}
+
+- Configuring the secure-api-key is not supported yet. The envisioned basic use case is consumption on localhost by other services, without exposing the service to the internet.
diff --git a/nixpkgs/nixos/modules/services/web-apps/pict-rs.nix b/nixpkgs/nixos/modules/services/web-apps/pict-rs.nix
new file mode 100644
index 000000000000..e1b8c8333553
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/pict-rs.nix
@@ -0,0 +1,106 @@
+{ lib, pkgs, config, ... }:
+
+let
+  cfg = config.services.pict-rs;
+  inherit (lib) maintainers mkOption types;
+
+  is03 = lib.versionOlder cfg.package.version "0.4.0";
+
+in
+{
+  meta.maintainers = with maintainers; [ happysalada ];
+  meta.doc = ./pict-rs.md;
+
+  options.services.pict-rs = {
+    enable = lib.mkEnableOption (lib.mdDoc "pict-rs server");
+
+    package = mkOption {
+      type = types.package;
+      example = lib.literalExpression "pkgs.pict-rs";
+      description = lib.mdDoc ''
+        pict-rs package to use.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/pict-rs";
+      description = lib.mdDoc ''
+        The directory in which to store the uploaded images and the database.
+      '';
+    };
+
+    repoPath = mkOption {
+      type = types.nullOr (types.path);
+      default = null;
+      description = lib.mdDoc ''
+        The directory in which to store the database.
+        This option takes precedence over dataDir.
+      '';
+    };
+
+    storePath = mkOption {
+      type = types.nullOr (types.path);
+      default = null;
+      description = lib.mdDoc ''
+        The directory in which to store the uploaded images.
+        This option takes precedence over dataDir.
+      '';
+    };
+
+    address = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        The IPv4 address to bind the service to.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc ''
+        The port to bind the service to.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.pict-rs.package = lib.mkDefault (
+      # An incompatible db change happened in the transition from 0.3 to 0.4.
+      if lib.versionAtLeast config.system.stateVersion "23.11"
+      then pkgs.pict-rs
+      else pkgs.pict-rs_0_3
+    );
+
+    # Account for config differences between 0.3 and 0.4
+    assertions = [
+      {
+        assertion = !is03 || (cfg.repoPath == null && cfg.storePath == null);
+        message = ''
+          Using `services.pict-rs.repoPath` or `services.pict-rs.storePath` with pict-rs 0.3 or older has no effect.
+        '';
+      }
+    ];
+
+    systemd.services.pict-rs = {
+      # pict-rs split its database and image storage paths in 0.4.0.
+      environment =
+        if is03 then {
+          PICTRS__PATH = cfg.dataDir;
+          PICTRS__ADDR = "${cfg.address}:${toString cfg.port}";
+        } else {
+          PICTRS__REPO__PATH = if cfg.repoPath != null then cfg.repoPath else "${cfg.dataDir}/sled-repo";
+          PICTRS__STORE__PATH = if cfg.storePath != null then cfg.storePath else "${cfg.dataDir}/files";
+          PICTRS__SERVER__ADDR = "${cfg.address}:${toString cfg.port}";
+        };
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = "pict-rs";
+        ExecStart = if is03 then "${lib.getBin cfg.package}/bin/pict-rs" else "${lib.getBin cfg.package}/bin/pict-rs run";
+      };
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/pixelfed.nix b/nixpkgs/nixos/modules/services/web-apps/pixelfed.nix
new file mode 100644
index 000000000000..b0a25dcce9ef
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/pixelfed.nix
@@ -0,0 +1,482 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.pixelfed;
+  user = cfg.user;
+  group = cfg.group;
+  pixelfed = cfg.package.override { inherit (cfg) dataDir runtimeDir; };
+  # https://github.com/pixelfed/pixelfed/blob/dev/app/Console/Commands/Installer.php#L185-L190
+  extraPrograms = with pkgs; [ jpegoptim optipng pngquant gifsicle ffmpeg ];
+  # Ensure PHP extensions: https://github.com/pixelfed/pixelfed/blob/dev/app/Console/Commands/Installer.php#L135-L147
+  phpPackage = cfg.phpPackage.buildEnv {
+    extensions = { enabled, all }:
+      enabled
+      ++ (with all; [ bcmath ctype curl mbstring gd intl zip redis imagick ]);
+  };
+  configFile =
+    pkgs.writeText "pixelfed-env" (lib.generators.toKeyValue { } cfg.settings);
+  # Management script
+  pixelfed-manage = pkgs.writeShellScriptBin "pixelfed-manage" ''
+    cd ${pixelfed}
+    sudo=exec
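+    # Re-exec artisan through sudo when not already running as the service user.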
+    if [[ "$USER" != ${user} ]]; then
+      sudo='exec /run/wrappers/bin/sudo -u ${user}'
+    fi
+    $sudo ${phpPackage}/bin/php artisan "$@"
+  '';
+  dbSocket = {
+    "pgsql" = "/run/postgresql";
+    "mysql" = "/run/mysqld/mysqld.sock";
+  }.${cfg.database.type};
+  dbService = {
+    "pgsql" = "postgresql.service";
+    "mysql" = "mysql.service";
+  }.${cfg.database.type};
+  redisService = "redis-pixelfed.service";
+in {
+  options.services = {
+    pixelfed = {
+      enable = mkEnableOption (lib.mdDoc "a Pixelfed instance");
+      package = mkPackageOptionMD pkgs "pixelfed" { };
+      phpPackage = mkPackageOptionMD pkgs "php81" { };
+
+      user = mkOption {
+        type = types.str;
+        default = "pixelfed";
+        description = lib.mdDoc ''
+          User account under which pixelfed runs.
+
+          ::: {.note}
+          If left as the default value this user will automatically be created
+          on system activation, otherwise you are responsible for
+          ensuring the user exists before the pixelfed application starts.
+          :::
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "pixelfed";
+        description = lib.mdDoc ''
+          Group account under which pixelfed runs.
+
+          ::: {.note}
+          If left as the default value this group will automatically be created
+          on system activation, otherwise you are responsible for
+          ensuring the group exists before the pixelfed application starts.
+          :::
+        '';
+      };
+
+      domain = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          FQDN for the Pixelfed instance.
+        '';
+      };
+
+      secretFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          A secret file to be sourced for the .env settings.
+          Place `APP_KEY` and other settings that should not end up in the Nix store here.
+        '';
+      };
+
+      settings = mkOption {
+        type = with types; (attrsOf (oneOf [ bool int str ]));
+        description = lib.mdDoc ''
+          .env settings for Pixelfed.
+          Secrets should use `secretFile` option instead.
+        '';
+      };
+
+      nginx = mkOption {
+        type = types.nullOr (types.submodule
+          (import ../web-servers/nginx/vhost-options.nix {
+            inherit config lib;
+          }));
+        default = null;
+        example = lib.literalExpression ''
+          {
+            serverAliases = [
+              "pics.''${config.networking.domain}"
+            ];
+            enableACME = true;
+            forceSSL = true;
+          }
+        '';
+        description = lib.mdDoc ''
+          With this option, you can customize an nginx virtual host which already has sensible defaults for Pixelfed.
+          Set to {} if you do not need any customization to the virtual host.
+          If enabled, then by default, the {option}`serverName` is
+          `''${domain}`.
+          If this is set to null (the default), no nginx virtualHost will be configured.
+        '';
+      };
+
+      redis.createLocally = mkEnableOption
+        (lib.mdDoc "a local Redis database using UNIX socket authentication")
+        // {
+          default = true;
+        };
+
+      database = {
+        createLocally = mkEnableOption
+          (lib.mdDoc "a local database using UNIX socket authentication") // {
+            default = true;
+          };
+        automaticMigrations = mkEnableOption
+          (lib.mdDoc "automatic migrations for database schema and data") // {
+            default = true;
+          };
+
+        type = mkOption {
+          type = types.enum [ "mysql" "pgsql" ];
+          example = "pgsql";
+          default = "mysql";
+          description = lib.mdDoc ''
+            Database engine to use.
+            Note that PGSQL is not well supported: https://github.com/pixelfed/pixelfed/issues/2727
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "pixelfed";
+          description = lib.mdDoc "Database name.";
+        };
+      };
+
+      maxUploadSize = mkOption {
+        type = types.str;
+        default = "8M";
+        description = lib.mdDoc ''
+          Max upload size with units.
+        '';
+      };
+
+      poolConfig = mkOption {
+        type = with types; attrsOf (oneOf [ int str bool ]);
+        default = { };
+
+        description = lib.mdDoc ''
+          Options for Pixelfed's PHP-FPM pool.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/pixelfed";
+        description = lib.mdDoc ''
+          State directory of the `pixelfed` user which holds
+          the application's state and data.
+        '';
+      };
+
+      runtimeDir = mkOption {
+        type = types.str;
+        default = "/run/pixelfed";
+        description = lib.mdDoc ''
+          Runtime directory of the `pixelfed` user which holds
+          the application's caches and temporary files.
+        '';
+      };
+
+      schedulerInterval = mkOption {
+        type = types.str;
+        default = "1d";
+        description = lib.mdDoc "How often the Pixelfed cron task should run";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    users.users.pixelfed = mkIf (cfg.user == "pixelfed") {
+      isSystemUser = true;
+      group = cfg.group;
+      extraGroups = lib.optional cfg.redis.createLocally "redis-pixelfed";
+    };
+    users.groups.pixelfed = mkIf (cfg.group == "pixelfed") { };
+
+    services.redis.servers.pixelfed.enable = lib.mkIf cfg.redis.createLocally true;
+    services.pixelfed.settings = mkMerge [
+      ({
+        APP_ENV = mkDefault "production";
+        APP_DEBUG = mkDefault false;
+        # https://github.com/pixelfed/pixelfed/blob/dev/app/Console/Commands/Installer.php#L312-L316
+        APP_URL = mkDefault "https://${cfg.domain}";
+        ADMIN_DOMAIN = mkDefault cfg.domain;
+        APP_DOMAIN = mkDefault cfg.domain;
+        SESSION_DOMAIN = mkDefault cfg.domain;
+        SESSION_SECURE_COOKIE = mkDefault true;
+        OPEN_REGISTRATION = mkDefault false;
+        # ActivityPub: https://github.com/pixelfed/pixelfed/blob/dev/app/Console/Commands/Installer.php#L360-L364
+        ACTIVITY_PUB = mkDefault true;
+        AP_REMOTE_FOLLOW = mkDefault true;
+        AP_INBOX = mkDefault true;
+        AP_OUTBOX = mkDefault true;
+        AP_SHAREDINBOX = mkDefault true;
+        # Image optimization: https://github.com/pixelfed/pixelfed/blob/dev/app/Console/Commands/Installer.php#L367-L404
+        PF_OPTIMIZE_IMAGES = mkDefault true;
+        IMAGE_DRIVER = mkDefault "imagick";
+        # Mobile APIs
+        OAUTH_ENABLED = mkDefault true;
+        # https://github.com/pixelfed/pixelfed/blob/dev/app/Console/Commands/Installer.php#L351
+        EXP_EMC = mkDefault true;
+        # Defer to systemd
+        LOG_CHANNEL = mkDefault "stderr";
+        # TODO: find out the correct syntax?
+        # TRUST_PROXIES = mkDefault "127.0.0.1/8, ::1/128";
+      })
+      (mkIf (cfg.redis.createLocally) {
+        BROADCAST_DRIVER = mkDefault "redis";
+        CACHE_DRIVER = mkDefault "redis";
+        QUEUE_DRIVER = mkDefault "redis";
+        SESSION_DRIVER = mkDefault "redis";
+        WEBSOCKET_REPLICATION_MODE = mkDefault "redis";
+        # Support phpredis and predis configuration-style.
+        REDIS_SCHEME = "unix";
+        REDIS_HOST = config.services.redis.servers.pixelfed.unixSocket;
+        REDIS_PATH = config.services.redis.servers.pixelfed.unixSocket;
+      })
+      (mkIf (cfg.database.createLocally) {
+        DB_CONNECTION = cfg.database.type;
+        DB_SOCKET = dbSocket;
+        DB_DATABASE = cfg.database.name;
+        DB_USERNAME = user;
+        # No TCP/IP connection.
+        DB_PORT = 0;
+      })
+    ];
+
+    environment.systemPackages = [ pixelfed-manage ];
+
+    services.mysql =
+      mkIf (cfg.database.createLocally && cfg.database.type == "mysql") {
+        enable = mkDefault true;
+        package = mkDefault pkgs.mariadb;
+        ensureDatabases = [ cfg.database.name ];
+        ensureUsers = [{
+          name = user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }];
+      };
+
+    services.postgresql =
+      mkIf (cfg.database.createLocally && cfg.database.type == "pgsql") {
+        enable = mkDefault true;
+        ensureDatabases = [ cfg.database.name ];
+        ensureUsers = [{
+          name = user;
+        }];
+      };
+
+    # Make each individual option overridable with lib.mkDefault.
+    services.pixelfed.poolConfig = lib.mapAttrs' (n: v: lib.nameValuePair n (lib.mkDefault v)) {
+      "pm" = "dynamic";
+      "php_admin_value[error_log]" = "stderr";
+      "php_admin_flag[log_errors]" = true;
+      "catch_workers_output" = true;
+      "pm.max_children" = "32";
+      "pm.start_servers" = "2";
+      "pm.min_spare_servers" = "2";
+      "pm.max_spare_servers" = "4";
+      "pm.max_requests" = "500";
+    };
+
+    services.phpfpm.pools.pixelfed = {
+      inherit user group;
+      inherit phpPackage;
+
+      phpOptions = ''
+        post_max_size = ${toString cfg.maxUploadSize}
+        upload_max_filesize = ${toString cfg.maxUploadSize}
+        max_execution_time = 600;
+      '';
+
+      settings = {
+        "listen.owner" = user;
+        "listen.group" = group;
+        "listen.mode" = "0660";
+        "catch_workers_output" = "yes";
+      } // cfg.poolConfig;
+    };
+
+    systemd.services.phpfpm-pixelfed.after = [ "pixelfed-data-setup.service" ];
+    systemd.services.phpfpm-pixelfed.requires =
+      [ "pixelfed-horizon.service" "pixelfed-data-setup.service" ]
+      ++ lib.optional cfg.database.createLocally dbService
+      ++ lib.optional cfg.redis.createLocally redisService;
+    # Ensure image optimizations programs are available.
+    systemd.services.phpfpm-pixelfed.path = extraPrograms;
+
+    systemd.services.pixelfed-horizon = {
+      description = "Pixelfed task queueing via Laravel Horizon framework";
+      after = [ "network.target" "pixelfed-data-setup.service" ];
+      requires = [ "pixelfed-data-setup.service" ]
+        ++ (lib.optional cfg.database.createLocally dbService)
+        ++ (lib.optional cfg.redis.createLocally redisService);
+      wantedBy = [ "multi-user.target" ];
+      # Ensure image optimizations programs are available.
+      path = extraPrograms;
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${pixelfed-manage}/bin/pixelfed-manage horizon";
+        StateDirectory =
+          lib.mkIf (cfg.dataDir == "/var/lib/pixelfed") "pixelfed";
+        User = user;
+        Group = group;
+        Restart = "on-failure";
+      };
+    };
+
+    systemd.timers.pixelfed-cron = {
+      description = "Pixelfed periodic tasks timer";
+      after = [ "pixelfed-data-setup.service" ];
+      requires = [ "phpfpm-pixelfed.service" ];
+      wantedBy = [ "timers.target" ];
+
+      timerConfig = {
+        OnBootSec = cfg.schedulerInterval;
+        OnUnitActiveSec = cfg.schedulerInterval;
+      };
+    };
+
+    systemd.services.pixelfed-cron = {
+      description = "Pixelfed periodic tasks";
+      # Ensure image optimizations programs are available.
+      path = extraPrograms;
+
+      serviceConfig = {
+        ExecStart = "${pixelfed-manage}/bin/pixelfed-manage schedule:run";
+        User = user;
+        Group = group;
+        StateDirectory =
+          lib.mkIf (cfg.dataDir == "/var/lib/pixelfed") "pixelfed";
+      };
+    };
+
+    systemd.services.pixelfed-data-setup = {
+      description =
+        "Pixelfed setup: migrations, environment file update, cache reload, data changes";
+      wantedBy = [ "multi-user.target" ];
+      after = lib.optional cfg.database.createLocally dbService;
+      requires = lib.optional cfg.database.createLocally dbService;
+      path = with pkgs; [ bash pixelfed-manage rsync ] ++ extraPrograms;
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = user;
+        Group = group;
+        StateDirectory =
+          lib.mkIf (cfg.dataDir == "/var/lib/pixelfed") "pixelfed";
+        LoadCredential = "env-secrets:${cfg.secretFile}";
+        UMask = "077";
+      };
+
+      script = ''
+        # Before running any PHP program, cleanup the code cache.
+        # This is necessary when upgrading the application; otherwise you
+        # might try to import non-existent modules.
+        rm -f ${cfg.runtimeDir}/app.php
+        rm -rf ${cfg.runtimeDir}/cache/*
+
+        # Concatenate non-secret .env and secret .env
+        rm -f ${cfg.dataDir}/.env
+        cp --no-preserve=all ${configFile} ${cfg.dataDir}/.env
+        echo -e '\n' >> ${cfg.dataDir}/.env
+        cat "$CREDENTIALS_DIRECTORY/env-secrets" >> ${cfg.dataDir}/.env
+
+        # Link the static storage (package provided) to the runtime storage
+        # Necessary for cities.json and static images.
+        mkdir -p ${cfg.dataDir}/storage
+        rsync -av --no-perms ${pixelfed}/storage-static/ ${cfg.dataDir}/storage
+        chmod -R +w ${cfg.dataDir}/storage
+
+        chmod g+x ${cfg.dataDir}/storage ${cfg.dataDir}/storage/app
+        chmod -R g+rX ${cfg.dataDir}/storage/app/public
+
+        # Link the app.php in the runtime folder.
+        # We cannot link only the cache folder because the bootstrap folder needs to be writable.
+        ln -sf ${pixelfed}/bootstrap-static/app.php ${cfg.runtimeDir}/app.php
+
+        # https://laravel.com/docs/10.x/filesystem#the-public-disk
+        # Creating the public/storage → storage/app/public link
+        # is unnecessary as it's part of the installPhase of pixelfed.
+
+        # Install Horizon
+        # FIXME: require write access to public/ — should be done as part of install — pixelfed-manage horizon:publish
+
+        # Perform the first migration.
+        [[ ! -f ${cfg.dataDir}/.initial-migration ]] && pixelfed-manage migrate --force && touch ${cfg.dataDir}/.initial-migration
+
+        ${lib.optionalString cfg.database.automaticMigrations ''
+          # Force migrate the database.
+          pixelfed-manage migrate --force
+        ''}
+
+        # Import location data
+        pixelfed-manage import:cities
+
+        ${lib.optionalString cfg.settings.ACTIVITY_PUB ''
+          # ActivityPub federation bookkeeping
+          [[ ! -f ${cfg.dataDir}/.instance-actor-created ]] && pixelfed-manage instance:actor && touch ${cfg.dataDir}/.instance-actor-created
+        ''}
+
+        ${lib.optionalString cfg.settings.OAUTH_ENABLED ''
+          # Generate Passport encryption keys
+          [[ ! -f ${cfg.dataDir}/.passport-keys-generated ]] && pixelfed-manage passport:keys && touch ${cfg.dataDir}/.passport-keys-generated
+        ''}
+
+        pixelfed-manage route:cache
+        pixelfed-manage view:cache
+        pixelfed-manage config:cache
+      '';
+    };
+
+    systemd.tmpfiles.rules = [
+      # Cache must live across multiple systemd units runtimes.
+      "d ${cfg.runtimeDir}/                         0700 ${user} ${group} - -"
+      "d ${cfg.runtimeDir}/cache                    0700 ${user} ${group} - -"
+    ];
+
+    # Enable NGINX to access our phpfpm-socket.
+    users.users."${config.services.nginx.user}".extraGroups = [ cfg.group ];
+    services.nginx = mkIf (cfg.nginx != null) {
+      enable = true;
+      virtualHosts."${cfg.domain}" = mkMerge [
+        cfg.nginx
+        {
+          root = lib.mkForce "${pixelfed}/public/";
+          locations."/".tryFiles = "$uri $uri/ /index.php?$query_string";
+          locations."/favicon.ico".extraConfig = ''
+            access_log off; log_not_found off;
+          '';
+          locations."/robots.txt".extraConfig = ''
+            access_log off; log_not_found off;
+          '';
+          locations."~ \\.php$".extraConfig = ''
+            fastcgi_split_path_info ^(.+\.php)(/.+)$;
+            fastcgi_pass unix:${config.services.phpfpm.pools.pixelfed.socket};
+            fastcgi_index index.php;
+          '';
+          locations."~ /\\.(?!well-known).*".extraConfig = ''
+            deny all;
+          '';
+          extraConfig = ''
+            add_header X-Frame-Options "SAMEORIGIN";
+            add_header X-XSS-Protection "1; mode=block";
+            add_header X-Content-Type-Options "nosniff";
+            index index.html index.htm index.php;
+            error_page 404 /index.php;
+            client_max_body_size ${toString cfg.maxUploadSize};
+          '';
+        }
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/plantuml-server.nix b/nixpkgs/nixos/modules/services/web-apps/plantuml-server.nix
new file mode 100644
index 000000000000..1fa69814c6c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/plantuml-server.nix
@@ -0,0 +1,154 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    literalExpression
+    mdDoc
+    mkEnableOption
+    mkIf
+    mkOption
+    mkPackageOptionMD
+    mkRemovedOptionModule
+    types
+    ;
+
+  cfg = config.services.plantuml-server;
+
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "plantuml-server" "allowPlantumlInclude" ] "This option has been removed from PlantUML.")
+  ];
+
+  options = {
+    services.plantuml-server = {
+      enable = mkEnableOption (mdDoc "PlantUML server");
+
+      package = mkPackageOptionMD pkgs "plantuml-server" { };
+
+      packages = {
+        jdk = mkPackageOptionMD pkgs "jdk" { };
+        jetty = mkPackageOptionMD pkgs "jetty" {
+          default = "jetty_11";
+          extraDescription = ''
+            At the time of writing (v1.2023.12), PlantUML Server does not support
+            Jetty 12.x or newer.
+
+            Jetty 12.x has introduced major breaking changes, see
+            <https://github.com/jetty/jetty.project/releases/tag/jetty-12.0.0> and
+            <https://eclipse.dev/jetty/documentation/jetty-12/programming-guide/index.html#pg-migration-11-to-12>
+          '';
+        };
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "plantuml";
+        description = mdDoc "User which runs PlantUML server.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "plantuml";
+        description = mdDoc "Group which runs PlantUML server.";
+      };
+
+      home = mkOption {
+        type = types.path;
+        default = "/var/lib/plantuml";
+        description = mdDoc "Home directory of the PlantUML server instance.";
+      };
+
+      listenHost = mkOption {
+        type = types.str;
+        default = "127.0.0.1";
+        description = mdDoc "Host to listen on.";
+      };
+
+      listenPort = mkOption {
+        type = types.int;
+        default = 8080;
+        description = mdDoc "Port to listen on.";
+      };
+
+      plantumlLimitSize = mkOption {
+        type = types.int;
+        default = 4096;
+        description = mdDoc "Limits image width and height.";
+      };
+
+      graphvizPackage = mkPackageOptionMD pkgs "graphviz" { };
+
+      plantumlStats = mkOption {
+        type = types.bool;
+        default = false;
+        description = mdDoc "Set it to on to enable statistics report (https://plantuml.com/statistics-report).";
+      };
+
+      httpAuthorization = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = mdDoc "When calling the proxy endpoint, the value of HTTP_AUTHORIZATION will be used to set the HTTP Authorization header.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.plantuml-server = {
+      description = "PlantUML server";
+      wantedBy = [ "multi-user.target" ];
+      path = [ cfg.home ];
+
+      environment = {
+        PLANTUML_LIMIT_SIZE = builtins.toString cfg.plantumlLimitSize;
+        GRAPHVIZ_DOT = "${cfg.graphvizPackage}/bin/dot";
+        PLANTUML_STATS = if cfg.plantumlStats then "on" else "off";
+        HTTP_AUTHORIZATION = cfg.httpAuthorization;
+      };
+      script = ''
+      ${cfg.packages.jdk}/bin/java \
+        -jar ${cfg.packages.jetty}/start.jar \
+          --module=deploy,http,jsp \
+          jetty.home=${cfg.packages.jetty} \
+          jetty.base=${cfg.package} \
+          jetty.http.host=${cfg.listenHost} \
+          jetty.http.port=${builtins.toString cfg.listenPort}
+      '';
+
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        StateDirectory = mkIf (cfg.home == "/var/lib/plantuml") "plantuml";
+        StateDirectoryMode = mkIf (cfg.home == "/var/lib/plantuml") "0750";
+
+        # Hardening
+        AmbientCapabilities = [ "" ];
+        CapabilityBoundingSet = [ "" ];
+        DynamicUser = true;
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateNetwork = false;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectSystem = "strict";
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "@system-service" ];
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ truh anthonyroussel ];
+}
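
Taken together, the options above leave little to configure. A minimal deployment sketch, assuming the defaults declared above; the reverse-proxy vhost name is illustrative and not part of the module:

```nix
{
  services.plantuml-server = {
    enable = true;
    listenHost = "127.0.0.1";
    listenPort = 8080;
    plantumlLimitSize = 8192;
  };

  # Optional reverse proxy; the hostname is an example.
  services.nginx.virtualHosts."plantuml.example.org".locations."/".proxyPass =
    "http://127.0.0.1:8080";
}
```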
diff --git a/nixpkgs/nixos/modules/services/web-apps/plausible.md b/nixpkgs/nixos/modules/services/web-apps/plausible.md
new file mode 100644
index 000000000000..1328ce69441a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/plausible.md
@@ -0,0 +1,35 @@
+# Plausible {#module-services-plausible}
+
+[Plausible](https://plausible.io/) is a privacy-friendly alternative to
+Google analytics.
+
+## Basic Usage {#module-services-plausible-basic-usage}
+
+First, a secret key needs to be generated. This can be done with e.g.
+```ShellSession
+$ openssl rand -base64 64
+```
+
+After that, `plausible` can be deployed like this:
+```
+{
+  services.plausible = {
+    enable = true;
+    adminUser = {
+      # activate is used to skip the email verification of the admin-user that's
+      # automatically created by plausible. This is only supported if
+      # postgresql is configured by the module. This is done by default, but
+      # can be turned off with services.plausible.database.postgres.setup.
+      activate = true;
+      email = "admin@localhost";
+      passwordFile = "/run/secrets/plausible-admin-pwd";
+    };
+    server = {
+      baseUrl = "http://analytics.example.org";
+      # secretKeybaseFile is a path to the file which contains the secret generated
+      # with openssl as described above.
+      secretKeybaseFile = "/run/secrets/plausible-secret-key-base";
+    };
+  };
+}
+```
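
Assuming the module's default listen address and port (127.0.0.1:8000), exposing Plausible on the host named in `baseUrl` is typically done with a reverse proxy. A hedged nginx sketch, reusing the hostname from the example above:

```nix
{
  services.nginx = {
    enable = true;
    virtualHosts."analytics.example.org".locations."/" = {
      proxyPass = "http://127.0.0.1:8000";
      # Phoenix live features use websockets; harmless otherwise.
      proxyWebsockets = true;
    };
  };
}
```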
diff --git a/nixpkgs/nixos/modules/services/web-apps/plausible.nix b/nixpkgs/nixos/modules/services/web-apps/plausible.nix
new file mode 100644
index 000000000000..300a0f892ef7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/plausible.nix
@@ -0,0 +1,331 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.plausible;
+
+in {
+  options.services.plausible = {
+    enable = mkEnableOption (lib.mdDoc "plausible");
+
+    package = mkPackageOptionMD pkgs "plausible" { };
+
+    adminUser = {
+      name = mkOption {
+        default = "admin";
+        type = types.str;
+        description = lib.mdDoc ''
+          Name of the admin user that Plausible will create on initial startup.
+        '';
+      };
+
+      email = mkOption {
+        type = types.str;
+        example = "admin@localhost";
+        description = lib.mdDoc ''
+          Email address of the admin user.
+        '';
+      };
+
+      passwordFile = mkOption {
+        type = types.either types.str types.path;
+        description = lib.mdDoc ''
+          Path to the file which contains the password of the admin user.
+        '';
+      };
+
+      activate = mkEnableOption (lib.mdDoc "activating the freshly created admin-user");
+    };
+
+    database = {
+      clickhouse = {
+        setup = mkEnableOption (lib.mdDoc "creating a clickhouse instance") // { default = true; };
+        url = mkOption {
+          default = "http://localhost:8123/default";
+          type = types.str;
+          description = lib.mdDoc ''
+            The URL to be used to connect to `clickhouse`.
+          '';
+        };
+      };
+      postgres = {
+        setup = mkEnableOption (lib.mdDoc "creating a postgresql instance") // { default = true; };
+        dbname = mkOption {
+          default = "plausible";
+          type = types.str;
+          description = lib.mdDoc ''
+            Name of the database to use.
+          '';
+        };
+        socket = mkOption {
+          default = "/run/postgresql";
+          type = types.str;
+          description = lib.mdDoc ''
+            Path to the UNIX domain-socket to communicate with `postgres`.
+          '';
+        };
+      };
+    };
+
+    server = {
+      disableRegistration = mkOption {
+        default = true;
+        type = types.enum [true false "invite_only"];
+        description = lib.mdDoc ''
+          Whether to prohibit creating an account in Plausible's UI, or to allow registration by invitation only (`invite_only`).
+        '';
+      };
+      secretKeybaseFile = mkOption {
+        type = types.either types.path types.str;
+        description = lib.mdDoc ''
+          Path to the secret used by the `phoenix` framework. Instructions on
+          how to generate one are documented in the
+          [framework docs](https://hexdocs.pm/phoenix/Mix.Tasks.Phx.Gen.Secret.html#content).
+        '';
+      };
+      listenAddress = mkOption {
+        default = "127.0.0.1";
+        type = types.str;
+        description = lib.mdDoc ''
+          The IP address on which the server is listening.
+        '';
+      };
+      port = mkOption {
+        default = 8000;
+        type = types.port;
+        description = lib.mdDoc ''
+          Port where the service should be available.
+        '';
+      };
+      baseUrl = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Public URL where plausible is available.
+
+          Note that `/path` components are currently ignored:
+          <https://github.com/plausible/analytics/issues/1182>.
+        '';
+      };
+    };
+
+    mail = {
+      email = mkOption {
+        default = "hello@plausible.local";
+        type = types.str;
+        description = lib.mdDoc ''
+          The email address to use as the *from* address for all communications
+          from Plausible.
+        '';
+      };
+      smtp = {
+        hostAddr = mkOption {
+          default = "localhost";
+          type = types.str;
+          description = lib.mdDoc ''
+            The host address of your smtp server.
+          '';
+        };
+        hostPort = mkOption {
+          default = 25;
+          type = types.port;
+          description = lib.mdDoc ''
+            The port of your smtp server.
+          '';
+        };
+        user = mkOption {
+          default = null;
+          type = types.nullOr types.str;
+          description = lib.mdDoc ''
+            The username/email in case SMTP auth is enabled.
+          '';
+        };
+        passwordFile = mkOption {
+          default = null;
+          type = with types; nullOr (either str path);
+          description = lib.mdDoc ''
+            The path to the file with the password in case SMTP auth is enabled.
+          '';
+        };
+        enableSSL = mkEnableOption (lib.mdDoc "SSL when connecting to the SMTP server");
+        retries = mkOption {
+          type = types.ints.unsigned;
+          default = 2;
+          description = lib.mdDoc ''
+            Number of retries to make until mailer gives up.
+          '';
+        };
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "plausible" "releaseCookiePath" ] "Plausible uses no distributed Erlang features, so this option is no longer necessary and was removed")
+  ];
+
+  config = mkIf cfg.enable {
+    assertions = [
+      { assertion = cfg.adminUser.activate -> cfg.database.postgres.setup;
+        message = ''
+          Unable to automatically activate the admin user if no locally managed
+          PostgreSQL instance (`services.plausible.database.postgres.setup`) is enabled!
+        '';
+      }
+    ];
+
+    services.postgresql = mkIf cfg.database.postgres.setup {
+      enable = true;
+    };
+
+    services.clickhouse = mkIf cfg.database.clickhouse.setup {
+      enable = true;
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services = mkMerge [
+      {
+        plausible = {
+          inherit (cfg.package.meta) description;
+          documentation = [ "https://plausible.io/docs/self-hosting" ];
+          wantedBy = [ "multi-user.target" ];
+          after = optional cfg.database.clickhouse.setup "clickhouse.service"
+          ++ optionals cfg.database.postgres.setup [
+              "postgresql.service"
+              "plausible-postgres.service"
+            ];
+          requires = optional cfg.database.clickhouse.setup "clickhouse.service"
+            ++ optionals cfg.database.postgres.setup [
+              "postgresql.service"
+              "plausible-postgres.service"
+            ];
+
+          environment = {
+            # NixOS-specific option to keep tzdata from trying to write into its store path.
+            # See also https://github.com/lau/tzdata#data-directory-and-releases
+            STORAGE_DIR = "/var/lib/plausible/elixir_tzdata";
+
+            # Configuration options from
+            # https://plausible.io/docs/self-hosting-configuration
+            PORT = toString cfg.server.port;
+            LISTEN_IP = cfg.server.listenAddress;
+
+            # Note [plausible-needs-no-erlang-distributed-features]:
+            # Plausible does not use, and does not plan to use, any of
+            # Erlang's distributed features, see:
+            #     https://github.com/plausible/analytics/pull/1190#issuecomment-1018820934
+            # Thus, disable distribution for improved simplicity and security:
+            #
+            # When distribution is enabled,
+            # Elixir spawns the Erlang VM, which will listen by default on all
+            # interfaces for messages between Erlang nodes (capable of
+            # remote code execution); it can be protected by a cookie; see
+            # https://erlang.org/doc/reference_manual/distributed.html#security.
+            #
+            # It would be possible to restrict the interface to one of our choice
+            # (e.g. localhost or a VPN IP) similar to how we do it with `listenAddress`
+            # for the Plausible web server; if distribution is ever needed in the future,
+            # https://github.com/NixOS/nixpkgs/pull/130297 shows how to do it.
+            #
+            # But since Plausible does not use this feature in any way,
+            # we just disable it.
+            RELEASE_DISTRIBUTION = "none";
+            # Additional safeguard, in case `RELEASE_DISTRIBUTION=none` ever
+            # stops disabling the start of EPMD.
+            ERL_EPMD_ADDRESS = "127.0.0.1";
+
+            DISABLE_REGISTRATION = if isBool cfg.server.disableRegistration then boolToString cfg.server.disableRegistration else cfg.server.disableRegistration;
+
+            RELEASE_TMP = "/var/lib/plausible/tmp";
+            # Home is needed to connect to the node with iex
+            HOME = "/var/lib/plausible";
+
+            ADMIN_USER_NAME = cfg.adminUser.name;
+            ADMIN_USER_EMAIL = cfg.adminUser.email;
+
+            DATABASE_SOCKET_DIR = cfg.database.postgres.socket;
+            DATABASE_NAME = cfg.database.postgres.dbname;
+            CLICKHOUSE_DATABASE_URL = cfg.database.clickhouse.url;
+
+            BASE_URL = cfg.server.baseUrl;
+
+            MAILER_EMAIL = cfg.mail.email;
+            SMTP_HOST_ADDR = cfg.mail.smtp.hostAddr;
+            SMTP_HOST_PORT = toString cfg.mail.smtp.hostPort;
+            SMTP_RETRIES = toString cfg.mail.smtp.retries;
+            SMTP_HOST_SSL_ENABLED = boolToString cfg.mail.smtp.enableSSL;
+
+            SELFHOST = "true";
+          } // (optionalAttrs (cfg.mail.smtp.user != null) {
+            SMTP_USER_NAME = cfg.mail.smtp.user;
+          });
+
+          path = [ cfg.package ]
+            ++ optional cfg.database.postgres.setup config.services.postgresql.package;
+          script = ''
+            # Elixir does not start up if `RELEASE_COOKIE` is not set,
+            # even though we set `RELEASE_DISTRIBUTION=none` so the cookie should be unused.
+            # Thus, make a random one, which should then be ignored.
+            export RELEASE_COOKIE=$(tr -dc A-Za-z0-9 < /dev/urandom | head -c 20)
+            export ADMIN_USER_PWD="$(< $CREDENTIALS_DIRECTORY/ADMIN_USER_PWD )"
+            export SECRET_KEY_BASE="$(< $CREDENTIALS_DIRECTORY/SECRET_KEY_BASE )"
+
+            ${lib.optionalString (cfg.mail.smtp.passwordFile != null)
+              ''export SMTP_USER_PWD="$(< $CREDENTIALS_DIRECTORY/SMTP_USER_PWD )"''}
+
+            # setup
+            ${cfg.package}/createdb.sh
+            ${cfg.package}/migrate.sh
+            export IP_GEOLOCATION_DB=${pkgs.dbip-country-lite}/share/dbip/dbip-country-lite.mmdb
+            ${cfg.package}/bin/plausible eval "(Plausible.Release.prepare() ; Plausible.Auth.create_user(\"$ADMIN_USER_NAME\", \"$ADMIN_USER_EMAIL\", \"$ADMIN_USER_PWD\"))"
+            ${optionalString cfg.adminUser.activate ''
+              psql -d plausible <<< "UPDATE users SET email_verified=true where email = '$ADMIN_USER_EMAIL';"
+            ''}
+
+            exec plausible start
+          '';
+
+          serviceConfig = {
+            DynamicUser = true;
+            PrivateTmp = true;
+            WorkingDirectory = "/var/lib/plausible";
+            StateDirectory = "plausible";
+            LoadCredential = [
+              "ADMIN_USER_PWD:${cfg.adminUser.passwordFile}"
+              "SECRET_KEY_BASE:${cfg.server.secretKeybaseFile}"
+            ] ++ lib.optionals (cfg.mail.smtp.passwordFile != null) [ "SMTP_USER_PWD:${cfg.mail.smtp.passwordFile}"];
+          };
+        };
+      }
+      (mkIf cfg.database.postgres.setup {
+        # `plausible' requires the `citext'-extension.
+        plausible-postgres = {
+          after = [ "postgresql.service" ];
+          partOf = [ "plausible.service" ];
+          serviceConfig = {
+            Type = "oneshot";
+            User = config.services.postgresql.superUser;
+            RemainAfterExit = true;
+          };
+          script = with cfg.database.postgres; ''
+            PSQL() {
+              ${config.services.postgresql.package}/bin/psql --port=5432 "$@"
+            }
+            # check if the database already exists
+            if ! PSQL -lqt | ${pkgs.coreutils}/bin/cut -d \| -f 1 | ${pkgs.gnugrep}/bin/grep -qw ${dbname} ; then
+              PSQL -tAc "CREATE ROLE plausible WITH LOGIN;"
+              PSQL -tAc "CREATE DATABASE ${dbname} WITH OWNER plausible;"
+              PSQL -d ${dbname} -tAc "CREATE EXTENSION IF NOT EXISTS citext;"
+            fi
+          '';
+        };
+      })
+    ];
+  };
+
+  meta.maintainers = with maintainers; [ ];
+  meta.doc = ./plausible.md;
+}
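
The `database.*.setup` switches above only control whether local PostgreSQL and ClickHouse instances are managed by this configuration; pointing Plausible at externally managed databases can be sketched as follows. Connection details are placeholders, and the admin user is not auto-activated because that requires the locally managed PostgreSQL instance:

```nix
{
  services.plausible = {
    enable = true;
    adminUser = {
      email = "admin@example.org";
      passwordFile = "/run/secrets/plausible-admin-pwd";
      activate = false; # requires database.postgres.setup = true
    };
    server = {
      baseUrl = "https://analytics.example.org";
      secretKeybaseFile = "/run/secrets/plausible-secret-key-base";
    };
    database = {
      postgres = {
        setup = false;              # role and database are managed elsewhere
        dbname = "plausible";
        socket = "/run/postgresql"; # socket directory of the external instance
      };
      clickhouse = {
        setup = false;
        url = "http://clickhouse.internal:8123/default";
      };
    };
  };
}
```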
diff --git a/nixpkgs/nixos/modules/services/web-apps/powerdns-admin.nix b/nixpkgs/nixos/modules/services/web-apps/powerdns-admin.nix
new file mode 100644
index 000000000000..7b6fb06e3565
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/powerdns-admin.nix
@@ -0,0 +1,153 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.powerdns-admin;
+
+  configText = ''
+    ${cfg.config}
+  ''
+  + optionalString (cfg.secretKeyFile != null) ''
+    with open('${cfg.secretKeyFile}') as file:
+      SECRET_KEY = file.read()
+  ''
+  + optionalString (cfg.saltFile != null) ''
+    with open('${cfg.saltFile}') as file:
+      SALT = file.read()
+  '';
+in
+{
+  options.services.powerdns-admin = {
+    enable = mkEnableOption (lib.mdDoc "the PowerDNS web interface");
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = literalExpression ''
+        [ "-b" "127.0.0.1:8000" ]
+      '';
+      description = lib.mdDoc ''
+        Extra arguments passed to powerdns-admin.
+      '';
+    };
+
+    config = mkOption {
+      type = types.str;
+      default = "";
+      example = ''
+        BIND_ADDRESS = '127.0.0.1'
+        PORT = 8000
+        SQLALCHEMY_DATABASE_URI = 'postgresql://powerdnsadmin@/powerdnsadmin?host=/run/postgresql'
+      '';
+      description = lib.mdDoc ''
+        Configuration python file.
+        See [the example configuration](https://github.com/ngoduykhanh/PowerDNS-Admin/blob/v${pkgs.powerdns-admin.version}/configs/development.py)
+        for options.
+      '';
+    };
+
+    secretKeyFile = mkOption {
+      type = types.nullOr types.path;
+      example = "/etc/powerdns-admin/secret";
+      description = lib.mdDoc ''
+        The secret used to create cookies.
+        This needs to be set, otherwise the default is used and everyone can forge valid login cookies.
+        Set this to null to ignore this setting and configure it in another way.
+      '';
+    };
+
+    saltFile = mkOption {
+      type = types.nullOr types.path;
+      example = "/etc/powerdns-admin/salt";
+      description = lib.mdDoc ''
+        The salt used for serialization.
+        This should be set, otherwise the default is used.
+        Set this to null to ignore this setting and configure it in another way.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.powerdns-admin = {
+      description = "PowerDNS web interface";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "networking.target" ];
+
+      environment.FLASK_CONF = builtins.toFile "powerdns-admin-config.py" configText;
+      environment.PYTHONPATH = pkgs.powerdns-admin.pythonPath;
+      serviceConfig = {
+        ExecStart = "${pkgs.powerdns-admin}/bin/powerdns-admin --pid /run/powerdns-admin/pid ${escapeShellArgs cfg.extraArgs}";
+        # Set environment variables only for starting flask database upgrade
+        ExecStartPre = "${pkgs.coreutils}/bin/env FLASK_APP=${pkgs.powerdns-admin}/share/powerdnsadmin/__init__.py SESSION_TYPE= ${pkgs.python3Packages.flask}/bin/flask db upgrade -d ${pkgs.powerdns-admin}/share/migrations";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStop = "${pkgs.coreutils}/bin/kill -TERM $MAINPID";
+        PIDFile = "/run/powerdns-admin/pid";
+        RuntimeDirectory = "powerdns-admin";
+        User = "powerdnsadmin";
+        Group = "powerdnsadmin";
+
+        AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        BindReadOnlyPaths = [
+          "/nix/store"
+          "-/etc/resolv.conf"
+          "-/etc/nsswitch.conf"
+          "-/etc/hosts"
+          "-/etc/localtime"
+        ]
+        ++ (optional (cfg.secretKeyFile != null) cfg.secretKeyFile)
+        ++ (optional (cfg.saltFile != null) cfg.saltFile);
+        CapabilityBoundingSet = "CAP_NET_BIND_SERVICE";
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+        # Implies ProtectSystem=strict, which re-mounts all paths
+        #DynamicUser = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        # Needs to start a server
+        #PrivateNetwork = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        # Would re-mount paths ignored by temporary root
+        #ProtectSystem = "strict";
+        ProtectControlGroups = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" "AF_UNIX" ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        # gunicorn needs setuid
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged @resources @keyring"
+          # These got removed by the line above but are needed
+          "@setuid @chown"
+        ];
+        TemporaryFileSystem = "/:ro";
+        # Does not work well with the temporary root
+        #UMask = "0066";
+      };
+    };
+
+    users.groups.powerdnsadmin = { };
+    users.users.powerdnsadmin = {
+      description = "PowerDNS web interface user";
+      isSystemUser = true;
+      group = "powerdnsadmin";
+    };
+  };
+
+  # uses attributes of the linked package
+  meta.buildDocsInSandbox = false;
+}
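
A minimal sketch of the options above, taken straight from the examples in the option declarations; the secret/salt paths and the database URI are placeholders:

```nix
{
  services.powerdns-admin = {
    enable = true;
    extraArgs = [ "-b" "127.0.0.1:8000" ];
    secretKeyFile = "/etc/powerdns-admin/secret";
    saltFile = "/etc/powerdns-admin/salt";
    config = ''
      BIND_ADDRESS = '127.0.0.1'
      PORT = 8000
      SQLALCHEMY_DATABASE_URI = 'postgresql://powerdnsadmin@/powerdnsadmin?host=/run/postgresql'
    '';
  };
}
```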
diff --git a/nixpkgs/nixos/modules/services/web-apps/prosody-filer.nix b/nixpkgs/nixos/modules/services/web-apps/prosody-filer.nix
new file mode 100644
index 000000000000..84953546d8e0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/prosody-filer.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+
+  cfg = config.services.prosody-filer;
+
+  settingsFormat = pkgs.formats.toml { };
+  configFile = settingsFormat.generate "prosody-filer.toml" cfg.settings;
+in {
+
+  options = {
+    services.prosody-filer = {
+      enable = mkEnableOption (lib.mdDoc "Prosody Filer XMPP upload file server");
+
+      settings = mkOption {
+        description = lib.mdDoc ''
+          Configuration for Prosody Filer.
+          Refer to <https://github.com/ThomasLeister/prosody-filer#configure-prosody-filer> for details on supported values.
+        '';
+
+        type = settingsFormat.type;
+
+        example = {
+          secret = "mysecret";
+          storeDir = "/srv/http/nginx/prosody-upload";
+        };
+
+        defaultText = literalExpression ''
+          {
+            listenport = mkDefault "127.0.0.1:5050";
+            uploadSubDir = mkDefault "upload/";
+          }
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.prosody-filer.settings = {
+      listenport = mkDefault "127.0.0.1:5050";
+      uploadSubDir = mkDefault "upload/";
+    };
+
+    users.users.prosody-filer = {
+      group = "prosody-filer";
+      isSystemUser = true;
+    };
+
+    users.groups.prosody-filer = { };
+
+    systemd.services.prosody-filer = {
+      description = "Prosody file upload server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      serviceConfig = {
+        User = "prosody-filer";
+        Group = "prosody-filer";
+        ExecStart = "${pkgs.prosody-filer}/bin/prosody-filer -config ${configFile}";
+        Restart = "on-failure";
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateTmp = true;
+        PrivateMounts = true;
+        ProtectHome = true;
+        ProtectClock = true;
+        ProtectProc = "noaccess";
+        ProcSubset = "pid";
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        RestrictSUIDSGID = true;
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        RemoveIPC = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        SystemCallFilter = [ "@system-service" "~@privileged" ];
+      };
+    };
+  };
+}
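
Since `settings` above is freeform TOML with only `listenport` and `uploadSubDir` defaulted, a working instance needs little more than the shared secret. A sketch using the placeholder values from the option example:

```nix
{
  services.prosody-filer = {
    enable = true;
    settings = {
      secret = "mysecret"; # placeholder; must match the secret configured on the XMPP server
      storeDir = "/srv/http/nginx/prosody-upload";
      listenport = "127.0.0.1:5050";
    };
  };
}
```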
diff --git a/nixpkgs/nixos/modules/services/web-apps/restya-board.nix b/nixpkgs/nixos/modules/services/web-apps/restya-board.nix
new file mode 100644
index 000000000000..959bcbc5c9f1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/restya-board.nix
@@ -0,0 +1,380 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+# TODO: are these php-packages needed?
+#imagick
+#php-geoip -> php.ini: extension = geoip.so
+#expat
+
+let
+  cfg = config.services.restya-board;
+  fpm = config.services.phpfpm.pools.${poolName};
+
+  runDir = "/run/restya-board";
+
+  poolName = "restya-board";
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.restya-board = {
+
+      enable = mkEnableOption (lib.mdDoc "restya-board");
+
+      dataDir = mkOption {
+        type = types.path;
+        default = "/var/lib/restya-board";
+        description = lib.mdDoc ''
+          Data of the application.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "restya-board";
+        description = lib.mdDoc ''
+          User account under which the web-application runs.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nginx";
+        description = lib.mdDoc ''
+          Group account under which the web-application runs.
+        '';
+      };
+
+      virtualHost = {
+        serverName = mkOption {
+          type = types.str;
+          default = "restya.board";
+          description = lib.mdDoc ''
+            Name of the nginx virtualhost to use.
+          '';
+        };
+
+        listenHost = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            Listen address for the virtualhost to use.
+          '';
+        };
+
+        listenPort = mkOption {
+          type = types.port;
+          default = 3000;
+          description = lib.mdDoc ''
+            Listen port for the virtualhost to use.
+          '';
+        };
+      };
+
+      database = {
+        host = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Host of the database. Leave at `null` to use a local PostgreSQL database.
+            A local PostgreSQL database is initialized automatically.
+          '';
+        };
+
+        port = mkOption {
+          type = types.nullOr types.int;
+          default = 5432;
+          description = lib.mdDoc ''
+            The database's port.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "restya_board";
+          description = lib.mdDoc ''
+            Name of the database. The database must exist.
+          '';
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "restya_board";
+          description = lib.mdDoc ''
+            The database user. The user must exist and have access to
+            the specified database.
+          '';
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          description = lib.mdDoc ''
+            The database user's password. 'null' if no password is set.
+          '';
+        };
+      };
+
+      email = {
+        server = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          example = "localhost";
+          description = lib.mdDoc ''
+            Hostname of the server used to send outgoing mail. Set to `null` to use the system MTA.
+          '';
+        };
+
+        port = mkOption {
+          type = types.port;
+          default = 25;
+          description = lib.mdDoc ''
+            Port used to connect to SMTP server.
+          '';
+        };
+
+        login = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            SMTP authentication login used when sending outgoing mail.
+          '';
+        };
+
+        password = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            SMTP authentication password used when sending outgoing mail.
+
+            ATTENTION: The password is stored world-readable in the nix-store!
+          '';
+        };
+      };
+
+      timezone = mkOption {
+        type = types.lines;
+        default = "GMT";
+        description = lib.mdDoc ''
+          Timezone the web-app runs in.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.phpfpm.pools = {
+      ${poolName} = {
+        inherit (cfg) user group;
+
+        phpOptions = ''
+          date.timezone = "CET"
+
+          ${optionalString (cfg.email.server != null) ''
+            SMTP = ${cfg.email.server}
+            smtp_port = ${toString cfg.email.port}
+            auth_username = ${cfg.email.login}
+            auth_password = ${cfg.email.password}
+          ''}
+        '';
+        settings = mapAttrs (name: mkDefault) {
+          "listen.owner" = "nginx";
+          "listen.group" = "nginx";
+          "listen.mode" = "0600";
+          "pm" = "dynamic";
+          "pm.max_children" = 75;
+          "pm.start_servers" = 10;
+          "pm.min_spare_servers" = 5;
+          "pm.max_spare_servers" = 20;
+          "pm.max_requests" = 500;
+          "catch_workers_output" = 1;
+        };
+      };
+    };
+
+    services.nginx.enable = true;
+    services.nginx.virtualHosts.${cfg.virtualHost.serverName} = {
+      listen = [ { addr = cfg.virtualHost.listenHost; port = cfg.virtualHost.listenPort; } ];
+      serverName = cfg.virtualHost.serverName;
+      root = runDir;
+      extraConfig = ''
+        index index.html index.php;
+
+        gzip on;
+
+        gzip_comp_level 6;
+        gzip_min_length  1100;
+        gzip_buffers 16 8k;
+        gzip_proxied any;
+        gzip_types text/plain application/xml text/css text/js text/xml application/x-javascript text/javascript application/json application/xml+rss;
+
+        client_max_body_size 300M;
+
+        rewrite ^/oauth/authorize$ /server/php/authorize.php last;
+        rewrite ^/oauth_callback/([a-zA-Z0-9_\.]*)/([a-zA-Z0-9_\.]*)$ /server/php/oauth_callback.php?plugin=$1&code=$2 last;
+        rewrite ^/download/([0-9]*)/([a-zA-Z0-9_\.]*)$ /server/php/download.php?id=$1&hash=$2 last;
+        rewrite ^/ical/([0-9]*)/([0-9]*)/([a-z0-9]*).ics$ /server/php/ical.php?board_id=$1&user_id=$2&hash=$3 last;
+        rewrite ^/api/(.*)$ /server/php/R/r.php?_url=$1&$args last;
+        rewrite ^/api_explorer/api-docs/$ /client/api_explorer/api-docs/index.php last;
+      '';
+
+      locations."/".root = "${runDir}/client";
+
+      locations."~ \\.php$" = {
+        tryFiles = "$uri =404";
+        extraConfig = ''
+          include ${config.services.nginx.package}/conf/fastcgi_params;
+          fastcgi_pass    unix:${fpm.socket};
+          fastcgi_index   index.php;
+          fastcgi_param   SCRIPT_FILENAME $document_root$fastcgi_script_name;
+          fastcgi_param   PHP_VALUE "upload_max_filesize=9G \n post_max_size=9G \n max_execution_time=200 \n max_input_time=200 \n memory_limit=256M";
+        '';
+      };
+
+      locations."~* \\.(css|js|less|html|ttf|woff|jpg|jpeg|gif|png|bmp|ico)" = {
+        root = "${runDir}/client";
+        extraConfig = ''
+          if (-f $request_filename) {
+                  break;
+          }
+          rewrite ^/img/([a-zA-Z_]*)/([a-zA-Z_]*)/([a-zA-Z0-9_\.]*)$ /server/php/image.php?size=$1&model=$2&filename=$3 last;
+          add_header        Cache-Control public;
+          add_header        Cache-Control must-revalidate;
+          expires           7d;
+        '';
+      };
+    };
+
+    systemd.services.restya-board-init = {
+      description = "Restya board initialization";
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+
+      wantedBy = [ "multi-user.target" ];
+      requires = lib.optional (cfg.database.host != null) "postgresql.service";
+      after = [ "network.target" ] ++ (lib.optional (cfg.database.host != null) "postgresql.service");
+
+      script = ''
+        rm -rf "${runDir}"
+        mkdir -m 750 -p "${runDir}"
+        cp -r "${pkgs.restya-board}/"* "${runDir}"
+        sed -i "s/@restya.com/@${cfg.virtualHost.serverName}/g" "${runDir}/sql/restyaboard_with_empty_data.sql"
+        rm -rf "${runDir}/media"
+        rm -rf "${runDir}/client/img"
+        chmod -R 0750 "${runDir}"
+
+        sed -i "s@^php@${config.services.phpfpm.phpPackage}/bin/php@" "${runDir}/server/php/shell/"*.sh
+
+        ${if (cfg.database.host == null) then ''
+          sed -i "s/^.*'R_DB_HOST'.*$/define('R_DB_HOST', 'localhost');/g" "${runDir}/server/php/config.inc.php"
+          sed -i "s/^.*'R_DB_PASSWORD'.*$/define('R_DB_PASSWORD', 'restya');/g" "${runDir}/server/php/config.inc.php"
+        '' else ''
+          sed -i "s/^.*'R_DB_HOST'.*$/define('R_DB_HOST', '${cfg.database.host}');/g" "${runDir}/server/php/config.inc.php"
+          sed -i "s/^.*'R_DB_PASSWORD'.*$/define('R_DB_PASSWORD', ${if cfg.database.passwordFile == null then "''" else "'$(cat ${cfg.database.passwordFile})');/g"}" "${runDir}/server/php/config.inc.php"
+        ''}
+        sed -i "s/^.*'R_DB_PORT'.*$/define('R_DB_PORT', '${toString cfg.database.port}');/g" "${runDir}/server/php/config.inc.php"
+        sed -i "s/^.*'R_DB_NAME'.*$/define('R_DB_NAME', '${cfg.database.name}');/g" "${runDir}/server/php/config.inc.php"
+        sed -i "s/^.*'R_DB_USER'.*$/define('R_DB_USER', '${cfg.database.user}');/g" "${runDir}/server/php/config.inc.php"
+
+        chmod 0400 "${runDir}/server/php/config.inc.php"
+
+        ln -sf "${cfg.dataDir}/media" "${runDir}/media"
+        ln -sf "${cfg.dataDir}/client/img" "${runDir}/client/img"
+
+        chmod g+w "${runDir}/tmp/cache"
+        chown -R "${cfg.user}":"${cfg.group}" "${runDir}"
+
+
+        mkdir -m 0750 -p "${cfg.dataDir}"
+        mkdir -m 0750 -p "${cfg.dataDir}/media"
+        mkdir -m 0750 -p "${cfg.dataDir}/client/img"
+        cp -r "${pkgs.restya-board}/media/"* "${cfg.dataDir}/media"
+        cp -r "${pkgs.restya-board}/client/img/"* "${cfg.dataDir}/client/img"
+        chown "${cfg.user}":"${cfg.group}" "${cfg.dataDir}"
+        chown -R "${cfg.user}":"${cfg.group}" "${cfg.dataDir}/media"
+        chown -R "${cfg.user}":"${cfg.group}" "${cfg.dataDir}/client/img"
+
+        ${optionalString (cfg.database.host == null) ''
+          if ! [ -e "${cfg.dataDir}/.db-initialized" ]; then
+            ${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} \
+              ${config.services.postgresql.package}/bin/psql -U ${config.services.postgresql.superUser} \
+              -c "CREATE USER ${cfg.database.user} WITH ENCRYPTED PASSWORD 'restya'"
+
+            ${pkgs.sudo}/bin/sudo -u ${config.services.postgresql.superUser} \
+              ${config.services.postgresql.package}/bin/psql -U ${config.services.postgresql.superUser} \
+              -c "CREATE DATABASE ${cfg.database.name} OWNER ${cfg.database.user} ENCODING 'UTF8' TEMPLATE template0"
+
+            ${pkgs.sudo}/bin/sudo -u ${cfg.user} \
+              ${config.services.postgresql.package}/bin/psql -U ${cfg.database.user} \
+              -d ${cfg.database.name} -f "${runDir}/sql/restyaboard_with_empty_data.sql"
+
+            touch "${cfg.dataDir}/.db-initialized"
+          fi
+        ''}
+      '';
+    };
+
+    systemd.timers.restya-board = {
+      description = "restya-board scripts for e.g. email notification";
+      wantedBy = [ "timers.target" ];
+      after = [ "restya-board-init.service" ];
+      requires = [ "restya-board-init.service" ];
+      timerConfig = {
+        OnUnitInactiveSec = "60s";
+        Unit = "restya-board-timers.service";
+      };
+    };
+
+    systemd.services.restya-board-timers = {
+      description = "restya-board scripts for e.g. email notification";
+      serviceConfig.Type = "oneshot";
+      serviceConfig.User = cfg.user;
+
+      after = [ "restya-board-init.service" ];
+      requires = [ "restya-board-init.service" ];
+
+      script = ''
+        /bin/sh ${runDir}/server/php/shell/instant_email_notification.sh 2> /dev/null || true
+        /bin/sh ${runDir}/server/php/shell/periodic_email_notification.sh 2> /dev/null || true
+        /bin/sh ${runDir}/server/php/shell/imap.sh 2> /dev/null || true
+        /bin/sh ${runDir}/server/php/shell/webhook.sh 2> /dev/null || true
+        /bin/sh ${runDir}/server/php/shell/card_due_notification.sh 2> /dev/null || true
+      '';
+    };
+
+    users.users.restya-board = {
+      isSystemUser = true;
+      createHome = false;
+      home = runDir;
+      group  = "restya-board";
+    };
+    users.groups.restya-board = {};
+
+    services.postgresql.enable = mkIf (cfg.database.host == null) true;
+
+    services.postgresql.identMap = optionalString (cfg.database.host == null)
+      ''
+        restya-board-users restya-board restya_board
+      '';
+
+    services.postgresql.authentication = optionalString (cfg.database.host == null)
+      ''
+        local restya_board all ident map=restya-board-users
+      '';
+
+  };
+
+}
+
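A hedged usage sketch for the options above, leaving `database.host` at `null` so the module initializes a local PostgreSQL database; the server name is a placeholder:

```nix
{
  services.restya-board = {
    enable = true;
    virtualHost = {
      serverName = "board.example.org";
      listenHost = "127.0.0.1";
      listenPort = 3000;
    };
    timezone = "UTC";
  };
}
```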
diff --git a/nixpkgs/nixos/modules/services/web-apps/rimgo.nix b/nixpkgs/nixos/modules/services/web-apps/rimgo.nix
new file mode 100644
index 000000000000..4d35473fda31
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/rimgo.nix
@@ -0,0 +1,107 @@
+{
+  config,
+  lib,
+  pkgs,
+  ...
+}:
+let
+  cfg = config.services.rimgo;
+  inherit (lib)
+    mkOption
+    mkEnableOption
+    mkPackageOption
+    mkDefault
+    mkIf
+    types
+    literalExpression
+    optionalString
+    getExe
+    mapAttrs
+  ;
+in
+{
+  options.services.rimgo = {
+    enable = mkEnableOption "rimgo";
+    package = mkPackageOption pkgs "rimgo" { };
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = with types; attrsOf str;
+        options = {
+          PORT = mkOption {
+            type = types.port;
+            default = 3000;
+            example = 8080;
+            description = "The port to use.";
+          };
+          ADDRESS = mkOption {
+            type = types.str;
+            default = "127.0.0.1";
+            example = "1.1.1.1";
+            description = "The address to listen on.";
+          };
+        };
+      };
+      example = literalExpression ''
+        {
+          PORT = 8080;
+          FORCE_WEBP = "1";
+        }
+      '';
+      description = ''
+        Settings for rimgo, see [the official documentation](https://rimgo.codeberg.page/docs/usage/configuration/) for supported options.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.rimgo = {
+      description = "Rimgo";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      environment = mapAttrs (_: toString) cfg.settings;
+      serviceConfig = {
+        ExecStart = getExe cfg.package;
+        AmbientCapabilities = mkIf (cfg.settings.PORT < 1024) [
+          "CAP_NET_BIND_SERVICE"
+        ];
+        DynamicUser = true;
+        Restart = "on-failure";
+        RestartSec = "5s";
+        CapabilityBoundingSet = [
+          (optionalString (cfg.settings.PORT < 1024) "CAP_NET_BIND_SERVICE")
+        ];
+        DeviceAllow = [ "" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        PrivateDevices = true;
+        PrivateUsers = cfg.settings.PORT >= 1024;
+        ProcSubset = "pid";
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        RestrictAddressFamilies = [
+          "AF_INET"
+          "AF_INET6"
+        ];
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [
+          "@system-service"
+          "~@privileged"
+        ];
+        UMask = "0077";
+      };
+    };
+  };
+
+  meta = {
+    maintainers = with lib.maintainers; [ quantenzitrone ];
+  };
+}
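
Because `settings` above is mapped directly to environment variables, a small deployment behind nginx might look like this; the vhost name is illustrative, and `FORCE_WEBP` is taken from the option example:

```nix
{
  services.rimgo = {
    enable = true;
    settings = {
      ADDRESS = "127.0.0.1";
      PORT = 3000;
      FORCE_WEBP = "1";
    };
  };

  services.nginx.virtualHosts."rimgo.example.org".locations."/".proxyPass =
    "http://127.0.0.1:3000";
}
```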
diff --git a/nixpkgs/nixos/modules/services/web-apps/rss-bridge.nix b/nixpkgs/nixos/modules/services/web-apps/rss-bridge.nix
new file mode 100644
index 000000000000..1a710f4a6a67
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/rss-bridge.nix
@@ -0,0 +1,125 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.rss-bridge;
+
+  poolName = "rss-bridge";
+
+  whitelist = pkgs.writeText "rss-bridge_whitelist.txt"
+    (concatStringsSep "\n" cfg.whitelist);
+in
+{
+  options = {
+    services.rss-bridge = {
+      enable = mkEnableOption (lib.mdDoc "rss-bridge");
+
+      user = mkOption {
+        type = types.str;
+        default = "nginx";
+        description = lib.mdDoc ''
+          User account under which both the service and the web-application run.
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nginx";
+        description = lib.mdDoc ''
+          Group under which the web-application runs.
+        '';
+      };
+
+      pool = mkOption {
+        type = types.str;
+        default = poolName;
+        description = lib.mdDoc ''
+          Name of the existing phpfpm pool that is used to run the web application.
+          If not specified, a pool will be created automatically with
+          default values.
+        '';
+      };
+
+      dataDir = mkOption {
+        type = types.str;
+        default = "/var/lib/rss-bridge";
+        description = lib.mdDoc ''
+          Location in which cache directory will be created.
+          You can put `config.ini.php` in here.
+        '';
+      };
+
+      virtualHost = mkOption {
+        type = types.nullOr types.str;
+        default = "rss-bridge";
+        description = lib.mdDoc ''
+          Name of the nginx virtualhost to use and set up. If null, do not set up any virtualhost.
+        '';
+      };
+
+      whitelist = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = literalExpression ''
+          [
+            "Facebook"
+            "Instagram"
+            "Twitter"
+          ]
+        '';
+        description = lib.mdDoc ''
+          List of bridges to be whitelisted.
+          If the list is empty, rss-bridge will use whitelist.default.txt.
+          Use `[ "*" ]` to whitelist all.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.phpfpm.pools = mkIf (cfg.pool == poolName) {
+      ${poolName} = {
+        user = cfg.user;
+        settings = mapAttrs (name: mkDefault) {
+          "listen.owner" = cfg.user;
+          "listen.group" = cfg.user;
+          "listen.mode" = "0600";
+          "pm" = "dynamic";
+          "pm.max_children" = 75;
+          "pm.start_servers" = 10;
+          "pm.min_spare_servers" = 5;
+          "pm.max_spare_servers" = 20;
+          "pm.max_requests" = 500;
+          "catch_workers_output" = 1;
+        };
+      };
+    };
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}/cache' 0750 ${cfg.user} ${cfg.group} - -"
+      (mkIf (cfg.whitelist != []) "L+ ${cfg.dataDir}/whitelist.txt - - - - ${whitelist}")
+      "z '${cfg.dataDir}/config.ini.php' 0750 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    services.nginx = mkIf (cfg.virtualHost != null) {
+      enable = true;
+      virtualHosts = {
+        ${cfg.virtualHost} = {
+          root = "${pkgs.rss-bridge}";
+
+          locations."/" = {
+            tryFiles = "$uri /index.php$is_args$args";
+          };
+
+          locations."~ ^/index.php(/|$)" = {
+            extraConfig = ''
+              include ${config.services.nginx.package}/conf/fastcgi_params;
+              fastcgi_split_path_info ^(.+\.php)(/.+)$;
+              fastcgi_pass unix:${config.services.phpfpm.pools.${cfg.pool}.socket};
+              fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+              fastcgi_param RSSBRIDGE_DATA ${cfg.dataDir};
+            '';
+          };
+        };
+      };
+    };
+  };
+}
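
A minimal sketch of the options above, whitelisting a few bridges and letting the module create its default phpfpm pool and nginx vhost; the vhost name is a placeholder:

```nix
{
  services.rss-bridge = {
    enable = true;
    virtualHost = "rss-bridge.example.org";
    whitelist = [
      "Facebook"
      "Instagram"
      "Twitter"
    ];
  };
}
```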
diff --git a/nixpkgs/nixos/modules/services/web-apps/selfoss.nix b/nixpkgs/nixos/modules/services/web-apps/selfoss.nix
new file mode 100644
index 000000000000..8debd4904e88
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/selfoss.nix
@@ -0,0 +1,164 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.selfoss;
+
+  poolName = "selfoss_pool";
+
+  dataDir = "/var/lib/selfoss";
+
+  selfoss-config =
+  let
+    db_type = cfg.database.type;
+    default_port = if (db_type == "mysql") then 3306 else 5432;
+  in
+  pkgs.writeText "selfoss-config.ini" ''
+    [globals]
+    ${lib.optionalString (db_type != "sqlite") ''
+      db_type=${db_type}
+      db_host=${cfg.database.host}
+      db_database=${cfg.database.name}
+      db_username=${cfg.database.user}
+      db_password=${cfg.database.password}
+      db_port=${toString (if (cfg.database.port != null) then cfg.database.port
+                    else default_port)}
+    ''
+    }
+    ${cfg.extraConfig}
+  '';
+in
+  {
+    options = {
+      services.selfoss = {
+        enable = mkEnableOption (lib.mdDoc "selfoss");
+
+        user = mkOption {
+          type = types.str;
+          default = "nginx";
+          description = lib.mdDoc ''
+            User account under which both the service and the web-application run.
+          '';
+        };
+
+        pool = mkOption {
+          type = types.str;
+          default = "${poolName}";
+          description = lib.mdDoc ''
+            Name of the existing phpfpm pool that is used to run the web application.
+            If not specified, a pool will be created automatically with
+            default values.
+          '';
+        };
+
+      database = {
+        type = mkOption {
+          type = types.enum ["pgsql" "mysql" "sqlite"];
+          default = "sqlite";
+          description = lib.mdDoc ''
+            Database to store feeds. Supported are sqlite, pgsql and mysql.
+          '';
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "localhost";
+          description = lib.mdDoc ''
+            Host of the database (has no effect if type is "sqlite").
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "tt_rss";
+          description = lib.mdDoc ''
+            Name of the existing database (has no effect if type is "sqlite").
+          '';
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "tt_rss";
+          description = lib.mdDoc ''
+            The database user. The user must exist and have access to
+            the specified database (has no effect if type is "sqlite").
+          '';
+        };
+
+        password = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            The database user's password (has no effect if type is "sqlite").
+          '';
+        };
+
+        port = mkOption {
+          type = types.nullOr types.int;
+          default = null;
+          description = lib.mdDoc ''
+            The database's port. If not set, the default ports will be
+            provided (5432 and 3306 for pgsql and mysql respectively)
+            (has no effect if type is "sqlite").
+          '';
+        };
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration added to config.ini
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.phpfpm.pools = mkIf (cfg.pool == "${poolName}") {
+      ${poolName} = {
+        user = "nginx";
+        settings = mapAttrs (name: mkDefault) {
+          "listen.owner" = "nginx";
+          "listen.group" = "nginx";
+          "listen.mode" = "0600";
+          "pm" = "dynamic";
+          "pm.max_children" = 75;
+          "pm.start_servers" = 10;
+          "pm.min_spare_servers" = 5;
+          "pm.max_spare_servers" = 20;
+          "pm.max_requests" = 500;
+          "catch_workers_output" = 1;
+        };
+      };
+    };
+
+    systemd.services.selfoss-config = {
+      serviceConfig.Type = "oneshot";
+      script = ''
+        mkdir -m 755 -p ${dataDir}
+        cd ${dataDir}
+
+        # Delete all but the "data" folder
+        ls | grep -v data | while read line; do rm -rf $line; done || true
+
+        # Create the files
+        cp -r "${pkgs.selfoss}/"* "${dataDir}"
+        ln -sf "${selfoss-config}" "${dataDir}/config.ini"
+        chown -R "${cfg.user}" "${dataDir}"
+        chmod -R 755 "${dataDir}"
+      '';
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.services.selfoss-update = {
+      serviceConfig = {
+        ExecStart = "${pkgs.php}/bin/php ${dataDir}/cliupdate.php";
+        User = "${cfg.user}";
+      };
+      startAt = "hourly";
+      after = [ "selfoss-config.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+    };
+
+  };
+}
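
Using the options above with a PostgreSQL backend could be sketched as follows; credentials are placeholders, and note that the generated `config.ini` ends up world-readable in the Nix store, so the password option should be used with care. With `type = "sqlite"` the other database options have no effect.

```nix
{
  services.selfoss = {
    enable = true;
    database = {
      type = "pgsql";
      host = "localhost";
      name = "selfoss";
      user = "selfoss";
      password = "placeholder-password";
    };
  };
}
```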
diff --git a/nixpkgs/nixos/modules/services/web-apps/sftpgo.nix b/nixpkgs/nixos/modules/services/web-apps/sftpgo.nix
new file mode 100644
index 000000000000..846478ecbd6d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/sftpgo.nix
@@ -0,0 +1,375 @@
+{ options, config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.sftpgo;
+  defaultUser = "sftpgo";
+  settingsFormat = pkgs.formats.json {};
+  configFile = settingsFormat.generate "sftpgo.json" cfg.settings;
+  hasPrivilegedPorts = any (port: port > 0 && port < 1024) (
+    catAttrs "port" (cfg.settings.httpd.bindings
+      ++ cfg.settings.ftpd.bindings
+      ++ cfg.settings.sftpd.bindings
+      ++ cfg.settings.webdavd.bindings
+    )
+  );
+in
+{
+  options.services.sftpgo = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = mdDoc "sftpgo";
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.sftpgo;
+      defaultText = literalExpression "pkgs.sftpgo";
+      description = mdDoc ''
+        Which SFTPGo package to use.
+      '';
+    };
+
+    extraArgs = mkOption {
+      type = with types; listOf str;
+      default = [];
+      description = mdDoc ''
+        Additional command line arguments to pass to the sftpgo daemon.
+      '';
+      example = [ "--log-level" "info" ];
+    };
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/sftpgo";
+      description = mdDoc ''
+        The directory where SFTPGo stores its data files.
+      '';
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = mdDoc ''
+        User account name under which SFTPGo runs.
+      '';
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = defaultUser;
+      description = mdDoc ''
+        Group name under which SFTPGo runs.
+      '';
+    };
+
+    loadDataFile = mkOption {
+      default = null;
+      type = with types; nullOr path;
+      description = mdDoc ''
+        Path to a json file containing users and folders to load (or update) on startup.
+        Check the [documentation](https://github.com/drakkan/sftpgo/blob/main/docs/full-configuration.md)
+        for the `--loaddata-from` command line argument for more info.
+      '';
+    };
+
+    settings = mkOption {
+      default = {};
+      description = mdDoc ''
+        The primary sftpgo configuration. See the
+        [configuration reference](https://github.com/drakkan/sftpgo/blob/main/docs/full-configuration.md)
+        for possible values.
+      '';
+      type = with types; submodule {
+        freeformType = settingsFormat.type;
+        options = {
+          httpd.bindings = mkOption {
+            default = [];
+            description = mdDoc ''
+              Configure listen addresses and ports for httpd.
+            '';
+            type = types.listOf (types.submodule {
+              freeformType = settingsFormat.type;
+              options = {
+                address = mkOption {
+                  type = types.str;
+                  default = "127.0.0.1";
+                  description = mdDoc ''
+                    Network listen address. Leave blank to listen on all available network interfaces.
+                    On *NIX you can specify an absolute path to listen on a Unix-domain socket.
+                  '';
+                };
+
+                port = mkOption {
+                  type = types.port;
+                  default = 8080;
+                  description = mdDoc ''
+                    The port for serving HTTP(S) requests.
+
+                    Setting the port to `0` disables listening on this interface binding.
+                  '';
+                };
+
+                enable_web_admin = mkOption {
+                  type = types.bool;
+                  default = true;
+                  description = mdDoc ''
+                    Enable the built-in web admin for this interface binding.
+                  '';
+                };
+
+                enable_web_client = mkOption {
+                  type = types.bool;
+                  default = true;
+                  description = mdDoc ''
+                    Enable the built-in web client for this interface binding.
+                  '';
+                };
+              };
+            });
+          };
+
+          ftpd.bindings = mkOption {
+            default = [];
+            description = mdDoc ''
+              Configure listen addresses and ports for ftpd.
+            '';
+            type = types.listOf (types.submodule {
+              freeformType = settingsFormat.type;
+              options = {
+                address = mkOption {
+                  type = types.str;
+                  default = "127.0.0.1";
+                  description = mdDoc ''
+                    Network listen address. Leave blank to listen on all available network interfaces.
+                    On *NIX you can specify an absolute path to listen on a Unix-domain socket.
+                  '';
+                };
+
+                port = mkOption {
+                  type = types.port;
+                  default = 0;
+                  description = mdDoc ''
+                    The port for serving FTP requests.
+
+                    Setting the port to `0` disables listening on this interface binding.
+                  '';
+                };
+              };
+            });
+          };
+
+          sftpd.bindings = mkOption {
+            default = [];
+            description = mdDoc ''
+              Configure listen addresses and ports for sftpd.
+            '';
+            type = types.listOf (types.submodule {
+              freeformType = settingsFormat.type;
+              options = {
+                address = mkOption {
+                  type = types.str;
+                  default = "127.0.0.1";
+                  description = mdDoc ''
+                    Network listen address. Leave blank to listen on all available network interfaces.
+                    On *NIX you can specify an absolute path to listen on a Unix-domain socket.
+                  '';
+                };
+
+                port = mkOption {
+                  type = types.port;
+                  default = 0;
+                  description = mdDoc ''
+                    The port for serving SFTP requests.
+
+                    Setting the port to `0` disables listening on this interface binding.
+                  '';
+                };
+              };
+            });
+          };
+
+          webdavd.bindings = mkOption {
+            default = [];
+            description = mdDoc ''
+              Configure listen addresses and ports for webdavd.
+            '';
+            type = types.listOf (types.submodule {
+              freeformType = settingsFormat.type;
+              options = {
+                address = mkOption {
+                  type = types.str;
+                  default = "127.0.0.1";
+                  description = mdDoc ''
+                    Network listen address. Leave blank to listen on all available network interfaces.
+                    On *NIX you can specify an absolute path to listen on a Unix-domain socket.
+                  '';
+                };
+
+                port = mkOption {
+                  type = types.port;
+                  default = 0;
+                  description = mdDoc ''
+                    The port for serving WebDAV requests.
+
+                    Setting the port to `0` disables listening on this interface binding.
+                  '';
+                };
+              };
+            });
+          };
+
+          smtp = mkOption {
+            default = {};
+            description = mdDoc ''
+              SMTP configuration section.
+            '';
+            type = types.submodule {
+              freeformType = settingsFormat.type;
+              options = {
+                host = mkOption {
+                  type = types.str;
+                  default = "";
+                  description = mdDoc ''
+                    Location of SMTP email server. Leave empty to disable email sending capabilities.
+                  '';
+                };
+
+                port = mkOption {
+                  type = types.port;
+                  default = 465;
+                  description = mdDoc "Port of the SMTP Server.";
+                };
+
+                encryption = mkOption {
+                  type = types.enum [ 0 1 2 ];
+                  default = 1;
+                  description = mdDoc ''
+                    Encryption scheme:
+                    - `0`: No encryption
+                    - `1`: TLS
+                    - `2`: STARTTLS
+                  '';
+                };
+
+                auth_type = mkOption {
+                  type = types.enum [ 0 1 2 ];
+                  default = 0;
+                  description = mdDoc ''
+                    SMTP authentication type:
+                    - `0`: Plain
+                    - `1`: Login
+                    - `2`: CRAM-MD5
+                  '';
+                };
+
+                user = mkOption {
+                  type = types.str;
+                  default = "sftpgo";
+                  description = mdDoc "SMTP username.";
+                };
+
+                from = mkOption {
+                  type = types.str;
+                  default = "SFTPGo <sftpgo@example.com>";
+                  description = mdDoc ''
+                    From address.
+                  '';
+                };
+              };
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.sftpgo.settings = (mapAttrs (name: mkDefault) {
+      ftpd.bindings = [{ port = 0; }];
+      httpd.bindings = [{ port = 0; }];
+      sftpd.bindings = [{ port = 0; }];
+      webdavd.bindings = [{ port = 0; }];
+      httpd.openapi_path = "${cfg.package}/share/sftpgo/openapi";
+      httpd.templates_path = "${cfg.package}/share/sftpgo/templates";
+      httpd.static_files_path = "${cfg.package}/share/sftpgo/static";
+      smtp.templates_path = "${cfg.package}/share/sftpgo/templates";
+    });
+
+    users = optionalAttrs (cfg.user == defaultUser) {
+      users = {
+        ${defaultUser} = {
+          description = "SFTPGo system user";
+          isSystemUser = true;
+          group = defaultUser;
+          home = cfg.dataDir;
+        };
+      };
+
+      groups = {
+        ${defaultUser} = {
+          members = [ defaultUser ];
+        };
+      };
+    };
+
+    systemd.services.sftpgo = {
+      description = "SFTPGo daemon";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = {
+        SFTPGO_CONFIG_FILE = mkDefault configFile;
+        SFTPGO_LOG_FILE_PATH = mkDefault ""; # log to journal
+        SFTPGO_LOADDATA_FROM = mkIf (cfg.loadDataFile != null) cfg.loadDataFile;
+      };
+
+      serviceConfig = mkMerge [
+        ({
+          Type = "simple";
+          User = cfg.user;
+          Group = cfg.group;
+          WorkingDirectory = cfg.dataDir;
+          ReadWritePaths = [ cfg.dataDir ];
+          LimitNOFILE = 8192; # taken from upstream
+          KillMode = "mixed";
+          ExecStart = "${cfg.package}/bin/sftpgo serve ${utils.escapeSystemdExecArgs cfg.extraArgs}";
+          ExecReload = "${pkgs.util-linux}/bin/kill -s HUP $MAINPID";
+
+          # Service hardening
+          CapabilityBoundingSet = [ (optionalString hasPrivilegedPorts "CAP_NET_BIND_SERVICE") ];
+          DevicePolicy = "closed";
+          LockPersonality = true;
+          NoNewPrivileges = true;
+          PrivateDevices = true;
+          PrivateTmp = true;
+          ProcSubset = "pid";
+          ProtectClock = true;
+          ProtectControlGroups = true;
+          ProtectHome = true;
+          ProtectHostname = true;
+          ProtectKernelLogs = true;
+          ProtectKernelModules = true;
+          ProtectKernelTunables = true;
+          ProtectProc = "invisible";
+          ProtectSystem = "strict";
+          RemoveIPC = true;
+          RestrictAddressFamilies = "AF_INET AF_INET6 AF_UNIX";
+          RestrictNamespaces = true;
+          RestrictRealtime = true;
+          RestrictSUIDSGID = true;
+          SystemCallArchitectures = "native";
+          SystemCallFilter = [ "@system-service" "~@privileged" ];
+          UMask = "0077";
+        })
+        (mkIf hasPrivilegedPorts {
+          AmbientCapabilities = "CAP_NET_BIND_SERVICE";
+        })
+        (mkIf (cfg.dataDir == options.services.sftpgo.dataDir.default) {
+          StateDirectory = baseNameOf cfg.dataDir;
+        })
+      ];
+    };
+  };
+}
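A minimal usage sketch for the module above, assuming only the options shown here; the addresses and ports are illustrative:

  # configuration.nix (illustrative)
  {
    services.sftpgo = {
      enable = true;
      settings = {
        # serve the built-in web admin/client locally on port 8080
        httpd.bindings = [
          { address = "127.0.0.1"; port = 8080; enable_web_admin = true; enable_web_client = true; }
        ];
        # accept SFTP on all interfaces; binding a privileged port appears to be
        # covered by the CAP_NET_BIND_SERVICE capability granted in the hardening block above
        sftpd.bindings = [
          { address = ""; port = 22; }
        ];
      };
    };
  }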
diff --git a/nixpkgs/nixos/modules/services/web-apps/shiori.nix b/nixpkgs/nixos/modules/services/web-apps/shiori.nix
new file mode 100644
index 000000000000..71b5ad4d4c06
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/shiori.nix
@@ -0,0 +1,103 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.shiori;
+in {
+  options = {
+    services.shiori = {
+      enable = mkEnableOption (lib.mdDoc "Shiori simple bookmarks manager");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.shiori;
+        defaultText = literalExpression "pkgs.shiori";
+        description = lib.mdDoc "The Shiori package to use.";
+      };
+
+      address = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          The IP address on which Shiori will listen.
+          If empty, listens on all interfaces.
+        '';
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 8080;
+        description = lib.mdDoc "The port of the Shiori web application";
+      };
+
+      webRoot = mkOption {
+        type = types.str;
+        default = "/";
+        example = "/shiori";
+        description = lib.mdDoc "The root of the Shiori web application";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.shiori = with cfg; {
+      description = "Shiori simple bookmarks manager";
+      wantedBy = [ "multi-user.target" ];
+
+      environment.SHIORI_DIR = "/var/lib/shiori";
+
+      serviceConfig = {
+        ExecStart = "${package}/bin/shiori serve --address '${address}' --port '${toString port}' --webroot '${webRoot}'";
+
+        DynamicUser = true;
+        StateDirectory = "shiori";
+        # Created under /run/shiori and used as the RootDirectory below
+        RuntimeDirectory = "shiori";
+
+        # Security options
+
+        BindReadOnlyPaths = [
+          "/nix/store"
+
+          # For SSL certificates, and the resolv.conf
+          "/etc"
+        ];
+
+        CapabilityBoundingSet = "";
+
+        DeviceAllow = "";
+
+        LockPersonality = true;
+
+        MemoryDenyWriteExecute = true;
+
+        PrivateDevices = true;
+        PrivateUsers = true;
+
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+
+        RestrictNamespaces = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+
+        RootDirectory = "/run/shiori";
+
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [
+          "@system-service"
+          "~@cpu-emulation" "~@debug" "~@keyring" "~@memlock" "~@obsolete" "~@privileged" "~@setuid"
+        ];
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ minijackson ];
+}
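A minimal usage sketch for the Shiori module, assuming the options shown above; the address and web root are illustrative:

  # configuration.nix (illustrative)
  {
    services.shiori = {
      enable = true;
      address = "127.0.0.1";  # an empty string listens on all interfaces
      port = 8080;
      webRoot = "/shiori";    # serve under /shiori instead of /
    };
  }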
diff --git a/nixpkgs/nixos/modules/services/web-apps/slskd.nix b/nixpkgs/nixos/modules/services/web-apps/slskd.nix
new file mode 100644
index 000000000000..33353a59440c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/slskd.nix
@@ -0,0 +1,211 @@
+{ lib, pkgs, config, ... }:
+
+let
+  settingsFormat = pkgs.formats.yaml {};
+in {
+  options.services.slskd = with lib; with types; {
+    enable = mkEnableOption "slskd";
+
+    rotateLogs = mkEnableOption "a unit and timer that will rotate logs in /var/lib/slskd/logs";
+
+    package = mkPackageOptionMD pkgs "slskd" { };
+
+    nginx = mkOption {
+      description = lib.mdDoc "Options for the nginx reverse proxy.";
+      example = {
+        enable = true;
+        domainName = "example.com";
+        contextPath = "/slskd";
+      };
+      type = submodule ({name, config, ...}: {
+        options = {
+          enable = mkEnableOption "enable nginx as a reverse proxy";
+
+          domainName = mkOption {
+            type = str;
+            description = "The domain name under which slskd is served.";
+          };
+          contextPath = mkOption {
+            type = types.path;
+            default = "/";
+            description = lib.mdDoc ''
+              The context path, i.e., the last part of the slskd URL.
+              Typically `/` or `/slskd`. Defaults to `/`.
+            '';
+          };
+        };
+      });
+    };
+
+    environmentFile = mkOption {
+      type = path;
+      description = ''
+        Path to a file containing secrets.
+        It must at least contain the variable `SLSKD_SLSK_PASSWORD`.
+      '';
+    };
+
+    openFirewall = mkOption {
+      type = bool;
+      description = ''
+        Whether to open the firewall for `services.slskd.settings.soulseek.listen_port`.
+      '';
+      default = false;
+    };
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        Configuration for slskd; see the
+        [available options](https://github.com/slskd/slskd/blob/master/docs/config.md).
+        `APP_DIR` is set to `/var/lib/slskd`, where the default download and incomplete
+        directories, logs and databases are created.
+      '';
+      default = {};
+      type = submodule {
+        freeformType = settingsFormat.type;
+        options = {
+
+          soulseek = {
+            username = mkOption {
+              type = str;
+              description = "Username on the Soulseek Network";
+            };
+            listen_port = mkOption {
+              type = port;
+              description = "Port to use for communication on the Soulseek Network";
+              default = 50000;
+            };
+          };
+
+          web = {
+            port = mkOption {
+              type = port;
+              default = 5001;
+              description = "The HTTP listen port";
+            };
+            url_base = mkOption {
+              type = path;
+              default = config.services.slskd.nginx.contextPath;
+              defaultText = "config.services.slskd.nginx.contextPath";
+              description = lib.mdDoc ''
+                The context path, i.e., the last part of the slskd URL
+              '';
+            };
+          };
+
+          shares = {
+            directories = mkOption {
+              type = listOf str;
+              description = lib.mdDoc ''
+                Paths to your shared directories. See
+                [documentation](https://github.com/slskd/slskd/blob/master/docs/config.md#directories)
+                for advanced usage
+              '';
+            };
+          };
+
+          directories = {
+            incomplete = mkOption {
+              type = nullOr path;
+              description = "Directory where downloading files are stored";
+              defaultText = "<APP_DIR>/incomplete";
+              default = null;
+            };
+            downloads = mkOption {
+              type = nullOr path;
+              description = "Directory where downloaded files are stored";
+              defaultText = "<APP_DIR>/downloads";
+              default = null;
+            };
+          };
+        };
+      };
+    };
+  };
+
+  config = let
+    cfg = config.services.slskd;
+
+    confWithoutNullValues = (lib.filterAttrs (key: value: value != null) cfg.settings);
+
+    configurationYaml = settingsFormat.generate "slskd.yml" confWithoutNullValues;
+
+  in lib.mkIf cfg.enable {
+
+    users = {
+      users.slskd = {
+        isSystemUser = true;
+        group = "slskd";
+      };
+      groups.slskd = {};
+    };
+
+    # Reverse proxy configuration
+    services.nginx.enable = true;
+    services.nginx.virtualHosts."${cfg.nginx.domainName}" = {
+      forceSSL = true;
+      enableACME = true;
+      locations = {
+        "${cfg.nginx.contextPath}" = {
+          proxyPass = "http://localhost:${toString cfg.settings.web.port}";
+          proxyWebsockets = true;
+        };
+      };
+    };
+
+    # Hide state & logs
+    systemd.tmpfiles.rules = [
+      "d /var/lib/slskd/data 0750 slskd slskd - -"
+      "d /var/lib/slskd/logs 0750 slskd slskd - -"
+    ];
+
+    systemd.services.slskd = {
+      description = "A modern client-server application for the Soulseek file sharing network";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "simple";
+        User = "slskd";
+        EnvironmentFile = lib.mkIf (cfg.environmentFile != null) cfg.environmentFile;
+        StateDirectory = "slskd";
+        ExecStart = "${cfg.package}/bin/slskd --app-dir /var/lib/slskd --config ${configurationYaml}";
+        Restart = "on-failure";
+        ReadOnlyPaths = map (d: builtins.elemAt (builtins.split "[^/]*(/.+)" d) 1) cfg.settings.shares.directories;
+        LockPersonality = true;
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        PrivateMounts = true;
+        PrivateTmp = true;
+        PrivateUsers = true;
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHome = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectProc = "invisible";
+        ProtectSystem = "strict";
+        RemoveIPC = true;
+        RestrictNamespaces = true;
+        RestrictSUIDSGID = true;
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = lib.optional cfg.openFirewall cfg.settings.soulseek.listen_port;
+
+    systemd.services.slskd-rotatelogs = lib.mkIf cfg.rotateLogs {
+      description = "Rotate slskd logs";
+      serviceConfig = {
+        Type = "oneshot";
+        User = "slskd";
+        ExecStart = [
+          "${pkgs.findutils}/bin/find /var/lib/slskd/logs/ -type f -mtime +10 -delete"
+          "${pkgs.findutils}/bin/find /var/lib/slskd/logs/ -type f -mtime +1  -exec ${pkgs.gzip}/bin/gzip -q {} ';'"
+        ];
+      };
+      startAt = "daily";
+    };
+
+  };
+}
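A minimal usage sketch for the slskd module, assuming the options shown above; the domain, username, share path and secrets file are placeholders:

  # configuration.nix (illustrative)
  {
    services.slskd = {
      enable = true;
      # must define at least SLSKD_SLSK_PASSWORD
      environmentFile = "/run/secrets/slskd.env";
      openFirewall = true;  # opens settings.soulseek.listen_port
      nginx = {
        enable = true;
        domainName = "slskd.example.com";
        contextPath = "/slskd";
      };
      settings = {
        soulseek.username = "alice";
        shares.directories = [ "/srv/music" ];
      };
    };
  }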
diff --git a/nixpkgs/nixos/modules/services/web-apps/snipe-it.nix b/nixpkgs/nixos/modules/services/web-apps/snipe-it.nix
new file mode 100644
index 000000000000..4fbf2bad750b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/snipe-it.nix
@@ -0,0 +1,515 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.snipe-it;
+  snipe-it = pkgs.snipe-it.override {
+    dataDir = cfg.dataDir;
+  };
+  db = cfg.database;
+  mail = cfg.mail;
+
+  user = cfg.user;
+  group = cfg.group;
+
+  tlsEnabled = cfg.nginx.addSSL || cfg.nginx.forceSSL || cfg.nginx.onlySSL || cfg.nginx.enableACME;
+
+  inherit (snipe-it.passthru) phpPackage;
+
+  # shell script for local administration
+  artisan = (pkgs.writeScriptBin "snipe-it" ''
+    #! ${pkgs.runtimeShell}
+    cd "${snipe-it}/share/php/snipe-it"
+    sudo=exec
+    if [[ "$USER" != ${user} ]]; then
+      sudo='exec /run/wrappers/bin/sudo -u ${user}'
+    fi
+    $sudo ${phpPackage}/bin/php artisan "$@"
+  '').overrideAttrs (old: {
+    meta = old.meta // {
+      mainProgram = "snipe-it";
+    };
+  });
+in {
+  options.services.snipe-it = {
+
+    enable = mkEnableOption (lib.mdDoc "snipe-it, a free open source IT asset/license management system");
+
+    user = mkOption {
+      default = "snipeit";
+      description = lib.mdDoc "User snipe-it runs as.";
+      type = types.str;
+    };
+
+    group = mkOption {
+      default = "snipeit";
+      description = lib.mdDoc "Group snipe-it runs as.";
+      type = types.str;
+    };
+
+    appKeyFile = mkOption {
+      description = lib.mdDoc ''
+        A file containing the Laravel APP_KEY - a 32 character long,
+        base64 encoded key used for encryption where needed. Can be
+        generated with `head -c 32 /dev/urandom | base64`.
+      '';
+      example = "/run/keys/snipe-it/appkey";
+      type = types.path;
+    };
+
+    hostName = lib.mkOption {
+      type = lib.types.str;
+      default = config.networking.fqdnOrHostName;
+      defaultText = lib.literalExpression "config.networking.fqdnOrHostName";
+      example = "snipe-it.example.com";
+      description = lib.mdDoc ''
+        The hostname to serve Snipe-IT on.
+      '';
+    };
+
+    appURL = mkOption {
+      description = lib.mdDoc ''
+        The root URL that you want to host Snipe-IT on. All URLs in Snipe-IT will be generated using this value.
+        If you change this in the future you may need to run a command to update stored URLs in the database.
+        Command example: `snipe-it snipe-it:update-url https://old.example.com https://new.example.com`
+      '';
+      default = "http${lib.optionalString tlsEnabled "s"}://${cfg.hostName}";
+      defaultText = ''
+        http''${lib.optionalString tlsEnabled "s"}://''${cfg.hostName}
+      '';
+      example = "https://example.com";
+      type = types.str;
+    };
+
+    dataDir = mkOption {
+      description = lib.mdDoc "snipe-it data directory";
+      default = "/var/lib/snipe-it";
+      type = types.path;
+    };
+
+    database = {
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Database host address.";
+      };
+      port = mkOption {
+        type = types.port;
+        default = 3306;
+        description = lib.mdDoc "Database host port.";
+      };
+      name = mkOption {
+        type = types.str;
+        default = "snipeit";
+        description = lib.mdDoc "Database name.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = user;
+        defaultText = literalExpression "user";
+        description = lib.mdDoc "Database username.";
+      };
+      passwordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "/run/keys/snipe-it/dbpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to
+          {option}`database.user`.
+        '';
+      };
+      createLocally = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Create the database and database user locally.";
+      };
+    };
+
+    mail = {
+      driver = mkOption {
+        type = types.enum [ "smtp" "sendmail" ];
+        default = "smtp";
+        description = lib.mdDoc "Mail driver to use.";
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Mail host address.";
+      };
+      port = mkOption {
+        type = types.port;
+        default = 1025;
+        description = lib.mdDoc "Mail host port.";
+      };
+      encryption = mkOption {
+        type = with types; nullOr (enum [ "tls" "ssl" ]);
+        default = null;
+        description = lib.mdDoc "SMTP encryption mechanism to use.";
+      };
+      user = mkOption {
+        type = with types; nullOr str;
+        default = null;
+        example = "snipeit";
+        description = lib.mdDoc "Mail username.";
+      };
+      passwordFile = mkOption {
+        type = with types; nullOr path;
+        default = null;
+        example = "/run/keys/snipe-it/mailpassword";
+        description = lib.mdDoc ''
+          A file containing the password corresponding to
+          {option}`mail.user`.
+        '';
+      };
+      backupNotificationAddress = mkOption {
+        type = types.str;
+        default = "backup@example.com";
+        description = lib.mdDoc "Email Address to send Backup Notifications to.";
+      };
+      from = {
+        name = mkOption {
+          type = types.str;
+          default = "Snipe-IT Asset Management";
+          description = lib.mdDoc "Mail \"from\" name.";
+        };
+        address = mkOption {
+          type = types.str;
+          default = "mail@example.com";
+          description = lib.mdDoc "Mail \"from\" address.";
+        };
+      };
+      replyTo = {
+        name = mkOption {
+          type = types.str;
+          default = "Snipe-IT Asset Management";
+          description = lib.mdDoc "Mail \"reply-to\" name.";
+        };
+        address = mkOption {
+          type = types.str;
+          default = "mail@example.com";
+          description = lib.mdDoc "Mail \"reply-to\" address.";
+        };
+      };
+    };
+
+    maxUploadSize = mkOption {
+      type = types.str;
+      default = "18M";
+      example = "1G";
+      description = lib.mdDoc "The maximum size for uploads (e.g. images).";
+    };
+
+    poolConfig = mkOption {
+      type = with types; attrsOf (oneOf [ str int bool ]);
+      default = {
+        "pm" = "dynamic";
+        "pm.max_children" = 32;
+        "pm.start_servers" = 2;
+        "pm.min_spare_servers" = 2;
+        "pm.max_spare_servers" = 4;
+        "pm.max_requests" = 500;
+      };
+      description = lib.mdDoc ''
+        Options for the snipe-it PHP pool. See the documentation on `php-fpm.conf`
+        for details on configuration directives.
+      '';
+    };
+
+    nginx = mkOption {
+      type = types.submodule (
+        recursiveUpdate
+          (import ../web-servers/nginx/vhost-options.nix { inherit config lib; }) {}
+      );
+      default = {};
+      example = literalExpression ''
+        {
+          serverAliases = [
+            "snipe-it.''${config.networking.domain}"
+          ];
+          # To enable encryption and let Let's Encrypt take care of the certificate
+          forceSSL = true;
+          enableACME = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        With this option, you can customize the nginx virtualHost settings.
+      '';
+    };
+
+    config = mkOption {
+      type = with types;
+        attrsOf
+          (nullOr
+            (either
+              (oneOf [
+                bool
+                int
+                port
+                path
+                str
+              ])
+              (submodule {
+                options = {
+                  _secret = mkOption {
+                    type = nullOr (oneOf [ str path ]);
+                    description = lib.mdDoc ''
+                      The path to a file containing the value the
+                      option should be set to in the final
+                      configuration file.
+                    '';
+                  };
+                };
+              })));
+      default = {};
+      example = literalExpression ''
+        {
+          ALLOWED_IFRAME_HOSTS = "https://example.com";
+          WKHTMLTOPDF = "''${pkgs.wkhtmltopdf}/bin/wkhtmltopdf";
+          AUTH_METHOD = "oidc";
+          OIDC_NAME = "MyLogin";
+          OIDC_DISPLAY_NAME_CLAIMS = "name";
+          OIDC_CLIENT_ID = "snipe-it";
+          OIDC_CLIENT_SECRET = { _secret = "/run/keys/oidc_secret"; };
+          OIDC_ISSUER = "https://keycloak.example.com/auth/realms/My%20Realm";
+          OIDC_ISSUER_DISCOVER = true;
+        }
+      '';
+      description = lib.mdDoc ''
+        Snipe-IT configuration options to set in the
+        {file}`.env` file.
+        Refer to <https://snipe-it.readme.io/docs/configuration>
+        for details on supported values.
+
+        Settings containing secret data should be set to an attribute
+        set containing the attribute `_secret` - a
+        string pointing to a file containing the value the option
+        should be set to. See the example to get a better picture of
+        this: in the resulting {file}`.env` file, the
+        `OIDC_CLIENT_SECRET` key will be set to the
+        contents of the {file}`/run/keys/oidc_secret`
+        file.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = db.createLocally -> db.user == user;
+        message = "services.snipe-it.database.user must be set to ${user} if services.snipe-it.database.createLocally is set true.";
+      }
+      { assertion = db.createLocally -> db.passwordFile == null;
+        message = "services.snipe-it.database.passwordFile cannot be specified if services.snipe-it.database.createLocally is set to true.";
+      }
+    ];
+
+    environment.systemPackages = [ artisan ];
+
+    services.snipe-it.config = {
+      APP_ENV = "production";
+      APP_KEY._secret = cfg.appKeyFile;
+      APP_URL = cfg.appURL;
+      DB_HOST = db.host;
+      DB_PORT = db.port;
+      DB_DATABASE = db.name;
+      DB_USERNAME = db.user;
+      DB_PASSWORD._secret = db.passwordFile;
+      MAIL_DRIVER = mail.driver;
+      MAIL_FROM_NAME = mail.from.name;
+      MAIL_FROM_ADDR = mail.from.address;
+      MAIL_REPLYTO_NAME = mail.replyTo.name;
+      MAIL_REPLYTO_ADDR = mail.replyTo.address;
+      MAIL_BACKUP_NOTIFICATION_ADDRESS = mail.backupNotificationAddress;
+      MAIL_HOST = mail.host;
+      MAIL_PORT = mail.port;
+      MAIL_USERNAME = mail.user;
+      MAIL_ENCRYPTION = mail.encryption;
+      MAIL_PASSWORD._secret = mail.passwordFile;
+      APP_SERVICES_CACHE = "/run/snipe-it/cache/services.php";
+      APP_PACKAGES_CACHE = "/run/snipe-it/cache/packages.php";
+      APP_CONFIG_CACHE = "/run/snipe-it/cache/config.php";
+      APP_ROUTES_CACHE = "/run/snipe-it/cache/routes-v7.php";
+      APP_EVENTS_CACHE = "/run/snipe-it/cache/events.php";
+      SESSION_SECURE_COOKIE = tlsEnabled;
+    };
+
+    services.mysql = mkIf db.createLocally {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ db.name ];
+      ensureUsers = [
+        { name = db.user;
+          ensurePermissions = { "${db.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ];
+    };
+
+    services.phpfpm.pools.snipe-it = {
+      inherit user group phpPackage;
+      phpOptions = ''
+        post_max_size = ${cfg.maxUploadSize}
+        upload_max_filesize = ${cfg.maxUploadSize}
+      '';
+      settings = {
+        "listen.mode" = "0660";
+        "listen.owner" = user;
+        "listen.group" = group;
+      } // cfg.poolConfig;
+    };
+
+    services.nginx = {
+      enable = mkDefault true;
+      virtualHosts."${cfg.hostName}" = mkMerge [ cfg.nginx {
+        root = mkForce "${snipe-it}/share/php/snipe-it/public";
+        extraConfig = optionalString tlsEnabled "fastcgi_param HTTPS on;";
+        locations = {
+          "/" = {
+            index = "index.php";
+            extraConfig = ''try_files $uri $uri/ /index.php?$query_string;'';
+          };
+          "~ \\.php$" = {
+            extraConfig = ''
+              try_files $uri $uri/ /index.php?$query_string;
+              include ${config.services.nginx.package}/conf/fastcgi_params;
+              fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;
+              fastcgi_param REDIRECT_STATUS 200;
+              fastcgi_pass unix:${config.services.phpfpm.pools."snipe-it".socket};
+              ${optionalString tlsEnabled "fastcgi_param HTTPS on;"}
+            '';
+          };
+          "~ \\.(js|css|gif|png|ico|jpg|jpeg)$" = {
+            extraConfig = "expires 365d;";
+          };
+        };
+      }];
+    };
+
+    systemd.services.snipe-it-setup = {
+      description = "Preparation tasks for snipe-it";
+      before = [ "phpfpm-snipe-it.service" ];
+      after = optional db.createLocally "mysql.service";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        User = user;
+        WorkingDirectory = snipe-it;
+        RuntimeDirectory = "snipe-it/cache";
+        RuntimeDirectoryMode = "0700";
+      };
+      path = [ pkgs.replace-secret artisan ];
+      script =
+        let
+          isSecret  = v: isAttrs v && v ? _secret && (isString v._secret || builtins.isPath v._secret);
+          snipeITEnvVars = lib.generators.toKeyValue {
+            mkKeyValue = lib.flip lib.generators.mkKeyValueDefault "=" {
+              mkValueString = v: with builtins;
+                if isInt             v then toString v
+                else if isString     v then "\"${v}\""
+                else if true  ==     v then "true"
+                else if false ==     v then "false"
+                else if isSecret     v then
+                  if (isString v._secret) then
+                    hashString "sha256" v._secret
+                  else
+                    hashString "sha256" (builtins.readFile v._secret)
+                else throw "unsupported type ${typeOf v}: ${(lib.generators.toPretty {}) v}";
+            };
+          };
+          secretPaths = lib.mapAttrsToList (_: v: v._secret) (lib.filterAttrs (_: isSecret) cfg.config);
+          mkSecretReplacement = file: ''
+            replace-secret ${escapeShellArgs [
+              (
+                if (isString file) then
+                  builtins.hashString "sha256" file
+                else
+                  builtins.hashString "sha256" (builtins.readFile file)
+              )
+              file
+              "${cfg.dataDir}/.env"
+            ]}
+          '';
+          secretReplacements = lib.concatMapStrings mkSecretReplacement secretPaths;
+          filteredConfig = lib.converge (lib.filterAttrsRecursive (_: v: ! elem v [ {} null ])) cfg.config;
+          snipeITEnv = pkgs.writeText "snipeIT.env" (snipeITEnvVars filteredConfig);
+        in ''
+          # error handling
+          set -euo pipefail
+
+          # set permissions
+          umask 077
+
+          # create .env file
+          install -T -m 0600 -o ${user} ${snipeITEnv} "${cfg.dataDir}/.env"
+
+          # replace secrets
+          ${secretReplacements}
+
+          # prepend `base64:` if it does not exist in APP_KEY
+          if ! grep 'APP_KEY=base64:' "${cfg.dataDir}/.env" >/dev/null; then
+              sed -i 's/APP_KEY=/APP_KEY=base64:/' "${cfg.dataDir}/.env"
+          fi
+
+          # purge cache
+          rm "${cfg.dataDir}"/bootstrap/cache/*.php || true
+
+          # migrate db
+          ${lib.getExe artisan} migrate --force
+
+          # A placeholder file for invalid barcodes
+          invalid_barcode_location="${cfg.dataDir}/public/uploads/barcodes/invalid_barcode.gif"
+          if [ ! -e "$invalid_barcode_location" ]; then
+              cp ${snipe-it}/share/snipe-it/invalid_barcode.gif "$invalid_barcode_location"
+          fi
+        '';
+    };
+
+    systemd.tmpfiles.rules = [
+      "d ${cfg.dataDir}                              0710 ${user} ${group} - -"
+      "d ${cfg.dataDir}/bootstrap                    0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/bootstrap/cache              0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public                       0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads               0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/accessories   0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/assets        0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/avatars       0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/barcodes      0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/categories    0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/companies     0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/components    0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/consumables   0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/departments   0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/locations     0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/manufacturers 0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/models        0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/public/uploads/suppliers     0750 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage                      0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/app                  0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/fonts                0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework            0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/cache      0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/sessions   0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/framework/views      0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/logs                 0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/uploads              0700 ${user} ${group} - -"
+      "d ${cfg.dataDir}/storage/private_uploads      0700 ${user} ${group} - -"
+    ];
+
+    users = {
+      users = mkIf (user == "snipeit") {
+        snipeit = {
+          inherit group;
+          isSystemUser = true;
+        };
+        "${config.services.nginx.user}".extraGroups = [ group ];
+      };
+      groups = mkIf (group == "snipeit") {
+        snipeit = {};
+      };
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ yayayayaka ];
+}
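A minimal usage sketch for the Snipe-IT module, assuming the options shown above; the hostname and key path are placeholders, and the database is created locally with the module defaults:

  # configuration.nix (illustrative)
  {
    services.snipe-it = {
      enable = true;
      hostName = "assets.example.com";
      # 32 random bytes, base64 encoded, e.g. generated with:
      #   head -c 32 /dev/urandom | base64
      appKeyFile = "/run/keys/snipe-it/appkey";
      database.createLocally = true;
      nginx = {
        forceSSL = true;
        enableACME = true;
      };
      # additional .env settings; secret values go through the _secret mechanism
      config.ALLOWED_IFRAME_HOSTS = "https://example.com";
    };
  }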
diff --git a/nixpkgs/nixos/modules/services/web-apps/sogo.nix b/nixpkgs/nixos/modules/services/web-apps/sogo.nix
new file mode 100644
index 000000000000..9427eff35d14
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/sogo.nix
@@ -0,0 +1,271 @@
+{ config, pkgs, lib, ... }: with lib; let
+  cfg = config.services.sogo;
+
+  preStart = pkgs.writeShellScriptBin "sogo-prestart" ''
+    touch /etc/sogo/sogo.conf
+    chown sogo:sogo /etc/sogo/sogo.conf
+    chmod 640 /etc/sogo/sogo.conf
+
+    ${if (cfg.configReplaces != {}) then ''
+      # Insert secrets
+      ${concatStringsSep "\n" (mapAttrsToList (k: v: ''export ${k}="$(cat "${v}" | tr -d '\n')"'') cfg.configReplaces)}
+
+      ${pkgs.perl}/bin/perl -p ${concatStringsSep " " (mapAttrsToList (k: v: '' -e 's/${k}/''${ENV{"${k}"}}/g;' '') cfg.configReplaces)} /etc/sogo/sogo.conf.raw > /etc/sogo/sogo.conf
+    '' else ''
+      cp /etc/sogo/sogo.conf.raw /etc/sogo/sogo.conf
+    ''}
+  '';
+
+in {
+  options.services.sogo = with types; {
+    enable = mkEnableOption (lib.mdDoc "SOGo groupware");
+
+    vhostName = mkOption {
+      description = lib.mdDoc "Name of the nginx vhost";
+      type = str;
+      default = "sogo";
+    };
+
+    timezone = mkOption {
+      description = lib.mdDoc "Timezone of your SOGo instance";
+      type = str;
+      example = "America/Montreal";
+    };
+
+    language = mkOption {
+      description = lib.mdDoc "Language of SOGo";
+      type = str;
+      default = "English";
+    };
+
+    ealarmsCredFile = mkOption {
+      description = lib.mdDoc "Optional path to a credentials file for email alarms";
+      type = nullOr str;
+      default = null;
+    };
+
+    configReplaces = mkOption {
+      description = lib.mdDoc ''
+        Replacement-filepath mapping for sogo.conf.
+        Every key is replaced with the contents of the file specified as value.
+
+        In the example, every occurrence of LDAP_BINDPW will be replaced with the text of the
+        specified file.
+      '';
+      type = attrsOf str;
+      default = {};
+      example = {
+        LDAP_BINDPW = "/var/lib/secrets/sogo/ldappw";
+      };
+    };
+
+    extraConfig = mkOption {
+      description = lib.mdDoc "Extra sogo.conf configuration lines";
+      type = lines;
+      default = "";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.sogo ];
+
+    environment.etc."sogo/sogo.conf.raw".text = ''
+      {
+        // Mandatory parameters
+        SOGoTimeZone = "${cfg.timezone}";
+        SOGoLanguage = "${cfg.language}";
+        // Paths
+        WOSendMail = "/run/wrappers/bin/sendmail";
+        SOGoMailSpoolPath = "/var/lib/sogo/spool";
+        // Enable CSRF protection
+        SOGoXSRFValidationEnabled = YES;
+        // Remove dates from log (journald does that)
+        NGLogDefaultLogEventFormatterClass = "NGLogEventFormatter";
+        // Extra config
+        ${cfg.extraConfig}
+      }
+    '';
+
+    systemd.services.sogo = {
+      description = "SOGo groupware";
+      after = [ "postgresql.service" "mysql.service" "memcached.service" "openldap.service" "dovecot2.service" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ config.environment.etc."sogo/sogo.conf.raw".source ];
+
+      environment.LDAPTLS_CACERT = "/etc/ssl/certs/ca-certificates.crt";
+
+      serviceConfig = {
+        Type = "forking";
+        ExecStartPre = "+" + preStart + "/bin/sogo-prestart";
+        ExecStart = "${pkgs.sogo}/bin/sogod -WOLogFile - -WOPidFile /run/sogo/sogo.pid";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        RuntimeDirectory = "sogo";
+        StateDirectory = "sogo/spool";
+
+        User = "sogo";
+        Group = "sogo";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+
+        LockPersonality = true;
+        RestrictRealtime = true;
+        PrivateMounts = true;
+        PrivateUsers = true;
+        MemoryDenyWriteExecute = true;
+        SystemCallFilter = "@basic-io @file-system @network-io @system-service @timer";
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
+      };
+    };
+
+    systemd.services.sogo-tmpwatch = {
+      description = "SOGo tmpwatch";
+
+      startAt = [ "hourly" ];
+      script = ''
+        SOGOSPOOL=/var/lib/sogo/spool
+
+        find "$SOGOSPOOL" -type f -user sogo -atime +23 -delete > /dev/null
+        find "$SOGOSPOOL" -mindepth 1 -type d -user sogo -empty -delete > /dev/null
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        StateDirectory = "sogo/spool";
+
+        User = "sogo";
+        Group = "sogo";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+
+        LockPersonality = true;
+        RestrictRealtime = true;
+        PrivateMounts = true;
+        PrivateUsers = true;
+        PrivateNetwork = true;
+        SystemCallFilter = "@basic-io @file-system @system-service";
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = "";
+      };
+    };
+
+    systemd.services.sogo-ealarms = {
+      description = "SOGo email alarms";
+
+      after = [ "postgresql.service" "mysqld.service" "memcached.service" "openldap.service" "dovecot2.service" "sogo.service" ];
+      restartTriggers = [ config.environment.etc."sogo/sogo.conf.raw".source ];
+
+      startAt = [ "minutely" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.sogo}/bin/sogo-ealarms-notify${optionalString (cfg.ealarmsCredFile != null) " -p ${cfg.ealarmsCredFile}"}";
+
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectControlGroups = true;
+        StateDirectory = "sogo/spool";
+
+        User = "sogo";
+        Group = "sogo";
+
+        CapabilityBoundingSet = "";
+        NoNewPrivileges = true;
+
+        LockPersonality = true;
+        RestrictRealtime = true;
+        PrivateMounts = true;
+        PrivateUsers = true;
+        MemoryDenyWriteExecute = true;
+        SystemCallFilter = "@basic-io @file-system @network-io @system-service";
+        SystemCallArchitectures = "native";
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6";
+      };
+    };
+
+    # nginx vhost
+    services.nginx.virtualHosts."${cfg.vhostName}" = {
+      locations."/".extraConfig = ''
+        rewrite ^ https://$server_name/SOGo;
+        allow all;
+      '';
+
+      # For iOS 7
+      locations."/principals/".extraConfig = ''
+        rewrite ^ https://$server_name/SOGo/dav;
+        allow all;
+      '';
+
+      locations."^~/SOGo".extraConfig = ''
+        proxy_pass http://127.0.0.1:20000;
+        proxy_redirect http://127.0.0.1:20000 default;
+
+        proxy_set_header X-Real-IP $remote_addr;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+        proxy_set_header Host $host;
+        proxy_set_header x-webobjects-server-protocol HTTP/1.0;
+        proxy_set_header x-webobjects-remote-host 127.0.0.1;
+        proxy_set_header x-webobjects-server-port $server_port;
+        proxy_set_header x-webobjects-server-name $server_name;
+        proxy_set_header x-webobjects-server-url $scheme://$host;
+        proxy_connect_timeout 90;
+        proxy_send_timeout 90;
+        proxy_read_timeout 90;
+        proxy_buffer_size 64k;
+        proxy_buffers 8 64k;
+        proxy_busy_buffers_size 64k;
+        proxy_temp_file_write_size 64k;
+        client_max_body_size 50m;
+        client_body_buffer_size 128k;
+        break;
+      '';
+
+      locations."/SOGo.woa/WebServerResources/".extraConfig = ''
+        alias ${pkgs.sogo}/lib/GNUstep/SOGo/WebServerResources/;
+        allow all;
+      '';
+
+      locations."/SOGo/WebServerResources/".extraConfig = ''
+        alias ${pkgs.sogo}/lib/GNUstep/SOGo/WebServerResources/;
+        allow all;
+      '';
+
+      locations."~ ^/SOGo/so/ControlPanel/Products/([^/]*)/Resources/(.*)$".extraConfig = ''
+        alias ${pkgs.sogo}/lib/GNUstep/SOGo/$1.SOGo/Resources/$2;
+      '';
+
+      locations."~ ^/SOGo/so/ControlPanel/Products/[^/]*UI/Resources/.*\\.(jpg|png|gif|css|js)$".extraConfig = ''
+        alias ${pkgs.sogo}/lib/GNUstep/SOGo/$1.SOGo/Resources/$2;
+      '';
+    };
+
+    # User and group
+    users.groups.sogo = {};
+    users.users.sogo = {
+      group = "sogo";
+      isSystemUser = true;
+      description = "SOGo service user";
+    };
+  };
+}
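A minimal usage sketch for the SOGo module, assuming the options shown above; the timezone, vhost name and secret path are placeholders:

  # configuration.nix (illustrative)
  {
    services.sogo = {
      enable = true;
      timezone = "America/Montreal";
      vhostName = "sogo.example.com";
      # every occurrence of LDAP_BINDPW in the generated sogo.conf is
      # replaced at service start with the contents of the referenced file
      configReplaces.LDAP_BINDPW = "/var/lib/secrets/sogo/ldappw";
      extraConfig = ''
        // additional raw sogo.conf directives go here
      '';
    };
  }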
diff --git a/nixpkgs/nixos/modules/services/web-apps/trilium.nix b/nixpkgs/nixos/modules/services/web-apps/trilium.nix
new file mode 100644
index 000000000000..a91d64f620b6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/trilium.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.trilium-server;
+  configIni = pkgs.writeText "trilium-config.ini" ''
+    [General]
+    # Instance name can be used to distinguish between different instances
+    instanceName=${cfg.instanceName}
+
+    # Disable automatically generating desktop icon
+    noDesktopIcon=true
+    noBackup=${lib.boolToString cfg.noBackup}
+    noAuthentication=${lib.boolToString cfg.noAuthentication}
+
+    [Network]
+    # host setting is relevant only for web deployments - set the host on which the server will listen
+    host=${cfg.host}
+    # port setting is relevant only for web deployments, desktop builds run on random free port
+    port=${toString cfg.port}
+    # true for TLS/SSL/HTTPS (secure), false for HTTP (insecure).
+    https=false
+  '';
+in
+{
+
+  options.services.trilium-server = with lib; {
+    enable = mkEnableOption (lib.mdDoc "trilium-server");
+
+    dataDir = mkOption {
+      type = types.str;
+      default = "/var/lib/trilium";
+      description = lib.mdDoc ''
+        The directory storing the notes database and the configuration.
+      '';
+    };
+
+    instanceName = mkOption {
+      type = types.str;
+      default = "Trilium";
+      description = lib.mdDoc ''
+        Instance name used to distinguish between different instances.
+      '';
+    };
+
+    noBackup = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Disable periodic database backups.
+      '';
+    };
+
+    noAuthentication = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If set to true, no password is required to access the web frontend.
+      '';
+    };
+
+    host = mkOption {
+      type = types.str;
+      default = "127.0.0.1";
+      description = lib.mdDoc ''
+        The host address to bind to (defaults to localhost).
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 8080;
+      description = lib.mdDoc ''
+        The port number to bind to.
+      '';
+    };
+
+    nginx = mkOption {
+      default = {};
+      description = lib.mdDoc ''
+        Configuration for nginx reverse proxy.
+      '';
+
+      type = types.submodule {
+        options = {
+          enable = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Configure the nginx reverse proxy settings.
+            '';
+          };
+
+          hostName = mkOption {
+            type = types.str;
+            description = lib.mdDoc ''
+              The hostname to use for the nginx virtual host configuration.
+            '';
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable (lib.mkMerge [
+  {
+    meta.maintainers = with lib.maintainers; [ fliegendewurst ];
+
+    users.groups.trilium = {};
+    users.users.trilium = {
+      description = "Trilium User";
+      group = "trilium";
+      home = cfg.dataDir;
+      isSystemUser = true;
+    };
+
+    systemd.services.trilium-server = {
+      wantedBy = [ "multi-user.target" ];
+      environment.TRILIUM_DATA_DIR = cfg.dataDir;
+      serviceConfig = {
+        ExecStart = "${pkgs.trilium-server}/bin/trilium-server";
+        User = "trilium";
+        Group = "trilium";
+        PrivateTmp = "true";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d  ${cfg.dataDir}            0750 trilium trilium - -"
+      "L+ ${cfg.dataDir}/config.ini -    -       -       - ${configIni}"
+    ];
+
+  }
+
+  (lib.mkIf cfg.nginx.enable {
+    services.nginx = {
+      enable = true;
+      virtualHosts."${cfg.nginx.hostName}" = {
+        locations."/" = {
+          proxyPass = "http://${cfg.host}:${toString cfg.port}/";
+          extraConfig = ''
+            proxy_http_version 1.1;
+            proxy_set_header Upgrade $http_upgrade;
+            proxy_set_header Connection 'upgrade';
+            proxy_set_header Host $host;
+            proxy_cache_bypass $http_upgrade;
+          '';
+        };
+        extraConfig = ''
+          client_max_body_size 0;
+        '';
+      };
+    };
+  })
+  ]);
+}
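A minimal usage sketch for the trilium-server module, assuming the options shown above; the hostname is a placeholder:

  # configuration.nix (illustrative)
  {
    services.trilium-server = {
      enable = true;
      host = "127.0.0.1";  # only reachable through the reverse proxy
      port = 8080;
      nginx = {
        enable = true;
        hostName = "notes.example.com";
      };
    };
  }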
diff --git a/nixpkgs/nixos/modules/services/web-apps/tt-rss.nix b/nixpkgs/nixos/modules/services/web-apps/tt-rss.nix
new file mode 100644
index 000000000000..a8fb37d2c5ec
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/tt-rss.nix
@@ -0,0 +1,658 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.tt-rss;
+
+  configVersion = 26;
+
+  dbPort = if cfg.database.port == null
+    then (if cfg.database.type == "pgsql" then 5432 else 3306)
+    else cfg.database.port;
+
+  poolName = "tt-rss";
+
+  mysqlLocal = cfg.database.createLocally && cfg.database.type == "mysql";
+  pgsqlLocal = cfg.database.createLocally && cfg.database.type == "pgsql";
+
+  tt-rss-config = let
+    password =
+      if (cfg.database.password != null) then
+        "'${(escape ["'" "\\"] cfg.database.password)}'"
+      else if (cfg.database.passwordFile != null) then
+        "file_get_contents('${cfg.database.passwordFile}')"
+      else
+        null
+      ;
+  in pkgs.writeText "config.php" ''
+    <?php
+      putenv('TTRSS_PHP_EXECUTABLE=${pkgs.php}/bin/php');
+
+      putenv('TTRSS_LOCK_DIRECTORY=${cfg.root}/lock');
+      putenv('TTRSS_CACHE_DIR=${cfg.root}/cache');
+      putenv('TTRSS_ICONS_DIR=${cfg.root}/feed-icons');
+      putenv('TTRSS_ICONS_URL=feed-icons');
+      putenv('TTRSS_SELF_URL_PATH=${cfg.selfUrlPath}');
+
+      putenv('TTRSS_MYSQL_CHARSET=UTF8');
+
+      putenv('TTRSS_DB_TYPE=${cfg.database.type}');
+      putenv('TTRSS_DB_HOST=${optionalString (cfg.database.host != null) cfg.database.host}');
+      putenv('TTRSS_DB_USER=${cfg.database.user}');
+      putenv('TTRSS_DB_NAME=${cfg.database.name}');
+      putenv('TTRSS_DB_PASS=' ${optionalString (password != null) ". ${password}"});
+      putenv('TTRSS_DB_PORT=${toString dbPort}');
+
+      putenv('TTRSS_AUTH_AUTO_CREATE=${boolToString cfg.auth.autoCreate}');
+      putenv('TTRSS_AUTH_AUTO_LOGIN=${boolToString cfg.auth.autoLogin}');
+
+      putenv('TTRSS_FEED_CRYPT_KEY=${escape ["'" "\\"] cfg.feedCryptKey}');
+
+
+      putenv('TTRSS_SINGLE_USER_MODE=${boolToString cfg.singleUserMode}');
+
+      putenv('TTRSS_SIMPLE_UPDATE_MODE=${boolToString cfg.simpleUpdateMode}');
+
+      # Never check for updates - the running version of the code should
+      # be controlled entirely by the version of TT-RSS active in the
+      # current Nix profile. If TT-RSS updates itself to a version
+      # requiring a database schema upgrade, and then the SystemD
+      # tt-rss.service is restarted, the old code copied from the Nix
+      # store will overwrite the updated version, causing the code to
+      # detect the need for a schema "upgrade" (since the schema version
+      # in the database is different than in the code), but the update
+      # schema operation in TT-RSS will do nothing because the schema
+      # version in the database is newer than that in the code.
+      putenv('TTRSS_CHECK_FOR_UPDATES=false');
+
+      putenv('TTRSS_FORCE_ARTICLE_PURGE=${toString cfg.forceArticlePurge}');
+      putenv('TTRSS_SESSION_COOKIE_LIFETIME=${toString cfg.sessionCookieLifetime}');
+      putenv('TTRSS_ENABLE_GZIP_OUTPUT=${boolToString cfg.enableGZipOutput}');
+
+      putenv('TTRSS_PLUGINS=${builtins.concatStringsSep "," cfg.plugins}');
+
+      putenv('TTRSS_LOG_DESTINATION=${cfg.logDestination}');
+      putenv('TTRSS_CONFIG_VERSION=${toString configVersion}');
+
+
+      putenv('TTRSS_PUBSUBHUBBUB_ENABLED=${boolToString cfg.pubSubHubbub.enable}');
+      putenv('TTRSS_PUBSUBHUBBUB_HUB=${cfg.pubSubHubbub.hub}');
+
+      putenv('TTRSS_SPHINX_SERVER=${cfg.sphinx.server}');
+      putenv('TTRSS_SPHINX_INDEX=${builtins.concatStringsSep "," cfg.sphinx.index}');
+
+      putenv('TTRSS_ENABLE_REGISTRATION=${boolToString cfg.registration.enable}');
+      putenv('TTRSS_REG_NOTIFY_ADDRESS=${cfg.registration.notifyAddress}');
+      putenv('TTRSS_REG_MAX_USERS=${toString cfg.registration.maxUsers}');
+
+      putenv('TTRSS_SMTP_SERVER=${cfg.email.server}');
+      putenv('TTRSS_SMTP_LOGIN=${cfg.email.login}');
+      putenv('TTRSS_SMTP_PASSWORD=${escape ["'" "\\"] cfg.email.password}');
+      putenv('TTRSS_SMTP_SECURE=${cfg.email.security}');
+
+      putenv('TTRSS_SMTP_FROM_NAME=${escape ["'" "\\"] cfg.email.fromName}');
+      putenv('TTRSS_SMTP_FROM_ADDRESS=${escape ["'" "\\"] cfg.email.fromAddress}');
+      putenv('TTRSS_DIGEST_SUBJECT=${escape ["'" "\\"] cfg.email.digestSubject}');
+
+      ${cfg.extraConfig}
+  '';
+
+  # tt-rss and plugins and themes and config.php
+  servedRoot = pkgs.runCommand "tt-rss-served-root" {} ''
+    cp --no-preserve=mode -r ${pkgs.tt-rss} $out
+    cp ${tt-rss-config} $out/config.php
+    ${optionalString (cfg.pluginPackages != []) ''
+    for plugin in ${concatStringsSep " " cfg.pluginPackages}; do
+    cp -r "$plugin"/* "$out/plugins.local/"
+    done
+    ''}
+    ${optionalString (cfg.themePackages != []) ''
+    for theme in ${concatStringsSep " " cfg.themePackages}; do
+    cp -r "$theme"/* "$out/themes.local/"
+    done
+    ''}
+  '';
+
+ in {
+
+  ###### interface
+
+  options = {
+
+    services.tt-rss = {
+
+      enable = mkEnableOption (lib.mdDoc "tt-rss");
+
+      root = mkOption {
+        type = types.path;
+        default = "/var/lib/tt-rss";
+        description = lib.mdDoc ''
+          Root of the application.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "tt_rss";
+        description = lib.mdDoc ''
+          User account under which both the update daemon and the web application run.
+        '';
+      };
+
+      pool = mkOption {
+        type = types.str;
+        default = "${poolName}";
+        description = lib.mdDoc ''
+          Name of an existing phpfpm pool used to run the web application.
+          If not specified, a pool will be created automatically with
+          default values.
+        '';
+      };
+
+      virtualHost = mkOption {
+        type = types.nullOr types.str;
+        default = "tt-rss";
+        description = lib.mdDoc ''
+          Name of the nginx virtualhost to use and setup. If null, do not setup any virtualhost.
+        '';
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum ["pgsql" "mysql"];
+          default = "pgsql";
+          description = lib.mdDoc ''
+            Database backend used to store feeds. Supported values are pgsql and mysql.
+          '';
+        };
+
+        host = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Host of the database. Leave null to use Unix domain socket.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "tt_rss";
+          description = lib.mdDoc ''
+            Name of the existing database.
+          '';
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "tt_rss";
+          description = lib.mdDoc ''
+            The database user. The user must exist and have access to
+            the specified database.
+          '';
+        };
+
+        password = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            The database user's password.
+          '';
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.str;
+          default = null;
+          description = lib.mdDoc ''
+            Path to a file containing the database user's password.
+          '';
+        };
+
+        port = mkOption {
+          type = types.nullOr types.port;
+          default = null;
+          description = lib.mdDoc ''
+            The database's port. If not set, the default ports will be used (5432
+            and 3306 for pgsql and mysql respectively).
+          '';
+        };
+
+        createLocally = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Create the database and database user locally.";
+        };
+      };
+
+      auth = {
+        autoCreate = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Allow authentication modules to auto-create users in tt-rss internal
+            database when authenticated successfully.
+          '';
+        };
+
+        autoLogin = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Automatically log in users authenticated by remote or other externally
+            supplied authentication; otherwise redirect to the login form as normal.
+            If set to true, users won't be able to set the application language
+            or a settings profile.
+          '';
+        };
+      };
+
+      pubSubHubbub = {
+        hub = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            URL to a PubSubHubbub-compatible hub server. If defined, the generated
+            "Published articles" feed will automatically become PUSH-enabled.
+          '';
+        };
+
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Enable client PubSubHubbub support in tt-rss. When disabled, tt-rss
+            won't try to subscribe to PUSH feed updates.
+          '';
+        };
+      };
+
+      sphinx = {
+        server = mkOption {
+          type = types.str;
+          default = "localhost:9312";
+          description = lib.mdDoc ''
+            Hostname:port combination for the Sphinx server.
+          '';
+        };
+
+        index = mkOption {
+          type = types.listOf types.str;
+          default = ["ttrss" "delta"];
+          description = lib.mdDoc ''
+            Index names in Sphinx configuration. Example configuration
+            files are available on tt-rss wiki.
+          '';
+        };
+      };
+
+      registration = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Allow users to register themselves. Be aware that allowing
+            random people to access your tt-rss installation is a security risk
+            and may lead to data loss or server compromise. Disabled
+            by default.
+          '';
+        };
+
+        notifyAddress = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            Email address to send new user notifications to.
+          '';
+        };
+
+        maxUsers = mkOption {
+          type = types.int;
+          default = 0;
+          description = lib.mdDoc ''
+            Maximum number of users allowed to register on this
+            system. 0 means no limit.
+          '';
+        };
+      };
+
+      email = {
+        server = mkOption {
+          type = types.str;
+          default = "";
+          example = "localhost:25";
+          description = lib.mdDoc ''
+            Hostname:port combination used to send outgoing mail. Leave blank to use
+            the system MTA.
+          '';
+        };
+
+        login = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            SMTP authentication login used when sending outgoing mail.
+          '';
+        };
+
+        password = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            SMTP authentication password used when sending outgoing mail.
+          '';
+        };
+
+        security = mkOption {
+          type = types.enum ["" "ssl" "tls"];
+          default = "";
+          description = lib.mdDoc ''
+            Used to select a secure SMTP connection. Allowed values: ssl, tls,
+            or empty.
+          '';
+        };
+
+        fromName = mkOption {
+          type = types.str;
+          default = "Tiny Tiny RSS";
+          description = lib.mdDoc ''
+            Name for sending outgoing mail. This applies to password reset
+            notifications, digest emails and any other mail.
+          '';
+        };
+
+        fromAddress = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc ''
+            Address for sending outgoing mail. This applies to password reset
+            notifications, digest emails and any other mail.
+          '';
+        };
+
+        digestSubject = mkOption {
+          type = types.str;
+          default = "[tt-rss] New headlines for last 24 hours";
+          description = lib.mdDoc ''
+            Subject line for email digests.
+          '';
+        };
+      };
+
+      sessionCookieLifetime = mkOption {
+        type = types.int;
+        default = 86400;
+        description = lib.mdDoc ''
+          Default lifetime of a session (e.g. login) cookie, in seconds.
+          0 means the cookie is deleted when the browser closes.
+        '';
+      };
+
+      selfUrlPath = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          Full URL of your tt-rss installation. This should be set to the
+          location of the tt-rss directory, e.g. http://example.org/tt-rss/.
+          This option must be set correctly, otherwise several features
+          including PUSH, bookmarklets and browser integration will not work properly.
+        '';
+        example = "http://localhost";
+      };
+
+      feedCryptKey = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Key used for encryption of passwords for password-protected feeds
+          in the database. A string of 24 random characters. If left blank, encryption
+          is not used. Requires mcrypt functions.
+          Warning: changing this key will make your stored feed passwords impossible
+          to decrypt.
+        '';
+      };
+
+      singleUserMode = mkOption {
+        type = types.bool;
+        default = false;
+
+        description = lib.mdDoc ''
+          Operate in single-user mode; this disables all functionality related to
+          multiple users and authentication. Enabling this assumes you have
+          your tt-rss directory protected by other means (e.g. HTTP auth).
+        '';
+      };
+
+      simpleUpdateMode = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enables a fallback update mode where tt-rss tries to update feeds in
+          the background while tt-rss is open in your browser.
+          If you don't have many feeds and can't or don't want to run
+          background processes while tt-rss is not running, this method is generally
+          viable for keeping your feeds up to date.
+          Still, there are more robust (and recommended) update methods
+          available; you can read about them here: <https://tt-rss.org/wiki/UpdatingFeeds>
+        '';
+      };
+
+      forceArticlePurge = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          When this option is not 0, users' ability to control feed purging
+          intervals is disabled and all articles (except starred ones)
+          older than this number of days are purged.
+        '';
+      };
+
+      enableGZipOutput = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Selectively gzip output to improve wire performance. This requires
+          the PHP Zlib extension on the server.
+          Enabling this can break tt-rss in several httpd/php configurations;
+          if you experience weird errors, tt-rss failing to start, blank pages
+          after login, or content encoding errors, disable it.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.str;
+        default = ["auth_internal" "note"];
+        description = lib.mdDoc ''
+          List of plugins to load automatically for all users.
+          System plugins have to be specified here. Please enable at least one
+          authentication plugin here (auth_*).
+          Users may enable other user plugins from Preferences/Plugins but may not
+          disable plugins specified in this list.
+          Removing auth_internal from this list automatically disables the
+          password reset link on the login form.
+        '';
+      };
+
+      pluginPackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = lib.mdDoc ''
+          List of plugins to install. The list elements are expected to
+          be derivations whose contents are automatically
+          copied to the `plugins.local` directory.
+        '';
+      };
+
+      themePackages = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        description = lib.mdDoc ''
+          List of themes to install. The list elements are expected to
+          be derivations whose contents are automatically
+          copied to the `themes.local` directory.
+        '';
+      };
+
+      logDestination = mkOption {
+        type = types.enum ["" "sql" "syslog"];
+        default = "sql";
+        description = lib.mdDoc ''
+          Log destination to use. Possible values: sql (uses internal logging,
+          readable under Preferences -> System) and syslog (logs to the system log).
+          Setting this to blank uses PHP logging (usually to the HTTP server's
+          error.log).
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional lines to append to `config.php`.
+        '';
+      };
+    };
+  };
+
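+  # A minimal usage sketch (illustrative values; assumes the nginx virtual host
+  # and the local PostgreSQL database are provisioned by this module's defaults):
+  #
+  #   services.tt-rss = {
+  #     enable = true;
+  #     virtualHost = "rss.example.org";
+  #     selfUrlPath = "https://rss.example.org";
+  #   };
+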
+  imports = [
+    (mkRemovedOptionModule ["services" "tt-rss" "checkForUpdates"] ''
+      This option was removed because setting this to true will cause TT-RSS
+      to be unable to start if an automatic update of the code in
+      services.tt-rss.root leads to a database schema upgrade that is not
+      supported by the code active in the Nix store.
+    '')
+  ];
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = cfg.database.password != null -> cfg.database.passwordFile == null;
+        message = "Cannot set both password and passwordFile";
+      }
+      {
+        assertion = cfg.database.createLocally -> cfg.database.name == cfg.user && cfg.database.user == cfg.user;
+        message = ''
+          When creating a database via NixOS, the db user and db name must be equal!
+          If you already have an existing database and user and this assertion is new, you can safely set
+          `services.tt-rss.database.createLocally` to `false`, because removing `ensureUsers`
+          and `ensureDatabases` has no effect on an existing database.
+        '';
+      }
+    ];
+
+    services.phpfpm.pools = mkIf (cfg.pool == "${poolName}") {
+      ${poolName} = {
+        inherit (cfg) user;
+        phpPackage = pkgs.php81;
+        settings = mapAttrs (name: mkDefault) {
+          "listen.owner" = "nginx";
+          "listen.group" = "nginx";
+          "listen.mode" = "0600";
+          "pm" = "dynamic";
+          "pm.max_children" = 75;
+          "pm.start_servers" = 10;
+          "pm.min_spare_servers" = 5;
+          "pm.max_spare_servers" = 20;
+          "pm.max_requests" = 500;
+          "catch_workers_output" = 1;
+        };
+      };
+    };
+
+    # NOTE: No configuration is done if not using virtual host
+    services.nginx = mkIf (cfg.virtualHost != null) {
+      enable = true;
+      virtualHosts = {
+        ${cfg.virtualHost} = {
+          root = "${cfg.root}/www";
+
+          locations."/" = {
+            index = "index.php";
+          };
+
+          locations."^~ /feed-icons" = {
+            root = "${cfg.root}";
+          };
+
+          locations."~ \\.php$" = {
+            extraConfig = ''
+              fastcgi_split_path_info ^(.+\.php)(/.+)$;
+              fastcgi_pass unix:${config.services.phpfpm.pools.${cfg.pool}.socket};
+              fastcgi_index index.php;
+            '';
+          };
+        };
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.root}' 0555 ${cfg.user} tt_rss - -"
+      "d '${cfg.root}/lock' 0755 ${cfg.user} tt_rss - -"
+      "d '${cfg.root}/cache' 0755 ${cfg.user} tt_rss - -"
+      "d '${cfg.root}/cache/upload' 0755 ${cfg.user} tt_rss - -"
+      "d '${cfg.root}/cache/images' 0755 ${cfg.user} tt_rss - -"
+      "d '${cfg.root}/cache/export' 0755 ${cfg.user} tt_rss - -"
+      "d '${cfg.root}/feed-icons' 0755 ${cfg.user} tt_rss - -"
+      "L+ '${cfg.root}/www' - - - - ${servedRoot}"
+    ];
+
+    systemd.services = {
+      phpfpm-tt-rss = mkIf (cfg.pool == "${poolName}") {
+        restartTriggers = [ servedRoot ];
+      };
+
+      tt-rss = {
+        description = "Tiny Tiny RSS feeds update daemon";
+
+        preStart = ''
+          ${pkgs.php81}/bin/php ${cfg.root}/www/update.php --update-schema
+        '';
+
+        serviceConfig = {
+          User = "${cfg.user}";
+          Group = "tt_rss";
+          ExecStart = "${pkgs.php}/bin/php ${cfg.root}/www/update.php --daemon --quiet";
+          Restart = "on-failure";
+          RestartSec = "60";
+          SyslogIdentifier = "tt-rss";
+        };
+
+        wantedBy = [ "multi-user.target" ];
+        requires = optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+        after = [ "network.target" ] ++ optional mysqlLocal "mysql.service" ++ optional pgsqlLocal "postgresql.service";
+      };
+    };
+
+    services.mysql = mkIf mysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        {
+          name = cfg.user;
+          ensurePermissions = {
+            "${cfg.database.name}.*" = "ALL PRIVILEGES";
+          };
+        }
+      ];
+    };
+
+    services.postgresql = mkIf pgsqlLocal {
+      enable = mkDefault true;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [
+        { name = cfg.database.user;
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    users.users.tt_rss = optionalAttrs (cfg.user == "tt_rss") {
+      description = "tt-rss service user";
+      isSystemUser = true;
+      group = "tt_rss";
+    };
+
+    users.groups.tt_rss = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/vikunja.nix b/nixpkgs/nixos/modules/services/web-apps/vikunja.nix
new file mode 100644
index 000000000000..6b1d4da532bf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/vikunja.nix
@@ -0,0 +1,155 @@
+{ pkgs, lib, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.vikunja;
+  format = pkgs.formats.yaml {};
+  configFile = format.generate "config.yaml" cfg.settings;
+  useMysql = cfg.database.type == "mysql";
+  usePostgresql = cfg.database.type == "postgres";
+in {
+  options.services.vikunja = with lib; {
+    enable = mkEnableOption (lib.mdDoc "vikunja service");
+    package-api = mkOption {
+      default = pkgs.vikunja-api;
+      type = types.package;
+      defaultText = literalExpression "pkgs.vikunja-api";
+      description = lib.mdDoc "vikunja-api derivation to use.";
+    };
+    package-frontend = mkOption {
+      default = pkgs.vikunja-frontend;
+      type = types.package;
+      defaultText = literalExpression "pkgs.vikunja-frontend";
+      description = lib.mdDoc "vikunja-frontend derivation to use.";
+    };
+    environmentFiles = mkOption {
+      type = types.listOf types.path;
+      default = [ ];
+      description = lib.mdDoc ''
+        List of environment files passed to the vikunja systemd service.
+        Secrets such as passwords should be set in one of these files.
+      '';
+    };
+    setupNginx = mkOption {
+      type = types.bool;
+      default = config.services.nginx.enable;
+      defaultText = literalExpression "config.services.nginx.enable";
+      description = lib.mdDoc ''
+        Whether to set up nginx.
+        Further nginx configuration can be done by changing
+        {option}`services.nginx.virtualHosts.<frontendHostname>`.
+        This does not enable TLS or ACME by default. To enable this, set the
+        {option}`services.nginx.virtualHosts.<frontendHostname>.enableACME` to
+        `true` and if appropriate do the same for
+        {option}`services.nginx.virtualHosts.<frontendHostname>.forceSSL`.
+      '';
+    };
+    frontendScheme = mkOption {
+      type = types.enum [ "http" "https" ];
+      description = lib.mdDoc ''
+        Whether the site is available via http or https.
+        This does not configure https or ACME in nginx!
+      '';
+    };
+    frontendHostname = mkOption {
+      type = types.str;
+      description = lib.mdDoc "The Hostname under which the frontend is running.";
+    };
+    port = mkOption {
+      type = types.port;
+      default = 3456;
+      description = lib.mdDoc "The TCP port exposed by the API.";
+    };
+
+    settings = mkOption {
+      type = format.type;
+      default = {};
+      description = lib.mdDoc ''
+        Vikunja configuration. Refer to
+        <https://vikunja.io/docs/config-options/>
+        for details on supported values.
+        '';
+    };
+    database = {
+      type = mkOption {
+        type = types.enum [ "sqlite" "mysql" "postgres" ];
+        example = "postgres";
+        default = "sqlite";
+        description = lib.mdDoc "Database engine to use.";
+      };
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "Database host address. Can also be a socket.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = "vikunja";
+        description = lib.mdDoc "Database user.";
+      };
+      database = mkOption {
+        type = types.str;
+        default = "vikunja";
+        description = lib.mdDoc "Database name.";
+      };
+      path = mkOption {
+        type = types.str;
+        default = "/var/lib/vikunja/vikunja.db";
+        description = lib.mdDoc "Path to the sqlite3 database file.";
+      };
+    };
+  };
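+
+  # A minimal usage sketch (illustrative hostname; nginx proxying is configured
+  # automatically when services.nginx.enable is set, per setupNginx's default):
+  #
+  #   services.vikunja = {
+  #     enable = true;
+  #     frontendScheme = "https";
+  #     frontendHostname = "todo.example.org";
+  #   };
+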
+  config = lib.mkIf cfg.enable {
+    services.vikunja.settings = {
+      database = {
+        inherit (cfg.database) type host user database path;
+      };
+      service = {
+        interface = ":${toString cfg.port}";
+        frontendurl = "${cfg.frontendScheme}://${cfg.frontendHostname}/";
+      };
+      files = {
+        basepath = "/var/lib/vikunja/files";
+      };
+    };
+
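+    # Sketch of the resulting config.yaml (illustrative; the exact rendering is
+    # whatever pkgs.formats.yaml produces, and <scheme>/<host> stand in for the
+    # configured frontendScheme and frontendHostname):
+    #
+    #   database: { type: sqlite, host: localhost, user: vikunja, database: vikunja, path: /var/lib/vikunja/vikunja.db }
+    #   service:  { interface: ":3456", frontendurl: "<scheme>://<host>/" }
+    #   files:    { basepath: /var/lib/vikunja/files }
+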
+    systemd.services.vikunja-api = {
+      description = "vikunja-api";
+      after = [ "network.target" ] ++ lib.optional usePostgresql "postgresql.service" ++ lib.optional useMysql "mysql.service";
+      wantedBy = [ "multi-user.target" ];
+      path = [ cfg.package-api ];
+      restartTriggers = [ configFile ];
+
+      serviceConfig = {
+        Type = "simple";
+        DynamicUser = true;
+        StateDirectory = "vikunja";
+        ExecStart = "${cfg.package-api}/bin/vikunja";
+        Restart = "always";
+        EnvironmentFile = cfg.environmentFiles;
+      };
+    };
+
+    services.nginx.virtualHosts."${cfg.frontendHostname}" = mkIf cfg.setupNginx {
+      locations = {
+        "/" = {
+          root = cfg.package-frontend;
+          tryFiles = "$uri $uri/ /";
+        };
+        "~* ^/(api|dav|\\.well-known)/" = {
+          proxyPass = "http://localhost:${toString cfg.port}";
+          extraConfig = ''
+            client_max_body_size 20M;
+          '';
+        };
+      };
+    };
+
+    environment.etc."vikunja/config.yaml".source = configFile;
+
+    environment.systemPackages = [
+      cfg.package-api # for admin `vikunja` CLI
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/whitebophir.nix b/nixpkgs/nixos/modules/services/web-apps/whitebophir.nix
new file mode 100644
index 000000000000..b673a7c1179e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/whitebophir.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.whitebophir;
+in {
+  options = {
+    services.whitebophir = {
+      enable = mkEnableOption (lib.mdDoc "whitebophir, an online collaborative whiteboard server (persistent state will be maintained under {file}`/var/lib/whitebophir`)");
+
+      package = mkOption {
+        default = pkgs.whitebophir;
+        defaultText = literalExpression "pkgs.whitebophir";
+        type = types.package;
+        description = lib.mdDoc "Whitebophir package to use.";
+      };
+
+      listenAddress = mkOption {
+        type = types.str;
+        default = "0.0.0.0";
+        description = lib.mdDoc "Address to listen on (use 0.0.0.0 to allow access from any address).";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 5001;
+        description = lib.mdDoc "Port to bind to.";
+      };
+    };
+  };
+
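+  # A minimal usage sketch (illustrative; binds whitebophir to localhost only,
+  # e.g. when it sits behind a reverse proxy):
+  #
+  #   services.whitebophir = {
+  #     enable = true;
+  #     listenAddress = "127.0.0.1";
+  #     port = 5001;
+  #   };
+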
+  config = mkIf cfg.enable {
+    systemd.services.whitebophir = {
+      description = "Whitebophir Service";
+      wantedBy    = [ "multi-user.target" ];
+      after       = [ "network.target" ];
+      environment = {
+        PORT            = toString cfg.port;
+        HOST            = toString cfg.listenAddress;
+        WBO_HISTORY_DIR = "/var/lib/whitebophir";
+      };
+
+      serviceConfig = {
+        DynamicUser    = true;
+        ExecStart      = "${cfg.package}/bin/whitebophir";
+        Restart        = "always";
+        StateDirectory = "whitebophir";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/wiki-js.nix b/nixpkgs/nixos/modules/services/web-apps/wiki-js.nix
new file mode 100644
index 000000000000..631740f51ce3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/wiki-js.nix
@@ -0,0 +1,142 @@
+{ lib, pkgs, config, ... }:
+
+with lib;
+
+let
+  cfg = config.services.wiki-js;
+
+  format = pkgs.formats.json { };
+
+  configFile = format.generate "wiki-js.yml" cfg.settings;
+in {
+  options.services.wiki-js = {
+    enable = mkEnableOption (lib.mdDoc "wiki-js");
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/root/wiki-js.env";
+      description = lib.mdDoc ''
+        Environment file used to inject secrets into the configuration.
+      '';
+    };
+
+    stateDirectoryName = mkOption {
+      default = "wiki-js";
+      type = types.str;
+      description = lib.mdDoc ''
+        Name of the directory in {file}`/var/lib`.
+      '';
+    };
+
+    settings = mkOption {
+      default = {};
+      type = types.submodule {
+        freeformType = format.type;
+        options = {
+          port = mkOption {
+            type = types.port;
+            default = 3000;
+            description = lib.mdDoc ''
+              TCP port the process should listen on.
+            '';
+          };
+
+          bindIP = mkOption {
+            default = "0.0.0.0";
+            type = types.str;
+            description = lib.mdDoc ''
+              IP address the service should bind to.
+            '';
+          };
+
+          db = {
+            type = mkOption {
+              default = "postgres";
+              type = types.enum [ "postgres" "mysql" "mariadb" "mssql" ];
+              description = lib.mdDoc ''
+                Database driver to use for persistence. Please note that `sqlite`
+                is currently not supported, as the build process for it is not implemented
+                in `pkgs.wiki-js` and upstream does not recommend it for
+                production use.
+              '';
+            };
+            host = mkOption {
+              type = types.str;
+              example = "/run/postgresql";
+              description = lib.mdDoc ''
+                Hostname or socket-path to connect to.
+              '';
+            };
+            db = mkOption {
+              default = "wiki";
+              type = types.str;
+              description = lib.mdDoc ''
+                Name of the database to use.
+              '';
+            };
+          };
+
+          logLevel = mkOption {
+            default = "info";
+            type = types.enum [ "error" "warn" "info" "verbose" "debug" "silly" ];
+            description = lib.mdDoc ''
+              How much detail should be logged at runtime.
+            '';
+          };
+
+          offline = mkEnableOption (lib.mdDoc "offline mode") // {
+            description = lib.mdDoc ''
+              Disable latest file updates and enable
+              [sideloading](https://docs.requarks.io/install/sideload).
+            '';
+          };
+        };
+      };
+      description = lib.mdDoc ''
+        Settings to configure `wiki-js`. This directly
+        corresponds to [the upstream configuration options](https://docs.requarks.io/install/config).
+
+        Secrets can be injected via the environment by
+        - specifying [](#opt-services.wiki-js.environmentFile)
+          to contain secrets
+        - and setting sensitive values to `$(ENVIRONMENT_VAR)`
+          with this value defined in the environment-file.
+      '';
+    };
+  };
+
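+  # A minimal usage sketch (illustrative; assumes a PostgreSQL server reachable
+  # via its local socket, with credentials supplied through environmentFile):
+  #
+  #   services.wiki-js = {
+  #     enable = true;
+  #     environmentFile = "/run/keys/wiki-js.env";
+  #     settings.db = {
+  #       host = "/run/postgresql";
+  #       db = "wiki";
+  #     };
+  #   };
+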
+  config = mkIf cfg.enable {
+    services.wiki-js.settings.dataPath = "/var/lib/${cfg.stateDirectoryName}";
+    systemd.services.wiki-js = {
+      description = "A modern and powerful wiki app built on Node.js";
+      documentation = [ "https://docs.requarks.io/" ];
+      wantedBy = [ "multi-user.target" ];
+
+      path = with pkgs; [
+        # Needed for git storage.
+        git
+        # Needed for git+ssh storage.
+        openssh
+      ];
+
+      preStart = ''
+        ln -sf ${configFile} /var/lib/${cfg.stateDirectoryName}/config.yml
+        ln -sf ${pkgs.wiki-js}/server /var/lib/${cfg.stateDirectoryName}
+        ln -sf ${pkgs.wiki-js}/assets /var/lib/${cfg.stateDirectoryName}
+        ln -sf ${pkgs.wiki-js}/package.json /var/lib/${cfg.stateDirectoryName}/package.json
+      '';
+
+      serviceConfig = {
+        EnvironmentFile = mkIf (cfg.environmentFile != null) cfg.environmentFile;
+        StateDirectory = cfg.stateDirectoryName;
+        WorkingDirectory = "/var/lib/${cfg.stateDirectoryName}";
+        DynamicUser = true;
+        PrivateTmp = true;
+        ExecStart = "${pkgs.nodejs_18}/bin/node ${pkgs.wiki-js}/server";
+      };
+    };
+  };
+
+  meta.maintainers = with maintainers; [ ma27 ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/wordpress.nix b/nixpkgs/nixos/modules/services/web-apps/wordpress.nix
new file mode 100644
index 000000000000..5d2e775d4521
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/wordpress.nix
@@ -0,0 +1,573 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.wordpress;
+  eachSite = cfg.sites;
+  user = "wordpress";
+  webserver = config.services.${cfg.webserver};
+  stateDir = hostName: "/var/lib/wordpress/${hostName}";
+
+  pkg = hostName: cfg: pkgs.stdenv.mkDerivation rec {
+    pname = "wordpress-${hostName}";
+    version = src.version;
+    src = cfg.package;
+
+    installPhase = ''
+      mkdir -p $out
+      cp -r * $out/
+
+      # symlink the wordpress config
+      ln -s ${wpConfig hostName cfg} $out/share/wordpress/wp-config.php
+      # symlink uploads directory
+      ln -s ${cfg.uploadsDir} $out/share/wordpress/wp-content/uploads
+      ln -s ${cfg.fontsDir} $out/share/wordpress/wp-content/fonts
+
+      # https://github.com/NixOS/nixpkgs/pull/53399
+      #
+      # Symlinking works for most plugins and themes, but Avada, for instance, fails to
+      # understand the symlink, causing its file path stripping to fail. This results in
+      # requests that look like: https://example.com/wp-content//nix/store/...plugin/path/some-file.js
+      # Since hard linking directories is not allowed, copying is the next best thing.
+
+      # copy additional plugin(s), theme(s) and language(s)
+      ${concatStringsSep "\n" (mapAttrsToList (name: theme: "cp -r ${theme} $out/share/wordpress/wp-content/themes/${name}") cfg.themes)}
+      ${concatStringsSep "\n" (mapAttrsToList (name: plugin: "cp -r ${plugin} $out/share/wordpress/wp-content/plugins/${name}") cfg.plugins)}
+      ${concatMapStringsSep "\n" (language: "cp -r ${language}/* $out/share/wordpress/wp-content/languages/") cfg.languages}
+    '';
+  };
+
+  mergeConfig = cfg: {
+    # wordpress is installed onto a read-only file system
+    DISALLOW_FILE_EDIT = true;
+    AUTOMATIC_UPDATER_DISABLED = true;
+    DB_NAME = cfg.database.name;
+    DB_HOST = "${cfg.database.host}:${if cfg.database.socket != null then cfg.database.socket else toString cfg.database.port}";
+    DB_USER = cfg.database.user;
+    DB_CHARSET = "utf8";
+    # Always set DB_PASSWORD even when passwordFile is not set. This is the
+    # default Wordpress behaviour.
+    DB_PASSWORD =  if (cfg.database.passwordFile != null) then { _file = cfg.database.passwordFile; } else "";
+  } // cfg.settings;
+
+  wpConfig = hostName: cfg: let
+    conf_gen = c: mapAttrsToList (k: v: "define('${k}', ${mkPhpValue v});") c;
+  in pkgs.writeTextFile {
+    name = "wp-config-${hostName}.php";
+    text = ''
+      <?php
+        $table_prefix  = '${cfg.database.tablePrefix}';
+
+        require_once('${stateDir hostName}/secret-keys.php');
+
+        ${cfg.extraConfig}
+        ${concatStringsSep "\n" (conf_gen cfg.mergedConfig)}
+
+        if ( !defined('ABSPATH') )
+          define('ABSPATH', dirname(__FILE__) . '/');
+
+        require_once(ABSPATH . 'wp-settings.php');
+      ?>
+    '';
+    checkPhase = "${pkgs.php81}/bin/php --syntax-check $target";
+  };
+
+  mkPhpValue = v: let
+    isHasAttr = s: isAttrs v && hasAttr s v;
+  in
+    if isString v then escapeShellArg v
+    # NOTE: If any value contains a , (comma) this will not get escaped
+    else if isList v && any lib.strings.isCoercibleToString v then escapeShellArg (concatMapStringsSep "," toString v)
+    else if isInt v then toString v
+    else if isBool v then boolToString v
+    else if isHasAttr "_file" then "trim(file_get_contents(${lib.escapeShellArg v._file}))"
+    else if isHasAttr "_raw" then v._raw
+    else abort "The Wordpress config value ${lib.generators.toPretty {} v} can not be encoded."
+  ;
+
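+  # Sketch of how mergedConfig entries are rendered into wp-config.php by
+  # conf_gen/mkPhpValue (illustrative input on the left, generated PHP on the right):
+  #
+  #   WP_DEBUG = true;                     ->  define('WP_DEBUG', true);
+  #   WP_SITEURL = "https://example.org";  ->  define('WP_SITEURL', 'https://example.org');
+  #   DB_PASSWORD = { _file = "/run/keys/db-pass"; }
+  #     ->  define('DB_PASSWORD', trim(file_get_contents('/run/keys/db-pass')));
+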
+  secretsVars = [ "AUTH_KEY" "SECURE_AUTH_KEY" "LOGGED_IN_KEY" "NONCE_KEY" "AUTH_SALT" "SECURE_AUTH_SALT" "LOGGED_IN_SALT" "NONCE_SALT" ];
+  secretsScript = hostStateDir: ''
+    # The match in this line is not a typo, see https://github.com/NixOS/nixpkgs/pull/124839
+    grep -q "LOOGGED_IN_KEY" "${hostStateDir}/secret-keys.php" && rm "${hostStateDir}/secret-keys.php"
+    if ! test -e "${hostStateDir}/secret-keys.php"; then
+      umask 0177
+      echo "<?php" >> "${hostStateDir}/secret-keys.php"
+      ${concatMapStringsSep "\n" (var: ''
+        echo "define('${var}', '`tr -dc a-zA-Z0-9 </dev/urandom | head -c 64`');" >> "${hostStateDir}/secret-keys.php"
+      '') secretsVars}
+      echo "?>" >> "${hostStateDir}/secret-keys.php"
+      chmod 440 "${hostStateDir}/secret-keys.php"
+    fi
+  '';
+
+  siteOpts = { lib, name, config, ... }:
+    {
+      options = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.wordpress;
+          defaultText = literalExpression "pkgs.wordpress";
+          description = lib.mdDoc "Which WordPress package to use.";
+        };
+
+        uploadsDir = mkOption {
+          type = types.path;
+          default = "/var/lib/wordpress/${name}/uploads";
+          description = lib.mdDoc ''
+            This directory is used for uploads of pictures. The directory passed here is automatically
+            created and permissions adjusted as required.
+          '';
+        };
+
+        fontsDir = mkOption {
+          type = types.path;
+          default = "/var/lib/wordpress/${name}/fonts";
+          description = lib.mdDoc ''
+            This directory is used to download fonts from a remote location, e.g.
+            to host Google Fonts locally.
+          '';
+        };
+
+        plugins = mkOption {
+          type = with types; coercedTo
+            (listOf path)
+            (l: warn "setting this option with a list is deprecated"
+              listToAttrs (map (p: nameValuePair (p.name or (throw "${p} does not have a name")) p) l))
+            (attrsOf path);
+          default = {};
+          description = lib.mdDoc ''
+            Attribute set of plugins to install; each plugin is copied into the site's `wp-content/plugins` directory.
+
+            ::: {.note}
+            These plugins need to be packaged before use, see example.
+            :::
+          '';
+          example = literalExpression ''
+            {
+              inherit (pkgs.wordpressPackages.plugins) embed-pdf-viewer-plugin;
+            }
+          '';
+        };
+
+        themes = mkOption {
+          type = with types; coercedTo
+            (listOf path)
+            (l: warn "setting this option with a list is deprecated"
+              listToAttrs (map (p: nameValuePair (p.name or (throw "${p} does not have a name")) p) l))
+            (attrsOf path);
+          default = { inherit (pkgs.wordpressPackages.themes) twentytwentythree; };
+          defaultText = literalExpression "{ inherit (pkgs.wordpressPackages.themes) twentytwentythree; }";
+          description = lib.mdDoc ''
+            Attribute set of themes to install; each theme is copied into the site's `wp-content/themes` directory.
+
+            ::: {.note}
+            These themes need to be packaged before use, see example.
+            :::
+          '';
+          example = literalExpression ''
+            {
+              inherit (pkgs.wordpressPackages.themes) responsive-theme;
+            }
+          '';
+        };
+
+        languages = mkOption {
+          type = types.listOf types.path;
+          default = [];
+          description = lib.mdDoc ''
+            List of language packs to install; the contents of each are copied into the site's `wp-content/languages` directory.
+          '';
+          example = literalExpression ''
+            [(
+              # Let's package the German language.
+              # For other languages try to replace language and country code in the download URL with your desired one.
+              # Reference https://translate.wordpress.org for available translations and
+              # codes.
+              pkgs.stdenv.mkDerivation {
+                name = "language-de";
+                src = pkgs.fetchurl {
+                  url = "https://de.wordpress.org/wordpress-''${pkgs.wordpress.version}-de_DE.tar.gz";
+                  # Name is required to invalidate the hash when wordpress is updated
+                  name = "wordpress-''${pkgs.wordpress.version}-language-de"
+                  sha256 = "sha256-dlas0rXTSV4JAl8f/UyMbig57yURRYRhTMtJwF9g8h0=";
+                };
+                installPhase = "mkdir -p $out; cp -r ./wp-content/languages/* $out/";
+              }
+            )];
+          '';
+        };
+
+        database = {
+          host = mkOption {
+            type = types.str;
+            default = "localhost";
+            description = lib.mdDoc "Database host address.";
+          };
+
+          port = mkOption {
+            type = types.port;
+            default = 3306;
+            description = lib.mdDoc "Database host port.";
+          };
+
+          name = mkOption {
+            type = types.str;
+            default = "wordpress";
+            description = lib.mdDoc "Database name.";
+          };
+
+          user = mkOption {
+            type = types.str;
+            default = "wordpress";
+            description = lib.mdDoc "Database user.";
+          };
+
+          passwordFile = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            example = "/run/keys/wordpress-dbpassword";
+            description = lib.mdDoc ''
+              A file containing the password corresponding to
+              {option}`database.user`.
+            '';
+          };
+
+          tablePrefix = mkOption {
+            type = types.str;
+            default = "wp_";
+            description = lib.mdDoc ''
+              The $table_prefix is the value placed in the front of your database tables.
+              Change the value if you want to use something other than wp_ for your database
+              prefix. Typically this is changed if you are installing multiple WordPress blogs
+              in the same database.
+
+              See <https://codex.wordpress.org/Editing_wp-config.php#table_prefix>.
+            '';
+          };
+
+          socket = mkOption {
+            type = types.nullOr types.path;
+            default = null;
+            defaultText = literalExpression "/run/mysqld/mysqld.sock";
+            description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+          };
+
+          createLocally = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc "Create the database and database user locally.";
+          };
+        };
+
+        virtualHost = mkOption {
+          type = types.submodule (import ../web-servers/apache-httpd/vhost-options.nix);
+          example = literalExpression ''
+            {
+              adminAddr = "webmaster@example.org";
+              forceSSL = true;
+              enableACME = true;
+            }
+          '';
+          description = lib.mdDoc ''
+            Apache configuration can be done by adapting {option}`services.httpd.virtualHosts`.
+          '';
+        };
+
+        poolConfig = mkOption {
+          type = with types; attrsOf (oneOf [ str int bool ]);
+          default = {
+            "pm" = "dynamic";
+            "pm.max_children" = 32;
+            "pm.start_servers" = 2;
+            "pm.min_spare_servers" = 2;
+            "pm.max_spare_servers" = 4;
+            "pm.max_requests" = 500;
+          };
+          description = lib.mdDoc ''
+            Options for the WordPress PHP pool. See the documentation on `php-fpm.conf`
+            for details on configuration directives.
+          '';
+        };
+
+        settings = mkOption {
+          type = types.attrsOf types.anything;
+          default = {};
+          description = lib.mdDoc ''
+            Structured WordPress configuration.
+            Refer to <https://developer.wordpress.org/apis/wp-config-php>
+            for details and supported values.
+          '';
+          example = literalExpression ''
+            {
+              WP_DEFAULT_THEME = "twentytwentytwo";
+              WP_SITEURL = "https://example.org";
+              WP_HOME = "https://example.org";
+              WP_DEBUG = true;
+              WP_DEBUG_DISPLAY = true;
+              WPLANG = "de_DE";
+              FORCE_SSL_ADMIN = true;
+              AUTOMATIC_UPDATER_DISABLED = true;
+            }
+          '';
+        };
+
+        mergedConfig = mkOption {
+          readOnly = true;
+          default = mergeConfig config;
+          defaultText = literalExpression ''
+            {
+              DISALLOW_FILE_EDIT = true;
+              AUTOMATIC_UPDATER_DISABLED = true;
+            }
+          '';
+          description = lib.mdDoc ''
+            Read-only representation of the final configuration.
+          '';
+        };
+
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc ''
+            Any additional text to be appended to the wp-config.php
+            configuration file. This is a PHP script. For configuration
+            settings, see <https://codex.wordpress.org/Editing_wp-config.php>.
+
+            **Note**: Please pass structured settings via
+            `services.wordpress.sites.${name}.settings` instead.
+          '';
+          example = ''
+            @ini_set( 'log_errors', 'Off' );
+            @ini_set( 'display_errors', 'On' );
+          '';
+        };
+
+      };
+
+      config.virtualHost.hostName = mkDefault name;
+    };
+in
+{
+  # interface
+  options = {
+    services.wordpress = {
+
+      sites = mkOption {
+        type = types.attrsOf (types.submodule siteOpts);
+        default = {};
+        description = lib.mdDoc "Specification of one or more WordPress sites to serve";
+      };
+
+      webserver = mkOption {
+        type = types.enum [ "httpd" "nginx" "caddy" ];
+        default = "httpd";
+        description = lib.mdDoc ''
+          Whether to use apache2, nginx or caddy for virtual host management.
+
+          Further nginx configuration can be done by adapting `services.nginx.virtualHosts.<name>`.
+          See [](#opt-services.nginx.virtualHosts) for further information.
+
+          Further apache2 configuration can be done by adapting `services.httpd.virtualHosts.<name>`.
+          See [](#opt-services.httpd.virtualHosts) for further information.
+        '';
+      };
+
+    };
+  };
+
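+  # A minimal usage sketch of a single site (illustrative values; the default
+  # httpd webserver and a locally created MariaDB database are used):
+  #
+  #   services.wordpress.sites."blog.example.org" = {
+  #     virtualHost.adminAddr = "webmaster@example.org";
+  #     settings = {
+  #       WP_HOME = "https://blog.example.org";
+  #       WP_SITEURL = "https://blog.example.org";
+  #     };
+  #   };
+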
+  # implementation
+  config = mkIf (eachSite != {}) (mkMerge [{
+
+    assertions =
+      (mapAttrsToList (hostName: cfg:
+        { assertion = cfg.database.createLocally -> cfg.database.user == user;
+          message = ''services.wordpress.sites."${hostName}".database.user must be ${user} if the database is to be automatically provisioned'';
+        }) eachSite) ++
+      (mapAttrsToList (hostName: cfg:
+        { assertion = cfg.database.createLocally -> cfg.database.passwordFile == null;
+          message = ''services.wordpress.sites."${hostName}".database.passwordFile cannot be specified if services.wordpress.sites."${hostName}".database.createLocally is set to true.'';
+        }) eachSite);
+
+
+    services.mysql = mkIf (any (v: v.database.createLocally) (attrValues eachSite)) {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = mapAttrsToList (hostName: cfg: cfg.database.name) eachSite;
+      ensureUsers = mapAttrsToList (hostName: cfg:
+        { name = cfg.database.user;
+          ensurePermissions = { "${cfg.database.name}.*" = "ALL PRIVILEGES"; };
+        }
+      ) eachSite;
+    };
+
+    services.phpfpm.pools = mapAttrs' (hostName: cfg: (
+      nameValuePair "wordpress-${hostName}" {
+        inherit user;
+        group = webserver.group;
+        settings = {
+          "listen.owner" = webserver.user;
+          "listen.group" = webserver.group;
+        } // cfg.poolConfig;
+      }
+    )) eachSite;
+
+  }
+
+  (mkIf (cfg.webserver == "httpd") {
+    services.httpd = {
+      enable = true;
+      extraModules = [ "proxy_fcgi" ];
+      virtualHosts = mapAttrs (hostName: cfg: mkMerge [ cfg.virtualHost {
+        documentRoot = mkForce "${pkg hostName cfg}/share/wordpress";
+        extraConfig = ''
+          <Directory "${pkg hostName cfg}/share/wordpress">
+            <FilesMatch "\.php$">
+              <If "-f %{REQUEST_FILENAME}">
+                SetHandler "proxy:unix:${config.services.phpfpm.pools."wordpress-${hostName}".socket}|fcgi://localhost/"
+              </If>
+            </FilesMatch>
+
+            # standard wordpress .htaccess contents
+            <IfModule mod_rewrite.c>
+              RewriteEngine On
+              RewriteBase /
+              RewriteRule ^index\.php$ - [L]
+              RewriteCond %{REQUEST_FILENAME} !-f
+              RewriteCond %{REQUEST_FILENAME} !-d
+              RewriteRule . /index.php [L]
+            </IfModule>
+
+            DirectoryIndex index.php
+            Require all granted
+            Options +FollowSymLinks -Indexes
+          </Directory>
+
+          # https://wordpress.org/support/article/hardening-wordpress/#securing-wp-config-php
+          <Files wp-config.php>
+            Require all denied
+          </Files>
+        '';
+      } ]) eachSite;
+    };
+  })
+
+  {
+    systemd.tmpfiles.rules = flatten (mapAttrsToList (hostName: cfg: [
+      "d '${stateDir hostName}' 0750 ${user} ${webserver.group} - -"
+      "d '${cfg.uploadsDir}' 0750 ${user} ${webserver.group} - -"
+      "Z '${cfg.uploadsDir}' 0750 ${user} ${webserver.group} - -"
+      "d '${cfg.fontsDir}' 0750 ${user} ${webserver.group} - -"
+      "Z '${cfg.fontsDir}' 0750 ${user} ${webserver.group} - -"
+    ]) eachSite);
+
+    systemd.services = mkMerge [
+      (mapAttrs' (hostName: cfg: (
+        nameValuePair "wordpress-init-${hostName}" {
+          wantedBy = [ "multi-user.target" ];
+          before = [ "phpfpm-wordpress-${hostName}.service" ];
+          after = optional cfg.database.createLocally "mysql.service";
+          script = secretsScript (stateDir hostName);
+
+          serviceConfig = {
+            Type = "oneshot";
+            User = user;
+            Group = webserver.group;
+          };
+      })) eachSite)
+
+      (optionalAttrs (any (v: v.database.createLocally) (attrValues eachSite)) {
+        httpd.after = [ "mysql.service" ];
+      })
+    ];
+
+    users.users.${user} = {
+      group = webserver.group;
+      isSystemUser = true;
+    };
+  }
+
+  (mkIf (cfg.webserver == "nginx") {
+    services.nginx = {
+      enable = true;
+      virtualHosts = mapAttrs (hostName: cfg: {
+        serverName = mkDefault hostName;
+        root = "${pkg hostName cfg}/share/wordpress";
+        extraConfig = ''
+          index index.php;
+        '';
+        locations = {
+          "/" = {
+            priority = 200;
+            extraConfig = ''
+              try_files $uri $uri/ /index.php$is_args$args;
+            '';
+          };
+          "~ \\.php$" = {
+            priority = 500;
+            extraConfig = ''
+              fastcgi_split_path_info ^(.+\.php)(/.+)$;
+              fastcgi_pass unix:${config.services.phpfpm.pools."wordpress-${hostName}".socket};
+              fastcgi_index index.php;
+              include "${config.services.nginx.package}/conf/fastcgi.conf";
+              fastcgi_param PATH_INFO $fastcgi_path_info;
+              fastcgi_param PATH_TRANSLATED $document_root$fastcgi_path_info;
+              # Mitigate https://httpoxy.org/ vulnerabilities
+              fastcgi_param HTTP_PROXY "";
+              fastcgi_intercept_errors off;
+              fastcgi_buffer_size 16k;
+              fastcgi_buffers 4 16k;
+              fastcgi_connect_timeout 300;
+              fastcgi_send_timeout 300;
+              fastcgi_read_timeout 300;
+            '';
+          };
+          "~ /\\." = {
+            priority = 800;
+            extraConfig = "deny all;";
+          };
+          "~* /(?:uploads|files)/.*\\.php$" = {
+            priority = 900;
+            extraConfig = "deny all;";
+          };
+          "~* \\.(js|css|png|jpg|jpeg|gif|ico)$" = {
+            priority = 1000;
+            extraConfig = ''
+              expires max;
+              log_not_found off;
+            '';
+          };
+        };
+      }) eachSite;
+    };
+  })
+
+  (mkIf (cfg.webserver == "caddy") {
+    services.caddy = {
+      enable = true;
+      virtualHosts = mapAttrs' (hostName: cfg: (
+        nameValuePair "http://${hostName}" {
+          extraConfig = ''
+            root    * /${pkg hostName cfg}/share/wordpress
+            file_server
+
+            php_fastcgi unix/${config.services.phpfpm.pools."wordpress-${hostName}".socket}
+
+            @uploads {
+              path_regexp path /uploads\/(.*)\.php
+            }
+            rewrite @uploads /
+
+            @wp-admin {
+              path  not ^\/wp-admin/*
+            }
+            rewrite @wp-admin {path}/index.php?{query}
+          '';
+        }
+      )) eachSite;
+    };
+  })
+
+
+  ]);
+}
diff --git a/nixpkgs/nixos/modules/services/web-apps/writefreely.nix b/nixpkgs/nixos/modules/services/web-apps/writefreely.nix
new file mode 100644
index 000000000000..f92afa9276e3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/writefreely.nix
@@ -0,0 +1,484 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (builtins) toString;
+  inherit (lib) types mkIf mkOption mkDefault;
+  inherit (lib) optional optionals optionalAttrs optionalString;
+
+  inherit (pkgs) sqlite;
+
+  format = pkgs.formats.ini {
+    mkKeyValue = key: value:
+      let
+        value' = lib.optionalString (value != null)
+          (if builtins.isBool value then
+            if value == true then "true" else "false"
+          else
+            toString value);
+      in "${key} = ${value'}";
+  };
+
+  cfg = config.services.writefreely;
+
+  isSqlite = cfg.database.type == "sqlite3";
+  isMysql = cfg.database.type == "mysql";
+  isMysqlLocal = isMysql && cfg.database.createLocally == true;
+
+  hostProtocol = if cfg.acme.enable then "https" else "http";
+
+  settings = cfg.settings // {
+    app = cfg.settings.app or { } // {
+      host = cfg.settings.app.host or "${hostProtocol}://${cfg.host}";
+    };
+
+    database = if cfg.database.type == "sqlite3" then {
+      type = "sqlite3";
+      filename = cfg.settings.database.filename or "writefreely.db";
+      database = cfg.database.name;
+    } else {
+      type = "mysql";
+      username = cfg.database.user;
+      password = "#dbpass#";
+      database = cfg.database.name;
+      host = cfg.database.host;
+      port = cfg.database.port;
+      tls = cfg.database.tls;
+    };
+
+    server = cfg.settings.server or { } // {
+      bind = cfg.settings.server.bind or "localhost";
+      gopher_port = cfg.settings.server.gopher_port or 0;
+      autocert = !cfg.nginx.enable && cfg.acme.enable;
+      templates_parent_dir =
+        cfg.settings.server.templates_parent_dir or cfg.package.src;
+      static_parent_dir = cfg.settings.server.static_parent_dir or assets;
+      pages_parent_dir =
+        cfg.settings.server.pages_parent_dir or cfg.package.src;
+      keys_parent_dir = cfg.settings.server.keys_parent_dir or cfg.stateDir;
+    };
+  };
+
+  configFile = format.generate "config.ini" settings;
+
+  assets = pkgs.stdenvNoCC.mkDerivation {
+    pname = "writefreely-assets";
+
+    inherit (cfg.package) version src;
+
+    nativeBuildInputs = with pkgs.nodePackages; [ less ];
+
+    buildPhase = ''
+      mkdir -p $out
+
+      cp -r static $out/
+    '';
+
+    installPhase = ''
+      less_dir=$src/less
+      css_dir=$out/static/css
+
+      lessc $less_dir/app.less $css_dir/write.css
+      lessc $less_dir/fonts.less $css_dir/fonts.css
+      lessc $less_dir/icons.less $css_dir/icons.css
+      lessc $less_dir/prose.less $css_dir/prose.css
+    '';
+  };
+
+  withConfigFile = text: ''
+    db_pass=${
+      optionalString (cfg.database.passwordFile != null)
+      "$(head -n1 ${cfg.database.passwordFile})"
+    }
+
+    cp -f ${configFile} '${cfg.stateDir}/config.ini'
+    sed -e "s,#dbpass#,$db_pass,g" -i '${cfg.stateDir}/config.ini'
+    chmod 440 '${cfg.stateDir}/config.ini'
+
+    ${text}
+  '';
+
+  withMysql = text:
+    withConfigFile ''
+      query () {
+        local result=$(${config.services.mysql.package}/bin/mysql \
+          --user=${cfg.database.user} \
+          --password=$db_pass \
+          --database=${cfg.database.name} \
+          --silent \
+          --raw \
+          --skip-column-names \
+          --execute "$1" \
+        )
+
+        echo $result
+      }
+
+      ${text}
+    '';
+
+  withSqlite = text:
+    withConfigFile ''
+      query () {
+        local result=$(${sqlite}/bin/sqlite3 \
+          '${cfg.stateDir}/${settings.database.filename}' \
+          "$1" \
+        )
+
+        echo $result
+      }
+
+      ${text}
+    '';
+in {
+  options.services.writefreely = {
+    enable =
+      lib.mkEnableOption (lib.mdDoc "WriteFreely, a blogging platform for building a digital writing community");
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.writefreely;
+      defaultText = lib.literalExpression "pkgs.writefreely";
+      description = lib.mdDoc "Writefreely package to use.";
+    };
+
+    stateDir = mkOption {
+      type = types.path;
+      default = "/var/lib/writefreely";
+      description = lib.mdDoc "The state directory where keys and data are stored.";
+    };
+
+    user = mkOption {
+      type = types.str;
+      default = "writefreely";
+      description = lib.mdDoc "User under which Writefreely is ran.";
+    };
+
+    group = mkOption {
+      type = types.str;
+      default = "writefreely";
+      description = lib.mdDoc "Group under which Writefreely is ran.";
+    };
+
+    host = mkOption {
+      type = types.str;
+      default = "";
+      description = lib.mdDoc "The public host name to serve.";
+      example = "example.com";
+    };
+
+    settings = mkOption {
+      default = { };
+      description = lib.mdDoc ''
+        Writefreely configuration ({file}`config.ini`). Refer to
+        <https://writefreely.org/docs/latest/admin/config>
+        for details.
+      '';
+
+      type = types.submodule {
+        freeformType = format.type;
+
+        options = {
+          app = {
+            theme = mkOption {
+              type = types.str;
+              default = "write";
+              description = lib.mdDoc "The theme to apply.";
+            };
+          };
+
+          server = {
+            port = mkOption {
+              type = types.port;
+              default = if cfg.nginx.enable then 18080 else 80;
+              defaultText = "80";
+              description = lib.mdDoc "The port WriteFreely should listen on.";
+            };
+          };
+        };
+      };
+    };
+
+    database = {
+      type = mkOption {
+        type = types.enum [ "sqlite3" "mysql" ];
+        default = "sqlite3";
+        description = lib.mdDoc "The database provider to use.";
+      };
+
+      name = mkOption {
+        type = types.str;
+        default = "writefreely";
+        description = lib.mdDoc "The name of the database to store data in.";
+      };
+
+      user = mkOption {
+        type = types.nullOr types.str;
+        default = if cfg.database.type == "mysql" then "writefreely" else null;
+        defaultText = "writefreely";
+        description = lib.mdDoc "The database user to connect as.";
+      };
+
+      passwordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "The file to load the database password from.";
+      };
+
+      host = mkOption {
+        type = types.str;
+        default = "localhost";
+        description = lib.mdDoc "The database host to connect to.";
+      };
+
+      port = mkOption {
+        type = types.port;
+        default = 3306;
+        description = lib.mdDoc "The port used when connecting to the database host.";
+      };
+
+      tls = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc "Whether or not TLS should be used for the database connection.";
+      };
+
+      migrate = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc "Whether or not to automatically run migrations on startup.";
+      };
+
+      createLocally = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          When {option}`services.writefreely.database.type` is set to
+          `"mysql"`, this option will enable the MySQL service locally.
+        '';
+      };
+    };
+
+    admin = {
+      name = mkOption {
+        type = types.nullOr types.str;
+        description = lib.mdDoc "The name of the first admin user.";
+        default = null;
+      };
+
+      initialPasswordFile = mkOption {
+        type = types.path;
+        description = lib.mdDoc ''
+          Path to a file containing the initial password for the admin user.
+          If not provided, the default password will be set to `nixos`.
+        '';
+        default = pkgs.writeText "default-admin-pass" "nixos";
+        defaultText = "/nix/store/xxx-default-admin-pass";
+      };
+    };
+
+    nginx = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc "Whether or not to enable and configure nginx as a proxy for WriteFreely.";
+      };
+
+      forceSSL = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether or not to force the use of SSL.";
+      };
+    };
+
+    acme = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc "Whether or not to automatically fetch and configure SSL certs.";
+      };
+    };
+  };
+
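+  # A minimal usage sketch (illustrative host; uses the default sqlite3 backend
+  # and lets the module create the admin user on first start):
+  #
+  #   services.writefreely = {
+  #     enable = true;
+  #     host = "write.example.org";
+  #     admin.name = "admin";
+  #     nginx.enable = true;
+  #   };
+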
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = cfg.host != "";
+        message = "services.writefreely.host must be set";
+      }
+      {
+        assertion = isMysqlLocal -> cfg.database.passwordFile != null;
+        message =
+          "services.writefreely.database.passwordFile must be set if services.writefreely.database.createLocally is set to true";
+      }
+      {
+        assertion = isSqlite -> !cfg.database.createLocally;
+        message =
+          "services.writefreely.database.createLocally has no use when services.writefreely.database.type is set to sqlite3";
+      }
+    ];
+
+    users = {
+      users = optionalAttrs (cfg.user == "writefreely") {
+        writefreely = {
+          group = cfg.group;
+          home = cfg.stateDir;
+          isSystemUser = true;
+        };
+      };
+
+      groups =
+        optionalAttrs (cfg.group == "writefreely") { writefreely = { }; };
+    };
+
+    systemd.tmpfiles.rules =
+      [ "d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -" ];
+
+    systemd.services.writefreely = {
+      after = [ "network.target" ]
+        ++ optional isSqlite "writefreely-sqlite-init.service"
+        ++ optional isMysql "writefreely-mysql-init.service"
+        ++ optional isMysqlLocal "mysql.service";
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.stateDir;
+        Restart = "always";
+        RestartSec = 20;
+        ExecStart =
+          "${cfg.package}/bin/writefreely -c '${cfg.stateDir}/config.ini' serve";
+        AmbientCapabilities =
+          optionalString (settings.server.port < 1024) "cap_net_bind_service";
+      };
+
+      preStart = ''
+        if ! test -d "${cfg.stateDir}/keys"; then
+          mkdir -p ${cfg.stateDir}/keys
+
+          # Key files end up with the wrong permissions by default.
+          # We need to correct them so that WriteFreely can read them.
+          chmod -R 750 "${cfg.stateDir}/keys"
+
+          ${cfg.package}/bin/writefreely -c '${cfg.stateDir}/config.ini' keys generate
+        fi
+      '';
+    };
+
+    systemd.services.writefreely-sqlite-init = mkIf isSqlite {
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.stateDir;
+        ReadOnlyPaths = optional (cfg.admin.initialPasswordFile != null)
+          cfg.admin.initialPasswordFile;
+      };
+
+      script = let
+        migrateDatabase = optionalString cfg.database.migrate ''
+          ${cfg.package}/bin/writefreely -c '${cfg.stateDir}/config.ini' db migrate
+        '';
+
+        createAdmin = optionalString (cfg.admin.name != null) ''
+          if [[ $(query "SELECT COUNT(*) FROM users") == 0 ]]; then
+            admin_pass=$(head -n1 ${cfg.admin.initialPasswordFile})
+
+            ${cfg.package}/bin/writefreely -c '${cfg.stateDir}/config.ini' --create-admin ${cfg.admin.name}:$admin_pass
+          fi
+        '';
+      in withSqlite ''
+        if ! test -f '${settings.database.filename}'; then
+          ${cfg.package}/bin/writefreely -c '${cfg.stateDir}/config.ini' db init
+        fi
+
+        ${migrateDatabase}
+
+        ${createAdmin}
+      '';
+    };
+
+    systemd.services.writefreely-mysql-init = mkIf isMysql {
+      wantedBy = [ "multi-user.target" ];
+      after = optional isMysqlLocal "mysql.service";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = cfg.user;
+        Group = cfg.group;
+        WorkingDirectory = cfg.stateDir;
+        ReadOnlyPaths = optional isMysqlLocal cfg.database.passwordFile
+          ++ optional (cfg.admin.initialPasswordFile != null)
+          cfg.admin.initialPasswordFile;
+      };
+
+      script = let
+        updateUser = optionalString isMysqlLocal ''
+          # WriteFreely currently *requires* a password for authentication, so we
+          # need to update the user in MySQL accordingly. By default MySQL users
+          # authenticate with auth_socket or unix_socket.
+          # See: https://github.com/writefreely/writefreely/issues/568
+          ${config.services.mysql.package}/bin/mysql --skip-column-names --execute "ALTER USER '${cfg.database.user}'@'localhost' IDENTIFIED VIA unix_socket OR mysql_native_password USING PASSWORD('$db_pass'); FLUSH PRIVILEGES;"
+        '';
+
+        migrateDatabase = optionalString cfg.database.migrate ''
+          ${cfg.package}/bin/writefreely -c '${cfg.stateDir}/config.ini' db migrate
+        '';
+
+        createAdmin = optionalString (cfg.admin.name != null) ''
+          if [[ $(query 'SELECT COUNT(*) FROM users') == 0 ]]; then
+            admin_pass=$(head -n1 ${cfg.admin.initialPasswordFile})
+            ${cfg.package}/bin/writefreely -c '${cfg.stateDir}/config.ini' --create-admin ${cfg.admin.name}:$admin_pass
+          fi
+        '';
+      in withMysql ''
+        ${updateUser}
+
+        if [[ $(query "SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = '${cfg.database.name}'") == 0 ]]; then
+          ${cfg.package}/bin/writefreely -c '${cfg.stateDir}/config.ini' db init
+        fi
+
+        ${migrateDatabase}
+
+        ${createAdmin}
+      '';
+    };
+
+    services.mysql = mkIf isMysqlLocal {
+      enable = true;
+      package = mkDefault pkgs.mariadb;
+      ensureDatabases = [ cfg.database.name ];
+      ensureUsers = [{
+        name = cfg.database.user;
+        ensurePermissions = {
+          "${cfg.database.name}.*" = "ALL PRIVILEGES";
+          # WriteFreely requires the use of passwords, so we need permissions
+          # to `ALTER` the user to add password support and also to reload
+          # permissions so they can be used.
+          "*.*" = "CREATE USER, RELOAD";
+        };
+      }];
+    };
+
+    services.nginx = lib.mkIf cfg.nginx.enable {
+      enable = true;
+      recommendedProxySettings = true;
+
+      virtualHosts."${cfg.host}" = {
+        enableACME = cfg.acme.enable;
+        forceSSL = cfg.nginx.forceSSL;
+
+        locations."/" = {
+          proxyPass = "http://127.0.0.1:${toString settings.server.port}";
+        };
+      };
+    };
+  };
+}
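
As an illustrative sketch (not part of the diff), the options defined above could be combined roughly as follows to run WriteFreely behind nginx with a locally managed MariaDB instance; the host name and secret paths are placeholders:

  services.writefreely = {
    enable = true;
    host = "write.example.org";                                    # placeholder domain
    admin.name = "admin";
    admin.initialPasswordFile = "/run/secrets/writefreely-admin";  # hypothetical secret path
    database = {
      type = "mysql";
      createLocally = true;                                        # provisions MariaDB via services.mysql
      passwordFile = "/run/secrets/writefreely-db";                # required when createLocally = true
    };
    nginx = { enable = true; forceSSL = true; };
    acme.enable = true;
  };
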
diff --git a/nixpkgs/nixos/modules/services/web-apps/youtrack.nix b/nixpkgs/nixos/modules/services/web-apps/youtrack.nix
new file mode 100644
index 000000000000..09a2b9e965c0
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/youtrack.nix
@@ -0,0 +1,181 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.youtrack;
+
+  extraAttr = concatStringsSep " " (mapAttrsToList (k: v: "-D${k}=${v}") (stdParams // cfg.extraParams));
+  mergeAttrList = lib.foldl' lib.mergeAttrs {};
+
+  stdParams = mergeAttrList [
+    (optionalAttrs (cfg.baseUrl != null) {
+      "jetbrains.youtrack.baseUrl" = cfg.baseUrl;
+    })
+    {
+      "java.awt.headless" = "true";
+      "jetbrains.youtrack.disableBrowser" = "true";
+    }
+  ];
+in
+{
+  options.services.youtrack = {
+
+    enable = mkEnableOption (lib.mdDoc "YouTrack service");
+
+    address = mkOption {
+      description = lib.mdDoc ''
+        The interface youtrack will listen on.
+      '';
+      default = "127.0.0.1";
+      type = types.str;
+    };
+
+    baseUrl = mkOption {
+      description = lib.mdDoc ''
+        The base URL for YouTrack. It will be auto-detected and stored in the database.
+      '';
+      type = types.nullOr types.str;
+      default = null;
+    };
+
+    extraParams = mkOption {
+      default = {};
+      description = lib.mdDoc ''
+        Extra parameters to pass to youtrack. See
+        https://www.jetbrains.com/help/youtrack/standalone/YouTrack-Java-Start-Parameters.html
+        for more information.
+      '';
+      example = literalExpression ''
+        {
+          "jetbrains.youtrack.overrideRootPassword" = "tortuga";
+        }
+      '';
+      type = types.attrsOf types.str;
+    };
+
+    package = mkOption {
+      description = lib.mdDoc ''
+        Package to use.
+      '';
+      type = types.package;
+      default = pkgs.youtrack;
+      defaultText = literalExpression "pkgs.youtrack";
+    };
+
+    port = mkOption {
+      description = lib.mdDoc ''
+        The port youtrack will listen on.
+      '';
+      default = 8080;
+      type = types.port;
+    };
+
+    statePath = mkOption {
+      description = lib.mdDoc ''
+        Where to keep the youtrack database.
+      '';
+      type = types.path;
+      default = "/var/lib/youtrack";
+    };
+
+    virtualHost = mkOption {
+      description = lib.mdDoc ''
+        Name of the nginx virtual host to use and set up.
+        If null, nothing is set up.
+      '';
+      default = null;
+      type = types.nullOr types.str;
+    };
+
+    jvmOpts = mkOption {
+      description = lib.mdDoc ''
+        Extra options to pass to the JVM.
+        See https://www.jetbrains.com/help/youtrack/standalone/Configure-JVM-Options.html
+        for more information.
+      '';
+      type = types.separatedString " ";
+      example = "-XX:MetaspaceSize=250m";
+      default = "";
+    };
+
+    maxMemory = mkOption {
+      description = lib.mdDoc ''
+        Maximum Java heap size.
+      '';
+      type = types.str;
+      default = "1g";
+    };
+
+    maxMetaspaceSize = mkOption {
+      description = lib.mdDoc ''
+        Maximum Java Metaspace memory.
+      '';
+      type = types.str;
+      default = "350m";
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.youtrack = {
+      environment.HOME = cfg.statePath;
+      environment.YOUTRACK_JVM_OPTS = "${extraAttr}";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ unixtools.hostname ];
+      serviceConfig = {
+        Type = "simple";
+        User = "youtrack";
+        Group = "youtrack";
+        Restart = "on-failure";
+        ExecStart = ''${cfg.package}/bin/youtrack --J-Xmx${cfg.maxMemory} --J-XX:MaxMetaspaceSize=${cfg.maxMetaspaceSize} ${cfg.jvmOpts} ${cfg.address}:${toString cfg.port}'';
+      };
+    };
+
+    users.users.youtrack = {
+      description = "Youtrack service user";
+      isSystemUser = true;
+      home = cfg.statePath;
+      createHome = true;
+      group = "youtrack";
+    };
+
+    users.groups.youtrack = {};
+
+    services.nginx = mkIf (cfg.virtualHost != null) {
+      upstreams.youtrack.servers."${cfg.address}:${toString cfg.port}" = {};
+      virtualHosts.${cfg.virtualHost}.locations = {
+        "/" = {
+          proxyPass = "http://youtrack";
+          extraConfig = ''
+            client_max_body_size 10m;
+            proxy_http_version 1.1;
+            proxy_set_header X-Forwarded-Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+          '';
+        };
+
+        "/api/eventSourceBus" = {
+          proxyPass = "http://youtrack";
+          extraConfig = ''
+            proxy_cache off;
+            proxy_buffering off;
+            proxy_read_timeout 86400s;
+            proxy_send_timeout 86400s;
+            proxy_set_header Connection "";
+            chunked_transfer_encoding off;
+            client_max_body_size 10m;
+            proxy_http_version 1.1;
+            proxy_set_header X-Forwarded-Host $http_host;
+            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+            proxy_set_header X-Forwarded-Proto $scheme;
+          '';
+        };
+
+      };
+    };
+
+  };
+}
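
A minimal usage sketch for the module above; the virtual host name is a placeholder, and the nginx proxy locations are attached to that vhost by the module:

  services.youtrack = {
    enable = true;
    virtualHost = "youtrack.example.org";   # placeholder host name
    maxMemory = "2g";
    extraParams = {
      # example start parameter, as in the option's own example
      "jetbrains.youtrack.overrideRootPassword" = "changeme";
    };
  };
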
diff --git a/nixpkgs/nixos/modules/services/web-apps/zabbix.nix b/nixpkgs/nixos/modules/services/web-apps/zabbix.nix
new file mode 100644
index 000000000000..2cea7e7cea72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/zabbix.nix
@@ -0,0 +1,238 @@
+{ config, lib, options, pkgs, ... }:
+
+let
+
+  inherit (lib) mkDefault mkEnableOption mkForce mkIf mkMerge mkOption types;
+  inherit (lib) literalExpression mapAttrs optionalString versionAtLeast;
+
+  cfg = config.services.zabbixWeb;
+  opt = options.services.zabbixWeb;
+  fpm = config.services.phpfpm.pools.zabbix;
+
+  user = "zabbix";
+  group = "zabbix";
+  stateDir = "/var/lib/zabbix";
+
+  zabbixConfig = pkgs.writeText "zabbix.conf.php" ''
+    <?php
+    // Zabbix GUI configuration file.
+    global $DB;
+    $DB['TYPE'] = '${ { mysql = "MYSQL"; pgsql = "POSTGRESQL"; oracle = "ORACLE"; }.${cfg.database.type} }';
+    $DB['SERVER'] = '${cfg.database.host}';
+    $DB['PORT'] = '${toString cfg.database.port}';
+    $DB['DATABASE'] = '${cfg.database.name}';
+    $DB['USER'] = '${cfg.database.user}';
+    # NOTE: file_get_contents adds a newline at the end of the returned string
+    $DB['PASSWORD'] = ${if cfg.database.passwordFile != null then "trim(file_get_contents('${cfg.database.passwordFile}'), \"\\r\\n\")" else "''"};
+    // Schema name. Used for IBM DB2 and PostgreSQL.
+    $DB['SCHEMA'] = ''';
+    $ZBX_SERVER = '${cfg.server.address}';
+    $ZBX_SERVER_PORT = '${toString cfg.server.port}';
+    $ZBX_SERVER_NAME = ''';
+    $IMAGE_FORMAT_DEFAULT = IMAGE_FORMAT_PNG;
+
+    ${cfg.extraConfig}
+  '';
+
+in
+{
+  # interface
+
+  options.services = {
+    zabbixWeb = {
+      enable = mkEnableOption (lib.mdDoc "the Zabbix web interface");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.zabbix.web;
+        defaultText = literalExpression "zabbix.web";
+        description = lib.mdDoc "Which Zabbix package to use.";
+      };
+
+      server = {
+        port = mkOption {
+          type = types.port;
+          description = lib.mdDoc "The port of the Zabbix server to connect to.";
+          default = 10051;
+        };
+
+        address = mkOption {
+          type = types.str;
+          description = lib.mdDoc "The IP address or hostname of the Zabbix server to connect to.";
+          default = "localhost";
+        };
+      };
+
+      database = {
+        type = mkOption {
+          type = types.enum [ "mysql" "pgsql" "oracle" ];
+          example = "mysql";
+          default = "pgsql";
+          description = lib.mdDoc "Database engine to use.";
+        };
+
+        host = mkOption {
+          type = types.str;
+          default = "";
+          description = lib.mdDoc "Database host address.";
+        };
+
+        port = mkOption {
+          type = types.port;
+          default =
+            if cfg.database.type == "mysql" then config.services.mysql.port
+            else if cfg.database.type == "pgsql" then config.services.postgresql.port
+            else 1521;
+          defaultText = literalExpression ''
+            if config.${opt.database.type} == "mysql" then config.${options.services.mysql.port}
+            else if config.${opt.database.type} == "pgsql" then config.${options.services.postgresql.port}
+            else 1521
+          '';
+          description = lib.mdDoc "Database host port.";
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "zabbix";
+          description = lib.mdDoc "Database name.";
+        };
+
+        user = mkOption {
+          type = types.str;
+          default = "zabbix";
+          description = lib.mdDoc "Database user.";
+        };
+
+        passwordFile = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/keys/zabbix-dbpassword";
+          description = lib.mdDoc ''
+            A file containing the password corresponding to
+            {option}`database.user`.
+          '';
+        };
+
+        socket = mkOption {
+          type = types.nullOr types.path;
+          default = null;
+          example = "/run/postgresql";
+          description = lib.mdDoc "Path to the unix socket file to use for authentication.";
+        };
+      };
+
+      virtualHost = mkOption {
+        type = types.submodule (import ../web-servers/apache-httpd/vhost-options.nix);
+        example = literalExpression ''
+          {
+            hostName = "zabbix.example.org";
+            adminAddr = "webmaster@example.org";
+            forceSSL = true;
+            enableACME = true;
+          }
+        '';
+        description = lib.mdDoc ''
+          Apache configuration can be done by adapting `services.httpd.virtualHosts.<name>`.
+          See [](#opt-services.httpd.virtualHosts) for further information.
+        '';
+      };
+
+      poolConfig = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool ]);
+        default = {
+          "pm" = "dynamic";
+          "pm.max_children" = 32;
+          "pm.start_servers" = 2;
+          "pm.min_spare_servers" = 2;
+          "pm.max_spare_servers" = 4;
+          "pm.max_requests" = 500;
+        };
+        description = lib.mdDoc ''
+          Options for the Zabbix PHP pool. See the documentation on `php-fpm.conf` for details on configuration directives.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Additional configuration to be copied verbatim into {file}`zabbix.conf.php`.
+        '';
+      };
+
+    };
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    services.zabbixWeb.extraConfig = optionalString ((versionAtLeast config.system.stateVersion "20.09") && (versionAtLeast cfg.package.version "5.0.0")) ''
+      $DB['DOUBLE_IEEE754'] = 'true';
+    '';
+
+    systemd.tmpfiles.rules = [
+      "d '${stateDir}' 0750 ${user} ${group} - -"
+      "d '${stateDir}/session' 0750 ${user} ${config.services.httpd.group} - -"
+    ];
+
+    services.phpfpm.pools.zabbix = {
+      inherit user;
+      group = config.services.httpd.group;
+      phpOptions = ''
+        # https://www.zabbix.com/documentation/current/manual/installation/install
+        memory_limit = 128M
+        post_max_size = 16M
+        upload_max_filesize = 2M
+        max_execution_time = 300
+        max_input_time = 300
+        session.auto_start = 0
+        mbstring.func_overload = 0
+        always_populate_raw_post_data = -1
+        # https://bbs.archlinux.org/viewtopic.php?pid=1745214#p1745214
+        session.save_path = ${stateDir}/session
+      '' + optionalString (config.time.timeZone != null) ''
+        date.timezone = "${config.time.timeZone}"
+      '' + optionalString (cfg.database.type == "oracle") ''
+        extension=${pkgs.phpPackages.oci8}/lib/php/extensions/oci8.so
+      '';
+      phpEnv.ZABBIX_CONFIG = "${zabbixConfig}";
+      settings = {
+        "listen.owner" = config.services.httpd.user;
+        "listen.group" = config.services.httpd.group;
+      } // cfg.poolConfig;
+    };
+
+    services.httpd = {
+      enable = true;
+      adminAddr = mkDefault cfg.virtualHost.adminAddr;
+      extraModules = [ "proxy_fcgi" ];
+      virtualHosts.${cfg.virtualHost.hostName} = mkMerge [ cfg.virtualHost {
+        documentRoot = mkForce "${cfg.package}/share/zabbix";
+        extraConfig = ''
+          <Directory "${cfg.package}/share/zabbix">
+            <FilesMatch "\.php$">
+              <If "-f %{REQUEST_FILENAME}">
+                SetHandler "proxy:unix:${fpm.socket}|fcgi://localhost/"
+              </If>
+            </FilesMatch>
+            AllowOverride all
+            Options -Indexes
+            DirectoryIndex index.php
+          </Directory>
+        '';
+      } ];
+    };
+
+    users.users.${user} = mapAttrs (name: mkDefault) {
+      description = "Zabbix daemon user";
+      uid = config.ids.uids.zabbix;
+      inherit group;
+    };
+
+    users.groups.${group} = mapAttrs (name: mkDefault) {
+      gid = config.ids.gids.zabbix;
+    };
+
+  };
+}
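
A minimal usage sketch for zabbixWeb with the default PostgreSQL backend; the host names are placeholders:

  services.zabbixWeb = {
    enable = true;
    database.type = "pgsql";              # the default, shown here for clarity
    virtualHost = {
      hostName = "zabbix.example.org";    # placeholder domain
      adminAddr = "webmaster@example.org";
    };
  };
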
diff --git a/nixpkgs/nixos/modules/services/web-apps/zitadel.nix b/nixpkgs/nixos/modules/services/web-apps/zitadel.nix
new file mode 100644
index 000000000000..99b0a0bc56f6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-apps/zitadel.nix
@@ -0,0 +1,223 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.services.zitadel;
+
+  settingsFormat = pkgs.formats.yaml { };
+in
+{
+  options.services.zitadel =
+    let inherit (lib) mkEnableOption mkOption mkPackageOption types;
+    in {
+      enable = mkEnableOption "ZITADEL, a user and identity access management platform";
+
+      package = mkPackageOption pkgs "ZITADEL" { default = [ "zitadel" ]; };
+
+      user = mkOption {
+        type = types.str;
+        default = "zitadel";
+        description = "The user to run ZITADEL under.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "zitadel";
+        description = "The group to run ZITADEL under.";
+      };
+
+      openFirewall = mkOption {
+        type = types.bool;
+        default = false;
+        description = ''
+          Whether to open the port specified in `settings.Port` in the firewall.
+        '';
+      };
+
+      masterKeyFile = mkOption {
+        type = types.path;
+        description = ''
+          Path to a file containing a master encryption key for ZITADEL. The
+          key must be 32 bytes.
+        '';
+      };
+
+      tlsMode = mkOption {
+        type = types.enum [ "external" "enabled" "disabled" ];
+        default = "external";
+        example = "enabled";
+        description = ''
+          The TLS mode to use. Options are:
+
+          - enabled: ZITADEL accepts HTTPS connections directly. You must
+            configure TLS if this option is selected.
+          - external: ZITADEL forces HTTPS connections, with TLS terminated at a
+            reverse proxy.
+          - disabled: ZITADEL accepts HTTP connections only. Should only be used
+            for testing.
+        '';
+      };
+
+      settings = mkOption {
+        type = lib.types.submodule {
+          freeformType = settingsFormat.type;
+
+          options = {
+            Port = mkOption {
+              type = types.port;
+              default = 8080;
+              description = "The port that ZITADEL listens on.";
+            };
+
+            TLS = {
+              KeyPath = mkOption {
+                type = types.nullOr types.path;
+                default = null;
+                description = "Path to the TLS certificate private key.";
+              };
+              Key = mkOption {
+                type = types.nullOr types.str;
+                default = null;
+                description = ''
+                  The TLS certificate private key, as a base64-encoded string.
+
+                  Note that the contents of this option will be added to the Nix
+                  store as world-readable plain text. Set
+                  [KeyPath](#opt-services.zitadel.settings.TLS.KeyPath) instead
+                  if this is undesired.
+                '';
+              };
+              CertPath = mkOption {
+                type = types.nullOr types.path;
+                default = null;
+                description = "Path to the TLS certificate.";
+              };
+              Cert = mkOption {
+                type = types.nullOr types.str;
+                default = null;
+                description = ''
+                  The TLS certificate, as a base64-encoded string.
+
+                  Note that the contents of this option will be added to the Nix
+                  store as world-readable plain text. Set
+                  [CertPath](#opt-services.zitadel.settings.TLS.CertPath) instead
+                  if this is undesired.
+                '';
+              };
+            };
+          };
+        };
+        default = { };
+        example = lib.literalExpression ''
+          {
+            Port = 8123;
+            ExternalDomain = "example.com";
+            TLS = {
+              CertPath = "/path/to/cert.pem";
+              KeyPath = "/path/to/cert.key";
+            };
+            Database.cockroach.Host = "db.example.com";
+          }
+        '';
+        description = ''
+          Contents of the runtime configuration file. See
+          https://zitadel.com/docs/self-hosting/manage/configure for more
+          details.
+        '';
+      };
+
+      extraSettingsPaths = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        description = ''
+          A list of paths to extra settings files. These will override the
+          values set in [settings](#opt-services.zitadel.settings). Useful if
+          you want to keep sensitive secrets out of the Nix store.
+        '';
+      };
+
+      steps = mkOption {
+        type = settingsFormat.type;
+        default = { };
+        example = lib.literalExpression ''
+          {
+            FirstInstance = {
+              InstanceName = "Example";
+              Org.Human = {
+                UserName = "foobar";
+                FirstName = "Foo";
+                LastName = "Bar";
+              };
+            };
+          }
+        '';
+        description = ''
+          Contents of the database initialization config file. See
+          https://zitadel.com/docs/self-hosting/manage/configure for more
+          details.
+        '';
+      };
+
+      extraStepsPaths = mkOption {
+        type = types.listOf types.path;
+        default = [ ];
+        description = ''
+          A list of paths to extra steps files. These will override the values
+          set in [steps](#opt-services.zitadel.steps). Useful if you want to
+          keep sensitive secrets out of the Nix store.
+        '';
+      };
+    };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [{
+      assertion = cfg.tlsMode == "enabled"
+        -> ((cfg.settings.TLS.Key != null || cfg.settings.TLS.KeyPath != null)
+        && (cfg.settings.TLS.Cert != null || cfg.settings.TLS.CertPath
+        != null));
+      message = ''
+        A TLS certificate and key must be configured in
+        services.zitadel.settings.TLS if services.zitadel.tlsMode is set to "enabled".
+      '';
+    }];
+
+    networking.firewall.allowedTCPPorts =
+      lib.mkIf cfg.openFirewall [ cfg.settings.Port ];
+
+    systemd.services.zitadel =
+      let
+        configFile = settingsFormat.generate "config.yaml" cfg.settings;
+        stepsFile = settingsFormat.generate "steps.yaml" cfg.steps;
+
+        args = lib.cli.toGNUCommandLineShell { } {
+          config = cfg.extraSettingsPaths ++ [ configFile ];
+          steps = cfg.extraStepsPaths ++ [ stepsFile ];
+          masterkeyFile = cfg.masterKeyFile;
+          inherit (cfg) tlsMode;
+        };
+      in
+      {
+        description = "ZITADEL identity access management";
+        path = [ cfg.package ];
+        wantedBy = [ "multi-user.target" ];
+
+        script = ''
+          zitadel start-from-init ${args}
+        '';
+
+        serviceConfig = {
+          Type = "simple";
+          User = cfg.user;
+          Group = cfg.group;
+          Restart = "on-failure";
+        };
+      };
+
+    users.users.zitadel = lib.mkIf (cfg.user == "zitadel") {
+      isSystemUser = true;
+      group = cfg.group;
+    };
+    users.groups.zitadel = lib.mkIf (cfg.group == "zitadel") { };
+  };
+
+  meta.maintainers = with lib.maintainers; [ Sorixelle ];
+}
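
A minimal usage sketch for ZITADEL behind an external TLS-terminating reverse proxy; the domain and key path are placeholders:

  services.zitadel = {
    enable = true;
    masterKeyFile = "/run/secrets/zitadel-master-key";  # hypothetical path to a 32-byte key
    tlsMode = "external";
    settings = {
      Port = 8080;
      ExternalDomain = "id.example.org";                # placeholder domain
    };
  };
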
diff --git a/nixpkgs/nixos/modules/services/web-servers/agate.nix b/nixpkgs/nixos/modules/services/web-servers/agate.nix
new file mode 100644
index 000000000000..a0c8a8c94ee5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/agate.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.agate;
+in
+{
+  options = {
+    services.agate = {
+      enable = mkEnableOption (lib.mdDoc "Agate Server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.agate;
+        defaultText = literalExpression "pkgs.agate";
+        description = lib.mdDoc "The package to use";
+      };
+
+      addresses = mkOption {
+        type = types.listOf types.str;
+        default = [ "0.0.0.0:1965" ];
+        description = lib.mdDoc ''
+          Addresses to listen on, in IP:PORT form. If you haven't disabled
+          forwarding, setting only the IPv4 address is enough.
+        '';
+      };
+
+      contentDir = mkOption {
+        default = "/var/lib/agate/content";
+        type = types.path;
+        description = lib.mdDoc "Root of the content directory.";
+      };
+
+      certificatesDir = mkOption {
+        default = "/var/lib/agate/certificates";
+        type = types.path;
+        description = lib.mdDoc "Root of the certificate directory.";
+      };
+
+      hostnames = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Domain names of this Gemini server. Setting this enables checking the
+          hostname and port in requests. (Multiple entries provide basic vhosts.)
+        '';
+      };
+
+      language = mkOption {
+        default = null;
+        type = types.nullOr types.str;
+        description = lib.mdDoc "RFC 4646 Language code for text/gemini documents.";
+      };
+
+      onlyTls_1_3 = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Only use TLSv1.3 (default also allows TLSv1.2).";
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [ "" ];
+        example = [ "--log-ip" ];
+        description = lib.mdDoc "Extra arguments to use running agate.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # Make the agate binary available for generating certificates by hand,
+    # since doing that with openssl can be a bit arduous.
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.agate = {
+      description = "Agate";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "network-online.target" ];
+
+      script =
+        let
+          prefixKeyList = key: list: concatMap (v: [ key v ]) list;
+          addresses = prefixKeyList "--addr" cfg.addresses;
+          hostnames = prefixKeyList "--hostname" cfg.hostnames;
+        in
+        ''
+          exec ${cfg.package}/bin/agate ${
+            escapeShellArgs (
+              [
+                "--content" "${cfg.contentDir}"
+                "--certs" "${cfg.certificatesDir}"
+              ] ++
+              addresses ++
+              (optionals (cfg.hostnames != []) hostnames) ++
+              (optionals (cfg.language != null) [ "--lang" cfg.language ]) ++
+              (optionals cfg.onlyTls_1_3 [ "--only-tls13" ]) ++
+              (optionals (cfg.extraArgs != []) cfg.extraArgs)
+            )
+          }
+        '';
+
+      serviceConfig = {
+        Restart = "always";
+        RestartSec = "5s";
+        DynamicUser = true;
+        StateDirectory = "agate";
+
+        # Security options:
+        AmbientCapabilities = "";
+        CapabilityBoundingSet = "";
+
+        # ProtectClock= adds DeviceAllow=char-rtc r
+        DeviceAllow = "";
+
+        LockPersonality = true;
+
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = true;
+
+        ProtectClock = true;
+        ProtectControlGroups = true;
+        ProtectHostname = true;
+        ProtectKernelLogs = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+
+        RestrictNamespaces = true;
+        RestrictAddressFamilies = [ "AF_INET" "AF_INET6" ];
+        RestrictRealtime = true;
+
+        SystemCallArchitectures = "native";
+        SystemCallErrorNumber = "EPERM";
+        SystemCallFilter = [
+          "@system-service"
+          "~@cpu-emulation"
+          "~@debug"
+          "~@keyring"
+          "~@memlock"
+          "~@obsolete"
+          "~@privileged"
+          "~@setuid"
+        ];
+      };
+    };
+  };
+}
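
A minimal usage sketch for Agate serving a Gemini capsule; the host name is a placeholder:

  services.agate = {
    enable = true;
    hostnames = [ "gemini.example.org" ];  # placeholder; enables hostname/port checking
    language = "en";                       # RFC 4646 language code for text/gemini
    # contentDir and certificatesDir keep their defaults under /var/lib/agate
  };
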
diff --git a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix
new file mode 100644
index 000000000000..588f5ee4d003
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/default.nix
@@ -0,0 +1,842 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.httpd;
+
+  certs = config.security.acme.certs;
+
+  runtimeDir = "/run/httpd";
+
+  pkg = cfg.package.out;
+
+  apachectl = pkgs.runCommand "apachectl" { meta.priority = -1; } ''
+    mkdir -p $out/bin
+    cp ${pkg}/bin/apachectl $out/bin/apachectl
+    sed -i $out/bin/apachectl -e 's|$HTTPD -t|$HTTPD -t -f /etc/httpd/httpd.conf|'
+  '';
+
+  php = cfg.phpPackage.override { apxs2Support = true; apacheHttpd = pkg; };
+
+  phpModuleName = let
+    majorVersion = lib.versions.major (lib.getVersion php);
+  in (if majorVersion == "8" then "php" else "php${majorVersion}");
+
+  mod_perl = pkgs.apacheHttpdPackages.mod_perl.override { apacheHttpd = pkg; };
+
+  vhosts = attrValues cfg.virtualHosts;
+
+  # certName is used later on to determine systemd service names.
+  acmeEnabledVhosts = map (hostOpts: hostOpts // {
+    certName = if hostOpts.useACMEHost != null then hostOpts.useACMEHost else hostOpts.hostName;
+  }) (filter (hostOpts: hostOpts.enableACME || hostOpts.useACMEHost != null) vhosts);
+
+  dependentCertNames = unique (map (hostOpts: hostOpts.certName) acmeEnabledVhosts);
+
+  mkListenInfo = hostOpts:
+    if hostOpts.listen != [] then
+      hostOpts.listen
+    else
+      optionals (hostOpts.onlySSL || hostOpts.addSSL || hostOpts.forceSSL) (map (addr: { ip = addr; port = 443; ssl = true; }) hostOpts.listenAddresses) ++
+      optionals (!hostOpts.onlySSL) (map (addr: { ip = addr; port = 80; ssl = false; }) hostOpts.listenAddresses)
+    ;
+
+  listenInfo = unique (concatMap mkListenInfo vhosts);
+
+  enableHttp2 = any (vhost: vhost.http2) vhosts;
+  enableSSL = any (listen: listen.ssl) listenInfo;
+  enableUserDir = any (vhost: vhost.enableUserDir) vhosts;
+
+  # NOTE: generally speaking, the order of modules is very important
+  modules =
+    [ # required apache modules our httpd service cannot run without
+      "authn_core" "authz_core"
+      "log_config"
+      "mime" "autoindex" "negotiation" "dir"
+      "alias" "rewrite"
+      "unixd" "slotmem_shm" "socache_shmcb"
+      "mpm_${cfg.mpm}"
+    ]
+    ++ (if cfg.mpm == "prefork" then [ "cgi" ] else [ "cgid" ])
+    ++ optional enableHttp2 "http2"
+    ++ optional enableSSL "ssl"
+    ++ optional enableUserDir "userdir"
+    ++ optional cfg.enableMellon { name = "auth_mellon"; path = "${pkgs.apacheHttpdPackages.mod_auth_mellon}/modules/mod_auth_mellon.so"; }
+    ++ optional cfg.enablePHP { name = phpModuleName; path = "${php}/modules/lib${phpModuleName}.so"; }
+    ++ optional cfg.enablePerl { name = "perl"; path = "${mod_perl}/modules/mod_perl.so"; }
+    ++ cfg.extraModules;
+
+  loggingConf = (if cfg.logFormat != "none" then ''
+    ErrorLog ${cfg.logDir}/error.log
+
+    LogLevel notice
+
+    LogFormat "%h %l %u %t \"%r\" %>s %b \"%{Referer}i\" \"%{User-Agent}i\"" combined
+    LogFormat "%h %l %u %t \"%r\" %>s %b" common
+    LogFormat "%{Referer}i -> %U" referer
+    LogFormat "%{User-agent}i" agent
+
+    CustomLog ${cfg.logDir}/access.log ${cfg.logFormat}
+  '' else ''
+    ErrorLog /dev/null
+  '');
+
+
+  browserHacks = ''
+    <IfModule mod_setenvif.c>
+        BrowserMatch "Mozilla/2" nokeepalive
+        BrowserMatch "MSIE 4\.0b2;" nokeepalive downgrade-1.0 force-response-1.0
+        BrowserMatch "RealPlayer 4\.0" force-response-1.0
+        BrowserMatch "Java/1\.0" force-response-1.0
+        BrowserMatch "JDK/1\.0" force-response-1.0
+        BrowserMatch "Microsoft Data Access Internet Publishing Provider" redirect-carefully
+        BrowserMatch "^WebDrive" redirect-carefully
+        BrowserMatch "^WebDAVFS/1.[012]" redirect-carefully
+        BrowserMatch "^gnome-vfs" redirect-carefully
+    </IfModule>
+  '';
+
+
+  sslConf = ''
+    <IfModule mod_ssl.c>
+        SSLSessionCache shmcb:${runtimeDir}/ssl_scache(512000)
+
+        Mutex posixsem
+
+        SSLRandomSeed startup builtin
+        SSLRandomSeed connect builtin
+
+        SSLProtocol ${cfg.sslProtocols}
+        SSLCipherSuite ${cfg.sslCiphers}
+        SSLHonorCipherOrder on
+    </IfModule>
+  '';
+
+
+  mimeConf = ''
+    TypesConfig ${pkg}/conf/mime.types
+
+    AddType application/x-x509-ca-cert .crt
+    AddType application/x-pkcs7-crl    .crl
+    AddType application/x-httpd-php    .php .phtml
+
+    <IfModule mod_mime_magic.c>
+        MIMEMagicFile ${pkg}/conf/magic
+    </IfModule>
+  '';
+
+  luaSetPaths = let
+    # support both lua and lua.withPackages derivations
+    luaversion = cfg.package.lua5.lua.luaversion or cfg.package.lua5.luaversion;
+    in
+  ''
+    <IfModule mod_lua.c>
+      LuaPackageCPath ${cfg.package.lua5}/lib/lua/${luaversion}/?.so
+      LuaPackagePath  ${cfg.package.lua5}/share/lua/${luaversion}/?.lua
+    </IfModule>
+  '';
+
+  mkVHostConf = hostOpts:
+    let
+      adminAddr = if hostOpts.adminAddr != null then hostOpts.adminAddr else cfg.adminAddr;
+      listen = filter (listen: !listen.ssl) (mkListenInfo hostOpts);
+      listenSSL = filter (listen: listen.ssl) (mkListenInfo hostOpts);
+
+      useACME = hostOpts.enableACME || hostOpts.useACMEHost != null;
+      sslCertDir =
+        if hostOpts.enableACME then certs.${hostOpts.hostName}.directory
+        else if hostOpts.useACMEHost != null then certs.${hostOpts.useACMEHost}.directory
+        else abort "This case should never happen.";
+
+      sslServerCert = if useACME then "${sslCertDir}/fullchain.pem" else hostOpts.sslServerCert;
+      sslServerKey = if useACME then "${sslCertDir}/key.pem" else hostOpts.sslServerKey;
+      sslServerChain = if useACME then "${sslCertDir}/chain.pem" else hostOpts.sslServerChain;
+
+      acmeChallenge = optionalString (useACME && hostOpts.acmeRoot != null) ''
+        Alias /.well-known/acme-challenge/ "${hostOpts.acmeRoot}/.well-known/acme-challenge/"
+        <Directory "${hostOpts.acmeRoot}">
+            AllowOverride None
+            Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec
+            Require method GET POST OPTIONS
+            Require all granted
+        </Directory>
+      '';
+    in
+      optionalString (listen != []) ''
+        <VirtualHost ${concatMapStringsSep " " (listen: "${listen.ip}:${toString listen.port}") listen}>
+            ServerName ${hostOpts.hostName}
+            ${concatMapStrings (alias: "ServerAlias ${alias}\n") hostOpts.serverAliases}
+            ${optionalString (adminAddr != null) "ServerAdmin ${adminAddr}"}
+            <IfModule mod_ssl.c>
+                SSLEngine off
+            </IfModule>
+            ${acmeChallenge}
+            ${if hostOpts.forceSSL then ''
+              <IfModule mod_rewrite.c>
+                  RewriteEngine on
+                  RewriteCond %{REQUEST_URI} !^/.well-known/acme-challenge [NC]
+                  RewriteCond %{HTTPS} off
+                  RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI}
+              </IfModule>
+            '' else mkVHostCommonConf hostOpts}
+        </VirtualHost>
+      '' +
+      optionalString (listenSSL != []) ''
+        <VirtualHost ${concatMapStringsSep " " (listen: "${listen.ip}:${toString listen.port}") listenSSL}>
+            ServerName ${hostOpts.hostName}
+            ${concatMapStrings (alias: "ServerAlias ${alias}\n") hostOpts.serverAliases}
+            ${optionalString (adminAddr != null) "ServerAdmin ${adminAddr}"}
+            SSLEngine on
+            SSLCertificateFile ${sslServerCert}
+            SSLCertificateKeyFile ${sslServerKey}
+            ${optionalString (sslServerChain != null) "SSLCertificateChainFile ${sslServerChain}"}
+            ${optionalString hostOpts.http2 "Protocols h2 h2c http/1.1"}
+            ${acmeChallenge}
+            ${mkVHostCommonConf hostOpts}
+        </VirtualHost>
+      ''
+  ;
+
+  mkVHostCommonConf = hostOpts:
+    let
+      documentRoot = if hostOpts.documentRoot != null
+        then hostOpts.documentRoot
+        else pkgs.emptyDirectory
+      ;
+
+      mkLocations = locations: concatStringsSep "\n" (map (config: ''
+        <Location ${config.location}>
+          ${optionalString (config.proxyPass != null) ''
+            <IfModule mod_proxy.c>
+                ProxyPass ${config.proxyPass}
+                ProxyPassReverse ${config.proxyPass}
+            </IfModule>
+          ''}
+          ${optionalString (config.index != null) ''
+            <IfModule mod_dir.c>
+                DirectoryIndex ${config.index}
+            </IfModule>
+          ''}
+          ${optionalString (config.alias != null) ''
+            <IfModule mod_alias.c>
+                Alias "${config.alias}"
+            </IfModule>
+          ''}
+          ${config.extraConfig}
+        </Location>
+      '') (sortProperties (mapAttrsToList (k: v: v // { location = k; }) locations)));
+    in
+      ''
+        ${optionalString cfg.logPerVirtualHost ''
+          ErrorLog ${cfg.logDir}/error-${hostOpts.hostName}.log
+          CustomLog ${cfg.logDir}/access-${hostOpts.hostName}.log ${hostOpts.logFormat}
+        ''}
+
+        ${optionalString (hostOpts.robotsEntries != "") ''
+          Alias /robots.txt ${pkgs.writeText "robots.txt" hostOpts.robotsEntries}
+        ''}
+
+        DocumentRoot "${documentRoot}"
+
+        <Directory "${documentRoot}">
+            Options Indexes FollowSymLinks
+            AllowOverride None
+            Require all granted
+        </Directory>
+
+        ${optionalString hostOpts.enableUserDir ''
+          UserDir public_html
+          UserDir disabled root
+          <Directory "/home/*/public_html">
+              AllowOverride FileInfo AuthConfig Limit Indexes
+              Options MultiViews Indexes SymLinksIfOwnerMatch IncludesNoExec
+              <Limit GET POST OPTIONS>
+                  Require all granted
+              </Limit>
+              <LimitExcept GET POST OPTIONS>
+                  Require all denied
+              </LimitExcept>
+          </Directory>
+        ''}
+
+        ${optionalString (hostOpts.globalRedirect != null && hostOpts.globalRedirect != "") ''
+          RedirectPermanent / ${hostOpts.globalRedirect}
+        ''}
+
+        ${
+          let makeDirConf = elem: ''
+                Alias ${elem.urlPath} ${elem.dir}/
+                <Directory ${elem.dir}>
+                    Options +Indexes
+                    Require all granted
+                    AllowOverride All
+                </Directory>
+              '';
+          in concatMapStrings makeDirConf hostOpts.servedDirs
+        }
+
+        ${mkLocations hostOpts.locations}
+        ${hostOpts.extraConfig}
+      ''
+  ;
+
+
+  confFile = pkgs.writeText "httpd.conf" ''
+
+    ServerRoot ${pkg}
+    ServerName ${config.networking.hostName}
+    DefaultRuntimeDir ${runtimeDir}/runtime
+
+    PidFile ${runtimeDir}/httpd.pid
+
+    ${optionalString (cfg.mpm != "prefork") ''
+      # mod_cgid requires this.
+      ScriptSock ${runtimeDir}/cgisock
+    ''}
+
+    <IfModule prefork.c>
+        MaxClients           ${toString cfg.maxClients}
+        MaxRequestsPerChild  ${toString cfg.maxRequestsPerChild}
+    </IfModule>
+
+    ${let
+        toStr = listen: "Listen ${listen.ip}:${toString listen.port} ${if listen.ssl then "https" else "http"}";
+        uniqueListen = uniqList {inputList = map toStr listenInfo;};
+      in concatStringsSep "\n" uniqueListen
+    }
+
+    User ${cfg.user}
+    Group ${cfg.group}
+
+    ${let
+        mkModule = module:
+          if isString module then { name = module; path = "${pkg}/modules/mod_${module}.so"; }
+          else if isAttrs module then { inherit (module) name path; }
+          else throw "Expecting either a string or attribute set including a name and path.";
+      in
+        concatMapStringsSep "\n" (module: "LoadModule ${module.name}_module ${module.path}") (unique (map mkModule modules))
+    }
+
+    AddHandler type-map var
+
+    <Files ~ "^\.ht">
+        Require all denied
+    </Files>
+
+    ${mimeConf}
+    ${loggingConf}
+    ${browserHacks}
+
+    Include ${pkg}/conf/extra/httpd-default.conf
+    Include ${pkg}/conf/extra/httpd-autoindex.conf
+    Include ${pkg}/conf/extra/httpd-multilang-errordoc.conf
+    Include ${pkg}/conf/extra/httpd-languages.conf
+
+    TraceEnable off
+
+    ${sslConf}
+
+    ${optionalString cfg.package.luaSupport luaSetPaths}
+
+    # Fascist default - deny access to everything.
+    <Directory />
+        Options FollowSymLinks
+        AllowOverride None
+        Require all denied
+    </Directory>
+
+    # But do allow access to files in the store so that we don't have
+    # to generate <Directory> clauses for every generated file that we
+    # want to serve.
+    <Directory /nix/store>
+        Require all granted
+    </Directory>
+
+    ${cfg.extraConfig}
+
+    ${concatMapStringsSep "\n" mkVHostConf vhosts}
+  '';
+
+  # Generate the PHP configuration file.  Should probably be factored
+  # out into a separate module.
+  phpIni = pkgs.runCommand "php.ini"
+    { options = cfg.phpOptions;
+      preferLocalBuild = true;
+    }
+    ''
+      cat ${php}/etc/php.ini > $out
+      cat ${php.phpIni} > $out
+      echo "$options" >> $out
+    '';
+
+  mkCertOwnershipAssertion = import ../../../security/acme/mk-cert-ownership-assertion.nix;
+in
+
+
+{
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "httpd" "extraSubservices" ] "Most existing subservices have been ported to the NixOS module system. Please update your configuration accordingly.")
+    (mkRemovedOptionModule [ "services" "httpd" "stateDir" ] "The httpd module now uses /run/httpd as a runtime directory.")
+    (mkRenamedOptionModule [ "services" "httpd" "multiProcessingModule" ] [ "services" "httpd" "mpm" ])
+
+    # virtualHosts options
+    (mkRemovedOptionModule [ "services" "httpd" "documentRoot" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "enableSSL" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "enableUserDir" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "globalRedirect" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "hostName" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "listen" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "robotsEntries" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "servedDirs" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "servedFiles" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "serverAliases" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "sslServerCert" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "sslServerChain" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+    (mkRemovedOptionModule [ "services" "httpd" "sslServerKey" ] "Please define a virtual host using `services.httpd.virtualHosts`.")
+  ];
+
+  # interface
+
+  options = {
+
+    services.httpd = {
+
+      enable = mkEnableOption (lib.mdDoc "the Apache HTTP Server");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.apacheHttpd;
+        defaultText = literalExpression "pkgs.apacheHttpd";
+        description = lib.mdDoc ''
+          Overridable attribute of the Apache HTTP Server package to use.
+        '';
+      };
+
+      configFile = mkOption {
+        type = types.path;
+        default = confFile;
+        defaultText = literalExpression "confFile";
+        example = literalExpression ''pkgs.writeText "httpd.conf" "# my custom config file ..."'';
+        description = lib.mdDoc ''
+          Override the configuration file used by Apache. By default,
+          NixOS generates one automatically.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration lines appended to the generated Apache
+          configuration file. Note that this mechanism will not work
+          when {option}`configFile` is overridden.
+        '';
+      };
+
+      extraModules = mkOption {
+        type = types.listOf types.unspecified;
+        default = [];
+        example = literalExpression ''
+          [
+            "proxy_connect"
+            { name = "jk"; path = "''${pkgs.tomcat_connectors}/modules/mod_jk.so"; }
+          ]
+        '';
+        description = lib.mdDoc ''
+          Additional Apache modules to be used. These can be
+          specified as a string in the case of modules distributed
+          with Apache, or as an attribute set specifying the
+          {var}`name` and {var}`path` of the
+          module.
+        '';
+      };
+
+      adminAddr = mkOption {
+        type = types.nullOr types.str;
+        example = "admin@example.org";
+        default = null;
+        description = lib.mdDoc "E-mail address of the server administrator.";
+      };
+
+      logFormat = mkOption {
+        type = types.str;
+        default = "common";
+        example = "combined";
+        description = lib.mdDoc ''
+          Log format for log files. Possible values are: combined, common, referer, agent, none.
+          See <https://httpd.apache.org/docs/2.4/logs.html> for more details.
+        '';
+      };
+
+      logPerVirtualHost = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          If enabled, each virtual host gets its own
+          {file}`access.log` and
+          {file}`error.log`, namely suffixed by the
+          {option}`hostName` of the virtual host.
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "wwwrun";
+        description = lib.mdDoc ''
+          User account under which httpd children processes run.
+
+          If you require the main httpd process to run as
+          `root` add the following configuration:
+          ```
+          systemd.services.httpd.serviceConfig.User = lib.mkForce "root";
+          ```
+        '';
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "wwwrun";
+        description = lib.mdDoc ''
+          Group under which httpd children processes run.
+        '';
+      };
+
+      logDir = mkOption {
+        type = types.path;
+        default = "/var/log/httpd";
+        description = lib.mdDoc ''
+          Directory for Apache's log files. It is created automatically.
+        '';
+      };
+
+      virtualHosts = mkOption {
+        type = with types; attrsOf (submodule (import ./vhost-options.nix));
+        default = {
+          localhost = {
+            documentRoot = "${pkg}/htdocs";
+          };
+        };
+        defaultText = literalExpression ''
+          {
+            localhost = {
+              documentRoot = "''${package.out}/htdocs";
+            };
+          }
+        '';
+        example = literalExpression ''
+          {
+            "foo.example.com" = {
+              forceSSL = true;
+              documentRoot = "/var/www/foo.example.com"
+            };
+            "bar.example.com" = {
+              addSSL = true;
+              documentRoot = "/var/www/bar.example.com";
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          Specification of the virtual hosts served by Apache. Each
+          element should be an attribute set specifying the
+          configuration of the virtual host.
+        '';
+      };
+
+      enableMellon = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the mod_auth_mellon module.";
+      };
+
+      enablePHP = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the PHP module.";
+      };
+
+      phpPackage = mkOption {
+        type = types.package;
+        default = pkgs.php;
+        defaultText = literalExpression "pkgs.php";
+        description = lib.mdDoc ''
+          Overridable attribute of the PHP package to use.
+        '';
+      };
+
+      enablePerl = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the Perl module (mod_perl).";
+      };
+
+      phpOptions = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          ''
+            date.timezone = "CET"
+          '';
+        description = lib.mdDoc ''
+          Options appended to the PHP configuration file {file}`php.ini`.
+        '';
+      };
+
+      mpm = mkOption {
+        type = types.enum [ "event" "prefork" "worker" ];
+        default = "event";
+        example = "worker";
+        description =
+          lib.mdDoc ''
+            Multi-processing module to be used by Apache. Available
+            modules are `prefork` (handles each
+            request in a separate child process), `worker`
+            (hybrid approach that starts a number of child processes
+            each running a number of threads) and `event`
+            (the default; a recent variant of `worker`
+            that handles persistent connections more efficiently).
+          '';
+      };
+
+      maxClients = mkOption {
+        type = types.int;
+        default = 150;
+        example = 8;
+        description = lib.mdDoc "Maximum number of httpd processes (prefork)";
+      };
+
+      maxRequestsPerChild = mkOption {
+        type = types.int;
+        default = 0;
+        example = 500;
+        description = lib.mdDoc ''
+          Maximum number of httpd requests answered per httpd child (prefork); 0 means unlimited.
+        '';
+      };
+
+      sslCiphers = mkOption {
+        type = types.str;
+        default = "HIGH:!aNULL:!MD5:!EXP";
+        description = lib.mdDoc "Cipher Suite available for negotiation in SSL proxy handshake.";
+      };
+
+      sslProtocols = mkOption {
+        type = types.str;
+        default = "All -SSLv2 -SSLv3 -TLSv1 -TLSv1.1";
+        example = "All -SSLv2 -SSLv3";
+        description = lib.mdDoc "Allowed SSL/TLS protocol versions.";
+      };
+    };
+
+  };
+
+  # implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = all (hostOpts: !hostOpts.enableSSL) vhosts;
+        message = ''
+          The option `services.httpd.virtualHosts.<name>.enableSSL` no longer has any effect; please remove it.
+          Select one of `services.httpd.virtualHosts.<name>.addSSL`, `services.httpd.virtualHosts.<name>.forceSSL`,
+          or `services.httpd.virtualHosts.<name>.onlySSL`.
+        '';
+      }
+      {
+        assertion = all (hostOpts: with hostOpts; !(addSSL && onlySSL) && !(forceSSL && onlySSL) && !(addSSL && forceSSL)) vhosts;
+        message = ''
+          Options `services.httpd.virtualHosts.<name>.addSSL`,
+          `services.httpd.virtualHosts.<name>.onlySSL` and `services.httpd.virtualHosts.<name>.forceSSL`
+          are mutually exclusive.
+        '';
+      }
+      {
+        assertion = all (hostOpts: !(hostOpts.enableACME && hostOpts.useACMEHost != null)) vhosts;
+        message = ''
+          Options `services.httpd.virtualHosts.<name>.enableACME` and
+          `services.httpd.virtualHosts.<name>.useACMEHost` are mutually exclusive.
+        '';
+      }
+      {
+        assertion = cfg.enablePHP -> php.ztsSupport;
+        message = ''
+          The PHP package provided by `services.httpd.phpPackage` is not built with ZTS support. Please
+          ensure the PHP package has ZTS support by setting `services.httpd.phpPackage = php.override { ztsSupport = true; };`
+        '';
+      }
+    ] ++ map (name: mkCertOwnershipAssertion {
+      inherit (cfg) group user;
+      cert = config.security.acme.certs.${name};
+      groups = config.users.groups;
+    }) dependentCertNames;
+
+    warnings =
+      mapAttrsToList (name: hostOpts: ''
+        Using config.services.httpd.virtualHosts."${name}".servedFiles is deprecated and will become unsupported in a future release. Your configuration will continue to work as is but please migrate your configuration to config.services.httpd.virtualHosts."${name}".locations before the 20.09 release of NixOS.
+      '') (filterAttrs (name: hostOpts: hostOpts.servedFiles != []) cfg.virtualHosts);
+
+    users.users = optionalAttrs (cfg.user == "wwwrun") {
+      wwwrun = {
+        group = cfg.group;
+        description = "Apache httpd user";
+        uid = config.ids.uids.wwwrun;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "wwwrun") {
+      wwwrun.gid = config.ids.gids.wwwrun;
+    };
+
+    security.acme.certs = let
+      acmePairs = map (hostOpts: let
+        hasRoot = hostOpts.acmeRoot != null;
+      in nameValuePair hostOpts.hostName {
+        group = mkDefault cfg.group;
+        # If acmeRoot is null, inherit from config.security.acme.
+        # Since config.security.acme.certs.<cert>.webroot's own default value
+        # should take precedence, set the priority higher than mkOptionDefault.
+        webroot = mkOverride (if hasRoot then 1000 else 2000) hostOpts.acmeRoot;
+        # Also nudge dnsProvider to null in case it is inherited
+        dnsProvider = mkOverride (if hasRoot then 1000 else 2000) null;
+        extraDomainNames = hostOpts.serverAliases;
+        # Use the vhost-specific email address if provided, otherwise let
+        # security.acme.email or security.acme.certs.<cert>.email be used.
+        email = mkOverride 2000 (if hostOpts.adminAddr != null then hostOpts.adminAddr else cfg.adminAddr);
+      # Filter for enableACME-only vhosts. Don't want to create dud certs
+      }) (filter (hostOpts: hostOpts.useACMEHost == null) acmeEnabledVhosts);
+    in listToAttrs acmePairs;
+
+    # httpd requires a stable path to the configuration file for reloads
+    environment.etc."httpd/httpd.conf".source = cfg.configFile;
+    environment.systemPackages = [
+      apachectl
+      pkg
+    ];
+
+    services.logrotate = optionalAttrs (cfg.logFormat != "none") {
+      enable = mkDefault true;
+      settings.httpd = {
+        files = "${cfg.logDir}/*.log";
+        su = "${cfg.user} ${cfg.group}";
+        frequency = "daily";
+        rotate = 28;
+        sharedscripts = true;
+        compress = true;
+        delaycompress = true;
+        postrotate = "systemctl reload httpd.service > /dev/null 2>/dev/null || true";
+      };
+    };
+
+    services.httpd.phpOptions =
+      ''
+        ; Don't advertise PHP
+        expose_php = off
+      '' + optionalString (config.time.timeZone != null) ''
+
+        ; Apparently PHP doesn't use $TZ.
+        date.timezone = "${config.time.timeZone}"
+      '';
+
+    services.httpd.extraModules = mkBefore [
+      # HTTP authentication mechanisms: basic and digest.
+      "auth_basic" "auth_digest"
+
+      # Authentication: is the user who he claims to be?
+      "authn_file" "authn_dbm" "authn_anon"
+
+      # Authorization: is the user allowed access?
+      "authz_user" "authz_groupfile" "authz_host"
+
+      # Other modules.
+      "ext_filter" "include" "env" "mime_magic"
+      "cern_meta" "expires" "headers" "usertrack" "setenvif"
+      "dav" "status" "asis" "info" "dav_fs"
+      "vhost_alias" "imagemap" "actions" "speling"
+      "proxy" "proxy_http"
+      "cache" "cache_disk"
+
+      # For compatibility with old configurations, the new module mod_access_compat is provided.
+      "access_compat"
+    ];
+
+    systemd.tmpfiles.rules =
+      let
+        svc = config.systemd.services.httpd.serviceConfig;
+      in
+        [
+          "d '${cfg.logDir}' 0700 ${svc.User} ${svc.Group}"
+          "Z '${cfg.logDir}' - ${svc.User} ${svc.Group}"
+        ];
+
+    systemd.services.httpd = {
+        description = "Apache HTTPD";
+        wantedBy = [ "multi-user.target" ];
+        wants = concatLists (map (certName: [ "acme-finished-${certName}.target" ]) dependentCertNames);
+        after = [ "network.target" ] ++ map (certName: "acme-selfsigned-${certName}.service") dependentCertNames;
+        before = map (certName: "acme-${certName}.service") dependentCertNames;
+        restartTriggers = [ cfg.configFile ];
+
+        path = [ pkg pkgs.coreutils pkgs.gnugrep ];
+
+        environment =
+          optionalAttrs cfg.enablePHP { PHPRC = phpIni; }
+          // optionalAttrs cfg.enableMellon { LD_LIBRARY_PATH  = "${pkgs.xmlsec}/lib"; };
+
+        preStart =
+          ''
+            # Get rid of old semaphores.  These tend to accumulate across
+            # server restarts, eventually preventing it from restarting
+            # successfully.
+            for i in $(${pkgs.util-linux}/bin/ipcs -s | grep ' ${cfg.user} ' | cut -f2 -d ' '); do
+                ${pkgs.util-linux}/bin/ipcrm -s $i
+            done
+          '';
+
+        serviceConfig = {
+          ExecStart = "@${pkg}/bin/httpd httpd -f /etc/httpd/httpd.conf";
+          ExecStop = "${pkg}/bin/httpd -f /etc/httpd/httpd.conf -k graceful-stop";
+          ExecReload = "${pkg}/bin/httpd -f /etc/httpd/httpd.conf -k graceful";
+          User = cfg.user;
+          Group = cfg.group;
+          Type = "forking";
+          PIDFile = "${runtimeDir}/httpd.pid";
+          Restart = "always";
+          RestartSec = "5s";
+          RuntimeDirectory = "httpd httpd/runtime";
+          RuntimeDirectoryMode = "0750";
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        };
+      };
+
+    # postRun hooks on cert renew can't be used to restart Apache since renewal
+    # runs as the unprivileged acme user. sslTargets are added to wantedBy + before
+    # which allows the acme-finished-$cert.target to signify the successful updating
+    # of certs end-to-end.
+    systemd.services.httpd-config-reload = let
+      sslServices = map (certName: "acme-${certName}.service") dependentCertNames;
+      sslTargets = map (certName: "acme-finished-${certName}.target") dependentCertNames;
+    in mkIf (sslServices != []) {
+      wantedBy = sslServices ++ [ "multi-user.target" ];
+      # Before the finished targets, after the renew services.
+      # This service might be needed for HTTP-01 challenges, but we only want to confirm
+      # certs are updated _after_ config has been reloaded.
+      before = sslTargets;
+      after = sslServices;
+      restartTriggers = [ cfg.configFile ];
+      # Block reloading if not all certs exist yet.
+      # Happens when config changes add new vhosts/certs.
+      unitConfig.ConditionPathExists = map (certName: certs.${certName}.directory + "/fullchain.pem") dependentCertNames;
+      serviceConfig = {
+        Type = "oneshot";
+        TimeoutSec = 60;
+        ExecCondition = "/run/current-system/systemd/bin/systemctl -q is-active httpd.service";
+        ExecStartPre = "${pkg}/bin/httpd -f /etc/httpd/httpd.conf -t";
+        ExecStart = "/run/current-system/systemd/bin/systemctl reload httpd.service";
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/location-options.nix b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/location-options.nix
new file mode 100644
index 000000000000..f2d4f8357047
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/location-options.nix
@@ -0,0 +1,54 @@
+{ config, lib, name, ... }:
+let
+  inherit (lib) mkOption types;
+in
+{
+  options = {
+
+    proxyPass = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "http://www.example.org/";
+      description = lib.mdDoc ''
+        Sets up a simple reverse proxy as described by <https://httpd.apache.org/docs/2.4/howto/reverse_proxy.html#simple>.
+      '';
+    };
+
+    index = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      example = "index.php index.html";
+      description = lib.mdDoc ''
+        Adds DirectoryIndex directive. See <https://httpd.apache.org/docs/2.4/mod/mod_dir.html#directoryindex>.
+      '';
+    };
+
+    alias = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      example = "/your/alias/directory";
+      description = lib.mdDoc ''
+        Alias directory for requests. See <https://httpd.apache.org/docs/2.4/mod/mod_alias.html#alias>.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        These lines go to the end of the location verbatim.
+      '';
+    };
+
+    priority = mkOption {
+      type = types.int;
+      default = 1000;
+      description = lib.mdDoc ''
+        Order of this location block in relation to the others in the vhost.
+        The semantics are the same as with `lib.mkOrder`. Smaller values have
+        a greater priority.
+      '';
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/apache-httpd/vhost-options.nix b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/vhost-options.nix
new file mode 100644
index 000000000000..7b87f9ef4bde
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/apache-httpd/vhost-options.nix
@@ -0,0 +1,291 @@
+{ config, lib, name, ... }:
+let
+  inherit (lib) literalExpression mkOption nameValuePair types;
+in
+{
+  options = {
+
+    hostName = mkOption {
+      type = types.str;
+      default = name;
+      description = lib.mdDoc "Canonical hostname for the server.";
+    };
+
+    serverAliases = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = ["www.example.org" "www.example.org:8080" "example.org"];
+      description = lib.mdDoc ''
+        Additional names of virtual hosts served by this virtual host configuration.
+      '';
+    };
+
+    listen = mkOption {
+      type = with types; listOf (submodule ({
+        options = {
+          port = mkOption {
+            type = types.port;
+            description = lib.mdDoc "Port to listen on";
+          };
+          ip = mkOption {
+            type = types.str;
+            default = "*";
+            description = lib.mdDoc "IP to listen on. 0.0.0.0 for IPv4 only, * for all.";
+          };
+          ssl = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc "Whether to enable SSL (https) support.";
+          };
+        };
+      }));
+      default = [];
+      example = [
+        { ip = "195.154.1.1"; port = 443; ssl = true;}
+        { ip = "192.154.1.1"; port = 80; }
+        { ip = "*"; port = 8080; }
+      ];
+      description = lib.mdDoc ''
+        Listen addresses and ports for this virtual host.
+
+        ::: {.note}
+        This option overrides `addSSL`, `forceSSL` and `onlySSL`.
+
+        If you only want to set the addresses manually and not the ports, take a look at `listenAddresses`.
+        :::
+      '';
+    };
+
+    listenAddresses = mkOption {
+      type = with types; nonEmptyListOf str;
+
+      description = lib.mdDoc ''
+        Listen addresses for this virtual host.
+        Compared to `listen` this only sets the addresses
+        and the ports are chosen automatically.
+      '';
+      default = [ "*" ];
+      example = [ "127.0.0.1" ];
+    };
+
+    enableSSL = mkOption {
+      type = types.bool;
+      visible = false;
+      default = false;
+    };
+
+    addSSL = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable HTTPS in addition to plain HTTP. This will set defaults for
+        `listen` to listen on all interfaces on the respective default
+        ports (80, 443).
+      '';
+    };
+
+    onlySSL = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable HTTPS and reject plain HTTP connections. This will set
+        defaults for `listen` to listen on all interfaces on port 443.
+      '';
+    };
+
+    forceSSL = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to add a separate virtual host that permanently redirects (301)
+        all plain HTTP traffic to HTTPS. This will set defaults for
+        `listen` to listen on all interfaces on the respective default
+        ports (80, 443), where the non-SSL listens are used for the redirect vhosts.
+      '';
+    };
+
+    enableACME = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to ask Let's Encrypt to sign a certificate for this vhost.
+        Alternately, you can use an existing certificate through {option}`useACMEHost`.
+      '';
+    };
+
+    useACMEHost = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        A host of an existing Let's Encrypt certificate to use.
+        This is useful if you have many subdomains and want to avoid hitting the
+        [rate limit](https://letsencrypt.org/docs/rate-limits).
+        Alternately, you can generate a certificate through {option}`enableACME`.
+        *Note that this option does not create any certificates, nor does it add subdomains to existing ones – you will need to create them manually using [](#opt-security.acme.certs).*
+      '';
+    };
+
+    acmeRoot = mkOption {
+      type = types.nullOr types.str;
+      default = "/var/lib/acme/acme-challenge";
+      description = lib.mdDoc ''
+        Directory for the ACME challenge, which is PUBLIC; don't put certs or keys in here.
+        Set to null to inherit from config.security.acme.
+      '';
+    };
+
+    sslServerCert = mkOption {
+      type = types.path;
+      example = "/var/host.cert";
+      description = lib.mdDoc "Path to server SSL certificate.";
+    };
+
+    sslServerKey = mkOption {
+      type = types.path;
+      example = "/var/host.key";
+      description = lib.mdDoc "Path to server SSL certificate key.";
+    };
+
+    sslServerChain = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/var/ca.pem";
+      description = lib.mdDoc "Path to server SSL chain file.";
+    };
+
+    http2 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable HTTP/2. HTTP/2 is supported in all multi-processing modules that come with httpd. *However, if you use the prefork mpm, there will
+        be severe restrictions.* Refer to <https://httpd.apache.org/docs/2.4/howto/http2.html#mpm-config> for details.
+      '';
+    };
+
+    adminAddr = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "admin@example.org";
+      description = lib.mdDoc "E-mail address of the server administrator.";
+    };
+
+    documentRoot = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/data/webserver/docs";
+      description = lib.mdDoc ''
+        The path of Apache's document root directory.  If left undefined,
+        an empty directory in the Nix store will be used as root.
+      '';
+    };
+
+    servedDirs = mkOption {
+      type = types.listOf types.attrs;
+      default = [];
+      example = [
+        { urlPath = "/nix";
+          dir = "/home/eelco/Dev/nix-homepage";
+        }
+      ];
+      description = lib.mdDoc ''
+        This option provides a simple way to serve static directories.
+      '';
+    };
+
+    servedFiles = mkOption {
+      type = types.listOf types.attrs;
+      default = [];
+      example = [
+        { urlPath = "/foo/bar.png";
+          file = "/home/eelco/some-file.png";
+        }
+      ];
+      description = lib.mdDoc ''
+        This option provides a simple way to serve individual, static files.
+
+        ::: {.note}
+        This option has been deprecated and will be removed in a future
+        version of NixOS. You can achieve the same result by making use of
+        the `locations.<name>.alias` option.
+        :::
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        <Directory /home>
+          Options FollowSymlinks
+          AllowOverride All
+        </Directory>
+      '';
+      description = lib.mdDoc ''
+        These lines go to httpd.conf verbatim. They will go after
+        directories and directory aliases defined by default.
+      '';
+    };
+
+    enableUserDir = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable serving {file}`~/public_html` as
+        `/~«username»`.
+      '';
+    };
+
+    globalRedirect = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "http://newserver.example.org/";
+      description = lib.mdDoc ''
+        If set, all requests for this host are redirected permanently to
+        the given URL.
+      '';
+    };
+
+    logFormat = mkOption {
+      type = types.str;
+      default = "common";
+      example = "combined";
+      description = lib.mdDoc ''
+        Log format for Apache's log files. Possible values are: combined, common, referer, agent.
+      '';
+    };
+
+    robotsEntries = mkOption {
+      type = types.lines;
+      default = "";
+      example = "Disallow: /foo/";
+      description = lib.mdDoc ''
+        Specification of pages to be ignored by web crawlers. See <http://www.robotstxt.org/> for details.
+      '';
+    };
+
+    locations = mkOption {
+      type = with types; attrsOf (submodule (import ./location-options.nix));
+      default = {};
+      example = literalExpression ''
+        {
+          "/" = {
+            proxyPass = "http://localhost:3000";
+          };
+          "/foo/bar.png" = {
+            alias = "/home/eelco/some-file.png";
+          };
+        };
+      '';
+      description = lib.mdDoc ''
+        Declarative location config. See <https://httpd.apache.org/docs/2.4/mod/core.html#location> for details.
+      '';
+    };
+
+  };
+
+  config = {
+
+    locations = builtins.listToAttrs (map (elem: nameValuePair elem.urlPath { alias = elem.file; }) config.servedFiles);
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/caddy/default.nix b/nixpkgs/nixos/modules/services/web-servers/caddy/default.nix
new file mode 100644
index 000000000000..dcacb40e4681
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/caddy/default.nix
@@ -0,0 +1,413 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.caddy;
+
+  virtualHosts = attrValues cfg.virtualHosts;
+  acmeVHosts = filter (hostOpts: hostOpts.useACMEHost != null) virtualHosts;
+
+  mkVHostConf = hostOpts:
+    let
+      sslCertDir = config.security.acme.certs.${hostOpts.useACMEHost}.directory;
+    in
+      ''
+        ${hostOpts.hostName} ${concatStringsSep " " hostOpts.serverAliases} {
+          ${optionalString (hostOpts.listenAddresses != [ ]) "bind ${concatStringsSep " " hostOpts.listenAddresses}"}
+          ${optionalString (hostOpts.useACMEHost != null) "tls ${sslCertDir}/cert.pem ${sslCertDir}/key.pem"}
+          log {
+            ${hostOpts.logFormat}
+          }
+
+          ${hostOpts.extraConfig}
+        }
+      '';
+
+  settingsFormat = pkgs.formats.json { };
+
+  configFile =
+    if cfg.settings != { } then
+      settingsFormat.generate "caddy.json" cfg.settings
+    else
+      let
+        Caddyfile = pkgs.writeTextDir "Caddyfile" ''
+          {
+            ${cfg.globalConfig}
+          }
+          ${cfg.extraConfig}
+          ${concatMapStringsSep "\n" mkVHostConf virtualHosts}
+        '';
+
+        Caddyfile-formatted = pkgs.runCommand "Caddyfile-formatted" { nativeBuildInputs = [ cfg.package ]; } ''
+          mkdir -p $out
+          cp --no-preserve=mode ${Caddyfile}/Caddyfile $out/Caddyfile
+          caddy fmt --overwrite $out/Caddyfile
+        '';
+      in
+      "${if pkgs.stdenv.buildPlatform == pkgs.stdenv.hostPlatform then Caddyfile-formatted else Caddyfile}/Caddyfile";
+
+  etcConfigFile = "caddy/caddy_config";
+
+  configPath = "/etc/${etcConfigFile}";
+
+  acmeHosts = unique (catAttrs "useACMEHost" acmeVHosts);
+
+  mkCertOwnershipAssertion = import ../../../security/acme/mk-cert-ownership-assertion.nix;
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "services" "caddy" "agree" ] "this option is no longer necessary for Caddy 2")
+    (mkRenamedOptionModule [ "services" "caddy" "ca" ] [ "services" "caddy" "acmeCA" ])
+    (mkRenamedOptionModule [ "services" "caddy" "config" ] [ "services" "caddy" "extraConfig" ])
+  ];
+
+  # interface
+  options.services.caddy = {
+    enable = mkEnableOption (lib.mdDoc "Caddy web server");
+
+    user = mkOption {
+      default = "caddy";
+      type = types.str;
+      description = lib.mdDoc ''
+        User account under which caddy runs.
+
+        ::: {.note}
+        If left as the default value this user will automatically be created
+        on system activation, otherwise you are responsible for
+        ensuring the user exists before the Caddy service starts.
+        :::
+      '';
+    };
+
+    group = mkOption {
+      default = "caddy";
+      type = types.str;
+      description = lib.mdDoc ''
+        Group account under which caddy runs.
+
+        ::: {.note}
+        If left as the default value this group will automatically be created
+        on system activation, otherwise you are responsible for
+        ensuring the group exists before the Caddy service starts.
+        :::
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.caddy;
+      defaultText = literalExpression "pkgs.caddy";
+      type = types.package;
+      description = lib.mdDoc ''
+        Caddy package to use.
+      '';
+    };
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/caddy";
+      description = lib.mdDoc ''
+        The data directory for caddy.
+
+        ::: {.note}
+        If left as the default value this directory will automatically be created
+        before the Caddy server starts, otherwise you are responsible for ensuring
+        the directory exists with appropriate ownership and permissions.
+
+        Caddy v2 replaced `CADDYPATH` with XDG directories.
+        See <https://caddyserver.com/docs/conventions#file-locations>.
+        :::
+      '';
+    };
+
+    logDir = mkOption {
+      type = types.path;
+      default = "/var/log/caddy";
+      description = lib.mdDoc ''
+        Directory for storing Caddy access logs.
+
+        ::: {.note}
+        If left as the default value this directory will automatically be created
+        before the Caddy server starts, otherwise you are responsible for
+        ensuring the directory exists with appropriate ownership and permissions.
+        :::
+      '';
+    };
+
+    logFormat = mkOption {
+      type = types.lines;
+      default = ''
+        level ERROR
+      '';
+      example = literalExpression ''
+        mkForce "level INFO";
+      '';
+      description = lib.mdDoc ''
+        Configuration for the default logger. See
+        <https://caddyserver.com/docs/caddyfile/options#log>
+        for details.
+      '';
+    };
+
+    configFile = mkOption {
+      type = types.path;
+      default = configFile;
+      defaultText = "A Caddyfile automatically generated by values from services.caddy.*";
+      example = literalExpression ''
+        pkgs.writeTextDir "Caddyfile" '''
+          example.com
+
+          root * /var/www/wordpress
+          php_fastcgi unix//run/php/php-version-fpm.sock
+          file_server
+        ''';
+      '';
+      description = lib.mdDoc ''
+        Override the configuration file used by Caddy. By default,
+        NixOS generates one automatically.
+
+        The configuration file is exposed at {file}`${configPath}`.
+      '';
+    };
+
+    adapter = mkOption {
+      default = if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null;
+      defaultText = literalExpression ''
+        if (builtins.baseNameOf cfg.configFile) == "Caddyfile" then "caddyfile" else null
+      '';
+      example = literalExpression "nginx";
+      type = with types; nullOr str;
+      description = lib.mdDoc ''
+        Name of the config adapter to use.
+        See <https://caddyserver.com/docs/config-adapters>
+        for the full list.
+
+        If `null` is specified, the `--adapter` argument is omitted when
+        starting or restarting Caddy. Notably, this allows specification of a
+        configuration file in Caddy's native JSON format, as long as the
+        filename does not start with `Caddyfile` (in which case the `caddyfile`
+        adapter is implicitly enabled). See
+        <https://caddyserver.com/docs/command-line#caddy-run> for details.
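+
+        For example, a configuration in Caddy's native JSON format could be
+        supplied like this (`./caddy-config.json` is a hypothetical file next
+        to your NixOS configuration):
+
+        ```nix
+        services.caddy = {
+          enable = true;
+          # The basename is not "Caddyfile", so `adapter` defaults to null and
+          # the file is passed to Caddy as native JSON.
+          configFile = ./caddy-config.json;
+        };
+        ```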
+
+        ::: {.note}
+        Any value other than `null` or `caddyfile` is only valid when providing
+        your own `configFile`.
+        :::
+      '';
+    };
+
+    resume = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Use saved config, if any (and prefer over any specified configuration passed with `--config`).
+      '';
+    };
+
+    globalConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        debug
+        servers {
+          protocol {
+            experimental_http3
+          }
+        }
+      '';
+      description = lib.mdDoc ''
+        Additional lines of configuration appended to the global config section
+        of the `Caddyfile`.
+
+        Refer to <https://caddyserver.com/docs/caddyfile/options#global-options>
+        for details on supported values.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        example.com {
+          encode gzip
+          log
+          root /srv/http
+        }
+      '';
+      description = lib.mdDoc ''
+        Additional lines of configuration appended to the automatically
+        generated `Caddyfile`.
+      '';
+    };
+
+    virtualHosts = mkOption {
+      type = with types; attrsOf (submodule (import ./vhost-options.nix { inherit cfg; }));
+      default = {};
+      example = literalExpression ''
+        {
+          "hydra.example.com" = {
+            serverAliases = [ "www.hydra.example.com" ];
+            extraConfig = '''
+              encode gzip
+              root /srv/http
+            ''';
+          };
+        };
+      '';
+      description = lib.mdDoc ''
+        Declarative specification of virtual hosts served by Caddy.
+      '';
+    };
+
+    acmeCA = mkOption {
+      default = null;
+      example = "https://acme-v02.api.letsencrypt.org/directory";
+      type = with types; nullOr str;
+      description = lib.mdDoc ''
+        ::: {.note}
+        Sets the [`acme_ca` option](https://caddyserver.com/docs/caddyfile/options#acme-ca)
+        in the global options block of the resulting Caddyfile.
+        :::
+
+        The URL to the ACME CA's directory. It is strongly recommended to set
+        this to `https://acme-staging-v02.api.letsencrypt.org/directory` for
+        Let's Encrypt's [staging endpoint](https://letsencrypt.org/docs/staging-environment/)
+        while testing or in development.
+
+        The value `null` should be preferred for production setups,
+        as it omits the `acme_ca` option to enable
+        [automatic issuer fallback](https://caddyserver.com/docs/automatic-https#issuer-fallback).
+      '';
+    };
+
+    email = mkOption {
+      default = null;
+      type = with types; nullOr str;
+      description = lib.mdDoc ''
+        Your email address. Mainly used when creating an ACME account with your
+        CA, and is highly recommended in case there are problems with your
+        certificates.
+      '';
+    };
+
+    enableReload = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Reload Caddy instead of restarting it when configuration file changes.
+
+        Note that enabling this option requires the [admin API](https://caddyserver.com/docs/caddyfile/options#admin)
+        to not be turned off.
+
+        If you enable this option, consider setting [`grace_period`](https://caddyserver.com/docs/caddyfile/options#grace-period)
+        to a non-infinite value in {option}`services.caddy.globalConfig`
+        to prevent Caddy from waiting for active connections to finish,
+        which could delay the reload essentially indefinitely.
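+
+        A minimal sketch of such a setting (the 10 second value is only an
+        illustration):
+
+        ```nix
+        services.caddy.globalConfig = '''
+          grace_period 10s
+        ''';
+        ```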
+      '';
+    };
+
+    settings = mkOption {
+      type = settingsFormat.type;
+      default = {};
+      description = lib.mdDoc ''
+        Structured configuration for Caddy to generate a Caddy JSON configuration file.
+        See <https://caddyserver.com/docs/json/> for available options.
+
+        ::: {.warning}
+        Using a [Caddyfile](https://caddyserver.com/docs/caddyfile) instead of a JSON config is highly recommended by upstream.
+        There are very few exceptions to this.
+
+        Please use a Caddyfile via {option}`services.caddy.configFile`, {option}`services.caddy.virtualHosts` or
+        {option}`services.caddy.extraConfig` with {option}`services.caddy.globalConfig` instead.
+        :::
+
+        ::: {.note}
+        Takes precedence over most `services.caddy.*` options, such as {option}`services.caddy.configFile` and {option}`services.caddy.virtualHosts`, if specified.
+        :::
+      '';
+    };
+  };
+
+  # implementation
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = cfg.configFile == configFile -> cfg.adapter == "caddyfile" || cfg.adapter == null;
+        message = "To specify an adapter other than 'caddyfile' please provide your own configuration via `services.caddy.configFile`";
+      }
+    ] ++ map (name: mkCertOwnershipAssertion {
+      inherit (cfg) group user;
+      cert = config.security.acme.certs.${name};
+      groups = config.users.groups;
+    }) acmeHosts;
+
+    services.caddy.globalConfig = ''
+      ${optionalString (cfg.email != null) "email ${cfg.email}"}
+      ${optionalString (cfg.acmeCA != null) "acme_ca ${cfg.acmeCA}"}
+      log {
+        ${cfg.logFormat}
+      }
+    '';
+
+    # https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size
+    boot.kernel.sysctl."net.core.rmem_max" = mkDefault 2500000;
+
+    systemd.packages = [ cfg.package ];
+    systemd.services.caddy = {
+      wants = map (hostOpts: "acme-finished-${hostOpts.useACMEHost}.target") acmeVHosts;
+      after = map (hostOpts: "acme-selfsigned-${hostOpts.useACMEHost}.service") acmeVHosts;
+      before = map (hostOpts: "acme-${hostOpts.useACMEHost}.service") acmeVHosts;
+
+      wantedBy = [ "multi-user.target" ];
+      startLimitIntervalSec = 14400;
+      startLimitBurst = 10;
+      reloadTriggers = optional cfg.enableReload cfg.configFile;
+
+      serviceConfig = let
+        runOptions = ''--config ${configPath} ${optionalString (cfg.adapter != null) "--adapter ${cfg.adapter}"}'';
+      in {
+        # https://www.freedesktop.org/software/systemd/man/systemd.service.html#ExecStart=
+        # If the empty string is assigned to this option, the list of commands to start is reset, prior assignments of this option will have no effect.
+        ExecStart = [ "" ''${cfg.package}/bin/caddy run ${runOptions} ${optionalString cfg.resume "--resume"}'' ];
+        # Validating the configuration before applying it ensures we’ll get a proper error that will be reported when switching to the new configuration
+        ExecReload = [ "" ''${cfg.package}/bin/caddy reload ${runOptions} --force'' ];
+        User = cfg.user;
+        Group = cfg.group;
+        ReadWriteDirectories = cfg.dataDir;
+        StateDirectory = mkIf (cfg.dataDir == "/var/lib/caddy") [ "caddy" ];
+        LogsDirectory = mkIf (cfg.logDir == "/var/log/caddy") [ "caddy" ];
+        Restart = "on-failure";
+        RestartPreventExitStatus = 1;
+        RestartSec = "5s";
+
+        # TODO: attempt to upstream these options
+        NoNewPrivileges = true;
+        PrivateDevices = true;
+        ProtectHome = true;
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "caddy") {
+      caddy = {
+        group = cfg.group;
+        uid = config.ids.uids.caddy;
+        home = cfg.dataDir;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "caddy") {
+      caddy.gid = config.ids.gids.caddy;
+    };
+
+    security.acme.certs =
+      let
+        certCfg = map (useACMEHost: nameValuePair useACMEHost {
+          group = mkDefault cfg.group;
+          reloadServices = [ "caddy.service" ];
+        }) acmeHosts;
+      in
+        listToAttrs certCfg;
+
+    environment.etc.${etcConfigFile}.source = cfg.configFile;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/caddy/vhost-options.nix b/nixpkgs/nixos/modules/services/web-servers/caddy/vhost-options.nix
new file mode 100644
index 000000000000..229b53efb49f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/caddy/vhost-options.nix
@@ -0,0 +1,77 @@
+{ cfg }:
+{ config, lib, name, ... }:
+let
+  inherit (lib) literalExpression mkOption types;
+in
+{
+  options = {
+
+    hostName = mkOption {
+      type = types.str;
+      default = name;
+      description = lib.mdDoc "Canonical hostname for the server.";
+    };
+
+    serverAliases = mkOption {
+      type = with types; listOf str;
+      default = [ ];
+      example = [ "www.example.org" "example.org" ];
+      description = lib.mdDoc ''
+        Additional names of virtual hosts served by this virtual host configuration.
+      '';
+    };
+
+    listenAddresses = mkOption {
+      type = with types; listOf str;
+      description = lib.mdDoc ''
+        A list of host interfaces to bind to for this virtual host.
+      '';
+      default = [ ];
+      example = [ "127.0.0.1" "::1" ];
+    };
+
+    useACMEHost = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        A host of an existing Let's Encrypt certificate to use.
+        This is mostly useful if you use DNS challenges but Caddy does not
+        currently support your provider.
+
+        *Note that this option does not create any certificates, nor
+        does it add subdomains to existing ones – you will need to create them
+        manually using [](#opt-security.acme.certs).*
+      '';
+    };
+
+    logFormat = mkOption {
+      type = types.lines;
+      default = ''
+        output file ${cfg.logDir}/access-${config.hostName}.log
+      '';
+      defaultText = ''
+        output file ''${config.services.caddy.logDir}/access-''${hostName}.log
+      '';
+      example = literalExpression ''
+        mkForce '''
+          output discard
+        ''';
+      '';
+      description = lib.mdDoc ''
+        Configuration for HTTP request logging (also known as access logs). See
+        <https://caddyserver.com/docs/caddyfile/directives/log#log>
+        for details.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Additional lines of configuration appended to this virtual host in the
+        automatically generated `Caddyfile`.
+      '';
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/darkhttpd.nix b/nixpkgs/nixos/modules/services/web-servers/darkhttpd.nix
new file mode 100644
index 000000000000..1e3a7166bc41
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/darkhttpd.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.darkhttpd;
+
+  args = concatStringsSep " " ([
+    cfg.rootDir
+    "--port ${toString cfg.port}"
+    "--addr ${cfg.address}"
+  ] ++ cfg.extraArgs
+    ++ optional cfg.hideServerId             "--no-server-id"
+    ++ optional config.networking.enableIPv6 "--ipv6");
+
+in {
+  options.services.darkhttpd = with types; {
+    enable = mkEnableOption (lib.mdDoc "DarkHTTPd web server");
+
+    port = mkOption {
+      default = 80;
+      type = types.port;
+      description = lib.mdDoc ''
+        Port to listen on.
+        Pass 0 to let the system choose any free port for you.
+      '';
+    };
+
+    address = mkOption {
+      default = "127.0.0.1";
+      type = str;
+      description = lib.mdDoc ''
+        Address to listen on.
+        Pass `all` to listen on all interfaces.
+      '';
+    };
+
+    rootDir = mkOption {
+      type = path;
+      description = lib.mdDoc ''
+        Path from which to serve files.
+      '';
+    };
+
+    hideServerId = mkOption {
+      type = bool;
+      default = true;
+      description = lib.mdDoc ''
+        Don't identify the server type in headers or directory listings.
+      '';
+    };
+
+    extraArgs = mkOption {
+      type = listOf str;
+      default = [];
+      description = lib.mdDoc ''
+        Additional configuration passed to the executable.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.darkhttpd = {
+      description = "Dark HTTPd";
+      wants = [ "network.target" ];
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${pkgs.darkhttpd}/bin/darkhttpd ${args}";
+        AmbientCapabilities = lib.mkIf (cfg.port < 1024) [ "CAP_NET_BIND_SERVICE" ];
+        Restart = "on-failure";
+        RestartSec = "2s";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/fcgiwrap.nix b/nixpkgs/nixos/modules/services/web-servers/fcgiwrap.nix
new file mode 100644
index 000000000000..649b058bd22f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/fcgiwrap.nix
@@ -0,0 +1,74 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.fcgiwrap;
+in {
+
+  options = {
+    services.fcgiwrap = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable fcgiwrap, a server for running CGI applications over FastCGI.";
+      };
+
+      preforkProcesses = mkOption {
+        type = types.int;
+        default = 1;
+        description = lib.mdDoc "Number of processes to prefork.";
+      };
+
+      socketType = mkOption {
+        type = types.enum [ "unix" "tcp" "tcp6" ];
+        default = "unix";
+        description = lib.mdDoc "Socket type: 'unix', 'tcp' or 'tcp6'.";
+      };
+
+      socketAddress = mkOption {
+        type = types.str;
+        default = "/run/fcgiwrap.sock";
+        example = "1.2.3.4:5678";
+        description = lib.mdDoc "Socket address. In case of a UNIX socket, this should be its filesystem path.";
+      };
+
+      user = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "User permissions for the socket.";
+      };
+
+      group = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "Group permissions for the socket.";
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.fcgiwrap = {
+      after = [ "nss-user-lookup.target" ];
+      wantedBy = optional (cfg.socketType != "unix") "multi-user.target";
+
+      serviceConfig = {
+        ExecStart = "${pkgs.fcgiwrap}/sbin/fcgiwrap -c ${builtins.toString cfg.preforkProcesses} ${
+          optionalString (cfg.socketType != "unix") "-s ${cfg.socketType}:${cfg.socketAddress}"
+        }";
+      } // (if cfg.user != null && cfg.group != null then {
+        User = cfg.user;
+        Group = cfg.group;
+      } else {
+        DynamicUser = true;
+      });
+    };
+
+    systemd.sockets = if (cfg.socketType == "unix") then {
+      fcgiwrap = {
+        wantedBy = [ "sockets.target" ];
+        socketConfig.ListenStream = cfg.socketAddress;
+      };
+    } else { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/garage.md b/nixpkgs/nixos/modules/services/web-servers/garage.md
new file mode 100644
index 000000000000..3a9b85ce0603
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/garage.md
@@ -0,0 +1,96 @@
+# Garage {#module-services-garage}
+
+[Garage](https://garagehq.deuxfleurs.fr/)
+is an open-source, self-hostable, S3-compatible object store that is simpler than MinIO and designed for geo-distributed deployments.
+The server setup can be automated using
+[services.garage](#opt-services.garage.enable). A
+client configured against your local Garage instance is available in
+the global environment as `garage-manage`.
+
+The current NixOS default is `garage_0_8`, which is also the latest
+major version available.
+
+## General considerations on upgrades {#module-services-garage-upgrade-scenarios}
+
+Garage provides cookbook documentation on how to upgrade:
+<https://garagehq.deuxfleurs.fr/documentation/cookbook/upgrading/>
+
+::: {.warning}
+Garage has two types of upgrades: patch-level upgrades and minor/major version upgrades.
+
+In all cases, you should read the changelog and ideally test the upgrade on a staging cluster.
+
+Checking the health of your cluster can be achieved using `garage-manage repair`.
+:::
+
+::: {.warning}
+Until 1.0 is released, patch-level upgrades are treated as minor version upgrades,
+and minor version upgrades are treated as major version upgrades;
+i.e. 0.6 to 0.7 is a major version upgrade.
+:::
+
+  - **Straightforward upgrades (patch-level upgrades).**
+    Upgrades must be performed one node at a time, i.e. for each node, stop it, upgrade it (change [stateVersion](#opt-system.stateVersion) or [services.garage.package](#opt-services.garage.package)), and restart it if switching the configuration did not already do so (see the sketch after this list).
+  - **Multiple version upgrades.**
+    Garage does not provide any guarantee on moving more than one major version forward.
+    E.g., if you're on `0.7`, you cannot upgrade to `0.9`.
+    You need to upgrade to `0.8` first.
+    As long as [stateVersion](#opt-system.stateVersion) is declared properly,
+    this is enforced automatically. The module will issue a warning to remind the user to upgrade to the latest
+    Garage *after* that deploy.
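+
+A minimal sketch of the per-node package bump (assuming the new major version
+is packaged as `pkgs.garage_0_8`) could look like:
+
+```
+{ pkgs, ... }:
+{
+  services.garage = {
+    enable = true;
+    # Bump one major version at a time (e.g. 0.7 -> 0.8) when upgrading.
+    package = pkgs.garage_0_8;
+  };
+}
+```
+
+Switching to the new configuration should restart `garage.service` automatically.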
+
+## Advanced upgrades (minor/major version upgrades) {#module-services-garage-advanced-upgrades}
+
+Here are some baseline instructions for handling advanced upgrades in Garage; when in doubt, please refer to the upstream instructions.
+
+  - Disable API and web access to Garage.
+  - Perform `garage-manage repair --all-nodes --yes tables` and `garage-manage repair --all-nodes --yes blocks`.
+  - Verify the resulting logs and check that data is synced properly between all nodes.
+    If you have time, do additional checks (`scrub`, `block_refs`, etc.).
+  - Check that queues are empty with `garage-manage stats` or through monitoring tools.
+  - Run `systemctl stop garage` to stop the currently running Garage version.
+  - Back up the metadata folder of ALL your nodes, e.g. for a metadata directory (the default one) in `/var/lib/garage/meta`,
+    you can run `pushd /var/lib/garage; tar -acf meta-v0.7.tar.zst meta/; popd`.
+  - Run the offline migration: `nix-shell -p garage_0_8 --run "garage offline-repair --yes"`; this can take some time depending on how many objects are stored in your cluster.
+  - Bump the Garage version in your NixOS configuration, either by changing [stateVersion](#opt-system.stateVersion) or by bumping [services.garage.package](#opt-services.garage.package); this should restart Garage automatically.
+  - Perform `garage-manage repair --all-nodes --yes tables` and `garage-manage repair --all-nodes --yes blocks`.
+  - Wait for a full table sync to run.
+
+Your upgraded cluster should now be in a working state; re-enable API and web access.
+
+## Maintainer information {#module-services-garage-maintainer-info}
+
+As stated in the previous paragraph, we must provide a clean upgrade path for Garage
+since it cannot move more than one major version forward in a single upgrade. This chapter
+adds some notes on how Garage updates should be rolled out in the future.
+This is inspired by how Nextcloud does it.
+
+While patch-level updates are no problem and can be done directly in the
+package-expression (and should be backported to supported stable branches after that),
+major-releases should be added in a new attribute (e.g. Garage `v0.8.0`
+should be available in `nixpkgs` as `pkgs.garage_0_8_0`).
+To provide simple upgrade paths it's generally useful to backport those as well to stable
+branches. As long as the package-default isn't altered, this won't break existing setups.
+After that, the versioning-warning in the `garage`-module should be
+updated to make sure that the
+[package](#opt-services.garage.package)-option selects the latest version
+on fresh setups.
+
+If major releases are abandoned by upstream, we should first check whether they are needed
+in NixOS for a safe upgrade path before removing them. In that case we should keep those
+packages, but mark them as insecure in an expression like this (in
+`<nixpkgs/pkgs/tools/filesystem/garage/default.nix>`):
+```
+/* ... */
+{
+  garage_0_7_3 = generic {
+    version = "0.7.3";
+    sha256 = "0000000000000000000000000000000000000000000000000000";
+    eol = true;
+  };
+}
+```
+
+Ideally we should make sure that it's possible to jump two NixOS versions forward:
+i.e. the warnings and the logic in the module should guide a user upgrading from a
+Garage on e.g. 22.11 to a Garage on 23.11.
diff --git a/nixpkgs/nixos/modules/services/web-servers/garage.nix b/nixpkgs/nixos/modules/services/web-servers/garage.nix
new file mode 100644
index 000000000000..47b4c6ab416e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/garage.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.garage;
+  toml = pkgs.formats.toml { };
+  configFile = toml.generate "garage.toml" cfg.settings;
+in
+{
+  meta = {
+    doc = ./garage.md;
+    maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+  };
+
+  options.services.garage = {
+    enable = mkEnableOption (lib.mdDoc "Garage Object Storage (S3 compatible)");
+
+    extraEnvironment = mkOption {
+      type = types.attrsOf types.str;
+      description = lib.mdDoc "Extra environment variables to pass to the Garage server.";
+      default = { };
+      example = { RUST_BACKTRACE = "yes"; };
+    };
+
+    environmentFile = mkOption {
+      type = types.nullOr types.path;
+      description = lib.mdDoc "File containing environment variables to be passed to the Garage server.";
+      default = null;
+    };
+
+    logLevel = mkOption {
+      type = types.enum ([ "info" "debug" "trace" ]);
+      default = "info";
+      example = "debug";
+      description = lib.mdDoc "Garage log level, see <https://garagehq.deuxfleurs.fr/documentation/quick-start/#launching-the-garage-server> for examples.";
+    };
+
+    settings = mkOption {
+      type = types.submodule {
+        freeformType = toml.type;
+
+        options = {
+          metadata_dir = mkOption {
+            default = "/var/lib/garage/meta";
+            type = types.path;
+            description = lib.mdDoc "The metadata directory, put this on a fast disk (e.g. SSD) if possible.";
+          };
+
+          data_dir = mkOption {
+            default = "/var/lib/garage/data";
+            type = types.path;
+            description = lib.mdDoc "The main data storage, put this on your large storage (e.g. high capacity HDD)";
+          };
+
+          replication_mode = mkOption {
+            default = "none";
+            type = types.enum ([ "none" "1" "2" "3" "2-dangerous" "3-dangerous" "3-degraded" 1 2 3 ]);
+            apply = v: toString v;
+            description = lib.mdDoc "Garage replication mode, defaults to none, see: <https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/#replication-mode> for reference.";
+          };
+        };
+      };
+      description = lib.mdDoc "Garage configuration, see <https://garagehq.deuxfleurs.fr/documentation/reference-manual/configuration/> for reference.";
+    };
+
+    package = mkOption {
+      type = types.package;
+      description = lib.mdDoc "Garage package to use, needs to be set explicitly. If you are upgrading from a major version, please read NixOS and Garage release notes for upgrade instructions.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.etc."garage.toml" = {
+      source = configFile;
+    };
+
+    environment.systemPackages = [ cfg.package ]; # For administration
+
+    systemd.services.garage = {
+      description = "Garage Object Storage (S3 compatible)";
+      after = [ "network.target" "network-online.target" ];
+      wants = [ "network.target" "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      restartTriggers = [ configFile ] ++ (lib.optional (cfg.environmentFile != null) cfg.environmentFile);
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/garage server";
+
+        StateDirectory = mkIf (hasPrefix "/var/lib/garage" cfg.settings.data_dir || hasPrefix "/var/lib/garage" cfg.settings.metadata_dir) "garage";
+        DynamicUser = lib.mkDefault true;
+        ProtectHome = true;
+        NoNewPrivileges = true;
+        EnvironmentFile = lib.optional (cfg.environmentFile != null) cfg.environmentFile;
+      };
+      environment = {
+        RUST_LOG = lib.mkDefault "garage=${cfg.logLevel}";
+      } // cfg.extraEnvironment;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/hitch/default.nix b/nixpkgs/nixos/modules/services/web-servers/hitch/default.nix
new file mode 100644
index 000000000000..6c8b3cda5f72
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/hitch/default.nix
@@ -0,0 +1,111 @@
+{ config, lib, pkgs, ...}:
+let
+  cfg = config.services.hitch;
+  ocspDir = lib.optionalString cfg.ocsp-stapling.enabled "/var/cache/hitch/ocsp";
+  hitchConfig = with lib; pkgs.writeText "hitch.conf" (concatStringsSep "\n" [
+    ("backend = \"${cfg.backend}\"")
+    (concatMapStrings (s: "frontend = \"${s}\"\n") cfg.frontend)
+    (concatMapStrings (s: "pem-file = \"${s}\"\n") cfg.pem-files)
+    ("ciphers = \"${cfg.ciphers}\"")
+    ("ocsp-dir = \"${ocspDir}\"")
+    "user = \"${cfg.user}\""
+    "group = \"${cfg.group}\""
+    cfg.extraConfig
+  ]);
+in
+with lib;
+{
+  options = {
+    services.hitch = {
+      enable = mkEnableOption (lib.mdDoc "Hitch Server");
+
+      backend = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          The host and port Hitch connects to when receiving
+          a connection in the form [HOST]:PORT
+        '';
+      };
+
+      ciphers = mkOption {
+        type = types.str;
+        default = "EECDH+AESGCM:EDH+AESGCM:AES256+EECDH:AES256+EDH";
+        description = lib.mdDoc "The list of ciphers to use";
+      };
+
+      frontend = mkOption {
+        type = types.either types.str (types.listOf types.str);
+        default = "[127.0.0.1]:443";
+        description = lib.mdDoc ''
+          The port and interface of the listen endpoint in the
+          form [HOST]:PORT[+CERT].
+        '';
+        apply = toList;
+      };
+
+      pem-files = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description = lib.mdDoc "PEM files to use";
+      };
+
+      ocsp-stapling = {
+        enabled = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc "Whether to enable OCSP Stapling";
+        };
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "hitch";
+        description = lib.mdDoc "The user to run as";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "hitch";
+        description = lib.mdDoc "The group to run as";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Additional configuration lines";
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.hitch = {
+      description = "Hitch";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      preStart = ''
+        ${pkgs.hitch}/sbin/hitch -t --config ${hitchConfig}
+      '' + (optionalString cfg.ocsp-stapling.enabled ''
+        mkdir -p ${ocspDir}
+        chown -R hitch:hitch ${ocspDir}
+      '');
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = "${pkgs.hitch}/sbin/hitch --daemon --config ${hitchConfig}";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        Restart = "always";
+        RestartSec = "5s";
+        LimitNOFILE = 131072;
+      };
+    };
+
+    environment.systemPackages = [ pkgs.hitch ];
+
+    users.users.hitch = {
+      group = "hitch";
+      isSystemUser = true;
+    };
+    users.groups.hitch = {};
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/hydron.nix b/nixpkgs/nixos/modules/services/web-servers/hydron.nix
new file mode 100644
index 000000000000..9d30fdc0caab
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/hydron.nix
@@ -0,0 +1,164 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.hydron;
+in with lib; {
+  options.services.hydron = {
+    enable = mkEnableOption (lib.mdDoc "hydron");
+
+    dataDir = mkOption {
+      type = types.path;
+      default = "/var/lib/hydron";
+      example = "/home/okina/hydron";
+      description = lib.mdDoc "Location where hydron runs and stores data.";
+    };
+
+    interval = mkOption {
+      type = types.str;
+      default = "weekly";
+      example = "06:00";
+      description = lib.mdDoc ''
+        How often to run hydron import and possibly fetch tags. Runs weekly by default.
+
+        The format is described in
+        {manpage}`systemd.time(7)`.
+      '';
+    };
+
+    password = mkOption {
+      type = types.str;
+      default = "hydron";
+      example = "dumbpass";
+      description = lib.mdDoc "Password for the hydron database.";
+    };
+
+    passwordFile = mkOption {
+      type = types.path;
+      default = "/run/keys/hydron-password-file";
+      example = "/home/okina/hydron/keys/pass";
+      description = lib.mdDoc "Password file for the hydron database.";
+    };
+
+    postgresArgs = mkOption {
+      type = types.str;
+      description = lib.mdDoc "Postgresql connection arguments.";
+      example = ''
+        {
+          "driver": "postgres",
+          "connection": "user=hydron password=dumbpass dbname=hydron sslmode=disable"
+        }
+      '';
+    };
+
+    postgresArgsFile = mkOption {
+      type = types.path;
+      default = "/run/keys/hydron-postgres-args";
+      example = "/home/okina/hydron/keys/postgres";
+      description = lib.mdDoc "Postgresql connection arguments file.";
+    };
+
+    listenAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "127.0.0.1:8010";
+      description = lib.mdDoc "Listen on a specific IP address and port.";
+    };
+
+    importPaths = mkOption {
+      type = types.listOf types.path;
+      default = [];
+      example = [ "/home/okina/Pictures" ];
+      description = lib.mdDoc "Paths that hydron will recursively import.";
+    };
+
+    fetchTags = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc "Fetch tags for imported images and webm from gelbooru.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.hydron.passwordFile = mkDefault (pkgs.writeText "hydron-password-file" cfg.password);
+    services.hydron.postgresArgsFile = mkDefault (pkgs.writeText "hydron-postgres-args" cfg.postgresArgs);
+    services.hydron.postgresArgs = mkDefault ''
+      {
+        "driver": "postgres",
+        "connection": "user=hydron password=${cfg.password} host=/run/postgresql dbname=hydron sslmode=disable"
+      }
+    '';
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "hydron" ];
+      ensureUsers = [
+        { name = "hydron";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.dataDir}' 0750 hydron hydron - -"
+      "d '${cfg.dataDir}/.hydron' - hydron hydron - -"
+      "d '${cfg.dataDir}/images' - hydron hydron - -"
+      "Z '${cfg.dataDir}' - hydron hydron - -"
+
+      "L+ '${cfg.dataDir}/.hydron/db_conf.json' - - - - ${cfg.postgresArgsFile}"
+    ];
+
+    systemd.services.hydron = {
+      description = "hydron";
+      after = [ "network.target" "postgresql.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        User = "hydron";
+        Group = "hydron";
+        ExecStart = "${pkgs.hydron}/bin/hydron serve"
+        + optionalString (cfg.listenAddress != null) " -a ${cfg.listenAddress}";
+      };
+    };
+
+    systemd.services.hydron-fetch = {
+      description = "Import paths into hydron and possibly fetch tags";
+
+      serviceConfig = {
+        Type = "oneshot";
+        User = "hydron";
+        Group = "hydron";
+        ExecStart = "${pkgs.hydron}/bin/hydron import "
+        + optionalString cfg.fetchTags "-f "
+        + (escapeShellArg cfg.dataDir) + "/images " + (escapeShellArgs cfg.importPaths);
+      };
+    };
+
+    systemd.timers.hydron-fetch = {
+      description = "Automatically import paths into hydron and possibly fetch tags";
+      after = [ "network.target" "hydron.service" ];
+      wantedBy = [ "timers.target" ];
+
+      timerConfig = {
+        Persistent = true;
+        OnCalendar = cfg.interval;
+      };
+    };
+
+    users = {
+      groups.hydron.gid = config.ids.gids.hydron;
+
+      users.hydron = {
+        description = "hydron server service user";
+        home = cfg.dataDir;
+        group = "hydron";
+        uid = config.ids.uids.hydron;
+      };
+    };
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "hydron" "baseDir" ] [ "services" "hydron" "dataDir" ])
+  ];
+
+  meta.maintainers = with maintainers; [ Madouura ];
+}
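For illustration, a minimal NixOS configuration using the options defined above might look like the following (an untested sketch; the import path is a placeholder):

    services.hydron = {
      enable = true;
      listenAddress = "127.0.0.1:8010";
      importPaths = [ "/data/pictures" ];  # placeholder path
      fetchTags = true;
      interval = "weekly";
    };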
diff --git a/nixpkgs/nixos/modules/services/web-servers/jboss/builder.sh b/nixpkgs/nixos/modules/services/web-servers/jboss/builder.sh
new file mode 100644
index 000000000000..8c49b87db060
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/jboss/builder.sh
@@ -0,0 +1,73 @@
+set -e
+
+if [ -e "$NIX_ATTRS_SH_FILE" ]; then . "$NIX_ATTRS_SH_FILE"; elif [ -f .attrs.sh ]; then . .attrs.sh; fi
+source $stdenv/setup
+
+mkdir -p $out/bin
+
+cat > $out/bin/control <<EOF
+mkdir -p $logDir
+chown -R $user $logDir
+export PATH=$PATH:$su/bin
+
+start()
+{
+  su $user -s /bin/sh -c "$jboss/bin/run.sh \
+      -Djboss.server.base.dir=$serverDir \
+      -Djboss.server.base.url=file://$serverDir \
+      -Djboss.server.temp.dir=$tempDir \
+      -Djboss.server.log.dir=$logDir \
+      -Djboss.server.lib.url=$libUrl \
+      -c default"
+}
+
+stop()
+{
+  su $user -s /bin/sh -c "$jboss/bin/shutdown.sh -S"
+}
+
+if test "\$1" = start
+then
+  trap stop 15
+
+  start
+elif test "\$1" = stop
+then
+  stop
+elif test "\$1" = init
+then
+  echo "Are you sure you want to create a new server instance (old server instance will be lost!)?"
+  read answer
+
+  if ! test \$answer = "yes"
+  then
+    exit 1
+  fi
+
+  rm -rf $serverDir
+  mkdir -p $serverDir
+  cd $serverDir
+  cp -av $jboss/server/default .
+  sed -i -e "s|deploy/|$deployDir|" default/conf/jboss-service.xml
+
+  if ! test "$useJK" = ""
+  then
+    sed -i -e 's|<attribute name="UseJK">false</attribute>|<attribute name="UseJK">true</attribute>|' default/deploy/jboss-web.deployer/META-INF/jboss-service.xml
+    sed -i -e 's|<Engine name="jboss.web" defaultHost="localhost">|<Engine name="jboss.web" defaultHost="localhost" jvmRoute="node1">|' default/deploy/jboss-web.deployer/server.xml
+  fi
+
+  # Make files accessible for the server user
+
+  chown -R $user $serverDir
+  for i in \`find $serverDir -type d\`
+  do
+    chmod 755 \$i
+  done
+  for i in \`find $serverDir -type f\`
+  do
+    chmod 644 \$i
+  done
+fi
+EOF
+
+chmod +x $out/bin/*
diff --git a/nixpkgs/nixos/modules/services/web-servers/jboss/default.nix b/nixpkgs/nixos/modules/services/web-servers/jboss/default.nix
new file mode 100644
index 000000000000..05b354d567fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/jboss/default.nix
@@ -0,0 +1,88 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.jboss;
+
+  jbossService = pkgs.stdenv.mkDerivation {
+    name = "jboss-server";
+    builder = ./builder.sh;
+    inherit (pkgs) jboss su;
+    inherit (cfg) tempDir logDir libUrl deployDir serverDir user useJK;
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.jboss = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable JBoss. WARNING : this package is outdated and is known to have vulnerabilities.";
+      };
+
+      tempDir = mkOption {
+        default = "/tmp";
+        type = types.str;
+        description = lib.mdDoc "Location where JBoss stores its temp files";
+      };
+
+      logDir = mkOption {
+        default = "/var/log/jboss";
+        type = types.str;
+        description = lib.mdDoc "Location of the logfile directory of JBoss";
+      };
+
+      serverDir = mkOption {
+        description = lib.mdDoc "Location of the server instance files";
+        default = "/var/jboss/server";
+        type = types.str;
+      };
+
+      deployDir = mkOption {
+        description = lib.mdDoc "Location of the deployment files";
+        default = "/nix/var/nix/profiles/default/server/default/deploy/";
+        type = types.str;
+      };
+
+      libUrl = mkOption {
+        default = "file:///nix/var/nix/profiles/default/server/default/lib";
+        description = lib.mdDoc "Location where the shared library JARs are stored";
+        type = types.str;
+      };
+
+      user = mkOption {
+        default = "nobody";
+        description = lib.mdDoc "User account under which jboss runs.";
+        type = types.str;
+      };
+
+      useJK = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to use to connector to the Apache HTTP server";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.jboss.enable {
+    systemd.services.jboss = {
+      description = "JBoss server";
+      script = "${jbossService}/bin/control start";
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
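For illustration, enabling this (outdated, see the warning above) module from a NixOS configuration would be a sketch along these lines; the values shown are simply the module defaults:

    services.jboss = {
      enable = true;
      serverDir = "/var/jboss/server";
      logDir = "/var/log/jboss";
      useJK = false;
    };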
diff --git a/nixpkgs/nixos/modules/services/web-servers/keter/bundle.nix b/nixpkgs/nixos/modules/services/web-servers/keter/bundle.nix
new file mode 100644
index 000000000000..32b08c3be206
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/keter/bundle.nix
@@ -0,0 +1,40 @@
+/* This makes a keter bundle as described on the github page:
+  https://github.com/snoyberg/keter#bundling-your-app-for-keter
+*/
+{ keterDomain
+, keterExecutable
+, gnutar
+, writeTextFile
+, lib
+, stdenv
+, ...
+}:
+
+let
+  str.stanzas = [{
+    # we just use nix as an absolute path so we're not bundling any binaries
+    type = "webapp";
+    /* Note that we're not actually putting the executable in the bundle;
+      we can already use the nix store for copying, so we just
+      symlink to the app. */
+    exec = keterExecutable;
+    host = keterDomain;
+  }];
+  configFile = writeTextFile {
+    name = "keter.yml";
+    text = (lib.generators.toYAML { } str);
+  };
+
+in
+stdenv.mkDerivation {
+  name = "keter-bundle";
+  buildCommand = ''
+    mkdir -p config
+    cp ${configFile} config/keter.yaml
+
+    echo 'create a gzipped tarball'
+    mkdir -p $out
+    tar -zcvf $out/bundle.tar.gz.keter ./.
+  '';
+  buildInputs = [ gnutar ];
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/keter/default.nix b/nixpkgs/nixos/modules/services/web-servers/keter/default.nix
new file mode 100644
index 000000000000..0cd9c30cea14
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/keter/default.nix
@@ -0,0 +1,191 @@
+{ config, pkgs, lib, ... }:
+let
+  cfg = config.services.keter;
+  yaml = pkgs.formats.yaml { };
+in
+{
+  meta = {
+    maintainers = with lib.maintainers; [ jappie ];
+  };
+
+  imports = [
+    (lib.mkRenamedOptionModule [ "services" "keter" "keterRoot" ] [ "services" "keter" "root" ])
+    (lib.mkRenamedOptionModule [ "services" "keter" "keterPackage" ] [ "services" "keter" "package" ])
+  ];
+
+  options.services.keter = {
+    enable = lib.mkEnableOption (lib.mdDoc ''keter, a web app deployment manager.
+Note that this module only supports loading of webapps:
+it keeps the old app running and swaps the ports once the new one has booted
+'');
+
+    root = lib.mkOption {
+      type = lib.types.str;
+      default = "/var/lib/keter";
+      description = lib.mdDoc "Mutable state folder for keter";
+    };
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = pkgs.haskellPackages.keter;
+      defaultText = lib.literalExpression "pkgs.haskellPackages.keter";
+      description = lib.mdDoc "The keter package to be used";
+    };
+
+
+    globalKeterConfig = lib.mkOption {
+      type = lib.types.submodule {
+        freeformType = yaml.type;
+        options = {
+          ip-from-header = lib.mkOption {
+            default = true;
+            type = lib.types.bool;
+            description = lib.mdDoc "You want that ip-from-header in the nginx setup case. It allows nginx setting the original ip address rather then it being localhost (due to reverse proxying)";
+          };
+          listeners = lib.mkOption {
+            default = [{ host = "*"; port = 6981; }];
+            type = lib.types.listOf (lib.types.submodule {
+              options = {
+                host = lib.mkOption {
+                  type = lib.types.str;
+                  description = lib.mdDoc "host";
+                };
+                port = lib.mkOption {
+                  type = lib.types.port;
+                  description = lib.mdDoc "port";
+                };
+              };
+            });
+            description = lib.mdDoc ''
+              You want ip-from-header enabled in the nginx setup case.
+              It allows nginx to pass the original client IP address rather
+              than it appearing as localhost (due to reverse proxying).
+              However, if you configure keter to accept connections
+              directly, you may want to set it to false.'';
+          };
+          rotate-logs = lib.mkOption {
+            default = false;
+            type = lib.types.bool;
+            description = lib.mdDoc ''
+              When false, keter emits its own logs and its applications'
+              logs to stderr, which allows journald to capture them.
+              Set to true to let keter put the logs in files
+              (useful on non-systemd systems; this is the old approach
+              where keter handled log management).'';
+          };
+        };
+      };
+      description = lib.mdDoc "Global config for keter, see <https://github.com/snoyberg/keter/blob/master/etc/keter-config.yaml> for reference";
+    };
+
+    bundle = {
+      appName = lib.mkOption {
+        type = lib.types.str;
+        default = "myapp";
+        description = lib.mdDoc "The name keter assigns to this bundle";
+      };
+
+      executable = lib.mkOption {
+        type = lib.types.path;
+        description = lib.mdDoc "The executable to be run";
+      };
+
+      domain = lib.mkOption {
+        type = lib.types.str;
+        default = "example.com";
+        description = lib.mdDoc "The domain keter will bind to";
+      };
+
+      publicScript = lib.mkOption {
+        type = lib.types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Allows loading of public environment variables;
+          these are emitted to the log, so they shouldn't contain secrets.
+        '';
+        example = "ADMIN_EMAIL=hi@example.com";
+      };
+
+      secretScript = lib.mkOption {
+        type = lib.types.str;
+        default = "";
+        description = lib.mdDoc "Allows loading of private environment variables";
+        example = "MY_AWS_KEY=$(cat /run/keys/AWS_ACCESS_KEY_ID)";
+      };
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable (
+    let
+      incoming = "${cfg.root}/incoming";
+
+
+      globalKeterConfigFile = pkgs.writeTextFile {
+        name = "keter-config.yml";
+        text = (lib.generators.toYAML { } (cfg.globalKeterConfig // { root = cfg.root; }));
+      };
+
+      # If things are expected to change often, put it in the bundle!
+      bundle = pkgs.callPackage ./bundle.nix
+        (cfg.bundle // { keterExecutable = executable; keterDomain = cfg.bundle.domain; });
+
+      # This indirection is required to ensure the nix path
+      # gets copied over to the target machine in remote deployments.
+      # Furthermore, it's important that we use exec to
+      # run the binary otherwise we get process leakage due to this
+      # being executed on every change.
+      executable = pkgs.writeShellScript "bundle-wrapper" ''
+        set -e
+        ${cfg.bundle.secretScript}
+        set -xe
+        ${cfg.bundle.publicScript}
+        exec ${cfg.bundle.executable}
+      '';
+
+    in
+    {
+      systemd.services.keter = {
+        description = "keter app loader";
+        script = ''
+          set -xe
+          mkdir -p ${incoming}
+          ${lib.getExe cfg.package} ${globalKeterConfigFile};
+        '';
+        wantedBy = [ "multi-user.target" "nginx.service" ];
+
+        serviceConfig = {
+          Restart = "always";
+          RestartSec = "10s";
+        };
+
+        after = [
+          "network.target"
+          "local-fs.target"
+          "postgresql.service"
+        ];
+      };
+
+      # On deploy this will load our app, by moving it into the incoming dir
+      # If the bundle content changes, this will run again.
+      # Because the bundle content contains the nix path to the executable,
+      # we inherit nix based cache busting.
+      systemd.services.load-keter-bundle = {
+        description = "load keter bundle into incoming folder";
+        after = [ "keter.service" ];
+        wantedBy = [ "multi-user.target" ];
+        # we can't override keter bundles because it'll stop the previous app
+        # https://github.com/snoyberg/keter#deploying
+        script = ''
+          set -xe
+          cp ${bundle}/bundle.tar.gz.keter ${incoming}/${cfg.bundle.appName}.keter
+        '';
+        path = [
+          executable
+          cfg.bundle.executable
+        ]; # this is a hack to get the executable copied over to the machine.
+      };
+    }
+  );
+}
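For illustration, a hypothetical (untested) keter deployment using only the options defined above; the executable path and domain are placeholders:

    services.keter = {
      enable = true;
      globalKeterConfig = {
        ip-from-header = true;
        listeners = [ { host = "*"; port = 6981; } ];
      };
      bundle = {
        appName = "myapp";
        domain = "example.com";
        executable = ./my-app/bin/my-app;  # placeholder path to the app binary
        publicScript = "ADMIN_EMAIL=hi@example.com";
      };
    };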
diff --git a/nixpkgs/nixos/modules/services/web-servers/lighttpd/cgit.nix b/nixpkgs/nixos/modules/services/web-servers/lighttpd/cgit.nix
new file mode 100644
index 000000000000..e9f42c41183b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/lighttpd/cgit.nix
@@ -0,0 +1,93 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.lighttpd.cgit;
+  pathPrefix = optionalString (stringLength cfg.subdir != 0) ("/" + cfg.subdir);
+  configFile = pkgs.writeText "cgitrc"
+    ''
+      # default paths to static assets
+      css=${pathPrefix}/cgit.css
+      logo=${pathPrefix}/cgit.png
+      favicon=${pathPrefix}/favicon.ico
+
+      # user configuration
+      ${cfg.configText}
+    '';
+in
+{
+
+  options.services.lighttpd.cgit = {
+
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        If true, enable cgit (fast web interface for git repositories) as a
+        sub-service in lighttpd.
+      '';
+    };
+
+    subdir = mkOption {
+      default = "cgit";
+      example = "";
+      type = types.str;
+      description = lib.mdDoc ''
+        The subdirectory in which to serve cgit. The web application will be
+        accessible at http://yourserver/''${subdir}
+      '';
+    };
+
+    configText = mkOption {
+      default = "";
+      example = literalExpression ''
+        '''
+          source-filter=''${pkgs.cgit}/lib/cgit/filters/syntax-highlighting.py
+          about-filter=''${pkgs.cgit}/lib/cgit/filters/about-formatting.sh
+          cache-size=1000
+          scan-path=/srv/git
+        '''
+      '';
+      type = types.lines;
+      description = lib.mdDoc ''
+        Verbatim contents of the cgit runtime configuration file. Documentation
+        (with cgitrc example file) is available in "man cgitrc". Or online:
+        http://git.zx2c4.com/cgit/tree/cgitrc.5.txt
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    # make the cgitrc manpage available
+    environment.systemPackages = [ pkgs.cgit ];
+
+    # declare module dependencies
+    services.lighttpd.enableModules = [ "mod_cgi" "mod_alias" "mod_setenv" ];
+
+    services.lighttpd.extraConfig = ''
+      $HTTP["url"] =~ "^/${cfg.subdir}" {
+          cgi.assign = (
+              "cgit.cgi" => "${pkgs.cgit}/cgit/cgit.cgi"
+          )
+          alias.url = (
+              "${pathPrefix}/cgit.css" => "${pkgs.cgit}/cgit/cgit.css",
+              "${pathPrefix}/cgit.png" => "${pkgs.cgit}/cgit/cgit.png",
+              "${pathPrefix}"          => "${pkgs.cgit}/cgit/cgit.cgi"
+          )
+          setenv.add-environment = (
+              "CGIT_CONFIG" => "${configFile}"
+          )
+      }
+    '';
+
+    systemd.services.lighttpd.preStart = ''
+      mkdir -p /var/cache/cgit
+      chown lighttpd:lighttpd /var/cache/cgit
+    '';
+
+  };
+
+}
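For illustration, a minimal (untested) sketch enabling cgit under lighttpd, reusing part of the configText example from the option above:

    services.lighttpd.enable = true;
    services.lighttpd.cgit = {
      enable = true;
      subdir = "cgit";
      configText = ''
        cache-size=1000
        scan-path=/srv/git
      '';
    };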
diff --git a/nixpkgs/nixos/modules/services/web-servers/lighttpd/collectd.nix b/nixpkgs/nixos/modules/services/web-servers/lighttpd/collectd.nix
new file mode 100644
index 000000000000..9a4285e3e2d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/lighttpd/collectd.nix
@@ -0,0 +1,62 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.lighttpd.collectd;
+  opt = options.services.lighttpd.collectd;
+
+  collectionConf = pkgs.writeText "collection.conf" ''
+    datadir: "${config.services.collectd.dataDir}"
+    libdir: "${config.services.collectd.package}/lib/collectd"
+  '';
+
+  defaultCollectionCgi = config.services.collectd.package.overrideDerivation(old: {
+    name = "collection.cgi";
+    dontConfigure = true;
+    buildPhase = "true";
+    installPhase = ''
+      substituteInPlace contrib/collection.cgi --replace '"/etc/collection.conf"' '$ENV{COLLECTION_CONF}'
+      cp contrib/collection.cgi $out
+    '';
+  });
+in
+{
+
+  options.services.lighttpd.collectd = {
+
+    enable = mkEnableOption (lib.mdDoc "collectd subservice accessible at http://yourserver/collectd");
+
+    collectionCgi = mkOption {
+      type = types.path;
+      default = defaultCollectionCgi;
+      defaultText = literalMD ''
+        `config.${options.services.collectd.package}` configured for lighttpd
+      '';
+      description = lib.mdDoc ''
+        Path to the collection.cgi script from (collectd sources)/contrib/collection.cgi.
+        This option allows using a customized version.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.lighttpd.enableModules = [ "mod_cgi" "mod_alias" "mod_setenv" ];
+
+    services.lighttpd.extraConfig = ''
+      $HTTP["url"] =~ "^/collectd" {
+        cgi.assign = (
+          ".cgi" => "${pkgs.perl}/bin/perl"
+        )
+        alias.url = (
+          "/collectd" => "${cfg.collectionCgi}"
+        )
+        setenv.add-environment = (
+          "PERL5LIB" => "${with pkgs.perlPackages; makePerlPath [ CGI HTMLParser URI pkgs.rrdtool ]}",
+          "COLLECTION_CONF" => "${collectionConf}"
+        )
+      }
+    '';
+  };
+
+}
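For illustration, the smallest (untested) setup for this subservice just enables collectd, lighttpd and the subservice itself:

    services.collectd.enable = true;
    services.lighttpd.enable = true;
    services.lighttpd.collectd.enable = true;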
diff --git a/nixpkgs/nixos/modules/services/web-servers/lighttpd/default.nix b/nixpkgs/nixos/modules/services/web-servers/lighttpd/default.nix
new file mode 100644
index 000000000000..eaa113c0d52c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/lighttpd/default.nix
@@ -0,0 +1,269 @@
+# NixOS module for lighttpd web server
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.lighttpd;
+
+  # List of known lighttpd modules, ordered by how the lighttpd documentation
+  # recommends them being imported:
+  # https://redmine.lighttpd.net/projects/1/wiki/Server_modulesDetails
+  #
+  # Some modules are always imported and should not appear in the config:
+  # disallowedModules = [ "mod_indexfile" "mod_dirlisting" "mod_staticfile" ];
+  #
+  # For full module list, see the output of running ./configure in the lighttpd
+  # source.
+  allKnownModules = [
+    "mod_rewrite"
+    "mod_redirect"
+    "mod_alias"
+    "mod_access"
+    "mod_auth"
+    "mod_status"
+    "mod_simple_vhost"
+    "mod_evhost"
+    "mod_userdir"
+    "mod_secdownload"
+    "mod_fastcgi"
+    "mod_proxy"
+    "mod_cgi"
+    "mod_ssi"
+    "mod_compress"
+    "mod_usertrack"
+    "mod_expire"
+    "mod_rrdtool"
+    "mod_accesslog"
+    # Remaining list of modules, order assumed to be unimportant.
+    "mod_authn_dbi"
+    "mod_authn_file"
+    "mod_authn_gssapi"
+    "mod_authn_ldap"
+    "mod_authn_mysql"
+    "mod_authn_pam"
+    "mod_authn_sasl"
+    "mod_cml"
+    "mod_deflate"
+    "mod_evasive"
+    "mod_extforward"
+    "mod_flv_streaming"
+    "mod_geoip"
+    "mod_magnet"
+    "mod_mysql_vhost"
+    "mod_openssl"  # since v1.4.46
+    "mod_scgi"
+    "mod_setenv"
+    "mod_trigger_b4_dl"
+    "mod_uploadprogress"
+    "mod_vhostdb"  # since v1.4.46
+    "mod_webdav"
+    "mod_wstunnel"  # since v1.4.46
+  ];
+
+  maybeModuleString = moduleName:
+    optionalString (elem moduleName cfg.enableModules) ''"${moduleName}"'';
+
+  modulesIncludeString = concatStringsSep ",\n"
+    (filter (x: x != "") (map maybeModuleString allKnownModules));
+
+  configFile = if cfg.configText != "" then
+    pkgs.writeText "lighttpd.conf" ''
+      ${cfg.configText}
+    ''
+    else
+    pkgs.writeText "lighttpd.conf" ''
+      server.document-root = "${cfg.document-root}"
+      server.port = ${toString cfg.port}
+      server.username = "lighttpd"
+      server.groupname = "lighttpd"
+
+      # As for why all modules are loaded here, instead of having small
+      # server.modules += () entries in each sub-service extraConfig snippet,
+      # read this:
+      #
+      #   https://redmine.lighttpd.net/projects/1/wiki/Server_modulesDetails
+      #   https://redmine.lighttpd.net/issues/2337
+      #
+      # Basically, lighttpd doesn't want to load (or even silently ignore) a
+      # module for a second time, and there is no way to check if a module has
+      # been loaded already. So if two services were to put the same module in
+      # server.modules += (), that would break the lighttpd configuration.
+      server.modules = (
+          ${modulesIncludeString}
+      )
+
+      # Logging (logs end up in systemd journal)
+      accesslog.use-syslog = "enable"
+      server.errorlog-use-syslog = "enable"
+
+      ${lib.optionalString cfg.enableUpstreamMimeTypes ''
+      include "${pkgs.lighttpd}/share/lighttpd/doc/config/conf.d/mime.conf"
+      ''}
+
+      static-file.exclude-extensions = ( ".fcgi", ".php", ".rb", "~", ".inc" )
+      index-file.names = ( "index.html" )
+
+      ${optionalString cfg.mod_userdir ''
+        userdir.path = "public_html"
+      ''}
+
+      ${optionalString cfg.mod_status ''
+        status.status-url = "/server-status"
+        status.statistics-url = "/server-statistics"
+        status.config-url = "/server-config"
+      ''}
+
+      ${cfg.extraConfig}
+    '';
+
+in
+
+{
+
+  options = {
+
+    services.lighttpd = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable the lighttpd web server.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.lighttpd;
+        defaultText = lib.literalExpression "pkgs.lighttpd";
+        type = types.package;
+        description = lib.mdDoc ''
+          lighttpd package to use.
+        '';
+      };
+
+      port = mkOption {
+        default = 80;
+        type = types.port;
+        description = lib.mdDoc ''
+          TCP port number for lighttpd to bind to.
+        '';
+      };
+
+      document-root = mkOption {
+        default = "/srv/www";
+        type = types.path;
+        description = lib.mdDoc ''
+          Document-root of the web server. Must be readable by the "lighttpd" user.
+        '';
+      };
+
+      mod_userdir = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If true, requests in the form /~user/page.html are rewritten to take
+          the file public_html/page.html from the home directory of the user.
+        '';
+      };
+
+      enableModules = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [ "mod_cgi" "mod_status" ];
+        description = lib.mdDoc ''
+          List of lighttpd modules to enable. Sub-services take care of
+          enabling modules as needed, so this option is mainly for when you
+          want to add custom stuff to
+          {option}`services.lighttpd.extraConfig` that depends on a
+          certain module.
+        '';
+      };
+
+      enableUpstreamMimeTypes = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to include the list of mime types bundled with lighttpd
+          (upstream). If you disable this, no mime types will be added by
+          NixOS and you will have to add your own mime types in
+          {option}`services.lighttpd.extraConfig`.
+        '';
+      };
+
+      mod_status = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Show server status overview at /server-status, statistics at
+          /server-statistics and list of loaded modules at /server-config.
+        '';
+      };
+
+      configText = mkOption {
+        default = "";
+        type = types.lines;
+        example = "...verbatim config file contents...";
+        description = lib.mdDoc ''
+          Overridable config file contents to use for lighttpd. By default, use
+          the contents automatically generated by NixOS.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          These configuration lines will be appended to the generated lighttpd
+          config file. Note that this mechanism does not work when the manual
+          {option}`configText` option is used.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = all (x: elem x allKnownModules) cfg.enableModules;
+        message = ''
+          One (or more) modules in services.lighttpd.enableModules are
+          unrecognized.
+
+          Known modules: ${toString allKnownModules}
+
+          services.lighttpd.enableModules: ${toString cfg.enableModules}
+        '';
+      }
+    ];
+
+    services.lighttpd.enableModules = mkMerge
+      [ (mkIf cfg.mod_status [ "mod_status" ])
+        (mkIf cfg.mod_userdir [ "mod_userdir" ])
+        # always load mod_accesslog so that we can log to the journal
+        [ "mod_accesslog" ]
+      ];
+
+    systemd.services.lighttpd = {
+      description = "Lighttpd Web Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.ExecStart = "${cfg.package}/sbin/lighttpd -D -f ${configFile}";
+      serviceConfig.ExecReload = "${pkgs.coreutils}/bin/kill -SIGUSR1 $MAINPID";
+      # SIGINT => graceful shutdown
+      serviceConfig.KillSignal = "SIGINT";
+    };
+
+    users.users.lighttpd = {
+      group = "lighttpd";
+      description = "lighttpd web server privilege separation user";
+      uid = config.ids.uids.lighttpd;
+    };
+
+    users.groups.lighttpd.gid = config.ids.gids.lighttpd;
+  };
+}
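For illustration, a minimal (untested) lighttpd setup using only options defined above; the document root is a placeholder:

    services.lighttpd = {
      enable = true;
      port = 80;
      document-root = "/srv/www";
      enableUpstreamMimeTypes = true;
      mod_status = true;
    };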
diff --git a/nixpkgs/nixos/modules/services/web-servers/lighttpd/gitweb.nix b/nixpkgs/nixos/modules/services/web-servers/lighttpd/gitweb.nix
new file mode 100644
index 000000000000..e129e8bc1666
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/lighttpd/gitweb.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.gitweb;
+  package = pkgs.gitweb.override (optionalAttrs cfg.gitwebTheme {
+    gitwebTheme = true;
+  });
+
+in
+{
+
+  options.services.lighttpd.gitweb = {
+
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        If true, enable gitweb in lighttpd. Access it at http://yourserver/gitweb
+      '';
+    };
+
+  };
+
+  config = mkIf config.services.lighttpd.gitweb.enable {
+
+    # declare module dependencies
+    services.lighttpd.enableModules = [ "mod_cgi" "mod_redirect" "mod_alias" "mod_setenv" ];
+
+    services.lighttpd.extraConfig = ''
+      $HTTP["url"] =~ "^/gitweb" {
+          cgi.assign = (
+              ".cgi" => "${pkgs.perl}/bin/perl"
+          )
+          url.redirect = (
+              "^/gitweb$" => "/gitweb/"
+          )
+          alias.url = (
+              "/gitweb/static/" => "${package}/static/",
+              "/gitweb/"        => "${package}/gitweb.cgi"
+          )
+          setenv.add-environment = (
+              "GITWEB_CONFIG" => "${cfg.gitwebConfigFile}",
+              "HOME" => "${cfg.projectroot}"
+          )
+      }
+    '';
+
+  };
+
+}
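For illustration, an (untested) sketch enabling gitweb under lighttpd; services.gitweb.projectroot is defined by the shared gitweb module (not shown in this diff) and the path is a placeholder:

    services.lighttpd.enable = true;
    services.lighttpd.gitweb.enable = true;
    services.gitweb.projectroot = "/srv/git";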
diff --git a/nixpkgs/nixos/modules/services/web-servers/merecat.nix b/nixpkgs/nixos/modules/services/web-servers/merecat.nix
new file mode 100644
index 000000000000..aad93605b717
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/merecat.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.merecat;
+  format = pkgs.formats.keyValue {
+    mkKeyValue = generators.mkKeyValueDefault {
+      mkValueString = v:
+        # In merecat.conf, booleans are "true" and "false"
+        if builtins.isBool v
+        then if v then "true" else "false"
+        else generators.mkValueStringDefault {} v;
+    } "=";
+  };
+  configFile = format.generate "merecat.conf" cfg.settings;
+
+in {
+
+  options.services.merecat = {
+
+    enable = mkEnableOption (lib.mdDoc "Merecat HTTP server");
+
+    settings = mkOption {
+      inherit (format) type;
+      default = { };
+      description = lib.mdDoc ''
+        Merecat configuration. Refer to merecat(8) for details on supported values.
+      '';
+      example = {
+        hostname = "localhost";
+        port = 8080;
+        virtual-host = true;
+        directory = "/srv/www";
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    systemd.services.merecat = {
+      description = "Merecat HTTP server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        ExecStart = "${pkgs.merecat}/bin/merecat -n -f ${configFile}";
+        AmbientCapabilities = lib.mkIf ((cfg.settings.port or 80) < 1024) [ "CAP_NET_BIND_SERVICE" ];
+      };
+    };
+
+  };
+
+}
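For illustration, an (untested) configuration mirroring the settings example from the option above:

    services.merecat = {
      enable = true;
      settings = {
        hostname = "localhost";
        port = 8080;
        directory = "/srv/www";
      };
    };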
diff --git a/nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix b/nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix
new file mode 100644
index 000000000000..bdd6d8b62aa3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/mighttpd2.nix
@@ -0,0 +1,132 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.mighttpd2;
+  configFile = pkgs.writeText "mighty-config" cfg.config;
+  routingFile = pkgs.writeText "mighty-routing" cfg.routing;
+in {
+  options.services.mighttpd2 = {
+    enable = mkEnableOption (lib.mdDoc "Mighttpd2 web server");
+
+    config = mkOption {
+      default = "";
+      example = ''
+        # Example configuration for Mighttpd 2
+        Port: 80
+        # IP address or "*"
+        Host: *
+        Debug_Mode: Yes # Yes or No
+        # If available, "nobody" is much more secure for User:.
+        User: root
+        # If available, "nobody" is much more secure for Group:.
+        Group: root
+        Pid_File: /run/mighty.pid
+        Logging: Yes # Yes or No
+        Log_File: /var/log/mighty # The directory must be writable by User:
+        Log_File_Size: 16777216 # bytes
+        Log_Backup_Number: 10
+        Index_File: index.html
+        Index_Cgi: index.cgi
+        Status_File_Dir: /usr/local/share/mighty/status
+        Connection_Timeout: 30 # seconds
+        Fd_Cache_Duration: 10 # seconds
+        # Server_Name: Mighttpd/3.x.y
+        Tls_Port: 443
+        Tls_Cert_File: cert.pem # should change this with an absolute path
+        # should change this with comma-separated absolute paths
+        Tls_Chain_Files: chain.pem
+        # Currently, Tls_Key_File must not be encrypted.
+        Tls_Key_File: privkey.pem # should change this with an absolute path
+        Service: 0 # 0 is HTTP only, 1 is HTTPS only, 2 is both
+      '';
+      type = types.lines;
+      description = lib.mdDoc ''
+        Verbatim config file to use
+        (see https://kazu-yamamoto.github.io/mighttpd2/config.html)
+      '';
+    };
+
+    routing = mkOption {
+      default = "";
+      example = ''
+        # Example routing for Mighttpd 2
+
+        # Domain lists
+        [localhost www.example.com]
+
+        # Entries are looked up in the specified order
+        # All paths must end with "/"
+
+        # A path to CGI scripts should be specified with "=>"
+        /~alice/cgi-bin/ => /home/alice/public_html/cgi-bin/
+
+        # A path to static files should be specified with "->"
+        /~alice/         -> /home/alice/public_html/
+        /cgi-bin/        => /export/cgi-bin/
+
+        # Reverse proxy rules should be specified with ">>"
+        # /path >> host:port/path2
+        # Either "host" or ":port" can be committed, but not both.
+        /app/cal/        >> example.net/calendar/
+        # Yesod app in the same server
+        /app/wiki/       >> 127.0.0.1:3000/
+
+        /                -> /export/www/
+      '';
+      type = types.lines;
+      description = lib.mdDoc ''
+        Verbatim routing file to use
+        (see https://kazu-yamamoto.github.io/mighttpd2/config.html)
+      '';
+    };
+
+    cores = mkOption {
+      default = null;
+      type = types.nullOr types.int;
+      description = lib.mdDoc ''
+        How many cores to use.
+        If null, it will be determined automatically.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    assertions =
+      [ { assertion = cfg.routing != "";
+          message = "You need at least one rule in mighttpd2.routing";
+        }
+      ];
+    systemd.services.mighttpd2 = {
+      description = "Mighttpd2 web server";
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.haskellPackages.mighttpd2}/bin/mighty \
+            ${configFile} \
+            ${routingFile} \
+            +RTS -N${optionalString (cfg.cores != null) (toString cfg.cores)}
+        '';
+        Type = "simple";
+        User = "mighttpd2";
+        Group = "mighttpd2";
+        Restart = "on-failure";
+        AmbientCapabilities = "cap_net_bind_service";
+        CapabilityBoundingSet = "cap_net_bind_service";
+      };
+    };
+
+    users.users.mighttpd2 = {
+      group = "mighttpd2";
+      uid = config.ids.uids.mighttpd2;
+      isSystemUser = true;
+    };
+
+    users.groups.mighttpd2.gid = config.ids.gids.mighttpd2;
+  };
+
+  meta.maintainers = with lib.maintainers; [ fgaz ];
+}
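For illustration, an (untested) sketch; the config and routing contents are placeholders that would follow the commented examples shown in the options above:

    services.mighttpd2 = {
      enable = true;
      config = ''
        Port: 80
        Host: *
      '';  # placeholder; see the fuller example in the `config` option
      routing = ''
        [localhost]
        / -> /srv/www/
      '';
      cores = 2;
    };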
diff --git a/nixpkgs/nixos/modules/services/web-servers/minio.nix b/nixpkgs/nixos/modules/services/web-servers/minio.nix
new file mode 100644
index 000000000000..0bc7421a0e32
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/minio.nix
@@ -0,0 +1,163 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.minio;
+
+  legacyCredentials = cfg: pkgs.writeText "minio-legacy-credentials" ''
+    MINIO_ROOT_USER=${cfg.accessKey}
+    MINIO_ROOT_PASSWORD=${cfg.secretKey}
+  '';
+in
+{
+  meta.maintainers = [ maintainers.bachp ];
+
+  options.services.minio = {
+    enable = mkEnableOption (lib.mdDoc "Minio Object Storage");
+
+    listenAddress = mkOption {
+      default = ":9000";
+      type = types.str;
+      description = lib.mdDoc "IP address and port of the server.";
+    };
+
+    consoleAddress = mkOption {
+      default = ":9001";
+      type = types.str;
+      description = lib.mdDoc "IP address and port of the web UI (console).";
+    };
+
+    dataDir = mkOption {
+      default = [ "/var/lib/minio/data" ];
+      type = types.listOf (types.either types.path types.str);
+      description = lib.mdDoc "The list of data directories or nodes for storing the objects. Use one path for regular operation and the minimum of 4 endpoints for Erasure Code mode.";
+    };
+
+    configDir = mkOption {
+      default = "/var/lib/minio/config";
+      type = types.path;
+      description = lib.mdDoc "The config directory, for the access keys and other settings.";
+    };
+
+    accessKey = mkOption {
+      default = "";
+      type = types.str;
+      description = lib.mdDoc ''
+        Access key of 5 to 20 characters in length that clients use to access the server.
+        This overrides the access key that is generated by minio on first startup and stored inside the
+        `configDir` directory.
+      '';
+    };
+
+    secretKey = mkOption {
+      default = "";
+      type = types.str;
+      description = lib.mdDoc ''
+        Specify the Secret key of 8 to 40 characters in length that clients use to access the server.
+        This overrides the secret key that is generated by minio on first startup and stored inside the
+        `configDir` directory.
+      '';
+    };
+
+    rootCredentialsFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        File containing MINIO_ROOT_USER (default "minioadmin") and
+        MINIO_ROOT_PASSWORD (length >= 8, default "minioadmin"), in the format of
+        an EnvironmentFile=, as described by systemd.exec(5).
+      '';
+      example = "/etc/nixos/minio-root-credentials";
+    };
+
+    region = mkOption {
+      default = "us-east-1";
+      type = types.str;
+      description = lib.mdDoc ''
+        The physical location of the server. By default it is set to us-east-1, which is the same as AWS S3's and Minio's default region.
+      '';
+    };
+
+    browser = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc "Enable or disable access to web UI.";
+    };
+
+    package = mkOption {
+      default = pkgs.minio;
+      defaultText = literalExpression "pkgs.minio";
+      type = types.package;
+      description = lib.mdDoc "Minio package to use.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    warnings = optional ((cfg.accessKey != "") || (cfg.secretKey != "")) "services.minio.`accessKey` and services.minio.`secretKey` are deprecated, please use services.minio.`rootCredentialsFile` instead.";
+
+    systemd = lib.mkMerge [{
+      tmpfiles.rules = [
+        "d '${cfg.configDir}' - minio minio - -"
+      ] ++ (map (x: "d '" + x + "' - minio minio - - ") (builtins.filter lib.types.path.check cfg.dataDir));
+
+      services.minio = {
+        description = "Minio Object Storage";
+        after = [ "network-online.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/minio server --json --address ${cfg.listenAddress} --console-address ${cfg.consoleAddress} --config-dir=${cfg.configDir} ${toString cfg.dataDir}";
+          Type = "simple";
+          User = "minio";
+          Group = "minio";
+          LimitNOFILE = 65536;
+          EnvironmentFile =
+            if (cfg.rootCredentialsFile != null) then cfg.rootCredentialsFile
+            else if ((cfg.accessKey != "") || (cfg.secretKey != "")) then (legacyCredentials cfg)
+            else null;
+        };
+        environment = {
+          MINIO_REGION = "${cfg.region}";
+          MINIO_BROWSER = "${if cfg.browser then "on" else "off"}";
+        };
+      };
+    }
+
+      (lib.mkIf (cfg.rootCredentialsFile != null) {
+        # The service will fail if the credentials file is missing
+        services.minio.unitConfig.ConditionPathExists = cfg.rootCredentialsFile;
+
+        # The service will not restart if the credentials file has
+        # been changed. This can cause stale root credentials.
+        paths.minio-root-credentials = {
+          wantedBy = [ "multi-user.target" ];
+
+          pathConfig = {
+            PathChanged = [ cfg.rootCredentialsFile ];
+            Unit = "minio-restart.service";
+          };
+        };
+
+        services.minio-restart = {
+          description = "Restart MinIO";
+
+          script = ''
+            systemctl restart minio.service
+          '';
+
+          serviceConfig = {
+            Type = "oneshot";
+            Restart = "on-failure";
+            RestartSec = 5;
+          };
+        };
+      })];
+
+    users.users.minio = {
+      group = "minio";
+      uid = config.ids.uids.minio;
+    };
+
+    users.groups.minio.gid = config.ids.gids.minio;
+  };
+}
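For illustration, an (untested) sketch using the defaults shown above plus a root-credentials file (the path is a placeholder):

    services.minio = {
      enable = true;
      listenAddress = ":9000";
      consoleAddress = ":9001";
      dataDir = [ "/var/lib/minio/data" ];
      rootCredentialsFile = "/etc/nixos/minio-root-credentials";
    };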
diff --git a/nixpkgs/nixos/modules/services/web-servers/molly-brown.nix b/nixpkgs/nixos/modules/services/web-servers/molly-brown.nix
new file mode 100644
index 000000000000..6d7ca0c12ef7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/molly-brown.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.molly-brown;
+  settingsFormat = pkgs.formats.toml { };
+  configFile = settingsFormat.generate "molly-brown.toml" cfg.settings;
+in {
+
+  options.services.molly-brown = {
+
+    enable = mkEnableOption (lib.mdDoc "Molly-Brown Gemini server");
+
+    port = mkOption {
+      default = 1965;
+      type = types.port;
+      description = lib.mdDoc ''
+        TCP port for molly-brown to bind to.
+      '';
+    };
+
+    hostName = mkOption {
+      type = types.str;
+      default = config.networking.hostName;
+      defaultText = literalExpression "config.networking.hostName";
+      description = lib.mdDoc ''
+        The hostname to respond to requests for. Requests for URLs with
+        other hosts will result in a status 53 (PROXY REQUEST REFUSED)
+        response.
+      '';
+    };
+
+    certPath = mkOption {
+      type = types.path;
+      example = "/var/lib/acme/example.com/cert.pem";
+      description = lib.mdDoc ''
+        Path to TLS certificate. An ACME certificate and key may be
+        shared with an HTTP server, but only if molly-brown has
+        permissions allowing it to read such keys.
+
+        As an example:
+        ```
+        systemd.services.molly-brown.serviceConfig.SupplementaryGroups =
+          [ config.security.acme.certs."example.com".group ];
+        ```
+      '';
+    };
+
+    keyPath = mkOption {
+      type = types.path;
+      example = "/var/lib/acme/example.com/key.pem";
+      description = lib.mdDoc "Path to TLS key. See {option}`CertPath`.";
+    };
+
+    docBase = mkOption {
+      type = types.path;
+      example = "/var/lib/molly-brown";
+      description = lib.mdDoc "Base directory for Gemini content.";
+    };
+
+    settings = mkOption {
+      inherit (settingsFormat) type;
+      default = { };
+      description = lib.mdDoc ''
+        molly-brown configuration. Refer to
+        <https://tildegit.org/solderpunk/molly-brown/src/branch/master/example.conf>
+        for details on supported values.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    services.molly-brown.settings = let logDir = "/var/log/molly-brown";
+    in {
+      Port = cfg.port;
+      Hostname = cfg.hostName;
+      CertPath = cfg.certPath;
+      KeyPath = cfg.keyPath;
+      DocBase = cfg.docBase;
+      AccessLog = "${logDir}/access.log";
+      ErrorLog = "${logDir}/error.log";
+    };
+
+    systemd.services.molly-brown = {
+      description = "Molly Brown gemini server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        DynamicUser = true;
+        LogsDirectory = "molly-brown";
+        ExecStart = "${pkgs.molly-brown}/bin/molly-brown -c ${configFile}";
+        Restart = "always";
+      };
+    };
+
+  };
+
+}
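For illustration, an (untested) sketch wiring up the options defined above; the certificate paths match the examples given in the options and are placeholders:

    services.molly-brown = {
      enable = true;
      hostName = "example.com";
      docBase = "/var/lib/molly-brown";
      certPath = "/var/lib/acme/example.com/cert.pem";
      keyPath = "/var/lib/acme/example.com/key.pem";
    };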
diff --git a/nixpkgs/nixos/modules/services/web-servers/nginx/default.nix b/nixpkgs/nixos/modules/services/web-servers/nginx/default.nix
new file mode 100644
index 000000000000..cf70dc325945
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/nginx/default.nix
@@ -0,0 +1,1359 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nginx;
+  inherit (config.security.acme) certs;
+  vhostsConfigs = mapAttrsToList (vhostName: vhostConfig: vhostConfig) virtualHosts;
+  acmeEnabledVhosts = filter (vhostConfig: vhostConfig.enableACME || vhostConfig.useACMEHost != null) vhostsConfigs;
+  dependentCertNames = unique (map (hostOpts: hostOpts.certName) acmeEnabledVhosts);
+  virtualHosts = mapAttrs (vhostName: vhostConfig:
+    let
+      serverName = if vhostConfig.serverName != null
+        then vhostConfig.serverName
+        else vhostName;
+      certName = if vhostConfig.useACMEHost != null
+        then vhostConfig.useACMEHost
+        else serverName;
+    in
+    vhostConfig // {
+      inherit serverName certName;
+    } // (optionalAttrs (vhostConfig.enableACME || vhostConfig.useACMEHost != null) {
+      sslCertificate = "${certs.${certName}.directory}/fullchain.pem";
+      sslCertificateKey = "${certs.${certName}.directory}/key.pem";
+      sslTrustedCertificate = if vhostConfig.sslTrustedCertificate != null
+                              then vhostConfig.sslTrustedCertificate
+                              else "${certs.${certName}.directory}/chain.pem";
+    })
+  ) cfg.virtualHosts;
+  inherit (config.networking) enableIPv6;
+
+  # Mime.types values are taken from brotli sample configuration - https://github.com/google/ngx_brotli
+  # and Nginx Server Configs - https://github.com/h5bp/server-configs-nginx
+  # "text/html" is implicitly included in {brotli,gzip,zstd}_types
+  compressMimeTypes = [
+    "application/atom+xml"
+    "application/geo+json"
+    "application/javascript" # Deprecated by IETF RFC 9239, but still widely used
+    "application/json"
+    "application/ld+json"
+    "application/manifest+json"
+    "application/rdf+xml"
+    "application/vnd.ms-fontobject"
+    "application/wasm"
+    "application/x-rss+xml"
+    "application/x-web-app-manifest+json"
+    "application/xhtml+xml"
+    "application/xliff+xml"
+    "application/xml"
+    "font/collection"
+    "font/otf"
+    "font/ttf"
+    "image/bmp"
+    "image/svg+xml"
+    "image/vnd.microsoft.icon"
+    "text/cache-manifest"
+    "text/calendar"
+    "text/css"
+    "text/csv"
+    "text/javascript"
+    "text/markdown"
+    "text/plain"
+    "text/vcard"
+    "text/vnd.rim.location.xloc"
+    "text/vtt"
+    "text/x-component"
+    "text/xml"
+  ];
+
+  defaultFastcgiParams = {
+    SCRIPT_FILENAME   = "$document_root$fastcgi_script_name";
+    QUERY_STRING      = "$query_string";
+    REQUEST_METHOD    = "$request_method";
+    CONTENT_TYPE      = "$content_type";
+    CONTENT_LENGTH    = "$content_length";
+
+    SCRIPT_NAME       = "$fastcgi_script_name";
+    REQUEST_URI       = "$request_uri";
+    DOCUMENT_URI      = "$document_uri";
+    DOCUMENT_ROOT     = "$document_root";
+    SERVER_PROTOCOL   = "$server_protocol";
+    REQUEST_SCHEME    = "$scheme";
+    HTTPS             = "$https if_not_empty";
+
+    GATEWAY_INTERFACE = "CGI/1.1";
+    SERVER_SOFTWARE   = "nginx/$nginx_version";
+
+    REMOTE_ADDR       = "$remote_addr";
+    REMOTE_PORT       = "$remote_port";
+    SERVER_ADDR       = "$server_addr";
+    SERVER_PORT       = "$server_port";
+    SERVER_NAME       = "$server_name";
+
+    REDIRECT_STATUS   = "200";
+  };
+
+  recommendedProxyConfig = pkgs.writeText "nginx-recommended-proxy-headers.conf" ''
+    proxy_set_header        Host $host;
+    proxy_set_header        X-Real-IP $remote_addr;
+    proxy_set_header        X-Forwarded-For $proxy_add_x_forwarded_for;
+    proxy_set_header        X-Forwarded-Proto $scheme;
+    proxy_set_header        X-Forwarded-Host $host;
+    proxy_set_header        X-Forwarded-Server $host;
+  '';
+
+  proxyCachePathConfig = concatStringsSep "\n" (mapAttrsToList (name: proxyCachePath: ''
+    proxy_cache_path ${concatStringsSep " " [
+      "/var/cache/nginx/${name}"
+      "keys_zone=${proxyCachePath.keysZoneName}:${proxyCachePath.keysZoneSize}"
+      "levels=${proxyCachePath.levels}"
+      "use_temp_path=${if proxyCachePath.useTempPath then "on" else "off"}"
+      "inactive=${proxyCachePath.inactive}"
+      "max_size=${proxyCachePath.maxSize}"
+    ]};
+  '') (filterAttrs (name: conf: conf.enable) cfg.proxyCachePath));
+
+  toUpstreamParameter = key: value:
+    if builtins.isBool value
+    then lib.optionalString value key
+    else "${key}=${toString value}";
+
+  upstreamConfig = toString (flip mapAttrsToList cfg.upstreams (name: upstream: ''
+    upstream ${name} {
+      ${toString (flip mapAttrsToList upstream.servers (name: server: ''
+        server ${name} ${concatStringsSep " " (mapAttrsToList toUpstreamParameter server)};
+      ''))}
+      ${upstream.extraConfig}
+    }
+  ''));
+
+  commonHttpConfig = ''
+      # Load mime types.
+      include ${cfg.defaultMimeTypes};
+      # When recommendedOptimisation is disabled nginx fails to start because the mailcap mime.types database
+      # contains 1026 entries and the default is only 1024. Setting to a higher number to remove the need to
+      # overwrite it because nginx does not allow duplicated settings.
+      types_hash_max_size 4096;
+
+      include ${cfg.package}/conf/fastcgi.conf;
+      include ${cfg.package}/conf/uwsgi_params;
+
+      default_type application/octet-stream;
+  '';
+
+  configFile = pkgs.writers.writeNginxConfig "nginx.conf" ''
+    pid /run/nginx/nginx.pid;
+    error_log ${cfg.logError};
+    daemon off;
+
+    ${optionalString cfg.enableQuicBPF ''
+      quic_bpf on;
+    ''}
+
+    ${cfg.config}
+
+    ${optionalString (cfg.eventsConfig != "" || cfg.config == "") ''
+    events {
+      ${cfg.eventsConfig}
+    }
+    ''}
+
+    ${optionalString (cfg.httpConfig == "" && cfg.config == "") ''
+    http {
+      ${commonHttpConfig}
+
+      ${optionalString (cfg.resolver.addresses != []) ''
+        resolver ${toString cfg.resolver.addresses} ${optionalString (cfg.resolver.valid != "") "valid=${cfg.resolver.valid}"} ${optionalString (!cfg.resolver.ipv6) "ipv6=off"};
+      ''}
+      ${upstreamConfig}
+
+      ${optionalString cfg.recommendedOptimisation ''
+        # optimisation
+        sendfile on;
+        tcp_nopush on;
+        tcp_nodelay on;
+        keepalive_timeout 65;
+      ''}
+
+      ssl_protocols ${cfg.sslProtocols};
+      ${optionalString (cfg.sslCiphers != null) "ssl_ciphers ${cfg.sslCiphers};"}
+      ${optionalString (cfg.sslDhparam != null) "ssl_dhparam ${cfg.sslDhparam};"}
+
+      ${optionalString cfg.recommendedTlsSettings ''
+        # Keep in sync with https://ssl-config.mozilla.org/#server=nginx&config=intermediate
+
+        ssl_session_timeout 1d;
+        ssl_session_cache shared:SSL:10m;
+        # Breaks forward secrecy: https://github.com/mozilla/server-side-tls/issues/135
+        ssl_session_tickets off;
+        # We don't enable insecure ciphers by default, so this allows
+        # clients to pick the most performant, per https://github.com/mozilla/server-side-tls/issues/260
+        ssl_prefer_server_ciphers off;
+
+        # OCSP stapling
+        ssl_stapling on;
+        ssl_stapling_verify on;
+      ''}
+
+      ${optionalString cfg.recommendedBrotliSettings ''
+        brotli on;
+        brotli_static on;
+        brotli_comp_level 5;
+        brotli_window 512k;
+        brotli_min_length 256;
+        brotli_types ${lib.concatStringsSep " " compressMimeTypes};
+      ''}
+
+      ${optionalString cfg.recommendedGzipSettings
+        # https://docs.nginx.com/nginx/admin-guide/web-server/compression/
+      ''
+        gzip on;
+        gzip_static on;
+        gzip_vary on;
+        gzip_comp_level 5;
+        gzip_min_length 256;
+        gzip_proxied expired no-cache no-store private auth;
+        gzip_types ${lib.concatStringsSep " " compressMimeTypes};
+      ''}
+
+      ${optionalString cfg.recommendedZstdSettings ''
+        zstd on;
+        zstd_comp_level 9;
+        zstd_min_length 256;
+        zstd_static on;
+        zstd_types ${lib.concatStringsSep " " compressMimeTypes};
+      ''}
+
+      ${optionalString cfg.recommendedProxySettings ''
+        proxy_redirect          off;
+        proxy_connect_timeout   ${cfg.proxyTimeout};
+        proxy_send_timeout      ${cfg.proxyTimeout};
+        proxy_read_timeout      ${cfg.proxyTimeout};
+        proxy_http_version      1.1;
+        # don't let clients close the keep-alive connection to upstream. See the nginx blog for details:
+        # https://www.nginx.com/blog/avoiding-top-10-nginx-configuration-mistakes/#no-keepalives
+        proxy_set_header        "Connection" "";
+        include ${recommendedProxyConfig};
+      ''}
+
+      ${optionalString (cfg.mapHashBucketSize != null) ''
+        map_hash_bucket_size ${toString cfg.mapHashBucketSize};
+      ''}
+
+      ${optionalString (cfg.mapHashMaxSize != null) ''
+        map_hash_max_size ${toString cfg.mapHashMaxSize};
+      ''}
+
+      ${optionalString (cfg.serverNamesHashBucketSize != null) ''
+        server_names_hash_bucket_size ${toString cfg.serverNamesHashBucketSize};
+      ''}
+
+      ${optionalString (cfg.serverNamesHashMaxSize != null) ''
+        server_names_hash_max_size ${toString cfg.serverNamesHashMaxSize};
+      ''}
+
+      # $connection_upgrade is used for websocket proxying
+      map $http_upgrade $connection_upgrade {
+          default upgrade;
+          '''      close;
+      }
+      client_max_body_size ${cfg.clientMaxBodySize};
+
+      server_tokens ${if cfg.serverTokens then "on" else "off"};
+
+      ${cfg.commonHttpConfig}
+
+      ${proxyCachePathConfig}
+
+      ${vhosts}
+
+      ${cfg.appendHttpConfig}
+    }''}
+
+    ${optionalString (cfg.httpConfig != "") ''
+    http {
+      ${commonHttpConfig}
+      ${cfg.httpConfig}
+    }''}
+
+    ${optionalString (cfg.streamConfig != "") ''
+    stream {
+      ${cfg.streamConfig}
+    }
+    ''}
+
+    ${cfg.appendConfig}
+  '';
+
+  configPath = if cfg.enableReload
+    then "/etc/nginx/nginx.conf"
+    else configFile;
+
+  execCommand = "${cfg.package}/bin/nginx -c '${configPath}'";
+
+  vhosts = concatStringsSep "\n" (mapAttrsToList (vhostName: vhost:
+    let
+        onlySSL = vhost.onlySSL || vhost.enableSSL;
+        hasSSL = onlySSL || vhost.addSSL || vhost.forceSSL;
+
+        # First evaluation of defaultListen based on a set of listen lines.
+        mkDefaultListenVhost = listenLines:
+          # If this vhost has SSL or is an SSL rejection host.
+          # We enable a TLS variant for lines without explicit ssl or ssl = true.
+          optionals (hasSSL || vhost.rejectSSL)
+            (map (listen: { port = cfg.defaultSSLListenPort; ssl = true; } // listen)
+            (filter (listen: !(listen ? ssl) || listen.ssl) listenLines))
+          # If this vhost is supposed to serve HTTP
+          # We provide listen lines for those without explicit ssl or ssl = false.
+          ++ optionals (!onlySSL)
+            (map (listen: { port = cfg.defaultHTTPListenPort; ssl = false; } // listen)
+            (filter (listen: !(listen ? ssl) || !listen.ssl) listenLines));
+
+        defaultListen =
+          if vhost.listen != [] then vhost.listen
+          else
+          if cfg.defaultListen != [] then mkDefaultListenVhost
+            # Cleanup nulls which will mess up with //.
+            # TODO: is there a better way to achieve this? i.e. mergeButIgnoreNullPlease?
+            (map (listenLine: filterAttrs (_: v: (v != null)) listenLine) cfg.defaultListen)
+          else
+            let addrs = if vhost.listenAddresses != [] then vhost.listenAddresses else cfg.defaultListenAddresses;
+            in mkDefaultListenVhost (map (addr: { inherit addr; }) addrs);
+
+
+        hostListen =
+          if vhost.forceSSL
+            then filter (x: x.ssl) defaultListen
+            else defaultListen;
+
+        listenString = { addr, port, ssl, proxyProtocol ? false, extraParameters ? [], ... }:
+          # UDP listener for QUIC transport protocol.
+          (optionalString (ssl && vhost.quic) ("
+            listen ${addr}${optionalString (port != null) ":${toString port}"} quic "
+          + optionalString vhost.default "default_server "
+          + optionalString vhost.reuseport "reuseport "
+          + optionalString (extraParameters != []) (concatStringsSep " "
+            (let inCompatibleParameters = [ "ssl" "proxy_protocol" "http2" ];
+                isCompatibleParameter = param: !(any (p: p == param) inCompatibleParameters);
+            in filter isCompatibleParameter extraParameters))
+          + ";"))
+          + "
+            listen ${addr}${optionalString (port != null) ":${toString port}"} "
+          + optionalString (ssl && vhost.http2 && oldHTTP2) "http2 "
+          + optionalString ssl "ssl "
+          + optionalString vhost.default "default_server "
+          + optionalString vhost.reuseport "reuseport "
+          + optionalString proxyProtocol "proxy_protocol "
+          + optionalString (extraParameters != []) (concatStringsSep " " extraParameters)
+          + ";";
+
+        redirectListen = filter (x: !x.ssl) defaultListen;
+
+        # The acme-challenge location doesn't need to be added if we are not using any automated
+        # certificate provisioning and can also be omitted when we use a certificate obtained via a DNS-01 challenge
+        acmeLocation = optionalString (vhost.enableACME || (vhost.useACMEHost != null && config.security.acme.certs.${vhost.useACMEHost}.dnsProvider == null)) ''
+          # Rule for legitimate ACME Challenge requests (like /.well-known/acme-challenge/xxxxxxxxx)
+          # We use ^~ here, so that we don't check any regexes (which could
+          # otherwise easily override this intended match accidentally).
+          location ^~ /.well-known/acme-challenge/ {
+            ${optionalString (vhost.acmeFallbackHost != null) "try_files $uri @acme-fallback;"}
+            ${optionalString (vhost.acmeRoot != null) "root ${vhost.acmeRoot};"}
+            auth_basic off;
+          }
+          ${optionalString (vhost.acmeFallbackHost != null) ''
+            location @acme-fallback {
+              auth_basic off;
+              proxy_pass http://${vhost.acmeFallbackHost};
+            }
+          ''}
+        '';
+
+      in ''
+        ${optionalString vhost.forceSSL ''
+          server {
+            ${concatMapStringsSep "\n" listenString redirectListen}
+
+            server_name ${vhost.serverName} ${concatStringsSep " " vhost.serverAliases};
+            ${acmeLocation}
+            location / {
+              return 301 https://$host$request_uri;
+            }
+          }
+        ''}
+
+        server {
+          ${concatMapStringsSep "\n" listenString hostListen}
+          server_name ${vhost.serverName} ${concatStringsSep " " vhost.serverAliases};
+          ${optionalString (hasSSL && vhost.http2 && !oldHTTP2) ''
+            http2 on;
+          ''}
+          ${optionalString (hasSSL && vhost.quic) ''
+            http3 ${if vhost.http3 then "on" else "off"};
+            http3_hq ${if vhost.http3_hq then "on" else "off"};
+          ''}
+          ${acmeLocation}
+          ${optionalString (vhost.root != null) "root ${vhost.root};"}
+          ${optionalString (vhost.globalRedirect != null) ''
+            location / {
+              return 301 http${optionalString hasSSL "s"}://${vhost.globalRedirect}$request_uri;
+            }
+          ''}
+          ${optionalString hasSSL ''
+            ssl_certificate ${vhost.sslCertificate};
+            ssl_certificate_key ${vhost.sslCertificateKey};
+          ''}
+          ${optionalString (hasSSL && vhost.sslTrustedCertificate != null) ''
+            ssl_trusted_certificate ${vhost.sslTrustedCertificate};
+          ''}
+          ${optionalString vhost.rejectSSL ''
+            ssl_reject_handshake on;
+          ''}
+          ${optionalString (hasSSL && vhost.kTLS) ''
+            ssl_conf_command Options KTLS;
+          ''}
+
+          ${optionalString (hasSSL && vhost.quic && vhost.http3)
+            # Advertise that HTTP/3 is available
+          ''
+            add_header Alt-Svc 'h3=":$server_port"; ma=86400';
+          ''}
+
+          ${mkBasicAuth vhostName vhost}
+
+          ${mkLocations vhost.locations}
+
+          ${vhost.extraConfig}
+        }
+      ''
+  ) virtualHosts);
+  mkLocations = locations: concatStringsSep "\n" (map (config: ''
+    location ${config.location} {
+      ${optionalString (config.proxyPass != null && !cfg.proxyResolveWhileRunning)
+        "proxy_pass ${config.proxyPass};"
+      }
+      ${optionalString (config.proxyPass != null && cfg.proxyResolveWhileRunning) ''
+        set $nix_proxy_target "${config.proxyPass}";
+        proxy_pass $nix_proxy_target;
+      ''}
+      ${optionalString config.proxyWebsockets ''
+        proxy_http_version 1.1;
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection $connection_upgrade;
+      ''}
+      ${concatStringsSep "\n"
+        (mapAttrsToList (n: v: ''fastcgi_param ${n} "${v}";'')
+          (optionalAttrs (config.fastcgiParams != {})
+            (defaultFastcgiParams // config.fastcgiParams)))}
+      ${optionalString (config.index != null) "index ${config.index};"}
+      ${optionalString (config.tryFiles != null) "try_files ${config.tryFiles};"}
+      ${optionalString (config.root != null) "root ${config.root};"}
+      ${optionalString (config.alias != null) "alias ${config.alias};"}
+      ${optionalString (config.return != null) "return ${config.return};"}
+      ${config.extraConfig}
+      ${optionalString (config.proxyPass != null && config.recommendedProxySettings) "include ${recommendedProxyConfig};"}
+      ${mkBasicAuth "sublocation" config}
+    }
+  '') (sortProperties (mapAttrsToList (k: v: v // { location = k; }) locations)));
+
+  mkBasicAuth = name: zone: optionalString (zone.basicAuthFile != null || zone.basicAuth != {}) (let
+    auth_file = if zone.basicAuthFile != null
+      then zone.basicAuthFile
+      else mkHtpasswd name zone.basicAuth;
+  in ''
+    auth_basic secured;
+    auth_basic_user_file ${auth_file};
+  '');
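+  # Note: passwords from `basicAuth` are written with the {PLAIN} prefix, i.e.
+  # stored in plain text in the world-readable Nix store (see the warning on
+  # the basicAuth option).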
+  mkHtpasswd = name: authDef: pkgs.writeText "${name}.htpasswd" (
+    concatStringsSep "\n" (mapAttrsToList (user: password: ''
+      ${user}:{PLAIN}${password}
+    '') authDef)
+  );
+
+  mkCertOwnershipAssertion = import ../../../security/acme/mk-cert-ownership-assertion.nix;
+
+  oldHTTP2 = versionOlder cfg.package.version "1.25.1";
+in
+
+{
+  options = {
+    services.nginx = {
+      enable = mkEnableOption (lib.mdDoc "Nginx Web Server");
+
+      statusPage = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable status page reachable from localhost on http://127.0.0.1/nginx_status.
+        '';
+      };
+
+      recommendedTlsSettings = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable recommended TLS settings.
+        '';
+      };
+
+      recommendedOptimisation = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable recommended optimisation settings.
+        '';
+      };
+
+      recommendedBrotliSettings = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable recommended brotli settings.
+          Learn more about compression in Brotli format [here](https://github.com/google/ngx_brotli/).
+
+          This adds `pkgs.nginxModules.brotli` to `services.nginx.additionalModules`.
+        '';
+      };
+
+      recommendedGzipSettings = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable recommended gzip settings.
+          Learn more about compression in Gzip format [here](https://docs.nginx.com/nginx/admin-guide/web-server/compression/).
+        '';
+      };
+
+      recommendedZstdSettings = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable recommended zstd settings.
+          Learn more about compression in Zstd format [here](https://github.com/tokers/zstd-nginx-module).
+
+          This adds `pkgs.nginxModules.zstd` to `services.nginx.additionalModules`.
+        '';
+      };
+
+      recommendedProxySettings = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable recommended proxy settings if a vhost does not specify the option manually.
+        '';
+      };
+
+      proxyTimeout = mkOption {
+        type = types.str;
+        default = "60s";
+        example = "20s";
+        description = lib.mdDoc ''
+          Change the proxy related timeouts in recommendedProxySettings.
+        '';
+      };
+
+      defaultListen = mkOption {
+        type = with types; listOf (submodule {
+          options = {
+            addr = mkOption {
+              type = str;
+              description = lib.mdDoc "IP address.";
+            };
+            port = mkOption {
+              type = nullOr port;
+              description = lib.mdDoc "Port number.";
+              default = null;
+            };
+            ssl  = mkOption {
+              type = nullOr bool;
+              default = null;
+              description = lib.mdDoc "Enable SSL.";
+            };
+            proxyProtocol = mkOption {
+              type = bool;
+              description = lib.mdDoc "Enable PROXY protocol.";
+              default = false;
+            };
+            extraParameters = mkOption {
+              type = listOf str;
+              description = lib.mdDoc "Extra parameters of this listen directive.";
+              default = [ ];
+              example = [ "backlog=1024" "deferred" ];
+            };
+          };
+        });
+        default = [];
+        example = literalExpression ''
+          [
+            { addr = "10.0.0.12"; proxyProtocol = true; ssl = true; }
+            { addr = "0.0.0.0"; }
+            { addr = "[::0]"; }
+          ]
+        '';
+        description = lib.mdDoc ''
+          If vhosts do not specify listen, use these addresses by default.
+          This option takes precedence over {option}`defaultListenAddresses` and
+          other listen-related default options.
+        '';
+      };
+
+      defaultListenAddresses = mkOption {
+        type = types.listOf types.str;
+        default = [ "0.0.0.0" ] ++ optional enableIPv6 "[::0]";
+        defaultText = literalExpression ''[ "0.0.0.0" ] ++ lib.optional config.networking.enableIPv6 "[::0]"'';
+        example = literalExpression ''[ "10.0.0.12" "[2002:a00:1::]" ]'';
+        description = lib.mdDoc ''
+          If vhosts do not specify listenAddresses, use these addresses by default.
+          This is akin to writing `defaultListen = [ { addr = "0.0.0.0" } ]`.
+        '';
+      };
+
+      defaultHTTPListenPort = mkOption {
+        type = types.port;
+        default = 80;
+        example = 8080;
+        description = lib.mdDoc ''
+          If vhosts do not specify listen.port, use this port for HTTP by default.
+        '';
+      };
+
+      defaultSSLListenPort = mkOption {
+        type = types.port;
+        default = 443;
+        example = 8443;
+        description = lib.mdDoc ''
+          If vhosts do not specify listen.port, use this port for SSL by default.
+        '';
+      };
+
+      defaultMimeTypes = mkOption {
+        type = types.path;
+        default = "${pkgs.mailcap}/etc/nginx/mime.types";
+        defaultText = literalExpression "$''{pkgs.mailcap}/etc/nginx/mime.types";
+        example = literalExpression "$''{pkgs.nginx}/conf/mime.types";
+        description = lib.mdDoc ''
+          Default MIME types for NGINX. Because the MIME type definitions shipped with
+          NGINX are very incomplete, we default to the ones bundled in the mailcap
+          package, which are used by most other Linux distributions.
+        '';
+      };
+
+      package = mkOption {
+        default = pkgs.nginxStable;
+        defaultText = literalExpression "pkgs.nginxStable";
+        type = types.package;
+        apply = p: p.override {
+          modules = lib.unique (p.modules ++ cfg.additionalModules);
+        };
+        description = lib.mdDoc ''
+          Nginx package to use. This defaults to the stable version. Note
+          that the nginx team recommends using the mainline version, which is
+          available in nixpkgs as `nginxMainline`.
+        '';
+      };
+
+      additionalModules = mkOption {
+        default = [];
+        type = types.listOf (types.attrsOf types.anything);
+        example = literalExpression "[ pkgs.nginxModules.echo ]";
+        description = lib.mdDoc ''
+          Additional [third-party nginx modules](https://www.nginx.com/resources/wiki/modules/)
+          to install. Packaged modules are available in `pkgs.nginxModules`.
+        '';
+      };
+
+      logError = mkOption {
+        default = "stderr";
+        type = types.str;
+        description = lib.mdDoc ''
+          Configures logging.
+          The first parameter defines a file that will store the log. The
+          special value stderr selects the standard error file. Logging to
+          syslog can be configured by specifying the "syslog:" prefix.
+          The second parameter determines the level of logging, and can be
+          one of the following: debug, info, notice, warn, error, crit,
+          alert, or emerg. Log levels above are listed in the order of
+          increasing severity. Setting a certain log level will cause all
+          messages of the specified and more severe log levels to be logged.
+          If this parameter is omitted then error is used.
+        '';
+      };
+
+      preStart =  mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Shell commands executed before the service's nginx is started.
+        '';
+      };
+
+      config = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Verbatim {file}`nginx.conf` configuration.
+          This is mutually exclusive to any other config option for
+          {file}`nginx.conf` except for
+          - [](#opt-services.nginx.appendConfig)
+          - [](#opt-services.nginx.httpConfig)
+          - [](#opt-services.nginx.logError)
+
+          If additional verbatim config in addition to other options is needed,
+          [](#opt-services.nginx.appendConfig) should be used instead.
+        '';
+      };
+
+      appendConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration lines appended to the generated Nginx
+          configuration file. Commonly used by different modules
+          providing http snippets. {option}`appendConfig`
+          can be specified more than once and its value will be
+          concatenated (contrary to {option}`config` which
+          can be set only once).
+        '';
+      };
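+      # Illustrative sketch (not part of the module): appendConfig adds verbatim
+      # top-level directives to the generated nginx.conf, e.g.
+      #   services.nginx.appendConfig = "worker_rlimit_nofile 8192;";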
+
+      commonHttpConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          resolver 127.0.0.1 valid=5s;
+
+          log_format myformat '$remote_addr - $remote_user [$time_local] '
+                              '"$request" $status $body_bytes_sent '
+                              '"$http_referer" "$http_user_agent"';
+        '';
+        description = lib.mdDoc ''
+          With nginx you must provide common http context definitions before
+          they are used, e.g. log_format, resolver, etc. inside of server
+          or location contexts. Use this attribute to set these definitions
+          at the appropriate location.
+        '';
+      };
+
+      httpConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration lines to be set inside the http block.
+          This is mutually exclusive with the structured configuration
+          via virtualHosts and the recommendedXyzSettings configuration
+          options. See appendHttpConfig for appending to the generated http block.
+        '';
+      };
+
+      streamConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          server {
+            listen 127.0.0.1:53 udp reuseport;
+            proxy_timeout 20s;
+            proxy_pass 192.168.0.1:53535;
+          }
+        '';
+        description = lib.mdDoc ''
+          Configuration lines to be set inside the stream block.
+        '';
+      };
+
+      eventsConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration lines to be set inside the events block.
+        '';
+      };
+
+      appendHttpConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Configuration lines to be appended to the generated http block.
+          This is mutually exclusive with using config and httpConfig for
+          specifying the whole http block verbatim.
+        '';
+      };
+
+      enableReload = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Reload nginx when configuration file changes (instead of restart).
+          The configuration file is exposed at {file}`/etc/nginx/nginx.conf`.
+          See also `systemd.services.*.restartIfChanged`.
+        '';
+      };
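+      # Note: with enableReload = true the generated config is exposed at
+      # /etc/nginx/nginx.conf and the nginx-config-reload unit defined below
+      # reloads the running nginx instead of restarting it on rebuilds.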
+
+      enableQuicBPF = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enables routing of QUIC packets using eBPF, which adds support for
+          QUIC connection migration. The directive is only supported on
+          Linux 5.7 and later.
+          Note that enabling this option makes nginx run with extended
+          capabilities that are usually limited to processes running as root,
+          namely `CAP_SYS_ADMIN` and `CAP_NET_ADMIN`.
+        '';
+      };
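+      # Illustrative sketch: enableQuicBPF requires the QUIC-enabled package
+      # (see the assertion below), e.g.
+      #   services.nginx = {
+      #     package = pkgs.nginxQuic;
+      #     enableQuicBPF = true;
+      #   };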
+
+      user = mkOption {
+        type = types.str;
+        default = "nginx";
+        description = lib.mdDoc "User account under which nginx runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "nginx";
+        description = lib.mdDoc "Group account under which nginx runs.";
+      };
+
+      serverTokens = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Show nginx version in headers and error pages.";
+      };
+
+      clientMaxBodySize = mkOption {
+        type = types.str;
+        default = "10m";
+        description = lib.mdDoc "Set nginx global client_max_body_size.";
+      };
+
+      sslCiphers = mkOption {
+        type = types.nullOr types.str;
+        # Keep in sync with https://ssl-config.mozilla.org/#server=nginx&config=intermediate
+        default = "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
+        description = lib.mdDoc "Ciphers to choose from when negotiating TLS handshakes.";
+      };
+
+      sslProtocols = mkOption {
+        type = types.str;
+        default = "TLSv1.2 TLSv1.3";
+        example = "TLSv1 TLSv1.1 TLSv1.2 TLSv1.3";
+        description = lib.mdDoc "Allowed TLS protocol versions.";
+      };
+
+      sslDhparam = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/path/to/dhparams.pem";
+        description = lib.mdDoc "Path to DH parameters file.";
+      };
+
+      proxyResolveWhileRunning = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Resolves domains of proxyPass targets at runtime rather than only
+          at startup. If enabled, you must also set
+          services.nginx.resolver.
+        '';
+      };
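+      # Illustrative sketch: runtime resolution needs a resolver to be configured, e.g.
+      #   services.nginx = {
+      #     proxyResolveWhileRunning = true;
+      #     resolver.addresses = [ "127.0.0.1" ];
+      #   };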
+
+      mapHashBucketSize = mkOption {
+        type = types.nullOr (types.enum [ 32 64 128 ]);
+        default = null;
+        description = lib.mdDoc ''
+            Sets the bucket size for the map variables hash tables. Default
+            value depends on the processor’s cache line size.
+          '';
+      };
+
+      mapHashMaxSize = mkOption {
+        type = types.nullOr types.ints.positive;
+        default = null;
+        description = lib.mdDoc ''
+            Sets the maximum size of the map variables hash tables.
+          '';
+      };
+
+      serverNamesHashBucketSize = mkOption {
+        type = types.nullOr types.ints.positive;
+        default = null;
+        description = lib.mdDoc ''
+            Sets the bucket size for the server names hash tables. Default
+            value depends on the processor’s cache line size.
+          '';
+      };
+
+      serverNamesHashMaxSize = mkOption {
+        type = types.nullOr types.ints.positive;
+        default = null;
+        description = lib.mdDoc ''
+            Sets the maximum size of the server names hash tables.
+          '';
+      };
+
+      proxyCachePath = mkOption {
+        type = types.attrsOf (types.submodule ({ ... }: {
+          options = {
+            enable = mkEnableOption (lib.mdDoc "this proxy cache path entry");
+
+            keysZoneName = mkOption {
+              type = types.str;
+              default = "cache";
+              example = "my_cache";
+              description = lib.mdDoc "Set name to shared memory zone.";
+            };
+
+            keysZoneSize = mkOption {
+              type = types.str;
+              default = "10m";
+              example = "32m";
+              description = lib.mdDoc "Set size to shared memory zone.";
+            };
+
+            levels = mkOption {
+              type = types.str;
+              default = "1:2";
+              example = "1:2:2";
+              description = lib.mdDoc ''
+                The levels parameter defines the structure of the subdirectories in the cache:
+                from 1 to 3 levels, each level accepting the value 1 or 2. Any combination of
+                1 and 2 can be used, in these formats: x, x:x and x:x:x.
+              '';
+            };
+
+            useTempPath = mkOption {
+              type = types.bool;
+              default = false;
+              example = true;
+              description = lib.mdDoc ''
+                Nginx first writes files that are destined for the cache to a temporary
+                storage area; the use_temp_path=off parameter instructs Nginx to
+                write them directly to the directories where they will be cached. It is
+                recommended to leave this disabled (use_temp_path=off) to avoid
+                unnecessary copying of data between file systems.
+              '';
+            };
+
+            inactive = mkOption {
+              type = types.str;
+              default = "10m";
+              example = "1d";
+              description = lib.mdDoc ''
+                Cached data that has not been accessed for the time specified by
+                the inactive parameter is removed from the cache, regardless of
+                its freshness.
+              '';
+            };
+
+            maxSize = mkOption {
+              type = types.str;
+              default = "1g";
+              example = "2048m";
+              description = lib.mdDoc "Set maximum cache size";
+            };
+          };
+        }));
+        default = {};
+        description = lib.mdDoc ''
+          Configure a proxy cache path entry.
+          See <https://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_cache_path> for documentation.
+        '';
+      };
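+      # Illustrative sketch (attribute name chosen freely): a single cache zone
+      # could be declared as
+      #   services.nginx.proxyCachePath."main" = {
+      #     enable = true;
+      #     keysZoneName = "my_cache";
+      #     maxSize = "2g";
+      #   };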
+
+      resolver = mkOption {
+        type = types.submodule {
+          options = {
+            addresses = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = literalExpression ''[ "[::1]" "127.0.0.1:5353" ]'';
+              description = lib.mdDoc "List of resolvers to use";
+            };
+            valid = mkOption {
+              type = types.str;
+              default = "";
+              example = "30s";
+              description = lib.mdDoc ''
+                By default, nginx caches answers using the TTL value of a response.
+                The optional valid parameter allows overriding it.
+              '';
+            };
+            ipv6 = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                By default, nginx will look up both IPv4 and IPv6 addresses while resolving.
+                If looking up of IPv6 addresses is not desired, the ipv6=off parameter can be
+                specified.
+              '';
+            };
+          };
+        };
+        description = lib.mdDoc ''
+          Configures name servers used to resolve names of upstream servers into addresses.
+        '';
+        default = {};
+      };
+
+      upstreams = mkOption {
+        type = types.attrsOf (types.submodule {
+          options = {
+            servers = mkOption {
+              type = types.attrsOf (types.submodule {
+                freeformType = types.attrsOf (types.oneOf [ types.bool types.int types.str ]);
+                options = {
+                  backup = mkOption {
+                    type = types.bool;
+                    default = false;
+                    description = lib.mdDoc ''
+                      Marks the server as a backup server. It will be passed
+                      requests when the primary servers are unavailable.
+                    '';
+                  };
+                };
+              });
+              description = lib.mdDoc ''
+                Defines the address and other parameters of the upstream servers.
+                See [the documentation](https://nginx.org/en/docs/http/ngx_http_upstream_module.html#server)
+                for the available parameters.
+              '';
+              default = {};
+              example = lib.literalMD "see [](#opt-services.nginx.upstreams)";
+            };
+            extraConfig = mkOption {
+              type = types.lines;
+              default = "";
+              description = lib.mdDoc ''
+                These lines go to the end of the upstream verbatim.
+              '';
+            };
+          };
+        });
+        description = lib.mdDoc ''
+          Defines a group of servers to use as proxy target.
+        '';
+        default = {};
+        example = {
+          "backend" = {
+            servers = {
+              "backend1.example.com:8080" = { weight = 5; };
+              "backend2.example.com" = { max_fails = 3; fail_timeout = "30s"; };
+              "backend3.example.com" = {};
+              "backup1.example.com" = { backup = true; };
+              "backup2.example.com" = { backup = true; };
+            };
+            extraConfig = ''
+              keepalive 16;
+            '';
+          };
+          "memcached" = {
+            servers."unix:/run//memcached/memcached.sock" = {};
+          };
+        };
+      };
+
+      virtualHosts = mkOption {
+        type = types.attrsOf (types.submodule (import ./vhost-options.nix {
+          inherit config lib;
+        }));
+        default = {
+          localhost = {};
+        };
+        example = literalExpression ''
+          {
+            "hydra.example.com" = {
+              forceSSL = true;
+              enableACME = true;
+              locations."/" = {
+                proxyPass = "http://localhost:3000";
+              };
+            };
+          };
+        '';
+        description = lib.mdDoc "Declarative vhost config";
+      };
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "nginx" "stateDir" ] ''
+      The Nginx log directory has been moved to /var/log/nginx, the cache directory
+      to /var/cache/nginx. The option services.nginx.stateDir has been removed.
+    '')
+    (mkRenamedOptionModule [ "services" "nginx" "proxyCache" "inactive" ] [ "services" "nginx" "proxyCachePath" "" "inactive" ])
+    (mkRenamedOptionModule [ "services" "nginx" "proxyCache" "useTempPath" ] [ "services" "nginx" "proxyCachePath" "" "useTempPath" ])
+    (mkRenamedOptionModule [ "services" "nginx" "proxyCache" "levels" ] [ "services" "nginx" "proxyCachePath" "" "levels" ])
+    (mkRenamedOptionModule [ "services" "nginx" "proxyCache" "keysZoneSize" ] [ "services" "nginx" "proxyCachePath" "" "keysZoneSize" ])
+    (mkRenamedOptionModule [ "services" "nginx" "proxyCache" "keysZoneName" ] [ "services" "nginx" "proxyCachePath" "" "keysZoneName" ])
+    (mkRenamedOptionModule [ "services" "nginx" "proxyCache" "enable" ] [ "services" "nginx" "proxyCachePath" "" "enable" ])
+  ];
+
+  config = mkIf cfg.enable {
+    warnings =
+    let
+      deprecatedSSL = name: config: optional config.enableSSL
+      ''
+        config.services.nginx.virtualHosts.<name>.enableSSL is deprecated,
+        use config.services.nginx.virtualHosts.<name>.onlySSL instead.
+      '';
+
+    in flatten (mapAttrsToList deprecatedSSL virtualHosts);
+
+    assertions =
+    let
+      hostOrAliasIsNull = l: l.root == null || l.alias == null;
+    in [
+      {
+        assertion = all (host: all hostOrAliasIsNull (attrValues host.locations)) (attrValues virtualHosts);
+        message = "Only one of nginx root or alias can be specified on a location.";
+      }
+
+      {
+        assertion = all (host: with host;
+          count id [ addSSL (onlySSL || enableSSL) forceSSL rejectSSL ] <= 1
+        ) (attrValues virtualHosts);
+        message = ''
+          Options services.nginx.virtualHosts.<name>.addSSL,
+          services.nginx.virtualHosts.<name>.onlySSL,
+          services.nginx.virtualHosts.<name>.forceSSL and
+          services.nginx.virtualHosts.<name>.rejectSSL are mutually exclusive.
+        '';
+      }
+
+      {
+        assertion = any (host: host.rejectSSL) (attrValues virtualHosts) -> versionAtLeast cfg.package.version "1.19.4";
+        message = ''
+          services.nginx.virtualHosts.<name>.rejectSSL requires nginx version
+          1.19.4 or above; see the documentation for services.nginx.package.
+        '';
+      }
+
+      {
+        assertion = any (host: host.kTLS) (attrValues virtualHosts) -> versionAtLeast cfg.package.version "1.21.4";
+        message = ''
+          services.nginx.virtualHosts.<name>.kTLS requires nginx version
+          1.21.4 or above; see the documentation for services.nginx.package.
+        '';
+      }
+
+      {
+        assertion = all (host: !(host.enableACME && host.useACMEHost != null)) (attrValues virtualHosts);
+        message = ''
+          Options services.nginx.virtualHosts.<name>.enableACME and
+          services.nginx.virtualHosts.<name>.useACMEHost are mutually exclusive.
+        '';
+      }
+
+      {
+        assertion = cfg.package.pname != "nginxQuic" -> !(cfg.enableQuicBPF);
+        message = ''
+          services.nginx.enableQuicBPF requires using the nginxQuic package,
+          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
+        '';
+      }
+
+      {
+        assertion = cfg.package.pname != "nginxQuic" -> all (host: !host.quic) (attrValues virtualHosts);
+        message = ''
+          services.nginx.virtualHosts.<name>.quic requires using the nginxQuic package,
+          which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
+        '';
+      }
+
+      {
+        # The idea is to detect whether there is a virtual host with a listen configuration
+        # that requires ACME configuration but has no HTTP listener, which would make this
+        # operation fail deterministically.
+        # Options' priorities are the following at the moment:
+        # listen (vhost) > defaultListen (server) > listenAddresses (vhost) > defaultListenAddresses (server)
+        assertion =
+        let
+          hasAtLeastHttpListener = listenOptions: any (listenLine: if listenLine ? proxyProtocol then !listenLine.proxyProtocol else true) listenOptions;
+          hasAtLeastDefaultHttpListener = if cfg.defaultListen != [] then hasAtLeastHttpListener cfg.defaultListen else (cfg.defaultListenAddresses != []);
+        in
+          all (host:
+            let
+              hasAtLeastVhostHttpListener = if host.listen != [] then hasAtLeastHttpListener host.listen else (host.listenAddresses != []);
+              vhostAuthority = host.listen != [] || (cfg.defaultListen == [] && host.listenAddresses != []);
+            in
+              # Either the vhost has precedence and we need a vhost-specific HTTP listener,
+              # or the vhost sets nothing and inherits the server settings.
+              host.enableACME -> ((vhostAuthority && hasAtLeastVhostHttpListener) || (!vhostAuthority && hasAtLeastDefaultHttpListener))
+          ) (attrValues virtualHosts);
+        message = ''
+          services.nginx.virtualHosts.<name>.enableACME requires a HTTP listener
+          to answer to ACME requests.
+        '';
+      }
+    ] ++ map (name: mkCertOwnershipAssertion {
+      inherit (cfg) group user;
+      cert = config.security.acme.certs.${name};
+      groups = config.users.groups;
+    }) dependentCertNames;
+
+    services.nginx.additionalModules = optional cfg.recommendedBrotliSettings pkgs.nginxModules.brotli
+      ++ lib.optional cfg.recommendedZstdSettings pkgs.nginxModules.zstd;
+
+    services.nginx.virtualHosts.localhost = mkIf cfg.statusPage {
+      listenAddresses = lib.mkDefault ([
+        "0.0.0.0"
+      ] ++ lib.optional enableIPv6 "[::]");
+      locations."/nginx_status" = {
+        extraConfig = ''
+          stub_status on;
+          access_log off;
+          allow 127.0.0.1;
+          ${optionalString enableIPv6 "allow ::1;"}
+          deny all;
+        '';
+      };
+    };
+
+    systemd.services.nginx = {
+      description = "Nginx Web Server";
+      wantedBy = [ "multi-user.target" ];
+      wants = concatLists (map (certName: [ "acme-finished-${certName}.target" ]) dependentCertNames);
+      after = [ "network.target" ] ++ map (certName: "acme-selfsigned-${certName}.service") dependentCertNames;
+      # Nginx needs to be started in order to be able to request certificates
+      # (it's hosting the acme challenge after all)
+      # This fixes https://github.com/NixOS/nixpkgs/issues/81842
+      before = map (certName: "acme-${certName}.service") dependentCertNames;
+      stopIfChanged = false;
+      preStart = ''
+        ${cfg.preStart}
+        ${execCommand} -t
+      '';
+
+      startLimitIntervalSec = 60;
+      serviceConfig = {
+        ExecStart = execCommand;
+        ExecReload = [
+          "${execCommand} -t"
+          "${pkgs.coreutils}/bin/kill -HUP $MAINPID"
+        ];
+        Restart = "always";
+        RestartSec = "10s";
+        # User and group
+        User = cfg.user;
+        Group = cfg.group;
+        # Runtime directory and mode
+        RuntimeDirectory = "nginx";
+        RuntimeDirectoryMode = "0750";
+        # Cache directory and mode
+        CacheDirectory = "nginx";
+        CacheDirectoryMode = "0750";
+        # Logs directory and mode
+        LogsDirectory = "nginx";
+        LogsDirectoryMode = "0750";
+        # Proc filesystem
+        ProcSubset = "pid";
+        ProtectProc = "invisible";
+        # New file permissions
+        UMask = "0027"; # 0640 / 0750
+        # Capabilities
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" "CAP_SYS_RESOURCE" ] ++ optionals cfg.enableQuicBPF [ "CAP_SYS_ADMIN" "CAP_NET_ADMIN" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" "CAP_SYS_RESOURCE" ] ++ optionals cfg.enableQuicBPF [ "CAP_SYS_ADMIN" "CAP_NET_ADMIN" ];
+        # Security
+        NoNewPrivileges = true;
+        # Sandboxing (sorted by occurrence in https://www.freedesktop.org/software/systemd/man/systemd.exec.html)
+        ProtectSystem = "strict";
+        ProtectHome = mkDefault true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        MemoryDenyWriteExecute = !((builtins.any (mod: (mod.allowMemoryWriteExecute or false)) cfg.package.modules) || (cfg.package == pkgs.openresty));
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        RemoveIPC = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+        SystemCallFilter = [ "~@cpu-emulation @debug @keyring @mount @obsolete @privileged @setuid" ]
+          ++ optional cfg.enableQuicBPF [ "bpf" ]
+          ++ optionals ((cfg.package != pkgs.tengine) && (cfg.package != pkgs.openresty) && (!lib.any (mod: (mod.disableIPC or false)) cfg.package.modules)) [ "~@ipc" ];
+      };
+    };
+
+    environment.etc."nginx/nginx.conf" = mkIf cfg.enableReload {
+      source = configFile;
+    };
+
+    # This service waits for all certificates to be available
+    # before reloading nginx configuration.
+    # sslTargets are added to wantedBy + before
+    # which allows the acme-finished-$cert.target to signify the successful updating
+    # of certs end-to-end.
+    systemd.services.nginx-config-reload = let
+      sslServices = map (certName: "acme-${certName}.service") dependentCertNames;
+      sslTargets = map (certName: "acme-finished-${certName}.target") dependentCertNames;
+    in mkIf (cfg.enableReload || sslServices != []) {
+      wants = optionals cfg.enableReload [ "nginx.service" ];
+      wantedBy = sslServices ++ [ "multi-user.target" ];
+      # Before the finished targets, after the renew services.
+      # This service might be needed for HTTP-01 challenges, but we only want to confirm
+      # certs are updated _after_ config has been reloaded.
+      before = sslTargets;
+      after = sslServices;
+      restartTriggers = optionals cfg.enableReload [ configFile ];
+      # Block reloading if not all certs exist yet.
+      # Happens when config changes add new vhosts/certs.
+      unitConfig.ConditionPathExists = optionals (sslServices != []) (map (certName: certs.${certName}.directory + "/fullchain.pem") dependentCertNames);
+      serviceConfig = {
+        Type = "oneshot";
+        TimeoutSec = 60;
+        ExecCondition = "/run/current-system/systemd/bin/systemctl -q is-active nginx.service";
+        ExecStart = "/run/current-system/systemd/bin/systemctl reload nginx.service";
+      };
+    };
+
+    security.acme.certs = let
+      acmePairs = map (vhostConfig: let
+        hasRoot = vhostConfig.acmeRoot != null;
+      in nameValuePair vhostConfig.serverName {
+        group = mkDefault cfg.group;
+        # if acmeRoot is null inherit config.security.acme
+        # Since config.security.acme.certs.<cert>.webroot's own default value
+        # should take precedence set priority higher than mkOptionDefault
+        webroot = mkOverride (if hasRoot then 1000 else 2000) vhostConfig.acmeRoot;
+        # Also nudge dnsProvider to null in case it is inherited
+        dnsProvider = mkOverride (if hasRoot then 1000 else 2000) null;
+        extraDomainNames = vhostConfig.serverAliases;
+      # Filter for enableACME-only vhosts. Don't want to create dud certs
+      }) (filter (vhostConfig: vhostConfig.useACMEHost == null) acmeEnabledVhosts);
+    in listToAttrs acmePairs;
+
+    users.users = optionalAttrs (cfg.user == "nginx") {
+      nginx = {
+        group = cfg.group;
+        isSystemUser = true;
+        uid = config.ids.uids.nginx;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "nginx") {
+      nginx.gid = config.ids.gids.nginx;
+    };
+
+    # do not delete the default temp directories created upon nginx startup
+    systemd.tmpfiles.rules = [
+      "X /tmp/systemd-private-%b-nginx.service-*/tmp/nginx_*"
+    ];
+
+    services.logrotate.settings.nginx = mapAttrs (_: mkDefault) {
+      files = "/var/log/nginx/*.log";
+      frequency = "weekly";
+      su = "${cfg.user} ${cfg.group}";
+      rotate = 26;
+      compress = true;
+      delaycompress = true;
+      postrotate = "[ ! -f /var/run/nginx/nginx.pid ] || kill -USR1 `cat /var/run/nginx/nginx.pid`";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix b/nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix
new file mode 100644
index 000000000000..ec2c432ca573
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/nginx/gitweb.nix
@@ -0,0 +1,94 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.nginx.gitweb;
+  gitwebConfig = config.services.gitweb;
+  package = pkgs.gitweb.override (optionalAttrs gitwebConfig.gitwebTheme {
+    gitwebTheme = true;
+  });
+
+in
+{
+
+  options.services.nginx.gitweb = {
+
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        If true, enable gitweb in nginx.
+      '';
+    };
+
+    location = mkOption {
+      default = "/gitweb";
+      type = types.str;
+      description = lib.mdDoc ''
+        Location to serve gitweb on.
+      '';
+    };
+
+    user = mkOption {
+      default = "nginx";
+      type = types.str;
+      description = lib.mdDoc ''
+        Existing user that the CGI process will belong to. (The default will almost surely do.)
+      '';
+    };
+
+    group = mkOption {
+      default = "nginx";
+      type = types.str;
+      description = lib.mdDoc ''
+        Group that the CGI process will belong to. (Set to `config.services.gitolite.group` if you are using gitolite.)
+      '';
+    };
+
+    virtualHost = mkOption {
+      default = "_";
+      type = types.str;
+      description = lib.mdDoc ''
+        VirtualHost to serve gitweb on. Default is catch-all.
+      '';
+    };
+
+  };
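+  # Illustrative sketch (hypothetical host name): serving gitweb from a dedicated
+  # vhost might look like
+  #   services.nginx.gitweb = {
+  #     enable = true;
+  #     virtualHost = "git.example.org";
+  #   };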
+
+  config = mkIf cfg.enable {
+
+    systemd.services.gitweb = {
+      description = "GitWeb service";
+      script = "${package}/gitweb.cgi --fastcgi --nproc=1";
+      environment  = {
+        FCGI_SOCKET_PATH = "/run/gitweb/gitweb.sock";
+      };
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        RuntimeDirectory = [ "gitweb" ];
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    services.nginx = {
+      virtualHosts.${cfg.virtualHost} = {
+        locations."${cfg.location}/static/" = {
+          alias = "${package}/static/";
+        };
+        locations."${cfg.location}/" = {
+          extraConfig = ''
+            include ${config.services.nginx.package}/conf/fastcgi_params;
+            fastcgi_param GITWEB_CONFIG ${gitwebConfig.gitwebConfigFile};
+            fastcgi_pass unix:/run/gitweb/gitweb.sock;
+          '';
+        };
+      };
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/nginx/location-options.nix b/nixpkgs/nixos/modules/services/web-servers/nginx/location-options.nix
new file mode 100644
index 000000000000..2728852058ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/nginx/location-options.nix
@@ -0,0 +1,141 @@
+# This file defines the options that can be used for the location
+# blocks inside an Nginx virtual host configuration.
+
+{ lib, config }:
+
+with lib;
+
+{
+  options = {
+    basicAuth = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = literalExpression ''
+        {
+          user = "password";
+        };
+      '';
+      description = lib.mdDoc ''
+        Basic Auth protection for a vhost.
+
+        WARNING: This is implemented by storing the passwords in plain text in the
+        Nix store.
+      '';
+    };
+
+    basicAuthFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Basic Auth password file for a vhost.
+        Can be created via: {command}`htpasswd -c <filename> <username>`.
+
+        WARNING: The generated file contains the users' passwords hashed in a
+        non-cryptographically-secure way.
+      '';
+    };
+
+    proxyPass = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "http://www.example.org/";
+      description = lib.mdDoc ''
+        Adds proxy_pass directive and sets recommended proxy headers if
+        recommendedProxySettings is enabled.
+      '';
+    };
+
+    proxyWebsockets = mkOption {
+      type = types.bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc ''
+        Whether to support proxying websocket connections with HTTP/1.1.
+      '';
+    };
+
+    index = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "index.php index.html";
+      description = lib.mdDoc ''
+        Adds index directive.
+      '';
+    };
+
+    tryFiles = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "$uri =404";
+      description = lib.mdDoc ''
+        Adds try_files directive.
+      '';
+    };
+
+    root = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/your/root/directory";
+      description = lib.mdDoc ''
+        Root directory for requests.
+      '';
+    };
+
+    alias = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/your/alias/directory";
+      description = lib.mdDoc ''
+        Alias directory for requests.
+      '';
+    };
+
+    return = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "301 http://example.com$request_uri";
+      description = lib.mdDoc ''
+        Adds a return directive, for e.g. redirections.
+      '';
+    };
+
+    fastcgiParams = mkOption {
+      type = types.attrsOf (types.either types.str types.path);
+      default = {};
+      description = lib.mdDoc ''
+        FastCGI parameters to override.  Unlike in the Nginx
+        configuration file, overriding only some default parameters
+        won't unset the default values for other parameters.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        These lines go to the end of the location verbatim.
+      '';
+    };
+
+    priority = mkOption {
+      type = types.int;
+      default = 1000;
+      description = lib.mdDoc ''
+        Order of this location block in relation to the others in the vhost.
+        The semantics are the same as with `lib.mkOrder`. Smaller values have
+        a greater priority.
+      '';
+    };
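+    # Illustrative sketch (not part of the module): locations with smaller priority
+    # values are ordered first in the generated vhost (same semantics as lib.mkOrder), e.g.
+    #   locations."/api/" = { proxyPass = "http://127.0.0.1:8000"; priority = 500; };
+    #   locations."/" = { root = "/var/www"; };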
+
+    recommendedProxySettings = mkOption {
+      type = types.bool;
+      default = config.services.nginx.recommendedProxySettings;
+      defaultText = literalExpression "config.services.nginx.recommendedProxySettings";
+      description = lib.mdDoc ''
+        Enable recommended proxy settings.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix b/nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix
new file mode 100644
index 000000000000..9db4c8e23025
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/nginx/vhost-options.nix
@@ -0,0 +1,358 @@
+# This file defines the options that can be used both for the Nginx
+# main server configuration, and for the virtual hosts.  (The latter
+# has additional options that affect the web server as a whole, like
+# the user/group to run under.)
+
+{ config, lib, ... }:
+
+with lib;
+{
+  options = {
+    serverName = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Name of this virtual host. Defaults to attribute name in virtualHosts.
+      '';
+      example = "example.org";
+    };
+
+    serverAliases = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "www.example.org" "example.org" ];
+      description = lib.mdDoc ''
+        Additional names of virtual hosts served by this virtual host configuration.
+      '';
+    };
+
+    listen = mkOption {
+      type = with types; listOf (submodule {
+        options = {
+          addr = mkOption {
+            type = str;
+            description = lib.mdDoc "Listen address.";
+          };
+          port = mkOption {
+            type = types.nullOr port;
+            description = lib.mdDoc ''
+              Port number to listen on.
+              If unset and the listen address is not a socket then nginx defaults to 80.
+            '';
+            default = null;
+          };
+          ssl = mkOption {
+            type = bool;
+            description = lib.mdDoc "Enable SSL.";
+            default = false;
+          };
+          proxyProtocol = mkOption {
+            type = bool;
+            description = lib.mdDoc "Enable PROXY protocol.";
+            default = false;
+          };
+          extraParameters = mkOption {
+            type = listOf str;
+            description = lib.mdDoc "Extra parameters of this listen directive.";
+            default = [ ];
+            example = [ "backlog=1024" "deferred" ];
+          };
+        };
+      });
+      default = [];
+      example = [
+        { addr = "195.154.1.1"; port = 443; ssl = true; }
+        { addr = "192.154.1.1"; port = 80; }
+        { addr = "unix:/var/run/nginx.sock"; }
+      ];
+      description = lib.mdDoc ''
+        Listen addresses and ports for this virtual host.
+        IPv6 addresses must be enclosed in square brackets.
+        Note: this option overrides `addSSL`
+        and `onlySSL`.
+
+        If you only want to set the addresses manually and not
+        the ports, take a look at `listenAddresses`.
+      '';
+    };
+
+    listenAddresses = mkOption {
+      type = with types; listOf str;
+
+      description = lib.mdDoc ''
+        Listen addresses for this virtual host.
+        Compared to `listen` this only sets the addresses
+        and the ports are chosen automatically.
+
+        Note: This option overrides `enableIPv6`
+      '';
+      default = [];
+      example = [ "127.0.0.1" "[::1]" ];
+    };
+
+    enableACME = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to ask Let's Encrypt to sign a certificate for this vhost.
+        Alternately, you can use an existing certificate through {option}`useACMEHost`.
+      '';
+    };
+
+    useACMEHost = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        A host of an existing Let's Encrypt certificate to use.
+        This is useful if you have many subdomains and want to avoid hitting the
+        [rate limit](https://letsencrypt.org/docs/rate-limits).
+        Alternately, you can generate a certificate through {option}`enableACME`.
+        *Note that this option does not create any certificates, nor does it add subdomains to existing ones – you will need to create them manually using [](#opt-security.acme.certs).*
+      '';
+    };
+
+    acmeRoot = mkOption {
+      type = types.nullOr types.str;
+      default = "/var/lib/acme/acme-challenge";
+      description = lib.mdDoc ''
+        Directory for the ACME challenge, which is **public**. Don't put certs or keys in here.
+        Set to null to inherit from config.security.acme.
+      '';
+    };
+
+    acmeFallbackHost = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Host to proxy requests to if the ACME challenge is not found. Useful
+        if you want multiple hosts to be able to verify the same domain name.
+
+        With this option, you could request certificates for the present domain
+        with an ACME client that is running on another host, which you would
+        specify here.
+      '';
+    };
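+    # Illustrative sketch (hypothetical address): delegate challenge lookups to the
+    # machine that actually runs the ACME client, e.g.
+    #   services.nginx.virtualHosts."example.org".acmeFallbackHost = "10.0.0.2";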
+
+    addSSL = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable HTTPS in addition to plain HTTP. This will set defaults for
+        `listen` to listen on all interfaces on the respective default
+        ports (80, 443).
+      '';
+    };
+
+    onlySSL = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable HTTPS and reject plain HTTP connections. This will set
+        defaults for `listen` to listen on all interfaces on port 443.
+      '';
+    };
+
+    enableSSL = mkOption {
+      type = types.bool;
+      visible = false;
+      default = false;
+    };
+
+    forceSSL = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to add a separate nginx server block that permanently redirects (301)
+        all plain HTTP traffic to HTTPS. This will set defaults for
+        `listen` to listen on all interfaces on the respective default
+        ports (80, 443), where the non-SSL listens are used for the redirect vhosts.
+      '';
+    };
+
+    rejectSSL = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to listen for and reject all HTTPS connections to this vhost. Useful in
+        [default](#opt-services.nginx.virtualHosts._name_.default)
+        server blocks to avoid serving the certificate for another vhost. Uses the
+        `ssl_reject_handshake` directive available in nginx versions
+        1.19.4 and above.
+      '';
+    };
+
+    kTLS = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable kTLS support.
+        Implementing TLS in the kernel (kTLS) improves performance by significantly
+        reducing the need for copying operations between user space and the kernel.
+        Requires nginx version 1.21.4 or later.
+      '';
+    };
+
+    sslCertificate = mkOption {
+      type = types.path;
+      example = "/var/host.cert";
+      description = lib.mdDoc "Path to server SSL certificate.";
+    };
+
+    sslCertificateKey = mkOption {
+      type = types.path;
+      example = "/var/host.key";
+      description = lib.mdDoc "Path to server SSL certificate key.";
+    };
+
+    sslTrustedCertificate = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = literalExpression ''"''${pkgs.cacert}/etc/ssl/certs/ca-bundle.crt"'';
+      description = lib.mdDoc "Path to root SSL certificate for stapling and client certificates.";
+    };
+
+    http2 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable the HTTP/2 protocol.
+        Note that (as of writing) due to nginx's implementation, to disable
+        HTTP/2 you have to disable it on all vhosts that use a given
+        IP address / port.
+        If there is one server block configured to enable http2, then it is
+        enabled for all server blocks on this IP.
+        See https://stackoverflow.com/a/39466948/263061.
+      '';
+    };
+
+    http3 = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable the HTTP/3 protocol.
+        This requires using the `pkgs.nginxQuic` package,
+        which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`,
+        and activating the QUIC transport protocol with
+        `services.nginx.virtualHosts.<name>.quic = true;`.
+        Note that HTTP/3 support is experimental and
+        *not* yet recommended for production.
+        Read more at https://quic.nginx.org/
+      '';
+    };
+
+    http3_hq = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the HTTP/0.9 protocol negotiation used in QUIC interoperability tests.
+        This requires using the `pkgs.nginxQuic` package,
+        which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`,
+        and activating the QUIC transport protocol with
+        `services.nginx.virtualHosts.<name>.quic = true;`.
+        Note that special application protocol support is experimental and
+        *not* yet recommended for production.
+        Read more at https://quic.nginx.org/
+      '';
+    };
+
+    quic = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable the QUIC transport protocol.
+        This requires using the `pkgs.nginxQuic` package,
+        which can be achieved by setting `services.nginx.package = pkgs.nginxQuic;`.
+        Note that QUIC support is experimental and
+        *not* yet recommended for production.
+        Read more at https://quic.nginx.org/
+      '';
+    };
+
+    reuseport = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Create an individual listening socket.
+        It only needs to be specified once on one of the hosts.
+      '';
+    };
+
+    root = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/data/webserver/docs";
+      description = lib.mdDoc ''
+        The path of the web root directory.
+      '';
+    };
+
+    default = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Makes this vhost the default.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        These lines go to the end of the vhost verbatim.
+      '';
+    };
+
+    globalRedirect = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "newserver.example.org";
+      description = lib.mdDoc ''
+        If set, all requests for this host are redirected permanently to
+        the given hostname.
+      '';
+    };
+
+    basicAuth = mkOption {
+      type = types.attrsOf types.str;
+      default = {};
+      example = literalExpression ''
+        {
+          user = "password";
+        };
+      '';
+      description = lib.mdDoc ''
+        Basic Auth protection for a vhost.
+
+        WARNING: The passwords are stored in plain text in the
+        world-readable Nix store.
+      '';
+    };
+
+    basicAuthFile = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc ''
+        Basic Auth password file for a vhost.
+        Can be created via: {command}`htpasswd -c <filename> <username>`.
+
+        WARNING: The generated file contains the users' passwords hashed in a
+        non-cryptographically-secure way.
+      '';
+    };
+
+    locations = mkOption {
+      type = types.attrsOf (types.submodule (import ./location-options.nix {
+        inherit lib config;
+      }));
+      default = {};
+      example = literalExpression ''
+        {
+          "/" = {
+            proxyPass = "http://localhost:3000";
+          };
+        };
+      '';
+      description = lib.mdDoc "Declarative location config";
+    };
+  };
+}
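For orientation, here is a minimal sketch of how the virtual-host options above might be used from a NixOS configuration. The host name, certificate paths, web root and backend port are placeholders, and any additional option of this module that actually enables TLS for the vhost (not part of this hunk) is assumed to be configured as needed:

```nix
{ pkgs, ... }:
{
  services.nginx = {
    enable = true;
    # pkgs.nginxQuic is only required when the quic/http3 options are used.
    package = pkgs.nginxQuic;
    virtualHosts."example.com" = {
      sslCertificate = "/var/host.cert";
      sslCertificateKey = "/var/host.key";
      http2 = true;
      quic = true;
      root = "/data/webserver/docs";
      locations."/api" = {
        proxyPass = "http://localhost:3000";
      };
    };
  };
}
```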
diff --git a/nixpkgs/nixos/modules/services/web-servers/phpfpm/default.nix b/nixpkgs/nixos/modules/services/web-servers/phpfpm/default.nix
new file mode 100644
index 000000000000..0bd1d5b29b31
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/phpfpm/default.nix
@@ -0,0 +1,285 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.phpfpm;
+
+  runtimeDir = "/run/phpfpm";
+
+  toStr = value:
+    if true == value then "yes"
+    else if false == value then "no"
+    else toString value;
+
+  fpmCfgFile = pool: poolOpts: pkgs.writeText "phpfpm-${pool}.conf" ''
+    [global]
+    ${concatStringsSep "\n" (mapAttrsToList (n: v: "${n} = ${toStr v}") cfg.settings)}
+    ${optionalString (cfg.extraConfig != null) cfg.extraConfig}
+
+    [${pool}]
+    ${concatStringsSep "\n" (mapAttrsToList (n: v: "${n} = ${toStr v}") poolOpts.settings)}
+    ${concatStringsSep "\n" (mapAttrsToList (n: v: "env[${n}] = ${toStr v}") poolOpts.phpEnv)}
+    ${optionalString (poolOpts.extraConfig != null) poolOpts.extraConfig}
+  '';
+
+  phpIni = poolOpts: pkgs.runCommand "php.ini" {
+    inherit (poolOpts) phpPackage phpOptions;
+    preferLocalBuild = true;
+    passAsFile = [ "phpOptions" ];
+  } ''
+    cat ${poolOpts.phpPackage}/etc/php.ini $phpOptionsPath > $out
+  '';
+
+  poolOpts = { name, ... }:
+    let
+      poolOpts = cfg.pools.${name};
+    in
+    {
+      options = {
+        socket = mkOption {
+          type = types.str;
+          readOnly = true;
+          description = lib.mdDoc ''
+            Path to the unix socket file on which to accept FastCGI requests.
+
+            ::: {.note}
+            This option is read-only and managed by NixOS.
+            :::
+          '';
+          example = "${runtimeDir}/<name>.sock";
+        };
+
+        listen = mkOption {
+          type = types.str;
+          default = "";
+          example = "/path/to/unix/socket";
+          description = lib.mdDoc ''
+            The address on which to accept FastCGI requests.
+          '';
+        };
+
+        phpPackage = mkOption {
+          type = types.package;
+          default = cfg.phpPackage;
+          defaultText = literalExpression "config.services.phpfpm.phpPackage";
+          description = lib.mdDoc ''
+            The PHP package to use for running this PHP-FPM pool.
+          '';
+        };
+
+        phpOptions = mkOption {
+          type = types.lines;
+          description = lib.mdDoc ''
+            Options appended to the PHP configuration file {file}`php.ini` used for this PHP-FPM pool.
+          '';
+        };
+
+        phpEnv = lib.mkOption {
+          type = with types; attrsOf str;
+          default = {};
+          description = lib.mdDoc ''
+            Environment variables used for this PHP-FPM pool.
+          '';
+          example = literalExpression ''
+            {
+              HOSTNAME = "$HOSTNAME";
+              TMP = "/tmp";
+              TMPDIR = "/tmp";
+              TEMP = "/tmp";
+            }
+          '';
+        };
+
+        user = mkOption {
+          type = types.str;
+          description = lib.mdDoc "User account under which this pool runs.";
+        };
+
+        group = mkOption {
+          type = types.str;
+          description = lib.mdDoc "Group account under which this pool runs.";
+        };
+
+        settings = mkOption {
+          type = with types; attrsOf (oneOf [ str int bool ]);
+          default = {};
+          description = lib.mdDoc ''
+            PHP-FPM pool directives. Refer to the "List of pool directives" section of
+            <https://www.php.net/manual/en/install.fpm.configuration.php>
+            for details. Note that setting names must be enclosed in quotes (e.g.
+            `"pm.max_children"` instead of `pm.max_children`).
+          '';
+          example = literalExpression ''
+            {
+              "pm" = "dynamic";
+              "pm.max_children" = 75;
+              "pm.start_servers" = 10;
+              "pm.min_spare_servers" = 5;
+              "pm.max_spare_servers" = 20;
+              "pm.max_requests" = 500;
+            }
+          '';
+        };
+
+        extraConfig = mkOption {
+          type = with types; nullOr lines;
+          default = null;
+          description = lib.mdDoc ''
+            Extra lines that go into the pool configuration.
+            See the documentation on `php-fpm.conf` for
+            details on configuration directives.
+          '';
+        };
+      };
+
+      config = {
+        socket = if poolOpts.listen == "" then "${runtimeDir}/${name}.sock" else poolOpts.listen;
+        group = mkDefault poolOpts.user;
+        phpOptions = mkBefore cfg.phpOptions;
+
+        settings = mapAttrs (name: mkDefault) {
+          listen = poolOpts.socket;
+          user = poolOpts.user;
+          group = poolOpts.group;
+        };
+      };
+    };
+
+in {
+  imports = [
+    (mkRemovedOptionModule [ "services" "phpfpm" "poolConfigs" ] "Use services.phpfpm.pools instead.")
+    (mkRemovedOptionModule [ "services" "phpfpm" "phpIni" ] "")
+  ];
+
+  options = {
+    services.phpfpm = {
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool ]);
+        default = {};
+        description = lib.mdDoc ''
+          PHP-FPM global directives. Refer to the "List of global php-fpm.conf directives" section of
+          <https://www.php.net/manual/en/install.fpm.configuration.php>
+          for details. Note that setting names must be enclosed in quotes (e.g.
+          `"pm.max_children"` instead of `pm.max_children`).
+          You need not specify the options `error_log` or
+          `daemonize` here, since they are generated by NixOS.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = with types; nullOr lines;
+        default = null;
+        description = lib.mdDoc ''
+          Extra configuration that should be put in the global section of
+          the PHP-FPM configuration file. Do not specify the options
+          `error_log` or
+          `daemonize` here, since they are generated by
+          NixOS.
+        '';
+      };
+
+      phpPackage = mkOption {
+        type = types.package;
+        default = pkgs.php;
+        defaultText = literalExpression "pkgs.php";
+        description = lib.mdDoc ''
+          The PHP package to use for running the PHP-FPM service.
+        '';
+      };
+
+      phpOptions = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          ''
+            date.timezone = "CET"
+          '';
+        description = lib.mdDoc ''
+          Options appended to the PHP configuration file {file}`php.ini`.
+        '';
+      };
+
+      pools = mkOption {
+        type = types.attrsOf (types.submodule poolOpts);
+        default = {};
+        example = literalExpression ''
+         {
+           mypool = {
+             user = "php";
+             group = "php";
+             phpPackage = pkgs.php;
+             settings = {
+               "pm" = "dynamic";
+               "pm.max_children" = 75;
+               "pm.start_servers" = 10;
+               "pm.min_spare_servers" = 5;
+               "pm.max_spare_servers" = 20;
+               "pm.max_requests" = 500;
+             };
+           }
+         }'';
+        description = lib.mdDoc ''
+          PHP-FPM pools. If no pools are defined, the PHP-FPM
+          service is disabled.
+        '';
+      };
+    };
+  };
+
+  config = mkIf (cfg.pools != {}) {
+
+    warnings =
+      mapAttrsToList (pool: poolOpts: ''
+        Using config.services.phpfpm.pools.${pool}.listen is deprecated and will become unsupported in a future release. Please reference the read-only option config.services.phpfpm.pools.${pool}.socket to access the path of your socket.
+      '') (filterAttrs (pool: poolOpts: poolOpts.listen != "") cfg.pools) ++
+      mapAttrsToList (pool: poolOpts: ''
+        Using config.services.phpfpm.pools.${pool}.extraConfig is deprecated and will become unsupported in a future release. Please migrate your configuration to config.services.phpfpm.pools.${pool}.settings.
+      '') (filterAttrs (pool: poolOpts: poolOpts.extraConfig != null) cfg.pools) ++
+      optional (cfg.extraConfig != null) ''
+        Using config.services.phpfpm.extraConfig is deprecated and will become unsupported in a future release. Please migrate your configuration to config.services.phpfpm.settings.
+      ''
+    ;
+
+    services.phpfpm.settings = {
+      error_log = "syslog";
+      daemonize = false;
+    };
+
+    systemd.slices.phpfpm = {
+      description = "PHP FastCGI Process manager pools slice";
+    };
+
+    systemd.targets.phpfpm = {
+      description = "PHP FastCGI Process manager pools target";
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.services = mapAttrs' (pool: poolOpts:
+      nameValuePair "phpfpm-${pool}" {
+        description = "PHP FastCGI Process Manager service for pool ${pool}";
+        after = [ "network.target" ];
+        wantedBy = [ "phpfpm.target" ];
+        partOf = [ "phpfpm.target" ];
+        serviceConfig = let
+          cfgFile = fpmCfgFile pool poolOpts;
+          iniFile = phpIni poolOpts;
+        in {
+          Slice = "phpfpm.slice";
+          PrivateDevices = true;
+          PrivateTmp = true;
+          ProtectSystem = "full";
+          ProtectHome = true;
+          # XXX: We need AF_NETLINK to make the sendmail SUID binary from postfix work
+          RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
+          Type = "notify";
+          ExecStart = "${poolOpts.phpPackage}/bin/php-fpm -y ${cfgFile} -c ${iniFile}";
+          ExecReload = "${pkgs.coreutils}/bin/kill -USR2 $MAINPID";
+          RuntimeDirectory = "phpfpm";
+          RuntimeDirectoryPreserve = true; # Relevant when multiple processes are running
+          Restart = "always";
+        };
+      }
+    ) cfg.pools;
+  };
+}
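As a hedged usage sketch for the pool options above: the pool itself mirrors the module's own example, while the nginx location, its `extraConfig`, the `php` user/group and the `fastcgi_params` path are illustrative assumptions rather than anything defined in this file. Note the use of the read-only `socket` option instead of the deprecated `listen`:

```nix
{ config, pkgs, ... }:
{
  services.phpfpm.pools.mypool = {
    # The user/group are not created by this module and must exist already.
    user = "php";
    group = "php";
    settings = {
      "pm" = "dynamic";
      "pm.max_children" = 32;
      "pm.start_servers" = 2;
      "pm.min_spare_servers" = 2;
      "pm.max_spare_servers" = 4;
      "pm.max_requests" = 500;
    };
  };

  # Hand the pool's unix socket to a FastCGI-capable frontend.
  services.nginx.virtualHosts."example.com".locations."~ \\.php$".extraConfig = ''
    fastcgi_pass unix:${config.services.phpfpm.pools.mypool.socket};
    include ${pkgs.nginx}/conf/fastcgi_params;
  '';
}
```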
diff --git a/nixpkgs/nixos/modules/services/web-servers/pomerium.nix b/nixpkgs/nixos/modules/services/web-servers/pomerium.nix
new file mode 100644
index 000000000000..90748f74d24e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/pomerium.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  format = pkgs.formats.yaml {};
+in
+{
+  options.services.pomerium = {
+    enable = mkEnableOption (lib.mdDoc "the Pomerium authenticating reverse proxy");
+
+    configFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = lib.mdDoc "Path to Pomerium config YAML. If set, overrides services.pomerium.settings.";
+    };
+
+    useACMEHost = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      description = lib.mdDoc ''
+        If set, use a NixOS-generated ACME certificate with the specified name.
+
+        Note that this will require you to use a non-HTTP-based challenge, or
+        disable Pomerium's in-built HTTP redirect server by setting
+        http_redirect_addr to null and using a different HTTP server to serve
+        the challenge response.
+
+        If you're using an HTTP-based challenge, you should use the
+        Pomerium-native autocert option instead.
+      '';
+    };
+
+    settings = mkOption {
+      description = lib.mdDoc ''
+        The contents of Pomerium's config.yaml, in Nix expressions.
+
+        Specifying configFile will override this in its entirety.
+
+        See [the Pomerium
+        configuration reference](https://pomerium.io/reference/) for more information about what to put
+        here.
+      '';
+      default = {};
+      type = format.type;
+    };
+
+    secretsFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = lib.mdDoc ''
+        Path to file containing secrets for Pomerium, in systemd
+        EnvironmentFile format. See the systemd.exec(5) man page.
+      '';
+    };
+  };
+
+  config = let
+    cfg = config.services.pomerium;
+    cfgFile = if cfg.configFile != null then cfg.configFile else (format.generate "pomerium.yaml" cfg.settings);
+  in mkIf cfg.enable ({
+    systemd.services.pomerium = {
+      description = "Pomerium authenticating reverse proxy";
+      wants = [ "network.target" ] ++ (optional (cfg.useACMEHost != null) "acme-finished-${cfg.useACMEHost}.target");
+      after = [ "network.target" ] ++ (optional (cfg.useACMEHost != null) "acme-finished-${cfg.useACMEHost}.target");
+      wantedBy = [ "multi-user.target" ];
+      environment = optionalAttrs (cfg.useACMEHost != null) {
+        CERTIFICATE_FILE = "fullchain.pem";
+        CERTIFICATE_KEY_FILE = "key.pem";
+      };
+      startLimitIntervalSec = 60;
+      script = ''
+        if [[ -v CREDENTIALS_DIRECTORY ]]; then
+          cd "$CREDENTIALS_DIRECTORY"
+        fi
+        exec "${pkgs.pomerium}/bin/pomerium" -config "${cfgFile}"
+      '';
+
+      serviceConfig = {
+        DynamicUser = true;
+        StateDirectory = [ "pomerium" ];
+
+        PrivateUsers = false;  # breaks CAP_NET_BIND_SERVICE
+        MemoryDenyWriteExecute = false;  # breaks LuaJIT
+
+        NoNewPrivileges = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        DevicePolicy = "closed";
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        ProtectControlGroups = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectKernelLogs = true;
+        RestrictAddressFamilies = "AF_UNIX AF_INET AF_INET6 AF_NETLINK";
+        RestrictNamespaces = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        LockPersonality = true;
+        SystemCallArchitectures = "native";
+
+        EnvironmentFile = cfg.secretsFile;
+        AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        CapabilityBoundingSet = [ "CAP_NET_BIND_SERVICE" ];
+
+        LoadCredential = optionals (cfg.useACMEHost != null) [
+          "fullchain.pem:/var/lib/acme/${cfg.useACMEHost}/fullchain.pem"
+          "key.pem:/var/lib/acme/${cfg.useACMEHost}/key.pem"
+        ];
+      };
+    };
+
+    # postRun hooks on cert renew can't be used to restart Nginx since renewal
+    # runs as the unprivileged acme user. sslTargets are added to wantedBy + before
+    # which allows the acme-finished-$cert.target to signify the successful updating
+    # of certs end-to-end.
+    systemd.services.pomerium-config-reload = mkIf (cfg.useACMEHost != null) {
+      # TODO(lukegb): figure out how to make config reloading work with credentials.
+
+      wantedBy = [ "acme-finished-${cfg.useACMEHost}.target" "multi-user.target" ];
+      # Before the finished targets, after the renew services.
+      before = [ "acme-finished-${cfg.useACMEHost}.target" ];
+      after = [ "acme-${cfg.useACMEHost}.service" ];
+      # Block reloading if not all certs exist yet.
+      unitConfig.ConditionPathExists = [ "${config.security.acme.certs.${cfg.useACMEHost}.directory}/fullchain.pem" ];
+      serviceConfig = {
+        Type = "oneshot";
+        TimeoutSec = 60;
+        ExecCondition = "/run/current-system/systemd/bin/systemctl -q is-active pomerium.service";
+        ExecStart = "/run/current-system/systemd/bin/systemctl --no-block restart pomerium.service";
+      };
+    };
+  });
+}
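A sketch of enabling the module, assuming an existing `security.acme.certs."auth.example.com"` entry for the `useACMEHost` name; the keys inside `settings` follow Pomerium's upstream configuration reference rather than anything defined by this module, and the secrets path is a placeholder:

```nix
{
  services.pomerium = {
    enable = true;
    useACMEHost = "auth.example.com";
    # EnvironmentFile-style secrets, e.g. SHARED_SECRET=... and COOKIE_SECRET=...
    secretsFile = "/var/lib/pomerium/secrets.env";
    settings = {
      address = ":443";
      authenticate_service_url = "https://auth.example.com";
      routes = [
        {
          from = "https://app.example.com";
          to = "http://127.0.0.1:8000";
          allowed_domains = [ "example.com" ];
        }
      ];
    };
  };
}
```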
diff --git a/nixpkgs/nixos/modules/services/web-servers/rustus.nix b/nixpkgs/nixos/modules/services/web-servers/rustus.nix
new file mode 100644
index 000000000000..6d3b2e6a65d9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/rustus.nix
@@ -0,0 +1,256 @@
+{ lib, pkgs, config, ... }:
+with lib;
+let
+  cfg = config.services.rustus;
+in
+{
+  meta.maintainers = with maintainers; [ happysalada ];
+
+  options.services.rustus = {
+
+    enable = mkEnableOption (lib.mdDoc "TUS protocol implementation in Rust");
+
+    host = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        The host address that rustus will listen on.
+      '';
+      default = "127.0.0.1";
+      example = "127.0.0.1";
+    };
+
+    port = mkOption {
+      type = types.port;
+      description = lib.mdDoc ''
+        The port that rustus will listen on.
+      '';
+      default = 1081;
+      example = 1081;
+    };
+
+    log_level = mkOption {
+      type = types.enum [ "DEBUG" "INFO" "ERROR" ];
+      description = lib.mdDoc ''
+        Desired log level
+      '';
+      default = "INFO";
+      example = "ERROR";
+    };
+
+    max_body_size = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        Maximum body size in bytes
+      '';
+      default = "10000000"; # 10 mb
+      example = "100000000";
+    };
+
+    url = mkOption {
+      type = types.str;
+      description = lib.mdDoc ''
+        url path for uploads
+      '';
+      default = "/files";
+    };
+
+    disable_health_access_logs = mkOption {
+      type = types.bool;
+      description = lib.mdDoc ''
+        disable access log for /health endpoint
+      '';
+      default = false;
+    };
+
+    cors = mkOption {
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        list of origins allowed to upload
+      '';
+      default = ["*"];
+      example = ["*.staging.domain" "*.prod.domain"];
+    };
+
+    tus_extensions = mkOption {
+      type = types.listOf (types.enum [
+        "getting"
+        "creation"
+        "termination"
+        "creation-with-upload"
+        "creation-defer-length"
+        "concatenation"
+        "checksum"
+      ]);
+      description = lib.mdDoc ''
+        Since the TUS protocol offers extensibility, you can turn off some of the protocol extensions.
+      '';
+      default = [
+        "getting"
+        "creation"
+        "termination"
+        "creation-with-upload"
+        "creation-defer-length"
+        "concatenation"
+        "checksum"
+      ];
+    };
+
+    remove_parts = mkOption {
+      type = types.bool;
+      description = lib.mdDoc ''
+        remove parts files after successful concatenation
+      '';
+      default = true;
+      example = false;
+    };
+
+    storage = lib.mkOption {
+      description = lib.mdDoc ''
+        Storages are used to actually store your files. This option configures where and how files are stored.
+      '';
+      default = {};
+      example = lib.literalExpression ''
+        {
+          type = "hybrid-s3";
+          s3_access_key_file = config.age.secrets.R2_ACCESS_KEY.path;
+          s3_secret_key_file = config.age.secrets.R2_SECRET_KEY.path;
+          s3_bucket = "my_bucket";
+          s3_url = "https://s3.example.com";
+        }
+      '';
+      type = lib.types.submodule {
+        options = {
+          type = lib.mkOption {
+            type = lib.types.enum ["file-storage" "hybrid-s3"];
+            description = lib.mdDoc "Type of storage to use";
+          };
+          s3_access_key_file = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "File path that contains the S3 access key.";
+          };
+          s3_secret_key_file = lib.mkOption {
+            type = lib.types.path;
+            description = lib.mdDoc "File path that contains the S3 secret key.";
+          };
+          s3_region = lib.mkOption {
+            type = lib.types.str;
+            default = "us-east-1";
+            description = lib.mdDoc "S3 region name.";
+          };
+          s3_bucket = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "S3 bucket.";
+          };
+          s3_url = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "S3 url.";
+          };
+
+          force_sync = lib.mkOption {
+            type = lib.types.bool;
+            description = lib.mdDoc "calls fsync system call after every write to disk in local storage";
+            default = true;
+          };
+          data_dir = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "path to the local directory where all files are stored";
+            default = "/var/lib/rustus";
+          };
+          dir_structure = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "pattern of a directory structure locally and on s3";
+            default = "{year}/{month}/{day}";
+          };
+        };
+      };
+    };
+
+    info_storage = lib.mkOption {
+      description = lib.mdDoc ''
+        Info storages are used to store information about file uploads. These storages must be persistent, because rustus updates the upload information every time a chunk is uploaded. When someone wants to download a file, this information is read from the storage to find the actual path of the upload.
+      '';
+      default = {};
+      type = lib.types.submodule {
+        options = {
+          type = lib.mkOption {
+            type = lib.types.enum ["file-info-storage"];
+            description = lib.mdDoc "Type of info storage to use";
+            default = "file-info-storage";
+          };
+          dir = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "directory to store info about uploads";
+            default = "/var/lib/rustus";
+          };
+        };
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    systemd.services.rustus =
+      let
+        isHybridS3 = cfg.storage.type == "hybrid-s3";
+      in
+    {
+      description = "Rustus server";
+      documentation = [ "https://s3rius.github.io/rustus/" ];
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      environment = {
+        RUSTUS_SERVER_HOST = cfg.host;
+        RUSTUS_SERVER_PORT = toString cfg.port;
+        RUSTUS_LOG_LEVEL = cfg.log_level;
+        RUSTUS_MAX_BODY_SIZE = cfg.max_body_size;
+        RUSTUS_URL = cfg.url;
+        RUSTUS_DISABLE_HEALTH_ACCESS_LOG = lib.mkIf cfg.disable_health_access_logs "true";
+        RUSTUS_CORS = lib.concatStringsSep "," cfg.cors;
+        RUSTUS_TUS_EXTENSIONS = lib.concatStringsSep "," cfg.tus_extensions;
+        RUSTUS_REMOVE_PARTS = if cfg.remove_parts then "true" else "false";
+        RUSTUS_STORAGE = cfg.storage.type;
+        RUSTUS_DATA_DIR = cfg.storage.data_dir;
+        RUSTUS_DIR_STRUCTURE = cfg.storage.dir_structure;
+        RUSTUS_FORCE_FSYNC = if cfg.storage.force_sync then "true" else "false";
+        RUSTUS_S3_URL = mkIf isHybridS3 cfg.storage.s3_url;
+        RUSTUS_S3_BUCKET = mkIf isHybridS3 cfg.storage.s3_bucket;
+        RUSTUS_S3_REGION = mkIf isHybridS3 cfg.storage.s3_region;
+        RUSTUS_S3_ACCESS_KEY_PATH = mkIf isHybridS3 "%d/S3_ACCESS_KEY_PATH";
+        RUSTUS_S3_SECRET_KEY_PATH = mkIf isHybridS3 "%d/S3_SECRET_KEY_PATH";
+        RUSTUS_INFO_STORAGE = cfg.info_storage.type;
+        RUSTUS_INFO_DIR = cfg.info_storage.dir;
+      };
+
+      serviceConfig = {
+        ExecStart = "${pkgs.rustus}/bin/rustus";
+        StateDirectory = "rustus";
+        # The user name is fixed here so that, for example, a backup can be
+        # restored: run the restore command as `sudo -u rustus` in order to
+        # have write permissions under /var/lib.
+        User = "rustus";
+        DynamicUser = true;
+        LoadCredential = lib.optionals isHybridS3 [
+          "S3_ACCESS_KEY_PATH:${cfg.storage.s3_access_key_file}"
+          "S3_SECRET_KEY_PATH:${cfg.storage.s3_secret_key_file}"
+        ];
+        # hardening
+        RestrictRealtime = true;
+        RestrictNamespaces = true;
+        LockPersonality = true;
+        ProtectKernelModules = true;
+        ProtectKernelTunables = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        ProtectHostUserNamespaces = true;
+        ProtectClock = true;
+        RestrictSUIDSGID = true;
+        SystemCallArchitectures = "native";
+        CapabilityBoundingSet = "";
+        ProtectProc = "invisible";
+        # TODO consider SystemCallFilter LimitAS ProcSubset
+      };
+    };
+  };
+}
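A sketch of a plain local-disk deployment of the options above; the values simply restate the module defaults and the S3-related settings are omitted because they only apply to the `hybrid-s3` storage type:

```nix
{
  services.rustus = {
    enable = true;
    host = "127.0.0.1";
    port = 1081;
    url = "/files";
    storage = {
      type = "file-storage";
      data_dir = "/var/lib/rustus";
      dir_structure = "{year}/{month}/{day}";
    };
    info_storage = {
      type = "file-info-storage";
      dir = "/var/lib/rustus";
    };
  };
}
```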
diff --git a/nixpkgs/nixos/modules/services/web-servers/stargazer.nix b/nixpkgs/nixos/modules/services/web-servers/stargazer.nix
new file mode 100644
index 000000000000..18f57363137c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/stargazer.nix
@@ -0,0 +1,224 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.stargazer;
+  globalSection = ''
+    listen = ${lib.concatStringsSep " " cfg.listen}
+    connection-logging = ${lib.boolToString cfg.connectionLogging}
+    log-ip = ${lib.boolToString cfg.ipLog}
+    log-ip-partial = ${lib.boolToString cfg.ipLogPartial}
+    request-timeout = ${toString cfg.requestTimeout}
+    response-timeout = ${toString cfg.responseTimeout}
+
+    [:tls]
+    store = ${toString cfg.store}
+    organization = ${cfg.certOrg}
+    gen-certs = ${lib.boolToString cfg.genCerts}
+    regen-certs = ${lib.boolToString cfg.regenCerts}
+    ${lib.optionalString (cfg.certLifetime != "") "cert-lifetime = ${cfg.certLifetime}"}
+
+  '';
+  genINI = lib.generators.toINI { };
+  configFile = pkgs.writeText "config.ini" (lib.strings.concatStrings (
+    [ globalSection ] ++ (lib.lists.forEach cfg.routes (section:
+      let
+        name = section.route;
+        params = builtins.removeAttrs section [ "route" ];
+      in
+      genINI
+        {
+          "${name}" = params;
+        } + "\n"
+    ))
+  ));
+in
+{
+  options.services.stargazer = {
+    enable = lib.mkEnableOption (lib.mdDoc "Stargazer Gemini server");
+
+    listen = lib.mkOption {
+      type = lib.types.listOf lib.types.str;
+      default = [ "0.0.0.0" ] ++ lib.optional config.networking.enableIPv6 "[::0]";
+      defaultText = lib.literalExpression ''[ "0.0.0.0" ] ++ lib.optional config.networking.enableIPv6 "[::0]"'';
+      example = lib.literalExpression ''[ "10.0.0.12" "[2002:a00:1::]" ]'';
+      description = lib.mdDoc ''
+        Address and port to listen on.
+      '';
+    };
+
+    connectionLogging = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc "Whether or not to log connections to stdout.";
+    };
+
+    ipLog = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "Log client IP addresses in the connection log.";
+    };
+
+    ipLogPartial = lib.mkOption {
+      type = lib.types.bool;
+      default = false;
+      description = lib.mdDoc "Log partial client IP addresses in the connection log.";
+    };
+
+    requestTimeout = lib.mkOption {
+      type = lib.types.int;
+      default = 5;
+      description = lib.mdDoc ''
+        Number of seconds to wait for the client to send a complete
+        request. Set to 0 to disable.
+      '';
+    };
+
+    responseTimeout = lib.mkOption {
+      type = lib.types.int;
+      default = 0;
+      description = lib.mdDoc ''
+        Number of seconds to wait for the client to send a complete
+        request and for stargazer to finish sending the response.
+        Set to 0 to disable.
+      '';
+    };
+
+    store = lib.mkOption {
+      type = lib.types.path;
+      default = /var/lib/gemini/certs;
+      description = lib.mdDoc ''
+        Path to the certificate store on disk. This should be a
+        persistent directory writable by Stargazer.
+      '';
+    };
+
+    certOrg = lib.mkOption {
+      type = lib.types.str;
+      default = "stargazer";
+      description = lib.mdDoc ''
+        The name of the organization responsible for the X.509
+        certificate's /O name.
+      '';
+    };
+
+    genCerts = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Set to false to disable automatic certificate generation.
+        Use if you want to provide your own certs.
+      '';
+    };
+
+    regenCerts = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Set to false to turn off automatic regeneration of expired certificates.
+        Use if you want to provide your own certs.
+      '';
+    };
+
+    certLifetime = lib.mkOption {
+      type = lib.types.str;
+      default = "";
+      description = lib.mdDoc ''
+        How long certs generated by Stargazer should live for.
+        Certs live forever by default.
+      '';
+      example = lib.literalExpression "\"1y\"";
+    };
+
+    routes = lib.mkOption {
+      type = lib.types.listOf
+        (lib.types.submodule {
+          freeformType = with lib.types; attrsOf (nullOr
+            (oneOf [
+              bool
+              int
+              float
+              str
+            ]) // {
+            description = "INI atom (null, bool, int, float or string)";
+          });
+          options.route = lib.mkOption {
+            type = lib.types.str;
+            description = lib.mdDoc "Route section name";
+          };
+        });
+      default = [ ];
+      description = lib.mdDoc ''
+        Routes that Stargazer should serve.
+
+        Expressed as a list of attribute sets. Each set must have a key `route`
+        that becomes the section name for that route in the stargazer INI config.
+        The remaining keys and values become the parameters for that route.
+
+        [Refer to upstream docs for other params](https://git.sr.ht/~zethra/stargazer/tree/main/item/doc/stargazer.ini.5.txt)
+      '';
+      example = lib.literalExpression ''
+        [
+          {
+            route = "example.com";
+            root = "/srv/gemini/example.com";
+          }
+          {
+            route = "example.com:/man";
+            root = "/cgi-bin";
+            cgi = true;
+          }
+          {
+            route = "other.org~(.*)";
+            redirect = "gemini://example.com";
+            rewrite = "\1";
+          }
+        ]
+      '';
+    };
+
+    user = lib.mkOption {
+      type = lib.types.str;
+      default = "stargazer";
+      description = lib.mdDoc "User account under which stargazer runs.";
+    };
+
+    group = lib.mkOption {
+      type = lib.types.str;
+      default = "stargazer";
+      description = lib.mdDoc "Group account under which stargazer runs.";
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.services.stargazer = {
+      description = "Stargazer gemini server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        ExecStart = "${pkgs.stargazer}/bin/stargazer ${configFile}";
+        Restart = "always";
+        # User and group
+        User = cfg.user;
+        Group = cfg.group;
+      };
+    };
+
+    # Create default cert store
+    systemd.tmpfiles.rules = lib.mkIf (cfg.store == /var/lib/gemini/certs) [
+      ''d /var/lib/gemini/certs - "${cfg.user}" "${cfg.group}" -''
+    ];
+
+    users.users = lib.optionalAttrs (cfg.user == "stargazer") {
+      stargazer = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = lib.optionalAttrs (cfg.group == "stargazer") {
+      stargazer = { };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ gaykitty ];
+}
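A sketch of serving a single static capsule with the options above, adapted from the module's own `routes` example (the domain and path are placeholders):

```nix
{
  services.stargazer = {
    enable = true;
    connectionLogging = true;
    routes = [
      {
        route = "example.com";
        root = "/srv/gemini/example.com";
      }
    ];
  };
}
```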
diff --git a/nixpkgs/nixos/modules/services/web-servers/static-web-server.nix b/nixpkgs/nixos/modules/services/web-servers/static-web-server.nix
new file mode 100644
index 000000000000..07187f00fecc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/static-web-server.nix
@@ -0,0 +1,68 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.static-web-server;
+  toml = pkgs.formats.toml {};
+  configFilePath = toml.generate "config.toml" cfg.configuration;
+in {
+  options = {
+    services.static-web-server = {
+      enable = lib.mkEnableOption (lib.mdDoc ''Static Web Server'');
+      listen = lib.mkOption {
+        default = "[::]:8787";
+        type = lib.types.str;
+        description = lib.mdDoc ''
+          The "ListenStream" used in static-web-server.socket.
+          This is equivalent to SWS's "host" and "port" options.
+          See here for specific syntax: <https://www.freedesktop.org/software/systemd/man/systemd.socket.html#ListenStream=>
+        '';
+      };
+      root = lib.mkOption {
+        type = lib.types.path;
+        description = lib.mdDoc ''
+          The location of files for SWS to serve. Equivalent to SWS's "root" config value.
+          NOTE: This folder must exist before starting SWS.
+        '';
+      };
+      configuration = lib.mkOption {
+        default = { };
+        type = toml.type;
+        example = {
+          general = { log-level = "error"; directory-listing = true; };
+        };
+        description = lib.mdDoc ''
+          Configuration for Static Web Server. See
+          <https://static-web-server.net/configuration/config-file/>.
+          NOTE: Don't set "host", "port", or "root" here. They will be ignored.
+          Use the top-level "listen" and "root" options instead.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.static-web-server ];
+    systemd.packages = [ pkgs.static-web-server ];
+    # Have to set wantedBy since systemd.packages ignores the "Install" section
+    systemd.sockets.static-web-server = {
+      wantedBy = [ "sockets.target" ];
+      # Start with empty string to reset upstream option
+      listenStreams = [ "" cfg.listen ];
+    };
+    systemd.services.static-web-server = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        # Remove upstream sample environment file; use config.toml exclusively
+        EnvironmentFile = [ "" ];
+        ExecStart = [ "" "${pkgs.static-web-server}/bin/static-web-server --fd 0 --config-file ${configFilePath} --root ${cfg.root}" ];
+        # Supplementary groups don't work unless we create the group ourselves
+        SupplementaryGroups = [ "" ];
+        # If the user is serving files from their home dir, override ProtectHome to allow that
+        ProtectHome = if lib.hasPrefix "/home" cfg.root then "tmpfs" else "true";
+        BindReadOnlyPaths = cfg.root;
+      };
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ mac-chaffee ];
+}
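A sketch of the options above in use; the root directory is a placeholder and, as the `root` option notes, must exist before the socket-activated service starts:

```nix
{
  services.static-web-server = {
    enable = true;
    listen = "[::]:8080";
    root = "/var/www/public";
    configuration = {
      general = { log-level = "error"; directory-listing = true; };
    };
  };
}
```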
diff --git a/nixpkgs/nixos/modules/services/web-servers/tomcat.nix b/nixpkgs/nixos/modules/services/web-servers/tomcat.nix
new file mode 100644
index 000000000000..30d6b99fcfda
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/tomcat.nix
@@ -0,0 +1,398 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  cfg = config.services.tomcat;
+  tomcat = cfg.package;
+in
+
+{
+  meta = {
+    maintainers = with lib.maintainers; [ danbst ];
+  };
+
+  ###### interface
+
+  options = {
+    services.tomcat = {
+      enable = lib.mkEnableOption (lib.mdDoc "Apache Tomcat");
+
+      package = lib.mkPackageOptionMD pkgs "tomcat9" { };
+
+      purifyOnStart = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          On startup, the `baseDir` directory is populated with various files,
+          subdirectories and symlinks. If this option is enabled, these items
+          (except for the `logs` and `work` subdirectories) are first removed.
+          This prevents interference from remainders of an old configuration
+          (libraries, webapps, etc.), so it's recommended to enable this option.
+        '';
+      };
+
+      baseDir = lib.mkOption {
+        type = lib.types.path;
+        default = "/var/tomcat";
+        description = lib.mdDoc ''
+          Location where Tomcat stores configuration files, web applications
+          and logfiles. Note that it is partially cleared on each service startup
+          if `purifyOnStart` is enabled.
+        '';
+      };
+
+      logDirs = lib.mkOption {
+        default = [ ];
+        type = lib.types.listOf lib.types.path;
+        description = lib.mdDoc "Directories to create in baseDir/logs/";
+      };
+
+      extraConfigFiles = lib.mkOption {
+        default = [ ];
+        type = lib.types.listOf lib.types.path;
+        description = lib.mdDoc "Extra configuration files to pull into the tomcat conf directory";
+      };
+
+      extraEnvironment = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [ ];
+        example = [ "ENVIRONMENT=production" ];
+        description = lib.mdDoc "Environment Variables to pass to the tomcat service";
+      };
+
+      extraGroups = lib.mkOption {
+        default = [ ];
+        type = lib.types.listOf lib.types.str;
+        example = [ "users" ];
+        description = lib.mdDoc "Defines extra groups to which the tomcat user belongs.";
+      };
+
+      user = lib.mkOption {
+        type = lib.types.str;
+        default = "tomcat";
+        description = lib.mdDoc "User account under which Apache Tomcat runs.";
+      };
+
+      group = lib.mkOption {
+        type = lib.types.str;
+        default = "tomcat";
+        description = lib.mdDoc "Group account under which Apache Tomcat runs.";
+      };
+
+      javaOpts = lib.mkOption {
+        type = lib.types.either (lib.types.listOf lib.types.str) lib.types.str;
+        default = "";
+        description = lib.mdDoc "Parameters to pass to the Java Virtual Machine which spawns Apache Tomcat";
+      };
+
+      catalinaOpts = lib.mkOption {
+        type = lib.types.either (lib.types.listOf lib.types.str) lib.types.str;
+        default = "";
+        description = lib.mdDoc "Parameters to pass to the Java Virtual Machine which spawns the Catalina servlet container";
+      };
+
+      sharedLibs = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [ ];
+        description = lib.mdDoc "List containing JAR files or directories with JAR files which are libraries shared by the web applications";
+      };
+
+      serverXml = lib.mkOption {
+        type = lib.types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Verbatim server.xml configuration.
+          This is mutually exclusive with the virtualHosts options.
+        '';
+      };
+
+      commonLibs = lib.mkOption {
+        type = lib.types.listOf lib.types.str;
+        default = [ ];
+        description = lib.mdDoc "List containing JAR files or directories with JAR files which are libraries shared by the web applications and the servlet container";
+      };
+
+      webapps = lib.mkOption {
+        type = lib.types.listOf lib.types.path;
+        default = [ tomcat.webapps ];
+        defaultText = lib.literalExpression "[ config.services.tomcat.package.webapps ]";
+        description = lib.mdDoc "List containing WAR files or directories with WAR files which are web applications to be deployed on Tomcat";
+      };
+
+      virtualHosts = lib.mkOption {
+        type = lib.types.listOf (lib.types.submodule {
+          options = {
+            name = lib.mkOption {
+              type = lib.types.str;
+              description = lib.mdDoc "name of the virtualhost";
+            };
+            aliases = lib.mkOption {
+              type = lib.types.listOf lib.types.str;
+              description = lib.mdDoc "aliases of the virtualhost";
+              default = [ ];
+            };
+            webapps = lib.mkOption {
+              type = lib.types.listOf lib.types.path;
+              description = lib.mdDoc ''
+                List containing web application WAR files and/or directories containing
+                web applications and configuration files for the virtual host.
+              '';
+              default = [ ];
+            };
+          };
+        });
+        default = [ ];
+        description = lib.mdDoc "List consisting of a virtual host name and a list of web applications to deploy on each virtual host";
+      };
+
+      logPerVirtualHost = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable logging per virtual host.";
+      };
+
+      jdk = lib.mkPackageOptionMD pkgs "jdk" { };
+
+      axis2 = {
+        enable = lib.mkEnableOption "Apache Axis2 container";
+
+        services = lib.mkOption {
+          default = [ ];
+          type = lib.types.listOf lib.types.str;
+          description = lib.mdDoc "List containing AAR files or directories with AAR files which are web services to be deployed on Axis2";
+        };
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = lib.mkIf config.services.tomcat.enable {
+
+    users.groups.tomcat.gid = config.ids.gids.tomcat;
+
+    users.users.tomcat =
+      {
+        uid = config.ids.uids.tomcat;
+        description = "Tomcat user";
+        home = "/homeless-shelter";
+        group = "tomcat";
+        extraGroups = cfg.extraGroups;
+      };
+
+    systemd.services.tomcat = {
+      description = "Apache Tomcat server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+
+      preStart = ''
+        ${lib.optionalString cfg.purifyOnStart ''
+          # Delete most directories/symlinks we create from the existing base directory,
+          # to get rid of remainders of an old configuration.
+          # The list of directories to delete is taken from the "mkdir" command below,
+          # excluding "logs" (because logs are valuable) and "work" (because normally
+          # session files are there), and additionally including "bin".
+          rm -rf ${cfg.baseDir}/{conf,virtualhosts,temp,lib,shared/lib,webapps,bin}
+        ''}
+
+        # Create the base directory
+        mkdir -p \
+          ${cfg.baseDir}/{conf,virtualhosts,logs,temp,lib,shared/lib,webapps,work}
+        chown ${cfg.user}:${cfg.group} \
+          ${cfg.baseDir}/{conf,virtualhosts,logs,temp,lib,shared/lib,webapps,work}
+
+        # Create a symlink to the bin directory of the tomcat component
+        ln -sfn ${tomcat}/bin ${cfg.baseDir}/bin
+
+        # Symlink the config files in the conf/ directory (except for catalina.properties and server.xml)
+        for i in $(ls ${tomcat}/conf | grep -v catalina.properties | grep -v server.xml); do
+          ln -sfn ${tomcat}/conf/$i ${cfg.baseDir}/conf/`basename $i`
+        done
+
+        ${lib.optionalString (cfg.extraConfigFiles != []) ''
+          for i in ${toString cfg.extraConfigFiles}; do
+            ln -sfn $i ${cfg.baseDir}/conf/`basename $i`
+          done
+        ''}
+
+        # Create a modified catalina.properties file
+        # Change all references from CATALINA_HOME to CATALINA_BASE and add support for shared libraries
+        sed -e 's|''${catalina.home}|''${catalina.base}|g' \
+          -e 's|shared.loader=|shared.loader=''${catalina.base}/shared/lib/*.jar|' \
+          ${tomcat}/conf/catalina.properties > ${cfg.baseDir}/conf/catalina.properties
+
+        ${if cfg.serverXml != "" then ''
+          cp -f ${pkgs.writeTextDir "server.xml" cfg.serverXml}/* ${cfg.baseDir}/conf/
+        '' else
+          let
+            hostElementForVirtualHost = virtualHost: ''
+              <Host name="${virtualHost.name}" appBase="virtualhosts/${virtualHost.name}/webapps"
+                    unpackWARs="true" autoDeploy="true" xmlValidation="false" xmlNamespaceAware="false">
+            '' + lib.concatStrings (innerElementsForVirtualHost virtualHost) + ''
+              </Host>
+            '';
+            innerElementsForVirtualHost = virtualHost:
+              (map (alias: ''
+                <Alias>${alias}</Alias>
+              '') virtualHost.aliases)
+              ++ (lib.optional cfg.logPerVirtualHost ''
+                <Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs/${virtualHost.name}"
+                       prefix="${virtualHost.name}_access_log." pattern="combined" resolveHosts="false"/>
+              '');
+            hostElementsString = lib.concatMapStringsSep "\n" hostElementForVirtualHost cfg.virtualHosts;
+            hostElementsSedString = lib.replaceStrings ["\n"] ["\\\n"] hostElementsString;
+          in ''
+            # Create a modified server.xml which also includes all virtual hosts
+            sed -e "/<Engine name=\"Catalina\" defaultHost=\"localhost\">/a\\"${lib.escapeShellArg hostElementsSedString} \
+                  ${tomcat}/conf/server.xml > ${cfg.baseDir}/conf/server.xml
+          ''
+        }
+        ${lib.optionalString (cfg.logDirs != []) ''
+          for i in ${toString cfg.logDirs}; do
+            mkdir -p ${cfg.baseDir}/logs/$i
+            chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/logs/$i
+          done
+        ''}
+        ${lib.optionalString cfg.logPerVirtualHost (toString (map (h: ''
+          mkdir -p ${cfg.baseDir}/logs/${h.name}
+          chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/logs/${h.name}
+        '') cfg.virtualHosts))}
+
+        # Symlink all the given common libs files or paths into the lib/ directory
+        for i in ${tomcat} ${toString cfg.commonLibs}; do
+          if [ -f $i ]; then
+            # If the given web application is a file, symlink it into the common/lib/ directory
+            ln -sfn $i ${cfg.baseDir}/lib/`basename $i`
+          elif [ -d $i ]; then
+            # If the given web application is a directory, then iterate over the files
+            # in the special purpose directories and symlink them into the tomcat tree
+
+            for j in $i/lib/*; do
+              ln -sfn $j ${cfg.baseDir}/lib/`basename $j`
+            done
+          fi
+        done
+
+        # Symlink all the given shared libs files or paths into the shared/lib/ directory
+        for i in ${toString cfg.sharedLibs}; do
+          if [ -f $i ]; then
+            # If the given web application is a file, symlink it into the common/lib/ directory
+            ln -sfn $i ${cfg.baseDir}/shared/lib/`basename $i`
+          elif [ -d $i ]; then
+            # If the given web application is a directory, then iterate over the files
+            # in the special purpose directories and symlink them into the tomcat tree
+
+            for j in $i/shared/lib/*; do
+              ln -sfn $j ${cfg.baseDir}/shared/lib/`basename $j`
+            done
+          fi
+        done
+
+        # Symlink all the given web applications files or paths into the webapps/ directory
+        for i in ${toString cfg.webapps}; do
+          if [ -f $i ]; then
+            # If the given web application is a file, symlink it into the webapps/ directory
+            ln -sfn $i ${cfg.baseDir}/webapps/`basename $i`
+          elif [ -d $i ]; then
+            # If the given web application is a directory, then iterate over the files
+            # in the special purpose directories and symlink them into the tomcat tree
+
+            for j in $i/webapps/*; do
+              ln -sfn $j ${cfg.baseDir}/webapps/`basename $j`
+            done
+
+            # Also symlink the configuration files if they are included
+            if [ -d $i/conf/Catalina ]; then
+              for j in $i/conf/Catalina/*; do
+                mkdir -p ${cfg.baseDir}/conf/Catalina/localhost
+                ln -sfn $j ${cfg.baseDir}/conf/Catalina/localhost/`basename $j`
+              done
+            fi
+          fi
+        done
+
+        ${toString (map (virtualHost: ''
+          # Create webapps directory for the virtual host
+          mkdir -p ${cfg.baseDir}/virtualhosts/${virtualHost.name}/webapps
+
+          # Modify ownership
+          chown ${cfg.user}:${cfg.group} ${cfg.baseDir}/virtualhosts/${virtualHost.name}/webapps
+
+          # Symlink all the given web applications files or paths into the webapps/ directory
+          # of this virtual host
+          for i in "${lib.optionalString (virtualHost ? webapps) (toString virtualHost.webapps)}"; do
+            if [ -f $i ]; then
+              # If the given web application is a file, symlink it into the webapps/ directory
+              ln -sfn $i ${cfg.baseDir}/virtualhosts/${virtualHost.name}/webapps/`basename $i`
+            elif [ -d $i ]; then
+              # If the given web application is a directory, then iterate over the files
+              # in the special purpose directories and symlink them into the tomcat tree
+
+              for j in $i/webapps/*; do
+                ln -sfn $j ${cfg.baseDir}/virtualhosts/${virtualHost.name}/webapps/`basename $j`
+              done
+
+              # Also symlink the configuration files if they are included
+              if [ -d $i/conf/Catalina ]; then
+                for j in $i/conf/Catalina/*; do
+                  mkdir -p ${cfg.baseDir}/conf/Catalina/${virtualHost.name}
+                  ln -sfn $j ${cfg.baseDir}/conf/Catalina/${virtualHost.name}/`basename $j`
+                done
+              fi
+            fi
+          done
+        '') cfg.virtualHosts)}
+
+        ${lib.optionalString cfg.axis2.enable ''
+          # Copy the Axis2 web application
+          cp -av ${pkgs.axis2}/webapps/axis2 ${cfg.baseDir}/webapps
+
+          # Turn off addressing, which causes many errors
+          sed -i -e 's%<module ref="addressing"/>%<!-- <module ref="addressing"/> -->%' ${cfg.baseDir}/webapps/axis2/WEB-INF/conf/axis2.xml
+
+          # Modify permissions on the Axis2 application
+          chown -R ${cfg.user}:${cfg.group} ${cfg.baseDir}/webapps/axis2
+
+          # Symlink all the given web service files or paths into the webapps/axis2/WEB-INF/services directory
+          for i in ${toString cfg.axis2.services}; do
+            if [ -f $i ]; then
+              # If the given web service is a file, symlink it into the webapps/axis2/WEB-INF/services
+              ln -sfn $i ${cfg.baseDir}/webapps/axis2/WEB-INF/services/`basename $i`
+            elif [ -d $i ]; then
+              # If the given web application is a directory, then iterate over the files
+              # in the special purpose directories and symlink them into the tomcat tree
+
+              for j in $i/webapps/axis2/WEB-INF/services/*; do
+                ln -sfn $j ${cfg.baseDir}/webapps/axis2/WEB-INF/services/`basename $j`
+              done
+
+              # Also symlink the configuration files if they are included
+              if [ -d $i/conf/Catalina ]; then
+                for j in $i/conf/Catalina/*; do
+                  ln -sfn $j ${cfg.baseDir}/conf/Catalina/localhost/`basename $j`
+                done
+              fi
+            fi
+          done
+        ''}
+      '';
+
+      serviceConfig = {
+        Type = "forking";
+        PermissionsStartOnly = true;
+        PIDFile = "/run/tomcat/tomcat.pid";
+        RuntimeDirectory = "tomcat";
+        User = cfg.user;
+        Environment = [
+          "CATALINA_BASE=${cfg.baseDir}"
+          "CATALINA_PID=/run/tomcat/tomcat.pid"
+          "JAVA_HOME='${cfg.jdk}'"
+          "JAVA_OPTS='${builtins.toString cfg.javaOpts}'"
+          "CATALINA_OPTS='${builtins.toString cfg.catalinaOpts}'"
+        ] ++ cfg.extraEnvironment;
+        ExecStart = "${tomcat}/bin/startup.sh";
+        ExecStop = "${tomcat}/bin/shutdown.sh";
+      };
+    };
+  };
+}
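A sketch showing the virtual-host plumbing of the options above; the WAR path and host names are placeholders and must point at real artifacts:

```nix
{
  services.tomcat = {
    enable = true;
    purifyOnStart = true;
    logPerVirtualHost = true;
    virtualHosts = [
      {
        name = "apps.example.com";
        aliases = [ "apps" ];
        # Placeholder path; a WAR file or a directory containing webapps/.
        webapps = [ "/data/wars/myapp.war" ];
      }
    ];
  };
}
```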
diff --git a/nixpkgs/nixos/modules/services/web-servers/traefik.nix b/nixpkgs/nixos/modules/services/web-servers/traefik.nix
new file mode 100644
index 000000000000..42fb95a52200
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/traefik.nix
@@ -0,0 +1,191 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.traefik;
+  jsonValue = with types;
+    let
+      valueType = nullOr (oneOf [
+        bool
+        int
+        float
+        str
+        (lazyAttrsOf valueType)
+        (listOf valueType)
+      ]) // {
+        description = "JSON value";
+        emptyValue.value = { };
+      };
+    in valueType;
+  dynamicConfigFile = if cfg.dynamicConfigFile == null then
+    pkgs.runCommand "config.toml" {
+      buildInputs = [ pkgs.remarshal ];
+      preferLocalBuild = true;
+    } ''
+      remarshal -if json -of toml \
+        < ${
+          pkgs.writeText "dynamic_config.json"
+          (builtins.toJSON cfg.dynamicConfigOptions)
+        } \
+        > $out
+    ''
+  else
+    cfg.dynamicConfigFile;
+  staticConfigFile = if cfg.staticConfigFile == null then
+    pkgs.runCommand "config.toml" {
+      buildInputs = [ pkgs.yj ];
+      preferLocalBuild = true;
+    } ''
+      yj -jt -i \
+        < ${
+          pkgs.writeText "static_config.json" (builtins.toJSON
+            (recursiveUpdate cfg.staticConfigOptions {
+              providers.file.filename = "${dynamicConfigFile}";
+            }))
+        } \
+        > $out
+    ''
+  else
+    cfg.staticConfigFile;
+
+  finalStaticConfigFile =
+    if cfg.environmentFiles == []
+    then staticConfigFile
+    else "/run/traefik/config.toml";
+in {
+  options.services.traefik = {
+    enable = mkEnableOption (lib.mdDoc "Traefik web server");
+
+    staticConfigFile = mkOption {
+      default = null;
+      example = literalExpression "/path/to/static_config.toml";
+      type = types.nullOr types.path;
+      description = lib.mdDoc ''
+        Path to traefik's static configuration to use.
+        (Setting this option takes precedence over `staticConfigOptions` and `dynamicConfigOptions`.)
+      '';
+    };
+
+    staticConfigOptions = mkOption {
+      description = lib.mdDoc ''
+        Static configuration for Traefik.
+      '';
+      type = jsonValue;
+      default = { entryPoints.http.address = ":80"; };
+      example = {
+        entryPoints.web.address = ":8080";
+        entryPoints.http.address = ":80";
+
+        api = { };
+      };
+    };
+
+    dynamicConfigFile = mkOption {
+      default = null;
+      example = literalExpression "/path/to/dynamic_config.toml";
+      type = types.nullOr types.path;
+      description = lib.mdDoc ''
+        Path to traefik's dynamic configuration to use.
+        (Setting this option takes precedence over `dynamicConfigOptions`.)
+      '';
+    };
+
+    dynamicConfigOptions = mkOption {
+      description = lib.mdDoc ''
+        Dynamic configuration for Traefik.
+      '';
+      type = jsonValue;
+      default = { };
+      example = {
+        http.routers.router1 = {
+          rule = "Host(`localhost`)";
+          service = "service1";
+        };
+
+        http.services.service1.loadBalancer.servers =
+          [{ url = "http://localhost:8080"; }];
+      };
+    };
+
+    dataDir = mkOption {
+      default = "/var/lib/traefik";
+      type = types.path;
+      description = lib.mdDoc ''
+        Location for any persistent data traefik creates, e.g. ACME certificates.
+      '';
+    };
+
+    group = mkOption {
+      default = "traefik";
+      type = types.str;
+      example = "docker";
+      description = lib.mdDoc ''
+        Set the group that traefik runs under.
+        For the docker backend this needs to be set to `docker` instead.
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.traefik;
+      defaultText = literalExpression "pkgs.traefik";
+      type = types.package;
+      description = lib.mdDoc "Traefik package to use.";
+    };
+
+    environmentFiles = mkOption {
+      default = [];
+      type = types.listOf types.path;
+      example = [ "/run/secrets/traefik.env" ];
+      description = lib.mdDoc ''
+        Files to load as environment files. Environment variables from these files
+        will be substituted into the static configuration file using envsubst.
+      '';
+    };
+  };
+
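+  # Example (illustrative values only): with the default static configuration, a
+  # single router and service might be declared roughly like this:
+  #
+  #   services.traefik = {
+  #     enable = true;
+  #     dynamicConfigOptions.http = {
+  #       routers.router1 = { rule = "Host(`localhost`)"; service = "service1"; };
+  #       services.service1.loadBalancer.servers = [ { url = "http://localhost:8080"; } ];
+  #     };
+  #   };
+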
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = [ "d '${cfg.dataDir}' 0700 traefik traefik - -" ];
+
+    systemd.services.traefik = {
+      description = "Traefik web server";
+      after = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      startLimitIntervalSec = 86400;
+      startLimitBurst = 5;
+      serviceConfig = {
+        EnvironmentFile = cfg.environmentFiles;
+        ExecStartPre = lib.optional (cfg.environmentFiles != [])
+          (pkgs.writeShellScript "pre-start" ''
+            umask 077
+            ${pkgs.envsubst}/bin/envsubst -i "${staticConfigFile}" > "${finalStaticConfigFile}"
+          '');
+        ExecStart = "${cfg.package}/bin/traefik --configfile=${finalStaticConfigFile}";
+        Type = "simple";
+        User = "traefik";
+        Group = cfg.group;
+        Restart = "on-failure";
+        AmbientCapabilities = "cap_net_bind_service";
+        CapabilityBoundingSet = "cap_net_bind_service";
+        NoNewPrivileges = true;
+        LimitNPROC = 64;
+        LimitNOFILE = 1048576;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        ProtectHome = true;
+        ProtectSystem = "full";
+        ReadWriteDirectories = cfg.dataDir;
+        RuntimeDirectory = "traefik";
+      };
+    };
+
+    users.users.traefik = {
+      group = "traefik";
+      home = cfg.dataDir;
+      createHome = true;
+      isSystemUser = true;
+    };
+
+    users.groups.traefik = { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/trafficserver/default.nix b/nixpkgs/nixos/modules/services/web-servers/trafficserver/default.nix
new file mode 100644
index 000000000000..17dece8746a1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/trafficserver/default.nix
@@ -0,0 +1,310 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.trafficserver;
+  user = config.users.users.trafficserver.name;
+  group = config.users.groups.trafficserver.name;
+
+  getManualUrl = name: "https://docs.trafficserver.apache.org/en/latest/admin-guide/files/${name}.en.html";
+
+  yaml = pkgs.formats.yaml { };
+
+  mkYamlConf = name: cfg:
+    if cfg != null then {
+      "trafficserver/${name}.yaml".source = yaml.generate "${name}.yaml" cfg;
+    } else {
+      "trafficserver/${name}.yaml".text = "";
+    };
+
+  mkRecordLines = path: value:
+    if isAttrs value then
+      lib.mapAttrsToList (n: v: mkRecordLines (path ++ [ n ]) v) value
+    else if isInt value then
+      "CONFIG ${concatStringsSep "." path} INT ${toString value}"
+    else if isFloat value then
+      "CONFIG ${concatStringsSep "." path} FLOAT ${toString value}"
+    else
+      "CONFIG ${concatStringsSep "." path} STRING ${toString value}";
+
+  mkRecordsConfig = cfg: concatStringsSep "\n" (flatten (mkRecordLines [ ] cfg));
+  mkPluginConfig = cfg: concatStringsSep "\n" (map (p: "${p.path} ${p.arg}") cfg);
+in
+{
+  options.services.trafficserver = {
+    enable = mkEnableOption (lib.mdDoc "Apache Traffic Server");
+
+    cache = mkOption {
+      type = types.lines;
+      default = "";
+      example = "dest_domain=example.com suffix=js action=never-cache";
+      description = lib.mdDoc ''
+        Caching rules that overrule the origin's caching policy.
+
+        Consult the [upstream
+        documentation](${getManualUrl "cache.config"}) for more details.
+      '';
+    };
+
+    hosting = mkOption {
+      type = types.lines;
+      default = "";
+      example = "domain=example.com volume=1";
+      description = lib.mdDoc ''
+        Partition the cache according to origin server or domain
+
+        Consult the [
+        upstream documentation](${getManualUrl "hosting.config"}) for more details.
+      '';
+    };
+
+    ipAllow = mkOption {
+      type = types.nullOr yaml.type;
+      default = lib.importJSON ./ip_allow.json;
+      defaultText = literalMD "upstream defaults";
+      example = literalExpression ''
+        {
+          ip_allow = [{
+            apply = "in";
+            ip_addrs = "127.0.0.1";
+            action = "allow";
+            methods = "ALL";
+          }];
+        }
+      '';
+      description = lib.mdDoc ''
+        Control client access to Traffic Server and Traffic Server connections
+        to upstream servers.
+
+        Consult the [upstream
+        documentation](${getManualUrl "ip_allow.yaml"}) for more details.
+      '';
+    };
+
+    logging = mkOption {
+      type = types.nullOr yaml.type;
+      default = lib.importJSON ./logging.json;
+      defaultText = literalMD "upstream defaults";
+      example = { };
+      description = lib.mdDoc ''
+        Configure logs.
+
+        Consult the [upstream
+        documentation](${getManualUrl "logging.yaml"}) for more details.
+      '';
+    };
+
+    parent = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        dest_domain=. method=get parent="p1.example:8080; p2.example:8080" round_robin=true
+      '';
+      description = lib.mdDoc ''
+        Identify the parent proxies used in a cache hierarchy.
+
+        Consult the [upstream
+        documentation](${getManualUrl "parent.config"}) for more details.
+      '';
+    };
+
+    plugins = mkOption {
+      default = [ ];
+
+      description = lib.mdDoc ''
+        Controls run-time loadable plugins available to Traffic Server, as
+        well as their configuration.
+
+        Consult the [upstream
+        documentation](${getManualUrl "plugin.config"}) for more details.
+      '';
+
+      type = with types;
+        listOf (submodule {
+          options.path = mkOption {
+            type = str;
+            example = "xdebug.so";
+            description = lib.mdDoc ''
+              Path to plugin. The path can either be absolute, or relative to
+              the plugin directory.
+            '';
+          };
+          options.arg = mkOption {
+            type = str;
+            default = "";
+            example = "--header=ATS-My-Debug";
+            description = lib.mdDoc "arguments to pass to the plugin";
+          };
+        });
+    };
+
+    records = mkOption {
+      type = with types;
+        let valueType = (attrsOf (oneOf [ int float str valueType ])) // {
+          description = "Traffic Server records value";
+        };
+        in
+        valueType;
+      default = { };
+      example = { proxy.config.proxy_name = "my_server"; };
+      description = lib.mdDoc ''
+        Attribute set of configurable variables used by Traffic Server.
+
+        Consult the [
+        upstream documentation](${getManualUrl "records.config"}) for more details.
+      '';
+    };
+
+    remap = mkOption {
+      type = types.lines;
+      default = "";
+      example = "map http://from.example http://origin.example";
+      description = lib.mdDoc ''
+        URL remapping rules used by Traffic Server.
+
+        Consult the [
+        upstream documentation](${getManualUrl "remap.config"}) for more details.
+      '';
+    };
+
+    splitDns = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        dest_domain=internal.corp.example named="255.255.255.255:212 255.255.255.254" def_domain=corp.example search_list="corp.example corp1.example"
+        dest_domain=!internal.corp.example named=255.255.255.253
+      '';
+      description = lib.mdDoc ''
+        Specify the DNS server that Traffic Server should use under specific
+        conditions.
+
+        Consult the [
+        upstream documentation](${getManualUrl "splitdns.config"}) for more details.
+      '';
+    };
+
+    sslMulticert = mkOption {
+      type = types.lines;
+      default = "";
+      example = "dest_ip=* ssl_cert_name=default.pem";
+      description = lib.mdDoc ''
+        Configure SSL server certificates to terminate the SSL sessions.
+
+        Consult the [
+        upstream documentation](${getManualUrl "ssl_multicert.config"}) for more details.
+      '';
+    };
+
+    sni = mkOption {
+      type = types.nullOr yaml.type;
+      default = null;
+      example = literalExpression ''
+        {
+          sni = [{
+            fqdn = "no-http2.example.com";
+            https = "off";
+          }];
+        }
+      '';
+      description = lib.mdDoc ''
+        Configure aspects of TLS connection handling for both inbound and
+        outbound connections.
+
+        Consult the [upstream
+        documentation](${getManualUrl "sni.yaml"}) for more details.
+      '';
+    };
+
+    storage = mkOption {
+      type = types.lines;
+      default = "/var/cache/trafficserver 256M";
+      example = "/dev/disk/by-id/XXXXX volume=1";
+      description = lib.mdDoc ''
+        List all the storage that makes up the Traffic Server cache.
+
+        Consult the [
+        upstream documentation](${getManualUrl "storage.config"}) for more details.
+      '';
+    };
+
+    strategies = mkOption {
+      type = types.nullOr yaml.type;
+      default = null;
+      description = lib.mdDoc ''
+        Specify the next hop proxies used in a cache hierarchy and the
+        algorithms used to select the next proxy.
+
+        Consult the [
+        upstream documentation](${getManualUrl "strategies.yaml"}) for more details.
+      '';
+    };
+
+    volume = mkOption {
+      type = types.lines;
+      default = "";
+      example = "volume=1 scheme=http size=20%";
+      description = lib.mdDoc ''
+        Manage cache space more efficiently and restrict disk usage by
+        creating cache volumes of different sizes.
+
+        Consult the [
+        upstream documentation](${getManualUrl "volume.config"}) for more details.
+      '';
+    };
+  };
+
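+  # Example (illustrative values only): a small reverse-proxy setup might combine a
+  # remap rule with a records.config override, for instance:
+  #
+  #   services.trafficserver = {
+  #     enable = true;
+  #     remap = "map http://from.example http://origin.example";
+  #     records.proxy.config.http.server_ports = "8080 8080:ipv6";
+  #   };
+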
+  config = mkIf cfg.enable {
+    environment.etc = {
+      "trafficserver/cache.config".text = cfg.cache;
+      "trafficserver/hosting.config".text = cfg.hosting;
+      "trafficserver/parent.config".text = cfg.parent;
+      "trafficserver/plugin.config".text = mkPluginConfig cfg.plugins;
+      "trafficserver/records.config".text = mkRecordsConfig cfg.records;
+      "trafficserver/remap.config".text = cfg.remap;
+      "trafficserver/splitdns.config".text = cfg.splitDns;
+      "trafficserver/ssl_multicert.config".text = cfg.sslMulticert;
+      "trafficserver/storage.config".text = cfg.storage;
+      "trafficserver/volume.config".text = cfg.volume;
+    } // (mkYamlConf "ip_allow" cfg.ipAllow)
+    // (mkYamlConf "logging" cfg.logging)
+    // (mkYamlConf "sni" cfg.sni)
+    // (mkYamlConf "strategies" cfg.strategies);
+
+    environment.systemPackages = [ pkgs.trafficserver ];
+    systemd.packages = [ pkgs.trafficserver ];
+
+    # Traffic Server does privilege handling independently of systemd, and
+    # therefore should be started as root
+    systemd.services.trafficserver = {
+      enable = true;
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    # These directories can't be created by systemd because:
+    #
+    #   1. Traffic Server starts as root and switches to an unprivileged user
+    #      afterwards. The runtime directories defined below are assumed to be
+    #      owned by that user.
+    #   2. The bin/trafficserver script assumes these directories exist.
+    systemd.tmpfiles.rules = [
+      "d '/run/trafficserver' - ${user} ${group} - -"
+      "d '/var/cache/trafficserver' - ${user} ${group} - -"
+      "d '/var/lib/trafficserver' - ${user} ${group} - -"
+      "d '/var/log/trafficserver' - ${user} ${group} - -"
+    ];
+
+    services.trafficserver = {
+      records.proxy.config.admin.user_id = user;
+      records.proxy.config.body_factory.template_sets_dir =
+        "${pkgs.trafficserver}/etc/trafficserver/body_factory";
+    };
+
+    users.users.trafficserver = {
+      description = "Apache Traffic Server";
+      isSystemUser = true;
+      inherit group;
+    };
+    users.groups.trafficserver = { };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/trafficserver/ip_allow.json b/nixpkgs/nixos/modules/services/web-servers/trafficserver/ip_allow.json
new file mode 100644
index 000000000000..fc2db8037286
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/trafficserver/ip_allow.json
@@ -0,0 +1,36 @@
+{
+  "ip_allow": [
+    {
+      "apply": "in",
+      "ip_addrs": "127.0.0.1",
+      "action": "allow",
+      "methods": "ALL"
+    },
+    {
+      "apply": "in",
+      "ip_addrs": "::1",
+      "action": "allow",
+      "methods": "ALL"
+    },
+    {
+      "apply": "in",
+      "ip_addrs": "0/0",
+      "action": "deny",
+      "methods": [
+        "PURGE",
+        "PUSH",
+        "DELETE"
+      ]
+    },
+    {
+      "apply": "in",
+      "ip_addrs": "::/0",
+      "action": "deny",
+      "methods": [
+        "PURGE",
+        "PUSH",
+        "DELETE"
+      ]
+    }
+  ]
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/trafficserver/logging.json b/nixpkgs/nixos/modules/services/web-servers/trafficserver/logging.json
new file mode 100644
index 000000000000..81e7ba0186c6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/trafficserver/logging.json
@@ -0,0 +1,37 @@
+{
+  "logging": {
+    "formats": [
+      {
+        "name": "welf",
+        "format": "id=firewall time=\"%<cqtd> %<cqtt>\" fw=%<phn> pri=6 proto=%<cqus> duration=%<ttmsf> sent=%<psql> rcvd=%<cqhl> src=%<chi> dst=%<shi> dstname=%<shn> user=%<caun> op=%<cqhm> arg=\"%<cqup>\" result=%<pssc> ref=\"%<{Referer}cqh>\" agent=\"%<{user-agent}cqh>\" cache=%<crc>"
+      },
+      {
+        "name": "squid_seconds_only_timestamp",
+        "format": "%<cqts> %<ttms> %<chi> %<crc>/%<pssc> %<psql> %<cqhm> %<cquc> %<caun> %<phr>/%<shn> %<psct>"
+      },
+      {
+        "name": "squid",
+        "format": "%<cqtq> %<ttms> %<chi> %<crc>/%<pssc> %<psql> %<cqhm> %<cquc> %<caun> %<phr>/%<shn> %<psct>"
+      },
+      {
+        "name": "common",
+        "format": "%<chi> - %<caun> [%<cqtn>] \"%<cqtx>\" %<pssc> %<pscl>"
+      },
+      {
+        "name": "extended",
+        "format": "%<chi> - %<caun> [%<cqtn>] \"%<cqtx>\" %<pssc> %<pscl> %<sssc> %<sscl> %<cqcl> %<pqcl> %<cqhl> %<pshl> %<pqhl> %<sshl> %<tts>"
+      },
+      {
+        "name": "extended2",
+        "format": "%<chi> - %<caun> [%<cqtn>] \"%<cqtx>\" %<pssc> %<pscl> %<sssc> %<sscl> %<cqcl> %<pqcl> %<cqhl> %<pshl> %<pqhl> %<sshl> %<tts> %<phr> %<cfsc> %<pfsc> %<crc>"
+      }
+    ],
+    "logs": [
+      {
+        "filename": "squid",
+        "format": "squid",
+        "mode": "binary"
+      }
+    ]
+  }
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/ttyd.nix b/nixpkgs/nixos/modules/services/web-servers/ttyd.nix
new file mode 100644
index 000000000000..3b1d87ccb483
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/ttyd.nix
@@ -0,0 +1,197 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.ttyd;
+
+  # Command line arguments for the ttyd daemon
+  args = [ "--port" (toString cfg.port) ]
+         ++ optionals (cfg.socket != null) [ "--interface" cfg.socket ]
+         ++ optionals (cfg.interface != null) [ "--interface" cfg.interface ]
+         ++ [ "--signal" (toString cfg.signal) ]
+         ++ (concatLists (mapAttrsToList (k: v: [ "--client-option" "${k}=${v}" ]) cfg.clientOptions))
+         ++ [ "--terminal-type" cfg.terminalType ]
+         ++ optionals cfg.checkOrigin [ "--check-origin" ]
+         ++ [ "--max-clients" (toString cfg.maxClients) ]
+         ++ optionals (cfg.indexFile != null) [ "--index" cfg.indexFile ]
+         ++ optionals cfg.enableIPv6 [ "--ipv6" ]
+         ++ optionals cfg.enableSSL [ "--ssl-cert" cfg.certFile
+                                      "--ssl-key" cfg.keyFile
+                                      "--ssl-ca" cfg.caFile ]
+         ++ [ "--debug" (toString cfg.logLevel) ];
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.ttyd = {
+      enable = mkEnableOption (lib.mdDoc "ttyd daemon");
+
+      port = mkOption {
+        type = types.port;
+        default = 7681;
+        description = lib.mdDoc "Port to listen on (use 0 for random port)";
+      };
+
+      socket = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        example = "/var/run/ttyd.sock";
+        description = lib.mdDoc "UNIX domain socket path to bind.";
+      };
+
+      interface = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "eth0";
+        description = lib.mdDoc "Network interface to bind.";
+      };
+
+      username = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "Username for basic authentication.";
+      };
+
+      passwordFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        apply = value: if value == null then null else toString value;
+        description = lib.mdDoc ''
+          File containing the password to use for basic authentication.
+          To (insecurely) store the password in the world-readable Nix store, use
+          `pkgs.writeText "ttydpw" "MyPassword"`.
+        '';
+      };
+
+      signal = mkOption {
+        type = types.ints.u8;
+        default = 1;
+        description = lib.mdDoc "Signal to send to the command on session close.";
+      };
+
+      clientOptions = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        example = literalExpression ''
+          {
+            fontSize = "16";
+            fontFamily = "Fira Code";
+          }
+        '';
+        description = lib.mdDoc ''
+          Attribute set of client options for xtermjs.
+          <https://xtermjs.org/docs/api/terminal/interfaces/iterminaloptions/>
+        '';
+      };
+
+      terminalType = mkOption {
+        type = types.str;
+        default = "xterm-256color";
+        description = lib.mdDoc "Terminal type to report.";
+      };
+
+      checkOrigin = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to allow a websocket connection from a different origin.";
+      };
+
+      maxClients = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc "Maximum clients to support (0, no limit)";
+      };
+
+      indexFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "Custom index.html path";
+      };
+
+      enableIPv6 = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether or not to enable IPv6 support.";
+      };
+
+      enableSSL = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether or not to enable SSL (https) support.";
+      };
+
+      certFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "SSL certificate file path.";
+      };
+
+      keyFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        apply = value: if value == null then null else toString value;
+        description = lib.mdDoc ''
+          SSL key file path.
+          To (insecurely) store the key in the world-readable Nix store, use
+          `pkgs.writeText "ttydKeyFile" "SSLKEY"`.
+        '';
+      };
+
+      caFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc "SSL CA file path for client certificate verification.";
+      };
+
+      logLevel = mkOption {
+        type = types.int;
+        default = 7;
+        description = lib.mdDoc "Set log level.";
+      };
+    };
+  };
+
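+  # Example (illustrative values only): a terminal on the default port behind basic
+  # authentication might be configured as:
+  #
+  #   services.ttyd = {
+  #     enable = true;
+  #     username = "admin";
+  #     passwordFile = "/run/secrets/ttyd-password";
+  #     clientOptions.fontSize = "16";
+  #   };
+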
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions =
+      [ { assertion = cfg.enableSSL
+            -> cfg.certFile != null && cfg.keyFile != null && cfg.caFile != null;
+          message = "SSL is enabled for ttyd, but no certFile, keyFile or caFile has been specified."; }
+        { assertion = ! (cfg.interface != null && cfg.socket != null);
+          message = "Cannot set both interface and socket for ttyd."; }
+        { assertion = (cfg.username != null) == (cfg.passwordFile != null);
+          message = "Need to set both username and passwordFile for ttyd"; }
+      ];
+
+    systemd.services.ttyd = {
+      description = "ttyd Web Server Daemon";
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        # Runs login which needs to be run as root
+        # login: Cannot possibly work without effective root
+        User = "root";
+      };
+
+      script = if cfg.passwordFile != null then ''
+        PASSWORD=$(cat ${escapeShellArg cfg.passwordFile})
+        ${pkgs.ttyd}/bin/ttyd ${lib.escapeShellArgs args} \
+          --credential ${escapeShellArg cfg.username}:"$PASSWORD" \
+          ${pkgs.shadow}/bin/login
+      ''
+      else ''
+        ${pkgs.ttyd}/bin/ttyd ${lib.escapeShellArgs args} \
+          ${pkgs.shadow}/bin/login
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/unit/default.nix b/nixpkgs/nixos/modules/services/web-servers/unit/default.nix
new file mode 100644
index 000000000000..1515779c9064
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/unit/default.nix
@@ -0,0 +1,155 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.unit;
+
+  configFile = pkgs.writeText "unit.json" cfg.config;
+
+in {
+  options = {
+    services.unit = {
+      enable = mkEnableOption (lib.mdDoc "Unit App Server");
+      package = mkOption {
+        type = types.package;
+        default = pkgs.unit;
+        defaultText = literalExpression "pkgs.unit";
+        description = lib.mdDoc "Unit package to use.";
+      };
+      user = mkOption {
+        type = types.str;
+        default = "unit";
+        description = lib.mdDoc "User account under which unit runs.";
+      };
+      group = mkOption {
+        type = types.str;
+        default = "unit";
+        description = lib.mdDoc "Group account under which unit runs.";
+      };
+      stateDir = mkOption {
+        type = types.path;
+        default = "/var/spool/unit";
+        description = lib.mdDoc "Unit data directory.";
+      };
+      logDir = mkOption {
+        type = types.path;
+        default = "/var/log/unit";
+        description = lib.mdDoc "Unit log directory.";
+      };
+      config = mkOption {
+        type = types.str;
+        default = ''
+          {
+            "listeners": {},
+            "applications": {}
+          }
+        '';
+        example = ''
+          {
+            "listeners": {
+              "*:8300": {
+                "application": "example-php-72"
+              }
+            },
+            "applications": {
+              "example-php-72": {
+                "type": "php 7.2",
+                "processes": 4,
+                "user": "nginx",
+                "group": "nginx",
+                "root": "/var/www",
+                "index": "index.php",
+                "options": {
+                  "file": "/etc/php.d/default.ini",
+                  "admin": {
+                    "max_execution_time": "30",
+                    "max_input_time": "30",
+                    "display_errors": "off",
+                    "display_startup_errors": "off",
+                    "open_basedir": "/dev/urandom:/proc/cpuinfo:/proc/meminfo:/etc/ssl/certs:/var/www",
+                    "disable_functions": "exec,passthru,shell_exec,system"
+                  }
+                }
+              }
+            }
+          }
+        '';
+        description = lib.mdDoc "Unit configuration in JSON format. More details here https://unit.nginx.org/configuration";
+      };
+    };
+  };
+
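+  # Example (illustrative, values taken from the option example above): since
+  # `config` is a plain string, it can also be generated from a Nix attribute set:
+  #
+  #   services.unit = {
+  #     enable = true;
+  #     config = builtins.toJSON {
+  #       listeners."*:8300".application = "example-php-72";
+  #       applications.example-php-72 = { type = "php 7.2"; root = "/var/www"; index = "index.php"; };
+  #     };
+  #   };
+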
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.tmpfiles.rules = [
+      "d '${cfg.stateDir}' 0750 ${cfg.user} ${cfg.group} - -"
+      "d '${cfg.logDir}' 0750 ${cfg.user} ${cfg.group} - -"
+    ];
+
+    systemd.services.unit = {
+      description = "Unit App Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      preStart = ''
+        [ ! -e '${cfg.stateDir}/conf.json' ] || rm -f '${cfg.stateDir}/conf.json'
+      '';
+      postStart = ''
+        ${pkgs.curl}/bin/curl -X PUT --data-binary '@${configFile}' --unix-socket '/run/unit/control.unit.sock' 'http://localhost/config'
+      '';
+      serviceConfig = {
+        Type = "forking";
+        PIDFile = "/run/unit/unit.pid";
+        ExecStart = ''
+          ${cfg.package}/bin/unitd --control 'unix:/run/unit/control.unit.sock' --pid '/run/unit/unit.pid' \
+                                   --log '${cfg.logDir}/unit.log' --statedir '${cfg.stateDir}' --tmpdir '/tmp' \
+                                   --user ${cfg.user} --group ${cfg.group}
+        '';
+        ExecStop = ''
+          ${pkgs.curl}/bin/curl -X DELETE --unix-socket '/run/unit/control.unit.sock' 'http://localhost/config'
+        '';
+        # Runtime directory and mode
+        RuntimeDirectory = "unit";
+        RuntimeDirectoryMode = "0750";
+        # Access write directories
+        ReadWritePaths = [ cfg.stateDir cfg.logDir ];
+        # Security
+        NoNewPrivileges = true;
+        # Sandboxing
+        ProtectSystem = "strict";
+        ProtectHome = true;
+        PrivateTmp = true;
+        PrivateDevices = true;
+        PrivateUsers = false;
+        ProtectHostname = true;
+        ProtectClock = true;
+        ProtectKernelTunables = true;
+        ProtectKernelModules = true;
+        ProtectKernelLogs = true;
+        ProtectControlGroups = true;
+        RestrictAddressFamilies = [ "AF_UNIX" "AF_INET" "AF_INET6" ];
+        LockPersonality = true;
+        MemoryDenyWriteExecute = true;
+        RestrictRealtime = true;
+        RestrictSUIDSGID = true;
+        PrivateMounts = true;
+        # System Call Filtering
+        SystemCallArchitectures = "native";
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "unit") {
+      unit = {
+        group = cfg.group;
+        isSystemUser = true;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "unit") {
+      unit = { };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/uwsgi.nix b/nixpkgs/nixos/modules/services/web-servers/uwsgi.nix
new file mode 100644
index 000000000000..6d3a18d71e91
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/uwsgi.nix
@@ -0,0 +1,233 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.uwsgi;
+
+  isEmperor = cfg.instance.type == "emperor";
+
+  imperialPowers =
+    [
+      # spawn other user processes
+      "CAP_SETUID" "CAP_SETGID"
+      "CAP_SYS_CHROOT"
+      # transfer capabilities
+      "CAP_SETPCAP"
+      # create other user sockets
+      "CAP_CHOWN"
+    ];
+
+  buildCfg = name: c:
+    let
+      plugins' =
+        if any (n: !any (m: m == n) cfg.plugins) (c.plugins or [])
+        then throw "`plugins` attribute in uWSGI configuration contains plugins not in config.services.uwsgi.plugins"
+        else c.plugins or cfg.plugins;
+      plugins = unique plugins';
+
+      hasPython = v: filter (n: n == "python${v}") plugins != [];
+      hasPython2 = hasPython "2";
+      hasPython3 = hasPython "3";
+
+      python =
+        if hasPython2 && hasPython3 then
+          throw "`plugins` attribute in uWSGI configuration shouldn't contain both python2 and python3"
+        else if hasPython2 then cfg.package.python2
+        else if hasPython3 then cfg.package.python3
+        else null;
+
+      pythonEnv = python.withPackages (c.pythonPackages or (self: []));
+
+      uwsgiCfg = {
+        uwsgi =
+          if c.type == "normal"
+            then {
+              inherit plugins;
+            } // removeAttrs c [ "type" "pythonPackages" ]
+              // optionalAttrs (python != null) {
+                pyhome = "${pythonEnv}";
+                env =
+                  # Argh, uwsgi expects list of key-values there instead of a dictionary.
+                  let envs = partition (hasPrefix "PATH=") (c.env or []);
+                      oldPaths = map (x: substring (stringLength "PATH=") (stringLength x) x) envs.right;
+                      paths = oldPaths ++ [ "${pythonEnv}/bin" ];
+                  in [ "PATH=${concatStringsSep ":" paths}" ] ++ envs.wrong;
+              }
+          else if isEmperor
+            then {
+              emperor = if builtins.typeOf c.vassals != "set" then c.vassals
+                        else pkgs.buildEnv {
+                          name = "vassals";
+                          paths = mapAttrsToList buildCfg c.vassals;
+                        };
+            } // removeAttrs c [ "type" "vassals" ]
+          else throw "`type` attribute in uWSGI configuration should be either 'normal' or 'emperor'";
+      };
+
+    in pkgs.writeTextDir "${name}.json" (builtins.toJSON uwsgiCfg);
+
+in {
+
+  options = {
+    services.uwsgi = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable uWSGI";
+      };
+
+      runDir = mkOption {
+        type = types.path;
+        default = "/run/uwsgi";
+        description = lib.mdDoc "Where uWSGI communication sockets can live";
+      };
+
+      package = mkOption {
+        type = types.package;
+        internal = true;
+      };
+
+      instance = mkOption {
+        type =  with types; let
+          valueType = nullOr (oneOf [
+            bool
+            int
+            float
+            str
+            (lazyAttrsOf valueType)
+            (listOf valueType)
+            (mkOptionType {
+              name = "function";
+              description = "function";
+              check = x: isFunction x;
+              merge = mergeOneOption;
+            })
+          ]) // {
+            description = "Json value or lambda";
+            emptyValue.value = {};
+          };
+        in valueType;
+        default = {
+          type = "normal";
+        };
+        example = literalExpression ''
+          {
+            type = "emperor";
+            vassals = {
+              moin = {
+                type = "normal";
+                pythonPackages = self: with self; [ moinmoin ];
+                socket = "''${config.services.uwsgi.runDir}/uwsgi.sock";
+              };
+            };
+          }
+        '';
+        description = lib.mdDoc ''
+          uWSGI configuration. It expects a `type` attribute, which can be either
+          `normal` or `emperor`.
+
+          For `normal` mode you can specify `pythonPackages` as a function from the
+          Python package set to a list of packages; the Python home (`pyhome`) is set accordingly.
+
+          For `emperor` mode, use the `vassals` attribute, which should be either an
+          attribute set mapping names to configurations or a path to a directory.
+
+          Other attributes are written to the configuration file as-is. Note that the
+          `plugins` setting can be redefined here.
+        '';
+      };
+
+      plugins = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        description = lib.mdDoc "Plugins used with uWSGI";
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "uwsgi";
+        description = lib.mdDoc "User account under which uWSGI runs.";
+      };
+
+      group = mkOption {
+        type = types.str;
+        default = "uwsgi";
+        description = lib.mdDoc "Group account under which uWSGI runs.";
+      };
+
+      capabilities = mkOption {
+        type = types.listOf types.str;
+        apply = caps: caps ++ optionals isEmperor imperialPowers;
+        default = [ ];
+        example = literalExpression ''
+          [
+            "CAP_NET_BIND_SERVICE" # bind on ports <1024
+            "CAP_NET_RAW"          # open raw sockets
+          ]
+        '';
+        description = lib.mdDoc ''
+          Grant capabilities to the uWSGI instance. See the
+          `capabilities(7)` for available values.
+
+          ::: {.note}
+          uWSGI runs as an unprivileged user (even as Emperor) with the minimal
+          capabilities required. This option can be used to add fine-grained
+          permissions without running the service as root.
+
+          When in Emperor mode, any capability to be inherited by a vassal must
+          be specified again in the vassal configuration using `cap`.
+          See the uWSGI [docs](https://uwsgi-docs.readthedocs.io/en/latest/Capabilities.html)
+          for more information.
+          :::
+        '';
+      };
+
+      extraArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--chmod-socket=664" ];
+        description = lib.mdDoc "Extra command line arguments for uwsgi.";
+      };
+    };
+  };
+
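+  # Example (illustrative values only): an emperor with a single Python vassal might
+  # be declared roughly as follows:
+  #
+  #   services.uwsgi = {
+  #     enable = true;
+  #     plugins = [ "python3" ];
+  #     instance = {
+  #       type = "emperor";
+  #       vassals.myapp = {
+  #         type = "normal";
+  #         pythonPackages = self: with self; [ flask ];
+  #         socket = "${config.services.uwsgi.runDir}/myapp.sock";
+  #       };
+  #     };
+  #   };
+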
+  config = mkIf cfg.enable {
+    systemd.tmpfiles.rules = optional (cfg.runDir != "/run/uwsgi") ''
+      d ${cfg.runDir} 775 ${cfg.user} ${cfg.group}
+    '';
+
+    systemd.services.uwsgi = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        User = cfg.user;
+        Group = cfg.group;
+        Type = "notify";
+        ExecStart = "${cfg.package}/bin/uwsgi ${escapeShellArgs cfg.extraArgs} --json ${buildCfg "server" cfg.instance}/server.json";
+        ExecReload = "${pkgs.coreutils}/bin/kill -HUP $MAINPID";
+        ExecStop = "${pkgs.coreutils}/bin/kill -INT $MAINPID";
+        NotifyAccess = "main";
+        KillSignal = "SIGQUIT";
+        AmbientCapabilities = cfg.capabilities;
+        CapabilityBoundingSet = cfg.capabilities;
+        RuntimeDirectory = mkIf (cfg.runDir == "/run/uwsgi") "uwsgi";
+      };
+    };
+
+    users.users = optionalAttrs (cfg.user == "uwsgi") {
+      uwsgi = {
+        group = cfg.group;
+        uid = config.ids.uids.uwsgi;
+      };
+    };
+
+    users.groups = optionalAttrs (cfg.group == "uwsgi") {
+      uwsgi.gid = config.ids.gids.uwsgi;
+    };
+
+    services.uwsgi.package = pkgs.uwsgi.override {
+      plugins = unique cfg.plugins;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/varnish/default.nix b/nixpkgs/nixos/modules/services/web-servers/varnish/default.nix
new file mode 100644
index 000000000000..d7f19be0cec4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/varnish/default.nix
@@ -0,0 +1,115 @@
+{ config, lib, pkgs, ...}:
+
+with lib;
+
+let
+  cfg = config.services.varnish;
+
+  commandLine = "-f ${pkgs.writeText "default.vcl" cfg.config}" +
+      optionalString (cfg.extraModules != []) " -p vmod_path='${makeSearchPathOutput "lib" "lib/varnish/vmods" ([cfg.package] ++ cfg.extraModules)}' -r vmod_path";
+in
+{
+  options = {
+    services.varnish = {
+      enable = mkEnableOption (lib.mdDoc "Varnish Server");
+
+      enableConfigCheck = mkEnableOption (lib.mdDoc "checking the VCL configuration at build time") // { default = true; };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.varnish;
+        defaultText = literalExpression "pkgs.varnish";
+        description = lib.mdDoc ''
+          The Varnish package to use.
+        '';
+      };
+
+      http_address = mkOption {
+        type = types.str;
+        default = "*:6081";
+        description = lib.mdDoc ''
+          HTTP listen address and port.
+        '';
+      };
+
+      config = mkOption {
+        type = types.lines;
+        description = lib.mdDoc ''
+          Verbatim default.vcl configuration.
+        '';
+      };
+
+      stateDir = mkOption {
+        type = types.path;
+        default = "/var/spool/varnish/${config.networking.hostName}";
+        defaultText = literalExpression ''"/var/spool/varnish/''${config.networking.hostName}"'';
+        description = lib.mdDoc ''
+          Directory holding all state for Varnish to run.
+        '';
+      };
+
+      extraModules = mkOption {
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.varnishPackages.geoip ]";
+        description = lib.mdDoc ''
+          Varnish modules (except 'std').
+        '';
+      };
+
+      extraCommandLine = mkOption {
+        type = types.str;
+        default = "";
+        example = "-s malloc,256M";
+        description = lib.mdDoc ''
+          Command line switches for varnishd (run 'varnishd -?' to get a list of options).
+        '';
+      };
+    };
+
+  };
+
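+  # Example (illustrative values only): a cache in front of a local backend might
+  # look like:
+  #
+  #   services.varnish = {
+  #     enable = true;
+  #     http_address = "127.0.0.1:6081";
+  #     config = ''
+  #       vcl 4.0;
+  #       backend default { .host = "127.0.0.1"; .port = "8080"; }
+  #     '';
+  #   };
+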
+  config = mkIf cfg.enable {
+
+    systemd.services.varnish = {
+      description = "Varnish";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      preStart = ''
+        mkdir -p ${cfg.stateDir}
+        chown -R varnish:varnish ${cfg.stateDir}
+      '';
+      postStop = ''
+        rm -rf ${cfg.stateDir}
+      '';
+      serviceConfig = {
+        Type = "simple";
+        PermissionsStartOnly = true;
+        ExecStart = "${cfg.package}/sbin/varnishd -a ${cfg.http_address} -n ${cfg.stateDir} -F ${cfg.extraCommandLine} ${commandLine}";
+        Restart = "always";
+        RestartSec = "5s";
+        User = "varnish";
+        Group = "varnish";
+        AmbientCapabilities = "cap_net_bind_service";
+        NoNewPrivileges = true;
+        LimitNOFILE = 131072;
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    # check .vcl syntax at compile time (e.g. before nixops deployment)
+    system.checks = mkIf cfg.enableConfigCheck [
+      (pkgs.runCommand "check-varnish-syntax" {} ''
+        ${cfg.package}/bin/varnishd -C ${commandLine} 2> $out || (cat $out; exit 1)
+      '')
+    ];
+
+    users.users.varnish = {
+      group = "varnish";
+      uid = config.ids.uids.varnish;
+    };
+
+    users.groups.varnish.gid = config.ids.gids.varnish;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/web-servers/zope2.nix b/nixpkgs/nixos/modules/services/web-servers/zope2.nix
new file mode 100644
index 000000000000..a17fe6bc2082
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/web-servers/zope2.nix
@@ -0,0 +1,262 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.zope2;
+
+  zope2Opts = { name, ... }: {
+    options = {
+
+      name = mkOption {
+        default = "${name}";
+        type = types.str;
+        description = lib.mdDoc "The name of the zope2 instance. If undefined, the name of the attribute set will be used.";
+      };
+
+      threads = mkOption {
+        default = 2;
+        type = types.int;
+        description = lib.mdDoc "Specify the number of threads that Zope's ZServer web server will use to service requests. ";
+      };
+
+      http_address = mkOption {
+        default = "localhost:8080";
+        type = types.str;
+        description = lib.mdDoc "Give a port and address for the HTTP server.";
+      };
+
+      user = mkOption {
+        default = "zope2";
+        type = types.str;
+        description = lib.mdDoc "The name of the effective user for the Zope process.";
+      };
+
+      clientHome = mkOption {
+        default = "/var/lib/zope2/${name}";
+        type = types.path;
+        description = lib.mdDoc "Home directory of zope2 instance.";
+      };
+      extra = mkOption {
+        default =
+          ''
+          <zodb_db main>
+            mount-point /
+            cache-size 30000
+            <blobstorage>
+                blob-dir /var/lib/zope2/${name}/blobstorage
+                <filestorage>
+                path /var/lib/zope2/${name}/filestorage/Data.fs
+                </filestorage>
+            </blobstorage>
+          </zodb_db>
+          '';
+        type = types.lines;
+        description = lib.mdDoc "Extra zope.conf";
+      };
+
+      packages = mkOption {
+        type = types.listOf types.package;
+        description = lib.mdDoc "The list of packages you want to make available to the zope2 instance.";
+      };
+
+    };
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.zope2.instances = mkOption {
+      default = {};
+      type = with types; attrsOf (submodule zope2Opts);
+      example = literalExpression ''
+        {
+          plone01 = {
+            http_address = "127.0.0.1:8080";
+            extra =
+              '''
+              <zodb_db main>
+                mount-point /
+                cache-size 30000
+                <blobstorage>
+                    blob-dir /var/lib/zope2/plone01/blobstorage
+                    <filestorage>
+                    path /var/lib/zope2/plone01/filestorage/Data.fs
+                    </filestorage>
+                </blobstorage>
+              </zodb_db>
+              ''';
+          };
+        }
+      '';
+      description = lib.mdDoc "zope2 instances to be created automatically by the system.";
+    };
+  };
+
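+  # Example (illustrative values only): a single instance listening on localhost
+  # might be declared as:
+  #
+  #   services.zope2.instances.plone01 = {
+  #     http_address = "127.0.0.1:8080";
+  #     packages = [ ];  # Python eggs to make available to the instance
+  #   };
+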
+  ###### implementation
+
+  config = mkIf (cfg.instances != {}) {
+
+    users.users.zope2 = {
+      isSystemUser = true;
+      group = "zope2";
+    };
+    users.groups.zope2 = {};
+
+    systemd.services =
+      let
+
+        createZope2Instance = opts: name:
+          let
+            interpreter = pkgs.writeScript "interpreter"
+              ''
+              import sys
+
+              _interactive = True
+              if len(sys.argv) > 1:
+                  _options, _args = __import__("getopt").getopt(sys.argv[1:], 'ic:m:')
+                  _interactive = False
+                  for (_opt, _val) in _options:
+                      if _opt == '-i':
+                          _interactive = True
+                      elif _opt == '-c':
+                          exec _val
+                      elif _opt == '-m':
+                          sys.argv[1:] = _args
+                          _args = []
+                          __import__("runpy").run_module(
+                              _val, {}, "__main__", alter_sys=True)
+
+                  if _args:
+                      sys.argv[:] = _args
+                      __file__ = _args[0]
+                      del _options, _args
+                      execfile(__file__)
+
+              if _interactive:
+                  del _interactive
+                  __import__("code").interact(banner="", local=globals())
+              '';
+            env = pkgs.buildEnv {
+              name = "zope2-${name}-env";
+              paths = [
+                pkgs.python27
+                pkgs.python27Packages.recursivePthLoader
+                pkgs.python27Packages."plone.recipe.zope2instance"
+              ] ++ attrValues pkgs.python27.modules
+                ++ opts.packages;
+              postBuild =
+                ''
+                echo "#!$out/bin/python" > $out/bin/interpreter
+                cat ${interpreter} >> $out/bin/interpreter
+                '';
+            };
+            conf = pkgs.writeText "zope2-${name}-conf"
+              ''
+              %define INSTANCEHOME ${env}
+              instancehome $INSTANCEHOME
+              %define CLIENTHOME ${opts.clientHome}/${opts.name}
+              clienthome $CLIENTHOME
+
+              debug-mode off
+              security-policy-implementation C
+              verbose-security off
+              default-zpublisher-encoding utf-8
+              zserver-threads ${toString opts.threads}
+              effective-user ${opts.user}
+
+              pid-filename ${opts.clientHome}/${opts.name}/pid
+              lock-filename ${opts.clientHome}/${opts.name}/lock
+              python-check-interval 1000
+              enable-product-installation off
+
+              <environment>
+                zope_i18n_compile_mo_files false
+              </environment>
+
+              <eventlog>
+              level INFO
+              <logfile>
+                  path /var/log/zope2/${name}.log
+                  level INFO
+              </logfile>
+              </eventlog>
+
+              <logger access>
+              level WARN
+              <logfile>
+                  path /var/log/zope2/${name}-Z2.log
+                  format %(message)s
+              </logfile>
+              </logger>
+
+              <http-server>
+              address ${opts.http_address}
+              </http-server>
+
+              <zodb_db temporary>
+              <temporarystorage>
+                  name temporary storage for sessioning
+              </temporarystorage>
+              mount-point /temp_folder
+              container-class Products.TemporaryFolder.TemporaryContainer
+              </zodb_db>
+
+              ${opts.extra}
+              '';
+            ctlScript = pkgs.writeScript "zope2-${name}-ctl-script"
+              ''
+              #!${env}/bin/python
+
+              import sys
+              import plone.recipe.zope2instance.ctl
+
+              if __name__ == '__main__':
+                  sys.exit(plone.recipe.zope2instance.ctl.main(
+                      ["-C", "${conf}"]
+                      + sys.argv[1:]))
+              '';
+
+            ctl = pkgs.writeScript "zope2-${name}-ctl"
+              ''
+              #!${pkgs.bash}/bin/bash -e
+              export PYTHONHOME=${env}
+              exec ${ctlScript} "$@"
+              '';
+          in {
+            #description = "${name} instance";
+            after = [ "network.target" ];  # with RelStorage also add "postgresql.service"
+            wantedBy = [ "multi-user.target" ];
+            path = opts.packages;
+            preStart =
+              ''
+              mkdir -p /var/log/zope2/
+              touch /var/log/zope2/${name}.log
+              touch /var/log/zope2/${name}-Z2.log
+              chown ${opts.user} /var/log/zope2/${name}.log
+              chown ${opts.user} /var/log/zope2/${name}-Z2.log
+
+              mkdir -p ${opts.clientHome}/filestorage ${opts.clientHome}/blobstorage
+              mkdir -p ${opts.clientHome}/${opts.name}
+              chown ${opts.user} ${opts.clientHome} -R
+
+              ${ctl} adduser admin admin
+              '';
+
+            serviceConfig.Type = "forking";
+            serviceConfig.ExecStart = "${ctl} start";
+            serviceConfig.ExecStop = "${ctl} stop";
+            serviceConfig.ExecReload = "${ctl} restart";
+          };
+
+      in listToAttrs (map (name: { name = "zope2-${name}"; value = createZope2Instance (builtins.getAttr name cfg.instances) name; }) (builtins.attrNames cfg.instances));
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/clight.nix b/nixpkgs/nixos/modules/services/x11/clight.nix
new file mode 100644
index 000000000000..0f66e191fe28
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/clight.nix
@@ -0,0 +1,125 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.clight;
+
+  toConf = v:
+    if builtins.isFloat v then toString v
+    else if isInt v       then toString v
+    else if isBool v      then boolToString v
+    else if isString v    then ''"${escape [''"''] v}"''
+    else if isList v      then "[ " + concatMapStringsSep ", " toConf v + " ]"
+    else if isAttrs v     then "\n{\n" + convertAttrs v + "\n}"
+    else abort "clight.toConf: unexpected type (v = ${v})";
+
+  getSep = v:
+    if isAttrs v then ":"
+    else "=";
+
+  convertAttrs = attrs: concatStringsSep "\n" (mapAttrsToList
+    (name: value: "${toString name} ${getSep value} ${toConf value};")
+    attrs);
+
+  clightConf = pkgs.writeText "clight.conf" (convertAttrs
+    (filterAttrs
+      (_: value: value != null)
+      cfg.settings));
+in {
+  options.services.clight = {
+    enable = mkEnableOption (lib.mdDoc "clight");
+
+    temperature = {
+      day = mkOption {
+        type = types.int;
+        default = 5500;
+        description = lib.mdDoc ''
+          Colour temperature to use during the day, between
+          `1000` and `25000` K.
+        '';
+      };
+      night = mkOption {
+        type = types.int;
+        default = 3700;
+        description = lib.mdDoc ''
+          Colour temperature to use at night, between
+          `1000` and `25000` K.
+        '';
+      };
+    };
+
+    settings = let
+      validConfigTypes = with types; oneOf [ int str bool float ];
+      collectionTypes = with types; oneOf [ validConfigTypes (listOf validConfigTypes) ];
+    in mkOption {
+      type = with types; attrsOf (nullOr (either collectionTypes (attrsOf collectionTypes)));
+      default = {};
+      example = { captures = 20; gamma_long_transition = true; ac_capture_timeouts = [ 120 300 60 ]; };
+      description = lib.mdDoc ''
+        Additional configuration to extend clight.conf. See
+        <https://github.com/FedeDP/Clight/blob/master/Extra/clight.conf> for a
+        sample configuration file.
+      '';
+    };
+  };
+
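+  # Example (illustrative values only): when using the manual location provider,
+  # valid coordinates are required (see the assertion below), e.g.:
+  #
+  #   location = { provider = "manual"; latitude = 52.4; longitude = 5.3; };
+  #   services.clight = {
+  #     enable = true;
+  #     temperature = { day = 5500; night = 3700; };
+  #   };
+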
+  config = mkIf cfg.enable {
+    assertions = let
+      inRange = v: l: r: v >= l && v <= r;
+    in [
+      { assertion = config.location.provider == "manual" ->
+          inRange config.location.latitude (-90) 90 && inRange config.location.longitude (-180) 180;
+        message = "You must specify a valid latitude and longitude if manually providing location"; }
+    ];
+
+    boot.kernelModules = [ "i2c_dev" ];
+    environment.systemPackages = with pkgs; [ clight clightd ];
+    services.dbus.packages = with pkgs; [ clight clightd ];
+    services.upower.enable = true;
+
+    services.clight.settings = {
+      gamma.temp = with cfg.temperature; mkDefault [ day night ];
+    } // (optionalAttrs (config.location.provider == "manual") {
+      daytime.latitude = mkDefault config.location.latitude;
+      daytime.longitude = mkDefault config.location.longitude;
+    });
+
+    services.geoclue2.appConfig.clightc = {
+      isAllowed = true;
+      isSystem = true;
+    };
+
+    systemd.services.clightd = {
+      requires = [ "polkit.service" ];
+      wantedBy = [ "multi-user.target" ];
+
+      description = "Bus service to manage various screen related properties (gamma, dpms, backlight)";
+      serviceConfig = {
+        Type = "dbus";
+        BusName = "org.clightd.clightd";
+        Restart = "on-failure";
+        RestartSec = 5;
+        ExecStart = ''
+          ${pkgs.clightd}/bin/clightd
+        '';
+      };
+    };
+
+    systemd.user.services.clight = {
+      after = [ "upower.service" "clightd.service" ];
+      wants = [ "upower.service" "clightd.service" ];
+      partOf = [ "graphical-session.target" ];
+      wantedBy = [ "graphical-session.target" ];
+
+      description = "C daemon to adjust screen brightness to match ambient brightness, as computed capturing frames from webcam";
+      serviceConfig = {
+        Restart = "on-failure";
+        RestartSec = 5;
+        ExecStart = ''
+          ${pkgs.clight}/bin/clight --conf-file ${clightConf}
+        '';
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/colord.nix b/nixpkgs/nixos/modules/services/x11/colord.nix
new file mode 100644
index 000000000000..cb7b9096e5db
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/colord.nix
@@ -0,0 +1,41 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.colord;
+
+in {
+
+  options = {
+
+    services.colord = {
+      enable = mkEnableOption (lib.mdDoc "colord, the color management daemon");
+    };
+
+  };
+
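+  # Example: the module only needs to be enabled, e.g. `services.colord.enable = true;`.
+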
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.colord ];
+
+    services.dbus.packages = [ pkgs.colord ];
+
+    services.udev.packages = [ pkgs.colord ];
+
+    systemd.packages = [ pkgs.colord ];
+
+    systemd.tmpfiles.packages = [ pkgs.colord ];
+
+    users.users.colord = {
+      isSystemUser = true;
+      home = "/var/lib/colord";
+      group = "colord";
+    };
+
+    users.groups.colord = {};
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/budgie.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/budgie.nix
new file mode 100644
index 000000000000..a4f8bd5051ec
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/budgie.nix
@@ -0,0 +1,250 @@
+{ lib, pkgs, config, utils, ... }:
+
+let
+  inherit (lib) concatMapStrings literalExpression mdDoc mkDefault mkEnableOption mkIf mkOption types;
+
+  cfg = config.services.xserver.desktopManager.budgie;
+
+  nixos-background-light = pkgs.nixos-artwork.wallpapers.nineish;
+  nixos-background-dark = pkgs.nixos-artwork.wallpapers.nineish-dark-gray;
+
+  nixos-gsettings-overrides = pkgs.budgie.budgie-gsettings-overrides.override {
+    inherit (cfg) extraGSettingsOverrides extraGSettingsOverridePackages;
+    inherit nixos-background-dark nixos-background-light;
+  };
+
+  nixos-background-info = pkgs.writeTextFile {
+    name = "nixos-background-info";
+    text = ''
+      <?xml version="1.0"?>
+      <!DOCTYPE wallpapers SYSTEM "gnome-wp-list.dtd">
+      <wallpapers>
+        <wallpaper deleted="false">
+          <name>Nineish</name>
+          <filename>${nixos-background-light.gnomeFilePath}</filename>
+          <options>zoom</options>
+          <shade_type>solid</shade_type>
+          <pcolor>#d1dcf8</pcolor>
+          <scolor>#e3ebfe</scolor>
+        </wallpaper>
+        <wallpaper deleted="false">
+          <name>Nineish Dark Gray</name>
+          <filename>${nixos-background-dark.gnomeFilePath}</filename>
+          <options>zoom</options>
+          <shade_type>solid</shade_type>
+          <pcolor>#151515</pcolor>
+          <scolor>#262626</scolor>
+        </wallpaper>
+      </wallpapers>
+    '';
+    destination = "/share/gnome-background-properties/nixos.xml";
+  };
+in {
+  options = {
+    services.xserver.desktopManager.budgie = {
+      enable = mkEnableOption (mdDoc "the Budgie desktop");
+
+      sessionPath = mkOption {
+        description = lib.mdDoc ''
+          Additional list of packages to be added to the session search path.
+          Useful for GSettings-conditional autostart.
+
+          Note that this should be a last resort; patching the package is preferred (see GPaste).
+        '';
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.gnome.gpaste ]";
+      };
+
+      extraGSettingsOverrides = mkOption {
+        description = mdDoc "Additional GSettings overrides.";
+        type = types.lines;
+        default = "";
+      };
+
+      extraGSettingsOverridePackages = mkOption {
+        description = mdDoc "List of packages for which GSettings are overridden.";
+        type = types.listOf types.path;
+        default = [];
+      };
+
+      extraPlugins = mkOption {
+        description = mdDoc "Extra plugins for the Budgie desktop";
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.budgiePlugins.budgie-analogue-clock-applet ]";
+      };
+    };
+
+    environment.budgie.excludePackages = mkOption {
+      description = mdDoc "Which packages Budgie should exclude from the default environment.";
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression "[ pkgs.mate-terminal ]";
+    };
+  };
+
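+  # Example (illustrative): enabling the Budgie session, typically together with the
+  # X server and a display manager, might look like:
+  #
+  #   services.xserver.enable = true;
+  #   services.xserver.displayManager.lightdm.enable = true;
+  #   services.xserver.desktopManager.budgie.enable = true;
+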
+  config = mkIf cfg.enable {
+    services.xserver.displayManager.sessionPackages = with pkgs; [
+      budgie.budgie-desktop
+    ];
+
+    services.xserver.displayManager.lightdm.greeters.slick = {
+      enable = mkDefault true;
+      theme = mkDefault { name = "Qogir"; package = pkgs.qogir-theme; };
+      iconTheme = mkDefault { name = "Qogir"; package = pkgs.qogir-icon-theme; };
+      cursorTheme = mkDefault { name = "Qogir"; package = pkgs.qogir-icon-theme; };
+    };
+
+    services.xserver.desktopManager.budgie.sessionPath = [ pkgs.budgie.budgie-desktop-view ];
+
+    environment.extraInit = ''
+      ${concatMapStrings (p: ''
+        if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
+          export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
+        fi
+        if [ -d "${p}/lib/girepository-1.0" ]; then
+          export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
+          export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
+        fi
+      '') cfg.sessionPath}
+    '';
+
+    environment.systemPackages = with pkgs;
+      [
+        # Budgie Desktop.
+        budgie.budgie-backgrounds
+        budgie.budgie-control-center
+        (budgie.budgie-desktop-with-plugins.override { plugins = cfg.extraPlugins; })
+        budgie.budgie-desktop-view
+        budgie.budgie-screensaver
+
+        # Required by the Budgie Desktop session.
+        (gnome.gnome-session.override { gnomeShellSupport = false; })
+
+        # Required by Budgie Menu.
+        gnome-menus
+
+        # Required by Budgie Control Center.
+        gnome.zenity
+
+        # Provides `gsettings`.
+        glib
+
+        # Update user directories.
+        xdg-user-dirs
+      ]
+      ++ lib.optional config.networking.networkmanager.enable pkgs.networkmanagerapplet
+      ++ (utils.removePackagesByName [
+          cinnamon.nemo
+          mate.eom
+          mate.pluma
+          mate.atril
+          mate.engrampa
+          mate.mate-calc
+          mate.mate-terminal
+          mate.mate-system-monitor
+          vlc
+
+          # Desktop themes.
+          qogir-theme
+          qogir-icon-theme
+          nixos-background-info
+
+          # Default settings.
+          nixos-gsettings-overrides
+        ] config.environment.budgie.excludePackages)
+      ++ cfg.sessionPath;
+
+    # Fonts.
+    fonts.packages = mkDefault [
+      pkgs.noto-fonts
+      pkgs.hack-font
+    ];
+    fonts.fontconfig.defaultFonts = {
+      sansSerif = mkDefault ["Noto Sans"];
+      monospace = mkDefault ["Hack"];
+    };
+
+    # Qt application style.
+    qt = {
+      enable = mkDefault true;
+      style = mkDefault "gtk2";
+      platformTheme = mkDefault "gtk2";
+    };
+
+    environment.pathsToLink = [
+      "/share" # TODO: https://github.com/NixOS/nixpkgs/issues/47173
+    ];
+
+    # GSettings overrides.
+    environment.sessionVariables.NIX_GSETTINGS_OVERRIDES_DIR = "${nixos-gsettings-overrides}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
+
+    # Required by Budgie Desktop.
+    services.xserver.updateDbusEnvironment = true;
+    programs.dconf.enable = true;
+
+    # Required by Budgie Screensaver.
+    security.pam.services.budgie-screensaver = {};
+
+    # Required by Budgie's Polkit Dialog.
+    security.polkit.enable = mkDefault true;
+
+    # Required by Budgie Panel plugins and/or Budgie Control Center panels.
+    networking.networkmanager.enable = mkDefault true; # for BCC's Network panel.
+    programs.nm-applet.enable = config.networking.networkmanager.enable; # Budgie has no Network applet.
+    programs.nm-applet.indicator = true; # Budgie uses AppIndicators.
+
+    hardware.bluetooth.enable = mkDefault true; # for Budgie's Status Indicator and BCC's Bluetooth panel.
+    hardware.pulseaudio.enable = mkDefault true; # for Budgie's Status Indicator and BCC's Sound panel.
+
+    xdg.portal.enable = mkDefault true; # for BCC's Applications panel.
+    xdg.portal.extraPortals = with pkgs; [
+      xdg-desktop-portal-gtk # provides a XDG Portals implementation.
+    ];
+
+    services.geoclue2.enable = mkDefault true; # for BCC's Privacy > Location Services panel.
+    services.upower.enable = config.powerManagement.enable; # for Budgie's Status Indicator and BCC's Power panel.
+    services.xserver.libinput.enable = mkDefault true; # for BCC's Mouse panel.
+    services.colord.enable = mkDefault true; # for BCC's Color panel.
+    services.gnome.at-spi2-core.enable = mkDefault true; # for BCC's A11y panel.
+    services.accounts-daemon.enable = mkDefault true; # for BCC's Users panel.
+    services.fprintd.enable = mkDefault true; # for BCC's Users panel.
+    services.udisks2.enable = mkDefault true; # for BCC's Details panel.
+
+    # For BCC's Online Accounts panel.
+    services.gnome.gnome-online-accounts.enable = mkDefault true;
+    services.gnome.gnome-online-miners.enable = true;
+
+    # For BCC's Printers panel.
+    services.printing.enable = mkDefault true;
+    services.system-config-printer.enable = config.services.printing.enable;
+
+    # For BCC's Sharing panel.
+    services.dleyna-renderer.enable = mkDefault true;
+    services.dleyna-server.enable = mkDefault true;
+    services.gnome.gnome-user-share.enable = mkDefault true;
+    services.gnome.rygel.enable = mkDefault true;
+
+    # Other default services.
+    services.gnome.evolution-data-server.enable = mkDefault true;
+    services.gnome.glib-networking.enable = mkDefault true;
+    services.gnome.gnome-keyring.enable = mkDefault true;
+    services.gnome.gnome-settings-daemon.enable = mkDefault true;
+    services.gvfs.enable = mkDefault true;
+
+    # Register packages for DBus.
+    services.dbus.packages = with pkgs; [
+      budgie.budgie-control-center
+    ];
+
+    # Register packages for udev.
+    services.udev.packages = with pkgs; [
+      budgie.magpie
+    ];
+
+    # Shell integration for MATE Terminal.
+    programs.bash.vteIntegration = true;
+    programs.zsh.vteIntegration = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/cde.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/cde.nix
new file mode 100644
index 000000000000..ad4b5d27f9d9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/cde.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  xcfg = config.services.xserver;
+  cfg = xcfg.desktopManager.cde;
+in {
+  options.services.xserver.desktopManager.cde = {
+    enable = mkEnableOption (lib.mdDoc "Common Desktop Environment");
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = with pkgs.xorg; [
+        xclock bitmap xlsfonts xfd xrefresh xload xwininfo xdpyinfo xwd xwud
+      ];
+      defaultText = literalExpression ''
+        with pkgs.xorg; [
+          xclock bitmap xlsfonts xfd xrefresh xload xwininfo xdpyinfo xwd xwud
+        ]
+      '';
+      description = lib.mdDoc ''
+        Extra packages to be installed system wide.
+      '';
+    };
+  };
+
+  config = mkIf (xcfg.enable && cfg.enable) {
+    environment.systemPackages = cfg.extraPackages;
+
+    services.rpcbind.enable = true;
+
+    services.xinetd.enable = true;
+    services.xinetd.services = [
+      {
+        name = "cmsd";
+        protocol = "udp";
+        user = "root";
+        server = "${pkgs.cdesktopenv}/bin/rpc.cmsd";
+        extraConfig = ''
+          type  = RPC UNLISTED
+          rpc_number  = 100068
+          rpc_version = 2-5
+          only_from   = 127.0.0.1/0
+        '';
+      }
+    ];
+
+    users.groups.mail = {};
+    security.wrappers = {
+      dtmail = {
+        setgid = true;
+        owner = "root";
+        group = "mail";
+        source = "${pkgs.cdesktopenv}/bin/dtmail";
+      };
+    };
+
+    system.activationScripts.setup-cde = ''
+      mkdir -p /var/dt/{tmp,appconfig/appmanager}
+      chmod a+w+t /var/dt/{tmp,appconfig/appmanager}
+    '';
+
+    services.xserver.desktopManager.session = [
+    { name = "CDE";
+      start = ''
+        exec ${pkgs.cdesktopenv}/bin/Xsession
+      '';
+    }];
+  };
+
+  meta.maintainers = [ ];
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/cinnamon.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/cinnamon.nix
new file mode 100644
index 000000000000..f1e4d9304021
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/cinnamon.nix
@@ -0,0 +1,255 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.desktopManager.cinnamon;
+  serviceCfg = config.services.cinnamon;
+
+  nixos-gsettings-overrides = pkgs.cinnamon.cinnamon-gsettings-overrides.override {
+    extraGSettingsOverridePackages = cfg.extraGSettingsOverridePackages;
+    extraGSettingsOverrides = cfg.extraGSettingsOverrides;
+  };
+
+  notExcluded = pkg: (!(lib.elem pkg config.environment.cinnamon.excludePackages));
+in
+
+{
+  options = {
+    services.cinnamon = {
+      apps.enable = mkEnableOption (lib.mdDoc "Cinnamon default applications");
+    };
+
+    services.xserver.desktopManager.cinnamon = {
+      enable = mkEnableOption (lib.mdDoc "the cinnamon desktop manager");
+
+      sessionPath = mkOption {
+        default = [];
+        type = types.listOf types.package;
+        example = literalExpression "[ pkgs.gnome.gpaste ]";
+        description = lib.mdDoc ''
+          Additional list of packages to be added to the session search path.
+          Useful for GSettings-conditional autostart.
+
+          Note that this should be a last resort; patching the package is preferred (see GPaste).
+        '';
+      };
+
+      extraGSettingsOverrides = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc "Additional gsettings overrides.";
+      };
+
+      extraGSettingsOverridePackages = mkOption {
+        default = [];
+        type = types.listOf types.path;
+        description = lib.mdDoc "List of packages for which gsettings are overridden.";
+      };
+    };
+
+    environment.cinnamon.excludePackages = mkOption {
+      default = [];
+      example = literalExpression "[ pkgs.cinnamon.blueberry ]";
+      type = types.listOf types.package;
+      description = lib.mdDoc "Which packages cinnamon should exclude from the default environment";
+    };
+
+  };
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      services.xserver.displayManager.sessionPackages = [ pkgs.cinnamon.cinnamon-common ];
+
+      services.xserver.displayManager.lightdm.greeters.slick = {
+        enable = mkDefault true;
+
+        # Taken from mint-artwork.gschema.override
+        theme = mkIf (notExcluded pkgs.cinnamon.mint-themes) {
+          name = mkDefault "Mint-Y-Aqua";
+          package = mkDefault pkgs.cinnamon.mint-themes;
+        };
+        iconTheme = mkIf (notExcluded pkgs.cinnamon.mint-y-icons) {
+          name = mkDefault "Mint-Y-Sand";
+          package = mkDefault pkgs.cinnamon.mint-y-icons;
+        };
+        cursorTheme = mkIf (notExcluded pkgs.cinnamon.mint-cursor-themes) {
+          name = mkDefault "Bibata-Modern-Classic";
+          package = mkDefault pkgs.cinnamon.mint-cursor-themes;
+        };
+      };
+      services.xserver.displayManager.sessionCommands = ''
+        if test "$XDG_CURRENT_DESKTOP" = "Cinnamon"; then
+            true
+            ${concatMapStrings (p: ''
+              if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
+                export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
+              fi
+
+              if [ -d "${p}/lib/girepository-1.0" ]; then
+                export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
+                export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
+              fi
+            '') cfg.sessionPath}
+        fi
+      '';
+
+      # Default services
+      services.blueman.enable = mkDefault true;
+      hardware.bluetooth.enable = mkDefault true;
+      hardware.pulseaudio.enable = mkDefault true;
+      security.polkit.enable = true;
+      services.accounts-daemon.enable = true;
+      services.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
+      services.dbus.packages = with pkgs.cinnamon; [
+        cinnamon-common
+        cinnamon-screensaver
+        nemo-with-extensions
+        xapp
+      ];
+      services.cinnamon.apps.enable = mkDefault true;
+      services.gnome.evolution-data-server.enable = true;
+      services.gnome.glib-networking.enable = true;
+      services.gnome.gnome-keyring.enable = true;
+      services.gvfs.enable = true;
+      services.switcherooControl.enable = mkDefault true; # xapp-gpu-offload-helper
+      services.touchegg.enable = mkDefault true;
+      services.udisks2.enable = true;
+      services.upower.enable = mkDefault config.powerManagement.enable;
+      services.xserver.libinput.enable = mkDefault true;
+      services.xserver.updateDbusEnvironment = true;
+      networking.networkmanager.enable = mkDefault true;
+
+      # Enable colord server
+      services.colord.enable = true;
+
+      # Enable dconf
+      programs.dconf.enable = true;
+
+      # Enable org.a11y.Bus
+      services.gnome.at-spi2-core.enable = true;
+
+      # Fix lockscreen
+      security.pam.services = {
+        cinnamon-screensaver = {};
+      };
+
+      environment.systemPackages = with pkgs.cinnamon // pkgs; ([
+        desktop-file-utils
+
+        # common-files
+        cinnamon-common
+        cinnamon-session
+        cinnamon-desktop
+        cinnamon-menus
+        cinnamon-translations
+
+        # utils needed by some scripts
+        killall
+
+        # session requirements
+        cinnamon-screensaver
+        # cinnamon-killer-daemon: provided by cinnamon-common
+        networkmanagerapplet # session requirement - also nm-applet not needed
+
+        # For a polkit authentication agent
+        polkit_gnome
+
+        # packages
+        nemo-with-extensions
+        cinnamon-control-center
+        cinnamon-settings-daemon
+        libgnomekbd
+
+        # theme
+        gnome.adwaita-icon-theme
+        gnome.gnome-themes-extra
+        gtk3.out
+
+        # other
+        glib # for gsettings
+        xdg-user-dirs
+      ] ++ utils.removePackagesByName [
+        # accessibility
+        onboard
+        orca
+
+        # theme
+        sound-theme-freedesktop
+        nixos-artwork.wallpapers.simple-dark-gray
+        mint-artwork
+        mint-cursor-themes
+        mint-l-icons
+        mint-l-theme
+        mint-themes
+        mint-x-icons
+        mint-y-icons
+        xapp # provides some xapp-* icons
+      ] config.environment.cinnamon.excludePackages);
+
+      xdg.mime.enable = true;
+      xdg.icons.enable = true;
+
+      xdg.portal.enable = true;
+      xdg.portal.extraPortals = [
+        pkgs.xdg-desktop-portal-xapp
+        (pkgs.xdg-desktop-portal-gtk.override {
+          # Do not build portals that we already have.
+          buildPortalsInGnome = false;
+        })
+      ];
+
+      # Override GSettings schemas
+      environment.sessionVariables.NIX_GSETTINGS_OVERRIDES_DIR = "${nixos-gsettings-overrides}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
+
+      environment.pathsToLink = [
+        # FIXME: modules should link subdirs of `/share` rather than relying on this
+        "/share" # TODO: https://github.com/NixOS/nixpkgs/issues/47173
+      ];
+
+      # Shell integration for VTE terminals
+      programs.bash.vteIntegration = mkDefault true;
+      programs.zsh.vteIntegration = mkDefault true;
+
+      # Qt application style
+      qt = {
+        enable = mkDefault true;
+        style = mkDefault "gtk2";
+        platformTheme = mkDefault "gtk2";
+      };
+
+      # Default Fonts
+      fonts.packages = with pkgs; [
+        dejavu_fonts # Default monospace font in LMDE 6+
+        ubuntu_font_family # required for default theme
+      ];
+    })
+
+    (mkIf serviceCfg.apps.enable {
+      programs.geary.enable = mkDefault true;
+      programs.gnome-disks.enable = mkDefault true;
+      programs.gnome-terminal.enable = mkDefault true;
+      programs.file-roller.enable = mkDefault true;
+
+      environment.systemPackages = with pkgs // pkgs.gnome // pkgs.cinnamon; utils.removePackagesByName [
+        # cinnamon team apps
+        bulky
+        warpinator
+
+        # cinnamon xapp
+        xviewer
+        xreader
+        xed-editor
+        xplayer
+        pix
+
+        # external apps shipped with linux-mint
+        hexchat
+        gnome-calculator
+        gnome-calendar
+        gnome-screenshot
+      ] config.environment.cinnamon.excludePackages;
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix
new file mode 100644
index 000000000000..28d751305892
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/deepin.nix
@@ -0,0 +1,208 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  xcfg = config.services.xserver;
+  cfg = xcfg.desktopManager.deepin;
+
+  nixos-gsettings-overrides = pkgs.deepin.dde-gsettings-schemas.override {
+    extraGSettingsOverridePackages = cfg.extraGSettingsOverridePackages;
+    extraGSettingsOverrides = cfg.extraGSettingsOverrides;
+  };
+in
+{
+  options = {
+
+    services.xserver.desktopManager.deepin = {
+      enable = mkEnableOption (lib.mdDoc "Deepin desktop manager");
+      extraGSettingsOverrides = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc "Additional gsettings overrides.";
+      };
+      extraGSettingsOverridePackages = mkOption {
+        default = [ ];
+        type = types.listOf types.path;
+        description = lib.mdDoc "List of packages for which gsettings are overridden.";
+      };
+    };
+
+    environment.deepin.excludePackages = mkOption {
+      default = [ ];
+      type = types.listOf types.package;
+      description = lib.mdDoc "List of default packages to exclude from the configuration";
+    };
+
+  };
+
+  config = mkIf cfg.enable
+    {
+      services.xserver.displayManager.sessionPackages = [ pkgs.deepin.startdde ];
+      services.xserver.displayManager.defaultSession = mkDefault "deepin";
+
+      # Update the DBus activation environment after launching the desktop manager.
+      services.xserver.displayManager.sessionCommands = ''
+        ${lib.getBin pkgs.dbus}/bin/dbus-update-activation-environment --systemd --all
+      '';
+
+      hardware.bluetooth.enable = mkDefault true;
+      hardware.pulseaudio.enable = mkDefault true;
+      security.polkit.enable = true;
+
+      services.deepin.dde-daemon.enable = mkForce true;
+      services.deepin.dde-api.enable = mkForce true;
+      services.deepin.app-services.enable = mkForce true;
+
+      services.colord.enable = mkDefault true;
+      services.accounts-daemon.enable = mkDefault true;
+      services.gvfs.enable = mkDefault true;
+      services.gnome.glib-networking.enable = mkDefault true;
+      services.gnome.gnome-keyring.enable = mkDefault true;
+      services.bamf.enable = mkDefault true;
+
+      services.xserver.libinput.enable = mkDefault true;
+      services.udisks2.enable = true;
+      services.upower.enable = mkDefault config.powerManagement.enable;
+      networking.networkmanager.enable = mkDefault true;
+      programs.dconf.enable = mkDefault true;
+
+      fonts.packages = with pkgs; [ noto-fonts ];
+      xdg.mime.enable = true;
+      xdg.menus.enable = true;
+      xdg.icons.enable = true;
+      xdg.portal.enable = mkDefault true;
+      xdg.portal.extraPortals = mkDefault [
+        (pkgs.xdg-desktop-portal-gtk.override {
+          buildPortalsInGnome = false;
+        })
+      ];
+
+      environment.sessionVariables = {
+        NIX_GSETTINGS_OVERRIDES_DIR = "${nixos-gsettings-overrides}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
+        DDE_POLKIT_AGENT_PLUGINS_DIRS = [ "${pkgs.deepin.dpa-ext-gnomekeyring}/lib/polkit-1-dde/plugins" ];
+      };
+
+      environment.pathsToLink = [
+        "/lib/dde-dock/plugins"
+        "/lib/dde-control-center"
+        "/lib/dde-session-shell"
+        "/lib/dde-file-manager"
+        "/share/backgrounds"
+        "/share/wallpapers"
+      ];
+
+      environment.etc = {
+        "distribution.info".text = ''
+          [Distribution]
+          Name=NixOS
+          WebsiteName=www.nixos.org
+          Website=https://www.nixos.org
+          Logo=${pkgs.nixos-icons}/share/icons/hicolor/96x96/apps/nix-snowflake.png
+          LogoLight=${pkgs.nixos-icons}/share/icons/hicolor/32x32/apps/nix-snowflake.png
+          LogoTransparent=${pkgs.deepin.deepin-desktop-base}/share/pixmaps/distribution_logo_transparent.svg
+        '';
+        "deepin-installer.conf".text = ''
+          system_info_vendor_name="Copyright (c) 2003-2023 NixOS contributors"
+        '';
+      };
+
+      systemd.tmpfiles.rules = [
+        "d /var/lib/AccountsService 0775 root root - -"
+        "C /var/lib/AccountsService/icons 0775 root root - ${pkgs.deepin.dde-account-faces}/var/lib/AccountsService/icons"
+      ];
+
+      security.pam.services.dde-lock.text = ''
+        # original at {dde-session-shell}/etc/pam.d/dde-lock
+        auth      substack      login
+        account   include       login
+        password  substack      login
+        session   include       login
+      '';
+
+      environment.systemPackages = with pkgs; with deepin;
+        let
+          requiredPackages = [
+            pciutils # for dtkcore/startdde
+            xdotool # for dde-daemon
+            glib # for gsettings program / gdbus
+            gtk3 # for gtk-launch program
+            xdg-user-dirs # Update user dirs
+            util-linux # runuser
+            polkit_gnome
+            librsvg # dde-api use rsvg-convert
+            lshw # for dtkcore
+            libsForQt5.kde-gtk-config # deepin-api/gtk-thumbnailer need
+            libsForQt5.kglobalaccel
+            xsettingsd # lightdm-deepin-greeter
+            qt5platform-plugins
+            deepin-pw-check
+            deepin-turbo
+
+            dde-account-faces
+            deepin-icon-theme
+            deepin-sound-theme
+            deepin-gtk-theme
+            deepin-wallpapers
+
+            startdde
+            dde-dock
+            dde-launcher
+            dde-session-ui
+            dde-session-shell
+            dde-file-manager
+            dde-control-center
+            dde-network-core
+            dde-clipboard
+            dde-calendar
+            dde-polkit-agent
+            dpa-ext-gnomekeyring
+            deepin-desktop-schemas
+            deepin-terminal
+            dde-kwin
+            deepin-kwin
+          ];
+          optionalPackages = [
+            onboard # dde-dock plugin
+            deepin-camera
+            deepin-calculator
+            deepin-compressor
+            deepin-editor
+            deepin-picker
+            deepin-draw
+            deepin-album
+            deepin-image-viewer
+            deepin-music
+            deepin-movie-reborn
+            deepin-system-monitor
+            deepin-screen-recorder
+            deepin-shortcut-viewer
+          ];
+        in
+        requiredPackages
+        ++ utils.removePackagesByName optionalPackages config.environment.deepin.excludePackages;
+
+      services.dbus.packages = with pkgs.deepin; [
+        dde-dock
+        dde-launcher
+        dde-session-ui
+        dde-session-shell
+        dde-file-manager
+        dde-control-center
+        dde-calendar
+        dde-clipboard
+        dde-kwin
+        deepin-kwin
+        deepin-pw-check
+      ];
+
+      systemd.packages = with pkgs.deepin; [
+        dde-launcher
+        dde-file-manager
+        dde-calendar
+        dde-clipboard
+        deepin-kwin
+      ];
+    };
+}
+
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix
new file mode 100644
index 000000000000..66cb4ee29c0a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/default.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  xcfg = config.services.xserver;
+  cfg = xcfg.desktopManager;
+
+  # If desktop manager `d' isn't capable of setting a background and
+  # the xserver is enabled, `feh' is used as a fallback to set the wallpaper.
+  needBGCond = d: ! (d ? bgSupport && d.bgSupport) && xcfg.enable;
+
+in
+
+{
+  # Note: the order in which desktop manager modules are imported here
+  # determines the default: later modules (if enabled) are preferred.
+  # E.g., if Plasma 5 is enabled, it supersedes xterm.
+  imports = [
+    ./none.nix ./xterm.nix ./phosh.nix ./xfce.nix ./plasma5.nix ./lumina.nix
+    ./lxqt.nix ./enlightenment.nix ./gnome.nix ./retroarch.nix ./kodi.nix
+    ./mate.nix ./pantheon.nix ./surf-display.nix ./cde.nix
+    ./cinnamon.nix ./budgie.nix ./deepin.nix
+  ];
+
+  options = {
+
+    services.xserver.desktopManager = {
+
+      wallpaper = {
+        mode = mkOption {
+          type = types.enum [ "center" "fill" "max" "scale" "tile" ];
+          default = "scale";
+          example = "fill";
+          description = lib.mdDoc ''
+            The file {file}`~/.background-image` is used as a background image.
+            This option specifies the placement of this image onto your desktop.
+
+            Possible values:
+            - `center`: Center the image on the background. If it is too small, it will be surrounded by a black border.
+            - `fill`: Like `scale`, but preserves the aspect ratio by zooming the image until it fits. Either a horizontal or a vertical part of the image will be cut off.
+            - `max`: Like `fill`, but scales the image to the maximum size that fits the screen, with black borders on one side.
+            - `scale`: Fit the file onto the background without repeating it, cropping it, or adding borders; the aspect ratio is not preserved.
+            - `tile`: Tile (repeat) the image in case it is too small for the screen.
+          '';
+        };
+
+        combineScreens = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            When set to `true` the wallpaper will stretch across all screens.
+            When set to `false` the wallpaper is duplicated to all screens.
+          '';
+        };
+      };
+
+      session = mkOption {
+        internal = true;
+        default = [];
+        example = singleton
+          { name = "kde";
+            bgSupport = true;
+            start = "...";
+          };
+        description = lib.mdDoc ''
+          Internal option used to add some common lines to desktop manager
+          scripts before forwarding the value to the
+          `displayManager`.
+        '';
+        apply = map (d: d // {
+          manage = "desktop";
+          start = d.start
+          # literal newline to ensure d.start's last line is not appended to
+          + optionalString (needBGCond d) ''
+
+            if [ -e $HOME/.background-image ]; then
+              ${pkgs.feh}/bin/feh --bg-${cfg.wallpaper.mode} ${optionalString cfg.wallpaper.combineScreens "--no-xinerama"} $HOME/.background-image
+            fi
+          '';
+        });
+      };
+
+      default = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "none";
+        description = lib.mdDoc ''
+          **Deprecated**, please use [](#opt-services.xserver.displayManager.defaultSession) instead.
+
+          Default desktop manager loaded if none have been chosen.
+        '';
+      };
+
+    };
+
+  };
+
+  config.services.xserver.displayManager.session = cfg.session;
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/enlightenment.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/enlightenment.nix
new file mode 100644
index 000000000000..28dd408c923c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/enlightenment.nix
@@ -0,0 +1,124 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+
+  e = pkgs.enlightenment;
+  xcfg = config.services.xserver;
+  cfg = xcfg.desktopManager.enlightenment;
+  GST_PLUGIN_PATH = lib.makeSearchPathOutput "lib" "lib/gstreamer-1.0" [
+    pkgs.gst_all_1.gst-plugins-base
+    pkgs.gst_all_1.gst-plugins-good
+    pkgs.gst_all_1.gst-plugins-bad
+    pkgs.gst_all_1.gst-libav ];
+
+in
+
+{
+  meta = {
+    maintainers = teams.enlightenment.members;
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "xserver" "desktopManager" "e19" "enable" ] [ "services" "xserver" "desktopManager" "enlightenment" "enable" ])
+  ];
+
+  options = {
+
+    services.xserver.desktopManager.enlightenment.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable the Enlightenment desktop environment.";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = with pkgs; [
+      enlightenment.econnman
+      enlightenment.efl
+      enlightenment.enlightenment
+      enlightenment.ecrire
+      enlightenment.ephoto
+      enlightenment.rage
+      enlightenment.terminology
+      xorg.xcursorthemes
+    ];
+
+    environment.pathsToLink = [
+      "/etc/enlightenment"
+      "/share/enlightenment"
+      "/share/elementary"
+      "/share/locale"
+    ];
+
+    services.xserver.displayManager.sessionPackages = [ pkgs.enlightenment.enlightenment ];
+
+    services.xserver.displayManager.sessionCommands = ''
+      if test "$XDG_CURRENT_DESKTOP" = "Enlightenment"; then
+        export GST_PLUGIN_PATH="${GST_PLUGIN_PATH}"
+
+        # make available for D-BUS user services
+        #export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}:${config.system.path}/share:${e.efl}/share
+
+        # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
+        ${pkgs.xdg-user-dirs}/bin/xdg-user-dirs-update
+      fi
+    '';
+
+    # Wrappers for programs installed by enlightenment that should be setuid
+    security.wrappers = {
+      enlightenment_ckpasswd =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.enlightenment.enlightenment}/lib/enlightenment/utils/enlightenment_ckpasswd";
+        };
+      enlightenment_sys =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.enlightenment.enlightenment}/lib/enlightenment/utils/enlightenment_sys";
+        };
+      enlightenment_system =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.enlightenment.enlightenment}/lib/enlightenment/utils/enlightenment_system";
+        };
+    };
+
+    environment.etc."X11/xkb".source = xcfg.xkb.dir;
+
+    fonts.packages = [ pkgs.dejavu_fonts pkgs.ubuntu_font_family ];
+
+    services.udisks2.enable = true;
+    services.upower.enable = config.powerManagement.enable;
+    services.xserver.libinput.enable = mkDefault true;
+
+    services.dbus.packages = [ e.efl ];
+
+    systemd.user.services.efreet =
+      { enable = true;
+        description = "org.enlightenment.Efreet";
+        serviceConfig =
+          { ExecStart = "${e.efl}/bin/efreetd";
+            StandardOutput = "null";
+          };
+      };
+
+    systemd.user.services.ethumb =
+      { enable = true;
+        description = "org.enlightenment.Ethumb";
+        serviceConfig =
+          { ExecStart = "${e.efl}/bin/ethumbd";
+            StandardOutput = "null";
+          };
+      };
+
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md
new file mode 100644
index 000000000000..d9e75bfe6bdd
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.md
@@ -0,0 +1,167 @@
+# GNOME Desktop {#chap-gnome}
+
+GNOME provides a simple, yet full-featured desktop environment with a focus on productivity. Its Mutter compositor supports both Wayland and X server, and the GNOME Shell user interface is fully customizable by extensions.
+
+## Enabling GNOME {#sec-gnome-enable}
+
+All of the core apps, optional apps, games, and core developer tools from GNOME are available.
+
+To enable the GNOME desktop use:
+
+```
+services.xserver.desktopManager.gnome.enable = true;
+services.xserver.displayManager.gdm.enable = true;
+```
+
+::: {.note}
+While it is not strictly necessary to use GDM as the display manager with GNOME, it is recommended, as some features such as screen lock [might not work](#sec-gnome-faq-can-i-use-lightdm-with-gnome) without it.
+:::
+
+The default applications used in NixOS are very minimal, inspired by the defaults used in [gnome-build-meta](https://gitlab.gnome.org/GNOME/gnome-build-meta/blob/40.0/elements/core/meta-gnome-core-utilities.bst).
+
+### GNOME without the apps {#sec-gnome-without-the-apps}
+
+If you’d like to only use the GNOME desktop and not the apps, you can disable them with:
+
+```
+services.gnome.core-utilities.enable = false;
+```
+
+and none of them will be installed.
+
+If you’d only like to omit a subset of the core utilities, you can use
+[](#opt-environment.gnome.excludePackages).
+Note that this mechanism can only exclude core utilities, games and core developer tools.
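+
+For example, a minimal sketch that omits a few of the default utilities (the package attributes are illustrative; pick whichever core utilities you want to drop):
+
+```
+environment.gnome.excludePackages = with pkgs; [
+  gnome-tour
+  gnome.cheese
+  gnome.gnome-music
+];
+```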
+
+### Disabling GNOME services {#sec-gnome-disabling-services}
+
+It is also possible to disable many of the [core services](https://github.com/NixOS/nixpkgs/blob/b8ec4fd2a4edc4e30d02ba7b1a2cc1358f3db1d5/nixos/modules/services/x11/desktop-managers/gnome.nix#L329-L348). For example, if you do not need file indexing, you can disable Tracker with:
+
+```
+services.gnome.tracker-miners.enable = false;
+services.gnome.tracker.enable = false;
+```
+
+Note, however, that doing so is not supported and might break some applications. Notably, GNOME Music cannot work without Tracker.
+
+### GNOME games {#sec-gnome-games}
+
+You can install all of the GNOME games with:
+
+```
+services.gnome.games.enable = true;
+```
+
+### GNOME core developer tools {#sec-gnome-core-developer-tools}
+
+You can install GNOME core developer tools with:
+
+```
+services.gnome.core-developer-tools.enable = true;
+```
+
+## Enabling GNOME Flashback {#sec-gnome-enable-flashback}
+
+GNOME Flashback provides a desktop environment based on the classic GNOME 2 architecture. You can enable the default GNOME Flashback session, which uses the Metacity window manager, with:
+
+```
+services.xserver.desktopManager.gnome.flashback.enableMetacity = true;
+```
+
+It is also possible to create custom sessions that replace Metacity with a different window manager using [](#opt-services.xserver.desktopManager.gnome.flashback.customSessions).
+
+The following example uses the `xmonad` window manager:
+
+```
+services.xserver.desktopManager.gnome.flashback.customSessions = [
+  {
+    wmName = "xmonad";
+    wmLabel = "XMonad";
+    wmCommand = "${pkgs.haskellPackages.xmonad}/bin/xmonad";
+    enableGnomePanel = false;
+  }
+];
+```
+
+## Icons and GTK Themes {#sec-gnome-icons-and-gtk-themes}
+
+Icon themes and GTK themes don’t require any special options to be installed in NixOS.
+
+You can add them to [](#opt-environment.systemPackages) and switch to them with GNOME Tweaks.
+If you’d like to do this manually in dconf, change the values of the following keys:
+
+```
+/org/gnome/desktop/interface/gtk-theme
+/org/gnome/desktop/interface/icon-theme
+```
+
+in `dconf-editor`.
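+
+For instance, a sketch that adds a GTK theme and an icon theme to the system environment (the package names are illustrative; any theme packages from Nixpkgs work the same way):
+
+```
+environment.systemPackages = with pkgs; [
+  arc-theme
+  papirus-icon-theme
+];
+```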
+
+## Shell Extensions {#sec-gnome-shell-extensions}
+
+Most Shell extensions are packaged under the `gnomeExtensions` attribute.
+Some packages that include Shell extensions, like `gnome.gpaste`, don’t have their extension decoupled under this attribute.
+
+You can install them like any other package:
+
+```
+environment.systemPackages = [
+  gnomeExtensions.dash-to-dock
+  gnomeExtensions.gsconnect
+  gnomeExtensions.mpris-indicator-button
+];
+```
+
+Unfortunately, there is currently no way to manage these in a completely declarative way,
+so you have to enable them manually with an Extensions application.
+It is possible to use a [GSettings override](#sec-gnome-gsettings-overrides) for this on `org.gnome.shell.enabled-extensions`, but that will only influence the default value.
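+
+A sketch of such an override (the extension UUID is illustrative; use the UUID from the extension’s `metadata.json`):
+
+```
+services.xserver.desktopManager.gnome = {
+  extraGSettingsOverrides = ''
+    [org.gnome.shell]
+    enabled-extensions=['dash-to-dock@micxgx.gmail.com']
+  '';
+  # The package providing the org.gnome.shell schema must also be listed here.
+  extraGSettingsOverridePackages = [ pkgs.gnome.gnome-shell ];
+};
+```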
+
+## GSettings Overrides {#sec-gnome-gsettings-overrides}
+
+The majority of software built on the GNOME platform uses GLib’s [GSettings](https://developer.gnome.org/gio/unstable/GSettings.html) system to manage runtime configuration. For our purposes, the system consists of XML schemas describing the individual configuration options, stored in the package, and a settings backend, where the values of the settings are stored. On NixOS, as on most Linux distributions, the dconf database is used as the backend.
+
+[GSettings vendor overrides](https://developer.gnome.org/gio/unstable/GSettings.html#id-1.4.19.2.9.25) can be used to adjust the default values for settings of the GNOME desktop and apps by replacing the default values specified in the XML schemas. Using overrides will allow you to pre-seed user settings before you even start the session.
+
+::: {.warning}
+Overrides only change the default values of GSettings keys, so if you or an application changes a setting’s value, the value set by the override will be ignored. Until [NixOS’s dconf module implements changing values](https://github.com/NixOS/nixpkgs/issues/54150), you will either need to keep that in mind and clear the setting from the backend with the `dconf reset` command when that happens, or use the [dconf module from home-manager](https://nix-community.github.io/home-manager/options.html#opt-dconf.settings).
+:::
+
+You can override the default GSettings values using the
+[](#opt-services.xserver.desktopManager.gnome.extraGSettingsOverrides) option.
+
+Note that any package whose GSettings you want to override needs to be added to
+[](#opt-services.xserver.desktopManager.gnome.extraGSettingsOverridePackages).
+
+You can use the `dconf-editor` tool to explore which GSettings keys you can set.
+
+### Example {#sec-gnome-gsettings-overrides-example}
+
+```
+services.xserver.desktopManager.gnome = {
+  extraGSettingsOverrides = ''
+    # Change default background
+    [org.gnome.desktop.background]
+    picture-uri='file://${pkgs.nixos-artwork.wallpapers.mosaic-blue.gnomeFilePath}'
+
+    # Favorite apps in gnome-shell
+    [org.gnome.shell]
+    favorite-apps=['org.gnome.Photos.desktop', 'org.gnome.Nautilus.desktop']
+  '';
+
+  extraGSettingsOverridePackages = [
+    pkgs.gsettings-desktop-schemas # for org.gnome.desktop
+    pkgs.gnome.gnome-shell # for org.gnome.shell
+  ];
+};
+```
+
+## Frequently Asked Questions {#sec-gnome-faq}
+
+### Can I use LightDM with GNOME? {#sec-gnome-faq-can-i-use-lightdm-with-gnome}
+
+Yes, you can; the same applies to any other display manager in NixOS.
+
+However, it doesn’t yet work correctly with the Wayland session of GNOME Shell, and
+won’t be able to lock your screen.
+
+See [this issue](https://github.com/NixOS/nixpkgs/issues/56342).
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.nix
new file mode 100644
index 000000000000..12bdd9333377
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/gnome.nix
@@ -0,0 +1,570 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.desktopManager.gnome;
+  serviceCfg = config.services.gnome;
+
+  # Prioritize nautilus by default when opening directories
+  mimeAppsList = pkgs.writeTextFile {
+    name = "gnome-mimeapps";
+    destination = "/share/applications/mimeapps.list";
+    text = ''
+      [Default Applications]
+      inode/directory=nautilus.desktop;org.gnome.Nautilus.desktop
+    '';
+  };
+
+  defaultFavoriteAppsOverride = ''
+    [org.gnome.shell]
+    favorite-apps=[ 'org.gnome.Epiphany.desktop', 'org.gnome.Geary.desktop', 'org.gnome.Calendar.desktop', 'org.gnome.Music.desktop', 'org.gnome.Photos.desktop', 'org.gnome.Nautilus.desktop' ]
+  '';
+
+  nixos-background-light = pkgs.nixos-artwork.wallpapers.simple-blue;
+  nixos-background-dark = pkgs.nixos-artwork.wallpapers.simple-dark-gray;
+
+  # TODO: Having https://github.com/NixOS/nixpkgs/issues/54150 would supersede this
+  nixos-gsettings-desktop-schemas = pkgs.gnome.nixos-gsettings-overrides.override {
+    inherit (cfg) extraGSettingsOverrides extraGSettingsOverridePackages favoriteAppsOverride;
+    inherit flashbackEnabled nixos-background-dark nixos-background-light;
+  };
+
+  nixos-background-info = pkgs.writeTextFile rec {
+    name = "nixos-background-info";
+    text = ''
+      <?xml version="1.0"?>
+      <!DOCTYPE wallpapers SYSTEM "gnome-wp-list.dtd">
+      <wallpapers>
+        <wallpaper deleted="false">
+          <name>Blobs</name>
+          <filename>${nixos-background-light.gnomeFilePath}</filename>
+          <filename-dark>${nixos-background-dark.gnomeFilePath}</filename-dark>
+          <options>zoom</options>
+          <shade_type>solid</shade_type>
+          <pcolor>#3a4ba0</pcolor>
+          <scolor>#2f302f</scolor>
+        </wallpaper>
+      </wallpapers>
+    '';
+    destination = "/share/gnome-background-properties/nixos.xml";
+  };
+
+  flashbackEnabled = cfg.flashback.enableMetacity || length cfg.flashback.customSessions > 0;
+  flashbackWms = optional cfg.flashback.enableMetacity {
+    wmName = "metacity";
+    wmLabel = "Metacity";
+    wmCommand = "${pkgs.gnome.metacity}/bin/metacity";
+    enableGnomePanel = true;
+  } ++ cfg.flashback.customSessions;
+
+  notExcluded = pkg: mkDefault (!(lib.elem pkg config.environment.gnome.excludePackages));
+
+in
+
+{
+
+  meta = {
+    doc = ./gnome.md;
+    maintainers = teams.gnome.members;
+  };
+
+  imports = [
+    # Added 2021-05-07
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "core-os-services" "enable" ]
+      [ "services" "gnome" "core-os-services" "enable" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "core-shell" "enable" ]
+      [ "services" "gnome" "core-shell" "enable" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "core-utilities" "enable" ]
+      [ "services" "gnome" "core-utilities" "enable" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "core-developer-tools" "enable" ]
+      [ "services" "gnome" "core-developer-tools" "enable" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "games" "enable" ]
+      [ "services" "gnome" "games" "enable" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "gnome3" "experimental-features" "realtime-scheduling" ]
+      [ "services" "gnome" "experimental-features" "realtime-scheduling" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "gnome3" "enable" ]
+      [ "services" "xserver" "desktopManager" "gnome" "enable" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "gnome3" "sessionPath" ]
+      [ "services" "xserver" "desktopManager" "gnome" "sessionPath" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "gnome3" "favoriteAppsOverride" ]
+      [ "services" "xserver" "desktopManager" "gnome" "favoriteAppsOverride" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "gnome3" "extraGSettingsOverrides" ]
+      [ "services" "xserver" "desktopManager" "gnome" "extraGSettingsOverrides" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "gnome3" "extraGSettingsOverridePackages" ]
+      [ "services" "xserver" "desktopManager" "gnome" "extraGSettingsOverridePackages" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "gnome3" "debug" ]
+      [ "services" "xserver" "desktopManager" "gnome" "debug" ]
+    )
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "gnome3" "flashback" ]
+      [ "services" "xserver" "desktopManager" "gnome" "flashback" ]
+    )
+    (mkRenamedOptionModule
+      [ "environment" "gnome3" "excludePackages" ]
+      [ "environment" "gnome" "excludePackages" ]
+    )
+    (mkRemovedOptionModule
+      [ "services" "gnome" "experimental-features" "realtime-scheduling" ]
+      "Set `security.rtkit.enable = true;` to make realtime scheduling possible. (Still needs to be enabled using GSettings.)"
+    )
+  ];
+
+  options = {
+
+    services.gnome = {
+      core-os-services.enable = mkEnableOption (lib.mdDoc "essential services for GNOME3");
+      core-shell.enable = mkEnableOption (lib.mdDoc "GNOME Shell services");
+      core-utilities.enable = mkEnableOption (lib.mdDoc "GNOME core utilities");
+      core-developer-tools.enable = mkEnableOption (lib.mdDoc "GNOME core developer tools");
+      games.enable = mkEnableOption (lib.mdDoc "GNOME games");
+    };
+
+    services.xserver.desktopManager.gnome = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable GNOME desktop manager.";
+      };
+
+      sessionPath = mkOption {
+        default = [];
+        type = types.listOf types.package;
+        example = literalExpression "[ pkgs.gnome.gpaste ]";
+        description = lib.mdDoc ''
+          Additional list of packages to be added to the session search path.
+          Useful for GNOME Shell extensions or GSettings-conditional autostart.
+
+          Note that this should be a last resort; patching the package is preferred (see GPaste).
+        '';
+      };
+
+      favoriteAppsOverride = mkOption {
+        internal = true; # this is messy
+        default = defaultFavoriteAppsOverride;
+        type = types.lines;
+        example = literalExpression ''
+          '''
+            [org.gnome.shell]
+            favorite-apps=[ 'firefox.desktop', 'org.gnome.Calendar.desktop' ]
+          '''
+        '';
+        description = lib.mdDoc "List of desktop files to put as favorite apps into gnome-shell. These need to be installed somehow globally.";
+      };
+
+      extraGSettingsOverrides = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc "Additional gsettings overrides.";
+      };
+
+      extraGSettingsOverridePackages = mkOption {
+        default = [];
+        type = types.listOf types.path;
+        description = lib.mdDoc "List of packages for which gsettings are overridden.";
+      };
+
+      debug = mkEnableOption (lib.mdDoc "gnome-session debug messages");
+
+      flashback = {
+        enableMetacity = mkEnableOption (lib.mdDoc "the standard GNOME Flashback session with Metacity");
+
+        customSessions = mkOption {
+          type = types.listOf (types.submodule {
+            options = {
+              wmName = mkOption {
+                type = types.strMatching "[a-zA-Z0-9_-]+";
+                description = lib.mdDoc "A unique identifier for the window manager.";
+                example = "xmonad";
+              };
+
+              wmLabel = mkOption {
+                type = types.str;
+                description = lib.mdDoc "The name of the window manager to show in the session chooser.";
+                example = "XMonad";
+              };
+
+              wmCommand = mkOption {
+                type = types.str;
+                description = lib.mdDoc "The executable of the window manager to use.";
+                example = literalExpression ''"''${pkgs.haskellPackages.xmonad}/bin/xmonad"'';
+              };
+
+              enableGnomePanel = mkOption {
+                type = types.bool;
+                default = true;
+                example = false;
+                description = lib.mdDoc "Whether to enable the GNOME panel in this session.";
+              };
+            };
+          });
+          default = [];
+          description = lib.mdDoc "Other GNOME Flashback sessions to enable.";
+        };
+
+        panelModulePackages = mkOption {
+          default = [ pkgs.gnome.gnome-applets ];
+          defaultText = literalExpression "[ pkgs.gnome.gnome-applets ]";
+          type = types.listOf types.package;
+          description = lib.mdDoc ''
+            Packages containing modules that should be made available to `gnome-panel` (usually for applets).
+
+            If you're packaging something to use here, please install the modules in `$out/lib/gnome-panel/modules`.
+          '';
+        };
+      };
+    };
+
+    environment.gnome.excludePackages = mkOption {
+      default = [];
+      example = literalExpression "[ pkgs.gnome.totem ]";
+      type = types.listOf types.package;
+      description = lib.mdDoc "Which packages gnome should exclude from the default environment";
+    };
+
+  };
+
+  config = mkMerge [
+    (mkIf (cfg.enable || flashbackEnabled) {
+      # Seed our configuration into nixos-generate-config
+      system.nixos-generate-config.desktopConfiguration = [''
+        # Enable the GNOME Desktop Environment.
+        services.xserver.displayManager.gdm.enable = true;
+        services.xserver.desktopManager.gnome.enable = true;
+      ''];
+
+      services.gnome.core-os-services.enable = true;
+      services.gnome.core-shell.enable = true;
+      services.gnome.core-utilities.enable = mkDefault true;
+
+      services.xserver.displayManager.sessionPackages = [ pkgs.gnome.gnome-session.sessions ];
+
+      environment.extraInit = ''
+        ${concatMapStrings (p: ''
+          if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
+            export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
+          fi
+
+          if [ -d "${p}/lib/girepository-1.0" ]; then
+            export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
+            export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
+          fi
+        '') cfg.sessionPath}
+      '';
+
+      environment.systemPackages = cfg.sessionPath;
+
+      environment.sessionVariables.GNOME_SESSION_DEBUG = mkIf cfg.debug "1";
+
+      # Override GSettings schemas
+      environment.sessionVariables.NIX_GSETTINGS_OVERRIDES_DIR = "${nixos-gsettings-desktop-schemas}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
+    })
+
+    (mkIf flashbackEnabled {
+      services.xserver.displayManager.sessionPackages =
+        let
+          wmNames = map (wm: wm.wmName) flashbackWms;
+          namesAreUnique = lib.unique wmNames == wmNames;
+        in
+          assert (assertMsg namesAreUnique "Flashback WM names must be unique.");
+          map
+            (wm:
+              pkgs.gnome.gnome-flashback.mkSessionForWm {
+                inherit (wm) wmName wmLabel wmCommand;
+              }
+            ) flashbackWms;
+
+      security.pam.services.gnome-flashback = {
+        enableGnomeKeyring = true;
+      };
+
+      systemd.packages = with pkgs.gnome; [
+        gnome-flashback
+      ] ++ map gnome-flashback.mkSystemdTargetForWm flashbackWms;
+
+      environment.systemPackages = with pkgs.gnome; [
+        gnome-flashback
+        (gnome-panel-with-modules.override {
+          panelModulePackages = cfg.flashback.panelModulePackages;
+        })
+      ]
+      # For /share/applications/${wmName}.desktop
+      ++ (map (wm: gnome-flashback.mkWmApplication { inherit (wm) wmName wmLabel wmCommand; }) flashbackWms)
+      # For /share/gnome-session/sessions/gnome-flashback-${wmName}.session
+      ++ (map (wm: gnome-flashback.mkGnomeSession { inherit (wm) wmName wmLabel enableGnomePanel; }) flashbackWms);
+    })
+
+    (mkIf serviceCfg.core-os-services.enable {
+      hardware.bluetooth.enable = mkDefault true;
+      hardware.pulseaudio.enable = mkDefault true;
+      programs.dconf.enable = true;
+      security.polkit.enable = true;
+      services.accounts-daemon.enable = true;
+      services.dleyna-renderer.enable = mkDefault true;
+      services.dleyna-server.enable = mkDefault true;
+      services.power-profiles-daemon.enable = mkDefault true;
+      services.gnome.at-spi2-core.enable = true;
+      services.gnome.evolution-data-server.enable = true;
+      services.gnome.gnome-keyring.enable = true;
+      services.gnome.gnome-online-accounts.enable = mkDefault true;
+      services.gnome.gnome-online-miners.enable = true;
+      services.gnome.tracker-miners.enable = mkDefault true;
+      services.gnome.tracker.enable = mkDefault true;
+      services.hardware.bolt.enable = mkDefault true;
+      # TODO: Enable once #177946 is resolved
+      # services.packagekit.enable = mkDefault true;
+      services.udisks2.enable = true;
+      services.upower.enable = config.powerManagement.enable;
+      services.xserver.libinput.enable = mkDefault true; # for controlling touchpad settings via gnome control center
+
+      # Explicitly enabled since GNOME will be severely broken without these.
+      xdg.mime.enable = true;
+      xdg.icons.enable = true;
+
+      xdg.portal.enable = true;
+      xdg.portal.extraPortals = [
+        pkgs.xdg-desktop-portal-gnome
+        (pkgs.xdg-desktop-portal-gtk.override {
+          # Do not build portals that we already have.
+          buildPortalsInGnome = false;
+        })
+      ];
+
+      networking.networkmanager.enable = mkDefault true;
+
+      services.xserver.updateDbusEnvironment = true;
+
+      # gnome has a custom alert theme but it still
+      # inherits from the freedesktop theme.
+      environment.systemPackages = with pkgs; [
+        sound-theme-freedesktop
+      ];
+
+      # Needed for themes and backgrounds
+      environment.pathsToLink = [
+        "/share" # TODO: https://github.com/NixOS/nixpkgs/issues/47173
+      ];
+    })
+
+    (mkIf serviceCfg.core-shell.enable {
+      services.xserver.desktopManager.gnome.sessionPath =
+        let
+          mandatoryPackages = [
+            pkgs.gnome.gnome-shell
+          ];
+          optionalPackages = [
+            pkgs.gnome.gnome-shell-extensions
+          ];
+        in
+        mandatoryPackages
+        ++ utils.removePackagesByName optionalPackages config.environment.gnome.excludePackages;
+
+      services.colord.enable = mkDefault true;
+      services.gnome.glib-networking.enable = true;
+      services.gnome.gnome-browser-connector.enable = mkDefault true;
+      services.gnome.gnome-initial-setup.enable = mkDefault true;
+      services.gnome.gnome-remote-desktop.enable = mkDefault true;
+      services.gnome.gnome-settings-daemon.enable = true;
+      services.gnome.gnome-user-share.enable = mkDefault true;
+      services.gnome.rygel.enable = mkDefault true;
+      services.gvfs.enable = true;
+      services.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
+
+      systemd.packages = with pkgs.gnome; [
+        gnome-session
+        gnome-shell
+      ];
+
+      services.udev.packages = with pkgs.gnome; [
+        # Force enable KMS modifiers for devices that require them.
+        # https://gitlab.gnome.org/GNOME/mutter/-/merge_requests/1443
+        mutter
+      ];
+
+      services.avahi.enable = mkDefault true;
+
+      xdg.portal.extraPortals = [
+        pkgs.gnome.gnome-shell
+      ];
+
+      services.geoclue2.enable = mkDefault true;
+      services.geoclue2.enableDemoAgent = false; # GNOME has its own geoclue agent
+
+      services.geoclue2.appConfig.gnome-datetime-panel = {
+        isAllowed = true;
+        isSystem = true;
+      };
+      services.geoclue2.appConfig.gnome-color-panel = {
+        isAllowed = true;
+        isSystem = true;
+      };
+      services.geoclue2.appConfig."org.gnome.Shell" = {
+        isAllowed = true;
+        isSystem = true;
+      };
+
+      fonts.packages = with pkgs; [
+        cantarell-fonts
+        dejavu_fonts
+        source-code-pro # Default monospace font in 3.32
+        source-sans
+      ];
+
+      # Adapt from https://gitlab.gnome.org/GNOME/gnome-build-meta/blob/gnome-3-38/elements/core/meta-gnome-core-shell.bst
+      environment.systemPackages =
+        let
+          mandatoryPackages = with pkgs.gnome; [
+            gnome-shell
+          ];
+          optionalPackages = with pkgs.gnome; [
+            adwaita-icon-theme
+            nixos-background-info
+            gnome-backgrounds
+            gnome-bluetooth
+            gnome-color-manager
+            gnome-control-center
+            gnome-shell-extensions
+            gnome-themes-extra
+            pkgs.gnome-tour # GNOME Shell detects the .desktop file on first log-in.
+            pkgs.gnome-user-docs
+            pkgs.orca
+            pkgs.glib # for gsettings program
+            pkgs.gnome-menus
+            pkgs.gtk3.out # for gtk-launch program
+            pkgs.xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
+          ];
+        in
+        mandatoryPackages
+        ++ utils.removePackagesByName optionalPackages config.environment.gnome.excludePackages;
+    })
+
+    # Adapt from https://gitlab.gnome.org/GNOME/gnome-build-meta/blob/gnome-3-38/elements/core/meta-gnome-core-utilities.bst
+    (mkIf serviceCfg.core-utilities.enable {
+      environment.systemPackages =
+        with pkgs.gnome;
+        utils.removePackagesByName
+          ([
+            baobab
+            cheese
+            eog
+            epiphany
+            pkgs.gnome-text-editor
+            gnome-calculator
+            gnome-calendar
+            gnome-characters
+            gnome-clocks
+            pkgs.gnome-console
+            gnome-contacts
+            gnome-font-viewer
+            gnome-logs
+            gnome-maps
+            gnome-music
+            pkgs.gnome-photos
+            gnome-system-monitor
+            gnome-weather
+            nautilus
+            pkgs.gnome-connections
+            simple-scan
+            totem
+            yelp
+          ] ++ lib.optionals config.services.flatpak.enable [
+            # Since PackageKit Nix support is not there yet,
+            # only install gnome-software if flatpak is enabled.
+            gnome-software
+          ])
+          config.environment.gnome.excludePackages;
+
+      # Enable default program modules
+      # Since some of these have a corresponding package, we only
+      # enable that program module if the package hasn't been excluded
+      # through `environment.gnome.excludePackages`
+      programs.evince.enable = notExcluded pkgs.gnome.evince;
+      programs.file-roller.enable = notExcluded pkgs.gnome.file-roller;
+      programs.geary.enable = notExcluded pkgs.gnome.geary;
+      programs.gnome-disks.enable = notExcluded pkgs.gnome.gnome-disk-utility;
+      programs.seahorse.enable = notExcluded pkgs.gnome.seahorse;
+      services.gnome.sushi.enable = notExcluded pkgs.gnome.sushi;
+
+      # VTE shell integration for gnome-console
+      programs.bash.vteIntegration = mkDefault true;
+      programs.zsh.vteIntegration = mkDefault true;
+
+      # Let nautilus find extensions
+      # TODO: Create nautilus-with-extensions package
+      environment.sessionVariables.NAUTILUS_4_EXTENSION_DIR = "${config.system.path}/lib/nautilus/extensions-4";
+
+      # Override default mimeapps for nautilus
+      environment.sessionVariables.XDG_DATA_DIRS = [ "${mimeAppsList}/share" ];
+
+      environment.pathsToLink = [
+        "/share/nautilus-python/extensions"
+      ];
+    })
+
+    (mkIf serviceCfg.games.enable {
+      environment.systemPackages = with pkgs.gnome; utils.removePackagesByName [
+        aisleriot
+        atomix
+        five-or-more
+        four-in-a-row
+        pkgs.gnome-2048
+        gnome-chess
+        gnome-klotski
+        gnome-mahjongg
+        gnome-mines
+        gnome-nibbles
+        gnome-robots
+        gnome-sudoku
+        gnome-taquin
+        gnome-tetravex
+        hitori
+        iagno
+        lightsoff
+        quadrapassel
+        swell-foop
+        tali
+      ] config.environment.gnome.excludePackages;
+    })
+
+    # Adapted from https://gitlab.gnome.org/GNOME/gnome-build-meta/-/blob/3.38.0/elements/core/meta-gnome-core-developer-tools.bst
+    (mkIf serviceCfg.core-developer-tools.enable {
+      environment.systemPackages = with pkgs.gnome; utils.removePackagesByName [
+        dconf-editor
+        devhelp
+        pkgs.gnome-builder
+        # boxes would make sense in this option, however
+        # it doesn't function well enough to be included
+        # in default configurations.
+        # https://github.com/NixOS/nixpkgs/issues/60908
+        /* gnome-boxes */
+      ] config.environment.gnome.excludePackages;
+
+      services.sysprof.enable = notExcluded pkgs.sysprof;
+    })
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix
new file mode 100644
index 000000000000..43904cd00e84
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/kodi.nix
@@ -0,0 +1,41 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.desktopManager.kodi;
+in
+
+{
+  options = {
+    services.xserver.desktopManager.kodi = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable the kodi multimedia center.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.kodi;
+        defaultText = literalExpression "pkgs.kodi";
+        example = literalExpression "pkgs.kodi.withPackages (p: with p; [ jellyfin pvr-iptvsimple vfs-sftp ])";
+        description = lib.mdDoc ''
+          Package that should be used for Kodi.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.desktopManager.session = [{
+      name = "kodi";
+      start = ''
+        LIRC_SOCKET_PATH=/run/lirc/lircd ${cfg.package}/bin/kodi --standalone &
+        waitPID=$!
+      '';
+    }];
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/lumina.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/lumina.nix
new file mode 100644
index 000000000000..7b694106bf7e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/lumina.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  xcfg = config.services.xserver;
+  cfg = xcfg.desktopManager.lumina;
+
+in
+
+{
+  meta = {
+    maintainers = teams.lumina.members;
+  };
+
+  options = {
+
+    services.xserver.desktopManager.lumina.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable the Lumina desktop manager";
+    };
+
+  };
+
+
+  config = mkIf cfg.enable {
+
+    services.xserver.displayManager.sessionPackages = [
+      pkgs.lumina.lumina
+    ];
+
+    environment.systemPackages =
+      pkgs.lumina.preRequisitePackages ++
+      pkgs.lumina.corePackages;
+
+    # Link some extra directories in /run/current-system/software/share
+    environment.pathsToLink = [
+      "/share/lumina"
+      # FIXME: modules should link subdirs of `/share` rather than relying on this
+      "/share"
+    ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/lxqt.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/lxqt.nix
new file mode 100644
index 000000000000..b69da41c9fc9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/lxqt.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  xcfg = config.services.xserver;
+  cfg = xcfg.desktopManager.lxqt;
+
+in
+
+{
+  meta = {
+    maintainers = teams.lxqt.members;
+  };
+
+  options = {
+
+    services.xserver.desktopManager.lxqt.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable the LXQt desktop manager";
+    };
+
+    environment.lxqt.excludePackages = mkOption {
+      default = [];
+      example = literalExpression "[ pkgs.lxqt.qterminal ]";
+      type = types.listOf types.package;
+      description = lib.mdDoc "Which LXQt packages to exclude from the default environment";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    services.xserver.desktopManager.session = singleton {
+      name = "lxqt";
+      bgSupport = true;
+      start = ''
+        # Upstream installs default configuration files in
+        # $prefix/share/lxqt instead of $prefix/etc/xdg, (arguably)
+        # giving distributors freedom to ship custom default
+        # configuration files more easily. In order to let the session
+        # manager find them the share subdirectory is added to the
+        # XDG_CONFIG_DIRS environment variable.
+        #
+        # For an explanation see
+        # https://github.com/lxqt/lxqt/issues/1521#issuecomment-405097453
+        #
+        export XDG_CONFIG_DIRS=$XDG_CONFIG_DIRS''${XDG_CONFIG_DIRS:+:}${config.system.path}/share
+
+        exec ${pkgs.lxqt.lxqt-session}/bin/startlxqt
+      '';
+    };
+
+    environment.systemPackages =
+      pkgs.lxqt.preRequisitePackages ++
+      pkgs.lxqt.corePackages ++
+      (utils.removePackagesByName
+        pkgs.lxqt.optionalPackages
+        config.environment.lxqt.excludePackages);
+
+    # Link some extra directories in /run/current-system/software/share
+    environment.pathsToLink = [ "/share" ];
+
+    # virtual file systems support for PCManFM-QT
+    services.gvfs.enable = true;
+
+    services.upower.enable = config.powerManagement.enable;
+
+    services.xserver.libinput.enable = mkDefault true;
+
+    xdg.portal.lxqt.enable = true;
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/mate.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/mate.nix
new file mode 100644
index 000000000000..c93f120bed7f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/mate.nix
@@ -0,0 +1,83 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+
+  xcfg = config.services.xserver;
+  cfg = xcfg.desktopManager.mate;
+
+in
+
+{
+  options = {
+
+    services.xserver.desktopManager.mate = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable the MATE desktop environment";
+      };
+
+      debug = mkEnableOption (lib.mdDoc "mate-session debug messages");
+    };
+
+    environment.mate.excludePackages = mkOption {
+      default = [];
+      example = literalExpression "[ pkgs.mate.mate-terminal pkgs.mate.pluma ]";
+      type = types.listOf types.package;
+      description = lib.mdDoc "Which MATE packages to exclude from the default environment";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    services.xserver.displayManager.sessionPackages = [
+      pkgs.mate.mate-session-manager
+    ];
+
+    # Let caja find extensions
+    environment.sessionVariables.CAJA_EXTENSION_DIRS = [ "${config.system.path}/lib/caja/extensions-2.0" ];
+
+    # Let mate-panel find applets
+    environment.sessionVariables."MATE_PANEL_APPLETS_DIR" = "${config.system.path}/share/mate-panel/applets";
+    environment.sessionVariables."MATE_PANEL_EXTRA_MODULES" = "${config.system.path}/lib/mate-panel/applets";
+
+    # Debugging
+    environment.sessionVariables.MATE_SESSION_DEBUG = mkIf cfg.debug "1";
+
+    environment.systemPackages = utils.removePackagesByName
+      (pkgs.mate.basePackages ++
+      pkgs.mate.extraPackages ++
+      [
+        pkgs.desktop-file-utils
+        pkgs.glib
+        pkgs.gtk3.out
+        pkgs.shared-mime-info
+        pkgs.xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
+        pkgs.yelp # for 'Contents' in 'Help' menus
+      ])
+      config.environment.mate.excludePackages;
+
+    programs.dconf.enable = true;
+    # Shell integration for VTE terminals
+    programs.bash.vteIntegration = mkDefault true;
+    programs.zsh.vteIntegration = mkDefault true;
+
+    # Mate uses this for printing
+    programs.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
+
+    services.gnome.at-spi2-core.enable = true;
+    services.gnome.gnome-keyring.enable = true;
+    services.udev.packages = [ pkgs.mate.mate-settings-daemon ];
+    services.gvfs.enable = true;
+    services.upower.enable = config.powerManagement.enable;
+    services.xserver.libinput.enable = mkDefault true;
+
+    security.pam.services.mate-screensaver.unixAuth = true;
+
+    environment.pathsToLink = [ "/share" ];
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/none.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/none.nix
new file mode 100644
index 000000000000..074b729cc3f3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/none.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  runXdgAutostart = config.services.xserver.desktopManager.runXdgAutostartIfNone;
+in
+{
+  options = {
+    services.xserver.desktopManager.runXdgAutostartIfNone = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to run XDG autostart files for sessions without a desktop manager
+        (with only a window manager); these sessions usually don't handle XDG
+        autostart files by default.
+
+        Some services, such as {option}`i18n.inputMethod` and
+        {option}`services.earlyoom`, use XDG autostart files to start.
+        If this option is not set to `true` and you are using
+        a window manager without a desktop manager, you need to start
+        them manually or run `dex` somewhere.
+      '';
+    };
+  };
+
+  config = mkMerge [
+    {
+      services.xserver.desktopManager.session = [
+        {
+          name = "none";
+          start = optionalString runXdgAutostart ''
+            /run/current-system/systemd/bin/systemctl --user start xdg-autostart-if-no-desktop-manager.target
+          '';
+        }
+      ];
+    }
+    (mkIf runXdgAutostart {
+      systemd.user.targets.xdg-autostart-if-no-desktop-manager = {
+        description = "Run XDG autostart files";
+        # From `plasma-workspace`, `share/systemd/user/plasma-workspace@.target`.
+        requires = [ "xdg-desktop-autostart.target" "graphical-session.target" ];
+        before = [ "xdg-desktop-autostart.target" "graphical-session.target" ];
+        bindsTo = [ "graphical-session.target" ];
+      };
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md
new file mode 100644
index 000000000000..1c14ede84749
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.md
@@ -0,0 +1,74 @@
+# Pantheon Desktop {#chap-pantheon}
+
+Pantheon is the desktop environment created for the elementary OS distribution. It is written from scratch in Vala, utilizing GNOME technologies with GTK and Granite.
+
+## Enabling Pantheon {#sec-pantheon-enable}
+
+All of Pantheon is working in NixOS and the applications should be available, aside from a few [exceptions](https://github.com/NixOS/nixpkgs/issues/58161). To enable Pantheon, set
+```
+services.xserver.desktopManager.pantheon.enable = true;
+```
+This automatically enables LightDM and Pantheon's LightDM greeter. If you'd like to disable this, set
+```
+services.xserver.displayManager.lightdm.greeters.pantheon.enable = false;
+services.xserver.displayManager.lightdm.enable = false;
+```
+but please be aware that using Pantheon without LightDM as a display manager will break screen locking from the UI. The NixOS module for Pantheon installs all of Pantheon's default applications. If you'd rather not install Pantheon's apps, set
+```
+services.pantheon.apps.enable = false;
+```
+You can also use [](#opt-environment.pantheon.excludePackages) to remove any other app (like `elementary-mail`).
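+
+For example, a minimal sketch excluding the default mail and music apps (the attribute names below are taken from the module's default application list) would be:
+```
+environment.pantheon.excludePackages = with pkgs.pantheon; [
+  elementary-mail
+  elementary-music
+];
+```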
+
+## Wingpanel and Switchboard plugins {#sec-pantheon-wingpanel-switchboard}
+
+Wingpanel and Switchboard work differently in NixOS than they do in other distributions when it comes to plugins. You cannot install a plugin globally (like with {option}`environment.systemPackages`) to start using it. Instead, use the following options:
+
+  - [](#opt-services.xserver.desktopManager.pantheon.extraWingpanelIndicators)
+  - [](#opt-services.xserver.desktopManager.pantheon.extraSwitchboardPlugs)
+
+to configure the programs with plugs or indicators.
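+
+For instance, a minimal sketch adding one extra indicator and one extra plug might look like this (`wingpanel-indicator-foo` and `switchboard-plug-bar` are hypothetical package names, used purely for illustration):
+```
+services.xserver.desktopManager.pantheon = {
+  # Both options only add to the default indicators/plugs shipped with the programs.
+  extraWingpanelIndicators = [ pkgs.wingpanel-indicator-foo ]; # hypothetical package
+  extraSwitchboardPlugs = [ pkgs.switchboard-plug-bar ];       # hypothetical package
+};
+```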
+
+The difference in NixOS is that both of these programs are patched to load plugins from a directory given by an environment variable, all of which is controlled in Nix. If you need to configure the particular packages manually, you can override them like:
+```
+wingpanel-with-indicators.override {
+  indicators = [
+    pkgs.some-special-indicator
+  ];
+};
+
+switchboard-with-plugs.override {
+  plugs = [
+    pkgs.some-special-plug
+  ];
+};
+```
+Please note that, just as the NixOS options describe these as *extra* plugins, this only adds to the default plugins included with the programs. If for some reason you'd like to configure exactly which plugins are used, both packages have an argument for this:
+```
+wingpanel-with-indicators.override {
+  useDefaultIndicators = false;
+  indicators = specialListOfIndicators;
+};
+
+switchboard-with-plugs.override {
+  useDefaultPlugs = false;
+  plugs = specialListOfPlugs;
+};
+```
+This can be most useful for testing a particular plugin in isolation.
+
+## FAQ {#sec-pantheon-faq}
+
+[I have switched from a different desktop and Pantheon’s theming looks messed up.]{#sec-pantheon-faq-messed-up-theme}
+  : Open Switchboard and go to: Administration → About → Restore Default Settings → Restore Settings. This will reset any dconf settings to their Pantheon defaults. Note that this could reset certain GNOME-specific preferences if that desktop was used previously.
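+
+    If you prefer the command line, a rough equivalent is shown below; note that this sketch is not necessarily what Switchboard itself runs, and it wipes *all* of your dconf settings, not only Pantheon's:
+
+    ```ShellSession
+    $ dconf reset -f /
+    ```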
+
+[I cannot enable both GNOME and Pantheon.]{#sec-pantheon-faq-gnome-and-pantheon}
+  : This is a known [issue](https://github.com/NixOS/nixpkgs/issues/64611) and there is no known workaround.
+
+[Does AppCenter work, or is it available?]{#sec-pantheon-faq-appcenter}
+  : AppCenter has been available since 20.03. Starting from 21.11, the Flatpak backend should work, so you can install some Flatpak applications using it. However, due to missing AppStream metadata, the PackageKit backend does not currently function. See this [issue](https://github.com/NixOS/nixpkgs/issues/15932).
+
+    If you are using Pantheon, AppCenter should be installed by default if you have [Flatpak support](#module-services-flatpak) enabled. If you also wish to add the `appcenter` Flatpak remote:
+
+    ```ShellSession
+    $ flatpak remote-add --if-not-exists appcenter https://flatpak.elementary.io/repo.flatpakrepo
+    ```
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix
new file mode 100644
index 000000000000..d82d19b26cda
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/pantheon.nix
@@ -0,0 +1,326 @@
+{ config, lib, utils, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.desktopManager.pantheon;
+  serviceCfg = config.services.pantheon;
+
+  nixos-gsettings-desktop-schemas = pkgs.pantheon.elementary-gsettings-schemas.override {
+    extraGSettingsOverridePackages = cfg.extraGSettingsOverridePackages;
+    extraGSettingsOverrides = cfg.extraGSettingsOverrides;
+  };
+
+in
+
+{
+
+  meta = {
+    doc = ./pantheon.md;
+    maintainers = teams.pantheon.members;
+  };
+
+  options = {
+
+    services.pantheon = {
+
+      contractor = {
+         enable = mkEnableOption (lib.mdDoc "contractor, a desktop-wide extension service used by Pantheon");
+      };
+
+      apps.enable = mkEnableOption (lib.mdDoc "Pantheon default applications");
+
+    };
+
+    services.xserver.desktopManager.pantheon = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable the pantheon desktop manager";
+      };
+
+      sessionPath = mkOption {
+        default = [];
+        type = types.listOf types.package;
+        example = literalExpression "[ pkgs.gnome.gpaste ]";
+        description = lib.mdDoc ''
+          Additional list of packages to be added to the session search path.
+          Useful for GSettings-conditional autostart.
+
+          Note that this should be a last resort; patching the package is preferred (see GPaste).
+        '';
+      };
+
+      extraWingpanelIndicators = mkOption {
+        default = null;
+        type = with types; nullOr (listOf package);
+        description = lib.mdDoc "Indicators to add to Wingpanel.";
+      };
+
+      extraSwitchboardPlugs = mkOption {
+        default = null;
+        type = with types; nullOr (listOf package);
+        description = lib.mdDoc "Plugs to add to Switchboard.";
+      };
+
+      extraGSettingsOverrides = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc "Additional gsettings overrides.";
+      };
+
+      extraGSettingsOverridePackages = mkOption {
+        default = [];
+        type = types.listOf types.path;
+        description = lib.mdDoc "List of packages for which gsettings are overridden.";
+      };
+
+      debug = mkEnableOption (lib.mdDoc "gnome-session debug messages");
+
+    };
+
+    environment.pantheon.excludePackages = mkOption {
+      default = [];
+      example = literalExpression "[ pkgs.pantheon.elementary-camera ]";
+      type = types.listOf types.package;
+      description = lib.mdDoc "Which packages pantheon should exclude from the default environment";
+    };
+
+  };
+
+
+  config = mkMerge [
+    (mkIf cfg.enable {
+      services.xserver.desktopManager.pantheon.sessionPath = utils.removePackagesByName [
+        pkgs.pantheon.pantheon-agent-geoclue2
+      ] config.environment.pantheon.excludePackages;
+
+      services.xserver.displayManager.sessionPackages = [ pkgs.pantheon.elementary-session-settings ];
+
+      # Ensure lightdm is used when Pantheon is enabled
+      # Without it screen locking will be nonfunctional because of the use of lightlocker
+      warnings = optional (config.services.xserver.displayManager.lightdm.enable != true)
+        ''
+          Using Pantheon without LightDM as a displayManager will break screenlocking from the UI.
+        '';
+
+      services.xserver.displayManager.lightdm.greeters.pantheon.enable = mkDefault true;
+
+      # Without this, elementary LightDM greeter will pre-select non-existent `default` session
+      # https://github.com/elementary/greeter/issues/368
+      services.xserver.displayManager.defaultSession = mkDefault "pantheon";
+
+      services.xserver.displayManager.sessionCommands = ''
+        if test "$XDG_CURRENT_DESKTOP" = "Pantheon"; then
+            true
+            ${concatMapStrings (p: ''
+              if [ -d "${p}/share/gsettings-schemas/${p.name}" ]; then
+                export XDG_DATA_DIRS=$XDG_DATA_DIRS''${XDG_DATA_DIRS:+:}${p}/share/gsettings-schemas/${p.name}
+              fi
+
+              if [ -d "${p}/lib/girepository-1.0" ]; then
+                export GI_TYPELIB_PATH=$GI_TYPELIB_PATH''${GI_TYPELIB_PATH:+:}${p}/lib/girepository-1.0
+                export LD_LIBRARY_PATH=$LD_LIBRARY_PATH''${LD_LIBRARY_PATH:+:}${p}/lib
+              fi
+            '') cfg.sessionPath}
+        fi
+      '';
+
+      # Default services
+      hardware.bluetooth.enable = mkDefault true;
+      hardware.pulseaudio.enable = mkDefault true;
+      security.polkit.enable = true;
+      services.accounts-daemon.enable = true;
+      services.bamf.enable = true;
+      services.colord.enable = mkDefault true;
+      services.fwupd.enable = mkDefault true;
+      # TODO: Enable once #177946 is resolved
+      # services.packagekit.enable = mkDefault true;
+      services.power-profiles-daemon.enable = mkDefault true;
+      services.touchegg.enable = mkDefault true;
+      services.touchegg.package = pkgs.pantheon.touchegg;
+      services.tumbler.enable = mkDefault true;
+      services.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
+      services.dbus.packages = with pkgs.pantheon; [
+        switchboard-plug-power
+        elementary-default-settings # accountsservice extensions
+      ];
+      services.pantheon.apps.enable = mkDefault true;
+      services.pantheon.contractor.enable = mkDefault true;
+      services.gnome.at-spi2-core.enable = true;
+      services.gnome.evolution-data-server.enable = true;
+      services.gnome.glib-networking.enable = true;
+      services.gnome.gnome-keyring.enable = true;
+      services.gvfs.enable = true;
+      services.gnome.rygel.enable = mkDefault true;
+      services.gsignond.enable = mkDefault true;
+      services.gsignond.plugins = with pkgs.gsignondPlugins; [ lastfm mail oauth ];
+      services.udisks2.enable = true;
+      services.upower.enable = config.powerManagement.enable;
+      services.xserver.libinput.enable = mkDefault true;
+      services.xserver.updateDbusEnvironment = true;
+      services.zeitgeist.enable = mkDefault true;
+      services.geoclue2.enable = mkDefault true;
+      # pantheon has pantheon-agent-geoclue2
+      services.geoclue2.enableDemoAgent = false;
+      services.geoclue2.appConfig."io.elementary.desktop.agent-geoclue2" = {
+        isAllowed = true;
+        isSystem = true;
+      };
+      services.udev.packages = [
+        pkgs.pantheon.gnome-settings-daemon
+        # Force enable KMS modifiers for devices that require them.
+        # https://gitlab.gnome.org/GNOME/mutter/-/merge_requests/1443
+        pkgs.pantheon.mutter
+      ];
+      systemd.packages = [
+        pkgs.pantheon.gnome-settings-daemon
+      ];
+      programs.dconf.enable = true;
+      networking.networkmanager.enable = mkDefault true;
+
+      # Global environment
+      environment.systemPackages = (with pkgs.pantheon; [
+        elementary-session-settings
+        elementary-settings-daemon
+        gala
+        gnome-settings-daemon
+        (switchboard-with-plugs.override {
+          plugs = cfg.extraSwitchboardPlugs;
+        })
+        (wingpanel-with-indicators.override {
+          indicators = cfg.extraWingpanelIndicators;
+        })
+      ]) ++ utils.removePackagesByName ((with pkgs; [
+        desktop-file-utils
+        glib # for gsettings program
+        gnome-menus
+        gnome.adwaita-icon-theme
+        gtk3.out # for gtk-launch program
+        onboard
+        orca # elementary/greeter#668
+        sound-theme-freedesktop
+        xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
+      ]) ++ (with pkgs.pantheon; [
+        # Artwork
+        elementary-gtk-theme
+        elementary-icon-theme
+        elementary-sound-theme
+        elementary-wallpapers
+
+        # Desktop
+        elementary-default-settings
+        elementary-dock
+        elementary-shortcut-overlay
+
+        # Services
+        elementary-capnet-assist
+        elementary-notifications
+        pantheon-agent-geoclue2
+        pantheon-agent-polkit
+      ])) config.environment.pantheon.excludePackages;
+
+      # Settings from elementary-default-settings
+      environment.etc."gtk-3.0/settings.ini".source = "${pkgs.pantheon.elementary-default-settings}/etc/gtk-3.0/settings.ini";
+
+      xdg.mime.enable = true;
+      xdg.icons.enable = true;
+
+      xdg.portal.enable = true;
+      xdg.portal.extraPortals = [
+        # Some Pantheon apps enforce portal usage; we need this for e.g. notifications.
+        # Currently we have buildPortalsInGnome enabled; if you run into issues related
+        # to https://github.com/flatpak/xdg-desktop-portal/issues/656, please report to us.
+        pkgs.xdg-desktop-portal-gtk
+      ] ++ (with pkgs.pantheon; [
+        elementary-files
+        elementary-settings-daemon
+        xdg-desktop-portal-pantheon
+      ]);
+
+      # Override GSettings schemas
+      environment.sessionVariables.NIX_GSETTINGS_OVERRIDES_DIR = "${nixos-gsettings-desktop-schemas}/share/gsettings-schemas/nixos-gsettings-overrides/glib-2.0/schemas";
+
+      environment.sessionVariables.GNOME_SESSION_DEBUG = mkIf cfg.debug "1";
+
+      environment.pathsToLink = [
+        # FIXME: modules should link subdirs of `/share` rather than relying on this
+        "/share"
+      ];
+
+      # Otherwise you can't store NetworkManager Secrets with
+      # "Store the password only for this user"
+      programs.nm-applet.enable = true;
+      # Pantheon has its own network indicator
+      programs.nm-applet.indicator = false;
+
+      # Shell integration for VTE terminals
+      programs.bash.vteIntegration = mkDefault true;
+      programs.zsh.vteIntegration = mkDefault true;
+
+      # Use native GTK file chooser on Qt apps. This is because Qt does not know Pantheon.
+      # https://invent.kde.org/qt/qt/qtbase/-/blob/6.6/src/gui/platform/unix/qgenericunixthemes.cpp#L1312
+      # https://github.com/elementary/default-settings/blob/7.0.2/profile.d/qt-qpa-platformtheme.sh
+      environment.variables.QT_QPA_PLATFORMTHEME = mkDefault "gtk3";
+
+      # Default Fonts
+      fonts.packages = with pkgs; [
+        inter
+        open-dyslexic
+        open-sans
+        roboto-mono
+      ];
+
+      fonts.fontconfig.defaultFonts = {
+        monospace = [ "Roboto Mono" ];
+        sansSerif = [ "Inter" ];
+      };
+    })
+
+    (mkIf serviceCfg.apps.enable {
+      programs.evince.enable = mkDefault true;
+      programs.file-roller.enable = mkDefault true;
+
+      environment.systemPackages = utils.removePackagesByName ([
+        pkgs.gnome.gnome-font-viewer
+      ] ++ (with pkgs.pantheon; [
+        elementary-calculator
+        elementary-calendar
+        elementary-camera
+        elementary-code
+        elementary-files
+        elementary-mail
+        elementary-music
+        elementary-photos
+        elementary-screenshot
+        elementary-tasks
+        elementary-terminal
+        elementary-videos
+        epiphany
+      ] ++ lib.optionals config.services.flatpak.enable [
+        # Until https://github.com/NixOS/nixpkgs/issues/15932 is resolved,
+        # only install appcenter if flatpak is enabled.
+        appcenter
+        sideload
+      ])) config.environment.pantheon.excludePackages;
+
+      # needed by screenshot
+      fonts.packages = [
+        pkgs.pantheon.elementary-redacted-script
+      ];
+    })
+
+    (mkIf serviceCfg.contractor.enable {
+      environment.systemPackages = with pkgs.pantheon; [
+        contractor
+        file-roller-contract
+      ];
+
+      environment.pathsToLink = [
+        "/share/contractor"
+      ];
+    })
+
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/phosh.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/phosh.nix
new file mode 100644
index 000000000000..e4cd9fd99e40
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/phosh.nix
@@ -0,0 +1,223 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.desktopManager.phosh;
+
+  # Based on https://source.puri.sm/Librem5/librem5-base/-/blob/4596c1056dd75ac7f043aede07887990fd46f572/default/sm.puri.OSK0.desktop
+  oskItem = pkgs.makeDesktopItem {
+    name = "sm.puri.OSK0";
+    desktopName = "On-screen keyboard";
+    exec = "${pkgs.squeekboard}/bin/squeekboard";
+    categories = [ "GNOME" "Core" ];
+    onlyShowIn = [ "GNOME" ];
+    noDisplay = true;
+    extraConfig = {
+      X-GNOME-Autostart-Phase = "Panel";
+      X-GNOME-Provides = "inputmethod";
+      X-GNOME-Autostart-Notify = "true";
+      X-GNOME-AutoRestart = "true";
+    };
+  };
+
+  phocConfigType = types.submodule {
+    options = {
+      xwayland = mkOption {
+        description = lib.mdDoc ''
+          Whether to enable XWayland support.
+
+          To start XWayland immediately, use `immediate`.
+        '';
+        type = types.enum [ "true" "false" "immediate" ];
+        default = "false";
+      };
+      cursorTheme = mkOption {
+        description = lib.mdDoc ''
+          Cursor theme to use in Phosh.
+        '';
+        type = types.str;
+        default = "default";
+      };
+      outputs = mkOption {
+        description = lib.mdDoc ''
+          Output configurations.
+        '';
+        type = types.attrsOf phocOutputType;
+        default = {
+          DSI-1 = {
+            scale = 2;
+          };
+        };
+      };
+    };
+  };
+
+  phocOutputType = types.submodule {
+    options = {
+      modeline = mkOption {
+        description = lib.mdDoc ''
+          One or more modelines.
+        '';
+        type = types.either types.str (types.listOf types.str);
+        default = [];
+        example = [
+          "87.25 720 776 848  976 1440 1443 1453 1493 -hsync +vsync"
+          "65.13 768 816 896 1024 1024 1025 1028 1060 -HSync +VSync"
+        ];
+      };
+      mode = mkOption {
+        description = lib.mdDoc ''
+          Default video mode.
+        '';
+        type = types.nullOr types.str;
+        default = null;
+        example = "768x1024";
+      };
+      scale = mkOption {
+        description = lib.mdDoc ''
+          Display scaling factor.
+        '';
+        type = types.nullOr (
+          types.addCheck
+          (types.either types.int types.float)
+          (x : x > 0)
+        ) // {
+          description = "null or positive integer or float";
+        };
+        default = null;
+        example = 2;
+      };
+      rotate = mkOption {
+        description = lib.mdDoc ''
+          Screen transformation.
+        '';
+        type = types.enum [
+          "90" "180" "270" "flipped" "flipped-90" "flipped-180" "flipped-270" null
+        ];
+        default = null;
+      };
+    };
+  };
+
+  optionalKV = k: v: optionalString (v != null) "${k} = ${builtins.toString v}";
+
+  renderPhocOutput = name: output: let
+    modelines = if builtins.isList output.modeline
+      then output.modeline
+      else [ output.modeline ];
+    renderModeline = l: "modeline = ${l}";
+  in ''
+    [output:${name}]
+    ${concatStringsSep "\n" (map renderModeline modelines)}
+    ${optionalKV "mode" output.mode}
+    ${optionalKV "scale" output.scale}
+    ${optionalKV "rotate" output.rotate}
+  '';
+
+  renderPhocConfig = phoc: let
+    outputs = mapAttrsToList renderPhocOutput phoc.outputs;
+  in ''
+    [core]
+    xwayland = ${phoc.xwayland}
+    ${concatStringsSep "\n" outputs}
+    [cursor]
+    theme = ${phoc.cursorTheme}
+  '';
+in
+
+{
+  options = {
+    services.xserver.desktopManager.phosh = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable the Phone Shell.";
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.phosh;
+        defaultText = literalExpression "pkgs.phosh";
+        example = literalExpression "pkgs.phosh";
+        description = lib.mdDoc ''
+          Package that should be used for Phosh.
+        '';
+      };
+
+      user = mkOption {
+        description = lib.mdDoc "The user to run the Phosh service.";
+        type = types.str;
+        example = "alice";
+      };
+
+      group = mkOption {
+        description = lib.mdDoc "The group to run the Phosh service.";
+        type = types.str;
+        example = "users";
+      };
+
+      phocConfig = mkOption {
+        description = lib.mdDoc ''
+          Configurations for the Phoc compositor.
+        '';
+        type = types.oneOf [ types.lines types.path phocConfigType ];
+        default = {};
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.defaultUnit = "graphical.target";
+    # Inspired by https://gitlab.gnome.org/World/Phosh/phosh/-/blob/main/data/phosh.service
+    systemd.services.phosh = {
+      wantedBy = [ "graphical.target" ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/phosh-session";
+        User = cfg.user;
+        Group = cfg.group;
+        PAMName = "login";
+        WorkingDirectory = "~";
+        Restart = "always";
+
+        TTYPath = "/dev/tty7";
+        TTYReset = "yes";
+        TTYVHangup = "yes";
+        TTYVTDisallocate = "yes";
+
+        # Fail to start if not controlling the tty.
+        StandardInput = "tty-fail";
+        StandardOutput = "journal";
+        StandardError = "journal";
+
+        # Log this user with utmp, letting it show up with commands 'w' and 'who'.
+        UtmpIdentifier = "tty7";
+        UtmpMode = "user";
+      };
+    };
+
+    environment.systemPackages = [
+      pkgs.phoc
+      cfg.package
+      pkgs.squeekboard
+      oskItem
+    ];
+
+    systemd.packages = [ cfg.package ];
+
+    programs.feedbackd.enable = true;
+
+    security.pam.services.phosh = {};
+
+    hardware.opengl.enable = mkDefault true;
+
+    services.gnome.core-shell.enable = true;
+    services.gnome.core-os-services.enable = true;
+    services.xserver.displayManager.sessionPackages = [ cfg.package ];
+
+    environment.etc."phosh/phoc.ini".source =
+      if builtins.isPath cfg.phocConfig then cfg.phocConfig
+      else if builtins.isString cfg.phocConfig then pkgs.writeText "phoc.ini" cfg.phocConfig
+      else pkgs.writeText "phoc.ini" (renderPhocConfig cfg.phocConfig);
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/plasma5.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/plasma5.nix
new file mode 100644
index 000000000000..361dbe879a18
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/plasma5.nix
@@ -0,0 +1,564 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+  xcfg = config.services.xserver;
+  cfg = xcfg.desktopManager.plasma5;
+
+  # Use only for **internal** options.
+  # This is not exactly user-friendly.
+  kdeConfigurationType = with types;
+    let
+      valueTypes = (oneOf [
+        bool
+        float
+        int
+        str
+      ]) // {
+        description = "KDE Configuration value";
+        emptyValue.value = "";
+      };
+      set = (nullOr (lazyAttrsOf valueTypes)) // {
+        description = "KDE Configuration set";
+        emptyValue.value = {};
+      };
+    in (lazyAttrsOf set) // {
+        description = "KDE Configuration file";
+        emptyValue.value = {};
+      };
+
+  libsForQt5 = pkgs.plasma5Packages;
+  inherit (libsForQt5) kdeGear kdeFrameworks plasma5;
+  inherit (lib)
+    getBin optionalAttrs optionalString literalExpression
+    mkRemovedOptionModule mkRenamedOptionModule
+    mkDefault mkIf mkMerge mkOption mkPackageOptionMD types;
+
+  activationScript = ''
+    ${set_XDG_CONFIG_HOME}
+
+    # The KDE icon cache is supposed to update itself automatically, but it uses
+    # the timestamp on the icon theme directory as a trigger. This doesn't work
+    # on NixOS because the timestamp never changes. As a workaround, delete the
+    # icon cache at login and session activation.
+    # See also: http://lists-archives.org/kde-devel/26175-what-when-will-icon-cache-refresh.html
+    rm -fv $HOME/.cache/icon-cache.kcache
+
+    # xdg-desktop-settings generates this empty file, but
+    # it makes kbuildsycoca5 fail silently. To fix this,
+    # remove that menu if it exists.
+    rm -fv ''${XDG_CONFIG_HOME}/menus/applications-merged/xdg-desktop-menu-dummy.menu
+
+    # Qt writes a weird ‘libraryPath’ line to
+    # ~/.config/Trolltech.conf that causes the KDE plugin
+    # paths of previous KDE invocations to be searched.
+    # Obviously using mismatching KDE libraries is potentially
+    # disastrous, so here we nuke references to the Nix store
+    # in Trolltech.conf.  A better solution would be to stop
+    # Qt from doing this wackiness in the first place.
+    trolltech_conf="''${XDG_CONFIG_HOME}/Trolltech.conf"
+    if [ -e "$trolltech_conf" ]; then
+      ${getBin pkgs.gnused}/bin/sed -i "$trolltech_conf" -e '/nix\\store\|nix\/store/ d'
+    fi
+
+    # Remove the kbuildsycoca5 cache. It will be regenerated
+    # immediately after. This is necessary for kbuildsycoca5 to
+    # recognize that software has been removed.
+    rm -fv $HOME/.cache/ksycoca*
+
+    ${libsForQt5.kservice}/bin/kbuildsycoca5
+  '';
+
+  set_XDG_CONFIG_HOME = ''
+    # Set the default XDG_CONFIG_HOME if it is unset.
+    # Per the XDG Base Directory Specification:
+    # https://specifications.freedesktop.org/basedir-spec/latest
+    # 1. Never export this variable! If it is unset, then child processes are
+    # expected to set the default themselves.
+    # 2. Contaminate / if $HOME is unset; do not check if $HOME is set.
+    XDG_CONFIG_HOME=''${XDG_CONFIG_HOME:-$HOME/.config}
+  '';
+
+in
+
+{
+  options = {
+    services.xserver.desktopManager.plasma5 = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable the Plasma 5 (KDE 5) desktop environment.";
+      };
+
+      phononBackend = mkOption {
+        type = types.enum [ "gstreamer" "vlc" ];
+        default = "vlc";
+        example = "gstreamer";
+        description = lib.mdDoc "Phonon audio backend to install.";
+      };
+
+      useQtScaling = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable HiDPI scaling in Qt.";
+      };
+
+      runUsingSystemd = mkOption {
+        description = lib.mdDoc "Use systemd to manage the Plasma session";
+        type = types.bool;
+        default = true;
+      };
+
+      notoPackage = mkPackageOptionMD pkgs "Noto fonts" {
+        default = [ "noto-fonts" ];
+        example = "noto-fonts-lgc-plus";
+      };
+
+      # Internally allows configuring kdeglobals globally
+      kdeglobals = mkOption {
+        internal = true;
+        default = {};
+        type = kdeConfigurationType;
+      };
+
+      # Internally allows configuring kwin globally
+      kwinrc = mkOption {
+        internal = true;
+        default = {};
+        type = kdeConfigurationType;
+      };
+
+      mobile.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable support for running the Plasma Mobile shell.
+        '';
+      };
+
+      mobile.installRecommendedSoftware = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Installs software recommended for use with Plasma Mobile, but which
+          is not strictly required for Plasma Mobile to run.
+        '';
+      };
+
+      bigscreen.enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable support for running the Plasma Bigscreen session.
+        '';
+      };
+    };
+    environment.plasma5.excludePackages = mkOption {
+        description = lib.mdDoc "List of default packages to exclude from the configuration";
+        type = types.listOf types.package;
+        default = [];
+        example = literalExpression "[ pkgs.plasma5Packages.oxygen ]";
+      };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "xserver" "desktopManager" "plasma5" "enableQt4Support" ] "Phonon no longer supports Qt 4.")
+    (mkRemovedOptionModule [ "services" "xserver" "desktopManager" "plasma5" "supportDDC" ] "DDC/CI is no longer supported upstream.")
+    (mkRenamedOptionModule [ "services" "xserver" "desktopManager" "kde5" ] [ "services" "xserver" "desktopManager" "plasma5" ])
+    (mkRenamedOptionModule [ "services" "xserver" "desktopManager" "plasma5" "excludePackages" ] [ "environment" "plasma5" "excludePackages" ])
+  ];
+
+  config = mkMerge [
+    # Common Plasma dependencies
+    (mkIf (cfg.enable || cfg.mobile.enable || cfg.bigscreen.enable) {
+
+      security.wrappers = {
+        kwin_wayland = {
+          owner = "root";
+          group = "root";
+          capabilities = "cap_sys_nice+ep";
+          source = "${getBin plasma5.kwin}/bin/kwin_wayland";
+        };
+      } // optionalAttrs (!cfg.runUsingSystemd) {
+        start_kdeinit = {
+          setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${getBin libsForQt5.kinit}/libexec/kf5/start_kdeinit";
+        };
+      };
+
+      environment.systemPackages =
+        with libsForQt5;
+        with plasma5; with kdeGear; with kdeFrameworks;
+        let
+          requiredPackages = [
+            frameworkintegration
+            kactivities
+            kauth
+            kcmutils
+            kconfig
+            kconfigwidgets
+            kcoreaddons
+            kdoctools
+            kdbusaddons
+            kdeclarative
+            kded
+            kdesu
+            kdnssd
+            kemoticons
+            kfilemetadata
+            kglobalaccel
+            kguiaddons
+            kiconthemes
+            kidletime
+            kimageformats
+            kinit
+            kirigami2 # In system profile for SDDM theme. TODO: wrapper.
+            kio
+            kjobwidgets
+            knewstuff
+            knotifications
+            knotifyconfig
+            kpackage
+            kparts
+            kpeople
+            krunner
+            kservice
+            ktextwidgets
+            kwallet
+            kwallet-pam
+            kwalletmanager
+            kwayland
+            kwayland-integration
+            kwidgetsaddons
+            kxmlgui
+            kxmlrpcclient
+            plasma-framework
+            solid
+            sonnet
+            threadweaver
+
+            breeze-qt5
+            kactivitymanagerd
+            kde-cli-tools
+            kdecoration
+            kdeplasma-addons
+            kgamma5
+            khotkeys
+            kscreen
+            kscreenlocker
+            kwayland
+            kwin
+            kwrited
+            libkscreen
+            libksysguard
+            milou
+            plasma-integration
+            polkit-kde-agent
+
+            plasma-desktop
+            plasma-workspace
+            plasma-workspace-wallpapers
+
+            oxygen-sounds
+
+            breeze-icons
+            pkgs.hicolor-icon-theme
+
+            kde-gtk-config
+            breeze-gtk
+
+            qtvirtualkeyboard
+
+            pkgs.xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
+          ];
+          optionalPackages = [
+            pkgs.aha # needed by kinfocenter for fwupd support
+            plasma-browser-integration
+            konsole
+            oxygen
+            (lib.getBin qttools) # Expose qdbus in PATH
+          ];
+        in
+        requiredPackages
+        ++ utils.removePackagesByName optionalPackages config.environment.plasma5.excludePackages
+
+        # Phonon audio backend
+        ++ lib.optional (cfg.phononBackend == "gstreamer") libsForQt5.phonon-backend-gstreamer
+        ++ lib.optional (cfg.phononBackend == "vlc") libsForQt5.phonon-backend-vlc
+
+        # Optional hardware support features
+        ++ lib.optionals config.hardware.bluetooth.enable [ bluedevil bluez-qt pkgs.openobex pkgs.obexftp ]
+        ++ lib.optional config.networking.networkmanager.enable plasma-nm
+        ++ lib.optional config.hardware.pulseaudio.enable plasma-pa
+        ++ lib.optional config.services.pipewire.pulse.enable plasma-pa
+        ++ lib.optional config.powerManagement.enable powerdevil
+        ++ lib.optional config.services.colord.enable pkgs.colord-kde
+        ++ lib.optional config.services.hardware.bolt.enable pkgs.plasma5Packages.plasma-thunderbolt
+        ++ lib.optionals config.services.samba.enable [ kdenetwork-filesharing pkgs.samba ]
+        ++ lib.optional config.services.xserver.wacom.enable pkgs.wacomtablet
+        ++ lib.optional config.services.flatpak.enable flatpak-kcm;
+
+      # Extra services for D-Bus activation
+      services.dbus.packages = [
+        plasma5.kactivitymanagerd
+      ];
+
+      environment.pathsToLink = [
+        # FIXME: modules should link subdirs of `/share` rather than relying on this
+        "/share"
+      ];
+
+      environment.etc."X11/xkb".source = xcfg.xkb.dir;
+
+      environment.sessionVariables = {
+        PLASMA_USE_QT_SCALING = mkIf cfg.useQtScaling "1";
+
+        # Needed for things that depend on other store.kde.org packages to install correctly,
+        # notably Plasma look-and-feel packages (a.k.a. Global Themes)
+        #
+        # FIXME: this is annoyingly impure and should really be fixed at source level somehow,
+        # but kpackage is a library so we can't just wrap the one thing invoking it and be done.
+        # This also means things won't work for people not on Plasma, but at least this way it
+        # works for SOME people.
+        KPACKAGE_DEP_RESOLVERS_PATH = "${pkgs.plasma5Packages.frameworkintegration.out}/libexec/kf5/kpackagehandlers";
+      };
+
+      # Enable GTK applications to load SVG icons
+      services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];
+
+      fonts.packages = with pkgs; [ cfg.notoPackage hack-font ];
+      fonts.fontconfig.defaultFonts = {
+        monospace = [ "Hack" "Noto Sans Mono" ];
+        sansSerif = [ "Noto Sans" ];
+        serif = [ "Noto Serif" ];
+      };
+
+      programs.ssh.askPassword = mkDefault "${plasma5.ksshaskpass.out}/bin/ksshaskpass";
+
+      # Enable helpful DBus services.
+      services.accounts-daemon.enable = true;
+      # When changing an account picture, accounts-daemon reads a temporary file containing the image, which systemsettings5 may place under /tmp.
+      systemd.services.accounts-daemon.serviceConfig.PrivateTmp = false;
+      services.power-profiles-daemon.enable = mkDefault true;
+      services.system-config-printer.enable = mkIf config.services.printing.enable (mkDefault true);
+      services.udisks2.enable = true;
+      services.upower.enable = config.powerManagement.enable;
+      services.xserver.libinput.enable = mkDefault true;
+
+      # Extra UDEV rules used by Solid
+      services.udev.packages = [
+        # libmtp has "bin", "dev", "out" outputs. UDEV rules file is in "out".
+        pkgs.libmtp.out
+        pkgs.media-player-info
+      ];
+
+      services.xserver.displayManager.sddm = {
+        theme = mkDefault "breeze";
+      };
+
+      security.pam.services.kde = { allowNullPassword = true; };
+
+      security.pam.services.login.enableKwallet = true;
+
+      systemd.user.services = {
+        plasma-early-setup = mkIf cfg.runUsingSystemd {
+          description = "Early Plasma setup";
+          wantedBy = [ "graphical-session-pre.target" ];
+          serviceConfig.Type = "oneshot";
+          script = activationScript;
+        };
+      };
+
+      xdg.portal.enable = true;
+      xdg.portal.extraPortals = [ plasma5.xdg-desktop-portal-kde ];
+      # xdg-desktop-portal-kde expects PipeWire to be running.
+      # This does not, by default, replace PulseAudio.
+      services.pipewire.enable = mkDefault true;
+
+      # Update the start menu for each user that is currently logged in
+      system.userActivationScripts.plasmaSetup = activationScript;
+
+      programs.firefox.nativeMessagingHosts.packages = [ pkgs.plasma5Packages.plasma-browser-integration ];
+    })
+
+    (mkIf (cfg.kwinrc != {}) {
+      environment.etc."xdg/kwinrc".text = lib.generators.toINI {} cfg.kwinrc;
+    })
+
+    (mkIf (cfg.kdeglobals != {}) {
+      environment.etc."xdg/kdeglobals".text = lib.generators.toINI {} cfg.kdeglobals;
+    })
+
+    # Plasma Desktop
+    (mkIf cfg.enable {
+
+      # Seed our configuration into nixos-generate-config
+      system.nixos-generate-config.desktopConfiguration = [
+        ''
+          # Enable the Plasma 5 Desktop Environment.
+          services.xserver.displayManager.sddm.enable = true;
+          services.xserver.desktopManager.plasma5.enable = true;
+        ''
+      ];
+
+      services.xserver.displayManager.sessionPackages = [ pkgs.libsForQt5.plasma5.plasma-workspace ];
+      # Default to `plasma` (X11) instead of `plasmawayland`, since Plasma Wayland currently has
+      # many tiny bugs.
+      # See: https://github.com/NixOS/nixpkgs/issues/143272
+      services.xserver.displayManager.defaultSession = mkDefault "plasma";
+
+      environment.systemPackages =
+        with libsForQt5;
+        with plasma5; with kdeGear; with kdeFrameworks;
+        let
+          requiredPackages = [
+            ksystemstats
+            kinfocenter
+            kmenuedit
+            plasma-systemmonitor
+            spectacle
+            systemsettings
+
+            dolphin
+            dolphin-plugins
+            ffmpegthumbs
+            kdegraphics-thumbnailers
+            kde-inotify-survey
+            kio-admin
+            kio-extras
+          ];
+          optionalPackages = [
+            ark
+            elisa
+            gwenview
+            okular
+            khelpcenter
+            print-manager
+          ];
+      in requiredPackages ++ utils.removePackagesByName optionalPackages config.environment.plasma5.excludePackages;
+
+      systemd.user.services = {
+        plasma-run-with-systemd = {
+          description = "Run KDE Plasma via systemd";
+          wantedBy = [ "basic.target" ];
+          serviceConfig.Type = "oneshot";
+          script = ''
+            ${set_XDG_CONFIG_HOME}
+
+            ${kdeFrameworks.kconfig}/bin/kwriteconfig5 \
+              --file startkderc --group General --key systemdBoot ${lib.boolToString cfg.runUsingSystemd}
+          '';
+        };
+      };
+    })
+
+    # Plasma Mobile
+    (mkIf cfg.mobile.enable {
+      assertions = [
+        {
+          # The user interface breaks without NetworkManager
+          assertion = config.networking.networkmanager.enable;
+          message = "Plasma Mobile requires NetworkManager.";
+        }
+        {
+          # The user interface breaks without bluetooth
+          assertion = config.hardware.bluetooth.enable;
+          message = "Plasma Mobile requires Bluetooth.";
+        }
+        {
+          # The user interface breaks without pulse
+          assertion = config.hardware.pulseaudio.enable || (config.services.pipewire.enable && config.services.pipewire.pulse.enable);
+          message = "Plasma Mobile requires pulseaudio.";
+        }
+      ];
+
+      environment.systemPackages =
+        with libsForQt5;
+        with plasma5; with kdeApplications; with kdeFrameworks;
+        [
+          # Basic packages without which Plasma Mobile fails to work properly.
+          plasma-mobile
+          plasma-nano
+          pkgs.maliit-framework
+          pkgs.maliit-keyboard
+        ]
+        ++ lib.optionals (cfg.mobile.installRecommendedSoftware) (with libsForQt5.plasmaMobileGear;[
+          # Additional software made for Plasma Mobile.
+          alligator
+          angelfish
+          audiotube
+          calindori
+          kalk
+          kasts
+          kclock
+          keysmith
+          koko
+          krecorder
+          ktrip
+          kweather
+          plasma-dialer
+          plasma-phonebook
+          plasma-settings
+          spacebar
+        ])
+      ;
+
+      # The following services are needed or the UI is broken.
+      hardware.bluetooth.enable = true;
+      hardware.pulseaudio.enable = true;
+      networking.networkmanager.enable = true;
+      # Required for autorotate
+      hardware.sensor.iio.enable = lib.mkDefault true;
+
+      # Recommendations can be found here:
+      #  - https://invent.kde.org/plasma-mobile/plasma-phone-settings/-/tree/master/etc/xdg
+      # This configuration is the minimum required for Plasma Mobile to *work*.
+      services.xserver.desktopManager.plasma5 = {
+        kdeglobals = {
+          KDE = {
+            # This forces a numeric PIN for the lockscreen, which is the
+            # recommendation from upstream.
+            LookAndFeelPackage = lib.mkDefault "org.kde.plasma.phone";
+          };
+        };
+        kwinrc = {
+          "Wayland" = {
+            "InputMethod[$e]" = "/run/current-system/sw/share/applications/com.github.maliit.keyboard.desktop";
+            "VirtualKeyboardEnabled" = "true";
+          };
+          "org.kde.kdecoration2" = {
+            # No decorations (title bar)
+            NoPlugin = lib.mkDefault "true";
+          };
+        };
+      };
+
+      services.xserver.displayManager.sessionPackages = [ pkgs.libsForQt5.plasma5.plasma-mobile ];
+    })
+
+    # Plasma Bigscreen
+    (mkIf cfg.bigscreen.enable {
+      environment.systemPackages =
+        with pkgs.plasma5Packages;
+        [
+          plasma-nano
+          plasma-settings
+          plasma-bigscreen
+          plasma-remotecontrollers
+
+          aura-browser
+          plank-player
+
+          plasma-pa
+          plasma-nm
+          kdeconnect-kde
+        ];
+
+      services.xserver.displayManager.sessionPackages = [ pkgs.plasma5Packages.plasma-bigscreen ];
+
+      # required for plasma-remotecontrollers to work correctly
+      hardware.uinput.enable = true;
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/retroarch.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/retroarch.nix
new file mode 100644
index 000000000000..5552f37612a2
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/retroarch.nix
@@ -0,0 +1,40 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.xserver.desktopManager.retroarch;
+
+in {
+  options.services.xserver.desktopManager.retroarch = {
+    enable = mkEnableOption (lib.mdDoc "RetroArch");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.retroarch;
+      defaultText = literalExpression "pkgs.retroarch";
+      example = literalExpression "pkgs.retroarch-full";
+      description = lib.mdDoc "RetroArch package to use.";
+    };
+
+    extraArgs = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--verbose" "--host" ];
+      description = lib.mdDoc "Extra arguments to pass to RetroArch.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.desktopManager.session = [{
+      name = "RetroArch";
+      start = ''
+        ${cfg.package}/bin/retroarch -f ${escapeShellArgs cfg.extraArgs} &
+        waitPID=$!
+      '';
+    }];
+
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta.maintainers = with maintainers; [ j0hax ];
+}
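
A minimal sketch of enabling the RetroArch kiosk session declared above; `pkgs.retroarch-full` is the option's own example value, and LightDM is only one possible display manager choice here:

    { pkgs, ... }:
    {
      services.xserver.enable = true;
      services.xserver.displayManager.lightdm.enable = true;  # assumption: any display manager works
      services.xserver.desktopManager.retroarch = {
        enable = true;
        package = pkgs.retroarch-full;
        extraArgs = [ "--verbose" ];
      };
    }
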
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/surf-display.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/surf-display.nix
new file mode 100644
index 000000000000..38ebb9d02b4a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/surf-display.nix
@@ -0,0 +1,128 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.desktopManager.surf-display;
+
+  surfDisplayConf = ''
+    # Surf Kiosk Display: Wrap around surf browser and turn your
+    # system into a browser screen in KIOSK-mode.
+
+    # default download URI for all display screens if not configured individually
+    DEFAULT_WWW_URI="${cfg.defaultWwwUri}"
+
+    # Enforce fixed resolution for all displays (default: not set):
+    #DEFAULT_RESOLUTION="1920x1080"
+
+    # HTTP proxy URL, if needed (default: not set).
+    #HTTP_PROXY_URL="http://webcache:3128"
+
+    # Setting for internal inactivity timer to restart surf-display
+    # if the user goes inactive/idle.
+    INACTIVITY_INTERVAL="${builtins.toString cfg.inactivityInterval}"
+
+    # log to syslog instead of .xsession-errors
+    LOG_TO_SYSLOG="yes"
+
+    # Launch pulseaudio daemon if not already running.
+    WITH_PULSEAUDIO="yes"
+
+    # screensaver settings, see "man 1 xset" for possible options
+    SCREENSAVER_SETTINGS="${cfg.screensaverSettings}"
+
+    # disable right and middle pointer device click in browser sessions while keeping
+    # scrolling wheels' functionality intact... (consider "pointer" subcommand on
+    # xmodmap man page for details).
+    POINTER_BUTTON_MAP="${cfg.pointerButtonMap}"
+
+    # Hide idle mouse pointer.
+    HIDE_IDLE_POINTER="${cfg.hideIdlePointer}"
+
+    ${cfg.extraConfig}
+  '';
+
+in {
+  options = {
+    services.xserver.desktopManager.surf-display = {
+      enable = mkEnableOption (lib.mdDoc "surf-display as a kiosk browser session");
+
+      defaultWwwUri = mkOption {
+        type = types.str;
+        default = "${pkgs.surf-display}/share/surf-display/empty-page.html";
+        defaultText = literalExpression ''"''${pkgs.surf-display}/share/surf-display/empty-page.html"'';
+        example = "https://www.example.com/";
+        description = lib.mdDoc "Default URI to display.";
+      };
+
+      inactivityInterval = mkOption {
+        type = types.int;
+        default = 300;
+        example = 0;
+        description = lib.mdDoc ''
+          Setting for internal inactivity timer to restart surf-display if the
+          user goes inactive/idle to get a fresh session for the next user of
+          the kiosk.
+
+          If this value is set to zero, the whole feature of restarting due to
+          inactivity is disabled.
+        '';
+      };
+
+      screensaverSettings = mkOption {
+        type = types.separatedString " ";
+        default = "";
+        description = lib.mdDoc ''
+          Screensaver settings, see `man 1 xset` for possible options.
+        '';
+      };
+
+      pointerButtonMap = mkOption {
+        type = types.str;
+        default = "1 0 0 4 5 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0";
+        description = lib.mdDoc ''
+          Disable right and middle pointer device click in browser sessions
+          while keeping scrolling wheels' functionality intact. See pointer
+          subcommand on `man xmodmap` for details.
+        '';
+      };
+
+      hideIdlePointer = mkOption {
+        type = types.str;
+        default = "yes";
+        example = "no";
+        description = lib.mdDoc "Hide idle mouse pointer.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          # Enforce fixed resolution for all displays (default: not set):
+          DEFAULT_RESOLUTION="1920x1080"
+
+          # HTTP proxy URL, if needed (default: not set).
+          HTTP_PROXY_URL="http://webcache:3128"
+
+          # Configure individual display screens with host specific parameters:
+          DISPLAYS['display-host-0']="www_uri=https://www.displayserver.comany.net/display-1/index.html"
+          DISPLAYS['display-host-1']="www_uri=https://www.displayserver.comany.net/display-2/index.html"
+          DISPLAYS['display-host-2']="www_uri=https://www.displayserver.comany.net/display-3/index.html|res=1920x1280"
+          DISPLAYS['display-host-3']="www_uri=https://www.displayserver.comany.net/display-4/index.html|res=1280x1024"
+          DISPLAYS['display-host-local-file']="www_uri=file:///usr/share/doc/surf-display/empty-page.html"
+        '';
+        description = lib.mdDoc ''
+          Extra configuration options to append to `/etc/default/surf-display`.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.displayManager.sessionPackages = [
+      pkgs.surf-display
+    ];
+
+    environment.etc."default/surf-display".text = surfDisplayConf;
+  };
+}
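
A minimal sketch of a kiosk configuration built from the options declared above; the URI is the option's own example value and the remaining settings are illustrative:

    {
      services.xserver.enable = true;
      services.xserver.desktopManager.surf-display = {
        enable = true;
        defaultWwwUri = "https://www.example.com/";
        # 0 disables the idle-restart feature entirely, per the option description.
        inactivityInterval = 0;
        extraConfig = ''
          DEFAULT_RESOLUTION="1920x1080"
        '';
      };
    }
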
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix
new file mode 100644
index 000000000000..191b3690c02f
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/xfce.nix
@@ -0,0 +1,182 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.desktopManager.xfce;
+  excludePackages = config.environment.xfce.excludePackages;
+
+in
+{
+  meta = {
+    maintainers = teams.xfce.members;
+  };
+
+  imports = [
+    # added 2019-08-18
+    # needed to preserve some semblance of UI familiarity
+    # with original XFCE module
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "xfce4-14" "extraSessionCommands" ]
+      [ "services" "xserver" "displayManager" "sessionCommands" ])
+
+    # added 2019-11-04
+    # xfce4-14 module removed and promoted to xfce.
+    # Needed for configs that used xfce4-14 module to migrate to this one.
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "xfce4-14" "enable" ]
+      [ "services" "xserver" "desktopManager" "xfce" "enable" ])
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "xfce4-14" "noDesktop" ]
+      [ "services" "xserver" "desktopManager" "xfce" "noDesktop" ])
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "xfce4-14" "enableXfwm" ]
+      [ "services" "xserver" "desktopManager" "xfce" "enableXfwm" ])
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "xfce" "extraSessionCommands" ]
+      [ "services" "xserver" "displayManager" "sessionCommands" ])
+    (mkRemovedOptionModule [ "services" "xserver" "desktopManager" "xfce" "screenLock" ] "")
+
+    # added 2022-06-26
+    # thunar has its own module
+    (mkRenamedOptionModule
+      [ "services" "xserver" "desktopManager" "xfce" "thunarPlugins" ]
+      [ "programs" "thunar" "plugins" ])
+  ];
+
+  options = {
+    services.xserver.desktopManager.xfce = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable the Xfce desktop environment.";
+      };
+
+      noDesktop = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Don't install XFCE desktop components (xfdesktop and panel).";
+      };
+
+      enableXfwm = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Enable the XFWM (default) window manager.";
+      };
+
+      enableScreensaver = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Enable the XFCE screensaver.";
+      };
+    };
+
+    environment.xfce.excludePackages = mkOption {
+      default = [];
+      example = literalExpression "[ pkgs.xfce.xfce4-volumed-pulse ]";
+      type = types.listOf types.package;
+      description = lib.mdDoc "Which packages XFCE should exclude from the default environment";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = utils.removePackagesByName (with pkgs.xfce // pkgs; [
+      glib # for gsettings
+      gtk3.out # gtk-update-icon-cache
+
+      gnome.gnome-themes-extra
+      gnome.adwaita-icon-theme
+      hicolor-icon-theme
+      tango-icon-theme
+      xfce4-icon-theme
+
+      desktop-file-utils
+      shared-mime-info # for update-mime-database
+
+      # For a polkit authentication agent
+      polkit_gnome
+
+      # Needed by Xfce's xinitrc script
+      xdg-user-dirs # Update user dirs as described in https://freedesktop.org/wiki/Software/xdg-user-dirs/
+
+      exo
+      garcon
+      libxfce4ui
+
+      mousepad
+      parole
+      ristretto
+      xfce4-appfinder
+      xfce4-notifyd
+      xfce4-screenshooter
+      xfce4-session
+      xfce4-settings
+      xfce4-taskmanager
+      xfce4-terminal
+    ] # TODO: NetworkManager doesn't belong here
+      ++ optional config.networking.networkmanager.enable networkmanagerapplet
+      ++ optional config.powerManagement.enable xfce4-power-manager
+      ++ optionals config.hardware.pulseaudio.enable [
+        pavucontrol
+        # volume up/down keys support:
+        # xfce4-pulseaudio-plugin includes all the functionalities of xfce4-volumed-pulse
+        # but can only be used with xfce4-panel, so for no-desktop usage we still include
+        # xfce4-volumed-pulse
+        (if cfg.noDesktop then xfce4-volumed-pulse else xfce4-pulseaudio-plugin)
+      ] ++ optionals cfg.enableXfwm [
+        xfwm4
+        xfwm4-themes
+      ] ++ optionals (!cfg.noDesktop) [
+        xfce4-panel
+        xfdesktop
+      ] ++ optional cfg.enableScreensaver xfce4-screensaver) excludePackages;
+
+    programs.xfconf.enable = true;
+    programs.thunar.enable = true;
+
+    environment.pathsToLink = [
+      "/share/xfce4"
+      "/lib/xfce4"
+      "/share/gtksourceview-3.0"
+      "/share/gtksourceview-4.0"
+    ];
+
+    services.xserver.desktopManager.session = [{
+      name = "xfce";
+      desktopNames = [ "XFCE" ];
+      bgSupport = true;
+      start = ''
+        ${pkgs.runtimeShell} ${pkgs.xfce.xfce4-session.xinitrc} &
+        waitPID=$!
+      '';
+    }];
+
+    services.xserver.updateDbusEnvironment = true;
+    services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];
+
+    # Enable helpful DBus services.
+    services.udisks2.enable = true;
+    security.polkit.enable = true;
+    services.accounts-daemon.enable = true;
+    services.upower.enable = config.powerManagement.enable;
+    services.gnome.glib-networking.enable = true;
+    services.gvfs.enable = true;
+    services.tumbler.enable = true;
+    services.system-config-printer.enable = (mkIf config.services.printing.enable (mkDefault true));
+    services.xserver.libinput.enable = mkDefault true; # used in xfce4-settings-manager
+
+    # Enable default programs
+    programs.dconf.enable = true;
+
+    # Shell integration for VTE terminals
+    programs.bash.vteIntegration = mkDefault true;
+    programs.zsh.vteIntegration = mkDefault true;
+
+    # Systemd services
+    systemd.packages = utils.removePackagesByName (with pkgs.xfce; [
+      xfce4-notifyd
+    ]) excludePackages;
+
+    security.pam.services.xfce4-screensaver.unixAuth = cfg.enableScreensaver;
+  };
+}
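
A minimal sketch of enabling Xfce and trimming the default package set through the exclusion list declared above; the excluded packages are arbitrary picks from the list this module installs:

    { pkgs, ... }:
    {
      services.xserver.enable = true;
      services.xserver.desktopManager.xfce.enable = true;
      services.xserver.desktopManager.xfce.enableScreensaver = false;
      environment.xfce.excludePackages = with pkgs.xfce; [ parole ristretto ];
    }
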
diff --git a/nixpkgs/nixos/modules/services/x11/desktop-managers/xterm.nix b/nixpkgs/nixos/modules/services/x11/desktop-managers/xterm.nix
new file mode 100644
index 000000000000..2b439effabe5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/desktop-managers/xterm.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.desktopManager.xterm;
+  xSessionEnabled = config.services.xserver.enable;
+
+in
+
+{
+  options = {
+
+    services.xserver.desktopManager.xterm.enable = mkOption {
+      type = types.bool;
+      default = versionOlder config.system.stateVersion "19.09" && xSessionEnabled;
+      defaultText = literalExpression ''versionOlder config.system.stateVersion "19.09" && config.services.xserver.enable;'';
+      description = lib.mdDoc "Enable a xterm terminal as a desktop manager.";
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    services.xserver.desktopManager.session = singleton
+      { name = "xterm";
+        start = ''
+          ${pkgs.xterm}/bin/xterm -ls &
+          waitPID=$!
+        '';
+      };
+
+    environment.systemPackages = [ pkgs.xterm ];
+
+  };
+
+}
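
Because the default above is tied to `system.stateVersion`, systems with a stateVersion of 19.09 or newer have to opt in explicitly; a minimal sketch:

    {
      services.xserver.enable = true;
      services.xserver.desktopManager.xterm.enable = true;
    }
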
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/account-service-util.nix b/nixpkgs/nixos/modules/services/x11/display-managers/account-service-util.nix
new file mode 100644
index 000000000000..00ffd91cb2f6
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/account-service-util.nix
@@ -0,0 +1,44 @@
+{ accountsservice
+, glib
+, gobject-introspection
+, python3
+, wrapGAppsNoGuiHook
+, lib
+}:
+
+python3.pkgs.buildPythonApplication {
+  name = "set-session";
+
+  format = "other";
+
+  src = ./set-session.py;
+
+  dontUnpack = true;
+
+  strictDeps = false;
+
+  nativeBuildInputs = [
+    wrapGAppsNoGuiHook
+    gobject-introspection
+  ];
+
+  buildInputs = [
+    accountsservice
+    glib
+  ];
+
+  propagatedBuildInputs = with python3.pkgs; [
+    pygobject3
+    ordered-set
+  ];
+
+  installPhase = ''
+    mkdir -p $out/bin
+    cp $src $out/bin/set-session
+    chmod +x $out/bin/set-session
+  '';
+
+  meta = with lib; {
+    maintainers = with maintainers; [ ] ++ teams.pantheon.members;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/default.nix b/nixpkgs/nixos/modules/services/x11/display-managers/default.nix
new file mode 100644
index 000000000000..16a7ff1a4bd5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/default.nix
@@ -0,0 +1,530 @@
+# This module declares the options to define a *display manager*, the
+# program responsible for handling X logins (such as LightDM, GDM, or SDDM).
+# The display manager allows the user to select a *session
+# type*. When the user logs in, the display manager starts the
+# *session script* ("xsession" below) to launch the selected session
+# type. The session type defines two things: the *desktop manager*
+# (e.g., KDE, Gnome or a plain xterm), and optionally the *window
+# manager* (e.g. kwin or twm).
+
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver;
+  opt = options.services.xserver;
+  xorg = pkgs.xorg;
+
+  fontconfig = config.fonts.fontconfig;
+  xresourcesXft = pkgs.writeText "Xresources-Xft" ''
+    Xft.antialias: ${if fontconfig.antialias then "1" else "0"}
+    Xft.rgba: ${fontconfig.subpixel.rgba}
+    Xft.lcdfilter: lcd${fontconfig.subpixel.lcdfilter}
+    Xft.hinting: ${if fontconfig.hinting.enable then "1" else "0"}
+    Xft.autohint: ${if fontconfig.hinting.autohint then "1" else "0"}
+    Xft.hintstyle: ${fontconfig.hinting.style}
+  '';
+
+  # FIXME: this is an ugly hack.
+  # Some sessions (read: most WMs) don't activate systemd's `graphical-session.target`.
+  # Other sessions (read: most non-WMs) expect `graphical-session.target` to be reached
+  # when the entire session is actually ready. We used to just unconditionally force
+  # `graphical-session.target` to be activated in the session wrapper so things like
+  # xdg-autostart-generator work on sessions that are wrong, but this broke sessions
+  # that do things right. So, preserve this behavior (with some extra steps) by matching
+  # on XDG_CURRENT_DESKTOP and deliberately ignoring sessions we know can do the right thing.
+  fakeSession = action: ''
+      session_is_systemd_aware=$(
+        IFS=:
+        for i in $XDG_CURRENT_DESKTOP; do
+          case $i in
+            KDE|GNOME|X-NIXOS-SYSTEMD-AWARE) echo "1"; exit; ;;
+            *) ;;
+          esac
+        done
+      )
+
+      if [ -z "$session_is_systemd_aware" ]; then
+        /run/current-system/systemd/bin/systemctl --user ${action} nixos-fake-graphical-session.target
+      fi
+  '';
+
+  # file provided by services.xserver.displayManager.sessionData.wrapper
+  xsessionWrapper = pkgs.writeScript "xsession-wrapper"
+    ''
+      #! ${pkgs.bash}/bin/bash
+
+      # Shared environment setup for graphical sessions.
+
+      . /etc/profile
+      if test -f ~/.profile; then
+          source ~/.profile
+      fi
+
+      cd "$HOME"
+
+      # Allow the user to execute commands at the beginning of the X session.
+      if test -f ~/.xprofile; then
+          source ~/.xprofile
+      fi
+
+      ${optionalString cfg.displayManager.job.logToJournal ''
+        if [ -z "$_DID_SYSTEMD_CAT" ]; then
+          export _DID_SYSTEMD_CAT=1
+          exec ${config.systemd.package}/bin/systemd-cat -t xsession "$0" "$@"
+        fi
+      ''}
+
+      ${optionalString cfg.displayManager.job.logToFile ''
+        exec &> >(tee ~/.xsession-errors)
+      ''}
+
+      # Load X defaults. This should probably be safe on wayland too.
+      ${xorg.xrdb}/bin/xrdb -merge ${xresourcesXft}
+      if test -e ~/.Xresources; then
+          ${xorg.xrdb}/bin/xrdb -merge ~/.Xresources
+      elif test -e ~/.Xdefaults; then
+          ${xorg.xrdb}/bin/xrdb -merge ~/.Xdefaults
+      fi
+
+      # Import environment variables into the systemd user environment.
+      ${optionalString (cfg.displayManager.importedVariables != []) (
+        "/run/current-system/systemd/bin/systemctl --user import-environment "
+          + toString (unique cfg.displayManager.importedVariables)
+      )}
+
+      # Speed up application start by 50-150ms according to
+      # https://kdemonkey.blogspot.com/2008/04/magic-trick.html
+      compose_cache="''${XCOMPOSECACHE:-$HOME/.compose-cache}"
+      mkdir -p "$compose_cache"
+      # To avoid accidentally deleting a wrongly set up XCOMPOSECACHE directory,
+      # defensively try to delete cache *files* only, following the file format specified in
+      # https://gitlab.freedesktop.org/xorg/lib/libx11/-/blob/master/modules/im/ximcp/imLcIm.c#L353-358
+      # sprintf (*res, "%s/%c%d_%03x_%08x_%08x", dir, _XimGetMyEndian(), XIM_CACHE_VERSION, (unsigned int)sizeof (DefTree), hash, hash2);
+      ${pkgs.findutils}/bin/find "$compose_cache" -maxdepth 1 -regextype posix-extended -regex '.*/[Bl][0-9]+_[0-9a-f]{3}_[0-9a-f]{8}_[0-9a-f]{8}' -delete
+      unset compose_cache
+
+      # Work around KDE errors when a user first logs in and
+      # .local/share doesn't exist yet.
+      mkdir -p "''${XDG_DATA_HOME:-$HOME/.local/share}"
+
+      unset _DID_SYSTEMD_CAT
+
+      ${cfg.displayManager.sessionCommands}
+
+      ${fakeSession "start"}
+
+      # Allow the user to setup a custom session type.
+      if test -x ~/.xsession; then
+          eval exec ~/.xsession "$@"
+      fi
+
+      if test "$1"; then
+          # Run the supplied session command. Remove any double quotes with eval.
+          eval exec "$@"
+      else
+          # TODO: Do we need this? Should not the session always exist?
+          echo "error: unknown session $1" 1>&2
+          exit 1
+      fi
+    '';
+
+  installedSessions = pkgs.runCommand "desktops"
+    { # trivial derivation
+      preferLocalBuild = true;
+      allowSubstitutes = false;
+    }
+    ''
+      mkdir -p "$out/share/"{xsessions,wayland-sessions}
+
+      ${concatMapStrings (pkg: ''
+        for n in ${concatStringsSep " " pkg.providedSessions}; do
+          if ! test -f ${pkg}/share/wayland-sessions/$n.desktop -o \
+                    -f ${pkg}/share/xsessions/$n.desktop; then
+            echo "Couldn't find provided session name, $n.desktop, in session package ${pkg.name}:"
+            echo "  ${pkg}"
+            return 1
+          fi
+        done
+
+        if test -d ${pkg}/share/xsessions; then
+          ${pkgs.buildPackages.xorg.lndir}/bin/lndir ${pkg}/share/xsessions $out/share/xsessions
+        fi
+        if test -d ${pkg}/share/wayland-sessions; then
+          ${pkgs.buildPackages.xorg.lndir}/bin/lndir ${pkg}/share/wayland-sessions $out/share/wayland-sessions
+        fi
+      '') cfg.displayManager.sessionPackages}
+    '';
+
+  dmDefault = cfg.desktopManager.default;
+  # fallback default for cases when only default wm is set
+  dmFallbackDefault = if dmDefault != null then dmDefault else "none";
+  wmDefault = cfg.windowManager.default;
+
+  defaultSessionFromLegacyOptions = dmFallbackDefault + optionalString (wmDefault != null && wmDefault != "none") "+${wmDefault}";
+
+in
+
+{
+  options = {
+
+    services.xserver.displayManager = {
+
+      xauthBin = mkOption {
+        internal = true;
+        default = "${xorg.xauth}/bin/xauth";
+        defaultText = literalExpression ''"''${pkgs.xorg.xauth}/bin/xauth"'';
+        description = lib.mdDoc "Path to the {command}`xauth` program used by display managers.";
+      };
+
+      xserverBin = mkOption {
+        type = types.path;
+        description = lib.mdDoc "Path to the X server used by display managers.";
+      };
+
+      xserverArgs = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "-ac" "-logverbose" "-verbose" "-nolisten tcp" ];
+        description = lib.mdDoc "List of arguments for the X server.";
+      };
+
+      setupCommands = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Shell commands executed just after the X server has started.
+
+          This option is only effective for display managers for which this feature
+          is supported; currently these are LightDM, GDM and SDDM.
+        '';
+      };
+
+      sessionCommands = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          ''
+            xmessage "Hello World!" &
+          '';
+        description = lib.mdDoc ''
+          Shell commands executed just before the window or desktop manager is
+          started. These commands are not currently sourced for Wayland sessions.
+        '';
+      };
+
+      hiddenUsers = mkOption {
+        type = types.listOf types.str;
+        default = [ "nobody" ];
+        description = lib.mdDoc ''
+          A list of users which will not be shown in the display manager.
+        '';
+      };
+
+      sessionPackages = mkOption {
+        type = with types; listOf (package // {
+          description = "package with provided sessions";
+          check = p: assertMsg
+            (package.check p && p ? providedSessions
+            && p.providedSessions != [] && all isString p.providedSessions)
+            ''
+              Package, '${p.name}', did not specify any session names, as strings, in
+              'passthru.providedSessions'. This is required when used as a session package.
+
+              The session names can be looked up in:
+                ${p}/share/xsessions
+                ${p}/share/wayland-sessions
+           '';
+        });
+        default = [];
+        description = lib.mdDoc ''
+          A list of packages containing x11 or wayland session files to be passed to the display manager.
+        '';
+      };
+
+      session = mkOption {
+        default = [];
+        type = types.listOf types.attrs;
+        example = literalExpression
+          ''
+            [ { manage = "desktop";
+                name = "xterm";
+                start = '''
+                  ''${pkgs.xterm}/bin/xterm -ls &
+                  waitPID=$!
+                ''';
+              }
+            ]
+          '';
+        description = lib.mdDoc ''
+          List of sessions supported with the command used to start each
+          session.  Each session script can set the
+          {var}`waitPID` shell variable to make this script
+          wait until the end of the user session.  Each script is used
+          to define either a window manager or a desktop manager.  These
+          can be differentiated by setting the attribute
+          {var}`manage` either to `"window"`
+          or `"desktop"`.
+
+          The list of desktop manager and window manager should appear
+          inside the display manager with the desktop manager name
+          followed by the window manager name.
+        '';
+      };
+
+      sessionData = mkOption {
+        description = lib.mdDoc "Data exported for display managers’ convenience";
+        internal = true;
+        default = {};
+        apply = val: {
+          wrapper = xsessionWrapper;
+          desktops = installedSessions;
+          sessionNames = concatMap (p: p.providedSessions) cfg.displayManager.sessionPackages;
+          # We do not want to force users to set defaultSession when they have only a single DE.
+          autologinSession =
+            if cfg.displayManager.defaultSession != null then
+              cfg.displayManager.defaultSession
+            else if cfg.displayManager.sessionData.sessionNames != [] then
+              head cfg.displayManager.sessionData.sessionNames
+            else
+              null;
+        };
+      };
+
+      defaultSession = mkOption {
+        type = with types; nullOr str // {
+          description = "session name";
+          check = d:
+            assertMsg (d != null -> (str.check d && elem d cfg.displayManager.sessionData.sessionNames)) ''
+                Default graphical session, '${d}', not found.
+                Valid names for 'services.xserver.displayManager.defaultSession' are:
+                  ${concatStringsSep "\n  " cfg.displayManager.sessionData.sessionNames}
+              '';
+        };
+        default =
+          if dmDefault != null || wmDefault != null then
+            defaultSessionFromLegacyOptions
+          else
+            null;
+        defaultText = literalMD ''
+          Taken from display manager settings or window manager settings, if either is set.
+        '';
+        example = "gnome";
+        description = lib.mdDoc ''
+          Graphical session to pre-select in the session chooser (only effective for GDM, LightDM and SDDM).
+
+          On GDM, LightDM and SDDM, it will also be used as a session for auto-login.
+        '';
+      };
+
+      importedVariables = mkOption {
+        type = types.listOf (types.strMatching "[a-zA-Z_][a-zA-Z0-9_]*");
+        visible = false;
+        description = lib.mdDoc ''
+          Environment variables to import into the systemd user environment.
+        '';
+      };
+
+      job = {
+
+        preStart = mkOption {
+          type = types.lines;
+          default = "";
+          example = "rm -f /var/log/my-display-manager.log";
+          description = lib.mdDoc "Script executed before the display manager is started.";
+        };
+
+        execCmd = mkOption {
+          type = types.str;
+          example = literalExpression ''"''${pkgs.lightdm}/bin/lightdm"'';
+          description = lib.mdDoc "Command to start the display manager.";
+        };
+
+        environment = mkOption {
+          type = types.attrsOf types.unspecified;
+          default = {};
+          description = lib.mdDoc "Additional environment variables needed by the display manager.";
+        };
+
+        logToFile = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            Whether the display manager redirects the output of the
+            session script to {file}`~/.xsession-errors`.
+          '';
+        };
+
+        logToJournal = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Whether the display manager redirects the output of the
+            session script to the systemd journal.
+          '';
+        };
+
+      };
+
+      # Configuration for automatic login. Common for all DM.
+      autoLogin = mkOption {
+        type = types.submodule ({ config, options, ... }: {
+          options = {
+            enable = mkOption {
+              type = types.bool;
+              default = config.user != null;
+              defaultText = literalExpression "config.${options.user} != null";
+              description = lib.mdDoc ''
+                Automatically log in as {option}`autoLogin.user`.
+              '';
+            };
+
+            user = mkOption {
+              type = types.nullOr types.str;
+              default = null;
+              description = lib.mdDoc ''
+                User to be used for the automatic login.
+              '';
+            };
+          };
+        });
+
+        default = {};
+        description = lib.mdDoc ''
+          Auto login configuration attrset.
+        '';
+      };
+
+    };
+
+  };
+
+  config = {
+    assertions = [
+      { assertion = cfg.displayManager.autoLogin.enable -> cfg.displayManager.autoLogin.user != null;
+        message = ''
+          services.xserver.displayManager.autoLogin.enable requires services.xserver.displayManager.autoLogin.user to be set
+        '';
+      }
+      {
+        assertion = cfg.desktopManager.default != null || cfg.windowManager.default != null -> cfg.displayManager.defaultSession == defaultSessionFromLegacyOptions;
+        message = "You cannot use both services.xserver.displayManager.defaultSession option and legacy options (services.xserver.desktopManager.default and services.xserver.windowManager.default).";
+      }
+    ];
+
+    warnings =
+      mkIf (dmDefault != null || wmDefault != null) [
+        ''
+          The following options are deprecated:
+            ${concatStringsSep "\n  " (map ({c, t}: t) (filter ({c, t}: c != null) [
+            { c = dmDefault; t = "- services.xserver.desktopManager.default"; }
+            { c = wmDefault; t = "- services.xserver.windowManager.default"; }
+            ]))}
+          Please use
+            services.xserver.displayManager.defaultSession = "${defaultSessionFromLegacyOptions}";
+          instead.
+        ''
+      ];
+
+    services.xserver.displayManager.xserverBin = "${xorg.xorgserver.out}/bin/X";
+
+    services.xserver.displayManager.importedVariables = [
+      # This is required by user units using the session bus.
+      "DBUS_SESSION_BUS_ADDRESS"
+      # These are needed by the ssh-agent unit.
+      "DISPLAY"
+      "XAUTHORITY"
+      # This is required to specify session within user units (e.g. loginctl lock-session).
+      "XDG_SESSION_ID"
+    ];
+
+    systemd.user.targets.nixos-fake-graphical-session = {
+      unitConfig = {
+        Description = "Fake graphical-session target for non-systemd-aware sessions";
+        BindsTo = "graphical-session.target";
+      };
+    };
+
+    # Create desktop files and scripts for starting sessions for WMs/DMs
+    # that do not have upstream session files (those defined using services.{display,desktop,window}Manager.session options).
+    services.xserver.displayManager.sessionPackages =
+      let
+        dms = filter (s: s.manage == "desktop") cfg.displayManager.session;
+        wms = filter (s: s.manage == "window") cfg.displayManager.session;
+
+        # Script responsible for starting the window manager and the desktop manager.
+        xsession = dm: wm: pkgs.writeScript "xsession" ''
+          #! ${pkgs.bash}/bin/bash
+
+          # Legacy session script used to construct .desktop files from
+          # `services.xserver.displayManager.session` entries. Called from
+          # `sessionWrapper`.
+
+          # Start the window manager.
+          ${wm.start}
+
+          # Start the desktop manager.
+          ${dm.start}
+
+          ${optionalString cfg.updateDbusEnvironment ''
+            ${lib.getBin pkgs.dbus}/bin/dbus-update-activation-environment --systemd --all
+          ''}
+
+          test -n "$waitPID" && wait "$waitPID"
+
+          ${fakeSession "stop"}
+
+          exit 0
+        '';
+      in
+        # We will generate every possible pair of WM and DM.
+        concatLists (
+            builtins.map
+            ({dm, wm}: let
+              sessionName = "${dm.name}${optionalString (wm.name != "none") ("+" + wm.name)}";
+              script = xsession dm wm;
+              desktopNames = if dm ? desktopNames
+                             then concatStringsSep ";" dm.desktopNames
+                             else sessionName;
+            in
+              optional (dm.name != "none" || wm.name != "none")
+                (pkgs.writeTextFile {
+                  name = "${sessionName}-xsession";
+                  destination = "/share/xsessions/${sessionName}.desktop";
+                  # Desktop Entry Specification:
+                  # - https://standards.freedesktop.org/desktop-entry-spec/latest/
+                  # - https://standards.freedesktop.org/desktop-entry-spec/latest/ar01s06.html
+                  text = ''
+                    [Desktop Entry]
+                    Version=1.0
+                    Type=XSession
+                    TryExec=${script}
+                    Exec=${script}
+                    Name=${sessionName}
+                    DesktopNames=${desktopNames}
+                  '';
+                } // {
+                  providedSessions = [ sessionName ];
+                })
+            )
+            (cartesianProductOfSets { dm = dms; wm = wms; })
+          );
+
+    # Make xsessions and wayland sessions available in XDG_DATA_DIRS
+    # as some programs have behavior that depends on them being present
+    environment.sessionVariables.XDG_DATA_DIRS = [
+      "${cfg.displayManager.sessionData.desktops}/share"
+    ];
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "xserver" "displayManager" "desktopManagerHandlesLidAndPower" ]
+     "The option is no longer necessary because all display managers have already delegated lid management to systemd.")
+    (mkRenamedOptionModule [ "services" "xserver" "displayManager" "job" "logsXsession" ] [ "services" "xserver" "displayManager" "job" "logToFile" ])
+    (mkRenamedOptionModule [ "services" "xserver" "displayManager" "logToJournal" ] [ "services" "xserver" "displayManager" "job" "logToJournal" ])
+    (mkRenamedOptionModule [ "services" "xserver" "displayManager" "extraSessionFilesPackages" ] [ "services" "xserver" "displayManager" "sessionPackages" ])
+  ];
+
+}
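
A minimal sketch tying the shared display-manager options together: pre-selecting a session and enabling auto-login. The session name "xfce" and the user "alice" are assumptions; the name must be one of `sessionData.sessionNames`, as enforced by the `defaultSession` type above:

    {
      services.xserver.enable = true;
      services.xserver.displayManager.defaultSession = "xfce";
      services.xserver.displayManager.autoLogin = {
        enable = true;
        user = "alice";  # hypothetical user name
      };
    }
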
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix b/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
new file mode 100644
index 000000000000..400e5601dc59
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/gdm.nix
@@ -0,0 +1,330 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.displayManager;
+  gdm = pkgs.gnome.gdm;
+  settingsFormat = pkgs.formats.ini { };
+  configFile = settingsFormat.generate "custom.conf" cfg.gdm.settings;
+
+  xSessionWrapper = if (cfg.setupCommands == "") then null else
+    pkgs.writeScript "gdm-x-session-wrapper" ''
+      #!${pkgs.bash}/bin/bash
+      ${cfg.setupCommands}
+      exec "$@"
+    '';
+
+  # Solves problems like:
+  # https://wiki.archlinux.org/index.php/Talk:Bluetooth_headset#GDMs_pulseaudio_instance_captures_bluetooth_headset
+  # Instead of blacklisting plugins, we use Fedora's PulseAudio configuration for GDM:
+  # https://src.fedoraproject.org/rpms/gdm/blob/master/f/default.pa-for-gdm
+  pulseConfig = pkgs.writeText "default.pa" ''
+    load-module module-device-restore
+    load-module module-card-restore
+    load-module module-udev-detect
+    load-module module-native-protocol-unix
+    load-module module-default-device-restore
+    load-module module-always-sink
+    load-module module-intended-roles
+    load-module module-suspend-on-idle
+    load-module module-position-event-sounds
+  '';
+
+  defaultSessionName = config.services.xserver.displayManager.defaultSession;
+
+  setSessionScript = pkgs.callPackage ./account-service-util.nix { };
+in
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "xserver" "displayManager" "gdm" "autoLogin" "enable" ] [
+      "services"
+      "xserver"
+      "displayManager"
+      "autoLogin"
+      "enable"
+    ])
+    (mkRenamedOptionModule [ "services" "xserver" "displayManager" "gdm" "autoLogin" "user" ] [
+      "services"
+      "xserver"
+      "displayManager"
+      "autoLogin"
+      "user"
+    ])
+
+    (mkRemovedOptionModule [ "services" "xserver" "displayManager" "gdm" "nvidiaWayland" ] "We defer to GDM whether Wayland should be enabled.")
+  ];
+
+  meta = {
+    maintainers = teams.gnome.members;
+  };
+
+  ###### interface
+
+  options = {
+
+    services.xserver.displayManager.gdm = {
+
+      enable = mkEnableOption (lib.mdDoc "GDM, the GNOME Display Manager");
+
+      debug = mkEnableOption (lib.mdDoc "debugging messages in GDM");
+
+      # Auto login options specific to GDM
+      autoLogin.delay = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Seconds of inactivity after which the autologin will be performed.
+        '';
+      };
+
+      wayland = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Allow GDM to run on Wayland instead of Xserver.
+        '';
+      };
+
+      autoSuspend = mkOption {
+        default = true;
+        description = lib.mdDoc ''
+          On the GNOME Display Manager login screen, suspend the machine after inactivity.
+          (Does not affect automatic suspend while logged in, or at lock screen.)
+        '';
+        type = types.bool;
+      };
+
+      banner = mkOption {
+        type = types.nullOr types.lines;
+        default = null;
+        example = ''
+          foo
+          bar
+          baz
+        '';
+        description = lib.mdDoc ''
+          Optional message to display on the login screen.
+        '';
+      };
+
+      settings = mkOption {
+        type = settingsFormat.type;
+        default = { };
+        example = {
+          debug.enable = true;
+        };
+        description = lib.mdDoc ''
+          Options passed to the gdm daemon.
+          See [here](https://help.gnome.org/admin/gdm/stable/configuration.html.en#daemonconfig) for supported options.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.gdm.enable {
+
+    services.xserver.displayManager.lightdm.enable = false;
+
+    users.users.gdm =
+      { name = "gdm";
+        uid = config.ids.uids.gdm;
+        group = "gdm";
+        home = "/run/gdm";
+        description = "GDM user";
+      };
+
+    users.groups.gdm.gid = config.ids.gids.gdm;
+
+    # GDM needs different xserverArgs, presumably because it uses Wayland by default.
+    services.xserver.tty = null;
+    services.xserver.display = null;
+    services.xserver.verbose = null;
+
+    services.xserver.displayManager.job =
+      {
+        environment = {
+          GDM_X_SERVER_EXTRA_ARGS = toString
+            (filter (arg: arg != "-terminate") cfg.xserverArgs);
+          XDG_DATA_DIRS = lib.makeSearchPath "share" [
+            gdm # for gnome-login.session
+            cfg.sessionData.desktops
+            pkgs.gnome.gnome-control-center # for accessibility icon
+            pkgs.gnome.adwaita-icon-theme
+            pkgs.hicolor-icon-theme # empty icon theme as a base
+          ];
+        } // optionalAttrs (xSessionWrapper != null) {
+          # Make GDM use this wrapper before running the session, which runs the
+          # configured setupCommands. This relies on a patched GDM which supports
+          # this environment variable.
+          GDM_X_SESSION_WRAPPER = "${xSessionWrapper}";
+        };
+        execCmd = "exec ${gdm}/bin/gdm";
+        preStart = optionalString (defaultSessionName != null) ''
+          # Set the default session in the session chooser to the specified value – basically ignore session history.
+          ${setSessionScript}/bin/set-session ${cfg.sessionData.autologinSession}
+        '';
+      };
+
+    systemd.tmpfiles.rules = [
+      "d /run/gdm/.config 0711 gdm gdm"
+    ] ++ optionals config.hardware.pulseaudio.enable [
+      "d /run/gdm/.config/pulse 0711 gdm gdm"
+      "L+ /run/gdm/.config/pulse/${pulseConfig.name} - - - - ${pulseConfig}"
+    ] ++ optionals config.services.gnome.gnome-initial-setup.enable [
+      # Create stamp file for gnome-initial-setup to prevent it starting in GDM.
+      "f /run/gdm/.config/gnome-initial-setup-done 0711 gdm gdm - yes"
+    ];
+
+    # Otherwise GDM will not be able to start correctly and display Wayland sessions
+    systemd.packages = with pkgs.gnome; [ gdm gnome-session gnome-shell ];
+    environment.systemPackages = [ pkgs.gnome.adwaita-icon-theme ];
+
+    # We don't use the upstream gdm service
+    # it has to be disabled since the gdm package has it
+    # https://github.com/NixOS/nixpkgs/issues/108672
+    systemd.services.gdm.enable = false;
+
+    systemd.services.display-manager.wants = [
+      # Because sd_login_monitor_new requires /run/systemd/machines
+      "systemd-machined.service"
+      # setSessionScript wants AccountsService
+      "accounts-daemon.service"
+    ];
+
+    systemd.services.display-manager.after = [
+      "rc-local.service"
+      "systemd-machined.service"
+      "systemd-user-sessions.service"
+      "getty@tty${gdm.initialVT}.service"
+      "plymouth-quit.service"
+      "plymouth-start.service"
+    ];
+    systemd.services.display-manager.conflicts = [
+      "getty@tty${gdm.initialVT}.service"
+      "plymouth-quit.service"
+    ];
+    systemd.services.display-manager.onFailure = [
+      "plymouth-quit.service"
+    ];
+
+    # Prevent nixos-rebuild switch from bringing down the graphical
+    # session. (If multi-user.target wants plymouth-quit.service which
+    # conflicts with display-manager.service, then when nixos-rebuild
+    # switch starts multi-user.target, display-manager.service is
+    # stopped so plymouth-quit.service can be started.)
+    systemd.services.plymouth-quit = mkIf config.boot.plymouth.enable {
+      wantedBy = lib.mkForce [];
+    };
+
+    systemd.services.display-manager.serviceConfig = {
+      # Restart = "always"; - already defined in xserver.nix
+      KillMode = "mixed";
+      IgnoreSIGPIPE = "no";
+      BusName = "org.gnome.DisplayManager";
+      StandardError = "inherit";
+      ExecReload = "${pkgs.coreutils}/bin/kill -SIGHUP $MAINPID";
+      KeyringMode = "shared";
+      EnvironmentFile = "-/etc/locale.conf";
+    };
+
+    systemd.services.display-manager.path = [ pkgs.gnome.gnome-session ];
+
+    # Allow choosing a user account
+    services.accounts-daemon.enable = true;
+
+    services.dbus.packages = [ gdm ];
+
+    systemd.user.services.dbus.wantedBy = [ "default.target" ];
+
+    programs.dconf.profiles.gdm.databases = lib.optionals (!cfg.gdm.autoSuspend) [{
+      settings."org/gnome/settings-daemon/plugins/power" = {
+        sleep-inactive-ac-type = "nothing";
+        sleep-inactive-battery-type = "nothing";
+        sleep-inactive-ac-timeout = lib.gvariant.mkInt32 0;
+        sleep-inactive-battery-timeout = lib.gvariant.mkInt32 0;
+      };
+    }] ++ lib.optionals (cfg.gdm.banner != null) [{
+      settings."org/gnome/login-screen" = {
+        banner-message-enable = true;
+        banner-message-text = cfg.gdm.banner;
+      };
+    }] ++ [ "${gdm}/share/gdm/greeter-dconf-defaults" ];
+
+    # Use AutomaticLogin if delay is zero, because it's immediate.
+    # Otherwise with TimedLogin with zero seconds the prompt is still
+    # presented and there's a little delay.
+    services.xserver.displayManager.gdm.settings = {
+      daemon = mkMerge [
+        { WaylandEnable = cfg.gdm.wayland; }
+        # nested if else didn't work
+        (mkIf (cfg.autoLogin.enable && cfg.gdm.autoLogin.delay != 0 ) {
+          TimedLoginEnable = true;
+          TimedLogin = cfg.autoLogin.user;
+          TimedLoginDelay = cfg.gdm.autoLogin.delay;
+        })
+        (mkIf (cfg.autoLogin.enable && cfg.gdm.autoLogin.delay == 0 ) {
+          AutomaticLoginEnable = true;
+          AutomaticLogin = cfg.autoLogin.user;
+        })
+      ];
+      debug = mkIf cfg.gdm.debug {
+        Enable = true;
+      };
+    };
+
+    environment.etc."gdm/custom.conf".source = configFile;
+
+    environment.etc."gdm/Xsession".source = config.services.xserver.displayManager.sessionData.wrapper;
+
+    # GDM LFS PAM modules, adapted somehow to NixOS
+    security.pam.services = {
+      gdm-launch-environment.text = ''
+        auth     required       pam_succeed_if.so audit quiet_success user = gdm
+        auth     optional       pam_permit.so
+
+        account  required       pam_succeed_if.so audit quiet_success user = gdm
+        account  sufficient     pam_unix.so
+
+        password required       pam_deny.so
+
+        session  required       pam_succeed_if.so audit quiet_success user = gdm
+        session  required       pam_env.so conffile=/etc/pam/environment readenv=0
+        session  optional       ${config.systemd.package}/lib/security/pam_systemd.so
+        session  optional       pam_keyinit.so force revoke
+        session  optional       pam_permit.so
+      '';
+
+      gdm-password.text = ''
+        auth      substack      login
+        account   include       login
+        password  substack      login
+        session   include       login
+      '';
+
+      gdm-autologin.text = ''
+        auth      requisite     pam_nologin.so
+
+        auth      required      pam_succeed_if.so uid >= 1000 quiet
+        auth      required      pam_permit.so
+
+        account   sufficient    pam_unix.so
+
+        password  requisite     pam_unix.so nullok yescrypt
+
+        session   optional      pam_keyinit.so revoke
+        session   include       login
+      '';
+
+    };
+
+  };
+
+}
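
A minimal sketch of a GDM setup using the options above; the user name is hypothetical, and a non-zero `autoLogin.delay` selects TimedLogin rather than AutomaticLogin, as implemented in the settings block above:

    {
      services.xserver.enable = true;
      services.xserver.displayManager.gdm = {
        enable = true;
        wayland = true;
        autoSuspend = false;
        banner = "Authorized use only.";
        autoLogin.delay = 5;
      };
      services.xserver.displayManager.autoLogin = {
        enable = true;
        user = "alice";  # hypothetical user name
      };
    }
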
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
new file mode 100644
index 000000000000..412bcc4091b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/enso-os.nix
@@ -0,0 +1,140 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.enso;
+
+  theme = cfg.theme.package;
+  icons = cfg.iconTheme.package;
+  cursors = cfg.cursorTheme.package;
+
+  ensoGreeterConf = pkgs.writeText "lightdm-enso-os-greeter.conf" ''
+    [greeter]
+    default-wallpaper=${ldmcfg.background}
+    gtk-theme=${cfg.theme.name}
+    icon-theme=${cfg.iconTheme.name}
+    cursor-theme=${cfg.cursorTheme.name}
+    blur=${toString cfg.blur}
+    brightness=${toString cfg.brightness}
+    ${cfg.extraConfig}
+  '';
+in {
+  options = {
+    services.xserver.displayManager.lightdm.greeters.enso = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable enso-os-greeter as the lightdm greeter
+        '';
+      };
+
+      theme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome.gnome-themes-extra;
+          defaultText = literalExpression "pkgs.gnome.gnome-themes-extra";
+          description = lib.mdDoc ''
+            The package path that contains the theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = lib.mdDoc ''
+            Name of the theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      iconTheme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.papirus-icon-theme;
+          defaultText = literalExpression "pkgs.papirus-icon-theme";
+          description = lib.mdDoc ''
+            The package path that contains the icon theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "ePapirus";
+          description = lib.mdDoc ''
+            Name of the icon theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      cursorTheme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.capitaine-cursors;
+          defaultText = literalExpression "pkgs.capitaine-cursors";
+          description = lib.mdDoc ''
+            The package path that contains the cursor theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "capitane-cursors";
+          description = lib.mdDoc ''
+            Name of the cursor theme to use for the lightdm-enso-os-greeter
+          '';
+        };
+      };
+
+      blur = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether or not to enable blur
+        '';
+      };
+
+      brightness = mkOption {
+        type = types.int;
+        default = 7;
+        description = lib.mdDoc ''
+          Brightness
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration that should be put in the greeter.conf
+          configuration file
+        '';
+      };
+    };
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+    environment.etc."lightdm/greeter.conf".source = ensoGreeterConf;
+
+    environment.systemPackages = [
+      cursors
+      icons
+      theme
+    ];
+
+    services.xserver.displayManager.lightdm = {
+      greeter = mkDefault {
+        package = pkgs.lightdm-enso-os-greeter.xgreeters;
+        name = "pantheon-greeter";
+      };
+
+      greeters = {
+        gtk = {
+          enable = mkDefault false;
+        };
+      };
+    };
+  };
+}
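
A minimal sketch of enabling the Enso OS greeter with LightDM; "Adwaita-dark" is assumed to be shipped by the default `theme.package` and is only illustrative:

    {
      services.xserver.enable = true;
      services.xserver.displayManager.lightdm.enable = true;
      services.xserver.displayManager.lightdm.greeters.enso = {
        enable = true;
        blur = true;
        theme.name = "Adwaita-dark";
      };
    }
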
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
new file mode 100644
index 000000000000..c050367e74df
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/gtk.nix
@@ -0,0 +1,174 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  xcfg = config.services.xserver;
+  cfg = ldmcfg.greeters.gtk;
+
+  inherit (pkgs) writeText;
+
+  theme = cfg.theme.package;
+  icons = cfg.iconTheme.package;
+  cursors = cfg.cursorTheme.package;
+
+  gtkGreeterConf = writeText "lightdm-gtk-greeter.conf"
+    ''
+    [greeter]
+    theme-name = ${cfg.theme.name}
+    icon-theme-name = ${cfg.iconTheme.name}
+    cursor-theme-name = ${cfg.cursorTheme.name}
+    cursor-theme-size = ${toString cfg.cursorTheme.size}
+    background = ${ldmcfg.background}
+    ${optionalString (cfg.clock-format != null) "clock-format = ${cfg.clock-format}"}
+    ${optionalString (cfg.indicators != null) "indicators = ${concatStringsSep ";" cfg.indicators}"}
+    ${optionalString (xcfg.dpi != null) "xft-dpi=${toString xcfg.dpi}"}
+    ${cfg.extraConfig}
+    '';
+
+in
+{
+  options = {
+
+    services.xserver.displayManager.lightdm.greeters.gtk = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable lightdm-gtk-greeter as the lightdm greeter.
+        '';
+      };
+
+      theme = {
+
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome.gnome-themes-extra;
+          defaultText = literalExpression "pkgs.gnome.gnome-themes-extra";
+          description = lib.mdDoc ''
+            The package path that contains the theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = lib.mdDoc ''
+            Name of the theme to use for the lightdm-gtk-greeter.
+          '';
+        };
+
+      };
+
+      iconTheme = {
+
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome.adwaita-icon-theme;
+          defaultText = literalExpression "pkgs.gnome.adwaita-icon-theme";
+          description = lib.mdDoc ''
+            The package path that contains the icon theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = lib.mdDoc ''
+            Name of the icon theme to use for the lightdm-gtk-greeter.
+          '';
+        };
+
+      };
+
+      cursorTheme = {
+
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome.adwaita-icon-theme;
+          defaultText = literalExpression "pkgs.gnome.adwaita-icon-theme";
+          description = lib.mdDoc ''
+            The package path that contains the cursor theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = lib.mdDoc ''
+            Name of the cursor theme to use for the lightdm-gtk-greeter.
+          '';
+        };
+
+        size = mkOption {
+          type = types.int;
+          default = 16;
+          description = lib.mdDoc ''
+            Size of the cursor theme to use for the lightdm-gtk-greeter.
+          '';
+        };
+      };
+
+      clock-format = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "%F";
+        description = lib.mdDoc ''
+          Clock format string (as expected by strftime, e.g. "%H:%M")
+          to use with the lightdm gtk greeter panel.
+
+          If set to null the default clock format is used.
+        '';
+      };
+
+      indicators = mkOption {
+        type = types.nullOr (types.listOf types.str);
+        default = null;
+        example = [ "~host" "~spacer" "~clock" "~spacer" "~session" "~language" "~a11y" "~power" ];
+        description = lib.mdDoc ''
+          List of allowed indicator modules to use for the lightdm gtk
+          greeter panel.
+
+          Built-in indicators include "~a11y", "~language", "~session",
+          "~power", "~clock", "~host", "~spacer". Unity indicators can be
+          represented by short name (e.g. "sound", "power"), service file name,
+          or absolute path.
+
+          If set to null the default indicators are used.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration that should be put in the lightdm-gtk-greeter.conf
+          configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+
+    services.xserver.displayManager.lightdm.greeter = mkDefault {
+      package = pkgs.lightdm-gtk-greeter.xgreeters;
+      name = "lightdm-gtk-greeter";
+    };
+
+    environment.systemPackages = [
+      cursors
+      icons
+      theme
+    ];
+
+    environment.etc."lightdm/lightdm-gtk-greeter.conf".source = gtkGreeterConf;
+
+  };
+}
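
The GTK greeter is enabled by default whenever LightDM is, so usually only the cosmetic options need setting; a minimal sketch using the `indicators` option's own example values:

    {
      services.xserver.enable = true;
      services.xserver.displayManager.lightdm.enable = true;
      services.xserver.displayManager.lightdm.greeters.gtk = {
        clock-format = "%H:%M";
        indicators = [ "~host" "~spacer" "~clock" "~spacer" "~session" "~power" ];
        cursorTheme.size = 24;
      };
    }
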
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
new file mode 100644
index 000000000000..f4195c4c2dc3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/mini.nix
@@ -0,0 +1,100 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.mini;
+
+  miniGreeterConf = pkgs.writeText "lightdm-mini-greeter.conf"
+    ''
+    [greeter]
+    user = ${cfg.user}
+    show-password-label = true
+    password-label-text = Password:
+    invalid-password-text = Invalid Password
+    show-input-cursor = true
+    password-alignment = right
+
+    [greeter-hotkeys]
+    mod-key = meta
+    shutdown-key = s
+    restart-key = r
+    hibernate-key = h
+    suspend-key = u
+
+    [greeter-theme]
+    font = Sans
+    font-size = 1em
+    font-weight = bold
+    font-style = normal
+    text-color = "#080800"
+    error-color = "#F8F8F0"
+    background-image = "${ldmcfg.background}"
+    background-color = "#1B1D1E"
+    window-color = "#F92672"
+    border-color = "#080800"
+    border-width = 2px
+    layout-space = 15
+    password-color = "#F8F8F0"
+    password-background-color = "#1B1D1E"
+    password-border-color = "#080800"
+    password-border-width = 2px
+
+    ${cfg.extraConfig}
+    '';
+
+in
+{
+  options = {
+
+    services.xserver.displayManager.lightdm.greeters.mini = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable lightdm-mini-greeter as the lightdm greeter.
+
+          Note that this greeter starts only the default X session.
+          You can configure the default X session using
+          [](#opt-services.xserver.displayManager.defaultSession).
+        '';
+      };
+
+      user = mkOption {
+        type = types.str;
+        default = "root";
+        description = lib.mdDoc ''
+          The user to log in as.
+        '';
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration that should be put in the lightdm-mini-greeter.conf
+          configuration file.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+
+    services.xserver.displayManager.lightdm.greeters.gtk.enable = false;
+
+    services.xserver.displayManager.lightdm.greeter = mkDefault {
+      package = pkgs.lightdm-mini-greeter.xgreeters;
+      name = "lightdm-mini-greeter";
+    };
+
+    environment.etc."lightdm/lightdm-mini-greeter.conf".source = miniGreeterConf;
+
+  };
+}
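A sketch of enabling lightdm-mini-greeter with the options above; the user and default session values are illustrative placeholders:

  services.xserver.displayManager = {
    defaultSession = "none+i3";    # illustrative; the mini greeter starts only the default session
    lightdm = {
      enable = true;
      greeters.mini = {
        enable = true;
        user = "alice";            # illustrative user to log in as
      };
    };
  };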
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/mobile.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/mobile.nix
new file mode 100644
index 000000000000..31cc9b3deaa1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/mobile.nix
@@ -0,0 +1,26 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.mobile;
+in
+{
+  options = {
+    services.xserver.displayManager.lightdm.greeters.mobile = {
+      enable = mkEnableOption (lib.mdDoc
+        "lightdm-mobile-greeter as the lightdm greeter"
+      );
+    };
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+    services.xserver.displayManager.lightdm.greeters.gtk.enable = false;
+
+    services.xserver.displayManager.lightdm.greeter = mkDefault {
+      package = pkgs.lightdm-mobile-greeter.xgreeters;
+      name = "lightdm-mobile-greeter";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix
new file mode 100644
index 000000000000..10707e001e82
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/pantheon.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.pantheon;
+
+in
+{
+  meta = with lib; {
+    maintainers = with maintainers; [ ] ++ teams.pantheon.members;
+  };
+
+  options = {
+
+    services.xserver.displayManager.lightdm.greeters.pantheon = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable elementary-greeter as the lightdm greeter.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+
+    services.xserver.displayManager.lightdm.greeters.gtk.enable = false;
+
+    services.xserver.displayManager.lightdm.greeter = mkDefault {
+      package = pkgs.pantheon.elementary-greeter.xgreeters;
+      name = "io.elementary.greeter";
+    };
+
+    # Show manual login card.
+    services.xserver.displayManager.lightdm.extraSeatDefaults = "greeter-show-manual-login=true";
+
+    environment.etc."lightdm/io.elementary.greeter.conf".source = "${pkgs.pantheon.elementary-greeter}/etc/lightdm/io.elementary.greeter.conf";
+    environment.etc."wingpanel.d/io.elementary.greeter.allowed".source = "${pkgs.pantheon.elementary-default-settings}/etc/wingpanel.d/io.elementary.greeter.allowed";
+
+  };
+}
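Enabling the Pantheon greeter only needs the enable flag from the module above; the module itself adds greeter-show-manual-login=true to the seat defaults:

  services.xserver.displayManager.lightdm = {
    enable = true;
    greeters.pantheon.enable = true;
  };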
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/slick.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/slick.nix
new file mode 100644
index 000000000000..ee9b4016c8ef
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/slick.nix
@@ -0,0 +1,149 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  ldmcfg = config.services.xserver.displayManager.lightdm;
+  cfg = ldmcfg.greeters.slick;
+
+  inherit (pkgs) writeText;
+
+  theme = cfg.theme.package;
+  icons = cfg.iconTheme.package;
+  font = cfg.font.package;
+  cursors = cfg.cursorTheme.package;
+
+  slickGreeterConf = writeText "slick-greeter.conf" ''
+    [Greeter]
+    background=${ldmcfg.background}
+    theme-name=${cfg.theme.name}
+    icon-theme-name=${cfg.iconTheme.name}
+    font-name=${cfg.font.name}
+    cursor-theme-name=${cfg.cursorTheme.name}
+    cursor-theme-size=${toString cfg.cursorTheme.size}
+    draw-user-backgrounds=${boolToString cfg.draw-user-backgrounds}
+    ${cfg.extraConfig}
+  '';
+in
+{
+  options = {
+    services.xserver.displayManager.lightdm.greeters.slick = {
+      enable = mkEnableOption (lib.mdDoc "lightdm-slick-greeter as the lightdm greeter");
+
+      theme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome.gnome-themes-extra;
+          defaultText = literalExpression "pkgs.gnome.gnome-themes-extra";
+          description = lib.mdDoc ''
+            The package path that contains the theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = lib.mdDoc ''
+            Name of the theme to use for the lightdm-slick-greeter.
+          '';
+        };
+      };
+
+      iconTheme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome.adwaita-icon-theme;
+          defaultText = literalExpression "pkgs.gnome.adwaita-icon-theme";
+          description = lib.mdDoc ''
+            The package path that contains the icon theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = lib.mdDoc ''
+            Name of the icon theme to use for the lightdm-slick-greeter.
+          '';
+        };
+      };
+
+      font = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.ubuntu_font_family;
+          defaultText = literalExpression "pkgs.ubuntu_font_family";
+          description = lib.mdDoc ''
+            The package path that contains the font given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Ubuntu 11";
+          description = lib.mdDoc ''
+            Name of the font to use.
+          '';
+        };
+      };
+
+      cursorTheme = {
+        package = mkOption {
+          type = types.package;
+          default = pkgs.gnome.adwaita-icon-theme;
+          defaultText = literalExpression "pkgs.gnome.adwaita-icon-theme";
+          description = lib.mdDoc ''
+            The package path that contains the cursor theme given in the name option.
+          '';
+        };
+
+        name = mkOption {
+          type = types.str;
+          default = "Adwaita";
+          description = lib.mdDoc ''
+            Name of the cursor theme to use for the lightdm-slick-greeter.
+          '';
+        };
+
+        size = mkOption {
+          type = types.int;
+          default = 24;
+          description = lib.mdDoc ''
+            Size of the cursor theme to use for the lightdm-slick-greeter.
+          '';
+        };
+      };
+
+      draw-user-backgrounds = mkEnableOption (lib.mdDoc "draw user backgrounds");
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration that should be put in the lightdm-slick-greeter.conf
+          configuration file.
+        '';
+      };
+    };
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+    services.xserver.displayManager.lightdm = {
+      greeters.gtk.enable = false;
+      greeter = mkDefault {
+        package = pkgs.lightdm-slick-greeter.xgreeters;
+        name = "lightdm-slick-greeter";
+      };
+    };
+
+    environment.systemPackages = [
+      cursors
+      icons
+      theme
+    ];
+
+    fonts.packages = [ font ];
+
+    environment.etc."lightdm/slick-greeter.conf".source = slickGreeterConf;
+  };
+}
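A sketch using the lightdm-slick-greeter options above; the theme name is illustrative and is assumed to be provided by theme.package:

  services.xserver.displayManager.lightdm = {
    enable = true;
    greeters.slick = {
      enable = true;
      theme.name = "Adwaita-dark";   # assumed to exist in cfg.theme.package
      cursorTheme.size = 32;
      draw-user-backgrounds = true;
    };
  };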
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/tiny.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/tiny.nix
new file mode 100644
index 000000000000..dede7680ecb3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm-greeters/tiny.nix
@@ -0,0 +1,90 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  dmcfg = config.services.xserver.displayManager;
+  ldmcfg = dmcfg.lightdm;
+  cfg = ldmcfg.greeters.tiny;
+
+in
+{
+  options = {
+
+    services.xserver.displayManager.lightdm.greeters.tiny = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable lightdm-tiny-greeter as the lightdm greeter.
+
+          Note that this greeter starts only the default X session.
+          You can configure the default X session using
+          [](#opt-services.xserver.displayManager.defaultSession).
+        '';
+      };
+
+      label = {
+        user = mkOption {
+          type = types.str;
+          default = "Username";
+          description = lib.mdDoc ''
+            The string to represent the user_text label.
+          '';
+        };
+
+        pass = mkOption {
+          type = types.str;
+          default = "Password";
+          description = lib.mdDoc ''
+            The string to represent the pass_text label.
+          '';
+        };
+      };
+
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra configuration appended to the generated config header, describing the greeter's style and UI.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf (ldmcfg.enable && cfg.enable) {
+
+    services.xserver.displayManager.lightdm.greeters.gtk.enable = false;
+
+    services.xserver.displayManager.lightdm.greeter =
+    let
+      configHeader = ''
+        #include <gtk/gtk.h>
+        static const char *user_text = "${cfg.label.user}";
+        static const char *pass_text = "${cfg.label.pass}";
+        static const char *session = "${dmcfg.defaultSession}";
+      '';
+      config = optionalString (cfg.extraConfig != "") (configHeader + cfg.extraConfig);
+      package = pkgs.lightdm-tiny-greeter.override { conf = config; };
+    in
+      mkDefault {
+        package = package.xgreeters;
+        name = "lightdm-tiny-greeter";
+      };
+
+    assertions = [
+      {
+        assertion = dmcfg.defaultSession != null;
+        message = ''
+          Please set: services.xserver.displayManager.defaultSession
+        '';
+      }
+    ];
+
+  };
+}
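A sketch for lightdm-tiny-greeter; per the assertion above, defaultSession must be set (the session name here is illustrative):

  services.xserver.displayManager = {
    defaultSession = "xfce";       # illustrative; required by the assertion above
    lightdm = {
      enable = true;
      greeters.tiny = {
        enable = true;
        label.user = "Login:";
        label.pass = "Passphrase:";
      };
    };
  };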
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/lightdm.nix b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm.nix
new file mode 100644
index 000000000000..548d3c5bc46a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/lightdm.nix
@@ -0,0 +1,329 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  xcfg = config.services.xserver;
+  dmcfg = xcfg.displayManager;
+  xEnv = config.systemd.services.display-manager.environment;
+  cfg = dmcfg.lightdm;
+  sessionData = dmcfg.sessionData;
+
+  setSessionScript = pkgs.callPackage ./account-service-util.nix { };
+
+  inherit (pkgs) lightdm writeScript writeText;
+
+  # lightdm runs with clearenv(), but we need a few things in the environment for X to start up
+  xserverWrapper = writeScript "xserver-wrapper"
+    ''
+      #! ${pkgs.bash}/bin/bash
+      ${concatMapStrings (n: "export ${n}=\"${getAttr n xEnv}\"\n") (attrNames xEnv)}
+
+      display=$(echo "$@" | xargs -n 1 | grep -P ^:\\d\$ | head -n 1 | sed s/^://)
+      if [ -z "$display" ]
+      then additionalArgs=":0 -logfile /var/log/X.0.log"
+      else additionalArgs="-logfile /var/log/X.$display.log"
+      fi
+
+      exec ${dmcfg.xserverBin} ${toString dmcfg.xserverArgs} $additionalArgs "$@"
+    '';
+
+  usersConf = writeText "users.conf"
+    ''
+      [UserList]
+      minimum-uid=1000
+      hidden-users=${concatStringsSep " " dmcfg.hiddenUsers}
+      hidden-shells=/run/current-system/sw/bin/nologin
+    '';
+
+  lightdmConf = writeText "lightdm.conf"
+    ''
+      [LightDM]
+      ${optionalString cfg.greeter.enable ''
+        greeter-user = ${config.users.users.lightdm.name}
+        greeters-directory = ${cfg.greeter.package}
+      ''}
+      sessions-directory = ${dmcfg.sessionData.desktops}/share/xsessions:${dmcfg.sessionData.desktops}/share/wayland-sessions
+      ${cfg.extraConfig}
+
+      [Seat:*]
+      xserver-command = ${xserverWrapper}
+      session-wrapper = ${dmcfg.sessionData.wrapper}
+      ${optionalString cfg.greeter.enable ''
+        greeter-session = ${cfg.greeter.name}
+      ''}
+      ${optionalString dmcfg.autoLogin.enable ''
+        autologin-user = ${dmcfg.autoLogin.user}
+        autologin-user-timeout = ${toString cfg.autoLogin.timeout}
+        autologin-session = ${sessionData.autologinSession}
+      ''}
+      ${optionalString (dmcfg.setupCommands != "") ''
+        display-setup-script=${pkgs.writeScript "lightdm-display-setup" ''
+          #!${pkgs.bash}/bin/bash
+          ${dmcfg.setupCommands}
+        ''}
+      ''}
+      ${cfg.extraSeatDefaults}
+    '';
+
+in
+{
+  meta = with lib; {
+    maintainers = with maintainers; [ ] ++ teams.pantheon.members;
+  };
+
+  # Note: the order in which lightdm greeter modules are imported
+  # here determines the default: later modules (if enabled) are
+  # preferred.
+  imports = [
+    ./lightdm-greeters/gtk.nix
+    ./lightdm-greeters/mini.nix
+    ./lightdm-greeters/enso-os.nix
+    ./lightdm-greeters/pantheon.nix
+    ./lightdm-greeters/tiny.nix
+    ./lightdm-greeters/slick.nix
+    ./lightdm-greeters/mobile.nix
+    (mkRenamedOptionModule [ "services" "xserver" "displayManager" "lightdm" "autoLogin" "enable" ] [
+      "services"
+      "xserver"
+      "displayManager"
+      "autoLogin"
+      "enable"
+    ])
+    (mkRenamedOptionModule [ "services" "xserver" "displayManager" "lightdm" "autoLogin" "user" ] [
+     "services"
+     "xserver"
+     "displayManager"
+     "autoLogin"
+     "user"
+    ])
+  ];
+
+  options = {
+
+    services.xserver.displayManager.lightdm = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable lightdm as the display manager.
+        '';
+      };
+
+      greeter =  {
+        enable = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            If set to false, run lightdm in greeterless mode. This only works if autologin
+            is enabled and autoLogin.timeout is zero.
+          '';
+        };
+        package = mkOption {
+          type = types.package;
+          description = lib.mdDoc ''
+            The LightDM greeter to log in via. The package should be a directory
+            containing a .desktop file matching the name in the 'name' option.
+          '';
+
+        };
+        name = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            The name of a .desktop file in the directory specified
+            in the 'package' option.
+          '';
+        };
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          user-authority-in-system-dir = true
+        '';
+        description = lib.mdDoc "Extra lines to append to LightDM section.";
+      };
+
+      background = mkOption {
+        type = types.either types.path (types.strMatching "^#[0-9]\{6\}$");
+        # Manual cannot depend on packages, we are actually setting the default in config below.
+        defaultText = literalExpression "pkgs.nixos-artwork.wallpapers.simple-dark-gray-bottom.gnomeFilePath";
+        description = lib.mdDoc ''
+          The background image or color to use.
+        '';
+      };
+
+      extraSeatDefaults = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          greeter-show-manual-login=true
+        '';
+        description = lib.mdDoc "Extra lines to append to SeatDefaults section.";
+      };
+
+      # Configuration for automatic login specific to LightDM
+      autoLogin.timeout = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Show the greeter for this many seconds before automatic login occurs.
+        '';
+      };
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = xcfg.enable;
+        message = ''
+          LightDM requires services.xserver.enable to be true
+        '';
+      }
+      { assertion = dmcfg.autoLogin.enable -> sessionData.autologinSession != null;
+        message = ''
+          LightDM auto-login requires that services.xserver.displayManager.defaultSession is set.
+        '';
+      }
+      { assertion = !cfg.greeter.enable -> (dmcfg.autoLogin.enable && cfg.autoLogin.timeout == 0);
+        message = ''
+          LightDM can only run without greeter if automatic login is enabled and the timeout for it
+          is set to zero.
+        '';
+      }
+    ];
+
+    # Keep in sync with the defaultText value from the option definition.
+    services.xserver.displayManager.lightdm.background = mkDefault pkgs.nixos-artwork.wallpapers.simple-dark-gray-bottom.gnomeFilePath;
+
+    # Set the default session in the session chooser to the specified value – basically ignore session history.
+    # Auto-login is already covered by a config value.
+    services.xserver.displayManager.job.preStart = optionalString (!dmcfg.autoLogin.enable && dmcfg.defaultSession != null) ''
+      ${setSessionScript}/bin/set-session ${dmcfg.defaultSession}
+    '';
+
+    # setSessionScript needs session-files in XDG_DATA_DIRS
+    services.xserver.displayManager.job.environment.XDG_DATA_DIRS = "${dmcfg.sessionData.desktops}/share/";
+
+    # setSessionScript wants AccountsService
+    systemd.services.display-manager.wants = [
+      "accounts-daemon.service"
+    ];
+
+    # lightdm relaunches itself via just `lightdm`, so needs to be on the PATH
+    services.xserver.displayManager.job.execCmd = ''
+      export PATH=${lightdm}/sbin:$PATH
+      exec ${lightdm}/sbin/lightdm
+    '';
+
+    # Replaces getty
+    systemd.services.display-manager.conflicts = [
+      "getty@tty7.service"
+      # TODO: Add "plymouth-quit.service" so LightDM can control when plymouth
+      # quits. Currently this breaks switching to configurations with plymouth.
+     ];
+
+    # Pull in dependencies of services we replace.
+    systemd.services.display-manager.after = [
+      "rc-local.service"
+      "systemd-machined.service"
+      "systemd-user-sessions.service"
+      "getty@tty7.service"
+      "user.slice"
+    ];
+
+    # user.slice needs to be present
+    systemd.services.display-manager.requires = [
+      "user.slice"
+    ];
+
+    # lightdm stops plymouth, so when lightdm fails, make sure plymouth stops too.
+    systemd.services.display-manager.onFailure = [
+      "plymouth-quit.service"
+    ];
+
+    systemd.services.display-manager.serviceConfig = {
+      BusName = "org.freedesktop.DisplayManager";
+      IgnoreSIGPIPE = "no";
+      # This allows lightdm to pass the LUKS password through to PAM.
+      # The login keyring is unlocked automatically when autologin is used.
+      KeyringMode = "shared";
+      KillMode = "mixed";
+      StandardError = "inherit";
+    };
+
+    environment.etc."lightdm/lightdm.conf".source = lightdmConf;
+    environment.etc."lightdm/users.conf".source = usersConf;
+
+    services.dbus.enable = true;
+    services.dbus.packages = [ lightdm ];
+
+    # lightdm uses the accounts daemon to remember language/window-manager per user
+    services.accounts-daemon.enable = true;
+
+    # Enable the accounts daemon to find lightdm's dbus interface
+    environment.systemPackages = [ lightdm ];
+
+    security.polkit.enable = true;
+
+    security.pam.services.lightdm.text = ''
+        auth      substack      login
+        account   include       login
+        password  substack      login
+        session   include       login
+    '';
+
+    security.pam.services.lightdm-greeter.text = ''
+        auth     required       pam_succeed_if.so audit quiet_success user = lightdm
+        auth     optional       pam_permit.so
+
+        account  required       pam_succeed_if.so audit quiet_success user = lightdm
+        account  sufficient     pam_unix.so
+
+        password required       pam_deny.so
+
+        session  required       pam_succeed_if.so audit quiet_success user = lightdm
+        session  required       pam_env.so conffile=/etc/pam/environment readenv=0
+        session  optional       ${config.systemd.package}/lib/security/pam_systemd.so
+        session  optional       pam_keyinit.so force revoke
+        session  optional       pam_permit.so
+    '';
+
+    security.pam.services.lightdm-autologin.text = ''
+        auth      requisite     pam_nologin.so
+
+        auth      required      pam_succeed_if.so uid >= 1000 quiet
+        auth      required      pam_permit.so
+
+        account   sufficient    pam_unix.so
+
+        password  requisite     pam_unix.so nullok yescrypt
+
+        session   optional      pam_keyinit.so revoke
+        session   include       login
+    '';
+
+    users.users.lightdm = {
+      home = "/var/lib/lightdm";
+      group = "lightdm";
+      uid = config.ids.uids.lightdm;
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /run/lightdm 0711 lightdm lightdm -"
+      "d /var/cache/lightdm 0711 root lightdm -"
+      "d /var/lib/lightdm 1770 lightdm lightdm -"
+      "d /var/lib/lightdm-data 1775 lightdm lightdm -"
+      "d /var/log/lightdm 0711 root lightdm -"
+    ];
+
+    users.groups.lightdm.gid = config.ids.gids.lightdm;
+    services.xserver.tty     = null; # We might start multiple X servers, so let the ttys increment themselves.
+    services.xserver.display = null; # We specify our own display (and logfile) in the xserver wrapper above
+  };
+}
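Putting the main LightDM options together, a configuration with automatic login might look like the sketch below; the user and session names are illustrative, and per the assertions above auto-login requires defaultSession to be set:

  services.xserver.enable = true;
  services.xserver.displayManager = {
    defaultSession = "gnome";      # illustrative; required when autoLogin is enabled
    autoLogin = {
      enable = true;
      user = "alice";              # illustrative user
    };
    lightdm = {
      enable = true;
      autoLogin.timeout = 5;       # show the greeter for 5 seconds before logging in
      extraSeatDefaults = ''
        greeter-show-manual-login=true
      '';
    };
  };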
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix b/nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix
new file mode 100644
index 000000000000..6ca7a4425f89
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/sddm.nix
@@ -0,0 +1,316 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  xcfg = config.services.xserver;
+  dmcfg = xcfg.displayManager;
+  cfg = dmcfg.sddm;
+  xEnv = config.systemd.services.display-manager.environment;
+
+  sddm = pkgs.libsForQt5.sddm;
+
+  iniFmt = pkgs.formats.ini { };
+
+  xserverWrapper = pkgs.writeShellScript "xserver-wrapper" ''
+    ${concatMapStrings (n: "export ${n}=\"${getAttr n xEnv}\"\n") (attrNames xEnv)}
+    exec systemd-cat -t xserver-wrapper ${dmcfg.xserverBin} ${toString dmcfg.xserverArgs} "$@"
+  '';
+
+  Xsetup = pkgs.writeShellScript "Xsetup" ''
+    ${cfg.setupScript}
+    ${dmcfg.setupCommands}
+  '';
+
+  Xstop = pkgs.writeShellScript "Xstop" ''
+    ${cfg.stopScript}
+  '';
+
+  defaultConfig = {
+    General = {
+      HaltCommand = "/run/current-system/systemd/bin/systemctl poweroff";
+      RebootCommand = "/run/current-system/systemd/bin/systemctl reboot";
+      Numlock = if cfg.autoNumlock then "on" else "none"; # on, off, none
+
+      # Implementation is done via pkgs/applications/display-managers/sddm/sddm-default-session.patch
+      DefaultSession = optionalString (dmcfg.defaultSession != null) "${dmcfg.defaultSession}.desktop";
+
+      DisplayServer = if cfg.wayland.enable then "wayland" else "x11";
+    };
+
+    Theme = {
+      Current = cfg.theme;
+      ThemeDir = "/run/current-system/sw/share/sddm/themes";
+      FacesDir = "/run/current-system/sw/share/sddm/faces";
+    };
+
+    Users = {
+      MaximumUid = config.ids.uids.nixbld;
+      HideUsers = concatStringsSep "," dmcfg.hiddenUsers;
+      HideShells = "/run/current-system/sw/bin/nologin";
+    };
+
+    X11 = {
+      MinimumVT = if xcfg.tty != null then xcfg.tty else 7;
+      ServerPath = toString xserverWrapper;
+      XephyrPath = "${pkgs.xorg.xorgserver.out}/bin/Xephyr";
+      SessionCommand = toString dmcfg.sessionData.wrapper;
+      SessionDir = "${dmcfg.sessionData.desktops}/share/xsessions";
+      XauthPath = "${pkgs.xorg.xauth}/bin/xauth";
+      DisplayCommand = toString Xsetup;
+      DisplayStopCommand = toString Xstop;
+      EnableHiDPI = cfg.enableHidpi;
+    };
+
+    Wayland = {
+      EnableHiDPI = cfg.enableHidpi;
+      SessionDir = "${dmcfg.sessionData.desktops}/share/wayland-sessions";
+      CompositorCommand = lib.optionalString cfg.wayland.enable cfg.wayland.compositorCommand;
+    };
+  } // lib.optionalAttrs dmcfg.autoLogin.enable {
+    Autologin = {
+      User = dmcfg.autoLogin.user;
+      Session = autoLoginSessionName;
+      Relogin = cfg.autoLogin.relogin;
+    };
+  };
+
+  cfgFile =
+    iniFmt.generate "sddm.conf" (lib.recursiveUpdate defaultConfig cfg.settings);
+
+  autoLoginSessionName =
+    "${dmcfg.sessionData.autologinSession}.desktop";
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule
+      [ "services" "xserver" "displayManager" "sddm" "themes" ]
+      "Set the option `services.xserver.displayManager.sddm.package' instead.")
+    (mkRenamedOptionModule
+      [ "services" "xserver" "displayManager" "sddm" "autoLogin" "enable" ]
+      [ "services" "xserver" "displayManager" "autoLogin" "enable" ])
+    (mkRenamedOptionModule
+      [ "services" "xserver" "displayManager" "sddm" "autoLogin" "user" ]
+      [ "services" "xserver" "displayManager" "autoLogin" "user" ])
+    (mkRemovedOptionModule
+      [ "services" "xserver" "displayManager" "sddm" "extraConfig" ]
+      "Set the option `services.xserver.displayManager.sddm.settings' instead.")
+  ];
+
+  options = {
+
+    services.xserver.displayManager.sddm = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable sddm as the display manager.
+        '';
+      };
+
+      enableHidpi = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to enable automatic HiDPI mode.
+        '';
+      };
+
+      settings = mkOption {
+        type = iniFmt.type;
+        default = { };
+        example = {
+          Autologin = {
+            User = "john";
+            Session = "plasma.desktop";
+          };
+        };
+        description = lib.mdDoc ''
+          Extra settings merged into sddm.conf, overriding the defaults.
+        '';
+      };
+
+      theme = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          Greeter theme to use.
+        '';
+      };
+
+      autoNumlock = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable numlock at login.
+        '';
+      };
+
+      setupScript = mkOption {
+        type = types.str;
+        default = "";
+        example = ''
+          # workaround for using NVIDIA Optimus without Bumblebee
+          xrandr --setprovideroutputsource modesetting NVIDIA-0
+          xrandr --auto
+        '';
+        description = lib.mdDoc ''
+          A script to execute when starting the display server. DEPRECATED, please
+          use {option}`services.xserver.displayManager.setupCommands`.
+        '';
+      };
+
+      stopScript = mkOption {
+        type = types.str;
+        default = "";
+        description = lib.mdDoc ''
+          A script to execute when stopping the display server.
+        '';
+      };
+
+      # Configuration for automatic login specific to SDDM
+      autoLogin = {
+        relogin = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            If true, automatic login will kick in again on session exit (logout); otherwise it
+            will only log in automatically when the display manager is started.
+          '';
+        };
+
+        minimumUid = mkOption {
+          type = types.ints.u16;
+          default = 1000;
+          description = lib.mdDoc ''
+            Minimum user ID for auto-login user.
+          '';
+        };
+      };
+
+      # Experimental Wayland support
+      wayland = {
+        enable = mkEnableOption "experimental Wayland support";
+
+        compositorCommand = mkOption {
+          type = types.str;
+          internal = true;
+
+          # This is basically the upstream default, but with Weston referenced by full path
+          # and the configuration generated from NixOS options.
+          default = let westonIni = (pkgs.formats.ini {}).generate "weston.ini" {
+              libinput = {
+                enable-tap = xcfg.libinput.mouse.tapping;
+                left-handed = xcfg.libinput.mouse.leftHanded;
+              };
+              keyboard = {
+                keymap_model = xcfg.xkb.model;
+                keymap_layout = xcfg.xkb.layout;
+                keymap_variant = xcfg.xkb.variant;
+                keymap_options = xcfg.xkb.options;
+              };
+            }; in "${pkgs.weston}/bin/weston --shell=fullscreen-shell.so -c ${westonIni}";
+          description = lib.mdDoc "Command used to start the selected compositor";
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = xcfg.enable;
+        message = ''
+          SDDM requires services.xserver.enable to be true
+        '';
+      }
+      {
+        assertion = dmcfg.autoLogin.enable -> autoLoginSessionName != null;
+        message = ''
+          SDDM auto-login requires that services.xserver.displayManager.defaultSession is set.
+        '';
+      }
+    ];
+
+    services.xserver.displayManager.job = {
+      environment = {
+        # Load themes from system environment
+        QT_PLUGIN_PATH = "/run/current-system/sw/" + pkgs.qt5.qtbase.qtPluginPrefix;
+        QML2_IMPORT_PATH = "/run/current-system/sw/" + pkgs.qt5.qtbase.qtQmlPrefix;
+      };
+
+      execCmd = "exec /run/current-system/sw/bin/sddm";
+    };
+
+    security.pam.services = {
+      sddm.text = ''
+        auth      substack      login
+        account   include       login
+        password  substack      login
+        session   include       login
+      '';
+
+      sddm-greeter.text = ''
+        auth     required       pam_succeed_if.so audit quiet_success user = sddm
+        auth     optional       pam_permit.so
+
+        account  required       pam_succeed_if.so audit quiet_success user = sddm
+        account  sufficient     pam_unix.so
+
+        password required       pam_deny.so
+
+        session  required       pam_succeed_if.so audit quiet_success user = sddm
+        session  required       pam_env.so conffile=/etc/pam/environment readenv=0
+        session  optional       ${config.systemd.package}/lib/security/pam_systemd.so
+        session  optional       pam_keyinit.so force revoke
+        session  optional       pam_permit.so
+      '';
+
+      sddm-autologin.text = ''
+        auth     requisite pam_nologin.so
+        auth     required  pam_succeed_if.so uid >= ${toString cfg.autoLogin.minimumUid} quiet
+        auth     required  pam_permit.so
+
+        account  include   sddm
+
+        password include   sddm
+
+        session  include   sddm
+      '';
+    };
+
+    users.users.sddm = {
+      createHome = true;
+      home = "/var/lib/sddm";
+      group = "sddm";
+      uid = config.ids.uids.sddm;
+    };
+
+    environment.etc."sddm.conf".source = cfgFile;
+    environment.pathsToLink = [
+      "/share/sddm"
+    ];
+
+    users.groups.sddm.gid = config.ids.gids.sddm;
+
+    environment.systemPackages = [ sddm ];
+    services.dbus.packages = [ sddm ];
+    systemd.tmpfiles.packages = [ sddm ];
+
+    # We're not using the upstream unit, so copy these: https://github.com/sddm/sddm/blob/develop/services/sddm.service.in
+    systemd.services.display-manager.after = [
+      "systemd-user-sessions.service"
+      "getty@tty7.service"
+      "plymouth-quit.service"
+      "systemd-logind.service"
+    ];
+    systemd.services.display-manager.conflicts = [
+      "getty@tty7.service"
+    ];
+
+    # To enable user switching, allow sddm to allocate TTYs/displays dynamically.
+    services.xserver.tty = null;
+    services.xserver.display = null;
+  };
+}
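A sketch using the SDDM options above; the extra settings key is illustrative and is simply merged into sddm.conf via recursiveUpdate:

  services.xserver.enable = true;
  services.xserver.displayManager.sddm = {
    enable = true;
    autoNumlock = true;
    wayland.enable = true;               # experimental, per the option above
    settings = {
      Theme.CursorTheme = "Adwaita";     # illustrative extra key merged into sddm.conf
    };
  };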
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/set-session.py b/nixpkgs/nixos/modules/services/x11/display-managers/set-session.py
new file mode 100755
index 000000000000..75940efe32b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/set-session.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+import gi, argparse, os, logging, sys
+
+gi.require_version("AccountsService", "1.0")
+from gi.repository import AccountsService, GLib
+from ordered_set import OrderedSet
+
+
+def get_session_file(session):
+    system_data_dirs = GLib.get_system_data_dirs()
+
+    session_dirs = OrderedSet(
+        os.path.join(data_dir, session)
+        for data_dir in system_data_dirs
+        for session in {"wayland-sessions", "xsessions"}
+    )
+
+    session_files = OrderedSet(
+        os.path.join(dir, session + ".desktop")
+        for dir in session_dirs
+        if os.path.exists(os.path.join(dir, session + ".desktop"))
+    )
+
+    # Deal with duplicate wayland-sessions and xsessions.
+    # Needed for the situation in gnome-session, where there's
+    # an xsession named the same as a wayland session.
+    if any(map(is_session_wayland, session_files)):
+        session_files = OrderedSet(
+            session for session in session_files if is_session_wayland(session)
+        )
+    else:
+        session_files = OrderedSet(
+            session for session in session_files if is_session_xsession(session)
+        )
+
+    if len(session_files) == 0:
+        logging.warning("No session files were found.")
+        sys.exit(0)
+    else:
+        return session_files[0]
+
+
+def is_session_xsession(session_file):
+    return "/xsessions/" in session_file
+
+
+def is_session_wayland(session_file):
+    return "/wayland-sessions/" in session_file
+
+
+def main():
+    parser = argparse.ArgumentParser(
+        description="Set session type for all normal users."
+    )
+    parser.add_argument("session", help="Name of session to set.")
+
+    args = parser.parse_args()
+
+    session = getattr(args, "session")
+    session_file = get_session_file(session)
+
+    user_manager = AccountsService.UserManager.get_default()
+    users = user_manager.list_users()
+
+    for user in users:
+        if user.is_system_account():
+            continue
+        else:
+            if is_session_wayland(session_file):
+                logging.debug(
+                    f"Setting session name: {session}, as we found the existing wayland-session: {session_file}"
+                )
+                user.set_session(session)
+                user.set_session_type("wayland")
+            elif is_session_xsession(session_file):
+                logging.debug(
+                    f"Setting session name: {session}, as we found the existing xsession: {session_file}"
+                )
+                user.set_x_session(session)
+                user.set_session(session)
+                user.set_session_type("x11")
+            else:
+                logging.error(f"Couldn't figure out session type for {session_file}")
+                sys.exit(1)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/slim.nix b/nixpkgs/nixos/modules/services/x11/display-managers/slim.nix
new file mode 100644
index 000000000000..4b0948a5b7a5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/slim.nix
@@ -0,0 +1,16 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  # added 2019-11-11
+  imports = [
+    (mkRemovedOptionModule [ "services" "xserver" "displayManager" "slim" ] ''
+      The SLIM project is abandoned and their last release was in 2013.
+      Because of this it poses a security risk to your system.
+      Other issues include it not fully supporting systemd and logind sessions.
+      Please use a different display manager such as LightDM, SDDM, or GDM.
+      You can also use the startx module which uses Xinitrc.
+    '')
+  ];
+}
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/startx.nix b/nixpkgs/nixos/modules/services/x11/display-managers/startx.nix
new file mode 100644
index 000000000000..f4bb7a89d03b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/startx.nix
@@ -0,0 +1,54 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.displayManager.startx;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.xserver.displayManager.startx = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the dummy "startx" pseudo-display manager,
+          which allows users to start X manually via the "startx" command
+          from a vt shell. The X server runs under the user's id, not as root.
+          The user must provide a ~/.xinitrc file containing session startup
+          commands, see startx(1). This is not automatically generated
+          from the desktopManager and windowManager settings.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver = {
+      exportConfiguration = true;
+    };
+
+    # Other displayManagers log to /dev/null because they're services and put
+    # Xorg's stdout in the journal.
+    #
+    # To send the log to Xorg's default log location ($XDG_DATA_HOME/xorg/), we do
+    # not specify a log file when running X.
+    services.xserver.logFile = mkDefault null;
+
+    # Implement xserverArgs via xinit's system-wide xserverrc
+    environment.etc."X11/xinit/xserverrc".source = pkgs.writeShellScript "xserverrc" ''
+      exec ${pkgs.xorg.xorgserver}/bin/X ${toString config.services.xserver.displayManager.xserverArgs} "$@"
+    '';
+    environment.systemPackages =  with pkgs; [ xorg.xinit ];
+  };
+
+}
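Enabling the startx pseudo-display manager is a single option; users then provide their own ~/.xinitrc as described above:

  services.xserver.enable = true;
  services.xserver.displayManager.startx.enable = true;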
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/sx.nix b/nixpkgs/nixos/modules/services/x11/display-managers/sx.nix
new file mode 100644
index 000000000000..6a7fc1a040e7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/sx.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.xserver.displayManager.sx;
+
+in {
+  options = {
+    services.xserver.displayManager.sx = {
+      enable = mkEnableOption (lib.mdDoc "sx pseudo-display manager") // {
+        description = lib.mdDoc ''
+          Whether to enable the "sx" pseudo-display manager, which allows users
+          to start X manually via the "sx" command from a vt shell. The X server
+          runs under the user's id, not as root. The user must provide a
+          ~/.config/sx/sxrc file containing session startup commands, see
+          sx(1). This is not automatically generated from the desktopManager
+          and windowManager settings. sx doesn't have a way to directly set
+          X server flags, but it can be done by overriding its xorgserver
+          dependency.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.sx ];
+    services.xserver = {
+      exportConfiguration = true;
+      logFile = mkDefault null;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ figsoda ];
+}
diff --git a/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix b/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
new file mode 100644
index 000000000000..0861530f21e8
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/display-managers/xpra.nix
@@ -0,0 +1,259 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.displayManager.xpra;
+  dmcfg = config.services.xserver.displayManager;
+
+in
+
+{
+  ###### interface
+
+  options = {
+    services.xserver.displayManager.xpra = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable xpra as display manager.";
+      };
+
+      bindTcp = mkOption {
+        default = "127.0.0.1:10000";
+        example = "0.0.0.0:10000";
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Bind xpra to TCP";
+      };
+
+      desktop = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "gnome-shell";
+        description = lib.mdDoc "Start a desktop environment instead of seamless mode";
+      };
+
+      auth = mkOption {
+        type = types.str;
+        default = "pam";
+        example = "password:value=mysecret";
+        description = lib.mdDoc "Authentication to use when connecting to xpra";
+      };
+
+      pulseaudio = mkEnableOption (lib.mdDoc "pulseaudio audio streaming");
+
+      extraOptions = mkOption {
+        description = lib.mdDoc "Extra xpra options";
+        default = [];
+        type = types.listOf types.str;
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver.videoDrivers = ["dummy"];
+
+    services.xserver.monitorSection = ''
+      HorizSync   1.0 - 2000.0
+      VertRefresh 1.0 - 200.0
+      #To add your own modes here, use a modeline calculator, like:
+      # cvt:
+      # https://www.x.org/archive/X11R7.5/doc/man/man1/cvt.1.html
+      # xtiming:
+      # https://xtiming.sourceforge.net/cgi-bin/xtiming.pl
+      # gtf:
+      # https://gtf.sourceforge.net/
+      #This can be used to get a specific DPI, but only for the default resolution:
+      #DisplaySize 508 317
+      #NOTE: the highest modes will not work without increasing the VideoRam
+      # for the dummy video card.
+      #Modeline "16000x15000" 300.00  16000 16408 18000 20000  15000 15003 15013 15016
+      #Modeline "15000x15000" 281.25  15000 15376 16872 18744  15000 15003 15013 15016
+      #Modeline "16384x8192" 167.75  16384 16800 18432 20480  8192 8195 8205 8208
+      #Modeline "15360x8640" 249.00 15360 15752 17280 19200 8640 8643 8648 8651
+      Modeline "8192x4096" 193.35 8192 8224 8952 8984 4096 4196 4200 4301
+      Modeline "7680x4320" 208.00 7680 7880 8640 9600 4320 4323 4328 4335
+      Modeline "6400x4096" 151.38 6400 6432 7000 7032 4096 4196 4200 4301
+      Modeline "6400x2560" 91.59 6400 6432 6776 6808 2560 2623 2626 2689
+      Modeline "6400x2160" 160.51 6400 6432 7040 7072 2160 2212 2216 2269
+      Modeline "5760x2160" 149.50 5760 5768 6320 6880 2160 2161 2164 2173
+      Modeline "5680x1440" 142.66 5680 5712 6248 6280 1440 1474 1478 1513
+      Modeline "5496x1200" 199.13 5496 5528 6280 6312 1200 1228 1233 1261
+      Modeline "5280x2560" 75.72 5280 5312 5592 5624 2560 2623 2626 2689
+      Modeline "5280x1920" 56.04 5280 5312 5520 5552 1920 1967 1969 2017
+      Modeline "5280x1200" 191.40 5280 5312 6032 6064 1200 1228 1233 1261
+      Modeline "5280x1080" 169.96 5280 5312 5952 5984 1080 1105 1110 1135
+      Modeline "5120x3200" 199.75 5120 5152 5904 5936 3200 3277 3283 3361
+      Modeline "5120x2560" 73.45 5120 5152 5424 5456 2560 2623 2626 2689
+      Modeline "5120x2880" 185.50 5120 5256 5760 6400 2880 2883 2888 2899
+      Modeline "4800x1200" 64.42 4800 4832 5072 5104 1200 1229 1231 1261
+      Modeline "4720x3840" 227.86 4720 4752 5616 5648 3840 3933 3940 4033
+      Modeline "4400x2560" 133.70 4400 4432 4936 4968 2560 2622 2627 2689
+      Modeline "4480x1440" 72.94 4480 4512 4784 4816 1440 1475 1478 1513
+      Modeline "4240x1440" 69.09 4240 4272 4528 4560 1440 1475 1478 1513
+      Modeline "4160x1440" 67.81 4160 4192 4448 4480 1440 1475 1478 1513
+      Modeline "4096x2304" 249.25 4096 4296 4720 5344 2304 2307 2312 2333
+      Modeline "4096x2160" 111.25 4096 4200 4608 5120 2160 2163 2173 2176
+      Modeline "4000x1660" 170.32 4000 4128 4536 5072 1660 1661 1664 1679
+      Modeline "4000x1440" 145.00 4000 4088 4488 4976 1440 1441 1444 1457
+      Modeline "3904x1440" 63.70 3904 3936 4176 4208 1440 1475 1478 1513
+      Modeline "3840x2880" 133.43 3840 3872 4376 4408 2880 2950 2955 3025
+      Modeline "3840x2560" 116.93 3840 3872 4312 4344 2560 2622 2627 2689
+      Modeline "3840x2160" 104.25 3840 3944 4320 4800 2160 2163 2168 2175
+      Modeline "3840x2048" 91.45 3840 3872 4216 4248 2048 2097 2101 2151
+      Modeline "3840x1200" 108.89 3840 3872 4280 4312 1200 1228 1232 1261
+      Modeline "3840x1080" 100.38 3840 3848 4216 4592 1080 1081 1084 1093
+      Modeline "3864x1050" 94.58 3864 3896 4248 4280 1050 1074 1078 1103
+      Modeline "3600x1200" 106.06 3600 3632 3984 4368 1200 1201 1204 1214
+      Modeline "3600x1080" 91.02 3600 3632 3976 4008 1080 1105 1109 1135
+      Modeline "3520x1196" 99.53 3520 3552 3928 3960 1196 1224 1228 1256
+      Modeline "3360x2560" 102.55 3360 3392 3776 3808 2560 2622 2627 2689
+      Modeline "3360x1050" 293.75 3360 3576 3928 4496 1050 1053 1063 1089
+      Modeline "3288x1080" 39.76 3288 3320 3464 3496 1080 1106 1108 1135
+      Modeline "3200x1800" 233.00 3200 3384 3720 4240  1800 1803 1808 1834
+      Modeline "3200x1080" 236.16 3200 3232 4128 4160 1080 1103 1112 1135
+      Modeline "3120x2560" 95.36 3120 3152 3512 3544 2560 2622 2627 2689
+      Modeline "3120x1050" 272.75 3120 3320 3648 4176 1050 1053 1063 1089
+      Modeline "3072x2560" 93.92 3072 3104 3456 3488 2560 2622 2627 2689
+      Modeline "3008x1692" 130.93 3008 3112 3416 3824 1692 1693 1696 1712
+      Modeline "3000x2560" 91.77 3000 3032 3376 3408 2560 2622 2627 2689
+      Modeline "2880x1620" 396.25 2880 3096 3408 3936 1620 1623 1628 1679
+      Modeline "2728x1680" 148.02 2728 2760 3320 3352 1680 1719 1726 1765
+      Modeline "2560x2240" 151.55 2560 2688 2952 3344 2240 2241 2244 2266
+      Modeline "2560x1600" 47.12 2560 2592 2768 2800 1600 1639 1642 1681
+      Modeline "2560x1440" 42.12 2560 2592 2752 2784 1440 1475 1478 1513
+      Modeline "2560x1400" 267.86 2560 2592 3608 3640 1400 1429 1441 1471
+      Modeline "2048x2048" 49.47 2048 2080 2264 2296 2048 2097 2101 2151
+      Modeline "2048x1536" 80.06 2048 2104 2312 2576 1536 1537 1540 1554
+      Modeline "2048x1152" 197.97 2048 2184 2408 2768 1152 1153 1156 1192
+      Modeline "2048x1152" 165.92 2048 2080 2704 2736 1152 1176 1186 1210
+      Modeline "1920x1440" 69.47 1920 1960 2152 2384 1440 1441 1444 1457
+      Modeline "1920x1200" 26.28 1920 1952 2048 2080 1200 1229 1231 1261
+      Modeline "1920x1080" 23.53 1920 1952 2040 2072 1080 1106 1108 1135
+      Modeline "1728x1520" 205.42 1728 1760 2536 2568 1520 1552 1564 1597
+      Modeline "1680x1050" 20.08 1680 1712 1784 1816 1050 1075 1077 1103
+      Modeline "1600x1200" 22.04 1600 1632 1712 1744 1200 1229 1231 1261
+      Modeline "1600x900" 33.92 1600 1632 1760 1792 900 921 924 946
+      Modeline "1440x900" 30.66 1440 1472 1584 1616 900 921 924 946
+      Modeline "1400x900" 103.50 1400 1480 1624 1848 900 903 913 934
+      ModeLine "1366x768" 72.00 1366 1414 1446 1494  768 771 777 803
+      Modeline "1360x768" 24.49 1360 1392 1480 1512 768 786 789 807
+      Modeline "1280x1024" 31.50 1280 1312 1424 1456 1024 1048 1052 1076
+      Modeline "1280x800" 24.15 1280 1312 1400 1432 800 819 822 841
+      Modeline "1280x768" 23.11 1280 1312 1392 1424 768 786 789 807
+      Modeline "1280x720" 59.42 1280 1312 1536 1568 720 735 741 757
+      Modeline "1024x768" 18.71 1024 1056 1120 1152 768 786 789 807
+      Modeline "1024x640" 41.98 1024 1056 1208 1240 640 653 659 673
+      Modeline "1024x576" 46.50 1024 1064 1160 1296  576 579 584 599
+      Modeline "768x1024" 19.50 768 800 872 904 1024 1048 1052 1076
+      Modeline "960x540" 40.75 960 992 1088 1216 540 543 548 562
+      Modeline "864x486"  32.50 864 888 968 1072 486 489 494 506
+      Modeline "720x405" 22.50 720 744 808 896  405 408 413 422
+      Modeline "640x360" 14.75 640 664 720 800 360 363 368 374
+      #common resolutions for android devices (both orientations):
+      Modeline "800x1280" 25.89 800 832 928 960 1280 1310 1315 1345
+      Modeline "1280x800" 24.15 1280 1312 1400 1432 800 819 822 841
+      Modeline "720x1280" 30.22 720 752 864 896 1280 1309 1315 1345
+      Modeline "1280x720" 27.41 1280 1312 1416 1448 720 737 740 757
+      Modeline "768x1024" 24.93 768 800 888 920 1024 1047 1052 1076
+      Modeline "1024x768" 23.77 1024 1056 1144 1176 768 785 789 807
+      Modeline "600x1024" 19.90 600 632 704 736 1024 1047 1052 1076
+      Modeline "1024x600" 18.26 1024 1056 1120 1152 600 614 617 631
+      Modeline "536x960" 16.74 536 568 624 656 960 982 986 1009
+      Modeline "960x536" 15.23 960 992 1048 1080 536 548 551 563
+      Modeline "600x800" 15.17 600 632 688 720 800 818 822 841
+      Modeline "800x600" 14.50 800 832 880 912 600 614 617 631
+      Modeline "480x854" 13.34 480 512 560 592 854 873 877 897
+      Modeline "848x480" 12.09 848 880 920 952 480 491 493 505
+      Modeline "480x800" 12.43 480 512 552 584 800 818 822 841
+      Modeline "800x480" 11.46 800 832 872 904 480 491 493 505
+      #resolutions for android devices (both orientations)
+      #minus the status bar
+      #38px status bar (and width rounded up)
+      Modeline "800x1242" 25.03 800 832 920 952 1242 1271 1275 1305
+      Modeline "1280x762" 22.93 1280 1312 1392 1424 762 780 783 801
+      Modeline "720x1242" 29.20 720 752 856 888 1242 1271 1276 1305
+      Modeline "1280x682" 25.85 1280 1312 1408 1440 682 698 701 717
+      Modeline "768x986" 23.90 768 800 888 920 986 1009 1013 1036
+      Modeline "1024x730" 22.50 1024 1056 1136 1168 730 747 750 767
+      Modeline "600x986" 19.07 600 632 704 736 986 1009 1013 1036
+      Modeline "1024x562" 17.03 1024 1056 1120 1152 562 575 578 591
+      Modeline "536x922" 16.01 536 568 624 656 922 943 947 969
+      Modeline "960x498" 14.09 960 992 1040 1072 498 509 511 523
+      Modeline "600x762" 14.39 600 632 680 712 762 779 783 801
+      Modeline "800x562" 13.52 800 832 880 912 562 575 578 591
+      Modeline "480x810" 12.59 480 512 552 584 810 828 832 851
+      Modeline "848x442" 11.09 848 880 920 952 442 452 454 465
+      Modeline "480x762" 11.79 480 512 552 584 762 779 783 801
+    '';
+
+    services.xserver.resolutions = [
+      {x="8192"; y="4096";}
+      {x="5120"; y="3200";}
+      {x="3840"; y="2880";}
+      {x="3840"; y="2560";}
+      {x="3840"; y="2048";}
+      {x="3840"; y="2160";}
+      {x="2048"; y="2048";}
+      {x="2560"; y="1600";}
+      {x="1920"; y="1440";}
+      {x="1920"; y="1200";}
+      {x="1920"; y="1080";}
+      {x="1600"; y="1200";}
+      {x="1680"; y="1050";}
+      {x="1600"; y="900";}
+      {x="1400"; y="1050";}
+      {x="1440"; y="900";}
+      {x="1280"; y="1024";}
+      {x="1366"; y="768";}
+      {x="1280"; y="800";}
+      {x="1024"; y="768";}
+      {x="1024"; y="600";}
+      {x="800"; y="600";}
+      {x="320"; y="200";}
+    ];
+
+    services.xserver.serverFlagsSection = ''
+      Option "DontVTSwitch" "true"
+      Option "PciForceNone" "true"
+      Option "AutoEnableDevices" "false"
+      Option "AutoAddDevices" "false"
+    '';
+
+    services.xserver.deviceSection = ''
+      VideoRam 192000
+    '';
+
+    services.xserver.displayManager.job.execCmd = ''
+      ${optionalString (cfg.pulseaudio)
+        "export PULSE_COOKIE=/run/pulse/.config/pulse/cookie"}
+      exec ${pkgs.xpra}/bin/xpra ${if cfg.desktop == null then "start" else "start-desktop --start=${cfg.desktop}"} \
+        --daemon=off \
+        --log-dir=/var/log \
+        --log-file=xpra.log \
+        --opengl=on \
+        --clipboard=on \
+        --notifications=on \
+        --speaker=yes \
+        --mdns=no \
+        --pulseaudio=no \
+        ${optionalString (cfg.pulseaudio) "--sound-source=pulse"} \
+        --socket-dirs=/run/xpra \
+        --xvfb="xpra_Xdummy ${concatStringsSep " " dmcfg.xserverArgs}" \
+        ${optionalString (cfg.bindTcp != null) "--bind-tcp=${cfg.bindTcp}"} \
+        --auth=${cfg.auth} \
+        ${concatStringsSep " " cfg.extraOptions}
+    '';
+
+    services.xserver.terminateOnReset = false;
+
+    environment.systemPackages = [pkgs.xpra];
+
+    virtualisation.virtualbox.guest.x11 = false;
+    hardware.pulseaudio.enable = mkDefault cfg.pulseaudio;
+    hardware.pulseaudio.systemWide = mkDefault cfg.pulseaudio;
+  };
+
+}
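A sketch using the xpra options above, with values taken from the option defaults and examples (binding to 0.0.0.0 exposes the server on all interfaces):

  services.xserver.displayManager.xpra = {
    enable = true;
    bindTcp = "0.0.0.0:10000";
    desktop = "gnome-shell";   # start a full desktop instead of seamless mode
    auth = "pam";
    pulseaudio = true;
  };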
diff --git a/nixpkgs/nixos/modules/services/x11/extra-layouts.nix b/nixpkgs/nixos/modules/services/x11/extra-layouts.nix
new file mode 100644
index 000000000000..ab7e39739eeb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/extra-layouts.nix
@@ -0,0 +1,143 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  layouts = config.services.xserver.xkb.extraLayouts;
+
+  layoutOpts = {
+    options = {
+      description = mkOption {
+        type = types.str;
+        description = lib.mdDoc "A short description of the layout.";
+      };
+
+      languages = mkOption {
+        type = types.listOf types.str;
+        description =
+          lib.mdDoc ''
+            A list of languages provided by the layout.
+            (Use ISO 639-2 codes, for example: "eng" for English)
+          '';
+      };
+
+      compatFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          The path to the xkb compat file.
+          This file sets the compatibility state, used to preserve
+          compatibility with xkb-unaware programs.
+          It must contain a `xkb_compat "name" { ... }` block.
+        '';
+      };
+
+      geometryFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          The path to the xkb geometry file.
+          This (completely optional) file describes the physical layout of
+          the keyboard, which may be used by programs to depict it.
+          It must contain a `xkb_geometry "name" { ... }` block.
+        '';
+      };
+
+      keycodesFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          The path to the xkb keycodes file.
+          This file specifies the range and the interpretation of the raw
+          keycodes sent by the keyboard.
+          It must contain a `xkb_keycodes "name" { ... }` block.
+        '';
+      };
+
+      symbolsFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          The path to the xkb symbols file.
+          This is the most important file: it defines which symbol or action
+          maps to each key and must contain a
+          `xkb_symbols "name" { ... }` block.
+        '';
+      };
+
+      typesFile = mkOption {
+        type = types.nullOr types.path;
+        default = null;
+        description = lib.mdDoc ''
+          The path to the xkb types file.
+          This file specifies the key types that can be associated with
+          the various keyboard keys.
+          It must contain a `xkb_types "name" { ... }` block.
+        '';
+      };
+
+    };
+  };
+
+  xkb_patched = pkgs.xorg.xkeyboardconfig_custom {
+    layouts = config.services.xserver.xkb.extraLayouts;
+  };
+
+in
+
+{
+
+  imports = [
+    (lib.mkRenamedOptionModuleWith {
+      sinceRelease = 2311;
+      from = [ "services" "xserver" "extraLayouts" ];
+      to = [ "services" "xserver" "xkb" "extraLayouts" ];
+    })
+  ];
+
+  ###### interface
+
+  options.services.xserver.xkb = {
+    extraLayouts = mkOption {
+      type = types.attrsOf (types.submodule layoutOpts);
+      default = { };
+      example = literalExpression
+        ''
+          {
+            mine = {
+              description = "My custom xkb layout.";
+              languages = [ "eng" ];
+              symbolsFile = /path/to/my/layout;
+            };
+          }
+        '';
+      description = lib.mdDoc ''
+        Extra custom layouts that will be included in the xkb configuration.
+        Information on how to create a new layout can be found here:
+        <https://www.x.org/releases/current/doc/xorg-docs/input/XKB-Enhancing.html#Defining_New_Layouts>.
+        For more examples see
+        <https://wiki.archlinux.org/index.php/X_KeyBoard_extension#Basic_examples>.
+      '';
+    };
+
+  };
+
+  ###### implementation
+
+  config = mkIf (layouts != { }) {
+
+    environment.sessionVariables = {
+      # runtime override supported by multiple libraries e. g. libxkbcommon
+      # https://xkbcommon.org/doc/current/group__include-path.html
+      XKB_CONFIG_ROOT = config.services.xserver.xkb.dir;
+    };
+
+    services.xserver = {
+      xkb.dir = "${xkb_patched}/etc/X11/xkb";
+      exportConfiguration = config.services.xserver.displayManager.startx.enable
+        || config.services.xserver.displayManager.sx.enable;
+    };
+
+  };
+
+}
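A minimal usage sketch for the option defined above, as it might appear in configuration.nix; the layout name "us-greek" and the ./us-greek symbols file are made-up placeholders:

    services.xserver.xkb.extraLayouts.us-greek = {
      description = "US layout with Greek letters on a third level";
      languages   = [ "eng" "gre" ];   # ISO 639-2 codes
      symbolsFile = ./us-greek;        # must contain an xkb_symbols "name" { ... } block
    };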
diff --git a/nixpkgs/nixos/modules/services/x11/fractalart.nix b/nixpkgs/nixos/modules/services/x11/fractalart.nix
new file mode 100644
index 000000000000..f7fc1ec96228
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/fractalart.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.fractalart;
+in {
+  options.services.fractalart = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc "Enable FractalArt for generating colorful wallpapers on login";
+    };
+
+    width = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      example = 1920;
+      description = lib.mdDoc "Screen width";
+    };
+
+    height = mkOption {
+      type = types.nullOr types.int;
+      default = null;
+      example = 1080;
+      description = lib.mdDoc "Screen height";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.haskellPackages.FractalArt ];
+    services.xserver.displayManager.sessionCommands =
+      "${pkgs.haskellPackages.FractalArt}/bin/FractalArt --no-bg -f .background-image"
+        + optionalString (cfg.width  != null) " -w ${toString cfg.width}"
+        + optionalString (cfg.height != null) " -h ${toString cfg.height}";
+  };
+}
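A usage sketch for the FractalArt module above; width and height are optional, and the 1920x1080 values are just an example resolution:

    services.fractalart = {
      enable = true;
      width  = 1920;
      height = 1080;
    };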
diff --git a/nixpkgs/nixos/modules/services/x11/gdk-pixbuf.nix b/nixpkgs/nixos/modules/services/x11/gdk-pixbuf.nix
new file mode 100644
index 000000000000..9c088e4cc423
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/gdk-pixbuf.nix
@@ -0,0 +1,28 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.xserver.gdk-pixbuf;
+
+  loadersCache = pkgs.gnome._gdkPixbufCacheBuilder_DO_NOT_USE {
+    extraLoaders = lib.unique (cfg.modulePackages);
+  };
+in
+
+{
+  options = {
+    services.xserver.gdk-pixbuf.modulePackages = lib.mkOption {
+      type = lib.types.listOf lib.types.package;
+      default = [ ];
+      description = lib.mdDoc "Packages providing GDK-Pixbuf modules, for cache generation.";
+    };
+  };
+
+  # If there is any package configured in modulePackages, we generate the
+  # loaders.cache based on that and set the environment variable
+  # GDK_PIXBUF_MODULE_FILE to point to it.
+  config = lib.mkIf (cfg.modulePackages != []) {
+    environment.sessionVariables = {
+      GDK_PIXBUF_MODULE_FILE = "${loadersCache}";
+    };
+  };
+}
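A usage sketch, assuming pkgs.librsvg as an example of a package that ships a GDK-Pixbuf loader module (any loader-providing package would be listed the same way):

    services.xserver.gdk-pixbuf.modulePackages = [ pkgs.librsvg ];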
diff --git a/nixpkgs/nixos/modules/services/x11/hardware/cmt.nix b/nixpkgs/nixos/modules/services/x11/hardware/cmt.nix
new file mode 100644
index 000000000000..a44221141c3c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/hardware/cmt.nix
@@ -0,0 +1,59 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+cfg = config.services.xserver.cmt;
+etcPath = "X11/xorg.conf.d";
+
+in {
+
+  options = {
+
+    services.xserver.cmt = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enable chrome multitouch input (cmt). Touchpad drivers that are configured for chromebooks.";
+      };
+      models = mkOption {
+        type = types.enum [ "atlas" "banjo" "candy" "caroline" "cave" "celes" "clapper" "cyan" "daisy" "elan" "elm" "enguarde" "eve" "expresso" "falco" "gandof" "glimmer" "gnawty" "heli" "kevin" "kip" "leon" "lulu" "orco" "pbody" "peppy" "pi" "pit" "puppy" "quawks" "rambi" "samus" "snappy" "spring" "squawks" "swanky" "winky" "wolf" "auron_paine" "auron_yuna" "daisy_skate" "nyan_big" "nyan_blaze" "veyron_jaq" "veyron_jerry" "veyron_mighty" "veyron_minnie" "veyron_speedy" ];
+        example = "banjo";
+        description = lib.mdDoc ''
+          Which models to enable cmt for. Enter the Code Name for your Chromebook.
+          Code Name can be found at <https://www.chromium.org/chromium-os/developer-information-for-chrome-os-devices>.
+        '';
+      };
+    }; #closes services
+  }; #closes options
+
+  config = mkIf cfg.enable {
+
+    services.xserver.modules = [ pkgs.xf86_input_cmt ];
+
+    environment.etc = {
+      "${etcPath}/40-touchpad-cmt.conf" = {
+        source = "${pkgs.chromium-xorg-conf}/40-touchpad-cmt.conf";
+      };
+      "${etcPath}/50-touchpad-cmt-${cfg.models}.conf" = {
+        source = "${pkgs.chromium-xorg-conf}/50-touchpad-cmt-${cfg.models}.conf";
+      };
+      "${etcPath}/60-touchpad-cmt-${cfg.models}.conf" = {
+        source = "${pkgs.chromium-xorg-conf}/60-touchpad-cmt-${cfg.models}.conf";
+      };
+    };
+
+    assertions = [
+      {
+        assertion = !config.services.xserver.libinput.enable;
+        message = ''
+          cmt and libinput are incompatible: you cannot enable both.
+          To use cmt you need to disable libinput with `services.xserver.libinput.enable = false`.
+          If you haven't enabled libinput in configuration.nix, it is enabled by default by
+          another X server module.
+        '';
+      }
+    ];
+  };
+}
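A usage sketch using the "banjo" code name from the option's example; libinput is disabled explicitly because the assertion above forbids enabling both:

    services.xserver.cmt = {
      enable = true;
      models = "banjo";
    };
    services.xserver.libinput.enable = false;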
diff --git a/nixpkgs/nixos/modules/services/x11/hardware/digimend.nix b/nixpkgs/nixos/modules/services/x11/hardware/digimend.nix
new file mode 100644
index 000000000000..f82aac41a320
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/hardware/digimend.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.digimend;
+
+  pkg = config.boot.kernelPackages.digimend;
+
+in
+
+{
+
+  options = {
+
+    services.xserver.digimend = {
+
+      enable = mkEnableOption (lib.mdDoc "the digimend drivers for Huion/XP-Pen/etc. tablets");
+
+    };
+
+  };
+
+
+  config = mkIf cfg.enable {
+
+    # digimend drivers use xsetwacom and wacom X11 drivers
+    services.xserver.wacom.enable = true;
+
+    boot.extraModulePackages = [ pkg ];
+
+    environment.etc."X11/xorg.conf.d/50-digimend.conf".source =
+      "${pkg}/usr/share/X11/xorg.conf.d/50-digimend.conf";
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/hardware/libinput.nix b/nixpkgs/nixos/modules/services/x11/hardware/libinput.nix
new file mode 100644
index 000000000000..d2a5b5895e0a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/hardware/libinput.nix
@@ -0,0 +1,304 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.xserver.libinput;
+
+    xorgBool = v: if v then "on" else "off";
+
+    mkConfigForDevice = deviceType: {
+      dev = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/dev/input/event0";
+        description =
+          lib.mdDoc ''
+            Path for ${deviceType} device.  Set to `null` to apply to any
+            auto-detected ${deviceType}.
+          '';
+      };
+
+      accelProfile = mkOption {
+        type = types.enum [ "flat" "adaptive" ];
+        default = "adaptive";
+        example = "flat";
+        description =
+          lib.mdDoc ''
+            Sets the pointer acceleration profile to the given profile.
+            Permitted values are `adaptive`, `flat`.
+            Not all devices support this option or all profiles.
+            If a profile is unsupported, the default profile for the device is used.
+            `flat`: Pointer motion is scaled by a constant
+            (device-specific) factor, independent of the current speed.
+            `adaptive`: Pointer acceleration depends on the input speed.
+            This is the default profile for most devices.
+          '';
+      };
+
+      accelSpeed = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "-0.5";
+        description = lib.mdDoc "Cursor acceleration (how fast speed increases from minSpeed to maxSpeed).";
+      };
+
+      buttonMapping = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "1 6 3 4 5 0 7";
+        description =
+          lib.mdDoc ''
+            Sets the logical button mapping for this device, see XSetPointerMapping(3). The string must
+            be a space-separated list of button mappings in the order of the logical buttons on the
+            device, starting with button 1. The default mapping is "1 2 3 ... 32". A mapping of 0
+            deactivates the button. Multiple buttons can have the same mapping. Invalid mapping strings
+            are discarded and the default mapping is used for all buttons. Buttons not specified in the
+            user's mapping use the default mapping. See section BUTTON MAPPING for more details.
+          '';
+      };
+
+      calibrationMatrix = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "0.5 0 0 0 0.8 0.1 0 0 1";
+        description =
+          lib.mdDoc ''
+            A string of 9 space-separated floating point numbers. Sets the calibration matrix to the
+            3x3 matrix where the first row is (abc), the second row is (def) and the third row is (ghi).
+          '';
+      };
+
+      clickMethod = mkOption {
+        type = types.nullOr (types.enum [ "none" "buttonareas" "clickfinger" ]);
+        default = null;
+        example = "buttonareas";
+        description =
+          lib.mdDoc ''
+            Enables a click method. Permitted values are `none`,
+            `buttonareas`, `clickfinger`.
+            Not all devices support all methods, if an option is unsupported,
+            the default click method for this device is used.
+          '';
+      };
+
+      leftHanded = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enables left-handed button orientation, i.e. swapping left and right buttons.";
+      };
+
+      middleEmulation = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            Enables middle button emulation. When enabled, pressing the left and right buttons
+            simultaneously produces a middle mouse button click.
+          '';
+      };
+
+      naturalScrolling = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Enables or disables natural scrolling behavior.";
+      };
+
+      scrollButton = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 1;
+        description =
+          lib.mdDoc ''
+            Designates a button as scroll button. If the ScrollMethod is button and the button is logically
+            held down, x/y axis movement is converted into scroll events.
+          '';
+      };
+
+      scrollMethod = mkOption {
+        type = types.enum [ "twofinger" "edge" "button" "none" ];
+        default = "twofinger";
+        example = "edge";
+        description =
+          lib.mdDoc ''
+            Specify the scrolling method: `twofinger`, `edge`,
+            `button`, or `none`
+          '';
+      };
+
+      horizontalScrolling = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            Enables or disables horizontal scrolling. When disabled, this driver will discard any
+            horizontal scroll events coming from libinput. Note that this does not disable horizontal
+            scrolling in libinput itself; it merely discards the horizontal axis from any scroll events.
+          '';
+      };
+
+      sendEventsMode = mkOption {
+        type = types.enum [ "disabled" "enabled" "disabled-on-external-mouse" ];
+        default = "enabled";
+        example = "disabled";
+        description =
+          lib.mdDoc ''
+            Sets the send events mode to `disabled`, `enabled`,
+            or `disabled-on-external-mouse`
+          '';
+      };
+
+      tapping = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            Enables or disables tap-to-click behavior.
+          '';
+      };
+
+      tappingButtonMap = mkOption {
+        type = types.nullOr (types.enum [ "lrm" "lmr" ]);
+        default = null;
+        description = lib.mdDoc ''
+          Set the button mapping for 1/2/3-finger taps to left/right/middle or left/middle/right, respectively.
+        '';
+      };
+
+      tappingDragLock = mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            Enables or disables drag lock during tapping behavior. When enabled, a finger up during tap-
+            and-drag will not immediately release the button. If the finger is set down again within the
+            timeout, the dragging process continues.
+          '';
+      };
+
+      transformationMatrix = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "0.5 0 0 0 0.8 0.1 0 0 1";
+        description = lib.mdDoc ''
+          A string of 9 space-separated floating point numbers. Sets the transformation matrix to
+          the 3x3 matrix where the first row is (abc), the second row is (def) and the third row is (ghi).
+        '';
+      };
+
+      disableWhileTyping = mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            Disable the ${deviceType} while typing on the keyboard.
+          '';
+      };
+
+      additionalOptions = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+        ''
+          Option "DragLockButtons" "L1 B1 L2 B2"
+        '';
+        description = lib.mdDoc ''
+          Additional options for the libinput ${deviceType} driver. See
+          {manpage}`libinput(4)`
+          for available options.
+        '';
+      };
+    };
+
+    mkX11ConfigForDevice = deviceType: matchIs: ''
+      Identifier "libinput ${deviceType} configuration"
+      MatchDriver "libinput"
+      MatchIs${matchIs} "${xorgBool true}"
+      ${optionalString (cfg.${deviceType}.dev != null) ''MatchDevicePath "${cfg.${deviceType}.dev}"''}
+      Option "AccelProfile" "${cfg.${deviceType}.accelProfile}"
+      ${optionalString (cfg.${deviceType}.accelSpeed != null) ''Option "AccelSpeed" "${cfg.${deviceType}.accelSpeed}"''}
+      ${optionalString (cfg.${deviceType}.buttonMapping != null) ''Option "ButtonMapping" "${cfg.${deviceType}.buttonMapping}"''}
+      ${optionalString (cfg.${deviceType}.calibrationMatrix != null) ''Option "CalibrationMatrix" "${cfg.${deviceType}.calibrationMatrix}"''}
+      ${optionalString (cfg.${deviceType}.transformationMatrix != null) ''Option "TransformationMatrix" "${cfg.${deviceType}.transformationMatrix}"''}
+      ${optionalString (cfg.${deviceType}.clickMethod != null) ''Option "ClickMethod" "${cfg.${deviceType}.clickMethod}"''}
+      Option "LeftHanded" "${xorgBool cfg.${deviceType}.leftHanded}"
+      Option "MiddleEmulation" "${xorgBool cfg.${deviceType}.middleEmulation}"
+      Option "NaturalScrolling" "${xorgBool cfg.${deviceType}.naturalScrolling}"
+      ${optionalString (cfg.${deviceType}.scrollButton != null) ''Option "ScrollButton" "${toString cfg.${deviceType}.scrollButton}"''}
+      Option "ScrollMethod" "${cfg.${deviceType}.scrollMethod}"
+      Option "HorizontalScrolling" "${xorgBool cfg.${deviceType}.horizontalScrolling}"
+      Option "SendEventsMode" "${cfg.${deviceType}.sendEventsMode}"
+      Option "Tapping" "${xorgBool cfg.${deviceType}.tapping}"
+      ${optionalString (cfg.${deviceType}.tappingButtonMap != null) ''Option "TappingButtonMap" "${cfg.${deviceType}.tappingButtonMap}"''}
+      Option "TappingDragLock" "${xorgBool cfg.${deviceType}.tappingDragLock}"
+      Option "DisableWhileTyping" "${xorgBool cfg.${deviceType}.disableWhileTyping}"
+      ${cfg.${deviceType}.additionalOptions}
+    '';
+in {
+
+  imports =
+    (map (option: mkRenamedOptionModule ([ "services" "xserver" "libinput" option ]) [ "services" "xserver" "libinput" "touchpad" option ]) [
+      "accelProfile"
+      "accelSpeed"
+      "buttonMapping"
+      "calibrationMatrix"
+      "clickMethod"
+      "leftHanded"
+      "middleEmulation"
+      "naturalScrolling"
+      "scrollButton"
+      "scrollMethod"
+      "horizontalScrolling"
+      "sendEventsMode"
+      "tapping"
+      "tappingButtonMap"
+      "tappingDragLock"
+      "transformationMatrix"
+      "disableWhileTyping"
+      "additionalOptions"
+    ]);
+
+  options = {
+
+    services.xserver.libinput = {
+      enable = mkEnableOption (lib.mdDoc "libinput") // {
+        default = config.services.xserver.enable;
+        defaultText = lib.literalExpression "config.services.xserver.enable";
+      };
+      mouse = mkConfigForDevice "mouse";
+      touchpad = mkConfigForDevice "touchpad";
+    };
+  };
+
+
+  config = mkIf cfg.enable {
+
+    services.xserver.modules = [ pkgs.xorg.xf86inputlibinput ];
+
+    environment.systemPackages = [ pkgs.xorg.xf86inputlibinput ];
+
+    environment.etc =
+      let cfgPath = "X11/xorg.conf.d/40-libinput.conf";
+      in {
+        ${cfgPath} = {
+          source = pkgs.xorg.xf86inputlibinput.out + "/share/" + cfgPath;
+        };
+      };
+
+    services.udev.packages = [ pkgs.libinput.out ];
+
+    services.xserver.inputClassSections = [
+      (mkX11ConfigForDevice "mouse" "Pointer")
+      (mkX11ConfigForDevice "touchpad" "Touchpad")
+    ];
+
+    assertions = [
+      # already present in synaptics.nix
+      /* {
+        assertion = !config.services.xserver.synaptics.enable;
+        message = "Synaptics and libinput are incompatible, you cannot enable both (in services.xserver).";
+      } */
+    ];
+
+  };
+
+}
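A usage sketch touching a few of the per-device options defined above; the values are illustrative, not recommendations:

    services.xserver.libinput = {
      enable = true;   # already the default when services.xserver.enable is set
      touchpad = {
        naturalScrolling   = true;
        tapping            = true;
        disableWhileTyping = true;
        accelProfile       = "adaptive";
      };
      mouse.accelProfile = "flat";
    };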
diff --git a/nixpkgs/nixos/modules/services/x11/hardware/synaptics.nix b/nixpkgs/nixos/modules/services/x11/hardware/synaptics.nix
new file mode 100644
index 000000000000..7b45222ac64c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/hardware/synaptics.nix
@@ -0,0 +1,218 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.xserver.synaptics;
+    opt = options.services.xserver.synaptics;
+    tapConfig = if cfg.tapButtons then enabledTapConfig else disabledTapConfig;
+    enabledTapConfig = ''
+      Option "MaxTapTime" "180"
+      Option "MaxTapMove" "220"
+      Option "TapButton1" "${builtins.elemAt cfg.fingersMap 0}"
+      Option "TapButton2" "${builtins.elemAt cfg.fingersMap 1}"
+      Option "TapButton3" "${builtins.elemAt cfg.fingersMap 2}"
+    '';
+    disabledTapConfig = ''
+      Option "MaxTapTime" "0"
+      Option "MaxTapMove" "0"
+      Option "TapButton1" "0"
+      Option "TapButton2" "0"
+      Option "TapButton3" "0"
+    '';
+  pkg = pkgs.xorg.xf86inputsynaptics;
+  etcFile = "X11/xorg.conf.d/70-synaptics.conf";
+in {
+
+  options = {
+
+    services.xserver.synaptics = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable touchpad support. Deprecated: Consider services.xserver.libinput.enable.";
+      };
+
+      dev = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "/dev/input/event0";
+        description =
+          lib.mdDoc ''
+            Path for touchpad device.  Set to null to apply to any
+            auto-detected touchpad.
+          '';
+      };
+
+      accelFactor = mkOption {
+        type = types.nullOr types.str;
+        default = "0.001";
+        description = lib.mdDoc "Cursor acceleration (how fast speed increases from minSpeed to maxSpeed).";
+      };
+
+      minSpeed = mkOption {
+        type = types.nullOr types.str;
+        default = "0.6";
+        description = lib.mdDoc "Cursor speed factor for precision finger motion.";
+      };
+
+      maxSpeed = mkOption {
+        type = types.nullOr types.str;
+        default = "1.0";
+        description = lib.mdDoc "Cursor speed factor for highest-speed finger motion.";
+      };
+
+      scrollDelta = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 75;
+        description = lib.mdDoc "Move distance of the finger for a scroll event.";
+      };
+
+      twoFingerScroll = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable two-finger drag-scrolling. Overridden by horizTwoFingerScroll and vertTwoFingerScroll.";
+      };
+
+      horizTwoFingerScroll = mkOption {
+        type = types.bool;
+        default = cfg.twoFingerScroll;
+        defaultText = literalExpression "config.${opt.twoFingerScroll}";
+        description = lib.mdDoc "Whether to enable horizontal two-finger drag-scrolling.";
+      };
+
+      vertTwoFingerScroll = mkOption {
+        type = types.bool;
+        default = cfg.twoFingerScroll;
+        defaultText = literalExpression "config.${opt.twoFingerScroll}";
+        description = lib.mdDoc "Whether to enable vertical two-finger drag-scrolling.";
+      };
+
+      horizEdgeScroll = mkOption {
+        type = types.bool;
+        default = ! cfg.horizTwoFingerScroll;
+        defaultText = literalExpression "! config.${opt.horizTwoFingerScroll}";
+        description = lib.mdDoc "Whether to enable horizontal edge drag-scrolling.";
+      };
+
+      vertEdgeScroll = mkOption {
+        type = types.bool;
+        default = ! cfg.vertTwoFingerScroll;
+        defaultText = literalExpression "! config.${opt.vertTwoFingerScroll}";
+        description = lib.mdDoc "Whether to enable vertical edge drag-scrolling.";
+      };
+
+      tapButtons = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable tap buttons.";
+      };
+
+      buttonsMap = mkOption {
+        type = types.listOf types.int;
+        default = [1 2 3];
+        example = [1 3 2];
+        description = lib.mdDoc "Remap touchpad buttons.";
+        apply = map toString;
+      };
+
+      fingersMap = mkOption {
+        type = types.listOf types.int;
+        default = [1 2 3];
+        example = [1 3 2];
+        description = lib.mdDoc "Remap several-fingers taps.";
+        apply = map toString;
+      };
+
+      palmDetect = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable palm detection (hardware support required)";
+      };
+
+      palmMinWidth = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 5;
+        description = lib.mdDoc "Minimum finger width at which touch is considered a palm";
+      };
+
+      palmMinZ = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 20;
+        description = lib.mdDoc "Minimum finger pressure at which touch is considered a palm";
+      };
+
+      horizontalScroll = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc "Whether to enable horizontal scrolling (on touchpad)";
+      };
+
+      additionalOptions = mkOption {
+        type = types.str;
+        default = "";
+        example = ''
+          Option "RTCornerButton" "2"
+          Option "RBCornerButton" "3"
+        '';
+        description = lib.mdDoc ''
+          Additional options for synaptics touchpad driver.
+        '';
+      };
+
+    };
+
+  };
+
+
+  config = mkIf cfg.enable {
+
+    services.xserver.modules = [ pkg.out ];
+
+    environment.etc.${etcFile}.source =
+      "${pkg.out}/share/X11/xorg.conf.d/70-synaptics.conf";
+
+    environment.systemPackages = [ pkg ];
+
+    services.xserver.config =
+      ''
+        # Automatically enable the synaptics driver for all touchpads.
+        Section "InputClass"
+          Identifier "synaptics touchpad catchall"
+          MatchIsTouchpad "on"
+          ${optionalString (cfg.dev != null) ''MatchDevicePath "${cfg.dev}"''}
+          Driver "synaptics"
+          ${optionalString (cfg.minSpeed != null) ''Option "MinSpeed" "${cfg.minSpeed}"''}
+          ${optionalString (cfg.maxSpeed != null) ''Option "MaxSpeed" "${cfg.maxSpeed}"''}
+          ${optionalString (cfg.accelFactor != null) ''Option "AccelFactor" "${cfg.accelFactor}"''}
+          ${optionalString cfg.tapButtons tapConfig}
+          Option "ClickFinger1" "${builtins.elemAt cfg.buttonsMap 0}"
+          Option "ClickFinger2" "${builtins.elemAt cfg.buttonsMap 1}"
+          Option "ClickFinger3" "${builtins.elemAt cfg.buttonsMap 2}"
+          Option "VertTwoFingerScroll" "${if cfg.vertTwoFingerScroll then "1" else "0"}"
+          Option "HorizTwoFingerScroll" "${if cfg.horizTwoFingerScroll then "1" else "0"}"
+          Option "VertEdgeScroll" "${if cfg.vertEdgeScroll then "1" else "0"}"
+          Option "HorizEdgeScroll" "${if cfg.horizEdgeScroll then "1" else "0"}"
+          ${optionalString cfg.palmDetect ''Option "PalmDetect" "1"''}
+          ${optionalString (cfg.palmMinWidth != null) ''Option "PalmMinWidth" "${toString cfg.palmMinWidth}"''}
+          ${optionalString (cfg.palmMinZ != null) ''Option "PalmMinZ" "${toString cfg.palmMinZ}"''}
+          ${optionalString (cfg.scrollDelta != null) ''Option "VertScrollDelta" "${toString cfg.scrollDelta}"''}
+          ${if !cfg.horizontalScroll then ''Option "HorizScrollDelta" "0"''
+            else (optionalString (cfg.scrollDelta != null) ''Option "HorizScrollDelta" "${toString cfg.scrollDelta}"'')}
+          ${cfg.additionalOptions}
+        EndSection
+      '';
+
+    assertions = [
+      {
+        assertion = !config.services.xserver.libinput.enable;
+        message = "Synaptics and libinput are incompatible, you cannot enable both (in services.xserver).";
+      }
+    ];
+
+  };
+
+}
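A usage sketch for the (deprecated) synaptics driver; libinput must be disabled per the assertion above, and the speed values are arbitrary examples:

    services.xserver.libinput.enable = false;
    services.xserver.synaptics = {
      enable          = true;
      twoFingerScroll = true;
      palmDetect      = true;
      minSpeed        = "0.7";
      maxSpeed        = "1.2";
    };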
diff --git a/nixpkgs/nixos/modules/services/x11/hardware/wacom.nix b/nixpkgs/nixos/modules/services/x11/hardware/wacom.nix
new file mode 100644
index 000000000000..4994e5c1a2cc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/hardware/wacom.nix
@@ -0,0 +1,48 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.wacom;
+
+in
+
+{
+
+  options = {
+
+    services.xserver.wacom = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the Wacom touchscreen/digitizer/tablet.
+          If you ever have any issues, try switching to a virtual terminal (Ctrl-Alt-F1) and back,
+          which will make Xorg reconfigure the device.
+
+          If you're not satisfied with the default behaviour, you can override
+          {option}`environment.etc."X11/xorg.conf.d/70-wacom.conf"` in
+          configuration.nix easily.
+        '';
+      };
+
+    };
+
+  };
+
+
+  config = mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.xf86_input_wacom ]; # provides xsetwacom
+
+    services.xserver.modules = [ pkgs.xf86_input_wacom ];
+
+    services.udev.packages = [ pkgs.xf86_input_wacom ];
+
+    environment.etc."X11/xorg.conf.d/70-wacom.conf".source = "${pkgs.xf86_input_wacom}/share/X11/xorg.conf.d/70-wacom.conf";
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/imwheel.nix b/nixpkgs/nixos/modules/services/x11/imwheel.nix
new file mode 100644
index 000000000000..bd2bcb7bcd06
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/imwheel.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.xserver.imwheel;
+in
+  {
+    options = {
+      services.xserver.imwheel = {
+        enable = mkEnableOption (lib.mdDoc "IMWheel service");
+
+        extraOptions = mkOption {
+          type = types.listOf types.str;
+          default = [ "--buttons=45" ];
+          example = [ "--debug" ];
+          description = lib.mdDoc ''
+            Additional command-line arguments to pass to
+            {command}`imwheel`.
+          '';
+        };
+
+        rules = mkOption {
+          type = types.attrsOf types.str;
+          default = {};
+          example = literalExpression ''
+            {
+              ".*" = '''
+                None,      Up,   Button4, 8
+                None,      Down, Button5, 8
+                Shift_L,   Up,   Shift_L|Button4, 4
+                Shift_L,   Down, Shift_L|Button5, 4
+                Control_L, Up,   Control_L|Button4
+                Control_L, Down, Control_L|Button5
+              ''';
+            }
+          '';
+          description = lib.mdDoc ''
+            Window class translation rules.
+            /etc/X11/imwheel/imwheelrc is generated based on this config,
+            which means this config is global for all users.
+            See [official man pages](https://imwheel.sourceforge.net/imwheel.1.html)
+            for more information.
+          '';
+        };
+      };
+    };
+
+    config = mkIf cfg.enable {
+      environment.systemPackages = [ pkgs.imwheel ];
+
+      environment.etc."X11/imwheel/imwheelrc".source =
+        pkgs.writeText "imwheelrc" (concatStringsSep "\n\n"
+          (mapAttrsToList
+            (rule: conf: "\"${rule}\"\n${conf}") cfg.rules
+          ));
+
+      systemd.user.services.imwheel = {
+        description = "imwheel service";
+        wantedBy = [ "graphical-session.target" ];
+        partOf = [ "graphical-session.target" ];
+        serviceConfig = {
+          ExecStart = "${pkgs.imwheel}/bin/imwheel " + escapeShellArgs ([
+            "--detach"
+            "--kill"
+          ] ++ cfg.extraOptions);
+          ExecStop = "${pkgs.procps}/bin/pkill imwheel";
+          RestartSec = 3;
+          Restart = "always";
+        };
+      };
+    };
+  }
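A usage sketch with a single catch-all rule; the scroll multiplier of 4 is an arbitrary example value:

    services.xserver.imwheel = {
      enable = true;
      rules.".*" = ''
        None, Up,   Button4, 4
        None, Down, Button5, 4
      '';
    };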
diff --git a/nixpkgs/nixos/modules/services/x11/picom.nix b/nixpkgs/nixos/modules/services/x11/picom.nix
new file mode 100644
index 000000000000..3df0ea9e60bb
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/picom.nix
@@ -0,0 +1,317 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.picom;
+  opt = options.services.picom;
+
+  pairOf = x: with types;
+    addCheck (listOf x) (y: length y == 2)
+    // { description = "pair of ${x.description}"; };
+
+  mkDefaultAttrs = mapAttrs (n: v: mkDefault v);
+
+  # Basically a tweaked lib.generators.mkKeyValueDefault
+  # It either serializes a top-level definition "key: { values };"
+  # or an expression "key = { values };"
+  mkAttrsString = top:
+    mapAttrsToList (k: v:
+      let sep = if (top && isAttrs v) then ":" else "=";
+      in "${escape [ sep ] k}${sep}${mkValueString v};");
+
+  # This serializes a Nix expression to the libconfig format.
+  mkValueString = v:
+         if types.bool.check  v then boolToString v
+    else if types.int.check   v then toString v
+    else if types.float.check v then toString v
+    else if types.str.check   v then "\"${escape [ "\"" ] v}\""
+    else if builtins.isList   v then "[ ${concatMapStringsSep " , " mkValueString v} ]"
+    else if types.attrs.check v then "{ ${concatStringsSep " " (mkAttrsString false v) } }"
+    else throw ''
+                 invalid expression used in option services.picom.settings:
+                 ${v}
+               '';
+
+  toConf = attrs: concatStringsSep "\n" (mkAttrsString true attrs);
+
+  configFile = pkgs.writeText "picom.conf" (toConf cfg.settings);
+
+in {
+
+  imports = [
+    (mkAliasOptionModuleMD [ "services" "compton" ] [ "services" "picom" ])
+    (mkRemovedOptionModule [ "services" "picom" "refreshRate" ] ''
+      This option corresponds to `refresh-rate`, which has been unused
+      since picom v6 and was subsequently removed by upstream.
+      See https://github.com/yshui/picom/commit/bcbc410
+    '')
+    (mkRemovedOptionModule [ "services" "picom" "experimentalBackends" ] ''
+      This option was removed by upstream since picom v10.
+    '')
+  ];
+
+  options.services.picom = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether or not to enable Picom as the X.org composite manager.
+      '';
+    };
+
+    package = mkPackageOptionMD pkgs "picom" { };
+
+    fade = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Fade windows in and out.
+      '';
+    };
+
+    fadeDelta = mkOption {
+      type = types.ints.positive;
+      default = 10;
+      example = 5;
+      description = lib.mdDoc ''
+        Time between fade animation step (in ms).
+      '';
+    };
+
+    fadeSteps = mkOption {
+      type = pairOf (types.numbers.between 0.01 1);
+      default = [ 0.028 0.03 ];
+      example = [ 0.04 0.04 ];
+      description = lib.mdDoc ''
+        Opacity change between fade steps (in and out).
+      '';
+    };
+
+    fadeExclude = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [
+        "window_type *= 'menu'"
+        "name ~= 'Firefox$'"
+        "focused = 1"
+      ];
+      description = lib.mdDoc ''
+        List of conditions of windows that should not be faded.
+        See `picom(1)` man page for more examples.
+      '';
+    };
+
+    shadow = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Draw window shadows.
+      '';
+    };
+
+    shadowOffsets = mkOption {
+      type = pairOf types.int;
+      default = [ (-15) (-15) ];
+      example = [ (-10) (-15) ];
+      description = lib.mdDoc ''
+        Left and right offset for shadows (in pixels).
+      '';
+    };
+
+    shadowOpacity = mkOption {
+      type = types.numbers.between 0 1;
+      default = 0.75;
+      example = 0.8;
+      description = lib.mdDoc ''
+        Window shadows opacity.
+      '';
+    };
+
+    shadowExclude = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [
+        "window_type *= 'menu'"
+        "name ~= 'Firefox$'"
+        "focused = 1"
+      ];
+      description = lib.mdDoc ''
+        List of conditions of windows that should have no shadow.
+        See `picom(1)` man page for more examples.
+      '';
+    };
+
+    activeOpacity = mkOption {
+      type = types.numbers.between 0 1;
+      default = 1.0;
+      example = 0.8;
+      description = lib.mdDoc ''
+        Opacity of active windows.
+      '';
+    };
+
+    inactiveOpacity = mkOption {
+      type = types.numbers.between 0.1 1;
+      default = 1.0;
+      example = 0.8;
+      description = lib.mdDoc ''
+        Opacity of inactive windows.
+      '';
+    };
+
+    menuOpacity = mkOption {
+      type = types.numbers.between 0 1;
+      default = 1.0;
+      example = 0.8;
+      description = lib.mdDoc ''
+        Opacity of dropdown and popup menu.
+      '';
+    };
+
+    wintypes = mkOption {
+      type = types.attrs;
+      default = {
+        popup_menu = { opacity = cfg.menuOpacity; };
+        dropdown_menu = { opacity = cfg.menuOpacity; };
+      };
+      defaultText = literalExpression ''
+        {
+          popup_menu = { opacity = config.${opt.menuOpacity}; };
+          dropdown_menu = { opacity = config.${opt.menuOpacity}; };
+        }
+      '';
+      example = {};
+      description = lib.mdDoc ''
+        Rules for specific window types.
+      '';
+    };
+
+    opacityRules = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [
+        "95:class_g = 'URxvt' && !_NET_WM_STATE@:32a"
+        "0:_NET_WM_STATE@:32a *= '_NET_WM_STATE_HIDDEN'"
+      ];
+      description = lib.mdDoc ''
+        Rules that control the opacity of windows, in format PERCENT:PATTERN.
+      '';
+    };
+
+    backend = mkOption {
+      type = types.enum [ "egl" "glx" "xrender" "xr_glx_hybrid" ];
+      default = "xrender";
+      description = lib.mdDoc ''
+        Backend to use: `egl`, `glx`, `xrender` or `xr_glx_hybrid`.
+      '';
+    };
+
+    vSync = mkOption {
+      type = with types; either bool
+        (enum [ "none" "drm" "opengl" "opengl-oml" "opengl-swc" "opengl-mswc" ]);
+      default = false;
+      apply = x:
+        let
+          res = x != "none";
+          msg = "The type of services.picom.vSync has changed to bool:"
+                + " interpreting ${x} as ${boolToString res}";
+        in
+          if isBool x then x
+          else warn msg res;
+
+      description = lib.mdDoc ''
+        Enable vertical synchronization. Chooses the best method
+        (drm, opengl, opengl-oml, opengl-swc, opengl-mswc) automatically.
+        A boolean value should be used; the string values are kept only for backwards compatibility.
+      '';
+    };
+
+    settings = with types;
+    let
+      scalar = oneOf [ bool int float str ]
+        // { description = "scalar types"; };
+
+      libConfig = oneOf [ scalar (listOf libConfig) (attrsOf libConfig) ]
+        // { description = "libconfig type"; };
+
+      topLevel = attrsOf libConfig
+        // { description = ''
+               libconfig configuration. The format consists of an attribute
+               set (called a group) of settings. Each setting can be a scalar type
+               (boolean, integer, floating point number or string), a list of
+               scalars, or a group itself.
+             '';
+           };
+
+    in mkOption {
+      type = topLevel;
+      default = { };
+      example = literalExpression ''
+        blur =
+          { method = "gaussian";
+            size = 10;
+            deviation = 5.0;
+          };
+      '';
+      description = lib.mdDoc ''
+        Picom settings. Use this option to configure Picom settings not exposed
+        in a NixOS option or to bypass one.  For the available options see the
+        CONFIGURATION FILES section at `picom(1)`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.picom.settings = mkDefaultAttrs {
+      # fading
+      fading           = cfg.fade;
+      fade-delta       = cfg.fadeDelta;
+      fade-in-step     = elemAt cfg.fadeSteps 0;
+      fade-out-step    = elemAt cfg.fadeSteps 1;
+      fade-exclude     = cfg.fadeExclude;
+
+      # shadows
+      shadow           = cfg.shadow;
+      shadow-offset-x  = elemAt cfg.shadowOffsets 0;
+      shadow-offset-y  = elemAt cfg.shadowOffsets 1;
+      shadow-opacity   = cfg.shadowOpacity;
+      shadow-exclude   = cfg.shadowExclude;
+
+      # opacity
+      active-opacity   = cfg.activeOpacity;
+      inactive-opacity = cfg.inactiveOpacity;
+
+      wintypes         = cfg.wintypes;
+
+      opacity-rule     = cfg.opacityRules;
+
+      # other options
+      backend          = cfg.backend;
+      vsync            = cfg.vSync;
+    };
+
+    systemd.user.services.picom = {
+      description = "Picom composite manager";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+
+      # Temporarily fixes corrupt colours with Mesa 18
+      environment = mkIf (cfg.backend == "glx") {
+        allow_rgb10_configs = "false";
+      };
+
+      serviceConfig = {
+        ExecStart = "${getExe cfg.package} --config ${configFile}";
+        RestartSec = 3;
+        Restart = "always";
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
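A usage sketch combining the dedicated options with a raw settings entry (the blur group mirrors the option's own example); all values are illustrative:

    services.picom = {
      enable          = true;
      backend         = "glx";
      fade            = true;
      fadeDelta       = 5;
      shadow          = true;
      inactiveOpacity = 0.9;
      vSync           = true;
      settings.blur = {
        method    = "gaussian";
        size      = 10;
        deviation = 5.0;
      };
    };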
diff --git a/nixpkgs/nixos/modules/services/x11/redshift.nix b/nixpkgs/nixos/modules/services/x11/redshift.nix
new file mode 100644
index 000000000000..3eb9e28edae9
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/redshift.nix
@@ -0,0 +1,138 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.redshift;
+  lcfg = config.location;
+
+in {
+
+  imports = [
+    (mkChangedOptionModule [ "services" "redshift" "latitude" ] [ "location" "latitude" ]
+      (config:
+        let value = getAttrFromPath [ "services" "redshift" "latitude" ] config;
+        in if value == null then
+          throw "services.redshift.latitude is set to null, you can remove this"
+          else builtins.fromJSON value))
+    (mkChangedOptionModule [ "services" "redshift" "longitude" ] [ "location" "longitude" ]
+      (config:
+        let value = getAttrFromPath [ "services" "redshift" "longitude" ] config;
+        in if value == null then
+          throw "services.redshift.longitude is set to null, you can remove this"
+          else builtins.fromJSON value))
+    (mkRenamedOptionModule [ "services" "redshift" "provider" ] [ "location" "provider" ])
+  ];
+
+  options.services.redshift = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable Redshift to change your screen's colour temperature depending on
+        the time of day.
+      '';
+    };
+
+    temperature = {
+      day = mkOption {
+        type = types.int;
+        default = 5500;
+        description = lib.mdDoc ''
+          Colour temperature to use during the day, between
+          `1000` and `25000` K.
+        '';
+      };
+      night = mkOption {
+        type = types.int;
+        default = 3700;
+        description = lib.mdDoc ''
+          Colour temperature to use at night, between
+          `1000` and `25000` K.
+        '';
+      };
+    };
+
+    brightness = {
+      day = mkOption {
+        type = types.str;
+        default = "1";
+        description = lib.mdDoc ''
+          Screen brightness to apply during the day,
+          between `0.1` and `1.0`.
+        '';
+      };
+      night = mkOption {
+        type = types.str;
+        default = "1";
+        description = lib.mdDoc ''
+          Screen brightness to apply during the night,
+          between `0.1` and `1.0`.
+        '';
+      };
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.redshift;
+      defaultText = literalExpression "pkgs.redshift";
+      description = lib.mdDoc ''
+        redshift derivation to use.
+      '';
+    };
+
+    executable = mkOption {
+      type = types.str;
+      default = "/bin/redshift";
+      example = "/bin/redshift-gtk";
+      description = lib.mdDoc ''
+        Redshift executable to use within the package.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "-v" "-m randr" ];
+      description = lib.mdDoc ''
+        Additional command-line arguments to pass to
+        {command}`redshift`.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    # needed so that .desktop files are installed, which geoclue cares about
+    environment.systemPackages = [ cfg.package ];
+
+    services.geoclue2.appConfig.redshift = {
+      isAllowed = true;
+      isSystem = true;
+    };
+
+    systemd.user.services.redshift =
+    let
+      providerString = if lcfg.provider == "manual"
+        then "${toString lcfg.latitude}:${toString lcfg.longitude}"
+        else lcfg.provider;
+    in
+    {
+      description = "Redshift colour temperature adjuster";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}${cfg.executable} \
+            -l ${providerString} \
+            -t ${toString cfg.temperature.day}:${toString cfg.temperature.night} \
+            -b ${toString cfg.brightness.day}:${toString cfg.brightness.night} \
+            ${lib.strings.concatStringsSep " " cfg.extraOptions}
+        '';
+        RestartSec = 3;
+        Restart = "always";
+      };
+    };
+  };
+
+}
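A usage sketch; the module reads the shared location options (lcfg above), so with the manual provider latitude and longitude are set there. The coordinates are placeholders:

    location = {
      provider  = "manual";
      latitude  = 52.5;    # placeholder coordinates
      longitude = 13.4;
    };
    services.redshift = {
      enable      = true;
      temperature = { day = 5500; night = 3000; };
      brightness  = { day = "1";  night = "0.8"; };
    };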
diff --git a/nixpkgs/nixos/modules/services/x11/terminal-server.nix b/nixpkgs/nixos/modules/services/x11/terminal-server.nix
new file mode 100644
index 000000000000..e6b50c21a952
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/terminal-server.nix
@@ -0,0 +1,56 @@
+# This module implements a terminal service based on ‘x11vnc’.  It
+# listens on port 5900 for VNC connections.  It then presents a login
+# screen to the user.  If the user successfully authenticates, x11vnc
+# checks to see if an X server is already running for that user.  If
+# not, an X server (Xvfb) is started for that user.  The Xvfb instances
+# persist across VNC sessions.
+
+{ lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  config = {
+
+    services.xserver.enable = true;
+    services.xserver.videoDrivers = [];
+
+    # Enable GDM.  Any display manager will do as long as it supports XDMCP.
+    services.xserver.displayManager.gdm.enable = true;
+
+    systemd.sockets.terminal-server =
+      { description = "Terminal Server Socket";
+        wantedBy = [ "sockets.target" ];
+        before = [ "multi-user.target" ];
+        socketConfig.Accept = true;
+        socketConfig.ListenStream = 5900;
+      };
+
+    systemd.services."terminal-server@" =
+      { description = "Terminal Server";
+
+        path =
+          [ pkgs.xorg.xorgserver.out pkgs.gawk pkgs.which pkgs.openssl pkgs.xorg.xauth
+            pkgs.nettools pkgs.shadow pkgs.procps pkgs.util-linux pkgs.bash
+          ];
+
+        environment.FD_GEOM = "1024x786x24";
+        environment.FD_XDMCP_IF = "127.0.0.1";
+        #environment.FIND_DISPLAY_OUTPUT = "/tmp/foo"; # to debug the "find display" script
+
+        serviceConfig =
+          { StandardInput = "socket";
+            StandardOutput = "socket";
+            StandardError = "journal";
+            ExecStart = "@${pkgs.x11vnc}/bin/x11vnc x11vnc -inetd -display WAIT:1024x786:cmd=FINDCREATEDISPLAY-Xvfb.xdmcp -unixpw -ssl SAVE";
+            # Don't kill the X server when the user quits the VNC
+            # connection.  FIXME: the X server should run in a
+            # separate systemd session.
+            KillMode = "process";
+          };
+      };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/touchegg.nix b/nixpkgs/nixos/modules/services/x11/touchegg.nix
new file mode 100644
index 000000000000..f1103c054c57
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/touchegg.nix
@@ -0,0 +1,38 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.touchegg;
+
+in {
+  meta = {
+    maintainers = teams.pantheon.members;
+  };
+
+  ###### interface
+  options.services.touchegg = {
+    enable = mkEnableOption (lib.mdDoc "touchegg, a multi-touch gesture recognizer");
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.touchegg;
+      defaultText = literalExpression "pkgs.touchegg";
+      description = lib.mdDoc "touchegg derivation to use.";
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.touchegg = {
+      description = "Touchegg Daemon";
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = "${cfg.package}/bin/touchegg --daemon";
+        Restart = "on-failure";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/unclutter-xfixes.nix b/nixpkgs/nixos/modules/services/x11/unclutter-xfixes.nix
new file mode 100644
index 000000000000..4a35176c5833
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/unclutter-xfixes.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.unclutter-xfixes;
+
+in {
+  options.services.unclutter-xfixes = {
+
+    enable = mkOption {
+      description = lib.mdDoc "Enable unclutter-xfixes to hide your mouse cursor when inactive.";
+      type = types.bool;
+      default = false;
+    };
+
+    package = mkOption {
+      description = lib.mdDoc "unclutter-xfixes derivation to use.";
+      type = types.package;
+      default = pkgs.unclutter-xfixes;
+      defaultText = literalExpression "pkgs.unclutter-xfixes";
+    };
+
+    timeout = mkOption {
+      description = lib.mdDoc "Number of seconds before the cursor is marked inactive.";
+      type = types.int;
+      default = 1;
+    };
+
+    threshold = mkOption {
+      description = lib.mdDoc "Minimum number of pixels considered cursor movement.";
+      type = types.int;
+      default = 1;
+    };
+
+    extraOptions = mkOption {
+      description = lib.mdDoc "More arguments to pass to the unclutter-xfixes command.";
+      type = types.listOf types.str;
+      default = [];
+      example = [ "exclude-root" "ignore-scrolling" "fork" ];
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.unclutter-xfixes = {
+      description = "unclutter-xfixes";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = ''
+        ${cfg.package}/bin/unclutter \
+          --timeout ${toString cfg.timeout} \
+          --jitter ${toString (cfg.threshold - 1)} \
+          ${concatMapStrings (x: " --"+x) cfg.extraOptions} \
+      '';
+      serviceConfig.RestartSec = 3;
+      serviceConfig.Restart = "always";
+    };
+  };
+}
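A usage sketch; entries in extraOptions are passed as --&lt;option&gt; flags, as the ExecStart above shows:

    services.unclutter-xfixes = {
      enable       = true;
      timeout      = 2;
      extraOptions = [ "ignore-scrolling" ];
    };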
diff --git a/nixpkgs/nixos/modules/services/x11/unclutter.nix b/nixpkgs/nixos/modules/services/x11/unclutter.nix
new file mode 100644
index 000000000000..039214a575a7
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/unclutter.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.unclutter;
+
+in {
+  options.services.unclutter = {
+
+    enable = mkOption {
+      description = lib.mdDoc "Enable unclutter to hide your mouse cursor when inactive";
+      type = types.bool;
+      default = false;
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.unclutter;
+      defaultText = literalExpression "pkgs.unclutter";
+      description = lib.mdDoc "unclutter derivation to use.";
+    };
+
+    keystroke = mkOption {
+      description = lib.mdDoc "Wait for a keystroke before hiding the cursor";
+      type = types.bool;
+      default = false;
+    };
+
+    timeout = mkOption {
+      description = lib.mdDoc "Number of seconds before the cursor is marked inactive";
+      type = types.int;
+      default = 1;
+    };
+
+    threshold = mkOption {
+      description = lib.mdDoc "Minimum number of pixels considered cursor movement";
+      type = types.int;
+      default = 1;
+    };
+
+    excluded = mkOption {
+      description = lib.mdDoc "Names of windows where unclutter should not apply";
+      type = types.listOf types.str;
+      default = [];
+      example = [ "" ];
+    };
+
+    extraOptions = mkOption {
+      description = lib.mdDoc "More arguments to pass to the unclutter command";
+      type = types.listOf types.str;
+      default = [];
+      example = [ "noevent" "grab" ];
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.unclutter = {
+      description = "unclutter";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = ''
+        ${cfg.package}/bin/unclutter \
+          -idle ${toString cfg.timeout} \
+          -jitter ${toString (cfg.threshold - 1)} \
+          ${optionalString cfg.keystroke "-keystroke"} \
+          ${concatMapStrings (x: " -"+x) cfg.extraOptions} \
+          -not ${concatStringsSep " " cfg.excluded} \
+      '';
+      serviceConfig.PassEnvironment = "DISPLAY";
+      serviceConfig.RestartSec = 3;
+      serviceConfig.Restart = "always";
+    };
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "services" "unclutter" "threeshold" ]
+                           [ "services"  "unclutter" "threshold" ])
+  ];
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/urserver.nix b/nixpkgs/nixos/modules/services/x11/urserver.nix
new file mode 100644
index 000000000000..d0b6e0775e5d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/urserver.nix
@@ -0,0 +1,38 @@
+# urserver service
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.urserver;
+in {
+
+  options.services.urserver.enable = lib.mkEnableOption (lib.mdDoc "urserver");
+
+  config = lib.mkIf cfg.enable {
+
+    networking.firewall = {
+      allowedTCPPorts = [ 9510 9512 ];
+      allowedUDPPorts = [ 9511 9512 ];
+    };
+
+    systemd.user.services.urserver =  {
+      description = ''
+        Server for Unified Remote: The one-and-only remote for your computer.
+      '';
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      after = [ "network.target" ];
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = ''
+          ${pkgs.urserver}/bin/urserver --daemon
+        '';
+        ExecStop = ''
+          ${pkgs.procps}/bin/pkill urserver
+        '';
+        RestartSec = 3;
+        Restart = "on-failure";
+      };
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/urxvtd.nix b/nixpkgs/nixos/modules/services/x11/urxvtd.nix
new file mode 100644
index 000000000000..fedcb6c7293e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/urxvtd.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+
+# maintainer: siddharthist
+
+with lib;
+
+let
+  cfg = config.services.urxvtd;
+in {
+  options.services.urxvtd = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable urxvtd, the urxvt terminal daemon. To use urxvtd, run
+        "urxvtc".
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.rxvt-unicode;
+      defaultText = literalExpression "pkgs.rxvt-unicode";
+      description = lib.mdDoc ''
+        Package to install. Usually pkgs.rxvt-unicode.
+      '';
+      type = types.package;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.urxvtd = {
+      description = "urxvt terminal daemon";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      path = [ pkgs.xsel ];
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/urxvtd -o";
+        Environment = "RXVT_SOCKET=%t/urxvtd-socket";
+        Restart = "on-failure";
+        RestartSec = "5s";
+      };
+    };
+
+    environment.systemPackages = [ cfg.package ];
+    environment.variables.RXVT_SOCKET = "/run/user/$(id -u)/urxvtd-socket";
+  };
+
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+}
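A usage sketch; with the daemon running in the user session, terminal windows are opened with urxvtc:

    services.urxvtd = {
      enable  = true;
      package = pkgs.rxvt-unicode;   # the default, shown only for illustration
    };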
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/2bwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/2bwm.nix
new file mode 100644
index 000000000000..8483a74b9f6c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/2bwm.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.windowManager."2bwm";
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.xserver.windowManager."2bwm".enable = mkEnableOption (lib.mdDoc "2bwm");
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.xserver.windowManager.session = singleton
+      { name = "2bwm";
+        start =
+          ''
+            ${pkgs._2bwm}/bin/2bwm &
+            waitPID=$!
+          '';
+      };
+
+    environment.systemPackages = [ pkgs._2bwm ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/afterstep.nix b/nixpkgs/nixos/modules/services/x11/window-managers/afterstep.nix
new file mode 100644
index 000000000000..a06063597971
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/afterstep.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.afterstep;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.afterstep.enable = mkEnableOption (lib.mdDoc "afterstep");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "afterstep";
+      start = ''
+        ${pkgs.afterstep}/bin/afterstep &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.afterstep ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/awesome.nix b/nixpkgs/nixos/modules/services/x11/window-managers/awesome.nix
new file mode 100644
index 000000000000..c1231d3fbf38
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/awesome.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.windowManager.awesome;
+  awesome = cfg.package;
+  getLuaPath = lib: dir: "${lib}/${dir}/lua/${awesome.lua.luaversion}";
+  makeSearchPath = lib.concatMapStrings (path:
+    " --search " + (getLuaPath path "share") +
+    " --search " + (getLuaPath path "lib")
+  );
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.xserver.windowManager.awesome = {
+
+      enable = mkEnableOption (lib.mdDoc "Awesome window manager");
+
+      luaModules = mkOption {
+        default = [];
+        type = types.listOf types.package;
+        description = lib.mdDoc "List of lua packages available for being used in the Awesome configuration.";
+        example = literalExpression "[ pkgs.luaPackages.vicious ]";
+      };
+
+      package = mkOption {
+        default = null;
+        type = types.nullOr types.package;
+        description = lib.mdDoc "Package to use for running the Awesome WM.";
+        apply = pkg: if pkg == null then pkgs.awesome else pkg;
+      };
+
+      noArgb = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Disable client transparency support, which can be greatly detrimental to performance in some setups";
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.xserver.windowManager.session = singleton
+      { name = "awesome";
+        start =
+          ''
+            ${awesome}/bin/awesome ${lib.optionalString cfg.noArgb "--no-argb"} ${makeSearchPath cfg.luaModules} &
+            waitPID=$!
+          '';
+      };
+
+    environment.systemPackages = [ awesome ];
+
+  };
+}
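A hedged usage sketch for the awesome module above; pkgs.luaPackages.vicious is taken from the option's own example and stands in for any Lua library an rc.lua might require.

{ pkgs, ... }:
{
  services.xserver.enable = true;
  services.xserver.windowManager.awesome = {
    enable = true;
    # Lua libraries exposed to the Awesome configuration via the generated --search flags.
    luaModules = [ pkgs.luaPackages.vicious ];
    # Set to true to pass --no-argb and disable client transparency support.
    noArgb = false;
  };
}
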
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/berry.nix b/nixpkgs/nixos/modules/services/x11/window-managers/berry.nix
new file mode 100644
index 000000000000..eb5528602677
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/berry.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.berry;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.berry.enable = mkEnableOption (lib.mdDoc "berry");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "berry";
+      start = ''
+        ${pkgs.berry}/bin/berry &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.berry ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/bspwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/bspwm.nix
new file mode 100644
index 000000000000..c403f744cd43
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/bspwm.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.bspwm;
+in
+
+{
+  options = {
+    services.xserver.windowManager.bspwm = {
+      enable = mkEnableOption (lib.mdDoc "bspwm");
+
+      package = mkOption {
+        type        = types.package;
+        default     = pkgs.bspwm;
+        defaultText = literalExpression "pkgs.bspwm";
+        example     = literalExpression "pkgs.bspwm-unstable";
+        description = lib.mdDoc ''
+          bspwm package to use.
+        '';
+      };
+      configFile = mkOption {
+        type        = with types; nullOr path;
+        example     = literalExpression ''"''${pkgs.bspwm}/share/doc/bspwm/examples/bspwmrc"'';
+        default     = null;
+        description = lib.mdDoc ''
+          Path to the bspwm configuration file.
+          If null, $HOME/.config/bspwm/bspwmrc will be used.
+        '';
+      };
+
+      sxhkd = {
+        package = mkOption {
+          type        = types.package;
+          default     = pkgs.sxhkd;
+          defaultText = literalExpression "pkgs.sxhkd";
+          example     = literalExpression "pkgs.sxhkd-unstable";
+          description = lib.mdDoc ''
+            sxhkd package to use.
+          '';
+        };
+        configFile = mkOption {
+          type        = with types; nullOr path;
+          example     = literalExpression ''"''${pkgs.bspwm}/share/doc/bspwm/examples/sxhkdrc"'';
+          default     = null;
+          description = lib.mdDoc ''
+            Path to the sxhkd configuration file.
+            If null, $HOME/.config/sxhkd/sxhkdrc will be used.
+          '';
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name  = "bspwm";
+      start = ''
+        export _JAVA_AWT_WM_NONREPARENTING=1
+        SXHKD_SHELL=/bin/sh ${cfg.sxhkd.package}/bin/sxhkd ${optionalString (cfg.sxhkd.configFile != null) "-c \"${cfg.sxhkd.configFile}\""} &
+        ${cfg.package}/bin/bspwm ${optionalString (cfg.configFile != null) "-c \"${cfg.configFile}\""} &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  imports = [
+   (mkRemovedOptionModule [ "services" "xserver" "windowManager" "bspwm-unstable" "enable" ]
+     "Use services.xserver.windowManager.bspwm.enable and set services.xserver.windowManager.bspwm.package to pkgs.bspwm-unstable to use the unstable version of bspwm.")
+   (mkRemovedOptionModule [ "services" "xserver" "windowManager" "bspwm" "startThroughSession" ]
+     "bspwm package does not provide bspwm-session anymore.")
+   (mkRemovedOptionModule [ "services" "xserver" "windowManager" "bspwm" "sessionScript" ]
+     "bspwm package does not provide bspwm-session anymore.")
+  ];
+}
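A usage sketch for the bspwm module above, reusing the example paths from the configFile options; with both left at null, the files under ~/.config are used instead.

{ pkgs, ... }:
{
  services.xserver.enable = true;
  services.xserver.windowManager.bspwm = {
    enable = true;
    # Example configs shipped with the package; normally you would point these
    # at your own bspwmrc and sxhkdrc, or leave them null.
    configFile = "${pkgs.bspwm}/share/doc/bspwm/examples/bspwmrc";
    sxhkd.configFile = "${pkgs.bspwm}/share/doc/bspwm/examples/sxhkdrc";
  };
}
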
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/clfswm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/clfswm.nix
new file mode 100644
index 000000000000..f2e4c2f91c9d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/clfswm.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.clfswm;
+in
+
+{
+  options = {
+    services.xserver.windowManager.clfswm = {
+      enable = mkEnableOption (lib.mdDoc "clfswm");
+      package = mkOption {
+        type        = types.package;
+        default     = pkgs.lispPackages.clfswm;
+        defaultText = literalExpression "pkgs.lispPackages.clfswm";
+        description = lib.mdDoc ''
+          clfswm package to use.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "clfswm";
+      start = ''
+        ${cfg.package}/bin/clfswm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/cwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/cwm.nix
new file mode 100644
index 000000000000..9a143e7bccc3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/cwm.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.cwm;
+in
+{
+  options = {
+    services.xserver.windowManager.cwm.enable = mkEnableOption (lib.mdDoc "cwm");
+  };
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton
+      { name = "cwm";
+        start =
+          ''
+            cwm &
+            waitPID=$!
+          '';
+      };
+    environment.systemPackages = [ pkgs.cwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/default.nix b/nixpkgs/nixos/modules/services/x11/window-managers/default.nix
new file mode 100644
index 000000000000..e180f2693e0c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/default.nix
@@ -0,0 +1,93 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager;
+in
+
+{
+  imports = [
+    ./2bwm.nix
+    ./afterstep.nix
+    ./berry.nix
+    ./bspwm.nix
+    ./cwm.nix
+    ./clfswm.nix
+    ./dk.nix
+    ./dwm.nix
+    ./e16.nix
+    ./evilwm.nix
+    ./exwm.nix
+    ./fluxbox.nix
+    ./fvwm2.nix
+    ./fvwm3.nix
+    ./hackedbox.nix
+    ./herbstluftwm.nix
+    ./hypr.nix
+    ./i3.nix
+    ./jwm.nix
+    ./leftwm.nix
+    ./lwm.nix
+    ./metacity.nix
+    ./mlvwm.nix
+    ./mwm.nix
+    ./openbox.nix
+    ./pekwm.nix
+    ./notion.nix
+    ./ragnarwm.nix
+    ./ratpoison.nix
+    ./sawfish.nix
+    ./smallwm.nix
+    ./stumpwm.nix
+    ./spectrwm.nix
+    ./tinywm.nix
+    ./twm.nix
+    ./windowmaker.nix
+    ./wmderland.nix
+    ./wmii.nix
+    ./xmonad.nix
+    ./yeahwm.nix
+    ./qtile.nix
+    ./none.nix ];
+
+  options = {
+
+    services.xserver.windowManager = {
+
+      session = mkOption {
+        internal = true;
+        default = [];
+        example = [{
+          name = "wmii";
+          start = "...";
+        }];
+        description = lib.mdDoc ''
+          Internal option used to add some common lines to window manager
+          scripts before forwarding the value to the
+          `displayManager`.
+        '';
+        apply = map (d: d // {
+          manage = "window";
+        });
+      };
+
+      default = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "wmii";
+        description = lib.mdDoc ''
+          **Deprecated**, please use [](#opt-services.xserver.displayManager.defaultSession) instead.
+
+          Default window manager loaded if none have been chosen.
+        '';
+      };
+
+    };
+
+  };
+
+  config = {
+    services.xserver.displayManager.session = cfg.session;
+  };
+}
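The internal session option above is the hook every window-manager module in this diff uses to register itself; a sketch of the pattern (with a hypothetical manager name and pkgs.hello as a stand-in binary) looks like this.

{ lib, pkgs, ... }:
{
  # Each entry is tagged with manage = "window" by `apply` and forwarded to
  # services.xserver.displayManager.session.
  services.xserver.windowManager.session = lib.singleton {
    name = "mywm";                # hypothetical session name
    start = ''
      ${pkgs.hello}/bin/hello &   # placeholder for the window manager binary
      waitPID=$!
    '';
  };
}
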
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/dk.nix b/nixpkgs/nixos/modules/services/x11/window-managers/dk.nix
new file mode 100644
index 000000000000..152c7bc8117b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/dk.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.xserver.windowManager.dk;
+in
+
+{
+  options = {
+    services.xserver.windowManager.dk = {
+      enable = lib.mkEnableOption (lib.mdDoc "dk");
+
+      package = lib.mkPackageOptionMD pkgs "dk" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.xserver.windowManager.session = lib.singleton {
+      name = "dk";
+      start = ''
+        export _JAVA_AWT_WM_NONREPARENTING=1
+        ${cfg.package}/bin/dk &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix
new file mode 100644
index 000000000000..82900fd30540
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/dwm.nix
@@ -0,0 +1,58 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.windowManager.dwm;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.xserver.windowManager.dwm = {
+      enable = mkEnableOption (lib.mdDoc "dwm");
+      package = mkOption {
+        type        = types.package;
+        default     = pkgs.dwm;
+        defaultText = literalExpression "pkgs.dwm";
+        example     = literalExpression ''
+          pkgs.dwm.overrideAttrs (oldAttrs: rec {
+            patches = [
+              (super.fetchpatch {
+                url = "https://dwm.suckless.org/patches/steam/dwm-steam-6.2.diff";
+                sha256 = "sha256-f3lffBjz7+0Khyn9c9orzReoLTqBb/9gVGshYARGdVc=";
+              })
+            ];
+          })
+        '';
+        description = lib.mdDoc ''
+          dwm package to use.
+        '';
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.xserver.windowManager.session = singleton
+      { name = "dwm";
+        start =
+          ''
+            export _JAVA_AWT_WM_NONREPARENTING=1
+            dwm &
+            waitPID=$!
+          '';
+      };
+
+    environment.systemPackages = [ cfg.package ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/e16.nix b/nixpkgs/nixos/modules/services/x11/window-managers/e16.nix
new file mode 100644
index 000000000000..000feea12c2c
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/e16.nix
@@ -0,0 +1,26 @@
+{ config , lib , pkgs , ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.e16;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.e16.enable = mkEnableOption (lib.mdDoc "e16");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "E16";
+      start = ''
+        ${pkgs.e16}/bin/e16 &
+        waitPID=$!
+      '';
+    };
+
+    environment.systemPackages = [ pkgs.e16 ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/evilwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/evilwm.nix
new file mode 100644
index 000000000000..842f84c2cfbe
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/evilwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.evilwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.evilwm.enable = mkEnableOption (lib.mdDoc "evilwm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "evilwm";
+      start = ''
+        ${pkgs.evilwm}/bin/evilwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.evilwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/exwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/exwm.nix
new file mode 100644
index 000000000000..a97ed74ae881
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/exwm.nix
@@ -0,0 +1,69 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.exwm;
+  loadScript = pkgs.writeText "emacs-exwm-load" ''
+    ${cfg.loadScript}
+    ${optionalString cfg.enableDefaultConfig ''
+      (require 'exwm-config)
+      (exwm-config-default)
+    ''}
+  '';
+  packages = epkgs: cfg.extraPackages epkgs ++ [ epkgs.exwm ];
+  exwm-emacs = pkgs.emacsWithPackages packages;
+in
+
+{
+  options = {
+    services.xserver.windowManager.exwm = {
+      enable = mkEnableOption (lib.mdDoc "exwm");
+      loadScript = mkOption {
+        default = "(require 'exwm)";
+        type = types.lines;
+        example = ''
+          (require 'exwm)
+          (exwm-enable)
+        '';
+        description = lib.mdDoc ''
+          Emacs lisp code to be run after loading the user's init
+          file. If enableDefaultConfig is true, this will be run
+          before loading the default config.
+        '';
+      };
+      enableDefaultConfig = mkOption {
+        default = true;
+        type = lib.types.bool;
+        description = lib.mdDoc "Enable an uncustomised exwm configuration.";
+      };
+      extraPackages = mkOption {
+        type = types.functionTo (types.listOf types.package);
+        default = epkgs: [];
+        defaultText = literalExpression "epkgs: []";
+        example = literalExpression ''
+          epkgs: [
+            epkgs.emms
+            epkgs.magit
+            epkgs.proofgeneral
+          ]
+        '';
+        description = lib.mdDoc ''
+          Extra packages available to Emacs. The value must be a
+          function which receives the attrset defined in
+          {var}`emacs.pkgs` as the sole argument.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "exwm";
+      start = ''
+        ${exwm-emacs}/bin/emacs -l ${loadScript}
+      '';
+    };
+    environment.systemPackages = [ exwm-emacs ];
+  };
+}
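A usage sketch for the exwm module above; epkgs.magit comes from the option's own example, and the loadScript here simply repeats the default require.

{ pkgs, ... }:
{
  services.xserver.enable = true;
  services.xserver.windowManager.exwm = {
    enable = true;
    # Runs before the default EXWM config, since enableDefaultConfig defaults to true.
    loadScript = ''
      (require 'exwm)
    '';
    # Extra Emacs packages built into the wrapped Emacs.
    extraPackages = epkgs: [ epkgs.magit ];
  };
}
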
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/fluxbox.nix b/nixpkgs/nixos/modules/services/x11/window-managers/fluxbox.nix
new file mode 100644
index 000000000000..24165fb6fb07
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/fluxbox.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.fluxbox;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.fluxbox.enable = mkEnableOption (lib.mdDoc "fluxbox");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "fluxbox";
+      start = ''
+        ${pkgs.fluxbox}/bin/startfluxbox &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.fluxbox ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/fvwm2.nix b/nixpkgs/nixos/modules/services/x11/window-managers/fvwm2.nix
new file mode 100644
index 000000000000..aaf3c5c46906
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/fvwm2.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.fvwm2;
+  fvwm2 = pkgs.fvwm2.override { enableGestures = cfg.gestures; };
+in
+
+{
+
+  imports = [
+    (mkRenamedOptionModule
+      [ "services" "xserver" "windowManager" "fvwm" ]
+      [ "services" "xserver" "windowManager" "fvwm2" ])
+  ];
+
+  ###### interface
+
+  options = {
+    services.xserver.windowManager.fvwm2 = {
+      enable = mkEnableOption (lib.mdDoc "Fvwm2 window manager");
+
+      gestures = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Whether or not to enable libstroke for gesture support";
+      };
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton
+      { name = "fvwm2";
+        start =
+          ''
+            ${fvwm2}/bin/fvwm &
+            waitPID=$!
+          '';
+      };
+
+    environment.systemPackages = [ fvwm2 ];
+  };
+}
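A short sketch of the fvwm2 module above; setting gestures rebuilds the package with libstroke support, as in the override at the top of the module.

{ ... }:
{
  services.xserver.enable = true;
  services.xserver.windowManager.fvwm2 = {
    enable = true;
    gestures = true;  # enables the libstroke override shown in the module
  };
}
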
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/fvwm3.nix b/nixpkgs/nixos/modules/services/x11/window-managers/fvwm3.nix
new file mode 100644
index 000000000000..50c76b67eea3
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/fvwm3.nix
@@ -0,0 +1,35 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.fvwm3;
+  inherit (pkgs) fvwm3;
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.xserver.windowManager.fvwm3 = {
+      enable = mkEnableOption (lib.mdDoc "Fvwm3 window manager");
+    };
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton
+      { name = "fvwm3";
+        start =
+          ''
+            ${fvwm3}/bin/fvwm3 &
+            waitPID=$!
+          '';
+      };
+
+    environment.systemPackages = [ fvwm3 ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/hackedbox.nix b/nixpkgs/nixos/modules/services/x11/window-managers/hackedbox.nix
new file mode 100644
index 000000000000..61e911961f51
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/hackedbox.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.hackedbox;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.hackedbox.enable = mkEnableOption (lib.mdDoc "hackedbox");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "hackedbox";
+      start = ''
+        ${pkgs.hackedbox}/bin/hackedbox &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.hackedbox ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/herbstluftwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/herbstluftwm.nix
new file mode 100644
index 000000000000..93705ada116d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/herbstluftwm.nix
@@ -0,0 +1,47 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.herbstluftwm;
+in
+
+{
+  options = {
+    services.xserver.windowManager.herbstluftwm = {
+      enable = mkEnableOption (lib.mdDoc "herbstluftwm");
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.herbstluftwm;
+        defaultText = literalExpression "pkgs.herbstluftwm";
+        description = lib.mdDoc ''
+          Herbstluftwm package to use.
+        '';
+      };
+
+      configFile = mkOption {
+        default     = null;
+        type        = with types; nullOr path;
+        description = lib.mdDoc ''
+          Path to the herbstluftwm configuration file.  If left at the
+          default value, $XDG_CONFIG_HOME/herbstluftwm/autostart will
+          be used.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "herbstluftwm";
+      start =
+        let configFileClause = optionalString
+            (cfg.configFile != null)
+            ''-c "${cfg.configFile}"''
+            ;
+        in "${cfg.package}/bin/herbstluftwm ${configFileClause} &";
+    };
+    environment.systemPackages = [ cfg.package ];
+  };
+}
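A usage sketch for the herbstluftwm module above. The autostart script and its herbstclient call are illustrative assumptions; leaving configFile at null uses $XDG_CONFIG_HOME/herbstluftwm/autostart instead.

{ pkgs, ... }:
{
  services.xserver.enable = true;
  services.xserver.windowManager.herbstluftwm = {
    enable = true;
    # Hypothetical autostart script passed via -c.
    configFile = pkgs.writeShellScript "hlwm-autostart" ''
      herbstclient set frame_gap 4
    '';
  };
}
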
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/hypr.nix b/nixpkgs/nixos/modules/services/x11/window-managers/hypr.nix
new file mode 100644
index 000000000000..4c1fea71f93e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/hypr.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.hypr;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.hypr.enable = mkEnableOption (lib.mdDoc "hypr");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "hypr";
+      start = ''
+        ${pkgs.hypr}/bin/Hypr &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.hypr ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/i3.nix b/nixpkgs/nixos/modules/services/x11/window-managers/i3.nix
new file mode 100644
index 000000000000..5bb73cd0bfb1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/i3.nix
@@ -0,0 +1,77 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.i3;
+in
+
+{
+  options.services.xserver.windowManager.i3 = {
+    enable = mkEnableOption (lib.mdDoc "i3 window manager");
+
+    configFile = mkOption {
+      default     = null;
+      type        = with types; nullOr path;
+      description = lib.mdDoc ''
+        Path to the i3 configuration file.
+        If left at the default value, $HOME/.i3/config will be used.
+      '';
+    };
+
+    extraSessionCommands = mkOption {
+      default     = "";
+      type        = types.lines;
+      description = lib.mdDoc ''
+        Shell commands executed just before i3 is started.
+      '';
+    };
+
+    package = mkOption {
+      type        = types.package;
+      default     = pkgs.i3;
+      defaultText = literalExpression "pkgs.i3";
+      description = lib.mdDoc ''
+        i3 package to use.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = with pkgs; [ dmenu i3status i3lock ];
+      defaultText = literalExpression ''
+        with pkgs; [
+          dmenu
+          i3status
+          i3lock
+        ]
+      '';
+      description = lib.mdDoc ''
+        Extra packages to be installed system wide.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = [{
+      name  = "i3";
+      start = ''
+        ${cfg.extraSessionCommands}
+
+        ${cfg.package}/bin/i3 ${optionalString (cfg.configFile != null)
+          "-c /etc/i3/config"
+        } &
+        waitPID=$!
+      '';
+    }];
+    environment.systemPackages = [ cfg.package ] ++ cfg.extraPackages;
+    environment.etc."i3/config" = mkIf (cfg.configFile != null) {
+      source = cfg.configFile;
+    };
+  };
+
+  imports = [
+    (mkRemovedOptionModule [ "services" "xserver" "windowManager" "i3-gaps" "enable" ]
+      "i3-gaps was merged into i3. Use services.xserver.windowManager.i3.enable instead.")
+  ];
+}
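A usage sketch for the i3 module above; rofi and the xsetroot call are illustrative choices, not defaults of the module.

{ pkgs, ... }:
{
  services.xserver.enable = true;
  services.xserver.windowManager.i3 = {
    enable = true;
    # Replaces the default dmenu/i3status/i3lock set.
    extraPackages = with pkgs; [ rofi i3status i3lock ];
    # Run just before i3 starts.
    extraSessionCommands = ''
      ${pkgs.xorg.xsetroot}/bin/xsetroot -solid '#222222'
    '';
  };
}
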
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/icewm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/icewm.nix
new file mode 100644
index 000000000000..48741aa41d85
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/icewm.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.icewm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.icewm.enable = mkEnableOption (lib.mdDoc "icewm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton
+      { name = "icewm";
+        start =
+          ''
+            ${pkgs.icewm}/bin/icewm &
+            waitPID=$!
+          '';
+      };
+
+    environment.systemPackages = [ pkgs.icewm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/jwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/jwm.nix
new file mode 100644
index 000000000000..40758029bc65
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/jwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.jwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.jwm.enable = mkEnableOption (lib.mdDoc "jwm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "jwm";
+      start = ''
+        ${pkgs.jwm}/bin/jwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.jwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/katriawm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/katriawm.nix
new file mode 100644
index 000000000000..9a3fd5f3ca44
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/katriawm.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib) mdDoc mkEnableOption mkIf mkPackageOptionMD singleton;
+  cfg = config.services.xserver.windowManager.katriawm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.katriawm = {
+      enable = mkEnableOption (mdDoc "katriawm");
+      package = mkPackageOptionMD pkgs "katriawm" {};
+    };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "katriawm";
+      start = ''
+        ${cfg.package}/bin/katriawm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ cfg.package ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/leftwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/leftwm.nix
new file mode 100644
index 000000000000..2571735ba8bf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/leftwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.leftwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.leftwm.enable = mkEnableOption (lib.mdDoc "leftwm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "leftwm";
+      start = ''
+        ${pkgs.leftwm}/bin/leftwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.leftwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/lwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/lwm.nix
new file mode 100644
index 000000000000..517abb23d4af
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/lwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.lwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.lwm.enable = mkEnableOption (lib.mdDoc "lwm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "lwm";
+      start = ''
+        ${pkgs.lwm}/bin/lwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.lwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/metacity.nix b/nixpkgs/nixos/modules/services/x11/window-managers/metacity.nix
new file mode 100644
index 000000000000..1f69147af5bc
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/metacity.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.windowManager.metacity;
+  inherit (pkgs) gnome;
+in
+
+{
+  options = {
+    services.xserver.windowManager.metacity.enable = mkEnableOption (lib.mdDoc "metacity");
+  };
+
+  config = mkIf cfg.enable {
+
+    services.xserver.windowManager.session = singleton
+      { name = "metacity";
+        start = ''
+          ${gnome.metacity}/bin/metacity &
+          waitPID=$!
+        '';
+      };
+
+    environment.systemPackages = [ gnome.metacity ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/mlvwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/mlvwm.nix
new file mode 100644
index 000000000000..fe0433c24b60
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/mlvwm.nix
@@ -0,0 +1,41 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.xserver.windowManager.mlvwm;
+
+in
+{
+
+  options.services.xserver.windowManager.mlvwm = {
+    enable = mkEnableOption (lib.mdDoc "Macintosh-like Virtual Window Manager");
+
+    configFile = mkOption {
+      default = null;
+      type = with types; nullOr path;
+      description = lib.mdDoc ''
+        Path to the mlvwm configuration file.
+        If left at the default value, $HOME/.mlvwmrc will be used.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    services.xserver.windowManager.session = [{
+      name = "mlvwm";
+      start = ''
+        ${pkgs.mlvwm}/bin/mlvwm ${optionalString (cfg.configFile != null)
+          "-f /etc/mlvwm/mlvwmrc"
+        } &
+        waitPID=$!
+      '';
+    }];
+
+    environment.etc."mlvwm/mlvwmrc" = mkIf (cfg.configFile != null) {
+      source = cfg.configFile;
+    };
+
+    environment.systemPackages = [ pkgs.mlvwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/mwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/mwm.nix
new file mode 100644
index 000000000000..9f8dc0939e5e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/mwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.mwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.mwm.enable = mkEnableOption (lib.mdDoc "mwm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "mwm";
+      start = ''
+        ${pkgs.motif}/bin/mwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.motif ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/nimdow.nix b/nixpkgs/nixos/modules/services/x11/window-managers/nimdow.nix
new file mode 100644
index 000000000000..de3192876024
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/nimdow.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.nimdow;
+in
+{
+  options = {
+    services.xserver.windowManager.nimdow.enable = mkEnableOption (lib.mdDoc "nimdow");
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "nimdow";
+      start = ''
+        ${pkgs.nimdow}/bin/nimdow &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.nimdow ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/none.nix b/nixpkgs/nixos/modules/services/x11/window-managers/none.nix
new file mode 100644
index 000000000000..84cf1d770776
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/none.nix
@@ -0,0 +1,12 @@
+{
+  services = {
+    xserver = {
+      windowManager = {
+        session = [{
+          name = "none";
+          start = "";
+        }];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/notion.nix b/nixpkgs/nixos/modules/services/x11/window-managers/notion.nix
new file mode 100644
index 000000000000..0015e90a41c5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/notion.nix
@@ -0,0 +1,26 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.notion;
+in
+
+{
+  options = {
+    services.xserver.windowManager.notion.enable = mkEnableOption (lib.mdDoc "notion");
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager = {
+      session = [{
+        name = "notion";
+        start = ''
+          ${pkgs.notion}/bin/notion &
+          waitPID=$!
+        '';
+      }];
+    };
+    environment.systemPackages = [ pkgs.notion ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/openbox.nix b/nixpkgs/nixos/modules/services/x11/window-managers/openbox.nix
new file mode 100644
index 000000000000..bf5a500f431a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/openbox.nix
@@ -0,0 +1,24 @@
+{lib, pkgs, config, ...}:
+
+with lib;
+let
+  cfg = config.services.xserver.windowManager.openbox;
+in
+
+{
+  options = {
+    services.xserver.windowManager.openbox.enable = mkEnableOption (lib.mdDoc "openbox");
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager = {
+      session = [{
+        name = "openbox";
+        start = "
+          ${pkgs.openbox}/bin/openbox-session
+        ";
+      }];
+    };
+    environment.systemPackages = [ pkgs.openbox ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/pekwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/pekwm.nix
new file mode 100644
index 000000000000..8818f568647a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/pekwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.pekwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.pekwm.enable = mkEnableOption (lib.mdDoc "pekwm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "pekwm";
+      start = ''
+        ${pkgs.pekwm}/bin/pekwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.pekwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/qtile.nix b/nixpkgs/nixos/modules/services/x11/window-managers/qtile.nix
new file mode 100644
index 000000000000..a362d5cdbeee
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/qtile.nix
@@ -0,0 +1,71 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.qtile;
+  pyEnv = pkgs.python3.withPackages (p: [ (cfg.package.unwrapped or cfg.package) ] ++ (cfg.extraPackages p));
+in
+
+{
+  options.services.xserver.windowManager.qtile = {
+    enable = mkEnableOption (lib.mdDoc "qtile");
+
+    package = mkPackageOptionMD pkgs "qtile-unwrapped" { };
+
+    configFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      example = literalExpression "./your_config.py";
+      description = lib.mdDoc ''
+          Path to the qtile configuration file.
+          If null, $XDG_CONFIG_HOME/qtile/config.py will be used.
+      '';
+    };
+
+    backend = mkOption {
+      type = types.enum [ "x11" "wayland" ];
+      default = "x11";
+      description = lib.mdDoc ''
+          Backend to use in qtile: `x11` or `wayland`.
+      '';
+    };
+
+    extraPackages = mkOption {
+        type = types.functionTo (types.listOf types.package);
+        default = _: [];
+        defaultText = literalExpression ''
+          python3Packages: with python3Packages; [];
+        '';
+        description = lib.mdDoc ''
+          Extra Python packages available to Qtile.
+          An example would be to include `python3Packages.qtile-extras`
+          for additional unofficial widgets.
+        '';
+        example = literalExpression ''
+          python3Packages: with python3Packages; [
+            qtile-extras
+          ];
+        '';
+      };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = [{
+      name = "qtile";
+      start = ''
+        ${pyEnv}/bin/qtile start -b ${cfg.backend} \
+        ${optionalString (cfg.configFile != null)
+        "--config \"${cfg.configFile}\""} &
+        waitPID=$!
+      '';
+    }];
+
+    environment.systemPackages = [
+      # pkgs.qtile is currently a buildenv of qtile and its dependencies.
+      # For userland commands, we want the underlying package so that
+      # packages such as python don't bleed into userland and overwrite intended behavior.
+      (cfg.package.unwrapped or cfg.package)
+    ];
+  };
+}
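A usage sketch for the qtile module above; python3Packages.qtile-extras is the option's own example of an extra widget library.

{ ... }:
{
  services.xserver.enable = true;
  services.xserver.windowManager.qtile = {
    enable = true;
    backend = "x11";  # or "wayland"
    extraPackages = python3Packages: with python3Packages; [ qtile-extras ];
  };
}
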
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/ragnarwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/ragnarwm.nix
new file mode 100644
index 000000000000..0843b872dba5
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/ragnarwm.nix
@@ -0,0 +1,33 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.ragnarwm;
+in
+{
+  ###### interface
+
+  options = {
+    services.xserver.windowManager.ragnarwm = {
+      enable = mkEnableOption (lib.mdDoc "ragnarwm");
+      package = mkOption {
+        type = types.package;
+        default = pkgs.ragnarwm;
+        defaultText = literalExpression "pkgs.ragnarwm";
+        description = lib.mdDoc ''
+          The ragnar package to use.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    services.xserver.displayManager.sessionPackages = [ cfg.package ];
+    environment.systemPackages = [ cfg.package ];
+  };
+
+  meta.maintainers = with lib.maintainers; [ sigmanificient ];
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/ratpoison.nix b/nixpkgs/nixos/modules/services/x11/window-managers/ratpoison.nix
new file mode 100644
index 000000000000..1de0fad3e54d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/ratpoison.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.ratpoison;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.ratpoison.enable = mkEnableOption (lib.mdDoc "ratpoison");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "ratpoison";
+      start = ''
+        ${pkgs.ratpoison}/bin/ratpoison &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.ratpoison ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/sawfish.nix b/nixpkgs/nixos/modules/services/x11/window-managers/sawfish.nix
new file mode 100644
index 000000000000..1945a1af6763
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/sawfish.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.sawfish;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.sawfish.enable = mkEnableOption (lib.mdDoc "sawfish");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "sawfish";
+      start = ''
+        ${pkgs.sawfish}/bin/sawfish &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.sawfish ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/smallwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/smallwm.nix
new file mode 100644
index 000000000000..e92b18690d8a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/smallwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.smallwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.smallwm.enable = mkEnableOption (lib.mdDoc "smallwm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "smallwm";
+      start = ''
+        ${pkgs.smallwm}/bin/smallwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.smallwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/spectrwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/spectrwm.nix
new file mode 100644
index 000000000000..c464803a0b6a
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/spectrwm.nix
@@ -0,0 +1,27 @@
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.spectrwm;
+in
+
+{
+  options = {
+    services.xserver.windowManager.spectrwm.enable = mkEnableOption (lib.mdDoc "spectrwm");
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager = {
+      session = [{
+        name = "spectrwm";
+        start = ''
+          ${pkgs.spectrwm}/bin/spectrwm &
+          waitPID=$!
+        '';
+      }];
+    };
+    environment.systemPackages = [ pkgs.spectrwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/stumpwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/stumpwm.nix
new file mode 100644
index 000000000000..c6fc49f5821b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/stumpwm.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.stumpwm;
+in
+
+{
+  options = {
+    services.xserver.windowManager.stumpwm.enable = mkEnableOption (lib.mdDoc "stumpwm");
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "stumpwm";
+      start = ''
+        ${pkgs.sbclPackages.stumpwm}/bin/stumpwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.sbclPackages.stumpwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/tinywm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/tinywm.nix
new file mode 100644
index 000000000000..7418a6ddc760
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/tinywm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.tinywm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.tinywm.enable = mkEnableOption (lib.mdDoc "tinywm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "tinywm";
+      start = ''
+        ${pkgs.tinywm}/bin/tinywm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.tinywm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/twm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/twm.nix
new file mode 100644
index 000000000000..231817a26e66
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/twm.nix
@@ -0,0 +1,37 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.services.xserver.windowManager.twm;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    services.xserver.windowManager.twm.enable = mkEnableOption (lib.mdDoc "twm");
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.xserver.windowManager.session = singleton
+      { name = "twm";
+        start =
+          ''
+            ${pkgs.xorg.twm}/bin/twm &
+            waitPID=$!
+          '';
+      };
+
+    environment.systemPackages = [ pkgs.xorg.twm ];
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/windowlab.nix b/nixpkgs/nixos/modules/services/x11/window-managers/windowlab.nix
new file mode 100644
index 000000000000..9a0646b6ee7d
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/windowlab.nix
@@ -0,0 +1,22 @@
+{lib, pkgs, config, ...}:
+
+let
+  cfg = config.services.xserver.windowManager.windowlab;
+in
+
+{
+  options = {
+    services.xserver.windowManager.windowlab.enable =
+      lib.mkEnableOption (lib.mdDoc "windowlab");
+  };
+
+  config = lib.mkIf cfg.enable {
+    services.xserver.windowManager = {
+      session =
+        [{ name  = "windowlab";
+           start = "${pkgs.windowlab}/bin/windowlab";
+        }];
+    };
+    environment.systemPackages = [ pkgs.windowlab ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/windowmaker.nix b/nixpkgs/nixos/modules/services/x11/window-managers/windowmaker.nix
new file mode 100644
index 000000000000..a679e2b5bc80
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/windowmaker.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.windowmaker;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.windowmaker.enable = mkEnableOption (lib.mdDoc "windowmaker");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "windowmaker";
+      start = ''
+        ${pkgs.windowmaker}/bin/wmaker &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.windowmaker ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/wmderland.nix b/nixpkgs/nixos/modules/services/x11/window-managers/wmderland.nix
new file mode 100644
index 000000000000..ed515741f62e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/wmderland.nix
@@ -0,0 +1,61 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.wmderland;
+in
+
+{
+  options.services.xserver.windowManager.wmderland = {
+    enable = mkEnableOption (lib.mdDoc "wmderland");
+
+    extraSessionCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands executed just before wmderland is started.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = with pkgs; [
+        rofi
+        dunst
+        light
+        hsetroot
+        feh
+        rxvt-unicode
+      ];
+      defaultText = literalExpression ''
+        with pkgs; [
+          rofi
+          dunst
+          light
+          hsetroot
+          feh
+          rxvt-unicode
+        ]
+      '';
+      description = lib.mdDoc ''
+        Extra packages to be installed system wide.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "wmderland";
+      start = ''
+        ${cfg.extraSessionCommands}
+
+        ${pkgs.wmderland}/bin/wmderland &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [
+      pkgs.wmderland pkgs.wmderlandc
+    ] ++ cfg.extraPackages;
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/wmii.nix b/nixpkgs/nixos/modules/services/x11/window-managers/wmii.nix
new file mode 100644
index 000000000000..090aa31610ab
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/wmii.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.xserver.windowManager.wmii;
+  wmii = pkgs.wmii_hg;
+in
+{
+  options = {
+    services.xserver.windowManager.wmii.enable = mkEnableOption (lib.mdDoc "wmii");
+  };
+
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton
+      # stop wmii by
+      #   $wmiir xwrite /ctl quit
+      # this will cause wmii exiting with exit code 0
+      # (or "mod+a quit", which is bound to do the same thing in wmiirc
+      # by default)
+      #
+      # why this loop?
+      # wmii crashes once a month here. That doesn't matter that much
+      # wmii can recover very well. However without loop the X session
+      # terminates and then your workspace setup is lost and all
+      # applications running on X will terminate.
+      # Another use case is kill -9 wmii; after rotating screen.
+      # Note: we don't like kill for that purpose. But it works (->
+      # subject "wmii and xrandr" on mailinglist)
+      { name = "wmii";
+        start = ''
+          while :; do
+            ${wmii}/bin/wmii && break
+          done
+        '';
+      };
+
+    environment.systemPackages = [ wmii ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/xmonad.nix b/nixpkgs/nixos/modules/services/x11/window-managers/xmonad.nix
new file mode 100644
index 000000000000..c35446bf405b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/xmonad.nix
@@ -0,0 +1,204 @@
+{pkgs, lib, config, ...}:
+
+with lib;
+let
+  inherit (lib) mkOption mkIf optionals literalExpression optionalString;
+  cfg = config.services.xserver.windowManager.xmonad;
+
+  ghcWithPackages = cfg.haskellPackages.ghcWithPackages;
+  packages = self: cfg.extraPackages self ++
+                   optionals cfg.enableContribAndExtras
+                   [ self.xmonad-contrib self.xmonad-extras ];
+
+  xmonad-vanilla = pkgs.xmonad-with-packages.override {
+    inherit ghcWithPackages packages;
+  };
+
+  xmonad-config =
+    let
+      xmonadAndPackages = self: [ self.xmonad ] ++ packages self;
+      xmonadEnv = ghcWithPackages xmonadAndPackages;
+      configured = pkgs.writers.writeHaskellBin "xmonad" {
+        ghc = cfg.haskellPackages.ghc;
+        libraries = xmonadAndPackages cfg.haskellPackages;
+        inherit (cfg) ghcArgs;
+      } cfg.config;
+    in
+      pkgs.runCommandLocal "xmonad" {
+        nativeBuildInputs = [ pkgs.makeWrapper ];
+      } (''
+        install -D ${xmonadEnv}/share/man/man1/xmonad.1.gz $out/share/man/man1/xmonad.1.gz
+        makeWrapper ${configured}/bin/xmonad $out/bin/xmonad \
+      '' + optionalString cfg.enableConfiguredRecompile ''
+          --set XMONAD_GHC "${xmonadEnv}/bin/ghc" \
+      '' + ''
+          --set XMONAD_XMESSAGE "${pkgs.xorg.xmessage}/bin/xmessage"
+      '');
+
+  xmonad = if (cfg.config != null) then xmonad-config else xmonad-vanilla;
+in {
+  meta.maintainers = with maintainers; [ lassulus xaverdh ivanbrennan ];
+
+  options = {
+    services.xserver.windowManager.xmonad = {
+      enable = mkEnableOption (lib.mdDoc "xmonad");
+
+      haskellPackages = mkOption {
+        default = pkgs.haskellPackages;
+        defaultText = literalExpression "pkgs.haskellPackages";
+        example = literalExpression "pkgs.haskell.packages.ghc810";
+        type = types.attrs;
+        description = lib.mdDoc ''
+          haskellPackages used to build Xmonad and other packages.
+          This can be used to change the GHC version used to build
+          Xmonad and the packages listed in
+          {var}`extraPackages`.
+        '';
+      };
+
+      extraPackages = mkOption {
+        type = types.functionTo (types.listOf types.package);
+        default = self: [];
+        defaultText = literalExpression "self: []";
+        example = literalExpression ''
+          haskellPackages: [
+            haskellPackages.xmonad-contrib
+            haskellPackages.monad-logger
+          ]
+        '';
+        description = lib.mdDoc ''
+          Extra packages available to ghc when rebuilding Xmonad. The
+          value must be a function which receives the attrset defined
+          in {var}`haskellPackages` as the sole argument.
+        '';
+      };
+
+      enableContribAndExtras = mkOption {
+        default = false;
+        type = lib.types.bool;
+        description = lib.mdDoc "Enable xmonad-{contrib,extras} in Xmonad.";
+      };
+
+      config = mkOption {
+        default = null;
+        type = with lib.types; nullOr (either path str);
+        description = lib.mdDoc ''
+          Configuration from which XMonad gets compiled. If no value is
+          specified, a vanilla xmonad binary is put in PATH, which will
+          attempt to recompile and exec your xmonad config from $HOME/.xmonad.
+          This setup is then analogous to other (non-NixOS) Linux distributions.
+
+          If you do set this option, you likely want to use "launch" as your
+          entry point for xmonad (as in the example), to avoid xmonad's
+          recompilation logic on startup. Doing so will render the default
+          "mod+q" restart key binding dysfunctional though, because that attempts
+          to call your binary with the "--restart" command line option, unless
+          you implement that yourself. You may want to bind "mod+q" to
+          `(restart "xmonad" True)` instead, which will just restart
+          xmonad from PATH. This allows e.g. switching to the new xmonad binary
+          after rebuilding your system with nixos-rebuild.
+          For the same reason, ghc is not added to the environment when this
+          option is set, unless {option}`enableConfiguredRecompile` is
+          set to `true`.
+
+          If you actually want to run xmonad with a config specified here, but
+          also be able to recompile and restart it from a copy of that source in
+          $HOME/.xmonad on the fly, set {option}`enableConfiguredRecompile`
+          to `true` and implement something like "compileRestart"
+          from the example.
+          This should allow you to switch at will between the local xmonad and
+          the one NixOS puts in your PATH.
+        '';
+        example = ''
+          import XMonad
+          import XMonad.Util.EZConfig (additionalKeys)
+          import Control.Monad (when)
+          import Text.Printf (printf)
+          import System.Posix.Process (executeFile)
+          import System.Info (arch,os)
+          import System.Environment (getArgs)
+          import System.FilePath ((</>))
+
+          compiledConfig = printf "xmonad-%s-%s" arch os
+
+          myConfig = defaultConfig
+            { modMask = mod4Mask -- Use Super instead of Alt
+            , terminal = "urxvt" }
+            `additionalKeys`
+            [ ( (mod4Mask,xK_r), compileRestart True)
+            , ( (mod4Mask,xK_q), restart "xmonad" True ) ]
+
+          compileRestart resume = do
+            dirs  <- asks directories
+            whenX (recompile dirs True) $ do
+              when resume writeStateToFile
+              catchIO
+                  ( do
+                      args <- getArgs
+                      executeFile (cacheDir dirs </> compiledConfig) False args Nothing
+                  )
+
+          main = getDirectories >>= launch myConfig
+
+          --------------------------------------------
+          {- For versions before 0.17.0 use this instead -}
+          --------------------------------------------
+          -- compileRestart resume =
+          --   whenX (recompile True) $
+          --     when resume writeStateToFile
+          --       *> catchIO
+          --         ( do
+          --             dir <- getXMonadDataDir
+          --             args <- getArgs
+          --             executeFile (dir </> compiledConfig) False args Nothing
+          --         )
+          --
+          -- main = launch myConfig
+          --------------------------------------------
+
+        '';
+      };
+
+      enableConfiguredRecompile = mkOption {
+        default = false;
+        type = lib.types.bool;
+        description = lib.mdDoc ''
+          Enable recompilation even if {option}`config` is set to a
+          non-null value. This adds the necessary Haskell dependencies (GHC with
+          packages) to the xmonad binary's environment.
+        '';
+      };
+
+      xmonadCliArgs = mkOption {
+        default = [];
+        type = with lib.types; listOf str;
+        description = lib.mdDoc ''
+          Command line arguments passed to the xmonad binary.
+        '';
+      };
+
+      ghcArgs = mkOption {
+        default = [];
+        type = with lib.types; listOf str;
+        description = lib.mdDoc ''
+          Command line arguments passed to the compiler (ghc)
+          invocation when xmonad.config is set.
+        '';
+      };
+
+    };
+  };
+  config = mkIf cfg.enable {
+    services.xserver.windowManager = {
+      session = [{
+        name = "xmonad";
+        start = ''
+           systemd-cat -t xmonad -- ${xmonad}/bin/xmonad ${lib.escapeShellArgs cfg.xmonadCliArgs} &
+           waitPID=$!
+        '';
+      }];
+    };
+
+    environment.systemPackages = [ xmonad ];
+  };
+}
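
A minimal sketch of how this module might be wired into a host configuration, assuming the Haskell source above is kept in a local xmonad.hs next to the configuration (the file path is illustrative; only options visible in this module are used):

  { pkgs, ... }:
  {
    services.xserver.enable = true;
    services.xserver.windowManager.xmonad = {
      enable = true;
      # Pin the configuration to a file in the repository; this bypasses the
      # usual recompile-on-startup behaviour as described above.
      config = builtins.readFile ./xmonad.hs;
      # Keep ghc in the environment so a copy of the config in $HOME/.xmonad
      # can still be recompiled and restarted on the fly ("compileRestart").
      enableConfiguredRecompile = true;
    };
  }
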
diff --git a/nixpkgs/nixos/modules/services/x11/window-managers/yeahwm.nix b/nixpkgs/nixos/modules/services/x11/window-managers/yeahwm.nix
new file mode 100644
index 000000000000..9b40cecace26
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/window-managers/yeahwm.nix
@@ -0,0 +1,25 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.windowManager.yeahwm;
+in
+{
+  ###### interface
+  options = {
+    services.xserver.windowManager.yeahwm.enable = mkEnableOption (lib.mdDoc "yeahwm");
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    services.xserver.windowManager.session = singleton {
+      name = "yeahwm";
+      start = ''
+        ${pkgs.yeahwm}/bin/yeahwm &
+        waitPID=$!
+      '';
+    };
+    environment.systemPackages = [ pkgs.yeahwm ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/services/x11/xautolock.nix b/nixpkgs/nixos/modules/services/x11/xautolock.nix
new file mode 100644
index 000000000000..5b8b748a086b
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/xautolock.nix
@@ -0,0 +1,141 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.xserver.xautolock;
+in
+  {
+    options = {
+      services.xserver.xautolock = {
+        enable = mkEnableOption (lib.mdDoc "xautolock");
+        enableNotifier = mkEnableOption (lib.mdDoc "xautolock.notify") // {
+          description = lib.mdDoc ''
+            Whether to enable the notifier feature of xautolock.
+            This publishes a notification before the autolock.
+          '';
+        };
+
+        time = mkOption {
+          default = 15;
+          type = types.int;
+
+          description = lib.mdDoc ''
+            Idle time (in minutes) to wait until xautolock locks the computer.
+          '';
+        };
+
+        locker = mkOption {
+          default = "${pkgs.xlockmore}/bin/xlock"; # default according to `man xautolock`
+          defaultText = literalExpression ''"''${pkgs.xlockmore}/bin/xlock"'';
+          example = literalExpression ''"''${pkgs.i3lock}/bin/i3lock -i /path/to/img"'';
+          type = types.str;
+
+          description = lib.mdDoc ''
+            The script to use when automatically locking the computer.
+          '';
+        };
+
+        nowlocker = mkOption {
+          default = null;
+          example = literalExpression ''"''${pkgs.i3lock}/bin/i3lock -i /path/to/img"'';
+          type = types.nullOr types.str;
+
+          description = lib.mdDoc ''
+            The script to use when manually locking the computer with {command}`xautolock -locknow`.
+          '';
+        };
+
+        notify = mkOption {
+          default = 10;
+          type = types.int;
+
+          description = lib.mdDoc ''
+            Time (in seconds) before the actual lock when the notification about the pending lock should be published.
+          '';
+        };
+
+        notifier = mkOption {
+          default = null;
+          example = literalExpression ''"''${pkgs.libnotify}/bin/notify-send 'Locking in 10 seconds'"'';
+          type = types.nullOr types.str;
+
+          description = lib.mdDoc ''
+            Notification script to be used to warn about the pending autolock.
+          '';
+        };
+
+        killer = mkOption {
+          default = null; # default according to `man xautolock` is none
+          example = "/run/current-system/systemd/bin/systemctl suspend";
+          type = types.nullOr types.str;
+
+          description = lib.mdDoc ''
+            The script to use when nothing has happened for as long as {option}`killtime`.
+          '';
+        };
+
+        killtime = mkOption {
+          default = 20; # default according to `man xautolock`
+          type = types.int;
+
+          description = lib.mdDoc ''
+            Minutes xautolock waits until it executes the script specified in {option}`killer`
+            (has to be at least 10 minutes).
+          '';
+        };
+
+        extraOptions = mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          example = [ "-detectsleep" ];
+          description = lib.mdDoc ''
+            Additional command-line arguments to pass to
+            {command}`xautolock`.
+          '';
+        };
+      };
+    };
+
+    config = mkIf cfg.enable {
+      environment.systemPackages = with pkgs; [ xautolock ];
+      systemd.user.services.xautolock = {
+        description = "xautolock service";
+        wantedBy = [ "graphical-session.target" ];
+        partOf = [ "graphical-session.target" ];
+        serviceConfig = with lib; {
+          ExecStart = strings.concatStringsSep " " ([
+            "${pkgs.xautolock}/bin/xautolock"
+            "-noclose"
+            "-time ${toString cfg.time}"
+            "-locker '${cfg.locker}'"
+          ] ++ optionals cfg.enableNotifier [
+            "-notify ${toString cfg.notify}"
+            "-notifier '${cfg.notifier}'"
+          ] ++ optionals (cfg.nowlocker != null) [
+            "-nowlocker '${cfg.nowlocker}'"
+          ] ++ optionals (cfg.killer != null) [
+            "-killer '${cfg.killer}'"
+            "-killtime ${toString cfg.killtime}"
+          ] ++ cfg.extraOptions);
+          Restart = "always";
+        };
+      };
+      assertions = [
+        {
+          assertion = cfg.enableNotifier -> cfg.notifier != null;
+          message = "When enabling the notifier for xautolock, you also need to specify the notify script";
+        }
+        {
+          assertion = cfg.killer != null -> cfg.killtime >= 10;
+          message = "killtime has to be at least 10 minutes according to `man xautolock`";
+        }
+      ] ++ (lib.forEach [ "locker" "notifier" "nowlocker" "killer" ]
+        (option:
+        {
+          assertion = cfg.${option} != null -> builtins.substring 0 1 cfg.${option} == "/";
+          message = "Please specify a canonical path for `services.xserver.xautolock.${option}`";
+        })
+      );
+    };
+  }
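
A hedged sketch combining the options above into one configuration; the locker, notifier and killer commands are taken from the option examples and stand in for whatever the user actually wants to run:

  { pkgs, ... }:
  {
    services.xserver.xautolock = {
      enable = true;
      time = 10;                                # lock after 10 minutes of idle time
      locker = "${pkgs.i3lock}/bin/i3lock";     # must be an absolute path (see the assertions above)
      enableNotifier = true;
      notify = 30;                              # warn 30 seconds before locking
      notifier = "${pkgs.libnotify}/bin/notify-send 'Locking in 30 seconds'";
      killer = "/run/current-system/systemd/bin/systemctl suspend";
      killtime = 20;                            # must be at least 10 minutes
      extraOptions = [ "-detectsleep" ];
    };
  }
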
diff --git a/nixpkgs/nixos/modules/services/x11/xbanish.nix b/nixpkgs/nixos/modules/services/x11/xbanish.nix
new file mode 100644
index 000000000000..de893fae75a1
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/xbanish.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.services.xbanish;
+
+in {
+  options.services.xbanish = {
+
+    enable = mkEnableOption (lib.mdDoc "xbanish");
+
+    arguments = mkOption {
+      description = lib.mdDoc "Arguments to pass to xbanish command";
+      default = "";
+      example = "-d -i shift";
+      type = types.str;
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.user.services.xbanish = {
+      description = "xbanish hides the mouse pointer";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+      serviceConfig.ExecStart = ''
+        ${pkgs.xbanish}/bin/xbanish ${cfg.arguments}
+      '';
+      serviceConfig.Restart = "always";
+    };
+  };
+}
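
For completeness, a small sketch of enabling the service; the argument string mirrors the option's example:

  {
    services.xbanish = {
      enable = true;
      # Hide the pointer while typing, but leave it alone when Shift is held.
      arguments = "-i shift";
    };
  }
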
diff --git a/nixpkgs/nixos/modules/services/x11/xfs.conf b/nixpkgs/nixos/modules/services/x11/xfs.conf
new file mode 100644
index 000000000000..13dcf803db29
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/xfs.conf
@@ -0,0 +1,15 @@
+# font server configuration file
+# $Xorg: config.cpp,v 1.3 2000/08/17 19:54:19 cpqbld Exp $
+
+clone-self = on
+use-syslog = off
+error-file = /var/log/xfs.log
+# in decipoints
+default-point-size = 120
+default-resolutions = 75,75,100,100
+
+# font cache control, specified in KB
+cache-hi-mark = 2048
+cache-low-mark = 1433
+cache-balance = 70
+catalogue = /run/current-system/sw/share/X11-fonts/
diff --git a/nixpkgs/nixos/modules/services/x11/xfs.nix b/nixpkgs/nixos/modules/services/x11/xfs.nix
new file mode 100644
index 000000000000..591bf461496e
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/xfs.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  configFile = ./xfs.conf;
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    services.xfs = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the X Font Server.";
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.services.xfs.enable {
+    assertions = singleton
+      { assertion = config.fonts.enableFontDir;
+        message = "Please enable fonts.enableFontDir to use the X Font Server.";
+      };
+
+    systemd.services.xfs = {
+      description = "X Font Server";
+      after = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.xorg.xfs ];
+      script = "xfs -config ${configFile}";
+    };
+  };
+}
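
A minimal sketch of enabling the font server; `fonts.enableFontDir` is required by the assertion above so that the catalogue directory referenced in xfs.conf exists:

  {
    services.xfs.enable = true;
    fonts.enableFontDir = true;
  }
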
diff --git a/nixpkgs/nixos/modules/services/x11/xserver.nix b/nixpkgs/nixos/modules/services/x11/xserver.nix
new file mode 100644
index 000000000000..4a8f2f61caaf
--- /dev/null
+++ b/nixpkgs/nixos/modules/services/x11/xserver.nix
@@ -0,0 +1,923 @@
+{ config, lib, utils, pkgs, ... }:
+
+with lib;
+
+let
+
+  # Abbreviations.
+  cfg = config.services.xserver;
+  xorg = pkgs.xorg;
+
+
+  # Map video driver names to driver packages. FIXME: move into card-specific modules.
+  knownVideoDrivers = {
+    # Alias so people can keep using "virtualbox" instead of "vboxvideo".
+    virtualbox = { modules = [ xorg.xf86videovboxvideo ]; driverName = "vboxvideo"; };
+
+    # Alias so that "radeon" uses the xf86-video-ati driver.
+    radeon = { modules = [ xorg.xf86videoati ]; driverName = "ati"; };
+
+    # modesetting does not have an xf86videomodesetting package as it is included in xorgserver
+    modesetting = {};
+  };
+
+  fontsForXServer =
+    config.fonts.packages ++
+    # We don't want these fonts in fonts.conf, because then modern,
+    # fontconfig-based applications will get horrible bitmapped
+    # Helvetica fonts.  It's better to get a substitution (like Nimbus
+    # Sans) than that horror.  But we do need the Adobe fonts for some
+    # old non-fontconfig applications.  (Possibly this could be done
+    # better using a fontconfig rule.)
+    [ pkgs.xorg.fontadobe100dpi
+      pkgs.xorg.fontadobe75dpi
+    ];
+
+  xrandrOptions = {
+    output = mkOption {
+      type = types.str;
+      example = "DVI-0";
+      description = lib.mdDoc ''
+        The output name of the monitor, as shown by
+        {manpage}`xrandr(1)` invoked without arguments.
+      '';
+    };
+
+    primary = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether this head is treated as the primary monitor.
+      '';
+    };
+
+    monitorConfig = mkOption {
+      type = types.lines;
+      default = "";
+      example = ''
+        DisplaySize 408 306
+        Option "DPMS" "false"
+      '';
+      description = lib.mdDoc ''
+        Extra lines to append to the `Monitor` section
+        verbatim. Available options are documented in the MONITOR section in
+        {manpage}`xorg.conf(5)`.
+      '';
+    };
+  };
+
+  # Just enumerate all heads without discarding XRandR output information.
+  xrandrHeads = let
+    mkHead = num: config: {
+      name = "multihead${toString num}";
+      inherit config;
+    };
+  in imap1 mkHead cfg.xrandrHeads;
+
+  xrandrDeviceSection = let
+    monitors = forEach xrandrHeads (h: ''
+      Option "monitor-${h.config.output}" "${h.name}"
+    '');
+  in concatStrings monitors;
+
+  # Here we chain every monitor from the left to right, so we have:
+  # m4 right of m3 right of m2 right of m1   .----.----.----.----.
+  # Which will end up in reverse ----------> | m1 | m2 | m3 | m4 |
+  #                                          `----^----^----^----'
+  xrandrMonitorSections = let
+    mkMonitor = previous: current: singleton {
+      inherit (current) name;
+      value = ''
+        Section "Monitor"
+          Identifier "${current.name}"
+          ${optionalString (current.config.primary) ''
+          Option "Primary" "true"
+          ''}
+          ${optionalString (previous != []) ''
+          Option "RightOf" "${(head previous).name}"
+          ''}
+          ${current.config.monitorConfig}
+        EndSection
+      '';
+    } ++ previous;
+    monitors = reverseList (foldl mkMonitor [] xrandrHeads);
+  in concatMapStrings (getAttr "value") monitors;
+
+  configFile = pkgs.runCommand "xserver.conf"
+    { fontpath = optionalString (cfg.fontPath != null)
+        ''FontPath "${cfg.fontPath}"'';
+      inherit (cfg) config;
+      preferLocalBuild = true;
+    }
+      ''
+        echo 'Section "Files"' >> $out
+        echo $fontpath >> $out
+
+        for i in ${toString fontsForXServer}; do
+          if test "''${i:0:''${#NIX_STORE}}" == "$NIX_STORE"; then
+            for j in $(find $i -name fonts.dir); do
+              echo "  FontPath \"$(dirname $j)\"" >> $out
+            done
+          fi
+        done
+
+        for i in $(find ${toString cfg.modules} -type d | sort); do
+          if test $(echo $i/*.so* | wc -w) -ne 0; then
+            echo "  ModulePath \"$i\"" >> $out
+          fi
+        done
+
+        echo '${cfg.filesSection}' >> $out
+        echo 'EndSection' >> $out
+        echo >> $out
+
+        echo "$config" >> $out
+      ''; # */
+
+  prefixStringLines = prefix: str:
+    concatMapStringsSep "\n" (line: prefix + line) (splitString "\n" str);
+
+  indent = prefixStringLines "  ";
+
+  # A scalable variant of the X11 "core" cursor
+  #
+  # If not running a fancy desktop environment, the cursor is likely set to
+  # the default `cursor.pcf` bitmap font. This is 17px wide, so it's very
+  # small and almost invisible on 4K displays.
+  fontcursormisc_hidpi = pkgs.xorg.fontxfree86type1.overrideAttrs (old:
+    let
+      # The scaling constant is 230/96: the scalable `left_ptr` glyph at
+      # about 23 points is rendered as 17px, on a 96dpi display.
+      # Note: the XLFD font size is in decipoints.
+      size = 2.39583 * cfg.dpi;
+      sizeString = builtins.head (builtins.split "\\." (toString size));
+    in
+    {
+      postInstall = ''
+        alias='cursor -xfree86-cursor-medium-r-normal--0-${sizeString}-0-0-p-0-adobe-fontspecific'
+        echo "$alias" > $out/lib/X11/fonts/Type1/fonts.alias
+      '';
+    });
+in
+
+{
+
+  imports =
+    [ ./display-managers/default.nix
+      ./window-managers/default.nix
+      ./desktop-managers/default.nix
+      (mkRemovedOptionModule [ "services" "xserver" "startGnuPGAgent" ]
+        "See the 16.09 release notes for more information.")
+      (mkRemovedOptionModule
+        [ "services" "xserver" "startDbusSession" ]
+        "The user D-Bus session is now always socket activated and this option can safely be removed.")
+      (mkRemovedOptionModule [ "services" "xserver" "useXFS" ]
+        "Use services.xserver.fontPath instead of useXFS")
+      (mkRemovedOptionModule [ "services" "xserver" "useGlamor" ]
+        "Option services.xserver.useGlamor was removed because it is unnecessary. Drivers that uses Glamor will use it automatically.")
+      (lib.mkRenamedOptionModuleWith {
+        sinceRelease = 2311;
+        from = [ "services" "xserver" "layout" ];
+        to = [ "services" "xserver" "xkb" "layout" ];
+      })
+      (lib.mkRenamedOptionModuleWith {
+        sinceRelease = 2311;
+        from = [ "services" "xserver" "xkbModel" ];
+        to = [ "services" "xserver" "xkb" "model" ];
+      })
+      (lib.mkRenamedOptionModuleWith {
+        sinceRelease = 2311;
+        from = [ "services" "xserver" "xkbOptions" ];
+        to = [ "services" "xserver" "xkb" "options" ];
+      })
+      (lib.mkRenamedOptionModuleWith {
+        sinceRelease = 2311;
+        from = [ "services" "xserver" "xkbVariant" ];
+        to = [ "services" "xserver" "xkb" "variant" ];
+      })
+      (lib.mkRenamedOptionModuleWith {
+        sinceRelease = 2311;
+        from = [ "services" "xserver" "xkbDir" ];
+        to = [ "services" "xserver" "xkb" "dir" ];
+      })
+    ];
+
+
+  ###### interface
+
+  options = {
+
+    services.xserver = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the X server.
+        '';
+      };
+
+      autorun = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to start the X server automatically.
+        '';
+      };
+
+      excludePackages = mkOption {
+        default = [];
+        example = literalExpression "[ pkgs.xterm ]";
+        type = types.listOf types.package;
+        description = lib.mdDoc "Which X11 packages to exclude from the default environment";
+      };
+
+      exportConfiguration = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to symlink the X server configuration under
+          {file}`/etc/X11/xorg.conf`.
+        '';
+      };
+
+      enableTCP = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to allow the X server to accept TCP connections.
+        '';
+      };
+
+      autoRepeatDelay = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Sets the autorepeat delay (length of time in milliseconds that a key must be depressed before autorepeat starts).
+        '';
+      };
+
+      autoRepeatInterval = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Sets the autorepeat interval (length of time in milliseconds that should elapse between autorepeat-generated keystrokes).
+        '';
+      };
+
+      inputClassSections = mkOption {
+        type = types.listOf types.lines;
+        default = [];
+        example = literalExpression ''
+          [ '''
+              Identifier      "Trackpoint Wheel Emulation"
+              MatchProduct    "ThinkPad USB Keyboard with TrackPoint"
+              Option          "EmulateWheel"          "true"
+              Option          "EmulateWheelButton"    "2"
+              Option          "Emulate3Buttons"       "false"
+            '''
+          ]
+        '';
+        description = lib.mdDoc "Content of additional InputClass sections of the X server configuration file.";
+      };
+
+      modules = mkOption {
+        type = types.listOf types.path;
+        default = [];
+        example = literalExpression "[ pkgs.xf86_input_wacom ]";
+        description = lib.mdDoc "Packages to be added to the module search path of the X server.";
+      };
+
+      resolutions = mkOption {
+        type = types.listOf types.attrs;
+        default = [];
+        example = [ { x = 1600; y = 1200; } { x = 1024; y = 786; } ];
+        description = lib.mdDoc ''
+          The screen resolutions for the X server.  The first element
+          is the default resolution.  If this list is empty, the X
+          server will automatically configure the resolution.
+        '';
+      };
+
+      videoDrivers = mkOption {
+        type = types.listOf types.str;
+        default = [ "modesetting" "fbdev" ];
+        example = [
+          "nvidia" "nvidiaLegacy390" "nvidiaLegacy340" "nvidiaLegacy304"
+          "amdgpu-pro"
+        ];
+        # TODO(@oxij): think how to easily add the rest, like those nvidia things
+        relatedPackages = concatLists
+          (mapAttrsToList (n: v:
+            optional (hasPrefix "xf86video" n) {
+              path  = [ "xorg" n ];
+              title = removePrefix "xf86video" n;
+            }) pkgs.xorg);
+        description = lib.mdDoc ''
+          The names of the video drivers the configuration
+          supports. They will be tried in order until one that
+          supports your card is found.
+          Don't combine those with "incompatible" OpenGL implementations,
+          e.g. free ones (mesa-based) with proprietary ones.
+
+          For unfree "nvidia*", the supported GPU lists are on
+          https://www.nvidia.com/object/unix.html
+        '';
+      };
+
+      videoDriver = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "i810";
+        description = lib.mdDoc ''
+          The name of the video driver for your graphics card.  This
+          option is obsolete; please set
+          {option}`services.xserver.videoDrivers` instead.
+        '';
+      };
+
+      drivers = mkOption {
+        type = types.listOf types.attrs;
+        internal = true;
+        description = lib.mdDoc ''
+          A list of attribute sets specifying drivers to be loaded by
+          the X11 server.
+        '';
+      };
+
+      dpi = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        description = lib.mdDoc ''
+          Force the global DPI resolution used by the X server. It's recommended to
+          use this only when the DPI is detected incorrectly; also consider using the
+          `Monitor` section in the configuration file instead.
+        '';
+      };
+
+      updateDbusEnvironment = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to update the DBus activation environment after launching the
+          desktop manager.
+        '';
+      };
+
+      xkb = {
+        layout = mkOption {
+          type = types.str;
+          default = "us";
+          description = lib.mdDoc ''
+            X keyboard layout, or multiple keyboard layouts separated by commas.
+          '';
+        };
+
+        model = mkOption {
+          type = types.str;
+          default = "pc104";
+          example = "presario";
+          description = lib.mdDoc ''
+            X keyboard model.
+          '';
+        };
+
+        options = mkOption {
+          type = types.commas;
+          default = "terminate:ctrl_alt_bksp";
+          example = "grp:caps_toggle,grp_led:scroll";
+          description = lib.mdDoc ''
+            X keyboard options; layout switching goes here.
+          '';
+        };
+
+        variant = mkOption {
+          type = types.str;
+          default = "";
+          example = "colemak";
+          description = lib.mdDoc ''
+            X keyboard variant.
+          '';
+        };
+
+        dir = mkOption {
+          type = types.path;
+          default = "${pkgs.xkeyboard_config}/etc/X11/xkb";
+          defaultText = literalExpression ''"''${pkgs.xkeyboard_config}/etc/X11/xkb"'';
+          description = lib.mdDoc ''
+            Path used for the -xkbdir X server parameter.
+          '';
+        };
+      };
+
+      config = mkOption {
+        type = types.lines;
+        description = lib.mdDoc ''
+          The contents of the configuration file of the X server
+          ({file}`xorg.conf`).
+
+          This option is set by multiple modules, and the configs are
+          concatenated together.
+
+          In Xorg configs the last config entries take precedence,
+          so you may want to use `lib.mkAfter` on this option
+          to override NixOS's defaults.
+        '';
+      };
+
+      filesSection = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''FontPath "/path/to/my/fonts"'';
+        description = lib.mdDoc "Contents of the first `Files` section of the X server configuration file.";
+      };
+
+      deviceSection = mkOption {
+        type = types.lines;
+        default = "";
+        example = "VideoRAM 131072";
+        description = lib.mdDoc "Contents of the first Device section of the X server configuration file.";
+      };
+
+      screenSection = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          Option "RandRRotation" "on"
+        '';
+        description = lib.mdDoc "Contents of the first Screen section of the X server configuration file.";
+      };
+
+      monitorSection = mkOption {
+        type = types.lines;
+        default = "";
+        example = "HorizSync 28-49";
+        description = lib.mdDoc "Contents of the first Monitor section of the X server configuration file.";
+      };
+
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Additional contents (sections) included in the X server configuration file";
+      };
+
+      xrandrHeads = mkOption {
+        default = [];
+        example = [
+          "HDMI-0"
+          { output = "DVI-0"; primary = true; }
+          { output = "DVI-1"; monitorConfig = "Option \"Rotate\" \"left\""; }
+        ];
+        type = with types; listOf (coercedTo str (output: {
+          inherit output;
+        }) (submodule { options = xrandrOptions; }));
+        # Set primary to true for the first head if no other has been set
+        # primary already.
+        apply = heads: let
+          hasPrimary = any (x: x.primary) heads;
+          firstPrimary = head heads // { primary = true; };
+          newHeads = singleton firstPrimary ++ tail heads;
+        in if heads != [] && !hasPrimary then newHeads else heads;
+        description = lib.mdDoc ''
+          Multiple monitor configuration, just specify a list of XRandR
+          outputs. The individual elements should be either simple strings or
+          an attribute set of output options.
+
+          If the element is a string, it denotes the physical output for a
+          monitor; if it's an attribute set, you must at least provide the
+          {option}`output` option.
+
+          The monitors will be mapped from left to right in the order of the
+          list.
+
+          By default, the first monitor will be set as the primary monitor if
+          none of the elements contain an option that has set
+          {option}`primary` to `true`.
+
+          ::: {.note}
+          Only one monitor is allowed to be primary.
+          :::
+
+          Be careful using this option with multiple graphics adapters or with
+          drivers that have poor support for XRandR; unexpected things might
+          happen with those.
+        '';
+      };
+
+      serverFlagsSection = mkOption {
+        default = "";
+        type = types.lines;
+        example =
+          ''
+          Option "BlankTime" "0"
+          Option "StandbyTime" "0"
+          Option "SuspendTime" "0"
+          Option "OffTime" "0"
+          '';
+        description = lib.mdDoc "Contents of the ServerFlags section of the X server configuration file.";
+      };
+
+      moduleSection = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          ''
+            SubSection "extmod"
+            EndSubsection
+          '';
+        description = lib.mdDoc "Contents of the Module section of the X server configuration file.";
+      };
+
+      serverLayoutSection = mkOption {
+        type = types.lines;
+        default = "";
+        example =
+          ''
+            Option "AIGLX" "true"
+          '';
+        description = lib.mdDoc "Contents of the ServerLayout section of the X server configuration file.";
+      };
+
+      extraDisplaySettings = mkOption {
+        type = types.lines;
+        default = "";
+        example = "Virtual 2048 2048";
+        description = lib.mdDoc "Lines to be added to every Display subsection of the Screen section.";
+      };
+
+      defaultDepth = mkOption {
+        type = types.int;
+        default = 0;
+        example = 8;
+        description = lib.mdDoc "Default colour depth.";
+      };
+
+      fontPath = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "unix/:7100";
+        description = lib.mdDoc ''
+          Set the X server FontPath. Defaults to null, which
+          means the compiled-in defaults will be used. See
+          man xorg.conf for details.
+        '';
+      };
+
+      tty = mkOption {
+        type = types.nullOr types.int;
+        default = 7;
+        description = lib.mdDoc "Virtual console for the X server.";
+      };
+
+      display = mkOption {
+        type = types.nullOr types.int;
+        default = 0;
+        description = lib.mdDoc "Display number for the X server.";
+      };
+
+      virtualScreen = mkOption {
+        type = types.nullOr types.attrs;
+        default = null;
+        example = { x = 2048; y = 2048; };
+        description = lib.mdDoc ''
+          Virtual screen size for Xrandr.
+        '';
+      };
+
+      logFile = mkOption {
+        type = types.nullOr types.str;
+        default = "/dev/null";
+        example = "/var/log/Xorg.0.log";
+        description = lib.mdDoc ''
+          Controls the file Xorg logs to.
+
+          The default of `/dev/null` is set so that systemd services (like `displayManagers`) only log to the journal and don't create their own log files.
+
+          Setting this to `null` will not pass the `-logfile` argument to Xorg which allows it to log to its default logfile locations instead (see `man Xorg`). You probably only want this behaviour when running Xorg manually (e.g. via `startx`).
+        '';
+      };
+
+      verbose = mkOption {
+        type = types.nullOr types.int;
+        default = 3;
+        example = 7;
+        description = lib.mdDoc ''
+          Controls verbosity of X logging.
+        '';
+      };
+
+      enableCtrlAltBackspace = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to bind Ctrl+Alt+Backspace to forcefully kill X (by turning
+          the DontZap option off). This can lead to data loss and is disabled
+          by default.
+        '';
+      };
+
+      terminateOnReset = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Whether to terminate X upon server reset.
+        '';
+      };
+
+      upscaleDefaultCursor = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Upscale the default X cursor to be more visible on high-density displays.
+          Requires `config.services.xserver.dpi` to be set.
+        '';
+      };
+    };
+
+  };
+
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    services.xserver.displayManager.lightdm.enable =
+      let dmConf = cfg.displayManager;
+          default = !(dmConf.gdm.enable
+                    || dmConf.sddm.enable
+                    || dmConf.xpra.enable
+                    || dmConf.sx.enable
+                    || dmConf.startx.enable
+                    || config.services.greetd.enable);
+      in mkIf (default) (mkDefault true);
+
+    # so that the service won't be enabled when only startx is used
+    systemd.services.display-manager.enable  =
+      let dmConf = cfg.displayManager;
+          noDmUsed = !(dmConf.gdm.enable
+                    || dmConf.sddm.enable
+                    || dmConf.xpra.enable
+                    || dmConf.lightdm.enable);
+      in mkIf (noDmUsed) (mkDefault false);
+
+    hardware.opengl.enable = mkDefault true;
+
+    services.xserver.videoDrivers = mkIf (cfg.videoDriver != null) [ cfg.videoDriver ];
+
+    # FIXME: somehow check for unknown driver names.
+    services.xserver.drivers = flip concatMap cfg.videoDrivers (name:
+      let driver =
+        attrByPath [name]
+          (if xorg ? ${"xf86video" + name}
+           then { modules = [xorg.${"xf86video" + name}]; }
+           else null)
+          knownVideoDrivers;
+      in optional (driver != null) ({ inherit name; modules = []; driverName = name; display = true; } // driver));
+
+    assertions = [
+      (let primaryHeads = filter (x: x.primary) cfg.xrandrHeads; in {
+        assertion = length primaryHeads < 2;
+        message = "Only one head is allowed to be primary in "
+                + "‘services.xserver.xrandrHeads’, but there are "
+                + "${toString (length primaryHeads)} heads set to primary: "
+                + concatMapStringsSep ", " (x: x.output) primaryHeads;
+      })
+      {
+        assertion = cfg.upscaleDefaultCursor -> cfg.dpi != null;
+        message = "Specify `config.services.xserver.dpi` to upscale the default cursor.";
+      }
+    ];
+
+    environment.etc =
+      (optionalAttrs cfg.exportConfiguration
+        {
+          "X11/xorg.conf".source = "${configFile}";
+          # -xkbdir command line option does not seem to be passed to xkbcomp.
+          "X11/xkb".source = "${cfg.xkb.dir}";
+        })
+      # localectl looks into 00-keyboard.conf
+      //{
+          "X11/xorg.conf.d/00-keyboard.conf".text = ''
+            Section "InputClass"
+              Identifier "Keyboard catchall"
+              MatchIsKeyboard "on"
+              Option "XkbModel" "${cfg.xkb.model}"
+              Option "XkbLayout" "${cfg.xkb.layout}"
+              Option "XkbOptions" "${cfg.xkb.options}"
+              Option "XkbVariant" "${cfg.xkb.variant}"
+            EndSection
+          '';
+        }
+      # Needed since 1.18; see https://bugs.freedesktop.org/show_bug.cgi?id=89023#c5
+      // (let cfgPath = "/X11/xorg.conf.d/10-evdev.conf"; in
+        {
+          ${cfgPath}.source = xorg.xf86inputevdev.out + "/share" + cfgPath;
+        });
+
+    environment.systemPackages = utils.removePackagesByName
+      [ xorg.xorgserver.out
+        xorg.xrandr
+        xorg.xrdb
+        xorg.setxkbmap
+        xorg.iceauth # required for KDE applications (it's called by dcopserver)
+        xorg.xlsclients
+        xorg.xset
+        xorg.xsetroot
+        xorg.xinput
+        xorg.xprop
+        xorg.xauth
+        pkgs.xterm
+        pkgs.xdg-utils
+        xorg.xf86inputevdev.out # get evdev.4 man page
+        pkgs.nixos-icons # needed for gnome and pantheon about dialog, nixos-manual and maybe more
+      ] config.services.xserver.excludePackages
+      ++ optional (elem "virtualbox" cfg.videoDrivers) xorg.xrefresh;
+
+    environment.pathsToLink = [ "/share/X11" ];
+
+    xdg = {
+      autostart.enable = true;
+      menus.enable = true;
+      mime.enable = true;
+      icons.enable = true;
+    };
+
+    # The default max inotify watches is 8192.
+    # Nowadays most apps require a good number of inotify watches,
+    # the value below is used by default on several other distros.
+    boot.kernel.sysctl."fs.inotify.max_user_instances" = mkDefault 524288;
+    boot.kernel.sysctl."fs.inotify.max_user_watches" = mkDefault 524288;
+
+    systemd.defaultUnit = mkIf cfg.autorun "graphical.target";
+
+    systemd.services.display-manager =
+      { description = "Display Manager";
+
+        after = [ "acpid.service" "systemd-logind.service" "systemd-user-sessions.service" ];
+
+        restartIfChanged = false;
+
+        environment =
+          optionalAttrs config.hardware.opengl.setLdLibraryPath
+            { LD_LIBRARY_PATH = lib.makeLibraryPath [ pkgs.addOpenGLRunpath.driverLink ]; }
+          // cfg.displayManager.job.environment;
+
+        preStart =
+          ''
+            ${cfg.displayManager.job.preStart}
+
+            rm -f /tmp/.X0-lock
+          '';
+
+        # TODO: move declaring the systemd service to its own mkIf
+        script = mkIf (config.systemd.services.display-manager.enable == true) "${cfg.displayManager.job.execCmd}";
+
+        # Stop restarting if the display manager stops (crashes) 2 times
+        # in one minute. Starting X typically takes 3-4s.
+        startLimitIntervalSec = 30;
+        startLimitBurst = 3;
+        serviceConfig = {
+          Restart = "always";
+          RestartSec = "200ms";
+          SyslogIdentifier = "display-manager";
+        };
+      };
+
+    services.xserver.displayManager.xserverArgs =
+      [ "-config ${configFile}"
+        "-xkbdir" "${cfg.xkb.dir}"
+      ] ++ optional (cfg.display != null) ":${toString cfg.display}"
+        ++ optional (cfg.tty     != null) "vt${toString cfg.tty}"
+        ++ optional (cfg.dpi     != null) "-dpi ${toString cfg.dpi}"
+        ++ optional (cfg.logFile != null) "-logfile ${toString cfg.logFile}"
+        ++ optional (cfg.verbose != null) "-verbose ${toString cfg.verbose}"
+        ++ optional (!cfg.enableTCP) "-nolisten tcp"
+        ++ optional (cfg.autoRepeatDelay != null) "-ardelay ${toString cfg.autoRepeatDelay}"
+        ++ optional (cfg.autoRepeatInterval != null) "-arinterval ${toString cfg.autoRepeatInterval}"
+        ++ optional cfg.terminateOnReset "-terminate";
+
+    services.xserver.modules =
+      concatLists (catAttrs "modules" cfg.drivers) ++
+      [ xorg.xorgserver.out
+        xorg.xf86inputevdev.out
+      ];
+
+    system.checks = singleton (pkgs.runCommand "xkb-validated" {
+      inherit (cfg.xkb) model layout variant options;
+      nativeBuildInputs = with pkgs.buildPackages; [ xkbvalidate ];
+      preferLocalBuild = true;
+    } ''
+      ${optionalString (config.environment.sessionVariables ? XKB_CONFIG_ROOT)
+        "export XKB_CONFIG_ROOT=${config.environment.sessionVariables.XKB_CONFIG_ROOT}"
+      }
+      xkbvalidate "$model" "$layout" "$variant" "$options"
+      touch "$out"
+    '');
+
+    services.xserver.config =
+      ''
+        Section "ServerFlags"
+          Option "AllowMouseOpenFail" "on"
+          Option "DontZap" "${if cfg.enableCtrlAltBackspace then "off" else "on"}"
+        ${indent cfg.serverFlagsSection}
+        EndSection
+
+        Section "Module"
+        ${indent cfg.moduleSection}
+        EndSection
+
+        Section "Monitor"
+          Identifier "Monitor[0]"
+        ${indent cfg.monitorSection}
+        EndSection
+
+        # Additional "InputClass" sections
+        ${flip (concatMapStringsSep "\n") cfg.inputClassSections (inputClassSection: ''
+          Section "InputClass"
+          ${indent inputClassSection}
+          EndSection
+        '')}
+
+
+        Section "ServerLayout"
+          Identifier "Layout[all]"
+        ${indent cfg.serverLayoutSection}
+          # Reference the Screen sections for each driver.  This will
+          # cause the X server to try each in turn.
+          ${flip concatMapStrings (filter (d: d.display) cfg.drivers) (d: ''
+            Screen "Screen-${d.name}[0]"
+          '')}
+        EndSection
+
+        # For each supported driver, add a "Device" and "Screen"
+        # section.
+        ${flip concatMapStrings cfg.drivers (driver: ''
+
+          Section "Device"
+            Identifier "Device-${driver.name}[0]"
+            Driver "${driver.driverName or driver.name}"
+          ${indent cfg.deviceSection}
+          ${indent (driver.deviceSection or "")}
+          ${indent xrandrDeviceSection}
+          EndSection
+          ${optionalString driver.display ''
+
+            Section "Screen"
+              Identifier "Screen-${driver.name}[0]"
+              Device "Device-${driver.name}[0]"
+              ${optionalString (cfg.monitorSection != "") ''
+                Monitor "Monitor[0]"
+              ''}
+
+            ${indent cfg.screenSection}
+            ${indent (driver.screenSection or "")}
+
+              ${optionalString (cfg.defaultDepth != 0) ''
+                DefaultDepth ${toString cfg.defaultDepth}
+              ''}
+
+              ${optionalString
+                (
+                  driver.name != "virtualbox"
+                  &&
+                  (cfg.resolutions != [] ||
+                    cfg.extraDisplaySettings != "" ||
+                    cfg.virtualScreen != null
+                  )
+                )
+                (let
+                  f = depth:
+                    ''
+                      SubSection "Display"
+                        Depth ${toString depth}
+                        ${optionalString (cfg.resolutions != [])
+                          "Modes ${concatMapStrings (res: ''"${toString res.x}x${toString res.y}"'') cfg.resolutions}"}
+                      ${indent cfg.extraDisplaySettings}
+                        ${optionalString (cfg.virtualScreen != null)
+                          "Virtual ${toString cfg.virtualScreen.x} ${toString cfg.virtualScreen.y}"}
+                      EndSubSection
+                    '';
+                in concatMapStrings f [8 16 24]
+              )}
+
+            EndSection
+          ''}
+        '')}
+
+        ${xrandrMonitorSections}
+
+        ${cfg.extraConfig}
+      '';
+
+    fonts.enableDefaultPackages = mkDefault true;
+    fonts.packages = [
+      (if cfg.upscaleDefaultCursor then fontcursormisc_hidpi else pkgs.xorg.fontcursormisc)
+      pkgs.xorg.fontmiscmisc
+    ];
+
+  };
+
+  # uses relatedPackages
+  meta.buildDocsInSandbox = false;
+}
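
A hedged sketch showing a few of the options above in one place; the layouts, driver and outputs are illustrative values, not recommendations:

  {
    services.xserver = {
      enable = true;
      videoDrivers = [ "modesetting" ];
      xkb = {
        layout = "us,de";
        options = "grp:alt_shift_toggle,terminate:ctrl_alt_bksp";
      };
      # Two monitors; DVI-0 ends up to the right of HDMI-0 because heads are
      # chained left to right, and HDMI-0 becomes primary via the `apply` above.
      xrandrHeads = [
        "HDMI-0"
        { output = "DVI-0"; monitorConfig = ''Option "Rotate" "left"''; }
      ];
      dpi = 144;
      upscaleDefaultCursor = true;  # requires dpi to be set, per the assertion above
    };
  }
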
diff --git a/nixpkgs/nixos/modules/system/activation/activatable-system.nix b/nixpkgs/nixos/modules/system/activation/activatable-system.nix
new file mode 100644
index 000000000000..3d941596747b
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/activatable-system.nix
@@ -0,0 +1,79 @@
+{ options, config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    mkOption
+    types
+    ;
+
+  systemBuilderArgs = {
+    activationScript = config.system.activationScripts.script;
+    dryActivationScript = config.system.dryActivationScript;
+  };
+
+in
+{
+  options = {
+    system.activatable = mkOption {
+      type = types.bool;
+      default = true;
+      description = ''
+        Whether to add the activation script to the system profile.
+
+        The default, to have the script available all the time, is what we normally
+        do, but for image-based systems, this may not be needed or desirable.
+      '';
+    };
+    system.activatableSystemBuilderCommands = options.system.systemBuilderCommands // {
+      description = lib.mdDoc ''
+        Like `system.systemBuilderCommands`, but only for the commands that are
+        needed *both* when the system is activatable and when it isn't.
+
+        Disclaimer: This option might go away in the future. It might be
+        superseded by separating switch-to-configuration into a separate script
+        which will make this option superfluous. See
+        https://github.com/NixOS/nixpkgs/pull/263462#discussion_r1373104845 for
+        a discussion.
+      '';
+    };
+    system.build.separateActivationScript = mkOption {
+      type = types.package;
+      description = ''
+        A separate activation script package that's not part of the system profile.
+
+        This is useful for configurations where `system.activatable` is `false`.
+        Otherwise, you can just use `system.build.toplevel`.
+      '';
+    };
+  };
+  config = {
+    system.activatableSystemBuilderCommands = ''
+      echo "$activationScript" > $out/activate
+      echo "$dryActivationScript" > $out/dry-activate
+      substituteInPlace $out/activate --subst-var-by out ''${!toplevelVar}
+      substituteInPlace $out/dry-activate --subst-var-by out ''${!toplevelVar}
+      chmod u+x $out/activate $out/dry-activate
+      unset activationScript dryActivationScript
+    '';
+
+    system.systemBuilderCommands = lib.mkIf
+      config.system.activatable
+      config.system.activatableSystemBuilderCommands;
+    system.systemBuilderArgs = lib.mkIf config.system.activatable
+      (systemBuilderArgs // {
+        toplevelVar = "out";
+      });
+
+    system.build.separateActivationScript =
+      pkgs.runCommand
+        "separate-activation-script"
+        (systemBuilderArgs // {
+          toplevelVar = "toplevel";
+          toplevel = config.system.build.toplevel;
+        })
+        ''
+          mkdir $out
+          ${config.system.activatableSystemBuilderCommands}
+        '';
+  };
+}
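
A minimal sketch of the image-based use case these options describe:

  {
    # Keep the activation script out of the system profile for an image-based system.
    system.activatable = false;
  }

The image tooling can then pick up `config.system.build.separateActivationScript`, whose `activate` and `dry-activate` scripts refer to the toplevel through the `toplevel` argument rather than `$out`.
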
diff --git a/nixpkgs/nixos/modules/system/activation/activation-script.nix b/nixpkgs/nixos/modules/system/activation/activation-script.nix
new file mode 100644
index 000000000000..bc0b7266ce95
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/activation-script.nix
@@ -0,0 +1,284 @@
+# generate the script used to activate the configuration.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  addAttributeName = mapAttrs (a: v: v // {
+    text = ''
+      #### Activation script snippet ${a}:
+      _localstatus=0
+      ${v.text}
+
+      if (( _localstatus > 0 )); then
+        printf "Activation script snippet '%s' failed (%s)\n" "${a}" "$_localstatus"
+      fi
+    '';
+  });
+
+  systemActivationScript = set: onlyDry: let
+    set' = mapAttrs (_: v: if isString v then (noDepEntry v) // { supportsDryActivation = false; } else v) set;
+    withHeadlines = addAttributeName set';
+    # When building a dry activation script, this replaces all activation scripts
+    # that do not support dry mode with a comment that does nothing. Filtering these
+    # activation scripts out so they don't get generated into the dry activation script
+    # does not work because when an activation script that supports dry mode depends on
+    # an activation script that does not, the dependency cannot be resolved and the eval
+    # fails.
+    withDrySnippets = mapAttrs (a: v: if onlyDry && !v.supportsDryActivation then v // {
+      text = "#### Activation script snippet ${a} does not support dry activation.";
+    } else v) withHeadlines;
+  in
+    ''
+      #!${pkgs.runtimeShell}
+
+      systemConfig='@out@'
+
+      export PATH=/empty
+      for i in ${toString path}; do
+          PATH=$PATH:$i/bin:$i/sbin
+      done
+
+      _status=0
+      trap "_status=1 _localstatus=\$?" ERR
+
+      # Ensure a consistent umask.
+      umask 0022
+
+      ${textClosureMap id (withDrySnippets) (attrNames withDrySnippets)}
+
+    '' + optionalString (!onlyDry) ''
+      # Make this configuration the current configuration.
+      # The readlink is there to ensure that when $systemConfig = /system
+      # (which is a symlink to the store), /run/current-system is still
+      # used as a garbage collection root.
+      ln -sfn "$(readlink -f "$systemConfig")" /run/current-system
+
+      exit $_status
+    '';
+
+  path = with pkgs; map getBin
+    [ coreutils
+      gnugrep
+      findutils
+      getent
+      stdenv.cc.libc # nscd in update-users-groups.pl
+      shadow
+      nettools # needed for hostname
+      util-linux # needed for mount and mountpoint
+    ];
+
+  scriptType = withDry: with types;
+    let scriptOptions =
+      { deps = mkOption
+          { type = types.listOf types.str;
+            default = [ ];
+            description = lib.mdDoc "List of dependencies. The script will run after these.";
+          };
+        text = mkOption
+          { type = types.lines;
+            description = lib.mdDoc "The content of the script.";
+          };
+      } // optionalAttrs withDry {
+        supportsDryActivation = mkOption
+          { type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Whether this activation script supports being dry-activated.
+              These activation scripts will also be executed on dry-activate
+              activations with the environment variable
+              `NIXOS_ACTION` being set to `dry-activate`.
+              It's important that these activation scripts don't
+              modify anything about the system when the variable is set.
+            '';
+          };
+      };
+    in either str (submodule { options = scriptOptions; });
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    system.activationScripts = mkOption {
+      default = {};
+
+      example = literalExpression ''
+        { stdio.text =
+          '''
+            # Needed by some programs.
+            ln -sfn /proc/self/fd /dev/fd
+            ln -sfn /proc/self/fd/0 /dev/stdin
+            ln -sfn /proc/self/fd/1 /dev/stdout
+            ln -sfn /proc/self/fd/2 /dev/stderr
+          ''';
+        }
+      '';
+
+      description = lib.mdDoc ''
+        A set of shell script fragments that are executed when a NixOS
+        system configuration is activated.  Examples are updating
+        /etc, creating accounts, and so on.  Since these are executed
+        every time you boot the system or run
+        {command}`nixos-rebuild`, it's important that they are
+        idempotent and fast.
+      '';
+
+      type = types.attrsOf (scriptType true);
+      apply = set: set // {
+        script = systemActivationScript set false;
+      };
+    };
+
+    system.dryActivationScript = mkOption {
+      description = lib.mdDoc "The shell script that is to be run when dry-activating a system.";
+      readOnly = true;
+      internal = true;
+      default = systemActivationScript (removeAttrs config.system.activationScripts [ "script" ]) true;
+      defaultText = literalMD "generated activation script";
+    };
+
+    system.userActivationScripts = mkOption {
+      default = {};
+
+      example = literalExpression ''
+        { plasmaSetup = {
+            text = '''
+              ''${pkgs.libsForQt5.kservice}/bin/kbuildsycoca5
+            ''';
+            deps = [];
+          };
+        }
+      '';
+
+      description = lib.mdDoc ''
+        A set of shell script fragments that are executed by a systemd user
+        service when a NixOS system configuration is activated. Examples are
+        rebuilding the .desktop file cache for showing applications in the menu.
+        Since these are executed every time you run
+        {command}`nixos-rebuild`, it's important that they are
+        idempotent and fast.
+      '';
+
+      type = with types; attrsOf (scriptType false);
+
+      apply = set: {
+        script = ''
+          unset PATH
+          for i in ${toString path}; do
+            PATH=$PATH:$i/bin:$i/sbin
+          done
+
+          _status=0
+          trap "_status=1 _localstatus=\$?" ERR
+
+          ${
+            let
+              set' = mapAttrs (n: v: if isString v then noDepEntry v else v) set;
+              withHeadlines = addAttributeName set';
+            in textClosureMap id (withHeadlines) (attrNames withHeadlines)
+          }
+
+          exit $_status
+        '';
+      };
+
+    };
+
+    environment.usrbinenv = mkOption {
+      default = "${pkgs.coreutils}/bin/env";
+      defaultText = literalExpression ''"''${pkgs.coreutils}/bin/env"'';
+      example = literalExpression ''"''${pkgs.busybox}/bin/env"'';
+      type = types.nullOr types.path;
+      visible = false;
+      description = lib.mdDoc ''
+        The env(1) executable that is linked system-wide to
+        `/usr/bin/env`.
+      '';
+    };
+
+    system.build.installBootLoader = mkOption {
+      internal = true;
+      # "; true" => make the `$out` argument from switch-to-configuration.pl
+      #             go to `true` instead of `echo`, hiding the useless path
+      #             from the log.
+      default = "echo 'Warning: do not know how to make this configuration bootable; please enable a boot loader.' 1>&2; true";
+      description = lib.mdDoc ''
+        A program that writes a bootloader installation script to the path passed in the first command line argument.
+
+        See `nixos/modules/system/activation/switch-to-configuration.pl`.
+      '';
+      type = types.unique {
+        message = ''
+          Only one bootloader can be enabled at a time. This requirement has not
+          been checked until NixOS 22.05. Earlier versions defaulted to the last
+          definition. Change your configuration to enable only one bootloader.
+        '';
+      } (types.either types.str types.package);
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    system.activationScripts.stdio = ""; # obsolete
+    system.activationScripts.var = ""; # obsolete
+
+    systemd.tmpfiles.rules = [
+      # Prevent the current configuration from being garbage-collected.
+      "d /nix/var/nix/gcroots -"
+      "L+ /nix/var/nix/gcroots/current-system - - - - /run/current-system"
+      "D /var/empty 0555 root root -"
+      "h /var/empty - - - - +i"
+    ];
+
+    system.activationScripts.usrbinenv = if config.environment.usrbinenv != null
+      then ''
+        mkdir -p /usr/bin
+        chmod 0755 /usr/bin
+        ln -sfn ${config.environment.usrbinenv} /usr/bin/.env.tmp
+        mv /usr/bin/.env.tmp /usr/bin/env # atomically replace /usr/bin/env
+      ''
+      else ''
+        rm -f /usr/bin/env
+        rmdir --ignore-fail-on-non-empty /usr/bin /usr
+      '';
+
+    system.activationScripts.specialfs =
+      ''
+        specialMount() {
+          local device="$1"
+          local mountPoint="$2"
+          local options="$3"
+          local fsType="$4"
+
+          if mountpoint -q "$mountPoint"; then
+            local options="remount,$options"
+          else
+            mkdir -p "$mountPoint"
+            chmod 0755 "$mountPoint"
+          fi
+          mount -t "$fsType" -o "$options" "$device" "$mountPoint"
+        }
+        source ${config.system.build.earlyMountScript}
+      '';
+
+    systemd.user = {
+      services.nixos-activation = {
+        description = "Run user-specific NixOS activation";
+        script = config.system.userActivationScripts.script;
+        unitConfig.ConditionUser = "!@system";
+        serviceConfig.Type = "oneshot";
+        wantedBy = [ "default.target" ];
+      };
+    };
+  };
+
+}
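
A hedged sketch of a user-defined snippet using the options above; the snippet name, the `users` dependency and the directory are illustrative only:

  {
    system.activationScripts.myDataDir = {
      deps = [ "users" ];            # run after another snippet; the name is assumed to exist
      supportsDryActivation = true;  # also runs on dry-activate, guarded below
      text = ''
        if [ "''${NIXOS_ACTION:-}" = dry-activate ]; then
          echo "would create /var/lib/mydata"
        else
          # Idempotent and fast, as the option description asks for.
          mkdir -p /var/lib/mydata
          chmod 0750 /var/lib/mydata
        fi
      '';
    };
  }
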
diff --git a/nixpkgs/nixos/modules/system/activation/bootspec.cue b/nixpkgs/nixos/modules/system/activation/bootspec.cue
new file mode 100644
index 000000000000..1f7b4afa87ac
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/bootspec.cue
@@ -0,0 +1,31 @@
+import "struct"
+
+#BootspecV1: {
+	system:         string
+	init:           string
+	initrd?:        string
+	initrdSecrets?: string
+	kernel:         string
+	kernelParams: [...string]
+	label:    string
+	toplevel: string
+}
+
+// A restricted document does not allow any official specialisation
+// information in it to avoid "recursive specialisations".
+#RestrictedDocument: struct.MinFields(1) & {
+	"org.nixos.bootspec.v1": #BootspecV1
+	[=~"^"]:                 #BootspecExtension
+}
+
+// Specialisations are a hashmap of strings
+#BootspecSpecialisationV1: [string]: #RestrictedDocument
+
+// Bootspec extensions are defined by the extension author.
+#BootspecExtension: {...}
+
+// A "full" document allows official specialisation information
+// in the top-level with a reserved namespaced key.
+Document: #RestrictedDocument & {
+	"org.nixos.specialisation.v1"?: #BootspecSpecialisationV1
+}
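
A hedged Nix-side sketch of how a configuration could use the schema above together with the module options that follow: validation turned on, plus one namespaced extension (the namespace and its contents are invented for illustration):

  {
    boot.bootspec.enableValidation = true;
    boot.bootspec.extensions = {
      # Extensions sit next to "org.nixos.bootspec.v1" under their own namespace.
      "org.example.deployment.v1" = {
        deployedBy = "example-tool";
      };
    };
  }
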
diff --git a/nixpkgs/nixos/modules/system/activation/bootspec.nix b/nixpkgs/nixos/modules/system/activation/bootspec.nix
new file mode 100644
index 000000000000..98c234bc340d
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/bootspec.nix
@@ -0,0 +1,118 @@
+# Note that these schemas are defined by RFC-0125.
+# This document is considered a stable API, and is depended upon by external tooling.
+# Changes to the structure of the document, or the semantics of the values should go through an RFC.
+#
+# See: https://github.com/NixOS/rfcs/pull/125
+{ config
+, pkgs
+, lib
+, ...
+}:
+let
+  cfg = config.boot.bootspec;
+  children = lib.mapAttrs (childName: childConfig: childConfig.configuration.system.build.toplevel) config.specialisation;
+  schemas = {
+    v1 = rec {
+      filename = "boot.json";
+      json =
+        pkgs.writeText filename
+        (builtins.toJSON
+          # Merge extensions first to not let them shadow NixOS bootspec data.
+          (cfg.extensions //
+          {
+            "org.nixos.bootspec.v1" = {
+              system = config.boot.kernelPackages.stdenv.hostPlatform.system;
+              kernel = "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}";
+              kernelParams = config.boot.kernelParams;
+              label = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} (Linux ${config.boot.kernelPackages.kernel.modDirVersion})";
+            } // lib.optionalAttrs config.boot.initrd.enable {
+              initrd = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+              initrdSecrets = "${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets";
+            };
+          }));
+
+      generator =
+        let
+          # NOTE: Be careful to not introduce excess newlines at the end of the
+          # injectors, as that may affect the pipes and redirects.
+
+          # Inject toplevel and init into the bootspec.
+          # This can only be done here because we *cannot* depend on $out
+          # referring to the toplevel, except by living in the toplevel itself.
+          toplevelInjector = lib.escapeShellArgs [
+            "${pkgs.buildPackages.jq}/bin/jq"
+            ''
+              ."org.nixos.bootspec.v1".toplevel = $toplevel |
+              ."org.nixos.bootspec.v1".init = $init
+            ''
+            "--sort-keys"
+            "--arg" "toplevel" "${placeholder "out"}"
+            "--arg" "init" "${placeholder "out"}/init"
+          ] + " < ${json}";
+
+          # We slurp all specialisations and inject them as values, such that
+          # `.specialisations.${name}` embeds the specialisation's bootspec
+          # document.
+          specialisationInjector =
+            let
+              specialisationLoader = (lib.mapAttrsToList
+                (childName: childToplevel: lib.escapeShellArgs [ "--slurpfile" childName "${childToplevel}/${filename}" ])
+                children);
+            in
+            lib.escapeShellArgs [
+              "${pkgs.buildPackages.jq}/bin/jq"
+              "--sort-keys"
+              ''."org.nixos.specialisation.v1" = ($ARGS.named | map_values(. | first))''
+            ] + " ${lib.concatStringsSep " " specialisationLoader}";
+        in
+        "${toplevelInjector} | ${specialisationInjector} > $out/${filename}";
+
+      validator = pkgs.writeCueValidator ./bootspec.cue {
+        document = "Document"; # Universal validator for any version, as long as the schema is correctly set.
+      };
+    };
+  };
+in
+{
+  options.boot.bootspec = {
+    enable = lib.mkEnableOption (lib.mdDoc "the generation of RFC-0125 bootspec in $system/boot.json, e.g. /run/current-system/boot.json")
+      // { default = true; internal = true; };
+    enableValidation = lib.mkEnableOption (lib.mdDoc ''the validation of bootspec documents for each build.
+      This will introduce Go into the build-time closure, as we rely on [Cuelang](https://cuelang.org/) for schema validation.
+      Enable this option if you want to ensure that your documents are correct.
+      ''
+    );
+
+    extensions = lib.mkOption {
+      # NOTE(RaitoBezarius): this is not enough to validate: extensions."osRelease" = drv; those are picked up by cue validation.
+      type = lib.types.attrsOf lib.types.anything; # <namespace>: { ...namespace-specific fields }
+      default = { };
+      description = lib.mdDoc ''
+        User-defined data that extends the bootspec document.
+
+        To reduce incompatibility and prevent names from clashing
+        between applications, it is **highly recommended** to use a
+        unique namespace for your extensions.
+      '';
+    };
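+    # Illustrative example (hypothetical namespace): a module could set
+    #   boot.bootspec.extensions."org.example.hello" = { osRelease = "${pkgs.hello}"; };
+    # which is merged in as a top-level "org.example.hello" key of the document.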
+
+    # This will be run as a part of the `systemBuilder` in ./top-level.nix. This
+    # means `$out` points to the output of `config.system.build.toplevel` and can
+    # be used for a variety of things (though, for now, it's only used to report
+    # the path of the `toplevel` itself and the `init` executable).
+    writer = lib.mkOption {
+      internal = true;
+      default = schemas.v1.generator;
+    };
+
+    validator = lib.mkOption {
+      internal = true;
+      default = schemas.v1.validator;
+    };
+
+    filename = lib.mkOption {
+      internal = true;
+      default = schemas.v1.filename;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/activation/no-clone.nix b/nixpkgs/nixos/modules/system/activation/no-clone.nix
new file mode 100644
index 000000000000..912420347dc0
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/no-clone.nix
@@ -0,0 +1,8 @@
+{ lib, ... }:
+
+with lib;
+
+{
+  boot.loader.grub.device = mkOverride 0 "nodev";
+  specialisation = mkOverride 0 {};
+}
diff --git a/nixpkgs/nixos/modules/system/activation/specialisation.nix b/nixpkgs/nixos/modules/system/activation/specialisation.nix
new file mode 100644
index 000000000000..86603c847641
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/specialisation.nix
@@ -0,0 +1,85 @@
+{ config, lib, pkgs, extendModules, noUserModules, ... }:
+
+let
+  inherit (lib)
+    concatStringsSep
+    mapAttrs
+    mapAttrsToList
+    mkOption
+    types
+    ;
+
+  # This attribute is responsible for creating boot entries for
+  # child configurations. They are only (directly) accessible
+  # when the parent configuration is the boot default. For example,
+  # this provides an easy way to boot the same configuration
+  # as the one in use, but with another kernel.
+  # !!! fix this
+  children =
+    mapAttrs
+      (childName: childConfig: childConfig.configuration.system.build.toplevel)
+      config.specialisation;
+
+in
+{
+  options = {
+
+    specialisation = mkOption {
+      default = { };
+      example = lib.literalExpression "{ fewJobsManyCores.configuration = { nix.settings = { cores = 0; max-jobs = 1; }; }; }";
+      description = lib.mdDoc ''
+        Additional configurations to build. If
+        `inheritParentConfig` is true, the system
+        will be based on the overall system configuration.
+
+        To switch to a specialised configuration
+        (e.g. `fewJobsManyCores`) at runtime, run:
+
+        ```
+        sudo /run/current-system/specialisation/fewJobsManyCores/bin/switch-to-configuration test
+        ```
+      '';
+      type = types.attrsOf (types.submodule (
+        local@{ ... }:
+        let
+          extend =
+            if local.config.inheritParentConfig
+            then extendModules
+            else noUserModules.extendModules;
+        in
+        {
+          options.inheritParentConfig = mkOption {
+            type = types.bool;
+            default = true;
+            description = lib.mdDoc "Include the entire system's configuration. Set to false to make a completely differently configured system.";
+          };
+
+          options.configuration = mkOption {
+            default = { };
+            description = lib.mdDoc ''
+              Arbitrary NixOS configuration.
+
+              Anything you can add to a normal NixOS configuration, you can add
+              here, including imports and config values, although nested
+              specialisations will be ignored.
+            '';
+            visible = "shallow";
+            inherit (extend { modules = [ ./no-clone.nix ]; }) type;
+          };
+        }
+      ));
+    };
+
+  };
+
+  config = {
+    system.systemBuilderCommands = ''
+      mkdir $out/specialisation
+      ${concatStringsSep "\n"
+      (mapAttrsToList (name: path: "ln -s ${path} $out/specialisation/${name}") children)}
+    '';
+  };
+
+  # uses extendModules to generate a type
+  meta.buildDocsInSandbox = false;
+}
diff --git a/nixpkgs/nixos/modules/system/activation/switch-to-configuration.pl b/nixpkgs/nixos/modules/system/activation/switch-to-configuration.pl
new file mode 100755
index 000000000000..e2f66a287bc4
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/switch-to-configuration.pl
@@ -0,0 +1,992 @@
+#! @perl@/bin/perl
+
+# Issue #166838 uncovered a situation in which a configuration not suitable
+# for the target architecture caused a cryptic error message instead of
+# a clean failure. Due to this mismatch, the Perl interpreter in the shebang
+# line could not be executed, causing this script to be misinterpreted as a
+# shell script.
+#
+# Let's detect this situation to give a more meaningful error
+# message. The following two lines are carefully written to be both valid Perl
+# and Bash.
+printf "Perl script erroneously interpreted as shell script,\ndoes target platform match nixpkgs.crossSystem platform?\n" && exit 1
+    if 0;
+
+use strict;
+use warnings;
+use Config::IniFiles;
+use File::Path qw(make_path);
+use File::Basename;
+use File::Slurp qw(read_file write_file edit_file);
+use JSON::PP;
+use IPC::Cmd;
+use Sys::Syslog qw(:standard :macros);
+use Cwd qw(abs_path);
+use Fcntl ':flock';
+
+## no critic(ControlStructures::ProhibitDeepNests)
+## no critic(ErrorHandling::RequireCarping)
+## no critic(CodeLayout::ProhibitParensWithBuiltins)
+## no critic(Variables::ProhibitPunctuationVars, Variables::RequireLocalizedPunctuationVars)
+## no critic(InputOutput::RequireCheckedSyscalls, InputOutput::RequireBracedFileHandleWithPrint, InputOutput::RequireBriefOpen)
+## no critic(ValuesAndExpressions::ProhibitNoisyQuotes, ValuesAndExpressions::ProhibitMagicNumbers, ValuesAndExpressions::ProhibitEmptyQuotes, ValuesAndExpressions::ProhibitInterpolationOfLiterals)
+## no critic(RegularExpressions::ProhibitEscapedMetacharacters)
+
+# Location of activation scripts
+my $out = "@out@";
+# System closure path to switch to
+my $toplevel = "@toplevel@";
+# Path to the directory containing systemd tools of the old system
+my $cur_systemd = abs_path("/run/current-system/sw/bin");
+# Path to the systemd store path of the new system
+my $new_systemd = "@systemd@";
+
+# To be robust against interruption, record what units need to be started etc.
+# We read these files again every time this script starts to make sure we continue
+# where the old (interrupted) script left off.
+my $start_list_file = "/run/nixos/start-list";
+my $restart_list_file = "/run/nixos/restart-list";
+my $reload_list_file = "/run/nixos/reload-list";
+
+# Parse restart/reload requests by the activation script.
+# Activation scripts may write newline-separated units to the restart
+# file and switch-to-configuration will handle them. While
+# `stopIfChanged = true` is ignored, switch-to-configuration will
+# handle `restartIfChanged = false` and `reloadIfChanged = true`.
+# This is the same as specifying a restart trigger in the NixOS module.
+#
+# The reload file asks the script to reload a unit. This is the same as
+# specifying a reload trigger in the NixOS module and can be ignored if
+# the unit is restarted in this activation.
+my $restart_by_activation_file = "/run/nixos/activation-restart-list";
+my $reload_by_activation_file = "/run/nixos/activation-reload-list";
+my $dry_restart_by_activation_file = "/run/nixos/dry-activation-restart-list";
+my $dry_reload_by_activation_file = "/run/nixos/dry-activation-reload-list";
+
+# The action that is to be performed (like switch, boot, test, dry-activate)
+# Also exposed via environment variable from now on
+my $action = shift(@ARGV);
+$ENV{NIXOS_ACTION} = $action;
+
+# Expose the locale archive as an environment variable for systemctl and the activation script
+if ("@localeArchive@" ne "") {
+    $ENV{LOCALE_ARCHIVE} = "@localeArchive@";
+}
+
+if (!defined($action) || ($action ne "switch" && $action ne "boot" && $action ne "test" && $action ne "dry-activate")) {
+    print STDERR <<"EOF";
+Usage: $0 [switch|boot|test|dry-activate]
+
+switch:       make the configuration the boot default and activate now
+boot:         make the configuration the boot default
+test:         activate the configuration, but don't make it the boot default
+dry-activate: show what would be done if this configuration were activated
+EOF
+    exit(1);
+}
+
+# This is a NixOS installation if it has /etc/NIXOS or a proper
+# /etc/os-release.
+if (!-f "/etc/NIXOS" && (read_file("/etc/os-release", err_mode => "quiet") // "") !~ /^ID="?@distroId@"?/msx) {
+    die("This is not a NixOS installation!\n");
+}
+
+make_path("/run/nixos", { mode => oct(755) });
+open(my $stc_lock, '>>', '/run/nixos/switch-to-configuration.lock') or die "Could not open lock - $!";
+flock($stc_lock, LOCK_EX) or die "Could not acquire lock - $!";
+openlog("nixos", "", LOG_USER);
+
+# Install or update the bootloader.
+if ($action eq "switch" || $action eq "boot") {
+    chomp(my $install_boot_loader = <<'EOFBOOTLOADER');
+@installBootLoader@
+EOFBOOTLOADER
+    system("$install_boot_loader $toplevel") == 0 or exit 1;
+}
+
+# Just in case the new configuration hangs the system, do a sync now.
+if (($ENV{"NIXOS_NO_SYNC"} // "") ne "1") {
+    system("@coreutils@/bin/sync", "-f", "/nix/store");
+}
+
+if ($action eq "boot") {
+    exit(0);
+}
+
+# Check if we can activate the new configuration.
+my $cur_init_interface_version = read_file("/run/current-system/init-interface-version", err_mode => "quiet") // "";
+my $new_init_interface_version = read_file("$toplevel/init-interface-version");
+
+if ($new_init_interface_version ne $cur_init_interface_version) {
+    print STDERR <<'EOF';
+Warning: the new NixOS configuration has an ‘init’ that is
+incompatible with the current configuration.  The new configuration
+won't take effect until you reboot the system.
+EOF
+    exit(100);
+}
+
+# Ignore SIGPIPE so that we're not killed if a pipe or terminal we are
+# writing to goes away, e.g. when we're running on virtual console 1 and
+# restart the "tty1" unit.
+$SIG{PIPE} = "IGNORE";
+
+# Replacement for Net::DBus that calls busctl of the current systemd, parses
+# its JSON output, and returns the response, using only core modules to reduce
+# dependencies on perlPackages in baseSystem.
+sub busctl_call_systemd1_mgr {
+    my (@args) = @_;
+    my $cmd = [
+        "$cur_systemd/busctl", "--json=short", "call", "org.freedesktop.systemd1",
+        "/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager",
+        @args
+    ];
+
+    my ($ok, $err, undef, $stdout) = IPC::Cmd::run(command => $cmd);
+    die $err unless $ok;
+
+    my $res = decode_json(join "", @$stdout);
+    return $res;
+}
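+# For example, busctl_call_systemd1_mgr("ListUnitsByPatterns", "asas", 0, 0)
+# runs `busctl --json=short call ... ListUnitsByPatterns asas 0 0` and returns
+# the decoded reply, whose unit list lives under ->{data}->[0].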
+
+# Asks the currently running systemd instance via dbus which units are active.
+# Returns a hash where the key is the name of each unit and the value a hash
+# of load, state, substate.
+sub get_active_units {
+    my $units = busctl_call_systemd1_mgr("ListUnitsByPatterns", "asas", 0, 0)->{data}->[0];
+    my $res = {};
+    for my $item (@{$units}) {
+        my ($id, $description, $load_state, $active_state, $sub_state,
+            $following, $unit_path, $job_id, $job_type, $job_path) = @{$item};
+        if ($following ne "") {
+            next;
+        }
+        if ($job_id == 0 and $active_state eq "inactive") {
+            next;
+        }
+        $res->{$id} = { load => $load_state, state => $active_state, substate => $sub_state };
+    }
+    return $res;
+}
+
+# Asks the currently running systemd instance whether a unit is currently active.
+# Takes the name of the unit as an argument and returns true if the unit is active or activating.
+sub unit_is_active {
+    my ($unit_name) = @_;
+    my $units = busctl_call_systemd1_mgr("ListUnitsByNames", "as", 1, "--", $unit_name)->{data}->[0];
+    if (scalar(@{$units}) == 0) {
+        return 0;
+    }
+    my $active_state = $units->[0]->[3];
+    return $active_state eq "active" || $active_state eq "activating";
+}
+
+# Parse a fstab file, given its path.
+# Returns a tuple of filesystems and swaps.
+#
+# Filesystems is a hash of mountpoint and { device, fsType, options }
+# Swaps is a hash of device and { options }
+sub parse_fstab {
+    my ($filename) = @_;
+    my ($fss, $swaps);
+    foreach my $line (read_file($filename, err_mode => "quiet")) {
+        chomp($line);
+        $line =~ s/^\s*\#.*//msx;
+        if ($line =~ /^\s*$/msx) {
+            next;
+        }
+        my @xs = split(/\s+/msx, $line);
+        if ($xs[2] eq "swap") {
+            $swaps->{$xs[0]} = { options => $xs[3] // "" };
+        } else {
+            $fss->{$xs[1]} = { device => $xs[0], fsType => $xs[2], options => $xs[3] // "" };
+        }
+    }
+    return ($fss, $swaps);
+}
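+# For example, the fstab line
+#   /dev/disk/by-label/nixos / ext4 defaults 0 2
+# is returned as $fss->{"/"} = { device => "/dev/disk/by-label/nixos",
+# fsType => "ext4", options => "defaults" }.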
+
+# This subroutine takes a single INI file that specifies systemd configuration,
+# such as a unit file, and parses it into a hash where the keys are the sections
+# of the unit file and the values are hashes themselves. These hashes have the unit file
+# keys as their keys (left side of =) and an array of all values that were set as their
+# values. If a value is empty (for example `ExecStart=`), then all current definitions are
+# removed.
+#
+# Instead of returning the hash, this subroutine takes a hashref to return the data in. This
+# allows calling the subroutine multiple times with the same hash to parse override files.
+sub parse_systemd_ini {
+    my ($unit_contents, $path) = @_;
+    # Tie the ini file to a hash for easier access
+    tie(my %file_contents, "Config::IniFiles", (-file => $path, -allowempty => 1, -allowcontinue => 1)); ## no critic(Miscellanea::ProhibitTies)
+
+    # Copy over all sections
+    foreach my $section_name (keys(%file_contents)) {
+        if ($section_name eq "Install") {
+            # Skip the [Install] section because it has no relevant keys for us
+            next;
+        }
+        # Copy over all keys
+        foreach my $ini_key (keys(%{$file_contents{$section_name}})) {
+            # Ensure the value is an array so it's easier to work with
+            my $ini_value = $file_contents{$section_name}{$ini_key};
+            my @ini_values;
+            if (ref($ini_value) eq "ARRAY") {
+                @ini_values = @{$ini_value};
+            } else {
+                @ini_values = $ini_value;
+            }
+            # Go over all values
+            for my $ini_value (@ini_values) {
+                # If a value is empty, it's an override that tells us to clean the value
+                if ($ini_value eq "") {
+                    delete $unit_contents->{$section_name}->{$ini_key};
+                    next;
+                }
+                push(@{$unit_contents->{$section_name}->{$ini_key}}, $ini_value);
+            }
+        }
+    }
+    return;
+}
+
+# This subroutine takes the path to a systemd configuration file (like a unit configuration),
+# parses it, and returns a hash that contains the contents. The contents of this hash are
+# explained in the `parse_systemd_ini` subroutine. Neither the sections nor the keys inside
+# the sections are consistently sorted.
+#
+# If a directory with the same basename ending in .d exists next to the unit file, it will be
+# assumed to contain override files which will be parsed as well and handled properly.
+sub parse_unit {
+    my ($unit_path, $base_unit_path) = @_;
+
+    # Parse the main unit and all overrides
+    my %unit_data;
+    # Replace \ with \\ so glob() still works with units that have a \ in them
+    # Valid characters in unit names are ASCII letters, digits, ":", "-", "_", ".", and "\"
+    $base_unit_path =~ s/\\/\\\\/gmsx;
+    $unit_path =~ s/\\/\\\\/gmsx;
+
+    foreach (glob("${base_unit_path}{,.d/*.conf}")) {
+        parse_systemd_ini(\%unit_data, "$_")
+    }
+    # Handle drop-in template-unit instance overrides
+    if ($unit_path ne $base_unit_path) {
+        foreach (glob("${unit_path}.d/*.conf")) {
+            parse_systemd_ini(\%unit_data, "$_")
+        }
+    }
+    return %unit_data;
+}
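+# For example, for "foo.service" this reads "foo.service" plus any
+# "foo.service.d/*.conf" drop-ins; for a template instance "foo@bar.service"
+# it reads "foo@.service", "foo@.service.d/*.conf" and "foo@bar.service.d/*.conf".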
+
+# Checks whether a specified boolean in a systemd unit is true
+# or false, with a default that is applied when the value is not set.
+sub parse_systemd_bool {
+    my ($unit_config, $section_name, $bool_name, $default) = @_;
+
+    my @values = @{$unit_config->{$section_name}{$bool_name} // []};
+    # Return default if value is not set
+    if ((scalar(@values) < 1) || (not defined($values[-1]))) {
+        return $default;
+    }
+    # If value is defined multiple times, use the last definition
+    my $last_value = $values[-1];
+    # These are valid values as of systemd.syntax(7)
+    return $last_value eq "1" || $last_value eq "yes" || $last_value eq "true" || $last_value eq "on";
+}
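+# For example, parse_systemd_bool(\%unit, "Service", "X-RestartIfChanged", 1)
+# returns 1 unless the unit's [Service] section sets X-RestartIfChanged to a
+# falsy value such as "no" or "false".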
+
+# Writes a unit name into a given file to be more resilient against
+# crashes of the script. Does nothing when the action is dry-activate.
+sub record_unit {
+    my ($fn, $unit) = @_;
+    if ($action ne "dry-activate") {
+        write_file($fn, { append => 1 }, "$unit\n");
+    }
+    return;
+}
+
+# The opposite of record_unit, removes a unit name from a file
+sub unrecord_unit {
+    my ($fn, $unit) = @_;
+    if ($action ne "dry-activate") {
+        edit_file(sub { s/^$unit\n//msx }, $fn);
+    }
+    return;
+}
+
+# Compare the contents of two unit files and return whether the unit
+# needs to be restarted or reloaded. If the units differ, the unit is
+# restarted, unless the only difference is `X-Reload-Triggers` in the
+# `[Unit]` section or `Options` in the `[Mount]` section, in which case
+# the unit is reloaded instead.
+# Returns:
+# - 0 if the units are equal
+# - 1 if the units are different and a restart action is required
+# - 2 if the units are different and a reload action is required
+sub compare_units { ## no critic(Subroutines::ProhibitExcessComplexity)
+    my ($cur_unit, $new_unit) = @_;
+    my $ret = 0;
+    # Keys to ignore in the [Unit] section
+    my %unit_section_ignores = map { $_ => 1 } qw(
+        X-Reload-Triggers
+        Description Documentation
+        OnFailure OnSuccess OnFailureJobMode
+        IgnoreOnIsolate StopWhenUnneeded
+        RefuseManualStart RefuseManualStop
+        AllowIsolate CollectMode
+        SourcePath
+    );
+
+    my $comp_array = sub {
+      my ($a, $b) = @_;
+      return join("\0", @{$a}) eq join("\0", @{$b});
+    };
+
+    # Comparison hash for the sections
+    my %section_cmp = map { $_ => 1 } keys(%{$new_unit});
+    # Iterate over the sections
+    foreach my $section_name (keys(%{$cur_unit})) {
+        # Missing section in the new unit?
+        if (not exists($section_cmp{$section_name})) {
+            # If the [Unit] section was removed, make sure that only keys
+            # were in it that are ignored
+            if ($section_name eq "Unit") {
+                foreach my $ini_key (keys(%{$cur_unit->{"Unit"}})) {
+                    if (not defined($unit_section_ignores{$ini_key})) {
+                        return 1;
+                    }
+                }
+                next; # check the next section
+            } else {
+                return 1;
+            }
+        }
+        delete $section_cmp{$section_name};
+        # Comparison hash for the section contents
+        my %ini_cmp = map { $_ => 1 } keys(%{$new_unit->{$section_name}});
+        # Iterate over the keys of the section
+        foreach my $ini_key (keys(%{$cur_unit->{$section_name}})) {
+            delete $ini_cmp{$ini_key};
+            my @cur_value = @{$cur_unit->{$section_name}{$ini_key}};
+            # If the key is missing in the new unit, they are different...
+            if (not $new_unit->{$section_name}{$ini_key}) {
+                # ... unless the key that is now missing is one of the ignored keys
+                if ($section_name eq "Unit" and defined($unit_section_ignores{$ini_key})) {
+                    next;
+                }
+                return 1;
+            }
+            my @new_value = @{$new_unit->{$section_name}{$ini_key}};
+            # If the contents are different, the units are different
+            if (not $comp_array->(\@cur_value, \@new_value)) {
+                # Check if only the reload triggers changed or one of the ignored keys
+                if ($section_name eq "Unit") {
+                    if ($ini_key eq "X-Reload-Triggers") {
+                        $ret = 2;
+                        next;
+                    } elsif (defined($unit_section_ignores{$ini_key})) {
+                        next;
+                    }
+                }
+                # If this is a mount unit, check if it was only `Options`
+                if ($section_name eq "Mount" and $ini_key eq "Options") {
+                    $ret = 2;
+                    next;
+                }
+                return 1;
+            }
+        }
+        # A key was introduced that was missing in the previous unit
+        if (%ini_cmp) {
+            if ($section_name eq "Unit") {
+                foreach my $ini_key (keys(%ini_cmp)) {
+                    if ($ini_key eq "X-Reload-Triggers") {
+                        $ret = 2;
+                    } elsif (defined($unit_section_ignores{$ini_key})) {
+                        next;
+                    } else {
+                        return 1;
+                    }
+                }
+            } else {
+                return 1;
+            }
+        };
+    }
+    # A section was introduced that was missing in the previous unit
+    if (%section_cmp) {
+        if (%section_cmp == 1 and defined($section_cmp{"Unit"})) {
+            foreach my $ini_key (keys(%{$new_unit->{"Unit"}})) {
+                if (not defined($unit_section_ignores{$ini_key})) {
+                    return 1;
+                } elsif ($ini_key eq "X-Reload-Triggers") {
+                    $ret = 2;
+                }
+            }
+        } else {
+            return 1;
+        }
+    }
+
+    return $ret;
+}
+
+# Called when a unit exists in both the old and the new system and the two versions
+# differ. This figures out which units are to be stopped, restarted, reloaded, started, or skipped.
+sub handle_modified_unit { ## no critic(Subroutines::ProhibitManyArgs, Subroutines::ProhibitExcessComplexity)
+    my ($unit, $base_name, $new_unit_file, $new_base_unit_file, $new_unit_info, $active_cur, $units_to_stop, $units_to_start, $units_to_reload, $units_to_restart, $units_to_skip) = @_;
+
+    if ($unit eq "sysinit.target" || $unit eq "basic.target" || $unit eq "multi-user.target" || $unit eq "graphical.target" || $unit =~ /\.path$/msx || $unit =~ /\.slice$/msx) {
+        # Do nothing.  These cannot be restarted directly.
+
+        # Slices and Paths don't have to be restarted since
+        # properties (resource limits and inotify watches)
+        # seem to get applied on daemon-reload.
+    } elsif ($unit =~ /\.mount$/msx) {
+        # Just restart the unit. We wouldn't have gotten into this subroutine
+        # if only `Options` was changed, in which case the unit would be reloaded.
+        # The only exceptions are / and /nix, because it's very unlikely we can safely
+        # unmount them, so we reload them instead. This means that we may not get
+        # all changes into the running system, but that's better than crashing it.
+        if ($unit eq "-.mount" or $unit eq "nix.mount") {
+            $units_to_reload->{$unit} = 1;
+            record_unit($reload_list_file, $unit);
+        } else {
+            $units_to_restart->{$unit} = 1;
+            record_unit($restart_list_file, $unit);
+        }
+    } elsif ($unit =~ /\.socket$/msx) {
+        # FIXME: do something?
+        # Attempt to fix this: https://github.com/NixOS/nixpkgs/pull/141192
+        # Revert of the attempt: https://github.com/NixOS/nixpkgs/pull/147609
+        # More details: https://github.com/NixOS/nixpkgs/issues/74899#issuecomment-981142430
+    } else {
+        my %new_unit_info = $new_unit_info ? %{$new_unit_info} : parse_unit($new_unit_file, $new_base_unit_file);
+        if (parse_systemd_bool(\%new_unit_info, "Service", "X-ReloadIfChanged", 0) and not $units_to_restart->{$unit} and not $units_to_stop->{$unit}) {
+            $units_to_reload->{$unit} = 1;
+            record_unit($reload_list_file, $unit);
+        }
+        elsif (!parse_systemd_bool(\%new_unit_info, "Service", "X-RestartIfChanged", 1) || parse_systemd_bool(\%new_unit_info, "Unit", "RefuseManualStop", 0) || parse_systemd_bool(\%new_unit_info, "Unit", "X-OnlyManualStart", 0)) {
+            $units_to_skip->{$unit} = 1;
+        } else {
+            # It doesn't make sense to stop and start non-services because
+            # they can't have ExecStop=
+            if (!parse_systemd_bool(\%new_unit_info, "Service", "X-StopIfChanged", 1) || $unit !~ /\.service$/msx) {
+                # This unit should be restarted instead of
+                # stopped and started.
+                $units_to_restart->{$unit} = 1;
+                record_unit($restart_list_file, $unit);
+                # Remove from units to reload so we don't restart and reload
+                if ($units_to_reload->{$unit}) {
+                    delete $units_to_reload->{$unit};
+                    unrecord_unit($reload_list_file, $unit);
+                }
+            } else {
+                # If this unit is socket-activated, then stop the
+                # socket unit(s) as well, and restart the
+                # socket(s) instead of the service.
+                my $socket_activated = 0;
+                if ($unit =~ /\.service$/msx) {
+                    my @sockets = split(/\s+/msx, join(" ", @{$new_unit_info{Service}{Sockets} // []}));
+                    if (scalar(@sockets) == 0) {
+                        @sockets = ("$base_name.socket");
+                    }
+                    foreach my $socket (@sockets) {
+                        if (defined($active_cur->{$socket})) {
+                            # We can now be sure this is a socket-activated unit
+
+                            $units_to_stop->{$socket} = 1;
+                            # Only restart sockets that actually
+                            # exist in new configuration:
+                            if (-e "$toplevel/etc/systemd/system/$socket") {
+                                $units_to_start->{$socket} = 1;
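+                                # $units_to_start and $units_to_restart may be the same
+                                # hashref (see the activation-requested restart path), so
+                                # compare the references to pick the right record file.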
+                                if ($units_to_start eq $units_to_restart) {
+                                    record_unit($restart_list_file, $socket);
+                                } else {
+                                    record_unit($start_list_file, $socket);
+                                }
+                                $socket_activated = 1;
+                            }
+                            # Remove from units to reload so we don't restart and reload
+                            if ($units_to_reload->{$unit}) {
+                                delete $units_to_reload->{$unit};
+                                unrecord_unit($reload_list_file, $unit);
+                            }
+                        }
+                    }
+                }
+
+                # If the unit is not socket-activated, record
+                # that this unit needs to be started below.
+                # We write this to a file to ensure that the
+                # service gets restarted if we're interrupted.
+                if (!$socket_activated) {
+                    $units_to_start->{$unit} = 1;
+                    if ($units_to_start eq $units_to_restart) {
+                        record_unit($restart_list_file, $unit);
+                    } else {
+                        record_unit($start_list_file, $unit);
+                    }
+                }
+
+                $units_to_stop->{$unit} = 1;
+                # Remove from units to reload so we don't restart and reload
+                if ($units_to_reload->{$unit}) {
+                    delete $units_to_reload->{$unit};
+                    unrecord_unit($reload_list_file, $unit);
+                }
+            }
+        }
+    }
+    return;
+}
+
+# Figure out what units need to be stopped, started, restarted or reloaded.
+my (%units_to_stop, %units_to_skip, %units_to_start, %units_to_restart, %units_to_reload);
+
+my %units_to_filter; # units not shown
+
+%units_to_start = map { $_ => 1 }
+    split(/\n/msx, read_file($start_list_file, err_mode => "quiet") // "");
+
+%units_to_restart = map { $_ => 1 }
+    split(/\n/msx, read_file($restart_list_file, err_mode => "quiet") // "");
+
+%units_to_reload = map { $_ => 1 }
+    split(/\n/msx, read_file($reload_list_file, err_mode => "quiet") // "");
+
+my $active_cur = get_active_units();
+while (my ($unit, $state) = each(%{$active_cur})) {
+    my $cur_unit_file = "/etc/systemd/system/$unit";
+    my $new_unit_file = "$toplevel/etc/systemd/system/$unit";
+
+    my $base_unit = $unit;
+    my $cur_base_unit_file = $cur_unit_file;
+    my $new_base_unit_file = $new_unit_file;
+
+    # Detect template instances.
+    if (!-e $cur_unit_file && !-e $new_unit_file && $unit =~ /^(.*)@[^\.]*\.(.*)$/msx) {
+      $base_unit = "$1\@.$2";
+      $cur_base_unit_file = "/etc/systemd/system/$base_unit";
+      $new_base_unit_file = "$toplevel/etc/systemd/system/$base_unit";
+    }
+
+    my $base_name = $base_unit;
+    $base_name =~ s/\.[[:lower:]]*$//msx;
+
+    if (-e $cur_base_unit_file && ($state->{state} eq "active" || $state->{state} eq "activating")) {
+        if (! -e $new_base_unit_file || abs_path($new_base_unit_file) eq "/dev/null") {
+            my %cur_unit_info = parse_unit($cur_unit_file, $cur_base_unit_file);
+            if (parse_systemd_bool(\%cur_unit_info, "Unit", "X-StopOnRemoval", 1)) {
+                $units_to_stop{$unit} = 1;
+            }
+        }
+
+        elsif ($unit =~ /\.target$/msx) {
+            my %new_unit_info = parse_unit($new_unit_file, $new_base_unit_file);
+
+            # Cause all active target units to be restarted below.
+            # This should start most changed units we stop here as
+            # well as any new dependencies (including new mounts and
+            # swap devices).  FIXME: the suspend target is sometimes
+            # active after the system has resumed, which probably
+            # should not be the case.  Just ignore it.
+            if ($unit ne "suspend.target" && $unit ne "hibernate.target" && $unit ne "hybrid-sleep.target") {
+                if (!(parse_systemd_bool(\%new_unit_info, "Unit", "RefuseManualStart", 0) || parse_systemd_bool(\%new_unit_info, "Unit", "X-OnlyManualStart", 0))) {
+                    $units_to_start{$unit} = 1;
+                    record_unit($start_list_file, $unit);
+                    # Don't spam the user with target units that always get started.
+                    if (($ENV{"STC_DISPLAY_ALL_UNITS"} // "") ne "1") {
+                        $units_to_filter{$unit} = 1;
+                    }
+                }
+            }
+
+            # Stop targets that have X-StopOnReconfiguration set.
+            # This is necessary to respect dependency orderings
+            # involving targets: if unit X starts after target Y and
+            # target Y starts after unit Z, then if X and Z have both
+            # changed, then X should be restarted after Z.  However,
+            # if target Y is in the "active" state, X and Z will be
+            # restarted at the same time because X's dependency on Y
+            # is already satisfied.  Thus, we need to stop Y first.
+            # Stopping a target generally has no effect on other units
+            # (unless there is a PartOf dependency), so this is just a
+            # bookkeeping thing to get systemd to do the right thing.
+            if (parse_systemd_bool(\%new_unit_info, "Unit", "X-StopOnReconfiguration", 0)) {
+                $units_to_stop{$unit} = 1;
+            }
+        }
+
+        else {
+            my %cur_unit_info = parse_unit($cur_unit_file, $cur_base_unit_file);
+            my %new_unit_info = parse_unit($new_unit_file, $new_base_unit_file);
+            my $diff = compare_units(\%cur_unit_info, \%new_unit_info);
+            if ($diff == 1) {
+                handle_modified_unit($unit, $base_name, $new_unit_file, $new_base_unit_file, \%new_unit_info, $active_cur, \%units_to_stop, \%units_to_start, \%units_to_reload, \%units_to_restart, \%units_to_skip);
+            } elsif ($diff == 2 and not $units_to_restart{$unit}) {
+                $units_to_reload{$unit} = 1;
+                record_unit($reload_list_file, $unit);
+            }
+        }
+    }
+}
+
+# Converts a path to the name of a systemd mount unit that would be responsible
+# for mounting this path.
+sub path_to_unit_name {
+    my ($path) = @_;
+    # Use the current version of the systemd-escape binary before the daemon is reexeced.
+    open(my $cmd, "-|", "$cur_systemd/systemd-escape", "--suffix=mount", "-p", $path)
+        or die "Unable to escape $path!\n";
+    my $escaped = do { local $/ = undef; <$cmd> };
+    chomp($escaped);
+    close($cmd) or die("Unable to close systemd-escape pipe");
+    return $escaped;
+}
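+# For example, "/var/lib" maps to "var-lib.mount" and "/" maps to "-.mount".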
+
+# Compare the previous and new fstab to figure out which filesystems
+# need a remount or need to be unmounted.  New filesystems are mounted
+# automatically by starting local-fs.target.  FIXME: might be nicer if
+# we generated units for all mounts; then we could unify this with the
+# unit checking code above.
+my ($cur_fss, $cur_swaps) = parse_fstab("/etc/fstab");
+my ($new_fss, $new_swaps) = parse_fstab("$toplevel/etc/fstab");
+foreach my $mount_point (keys(%{$cur_fss})) {
+    my $cur = $cur_fss->{$mount_point};
+    my $new = $new_fss->{$mount_point};
+    my $unit = path_to_unit_name($mount_point);
+    if (!defined($new)) {
+        # Filesystem entry disappeared, so unmount it.
+        $units_to_stop{$unit} = 1;
+    } elsif ($cur->{fsType} ne $new->{fsType} || $cur->{device} ne $new->{device}) {
+        if ($mount_point eq '/' or $mount_point eq '/nix') {
+            if ($cur->{options} ne $new->{options}) {
+                # Mount options changed, so remount it.
+                $units_to_reload{$unit} = 1;
+                record_unit($reload_list_file, $unit);
+            } else {
+                # Don't unmount / or /nix if the device changed
+                $units_to_skip{$unit} = 1;
+            }
+        } else {
+            # Filesystem type or device changed, so unmount and mount it.
+            $units_to_restart{$unit} = 1;
+            record_unit($restart_list_file, $unit);
+        }
+    } elsif ($cur->{options} ne $new->{options}) {
+        # Mount options changed, so remount it.
+        $units_to_reload{$unit} = 1;
+        record_unit($reload_list_file, $unit);
+    }
+}
+
+# Also handles swap devices.
+foreach my $device (keys(%{$cur_swaps})) {
+    my $cur = $cur_swaps->{$device};
+    my $new = $new_swaps->{$device};
+    if (!defined($new)) {
+        # Swap entry disappeared, so turn it off.  Can't use
+        # "systemctl stop" here because systemd has lots of alias
+        # units that prevent a stop from actually calling
+        # "swapoff".
+        if ($action eq "dry-activate") {
+            print STDERR "would stop swap device: $device\n";
+        } else {
+            print STDERR "stopping swap device: $device\n";
+            system("@utillinux@/sbin/swapoff", $device);
+        }
+    }
+    # FIXME: update swap options (i.e. its priority).
+}
+
+
+# Should we have systemd re-exec itself?
+my $cur_pid1_path = abs_path("/proc/1/exe") // "/unknown";
+my $cur_systemd_system_config = abs_path("/etc/systemd/system.conf") // "/unknown";
+my $new_pid1_path = abs_path("$new_systemd/lib/systemd/systemd") or die;
+my $new_systemd_system_config = abs_path("$toplevel/etc/systemd/system.conf") // "/unknown";
+
+my $restart_systemd = $cur_pid1_path ne $new_pid1_path;
+if ($cur_systemd_system_config ne $new_systemd_system_config) {
+    $restart_systemd = 1;
+}
+
+# Takes a hashref of units and returns a sorted array of their names,
+# excluding all units that are also in the global variable `%units_to_filter`.
+sub filter_units {
+    my ($units) = @_;
+    my @res;
+    foreach my $unit (sort(keys(%{$units}))) {
+        if (!defined($units_to_filter{$unit})) {
+            push(@res, $unit);
+        }
+    }
+    return @res;
+}
+
+my @units_to_stop_filtered = filter_units(\%units_to_stop);
+
+
+# Show dry-run actions.
+if ($action eq "dry-activate") {
+    if (scalar(@units_to_stop_filtered) > 0) {
+        print STDERR "would stop the following units: ", join(", ", @units_to_stop_filtered), "\n";
+    }
+    if (scalar(keys(%units_to_skip)) > 0) {
+        print STDERR "would NOT stop the following changed units: ", join(", ", sort(keys(%units_to_skip))), "\n";
+    }
+
+    print STDERR "would activate the configuration...\n";
+    system("$out/dry-activate", "$out");
+
+    # Handle the activation script requesting the restart or reload of a unit.
+    foreach (split(/\n/msx, read_file($dry_restart_by_activation_file, err_mode => "quiet") // "")) {
+        my $unit = $_;
+        my $new_unit_file = "$toplevel/etc/systemd/system/$unit";
+        my $base_unit = $unit;
+        my $new_base_unit_file = $new_unit_file;
+
+        # Detect template instances.
+        if (!-e $new_unit_file && $unit =~ /^(.*)@[^\.]*\.(.*)$/msx) {
+          $base_unit = "$1\@.$2";
+          $new_base_unit_file = "$toplevel/etc/systemd/system/$base_unit";
+        }
+
+        my $base_name = $base_unit;
+        $base_name =~ s/\.[[:lower:]]*$//msx;
+
+        # Start units if they were not active previously
+        if (not defined($active_cur->{$unit})) {
+            $units_to_start{$unit} = 1;
+            next;
+        }
+
+        handle_modified_unit($unit, $base_name, $new_unit_file, $new_base_unit_file, undef, $active_cur, \%units_to_restart, \%units_to_restart, \%units_to_reload, \%units_to_restart, \%units_to_skip);
+    }
+    unlink($dry_restart_by_activation_file);
+
+    foreach (split(/\n/msx, read_file($dry_reload_by_activation_file, err_mode => "quiet") // "")) {
+        my $unit = $_;
+
+        if (defined($active_cur->{$unit}) and not $units_to_restart{$unit} and not $units_to_stop{$unit}) {
+            $units_to_reload{$unit} = 1;
+            record_unit($reload_list_file, $unit);
+        }
+    }
+    unlink($dry_reload_by_activation_file);
+
+    if ($restart_systemd) {
+        print STDERR "would restart systemd\n";
+    }
+    if (scalar(keys(%units_to_reload)) > 0) {
+        print STDERR "would reload the following units: ", join(", ", sort(keys(%units_to_reload))), "\n";
+    }
+    if (scalar(keys(%units_to_restart)) > 0) {
+        print STDERR "would restart the following units: ", join(", ", sort(keys(%units_to_restart))), "\n";
+    }
+    my @units_to_start_filtered = filter_units(\%units_to_start);
+    if (scalar(@units_to_start_filtered)) {
+        print STDERR "would start the following units: ", join(", ", @units_to_start_filtered), "\n";
+    }
+    exit 0;
+}
+
+
+syslog(LOG_NOTICE, "switching to system configuration $toplevel");
+
+if (scalar(keys(%units_to_stop)) > 0) {
+    if (scalar(@units_to_stop_filtered)) {
+        print STDERR "stopping the following units: ", join(", ", @units_to_stop_filtered), "\n";
+    }
+    # Use current version of systemctl binary before daemon is reexeced.
+    system("$cur_systemd/systemctl", "stop", "--", sort(keys(%units_to_stop)));
+}
+
+if (scalar(keys(%units_to_skip)) > 0) {
+    print STDERR "NOT restarting the following changed units: ", join(", ", sort(keys(%units_to_skip))), "\n";
+}
+
+# Activate the new configuration (i.e., update /etc, make accounts,
+# and so on).
+my $res = 0;
+print STDERR "activating the configuration...\n";
+system("$out/activate", "$out") == 0 or $res = 2;
+
+# Handle the activation script requesting the restart or reload of a unit.
+foreach (split(/\n/msx, read_file($restart_by_activation_file, err_mode => "quiet") // "")) {
+    my $unit = $_;
+    my $new_unit_file = "$toplevel/etc/systemd/system/$unit";
+    my $base_unit = $unit;
+    my $new_base_unit_file = $new_unit_file;
+
+    # Detect template instances.
+    if (!-e $new_unit_file && $unit =~ /^(.*)@[^\.]*\.(.*)$/msx) {
+      $base_unit = "$1\@.$2";
+      $new_base_unit_file = "$toplevel/etc/systemd/system/$base_unit";
+    }
+
+    my $base_name = $base_unit;
+    $base_name =~ s/\.[[:lower:]]*$//msx;
+
+    # Start units if they were not active previously
+    if (not defined($active_cur->{$unit})) {
+        $units_to_start{$unit} = 1;
+        record_unit($start_list_file, $unit);
+        next;
+    }
+
+    handle_modified_unit($unit, $base_name, $new_unit_file, $new_base_unit_file, undef, $active_cur, \%units_to_restart, \%units_to_restart, \%units_to_reload, \%units_to_restart, \%units_to_skip);
+}
+# We can remove the file now because it has been propagated to the other restart/reload files
+unlink($restart_by_activation_file);
+
+foreach (split(/\n/msx, read_file($reload_by_activation_file, err_mode => "quiet") // "")) {
+    my $unit = $_;
+
+    if (defined($active_cur->{$unit}) and not $units_to_restart{$unit} and not $units_to_stop{$unit}) {
+        $units_to_reload{$unit} = 1;
+        record_unit($reload_list_file, $unit);
+    }
+}
+# We can remove the file now because it has been propagated to the other reload file
+unlink($reload_by_activation_file);
+
+# Restart systemd if necessary. Note that this is done using the
+# current version of systemd, just in case the new one has trouble
+# communicating with the running pid 1.
+if ($restart_systemd) {
+    print STDERR "restarting systemd...\n";
+    system("$cur_systemd/systemctl", "daemon-reexec") == 0 or $res = 2;
+}
+
+# Forget about previously failed services.
+system("$new_systemd/bin/systemctl", "reset-failed");
+
+# Make systemd reload its units.
+system("$new_systemd/bin/systemctl", "daemon-reload") == 0 or $res = 3;
+
+# Reload user units
+open(my $list_active_users, "-|", "$new_systemd/bin/loginctl", "list-users", "--no-legend") || die("Unable to call loginctl");
+while (my $f = <$list_active_users>) {
+    if ($f !~ /^\s*(?<uid>\d+)\s+(?<user>\S+)/msx) {
+        next;
+    }
+    my ($uid, $name) = ($+{uid}, $+{user});
+    print STDERR "reloading user units for $name...\n";
+
+    system("@su@", "-s", "@shell@", "-l", $name, "-c",
+           "export XDG_RUNTIME_DIR=/run/user/$uid; " .
+           "$cur_systemd/systemctl --user daemon-reexec; " .
+           "$new_systemd/bin/systemctl --user start nixos-activation.service");
+}
+
+close($list_active_users) || die("Unable to close the file handle to loginctl");
+
+# Set the new tmpfiles
+print STDERR "setting up tmpfiles\n";
+system("$new_systemd/bin/systemd-tmpfiles", "--create", "--remove", "--exclude-prefix=/dev") == 0 or $res = 3;
+
+# Before reloading we need to ensure that the units are still active. They may have been
+# deactivated because one of their requirements got stopped. If they are inactive
+# but should have been reloaded, the user probably expects them to be started.
+if (scalar(keys(%units_to_reload)) > 0) {
+    for my $unit (keys(%units_to_reload)) {
+        if (!unit_is_active($unit)) {
+            # Figure out if we need to start the unit
+            my %unit_info = parse_unit("$toplevel/etc/systemd/system/$unit", "$toplevel/etc/systemd/system/$unit");
+            if (!(parse_systemd_bool(\%unit_info, "Unit", "RefuseManualStart", 0) || parse_systemd_bool(\%unit_info, "Unit", "X-OnlyManualStart", 0))) {
+                $units_to_start{$unit} = 1;
+                record_unit($start_list_file, $unit);
+            }
+            # Don't reload the unit, reloading would fail
+            delete $units_to_reload{$unit};
+            unrecord_unit($reload_list_file, $unit);
+        }
+    }
+}
+# Reload units that need it. This includes remounting changed mount
+# units.
+if (scalar(keys(%units_to_reload)) > 0) {
+    print STDERR "reloading the following units: ", join(", ", sort(keys(%units_to_reload))), "\n";
+    system("$new_systemd/bin/systemctl", "reload", "--", sort(keys(%units_to_reload))) == 0 or $res = 4;
+    unlink($reload_list_file);
+}
+
+# Restart changed services (those that have to be restarted rather
+# than stopped and started).
+if (scalar(keys(%units_to_restart)) > 0) {
+    print STDERR "restarting the following units: ", join(", ", sort(keys(%units_to_restart))), "\n";
+    system("$new_systemd/bin/systemctl", "restart", "--", sort(keys(%units_to_restart))) == 0 or $res = 4;
+    unlink($restart_list_file);
+}
+
+# Start all active targets, as well as changed units we stopped above.
+# The latter is necessary because some may not be dependencies of the
+# targets (i.e., they were manually started).  FIXME: detect units
+# that are symlinks to other units.  We shouldn't start both at the
+# same time because we'll get a "Failed to add path to set" error from
+# systemd.
+my @units_to_start_filtered = filter_units(\%units_to_start);
+if (scalar(@units_to_start_filtered)) {
+    print STDERR "starting the following units: ", join(", ", @units_to_start_filtered), "\n"
+}
+system("$new_systemd/bin/systemctl", "start", "--", sort(keys(%units_to_start))) == 0 or $res = 4;
+unlink($start_list_file);
+
+
+# Print failed and new units.
+my (@failed, @new);
+my $active_new = get_active_units();
+while (my ($unit, $state) = each(%{$active_new})) {
+    if ($state->{state} eq "failed") {
+        push(@failed, $unit);
+        next;
+    }
+
+    if ($state->{substate} eq "auto-restart") {
+        # A unit in auto-restart substate is a failure *if* it previously failed to start
+        open(my $main_status_fd, "-|", "$new_systemd/bin/systemctl", "show", "--value", "--property=ExecMainStatus", $unit) || die("Unable to call 'systemctl show'");
+        my $main_status = do { local $/ = undef; <$main_status_fd> };
+        close($main_status_fd) || die("Unable to close 'systemctl show' fd");
+        chomp($main_status);
+
+        if ($main_status ne "0") {
+            push(@failed, $unit);
+            next;
+        }
+    }
+
+    # Ignore scopes since they are not managed by this script but rather
+    # created and managed by third-party services via the systemd dbus API.
+    # This only lists units that are not failed (including ones that are in auto-restart but have not failed previously)
+    if ($state->{state} ne "failed" && !defined($active_cur->{$unit}) && $unit !~ /\.scope$/msx) {
+        push(@new, $unit);
+    }
+}
+
+if (scalar(@new) > 0) {
+    print STDERR "the following new units were started: ", join(", ", sort(@new)), "\n"
+}
+
+if (scalar(@failed) > 0) {
+    my @failed_sorted = sort(@failed);
+    print STDERR "warning: the following units failed: ", join(", ", @failed_sorted), "\n\n";
+    system("$new_systemd/bin/systemctl status --no-pager --full '" . join("' '", @failed_sorted) . "' >&2");
+    $res = 4;
+}
+
+if ($res == 0) {
+    syslog(LOG_NOTICE, "finished switching to system configuration $toplevel");
+} else {
+    syslog(LOG_ERR, "switching to system configuration $toplevel failed (status $res)");
+}
+
+close($stc_lock) or die "Could not close lock - $!";
+exit($res);
diff --git a/nixpkgs/nixos/modules/system/activation/switchable-system.nix b/nixpkgs/nixos/modules/system/activation/switchable-system.nix
new file mode 100644
index 000000000000..00bc18e48d1f
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/switchable-system.nix
@@ -0,0 +1,55 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  perlWrapped = pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp ]);
+
+in
+
+{
+
+  options = {
+    system.switch.enable = lib.mkOption {
+      type = lib.types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to include the capability to switch configurations.
+
+        Disabling this makes the system unable to be reconfigured via `nixos-rebuild`.
+
+        This is good for image-based appliances where updates are handled
+        outside the image. Reducing features makes the image lighter and
+        slightly more secure.
+      '';
+    };
+  };
+
+  config = lib.mkIf config.system.switch.enable {
+    system.activatableSystemBuilderCommands = ''
+      mkdir $out/bin
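+      # Assumption: toplevelVar is set by the surrounding (activatable) system
+      # builder; the bash indirection ''${!toplevelVar} expands to the value of
+      # the variable it names, i.e. the path of the toplevel being substituted in.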
+      substitute ${./switch-to-configuration.pl} $out/bin/switch-to-configuration \
+        --subst-var out \
+        --subst-var-by toplevel ''${!toplevelVar} \
+        --subst-var-by coreutils "${pkgs.coreutils}" \
+        --subst-var-by distroId ${lib.escapeShellArg config.system.nixos.distroId} \
+        --subst-var-by installBootLoader ${lib.escapeShellArg config.system.build.installBootLoader} \
+        --subst-var-by localeArchive "${config.i18n.glibcLocales}/lib/locale/locale-archive" \
+        --subst-var-by perl "${perlWrapped}" \
+        --subst-var-by shell "${pkgs.bash}/bin/sh" \
+        --subst-var-by su "${pkgs.shadow.su}/bin/su" \
+        --subst-var-by systemd "${config.systemd.package}" \
+        --subst-var-by utillinux "${pkgs.util-linux}" \
+        ;
+
+      chmod +x $out/bin/switch-to-configuration
+      ${lib.optionalString (pkgs.stdenv.hostPlatform == pkgs.stdenv.buildPlatform) ''
+        if ! output=$(${perlWrapped}/bin/perl -c $out/bin/switch-to-configuration 2>&1); then
+          echo "switch-to-configuration syntax is not valid:"
+          echo "$output"
+          exit 1
+        fi
+      ''}
+    '';
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/system/activation/test.nix b/nixpkgs/nixos/modules/system/activation/test.nix
new file mode 100644
index 000000000000..8cf000451c6e
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/test.nix
@@ -0,0 +1,27 @@
+{ lib
+, nixos
+, expect
+, testers
+}:
+let
+  node-forbiddenDependencies-fail = nixos ({ ... }: {
+    system.forbiddenDependenciesRegex = "-dev$";
+    environment.etc."dev-dependency" = {
+      text = "${expect.dev}";
+    };
+    documentation.enable = false;
+    fileSystems."/".device = "ignore-root-device";
+    boot.loader.grub.enable = false;
+  });
+  node-forbiddenDependencies-succeed = nixos ({ ... }: {
+    system.forbiddenDependenciesRegex = "-dev$";
+    system.extraDependencies = [ expect.dev ];
+    documentation.enable = false;
+    fileSystems."/".device = "ignore-root-device";
+    boot.loader.grub.enable = false;
+  });
+in
+lib.recurseIntoAttrs {
+  test-forbiddenDependencies-fail = testers.testBuildFailure node-forbiddenDependencies-fail.config.system.build.toplevel;
+  test-forbiddenDependencies-succeed = node-forbiddenDependencies-succeed.config.system.build.toplevel;
+}
diff --git a/nixpkgs/nixos/modules/system/activation/top-level.nix b/nixpkgs/nixos/modules/system/activation/top-level.nix
new file mode 100644
index 000000000000..1f9ad570db7d
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/activation/top-level.nix
@@ -0,0 +1,335 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  systemBuilder =
+    ''
+      mkdir $out
+
+      ${if config.boot.initrd.systemd.enable then ''
+        cp ${config.system.build.bootStage2} $out/prepare-root
+        substituteInPlace $out/prepare-root --subst-var-by systemConfig $out
+        # This must not be a symlink, or the abs_path call in the GRUB builder
+        # used by the tests will resolve the symlink and we end up with a path
+        # that doesn't point to a system closure.
+        cp "$systemd/lib/systemd/systemd" $out/init
+      '' else ''
+        cp ${config.system.build.bootStage2} $out/init
+        substituteInPlace $out/init --subst-var-by systemConfig $out
+      ''}
+
+      ln -s ${config.system.build.etc}/etc $out/etc
+      ln -s ${config.system.path} $out/sw
+      ln -s "$systemd" $out/systemd
+
+      echo -n "systemd ${toString config.systemd.package.interfaceVersion}" > $out/init-interface-version
+      echo -n "$nixosLabel" > $out/nixos-version
+      echo -n "${config.boot.kernelPackages.stdenv.hostPlatform.system}" > $out/system
+
+      ${config.system.systemBuilderCommands}
+
+      cp "$extraDependenciesPath" "$out/extra-dependencies"
+
+      ${optionalString (!config.boot.isContainer && config.boot.bootspec.enable) ''
+        ${config.boot.bootspec.writer}
+        ${optionalString config.boot.bootspec.enableValidation
+          ''${config.boot.bootspec.validator} "$out/${config.boot.bootspec.filename}"''}
+      ''}
+
+      ${config.system.extraSystemBuilderCmds}
+    '';
+
+  # Putting it all together.  This builds a store path containing
+  # symlinks to the various parts of the built configuration (the
+  # kernel, systemd units, init scripts, etc.) as well as a script
+  # `switch-to-configuration` that activates the configuration and
+  # makes it bootable. See `activatable-system.nix`.
+  baseSystem = pkgs.stdenvNoCC.mkDerivation ({
+    name = "nixos-system-${config.system.name}-${config.system.nixos.label}";
+    preferLocalBuild = true;
+    allowSubstitutes = false;
+    passAsFile = [ "extraDependencies" ];
+    buildCommand = systemBuilder;
+
+    systemd = config.systemd.package;
+
+    nixosLabel = config.system.nixos.label;
+
+    inherit (config.system) extraDependencies;
+  } // config.system.systemBuilderArgs);
+
+  # Handle assertions and warnings
+
+  failedAssertions = map (x: x.message) (filter (x: !x.assertion) config.assertions);
+
+  baseSystemAssertWarn = if failedAssertions != []
+    then throw "\nFailed assertions:\n${concatStringsSep "\n" (map (x: "- ${x}") failedAssertions)}"
+    else showWarnings config.warnings baseSystem;
+
+  # Replace runtime dependencies
+  system = foldr ({ oldDependency, newDependency }: drv:
+      pkgs.replaceDependency { inherit oldDependency newDependency drv; }
+    ) baseSystemAssertWarn config.system.replaceRuntimeDependencies;
+
+  systemWithBuildDeps = system.overrideAttrs (o: {
+    systemBuildClosure = pkgs.closureInfo { rootPaths = [ system.drvPath ]; };
+    buildCommand = o.buildCommand + ''
+      ln -sn $systemBuildClosure $out/build-closure
+    '';
+  });
+
+in
+
+{
+  imports = [
+    ../build.nix
+    (mkRemovedOptionModule [ "nesting" "clone" ] "Use `specialisation.«name» = { inheritParentConfig = true; configuration = { ... }; }` instead.")
+    (mkRemovedOptionModule [ "nesting" "children" ] "Use `specialisation.«name».configuration = { ... }` instead.")
+  ];
+
+  options = {
+
+    system.boot.loader.id = mkOption {
+      internal = true;
+      default = "";
+      description = lib.mdDoc ''
+        Id string of the used bootloader.
+      '';
+    };
+
+    system.boot.loader.kernelFile = mkOption {
+      internal = true;
+      default = pkgs.stdenv.hostPlatform.linux-kernel.target;
+      defaultText = literalExpression "pkgs.stdenv.hostPlatform.linux-kernel.target";
+      type = types.str;
+      description = lib.mdDoc ''
+        Name of the kernel file to be passed to the bootloader.
+      '';
+    };
+
+    system.boot.loader.initrdFile = mkOption {
+      internal = true;
+      default = "initrd";
+      type = types.str;
+      description = lib.mdDoc ''
+        Name of the initrd file to be passed to the bootloader.
+      '';
+    };
+
+    system.build = {
+      toplevel = mkOption {
+        type = types.package;
+        readOnly = true;
+        description = lib.mdDoc ''
+          This option contains the store path that typically represents a NixOS system.
+
+          You can read this path in a custom deployment tool for example.
+        '';
+      };
+    };
+
+
+    system.copySystemConfiguration = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        If enabled, copies the NixOS configuration file
+        (usually {file}`/etc/nixos/configuration.nix`)
+        and links it from the resulting system
+        (making it available as {file}`/run/current-system/configuration.nix`).
+        Note that only this single file is copied, even if it imports others.
+      '';
+    };
+
+    system.systemBuilderCommands = mkOption {
+      type = types.lines;
+      internal = true;
+      default = "";
+      description = ''
+        This code will be added to the builder creating the system store path.
+      '';
+    };
+
+    system.systemBuilderArgs = mkOption {
+      type = types.attrsOf types.unspecified;
+      internal = true;
+      default = {};
+      description = lib.mdDoc ''
+        `lib.mkDerivation` attributes that will be passed to the top level system builder.
+      '';
+    };
+
+    system.forbiddenDependenciesRegex = mkOption {
+      default = "";
+      example = "-dev$";
+      type = types.str;
+      description = lib.mdDoc ''
+        A POSIX Extended Regular Expression that matches store paths that
+        should not appear in the system closure, with the exception of {option}`system.extraDependencies`, which is not checked.
+      '';
+    };
+
+    system.extraSystemBuilderCmds = mkOption {
+      type = types.lines;
+      internal = true;
+      default = "";
+      description = lib.mdDoc ''
+        This code will be added to the builder creating the system store path.
+      '';
+    };
+
+    system.extraDependencies = mkOption {
+      type = types.listOf types.pathInStore;
+      default = [];
+      description = lib.mdDoc ''
+        A list of paths that should be included in the system
+        closure but generally not visible to users.
+
+        This option has also been used for build-time checks, but the
+        `system.checks` option is more appropriate for that purpose as checks
+        should not leave a trace in the built system configuration.
+      '';
+    };
+
+    system.checks = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      description = lib.mdDoc ''
+        Packages that are added as dependencies of the system's build, usually
+        for the purpose of validating some part of the configuration.
+
+        Unlike `system.extraDependencies`, these store paths do not
+        become part of the built system configuration.
+      '';
+    };
+
+    system.replaceRuntimeDependencies = mkOption {
+      default = [];
+      example = lib.literalExpression "[ ({ original = pkgs.openssl; replacement = pkgs.callPackage /path/to/openssl { }; }) ]";
+      type = types.listOf (types.submodule (
+        { ... }: {
+          options.original = mkOption {
+            type = types.package;
+            description = lib.mdDoc "The original package to override.";
+          };
+
+          options.replacement = mkOption {
+            type = types.package;
+            description = lib.mdDoc "The replacement package.";
+          };
+        })
+      );
+      apply = map ({ original, replacement, ... }: {
+        oldDependency = original;
+        newDependency = replacement;
+      });
+      description = lib.mdDoc ''
+        List of packages to override without doing a full rebuild.
+        The original derivation and replacement derivation must have the same
+        name length, and ideally should have close-to-identical directory layout.
+      '';
+    };
+
+    system.name = mkOption {
+      type = types.str;
+      default =
+        if config.networking.hostName == ""
+        then "unnamed"
+        else config.networking.hostName;
+      defaultText = literalExpression ''
+        if config.networking.hostName == ""
+        then "unnamed"
+        else config.networking.hostName;
+      '';
+      description = lib.mdDoc ''
+        The name of the system used in the {option}`system.build.toplevel` derivation.
+
+        That derivation has the following name:
+        `"nixos-system-''${config.system.name}-''${config.system.nixos.label}"`
+      '';
+    };
+
+    system.includeBuildDependencies = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to include the build closure of the whole system in
+        its runtime closure.  This can be useful for making changes
+        fully offline, as it includes all sources, patches, and
+        intermediate outputs required to build all the derivations
+        that the system depends on.
+
+        Note that this includes _all_ the derivations, down from the
+        included applications to their sources, the compilers used to
+        build them, and even the bootstrap compiler used to compile
+        the compilers. This increases the size of the system and the
+        time needed to download its dependencies drastically: a
+        minimal configuration with no extra services enabled grows
+        from ~670MiB in size to 13.5GiB, and takes proportionally
+        longer to download.
+      '';
+    };
+
+  };
+
+
+  config = {
+    assertions = [
+      {
+        assertion = config.system.copySystemConfiguration -> !lib.inPureEvalMode;
+        message = "system.copySystemConfiguration is not supported with flakes";
+      }
+    ];
+
+    system.extraSystemBuilderCmds =
+      optionalString
+        config.system.copySystemConfiguration
+        ''ln -s '${import ../../../lib/from-env.nix "NIXOS_CONFIG" <nixos-config>}' \
+            "$out/configuration.nix"
+        '' +
+      optionalString
+        (config.system.forbiddenDependenciesRegex != "")
+        ''
+          if [[ $forbiddenDependenciesRegex != "" && -n $closureInfo ]]; then
+            if forbiddenPaths="$(grep -E -- "$forbiddenDependenciesRegex" $closureInfo/store-paths)"; then
+              echo -e "System closure $out contains the following disallowed paths:\n$forbiddenPaths"
+              exit 1
+            fi
+          fi
+        '';
+
+    system.systemBuilderArgs = {
+
+      # Legacy environment variables. These were used by the activation script,
+      # but some other script might still depend on them, although unlikely.
+      installBootLoader = config.system.build.installBootLoader;
+      localeArchive = "${config.i18n.glibcLocales}/lib/locale/locale-archive";
+      distroId = config.system.nixos.distroId;
+      perl = pkgs.perl.withPackages (p: with p; [ ConfigIniFiles FileSlurp ]);
+      # End of legacy environment variables
+
+
+      # Not actually used in the builder. `passedChecks` is just here to create
+      # the build dependencies. Checks are similar to build dependencies in the
+      # sense that if they fail, the system build fails. However, checks do not
+      # produce any output of value, so they are not used by the system builder.
+      # In fact, using them runs the risk of accidentally adding unneeded paths
+      # to the system closure, which defeats the purpose of the `system.checks`
+      # option, as opposed to `system.extraDependencies`.
+      passedChecks = concatStringsSep " " config.system.checks;
+    }
+    // lib.optionalAttrs (config.system.forbiddenDependenciesRegex != "") {
+      inherit (config.system) forbiddenDependenciesRegex;
+      closureInfo = pkgs.closureInfo { rootPaths = [
+        # override to avoid infinite recursion (and to allow using extraDependencies to add forbidden dependencies)
+        (config.system.build.toplevel.overrideAttrs (_: { extraDependencies = []; closureInfo = null; }))
+      ]; };
+    };
+
+
+    system.build.toplevel = if config.system.includeBuildDependencies then systemWithBuildDeps else system;
+
+  };
+
+}
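As a rough illustration of the options added above (not part of the patch), a configuration can combine `system.forbiddenDependenciesRegex` with `system.extraDependencies`, mirroring the test at the top of this diff; the `pkgs.expect.dev` output is only an arbitrary stand-in for a path you want to allow:

```nix
{ pkgs, ... }:
{
  # Fail the system build if any store path ending in "-dev" ends up in the closure...
  system.forbiddenDependenciesRegex = "-dev$";
  # ...except for paths pulled in explicitly here, which are not checked.
  system.extraDependencies = [ pkgs.expect.dev ];
}
```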
diff --git a/nixpkgs/nixos/modules/system/boot/binfmt.nix b/nixpkgs/nixos/modules/system/boot/binfmt.nix
new file mode 100644
index 000000000000..d16152ab9dec
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/binfmt.nix
@@ -0,0 +1,338 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) mkOption mkDefault types optionalString stringAfter;
+
+  cfg = config.boot.binfmt;
+
+  makeBinfmtLine = name: { recognitionType, offset, magicOrExtension
+                         , mask, preserveArgvZero, openBinary
+                         , matchCredentials, fixBinary, ...
+                         }: let
+    type = if recognitionType == "magic" then "M" else "E";
+    offset' = toString offset;
+    mask' = toString mask;
+    interpreter = "/run/binfmt/${name}";
+    flags = if !(matchCredentials -> openBinary)
+              then throw "boot.binfmt.registrations.${name}: you can't specify openBinary = false when matchCredentials = true."
+            else optionalString preserveArgvZero "P" +
+                 optionalString (openBinary && !matchCredentials) "O" +
+                 optionalString matchCredentials "C" +
+                 optionalString fixBinary "F";
+  in ":${name}:${type}:${offset'}:${magicOrExtension}:${mask'}:${interpreter}:${flags}";
+
+  mkInterpreter = name: { interpreter, wrapInterpreterInShell, ... }:
+    if wrapInterpreterInShell
+    then pkgs.writeShellScript "${name}-interpreter" ''
+           #!${pkgs.bash}/bin/sh
+           exec -- ${interpreter} "$@"
+         ''
+    else interpreter;
+
+  getEmulator = system: (lib.systems.elaborate { inherit system; }).emulator pkgs;
+  getQemuArch = system: (lib.systems.elaborate { inherit system; }).qemuArch;
+
+  # Mapping of systems to "magicOrExtension" and "mask". Mostly taken from:
+  # - https://github.com/cleverca22/nixos-configs/blob/master/qemu.nix
+  # and
+  # - https://github.com/qemu/qemu/blob/master/scripts/qemu-binfmt-conf.sh
+  # TODO: maybe put these in a JSON file?
+  magics = {
+    armv6l-linux = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\x00\xff\xfe\xff\xff\xff'';
+    };
+    armv7l-linux = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x28\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\x00\xff\xfe\xff\xff\xff'';
+    };
+    aarch64-linux = {
+      magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\x00\xff\xfe\xff\xff\xff'';
+    };
+    aarch64_be-linux = {
+      magicOrExtension = ''\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xb7'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
+    };
+    i386-linux = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x03\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    i486-linux = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x06\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    i586-linux = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x06\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    i686-linux = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x06\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    x86_64-linux = {
+      magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x3e\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    alpha-linux = {
+      magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x26\x90'';
+      mask = ''\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    sparc64-linux = {
+      magicOrExtension = ''\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
+    };
+    sparc-linux = {
+      magicOrExtension = ''\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x12'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
+    };
+    powerpc-linux = {
+      magicOrExtension = ''\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x14'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
+    };
+    powerpc64-linux = {
+      magicOrExtension = ''\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x15'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
+    };
+    powerpc64le-linux = {
+      magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x15\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\x00'';
+    };
+    mips-linux = {
+      magicOrExtension = ''\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20'';
+    };
+    mipsel-linux = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00'';
+    };
+    mips64-linux = {
+      magicOrExtension = ''\x7fELF\x02\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff'';
+    };
+    mips64el-linux = {
+      magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    mips64-linuxabin32 = {
+      magicOrExtension = ''\x7fELF\x01\x02\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20'';
+    };
+    mips64el-linuxabin32 = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x08\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x20\x00\x00\x00'';
+    };
+    riscv32-linux = {
+      magicOrExtension = ''\x7fELF\x01\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    riscv64-linux = {
+      magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\xf3\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    loongarch64-linux = {
+      magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x02\x01'';
+      mask = ''\xff\xff\xff\xff\xff\xff\xff\xfc\x00\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+    };
+    wasm32-wasi = {
+      magicOrExtension = ''\x00asm'';
+      mask = ''\xff\xff\xff\xff'';
+    };
+    wasm64-wasi = {
+      magicOrExtension = ''\x00asm'';
+      mask = ''\xff\xff\xff\xff'';
+    };
+    x86_64-windows.magicOrExtension = "MZ";
+    i686-windows.magicOrExtension = "MZ";
+  };
+
+in {
+  imports = [
+    (lib.mkRenamedOptionModule [ "boot" "binfmtMiscRegistrations" ] [ "boot" "binfmt" "registrations" ])
+  ];
+
+  options = {
+    boot.binfmt = {
+      registrations = mkOption {
+        default = {};
+
+        description = lib.mdDoc ''
+          Extra binary formats to register with the kernel.
+          See https://www.kernel.org/doc/html/latest/admin-guide/binfmt-misc.html for more details.
+        '';
+
+        type = types.attrsOf (types.submodule ({ config, ... }: {
+          options = {
+            recognitionType = mkOption {
+              default = "magic";
+              description = lib.mdDoc "Whether to recognize executables by magic number or extension.";
+              type = types.enum [ "magic" "extension" ];
+            };
+
+            offset = mkOption {
+              default = null;
+              description = lib.mdDoc "The byte offset of the magic number used for recognition.";
+              type = types.nullOr types.int;
+            };
+
+            magicOrExtension = mkOption {
+              description = lib.mdDoc "The magic number or extension to match on.";
+              type = types.str;
+            };
+
+            mask = mkOption {
+              default = null;
+              description =
+                lib.mdDoc "A mask to be ANDed with the byte sequence of the file before matching";
+              type = types.nullOr types.str;
+            };
+
+            interpreter = mkOption {
+              description = lib.mdDoc ''
+                The interpreter to invoke to run the program.
+
+                Note that the actual registration will point to
+                /run/binfmt/''${name}, so the kernel interpreter length
+                limit doesn't apply.
+              '';
+              type = types.path;
+            };
+
+            preserveArgvZero = mkOption {
+              default = false;
+              description = lib.mdDoc ''
+                Whether to pass the original argv[0] to the interpreter.
+
+                See the description of the 'P' flag in the kernel docs
+                for more details.
+              '';
+              type = types.bool;
+            };
+
+            openBinary = mkOption {
+              default = config.matchCredentials;
+              description = lib.mdDoc ''
+                Whether to pass the binary to the interpreter as an open
+                file descriptor, instead of a path.
+              '';
+              type = types.bool;
+            };
+
+            matchCredentials = mkOption {
+              default = false;
+              description = lib.mdDoc ''
+                Whether to launch with the credentials and security
+                token of the binary, not the interpreter (e.g. setuid
+                bit).
+
+                See the description of the 'C' flag in the kernel docs
+                for more details.
+
+                Implies/requires openBinary = true.
+              '';
+              type = types.bool;
+            };
+
+            fixBinary = mkOption {
+              default = false;
+              description = lib.mdDoc ''
+                Whether to open the interpreter file as soon as the
+                registration is loaded, rather than waiting for a
+                relevant file to be invoked.
+
+                See the description of the 'F' flag in the kernel docs
+                for more details.
+              '';
+              type = types.bool;
+            };
+
+            wrapInterpreterInShell = mkOption {
+              default = true;
+              description = lib.mdDoc ''
+                Whether to wrap the interpreter in a shell script.
+
+                This allows a shell command to be set as the interpreter.
+              '';
+              type = types.bool;
+            };
+
+            interpreterSandboxPath = mkOption {
+              internal = true;
+              default = null;
+              description = lib.mdDoc ''
+                Path of the interpreter to expose in the build sandbox.
+              '';
+              type = types.nullOr types.path;
+            };
+          };
+        }));
+      };
+
+      emulatedSystems = mkOption {
+        default = [];
+        example = [ "wasm32-wasi" "x86_64-windows" "aarch64-linux" ];
+        description = lib.mdDoc ''
+          List of systems to emulate. Will also configure Nix to
+          support your new systems.
+          Warning: the builder can execute all emulated systems within the same build, which introduces impurities in the case of cross compilation.
+        '';
+        type = types.listOf (types.enum (builtins.attrNames magics));
+      };
+    };
+  };
+
+  config = {
+    boot.binfmt.registrations = builtins.listToAttrs (map (system: {
+      name = system;
+      value = { config, ... }: let
+        interpreter = getEmulator system;
+        qemuArch = getQemuArch system;
+
+        preserveArgvZero = "qemu-${qemuArch}" == baseNameOf interpreter;
+        interpreterReg = let
+          wrapperName = "qemu-${qemuArch}-binfmt-P";
+          wrapper = pkgs.wrapQemuBinfmtP wrapperName interpreter;
+        in
+          if preserveArgvZero then "${wrapper}/bin/${wrapperName}"
+          else interpreter;
+      in ({
+        preserveArgvZero = mkDefault preserveArgvZero;
+
+        interpreter = mkDefault interpreterReg;
+        wrapInterpreterInShell = mkDefault (!config.preserveArgvZero);
+        interpreterSandboxPath = mkDefault (dirOf (dirOf config.interpreter));
+      } // (magics.${system} or (throw "Cannot create binfmt registration for system ${system}")));
+    }) cfg.emulatedSystems);
+    nix.settings = lib.mkIf (cfg.emulatedSystems != []) {
+      extra-platforms = cfg.emulatedSystems ++ lib.optional pkgs.stdenv.hostPlatform.isx86_64 "i686-linux";
+      extra-sandbox-paths = let
+        ruleFor = system: cfg.registrations.${system};
+        hasWrappedRule = lib.any (system: (ruleFor system).wrapInterpreterInShell) cfg.emulatedSystems;
+      in [ "/run/binfmt" ]
+        ++ lib.optional hasWrappedRule "${pkgs.bash}"
+        ++ (map (system: (ruleFor system).interpreterSandboxPath) cfg.emulatedSystems);
+    };
+
+    environment.etc."binfmt.d/nixos.conf".source = builtins.toFile "binfmt_nixos.conf"
+      (lib.concatStringsSep "\n" (lib.mapAttrsToList makeBinfmtLine config.boot.binfmt.registrations));
+
+    systemd = lib.mkMerge [
+      ({ tmpfiles.rules = [
+          "d /run/binfmt 0755 -"
+        ] ++ lib.mapAttrsToList
+          (name: interpreter:
+            "L+ /run/binfmt/${name} - - - - ${interpreter}"
+          )
+          (lib.mapAttrs mkInterpreter config.boot.binfmt.registrations);
+      })
+
+      (lib.mkIf (config.boot.binfmt.registrations != {}) {
+        additionalUpstreamSystemUnits = [
+          "proc-sys-fs-binfmt_misc.automount"
+          "proc-sys-fs-binfmt_misc.mount"
+          "systemd-binfmt.service"
+        ];
+        services.systemd-binfmt.restartTriggers = [ (builtins.toJSON config.boot.binfmt.registrations) ];
+      })
+    ];
+  };
+}
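A minimal sketch of how the `boot.binfmt` options defined above might be used (hypothetical configuration fragment, not part of the patch). The first line is the common case; the manual registration shows extension-based matching, with wasmtime as one possible interpreter choice:

```nix
{ pkgs, ... }:
{
  # Run aarch64-linux and riscv64-linux binaries through the emulators picked by
  # lib.systems; this also adds the systems to Nix's extra-platforms.
  boot.binfmt.emulatedSystems = [ "aarch64-linux" "riscv64-linux" ];

  # A manual registration matched by file extension instead of magic bytes.
  boot.binfmt.registrations.wasm = {
    recognitionType = "extension";
    magicOrExtension = "wasm";
    interpreter = "${pkgs.wasmtime}/bin/wasmtime"; # example interpreter
  };
}
```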
diff --git a/nixpkgs/nixos/modules/system/boot/emergency-mode.nix b/nixpkgs/nixos/modules/system/boot/emergency-mode.nix
new file mode 100644
index 000000000000..a2163aa5ffb3
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/emergency-mode.nix
@@ -0,0 +1,37 @@
+{ config, lib, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+
+    systemd.enableEmergencyMode = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable emergency mode, which is an
+        {command}`sulogin` shell started on the console if
+        mounting a filesystem fails.  Since some machines (like EC2
+        instances) have no console of any kind, emergency mode doesn't
+        make sense, and it's better to continue with the boot insofar
+        as possible.
+      '';
+    };
+
+  };
+
+  ###### implementation
+
+  config = {
+
+    systemd.additionalUpstreamSystemUnits = optionals
+      config.systemd.enableEmergencyMode [
+        "emergency.target" "emergency.service"
+      ];
+
+  };
+
+}
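For example (hypothetical fragment, not part of the patch), a headless cloud instance with no usable console might turn emergency mode off so boot continues as far as possible:

```nix
{
  # No console to type into sulogin on, so skip emergency.target/emergency.service.
  systemd.enableEmergencyMode = false;
}
```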
diff --git a/nixpkgs/nixos/modules/system/boot/grow-partition.nix b/nixpkgs/nixos/modules/system/boot/grow-partition.nix
new file mode 100644
index 000000000000..897602f9826a
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/grow-partition.nix
@@ -0,0 +1,54 @@
+# This module automatically grows the root partition.
+# This allows an instance to be created with a bigger root filesystem
+# than provided by the machine image.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "virtualisation" "growPartition" ] [ "boot" "growPartition" ])
+  ];
+
+  options = {
+    boot.growPartition = mkEnableOption (lib.mdDoc "growing the root partition on boot");
+  };
+
+  config = mkIf config.boot.growPartition {
+    assertions = [
+      {
+        assertion = !config.boot.initrd.systemd.repart.enable && !config.systemd.repart.enable;
+        message = "systemd-repart already grows the root partition and thus you should not use boot.growPartition";
+      }
+    ];
+    systemd.services.growpart = {
+      wantedBy = [ "-.mount" ];
+      after = [ "-.mount" ];
+      before = [ "systemd-growfs-root.service" ];
+      conflicts = [ "shutdown.target" ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        TimeoutSec = "infinity";
+        # growpart returns 1 if the partition is already grown
+        SuccessExitStatus = "0 1";
+      };
+
+      script = ''
+        rootDevice="${config.fileSystems."/".device}"
+        rootDevice="$(readlink -f "$rootDevice")"
+        parentDevice="$rootDevice"
+        while [ "''${parentDevice%[0-9]}" != "''${parentDevice}" ]; do
+          parentDevice="''${parentDevice%[0-9]}";
+        done
+        partNum="''${rootDevice#''${parentDevice}}"
+        if [ "''${parentDevice%[0-9]p}" != "''${parentDevice}" ] && [ -b "''${parentDevice%p}" ]; then
+          parentDevice="''${parentDevice%p}"
+        fi
+        "${pkgs.cloud-utils.guest}/bin/growpart" "$parentDevice" "$partNum"
+      '';
+    };
+  };
+}
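A minimal usage sketch (hypothetical, not part of the patch): enable root-partition growing on an image-based deployment, keeping in mind the assertion above that forbids combining it with systemd-repart:

```nix
{
  # Grow the root partition on first boot to fill the actual disk.
  boot.growPartition = true;
}
```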
diff --git a/nixpkgs/nixos/modules/system/boot/initrd-network.nix b/nixpkgs/nixos/modules/system/boot/initrd-network.nix
new file mode 100644
index 000000000000..88ba43caf003
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/initrd-network.nix
@@ -0,0 +1,162 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.boot.initrd.network;
+
+  dhcpInterfaces = lib.attrNames (lib.filterAttrs (iface: v: v.useDHCP == true) (config.networking.interfaces or {}));
+  doDhcp = cfg.udhcpc.enable || dhcpInterfaces != [];
+  dhcpIfShellExpr = if config.networking.useDHCP || cfg.udhcpc.enable
+                      then "$(ls /sys/class/net/ | grep -v ^lo$)"
+                      else lib.concatMapStringsSep " " lib.escapeShellArg dhcpInterfaces;
+
+  udhcpcScript = pkgs.writeScript "udhcp-script"
+    ''
+      #! /bin/sh
+      if [ "$1" = bound ]; then
+        ip address add "$ip/$mask" dev "$interface"
+        if [ -n "$mtu" ]; then
+          ip link set mtu "$mtu" dev "$interface"
+        fi
+        if [ -n "$staticroutes" ]; then
+          echo "$staticroutes" \
+            | sed -r "s@(\S+) (\S+)@ ip route add \"\1\" via \"\2\" dev \"$interface\" ; @g" \
+            | sed -r "s@ via \"0\.0\.0\.0\"@@g" \
+            | /bin/sh
+        fi
+        if [ -n "$router" ]; then
+          ip route add "$router" dev "$interface" # just in case if "$router" is not within "$ip/$mask" (e.g. Hetzner Cloud)
+          ip route add default via "$router" dev "$interface"
+        fi
+        if [ -n "$dns" ]; then
+          rm -f /etc/resolv.conf
+          for server in $dns; do
+            echo "nameserver $server" >> /etc/resolv.conf
+          done
+        fi
+      fi
+    '';
+
+  udhcpcArgs = toString cfg.udhcpc.extraArgs;
+
+in
+
+{
+
+  options = {
+
+    boot.initrd.network.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Add network connectivity support to initrd. The network may be
+        configured using the `ip` kernel parameter,
+        as described in [the kernel documentation](https://www.kernel.org/doc/Documentation/filesystems/nfs/nfsroot.txt).
+        Otherwise, if
+        {option}`networking.useDHCP` is enabled, an IP address
+        is acquired using DHCP.
+
+        You should add the module(s) required for your network card to
+        boot.initrd.availableKernelModules.
+        `lspci -v | grep -iA8 'network\|ethernet'`
+        will tell you which.
+      '';
+    };
+
+    boot.initrd.network.flushBeforeStage2 = mkOption {
+      type = types.bool;
+      default = !config.boot.initrd.systemd.enable;
+      defaultText = "!config.boot.initrd.systemd.enable";
+      description = lib.mdDoc ''
+        Whether to clear the configuration of the interfaces that were set up in
+        the initrd right before stage 2 takes over. Stage 2 will do the regular network
+        configuration based on the NixOS networking options.
+
+        The default is false when systemd is enabled in initrd,
+        because the systemd-networkd documentation suggests it.
+      '';
+    };
+
+    boot.initrd.network.udhcpc.enable = mkOption {
+      default = config.networking.useDHCP && !config.boot.initrd.systemd.enable;
+      defaultText = "config.networking.useDHCP && !config.boot.initrd.systemd.enable";
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enables the udhcpc service during stage 1 of the boot process. This
+        defaults to {option}`networking.useDHCP`. Therefore, this is useful if
+        useDHCP is off but the initramfs should still do DHCP.
+      '';
+    };
+
+    boot.initrd.network.udhcpc.extraArgs = mkOption {
+      default = [];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        Additional command-line arguments passed verbatim to
+        udhcpc if {option}`boot.initrd.network.enable` and
+        {option}`boot.initrd.network.udhcpc.enable` are enabled.
+      '';
+    };
+
+    boot.initrd.network.postCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed after stage 1 of the
+        boot has initialised the network.
+      '';
+    };
+
+
+  };
+
+  config = mkIf cfg.enable {
+
+    boot.initrd.kernelModules = [ "af_packet" ];
+
+    boot.initrd.extraUtilsCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      copy_bin_and_libs ${pkgs.klibc}/lib/klibc/bin.static/ipconfig
+    '';
+
+    boot.initrd.preLVMCommands = mkIf (!config.boot.initrd.systemd.enable) (mkBefore (
+      # Search for interface definitions in command line.
+      ''
+        ifaces=""
+        for o in $(cat /proc/cmdline); do
+          case $o in
+            ip=*)
+              ipconfig $o && ifaces="$ifaces $(echo $o | cut -d: -f6)"
+              ;;
+          esac
+        done
+      ''
+
+      # Otherwise, use DHCP.
+      + optionalString doDhcp ''
+        # Bring up all interfaces.
+        for iface in ${dhcpIfShellExpr}; do
+          echo "bringing up network interface $iface..."
+          ip link set dev "$iface" up && ifaces="$ifaces $iface"
+        done
+
+        # Acquire DHCP leases.
+        for iface in ${dhcpIfShellExpr}; do
+          echo "acquiring IP address via DHCP on $iface..."
+          udhcpc --quit --now -i $iface -O staticroutes --script ${udhcpcScript} ${udhcpcArgs}
+        done
+      ''
+
+      + cfg.postCommands));
+
+    boot.initrd.postMountCommands = mkIf (cfg.flushBeforeStage2 && !config.boot.initrd.systemd.enable) ''
+      for iface in $ifaces; do
+        ip address flush dev "$iface"
+        ip link set dev "$iface" down
+      done
+    '';
+
+  };
+
+}
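A hypothetical fragment (not part of the patch) showing how these options fit together: enable initrd networking, make the NIC driver available in the initrd as the `enable` option's description suggests, and run a command once the network is up. `e1000e` is only an example of a NIC driver; find yours with `lspci -v`:

```nix
{
  boot.initrd.network.enable = true;
  # Driver for the network card, so the initrd can bring the interface up.
  boot.initrd.availableKernelModules = [ "e1000e" ];
  boot.initrd.network.postCommands = ''
    echo "initrd network is up"
  '';
}
```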
diff --git a/nixpkgs/nixos/modules/system/boot/initrd-openvpn.nix b/nixpkgs/nixos/modules/system/boot/initrd-openvpn.nix
new file mode 100644
index 000000000000..2530240628e4
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/initrd-openvpn.nix
@@ -0,0 +1,91 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.boot.initrd.network.openvpn;
+
+in
+
+{
+
+  options = {
+
+    boot.initrd.network.openvpn.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Starts an OpenVPN client during initrd boot. It can be used, for
+        example, to remotely access the SSH service controlled by
+        {option}`boot.initrd.network.ssh` or other included network services.
+        The service is killed when stage-1 boot is finished.
+      '';
+    };
+
+    boot.initrd.network.openvpn.configuration = mkOption {
+      type = types.path; # Same type as boot.initrd.secrets
+      description = lib.mdDoc ''
+        The configuration file for OpenVPN.
+
+        ::: {.warning}
+        Unless your bootloader supports initrd secrets, this configuration
+        is stored insecurely in the global Nix store.
+        :::
+      '';
+      example = literalExpression "./configuration.ovpn";
+    };
+
+  };
+
+  config = mkIf (config.boot.initrd.network.enable && cfg.enable) {
+    assertions = [
+      {
+        assertion = cfg.configuration != null;
+        message = "You should specify a configuration for initrd OpenVPN";
+      }
+    ];
+
+    # Add kernel modules needed for OpenVPN
+    boot.initrd.kernelModules = [ "tun" "tap" ];
+
+    # Add openvpn and ip binaries to the initrd
+    # The shared libraries are required for DNS resolution
+    boot.initrd.extraUtilsCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      copy_bin_and_libs ${pkgs.openvpn}/bin/openvpn
+      copy_bin_and_libs ${pkgs.iproute2}/bin/ip
+
+      cp -pv ${pkgs.glibc}/lib/libresolv.so.2 $out/lib
+      cp -pv ${pkgs.glibc}/lib/libnss_dns.so.2 $out/lib
+    '';
+
+    boot.initrd.systemd.storePaths = [
+      "${pkgs.openvpn}/bin/openvpn"
+      "${pkgs.iproute2}/bin/ip"
+      "${pkgs.glibc}/lib/libresolv.so.2"
+      "${pkgs.glibc}/lib/libnss_dns.so.2"
+    ];
+
+    boot.initrd.secrets = {
+      "/etc/initrd.ovpn" = cfg.configuration;
+    };
+
+    # openvpn --version would exit with 1 instead of 0
+    boot.initrd.extraUtilsCommandsTest = mkIf (!config.boot.initrd.systemd.enable) ''
+      $out/bin/openvpn --show-gateway
+    '';
+
+    boot.initrd.network.postCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      openvpn /etc/initrd.ovpn &
+    '';
+
+    boot.initrd.systemd.services.openvpn = {
+      wantedBy = [ "initrd.target" ];
+      path = [ pkgs.iproute2 ];
+      after = [ "network.target" "initrd-nixos-copy-secrets.service" ];
+      serviceConfig.ExecStart = "${pkgs.openvpn}/bin/openvpn /etc/initrd.ovpn";
+      serviceConfig.Type = "notify";
+    };
+  };
+
+}
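A hypothetical fragment (not part of the patch) combining this with initrd networking; the configuration file path is a placeholder, and note the warning above that it ends up in the world-readable Nix store unless the bootloader supports initrd secrets:

```nix
{
  boot.initrd.network.enable = true;
  boot.initrd.network.openvpn = {
    enable = true;
    configuration = ./initrd-vpn.ovpn; # placeholder path
  };
}
```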
diff --git a/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix b/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix
new file mode 100644
index 000000000000..a8cd2e8f05fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/initrd-ssh.nix
@@ -0,0 +1,267 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.boot.initrd.network.ssh;
+  shell = if cfg.shell == null then "/bin/ash" else cfg.shell;
+  inherit (config.programs.ssh) package;
+
+  enabled = let initrd = config.boot.initrd; in (initrd.network.enable || initrd.systemd.network.enable) && cfg.enable;
+
+in
+
+{
+
+  options.boot.initrd.network.ssh = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Start the SSH service during initrd boot. It can be used to debug a
+        failing boot on a remote server, enter the passphrase for an encrypted
+        partition, etc. The service is killed when stage-1 boot is finished.
+
+        The sshd configuration is largely inherited from
+        {option}`services.openssh`.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 22;
+      description = lib.mdDoc ''
+        Port on which SSH initrd service should listen.
+      '';
+    };
+
+    shell = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      defaultText = ''"/bin/ash"'';
+      description = lib.mdDoc ''
+        Login shell of the remote user. Can be used to limit the actions the user can do.
+      '';
+    };
+
+    hostKeys = mkOption {
+      type = types.listOf (types.either types.str types.path);
+      default = [];
+      example = [
+        "/etc/secrets/initrd/ssh_host_rsa_key"
+        "/etc/secrets/initrd/ssh_host_ed25519_key"
+      ];
+      description = lib.mdDoc ''
+        Specify SSH host keys to import into the initrd.
+
+        To generate keys, use
+        {manpage}`ssh-keygen(1)`
+        as root:
+
+        ```
+        ssh-keygen -t rsa -N "" -f /etc/secrets/initrd/ssh_host_rsa_key
+        ssh-keygen -t ed25519 -N "" -f /etc/secrets/initrd/ssh_host_ed25519_key
+        ```
+
+        ::: {.warning}
+        Unless your bootloader supports initrd secrets, these keys
+        are stored insecurely in the global Nix store. Do NOT use
+        your regular SSH host private keys for this purpose or
+        you'll expose them to regular users!
+
+        Additionally, even if your initrd supports secrets, if
+        you're using initrd SSH to unlock an encrypted disk then
+        using your regular host keys exposes the private keys on
+        your unencrypted boot partition.
+        :::
+      '';
+    };
+
+    ignoreEmptyHostKeys = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Allow leaving {option}`boot.initrd.network.ssh.hostKeys` empty,
+        to deploy SSH host keys out of band.
+      '';
+    };
+
+    authorizedKeys = mkOption {
+      type = types.listOf types.str;
+      default = config.users.users.root.openssh.authorizedKeys.keys;
+      defaultText = literalExpression "config.users.users.root.openssh.authorizedKeys.keys";
+      description = lib.mdDoc ''
+        Authorized keys for the root user on initrd.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc "Verbatim contents of {file}`sshd_config`.";
+    };
+  };
+
+  imports =
+    map (opt: mkRemovedOptionModule ([ "boot" "initrd" "network" "ssh" ] ++ [ opt ]) ''
+      The initrd SSH functionality now uses OpenSSH rather than Dropbear.
+
+      If you want to keep your existing initrd SSH host keys, convert them with
+        $ dropbearconvert dropbear openssh dropbear_host_$type_key ssh_host_$type_key
+      and then set options.boot.initrd.network.ssh.hostKeys.
+    '') [ "hostRSAKey" "hostDSSKey" "hostECDSAKey" ];
+
+  config = let
+    # Nix complains if you include a store hash in initrd path names, so
+    # as an awful hack we drop the first character of the hash.
+    initrdKeyPath = path: if isString path
+      then path
+      else let name = builtins.baseNameOf path; in
+        builtins.unsafeDiscardStringContext ("/etc/ssh/" +
+          substring 1 (stringLength name) name);
+
+    sshdCfg = config.services.openssh;
+
+    sshdConfig = ''
+      UsePAM no
+      Port ${toString cfg.port}
+
+      PasswordAuthentication no
+      AuthorizedKeysFile %h/.ssh/authorized_keys %h/.ssh/authorized_keys2 /etc/ssh/authorized_keys.d/%u
+      ChallengeResponseAuthentication no
+
+      ${flip concatMapStrings cfg.hostKeys (path: ''
+        HostKey ${initrdKeyPath path}
+      '')}
+
+      KexAlgorithms ${concatStringsSep "," sshdCfg.settings.KexAlgorithms}
+      Ciphers ${concatStringsSep "," sshdCfg.settings.Ciphers}
+      MACs ${concatStringsSep "," sshdCfg.settings.Macs}
+
+      LogLevel ${sshdCfg.settings.LogLevel}
+
+      ${if sshdCfg.settings.UseDns then ''
+        UseDNS yes
+      '' else ''
+        UseDNS no
+      ''}
+
+      ${cfg.extraConfig}
+    '';
+  in mkIf enabled {
+    assertions = [
+      {
+        assertion = cfg.authorizedKeys != [];
+        message = "You should specify at least one authorized key for initrd SSH";
+      }
+
+      {
+        assertion = (cfg.hostKeys != []) || cfg.ignoreEmptyHostKeys;
+        message = ''
+          You must now pre-generate the host keys for initrd SSH.
+          See the boot.initrd.network.ssh.hostKeys documentation
+          for instructions.
+        '';
+      }
+    ];
+
+    warnings = lib.optional (config.boot.initrd.systemd.enable && cfg.shell != null) ''
+      Please set 'boot.initrd.systemd.users.root.shell' instead of 'boot.initrd.network.ssh.shell'
+    '';
+
+    boot.initrd.extraUtilsCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      copy_bin_and_libs ${package}/bin/sshd
+      cp -pv ${pkgs.glibc.out}/lib/libnss_files.so.* $out/lib
+    '';
+
+    boot.initrd.extraUtilsCommandsTest = mkIf (!config.boot.initrd.systemd.enable) ''
+      # sshd requires a host key to check config, so we pass in the test's
+      tmpkey="$(mktemp initrd-ssh-testkey.XXXXXXXXXX)"
+      cp "${../../../tests/initrd-network-ssh/ssh_host_ed25519_key}" "$tmpkey"
+      # keys from Nix store are world-readable, which sshd doesn't like
+      chmod 600 "$tmpkey"
+      echo -n ${escapeShellArg sshdConfig} |
+        $out/bin/sshd -t -f /dev/stdin \
+        -h "$tmpkey"
+      rm "$tmpkey"
+    '';
+
+    boot.initrd.network.postCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      echo '${shell}' > /etc/shells
+      echo 'root:x:0:0:root:/root:${shell}' > /etc/passwd
+      echo 'sshd:x:1:1:sshd:/var/empty:/bin/nologin' >> /etc/passwd
+      echo 'passwd: files' > /etc/nsswitch.conf
+
+      mkdir -p /var/log /var/empty
+      touch /var/log/lastlog
+
+      mkdir -p /etc/ssh
+      echo -n ${escapeShellArg sshdConfig} > /etc/ssh/sshd_config
+
+      echo "export PATH=$PATH" >> /etc/profile
+      echo "export LD_LIBRARY_PATH=$LD_LIBRARY_PATH" >> /etc/profile
+
+      mkdir -p /root/.ssh
+      ${concatStrings (map (key: ''
+        echo ${escapeShellArg key} >> /root/.ssh/authorized_keys
+      '') cfg.authorizedKeys)}
+
+      ${flip concatMapStrings cfg.hostKeys (path: ''
+        # keys from Nix store are world-readable, which sshd doesn't like
+        chmod 0600 "${initrdKeyPath path}"
+      '')}
+
+      /bin/sshd -e
+    '';
+
+    boot.initrd.postMountCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      # Stop sshd cleanly before stage 2.
+      #
+      # If you want to keep it around to debug post-mount SSH issues,
+      # run `touch /.keep_sshd` (either from an SSH session or in
+      # another initrd hook like preDeviceCommands).
+      if ! [ -e /.keep_sshd ]; then
+        pkill -x sshd
+      fi
+    '';
+
+    boot.initrd.secrets = listToAttrs
+      (map (path: nameValuePair (initrdKeyPath path) path) cfg.hostKeys);
+
+    # Systemd initrd stuff
+    boot.initrd.systemd = mkIf config.boot.initrd.systemd.enable {
+      users.sshd = { uid = 1; group = "sshd"; };
+      groups.sshd = { gid = 1; };
+
+      users.root.shell = mkIf (config.boot.initrd.network.ssh.shell != null) config.boot.initrd.network.ssh.shell;
+
+      contents."/etc/ssh/authorized_keys.d/root".text =
+        concatStringsSep "\n" config.boot.initrd.network.ssh.authorizedKeys;
+      contents."/etc/ssh/sshd_config".text = sshdConfig;
+      storePaths = ["${package}/bin/sshd"];
+
+      services.sshd = {
+        description = "SSH Daemon";
+        wantedBy = ["initrd.target"];
+        after = ["network.target" "initrd-nixos-copy-secrets.service"];
+
+        # Keys from Nix store are world-readable, which sshd doesn't
+        # like. If this were a real nix store and not the initrd, we
+        # neither would nor could do this
+        preStart = flip concatMapStrings cfg.hostKeys (path: ''
+          /bin/chmod 0600 "${initrdKeyPath path}"
+        '');
+        unitConfig.DefaultDependencies = false;
+        serviceConfig = {
+          ExecStart = "${package}/bin/sshd -D -f /etc/ssh/sshd_config";
+          Type = "simple";
+          KillMode = "process";
+          Restart = "on-failure";
+        };
+      };
+    };
+
+  };
+
+}
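A hypothetical fragment (not part of the patch) enabling initrd SSH, e.g. to unlock an encrypted root remotely. The host key path follows the option's own example (pre-generated with `ssh-keygen` as described above); the port and authorized key are placeholders:

```nix
{
  boot.initrd.network.enable = true;
  boot.initrd.network.ssh = {
    enable = true;
    port = 2222; # arbitrary non-default port
    hostKeys = [ "/etc/secrets/initrd/ssh_host_ed25519_key" ];
    authorizedKeys = [ "ssh-ed25519 AAAA... admin@example" ]; # placeholder key
  };
}
```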
diff --git a/nixpkgs/nixos/modules/system/boot/kernel.nix b/nixpkgs/nixos/modules/system/boot/kernel.nix
new file mode 100644
index 000000000000..a46331ccd431
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/kernel.nix
@@ -0,0 +1,429 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (config.boot) kernelPatches;
+  inherit (config.boot.kernel) features randstructSeed;
+  inherit (config.boot.kernelPackages) kernel;
+
+  kernelModulesConf = pkgs.writeText "nixos.conf"
+    ''
+      ${concatStringsSep "\n" config.boot.kernelModules}
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+    boot.kernel.enable = mkEnableOption (lib.mdDoc "the Linux kernel. This is useful for systemd-like containers which do not require a kernel") // {
+      default = true;
+    };
+
+    boot.kernel.features = mkOption {
+      default = {};
+      example = literalExpression "{ debug = true; }";
+      internal = true;
+      description = lib.mdDoc ''
+        This option allows enabling or disabling certain kernel features.
+        It's not an API, because it's about kernel feature sets that
+        make sense for specific use cases, mostly together with programs
+        which would have separate NixOS options.
+        `grep features pkgs/os-specific/linux/kernel/common-config.nix`
+      '';
+    };
+
+    boot.kernelPackages = mkOption {
+      default = pkgs.linuxPackages;
+      type = types.raw;
+      apply = kernelPackages: kernelPackages.extend (self: super: {
+        kernel = super.kernel.override (originalArgs: {
+          inherit randstructSeed;
+          kernelPatches = (originalArgs.kernelPatches or []) ++ kernelPatches;
+          features = lib.recursiveUpdate super.kernel.features features;
+        });
+      });
+      # We don't want to evaluate all of linuxPackages for the manual
+      # - some of it might not even evaluate correctly.
+      defaultText = literalExpression "pkgs.linuxPackages";
+      example = literalExpression "pkgs.linuxKernel.packages.linux_5_10";
+      description = lib.mdDoc ''
+        This option allows you to override the Linux kernel used by
+        NixOS.  Since things like external kernel module packages are
+        tied to the kernel you're using, it also overrides those.
+        This option is a function that takes Nixpkgs as an argument
+        (as a convenience), and returns an attribute set containing at
+        the very least an attribute {var}`kernel`.
+        Additional attributes may be needed depending on your
+        configuration.  For instance, if you use the NVIDIA X driver,
+        then it also needs to contain an attribute
+        {var}`nvidia_x11`.
+
+        Please note that we strictly support kernel versions that are
+        maintained by the Linux developers only. More information on the
+        availability of kernel versions is documented
+        [in the Linux section of the manual](https://nixos.org/manual/nixos/unstable/index.html#sec-kernel-config).
+      '';
+    };
+
+    boot.kernelPatches = mkOption {
+      type = types.listOf types.attrs;
+      default = [];
+      example = literalExpression ''
+        [
+          {
+            name = "foo";
+            patch = ./foo.patch;
+            extraStructuredConfig.FOO = lib.kernel.yes;
+            features.foo = true;
+          }
+        ]
+      '';
+      description = lib.mdDoc ''
+        A list of additional patches to apply to the kernel.
+
+        Every item should be an attribute set with the following attributes:
+
+        ```nix
+        {
+          name = "foo";                 # descriptive name, required
+
+          patch = ./foo.patch;          # path or derivation that contains the patch source
+                                        # (required, but can be null if only config changes
+                                        # are needed)
+
+          extraStructuredConfig = {     # attrset of extra configuration parameters without the CONFIG_ prefix
+            FOO = lib.kernel.yes;       # (optional)
+          };                            # values should generally be lib.kernel.yes,
+                                        # lib.kernel.no or lib.kernel.module
+
+          features = {                  # attrset of extra "features" the kernel is considered to have
+            foo = true;                 # (may be checked by other NixOS modules, optional)
+          };
+
+          extraConfig = "FOO y";        # extra configuration options in string form without the CONFIG_ prefix
+                                        # (optional, multiple lines allowed to specify multiple options)
+                                        # (deprecated, use extraStructuredConfig instead)
+        }
+        ```
+
+        There's a small set of existing kernel patches in Nixpkgs, available as `pkgs.kernelPatches`,
+        that follow this format and can be used directly.
+      '';
+    };
+
+    boot.kernel.randstructSeed = mkOption {
+      type = types.str;
+      default = "";
+      example = "my secret seed";
+      description = lib.mdDoc ''
+        Provides a custom seed for the {var}`RANDSTRUCT` security
+        option of the Linux kernel. Note that {var}`RANDSTRUCT` is
+        only enabled in NixOS hardened kernels. Using a custom seed requires
+        building the kernel and dependent packages locally, since this
+        customization happens at build time.
+      '';
+    };
+
+    boot.kernelParams = mkOption {
+      type = types.listOf (types.strMatching ''([^"[:space:]]|"[^"]*")+'' // {
+        name = "kernelParam";
+        description = "string, with spaces inside double quotes";
+      });
+      default = [ ];
+      description = lib.mdDoc "Parameters added to the kernel command line.";
+    };
+
+    boot.consoleLogLevel = mkOption {
+      type = types.int;
+      default = 4;
+      description = lib.mdDoc ''
+        The kernel console `loglevel`. All kernel messages with a log level smaller
+        than this setting will be printed to the console.
+      '';
+    };
+
+    boot.vesa = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        (Deprecated) This option, if set, activates the VESA 800x600 video
+        mode on boot and disables kernel modesetting. It is equivalent to
+        specifying `[ "vga=0x317" "nomodeset" ]` in the
+        {option}`boot.kernelParams` option. This option is
+        deprecated as of 2020: Xorg now works better with modesetting, and
+        you might want a different VESA vga setting, anyway.
+      '';
+    };
+
+    boot.extraModulePackages = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression "[ config.boot.kernelPackages.nvidia_x11 ]";
+      description = lib.mdDoc "A list of additional packages supplying kernel modules.";
+    };
+
+    boot.kernelModules = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc ''
+        The set of kernel modules to be loaded in the second stage of
+        the boot process.  Note that modules that are needed to
+        mount the root file system should be added to
+        {option}`boot.initrd.availableKernelModules` or
+        {option}`boot.initrd.kernelModules`.
+      '';
+    };
+
+    boot.initrd.availableKernelModules = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "sata_nv" "ext3" ];
+      description = lib.mdDoc ''
+        The set of kernel modules in the initial ramdisk used during the
+        boot process.  This set must include all modules necessary for
+        mounting the root device.  That is, it should include modules
+        for the physical device (e.g., SCSI drivers) and for the file
+        system (e.g., ext3).  The set specified here is automatically
+        closed under the module dependency relation, i.e., all
+        dependencies of the modules listed here are included
+        automatically.  The modules listed here are available in the
+        initrd, but are only loaded on demand (e.g., the ext3 module is
+        loaded automatically when an ext3 filesystem is mounted, and
+        modules for PCI devices are loaded when they match the PCI ID
+        of a device in your system).  To force a module to be loaded,
+        include it in {option}`boot.initrd.kernelModules`.
+      '';
+    };
+
+    boot.initrd.kernelModules = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      description = lib.mdDoc "List of modules that are always loaded by the initrd.";
+    };
+
+    boot.initrd.includeDefaultModules = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        This option, if set, adds a collection of default kernel modules
+        to {option}`boot.initrd.availableKernelModules` and
+        {option}`boot.initrd.kernelModules`.
+      '';
+    };
+
+    system.modulesTree = mkOption {
+      type = types.listOf types.path;
+      internal = true;
+      default = [];
+      description = lib.mdDoc ''
+        Tree of kernel modules.  This includes the kernel, plus modules
+        built outside of the kernel.  Combine these into a single tree of
+        symlinks because modprobe only supports one directory.
+      '';
+      # Convert the list of path to only one path.
+      apply = pkgs.aggregateModules;
+    };
+
+    system.requiredKernelConfig = mkOption {
+      default = [];
+      example = literalExpression ''
+        with config.lib.kernelConfig; [
+          (isYes "MODULES")
+          (isEnabled "FB_CON_DECOR")
+          (isEnabled "BLK_DEV_INITRD")
+        ]
+      '';
+      internal = true;
+      type = types.listOf types.attrs;
+      description = lib.mdDoc ''
+        This option allows modules to specify the kernel config options that
+        must be set (or unset) for the module to work. Please use the
+        lib.kernelConfig functions to build list elements.
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge
+    [ (mkIf config.boot.initrd.enable {
+        boot.initrd.availableKernelModules =
+          optionals config.boot.initrd.includeDefaultModules ([
+            # Note: most of these (especially the SATA/PATA modules)
+            # shouldn't be included by default since nixos-generate-config
+            # detects them, but I'm keeping them for now for backwards
+            # compatibility.
+
+            # Some SATA/PATA stuff.
+            "ahci"
+            "sata_nv"
+            "sata_via"
+            "sata_sis"
+            "sata_uli"
+            "ata_piix"
+            "pata_marvell"
+
+            # NVMe
+            "nvme"
+
+            # Standard SCSI stuff.
+            "sd_mod"
+            "sr_mod"
+
+            # SD cards and internal eMMC drives.
+            "mmc_block"
+
+            # Support USB keyboards, in case the boot fails and we only have
+            # a USB keyboard, or for LUKS passphrase prompt.
+            "uhci_hcd"
+            "ehci_hcd"
+            "ehci_pci"
+            "ohci_hcd"
+            "ohci_pci"
+            "xhci_hcd"
+            "xhci_pci"
+            "usbhid"
+            "hid_generic" "hid_lenovo" "hid_apple" "hid_roccat"
+            "hid_logitech_hidpp" "hid_logitech_dj" "hid_microsoft" "hid_cherry"
+
+          ] ++ optionals pkgs.stdenv.hostPlatform.isx86 [
+            # Misc. x86 keyboard stuff.
+            "pcips2" "atkbd" "i8042"
+
+            # x86 RTC needed by the stage 2 init script.
+            "rtc_cmos"
+          ]);
+
+        boot.initrd.kernelModules =
+          optionals config.boot.initrd.includeDefaultModules [
+            # For LVM.
+            "dm_mod"
+          ];
+      })
+
+      (mkIf config.boot.kernel.enable {
+        system.build = { inherit kernel; };
+
+        system.modulesTree = [ kernel ] ++ config.boot.extraModulePackages;
+
+        # Not required for, e.g., containers as they don't have their own kernel or initrd.
+        # They boot directly into stage 2.
+        system.systemBuilderArgs.kernelParams = config.boot.kernelParams;
+        system.systemBuilderCommands =
+          let
+            kernelPath = "${config.boot.kernelPackages.kernel}/" +
+              "${config.system.boot.loader.kernelFile}";
+            initrdPath = "${config.system.build.initialRamdisk}/" +
+              "${config.system.boot.loader.initrdFile}";
+          in
+          ''
+            if [ ! -f ${kernelPath} ]; then
+              echo "The bootloader cannot find the proper kernel image."
+              echo "(Expecting ${kernelPath})"
+              false
+            fi
+
+            ln -s ${kernelPath} $out/kernel
+            ln -s ${config.system.modulesTree} $out/kernel-modules
+            ${optionalString (config.hardware.deviceTree.package != null) ''
+              ln -s ${config.hardware.deviceTree.package} $out/dtbs
+            ''}
+
+            echo -n "$kernelParams" > $out/kernel-params
+
+            ln -s ${initrdPath} $out/initrd
+
+            ln -s ${config.system.build.initialRamdiskSecretAppender}/bin/append-initrd-secrets $out
+
+            ln -s ${config.hardware.firmware}/lib/firmware $out/firmware
+          '';
+
+        # Implement consoleLogLevel both in early boot and using sysctl
+        # (so you don't need to reboot to have changes take effect).
+        boot.kernelParams =
+          [ "loglevel=${toString config.boot.consoleLogLevel}" ] ++
+          optionals config.boot.vesa [ "vga=0x317" "nomodeset" ];
+
+        boot.kernel.sysctl."kernel.printk" = mkDefault config.boot.consoleLogLevel;
+
+        boot.kernelModules = [ "loop" "atkbd" ];
+
+        # Create /etc/modules-load.d/nixos.conf, which is read by
+        # systemd-modules-load.service to load required kernel modules.
+        environment.etc =
+          { "modules-load.d/nixos.conf".source = kernelModulesConf;
+          };
+
+        systemd.services.systemd-modules-load =
+          { wantedBy = [ "multi-user.target" ];
+            restartTriggers = [ kernelModulesConf ];
+            serviceConfig =
+              { # Ignore failed module loads.  Typically some of the
+                # modules in ‘boot.kernelModules’ are "nice to have but
+                # not required" (e.g. acpi-cpufreq), so we don't want to
+                # barf on those.
+                SuccessExitStatus = "0 1";
+              };
+          };
+
+        lib.kernelConfig = {
+          isYes = option: {
+            assertion = config: config.isYes option;
+            message = "CONFIG_${option} is not yes!";
+            configLine = "CONFIG_${option}=y";
+          };
+
+          isNo = option: {
+            assertion = config: config.isNo option;
+            message = "CONFIG_${option} is not no!";
+            configLine = "CONFIG_${option}=n";
+          };
+
+          isModule = option: {
+            assertion = config: config.isModule option;
+            message = "CONFIG_${option} is not built as a module!";
+            configLine = "CONFIG_${option}=m";
+          };
+
+          ### Usually you will just want to use these two
+          # True if yes or module
+          isEnabled = option: {
+            assertion = config: config.isEnabled option;
+            message = "CONFIG_${option} is not enabled!";
+            configLine = "CONFIG_${option}=y";
+          };
+
+          # True if no or omitted
+          isDisabled = option: {
+            assertion = config: config.isDisabled option;
+            message = "CONFIG_${option} is not disabled!";
+            configLine = "CONFIG_${option}=n";
+          };
+        };
+
+        # The config options that all modules can depend upon
+        system.requiredKernelConfig = with config.lib.kernelConfig;
+          [
+            # !!! Should this really be needed?
+            (isYes "MODULES")
+            (isYes "BINFMT_ELF")
+          ] ++ (optional (randstructSeed != "") (isYes "GCC_PLUGIN_RANDSTRUCT"));
+
+        # nixpkgs kernels are assumed to have all required features
+        assertions = if config.boot.kernelPackages.kernel ? features then [] else
+          let cfg = config.boot.kernelPackages.kernel.config; in map (attrs:
+            { assertion = attrs.assertion cfg; inherit (attrs) message; }
+          ) config.system.requiredKernelConfig;
+
+      })
+
+    ];
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/kernel_config.nix b/nixpkgs/nixos/modules/system/boot/kernel_config.nix
new file mode 100644
index 000000000000..e618070f0dc3
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/kernel_config.nix
@@ -0,0 +1,116 @@
+{ lib, config, ... }:
+
+with lib;
+let
+  mergeFalseByDefault = locs: defs:
+    if defs == [] then abort "This case should never happen."
+    else if any (x: x == false) (getValues defs) then false
+    else true;
+
+  kernelItem = types.submodule {
+    options = {
+      tristate = mkOption {
+        type = types.enum [ "y" "m" "n" null ];
+        default = null;
+        internal = true;
+        visible = true;
+        description = lib.mdDoc ''
+          Use this field for tristate kernel options expecting a "y" or "m" or "n".
+        '';
+      };
+
+      freeform = mkOption {
+        type = types.nullOr types.str // {
+          merge = mergeEqualOption;
+        };
+        default = null;
+        example = ''MMC_BLOCK_MINORS.freeform = "32";'';
+        description = lib.mdDoc ''
+          Freeform description of a kernel configuration item value.
+        '';
+      };
+
+      optional = mkOption {
+        type = types.bool // { merge = mergeFalseByDefault; };
+        default = false;
+        description = lib.mdDoc ''
+          Whether the option should generate a failure when unused.
+          Upon merging values, mandatory wins over optional.
+        '';
+      };
+    };
+  };
+
+  mkValue = with lib; val:
+  let
+    isNumber = c: elem c ["0" "1" "2" "3" "4" "5" "6" "7" "8" "9"];
+
+  in
+    if (val == "") then "\"\""
+    else if val == "y" || val == "m" || val == "n" then val
+    else if all isNumber (stringToCharacters val) then val
+    else if substring 0 2 val == "0x" then val
+    else val; # FIXME: fix quoting one day
+
+
+  # generate nix intermediate kernel config file of the form
+  #
+  #       VIRTIO_MMIO m
+  #       VIRTIO_BLK y
+  #       VIRTIO_CONSOLE n
+  #       NET_9P_VIRTIO? y
+  #
+  # Borrowed from copumpkin https://github.com/NixOS/nixpkgs/pull/12158
+  # returns a string, expr should be an attribute set
+  # Use mkValuePreprocess to preprocess option values, aka mark 'modules' as 'yes' or vice-versa
+  # use the identity if you don't want to override the configured values
+  generateNixKConf = exprs:
+  let
+    mkConfigLine = key: item:
+      let
+        val = if item.freeform != null then item.freeform else item.tristate;
+      in
+        optionalString (val != null)
+            (if (item.optional)
+            then "${key}? ${mkValue val}\n"
+            else "${key} ${mkValue val}\n");
+
+    mkConf = cfg: concatStrings (mapAttrsToList mkConfigLine cfg);
+  in mkConf exprs;
+
+in
+{
+
+  options = {
+
+    intermediateNixConfig = mkOption {
+      readOnly = true;
+      type = types.lines;
+      example = ''
+        USB? y
+        DEBUG n
+      '';
+      description = lib.mdDoc ''
+        The result of converting the structured kernel configuration in settings
+        to an intermediate string that can be parsed by generate-config.pl to
+        answer the kernel `make defconfig`.
+      '';
+    };
+
+    settings = mkOption {
+      type = types.attrsOf kernelItem;
+      example = literalExpression '' with lib.kernel; {
+        "9P_NET" = yes;
+        USB = option yes;
+        MMC_BLOCK_MINORS = freeform "32";
+      }'';
+      description = lib.mdDoc ''
+        Structured kernel configuration.
+      '';
+    };
+  };
+
+  config = {
+    intermediateNixConfig = generateNixKConf config.settings;
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/kexec.nix b/nixpkgs/nixos/modules/system/boot/kexec.nix
new file mode 100644
index 000000000000..02c2713ede11
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/kexec.nix
@@ -0,0 +1,32 @@
+{ pkgs, lib, ... }:
+
+{
+  config = lib.mkIf (lib.meta.availableOn pkgs.stdenv.hostPlatform pkgs.kexec-tools) {
+    environment.systemPackages = [ pkgs.kexec-tools ];
+
+    systemd.services.prepare-kexec =
+      { description = "Preparation for kexec";
+        wantedBy = [ "kexec.target" ];
+        before = [ "systemd-kexec.service" ];
+        unitConfig.DefaultDependencies = false;
+        serviceConfig.Type = "oneshot";
+        path = [ pkgs.kexec-tools ];
+        script =
+          ''
+            # Don't load the current system profile if we already have a kernel loaded
+            if [[ 1 = "$(</sys/kernel/kexec_loaded)" ]] ; then
+              echo "kexec kernel has already been loaded, prepare-kexec skipped"
+              exit 0
+            fi
+
+            p=$(readlink -f /nix/var/nix/profiles/system)
+            if ! [[ -d $p ]]; then
+              echo "Could not find system profile for prepare-kexec"
+              exit 1
+            fi
+            echo "Loading NixOS system via kexec."
+            exec kexec --load $p/kernel --initrd=$p/initrd --append="$(cat $p/kernel-params) init=$p/init"
+          '';
+      };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/efi.nix b/nixpkgs/nixos/modules/system/boot/loader/efi.nix
new file mode 100644
index 000000000000..2661f362249d
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/efi.nix
@@ -0,0 +1,20 @@
+{ lib, ... }:
+
+with lib;
+
+{
+  options.boot.loader.efi = {
+
+    canTouchEfiVariables = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc "Whether the installation process is allowed to modify EFI boot variables.";
+    };
+
+    efiSysMountPoint = mkOption {
+      default = "/boot";
+      type = types.str;
+      description = lib.mdDoc "Where the EFI System Partition is mounted.";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/external/external.md b/nixpkgs/nixos/modules/system/boot/loader/external/external.md
new file mode 100644
index 000000000000..4f5b559dfc40
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/external/external.md
@@ -0,0 +1,26 @@
+# External Bootloader Backends {#sec-bootloader-external}
+
+NixOS has support for several bootloader backends by default: systemd-boot, grub, uboot, etc.
+The built-in bootloader backend support is generic and supports most use cases.
+Some users may prefer to create advanced workflows around managing the bootloader and bootable entries.
+
+You can replace the built-in bootloader support with your own tooling using the "external" bootloader option.
+
+Imagine you have created a new package called FooBoot.
+FooBoot provides a program at `${pkgs.fooboot}/bin/fooboot-install` which takes the system closure's path as its only argument and configures the system's bootloader.
+
+You can enable FooBoot like this:
+
+```nix
+{ pkgs, ... }: {
+  boot.loader.external = {
+    enable = true;
+    installHook = "${pkgs.fooboot}/bin/fooboot-install";
+  };
+}
+```
+
+## Developing Custom Bootloader Backends {#sec-bootloader-external-developing}
+
+Bootloaders should use [RFC-0125](https://github.com/NixOS/rfcs/pull/125)'s Bootspec format and synthesis tools to identify the key properties for bootable system generations.
+
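+To give a feel for the format, here is a rough, non-normative sketch of the data a Bootspec v1 document carries for one generation (the store paths below are placeholders; consult the RFC for the authoritative schema):
+
+```nix
+# Hypothetical sketch: the attribute set that would be serialized to the
+# generation's JSON Bootspec document.
+{
+  "org.nixos.bootspec.v1" = {
+    system = "x86_64-linux";
+    label = "NixOS (Generation 42)";
+    kernel = "/nix/store/placeholder-linux/bzImage";
+    initrd = "/nix/store/placeholder-initrd/initrd";
+    kernelParams = [ "loglevel=4" ];
+    init = "/nix/store/placeholder-system/init";
+    toplevel = "/nix/store/placeholder-system";
+  };
+}
+```
+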
diff --git a/nixpkgs/nixos/modules/system/boot/loader/external/external.nix b/nixpkgs/nixos/modules/system/boot/loader/external/external.nix
new file mode 100644
index 000000000000..78982356a9ea
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/external/external.nix
@@ -0,0 +1,36 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.boot.loader.external;
+in
+{
+  meta = {
+    maintainers = with maintainers; [ cole-h grahamc raitobezarius ];
+    doc = ./external.md;
+  };
+
+  options.boot.loader.external = {
+    enable = mkEnableOption (lib.mdDoc "using an external tool to install your bootloader");
+
+    installHook = mkOption {
+      type = with types; path;
+      description = lib.mdDoc ''
+        The full path to a program of your choosing which performs the bootloader installation process.
+
+        The program will be called with an argument pointing to the output of the system's toplevel.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.loader = {
+      grub.enable = mkDefault false;
+      systemd-boot.enable = mkDefault false;
+      supportsInitrdSecrets = mkDefault false;
+    };
+
+    system.build.installBootLoader = cfg.installHook;
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/generations-dir/generations-dir-builder.sh b/nixpkgs/nixos/modules/system/boot/loader/generations-dir/generations-dir-builder.sh
new file mode 100644
index 000000000000..8ae23dc988c2
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/generations-dir/generations-dir-builder.sh
@@ -0,0 +1,106 @@
+#! @bash@/bin/sh -e
+
+shopt -s nullglob
+
+export PATH=/empty
+for i in @path@; do PATH=$PATH:$i/bin; done
+
+default=$1
+if test -z "$1"; then
+    echo "Syntax: generations-dir-builder.sh <DEFAULT-CONFIG>"
+    exit 1
+fi
+
+echo "updating the boot generations directory..."
+
+mkdir -p /boot
+
+rm -Rf /boot/system* || true
+
+target=/boot/grub/menu.lst
+tmp=$target.tmp
+
+# Convert a path to a file in the Nix store such as
+# /nix/store/<hash>-<name>/file to <hash>-<name>-<file>.
+cleanName() {
+    local path="$1"
+    echo "$path" | sed 's|^/nix/store/||' | sed 's|/|-|g'
+}
+
+# Copy a file from the Nix store to /boot/kernels.
+declare -A filesCopied
+
+copyToKernelsDir() {
+    local src="$1"
+    local dst="/boot/kernels/$(cleanName $src)"
+    # Don't copy the file if $dst already exists.  This means that we
+    # have to create $dst atomically to prevent partially copied
+    # kernels or initrd if this script is ever interrupted.
+    if ! test -e $dst; then
+        local dstTmp=$dst.tmp.$$
+        cp $src $dstTmp
+        mv $dstTmp $dst
+    fi
+    filesCopied[$dst]=1
+    result=$dst
+}
+
+
+# Add an entry for a system generation: copy its kernel and initrd to /boot/kernels.
+addEntry() {
+    local path="$1"
+    local generation="$2"
+    local outdir=/boot/system-$generation
+
+    if ! test -e $path/kernel -a -e $path/initrd; then
+        return
+    fi
+
+    local kernel=$(readlink -f $path/kernel)
+    local initrd=$(readlink -f $path/initrd)
+
+    if test -n "@copyKernels@"; then
+        copyToKernelsDir $kernel; kernel=$result
+        copyToKernelsDir $initrd; initrd=$result
+    fi
+
+    mkdir -p $outdir
+    ln -sf $(readlink -f $path) $outdir/system
+    ln -sf $(readlink -f $path/init) $outdir/init
+    ln -sf $initrd $outdir/initrd
+    ln -sf $kernel $outdir/kernel
+
+    if test $(readlink -f "$path") = "$default"; then
+      cp "$kernel" /boot/nixos-kernel
+      cp "$initrd" /boot/nixos-initrd
+      cp "$(readlink -f "$path/init")" /boot/nixos-init
+
+      mkdir -p /boot/default
+      # ln -sfT: overrides target even if it exists.
+      ln -sfT $(readlink -f $path) /boot/default/system
+      ln -sfT $(readlink -f $path/init) /boot/default/init
+      ln -sfT $initrd /boot/default/initrd
+      ln -sfT $kernel /boot/default/kernel
+    fi
+}
+
+if test -n "@copyKernels@"; then
+    mkdir -p /boot/kernels
+fi
+
+# Add all generations of the system profile to the menu, in reverse
+# (most recent to least recent) order.
+for generation in $(
+    (cd /nix/var/nix/profiles && ls -d system-*-link) \
+    | sed 's/system-\([0-9]\+\)-link/\1/' \
+    | sort -n -r); do
+    link=/nix/var/nix/profiles/system-$generation-link
+    addEntry $link $generation
+done
+
+# Remove obsolete files from /boot/kernels.
+for fn in /boot/kernels/*; do
+    if ! test "${filesCopied[$fn]}" = 1; then
+        rm -vf -- "$fn"
+    fi
+done
diff --git a/nixpkgs/nixos/modules/system/boot/loader/generations-dir/generations-dir.nix b/nixpkgs/nixos/modules/system/boot/loader/generations-dir/generations-dir.nix
new file mode 100644
index 000000000000..5ace5dd06fd4
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/generations-dir/generations-dir.nix
@@ -0,0 +1,62 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  generationsDirBuilder = pkgs.substituteAll {
+    src = ./generations-dir-builder.sh;
+    isExecutable = true;
+    inherit (pkgs) bash;
+    path = [pkgs.coreutils pkgs.gnused pkgs.gnugrep];
+    inherit (config.boot.loader.generationsDir) copyKernels;
+  };
+
+in
+
+{
+  options = {
+
+    boot.loader.generationsDir = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to create symlinks to the system generations under
+          `/boot`.  When enabled,
+          `/boot/default/kernel`,
+          `/boot/default/initrd`, etc., are updated to
+          point to the current generation's kernel image, initial RAM
+          disk, and other bootstrap files.
+
+          This option is not necessary with boot loaders such as GNU GRUB
+          for which the menu is updated to point to the latest bootstrap
+          files.  However, it is needed for U-Boot on platforms where the
+          boot command line is stored in flash memory rather than in a
+          menu file.
+        '';
+      };
+
+      copyKernels = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to copy the necessary boot files into /boot, so that
+          /nix/store is not needed by the boot loader.
+        '';
+      };
+
+    };
+
+  };
+
+
+  config = mkIf config.boot.loader.generationsDir.enable {
+
+    system.build.installBootLoader = generationsDirBuilder;
+    system.boot.loader.id = "generationsDir";
+    system.boot.loader.kernelFile = pkgs.stdenv.hostPlatform.linux-kernel.target;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/default.nix b/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/default.nix
new file mode 100644
index 000000000000..13df60907116
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/default.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  blCfg = config.boot.loader;
+  dtCfg = config.hardware.deviceTree;
+  cfg = blCfg.generic-extlinux-compatible;
+
+  timeoutStr = if blCfg.timeout == null then "-1" else toString blCfg.timeout;
+
+  # The builder used to write during system activation
+  builder = import ./extlinux-conf-builder.nix { inherit pkgs; };
+  # The builder exposed in populateCmd, which runs on the build architecture
+  populateBuilder = import ./extlinux-conf-builder.nix { pkgs = pkgs.buildPackages; };
+in
+{
+  options = {
+    boot.loader.generic-extlinux-compatible = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to generate an extlinux-compatible configuration file
+          under `/boot/extlinux.conf`.  For instance,
+          U-Boot's generic distro boot support uses this file format.
+
+          See [U-boot's documentation](https://u-boot.readthedocs.io/en/latest/develop/distro.html)
+          for more information.
+        '';
+      };
+
+      useGenerationDeviceTree = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to generate Device Tree-related directives in the
+          extlinux configuration.
+
+          When enabled, the bootloader will attempt to load the device
+          tree binaries from the generation's kernel.
+
+          Note that this affects all generations, regardless of the
+          setting value used in their configurations.
+        '';
+      };
+
+      configurationLimit = mkOption {
+        default = 20;
+        example = 10;
+        type = types.int;
+        description = lib.mdDoc ''
+          Maximum number of configurations in the boot menu.
+        '';
+      };
+
+      populateCmd = mkOption {
+        type = types.str;
+        readOnly = true;
+        description = lib.mdDoc ''
+          Contains the builder command used to populate an image,
+          honoring all options except the `-c <path-to-default-configuration>`
+          argument.
+          Useful to have for sdImage.populateRootCommands
+        '';
+      };
+
+    };
+  };
+
+  config = let
+    builderArgs = "-g ${toString cfg.configurationLimit} -t ${timeoutStr}"
+      + lib.optionalString (dtCfg.name != null) " -n ${dtCfg.name}"
+      + lib.optionalString (!cfg.useGenerationDeviceTree) " -r";
+  in
+    mkIf cfg.enable {
+      system.build.installBootLoader = "${builder} ${builderArgs} -c";
+      system.boot.loader.id = "generic-extlinux-compatible";
+
+      boot.loader.generic-extlinux-compatible.populateCmd = "${populateBuilder} ${builderArgs}";
+    };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.nix b/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.nix
new file mode 100644
index 000000000000..576a07c1d272
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.nix
@@ -0,0 +1,8 @@
+{ pkgs }:
+
+pkgs.substituteAll {
+  src = ./extlinux-conf-builder.sh;
+  isExecutable = true;
+  path = [pkgs.coreutils pkgs.gnused pkgs.gnugrep];
+  inherit (pkgs) bash;
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh b/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh
new file mode 100644
index 000000000000..1a0da0050291
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/generic-extlinux-compatible/extlinux-conf-builder.sh
@@ -0,0 +1,157 @@
+#! @bash@/bin/sh -e
+
+shopt -s nullglob
+
+export PATH=/empty
+for i in @path@; do PATH=$PATH:$i/bin; done
+
+usage() {
+    echo "usage: $0 -t <timeout> -c <path-to-default-configuration> [-d <boot-dir>] [-g <num-generations>] [-n <dtbName>] [-r]" >&2
+    exit 1
+}
+
+timeout=                # Timeout in centiseconds
+default=                # Default configuration
+target=/boot            # Target directory
+numGenerations=0        # Number of other generations to include in the menu
+
+while getopts "t:c:d:g:n:r" opt; do
+    case "$opt" in
+        t) # U-Boot interprets '0' as infinite and negative as instant boot
+            if [ "$OPTARG" -lt 0 ]; then
+                timeout=0
+            elif [ "$OPTARG" = 0 ]; then
+                timeout=-10
+            else
+                timeout=$((OPTARG * 10))
+            fi
+            ;;
+        c) default="$OPTARG" ;;
+        d) target="$OPTARG" ;;
+        g) numGenerations="$OPTARG" ;;
+        n) dtbName="$OPTARG" ;;
+        r) noDeviceTree=1 ;;
+        \?) usage ;;
+    esac
+done
+
+[ "$timeout" = "" -o "$default" = "" ] && usage
+
+mkdir -p $target/nixos
+mkdir -p $target/extlinux
+
+# Convert a path to a file in the Nix store such as
+# /nix/store/<hash>-<name>/file to <hash>-<name>-<file>.
+cleanName() {
+    local path="$1"
+    echo "$path" | sed 's|^/nix/store/||' | sed 's|/|-|g'
+}
+
+# Copy a file from the Nix store to $target/nixos.
+declare -A filesCopied
+
+copyToKernelsDir() {
+    local src=$(readlink -f "$1")
+    local dst="$target/nixos/$(cleanName $src)"
+    # Don't copy the file if $dst already exists.  This means that we
+    # have to create $dst atomically to prevent partially copied
+    # kernels or initrd if this script is ever interrupted.
+    if ! test -e $dst; then
+        local dstTmp=$dst.tmp.$$
+        cp -r $src $dstTmp
+        mv $dstTmp $dst
+    fi
+    filesCopied[$dst]=1
+    result=$dst
+}
+
+# Copy its kernel, initrd and dtbs to $target/nixos, and echo out an
+# extlinux menu entry
+addEntry() {
+    local path=$(readlink -f "$1")
+    local tag="$2" # Generation number or 'default'
+
+    if ! test -e $path/kernel -a -e $path/initrd; then
+        return
+    fi
+
+    copyToKernelsDir "$path/kernel"; kernel=$result
+    copyToKernelsDir "$path/initrd"; initrd=$result
+    dtbDir=$(readlink -m "$path/dtbs")
+    if [ -e "$dtbDir" ]; then
+        copyToKernelsDir "$dtbDir"; dtbs=$result
+    fi
+
+    timestampEpoch=$(stat -L -c '%Z' $path)
+
+    timestamp=$(date "+%Y-%m-%d %H:%M" -d @$timestampEpoch)
+    nixosLabel="$(cat $path/nixos-version)"
+    extraParams="$(cat $path/kernel-params)"
+
+    echo
+    echo "LABEL nixos-$tag"
+    if [ "$tag" = "default" ]; then
+        echo "  MENU LABEL NixOS - Default"
+    else
+        echo "  MENU LABEL NixOS - Configuration $tag ($timestamp - $nixosLabel)"
+    fi
+    echo "  LINUX ../nixos/$(basename $kernel)"
+    echo "  INITRD ../nixos/$(basename $initrd)"
+    echo "  APPEND init=$path/init $extraParams"
+
+    if [ -n "$noDeviceTree" ]; then
+        return
+    fi
+
+    if [ -d "$dtbDir" ]; then
+        # if a dtbName was specified explicitly, use that, else use FDTDIR
+        if [ -n "$dtbName" ]; then
+            echo "  FDT ../nixos/$(basename $dtbs)/${dtbName}"
+        else
+            echo "  FDTDIR ../nixos/$(basename $dtbs)"
+        fi
+    else
+        if [ -n "$dtbName" ]; then
+            echo "Explicitly requested dtbName $dtbName, but there's no FDTDIR - bailing out." >&2
+            exit 1
+        fi
+    fi
+}
+
+tmpFile="$target/extlinux/extlinux.conf.tmp.$$"
+
+cat > $tmpFile <<EOF
+# Generated file, all changes will be lost on nixos-rebuild!
+
+# Change this to e.g. nixos-42 to temporarily boot to an older configuration.
+DEFAULT nixos-default
+
+MENU TITLE ------------------------------------------------------------
+TIMEOUT $timeout
+EOF
+
+addEntry $default default >> $tmpFile
+
+if [ "$numGenerations" -gt 0 ]; then
+    # Add up to $numGenerations generations of the system profile to the menu,
+    # in reverse (most recent to least recent) order.
+    for generation in $(
+            (cd /nix/var/nix/profiles && ls -d system-*-link) \
+            | sed 's/system-\([0-9]\+\)-link/\1/' \
+            | sort -n -r \
+            | head -n $numGenerations); do
+        link=/nix/var/nix/profiles/system-$generation-link
+        addEntry $link $generation
+    done >> $tmpFile
+fi
+
+mv -f $tmpFile $target/extlinux/extlinux.conf
+
+# Remove obsolete files from $target/nixos.
+for fn in $target/nixos/*; do
+    if ! test "${filesCopied[$fn]}" = 1; then
+        echo "Removing no longer needed boot file: $fn"
+        chmod +w -- "$fn"
+        rm -rf -- "$fn"
+    fi
+done
diff --git a/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix b/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix
new file mode 100644
index 000000000000..7097e1d83dca
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/grub/grub.nix
@@ -0,0 +1,834 @@
+{ config, options, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    all
+    concatMap
+    concatMapStrings
+    concatStrings
+    concatStringsSep
+    escapeShellArg
+    flip
+    foldr
+    forEach
+    hasPrefix
+    mapAttrsToList
+    literalExpression
+    makeBinPath
+    mkDefault
+    mkIf
+    mkMerge
+    mkOption
+    mkRemovedOptionModule
+    mkRenamedOptionModule
+    optional
+    optionals
+    optionalString
+    replaceStrings
+    types
+  ;
+
+  cfg = config.boot.loader.grub;
+
+  efi = config.boot.loader.efi;
+
+  grubPkgs =
+    # Package set of targeted architecture
+    if cfg.forcei686 then pkgs.pkgsi686Linux else pkgs;
+
+  realGrub = if cfg.zfsSupport then grubPkgs.grub2.override { zfsSupport = true; }
+    else grubPkgs.grub2;
+
+  grub =
+    # Don't include GRUB if we're only generating a GRUB menu (e.g.,
+    # in EC2 instances).
+    if cfg.devices == ["nodev"]
+    then null
+    else realGrub;
+
+  grubEfi =
+    if cfg.efiSupport
+    then realGrub.override { efiSupport = cfg.efiSupport; }
+    else null;
+
+  f = x: optionalString (x != null) ("" + x);
+
+  grubConfig = args:
+    let
+      efiSysMountPoint = if args.efiSysMountPoint == null then args.path else args.efiSysMountPoint;
+      efiSysMountPoint' = replaceStrings [ "/" ] [ "-" ] efiSysMountPoint;
+    in
+    pkgs.writeText "grub-config.xml" (builtins.toXML
+    { splashImage = f cfg.splashImage;
+      splashMode = f cfg.splashMode;
+      backgroundColor = f cfg.backgroundColor;
+      entryOptions = f cfg.entryOptions;
+      subEntryOptions = f cfg.subEntryOptions;
+      # PC platforms (like x86_64-linux) have a non-EFI target (`grubTarget`), but other platforms
+      # (like aarch64-linux) have an undefined `grubTarget`. Avoid providing the path to a non-EFI
+      # GRUB on those platforms.
+      grub = f (if (grub.grubTarget or "") != "" then grub else "");
+      grubTarget = f (grub.grubTarget or "");
+      shell = "${pkgs.runtimeShell}";
+      fullName = lib.getName realGrub;
+      fullVersion = lib.getVersion realGrub;
+      grubEfi = f grubEfi;
+      grubTargetEfi = optionalString cfg.efiSupport (f (grubEfi.grubTarget or ""));
+      bootPath = args.path;
+      storePath = config.boot.loader.grub.storePath;
+      bootloaderId = if args.efiBootloaderId == null then "${config.system.nixos.distroName}${efiSysMountPoint'}" else args.efiBootloaderId;
+      timeout = if config.boot.loader.timeout == null then -1 else config.boot.loader.timeout;
+      theme = f cfg.theme;
+      inherit efiSysMountPoint;
+      inherit (args) devices;
+      inherit (efi) canTouchEfiVariables;
+      inherit (cfg)
+        extraConfig extraPerEntryConfig extraEntries forceInstall useOSProber
+        extraGrubInstallArgs
+        extraEntriesBeforeNixOS extraPrepareConfig configurationLimit copyKernels
+        default fsIdentifier efiSupport efiInstallAsRemovable gfxmodeEfi gfxmodeBios gfxpayloadEfi gfxpayloadBios
+        users
+        timeoutStyle
+      ;
+      path = with pkgs; makeBinPath (
+        [ coreutils gnused gnugrep findutils diffutils btrfs-progs util-linux mdadm ]
+        ++ optional cfg.efiSupport efibootmgr
+        ++ optionals cfg.useOSProber [ busybox os-prober ]);
+      font = lib.optionalString (cfg.font != null) (
+             if lib.last (lib.splitString "." cfg.font) == "pf2"
+             then cfg.font
+             else "${convertedFont}");
+    });
+
+  bootDeviceCounters = foldr (device: attr: attr // { ${device} = (attr.${device} or 0) + 1; }) {}
+    (concatMap (args: args.devices) cfg.mirroredBoots);
+
+  convertedFont = (pkgs.runCommand "grub-font-converted.pf2" {}
+           (builtins.concatStringsSep " "
+             ([ "${realGrub}/bin/grub-mkfont"
+               cfg.font
+               "--output" "$out"
+             ] ++ (optional (cfg.fontSize!=null) "--size ${toString cfg.fontSize}")))
+         );
+
+  defaultSplash = pkgs.nixos-artwork.wallpapers.simple-dark-gray-bootloader.gnomeFilePath;
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    boot.loader.grub = {
+
+      enable = mkOption {
+        default = !config.boot.isContainer;
+        defaultText = literalExpression "!config.boot.isContainer";
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to enable the GNU GRUB boot loader.
+        '';
+      };
+
+      version = mkOption {
+        visible = false;
+        type = types.int;
+      };
+
+      device = mkOption {
+        default = "";
+        example = "/dev/disk/by-id/wwn-0x500001234567890a";
+        type = types.str;
+        description = lib.mdDoc ''
+          The device on which the GRUB boot loader will be installed.
+          The special value `nodev` means that a GRUB
+          boot menu will be generated, but GRUB itself will not
+          actually be installed.  To install GRUB on multiple devices,
+          use `boot.loader.grub.devices`.
+        '';
+      };
+
+      devices = mkOption {
+        default = [];
+        example = [ "/dev/disk/by-id/wwn-0x500001234567890a" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The devices on which the boot loader, GRUB, will be
+          installed. Can be used instead of `device` to
+          install GRUB onto multiple devices.
+        '';
+      };
+
+      users = mkOption {
+        default = {};
+        example = {
+          root = { hashedPasswordFile = "/path/to/file"; };
+        };
+        description = lib.mdDoc ''
+          User accounts for GRUB. When specified, the GRUB command line and
+          all boot options except the default are password-protected.
+          All passwords and hashes provided will be stored in /boot/grub/grub.cfg,
+          and will be visible to any local user who can read this file. Additionally,
+          any passwords and hashes provided directly in a Nix configuration
+          (as opposed to external files) will be copied into the Nix store, and
+          will be visible to all local users.
+        '';
+        type = types.attrsOf (types.submodule {
+          options = {
+            hashedPasswordFile = mkOption {
+              example = "/path/to/file";
+              default = null;
+              type = with types; uniq (nullOr str);
+              description = lib.mdDoc ''
+                Specifies the path to a file containing the password hash
+                for the account, generated with grub-mkpasswd-pbkdf2.
+                This hash will be stored in /boot/grub/grub.cfg, and will
+                be visible to any local user who can read this file.
+              '';
+            };
+            hashedPassword = mkOption {
+              example = "grub.pbkdf2.sha512.10000.674DFFDEF76E13EA...2CC972B102CF4355";
+              default = null;
+              type = with types; uniq (nullOr str);
+              description = lib.mdDoc ''
+                Specifies the password hash for the account,
+                generated with grub-mkpasswd-pbkdf2.
+                This hash will be copied to the Nix store, and will be visible to all local users.
+              '';
+            };
+            passwordFile = mkOption {
+              example = "/path/to/file";
+              default = null;
+              type = with types; uniq (nullOr str);
+              description = lib.mdDoc ''
+                Specifies the path to a file containing the
+                clear text password for the account.
+                This password will be stored in /boot/grub/grub.cfg, and will
+                be visible to any local user who can read this file.
+              '';
+            };
+            password = mkOption {
+              example = "Pa$$w0rd!";
+              default = null;
+              type = with types; uniq (nullOr str);
+              description = lib.mdDoc ''
+                Specifies the clear text password for the account.
+                This password will be copied to the Nix store, and will be visible to all local users.
+              '';
+            };
+          };
+        });
+      };
+
+      mirroredBoots = mkOption {
+        default = [ ];
+        example = [
+          { path = "/boot1"; devices = [ "/dev/disk/by-id/wwn-0x500001234567890a" ]; }
+          { path = "/boot2"; devices = [ "/dev/disk/by-id/wwn-0x500009876543210a" ]; }
+        ];
+        description = lib.mdDoc ''
+          Mirror the boot configuration to multiple partitions and install grub
+          to the respective devices corresponding to those partitions.
+        '';
+
+        type = with types; listOf (submodule {
+          options = {
+
+            path = mkOption {
+              example = "/boot1";
+              type = types.str;
+              description = lib.mdDoc ''
+                The path to the boot directory where GRUB will be written. Generally
+                this boot path should double as an EFI path.
+              '';
+            };
+
+            efiSysMountPoint = mkOption {
+              default = null;
+              example = "/boot1/efi";
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The path to the efi system mount point. Usually this is the same
+                partition as the above path and can be left as null.
+              '';
+            };
+
+            efiBootloaderId = mkOption {
+              default = null;
+              example = "NixOS-fsid";
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                The id of the bootloader to store in efi nvram.
+                The default is to name it NixOS and append the path or efiSysMountPoint.
+                This is only used if `boot.loader.efi.canTouchEfiVariables` is true.
+              '';
+            };
+
+            devices = mkOption {
+              default = [ ];
+              example = [ "/dev/disk/by-id/wwn-0x500001234567890a" "/dev/disk/by-id/wwn-0x500009876543210a" ];
+              type = types.listOf types.str;
+              description = lib.mdDoc ''
+                The paths to the devices which will have the GRUB MBR written.
+                Note that these are typically device paths and not paths to partitions.
+              '';
+            };
+
+          };
+        });
+      };
+
+      configurationName = mkOption {
+        default = "";
+        example = "Stable 2.6.21";
+        type = types.str;
+        description = lib.mdDoc ''
+          GRUB entry name instead of default.
+        '';
+      };
+
+      storePath = mkOption {
+        default = "/nix/store";
+        type = types.str;
+        description = lib.mdDoc ''
+          Path to the Nix store when looking for kernels at boot.
+          Only makes sense when copyKernels is false.
+        '';
+      };
+
+      extraPrepareConfig = mkOption {
+        default = "";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Additional bash commands to be run by the script that
+          prepares the GRUB menu entries.
+        '';
+      };
+
+      extraConfig = mkOption {
+        default = "";
+        example = ''
+          serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
+          terminal_input --append serial
+          terminal_output --append serial
+        '';
+        type = types.lines;
+        description = lib.mdDoc ''
+          Additional GRUB commands inserted in the configuration file
+          just before the menu entries.
+        '';
+      };
+
+      extraGrubInstallArgs = mkOption {
+        default = [ ];
+        example = [ "--modules=nativedisk ahci pata part_gpt part_msdos diskfilter mdraid1x lvm ext2" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Additional arguments passed to `grub-install`.
+
+          A use case for this is to build specific GRUB2 modules
+          directly into the GRUB2 kernel image, so that they are available
+          and activated even in the `grub rescue` shell.
+
+          They are also necessary when the BIOS/UEFI is bugged and cannot
+          correctly read large disks (e.g. above 2 TB), so GRUB2's own
+          `nativedisk` and related modules can be used
+          to use its own disk drivers. The example shows one such case.
+          This is also useful for booting from USB.
+          See the
+          [GRUB source code](https://git.savannah.gnu.org/cgit/grub.git/tree/grub-core/commands/nativedisk.c?h=grub-2.04#n326)
+          for which disk modules are available.
+
+          The list elements are passed directly as `argv`
+          arguments to the `grub-install` program, in order.
+        '';
+      };
+
+      extraInstallCommands = mkOption {
+        default = "";
+        example = ''
+          # the example below generates detached signatures that GRUB can verify
+          # https://www.gnu.org/software/grub/manual/grub/grub.html#Using-digital-signatures
+          ''${pkgs.findutils}/bin/find /boot -not -path "/boot/efi/*" -type f -name '*.sig' -delete
+          old_gpg_home=$GNUPGHOME
+          export GNUPGHOME="$(mktemp -d)"
+          ''${pkgs.gnupg}/bin/gpg --import ''${priv_key} > /dev/null 2>&1
+          ''${pkgs.findutils}/bin/find /boot -not -path "/boot/efi/*" -type f -exec ''${pkgs.gnupg}/bin/gpg --detach-sign "{}" \; > /dev/null 2>&1
+          rm -rf $GNUPGHOME
+          export GNUPGHOME=$old_gpg_home
+        '';
+        type = types.lines;
+        description = lib.mdDoc ''
+          Additional shell commands inserted in the bootloader installer
+          script after generating menu entries.
+        '';
+      };
+
+      extraPerEntryConfig = mkOption {
+        default = "";
+        example = "root (hd0)";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Additional GRUB commands inserted in the configuration file
+          at the start of each NixOS menu entry.
+        '';
+      };
+
+      extraEntries = mkOption {
+        default = "";
+        type = types.lines;
+        example = ''
+          # GRUB 2 example
+          menuentry "Windows 7" {
+            chainloader (hd0,4)+1
+          }
+
+          # GRUB 2 with UEFI example, chainloading another distro
+          menuentry "Fedora" {
+            set root=(hd1,1)
+            chainloader /efi/fedora/grubx64.efi
+          }
+        '';
+        description = lib.mdDoc ''
+          Any additional entries you want added to the GRUB boot menu.
+        '';
+      };
+
+      extraEntriesBeforeNixOS = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether extraEntries are included before the default option.
+        '';
+      };
+
+      extraFiles = mkOption {
+        type = types.attrsOf types.path;
+        default = {};
+        example = literalExpression ''
+          { "memtest.bin" = "''${pkgs.memtest86plus}/memtest.bin"; }
+        '';
+        description = lib.mdDoc ''
+          A set of files to be copied to {file}`/boot`.
+          Each attribute name denotes the destination file name in
+          {file}`/boot`, while the corresponding
+          attribute value specifies the source file.
+        '';
+      };
+
+      useOSProber = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set to true, append entries for other OSs detected by os-prober.
+        '';
+      };
+
+      splashImage = mkOption {
+        type = types.nullOr types.path;
+        example = literalExpression "./my-background.png";
+        description = lib.mdDoc ''
+          Background image used for GRUB.
+          Set to `null` to run GRUB in text mode.
+
+          ::: {.note}
+          File must be one of .png, .tga, .jpg, or .jpeg. JPEG images must
+          not be progressive.
+          The image will be scaled if necessary to fit the screen.
+          :::
+        '';
+      };
+
+      backgroundColor = mkOption {
+        type = types.nullOr types.str;
+        example = "#7EBAE4";
+        default = null;
+        description = lib.mdDoc ''
+          Background color to be used for GRUB to fill the areas the image isn't filling.
+        '';
+      };
+
+      timeoutStyle = mkOption {
+        default = "menu";
+        type = types.enum [ "menu" "countdown" "hidden" ];
+        description = lib.mdDoc ''
+           - `menu` shows the menu.
+           - `countdown` uses a text-mode countdown.
+           - `hidden` hides GRUB entirely.
+
+          When using a theme, the default value (`menu`) is appropriate for the graphical countdown.
+
+          When attempting to do flicker-free boot, `hidden` should be used.
+
+          See the [GRUB documentation section about `timeout_style`](https://www.gnu.org/software/grub/manual/grub/html_node/timeout.html).
+
+          ::: {.note}
+          If this option is set to ‘countdown’ or ‘hidden’ [...] and ESC or F4 are pressed, or SHIFT is held down during that time, it will display the menu and wait for input.
+          :::
+
+          From: [Simple configuration handling page, under GRUB_TIMEOUT_STYLE](https://www.gnu.org/software/grub/manual/grub/html_node/Simple-configuration.html).
+        '';
+      };
+
+      entryOptions = mkOption {
+        default = "--class nixos --unrestricted";
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          Options applied to the primary NixOS menu entry.
+        '';
+      };
+
+      subEntryOptions = mkOption {
+        default = "--class nixos";
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          Options applied to the secondary NixOS submenu entry.
+        '';
+      };
+
+      theme = mkOption {
+        type = types.nullOr types.path;
+        example = literalExpression "pkgs.nixos-grub2-theme";
+        default = null;
+        description = lib.mdDoc ''
+          Grub theme to be used.
+        '';
+      };
+
+      splashMode = mkOption {
+        type = types.enum [ "normal" "stretch" ];
+        default = "stretch";
+        description = lib.mdDoc ''
+          Whether to stretch the image or show the image in the top-left corner unstretched.
+        '';
+      };
+
+      font = mkOption {
+        type = types.nullOr types.path;
+        default = "${realGrub}/share/grub/unicode.pf2";
+        defaultText = literalExpression ''"''${pkgs.grub2}/share/grub/unicode.pf2"'';
+        description = lib.mdDoc ''
+          Path to a TrueType, OpenType, or pf2 font to be used by Grub.
+        '';
+      };
+
+      fontSize = mkOption {
+        type = types.nullOr types.int;
+        example = 16;
+        default = null;
+        description = lib.mdDoc ''
+          Font size for the grub menu. Ignored unless `font`
+          is set to a ttf or otf font.
+        '';
+      };
+
+      gfxmodeEfi = mkOption {
+        default = "auto";
+        example = "1024x768";
+        type = types.str;
+        description = lib.mdDoc ''
+          The gfxmode to pass to GRUB when loading a graphical boot interface under EFI.
+        '';
+      };
+
+      gfxmodeBios = mkOption {
+        default = "1024x768";
+        example = "auto";
+        type = types.str;
+        description = lib.mdDoc ''
+          The gfxmode to pass to GRUB when loading a graphical boot interface under BIOS.
+        '';
+      };
+
+      gfxpayloadEfi = mkOption {
+        default = "keep";
+        example = "text";
+        type = types.str;
+        description = lib.mdDoc ''
+          The gfxpayload to pass to GRUB when loading a graphical boot interface under EFI.
+        '';
+      };
+
+      gfxpayloadBios = mkOption {
+        default = "text";
+        example = "keep";
+        type = types.str;
+        description = lib.mdDoc ''
+          The gfxpayload to pass to GRUB when loading a graphical boot interface under BIOS.
+        '';
+      };
+
+      configurationLimit = mkOption {
+        default = 100;
+        example = 120;
+        type = types.int;
+        description = lib.mdDoc ''
+          Maximum number of configurations in the boot menu. GRUB has problems
+          when there are too many entries.
+        '';
+      };
+
+      copyKernels = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether the GRUB menu builder should copy kernels and initial
+          ramdisks to /boot.  This is done automatically if /boot is
+          on a different partition than /.
+        '';
+      };
+
+      default = mkOption {
+        default = "0";
+        type = types.either types.int types.str;
+        apply = toString;
+        description = lib.mdDoc ''
+          Index of the default menu item to be booted.
+          Can also be set to "saved", which will make GRUB select
+          the menu item that was used at the last boot.
+        '';
+      };
+
+      fsIdentifier = mkOption {
+        default = "uuid";
+        type = types.enum [ "uuid" "label" "provided" ];
+        description = lib.mdDoc ''
+          Determines how GRUB will identify devices when generating the
+          configuration file. A value of uuid / label signifies that grub
+          will always resolve the uuid or label of the device before using
+          it in the configuration. A value of provided means that GRUB will
+          use the device name as shown in {command}`df` or
+          {command}`mount`. Note, zfs zpools / datasets are ignored
+          and will always be mounted using their labels.
+        '';
+      };
+
+      zfsSupport = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether GRUB should be built against libzfs.
+        '';
+      };
+
+      efiSupport = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether GRUB should be built with EFI support.
+        '';
+      };
+
+      efiInstallAsRemovable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to invoke `grub-install` with
+          `--removable`.
+
+          Unless you turn this on, GRUB will install itself somewhere in
+          `boot.loader.efi.efiSysMountPoint` (exactly where
+          depends on other config variables). If you've set
+          `boot.loader.efi.canTouchEfiVariables` *AND* you
+          are currently booted in UEFI mode, then GRUB will use
+          `efibootmgr` to modify the boot order in the
+          EFI variables of your firmware to include this location. If you are
+          *not* booted in UEFI mode at the time GRUB is being installed, the
+          NVRAM will not be modified, and your system will not find GRUB at
+          boot time. However, GRUB will still return success so you may miss
+          the warning that gets printed ("`efibootmgr: EFI variables
+          are not supported on this system.`").
+
+          If you turn this feature on, GRUB will install itself in a
+          special location within `efiSysMountPoint` (namely
+          `EFI/boot/boot$arch.efi`) which the firmwares
+          are hardcoded to try first, regardless of NVRAM EFI variables.
+
+          To summarize, turn this on if:
+          - You are installing NixOS and want it to boot in UEFI mode,
+            but you are currently booted in legacy mode
+          - You want to make a drive that will boot regardless of
+            the NVRAM state of the computer (like a USB "removable" drive)
+          - You simply dislike the idea of depending on NVRAM
+            state to make your drive bootable
+        '';
+      };
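+
+      # Illustrative usage sketch (an assumption about typical usage, not a
+      # normative recipe): installing GRUB to the removable EFI fallback path
+      # without touching NVRAM variables might look like:
+      #   boot.loader.grub = {
+      #     enable = true;
+      #     efiSupport = true;
+      #     efiInstallAsRemovable = true;
+      #     device = "nodev";
+      #   };
+      #   boot.loader.efi.canTouchEfiVariables = false;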
+
+      enableCryptodisk = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable support for encrypted partitions. GRUB should automatically
+          unlock the correct encrypted partition and look for filesystems.
+        '';
+      };
+
+      forceInstall = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to try and forcibly install GRUB even if problems are
+          detected. It is not recommended to enable this unless you know what
+          you are doing.
+        '';
+      };
+
+      forcei686 = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to force the use of an ia32 boot loader on x64 systems. Required
+          to install and run NixOS on 64-bit x86 systems with 32-bit (U)EFI.
+        '';
+      };
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkMerge [
+
+    { boot.loader.grub.splashImage = mkDefault defaultSplash; }
+
+    (mkIf (cfg.splashImage == defaultSplash) {
+      boot.loader.grub.backgroundColor = mkDefault "#2F302F";
+      boot.loader.grub.splashMode = mkDefault "normal";
+    })
+
+    (mkIf cfg.enable {
+
+      boot.loader.grub.devices = optional (cfg.device != "") cfg.device;
+
+      boot.loader.grub.mirroredBoots = optionals (cfg.devices != [ ]) [
+        { path = "/boot"; inherit (cfg) devices; inherit (efi) efiSysMountPoint; }
+      ];
+
+      boot.loader.supportsInitrdSecrets = true;
+
+      system.systemBuilderArgs.configurationName = cfg.configurationName;
+      system.systemBuilderCommands = ''
+        echo -n "$configurationName" > $out/configuration-name
+      '';
+
+      system.build.installBootLoader =
+        let
+          install-grub-pl = pkgs.substituteAll {
+            src = ./install-grub.pl;
+            utillinux = pkgs.util-linux;
+            btrfsprogs = pkgs.btrfs-progs;
+            inherit (config.system.nixos) distroName;
+          };
+          perl = pkgs.perl.withPackages (p: with p; [
+            FileSlurp FileCopyRecursive
+            XMLLibXML XMLSAX XMLSAXBase
+            ListCompare JSON
+          ]);
+        in pkgs.writeScript "install-grub.sh" (''
+        #!${pkgs.runtimeShell}
+        set -e
+        ${optionalString cfg.enableCryptodisk "export GRUB_ENABLE_CRYPTODISK=y"}
+      '' + flip concatMapStrings cfg.mirroredBoots (args: ''
+        ${perl}/bin/perl ${install-grub-pl} ${grubConfig args} $@
+      '') + cfg.extraInstallCommands);
+
+      system.build.grub = grub;
+
+      # Common attribute for boot loaders so only one of them can be
+      # set at once.
+      system.boot.loader.id = "grub";
+
+      environment.systemPackages = optional (grub != null) grub;
+
+      boot.loader.grub.extraPrepareConfig =
+        concatStrings (mapAttrsToList (n: v: ''
+          ${pkgs.coreutils}/bin/install -Dp "${v}" "${efi.efiSysMountPoint}/"${escapeShellArg n}
+        '') config.boot.loader.grub.extraFiles);
+
+      assertions = [
+        {
+          assertion = cfg.mirroredBoots != [ ];
+          message = "You must set the option ‘boot.loader.grub.devices’ or "
+            + "'boot.loader.grub.mirroredBoots' to make the system bootable.";
+        }
+        {
+          assertion = cfg.efiSupport || all (c: c < 2) (mapAttrsToList (n: c: if n == "nodev" then 0 else c) bootDeviceCounters);
+          message = "You cannot have duplicated devices in mirroredBoots";
+        }
+        {
+          assertion = cfg.efiInstallAsRemovable -> cfg.efiSupport;
+          message = "If you wish to to use boot.loader.grub.efiInstallAsRemovable, then turn on boot.loader.grub.efiSupport";
+        }
+        {
+          assertion = cfg.efiInstallAsRemovable -> !config.boot.loader.efi.canTouchEfiVariables;
+          message = "If you wish to to use boot.loader.grub.efiInstallAsRemovable, then turn off boot.loader.efi.canTouchEfiVariables";
+        }
+        {
+          assertion = !(options.boot.loader.grub.version.isDefined && cfg.version == 1);
+          message = "Support for version 0.9x of GRUB was removed after being unsupported upstream for around a decade";
+        }
+      ] ++ flip concatMap cfg.mirroredBoots (args: [
+        {
+          assertion = args.devices != [ ];
+          message = "A boot path cannot have an empty devices string in ${args.path}";
+        }
+        {
+          assertion = hasPrefix "/" args.path;
+          message = "Boot paths must be absolute, not ${args.path}";
+        }
+        {
+          assertion = if args.efiSysMountPoint == null then true else hasPrefix "/" args.efiSysMountPoint;
+          message = "EFI paths must be absolute, not ${args.efiSysMountPoint}";
+        }
+      ] ++ forEach args.devices (device: {
+        assertion = device == "nodev" || hasPrefix "/" device;
+        message = "GRUB devices must be absolute paths, not ${device} in ${args.path}";
+      }));
+    })
+
+    (mkIf options.boot.loader.grub.version.isDefined {
+      warnings = [ ''
+        The boot.loader.grub.version option does not have any effect anymore, please remove it from your configuration.
+      '' ];
+    })
+  ];
+
+
+  imports =
+    [ (mkRemovedOptionModule [ "boot" "loader" "grub" "bootDevice" ] "")
+      (mkRenamedOptionModule [ "boot" "copyKernels" ] [ "boot" "loader" "grub" "copyKernels" ])
+      (mkRenamedOptionModule [ "boot" "extraGrubEntries" ] [ "boot" "loader" "grub" "extraEntries" ])
+      (mkRenamedOptionModule [ "boot" "extraGrubEntriesBeforeNixos" ] [ "boot" "loader" "grub" "extraEntriesBeforeNixOS" ])
+      (mkRenamedOptionModule [ "boot" "grubDevice" ] [ "boot" "loader" "grub" "device" ])
+      (mkRenamedOptionModule [ "boot" "bootMount" ] [ "boot" "loader" "grub" "bootDevice" ])
+      (mkRenamedOptionModule [ "boot" "grubSplashImage" ] [ "boot" "loader" "grub" "splashImage" ])
+      (mkRemovedOptionModule [ "boot" "loader" "grub" "trustedBoot" ] ''
+        Support for Trusted GRUB has been removed, because the project
+        has been retired upstream.
+      '')
+      (mkRemovedOptionModule [ "boot" "loader" "grub" "extraInitrd" ] ''
+        This option has been replaced with the bootloader agnostic
+        boot.initrd.secrets option. To migrate to the initrd secrets system,
+        extract the extraInitrd archive into your main filesystem:
+
+          # zcat /boot/extra_initramfs.gz | cpio -idvmD /etc/secrets/initrd
+          /path/to/secret1
+          /path/to/secret2
+
+        then replace boot.loader.grub.extraInitrd with boot.initrd.secrets:
+
+          boot.initrd.secrets = {
+            "/path/to/secret1" = "/etc/secrets/initrd/path/to/secret1";
+            "/path/to/secret2" = "/etc/secrets/initrd/path/to/secret2";
+          };
+
+        See the boot.initrd.secrets option documentation for more information.
+      '')
+    ];
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl b/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl
new file mode 100644
index 000000000000..d1e7a0cb8178
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/grub/install-grub.pl
@@ -0,0 +1,801 @@
+use strict;
+use warnings;
+use Class::Struct;
+use XML::LibXML;
+use File::Basename;
+use File::Path;
+use File::stat;
+use File::Copy;
+use File::Copy::Recursive qw(rcopy pathrm);
+use File::Slurp;
+use File::Temp;
+use JSON;
+use File::Find;
+require List::Compare;
+use POSIX;
+use Cwd;
+
+# system.build.toplevel path
+my $defaultConfig = $ARGV[1] or die;
+
+# Grub config XML generated by grubConfig function in grub.nix
+my $dom = XML::LibXML->load_xml(location => $ARGV[0]);
+
+sub get { my ($name) = @_; return $dom->findvalue("/expr/attrs/attr[\@name = '$name']/*/\@value"); }
+
+sub getList {
+    my ($name) = @_;
+    my @list = ();
+    foreach my $entry ($dom->findnodes("/expr/attrs/attr[\@name = '$name']/list/string/\@value")) {
+        $entry = $entry->findvalue(".") or die;
+        push(@list, $entry);
+    }
+    return @list;
+}
+
+sub readFile {
+    my ($fn) = @_;
+    # enable slurp mode: read entire file in one go
+    local $/ = undef;
+    open my $fh, "<$fn" or return undef;
+    my $s = <$fh>;
+    close $fh;
+    # disable slurp mode
+    local $/ = "\n";
+    chomp $s;
+    return $s;
+}
+
+sub writeFile {
+    my ($fn, $s) = @_;
+    open my $fh, ">$fn" or die "cannot create $fn: $!\n";
+    print $fh $s or die "cannot write to $fn: $!\n";
+    close $fh or die "cannot close $fn: $!\n";
+}
+
+sub runCommand {
+    open(my $fh, "-|", @_) or die "Failed to execute: $@_\n";
+    my @ret = $fh->getlines();
+    close $fh;
+    return ($?, @ret);
+}
+
+my $grub = get("grub");
+my $grubTarget = get("grubTarget");
+my $extraConfig = get("extraConfig");
+my $extraPrepareConfig = get("extraPrepareConfig");
+my $extraPerEntryConfig = get("extraPerEntryConfig");
+my $extraEntries = get("extraEntries");
+my $extraEntriesBeforeNixOS = get("extraEntriesBeforeNixOS") eq "true";
+my $splashImage = get("splashImage");
+my $splashMode = get("splashMode");
+my $entryOptions = get("entryOptions");
+my $subEntryOptions = get("subEntryOptions");
+my $backgroundColor = get("backgroundColor");
+my $configurationLimit = int(get("configurationLimit"));
+my $copyKernels = get("copyKernels") eq "true";
+my $timeout = int(get("timeout"));
+my $timeoutStyle = get("timeoutStyle");
+my $defaultEntry = get("default");
+my $fsIdentifier = get("fsIdentifier");
+my $grubEfi = get("grubEfi");
+my $grubTargetEfi = get("grubTargetEfi");
+my $bootPath = get("bootPath");
+my $storePath = get("storePath");
+my $canTouchEfiVariables = get("canTouchEfiVariables");
+my $efiInstallAsRemovable = get("efiInstallAsRemovable");
+my $efiSysMountPoint = get("efiSysMountPoint");
+my $gfxmodeEfi = get("gfxmodeEfi");
+my $gfxmodeBios = get("gfxmodeBios");
+my $gfxpayloadEfi = get("gfxpayloadEfi");
+my $gfxpayloadBios = get("gfxpayloadBios");
+my $bootloaderId = get("bootloaderId");
+my $forceInstall = get("forceInstall");
+my $font = get("font");
+my $theme = get("theme");
+my $saveDefault = $defaultEntry eq "saved";
+$ENV{'PATH'} = get("path");
+
+print STDERR "updating GRUB 2 menu...\n";
+
+mkpath("$bootPath/grub", 0, 0700);
+
+# Discover whether the bootPath is on the same filesystem as / and
+# /nix/store.  If not, then all kernels and initrds must be copied to
+# the bootPath.
+if (stat($bootPath)->dev != stat("/nix/store")->dev) {
+    $copyKernels = 1;
+}
+
+# Discover information about the location of the bootPath
+struct(Fs => {
+    device => '$',
+    type => '$',
+    mount => '$',
+});
+sub PathInMount {
+    my ($path, $mount) = @_;
+    my @splitMount = split /\//, $mount;
+    my @splitPath = split /\//, $path;
+    if ($#splitPath < $#splitMount) {
+        return 0;
+    }
+    for (my $i = 0; $i <= $#splitMount; $i++) {
+        if ($splitMount[$i] ne $splitPath[$i]) {
+            return 0;
+        }
+    }
+    return 1;
+}
+
+# Figure out what filesystem is used for the directory with init/initrd/kernel files
+sub GetFs {
+    my ($dir) = @_;
+    my $bestFs = Fs->new(device => "", type => "", mount => "");
+    foreach my $fs (read_file("/proc/self/mountinfo")) {
+        chomp $fs;
+        my @fields = split / /, $fs;
+        my $mountPoint = $fields[4];
+        next unless -d $mountPoint;
+        my @mountOptions = split /,/, $fields[5];
+
+        # Skip the optional fields.
+        my $n = 6; $n++ while $fields[$n] ne "-"; $n++;
+        my $fsType = $fields[$n];
+        my $device = $fields[$n + 1];
+        my @superOptions = split /,/, $fields[$n + 2];
+
+        # Skip the bind-mount on /nix/store.
+        next if $mountPoint eq "/nix/store" && (grep { $_ eq "rw" } @superOptions);
+        # Skip mount point generated by systemd-efi-boot-generator?
+        next if $fsType eq "autofs";
+
+        # Ensure this matches the intended directory
+        next unless PathInMount($dir, $mountPoint);
+
+        # Is it better than our current match?
+        if (length($mountPoint) > length($bestFs->mount)) {
+            $bestFs = Fs->new(device => $device, type => $fsType, mount => $mountPoint);
+        }
+    }
+    return $bestFs;
+}
+struct (Grub => {
+    path => '$',
+    search => '$',
+});
+my $driveid = 1;
+sub GrubFs {
+    my ($dir) = @_;
+    my $fs = GetFs($dir);
+    my $path = substr($dir, length($fs->mount));
+    if (substr($path, 0, 1) ne "/") {
+        $path = "/$path";
+    }
+    my $search = "";
+
+    # ZFS is completely separate logic as zpools are always identified by a label
+    # or custom UUID
+    if ($fs->type eq 'zfs') {
+        my $sid = index($fs->device, '/');
+
+        if ($sid < 0) {
+            $search = '--label ' . $fs->device;
+            $path = '/@' . $path;
+        } else {
+            $search = '--label ' . substr($fs->device, 0, $sid);
+            $path = '/' . substr($fs->device, $sid) . '/@' . $path;
+        }
+    } else {
+        my %types = ('uuid' => '--fs-uuid', 'label' => '--label');
+
+        if ($fsIdentifier eq 'provided') {
+            # If the provided dev is identifying the partition using a label or uuid,
+            # we should get the label / uuid and do a proper search
+            my @matches = $fs->device =~ m/\/dev\/disk\/by-(label|uuid)\/(.*)/;
+            if ($#matches > 1) {
+                die "Too many matched devices"
+            } elsif ($#matches == 1) {
+                $search = "$types{$matches[0]} $matches[1]"
+            }
+        } else {
+            # Determine the identifying type
+            $search = $types{$fsIdentifier} . ' ';
+
+            # Based on the type pull in the identifier from the system
+            my ($status, @devInfo) = runCommand("@utillinux@/bin/blkid", "-o", "export", @{[$fs->device]});
+            if ($status != 0) {
+                die "Failed to get blkid info (returned $status) for @{[$fs->mount]} on @{[$fs->device]}";
+            }
+            my @matches = join("", @devInfo) =~ m/@{[uc $fsIdentifier]}=([^\n]*)/;
+            if ($#matches != 0) {
+                die "Couldn't find a $types{$fsIdentifier} for @{[$fs->device]}\n"
+            }
+            $search .= $matches[0];
+        }
+
+        # BTRFS is a special case in that we need to fix the referenced path based on subvolumes
+        if ($fs->type eq 'btrfs') {
+            my ($status, @id_info) = runCommand("@btrfsprogs@/bin/btrfs", "subvol", "show", @{[$fs->mount]});
+            if ($status != 0) {
+                die "Failed to retrieve subvolume info for @{[$fs->mount]}\n";
+            }
+            my @ids = join("\n", @id_info) =~ m/^(?!\/\n).*Subvolume ID:[ \t\n]*([0-9]+)/s;
+            if ($#ids > 0) {
+                die "Btrfs subvol name for @{[$fs->device]} listed multiple times in mount\n"
+            } elsif ($#ids == 0) {
+                my ($status, @path_info) = runCommand("@btrfsprogs@/bin/btrfs", "subvol", "list", @{[$fs->mount]});
+                if ($status != 0) {
+                    die "Failed to find @{[$fs->mount]} subvolume id from btrfs\n";
+                }
+                my @paths = join("", @path_info) =~ m/ID $ids[0] [^\n]* path ([^\n]*)/;
+                if ($#paths > 0) {
+                    die "Btrfs returned multiple paths for a single subvolume id, mountpoint @{[$fs->mount]}\n";
+                } elsif ($#paths != 0) {
+                    die "Btrfs did not return a path for the subvolume at @{[$fs->mount]}\n";
+                }
+                $path = "/$paths[0]$path";
+            }
+        }
+    }
+    if (not $search eq "") {
+        $search = "search --set=drive$driveid " . $search;
+        $path = "(\$drive$driveid)$path";
+        $driveid += 1;
+    }
+    return Grub->new(path => $path, search => $search);
+}
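+# For illustration (hypothetical values, not taken from a real system): for an
+# ext4 /boot identified by UUID, GrubFs("/boot") would return roughly
+#   search: "search --set=drive1 --fs-uuid 1234-ABCD"
+#   path:   "($drive1)/"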
+my $grubBoot = GrubFs($bootPath);
+my $grubStore;
+if ($copyKernels == 0) {
+    $grubStore = GrubFs($storePath);
+}
+
+# Generate the header.
+my $conf .= "# Automatically generated.  DO NOT EDIT THIS FILE!\n";
+
+my @users = ();
+foreach my $user ($dom->findnodes('/expr/attrs/attr[@name = "users"]/attrs/attr')) {
+    my $name = $user->findvalue('@name') or die;
+    my $hashedPassword = $user->findvalue('./attrs/attr[@name = "hashedPassword"]/string/@value');
+    my $hashedPasswordFile = $user->findvalue('./attrs/attr[@name = "hashedPasswordFile"]/string/@value');
+    my $password = $user->findvalue('./attrs/attr[@name = "password"]/string/@value');
+    my $passwordFile = $user->findvalue('./attrs/attr[@name = "passwordFile"]/string/@value');
+
+    if ($hashedPasswordFile) {
+        open(my $f, '<', $hashedPasswordFile) or die "Can't read file '$hashedPasswordFile'!";
+        $hashedPassword = <$f>;
+        chomp $hashedPassword;
+    }
+    if ($passwordFile) {
+        open(my $f, '<', $passwordFile) or die "Can't read file '$passwordFile'!";
+        $password = <$f>;
+        chomp $password;
+    }
+
+    if ($hashedPassword) {
+        if (index($hashedPassword, "grub.pbkdf2.") == 0) {
+            $conf .= "\npassword_pbkdf2 $name $hashedPassword";
+        }
+        else {
+            die "Password hash for GRUB user '$name' is not valid!";
+        }
+    }
+    elsif ($password) {
+        $conf .= "\npassword $name $password";
+    }
+    else {
+        die "GRUB user '$name' has no password!";
+    }
+    push(@users, $name);
+}
+if (@users) {
+    $conf .= "\nset superusers=\"" . join(' ',@users) . "\"\n";
+}
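+# (Such hashes are typically generated with GRUB's grub-mkpasswd-pbkdf2 tool;
+# they carry the "grub.pbkdf2." prefix checked above.)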
+
+if ($copyKernels == 0) {
+    $conf .= "
+        " . $grubStore->search;
+}
+# FIXME: should use grub-mkconfig.
+my $defaultEntryText = $defaultEntry;
+if ($saveDefault) {
+    $defaultEntryText = "\"\${saved_entry}\"";
+}
+$conf .= "
+    " . $grubBoot->search . "
+    if [ -s \$prefix/grubenv ]; then
+      load_env
+    fi
+
+    # ‘grub-reboot’ sets a one-time saved entry, which we process here and
+    # then delete.
+    if [ \"\${next_entry}\" ]; then
+      set default=\"\${next_entry}\"
+      set next_entry=
+      save_env next_entry
+      set timeout=1
+      set boot_once=true
+    else
+      set default=$defaultEntryText
+      set timeout=$timeout
+    fi
+    set timeout_style=$timeoutStyle
+
+    function savedefault {
+        if [ -z \"\${boot_once}\" ]; then
+        saved_entry=\"\${chosen}\"
+        save_env saved_entry
+        fi
+    }
+
+    # Setup the graphics stack for bios and efi systems
+    if [ \"\${grub_platform}\" = \"efi\" ]; then
+      insmod efi_gop
+      insmod efi_uga
+    else
+      insmod vbe
+    fi
+";
+
+if ($font) {
+    copy $font, "$bootPath/converted-font.pf2" or die "cannot copy $font to $bootPath: $!\n";
+    $conf .= "
+        insmod font
+        if loadfont " . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/converted-font.pf2; then
+          insmod gfxterm
+          if [ \"\${grub_platform}\" = \"efi\" ]; then
+            set gfxmode=$gfxmodeEfi
+            set gfxpayload=$gfxpayloadEfi
+          else
+            set gfxmode=$gfxmodeBios
+            set gfxpayload=$gfxpayloadBios
+          fi
+          terminal_output gfxterm
+        fi
+    ";
+}
+if ($splashImage) {
+    # Keeps the image's extension.
+    my ($filename, $dirs, $suffix) = fileparse($splashImage, qr"\..[^.]*$");
+    # The module for jpg is jpeg.
+    if ($suffix eq ".jpg") {
+        $suffix = ".jpeg";
+    }
+    if ($backgroundColor) {
+        $conf .= "
+        background_color '$backgroundColor'
+        ";
+    }
+    copy $splashImage, "$bootPath/background$suffix" or die "cannot copy $splashImage to $bootPath: $!\n";
+    $conf .= "
+        insmod " . substr($suffix, 1) . "
+        if background_image --mode '$splashMode' " . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/background$suffix; then
+          set color_normal=white/black
+          set color_highlight=black/white
+        else
+          set menu_color_normal=cyan/blue
+          set menu_color_highlight=white/blue
+        fi
+    ";
+}
+
+rmtree("$bootPath/theme") or die "cannot clean up theme folder in $bootPath\n" if -e "$bootPath/theme";
+
+if ($theme) {
+    # Copy theme
+    rcopy($theme, "$bootPath/theme") or die "cannot copy $theme to $bootPath\n";
+
+    # Detect which modules will need to be loaded
+    my $with_png = 0;
+    my $with_jpeg = 0;
+
+    find({ wanted => sub {
+            if ($_ =~ /\.png$/i) {
+                $with_png = 1;
+            }
+            elsif ($_ =~ /\.jpe?g$/i) {
+                $with_jpeg = 1;
+            }
+    }, no_chdir => 1 }, $theme);
+
+    if ($with_png) {
+        $conf .= "
+            insmod png
+        "
+    }
+    if ($with_jpeg) {
+        $conf .= "
+            insmod jpeg
+        "
+    }
+
+    $conf .= "
+        # Sets theme.
+        set theme=" . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/theme/theme.txt
+        export theme
+        # Load theme fonts, if any
+    ";
+
+    find( { wanted => sub {
+        if ($_ =~ /\.pf2$/i) {
+            $font = File::Spec->abs2rel($File::Find::name, $theme);
+            $conf .= "
+                loadfont " . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/theme/$font
+            ";
+        }
+    }, no_chdir => 1 }, $theme );
+}
+
+$conf .= "$extraConfig\n";
+
+
+# Generate the menu entries.
+$conf .= "\n";
+
+my %copied;
+mkpath("$bootPath/kernels", 0, 0755) if $copyKernels;
+
+sub copyToKernelsDir {
+    my ($path) = @_;
+    return $grubStore->path . substr($path, length("/nix/store")) unless $copyKernels;
+    $path =~ /\/nix\/store\/(.*)/ or die;
+    my $name = $1; $name =~ s/\//-/g;
+    my $dst = "$bootPath/kernels/$name";
+    # Don't copy the file if $dst already exists.  This means that we
+    # have to create $dst atomically to prevent partially copied
+    # kernels or initrd if this script is ever interrupted.
+    if (! -e $dst) {
+        my $tmp = "$dst.tmp";
+        copy $path, $tmp or die "cannot copy $path to $tmp: $!\n";
+        rename $tmp, $dst or die "cannot rename $tmp to $dst: $!\n";
+    }
+    $copied{$dst} = 1;
+    return ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/kernels/$name";
+}
+
+sub addEntry {
+    my ($name, $path, $options, $current) = @_;
+    return unless -e "$path/kernel" && -e "$path/initrd";
+
+    my $kernel = copyToKernelsDir(Cwd::abs_path("$path/kernel"));
+    my $initrd = copyToKernelsDir(Cwd::abs_path("$path/initrd"));
+
+    # Include second initrd with secrets
+    if (-e -x "$path/append-initrd-secrets") {
+        # Name the initrd secrets after the system from which they're derived.
+        my $systemName = basename(Cwd::abs_path("$path"));
+        my $initrdSecretsPath = "$bootPath/kernels/$systemName-secrets";
+
+        mkpath(dirname($initrdSecretsPath), 0, 0755);
+        my $oldUmask = umask;
+        # Make sure initrd is not world readable (won't work if /boot is FAT)
+        umask 0137;
+        my $initrdSecretsPathTemp = File::Temp::mktemp("$initrdSecretsPath.XXXXXXXX");
+        if (system("$path/append-initrd-secrets", $initrdSecretsPathTemp) != 0) {
+          if ($current) {
+              die "failed to create initrd secrets $!\n";
+          } else {
+              say STDERR "warning: failed to create initrd secrets for \"$name\", an older generation";
+              say STDERR "note: this is normal after having removed or renamed a file in `boot.initrd.secrets`";
+          }
+        }
+        # Check whether any secrets were actually added
+        if (-e $initrdSecretsPathTemp && ! -z _) {
+            rename $initrdSecretsPathTemp, $initrdSecretsPath or die "failed to move initrd secrets into place: $!\n";
+            $copied{$initrdSecretsPath} = 1;
+            $initrd .= " " . ($grubBoot->path eq "/" ? "" : $grubBoot->path) . "/kernels/$systemName-secrets";
+        } else {
+            unlink $initrdSecretsPathTemp;
+            rmdir dirname($initrdSecretsPathTemp);
+        }
+        umask $oldUmask;
+    }
+
+    my $xen = -e "$path/xen.gz" ? copyToKernelsDir(Cwd::abs_path("$path/xen.gz")) : undef;
+
+    # FIXME: $confName
+
+    my $kernelParams =
+        "init=" . Cwd::abs_path("$path/init") . " " .
+        readFile("$path/kernel-params");
+    my $xenParams = $xen && -e "$path/xen-params" ? readFile("$path/xen-params") : "";
+
+    $conf .= "menuentry \"$name\" " . $options . " {\n";
+    if ($saveDefault) {
+        $conf .= "  savedefault\n";
+    }
+    $conf .= $grubBoot->search . "\n";
+    if ($copyKernels == 0) {
+        $conf .= $grubStore->search . "\n";
+    }
+    $conf .= "  $extraPerEntryConfig\n" if $extraPerEntryConfig;
+    $conf .= "  multiboot $xen $xenParams\n" if $xen;
+    $conf .= "  " . ($xen ? "module" : "linux") . " $kernel $kernelParams\n";
+    $conf .= "  " . ($xen ? "module" : "initrd") . " $initrd\n";
+    $conf .= "}\n\n";
+}
+
+sub addGeneration {
+    my ($name, $nameSuffix, $path, $options, $current) = @_;
+
+    # Do not search for grand children
+    my @links = sort (glob "$path/specialisation/*");
+
+    if ($current != 1 && scalar(@links) != 0) {
+        $conf .= "submenu \"> $name$nameSuffix\" --class submenu {\n";
+    }
+
+    addEntry("$name" . (scalar(@links) == 0 ? "" : " - Default") . $nameSuffix, $path, $options, $current);
+
+    # Find all the children of the current default configuration
+    # Do not search for grand children
+    foreach my $link (@links) {
+
+        my $entryName = "";
+
+        my $cfgName = readFile("$link/configuration-name");
+
+        my $date = strftime("%F", localtime(lstat($link)->mtime));
+        my $version =
+            -e "$link/nixos-version"
+            ? readFile("$link/nixos-version")
+            : basename((glob(dirname(Cwd::abs_path("$link/kernel")) . "/lib/modules/*"))[0]);
+
+        if ($cfgName) {
+            $entryName = $cfgName;
+        } else {
+            my $linkname = basename($link);
+            $entryName = "($linkname - $date - $version)";
+        }
+        addEntry("$name - $entryName", $link, "", 1);
+    }
+
+    if ($current != 1 && scalar(@links) != 0) {
+        $conf .= "}\n";
+    }
+}
+
+# Add default entries.
+$conf .= "$extraEntries\n" if $extraEntriesBeforeNixOS;
+
+addGeneration("@distroName@", "", $defaultConfig, $entryOptions, 1);
+
+$conf .= "$extraEntries\n" unless $extraEntriesBeforeNixOS;
+
+my $grubBootPath = $grubBoot->path;
+# extraEntries could refer to @bootRoot@, which we have to substitute
+$conf =~ s/\@bootRoot\@/$grubBootPath/g;
+
+# Emit submenus for all system profiles.
+sub addProfile {
+    my ($profile, $description) = @_;
+
+    # Add entries for all generations of this profile.
+    $conf .= "submenu \"$description\" --class submenu {\n";
+
+    sub nrFromGen { my ($x) = @_; $x =~ /\/\w+-(\d+)-link/; return $1; }
+
+    my @links = sort
+        { nrFromGen($b) <=> nrFromGen($a) }
+        (glob "$profile-*-link");
+
+    my $curEntry = 0;
+    foreach my $link (@links) {
+        last if $curEntry++ >= $configurationLimit;
+        if (! -e "$link/nixos-version") {
+            warn "skipping corrupt system profile entry ‘$link’\n";
+            next;
+        }
+        my $date = strftime("%F", localtime(lstat($link)->mtime));
+        my $version =
+            -e "$link/nixos-version"
+            ? readFile("$link/nixos-version")
+            : basename((glob(dirname(Cwd::abs_path("$link/kernel")) . "/lib/modules/*"))[0]);
+        addGeneration("@distroName@ - Configuration " . nrFromGen($link), " ($date - $version)", $link, $subEntryOptions, 0);
+    }
+
+    $conf .= "}\n";
+}
+
+addProfile "/nix/var/nix/profiles/system", "@distroName@ - All configurations";
+
+for my $profile (glob "/nix/var/nix/profiles/system-profiles/*") {
+    my $name = basename($profile);
+    next unless $name =~ /^\w+$/;
+    addProfile $profile, "@distroName@ - Profile '$name'";
+}
+
+# extraPrepareConfig could refer to @bootPath@, which we have to substitute
+$extraPrepareConfig =~ s/\@bootPath\@/$bootPath/g;
+
+# Run extraPrepareConfig in sh
+if ($extraPrepareConfig ne "") {
+    system((get("shell"), "-c", $extraPrepareConfig));
+}
+
+# write the GRUB config.
+my $confFile = "$bootPath/grub/grub.cfg";
+my $tmpFile = $confFile . ".tmp";
+writeFile($tmpFile, $conf);
+
+
+# check whether to install GRUB EFI or not
+sub getEfiTarget {
+    if (($grub ne "") && ($grubEfi ne "")) {
+        # EFI can only be installed when target is set;
+        # A target is also required then for non-EFI grub
+        if (($grubTarget eq "") || ($grubTargetEfi eq "")) { die }
+        else { return "both" }
+    } elsif (($grub ne "") && ($grubEfi eq "")) {
+        # TODO: It would be safer to disallow non-EFI grub installation if no target is given.
+        #       If no target is given, then grub auto-detects the target, which can lead to errors.
+        #       E.g. it seems as if grub would auto-detect an EFI target based on the availability
+        #       of an EFI partition.
+        #       However, it seems that auto-detection is currently relied on for non-x86_64 and non-i386
+        #       architectures in NixOS. That would have to be fixed in the NixOS modules first.
+        return "no"
+    } elsif (($grub eq "") && ($grubEfi ne "")) {
+        # EFI can only be installed when target is set;
+        if ($grubTargetEfi eq "") { die }
+        else {return "only" }
+    } else {
+        # prevent an installation if neither grub nor grubEfi is given
+        return "neither"
+    }
+}
+
+my $efiTarget = getEfiTarget();
+
+# Append entries detected by os-prober
+if (get("useOSProber") eq "true") {
+    if ($saveDefault) {
+        # os-prober will read this to determine if "savedefault" should be added to generated entries
+        $ENV{'GRUB_SAVEDEFAULT'} = "true";
+    }
+
+    my $targetpackage = ($efiTarget eq "no") ? $grub : $grubEfi;
+    system(get("shell"), "-c", "pkgdatadir=$targetpackage/share/grub $targetpackage/etc/grub.d/30_os-prober >> $tmpFile");
+}
+
+# Atomically switch to the new config
+rename $tmpFile, $confFile or die "cannot rename $tmpFile to $confFile: $!\n";
+
+
+# Remove obsolete files from $bootPath/kernels.
+foreach my $fn (glob "$bootPath/kernels/*") {
+    next if defined $copied{$fn};
+    print STDERR "removing obsolete file $fn\n";
+    unlink $fn;
+}
+
+
+#
+# Install GRUB if the parameters changed from the last time we installed it.
+#
+
+struct(GrubState => {
+    name => '$',
+    version => '$',
+    efi => '$',
+    devices => '$',
+    efiMountPoint => '$',
+    extraGrubInstallArgs => '@',
+});
+# If you add something to the state file, only add it to the end
+# because it is read line-by-line.
+sub readGrubState {
+    my $defaultGrubState = GrubState->new(name => "", version => "", efi => "", devices => "", efiMountPoint => "", extraGrubInstallArgs => () );
+    open FILE, "<$bootPath/grub/state" or return $defaultGrubState;
+    local $/ = "\n";
+    my $name = <FILE>;
+    chomp($name);
+    my $version = <FILE>;
+    chomp($version);
+    my $efi = <FILE>;
+    chomp($efi);
+    my $devices = <FILE>;
+    chomp($devices);
+    my $efiMountPoint = <FILE>;
+    chomp($efiMountPoint);
+    # Historically, arguments in the state file were stored one per line, but that
+    # gets really messy when newlines are involved, when structured arguments
+    # like lists are needed (they would require a separator encoding), or, even worse,
+    # when a setting needs to be removed in the future. Thus, the 6th line is a JSON
+    # object that can store structured data, with named keys, and all new state
+    # should go in there.
+    my $jsonStateLine = <FILE>;
+    # For historical reasons we do not check the values above for un-definedness
+    # (that is, when the state file has too few lines and EOF is reached),
+    # because the above come from the first version of this logic and are thus
+    # guaranteed to be present.
+    $jsonStateLine = defined $jsonStateLine ? $jsonStateLine : '{}'; # empty JSON object
+    chomp($jsonStateLine);
+    if ($jsonStateLine eq "") {
+        $jsonStateLine = '{}'; # empty JSON object
+    }
+    my %jsonState = %{decode_json($jsonStateLine)};
+    my @extraGrubInstallArgs = exists($jsonState{'extraGrubInstallArgs'}) ? @{$jsonState{'extraGrubInstallArgs'}} : ();
+    close FILE;
+    my $grubState = GrubState->new(name => $name, version => $version, efi => $efi, devices => $devices, efiMountPoint => $efiMountPoint, extraGrubInstallArgs => \@extraGrubInstallArgs );
+    return $grubState
+}
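+# For illustration only (all values hypothetical), a state file might contain:
+#   grub-2.12
+#   2.12
+#   both
+#   /dev/sda,/dev/sdb
+#   /boot/efi
+#   {"extraGrubInstallArgs":["--modules=nativedisk"]}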
+
+my @deviceTargets = getList('devices');
+my $prevGrubState = readGrubState();
+my @prevDeviceTargets = split/,/, $prevGrubState->devices;
+my @extraGrubInstallArgs = getList('extraGrubInstallArgs');
+my @prevExtraGrubInstallArgs = @{$prevGrubState->extraGrubInstallArgs};
+
+my $devicesDiffer = scalar (List::Compare->new( '-u', '-a', \@deviceTargets, \@prevDeviceTargets)->get_symmetric_difference());
+my $extraGrubInstallArgsDiffer = scalar (List::Compare->new( '-u', '-a', \@extraGrubInstallArgs, \@prevExtraGrubInstallArgs)->get_symmetric_difference());
+my $nameDiffer = get("fullName") ne $prevGrubState->name;
+my $versionDiffer = get("fullVersion") ne $prevGrubState->version;
+my $efiDiffer = $efiTarget ne $prevGrubState->efi;
+my $efiMountPointDiffer = $efiSysMountPoint ne $prevGrubState->efiMountPoint;
+if (($ENV{'NIXOS_INSTALL_GRUB'} // "") eq "1") {
+    warn "NIXOS_INSTALL_GRUB env var deprecated, use NIXOS_INSTALL_BOOTLOADER";
+    $ENV{'NIXOS_INSTALL_BOOTLOADER'} = "1";
+}
+my $requireNewInstall = $devicesDiffer || $extraGrubInstallArgsDiffer || $nameDiffer || $versionDiffer || $efiDiffer || $efiMountPointDiffer || (($ENV{'NIXOS_INSTALL_BOOTLOADER'} // "") eq "1");
+
+# install a symlink so that grub can detect the boot drive
+my $tmpDir = File::Temp::tempdir(CLEANUP => 1) or die "Failed to create temporary space: $!";
+symlink "$bootPath", "$tmpDir/boot" or die "Failed to symlink $tmpDir/boot: $!";
+
+# install non-EFI GRUB
+if (($requireNewInstall != 0) && ($efiTarget eq "no" || $efiTarget eq "both")) {
+    foreach my $dev (@deviceTargets) {
+        next if $dev eq "nodev";
+        print STDERR "installing the GRUB 2 boot loader on $dev...\n";
+        my @command = ("$grub/sbin/grub-install", "--recheck", "--root-directory=$tmpDir", Cwd::abs_path($dev), @extraGrubInstallArgs);
+        if ($forceInstall eq "true") {
+            push @command, "--force";
+        }
+        if ($grubTarget ne "") {
+            push @command, "--target=$grubTarget";
+        }
+        (system @command) == 0 or die "$0: installation of GRUB on $dev failed: $!\n";
+    }
+}
+
+
+# install EFI GRUB
+if (($requireNewInstall != 0) && ($efiTarget eq "only" || $efiTarget eq "both")) {
+    print STDERR "installing the GRUB 2 boot loader into $efiSysMountPoint...\n";
+    my @command = ("$grubEfi/sbin/grub-install", "--recheck", "--target=$grubTargetEfi", "--boot-directory=$bootPath", "--efi-directory=$efiSysMountPoint", @extraGrubInstallArgs);
+    if ($forceInstall eq "true") {
+        push @command, "--force";
+    }
+    push @command, "--bootloader-id=$bootloaderId";
+    if ($canTouchEfiVariables ne "true") {
+        push @command, "--no-nvram";
+        push @command, "--removable" if $efiInstallAsRemovable eq "true";
+    }
+
+    (system @command) == 0 or die "$0: installation of GRUB EFI into $efiSysMountPoint failed: $!\n";
+}
+
+
+# update GRUB state file
+if ($requireNewInstall != 0) {
+    # Temp file for atomic rename.
+    my $stateFile = "$bootPath/grub/state";
+    my $stateFileTmp = $stateFile . ".tmp";
+
+    open FILE, ">$stateFileTmp" or die "cannot create $stateFileTmp: $!\n";
+    print FILE get("fullName"), "\n" or die;
+    print FILE get("fullVersion"), "\n" or die;
+    print FILE $efiTarget, "\n" or die;
+    print FILE join( ",", @deviceTargets ), "\n" or die;
+    print FILE $efiSysMountPoint, "\n" or die;
+    my %jsonState = (
+        extraGrubInstallArgs => \@extraGrubInstallArgs
+    );
+    my $jsonStateLine = encode_json(\%jsonState);
+    print FILE $jsonStateLine, "\n" or die;
+    close FILE or die;
+
+    # Atomically switch to the new state file
+    rename $stateFileTmp, $stateFile or die "cannot rename $stateFileTmp to $stateFile: $!\n";
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/grub/ipxe.nix b/nixpkgs/nixos/modules/system/boot/loader/grub/ipxe.nix
new file mode 100644
index 000000000000..d926b7ceaa6e
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/grub/ipxe.nix
@@ -0,0 +1,60 @@
+# This module adds a scripted iPXE entry to the GRUB boot menu.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  scripts = builtins.attrNames config.boot.loader.grub.ipxe;
+
+  grubEntry = name:
+    ''
+      menuentry "iPXE - ${name}" {
+        linux16 @bootRoot@/ipxe.lkrn
+        initrd16 @bootRoot@/${name}.ipxe
+      }
+
+    '';
+
+  scriptFile = name:
+    let
+      value = builtins.getAttr name config.boot.loader.grub.ipxe;
+    in
+    if builtins.typeOf value == "path" then value
+    else builtins.toFile "${name}.ipxe" value;
+in
+{
+  options =
+    { boot.loader.grub.ipxe = mkOption {
+        type = types.attrsOf (types.either types.path types.str);
+        description =
+          lib.mdDoc ''
+            Set of iPXE scripts available for
+            booting from the GRUB boot menu.
+          '';
+        default = { };
+        example = literalExpression ''
+          { demo = '''
+              #!ipxe
+              dhcp
+              chain http://boot.ipxe.org/demo/boot.php
+            ''';
+          }
+        '';
+      };
+    };
+
+  config = mkIf (builtins.length scripts != 0) {
+
+    boot.loader.grub.extraEntries = toString (map grubEntry scripts);
+
+    boot.loader.grub.extraFiles =
+      { "ipxe.lkrn" = "${pkgs.ipxe}/ipxe.lkrn"; }
+      //
+      builtins.listToAttrs ( map
+        (name: { name = name+".ipxe"; value = scriptFile name; })
+        scripts
+      );
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/grub/memtest.nix b/nixpkgs/nixos/modules/system/boot/loader/grub/memtest.nix
new file mode 100644
index 000000000000..8e68431ac571
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/grub/memtest.nix
@@ -0,0 +1,69 @@
+# This module adds Memtest86+ to the GRUB boot menu.
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  memtest86 = pkgs.memtest86plus;
+  cfg = config.boot.loader.grub.memtest86;
+in
+
+{
+  options = {
+
+    boot.loader.grub.memtest86 = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Make Memtest86+, a memory testing program, available from the GRUB
+          boot menu.
+        '';
+      };
+
+      params = mkOption {
+        default = [];
+        example = [ "console=ttyS0,115200" ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Parameters added to the Memtest86+ command line. As of memtest86+ 5.01
+          the following (apparently undocumented) parameters are
+          accepted:
+
+          - `console=...`, set up a serial console.
+            Examples:
+            `console=ttyS0`,
+            `console=ttyS0,9600` or
+            `console=ttyS0,115200n8`.
+
+          - `btrace`, enable boot trace.
+
+          - `maxcpus=N`, limit number of CPUs.
+
+          - `onepass`, run one pass and exit if there
+            are no errors.
+
+          - `tstlist=...`, list of tests to run.
+            Example: `0,1,2`.
+
+          - `cpumask=...`, set a CPU mask, to select CPUs
+            to use for testing.
+
+          This list of command line options was obtained by reading the
+          Memtest86+ source code.
+        '';
+      };
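+
+      # A minimal usage sketch (values are only an example): enabling the entry
+      # with a serial console might look like
+      #   boot.loader.grub.memtest86.enable = true;
+      #   boot.loader.grub.memtest86.params = [ "console=ttyS0,115200" ];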
+
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot.loader.grub.extraEntries = ''
+      menuentry "Memtest86+" {
+        linux @bootRoot@/memtest.bin ${toString cfg.params}
+      }
+    '';
+    boot.loader.grub.extraFiles."memtest.bin" = "${memtest86}/memtest.bin";
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/init-script/init-script-builder.sh b/nixpkgs/nixos/modules/system/boot/loader/init-script/init-script-builder.sh
new file mode 100644
index 000000000000..755ea259c425
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/init-script/init-script-builder.sh
@@ -0,0 +1,92 @@
+#! @bash@/bin/sh -e
+
+shopt -s nullglob
+
+export PATH=/empty
+for i in @path@; do PATH=$PATH:$i/bin; done
+
+if test $# -ne 1; then
+    echo "Usage: init-script-builder.sh DEFAULT-CONFIG"
+    exit 1
+fi
+
+defaultConfig="$1"
+
+
+[ "$(stat -f -c '%i' /)" = "$(stat -f -c '%i' /boot)" ] || {
+  # see grub-menu-builder.sh
+  echo "WARNING: /boot being on a different filesystem not supported by init-script-builder.sh"
+}
+
+
+
+target="/sbin/init"
+targetOther="/boot/init-other-configurations-contents.txt"
+
+tmp="$target.tmp"
+tmpOther="$targetOther.tmp"
+
+
+configurationCounter=0
+numAlienEntries=`cat <<EOF | egrep '^[[:space:]]*title' | wc -l
+@extraEntries@
+EOF`
+
+
+
+
+# Add an entry to $targetOther
+addEntry() {
+    local name="$1"
+    local path="$2"
+    local shortSuffix="$3"
+
+    configurationCounter=$((configurationCounter + 1))
+
+    local stage2=$path/init
+
+    content="$(
+      echo "#!/bin/sh"
+      echo "# $name"
+      echo "# created by init-script-builder.sh"
+      echo "exec $stage2"
+    )"
+
+    [ "$path" != "$defaultConfig" ] || {
+      echo "$content" > $tmp
+      echo "# older configurations: $targetOther" >> $tmp
+      chmod +x $tmp
+    }
+
+    echo -e "$content\n\n" >> $tmpOther
+}
+
+
+mkdir -p /boot /sbin
+
+addEntry "@distroName@ - Default" $defaultConfig ""
+
+# Add all specialisations of the default configuration to the menu.
+for link in $( (ls -d $defaultConfig/specialisation/* ) | sort -n); do
+    date=$(stat --printf="%y\n" $link | sed 's/\..*//')
+    addEntry "@distroName@ - variation" $link ""
+done
+
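+# Add all generations of the system profile to the menu, in reverse
+# (most recent to least recent) order.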
+for generation in $(
+    (cd /nix/var/nix/profiles && ls -d system-*-link) \
+    | sed 's/system-\([0-9]\+\)-link/\1/' \
+    | sort -n -r); do
+    link=/nix/var/nix/profiles/system-$generation-link
+    date=$(stat --printf="%y\n" $link | sed 's/\..*//')
+    if [ -d $link/kernel ]; then
+      kernelVersion=$(cd $(dirname $(readlink -f $link/kernel))/lib/modules && echo *)
+      suffix="($date - $kernelVersion)"
+    else
+      suffix="($date)"
+    fi
+    addEntry "@distroName@ - Configuration $generation $suffix" $link "$generation ($date)"
+done
+
+mv $tmpOther $targetOther
+mv $tmp $target
diff --git a/nixpkgs/nixos/modules/system/boot/loader/init-script/init-script.nix b/nixpkgs/nixos/modules/system/boot/loader/init-script/init-script.nix
new file mode 100644
index 000000000000..4d33ed6b665b
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/init-script/init-script.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  initScriptBuilder = pkgs.substituteAll {
+    src = ./init-script-builder.sh;
+    isExecutable = true;
+    inherit (pkgs) bash;
+    inherit (config.system.nixos) distroName;
+    path = [pkgs.coreutils pkgs.gnused pkgs.gnugrep];
+  };
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    boot.loader.initScript = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Some systems require a /sbin/init script to be started,
+          or having one makes starting NixOS easier.
+          This applies to certain hosting services and to User Mode Linux.
+
+          Additionally, this script creates
+          /boot/init-other-configurations-contents.txt, containing the
+          contents of the remaining configurations. You can copy and paste them
+          into /sbin/init manually, e.g. when running a rescue system.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf config.boot.loader.initScript.enable {
+
+    system.build.installBootLoader = initScriptBuilder;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/loader.nix b/nixpkgs/nixos/modules/system/boot/loader/loader.nix
new file mode 100644
index 000000000000..0e33264271bf
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/loader.nix
@@ -0,0 +1,20 @@
+{ lib, ... }:
+
+with lib;
+
+{
+  imports = [
+    (mkRenamedOptionModule [ "boot" "loader" "grub" "timeout" ] [ "boot" "loader" "timeout" ])
+    (mkRenamedOptionModule [ "boot" "loader" "gummiboot" "timeout" ] [ "boot" "loader" "timeout" ])
+  ];
+
+  options = {
+    boot.loader.timeout = mkOption {
+      default = 5;
+      type = types.nullOr types.int;
+      description = lib.mdDoc ''
+        Timeout (in seconds) until the loader boots the default menu item. Use null if the loader menu should be displayed indefinitely.
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix
new file mode 100644
index 000000000000..64e106036abd
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.nix
@@ -0,0 +1,9 @@
+{ pkgs, configTxt, firmware ? pkgs.raspberrypifw }:
+
+pkgs.substituteAll {
+  src = ./raspberrypi-builder.sh;
+  isExecutable = true;
+  inherit (pkgs) bash;
+  path = [pkgs.coreutils pkgs.gnused pkgs.gnugrep];
+  inherit firmware configTxt;
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.sh b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.sh
new file mode 100644
index 000000000000..0541ca1ba622
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi-builder.sh
@@ -0,0 +1,143 @@
+#! @bash@/bin/sh
+
+# This can end up being called disregarding the shebang.
+set -e
+
+shopt -s nullglob
+
+export PATH=/empty
+for i in @path@; do PATH=$PATH:$i/bin; done
+
+usage() {
+    echo "usage: $0 -c <path-to-default-configuration> [-d <boot-dir>]" >&2
+    exit 1
+}
+
+default=                # Default configuration
+target=/boot            # Target directory
+
+while getopts "c:d:" opt; do
+    case "$opt" in
+        c) default="$OPTARG" ;;
+        d) target="$OPTARG" ;;
+        \?) usage ;;
+    esac
+done
+
+echo "updating the boot generations directory..."
+
+mkdir -p $target/old
+
+# Convert a path to a file in the Nix store such as
+# /nix/store/<hash>-<name>/file to <hash>-<name>-<file>.
+cleanName() {
+    local path="$1"
+    echo "$path" | sed 's|^/nix/store/||' | sed 's|/|-|g'
+}
+
+# Copy a file from the Nix store to $target/kernels.
+declare -A filesCopied
+
+copyToKernelsDir() {
+    local src="$1"
+    local dst="$target/old/$(cleanName $src)"
+    # Don't copy the file if $dst already exists.  This means that we
+    # have to create $dst atomically to prevent partially copied
+    # kernels or initrd if this script is ever interrupted.
+    if ! test -e $dst; then
+        local dstTmp=$dst.tmp.$$
+        cp $src $dstTmp
+        mv $dstTmp $dst
+    fi
+    filesCopied[$dst]=1
+    result=$dst
+}
+
+copyForced() {
+    local src="$1"
+    local dst="$2"
+    cp $src $dst.tmp
+    mv $dst.tmp $dst
+}
+
+outdir=$target/old
+mkdir -p $outdir || true
+
+# Copy its kernel and initrd to $target/old.
+addEntry() {
+    local path="$1"
+    local generation="$2"
+
+    if ! test -e $path/kernel -a -e $path/initrd; then
+        return
+    fi
+
+    local kernel=$(readlink -f $path/kernel)
+    local initrd=$(readlink -f $path/initrd)
+    local dtb_path=$(readlink -f $path/dtbs)
+
+    if test -n "@copyKernels@"; then
+        copyToKernelsDir $kernel; kernel=$result
+        copyToKernelsDir $initrd; initrd=$result
+    fi
+
+    echo $(readlink -f $path) > $outdir/$generation-system
+    echo $(readlink -f $path/init) > $outdir/$generation-init
+    cp $path/kernel-params $outdir/$generation-cmdline.txt
+    echo $initrd > $outdir/$generation-initrd
+    echo $kernel > $outdir/$generation-kernel
+
+    if test "$generation" = "default"; then
+      copyForced $kernel $target/kernel.img
+      copyForced $initrd $target/initrd
+      for dtb in $dtb_path/{broadcom,}/bcm*.dtb; do
+        dst="$target/$(basename $dtb)"
+        copyForced $dtb "$dst"
+        filesCopied[$dst]=1
+      done
+      cp "$(readlink -f "$path/init")" $target/nixos-init
+      echo "`cat $path/kernel-params` init=$path/init" >$target/cmdline.txt
+    fi
+}
+
+addEntry $default default
+
+# Add all generations of the system profile to the menu, in reverse
+# (most recent to least recent) order.
+for generation in $(
+    (cd /nix/var/nix/profiles && ls -d system-*-link) \
+    | sed 's/system-\([0-9]\+\)-link/\1/' \
+    | sort -n -r); do
+    link=/nix/var/nix/profiles/system-$generation-link
+    addEntry $link $generation
+done
+
+# Add the firmware files
+fwdir=@firmware@/share/raspberrypi/boot/
+copyForced $fwdir/bootcode.bin  $target/bootcode.bin
+copyForced $fwdir/fixup.dat     $target/fixup.dat
+copyForced $fwdir/fixup4.dat    $target/fixup4.dat
+copyForced $fwdir/fixup4cd.dat  $target/fixup4cd.dat
+copyForced $fwdir/fixup4db.dat  $target/fixup4db.dat
+copyForced $fwdir/fixup4x.dat   $target/fixup4x.dat
+copyForced $fwdir/fixup_cd.dat  $target/fixup_cd.dat
+copyForced $fwdir/fixup_db.dat  $target/fixup_db.dat
+copyForced $fwdir/fixup_x.dat   $target/fixup_x.dat
+copyForced $fwdir/start.elf     $target/start.elf
+copyForced $fwdir/start4.elf    $target/start4.elf
+copyForced $fwdir/start4cd.elf  $target/start4cd.elf
+copyForced $fwdir/start4db.elf  $target/start4db.elf
+copyForced $fwdir/start4x.elf   $target/start4x.elf
+copyForced $fwdir/start_cd.elf  $target/start_cd.elf
+copyForced $fwdir/start_db.elf  $target/start_db.elf
+copyForced $fwdir/start_x.elf   $target/start_x.elf
+
+# Add the config.txt
+copyForced @configTxt@ $target/config.txt
+
+# Remove obsolete files from $target and $target/old.
+for fn in $target/old/*linux* $target/old/*initrd-initrd* $target/bcm*.dtb; do
+    if ! test "${filesCopied[$fn]}" = 1; then
+        rm -vf -- "$fn"
+    fi
+done
diff --git a/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix
new file mode 100644
index 000000000000..9c9bee93de8a
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/raspberrypi.nix
@@ -0,0 +1,151 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.boot.loader.raspberryPi;
+
+  builderUboot = import ./uboot-builder.nix { inherit pkgs configTxt; inherit (cfg) version; };
+  builderGeneric = import ./raspberrypi-builder.nix { inherit pkgs configTxt; };
+
+  builder =
+    if cfg.uboot.enable then
+      "${builderUboot} -g ${toString cfg.uboot.configurationLimit} -t ${timeoutStr} -c"
+    else
+      "${builderGeneric} -c";
+
+  blCfg = config.boot.loader;
+  timeoutStr = if blCfg.timeout == null then "-1" else toString blCfg.timeout;
+
+  isAarch64 = pkgs.stdenv.hostPlatform.isAarch64;
+  optional = pkgs.lib.optionalString;
+
+  configTxt =
+    pkgs.writeText "config.txt" (''
+      # U-Boot used to need this to work, regardless of whether UART is actually used or not.
+      # TODO: check when/if this can be removed.
+      enable_uart=1
+
+      # Prevent the firmware from smashing the framebuffer setup done by the mainline kernel
+      # when attempting to show low-voltage or overtemperature warnings.
+      avoid_warnings=1
+    '' + optional isAarch64 ''
+      # Boot in 64-bit mode.
+      arm_64bit=1
+    '' + (if cfg.uboot.enable then ''
+      kernel=u-boot-rpi.bin
+    '' else ''
+      kernel=kernel.img
+      initramfs initrd followkernel
+    '') + optional (cfg.firmwareConfig != null) cfg.firmwareConfig);
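+
+  # For illustration: with uboot.enable = true on an aarch64 host (and no
+  # firmwareConfig), the generated config.txt would contain roughly:
+  #   enable_uart=1
+  #   avoid_warnings=1
+  #   arm_64bit=1
+  #   kernel=u-boot-rpi.bin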
+
+in
+
+{
+  options = {
+
+    boot.loader.raspberryPi = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether to create files with the system generations in
+          `/boot`.
+          `/boot/old` will hold files from old generations.
+
+          ::: {.note}
+          These options are deprecated, unsupported, and may not work as expected.
+          :::
+        '';
+      };
+
+      version = mkOption {
+        default = 2;
+        type = types.enum [ 0 1 2 3 4 ];
+        description = lib.mdDoc "";
+      };
+
+      uboot = {
+        enable = mkOption {
+          default = false;
+          type = types.bool;
+          description = lib.mdDoc ''
+            Whether to use U-Boot as the boot manager for the Raspberry Pi.
+
+            ::: {.note}
+            These options are deprecated, unsupported, and may not work as expected.
+            :::
+          '';
+        };
+
+        configurationLimit = mkOption {
+          default = 20;
+          example = 10;
+          type = types.int;
+          description = lib.mdDoc ''
+            Maximum number of configurations in the boot menu.
+
+            ::: {.note}
+            These options are deprecated, unsupported, and may not work as expected.
+            :::
+          '';
+        };
+
+      };
+
+      firmwareConfig = mkOption {
+        default = null;
+        type = types.nullOr types.lines;
+        description = lib.mdDoc ''
+          Extra options that will be appended to `/boot/config.txt` file.
+          For possible values, see: https://www.raspberrypi.com/documentation/computers/config_txt.html
+
+          ::: {.note}
+          These options are deprecated, unsupported, and may not work as expected.
+          :::
+        '';
+      };
+    };
+  };
+
+  config = mkMerge [
+    (mkIf cfg.uboot.enable {
+      warnings = [
+        ''
+          The option set for `boot.loader.raspberrypi.uboot` has been recommended against
+          for years, and is now formally deprecated.
+
+          It is possible that it already did not work as you expected.
+
+          It never worked on the Raspberry Pi 4 family.
+
+          These options will be removed by NixOS 24.11.
+        ''
+      ];
+    })
+    (mkIf cfg.enable {
+      warnings = [
+        ''
+          The option set for `boot.loader.raspberrypi` has been recommended against
+          for years, and is now formally deprecated.
+
+          It is possible that it already did not work as you expected.
+
+          It never worked on the Raspberry Pi 4 family.
+
+          These options will be removed by NixOS 24.11.
+        ''
+      ];
+    })
+    (mkIf cfg.enable {
+      assertions = singleton {
+        assertion = !pkgs.stdenv.hostPlatform.isAarch64 || cfg.version >= 3;
+        message = "Only Raspberry Pi >= 3 supports aarch64.";
+      };
+
+      system.build.installBootLoader = builder;
+      system.boot.loader.id = "raspberrypi";
+      system.boot.loader.kernelFile = pkgs.stdenv.hostPlatform.linux-kernel.target;
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix
new file mode 100644
index 000000000000..a4352ab9a240
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.nix
@@ -0,0 +1,37 @@
+{ pkgs, version, configTxt }:
+
+let
+  isAarch64 = pkgs.stdenv.hostPlatform.isAarch64;
+
+  uboot =
+    if version == 0 then
+      pkgs.ubootRaspberryPiZero
+    else if version == 1 then
+      pkgs.ubootRaspberryPi
+    else if version == 2 then
+      pkgs.ubootRaspberryPi2
+    else if version == 3 then
+      if isAarch64 then
+        pkgs.ubootRaspberryPi3_64bit
+      else
+        pkgs.ubootRaspberryPi3_32bit
+    else
+      throw "U-Boot is not yet supported on the raspberry pi 4.";
+
+  extlinuxConfBuilder =
+    import ../generic-extlinux-compatible/extlinux-conf-builder.nix {
+      pkgs = pkgs.buildPackages;
+    };
+in
+pkgs.substituteAll {
+  src = ./uboot-builder.sh;
+  isExecutable = true;
+  inherit (pkgs) bash;
+  path = [pkgs.coreutils pkgs.gnused pkgs.gnugrep];
+  firmware = pkgs.raspberrypifw;
+  inherit uboot;
+  inherit configTxt;
+  inherit extlinuxConfBuilder;
+  inherit version;
+}
+
diff --git a/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.sh b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.sh
new file mode 100644
index 000000000000..ea591427179f
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/raspberrypi/uboot-builder.sh
@@ -0,0 +1,38 @@
+#! @bash@/bin/sh -e
+
+target=/boot # Target directory
+
+while getopts "t:c:d:g:" opt; do
+    case "$opt" in
+        d) target="$OPTARG" ;;
+        *) ;;
+    esac
+done
+
+copyForced() {
+    local src="$1"
+    local dst="$2"
+    cp $src $dst.tmp
+    mv $dst.tmp $dst
+}
+
+# Call the extlinux builder
+"@extlinuxConfBuilder@" "$@"
+
+# Add the firmware files
+fwdir=@firmware@/share/raspberrypi/boot/
+copyForced $fwdir/bootcode.bin  $target/bootcode.bin
+copyForced $fwdir/fixup.dat     $target/fixup.dat
+copyForced $fwdir/fixup_cd.dat  $target/fixup_cd.dat
+copyForced $fwdir/fixup_db.dat  $target/fixup_db.dat
+copyForced $fwdir/fixup_x.dat   $target/fixup_x.dat
+copyForced $fwdir/start.elf     $target/start.elf
+copyForced $fwdir/start_cd.elf  $target/start_cd.elf
+copyForced $fwdir/start_db.elf  $target/start_db.elf
+copyForced $fwdir/start_x.elf   $target/start_x.elf
+
+# Add the uboot file
+copyForced @uboot@/u-boot.bin $target/u-boot-rpi.bin
+
+# Add the config.txt
+copyForced @configTxt@ $target/config.txt
diff --git a/nixpkgs/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py b/nixpkgs/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
new file mode 100644
index 000000000000..e2e7ffe59dcd
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/systemd-boot/systemd-boot-builder.py
@@ -0,0 +1,344 @@
+#! @python3@/bin/python3 -B
+import argparse
+import ctypes
+import datetime
+import errno
+import glob
+import os
+import os.path
+import re
+import shutil
+import subprocess
+import sys
+import warnings
+import json
+from typing import NamedTuple, Dict, List
+from dataclasses import dataclass
+
+
+@dataclass
+class BootSpec:
+    init: str
+    initrd: str
+    initrdSecrets: str
+    kernel: str
+    kernelParams: List[str]
+    label: str
+    system: str
+    toplevel: str
+    specialisations: Dict[str, "BootSpec"]
+
+
+
+libc = ctypes.CDLL("libc.so.6")
+
+class SystemIdentifier(NamedTuple):
+    profile: str | None
+    generation: int
+    specialisation: str | None
+
+
+def copy_if_not_exists(source: str, dest: str) -> None:
+    if not os.path.exists(dest):
+        shutil.copyfile(source, dest)
+
+
+def generation_dir(profile: str | None, generation: int) -> str:
+    if profile:
+        return "/nix/var/nix/profiles/system-profiles/%s-%d-link" % (profile, generation)
+    else:
+        return "/nix/var/nix/profiles/system-%d-link" % (generation)
+
+def system_dir(profile: str | None, generation: int, specialisation: str | None) -> str:
+    d = generation_dir(profile, generation)
+    if specialisation:
+        return os.path.join(d, "specialisation", specialisation)
+    else:
+        return d
+
+BOOT_ENTRY = """title {title}
+version Generation {generation} {description}
+linux {kernel}
+initrd {initrd}
+options {kernel_params}
+"""
+
+def generation_conf_filename(profile: str | None, generation: int, specialisation: str | None) -> str:
+    pieces = [
+        "nixos",
+        profile or None,
+        "generation",
+        str(generation),
+        f"specialisation-{specialisation}" if specialisation else None,
+    ]
+    return "-".join(p for p in pieces if p) + ".conf"
+
+
+def write_loader_conf(profile: str | None, generation: int, specialisation: str | None) -> None:
+    with open("@efiSysMountPoint@/loader/loader.conf.tmp", 'w') as f:
+        if "@timeout@" != "":
+            f.write("timeout @timeout@\n")
+        f.write("default %s\n" % generation_conf_filename(profile, generation, specialisation))
+        if not @editor@:
+            f.write("editor 0\n")
+        f.write("console-mode @consoleMode@\n")
+        f.flush()
+        os.fsync(f.fileno())
+    os.rename("@efiSysMountPoint@/loader/loader.conf.tmp", "@efiSysMountPoint@/loader/loader.conf")
+
+
+def get_bootspec(profile: str | None, generation: int) -> BootSpec:
+    system_directory = system_dir(profile, generation, None)
+    boot_json_path = os.path.realpath("%s/%s" % (system_directory, "boot.json"))
+    if os.path.isfile(boot_json_path):
+        with open(boot_json_path, 'r') as boot_json_f:
+            bootspec_json = json.load(boot_json_f)
+    else:
+        boot_json_str = subprocess.check_output([
+            "@bootspecTools@/bin/synthesize",
+            "--version",
+            "1",
+            system_directory,
+            "/dev/stdout"],
+            universal_newlines=True)
+        bootspec_json = json.loads(boot_json_str)
+    return bootspec_from_json(bootspec_json)
+
+def bootspec_from_json(bootspec_json: Dict) -> BootSpec:
+    specialisations = bootspec_json['org.nixos.specialisation.v1']
+    specialisations = {k: bootspec_from_json(v) for k, v in specialisations.items()}
+    return BootSpec(**bootspec_json['org.nixos.bootspec.v1'], specialisations=specialisations)
+
+
+def copy_from_file(file: str, dry_run: bool = False) -> str:
+    store_file_path = os.path.realpath(file)
+    suffix = os.path.basename(store_file_path)
+    store_dir = os.path.basename(os.path.dirname(store_file_path))
+    efi_file_path = "/efi/nixos/%s-%s.efi" % (store_dir, suffix)
+    if not dry_run:
+        copy_if_not_exists(store_file_path, "@efiSysMountPoint@%s" % (efi_file_path))
+    return efi_file_path
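+# Illustration with a hypothetical store path: for
+# /nix/store/<hash>-linux-6.1/bzImage, store_dir is "<hash>-linux-6.1" and
+# suffix is "bzImage", so the file ends up at
+# @efiSysMountPoint@/efi/nixos/<hash>-linux-6.1-bzImage.efi.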
+
+def write_entry(profile: str | None, generation: int, specialisation: str | None,
+                machine_id: str, bootspec: BootSpec, current: bool) -> None:
+    if specialisation:
+        bootspec = bootspec.specialisations[specialisation]
+    kernel = copy_from_file(bootspec.kernel)
+    initrd = copy_from_file(bootspec.initrd)
+
+    title = "@distroName@{profile}{specialisation}".format(
+        profile=" [" + profile + "]" if profile else "",
+        specialisation=" (%s)" % specialisation if specialisation else "")
+
+    try:
+        subprocess.check_call([bootspec.initrdSecrets, "@efiSysMountPoint@%s" % (initrd)])
+    except FileNotFoundError:
+        pass
+    except subprocess.CalledProcessError:
+        if current:
+            print("failed to create initrd secrets!", file=sys.stderr)
+            sys.exit(1)
+        else:
+            print("warning: failed to create initrd secrets "
+                  f'for "{title} - Configuration {generation}", an older generation', file=sys.stderr)
+            print("note: this is normal after having removed "
+                  "or renamed a file in `boot.initrd.secrets`", file=sys.stderr)
+    entry_file = "@efiSysMountPoint@/loader/entries/%s" % (
+        generation_conf_filename(profile, generation, specialisation))
+    tmp_path = "%s.tmp" % (entry_file)
+    kernel_params = "init=%s " % bootspec.init
+
+    kernel_params = kernel_params + " ".join(bootspec.kernelParams)
+    build_time = int(os.path.getctime(system_dir(profile, generation, specialisation)))
+    build_date = datetime.datetime.fromtimestamp(build_time).strftime('%F')
+
+    with open(tmp_path, 'w') as f:
+        f.write(BOOT_ENTRY.format(title=title,
+                    generation=generation,
+                    kernel=kernel,
+                    initrd=initrd,
+                    kernel_params=kernel_params,
+                    description=f"{bootspec.label}, built on {build_date}"))
+        if machine_id is not None:
+            f.write("machine-id %s\n" % machine_id)
+        f.flush()
+        os.fsync(f.fileno())
+    os.rename(tmp_path, entry_file)
+
+
+def get_generations(profile: str | None = None) -> list[SystemIdentifier]:
+    gen_list = subprocess.check_output([
+        "@nix@/bin/nix-env",
+        "--list-generations",
+        "-p",
+        "/nix/var/nix/profiles/%s" % ("system-profiles/" + profile if profile else "system"),
+        "--option", "build-users-group", ""],
+        universal_newlines=True)
+    gen_lines = gen_list.split('\n')
+    gen_lines.pop()
+
+    configurationLimit = @configurationLimit@
+    configurations = [
+        SystemIdentifier(
+            profile=profile,
+            generation=int(line.split()[0]),
+            specialisation=None
+        )
+        for line in gen_lines
+    ]
+    return configurations[-configurationLimit:]
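+# get_generations() parses nix-env output lines roughly of the form
+# "  110   2023-01-01 12:00:00   (current)" (whitespace-separated, generation
+# number first), which is why only line.split()[0] is used above.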
+
+
+def remove_old_entries(gens: list[SystemIdentifier]) -> None:
+    rex_profile = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos-(.*)-generation-.*\.conf$")
+    rex_generation = re.compile(r"^@efiSysMountPoint@/loader/entries/nixos.*-generation-([0-9]+)(-specialisation-.*)?\.conf$")
+    known_paths = []
+    for gen in gens:
+        bootspec = get_bootspec(gen.profile, gen.generation)
+        known_paths.append(copy_from_file(bootspec.kernel, True))
+        known_paths.append(copy_from_file(bootspec.initrd, True))
+    for path in glob.iglob("@efiSysMountPoint@/loader/entries/nixos*-generation-[1-9]*.conf"):
+        if rex_profile.match(path):
+            prof = rex_profile.sub(r"\1", path)
+        else:
+            prof = None
+        try:
+            gen_number = int(rex_generation.sub(r"\1", path))
+        except ValueError:
+            continue
+        if (prof, gen_number, None) not in gens:
+            os.unlink(path)
+    for path in glob.iglob("@efiSysMountPoint@/efi/nixos/*"):
+        if path not in known_paths and not os.path.isdir(path):
+            os.unlink(path)
+
+
+def get_profiles() -> list[str]:
+    if os.path.isdir("/nix/var/nix/profiles/system-profiles/"):
+        return [x
+            for x in os.listdir("/nix/var/nix/profiles/system-profiles/")
+            if not x.endswith("-link")]
+    else:
+        return []
+
+def install_bootloader(args: argparse.Namespace) -> None:
+    try:
+        with open("/etc/machine-id") as machine_file:
+            machine_id = machine_file.readlines()[0]
+    except IOError as e:
+        if e.errno != errno.ENOENT:
+            raise
+        # Since systemd version 232 a machine ID is required and it might not
+        # be there on newly installed systems, so let's generate one so that
+        # bootctl can find it and we can also pass it to write_entry() later.
+        cmd = ["@systemd@/bin/systemd-machine-id-setup", "--print"]
+        machine_id = subprocess.run(
+          cmd, text=True, check=True, stdout=subprocess.PIPE
+        ).stdout.rstrip()
+
+    if os.getenv("NIXOS_INSTALL_GRUB") == "1":
+        warnings.warn("NIXOS_INSTALL_GRUB env var deprecated, use NIXOS_INSTALL_BOOTLOADER", DeprecationWarning)
+        os.environ["NIXOS_INSTALL_BOOTLOADER"] = "1"
+
+    # flags to pass to bootctl install/update
+    bootctl_flags = []
+
+    if "@canTouchEfiVariables@" != "1":
+        bootctl_flags.append("--no-variables")
+
+    if "@graceful@" == "1":
+        bootctl_flags.append("--graceful")
+
+    if os.getenv("NIXOS_INSTALL_BOOTLOADER") == "1":
+        # bootctl uses fopen() with modes "wxe" and fails if the file exists.
+        if os.path.exists("@efiSysMountPoint@/loader/loader.conf"):
+            os.unlink("@efiSysMountPoint@/loader/loader.conf")
+
+        subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["install"])
+    else:
+        # Update bootloader to latest if needed
+        available_out = subprocess.check_output(["@systemd@/bin/bootctl", "--version"], universal_newlines=True).split()[2]
+        installed_out = subprocess.check_output(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@", "status"], universal_newlines=True)
+
+        # See status_binaries() in systemd bootctl.c for code which generates this
+        installed_match = re.search(r"^\W+File:.*/EFI/(?:BOOT|systemd)/.*\.efi \(systemd-boot ([\d.]+[^)]*)\)$",
+                      installed_out, re.IGNORECASE | re.MULTILINE)
+
+        available_match = re.search(r"^\((.*)\)$", available_out)
+
+        if installed_match is None:
+            raise Exception("could not find any previously installed systemd-boot")
+
+        if available_match is None:
+            raise Exception("could not determine systemd-boot version")
+
+        installed_version = installed_match.group(1)
+        available_version = available_match.group(1)
+
+        if installed_version < available_version:
+            print("updating systemd-boot from %s to %s" % (installed_version, available_version))
+            subprocess.check_call(["@systemd@/bin/bootctl", "--esp-path=@efiSysMountPoint@"] + bootctl_flags + ["update"])
+
+    os.makedirs("@efiSysMountPoint@/efi/nixos", exist_ok=True)
+    os.makedirs("@efiSysMountPoint@/loader/entries", exist_ok=True)
+
+    gens = get_generations()
+    for profile in get_profiles():
+        gens += get_generations(profile)
+    remove_old_entries(gens)
+    for gen in gens:
+        try:
+            bootspec = get_bootspec(gen.profile, gen.generation)
+            is_default = os.path.dirname(bootspec.init) == args.default_config
+            write_entry(*gen, machine_id, bootspec, current=is_default)
+            for specialisation in bootspec.specialisations.keys():
+                write_entry(gen.profile, gen.generation, specialisation, machine_id, bootspec, current=is_default)
+            if is_default:
+                write_loader_conf(*gen)
+        except OSError as e:
+            # See https://github.com/NixOS/nixpkgs/issues/114552
+            if e.errno == errno.EINVAL:
+                profile = f"profile '{gen.profile}'" if gen.profile else "default profile"
+                print("ignoring {} in the list of boot entries because of the following error:\n{}".format(profile, e), file=sys.stderr)
+            else:
+                raise e
+
+    for root, _, files in os.walk('@efiSysMountPoint@/efi/nixos/.extra-files', topdown=False):
+        relative_root = root.removeprefix("@efiSysMountPoint@/efi/nixos/.extra-files").removeprefix("/")
+        actual_root = os.path.join("@efiSysMountPoint@", relative_root)
+
+        for file in files:
+            actual_file = os.path.join(actual_root, file)
+
+            if os.path.exists(actual_file):
+                os.unlink(actual_file)
+            os.unlink(os.path.join(root, file))
+
+        if not len(os.listdir(actual_root)):
+            os.rmdir(actual_root)
+        os.rmdir(root)
+
+    os.makedirs("@efiSysMountPoint@/efi/nixos/.extra-files", exist_ok=True)
+
+    subprocess.check_call("@copyExtraFiles@")
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(description='Update @distroName@-related systemd-boot files')
+    parser.add_argument('default_config', metavar='DEFAULT-CONFIG', help='The default @distroName@ config to boot')
+    args = parser.parse_args()
+
+    try:
+        install_bootloader(args)
+    finally:
+        # Since fat32 provides little recovery facilities after a crash,
+        # it can leave the system in an unbootable state, when a crash/outage
+        # happens shortly after an update. To decrease the likelihood of this
+        # event sync the efi filesystem after each update.
+        rc = libc.syncfs(os.open("@efiSysMountPoint@", os.O_RDONLY))
+        if rc != 0:
+            print("could not sync @efiSysMountPoint@: {}".format(os.strerror(ctypes.get_errno())), file=sys.stderr)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/nixpkgs/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix b/nixpkgs/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
new file mode 100644
index 000000000000..9d55c21077d1
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/loader/systemd-boot/systemd-boot.nix
@@ -0,0 +1,312 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.boot.loader.systemd-boot;
+
+  efi = config.boot.loader.efi;
+
+  systemdBootBuilder = pkgs.substituteAll {
+    src = ./systemd-boot-builder.py;
+
+    isExecutable = true;
+
+    inherit (pkgs) python3;
+
+    systemd = config.systemd.package;
+
+    bootspecTools = pkgs.bootspec;
+
+    nix = config.nix.package.out;
+
+    timeout = optionalString (config.boot.loader.timeout != null) config.boot.loader.timeout;
+
+    editor = if cfg.editor then "True" else "False";
+
+    configurationLimit = if cfg.configurationLimit == null then 0 else cfg.configurationLimit;
+
+    inherit (cfg) consoleMode graceful;
+
+    inherit (efi) efiSysMountPoint canTouchEfiVariables;
+
+    inherit (config.system.nixos) distroName;
+
+    memtest86 = optionalString cfg.memtest86.enable pkgs.memtest86plus;
+
+    netbootxyz = optionalString cfg.netbootxyz.enable pkgs.netbootxyz-efi;
+
+    copyExtraFiles = pkgs.writeShellScript "copy-extra-files" ''
+      empty_file=$(${pkgs.coreutils}/bin/mktemp)
+
+      ${concatStrings (mapAttrsToList (n: v: ''
+        ${pkgs.coreutils}/bin/install -Dp "${v}" "${efi.efiSysMountPoint}/"${escapeShellArg n}
+        ${pkgs.coreutils}/bin/install -D $empty_file "${efi.efiSysMountPoint}/efi/nixos/.extra-files/"${escapeShellArg n}
+      '') cfg.extraFiles)}
+
+      ${concatStrings (mapAttrsToList (n: v: ''
+        ${pkgs.coreutils}/bin/install -Dp "${pkgs.writeText n v}" "${efi.efiSysMountPoint}/loader/entries/"${escapeShellArg n}
+        ${pkgs.coreutils}/bin/install -D $empty_file "${efi.efiSysMountPoint}/efi/nixos/.extra-files/loader/entries/"${escapeShellArg n}
+      '') cfg.extraEntries)}
+    '';
+  };
+
+  checkedSystemdBootBuilder = pkgs.runCommand "systemd-boot" {
+    nativeBuildInputs = [ pkgs.mypy ];
+  } ''
+    install -m755 ${systemdBootBuilder} $out
+    mypy \
+      --no-implicit-optional \
+      --disallow-untyped-calls \
+      --disallow-untyped-defs \
+      $out
+  '';
+
+  finalSystemdBootBuilder = pkgs.writeScript "install-systemd-boot.sh" ''
+    #!${pkgs.runtimeShell}
+    ${checkedSystemdBootBuilder} "$@"
+    ${cfg.extraInstallCommands}
+  '';
+in {
+
+  meta.maintainers = with lib.maintainers; [ julienmalka ];
+
+  imports =
+    [ (mkRenamedOptionModule [ "boot" "loader" "gummiboot" "enable" ] [ "boot" "loader" "systemd-boot" "enable" ])
+    ];
+
+  options.boot.loader.systemd-boot = {
+    enable = mkOption {
+      default = false;
+
+      type = types.bool;
+
+      description = lib.mdDoc "Whether to enable the systemd-boot (formerly gummiboot) EFI boot manager.";
+    };
+
+    editor = mkOption {
+      default = true;
+
+      type = types.bool;
+
+      description = lib.mdDoc ''
+        Whether to allow editing the kernel command-line before
+        boot. It is recommended to set this to false, as editing the
+        command line allows gaining root access by passing init=/bin/sh
+        as a kernel parameter. However, it is enabled by default for
+        backwards compatibility.
+      '';
+    };
+
+    configurationLimit = mkOption {
+      default = null;
+      example = 120;
+      type = types.nullOr types.int;
+      description = lib.mdDoc ''
+        Maximum number of latest generations in the boot menu.
+        Useful to prevent the boot partition from running out of disk space.
+
+        `null` means no limit, i.e. all generations
+        that have not yet been garbage collected.
+      '';
+    };
+
+    extraInstallCommands = mkOption {
+      default = "";
+      example = ''
+        default_cfg=$(cat /boot/loader/loader.conf | grep default | awk '{print $2}')
+        init_value=$(cat /boot/loader/entries/$default_cfg | grep init= | awk '{print $2}')
+        sed -i "s|@INIT@|$init_value|g" /boot/custom/config_with_placeholder.conf
+      '';
+      type = types.lines;
+      description = lib.mdDoc ''
+        Additional shell commands inserted in the bootloader installer
+        script after generating menu entries. It can be used to expand
+        on extra boot entries that cannot incorporate certain pieces of
+        information (such as the resulting `init=` kernel parameter).
+      '';
+    };
+
+    consoleMode = mkOption {
+      default = "keep";
+
+      type = types.enum [ "0" "1" "2" "auto" "max" "keep" ];
+
+      description = lib.mdDoc ''
+        The resolution of the console. The following values are valid:
+
+        - `"0"`: Standard UEFI 80x25 mode
+        - `"1"`: 80x50 mode, not supported by all devices
+        - `"2"`: The first non-standard mode provided by the device firmware, if any
+        - `"auto"`: Pick a suitable mode automatically using heuristics
+        - `"max"`: Pick the highest-numbered available mode
+        - `"keep"`: Keep the mode selected by firmware (the default)
+      '';
+    };
+
+    memtest86 = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Make Memtest86+ available from the systemd-boot menu. Memtest86+ is a
+          program for testing memory.
+        '';
+      };
+
+      entryFilename = mkOption {
+        default = "memtest86.conf";
+        type = types.str;
+        description = lib.mdDoc ''
+          `systemd-boot` orders the menu entries by the config file names,
+          so if you want something to appear after all the NixOS entries,
+          it should start with {file}`o` or onwards.
+        '';
+      };
+    };
+
+    netbootxyz = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Make `netboot.xyz` available from the
+          `systemd-boot` menu. `netboot.xyz`
+          is a menu system that allows you to boot OS installers and
+          utilities over the network.
+        '';
+      };
+
+      entryFilename = mkOption {
+        default = "o_netbootxyz.conf";
+        type = types.str;
+        description = lib.mdDoc ''
+          `systemd-boot` orders the menu entries by the config file names,
+          so if you want something to appear after all the NixOS entries,
+          it should start with {file}`o` or onwards.
+        '';
+      };
+    };
+
+    extraEntries = mkOption {
+      type = types.attrsOf types.lines;
+      default = {};
+      example = literalExpression ''
+        { "memtest86.conf" = '''
+          title Memtest86+
+          efi /efi/memtest86/memtest.efi
+        '''; }
+      '';
+      description = lib.mdDoc ''
+        Any additional entries you want added to the `systemd-boot` menu.
+        These entries will be copied to {file}`/boot/loader/entries`.
+        Each attribute name denotes the destination file name,
+        and the corresponding attribute value is the contents of the entry.
+
+        `systemd-boot` orders the menu entries by the config file names,
+        so if you want something to appear after all the NixOS entries,
+        it should start with {file}`o` or onwards.
+      '';
+    };
+
+    extraFiles = mkOption {
+      type = types.attrsOf types.path;
+      default = {};
+      example = literalExpression ''
+        { "efi/memtest86/memtest.efi" = "''${pkgs.memtest86plus}/memtest.efi"; }
+      '';
+      description = lib.mdDoc ''
+        A set of files to be copied to {file}`/boot`.
+        Each attribute name denotes the destination file name in
+        {file}`/boot`, while the corresponding
+        attribute value specifies the source file.
+      '';
+    };
+
+    graceful = mkOption {
+      default = false;
+
+      type = types.bool;
+
+      description = lib.mdDoc ''
+        Invoke `bootctl install` with the `--graceful` option,
+        which ignores errors when EFI variables cannot be written or when the EFI System Partition
+        cannot be found. Currently only applies to random seed operations.
+
+        Only enable this option if `systemd-boot` otherwise fails to install, as the
+        scope or implication of the `--graceful` option may change in the future.
+      '';
+    };
+
+  };
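+  # A minimal usage sketch (illustrative values) of the options defined above,
+  # as it would appear in a system configuration:
+  #
+  #   boot.loader.systemd-boot.enable = true;
+  #   boot.loader.systemd-boot.configurationLimit = 10;
+  #   boot.loader.efi.canTouchEfiVariables = true;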
+
+  config = mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = (config.boot.kernelPackages.kernel.features or { efiBootStub = true; }) ? efiBootStub;
+        message = "This kernel does not support the EFI boot stub";
+      }
+    ] ++ concatMap (filename: [
+      {
+        assertion = !(hasInfix "/" filename);
+        message = "boot.loader.systemd-boot.extraEntries.${lib.strings.escapeNixIdentifier filename} is invalid: entries within folders are not supported";
+      }
+      {
+        assertion = hasSuffix ".conf" filename;
+        message = "boot.loader.systemd-boot.extraEntries.${lib.strings.escapeNixIdentifier filename} is invalid: entries must have a .conf file extension";
+      }
+    ]) (builtins.attrNames cfg.extraEntries)
+      ++ concatMap (filename: [
+        {
+          assertion = !(hasPrefix "/" filename);
+          message = "boot.loader.systemd-boot.extraFiles.${lib.strings.escapeNixIdentifier filename} is invalid: paths must not begin with a slash";
+        }
+        {
+          assertion = !(hasInfix ".." filename);
+          message = "boot.loader.systemd-boot.extraFiles.${lib.strings.escapeNixIdentifier filename} is invalid: paths must not reference the parent directory";
+        }
+        {
+          assertion = !(hasInfix "nixos/.extra-files" (toLower filename));
+          message = "boot.loader.systemd-boot.extraFiles.${lib.strings.escapeNixIdentifier filename} is invalid: files cannot be placed in the nixos/.extra-files directory";
+        }
+      ]) (builtins.attrNames cfg.extraFiles);
+
+    boot.loader.grub.enable = mkDefault false;
+
+    boot.loader.supportsInitrdSecrets = true;
+
+    boot.loader.systemd-boot.extraFiles = mkMerge [
+      (mkIf cfg.memtest86.enable {
+        "efi/memtest86/memtest.efi" = "${pkgs.memtest86plus.efi}";
+      })
+      (mkIf cfg.netbootxyz.enable {
+        "efi/netbootxyz/netboot.xyz.efi" = "${pkgs.netbootxyz-efi}";
+      })
+    ];
+
+    boot.loader.systemd-boot.extraEntries = mkMerge [
+      (mkIf cfg.memtest86.enable {
+        "${cfg.memtest86.entryFilename}" = ''
+          title  Memtest86+
+          efi    /efi/memtest86/memtest.efi
+        '';
+      })
+      (mkIf cfg.netbootxyz.enable {
+        "${cfg.netbootxyz.entryFilename}" = ''
+          title  netboot.xyz
+          efi    /efi/netbootxyz/netboot.xyz.efi
+        '';
+      })
+    ];
+
+    system = {
+      build.installBootLoader = finalSystemdBootBuilder;
+
+      boot.loader.id = "systemd-boot";
+
+      requiredKernelConfig = with config.lib.kernelConfig; [
+        (isYes "EFI_STUB")
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/luksroot.nix b/nixpkgs/nixos/modules/system/boot/luksroot.nix
new file mode 100644
index 000000000000..ca560d63f3bd
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/luksroot.nix
@@ -0,0 +1,1086 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+
+let
+  luks = config.boot.initrd.luks;
+  kernelPackages = config.boot.kernelPackages;
+  defaultPrio = (mkOptionDefault {}).priority;
+
+  commonFunctions = ''
+    die() {
+        echo "$@" >&2
+        exit 1
+    }
+
+    dev_exist() {
+        local target="$1"
+        if [ -e $target ]; then
+            return 0
+        else
+            local uuid=$(echo -n $target | sed -e 's,UUID=\(.*\),\1,g')
+            blkid --uuid $uuid >/dev/null
+            return $?
+        fi
+    }
+
+    wait_target() {
+        local name="$1"
+        local target="$2"
+        local secs="''${3:-10}"
+        local desc="''${4:-$name $target to appear}"
+
+        if ! dev_exist $target; then
+            echo -n "Waiting $secs seconds for $desc..."
+            local success=false;
+            for try in $(seq $secs); do
+                echo -n "."
+                sleep 1
+                if dev_exist $target; then
+                    success=true
+                    break
+                fi
+            done
+            if [ $success == true ]; then
+                echo " - success";
+                return 0
+            else
+                echo " - failure";
+                return 1
+            fi
+        fi
+        return 0
+    }
+
+    wait_yubikey() {
+        local secs="''${1:-10}"
+
+        ykinfo -v 1>/dev/null 2>&1
+        if [ $? != 0 ]; then
+            echo -n "Waiting $secs seconds for YubiKey to appear..."
+            local success=false
+            for try in $(seq $secs); do
+                echo -n .
+                sleep 1
+                ykinfo -v 1>/dev/null 2>&1
+                if [ $? == 0 ]; then
+                    success=true
+                    break
+                fi
+            done
+            if [ $success == true ]; then
+                echo " - success";
+                return 0
+            else
+                echo " - failure";
+                return 1
+            fi
+        fi
+        return 0
+    }
+
+    wait_gpgcard() {
+        local secs="''${1:-10}"
+
+        gpg --card-status > /dev/null 2> /dev/null
+        if [ $? != 0 ]; then
+            echo -n "Waiting $secs seconds for GPG Card to appear"
+            local success=false
+            for try in $(seq $secs); do
+                echo -n .
+                sleep 1
+                gpg --card-status > /dev/null 2> /dev/null
+                if [ $? == 0 ]; then
+                    success=true
+                    break
+                fi
+            done
+            if [ $success == true ]; then
+                echo " - success";
+                return 0
+            else
+                echo " - failure";
+                return 1
+            fi
+        fi
+        return 0
+    }
+  '';
+
+  preCommands = ''
+    # A place to store crypto things
+
+    # A ramfs is used here to ensure that the file used to update
+    # the key slot with cryptsetup will never get swapped out.
+    # Warning: Do NOT replace with tmpfs!
+    mkdir -p /crypt-ramfs
+    mount -t ramfs none /crypt-ramfs
+
+    # Cryptsetup locking directory
+    mkdir -p /run/cryptsetup
+
+    # For YubiKey salt storage
+    mkdir -p /crypt-storage
+
+    ${optionalString luks.gpgSupport ''
+    export GPG_TTY=$(tty)
+    export GNUPGHOME=/crypt-ramfs/.gnupg
+
+    gpg-agent --daemon --scdaemon-program $out/bin/scdaemon > /dev/null 2> /dev/null
+    ''}
+
+    # Disable all input echo for the whole stage. We could use read -s
+    # instead but that would occasionally leak characters between read
+    # invocations.
+    stty -echo
+  '';
+
+  postCommands = ''
+    stty echo
+    umount /crypt-storage 2>/dev/null
+    umount /crypt-ramfs 2>/dev/null
+  '';
+
+  openCommand = name: dev: assert name == dev.name;
+  let
+    csopen = "cryptsetup luksOpen ${dev.device} ${dev.name}"
+           + optionalString dev.allowDiscards " --allow-discards"
+           + optionalString dev.bypassWorkqueues " --perf-no_read_workqueue --perf-no_write_workqueue"
+           + optionalString (dev.header != null) " --header=${dev.header}";
+    cschange = "cryptsetup luksChangeKey ${dev.device} ${optionalString (dev.header != null) "--header=${dev.header}"}";
+    fido2luksCredentials = dev.fido2.credentials ++ optional (dev.fido2.credential != null) dev.fido2.credential;
+  in ''
+    # Wait for luksRoot (and optionally keyFile and/or header) to appear, e.g.
+    # if on a USB drive.
+    wait_target "device" ${dev.device} || die "${dev.device} is unavailable"
+
+    ${optionalString (dev.header != null) ''
+      wait_target "header" ${dev.header} || die "${dev.header} is unavailable"
+    ''}
+
+    try_empty_passphrase() {
+        ${if dev.tryEmptyPassphrase then ''
+             echo "Trying empty passphrase!"
+             echo "" | ${csopen}
+             cs_status=$?
+             if [ $cs_status -eq 0 ]; then
+                 return 0
+             else
+                 return 1
+             fi
+        '' else "return 1"}
+    }
+
+
+    do_open_passphrase() {
+        local passphrase
+
+        while true; do
+            echo -n "Passphrase for ${dev.device}: "
+            passphrase=
+            while true; do
+                if [ -e /crypt-ramfs/passphrase ]; then
+                    echo "reused"
+                    passphrase=$(cat /crypt-ramfs/passphrase)
+                    break
+                else
+                    # ask cryptsetup-askpass
+                    echo -n "${dev.device}" > /crypt-ramfs/device
+
+                    # and try reading it from /dev/console with a timeout
+                    IFS= read -t 1 -r passphrase
+                    if [ -n "$passphrase" ]; then
+                       ${if luks.reusePassphrases then ''
+                         # remember it for the next device
+                         echo -n "$passphrase" > /crypt-ramfs/passphrase
+                       '' else ''
+                         # Don't save it to ramfs. We are very paranoid
+                       ''}
+                       echo
+                       break
+                    fi
+                fi
+            done
+            echo -n "Verifying passphrase for ${dev.device}..."
+            echo -n "$passphrase" | ${csopen} --key-file=-
+            if [ $? == 0 ]; then
+                echo " - success"
+                ${if luks.reusePassphrases then ''
+                  # we don't rm here because we might reuse it for the next device
+                '' else ''
+                  rm -f /crypt-ramfs/passphrase
+                ''}
+                break
+            else
+                echo " - failure"
+                # ask for a different one
+                rm -f /crypt-ramfs/passphrase
+            fi
+        done
+    }
+
+    # LUKS
+    open_normally() {
+        ${if (dev.keyFile != null) then ''
+        if wait_target "key file" ${dev.keyFile}; then
+            ${csopen} --key-file=${dev.keyFile} \
+              ${optionalString (dev.keyFileSize != null) "--keyfile-size=${toString dev.keyFileSize}"} \
+              ${optionalString (dev.keyFileOffset != null) "--keyfile-offset=${toString dev.keyFileOffset}"}
+            cs_status=$?
+            if [ $cs_status -ne 0 ]; then
+              echo "Key File ${dev.keyFile} failed!"
+              if ! try_empty_passphrase; then
+                ${if dev.fallbackToPassword then "echo" else "die"} "${dev.keyFile} is unavailable"
+                echo " - falling back to interactive password prompt"
+                do_open_passphrase
+              fi
+            fi
+        else
+            # If the key file never shows up we should also try the empty passphrase
+            if ! try_empty_passphrase; then
+               ${if dev.fallbackToPassword then "echo" else "die"} "${dev.keyFile} is unavailable"
+               echo " - falling back to interactive password prompt"
+               do_open_passphrase
+            fi
+        fi
+        '' else ''
+           if ! try_empty_passphrase; then
+              do_open_passphrase
+           fi
+        ''}
+    }
+
+    ${optionalString (luks.yubikeySupport && (dev.yubikey != null)) ''
+    # YubiKey
+    rbtohex() {
+        ( od -An -vtx1 | tr -d ' \n' )
+    }
+
+    hextorb() {
+        ( tr '[:lower:]' '[:upper:]' | sed -e 's/\([0-9A-F]\{2\}\)/\\\\\\x\1/gI' | xargs printf )
+    }
+
+    do_open_yubikey() {
+        # Make all of these local to this function
+        # to prevent their values being leaked
+        local salt
+        local iterations
+        local k_user
+        local challenge
+        local response
+        local k_luks
+        local opened
+        local new_salt
+        local new_iterations
+        local new_challenge
+        local new_response
+        local new_k_luks
+
+        mount -t ${dev.yubikey.storage.fsType} ${dev.yubikey.storage.device} /crypt-storage || \
+          die "Failed to mount YubiKey salt storage device"
+
+        salt="$(cat /crypt-storage${dev.yubikey.storage.path} | sed -n 1p | tr -d '\n')"
+        iterations="$(cat /crypt-storage${dev.yubikey.storage.path} | sed -n 2p | tr -d '\n')"
+        challenge="$(echo -n $salt | openssl-wrap dgst -binary -sha512 | rbtohex)"
+        response="$(ykchalresp -${toString dev.yubikey.slot} -x $challenge 2>/dev/null)"
+
+        for try in $(seq 3); do
+            ${optionalString dev.yubikey.twoFactor ''
+            echo -n "Enter two-factor passphrase: "
+            k_user=
+            while true; do
+                if [ -e /crypt-ramfs/passphrase ]; then
+                    echo "reused"
+                    k_user=$(cat /crypt-ramfs/passphrase)
+                    break
+                else
+                    # Try reading it from /dev/console with a timeout
+                    IFS= read -t 1 -r k_user
+                    if [ -n "$k_user" ]; then
+                       ${if luks.reusePassphrases then ''
+                         # Remember it for the next device
+                         echo -n "$k_user" > /crypt-ramfs/passphrase
+                       '' else ''
+                         # Don't save it to ramfs. We are very paranoid
+                       ''}
+                       echo
+                       break
+                    fi
+                fi
+            done
+            ''}
+
+            if [ ! -z "$k_user" ]; then
+                k_luks="$(echo -n $k_user | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $iterations $response | rbtohex)"
+            else
+                k_luks="$(echo | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $iterations $response | rbtohex)"
+            fi
+
+            echo -n "$k_luks" | hextorb | ${csopen} --key-file=-
+
+            if [ $? == 0 ]; then
+                opened=true
+                ${if luks.reusePassphrases then ''
+                  # We don't rm here because we might reuse it for the next device
+                '' else ''
+                  rm -f /crypt-ramfs/passphrase
+                ''}
+                break
+            else
+                opened=false
+                echo "Authentication failed!"
+            fi
+        done
+
+        [ "$opened" == false ] && die "Maximum authentication errors reached"
+
+        echo -n "Gathering entropy for new salt (please enter random keys to generate entropy if this blocks for long)..."
+        for i in $(seq ${toString dev.yubikey.saltLength}); do
+            byte="$(dd if=/dev/random bs=1 count=1 2>/dev/null | rbtohex)";
+            new_salt="$new_salt$byte";
+            echo -n .
+        done;
+        echo "ok"
+
+        new_iterations="$iterations"
+        ${optionalString (dev.yubikey.iterationStep > 0) ''
+        new_iterations="$(($new_iterations + ${toString dev.yubikey.iterationStep}))"
+        ''}
+
+        new_challenge="$(echo -n $new_salt | openssl-wrap dgst -binary -sha512 | rbtohex)"
+
+        new_response="$(ykchalresp -${toString dev.yubikey.slot} -x $new_challenge 2>/dev/null)"
+
+        if [ -z "$new_response" ]; then
+            echo "Warning: Unable to generate new challenge response, current challenge persists!"
+            umount /crypt-storage
+            return
+        fi
+
+        if [ ! -z "$k_user" ]; then
+            new_k_luks="$(echo -n $k_user | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $new_iterations $new_response | rbtohex)"
+        else
+            new_k_luks="$(echo | pbkdf2-sha512 ${toString dev.yubikey.keyLength} $new_iterations $new_response | rbtohex)"
+        fi
+
+        echo -n "$new_k_luks" | hextorb > /crypt-ramfs/new_key
+        echo -n "$k_luks" | hextorb | ${cschange} --key-file=- /crypt-ramfs/new_key
+
+        if [ $? == 0 ]; then
+            echo -ne "$new_salt\n$new_iterations" > /crypt-storage${dev.yubikey.storage.path}
+            sync /crypt-storage${dev.yubikey.storage.path}
+        else
+            echo "Warning: Could not update LUKS key, current challenge persists!"
+        fi
+
+        rm -f /crypt-ramfs/new_key
+        umount /crypt-storage
+    }
+
+    open_with_hardware() {
+        if wait_yubikey ${toString dev.yubikey.gracePeriod}; then
+            do_open_yubikey
+        else
+            echo "No YubiKey found, falling back to non-YubiKey open procedure"
+            open_normally
+        fi
+    }
+    ''}
+
+    ${optionalString (luks.gpgSupport && (dev.gpgCard != null)) ''
+
+    do_open_gpg_card() {
+        # Make all of these local to this function
+        # to prevent their values being leaked
+        local pin
+        local opened
+
+        gpg --import /gpg-keys/${dev.device}/pubkey.asc > /dev/null 2> /dev/null
+
+        gpg --card-status > /dev/null 2> /dev/null
+
+        for try in $(seq 3); do
+            echo -n "PIN for GPG Card associated with device ${dev.device}: "
+            pin=
+            while true; do
+                if [ -e /crypt-ramfs/passphrase ]; then
+                    echo "reused"
+                    pin=$(cat /crypt-ramfs/passphrase)
+                    break
+                else
+                    # and try reading it from /dev/console with a timeout
+                    IFS= read -t 1 -r pin
+                    if [ -n "$pin" ]; then
+                       ${if luks.reusePassphrases then ''
+                         # remember it for the next device
+                         echo -n "$pin" > /crypt-ramfs/passphrase
+                       '' else ''
+                         # Don't save it to ramfs. We are very paranoid
+                       ''}
+                       echo
+                       break
+                    fi
+                fi
+            done
+            echo -n "Verifying passphrase for ${dev.device}..."
+            echo -n "$pin" | gpg -q --batch --passphrase-fd 0 --pinentry-mode loopback -d /gpg-keys/${dev.device}/cryptkey.gpg 2> /dev/null | ${csopen} --key-file=- > /dev/null 2> /dev/null
+            if [ $? == 0 ]; then
+                echo " - success"
+                ${if luks.reusePassphrases then ''
+                  # we don't rm here because we might reuse it for the next device
+                '' else ''
+                  rm -f /crypt-ramfs/passphrase
+                ''}
+                break
+            else
+                echo " - failure"
+                # ask for a different one
+                rm -f /crypt-ramfs/passphrase
+            fi
+        done
+
+        [ "$opened" == false ] && die "Maximum authentication errors reached"
+    }
+
+    open_with_hardware() {
+        if wait_gpgcard ${toString dev.gpgCard.gracePeriod}; then
+            do_open_gpg_card
+        else
+            echo "No GPG Card found, falling back to normal open procedure"
+            open_normally
+        fi
+    }
+    ''}
+
+    ${optionalString (luks.fido2Support && fido2luksCredentials != []) ''
+
+    open_with_hardware() {
+      local passphrase
+
+        ${if dev.fido2.passwordLess then ''
+          export passphrase=""
+        '' else ''
+          read -rsp "FIDO2 salt for ${dev.device}: " passphrase
+          echo
+        ''}
+        ${optionalString (lib.versionOlder kernelPackages.kernel.version "5.4") ''
+          echo "On systems with Linux Kernel < 5.4, it might take a while to initialize the CRNG, you might want to use linuxPackages_latest."
+          echo "Please move your mouse to create needed randomness."
+        ''}
+          echo "Waiting for your FIDO2 device..."
+          fido2luks open${optionalString dev.allowDiscards " --allow-discards"} ${dev.device} ${dev.name} "${builtins.concatStringsSep "," fido2luksCredentials}" --await-dev ${toString dev.fido2.gracePeriod} --salt string:$passphrase
+        if [ $? -ne 0 ]; then
+          echo "No FIDO2 key found, falling back to normal open procedure"
+          open_normally
+        fi
+    }
+    ''}
+
+    # commands to run right before we mount our device
+    ${dev.preOpenCommands}
+
+    ${if (luks.yubikeySupport && (dev.yubikey != null)) || (luks.gpgSupport && (dev.gpgCard != null)) || (luks.fido2Support && fido2luksCredentials != []) then ''
+    open_with_hardware
+    '' else ''
+    open_normally
+    ''}
+
+    # commands to run right after we mounted our device
+    ${dev.postOpenCommands}
+  '';
+
+  askPass = pkgs.writeScriptBin "cryptsetup-askpass" ''
+    #!/bin/sh
+
+    ${commonFunctions}
+
+    while true; do
+        wait_target "luks" /crypt-ramfs/device 10 "LUKS to request a passphrase" || die "Passphrase is not requested now"
+        device=$(cat /crypt-ramfs/device)
+
+        echo -n "Passphrase for $device: "
+        IFS= read -rs passphrase
+        echo
+
+        rm /crypt-ramfs/device
+        echo -n "$passphrase" > /crypt-ramfs/passphrase
+    done
+  '';
+
+  preLVM = filterAttrs (n: v: v.preLVM) luks.devices;
+  postLVM = filterAttrs (n: v: !v.preLVM) luks.devices;
+
+
+  stage1Crypttab = pkgs.writeText "initrd-crypttab" (lib.concatStringsSep "\n" (lib.mapAttrsToList (n: v: let
+    opts = v.crypttabExtraOpts
+      ++ optional v.allowDiscards "discard"
+      ++ optionals v.bypassWorkqueues [ "no-read-workqueue" "no-write-workqueue" ]
+      ++ optional (v.header != null) "header=${v.header}"
+      ++ optional (v.keyFileOffset != null) "keyfile-offset=${toString v.keyFileOffset}"
+      ++ optional (v.keyFileSize != null) "keyfile-size=${toString v.keyFileSize}"
+      ++ optional (v.keyFileTimeout != null) "keyfile-timeout=${builtins.toString v.keyFileTimeout}s"
+      ++ optional (v.tryEmptyPassphrase) "try-empty-password=true"
+    ;
+  in "${n} ${v.device} ${if v.keyFile == null then "-" else v.keyFile} ${lib.concatStringsSep "," opts}") luks.devices));
+
+in
+{
+  imports = [
+    (mkRemovedOptionModule [ "boot" "initrd" "luks" "enable" ] "")
+  ];
+
+  options = {
+
+    boot.initrd.luks.mitigateDMAAttacks = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Unless enabled, encryption keys can be easily recovered by an attacker with physical
+        access to any machine with a PCMCIA, ExpressCard, Thunderbolt or FireWire port.
+        More information is available at <https://en.wikipedia.org/wiki/DMA_attack>.
+
+        This option blacklists FireWire drivers, but doesn't remove them. You can manually
+        load the drivers if you need to use a FireWire device, but don't forget to unload them!
+      '';
+    };
+
+    boot.initrd.luks.cryptoModules = mkOption {
+      type = types.listOf types.str;
+      default =
+        [ "aes" "aes_generic" "blowfish" "twofish"
+          "serpent" "cbc" "xts" "lrw" "sha1" "sha256" "sha512"
+          "af_alg" "algif_skcipher"
+        ];
+      description = lib.mdDoc ''
+        A list of cryptographic kernel modules needed to decrypt the root device(s).
+        The default includes all common modules.
+      '';
+    };
+
+    boot.initrd.luks.forceLuksSupportInInitrd = mkOption {
+      type = types.bool;
+      default = false;
+      internal = true;
+      description = lib.mdDoc ''
+        Whether to configure LUKS support in the initrd even when no LUKS
+        devices are configured.
+      '';
+    };
+
+    boot.initrd.luks.reusePassphrases = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        When opening a new LUKS device, try reusing the last successful
+        passphrase.
+
+        Useful for mounting a number of devices that use the same
+        passphrase without retyping it several times.
+
+        Such a setup can be useful if you use {command}`cryptsetup luksSuspend`.
+        Different LUKS devices will still have
+        different master keys even when using the same passphrase.
+      '';
+    };
+
+    boot.initrd.luks.devices = mkOption {
+      default = { };
+      example = { luksroot.device = "/dev/disk/by-uuid/430e9eff-d852-4f68-aa3b-2fa3599ebe08"; };
+      description = lib.mdDoc ''
+        The encrypted disk that should be opened before the root
+        filesystem is mounted. Both LVM-over-LUKS and LUKS-over-LVM
+        setups are supported. The unencrypted devices can be accessed as
+        {file}`/dev/mapper/«name»`.
+      '';
+
+      type = with types; attrsOf (submodule (
+        { name, ... }: { options = {
+
+          name = mkOption {
+            visible = false;
+            default = name;
+            example = "luksroot";
+            type = types.str;
+            description = lib.mdDoc "Name of the unencrypted device in {file}`/dev/mapper`.";
+          };
+
+          device = mkOption {
+            example = "/dev/disk/by-uuid/430e9eff-d852-4f68-aa3b-2fa3599ebe08";
+            type = types.str;
+            description = lib.mdDoc "Path of the underlying encrypted block device.";
+          };
+
+          header = mkOption {
+            default = null;
+            example = "/root/header.img";
+            type = types.nullOr types.str;
+            description = lib.mdDoc ''
+              The name of the file or block device that
+              should be used as header for the encrypted device.
+            '';
+          };
+
+          keyFile = mkOption {
+            default = null;
+            example = "/dev/sdb1";
+            type = types.nullOr types.str;
+            description = lib.mdDoc ''
+              The name of the file (can be a raw device or a partition) that
+              should be used as the decryption key for the encrypted device. If
+              not specified, you will be prompted for a passphrase instead.
+            '';
+          };
+
+          tryEmptyPassphrase = mkOption {
+            default = false;
+            type = types.bool;
+            description = lib.mdDoc ''
+              If opening with the keyFile fails, try an empty passphrase
+              before prompting for a password.
+            '';
+          };
+
+          keyFileTimeout = mkOption {
+            default = null;
+            example = 5;
+            type = types.nullOr types.int;
+            description = lib.mdDoc ''
+              The amount of time in seconds for a keyFile to appear before
+              timing out and trying passwords.
+            '';
+          };
+
+          keyFileSize = mkOption {
+            default = null;
+            example = 4096;
+            type = types.nullOr types.int;
+            description = lib.mdDoc ''
+              The size of the key file. Use this if only the beginning of the
+              key file should be used as a key (often the case if a raw device
+              or partition is used as key file). If not specified, the whole
+              `keyFile` will be used for decryption, instead of just
+              the first `keyFileSize` bytes.
+            '';
+          };
+
+          keyFileOffset = mkOption {
+            default = null;
+            example = 4096;
+            type = types.nullOr types.int;
+            description = lib.mdDoc ''
+              The offset of the key file. Use this in combination with
+              `keyFileSize` to use part of a file as key file
+              (often the case if a raw device or partition is used as a key file).
+              If not specified, the key begins at the first byte of
+              `keyFile`.
+            '';
+          };
+
+          # FIXME: get rid of this option.
+          preLVM = mkOption {
+            default = true;
+            type = types.bool;
+            description = lib.mdDoc "Whether the luksOpen will be attempted before LVM scan or after it.";
+          };
+
+          allowDiscards = mkOption {
+            default = false;
+            type = types.bool;
+            description = lib.mdDoc ''
+              Whether to allow TRIM requests to the underlying device. This option
+              has security implications; please read the LUKS documentation before
+              activating it.
+              This option is incompatible with authenticated encryption (dm-crypt
+              stacked over dm-integrity).
+            '';
+          };
+
+          bypassWorkqueues = mkOption {
+            default = false;
+            type = types.bool;
+            description = lib.mdDoc ''
+              Whether to bypass dm-crypt's internal read and write workqueues.
+              Enabling this should improve performance on SSDs; see
+              [here](https://wiki.archlinux.org/index.php/Dm-crypt/Specialties#Disable_workqueue_for_increased_solid_state_drive_(SSD)_performance)
+              for more information. Needs Linux 5.9 or later.
+            '';
+          };
+
+          fallbackToPassword = mkOption {
+            default = false;
+            type = types.bool;
+            description = lib.mdDoc ''
+              Whether to fall back to an interactive passphrase prompt if the
+              key file cannot be found. This will prevent unattended boot should
+              the key file go missing.
+            '';
+          };
+
+          gpgCard = mkOption {
+            default = null;
+            description = lib.mdDoc ''
+              Use a GPG-encrypted password for this LUKS device, decrypted at boot time with a GPG smartcard.
+              If null (the default), GPG smartcard support will be disabled for this device.
+            '';
+
+            type = with types; nullOr (submodule {
+              options = {
+                gracePeriod = mkOption {
+                  default = 10;
+                  type = types.int;
+                  description = lib.mdDoc "Time in seconds to wait for the GPG Smartcard.";
+                };
+
+                encryptedPass = mkOption {
+                  type = types.path;
+                  description = lib.mdDoc "Path to the GPG encrypted passphrase.";
+                };
+
+                publicKey = mkOption {
+                  type = types.path;
+                  description = lib.mdDoc "Path to the Public Key.";
+                };
+              };
+            });
+          };
+
+          fido2 = {
+            credential = mkOption {
+              default = null;
+              example = "f1d00200d8dc783f7fb1e10ace8da27f8312d72692abfca2f7e4960a73f48e82e1f7571f6ebfcee9fb434f9886ccc8fcc52a6614d8d2";
+              type = types.nullOr types.str;
+              description = lib.mdDoc "The FIDO2 credential ID.";
+            };
+
+            credentials = mkOption {
+              default = [];
+              example = [ "f1d00200d8dc783f7fb1e10ace8da27f8312d72692abfca2f7e4960a73f48e82e1f7571f6ebfcee9fb434f9886ccc8fcc52a6614d8d2" ];
+              type = types.listOf types.str;
+              description = lib.mdDoc ''
+                List of FIDO2 credential IDs.
+
+                Use this if you have multiple FIDO2 keys you want to use for the same luks device.
+              '';
+            };
+
+            gracePeriod = mkOption {
+              default = 10;
+              type = types.int;
+              description = lib.mdDoc "Time in seconds to wait for the FIDO2 key.";
+            };
+
+            passwordLess = mkOption {
+              default = false;
+              type = types.bool;
+              description = lib.mdDoc ''
+                Defines whether to use an empty string as the default salt.
+
+                Enable only when your device is PIN protected, such as [Trezor](https://trezor.io/).
+              '';
+            };
+          };
+
+          yubikey = mkOption {
+            default = null;
+            description = lib.mdDoc ''
+              The options to use for this LUKS device in YubiKey-PBA.
+              If null (the default), YubiKey-PBA will be disabled for this device.
+            '';
+
+            type = with types; nullOr (submodule {
+              options = {
+                twoFactor = mkOption {
+                  default = true;
+                  type = types.bool;
+                  description = lib.mdDoc "Whether to use a passphrase and a YubiKey (true), or only a YubiKey (false).";
+                };
+
+                slot = mkOption {
+                  default = 2;
+                  type = types.int;
+                  description = lib.mdDoc "Which slot on the YubiKey to challenge.";
+                };
+
+                saltLength = mkOption {
+                  default = 16;
+                  type = types.int;
+                  description = lib.mdDoc "Length of the new salt in byte (64 is the effective maximum).";
+                };
+
+                keyLength = mkOption {
+                  default = 64;
+                  type = types.int;
+                  description = lib.mdDoc "Length of the LUKS slot key derived with PBKDF2 in byte.";
+                };
+
+                iterationStep = mkOption {
+                  default = 0;
+                  type = types.int;
+                  description = lib.mdDoc "How much the iteration count for PBKDF2 is increased at each successful authentication.";
+                };
+
+                gracePeriod = mkOption {
+                  default = 10;
+                  type = types.int;
+                  description = lib.mdDoc "Time in seconds to wait for the YubiKey.";
+                };
+
+                /* TODO: Add to the documentation of the current module:
+
+                   Options related to the storing the salt.
+                */
+                storage = {
+                  device = mkOption {
+                    default = "/dev/sda1";
+                    type = types.path;
+                    description = lib.mdDoc ''
+                      An unencrypted device that will temporarily be mounted in stage-1.
+                      Must contain the current salt to create the challenge for this LUKS device.
+                    '';
+                  };
+
+                  fsType = mkOption {
+                    default = "vfat";
+                    type = types.str;
+                    description = lib.mdDoc "The filesystem of the unencrypted device.";
+                  };
+
+                  path = mkOption {
+                    default = "/crypt-storage/default";
+                    type = types.str;
+                    description = lib.mdDoc ''
+                      Absolute path of the salt on the unencrypted device with
+                      that device's root directory as "/".
+                    '';
+                  };
+                };
+              };
+            });
+          };
+
+          preOpenCommands = mkOption {
+            type = types.lines;
+            default = "";
+            example = ''
+              mkdir -p /tmp/persistent
+              mount -t zfs rpool/safe/persistent /tmp/persistent
+            '';
+            description = lib.mdDoc ''
+              Commands that should be run right before we try to mount our LUKS device.
+              This can be useful if the key needed to open the drive is on another partition.
+            '';
+          };
+
+          postOpenCommands = mkOption {
+            type = types.lines;
+            default = "";
+            example = ''
+              umount /tmp/persistent
+            '';
+            description = lib.mdDoc ''
+              Commands that should be run right after we have mounted our LUKS device.
+            '';
+          };
+
+          crypttabExtraOpts = mkOption {
+            type = with types; listOf singleLineStr;
+            default = [];
+            example = [ "_netdev" ];
+            visible = false;
+            description = lib.mdDoc ''
+              Only used with systemd stage 1.
+
+              Extra options to append to the last column of the generated crypttab file.
+            '';
+          };
+        };
+      }));
+    };
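+    # A usage sketch with illustrative/assumed device paths, combining a key
+    # file on a USB stick with an interactive passphrase fallback:
+    #
+    #   boot.initrd.luks.devices.luksroot = {
+    #     device = "/dev/disk/by-uuid/...";
+    #     keyFile = "/dev/sdb1";
+    #     keyFileSize = 4096;
+    #     fallbackToPassword = true;
+    #   };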
+
+    boot.initrd.luks.gpgSupport = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enables support for authenticating with a GPG encrypted password.
+      '';
+    };
+
+    boot.initrd.luks.yubikeySupport = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enables support for authenticating with a YubiKey on LUKS devices.
+        See the NixOS wiki for information on how to properly set up a LUKS
+        device and a YubiKey to work with this feature.
+      '';
+    };
+
+    boot.initrd.luks.fido2Support = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enables support for authenticating with FIDO2 devices.
+      '';
+    };
+
+  };
+
+  config = mkIf (luks.devices != {} || luks.forceLuksSupportInInitrd) {
+
+    assertions =
+      [ { assertion = !(luks.gpgSupport && luks.yubikeySupport);
+          message = "YubiKey and GPG Card may not be used at the same time.";
+        }
+
+        { assertion = !(luks.gpgSupport && luks.fido2Support);
+          message = "FIDO2 and GPG Card may not be used at the same time.";
+        }
+
+        { assertion = !(luks.fido2Support && luks.yubikeySupport);
+          message = "FIDO2 and YubiKey may not be used at the same time.";
+        }
+
+        { assertion = any (dev: dev.bypassWorkqueues) (attrValues luks.devices)
+                      -> versionAtLeast kernelPackages.kernel.version "5.9";
+          message = "boot.initrd.luks.devices.<name>.bypassWorkqueues is not supported for kernels older than 5.9";
+        }
+
+        { assertion = !config.boot.initrd.systemd.enable -> all (x: x.keyFileTimeout == null) (attrValues luks.devices);
+          message = "boot.initrd.luks.devices.<name>.keyFileTimeout is only supported for systemd initrd";
+        }
+
+        { assertion = config.boot.initrd.systemd.enable -> all (dev: !dev.fallbackToPassword) (attrValues luks.devices);
+          message = "boot.initrd.luks.devices.<name>.fallbackToPassword is implied by systemd stage 1.";
+        }
+        { assertion = config.boot.initrd.systemd.enable -> all (dev: dev.preLVM) (attrValues luks.devices);
+          message = "boot.initrd.luks.devices.<name>.preLVM is not used by systemd stage 1.";
+        }
+        { assertion = config.boot.initrd.systemd.enable -> options.boot.initrd.luks.reusePassphrases.highestPrio == defaultPrio;
+          message = "boot.initrd.luks.reusePassphrases has no effect with systemd stage 1.";
+        }
+        { assertion = config.boot.initrd.systemd.enable -> all (dev: dev.preOpenCommands == "" && dev.postOpenCommands == "") (attrValues luks.devices);
+          message = "boot.initrd.luks.devices.<name>.preOpenCommands and postOpenCommands is not supported by systemd stage 1. Please bind a service to cryptsetup.target or cryptsetup-pre.target instead.";
+        }
+        # TODO
+        { assertion = config.boot.initrd.systemd.enable -> !luks.gpgSupport;
+          message = "systemd stage 1 does not support GPG smartcards yet.";
+        }
+        { assertion = config.boot.initrd.systemd.enable -> !luks.fido2Support;
+          message = ''
+            systemd stage 1 does not support configuring FIDO2 unlocking through `boot.initrd.luks.devices.<name>.fido2`.
+            Use systemd-cryptenroll(1) to configure FIDO2 support.
+          '';
+        }
+        # TODO
+        { assertion = config.boot.initrd.systemd.enable -> !luks.yubikeySupport;
+          message = "systemd stage 1 does not support Yubikeys yet.";
+        }
+      ];
+
+    # Actually, the sbp2 driver is the one enabling the DMA attack, but this needs to be tested
+    boot.blacklistedKernelModules = optionals luks.mitigateDMAAttacks
+      ["firewire_ohci" "firewire_core" "firewire_sbp2"];
+
+    # Some modules that may be needed for mounting anything encrypted
+    boot.initrd.availableKernelModules = [ "dm_mod" "dm_crypt" "cryptd" "input_leds" ]
+      ++ luks.cryptoModules
+      # workaround until https://marc.info/?l=linux-crypto-vger&m=148783562211457&w=4 is merged
+      # remove once 'modprobe --show-depends xts' shows ecb as a dependency
+      ++ (optional (builtins.elem "xts" luks.cryptoModules) "ecb");
+
+    # copy the cryptsetup binary and its dependencies
+    boot.initrd.extraUtilsCommands = let
+      pbkdf2-sha512 = pkgs.runCommandCC "pbkdf2-sha512" { buildInputs = [ pkgs.openssl ]; } ''
+        mkdir -p "$out/bin"
+        cc -O3 -lcrypto ${./pbkdf2-sha512.c} -o "$out/bin/pbkdf2-sha512"
+        strip -s "$out/bin/pbkdf2-sha512"
+      '';
+    in
+    mkIf (!config.boot.initrd.systemd.enable) ''
+      copy_bin_and_libs ${pkgs.cryptsetup}/bin/cryptsetup
+      copy_bin_and_libs ${askPass}/bin/cryptsetup-askpass
+      sed -i s,/bin/sh,$out/bin/sh, $out/bin/cryptsetup-askpass
+
+      ${optionalString luks.yubikeySupport ''
+        copy_bin_and_libs ${pkgs.yubikey-personalization}/bin/ykchalresp
+        copy_bin_and_libs ${pkgs.yubikey-personalization}/bin/ykinfo
+        copy_bin_and_libs ${pkgs.openssl.bin}/bin/openssl
+
+        copy_bin_and_libs ${pbkdf2-sha512}/bin/pbkdf2-sha512
+
+        mkdir -p $out/etc/ssl
+        cp -pdv ${pkgs.openssl.out}/etc/ssl/openssl.cnf $out/etc/ssl
+
+        cat > $out/bin/openssl-wrap <<EOF
+        #!$out/bin/sh
+        export OPENSSL_CONF=$out/etc/ssl/openssl.cnf
+        $out/bin/openssl "\$@"
+        EOF
+        chmod +x $out/bin/openssl-wrap
+      ''}
+
+      ${optionalString luks.fido2Support ''
+        copy_bin_and_libs ${pkgs.fido2luks}/bin/fido2luks
+      ''}
+
+
+      ${optionalString luks.gpgSupport ''
+        copy_bin_and_libs ${pkgs.gnupg}/bin/gpg
+        copy_bin_and_libs ${pkgs.gnupg}/bin/gpg-agent
+        copy_bin_and_libs ${pkgs.gnupg}/libexec/scdaemon
+
+        ${concatMapStringsSep "\n" (x:
+          optionalString (x.gpgCard != null)
+            ''
+              mkdir -p $out/secrets/gpg-keys/${x.device}
+              cp -a ${x.gpgCard.encryptedPass} $out/secrets/gpg-keys/${x.device}/cryptkey.gpg
+              cp -a ${x.gpgCard.publicKey} $out/secrets/gpg-keys/${x.device}/pubkey.asc
+            ''
+          ) (attrValues luks.devices)
+        }
+      ''}
+    '';
+
+    boot.initrd.extraUtilsCommandsTest = mkIf (!config.boot.initrd.systemd.enable) ''
+      $out/bin/cryptsetup --version
+      ${optionalString luks.yubikeySupport ''
+        $out/bin/ykchalresp -V
+        $out/bin/ykinfo -V
+        $out/bin/openssl-wrap version
+      ''}
+      ${optionalString luks.gpgSupport ''
+        $out/bin/gpg --version
+        $out/bin/gpg-agent --version
+        $out/bin/scdaemon --version
+      ''}
+      ${optionalString luks.fido2Support ''
+        $out/bin/fido2luks --version
+      ''}
+    '';
+
+    boot.initrd.systemd = {
+      contents."/etc/crypttab".source = stage1Crypttab;
+
+      extraBin.systemd-cryptsetup = "${config.boot.initrd.systemd.package}/lib/systemd/systemd-cryptsetup";
+
+      additionalUpstreamUnits = [
+        "cryptsetup-pre.target"
+        "cryptsetup.target"
+        "remote-cryptsetup.target"
+      ];
+      storePaths = [
+        "${config.boot.initrd.systemd.package}/lib/systemd/systemd-cryptsetup"
+        "${config.boot.initrd.systemd.package}/lib/systemd/system-generators/systemd-cryptsetup-generator"
+      ];
+
+    };
+    # We do this because we need the udev rules from the package
+    boot.initrd.services.lvm.enable = true;
+
+    boot.initrd.preFailCommands = mkIf (!config.boot.initrd.systemd.enable) postCommands;
+    boot.initrd.preLVMCommands = mkIf (!config.boot.initrd.systemd.enable) (commonFunctions + preCommands + concatStrings (mapAttrsToList openCommand preLVM) + postCommands);
+    boot.initrd.postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable) (commonFunctions + preCommands + concatStrings (mapAttrsToList openCommand postLVM) + postCommands);
+
+    environment.systemPackages = [ pkgs.cryptsetup ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/modprobe.nix b/nixpkgs/nixos/modules/system/boot/modprobe.nix
new file mode 100644
index 000000000000..d751c4462d3f
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/modprobe.nix
@@ -0,0 +1,72 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  ###### interface
+
+  options = {
+    boot.modprobeConfig.enable = mkEnableOption (lib.mdDoc "modprobe config. This is useful for systems like containers, which do not require a kernel") // {
+      default = true;
+    };
+
+    boot.blacklistedKernelModules = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "cirrusfb" "i2c_piix4" ];
+      description = lib.mdDoc ''
+        List of names of kernel modules that should not be loaded
+        automatically by the hardware probing code.
+      '';
+    };
+
+    boot.extraModprobeConfig = mkOption {
+      default = "";
+      example =
+        ''
+          options parport_pc io=0x378 irq=7 dma=1
+        '';
+      description = lib.mdDoc ''
+        Any additional configuration to be appended to the generated
+        {file}`modprobe.conf`.  This is typically used to
+        specify module options.  See
+        {manpage}`modprobe.d(5)` for details.
+      '';
+      type = types.lines;
+    };
+
+  };
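+
+  # Illustrative usage sketch; the module name and option value are examples only:
+  #
+  #   boot.blacklistedKernelModules = [ "pcspkr" ];
+  #   boot.extraModprobeConfig = ''
+  #     options snd_hda_intel power_save=1
+  #   '';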
+
+
+  ###### implementation
+
+  config = mkIf config.boot.modprobeConfig.enable {
+
+    environment.etc."modprobe.d/ubuntu.conf".source = "${pkgs.kmod-blacklist-ubuntu}/modprobe.conf";
+
+    environment.etc."modprobe.d/nixos.conf".text =
+      ''
+        ${flip concatMapStrings config.boot.blacklistedKernelModules (name: ''
+          blacklist ${name}
+        '')}
+        ${config.boot.extraModprobeConfig}
+      '';
+    environment.etc."modprobe.d/debian.conf".source = pkgs.kmod-debian-aliases;
+
+    environment.etc."modprobe.d/systemd.conf".source = "${config.systemd.package}/lib/modprobe.d/systemd.conf";
+
+    environment.systemPackages = [ pkgs.kmod ];
+
+    system.activationScripts.modprobe = stringAfter ["specialfs"]
+      ''
+        # Allow the kernel to find our wrapped modprobe (which searches
+        # in the right location in the Nix store for kernel modules).
+        # We need this when the kernel (or some module) auto-loads a
+        # module.
+        echo ${pkgs.kmod}/bin/modprobe > /proc/sys/kernel/modprobe
+      '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/networkd.nix b/nixpkgs/nixos/modules/system/boot/networkd.nix
new file mode 100644
index 000000000000..33261021480f
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/networkd.nix
@@ -0,0 +1,3036 @@
+{ config, lib, pkgs, utils, ... }:
+
+with utils.systemdUtils.unitOptions;
+with utils.systemdUtils.lib;
+with utils.systemdUtils.network.units;
+with lib;
+
+let
+
+  check = {
+
+    global = {
+      sectionNetwork = checkUnitConfig "Network" [
+        (assertOnlyFields [
+          "SpeedMeter"
+          "SpeedMeterIntervalSec"
+          "ManageForeignRoutingPolicyRules"
+          "ManageForeignRoutes"
+          "RouteTable"
+        ])
+        (assertValueOneOf "SpeedMeter" boolValues)
+        (assertInt "SpeedMeterIntervalSec")
+        (assertValueOneOf "ManageForeignRoutingPolicyRules" boolValues)
+        (assertValueOneOf "ManageForeignRoutes" boolValues)
+      ];
+
+      sectionDHCPv4 = checkUnitConfig "DHCPv4" [
+        (assertOnlyFields [
+          "ClientIdentifier"
+          "DUIDType"
+          "DUIDRawData"
+        ])
+        (assertValueOneOf "ClientIdentifier" ["mac" "duid" "duid-only"])
+      ];
+
+      sectionDHCPv6 = checkUnitConfig "DHCPv6" [
+        (assertOnlyFields [
+          "DUIDType"
+          "DUIDRawData"
+        ])
+      ];
+    };
+
+    link = {
+
+      sectionLink = checkUnitConfig "Link" [
+        (assertOnlyFields [
+          "Description"
+          "Alias"
+          "MACAddressPolicy"
+          "MACAddress"
+          "NamePolicy"
+          "Name"
+          "AlternativeNamesPolicy"
+          "AlternativeName"
+          "MTUBytes"
+          "BitsPerSecond"
+          "Duplex"
+          "AutoNegotiation"
+          "WakeOnLan"
+          "Port"
+          "Advertise"
+          "ReceiveChecksumOffload"
+          "TransmitChecksumOffload"
+          "TCPSegmentationOffload"
+          "TCP6SegmentationOffload"
+          "GenericSegmentationOffload"
+          "GenericReceiveOffload"
+          "LargeReceiveOffload"
+          "RxChannels"
+          "TxChannels"
+          "OtherChannels"
+          "CombinedChannels"
+          "RxBufferSize"
+          "TxBufferSize"
+          "ReceiveQueues"
+          "TransmitQueues"
+          "TransmitQueueLength"
+        ])
+        (assertValueOneOf "MACAddressPolicy" ["persistent" "random" "none"])
+        (assertMacAddress "MACAddress")
+        (assertByteFormat "MTUBytes")
+        (assertByteFormat "BitsPerSecond")
+        (assertValueOneOf "Duplex" ["half" "full"])
+        (assertValueOneOf "AutoNegotiation" boolValues)
+        (assertValuesSomeOfOr "WakeOnLan" ["phy" "unicast" "multicast" "broadcast" "arp" "magic" "secureon"] "off")
+        (assertValueOneOf "Port" ["tp" "aui" "bnc" "mii" "fibre"])
+        (assertValueOneOf "ReceiveChecksumOffload" boolValues)
+        (assertValueOneOf "TransmitChecksumOffload" boolValues)
+        (assertValueOneOf "TCPSegmentationOffload" boolValues)
+        (assertValueOneOf "TCP6SegmentationOffload" boolValues)
+        (assertValueOneOf "GenericSegmentationOffload" boolValues)
+        (assertValueOneOf "GenericReceiveOffload" boolValues)
+        (assertValueOneOf "LargeReceiveOffload" boolValues)
+        (assertInt "RxChannels")
+        (assertRange "RxChannels" 1 4294967295)
+        (assertInt "TxChannels")
+        (assertRange "TxChannels" 1 4294967295)
+        (assertInt "OtherChannels")
+        (assertRange "OtherChannels" 1 4294967295)
+        (assertInt "CombinedChannels")
+        (assertRange "CombinedChannels" 1 4294967295)
+        (assertInt "RxBufferSize")
+        (assertInt "TxBufferSize")
+        (assertRange "ReceiveQueues" 1 4096)
+        (assertRange "TransmitQueues" 1 4096)
+        (assertRange "TransmitQueueLength" 1 4294967294)
+      ];
+    };
+
+    netdev = let
+
+      tunChecks = [
+        (assertOnlyFields [
+          "MultiQueue"
+          "PacketInfo"
+          "VNetHeader"
+          "User"
+          "Group"
+        ])
+        (assertValueOneOf "MultiQueue" boolValues)
+        (assertValueOneOf "PacketInfo" boolValues)
+        (assertValueOneOf "VNetHeader" boolValues)
+      ];
+
+      # See https://www.freedesktop.org/software/systemd/man/latest/systemd.netdev.html#%5BIPVTAP%5D%20Section%20Options
+      ipVlanVtapChecks = [
+        (assertOnlyFields [
+          "Mode"
+          "Flags"
+        ])
+        (assertValueOneOf "Mode" ["L2" "L3" "L3S" ])
+        (assertValueOneOf "Flags" ["private" "vepa" "bridge" ])
+      ];
+    in {
+
+      sectionNetdev = checkUnitConfig "Netdev" [
+        (assertOnlyFields [
+          "Description"
+          "Name"
+          "Kind"
+          "MTUBytes"
+          "MACAddress"
+        ])
+        (assertHasField "Name")
+        (assertHasField "Kind")
+        (assertValueOneOf "Kind" [
+          "bond"
+          "bridge"
+          "dummy"
+          "gre"
+          "gretap"
+          "erspan"
+          "ip6gre"
+          "ip6tnl"
+          "ip6gretap"
+          "ipip"
+          "ipvlan"
+          "ipvtap"
+          "macvlan"
+          "macvtap"
+          "sit"
+          "tap"
+          "tun"
+          "veth"
+          "vlan"
+          "vti"
+          "vti6"
+          "vxlan"
+          "geneve"
+          "l2tp"
+          "macsec"
+          "wlan"
+          "vrf"
+          "vcan"
+          "vxcan"
+          "wireguard"
+          "netdevsim"
+          "nlmon"
+          "fou"
+          "xfrm"
+          "ifb"
+          "batadv"
+        ])
+        (assertByteFormat "MTUBytes")
+        (assertNetdevMacAddress "MACAddress")
+      ];
+
+      sectionVLAN = checkUnitConfig "VLAN" [
+        (assertOnlyFields [
+          "Id"
+          "GVRP"
+          "MVRP"
+          "LooseBinding"
+          "ReorderHeader"
+        ])
+        (assertInt "Id")
+        (assertRange "Id" 0 4094)
+        (assertValueOneOf "GVRP" boolValues)
+        (assertValueOneOf "MVRP" boolValues)
+        (assertValueOneOf "LooseBinding" boolValues)
+        (assertValueOneOf "ReorderHeader" boolValues)
+      ];
+
+      sectionIPVLAN = checkUnitConfig "IPVLAN" ipVlanVtapChecks;
+
+      sectionIPVTAP = checkUnitConfig "IPVTAP" ipVlanVtapChecks;
+
+      sectionMACVLAN = checkUnitConfig "MACVLAN" [
+        (assertOnlyFields [
+          "Mode"
+        ])
+        (assertValueOneOf "Mode" ["private" "vepa" "bridge" "passthru"])
+      ];
+
+      sectionVXLAN = checkUnitConfig "VXLAN" [
+        (assertOnlyFields [
+          "VNI"
+          "Remote"
+          "Local"
+          "Group"
+          "TOS"
+          "TTL"
+          "MacLearning"
+          "FDBAgeingSec"
+          "MaximumFDBEntries"
+          "ReduceARPProxy"
+          "L2MissNotification"
+          "L3MissNotification"
+          "RouteShortCircuit"
+          "UDPChecksum"
+          "UDP6ZeroChecksumTx"
+          "UDP6ZeroChecksumRx"
+          "RemoteChecksumTx"
+          "RemoteChecksumRx"
+          "GroupPolicyExtension"
+          "GenericProtocolExtension"
+          "DestinationPort"
+          "PortRange"
+          "FlowLabel"
+          "IPDoNotFragment"
+          "Independent"
+        ])
+        (assertInt "VNI")
+        (assertRange "VNI" 1 16777215)
+        (assertValueOneOf "MacLearning" boolValues)
+        (assertInt "MaximumFDBEntries")
+        (assertValueOneOf "ReduceARPProxy" boolValues)
+        (assertValueOneOf "L2MissNotification" boolValues)
+        (assertValueOneOf "L3MissNotification" boolValues)
+        (assertValueOneOf "RouteShortCircuit" boolValues)
+        (assertValueOneOf "UDPChecksum" boolValues)
+        (assertValueOneOf "UDP6ZeroChecksumTx" boolValues)
+        (assertValueOneOf "UDP6ZeroChecksumRx" boolValues)
+        (assertValueOneOf "RemoteChecksumTx" boolValues)
+        (assertValueOneOf "RemoteChecksumRx" boolValues)
+        (assertValueOneOf "GroupPolicyExtension" boolValues)
+        (assertValueOneOf "GenericProtocolExtension" boolValues)
+        (assertInt "FlowLabel")
+        (assertRange "FlowLabel" 0 1048575)
+        (assertValueOneOf "IPDoNotFragment" (boolValues + ["inherit"]))
+        (assertValueOneOf "Independent" boolValues)
+      ];
+
+      sectionTunnel = checkUnitConfig "Tunnel" [
+        (assertOnlyFields [
+          "Local"
+          "Remote"
+          "TOS"
+          "TTL"
+          "DiscoverPathMTU"
+          "IPv6FlowLabel"
+          "CopyDSCP"
+          "EncapsulationLimit"
+          "Key"
+          "InputKey"
+          "OutputKey"
+          "Mode"
+          "Independent"
+          "AssignToLoopback"
+          "AllowLocalRemote"
+          "FooOverUDP"
+          "FOUDestinationPort"
+          "FOUSourcePort"
+          "Encapsulation"
+          "IPv6RapidDeploymentPrefix"
+          "ISATAP"
+          "SerializeTunneledPackets"
+          "ERSPANIndex"
+        ])
+        (assertInt "TTL")
+        (assertRange "TTL" 0 255)
+        (assertValueOneOf "DiscoverPathMTU" boolValues)
+        (assertValueOneOf "CopyDSCP" boolValues)
+        (assertValueOneOf "Mode" ["ip6ip6" "ipip6" "any"])
+        (assertValueOneOf "Independent" boolValues)
+        (assertValueOneOf "AssignToLoopback" boolValues)
+        (assertValueOneOf "AllowLocalRemote" boolValues)
+        (assertValueOneOf "FooOverUDP" boolValues)
+        (assertPort "FOUDestinationPort")
+        (assertPort "FOUSourcePort")
+        (assertValueOneOf "Encapsulation" ["FooOverUDP" "GenericUDPEncapsulation"])
+        (assertValueOneOf "ISATAP" boolValues)
+        (assertValueOneOf "SerializeTunneledPackets" boolValues)
+        (assertInt "ERSPANIndex")
+        (assertRange "ERSPANIndex" 1 1048575)
+      ];
+
+      sectionFooOverUDP = checkUnitConfig "FooOverUDP" [
+        (assertOnlyFields [
+          "Port"
+          "Encapsulation"
+          "Protocol"
+        ])
+        (assertPort "Port")
+        (assertValueOneOf "Encapsulation" ["FooOverUDP" "GenericUDPEncapsulation"])
+      ];
+
+      sectionPeer = checkUnitConfig "Peer" [
+        (assertOnlyFields [
+          "Name"
+          "MACAddress"
+        ])
+        (assertMacAddress "MACAddress")
+      ];
+
+      sectionTun = checkUnitConfig "Tun" tunChecks;
+
+      sectionTap = checkUnitConfig "Tap" tunChecks;
+
+      sectionL2TP = checkUnitConfig "L2TP" [
+        (assertOnlyFields [
+          "TunnelId"
+          "PeerTunnelId"
+          "Remote"
+          "Local"
+          "EncapsulationType"
+          "UDPSourcePort"
+          "UDPDestinationPort"
+          "UDPChecksum"
+          "UDP6ZeroChecksumTx"
+          "UDP6ZeroChecksumRx"
+        ])
+        (assertInt "TunnelId")
+        (assertRange "TunnelId" 1 4294967295)
+        (assertInt "PeerTunnelId")
+        (assertRange "PeerTunnelId" 1 4294967295)
+        (assertValueOneOf "EncapsulationType" [ "ip" "udp" ])
+        (assertPort "UDPSourcePort")
+        (assertPort "UDPDestinationPort")
+        (assertValueOneOf "UDPChecksum" boolValues)
+        (assertValueOneOf "UDP6ZeroChecksumTx" boolValues)
+        (assertValueOneOf "UDP6ZeroChecksumRx" boolValues)
+      ];
+
+      sectionL2TPSession = checkUnitConfig "L2TPSession" [
+        (assertOnlyFields [
+          "Name"
+          "SessionId"
+          "PeerSessionId"
+          "Layer2SpecificHeader"
+        ])
+        (assertHasField "Name")
+        (assertHasField "SessionId")
+        (assertInt "SessionId")
+        (assertRange "SessionId" 1 4294967295)
+        (assertHasField "PeerSessionId")
+        (assertInt "PeerSessionId")
+        (assertRange "PeerSessionId" 1 4294967295)
+        (assertValueOneOf "Layer2SpecificHeader" [ "none" "default" ])
+      ];
+
+      # NOTE: The PrivateKey directive is deliberately missing here; please
+      # do not add it to this list. The Nix store is world-readable, so let's
+      # refrain from providing a footgun.
+      sectionWireGuard = checkUnitConfig "WireGuard" [
+        (assertOnlyFields [
+          "PrivateKeyFile"
+          "ListenPort"
+          "FirewallMark"
+          "RouteTable"
+          "RouteMetric"
+        ])
+        (assertInt "FirewallMark")
+        (assertRange "FirewallMark" 1 4294967295)
+      ];
+
+      # NOTE: The PresharedKey directive is deliberately missing here; please
+      # do not add it to this list. The Nix store is world-readable, so let's
+      # refrain from providing a footgun.
+      sectionWireGuardPeer = checkUnitConfig "WireGuardPeer" [
+        (assertOnlyFields [
+          "PublicKey"
+          "PresharedKeyFile"
+          "AllowedIPs"
+          "Endpoint"
+          "PersistentKeepalive"
+          "RouteTable"
+          "RouteMetric"
+        ])
+        (assertInt "PersistentKeepalive")
+        (assertRange "PersistentKeepalive" 0 65535)
+      ];
+
+      sectionBond = checkUnitConfig "Bond" [
+        (assertOnlyFields [
+          "Mode"
+          "TransmitHashPolicy"
+          "LACPTransmitRate"
+          "MIIMonitorSec"
+          "UpDelaySec"
+          "DownDelaySec"
+          "LearnPacketIntervalSec"
+          "AdSelect"
+          "AdActorSystemPriority"
+          "AdUserPortKey"
+          "AdActorSystem"
+          "FailOverMACPolicy"
+          "ARPValidate"
+          "ARPIntervalSec"
+          "ARPIPTargets"
+          "ARPAllTargets"
+          "PrimaryReselectPolicy"
+          "ResendIGMP"
+          "PacketsPerSlave"
+          "GratuitousARP"
+          "AllSlavesActive"
+          "DynamicTransmitLoadBalancing"
+          "MinLinks"
+        ])
+        (assertValueOneOf "Mode" [
+          "balance-rr"
+          "active-backup"
+          "balance-xor"
+          "broadcast"
+          "802.3ad"
+          "balance-tlb"
+          "balance-alb"
+        ])
+        (assertValueOneOf "TransmitHashPolicy" [
+          "layer2"
+          "layer3+4"
+          "layer2+3"
+          "encap2+3"
+          "encap3+4"
+        ])
+        (assertValueOneOf "LACPTransmitRate" ["slow" "fast"])
+        (assertValueOneOf "AdSelect" ["stable" "bandwidth" "count"])
+        (assertInt "AdActorSystemPriority")
+        (assertRange "AdActorSystemPriority" 1 65535)
+        (assertInt "AdUserPortKey")
+        (assertRange "AdUserPortKey" 0 1023)
+        (assertValueOneOf "FailOverMACPolicy" ["none" "active" "follow"])
+        (assertValueOneOf "ARPValidate" ["none" "active" "backup" "all"])
+        (assertValueOneOf "ARPAllTargets" ["any" "all"])
+        (assertValueOneOf "PrimaryReselectPolicy" ["always" "better" "failure"])
+        (assertInt "ResendIGMP")
+        (assertRange "ResendIGMP" 0 255)
+        (assertInt "PacketsPerSlave")
+        (assertRange "PacketsPerSlave" 0 65535)
+        (assertInt "GratuitousARP")
+        (assertRange "GratuitousARP" 0 255)
+        (assertValueOneOf "AllSlavesActive" boolValues)
+        (assertValueOneOf "DynamicTransmitLoadBalancing" boolValues)
+        (assertInt "MinLinks")
+        (assertMinimum "MinLinks" 0)
+      ];
+
+      sectionXfrm = checkUnitConfig "Xfrm" [
+        (assertOnlyFields [
+          "InterfaceId"
+          "Independent"
+        ])
+        (assertInt "InterfaceId")
+        (assertRange "InterfaceId" 1 4294967295)
+        (assertValueOneOf "Independent" boolValues)
+      ];
+
+      sectionVRF = checkUnitConfig "VRF" [
+        (assertOnlyFields [
+          "Table"
+        ])
+        (assertInt "Table")
+        (assertMinimum "Table" 0)
+      ];
+
+      sectionWLAN = checkUnitConfig "WLAN" [
+        (assertOnlyFields [
+          "PhysicalDevice"  # systemd supports both strings ("phy0") and indexes (0) here.
+          "Type"
+          "WDS"
+        ])
+        # See https://github.com/systemd/systemd/blob/main/src/basic/linux/nl80211.h#L3382
+        (assertValueOneOf "Type" [
+          "ad-hoc"
+          "station"
+          "ap"
+          "ap-vlan"
+          "wds"
+          "monitor"
+          "mesh-point"
+          "p2p-client"
+          "p2p-go"
+          "p2p-device"
+          "ocb"
+          "nan"
+        ])
+        (assertValueOneOf "WDS" boolValues)
+      ];
+
+      sectionBatmanAdvanced = checkUnitConfig "BatmanAdvanced" [
+        (assertOnlyFields [
+          "GatewayMode"
+          "Aggregation"
+          "BridgeLoopAvoidance"
+          "DistributedArpTable"
+          "Fragmentation"
+          "HopPenalty"
+          "OriginatorIntervalSec"
+          "GatewayBandwithDown"
+          "GatewayBandwithUp"
+          "RoutingAlgorithm"
+        ])
+        (assertValueOneOf "GatewayMode" ["off" "client" "server"])
+        (assertValueOneOf "Aggregation" boolValues)
+        (assertValueOneOf "BridgeLoopAvoidance" boolValues)
+        (assertValueOneOf "DistributedArpTable" boolValues)
+        (assertValueOneOf "Fragmentation" boolValues)
+        (assertInt "HopPenalty")
+        (assertRange "HopPenalty" 0 255)
+        (assertValueOneOf "RoutingAlgorithm" ["batman-v" "batman-iv"])
+      ];
+    };
+
+    network = {
+
+      sectionLink = checkUnitConfig "Link" [
+        (assertOnlyFields [
+          "MACAddress"
+          "MTUBytes"
+          "ARP"
+          "Multicast"
+          "AllMulticast"
+          "Unmanaged"
+          "Group"
+          "RequiredForOnline"
+          "RequiredFamilyForOnline"
+          "ActivationPolicy"
+          "Promiscuous"
+        ])
+        (assertMacAddress "MACAddress")
+        (assertByteFormat "MTUBytes")
+        (assertValueOneOf "ARP" boolValues)
+        (assertValueOneOf "Multicast" boolValues)
+        (assertValueOneOf "AllMulticast" boolValues)
+        (assertValueOneOf "Promiscuous" boolValues)
+        (assertValueOneOf "Unmanaged" boolValues)
+        (assertInt "Group")
+        (assertRange "Group" 0 2147483647)
+        (assertValueOneOf "RequiredForOnline" (boolValues ++ (
+          let
+            # https://freedesktop.org/software/systemd/man/networkctl.html#missing
+            operationalStates = [
+              "missing"
+              "off"
+              "no-carrier"
+              "dormant"
+              "degraded-carrier"
+              "carrier"
+              "degraded"
+              "enslaved"
+              "routable"
+            ];
+            operationalStateRanges = concatLists (imap0 (i: min: map (max: "${min}:${max}") (drop i operationalStates)) operationalStates);
+          in
+          operationalStates ++ operationalStateRanges
+        )))
+        (assertValueOneOf "RequiredFamilyForOnline" [
+          "ipv4"
+          "ipv6"
+          "both"
+          "any"
+        ])
+        (assertValueOneOf "ActivationPolicy" ([
+          "up"
+          "always-up"
+          "manual"
+          "always-down"
+          "down"
+          "bound"
+        ]))
+      ];
+
+      sectionNetwork = checkUnitConfig "Network" [
+        (assertOnlyFields [
+          "Description"
+          "DHCP"
+          "DHCPServer"
+          "LinkLocalAddressing"
+          "IPv4LLRoute"
+          "DefaultRouteOnDevice"
+          "LLMNR"
+          "MulticastDNS"
+          "DNSOverTLS"
+          "DNSSEC"
+          "DNSSECNegativeTrustAnchors"
+          "LLDP"
+          "EmitLLDP"
+          "BindCarrier"
+          "Address"
+          "Gateway"
+          "DNS"
+          "Domains"
+          "DNSDefaultRoute"
+          "NTP"
+          "IPForward"
+          "IPMasquerade"
+          "IPv6PrivacyExtensions"
+          "IPv6AcceptRA"
+          "IPv6DuplicateAddressDetection"
+          "IPv6HopLimit"
+          "IPv4ProxyARP"
+          "IPv6ProxyNDP"
+          "IPv6ProxyNDPAddress"
+          "IPv6SendRA"
+          "DHCPPrefixDelegation"
+          "IPv6MTUBytes"
+          "Bridge"
+          "Bond"
+          "VRF"
+          "VLAN"
+          "IPVLAN"
+          "IPVTAP"
+          "MACVLAN"
+          "MACVTAP"
+          "VXLAN"
+          "Tunnel"
+          "MACsec"
+          "ActiveSlave"
+          "PrimarySlave"
+          "ConfigureWithoutCarrier"
+          "IgnoreCarrierLoss"
+          "Xfrm"
+          "KeepConfiguration"
+          "BatmanAdvanced"
+        ])
+        # Note: for DHCP, the values "both", "none", "v4" and "v6" are deprecated
+        (assertValueOneOf "DHCP" ["yes" "no" "ipv4" "ipv6"])
+        (assertValueOneOf "DHCPServer" boolValues)
+        (assertValueOneOf "LinkLocalAddressing" ["yes" "no" "ipv4" "ipv6" "fallback" "ipv4-fallback"])
+        (assertValueOneOf "IPv4LLRoute" boolValues)
+        (assertValueOneOf "DefaultRouteOnDevice" boolValues)
+        (assertValueOneOf "LLMNR" (boolValues ++ ["resolve"]))
+        (assertValueOneOf "MulticastDNS" (boolValues ++ ["resolve"]))
+        (assertValueOneOf "DNSOverTLS" (boolValues ++ ["opportunistic"]))
+        (assertValueOneOf "DNSSEC" (boolValues ++ ["allow-downgrade"]))
+        (assertValueOneOf "LLDP" (boolValues ++ ["routers-only"]))
+        (assertValueOneOf "EmitLLDP" (boolValues ++ ["nearest-bridge" "non-tpmr-bridge" "customer-bridge"]))
+        (assertValueOneOf "DNSDefaultRoute" boolValues)
+        (assertValueOneOf "IPForward" (boolValues ++ ["ipv4" "ipv6"]))
+        (assertValueOneOf "IPMasquerade" (boolValues ++ ["ipv4" "ipv6" "both"]))
+        (assertValueOneOf "IPv6PrivacyExtensions" (boolValues ++ ["prefer-public" "kernel"]))
+        (assertValueOneOf "IPv6AcceptRA" boolValues)
+        (assertInt "IPv6DuplicateAddressDetection")
+        (assertMinimum "IPv6DuplicateAddressDetection" 0)
+        (assertInt "IPv6HopLimit")
+        (assertMinimum "IPv6HopLimit" 0)
+        (assertValueOneOf "IPv4ProxyARP" boolValues)
+        (assertValueOneOf "IPv6ProxyNDP" boolValues)
+        (assertValueOneOf "IPv6SendRA" boolValues)
+        (assertValueOneOf "DHCPPrefixDelegation" boolValues)
+        (assertByteFormat "IPv6MTUBytes")
+        (assertValueOneOf "ActiveSlave" boolValues)
+        (assertValueOneOf "PrimarySlave" boolValues)
+        (assertValueOneOf "ConfigureWithoutCarrier" boolValues)
+        (assertValueOneOf "KeepConfiguration" (boolValues ++ ["static" "dhcp-on-stop" "dhcp"]))
+      ];
+
+      sectionAddress = checkUnitConfig "Address" [
+        (assertOnlyFields [
+          "Address"
+          "Peer"
+          "Broadcast"
+          "Label"
+          "PreferredLifetime"
+          "Scope"
+          "RouteMetric"
+          "HomeAddress"
+          "DuplicateAddressDetection"
+          "ManageTemporaryAddress"
+          "AddPrefixRoute"
+          "AutoJoin"
+        ])
+        (assertHasField "Address")
+        (assertValueOneOf "PreferredLifetime" ["forever" "infinity" "0" 0])
+        (assertInt "RouteMetric")
+        (assertValueOneOf "HomeAddress" boolValues)
+        (assertValueOneOf "DuplicateAddressDetection" ["ipv4" "ipv6" "both" "none"])
+        (assertValueOneOf "ManageTemporaryAddress" boolValues)
+        (assertValueOneOf "AddPrefixRoute" boolValues)
+        (assertValueOneOf "AutoJoin" boolValues)
+      ];
+
+      sectionRoutingPolicyRule = checkUnitConfig "RoutingPolicyRule" [
+        (assertOnlyFields [
+          "TypeOfService"
+          "From"
+          "To"
+          "FirewallMark"
+          "Table"
+          "Priority"
+          "IncomingInterface"
+          "OutgoingInterface"
+          "SourcePort"
+          "DestinationPort"
+          "IPProtocol"
+          "InvertRule"
+          "Family"
+          "User"
+          "SuppressPrefixLength"
+          "Type"
+          "SuppressInterfaceGroup"
+        ])
+        (assertInt "TypeOfService")
+        (assertRange "TypeOfService" 0 255)
+        (assertInt "FirewallMark")
+        (assertRange "FirewallMark" 1 4294967295)
+        (assertInt "Priority")
+        (assertPort "SourcePort")
+        (assertPort "DestinationPort")
+        (assertValueOneOf "InvertRule" boolValues)
+        (assertValueOneOf "Family" ["ipv4" "ipv6" "both"])
+        (assertInt "SuppressPrefixLength")
+        (assertRange "SuppressPrefixLength" 0 128)
+        (assertValueOneOf "Type" ["blackhole" "unreachable" "prohibit"])
+        (assertRange "SuppressInterfaceGroup" 0 2147483647)
+      ];
+
+      sectionRoute = checkUnitConfig "Route" [
+        (assertOnlyFields [
+          "Gateway"
+          "GatewayOnLink"
+          "Destination"
+          "Source"
+          "Metric"
+          "IPv6Preference"
+          "Scope"
+          "PreferredSource"
+          "Table"
+          "Protocol"
+          "Type"
+          "InitialCongestionWindow"
+          "InitialAdvertisedReceiveWindow"
+          "QuickAck"
+          "FastOpenNoCookie"
+          "TTLPropagate"
+          "MTUBytes"
+          "IPServiceType"
+          "MultiPathRoute"
+        ])
+        (assertValueOneOf "GatewayOnLink" boolValues)
+        (assertInt "Metric")
+        (assertValueOneOf "IPv6Preference" ["low" "medium" "high"])
+        (assertValueOneOf "Scope" ["global" "site" "link" "host" "nowhere"])
+        (assertValueOneOf "Type" [
+          "unicast"
+          "local"
+          "broadcast"
+          "anycast"
+          "multicast"
+          "blackhole"
+          "unreachable"
+          "prohibit"
+          "throw"
+          "nat"
+          "xresolve"
+        ])
+        (assertValueOneOf "QuickAck" boolValues)
+        (assertValueOneOf "FastOpenNoCookie" boolValues)
+        (assertValueOneOf "TTLPropagate" boolValues)
+        (assertByteFormat "MTUBytes")
+        (assertValueOneOf "IPServiceType" ["CS6" "CS4"])
+      ];
+
+      sectionDHCPv4 = checkUnitConfig "DHCPv4" [
+        (assertOnlyFields [
+          "UseDNS"
+          "RoutesToDNS"
+          "UseNTP"
+          "UseSIP"
+          "UseMTU"
+          "Anonymize"
+          "SendHostname"
+          "UseHostname"
+          "Hostname"
+          "UseDomains"
+          "UseRoutes"
+          "UseTimezone"
+          "ClientIdentifier"
+          "VendorClassIdentifier"
+          "UserClass"
+          "MaxAttempts"
+          "DUIDType"
+          "DUIDRawData"
+          "IAID"
+          "RequestBroadcast"
+          "RouteMetric"
+          "RouteTable"
+          "RouteMTUBytes"
+          "ListenPort"
+          "SendRelease"
+          "SendDecline"
+          "BlackList"
+          "RequestOptions"
+          "SendOption"
+          "FallbackLeaseLifetimeSec"
+          "Label"
+          "Use6RD"
+        ])
+        (assertValueOneOf "UseDNS" boolValues)
+        (assertValueOneOf "RoutesToDNS" boolValues)
+        (assertValueOneOf "UseNTP" boolValues)
+        (assertValueOneOf "UseSIP" boolValues)
+        (assertValueOneOf "UseMTU" boolValues)
+        (assertValueOneOf "Anonymize" boolValues)
+        (assertValueOneOf "SendHostname" boolValues)
+        (assertValueOneOf "UseHostname" boolValues)
+        (assertValueOneOf "UseDomains" (boolValues ++ ["route"]))
+        (assertValueOneOf "UseRoutes" boolValues)
+        (assertValueOneOf "UseTimezone" boolValues)
+        (assertValueOneOf "ClientIdentifier" ["mac" "duid" "duid-only"])
+        (assertInt "IAID")
+        (assertValueOneOf "RequestBroadcast" boolValues)
+        (assertInt "RouteMetric")
+        (assertInt "RouteTable")
+        (assertRange "RouteTable" 0 4294967295)
+        (assertByteFormat "RouteMTUBytes")
+        (assertPort "ListenPort")
+        (assertValueOneOf "SendRelease" boolValues)
+        (assertValueOneOf "SendDecline" boolValues)
+        (assertValueOneOf "FallbackLeaseLifetimeSec" ["forever" "infinity"])
+        (assertValueOneOf "Use6RD" boolValues)
+      ];
+
+      sectionDHCPv6 = checkUnitConfig "DHCPv6" [
+        (assertOnlyFields [
+          "UseAddress"
+          "UseDNS"
+          "UseNTP"
+          "UseHostname"
+          "UseDomains"
+          "RouteMetric"
+          "RapidCommit"
+          "MUDURL"
+          "RequestOptions"
+          "SendVendorOption"
+          "PrefixDelegationHint"
+          "WithoutRA"
+          "SendOption"
+          "UserClass"
+          "VendorClass"
+          "DUIDType"
+          "DUIDRawData"
+          "IAID"
+          "UseDelegatedPrefix"
+          "SendRelease"
+        ])
+        (assertValueOneOf "UseAddress" boolValues)
+        (assertValueOneOf "UseDNS" boolValues)
+        (assertValueOneOf "UseNTP" boolValues)
+        (assertValueOneOf "UseHostname" boolValues)
+        (assertValueOneOf "UseDomains" (boolValues ++ ["route"]))
+        (assertInt "RouteMetric")
+        (assertValueOneOf "RapidCommit" boolValues)
+        (assertValueOneOf "WithoutRA" ["no" "solicit" "information-request"])
+        (assertRange "SendOption" 1 65536)
+        (assertInt "IAID")
+        (assertValueOneOf "UseDelegatedPrefix" boolValues)
+        (assertValueOneOf "SendRelease" boolValues)
+      ];
+
+      sectionDHCPPrefixDelegation = checkUnitConfig "DHCPPrefixDelegation" [
+        (assertOnlyFields [
+          "UplinkInterface"
+          "SubnetId"
+          "Announce"
+          "Assign"
+          "Token"
+          "ManageTemporaryAddress"
+          "RouteMetric"
+        ])
+        (assertValueOneOf "Announce" boolValues)
+        (assertValueOneOf "Assign" boolValues)
+        (assertValueOneOf "ManageTemporaryAddress" boolValues)
+        (assertRange "RouteMetric" 0 4294967295)
+      ];
+
+      sectionIPv6AcceptRA = checkUnitConfig "IPv6AcceptRA" [
+        (assertOnlyFields [
+          "UseDNS"
+          "UseDomains"
+          "RouteTable"
+          "UseAutonomousPrefix"
+          "UseOnLinkPrefix"
+          "RouterDenyList"
+          "RouterAllowList"
+          "PrefixDenyList"
+          "PrefixAllowList"
+          "RouteDenyList"
+          "RouteAllowList"
+          "DHCPv6Client"
+          "RouteMetric"
+          "UseMTU"
+          "UseGateway"
+          "UseRoutePrefix"
+          "Token"
+        ])
+        (assertValueOneOf "UseDNS" boolValues)
+        (assertValueOneOf "UseDomains" (boolValues ++ ["route"]))
+        (assertRange "RouteTable" 0 4294967295)
+        (assertValueOneOf "UseAutonomousPrefix" boolValues)
+        (assertValueOneOf "UseOnLinkPrefix" boolValues)
+        (assertValueOneOf "DHCPv6Client" (boolValues ++ ["always"]))
+        (assertValueOneOf "UseMTU" boolValues)
+        (assertValueOneOf "UseGateway" boolValues)
+        (assertValueOneOf "UseRoutePrefix" boolValues)
+      ];
+
+      sectionDHCPServer = checkUnitConfig "DHCPServer" [
+        (assertOnlyFields [
+          "ServerAddress"
+          "PoolOffset"
+          "PoolSize"
+          "DefaultLeaseTimeSec"
+          "MaxLeaseTimeSec"
+          "UplinkInterface"
+          "EmitDNS"
+          "DNS"
+          "EmitNTP"
+          "NTP"
+          "EmitSIP"
+          "SIP"
+          "EmitPOP3"
+          "POP3"
+          "EmitSMTP"
+          "SMTP"
+          "EmitLPR"
+          "LPR"
+          "EmitRouter"
+          "Router"
+          "EmitTimezone"
+          "Timezone"
+          "SendOption"
+          "SendVendorOption"
+          "BindToInterface"
+          "RelayTarget"
+          "RelayAgentCircuitId"
+          "RelayAgentRemoteId"
+          "BootServerAddress"
+          "BootServerName"
+          "BootFilename"
+        ])
+        (assertInt "PoolOffset")
+        (assertMinimum "PoolOffset" 0)
+        (assertInt "PoolSize")
+        (assertMinimum "PoolSize" 0)
+        (assertValueOneOf "EmitDNS" boolValues)
+        (assertValueOneOf "EmitNTP" boolValues)
+        (assertValueOneOf "EmitSIP" boolValues)
+        (assertValueOneOf "EmitPOP3" boolValues)
+        (assertValueOneOf "EmitSMTP" boolValues)
+        (assertValueOneOf "EmitLPR" boolValues)
+        (assertValueOneOf "EmitRouter" boolValues)
+        (assertValueOneOf "EmitTimezone" boolValues)
+        (assertValueOneOf "BindToInterface" boolValues)
+      ];
+
+      sectionIPv6SendRA = checkUnitConfig "IPv6SendRA" [
+        (assertOnlyFields [
+          "Managed"
+          "OtherInformation"
+          "RouterLifetimeSec"
+          "RouterPreference"
+          "UplinkInterface"
+          "EmitDNS"
+          "DNS"
+          "EmitDomains"
+          "Domains"
+          "DNSLifetimeSec"
+        ])
+        (assertValueOneOf "Managed" boolValues)
+        (assertValueOneOf "OtherInformation" boolValues)
+        (assertValueOneOf "RouterPreference" ["high" "medium" "low" "normal" "default"])
+        (assertValueOneOf "EmitDNS" boolValues)
+        (assertValueOneOf "EmitDomains" boolValues)
+      ];
+
+      sectionIPv6Prefix = checkUnitConfig "IPv6Prefix" [
+        (assertOnlyFields [
+          "AddressAutoconfiguration"
+          "OnLink"
+          "Prefix"
+          "PreferredLifetimeSec"
+          "ValidLifetimeSec"
+          "Assign"
+          "Token"
+        ])
+        (assertValueOneOf "AddressAutoconfiguration" boolValues)
+        (assertValueOneOf "OnLink" boolValues)
+        (assertValueOneOf "Assign" boolValues)
+      ];
+
+      sectionIPv6RoutePrefix = checkUnitConfig "IPv6RoutePrefix" [
+        (assertOnlyFields [
+          "Route"
+          "LifetimeSec"
+        ])
+        (assertHasField "Route")
+        (assertInt "LifetimeSec")
+      ];
+
+      sectionDHCPServerStaticLease = checkUnitConfig "DHCPServerStaticLease" [
+        (assertOnlyFields [
+          "MACAddress"
+          "Address"
+        ])
+        (assertHasField "MACAddress")
+        (assertHasField "Address")
+        (assertMacAddress "MACAddress")
+      ];
+
+      sectionBridge = checkUnitConfig "Bridge" [
+        (assertOnlyFields [
+          "UnicastFlood"
+          "MulticastFlood"
+          "MulticastToUnicast"
+          "NeighborSuppression"
+          "Learning"
+          "HairPin"
+          "Isolated"
+          "UseBPDU"
+          "FastLeave"
+          "AllowPortToBeRoot"
+          "ProxyARP"
+          "ProxyARPWiFi"
+          "MulticastRouter"
+          "Cost"
+          "Priority"
+        ])
+        (assertValueOneOf "UnicastFlood" boolValues)
+        (assertValueOneOf "MulticastFlood" boolValues)
+        (assertValueOneOf "MulticastToUnicast" boolValues)
+        (assertValueOneOf "NeighborSuppression" boolValues)
+        (assertValueOneOf "Learning" boolValues)
+        (assertValueOneOf "HairPin" boolValues)
+        (assertValueOneOf "Isolated" boolValues)
+        (assertValueOneOf "UseBPDU" boolValues)
+        (assertValueOneOf "FastLeave" boolValues)
+        (assertValueOneOf "AllowPortToBeRoot" boolValues)
+        (assertValueOneOf "ProxyARP" boolValues)
+        (assertValueOneOf "ProxyARPWiFi" boolValues)
+        (assertValueOneOf "MulticastRouter" [ "no" "query" "permanent" "temporary" ])
+        (assertInt "Cost")
+        (assertRange "Cost" 1 65535)
+        (assertInt "Priority")
+        (assertRange "Priority" 0 63)
+      ];
+
+      sectionBridgeFDB = checkUnitConfig "BridgeFDB" [
+        (assertOnlyFields [
+          "MACAddress"
+          "Destination"
+          "VLANId"
+          "VNI"
+          "AssociatedWith"
+          "OutgoingInterface"
+        ])
+        (assertHasField "MACAddress")
+        (assertInt "VLANId")
+        (assertRange "VLANId" 0 4094)
+        (assertInt "VNI")
+        (assertRange "VNI" 1 16777215)
+        (assertValueOneOf "AssociatedWith" [ "use" "self" "master" "router" ])
+      ];
+
+      sectionBridgeMDB = checkUnitConfig "BridgeMDB" [
+        (assertOnlyFields [
+          "MulticastGroupAddress"
+          "VLANId"
+        ])
+        (assertHasField "MulticastGroupAddress")
+        (assertInt "VLANId")
+        (assertRange "VLANId" 0 4094)
+      ];
+
+      sectionLLDP = checkUnitConfig "LLDP" [
+        (assertOnlyFields [
+          "MUDURL"
+        ])
+      ];
+
+      sectionCAN = checkUnitConfig "CAN" [
+        (assertOnlyFields [
+          "BitRate"
+          "SamplePoint"
+          "TimeQuantaNSec"
+          "PropagationSegment"
+          "PhaseBufferSegment1"
+          "PhaseBufferSegment2"
+          "SyncJumpWidth"
+          "DataBitRate"
+          "DataSamplePoint"
+          "DataTimeQuantaNSec"
+          "DataPropagationSegment"
+          "DataPhaseBufferSegment1"
+          "DataPhaseBufferSegment2"
+          "DataSyncJumpWidth"
+          "FDMode"
+          "FDNonISO"
+          "RestartSec"
+          "Termination"
+          "TripleSampling"
+          "BusErrorReporting"
+          "ListenOnly"
+          "Loopback"
+          "OneShot"
+          "PresumeAck"
+          "ClassicDataLengthCode"
+        ])
+        (assertInt "TimeQuantaNSec" )
+        (assertRange "TimeQuantaNSec" 0 4294967295 )
+        (assertInt "PropagationSegment" )
+        (assertRange "PropagationSegment" 0 4294967295 )
+        (assertInt "PhaseBufferSegment1" )
+        (assertRange "PhaseBufferSegment1" 0 4294967295 )
+        (assertInt "PhaseBufferSegment2" )
+        (assertRange "PhaseBufferSegment2" 0 4294967295 )
+        (assertInt "SyncJumpWidth" )
+        (assertRange "SyncJumpWidth" 0 4294967295 )
+        (assertInt "DataTimeQuantaNSec" )
+        (assertRange "DataTimeQuantaNSec" 0 4294967295 )
+        (assertInt "DataPropagationSegment" )
+        (assertRange "DataPropagationSegment" 0 4294967295 )
+        (assertInt "DataPhaseBufferSegment1" )
+        (assertRange "DataPhaseBufferSegment1" 0 4294967295 )
+        (assertInt "DataPhaseBufferSegment2" )
+        (assertRange "DataPhaseBufferSegment2" 0 4294967295 )
+        (assertInt "DataSyncJumpWidth" )
+        (assertRange "DataSyncJumpWidth" 0 4294967295 )
+        (assertValueOneOf "FDMode" boolValues)
+        (assertValueOneOf "FDNonISO" boolValues)
+        (assertValueOneOf "TripleSampling" boolValues)
+        (assertValueOneOf "BusErrorReporting" boolValues)
+        (assertValueOneOf "ListenOnly" boolValues)
+        (assertValueOneOf "Loopback" boolValues)
+        (assertValueOneOf "OneShot" boolValues)
+        (assertValueOneOf "PresumeAck" boolValues)
+        (assertValueOneOf "ClassicDataLengthCode" boolValues)
+      ];
+
+      sectionIPoIB = checkUnitConfig "IPoIB" [
+        (assertOnlyFields [
+          "Mode"
+          "IgnoreUserspaceMulticastGroup"
+        ])
+        (assertValueOneOf "Mode" [ "datagram" "connected" ])
+        (assertValueOneOf "IgnoreUserspaceMulticastGroup" boolValues)
+      ];
+
+      sectionQDisc = checkUnitConfig "QDisc" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+        ])
+        (assertValueOneOf "Parent" [ "clsact" "ingress" ])
+      ];
+
+      sectionNetworkEmulator = checkUnitConfig "NetworkEmulator" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "DelaySec"
+          "DelayJitterSec"
+          "PacketLimit"
+          "LossRate"
+          "DuplicateRate"
+        ])
+        (assertInt "PacketLimit")
+        (assertRange "PacketLimit" 0 4294967294)
+      ];
+
+      sectionTokenBucketFilter = checkUnitConfig "TokenBucketFilter" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "LatencySec"
+          "LimitBytes"
+          "BurstBytes"
+          "Rate"
+          "MPUBytes"
+          "PeakRate"
+          "MTUBytes"
+        ])
+      ];
+
+      sectionPIE = checkUnitConfig "PIE" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+        ])
+        (assertInt "PacketLimit")
+        (assertRange "PacketLimit" 1 4294967294)
+      ];
+
+      sectionFlowQueuePIE = checkUnitConfig "FlowQueuePIE" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+        ])
+        (assertInt "PacketLimit")
+        (assertRange "PacketLimit" 1 4294967294)
+      ];
+
+      sectionStochasticFairBlue = checkUnitConfig "StochasticFairBlue" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+        ])
+        (assertInt "PacketLimit")
+        (assertRange "PacketLimit" 1 4294967294)
+      ];
+
+      sectionStochasticFairnessQueueing = checkUnitConfig "StochasticFairnessQueueing" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PerturbPeriodSec"
+        ])
+        (assertInt "PerturbPeriodSec")
+      ];
+
+      sectionBFIFO = checkUnitConfig "BFIFO" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "LimitBytes"
+        ])
+      ];
+
+      sectionPFIFO = checkUnitConfig "PFIFO" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+        ])
+        (assertInt "PacketLimit")
+        (assertRange "PacketLimit" 0 4294967294)
+      ];
+
+      sectionPFIFOHeadDrop = checkUnitConfig "PFIFOHeadDrop" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+        ])
+        (assertInt "PacketLimit")
+        (assertRange "PacketLimit" 0 4294967294)
+      ];
+
+      sectionPFIFOFast = checkUnitConfig "PFIFOFast" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+        ])
+      ];
+
+      sectionCAKE = checkUnitConfig "CAKE" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "Bandwidth"
+          "AutoRateIngress"
+          "OverheadBytes"
+          "MPUBytes"
+          "CompensationMode"
+          "UseRawPacketSize"
+          "FlowIsolationMode"
+          "NAT"
+          "PriorityQueueingPreset"
+          "FirewallMark"
+          "Wash"
+          "SplitGSO"
+          "AckFilter"
+        ])
+        (assertValueOneOf "AutoRateIngress" boolValues)
+        (assertInt "OverheadBytes")
+        (assertRange "OverheadBytes" (-64) 256)
+        (assertInt "MPUBytes")
+        (assertRange "MPUBytes" 1 256)
+        (assertValueOneOf "CompensationMode" [ "none" "atm" "ptm" ])
+        (assertValueOneOf "UseRawPacketSize" boolValues)
+        (assertValueOneOf "FlowIsolationMode"
+          [
+            "none"
+            "src-host"
+            "dst-host"
+            "hosts"
+            "flows"
+            "dual-src-host"
+            "dual-dst-host"
+            "triple"
+          ])
+        (assertValueOneOf "NAT" boolValues)
+        (assertValueOneOf "PriorityQueueingPreset"
+          [
+            "besteffort"
+            "precedence"
+            "diffserv8"
+            "diffserv4"
+            "diffserv3"
+          ])
+        (assertInt "FirewallMark")
+        (assertRange "FirewallMark" 1 4294967295)
+        (assertValueOneOf "Wash" boolValues)
+        (assertValueOneOf "SplitGSO" boolValues)
+        (assertValueOneOf "AckFilter" (boolValues ++ ["aggressive"]))
+      ];
+
+      sectionControlledDelay = checkUnitConfig "ControlledDelay" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+          "TargetSec"
+          "IntervalSec"
+          "ECN"
+          "CEThresholdSec"
+        ])
+        (assertValueOneOf "ECN" boolValues)
+      ];
+
+      sectionDeficitRoundRobinScheduler = checkUnitConfig "DeficitRoundRobinScheduler" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+        ])
+      ];
+
+      sectionDeficitRoundRobinSchedulerClass = checkUnitConfig "DeficitRoundRobinSchedulerClass" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "QuantumBytes"
+        ])
+      ];
+
+      sectionEnhancedTransmissionSelection = checkUnitConfig "EnhancedTransmissionSelection" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "Bands"
+          "StrictBands"
+          "QuantumBytes"
+          "PriorityMap"
+        ])
+        (assertInt "Bands")
+        (assertRange "Bands" 1 16)
+        (assertInt "StrictBands")
+        (assertRange "StrictBands" 1 16)
+      ];
+
+      sectionGenericRandomEarlyDetection = checkUnitConfig "GenericRandomEarlyDetection" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "VirtualQueues"
+          "DefaultVirtualQueue"
+          "GenericRIO"
+        ])
+        (assertInt "VirtualQueues")
+        (assertRange "VirtualQueues" 1 16)
+        (assertInt "DefaultVirtualQueue")
+        (assertRange "DefaultVirtualQueue" 1 16)
+        (assertValueOneOf "GenericRIO" boolValues)
+      ];
+
+      sectionFairQueueingControlledDelay = checkUnitConfig "FairQueueingControlledDelay" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+          "MemoryLimitBytes"
+          "Flows"
+          "TargetSec"
+          "IntervalSec"
+          "QuantumBytes"
+          "ECN"
+          "CEThresholdSec"
+        ])
+        (assertInt "PacketLimit")
+        (assertInt "Flows")
+        (assertValueOneOf "ECN" boolValues)
+      ];
+
+      sectionFairQueueing = checkUnitConfig "FairQueueing" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+          "FlowLimit"
+          "QuantumBytes"
+          "InitualQuantumBytes"
+          "MaximumRate"
+          "Buckets"
+          "OrphanMask"
+          "Pacing"
+          "CEThresholdSec"
+        ])
+        (assertInt "PacketLimit")
+        (assertInt "FlowLimit")
+        (assertInt "OrphanMask")
+        (assertValueOneOf "Pacing" boolValues)
+      ];
+
+      sectionTrivialLinkEqualizer = checkUnitConfig "TrivialLinkEqualizer" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "Id"
+        ])
+      ];
+
+      sectionHierarchyTokenBucket = checkUnitConfig "HierarchyTokenBucket" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "DefaultClass"
+          "RateToQuantum"
+        ])
+        (assertInt "RateToQuantum")
+      ];
+
+      sectionHierarchyTokenBucketClass = checkUnitConfig "HierarchyTokenBucketClass" [
+        (assertOnlyFields [
+          "Parent"
+          "ClassId"
+          "Priority"
+          "QuantumBytes"
+          "MTUBytes"
+          "OverheadBytes"
+          "Rate"
+          "CeilRate"
+          "BufferBytes"
+          "CeilBufferBytes"
+        ])
+      ];
+
+      sectionHeavyHitterFilter = checkUnitConfig "HeavyHitterFilter" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+          "PacketLimit"
+        ])
+        (assertInt "PacketLimit")
+        (assertRange "PacketLimit" 0 4294967294)
+      ];
+
+      sectionQuickFairQueueing = checkUnitConfig "QuickFairQueueing" [
+        (assertOnlyFields [
+          "Parent"
+          "Handle"
+        ])
+      ];
+
+      sectionQuickFairQueueingClass = checkUnitConfig "QuickFairQueueingClass" [
+        (assertOnlyFields [
+          "Parent"
+          "ClassId"
+          "Weight"
+          "MaxPacketBytes"
+        ])
+        (assertInt "Weight")
+        (assertRange "Weight" 1 1023)
+      ];
+
+      sectionBridgeVLAN = checkUnitConfig "BridgeVLAN" [
+        (assertOnlyFields [
+          "VLAN"
+          "EgressUntagged"
+          "PVID"
+        ])
+        (assertInt "PVID")
+        (assertRange "PVID" 0 4094)
+      ];
+    };
+  };
+
+  commonNetworkOptions = {
+
+    enable = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to manage network configuration using {command}`systemd-networkd`.
+
+        This also enables {option}`systemd.networkd.enable`.
+      '';
+    };
+
+    matchConfig = mkOption {
+      default = {};
+      example = { Name = "eth0"; };
+      type = types.attrsOf unitOption;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Match]` section of the unit.  See
+        {manpage}`systemd.link(5)`
+        {manpage}`systemd.netdev(5)`
+        {manpage}`systemd.network(5)`
+        for details.
+      '';
+    };
+
+    extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc "Extra configuration append to unit";
+    };
+  };
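+
+  # Illustrative use of these common options in a network unit, assuming the
+  # systemd.network.networks option (and its networkConfig) defined further
+  # down in this file; the interface name is hypothetical:
+  #
+  #   systemd.network.networks."10-lan" = {
+  #     matchConfig.Name = "enp3s0";
+  #     networkConfig.DHCP = "ipv4";
+  #   };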
+
+  networkdOptions = {
+    networkConfig = mkOption {
+      default = {};
+      example = { SpeedMeter = true; ManageForeignRoutingPolicyRules = false; };
+      type = types.addCheck (types.attrsOf unitOption) check.global.sectionNetwork;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Network]` section of the networkd config.
+        See {manpage}`networkd.conf(5)` for details.
+      '';
+    };
+
+    dhcpV4Config = mkOption {
+      default = {};
+      example = { DUIDType = "vendor"; };
+      type = types.addCheck (types.attrsOf unitOption) check.global.sectionDHCPv4;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[DHCPv4]` section of the networkd config.
+        See {manpage}`networkd.conf(5)` for details.
+      '';
+    };
+
+    dhcpV6Config = mkOption {
+      default = {};
+      example = { DUIDType = "vendor"; };
+      type = types.addCheck (types.attrsOf unitOption) check.global.sectionDHCPv6;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[DHCPv6]` section of the networkd config.
+        See {manpage}`networkd.conf(5)` for details.
+      '';
+    };
+  };
+
+  linkOptions = commonNetworkOptions // {
+    # overwrite enable option from above
+    enable = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable this .link unit. It is handled by udev regardless of whether {command}`systemd-networkd` is enabled.
+      '';
+    };
+
+    linkConfig = mkOption {
+      default = {};
+      example = { MACAddress = "00:ff:ee:aa:cc:dd"; };
+      type = types.addCheck (types.attrsOf unitOption) check.link.sectionLink;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Link]` section of the unit.  See
+        {manpage}`systemd.link(5)` for details.
+      '';
+    };
+
+  };
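+  # Illustrative sketch (not from upstream): a .link unit built from the options
+  # above, renaming an interface by MAC address; the unit name and addresses are
+  # assumptions.
+  #   systemd.network.links."10-wan" = {
+  #     matchConfig.MACAddress = "00:ff:ee:aa:cc:dd";
+  #     linkConfig.Name = "wan0";
+  #   };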
+
+  l2tpSessionOptions = {
+    options = {
+      l2tpSessionConfig = mkOption {
+        default = {};
+        type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionL2TPSession;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[L2TPSession]` section of the unit.  See
+          {manpage}`systemd.netdev(5)` for details.
+        '';
+      };
+    };
+  };
+
+  wireguardPeerOptions = {
+    options = {
+      wireguardPeerConfig = mkOption {
+        default = {};
+        type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionWireGuardPeer;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[WireGuardPeer]` section of the unit.  See
+          {manpage}`systemd.netdev(5)` for details.
+        '';
+      };
+    };
+  };
+
+  netdevOptions = commonNetworkOptions // {
+
+    netdevConfig = mkOption {
+      example = { Name = "mybridge"; Kind = "bridge"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionNetdev;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Netdev]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    vlanConfig = mkOption {
+      default = {};
+      example = { Id = 4; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionVLAN;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[VLAN]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    ipvlanConfig = mkOption {
+      default = {};
+      example = { Mode = "L2"; Flags = "private"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionIPVLAN;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the `[IPVLAN]` section of the unit.
+        See {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    ipvtapConfig = mkOption {
+      default = {};
+      example = { Mode = "L3"; Flags = "vepa"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionIPVTAP;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the `[IPVTAP]` section of the unit.
+        See {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    macvlanConfig = mkOption {
+      default = {};
+      example = { Mode = "private"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionMACVLAN;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[MACVLAN]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    vxlanConfig = mkOption {
+      default = {};
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionVXLAN;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[VXLAN]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    tunnelConfig = mkOption {
+      default = {};
+      example = { Remote = "192.168.1.1"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionTunnel;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Tunnel]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    fooOverUDPConfig = mkOption {
+      default = { };
+      example = { Port = 9001; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionFooOverUDP;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[FooOverUDP]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    peerConfig = mkOption {
+      default = {};
+      example = { Name = "veth2"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionPeer;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Peer]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    tunConfig = mkOption {
+      default = {};
+      example = { User = "openvpn"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionTun;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Tun]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    tapConfig = mkOption {
+      default = {};
+      example = { User = "openvpn"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionTap;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Tap]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    l2tpConfig = mkOption {
+      default = {};
+      example = {
+        TunnelId = 10;
+        PeerTunnelId = 12;
+        Local = "static";
+        Remote = "192.168.30.101";
+        EncapsulationType = "ip";
+      };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionL2TP;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[L2TP]` section of the unit. See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    l2tpSessions = mkOption {
+      default = [];
+      example = [ { l2tpSessionConfig={
+        SessionId = 25;
+        PeerSessionId = 26;
+        Name = "l2tp-sess";
+      };}];
+      type = with types; listOf (submodule l2tpSessionOptions);
+      description = lib.mdDoc ''
+        Each item in this array specifies an option in the
+        `[L2TPSession]` section of the unit. See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    wireguardConfig = mkOption {
+      default = {};
+      example = {
+        PrivateKeyFile = "/etc/wireguard/secret.key";
+        ListenPort = 51820;
+        FirewallMark = 42;
+      };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionWireGuard;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[WireGuard]` section of the unit. See
+        {manpage}`systemd.netdev(5)` for details.
+        Use `PrivateKeyFile` instead of
+        `PrivateKey`: the nix store is
+        world-readable.
+      '';
+    };
+
+    wireguardPeers = mkOption {
+      default = [];
+      example = [ { wireguardPeerConfig={
+        Endpoint = "192.168.1.1:51820";
+        PublicKey = "27s0OvaBBdHoJYkH9osZpjpgSOVNw+RaKfboT/Sfq0g=";
+        PresharedKeyFile = "/etc/wireguard/psk.key";
+        AllowedIPs = [ "10.0.0.1/32" ];
+        PersistentKeepalive = 15;
+      };}];
+      type = with types; listOf (submodule wireguardPeerOptions);
+      description = lib.mdDoc ''
+        Each item in this array specifies an option in the
+        `[WireGuardPeer]` section of the unit. See
+        {manpage}`systemd.netdev(5)` for details.
+        Use `PresharedKeyFile` instead of
+        `PresharedKey`: the nix store is
+        world-readable.
+      '';
+    };
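+    # Illustrative sketch (not from upstream): a WireGuard netdev combining the two
+    # options above; the interface name, key path and peer values are assumptions.
+    #   systemd.network.netdevs."50-wg0" = {
+    #     netdevConfig = { Kind = "wireguard"; Name = "wg0"; };
+    #     wireguardConfig.PrivateKeyFile = "/run/keys/wg0.key";
+    #     wireguardPeers = [ { wireguardPeerConfig = {
+    #       PublicKey = "<peer public key>";
+    #       AllowedIPs = [ "10.100.0.2/32" ];
+    #       Endpoint = "vpn.example.org:51820";
+    #     }; } ];
+    #   };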
+
+    bondConfig = mkOption {
+      default = {};
+      example = { Mode = "802.3ad"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionBond;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Bond]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    xfrmConfig = mkOption {
+      default = {};
+      example = { InterfaceId = 1; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionXfrm;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Xfrm]` section of the unit.  See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    vrfConfig = mkOption {
+      default = {};
+      example = { Table = 2342; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionVRF;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[VRF]` section of the unit. See
+        {manpage}`systemd.netdev(5)` for details.
+        A detailed explanation about how VRFs work can be found in the
+        [kernel docs](https://www.kernel.org/doc/Documentation/networking/vrf.txt).
+      '';
+    };
+
+    wlanConfig = mkOption {
+      default = {};
+      example = { PhysicalDevice = 0; Type = "station"; };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionWLAN;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the `[WLAN]` section of the unit.
+        See {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+    batmanAdvancedConfig = mkOption {
+      default = {};
+      example = {
+        GatewayMode = "server";
+        RoutingAlgorithm = "batman-v";
+      };
+      type = types.addCheck (types.attrsOf unitOption) check.netdev.sectionBatmanAdvanced;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[BatmanAdvanced]` section of the unit. See
+        {manpage}`systemd.netdev(5)` for details.
+      '';
+    };
+
+  };
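+  # Illustrative sketch (not from upstream): a VLAN netdev and the network that
+  # attaches it to its parent interface; names and the VLAN id are assumptions.
+  #   systemd.network.netdevs."20-vlan100" = {
+  #     netdevConfig = { Kind = "vlan"; Name = "vlan100"; };
+  #     vlanConfig.Id = 100;
+  #   };
+  #   systemd.network.networks."30-eth0" = {
+  #     matchConfig.Name = "eth0";
+  #     vlan = [ "vlan100" ];
+  #   };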
+
+  addressOptions = {
+    options = {
+      addressConfig = mkOption {
+        example = { Address = "192.168.0.100/24"; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionAddress;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Address]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+        '';
+      };
+    };
+  };
+
+  routingPolicyRulesOptions = {
+    options = {
+      routingPolicyRuleConfig = mkOption {
+        default = { };
+        example = { Table = 10; IncomingInterface = "eth1"; Family = "both"; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionRoutingPolicyRule;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[RoutingPolicyRule]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+        '';
+      };
+    };
+  };
+
+  routeOptions = {
+    options = {
+      routeConfig = mkOption {
+        default = {};
+        example = { Gateway = "192.168.0.1"; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionRoute;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Route]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+        '';
+      };
+    };
+  };
+
+  ipv6PrefixOptions = {
+    options = {
+      ipv6PrefixConfig = mkOption {
+        default = {};
+        example = { Prefix = "fd00::/64"; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionIPv6Prefix;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[IPv6Prefix]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+        '';
+      };
+    };
+  };
+
+  ipv6RoutePrefixOptions = {
+    options = {
+      ipv6RoutePrefixConfig = mkOption {
+        default = {};
+        example = { Route = "fd00::/64"; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionIPv6RoutePrefix;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[IPv6RoutePrefix]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+        '';
+      };
+    };
+  };
+
+  dhcpServerStaticLeaseOptions = {
+    options = {
+      dhcpServerStaticLeaseConfig = mkOption {
+        default = {};
+        example = { MACAddress = "65:43:4a:5b:d8:5f"; Address = "192.168.1.42"; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionDHCPServerStaticLease;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[DHCPServerStaticLease]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+
+          Make sure to configure the corresponding client interface to use
+          `ClientIdentifier=mac`.
+        '';
+      };
+    };
+  };
+
+  bridgeFDBOptions = {
+    options = {
+      bridgeFDBConfig = mkOption {
+        default = {};
+        example = { MACAddress = "65:43:4a:5b:d8:5f"; Destination = "192.168.1.42"; VNI = 20; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionBridgeFDB;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[BridgeFDB]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+        '';
+      };
+    };
+  };
+
+  bridgeMDBOptions = {
+    options = {
+      bridgeMDBConfig = mkOption {
+        default = {};
+        example = { MulticastGroupAddress = "ff02::1:2:3:4"; VLANId = 10; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionBridgeMDB;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[BridgeMDB]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+        '';
+      };
+    };
+  };
+
+  bridgeVLANOptions = {
+    options = {
+      bridgeVLANConfig = mkOption {
+        default = {};
+        example = { VLAN = 20; };
+        type = types.addCheck (types.attrsOf unitOption) check.network.sectionBridgeVLAN;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[BridgeVLAN]` section of the unit.  See
+          {manpage}`systemd.network(5)` for details.
+        '';
+      };
+    };
+  };
+
+  networkOptions = commonNetworkOptions // {
+
+    linkConfig = mkOption {
+      default = {};
+      example = { Unmanaged = true; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionLink;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Link]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    networkConfig = mkOption {
+      default = {};
+      example = { Description = "My Network"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionNetwork;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Network]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    # systemd.network.networks.*.dhcpConfig has been deprecated in favor of ….dhcpV4Config
+    # Produce a nice warning message so users know it is gone.
+    dhcpConfig = mkOption {
+      visible = false;
+      apply = _: throw "The option `systemd.network.networks.*.dhcpConfig` can no longer be used since it's been removed. Please use `systemd.network.networks.*.dhcpV4Config` instead.";
+    };
+
+    dhcpV4Config = mkOption {
+      default = {};
+      example = { UseDNS = true; UseRoutes = true; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionDHCPv4;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[DHCPv4]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    dhcpV6Config = mkOption {
+      default = {};
+      example = { UseDNS = true; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionDHCPv6;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[DHCPv6]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    dhcpV6PrefixDelegationConfig = mkOption {
+      visible = false;
+      apply = _: throw "The option `systemd.network.networks.<name>.dhcpV6PrefixDelegationConfig` has been renamed to `systemd.network.networks.<name>.dhcpPrefixDelegationConfig`.";
+    };
+
+    dhcpPrefixDelegationConfig = mkOption {
+      default = {};
+      example = { SubnetId = "auto"; Announce = true; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionDHCPPrefixDelegation;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[DHCPPrefixDelegation]` section of the unit. See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    ipv6AcceptRAConfig = mkOption {
+      default = {};
+      example = { UseDNS = true; DHCPv6Client = "always"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionIPv6AcceptRA;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[IPv6AcceptRA]` section of the unit. See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    dhcpServerConfig = mkOption {
+      default = {};
+      example = { PoolOffset = 50; EmitDNS = false; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionDHCPServer;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[DHCPServer]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    # systemd.network.networks.*.ipv6PrefixDelegationConfig has been deprecated
+    # in 247 in favor of systemd.network.networks.*.ipv6SendRAConfig.
+    ipv6PrefixDelegationConfig = mkOption {
+      visible = false;
+      apply = _: throw "The option `systemd.network.networks.*.ipv6PrefixDelegationConfig` has been replaced by `systemd.network.networks.*.ipv6SendRAConfig`.";
+    };
+
+    ipv6SendRAConfig = mkOption {
+      default = {};
+      example = { EmitDNS = true; Managed = true; OtherInformation = true; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionIPv6SendRA;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[IPv6SendRA]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    dhcpServerStaticLeases = mkOption {
+      default = [];
+      example = [ { dhcpServerStaticLeaseConfig = { MACAddress = "65:43:4a:5b:d8:5f"; Address = "192.168.1.42"; }; } ];
+      type = with types; listOf (submodule dhcpServerStaticLeaseOptions);
+      description = lib.mdDoc ''
+        A list of DHCPServerStaticLease sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    ipv6Prefixes = mkOption {
+      default = [];
+      example = [ { ipv6PrefixConfig = { AddressAutoconfiguration = true; OnLink = true; }; } ];
+      type = with types; listOf (submodule ipv6PrefixOptions);
+      description = lib.mdDoc ''
+        A list of ipv6Prefix sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    ipv6RoutePrefixes = mkOption {
+      default = [];
+      example = [ { ipv6RoutePrefixConfig = { Route = "fd00::/64"; LifetimeSec = 3600; }; } ];
+      type = with types; listOf (submodule ipv6RoutePrefixOptions);
+      description = lib.mdDoc ''
+        A list of ipv6RoutePrefix sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    bridgeConfig = mkOption {
+      default = {};
+      example = { MulticastFlood = false; Cost = 20; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionBridge;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[Bridge]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    bridgeFDBs = mkOption {
+      default = [];
+      example = [ { bridgeFDBConfig = { MACAddress = "90:e2:ba:43:fc:71"; Destination = "192.168.100.4"; VNI = 3600; }; } ];
+      type = with types; listOf (submodule bridgeFDBOptions);
+      description = lib.mdDoc ''
+        A list of BridgeFDB sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    bridgeMDBs = mkOption {
+      default = [];
+      example = [ { bridgeMDBConfig = { MulticastGroupAddress = "ff02::1:2:3:4"; VLANId = 10; }; } ];
+      type = with types; listOf (submodule bridgeMDBOptions);
+      description = lib.mdDoc ''
+        A list of BridgeMDB sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    lldpConfig = mkOption {
+      default = {};
+      example = { MUDURL = "https://things.example.org/product_abc123/v5"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionLLDP;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[LLDP]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    canConfig = mkOption {
+      default = {};
+      example = { };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionCAN;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[CAN]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    ipoIBConfig = mkOption {
+      default = {};
+      example = { };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionIPoIB;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[IPoIB]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    qdiscConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionQDisc;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[QDisc]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    networkEmulatorConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; DelaySec = "20msec"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionNetworkEmulator;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[NetworkEmulator]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    tokenBucketFilterConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; Rate = "100k"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionTokenBucketFilter;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[TokenBucketFilter]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    pieConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; PacketLimit = "3847"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionPIE;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[PIE]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    flowQueuePIEConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; PacketLimit = "3847"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionFlowQueuePIE;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[FlowQueuePIE]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    stochasticFairBlueConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; PacketLimit = "3847"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionStochasticFairBlue;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[StochasticFairBlue]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    stochasticFairnessQueueingConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; PerturbPeriodSec = "30"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionStochasticFairnessQueueing;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[StochasticFairnessQueueing]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    bfifoConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; LimitBytes = "20K"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionBFIFO;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[BFIFO]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    pfifoConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; PacketLimit = "300"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionPFIFO;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[PFIFO]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    pfifoHeadDropConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; PacketLimit = "300"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionPFIFOHeadDrop;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[PFIFOHeadDrop]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    pfifoFastConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionPFIFOFast;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[PFIFOFast]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    cakeConfig = mkOption {
+      default = {};
+      example = { Bandwidth = "40M"; OverheadBytes = 8; CompensationMode = "ptm"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionCAKE;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[CAKE]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    controlledDelayConfig = mkOption {
+      default = {};
+      example = { Parent = "ingress"; TargetSec = "20msec"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionControlledDelay;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[ControlledDelay]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    deficitRoundRobinSchedulerConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionDeficitRoundRobinScheduler;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[DeficitRoundRobinScheduler]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    deficitRoundRobinSchedulerClassConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; QuantumBytes = "300k"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionDeficitRoundRobinSchedulerClass;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[DeficitRoundRobinSchedulerClass]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    enhancedTransmissionSelectionConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; QuantumBytes = "300k"; Bands = 3; PriorityMap = "100 200 300"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionEnhancedTransmissionSelection;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[EnhancedTransmissionSelection]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    genericRandomEarlyDetectionConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; VirtualQueues = 5; DefaultVirtualQueue = 3; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionGenericRandomEarlyDetection;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[GenericRandomEarlyDetection]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    fairQueueingControlledDelayConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; Flows = 5; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionFairQueueingControlledDelay;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[FairQueueingControlledDelay]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    fairQueueingConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; FlowLimit = 5; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionFairQueueing;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[FairQueueing]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    trivialLinkEqualizerConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; Id = 0; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionTrivialLinkEqualizer;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[TrivialLinkEqualizer]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    hierarchyTokenBucketConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionHierarchyTokenBucket;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[HierarchyTokenBucket]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    hierarchyTokenBucketClassConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; Rate = "10M"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionHierarchyTokenBucketClass;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[HierarchyTokenBucketClass]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    heavyHitterFilterConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; PacketLimit = 10000; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionHeavyHitterFilter;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[HeavyHitterFilter]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    quickFairQueueingConfig = mkOption {
+      default = {};
+      example = { Parent = "root"; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionQuickFairQueueing;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[QuickFairQueueing]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    quickFairQueueingConfigClass = mkOption {
+      default = {};
+      example = { Parent = "root"; Weight = 133; };
+      type = types.addCheck (types.attrsOf unitOption) check.network.sectionQuickFairQueueingClass;
+      description = lib.mdDoc ''
+        Each attribute in this set specifies an option in the
+        `[QuickFairQueueingClass]` section of the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    bridgeVLANs = mkOption {
+      default = [];
+      example = [ { bridgeVLANConfig = { VLAN = "10-20"; }; } ];
+      type = with types; listOf (submodule bridgeVLANOptions);
+      description = lib.mdDoc ''
+        A list of BridgeVLAN sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    name = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        The name of the network interface to match against.
+      '';
+    };
+
+    DHCP = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc ''
+        Whether to enable DHCP on the interfaces matched.
+      '';
+    };
+
+    domains = mkOption {
+      type = types.nullOr (types.listOf types.str);
+      default = null;
+      description = lib.mdDoc ''
+        A list of domains to pass to the network config.
+      '';
+    };
+
+    address = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of addresses to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    gateway = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of gateways to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    dns = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of dns servers to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    ntp = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of ntp servers to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    bridge = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of bridge interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    bond = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of bond interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    vrf = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of vrf interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    vlan = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of vlan interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    macvlan = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of macvlan interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    macvtap = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of macvtap interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    vxlan = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of vxlan interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    tunnel = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of tunnel interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    xfrm = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of xfrm interfaces to be added to the network section of the
+        unit.  See {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    addresses = mkOption {
+      default = [ ];
+      type = with types; listOf (submodule addressOptions);
+      description = lib.mdDoc ''
+        A list of address sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    routingPolicyRules = mkOption {
+      default = [ ];
+      type = with types; listOf (submodule routingPolicyRulesOptions);
+      description = lib.mdDoc ''
+        A list of routing policy rules sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+    routes = mkOption {
+      default = [ ];
+      type = with types; listOf (submodule routeOptions);
+      description = lib.mdDoc ''
+        A list of route sections to be added to the unit.  See
+        {manpage}`systemd.network(5)` for details.
+      '';
+    };
+
+  };
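+  # Illustrative sketch (not from upstream): a .network unit using the typed
+  # shorthands above; the interface name and addresses are assumptions.
+  #   systemd.network.networks."40-eth0" = {
+  #     matchConfig.Name = "eth0";
+  #     address = [ "192.0.2.10/24" ];
+  #     gateway = [ "192.0.2.1" ];
+  #     dns = [ "192.0.2.53" ];
+  #   };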
+
+  networkConfig = { config, ... }: {
+    config = {
+      matchConfig = optionalAttrs (config.name != null) {
+        Name = config.name;
+      };
+      networkConfig = optionalAttrs (config.DHCP != null) {
+        DHCP = config.DHCP;
+      } // optionalAttrs (config.domains != null) {
+        Domains = concatStringsSep " " config.domains;
+      };
+    };
+  };
+
+  networkdConfig = { config, ... }: {
+    options = {
+      routeTables = mkOption {
+        default = {};
+        example = { foo = 27; };
+        type = with types; attrsOf int;
+        description = lib.mdDoc ''
+          Defines route table names as an attrset of name to number.
+          See {manpage}`networkd.conf(5)` for details.
+        '';
+      };
+
+      addRouteTablesToIPRoute2 = mkOption {
+        default = true;
+        example = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If true and routeTables are set, then the specified route tables
+          will also be installed into /etc/iproute2/rt_tables.
+        '';
+      };
+    };
+
+    config = {
+      networkConfig = optionalAttrs (config.routeTables != { }) {
+        RouteTable = mapAttrsToList
+          (name: number: "${name}:${toString number}")
+          config.routeTables;
+      };
+    };
+  };
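+  # Illustrative sketch (not from upstream): named route tables declared globally
+  # and referenced from a routing policy rule; the table name, number and source
+  # range are assumptions.
+  #   systemd.network.config.routeTables = { vpn = 100; };
+  #   systemd.network.networks."40-eth0".routingPolicyRules = [
+  #     { routingPolicyRuleConfig = { From = "10.100.0.0/24"; Table = 100; }; }
+  #   ];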
+
+  renderConfig = def:
+    { text = ''
+        [Network]
+        ${attrsToSection def.networkConfig}
+      ''
+      + optionalString (def.dhcpV4Config != { }) ''
+        [DHCPv4]
+        ${attrsToSection def.dhcpV4Config}
+      ''
+      + optionalString (def.dhcpV6Config != { }) ''
+        [DHCPv6]
+        ${attrsToSection def.dhcpV6Config}
+      ''; };
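+  # With the routeTables example above, renderConfig would emit roughly:
+  #   [Network]
+  #   RouteTable=vpn:100
+  # (a sketch of the generated networkd.conf, not verbatim output).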
+
+  mkUnitFiles = prefix: cfg: listToAttrs (map (name: {
+    name = "${prefix}systemd/network/${name}";
+    value.source = "${cfg.units.${name}.unit}/${name}";
+  }) (attrNames cfg.units));
+
+  commonOptions = visible: {
+
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable networkd or not.
+      '';
+    };
+
+    links = mkOption {
+      default = {};
+      inherit visible;
+      type = with types; attrsOf (submodule [ { options = linkOptions; } ]);
+      description = lib.mdDoc "Definition of systemd network links.";
+    };
+
+    netdevs = mkOption {
+      default = {};
+      inherit visible;
+      type = with types; attrsOf (submodule [ { options = netdevOptions; } ]);
+      description = lib.mdDoc "Definition of systemd network devices.";
+    };
+
+    networks = mkOption {
+      default = {};
+      inherit visible;
+      type = with types; attrsOf (submodule [ { options = networkOptions; } networkConfig ]);
+      description = lib.mdDoc "Definition of systemd networks.";
+    };
+
+    config = mkOption {
+      default = {};
+      inherit visible;
+      type = with types; submodule [ { options = networkdOptions; } networkdConfig ];
+      description = lib.mdDoc "Definition of global systemd network config.";
+    };
+
+    units = mkOption {
+      description = lib.mdDoc "Definition of networkd units.";
+      default = {};
+      internal = true;
+      type = with types; attrsOf (submodule (
+        { name, config, ... }:
+        { options = mapAttrs (_: x: x // { internal = true; }) concreteUnitOptions;
+          config = {
+            unit = mkDefault (makeUnit name config);
+          };
+        }));
+    };
+
+    wait-online = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        example = false;
+        description = lib.mdDoc ''
+          Whether to enable the systemd-networkd-wait-online service.
+
+          systemd-networkd-wait-online can timeout and fail if there are no network interfaces
+          available for it to manage. When systemd-networkd is enabled but a different service is
+          responsible for managing the system's internet connection (for example, NetworkManager or
+          connman are used to manage WiFi connections), this service is unnecessary and can be
+          disabled.
+        '';
+      };
+      anyInterface = mkOption {
+        description = lib.mdDoc ''
+          Whether to consider the network online when any interface is online, as opposed to all of them.
+          This is useful on portable machines with a wired and a wireless interface, for example.
+
+          This is on by default if {option}`networking.useDHCP` is enabled.
+        '';
+        type = types.bool;
+        defaultText = "config.networking.useDHCP";
+        default = config.networking.useDHCP;
+      };
+
+      ignoredInterfaces = mkOption {
+        description = lib.mdDoc ''
+          Network interfaces to be ignored when deciding if the system is online.
+        '';
+        type = with types; listOf str;
+        default = [];
+        example = [ "wg0" ];
+      };
+
+      timeout = mkOption {
+        description = lib.mdDoc ''
+          Time to wait for the network to come online, in seconds. Set to 0 to disable.
+        '';
+        type = types.ints.unsigned;
+        default = 120;
+        example = 0;
+      };
+
+      extraArgs = mkOption {
+        description = lib.mdDoc ''
+          Extra command-line arguments to pass to systemd-networkd-wait-online.
+          These also affect per-interface `systemd-network-wait-online@` services.
+
+          See {manpage}`systemd-networkd-wait-online.service(8)` for all available options.
+        '';
+        type = with types; listOf str;
+        default = [];
+      };
+    };
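+    # Illustrative sketch (not from upstream): tuning wait-online on a portable
+    # machine; the ignored interface name is an assumption.
+    #   systemd.network.wait-online = {
+    #     anyInterface = true;
+    #     ignoredInterfaces = [ "wg0" ];
+    #     timeout = 30;
+    #   };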
+
+  };
+
+  commonConfig = config: let
+    cfg = config.systemd.network;
+    mkUnit = f: def: { inherit (def) enable; text = f def; };
+  in mkMerge [
+
+    # .link units are honored by udev, no matter if systemd-networkd is enabled or not.
+    {
+      systemd.network.units = mapAttrs' (n: v: nameValuePair "${n}.link" (mkUnit linkToUnit v)) cfg.links;
+
+      systemd.network.wait-online.extraArgs =
+        [ "--timeout=${toString cfg.wait-online.timeout}" ]
+        ++ optional cfg.wait-online.anyInterface "--any"
+        ++ map (i: "--ignore=${i}") cfg.wait-online.ignoredInterfaces;
+    }
+
+    (mkIf config.systemd.network.enable {
+
+      systemd.network.units = mapAttrs' (n: v: nameValuePair "${n}.netdev" (mkUnit netdevToUnit v)) cfg.netdevs
+        // mapAttrs' (n: v: nameValuePair "${n}.network" (mkUnit networkToUnit v)) cfg.networks;
+
+      # systemd-networkd is socket-activated by kernel netlink route change
+      # messages. It is important to have systemd buffer those on behalf of
+      # networkd.
+      systemd.sockets.systemd-networkd.wantedBy = [ "sockets.target" ];
+
+      systemd.services.systemd-networkd-wait-online = {
+        inherit (cfg.wait-online) enable;
+        wantedBy = [ "network-online.target" ];
+        serviceConfig.ExecStart = [
+          ""
+          "${config.systemd.package}/lib/systemd/systemd-networkd-wait-online ${utils.escapeSystemdExecArgs cfg.wait-online.extraArgs}"
+        ];
+      };
+
+      systemd.services."systemd-network-wait-online@" = {
+        description = "Wait for Network Interface %I to be Configured";
+        conflicts = [ "shutdown.target" ];
+        requisite = [ "systemd-networkd.service" ];
+        after = [ "systemd-networkd.service" ];
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          ExecStart = "${config.systemd.package}/lib/systemd/systemd-networkd-wait-online -i %I ${utils.escapeSystemdExecArgs cfg.wait-online.extraArgs}";
+        };
+      };
+
+    })
+  ];
+
+  stage2Config = let
+    cfg = config.systemd.network;
+    unitFiles = mkUnitFiles "" cfg;
+  in mkMerge [
+    (commonConfig config)
+
+    { environment.etc = unitFiles; }
+
+    (mkIf config.systemd.network.enable {
+
+      users.users.systemd-network.group = "systemd-network";
+
+      systemd.additionalUpstreamSystemUnits = [
+        "systemd-networkd-wait-online.service"
+        "systemd-networkd.service"
+        "systemd-networkd.socket"
+      ];
+
+      environment.etc."systemd/networkd.conf" = renderConfig cfg.config;
+
+      systemd.services.systemd-networkd = let
+        isReloadableUnitFileName = unitFileName: strings.hasSuffix ".network" unitFileName;
+        reloadableUnitFiles = attrsets.filterAttrs (k: v: isReloadableUnitFileName k) unitFiles;
+        nonReloadableUnitFiles = attrsets.filterAttrs (k: v: !isReloadableUnitFileName k) unitFiles;
+        unitFileSources = unitFiles: map (x: x.source) (attrValues unitFiles);
+      in {
+        wantedBy = [ "multi-user.target" ];
+        reloadTriggers = unitFileSources reloadableUnitFiles;
+        restartTriggers = unitFileSources nonReloadableUnitFiles ++ [
+          config.environment.etc."systemd/networkd.conf".source
+        ];
+        aliases = [ "dbus-org.freedesktop.network1.service" ];
+      };
+
+      networking.iproute2 = mkIf (cfg.config.addRouteTablesToIPRoute2 && cfg.config.routeTables != { }) {
+        enable = mkDefault true;
+        rttablesExtraConfig = ''
+
+          # Extra tables defined in NixOS systemd.networkd.config.routeTables.
+          ${concatStringsSep "\n" (mapAttrsToList (name: number: "${toString number} ${name}") cfg.config.routeTables)}
+        '';
+      };
+
+      services.resolved.enable = mkDefault true;
+
+    })
+  ];
+
+  stage1Options = {
+    options.boot.initrd.systemd.network.networks = mkOption {
+      type = with types; attrsOf (submodule {
+        # Default in initrd is dhcp-on-stop, which is correct if flushBeforeStage2 = false
+        config = mkIf config.boot.initrd.network.flushBeforeStage2 {
+          networkConfig.KeepConfiguration = mkDefault false;
+        };
+      });
+    };
+  };
+
+  stage1Config = let
+    cfg = config.boot.initrd.systemd.network;
+  in mkMerge [
+    (commonConfig config.boot.initrd)
+
+    {
+      systemd.network.enable = mkDefault config.boot.initrd.network.enable;
+      systemd.contents = mkUnitFiles "/etc/" cfg;
+
+      # Networkd .link files are used by udev to set up interfaces early in boot.
+      # This must be done in stage 1 to avoid race conditions between udev and
+      # network daemons.
+      systemd.network.units = lib.filterAttrs (n: _: hasSuffix ".link" n) config.systemd.network.units;
+      systemd.storePaths = ["${config.boot.initrd.systemd.package}/lib/systemd/network/99-default.link"];
+    }
+
+    (mkIf cfg.enable {
+
+      # For networkctl
+      systemd.dbus.enable = mkDefault true;
+
+      systemd.additionalUpstreamUnits = [
+        "systemd-networkd-wait-online.service"
+        "systemd-networkd.service"
+        "systemd-networkd.socket"
+        "systemd-network-generator.service"
+        "network-online.target"
+        "network-pre.target"
+        "network.target"
+        "nss-lookup.target"
+        "nss-user-lookup.target"
+        "remote-fs-pre.target"
+        "remote-fs.target"
+      ];
+      systemd.users.systemd-network = {};
+      systemd.groups.systemd-network = {};
+
+      systemd.contents."/etc/systemd/networkd.conf" = renderConfig cfg.config;
+
+      systemd.services.systemd-networkd = {
+        wantedBy = [ "initrd.target" ];
+        # These before and conflicts lines can be removed when this PR makes it into a release:
+        # https://github.com/systemd/systemd/pull/27791
+        before = ["initrd-switch-root.target"];
+        conflicts = ["initrd-switch-root.target"];
+      };
+      systemd.sockets.systemd-networkd = {
+        wantedBy = [ "initrd.target" ];
+        before = ["initrd-switch-root.target"];
+        conflicts = ["initrd-switch-root.target"];
+      };
+
+      systemd.services.systemd-network-generator.wantedBy = [ "sysinit.target" ];
+
+      systemd.storePaths = [
+        "${config.boot.initrd.systemd.package}/lib/systemd/systemd-networkd"
+        "${config.boot.initrd.systemd.package}/lib/systemd/systemd-networkd-wait-online"
+        "${config.boot.initrd.systemd.package}/lib/systemd/systemd-network-generator"
+      ];
+      kernelModules = [ "af_packet" ];
+
+    })
+  ];
+
+in
+
+{
+  imports = [ stage1Options ];
+
+  options = {
+    systemd.network = commonOptions true;
+    boot.initrd.systemd.network = commonOptions "shallow";
+  };
+
+  config = mkMerge [
+    stage2Config
+    (mkIf config.boot.initrd.systemd.enable {
+      assertions = [{
+        assertion = !config.boot.initrd.network.udhcpc.enable && config.boot.initrd.network.udhcpc.extraArgs == [];
+        message = ''
+          systemd stage 1 networking does not support 'boot.initrd.network.udhcpc'. Configure
+          DHCP with 'networking.*' options or with 'boot.initrd.systemd.network' options.
+        '';
+      }];
+
+      boot.initrd = stage1Config;
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/system/boot/pbkdf2-sha512.c b/nixpkgs/nixos/modules/system/boot/pbkdf2-sha512.c
new file mode 100644
index 000000000000..67e989957ba6
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/pbkdf2-sha512.c
@@ -0,0 +1,38 @@
+#include <stdint.h>
+#include <string.h>
+#include <stdio.h>
+#include <openssl/evp.h>
+
+/* Decode a hex string into raw bytes.  %2hhx stores a single byte per
+   conversion; plain %2x would write a whole unsigned int through rb. */
+void hextorb(const char* hex, uint8_t* rb)
+{
+	while(sscanf(hex, "%2hhx", rb) == 1)
+	{
+		hex += 2;
+		rb += 1;
+	}
+	*rb = '\0';
+}
+
+int main(int argc, char** argv)
+{
+	uint8_t k_user[2048];
+	uint8_t salt[2048];
+	uint8_t key[4096];
+
+	uint32_t key_length = atoi(argv[1]);
+	uint32_t iteration_count = atoi(argv[2]);
+
+	hextorb(argv[3], salt);
+	uint32_t salt_length = strlen(argv[3]) / 2;
+
+	fgets((char*)k_user, sizeof(k_user), stdin);
+	uint32_t k_user_length = strlen((char*)k_user);
+	if(k_user[k_user_length - 1] == '\n') {
+			k_user[k_user_length - 1] = '\0';
+	}
+
+	PKCS5_PBKDF2_HMAC((const char*)k_user, k_user_length, salt, salt_length, iteration_count, EVP_sha512(), key_length, key);
+	fwrite(key, 1, key_length, stdout);
+
+	return 0;
+}
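+
+/*
+ * Illustrative invocation, derived from the argument handling above; the binary
+ * name is an assumption:
+ *
+ *     echo "passphrase" | pbkdf2-sha512 <key-length> <iterations> <salt-hex> > key.bin
+ *
+ * argv[1] is the key length in bytes, argv[2] the iteration count, argv[3] the
+ * hex-encoded salt; the passphrase is read from stdin and the raw derived key is
+ * written to stdout.
+ */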
diff --git a/nixpkgs/nixos/modules/system/boot/plymouth.nix b/nixpkgs/nixos/modules/system/boot/plymouth.nix
new file mode 100644
index 000000000000..b041b8951fa3
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/plymouth.nix
@@ -0,0 +1,349 @@
+{ config, lib, options, pkgs, ... }:
+
+with lib;
+
+let
+
+  inherit (pkgs) nixos-icons;
+  plymouth = pkgs.plymouth.override {
+    systemd = config.boot.initrd.systemd.package;
+  };
+
+  cfg = config.boot.plymouth;
+  opt = options.boot.plymouth;
+
+  nixosBreezePlymouth = pkgs.plasma5Packages.breeze-plymouth.override {
+    logoFile = cfg.logo;
+    logoName = "nixos";
+    osName = "NixOS";
+    osVersion = config.system.nixos.release;
+  };
+
+  plymouthLogos = pkgs.runCommand "plymouth-logos" { inherit (cfg) logo; } ''
+    mkdir -p $out
+
+    # For themes that are compiled with PLYMOUTH_LOGO_FILE
+    mkdir -p $out/etc/plymouth
+    ln -s $logo $out/etc/plymouth/logo.png
+
+    # Logo for bgrt theme
+    # Note this is technically an abuse of watermark for the bgrt theme
+    # See: https://gitlab.freedesktop.org/plymouth/plymouth/-/issues/95#note_813768
+    mkdir -p $out/share/plymouth/themes/spinner
+    ln -s $logo $out/share/plymouth/themes/spinner/watermark.png
+
+    # Logo for spinfinity theme
+    # See: https://gitlab.freedesktop.org/plymouth/plymouth/-/issues/106
+    mkdir -p $out/share/plymouth/themes/spinfinity
+    ln -s $logo $out/share/plymouth/themes/spinfinity/header-image.png
+  '';
+
+  themesEnv = pkgs.buildEnv {
+    name = "plymouth-themes";
+    paths = [
+      plymouth
+      plymouthLogos
+    ] ++ cfg.themePackages;
+  };
+
+  configFile = pkgs.writeText "plymouthd.conf" ''
+    [Daemon]
+    ShowDelay=0
+    DeviceTimeout=8
+    Theme=${cfg.theme}
+    ${cfg.extraConfig}
+  '';
+
+in
+
+{
+
+  options = {
+
+    boot.plymouth = {
+
+      enable = mkEnableOption (lib.mdDoc "Plymouth boot splash screen");
+
+      font = mkOption {
+        default = "${pkgs.dejavu_fonts.minimal}/share/fonts/truetype/DejaVuSans.ttf";
+        defaultText = literalExpression ''"''${pkgs.dejavu_fonts.minimal}/share/fonts/truetype/DejaVuSans.ttf"'';
+        type = types.path;
+        description = lib.mdDoc ''
+          Font file made available for displaying text on the splash screen.
+        '';
+      };
+
+      themePackages = mkOption {
+        default = lib.optional (cfg.theme == "breeze") nixosBreezePlymouth;
+        defaultText = literalMD ''
+          A NixOS branded variant of the breeze theme when
+          `config.${opt.theme} == "breeze"`, otherwise
+          `[ ]`.
+        '';
+        type = types.listOf types.package;
+        description = lib.mdDoc ''
+          Extra theme packages for plymouth.
+        '';
+      };
+
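+      # The theme must be provided either by the plymouth package itself or by
+      # one of the packages in `themePackages`; e.g. "breeze" pulls in the
+      # NixOS-branded breeze theme via the `themePackages` default above.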
+      theme = mkOption {
+        default = "bgrt";
+        type = types.str;
+        description = lib.mdDoc ''
+          Splash screen theme.
+        '';
+      };
+
+      logo = mkOption {
+        type = types.path;
+        # Dimensions are 48x48 to match GDM logo
+        default = "${nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png";
+        defaultText = literalExpression ''"''${nixos-icons}/share/icons/hicolor/48x48/apps/nix-snowflake-white.png"'';
+        example = literalExpression ''
+          pkgs.fetchurl {
+            url = "https://nixos.org/logo/nixos-hires.png";
+            sha256 = "1ivzgd7iz0i06y36p8m5w48fd8pjqwxhdaavc0pxs7w1g7mcy5si";
+          }
+        '';
+        description = lib.mdDoc ''
+          Logo which is displayed on the splash screen.
+        '';
+      };
+
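+      # A minimal usage sketch (hypothetical value; any plymouthd.conf [Daemon]
+      # setting can be appended here):
+      #   boot.plymouth.extraConfig = "ShowDelay=5";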
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Literal string to append to the `plymouthd.conf` configuration
+          file generated by the plymouth module.
+        '';
+      };
+
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    boot.kernelParams = [ "splash" ];
+
+    # To be discoverable by systemd.
+    environment.systemPackages = [ plymouth ];
+
+    environment.etc."plymouth/plymouthd.conf".source = configFile;
+    environment.etc."plymouth/plymouthd.defaults".source = "${plymouth}/share/plymouth/plymouthd.defaults";
+    environment.etc."plymouth/logo.png".source = cfg.logo;
+    environment.etc."plymouth/themes".source = "${themesEnv}/share/plymouth/themes";
+    # XXX: Needed because we supply a different set of plugins in initrd.
+    environment.etc."plymouth/plugins".source = "${plymouth}/lib/plymouth";
+
+    systemd.tmpfiles.rules = [
+      "d /run/plymouth 0755 root root 0 -"
+      "L+ /run/plymouth/plymouthd.defaults - - - - /etc/plymouth/plymouthd.defaults"
+      "L+ /run/plymouth/themes - - - - /etc/plymouth/themes"
+      "L+ /run/plymouth/plugins - - - - /etc/plymouth/plugins"
+    ];
+
+    systemd.packages = [ plymouth ];
+
+    systemd.services.plymouth-kexec.wantedBy = [ "kexec.target" ];
+    systemd.services.plymouth-halt.wantedBy = [ "halt.target" ];
+    systemd.services.plymouth-quit-wait.wantedBy = [ "multi-user.target" ];
+    systemd.services.plymouth-quit.wantedBy = [ "multi-user.target" ];
+    systemd.services.plymouth-poweroff.wantedBy = [ "poweroff.target" ];
+    systemd.services.plymouth-reboot.wantedBy = [ "reboot.target" ];
+    systemd.services.plymouth-read-write.wantedBy = [ "sysinit.target" ];
+    systemd.services.systemd-ask-password-plymouth.wantedBy = [ "multi-user.target" ];
+    systemd.paths.systemd-ask-password-plymouth.wantedBy = [ "multi-user.target" ];
+
+    # Prevent Plymouth from taking over the screen during system updates.
+    systemd.services.plymouth-start.restartIfChanged = false;
+
+    boot.initrd.systemd = {
+      extraBin.plymouth = "${plymouth}/bin/plymouth"; # for the recovery shell
+      storePaths = [
+        "${lib.getBin config.boot.initrd.systemd.package}/bin/systemd-tty-ask-password-agent"
+        "${plymouth}/bin/plymouthd"
+        "${plymouth}/sbin/plymouthd"
+      ];
+      packages = [ plymouth ]; # systemd units
+      contents = {
+        # Files
+        "/etc/plymouth/plymouthd.conf".source = configFile;
+        "/etc/plymouth/logo.png".source = cfg.logo;
+        "/etc/plymouth/plymouthd.defaults".source = "${plymouth}/share/plymouth/plymouthd.defaults";
+        # Directories
+        "/etc/plymouth/plugins".source = pkgs.runCommand "plymouth-initrd-plugins" {} ''
+          # Check if the actual requested theme is here
+          if [[ ! -d ${themesEnv}/share/plymouth/themes/${cfg.theme} ]]; then
+              echo "The requested theme: ${cfg.theme} is not provided by any of the packages in boot.plymouth.themePackages"
+              exit 1
+          fi
+
+          moduleName="$(sed -n 's,ModuleName *= *,,p' ${themesEnv}/share/plymouth/themes/${cfg.theme}/${cfg.theme}.plymouth)"
+
+          mkdir -p $out/renderers
+          # module might come from a theme
+          cp ${themesEnv}/lib/plymouth/*.so $out
+          cp ${plymouth}/lib/plymouth/renderers/*.so $out/renderers
+        '';
+        "/etc/plymouth/themes".source = pkgs.runCommand "plymouth-initrd-themes" {} ''
+          # Check if the actual requested theme is here
+          if [[ ! -d ${themesEnv}/share/plymouth/themes/${cfg.theme} ]]; then
+              echo "The requested theme: ${cfg.theme} is not provided by any of the packages in boot.plymouth.themePackages"
+              exit 1
+          fi
+
+          mkdir -p $out/${cfg.theme}
+          cp -r ${themesEnv}/share/plymouth/themes/${cfg.theme}/* $out/${cfg.theme}
+          # Copy more themes if the theme depends on others
+          for theme in $(grep -hRo '/share/plymouth/themes/.*$' $out | xargs -n1 basename); do
+              if [[ -d "${themesEnv}/share/plymouth/themes/$theme" ]]; then
+                  if [[ ! -d "$out/$theme" ]]; then
+                    echo "Adding dependent theme: $theme"
+                    mkdir -p "$out/$theme"
+                    cp -r "${themesEnv}/share/plymouth/themes/$theme"/* "$out/$theme"
+                  fi
+              else
+                echo "Missing theme dependency: $theme"
+              fi
+          done
+          # Fixup references
+          for theme in $out/*/*.plymouth; do
+            sed -i "s,${builtins.storeDir}/.*/share/plymouth/themes,$out," "$theme"
+          done
+        '';
+
+        # Fonts
+        "/etc/plymouth/fonts".source = pkgs.runCommand "plymouth-initrd-fonts" {} ''
+          mkdir -p $out
+          cp ${cfg.font} $out
+        '';
+        "/etc/fonts/fonts.conf".text = ''
+          <?xml version="1.0"?>
+          <!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
+          <fontconfig>
+              <dir>/etc/plymouth/fonts</dir>
+          </fontconfig>
+        '';
+      };
+      # Properly enable units. These are the units that Arch Linux copies as well.
+      services = {
+        plymouth-halt.wantedBy = [ "halt.target" ];
+        plymouth-kexec.wantedBy = [ "kexec.target" ];
+        plymouth-poweroff.wantedBy = [ "poweroff.target" ];
+        plymouth-quit-wait.wantedBy = [ "multi-user.target" ];
+        plymouth-quit.wantedBy = [ "multi-user.target" ];
+        plymouth-read-write.wantedBy = [ "sysinit.target" ];
+        plymouth-reboot.wantedBy = [ "reboot.target" ];
+        plymouth-start.wantedBy = [ "initrd-switch-root.target" "sysinit.target" ];
+        plymouth-switch-root-initramfs.wantedBy = [ "halt.target" "kexec.target" "plymouth-switch-root-initramfs.service" "poweroff.target" "reboot.target" ];
+        plymouth-switch-root.wantedBy = [ "initrd-switch-root.target" ];
+      };
+      # Link in runtime files before starting
+      services.plymouth-start.preStart = ''
+        mkdir -p /run/plymouth
+        ln -sf /etc/plymouth/{plymouthd.defaults,themes,plugins} /run/plymouth/
+      '';
+    };
+
+    # Insert required udev rules. We take stage 2 systemd because the udev
+    # rules are only generated when building with logind.
+    boot.initrd.services.udev.packages = [ (pkgs.runCommand "initrd-plymouth-udev-rules" {} ''
+      mkdir -p $out/etc/udev/rules.d
+      cp ${config.systemd.package.out}/lib/udev/rules.d/{70-uaccess,71-seat}.rules $out/etc/udev/rules.d
+      sed -i '/loginctl/d' $out/etc/udev/rules.d/71-seat.rules
+    '') ];
+
+    boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+      copy_bin_and_libs ${plymouth}/bin/plymouth
+      copy_bin_and_libs ${plymouth}/bin/plymouthd
+
+      # Check if the actual requested theme is here
+      if [[ ! -d ${themesEnv}/share/plymouth/themes/${cfg.theme} ]]; then
+          echo "The requested theme: ${cfg.theme} is not provided by any of the packages in boot.plymouth.themePackages"
+          exit 1
+      fi
+
+      moduleName="$(sed -n 's,ModuleName *= *,,p' ${themesEnv}/share/plymouth/themes/${cfg.theme}/${cfg.theme}.plymouth)"
+
+      mkdir -p $out/lib/plymouth/renderers
+      # module might come from a theme
+      cp ${themesEnv}/lib/plymouth/*.so $out/lib/plymouth
+      cp ${plymouth}/lib/plymouth/renderers/*.so $out/lib/plymouth/renderers
+
+      mkdir -p $out/share/plymouth/themes
+      cp ${plymouth}/share/plymouth/plymouthd.defaults $out/share/plymouth
+
+      # Copy themes into working directory for patching
+      mkdir themes
+
+      # Use -L to copy the directories proper, not the symlinks to them.
+      # Copy all themes because they're not large assets, and bgrt depends on the ImageDir of
+      # the spinner theme.
+      cp -r -L ${themesEnv}/share/plymouth/themes/* themes
+
+      # Patch out any attempted references to the theme or plymouth's themes directory
+      chmod -R +w themes
+      find themes -type f | while read file
+      do
+        sed -i "s,${builtins.storeDir}/.*/share/plymouth/themes,$out/share/plymouth/themes,g" $file
+      done
+
+      # Install themes
+      cp -r themes/* $out/share/plymouth/themes
+
+      # Install logo
+      mkdir -p $out/etc/plymouth
+      cp -r -L ${themesEnv}/etc/plymouth $out/etc
+
+      # Setup font
+      mkdir -p $out/share/fonts
+      cp ${cfg.font} $out/share/fonts
+      mkdir -p $out/etc/fonts
+      cat > $out/etc/fonts/fonts.conf <<EOF
+      <?xml version="1.0"?>
+      <!DOCTYPE fontconfig SYSTEM "urn:fontconfig:fonts.dtd">
+      <fontconfig>
+          <dir>$out/share/fonts</dir>
+      </fontconfig>
+      EOF
+    '';
+
+    boot.initrd.extraUtilsCommandsTest = mkIf (!config.boot.initrd.systemd.enable) ''
+      $out/bin/plymouthd --help >/dev/null
+      $out/bin/plymouth --help >/dev/null
+    '';
+
+    boot.initrd.extraUdevRulesCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      cp ${config.systemd.package}/lib/udev/rules.d/{70-uaccess,71-seat}.rules $out
+      sed -i '/loginctl/d' $out/71-seat.rules
+    '';
+
+    # We use `mkAfter` to ensure that the LUKS password prompt is shown before the splash screen.
+    boot.initrd.preLVMCommands = mkIf (!config.boot.initrd.systemd.enable) (mkAfter ''
+      mkdir -p /etc/plymouth
+      mkdir -p /run/plymouth
+      ln -s $extraUtils/etc/plymouth/logo.png /etc/plymouth/logo.png
+      ln -s ${configFile} /etc/plymouth/plymouthd.conf
+      ln -s $extraUtils/share/plymouth/plymouthd.defaults /run/plymouth/plymouthd.defaults
+      ln -s $extraUtils/share/plymouth/themes /run/plymouth/themes
+      ln -s $extraUtils/lib/plymouth /run/plymouth/plugins
+      ln -s $extraUtils/etc/fonts /etc/fonts
+
+      plymouthd --mode=boot --pid-file=/run/plymouth/pid --attach-to-session
+      plymouth show-splash
+    '');
+
+    boot.initrd.postMountCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+      plymouth update-root-fs --new-root-dir="$targetRoot"
+    '';
+
+    # `mkBefore` to ensure that any custom prompts remain visible.
+    boot.initrd.preFailCommands = mkIf (!config.boot.initrd.systemd.enable) (mkBefore ''
+      plymouth quit --wait
+    '');
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/resolved.nix b/nixpkgs/nixos/modules/system/boot/resolved.nix
new file mode 100644
index 000000000000..b898a6317962
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/resolved.nix
@@ -0,0 +1,160 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.resolved;
+
+  dnsmasqResolve = config.services.dnsmasq.enable &&
+                   config.services.dnsmasq.resolveLocalQueries;
+
+in
+{
+
+  options = {
+
+    services.resolved.enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable the systemd DNS resolver daemon, `systemd-resolved`.
+
+        Search for `services.resolved` to see all options.
+      '';
+    };
+
+    services.resolved.fallbackDns = mkOption {
+      default = [ ];
+      example = [ "8.8.8.8" "2001:4860:4860::8844" ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of IPv4 and IPv6 addresses to use as the fallback DNS servers.
+        If this option is empty, a compiled-in list of DNS servers is used instead.
+      '';
+    };
+
+    services.resolved.domains = mkOption {
+      default = config.networking.search;
+      defaultText = literalExpression "config.networking.search";
+      example = [ "example.com" ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        A list of domains. These domains are used as search suffixes
+        when resolving single-label host names (domain names which
+        contain no dot), in order to qualify them into fully-qualified
+        domain names (FQDNs).
+
+        For compatibility reasons, if this setting is not specified,
+        the search domains listed in
+        {file}`/etc/resolv.conf` are used instead, if
+        that file exists and any domains are configured in it.
+      '';
+    };
+
+    services.resolved.llmnr = mkOption {
+      default = "true";
+      example = "false";
+      type = types.enum [ "true" "resolve" "false" ];
+      description = lib.mdDoc ''
+        Controls Link-Local Multicast Name Resolution support
+        (RFC 4795) on the local host.
+
+        If set to
+        - `"true"`: Enables full LLMNR responder and resolver support.
+        - `"false"`: Disables both.
+        - `"resolve"`: Only resolution support is enabled, but responding is disabled.
+      '';
+    };
+
+    services.resolved.dnssec = mkOption {
+      default = "false";
+      example = "true";
+      type = types.enum [ "true" "allow-downgrade" "false" ];
+      description = lib.mdDoc ''
+        If set to
+        - `"true"`:
+            all DNS lookups are DNSSEC-validated locally (excluding
+            LLMNR and Multicast DNS). Note that this mode requires a
+            DNS server that supports DNSSEC. If the DNS server does
+            not properly support DNSSEC all validations will fail.
+        - `"allow-downgrade"`:
+            DNSSEC validation is attempted, but if the server does not
+            support DNSSEC properly, DNSSEC mode is automatically
+            disabled. Note that this mode makes DNSSEC validation
+            vulnerable to "downgrade" attacks, where an attacker might
+            be able to trigger a downgrade to non-DNSSEC mode by
+            synthesizing a DNS response that suggests DNSSEC was not
+            supported.
+        - `"false"`: DNS lookups are not DNSSEC validated.
+
+        As of September 2023, systemd upstream advises disabling
+        DNSSEC by default, as the current code is not robust enough
+        to deal with non-compliant servers "in the wild", which will
+        usually give you an experience that is both broken and
+        insecure.
+      '';
+    };
+
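+    # A usage sketch (hedged example; `DNSOverTLS` is an upstream resolved.conf
+    # setting, not an option defined by this module):
+    #   services.resolved.extraConfig = ''
+    #     DNSOverTLS=opportunistic
+    #   '';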
+    services.resolved.extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Extra config to append to resolved.conf.
+      '';
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      { assertion = !config.networking.useHostResolvConf;
+        message = "Using host resolv.conf is not supported with systemd-resolved";
+      }
+    ];
+
+    users.users.systemd-resolve.group = "systemd-resolve";
+
+    # Add "resolve" to the NSS hosts database when resolved and nscd are enabled.
+    # system.nssModules is configured in nixos/modules/system/boot/systemd.nix.
+    # The entry is added with order 501 so other modules can place themselves before it with mkBefore.
+    system.nssDatabases.hosts = (mkOrder 501 ["resolve [!UNAVAIL=return]"]);
+
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-resolved.service"
+    ];
+
+    systemd.services.systemd-resolved = {
+      wantedBy = [ "multi-user.target" ];
+      aliases = [ "dbus-org.freedesktop.resolve1.service" ];
+      restartTriggers = [ config.environment.etc."systemd/resolved.conf".source ];
+    };
+
+    environment.etc = {
+      "systemd/resolved.conf".text = ''
+        [Resolve]
+        ${optionalString (config.networking.nameservers != [])
+          "DNS=${concatStringsSep " " config.networking.nameservers}"}
+        ${optionalString (cfg.fallbackDns != [])
+          "FallbackDNS=${concatStringsSep " " cfg.fallbackDns}"}
+        ${optionalString (cfg.domains != [])
+          "Domains=${concatStringsSep " " cfg.domains}"}
+        LLMNR=${cfg.llmnr}
+        DNSSEC=${cfg.dnssec}
+        ${config.services.resolved.extraConfig}
+      '';
+
+      # Symlink /etc/resolv.conf to systemd-resolved's dynamic stub file, as recommended by upstream:
+      # https://www.freedesktop.org/software/systemd/man/systemd-resolved.html#/etc/resolv.conf
+      "resolv.conf".source = "/run/systemd/resolve/stub-resolv.conf";
+    } // optionalAttrs dnsmasqResolve {
+      "dnsmasq-resolv.conf".source = "/run/systemd/resolve/resolv.conf";
+    };
+
+    # If networkmanager is enabled, ask it to interface with resolved.
+    networking.networkmanager.dns = "systemd-resolved";
+
+    networking.resolvconf.package = pkgs.systemd;
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/shutdown.nix b/nixpkgs/nixos/modules/system/boot/shutdown.nix
new file mode 100644
index 000000000000..8cda7b3aabe8
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/shutdown.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+
+  # This unit saves the value of the system clock to the hardware
+  # clock on shutdown.
+  systemd.services.save-hwclock =
+    { description = "Save Hardware Clock";
+
+      wantedBy = [ "shutdown.target" ];
+
+      unitConfig = {
+        DefaultDependencies = false;
+        ConditionPathExists = "/dev/rtc";
+      };
+
+      serviceConfig = {
+        Type = "oneshot";
+        ExecStart = "${pkgs.util-linux}/sbin/hwclock --systohc ${if config.time.hardwareClockInLocalTime then "--localtime" else "--utc"}";
+      };
+    };
+
+  boot.kernel.sysctl."kernel.poweroff_cmd" = "${config.systemd.package}/sbin/poweroff";
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/stage-1-init.sh b/nixpkgs/nixos/modules/system/boot/stage-1-init.sh
new file mode 100644
index 000000000000..086e5d65da2f
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/stage-1-init.sh
@@ -0,0 +1,664 @@
+#! @shell@
+
+targetRoot=/mnt-root
+console=tty1
+verbose="@verbose@"
+
+info() {
+    if [[ -n "$verbose" ]]; then
+        echo "$@"
+    fi
+}
+
+extraUtils="@extraUtils@"
+export LD_LIBRARY_PATH=@extraUtils@/lib
+export PATH=@extraUtils@/bin
+ln -s @extraUtils@/bin /bin
+# hardcoded in util-linux's mount helper search path `/run/wrappers/bin:/run/current-system/sw/bin:/sbin`
+ln -s @extraUtils@/bin /sbin
+
+# Copy the secrets to their needed location
+if [ -d "@extraUtils@/secrets" ]; then
+    for secret in $(cd "@extraUtils@/secrets"; find . -type f); do
+        mkdir -p $(dirname "/$secret")
+        ln -s "@extraUtils@/secrets/$secret" "$secret"
+    done
+fi
+
+# Stop LVM complaining about fd3
+export LVM_SUPPRESS_FD_WARNINGS=true
+
+fail() {
+    if [ -n "$panicOnFail" ]; then exit 1; fi
+
+    @preFailCommands@
+
+    # If starting stage 2 failed, allow the user to repair the problem
+    # in an interactive shell.
+    cat <<EOF
+
+An error occurred in stage 1 of the boot process, which must mount the
+root filesystem on \`$targetRoot' and then start stage 2.  Press one
+of the following keys:
+
+EOF
+    if [ -n "$allowShell" ]; then cat <<EOF
+  i) to launch an interactive shell
+  f) to start an interactive shell having pid 1 (needed if you want to
+     start stage 2's init manually)
+EOF
+    fi
+    cat <<EOF
+  r) to reboot immediately
+  *) to ignore the error and continue
+EOF
+
+    read -n 1 reply
+
+    if [ -n "$allowShell" -a "$reply" = f ]; then
+        exec setsid @shell@ -c "exec @shell@ < /dev/$console >/dev/$console 2>/dev/$console"
+    elif [ -n "$allowShell" -a "$reply" = i ]; then
+        echo "Starting interactive shell..."
+        setsid @shell@ -c "exec @shell@ < /dev/$console >/dev/$console 2>/dev/$console" || fail
+    elif [ "$reply" = r ]; then
+        echo "Rebooting..."
+        reboot -f
+    else
+        info "Continuing..."
+    fi
+}
+
+trap 'fail' 0
+
+
+# Print a greeting.
+info
+info "<<< @distroName@ Stage 1 >>>"
+info
+
+# Make several required directories.
+mkdir -p /etc/udev
+touch /etc/fstab # to shut up mount
+ln -s /proc/mounts /etc/mtab # to shut up mke2fs
+touch /etc/udev/hwdb.bin # to shut up udev
+touch /etc/initrd-release
+
+# Function for waiting for device(s) to appear.
+waitDevice() {
+    local device="$1"
+    # Split device string using ':' as a delimiter as bcachefs
+    # uses this for multi-device filesystems, i.e. /dev/sda1:/dev/sda2:/dev/sda3
+    local IFS=':'
+
+    # USB storage devices tend to appear with some delay.  It would be
+    # great if we had a way to synchronously wait for them, but
+    # alas...  So just wait for a few seconds for the device to
+    # appear.
+    for dev in $device; do
+        if test ! -e $dev; then
+            echo -n "waiting for device $dev to appear..."
+            try=20
+            while [ $try -gt 0 ]; do
+                sleep 1
+                # also re-try lvm activation now that new block devices might have appeared
+                lvm vgchange -ay
+                # and tell udev to create nodes for the new LVs
+                udevadm trigger --action=add
+                if test -e $dev; then break; fi
+                echo -n "."
+                try=$((try - 1))
+            done
+            echo
+            [ $try -ne 0 ]
+        fi
+    done
+}
+
+# Create the mount point if required.
+makeMountPoint() {
+    local device="$1"
+    local mountPoint="$2"
+    local options="$3"
+
+    local IFS=,
+
+    # If we're bind mounting a file, the mount point should also be a file.
+    if ! [ -d "$device" ]; then
+        for opt in $options; do
+            if [ "$opt" = bind ] || [ "$opt" = rbind ]; then
+                mkdir -p "$(dirname "/mnt-root$mountPoint")"
+                touch "/mnt-root$mountPoint"
+                return
+            fi
+        done
+    fi
+
+    mkdir -m 0755 -p "/mnt-root$mountPoint"
+}
+
+# Mount special file systems.
+specialMount() {
+  local device="$1"
+  local mountPoint="$2"
+  local options="$3"
+  local fsType="$4"
+
+  mkdir -m 0755 -p "$mountPoint"
+  mount -n -t "$fsType" -o "$options" "$device" "$mountPoint"
+}
+source @earlyMountScript@
+
+# Copy initrd secrets from /.initrd-secrets to their actual destinations
+if [ -d "/.initrd-secrets" ]; then
+    #
+    # Secrets are named by their full destination pathname and stored
+    # under /.initrd-secrets/
+    #
+    for secret in $(cd "/.initrd-secrets"; find . -type f); do
+        mkdir -p $(dirname "/$secret")
+        cp "/.initrd-secrets/$secret" "$secret"
+    done
+fi
+
+# Log the script output to /dev/kmsg or /run/log/stage-1-init.log.
+mkdir -p /tmp
+mkfifo /tmp/stage-1-init.log.fifo
+logOutFd=8 && logErrFd=9
+eval "exec $logOutFd>&1 $logErrFd>&2"
+if test -w /dev/kmsg; then
+    tee -i < /tmp/stage-1-init.log.fifo /proc/self/fd/"$logOutFd" | while read -r line; do
+        if test -n "$line"; then
+            echo "<7>stage-1-init: [$(date)] $line" > /dev/kmsg
+        fi
+    done &
+else
+    mkdir -p /run/log
+    tee -i < /tmp/stage-1-init.log.fifo /run/log/stage-1-init.log &
+fi
+exec > /tmp/stage-1-init.log.fifo 2>&1
+
+
+# Process the kernel command line.
+export stage2Init=/init
+for o in $(cat /proc/cmdline); do
+    case $o in
+        console=*)
+            set -- $(IFS==; echo $o)
+            params=$2
+            set -- $(IFS=,; echo $params)
+            console=$1
+            ;;
+        init=*)
+            set -- $(IFS==; echo $o)
+            stage2Init=$2
+            ;;
+        boot.persistence=*)
+            set -- $(IFS==; echo $o)
+            persistence=$2
+            ;;
+        boot.persistence.opt=*)
+            set -- $(IFS==; echo $o)
+            persistence_opt=$2
+            ;;
+        boot.trace|debugtrace)
+            # Show each command.
+            set -x
+            ;;
+        boot.shell_on_fail)
+            allowShell=1
+            ;;
+        boot.debug1|debug1) # stop right away
+            allowShell=1
+            fail
+            ;;
+        boot.debug1devices) # stop after loading modules and creating device nodes
+            allowShell=1
+            debug1devices=1
+            ;;
+        boot.debug1mounts) # stop after mounting file systems
+            allowShell=1
+            debug1mounts=1
+            ;;
+        boot.panic_on_fail|stage1panic=1)
+            panicOnFail=1
+            ;;
+        root=*)
+            # If a root device is specified on the kernel command
+            # line, make it available through the symlink /dev/root.
+            # Recognise LABEL= and UUID= to support UNetbootin.
+            set -- $(IFS==; echo $o)
+            if [ $2 = "LABEL" ]; then
+                root="/dev/disk/by-label/$3"
+            elif [ $2 = "UUID" ]; then
+                root="/dev/disk/by-uuid/$3"
+            else
+                root=$2
+            fi
+            ln -s "$root" /dev/root
+            ;;
+        copytoram)
+            copytoram=1
+            ;;
+        findiso=*)
+            # if an iso name is supplied, try to find the device where
+            # the iso resides on
+            set -- $(IFS==; echo $o)
+            isoPath=$2
+            ;;
+    esac
+done
+
+# Set hostid before modules are loaded.
+# This is needed by the spl/zfs modules.
+@setHostId@
+
+# Load the required kernel modules.
+echo @extraUtils@/bin/modprobe > /proc/sys/kernel/modprobe
+for i in @kernelModules@; do
+    info "loading module $(basename $i)..."
+    modprobe $i
+done
+
+
+# Create device nodes in /dev.
+@preDeviceCommands@
+info "running udev..."
+ln -sfn /proc/self/fd /dev/fd
+ln -sfn /proc/self/fd/0 /dev/stdin
+ln -sfn /proc/self/fd/1 /dev/stdout
+ln -sfn /proc/self/fd/2 /dev/stderr
+mkdir -p /etc/systemd
+ln -sfn @linkUnits@ /etc/systemd/network
+mkdir -p /etc/udev
+ln -sfn @udevRules@ /etc/udev/rules.d
+mkdir -p /dev/.mdadm
+systemd-udevd --daemon
+udevadm trigger --action=add
+udevadm settle
+
+
+# XXX: Use case usb->lvm will still fail, usb->luks->lvm is covered
+@preLVMCommands@
+
+info "starting device mapper and LVM..."
+lvm vgchange -ay
+
+if test -n "$debug1devices"; then fail; fi
+
+
+@postDeviceCommands@
+
+
+# Check the specified file system, if appropriate.
+checkFS() {
+    local device="$1"
+    local fsType="$2"
+
+    # Only check block devices.
+    if [ ! -b "$device" ]; then return 0; fi
+
+    # Don't check ROM filesystems.
+    if [ "$fsType" = iso9660 -o "$fsType" = udf ]; then return 0; fi
+
+    # Don't check resilient COWs as they validate the fs structures at mount time
+    if [ "$fsType" = btrfs -o "$fsType" = zfs -o "$fsType" = bcachefs ]; then return 0; fi
+
+    # Skip fsck for apfs as the fsck utility does not support repairing the filesystem (no -a option)
+    if [ "$fsType" = apfs ]; then return 0; fi
+
+    # Skip fsck for nilfs2 - not needed by design and no fsck tool for this filesystem.
+    if [ "$fsType" = nilfs2 ]; then return 0; fi
+
+    # Skip fsck for inherently readonly filesystems.
+    if [ "$fsType" = squashfs ]; then return 0; fi
+
+    # Skip fsck.erofs because it is still experimental.
+    if [ "$fsType" = erofs ]; then return 0; fi
+
+    # If we couldn't figure out the FS type, then skip fsck.
+    if [ "$fsType" = auto ]; then
+        echo 'cannot check filesystem with type "auto"!'
+        return 0
+    fi
+
+    # Device might be already mounted manually
+    # e.g. NBD-device or the host filesystem of the file which contains encrypted root fs
+    if mount | grep -q "^$device on "; then
+        echo "skip checking already mounted $device"
+        return 0
+    fi
+
+    # Optionally, skip fsck on journaling filesystems.  This option is
+    # a hack - it's mostly because e2fsck on ext3 takes much longer to
+    # recover the journal than the ext3 implementation in the kernel
+    # does (minutes versus seconds).
+    if test -z "@checkJournalingFS@" -a \
+        \( "$fsType" = ext3 -o "$fsType" = ext4 -o "$fsType" = reiserfs \
+        -o "$fsType" = xfs -o "$fsType" = jfs -o "$fsType" = f2fs \)
+    then
+        return 0
+    fi
+
+    echo "checking $device..."
+
+    fsck -V -a "$device"
+    fsckResult=$?
+
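+    # fsck's exit status is a bit mask: 1 = errors corrected, 2 = reboot
+    # required, 4 = errors left uncorrected, 8 = operational error.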
+    if test $(($fsckResult | 2)) = $fsckResult; then
+        echo "fsck finished, rebooting..."
+        sleep 3
+        reboot -f
+    fi
+
+    if test $(($fsckResult | 4)) = $fsckResult; then
+        echo "$device has unrepaired errors, please fix them manually."
+        fail
+    fi
+
+    if test $fsckResult -ge 8; then
+        echo "fsck on $device failed."
+        fail
+    fi
+
+    return 0
+}
+
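+# Escape a string for use in /etc/fstab: spaces become \040 and tabs become
+# \011, e.g. "/media/my drive" -> "/media/my\040drive".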
+escapeFstab() {
+    local original="$1"
+
+    # Replace space
+    local escaped="${original// /\\040}"
+    # Replace tab
+    echo "${escaped//$'\t'/\\011}"
+}
+
+# Function for mounting a file system.
+mountFS() {
+    local device="$1"
+    local mountPoint="$2"
+    local options="$3"
+    local fsType="$4"
+
+    if [ "$fsType" = auto ]; then
+        fsType=$(blkid -o value -s TYPE "$device")
+        if [ -z "$fsType" ]; then fsType=auto; fi
+    fi
+
+    # Filter out x- options, which busybox doesn't do yet.
+    local optionsFiltered="$(IFS=,; for i in $options; do if [ "${i:0:2}" != "x-" ]; then echo -n $i,; fi; done)"
+    # Prefix (lower|upper|work)dir with /mnt-root (overlayfs)
+    local optionsPrefixed="$( echo "$optionsFiltered" | sed -E 's#\<(lowerdir|upperdir|workdir)=#\1=/mnt-root#g' )"
+
+    echo "$device /mnt-root$mountPoint $fsType $optionsPrefixed" >> /etc/fstab
+
+    checkFS "$device" "$fsType"
+
+    # Create backing directories for overlayfs
+    if [ "$fsType" = overlay ]; then
+        for i in upper work; do
+             dir="$( echo "$optionsPrefixed" | grep -o "${i}dir=[^,]*" )"
+             mkdir -m 0700 -p "${dir##*=}"
+        done
+    fi
+
+    info "mounting $device on $mountPoint..."
+
+    makeMountPoint "$device" "$mountPoint" "$optionsPrefixed"
+
+    # For ZFS and CIFS mounts, retry a few times before giving up.
+    # We do this for ZFS as a workaround for issue NixOS/nixpkgs#25383.
+    local n=0
+    while true; do
+        mount "/mnt-root$mountPoint" && break
+        if [ \( "$fsType" != cifs -a "$fsType" != zfs \) -o "$n" -ge 10 ]; then fail; break; fi
+        echo "retrying..."
+        sleep 1
+        n=$((n + 1))
+    done
+
+    # For bind mounts, busybox has a tendency to ignore options, which can be a
+    # security issue (e.g. "nosuid"). Remounting the partition seems to fix the
+    # issue.
+    mount "/mnt-root$mountPoint" -o "remount,$optionsPrefixed"
+
+    [ "$mountPoint" == "/" ] &&
+        [ -f "/mnt-root/etc/NIXOS_LUSTRATE" ] &&
+        lustrateRoot "/mnt-root"
+
+    true
+}
+
+lustrateRoot () {
+    local root="$1"
+
+    echo
+    echo -e "\e[1;33m<<< @distroName@ is now lustrating the root filesystem (cruft goes to /old-root) >>>\e[0m"
+    echo
+
+    mkdir -m 0755 -p "$root/old-root.tmp"
+
+    echo
+    echo "Moving impurities out of the way:"
+    for d in "$root"/*
+    do
+        [ "$d" == "$root/nix"          ] && continue
+        [ "$d" == "$root/boot"         ] && continue # Don't render the system unbootable
+        [ "$d" == "$root/old-root.tmp" ] && continue
+
+        mv -v "$d" "$root/old-root.tmp"
+    done
+
+    # Use .tmp to make sure subsequent invocations don't clash
+    mv -v "$root/old-root.tmp" "$root/old-root"
+
+    mkdir -m 0755 -p "$root/etc"
+    touch "$root/etc/NIXOS"
+
+    exec 4< "$root/old-root/etc/NIXOS_LUSTRATE"
+
+    echo
+    echo "Restoring selected impurities:"
+    while read -u 4 keeper; do
+        dirname="$(dirname "$keeper")"
+        mkdir -m 0755 -p "$root/$dirname"
+        cp -av "$root/old-root/$keeper" "$root/$keeper"
+    done
+
+    exec 4>&-
+}
+
+
+
+if test -e /sys/power/resume -a -e /sys/power/disk; then
+    if test -n "@resumeDevice@" && waitDevice "@resumeDevice@"; then
+        resumeDev="@resumeDevice@"
+        resumeInfo="$(udevadm info -q property "$resumeDev" )"
+    else
+        for sd in @resumeDevices@; do
+            # Try to detect resume device. According to Ubuntu bug:
+            # https://bugs.launchpad.net/ubuntu/+source/pm-utils/+bug/923326/comments/1
+            # when there are multiple swap devices, we can't know where the hibernate
+            # image will reside, so we check all of them for the "swsuspend" blkid type.
+            if waitDevice "$sd"; then
+                resumeInfo="$(udevadm info -q property "$sd")"
+                if [ "$(echo "$resumeInfo" | sed -n 's/^ID_FS_TYPE=//p')" = "swsuspend" ]; then
+                    resumeDev="$sd"
+                    break
+                fi
+            fi
+        done
+    fi
+    if test -n "$resumeDev"; then
+        resumeMajor="$(echo "$resumeInfo" | sed -n 's/^MAJOR=//p')"
+        resumeMinor="$(echo "$resumeInfo" | sed -n 's/^MINOR=//p')"
+        echo "$resumeMajor:$resumeMinor" > /sys/power/resume 2> /dev/null || echo "failed to resume..."
+    fi
+fi
+
+@postResumeCommands@
+
+# If we have a path to an iso file, find the iso and link it to /dev/root
+if [ -n "$isoPath" ]; then
+  mkdir -p /findiso
+
+  for delay in 5 10; do
+    blkid | while read -r line; do
+      device=$(echo "$line" | sed 's/:.*//')
+      type=$(echo "$line" | sed 's/.*TYPE="\([^"]*\)".*/\1/')
+
+      mount -t "$type" "$device" /findiso
+      if [ -e "/findiso$isoPath" ]; then
+        ln -sf "/findiso$isoPath" /dev/root
+        break 2
+      else
+        umount /findiso
+      fi
+    done
+
+    sleep "$delay"
+  done
+fi
+
+# Try to find and mount the root device.
+mkdir -p $targetRoot
+
+exec 3< @fsInfo@
+
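+# @fsInfo@ lists every filesystem as four consecutive lines: mount point,
+# device, filesystem type, and comma-separated mount options (see the fsInfo
+# attribute in stage-1.nix).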
+while read -u 3 mountPoint; do
+    read -u 3 device
+    read -u 3 fsType
+    read -u 3 options
+
+    # !!! Really quick hack to support bind mounts, i.e., where the
+    # "device" should be taken relative to /mnt-root, not /.  Assume
+    # that every device that starts with / but doesn't start with /dev
+    # is a bind mount.
+    pseudoDevice=
+    case $device in
+        /dev/*)
+            ;;
+        //*)
+            # Don't touch SMB/CIFS paths.
+            pseudoDevice=1
+            ;;
+        /*)
+            device=/mnt-root$device
+            ;;
+        *)
+            # Not an absolute path; assume that it's a pseudo-device
+            # like an NFS path (e.g. "server:/path").
+            pseudoDevice=1
+            ;;
+    esac
+
+    if test -z "$pseudoDevice" && ! waitDevice "$device"; then
+        # If it doesn't appear, try to mount it anyway (and
+        # probably fail).  This is a fallback for non-device "devices"
+        # that we don't properly recognise.
+        echo "Timed out waiting for device $device, trying to mount anyway."
+    fi
+
+    # Wait once more for the udev queue to empty, just in case it's
+    # doing something with $device right now.
+    udevadm settle
+
+    # If copytoram is enabled: skip mounting the ISO and copy its content to a tmpfs.
+    if [ -n "$copytoram" ] && [ "$device" = /dev/root ] && [ "$mountPoint" = /iso ]; then
+      fsType=$(blkid -o value -s TYPE "$device")
+      fsSize=$(blockdev --getsize64 "$device" || stat -Lc '%s' "$device")
+
+      mkdir -p /tmp-iso
+      mount -t "$fsType" /dev/root /tmp-iso
+      mountFS tmpfs /iso size="$fsSize" tmpfs
+
+      cp -r /tmp-iso/* /mnt-root/iso/
+
+      umount /tmp-iso
+      rmdir /tmp-iso
+      if [ -n "$isoPath" ] && [ $fsType = "iso9660" ] && mountpoint -q /findiso; then
+       umount /findiso
+      fi
+      continue
+    fi
+
+    if [ "$mountPoint" = / ] && [ "$device" = tmpfs ] && [ ! -z "$persistence" ]; then
+        echo persistence...
+        waitDevice "$persistence"
+        echo enabling persistence...
+        mountFS "$persistence" "$mountPoint" "$persistence_opt" "auto"
+        continue
+    fi
+
+    mountFS "$device" "$(escapeFstab "$mountPoint")" "$(escapeFstab "$options")" "$fsType"
+done
+
+exec 3>&-
+
+
+@postMountCommands@
+
+
+# Emit a udev rule for /dev/root to prevent systemd from complaining.
+if [ -e /mnt-root/iso ]; then
+    eval $(udevadm info --export --export-prefix=ROOT_ --device-id-of-file=/mnt-root/iso)
+else
+    eval $(udevadm info --export --export-prefix=ROOT_ --device-id-of-file=$targetRoot)
+fi
+if [ "$ROOT_MAJOR" -a "$ROOT_MINOR" -a "$ROOT_MAJOR" != 0 ]; then
+    mkdir -p /run/udev/rules.d
+    echo 'ACTION=="add|change", SUBSYSTEM=="block", ENV{MAJOR}=="'$ROOT_MAJOR'", ENV{MINOR}=="'$ROOT_MINOR'", SYMLINK+="root"' > /run/udev/rules.d/61-dev-root-link.rules
+fi
+
+
+# Stop udevd.
+udevadm control --exit
+
+# Reset the logging file descriptors.
+# Do this just before pkill, which will kill the tee process.
+exec 1>&$logOutFd 2>&$logErrFd
+eval "exec $logOutFd>&- $logErrFd>&-"
+
+# Kill any remaining processes, just to be sure we're not taking any
+# with us into stage 2. But keep storage daemons like unionfs-fuse.
+#
+# Storage daemons are distinguished by an @ in front of their command line:
+# https://www.freedesktop.org/wiki/Software/systemd/RootStorageDaemons/
+for pid in $(pgrep -v -f '^@'); do
+    # Make sure we don't kill kernel processes, see #15226 and:
+    # http://stackoverflow.com/questions/12213445/identifying-kernel-threads
+    readlink "/proc/$pid/exe" &> /dev/null || continue
+    # Try to avoid killing ourselves.
+    [ $pid -eq $$ ] && continue
+    kill -9 "$pid"
+done
+
+if test -n "$debug1mounts"; then fail; fi
+
+
+# Restore /proc/sys/kernel/modprobe to its original value.
+echo /sbin/modprobe > /proc/sys/kernel/modprobe
+
+
+# Start stage 2.  `switch_root' deletes all files in the ramfs on the
+# current root.  The path has to be valid in the chroot not outside.
+if [ ! -e "$targetRoot/$stage2Init" ]; then
+    stage2Check=${stage2Init}
+    while [ "$stage2Check" != "${stage2Check%/*}" ] && [ ! -L "$targetRoot/$stage2Check" ]; do
+        stage2Check=${stage2Check%/*}
+    done
+    if [ ! -L "$targetRoot/$stage2Check" ]; then
+        echo "stage 2 init script ($targetRoot/$stage2Init) not found"
+        fail
+    fi
+fi
+
+mkdir -m 0755 -p $targetRoot/proc $targetRoot/sys $targetRoot/dev $targetRoot/run
+
+mount --move /proc $targetRoot/proc
+mount --move /sys $targetRoot/sys
+mount --move /dev $targetRoot/dev
+mount --move /run $targetRoot/run
+
+exec env -i $(type -P switch_root) "$targetRoot" "$stage2Init"
+
+fail # should never be reached
diff --git a/nixpkgs/nixos/modules/system/boot/stage-1.nix b/nixpkgs/nixos/modules/system/boot/stage-1.nix
new file mode 100644
index 000000000000..e990aeea7a14
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/stage-1.nix
@@ -0,0 +1,735 @@
+# This module builds the initial ramdisk, which contains an init
+# script that performs the first stage of booting the system: it loads
+# the modules necessary to mount the root file system, then calls the
+# init in the root file system to start the second boot stage.
+
+{ config, lib, utils, pkgs, ... }:
+
+with lib;
+
+let
+
+  udev = config.systemd.package;
+
+  kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
+
+  modulesTree = config.system.modulesTree.override { name = kernel-name + "-modules"; };
+  firmware = config.hardware.firmware;
+
+
+  # Determine the set of modules that we need to mount the root FS.
+  modulesClosure = pkgs.makeModulesClosure {
+    rootModules = config.boot.initrd.availableKernelModules ++ config.boot.initrd.kernelModules;
+    kernel = modulesTree;
+    firmware = firmware;
+    allowMissing = false;
+  };
+
+
+  # The initrd only has to mount `/` or any FS marked as necessary for
+  # booting (such as the FS containing `/nix/store`, or an FS needed for
+  # mounting `/`, like `/` on a loopback).
+  fileSystems = filter utils.fsNeededForBoot config.system.build.fileSystems;
+
+  # Determine whether zfs-mount(8) is needed.
+  zfsRequiresMountHelper = any (fs: lib.elem "zfsutil" fs.options) fileSystems;
+
+  # A utility for enumerating the shared-library dependencies of a program
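+  # (Usage sketch: `find-libs /path/to/binary` prints the resolved path of each
+  # transitive DT_NEEDED library and fails if one cannot be found.)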
+  findLibs = pkgs.buildPackages.writeShellScriptBin "find-libs" ''
+    set -euo pipefail
+
+    declare -A seen
+    left=()
+
+    patchelf="${pkgs.buildPackages.patchelf}/bin/patchelf"
+
+    function add_needed {
+      rpath="$($patchelf --print-rpath $1)"
+      dir="$(dirname $1)"
+      for lib in $($patchelf --print-needed $1); do
+        left+=("$lib" "$rpath" "$dir")
+      done
+    }
+
+    add_needed "$1"
+
+    while [ ''${#left[@]} -ne 0 ]; do
+      next=''${left[0]}
+      rpath=''${left[1]}
+      ORIGIN=''${left[2]}
+      left=("''${left[@]:3}")
+      if [ -z ''${seen[$next]+x} ]; then
+        seen[$next]=1
+
+        # Ignore the dynamic linker which for some reason appears as a DT_NEEDED of glibc but isn't in glibc's RPATH.
+        case "$next" in
+          ld*.so.?) continue;;
+        esac
+
+        IFS=: read -ra paths <<< $rpath
+        res=
+        for path in "''${paths[@]}"; do
+          path=$(eval "echo $path")
+          if [ -f "$path/$next" ]; then
+              res="$path/$next"
+              echo "$res"
+              add_needed "$res"
+              break
+          fi
+        done
+        if [ -z "$res" ]; then
+          echo "Couldn't satisfy dependency $next" >&2
+          exit 1
+        fi
+      fi
+    done
+  '';
+
+  # Some additional utilities needed in stage 1, like mount, lvm, fsck
+  # etc.  We don't want to bring in all of those packages, so we just
+  # copy what we need.  Instead of using statically linked binaries,
+  # we copy the required shared libraries from Glibc and use patchelf
+  # to make everything work.
+  extraUtils = pkgs.runCommand "extra-utils"
+    { nativeBuildInputs = with pkgs.buildPackages; [ nukeReferences bintools ];
+      allowedReferences = [ "out" ]; # prevent accidents like glibc being included in the initrd
+    }
+    ''
+      set +o pipefail
+
+      mkdir -p $out/bin $out/lib
+      ln -s $out/bin $out/sbin
+
+      copy_bin_and_libs () {
+        [ -f "$out/bin/$(basename $1)" ] && rm "$out/bin/$(basename $1)"
+        cp -pdv $1 $out/bin
+      }
+
+      # Copy BusyBox.
+      for BIN in ${pkgs.busybox}/{s,}bin/*; do
+        copy_bin_and_libs $BIN
+      done
+
+      ${optionalString zfsRequiresMountHelper ''
+        # Filesystems using the "zfsutil" option can be mounted without the
+        # mount.zfs(8) helper, but the helper is required to ensure that ZFS
+        # properties are applied as mount options.
+        #
+        # BusyBox does not use the ZFS helper in the first place.
+        # util-linux searches /sbin/ as last path for helpers (stage-1-init.sh
+        # must symlink it to the store PATH).
+        # Without the helper program, both `mount`s silently fall back to internal
+        # code, using default options and effectively ignoring security-relevant
+        # ZFS properties such as `setuid=off` and `exec=off` (unless manually
+        # duplicated in `fileSystems.*.options`, defeating "zfsutil"'s purpose).
+        copy_bin_and_libs ${lib.getOutput "mount" pkgs.util-linux}/bin/mount
+        copy_bin_and_libs ${config.boot.zfs.package}/bin/mount.zfs
+      ''}
+
+      # Copy some util-linux stuff.
+      copy_bin_and_libs ${pkgs.util-linux}/sbin/blkid
+
+      # Copy dmsetup and lvm.
+      copy_bin_and_libs ${getBin pkgs.lvm2}/bin/dmsetup
+      copy_bin_and_libs ${getBin pkgs.lvm2}/bin/lvm
+
+      # Copy udev.
+      copy_bin_and_libs ${udev}/bin/udevadm
+      copy_bin_and_libs ${udev}/lib/systemd/systemd-sysctl
+      for BIN in ${udev}/lib/udev/*_id; do
+        copy_bin_and_libs $BIN
+      done
+      # systemd-udevd is only a symlink to udevadm these days
+      ln -sf udevadm $out/bin/systemd-udevd
+
+      # Copy modprobe.
+      copy_bin_and_libs ${pkgs.kmod}/bin/kmod
+      ln -sf kmod $out/bin/modprobe
+
+      # Copy multipath.
+      ${optionalString config.services.multipath.enable ''
+        copy_bin_and_libs ${config.services.multipath.package}/bin/multipath
+        copy_bin_and_libs ${config.services.multipath.package}/bin/multipathd
+        # Copy lib/multipath manually.
+        cp -rpv ${config.services.multipath.package}/lib/multipath $out/lib
+      ''}
+
+      # Copy secrets if needed.
+      #
+      # TODO: move out to a separate script; see #85000.
+      ${optionalString (!config.boot.loader.supportsInitrdSecrets)
+          (concatStringsSep "\n" (mapAttrsToList (dest: source:
+             let source' = if source == null then dest else source; in
+               ''
+                  mkdir -p $(dirname "$out/secrets/${dest}")
+                  # Some programs (e.g. ssh) don't like secrets to be
+                  # symlinks, so we use `cp -L` here to match the
+                  # behaviour when secrets are natively supported.
+                  cp -Lr ${source'} "$out/secrets/${dest}"
+                ''
+          ) config.boot.initrd.secrets))
+       }
+
+      ${config.boot.initrd.extraUtilsCommands}
+
+      # Copy ld manually since it isn't detected correctly
+      cp -pv ${pkgs.stdenv.cc.libc.out}/lib/ld*.so.? $out/lib
+
+      # Copy all of the needed libraries in a consistent order so
+      # duplicates are resolved the same way.
+      find $out/bin $out/lib -type f | sort | while read BIN; do
+        echo "Copying libs for executable $BIN"
+        for LIB in $(${findLibs}/bin/find-libs $BIN); do
+          TGT="$out/lib/$(basename $LIB)"
+          if [ ! -f "$TGT" ]; then
+            SRC="$(readlink -e $LIB)"
+            cp -pdv "$SRC" "$TGT"
+          fi
+        done
+      done
+
+      # Strip binaries further than normal.
+      chmod -R u+w $out
+      stripDirs "$STRIP" "$RANLIB" "lib bin" "-s"
+
+      # Strip residual store path references, then run patchelf so the programs refer to the copied libraries.
+      find $out/bin $out/lib -type f | while read i; do
+        nuke-refs -e $out $i
+      done
+
+      find $out/bin -type f | while read i; do
+        echo "patching $i..."
+        patchelf --set-interpreter $out/lib/ld*.so.? --set-rpath $out/lib $i || true
+      done
+
+      find $out/lib -type f \! -name 'ld*.so.?' | while read i; do
+        echo "patching $i..."
+        patchelf --set-rpath $out/lib $i
+      done
+
+      if [ -z "${toString (pkgs.stdenv.hostPlatform != pkgs.stdenv.buildPlatform)}" ]; then
+      # Make sure that the patchelf'ed binaries still work.
+      echo "testing patched programs..."
+      $out/bin/ash -c 'echo hello world' | grep "hello world"
+      ${if zfsRequiresMountHelper then ''
+        $out/bin/mount -V 1>&1 | grep -q "mount from util-linux"
+        $out/bin/mount.zfs -h 2>&1 | grep -q "Usage: mount.zfs"
+      '' else ''
+        $out/bin/mount --help 2>&1 | grep -q "BusyBox"
+      ''}
+      $out/bin/blkid -V 2>&1 | grep -q 'libblkid'
+      $out/bin/udevadm --version
+      $out/bin/dmsetup --version 2>&1 | tee -a log | grep -q "version:"
+      LVM_SYSTEM_DIR=$out $out/bin/lvm version 2>&1 | tee -a log | grep -q "LVM"
+      ${optionalString config.services.multipath.enable ''
+        ($out/bin/multipath || true) 2>&1 | grep -q 'need to be root'
+        ($out/bin/multipathd || true) 2>&1 | grep -q 'need to be root'
+      ''}
+
+      ${config.boot.initrd.extraUtilsCommandsTest}
+      fi
+    ''; # */
+
+
+  # Networkd link files are used by udev to set up interfaces early in boot.
+  # This must be done in stage 1 to avoid race conditions between udev and
+  # network daemons.
+  linkUnits = pkgs.runCommand "link-units" {
+      allowedReferences = [ extraUtils ];
+      preferLocalBuild = true;
+    } (''
+      mkdir -p $out
+      cp -v ${udev}/lib/systemd/network/*.link $out/
+      '' + (
+      let
+        links = filterAttrs (n: v: hasSuffix ".link" n) config.systemd.network.units;
+        files = mapAttrsToList (n: v: "${v.unit}/${n}") links;
+      in
+        concatMapStringsSep "\n" (file: "cp -v ${file} $out/") files
+      ));
+
+  udevRules = pkgs.runCommand "udev-rules" {
+      allowedReferences = [ extraUtils ];
+      preferLocalBuild = true;
+    } ''
+      mkdir -p $out
+
+      cp -v ${udev}/lib/udev/rules.d/60-cdrom_id.rules $out/
+      cp -v ${udev}/lib/udev/rules.d/60-persistent-storage.rules $out/
+      cp -v ${udev}/lib/udev/rules.d/75-net-description.rules $out/
+      cp -v ${udev}/lib/udev/rules.d/80-drivers.rules $out/
+      cp -v ${udev}/lib/udev/rules.d/80-net-setup-link.rules $out/
+      cp -v ${pkgs.lvm2}/lib/udev/rules.d/*.rules $out/
+      ${config.boot.initrd.extraUdevRulesCommands}
+
+      for i in $out/*.rules; do
+          substituteInPlace $i \
+            --replace ata_id ${extraUtils}/bin/ata_id \
+            --replace scsi_id ${extraUtils}/bin/scsi_id \
+            --replace cdrom_id ${extraUtils}/bin/cdrom_id \
+            --replace ${pkgs.coreutils}/bin/basename ${extraUtils}/bin/basename \
+            --replace ${pkgs.util-linux}/bin/blkid ${extraUtils}/bin/blkid \
+            --replace ${getBin pkgs.lvm2}/bin ${extraUtils}/bin \
+            --replace ${pkgs.mdadm}/sbin ${extraUtils}/sbin \
+            --replace ${pkgs.bash}/bin/sh ${extraUtils}/bin/sh \
+            --replace ${udev} ${extraUtils}
+      done
+
+      # Work around a bug in QEMU, which doesn't implement the "READ
+      # DISC INFORMATION" SCSI command:
+      #   https://bugzilla.redhat.com/show_bug.cgi?id=609049
+      # As a result, `cdrom_id' doesn't print
+      # ID_CDROM_MEDIA_TRACK_COUNT_DATA, which in turn prevents the
+      # /dev/disk/by-label symlinks from being created.  We need these
+      # in the NixOS installation CD, so use ID_CDROM_MEDIA in the
+      # corresponding udev rules for now.  This was the behaviour in
+      # udev <= 154.  See also
+      #   https://www.spinics.net/lists/hotplug/msg03935.html
+      substituteInPlace $out/60-persistent-storage.rules \
+        --replace ID_CDROM_MEDIA_TRACK_COUNT_DATA ID_CDROM_MEDIA
+    ''; # */
+
+
+  # The init script of boot stage 1 (loading kernel modules for
+  # mounting the root FS).
+  bootStage1 = pkgs.substituteAll {
+    src = ./stage-1-init.sh;
+
+    shell = "${extraUtils}/bin/ash";
+
+    isExecutable = true;
+
+    postInstall = ''
+      echo checking syntax
+      # check both with bash
+      ${pkgs.buildPackages.bash}/bin/sh -n $target
+      # and with ash shell, just in case
+      ${pkgs.buildPackages.busybox}/bin/ash -n $target
+    '';
+
+    inherit linkUnits udevRules extraUtils;
+
+    inherit (config.boot) resumeDevice;
+
+    inherit (config.system.nixos) distroName;
+
+    inherit (config.system.build) earlyMountScript;
+
+    inherit (config.boot.initrd) checkJournalingFS verbose
+      preLVMCommands preDeviceCommands postDeviceCommands postResumeCommands postMountCommands preFailCommands kernelModules;
+
+    resumeDevices = map (sd: if sd ? device then sd.device else "/dev/disk/by-label/${sd.label}")
+                    (filter (sd: hasPrefix "/dev/" sd.device && !sd.randomEncryption.enable
+                             # Don't include zram devices
+                             && !(hasPrefix "/dev/zram" sd.device)
+                            ) config.swapDevices);
+
+    fsInfo =
+      let f = fs: [ fs.mountPoint (if fs.device != null then fs.device else "/dev/disk/by-label/${fs.label}") fs.fsType (builtins.concatStringsSep "," fs.options) ];
+      in pkgs.writeText "initrd-fsinfo" (concatStringsSep "\n" (concatMap f fileSystems));
+
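+    # For example (hypothetical value): networking.hostId = "8425e349" writes
+    # the bytes 49 e3 25 84 to /etc/hostid on a little-endian machine
+    # (84 25 e3 49 on big-endian).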
+    setHostId = optionalString (config.networking.hostId != null) ''
+      hi="${config.networking.hostId}"
+      ${if pkgs.stdenv.isBigEndian then ''
+        echo -ne "\x''${hi:0:2}\x''${hi:2:2}\x''${hi:4:2}\x''${hi:6:2}" > /etc/hostid
+      '' else ''
+        echo -ne "\x''${hi:6:2}\x''${hi:4:2}\x''${hi:2:2}\x''${hi:0:2}" > /etc/hostid
+      ''}
+    '';
+  };
+
+
+  # The closure of the init script of boot stage 1 is what we put in
+  # the initial RAM disk.
+  initialRamdisk = pkgs.makeInitrd {
+    name = "initrd-${kernel-name}";
+    inherit (config.boot.initrd) compressor compressorArgs prepend;
+
+    contents =
+      [ { object = bootStage1;
+          symlink = "/init";
+        }
+        { object = "${modulesClosure}/lib";
+          symlink = "/lib";
+        }
+        { object = pkgs.runCommand "initrd-kmod-blacklist-ubuntu" {
+              src = "${pkgs.kmod-blacklist-ubuntu}/modprobe.conf";
+              preferLocalBuild = true;
+            } ''
+              target=$out
+              ${pkgs.buildPackages.perl}/bin/perl -0pe 's/## file: iwlwifi.conf(.+?)##/##/s;' $src > $out
+            '';
+          symlink = "/etc/modprobe.d/ubuntu.conf";
+        }
+        { object = config.environment.etc."modprobe.d/nixos.conf".source;
+          symlink = "/etc/modprobe.d/nixos.conf";
+        }
+        { object = pkgs.kmod-debian-aliases;
+          symlink = "/etc/modprobe.d/debian.conf";
+        }
+      ] ++ lib.optionals config.services.multipath.enable [
+        { object = pkgs.runCommand "multipath.conf" {
+              src = config.environment.etc."multipath.conf".text;
+              preferLocalBuild = true;
+            } ''
+              target=$out
+              printf "$src" > $out
+              substituteInPlace $out \
+                --replace ${config.services.multipath.package}/lib ${extraUtils}/lib
+            '';
+          symlink = "/etc/multipath.conf";
+        }
+      ] ++ (lib.mapAttrsToList
+        (symlink: options:
+          {
+            inherit symlink;
+            object = options.source;
+          }
+        )
+        config.boot.initrd.extraFiles);
+  };
+
+  # Script to add secret files to the initrd at bootloader update time
+  initialRamdiskSecretAppender =
+    let
+      compressorExe = initialRamdisk.compressorExecutableFunction pkgs;
+    in pkgs.writeScriptBin "append-initrd-secrets"
+      ''
+        #!${pkgs.bash}/bin/bash -e
+        function usage {
+          echo "USAGE: $0 INITRD_FILE" >&2
+          echo "Appends this configuration's secrets to INITRD_FILE" >&2
+        }
+
+        if [ $# -ne 1 ]; then
+          usage
+          exit 1
+        fi
+
+        if [ "$1"x = "--helpx" ]; then
+          usage
+          exit 0
+        fi
+
+        ${lib.optionalString (config.boot.initrd.secrets == {})
+            "exit 0"}
+
+        export PATH=${pkgs.coreutils}/bin:${pkgs.libarchive}/bin:${pkgs.gzip}/bin:${pkgs.findutils}/bin
+
+        function cleanup {
+          if [ -n "$tmp" -a -d "$tmp" ]; then
+            rm -fR "$tmp"
+          fi
+        }
+        trap cleanup EXIT
+
+        tmp=$(mktemp -d ''${TMPDIR:-/tmp}/initrd-secrets.XXXXXXXXXX)
+
+        ${lib.concatStringsSep "\n" (mapAttrsToList (dest: source:
+            let source' = if source == null then dest else toString source; in
+              ''
+                mkdir -p $(dirname "$tmp/.initrd-secrets/${dest}")
+                cp -a ${source'} "$tmp/.initrd-secrets/${dest}"
+              ''
+          ) config.boot.initrd.secrets)
+         }
+
+        # mindepth 1 so that we don't change the mode of /
+        (cd "$tmp" && find . -mindepth 1 | xargs touch -amt 197001010000 && find . -mindepth 1 -print0 | sort -z | bsdtar --uid 0 --gid 0 -cnf - -T - | bsdtar --null -cf - --format=newc @-) | \
+          ${compressorExe} ${lib.escapeShellArgs initialRamdisk.compressorArgs} >> "$1"
+      '';
+
+in
+
+{
+  options = {
+
+    boot.resumeDevice = mkOption {
+      type = types.str;
+      default = "";
+      example = "/dev/sda3";
+      description = lib.mdDoc ''
+        Device for manual resume attempt during boot. This should be used primarily
+        if you want to resume from a file. If left empty, the swap partitions are used.
+        Specify here the device on which the file resides.
+        You should also use {var}`boot.kernelParams` to specify
+        `«resume_offset»`.
+      '';
+    };
+
+    boot.initrd.enable = mkOption {
+      type = types.bool;
+      default = !config.boot.isContainer;
+      defaultText = literalExpression "!config.boot.isContainer";
+      description = lib.mdDoc ''
+        Whether to enable the NixOS initial RAM disk (initrd). This may be
+        needed to perform some initialisation tasks (like mounting
+        network/encrypted file systems) before continuing the boot process.
+      '';
+    };
+
+    boot.initrd.extraFiles = mkOption {
+      default = { };
+      type = types.attrsOf
+        (types.submodule {
+          options = {
+            source = mkOption {
+              type = types.package;
+              description = lib.mdDoc "The object to make available inside the initrd.";
+            };
+          };
+        });
+      description = lib.mdDoc ''
+        Extra files to link and copy in to the initrd.
+      '';
+    };
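+
+    # A minimal usage sketch (file name and contents are hypothetical), using
+    # pkgs.writeText to produce the object that gets linked into the initrd:
+    #   boot.initrd.extraFiles."/etc/example.conf".source =
+    #     pkgs.writeText "example.conf" "greeting=hi";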
+
+    boot.initrd.prepend = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        Other initrd files to prepend to the final initrd we are building.
+      '';
+    };
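+
+    # A hedged example (the `microcodeImage` derivation is hypothetical): prepend
+    # an already-built CPU microcode image to the generated initrd:
+    #   boot.initrd.prepend = [ "${microcodeImage}/intel-ucode.img" ];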
+
+    boot.initrd.checkJournalingFS = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to run {command}`fsck` on journaling filesystems such as ext3.
+      '';
+    };
+
+    boot.initrd.preLVMCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed immediately before LVM discovery.
+      '';
+    };
+
+    boot.initrd.preDeviceCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed before udev is started to create
+        device nodes.
+      '';
+    };
+
+    boot.initrd.postDeviceCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed immediately after stage 1 of the
+        boot has loaded kernel modules and created device nodes in
+        {file}`/dev`.
+      '';
+    };
+
+    boot.initrd.postResumeCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed immediately after attempting to resume.
+      '';
+    };
+
+    boot.initrd.postMountCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed immediately after the stage 1
+        filesystems have been mounted.
+      '';
+    };
+
+    boot.initrd.preFailCommands = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed before the failure prompt is shown.
+      '';
+    };
+
+    boot.initrd.extraUtilsCommands = mkOption {
+      internal = true;
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed in the builder of the
+        extra-utils derivation.  This can be used to provide
+        additional utilities in the initial ramdisk.
+      '';
+    };
+
+    boot.initrd.extraUtilsCommandsTest = mkOption {
+      internal = true;
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed in the builder of the
+        extra-utils derivation after patchelf has done its
+        job.  This can be used to test additional utilities
+        copied in extraUtilsCommands.
+      '';
+    };
+
+    boot.initrd.extraUdevRulesCommands = mkOption {
+      internal = true;
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Shell commands to be executed in the builder of the
+        udev-rules derivation.  This can be used to add
+        additional udev rules in the initial ramdisk.
+      '';
+    };
+
+    boot.initrd.compressor = mkOption {
+      default = (
+        if lib.versionAtLeast config.boot.kernelPackages.kernel.version "5.9"
+        then "zstd"
+        else "gzip"
+      );
+      defaultText = literalMD "`zstd` if the kernel supports it (5.9+), `gzip` if not";
+      type = types.either types.str (types.functionTo types.str);
+      description = lib.mdDoc ''
+        The compressor to use on the initrd image. May be any of:
+
+        - The name of one of the predefined compressors, see {file}`pkgs/build-support/kernel/initrd-compressor-meta.nix` for the definitions.
+        - A function which, given the nixpkgs package set, returns the path to a compressor tool, e.g. `pkgs: "''${pkgs.pigz}/bin/pigz"`
+        - (not recommended, because it does not work when cross-compiling) the full path to a compressor tool, e.g. `"''${pkgs.pigz}/bin/pigz"`
+
+        The given program should read data from stdin and write it to stdout compressed.
+      '';
+      example = "xz";
+    };
+
+    boot.initrd.compressorArgs = mkOption {
+      default = null;
+      type = types.nullOr (types.listOf types.str);
+      description = lib.mdDoc "Arguments to pass to the compressor for the initrd image, or null to use the compressor's defaults.";
+    };
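+
+    # A hedged example (the argument value is illustrative) combining the two
+    # compressor options:
+    #   boot.initrd.compressor = "zstd";
+    #   boot.initrd.compressorArgs = [ "-19" ];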
+
+    boot.initrd.secrets = mkOption
+      { default = {};
+        type = types.attrsOf (types.nullOr types.path);
+        description =
+          lib.mdDoc ''
+            Secrets to append to the initrd. The attribute name is the
+            path the secret should have inside the initrd, the value
+            is the path it should be copied from (or null for the same
+            path inside and out).
+          '';
+        example = literalExpression
+          ''
+            { "/etc/dropbear/dropbear_rsa_host_key" =
+                ./secret-dropbear-key;
+            }
+          '';
+      };
+
+    boot.initrd.supportedFilesystems = mkOption {
+      default = [ ];
+      example = [ "btrfs" ];
+      type = types.listOf types.str;
+      description = lib.mdDoc "Names of supported filesystem types in the initial ramdisk.";
+    };
+
+    boot.initrd.verbose = mkOption {
+      default = true;
+      type = types.bool;
+      description =
+        lib.mdDoc ''
+          Verbosity of the initrd. Please note that disabling verbosity removes
+          only the mandatory messages generated by the NixOS scripts. For a
+          completely silent boot, you might also want to set the two following
+          configuration options:
+
+          - `boot.consoleLogLevel = 0;`
+          - `boot.kernelParams = [ "quiet" "udev.log_level=3" ];`
+        '';
+    };
+
+    boot.loader.supportsInitrdSecrets = mkOption
+      { internal = true;
+        default = false;
+        type = types.bool;
+        description =
+          lib.mdDoc ''
+            Whether the bootloader setup runs append-initrd-secrets.
+            If not, any needed secrets must be copied into the initrd
+            and thus added to the store.
+          '';
+      };
+
+    fileSystems = mkOption {
+      type = with lib.types; attrsOf (submodule {
+        options.neededForBoot = mkOption {
+          default = false;
+          type = types.bool;
+          description = lib.mdDoc ''
+            If set, this file system will be mounted in the initial ramdisk.
+            Note that the file system will always be mounted in the initial
+            ramdisk if its mount point is one of the following:
+            ${concatStringsSep ", " (
+              forEach utils.pathsNeededForBoot (i: "{file}`${i}`")
+            )}.
+          '';
+        };
+      });
+    };
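+
+    # A short usage sketch (mount point chosen for illustration): force a
+    # filesystem to be mounted already in stage 1:
+    #   fileSystems."/var/log".neededForBoot = true;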
+
+  };
+
+  config = mkIf config.boot.initrd.enable {
+    assertions = [
+      { assertion = any (fs: fs.mountPoint == "/") fileSystems;
+        message = "The ‘fileSystems’ option does not specify your root file system.";
+      }
+      { assertion = let inherit (config.boot) resumeDevice; in
+          resumeDevice == "" || builtins.substring 0 1 resumeDevice == "/";
+        message = "boot.resumeDevice has to be an absolute path."
+          + " Old \"x:y\" style is no longer supported.";
+      }
+      # TODO: remove when #85000 is fixed
+      { assertion = !config.boot.loader.supportsInitrdSecrets ->
+          all (source:
+            builtins.isPath source ||
+            (builtins.isString source && hasPrefix builtins.storeDir source))
+          (attrValues config.boot.initrd.secrets);
+        message = ''
+          boot.initrd.secrets values must be unquoted paths when
+          using a bootloader that doesn't natively support initrd
+          secrets, e.g.:
+
+            boot.initrd.secrets = {
+              "/etc/secret" = /path/to/secret;
+            };
+
+          Note that this will result in all secrets being stored
+          world-readable in the Nix store!
+        '';
+      }
+    ];
+
+    system.build = mkMerge [
+      { inherit bootStage1 initialRamdiskSecretAppender extraUtils; }
+
+      # generated in nixos/modules/system/boot/systemd/initrd.nix
+      (mkIf (!config.boot.initrd.systemd.enable) { inherit initialRamdisk; })
+    ];
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "TMPFS")
+      (isYes "BLK_DEV_INITRD")
+    ];
+
+    boot.initrd.supportedFilesystems = map (fs: fs.fsType) fileSystems;
+  };
+
+  imports = [
+    (mkRenamedOptionModule [ "boot" "initrd" "mdadmConf" ] [ "boot" "swraid" "mdadmConf" ])
+  ];
+}
diff --git a/nixpkgs/nixos/modules/system/boot/stage-2-init.sh b/nixpkgs/nixos/modules/system/boot/stage-2-init.sh
new file mode 100755
index 000000000000..a89e3d817637
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/stage-2-init.sh
@@ -0,0 +1,147 @@
+#! @shell@
+
+systemConfig=@systemConfig@
+
+export HOME=/root PATH="@path@"
+
+
+if [ "${IN_NIXOS_SYSTEMD_STAGE1:-}" != true ]; then
+    # Process the kernel command line.
+    for o in $(</proc/cmdline); do
+        case $o in
+            boot.debugtrace)
+                # Show each command.
+                set -x
+                ;;
+        esac
+    done
+
+
+    # Print a greeting.
+    echo
+    echo -e "\e[1;32m<<< @distroName@ Stage 2 >>>\e[0m"
+    echo
+
+
+    # Normally, stage 1 mounts the root filesystem read/writable.
+    # However, in some environments, stage 2 is executed directly, and the
+    # root is read-only.  So make it writable here.
+    if [ -z "$container" ]; then
+        mount -n -o remount,rw none /
+    fi
+fi
+
+
+# Likewise, stage 1 mounts /proc, /dev and /sys, so if we don't have a
+# stage 1, we need to do that here.
+if [ ! -e /proc/1 ]; then
+    specialMount() {
+        local device="$1"
+        local mountPoint="$2"
+        local options="$3"
+        local fsType="$4"
+
+        # We must not overwrite this mount because it's bind-mounted
+        # from stage 1's /run
+        if [ "${IN_NIXOS_SYSTEMD_STAGE1:-}" = true ] && [ "${mountPoint}" = /run ]; then
+            return
+        fi
+
+        install -m 0755 -d "$mountPoint"
+        mount -n -t "$fsType" -o "$options" "$device" "$mountPoint"
+    }
+    source @earlyMountScript@
+fi
+
+
+if [ "${IN_NIXOS_SYSTEMD_STAGE1:-}" = true ] || [ ! -c /dev/kmsg ] ; then
+    echo "booting system configuration ${systemConfig}"
+else
+    echo "booting system configuration $systemConfig" > /dev/kmsg
+fi
+
+
+# Make /nix/store a read-only bind mount to enforce immutability of
+# the Nix store.  Note that we can't use "chown root:nixbld" here
+# because users/groups might not exist yet.
+# Silence chown/chmod so they fail gracefully on a read-only filesystem
+# such as squashfs.
+chown -f 0:30000 /nix/store
+chmod -f 1775 /nix/store
+if [ -n "@readOnlyNixStore@" ]; then
+    if ! [[ "$(findmnt --noheadings --output OPTIONS /nix/store)" =~ ro(,|$) ]]; then
+        if [ -z "$container" ]; then
+            mount --bind /nix/store /nix/store
+        else
+            mount --rbind /nix/store /nix/store
+        fi
+        mount -o remount,ro,bind /nix/store
+    fi
+fi
+
+
+if [ "${IN_NIXOS_SYSTEMD_STAGE1:-}" != true ]; then
+    # Use /etc/resolv.conf supplied by systemd-nspawn, if applicable.
+    if [ -n "@useHostResolvConf@" ] && [ -e /etc/resolv.conf ]; then
+        resolvconf -m 1000 -a host </etc/resolv.conf
+    fi
+
+
+    # Log the script output to /dev/kmsg or /run/log/stage-2-init.log.
+    # Only at this point are all the necessary prerequisites ready for these commands.
+    exec {logOutFd}>&1 {logErrFd}>&2
+    if test -w /dev/kmsg; then
+        exec > >(tee -i /proc/self/fd/"$logOutFd" | while read -r line; do
+            if test -n "$line"; then
+                echo "<7>stage-2-init: $line" > /dev/kmsg
+            fi
+        done) 2>&1
+    else
+        mkdir -p /run/log
+        exec > >(tee -i /run/log/stage-2-init.log) 2>&1
+    fi
+fi
+
+
+# Required by the activation script
+install -m 0755 -d /etc
+if [ ! -h "/etc/nixos" ]; then
+    install -m 0755 -d /etc/nixos
+fi
+install -m 01777 -d /tmp
+
+
+# Run the script that performs all configuration activation that does
+# not have to be done at boot time.
+echo "running activation script..."
+$systemConfig/activate
+
+
+# Record the boot configuration.
+ln -sfn "$systemConfig" /run/booted-system
+
+
+# Run any user-specified commands.
+@shell@ @postBootCommands@
+
+
+# Ensure systemd doesn't try to populate /etc, by forcing its first-boot
+# heuristic off. It doesn't matter what's in /etc/machine-id for this purpose,
+# and systemd will immediately fill in the file when it starts, so just
+# creating it is enough. This `: >>` pattern avoids forking and avoids changing
+# the mtime if the file already exists.
+: >> /etc/machine-id
+
+
+# No need to restore the stdout/stderr streams we never redirected and
+# especially no need to start systemd
+if [ "${IN_NIXOS_SYSTEMD_STAGE1:-}" != true ]; then
+    # Reset the logging file descriptors.
+    exec 1>&$logOutFd 2>&$logErrFd
+    exec {logOutFd}>&- {logErrFd}>&-
+
+
+    # Start systemd in a clean environment.
+    echo "starting systemd..."
+    exec @systemdExecutable@ "$@"
+fi
diff --git a/nixpkgs/nixos/modules/system/boot/stage-2.nix b/nixpkgs/nixos/modules/system/boot/stage-2.nix
new file mode 100644
index 000000000000..001380158d5f
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/stage-2.nix
@@ -0,0 +1,82 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  useHostResolvConf = config.networking.resolvconf.enable && config.networking.useHostResolvConf;
+
+  bootStage2 = pkgs.substituteAll {
+    src = ./stage-2-init.sh;
+    shellDebug = "${pkgs.bashInteractive}/bin/bash";
+    shell = "${pkgs.bash}/bin/bash";
+    inherit (config.boot) readOnlyNixStore systemdExecutable extraSystemdUnitPaths;
+    inherit (config.system.nixos) distroName;
+    isExecutable = true;
+    inherit useHostResolvConf;
+    inherit (config.system.build) earlyMountScript;
+    path = lib.makeBinPath ([
+      pkgs.coreutils
+      pkgs.util-linux
+    ] ++ lib.optional useHostResolvConf pkgs.openresolv);
+    postBootCommands = pkgs.writeText "local-cmds"
+      ''
+        ${config.boot.postBootCommands}
+        ${config.powerManagement.powerUpCommands}
+      '';
+  };
+
+in
+
+{
+  options = {
+
+    boot = {
+
+      postBootCommands = mkOption {
+        default = "";
+        example = "rm -f /var/log/messages";
+        type = types.lines;
+        description = lib.mdDoc ''
+          Shell commands to be executed just before systemd is started.
+        '';
+      };
+
+      readOnlyNixStore = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          If set, NixOS will enforce the immutability of the Nix store
+          by making {file}`/nix/store` a read-only bind
+          mount.  Nix will automatically make the store writable when
+          needed.
+        '';
+      };
+
+      systemdExecutable = mkOption {
+        default = "/run/current-system/systemd/lib/systemd/systemd";
+        type = types.str;
+        description = lib.mdDoc ''
+          The program to execute to start systemd.
+        '';
+      };
+
+      extraSystemdUnitPaths = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          Additional paths that get appended to the SYSTEMD_UNIT_PATH environment variable
+          that can contain mutable unit files.
+        '';
+      };
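+
+      # A hedged example (directory name is illustrative) of adding a mutable
+      # unit directory to SYSTEMD_UNIT_PATH:
+      #   boot.extraSystemdUnitPaths = [ "/etc/systemd-mutable/system" ];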
+    };
+
+  };
+
+
+  config = {
+
+    system.build.bootStage2 = bootStage2;
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/stratisroot.nix b/nixpkgs/nixos/modules/system/boot/stratisroot.nix
new file mode 100644
index 000000000000..241d044db2fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/stratisroot.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, utils, ... }:
+let
+  requiredStratisFilesystems = lib.attrsets.filterAttrs (_: x: utils.fsNeededForBoot x && x.stratis.poolUuid != null) config.fileSystems;
+in
+{
+  options = {};
+  config = lib.mkIf (requiredStratisFilesystems != {}) {
+    assertions = [
+      {
+        assertion = config.boot.initrd.systemd.enable;
+        message = "stratis root fs requires systemd stage 1";
+      }
+    ];
+    boot.initrd = {
+      systemd = {
+        storePaths = [
+          "${pkgs.stratisd}/lib/udev/stratis-base32-decode"
+          "${pkgs.stratisd}/lib/udev/stratis-str-cmp"
+          "${pkgs.lvm2.bin}/bin/dmsetup"
+          "${pkgs.stratisd}/libexec/stratisd-min"
+          "${pkgs.stratisd.initrd}/bin/stratis-rootfs-setup"
+        ];
+        packages = [pkgs.stratisd.initrd];
+        extraBin = {
+          thin_check = "${pkgs."thin-provisioning-tools"}/bin/thin_check";
+          thin_repair = "${pkgs."thin-provisioning-tools"}/bin/thin_repair";
+          thin_metadata_size = "${pkgs."thin-provisioning-tools"}/bin/thin_metadata_size";
+          stratis-min = "${pkgs.stratisd}/bin/stratis-min";
+        };
+        services =
+          lib.attrsets.mapAttrs' (
+            mountPoint: fileSystem: {
+              name = "stratis-setup-${fileSystem.stratis.poolUuid}";
+              value = {
+                description = "setup for Stratis root filesystem";
+                unitConfig.DefaultDependencies = "no";
+                conflicts = [ "shutdown.target" "initrd-switch-root.target" ];
+                onFailure = [ "emergency.target" ];
+                unitConfig.OnFailureJobMode = "isolate";
+                wants = [ "stratisd-min.service" "plymouth-start.service" ];
+                wantedBy = [ "initrd.target" ];
+                after = [ "paths.target" "plymouth-start.service" "stratisd-min.service" ];
+                before = [ "initrd.target" "shutdown.target" "initrd-switch-root.target" ];
+                environment.STRATIS_ROOTFS_UUID = fileSystem.stratis.poolUuid;
+                serviceConfig = {
+                  Type = "oneshot";
+                  ExecStart = "${pkgs.stratisd.initrd}/bin/stratis-rootfs-setup";
+                  RemainAfterExit = "yes";
+                };
+              };
+            }
+          ) requiredStratisFilesystems;
+      };
+      availableKernelModules = [ "dm-thin-pool" "dm-crypt" ] ++ [ "aes" "aes_generic" "blowfish" "twofish"
+        "serpent" "cbc" "xts" "lrw" "sha1" "sha256" "sha512"
+        "af_alg" "algif_skcipher"
+      ];
+      services.udev.packages = [
+        pkgs.stratisd.initrd
+        pkgs.lvm2
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd.nix b/nixpkgs/nixos/modules/system/boot/systemd.nix
new file mode 100644
index 000000000000..68a8c1f37ed5
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd.nix
@@ -0,0 +1,662 @@
+{ config, lib, pkgs, utils, ... }:
+
+with utils;
+with systemdUtils.unitOptions;
+with lib;
+
+let
+
+  cfg = config.systemd;
+
+  inherit (systemdUtils.lib)
+    generateUnits
+    targetToUnit
+    serviceToUnit
+    socketToUnit
+    timerToUnit
+    pathToUnit
+    mountToUnit
+    automountToUnit
+    sliceToUnit;
+
+  upstreamSystemUnits =
+    [ # Targets.
+      "basic.target"
+      "sysinit.target"
+      "sockets.target"
+      "exit.target"
+      "graphical.target"
+      "multi-user.target"
+      "network.target"
+      "network-pre.target"
+      "network-online.target"
+      "nss-lookup.target"
+      "nss-user-lookup.target"
+      "time-sync.target"
+    ] ++ optionals cfg.package.withCryptsetup [
+      "cryptsetup.target"
+      "cryptsetup-pre.target"
+      "remote-cryptsetup.target"
+    ] ++ [
+      "sigpwr.target"
+      "timers.target"
+      "paths.target"
+      "rpcbind.target"
+
+      # Rescue mode.
+      "rescue.target"
+      "rescue.service"
+
+      # Udev.
+      "systemd-tmpfiles-setup-dev-early.service"
+      "systemd-udevd-control.socket"
+      "systemd-udevd-kernel.socket"
+      "systemd-udevd.service"
+      "systemd-udev-settle.service"
+      ] ++ (optional (!config.boot.isContainer) "systemd-udev-trigger.service") ++ [
+      # hwdb.bin is managed by NixOS
+      # "systemd-hwdb-update.service"
+
+      # Consoles.
+      "getty.target"
+      "getty-pre.target"
+      "getty@.service"
+      "serial-getty@.service"
+      "console-getty.service"
+      "container-getty@.service"
+      "systemd-vconsole-setup.service"
+
+      # Hardware (started by udev when a relevant device is plugged in).
+      "sound.target"
+      "bluetooth.target"
+      "printer.target"
+      "smartcard.target"
+
+      # Kernel module loading.
+      "systemd-modules-load.service"
+      "kmod-static-nodes.service"
+      "modprobe@.service"
+
+      # Filesystems.
+      "systemd-fsck@.service"
+      "systemd-fsck-root.service"
+      "systemd-growfs@.service"
+      "systemd-growfs-root.service"
+      "systemd-remount-fs.service"
+      "systemd-pstore.service"
+      "local-fs.target"
+      "local-fs-pre.target"
+      "remote-fs.target"
+      "remote-fs-pre.target"
+      "swap.target"
+      "dev-hugepages.mount"
+      "dev-mqueue.mount"
+      "sys-fs-fuse-connections.mount"
+      ] ++ (optional (!config.boot.isContainer) "sys-kernel-config.mount") ++ [
+      "sys-kernel-debug.mount"
+
+      # Maintaining state across reboots.
+      "systemd-random-seed.service"
+      "systemd-backlight@.service"
+      "systemd-rfkill.service"
+      "systemd-rfkill.socket"
+
+      # Hibernate / suspend.
+      "hibernate.target"
+      "suspend.target"
+      "suspend-then-hibernate.target"
+      "sleep.target"
+      "hybrid-sleep.target"
+      "systemd-hibernate.service"
+      "systemd-hybrid-sleep.service"
+      "systemd-suspend.service"
+      "systemd-suspend-then-hibernate.service"
+
+      # Reboot stuff.
+      "reboot.target"
+      "systemd-reboot.service"
+      "poweroff.target"
+      "systemd-poweroff.service"
+      "halt.target"
+      "systemd-halt.service"
+      "shutdown.target"
+      "umount.target"
+      "final.target"
+      "kexec.target"
+      "systemd-kexec.service"
+    ] ++ lib.optional cfg.package.withUtmp "systemd-update-utmp.service" ++ [
+
+      # Password entry.
+      "systemd-ask-password-console.path"
+      "systemd-ask-password-console.service"
+      "systemd-ask-password-wall.path"
+      "systemd-ask-password-wall.service"
+
+      # Slices / containers.
+      "slices.target"
+    ] ++ optionals cfg.package.withImportd [
+      "systemd-importd.service"
+    ] ++ optionals cfg.package.withMachined [
+      "machine.slice"
+      "machines.target"
+      "systemd-machined.service"
+    ] ++ [
+      "systemd-nspawn@.service"
+
+      # Misc.
+      "systemd-sysctl.service"
+    ] ++ optionals cfg.package.withTimedated [
+      "dbus-org.freedesktop.timedate1.service"
+      "systemd-timedated.service"
+    ] ++ optionals cfg.package.withLocaled [
+      "dbus-org.freedesktop.locale1.service"
+      "systemd-localed.service"
+    ] ++ optionals cfg.package.withHostnamed [
+      "dbus-org.freedesktop.hostname1.service"
+      "systemd-hostnamed.service"
+    ] ++ optionals cfg.package.withPortabled [
+      "dbus-org.freedesktop.portable1.service"
+      "systemd-portabled.service"
+    ] ++ [
+      "systemd-exit.service"
+      "systemd-update-done.service"
+    ] ++ cfg.additionalUpstreamSystemUnits;
+
+  upstreamSystemWants =
+    [ "sysinit.target.wants"
+      "sockets.target.wants"
+      "local-fs.target.wants"
+      "multi-user.target.wants"
+      "timers.target.wants"
+    ];
+
+  proxy_env = config.networking.proxy.envVars;
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    systemd.package = mkOption {
+      default = pkgs.systemd;
+      defaultText = literalExpression "pkgs.systemd";
+      type = types.package;
+      description = lib.mdDoc "The systemd package.";
+    };
+
+    systemd.units = mkOption {
+      description = lib.mdDoc "Definition of systemd units.";
+      default = {};
+      type = systemdUtils.types.units;
+    };
+
+    systemd.packages = mkOption {
+      default = [];
+      type = types.listOf types.package;
+      example = literalExpression "[ pkgs.systemd-cryptsetup-generator ]";
+      description = lib.mdDoc "Packages providing systemd units and hooks.";
+    };
+
+    systemd.targets = mkOption {
+      default = {};
+      type = systemdUtils.types.targets;
+      description = lib.mdDoc "Definition of systemd target units.";
+    };
+
+    systemd.services = mkOption {
+      default = {};
+      type = systemdUtils.types.services;
+      description = lib.mdDoc "Definition of systemd service units.";
+    };
+
+    systemd.sockets = mkOption {
+      default = {};
+      type = systemdUtils.types.sockets;
+      description = lib.mdDoc "Definition of systemd socket units.";
+    };
+
+    systemd.timers = mkOption {
+      default = {};
+      type = systemdUtils.types.timers;
+      description = lib.mdDoc "Definition of systemd timer units.";
+    };
+
+    systemd.paths = mkOption {
+      default = {};
+      type = systemdUtils.types.paths;
+      description = lib.mdDoc "Definition of systemd path units.";
+    };
+
+    systemd.mounts = mkOption {
+      default = [];
+      type = systemdUtils.types.mounts;
+      description = lib.mdDoc ''
+        Definition of systemd mount units.
+        This is a list instead of an attrSet, because systemd mandates the names to be derived from
+        the 'where' attribute.
+      '';
+    };
+
+    systemd.automounts = mkOption {
+      default = [];
+      type = systemdUtils.types.automounts;
+      description = lib.mdDoc ''
+        Definition of systemd automount units.
+        This is a list instead of an attrSet, because systemd mandates the names to be derived from
+        the 'where' attribute.
+      '';
+    };
+
+    systemd.slices = mkOption {
+      default = {};
+      type = systemdUtils.types.slices;
+      description = lib.mdDoc "Definition of slice configurations.";
+    };
+
+    systemd.generators = mkOption {
+      type = types.attrsOf types.path;
+      default = {};
+      example = { systemd-gpt-auto-generator = "/dev/null"; };
+      description = lib.mdDoc ''
+        Definition of systemd generators.
+        For each `NAME = VALUE` pair of the attrSet, a link is generated from
+        `/etc/systemd/system-generators/NAME` to `VALUE`.
+      '';
+    };
+
+    systemd.shutdown = mkOption {
+      type = types.attrsOf types.path;
+      default = {};
+      description = lib.mdDoc ''
+        Definition of systemd shutdown executables.
+        For each `NAME = VALUE` pair of the attrSet, a link is generated from
+        `/etc/systemd/system-shutdown/NAME` to `VALUE`.
+      '';
+    };
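+
+    # A hedged usage sketch (hook name and binary chosen for illustration): install
+    # a shutdown hook that syncs disks late in the shutdown sequence:
+    #   systemd.shutdown."my-sync" = "${pkgs.coreutils}/bin/sync";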
+
+    systemd.defaultUnit = mkOption {
+      default = "multi-user.target";
+      type = types.str;
+      description = lib.mdDoc "Default unit started when the system boots.";
+    };
+
+    systemd.ctrlAltDelUnit = mkOption {
+      default = "reboot.target";
+      type = types.str;
+      example = "poweroff.target";
+      description = lib.mdDoc ''
+        Target that should be started when Ctrl-Alt-Delete is pressed.
+      '';
+    };
+
+    systemd.globalEnvironment = mkOption {
+      type = with types; attrsOf (nullOr (oneOf [ str path package ]));
+      default = {};
+      example = { TZ = "CET"; };
+      description = lib.mdDoc ''
+        Environment variables passed to *all* systemd units.
+      '';
+    };
+
+    systemd.managerEnvironment = mkOption {
+      type = with types; attrsOf (nullOr (oneOf [ str path package ]));
+      default = {};
+      example = { SYSTEMD_LOG_LEVEL = "debug"; };
+      description = lib.mdDoc ''
+        Environment variables of PID 1. These variables are
+        *not* passed to started units.
+      '';
+    };
+
+    systemd.enableCgroupAccounting = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable cgroup accounting.
+      '';
+    };
+
+    systemd.enableUnifiedCgroupHierarchy = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable the unified cgroup hierarchy (cgroupsv2).
+      '';
+    };
+
+    systemd.extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      example = "DefaultLimitCORE=infinity";
+      description = lib.mdDoc ''
+        Extra config options for systemd. See systemd-system.conf(5) man page
+        for available options.
+      '';
+    };
+
+    systemd.sleep.extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      example = "HibernateDelaySec=1h";
+      description = lib.mdDoc ''
+        Extra config options for systemd sleep state logic.
+        See sleep.conf.d(5) man page for available options.
+      '';
+    };
+
+    systemd.additionalUpstreamSystemUnits = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      example = [ "debug-shell.service" "systemd-quotacheck.service" ];
+      description = lib.mdDoc ''
+        Additional units shipped with systemd that shall be enabled.
+      '';
+    };
+
+    systemd.suppressedSystemUnits = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      example = [ "systemd-backlight@.service" ];
+      description = lib.mdDoc ''
+        A list of units to skip when generating the system systemd configuration directory. This has
+        priority over upstream units, {option}`systemd.units`, and
+        {option}`systemd.additionalUpstreamSystemUnits`. The main purpose of this is to
+        prevent an upstream systemd unit from being added to the system configuration with any modifications
+        made to it by other NixOS modules.
+      '';
+    };
+
+    systemd.watchdog.device = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      example = "/dev/watchdog";
+      description = lib.mdDoc ''
+        The path to a hardware watchdog device which will be managed by systemd.
+        If not specified, systemd will default to /dev/watchdog.
+      '';
+    };
+
+    systemd.watchdog.runtimeTime = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "30s";
+      description = lib.mdDoc ''
+        The amount of time which can elapse before a watchdog hardware device
+        will automatically reboot the system. Valid time units include "ms",
+        "s", "min", "h", "d", and "w".
+      '';
+    };
+
+    systemd.watchdog.rebootTime = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10m";
+      description = lib.mdDoc ''
+        The amount of time which can elapse after a reboot has been triggered
+        before a watchdog hardware device will automatically reboot the system.
+        Valid time units include "ms", "s", "min", "h", "d", and "w". If left
+        `null`, systemd will use its default of `10min`; see also {command}`man
+        5 systemd-system.conf`.
+      '';
+    };
+
+    systemd.watchdog.kexecTime = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10m";
+      description = lib.mdDoc ''
+        The amount of time which can elapse when kexec is being executed before
+        a watchdog hardware device will automatically reboot the system. This
+        option should only be enabled if rebootTime is also enabled. Valid
+        time units include "ms", "s", "min", "h", "d", and "w".
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    warnings = concatLists (
+      mapAttrsToList
+        (name: service:
+          let
+            type = service.serviceConfig.Type or "";
+            restart = service.serviceConfig.Restart or "no";
+            hasDeprecated = builtins.hasAttr "StartLimitInterval" service.serviceConfig;
+          in
+            concatLists [
+              (optional (type == "oneshot" && (restart == "always" || restart == "on-success"))
+                "Service '${name}.service' with 'Type=oneshot' cannot have 'Restart=always' or 'Restart=on-success'"
+              )
+              (optional hasDeprecated
+                "Service '${name}.service' uses the attribute 'StartLimitInterval' in the Service section, which is deprecated. See https://github.com/NixOS/nixpkgs/issues/45786."
+              )
+              (optional (service.reloadIfChanged && service.reloadTriggers != [])
+                "Service '${name}.service' has both 'reloadIfChanged' and 'reloadTriggers' set. This is probably not what you want, because 'reloadTriggers' behave the same way as 'restartTriggers' if 'reloadIfChanged' is set."
+              )
+            ]
+        )
+        cfg.services
+    );
+
+    system.build.units = cfg.units;
+
+    system.nssModules = [ cfg.package.out ];
+    system.nssDatabases = {
+      hosts = (mkMerge [
+        (mkOrder 400 ["mymachines"]) # 400 to ensure it comes before resolve (which is mkBefore'd)
+        (mkOrder 999 ["myhostname"]) # after files (which is 998), but before regular nss modules
+      ]);
+      passwd = (mkMerge [
+        (mkAfter [ "systemd" ])
+      ]);
+      group = (mkMerge [
+        (mkAfter [ "[success=merge] systemd" ]) # need merge so that NSS won't stop at file-based groups
+      ]);
+    };
+
+    environment.systemPackages = [ cfg.package ];
+
+    environment.etc = let
+      # generate contents for /etc/systemd/system-${type} from attrset of links and packages
+      hooks = type: links: pkgs.runCommand "system-${type}" {
+          preferLocalBuild = true;
+          packages = cfg.packages;
+      } ''
+        set -e
+        mkdir -p $out
+        for package in $packages
+        do
+          for hook in $package/lib/systemd/system-${type}/*
+          do
+            ln -s $hook $out/
+          done
+        done
+        ${concatStrings (mapAttrsToList (exec: target: "ln -s ${target} $out/${exec};\n") links)}
+      '';
+
+      enabledUpstreamSystemUnits = filter (n: ! elem n cfg.suppressedSystemUnits) upstreamSystemUnits;
+      enabledUnits = filterAttrs (n: v: ! elem n cfg.suppressedSystemUnits) cfg.units;
+
+    in ({
+      "systemd/system".source = generateUnits {
+        type = "system";
+        units = enabledUnits;
+        upstreamUnits = enabledUpstreamSystemUnits;
+        upstreamWants = upstreamSystemWants;
+      };
+
+      "systemd/system.conf".text = ''
+        [Manager]
+        ManagerEnvironment=${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "${n}=${lib.escapeShellArg v}") cfg.managerEnvironment)}
+        ${optionalString config.systemd.enableCgroupAccounting ''
+          DefaultCPUAccounting=yes
+          DefaultIOAccounting=yes
+          DefaultBlockIOAccounting=yes
+          DefaultIPAccounting=yes
+        ''}
+        DefaultLimitCORE=infinity
+        ${optionalString (config.systemd.watchdog.device != null) ''
+          WatchdogDevice=${config.systemd.watchdog.device}
+        ''}
+        ${optionalString (config.systemd.watchdog.runtimeTime != null) ''
+          RuntimeWatchdogSec=${config.systemd.watchdog.runtimeTime}
+        ''}
+        ${optionalString (config.systemd.watchdog.rebootTime != null) ''
+          RebootWatchdogSec=${config.systemd.watchdog.rebootTime}
+        ''}
+        ${optionalString (config.systemd.watchdog.kexecTime != null) ''
+          KExecWatchdogSec=${config.systemd.watchdog.kexecTime}
+        ''}
+
+        ${config.systemd.extraConfig}
+      '';
+
+      "systemd/sleep.conf".text = ''
+        [Sleep]
+        ${config.systemd.sleep.extraConfig}
+      '';
+
+      "systemd/system-generators" = { source = hooks "generators" cfg.generators; };
+      "systemd/system-shutdown" = { source = hooks "shutdown" cfg.shutdown; };
+    });
+
+    services.dbus.enable = true;
+
+    users.users.systemd-network = {
+      uid = config.ids.uids.systemd-network;
+      group = "systemd-network";
+    };
+    users.groups.systemd-network.gid = config.ids.gids.systemd-network;
+    users.users.systemd-resolve = {
+      uid = config.ids.uids.systemd-resolve;
+      group = "systemd-resolve";
+    };
+    users.groups.systemd-resolve.gid = config.ids.gids.systemd-resolve;
+
+    # Target for ‘charon send-keys’ to hook into.
+    users.groups.keys.gid = config.ids.gids.keys;
+
+    systemd.targets.keys =
+      { description = "Security Keys";
+        unitConfig.X-StopOnReconfiguration = true;
+      };
+
+    systemd.units =
+         mapAttrs' (n: v: nameValuePair "${n}.path"    (pathToUnit    n v)) cfg.paths
+      // mapAttrs' (n: v: nameValuePair "${n}.service" (serviceToUnit n v)) cfg.services
+      // mapAttrs' (n: v: nameValuePair "${n}.slice"   (sliceToUnit   n v)) cfg.slices
+      // mapAttrs' (n: v: nameValuePair "${n}.socket"  (socketToUnit  n v)) cfg.sockets
+      // mapAttrs' (n: v: nameValuePair "${n}.target"  (targetToUnit  n v)) cfg.targets
+      // mapAttrs' (n: v: nameValuePair "${n}.timer"   (timerToUnit   n v)) cfg.timers
+      // listToAttrs (map
+                   (v: let n = escapeSystemdPath v.where;
+                       in nameValuePair "${n}.mount" (mountToUnit n v)) cfg.mounts)
+      // listToAttrs (map
+                   (v: let n = escapeSystemdPath v.where;
+                       in nameValuePair "${n}.automount" (automountToUnit n v)) cfg.automounts);
+
+      # Environment of PID 1
+      systemd.managerEnvironment = {
+        # This does not contain systemd itself; everything still works, so systemd appears to use its compiled-in paths for its own tools
+        # util-linux is needed for the main fsck utility wrapping the fs-specific ones
+        PATH = lib.makeBinPath (config.system.fsPackages ++ [cfg.package.util-linux]);
+        LOCALE_ARCHIVE = "/run/current-system/sw/lib/locale/locale-archive";
+        TZDIR = "/etc/zoneinfo";
+        # If SYSTEMD_UNIT_PATH ends with an empty component (":"), the usual unit load path will be appended to the contents of the variable
+        SYSTEMD_UNIT_PATH = lib.mkIf (config.boot.extraSystemdUnitPaths != []) "${builtins.concatStringsSep ":" config.boot.extraSystemdUnitPaths}:";
+      };
+
+
+    system.requiredKernelConfig = map config.lib.kernelConfig.isEnabled
+      [ "DEVTMPFS" "CGROUPS" "INOTIFY_USER" "SIGNALFD" "TIMERFD" "EPOLL" "NET"
+        "SYSFS" "PROC_FS" "FHANDLE" "CRYPTO_USER_API_HASH" "CRYPTO_HMAC"
+        "CRYPTO_SHA256" "DMIID" "AUTOFS_FS" "TMPFS_POSIX_ACL"
+        "TMPFS_XATTR" "SECCOMP"
+      ];
+
+    # Generate timer units for all services that have a ‘startAt’ value.
+    systemd.timers =
+      mapAttrs (name: service:
+        { wantedBy = [ "timers.target" ];
+          timerConfig.OnCalendar = service.startAt;
+        })
+        (filterAttrs (name: service: service.enable && service.startAt != []) cfg.services);
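+
+    # A hedged illustration (service name is hypothetical): a service declared as
+    #   systemd.services.my-backup.startAt = "daily";
+    # gets a corresponding my-backup.timer with OnCalendar=daily, wanted by timers.target.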
+
+    # Some overrides to upstream units.
+    systemd.services."systemd-backlight@".restartIfChanged = false;
+    systemd.services."systemd-fsck@".restartIfChanged = false;
+    systemd.services."systemd-fsck@".path = [ pkgs.util-linux ] ++ config.system.fsPackages;
+    systemd.services."systemd-makefs@" = {
+      restartIfChanged = false;
+      path = [ pkgs.util-linux ] ++ config.system.fsPackages;
+      # Since there is no /etc/systemd/system/systemd-makefs@.service
+      # file, the units generated in /run/systemd/generator would
+      # override anything we put here. But by forcing the use of a
+      # drop-in in /etc, it does apply.
+      overrideStrategy = "asDropin";
+    };
+    systemd.services."systemd-mkswap@" = {
+      restartIfChanged = false;
+      path = [ pkgs.util-linux ];
+      overrideStrategy = "asDropin";
+    };
+    systemd.services.systemd-random-seed.restartIfChanged = false;
+    systemd.services.systemd-remount-fs.restartIfChanged = false;
+    systemd.services.systemd-update-utmp.restartIfChanged = false;
+    systemd.services.systemd-udev-settle.restartIfChanged = false; # Causes long delays in nixos-rebuild
+    systemd.targets.local-fs.unitConfig.X-StopOnReconfiguration = true;
+    systemd.targets.remote-fs.unitConfig.X-StopOnReconfiguration = true;
+    systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
+    systemd.services.systemd-importd.environment = proxy_env;
+    systemd.services.systemd-pstore.wantedBy = [ "sysinit.target" ]; # see #81138
+
+    # NixOS has kernel modules in a different location, so override that here.
+    systemd.services.kmod-static-nodes.unitConfig.ConditionFileNotEmpty = [
+      ""  # required to unset the previous value!
+      "/run/booted-system/kernel-modules/lib/modules/%v/modules.devname"
+    ];
+
+    # Don't bother with certain units in containers.
+    systemd.services.systemd-remount-fs.unitConfig.ConditionVirtualization = "!container";
+    systemd.services.systemd-random-seed.unitConfig.ConditionVirtualization = "!container";
+
+    # Increase numeric PID range (set directly instead of copying a one-line file from systemd)
+    # https://github.com/systemd/systemd/pull/12226
+    boot.kernel.sysctl."kernel.pid_max" = mkIf pkgs.stdenv.is64bit (lib.mkDefault 4194304);
+
+    boot.kernelParams = optional (!cfg.enableUnifiedCgroupHierarchy) "systemd.unified_cgroup_hierarchy=0";
+
+    # Avoid potentially degraded system state due to
+    # "Userspace Out-Of-Memory (OOM) Killer was skipped because of a failed condition check (ConditionControlGroupController=v2)."
+    systemd.oomd.enable = mkIf (!cfg.enableUnifiedCgroupHierarchy) false;
+
+    services.logrotate.settings = {
+      "/var/log/btmp" = mapAttrs (_: mkDefault) {
+        frequency = "monthly";
+        rotate = 1;
+        create = "0660 root ${config.users.groups.utmp.name}";
+        minsize = "1M";
+      };
+      "/var/log/wtmp" = mapAttrs (_: mkDefault) {
+        frequency = "monthly";
+        rotate = 1;
+        create = "0664 root ${config.users.groups.utmp.name}";
+        minsize = "1M";
+      };
+    };
+  };
+
+  # FIXME: Remove these eventually.
+  imports =
+    [ (mkRenamedOptionModule [ "boot" "systemd" "sockets" ] [ "systemd" "sockets" ])
+      (mkRenamedOptionModule [ "boot" "systemd" "targets" ] [ "systemd" "targets" ])
+      (mkRenamedOptionModule [ "boot" "systemd" "services" ] [ "systemd" "services" ])
+      (mkRenamedOptionModule [ "jobs" ] [ "systemd" "services" ])
+      (mkRemovedOptionModule [ "systemd" "generator-packages" ] "Use systemd.packages instead.")
+    ];
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/coredump.nix b/nixpkgs/nixos/modules/system/boot/systemd/coredump.nix
new file mode 100644
index 000000000000..03ef00e5683c
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/coredump.nix
@@ -0,0 +1,78 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.systemd.coredump;
+  systemd = config.systemd.package;
+in {
+  options = {
+    systemd.coredump.enable = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether core dumps should be processed by
+        {command}`systemd-coredump`. If disabled, core dumps
+        appear in the current directory of the crashing process.
+      '';
+    };
+
+    systemd.coredump.extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      example = "Storage=journal";
+      description = lib.mdDoc ''
+        Extra config options for systemd-coredump. See coredump.conf(5) man page
+        for available options.
+      '';
+    };
+  };
+
+  config = mkMerge [
+
+    (mkIf cfg.enable {
+      systemd.additionalUpstreamSystemUnits = [
+        "systemd-coredump.socket"
+        "systemd-coredump@.service"
+      ];
+
+      environment.etc = {
+        "systemd/coredump.conf".text =
+        ''
+          [Coredump]
+          ${cfg.extraConfig}
+        '';
+
+        # install provided sysctl snippets
+        "sysctl.d/50-coredump.conf".source =
+          # Fix systemd-coredump error caused by truncation of `kernel.core_pattern`
+          # when the `systemd` derivation name is too long. This works by substituting
+          # the path to `systemd` with a symlink that has a constant-length path.
+          #
+          # See: https://github.com/NixOS/nixpkgs/issues/213408
+          pkgs.substitute {
+            src = "${systemd}/example/sysctl.d/50-coredump.conf";
+            replacements = [
+              "--replace"
+              "${systemd}"
+              "${pkgs.symlinkJoin { name = "systemd"; paths = [ systemd ]; }}"
+            ];
+          };
+
+        "sysctl.d/50-default.conf".source = "${systemd}/example/sysctl.d/50-default.conf";
+      };
+
+      users.users.systemd-coredump = {
+        uid = config.ids.uids.systemd-coredump;
+        group = "systemd-coredump";
+      };
+      users.groups.systemd-coredump = {};
+    })
+
+    (mkIf (!cfg.enable) {
+     boot.kernel.sysctl."kernel.core_pattern" = mkDefault "core";
+    })
+
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/homed.nix b/nixpkgs/nixos/modules/system/boot/systemd/homed.nix
new file mode 100644
index 000000000000..b216820c0c0c
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/homed.nix
@@ -0,0 +1,43 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.homed;
+in
+{
+  options.services.homed.enable = lib.mkEnableOption (lib.mdDoc ''
+    systemd home area/user account manager
+  '');
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = config.services.nscd.enable;
+        message = "systemd-homed requires the systemd NSS module; services.nscd.enable must be set to true.";
+      }
+    ];
+
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-homed.service"
+      "systemd-homed-activate.service"
+    ];
+
+    # This is mentioned in homed's [Install] section.
+    #
+    # While homed appears to work without it, it's probably better
+    # to follow upstream recommendations.
+    services.userdbd.enable = lib.mkDefault true;
+
+    systemd.services = {
+      systemd-homed = {
+        # These packages are required to manage encrypted volumes
+        path = config.system.fsPackages;
+        aliases = [ "dbus-org.freedesktop.home1.service" ];
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      systemd-homed-activate = {
+        wantedBy = [ "systemd-homed.service" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/initrd-secrets.nix b/nixpkgs/nixos/modules/system/boot/systemd/initrd-secrets.nix
new file mode 100644
index 000000000000..7b59c0cbe7b8
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/initrd-secrets.nix
@@ -0,0 +1,36 @@
+{ config, pkgs, lib, ... }:
+
+{
+  config = lib.mkIf (config.boot.initrd.enable && config.boot.initrd.systemd.enable) {
+    # Copy secrets into the initrd if they cannot be appended
+    boot.initrd.systemd.contents = lib.mkIf (!config.boot.loader.supportsInitrdSecrets)
+      (lib.mapAttrs' (dest: source: lib.nameValuePair "/.initrd-secrets/${dest}" { source = if source == null then dest else source; }) config.boot.initrd.secrets);
+
+    # Copy secrets to their respective locations
+    boot.initrd.systemd.services.initrd-nixos-copy-secrets = lib.mkIf (config.boot.initrd.secrets != {}) {
+      description = "Copy secrets into place";
+      # Run as early as possible
+      wantedBy = [ "sysinit.target" ];
+      before = [ "cryptsetup-pre.target" ];
+      unitConfig.DefaultDependencies = false;
+
+      # We write the secrets to /.initrd-secrets and copy them into place here,
+      # because this allows secrets to end up under /run. If we put a secret
+      # directly into /run and dropped this service, the /run tmpfs would be
+      # mounted over the secret, making it invisible in stage 2.
+      script = ''
+        for secret in $(cd /.initrd-secrets; find . -type f -o -type l); do
+          mkdir -p "$(dirname "/$secret")"
+          cp "/.initrd-secrets/$secret" "/$secret"
+        done
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+    # The script needs this
+    boot.initrd.systemd.extraBin.find = "${pkgs.findutils}/bin/find";
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix b/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
new file mode 100644
index 000000000000..0e7d59b32075
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/initrd.nix
@@ -0,0 +1,561 @@
+{ lib, options, config, utils, pkgs, ... }:
+
+with lib;
+
+let
+  inherit (utils) systemdUtils escapeSystemdPath;
+  inherit (systemdUtils.lib)
+    generateUnits
+    pathToUnit
+    serviceToUnit
+    sliceToUnit
+    socketToUnit
+    targetToUnit
+    timerToUnit
+    mountToUnit
+    automountToUnit;
+
+
+  cfg = config.boot.initrd.systemd;
+
+  # Copied from fedora
+  upstreamUnits = [
+    "basic.target"
+    "ctrl-alt-del.target"
+    "emergency.service"
+    "emergency.target"
+    "final.target"
+    "halt.target"
+    "initrd-cleanup.service"
+    "initrd-fs.target"
+    "initrd-parse-etc.service"
+    "initrd-root-device.target"
+    "initrd-root-fs.target"
+    "initrd-switch-root.service"
+    "initrd-switch-root.target"
+    "initrd.target"
+    "kexec.target"
+    "kmod-static-nodes.service"
+    "local-fs-pre.target"
+    "local-fs.target"
+    "multi-user.target"
+    "paths.target"
+    "poweroff.target"
+    "reboot.target"
+    "rescue.service"
+    "rescue.target"
+    "rpcbind.target"
+    "shutdown.target"
+    "sigpwr.target"
+    "slices.target"
+    "sockets.target"
+    "swap.target"
+    "sysinit.target"
+    "sys-kernel-config.mount"
+    "syslog.socket"
+    "systemd-ask-password-console.path"
+    "systemd-ask-password-console.service"
+    "systemd-fsck@.service"
+    "systemd-halt.service"
+    "systemd-journald-audit.socket"
+    "systemd-journald-dev-log.socket"
+    "systemd-journald.service"
+    "systemd-journald.socket"
+    "systemd-kexec.service"
+    "systemd-modules-load.service"
+    "systemd-poweroff.service"
+    "systemd-reboot.service"
+    "systemd-sysctl.service"
+    "systemd-tmpfiles-setup-dev.service"
+    "systemd-tmpfiles-setup.service"
+    "timers.target"
+    "umount.target"
+  ] ++ cfg.additionalUpstreamUnits;
+
+  upstreamWants = [
+    "sysinit.target.wants"
+  ];
+
+  enabledUpstreamUnits = filter (n: ! elem n cfg.suppressedUnits) upstreamUnits;
+  enabledUnits = filterAttrs (n: v: ! elem n cfg.suppressedUnits) cfg.units;
+  jobScripts = concatLists (mapAttrsToList (_: unit: unit.jobScripts or []) (filterAttrs (_: v: v.enable) cfg.services));
+
+  stage1Units = generateUnits {
+    type = "initrd";
+    units = enabledUnits;
+    upstreamUnits = enabledUpstreamUnits;
+    inherit upstreamWants;
+    inherit (cfg) packages package;
+  };
+
+  fileSystems = filter utils.fsNeededForBoot config.system.build.fileSystems;
+
+  needMakefs = lib.any (fs: fs.autoFormat) fileSystems;
+
+  kernel-name = config.boot.kernelPackages.kernel.name or "kernel";
+  modulesTree = config.system.modulesTree.override { name = kernel-name + "-modules"; };
+  firmware = config.hardware.firmware;
+  # Determine the set of modules that we need to mount the root FS.
+  modulesClosure = pkgs.makeModulesClosure {
+    rootModules = config.boot.initrd.availableKernelModules ++ config.boot.initrd.kernelModules;
+    kernel = modulesTree;
+    firmware = firmware;
+    allowMissing = false;
+  };
+
+  initrdBinEnv = pkgs.buildEnv {
+    name = "initrd-bin-env";
+    paths = map getBin cfg.initrdBin;
+    pathsToLink = ["/bin" "/sbin"];
+    postBuild = concatStringsSep "\n" (mapAttrsToList (n: v: "ln -sf '${v}' $out/bin/'${n}'") cfg.extraBin);
+  };
+
+  initialRamdisk = pkgs.makeInitrdNG {
+    name = "initrd-${kernel-name}";
+    inherit (config.boot.initrd) compressor compressorArgs prepend;
+    inherit (cfg) strip;
+
+    contents = map (path: { object = path; symlink = ""; }) (subtractLists cfg.suppressedStorePaths cfg.storePaths)
+      ++ mapAttrsToList (_: v: { object = v.source; symlink = v.target; }) (filterAttrs (_: v: v.enable) cfg.contents);
+  };
+
+in {
+  options.boot.initrd.systemd = {
+    enable = mkEnableOption (lib.mdDoc "systemd in initrd") // {
+      description = lib.mdDoc ''
+        Whether to enable systemd in initrd. The unit options such as
+        {option}`boot.initrd.systemd.services` are the same as their
+        stage 2 counterparts such as {option}`systemd.services`,
+        except that `restartTriggers` and `reloadTriggers` are not
+        supported.
+      '';
+    };
+
+    package = lib.mkOption {
+      type = lib.types.package;
+      default = config.systemd.package;
+      defaultText = lib.literalExpression "config.systemd.package";
+      description = ''
+        The systemd package to use.
+      '';
+    };
+
+    extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      example = "DefaultLimitCORE=infinity";
+      description = lib.mdDoc ''
+        Extra config options for systemd. See systemd-system.conf(5) man page
+        for available options.
+      '';
+    };
+
+    managerEnvironment = mkOption {
+      type = with types; attrsOf (nullOr (oneOf [ str path package ]));
+      default = {};
+      example = { SYSTEMD_LOG_LEVEL = "debug"; };
+      description = lib.mdDoc ''
+        Environment variables of PID 1. These variables are
+        *not* passed to started units.
+      '';
+    };
+
+    contents = mkOption {
+      description = lib.mdDoc "Set of files that have to be linked into the initrd";
+      example = literalExpression ''
+        {
+          "/etc/hostname".text = "mymachine";
+        }
+      '';
+      default = {};
+      type = utils.systemdUtils.types.initrdContents;
+    };
+
+    storePaths = mkOption {
+      description = lib.mdDoc ''
+        Store paths to copy into the initrd as well.
+      '';
+      type = with types; listOf (oneOf [ singleLineStr package ]);
+      default = [];
+    };
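+
+    # A hedged example (the binary is chosen purely for illustration) of copying
+    # an extra store path into the initrd:
+    #   boot.initrd.systemd.storePaths = [ "${pkgs.coreutils}/bin/cat" ];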
+
+    strip = mkOption {
+      description = lib.mdDoc ''
+        Whether to completely strip executables and libraries copied to the initramfs.
+
+        Setting this to false may save on the order of 30MiB on the
+        machine building the system (by avoiding a binutils
+        reference), at the cost of ~1MiB of initramfs size. This puts
+        this option firmly in the territory of micro-optimisation.
+      '';
+      type = types.bool;
+      default = true;
+    };
+
+    extraBin = mkOption {
+      description = lib.mdDoc ''
+        Tools to add to /bin
+      '';
+      example = literalExpression ''
+        {
+          umount = ''${pkgs.util-linux}/bin/umount;
+        }
+      '';
+      type = types.attrsOf types.path;
+      default = {};
+    };
+
+    suppressedStorePaths = mkOption {
+      description = lib.mdDoc ''
+        Store paths specified in the storePaths option that
+        should not be copied.
+      '';
+      type = types.listOf types.singleLineStr;
+      default = [];
+    };
+
+    emergencyAccess = mkOption {
+      type = with types; oneOf [ bool (nullOr (passwdEntry str)) ];
+      description = lib.mdDoc ''
+        Set to true for unauthenticated emergency access, and false for
+        no emergency access.
+
+        Can also be set to a hashed super user password to allow
+        authenticated access to the emergency mode.
+      '';
+      default = false;
+    };
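+
+    # A hedged example: allow an unauthenticated emergency shell (a hashed
+    # password string, e.g. produced by mkpasswd, is also accepted):
+    #   boot.initrd.systemd.emergencyAccess = true;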
+
+    initrdBin = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      description = lib.mdDoc ''
+        Packages to include in /bin for the stage 1 emergency shell.
+      '';
+    };
+
+    additionalUpstreamUnits = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      example = [ "debug-shell.service" "systemd-quotacheck.service" ];
+      description = lib.mdDoc ''
+        Additional units shipped with systemd that shall be enabled.
+      '';
+    };
+
+    suppressedUnits = mkOption {
+      default = [ ];
+      type = types.listOf types.str;
+      example = [ "systemd-backlight@.service" ];
+      description = lib.mdDoc ''
+        A list of units to skip when generating the systemd configuration directory. This has
+        priority over upstream units, {option}`boot.initrd.systemd.units`, and
+        {option}`boot.initrd.systemd.additionalUpstreamUnits`. The main purpose of this is to
+        prevent an upstream systemd unit from being added to the initrd, along with any
+        modifications made to it by other NixOS modules.
+      '';
+    };
+
+    units = mkOption {
+      description = lib.mdDoc "Definition of systemd units.";
+      default = {};
+      visible = "shallow";
+      type = systemdUtils.types.units;
+    };
+
+    packages = mkOption {
+      default = [];
+      type = types.listOf types.package;
+      example = literalExpression "[ pkgs.systemd-cryptsetup-generator ]";
+      description = lib.mdDoc "Packages providing systemd units and hooks.";
+    };
+
+    targets = mkOption {
+      default = {};
+      visible = "shallow";
+      type = systemdUtils.types.initrdTargets;
+      description = lib.mdDoc "Definition of systemd target units.";
+    };
+
+    services = mkOption {
+      default = {};
+      type = systemdUtils.types.initrdServices;
+      visible = "shallow";
+      description = lib.mdDoc "Definition of systemd service units.";
+    };
+
+    sockets = mkOption {
+      default = {};
+      type = systemdUtils.types.initrdSockets;
+      visible = "shallow";
+      description = lib.mdDoc "Definition of systemd socket units.";
+    };
+
+    timers = mkOption {
+      default = {};
+      type = systemdUtils.types.initrdTimers;
+      visible = "shallow";
+      description = lib.mdDoc "Definition of systemd timer units.";
+    };
+
+    paths = mkOption {
+      default = {};
+      type = systemdUtils.types.initrdPaths;
+      visible = "shallow";
+      description = lib.mdDoc "Definition of systemd path units.";
+    };
+
+    mounts = mkOption {
+      default = [];
+      type = systemdUtils.types.initrdMounts;
+      visible = "shallow";
+      description = lib.mdDoc ''
+        Definition of systemd mount units.
+        This is a list instead of an attrSet, because systemd mandates that the names be derived
+        from the 'where' attribute.
+      '';
+    };
+
+    automounts = mkOption {
+      default = [];
+      type = systemdUtils.types.automounts;
+      visible = "shallow";
+      description = lib.mdDoc ''
+        Definition of systemd automount units.
+        This is a list instead of an attrSet, because systemd mandates that the names be derived
+        from the 'where' attribute.
+      '';
+    };
+
+    slices = mkOption {
+      default = {};
+      type = systemdUtils.types.slices;
+      visible = "shallow";
+      description = lib.mdDoc "Definition of slice configurations.";
+    };
+
+    enableTpm2 = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable TPM2 support in the initrd.
+      '';
+    };
+  };
+
+  config = mkIf (config.boot.initrd.enable && cfg.enable) {
+    assertions = map (name: {
+      assertion = lib.attrByPath name (throw "impossible") config.boot.initrd == "";
+      message = ''
+        systemd stage 1 does not support 'boot.initrd.${lib.concatStringsSep "." name}'. Please
+          convert it to analogous systemd units in 'boot.initrd.systemd'.
+
+            Definitions:
+        ${lib.concatMapStringsSep "\n" ({ file, ... }: "    - ${file}") (lib.attrByPath name (throw "impossible") options.boot.initrd).definitionsWithLocations}
+      '';
+    }) [
+      [ "preFailCommands" ]
+      [ "preDeviceCommands" ]
+      [ "preLVMCommands" ]
+      [ "postDeviceCommands" ]
+      [ "postResumeCommands" ]
+      [ "postMountCommands" ]
+      [ "extraUdevRulesCommands" ]
+      [ "extraUtilsCommands" ]
+      [ "extraUtilsCommandsTest" ]
+      [ "network" "postCommands" ]
+    ];
+
+    system.build = { inherit initialRamdisk; };
+
+    boot.initrd.availableKernelModules = [
+      # systemd needs this for some features
+      "autofs"
+      # systemd-cryptenroll
+    ] ++ lib.optional cfg.enableTpm2 "tpm-tis"
+    ++ lib.optional (cfg.enableTpm2 && !(pkgs.stdenv.hostPlatform.isRiscV64 || pkgs.stdenv.hostPlatform.isArmv7)) "tpm-crb";
+
+    boot.initrd.systemd = {
+      initrdBin = [pkgs.bash pkgs.coreutils cfg.package.kmod cfg.package];
+      extraBin = {
+        less = "${pkgs.less}/bin/less";
+        mount = "${cfg.package.util-linux}/bin/mount";
+        umount = "${cfg.package.util-linux}/bin/umount";
+        fsck = "${cfg.package.util-linux}/bin/fsck";
+      };
+
+      managerEnvironment.PATH = "/bin:/sbin";
+
+      contents = {
+        "/tmp/.keep".text = "systemd requires the /tmp mount point in the initrd cpio archive";
+        "/init".source = "${cfg.package}/lib/systemd/systemd";
+        "/etc/systemd/system".source = stage1Units;
+
+        "/etc/systemd/system.conf".text = ''
+          [Manager]
+          DefaultEnvironment=PATH=/bin:/sbin
+          ${cfg.extraConfig}
+          ManagerEnvironment=${lib.concatStringsSep " " (lib.mapAttrsToList (n: v: "${n}=${lib.escapeShellArg v}") cfg.managerEnvironment)}
+        '';
+
+        "/lib/modules".source = "${modulesClosure}/lib/modules";
+        "/lib/firmware".source = "${modulesClosure}/lib/firmware";
+
+        "/etc/modules-load.d/nixos.conf".text = concatStringsSep "\n" config.boot.initrd.kernelModules;
+
+        # We can use either ! or * to lock the root account in the
+        # console, but some software like OpenSSH won't even allow you
+        # to log in with an SSH key if you use ! so we use * instead
+        "/etc/shadow".text = "root:${if isBool cfg.emergencyAccess then optionalString (!cfg.emergencyAccess) "*" else cfg.emergencyAccess}:::::::";
+
+        "/bin".source = "${initrdBinEnv}/bin";
+        "/sbin".source = "${initrdBinEnv}/sbin";
+
+        "/etc/sysctl.d/nixos.conf".text = "kernel.modprobe = /sbin/modprobe";
+        "/etc/modprobe.d/systemd.conf".source = "${cfg.package}/lib/modprobe.d/systemd.conf";
+        "/etc/modprobe.d/ubuntu.conf".source = pkgs.runCommand "initrd-kmod-blacklist-ubuntu" { } ''
+          ${pkgs.buildPackages.perl}/bin/perl -0pe 's/## file: iwlwifi.conf(.+?)##/##/s;' $src > $out
+        '';
+        "/etc/modprobe.d/debian.conf".source = pkgs.kmod-debian-aliases;
+
+        "/etc/os-release".source = config.boot.initrd.osRelease;
+        "/etc/initrd-release".source = config.boot.initrd.osRelease;
+
+      } // optionalAttrs (config.environment.etc ? "modprobe.d/nixos.conf") {
+        "/etc/modprobe.d/nixos.conf".source = config.environment.etc."modprobe.d/nixos.conf".source;
+      };
+
+      storePaths = [
+        # systemd tooling
+        "${cfg.package}/lib/systemd/systemd-fsck"
+        "${cfg.package}/lib/systemd/systemd-hibernate-resume"
+        "${cfg.package}/lib/systemd/systemd-journald"
+        (lib.mkIf needMakefs "${cfg.package}/lib/systemd/systemd-makefs")
+        "${cfg.package}/lib/systemd/systemd-modules-load"
+        "${cfg.package}/lib/systemd/systemd-remount-fs"
+        "${cfg.package}/lib/systemd/systemd-shutdown"
+        "${cfg.package}/lib/systemd/systemd-sulogin-shell"
+        "${cfg.package}/lib/systemd/systemd-sysctl"
+
+        # generators
+        "${cfg.package}/lib/systemd/system-generators/systemd-debug-generator"
+        "${cfg.package}/lib/systemd/system-generators/systemd-fstab-generator"
+        "${cfg.package}/lib/systemd/system-generators/systemd-gpt-auto-generator"
+        "${cfg.package}/lib/systemd/system-generators/systemd-hibernate-resume-generator"
+        "${cfg.package}/lib/systemd/system-generators/systemd-run-generator"
+
+        # utilities needed by systemd
+        "${cfg.package.util-linux}/bin/mount"
+        "${cfg.package.util-linux}/bin/umount"
+        "${cfg.package.util-linux}/bin/sulogin"
+
+        # so NSS can look up usernames
+        "${pkgs.glibc}/lib/libnss_files.so.2"
+      ] ++ optionals (cfg.package.withCryptsetup && cfg.enableTpm2) [
+        # tpm2 support
+        "${cfg.package}/lib/cryptsetup/libcryptsetup-token-systemd-tpm2.so"
+        pkgs.tpm2-tss
+      ] ++ optionals cfg.package.withCryptsetup [
+        # fido2 support
+        "${cfg.package}/lib/cryptsetup/libcryptsetup-token-systemd-fido2.so"
+        "${pkgs.libfido2}/lib/libfido2.so.1"
+      ] ++ jobScripts;
+
+      targets.initrd.aliases = ["default.target"];
+      units =
+           mapAttrs' (n: v: nameValuePair "${n}.path"    (pathToUnit    n v)) cfg.paths
+        // mapAttrs' (n: v: nameValuePair "${n}.service" (serviceToUnit n v)) cfg.services
+        // mapAttrs' (n: v: nameValuePair "${n}.slice"   (sliceToUnit   n v)) cfg.slices
+        // mapAttrs' (n: v: nameValuePair "${n}.socket"  (socketToUnit  n v)) cfg.sockets
+        // mapAttrs' (n: v: nameValuePair "${n}.target"  (targetToUnit  n v)) cfg.targets
+        // mapAttrs' (n: v: nameValuePair "${n}.timer"   (timerToUnit   n v)) cfg.timers
+        // listToAttrs (map
+                     (v: let n = escapeSystemdPath v.where;
+                         in nameValuePair "${n}.mount" (mountToUnit n v)) cfg.mounts)
+        // listToAttrs (map
+                     (v: let n = escapeSystemdPath v.where;
+                         in nameValuePair "${n}.automount" (automountToUnit n v)) cfg.automounts);
+
+      # make sure all the /dev nodes are set up
+      services.systemd-tmpfiles-setup-dev.wantedBy = ["sysinit.target"];
+
+      services.initrd-nixos-activation = {
+        after = [ "initrd-fs.target" ];
+        requiredBy = [ "initrd.target" ];
+        unitConfig.AssertPathExists = "/etc/initrd-release";
+        serviceConfig.Type = "oneshot";
+        description = "NixOS Activation";
+
+        script = /* bash */ ''
+          set -uo pipefail
+          export PATH="/bin:${cfg.package.util-linux}/bin"
+
+          # Figure out what closure to boot
+          closure=
+          for o in $(< /proc/cmdline); do
+              case $o in
+                  init=*)
+                      IFS== read -r -a initParam <<< "$o"
+                      closure="$(dirname "''${initParam[1]}")"
+                      ;;
+              esac
+          done
+
+          # Sanity check
+          if [ -z "''${closure:-}" ]; then
+            echo 'No init= parameter on the kernel command line' >&2
+            exit 1
+          fi
+
+          # If we are not booting a NixOS closure (e.g. init=/bin/sh),
+          # we don't know what root to prepare so we don't do anything
+          if ! [ -x "/sysroot$(readlink "/sysroot$closure/prepare-root" || echo "$closure/prepare-root")" ]; then
+            echo "NEW_INIT=''${initParam[1]}" > /etc/switch-root.conf
+            echo "$closure does not look like a NixOS installation - not activating"
+            exit 0
+          fi
+          echo 'NEW_INIT=' > /etc/switch-root.conf
+
+
+          # We need to propagate /run for things like /run/booted-system
+          # and /run/current-system.
+          mkdir -p /sysroot/run
+          mount --bind /run /sysroot/run
+
+          # Initialize the system
+          export IN_NIXOS_SYSTEMD_STAGE1=true
+          exec chroot /sysroot $closure/prepare-root
+        '';
+      };
+
+      # This will either call systemctl with the new init as the last parameter (which
+      # is the case when not booting a NixOS system) or with an empty string, causing
+      # systemd to bypass its verification code that checks whether the next init is a
+      # systemd binary and to fall back to its compiled-in value instead.
+      services.initrd-switch-root.serviceConfig = {
+        EnvironmentFile = "-/etc/switch-root.conf";
+        ExecStart = [
+          ""
+          ''systemctl --no-block switch-root /sysroot "''${NEW_INIT}"''
+        ];
+      };
+
+      services.panic-on-fail = {
+        wantedBy = ["emergency.target"];
+        unitConfig = {
+          DefaultDependencies = false;
+          ConditionKernelCommandLine = [
+            "|boot.panic_on_fail"
+            "|stage1panic"
+          ];
+        };
+        script = ''
+          echo c > /proc/sysrq-trigger
+        '';
+        serviceConfig.Type = "oneshot";
+      };
+    };
+
+    boot.kernelParams = lib.mkIf (config.boot.resumeDevice != "") [ "resume=${config.boot.resumeDevice}" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/journald.nix b/nixpkgs/nixos/modules/system/boot/systemd/journald.nix
new file mode 100644
index 000000000000..7e62a4c9bfed
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/journald.nix
@@ -0,0 +1,140 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.journald;
+in {
+  options = {
+    services.journald.console = mkOption {
+      default = "";
+      type = types.str;
+      description = lib.mdDoc "If non-empty, write log messages to the specified TTY device.";
+    };
+
+    services.journald.rateLimitInterval = mkOption {
+      default = "30s";
+      type = types.str;
+      description = lib.mdDoc ''
+        Configures the rate limiting interval that is applied to all
+        messages generated on the system. This rate limiting is applied
+        per-service, so that two services which log do not interfere with
+        each other's limit. The value may be specified in the following
+        units: s, min, h, ms, us. To turn off any kind of rate limiting,
+        set either value to 0.
+
+        See {option}`services.journald.rateLimitBurst` for important
+        considerations when setting this value.
+      '';
+    };
+
+    services.journald.storage = mkOption {
+      default = "persistent";
+      type = types.enum [ "persistent" "volatile" "auto" "none" ];
+      description = mdDoc ''
+        Controls where to store journal data. See
+        {manpage}`journald.conf(5)` for further information.
+      '';
+    };
+
+    services.journald.rateLimitBurst = mkOption {
+      default = 10000;
+      type = types.int;
+      description = lib.mdDoc ''
+        Configures the rate limiting burst limit (number of messages per
+        interval) that is applied to all messages generated on the system.
+        This rate limiting is applied per-service, so that two services
+        which log do not interfere with each other's limit.
+
+        Note that the effective rate limit is multiplied by a factor derived
+        from the available free disk space for the journal as described on
+        [journald.conf(5)](https://www.freedesktop.org/software/systemd/man/journald.conf.html).
+
+        Note that the total amount of logs stored is limited by journald settings
+        such as `SystemMaxUse`, which defaults to a 4 GB cap.
+
+        It is thus recommended to compute the period of time for which you will
+        be able to store logs when an application logs at the full burst rate.
+        With the default settings and log lines that are 100 bytes long, this
+        can amount to just a few hours.
+      '';
+    };
+
+    services.journald.extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      example = "Storage=volatile";
+      description = lib.mdDoc ''
+        Extra config options for systemd-journald. See man journald.conf
+        for available options.
+      '';
+    };
+
+    services.journald.enableHttpGateway = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable the HTTP gateway to the journal.
+      '';
+    };
+
+    services.journald.forwardToSyslog = mkOption {
+      default = config.services.rsyslogd.enable || config.services.syslog-ng.enable;
+      defaultText = literalExpression "services.rsyslogd.enable || services.syslog-ng.enable";
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to forward log messages to syslog.
+      '';
+    };
+  };
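+
+  # A minimal usage sketch (values are illustrative): keep the journal in
+  # memory only and cap its size via extraConfig:
+  #   services.journald.storage = "volatile";
+  #   services.journald.extraConfig = "RuntimeMaxUse=256M";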
+
+  config = {
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-journald.socket"
+      "systemd-journald@.socket"
+      "systemd-journald-varlink@.socket"
+      "systemd-journald.service"
+      "systemd-journald@.service"
+      "systemd-journal-flush.service"
+      "systemd-journal-catalog-update.service"
+      ] ++ (optional (!config.boot.isContainer) "systemd-journald-audit.socket") ++ [
+      "systemd-journald-dev-log.socket"
+      "syslog.socket"
+      ] ++ optionals cfg.enableHttpGateway [
+      "systemd-journal-gatewayd.socket"
+      "systemd-journal-gatewayd.service"
+      ];
+
+    environment.etc = {
+      "systemd/journald.conf".text = ''
+        [Journal]
+        Storage=${cfg.storage}
+        RateLimitInterval=${cfg.rateLimitInterval}
+        RateLimitBurst=${toString cfg.rateLimitBurst}
+        ${optionalString (cfg.console != "") ''
+          ForwardToConsole=yes
+          TTYPath=${cfg.console}
+        ''}
+        ${optionalString (cfg.forwardToSyslog) ''
+          ForwardToSyslog=yes
+        ''}
+        ${cfg.extraConfig}
+      '';
+    };
+
+    users.groups.systemd-journal.gid = config.ids.gids.systemd-journal;
+    users.users.systemd-journal-gateway.uid = config.ids.uids.systemd-journal-gateway;
+    users.users.systemd-journal-gateway.group = "systemd-journal-gateway";
+    users.groups.systemd-journal-gateway.gid = config.ids.gids.systemd-journal-gateway;
+
+    systemd.sockets.systemd-journal-gatewayd.wantedBy =
+      optional cfg.enableHttpGateway "sockets.target";
+
+    systemd.services.systemd-journal-flush.restartIfChanged = false;
+    systemd.services.systemd-journald.restartTriggers = [ config.environment.etc."systemd/journald.conf".source ];
+    systemd.services.systemd-journald.stopIfChanged = false;
+    systemd.services."systemd-journald@".restartTriggers = [ config.environment.etc."systemd/journald.conf".source ];
+    systemd.services."systemd-journald@".stopIfChanged = false;
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/logind.nix b/nixpkgs/nixos/modules/system/boot/systemd/logind.nix
new file mode 100644
index 000000000000..cf01c1882857
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/logind.nix
@@ -0,0 +1,205 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.services.logind;
+
+  logindHandlerType = types.enum [
+    "ignore" "poweroff" "reboot" "halt" "kexec" "suspend"
+    "hibernate" "hybrid-sleep" "suspend-then-hibernate" "lock"
+  ];
+in
+{
+  options.services.logind = {
+    extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      example = "IdleAction=lock";
+      description = lib.mdDoc ''
+        Extra config options for systemd-logind.
+        See [logind.conf(5)](https://www.freedesktop.org/software/systemd/man/logind.conf.html)
+        for available options.
+      '';
+    };
+
+    killUserProcesses = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Specifies whether the processes of a user should be killed
+        when the user logs out.  If true, the scope unit corresponding
+        to the session and all processes inside that scope will be
+        terminated.  If false, the scope is "abandoned"
+        (see [systemd.scope(5)](https://www.freedesktop.org/software/systemd/man/systemd.scope.html#)),
+        and processes are not killed.
+
+        See [logind.conf(5)](https://www.freedesktop.org/software/systemd/man/logind.conf.html#KillUserProcesses=)
+        for more details.
+      '';
+    };
+
+    powerKey = mkOption {
+      default = "poweroff";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the power key is pressed.
+      '';
+    };
+
+    powerKeyLongPress = mkOption {
+      default = "ignore";
+      example = "reboot";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the power key is long-pressed.
+      '';
+    };
+
+    rebootKey = mkOption {
+      default = "reboot";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the reboot key is pressed.
+      '';
+    };
+
+    rebootKeyLongPress = mkOption {
+      default = "poweroff";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the reboot key is long-pressed.
+      '';
+    };
+
+    suspendKey = mkOption {
+      default = "suspend";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the suspend key is pressed.
+      '';
+    };
+
+    suspendKeyLongPress = mkOption {
+      default = "hibernate";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the suspend key is long-pressed.
+      '';
+    };
+
+    hibernateKey = mkOption {
+      default = "hibernate";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the hibernate key is pressed.
+      '';
+    };
+
+    hibernateKeyLongPress = mkOption {
+      default = "ignore";
+      example = "suspend";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the hibernate key is long-pressed.
+      '';
+    };
+
+    lidSwitch = mkOption {
+      default = "suspend";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the laptop lid is closed.
+      '';
+    };
+
+    lidSwitchExternalPower = mkOption {
+      default = cfg.lidSwitch;
+      defaultText = literalExpression "services.logind.lidSwitch";
+      example = "ignore";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the laptop lid is closed
+        and the system is on external power. By default, the same
+        action as specified in services.logind.lidSwitch is used.
+      '';
+    };
+
+    lidSwitchDocked = mkOption {
+      default = "ignore";
+      example = "suspend";
+      type = logindHandlerType;
+
+      description = lib.mdDoc ''
+        Specifies what to do when the laptop lid is closed
+        and another screen is added.
+      '';
+    };
+  };
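+
+  # A usage sketch (handler choices are illustrative): a laptop that only
+  # suspends on lid close while on battery:
+  #   services.logind.lidSwitch = "suspend";
+  #   services.logind.lidSwitchExternalPower = "ignore";
+  #   services.logind.lidSwitchDocked = "ignore";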
+
+  config = {
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-logind.service"
+      "autovt@.service"
+      "systemd-user-sessions.service"
+    ] ++ optionals config.systemd.package.withImportd [
+      "dbus-org.freedesktop.import1.service"
+    ] ++ optionals config.systemd.package.withMachined [
+      "dbus-org.freedesktop.machine1.service"
+    ] ++ optionals config.systemd.package.withPortabled [
+      "dbus-org.freedesktop.portable1.service"
+    ] ++ [
+      "dbus-org.freedesktop.login1.service"
+      "user@.service"
+      "user-runtime-dir@.service"
+    ];
+
+    environment.etc = {
+      "systemd/logind.conf".text = ''
+        [Login]
+        KillUserProcesses=${if cfg.killUserProcesses then "yes" else "no"}
+        HandlePowerKey=${cfg.powerKey}
+        HandlePowerKeyLongPress=${cfg.powerKeyLongPress}
+        HandleRebootKey=${cfg.rebootKey}
+        HandleRebootKeyLongPress=${cfg.rebootKeyLongPress}
+        HandleSuspendKey=${cfg.suspendKey}
+        HandleSuspendKeyLongPress=${cfg.suspendKeyLongPress}
+        HandleHibernateKey=${cfg.hibernateKey}
+        HandleHibernateKeyLongPress=${cfg.hibernateKeyLongPress}
+        HandleLidSwitch=${cfg.lidSwitch}
+        HandleLidSwitchExternalPower=${cfg.lidSwitchExternalPower}
+        HandleLidSwitchDocked=${cfg.lidSwitchDocked}
+        ${cfg.extraConfig}
+      '';
+    };
+
+    # Restarting systemd-logind breaks X11
+    # - upstream commit: https://cgit.freedesktop.org/xorg/xserver/commit/?id=dc48bd653c7e101
+    # - systemd announcement: https://github.com/systemd/systemd/blob/22043e4317ecd2bc7834b48a6d364de76bb26d91/NEWS#L103-L112
+    # - this might be addressed in the future by xorg
+    #systemd.services.systemd-logind.restartTriggers = [ config.environment.etc."systemd/logind.conf".source ];
+    systemd.services.systemd-logind.restartIfChanged = false;
+    systemd.services.systemd-logind.stopIfChanged = false;
+
+    # The user-runtime-dir@ service is managed by systemd-logind we should not touch it or else we break the users' sessions.
+    systemd.services."user-runtime-dir@".stopIfChanged = false;
+    systemd.services."user-runtime-dir@".restartIfChanged = false;
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/nspawn.nix b/nixpkgs/nixos/modules/system/boot/systemd/nspawn.nix
new file mode 100644
index 000000000000..b513aa051f28
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/nspawn.nix
@@ -0,0 +1,132 @@
+{ config, lib, pkgs, utils, ...}:
+
+with utils.systemdUtils.unitOptions;
+with utils.systemdUtils.lib;
+with lib;
+
+let
+  cfg = config.systemd.nspawn;
+
+  checkExec = checkUnitConfig "Exec" [
+    (assertOnlyFields [
+      "Boot" "ProcessTwo" "Parameters" "Environment" "User" "WorkingDirectory"
+      "PivotRoot" "Capability" "DropCapability" "NoNewPrivileges" "KillSignal"
+      "Personality" "MachineID" "PrivateUsers" "NotifyReady" "SystemCallFilter"
+      "LimitCPU" "LimitFSIZE" "LimitDATA" "LimitSTACK" "LimitCORE" "LimitRSS"
+      "LimitNOFILE" "LimitAS" "LimitNPROC" "LimitMEMLOCK" "LimitLOCKS"
+      "LimitSIGPENDING" "LimitMSGQUEUE" "LimitNICE" "LimitRTPRIO" "LimitRTTIME"
+      "OOMScoreAdjust" "CPUAffinity" "Hostname" "ResolvConf" "Timezone"
+      "LinkJournal" "Ephemeral" "AmbientCapability"
+    ])
+    (assertValueOneOf "Boot" boolValues)
+    (assertValueOneOf "ProcessTwo" boolValues)
+    (assertValueOneOf "NotifyReady" boolValues)
+  ];
+
+  checkFiles = checkUnitConfig "Files" [
+    (assertOnlyFields [
+      "ReadOnly" "Volatile" "Bind" "BindReadOnly" "TemporaryFileSystem"
+      "Overlay" "OverlayReadOnly" "PrivateUsersChown" "BindUser"
+      "Inaccessible" "PrivateUsersOwnership"
+    ])
+    (assertValueOneOf "ReadOnly" boolValues)
+    (assertValueOneOf "Volatile" (boolValues ++ [ "state" ]))
+    (assertValueOneOf "PrivateUsersChown" boolValues)
+    (assertValueOneOf "PrivateUsersOwnership" [ "off" "chown" "map" "auto" ])
+  ];
+
+  checkNetwork = checkUnitConfig "Network" [
+    (assertOnlyFields [
+      "Private" "VirtualEthernet" "VirtualEthernetExtra" "Interface" "MACVLAN"
+      "IPVLAN" "Bridge" "Zone" "Port"
+    ])
+    (assertValueOneOf "Private" boolValues)
+    (assertValueOneOf "VirtualEthernet" boolValues)
+  ];
+
+  instanceOptions = {
+    options =
+    (getAttrs [ "enable" ] sharedOptions)
+    // {
+      execConfig = mkOption {
+        default = {};
+        example = { Parameters = "/bin/sh"; };
+        type = types.addCheck (types.attrsOf unitOption) checkExec;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Exec]` section of this unit. See
+          {manpage}`systemd.nspawn(5)` for details.
+        '';
+      };
+
+      filesConfig = mkOption {
+        default = {};
+        example = { Bind = [ "/home/alice" ]; };
+        type = types.addCheck (types.attrsOf unitOption) checkFiles;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Files]` section of this unit. See
+          {manpage}`systemd.nspawn(5)` for details.
+        '';
+      };
+
+      networkConfig = mkOption {
+        default = {};
+        example = { Private = false; };
+        type = types.addCheck (types.attrsOf unitOption) checkNetwork;
+        description = lib.mdDoc ''
+          Each attribute in this set specifies an option in the
+          `[Network]` section of this unit. See
+          {manpage}`systemd.nspawn(5)` for details.
+        '';
+      };
+    };
+
+  };
+
+  instanceToUnit = name: def:
+    let base = {
+      text = ''
+        [Exec]
+        ${attrsToSection def.execConfig}
+
+        [Files]
+        ${attrsToSection def.filesConfig}
+
+        [Network]
+        ${attrsToSection def.networkConfig}
+      '';
+    } // def;
+    in base // { unit = makeUnit name base; };
+
+in {
+
+  options = {
+
+    systemd.nspawn = mkOption {
+      default = {};
+      type = with types; attrsOf (submodule instanceOptions);
+      description = lib.mdDoc "Definition of systemd-nspawn configurations.";
+    };
+
+  };
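+
+  # A usage sketch (the container name and settings are illustrative), assuming
+  # an OS tree already exists under /var/lib/machines/mycontainer:
+  #   systemd.nspawn.mycontainer = {
+  #     execConfig.Boot = true;
+  #     filesConfig.BindReadOnly = [ "/nix/store" ];
+  #     networkConfig.Private = false;
+  #   };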
+
+  config =
+    let
+      units = mapAttrs' (n: v: let nspawnFile = "${n}.nspawn"; in nameValuePair nspawnFile (instanceToUnit nspawnFile v)) cfg;
+    in
+      mkMerge [
+        (mkIf (cfg != {}) {
+          environment.etc."systemd/nspawn".source = mkIf (cfg != {}) (generateUnits {
+            allowCollisions = false;
+            type = "nspawn";
+            inherit units;
+            upstreamUnits = [];
+            upstreamWants = [];
+          });
+        })
+        {
+          systemd.targets.multi-user.wants = [ "machines.target" ];
+        }
+      ];
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/oomd.nix b/nixpkgs/nixos/modules/system/boot/systemd/oomd.nix
new file mode 100644
index 000000000000..fad755e278c7
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/oomd.nix
@@ -0,0 +1,57 @@
+{ config, lib, ... }: let
+
+  cfg = config.systemd.oomd;
+
+in {
+  options.systemd.oomd = {
+    enable = lib.mkEnableOption (lib.mdDoc "the `systemd-oomd` OOM killer") // { default = true; };
+
+    # Fedora enables the first and third option by default. See the 10-oomd-* files here:
+    # https://src.fedoraproject.org/rpms/systemd/tree/acb90c49c42276b06375a66c73673ac351025597
+    enableRootSlice = lib.mkEnableOption (lib.mdDoc "oomd on the root slice (`-.slice`)");
+    enableSystemSlice = lib.mkEnableOption (lib.mdDoc "oomd on the system slice (`system.slice`)");
+    enableUserServices = lib.mkEnableOption (lib.mdDoc "oomd on all user services (`user@.service`)");
+
+    extraConfig = lib.mkOption {
+      type = with lib.types; attrsOf (oneOf [ str int bool ]);
+      default = {};
+      example = lib.literalExpression ''{ DefaultMemoryPressureDurationSec = "20s"; }'';
+      description = lib.mdDoc ''
+        Extra config options for `systemd-oomd`. See {command}`man oomd.conf`
+        for available options.
+      '';
+    };
+  };
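+
+  # A usage sketch mirroring the Fedora defaults referenced above (illustrative):
+  #   systemd.oomd.enableRootSlice = true;
+  #   systemd.oomd.enableUserServices = true;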
+
+  config = lib.mkIf cfg.enable {
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-oomd.service"
+      "systemd-oomd.socket"
+    ];
+    systemd.services.systemd-oomd.wantedBy = [ "multi-user.target" ];
+
+    environment.etc."systemd/oomd.conf".text = lib.generators.toINI {} {
+      OOM = cfg.extraConfig;
+    };
+
+    systemd.oomd.extraConfig.DefaultMemoryPressureDurationSec = lib.mkDefault "20s"; # Fedora default
+
+    users.users.systemd-oom = {
+      description = "systemd-oomd service user";
+      group = "systemd-oom";
+      isSystemUser = true;
+    };
+    users.groups.systemd-oom = { };
+
+    systemd.slices."-".sliceConfig = lib.mkIf cfg.enableRootSlice {
+      ManagedOOMSwap = "kill";
+    };
+    systemd.slices."system".sliceConfig = lib.mkIf cfg.enableSystemSlice {
+      ManagedOOMSwap = "kill";
+    };
+    systemd.services."user@".serviceConfig = lib.mkIf cfg.enableUserServices {
+      ManagedOOMMemoryPressure = "kill";
+      ManagedOOMMemoryPressureLimit = "50%";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/repart.nix b/nixpkgs/nixos/modules/system/boot/systemd/repart.nix
new file mode 100644
index 000000000000..5ac2ace56ba0
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/repart.nix
@@ -0,0 +1,148 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+  cfg = config.systemd.repart;
+  initrdCfg = config.boot.initrd.systemd.repart;
+
+  format = pkgs.formats.ini { };
+
+  definitionsDirectory = utils.systemdUtils.lib.definitions
+    "repart.d"
+    format
+    (lib.mapAttrs (_n: v: { Partition = v; }) cfg.partitions);
+in
+{
+  options = {
+    boot.initrd.systemd.repart = {
+      enable = lib.mkEnableOption (lib.mdDoc "systemd-repart") // {
+        description = lib.mdDoc ''
+          Grow and add partitions to a partition table at boot time in the initrd.
+          systemd-repart only works with GPT partition tables.
+
+          To run systemd-repart after the initrd, see
+          `options.systemd.repart.enable`.
+        '';
+      };
+
+      device = lib.mkOption {
+        type = with lib.types; nullOr str;
+        description = lib.mdDoc ''
+          The device to operate on.
+
+          If `device == null`, systemd-repart will operate on the device
+          backing the root partition. So in order to dynamically *create* the
+          root partition in the initrd you need to set a device.
+        '';
+        default = null;
+        example = "/dev/vda";
+      };
+    };
+
+    systemd.repart = {
+      enable = lib.mkEnableOption (lib.mdDoc "systemd-repart") // {
+        description = lib.mdDoc ''
+          Grow and add partitions to a partition table.
+          systemd-repart only works with GPT partition tables.
+
+          To run systemd-repart while in the initrd, see
+          `options.boot.initrd.systemd.repart.enable`.
+        '';
+      };
+
+      partitions = lib.mkOption {
+        type = with lib.types; attrsOf (attrsOf (oneOf [ str int bool ]));
+        default = { };
+        example = {
+          "10-root" = {
+            Type = "root";
+          };
+          "20-home" = {
+            Type = "home";
+            SizeMinBytes = "512M";
+            SizeMaxBytes = "2G";
+          };
+        };
+        description = lib.mdDoc ''
+          Specify partitions as a set of the names of the definition files as the
+          key and the partition configuration as its value. The partition
+          configuration can use all upstream options. See
+          <https://www.freedesktop.org/software/systemd/man/repart.d.html>
+          for all available options.
+        '';
+      };
+    };
+  };
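+
+  # A usage sketch (partition sizes are illustrative): grow the root partition
+  # from the initrd on first boot:
+  #   boot.initrd.systemd.repart.enable = true;
+  #   systemd.repart.partitions."10-root" = {
+  #     Type = "root";
+  #     SizeMinBytes = "8G";
+  #   };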
+
+  config = lib.mkIf (cfg.enable || initrdCfg.enable) {
+    assertions = [
+      {
+        assertion = initrdCfg.enable -> config.boot.initrd.systemd.enable;
+        message = ''
+          'boot.initrd.systemd.repart.enable' requires 'boot.initrd.systemd.enable' to be enabled.
+        '';
+      }
+    ];
+
+    boot.initrd.systemd = lib.mkIf initrdCfg.enable {
+      additionalUpstreamUnits = [
+        "systemd-repart.service"
+      ];
+
+      storePaths = [
+        "${config.boot.initrd.systemd.package}/bin/systemd-repart"
+      ];
+
+      contents."/etc/repart.d".source = definitionsDirectory;
+
+      # Override defaults in upstream unit.
+      services.systemd-repart =
+        let
+          deviceUnit = "${utils.escapeSystemdPath initrdCfg.device}.device";
+        in
+        {
+          # systemd-repart tries to create directories in /var/tmp by default to
+          # store large temporary files that benefit from persistence on disk. In
+          # the initrd, however, /var/tmp does not provide more persistence than
+          # /tmp, so we re-use it here.
+          environment."TMPDIR" = "/tmp";
+          serviceConfig = {
+            ExecStart = [
+              " " # required to unset the previous value.
+              # When running in the initrd, systemd-repart by default searches
+              # for definition files in /sysroot or /sysusr. We tell it to look
+              # in the initrd itself.
+              ''${config.boot.initrd.systemd.package}/bin/systemd-repart \
+                  --definitions=/etc/repart.d \
+                  --dry-run=no ${lib.optionalString (initrdCfg.device != null) initrdCfg.device}
+              ''
+            ];
+          };
+          # systemd-repart needs to run after /sysroot (or /sysusr, but we
+          # don't have it) has been mounted because otherwise it cannot
+          # determine the device (i.e. the disk) to operate on. If you want to run
+          # systemd-repart without /sysroot (i.e. to create the root
+          # partition), you have to explicitly tell it which device to operate
+          # on. The service then needs to be ordered to run after this device
+          # is available.
+          requires = lib.mkIf (initrdCfg.device != null) [ deviceUnit ];
+          after =
+            if initrdCfg.device == null then
+              [ "sysroot.mount" ]
+            else
+              [ deviceUnit ];
+        };
+    };
+
+    environment.etc = lib.mkIf cfg.enable {
+      "repart.d".source = definitionsDirectory;
+    };
+
+    systemd = lib.mkIf cfg.enable {
+      additionalUpstreamSystemUnits = [
+        "systemd-repart.service"
+      ];
+    };
+  };
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/shutdown.nix b/nixpkgs/nixos/modules/system/boot/systemd/shutdown.nix
new file mode 100644
index 000000000000..d7300e940af2
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/shutdown.nix
@@ -0,0 +1,66 @@
+{ config, lib, utils, pkgs, ... }: let
+
+  cfg = config.systemd.shutdownRamfs;
+
+  ramfsContents = let
+    storePaths = map (p: "${p}\n") cfg.storePaths;
+    contents = lib.mapAttrsToList (_: v: "${v.source}\n${v.target}") (lib.filterAttrs (_: v: v.enable) cfg.contents);
+  in pkgs.writeText "shutdown-ramfs-contents" (lib.concatStringsSep "\n" (storePaths ++ contents));
+
+in {
+  options.systemd.shutdownRamfs = {
+    enable = lib.mkEnableOption (lib.mdDoc "pivoting back to an initramfs for shutdown") // { default = true; };
+    contents = lib.mkOption {
+      description = lib.mdDoc "Set of files that have to be linked into the shutdown ramfs";
+      example = lib.literalExpression ''
+        {
+          "/lib/systemd/system-shutdown/zpool-sync-shutdown".source = writeShellScript "zpool" "exec ''${zfs}/bin/zpool sync"
+        }
+      '';
+      type = utils.systemdUtils.types.initrdContents;
+    };
+
+    storePaths = lib.mkOption {
+      description = lib.mdDoc ''
+        Store paths to copy into the shutdown ramfs as well.
+      '';
+      type = lib.types.listOf lib.types.singleLineStr;
+      default = [];
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    systemd.shutdownRamfs.contents = {
+      "/shutdown".source = "${config.systemd.package}/lib/systemd/systemd-shutdown";
+      "/etc/initrd-release".source = config.environment.etc.os-release.source;
+      "/etc/os-release".source = config.environment.etc.os-release.source;
+    };
+    systemd.shutdownRamfs.storePaths = [pkgs.runtimeShell "${pkgs.coreutils}/bin"];
+
+    systemd.mounts = [{
+      what = "tmpfs";
+      where = "/run/initramfs";
+      type = "tmpfs";
+    }];
+
+    systemd.services.generate-shutdown-ramfs = {
+      description = "Generate shutdown ramfs";
+      wantedBy = [ "shutdown.target" ];
+      before = [ "shutdown.target" ];
+      unitConfig = {
+        DefaultDependencies = false;
+        RequiresMountsFor = "/run/initramfs";
+        ConditionFileIsExecutable = [
+          "!/run/initramfs/shutdown"
+        ];
+      };
+
+      serviceConfig = {
+        Type = "oneshot";
+        ProtectSystem = "strict";
+        ReadWritePaths = "/run/initramfs";
+        ExecStart = "${pkgs.makeInitrdNGTool}/bin/make-initrd-ng ${ramfsContents} /run/initramfs";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/sysupdate.nix b/nixpkgs/nixos/modules/system/boot/systemd/sysupdate.nix
new file mode 100644
index 000000000000..b1914a9c4e76
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/sysupdate.nix
@@ -0,0 +1,136 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+  cfg = config.systemd.sysupdate;
+
+  format = pkgs.formats.ini { };
+
+  definitionsDirectory = utils.systemdUtils.lib.definitions
+    "sysupdate.d"
+    format
+    cfg.transfers;
+in
+{
+  options.systemd.sysupdate = {
+
+    enable = lib.mkEnableOption (lib.mdDoc "systemd-sysupdate") // {
+      description = lib.mdDoc ''
+        Atomically update the host OS, container images, portable service
+        images or other sources.
+
+        If enabled, updates are triggered in regular intervals via a
+        `systemd.timer` unit.
+
+        Please see
+        <https://www.freedesktop.org/software/systemd/man/systemd-sysupdate.html>
+        for more details.
+      '';
+    };
+
+    timerConfig = utils.systemdUtils.unitOptions.timerOptions.options.timerConfig // {
+      default = { };
+      description = lib.mdDoc ''
+        The timer configuration for performing the update.
+
+        By default, the upstream configuration is used:
+        <https://github.com/systemd/systemd/blob/main/units/systemd-sysupdate.timer>
+      '';
+    };
+
+    reboot = {
+      enable = lib.mkEnableOption (lib.mdDoc "automatically rebooting after an update") // {
+        description = lib.mdDoc ''
+          Whether to automatically reboot after an update.
+
+          If set to `true`, the system will automatically reboot via a
+          `systemd.timer` unit but only after a new version was installed.
+
+          This uses a unit completely separate from the one performing the
+          update because it is typically advisable to download updates
+          regularly while the system is up, but delay reboots until the
+          appropriate time (i.e. typically at night).
+
+          Set this to `false` if you do not want to reboot after an update. This
+          is useful when you update a container image or another source where
+          rebooting is not necessary in order to finalize the update.
+        '';
+      };
+
+      timerConfig = utils.systemdUtils.unitOptions.timerOptions.options.timerConfig // {
+        default = { };
+        description = lib.mdDoc ''
+          The timer configuration for rebooting after an update.
+
+          By default, the upstream configuration is used:
+          <https://github.com/systemd/systemd/blob/main/units/systemd-sysupdate-reboot.timer>
+        '';
+      };
+    };
+
+    transfers = lib.mkOption {
+      type = with lib.types; attrsOf format.type;
+      default = { };
+      example = {
+        "10-uki.conf" = {
+          Transfer = {
+            ProtectVersion = "%A";
+          };
+
+          Source = {
+            Type = "url-file";
+            Path = "https://download.example.com/";
+            MatchPattern = "nixos_@v.efi.xz";
+          };
+
+          Target = {
+            Type = "regular-file";
+            Path = "/EFI/Linux";
+            PathRelativeTo = "boot";
+            MatchPattern = ''
+              nixos_@v+@l-@d.efi \
+              nixos_@v+@l.efi \
+              nixos_@v.efi
+            '';
+            Mode = "0444";
+            TriesLeft = 3;
+            TriesDone = 0;
+            InstancesMax = 2;
+          };
+        };
+      };
+      description = lib.mdDoc ''
+        Specify transfers as a set of the names of the transfer files as the
+        key and the configuration as its value. The configuration can use all
+        upstream options. See
+        <https://www.freedesktop.org/software/systemd/man/sysupdate.d.html>
+        for all available options.
+      '';
+    };
+
+  };
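+
+  # A usage sketch (timer values are illustrative): check for updates daily and
+  # reboot at night once a new version has been installed:
+  #   systemd.sysupdate.enable = true;
+  #   systemd.sysupdate.timerConfig.OnCalendar = "daily";
+  #   systemd.sysupdate.reboot.enable = true;
+  #   systemd.sysupdate.reboot.timerConfig.OnCalendar = "03:00";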
+
+  config = lib.mkIf cfg.enable {
+
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-sysupdate.service"
+      "systemd-sysupdate.timer"
+      "systemd-sysupdate-reboot.service"
+      "systemd-sysupdate-reboot.timer"
+    ];
+
+    systemd.timers = {
+      "systemd-sysupdate" = {
+        wantedBy = [ "timers.target" ];
+        timerConfig = cfg.timerConfig;
+      };
+      "systemd-sysupdate-reboot" = lib.mkIf cfg.reboot.enable {
+        wantedBy = [ "timers.target" ];
+        timerConfig = cfg.reboot.timerConfig;
+      };
+    };
+
+    environment.etc."sysupdate.d".source = definitionsDirectory;
+  };
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/tmpfiles.nix b/nixpkgs/nixos/modules/system/boot/systemd/tmpfiles.nix
new file mode 100644
index 000000000000..f7ef45aab3c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/tmpfiles.nix
@@ -0,0 +1,225 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+  cfg = config.systemd.tmpfiles;
+  systemd = config.systemd.package;
+in
+{
+  options = {
+    systemd.tmpfiles.rules = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = [ "d /tmp 1777 root root 10d" ];
+      description = lib.mdDoc ''
+        Rules for creation, deletion and cleaning of volatile and temporary files
+        automatically. See
+        {manpage}`tmpfiles.d(5)`
+        for the exact format.
+      '';
+    };
+
+    systemd.tmpfiles.settings = mkOption {
+      description = lib.mdDoc ''
+        Declare systemd-tmpfiles rules to create, delete, and clean up volatile
+        and temporary files and directories.
+
+        Even though the service is called `*tmp*files`, you can also create
+        persistent files.
+      '';
+      example = {
+        "10-mypackage" = {
+          "/var/lib/my-service/statefolder".d = {
+            mode = "0755";
+            user = "root";
+            group = "root";
+          };
+        };
+      };
+      default = {};
+      type = types.attrsOf (types.attrsOf (types.attrsOf (types.submodule ({ name, config, ... }: {
+        options.type = mkOption {
+          type = types.str;
+          default = name;
+          example = "d";
+          description = lib.mdDoc ''
+            The type of operation to perform on the file.
+
+            The type consists of a single letter and optionally one or more
+            modifier characters.
+
+            Please see the upstream documentation for the available types and
+            more details:
+            <https://www.freedesktop.org/software/systemd/man/tmpfiles.d>
+          '';
+        };
+        options.mode = mkOption {
+          type = types.str;
+          default = "-";
+          example = "0755";
+          description = lib.mdDoc ''
+            The file access mode to use when creating this file or directory.
+          '';
+        };
+        options.user = mkOption {
+          type = types.str;
+          default = "-";
+          example = "root";
+          description = lib.mdDoc ''
+            The user of the file.
+
+            This may either be a numeric ID or a user/group name.
+
+            If omitted or when set to `"-"`, the user and group of the user who
+            invokes systemd-tmpfiles is used.
+          '';
+        };
+        options.group = mkOption {
+          type = types.str;
+          default = "-";
+          example = "root";
+          description = lib.mdDoc ''
+            The group of the file.
+
+            This may either be a numeric ID or a user/group name.
+
+            If omitted or when set to `"-"`, the user and group of the user who
+            invokes systemd-tmpfiles is used.
+          '';
+        };
+        options.age = mkOption {
+          type = types.str;
+          default = "-";
+          example = "10d";
+          description = lib.mdDoc ''
+            Delete a file when it reaches a certain age.
+
+            If a file or directory is older than the current time minus the age
+            field, it is deleted.
+
+            If set to `"-"` no automatic clean-up is done.
+          '';
+        };
+        options.argument = mkOption {
+          type = types.str;
+          default = "";
+          example = "";
+          description = lib.mdDoc ''
+            An argument whose meaning depends on the type of operation.
+
+            Please see the upstream documentation for the meaning of this
+            parameter in different situations:
+            <https://www.freedesktop.org/software/systemd/man/tmpfiles.d>
+          '';
+        };
+      }))));
+    };
+
+    systemd.tmpfiles.packages = mkOption {
+      type = types.listOf types.package;
+      default = [];
+      example = literalExpression "[ pkgs.lvm2 ]";
+      apply = map getLib;
+      description = lib.mdDoc ''
+        List of packages containing {command}`systemd-tmpfiles` rules.
+
+        All files ending in .conf found in
+        {file}`«pkg»/lib/tmpfiles.d`
+        will be included.
+        If this folder does not exist or does not contain any files, an error will be raised instead.
+
+        If a {file}`lib` output is available, rules are searched there and only there.
+        If there is no {file}`lib` output it will fall back to {file}`out`
+        and if that does not exist either, the default output will be used.
+      '';
+    };
+  };
+
+  config = {
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-tmpfiles-clean.service"
+      "systemd-tmpfiles-clean.timer"
+      "systemd-tmpfiles-setup.service"
+      "systemd-tmpfiles-setup-dev.service"
+    ];
+
+    systemd.additionalUpstreamUserUnits = [
+      "systemd-tmpfiles-clean.service"
+      "systemd-tmpfiles-clean.timer"
+      "systemd-tmpfiles-setup.service"
+    ];
+
+    environment.etc = {
+      "tmpfiles.d".source = (pkgs.symlinkJoin {
+        name = "tmpfiles.d";
+        paths = map (p: p + "/lib/tmpfiles.d") cfg.packages;
+        postBuild = ''
+          for i in $(cat $pathsPath); do
+            (test -d "$i" && test $(ls "$i"/*.conf | wc -l) -ge 1) || (
+              echo "ERROR: The path '$i' from systemd.tmpfiles.packages contains no *.conf files."
+              exit 1
+            )
+          done
+        '' + concatMapStrings (name: optionalString (hasPrefix "tmpfiles.d/" name) ''
+          rm -f $out/${removePrefix "tmpfiles.d/" name}
+        '') config.system.build.etc.passthru.targets;
+      }) + "/*";
+    };
+
+    systemd.tmpfiles.packages = [
+      # Default tmpfiles rules provided by systemd
+      (pkgs.runCommand "systemd-default-tmpfiles" {} ''
+        mkdir -p $out/lib/tmpfiles.d
+        cd $out/lib/tmpfiles.d
+
+        # home.conf creates /srv (which we don't want), and /home, which
+        # is handled by NixOS anyway.
+        # ln -s "${systemd}/example/tmpfiles.d/home.conf"
+        ln -s "${systemd}/example/tmpfiles.d/journal-nocow.conf"
+        ln -s "${systemd}/example/tmpfiles.d/portables.conf"
+        ln -s "${systemd}/example/tmpfiles.d/static-nodes-permissions.conf"
+        ln -s "${systemd}/example/tmpfiles.d/systemd.conf"
+        ln -s "${systemd}/example/tmpfiles.d/systemd-nologin.conf"
+        ln -s "${systemd}/example/tmpfiles.d/systemd-nspawn.conf"
+        ln -s "${systemd}/example/tmpfiles.d/systemd-tmp.conf"
+        ln -s "${systemd}/example/tmpfiles.d/tmp.conf"
+        ln -s "${systemd}/example/tmpfiles.d/var.conf"
+        ln -s "${systemd}/example/tmpfiles.d/x11.conf"
+      '')
+      # User-specified tmpfiles rules
+      (pkgs.writeTextFile {
+        name = "nixos-tmpfiles.d";
+        destination = "/lib/tmpfiles.d/00-nixos.conf";
+        text = ''
+          # This file is created automatically and should not be modified.
+          # Please change the option ‘systemd.tmpfiles.rules’ instead.
+
+          ${concatStringsSep "\n" cfg.rules}
+        '';
+      })
+    ] ++ (mapAttrsToList (name: paths:
+      pkgs.writeTextDir "lib/tmpfiles.d/${name}.conf" (concatStrings (mapAttrsToList (path: types:
+        concatStrings (mapAttrsToList (_type: entry: ''
+          '${entry.type}' '${path}' '${entry.mode}' '${entry.user}' '${entry.group}' '${entry.age}' ${entry.argument}
+        '') types)
+      ) paths ))
+    ) cfg.settings);
+
+    systemd.tmpfiles.rules = [
+      "d  /nix/var                           0755 root root - -"
+      "L+ /nix/var/nix/gcroots/booted-system 0755 root root - /run/booted-system"
+      "d  /run/lock                          0755 root root - -"
+      "d  /var/db                            0755 root root - -"
+      "L  /etc/mtab                          -    -    -    - ../proc/mounts"
+      "L  /var/lock                          -    -    -    - ../run/lock"
+      # Boot-time cleanup
+      "R! /etc/group.lock                    -    -    -    - -"
+      "R! /etc/passwd.lock                   -    -    -    - -"
+      "R! /etc/shadow.lock                   -    -    -    - -"
+      "R! /etc/mtab*                         -    -    -    - -"
+      "R! /nix/var/nix/gcroots/tmp           -    -    -    - -"
+      "R! /nix/var/nix/temproots             -    -    -    - -"
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/user.nix b/nixpkgs/nixos/modules/system/boot/systemd/user.nix
new file mode 100644
index 000000000000..64dc19633eca
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/user.nix
@@ -0,0 +1,238 @@
+{ config, lib, pkgs, utils, ... }:
+with utils;
+with systemdUtils.unitOptions;
+with lib;
+
+let
+  cfg = config.systemd.user;
+
+  systemd = config.systemd.package;
+
+  inherit
+    (systemdUtils.lib)
+    makeUnit
+    generateUnits
+    targetToUnit
+    serviceToUnit
+    sliceToUnit
+    socketToUnit
+    timerToUnit
+    pathToUnit;
+
+  upstreamUserUnits = [
+    "app.slice"
+    "background.slice"
+    "basic.target"
+    "bluetooth.target"
+    "default.target"
+    "exit.target"
+    "graphical-session-pre.target"
+    "graphical-session.target"
+    "paths.target"
+    "printer.target"
+    "session.slice"
+    "shutdown.target"
+    "smartcard.target"
+    "sockets.target"
+    "sound.target"
+    "systemd-exit.service"
+    "timers.target"
+    "xdg-desktop-autostart.target"
+  ] ++ config.systemd.additionalUpstreamUserUnits;
+
+  writeTmpfiles = { rules, user ? null }:
+    let
+      suffix = optionalString (user != null) "-${user}";
+    in
+    pkgs.writeTextFile {
+      name = "nixos-user-tmpfiles.d${suffix}";
+      destination = "/etc/xdg/user-tmpfiles.d/00-nixos${suffix}.conf";
+      text = ''
+        # This file is created automatically and should not be modified.
+        # Please change the option ‘systemd.user.tmpfiles’ instead.
+        ${concatStringsSep "\n" rules}
+      '';
+    };
+in {
+  options = {
+    systemd.user.extraConfig = mkOption {
+      default = "";
+      type = types.lines;
+      example = "DefaultCPUAccounting=yes";
+      description = lib.mdDoc ''
+        Extra config options for systemd user instances. See {manpage}`systemd-user.conf(5)` for
+        available options.
+      '';
+    };
+
+    systemd.user.units = mkOption {
+      description = lib.mdDoc "Definition of systemd per-user units.";
+      default = {};
+      type = systemdUtils.types.units;
+    };
+
+    systemd.user.paths = mkOption {
+      default = {};
+      type = systemdUtils.types.paths;
+      description = lib.mdDoc "Definition of systemd per-user path units.";
+    };
+
+    systemd.user.services = mkOption {
+      default = {};
+      type = systemdUtils.types.services;
+      description = lib.mdDoc "Definition of systemd per-user service units.";
+    };
+
+    systemd.user.slices = mkOption {
+      default = {};
+      type = systemdUtils.types.slices;
+      description = lib.mdDoc "Definition of systemd per-user slice units.";
+    };
+
+    systemd.user.sockets = mkOption {
+      default = {};
+      type = systemdUtils.types.sockets;
+      description = lib.mdDoc "Definition of systemd per-user socket units.";
+    };
+
+    systemd.user.targets = mkOption {
+      default = {};
+      type = systemdUtils.types.targets;
+      description = lib.mdDoc "Definition of systemd per-user target units.";
+    };
+
+    systemd.user.timers = mkOption {
+      default = {};
+      type = systemdUtils.types.timers;
+      description = lib.mdDoc "Definition of systemd per-user timer units.";
+    };
+
+    systemd.user.tmpfiles = {
+      rules = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "D %C - - - 7d" ];
+        description = lib.mdDoc ''
+          Global user rules for creation, deletion and cleaning of volatile and
+          temporary files automatically. See
+          {manpage}`tmpfiles.d(5)`
+          for the exact format.
+        '';
+      };
+
+      users = mkOption {
+        description = mdDoc ''
+          Per-user rules for creation, deletion and cleaning of volatile and
+          temporary files automatically.
+        '';
+        default = {};
+        type = types.attrsOf (types.submodule {
+          options = {
+            rules = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "D %C - - - 7d" ];
+              description = mdDoc ''
+                Per-user rules for the automatic creation, deletion and cleaning
+                of volatile and temporary files. See
+                {manpage}`tmpfiles.d(5)`
+                for the exact format.
+              '';
+            };
+          };
+        });
+      };
+    };
+
+    systemd.additionalUpstreamUserUnits = mkOption {
+      default = [];
+      type = types.listOf types.str;
+      example = [];
+      description = lib.mdDoc ''
+        Additional units shipped with systemd that should be enabled for per-user systemd instances.
+      '';
+      internal = true;
+    };
+  };
+
+  config = {
+    systemd.additionalUpstreamSystemUnits = [
+      "user.slice"
+    ];
+
+    environment.etc = {
+      "systemd/user".source = generateUnits {
+        type = "user";
+        inherit (cfg) units;
+        upstreamUnits = upstreamUserUnits;
+        upstreamWants = [];
+      };
+
+      "systemd/user.conf".text = ''
+        [Manager]
+        ${cfg.extraConfig}
+      '';
+    };
+
+    systemd.user.units =
+         mapAttrs' (n: v: nameValuePair "${n}.path"    (pathToUnit    n v)) cfg.paths
+      // mapAttrs' (n: v: nameValuePair "${n}.service" (serviceToUnit n v)) cfg.services
+      // mapAttrs' (n: v: nameValuePair "${n}.slice"   (sliceToUnit   n v)) cfg.slices
+      // mapAttrs' (n: v: nameValuePair "${n}.socket"  (socketToUnit  n v)) cfg.sockets
+      // mapAttrs' (n: v: nameValuePair "${n}.target"  (targetToUnit  n v)) cfg.targets
+      // mapAttrs' (n: v: nameValuePair "${n}.timer"   (timerToUnit   n v)) cfg.timers;
+
+    # Generate timer units for all services that have a ‘startAt’ value.
+    systemd.user.timers =
+      mapAttrs (name: service: {
+        wantedBy = ["timers.target"];
+        timerConfig.OnCalendar = service.startAt;
+      })
+      (filterAttrs (name: service: service.startAt != []) cfg.services);
+
+    # Provide the systemd-user PAM service, required to run systemd
+    # user instances.
+    security.pam.services.systemd-user =
+      { # Ensure that pam_systemd gets included. This is special-cased
+        # in systemd to provide XDG_RUNTIME_DIR.
+        startSession = true;
+        # Disable pam_mount in systemd-user to prevent it from being called
+        # multiple times during login, because it will prevent pam_mount from
+        # unmounting the previously mounted volumes.
+        pamMount = false;
+      };
+
+    # Some overrides to upstream units.
+    systemd.services."user@".restartIfChanged = false;
+    systemd.services.systemd-user-sessions.restartIfChanged = false; # Restart kills all active sessions.
+
+    # enable systemd user tmpfiles
+    systemd.user.services.systemd-tmpfiles-setup.wantedBy =
+      optional
+        (cfg.tmpfiles.rules != [] || any (cfg': cfg'.rules != []) (attrValues cfg.tmpfiles.users))
+        "basic.target";
+
+    # /run/current-system/sw/etc/xdg is in systemd's $XDG_CONFIG_DIRS so we can
+    # write the tmpfiles.d rules for everyone there
+    environment.systemPackages =
+      optional
+        (cfg.tmpfiles.rules != [])
+        (writeTmpfiles { inherit (cfg.tmpfiles) rules; });
+
+    # /etc/profiles/per-user/$USER/etc/xdg is in systemd's $XDG_CONFIG_DIRS so
+    # we can write a single user's tmpfiles.d rules there
+    users.users =
+      mapAttrs
+        (user: cfg': {
+          packages = optional (cfg'.rules != []) (writeTmpfiles {
+            inherit (cfg') rules;
+            inherit user;
+          });
+        })
+        cfg.tmpfiles.users;
+
+    system.userActivationScripts.tmpfiles = ''
+      ${config.systemd.package}/bin/systemd-tmpfiles --user --create --remove
+    '';
+  };
+}
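As a usage sketch (not part of the patch; the service name, command, and tmpfiles rule below are hypothetical), a configuration could exercise the per-user options defined above roughly like this:

    {
      systemd.user.services.cache-sync = {
        description = "Hypothetical per-user job";
        wantedBy = [ "default.target" ];
        # A non-empty startAt also generates a matching per-user timer unit.
        startAt = "hourly";
        serviceConfig.ExecStart = "/run/current-system/sw/bin/true";
      };

      # Clean a per-user cache directory after 7 days (syntax per tmpfiles.d(5)).
      systemd.user.tmpfiles.users.alice.rules = [ "D %C/cache-sync - - - 7d" ];
    }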
diff --git a/nixpkgs/nixos/modules/system/boot/systemd/userdbd.nix b/nixpkgs/nixos/modules/system/boot/systemd/userdbd.nix
new file mode 100644
index 000000000000..e7f6d42341c4
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/systemd/userdbd.nix
@@ -0,0 +1,18 @@
+{ config, lib, ... }:
+
+let
+  cfg = config.services.userdbd;
+in
+{
+  options.services.userdbd.enable = lib.mkEnableOption (lib.mdDoc ''
+    the systemd JSON user/group record lookup service
+  '');
+  config = lib.mkIf cfg.enable {
+    systemd.additionalUpstreamSystemUnits = [
+      "systemd-userdbd.socket"
+      "systemd-userdbd.service"
+    ];
+
+    systemd.sockets.systemd-userdbd.wantedBy = [ "sockets.target" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/boot/timesyncd.nix b/nixpkgs/nixos/modules/system/boot/timesyncd.nix
new file mode 100644
index 000000000000..7487cf97fe53
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/timesyncd.nix
@@ -0,0 +1,85 @@
+{ config, lib, ... }:
+
+with lib;
+
+{
+
+  options = {
+
+    services.timesyncd = {
+      enable = mkOption {
+        default = !config.boot.isContainer;
+        defaultText = literalExpression "!config.boot.isContainer";
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enables the systemd NTP client daemon.
+        '';
+      };
+      servers = mkOption {
+        default = config.networking.timeServers;
+        defaultText = literalExpression "config.networking.timeServers";
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          The set of NTP servers from which to synchronise.
+        '';
+      };
+      extraConfig = mkOption {
+        default = "";
+        type = types.lines;
+        example = ''
+          PollIntervalMaxSec=180
+        '';
+        description = lib.mdDoc ''
+          Extra config options for systemd-timesyncd. See
+          [timesyncd.conf(5)](https://www.freedesktop.org/software/systemd/man/timesyncd.conf.html)
+          for available options.
+        '';
+      };
+    };
+  };
+
+  config = mkIf config.services.timesyncd.enable {
+
+    systemd.additionalUpstreamSystemUnits = [ "systemd-timesyncd.service" ];
+
+    systemd.services.systemd-timesyncd = {
+      wantedBy = [ "sysinit.target" ];
+      aliases = [ "dbus-org.freedesktop.timesync1.service" ];
+      restartTriggers = [ config.environment.etc."systemd/timesyncd.conf".source ];
+
+      preStart = (
+        # Ensure that we have some stored time to prevent
+        # systemd-timesyncd from resorting to the fallback time.  If
+        # the file doesn't exist we assume that our current system
+        # clock is good enough to provide an initial value.
+        ''
+          if ! [ -f /var/lib/systemd/timesync/clock ]; then
+            test -d /var/lib/systemd/timesync || mkdir -p /var/lib/systemd/timesync
+            touch /var/lib/systemd/timesync/clock
+          fi
+        '' +
+        # work around an issue of systemd-timesyncd not starting due to upstream systemd reverting their dynamic users changes
+        #  - https://github.com/NixOS/nixpkgs/pull/61321#issuecomment-492423742
+        #  - https://github.com/systemd/systemd/issues/12131
+        (lib.optionalString (versionOlder config.system.stateVersion "19.09") ''
+          if [ -L /var/lib/systemd/timesync ]; then
+            rm /var/lib/systemd/timesync
+            mv /var/lib/private/systemd/timesync /var/lib/systemd/timesync
+          fi
+        '')
+      );
+    };
+
+    environment.etc."systemd/timesyncd.conf".text = ''
+      [Time]
+      NTP=${concatStringsSep " " config.services.timesyncd.servers}
+      ${config.services.timesyncd.extraConfig}
+    '';
+
+    users.users.systemd-timesync = {
+      uid = config.ids.uids.systemd-timesync;
+      group = "systemd-timesync";
+    };
+    users.groups.systemd-timesync.gid = config.ids.gids.systemd-timesync;
+  };
+}
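A minimal usage sketch for the timesyncd options above (the server names are placeholders):

    {
      services.timesyncd = {
        enable = true;
        servers = [ "0.pool.ntp.org" "1.pool.ntp.org" ];
        extraConfig = ''
          PollIntervalMaxSec=180
        '';
      };
    }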
diff --git a/nixpkgs/nixos/modules/system/boot/tmp.nix b/nixpkgs/nixos/modules/system/boot/tmp.nix
new file mode 100644
index 000000000000..fd16cd3fba42
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/tmp.nix
@@ -0,0 +1,69 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.boot.tmp;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "boot" "cleanTmpDir" ] [ "boot" "tmp" "cleanOnBoot" ])
+    (mkRenamedOptionModule [ "boot" "tmpOnTmpfs" ] [ "boot" "tmp" "useTmpfs" ])
+    (mkRenamedOptionModule [ "boot" "tmpOnTmpfsSize" ] [ "boot" "tmp" "tmpfsSize" ])
+  ];
+
+  options = {
+    boot.tmp = {
+      cleanOnBoot = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to delete all files in {file}`/tmp` during boot.
+        '';
+      };
+
+      tmpfsSize = mkOption {
+        type = types.oneOf [ types.str types.ints.positive ];
+        default = "50%";
+        description = lib.mdDoc ''
+          Size of the tmpfs mounted on {file}`/tmp`. Percentages such as
+          `"50%"` are interpreted relative to physical RAM; other values are
+          passed verbatim to the tmpfs `size=` mount option.
+        '';
+      };
+
+      useTmpfs = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+           Whether to mount a tmpfs on {file}`/tmp` during boot.
+
+           ::: {.note}
+           Large Nix builds can fail if the mounted tmpfs is not large enough.
+           In such a case either increase the tmpfsSize or disable this option.
+           :::
+        '';
+      };
+    };
+  };
+
+  config = {
+    # When changing remember to update /tmp mount in virtualisation/qemu-vm.nix
+    systemd.mounts = mkIf cfg.useTmpfs [
+      {
+        what = "tmpfs";
+        where = "/tmp";
+        type = "tmpfs";
+        mountConfig.Options = concatStringsSep "," [
+          "mode=1777"
+          "strictatime"
+          "rw"
+          "nosuid"
+          "nodev"
+          "size=${toString cfg.tmpfsSize}"
+        ];
+      }
+    ];
+
+    systemd.tmpfiles.rules = optional cfg.cleanOnBoot "D! /tmp 1777 root root";
+  };
+}
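A usage sketch for the renamed boot.tmp options (the size value is an arbitrary example):

    {
      boot.tmp = {
        useTmpfs = true;   # mount /tmp as a tmpfs
        tmpfsSize = "2G";  # any value accepted by the tmpfs size= mount option
      };
    }

When /tmp is a tmpfs, cleanOnBoot is redundant, since the contents do not survive a reboot anyway.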
diff --git a/nixpkgs/nixos/modules/system/boot/uvesafb.nix b/nixpkgs/nixos/modules/system/boot/uvesafb.nix
new file mode 100644
index 000000000000..b10dc42887a1
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/boot/uvesafb.nix
@@ -0,0 +1,39 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.boot.uvesafb;
+  inherit (lib) mkIf mkEnableOption mkOption mdDoc types;
+in {
+  options = {
+    boot.uvesafb = {
+      enable = mkEnableOption (mdDoc "uvesafb");
+
+      gfx-mode = mkOption {
+        type = types.str;
+        default = "1024x768-32";
+        description = mdDoc "Screen resolution in modedb format. See [uvesafb](https://docs.kernel.org/fb/uvesafb.html) and [modedb](https://docs.kernel.org/fb/modedb.html) documentation for more details. The default value is a sensible default but may be not ideal for all setups.";
+      };
+
+      v86d.package = mkOption {
+        type = types.package;
+        description = mdDoc "Which v86d package to use with uvesafb";
+        defaultText = ''config.boot.kernelPackages.v86d.overrideAttrs (old: {
+          hardeningDisable = [ "all" ];
+        })'';
+        default = config.boot.kernelPackages.v86d.overrideAttrs (old: {
+          hardeningDisable = [ "all" ];
+        });
+      };
+    };
+  };
+  config = mkIf cfg.enable {
+    boot.initrd = {
+      kernelModules = [ "uvesafb" ];
+      extraFiles."/usr/v86d".source = cfg.v86d.package;
+    };
+
+    boot.kernelParams = [
+      "video=uvesafb:mode:${cfg.gfx-mode},mtrr:3,ywrap"
+      ''uvesafb.v86d="${cfg.v86d.package}/bin/v86d"''
+    ];
+  };
+}
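A usage sketch for the uvesafb options (the resolution is an arbitrary modedb-style value):

    {
      boot.uvesafb = {
        enable = true;
        gfx-mode = "1280x1024-32";
      };
    }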
diff --git a/nixpkgs/nixos/modules/system/build.nix b/nixpkgs/nixos/modules/system/build.nix
new file mode 100644
index 000000000000..41c0258a5a35
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/build.nix
@@ -0,0 +1,21 @@
+{ lib, ... }:
+let
+  inherit (lib) mkOption types;
+in
+{
+  options = {
+
+    system.build = mkOption {
+      default = {};
+      description = lib.mdDoc ''
+        Attribute set of derivations used to set up the system.
+      '';
+      type = types.submoduleWith {
+        modules = [{
+          freeformType = with types; lazyAttrsOf (uniq unspecified);
+        }];
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/etc/etc-activation.nix b/nixpkgs/nixos/modules/system/etc/etc-activation.nix
new file mode 100644
index 000000000000..780104950186
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/etc/etc-activation.nix
@@ -0,0 +1,12 @@
+{ config, lib, ... }:
+let
+  inherit (lib) stringAfter;
+in {
+
+  imports = [ ./etc.nix ];
+
+  config = {
+    system.activationScripts.etc =
+      stringAfter [ "users" "groups" ] config.system.build.etcActivationCommands;
+  };
+}
diff --git a/nixpkgs/nixos/modules/system/etc/etc.nix b/nixpkgs/nixos/modules/system/etc/etc.nix
new file mode 100644
index 000000000000..ea61e7384e60
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/etc/etc.nix
@@ -0,0 +1,201 @@
+# Management of static files in /etc.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  etc' = filter (f: f.enable) (attrValues config.environment.etc);
+
+  etc = pkgs.runCommandLocal "etc" {
+    # This is needed for the systemd module
+    passthru.targets = map (x: x.target) etc';
+  } /* sh */ ''
+    set -euo pipefail
+
+    makeEtcEntry() {
+      src="$1"
+      target="$2"
+      mode="$3"
+      user="$4"
+      group="$5"
+
+      if [[ "$src" = *'*'* ]]; then
+        # If the source name contains '*', perform globbing.
+        mkdir -p "$out/etc/$target"
+        for fn in $src; do
+            ln -s "$fn" "$out/etc/$target/"
+        done
+      else
+
+        mkdir -p "$out/etc/$(dirname "$target")"
+        if ! [ -e "$out/etc/$target" ]; then
+          ln -s "$src" "$out/etc/$target"
+        else
+          echo "duplicate entry $target -> $src"
+          if [ "$(readlink "$out/etc/$target")" != "$src" ]; then
+            echo "mismatched duplicate entry $(readlink "$out/etc/$target") <-> $src"
+            ret=1
+
+            continue
+          fi
+        fi
+
+        if [ "$mode" != symlink ]; then
+          echo "$mode" > "$out/etc/$target.mode"
+          echo "$user" > "$out/etc/$target.uid"
+          echo "$group" > "$out/etc/$target.gid"
+        fi
+      fi
+    }
+
+    mkdir -p "$out/etc"
+    ${concatMapStringsSep "\n" (etcEntry: escapeShellArgs [
+      "makeEtcEntry"
+      # Force local source paths to be added to the store
+      "${etcEntry.source}"
+      etcEntry.target
+      etcEntry.mode
+      etcEntry.user
+      etcEntry.group
+    ]) etc'}
+  '';
+
+in
+
+{
+
+  imports = [ ../build.nix ];
+
+  ###### interface
+
+  options = {
+
+    environment.etc = mkOption {
+      default = {};
+      example = literalExpression ''
+        { example-configuration-file =
+            { source = "/nix/store/.../etc/dir/file.conf.example";
+              mode = "0440";
+            };
+          "default/useradd".text = "GROUP=100 ...";
+        }
+      '';
+      description = lib.mdDoc ''
+        Set of files that have to be linked in {file}`/etc`.
+      '';
+
+      type = with types; attrsOf (submodule (
+        { name, config, options, ... }:
+        { options = {
+
+            enable = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Whether this /etc file should be generated.  This
+                option allows specific /etc files to be disabled.
+              '';
+            };
+
+            target = mkOption {
+              type = types.str;
+              description = lib.mdDoc ''
+                Name of symlink (relative to
+                {file}`/etc`).  Defaults to the attribute
+                name.
+              '';
+            };
+
+            text = mkOption {
+              default = null;
+              type = types.nullOr types.lines;
+              description = lib.mdDoc "Text of the file.";
+            };
+
+            source = mkOption {
+              type = types.path;
+              description = lib.mdDoc "Path of the source file.";
+            };
+
+            mode = mkOption {
+              type = types.str;
+              default = "symlink";
+              example = "0600";
+              description = lib.mdDoc ''
+                If set to something other than `symlink`,
+                the file is copied instead of symlinked, with the given
+                file mode.
+              '';
+            };
+
+            uid = mkOption {
+              default = 0;
+              type = types.int;
+              description = lib.mdDoc ''
+                UID of created file. Only takes effect when the file is
+                copied (that is, the mode is not 'symlink').
+              '';
+            };
+
+            gid = mkOption {
+              default = 0;
+              type = types.int;
+              description = lib.mdDoc ''
+                GID of created file. Only takes effect when the file is
+                copied (that is, the mode is not 'symlink').
+              '';
+            };
+
+            user = mkOption {
+              default = "+${toString config.uid}";
+              type = types.str;
+              description = lib.mdDoc ''
+                User name of created file.
+                Only takes effect when the file is copied (that is, the mode is not 'symlink').
+                Changing this option takes precedence over `uid`.
+              '';
+            };
+
+            group = mkOption {
+              default = "+${toString config.gid}";
+              type = types.str;
+              description = lib.mdDoc ''
+                Group name of created file.
+                Only takes effect when the file is copied (that is, the mode is not 'symlink').
+                Changing this option takes precedence over `gid`.
+              '';
+            };
+
+          };
+
+          config = {
+            target = mkDefault name;
+            source = mkIf (config.text != null) (
+              let name' = "etc-" + lib.replaceStrings ["/"] ["-"] name;
+              in mkDerivedConfig options.text (pkgs.writeText name')
+            );
+          };
+
+        }));
+
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    system.build.etc = etc;
+    system.build.etcActivationCommands =
+      ''
+        # Set up the statically computed bits of /etc.
+        echo "setting up /etc..."
+        ${pkgs.perl.withPackages (p: [ p.FileSlurp ])}/bin/perl ${./setup-etc.pl} ${etc}/etc
+      '';
+  };
+
+}
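A usage sketch for environment.etc (the path, contents, and ownership are hypothetical); with a non-symlink mode, the file is copied into /etc with the given mode, user, and group:

    {
      environment.etc."myapp/config.ini" = {
        text = ''
          [core]
          enabled = true
        '';
        mode = "0440";
        user = "root";
        group = "wheel";
      };
    }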
diff --git a/nixpkgs/nixos/modules/system/etc/setup-etc.pl b/nixpkgs/nixos/modules/system/etc/setup-etc.pl
new file mode 100644
index 000000000000..ea0a38308172
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/etc/setup-etc.pl
@@ -0,0 +1,159 @@
+use strict;
+use File::Find;
+use File::Copy;
+use File::Path;
+use File::Basename;
+use File::Slurp;
+
+my $etc = $ARGV[0] or die;
+my $static = "/etc/static";
+
+sub atomicSymlink {
+    my ($source, $target) = @_;
+    my $tmp = "$target.tmp";
+    unlink $tmp;
+    symlink $source, $tmp or return 0;
+    if (rename $tmp, $target) {
+        return 1;
+    } else {
+        unlink $tmp;
+        return 0;
+    }
+}
+
+
+# Atomically update /etc/static to point at the etc files of the
+# current configuration.
+atomicSymlink $etc, $static or die;
+
+# Returns 1 if the argument points to the files in /etc/static.  That
+# means either argument is a symlink to a file in /etc/static or a
+# directory with all children being static.
+sub isStatic {
+    my $path = shift;
+
+    if (-l $path) {
+        my $target = readlink $path;
+        return substr($target, 0, length "/etc/static/") eq "/etc/static/";
+    }
+
+    if (-d $path) {
+        opendir DIR, "$path" or return 0;
+        my @names = readdir DIR or die;
+        closedir DIR;
+
+        foreach my $name (@names) {
+            next if $name eq "." || $name eq "..";
+            unless (isStatic("$path/$name")) {
+                return 0;
+            }
+        }
+        return 1;
+    }
+
+    return 0;
+}
+
+# Remove dangling symlinks that point to /etc/static.  These are
+# configuration files that existed in a previous configuration but not
+# in the current one.  For efficiency, don't look under /etc/nixos
+# (where all the NixOS sources live).
+sub cleanup {
+    if ($File::Find::name eq "/etc/nixos") {
+        $File::Find::prune = 1;
+        return;
+    }
+    if (-l $_) {
+        my $target = readlink $_;
+        if (substr($target, 0, length $static) eq $static) {
+            my $x = "/etc/static/" . substr($File::Find::name, length "/etc/");
+            unless (-l $x) {
+                print STDERR "removing obsolete symlink ‘$File::Find::name’...\n";
+                unlink "$_";
+            }
+        }
+    }
+}
+
+find(\&cleanup, "/etc");
+
+
+# Use /etc/.clean to keep track of copied files.
+my @oldCopied = read_file("/etc/.clean", chomp => 1, err_mode => 'quiet');
+open CLEAN, ">>/etc/.clean";
+
+
+# For every file in the etc tree, create a corresponding symlink in
+# /etc to /etc/static.  The indirection through /etc/static is to make
+# switching to a new configuration somewhat more atomic.
+my %created;
+my @copied;
+
+sub link {
+    my $fn = substr $File::Find::name, length($etc) + 1 or next;
+
+    # nixos-enter sets up /etc/resolv.conf as a bind mount, so skip it.
+    if ($fn eq "resolv.conf" and $ENV{'IN_NIXOS_ENTER'}) {
+        return;
+    }
+
+    my $target = "/etc/$fn";
+    File::Path::make_path(dirname $target);
+    $created{$fn} = 1;
+
+    # Rename doesn't work if target is directory.
+    if (-l $_ && -d $target) {
+        if (isStatic $target) {
+            rmtree $target or warn;
+        } else {
+            warn "$target directory contains user files. Symlinking may fail.";
+        }
+    }
+
+    if (-e "$_.mode") {
+        my $mode = read_file("$_.mode"); chomp $mode;
+        if ($mode eq "direct-symlink") {
+            atomicSymlink readlink("$static/$fn"), $target or warn "could not create symlink $target";
+        } else {
+            my $uid = read_file("$_.uid"); chomp $uid;
+            my $gid = read_file("$_.gid"); chomp $gid;
+            copy "$static/$fn", "$target.tmp" or warn;
+            $uid = getpwnam $uid unless $uid =~ /^\+/;
+            $gid = getgrnam $gid unless $gid =~ /^\+/;
+            chown int($uid), int($gid), "$target.tmp" or warn;
+            chmod oct($mode), "$target.tmp" or warn;
+            unless (rename "$target.tmp", $target) {
+                warn "could not create target $target";
+                unlink "$target.tmp";
+            }
+        }
+        push @copied, $fn;
+        print CLEAN "$fn\n";
+    } elsif (-l "$_") {
+        atomicSymlink "$static/$fn", $target or warn "could not create symlink $target";
+    }
+}
+
+find(\&link, $etc);
+
+
+# Delete files that were copied in a previous version but not in the
+# current.
+foreach my $fn (@oldCopied) {
+    if (!defined $created{$fn}) {
+        $fn = "/etc/$fn";
+        print STDERR "removing obsolete file ‘$fn’...\n";
+        unlink "$fn";
+    }
+}
+
+
+# Rewrite /etc/.clean.
+close CLEAN;
+write_file("/etc/.clean", map { "$_\n" } sort @copied);
+
+# Create /etc/NIXOS tag if not exists.
+# When /etc is not on a persistent filesystem, it will be wiped after reboot,
+# so we need to check and re-create it during activation.
+open TAG, ">>/etc/NIXOS";
+close TAG;
diff --git a/nixpkgs/nixos/modules/system/etc/test.nix b/nixpkgs/nixos/modules/system/etc/test.nix
new file mode 100644
index 000000000000..5e43b155038d
--- /dev/null
+++ b/nixpkgs/nixos/modules/system/etc/test.nix
@@ -0,0 +1,70 @@
+{ lib
+, coreutils
+, fakechroot
+, fakeroot
+, evalMinimalConfig
+, pkgsModule
+, runCommand
+, util-linux
+, vmTools
+, writeText
+}:
+let
+  node = evalMinimalConfig ({ config, ... }: {
+    imports = [ pkgsModule ../etc/etc.nix ];
+    environment.etc."passwd" = {
+      text = passwdText;
+    };
+    environment.etc."hosts" = {
+      text = hostsText;
+      mode = "0751";
+    };
+  });
+  passwdText = ''
+    root:x:0:0:System administrator:/root:/run/current-system/sw/bin/bash
+  '';
+  hostsText = ''
+    127.0.0.1 localhost
+    ::1 localhost
+    # testing...
+  '';
+in
+lib.recurseIntoAttrs {
+  test-etc-vm =
+    vmTools.runInLinuxVM (runCommand "test-etc-vm" { } ''
+      mkdir -p /etc
+      ${node.config.system.build.etcActivationCommands}
+      set -x
+      [[ -L /etc/passwd ]]
+      diff /etc/passwd ${writeText "expected-passwd" passwdText}
+      [[ 751 = $(stat --format %a /etc/hosts) ]]
+      diff /etc/hosts ${writeText "expected-hosts" hostsText}
+      set +x
+      touch $out
+    '');
+
+  # fakeroot is behaving weird
+  test-etc-fakeroot =
+    runCommand "test-etc"
+      {
+        nativeBuildInputs = [
+          fakeroot
+          fakechroot
+          # for chroot
+          coreutils
+          # fakechroot needs getopt, which is provided by util-linux
+          util-linux
+        ];
+        fakeRootCommands = ''
+          mkdir -p /etc
+          ${node.config.system.build.etcActivationCommands}
+          diff /etc/hosts ${writeText "expected-hosts" hostsText}
+          touch $out
+        '';
+      } ''
+      mkdir fake-root
+      export FAKECHROOT_EXCLUDE_PATH=/dev:/proc:/sys:${builtins.storeDir}:$out
+      fakechroot fakeroot chroot $PWD/fake-root bash -c 'source $stdenv/setup; eval "$fakeRootCommands"'
+    '';
+
+}
diff --git a/nixpkgs/nixos/modules/tasks/auto-upgrade.nix b/nixpkgs/nixos/modules/tasks/auto-upgrade.nix
new file mode 100644
index 000000000000..29e3e313336f
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/auto-upgrade.nix
@@ -0,0 +1,262 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.system.autoUpgrade;
+
+in {
+
+  options = {
+
+    system.autoUpgrade = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to periodically upgrade NixOS to the latest
+          version. If enabled, a systemd timer will run
+          `nixos-rebuild switch --upgrade` once a
+          day.
+        '';
+      };
+
+      operation = mkOption {
+        type = types.enum ["switch" "boot"];
+        default = "switch";
+        example = "boot";
+        description = lib.mdDoc ''
+          Whether to run
+          `nixos-rebuild switch --upgrade` or run
+          `nixos-rebuild boot --upgrade`
+        '';
+      };
+
+      flake = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "github:kloenk/nix";
+        description = lib.mdDoc ''
+          The Flake URI of the NixOS configuration to build.
+          Disables the option {option}`system.autoUpgrade.channel`.
+        '';
+      };
+
+      channel = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "https://nixos.org/channels/nixos-14.12-small";
+        description = lib.mdDoc ''
+          The URI of the NixOS channel to use for automatic
+          upgrades. By default, this is the channel set using
+          {command}`nix-channel` (run `nix-channel --list`
+          to see the current value).
+        '';
+      };
+
+      flags = mkOption {
+        type = types.listOf types.str;
+        default = [ ];
+        example = [
+          "-I"
+          "stuff=/home/alice/nixos-stuff"
+          "--option"
+          "extra-binary-caches"
+          "http://my-cache.example.org/"
+        ];
+        description = lib.mdDoc ''
+          Any additional flags passed to {command}`nixos-rebuild`.
+
+          If you are using flakes and use a local repo you can add
+          {command}`[ "--update-input" "nixpkgs" "--commit-lock-file" ]`
+          to update nixpkgs.
+        '';
+      };
+
+      dates = mkOption {
+        type = types.str;
+        default = "04:40";
+        example = "daily";
+        description = lib.mdDoc ''
+          How often or when the upgrade occurs. For most desktop and server systems
+          a sufficient upgrade frequency is once a day.
+
+          The format is described in
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+
+      allowReboot = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Reboot the system into the new generation instead of a switch
+          if the new generation uses a different kernel, kernel modules
+          or initrd than the booted system.
+          See {option}`rebootWindow` for configuring the times at which a reboot is allowed.
+        '';
+      };
+
+      randomizedDelaySec = mkOption {
+        default = "0";
+        type = types.str;
+        example = "45min";
+        description = lib.mdDoc ''
+          Add a randomized delay before each automatic upgrade.
+          The delay will be chosen between zero and this value.
+          This value must be a time span in the format specified by
+          {manpage}`systemd.time(7)`
+        '';
+      };
+
+      rebootWindow = mkOption {
+        description = lib.mdDoc ''
+          Define a lower and upper time value (in HH:MM format) which
+          constitute a time window during which reboots are allowed after an upgrade.
+          This option only has an effect when {option}`allowReboot` is enabled.
+          The default value of `null` means that reboots are allowed at any time.
+        '';
+        default = null;
+        example = { lower = "01:00"; upper = "05:00"; };
+        type = with types; nullOr (submodule {
+          options = {
+            lower = mkOption {
+              description = lib.mdDoc "Lower limit of the reboot window";
+              type = types.strMatching "[[:digit:]]{2}:[[:digit:]]{2}";
+              example = "01:00";
+            };
+
+            upper = mkOption {
+              description = lib.mdDoc "Upper limit of the reboot window";
+              type = types.strMatching "[[:digit:]]{2}:[[:digit:]]{2}";
+              example = "05:00";
+            };
+          };
+        });
+      };
+
+      persistent = mkOption {
+        default = true;
+        type = types.bool;
+        example = false;
+        description = lib.mdDoc ''
+          Takes a boolean argument. If true, the time when the service
+          unit was last triggered is stored on disk. When the timer is
+          activated, the service unit is triggered immediately if it
+          would have been triggered at least once during the time when
+          the timer was inactive. Such triggering is nonetheless
+          subject to the delay imposed by RandomizedDelaySec=. This is
+          useful to catch up on missed runs of the service when the
+          system was powered down.
+        '';
+      };
+
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    assertions = [{
+      assertion = !((cfg.channel != null) && (cfg.flake != null));
+      message = ''
+        The options 'system.autoUpgrade.channel' and 'system.autoUpgrade.flake' cannot both be set.
+      '';
+    }];
+
+    system.autoUpgrade.flags = (if cfg.flake == null then
+        [ "--no-build-output" ] ++ optionals (cfg.channel != null) [
+          "-I"
+          "nixpkgs=${cfg.channel}/nixexprs.tar.xz"
+        ]
+      else
+        [ "--flake ${cfg.flake}" ]);
+
+    systemd.services.nixos-upgrade = {
+      description = "NixOS Upgrade";
+
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+
+      serviceConfig.Type = "oneshot";
+
+      environment = config.nix.envVars // {
+        inherit (config.environment.sessionVariables) NIX_PATH;
+        HOME = "/root";
+      } // config.networking.proxy.envVars;
+
+      path = with pkgs; [
+        coreutils
+        gnutar
+        xz.bin
+        gzip
+        gitMinimal
+        config.nix.package.out
+        config.programs.ssh.package
+      ];
+
+      script = let
+        nixos-rebuild = "${config.system.build.nixos-rebuild}/bin/nixos-rebuild";
+        date     = "${pkgs.coreutils}/bin/date";
+        readlink = "${pkgs.coreutils}/bin/readlink";
+        shutdown = "${config.systemd.package}/bin/shutdown";
+        upgradeFlag = optional (cfg.channel == null) "--upgrade";
+      in if cfg.allowReboot then ''
+        ${nixos-rebuild} boot ${toString (cfg.flags ++ upgradeFlag)}
+        booted="$(${readlink} /run/booted-system/{initrd,kernel,kernel-modules})"
+        built="$(${readlink} /nix/var/nix/profiles/system/{initrd,kernel,kernel-modules})"
+
+        ${optionalString (cfg.rebootWindow != null) ''
+          current_time="$(${date} +%H:%M)"
+
+          lower="${cfg.rebootWindow.lower}"
+          upper="${cfg.rebootWindow.upper}"
+
+          if [[ "''${lower}" < "''${upper}" ]]; then
+            if [[ "''${current_time}" > "''${lower}" ]] && \
+               [[ "''${current_time}" < "''${upper}" ]]; then
+              do_reboot="true"
+            else
+              do_reboot="false"
+            fi
+          else
+            # lower > upper, so we are crossing midnight (e.g. lower=23h, upper=6h)
+            # we want to reboot if cur > 23h or cur < 6h
+            if [[ "''${current_time}" < "''${upper}" ]] || \
+               [[ "''${current_time}" > "''${lower}" ]]; then
+              do_reboot="true"
+            else
+              do_reboot="false"
+            fi
+          fi
+        ''}
+
+        if [ "''${booted}" = "''${built}" ]; then
+          ${nixos-rebuild} ${cfg.operation} ${toString cfg.flags}
+        ${optionalString (cfg.rebootWindow != null) ''
+          elif [ "''${do_reboot}" != true ]; then
+            echo "Outside of configured reboot window, skipping."
+        ''}
+        else
+          ${shutdown} -r +1
+        fi
+      '' else ''
+        ${nixos-rebuild} ${cfg.operation} ${toString (cfg.flags ++ upgradeFlag)}
+      '';
+
+      startAt = cfg.dates;
+
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+    };
+
+    systemd.timers.nixos-upgrade = {
+      timerConfig = {
+        RandomizedDelaySec = cfg.randomizedDelaySec;
+        Persistent = cfg.persistent;
+      };
+    };
+  };
+
+}
+
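A usage sketch combining the auto-upgrade options above (the flake URI is a placeholder):

    {
      system.autoUpgrade = {
        enable = true;
        flake = "github:example/nixos-config";
        flags = [ "--update-input" "nixpkgs" "--commit-lock-file" ];
        dates = "04:00";
        randomizedDelaySec = "45min";
        allowReboot = true;
        rebootWindow = { lower = "01:00"; upper = "05:00"; };
      };
    }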
diff --git a/nixpkgs/nixos/modules/tasks/bcache.nix b/nixpkgs/nixos/modules/tasks/bcache.nix
new file mode 100644
index 000000000000..68531a4d2fed
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/bcache.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }: let
+  cfg = config.boot.bcache;
+in {
+  options.boot.bcache.enable = lib.mkEnableOption (lib.mdDoc "bcache mount support") // {
+    default = true;
+    example = false;
+  };
+  options.boot.initrd.services.bcache.enable = lib.mkEnableOption (lib.mdDoc "bcache support in the initrd") // {
+    description = lib.mdDoc ''
+      *This will only be used when systemd is used in stage 1.*
+
+      Whether to enable bcache support in the initrd.
+    '';
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    environment.systemPackages = [ pkgs.bcache-tools ];
+
+    services.udev.packages = [ pkgs.bcache-tools ];
+
+    boot.initrd.extraUdevRulesCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+      cp -v ${pkgs.bcache-tools}/lib/udev/rules.d/*.rules $out/
+    '';
+
+    boot.initrd.services.udev = lib.mkIf config.boot.initrd.services.bcache.enable {
+      packages = [ pkgs.bcache-tools ];
+      binPackages = [ pkgs.bcache-tools ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/cpu-freq.nix b/nixpkgs/nixos/modules/tasks/cpu-freq.nix
new file mode 100644
index 000000000000..6869ef8b7915
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/cpu-freq.nix
@@ -0,0 +1,90 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cpupower = config.boot.kernelPackages.cpupower;
+  cfg = config.powerManagement;
+in
+
+{
+  ###### interface
+
+  options.powerManagement = {
+
+    # TODO: This should be aliased to powerManagement.cpufreq.governor.
+    # https://github.com/NixOS/nixpkgs/pull/53041#commitcomment-31825338
+    cpuFreqGovernor = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "ondemand";
+      description = lib.mdDoc ''
+        Configure the governor used to regulate the frequency of the
+        available CPUs. By default, the kernel configures the
+        performance governor, although this may be overridden in your
+        hardware-configuration.nix file.
+
+        Commonly used values: "ondemand", "powersave", "performance".
+      '';
+    };
+
+    cpufreq = {
+
+      max = mkOption {
+        type = types.nullOr types.ints.unsigned;
+        default = null;
+        example = 2200000;
+        description = lib.mdDoc ''
+          The maximum frequency the CPU will use.  Defaults to the maximum possible.
+        '';
+      };
+
+      min = mkOption {
+        type = types.nullOr types.ints.unsigned;
+        default = null;
+        example = 800000;
+        description = lib.mdDoc ''
+          The minimum frequency the CPU will use.
+        '';
+      };
+    };
+
+  };
+
+
+  ###### implementation
+
+  config =
+    let
+      governorEnable = cfg.cpuFreqGovernor != null;
+      maxEnable = cfg.cpufreq.max != null;
+      minEnable = cfg.cpufreq.min != null;
+      enable =
+        !config.boot.isContainer &&
+        (governorEnable || maxEnable || minEnable);
+    in
+    mkIf enable {
+
+      boot.kernelModules = optional governorEnable "cpufreq_${cfg.cpuFreqGovernor}";
+
+      environment.systemPackages = [ cpupower ];
+
+      systemd.services.cpufreq = {
+        description = "CPU Frequency Setup";
+        after = [ "systemd-modules-load.service" ];
+        wantedBy = [ "multi-user.target" ];
+        path = [ cpupower pkgs.kmod ];
+        unitConfig.ConditionVirtualization = false;
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = "yes";
+          ExecStart = "${cpupower}/bin/cpupower frequency-set " +
+            optionalString governorEnable "--governor ${cfg.cpuFreqGovernor} " +
+            optionalString maxEnable "--max ${toString cfg.cpufreq.max} " +
+            optionalString minEnable "--min ${toString cfg.cpufreq.min} ";
+          SuccessExitStatus = "0 237";
+        };
+      };
+
+  };
+}
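A usage sketch for the powerManagement options above (frequencies are in kHz, matching the option examples):

    {
      powerManagement = {
        cpuFreqGovernor = "ondemand";
        cpufreq.min = 800000;
        cpufreq.max = 2200000;
      };
    }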
diff --git a/nixpkgs/nixos/modules/tasks/encrypted-devices.nix b/nixpkgs/nixos/modules/tasks/encrypted-devices.nix
new file mode 100644
index 000000000000..da9c83ba339c
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/encrypted-devices.nix
@@ -0,0 +1,120 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+  fileSystems = config.system.build.fileSystems ++ config.swapDevices;
+  encDevs = filter (dev: dev.encrypted.enable) fileSystems;
+
+  # With scripted initrd, devices with a keyFile have to be opened
+  # late, after file systems are mounted, because that could be where
+  # the keyFile is located. With systemd initrd, each individual
+  # systemd-cryptsetup@ unit has RequiresMountsFor= to delay until all
+  # the mount units for the key file are done; i.e. no special
+  # treatment is needed.
+  lateEncDevs =
+    if config.boot.initrd.systemd.enable
+    then { }
+    else filter (dev: dev.encrypted.keyFile != null) encDevs;
+  earlyEncDevs =
+    if config.boot.initrd.systemd.enable
+    then encDevs
+    else filter (dev: dev.encrypted.keyFile == null) encDevs;
+
+  anyEncrypted =
+    foldr (j: v: v || j.encrypted.enable) false encDevs;
+
+  encryptedFSOptions = {
+
+    options.encrypted = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "The block device is backed by an encrypted one, adds this device as a initrd luks entry.";
+      };
+
+      blkDev = mkOption {
+        default = null;
+        example = "/dev/sda1";
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Location of the backing encrypted device.";
+      };
+
+      label = mkOption {
+        default = null;
+        example = "rootfs";
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Label of the unlocked encrypted device. Set `fileSystems.<name?>.device` to `/dev/mapper/<label>` to mount the unlocked device.";
+      };
+
+      keyFile = mkOption {
+        default = null;
+        example = "/mnt-root/root/.swapkey";
+        type = types.nullOr types.str;
+        description = lib.mdDoc ''
+          Path to a keyfile used to unlock the backing encrypted
+          device. When systemd stage 1 is not enabled, at the time
+          this keyfile is accessed, the `neededForBoot` filesystems
+          (see `utils.fsNeededForBoot`) will have been mounted under
+          `/mnt-root`, so the keyfile path should usually start with
+          "/mnt-root/". When systemd stage 1 is enabled,
+          `fsNeededForBoot` file systems will be mounted as needed
+          under `/sysroot`, and the keyfile will not be accessed until
+          its requisite mounts are done.
+        '';
+      };
+    };
+  };
+in
+
+{
+
+  options = {
+    fileSystems = mkOption {
+      type = with lib.types; attrsOf (submodule encryptedFSOptions);
+    };
+    swapDevices = mkOption {
+      type = with lib.types; listOf (submodule encryptedFSOptions);
+    };
+  };
+
+  config = mkIf anyEncrypted {
+    assertions = concatMap (dev: [
+      {
+        assertion = dev.encrypted.label != null;
+        message = ''
+          The filesystem for ${dev.mountPoint} has encrypted.enable set to true, but no encrypted.label set
+        '';
+      }
+      {
+        assertion =
+          config.boot.initrd.systemd.enable -> (
+            dev.encrypted.keyFile == null
+            || !lib.any (x: lib.hasPrefix x dev.encrypted.keyFile) ["/mnt-root" "$targetRoot"]
+          );
+        message = ''
+          Bad use of '/mnt-root' or '$targetRoot' in 'keyFile'.
+
+            When 'boot.initrd.systemd.enable' is enabled, file systems
+            are mounted at '/sysroot' instead of '/mnt-root'.
+        '';
+      }
+    ]) encDevs;
+
+    boot.initrd = {
+      luks = {
+        devices =
+          builtins.listToAttrs (map (dev: {
+            name = dev.encrypted.label;
+            value = { device = dev.encrypted.blkDev; inherit (dev.encrypted) keyFile; };
+          }) earlyEncDevs);
+        forceLuksSupportInInitrd = true;
+      };
+      # TODO: systemd stage 1
+      postMountCommands = lib.mkIf (!config.boot.initrd.systemd.enable)
+        (concatMapStrings (dev:
+          "cryptsetup luksOpen --key-file ${dev.encrypted.keyFile} ${dev.encrypted.blkDev} ${dev.encrypted.label};\n"
+        ) lateEncDevs);
+    };
+  };
+}
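A usage sketch for an encrypted root file system (device names are placeholders); the mount point uses /dev/mapper/&lt;label&gt; as described in the label option:

    {
      fileSystems."/" = {
        device = "/dev/mapper/rootfs";
        fsType = "ext4";
        encrypted = {
          enable = true;
          label = "rootfs";       # name of the unlocked mapping
          blkDev = "/dev/sda2";   # backing LUKS device
        };
      };
    }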
diff --git a/nixpkgs/nixos/modules/tasks/filesystems.nix b/nixpkgs/nixos/modules/tasks/filesystems.nix
new file mode 100644
index 000000000000..91e30aa4c0af
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems.nix
@@ -0,0 +1,438 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+with utils;
+
+let
+
+  addCheckDesc = desc: elemType: check: types.addCheck elemType check
+    // { description = "${elemType.description} (with check: ${desc})"; };
+
+  isNonEmpty = s: (builtins.match "[ \t\n]*" s) == null;
+  nonEmptyStr = addCheckDesc "non-empty" types.str isNonEmpty;
+
+  fileSystems' = toposort fsBefore (attrValues config.fileSystems);
+
+  fileSystems = if fileSystems' ? result
+                then # use topologically sorted fileSystems everywhere
+                     fileSystems'.result
+                else # the assertion below will catch this,
+                     # but we fall back to the original order
+                     # anyway so that other modules could check
+                     # their assertions too
+                     (attrValues config.fileSystems);
+
+  specialFSTypes = [ "proc" "sysfs" "tmpfs" "ramfs" "devtmpfs" "devpts" ];
+
+  nonEmptyWithoutTrailingSlash = addCheckDesc "non-empty without trailing slash" types.str
+    (s: isNonEmpty s && (builtins.match ".+/" s) == null);
+
+  coreFileSystemOpts = { name, config, ... }: {
+
+    options = {
+      mountPoint = mkOption {
+        example = "/mnt/usb";
+        type = nonEmptyWithoutTrailingSlash;
+        description = lib.mdDoc "Location of the mounted file system.";
+      };
+
+      stratis.poolUuid = lib.mkOption {
+        type = types.uniq (types.nullOr types.str);
+        description = lib.mdDoc ''
+          UUID of the Stratis pool that the file system is located in.
+        '';
+        example = "04c68063-90a5-4235-b9dd-6180098a20d9";
+        default = null;
+      };
+
+      device = mkOption {
+        default = null;
+        example = "/dev/sda";
+        type = types.nullOr nonEmptyStr;
+        description = lib.mdDoc "Location of the device.";
+      };
+
+      fsType = mkOption {
+        default = "auto";
+        example = "ext3";
+        type = nonEmptyStr;
+        description = lib.mdDoc "Type of the file system.";
+      };
+
+      options = mkOption {
+        default = [ "defaults" ];
+        example = [ "data=journal" ];
+        description = lib.mdDoc "Options used to mount the file system.";
+        type = types.nonEmptyListOf nonEmptyStr;
+      };
+
+      depends = mkOption {
+        default = [ ];
+        example = [ "/persist" ];
+        type = types.listOf nonEmptyWithoutTrailingSlash;
+        description = lib.mdDoc ''
+          List of paths that should be mounted before this one. This filesystem's
+          {option}`device` and {option}`mountPoint` are always
+          checked and do not need to be included explicitly. If a path is added
+          to this list, any other filesystem whose mount point is a parent of
+          the path will be mounted before this filesystem. The paths do not need
+          to actually be the {option}`mountPoint` of some other filesystem.
+        '';
+      };
+
+    };
+
+    config = {
+      mountPoint = mkDefault name;
+      device = mkIf (elem config.fsType specialFSTypes) (mkDefault config.fsType);
+    };
+
+  };
+
+  fileSystemOpts = { config, ... }: {
+
+    options = {
+
+      label = mkOption {
+        default = null;
+        example = "root-partition";
+        type = types.nullOr nonEmptyStr;
+        description = lib.mdDoc "Label of the device (if any).";
+      };
+
+      autoFormat = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If the device does not currently contain a filesystem (as
+          determined by {command}`blkid`), then automatically
+          format it with the filesystem type specified in
+          {option}`fsType`.  Use with caution.
+        '';
+      };
+
+      formatOptions = mkOption {
+        visible = false;
+        type = types.unspecified;
+        default = null;
+      };
+
+      autoResize = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          If set, the filesystem is grown to its maximum size before
+          being mounted. (This is typically the size of the containing
+          partition.) This is currently only supported for ext2/3/4
+          filesystems that are mounted during early boot.
+        '';
+      };
+
+      noCheck = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc "Disable running fsck on this filesystem.";
+      };
+
+    };
+
+    config.options = mkMerge [
+      (mkIf config.autoResize [ "x-systemd.growfs" ])
+      (mkIf config.autoFormat [ "x-systemd.makefs" ])
+      (mkIf (utils.fsNeededForBoot config) [ "x-initrd.mount" ])
+    ];
+
+  };
+
+  # Makes sequence of `specialMount device mountPoint options fsType` commands.
+  # `systemMount` should be defined in the sourcing script.
+  makeSpecialMounts = mounts:
+    pkgs.writeText "mounts.sh" (concatMapStringsSep "\n" (mount: ''
+      specialMount "${mount.device}" "${mount.mountPoint}" "${concatStringsSep "," mount.options}" "${mount.fsType}"
+    '') mounts);
+
+  makeFstabEntries =
+    let
+      fsToSkipCheck = [
+        "none"
+        "auto"
+        "overlay"
+        "iso9660"
+        "bindfs"
+        "udf"
+        "btrfs"
+        "zfs"
+        "tmpfs"
+        "bcachefs"
+        "nfs"
+        "nfs4"
+        "nilfs2"
+        "vboxsf"
+        "squashfs"
+        "glusterfs"
+        "apfs"
+        "9p"
+        "cifs"
+        "prl_fs"
+        "vmhgfs"
+      ] ++ lib.optionals (!config.boot.initrd.checkJournalingFS) [
+        "ext3"
+        "ext4"
+        "reiserfs"
+        "xfs"
+        "jfs"
+        "f2fs"
+      ];
+      isBindMount = fs: builtins.elem "bind" fs.options;
+      skipCheck = fs: fs.noCheck || fs.device == "none" || builtins.elem fs.fsType fsToSkipCheck || isBindMount fs;
+      # https://wiki.archlinux.org/index.php/fstab#Filepath_spaces
+      escape = string: builtins.replaceStrings [ " " "\t" ] [ "\\040" "\\011" ] string;
+    in fstabFileSystems: { }: concatMapStrings (fs:
+      (if fs.device != null then escape fs.device
+         else if fs.label != null then "/dev/disk/by-label/${escape fs.label}"
+         else throw "No device specified for mount point ‘${fs.mountPoint}’.")
+      + " " + escape fs.mountPoint
+      + " " + fs.fsType
+      + " " + escape (builtins.concatStringsSep "," fs.options)
+      + " 0 " + (if skipCheck fs then "0" else if fs.mountPoint == "/" then "1" else "2")
+      + "\n"
+    ) fstabFileSystems;
+
+    initrdFstab = pkgs.writeText "initrd-fstab" (makeFstabEntries (filter utils.fsNeededForBoot fileSystems) { });
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    fileSystems = mkOption {
+      default = {};
+      example = literalExpression ''
+        {
+          "/".device = "/dev/hda1";
+          "/data" = {
+            device = "/dev/hda2";
+            fsType = "ext3";
+            options = [ "data=journal" ];
+          };
+          "/bigdisk".label = "bigdisk";
+        }
+      '';
+      type = types.attrsOf (types.submodule [coreFileSystemOpts fileSystemOpts]);
+      description = lib.mdDoc ''
+        The file systems to be mounted.  It must include an entry for
+        the root directory (`mountPoint = "/"`).  Each
+        entry is an attribute set with the following fields:
+        `mountPoint`, `device`,
+        `fsType` (a file system type recognised by
+        {command}`mount`; defaults to
+        `"auto"`), and `options`
+        (the mount options passed to {command}`mount` using the
+        {option}`-o` flag; defaults to `[ "defaults" ]`).
+
+        Instead of specifying `device`, you can also
+        specify a volume label (`label`) for file
+        systems that support it, such as ext2/ext3 (see {command}`mke2fs -L`).
+      '';
+    };
+
+    system.fsPackages = mkOption {
+      internal = true;
+      default = [ ];
+      description = lib.mdDoc "Packages supplying file system mounters and checkers.";
+    };
+
+    boot.supportedFilesystems = mkOption {
+      default = [ ];
+      example = [ "btrfs" ];
+      type = types.listOf types.str;
+      description = lib.mdDoc "Names of supported filesystem types.";
+    };
+
+    boot.specialFileSystems = mkOption {
+      default = {};
+      type = types.attrsOf (types.submodule coreFileSystemOpts);
+      internal = true;
+      description = lib.mdDoc ''
+        Special filesystems that are mounted very early during boot.
+      '';
+    };
+
+    boot.devSize = mkOption {
+      default = "5%";
+      example = "32m";
+      type = types.str;
+      description = lib.mdDoc ''
+        Size limit for the /dev tmpfs. Look at mount(8), tmpfs size option,
+        for the accepted syntax.
+      '';
+    };
+
+    boot.devShmSize = mkOption {
+      default = "50%";
+      example = "256m";
+      type = types.str;
+      description = lib.mdDoc ''
+        Size limit for the /dev/shm tmpfs. Look at mount(8), tmpfs size option,
+        for the accepted syntax.
+      '';
+    };
+
+    boot.runSize = mkOption {
+      default = "25%";
+      example = "256m";
+      type = types.str;
+      description = lib.mdDoc ''
+        Size limit for the /run tmpfs. Look at mount(8), tmpfs size option,
+        for the accepted syntax.
+      '';
+    };
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    assertions = let
+      ls = sep: concatMapStringsSep sep (x: x.mountPoint);
+      resizableFSes = [
+        "ext3"
+        "ext4"
+        "btrfs"
+        "xfs"
+      ];
+      notAutoResizable = fs: fs.autoResize && !(builtins.elem fs.fsType resizableFSes);
+    in [
+      { assertion = ! (fileSystems' ? cycle);
+        message = "The ‘fileSystems’ option can't be topologically sorted: mountpoint dependency path ${ls " -> " fileSystems'.cycle} loops to ${ls ", " fileSystems'.loops}";
+      }
+      { assertion = ! (any notAutoResizable fileSystems);
+        message = let
+          fs = head (filter notAutoResizable fileSystems);
+        in ''
+          Mountpoint '${fs.mountPoint}': 'autoResize = true' is not supported for 'fsType = "${fs.fsType}"'
+          ${optionalString (fs.fsType == "auto") "fsType has to be explicitly set and"}
+          only the following support it: ${lib.concatStringsSep ", " resizableFSes}.
+        '';
+      }
+      {
+        assertion = ! (any (fs: fs.formatOptions != null) fileSystems);
+        message = let
+          fs = head (filter (fs: fs.formatOptions != null) fileSystems);
+        in ''
+          'fileSystems.<name>.formatOptions' has been removed, since
+          systemd-makefs does not support any way to provide formatting
+          options.
+        '';
+      }
+    ];
+
+    # Export for use in other modules
+    system.build.fileSystems = fileSystems;
+    system.build.earlyMountScript = makeSpecialMounts (toposort fsBefore (attrValues config.boot.specialFileSystems)).result;
+
+    boot.supportedFilesystems = map (fs: fs.fsType) fileSystems;
+
+    # Add the mount helpers to the system path so that `mount' can find them.
+    system.fsPackages = [ pkgs.dosfstools ];
+
+    environment.systemPackages = with pkgs; [ fuse3 fuse ] ++ config.system.fsPackages;
+
+    environment.etc.fstab.text =
+      let
+        swapOptions = sw: concatStringsSep "," (
+          sw.options
+          ++ optional (sw.priority != null) "pri=${toString sw.priority}"
+          ++ optional (sw.discardPolicy != null) "discard${optionalString (sw.discardPolicy != "both") "=${toString sw.discardPolicy}"}"
+        );
+      in ''
+        # This is a generated file.  Do not edit!
+        #
+        # To make changes, edit the fileSystems and swapDevices NixOS options
+        # in your /etc/nixos/configuration.nix file.
+        #
+        # <file system> <mount point>   <type>  <options>       <dump>  <pass>
+
+        # Filesystems.
+        ${makeFstabEntries fileSystems {}}
+
+        # Swap devices.
+        ${flip concatMapStrings config.swapDevices (sw:
+            "${sw.realDevice} none swap ${swapOptions sw}\n"
+        )}
+      '';
+
+    boot.initrd.systemd.storePaths = [initrdFstab];
+    boot.initrd.systemd.managerEnvironment.SYSTEMD_SYSROOT_FSTAB = initrdFstab;
+    boot.initrd.systemd.services.initrd-parse-etc.environment.SYSTEMD_SYSROOT_FSTAB = initrdFstab;
+
+    # Provide a target that pulls in all filesystems.
+    systemd.targets.fs =
+      { description = "All File Systems";
+        wants = [ "local-fs.target" "remote-fs.target" ];
+      };
+
+    systemd.services = {
+    # Mount /sys/fs/pstore for evacuating panic logs and crashdumps from persistent storage onto the disk using systemd-pstore.
+    # This cannot be done with the other special filesystems because the pstore module (which creates the mount point) is not loaded then.
+        "mount-pstore" = {
+          serviceConfig = {
+            Type = "oneshot";
+            # skip on kernels without the pstore module
+            ExecCondition = "${pkgs.kmod}/bin/modprobe -b pstore";
+            ExecStart = pkgs.writeShellScript "mount-pstore.sh" ''
+              set -eu
+              # if the pstore module is builtin it will have mounted the persistent store automatically. it may also be already mounted for other reasons.
+              ${pkgs.util-linux}/bin/mountpoint -q /sys/fs/pstore || ${pkgs.util-linux}/bin/mount -t pstore -o nosuid,noexec,nodev pstore /sys/fs/pstore
+              # wait up to 1.5 seconds for the backend to be registered and the files to appear. a systemd path unit cannot detect this happening; and succeeding after a restart would not start dependent units.
+              TRIES=15
+              while [ "$(cat /sys/module/pstore/parameters/backend)" = "(null)" ]; do
+                if (( $TRIES )); then
+                  sleep 0.1
+                  TRIES=$((TRIES-1))
+                else
+                  echo "Persistent Storage backend was not registered in time." >&2
+                  break
+                fi
+              done
+            '';
+            RemainAfterExit = true;
+          };
+          unitConfig = {
+            ConditionVirtualization = "!container";
+            DefaultDependencies = false; # needed to prevent a cycle
+          };
+          before = [ "systemd-pstore.service" ];
+          wantedBy = [ "systemd-pstore.service" ];
+        };
+      };
+
+    systemd.tmpfiles.rules = [
+      "d /run/keys 0750 root ${toString config.ids.gids.keys}"
+      "z /run/keys 0750 root ${toString config.ids.gids.keys}"
+    ];
+
+    # Sync mount options with systemd's src/core/mount-setup.c: mount_table.
+    boot.specialFileSystems = {
+      "/proc" = { fsType = "proc"; options = [ "nosuid" "noexec" "nodev" ]; };
+      "/run" = { fsType = "tmpfs"; options = [ "nosuid" "nodev" "strictatime" "mode=755" "size=${config.boot.runSize}" ]; };
+      "/dev" = { fsType = "devtmpfs"; options = [ "nosuid" "strictatime" "mode=755" "size=${config.boot.devSize}" ]; };
+      "/dev/shm" = { fsType = "tmpfs"; options = [ "nosuid" "nodev" "strictatime" "mode=1777" "size=${config.boot.devShmSize}" ]; };
+      "/dev/pts" = { fsType = "devpts"; options = [ "nosuid" "noexec" "mode=620" "ptmxmode=0666" "gid=${toString config.ids.gids.tty}" ]; };
+
+      # To hold secrets that shouldn't be written to disk
+      "/run/keys" = { fsType = "ramfs"; options = [ "nosuid" "nodev" "mode=750" ]; };
+    } // optionalAttrs (!config.boot.isContainer) {
+      # systemd-nspawn populates /sys by itself, and remounting it causes all
+      # kinds of weird issues (most noticeably, waiting for host disk device
+      # nodes).
+      "/sys" = { fsType = "sysfs"; options = [ "nosuid" "noexec" "nodev" ]; };
+    };
+
+  };
+
+}
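
A hypothetical illustration of how the swapOptions helper above ends up in the
generated /etc/fstab (device name and values are illustrative, and the default
`options = [ "defaults" ]` is assumed):

    swapDevices = [
      { device = "/dev/disk/by-label/swap"; priority = 5; discardPolicy = "pages"; }
    ];
    # would be rendered roughly as:
    #   /dev/disk/by-label/swap none swap defaults,pri=5,discard=pages
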
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/apfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/apfs.nix
new file mode 100644
index 000000000000..2f2be351df61
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/apfs.nix
@@ -0,0 +1,22 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "apfs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = mkIf (any (fs: fs == "apfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.apfsprogs ];
+
+    boot.extraModulePackages = [ config.boot.kernelPackages.apfs ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "apfs" ];
+
+    # Don't copy apfsck into the initramfs since it does not support repairing the filesystem
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix b/nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix
new file mode 100644
index 000000000000..d144ce62dc27
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/bcachefs.nix
@@ -0,0 +1,150 @@
+{ config, lib, pkgs, utils, ... }:
+
+let
+
+  bootFs = lib.filterAttrs (n: fs: (fs.fsType == "bcachefs") && (utils.fsNeededForBoot fs)) config.fileSystems;
+
+  commonFunctions = ''
+    prompt() {
+        local name="$1"
+        printf "enter passphrase for $name: "
+    }
+
+    tryUnlock() {
+        local name="$1"
+        local path="$2"
+        local success=false
+        local target
+        local uuid=$(echo -n $path | sed -e 's,UUID=\(.*\),\1,g')
+
+        printf "waiting for device to appear $path"
+        for try in $(seq 10); do
+          if [ -e $path ]; then
+              success=true
+              break
+          else
+              target=$(blkid --uuid $uuid)
+              if [ $? == 0 ]; then
+                 success=true
+                 break
+              fi
+          fi
+          echo -n "."
+          sleep 1
+        done
+        printf "\n"
+        if [ $success == true ]; then
+            path=$target
+        fi
+
+        if bcachefs unlock -c $path > /dev/null 2> /dev/null; then    # test for encryption
+            prompt $name
+            until bcachefs unlock $path 2> /dev/null; do              # repeat until successfully unlocked
+                printf "unlocking failed!\n"
+                prompt $name
+            done
+            printf "unlocking successful.\n"
+        else
+            echo "Cannot unlock device $uuid with path $path" >&2
+        fi
+    }
+  '';
+
+  # we need only unlock one device manually, and cannot pass multiple at once
+  # remove this adaptation when bcachefs implements mounting by filesystem uuid
+  # also, implement automatic waiting for the constituent devices when that happens
+  # bcachefs does not support mounting devices with colons in the path, ergo we don't (see #49671)
+  firstDevice = fs: lib.head (lib.splitString ":" fs.device);
+
+  openCommand = name: fs: ''
+    tryUnlock ${name} ${firstDevice fs}
+  '';
+
+  mkUnits = prefix: name: fs: let
+    mountUnit = "${utils.escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint))}.mount";
+    device = firstDevice fs;
+    deviceUnit = "${utils.escapeSystemdPath device}.device";
+  in {
+    name = "unlock-bcachefs-${utils.escapeSystemdPath fs.mountPoint}";
+    value = {
+      description = "Unlock bcachefs for ${fs.mountPoint}";
+      requiredBy = [ mountUnit ];
+      before = [ mountUnit ];
+      bindsTo = [ deviceUnit ];
+      after = [ deviceUnit ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig = {
+        Type = "oneshot";
+        ExecCondition = "${pkgs.bcachefs-tools}/bin/bcachefs unlock -c \"${device}\"";
+        Restart = "on-failure";
+        RestartMode = "direct";
+        # Ideally, this service would lock the key on stop.
+        # As is, RemainAfterExit doesn't accomplish anything.
+        RemainAfterExit = true;
+      };
+      script = ''
+        ${config.boot.initrd.systemd.package}/bin/systemd-ask-password --timeout=0 "enter passphrase for ${name}" | exec ${pkgs.bcachefs-tools}/bin/bcachefs unlock "${device}"
+      '';
+    };
+  };
+
+  assertions = [
+    {
+      assertion = let
+        kernel = config.boot.kernelPackages.kernel;
+      in (
+        kernel.kernelAtLeast "6.7" || (
+          lib.elem (kernel.structuredExtraConfig.BCACHEFS_FS or null) [
+            lib.kernel.module
+            lib.kernel.yes
+            lib.kernel.option.yes
+          ]
+        )
+      );
+
+      message = "Linux 6.7-rc1 at minimum or a custom linux kernel with bcachefs support is required";
+    }
+  ];
+in
+
+{
+  config = lib.mkIf (lib.elem "bcachefs" config.boot.supportedFilesystems) (lib.mkMerge [
+    {
+      inherit assertions;
+      # needed for systemd-remount-fs
+      system.fsPackages = [ pkgs.bcachefs-tools ];
+
+      # FIXME: Replace this with `linuxPackages_testing` after NixOS 23.11 is released
+      # FIXME: Replace this with `linuxPackages_latest` when 6.7 is released, remove this line when the LTS version is at least 6.7
+      boot.kernelPackages = lib.mkDefault (
+        # FIXME: Remove warning after NixOS 23.11 is released
+        lib.warn "Please upgrade to Linux 6.7-rc1 or later: 'linuxPackages_testing_bcachefs' is deprecated. Use 'boot.kernelPackages = pkgs.linuxPackages_testing;' to silence this warning"
+        pkgs.linuxPackages_testing_bcachefs
+      );
+
+      systemd.services = lib.mapAttrs' (mkUnits "") (lib.filterAttrs (n: fs: (fs.fsType == "bcachefs") && (!utils.fsNeededForBoot fs)) config.fileSystems);
+    }
+
+    (lib.mkIf ((lib.elem "bcachefs" config.boot.initrd.supportedFilesystems) || (bootFs != {})) {
+      inherit assertions;
+      # chacha20 and poly1305 are required only for decryption attempts
+      boot.initrd.availableKernelModules = [ "bcachefs" "sha256" "chacha20" "poly1305" ];
+      boot.initrd.systemd.extraBin = {
+        # do we need this? boot/systemd.nix:566 & boot/systemd/initrd.nix:357
+        "bcachefs" = "${pkgs.bcachefs-tools}/bin/bcachefs";
+        "mount.bcachefs" = "${pkgs.bcachefs-tools}/bin/mount.bcachefs";
+      };
+      boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+        copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/bcachefs
+        copy_bin_and_libs ${pkgs.bcachefs-tools}/bin/mount.bcachefs
+      '';
+      boot.initrd.extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+        $out/bin/bcachefs version
+      '';
+
+      boot.initrd.postDeviceCommands = lib.mkIf (!config.boot.initrd.systemd.enable) (commonFunctions + lib.concatStrings (lib.mapAttrsToList openCommand bootFs));
+
+      boot.initrd.systemd.services = lib.mapAttrs' (mkUnits "/sysroot") bootFs;
+    })
+  ]);
+}
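
A hypothetical fileSystems entry for a multi-device bcachefs filesystem; note that,
per the firstDevice helper above, only the first colon-separated device is passed to
`bcachefs unlock` (the member device paths are illustrative):

    fileSystems."/data" = {
      device = "/dev/sda1:/dev/sdb1";   # colon-separated member devices
      fsType = "bcachefs";
    };
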
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix
new file mode 100644
index 000000000000..87fe326c0974
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/btrfs.nix
@@ -0,0 +1,150 @@
+{ config, lib, pkgs, utils, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "btrfs") config.boot.initrd.supportedFilesystems;
+  inSystem = any (fs: fs == "btrfs") config.boot.supportedFilesystems;
+
+  cfgScrub = config.services.btrfs.autoScrub;
+
+  enableAutoScrub = cfgScrub.enable;
+  enableBtrfs = inInitrd || inSystem || enableAutoScrub;
+
+in
+
+{
+  options = {
+    # One could also do regular btrfs balances, but that shouldn't be necessary
+    # during normal usage and as long as the filesystems aren't filled near capacity
+    services.btrfs.autoScrub = {
+      enable = mkEnableOption (lib.mdDoc "regular btrfs scrub");
+
+      fileSystems = mkOption {
+        type = types.listOf types.path;
+        example = [ "/" ];
+        description = lib.mdDoc ''
+          List of paths to btrfs filesystems to regularly call {command}`btrfs scrub` on.
+          Defaults to all mount points with btrfs filesystems.
+          If you mount a filesystem multiple times or additionally mount subvolumes,
+          you need to manually specify this list to avoid scrubbing multiple times.
+        '';
+      };
+
+      interval = mkOption {
+        default = "monthly";
+        type = types.str;
+        example = "weekly";
+        description = lib.mdDoc ''
+          Systemd calendar expression for when to scrub btrfs filesystems.
+          The recommended period is a month but could be less
+          ({manpage}`btrfs-scrub(8)`).
+          See
+          {manpage}`systemd.time(7)`
+          for more information on the syntax.
+        '';
+      };
+
+    };
+  };
+
+  config = mkMerge [
+    (mkIf enableBtrfs {
+      system.fsPackages = [ pkgs.btrfs-progs ];
+    })
+
+    (mkIf inInitrd {
+      boot.initrd.kernelModules = [ "btrfs" ];
+      boot.initrd.availableKernelModules =
+        [ "crc32c" ]
+        ++ optionals (config.boot.kernelPackages.kernel.kernelAtLeast "5.5") [
+          # Needed for mounting filesystems with new checksums
+          "xxhash_generic"
+          "blake2b_generic"
+          "sha256_generic" # Should be baked into our kernel, just to be sure
+        ];
+
+      boot.initrd.extraUtilsCommands = mkIf (!config.boot.initrd.systemd.enable)
+      ''
+        copy_bin_and_libs ${pkgs.btrfs-progs}/bin/btrfs
+        ln -sv btrfs $out/bin/btrfsck
+        ln -sv btrfsck $out/bin/fsck.btrfs
+      '';
+
+      boot.initrd.extraUtilsCommandsTest = mkIf (!config.boot.initrd.systemd.enable)
+      ''
+        $out/bin/btrfs --version
+      '';
+
+      boot.initrd.postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable)
+      ''
+        btrfs device scan
+      '';
+
+      boot.initrd.systemd.initrdBin = [ pkgs.btrfs-progs ];
+    })
+
+    (mkIf enableAutoScrub {
+      assertions = [
+        {
+          assertion = cfgScrub.enable -> (cfgScrub.fileSystems != []);
+          message = ''
+            If 'services.btrfs.autoScrub' is enabled, you need to have at least one
+            btrfs file system mounted via 'fileSystems' or specify a list manually
+            in 'services.btrfs.autoScrub.fileSystems'.
+          '';
+        }
+      ];
+
+      # This will yield duplicated units if the user mounts a filesystem multiple times
+      # or additionally mounts subvolumes, but going the other way around via devices would
+      # yield duplicated units when a filesystem spans multiple devices.
+      # This way around seems like the more sensible default.
+      services.btrfs.autoScrub.fileSystems = mkDefault (mapAttrsToList (name: fs: fs.mountPoint)
+      (filterAttrs (name: fs: fs.fsType == "btrfs") config.fileSystems));
+
+      # TODO: Did not manage to do it via the usual btrfs-scrub@.timer/.service
+      # template units due to problems enabling the parameterized units,
+      # so settled with many units and templating via nix for now.
+      # https://github.com/NixOS/nixpkgs/pull/32496#discussion_r156527544
+      systemd.timers = let
+        scrubTimer = fs: let
+          fs' = utils.escapeSystemdPath fs;
+        in nameValuePair "btrfs-scrub-${fs'}" {
+          description = "regular btrfs scrub timer on ${fs}";
+
+          wantedBy = [ "timers.target" ];
+          timerConfig = {
+            OnCalendar = cfgScrub.interval;
+            AccuracySec = "1d";
+            Persistent = true;
+          };
+        };
+      in listToAttrs (map scrubTimer cfgScrub.fileSystems);
+
+      systemd.services = let
+        scrubService = fs: let
+          fs' = utils.escapeSystemdPath fs;
+        in nameValuePair "btrfs-scrub-${fs'}" {
+          description = "btrfs scrub on ${fs}";
+          # scrub prevents suspend2ram or proper shutdown
+          conflicts = [ "shutdown.target" "sleep.target" ];
+          before = [ "shutdown.target" "sleep.target" ];
+
+          serviceConfig = {
+            # simple and not oneshot, otherwise ExecStop is not used
+            Type = "simple";
+            Nice = 19;
+            IOSchedulingClass = "idle";
+            ExecStart = "${pkgs.btrfs-progs}/bin/btrfs scrub start -B ${fs}";
+            # if the service is stopped before scrub end, cancel it
+            ExecStop  = pkgs.writeShellScript "btrfs-scrub-maybe-cancel" ''
+              (${pkgs.btrfs-progs}/bin/btrfs scrub status ${fs} | ${pkgs.gnugrep}/bin/grep finished) || ${pkgs.btrfs-progs}/bin/btrfs scrub cancel ${fs}
+            '';
+          };
+        };
+      in listToAttrs (map scrubService cfgScrub.fileSystems);
+    })
+  ];
+}
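
A minimal, hypothetical sketch of the autoScrub options defined above (mount point
and interval are illustrative):

    services.btrfs.autoScrub = {
      enable = true;
      interval = "weekly";       # systemd.time(7) calendar expression
      fileSystems = [ "/" ];     # omit to scrub all btrfs mount points from `fileSystems`
    };
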
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/cifs.nix b/nixpkgs/nixos/modules/tasks/filesystems/cifs.nix
new file mode 100644
index 000000000000..837b9e19bfb9
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/cifs.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "cifs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = {
+
+    system.fsPackages = mkIf (any (fs: fs == "cifs") config.boot.supportedFilesystems) [ pkgs.cifs-utils ];
+
+    boot.initrd.availableKernelModules = mkIf inInitrd
+      [ "cifs" "nls_utf8" "hmac" "md4" "ecb" "des_generic" "sha256" ];
+
+    boot.initrd.extraUtilsCommands = mkIf (inInitrd && !config.boot.initrd.systemd.enable)
+      ''
+        copy_bin_and_libs ${pkgs.cifs-utils}/sbin/mount.cifs
+      '';
+
+    boot.initrd.systemd.extraBin."mount.cifs" = mkIf inInitrd "${pkgs.cifs-utils}/sbin/mount.cifs";
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix
new file mode 100644
index 000000000000..8138e6591610
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/ecryptfs.nix
@@ -0,0 +1,24 @@
+{ config, lib, pkgs, ... }:
+# TODO: make ecryptfs work in initramfs?
+
+with lib;
+
+{
+  config = mkIf (any (fs: fs == "ecryptfs") config.boot.supportedFilesystems) {
+    system.fsPackages = [ pkgs.ecryptfs ];
+    security.wrappers = {
+      "mount.ecryptfs_private" =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.ecryptfs.out}/bin/mount.ecryptfs_private";
+        };
+      "umount.ecryptfs_private" =
+        { setuid = true;
+          owner = "root";
+          group = "root";
+          source = "${pkgs.ecryptfs.out}/bin/umount.ecryptfs_private";
+        };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix
new file mode 100644
index 000000000000..365cb46ff2fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/envfs.nix
@@ -0,0 +1,60 @@
+{ pkgs, config, lib, ... }:
+
+let
+  cfg = config.services.envfs;
+  mounts = {
+    "/usr/bin" = {
+      device = "none";
+      fsType = "envfs";
+      options = [
+        "fallback-path=${pkgs.runCommand "fallback-path" {} (''
+          mkdir -p $out
+          ln -s ${config.environment.usrbinenv} $out/env
+          ln -s ${config.environment.binsh} $out/sh
+        '' + cfg.extraFallbackPathCommands)}"
+        "nofail"
+      ];
+    };
+    "/bin" = {
+      device = "/usr/bin";
+      fsType = "none";
+      options = [ "bind" "nofail" ];
+    };
+  };
+in {
+  options = {
+    services.envfs = {
+      enable = lib.mkEnableOption (lib.mdDoc "Envfs filesystem") // {
+        description = lib.mdDoc ''
+          Fuse filesystem that returns symlinks to executables based on the PATH
+          of the requesting process. This is useful for executing shebangs on NixOS
+          that assume hard-coded locations such as /bin or /usr/bin.
+        '';
+      };
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.envfs;
+        defaultText = lib.literalExpression "pkgs.envfs";
+        description = lib.mdDoc "Which package to use for the envfs.";
+      };
+
+      extraFallbackPathCommands = lib.mkOption {
+        type = lib.types.lines;
+        default = "";
+        example = "ln -s $''{pkgs.bash}/bin/bash $out/bash";
+        description = lib.mdDoc "Extra commands to run in the package that contains fallback executables in case not other executable is found";
+      };
+    };
+  };
+  config = lib.mkIf (cfg.enable) {
+    environment.systemPackages = [ cfg.package ];
+    # we also want these mounts in virtual machines.
+    fileSystems = if config.virtualisation ? qemu then lib.mkVMOverride mounts else mounts;
+
+    # We no longer need those when using envfs
+    system.activationScripts.usrbinenv = lib.mkForce "";
+    system.activationScripts.binsh = lib.mkForce "";
+  };
+}
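
A minimal, hypothetical sketch enabling envfs, with an extra fallback executable (the
bash symlink is illustrative):

    { pkgs, ... }: {
      services.envfs.enable = true;
      services.envfs.extraFallbackPathCommands = ''
        ln -s ${pkgs.bash}/bin/bash $out/bash
      '';
    }
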
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/erofs.nix b/nixpkgs/nixos/modules/tasks/filesystems/erofs.nix
new file mode 100644
index 000000000000..a3d657669350
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/erofs.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  inInitrd = lib.any (fs: fs == "erofs") config.boot.initrd.supportedFilesystems;
+  inSystem = lib.any (fs: fs == "erofs") config.boot.supportedFilesystems;
+
+in
+
+{
+  config = lib.mkIf (inInitrd || inSystem) {
+
+    system.fsPackages = [ pkgs.erofs-utils ];
+
+    boot.initrd.availableKernelModules = lib.mkIf inInitrd [ "erofs" ];
+
+    # fsck.erofs is currently experimental and should not be run as a
+    # privileged user. Thus, it is not included in the initrd.
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/exfat.nix b/nixpkgs/nixos/modules/tasks/filesystems/exfat.nix
new file mode 100644
index 000000000000..540b9b91c3ec
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/exfat.nix
@@ -0,0 +1,13 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  config = mkIf (any (fs: fs == "exfat") config.boot.supportedFilesystems) {
+    system.fsPackages = if config.boot.kernelPackages.kernelOlder "5.7" then [
+      pkgs.exfat # FUSE
+    ] else [
+      pkgs.exfatprogs # non-FUSE
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/ext.nix b/nixpkgs/nixos/modules/tasks/filesystems/ext.nix
new file mode 100644
index 000000000000..1c34ee2c7035
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/ext.nix
@@ -0,0 +1,31 @@
+{ config, lib, pkgs, ... }:
+
+let
+
+  inInitrd = lib.any (fs: fs == "ext2" || fs == "ext3" || fs == "ext4") config.boot.initrd.supportedFilesystems;
+  inSystem = lib.any (fs: fs == "ext2" || fs == "ext3" || fs == "ext4") config.boot.supportedFilesystems;
+
+in
+
+{
+  config = {
+
+    system.fsPackages = lib.mkIf (config.boot.initrd.systemd.enable -> (inInitrd || inSystem)) [ pkgs.e2fsprogs ];
+
+    # As of kernel 4.3, there is no separate ext3 driver (they're also handled by ext4.ko)
+    boot.initrd.availableKernelModules = lib.mkIf (config.boot.initrd.systemd.enable -> inInitrd) [ "ext2" "ext4" ];
+
+    boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable)
+      ''
+        # Copy e2fsck and friends.
+        copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/e2fsck
+        copy_bin_and_libs ${pkgs.e2fsprogs}/sbin/tune2fs
+        ln -sv e2fsck $out/bin/fsck.ext2
+        ln -sv e2fsck $out/bin/fsck.ext3
+        ln -sv e2fsck $out/bin/fsck.ext4
+      '';
+
+    boot.initrd.systemd.initrdBin = lib.mkIf inInitrd [ pkgs.e2fsprogs ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix b/nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix
new file mode 100644
index 000000000000..4f99f9a57fa6
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/f2fs.nix
@@ -0,0 +1,22 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  inInitrd = any (fs: fs == "f2fs") config.boot.initrd.supportedFilesystems;
+  fileSystems = filter (x: x.fsType == "f2fs") config.system.build.fileSystems;
+in
+{
+  config = mkIf (any (fs: fs == "f2fs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.f2fs-tools ];
+
+    boot.initrd.availableKernelModules = mkIf inInitrd [ "f2fs" "crc32" ];
+
+    boot.initrd.extraUtilsCommands = mkIf (inInitrd && !config.boot.initrd.systemd.enable) ''
+      copy_bin_and_libs ${pkgs.f2fs-tools}/sbin/fsck.f2fs
+    '';
+
+    boot.initrd.systemd.initrdBin = mkIf inInitrd [ pkgs.f2fs-tools ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix
new file mode 100644
index 000000000000..e8c7fa8efbae
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/glusterfs.nix
@@ -0,0 +1,11 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  config = mkIf (any (fs: fs == "glusterfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.glusterfs ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/jfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/jfs.nix
new file mode 100644
index 000000000000..b5132b4caa33
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/jfs.nix
@@ -0,0 +1,21 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  inInitrd = any (fs: fs == "jfs") config.boot.initrd.supportedFilesystems;
+in
+{
+  config = mkIf (any (fs: fs == "jfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.jfsutils ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "jfs" ];
+
+    boot.initrd.extraUtilsCommands = mkIf (inInitrd && !config.boot.initrd.systemd.enable) ''
+      copy_bin_and_libs ${pkgs.jfsutils}/sbin/fsck.jfs
+    '';
+
+    boot.initrd.systemd.initrdBin = mkIf inInitrd [ pkgs.jfsutils ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/nfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/nfs.nix
new file mode 100644
index 000000000000..8c631f0772db
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/nfs.nix
@@ -0,0 +1,135 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "nfs") config.boot.initrd.supportedFilesystems;
+
+  nfsStateDir = "/var/lib/nfs";
+
+  rpcMountpoint = "${nfsStateDir}/rpc_pipefs";
+
+  format = pkgs.formats.ini {};
+
+  idmapdConfFile = format.generate "idmapd.conf" cfg.idmapd.settings;
+  nfsConfFile = pkgs.writeText "nfs.conf" cfg.extraConfig;
+  requestKeyConfFile = pkgs.writeText "request-key.conf" ''
+    create id_resolver * * ${pkgs.nfs-utils}/bin/nfsidmap -t 600 %k %d
+  '';
+
+  cfg = config.services.nfs;
+
+in
+
+{
+  ###### interface
+
+  options = {
+    services.nfs = {
+      idmapd.settings = mkOption {
+        type = format.type;
+        default = {};
+        description = lib.mdDoc ''
+          libnfsidmap configuration. Refer to
+          <https://linux.die.net/man/5/idmapd.conf>
+          for details.
+        '';
+        example = literalExpression ''
+          {
+            Translation = {
+              GSS-Methods = "static,nsswitch";
+            };
+            Static = {
+              "root/hostname.domain.com@REALM.COM" = "root";
+            };
+          }
+        '';
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc ''
+          Extra nfs-utils configuration.
+        '';
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf (any (fs: fs == "nfs" || fs == "nfs4") config.boot.supportedFilesystems) {
+
+    services.rpcbind.enable = true;
+
+    services.nfs.idmapd.settings = {
+      General = mkMerge [
+        { Pipefs-Directory = rpcMountpoint; }
+        (mkIf (config.networking.domain != null) { Domain = config.networking.domain; })
+      ];
+      Mapping = {
+        Nobody-User = "nobody";
+        Nobody-Group = "nogroup";
+      };
+      Translation = {
+        Method = "nsswitch";
+      };
+    };
+
+    system.fsPackages = [ pkgs.nfs-utils ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "nfs" ];
+
+    systemd.packages = [ pkgs.nfs-utils ];
+
+    environment.systemPackages = [ pkgs.keyutils ];
+
+    environment.etc = {
+      "idmapd.conf".source = idmapdConfFile;
+      "nfs.conf".source = nfsConfFile;
+      "request-key.conf".source = requestKeyConfFile;
+    };
+
+    systemd.services.nfs-blkmap =
+      { restartTriggers = [ nfsConfFile ];
+      };
+
+    systemd.targets.nfs-client =
+      { wantedBy = [ "multi-user.target" "remote-fs.target" ];
+      };
+
+    systemd.services.nfs-idmapd =
+      { restartTriggers = [ idmapdConfFile ];
+      };
+
+    systemd.services.nfs-mountd =
+      { restartTriggers = [ nfsConfFile ];
+        enable = mkDefault false;
+      };
+
+    systemd.services.nfs-server =
+      { restartTriggers = [ nfsConfFile ];
+        enable = mkDefault false;
+      };
+
+    systemd.services.auth-rpcgss-module =
+      {
+        unitConfig.ConditionPathExists = [ "" "/etc/krb5.keytab" ];
+      };
+
+    systemd.services.rpc-gssd =
+      { restartTriggers = [ nfsConfFile ];
+        unitConfig.ConditionPathExists = [ "" "/etc/krb5.keytab" ];
+      };
+
+    systemd.services.rpc-statd =
+      { restartTriggers = [ nfsConfFile ];
+
+        preStart =
+          ''
+            mkdir -p /var/lib/nfs/{sm,sm.bak}
+          '';
+      };
+
+  };
+}
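
A hypothetical sketch of the NFS options defined above (the domain and the extra
nfs.conf section are illustrative); extraConfig is written verbatim into nfs.conf:

    services.nfs.idmapd.settings = {
      General.Domain = "example.org";   # NFSv4 ID-mapping domain
    };
    services.nfs.extraConfig = ''
      [nfsd]
      threads = 16
    '';
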
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix
new file mode 100644
index 000000000000..c40d2a1a80bc
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/ntfs.nix
@@ -0,0 +1,11 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  config = mkIf (any (fs: fs == "ntfs" || fs == "ntfs-3g") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.ntfs3g ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix
new file mode 100644
index 000000000000..3c6a0f0cd917
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/reiserfs.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "reiserfs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = mkIf (any (fs: fs == "reiserfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.reiserfsprogs ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "reiserfs" ];
+
+    boot.initrd.extraUtilsCommands = mkIf (inInitrd && !config.boot.initrd.systemd.enable)
+      ''
+        copy_bin_and_libs ${pkgs.reiserfsprogs}/sbin/reiserfsck
+        ln -s reiserfsck $out/bin/fsck.reiserfs
+      '';
+
+    boot.initrd.systemd.initrdBin = mkIf inInitrd [ pkgs.reiserfsprogs ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix
new file mode 100644
index 000000000000..10d45a21d3ca
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/squashfs.nix
@@ -0,0 +1,13 @@
+{ config, lib, ... }:
+
+let
+
+  inInitrd = lib.any (fs: fs == "squashfs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+
+  boot.initrd.availableKernelModules = lib.mkIf inInitrd [ "squashfs" ];
+
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix b/nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix
new file mode 100644
index 000000000000..f9954b5182f9
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/unionfs-fuse.nix
@@ -0,0 +1,43 @@
+{ config, pkgs, lib, ... }:
+
+{
+  config = lib.mkMerge [
+
+    (lib.mkIf (lib.any (fs: fs == "unionfs-fuse") config.boot.initrd.supportedFilesystems) {
+      boot.initrd.kernelModules = [ "fuse" ];
+
+      boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+        copy_bin_and_libs ${pkgs.fuse}/sbin/mount.fuse
+        copy_bin_and_libs ${pkgs.unionfs-fuse}/bin/unionfs
+        substitute ${pkgs.unionfs-fuse}/sbin/mount.unionfs-fuse $out/bin/mount.unionfs-fuse \
+          --replace '${pkgs.bash}/bin/bash' /bin/sh \
+          --replace '${pkgs.fuse}/sbin' /bin \
+          --replace '${pkgs.unionfs-fuse}/bin' /bin
+        chmod +x $out/bin/mount.unionfs-fuse
+      '';
+
+      boot.initrd.postDeviceCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+          # Hacky!!! fuse hard-codes the path to mount
+          mkdir -p /nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-${pkgs.util-linux.name}-bin/bin
+          ln -s $(which mount) /nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-${pkgs.util-linux.name}-bin/bin
+          ln -s $(which umount) /nix/store/eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee-${pkgs.util-linux.name}-bin/bin
+        '';
+
+      boot.initrd.systemd.extraBin = {
+        "mount.fuse" = "${pkgs.fuse}/bin/mount.fuse";
+        "unionfs" = "${pkgs.unionfs-fuse}/bin/unionfs";
+        "mount.unionfs-fuse" = pkgs.runCommand "mount.unionfs-fuse" {} ''
+          substitute ${pkgs.unionfs-fuse}/sbin/mount.unionfs-fuse $out \
+            --replace '${pkgs.bash}/bin/bash' /bin/sh \
+            --replace '${pkgs.fuse}/sbin' /bin \
+            --replace '${pkgs.unionfs-fuse}/bin' /bin
+        '';
+      };
+    })
+
+    (lib.mkIf (lib.any (fs: fs == "unionfs-fuse") config.boot.supportedFilesystems) {
+      system.fsPackages = [ pkgs.unionfs-fuse ];
+    })
+
+  ];
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix b/nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix
new file mode 100644
index 000000000000..5497194f6a8d
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/vboxsf.nix
@@ -0,0 +1,23 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "vboxsf") config.boot.initrd.supportedFilesystems;
+
+  package = pkgs.runCommand "mount.vboxsf" { preferLocalBuild = true; } ''
+    mkdir -p $out/bin
+    cp ${pkgs.linuxPackages.virtualboxGuestAdditions}/bin/mount.vboxsf $out/bin
+  '';
+in
+
+{
+  config = mkIf (any (fs: fs == "vboxsf") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ package ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "vboxsf" ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/vfat.nix b/nixpkgs/nixos/modules/tasks/filesystems/vfat.nix
new file mode 100644
index 000000000000..9281b34633c2
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/vfat.nix
@@ -0,0 +1,27 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "vfat") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = mkIf (any (fs: fs == "vfat") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.dosfstools pkgs.mtools ];
+
+    boot.initrd.kernelModules = mkIf inInitrd [ "vfat" "nls_cp437" "nls_iso8859-1" ];
+
+    boot.initrd.extraUtilsCommands = mkIf (inInitrd && !config.boot.initrd.systemd.enable)
+      ''
+        copy_bin_and_libs ${pkgs.dosfstools}/sbin/dosfsck
+        ln -sv dosfsck $out/bin/fsck.vfat
+      '';
+
+    boot.initrd.systemd.initrdBin = mkIf inInitrd [ pkgs.dosfstools ];
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/xfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/xfs.nix
new file mode 100644
index 000000000000..76f31e660ad3
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/xfs.nix
@@ -0,0 +1,32 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  inInitrd = any (fs: fs == "xfs") config.boot.initrd.supportedFilesystems;
+
+in
+
+{
+  config = mkIf (any (fs: fs == "xfs") config.boot.supportedFilesystems) {
+
+    system.fsPackages = [ pkgs.xfsprogs.bin ];
+
+    boot.initrd.availableKernelModules = mkIf inInitrd [ "xfs" "crc32c" ];
+
+    boot.initrd.extraUtilsCommands = mkIf (inInitrd && !config.boot.initrd.systemd.enable)
+      ''
+        copy_bin_and_libs ${pkgs.xfsprogs.bin}/bin/fsck.xfs
+        copy_bin_and_libs ${pkgs.xfsprogs.bin}/bin/xfs_repair
+      '';
+
+    # Trick just to set 'sh' after the extraUtils nuke-refs.
+    boot.initrd.extraUtilsCommandsTest = mkIf (inInitrd && !config.boot.initrd.systemd.enable)
+      ''
+        sed -i -e 's,^#!.*,#!'$out/bin/sh, $out/bin/fsck.xfs
+      '';
+
+    boot.initrd.systemd.initrdBin = mkIf inInitrd [ pkgs.xfsprogs.bin ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/filesystems/zfs.nix b/nixpkgs/nixos/modules/tasks/filesystems/zfs.nix
new file mode 100644
index 000000000000..4b6a5b6c12c1
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/filesystems/zfs.nix
@@ -0,0 +1,895 @@
+{ config, lib, options, pkgs, utils, ... }:
+#
+# TODO: zfs tunables
+
+with utils;
+with lib;
+
+let
+
+  cfgZfs = config.boot.zfs;
+  optZfs = options.boot.zfs;
+  cfgExpandOnBoot = config.services.zfs.expandOnBoot;
+  cfgSnapshots = config.services.zfs.autoSnapshot;
+  cfgSnapFlags = cfgSnapshots.flags;
+  cfgScrub = config.services.zfs.autoScrub;
+  cfgTrim = config.services.zfs.trim;
+  cfgZED = config.services.zfs.zed;
+
+  inInitrd = any (fs: fs == "zfs") config.boot.initrd.supportedFilesystems;
+  inSystem = any (fs: fs == "zfs") config.boot.supportedFilesystems;
+
+  autosnapPkg = pkgs.zfstools.override {
+    zfs = cfgZfs.package;
+  };
+
+  zfsAutoSnap = "${autosnapPkg}/bin/zfs-auto-snapshot";
+
+  datasetToPool = x: elemAt (splitString "/" x) 0;
+
+  fsToPool = fs: datasetToPool fs.device;
+
+  zfsFilesystems = filter (x: x.fsType == "zfs") config.system.build.fileSystems;
+
+  allPools = unique ((map fsToPool zfsFilesystems) ++ cfgZfs.extraPools);
+
+  rootPools = unique (map fsToPool (filter fsNeededForBoot zfsFilesystems));
+
+  dataPools = unique (filter (pool: !(elem pool rootPools)) allPools);
+
+  snapshotNames = [ "frequent" "hourly" "daily" "weekly" "monthly" ];
+
+  # When importing ZFS pools, there's one difficulty: These scripts may run
+  # before the backing devices (physical HDDs, etc.) of the pool have been
+  # scanned and initialized.
+  #
+  # An attempted import with all devices missing will just fail, and can be
+  # retried, but an import where e.g. two out of three disks in a three-way
+  # mirror are missing, will succeed. This is a problem: When the missing disks
+  # are later discovered, they won't be automatically set online, rendering the
+  # pool redundancy-less (and far slower) until such time as the system reboots.
+  #
+  # The solution is the below. poolReady checks the status of an un-imported
+  # pool, to see if *every* device is available -- in which case the pool will be
+  # in state ONLINE, as opposed to DEGRADED, FAULTED or MISSING.
+  #
+  # The import scripts then loop over this, waiting until the pool is ready or a
+  # sufficient amount of time has passed that we can assume it won't be. In the
+  # latter case it makes one last attempt at importing, allowing the system to
+  # (eventually) boot even with a degraded pool.
+  importLib = {zpoolCmd, awkCmd, cfgZfs}: ''
+    for o in $(cat /proc/cmdline); do
+      case $o in
+        zfs_force|zfs_force=1|zfs_force=y)
+          ZFS_FORCE="-f"
+          ;;
+      esac
+    done
+    poolReady() {
+      pool="$1"
+      state="$("${zpoolCmd}" import 2>/dev/null | "${awkCmd}" "/pool: $pool/ { found = 1 }; /state:/ { if (found == 1) { print \$2; exit } }; END { if (found == 0) { print \"MISSING\" } }")"
+      if [[ "$state" = "ONLINE" ]]; then
+        return 0
+      else
+        echo "Pool $pool in state $state, waiting"
+        return 1
+      fi
+    }
+    poolImported() {
+      pool="$1"
+      "${zpoolCmd}" list "$pool" >/dev/null 2>/dev/null
+    }
+    poolImport() {
+      pool="$1"
+      "${zpoolCmd}" import -d "${cfgZfs.devNodes}" -N $ZFS_FORCE "$pool"
+    }
+  '';
+
+  getPoolFilesystems = pool:
+    filter (x: x.fsType == "zfs" && (fsToPool x) == pool) config.system.build.fileSystems;
+
+  getPoolMounts = prefix: pool:
+    let
+      poolFSes = getPoolFilesystems pool;
+
+      # Remove the "/" suffix because even though most mountpoints
+      # won't have it, the "/" mountpoint will, and we can't have the
+      # trailing slash in "/sysroot/" in stage 1.
+      mountPoint = fs: escapeSystemdPath (prefix + (lib.removeSuffix "/" fs.mountPoint));
+
+      hasUsr = lib.any (fs: fs.mountPoint == "/usr") poolFSes;
+    in
+      map (x: "${mountPoint x}.mount") poolFSes
+      ++ lib.optional hasUsr "sysusr-usr.mount";
+
+  getKeyLocations = pool: if isBool cfgZfs.requestEncryptionCredentials then {
+    hasKeys = cfgZfs.requestEncryptionCredentials;
+    command = "${cfgZfs.package}/sbin/zfs list -rHo name,keylocation,keystatus ${pool}";
+  } else let
+    keys = filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials;
+  in {
+    hasKeys = keys != [];
+    command = "${cfgZfs.package}/sbin/zfs list -Ho name,keylocation,keystatus ${toString keys}";
+  };
+
+  createImportService = { pool, systemd, force, prefix ? "" }:
+    nameValuePair "zfs-import-${pool}" {
+      description = "Import ZFS pool \"${pool}\"";
+      # We wait for systemd-udev-settle to ensure devices are available,
+      # but don't *require* it, because mounts shouldn't be killed if it's stopped.
+      # In the future, hopefully someone will complete this:
+      # https://github.com/zfsonlinux/zfs/pull/4943
+      wants = [ "systemd-udev-settle.service" ];
+      after = [
+        "systemd-udev-settle.service"
+        "systemd-modules-load.service"
+        "systemd-ask-password-console.service"
+      ];
+      requiredBy = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
+      before = getPoolMounts prefix pool ++ [ "zfs-import.target" ];
+      unitConfig = {
+        DefaultDependencies = "no";
+      };
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+      environment.ZFS_FORCE = optionalString force "-f";
+      script = let
+        keyLocations = getKeyLocations pool;
+      in (importLib {
+        # See comments at importLib definition.
+        zpoolCmd = "${cfgZfs.package}/sbin/zpool";
+        awkCmd = "${pkgs.gawk}/bin/awk";
+        inherit cfgZfs;
+      }) + ''
+        if ! poolImported "${pool}"; then
+          echo -n "importing ZFS pool \"${pool}\"..."
+          # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
+          for trial in `seq 1 60`; do
+            poolReady "${pool}" && poolImport "${pool}" && break
+            sleep 1
+          done
+          poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
+        fi
+        if poolImported "${pool}"; then
+          ${optionalString keyLocations.hasKeys ''
+            ${keyLocations.command} | while IFS=$'\t' read ds kl ks; do
+              {
+              if [[ "$ks" != unavailable ]]; then
+                continue
+              fi
+              case "$kl" in
+                none )
+                  ;;
+                prompt )
+                  tries=3
+                  success=false
+                  while [[ $success != true ]] && [[ $tries -gt 0 ]]; do
+                    ${systemd}/bin/systemd-ask-password --timeout=${toString cfgZfs.passwordTimeout} "Enter key for $ds:" | ${cfgZfs.package}/sbin/zfs load-key "$ds" \
+                      && success=true \
+                      || tries=$((tries - 1))
+                  done
+                  [[ $success = true ]]
+                  ;;
+                * )
+                  ${cfgZfs.package}/sbin/zfs load-key "$ds"
+                  ;;
+              esac
+              } < /dev/null # To protect while read ds kl in case anything reads stdin
+            done
+          ''}
+          echo "Successfully imported ${pool}"
+        else
+          exit 1
+        fi
+      '';
+    };
+
+  zedConf = generators.toKeyValue {
+    mkKeyValue = generators.mkKeyValueDefault {
+      mkValueString = v:
+        if isInt           v then toString v
+        else if isString   v then "\"${v}\""
+        else if true  ==   v then "1"
+        else if false ==   v then "0"
+        else if isList     v then "\"" + (concatStringsSep " " v) + "\""
+        else err "this value is" (toString v);
+    } "=";
+  } cfgZED.settings;
+in
+
+{
+
+  imports = [
+    (mkRemovedOptionModule [ "boot" "zfs" "enableLegacyCrypto" ] "The corresponding package was removed from nixpkgs.")
+  ];
+
+  ###### interface
+
+  options = {
+    boot.zfs = {
+      package = mkOption {
+        readOnly = true;
+        type = types.package;
+        default = if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs;
+        defaultText = literalExpression "if config.boot.zfs.enableUnstable then pkgs.zfsUnstable else pkgs.zfs";
+        description = lib.mdDoc "Configured ZFS userland tools package.";
+      };
+
+      enabled = mkOption {
+        readOnly = true;
+        type = types.bool;
+        default = inInitrd || inSystem;
+        defaultText = literalMD "`true` if ZFS filesystem support is enabled";
+        description = lib.mdDoc "True if ZFS filesystem support is enabled";
+      };
+
+      enableUnstable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Use the unstable zfs package. This might be an option if the latest
+          kernel is not yet supported by a published release of ZFS. Enabling
+          this option will install a development version of ZFS on Linux. The
+          version will have already passed an extensive test suite, but it is
+          more likely to hit an undiscovered bug compared to running a released
+          version of ZFS on Linux.
+        '';
+      };
+
+      allowHibernation = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Allow hibernation support. This may be an unsafe option depending on your
+          setup; make sure NOT to use swap on ZFS.
+        '';
+      };
+
+      extraPools = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "tank" "data" ];
+        description = lib.mdDoc ''
+          Name or GUID of extra ZFS pools that you wish to import during boot.
+
+          Usually this is not necessary. Instead, you should set the mountpoint property
+          of ZFS filesystems to `legacy` and add the ZFS filesystems to
+          NixOS's {option}`fileSystems` option, which makes NixOS automatically
+          import the associated pool.
+
+          However, in some cases (e.g. if you have many filesystems) it may be preferable
+          to exclusively use ZFS commands to manage filesystems. If so, since NixOS/systemd
+          will not be managing those filesystems, you will need to specify the ZFS pool here
+          so that NixOS automatically imports it on every boot.
+        '';
+      };
+
+      devNodes = mkOption {
+        type = types.path;
+        default = "/dev/disk/by-id";
+        description = lib.mdDoc ''
+          Name of directory from which to import ZFS devices.
+
+          This should be a path under /dev containing stable names for all devices needed, as
+          import may fail if device nodes are renamed concurrently with a device failing.
+        '';
+      };
+
+      forceImportRoot = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Forcibly import the ZFS root pool(s) during early boot.
+
+          This is enabled by default for backwards compatibility purposes, but it is highly
+          recommended to disable this option, as it bypasses some of the safeguards ZFS uses
+          to protect your ZFS pools.
+
+          If you set this option to `false` and NixOS subsequently fails to
+          boot because it cannot import the root pool, you should boot with the
+          `zfs_force=1` option as a kernel parameter (e.g. by manually
+          editing the kernel params in grub during boot). You should only need to do this
+          once.
+        '';
+      };
+
+      forceImportAll = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Forcibly import all ZFS pool(s).
+
+          If you set this option to `false` and NixOS subsequently fails to
+          import your non-root ZFS pool(s), you should manually import each pool with
+          "zpool import -f \<pool-name\>", and then reboot. You should only need to do
+          this once.
+        '';
+      };
+
+      requestEncryptionCredentials = mkOption {
+        type = types.either types.bool (types.listOf types.str);
+        default = true;
+        example = [ "tank" "data" ];
+        description = lib.mdDoc ''
+          If true, encryption keys or passwords for all encrypted datasets are
+          requested on import. To decrypt only selected datasets, supply a list of
+          dataset names instead. For root pools the encryption key can be supplied
+          either via an interactive prompt (keylocation=prompt) or from a file
+          (keylocation=file://).
+        '';
+      };
+
+      passwordTimeout = mkOption {
+        type = types.int;
+        default = 0;
+        description = lib.mdDoc ''
+          Timeout in seconds to wait for password entry for decrypt at boot.
+
+          Defaults to 0, which waits forever.
+        '';
+      };
+
+      removeLinuxDRM = lib.mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Linux 6.2 dropped some kernel symbols on aarch64 that are required by zfs.
+          Enabling this option will bring them back, allowing that kernel version to be used.
+          Note that in some jurisdictions this may be illegal as it might be considered
+          removing copyright protection from the code.
+          See https://www.ifross.org/?q=en/artikel/ongoing-dispute-over-value-exportsymbolgpl-function for further information.
+
+          If you configure your kernel package with `zfs.latestCompatibleLinuxPackages`, you will also need to pass removeLinuxDRM to that package, like this:
+
+          ```
+          { pkgs, ... }: {
+            boot.kernelPackages = (pkgs.zfs.override {
+              removeLinuxDRM = pkgs.hostPlatform.isAarch64;
+            }).latestCompatibleLinuxPackages;
+
+            boot.zfs.removeLinuxDRM = true;
+          }
+          ```
+        '';
+      };
+    };
+
+    services.zfs.autoSnapshot = {
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable the (OpenSolaris-compatible) ZFS auto-snapshotting service.
+          Note that you must set the `com.sun:auto-snapshot`
+          property to `true` on all datasets which you wish
+          to auto-snapshot.
+
+          You can override a child dataset to use, or not use, auto-snapshotting
+          for a given interval by setting its flag:
+          `zfs set com.sun:auto-snapshot:weekly=false DATASET`
+        '';
+      };
+
+      flags = mkOption {
+        default = "-k -p";
+        example = "-k -p --utc";
+        type = types.str;
+        description = lib.mdDoc ''
+          Flags to pass to the zfs-auto-snapshot command.
+
+          Run `zfs-auto-snapshot` (without any arguments) to
+          see available flags.
+
+          If it's not too inconvenient for snapshots to have timestamps in UTC,
+          it is suggested that you append `--utc` to the list
+          of default options (see example).
+
+          Otherwise, snapshot names can cause name conflicts or apparent time
+          reversals due to daylight savings, timezone or other date/time changes.
+        '';
+      };
+
+      frequent = mkOption {
+        default = 4;
+        type = types.int;
+        description = lib.mdDoc ''
+          Number of frequent (15-minute) auto-snapshots that you wish to keep.
+        '';
+      };
+
+      hourly = mkOption {
+        default = 24;
+        type = types.int;
+        description = lib.mdDoc ''
+          Number of hourly auto-snapshots that you wish to keep.
+        '';
+      };
+
+      daily = mkOption {
+        default = 7;
+        type = types.int;
+        description = lib.mdDoc ''
+          Number of daily auto-snapshots that you wish to keep.
+        '';
+      };
+
+      weekly = mkOption {
+        default = 4;
+        type = types.int;
+        description = lib.mdDoc ''
+          Number of weekly auto-snapshots that you wish to keep.
+        '';
+      };
+
+      monthly = mkOption {
+        default = 12;
+        type = types.int;
+        description = lib.mdDoc ''
+          Number of monthly auto-snapshots that you wish to keep.
+        '';
+      };
+    };
+
+    services.zfs.trim = {
+      enable = mkOption {
+        description = lib.mdDoc "Whether to enable periodic TRIM on all ZFS pools.";
+        default = true;
+        example = false;
+        type = types.bool;
+      };
+
+      interval = mkOption {
+        default = "weekly";
+        type = types.str;
+        example = "daily";
+        description = lib.mdDoc ''
+          How often we run trim. For most desktop and server systems
+          a sufficient trimming frequency is once a week.
+
+          The format is described in
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+    };
+
+    services.zfs.autoScrub = {
+      enable = mkEnableOption (lib.mdDoc "periodic scrubbing of ZFS pools");
+
+      interval = mkOption {
+        default = "Sun, 02:00";
+        type = types.str;
+        example = "daily";
+        description = lib.mdDoc ''
+          Systemd calendar expression for when to scrub ZFS pools. See
+          {manpage}`systemd.time(7)`.
+        '';
+      };
+
+      pools = mkOption {
+        default = [];
+        type = types.listOf types.str;
+        example = [ "tank" ];
+        description = lib.mdDoc ''
+          List of ZFS pools to periodically scrub. If empty, all pools
+          will be scrubbed.
+        '';
+      };
+    };
+
+    services.zfs.expandOnBoot = mkOption {
+      type = types.either (types.enum [ "disabled" "all" ]) (types.listOf types.str);
+      default = "disabled";
+      example = [ "tank" "dozer" ];
+      description = lib.mdDoc ''
+        After importing, expand each device in the specified pools.
+
+        Set the value to the plain string "all" to expand all pools on boot:
+
+            services.zfs.expandOnBoot = "all";
+
+        or set the value to a list of pools to expand the disks of specific pools:
+
+            services.zfs.expandOnBoot = [ "tank" "dozer" ];
+      '';
+    };
+
+    services.zfs.zed = {
+      enableMail = mkEnableOption (lib.mdDoc "ZED's ability to send emails") // {
+        default = cfgZfs.package.enableMail;
+        defaultText = literalExpression "config.${optZfs.package}.enableMail";
+      };
+
+      settings = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
+        example = literalExpression ''
+          {
+            ZED_DEBUG_LOG = "/tmp/zed.debug.log";
+
+            ZED_EMAIL_ADDR = [ "root" ];
+            ZED_EMAIL_PROG = "mail";
+            ZED_EMAIL_OPTS = "-s '@SUBJECT@' @ADDRESS@";
+
+            ZED_NOTIFY_INTERVAL_SECS = 3600;
+            ZED_NOTIFY_VERBOSE = false;
+
+            ZED_USE_ENCLOSURE_LEDS = true;
+            ZED_SCRUB_AFTER_RESILVER = false;
+          }
+        '';
+        description = lib.mdDoc ''
+          ZFS Event Daemon /etc/zfs/zed.d/zed.rc content
+
+          See
+          {manpage}`zed(8)`
+          for details on ZED and the scripts in /etc/zfs/zed.d to find the possible variables
+        '';
+      };
+    };
+  };
+
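
A hypothetical sketch combining the ZFS service options defined above (pool names and
intervals are illustrative):

    services.zfs.autoSnapshot.enable = true;
    services.zfs.autoScrub = { enable = true; interval = "Sun, 02:00"; };
    services.zfs.trim.enable = true;          # enabled by default
    services.zfs.expandOnBoot = [ "tank" ];
    boot.zfs.extraPools = [ "tank" ];
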
+  ###### implementation
+
+  config = mkMerge [
+    (mkIf cfgZfs.enabled {
+      assertions = [
+        {
+          assertion = cfgZED.enableMail -> cfgZfs.package.enableMail;
+          message = ''
+            To allow ZED to send emails, ZFS needs to be configured to enable
+            this. To do so, one must override the `zfs` package and set
+            `enableMail` to true.
+          '';
+        }
+        {
+          assertion = config.networking.hostId != null;
+          message = "ZFS requires networking.hostId to be set";
+        }
+        {
+          assertion = !cfgZfs.forceImportAll || cfgZfs.forceImportRoot;
+          message = "If you enable boot.zfs.forceImportAll, you must also enable boot.zfs.forceImportRoot";
+        }
+        {
+          assertion = cfgZfs.allowHibernation -> !cfgZfs.forceImportRoot && !cfgZfs.forceImportAll;
+          message = "boot.zfs.allowHibernation while force importing is enabled will cause data corruption";
+        }
+        {
+          assertion = !(elem "" allPools);
+          message = ''
+            Automatic pool detection found an empty pool name, which can't be used.
+            Hint: for `fileSystems` entries with `fsType = zfs`, the `device` attribute
+            should be a zfs dataset name, like `device = "pool/data/set"`.
+            This error can be triggered by using an absolute path, such as `"/dev/disk/..."`.
+          '';
+        }
+      ];
+
+      boot = {
+        kernelModules = [ "zfs" ];
+        # https://github.com/openzfs/zfs/issues/260
+        # https://github.com/openzfs/zfs/issues/12842
+        # https://github.com/NixOS/nixpkgs/issues/106093
+        kernelParams = lib.optionals (!config.boot.zfs.allowHibernation) [ "nohibernate" ];
+
+        extraModulePackages = let
+          kernelPkg = if config.boot.zfs.enableUnstable then
+            config.boot.kernelPackages.zfsUnstable
+           else
+            config.boot.kernelPackages.zfs;
+        in [
+          (kernelPkg.override { inherit (cfgZfs) removeLinuxDRM; })
+        ];
+      };
+
+      boot.initrd = mkIf inInitrd {
+        kernelModules = [ "zfs" ] ++ optional (!cfgZfs.enableUnstable) "spl";
+        extraUtilsCommands =
+          mkIf (!config.boot.initrd.systemd.enable) ''
+            copy_bin_and_libs ${cfgZfs.package}/sbin/zfs
+            copy_bin_and_libs ${cfgZfs.package}/sbin/zdb
+            copy_bin_and_libs ${cfgZfs.package}/sbin/zpool
+          '';
+        extraUtilsCommandsTest =
+          mkIf (!config.boot.initrd.systemd.enable) ''
+            $out/bin/zfs --help >/dev/null 2>&1
+            $out/bin/zpool --help >/dev/null 2>&1
+          '';
+        postDeviceCommands = mkIf (!config.boot.initrd.systemd.enable) (concatStringsSep "\n" ([''
+            ZFS_FORCE="${optionalString cfgZfs.forceImportRoot "-f"}"
+          ''] ++ [(importLib {
+            # See comments at importLib definition.
+            zpoolCmd = "zpool";
+            awkCmd = "awk";
+            inherit cfgZfs;
+          })] ++ (map (pool: ''
+            echo -n "importing root ZFS pool \"${pool}\"..."
+            # Loop across the import until it succeeds, because the devices needed may not be discovered yet.
+            if ! poolImported "${pool}"; then
+              for trial in `seq 1 60`; do
+                poolReady "${pool}" > /dev/null && msg="$(poolImport "${pool}" 2>&1)" && break
+                sleep 1
+                echo -n .
+              done
+              echo
+              if [[ -n "$msg" ]]; then
+                echo "$msg";
+              fi
+              poolImported "${pool}" || poolImport "${pool}"  # Try one last time, e.g. to import a degraded pool.
+            fi
+            ${if isBool cfgZfs.requestEncryptionCredentials
+              then optionalString cfgZfs.requestEncryptionCredentials ''
+                zfs load-key -a
+              ''
+              else concatMapStrings (fs: ''
+                zfs load-key -- ${escapeShellArg fs}
+              '') (filter (x: datasetToPool x == pool) cfgZfs.requestEncryptionCredentials)}
+        '') rootPools)));
+
+        # Systemd in stage 1
+        systemd = mkIf config.boot.initrd.systemd.enable {
+          packages = [cfgZfs.package];
+          services = listToAttrs (map (pool: createImportService {
+            inherit pool;
+            systemd = config.boot.initrd.systemd.package;
+            force = cfgZfs.forceImportRoot;
+            prefix = "/sysroot";
+          }) rootPools);
+          targets.zfs-import.wantedBy = [ "zfs.target" ];
+          targets.zfs.wantedBy = [ "initrd.target" ];
+          extraBin = {
+            zpool = "${cfgZfs.package}/sbin/zpool";
+            zfs = "${cfgZfs.package}/sbin/zfs";
+            awk = "${pkgs.gawk}/bin/awk";
+          };
+        };
+      };
+
+      systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/zpool".source = pkgs.writeShellScript "zpool-sync-shutdown" ''
+        exec ${cfgZfs.package}/bin/zpool sync
+      '';
+      systemd.shutdownRamfs.storePaths = ["${cfgZfs.package}/bin/zpool"];
+
+      # TODO FIXME: see https://github.com/NixOS/nixpkgs/pull/99386#issuecomment-798813567. To avoid breaking existing boot loaders, and because release notes are easy to miss, also enable this when ZFS is merely present in the system (inSystem).
+      boot.loader.grub = mkIf (inInitrd || inSystem) {
+        zfsSupport = true;
+      };
+
+      services.zfs.zed.settings = {
+        ZED_EMAIL_PROG = mkIf cfgZED.enableMail (mkDefault "${pkgs.mailutils}/bin/mail");
+        PATH = lib.makeBinPath [
+          cfgZfs.package
+          pkgs.coreutils
+          pkgs.curl
+          pkgs.gawk
+          pkgs.gnugrep
+          pkgs.gnused
+          pkgs.nettools
+          pkgs.util-linux
+        ];
+      };
+
+      # ZFS already has its own I/O scheduler. Without this rule, heavy I/O (e.g. a nix build) could briefly freeze the system (reported by @Artturin).
+      services.udev.extraRules = ''
+        ACTION=="add|change", KERNEL=="sd[a-z]*[0-9]*|mmcblk[0-9]*p[0-9]*|nvme[0-9]*n[0-9]*p[0-9]*", ENV{ID_FS_TYPE}=="zfs_member", ATTR{../queue/scheduler}="none"
+      '';
+
+      environment.etc = genAttrs
+        (map
+          (file: "zfs/zed.d/${file}")
+          [
+            "all-syslog.sh"
+            "pool_import-led.sh"
+            "resilver_finish-start-scrub.sh"
+            "statechange-led.sh"
+            "vdev_attach-led.sh"
+            "zed-functions.sh"
+            "data-notify.sh"
+            "resilver_finish-notify.sh"
+            "scrub_finish-notify.sh"
+            "statechange-notify.sh"
+            "vdev_clear-led.sh"
+          ]
+        )
+        (file: { source = "${cfgZfs.package}/etc/${file}"; })
+      // {
+        "zfs/zed.d/zed.rc".text = zedConf;
+        "zfs/zpool.d".source = "${cfgZfs.package}/etc/zfs/zpool.d/";
+      };
+
+      system.fsPackages = [ cfgZfs.package ]; # XXX: needed? ZFS doesn't have (or need) a fsck
+      environment.systemPackages = [ cfgZfs.package ]
+        ++ optional cfgSnapshots.enable autosnapPkg; # so the user can run the command to see flags
+
+      services.udev.packages = [ cfgZfs.package ]; # to hook zvol naming, etc.
+      systemd.packages = [ cfgZfs.package ];
+
+      # Export kernel_neon_* symbols again.
+      # This change is necessary until ZFS figures out a solution
+      # with upstream or in their build system to fill the gap for
+      # this symbol.
+      # In the meantime, we restore what was once a working piece of code
+      # in the kernel.
+      boot.kernelPatches = lib.optional (cfgZfs.removeLinuxDRM && pkgs.stdenv.hostPlatform.system == "aarch64-linux") {
+        name = "export-neon-symbols-as-gpl";
+        patch = pkgs.fetchpatch {
+          url = "https://github.com/torvalds/linux/commit/aaeca98456431a8d9382ecf48ac4843e252c07b3.patch";
+          hash = "sha256-L2g4G1tlWPIi/QRckMuHDcdWBcKpObSWSRTvbHRIwIk=";
+          revert = true;
+        };
+      };
+
+      systemd.services = let
+        createImportService' = pool: createImportService {
+          inherit pool;
+          systemd = config.systemd.package;
+          force = cfgZfs.forceImportAll;
+        };
+
+        # This forces a sync of any ZFS pools prior to poweroff, even if they're set
+        # to sync=disabled.
+        createSyncService = pool:
+          nameValuePair "zfs-sync-${pool}" {
+            description = "Sync ZFS pool \"${pool}\"";
+            wantedBy = [ "shutdown.target" ];
+            unitConfig = {
+              DefaultDependencies = false;
+            };
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+            };
+            script = ''
+              ${cfgZfs.package}/sbin/zfs set nixos:shutdown-time="$(date)" "${pool}"
+            '';
+          };
+
+        createZfsService = serv:
+          nameValuePair serv {
+            after = [ "systemd-modules-load.service" ];
+            wantedBy = [ "zfs.target" ];
+          };
+
+      in listToAttrs (map createImportService' dataPools ++
+                      map createSyncService allPools ++
+                      map createZfsService [ "zfs-mount" "zfs-share" "zfs-zed" ]);
+
+      systemd.targets.zfs-import.wantedBy = [ "zfs.target" ];
+
+      systemd.targets.zfs.wantedBy = [ "multi-user.target" ];
+    })
+
+    (mkIf (cfgZfs.enabled && cfgExpandOnBoot != "disabled") {
+      systemd.services."zpool-expand@" = {
+        description = "Expand ZFS pools";
+        after = [ "zfs.target" ];
+
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+
+        scriptArgs = "%i";
+        path = [ cfgZfs.package ];
+
+        script =  ''
+          pool=$1
+
+          echo "Expanding all devices for $pool."
+
+          ${pkgs.zpool-auto-expand-partitions}/bin/zpool_part_disks --automatically-grow "$pool"
+        '';
+      };
+
+      systemd.services."zpool-expand-pools" =
+        let
+          # Create a string, to be interpolated into a bash script,
+          # which enumerates all of the pools to expand.
+          # If the option is set to "all", every imported pool is
+          # expanded dynamically; otherwise only the explicitly
+          # listed pools are expanded.
+          poolListProvider = if cfgExpandOnBoot == "all"
+            then "$(zpool list -H -o name)"
+            else lib.escapeShellArgs cfgExpandOnBoot;
+        in
+        {
+          description = "Expand specified ZFS pools";
+          wantedBy = [ "default.target" ];
+          after = [ "zfs.target" ];
+
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+          };
+
+          path = lib.optionals (cfgExpandOnBoot == "all") [ cfgZfs.package ];
+
+          script = ''
+            for pool in ${poolListProvider}; do
+              systemctl start --no-block "zpool-expand@$pool"
+            done
+          '';
+        };
+    })
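+
+    # Illustrative usage (assumption: cfgExpandOnBoot refers to a
+    # `services.zfs.expandOnBoot` option; see its declaration for the exact name):
+    #   services.zfs.expandOnBoot = "all";        # expand every imported pool
+    #   services.zfs.expandOnBoot = [ "rpool" ];  # or only the listed pools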
+
+    (mkIf (cfgZfs.enabled && cfgSnapshots.enable) {
+      systemd.services = let
+                           descr = name: if name == "frequent" then "15 mins"
+                                    else if name == "hourly" then "hour"
+                                    else if name == "daily" then "day"
+                                    else if name == "weekly" then "week"
+                                    else if name == "monthly" then "month"
+                                    else throw "unknown snapshot name";
+                           numSnapshots = name: builtins.getAttr name cfgSnapshots;
+                         in builtins.listToAttrs (map (snapName:
+                              {
+                                name = "zfs-snapshot-${snapName}";
+                                value = {
+                                  description = "ZFS auto-snapshotting every ${descr snapName}";
+                                  after = [ "zfs-import.target" ];
+                                  serviceConfig = {
+                                    Type = "oneshot";
+                                    ExecStart = "${zfsAutoSnap} ${cfgSnapFlags} ${snapName} ${toString (numSnapshots snapName)}";
+                                  };
+                                  restartIfChanged = false;
+                                };
+                              }) snapshotNames);
+
+      systemd.timers = let
+                         timer = name: if name == "frequent" then "*:0,15,30,45" else name;
+                       in builtins.listToAttrs (map (snapName:
+                            {
+                              name = "zfs-snapshot-${snapName}";
+                              value = {
+                                wantedBy = [ "timers.target" ];
+                                timerConfig = {
+                                  OnCalendar = timer snapName;
+                                  Persistent = "yes";
+                                };
+                              };
+                            }) snapshotNames);
+    })
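+
+    # Illustrative usage (assumption: cfgSnapshots refers to the
+    # `services.zfs.autoSnapshot` options):
+    #   services.zfs.autoSnapshot = {
+    #     enable = true;
+    #     frequent = 4;  # keep four 15-minute snapshots
+    #     daily = 7;     # keep seven daily snapshots
+    #   };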
+
+    (mkIf (cfgZfs.enabled && cfgScrub.enable) {
+      systemd.services.zfs-scrub = {
+        description = "ZFS pools scrubbing";
+        after = [ "zfs-import.target" ];
+        serviceConfig = {
+          Type = "simple";
+        };
+        script = ''
+          ${cfgZfs.package}/bin/zpool scrub -w ${
+            if cfgScrub.pools != [] then
+              (concatStringsSep " " cfgScrub.pools)
+            else
+              "$(${cfgZfs.package}/bin/zpool list -H -o name)"
+            }
+        '';
+      };
+
+      systemd.timers.zfs-scrub = {
+        wantedBy = [ "timers.target" ];
+        after = [ "multi-user.target" ]; # Apparently scrubbing before boot is complete hangs the system? #53583
+        timerConfig = {
+          OnCalendar = cfgScrub.interval;
+          Persistent = "yes";
+        };
+      };
+    })
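+
+    # Illustrative usage (assumption: cfgScrub refers to the
+    # `services.zfs.autoScrub` options):
+    #   services.zfs.autoScrub = {
+    #     enable = true;
+    #     interval = "weekly";  # systemd OnCalendar expression
+    #     pools = [ ];          # an empty list scrubs all imported pools (see script above)
+    #   };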
+
+    (mkIf (cfgZfs.enabled && cfgTrim.enable) {
+      systemd.services.zpool-trim = {
+        description = "ZFS pools trim";
+        after = [ "zfs-import.target" ];
+        path = [ cfgZfs.package ];
+        startAt = cfgTrim.interval;
+        # By default we ignore errors returned by the trim command, in case:
+        # - HDDs are mixed with SSDs
+        # - an SSD in a pool is already being trimmed
+        # - there are only HDDs, and a failure would put the unit in a degraded state
+        serviceConfig.ExecStart = "${pkgs.runtimeShell} -c 'for pool in $(zpool list -H -o name); do zpool trim $pool;  done || true' ";
+      };
+
+      systemd.timers.zpool-trim.timerConfig.Persistent = "yes";
+    })
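+
+    # Illustrative usage (assumption: cfgTrim refers to the `services.zfs.trim` options):
+    #   services.zfs.trim = {
+    #     enable = true;
+    #     interval = "weekly";  # used as startAt above
+    #   };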
+  ];
+}
diff --git a/nixpkgs/nixos/modules/tasks/lvm.nix b/nixpkgs/nixos/modules/tasks/lvm.nix
new file mode 100644
index 000000000000..325a5aa45b1e
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/lvm.nix
@@ -0,0 +1,144 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.services.lvm;
+in {
+  options.services.lvm = {
+    enable = mkEnableOption (lib.mdDoc "lvm2") // {
+      default = true;
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.lvm2;
+      internal = true;
+      defaultText = literalExpression "pkgs.lvm2";
+      description = lib.mdDoc ''
+        This option allows you to override the LVM package that's used on the system
+        (udev rules, tmpfiles, systemd services).
+        Defaults to pkgs.lvm2; to pkgs.lvm2_dmeventd if dmeventd is enabled, or to pkgs.lvm2_vdo if vdo is enabled.
+      '';
+    };
+    dmeventd.enable = mkEnableOption (lib.mdDoc "the LVM dmevent daemon");
+    boot.thin.enable = mkEnableOption (lib.mdDoc "support for booting from ThinLVs");
+    boot.vdo.enable = mkEnableOption (lib.mdDoc "support for booting from VDOLVs");
+  };
+
+  options.boot.initrd.services.lvm.enable = mkEnableOption (lib.mdDoc "booting from LVM2 in the initrd") // {
+    description = lib.mdDoc ''
+      *This will only be used when systemd is used in stage 1.*
+
+      Whether to enable booting from LVM2 in the initrd.
+    '';
+  };
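+
+  # Illustrative usage of the options declared above (a minimal sketch):
+  #   services.lvm.dmeventd.enable = true;        # run the dmevent monitoring daemon
+  #   services.lvm.boot.thin.enable = true;       # support booting from thin LVs
+  #   boot.initrd.services.lvm.enable = true;     # LVM activation in a systemd-based initrd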
+
+  config = mkMerge [
+    ({
+      # minimal configuration file to make lvmconfig/lvm2-activation-generator happy
+      environment.etc."lvm/lvm.conf".text = "config {}";
+    })
+    (mkIf cfg.enable {
+      systemd.tmpfiles.packages = [ cfg.package.out ];
+      environment.systemPackages = [ cfg.package ];
+      systemd.packages = [ cfg.package ];
+
+      services.udev.packages = [ cfg.package.out ];
+    })
+    (mkIf config.boot.initrd.services.lvm.enable {
+      # We need lvm2 for the device-mapper rules
+      boot.initrd.services.udev.packages = [ cfg.package ];
+      # The device-mapper rules want to call tools from lvm2
+      boot.initrd.systemd.initrdBin = [ cfg.package ];
+      boot.initrd.services.udev.binPackages = [ cfg.package ];
+    })
+    (mkIf cfg.dmeventd.enable {
+      systemd.sockets."dm-event".wantedBy = [ "sockets.target" ];
+      systemd.services."lvm2-monitor".wantedBy = [ "sysinit.target" ];
+
+      environment.etc."lvm/lvm.conf".text = ''
+        dmeventd/executable = "${cfg.package}/bin/dmeventd"
+      '';
+      services.lvm.package = mkDefault pkgs.lvm2_dmeventd;
+    })
+    (mkIf cfg.boot.thin.enable {
+      boot.initrd = {
+        kernelModules = [ "dm-snapshot" "dm-thin-pool" ];
+
+        systemd.initrdBin = lib.mkIf config.boot.initrd.services.lvm.enable [ pkgs.thin-provisioning-tools ];
+
+        extraUtilsCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+          for BIN in ${pkgs.thin-provisioning-tools}/bin/*; do
+            copy_bin_and_libs $BIN
+          done
+        '';
+
+        extraUtilsCommandsTest = mkIf (!config.boot.initrd.systemd.enable) ''
+          ls ${pkgs.thin-provisioning-tools}/bin/ | grep -v pdata_tools | while read BIN; do
+            $out/bin/$(basename $BIN) --help > /dev/null
+          done
+        '';
+      };
+
+      environment.etc."lvm/lvm.conf".text = concatMapStringsSep "\n"
+        (bin: "global/${bin}_executable = ${pkgs.thin-provisioning-tools}/bin/${bin}")
+        [ "thin_check" "thin_dump" "thin_repair" "cache_check" "cache_dump" "cache_repair" ];
+
+      environment.systemPackages = [ pkgs.thin-provisioning-tools ];
+    })
+    (mkIf cfg.boot.vdo.enable {
+      boot = {
+        initrd = {
+          kernelModules = [ "kvdo" ];
+
+          systemd.initrdBin = lib.mkIf config.boot.initrd.services.lvm.enable [ pkgs.vdo ];
+
+          extraUtilsCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+            ls ${pkgs.vdo}/bin/ | while read BIN; do
+              copy_bin_and_libs ${pkgs.vdo}/bin/$BIN
+            done
+            substituteInPlace $out/bin/vdorecover --replace "${pkgs.bash}/bin/bash" "/bin/sh"
+            substituteInPlace $out/bin/adaptLVMVDO.sh --replace "${pkgs.bash}/bin/bash" "/bin/sh"
+          '';
+
+          extraUtilsCommandsTest = mkIf (!config.boot.initrd.systemd.enable) ''
+            ls ${pkgs.vdo}/bin/ | grep -vE '(adaptLVMVDO|vdorecover)' | while read BIN; do
+              $out/bin/$(basename $BIN) --help > /dev/null
+            done
+          '';
+        };
+        extraModulePackages = [ config.boot.kernelPackages.kvdo ];
+      };
+
+      services.lvm.package = mkOverride 999 pkgs.lvm2_vdo;  # this overrides mkDefault
+
+      environment.systemPackages = [ pkgs.vdo ];
+    })
+    (mkIf (cfg.dmeventd.enable || cfg.boot.thin.enable) {
+      boot.initrd.systemd.contents."/etc/lvm/lvm.conf".text = optionalString (config.boot.initrd.services.lvm.enable && cfg.boot.thin.enable) (concatMapStringsSep "\n"
+          (bin: "global/${bin}_executable = /bin/${bin}")
+          [ "thin_check" "thin_dump" "thin_repair" "cache_check" "cache_dump" "cache_repair" ]
+        ) + "\n" + optionalString cfg.dmeventd.enable ''
+          dmeventd/executable = /bin/false
+          activation/monitoring = 0
+        '';
+
+      boot.initrd.preLVMCommands = mkIf (!config.boot.initrd.systemd.enable) ''
+          mkdir -p /etc/lvm
+          cat << EOF >> /etc/lvm/lvm.conf
+          ${optionalString cfg.boot.thin.enable (
+            concatMapStringsSep "\n"
+              (bin: "global/${bin}_executable = $(command -v ${bin})")
+              [ "thin_check" "thin_dump" "thin_repair" "cache_check" "cache_dump" "cache_repair" ]
+            )
+          }
+          ${optionalString cfg.dmeventd.enable ''
+            dmeventd/executable = "$(command -v false)"
+            activation/monitoring = 0
+          ''}
+          EOF
+      '';
+    })
+  ];
+
+}
diff --git a/nixpkgs/nixos/modules/tasks/network-interfaces-scripted.nix b/nixpkgs/nixos/modules/tasks/network-interfaces-scripted.nix
new file mode 100644
index 000000000000..e1ac7f24cb32
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/network-interfaces-scripted.nix
@@ -0,0 +1,628 @@
+{ config, lib, pkgs, utils, ... }:
+
+with utils;
+with lib;
+
+let
+
+  cfg = config.networking;
+  interfaces = attrValues cfg.interfaces;
+
+  slaves = concatMap (i: i.interfaces) (attrValues cfg.bonds)
+    ++ concatMap (i: i.interfaces) (attrValues cfg.bridges)
+    ++ concatMap (i: attrNames (filterAttrs (_: config: config.type != "internal") i.interfaces)) (attrValues cfg.vswitches)
+    ++ concatMap (i: [i.interface]) (attrValues cfg.macvlans)
+    ++ concatMap (i: [i.interface]) (attrValues cfg.vlans);
+
+  # We must escape interfaces due to the systemd interpretation
+  subsystemDevice = interface:
+    "sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
+
+  interfaceIps = i:
+    i.ipv4.addresses
+    ++ optionals cfg.enableIPv6 i.ipv6.addresses;
+
+  destroyBond = i: ''
+    while true; do
+      UPDATED=1
+      SLAVES=$(ip link | grep 'master ${i}' | awk -F: '{print $2}')
+      for I in $SLAVES; do
+        UPDATED=0
+        ip link set dev "$I" nomaster
+      done
+      [ "$UPDATED" -eq "1" ] && break
+    done
+    ip link set dev "${i}" down 2>/dev/null || true
+    ip link del dev "${i}" 2>/dev/null || true
+  '';
+
+  # warn that these attributes are deprecated (2017-2-2)
+  # Should be removed in the release after next
+  bondDeprecation = rec {
+    deprecated = [ "lacp_rate" "miimon" "mode" "xmit_hash_policy" ];
+    filterDeprecated = bond: (filterAttrs (attrName: attr:
+                         elem attrName deprecated && attr != null) bond);
+  };
+
+  bondWarnings =
+    let oneBondWarnings = bondName: bond:
+          mapAttrsToList (bondText bondName) (bondDeprecation.filterDeprecated bond);
+        bondText = bondName: optName: _:
+          "${bondName}.${optName} is deprecated, use ${bondName}.driverOptions";
+    in {
+      warnings = flatten (mapAttrsToList oneBondWarnings cfg.bonds);
+    };
+
+  normalConfig = {
+    systemd.network.links = let
+      createNetworkLink = i: nameValuePair "40-${i.name}" {
+        matchConfig.OriginalName = i.name;
+        linkConfig = optionalAttrs (i.macAddress != null) {
+          MACAddress = i.macAddress;
+        } // optionalAttrs (i.mtu != null) {
+          MTUBytes = toString i.mtu;
+        };
+      };
+    in listToAttrs (map createNetworkLink interfaces);
+    systemd.services =
+      let
+
+        deviceDependency = dev:
+          # Use systemd service if we manage device creation, else
+          # trust udev when not in a container
+          if (hasAttr dev (filterAttrs (k: v: v.virtual) cfg.interfaces)) ||
+             (hasAttr dev cfg.bridges) ||
+             (hasAttr dev cfg.bonds) ||
+             (hasAttr dev cfg.macvlans) ||
+             (hasAttr dev cfg.sits) ||
+             (hasAttr dev cfg.vlans) ||
+             (hasAttr dev cfg.vswitches)
+          then [ "${dev}-netdev.service" ]
+          else optional (dev != null && dev != "lo" && !config.boot.isContainer) (subsystemDevice dev);
+
+        hasDefaultGatewaySet = (cfg.defaultGateway != null && cfg.defaultGateway.address != "")
+                            || (cfg.enableIPv6 && cfg.defaultGateway6 != null && cfg.defaultGateway6.address != "");
+
+        needNetworkSetup = cfg.resolvconf.enable || cfg.defaultGateway != null || cfg.defaultGateway6 != null;
+
+        networkLocalCommands = lib.mkIf needNetworkSetup {
+          after = [ "network-setup.service" ];
+          bindsTo = [ "network-setup.service" ];
+        };
+
+        networkSetup = lib.mkIf needNetworkSetup
+          { description = "Networking Setup";
+
+            after = [ "network-pre.target" "systemd-udevd.service" "systemd-sysctl.service" ];
+            before = [ "network.target" "shutdown.target" ];
+            wants = [ "network.target" ];
+            # exclude bridges from the partOf relationship to fix container networking bug #47210
+            partOf = map (i: "network-addresses-${i.name}.service") (filter (i: !(hasAttr i.name cfg.bridges)) interfaces);
+            conflicts = [ "shutdown.target" ];
+            wantedBy = [ "multi-user.target" ] ++ optional hasDefaultGatewaySet "network-online.target";
+
+            unitConfig.ConditionCapability = "CAP_NET_ADMIN";
+
+            path = [ pkgs.iproute2 ];
+
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+            };
+
+            unitConfig.DefaultDependencies = false;
+
+            script =
+              ''
+                ${optionalString config.networking.resolvconf.enable ''
+                  # Set the static DNS configuration, if given.
+                  ${pkgs.openresolv}/sbin/resolvconf -m 1 -a static <<EOF
+                  ${optionalString (cfg.nameservers != [] && cfg.domain != null) ''
+                    domain ${cfg.domain}
+                  ''}
+                  ${optionalString (cfg.search != []) ("search " + concatStringsSep " " cfg.search)}
+                  ${flip concatMapStrings cfg.nameservers (ns: ''
+                    nameserver ${ns}
+                  '')}
+                  EOF
+                ''}
+
+                # Set the default gateway.
+                ${optionalString (cfg.defaultGateway != null && cfg.defaultGateway.address != "") ''
+                  ${optionalString (cfg.defaultGateway.interface != null) ''
+                    ip route replace ${cfg.defaultGateway.address} dev ${cfg.defaultGateway.interface} ${optionalString (cfg.defaultGateway.metric != null)
+                      "metric ${toString cfg.defaultGateway.metric}"
+                    } proto static
+                  ''}
+                  ip route replace default ${optionalString (cfg.defaultGateway.metric != null)
+                      "metric ${toString cfg.defaultGateway.metric}"
+                    } via "${cfg.defaultGateway.address}" ${
+                    optionalString (cfg.defaultGatewayWindowSize != null)
+                      "window ${toString cfg.defaultGatewayWindowSize}"} ${
+                    optionalString (cfg.defaultGateway.interface != null)
+                      "dev ${cfg.defaultGateway.interface}"} proto static
+                ''}
+                ${optionalString (cfg.defaultGateway6 != null && cfg.defaultGateway6.address != "") ''
+                  ${optionalString (cfg.defaultGateway6.interface != null) ''
+                    ip -6 route replace ${cfg.defaultGateway6.address} dev ${cfg.defaultGateway6.interface} ${optionalString (cfg.defaultGateway6.metric != null)
+                      "metric ${toString cfg.defaultGateway6.metric}"
+                    } proto static
+                  ''}
+                  ip -6 route replace default ${optionalString (cfg.defaultGateway6.metric != null)
+                      "metric ${toString cfg.defaultGateway6.metric}"
+                    } via "${cfg.defaultGateway6.address}" ${
+                    optionalString (cfg.defaultGatewayWindowSize != null)
+                      "window ${toString cfg.defaultGatewayWindowSize}"} ${
+                    optionalString (cfg.defaultGateway6.interface != null)
+                      "dev ${cfg.defaultGateway6.interface}"} proto static
+                ''}
+              '';
+          };
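+
+        # Illustrative configuration consumed by the script above (a sketch; the
+        # addresses are documentation examples, not defaults):
+        #   networking.defaultGateway = { address = "192.0.2.1"; interface = "eth0"; };
+        #   networking.nameservers = [ "192.0.2.53" ];
+        #   networking.search = [ "example.org" ];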
+
+        # For each interface <foo>, create a job "network-addresses-<foo>.service"
+        # that performs static address configuration.  It has a "wants"
+        # dependency on ‘<foo>.service’, which is supposed to create
+        # the interface and need not exist (i.e. for hardware
+        # interfaces).  It has a binds-to dependency on the actual
+        # network device, so it only gets started after the interface
+        # has appeared, and it's stopped when the interface
+        # disappears.
+        configureAddrs = i:
+          let
+            ips = interfaceIps i;
+          in
+          nameValuePair "network-addresses-${i.name}"
+          { description = "Address configuration of ${i.name}";
+            wantedBy = [
+              "network-setup.service"
+              "network.target"
+            ];
+            # Order before network-setup because the routes configured
+            # there may need the IP addresses configured here first.
+            before = [ "network-setup.service" ];
+            bindsTo = deviceDependency i.name;
+            after = [ "network-pre.target" ] ++ (deviceDependency i.name);
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            # Restart rather than stop+start this unit to prevent the
+            # network from dying during switch-to-configuration.
+            stopIfChanged = false;
+            path = [ pkgs.iproute2 ];
+            script =
+              ''
+                state="/run/nixos/network/addresses/${i.name}"
+                mkdir -p $(dirname "$state")
+
+                ip link set dev "${i.name}" up
+
+                ${flip concatMapStrings ips (ip:
+                  let
+                    cidr = "${ip.address}/${toString ip.prefixLength}";
+                  in
+                  ''
+                    echo "${cidr}" >> $state
+                    echo -n "adding address ${cidr}... "
+                    if out=$(ip addr add "${cidr}" dev "${i.name}" 2>&1); then
+                      echo "done"
+                    elif ! echo "$out" | grep "File exists" >/dev/null 2>&1; then
+                      echo "'ip addr add "${cidr}" dev "${i.name}"' failed: $out"
+                      exit 1
+                    fi
+                  ''
+                )}
+
+                state="/run/nixos/network/routes/${i.name}"
+                mkdir -p $(dirname "$state")
+
+                ${flip concatMapStrings (i.ipv4.routes ++ i.ipv6.routes) (route:
+                  let
+                    cidr = "${route.address}/${toString route.prefixLength}";
+                    via = optionalString (route.via != null) ''via "${route.via}"'';
+                    options = concatStrings (mapAttrsToList (name: val: "${name} ${val} ") route.options);
+                    type = toString route.type;
+                  in
+                  ''
+                     echo "${cidr}" >> $state
+                     echo -n "adding route ${cidr}... "
+                     if out=$(ip route add ${type} "${cidr}" ${options} ${via} dev "${i.name}" proto static 2>&1); then
+                       echo "done"
+                     elif ! echo "$out" | grep "File exists" >/dev/null 2>&1; then
+                       echo "'ip route add ${type} "${cidr}" ${options} ${via} dev "${i.name}"' failed: $out"
+                       exit 1
+                     fi
+                  ''
+                )}
+              '';
+            preStop = ''
+              state="/run/nixos/network/routes/${i.name}"
+              if [ -e "$state" ]; then
+                while read cidr; do
+                  echo -n "deleting route $cidr... "
+                  ip route del "$cidr" dev "${i.name}" >/dev/null 2>&1 && echo "done" || echo "failed"
+                done < "$state"
+                rm -f "$state"
+              fi
+
+              state="/run/nixos/network/addresses/${i.name}"
+              if [ -e "$state" ]; then
+                while read cidr; do
+                  echo -n "deleting address $cidr... "
+                  ip addr del "$cidr" dev "${i.name}" >/dev/null 2>&1 && echo "done" || echo "failed"
+                done < "$state"
+                rm -f "$state"
+              fi
+            '';
+          };
+
+        createTunDevice = i: nameValuePair "${i.name}-netdev"
+          { description = "Virtual Network Interface ${i.name}";
+            bindsTo = optional (!config.boot.isContainer) "dev-net-tun.device";
+            after = optional (!config.boot.isContainer) "dev-net-tun.device" ++ [ "network-pre.target" ];
+            wantedBy = [ "network-setup.service" (subsystemDevice i.name) ];
+            partOf = [ "network-setup.service" ];
+            before = [ "network-setup.service" ];
+            path = [ pkgs.iproute2 ];
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+            };
+            script = ''
+              ip tuntap add dev "${i.name}" mode "${i.virtualType}" user "${i.virtualOwner}"
+            '';
+            postStop = ''
+              ip link del dev ${i.name} || true
+            '';
+          };
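+
+        # Illustrative interface definition handled by createTunDevice (a sketch;
+        # the interface and owner names are hypothetical):
+        #   networking.interfaces.tap0 = {
+        #     virtual = true;
+        #     virtualType = "tap";     # or "tun"
+        #     virtualOwner = "alice";
+        #   };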
+
+        createBridgeDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            deps = concatLists (map deviceDependency v.interfaces);
+          in
+          { description = "Bridge Interface ${n}";
+            wantedBy = [ "network-setup.service" (subsystemDevice n) ];
+            bindsTo = deps ++ optional v.rstp "mstpd.service";
+            partOf = [ "network-setup.service" ] ++ optional v.rstp "mstpd.service";
+            after = [ "network-pre.target" ] ++ deps ++ optional v.rstp "mstpd.service"
+              ++ map (i: "network-addresses-${i}.service") v.interfaces;
+            before = [ "network-setup.service" ];
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 ];
+            script = ''
+              # Remove Dead Interfaces
+              echo "Removing old bridge ${n}..."
+              ip link show dev "${n}" >/dev/null 2>&1 && ip link del dev "${n}"
+
+              echo "Adding bridge ${n}..."
+              ip link add name "${n}" type bridge
+
+              # Enslave child interfaces
+              ${flip concatMapStrings v.interfaces (i: ''
+                ip link set dev "${i}" master "${n}"
+                ip link set dev "${i}" up
+              '')}
+              # Save list of enslaved interfaces
+              echo "${flip concatMapStrings v.interfaces (i: ''
+                ${i}
+              '')}" > /run/${n}.interfaces
+
+              ${optionalString config.virtualisation.libvirtd.enable ''
+                  # Enslave dynamically added interfaces which may be lost on nixos-rebuild
+                  #
+                  # If `libvirtd.service` is not running, do not use `virsh`, which would try to activate it via 'libvirtd.socket' and thus start it out of order.
+                  # `libvirtd.service` sets up its bridge interfaces itself when it starts normally.
+                  #
+                  if /run/current-system/systemd/bin/systemctl --quiet is-active 'libvirtd.service'; then
+                    for uri in qemu:///system lxc:///; do
+                      for dom in $(${pkgs.libvirt}/bin/virsh -c $uri list --name); do
+                        ${pkgs.libvirt}/bin/virsh -c $uri dumpxml "$dom" | \
+                        ${pkgs.xmlstarlet}/bin/xmlstarlet sel -t -m "//domain/devices/interface[@type='bridge'][source/@bridge='${n}'][target/@dev]" -v "concat('ip link set dev ',target/@dev,' master ',source/@bridge,';')" | \
+                        ${pkgs.bash}/bin/bash
+                      done
+                    done
+                  fi
+                ''}
+
+              # Enable (R)STP on the bridge
+              ${optionalString v.rstp ''
+                echo 2 >/sys/class/net/${n}/bridge/stp_state
+              ''}
+
+              ip link set dev "${n}" up
+            '';
+            postStop = ''
+              ip link set dev "${n}" down || true
+              ip link del dev "${n}" || true
+              rm -f /run/${n}.interfaces
+            '';
+            reload = ''
+              # Un-enslave child interfaces (old list of interfaces)
+              for interface in `cat /run/${n}.interfaces`; do
+                ip link set dev "$interface" nomaster up
+              done
+
+              # Enslave child interfaces (new list of interfaces)
+              ${flip concatMapStrings v.interfaces (i: ''
+                ip link set dev "${i}" master "${n}"
+                ip link set dev "${i}" up
+              '')}
+              # Save list of enslaved interfaces
+              echo "${flip concatMapStrings v.interfaces (i: ''
+                ${i}
+              '')}" > /run/${n}.interfaces
+
+              # (Un-)set stp on the bridge
+              echo ${if v.rstp then "2" else "0"} > /sys/class/net/${n}/bridge/stp_state
+            '';
+            reloadIfChanged = true;
+          });
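+
+        # Illustrative bridge definition handled by createBridgeDevice (a sketch;
+        # the interface names are examples):
+        #   networking.bridges.br0 = {
+        #     interfaces = [ "enp3s0" "enp4s0" ];
+        #     rstp = false;
+        #   };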
+
+        createVswitchDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            deps = concatLists (map deviceDependency (attrNames (filterAttrs (_: config: config.type != "internal") v.interfaces)));
+            internalConfigs = map (i: "network-addresses-${i}.service") (attrNames (filterAttrs (_: config: config.type == "internal") v.interfaces));
+            ofRules = pkgs.writeText "vswitch-${n}-openFlowRules" v.openFlowRules;
+          in
+          { description = "Open vSwitch Interface ${n}";
+            wantedBy = [ "network-setup.service" (subsystemDevice n) ] ++ internalConfigs;
+            # before = [ "network-setup.service" ];
+            # should work without internalConfigs dependencies because address/link configuration depends
+            # on the device, which is created by ovs-vswitchd with type=internal, but it does not...
+            before = [ "network-setup.service" ] ++ internalConfigs;
+            partOf = [ "network-setup.service" ]; # shutdown the bridge when network is shutdown
+            bindsTo = [ "ovs-vswitchd.service" ]; # requires ovs-vswitchd to be alive at all times
+            after = [ "network-pre.target" "ovs-vswitchd.service" ] ++ deps; # start switch after physical interfaces and vswitch daemon
+            wants = deps; # if one or more interface fails, the switch should continue to run
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 config.virtualisation.vswitch.package ];
+            preStart = ''
+              echo "Resetting Open vSwitch ${n}..."
+              ovs-vsctl --if-exists del-br ${n} -- add-br ${n} \
+                        -- set bridge ${n} protocols=${concatStringsSep "," v.supportedOpenFlowVersions}
+            '';
+            script = ''
+              echo "Configuring Open vSwitch ${n}..."
+              ovs-vsctl ${concatStrings (mapAttrsToList (name: config: " -- add-port ${n} ${name}" + optionalString (config.vlan != null) " tag=${toString config.vlan}") v.interfaces)} \
+                ${concatStrings (mapAttrsToList (name: config: optionalString (config.type != null) " -- set interface ${name} type=${config.type}") v.interfaces)} \
+                ${concatMapStrings (x: " -- set-controller ${n} " + x)  v.controllers} \
+                ${concatMapStrings (x: " -- " + x) (splitString "\n" v.extraOvsctlCmds)}
+
+
+              echo "Adding OpenFlow rules for Open vSwitch ${n}..."
+              ovs-ofctl --protocols=${v.openFlowVersion} add-flows ${n} ${ofRules}
+            '';
+            postStop = ''
+              echo "Cleaning Open vSwitch ${n}"
+              echo "Shutting down internal ${n} interface"
+              ip link set dev ${n} down || true
+              echo "Deleting flows for ${n}"
+              ovs-ofctl --protocols=${v.openFlowVersion} del-flows ${n} || true
+              echo "Deleting Open vSwitch ${n}"
+              ovs-vsctl --if-exists del-br ${n} || true
+            '';
+          });
+
+        createBondDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            deps = concatLists (map deviceDependency v.interfaces);
+          in
+          { description = "Bond Interface ${n}";
+            wantedBy = [ "network-setup.service" (subsystemDevice n) ];
+            bindsTo = deps;
+            partOf = [ "network-setup.service" ];
+            after = [ "network-pre.target" ] ++ deps
+              ++ map (i: "network-addresses-${i}.service") v.interfaces;
+            before = [ "network-setup.service" ];
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 pkgs.gawk ];
+            script = ''
+              echo "Destroying old bond ${n}..."
+              ${destroyBond n}
+
+              echo "Creating new bond ${n}..."
+              ip link add name "${n}" type bond \
+              ${let opts = (mapAttrs (const toString)
+                             (bondDeprecation.filterDeprecated v))
+                           // v.driverOptions;
+                 in concatStringsSep "\n"
+                      (mapAttrsToList (set: val: "  ${set} ${val} \\") opts)}
+
+              # !!! There must be a better way to wait for the interface
+              while [ ! -d "/sys/class/net/${n}" ]; do sleep 0.1; done;
+
+              # Bring up the bond and enslave the specified interfaces
+              ip link set dev "${n}" up
+              ${flip concatMapStrings v.interfaces (i: ''
+                ip link set dev "${i}" down
+                ip link set dev "${i}" master "${n}"
+              '')}
+            '';
+            postStop = destroyBond n;
+          });
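+
+        # Illustrative bond definition handled by createBondDevice (a sketch; the
+        # driverOptions keys follow the kernel bonding documentation):
+        #   networking.bonds.bond0 = {
+        #     interfaces = [ "enp3s0" "enp4s0" ];
+        #     driverOptions = {
+        #       mode = "802.3ad";
+        #       miimon = "100";
+        #     };
+        #   };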
+
+        createMacvlanDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            deps = deviceDependency v.interface;
+          in
+          { description = "Vlan Interface ${n}";
+            wantedBy = [ "network-setup.service" (subsystemDevice n) ];
+            bindsTo = deps;
+            partOf = [ "network-setup.service" ];
+            after = [ "network-pre.target" ] ++ deps;
+            before = [ "network-setup.service" ];
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 ];
+            script = ''
+              # Remove Dead Interfaces
+              ip link show dev "${n}" >/dev/null 2>&1 && ip link delete dev "${n}"
+              ip link add link "${v.interface}" name "${n}" type macvlan \
+                ${optionalString (v.mode != null) "mode ${v.mode}"}
+              ip link set dev "${n}" up
+            '';
+            postStop = ''
+              ip link delete dev "${n}" || true
+            '';
+          });
+
+        createFouEncapsulation = n: v: nameValuePair "${n}-fou-encap"
+          (let
+            # if we have a device to bind to we can wait for its addresses to be
+            # configured, otherwise external sequencing is required.
+            deps = optionals (v.local != null && v.local.dev != null)
+              (deviceDependency v.local.dev ++ [ "network-addresses-${v.local.dev}.service" ]);
+            fouSpec = "port ${toString v.port} ${
+              if v.protocol != null then "ipproto ${toString v.protocol}" else "gue"
+            } ${
+              optionalString (v.local != null) "local ${escapeShellArg v.local.address} ${
+                optionalString (v.local.dev != null) "dev ${escapeShellArg v.local.dev}"
+              }"
+            }";
+          in
+          { description = "FOU endpoint ${n}";
+            wantedBy = [ "network-setup.service" (subsystemDevice n) ];
+            bindsTo = deps;
+            partOf = [ "network-setup.service" ];
+            after = [ "network-pre.target" ] ++ deps;
+            before = [ "network-setup.service" ];
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 ];
+            script = ''
+              # always remove previous incarnation since show can't filter
+              ip fou del ${fouSpec} >/dev/null 2>&1 || true
+              ip fou add ${fouSpec}
+            '';
+            postStop = ''
+              ip fou del ${fouSpec} || true
+            '';
+          });
+
+        createSitDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            deps = deviceDependency v.dev;
+          in
+          { description = "6-to-4 Tunnel Interface ${n}";
+            wantedBy = [ "network-setup.service" (subsystemDevice n) ];
+            bindsTo = deps;
+            partOf = [ "network-setup.service" ];
+            after = [ "network-pre.target" ] ++ deps;
+            before = [ "network-setup.service" ];
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 ];
+            script = ''
+              # Remove Dead Interfaces
+              ip link show dev "${n}" >/dev/null 2>&1 && ip link delete dev "${n}"
+              ip link add name "${n}" type sit \
+                ${optionalString (v.remote != null) "remote \"${v.remote}\""} \
+                ${optionalString (v.local != null) "local \"${v.local}\""} \
+                ${optionalString (v.ttl != null) "ttl ${toString v.ttl}"} \
+                ${optionalString (v.dev != null) "dev \"${v.dev}\""} \
+                ${optionalString (v.encapsulation != null)
+                  "encap ${v.encapsulation.type} encap-dport ${toString v.encapsulation.port} ${
+                    optionalString (v.encapsulation.sourcePort != null)
+                      "encap-sport ${toString v.encapsulation.sourcePort}"
+                  }"}
+              ip link set dev "${n}" up
+            '';
+            postStop = ''
+              ip link delete dev "${n}" || true
+            '';
+          });
+
+        createGreDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            deps = deviceDependency v.dev;
+            ttlarg = if lib.hasPrefix "ip6" v.type then "hoplimit" else "ttl";
+          in
+          { description = "GRE Tunnel Interface ${n}";
+            wantedBy = [ "network-setup.service" (subsystemDevice n) ];
+            bindsTo = deps;
+            partOf = [ "network-setup.service" ];
+            after = [ "network-pre.target" ] ++ deps;
+            before = [ "network-setup.service" ];
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 ];
+            script = ''
+              # Remove Dead Interfaces
+              ip link show dev "${n}" >/dev/null 2>&1 && ip link delete dev "${n}"
+              ip link add name "${n}" type ${v.type} \
+                ${optionalString (v.remote != null) "remote \"${v.remote}\""} \
+                ${optionalString (v.local != null) "local \"${v.local}\""} \
+                ${optionalString (v.ttl != null) "${ttlarg} ${toString v.ttl}"} \
+                ${optionalString (v.dev != null) "dev \"${v.dev}\""}
+              ip link set dev "${n}" up
+            '';
+            postStop = ''
+              ip link delete dev "${n}" || true
+            '';
+          });
+
+        createVlanDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            deps = deviceDependency v.interface;
+          in
+          { description = "Vlan Interface ${n}";
+            wantedBy = [ "network-setup.service" (subsystemDevice n) ];
+            bindsTo = deps;
+            partOf = [ "network-setup.service" ];
+            after = [ "network-pre.target" ] ++ deps;
+            before = [ "network-setup.service" ];
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 ];
+            script = ''
+              # Remove Dead Interfaces
+              ip link show dev "${n}" >/dev/null 2>&1 && ip link delete dev "${n}"
+              ip link add link "${v.interface}" name "${n}" type vlan id "${toString v.id}"
+
+              # We try to bring up the logical VLAN interface. If the master
+              # interface it depends on is not up yet, bringing the logical
+              # interface up fails for now; it will be brought up later, once
+              # the master interface is up.
+              ip link set dev "${n}" up || true
+            '';
+            postStop = ''
+              ip link delete dev "${n}" || true
+            '';
+          });
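+
+        # Illustrative VLAN definition handled by createVlanDevice (a sketch):
+        #   networking.vlans.vlan100 = {
+        #     id = 100;
+        #     interface = "enp3s0";
+        #   };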
+
+      in listToAttrs (
+           map configureAddrs interfaces ++
+           map createTunDevice (filter (i: i.virtual) interfaces))
+         // mapAttrs' createBridgeDevice cfg.bridges
+         // mapAttrs' createVswitchDevice cfg.vswitches
+         // mapAttrs' createBondDevice cfg.bonds
+         // mapAttrs' createMacvlanDevice cfg.macvlans
+         // mapAttrs' createFouEncapsulation cfg.fooOverUDP
+         // mapAttrs' createSitDevice cfg.sits
+         // mapAttrs' createGreDevice cfg.greTunnels
+         // mapAttrs' createVlanDevice cfg.vlans
+         // {
+           network-setup = networkSetup;
+           network-local-commands = networkLocalCommands;
+         };
+
+    services.udev.extraRules =
+      ''
+        KERNEL=="tun", TAG+="systemd"
+      '';
+
+
+  };
+
+in
+
+{
+  config = mkMerge [
+    bondWarnings
+    (mkIf (!cfg.useNetworkd) normalConfig)
+    { # Ensure slave interfaces are brought up
+      networking.interfaces = genAttrs slaves (i: {});
+    }
+  ];
+}
diff --git a/nixpkgs/nixos/modules/tasks/network-interfaces-systemd.nix b/nixpkgs/nixos/modules/tasks/network-interfaces-systemd.nix
new file mode 100644
index 000000000000..2009c9a7e6e2
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/network-interfaces-systemd.nix
@@ -0,0 +1,462 @@
+{ config, lib, utils, pkgs, ... }:
+
+with utils;
+with lib;
+
+let
+
+  cfg = config.networking;
+  interfaces = attrValues cfg.interfaces;
+
+  interfaceIps = i:
+    i.ipv4.addresses
+    ++ optionals cfg.enableIPv6 i.ipv6.addresses;
+
+  interfaceRoutes = i:
+    i.ipv4.routes
+    ++ optionals cfg.enableIPv6 i.ipv6.routes;
+
+  dhcpStr = useDHCP: if useDHCP == true || useDHCP == null then "yes" else "no";
+
+  slaves =
+    concatLists (map (bond: bond.interfaces) (attrValues cfg.bonds))
+    ++ concatLists (map (bridge: bridge.interfaces) (attrValues cfg.bridges))
+    ++ map (sit: sit.dev) (attrValues cfg.sits)
+    ++ map (gre: gre.dev) (attrValues cfg.greTunnels)
+    ++ map (vlan: vlan.interface) (attrValues cfg.vlans)
+    # add a dependency on the physical or independently created vswitch member interfaces
+    # TODO: warn the user that any address configured on those interfaces will be useless
+    ++ concatMap (i: attrNames (filterAttrs (_: config: config.type != "internal") i.interfaces)) (attrValues cfg.vswitches);
+
+  defaultGateways = mkMerge (forEach [ cfg.defaultGateway cfg.defaultGateway6 ] (gateway:
+    optionalAttrs (gateway != null && gateway.interface != null) {
+      networks."40-${gateway.interface}" = {
+        matchConfig.Name = gateway.interface;
+        routes = [{
+          routeConfig = {
+            Gateway = gateway.address;
+          } // optionalAttrs (gateway.metric != null) {
+            Metric = gateway.metric;
+          };
+        }];
+      };
+    }
+  ));
+
+  genericDhcpNetworks = initrd: mkIf cfg.useDHCP {
+    networks."99-ethernet-default-dhcp" = {
+      # We want to match physical ethernet interfaces as commonly
+      # found on laptops, desktops and servers, to provide an
+      # "out-of-the-box" setup that works for common cases.  This
+      # heuristic isn't perfect (it could match interfaces with
+      # custom names that _happen_ to start with en or eth), but
+      # should be good enough to make the common case easy and can
+      # be overridden on a case-by-case basis using
+      # higher-priority networks or by disabling useDHCP.
+
+      # Type=ether matches veth interfaces as well, and this is
+      # more likely to result in interfaces being configured to
+      # use DHCP when they shouldn't.
+
+      matchConfig.Name = ["en*" "eth*"];
+      DHCP = "yes";
+      networkConfig.IPv6PrivacyExtensions = "kernel";
+    };
+    networks."99-wireless-client-dhcp" = {
+      # Like above, but this is much more likely to be correct.
+      matchConfig.WLANInterfaceType = "station";
+      DHCP = "yes";
+      networkConfig.IPv6PrivacyExtensions = "kernel";
+      # We also set the route metric to one more than the default
+      # of 1024, so that Ethernet is preferred if both are
+      # available.
+      dhcpV4Config.RouteMetric = 1025;
+      ipv6AcceptRAConfig.RouteMetric = 1025;
+    };
+  };
+
+
+  interfaceNetworks = mkMerge (forEach interfaces (i: {
+    netdevs = mkIf i.virtual ({
+      "40-${i.name}" = {
+        netdevConfig = {
+          Name = i.name;
+          Kind = i.virtualType;
+        };
+        "${i.virtualType}Config" = optionalAttrs (i.virtualOwner != null) {
+          User = i.virtualOwner;
+        };
+      };
+    });
+    networks."40-${i.name}" = {
+      name = mkDefault i.name;
+      DHCP = mkForce (dhcpStr
+        (if i.useDHCP != null then i.useDHCP else (config.networking.useDHCP && i.ipv4.addresses == [ ])));
+      address = forEach (interfaceIps i)
+        (ip: "${ip.address}/${toString ip.prefixLength}");
+      routes = forEach (interfaceRoutes i)
+        (route: {
+          # Most of these route options have not been tested.
+          # Please fix or report any mistakes you may find.
+          routeConfig =
+            optionalAttrs (route.address != null && route.prefixLength != null) {
+              Destination = "${route.address}/${toString route.prefixLength}";
+            } //
+            optionalAttrs (route.options ? fastopen_no_cookie) {
+              FastOpenNoCookie = route.options.fastopen_no_cookie;
+            } //
+            optionalAttrs (route.via != null) {
+              Gateway = route.via;
+            } //
+            optionalAttrs (route.type != null) {
+              Type = route.type;
+            } //
+            optionalAttrs (route.options ? onlink) {
+              GatewayOnLink = true;
+            } //
+            optionalAttrs (route.options ? initrwnd) {
+              InitialAdvertisedReceiveWindow = route.options.initrwnd;
+            } //
+            optionalAttrs (route.options ? initcwnd) {
+              InitialCongestionWindow = route.options.initcwnd;
+            } //
+            optionalAttrs (route.options ? pref) {
+              IPv6Preference = route.options.pref;
+            } //
+            optionalAttrs (route.options ? mtu) {
+              MTUBytes = route.options.mtu;
+            } //
+            optionalAttrs (route.options ? metric) {
+              Metric = route.options.metric;
+            } //
+            optionalAttrs (route.options ? src) {
+              PreferredSource = route.options.src;
+            } //
+            optionalAttrs (route.options ? protocol) {
+              Protocol = route.options.protocol;
+            } //
+            optionalAttrs (route.options ? quickack) {
+              QuickAck = route.options.quickack;
+            } //
+            optionalAttrs (route.options ? scope) {
+              Scope = route.options.scope;
+            } //
+            optionalAttrs (route.options ? from) {
+              Source = route.options.from;
+            } //
+            optionalAttrs (route.options ? table) {
+              Table = route.options.table;
+            } //
+            optionalAttrs (route.options ? advmss) {
+              TCPAdvertisedMaximumSegmentSize = route.options.advmss;
+            } //
+            optionalAttrs (route.options ? ttl-propagate) {
+              TTLPropagate = route.options.ttl-propagate == "enabled";
+            };
+        });
+      networkConfig.IPv6PrivacyExtensions = "kernel";
+      linkConfig = optionalAttrs (i.macAddress != null) {
+        MACAddress = i.macAddress;
+      } // optionalAttrs (i.mtu != null) {
+        MTUBytes = toString i.mtu;
+      };
+    };
+  }));
+
+  bridgeNetworks = mkMerge (flip mapAttrsToList cfg.bridges (name: bridge: {
+    netdevs."40-${name}" = {
+      netdevConfig = {
+        Name = name;
+        Kind = "bridge";
+      };
+    };
+    networks = listToAttrs (forEach bridge.interfaces (bi:
+      nameValuePair "40-${bi}" {
+        DHCP = mkOverride 0 (dhcpStr false);
+        networkConfig.Bridge = name;
+      }));
+  }));
+
+  vlanNetworks = mkMerge (flip mapAttrsToList cfg.vlans (name: vlan: {
+    netdevs."40-${name}" = {
+      netdevConfig = {
+        Name = name;
+        Kind = "vlan";
+      };
+      vlanConfig.Id = vlan.id;
+    };
+    networks."40-${vlan.interface}" = {
+      vlan = [ name ];
+    };
+  }));
+
+in
+
+{
+  config = mkMerge [
+
+  (mkIf config.boot.initrd.network.enable {
+    # Note this is if initrd.network.enable, not if
+    # initrd.systemd.network.enable. By setting the latter and not the
+    # former, the user retains full control over the configuration.
+    boot.initrd.systemd.network = mkMerge [
+      defaultGateways
+      (genericDhcpNetworks true)
+      interfaceNetworks
+      bridgeNetworks
+      vlanNetworks
+    ];
+    boot.initrd.availableKernelModules =
+      optional (cfg.bridges != {}) "bridge" ++
+      optional (cfg.vlans != {}) "8021q";
+  })
+
+  (mkIf cfg.useNetworkd {
+
+    assertions = [ {
+      assertion = cfg.defaultGatewayWindowSize == null;
+      message = "networking.defaultGatewayWindowSize is not supported by networkd.";
+    } {
+      assertion = cfg.defaultGateway != null -> cfg.defaultGateway.interface != null;
+      message = "networking.defaultGateway.interface is not optional when using networkd.";
+    } {
+      assertion = cfg.defaultGateway6 != null -> cfg.defaultGateway6.interface != null;
+      message = "networking.defaultGateway6.interface is not optional when using networkd.";
+    } ] ++ flip mapAttrsToList cfg.bridges (n: { rstp, ... }: {
+      assertion = !rstp;
+      message = "networking.bridges.${n}.rstp is not supported by networkd.";
+    }) ++ flip mapAttrsToList cfg.fooOverUDP (n: { local, ... }: {
+      assertion = local == null;
+      message = "networking.fooOverUDP.${n}.local is not supported by networkd.";
+    });
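+
+    # Illustrative top-level switch that selects this backend (a sketch):
+    #   networking.useNetworkd = true;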
+
+    networking.dhcpcd.enable = mkDefault false;
+
+    systemd.network =
+      mkMerge [ {
+        enable = true;
+      }
+      defaultGateways
+      (genericDhcpNetworks false)
+      interfaceNetworks
+      bridgeNetworks
+      (mkMerge (flip mapAttrsToList cfg.bonds (name: bond: {
+        netdevs."40-${name}" = {
+          netdevConfig = {
+            Name = name;
+            Kind = "bond";
+          };
+          bondConfig = let
+            # Manual mapping (as of 2017-02-03) from the [BOND] section options
+            # of `man 5 systemd.netdev` to the driver options documented in
+            # https://www.kernel.org/doc/Documentation/networking/bonding.txt.
+            driverOptionMapping = let
+              trans = f: optName: { valTransform = f; optNames = [optName]; };
+              simp  = trans id;
+              ms    = trans (v: v + "ms");
+              in {
+                Mode                       = simp "mode";
+                TransmitHashPolicy         = simp "xmit_hash_policy";
+                LACPTransmitRate           = simp "lacp_rate";
+                MIIMonitorSec              = ms "miimon";
+                UpDelaySec                 = ms "updelay";
+                DownDelaySec               = ms "downdelay";
+                LearnPacketIntervalSec     = simp "lp_interval";
+                AdSelect                   = simp "ad_select";
+                FailOverMACPolicy          = simp "fail_over_mac";
+                ARPValidate                = simp "arp_validate";
+                # apparently in ms for this value?! Upstream bug?
+                ARPIntervalSec             = simp "arp_interval";
+                ARPIPTargets               = simp "arp_ip_target";
+                ARPAllTargets              = simp "arp_all_targets";
+                PrimaryReselectPolicy      = simp "primary_reselect";
+                ResendIGMP                 = simp "resend_igmp";
+                PacketsPerSlave            = simp "packets_per_slave";
+                GratuitousARP = { valTransform = id;
+                                  optNames = [ "num_grat_arp" "num_unsol_na" ]; };
+                AllSlavesActive            = simp "all_slaves_active";
+                MinLinks                   = simp "min_links";
+              };
+
+            do = bond.driverOptions;
+            assertNoUnknownOption = let
+              knownOptions = flatten (mapAttrsToList (_: kOpts: kOpts.optNames)
+                                                     driverOptionMapping);
+              # options that apparently don’t exist in the networkd config
+              unknownOptions = [ "primary" ];
+              assertTrace = bool: msg: if bool then true else builtins.trace msg false;
+              in assert all (driverOpt: assertTrace
+                               (elem driverOpt (knownOptions ++ unknownOptions))
+                               "The bond.driverOption `${driverOpt}` cannot be mapped to the list of known networkd bond options. Please add it to the mapping above the assert or to `unknownOptions` should it not exist in networkd.")
+                            (mapAttrsToList (k: _: k) do); "";
+            # get those driverOptions that have been set
+            filterSystemdOptions = filterAttrs (sysDOpt: kOpts:
+                                     any (kOpt: do ? ${kOpt}) kOpts.optNames);
+            # build final set of systemd options to bond values
+            buildOptionSet = mapAttrs (_: kOpts: with kOpts;
+                               # we simply take the first set kernel bond option
+                               # (one option has multiple names, which is silly)
+                               head (map (optN: valTransform (do.${optN}))
+                                 # only map those that exist
+                                 (filter (o: do ? ${o}) optNames)));
+            in seq assertNoUnknownOption
+                   (buildOptionSet (filterSystemdOptions driverOptionMapping));
+
+        };
+
+        networks = listToAttrs (forEach bond.interfaces (bi:
+          nameValuePair "40-${bi}" {
+            DHCP = mkOverride 0 (dhcpStr false);
+            networkConfig.Bond = name;
+          }));
+      })))
+      (mkMerge (flip mapAttrsToList cfg.macvlans (name: macvlan: {
+        netdevs."40-${name}" = {
+          netdevConfig = {
+            Name = name;
+            Kind = "macvlan";
+          };
+          macvlanConfig = optionalAttrs (macvlan.mode != null) { Mode = macvlan.mode; };
+        };
+        networks."40-${macvlan.interface}" = {
+          macvlan = [ name ];
+        };
+      })))
+      (mkMerge (flip mapAttrsToList cfg.fooOverUDP (name: fou: {
+        netdevs."40-${name}" = {
+          netdevConfig = {
+            Name = name;
+            Kind = "fou";
+          };
+          # unfortunately networkd cannot encode dependencies of netdevs on addresses/routes,
+          # so we cannot specify Local=, Peer=, PeerPort=. this looks like a missing feature
+          # in networkd.
+          fooOverUDPConfig = {
+            Port = fou.port;
+            Encapsulation = if fou.protocol != null then "FooOverUDP" else "GenericUDPEncapsulation";
+          } // (optionalAttrs (fou.protocol != null) {
+            Protocol = fou.protocol;
+          });
+        };
+      })))
+      (mkMerge (flip mapAttrsToList cfg.sits (name: sit: {
+        netdevs."40-${name}" = {
+          netdevConfig = {
+            Name = name;
+            Kind = "sit";
+          };
+          tunnelConfig =
+            (optionalAttrs (sit.remote != null) {
+              Remote = sit.remote;
+            }) // (optionalAttrs (sit.local != null) {
+              Local = sit.local;
+            }) // (optionalAttrs (sit.ttl != null) {
+              TTL = sit.ttl;
+            }) // (optionalAttrs (sit.encapsulation != null) (
+              {
+                FooOverUDP = true;
+                Encapsulation =
+                  if sit.encapsulation.type == "fou"
+                  then "FooOverUDP"
+                  else "GenericUDPEncapsulation";
+                FOUDestinationPort = sit.encapsulation.port;
+              } // (optionalAttrs (sit.encapsulation.sourcePort != null) {
+                FOUSourcePort = sit.encapsulation.sourcePort;
+              })));
+        };
+        networks = mkIf (sit.dev != null) {
+          "40-${sit.dev}" = {
+            tunnel = [ name ];
+          };
+        };
+      })))
+      (mkMerge (flip mapAttrsToList cfg.greTunnels (name: gre: {
+        netdevs."40-${name}" = {
+          netdevConfig = {
+            Name = name;
+            Kind = gre.type;
+          };
+          tunnelConfig =
+            (optionalAttrs (gre.remote != null) {
+              Remote = gre.remote;
+            }) // (optionalAttrs (gre.local != null) {
+              Local = gre.local;
+            }) // (optionalAttrs (gre.ttl != null) {
+              TTL = gre.ttl;
+            });
+        };
+        networks = mkIf (gre.dev != null) {
+          "40-${gre.dev}" = {
+            tunnel = [ name ];
+          };
+        };
+      })))
+      vlanNetworks
+    ];
+
+    # We need to prefill the slaved devices with networking options
+    # This forces the network interface creator to initialize slaves.
+    networking.interfaces = listToAttrs (map (i: nameValuePair i { }) slaves);
+
+    systemd.services = let
+      # We must escape interfaces due to the systemd interpretation
+      subsystemDevice = interface:
+        "sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
+      # support for creating openvswitch switches
+      createVswitchDevice = n: v: nameValuePair "${n}-netdev"
+          (let
+            deps = map subsystemDevice (attrNames (filterAttrs (_: config: config.type != "internal") v.interfaces));
+            ofRules = pkgs.writeText "vswitch-${n}-openFlowRules" v.openFlowRules;
+          in
+          { description = "Open vSwitch Interface ${n}";
+            wantedBy = [ "network.target" (subsystemDevice n) ];
+            # and create bridge before systemd-networkd starts because it might create internal interfaces
+            before = [ "systemd-networkd.service" ];
+            # shutdown the bridge when network is shutdown
+            partOf = [ "network.target" ];
+            # requires ovs-vswitchd to be alive at all times
+            bindsTo = [ "ovs-vswitchd.service" ];
+            # start switch after physical interfaces and vswitch daemon
+            after = [ "network-pre.target" "ovs-vswitchd.service" ] ++ deps;
+            wants = deps; # if one or more interface fails, the switch should continue to run
+            serviceConfig.Type = "oneshot";
+            serviceConfig.RemainAfterExit = true;
+            path = [ pkgs.iproute2 config.virtualisation.vswitch.package ];
+            preStart = ''
+              echo "Resetting Open vSwitch ${n}..."
+              ovs-vsctl --if-exists del-br ${n} -- add-br ${n} \
+                        -- set bridge ${n} protocols=${concatStringsSep "," v.supportedOpenFlowVersions}
+            '';
+            script = ''
+              echo "Configuring Open vSwitch ${n}..."
+              ovs-vsctl ${concatStrings (mapAttrsToList (name: config: " -- add-port ${n} ${name}" + optionalString (config.vlan != null) " tag=${toString config.vlan}") v.interfaces)} \
+                ${concatStrings (mapAttrsToList (name: config: optionalString (config.type != null) " -- set interface ${name} type=${config.type}") v.interfaces)} \
+                ${concatMapStrings (x: " -- set-controller ${n} " + x)  v.controllers} \
+                ${concatMapStrings (x: " -- " + x) (splitString "\n" v.extraOvsctlCmds)}
+
+
+              echo "Adding OpenFlow rules for Open vSwitch ${n}..."
+              ovs-ofctl --protocols=${v.openFlowVersion} add-flows ${n} ${ofRules}
+            '';
+            postStop = ''
+              echo "Cleaning Open vSwitch ${n}"
+              echo "Shutting down internal ${n} interface"
+              ip link set dev ${n} down || true
+              echo "Deleting flows for ${n}"
+              ovs-ofctl --protocols=${v.openFlowVersion} del-flows ${n} || true
+              echo "Deleting Open vSwitch ${n}"
+              ovs-vsctl --if-exists del-br ${n} || true
+            '';
+          });
+    in mapAttrs' createVswitchDevice cfg.vswitches
+      // {
+            "network-local-commands" = {
+              after = [ "systemd-networkd.service" ];
+              bindsTo = [ "systemd-networkd.service" ];
+          };
+      };
+  })
+
+  ];
+}
diff --git a/nixpkgs/nixos/modules/tasks/network-interfaces.nix b/nixpkgs/nixos/modules/tasks/network-interfaces.nix
new file mode 100644
index 000000000000..298add13437a
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/network-interfaces.nix
@@ -0,0 +1,1580 @@
+{ config, options, lib, pkgs, utils, ... }:
+
+with lib;
+with utils;
+
+let
+
+  cfg = config.networking;
+  opt = options.networking;
+  interfaces = attrValues cfg.interfaces;
+  hasVirtuals = any (i: i.virtual) interfaces;
+  hasSits = cfg.sits != { };
+  hasGres = cfg.greTunnels != { };
+  hasBonds = cfg.bonds != { };
+  hasFous = cfg.fooOverUDP != { }
+    || filterAttrs (_: s: s.encapsulation != null) cfg.sits != { };
+
+  slaves = concatMap (i: i.interfaces) (attrValues cfg.bonds)
+    ++ concatMap (i: i.interfaces) (attrValues cfg.bridges)
+    ++ concatMap (i: attrNames (filterAttrs (name: config: ! (config.type == "internal" || hasAttr name cfg.interfaces)) i.interfaces)) (attrValues cfg.vswitches);
+
+  slaveIfs = map (i: cfg.interfaces.${i}) (filter (i: cfg.interfaces ? ${i}) slaves);
+
+  rstpBridges = flip filterAttrs cfg.bridges (_: { rstp, ... }: rstp);
+
+  needsMstpd = rstpBridges != { };
+
+  bridgeStp = optional needsMstpd (pkgs.writeTextFile {
+    name = "bridge-stp";
+    executable = true;
+    destination = "/bin/bridge-stp";
+    text = ''
+      #!${pkgs.runtimeShell} -e
+      export PATH="${pkgs.mstpd}/bin"
+
+      BRIDGES=(${concatStringsSep " " (attrNames rstpBridges)})
+      for BRIDGE in $BRIDGES; do
+        if [ "$BRIDGE" = "$1" ]; then
+          if [ "$2" = "start" ]; then
+            mstpctl addbridge "$BRIDGE"
+            exit 0
+          elif [ "$2" = "stop" ]; then
+            mstpctl delbridge "$BRIDGE"
+            exit 0
+          fi
+          exit 1
+        fi
+      done
+      exit 1
+    '';
+  });
+
+  # We must escape interfaces due to the systemd interpretation
+  subsystemDevice = interface:
+    "sys-subsystem-net-devices-${escapeSystemdPath interface}.device";
+
+  addrOpts = v:
+    assert v == 4 || v == 6;
+    { options = {
+        address = mkOption {
+          type = types.str;
+          description = lib.mdDoc ''
+            IPv${toString v} address of the interface. Leave empty to configure the
+            interface using DHCP.
+          '';
+        };
+
+        prefixLength = mkOption {
+          type = types.addCheck types.int (n: n >= 0 && n <= (if v == 4 then 32 else 128));
+          description = lib.mdDoc ''
+            Subnet mask of the interface, specified as the number of
+            bits in the prefix (`${if v == 4 then "24" else "64"}`).
+          '';
+        };
+      };
+    };
+
+  routeOpts = v:
+  { options = {
+      address = mkOption {
+        type = types.str;
+        description = lib.mdDoc "IPv${toString v} address of the network.";
+      };
+
+      prefixLength = mkOption {
+        type = types.addCheck types.int (n: n >= 0 && n <= (if v == 4 then 32 else 128));
+        description = lib.mdDoc ''
+          Subnet mask of the network, specified as the number of
+          bits in the prefix (`${if v == 4 then "24" else "64"}`).
+        '';
+      };
+
+      type = mkOption {
+        type = types.nullOr (types.enum [
+          "unicast" "local" "broadcast" "multicast"
+        ]);
+        default = null;
+        description = lib.mdDoc ''
+          Type of the route.  See the `Route types` section
+          in the `ip-route(8)` manual page for the details.
+
+          Note that `prohibit`, `blackhole`,
+          `unreachable`, and `throw` cannot
+          be configured per device, so they are not available here. Similarly,
+          `nat` hasn't been supported since kernel 2.6.
+        '';
+      };
+
+      via = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description = lib.mdDoc "IPv${toString v} address of the next hop.";
+      };
+
+      options = mkOption {
+        type = types.attrsOf types.str;
+        default = { };
+        example = { mtu = "1492"; window = "524288"; };
+        description = lib.mdDoc ''
+          Other route options. See the symbol `OPTIONS`
+          in the `ip-route(8)` manual page for the details.
+          You may also specify `metric`,
+          `src`, `protocol`,
+          `scope`, `from`
+          and `table`, which are technically
+          not route options, in the sense used in the manual.
+        '';
+      };
+
+    };
+  };
+
+  gatewayCoerce = address: { inherit address; };
+
+  gatewayOpts = { ... }: {
+
+    options = {
+
+      address = mkOption {
+        type = types.str;
+        description = lib.mdDoc "The default gateway address.";
+      };
+
+      interface = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        example = "enp0s3";
+        description = lib.mdDoc "The default gateway interface.";
+      };
+
+      metric = mkOption {
+        type = types.nullOr types.int;
+        default = null;
+        example = 42;
+        description = lib.mdDoc "The default gateway metric/preference.";
+      };
+
+    };
+
+  };
+
+  interfaceOpts = { name, ... }: {
+
+    options = {
+      name = mkOption {
+        example = "eth0";
+        type = types.str;
+        description = lib.mdDoc "Name of the interface.";
+      };
+
+      tempAddress = mkOption {
+        type = types.enum (lib.attrNames tempaddrValues);
+        default = cfg.tempAddresses;
+        defaultText = literalExpression ''config.networking.tempAddresses'';
+        description = lib.mdDoc ''
+          When IPv6 is enabled with SLAAC, this option controls the use of
+          temporary addresses (aka privacy extensions) on this
+          interface. This is used to reduce tracking.
+
+          See also the global option
+          [](#opt-networking.tempAddresses), which
+          applies to all interfaces where this is not set.
+
+          Possible values are:
+          ${tempaddrDoc}
+        '';
+      };
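+      # A minimal sketch (interface name "enp3s0" is a hypothetical example):
+      # keep the global default but disable privacy extensions on one
+      # server-facing interface so its source address stays predictable.
+      #
+      #   networking.interfaces.enp3s0.tempAddress = "disabled";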
+
+      useDHCP = mkOption {
+        type = types.nullOr types.bool;
+        default = null;
+        description = lib.mdDoc ''
+          Whether this interface should be configured with DHCP. Overrides the
+          default set by {option}`networking.useDHCP`. If `null` (the default),
+          DHCP is enabled if the interface has no IPv4 addresses configured
+          with {option}`networking.interfaces.<name>.ipv4.addresses`, and
+          disabled otherwise.
+        '';
+      };
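+      # Hedged sketch (interface name and address are placeholders): a
+      # statically addressed interface already gets DHCP turned off by the
+      # `null` default, but the choice can also be forced explicitly.
+      #
+      #   networking.interfaces.enp0s31f6 = {
+      #     useDHCP = false;
+      #     ipv4.addresses = [ { address = "192.0.2.10"; prefixLength = 24; } ];
+      #   };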
+
+      ipv4.addresses = mkOption {
+        default = [ ];
+        example = [
+          { address = "10.0.0.1"; prefixLength = 16; }
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+        type = with types; listOf (submodule (addrOpts 4));
+        description = lib.mdDoc ''
+          List of IPv4 addresses that will be statically assigned to the interface.
+        '';
+      };
+
+      ipv6.addresses = mkOption {
+        default = [ ];
+        example = [
+          { address = "fdfd:b3f0:482::1"; prefixLength = 48; }
+          { address = "2001:1470:fffd:2098::e006"; prefixLength = 64; }
+        ];
+        type = with types; listOf (submodule (addrOpts 6));
+        description = lib.mdDoc ''
+          List of IPv6 addresses that will be statically assigned to the interface.
+        '';
+      };
+
+      ipv4.routes = mkOption {
+        default = [];
+        example = [
+          { address = "10.0.0.0"; prefixLength = 16; }
+          { address = "192.168.2.0"; prefixLength = 24; via = "192.168.1.1"; }
+        ];
+        type = with types; listOf (submodule (routeOpts 4));
+        description = lib.mdDoc ''
+          List of extra IPv4 static routes that will be assigned to the interface.
+
+          ::: {.warning}
+          If the route type is the default `unicast`, then the scope
+          is set differently depending on the value of {option}`networking.useNetworkd`:
+          the script-based backend sets it to `link`, while networkd sets
+          it to `global`.
+          :::
+
+          If you want consistency between the two implementations,
+          set the scope of the route manually with
+          `networking.interfaces.eth0.ipv4.routes = [{ options.scope = "global"; }]`
+          for example.
+        '';
+      };
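+      # Sketch of a directly connected route pinned to `global` scope, as the
+      # warning above suggests (addresses are documentation-range placeholders):
+      #
+      #   networking.interfaces.eth0.ipv4.routes = [
+      #     { address = "192.0.2.0"; prefixLength = 24; options.scope = "global"; }
+      #   ];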
+
+      ipv6.routes = mkOption {
+        default = [];
+        example = [
+          { address = "fdfd:b3f0::"; prefixLength = 48; }
+          { address = "2001:1470:fffd:2098::"; prefixLength = 64; via = "fdfd:b3f0::1"; }
+        ];
+        type = with types; listOf (submodule (routeOpts 6));
+        description = lib.mdDoc ''
+          List of extra IPv6 static routes that will be assigned to the interface.
+        '';
+      };
+
+      macAddress = mkOption {
+        default = null;
+        example = "00:11:22:33:44:55";
+        type = types.nullOr (types.str);
+        description = lib.mdDoc ''
+          MAC address of the interface. Leave empty to use the default.
+        '';
+      };
+
+      mtu = mkOption {
+        default = null;
+        example = 9000;
+        type = types.nullOr types.int;
+        description = lib.mdDoc ''
+          MTU size for packets leaving the interface. Leave empty to use the default.
+        '';
+      };
+
+      virtual = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Whether this interface is virtual and should be created by tunctl.
+          This is mainly useful for creating bridges between a host and a virtual
+          network such as VPN or a virtual machine.
+        '';
+      };
+
+      virtualOwner = mkOption {
+        default = "root";
+        type = types.str;
+        description = lib.mdDoc ''
+          In case of a virtual device, the user who owns it.
+        '';
+      };
+
+      virtualType = mkOption {
+        default = if hasPrefix "tun" name then "tun" else "tap";
+        defaultText = literalExpression ''if hasPrefix "tun" name then "tun" else "tap"'';
+        type = with types; enum [ "tun" "tap" ];
+        description = lib.mdDoc ''
+          The type of interface to create.
+          The default is TUN for an interface name starting
+          with "tun", otherwise TAP.
+        '';
+      };
+
+      proxyARP = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Turn on proxy_arp for this device.
+          This is mainly useful for creating pseudo-bridges between a real
+          interface and a virtual network such as VPN or a virtual machine for
+          interfaces that don't support real bridging (most wlan interfaces).
+          As ARP proxying acts slightly above the link-layer, below-ip traffic
+          isn't bridged, so things like DHCP won't work. The advantage over
+          using NAT is that no IP addresses are shared, so all
+          hosts are reachable/routeable.
+
+          WARNING: this turns on IP routing, so if you have multiple interfaces, you
+          should think about the consequences and set up firewall rules to limit this.
+        '';
+      };
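+      # Minimal sketch (device name is hypothetical): proxy-ARP a wlan
+      # interface towards a VM/VPN network where real bridging isn't possible;
+      # note that this also enables IPv4 forwarding, per the warning above.
+      #
+      #   networking.interfaces.wlp2s0.proxyARP = true;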
+
+      wakeOnLan = {
+        enable = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc "Whether to enable wol on this interface.";
+        };
+        policy = mkOption {
+          type = with types; listOf (
+            enum ["phy" "unicast" "multicast" "broadcast" "arp" "magic" "secureon"]
+          );
+          default = ["magic"];
+          description = lib.mdDoc ''
+            The [Wake-on-LAN policy](https://www.freedesktop.org/software/systemd/man/systemd.link.html#WakeOnLan=)
+            to set for the device.
+
+            The options are
+            - `phy`: Wake on PHY activity
+            - `unicast`: Wake on unicast messages
+            - `multicast`: Wake on multicast messages
+            - `broadcast`: Wake on broadcast messages
+            - `arp`: Wake on ARP
+            - `magic`: Wake on receipt of a magic packet
+          '';
+        };
+      };
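+      # Sketch ("enp4s0" is a placeholder): enable Wake-on-LAN with the
+      # default magic-packet policy on a wired NIC.
+      #
+      #   networking.interfaces.enp4s0.wakeOnLan.enable = true;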
+    };
+
+    config = {
+      name = mkDefault name;
+    };
+
+    # Renamed or removed options
+    imports =
+      let
+        defined = x: x != "_mkMergedOptionModule";
+      in [
+        (mkChangedOptionModule [ "preferTempAddress" ] [ "tempAddress" ]
+         (config:
+          let bool = getAttrFromPath [ "preferTempAddress" ] config;
+          in if bool then "default" else "enabled"
+        ))
+        (mkRenamedOptionModule [ "ip4" ] [ "ipv4" "addresses"])
+        (mkRenamedOptionModule [ "ip6" ] [ "ipv6" "addresses"])
+        (mkRemovedOptionModule [ "subnetMask" ] ''
+          Supply a prefix length instead; use option
+          networking.interfaces.<name>.ipv{4,6}.addresses'')
+        (mkMergedOptionModule
+          [ [ "ipAddress" ] [ "prefixLength" ] ]
+          [ "ipv4" "addresses" ]
+          (cfg: with cfg;
+            optional (defined ipAddress && defined prefixLength)
+            { address = ipAddress; prefixLength = prefixLength; }))
+        (mkMergedOptionModule
+          [ [ "ipv6Address" ] [ "ipv6PrefixLength" ] ]
+          [ "ipv6" "addresses" ]
+          (cfg: with cfg;
+            optional (defined ipv6Address && defined ipv6PrefixLength)
+            { address = ipv6Address; prefixLength = ipv6PrefixLength; }))
+
+        ({ options.warnings = options.warnings; options.assertions = options.assertions; })
+      ];
+
+  };
+
+  vswitchInterfaceOpts = {name, ...}: {
+
+    options = {
+
+      name = mkOption {
+        description = lib.mdDoc "Name of the interface";
+        example = "eth0";
+        type = types.str;
+      };
+
+      vlan = mkOption {
+        description = lib.mdDoc "Vlan tag to apply to interface";
+        example = 10;
+        type = types.nullOr types.int;
+        default = null;
+      };
+
+      type = mkOption {
+        description = lib.mdDoc "Openvswitch type to assign to interface";
+        example = "internal";
+        type = types.nullOr types.str;
+        default = null;
+      };
+    };
+  };
+
+  hexChars = stringToCharacters "0123456789abcdef";
+
+  isHexString = s: all (c: elem c hexChars) (stringToCharacters (toLower s));
+
+  tempaddrValues = {
+    disabled = {
+      sysctl = "0";
+      description = "completely disable IPv6 temporary addresses";
+    };
+    enabled = {
+      sysctl = "1";
+      description = "generate IPv6 temporary addresses but still use EUI-64 addresses as source addresses";
+    };
+    default = {
+      sysctl = "2";
+      description = "generate IPv6 temporary addresses and use these as source addresses in routing";
+    };
+  };
+  tempaddrDoc = concatStringsSep "\n"
+    (mapAttrsToList
+      (name: { description, ... }: ''- `"${name}"` to ${description};'')
+      tempaddrValues);
+
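+  # Writes the configured 8-hex-digit host ID as four raw bytes in host byte
+  # order (reversed on little-endian machines), so that reading /etc/hostid
+  # back as a native 32-bit integer, e.g. via gethostid(3), yields cfg.hostId.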
+  hostidFile = pkgs.runCommand "gen-hostid" { preferLocalBuild = true; } ''
+      hi="${cfg.hostId}"
+      ${if pkgs.stdenv.isBigEndian then ''
+        echo -ne "\x''${hi:0:2}\x''${hi:2:2}\x''${hi:4:2}\x''${hi:6:2}" > $out
+      '' else ''
+        echo -ne "\x''${hi:6:2}\x''${hi:4:2}\x''${hi:2:2}\x''${hi:0:2}" > $out
+      ''}
+    '';
+
+in
+
+{
+
+  ###### interface
+
+  options = {
+
+    networking.hostName = mkOption {
+      default = config.system.nixos.distroId;
+      defaultText = literalExpression "config.system.nixos.distroId";
+      # Only allow hostnames without the domain name part (i.e. no FQDNs, see
+      # e.g. "man 5 hostname") and require valid DNS labels (recommended
+      # syntax). Note: We also allow underscores for compatibility/legacy
+      # reasons (as undocumented feature):
+      type = types.strMatching
+        "^$|^[[:alnum:]]([[:alnum:]_-]{0,61}[[:alnum:]])?$";
+      description = lib.mdDoc ''
+        The name of the machine. Leave it empty if you want to obtain it from a
+        DHCP server (if using DHCP). The hostname must be a valid DNS label (see
+        RFC 1035 section 2.3.1: "Preferred name syntax", RFC 1123 section 2.1:
+        "Host Names and Numbers") and as such must not contain the domain part.
+        This means that the hostname must start with a letter or digit,
+        end with a letter or digit, and have as interior characters only
+        letters, digits, and hyphen. The maximum length is 63 characters.
+        Additionally it is recommended to only use lower-case characters.
+        If (e.g. for legacy reasons) a FQDN is required as the Linux kernel
+        network node hostname (uname --nodename) the option
+        boot.kernel.sysctl."kernel.hostname" can be used as a workaround (but
+        the 64 character limit still applies).
+
+        WARNING: Do not use underscores (_) or you may run into unexpected issues.
+      '';
+       # warning until the issues in https://github.com/NixOS/nixpkgs/pull/138978
+       # are resolved
+    };
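+    # Hedged sketch of the FQDN workaround mentioned above (names are
+    # placeholders); the kernel's 64-character limit still applies:
+    #
+    #   networking.hostName = "myhost";
+    #   boot.kernel.sysctl."kernel.hostname" = "myhost.example.org";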
+
+    networking.fqdn = mkOption {
+      readOnly = true;
+      type = types.str;
+      default = if (cfg.hostName != "" && cfg.domain != null)
+        then "${cfg.hostName}.${cfg.domain}"
+        else throw ''
+          The FQDN is required but cannot be determined. Please make sure that
+          both networking.hostName and networking.domain are set properly.
+        '';
+      defaultText = literalExpression ''"''${networking.hostName}.''${networking.domain}"'';
+      description = lib.mdDoc ''
+        The fully qualified domain name (FQDN) of this host. It is the result
+        of combining `networking.hostName` and `networking.domain`. Using this
+        option will result in an evaluation error if the hostname is empty or
+        no domain is specified.
+
+        Modules that accept a mere `networking.hostName` but prefer a fully qualified
+        domain name may use `networking.fqdnOrHostName` instead.
+      '';
+    };
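+    # Evaluation sketch with placeholder values:
+    #
+    #   networking.hostName = "myhost";
+    #   networking.domain   = "example.org";
+    #   # => config.networking.fqdn == "myhost.example.org"
+    #
+    # With networking.domain left at null, reading this option throws.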
+
+    networking.fqdnOrHostName = mkOption {
+      readOnly = true;
+      type = types.str;
+      default = if cfg.domain == null then cfg.hostName else cfg.fqdn;
+      defaultText = literalExpression ''
+        if cfg.domain == null then cfg.hostName else cfg.fqdn
+      '';
+      description = lib.mdDoc ''
+        Either the fully qualified domain name (FQDN), or just the host name
+        if no domain has been set.
+
+        This is a convenience option for modules to read instead of `fqdn` when
+        a mere `hostName` is also an acceptable value; this option does not
+        throw an error when `domain` is unset.
+      '';
+    };
+
+    networking.hostId = mkOption {
+      default = null;
+      example = "4e98920d";
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        The 32-bit host ID of the machine, formatted as 8 hexadecimal characters.
+
+        You should try to make this ID unique among your machines. You can
+        generate a random 32-bit ID using the following commands:
+
+        `head -c 8 /etc/machine-id`
+
+        (this derives it from the machine-id that systemd generates) or
+
+        `head -c4 /dev/urandom | od -A none -t x4`
+
+        The primary use case is to ensure, when using ZFS, that a pool isn't
+        accidentally imported on the wrong machine.
+      '';
+    };
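+    # Sketch (value is a placeholder): pin the host ID, which helps prevent a
+    # ZFS pool from being imported on the wrong machine.
+    #
+    #   networking.hostId = "8425e349";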
+
+    networking.enableIPv6 = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether to enable support for IPv6.
+      '';
+    };
+
+    networking.defaultGateway = mkOption {
+      default = null;
+      example = {
+        address = "131.211.84.1";
+        interface = "enp3s0";
+      };
+      type = types.nullOr (types.coercedTo types.str gatewayCoerce (types.submodule gatewayOpts));
+      description = lib.mdDoc ''
+        The default gateway. It can be left empty if it is auto-detected through DHCP.
+        It can be specified as a string or an option set along with a network interface.
+      '';
+    };
+
+    networking.defaultGateway6 = mkOption {
+      default = null;
+      example = {
+        address = "2001:4d0:1e04:895::1";
+        interface = "enp3s0";
+      };
+      type = types.nullOr (types.coercedTo types.str gatewayCoerce (types.submodule gatewayOpts));
+      description = lib.mdDoc ''
+        The default ipv6 gateway. It can be left empty if it is auto-detected through DHCP.
+        It can be specified as a string or an option set along with a network interface.
+      '';
+    };
+
+    networking.defaultGatewayWindowSize = mkOption {
+      default = null;
+      example = 524288;
+      type = types.nullOr types.int;
+      description = lib.mdDoc ''
+        The window size of the default gateway. It limits maximal data bursts that TCP peers
+        are allowed to send to us.
+      '';
+    };
+
+    networking.nameservers = mkOption {
+      type = types.listOf types.str;
+      default = [];
+      example = ["130.161.158.4" "130.161.33.17"];
+      description = lib.mdDoc ''
+        The list of nameservers.  It can be left empty if it is auto-detected through DHCP.
+      '';
+    };
+
+    networking.search = mkOption {
+      default = [];
+      example = [ "example.com" "home.arpa" ];
+      type = types.listOf types.str;
+      description = lib.mdDoc ''
+        The list of search paths used when resolving domain names.
+      '';
+    };
+
+    networking.domain = mkOption {
+      default = null;
+      example = "home.arpa";
+      type = types.nullOr types.str;
+      description = lib.mdDoc ''
+        The domain.  It can be left empty if it is auto-detected through DHCP.
+      '';
+    };
+
+    networking.useHostResolvConf = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        In containers, whether to use the
+        {file}`resolv.conf` supplied by the host.
+      '';
+    };
+
+    networking.localCommands = mkOption {
+      type = types.lines;
+      default = "";
+      example = "text=anything; echo You can put $text here.";
+      description = lib.mdDoc ''
+        Shell commands to be executed at the end of the
+        `network-setup` systemd service.  Note that if
+        you are using DHCP to obtain the network configuration,
+        interfaces may not be fully configured yet.
+      '';
+    };
+
+    networking.interfaces = mkOption {
+      default = {};
+      example =
+        { eth0.ipv4.addresses = [ {
+            address = "131.211.84.78";
+            prefixLength = 25;
+          } ];
+        };
+      description = lib.mdDoc ''
+        The configuration for each network interface.
+
+        Please note that {option}`systemd.network.netdevs` has more features
+        and is better maintained. When building new things, it is advised to
+        use that instead.
+      '';
+      type = with types; attrsOf (submodule interfaceOpts);
+    };
+
+    networking.vswitches = mkOption {
+      default = { };
+      example =
+        { vs0.interfaces = { eth0 = { }; lo1 = { type="internal"; }; };
+          vs1.interfaces = [ { name = "eth2"; } { name = "lo2"; type="internal"; } ];
+        };
+      description =
+        lib.mdDoc ''
+          This option allows you to define Open vSwitches that connect
+          physical networks together. The value of this option is an
+          attribute set. Each attribute specifies a vswitch, with the
+          attribute name specifying the name of the vswitch's network
+          interface.
+        '';
+
+      type = with types; attrsOf (submodule {
+
+        options = {
+
+          interfaces = mkOption {
+            description = lib.mdDoc "The physical network interfaces connected by the vSwitch.";
+            type = with types; attrsOf (submodule vswitchInterfaceOpts);
+          };
+
+          controllers = mkOption {
+            type = types.listOf types.str;
+            default = [];
+            example = [ "ptcp:6653:[::1]" ];
+            description = lib.mdDoc ''
+              Specify the controller targets. For the allowed options see `man 8 ovs-vsctl`.
+            '';
+          };
+
+          openFlowRules = mkOption {
+            type = types.lines;
+            default = "";
+            example = ''
+              actions=normal
+            '';
+            description = lib.mdDoc ''
+              OpenFlow rules to insert into the Open vSwitch. All `openFlowRules` are
+              loaded with `ovs-ofctl` within one atomic operation.
+            '';
+          };
+
+          # TODO: custom "openflow version" type, with list from existing openflow protocols
+          supportedOpenFlowVersions = mkOption {
+            type = types.listOf types.str;
+            example = [ "OpenFlow10" "OpenFlow13" "OpenFlow14" ];
+            default = [ "OpenFlow13" ];
+            description = lib.mdDoc ''
+              Supported versions to enable on this switch.
+            '';
+          };
+
+          # TODO: use same type as elements from supportedOpenFlowVersions
+          openFlowVersion = mkOption {
+            type = types.str;
+            default = "OpenFlow13";
+            description = lib.mdDoc ''
+              Version of OpenFlow protocol to use when communicating with the switch internally (e.g. with `openFlowRules`).
+            '';
+          };
+
+          extraOvsctlCmds = mkOption {
+            type = types.lines;
+            default = "";
+            example = ''
+              set-fail-mode <switch_name> secure
+              set Bridge <switch_name> stp_enable=true
+            '';
+            description = lib.mdDoc ''
+              Commands to manipulate the Open vSwitch database. Every line is executed with `ovs-vsctl`.
+              All commands are bundled together with the operations for adding the interfaces
+              into one atomic operation.
+            '';
+          };
+
+        };
+
+      });
+
+    };
+
+    networking.bridges = mkOption {
+      default = { };
+      example =
+        { br0.interfaces = [ "eth0" "eth1" ];
+          br1.interfaces = [ "eth2" "wlan0" ];
+        };
+      description =
+        lib.mdDoc ''
+          This option allows you to define Ethernet bridge devices
+          that connect physical networks together.  The value of this
+          option is an attribute set.  Each attribute specifies a
+          bridge, with the attribute name specifying the name of the
+          bridge's network interface.
+        '';
+
+      type = with types; attrsOf (submodule {
+
+        options = {
+
+          interfaces = mkOption {
+            example = [ "eth0" "eth1" ];
+            type = types.listOf types.str;
+            description =
+              lib.mdDoc "The physical network interfaces connected by the bridge.";
+          };
+
+          rstp = mkOption {
+            default = false;
+            type = types.bool;
+            description = lib.mdDoc "Whether the bridge interface should enable rstp.";
+          };
+
+        };
+
+      });
+
+    };
+
+    networking.bonds =
+      let
+        driverOptionsExample =  ''
+          {
+            miimon = "100";
+            mode = "active-backup";
+          }
+        '';
+      in mkOption {
+        default = { };
+        example = literalExpression ''
+          {
+            bond0 = {
+              interfaces = [ "eth0" "wlan0" ];
+              driverOptions = ${driverOptionsExample};
+            };
+            anotherBond.interfaces = [ "enp4s0f0" "enp4s0f1" "enp5s0f0" "enp5s0f1" ];
+          }
+        '';
+        description = lib.mdDoc ''
+          This option allows you to define bond devices that aggregate multiple
+          underlying network interfaces together. The value of this option is
+          an attribute set. Each attribute specifies a bond, with the attribute
+          name specifying the name of the bond's network interface.
+        '';
+
+        type = with types; attrsOf (submodule {
+
+          options = {
+
+            interfaces = mkOption {
+              example = [ "enp4s0f0" "enp4s0f1" "wlan0" ];
+              type = types.listOf types.str;
+              description = lib.mdDoc "The interfaces to bond together";
+            };
+
+            driverOptions = mkOption {
+              type = types.attrsOf types.str;
+              default = {};
+              example = literalExpression driverOptionsExample;
+              description = lib.mdDoc ''
+                Options for the bonding driver.
+                Documentation can be found in
+                <https://www.kernel.org/doc/Documentation/networking/bonding.txt>
+              '';
+
+            };
+
+            lacp_rate = mkOption {
+              default = null;
+              example = "fast";
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                DEPRECATED, use `driverOptions`.
+                Option specifying the rate in which we'll ask our link partner
+                to transmit LACPDU packets in 802.3ad mode.
+              '';
+            };
+
+            miimon = mkOption {
+              default = null;
+              example = 100;
+              type = types.nullOr types.int;
+              description = lib.mdDoc ''
+                DEPRECATED, use `driverOptions`.
+                Miimon is the number of milliseconds between each round of polling
+                by the device driver for failed links. By default polling is not
+                enabled and the driver is trusted to properly detect and handle
+                failure scenarios.
+              '';
+            };
+
+            mode = mkOption {
+              default = null;
+              example = "active-backup";
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                DEPRECATED, use `driverOptions`.
+                The mode in which the bond will be running. The default mode for
+                the bonding driver is balance-rr, optimizing for throughput.
+                More information about valid modes can be found at
+                https://www.kernel.org/doc/Documentation/networking/bonding.txt
+              '';
+            };
+
+            xmit_hash_policy = mkOption {
+              default = null;
+              example = "layer2+3";
+              type = types.nullOr types.str;
+              description = lib.mdDoc ''
+                DEPRECATED, use `driverOptions`.
+                Selects the transmit hash policy to use for slave selection in
+                balance-xor, 802.3ad, and tlb modes.
+              '';
+            };
+
+          };
+
+        });
+      };
+
+    networking.macvlans = mkOption {
+      default = { };
+      example = literalExpression ''
+        {
+          wan = {
+            interface = "enp2s0";
+            mode = "vepa";
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        This option allows you to define macvlan interfaces which should
+        be automatically created.
+      '';
+      type = with types; attrsOf (submodule {
+        options = {
+
+          interface = mkOption {
+            example = "enp4s0";
+            type = types.str;
+            description = lib.mdDoc "The interface the macvlan will transmit packets through.";
+          };
+
+          mode = mkOption {
+            default = null;
+            type = types.nullOr types.str;
+            example = "vepa";
+            description = lib.mdDoc "The mode of the macvlan device.";
+          };
+
+        };
+
+      });
+    };
+
+    networking.fooOverUDP = mkOption {
+      default = { };
+      example =
+        {
+          primary = { port = 9001; local = { address = "192.0.2.1"; dev = "eth0"; }; };
+          backup =  { port = 9002; };
+        };
+      description = lib.mdDoc ''
+        This option allows you to configure Foo Over UDP and Generic UDP Encapsulation
+        endpoints. See {manpage}`ip-fou(8)` for details.
+      '';
+      type = with types; attrsOf (submodule {
+        options = {
+          port = mkOption {
+            type = port;
+            description = lib.mdDoc ''
+              Local port of the encapsulation UDP socket.
+            '';
+          };
+
+          protocol = mkOption {
+            type = nullOr (ints.between 1 255);
+            default = null;
+            description = lib.mdDoc ''
+              Protocol number of the encapsulated packets. Specifying `null`
+              (the default) creates a GUE endpoint, specifying a protocol number will create
+              a FOU endpoint.
+            '';
+          };
+
+          local = mkOption {
+            type = nullOr (submodule {
+              options = {
+                address = mkOption {
+                  type = types.str;
+                  description = lib.mdDoc ''
+                    Local address to bind to. The address must be available when the FOU
+                    endpoint is created, using the scripted network setup this can be achieved
+                    either by setting `dev` or adding dependency information to
+                    `systemd.services.<name>-fou-encap`; it isn't supported
+                    when using networkd.
+                  '';
+                };
+
+                dev = mkOption {
+                  type = nullOr str;
+                  default = null;
+                  example = "eth0";
+                  description = lib.mdDoc ''
+                    Network device to bind to.
+                  '';
+                };
+              };
+            });
+            default = null;
+            example = { address = "203.0.113.22"; };
+            description = lib.mdDoc ''
+              Local address (and optionally device) to bind to using the given port.
+            '';
+          };
+        };
+      });
+    };
+
+    networking.sits = mkOption {
+      default = { };
+      example = literalExpression ''
+        {
+          hurricane = {
+            remote = "10.0.0.1";
+            local = "10.0.0.22";
+            ttl = 255;
+          };
+          msipv6 = {
+            remote = "192.168.0.1";
+            dev = "enp3s0";
+            ttl = 127;
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        This option allows you to define 6-to-4 interfaces which should be automatically created.
+      '';
+      type = with types; attrsOf (submodule {
+        options = {
+
+          remote = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "10.0.0.1";
+            description = lib.mdDoc ''
+              The address of the remote endpoint to forward traffic over.
+            '';
+          };
+
+          local = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "10.0.0.22";
+            description = lib.mdDoc ''
+              The address of the local endpoint which the remote
+              side should send packets to.
+            '';
+          };
+
+          ttl = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            example = 255;
+            description = lib.mdDoc ''
+              The time-to-live of the connection to the remote tunnel endpoint.
+            '';
+          };
+
+          dev = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "enp4s0f0";
+            description = lib.mdDoc ''
+              The underlying network device on which the tunnel resides.
+            '';
+          };
+
+          encapsulation = with types; mkOption {
+            type = nullOr (submodule {
+              options = {
+                type = mkOption {
+                  type = enum [ "fou" "gue" ];
+                  description = lib.mdDoc ''
+                    Selects encapsulation type. See
+                    {manpage}`ip-link(8)` for details.
+                  '';
+                };
+
+                port = mkOption {
+                  type = port;
+                  example = 9001;
+                  description = lib.mdDoc ''
+                    Destination port for encapsulated packets.
+                  '';
+                };
+
+                sourcePort = mkOption {
+                  type = nullOr types.port;
+                  default = null;
+                  example = 9002;
+                  description = lib.mdDoc ''
+                    Source port for encapsulated packets. Will be chosen automatically by
+                    the kernel if unset.
+                  '';
+                };
+              };
+            });
+            default = null;
+            example = { type = "fou"; port = 9001; };
+            description = lib.mdDoc ''
+              Configures encapsulation in UDP packets.
+            '';
+          };
+
+        };
+
+      });
+    };
+
+    networking.greTunnels = mkOption {
+      default = { };
+      example = literalExpression ''
+        {
+          greBridge = {
+            remote = "10.0.0.1";
+            local = "10.0.0.22";
+            dev = "enp4s0f0";
+            type = "tap";
+            ttl = 255;
+          };
+          gre6Tunnel = {
+            remote = "fd7a:5634::1";
+            local = "fd7a:5634::2";
+            dev = "enp4s0f0";
+            type = "tun6";
+            ttl = 255;
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        This option allows you to define Generic Routing Encapsulation (GRE) tunnels.
+      '';
+      type = with types; attrsOf (submodule {
+        options = {
+
+          remote = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "10.0.0.1";
+            description = lib.mdDoc ''
+              The address of the remote endpoint to forward traffic over.
+            '';
+          };
+
+          local = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "10.0.0.22";
+            description = lib.mdDoc ''
+              The address of the local endpoint which the remote
+              side should send packets to.
+            '';
+          };
+
+          dev = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "enp4s0f0";
+            description = lib.mdDoc ''
+              The underlying network device on which the tunnel resides.
+            '';
+          };
+
+          ttl = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            example = 255;
+            description = lib.mdDoc ''
+              The time-to-live/hoplimit of the connection to the remote tunnel endpoint.
+            '';
+          };
+
+          type = mkOption {
+            type = with types; enum [ "tun" "tap" "tun6" "tap6" ];
+            default = "tap";
+            example = "tap";
+            apply = v: {
+              tun = "gre";
+              tap = "gretap";
+              tun6 = "ip6gre";
+              tap6 = "ip6gretap";
+            }.${v};
+            description = lib.mdDoc ''
+              Whether the tunnel routes layer 2 (tap) or layer 3 (tun) traffic.
+            '';
+          };
+        };
+      });
+    };
+
+    networking.vlans = mkOption {
+      default = { };
+      example = literalExpression ''
+        {
+          vlan0 = {
+            id = 3;
+            interface = "enp3s0";
+          };
+          vlan1 = {
+            id = 1;
+            interface = "wlan0";
+          };
+        }
+      '';
+      description =
+        lib.mdDoc ''
+          This option allows you to define vlan devices that tag packets
+          on top of a physical interface. The value of this option is an
+          attribute set. Each attribute specifies a vlan, with the name
+          specifying the name of the vlan interface.
+        '';
+
+      type = with types; attrsOf (submodule {
+
+        options = {
+
+          id = mkOption {
+            example = 1;
+            type = types.int;
+            description = lib.mdDoc "The vlan identifier";
+          };
+
+          interface = mkOption {
+            example = "enp4s0";
+            type = types.str;
+            description = lib.mdDoc "The interface the vlan will transmit packets through.";
+          };
+
+        };
+
+      });
+
+    };
+
+    networking.wlanInterfaces = mkOption {
+      default = { };
+      example = literalExpression ''
+        {
+          wlan-station0 = {
+              device = "wlp6s0";
+          };
+          wlan-adhoc0 = {
+              type = "ibss";
+              device = "wlp6s0";
+              mac = "02:00:00:00:00:01";
+          };
+          wlan-p2p0 = {
+              device = "wlp6s0";
+              mac = "02:00:00:00:00:02";
+          };
+          wlan-ap0 = {
+              device = "wlp6s0";
+              mac = "02:00:00:00:00:03";
+          };
+        }
+      '';
+      description =
+        lib.mdDoc ''
+          Creating multiple WLAN interfaces on top of one physical WLAN device (NIC).
+
+          The name of the WLAN interface corresponds to the name of the attribute.
+          A NIC is referenced by the persistent device name of the WLAN interface that
+          `udev` assigns to a NIC by default.
+          If a NIC supports multiple WLAN interfaces, then the one NIC can be used as
+          `device` for multiple WLAN interfaces.
+          If a NIC is used for creating WLAN interfaces, then the default WLAN interface
+          with a persistent device name from `udev` is not created.
+          A WLAN interface with the persistent name assigned from `udev`
+          would have to be created explicitly.
+        '';
+
+      type = with types; attrsOf (submodule {
+
+        options = {
+
+          device = mkOption {
+            type = types.str;
+            example = "wlp6s0";
+            description = lib.mdDoc "The name of the underlying hardware WLAN device as assigned by `udev`.";
+          };
+
+          type = mkOption {
+            type = types.enum [ "managed" "ibss" "monitor" "mesh" "wds" ];
+            default = "managed";
+            example = "ibss";
+            description = lib.mdDoc ''
+              The type of the WLAN interface.
+              The type has to be supported by the underlying hardware of the device.
+            '';
+          };
+
+          meshID = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            description = lib.mdDoc "MeshID of interface with type `mesh`.";
+          };
+
+          flags = mkOption {
+            type = with types; nullOr (enum [ "none" "fcsfail" "control" "otherbss" "cook" "active" ]);
+            default = null;
+            example = "control";
+            description = lib.mdDoc ''
+              Flags for interface of type `monitor`.
+            '';
+          };
+
+          fourAddr = mkOption {
+            type = types.nullOr types.bool;
+            default = null;
+            description = lib.mdDoc "Whether to enable `4-address mode` with type `managed`.";
+          };
+
+          mac = mkOption {
+            type = types.nullOr types.str;
+            default = null;
+            example = "02:00:00:00:00:01";
+            description = lib.mdDoc ''
+              MAC address to use for the device. If `null`, then the MAC of the
+              underlying hardware WLAN device is used.
+
+              INFO: Locally administered MAC addresses are of the form:
+              - x2:xx:xx:xx:xx:xx
+              - x6:xx:xx:xx:xx:xx
+              - xA:xx:xx:xx:xx:xx
+              - xE:xx:xx:xx:xx:xx
+            '';
+          };
+
+        };
+
+      });
+
+    };
+
+    networking.useDHCP = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to use DHCP to obtain an IP address and other
+        configuration for all network interfaces that do not have any manually
+        configured IPv4 addresses.
+      '';
+    };
+
+    networking.useNetworkd = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Whether we should use networkd as the network configuration backend or
+        the legacy script based system. Note that this option is experimental,
+        enable at your own risk.
+      '';
+    };
+
+    networking.tempAddresses = mkOption {
+      default = if cfg.enableIPv6 then "default" else "disabled";
+      defaultText = literalExpression ''
+        if ''${config.${opt.enableIPv6}} then "default" else "disabled"
+      '';
+      type = types.enum (lib.attrNames tempaddrValues);
+      description = lib.mdDoc ''
+        Whether to enable IPv6 Privacy Extensions for interfaces not
+        configured explicitly in
+        [](#opt-networking.interfaces._name_.tempAddress).
+
+        This sets the ipv6.conf.*.use_tempaddr sysctl for all
+        interfaces. Possible values are:
+
+        ${tempaddrDoc}
+      '';
+    };
+
+  };
+
+
+  ###### implementation
+
+  config = {
+
+    warnings = (concatMap (i: i.warnings) interfaces) ++ (lib.optional
+      (config.systemd.network.enable && cfg.useDHCP && !cfg.useNetworkd) ''
+        The combination of `systemd.network.enable = true`, `networking.useDHCP = true` and `networking.useNetworkd = false` can cause both networkd and dhcpcd to manage the same interfaces. This can lead to loss of networking. It is recommended you choose only one of networkd (by also enabling `networking.useNetworkd`) or scripting (by disabling `systemd.network.enable`)
+      '');
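+    # Hedged sketch of one way to avoid the warning above: hand all interfaces
+    # to networkd instead of mixing it with the scripted backend.
+    #
+    #   networking.useNetworkd = true;
+    #   networking.useDHCP = true;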
+
+    assertions =
+      (forEach interfaces (i: {
+        # With the linux kernel, interface name length is limited by IFNAMSIZ
+        # to 16 bytes, including the trailing null byte.
+        # See include/linux/if.h in the kernel sources
+        assertion = stringLength i.name < 16;
+        message = ''
+          The name of networking.interfaces."${i.name}" is too long, it needs to be less than 16 characters.
+        '';
+      })) ++ (forEach slaveIfs (i: {
+        assertion = i.ipv4.addresses == [ ] && i.ipv6.addresses == [ ];
+        message = ''
+          The networking.interfaces."${i.name}" must not have any defined ips when it is a slave.
+        '';
+      })) ++ (forEach interfaces (i: {
+        assertion = i.tempAddress != "disabled" -> cfg.enableIPv6;
+        message = ''
+          Temporary addresses are only needed when IPv6 is enabled.
+        '';
+      })) ++ (forEach interfaces (i: {
+        assertion = (i.virtual && i.virtualType == "tun") -> i.macAddress == null;
+        message = ''
+          Setting a MAC Address for tun device ${i.name} isn't supported.
+        '';
+      })) ++ [
+        {
+          assertion = cfg.hostId == null || (stringLength cfg.hostId == 8 && isHexString cfg.hostId);
+          message = "Invalid value given to the networking.hostId option.";
+        }
+      ];
+
+    boot.kernelModules = [ ]
+      ++ optional hasVirtuals "tun"
+      ++ optional hasSits "sit"
+      ++ optional hasGres "gre"
+      ++ optional hasBonds "bonding"
+      ++ optional hasFous "fou";
+
+    boot.extraModprobeConfig =
+      # This setting is intentional as it prevents default bond devices
+      # from being created.
+      optionalString hasBonds "options bonding max_bonds=0";
+
+    boot.kernel.sysctl = {
+      "net.ipv4.conf.all.forwarding" = mkDefault (any (i: i.proxyARP) interfaces);
+      "net.ipv6.conf.all.disable_ipv6" = mkDefault (!cfg.enableIPv6);
+      "net.ipv6.conf.default.disable_ipv6" = mkDefault (!cfg.enableIPv6);
+      # networkmanager falls back to "/proc/sys/net/ipv6/conf/default/use_tempaddr"
+      "net.ipv6.conf.default.use_tempaddr" = tempaddrValues.${cfg.tempAddresses}.sysctl;
+    } // listToAttrs (forEach interfaces
+        (i: nameValuePair "net.ipv4.conf.${replaceStrings ["."] ["/"] i.name}.proxy_arp" i.proxyARP))
+      // listToAttrs (forEach interfaces
+        (i: let
+          opt = i.tempAddress;
+          val = tempaddrValues.${opt}.sysctl;
+         in nameValuePair "net.ipv6.conf.${replaceStrings ["."] ["/"] i.name}.use_tempaddr" val));
+
+    systemd.services.domainname = lib.mkIf (cfg.domain != null) {
+      wantedBy = [ "sysinit.target" ];
+      before = [ "sysinit.target" ];
+      unitConfig.DefaultDependencies = false;
+      serviceConfig.ExecStart = ''${pkgs.nettools}/bin/domainname "${cfg.domain}"'';
+    };
+
+    environment.etc.hostid = mkIf (cfg.hostId != null) { source = hostidFile; };
+    boot.initrd.systemd.contents."/etc/hostid" = mkIf (cfg.hostId != null) { source = hostidFile; };
+
+    # static hostname configuration needed for hostnamectl and the
+    # org.freedesktop.hostname1 dbus service (both provided by systemd)
+    environment.etc.hostname = mkIf (cfg.hostName != "")
+      {
+        text = cfg.hostName + "\n";
+      };
+
+    environment.systemPackages =
+      [ pkgs.host
+        pkgs.iproute2
+        pkgs.iputils
+        pkgs.nettools
+      ]
+      ++ optionals config.networking.wireless.enable [
+        pkgs.wirelesstools # FIXME: obsolete?
+        pkgs.iw
+      ]
+      ++ bridgeStp;
+
+    # Wake-on-LAN configuration is shared by the scripted and networkd backends.
+    systemd.network.links = pipe interfaces [
+      (filter (i: i.wakeOnLan.enable))
+      (map (i: nameValuePair "40-${i.name}" {
+        matchConfig.OriginalName = i.name;
+        linkConfig.WakeOnLan = concatStringsSep " " i.wakeOnLan.policy;
+      }))
+      listToAttrs
+    ];
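+    # Illustrative example (the interface name "enp3s0" is hypothetical):
+    #   networking.interfaces.enp3s0.wakeOnLan.enable = true;
+    # produces a systemd .link unit "40-enp3s0" whose WakeOnLan= setting
+    # follows the configured wakeOnLan.policy.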
+
+    # The network-interfaces target is kept for backwards compatibility.
+    # New modules must NOT use it.
+    systemd.targets.network-interfaces =
+      { description = "All Network Interfaces (deprecated)";
+        wantedBy = [ "network.target" ];
+        before = [ "network.target" ];
+        after = [ "network-pre.target" ];
+        unitConfig.X-StopOnReconfiguration = true;
+      };
+
+    systemd.services = {
+      network-local-commands = {
+        description = "Extra networking commands.";
+        before = [ "network.target" ];
+        wantedBy = [ "network.target" ];
+        after = [ "network-pre.target" ];
+        unitConfig.ConditionCapability = "CAP_NET_ADMIN";
+        path = [ pkgs.iproute2 ];
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+        script = ''
+          # Run any user-specified commands.
+          ${cfg.localCommands}
+        '';
+      };
+    };
+    services.mstpd = mkIf needsMstpd { enable = true; };
+
+    virtualisation.vswitch = mkIf (cfg.vswitches != { }) { enable = true; };
+
+    services.udev.packages =  [
+      (pkgs.writeTextFile rec {
+        name = "ipv6-privacy-extensions.rules";
+        destination = "/etc/udev/rules.d/98-${name}";
+        text = let
+          sysctl-value = tempaddrValues.${cfg.tempAddresses}.sysctl;
+        in ''
+          # enable and prefer IPv6 privacy addresses by default
+          ACTION=="add", SUBSYSTEM=="net", RUN+="${pkgs.bash}/bin/sh -c 'echo ${sysctl-value} > /proc/sys/net/ipv6/conf/$name/use_tempaddr'"
+        '';
+      })
+      (pkgs.writeTextFile rec {
+        name = "ipv6-privacy-extensions.rules";
+        destination = "/etc/udev/rules.d/99-${name}";
+        text = concatMapStrings (i:
+          let
+            opt = i.tempAddress;
+            val = tempaddrValues.${opt}.sysctl;
+            msg = tempaddrValues.${opt}.description;
+          in
+          ''
+            # override to ${msg} for ${i.name}
+            ACTION=="add", SUBSYSTEM=="net", NAME=="${i.name}", RUN+="${pkgs.procps}/bin/sysctl net.ipv6.conf.${replaceStrings ["."] ["/"] i.name}.use_tempaddr=${val}"
+          '') (filter (i: i.tempAddress != cfg.tempAddresses) interfaces);
+      })
+    ] ++ lib.optional (cfg.wlanInterfaces != {})
+      (pkgs.writeTextFile {
+        name = "99-zzz-40-wlanInterfaces.rules";
+        destination = "/etc/udev/rules.d/99-zzz-40-wlanInterfaces.rules";
+        text =
+          let
+            # Collect all interfaces that are defined for a device
+            # as device:interface key:value pairs.
+            wlanDeviceInterfaces =
+              let
+                allDevices = unique (mapAttrsToList (_: v: v.device) cfg.wlanInterfaces);
+                interfacesOfDevice = d: filterAttrs (_: v: v.device == d) cfg.wlanInterfaces;
+              in
+                genAttrs allDevices (d: interfacesOfDevice d);
+
+            # Convert the device:interface key:value pairs into a list and, if an
+            # interface named after the device exists, place it at the beginning.
+            wlanListDeviceFirst = device: interfaces:
+              if hasAttr device interfaces
+              then mapAttrsToList (n: v: v//{_iName=n;}) (filterAttrs (n: _: n==device) interfaces) ++ mapAttrsToList (n: v: v//{_iName=n;}) (filterAttrs (n: _: n!=device) interfaces)
+              else mapAttrsToList (n: v: v // {_iName = n;}) interfaces;
+
+            # Udev script to execute for the default WLAN interface with the persistent udev name.
+            # The script creates the required new WLAN interfaces and configures the
+            # existing, default interface.
+            curInterfaceScript = device: current: new: pkgs.writeScript "udev-run-script-wlan-interfaces-${device}.sh" ''
+              #!${pkgs.runtimeShell}
+              # Change the wireless phy device to a predictable name.
+              ${pkgs.iw}/bin/iw phy `${pkgs.coreutils}/bin/cat /sys/class/net/$INTERFACE/phy80211/name` set name ${device}
+
+              # Add new WLAN interfaces
+              ${flip concatMapStrings new (i: ''
+              ${pkgs.iw}/bin/iw phy ${device} interface add ${i._iName} type managed
+              '')}
+
+              # Configure the current interface
+              ${pkgs.iw}/bin/iw dev ${device} set type ${current.type}
+              ${optionalString (current.type == "mesh" && current.meshID!=null) "${pkgs.iw}/bin/iw dev ${device} set meshid ${current.meshID}"}
+              ${optionalString (current.type == "monitor" && current.flags!=null) "${pkgs.iw}/bin/iw dev ${device} set monitor ${current.flags}"}
+              ${optionalString (current.type == "managed" && current.fourAddr!=null) "${pkgs.iw}/bin/iw dev ${device} set 4addr ${if current.fourAddr then "on" else "off"}"}
+              ${optionalString (current.mac != null) "${pkgs.iproute2}/bin/ip link set dev ${device} address ${current.mac}"}
+            '';
+
+            # Udev script to execute for a new WLAN interface. The script configures the new WLAN interface.
+            newInterfaceScript = new: pkgs.writeScript "udev-run-script-wlan-interfaces-${new._iName}.sh" ''
+              #!${pkgs.runtimeShell}
+              # Configure the new interface
+              ${pkgs.iw}/bin/iw dev ${new._iName} set type ${new.type}
+              ${optionalString (new.type == "mesh" && new.meshID!=null) "${pkgs.iw}/bin/iw dev ${new._iName} set meshid ${new.meshID}"}
+              ${optionalString (new.type == "monitor" && new.flags!=null) "${pkgs.iw}/bin/iw dev ${new._iName} set monitor ${new.flags}"}
+              ${optionalString (new.type == "managed" && new.fourAddr!=null) "${pkgs.iw}/bin/iw dev ${new._iName} set 4addr ${if new.fourAddr then "on" else "off"}"}
+              ${optionalString (new.mac != null) "${pkgs.iproute2}/bin/ip link set dev ${new._iName} address ${new.mac}"}
+            '';
+
+            # Udev attributes for systemd to name the device and to create a .device target.
+            systemdAttrs = n: ''NAME:="${n}", ENV{INTERFACE}="${n}", ENV{SYSTEMD_ALIAS}="/sys/subsystem/net/devices/${n}", TAG+="systemd"'';
+          in
+          flip (concatMapStringsSep "\n") (attrNames wlanDeviceInterfaces) (device:
+            let
+              interfaces = wlanListDeviceFirst device wlanDeviceInterfaces.${device};
+              curInterface = elemAt interfaces 0;
+              newInterfaces = drop 1 interfaces;
+            in ''
+            # It is important to have that rule first as overwriting the NAME attribute also prevents the
+            # next rules from matching.
+            ${flip (concatMapStringsSep "\n") (wlanListDeviceFirst device wlanDeviceInterfaces.${device}) (interface:
+            ''ACTION=="add", SUBSYSTEM=="net", ENV{DEVTYPE}=="wlan", ENV{INTERFACE}=="${interface._iName}", ${systemdAttrs interface._iName}, RUN+="${newInterfaceScript interface}"'')}
+
+            # Add the required, new WLAN interfaces to the default WLAN interface with the
+            # persistent, default name as assigned by udev.
+            ACTION=="add", SUBSYSTEM=="net", ENV{DEVTYPE}=="wlan", NAME=="${device}", ${systemdAttrs curInterface._iName}, RUN+="${curInterfaceScript device curInterface newInterfaces}"
+            # Generate the same systemd events for both 'add' and 'move' udev events.
+            ACTION=="move", SUBSYSTEM=="net", ENV{DEVTYPE}=="wlan", NAME=="${device}", ${systemdAttrs curInterface._iName}
+          '');
+      });
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/tasks/powertop.nix b/nixpkgs/nixos/modules/tasks/powertop.nix
new file mode 100644
index 000000000000..3839b7a4260e
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/powertop.nix
@@ -0,0 +1,29 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.powerManagement.powertop;
+in {
+  ###### interface
+
+  options.powerManagement.powertop.enable = mkEnableOption (lib.mdDoc "powertop auto tuning on startup");
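+  # Illustrative usage: setting `powerManagement.powertop.enable = true;` in a
+  # configuration activates the oneshot service below, which runs
+  # `powertop --auto-tune` once multi-user.target has been reached.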
+
+  ###### implementation
+
+  config = mkIf (cfg.enable) {
+    systemd.services = {
+      powertop = {
+        wantedBy = [ "multi-user.target" ];
+        after = [ "multi-user.target" ];
+        description = "Powertop tunings";
+        path = [ pkgs.kmod ];
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = "yes";
+          ExecStart = "${pkgs.powertop}/bin/powertop --auto-tune";
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/scsi-link-power-management.nix b/nixpkgs/nixos/modules/tasks/scsi-link-power-management.nix
new file mode 100644
index 000000000000..a5395657e992
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/scsi-link-power-management.nix
@@ -0,0 +1,54 @@
+{ config, lib, ... }:
+
+with lib;
+
+let
+
+  cfg = config.powerManagement.scsiLinkPolicy;
+
+  kernel = config.boot.kernelPackages.kernel;
+
+  allowedValues = [
+    "min_power"
+    "max_performance"
+    "medium_power"
+    "med_power_with_dipm"
+  ];
+
+in
+
+{
+  ###### interface
+
+  options = {
+
+    powerManagement.scsiLinkPolicy = mkOption {
+      default = null;
+      type = types.nullOr (types.enum allowedValues);
+      description = lib.mdDoc ''
+        SCSI link power management policy. The kernel default is
+        "max_performance".
+
+        "med_power_with_dipm" is supported by kernel versions
+        4.15 and newer.
+      '';
+    };
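+    # Illustrative example: on a machine running a kernel >= 4.15 one might set
+    #   powerManagement.scsiLinkPolicy = "med_power_with_dipm";
+    # The udev rule below then writes this value to
+    # link_power_management_policy for every SCSI host that appears.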
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf (cfg != null) {
+
+    assertions = singleton {
+      assertion = (cfg == "med_power_with_dipm") -> versionAtLeast kernel.version "4.15";
+      message = "med_power_with_dipm is not supported for kernels older than 4.15";
+    };
+
+    services.udev.extraRules = ''
+      SUBSYSTEM=="scsi_host", ACTION=="add", KERNEL=="host*", ATTR{link_power_management_policy}="${cfg}"
+    '';
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/tasks/snapraid.nix b/nixpkgs/nixos/modules/tasks/snapraid.nix
new file mode 100644
index 000000000000..243d25f88423
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/snapraid.nix
@@ -0,0 +1,230 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let cfg = config.snapraid;
+in
+{
+  options.snapraid = with types; {
+    enable = mkEnableOption (lib.mdDoc "SnapRAID");
+    dataDisks = mkOption {
+      default = { };
+      example = {
+        d1 = "/mnt/disk1/";
+        d2 = "/mnt/disk2/";
+        d3 = "/mnt/disk3/";
+      };
+      description = lib.mdDoc "SnapRAID data disks.";
+      type = attrsOf str;
+    };
+    parityFiles = mkOption {
+      default = [ ];
+      example = [
+        "/mnt/diskp/snapraid.parity"
+        "/mnt/diskq/snapraid.2-parity"
+        "/mnt/diskr/snapraid.3-parity"
+        "/mnt/disks/snapraid.4-parity"
+        "/mnt/diskt/snapraid.5-parity"
+        "/mnt/disku/snapraid.6-parity"
+      ];
+      description = lib.mdDoc "SnapRAID parity files.";
+      type = listOf str;
+    };
+    contentFiles = mkOption {
+      default = [ ];
+      example = [
+        "/var/snapraid.content"
+        "/mnt/disk1/snapraid.content"
+        "/mnt/disk2/snapraid.content"
+      ];
+      description = lib.mdDoc "SnapRAID content list files.";
+      type = listOf str;
+    };
+    exclude = mkOption {
+      default = [ ];
+      example = [ "*.unrecoverable" "/tmp/" "/lost+found/" ];
+      description = lib.mdDoc "SnapRAID exclude directives.";
+      type = listOf str;
+    };
+    touchBeforeSync = mkOption {
+      default = true;
+      example = false;
+      description = lib.mdDoc
+        "Whether {command}`snapraid touch` should be run before {command}`snapraid sync`.";
+      type = bool;
+    };
+    sync.interval = mkOption {
+      default = "01:00";
+      example = "daily";
+      description = lib.mdDoc "How often to run {command}`snapraid sync`.";
+      type = str;
+    };
+    scrub = {
+      interval = mkOption {
+        default = "Mon *-*-* 02:00:00";
+        example = "weekly";
+        description = lib.mdDoc "How often to run {command}`snapraid scrub`.";
+        type = str;
+      };
+      plan = mkOption {
+        default = 8;
+        example = 5;
+        description = lib.mdDoc
+          "Percent of the array that should be checked by {command}`snapraid scrub`.";
+        type = int;
+      };
+      olderThan = mkOption {
+        default = 10;
+        example = 20;
+        description = lib.mdDoc
+          "Number of days since data was last scrubbed before it can be scrubbed again.";
+        type = int;
+      };
+    };
+    extraConfig = mkOption {
+      default = "";
+      example = ''
+        nohidden
+        blocksize 256
+        hashsize 16
+        autosave 500
+        pool /pool
+      '';
+      description = lib.mdDoc "Extra config options for SnapRAID.";
+      type = lines;
+    };
+  };
+
+  config =
+    let
+      nParity = builtins.length cfg.parityFiles;
+      mkPrepend = pre: s: pre + s;
+    in
+    mkIf cfg.enable {
+      assertions = [
+        {
+          assertion = nParity <= 6;
+          message = "You can have no more than six SnapRAID parity files.";
+        }
+        {
+          assertion = builtins.length cfg.contentFiles >= nParity + 1;
+          message =
+            "There must be at least one SnapRAID content file for each SnapRAID parity file plus one.";
+        }
+      ];
+
+      environment = {
+        systemPackages = with pkgs; [ snapraid ];
+
+        etc."snapraid.conf" = {
+          text = with cfg;
+            let
+              prependData = mkPrepend "data ";
+              prependContent = mkPrepend "content ";
+              prependExclude = mkPrepend "exclude ";
+            in
+            concatStringsSep "\n"
+              (map prependData
+                ((mapAttrsToList (name: value: name + " " + value)) dataDisks)
+              ++ zipListsWith (a: b: a + b)
+                ([ "parity " ] ++ map (i: toString i + "-parity ") (range 2 6))
+                parityFiles ++ map prependContent contentFiles
+              ++ map prependExclude exclude) + "\n" + extraConfig;
+        };
+      };
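+      # Illustrative example (hypothetical paths): with
+      #   dataDisks = { d1 = "/mnt/disk1/"; d2 = "/mnt/disk2/"; };
+      #   parityFiles = [ "/mnt/diskp/snapraid.parity" ];
+      #   contentFiles = [ "/var/snapraid.content" "/mnt/disk1/snapraid.content" ];
+      # the generated /etc/snapraid.conf would contain roughly:
+      #   data d1 /mnt/disk1/
+      #   data d2 /mnt/disk2/
+      #   parity /mnt/diskp/snapraid.parity
+      #   content /var/snapraid.content
+      #   content /mnt/disk1/snapraid.content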
+
+      systemd.services = with cfg; {
+        snapraid-scrub = {
+          description = "Scrub the SnapRAID array";
+          startAt = scrub.interval;
+          serviceConfig = {
+            Type = "oneshot";
+            ExecStart = "${pkgs.snapraid}/bin/snapraid scrub -p ${
+              toString scrub.plan
+            } -o ${toString scrub.olderThan}";
+            Nice = 19;
+            IOSchedulingPriority = 7;
+            CPUSchedulingPolicy = "batch";
+
+            LockPersonality = true;
+            MemoryDenyWriteExecute = true;
+            NoNewPrivileges = true;
+            PrivateDevices = true;
+            PrivateTmp = true;
+            ProtectClock = true;
+            ProtectControlGroups = true;
+            ProtectHostname = true;
+            ProtectKernelLogs = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            RestrictAddressFamilies = "none";
+            RestrictNamespaces = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            SystemCallArchitectures = "native";
+            SystemCallFilter = "@system-service";
+            SystemCallErrorNumber = "EPERM";
+            CapabilityBoundingSet = "CAP_DAC_OVERRIDE";
+
+            ProtectSystem = "strict";
+            ProtectHome = "read-only";
+            ReadWritePaths =
+              # scrub requires access to directories containing content files
+              # to remove them if they are stale
+              let
+                contentDirs = map dirOf contentFiles;
+              in
+              unique (
+                attrValues dataDisks ++ contentDirs
+              );
+          };
+          unitConfig.After = "snapraid-sync.service";
+        };
+        snapraid-sync = {
+          description = "Synchronize the state of the SnapRAID array";
+          startAt = sync.interval;
+          serviceConfig = {
+            Type = "oneshot";
+            ExecStart = "${pkgs.snapraid}/bin/snapraid sync";
+            Nice = 19;
+            IOSchedulingPriority = 7;
+            CPUSchedulingPolicy = "batch";
+
+            LockPersonality = true;
+            MemoryDenyWriteExecute = true;
+            NoNewPrivileges = true;
+            PrivateTmp = true;
+            ProtectClock = true;
+            ProtectControlGroups = true;
+            ProtectHostname = true;
+            ProtectKernelLogs = true;
+            ProtectKernelModules = true;
+            ProtectKernelTunables = true;
+            RestrictAddressFamilies = "none";
+            RestrictNamespaces = true;
+            RestrictRealtime = true;
+            RestrictSUIDSGID = true;
+            SystemCallArchitectures = "native";
+            SystemCallFilter = "@system-service";
+            SystemCallErrorNumber = "EPERM";
+            CapabilityBoundingSet = "CAP_DAC_OVERRIDE" +
+              lib.optionalString cfg.touchBeforeSync " CAP_FOWNER";
+
+            ProtectSystem = "strict";
+            ProtectHome = "read-only";
+            ReadWritePaths =
+              # sync requires access to directories containing content files
+              # to remove them if they are stale
+              let
+                contentDirs = map dirOf contentFiles;
+              in
+              unique (
+                attrValues dataDisks ++ parityFiles ++ contentDirs
+              );
+          } // optionalAttrs touchBeforeSync {
+            ExecStartPre = "${pkgs.snapraid}/bin/snapraid touch";
+          };
+        };
+      };
+    };
+}
diff --git a/nixpkgs/nixos/modules/tasks/stratis.nix b/nixpkgs/nixos/modules/tasks/stratis.nix
new file mode 100644
index 000000000000..9a85fe23f248
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/stratis.nix
@@ -0,0 +1,18 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.services.stratis;
+in
+{
+  options.services.stratis = {
+    enable = lib.mkEnableOption (lib.mdDoc "Stratis Storage - Easy to use local storage management for Linux");
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.stratis-cli ];
+    systemd.packages = [ pkgs.stratisd ];
+    services.dbus.packages = [ pkgs.stratisd ];
+    services.udev.packages = [ pkgs.stratisd ];
+    systemd.services.stratisd.wantedBy = [ "sysinit.target" ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/swraid.nix b/nixpkgs/nixos/modules/tasks/swraid.nix
new file mode 100644
index 000000000000..249755bc0548
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/swraid.nix
@@ -0,0 +1,87 @@
+{ config, pkgs, lib, ... }: let
+
+  cfg = config.boot.swraid;
+
+  mdadm_conf = config.environment.etc."mdadm.conf";
+
+  enable_implicitly_for_old_state_versions = lib.versionOlder config.system.stateVersion "23.11";
+
+  minimum_config_is_set = conf:
+    (builtins.match ".*(MAILADDR|PROGRAM).*" conf.text) != null;
+
+in {
+  imports = [
+    (lib.mkRenamedOptionModule [ "boot" "initrd" "services" "swraid" "enable" ] [ "boot" "swraid" "enable" ])
+    (lib.mkRenamedOptionModule [ "boot" "initrd" "services" "swraid" "mdadmConf" ] [ "boot" "swraid" "mdadmConf" ])
+  ];
+
+
+  options.boot.swraid = {
+    enable = lib.mkEnableOption (lib.mdDoc "swraid support using mdadm") // {
+      description = lib.mdDoc ''
+        Whether to enable support for Linux MD RAID arrays.
+
+        When this is enabled, mdadm will be added to the system path,
+        and MD RAID arrays will be detected and activated
+        automatically, both in stage-1 (initramfs) and in stage-2 (the
+        final NixOS system).
+
+        This should be enabled if you want to be able to access and/or
+        boot from MD RAID arrays. {command}`nixos-generate-config`
+        should detect it correctly in the standard installation
+        procedure.
+      '';
+      default = enable_implicitly_for_old_state_versions;
+      defaultText = lib.mdDoc "`true` if stateVersion is older than 23.11";
+    };
+
+    mdadmConf = lib.mkOption {
+      description = lib.mdDoc "Contents of {file}`/etc/mdadm.conf`.";
+      type = lib.types.lines;
+      default = "";
+    };
+  };
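+  # Illustrative example: enabling software RAID explicitly on a system with
+  # stateVersion >= 23.11 and silencing the mdmon warning below:
+  #   boot.swraid.enable = true;
+  #   boot.swraid.mdadmConf = "MAILADDR root";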
+
+  config = lib.mkIf cfg.enable {
+    warnings = lib.mkIf
+        ( !enable_implicitly_for_old_state_versions && !minimum_config_is_set mdadm_conf)
+        [ "mdadm: Neither MAILADDR nor PROGRAM has been set. This will cause the `mdmon` service to crash." ];
+
+    environment.systemPackages = [ pkgs.mdadm ];
+
+    environment.etc."mdadm.conf".text = lib.mkAfter cfg.mdadmConf;
+
+    services.udev.packages = [ pkgs.mdadm ];
+
+    systemd.packages = [ pkgs.mdadm ];
+
+    boot.initrd = {
+      availableKernelModules = [ "md_mod" "raid0" "raid1" "raid10" "raid456" ];
+
+      extraUdevRulesCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+        cp -v ${pkgs.mdadm}/lib/udev/rules.d/*.rules $out/
+      '';
+
+      extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+        # Add RAID mdadm tool.
+        copy_bin_and_libs ${pkgs.mdadm}/sbin/mdadm
+        copy_bin_and_libs ${pkgs.mdadm}/sbin/mdmon
+      '';
+
+      extraUtilsCommandsTest = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+        $out/bin/mdadm --version
+      '';
+
+      extraFiles."/etc/mdadm.conf".source = pkgs.writeText "mdadm.conf" mdadm_conf.text;
+
+      systemd = {
+        contents."/etc/mdadm.conf".text = mdadm_conf.text;
+
+        packages = [ pkgs.mdadm ];
+        initrdBin = [ pkgs.mdadm ];
+      };
+
+      services.udev.packages = [ pkgs.mdadm ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/tasks/trackpoint.nix b/nixpkgs/nixos/modules/tasks/trackpoint.nix
new file mode 100644
index 000000000000..d197a0feb337
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/trackpoint.nix
@@ -0,0 +1,108 @@
+{ config, lib, ... }:
+
+with lib;
+
+{
+  ###### interface
+
+  options = {
+
+    hardware.trackpoint = {
+
+      enable = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable sensitivity and speed configuration for trackpoints.
+        '';
+      };
+
+      sensitivity = mkOption {
+        default = 128;
+        example = 255;
+        type = types.int;
+        description = lib.mdDoc ''
+          Configure the trackpoint sensitivity. By default, the kernel
+          configures 128.
+        '';
+      };
+
+      speed = mkOption {
+        default = 97;
+        example = 255;
+        type = types.int;
+        description = lib.mdDoc ''
+          Configure the trackpoint speed. By default, the kernel
+          configures 97.
+        '';
+      };
+
+      emulateWheel = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Enable scrolling while holding the middle mouse button.
+        '';
+      };
+
+      fakeButtons = mkOption {
+        default = false;
+        type = types.bool;
+        description = lib.mdDoc ''
+          Switch to "bare" PS/2 mouse support in case Trackpoint buttons are not recognized
+          properly. This can happen for example on models like the L430, T450, T450s, on
+          which the Trackpoint buttons are actually a part of the Synaptics touchpad.
+        '';
+      };
+
+      device = mkOption {
+        default = "TPPS/2 IBM TrackPoint";
+        type = types.str;
+        description = lib.mdDoc ''
+          The device name of the trackpoint. You can check with xinput.
+          Some newer devices (example x1c6) use "TPPS/2 Elan TrackPoint".
+        '';
+      };
+
+    };
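+    # Illustrative example (values are hypothetical):
+    #   hardware.trackpoint = {
+    #     enable = true;
+    #     sensitivity = 200;
+    #     speed = 120;
+    #     emulateWheel = true;
+    #   };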
+
+  };
+
+
+  ###### implementation
+
+  config =
+  let cfg = config.hardware.trackpoint; in
+  mkMerge [
+    (mkIf cfg.enable {
+      services.udev.extraRules =
+      ''
+        ACTION=="add|change", SUBSYSTEM=="input", ATTR{name}=="${cfg.device}", ATTR{device/speed}="${toString cfg.speed}", ATTR{device/sensitivity}="${toString cfg.sensitivity}"
+      '';
+
+      system.activationScripts.trackpoint =
+        ''
+          ${config.systemd.package}/bin/udevadm trigger --attr-match=name="${cfg.device}"
+        '';
+    })
+
+    (mkIf (cfg.emulateWheel) {
+      services.xserver.inputClassSections = [
+        ''
+          Identifier "Trackpoint Wheel Emulation"
+          MatchProduct "${if cfg.fakeButtons then "PS/2 Generic Mouse" else "ETPS/2 Elantech TrackPoint|Elantech PS/2 TrackPoint|TPPS/2 IBM TrackPoint|DualPoint Stick|Synaptics Inc. Composite TouchPad / TrackPoint|ThinkPad USB Keyboard with TrackPoint|USB Trackpoint pointing device|Composite TouchPad / TrackPoint|${cfg.device}"}"
+          MatchDevicePath "/dev/input/event*"
+          Option "EmulateWheel" "true"
+          Option "EmulateWheelButton" "2"
+          Option "Emulate3Buttons" "false"
+          Option "XAxisMapping" "6 7"
+          Option "YAxisMapping" "4 5"
+        ''
+      ];
+    })
+
+    (mkIf cfg.fakeButtons {
+      boot.extraModprobeConfig = "options psmouse proto=bare";
+    })
+  ];
+}
diff --git a/nixpkgs/nixos/modules/tasks/tty-backgrounds-combine.sh b/nixpkgs/nixos/modules/tasks/tty-backgrounds-combine.sh
new file mode 100644
index 000000000000..55c3a1ebfa8a
--- /dev/null
+++ b/nixpkgs/nixos/modules/tasks/tty-backgrounds-combine.sh
@@ -0,0 +1,32 @@
+source $stdenv/setup
+
+ttys=($ttys)
+themes=($themes)
+
+mkdir -p $out
+
+defaultName=$(cd $default && ls | grep -v default)
+echo $defaultName
+ln -s $default/$defaultName $out/$defaultName
+ln -s $defaultName $out/default
+
+for ((n = 0; n < ${#ttys[*]}; n++)); do
+    tty=${ttys[$n]}
+    theme=${themes[$n]}
+
+    echo "TTY $tty -> $theme"
+
+    if [ "$theme" != default ]; then
+        themeName=$(cd $theme && ls | grep -v default)
+        ln -sfn $theme/$themeName $out/$themeName
+    else
+        themeName=default
+    fi
+
+    if test -e $out/$tty; then
+        echo "Multiple themes defined for the same TTY!"
+        exit 1
+    fi
+
+    ln -sfn $themeName $out/$tty
+done
diff --git a/nixpkgs/nixos/modules/testing/service-runner.nix b/nixpkgs/nixos/modules/testing/service-runner.nix
new file mode 100644
index 000000000000..bdb35f128a73
--- /dev/null
+++ b/nixpkgs/nixos/modules/testing/service-runner.nix
@@ -0,0 +1,127 @@
+{ lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  makeScript = name: service: pkgs.writeScript "${name}-runner"
+    ''
+      #! ${pkgs.perl.withPackages (p: [ p.FileSlurp ])}/bin/perl -w
+
+      use File::Slurp;
+
+      sub run {
+          my ($cmd) = @_;
+          my @args = ();
+          while ($cmd =~ /([^ \t\n']+)|(\'([^']*)\')\s*/g) {
+            push @args, defined $1 ? $1 : $3;
+          }
+          my $prog;
+          if (substr($args[0], 0, 1) eq "@") {
+              $prog = substr($args[0], 1);
+              shift @args;
+          } else {
+              $prog = $args[0];
+          }
+          my $pid = fork;
+          if ($pid == 0) {
+              setpgrp; # don't receive SIGINT etc. from terminal
+              exec { $prog } @args;
+              die "failed to exec $prog\n";
+          } elsif (!defined $pid) {
+              die "failed to fork: $!\n";
+          }
+          return $pid;
+      };
+
+      sub run_wait {
+          my ($cmd) = @_;
+          my $pid = run $cmd;
+          die if waitpid($pid, 0) != $pid;
+          return $?;
+      };
+
+      # Set the environment.  FIXME: escaping.
+      foreach my $key (keys %ENV) {
+          next if $key eq 'LOCALE_ARCHIVE';
+          delete $ENV{$key};
+      }
+      ${concatStrings (mapAttrsToList (n: v: ''
+        $ENV{'${n}'} = '${v}';
+      '') service.environment)}
+
+      # Run the ExecStartPre program.  FIXME: this could be a list.
+      my $preStart = <<END_CMD;
+      ${concatStringsSep "\n" (service.serviceConfig.ExecStartPre or [])}
+      END_CMD
+      if (defined $preStart && $preStart ne "\n") {
+          print STDERR "running ExecStartPre: $preStart\n";
+          my $res = run_wait $preStart;
+          die "$0: ExecStartPre failed with status $res\n" if $res;
+      };
+
+      # Run the ExecStart program.
+      my $cmd = <<END_CMD;
+      ${service.serviceConfig.ExecStart}
+      END_CMD
+
+      print STDERR "running ExecStart: $cmd\n";
+      my $mainPid = run $cmd;
+      $ENV{'MAINPID'} = $mainPid;
+
+      # Catch SIGINT, propagate to the main program.
+      sub intHandler {
+          print STDERR "got SIGINT, stopping service...\n";
+          kill 'INT', $mainPid;
+      };
+      $SIG{'INT'} = \&intHandler;
+      $SIG{'QUIT'} = \&intHandler;
+
+      # Run the ExecStartPost program.
+      my $postStart = <<END_CMD;
+      ${concatStringsSep "\n" (service.serviceConfig.ExecStartPost or [])}
+      END_CMD
+      if (defined $postStart && $postStart ne "\n") {
+          print STDERR "running ExecStartPost: $postStart\n";
+          my $res = run_wait $postStart;
+          die "$0: ExecStartPost failed with status $res\n" if $res;
+      }
+
+      # Wait for the main program to exit.
+      die if waitpid($mainPid, 0) != $mainPid;
+      my $mainRes = $?;
+
+      # Run the ExecStopPost program.
+      my $postStop = <<END_CMD;
+      ${service.serviceConfig.ExecStopPost or ""}
+      END_CMD
+      if (defined $postStop && $postStop ne "\n") {
+          print STDERR "running ExecStopPost: $postStop\n";
+          my $res = run_wait $postStop;
+          die "$0: ExecStopPost failed with status $res\n" if $res;
+      }
+
+      exit($mainRes & 127 ? 255 : $mainRes >> 8);
+    '';
+
+  opts = { config, name, ... }: {
+    options.runner = mkOption {
+      internal = true;
+      description = lib.mdDoc ''
+        A script that runs the service outside of systemd,
+        useful for testing or for using NixOS services outside
+        of NixOS.
+      '';
+    };
+    config.runner = makeScript name config;
+  };
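+  # Illustrative usage (service name "nginx" is hypothetical): build and run a
+  # service outside of systemd, roughly:
+  #   nix-build '<nixpkgs/nixos>' -A config.systemd.services.nginx.runner
+  #   ./result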
+
+in
+
+{
+  options = {
+    systemd.services = mkOption {
+      type = with types; attrsOf (submodule opts);
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/testing/test-instrumentation.nix b/nixpkgs/nixos/modules/testing/test-instrumentation.nix
new file mode 100644
index 000000000000..9ee77cd79a9b
--- /dev/null
+++ b/nixpkgs/nixos/modules/testing/test-instrumentation.nix
@@ -0,0 +1,222 @@
+# This module allows the test driver to connect to the virtual machine
+# via a root shell attached to the backdoor console (/dev/hvc0).
+
+{ options, config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.testing;
+
+  qemu-common = import ../../lib/qemu-common.nix { inherit lib pkgs; };
+
+  backdoorService = {
+    requires = [ "dev-hvc0.device" "dev-${qemu-common.qemuSerialDevice}.device" ];
+    after = [ "dev-hvc0.device" "dev-${qemu-common.qemuSerialDevice}.device" ];
+    script =
+      ''
+        export USER=root
+        export HOME=/root
+        export DISPLAY=:0.0
+
+        if [[ -e /etc/profile ]]; then
+            source /etc/profile
+        fi
+
+        # Don't use a pager when executing backdoor
+        # actions. Because we use a tty, commands like systemctl
+        # or nix-store get confused into thinking they're running
+        # interactively.
+        export PAGER=
+
+        cd /tmp
+        exec < /dev/hvc0 > /dev/hvc0
+        while ! exec 2> /dev/${qemu-common.qemuSerialDevice}; do sleep 0.1; done
+        echo "connecting to host..." >&2
+        stty -F /dev/hvc0 raw -echo # prevent nl -> cr/nl conversion
+        # The following line is essential since it signals to
+        # the test driver that the shell is ready.
+        # See: the connect method in the Machine class.
+        echo "Spawning backdoor root shell..."
+        # Passing the terminal device makes bash run non-interactively.
+        # Otherwise we get errors on the terminal because bash tries to
+        # setup things like job control.
+        # Note: calling bash explicitly here instead of sh makes sure that
+        # we can also run non-NixOS guests during tests.
+        PS1= exec /usr/bin/env bash --norc /dev/hvc0
+      '';
+      serviceConfig.KillSignal = "SIGHUP";
+  };
+
+in
+
+{
+
+  options.testing = {
+
+    initrdBackdoor = lib.mkEnableOption (lib.mdDoc ''
+      enable backdoor.service in initrd. Requires
+      boot.initrd.systemd.enable to be enabled. Boot will pause in
+      stage 1 at initrd.target, and will listen for commands from the
+      Machine python interface, just like stage 2 normally does. This
+      enables commands to be sent to test and debug stage 1. Use
+      machine.switch_root() to leave stage 1 and proceed to stage 2.
+    '');
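+    # Illustrative usage in a VM test node (names follow the description
+    # above): set `testing.initrdBackdoor = true;` together with
+    # `boot.initrd.systemd.enable = true;`, drive stage 1 from the test
+    # script, then call `machine.switch_root()` to continue into stage 2.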
+
+  };
+
+  config = {
+
+    assertions = [
+      {
+        assertion = cfg.initrdBackdoor -> config.boot.initrd.systemd.enable;
+        message = ''
+          testing.initrdBackdoor requires boot.initrd.systemd.enable to be enabled.
+        '';
+      }
+    ];
+
+    systemd.services.backdoor = lib.mkMerge [
+      backdoorService
+      {
+        wantedBy = [ "multi-user.target" ];
+      }
+    ];
+
+    boot.initrd.systemd = lib.mkMerge [
+      {
+        contents."/etc/systemd/journald.conf".text = ''
+          [Journal]
+          ForwardToConsole=yes
+          MaxLevelConsole=debug
+        '';
+
+        extraConfig = config.systemd.extraConfig;
+      }
+
+      (lib.mkIf cfg.initrdBackdoor {
+        # Implemented in machine.switch_root(). Suppress the unit by
+        # making it a noop without removing it, which would break
+        # initrd-parse-etc.service
+        services.initrd-cleanup.serviceConfig.ExecStart = [
+          # Reset
+          ""
+          # noop
+          "/bin/true"
+        ];
+
+        services.backdoor = lib.mkMerge [
+          backdoorService
+          {
+            # TODO: Both stage 1 and stage 2 should use these same
+            # settings. But a lot of existing tests rely on
+            # backdoor.service having default orderings,
+            # e.g. systemd-boot.update relies on /boot being mounted
+            # as soon as backdoor starts. But it can be useful for
+            # backdoor to start even earlier.
+            wantedBy = [ "sysinit.target" ];
+            unitConfig.DefaultDependencies = false;
+            conflicts = [ "shutdown.target" "initrd-switch-root.target" ];
+            before = [ "shutdown.target" "initrd-switch-root.target" ];
+          }
+        ];
+
+        contents."/usr/bin/env".source = "${pkgs.coreutils}/bin/env";
+      })
+    ];
+
+    # Prevent agetty from being instantiated on the serial device, since it
+    # interferes with the backdoor (writes to it will randomly fail
+    # with EIO).  Likewise for hvc0.
+    systemd.services."serial-getty@${qemu-common.qemuSerialDevice}".enable = false;
+    systemd.services."serial-getty@hvc0".enable = false;
+
+    # Only set these settings when the options exist. Some tests (e.g. those
+    # that do not specify any nodes, or an empty attr set as nodes) will not
+    # have the QEMU module loaded and thus these options can't and should not
+    # be set.
+    virtualisation = lib.optionalAttrs (options ? virtualisation.qemu) {
+      qemu = {
+        # Only use a serial console, no TTY.
+        # NOTE: optionalAttrs
+        #       test-instrumentation.nix appears to be used without qemu-vm.nix, so
+        #       we avoid defining consoles if not possible.
+        # TODO: refactor such that test-instrumentation can import qemu-vm
+        #       or declare virtualisation.qemu.console option in a module that's always imported
+        consoles = [ qemu-common.qemuSerialDevice ];
+        package  = lib.mkDefault pkgs.qemu_test;
+      };
+    };
+
+    boot.kernel.sysctl = {
+      "kernel.hung_task_timeout_secs" = 600;
+      # Panic on out-of-memory conditions rather than letting the
+      # OOM killer randomly get rid of processes, since this leads
+      # to failures that are hard to diagnose.
+      "vm.panic_on_oom" = lib.mkDefault 2;
+    };
+
+    boot.kernelParams = [
+      "console=${qemu-common.qemuSerialDevice}"
+      # Panic if an error occurs in stage 1 (rather than waiting for
+      # user intervention).
+      "panic=1" "boot.panic_on_fail"
+      # Using acpi_pm as a clock source causes the guest clock to
+      # slow down under high host load.  This is usually a bad
+      # thing, but for VM tests it should provide a bit more
+      # determinism (e.g. if the VM runs at lower speed, then
+      # timeouts in the VM should also be delayed).
+      "clock=acpi_pm"
+    ];
+
+    # `xwininfo' is used by the test driver to query open windows.
+    environment.systemPackages = [ pkgs.xorg.xwininfo ];
+
+    # Log everything to the serial console.
+    services.journald.extraConfig =
+      ''
+        ForwardToConsole=yes
+        MaxLevelConsole=debug
+      '';
+
+    systemd.extraConfig = ''
+      # Don't clobber the console with duplicate systemd messages.
+      ShowStatus=no
+      # Allow very slow start
+      DefaultTimeoutStartSec=300
+      DefaultDeviceTimeoutSec=300
+    '';
+    systemd.user.extraConfig = ''
+      # Allow very slow start
+      DefaultTimeoutStartSec=300
+      DefaultDeviceTimeoutSec=300
+    '';
+
+    boot.consoleLogLevel = 7;
+
+    # Prevent tests from accessing the Internet.
+    networking.defaultGateway = mkOverride 150 null;
+    networking.nameservers = mkOverride 150 [ ];
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "SERIAL_8250_CONSOLE")
+      (isYes "SERIAL_8250")
+      (isEnabled "VIRTIO_CONSOLE")
+    ];
+
+    networking.usePredictableInterfaceNames = false;
+
+    # Make it easy to log in as root when running the test interactively.
+    users.users.root.initialHashedPassword = mkOverride 150 "";
+
+    services.xserver.displayManager.job.logToJournal = true;
+
+    # Make sure we use the Guest Agent from the QEMU package for testing
+    # to reduce the closure size required for the tests.
+    services.qemuGuest.package = pkgs.qemu_test.ga;
+
+    # Squelch warning about unset system.stateVersion
+    system.stateVersion = lib.mkDefault lib.trivial.release;
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-ec2-amis.nix b/nixpkgs/nixos/modules/virtualisation/amazon-ec2-amis.nix
new file mode 100644
index 000000000000..ff88f02e5d33
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-ec2-amis.nix
@@ -0,0 +1,588 @@
+let self = {
+  "14.04".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-71c6f470";
+  "14.04".ap-northeast-1.x86_64-linux.pv-ebs = "ami-4dcbf84c";
+  "14.04".ap-northeast-1.x86_64-linux.pv-s3 = "ami-8fc4f68e";
+  "14.04".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-da280888";
+  "14.04".ap-southeast-1.x86_64-linux.pv-ebs = "ami-7a9dbc28";
+  "14.04".ap-southeast-1.x86_64-linux.pv-s3 = "ami-c4290996";
+  "14.04".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-ab523e91";
+  "14.04".ap-southeast-2.x86_64-linux.pv-ebs = "ami-6769055d";
+  "14.04".ap-southeast-2.x86_64-linux.pv-s3 = "ami-15533f2f";
+  "14.04".eu-central-1.x86_64-linux.hvm-ebs = "ami-ba0234a7";
+  "14.04".eu-west-1.x86_64-linux.hvm-ebs = "ami-96cb63e1";
+  "14.04".eu-west-1.x86_64-linux.pv-ebs = "ami-b48c25c3";
+  "14.04".eu-west-1.x86_64-linux.pv-s3 = "ami-06cd6571";
+  "14.04".sa-east-1.x86_64-linux.hvm-ebs = "ami-01b90e1c";
+  "14.04".sa-east-1.x86_64-linux.pv-ebs = "ami-69e35474";
+  "14.04".sa-east-1.x86_64-linux.pv-s3 = "ami-61b90e7c";
+  "14.04".us-east-1.x86_64-linux.hvm-ebs = "ami-58ba3a30";
+  "14.04".us-east-1.x86_64-linux.pv-ebs = "ami-9e0583f6";
+  "14.04".us-east-1.x86_64-linux.pv-s3 = "ami-9cbe3ef4";
+  "14.04".us-west-1.x86_64-linux.hvm-ebs = "ami-0bc3d74e";
+  "14.04".us-west-1.x86_64-linux.pv-ebs = "ami-8b1703ce";
+  "14.04".us-west-1.x86_64-linux.pv-s3 = "ami-27ccd862";
+  "14.04".us-west-2.x86_64-linux.hvm-ebs = "ami-3bf1bf0b";
+  "14.04".us-west-2.x86_64-linux.pv-ebs = "ami-259bd515";
+  "14.04".us-west-2.x86_64-linux.pv-s3 = "ami-07094037";
+
+  "14.12".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-24435f25";
+  "14.12".ap-northeast-1.x86_64-linux.pv-ebs = "ami-b0425eb1";
+  "14.12".ap-northeast-1.x86_64-linux.pv-s3 = "ami-fed3c6ff";
+  "14.12".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-6c765d3e";
+  "14.12".ap-southeast-1.x86_64-linux.pv-ebs = "ami-6a765d38";
+  "14.12".ap-southeast-1.x86_64-linux.pv-s3 = "ami-d1bf9183";
+  "14.12".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-af86f395";
+  "14.12".ap-southeast-2.x86_64-linux.pv-ebs = "ami-b386f389";
+  "14.12".ap-southeast-2.x86_64-linux.pv-s3 = "ami-69c5ae53";
+  "14.12".eu-central-1.x86_64-linux.hvm-ebs = "ami-4a497a57";
+  "14.12".eu-central-1.x86_64-linux.pv-ebs = "ami-4c497a51";
+  "14.12".eu-central-1.x86_64-linux.pv-s3 = "ami-60f2c27d";
+  "14.12".eu-west-1.x86_64-linux.hvm-ebs = "ami-d126a5a6";
+  "14.12".eu-west-1.x86_64-linux.pv-ebs = "ami-0126a576";
+  "14.12".eu-west-1.x86_64-linux.pv-s3 = "ami-deda5fa9";
+  "14.12".sa-east-1.x86_64-linux.hvm-ebs = "ami-2d239e30";
+  "14.12".sa-east-1.x86_64-linux.pv-ebs = "ami-35239e28";
+  "14.12".sa-east-1.x86_64-linux.pv-s3 = "ami-81e3519c";
+  "14.12".us-east-1.x86_64-linux.hvm-ebs = "ami-0c463a64";
+  "14.12".us-east-1.x86_64-linux.pv-ebs = "ami-ac473bc4";
+  "14.12".us-east-1.x86_64-linux.pv-s3 = "ami-00e18a68";
+  "14.12".us-west-1.x86_64-linux.hvm-ebs = "ami-ca534a8f";
+  "14.12".us-west-1.x86_64-linux.pv-ebs = "ami-3e534a7b";
+  "14.12".us-west-1.x86_64-linux.pv-s3 = "ami-2905196c";
+  "14.12".us-west-2.x86_64-linux.hvm-ebs = "ami-fb9dc3cb";
+  "14.12".us-west-2.x86_64-linux.pv-ebs = "ami-899dc3b9";
+  "14.12".us-west-2.x86_64-linux.pv-s3 = "ami-cb7f2dfb";
+
+  "15.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-58cac236";
+  "15.09".ap-northeast-1.x86_64-linux.hvm-s3 = "ami-39c8c057";
+  "15.09".ap-northeast-1.x86_64-linux.pv-ebs = "ami-5ac9c134";
+  "15.09".ap-northeast-1.x86_64-linux.pv-s3 = "ami-03cec66d";
+  "15.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-2fc2094c";
+  "15.09".ap-southeast-1.x86_64-linux.hvm-s3 = "ami-9ec308fd";
+  "15.09".ap-southeast-1.x86_64-linux.pv-ebs = "ami-95c00bf6";
+  "15.09".ap-southeast-1.x86_64-linux.pv-s3 = "ami-bfc00bdc";
+  "15.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-996c4cfa";
+  "15.09".ap-southeast-2.x86_64-linux.hvm-s3 = "ami-3f6e4e5c";
+  "15.09".ap-southeast-2.x86_64-linux.pv-ebs = "ami-066d4d65";
+  "15.09".ap-southeast-2.x86_64-linux.pv-s3 = "ami-cc6e4eaf";
+  "15.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-3f8c6b50";
+  "15.09".eu-central-1.x86_64-linux.hvm-s3 = "ami-5b836434";
+  "15.09".eu-central-1.x86_64-linux.pv-ebs = "ami-118c6b7e";
+  "15.09".eu-central-1.x86_64-linux.pv-s3 = "ami-2c977043";
+  "15.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-9cf04aef";
+  "15.09".eu-west-1.x86_64-linux.hvm-s3 = "ami-2bea5058";
+  "15.09".eu-west-1.x86_64-linux.pv-ebs = "ami-c9e852ba";
+  "15.09".eu-west-1.x86_64-linux.pv-s3 = "ami-c6f64cb5";
+  "15.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-6e52df02";
+  "15.09".sa-east-1.x86_64-linux.hvm-s3 = "ami-1852df74";
+  "15.09".sa-east-1.x86_64-linux.pv-ebs = "ami-4368e52f";
+  "15.09".sa-east-1.x86_64-linux.pv-s3 = "ami-f15ad79d";
+  "15.09".us-east-1.x86_64-linux.hvm-ebs = "ami-84a6a0ee";
+  "15.09".us-east-1.x86_64-linux.hvm-s3 = "ami-06a7a16c";
+  "15.09".us-east-1.x86_64-linux.pv-ebs = "ami-a4a1a7ce";
+  "15.09".us-east-1.x86_64-linux.pv-s3 = "ami-5ba8ae31";
+  "15.09".us-west-1.x86_64-linux.hvm-ebs = "ami-22c8bb42";
+  "15.09".us-west-1.x86_64-linux.hvm-s3 = "ami-a2ccbfc2";
+  "15.09".us-west-1.x86_64-linux.pv-ebs = "ami-10cebd70";
+  "15.09".us-west-1.x86_64-linux.pv-s3 = "ami-fa30429a";
+  "15.09".us-west-2.x86_64-linux.hvm-ebs = "ami-ce57b9ae";
+  "15.09".us-west-2.x86_64-linux.hvm-s3 = "ami-2956b849";
+  "15.09".us-west-2.x86_64-linux.pv-ebs = "ami-005fb160";
+  "15.09".us-west-2.x86_64-linux.pv-s3 = "ami-cd55bbad";
+
+  "16.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-40619d21";
+  "16.03".ap-northeast-1.x86_64-linux.hvm-s3 = "ami-ce629eaf";
+  "16.03".ap-northeast-1.x86_64-linux.pv-ebs = "ami-ef639f8e";
+  "16.03".ap-northeast-1.x86_64-linux.pv-s3 = "ami-a1609cc0";
+  "16.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-deca00b0";
+  "16.03".ap-northeast-2.x86_64-linux.hvm-s3 = "ami-a3b77dcd";
+  "16.03".ap-northeast-2.x86_64-linux.pv-ebs = "ami-7bcb0115";
+  "16.03".ap-northeast-2.x86_64-linux.pv-s3 = "ami-a2b77dcc";
+  "16.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-0dff9562";
+  "16.03".ap-south-1.x86_64-linux.hvm-s3 = "ami-13f69c7c";
+  "16.03".ap-south-1.x86_64-linux.pv-ebs = "ami-0ef39961";
+  "16.03".ap-south-1.x86_64-linux.pv-s3 = "ami-e0c8a28f";
+  "16.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-5e964a3d";
+  "16.03".ap-southeast-1.x86_64-linux.hvm-s3 = "ami-4d964a2e";
+  "16.03".ap-southeast-1.x86_64-linux.pv-ebs = "ami-ec9b478f";
+  "16.03".ap-southeast-1.x86_64-linux.pv-s3 = "ami-999b47fa";
+  "16.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-9f7359fc";
+  "16.03".ap-southeast-2.x86_64-linux.hvm-s3 = "ami-987359fb";
+  "16.03".ap-southeast-2.x86_64-linux.pv-ebs = "ami-a2705ac1";
+  "16.03".ap-southeast-2.x86_64-linux.pv-s3 = "ami-a3705ac0";
+  "16.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-17a45178";
+  "16.03".eu-central-1.x86_64-linux.hvm-s3 = "ami-f9a55096";
+  "16.03".eu-central-1.x86_64-linux.pv-ebs = "ami-c8a550a7";
+  "16.03".eu-central-1.x86_64-linux.pv-s3 = "ami-6ea45101";
+  "16.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-b5b3d5c6";
+  "16.03".eu-west-1.x86_64-linux.hvm-s3 = "ami-c986e0ba";
+  "16.03".eu-west-1.x86_64-linux.pv-ebs = "ami-b083e5c3";
+  "16.03".eu-west-1.x86_64-linux.pv-s3 = "ami-3c83e54f";
+  "16.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-f6eb7f9a";
+  "16.03".sa-east-1.x86_64-linux.hvm-s3 = "ami-93e773ff";
+  "16.03".sa-east-1.x86_64-linux.pv-ebs = "ami-cbb82ca7";
+  "16.03".sa-east-1.x86_64-linux.pv-s3 = "ami-abb82cc7";
+  "16.03".us-east-1.x86_64-linux.hvm-ebs = "ami-c123a3d6";
+  "16.03".us-east-1.x86_64-linux.hvm-s3 = "ami-bc25a5ab";
+  "16.03".us-east-1.x86_64-linux.pv-ebs = "ami-bd25a5aa";
+  "16.03".us-east-1.x86_64-linux.pv-s3 = "ami-a325a5b4";
+  "16.03".us-west-1.x86_64-linux.hvm-ebs = "ami-748bcd14";
+  "16.03".us-west-1.x86_64-linux.hvm-s3 = "ami-a68dcbc6";
+  "16.03".us-west-1.x86_64-linux.pv-ebs = "ami-048acc64";
+  "16.03".us-west-1.x86_64-linux.pv-s3 = "ami-208dcb40";
+  "16.03".us-west-2.x86_64-linux.hvm-ebs = "ami-8263a0e2";
+  "16.03".us-west-2.x86_64-linux.hvm-s3 = "ami-925c9ff2";
+  "16.03".us-west-2.x86_64-linux.pv-ebs = "ami-5e61a23e";
+  "16.03".us-west-2.x86_64-linux.pv-s3 = "ami-734c8f13";
+
+  # 16.09.1508.3909827
+  "16.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-68453b0f";
+  "16.09".ap-northeast-1.x86_64-linux.hvm-s3 = "ami-f9bec09e";
+  "16.09".ap-northeast-1.x86_64-linux.pv-ebs = "ami-254a3442";
+  "16.09".ap-northeast-1.x86_64-linux.pv-s3 = "ami-ef473988";
+  "16.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-18ae7f76";
+  "16.09".ap-northeast-2.x86_64-linux.hvm-s3 = "ami-9eac7df0";
+  "16.09".ap-northeast-2.x86_64-linux.pv-ebs = "ami-57aa7b39";
+  "16.09".ap-northeast-2.x86_64-linux.pv-s3 = "ami-5cae7f32";
+  "16.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-b3f98fdc";
+  "16.09".ap-south-1.x86_64-linux.hvm-s3 = "ami-98e690f7";
+  "16.09".ap-south-1.x86_64-linux.pv-ebs = "ami-aef98fc1";
+  "16.09".ap-south-1.x86_64-linux.pv-s3 = "ami-caf88ea5";
+  "16.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-80fb51e3";
+  "16.09".ap-southeast-1.x86_64-linux.hvm-s3 = "ami-2df3594e";
+  "16.09".ap-southeast-1.x86_64-linux.pv-ebs = "ami-37f05a54";
+  "16.09".ap-southeast-1.x86_64-linux.pv-s3 = "ami-27f35944";
+  "16.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-57ece834";
+  "16.09".ap-southeast-2.x86_64-linux.hvm-s3 = "ami-87f4f0e4";
+  "16.09".ap-southeast-2.x86_64-linux.pv-ebs = "ami-d8ede9bb";
+  "16.09".ap-southeast-2.x86_64-linux.pv-s3 = "ami-a6ebefc5";
+  "16.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-9f863bfb";
+  "16.09".ca-central-1.x86_64-linux.hvm-s3 = "ami-ea85388e";
+  "16.09".ca-central-1.x86_64-linux.pv-ebs = "ami-ce8a37aa";
+  "16.09".ca-central-1.x86_64-linux.pv-s3 = "ami-448a3720";
+  "16.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-1b884774";
+  "16.09".eu-central-1.x86_64-linux.hvm-s3 = "ami-b08c43df";
+  "16.09".eu-central-1.x86_64-linux.pv-ebs = "ami-888946e7";
+  "16.09".eu-central-1.x86_64-linux.pv-s3 = "ami-06874869";
+  "16.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-1ed3e76d";
+  "16.09".eu-west-1.x86_64-linux.hvm-s3 = "ami-73d1e500";
+  "16.09".eu-west-1.x86_64-linux.pv-ebs = "ami-44c0f437";
+  "16.09".eu-west-1.x86_64-linux.pv-s3 = "ami-f3d8ec80";
+  "16.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-2c9c9648";
+  "16.09".eu-west-2.x86_64-linux.hvm-s3 = "ami-6b9e940f";
+  "16.09".eu-west-2.x86_64-linux.pv-ebs = "ami-f1999395";
+  "16.09".eu-west-2.x86_64-linux.pv-s3 = "ami-bb9f95df";
+  "16.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-a11882cd";
+  "16.09".sa-east-1.x86_64-linux.hvm-s3 = "ami-7726bc1b";
+  "16.09".sa-east-1.x86_64-linux.pv-ebs = "ami-9725bffb";
+  "16.09".sa-east-1.x86_64-linux.pv-s3 = "ami-b027bddc";
+  "16.09".us-east-1.x86_64-linux.hvm-ebs = "ami-854ca593";
+  "16.09".us-east-1.x86_64-linux.hvm-s3 = "ami-2241a834";
+  "16.09".us-east-1.x86_64-linux.pv-ebs = "ami-a441a8b2";
+  "16.09".us-east-1.x86_64-linux.pv-s3 = "ami-e841a8fe";
+  "16.09".us-east-2.x86_64-linux.hvm-ebs = "ami-3f41645a";
+  "16.09".us-east-2.x86_64-linux.hvm-s3 = "ami-804065e5";
+  "16.09".us-east-2.x86_64-linux.pv-ebs = "ami-f1466394";
+  "16.09".us-east-2.x86_64-linux.pv-s3 = "ami-05426760";
+  "16.09".us-west-1.x86_64-linux.hvm-ebs = "ami-c2efbca2";
+  "16.09".us-west-1.x86_64-linux.hvm-s3 = "ami-d71042b7";
+  "16.09".us-west-1.x86_64-linux.pv-ebs = "ami-04e8bb64";
+  "16.09".us-west-1.x86_64-linux.pv-s3 = "ami-31e9ba51";
+  "16.09".us-west-2.x86_64-linux.hvm-ebs = "ami-6449f504";
+  "16.09".us-west-2.x86_64-linux.hvm-s3 = "ami-344af654";
+  "16.09".us-west-2.x86_64-linux.pv-ebs = "ami-6d4af60d";
+  "16.09".us-west-2.x86_64-linux.pv-s3 = "ami-de48f4be";
+
+  # 17.03.885.6024dd4067
+  "17.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-dbd0f7bc";
+  "17.03".ap-northeast-1.x86_64-linux.hvm-s3 = "ami-7cdff81b";
+  "17.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-c59a48ab";
+  "17.03".ap-northeast-2.x86_64-linux.hvm-s3 = "ami-0b944665";
+  "17.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-4f413220";
+  "17.03".ap-south-1.x86_64-linux.hvm-s3 = "ami-864033e9";
+  "17.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-e08c3383";
+  "17.03".ap-southeast-1.x86_64-linux.hvm-s3 = "ami-c28f30a1";
+  "17.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-fca9a69f";
+  "17.03".ap-southeast-2.x86_64-linux.hvm-s3 = "ami-3daaa55e";
+  "17.03".ca-central-1.x86_64-linux.hvm-ebs = "ami-9b00bdff";
+  "17.03".ca-central-1.x86_64-linux.hvm-s3 = "ami-e800bd8c";
+  "17.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-5450803b";
+  "17.03".eu-central-1.x86_64-linux.hvm-s3 = "ami-6e2efe01";
+  "17.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-10754c76";
+  "17.03".eu-west-1.x86_64-linux.hvm-s3 = "ami-11734a77";
+  "17.03".eu-west-2.x86_64-linux.hvm-ebs = "ami-ff1d099b";
+  "17.03".eu-west-2.x86_64-linux.hvm-s3 = "ami-fe1d099a";
+  "17.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-d95d3eb5";
+  "17.03".sa-east-1.x86_64-linux.hvm-s3 = "ami-fca2c190";
+  "17.03".us-east-1.x86_64-linux.hvm-ebs = "ami-0940c61f";
+  "17.03".us-east-1.x86_64-linux.hvm-s3 = "ami-674fc971";
+  "17.03".us-east-2.x86_64-linux.hvm-ebs = "ami-afc2e6ca";
+  "17.03".us-east-2.x86_64-linux.hvm-s3 = "ami-a1cde9c4";
+  "17.03".us-west-1.x86_64-linux.hvm-ebs = "ami-587b2138";
+  "17.03".us-west-1.x86_64-linux.hvm-s3 = "ami-70411b10";
+  "17.03".us-west-2.x86_64-linux.hvm-ebs = "ami-a93daac9";
+  "17.03".us-west-2.x86_64-linux.hvm-s3 = "ami-5139ae31";
+
+  # 17.09.2681.59661f21be6
+  "17.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-a30192da";
+  "17.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-295a414d";
+  "17.09".eu-west-3.x86_64-linux.hvm-ebs = "ami-8c0eb9f1";
+  "17.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-266cfe49";
+  "17.09".us-east-1.x86_64-linux.hvm-ebs = "ami-40bee63a";
+  "17.09".us-east-2.x86_64-linux.hvm-ebs = "ami-9d84aff8";
+  "17.09".us-west-1.x86_64-linux.hvm-ebs = "ami-d14142b1";
+  "17.09".us-west-2.x86_64-linux.hvm-ebs = "ami-3eb40346";
+  "17.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-ca8207ae";
+  "17.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-84bccff8";
+  "17.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0dc5386f";
+  "17.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-89b921ef";
+  "17.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-179b3b79";
+  "17.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-4762202b";
+  "17.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-4e376021";
+
+  # 18.03.132946.1caae7247b8
+  "18.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-065c46ec";
+  "18.03".eu-west-2.x86_64-linux.hvm-ebs = "ami-64f31903";
+  "18.03".eu-west-3.x86_64-linux.hvm-ebs = "ami-5a8d3d27";
+  "18.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-09faf9e2";
+  "18.03".us-east-1.x86_64-linux.hvm-ebs = "ami-8b3538f4";
+  "18.03".us-east-2.x86_64-linux.hvm-ebs = "ami-150b3170";
+  "18.03".us-west-1.x86_64-linux.hvm-ebs = "ami-ce06ebad";
+  "18.03".us-west-2.x86_64-linux.hvm-ebs = "ami-586c3520";
+  "18.03".ca-central-1.x86_64-linux.hvm-ebs = "ami-aca72ac8";
+  "18.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-aa0b4d40";
+  "18.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-d0f254b2";
+  "18.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-456511a8";
+  "18.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-3366d15d";
+  "18.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-163e1f7a";
+  "18.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-6a390b05";
+
+  # 18.09.910.c15e342304a
+  "18.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-0f412186fb8a0ec97";
+  "18.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-0dada3805ce43c55e";
+  "18.09".eu-west-3.x86_64-linux.hvm-ebs = "ami-074df85565f2e02e2";
+  "18.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-07c9b884e679df4f8";
+  "18.09".us-east-1.x86_64-linux.hvm-ebs = "ami-009c9c3f1af480ff3";
+  "18.09".us-east-2.x86_64-linux.hvm-ebs = "ami-08199961085ea8bc6";
+  "18.09".us-west-1.x86_64-linux.hvm-ebs = "ami-07aa7f56d612ddd38";
+  "18.09".us-west-2.x86_64-linux.hvm-ebs = "ami-01c84b7c368ac24d1";
+  "18.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-04f66113f76198f6c";
+  "18.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0892c7e24ebf2194f";
+  "18.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-010730f36424b0a2c";
+  "18.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-0cdba8e998f076547";
+  "18.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0400a698e6a9f4a15";
+  "18.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-0e4a8a47fd6db6112";
+  "18.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-0880a678d3f555313";
+
+  # 19.03.172286.8ea36d73256
+  "19.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-0fe40176548ff0940";
+  "19.03".eu-west-2.x86_64-linux.hvm-ebs = "ami-03a40fd3a02fe95ba";
+  "19.03".eu-west-3.x86_64-linux.hvm-ebs = "ami-0436f9da0f20a638e";
+  "19.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-0022b8ea9efde5de4";
+  "19.03".us-east-1.x86_64-linux.hvm-ebs = "ami-0efc58fb70ae9a217";
+  "19.03".us-east-2.x86_64-linux.hvm-ebs = "ami-0abf711b1b34da1af";
+  "19.03".us-west-1.x86_64-linux.hvm-ebs = "ami-07d126e8838c40ec5";
+  "19.03".us-west-2.x86_64-linux.hvm-ebs = "ami-03f8a737546e47fb0";
+  "19.03".ca-central-1.x86_64-linux.hvm-ebs = "ami-03f9fd0ef2e035ede";
+  "19.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0cff66114c652c262";
+  "19.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-054c73a7f8d773ea9";
+  "19.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-00db62688900456a4";
+  "19.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0485cdd1a5fdd2117";
+  "19.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-0c6a43c6e0ad1f4e2";
+  "19.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-0303deb1b5890f878";
+
+  # 19.09.2243.84af403f54f
+  "19.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-071082f0fa035374f";
+  "19.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-0d9dc33c54d1dc4c3";
+  "19.09".eu-west-3.x86_64-linux.hvm-ebs = "ami-09566799591d1bfed";
+  "19.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-015f8efc2be419b79";
+  "19.09".eu-north-1.x86_64-linux.hvm-ebs = "ami-07fc0a32d885e01ed";
+  "19.09".us-east-1.x86_64-linux.hvm-ebs = "ami-03330d8b51287412f";
+  "19.09".us-east-2.x86_64-linux.hvm-ebs = "ami-0518b4c84972e967f";
+  "19.09".us-west-1.x86_64-linux.hvm-ebs = "ami-06ad07e61a353b4a6";
+  "19.09".us-west-2.x86_64-linux.hvm-ebs = "ami-0e31e30925cf3ce4e";
+  "19.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-07df50fc76702a36d";
+  "19.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0f71ae5d4b0b78d95";
+  "19.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-057bbf2b4bd62d210";
+  "19.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-02a62555ca182fb5b";
+  "19.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0219dde0e6b7b7b93";
+  "19.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-066f7f2a895c821a1";
+  "19.09".ap-east-1.x86_64-linux.hvm-ebs = "ami-055b2348db2827ff1";
+  "19.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-018aab68377227e06";
+
+  # 20.03.1554.94e39623a49
+  "20.03".eu-west-1.x86_64-linux.hvm-ebs = "ami-02c34db5766cc7013";
+  "20.03".eu-west-2.x86_64-linux.hvm-ebs = "ami-0e32bd8c7853883f1";
+  "20.03".eu-west-3.x86_64-linux.hvm-ebs = "ami-061edb1356c1d69fd";
+  "20.03".eu-central-1.x86_64-linux.hvm-ebs = "ami-0a1a94722dcbff94c";
+  "20.03".eu-north-1.x86_64-linux.hvm-ebs = "ami-02699abfacbb6464b";
+  "20.03".us-east-1.x86_64-linux.hvm-ebs = "ami-0c5e7760748b74e85";
+  "20.03".us-east-2.x86_64-linux.hvm-ebs = "ami-030296bb256764655";
+  "20.03".us-west-1.x86_64-linux.hvm-ebs = "ami-050be818e0266b741";
+  "20.03".us-west-2.x86_64-linux.hvm-ebs = "ami-06562f78dca68eda2";
+  "20.03".ca-central-1.x86_64-linux.hvm-ebs = "ami-02365684a173255c7";
+  "20.03".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0dbf353e168d155f7";
+  "20.03".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-04c0f3a75f63daddd";
+  "20.03".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-093d9cc49c191eb6c";
+  "20.03".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0087df91a7b6ebd45";
+  "20.03".ap-south-1.x86_64-linux.hvm-ebs = "ami-0a1a6b569af04af9d";
+  "20.03".ap-east-1.x86_64-linux.hvm-ebs = "ami-0d18fdd309cdefa86";
+  "20.03".sa-east-1.x86_64-linux.hvm-ebs = "ami-09859378158ae971d";
+  # 20.03.2351.f8248ab6d9e-aarch64-linux
+  "20.03".eu-west-1.aarch64-linux.hvm-ebs = "ami-0a4c46dfdfe921aab";
+  "20.03".eu-west-2.aarch64-linux.hvm-ebs = "ami-0b47871912b7d36f9";
+  "20.03".eu-west-3.aarch64-linux.hvm-ebs = "ami-01031e1aa505b8935";
+  "20.03".eu-central-1.aarch64-linux.hvm-ebs = "ami-0bb4669de1f477fd1";
+  # missing "20.03".eu-north-1.aarch64-linux.hvm-ebs = "ami-";
+  "20.03".us-east-1.aarch64-linux.hvm-ebs = "ami-01d2de16a1878271c";
+  "20.03".us-east-2.aarch64-linux.hvm-ebs = "ami-0eade0158b1ff49c0";
+  "20.03".us-west-1.aarch64-linux.hvm-ebs = "ami-0913bf30cb9a764a4";
+  "20.03".us-west-2.aarch64-linux.hvm-ebs = "ami-073449580ff8e82b5";
+  "20.03".ca-central-1.aarch64-linux.hvm-ebs = "ami-050f2e923c4d703c0";
+  "20.03".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-0d11ef6705a9a11a7";
+  "20.03".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-05446a2f818cd3263";
+  "20.03".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-0c057f010065d2453";
+  "20.03".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-0e90eda7f24eb33ab";
+  "20.03".ap-south-1.aarch64-linux.hvm-ebs = "ami-03ba7e9f093f568bc";
+  "20.03".sa-east-1.aarch64-linux.hvm-ebs = "ami-0a8344c6ce6d0c902";
+
+  # 20.09.2016.19db3e5ea27
+  "20.09".eu-west-1.x86_64-linux.hvm-ebs = "ami-0057cb7d614329fa2";
+  "20.09".eu-west-2.x86_64-linux.hvm-ebs = "ami-0d46f16e0bb0ec8fd";
+  "20.09".eu-west-3.x86_64-linux.hvm-ebs = "ami-0e8985c3ea42f87fe";
+  "20.09".eu-central-1.x86_64-linux.hvm-ebs = "ami-0eed77c38432886d2";
+  "20.09".eu-north-1.x86_64-linux.hvm-ebs = "ami-0be5bcadd632bea14";
+  "20.09".us-east-1.x86_64-linux.hvm-ebs = "ami-0a2cce52b42daccc8";
+  "20.09".us-east-2.x86_64-linux.hvm-ebs = "ami-09378bf487b07a4d8";
+  "20.09".us-west-1.x86_64-linux.hvm-ebs = "ami-09b4337b2a9e77485";
+  "20.09".us-west-2.x86_64-linux.hvm-ebs = "ami-081d3bb5fbee0a1ac";
+  "20.09".ca-central-1.x86_64-linux.hvm-ebs = "ami-020c24c6c607e7ac7";
+  "20.09".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-08f648d5db009e67d";
+  "20.09".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0be390efaccbd40f9";
+  "20.09".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-0c3311601cbe8f927";
+  "20.09".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0020146701f4d56cf";
+  "20.09".ap-south-1.x86_64-linux.hvm-ebs = "ami-0117e2bd876bb40d1";
+  "20.09".ap-east-1.x86_64-linux.hvm-ebs = "ami-0c42f97e5b1fda92f";
+  "20.09".sa-east-1.x86_64-linux.hvm-ebs = "ami-021637976b094959d";
+  # 20.09.2016.19db3e5ea27-aarch64-linux
+  "20.09".eu-west-1.aarch64-linux.hvm-ebs = "ami-00a02608ff45ff8f9";
+  "20.09".eu-west-2.aarch64-linux.hvm-ebs = "ami-0e991d0f8dca21e20";
+  "20.09".eu-west-3.aarch64-linux.hvm-ebs = "ami-0d18eec4dc48c6f3b";
+  "20.09".eu-central-1.aarch64-linux.hvm-ebs = "ami-01691f25d08f48c9e";
+  "20.09".eu-north-1.aarch64-linux.hvm-ebs = "ami-09bb5aabe567ec6f4";
+  "20.09".us-east-1.aarch64-linux.hvm-ebs = "ami-0504bd006f9eaae42";
+  "20.09".us-east-2.aarch64-linux.hvm-ebs = "ami-00f0f8f2ab2d695ad";
+  "20.09".us-west-1.aarch64-linux.hvm-ebs = "ami-02d147d2cb992f878";
+  "20.09".us-west-2.aarch64-linux.hvm-ebs = "ami-07f40006cf4d4820e";
+  "20.09".ca-central-1.aarch64-linux.hvm-ebs = "ami-0e5f563919a987894";
+  "20.09".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-083e35d1acecae5c1";
+  "20.09".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-052cdc008b245b067";
+  "20.09".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-05e137f373bd72c0c";
+  "20.09".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-020791fe4c32f851a";
+  "20.09".ap-south-1.aarch64-linux.hvm-ebs = "ami-0285bb96a0f2c3955";
+  "20.09".sa-east-1.aarch64-linux.hvm-ebs = "ami-0a55ab650c32be058";
+
+  # 21.05.740.aa576357673
+  "21.05".eu-west-1.x86_64-linux.hvm-ebs = "ami-048dbc738074a3083";
+  "21.05".eu-west-2.x86_64-linux.hvm-ebs = "ami-0234cf81fec68315d";
+  "21.05".eu-west-3.x86_64-linux.hvm-ebs = "ami-020e459baf709107d";
+  "21.05".eu-central-1.x86_64-linux.hvm-ebs = "ami-0857d5d1309ab8b77";
+  "21.05".eu-north-1.x86_64-linux.hvm-ebs = "ami-05403e3ae53d3716f";
+  "21.05".us-east-1.x86_64-linux.hvm-ebs = "ami-0d3002ba40b5b9897";
+  "21.05".us-east-2.x86_64-linux.hvm-ebs = "ami-069a0ca1bde6dea52";
+  "21.05".us-west-1.x86_64-linux.hvm-ebs = "ami-0b415460a84bcf9bc";
+  "21.05".us-west-2.x86_64-linux.hvm-ebs = "ami-093cba49754abd7f8";
+  "21.05".ca-central-1.x86_64-linux.hvm-ebs = "ami-065c13e1d52d60b33";
+  "21.05".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-04f570c70ff9b665e";
+  "21.05".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-02a3d1df595df5ef6";
+  "21.05".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-027836fddb5c56012";
+  "21.05".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0edacd41dc7700c39";
+  "21.05".ap-south-1.x86_64-linux.hvm-ebs = "ami-0b279b5bb55288059";
+  "21.05".ap-east-1.x86_64-linux.hvm-ebs = "ami-06dc98082bc55c1fc";
+  "21.05".sa-east-1.x86_64-linux.hvm-ebs = "ami-04737dd49b98936c6";
+
+  # 21.11.333823.96b4157790f-x86_64-linux
+  "21.11".eu-west-1.x86_64-linux.hvm-ebs = "ami-01d0304a712f2f3f0";
+  "21.11".eu-west-2.x86_64-linux.hvm-ebs = "ami-00e828bfc1e5d09ac";
+  "21.11".eu-west-3.x86_64-linux.hvm-ebs = "ami-0e1ea64430d8103f2";
+  "21.11".eu-central-1.x86_64-linux.hvm-ebs = "ami-0fcf28c07e86142c5";
+  "21.11".eu-north-1.x86_64-linux.hvm-ebs = "ami-0ee83a3c6590fd6b1";
+  "21.11".us-east-1.x86_64-linux.hvm-ebs = "ami-099756bfda4540da0";
+  "21.11".us-east-2.x86_64-linux.hvm-ebs = "ami-0b20a80b82052d23f";
+  "21.11".us-west-1.x86_64-linux.hvm-ebs = "ami-088ea590004b01752";
+  "21.11".us-west-2.x86_64-linux.hvm-ebs = "ami-0025b9d4831b911a7";
+  "21.11".ca-central-1.x86_64-linux.hvm-ebs = "ami-0e67089f898e74443";
+  "21.11".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0dc8d718279d3402d";
+  "21.11".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0155e842329970187";
+  "21.11".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-07c95eda953bf5435";
+  "21.11".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-04167df3cd952b3bd";
+  "21.11".ap-south-1.x86_64-linux.hvm-ebs = "ami-0680e05531b3db677";
+  "21.11".ap-east-1.x86_64-linux.hvm-ebs = "ami-0835a3e481dc240f9";
+  "21.11".sa-east-1.x86_64-linux.hvm-ebs = "ami-0f7c354c421348e51";
+
+  # 21.11.333823.96b4157790f-aarch64-linux
+  "21.11".eu-west-1.aarch64-linux.hvm-ebs = "ami-048f3eea6a12c4b3b";
+  "21.11".eu-west-2.aarch64-linux.hvm-ebs = "ami-0e6f18f2009806add";
+  "21.11".eu-west-3.aarch64-linux.hvm-ebs = "ami-0a28d593f5e938d80";
+  "21.11".eu-central-1.aarch64-linux.hvm-ebs = "ami-0b9c95d926ab9474c";
+  "21.11".eu-north-1.aarch64-linux.hvm-ebs = "ami-0f2d400b4a2368a1a";
+  "21.11".us-east-1.aarch64-linux.hvm-ebs = "ami-05afb75585567d386";
+  "21.11".us-east-2.aarch64-linux.hvm-ebs = "ami-07f360673c2fccf8d";
+  "21.11".us-west-1.aarch64-linux.hvm-ebs = "ami-0a6892c61d85774db";
+  "21.11".us-west-2.aarch64-linux.hvm-ebs = "ami-04eaf20283432e852";
+  "21.11".ca-central-1.aarch64-linux.hvm-ebs = "ami-036b69828502e7fdf";
+  "21.11".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-0d52e51e68b6954ef";
+  "21.11".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-000a3019e003f4fb9";
+  "21.11".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-09b0c7928780e25b6";
+  "21.11".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-05f80f3c83083ff62";
+  "21.11".ap-south-1.aarch64-linux.hvm-ebs = "ami-05b2a3ff8489c3f59";
+  "21.11".ap-east-1.aarch64-linux.hvm-ebs = "ami-0aa3b50a4f2822a00";
+  "21.11".sa-east-1.aarch64-linux.hvm-ebs = "ami-00f68eff453d3fe69";
+
+  # 22.05.342.a634c8f6c1f
+
+  "22.05".eu-west-1.x86_64-linux.hvm-ebs = "ami-00badba5cfa0a0c0d";
+  "22.05".af-south-1.x86_64-linux.hvm-ebs = "ami-0d3a6166c1ea4d7b4";
+  "22.05".ap-east-1.x86_64-linux.hvm-ebs = "ami-06445325c360470d8";
+  "22.05".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-009c422293bcf3721";
+  "22.05".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0bfc0397525a67ed8";
+  "22.05".ap-northeast-3.x86_64-linux.hvm-ebs = "ami-0a1fb4d4e08a6065e";
+  "22.05".ap-south-1.x86_64-linux.hvm-ebs = "ami-07ad258dcc69239d2";
+  "22.05".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0f59f7f33cba8b1a4";
+  "22.05".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0d1e49fe30aec165d";
+  "22.05".ap-southeast-3.x86_64-linux.hvm-ebs = "ami-0f5cb24a1e3fc62dd";
+  "22.05".ca-central-1.x86_64-linux.hvm-ebs = "ami-0551a595ba7916462";
+  "22.05".eu-central-1.x86_64-linux.hvm-ebs = "ami-0702eee2e75d541d1";
+  "22.05".eu-north-1.x86_64-linux.hvm-ebs = "ami-0fc6838942cb7d9cb";
+  "22.05".eu-south-1.x86_64-linux.hvm-ebs = "ami-0df9463b8965cdb80";
+  "22.05".eu-west-2.x86_64-linux.hvm-ebs = "ami-08f3c1eb533a42ac1";
+  "22.05".eu-west-3.x86_64-linux.hvm-ebs = "ami-04b50c79dc4009c97";
+  "22.05".me-south-1.x86_64-linux.hvm-ebs = "ami-05c52087afab7024d";
+  "22.05".sa-east-1.x86_64-linux.hvm-ebs = "ami-0732aa0f0c28f281b";
+  "22.05".us-east-1.x86_64-linux.hvm-ebs = "ami-0223db08811f6fb2d";
+  "22.05".us-east-2.x86_64-linux.hvm-ebs = "ami-0a743534fa3e51b41";
+  "22.05".us-west-1.x86_64-linux.hvm-ebs = "ami-0d72ab697beab5ea5";
+  "22.05".us-west-2.x86_64-linux.hvm-ebs = "ami-034946f0c47088751";
+
+  "22.05".eu-west-1.aarch64-linux.hvm-ebs = "ami-08114069426233360";
+  "22.05".af-south-1.aarch64-linux.hvm-ebs = "ami-0a9b83913abd61694";
+  "22.05".ap-east-1.aarch64-linux.hvm-ebs = "ami-03966ad4547f532b7";
+  "22.05".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-0eb7e152c8d5aae7d";
+  "22.05".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-08369e00c5528762b";
+  "22.05".ap-northeast-3.aarch64-linux.hvm-ebs = "ami-0fa14b8d48cdd57c3";
+  "22.05".ap-south-1.aarch64-linux.hvm-ebs = "ami-0f2ca3b542ff0913b";
+  "22.05".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-087def0511ef2687d";
+  "22.05".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-0aa90985199011f04";
+  "22.05".ap-southeast-3.aarch64-linux.hvm-ebs = "ami-0c86c52790deefa23";
+  "22.05".ca-central-1.aarch64-linux.hvm-ebs = "ami-06e932cc9c20403e4";
+  "22.05".eu-central-1.aarch64-linux.hvm-ebs = "ami-07680df1026a9b54c";
+  "22.05".eu-north-1.aarch64-linux.hvm-ebs = "ami-0cbe9f2725e4de706";
+  "22.05".eu-south-1.aarch64-linux.hvm-ebs = "ami-01a83c3892925765f";
+  "22.05".eu-west-2.aarch64-linux.hvm-ebs = "ami-049024d086d039b54";
+  "22.05".eu-west-3.aarch64-linux.hvm-ebs = "ami-0c0ebe20ebfc635a1";
+  "22.05".me-south-1.aarch64-linux.hvm-ebs = "ami-0d662fcaac553e945";
+  "22.05".sa-east-1.aarch64-linux.hvm-ebs = "ami-0888c8f703e00fdb8";
+  "22.05".us-east-1.aarch64-linux.hvm-ebs = "ami-03536a13324333073";
+  "22.05".us-east-2.aarch64-linux.hvm-ebs = "ami-067611519fa817aaa";
+  "22.05".us-west-1.aarch64-linux.hvm-ebs = "ami-0f96be48071c13ab2";
+  "22.05".us-west-2.aarch64-linux.hvm-ebs = "ami-084bc5d777585adfb";
+
+  # 22.11.466.596a8e828c5
+
+  "22.11".eu-west-1.x86_64-linux.hvm-ebs = "ami-01aafe08a4e74bd9a";
+  "22.11".af-south-1.x86_64-linux.hvm-ebs = "ami-0d937fc7bf7b8c2ed";
+  "22.11".ap-east-1.x86_64-linux.hvm-ebs = "ami-020e59f6affef2732";
+  "22.11".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-04a7bd7a969506a87";
+  "22.11".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-007b9209171e2dcdd";
+  "22.11".ap-northeast-3.x86_64-linux.hvm-ebs = "ami-0c4d0b584cd570584";
+  "22.11".ap-south-1.x86_64-linux.hvm-ebs = "ami-02aa47f84c215d593";
+  "22.11".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-067a7fca4a01c4dda";
+  "22.11".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-0638db75ba113c635";
+  "22.11".ap-southeast-3.x86_64-linux.hvm-ebs = "ami-08dcda749c59e8747";
+  "22.11".ca-central-1.x86_64-linux.hvm-ebs = "ami-09b007688e369f794";
+  "22.11".eu-central-1.x86_64-linux.hvm-ebs = "ami-05df1b211df600977";
+  "22.11".eu-north-1.x86_64-linux.hvm-ebs = "ami-0427d0897b928e191";
+  "22.11".eu-south-1.x86_64-linux.hvm-ebs = "ami-051beda489f0dd109";
+  "22.11".eu-west-2.x86_64-linux.hvm-ebs = "ami-0c2090b73fc610ac3";
+  "22.11".eu-west-3.x86_64-linux.hvm-ebs = "ami-0d03a150cf6c07022";
+  "22.11".me-south-1.x86_64-linux.hvm-ebs = "ami-0443b1af94bff9e3d";
+  "22.11".sa-east-1.x86_64-linux.hvm-ebs = "ami-07b2ce95ba17b6bc1";
+  "22.11".us-east-1.x86_64-linux.hvm-ebs = "ami-0508167db03652cc4";
+  "22.11".us-east-2.x86_64-linux.hvm-ebs = "ami-0e41ac272a7d67029";
+  "22.11".us-west-1.x86_64-linux.hvm-ebs = "ami-02f3fb062ee9af563";
+  "22.11".us-west-2.x86_64-linux.hvm-ebs = "ami-06b260b3a958948a0";
+
+  "22.11".eu-west-1.aarch64-linux.hvm-ebs = "ami-0c4132540cabbc7df";
+  "22.11".af-south-1.aarch64-linux.hvm-ebs = "ami-0f12780247b337357";
+  "22.11".ap-east-1.aarch64-linux.hvm-ebs = "ami-04789617e858da6fb";
+  "22.11".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-0f4d8517ab163b274";
+  "22.11".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-051a06893bcc696c1";
+  "22.11".ap-northeast-3.aarch64-linux.hvm-ebs = "ami-05a086610680a7d8b";
+  "22.11".ap-south-1.aarch64-linux.hvm-ebs = "ami-04cd79197824124cd";
+  "22.11".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-0437f330961467257";
+  "22.11".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-000c2ecbc430c36d7";
+  "22.11".ap-southeast-3.aarch64-linux.hvm-ebs = "ami-062e917296b5087c0";
+  "22.11".ca-central-1.aarch64-linux.hvm-ebs = "ami-0c91995b735d1b8b6";
+  "22.11".eu-central-1.aarch64-linux.hvm-ebs = "ami-0537d704b177a676b";
+  "22.11".eu-north-1.aarch64-linux.hvm-ebs = "ami-05f1f532f90d8e16c";
+  "22.11".eu-south-1.aarch64-linux.hvm-ebs = "ami-097fe290eafff61ad";
+  "22.11".eu-west-2.aarch64-linux.hvm-ebs = "ami-053b6cc7a3394891a";
+  "22.11".eu-west-3.aarch64-linux.hvm-ebs = "ami-0a5b6d023afde63c3";
+  "22.11".me-south-1.aarch64-linux.hvm-ebs = "ami-024fcb01f8638ed08";
+  "22.11".sa-east-1.aarch64-linux.hvm-ebs = "ami-06d72c6e930037236";
+  "22.11".us-east-1.aarch64-linux.hvm-ebs = "ami-0b33ffb684d6b07b5";
+  "22.11".us-east-2.aarch64-linux.hvm-ebs = "ami-033ff64078c59f378";
+  "22.11".us-west-1.aarch64-linux.hvm-ebs = "ami-052d52b9e30a18562";
+  "22.11".us-west-2.aarch64-linux.hvm-ebs = "ami-07418b6a4782c9521";
+
+  # 23.05.426.afc48694f2a
+
+  "23.05".eu-west-1.x86_64-linux.hvm-ebs = "ami-0fc7825fe890f87d1";
+  "23.05".af-south-1.x86_64-linux.hvm-ebs = "ami-0df2f7b42bfbd53e5";
+  "23.05".ap-east-1.x86_64-linux.hvm-ebs = "ami-07ba84d7321f6f4bb";
+  "23.05".ap-northeast-1.x86_64-linux.hvm-ebs = "ami-0e37827874573dbbf";
+  "23.05".ap-northeast-2.x86_64-linux.hvm-ebs = "ami-0ff5b3b7738651895";
+  "23.05".ap-northeast-3.x86_64-linux.hvm-ebs = "ami-0a7861571eb44c70c";
+  "23.05".ap-south-1.x86_64-linux.hvm-ebs = "ami-05c4802ca81d7c95b";
+  "23.05".ap-southeast-1.x86_64-linux.hvm-ebs = "ami-0aee8193da16bd2db";
+  "23.05".ap-southeast-2.x86_64-linux.hvm-ebs = "ami-008be032289f60d16";
+  "23.05".ap-southeast-3.x86_64-linux.hvm-ebs = "ami-033debde7c1659c96";
+  "23.05".ca-central-1.x86_64-linux.hvm-ebs = "ami-031821b5f83896474";
+  "23.05".eu-central-1.x86_64-linux.hvm-ebs = "ami-0d6ee9d5e1c985df6";
+  "23.05".eu-north-1.x86_64-linux.hvm-ebs = "ami-0cecb1f67b2a837f6";
+  "23.05".eu-south-1.x86_64-linux.hvm-ebs = "ami-0f9fee15eb5a64ac4";
+  "23.05".eu-west-2.x86_64-linux.hvm-ebs = "ami-0e62fef78d2c4f031";
+  "23.05".eu-west-3.x86_64-linux.hvm-ebs = "ami-01a6e4c1659b08390";
+  "23.05".me-south-1.x86_64-linux.hvm-ebs = "ami-0a01a7eeffa8f0fd5";
+  "23.05".sa-east-1.x86_64-linux.hvm-ebs = "ami-09a1760227f929ccf";
+  "23.05".us-east-1.x86_64-linux.hvm-ebs = "ami-07df5833f04703a2a";
+  "23.05".us-east-2.x86_64-linux.hvm-ebs = "ami-04dd2f100d9665df5";
+  "23.05".us-west-1.x86_64-linux.hvm-ebs = "ami-0fe502361fea4216c";
+  "23.05".us-west-2.x86_64-linux.hvm-ebs = "ami-0749963dd978a57c7";
+
+  "23.05".eu-west-1.aarch64-linux.hvm-ebs = "ami-0a0609421e5638005";
+  "23.05".af-south-1.aarch64-linux.hvm-ebs = "ami-05d95a055aba9373e";
+  "23.05".ap-east-1.aarch64-linux.hvm-ebs = "ami-08ae0190b1357465b";
+  "23.05".ap-northeast-1.aarch64-linux.hvm-ebs = "ami-09418b2049c3c9533";
+  "23.05".ap-northeast-2.aarch64-linux.hvm-ebs = "ami-040713ad23b404271";
+  "23.05".ap-northeast-3.aarch64-linux.hvm-ebs = "ami-0c888d6c1d989db68";
+  "23.05".ap-south-1.aarch64-linux.hvm-ebs = "ami-02da38deb21545675";
+  "23.05".ap-southeast-1.aarch64-linux.hvm-ebs = "ami-06df0713468bea276";
+  "23.05".ap-southeast-2.aarch64-linux.hvm-ebs = "ami-0171ee37ae5104c06";
+  "23.05".ap-southeast-3.aarch64-linux.hvm-ebs = "ami-075da61f5fef1fe80";
+  "23.05".ca-central-1.aarch64-linux.hvm-ebs = "ami-0ba8bd0a3d0a596f8";
+  "23.05".eu-central-1.aarch64-linux.hvm-ebs = "ami-0891608ae66031439";
+  "23.05".eu-north-1.aarch64-linux.hvm-ebs = "ami-0a3ad7ef18d595c68";
+  "23.05".eu-south-1.aarch64-linux.hvm-ebs = "ami-0fa86b680aa9a0444";
+  "23.05".eu-west-2.aarch64-linux.hvm-ebs = "ami-0a415791078f05970";
+  "23.05".eu-west-3.aarch64-linux.hvm-ebs = "ami-05d9b146317962e3b";
+  "23.05".me-south-1.aarch64-linux.hvm-ebs = "ami-0019b591acf30aa66";
+  "23.05".sa-east-1.aarch64-linux.hvm-ebs = "ami-030d6c30d91f06cc7";
+  "23.05".us-east-1.aarch64-linux.hvm-ebs = "ami-0a061ca437b63df33";
+  "23.05".us-east-2.aarch64-linux.hvm-ebs = "ami-0bf0b2b8fdfda30e8";
+  "23.05".us-west-1.aarch64-linux.hvm-ebs = "ami-0e75c8f3deb1f842b";
+  "23.05".us-west-2.aarch64-linux.hvm-ebs = "ami-0d0979d889078d036";
+
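+  # The table is keyed as release -> region -> system -> virtualisation type;
+  # for example, self."23.05".us-east-1.x86_64-linux.hvm-ebs evaluates to the
+  # 23.05 AMI for us-east-1 on x86_64.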
+  latest = self."23.05";
+}; in self
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-image.nix b/nixpkgs/nixos/modules/virtualisation/amazon-image.nix
new file mode 100644
index 000000000000..aa44f2642697
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-image.nix
@@ -0,0 +1,105 @@
+# Configuration for Amazon EC2 instances. (Note that this file's name is a
+# misnomer - it should be "amazon-config.nix" or so, not
+# "amazon-image.nix", since it's used not only to build images but
+# also to reconfigure instances. However, we can't rename it because
+# existing "configuration.nix" files on EC2 instances refer to it.)
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.ec2;
+in
+
+{
+  imports = [
+    ../profiles/headless.nix
+    # Note: While we do use the headless profile, we also explicitly
+    # turn on the serial console on ttyS0 below. This is because
+    # AWS does support accessing the serial console:
+    # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/configure-access-to-serial-console.html
+    ./ec2-data.nix
+    ./amazon-init.nix
+  ];
+
+  config = {
+
+    assertions = [ ];
+
+    boot.growPartition = true;
+
+    fileSystems."/" = mkIf (!cfg.zfs.enable) {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    fileSystems."/boot" = mkIf (cfg.efi || cfg.zfs.enable) {
+      # The ZFS image uses a partition labeled ESP whether or not we're
+      # booting with EFI.
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    services.zfs.expandOnBoot = mkIf cfg.zfs.enable "all";
+
+    boot.zfs.devNodes = mkIf cfg.zfs.enable "/dev/";
+
+    boot.extraModulePackages = [
+      config.boot.kernelPackages.ena
+    ];
+    boot.initrd.kernelModules = [ "xen-blkfront" ];
+    boot.initrd.availableKernelModules = [ "nvme" ];
+    boot.kernelParams = [ "console=ttyS0,115200n8" "random.trust_cpu=on" ];
+
+    # Prevent the nouveau kernel module from being loaded, as it
+    # interferes with the nvidia/nvidia-uvm modules needed for CUDA.
+    # Also blacklist xen_fbfront to prevent a 30 second delay during
+    # boot.
+    boot.blacklistedKernelModules = [ "nouveau" "xen_fbfront" ];
+
+    boot.loader.grub.device = if cfg.efi then "nodev" else "/dev/xvda";
+    boot.loader.grub.efiSupport = cfg.efi;
+    boot.loader.grub.efiInstallAsRemovable = cfg.efi;
+    boot.loader.timeout = 1;
+    boot.loader.grub.extraConfig = ''
+      serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
+      terminal_output console serial
+      terminal_input console serial
+    '';
+
+    systemd.services.fetch-ec2-metadata = {
+      wantedBy = [ "multi-user.target" ];
+      after = ["network-online.target"];
+      path = [ pkgs.curl ];
+      script = builtins.readFile ./ec2-metadata-fetcher.sh;
+      serviceConfig.Type = "oneshot";
+      serviceConfig.StandardOutput = "journal+console";
+    };
+
+    # Allow root logins only using the SSH key that the user specified
+    # at instance creation time.
+    services.openssh.enable = true;
+    services.openssh.settings.PermitRootLogin = "prohibit-password";
+
+    # Enable the serial console on ttyS0
+    systemd.services."serial-getty@ttyS0".enable = true;
+
+    # Creates symlinks for block device names.
+    services.udev.packages = [ pkgs.amazon-ec2-utils ];
+
+    # Force getting the hostname from EC2.
+    networking.hostName = mkDefault "";
+
+    # Always include cryptsetup so that Charon can use it.
+    environment.systemPackages = [ pkgs.cryptsetup ];
+
+    # EC2 has its own NTP server provided by the hypervisor
+    networking.timeServers = [ "169.254.169.123" ];
+
+    # udisks has become too bloated to have in a headless system
+    # (e.g. it depends on GTK).
+    services.udisks2.enable = false;
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-init.nix b/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
new file mode 100644
index 000000000000..8b98f2e32dd5
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-init.nix
@@ -0,0 +1,87 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.amazon-init;
+
+  script = ''
+    #!${pkgs.runtimeShell} -eu
+
+    echo "attempting to fetch configuration from EC2 user data..."
+
+    export HOME=/root
+    export PATH=${pkgs.lib.makeBinPath [ config.nix.package config.systemd.package pkgs.gnugrep pkgs.git pkgs.gnutar pkgs.gzip pkgs.gnused pkgs.xz config.system.build.nixos-rebuild]}:$PATH
+    export NIX_PATH=nixpkgs=/nix/var/nix/profiles/per-user/root/channels/nixos:nixos-config=/etc/nixos/configuration.nix:/nix/var/nix/profiles/per-user/root/channels
+
+    userData=/etc/ec2-metadata/user-data
+
+    # Check if user-data looks like a shell script and execute it with the
+    # runtime shell if it does. Otherwise treat it as a NixOS configuration
+    # expression.
+    if IFS= LC_ALL=C read -rN2 shebang < $userData && [ "$shebang" = '#!' ]; then
+      # NB: we cannot chmod the $userData file, which is why we execute it via
+      # `pkgs.runtimeShell`. This means we have only limited support for shell
+      # scripts compatible with `pkgs.runtimeShell`.
+      exec ${pkgs.runtimeShell} $userData
+    fi
+
+    if [ -s "$userData" ]; then
+      # If the user-data looks like it could be a Nix expression,
+      # copy it over. Also, look for magic three-hash comment lines and use
+      # them as the channel list.
+      if sed '/^\(#\|SSH_HOST_.*\)/d' < "$userData" | grep -q '\S'; then
+        channels="$(grep '^###' "$userData" | sed 's|###\s*||')"
+        while IFS= read -r channel; do
+          echo "writing channel: $channel"
+        done < <(printf "%s\n" "$channels")
+
+        if [[ -n "$channels" ]]; then
+          printf "%s" "$channels" > /root/.nix-channels
+          nix-channel --update
+        fi
+
+        echo "setting configuration from EC2 user data"
+        cp "$userData" /etc/nixos/configuration.nix
+      else
+        echo "user data does not appear to be a Nix expression; ignoring"
+        exit
+      fi
+    else
+      echo "no user data is available"
+      exit
+    fi
+
+    nixos-rebuild switch
+  '';
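+
+  # Illustrative user data (an example added for clarity; the exact payload is
+  # up to the instance creator): either a shell script starting with "#!", or
+  # a NixOS configuration optionally preceded by "###" channel lines, e.g.
+  #
+  #   ### https://nixos.org/channels/nixos-23.05 nixos
+  #   { config, pkgs, ... }:
+  #   { services.openssh.enable = true; }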
+in {
+
+  options.virtualisation.amazon-init = {
+    enable = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc ''
+        Enable or disable the amazon-init service.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    systemd.services.amazon-init = {
+      inherit script;
+      description = "Reconfigure the system from EC2 userdata on startup";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [ "multi-user.target" ];
+      requires = [ "network-online.target" ];
+
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/amazon-options.nix b/nixpkgs/nixos/modules/virtualisation/amazon-options.nix
new file mode 100644
index 000000000000..3ea4a6cf7818
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/amazon-options.nix
@@ -0,0 +1,73 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) literalExpression types;
+in {
+  options = {
+    ec2 = {
+      zfs = {
+        enable = lib.mkOption {
+          default = false;
+          internal = true;
+          description = lib.mdDoc ''
+            Whether the EC2 instance uses a ZFS root.
+          '';
+        };
+
+        datasets = lib.mkOption {
+          description = lib.mdDoc ''
+            Datasets to create under the `tank` and `boot` zpools.
+
+            **NOTE:** This option is used only at image creation time, and
+            does not attempt to declaratively create or manage datasets
+            on an existing system.
+          '';
+
+          default = {};
+
+          type = types.attrsOf (types.submodule {
+            options = {
+              mount = lib.mkOption {
+                description = lib.mdDoc "Where to mount this dataset.";
+                type = types.nullOr types.str;
+                default = null;
+              };
+
+              properties = lib.mkOption {
+                description = lib.mdDoc "Properties to set on this dataset.";
+                type = types.attrsOf types.str;
+                default = {};
+              };
+            };
+          });
+        };
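+
+        # Hypothetical example of a value (illustrative only, not taken from
+        # this module):
+        #   ec2.zfs.datasets."tank/home" = {
+        #     mount = "/home";
+        #     properties.compression = "zstd";
+        #   };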
+      };
+      efi = lib.mkOption {
+        default = pkgs.stdenv.hostPlatform.isAarch64;
+        defaultText = literalExpression "pkgs.stdenv.hostPlatform.isAarch64";
+        internal = true;
+        description = lib.mdDoc ''
+          Whether the EC2 instance is using EFI.
+        '';
+      };
+      hvm = lib.mkOption {
+        description = "Unused legacy option. While support for non-hvm has been dropped, we keep this option around so that NixOps remains compatible with a somewhat recent `nixpkgs` and machines with an old `stateVersion`.";
+        internal = true;
+        default = true;
+        readOnly = true;
+      };
+    };
+  };
+
+  config = lib.mkIf config.ec2.zfs.enable {
+    networking.hostId = lib.mkDefault "00000000";
+
+    fileSystems = let
+      mountable = lib.filterAttrs (_: value: ((value.mount or null) != null)) config.ec2.zfs.datasets;
+    in lib.mapAttrs'
+      (dataset: opts: lib.nameValuePair opts.mount {
+        device = dataset;
+        fsType = "zfs";
+      })
+      mountable;
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/anbox.nix b/nixpkgs/nixos/modules/virtualisation/anbox.nix
new file mode 100644
index 000000000000..523d9a9576ef
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/anbox.nix
@@ -0,0 +1,176 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.anbox;
+
+  addrOpts = v: addr: pref: name: {
+    address = mkOption {
+      default = addr;
+      type = types.str;
+      description = lib.mdDoc ''
+        IPv${toString v} ${name} address.
+      '';
+    };
+
+    prefixLength = mkOption {
+      default = pref;
+      type = types.addCheck types.int (n: n >= 0 && n <= (if v == 4 then 32 else 128));
+      description = lib.mdDoc ''
+        Subnet mask of the ${name} address, specified as the number of
+        bits in the prefix (`${if v == 4 then "24" else "64"}`).
+      '';
+    };
+  };
+
+  finalImage = if cfg.imageModifications == "" then cfg.image else ( pkgs.callPackage (
+    { runCommandNoCC, squashfsTools }:
+
+    runCommandNoCC "${cfg.image.name}-modified.img" {
+      nativeBuildInputs = [
+        squashfsTools
+      ];
+    } ''
+      echo "-> Extracting Anbox root image..."
+      unsquashfs -dest rootfs ${cfg.image}
+
+      echo "-> Modifying Anbox root image..."
+      (
+      cd rootfs
+      ${cfg.imageModifications}
+      )
+
+      echo "-> Packing modified Anbox root image..."
+      mksquashfs rootfs $out -comp xz -no-xattrs -all-root
+    ''
+  ) { });
+
+in
+
+{
+
+  options.virtualisation.anbox = {
+
+    enable = mkEnableOption (lib.mdDoc "Anbox");
+
+    image = mkOption {
+      default = pkgs.anbox.image;
+      defaultText = literalExpression "pkgs.anbox.image";
+      type = types.package;
+      description = lib.mdDoc ''
+        Base android image for Anbox.
+      '';
+    };
+
+    imageModifications = mkOption {
+      default = "";
+      type = types.lines;
+      description = lib.mdDoc ''
+        Commands to edit the image filesystem.
+
+        This can be used to e.g. bundle a privileged F-Droid.
+
+        Commands are run with the root of the image filesystem as the working directory.
+      '';
+    };
+
+    extraInit = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra shell commands to be run inside the container image during init.
+      '';
+    };
+
+    ipv4 = {
+      container = addrOpts 4 "192.168.250.2" 24 "Container";
+      gateway = addrOpts 4 "192.168.250.1" 24 "Host";
+
+      dns = mkOption {
+        default = "1.1.1.1";
+        type = types.str;
+        description = lib.mdDoc ''
+          Container DNS server.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+
+    assertions = singleton {
+      assertion = with config.boot.kernelPackages; kernelAtLeast "5.5" && kernelOlder "5.18";
+      message = "Anbox needs a kernel with binder and ashmem support";
+    };
+
+    environment.systemPackages = with pkgs; [ anbox ];
+
+    systemd.mounts = singleton {
+      requiredBy = [ "anbox-container-manager.service" ];
+      description = "Anbox Binder File System";
+      what = "binder";
+      where = "/dev/binderfs";
+      type = "binder";
+    };
+
+    virtualisation.lxc.enable = true;
+    networking.bridges.anbox0.interfaces = [];
+    networking.interfaces.anbox0.ipv4.addresses = [ cfg.ipv4.gateway ];
+
+    networking.nat = {
+      enable = true;
+      internalInterfaces = [ "anbox0" ];
+    };
+
+    # Ensures NetworkManager doesn't touch anbox0
+    networking.networkmanager.unmanaged = [ "anbox0" ];
+
+    systemd.services.anbox-container-manager = let
+      anboxloc = "/var/lib/anbox";
+    in {
+      description = "Anbox Container Management Daemon";
+
+      environment.XDG_RUNTIME_DIR="${anboxloc}";
+
+      wantedBy = [ "multi-user.target" ];
+      preStart = let
+        initsh = pkgs.writeText "nixos-init" (''
+          #!/system/bin/sh
+          setprop nixos.version ${config.system.nixos.version}
+
+          # we don't have radio
+          setprop ro.radio.noril yes
+          stop ril-daemon
+
+          # speed up boot
+          setprop debug.sf.nobootanimation 1
+        '' + cfg.extraInit);
+        initshloc = "${anboxloc}/rootfs-overlay/system/etc/init.goldfish.sh";
+      in ''
+        mkdir -p ${anboxloc}
+        mkdir -p $(dirname ${initshloc})
+        [ -f ${initshloc} ] && rm ${initshloc}
+        cp ${initsh} ${initshloc}
+        chown 100000:100000 ${initshloc}
+        chmod +x ${initshloc}
+      '';
+
+      serviceConfig = {
+        ExecStart = ''
+          ${pkgs.anbox}/bin/anbox container-manager \
+            --data-path=${anboxloc} \
+            --android-image=${finalImage} \
+            --container-network-address=${cfg.ipv4.container.address} \
+            --container-network-gateway=${cfg.ipv4.gateway.address} \
+            --container-network-dns-servers=${cfg.ipv4.dns} \
+            --use-rootfs-overlay \
+            --privileged \
+            --daemon
+        '';
+      };
+    };
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/appvm.nix b/nixpkgs/nixos/modules/virtualisation/appvm.nix
new file mode 100644
index 000000000000..9fe2995d37a0
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/appvm.nix
@@ -0,0 +1,49 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.appvm;
+
+in {
+
+  options = {
+    virtualisation.appvm = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          This enables AppVMs and related virtualisation settings.
+        '';
+      };
+      user = mkOption {
+        type = types.str;
+        description = lib.mdDoc ''
+          AppVM user login. Currently AppVMs are supported for a single user only.
+        '';
+      };
+    };
+
+  };
+
+  config = mkIf cfg.enable {
+    virtualisation.libvirtd = {
+      enable = true;
+      qemu.verbatimConfig = ''
+        namespaces = []
+        user = "${cfg.user}"
+        group = "users"
+        remember_owner = 0
+      '';
+    };
+
+    users.users."${cfg.user}" = {
+      packages = [ pkgs.appvm ];
+      extraGroups = [ "libvirtd" ];
+    };
+
+  };
+
+}
+
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-agent.nix b/nixpkgs/nixos/modules/virtualisation/azure-agent.nix
new file mode 100644
index 000000000000..e712fac17a46
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-agent.nix
@@ -0,0 +1,268 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+
+  cfg = config.virtualisation.azure.agent;
+
+  provisionedHook = pkgs.writeScript "provisioned-hook" ''
+    #!${pkgs.runtimeShell}
+    /run/current-system/systemd/bin/systemctl start provisioned.target
+  '';
+
+in
+
+{
+
+  ###### interface
+
+  options.virtualisation.azure.agent = {
+    enable = mkOption {
+      default = false;
+      description = lib.mdDoc "Whether to enable the Windows Azure Linux Agent.";
+    };
+    verboseLogging = mkOption {
+      default = false;
+      description = lib.mdDoc "Whether to enable verbose logging.";
+    };
+    mountResourceDisk = mkOption {
+      default = true;
+      description = lib.mdDoc "Whether the agent should format (ext4) and mount the resource disk to /mnt/resource.";
+    };
+  };
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+    assertions = [{
+      assertion = pkgs.stdenv.hostPlatform.isx86;
+      message = "Azure not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    }
+      {
+        assertion = config.networking.networkmanager.enable == false;
+        message = "Windows Azure Linux Agent is not compatible with NetworkManager";
+      }];
+
+    boot.initrd.kernelModules = [ "ata_piix" ];
+    networking.firewall.allowedUDPPorts = [ 68 ];
+
+
+    environment.etc."waagent.conf".text = ''
+        #
+        # Microsoft Azure Linux Agent Configuration
+        #
+
+        # Enable extension handling. Do not disable this unless you do not need password reset,
+        # backup, monitoring, or any extension handling whatsoever.
+        Extensions.Enabled=y
+
+        # How often (in seconds) to poll for new goal states
+        Extensions.GoalStatePeriod=6
+
+        # Which provisioning agent to use. Supported values are "auto" (default), "waagent",
+        # "cloud-init", or "disabled".
+        Provisioning.Agent=auto
+
+        # Password authentication for root account will be unavailable.
+        Provisioning.DeleteRootPassword=n
+
+        # Generate fresh host key pair.
+        Provisioning.RegenerateSshHostKeyPair=n
+
+        # Supported values are "rsa", "dsa", "ecdsa", "ed25519", and "auto".
+        # The "auto" option is supported on OpenSSH 5.9 (2011) and later.
+        Provisioning.SshHostKeyPairType=ed25519
+
+        # Monitor host name changes and publish changes via DHCP requests.
+        Provisioning.MonitorHostName=y
+
+        # How often (in seconds) to monitor host name changes.
+        Provisioning.MonitorHostNamePeriod=30
+
+        # Decode CustomData from Base64.
+        Provisioning.DecodeCustomData=n
+
+        # Execute CustomData after provisioning.
+        Provisioning.ExecuteCustomData=n
+
+        # Algorithm used by crypt when generating password hash.
+        #Provisioning.PasswordCryptId=6
+
+        # Length of random salt used when generating password hash.
+        #Provisioning.PasswordCryptSaltLength=10
+
+        # Allow reset password of sys user
+        Provisioning.AllowResetSysUser=n
+
+        # Format if unformatted. If 'n', resource disk will not be mounted.
+        ResourceDisk.Format=${if cfg.mountResourceDisk then "y" else "n"}
+
+        # File system on the resource disk
+        # Typically ext3 or ext4. FreeBSD images should use 'ufs2' here.
+        ResourceDisk.Filesystem=ext4
+
+        # Mount point for the resource disk
+        ResourceDisk.MountPoint=/mnt/resource
+
+        # Create and use swapfile on resource disk.
+        ResourceDisk.EnableSwap=n
+
+        # Size of the swapfile.
+        ResourceDisk.SwapSizeMB=0
+
+        # Comma-separated list of mount options. See mount(8) for valid options.
+        ResourceDisk.MountOptions=None
+
+        # Enable verbose logging (y|n)
+        Logs.Verbose=${if cfg.verboseLogging then "y" else "n"}
+
+        # Enable Console logging, default is y
+        # Logs.Console=y
+
+        # Enable periodic log collection, default is n
+        Logs.Collect=n
+
+        # How frequently to collect logs, default is each hour
+        Logs.CollectPeriod=3600
+
+        # Is FIPS enabled
+        OS.EnableFIPS=n
+
+        # Root device timeout in seconds.
+        OS.RootDeviceScsiTimeout=300
+
+        # How often (in seconds) to set the root device timeout.
+        OS.RootDeviceScsiTimeoutPeriod=30
+
+        # If "None", the system default version is used.
+        OS.OpensslPath=${pkgs.openssl_3.bin}/bin/openssl
+
+        # Set the SSH ClientAliveInterval
+        # OS.SshClientAliveInterval=180
+
+        # Set the path to SSH keys and configuration files
+        OS.SshDir=/etc/ssh
+
+        # If set, agent will use proxy server to access internet
+        #HttpProxy.Host=None
+        #HttpProxy.Port=None
+
+        # Detect Scvmm environment, default is n
+        # DetectScvmmEnv=n
+
+        #
+        # Lib.Dir=/var/lib/waagent
+
+        #
+        # DVD.MountPoint=/mnt/cdrom/secure
+
+        #
+        # Pid.File=/var/run/waagent.pid
+
+        #
+        # Extension.LogDir=/var/log/azure
+
+        #
+        # Home.Dir=/home
+
+        # Enable RDMA management and set up, should only be used in HPC images
+        OS.EnableRDMA=n
+
+        # Enable checking RDMA driver version and update
+        # OS.CheckRdmaDriver=y
+
+        # Enable or disable goal state processing auto-update, default is enabled
+        AutoUpdate.Enabled=n
+
+        # Determine the update family, this should not be changed
+        # AutoUpdate.GAFamily=Prod
+
+        # Determine if the overprovisioning feature is enabled. If yes, hold extension
+        # handling until inVMArtifactsProfile.OnHold is false.
+        # Default is enabled
+        EnableOverProvisioning=n
+
+        # Allow fallback to HTTP if HTTPS is unavailable
+        # Note: Allowing HTTP (vs. HTTPS) may cause security risks
+        # OS.AllowHTTP=n
+
+        # Add firewall rules to protect access to Azure host node services
+        OS.EnableFirewall=n
+
+        # How often (in seconds) to check the firewall rules
+        OS.EnableFirewallPeriod=30
+
+        # How often (in seconds) to remove the udev rules for persistent network interface
+        # names (75-persistent-net-generator.rules and /etc/udev/rules.d/70-persistent-net.rules)
+        OS.RemovePersistentNetRulesPeriod=30
+
+        # How often (in seconds) to monitor for DHCP client restarts
+        OS.MonitorDhcpClientRestartPeriod=30
+    '';
+
+    services.udev.packages = [ pkgs.waagent ];
+
+    networking.dhcpcd.persistent = true;
+
+    services.logrotate = {
+      enable = true;
+      settings."/var/log/waagent.log" = {
+        compress = true;
+        frequency = "monthly";
+        rotate = 6;
+      };
+    };
+
+    systemd.targets.provisioned = {
+      description = "Services Requiring Azure VM provisioning to have finished";
+    };
+
+    systemd.services.consume-hypervisor-entropy =
+      {
+        description = "Consume entropy in ACPI table provided by Hyper-V";
+
+        wantedBy = [ "sshd.service" "waagent.service" ];
+        before = [ "sshd.service" "waagent.service" ];
+
+        path = [ pkgs.coreutils ];
+        script =
+          ''
+            echo "Fetching entropy..."
+            cat /sys/firmware/acpi/tables/OEM0 > /dev/random
+          '';
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.StandardError = "journal+console";
+        serviceConfig.StandardOutput = "journal+console";
+      };
+
+    systemd.services.waagent = {
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network-online.target" "sshd.service" ];
+      wants = [ "network-online.target" ];
+
+      path = [
+        pkgs.e2fsprogs
+        pkgs.bash
+
+        # waagent's Microsoft.OSTCExtensions.VMAccessForLinux needs Python 3
+        pkgs.python39
+
+        # waagent's Microsoft.CPlat.Core.RunCommandLinux needs lsof
+        pkgs.lsof
+      ];
+      description = "Windows Azure Agent Service";
+      unitConfig.ConditionPathExists = "/etc/waagent.conf";
+      serviceConfig = {
+        ExecStart = "${pkgs.waagent}/bin/waagent -daemon";
+        Type = "simple";
+      };
+    };
+
+    # waagent will generate files under /etc/sudoers.d during provisioning
+    security.sudo.extraConfig = ''
+      #includedir /etc/sudoers.d
+    '';
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix b/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix
new file mode 100644
index 000000000000..281be9a12318
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-bootstrap-blobs.nix
@@ -0,0 +1,3 @@
+{
+    "16.03" = "https://nixos.blob.core.windows.net/images/nixos-image-16.03.847.8688c17-x86_64-linux.vhd";
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-common.nix b/nixpkgs/nixos/modules/virtualisation/azure-common.nix
new file mode 100644
index 000000000000..cd1ffdb6cbcc
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-common.nix
@@ -0,0 +1,67 @@
+{ lib, pkgs, ... }:
+
+with lib;
+{
+  imports = [ ../profiles/headless.nix ];
+
+  require = [ ./azure-agent.nix ];
+  virtualisation.azure.agent.enable = true;
+
+  boot.kernelParams = [ "console=ttyS0" "earlyprintk=ttyS0" "rootdelay=300" "panic=1" "boot.panic_on_fail" ];
+  boot.initrd.kernelModules = [ "hv_vmbus" "hv_netvsc" "hv_utils" "hv_storvsc" ];
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/sda";
+  boot.loader.timeout = 0;
+
+  boot.growPartition = true;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  fileSystems."/" = {
+    device = "/dev/disk/by-label/nixos";
+    fsType = "ext4";
+    autoResize = true;
+  };
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time; ping client connections to avoid timeouts.
+  services.openssh.enable = true;
+  services.openssh.settings.PermitRootLogin = "prohibit-password";
+  services.openssh.settings.ClientAliveInterval = 180;
+
+  # Force getting the hostname from Azure
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  # sg_scan is needed to finalize disk removal on older kernels
+  environment.systemPackages = [ pkgs.cryptsetup pkgs.sg3_utils ];
+
+  networking.usePredictableInterfaceNames = false;
+
+  services.udev.extraRules = ''
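+    # Create stable /dev/disk/by-lun/<n> symlinks for the data disks that
+    # Azure attaches as SCSI LUNs 0-15 (the OS disk, sda, is excluded).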
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:0", ATTR{removable}=="0", SYMLINK+="disk/by-lun/0",
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:1", ATTR{removable}=="0", SYMLINK+="disk/by-lun/1",
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:2", ATTR{removable}=="0", SYMLINK+="disk/by-lun/2"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:3", ATTR{removable}=="0", SYMLINK+="disk/by-lun/3"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:4", ATTR{removable}=="0", SYMLINK+="disk/by-lun/4"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:5", ATTR{removable}=="0", SYMLINK+="disk/by-lun/5"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:6", ATTR{removable}=="0", SYMLINK+="disk/by-lun/6"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:7", ATTR{removable}=="0", SYMLINK+="disk/by-lun/7"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:8", ATTR{removable}=="0", SYMLINK+="disk/by-lun/8"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:9", ATTR{removable}=="0", SYMLINK+="disk/by-lun/9"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:10", ATTR{removable}=="0", SYMLINK+="disk/by-lun/10"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:11", ATTR{removable}=="0", SYMLINK+="disk/by-lun/11"
+
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:12", ATTR{removable}=="0", SYMLINK+="disk/by-lun/12"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:13", ATTR{removable}=="0", SYMLINK+="disk/by-lun/13"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:14", ATTR{removable}=="0", SYMLINK+="disk/by-lun/14"
+    ENV{DEVTYPE}=="disk", KERNEL!="sda" SUBSYSTEM=="block", SUBSYSTEMS=="scsi", KERNELS=="?:0:0:15", ATTR{removable}=="0", SYMLINK+="disk/by-lun/15"
+
+  '';
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix b/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix
new file mode 100644
index 000000000000..267ba50ae025
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-config-user.nix
@@ -0,0 +1,12 @@
+{ modulesPath, ... }:
+
+{
+  # To build the configuration or use nix-env, you need to run
+  # either nixos-rebuild --upgrade or nix-channel --update
+  # to fetch the NixOS channel.
+
+  # This configures everything but bootstrap services,
+  # which only need to be run once and have already finished
+  # if you are able to see this comment.
+  imports = [ "${modulesPath}/virtualisation/azure-common.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-config.nix b/nixpkgs/nixos/modules/virtualisation/azure-config.nix
new file mode 100644
index 000000000000..780bd1b78dce
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-config.nix
@@ -0,0 +1,5 @@
+{ modulesPath, ... }:
+
+{
+  imports = [ "${modulesPath}/virtualisation/azure-image.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-image.nix b/nixpkgs/nixos/modules/virtualisation/azure-image.nix
new file mode 100644
index 000000000000..d909680cca1f
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-image.nix
@@ -0,0 +1,41 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.azureImage;
+in
+{
+  imports = [ ./azure-common.nix ];
+
+  options = {
+    virtualisation.azureImage.diskSize = mkOption {
+      type = with types; either (enum [ "auto" ]) int;
+      default = "auto";
+      example = 2048;
+      description = lib.mdDoc ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+    virtualisation.azureImage.contents = mkOption {
+      type = with types; listOf attrs;
+      default = [ ];
+      description = lib.mdDoc ''
+        Extra contents to add to the image.
+      '';
+    };
+  };
+  config = {
+    system.build.azureImage = import ../../lib/make-disk-image.nix {
+      name = "azure-image";
+      postVM = ''
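+        # Azure expects a fixed-size VHD, hence the fixed subformat and
+        # force_size when converting the raw disk image.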
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=fixed,force_size -O vpc $diskImage $out/disk.vhd
+        rm $diskImage
+      '';
+      configFile = ./azure-config-user.nix;
+      format = "raw";
+      inherit (cfg) diskSize contents;
+      inherit config lib pkgs;
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/azure-images.nix b/nixpkgs/nixos/modules/virtualisation/azure-images.nix
new file mode 100644
index 000000000000..22c82fc14f65
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/azure-images.nix
@@ -0,0 +1,5 @@
+let self = {
+  "16.09" = "https://nixos.blob.core.windows.net/images/nixos-image-16.09.1694.019dcc3-x86_64-linux.vhd";
+
+  latest = self."16.09";
+}; in self
diff --git a/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix b/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix
new file mode 100644
index 000000000000..0a018e4cd695
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/brightbox-config.nix
@@ -0,0 +1,5 @@
+{ modulesPath, ... }:
+
+{
+  imports = [ "${modulesPath}/virtualisation/brightbox-image.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix b/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix
new file mode 100644
index 000000000000..15f8fd6d8f7d
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/brightbox-image.nix
@@ -0,0 +1,166 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  diskSize = "20G";
+in
+{
+  imports = [ ../profiles/headless.nix ../profiles/qemu-guest.nix ];
+
+  system.build.brightboxImage =
+    pkgs.vmTools.runInLinuxVM (
+      pkgs.runCommand "brightbox-image"
+        { preVM =
+            ''
+              mkdir $out
+              diskImage=$out/$diskImageBase
+              truncate $diskImage --size ${diskSize}
+              mv closure xchg/
+            '';
+
+          postVM =
+            ''
+              PATH=$PATH:${lib.makeBinPath [ pkgs.gnutar pkgs.gzip ]}
+              pushd $out
+              ${pkgs.qemu_kvm}/bin/qemu-img convert -c -O qcow2 $diskImageBase nixos.qcow2
+              rm $diskImageBase
+              popd
+            '';
+          diskImageBase = "nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.raw";
+          buildInputs = [ pkgs.util-linux pkgs.perl ];
+          exportReferencesGraph =
+            [ "closure" config.system.build.toplevel ];
+        }
+        ''
+          # Create partition table
+          ${pkgs.parted}/sbin/parted --script /dev/vda mklabel msdos
+          ${pkgs.parted}/sbin/parted --script /dev/vda mkpart primary ext4 1 ${diskSize}
+          ${pkgs.parted}/sbin/parted --script /dev/vda print
+          . /sys/class/block/vda1/uevent
+          mknod /dev/vda1 b $MAJOR $MINOR
+
+          # Create an empty filesystem and mount it.
+          ${pkgs.e2fsprogs}/sbin/mkfs.ext4 -L nixos /dev/vda1
+          ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
+
+          mkdir /mnt
+          mount /dev/vda1 /mnt
+
+          # The initrd expects these directories to exist.
+          mkdir /mnt/dev /mnt/proc /mnt/sys
+
+          mount --bind /proc /mnt/proc
+          mount --bind /dev /mnt/dev
+          mount --bind /sys /mnt/sys
+
+          # Copy all paths in the closure to the filesystem.
+          storePaths=$(perl ${pkgs.pathsFromGraph} /tmp/xchg/closure)
+
+          mkdir -p /mnt/nix/store
+          echo "copying everything (will take a while)..."
+          cp -prd $storePaths /mnt/nix/store/
+
+          # Register the paths in the Nix database.
+          printRegistration=1 perl ${pkgs.pathsFromGraph} /tmp/xchg/closure | \
+              chroot /mnt ${config.nix.package.out}/bin/nix-store --load-db --option build-users-group ""
+
+          # Create the system profile to allow nixos-rebuild to work.
+          chroot /mnt ${config.nix.package.out}/bin/nix-env \
+              -p /nix/var/nix/profiles/system --set ${config.system.build.toplevel} \
+              --option build-users-group ""
+
+          # `nixos-rebuild' requires an /etc/NIXOS.
+          mkdir -p /mnt/etc
+          touch /mnt/etc/NIXOS
+
+          # `switch-to-configuration' requires a /bin/sh
+          mkdir -p /mnt/bin
+          ln -s ${config.system.build.binsh}/bin/sh /mnt/bin/sh
+
+          # Install a configuration.nix.
+          mkdir -p /mnt/etc/nixos /mnt/boot/grub
+          cp ${./brightbox-config.nix} /mnt/etc/nixos/configuration.nix
+
+          # Generate the GRUB menu.
+          ln -s vda /dev/sda
+          chroot /mnt ${config.system.build.toplevel}/bin/switch-to-configuration boot
+
+          umount /mnt/proc /mnt/dev /mnt/sys
+          umount /mnt
+        ''
+    );
+
+  fileSystems."/".label = "nixos";
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/vda";
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  # Allow root logins only using the SSH key that the user specified
+  # at instance creation time.
+  services.openssh.enable = true;
+  services.openssh.settings.PermitRootLogin = "prohibit-password";
+
+  # Force getting the hostname from the metadata service.
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  environment.systemPackages = [ pkgs.cryptsetup ];
+
+  systemd.services.fetch-ec2-data =
+    { description = "Fetch EC2 Data";
+
+      wantedBy = [ "multi-user.target" "sshd.service" ];
+      before = [ "sshd.service" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      path = [ pkgs.wget pkgs.iproute2 ];
+
+      script =
+        ''
+          wget="wget -q --retry-connrefused -O -"
+
+          ${optionalString (config.networking.hostName == "") ''
+            echo "setting host name..."
+            ${pkgs.nettools}/bin/hostname $($wget http://169.254.169.254/latest/meta-data/hostname)
+          ''}
+
+          # Don't download the SSH key if it has already been injected
+          # into the image (a Nova feature).
+          if ! [ -e /root/.ssh/authorized_keys ]; then
+              echo "obtaining SSH key..."
+              mkdir -m 0700 -p /root/.ssh
+              $wget http://169.254.169.254/latest/meta-data/public-keys/0/openssh-key > /root/key.pub
+              if [ $? -eq 0 -a -e /root/key.pub ]; then
+                  if ! grep -q -f /root/key.pub /root/.ssh/authorized_keys; then
+                      cat /root/key.pub >> /root/.ssh/authorized_keys
+                      echo "new key added to authorized_keys"
+                  fi
+                  chmod 600 /root/.ssh/authorized_keys
+                  rm -f /root/key.pub
+              fi
+          fi
+
+          # Extract the intended SSH host key for this machine from
+          # the supplied user data, if available.  Otherwise sshd will
+          # generate one normally.
+          $wget http://169.254.169.254/2011-01-01/user-data > /root/user-data || true
+          key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' /root/user-data)"
+          key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' /root/user-data)"
+          if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
+              mkdir -m 0755 -p /etc/ssh
+              (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
+              echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
+          fi
+        '';
+
+      serviceConfig.Type = "oneshot";
+      serviceConfig.RemainAfterExit = true;
+    };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/build-vm.nix b/nixpkgs/nixos/modules/virtualisation/build-vm.nix
new file mode 100644
index 000000000000..e94254416316
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/build-vm.nix
@@ -0,0 +1,58 @@
+{ config, extendModules, lib, ... }:
+let
+
+  inherit (lib)
+    mkOption
+    ;
+
+  vmVariant = extendModules {
+    modules = [ ./qemu-vm.nix ];
+  };
+
+  vmVariantWithBootLoader = vmVariant.extendModules {
+    modules = [
+      ({ config, ... }: {
+        _file = "nixos/default.nix##vmWithBootLoader";
+        virtualisation.useBootLoader = true;
+        virtualisation.useEFIBoot =
+          config.boot.loader.systemd-boot.enable ||
+          config.boot.loader.efi.canTouchEfiVariables;
+      })
+    ];
+  };
+in
+{
+  options = {
+
+    virtualisation.vmVariant = mkOption {
+      description = lib.mdDoc ''
+        Machine configuration to be added for the vm script produced by `nixos-rebuild build-vm`.
+      '';
+      inherit (vmVariant) type;
+      default = {};
+      visible = "shallow";
+    };
+
+    virtualisation.vmVariantWithBootLoader = mkOption {
+      description = lib.mdDoc ''
+        Machine configuration to be added for the vm script produced by `nixos-rebuild build-vm-with-bootloader`.
+      '';
+      inherit (vmVariantWithBootLoader) type;
+      default = {};
+      visible = "shallow";
+    };
+
+  };
+
+  config = {
+
+    system.build = {
+      vm = lib.mkDefault config.virtualisation.vmVariant.system.build.vm;
+      vmWithBootLoader = lib.mkDefault config.virtualisation.vmVariantWithBootLoader.system.build.vm;
+    };
+
+  };
+
+  # uses extendModules
+  meta.buildDocsInSandbox = false;
+}
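+
+# A minimal usage sketch (not part of this module): tweaking the VM produced by
+# `nixos-rebuild build-vm` through the vmVariant option defined above. The
+# virtualisation.memorySize option is assumed to come from qemu-vm.nix; the
+# value is illustrative only.
+#
+# { ... }:
+# {
+#   virtualisation.vmVariant.virtualisation.memorySize = 2048; # MiB
+# }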
diff --git a/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix b/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix
new file mode 100644
index 000000000000..7df3c9c613b4
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/cloudstack-config.nix
@@ -0,0 +1,40 @@
+{ lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=tty0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      settings.PermitRootLogin = "prohibit-password";
+    };
+
+    # Cloud-init configuration.
+    services.cloud-init.enable = true;
+    # Wget is needed for setting the password. This is of little use here,
+    # as password-based root login is disabled above.
+    environment.systemPackages = [ pkgs.wget ];
+    # Only enable CloudStack datasource for faster boot speed.
+    environment.etc."cloud/cloud.cfg.d/99_cloudstack.cfg".text = ''
+      datasource:
+        CloudStack: {}
+        None: {}
+      datasource_list: ["CloudStack"]
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/container-config.nix b/nixpkgs/nixos/modules/virtualisation/container-config.nix
new file mode 100644
index 000000000000..2460ec45e3fc
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/container-config.nix
@@ -0,0 +1,43 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+
+  config = mkIf config.boot.isContainer {
+
+    # Disable some features that are not useful in a container.
+
+    # containers don't have a kernel
+    boot.kernel.enable = false;
+    boot.modprobeConfig.enable = false;
+
+    console.enable = mkDefault false;
+
+    nix.optimise.automatic = mkDefault false; # the store is host managed
+    powerManagement.enable = mkDefault false;
+    documentation.nixos.enable = mkDefault false;
+
+    networking.useHostResolvConf = mkDefault true;
+
+    # Containers should be light-weight, so start sshd on demand.
+    services.openssh.startWhenNeeded = mkDefault true;
+
+    # containers do not need to setup devices
+    services.udev.enable = false;
+
+    # containers normally do not need to manage logical volumes
+    services.lvm.enable = lib.mkDefault false;
+
+    # Shut up warnings about not having a boot loader.
+    system.build.installBootLoader = lib.mkDefault "${pkgs.coreutils}/bin/true";
+
+    # Not supported in systemd-nspawn containers.
+    security.audit.enable = false;
+
+    # Use the host's nix-daemon.
+    environment.variables.NIX_REMOTE = "daemon";
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/containerd.nix b/nixpkgs/nixos/modules/virtualisation/containerd.nix
new file mode 100644
index 000000000000..f6e3c8387298
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/containerd.nix
@@ -0,0 +1,101 @@
+{ pkgs, lib, config, ... }:
+let
+  cfg = config.virtualisation.containerd;
+
+  configFile = if cfg.configFile == null then
+    settingsFormat.generate "containerd.toml" cfg.settings
+  else
+    cfg.configFile;
+
+  containerdConfigChecked = pkgs.runCommand "containerd-config-checked.toml" {
+    nativeBuildInputs = [ pkgs.containerd ];
+  } ''
+    containerd -c ${configFile} config dump >/dev/null
+    ln -s ${configFile} $out
+  '';
+
+  settingsFormat = pkgs.formats.toml {};
+in
+{
+
+  options.virtualisation.containerd = with lib.types; {
+    enable = lib.mkEnableOption (lib.mdDoc "containerd container runtime");
+
+    configFile = lib.mkOption {
+      default = null;
+      description = lib.mdDoc ''
+       Path to containerd config file.
+       Setting this option will override any configuration applied by the settings option.
+      '';
+      type = nullOr path;
+    };
+
+    settings = lib.mkOption {
+      type = settingsFormat.type;
+      default = {};
+      description = lib.mdDoc ''
+        Configuration for containerd, serialized to containerd.toml.
+      '';
+    };
+
+    args = lib.mkOption {
+      default = {};
+      description = lib.mdDoc "extra args to append to the containerd cmdline";
+      type = attrsOf str;
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    warnings = lib.optional (cfg.configFile != null) ''
+      `virtualisation.containerd.configFile` is deprecated. Use `virtualisation.containerd.settings` instead.
+    '';
+
+    virtualisation.containerd = {
+      args.config = toString containerdConfigChecked;
+      settings = {
+        version = 2;
+        plugins."io.containerd.grpc.v1.cri" = {
+         containerd.snapshotter =
+           lib.mkIf config.boot.zfs.enabled (lib.mkOptionDefault "zfs");
+         cni.bin_dir = lib.mkOptionDefault "${pkgs.cni-plugins}/bin";
+        };
+      };
+    };
+
+    environment.systemPackages = [ pkgs.containerd ];
+
+    systemd.services.containerd = {
+      description = "containerd - container runtime";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = with pkgs; [
+        containerd
+        runc
+        iptables
+      ] ++ lib.optional config.boot.zfs.enabled config.boot.zfs.package;
+      serviceConfig = {
+        ExecStart = ''${pkgs.containerd}/bin/containerd ${lib.concatStringsSep " " (lib.cli.toGNUCommandLine {} cfg.args)}'';
+        Delegate = "yes";
+        KillMode = "process";
+        Type = "notify";
+        Restart = "always";
+        RestartSec = "10";
+
+        # "limits" defined below are adopted from upstream: https://github.com/containerd/containerd/blob/master/containerd.service
+        LimitNPROC = "infinity";
+        LimitCORE = "infinity";
+        LimitNOFILE = "infinity";
+        TasksMax = "infinity";
+        OOMScoreAdjust = "-999";
+
+        StateDirectory = "containerd";
+        RuntimeDirectory = "containerd";
+        RuntimeDirectoryPreserve = "yes";
+      };
+      unitConfig = {
+        StartLimitBurst = "16";
+        StartLimitIntervalSec = "120s";
+      };
+    };
+  };
+}
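+
+# A minimal usage sketch (not part of this module): enabling containerd and
+# extending the generated containerd.toml via the settings option above. The
+# sandbox_image key is assumed from containerd's CRI plugin configuration; the
+# image reference is illustrative only.
+#
+# {
+#   virtualisation.containerd.enable = true;
+#   virtualisation.containerd.settings.plugins."io.containerd.grpc.v1.cri".sandbox_image =
+#     "registry.k8s.io/pause:3.9";
+# }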
diff --git a/nixpkgs/nixos/modules/virtualisation/containers.nix b/nixpkgs/nixos/modules/virtualisation/containers.nix
new file mode 100644
index 000000000000..3e33cabf2660
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/containers.nix
@@ -0,0 +1,142 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.virtualisation.containers;
+
+  inherit (lib) literalExpression mkOption types;
+
+  toml = pkgs.formats.toml { };
+in
+{
+  meta = {
+    maintainers = [ ] ++ lib.teams.podman.members;
+  };
+
+  options.virtualisation.containers = {
+
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          This option enables the common /etc/containers configuration module.
+        '';
+      };
+
+    ociSeccompBpfHook.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc "Enable the OCI seccomp BPF hook";
+    };
+
+    containersConf.settings = mkOption {
+      type = toml.type;
+      default = { };
+      description = lib.mdDoc "containers.conf configuration";
+    };
+
+    containersConf.cniPlugins = mkOption {
+      type = types.listOf types.package;
+      defaultText = literalExpression ''
+        [
+          pkgs.cni-plugins
+        ]
+      '';
+      example = literalExpression ''
+        [
+          pkgs.cniPlugins.dnsname
+        ]
+      '';
+      description = lib.mdDoc ''
+        CNI plugins to install on the system.
+      '';
+    };
+
+    storage.settings = mkOption {
+      type = toml.type;
+      default = {
+        storage = {
+          driver = "overlay";
+          graphroot = "/var/lib/containers/storage";
+          runroot = "/run/containers/storage";
+        };
+      };
+      description = lib.mdDoc "storage.conf configuration";
+    };
+
+    registries = {
+      search = mkOption {
+        type = types.listOf types.str;
+        default = [ "docker.io" "quay.io" ];
+        description = lib.mdDoc ''
+          List of repositories to search.
+        '';
+      };
+
+      insecure = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of insecure repositories.
+        '';
+      };
+
+      block = mkOption {
+        default = [ ];
+        type = types.listOf types.str;
+        description = lib.mdDoc ''
+          List of blocked repositories.
+        '';
+      };
+    };
+
+    policy = mkOption {
+      default = { };
+      type = types.attrs;
+      example = literalExpression ''
+        {
+          default = [ { type = "insecureAcceptAnything"; } ];
+          transports = {
+            docker-daemon = {
+              "" = [ { type = "insecureAcceptAnything"; } ];
+            };
+          };
+        }
+      '';
+      description = lib.mdDoc ''
+        Signature verification policy file.
+        If this option is empty the default policy file from
+        `skopeo` will be used.
+      '';
+    };
+
+  };
+
+  config = lib.mkIf cfg.enable {
+
+    virtualisation.containers.containersConf.cniPlugins = [ pkgs.cni-plugins ];
+
+    virtualisation.containers.containersConf.settings = {
+      network.cni_plugin_dirs = map (p: "${lib.getBin p}/bin") cfg.containersConf.cniPlugins;
+      engine = {
+        init_path = "${pkgs.catatonit}/bin/catatonit";
+      } // lib.optionalAttrs cfg.ociSeccompBpfHook.enable {
+        hooks_dir = [ config.boot.kernelPackages.oci-seccomp-bpf-hook ];
+      };
+    };
+
+    environment.etc."containers/containers.conf".source =
+      toml.generate "containers.conf" cfg.containersConf.settings;
+
+    environment.etc."containers/storage.conf".source =
+      toml.generate "storage.conf" cfg.storage.settings;
+
+    environment.etc."containers/registries.conf".source = toml.generate "registries.conf" {
+      registries = lib.mapAttrs (n: v: { registries = v; }) cfg.registries;
+    };
+
+    environment.etc."containers/policy.json".source =
+      if cfg.policy != { } then pkgs.writeText "policy.json" (builtins.toJSON cfg.policy)
+      else "${pkgs.skopeo.policy}/default-policy.json";
+  };
+
+}
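+
+# A minimal usage sketch (not part of this module): pointing registry search at
+# a private mirror through the registries options above. The host name is
+# illustrative only.
+#
+# {
+#   virtualisation.containers.enable = true;
+#   virtualisation.containers.registries.search = [ "registry.example.com" "docker.io" ];
+# }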
diff --git a/nixpkgs/nixos/modules/virtualisation/cri-o.nix b/nixpkgs/nixos/modules/virtualisation/cri-o.nix
new file mode 100644
index 000000000000..dacd700537c7
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/cri-o.nix
@@ -0,0 +1,158 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.cri-o;
+
+  crioPackage = pkgs.cri-o.override {
+    extraPackages = cfg.extraPackages
+      ++ lib.optional (builtins.elem "zfs" config.boot.supportedFilesystems) config.boot.zfs.package;
+  };
+
+  format = pkgs.formats.toml { };
+
+  cfgFile = format.generate "00-default.conf" cfg.settings;
+in
+{
+  meta = {
+    maintainers = teams.podman.members;
+  };
+
+  options.virtualisation.cri-o = {
+    enable = mkEnableOption (lib.mdDoc "Container Runtime Interface for OCI (CRI-O)");
+
+    storageDriver = mkOption {
+      type = types.enum [ "aufs" "btrfs" "devmapper" "overlay" "vfs" "zfs" ];
+      default = "overlay";
+      description = lib.mdDoc "Storage driver to be used";
+    };
+
+    logLevel = mkOption {
+      type = types.enum [ "trace" "debug" "info" "warn" "error" "fatal" ];
+      default = "info";
+      description = lib.mdDoc "Log level to be used";
+    };
+
+    pauseImage = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Override the default pause image for pod sandboxes";
+      example = "k8s.gcr.io/pause:3.2";
+    };
+
+    pauseCommand = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Override the default pause command";
+      example = "/pause";
+    };
+
+    runtime = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      description = lib.mdDoc "Override the default runtime";
+      example = "crun";
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      example = literalExpression ''
+        [
+          pkgs.gvisor
+        ]
+      '';
+      description = lib.mdDoc ''
+        Extra packages to be installed in the CRI-O wrapper.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = crioPackage;
+      internal = true;
+      description = lib.mdDoc ''
+        The final CRI-O package (including extra packages).
+      '';
+    };
+
+    networkDir = mkOption {
+      type = types.nullOr types.path;
+      default = null;
+      description = lib.mdDoc "Override the network_dir option.";
+      internal = true;
+    };
+
+    settings = mkOption {
+      type = format.type;
+      default = { };
+      description = lib.mdDoc ''
+        Configuration for cri-o, see
+        <https://github.com/cri-o/cri-o/blob/master/docs/crio.conf.5.md>.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package pkgs.cri-tools ];
+
+    environment.etc."crictl.yaml".source = "${cfg.package}/etc/crictl.yaml";
+
+    virtualisation.cri-o.settings.crio = {
+      storage_driver = cfg.storageDriver;
+
+      image = {
+        pause_image = mkIf (cfg.pauseImage != null) cfg.pauseImage;
+        pause_command = mkIf (cfg.pauseCommand != null) cfg.pauseCommand;
+      };
+
+      network = {
+        plugin_dirs = [ "${pkgs.cni-plugins}/bin" ];
+        network_dir = mkIf (cfg.networkDir != null) cfg.networkDir;
+      };
+
+      runtime = {
+        cgroup_manager = "systemd";
+        log_level = cfg.logLevel;
+        manage_ns_lifecycle = true;
+        pinns_path = "${cfg.package}/bin/pinns";
+        hooks_dir =
+          optional (config.virtualisation.containers.ociSeccompBpfHook.enable)
+            config.boot.kernelPackages.oci-seccomp-bpf-hook;
+
+        default_runtime = mkIf (cfg.runtime != null) cfg.runtime;
+        runtimes = mkIf (cfg.runtime != null) {
+          "${cfg.runtime}" = { };
+        };
+      };
+    };
+
+    environment.etc."cni/net.d/10-crio-bridge.conflist".source = "${cfg.package}/etc/cni/net.d/10-crio-bridge.conflist";
+    environment.etc."cni/net.d/99-loopback.conflist".source = "${cfg.package}/etc/cni/net.d/99-loopback.conflist";
+    environment.etc."crio/crio.conf.d/00-default.conf".source = cfgFile;
+
+    # Enable common /etc/containers configuration
+    virtualisation.containers.enable = true;
+
+    systemd.services.crio = {
+      description = "Container Runtime Interface for OCI (CRI-O)";
+      documentation = [ "https://github.com/cri-o/cri-o" ];
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${cfg.package}/bin/crio";
+        ExecReload = "/bin/kill -s HUP $MAINPID";
+        TasksMax = "infinity";
+        LimitNOFILE = "1048576";
+        LimitNPROC = "1048576";
+        LimitCORE = "infinity";
+        OOMScoreAdjust = "-999";
+        TimeoutStartSec = "0";
+        Restart = "on-abnormal";
+      };
+      restartTriggers = [ cfgFile ];
+    };
+  };
+}
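+
+# A minimal usage sketch (not part of this module): enabling CRI-O with crun as
+# the default runtime, using the runtime and extraPackages options above.
+#
+# {
+#   virtualisation.cri-o.enable = true;
+#   virtualisation.cri-o.runtime = "crun";
+#   virtualisation.cri-o.extraPackages = [ pkgs.crun ];
+# }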
diff --git a/nixpkgs/nixos/modules/virtualisation/digital-ocean-config.nix b/nixpkgs/nixos/modules/virtualisation/digital-ocean-config.nix
new file mode 100644
index 000000000000..e004b7880aad
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/digital-ocean-config.nix
@@ -0,0 +1,197 @@
+{ config, pkgs, lib, modulesPath, ... }:
+with lib;
+{
+  imports = [
+    (modulesPath + "/profiles/qemu-guest.nix")
+    (modulesPath + "/virtualisation/digital-ocean-init.nix")
+  ];
+  options.virtualisation.digitalOcean = with types; {
+    setRootPassword = mkOption {
+      type = bool;
+      default = false;
+      example = true;
+      description = lib.mdDoc "Whether to set the root password from the Digital Ocean metadata";
+    };
+    setSshKeys = mkOption {
+      type = bool;
+      default = true;
+      example = true;
+      description = lib.mdDoc "Whether to fetch ssh keys from Digital Ocean";
+    };
+    seedEntropy = mkOption {
+      type = bool;
+      default = true;
+      example = true;
+      description = lib.mdDoc "Whether to run the kernel RNG entropy seeding script from the Digital Ocean vendor data";
+    };
+  };
+  config =
+    let
+      cfg = config.virtualisation.digitalOcean;
+      hostName = config.networking.hostName;
+      doMetadataFile = "/run/do-metadata/v1.json";
+    in mkMerge [{
+      fileSystems."/" = {
+        device = "/dev/disk/by-label/nixos";
+        autoResize = true;
+        fsType = "ext4";
+      };
+      boot = {
+        growPartition = true;
+        kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
+        initrd.kernelModules = [ "virtio_scsi" ];
+        kernelModules = [ "virtio_pci" "virtio_net" ];
+        loader = {
+          grub.device = "/dev/vda";
+          timeout = 0;
+          grub.configurationLimit = 0;
+        };
+      };
+      services.openssh = {
+        enable = mkDefault true;
+        settings.PasswordAuthentication = mkDefault false;
+      };
+      services.do-agent.enable = mkDefault true;
+      networking = {
+        hostName = mkDefault ""; # use Digital Ocean metadata server
+      };
+
+      /* Check for and wait for the metadata server to become reachable.
+       * This serves as a dependency for all the other metadata services. */
+      systemd.services.digitalocean-metadata = {
+        path = [ pkgs.curl ];
+        description = "Get host metadata provided by Digitalocean";
+        script = ''
+          set -eu
+          DO_DELAY_ATTEMPTS=0
+          while ! curl -fsSL -o $RUNTIME_DIRECTORY/v1.json http://169.254.169.254/metadata/v1.json; do
+            DO_DELAY_ATTEMPTS=$((DO_DELAY_ATTEMPTS + 1))
+            if (( $DO_DELAY_ATTEMPTS >= $DO_DELAY_ATTEMPTS_MAX )); then
+              echo "giving up"
+              exit 1
+            fi
+
+            echo "metadata unavailable, trying again in 1s..."
+            sleep 1
+          done
+          chmod 600 $RUNTIME_DIRECTORY/v1.json
+          '';
+        environment = {
+          DO_DELAY_ATTEMPTS_MAX = "10";
+        };
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+          RuntimeDirectory = "do-metadata";
+          RuntimeDirectoryPreserve = "yes";
+        };
+        unitConfig = {
+          ConditionPathExists = "!${doMetadataFile}";
+          After = [ "network-pre.target" ] ++
+            optional config.networking.dhcpcd.enable "dhcpcd.service" ++
+            optional config.systemd.network.enable "systemd-networkd.service";
+        };
+      };
+
+      /* Fetch the root password from the digital ocean metadata.
+       * There is no specific route for this, so we use jq to get
+       * it from the One Big JSON metadata blob */
+      systemd.services.digitalocean-set-root-password = mkIf cfg.setRootPassword {
+        path = [ pkgs.shadow pkgs.jq ];
+        description = "Set root password provided by Digitalocean";
+        wantedBy = [ "multi-user.target" ];
+        script = ''
+          set -eo pipefail
+          ROOT_PASSWORD=$(jq -er '.auth_key' ${doMetadataFile})
+          echo "root:$ROOT_PASSWORD" | chpasswd
+          mkdir -p /etc/do-metadata/set-root-password
+          '';
+        unitConfig = {
+          ConditionPathExists = "!/etc/do-metadata/set-root-password";
+          Before = optional config.services.openssh.enable "sshd.service";
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+      /* Set the hostname from Digital Ocean, unless the user configured it in
+       * the NixOS configuration. The cached metadata file isn't used here
+       * because the hostname is a mutable part of the droplet. */
+      systemd.services.digitalocean-set-hostname = mkIf (hostName == "") {
+        path = [ pkgs.curl pkgs.nettools ];
+        description = "Set hostname provided by Digitalocean";
+        wantedBy = [ "network.target" ];
+        script = ''
+          set -e
+          DIGITALOCEAN_HOSTNAME=$(curl -fsSL http://169.254.169.254/metadata/v1/hostname)
+          hostname "$DIGITALOCEAN_HOSTNAME"
+          if [[ ! -e /etc/hostname || -w /etc/hostname ]]; then
+            printf "%s\n" "$DIGITALOCEAN_HOSTNAME" > /etc/hostname
+          fi
+        '';
+        unitConfig = {
+          Before = [ "network.target" ];
+          After = [ "digitalocean-metadata.service" ];
+          Wants = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+      /* Fetch the ssh keys for root from Digital Ocean */
+      systemd.services.digitalocean-ssh-keys = mkIf cfg.setSshKeys {
+        description = "Set root ssh keys provided by Digital Ocean";
+        wantedBy = [ "multi-user.target" ];
+        path = [ pkgs.jq ];
+        script = ''
+          set -e
+          mkdir -m 0700 -p /root/.ssh
+          jq -er '.public_keys[]' ${doMetadataFile} > /root/.ssh/authorized_keys
+          chmod 600 /root/.ssh/authorized_keys
+        '';
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+        unitConfig = {
+          ConditionPathExists = "!/root/.ssh/authorized_keys";
+          Before = optional config.services.openssh.enable "sshd.service";
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+      };
+
+      /* Initialize the RNG by running the entropy-seed script from the
+       * Digital Ocean metadata
+       */
+      systemd.services.digitalocean-entropy-seed = mkIf cfg.seedEntropy {
+        description = "Run the kernel RNG entropy seeding script from the Digital Ocean vendor data";
+        wantedBy = [ "network.target" ];
+        path = [ pkgs.jq pkgs.mpack ];
+        script = ''
+          set -eo pipefail
+          TEMPDIR=$(mktemp -d)
+          jq -er '.vendor_data' ${doMetadataFile} | munpack -tC $TEMPDIR
+          ENTROPY_SEED=$(grep -rl "DigitalOcean Entropy Seed script" $TEMPDIR)
+          ${pkgs.runtimeShell} $ENTROPY_SEED
+          rm -rf $TEMPDIR
+          '';
+        unitConfig = {
+          Before = [ "network.target" ];
+          After = [ "digitalocean-metadata.service" ];
+          Requires = [ "digitalocean-metadata.service" ];
+        };
+        serviceConfig = {
+          Type = "oneshot";
+        };
+      };
+
+    }
+  ];
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+}
+
diff --git a/nixpkgs/nixos/modules/virtualisation/digital-ocean-image.nix b/nixpkgs/nixos/modules/virtualisation/digital-ocean-image.nix
new file mode 100644
index 000000000000..a57c89245f2e
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/digital-ocean-image.nix
@@ -0,0 +1,70 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.digitalOceanImage;
+in
+{
+
+  imports = [ ./digital-ocean-config.nix ];
+
+  options = {
+    virtualisation.digitalOceanImage.diskSize = mkOption {
+      type = with types; either (enum [ "auto" ]) int;
+      default = "auto";
+      example = 4096;
+      description = lib.mdDoc ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+
+    virtualisation.digitalOceanImage.configFile = mkOption {
+      type = with types; nullOr path;
+      default = null;
+      description = lib.mdDoc ''
+        A path to a configuration file which will be placed at
+        `/etc/nixos/configuration.nix` and be used when switching
+        to a new configuration. If set to `null`, a default
+        configuration is used that imports
+        `(modulesPath + "/virtualisation/digital-ocean-config.nix")`.
+      '';
+    };
+
+    virtualisation.digitalOceanImage.compressionMethod = mkOption {
+      type = types.enum [ "gzip" "bzip2" ];
+      default = "gzip";
+      example = "bzip2";
+      description = lib.mdDoc ''
+        Disk image compression method. Choose bzip2 for smaller images that take
+        longer to build but consume less metered storage space on your
+        Digital Ocean account.
+      '';
+    };
+  };
+
+  #### implementation
+  config = {
+
+    system.build.digitalOceanImage = import ../../lib/make-disk-image.nix {
+      name = "digital-ocean-image";
+      format = "qcow2";
+      postVM = let
+        compress = {
+          "gzip" = "${pkgs.gzip}/bin/gzip";
+          "bzip2" = "${pkgs.bzip2}/bin/bzip2";
+        }.${cfg.compressionMethod};
+      in ''
+        ${compress} $diskImage
+      '';
+      configFile = if cfg.configFile == null
+        then config.virtualisation.digitalOcean.defaultConfigFile
+        else cfg.configFile;
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+  };
+
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+
+}
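+
+# A sketch of one common invocation (not part of this module), assuming a
+# configuration.nix that imports this file:
+#
+# $ nix-build '<nixpkgs/nixos>' \
+#     -A config.system.build.digitalOceanImage \
+#     -I nixos-config=./configuration.nix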
diff --git a/nixpkgs/nixos/modules/virtualisation/digital-ocean-init.nix b/nixpkgs/nixos/modules/virtualisation/digital-ocean-init.nix
new file mode 100644
index 000000000000..1a5d4e898e96
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/digital-ocean-init.nix
@@ -0,0 +1,95 @@
+{ config, pkgs, lib, ... }:
+with lib;
+let
+  cfg = config.virtualisation.digitalOcean;
+  defaultConfigFile = pkgs.writeText "digitalocean-configuration.nix" ''
+    { modulesPath, lib, ... }:
+    {
+      imports = lib.optional (builtins.pathExists ./do-userdata.nix) ./do-userdata.nix ++ [
+        (modulesPath + "/virtualisation/digital-ocean-config.nix")
+      ];
+    }
+  '';
+in {
+  options.virtualisation.digitalOcean.rebuildFromUserData = mkOption {
+    type = types.bool;
+    default = true;
+    example = true;
+    description = lib.mdDoc "Whether to reconfigure the system from Digital Ocean user data";
+  };
+  options.virtualisation.digitalOcean.defaultConfigFile = mkOption {
+    type = types.path;
+    default = defaultConfigFile;
+    defaultText = literalMD ''
+      The default configuration imports user-data if applicable and
+      `(modulesPath + "/virtualisation/digital-ocean-config.nix")`.
+    '';
+    description = lib.mdDoc ''
+      A path to a configuration file which will be placed at
+      `/etc/nixos/configuration.nix` and be used when switching to
+      a new configuration.
+    '';
+  };
+
+  config = {
+    systemd.services.digitalocean-init = mkIf cfg.rebuildFromUserData {
+      description = "Reconfigure the system from Digital Ocean userdata on startup";
+      wantedBy = [ "network-online.target" ];
+      unitConfig = {
+        ConditionPathExists = "!/etc/nixos/do-userdata.nix";
+        After = [ "digitalocean-metadata.service" "network-online.target" ];
+        Requires = [ "digitalocean-metadata.service" ];
+        X-StopOnRemoval = false;
+      };
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+      restartIfChanged = false;
+      path = [ pkgs.jq pkgs.gnused pkgs.gnugrep config.systemd.package config.nix.package config.system.build.nixos-rebuild ];
+      environment = {
+        HOME = "/root";
+        NIX_PATH = concatStringsSep ":" [
+          "/nix/var/nix/profiles/per-user/root/channels/nixos"
+          "nixos-config=/etc/nixos/configuration.nix"
+          "/nix/var/nix/profiles/per-user/root/channels"
+        ];
+      };
+      script = ''
+        set -e
+        echo "attempting to fetch configuration from Digital Ocean user data..."
+        userData=$(mktemp)
+        if jq -er '.user_data' /run/do-metadata/v1.json > $userData; then
+          # If the user-data looks like it could be a nix expression,
+          # copy it over. Also, look for a magic three-hash comment and set
+          # that as the channel.
+          if nix-instantiate --parse $userData > /dev/null; then
+            channels="$(grep '^###' "$userData" | sed 's|###\s*||')"
+            printf "%s" "$channels" | while read channel; do
+              echo "writing channel: $channel"
+            done
+
+            if [[ -n "$channels" ]]; then
+              printf "%s" "$channels" > /root/.nix-channels
+              nix-channel --update
+            fi
+
+            echo "setting configuration from Digital Ocean user data"
+            cp "$userData" /etc/nixos/do-userdata.nix
+            if [[ ! -e /etc/nixos/configuration.nix ]]; then
+              install -m0644 ${cfg.defaultConfigFile} /etc/nixos/configuration.nix
+            fi
+          else
+            echo "user data does not appear to be a Nix expression; ignoring"
+            exit
+          fi
+
+          nixos-rebuild switch
+        else
+          echo "no user data is available"
+        fi
+        '';
+    };
+  };
+  meta.maintainers = with maintainers; [ arianvp eamsden ];
+}
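+
+# A sketch (not part of this module) of user data this service would accept: a
+# Nix module, optionally preceded by "###" lines that are written verbatim to
+# /root/.nix-channels ("<url> <name>" per line). The channel URL is illustrative.
+#
+# ### https://nixos.org/channels/nixos-unstable nixos
+# { modulesPath, ... }:
+# {
+#   imports = [ (modulesPath + "/virtualisation/digital-ocean-config.nix") ];
+# }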
diff --git a/nixpkgs/nixos/modules/virtualisation/docker-image.nix b/nixpkgs/nixos/modules/virtualisation/docker-image.nix
new file mode 100644
index 000000000000..baac3a35a78e
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker-image.nix
@@ -0,0 +1,57 @@
+{ ... }:
+
+{
+  imports = [
+    ../profiles/docker-container.nix # FIXME, shouldn't include something from profiles/
+  ];
+
+  boot.postBootCommands =
+    ''
+      # Set virtualisation to docker
+      echo "docker" > /run/systemd/container
+    '';
+
+  # Iptables does not work in Docker.
+  networking.firewall.enable = false;
+
+  # Socket-activated ssh presents problems in Docker.
+  services.openssh.startWhenNeeded = false;
+}
+
+# Example usage:
+#
+## default.nix
+# let
+#   nixos = import <nixpkgs/nixos> {
+#     configuration = ./configuration.nix;
+#     system = "x86_64-linux";
+#   };
+# in
+# nixos.config.system.build.tarball
+#
+## configuration.nix
+# { pkgs, config, lib, ... }:
+# {
+#   imports = [
+#     <nixpkgs/nixos/modules/virtualisation/docker-image.nix>
+#     <nixpkgs/nixos/modules/installer/cd-dvd/channel.nix>
+#   ];
+#
+#   documentation.doc.enable = false;
+#
+#   environment.systemPackages = with pkgs; [
+#     bashInteractive
+#     cacert
+#     nix
+#   ];
+# }
+#
+## Run
+# Build the tarball:
+# $ nix-build default.nix
+# Load into docker:
+# $ docker import result/tarball/nixos-system-*.tar.xz nixos-docker
+# Boot into systemd:
+# $ docker run --privileged -it nixos-docker /init
+# Log into the container:
+# $ docker exec -it <container-name> /run/current-system/sw/bin/bash
diff --git a/nixpkgs/nixos/modules/virtualisation/docker-rootless.nix b/nixpkgs/nixos/modules/virtualisation/docker-rootless.nix
new file mode 100644
index 000000000000..f4e4bdc0963a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker-rootless.nix
@@ -0,0 +1,101 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.docker.rootless;
+  proxy_env = config.networking.proxy.envVars;
+  settingsFormat = pkgs.formats.json {};
+  daemonSettingsFile = settingsFormat.generate "daemon.json" cfg.daemon.settings;
+
+in
+
+{
+  ###### interface
+
+  options.virtualisation.docker.rootless = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        This option enables docker in a rootless mode, a daemon that manages
+        linux containers. To interact with the daemon, one needs to set
+        {command}`DOCKER_HOST=unix://$XDG_RUNTIME_DIR/docker.sock`.
+      '';
+    };
+
+    setSocketVariable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Point {command}`DOCKER_HOST` to the rootless Docker instance for
+        normal users by default.
+      '';
+    };
+
+    daemon.settings = mkOption {
+      type = settingsFormat.type;
+      default = { };
+      example = {
+        ipv6 = true;
+        "fixed-cidr-v6" = "fd00::/80";
+      };
+      description = lib.mdDoc ''
+        Configuration for the docker daemon. The attributes are serialized to JSON and used as daemon.json.
+        See https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+      '';
+    };
+
+    package = mkOption {
+      default = pkgs.docker;
+      defaultText = literalExpression "pkgs.docker";
+      type = types.package;
+      description = lib.mdDoc ''
+        Docker package to be used in the module.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    environment.extraInit = optionalString cfg.setSocketVariable ''
+      if [ -z "$DOCKER_HOST" -a -n "$XDG_RUNTIME_DIR" ]; then
+        export DOCKER_HOST="unix://$XDG_RUNTIME_DIR/docker.sock"
+      fi
+    '';
+
+    # Taken from https://github.com/moby/moby/blob/master/contrib/dockerd-rootless-setuptool.sh
+    systemd.user.services.docker = {
+      wantedBy = [ "default.target" ];
+      description = "Docker Application Container Engine (Rootless)";
+      # needs newuidmap from pkgs.shadow
+      path = [ "/run/wrappers" ];
+      environment = proxy_env;
+      unitConfig = {
+        # docker-rootless doesn't support running as root.
+        ConditionUser = "!root";
+        StartLimitInterval = "60s";
+      };
+      serviceConfig = {
+        Type = "notify";
+        ExecStart = "${cfg.package}/bin/dockerd-rootless --config-file=${daemonSettingsFile}";
+        ExecReload = "${pkgs.procps}/bin/kill -s HUP $MAINPID";
+        TimeoutSec = 0;
+        RestartSec = 2;
+        Restart = "always";
+        StartLimitBurst = 3;
+        LimitNOFILE = "infinity";
+        LimitNPROC = "infinity";
+        LimitCORE = "infinity";
+        Delegate = true;
+        NotifyAccess = "all";
+        KillMode = "mixed";
+      };
+    };
+  };
+
+}
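+
+# A minimal usage sketch (not part of this module): enabling rootless docker
+# for normal users and exporting DOCKER_HOST automatically via the options above.
+#
+# {
+#   virtualisation.docker.rootless = {
+#     enable = true;
+#     setSocketVariable = true;
+#   };
+# }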
diff --git a/nixpkgs/nixos/modules/virtualisation/docker.nix b/nixpkgs/nixos/modules/virtualisation/docker.nix
new file mode 100644
index 000000000000..6fe460316091
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/docker.nix
@@ -0,0 +1,262 @@
+# Systemd services for docker.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.docker;
+  proxy_env = config.networking.proxy.envVars;
+  settingsFormat = pkgs.formats.json {};
+  daemonSettingsFile = settingsFormat.generate "daemon.json" cfg.daemon.settings;
+in
+
+{
+  ###### interface
+
+  options.virtualisation.docker = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            This option enables docker, a daemon that manages
+            linux containers. Users in the "docker" group can interact with
+            the daemon (e.g. to start or stop containers) using the
+            {command}`docker` command line tool.
+          '';
+      };
+
+    listenOptions =
+      mkOption {
+        type = types.listOf types.str;
+        default = ["/run/docker.sock"];
+        description =
+          lib.mdDoc ''
+            A list of unix and tcp sockets that docker should listen on. The
+            format follows ListenStream as described in systemd.socket(5).
+          '';
+      };
+
+    enableOnBoot =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            When enabled, dockerd is started on boot. This is required for
+            containers which are created with the
+            `--restart=always` flag to work. If this option is
+            disabled, docker might be started on demand by socket activation.
+          '';
+      };
+
+    daemon.settings =
+      mkOption {
+        type = settingsFormat.type;
+        default = { };
+        example = {
+          ipv6 = true;
+          "fixed-cidr-v6" = "fd00::/80";
+        };
+        description = lib.mdDoc ''
+          Configuration for the docker daemon. The attributes are serialized to JSON and used as daemon.json.
+          See https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+        '';
+      };
+
+    enableNvidia =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enable nvidia-docker wrapper, supporting NVIDIA GPUs inside docker containers.
+        '';
+      };
+
+    liveRestore =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            Allow dockerd to be restarted without affecting running containers.
+            This option is incompatible with docker swarm.
+          '';
+      };
+
+    storageDriver =
+      mkOption {
+        type = types.nullOr (types.enum ["aufs" "btrfs" "devicemapper" "overlay" "overlay2" "zfs"]);
+        default = null;
+        description =
+          lib.mdDoc ''
+            This option determines which Docker storage driver to use. By default
+            it lets docker automatically choose the preferred storage driver.
+          '';
+      };
+
+    logDriver =
+      mkOption {
+        type = types.enum ["none" "json-file" "syslog" "journald" "gelf" "fluentd" "awslogs" "splunk" "etwlogs" "gcplogs" "local"];
+        default = "journald";
+        description =
+          lib.mdDoc ''
+            This option determines which Docker log driver to use.
+          '';
+      };
+
+    extraOptions =
+      mkOption {
+        type = types.separatedString " ";
+        default = "";
+        description =
+          lib.mdDoc ''
+            The extra command-line options to pass to
+            {command}`docker` daemon.
+          '';
+      };
+
+    autoPrune = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to periodically prune Docker resources. If enabled, a
+          systemd timer will run `docker system prune -f`
+          as specified by the `dates` option.
+        '';
+      };
+
+      flags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--all" ];
+        description = lib.mdDoc ''
+          Any additional flags passed to {command}`docker system prune`.
+        '';
+      };
+
+      dates = mkOption {
+        default = "weekly";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specification (in the format described by
+          {manpage}`systemd.time(7)`) of the time at
+          which the prune will occur.
+        '';
+      };
+    };
+
+    package = mkOption {
+      default = pkgs.docker;
+      defaultText = literalExpression "pkgs.docker";
+      type = types.package;
+      description = lib.mdDoc ''
+        Docker package to be used in the module.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = types.listOf types.package;
+      default = [ ];
+      example = literalExpression "with pkgs; [ criu ]";
+      description = lib.mdDoc ''
+        Extra packages to add to PATH for the docker daemon process.
+      '';
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+      boot.kernelModules = [ "bridge" "veth" "br_netfilter" "xt_nat" ];
+      boot.kernel.sysctl = {
+        "net.ipv4.conf.all.forwarding" = mkOverride 98 true;
+        "net.ipv4.conf.default.forwarding" = mkOverride 98 true;
+      };
+      environment.systemPackages = [ cfg.package ]
+        ++ optional cfg.enableNvidia pkgs.nvidia-docker;
+      users.groups.docker.gid = config.ids.gids.docker;
+      systemd.packages = [ cfg.package ];
+
+      systemd.services.docker = {
+        wantedBy = optional cfg.enableOnBoot "multi-user.target";
+        after = [ "network.target" "docker.socket" ];
+        requires = [ "docker.socket" ];
+        environment = proxy_env;
+        serviceConfig = {
+          Type = "notify";
+          ExecStart = [
+            ""
+            ''
+              ${cfg.package}/bin/dockerd \
+                --config-file=${daemonSettingsFile} \
+                ${cfg.extraOptions}
+            ''];
+          ExecReload=[
+            ""
+            "${pkgs.procps}/bin/kill -s HUP $MAINPID"
+          ];
+        };
+
+        path = [ pkgs.kmod ] ++ optional (cfg.storageDriver == "zfs") pkgs.zfs
+          ++ optional cfg.enableNvidia pkgs.nvidia-docker
+          ++ cfg.extraPackages;
+      };
+
+      systemd.sockets.docker = {
+        description = "Docker Socket for the API";
+        wantedBy = [ "sockets.target" ];
+        socketConfig = {
+          ListenStream = cfg.listenOptions;
+          SocketMode = "0660";
+          SocketUser = "root";
+          SocketGroup = "docker";
+        };
+      };
+
+      systemd.services.docker-prune = {
+        description = "Prune docker resources";
+
+        restartIfChanged = false;
+        unitConfig.X-StopOnRemoval = false;
+
+        serviceConfig.Type = "oneshot";
+
+        script = ''
+          ${cfg.package}/bin/docker system prune -f ${toString cfg.autoPrune.flags}
+        '';
+
+        startAt = optional cfg.autoPrune.enable cfg.autoPrune.dates;
+        after = [ "docker.service" ];
+        requires = [ "docker.service" ];
+      };
+
+      assertions = [
+        { assertion = cfg.enableNvidia && pkgs.stdenv.isx86_64 -> config.hardware.opengl.driSupport32Bit or false;
+          message = "Option enableNvidia on x86_64 requires 32bit support libraries";
+        }];
+
+      virtualisation.docker.daemon.settings = {
+        group = "docker";
+        hosts = [ "fd://" ];
+        log-driver = mkDefault cfg.logDriver;
+        storage-driver = mkIf (cfg.storageDriver != null) (mkDefault cfg.storageDriver);
+        live-restore = mkDefault cfg.liveRestore;
+        runtimes = mkIf cfg.enableNvidia {
+          nvidia = {
+            path = "${pkgs.nvidia-docker}/bin/nvidia-container-runtime";
+          };
+        };
+      };
+    }
+  ]);
+
+  imports = [
+    (mkRemovedOptionModule ["virtualisation" "docker" "socketActivation"] "This option was removed and socket activation is now always active")
+  ];
+
+}
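+
+# A minimal usage sketch (not part of this module): enabling docker with an
+# explicit storage driver and weekly pruning, using the options defined above.
+#
+# {
+#   virtualisation.docker = {
+#     enable = true;
+#     storageDriver = "overlay2";
+#     autoPrune.enable = true;
+#     autoPrune.flags = [ "--all" ];
+#   };
+# }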
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix b/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix
new file mode 100644
index 000000000000..1ffb326ba7a8
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-amis.nix
@@ -0,0 +1,9 @@
+# Compatibility shim
+let
+  lib = import ../../../lib;
+  inherit (lib) mapAttrs;
+  everything = import ./amazon-ec2-amis.nix;
+  doAllVersions = mapAttrs (versionName: doRegion);
+  doRegion = mapAttrs (regionName: systems: systems.x86_64-linux);
+in
+  doAllVersions everything
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-data.nix b/nixpkgs/nixos/modules/virtualisation/ec2-data.nix
new file mode 100644
index 000000000000..0cc6d9938e22
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-data.nix
@@ -0,0 +1,92 @@
+# This module defines a systemd service that sets the SSH host key and
+# authorized client key and host name of virtual machines running on
+# Amazon EC2, Eucalyptus and OpenStack Compute (Nova).
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "ec2" "metadata" ] "")
+  ];
+
+  config = {
+
+    systemd.services.apply-ec2-data =
+      { description = "Apply EC2 Data";
+
+        wantedBy = [ "multi-user.target" "sshd.service" ];
+        before = [ "sshd.service" ];
+        after = ["fetch-ec2-metadata.service"];
+
+        path = [ pkgs.iproute2 ];
+
+        script =
+          ''
+            ${optionalString (config.networking.hostName == "") ''
+              echo "setting host name..."
+              if [ -s /etc/ec2-metadata/hostname ]; then
+                  ${pkgs.nettools}/bin/hostname $(cat /etc/ec2-metadata/hostname)
+              fi
+            ''}
+
+            if ! [ -e /root/.ssh/authorized_keys ]; then
+                echo "obtaining SSH key..."
+                mkdir -m 0700 -p /root/.ssh
+                if [ -s /etc/ec2-metadata/public-keys-0-openssh-key ]; then
+                    cat /etc/ec2-metadata/public-keys-0-openssh-key >> /root/.ssh/authorized_keys
+                    echo "new key added to authorized_keys"
+                    chmod 600 /root/.ssh/authorized_keys
+                fi
+            fi
+
+            # Extract the intended SSH host key for this machine from
+            # the supplied user data, if available.  Otherwise sshd will
+            # generate one normally.
+            userData=/etc/ec2-metadata/user-data
+
+            mkdir -m 0755 -p /etc/ssh
+
+            if [ -s "$userData" ]; then
+              key="$(sed 's/|/\n/g; s/SSH_HOST_DSA_KEY://; t; d' $userData)"
+              key_pub="$(sed 's/SSH_HOST_DSA_KEY_PUB://; t; d' $userData)"
+              if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_dsa_key ]; then
+                  (umask 077; echo "$key" > /etc/ssh/ssh_host_dsa_key)
+                  echo "$key_pub" > /etc/ssh/ssh_host_dsa_key.pub
+              fi
+
+              key="$(sed 's/|/\n/g; s/SSH_HOST_ED25519_KEY://; t; d' $userData)"
+              key_pub="$(sed 's/SSH_HOST_ED25519_KEY_PUB://; t; d' $userData)"
+              if [ -n "$key" -a -n "$key_pub" -a ! -e /etc/ssh/ssh_host_ed25519_key ]; then
+                  (umask 077; echo "$key" > /etc/ssh/ssh_host_ed25519_key)
+                  echo "$key_pub" > /etc/ssh/ssh_host_ed25519_key.pub
+              fi
+            fi
+          '';
+
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+    systemd.services.print-host-key =
+      { description = "Print SSH Host Key";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "sshd.service" ];
+        script =
+          ''
+            # Print the host public key on the console so that the user
+            # can obtain it securely by parsing the output of
+            # ec2-get-console-output.
+            echo "-----BEGIN SSH HOST KEY FINGERPRINTS-----" > /dev/console
+            for i in /etc/ssh/ssh_host_*_key.pub; do
+                ${config.programs.ssh.package}/bin/ssh-keygen -l -f $i > /dev/console
+            done
+            echo "-----END SSH HOST KEY FINGERPRINTS-----" > /dev/console
+          '';
+        serviceConfig.Type = "oneshot";
+        serviceConfig.RemainAfterExit = true;
+      };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.sh b/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.sh
new file mode 100644
index 000000000000..716aff7c22fb
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ec2-metadata-fetcher.sh
@@ -0,0 +1,66 @@
+metaDir=/etc/ec2-metadata
+mkdir -m 0755 -p "$metaDir"
+rm -f "$metaDir/*"
+
+get_imds_token() {
+  # retry-delay of 1 selected to give the system a second to get going,
+  # but not add a lot to the bootup time
+  curl \
+    --silent \
+    --show-error \
+    --retry 3 \
+    --retry-delay 1 \
+    --fail \
+    -X PUT \
+    --connect-timeout 1 \
+    -H "X-aws-ec2-metadata-token-ttl-seconds: 600" \
+    http://169.254.169.254/latest/api/token
+}
+
+preflight_imds_token() {
+  # retry-delay of 1 selected to give the system a second to get going,
+  # but not add a lot to the bootup time
+  curl \
+    --silent \
+    --show-error \
+    --retry 3 \
+    --retry-delay 1 \
+    --fail \
+    --connect-timeout 1 \
+    -H "X-aws-ec2-metadata-token: $IMDS_TOKEN" \
+    -o /dev/null \
+    http://169.254.169.254/1.0/meta-data/instance-id
+}
+
+try=1
+while [ $try -le 3 ]; do
+  echo "(attempt $try/3) getting an EC2 instance metadata service v2 token..."
+  IMDS_TOKEN=$(get_imds_token) && break
+  try=$((try + 1))
+  sleep 1
+done
+
+if [ "x$IMDS_TOKEN" == "x" ]; then
+  echo "failed to fetch an IMDS2v token."
+fi
+
+try=1
+while [ $try -le 10 ]; do
+  echo "(attempt $try/10) validating the EC2 instance metadata service v2 token..."
+  preflight_imds_token && break
+  try=$((try + 1))
+  sleep 1
+done
+
+echo "getting EC2 instance metadata..."
+
+get_imds() {
+  # --fail to avoid populating missing files with 404 HTML response body
+  # || true to allow the script to continue even when encountering a 404
+  curl --silent --show-error --fail --header "X-aws-ec2-metadata-token: $IMDS_TOKEN" "$@" || true
+}
+
+get_imds -o "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path
+(umask 077 && get_imds -o "$metaDir/user-data" http://169.254.169.254/1.0/user-data)
+get_imds -o "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname
+get_imds -o "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key
diff --git a/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix b/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix
new file mode 100644
index 000000000000..dd87df9a2780
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/ecs-agent.nix
@@ -0,0 +1,45 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.services.ecs-agent;
+in {
+  options.services.ecs-agent = {
+    enable = mkEnableOption (lib.mdDoc "Amazon ECS agent");
+
+    package = mkOption {
+      type = types.path;
+      description = lib.mdDoc "The ECS agent package to use";
+      default = pkgs.ecs-agent;
+      defaultText = literalExpression "pkgs.ecs-agent";
+    };
+
+    extra-environment = mkOption {
+      type = types.attrsOf types.str;
+      description = lib.mdDoc "The environment the ECS agent should run with. See the ECS agent documentation for keys that work here.";
+      default = {};
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    # The ECS agent cannot run without a local docker daemon. Unlike potentially remote services such as postgresql,
+    # docker has to run on this host, so `docker.enable` is always set when the ECS agent is enabled.
+    virtualisation.docker.enable = true;
+
+    systemd.services.ecs-agent = {
+      inherit (cfg.package.meta) description;
+      after    = [ "network.target" ];
+      wantedBy = [ "multi-user.target" ];
+
+      environment = cfg.extra-environment;
+
+      script = ''
+        if [ ! -z "$ECS_DATADIR" ]; then
+          mkdir -p "$ECS_DATADIR"
+        fi
+        ${cfg.package}/bin/agent
+      '';
+    };
+  };
+}
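+
+# A minimal usage sketch (not part of this module): enabling the ECS agent and
+# pointing it at a cluster through its environment. ECS_CLUSTER is assumed from
+# the ECS agent documentation; the cluster name is illustrative.
+#
+# {
+#   services.ecs-agent = {
+#     enable = true;
+#     extra-environment.ECS_CLUSTER = "my-cluster";
+#   };
+# }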
diff --git a/nixpkgs/nixos/modules/virtualisation/gce-images.nix b/nixpkgs/nixos/modules/virtualisation/gce-images.nix
new file mode 100644
index 000000000000..7b027619a443
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/gce-images.nix
@@ -0,0 +1,17 @@
+let self = {
+  "14.12" = "gs://nixos-cloud-images/nixos-14.12.471.1f09b77-x86_64-linux.raw.tar.gz";
+  "15.09" = "gs://nixos-cloud-images/nixos-15.09.425.7870f20-x86_64-linux.raw.tar.gz";
+  "16.03" = "gs://nixos-cloud-images/nixos-image-16.03.847.8688c17-x86_64-linux.raw.tar.gz";
+  "17.03" = "gs://nixos-cloud-images/nixos-image-17.03.1082.4aab5c5798-x86_64-linux.raw.tar.gz";
+  "18.03" = "gs://nixos-cloud-images/nixos-image-18.03.132536.fdb5ba4cdf9-x86_64-linux.raw.tar.gz";
+  "18.09" = "gs://nixos-cloud-images/nixos-image-18.09.1228.a4c4cbb613c-x86_64-linux.raw.tar.gz";
+
+  # This format will be handled by the upcoming NixOPS 2.0 release.
+  # The old images based on a GS object are deprecated.
+  "20.09" = {
+    project = "nixos-cloud";
+    name = "nixos-image-20-09-3531-3858fbc08e6-x86-64-linux";
+  };
+
+  latest = self."20.09";
+}; in self
diff --git a/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix b/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix
new file mode 100644
index 000000000000..887af26949fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/google-compute-config.nix
@@ -0,0 +1,116 @@
+{ config, lib, pkgs, ... }:
+
+let
+  inherit (lib)
+    boolToString
+    mkDefault
+    mkIf
+    optional
+    readFile
+  ;
+in
+
+{
+  imports = [
+    ../profiles/headless.nix
+    ../profiles/qemu-guest.nix
+  ];
+
+
+  fileSystems."/" = {
+    fsType = "ext4";
+    device = "/dev/disk/by-label/nixos";
+    autoResize = true;
+  };
+
+  boot.growPartition = true;
+  boot.kernelParams = [ "console=ttyS0" "panic=1" "boot.panic_on_fail" ];
+  boot.initrd.kernelModules = [ "virtio_scsi" ];
+  boot.kernelModules = [ "virtio_pci" "virtio_net" ];
+
+  # Generate a GRUB menu.
+  boot.loader.grub.device = "/dev/sda";
+  boot.loader.timeout = 0;
+
+  # Don't put old configurations in the GRUB menu.  The user has no
+  # way to select them anyway.
+  boot.loader.grub.configurationLimit = 0;
+
+  # Allow root logins only using SSH keys
+  # and disable password authentication in general
+  services.openssh.enable = true;
+  services.openssh.settings.PermitRootLogin = mkDefault "prohibit-password";
+  services.openssh.settings.PasswordAuthentication = mkDefault false;
+
+  # enable OS Login. This also requires setting enable-oslogin=TRUE metadata on
+  # instance or project level
+  security.googleOsLogin.enable = true;
+
+  # Use GCE udev rules for dynamic disk volumes
+  services.udev.packages = [ pkgs.google-guest-configs ];
+  services.udev.path = [ pkgs.google-guest-configs ];
+
+  # Force getting the hostname from Google Compute.
+  networking.hostName = mkDefault "";
+
+  # Always include cryptsetup so that NixOps can use it.
+  environment.systemPackages = [ pkgs.cryptsetup ];
+
+  # Rely on GCP's firewall instead
+  networking.firewall.enable = mkDefault false;
+
+  # Configure default metadata hostnames
+  networking.extraHosts = ''
+    169.254.169.254 metadata.google.internal metadata
+  '';
+
+  networking.timeServers = [ "metadata.google.internal" ];
+
+  networking.usePredictableInterfaceNames = false;
+
+  # GC has 1460 MTU
+  networking.interfaces.eth0.mtu = 1460;
+
+  systemd.packages = [ pkgs.google-guest-agent ];
+  systemd.services.google-guest-agent = {
+    wantedBy = [ "multi-user.target" ];
+    restartTriggers = [ config.environment.etc."default/instance_configs.cfg".source ];
+    path = optional config.users.mutableUsers pkgs.shadow;
+  };
+  systemd.services.google-startup-scripts.wantedBy = [ "multi-user.target" ];
+  systemd.services.google-shutdown-scripts.wantedBy = [ "multi-user.target" ];
+
+  security.sudo.extraRules = mkIf config.users.mutableUsers [
+    { groups = [ "google-sudoers" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
+  ];
+
+  security.sudo-rs.extraRules = mkIf config.users.mutableUsers [
+    { groups = [ "google-sudoers" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
+  ];
+
+  users.groups.google-sudoers = mkIf config.users.mutableUsers { };
+
+  boot.extraModprobeConfig = readFile "${pkgs.google-guest-configs}/etc/modprobe.d/gce-blacklist.conf";
+
+  environment.etc."sysctl.d/60-gce-network-security.conf".source = "${pkgs.google-guest-configs}/etc/sysctl.d/60-gce-network-security.conf";
+
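+  # Configuration consumed by the Google guest agent: the accounts daemon is only
+  # enabled when users are mutable, while host keys and network setup are left to NixOS.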
+  environment.etc."default/instance_configs.cfg".text = ''
+    [Accounts]
+    useradd_cmd = useradd -m -s /run/current-system/sw/bin/bash -p * {user}
+
+    [Daemons]
+    accounts_daemon = ${boolToString config.users.mutableUsers}
+
+    [InstanceSetup]
+    # Make sure GCE image does not replace host key that NixOps sets.
+    set_host_keys = false
+
+    [MetadataScripts]
+    default_shell = ${pkgs.stdenv.shell}
+
+    [NetworkInterfaces]
+    dhclient_script = ${pkgs.google-guest-configs}/bin/google-dhclient-script
+    # We set up network interfaces declaratively.
+    setup = false
+  '';
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix b/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix
new file mode 100644
index 000000000000..e4a18fd81d71
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/google-compute-image.nix
@@ -0,0 +1,84 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.googleComputeImage;
+  defaultConfigFile = pkgs.writeText "configuration.nix" ''
+    { ... }:
+    {
+      imports = [
+        <nixpkgs/nixos/modules/virtualisation/google-compute-image.nix>
+      ];
+    }
+  '';
+in
+{
+
+  imports = [ ./google-compute-config.nix ];
+
+  options = {
+    virtualisation.googleComputeImage.diskSize = mkOption {
+      type = with types; either (enum [ "auto" ]) int;
+      default = "auto";
+      example = 1536;
+      description = lib.mdDoc ''
+        Size of disk image. Unit is MB.
+      '';
+    };
+
+    virtualisation.googleComputeImage.configFile = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      description = lib.mdDoc ''
+        A path to a configuration file which will be placed at `/etc/nixos/configuration.nix`
+        and be used when switching to a new configuration.
+        If set to `null`, a default configuration is used, where the only import is
+        `<nixpkgs/nixos/modules/virtualisation/google-compute-image.nix>`.
+      '';
+    };
+
+    virtualisation.googleComputeImage.compressionLevel = mkOption {
+      type = types.int;
+      default = 6;
+      description = lib.mdDoc ''
+        GZIP compression level of the resulting disk image (1-9).
+      '';
+    };
+    virtualisation.googleComputeImage.efi = mkEnableOption "EFI booting";
+  };
+
+  #### implementation
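+  # Build sketch (illustrative; ./configuration.nix is a placeholder that should import this module):
+  #   nix-build '<nixpkgs/nixos>' -A config.system.build.googleComputeImage \
+  #     --arg configuration ./configuration.nix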
+  config = {
+    boot.initrd.availableKernelModules = [ "nvme" ];
+    boot.loader.grub = mkIf cfg.efi {
+      device = mkForce "nodev";
+      efiSupport = true;
+      efiInstallAsRemovable = true;
+    };
+
+    fileSystems."/boot" = mkIf cfg.efi {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    system.build.googleComputeImage = import ../../lib/make-disk-image.nix {
+      name = "google-compute-image";
+      postVM = ''
+        PATH=$PATH:${with pkgs; lib.makeBinPath [ gnutar gzip ]}
+        pushd $out
+        mv $diskImage disk.raw
+        tar -Sc disk.raw | gzip -${toString cfg.compressionLevel} > \
+          nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.raw.tar.gz
+        rm $out/disk.raw
+        popd
+      '';
+      format = "raw";
+      configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
+      partitionTableType = if cfg.efi then "efi" else "legacy";
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+
+  };
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/grow-partition.nix b/nixpkgs/nixos/modules/virtualisation/grow-partition.nix
new file mode 100644
index 000000000000..444c0bc1630e
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/grow-partition.nix
@@ -0,0 +1,3 @@
+# This profile is deprecated, use boot.growPartition directly.
+builtins.trace "the profile <nixos/modules/virtualisation/grow-partition.nix> is deprecated, use boot.growPartition instead"
+{ }
diff --git a/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix b/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix
new file mode 100644
index 000000000000..cba4f92abe82
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/hyperv-guest.nix
@@ -0,0 +1,64 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.hypervGuest;
+
+in {
+  options = {
+    virtualisation.hypervGuest = {
+      enable = mkEnableOption (lib.mdDoc "Hyper-V Guest Support");
+
+      videoMode = mkOption {
+        type = types.str;
+        default = "1152x864";
+        example = "1024x768";
+        description = lib.mdDoc ''
+          Resolution at which to initialize the video adapter.
+
+          Supports screen resolution up to Full HD 1920x1080 with 32 bit color
+          on Windows Server 2012, and 1600x1200 with 16 bit color on Windows
+          Server 2008 R2 or earlier.
+        '';
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    boot = {
+      initrd.kernelModules = [
+        "hv_balloon" "hv_netvsc" "hv_storvsc" "hv_utils" "hv_vmbus"
+      ];
+
+      initrd.availableKernelModules = [ "hyperv_keyboard" ];
+
+      kernelParams = [
+        "video=hyperv_fb:${cfg.videoMode}" "elevator=noop"
+      ];
+    };
+
+    environment.systemPackages = [ config.boot.kernelPackages.hyperv-daemons.bin ];
+
+    # enable hotadding cpu/memory
+    services.udev.packages = lib.singleton (pkgs.writeTextFile {
+      name = "hyperv-cpu-and-memory-hotadd-udev-rules";
+      destination = "/etc/udev/rules.d/99-hyperv-cpu-and-memory-hotadd.rules";
+      text = ''
+        # Memory hotadd
+        SUBSYSTEM=="memory", ACTION=="add", DEVPATH=="/devices/system/memory/memory[0-9]*", TEST=="state", ATTR{state}="online"
+
+        # CPU hotadd
+        SUBSYSTEM=="cpu", ACTION=="add", DEVPATH=="/devices/system/cpu/cpu[0-9]*", TEST=="online", ATTR{online}="1"
+      '';
+    });
+
+    systemd = {
+      packages = [ config.boot.kernelPackages.hyperv-daemons.lib ];
+
+      targets.hyperv-daemons = {
+        wantedBy = [ "multi-user.target" ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix b/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix
new file mode 100644
index 000000000000..efaea0c110d2
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/hyperv-image.nix
@@ -0,0 +1,71 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  cfg = config.hyperv;
+
+in {
+  options = {
+    hyperv = {
+      baseImageSize = mkOption {
+        type = with types; either (enum [ "auto" ]) int;
+        default = "auto";
+        example = 2048;
+        description = lib.mdDoc ''
+          The size of the hyper-v base image in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-hyperv-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = lib.mdDoc ''
+          The name of the derivation for the hyper-v appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.vhdx";
+        description = lib.mdDoc ''
+          The file name of the hyper-v appliance.
+        '';
+      };
+    };
+  };
+
+  config = {
+    system.build.hypervImage = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o subformat=dynamic -O vhdx $diskImage $out/${cfg.vmFileName}
+        rm $diskImage
+      '';
+      format = "raw";
+      diskSize = cfg.baseImageSize;
+      partitionTableType = "efi";
+      inherit config lib pkgs;
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    fileSystems."/boot" = {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.growPartition = true;
+
+    boot.loader.grub = {
+      version = 2;
+      device = "nodev";
+      efiSupport = true;
+      efiInstallAsRemovable = true;
+    };
+
+    virtualisation.hypervGuest.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/includes-to-excludes.py b/nixpkgs/nixos/modules/virtualisation/includes-to-excludes.py
new file mode 100644
index 000000000000..05ef9c0f23b9
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/includes-to-excludes.py
@@ -0,0 +1,86 @@
+
+# Convert a list of strings to a regex that matches everything but those strings
+# ... and it had to be a POSIX regex; no negative lookahead :(
+# This is a workaround for erofs supporting only exclude regex, not an include list
+
+import sys
+import re
+from collections import defaultdict
+
+# We can configure this script to match in different ways if we need to.
+# The regex got too long for the argument list, so we had to truncate the
+# hashes and use MATCH_STRING_PREFIX. That's less accurate, and might pick up some
+# garbage like .lock files, but only if the sandbox doesn't hide those. Even
+# then it should be harmless.
+
+# Produce the negation of ^a$
+MATCH_EXACTLY = ".+"
+# Produce the negation of ^a
+MATCH_STRING_PREFIX = "//X" # //X should be epsilon regex instead. Not supported??
+# Produce the negation of ^a/?
+MATCH_SUBPATHS = "[^/].*$"
+
+# match_end = MATCH_SUBPATHS
+match_end = MATCH_STRING_PREFIX
+# match_end = MATCH_EXACTLY
+
+def chars_to_inverted_class(letters):
+    assert len(letters) > 0
+    letters = list(letters)
+
+    s = "[^"
+
+    if "]" in letters:
+        s += "]"
+        letters.remove("]")
+
+    final = ""
+    if "-" in letters:
+        final = "-"
+        letters.remove("-")
+
+    s += "".join(letters)
+
+    s += final
+
+    s += "]"
+
+    return s
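+
+# For example, chars_to_inverted_class(["a", "b"]) yields "[^ab]", and
+# chars_to_inverted_class(["a", "-"]) yields "[^a-]" ("-" is emitted last so it stays literal).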
+
+# There's probably at least one bug in here, but it seems to work well enough
+# for filtering store paths.
+def strings_to_inverted_regex(strings):
+    s = "("
+
+    # Match anything that starts with the wrong character
+
+    chars = defaultdict(list)
+
+    for item in strings:
+        if item != "":
+            chars[item[0]].append(item[1:])
+
+    if len(chars) == 0:
+        s += match_end
+    else:
+        s += chars_to_inverted_class(chars)
+
+    # Now match anything that starts with the right char, but then goes wrong
+
+    for char, sub in chars.items():
+        s += "|(" + re.escape(char) + strings_to_inverted_regex(sub) + ")"
+
+    s += ")"
+    return s
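+
+# For a single input such as "ab" (with the default match_end), the resulting group
+# matches any line that does not start with "ab"; the caller below anchors it with "^".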
+
+if __name__ == "__main__":
+    stdin_lines = []
+    for line in sys.stdin:
+        if line.strip() != "":
+            stdin_lines.append(line.strip())
+
+    print("^" + strings_to_inverted_regex(stdin_lines))
+
+# Test:
+# (echo foo; echo fo/; echo foo/; echo foo/ba/r; echo b; echo az; echo az/; echo az/a; echo ab; echo ab/a; echo ab/; echo abc; echo abcde; echo abb; echo ac; echo b) | grep -vE "$((echo ab; echo az; echo foo;) | python includes-to-excludes.py | tee /dev/stderr )"
+# should print ab, az, foo and their subpaths
diff --git a/nixpkgs/nixos/modules/virtualisation/incus.nix b/nixpkgs/nixos/modules/virtualisation/incus.nix
new file mode 100644
index 000000000000..3a4f0d7157a0
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/incus.nix
@@ -0,0 +1,236 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.virtualisation.incus;
+  preseedFormat = pkgs.formats.yaml { };
+in
+{
+  meta.maintainers = [ lib.maintainers.adamcstephens ];
+
+  options = {
+    virtualisation.incus = {
+      enable = lib.mkEnableOption (lib.mdDoc ''
+        incusd, a daemon that manages containers and virtual machines.
+
+        Users in the "incus-admin" group can interact with
+        the daemon (e.g. to start or stop containers) using the
+        {command}`incus` command line tool, among others.
+      '');
+
+      package = lib.mkPackageOptionMD pkgs "incus" { };
+
+      lxcPackage = lib.mkPackageOptionMD pkgs "lxc" { };
+
+      preseed = lib.mkOption {
+        type = lib.types.nullOr (
+          lib.types.submodule { freeformType = preseedFormat.type; }
+        );
+
+        default = null;
+
+        description = lib.mdDoc ''
+          Configuration for Incus preseed, see
+          <https://linuxcontainers.org/incus/docs/main/howto/initialize/#non-interactive-configuration>
+          for supported values.
+
+          Changes to this will be re-applied to Incus which will overwrite existing entities or create missing ones,
+          but entities will *not* be removed by preseed.
+        '';
+
+        example = {
+          networks = [
+            {
+              name = "incusbr0";
+              type = "bridge";
+              config = {
+                "ipv4.address" = "10.0.100.1/24";
+                "ipv4.nat" = "true";
+              };
+            }
+          ];
+          profiles = [
+            {
+              name = "default";
+              devices = {
+                eth0 = {
+                  name = "eth0";
+                  network = "incusbr0";
+                  type = "nic";
+                };
+                root = {
+                  path = "/";
+                  pool = "default";
+                  size = "35GiB";
+                  type = "disk";
+                };
+              };
+            }
+          ];
+          storage_pools = [
+            {
+              name = "default";
+              driver = "dir";
+              config = {
+                source = "/var/lib/incus/storage-pools/default";
+              };
+            }
+          ];
+        };
+      };
+
+      socketActivation = lib.mkEnableOption (
+        lib.mdDoc ''
+          socket-activation for starting incus.service. Enabling this option
+          will stop incus.service from starting automatically on boot.
+        ''
+      );
+
+      startTimeout = lib.mkOption {
+        type = lib.types.ints.unsigned;
+        default = 600;
+        apply = toString;
+        description = lib.mdDoc ''
+          Time to wait (in seconds) for incusd to become ready to process requests.
+          If incusd does not reply within the configured time, `incus.service` will be
+          considered failed and systemd will attempt to restart it.
+        '';
+      };
+    };
+  };
+
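+  # Minimal usage sketch (illustrative; "alice" is a placeholder user):
+  #   virtualisation.incus.enable = true;
+  #   users.users.alice.extraGroups = [ "incus-admin" ];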
+  config = lib.mkIf cfg.enable {
+    # https://github.com/lxc/incus/blob/f145309929f849b9951658ad2ba3b8f10cbe69d1/doc/reference/server_settings.md
+    boot.kernel.sysctl = {
+      "fs.aio-max-nr" = lib.mkDefault 524288;
+      "fs.inotify.max_queued_events" = lib.mkDefault 1048576;
+      "fs.inotify.max_user_instances" = lib.mkOverride 1050 1048576; # override in case conflict nixos/modules/services/x11/xserver.nix
+      "fs.inotify.max_user_watches" = lib.mkOverride 1050 1048576; # override in case conflict nixos/modules/services/x11/xserver.nix
+      "kernel.dmesg_restrict" = lib.mkDefault 1;
+      "kernel.keys.maxbytes" = lib.mkDefault 2000000;
+      "kernel.keys.maxkeys" = lib.mkDefault 2000;
+      "net.core.bpf_jit_limit" = lib.mkDefault 1000000000;
+      "net.ipv4.neigh.default.gc_thresh3" = lib.mkDefault 8192;
+      "net.ipv6.neigh.default.gc_thresh3" = lib.mkDefault 8192;
+      # vm.max_map_count is set higher in nixos/modules/config/sysctl.nix
+    };
+
+    boot.kernelModules = [
+      "veth"
+      "xt_comment"
+      "xt_CHECKSUM"
+      "xt_MASQUERADE"
+      "vhost_vsock"
+    ] ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
+
+    environment.systemPackages = [ cfg.package ];
+
+    # Note: the following options are also declared in virtualisation.lxc, but
+    # the latter can't be simply enabled to reuse the formers, because it
+    # does a bunch of unrelated things.
+    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
+
+    security.apparmor = {
+      packages = [ cfg.lxcPackage ];
+      policies = {
+        "bin.lxc-start".profile = ''
+          include ${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start
+        '';
+        "lxc-containers".profile = ''
+          include ${cfg.lxcPackage}/etc/apparmor.d/lxc-containers
+        '';
+      };
+    };
+
+    systemd.services.incus = {
+      description = "Incus Container and Virtual Machine Management Daemon";
+
+      wantedBy = lib.mkIf (!cfg.socketActivation) [ "multi-user.target" ];
+      after = [
+        "network-online.target"
+        "lxcfs.service"
+      ] ++ (lib.optional cfg.socketActivation "incus.socket");
+      requires = [
+        "lxcfs.service"
+      ] ++ (lib.optional cfg.socketActivation "incus.socket");
+      wants = [
+        "network-online.target"
+      ];
+
+      path = lib.mkIf config.boot.zfs.enabled [ config.boot.zfs.package ];
+
+      environment = {
+        # Override Path to the LXC template configuration directory
+        INCUS_LXC_TEMPLATE_CONFIG = "${pkgs.lxcfs}/share/lxc/config";
+      };
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/incusd --group incus-admin";
+        ExecStartPost = "${cfg.package}/bin/incusd waitready --timeout=${cfg.startTimeout}";
+        ExecStop = "${cfg.package}/bin/incus admin shutdown";
+
+        KillMode = "process"; # when stopping, leave the containers alone
+        Delegate = "yes";
+        LimitMEMLOCK = "infinity";
+        LimitNOFILE = "1048576";
+        LimitNPROC = "infinity";
+        TasksMax = "infinity";
+
+        Restart = "on-failure";
+        TimeoutStartSec = "${cfg.startTimeout}s";
+        TimeoutStopSec = "30s";
+      };
+    };
+
+    systemd.sockets.incus = lib.mkIf cfg.socketActivation {
+      description = "Incus UNIX socket";
+      wantedBy = [ "sockets.target" ];
+
+      socketConfig = {
+        ListenStream = "/var/lib/incus/unix.socket";
+        SocketMode = "0660";
+        SocketGroup = "incus-admin";
+        Service = "incus.service";
+      };
+    };
+
+    systemd.services.incus-preseed = lib.mkIf (cfg.preseed != null) {
+      description = "Incus initialization with preseed file";
+
+      wantedBy = ["incus.service"];
+      after = ["incus.service"];
+      bindsTo = ["incus.service"];
+      partOf = ["incus.service"];
+
+      script = ''
+        ${cfg.package}/bin/incus admin init --preseed <${
+          preseedFormat.generate "incus-preseed.yaml" cfg.preseed
+        }
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+
+    users.groups.incus-admin = { };
+
+    users.users.root = {
+      # match documented default ranges https://linuxcontainers.org/incus/docs/main/userns-idmap/#allowed-ranges
+      subUidRanges = [
+        {
+          startUid = 1000000;
+          count = 1000000000;
+        }
+      ];
+      subGidRanges = [
+        {
+          startGid = 1000000;
+          count = 1000000000;
+        }
+      ];
+    };
+
+    virtualisation.lxc.lxcfs.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/kubevirt.nix b/nixpkgs/nixos/modules/virtualisation/kubevirt.nix
new file mode 100644
index 000000000000..408822b6af0b
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/kubevirt.nix
@@ -0,0 +1,30 @@
+{ config, lib, pkgs, ... }:
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=ttyS0" ];
+    boot.loader.grub.device = "/dev/vda";
+    boot.loader.timeout = 0;
+
+    services.qemuGuest.enable = true;
+    services.openssh.enable = true;
+    services.cloud-init.enable = true;
+    systemd.services."serial-getty@ttyS0".enable = true;
+
+    system.build.kubevirtImage = import ../../lib/make-disk-image.nix {
+      inherit lib config pkgs;
+      format = "qcow2";
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/kvmgt.nix b/nixpkgs/nixos/modules/virtualisation/kvmgt.nix
new file mode 100644
index 000000000000..1e02636f81f4
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/kvmgt.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.kvmgt;
+
+  kernelPackages = config.boot.kernelPackages;
+
+  vgpuOptions = {
+    uuid = mkOption {
+      type = with types; listOf str;
+      description = lib.mdDoc "UUID(s) of VGPU device. You can generate one with `libossp_uuid`.";
+    };
+  };
+
+in {
+  options = {
+    virtualisation.kvmgt = {
+      enable = mkEnableOption (lib.mdDoc ''
+        KVMGT (iGVT-g) VGPU support. Allows Qemu/KVM guests to share the host's Intel integrated graphics card.
+        Currently only one graphical device can be shared. To allow users to access the device without root, add them
+        to the kvm group: `users.extraUsers.<yourusername>.extraGroups = [ "kvm" ];`
+      '');
+      # multi-GPU support is still an open question
+      device = mkOption {
+        type = types.str;
+        default = "0000:00:02.0";
+        description = lib.mdDoc "PCI ID of graphics card. You can figure it with {command}`ls /sys/class/mdev_bus`.";
+      };
+      vgpus = mkOption {
+        default = {};
+        type = with types; attrsOf (submodule [ { options = vgpuOptions; } ]);
+        description = lib.mdDoc ''
+          Virtual GPUs to be used in Qemu. You can find devices via {command}`ls /sys/bus/pci/devices/*/mdev_supported_types`
+          and info about a device via {command}`cat /sys/bus/pci/devices/*/mdev_supported_types/i915-GVTg_V5_4/description`
+        '';
+        example = {
+          i915-GVTg_V5_8.uuid = [ "a297db4a-f4c2-11e6-90f6-d3b88d6c9525" ];
+        };
+      };
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = singleton {
+      assertion = versionAtLeast kernelPackages.kernel.version "4.16";
+      message = "KVMGT is not properly supported for kernels older than 4.16";
+    };
+
+    boot.kernelModules = [ "kvmgt" ];
+    boot.kernelParams = [ "i915.enable_gvt=1" ];
+
+    services.udev.extraRules = ''
+      SUBSYSTEM=="vfio", OWNER="root", GROUP="kvm"
+    '';
+
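+    # For each configured UUID, a path unit waits for the mdev "create" node of the
+    # chosen type to appear; a matching oneshot service then echoes the UUID into it
+    # to create the VGPU and removes the device again on stop.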
+    systemd = let
+      vgpus = listToAttrs (flatten (mapAttrsToList
+        (mdev: opt: map (id: nameValuePair "kvmgt-${id}" { inherit mdev; uuid = id; }) opt.uuid)
+        cfg.vgpus));
+    in {
+      paths = mapAttrs (_: opt:
+        {
+          description = "KVMGT VGPU ${opt.uuid} path";
+          wantedBy = [ "multi-user.target" ];
+          pathConfig = {
+            PathExists = "/sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${opt.mdev}/create";
+          };
+        }) vgpus;
+
+      services = mapAttrs (_: opt:
+        {
+          description = "KVMGT VGPU ${opt.uuid}";
+          serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+            ExecStart = "${pkgs.runtimeShell} -c 'echo ${opt.uuid} > /sys/bus/pci/devices/${cfg.device}/mdev_supported_types/${opt.mdev}/create'";
+            ExecStop = "${pkgs.runtimeShell} -c 'echo 1 > /sys/bus/pci/devices/${cfg.device}/${opt.uuid}/remove'";
+          };
+        }) vgpus;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ patryk27 ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/libvirtd.nix b/nixpkgs/nixos/modules/virtualisation/libvirtd.nix
new file mode 100644
index 000000000000..708c577ec1ed
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/libvirtd.nix
@@ -0,0 +1,502 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.libvirtd;
+  vswitch = config.virtualisation.vswitch;
+  configFile = pkgs.writeText "libvirtd.conf" ''
+    auth_unix_ro = "polkit"
+    auth_unix_rw = "polkit"
+    ${cfg.extraConfig}
+  '';
+  qemuConfigFile = pkgs.writeText "qemu.conf" ''
+    ${optionalString cfg.qemu.ovmf.enable ''
+      nvram = [ "/run/libvirt/nix-ovmf/AAVMF_CODE.fd:/run/libvirt/nix-ovmf/AAVMF_VARS.fd", "/run/libvirt/nix-ovmf/OVMF_CODE.fd:/run/libvirt/nix-ovmf/OVMF_VARS.fd" ]
+    ''}
+    ${optionalString (!cfg.qemu.runAsRoot) ''
+      user = "qemu-libvirtd"
+      group = "qemu-libvirtd"
+    ''}
+    ${cfg.qemu.verbatimConfig}
+  '';
+  dirName = "libvirt";
+  subDirs = list: [ dirName ] ++ map (e: "${dirName}/${e}") list;
+
+  ovmfModule = types.submodule {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Allows libvirtd to take advantage of OVMF when creating new
+          QEMU VMs with UEFI boot.
+        '';
+      };
+
+      # mkRemovedOptionModule does not work in submodules, do it manually
+      package = mkOption {
+        type = types.nullOr types.package;
+        default = null;
+        internal = true;
+      };
+
+      packages = mkOption {
+        type = types.listOf types.package;
+        default = [ pkgs.OVMF.fd ];
+        defaultText = literalExpression "[ pkgs.OVMF.fd ]";
+        example = literalExpression "[ pkgs.OVMFFull.fd pkgs.pkgsCross.aarch64-multiplatform.OVMF.fd ]";
+        description = lib.mdDoc ''
+          List of OVMF packages to use. Each listed package must contain files named FV/OVMF_CODE.fd and FV/OVMF_VARS.fd, or FV/AAVMF_CODE.fd and FV/AAVMF_VARS.fd.
+        '';
+      };
+    };
+  };
+
+  swtpmModule = types.submodule {
+    options = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Allows libvirtd to use swtpm to create an emulated TPM.
+        '';
+      };
+
+      package = mkOption {
+        type = types.package;
+        default = pkgs.swtpm;
+        defaultText = literalExpression "pkgs.swtpm";
+        description = lib.mdDoc ''
+          swtpm package to use.
+        '';
+      };
+    };
+  };
+
+  qemuModule = types.submodule {
+    options = {
+      package = mkOption {
+        type = types.package;
+        default = pkgs.qemu;
+        defaultText = literalExpression "pkgs.qemu";
+        description = lib.mdDoc ''
+          Qemu package to use with libvirt.
+          `pkgs.qemu` can emulate alien architectures (e.g. aarch64 on x86),
+          while `pkgs.qemu_kvm` saves disk space by only emulating the host architecture.
+        '';
+      };
+
+      runAsRoot = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          If true, libvirtd runs qemu as root.
+          If false, libvirtd runs qemu as unprivileged user qemu-libvirtd.
+          Changing this option to false may cause file permission issues
+          for existing guests. To fix these, manually change ownership
+          of affected files in /var/lib/libvirt/qemu to qemu-libvirtd.
+        '';
+      };
+
+      verbatimConfig = mkOption {
+        type = types.lines;
+        default = ''
+          namespaces = []
+        '';
+        description = lib.mdDoc ''
+          Contents written to the qemu configuration file, qemu.conf.
+          Make sure to include a proper namespace configuration when
+          supplying custom configuration.
+        '';
+      };
+
+      ovmf = mkOption {
+        type = ovmfModule;
+        default = { };
+        description = lib.mdDoc ''
+          QEMU's OVMF options.
+        '';
+      };
+
+      swtpm = mkOption {
+        type = swtpmModule;
+        default = { };
+        description = lib.mdDoc ''
+          QEMU's swtpm options.
+        '';
+      };
+    };
+  };
+
+  hooksModule = types.submodule {
+    options = {
+      daemon = mkOption {
+        type = types.attrsOf types.path;
+        default = { };
+        description = lib.mdDoc ''
+          Hooks that will be placed under /var/lib/libvirt/hooks/daemon.d/
+          and called for daemon start/shutdown/SIGHUP events.
+          Please see https://libvirt.org/hooks.html for documentation.
+        '';
+      };
+
+      qemu = mkOption {
+        type = types.attrsOf types.path;
+        default = { };
+        description = lib.mdDoc ''
+          Hooks that will be placed under /var/lib/libvirt/hooks/qemu.d/
+          and called for qemu domains begin/end/migrate events.
+          Please see https://libvirt.org/hooks.html for documentation.
+        '';
+      };
+
+      lxc = mkOption {
+        type = types.attrsOf types.path;
+        default = { };
+        description = lib.mdDoc ''
+          Hooks that will be placed under /var/lib/libvirt/hooks/lxc.d/
+          and called for lxc domains begin/end events.
+          Please see https://libvirt.org/hooks.html for documentation.
+        '';
+      };
+
+      libxl = mkOption {
+        type = types.attrsOf types.path;
+        default = { };
+        description = lib.mdDoc ''
+          Hooks that will be placed under /var/lib/libvirt/hooks/libxl.d/
+          and called for libxl-handled xen domains begin/end events.
+          Please see https://libvirt.org/hooks.html for documentation.
+        '';
+      };
+
+      network = mkOption {
+        type = types.attrsOf types.path;
+        default = { };
+        description = lib.mdDoc ''
+          Hooks that will be placed under /var/lib/libvirt/hooks/network.d/
+          and called for networks begin/end events.
+          Please see https://libvirt.org/hooks.html for documentation.
+        '';
+      };
+    };
+  };
+in
+{
+
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "libvirtd" "enableKVM" ]
+      "Set the option `virtualisation.libvirtd.qemu.package' instead.")
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuPackage" ]
+      [ "virtualisation" "libvirtd" "qemu" "package" ])
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuRunAsRoot" ]
+      [ "virtualisation" "libvirtd" "qemu" "runAsRoot" ])
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuVerbatimConfig" ]
+      [ "virtualisation" "libvirtd" "qemu" "verbatimConfig" ])
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuOvmf" ]
+      [ "virtualisation" "libvirtd" "qemu" "ovmf" "enable" ])
+    (mkRemovedOptionModule
+      [ "virtualisation" "libvirtd" "qemuOvmfPackage" ]
+      "If this option was set to `foo`, set the option `virtualisation.libvirtd.qemu.ovmf.packages' to `[foo.fd]` instead.")
+    (mkRenamedOptionModule
+      [ "virtualisation" "libvirtd" "qemuSwtpm" ]
+      [ "virtualisation" "libvirtd" "qemu" "swtpm" "enable" ])
+  ];
+
+  ###### interface
+
+  options.virtualisation.libvirtd = {
+
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        This option enables libvirtd, a daemon that manages
+        virtual machines. Users in the "libvirtd" group can interact with
+        the daemon (e.g. to start or stop VMs) using the
+        {command}`virsh` command line tool, among others.
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.libvirt;
+      defaultText = literalExpression "pkgs.libvirt";
+      description = lib.mdDoc ''
+        libvirt package to use.
+      '';
+    };
+
+    extraConfig = mkOption {
+      type = types.lines;
+      default = "";
+      description = lib.mdDoc ''
+        Extra contents appended to the libvirtd configuration file,
+        libvirtd.conf.
+      '';
+    };
+
+    extraOptions = mkOption {
+      type = types.listOf types.str;
+      default = [ ];
+      example = [ "--verbose" ];
+      description = lib.mdDoc ''
+        Extra command line arguments passed to libvirtd on startup.
+      '';
+    };
+
+    onBoot = mkOption {
+      type = types.enum [ "start" "ignore" ];
+      default = "start";
+      description = lib.mdDoc ''
+        Specifies the action to be done to / on the guests when the host boots.
+        The "start" option starts all guests that were running prior to shutdown
+        regardless of their autostart settings. The "ignore" option will not
+        start the formerly running guests on boot. However, any guest marked as
+        autostart will still be automatically started by libvirtd.
+      '';
+    };
+
+    onShutdown = mkOption {
+      type = types.enum [ "shutdown" "suspend" ];
+      default = "suspend";
+      description = lib.mdDoc ''
+        When shutting down / restarting the host, what method should
+        be used to gracefully halt the guests. Setting to "shutdown"
+        will cause an ACPI shutdown of each guest. "suspend" will
+        attempt to save the state of the guests, ready to be restored on boot.
+      '';
+    };
+
+    parallelShutdown = mkOption {
+      type = types.ints.unsigned;
+      default = 0;
+      description = lib.mdDoc ''
+        Number of guests that will be shut down concurrently, taking effect when onShutdown
+        is set to "shutdown". If set to 0, guests will be shut down one after another.
+        The number of guests being shut down at any time will not exceed the number set in
+        this variable.
+      '';
+    };
+
+    allowedBridges = mkOption {
+      type = types.listOf types.str;
+      default = [ "virbr0" ];
+      description = lib.mdDoc ''
+        List of bridge devices that can be used by qemu:///session
+      '';
+    };
+
+    qemu = mkOption {
+      type = qemuModule;
+      default = { };
+      description = lib.mdDoc ''
+        QEMU related options.
+      '';
+    };
+
+    hooks = mkOption {
+      type = hooksModule;
+      default = { };
+      description = lib.mdDoc ''
+        Hooks related options.
+      '';
+    };
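+
+    # Hook usage sketch (illustrative; the name and script contents are placeholders):
+    #   virtualisation.libvirtd.hooks.qemu."10-example" =
+    #     pkgs.writeShellScript "qemu-hook" "echo started >> /tmp/qemu-hook.log";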
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+
+    assertions = [
+      {
+        assertion = config.virtualisation.libvirtd.qemu.ovmf.package == null;
+        message = ''
+        The option virtualisation.libvirtd.qemu.ovmf.package is superseded by virtualisation.libvirtd.qemu.ovmf.packages.
+        If this option was set to `foo`, set the option `virtualisation.libvirtd.qemu.ovmf.packages' to `[foo.fd]` instead.
+        '';
+      }
+      {
+        assertion = config.security.polkit.enable;
+        message = "The libvirtd module currently requires Polkit to be enabled ('security.polkit.enable = true').";
+      }
+    ];
+
+    environment = {
+      # this file is expected in /etc/qemu and not sysconfdir (/var/lib)
+      etc."qemu/bridge.conf".text = lib.concatMapStringsSep "\n"
+        (e:
+          "allow ${e}")
+        cfg.allowedBridges;
+      systemPackages = with pkgs; [ libressl.nc iptables cfg.package cfg.qemu.package ];
+      etc.ethertypes.source = "${pkgs.iptables}/etc/ethertypes";
+    };
+
+    boot.kernelModules = [ "tun" ];
+
+    users.groups.libvirtd.gid = config.ids.gids.libvirtd;
+
+    # libvirtd runs qemu as this user and group by default
+    users.extraGroups.qemu-libvirtd.gid = config.ids.gids.qemu-libvirtd;
+    users.extraUsers.qemu-libvirtd = {
+      uid = config.ids.uids.qemu-libvirtd;
+      isNormalUser = false;
+      group = "qemu-libvirtd";
+    };
+
+    security.wrappers.qemu-bridge-helper = {
+      setuid = true;
+      owner = "root";
+      group = "root";
+      source = "${cfg.qemu.package}/libexec/qemu-bridge-helper";
+    };
+
+    systemd.packages = [ cfg.package ];
+
+    systemd.services.libvirtd-config = {
+      description = "Libvirt Virtual Machine Management Daemon - configuration";
+      script = ''
+        # Copy default libvirt network config .xml files to /var/lib
+        # Files modified by the user will not be overwritten
+        for i in $(cd ${cfg.package}/var/lib && echo \
+            libvirt/qemu/networks/*.xml \
+            libvirt/nwfilter/*.xml );
+        do
+            mkdir -p /var/lib/$(dirname $i) -m 755
+            if [ ! -e /var/lib/$i ]; then
+              cp -pd ${cfg.package}/var/lib/$i /var/lib/$i
+            fi
+        done
+
+        # Copy generated qemu config to libvirt directory
+        cp -f ${qemuConfigFile} /var/lib/${dirName}/qemu.conf
+
+        # stable paths (not garbage-collectable, unlike those in /nix/store) for use in the <emulator> section of XML configs
+        for emulator in ${cfg.package}/libexec/libvirt_lxc ${cfg.qemu.package}/bin/qemu-kvm ${cfg.qemu.package}/bin/qemu-system-*; do
+          ln -s --force "$emulator" /run/${dirName}/nix-emulators/
+        done
+
+        for helper in bin/qemu-pr-helper; do
+          ln -s --force ${cfg.qemu.package}/$helper /run/${dirName}/nix-helpers/
+        done
+
+        ${optionalString cfg.qemu.ovmf.enable (let
+          ovmfpackage = pkgs.buildEnv {
+            name = "qemu-ovmf";
+            paths = cfg.qemu.ovmf.packages;
+          };
+        in
+          ''
+          ln -s --force ${ovmfpackage}/FV/AAVMF_CODE.fd /run/${dirName}/nix-ovmf/
+          ln -s --force ${ovmfpackage}/FV/OVMF_CODE.fd /run/${dirName}/nix-ovmf/
+          ln -s --force ${ovmfpackage}/FV/AAVMF_VARS.fd /run/${dirName}/nix-ovmf/
+          ln -s --force ${ovmfpackage}/FV/OVMF_VARS.fd /run/${dirName}/nix-ovmf/
+        '')}
+
+        # Symlink hooks to /var/lib/libvirt
+        ${concatStringsSep "\n" (map (driver:
+          ''
+          mkdir -p /var/lib/${dirName}/hooks/${driver}.d
+          rm -rf /var/lib/${dirName}/hooks/${driver}.d/*
+          ${concatStringsSep "\n" (mapAttrsToList (name: value:
+            "ln -s --force ${value} /var/lib/${dirName}/hooks/${driver}.d/${name}") cfg.hooks.${driver})}
+        '') (attrNames cfg.hooks))}
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+        RuntimeDirectoryPreserve = "yes";
+        LogsDirectory = subDirs [ "qemu" ];
+        RuntimeDirectory = subDirs [ "nix-emulators" "nix-helpers" "nix-ovmf" ];
+        StateDirectory = subDirs [ "dnsmasq" ];
+      };
+    };
+
+    systemd.services.libvirtd = {
+      wantedBy = [ "multi-user.target" ];
+      requires = [ "libvirtd-config.service" ];
+      after = [ "libvirtd-config.service" ]
+        ++ optional vswitch.enable "ovs-vswitchd.service";
+
+      environment.LIBVIRTD_ARGS = escapeShellArgs (
+        [
+          "--config"
+          configFile
+          "--timeout"
+          "120" # from ${libvirt}/var/lib/sysconfig/libvirtd
+        ] ++ cfg.extraOptions
+      );
+
+      path = [ cfg.qemu.package ] # libvirtd requires qemu-img to manage disk images
+        ++ optional vswitch.enable vswitch.package
+        ++ optional cfg.qemu.swtpm.enable cfg.qemu.swtpm.package;
+
+      serviceConfig = {
+        Type = "notify";
+        KillMode = "process"; # when stopping, leave the VMs alone
+        Restart = "no";
+      };
+      restartIfChanged = false;
+    };
+
+    systemd.services.virtchd = {
+      path = [ pkgs.cloud-hypervisor ];
+    };
+
+    systemd.services.libvirt-guests = {
+      wantedBy = [ "multi-user.target" ];
+      path = with pkgs; [ coreutils gawk cfg.package ];
+      restartIfChanged = false;
+
+      environment.ON_BOOT = "${cfg.onBoot}";
+      environment.ON_SHUTDOWN = "${cfg.onShutdown}";
+      environment.PARALLEL_SHUTDOWN = "${toString cfg.parallelShutdown}";
+    };
+
+    systemd.sockets.virtlogd = {
+      description = "Virtual machine log manager socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "/run/${dirName}/virtlogd-sock" ];
+    };
+
+    systemd.services.virtlogd = {
+      description = "Virtual machine log manager";
+      serviceConfig.ExecStart = "@${cfg.package}/sbin/virtlogd virtlogd";
+      restartIfChanged = false;
+    };
+
+    systemd.sockets.virtlockd = {
+      description = "Virtual machine lock manager socket";
+      wantedBy = [ "sockets.target" ];
+      listenStreams = [ "/run/${dirName}/virtlockd-sock" ];
+    };
+
+    systemd.services.virtlockd = {
+      description = "Virtual machine lock manager";
+      serviceConfig.ExecStart = "@${cfg.package}/sbin/virtlockd virtlockd";
+      restartIfChanged = false;
+    };
+
+    # https://libvirt.org/daemons.html#monolithic-systemd-integration
+    systemd.sockets.libvirtd.wantedBy = [ "sockets.target" ];
+
+    security.polkit = {
+      enable = true;
+      extraConfig = ''
+        polkit.addRule(function(action, subject) {
+          if (action.id == "org.libvirt.unix.manage" &&
+            subject.isInGroup("libvirtd")) {
+            return polkit.Result.YES;
+          }
+        });
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/linode-config.nix b/nixpkgs/nixos/modules/virtualisation/linode-config.nix
new file mode 100644
index 000000000000..bbf81bda9c02
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/linode-config.nix
@@ -0,0 +1,75 @@
+{ config, lib, pkgs, ... }:
+with lib;
+{
+  imports = [ ../profiles/qemu-guest.nix ];
+
+  services.openssh = {
+    enable = true;
+
+    settings.PermitRootLogin = "prohibit-password";
+    settings.PasswordAuthentication = mkDefault false;
+  };
+
+  networking = {
+    usePredictableInterfaceNames = false;
+    useDHCP = false;
+    interfaces.eth0 = {
+      useDHCP = true;
+
+      # Linode expects IPv6 privacy extensions to be disabled, so disable them
+      # See: https://www.linode.com/docs/guides/manual-network-configuration/#static-vs-dynamic-addressing
+      tempAddress = "disabled";
+    };
+  };
+
+  # Install diagnostic tools for Linode support
+  environment.systemPackages = with pkgs; [
+    inetutils
+    mtr
+    sysstat
+  ];
+
+  fileSystems."/" = {
+    fsType = "ext4";
+    device = "/dev/sda";
+    autoResize = true;
+  };
+
+  swapDevices = mkDefault [{ device = "/dev/sdb"; }];
+
+  # Enable LISH and Linode Booting w/ GRUB
+  boot = {
+    # Add Required Kernel Modules
+    # NOTE: These are not documented in the install guide
+    initrd.availableKernelModules = [
+      "virtio_pci"
+      "virtio_scsi"
+      "ahci"
+      "sd_mod"
+    ];
+
+    # Set Up LISH Serial Connection
+    kernelParams = [ "console=ttyS0,19200n8" ];
+    kernelModules = [ "virtio_net" ];
+
+    loader = {
+      # Increase Timeout to Allow LISH Connection
+      # NOTE: The image generator tries to set a timeout of 0, so we must force
+      timeout = lib.mkForce 10;
+
+      grub = {
+        enable = true;
+        version = 2;
+        forceInstall = true;
+        device = "nodev";
+
+        # Allow serial connection for GRUB to be able to use LISH
+        extraConfig = ''
+          serial --speed=19200 --unit=0 --word=8 --parity=no --stop=1;
+          terminal_input serial;
+          terminal_output serial
+        '';
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/linode-image.nix b/nixpkgs/nixos/modules/virtualisation/linode-image.nix
new file mode 100644
index 000000000000..51f793ac011d
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/linode-image.nix
@@ -0,0 +1,66 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.linodeImage;
+  defaultConfigFile = pkgs.writeText "configuration.nix" ''
+    _: {
+      imports = [
+        <nixpkgs/nixos/modules/virtualisation/linode-image.nix>
+      ];
+    }
+  '';
+in
+{
+  imports = [ ./linode-config.nix ];
+
+  options = {
+    virtualisation.linodeImage.diskSize = mkOption {
+      type = with types; either (enum (singleton "auto")) ints.positive;
+      default = "auto";
+      example = 1536;
+      description = ''
+        Size of disk image in MB.
+      '';
+    };
+
+    virtualisation.linodeImage.configFile = mkOption {
+      type = with types; nullOr str;
+      default = null;
+      description = ''
+        A path to a configuration file which will be placed at `/etc/nixos/configuration.nix`
+        and be used when switching to a new configuration.
+        If set to `null`, a default configuration is used, where the only import is
+        `<nixpkgs/nixos/modules/virtualisation/linode-image.nix>`
+      '';
+    };
+
+    virtualisation.linodeImage.compressionLevel = mkOption {
+      type = types.ints.between 1 9;
+      default = 6;
+      description = ''
+        GZIP compression level of the resulting disk image (1-9).
+      '';
+    };
+  };
+
+  config = {
+    system.build.linodeImage = import ../../lib/make-disk-image.nix {
+      name = "linode-image";
+      # NOTE: Linode specifically requires images to be `gzip`-ed prior to upload
+      # See: https://www.linode.com/docs/products/tools/images/guides/upload-an-image/#requirements-and-considerations
+      postVM = ''
+        ${pkgs.gzip}/bin/gzip -${toString cfg.compressionLevel} -c -- $diskImage > \
+        $out/nixos-image-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.img.gz
+        rm $diskImage
+      '';
+      format = "raw";
+      partitionTableType = "none";
+      configFile = if cfg.configFile == null then defaultConfigFile else cfg.configFile;
+      inherit (cfg) diskSize;
+      inherit config lib pkgs;
+    };
+  };
+
+  meta.maintainers = with maintainers; [ cyntheticfox ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc-container.nix b/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
new file mode 100644
index 000000000000..61d7c4cb73fe
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxc-container.nix
@@ -0,0 +1,121 @@
+{ lib, config, pkgs, ... }:
+
+let
+  cfg = config.virtualisation.lxc;
+in {
+  imports = [
+    ./lxc-instance-common.nix
+  ];
+
+  options = {
+    virtualisation.lxc = {
+      nestedContainer = lib.mkEnableOption (lib.mdDoc ''
+        Whether this container is configured as a nested container. On LXD this is recommended
+        for all containers and is enabled with `security.nesting = true`.
+      '');
+
+      privilegedContainer = lib.mkEnableOption (lib.mdDoc ''
+        Whether this LXC container will be running as a privileged container or not. If set to `true` then
+        additional configuration will be applied to the `systemd` instance running within the container as
+        recommended by [distrobuilder](https://linuxcontainers.org/distrobuilder/introduction/).
+      '');
+    };
+  };
+
+  config = {
+    boot.isContainer = true;
+    boot.postBootCommands =
+      ''
+        # After booting, register the contents of the Nix store in the Nix
+        # database.
+        if [ -f /nix-path-registration ]; then
+          ${config.nix.package.out}/bin/nix-store --load-db < /nix-path-registration &&
+          rm /nix-path-registration
+        fi
+
+        # nixos-rebuild also requires a "system" profile
+        ${config.nix.package.out}/bin/nix-env -p /nix/var/nix/profiles/system --set /run/current-system
+      '';
+
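+    # Build sketch (illustrative; ./lxc.nix is a placeholder for a config importing this module):
+    #   nix-build '<nixpkgs/nixos>' -A config.system.build.tarball --arg configuration ./lxc.nix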
+    system.build.tarball = pkgs.callPackage ../../lib/make-system-tarball.nix {
+      extraArgs = "--owner=0";
+
+      storeContents = [
+        {
+          object = config.system.build.toplevel;
+          symlink = "none";
+        }
+      ];
+
+      contents = [
+        {
+          source = config.system.build.toplevel + "/init";
+          target = "/sbin/init";
+        }
+        # Technically this is not required for lxc, but including it also makes this configuration work with systemd-nspawn.
+        # NixOS will set up the same symlink after startup.
+        {
+          source = config.system.build.toplevel + "/etc/os-release";
+          target = "/etc/os-release";
+        }
+      ];
+
+      extraCommands = "mkdir -p proc sys dev";
+    };
+
+    system.build.squashfs = pkgs.callPackage ../../lib/make-squashfs.nix {
+      fileName = "nixos-lxc-image-${pkgs.stdenv.hostPlatform.system}";
+
+      noStrip = true; # keep directory structure
+      comp = "zstd -Xcompression-level 6";
+
+      storeContents = [config.system.build.toplevel];
+
+      pseudoFiles = [
+        "/sbin d 0755 0 0"
+        "/sbin/init s 0555 0 0 ${config.system.build.toplevel}/init"
+        "/dev d 0755 0 0"
+        "/proc d 0555 0 0"
+        "/sys d 0555 0 0"
+      ];
+    };
+
+    system.build.installBootLoader = pkgs.writeScript "install-lxd-sbin-init.sh" ''
+      #!${pkgs.runtimeShell}
+      ${pkgs.coreutils}/bin/ln -fs "$1/init" /sbin/init
+    '';
+
+    systemd.additionalUpstreamSystemUnits = lib.mkIf cfg.nestedContainer ["systemd-udev-trigger.service"];
+
+    # Add the overrides from lxd distrobuilder
+    # https://github.com/lxc/distrobuilder/blob/05978d0d5a72718154f1525c7d043e090ba7c3e0/distrobuilder/main.go#L630
+    systemd.packages = [
+      (pkgs.writeTextFile {
+        name = "systemd-lxc-service-overrides";
+        destination = "/etc/systemd/system/service.d/zzz-lxc-service.conf";
+        text = ''
+          [Service]
+          ProcSubset=all
+          ProtectProc=default
+          ProtectControlGroups=no
+          ProtectKernelTunables=no
+          NoNewPrivileges=no
+          LoadCredential=
+        '' + lib.optionalString cfg.privilegedContainer ''
+          # Additional settings for privileged containers
+          ProtectHome=no
+          ProtectSystem=no
+          PrivateDevices=no
+          PrivateTmp=no
+          ProtectKernelLogs=no
+          ProtectKernelModules=no
+          ReadWritePaths=
+        '';
+      })
+    ];
+
+    system.activationScripts.installInitScript = lib.mkForce ''
+      ln -fs $systemConfig/init /sbin/init
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc-image-metadata.nix b/nixpkgs/nixos/modules/virtualisation/lxc-image-metadata.nix
new file mode 100644
index 000000000000..2c0568b4c468
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxc-image-metadata.nix
@@ -0,0 +1,104 @@
+{ lib, config, pkgs, ... }:
+
+let
+  templateSubmodule = {...}: {
+    options = {
+      enable = lib.mkEnableOption "this template";
+
+      target = lib.mkOption {
+        description = "Path in the container";
+        type = lib.types.path;
+      };
+      template = lib.mkOption {
+        description = ".tpl file for rendering the target";
+        type = lib.types.path;
+      };
+      when = lib.mkOption {
+        description = "Events which trigger a rewrite (create, copy)";
+        type = lib.types.listOf (lib.types.str);
+      };
+      properties = lib.mkOption {
+        description = "Additional properties";
+        type = lib.types.attrs;
+        default = {};
+      };
+    };
+  };
+
+  toYAML = name: data: pkgs.writeText name (lib.generators.toYAML {} data);
+
+  cfg = config.virtualisation.lxc;
+  templates = if cfg.templates != {} then let
+    list = lib.mapAttrsToList (name: value: { inherit name; } // value)
+      (lib.filterAttrs (name: value: value.enable) cfg.templates);
+  in
+    {
+      files = map (tpl: {
+        source = tpl.template;
+        target = "/templates/${tpl.name}.tpl";
+      }) list;
+      properties = lib.listToAttrs (map (tpl: lib.nameValuePair tpl.target {
+        when = tpl.when;
+        template = "${tpl.name}.tpl";
+        properties = tpl.properties;
+      }) list);
+    }
+  else { files = []; properties = {}; };
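+  # `templates.files` are shipped under /templates/ in the metadata tarball, while
+  # `templates.properties` becomes the `templates` section of metadata.yaml below.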
+
+in {
+  options = {
+    virtualisation.lxc = {
+      templates = lib.mkOption {
+        description = "Templates for LXD";
+        type = lib.types.attrsOf (lib.types.submodule templateSubmodule);
+        default = {};
+        example = lib.literalExpression ''
+          {
+            # create /etc/hostname on container creation
+            "hostname" = {
+              enable = true;
+              target = "/etc/hostname";
+              template = builtins.toFile "hostname.tpl" "{{ container.name }}";
+              when = [ "create" ];
+            };
+            # create /etc/nixos/hostname.nix with a configuration for keeping the hostname applied
+            "hostname-nix" = {
+              enable = true;
+              target = "/etc/nixos/hostname.nix";
+              template = builtins.toFile "hostname-nix.tpl" "{ ... }: { networking.hostName = \"{{ container.name }}\"; }";
+              # copy keeps the file updated when the container is changed
+              when = [ "create" "copy" ];
+            };
+            # allow the user to specify a custom configuration.nix
+            "configuration-nix" = {
+              enable = true;
+              target = "/etc/nixos/configuration.nix";
+              template = builtins.toFile "configuration-nix" "{{ config_get(\"user.user-data\", properties.default) }}";
+              when = [ "create" ];
+            };
+          };
+        '';
+      };
+    };
+  };
+
+  config = {
+    system.build.metadata = pkgs.callPackage ../../lib/make-system-tarball.nix {
+      contents = [
+        {
+          source = toYAML "metadata.yaml" {
+            architecture = builtins.elemAt (builtins.match "^([a-z0-9_]+).+" (toString pkgs.system)) 0;
+            creation_date = 1;
+            properties = {
+              description = "${config.system.nixos.distroName} ${config.system.nixos.codeName} ${config.system.nixos.label} ${pkgs.system}";
+              os = "${config.system.nixos.distroId}";
+              release = "${config.system.nixos.codeName}";
+            };
+            templates = templates.properties;
+          };
+          target = "/metadata.yaml";
+        }
+      ] ++ templates.files;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc-instance-common.nix b/nixpkgs/nixos/modules/virtualisation/lxc-instance-common.nix
new file mode 100644
index 000000000000..d6a0e05fb1c9
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxc-instance-common.nix
@@ -0,0 +1,30 @@
+{lib, ...}:
+
+{
+  imports = [
+    ./lxc-image-metadata.nix
+
+    ../installer/cd-dvd/channel.nix
+    ../profiles/clone-config.nix
+    ../profiles/minimal.nix
+  ];
+
+  # Allow the user to login as root without password.
+  users.users.root.initialHashedPassword = lib.mkOverride 150 "";
+
+  # Some more help text.
+  services.getty.helpLine = ''
+
+    Log in as "root" with an empty password.
+  '';
+
+  # Containers should be light-weight, so start sshd on demand.
+  services.openssh.enable = lib.mkDefault true;
+  services.openssh.startWhenNeeded = lib.mkDefault true;
+
+  # As this is intended as a standalone image, undo some of the minimal profile stuff
+  environment.noXlibs = false;
+  documentation.enable = true;
+  documentation.nixos.enable = true;
+  services.logrotate.enable = true;
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/lxc.nix b/nixpkgs/nixos/modules/virtualisation/lxc.nix
new file mode 100644
index 000000000000..5bd64a5f9a56
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxc.nix
@@ -0,0 +1,82 @@
+# LXC Configuration
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.lxc;
+
+in
+
+{
+  ###### interface
+
+  options.virtualisation.lxc = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            This enables Linux Containers (LXC), which provides tools
+            for creating and managing system or application containers
+            on Linux.
+          '';
+      };
+
+    systemConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          lib.mdDoc ''
+            This is the system-wide LXC config. See
+            {manpage}`lxc.system.conf(5)`.
+          '';
+      };
+
+    defaultConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          lib.mdDoc ''
+            Default config (default.conf) for new containers, e.g. for
+            network config. See {manpage}`lxc.container.conf(5)`.
+          '';
+      };
+
+    usernetConfig =
+      mkOption {
+        type = types.lines;
+        default = "";
+        description =
+          lib.mdDoc ''
+            This is the config file for managing unprivileged user network
+            administration access in LXC. See {manpage}`lxc-usernet(5)`.
+          '';
+      };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    environment.systemPackages = [ pkgs.lxc ];
+    environment.etc."lxc/lxc.conf".text = cfg.systemConfig;
+    environment.etc."lxc/lxc-usernet".text = cfg.usernetConfig;
+    environment.etc."lxc/default.conf".text = cfg.defaultConfig;
+    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
+
+    security.apparmor.packages = [ pkgs.lxc ];
+    security.apparmor.policies = {
+      "bin.lxc-start".profile = ''
+        include ${pkgs.lxc}/etc/apparmor.d/usr.bin.lxc-start
+      '';
+      "lxc-containers".profile = ''
+        include ${pkgs.lxc}/etc/apparmor.d/lxc-containers
+      '';
+    };
+  };
+}
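As a usage sketch (the bridge name, user name and LXC keys are illustrative, not defaults of this module), a host might enable LXC like this:

```
{
  virtualisation.lxc = {
    enable = true;

    # default.conf applied to newly created containers.
    defaultConfig = ''
      lxc.net.0.type = veth
      lxc.net.0.link = lxcbr0
      lxc.net.0.flags = up
    '';

    # lxc-usernet(5): allow user "alice" up to 10 veth devices on lxcbr0.
    usernetConfig = ''
      alice veth lxcbr0 10
    '';
  };
}
```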
diff --git a/nixpkgs/nixos/modules/virtualisation/lxcfs.nix b/nixpkgs/nixos/modules/virtualisation/lxcfs.nix
new file mode 100644
index 000000000000..fb0ba49f7304
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxcfs.nix
@@ -0,0 +1,45 @@
+# LXCFS Configuration
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.lxc.lxcfs;
+in {
+  meta.maintainers = [ maintainers.mic92 ];
+
+  ###### interface
+  options.virtualisation.lxc.lxcfs = {
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          This enables LXCFS, a FUSE filesystem for LXC.
+          To use lxcfs, include the following in your
+          container configuration:
+          ```
+          virtualisation.lxc.defaultConfig = "lxc.include = ''${pkgs.lxcfs}/share/lxc/config/common.conf.d/00-lxcfs.conf";
+          ```
+        '';
+      };
+  };
+
+  ###### implementation
+  config = mkIf cfg.enable {
+    systemd.services.lxcfs = {
+      description = "FUSE filesystem for LXC";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "lxc.service" ];
+      restartIfChanged = false;
+      serviceConfig = {
+        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /var/lib/lxcfs";
+        ExecStart = "${pkgs.lxcfs}/bin/lxcfs /var/lib/lxcfs";
+        ExecStopPost = "-${pkgs.fuse}/bin/fusermount -u /var/lib/lxcfs";
+        KillMode = "process";
+        Restart = "on-failure";
+      };
+    };
+  };
+}
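Putting the two modules together, a host configuration might look like the following sketch, wiring new containers to the lxcfs hook exactly as the option description above suggests:

```
{ pkgs, ... }:
{
  virtualisation.lxc.enable = true;
  virtualisation.lxc.lxcfs.enable = true;

  # Make newly created containers include the lxcfs snippet so they see
  # cgroup-aware values in /proc (uptime, meminfo, cpuinfo, ...).
  virtualisation.lxc.defaultConfig =
    "lxc.include = ${pkgs.lxcfs}/share/lxc/config/common.conf.d/00-lxcfs.conf";
}
```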
diff --git a/nixpkgs/nixos/modules/virtualisation/lxd-agent.nix b/nixpkgs/nixos/modules/virtualisation/lxd-agent.nix
new file mode 100644
index 000000000000..5bcc86e3bcbe
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxd-agent.nix
@@ -0,0 +1,91 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.virtualisation.lxd.agent;
+
+  # the lxd agent is provided by the lxd daemon through a virtiofs or 9p mount
+  # this is a port of the distrobuilder lxd-agent generator
+  # https://github.com/lxc/distrobuilder/blob/f77300bf7d7d5707b08eaf8a434d647d1ba81b5d/generators/lxd-agent.go#L18-L55
+  preStartScript = ''
+    PREFIX="/run/lxd_agent"
+
+    mount_virtiofs() {
+        mount -t virtiofs config "$PREFIX/.mnt" >/dev/null 2>&1
+    }
+
+    mount_9p() {
+        modprobe 9pnet_virtio >/dev/null 2>&1 || true
+        mount -t 9p config "$PREFIX/.mnt" -o access=0,trans=virtio,size=1048576 >/dev/null 2>&1
+    }
+
+    fail() {
+        umount -l "$PREFIX" >/dev/null 2>&1 || true
+        rmdir "$PREFIX" >/dev/null 2>&1 || true
+        echo "$1"
+        exit 1
+    }
+
+    # Setup the mount target.
+    umount -l "$PREFIX" >/dev/null 2>&1 || true
+    mkdir -p "$PREFIX"
+    mount -t tmpfs tmpfs "$PREFIX" -o mode=0700,size=50M
+    mkdir -p "$PREFIX/.mnt"
+
+    # Try virtiofs first.
+    mount_virtiofs || mount_9p || fail "Couldn't mount virtiofs or 9p, failing."
+
+    # Copy the data.
+    cp -Ra "$PREFIX/.mnt/"* "$PREFIX"
+
+    # Unmount the temporary mount.
+    umount "$PREFIX/.mnt"
+    rmdir "$PREFIX/.mnt"
+
+    # Fix up permissions.
+    chown -R root:root "$PREFIX"
+  '';
+in {
+  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+
+  options = {
+    virtualisation.lxd.agent.enable = lib.mkEnableOption (lib.mdDoc "the LXD agent");
+  };
+
+  config = lib.mkIf cfg.enable {
+    # https://github.com/lxc/distrobuilder/blob/f77300bf7d7d5707b08eaf8a434d647d1ba81b5d/generators/lxd-agent.go#L108-L125
+    systemd.services.lxd-agent = {
+      enable = true;
+      wantedBy = [ "multi-user.target" ];
+      path = [ pkgs.kmod pkgs.util-linux ];
+
+      preStart = preStartScript;
+
+      # avoid killing nixos-rebuild switch when executed through lxc exec
+      stopIfChanged = false;
+
+      unitConfig = {
+        Description = "LXD - agent";
+        Documentation = "https://documentation.ubuntu.com/lxd/en/latest";
+        ConditionPathExists = "/dev/virtio-ports/org.linuxcontainers.lxd";
+        Before = lib.optionals config.services.cloud-init.enable [ "cloud-init.target" "cloud-init.service" "cloud-init-local.service" ];
+        DefaultDependencies = "no";
+        StartLimitInterval = "60";
+        StartLimitBurst = "10";
+      };
+
+      serviceConfig = {
+        Type = "notify";
+        WorkingDirectory = "-/run/lxd_agent";
+        ExecStart = "/run/lxd_agent/lxd-agent";
+        Restart = "on-failure";
+        RestartSec = "5s";
+      };
+    };
+
+    systemd.paths.lxd-agent = {
+      enable = true;
+      wantedBy = [ "multi-user.target" ];
+      pathConfig.PathExists = "/dev/virtio-ports/org.linuxcontainers.lxd";
+    };
+  };
+}
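Inside a guest this is a single switch; the LXD virtual-machine image module that follows already sets it by default:

```
{
  # Guest-side configuration for an LXD virtual machine.
  virtualisation.lxd.agent.enable = true;
}
```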
diff --git a/nixpkgs/nixos/modules/virtualisation/lxd-virtual-machine.nix b/nixpkgs/nixos/modules/virtualisation/lxd-virtual-machine.nix
new file mode 100644
index 000000000000..ba729465ec2f
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxd-virtual-machine.nix
@@ -0,0 +1,46 @@
+{ config, lib, pkgs, ... }:
+
+let
+  serialDevice =
+    if pkgs.stdenv.hostPlatform.isx86
+    then "ttyS0"
+    else "ttyAMA0"; # aarch64
+in {
+  imports = [
+    ./lxc-instance-common.nix
+
+    ../profiles/qemu-guest.nix
+  ];
+
+  config = {
+    system.build.qemuImage = import ../../lib/make-disk-image.nix {
+      inherit pkgs lib config;
+
+      partitionTableType = "efi";
+      format = "qcow2-compressed";
+      copyChannel = true;
+    };
+
+    fileSystems = {
+      "/" = {
+        device = "/dev/disk/by-label/nixos";
+        autoResize = true;
+        fsType = "ext4";
+      };
+      "/boot" = {
+        device = "/dev/disk/by-label/ESP";
+        fsType = "vfat";
+      };
+    };
+
+    boot.growPartition = true;
+    boot.loader.systemd-boot.enable = true;
+
+    # Image building needs to know which device to install the bootloader on.
+    boot.loader.grub.device = "/dev/vda";
+
+    boot.kernelParams = ["console=tty1" "console=${serialDevice}"];
+
+    virtualisation.lxd.agent.enable = lib.mkDefault true;
+  };
+}
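A hedged sketch of how such an image might be built from a small wrapper configuration (the file name and state version are illustrative):

```
# lxd-vm.nix - build the image with:
#   nix-build '<nixpkgs/nixos>' -A config.system.build.qemuImage -I nixos-config=./lxd-vm.nix
{ modulesPath, ... }:
{
  imports = [ "${modulesPath}/virtualisation/lxd-virtual-machine.nix" ];
  system.stateVersion = "23.05";
}
```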
diff --git a/nixpkgs/nixos/modules/virtualisation/lxd.nix b/nixpkgs/nixos/modules/virtualisation/lxd.nix
new file mode 100644
index 000000000000..6f628c4a6e32
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/lxd.nix
@@ -0,0 +1,273 @@
+# Systemd services for lxd.
+
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.virtualisation.lxd;
+  preseedFormat = pkgs.formats.yaml {};
+in {
+  imports = [
+    (lib.mkRemovedOptionModule [ "virtualisation" "lxd" "zfsPackage" ] "Override zfs in an overlay instead to override it globally")
+  ];
+
+  ###### interface
+
+  options = {
+    virtualisation.lxd = {
+      enable = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          This option enables lxd, a daemon that manages
+          containers. Users in the "lxd" group can interact with
+          the daemon (e.g. to start or stop containers) using the
+          {command}`lxc` command line tool, among others.
+
+          Most of the time, you'll also want to start lxcfs, so
+          that containers can "see" the limits:
+          ```
+          virtualisation.lxc.lxcfs.enable = true;
+          ```
+        '';
+      };
+
+      package = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.lxd;
+        defaultText = lib.literalExpression "pkgs.lxd";
+        description = lib.mdDoc ''
+          The LXD package to use.
+        '';
+      };
+
+      lxcPackage = lib.mkOption {
+        type = lib.types.package;
+        default = pkgs.lxc;
+        defaultText = lib.literalExpression "pkgs.lxc";
+        description = lib.mdDoc ''
+          The LXC package to use with LXD (required for AppArmor profiles).
+        '';
+      };
+
+      zfsSupport = lib.mkOption {
+        type = lib.types.bool;
+        default = config.boot.zfs.enabled;
+        defaultText = lib.literalExpression "config.boot.zfs.enabled";
+        description = lib.mdDoc ''
+          Enables LXD to use ZFS as a storage backend for containers.
+
+          This option is enabled by default if a ZFS pool is configured
+          with NixOS.
+        '';
+      };
+
+      recommendedSysctlSettings = lib.mkOption {
+        type = lib.types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Enables various settings to avoid common pitfalls when
+          running containers requiring many file operations.
+          Fixes errors like "Too many open files" or
+          "neighbour: ndisc_cache: neighbor table overflow!".
+          See https://lxd.readthedocs.io/en/latest/production-setup/
+          for details.
+        '';
+      };
+
+      preseed = lib.mkOption {
+        type = lib.types.nullOr (lib.types.submodule {
+          freeformType = preseedFormat.type;
+        });
+
+        default = null;
+
+        description = lib.mdDoc ''
+          Configuration for LXD preseed, see
+          <https://documentation.ubuntu.com/lxd/en/latest/howto/initialize/#initialize-preseed>
+          for supported values.
+
+          Changes to this will be re-applied to LXD which will overwrite existing entities or create missing ones,
+          but entities will *not* be removed by preseed.
+        '';
+
+        example = lib.literalExpression ''
+          {
+            networks = [
+              {
+                name = "lxdbr0";
+                type = "bridge";
+                config = {
+                  "ipv4.address" = "10.0.100.1/24";
+                  "ipv4.nat" = "true";
+                };
+              }
+            ];
+            profiles = [
+              {
+                name = "default";
+                devices = {
+                  eth0 = {
+                    name = "eth0";
+                    network = "lxdbr0";
+                    type = "nic";
+                  };
+                  root = {
+                    path = "/";
+                    pool = "default";
+                    size = "35GiB";
+                    type = "disk";
+                  };
+                };
+              }
+            ];
+            storage_pools = [
+              {
+                name = "default";
+                driver = "dir";
+                config = {
+                  source = "/var/lib/lxd/storage-pools/default";
+                };
+              }
+            ];
+          }
+        '';
+      };
+
+      startTimeout = lib.mkOption {
+        type = lib.types.int;
+        default = 600;
+        apply = toString;
+        description = lib.mdDoc ''
+          Time to wait (in seconds) for LXD to become ready to process requests.
+          If LXD does not reply within the configured time, lxd.service will be
+          considered failed and systemd will attempt to restart it.
+        '';
+      };
+
+      ui = {
+        enable = lib.mkEnableOption (lib.mdDoc "(experimental) LXD UI");
+
+        package = lib.mkPackageOption pkgs.lxd-unwrapped "ui" { };
+      };
+    };
+  };
+
+  ###### implementation
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    # Note: the following settings are also declared in virtualisation.lxc, but
+    # that module can't simply be enabled to reuse them, because it
+    # does a bunch of unrelated things.
+    systemd.tmpfiles.rules = [ "d /var/lib/lxc/rootfs 0755 root root -" ];
+
+    security.apparmor = {
+      packages = [ cfg.lxcPackage ];
+      policies = {
+        "bin.lxc-start".profile = ''
+          include ${cfg.lxcPackage}/etc/apparmor.d/usr.bin.lxc-start
+        '';
+        "lxc-containers".profile = ''
+          include ${cfg.lxcPackage}/etc/apparmor.d/lxc-containers
+        '';
+      };
+    };
+
+    # TODO: remove once LXD gets proper support for cgroup v2
+    # (currently, e.g., most of the CPU accounting doesn't work)
+    systemd.enableUnifiedCgroupHierarchy = false;
+
+    systemd.sockets.lxd = {
+      description = "LXD UNIX socket";
+      wantedBy = [ "sockets.target" ];
+
+      socketConfig = {
+        ListenStream = "/var/lib/lxd/unix.socket";
+        SocketMode = "0660";
+        SocketGroup = "lxd";
+        Service = "lxd.service";
+      };
+    };
+
+    systemd.services.lxd = {
+      description = "LXD Container Management Daemon";
+
+      wantedBy = [ "multi-user.target" ];
+      after = [
+        "network-online.target"
+        (lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
+      ];
+      requires = [
+        "network-online.target"
+        "lxd.socket"
+        (lib.mkIf config.virtualisation.lxc.lxcfs.enable "lxcfs.service")
+      ];
+      documentation = [ "man:lxd(1)" ];
+
+      path = [ pkgs.util-linux ]
+        ++ lib.optional cfg.zfsSupport config.boot.zfs.package;
+
+      environment = lib.mkIf (cfg.ui.enable) {
+        "LXD_UI" = cfg.ui.package;
+      };
+
+      serviceConfig = {
+        ExecStart = "@${cfg.package}/bin/lxd lxd --group lxd";
+        ExecStartPost = "${cfg.package}/bin/lxd waitready --timeout=${cfg.startTimeout}";
+        ExecStop = "${cfg.package}/bin/lxd shutdown";
+
+        KillMode = "process"; # when stopping, leave the containers alone
+        LimitMEMLOCK = "infinity";
+        LimitNOFILE = "1048576";
+        LimitNPROC = "infinity";
+        TasksMax = "infinity";
+
+        Restart = "on-failure";
+        TimeoutStartSec = "${cfg.startTimeout}s";
+        TimeoutStopSec = "30s";
+
+        # By default, `lxd` loads configuration files from the hard-coded
+        # `/usr/share/lxc/config` - since this is a no-go for us, we have to
+        # tell it explicitly where the actual configuration files are.
+        Environment = lib.mkIf (config.virtualisation.lxc.lxcfs.enable)
+          "LXD_LXC_TEMPLATE_CONFIG=${pkgs.lxcfs}/share/lxc/config";
+      };
+    };
+
+    systemd.services.lxd-preseed = lib.mkIf (cfg.preseed != null) {
+      description = "LXD initialization with preseed file";
+      wantedBy = ["multi-user.target"];
+      requires = ["lxd.service"];
+      after = ["lxd.service"];
+
+      script = ''
+        ${pkgs.coreutils}/bin/cat ${preseedFormat.generate "lxd-preseed.yaml" cfg.preseed} | ${cfg.package}/bin/lxd init --preseed
+      '';
+
+      serviceConfig = {
+        Type = "oneshot";
+      };
+    };
+
+    users.groups.lxd = {};
+
+    users.users.root = {
+      subUidRanges = [ { startUid = 1000000; count = 65536; } ];
+      subGidRanges = [ { startGid = 1000000; count = 65536; } ];
+    };
+
+    boot.kernel.sysctl = lib.mkIf cfg.recommendedSysctlSettings {
+      "fs.inotify.max_queued_events" = 1048576;
+      "fs.inotify.max_user_instances" = 1048576;
+      "fs.inotify.max_user_watches" = 1048576;
+      "vm.max_map_count" = 262144; # TODO: Default vm.max_map_count has been increased system-wide
+      "kernel.dmesg_restrict" = 1;
+      "net.ipv4.neigh.default.gc_thresh3" = 8192;
+      "net.ipv6.neigh.default.gc_thresh3" = 8192;
+      "kernel.keys.maxkeys" = 2000;
+    };
+
+    boot.kernelModules = [ "veth" "xt_comment" "xt_CHECKSUM" "xt_MASQUERADE" "vhost_vsock" ]
+      ++ lib.optionals (!config.networking.nftables.enable) [ "iptable_mangle" ];
+  };
+}
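A hedged host-side sketch tying the options above together (the user name is illustrative; the preseed fragment mirrors the storage pool from the option example):

```
{
  virtualisation.lxd = {
    enable = true;
    recommendedSysctlSettings = true;

    # Declarative day-0 initialisation; see the larger preseed example above
    # for networks and profiles.
    preseed = {
      storage_pools = [
        {
          name = "default";
          driver = "dir";
          config.source = "/var/lib/lxd/storage-pools/default";
        }
      ];
    };
  };

  # Let containers see cgroup-aware /proc values.
  virtualisation.lxc.lxcfs.enable = true;

  # Members of the "lxd" group may talk to the daemon socket.
  users.users.alice = {
    isNormalUser = true;
    extraGroups = [ "lxd" ];
  };
}
```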
diff --git a/nixpkgs/nixos/modules/virtualisation/multipass.nix b/nixpkgs/nixos/modules/virtualisation/multipass.nix
new file mode 100644
index 000000000000..b331b3be7ea5
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/multipass.nix
@@ -0,0 +1,61 @@
+{ config
+, lib
+, pkgs
+, ...
+}:
+
+let
+  cfg = config.virtualisation.multipass;
+in
+{
+  options = {
+    virtualisation.multipass = {
+      enable = lib.mkEnableOption (lib.mdDoc ''
+        Multipass, a simple manager for virtualised Ubuntu instances.
+      '');
+
+      logLevel = lib.mkOption {
+        type = lib.types.enum [ "error" "warning" "info" "debug" "trace" ];
+        default = "debug";
+        description = lib.mdDoc ''
+          The logging verbosity of the multipassd binary.
+        '';
+      };
+
+      package = lib.mkPackageOptionMD pkgs "multipass" { };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    environment.systemPackages = [ cfg.package ];
+
+    systemd.services.multipass = {
+      description = "Multipass orchestrates virtual Ubuntu instances.";
+
+      wantedBy = [ "multi-user.target" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+
+      environment = {
+        "XDG_DATA_HOME" = "/var/lib/multipass/data";
+        "XDG_CACHE_HOME" = "/var/lib/multipass/cache";
+        "XDG_CONFIG_HOME" = "/var/lib/multipass/config";
+      };
+
+      serviceConfig = {
+        ExecStart = "${cfg.package}/bin/multipassd --logger platform --verbosity ${cfg.logLevel}";
+        SyslogIdentifier = "multipassd";
+        Restart = "on-failure";
+        TimeoutStopSec = 300;
+        Type = "simple";
+
+        WorkingDirectory = "/var/lib/multipass";
+
+        StateDirectory = "multipass";
+        StateDirectoryMode = "0750";
+        CacheDirectory = "multipass";
+        CacheDirectoryMode = "0750";
+      };
+    };
+  };
+}
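Enabling it is a two-line affair; for example:

```
{
  virtualisation.multipass = {
    enable = true;
    logLevel = "info"; # default is "debug"
  };
}
```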
diff --git a/nixpkgs/nixos/modules/virtualisation/nixos-containers.nix b/nixpkgs/nixos/modules/virtualisation/nixos-containers.nix
new file mode 100644
index 000000000000..6fdb177b968b
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/nixos-containers.nix
@@ -0,0 +1,911 @@
+{ config, lib, pkgs, ... }@host:
+
+with lib;
+
+let
+
+  configurationPrefix = optionalString (versionAtLeast config.system.stateVersion "22.05") "nixos-";
+  configurationDirectoryName = "${configurationPrefix}containers";
+  configurationDirectory = "/etc/${configurationDirectoryName}";
+  stateDirectory = "/var/lib/${configurationPrefix}containers";
+
+  nixos-container = pkgs.nixos-container.override {
+    inherit stateDirectory configurationDirectory;
+  };
+
+  # The container's init script, a small wrapper around the regular
+  # NixOS stage-2 init script.
+  containerInit = (cfg:
+    let
+      renderExtraVeth = (name: cfg:
+        ''
+        echo "Bringing ${name} up"
+        ip link set dev ${name} up
+        ${optionalString (cfg.localAddress != null) ''
+          echo "Setting ip for ${name}"
+          ip addr add ${cfg.localAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.localAddress6 != null) ''
+          echo "Setting ip6 for ${name}"
+          ip -6 addr add ${cfg.localAddress6} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress != null) ''
+          echo "Setting route to host for ${name}"
+          ip route add ${cfg.hostAddress} dev ${name}
+        ''}
+        ${optionalString (cfg.hostAddress6 != null) ''
+          echo "Setting route6 to host for ${name}"
+          ip -6 route add ${cfg.hostAddress6} dev ${name}
+        ''}
+        ''
+        );
+    in
+      pkgs.writeScript "container-init"
+      ''
+        #! ${pkgs.runtimeShell} -e
+
+        # Exit early if we're asked to shut down.
+        trap "exit 0" SIGRTMIN+3
+
+        # Initialise the container side of the veth pair.
+        if [ -n "$HOST_ADDRESS" ]   || [ -n "$HOST_ADDRESS6" ]  ||
+           [ -n "$LOCAL_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS6" ] ||
+           [ -n "$HOST_BRIDGE" ]; then
+          ip link set host0 name eth0
+          ip link set dev eth0 up
+
+          if [ -n "$LOCAL_ADDRESS" ]; then
+            ip addr add $LOCAL_ADDRESS dev eth0
+          fi
+          if [ -n "$LOCAL_ADDRESS6" ]; then
+            ip -6 addr add $LOCAL_ADDRESS6 dev eth0
+          fi
+          if [ -n "$HOST_ADDRESS" ]; then
+            ip route add $HOST_ADDRESS dev eth0
+            ip route add default via $HOST_ADDRESS
+          fi
+          if [ -n "$HOST_ADDRESS6" ]; then
+            ip -6 route add $HOST_ADDRESS6 dev eth0
+            ip -6 route add default via $HOST_ADDRESS6
+          fi
+        fi
+
+        ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+
+        # Start the regular stage 2 script.
+        # We source instead of exec to not lose an early stop signal, which is
+        # also the only _reliable_ shutdown signal we have since early stop
+        # does not execute ExecStop* commands.
+        set +e
+        . "$1"
+      ''
+    );
+
+  nspawnExtraVethArgs = (name: cfg: "--network-veth-extra=${name}");
+
+  startScript = cfg:
+    ''
+      mkdir -p -m 0755 "$root/etc" "$root/var/lib"
+      mkdir -p -m 0700 "$root/var/lib/private" "$root/root" /run/nixos-containers
+      if ! [ -e "$root/etc/os-release" ]; then
+        touch "$root/etc/os-release"
+      fi
+
+      if ! [ -e "$root/etc/machine-id" ]; then
+        touch "$root/etc/machine-id"
+      fi
+
+      mkdir -p -m 0755 \
+        "/nix/var/nix/profiles/per-container/$INSTANCE" \
+        "/nix/var/nix/gcroots/per-container/$INSTANCE"
+
+      cp --remove-destination /etc/resolv.conf "$root/etc/resolv.conf"
+
+      if [ "$PRIVATE_NETWORK" = 1 ]; then
+        extraFlags+=" --private-network"
+      fi
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        extraFlags+=" --network-veth"
+      fi
+
+      if [ -n "$HOST_PORT" ]; then
+        OIFS=$IFS
+        IFS=","
+        for i in $HOST_PORT
+        do
+            extraFlags+=" --port=$i"
+        done
+        IFS=$OIFS
+      fi
+
+      if [ -n "$HOST_BRIDGE" ]; then
+        extraFlags+=" --network-bridge=$HOST_BRIDGE"
+      fi
+
+      extraFlags+=" ${concatStringsSep " " (mapAttrsToList nspawnExtraVethArgs cfg.extraVeths)}"
+
+      for iface in $INTERFACES; do
+        extraFlags+=" --network-interface=$iface"
+      done
+
+      for iface in $MACVLANS; do
+        extraFlags+=" --network-macvlan=$iface"
+      done
+
+      # If the host is 64-bit and the container is 32-bit, add a
+      # --personality flag.
+      ${optionalString (pkgs.stdenv.hostPlatform.system == "x86_64-linux") ''
+        if [ "$(< ''${SYSTEM_PATH:-/nix/var/nix/profiles/per-container/$INSTANCE/system}/system)" = i686-linux ]; then
+          extraFlags+=" --personality=x86"
+        fi
+      ''}
+
+      export SYSTEMD_NSPAWN_UNIFIED_HIERARCHY=1
+
+      # Run systemd-nspawn without startup notification (we'll
+      # wait for the container systemd to signal readiness)
+      # Kill signal handling means systemd-nspawn will pass a system-halt signal
+      # to the container systemd when it receives SIGTERM for container shutdown;
+      # containerInit and stage2 have to handle this as well.
+      exec ${config.systemd.package}/bin/systemd-nspawn \
+        --keep-unit \
+        -M "$INSTANCE" -D "$root" $extraFlags \
+        $EXTRA_NSPAWN_FLAGS \
+        --notify-ready=yes \
+        --kill-signal=SIGRTMIN+3 \
+        --bind-ro=/nix/store \
+        --bind-ro=/nix/var/nix/db \
+        --bind-ro=/nix/var/nix/daemon-socket \
+        --bind="/nix/var/nix/profiles/per-container/$INSTANCE:/nix/var/nix/profiles" \
+        --bind="/nix/var/nix/gcroots/per-container/$INSTANCE:/nix/var/nix/gcroots" \
+        ${optionalString (!cfg.ephemeral) "--link-journal=try-guest"} \
+        --setenv PRIVATE_NETWORK="$PRIVATE_NETWORK" \
+        --setenv HOST_BRIDGE="$HOST_BRIDGE" \
+        --setenv HOST_ADDRESS="$HOST_ADDRESS" \
+        --setenv LOCAL_ADDRESS="$LOCAL_ADDRESS" \
+        --setenv HOST_ADDRESS6="$HOST_ADDRESS6" \
+        --setenv LOCAL_ADDRESS6="$LOCAL_ADDRESS6" \
+        --setenv HOST_PORT="$HOST_PORT" \
+        --setenv PATH="$PATH" \
+        ${optionalString cfg.ephemeral "--ephemeral"} \
+        ${optionalString (cfg.additionalCapabilities != null && cfg.additionalCapabilities != [])
+          ''--capability="${concatStringsSep "," cfg.additionalCapabilities}"''
+        } \
+        ${optionalString (cfg.tmpfs != null && cfg.tmpfs != [])
+          ''--tmpfs=${concatStringsSep " --tmpfs=" cfg.tmpfs}''
+        } \
+        ${containerInit cfg} "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/init"
+    '';
+
+  preStartScript = cfg:
+    ''
+      # Clean up existing machined registration and interfaces.
+      machinectl terminate "$INSTANCE" 2> /dev/null || true
+
+      if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+         [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+        ip link del dev "ve-$INSTANCE" 2> /dev/null || true
+        ip link del dev "vb-$INSTANCE" 2> /dev/null || true
+      fi
+
+      ${concatStringsSep "\n" (
+        mapAttrsToList (name: cfg:
+          "ip link del dev ${name} 2> /dev/null || true "
+        ) cfg.extraVeths
+      )}
+   '';
+
+  postStartScript = (cfg:
+    let
+      ipcall = cfg: ipcmd: variable: attribute:
+        if cfg.${attribute} == null then
+          ''
+            if [ -n "${variable}" ]; then
+              ${ipcmd} add ${variable} dev $ifaceHost
+            fi
+          ''
+        else
+          "${ipcmd} add ${cfg.${attribute}} dev $ifaceHost";
+      renderExtraVeth = name: cfg:
+        if cfg.hostBridge != null then
+          ''
+            # Add ${name} to bridge ${cfg.hostBridge}
+            ip link set dev ${name} master ${cfg.hostBridge} up
+          ''
+        else
+          ''
+            echo "Bring ${name} up"
+            ip link set dev ${name} up
+            # Set IPs and routes for ${name}
+            ${optionalString (cfg.hostAddress != null) ''
+              ip addr add ${cfg.hostAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.hostAddress6 != null) ''
+              ip -6 addr add ${cfg.hostAddress6} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress != null) ''
+              ip route add ${cfg.localAddress} dev ${name}
+            ''}
+            ${optionalString (cfg.localAddress6 != null) ''
+              ip -6 route add ${cfg.localAddress6} dev ${name}
+            ''}
+          '';
+    in
+      ''
+        if [ -n "$HOST_ADDRESS" ]  || [ -n "$LOCAL_ADDRESS" ] ||
+           [ -n "$HOST_ADDRESS6" ] || [ -n "$LOCAL_ADDRESS6" ]; then
+          if [ -z "$HOST_BRIDGE" ]; then
+            ifaceHost=ve-$INSTANCE
+            ip link set dev $ifaceHost up
+
+            ${ipcall cfg "ip addr" "$HOST_ADDRESS" "hostAddress"}
+            ${ipcall cfg "ip -6 addr" "$HOST_ADDRESS6" "hostAddress6"}
+            ${ipcall cfg "ip route" "$LOCAL_ADDRESS" "localAddress"}
+            ${ipcall cfg "ip -6 route" "$LOCAL_ADDRESS6" "localAddress6"}
+          fi
+        fi
+        ${concatStringsSep "\n" (mapAttrsToList renderExtraVeth cfg.extraVeths)}
+      ''
+  );
+
+  serviceDirectives = cfg: {
+    ExecReload = pkgs.writeScript "reload-container"
+      ''
+        #! ${pkgs.runtimeShell} -e
+        ${nixos-container}/bin/nixos-container run "$INSTANCE" -- \
+          bash --login -c "''${SYSTEM_PATH:-/nix/var/nix/profiles/system}/bin/switch-to-configuration test"
+      '';
+
+    SyslogIdentifier = "container %i";
+
+    EnvironmentFile = "-${configurationDirectory}/%i.conf";
+
+    Type = "notify";
+
+    RuntimeDirectory = lib.optional cfg.ephemeral "${configurationDirectoryName}/%i";
+
+    # Note that on reboot, systemd-nspawn returns 133, so this
+    # unit will be restarted. On poweroff, it returns 0, so the
+    # unit won't be restarted.
+    RestartForceExitStatus = "133";
+    SuccessExitStatus = "133";
+
+    # Some containers take long to start
+    # especially when you automatically start many at once
+    TimeoutStartSec = cfg.timeoutStartSec;
+
+    Restart = "on-failure";
+
+    Slice = "machine.slice";
+    Delegate = true;
+
+    # We rely on systemd-nspawn turning a SIGTERM to itself into a shutdown
+    # signal (SIGRTMIN+3) for the inner container.
+    KillMode = "mixed";
+    KillSignal = "TERM";
+
+    DevicePolicy = "closed";
+    DeviceAllow = map (d: "${d.node} ${d.modifier}") cfg.allowedDevices;
+  };
+
+  kernelVersion = config.boot.kernelPackages.kernel.version;
+
+  bindMountOpts = { name, ... }: {
+
+    options = {
+      mountPoint = mkOption {
+        example = "/mnt/usb";
+        type = types.str;
+        description = lib.mdDoc "Mount point on the container file system.";
+      };
+      hostPath = mkOption {
+        default = null;
+        example = "/home/alice";
+        type = types.nullOr types.str;
+        description = lib.mdDoc "Location of the host path to be mounted.";
+      };
+      isReadOnly = mkOption {
+        default = true;
+        type = types.bool;
+        description = lib.mdDoc "Determine whether the mounted path will be accessed in read-only mode.";
+      };
+    };
+
+    config = {
+      mountPoint = mkDefault name;
+    };
+
+  };
+
+  allowedDeviceOpts = { ... }: {
+    options = {
+      node = mkOption {
+        example = "/dev/net/tun";
+        type = types.str;
+        description = lib.mdDoc "Path to device node";
+      };
+      modifier = mkOption {
+        example = "rw";
+        type = types.str;
+        description = lib.mdDoc ''
+          Device node access modifier. Takes a combination of
+          `r` (read), `w` (write), and
+          `m` (mknod). See the
+          `systemd.resource-control(5)` man page for more
+          information.'';
+      };
+    };
+  };
+
+  mkBindFlag = d:
+               let flagPrefix = if d.isReadOnly then " --bind-ro=" else " --bind=";
+                   mountstr = if d.hostPath != null then "${d.hostPath}:${d.mountPoint}" else "${d.mountPoint}";
+               in flagPrefix + mountstr ;
+
+  mkBindFlags = bs: concatMapStrings mkBindFlag (lib.attrValues bs);
+
+  networkOptions = {
+    hostBridge = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "br0";
+      description = lib.mdDoc ''
+        Put the host-side of the veth-pair into the named bridge.
+        Only one of hostAddress* or hostBridge can be given.
+      '';
+    };
+
+    forwardPorts = mkOption {
+      type = types.listOf (types.submodule {
+        options = {
+          protocol = mkOption {
+            type = types.str;
+            default = "tcp";
+            description = lib.mdDoc "The protocol specifier for port forwarding between host and container";
+          };
+          hostPort = mkOption {
+            type = types.int;
+            description = lib.mdDoc "Source port of the external interface on host";
+          };
+          containerPort = mkOption {
+            type = types.nullOr types.int;
+            default = null;
+            description = lib.mdDoc "Target port of container";
+          };
+        };
+      });
+      default = [];
+      example = [ { protocol = "tcp"; hostPort = 8080; containerPort = 80; } ];
+      description = lib.mdDoc ''
+        List of forwarded ports from host to container. Each forwarded port
+        is specified by protocol, hostPort and containerPort. By default,
+        protocol is tcp and hostPort and containerPort are assumed to be
+        the same if containerPort is not explicitly given.
+      '';
+    };
+
+
+    hostAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.1";
+      description = lib.mdDoc ''
+        The IPv4 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    hostAddress6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "fc00::1";
+      description = lib.mdDoc ''
+        The IPv6 address assigned to the host interface.
+        (Not used when hostBridge is set.)
+      '';
+    };
+
+    localAddress = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "10.231.136.2";
+      description = lib.mdDoc ''
+        The IPv4 address assigned to the interface in the container.
+        If a hostBridge is used, this should be given with netmask to access
+        the whole network. Otherwise the default netmask is /32 and routing is
+        set up from localAddress to hostAddress and back.
+      '';
+    };
+
+    localAddress6 = mkOption {
+      type = types.nullOr types.str;
+      default = null;
+      example = "fc00::2";
+      description = lib.mdDoc ''
+        The IPv6 address assigned to the interface in the container.
+        If a hostBridge is used, this should be given with netmask to access
+        the whole network. Otherwise the default netmask is /128 and routing is
+        set up from localAddress6 to hostAddress6 and back.
+      '';
+    };
+
+  };
+
+  dummyConfig =
+    {
+      extraVeths = {};
+      additionalCapabilities = [];
+      ephemeral = false;
+      timeoutStartSec = "1min";
+      allowedDevices = [];
+      hostAddress = null;
+      hostAddress6 = null;
+      localAddress = null;
+      localAddress6 = null;
+      tmpfs = null;
+    };
+
+in
+
+{
+  options = {
+
+    boot.isContainer = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether this NixOS machine is a lightweight container running
+        in another NixOS system.
+      '';
+    };
+
+    boot.enableContainers = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Whether to enable support for NixOS containers. Defaults to true
+        (at no cost if containers are not actually used).
+      '';
+    };
+
+    containers = mkOption {
+      type = types.attrsOf (types.submodule (
+        { config, options, name, ... }:
+        {
+          options = {
+            config = mkOption {
+              description = lib.mdDoc ''
+                A specification of the desired configuration of this
+                container, as a NixOS module.
+              '';
+              type = lib.mkOptionType {
+                name = "Toplevel NixOS config";
+                merge = loc: defs: (import "${toString config.nixpkgs}/nixos/lib/eval-config.nix" {
+                  modules =
+                    let
+                      extraConfig = { options, ... }: {
+                        _file = "module at ${__curPos.file}:${toString __curPos.line}";
+                        config = {
+                          nixpkgs = if options.nixpkgs?hostPlatform && host.options.nixpkgs.hostPlatform.isDefined
+                                    then { inherit (host.config.nixpkgs) hostPlatform; }
+                                    else { inherit (host.config.nixpkgs) localSystem; }
+                          ;
+                          boot.isContainer = true;
+                          networking.hostName = mkDefault name;
+                          networking.useDHCP = false;
+                          assertions = [
+                            {
+                              assertion =
+                                (builtins.compareVersions kernelVersion "5.8" <= 0)
+                                -> config.privateNetwork
+                                -> stringLength name <= 11;
+                              message = ''
+                                Container name `${name}` is too long: When `privateNetwork` is enabled, container names can
+                                not be longer than 11 characters, because the container's interface name is derived from it.
+                                You should either make the container name shorter or upgrade to a more recent kernel that
+                                supports interface altnames (i.e. at least Linux 5.8 - please see https://github.com/NixOS/nixpkgs/issues/38509
+                                for details).
+                              '';
+                            }
+                          ];
+                        };
+                      };
+                    in [ extraConfig ] ++ (map (x: x.value) defs);
+                  prefix = [ "containers" name ];
+                  inherit (config) specialArgs;
+
+                  # The system is inherited from the host above.
+                  # Set it to null, to remove the "legacy" entrypoint's non-hermetic default.
+                  system = null;
+                }).config;
+              };
+            };
+
+            path = mkOption {
+              type = types.path;
+              example = "/nix/var/nix/profiles/per-container/webserver";
+              description = lib.mdDoc ''
+                As an alternative to specifying
+                {option}`config`, you can specify the path to
+                the evaluated NixOS system configuration, typically a
+                symlink to a system profile.
+              '';
+            };
+
+            additionalCapabilities = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "CAP_NET_ADMIN" "CAP_MKNOD" ];
+              description = lib.mdDoc ''
+                Grant additional capabilities to the container.  See the
+                capabilities(7) and systemd-nspawn(1) man pages for more
+                information.
+              '';
+            };
+
+            nixpkgs = mkOption {
+              type = types.path;
+              default = pkgs.path;
+              defaultText = literalExpression "pkgs.path";
+              description = lib.mdDoc ''
+                A path to the nixpkgs that provide the modules, pkgs and lib for evaluating the container.
+
+                To only change the `pkgs` argument used inside the container modules,
+                set the `nixpkgs.*` options in the container {option}`config`.
+                Setting `config.nixpkgs.pkgs = pkgs` speeds up the container evaluation
+                by reusing the system pkgs, but the `nixpkgs.config` option in the
+                container config is ignored in this case.
+              '';
+            };
+
+            specialArgs = mkOption {
+              type = types.attrsOf types.unspecified;
+              default = {};
+              description = lib.mdDoc ''
+                A set of special arguments to be passed to NixOS modules.
+                This will be merged into the `specialArgs` used to evaluate
+                the NixOS configurations.
+              '';
+            };
+
+            ephemeral = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Runs the container in ephemeral mode with an empty root filesystem at boot.
+                This way the container is bootstrapped from scratch on each boot
+                and cleaned up on shutdown, leaving no traces behind.
+                Useful for completely stateless, reproducible containers.
+
+                Note that this option might require some adjustments to the container configuration,
+                e.g. you might want to set
+                {var}`systemd.network.networks.$interface.dhcpV4Config.ClientIdentifier` to "mac"
+                if you use the {var}`macvlans` option.
+                This way the DHCP client identifier will be stable between container restarts.
+
+                Note that the container journal will not be linked to the host if this option is enabled.
+              '';
+            };
+
+            enableTun = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Allows the container to create and set up tunnel interfaces
+                by granting the `NET_ADMIN` capability and
+                enabling access to `/dev/net/tun`.
+              '';
+            };
+
+            privateNetwork = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether to give the container its own private virtual
+                Ethernet interface.  The interface is called
+                `eth0`, and is hooked up to the interface
+                `ve-«container-name»`
+                on the host.  If this option is not set, then the
+                container shares the network interfaces of the host,
+                and can bind to any port on any interface.
+              '';
+            };
+
+            interfaces = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = lib.mdDoc ''
+                The list of interfaces to be moved into the container.
+              '';
+            };
+
+            macvlans = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "eth1" "eth2" ];
+              description = lib.mdDoc ''
+                The list of host interfaces from which macvlans will be
+                created. For each interface specified, a macvlan interface
+                will be created and moved to the container.
+              '';
+            };
+
+            extraVeths = mkOption {
+              type = with types; attrsOf (submodule { options = networkOptions; });
+              default = {};
+              description = lib.mdDoc ''
+                Extra veth-pairs to be created for the container.
+              '';
+            };
+
+            autoStart = mkOption {
+              type = types.bool;
+              default = false;
+              description = lib.mdDoc ''
+                Whether the container is automatically started at boot-time.
+              '';
+            };
+
+            restartIfChanged = mkOption {
+              type = types.bool;
+              default = true;
+              description = lib.mdDoc ''
+                Whether the container should be restarted during a NixOS
+                configuration switch if its definition has changed.
+              '';
+            };
+
+            timeoutStartSec = mkOption {
+              type = types.str;
+              default = "1min";
+              description = lib.mdDoc ''
+                Time for the container to start. In case of a timeout,
+                the container processes get killed.
+                See {manpage}`systemd.time(7)`
+                for more information about the format.
+               '';
+            };
+
+            bindMounts = mkOption {
+              type = with types; attrsOf (submodule bindMountOpts);
+              default = {};
+              example = literalExpression ''
+                { "/home" = { hostPath = "/home/alice";
+                              isReadOnly = false; };
+                }
+              '';
+
+              description =
+                lib.mdDoc ''
+                  An extra set of directories that are bind-mounted into the container.
+                '';
+            };
+
+            allowedDevices = mkOption {
+              type = with types; listOf (submodule allowedDeviceOpts);
+              default = [];
+              example = [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              description = lib.mdDoc ''
+                A list of device nodes to which the container has access.
+              '';
+            };
+
+            tmpfs = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "/var" ];
+              description = lib.mdDoc ''
+                Mounts a set of tmpfs file systems into the container.
+                Multiple paths can be specified.
+                Valid items must conform to the --tmpfs argument
+                of systemd-nspawn. See systemd-nspawn(1) for details.
+              '';
+            };
+
+            extraFlags = mkOption {
+              type = types.listOf types.str;
+              default = [];
+              example = [ "--drop-capability=CAP_SYS_CHROOT" ];
+              description = lib.mdDoc ''
+                Extra flags passed to the systemd-nspawn command.
+                See systemd-nspawn(1) for details.
+              '';
+            };
+
+            # Removed option. See `checkAssertion` below for the accompanying error message.
+            pkgs = mkOption { visible = false; };
+          } // networkOptions;
+
+          config = let
+            # Throw an error when removed option `pkgs` is used.
+            # Because this is a submodule we cannot use `mkRemovedOptionModule` or option `assertions`.
+            optionPath = "containers.${name}.pkgs";
+            files = showFiles options.pkgs.files;
+            checkAssertion = if options.pkgs.isDefined then throw ''
+              The option definition `${optionPath}' in ${files} no longer has any effect; please remove it.
+
+              Alternatively, you can use the following options:
+              - containers.${name}.nixpkgs
+                This sets the nixpkgs (and thereby the modules, pkgs and lib) that
+                are used for evaluating the container.
+
+              - containers.${name}.config.nixpkgs.pkgs
+                This only sets the `pkgs` argument used inside the container modules.
+            ''
+            else null;
+          in {
+            path = builtins.seq checkAssertion
+              mkIf options.config.isDefined config.config.system.build.toplevel;
+          };
+        }));
+
+      default = {};
+      example = literalExpression
+        ''
+          { webserver =
+              { path = "/nix/var/nix/profiles/webserver";
+              };
+            database =
+              { config =
+                  { config, pkgs, ... }:
+                  { services.postgresql.enable = true;
+                    services.postgresql.package = pkgs.postgresql_14;
+
+                    system.stateVersion = "${lib.trivial.release}";
+                  };
+              };
+          }
+        '';
+      description = lib.mdDoc ''
+        A set of NixOS system configurations to be run as lightweight
+        containers.  Each container appears as a service
+        `container-«name»`
+        on the host system, allowing it to be started and stopped via
+        {command}`systemctl`.
+      '';
+    };
+
+  };
+
+
+  config = mkIf (config.boot.enableContainers) (let
+
+    unit = {
+      description = "Container '%i'";
+
+      unitConfig.RequiresMountsFor = "${stateDirectory}/%i";
+
+      path = [ pkgs.iproute2 ];
+
+      environment = {
+        root = "${stateDirectory}/%i";
+        INSTANCE = "%i";
+      };
+
+      preStart = preStartScript dummyConfig;
+
+      script = startScript dummyConfig;
+
+      postStart = postStartScript dummyConfig;
+
+      restartIfChanged = false;
+
+      serviceConfig = serviceDirectives dummyConfig;
+    };
+  in {
+    warnings =
+      (optional (config.virtualisation.containers.enable && versionOlder config.system.stateVersion "22.05") ''
+        Enabling both boot.enableContainers & virtualisation.containers on system.stateVersion < 22.05 is unsupported.
+      '');
+
+    systemd.targets.multi-user.wants = [ "machines.target" ];
+
+    systemd.services = listToAttrs (filter (x: x.value != null) (
+      # The generic container template used by imperative containers
+      [{ name = "container@"; value = unit; }]
+      # declarative containers
+      ++ (mapAttrsToList (name: cfg: nameValuePair "container@${name}" (let
+          containerConfig = cfg // (
+          optionalAttrs cfg.enableTun
+            {
+              allowedDevices = cfg.allowedDevices
+                ++ [ { node = "/dev/net/tun"; modifier = "rw"; } ];
+              additionalCapabilities = cfg.additionalCapabilities
+                ++ [ "CAP_NET_ADMIN" ];
+            }
+          );
+        in
+          recursiveUpdate unit {
+            preStart = preStartScript containerConfig;
+            script = startScript containerConfig;
+            postStart = postStartScript containerConfig;
+            serviceConfig = serviceDirectives containerConfig;
+            unitConfig.RequiresMountsFor = lib.optional (!containerConfig.ephemeral) "${stateDirectory}/%i";
+            environment.root = if containerConfig.ephemeral then "/run/nixos-containers/%i" else "${stateDirectory}/%i";
+          } // (
+          optionalAttrs containerConfig.autoStart
+            {
+              wantedBy = [ "machines.target" ];
+              wants = [ "network.target" ];
+              after = [ "network.target" ];
+              restartTriggers = [
+                containerConfig.path
+                config.environment.etc."${configurationDirectoryName}/${name}.conf".source
+              ];
+              restartIfChanged = containerConfig.restartIfChanged;
+            }
+          )
+      )) config.containers)
+    ));
+
+    # Generate a configuration file in /etc/nixos-containers for each
+    # container so that container@.target can get the container
+    # configuration.
+    environment.etc =
+      let mkPortStr = p: p.protocol + ":" + (toString p.hostPort) + ":" + (if p.containerPort == null then toString p.hostPort else toString p.containerPort);
+      in mapAttrs' (name: cfg: nameValuePair "${configurationDirectoryName}/${name}.conf"
+      { text =
+          ''
+            SYSTEM_PATH=${cfg.path}
+            ${optionalString cfg.privateNetwork ''
+              PRIVATE_NETWORK=1
+              ${optionalString (cfg.hostBridge != null) ''
+                HOST_BRIDGE=${cfg.hostBridge}
+              ''}
+              ${optionalString (length cfg.forwardPorts > 0) ''
+                HOST_PORT=${concatStringsSep "," (map mkPortStr cfg.forwardPorts)}
+              ''}
+              ${optionalString (cfg.hostAddress != null) ''
+                HOST_ADDRESS=${cfg.hostAddress}
+              ''}
+              ${optionalString (cfg.hostAddress6 != null) ''
+                HOST_ADDRESS6=${cfg.hostAddress6}
+              ''}
+              ${optionalString (cfg.localAddress != null) ''
+                LOCAL_ADDRESS=${cfg.localAddress}
+              ''}
+              ${optionalString (cfg.localAddress6 != null) ''
+                LOCAL_ADDRESS6=${cfg.localAddress6}
+              ''}
+            ''}
+            INTERFACES="${toString cfg.interfaces}"
+            MACVLANS="${toString cfg.macvlans}"
+            ${optionalString cfg.autoStart ''
+              AUTO_START=1
+            ''}
+            EXTRA_NSPAWN_FLAGS="${mkBindFlags cfg.bindMounts +
+              optionalString (cfg.extraFlags != [])
+                (" " + concatStringsSep " " cfg.extraFlags)}"
+          '';
+      }) config.containers;
+
+    # Generate /etc/hosts entries for the containers.
+    networking.extraHosts = concatStrings (mapAttrsToList (name: cfg: optionalString (cfg.localAddress != null)
+      ''
+        ${head (splitString "/" cfg.localAddress)} ${name}.containers
+      '') config.containers);
+
+    networking.dhcpcd.denyInterfaces = [ "ve-*" "vb-*" ];
+
+    services.udev.extraRules = optionalString config.networking.networkmanager.enable ''
+      # Don't manage interfaces created by nixos-container.
+      ENV{INTERFACE}=="v[eb]-*", ENV{NM_UNMANAGED}="1"
+    '';
+
+    environment.systemPackages = [
+      nixos-container
+    ];
+
+    boot.kernelModules = [
+      "bridge"
+      "macvlan"
+      "tap"
+      "tun"
+    ];
+  });
+
+  meta.buildDocsInSandbox = false;
+}
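To complement the option example above, a sketch of a declarative container using the networking and bind-mount options defined in this module (addresses and paths are illustrative):

```
{
  containers.webserver = {
    autoStart = true;
    privateNetwork = true;
    hostAddress = "10.231.136.1";
    localAddress = "10.231.136.2";

    bindMounts."/var/www" = {
      hostPath = "/srv/www";
      isReadOnly = true;
    };

    config = { pkgs, ... }: {
      services.nginx.enable = true;
      networking.firewall.allowedTCPPorts = [ 80 ];
      system.stateVersion = "23.05";
    };
  };
}
```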
diff --git a/nixpkgs/nixos/modules/virtualisation/oci-common.nix b/nixpkgs/nixos/modules/virtualisation/oci-common.nix
new file mode 100644
index 000000000000..a620df063151
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/oci-common.nix
@@ -0,0 +1,60 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.oci;
+in
+{
+  imports = [ ../profiles/qemu-guest.nix ];
+
+  # Taken from /proc/cmdline of Ubuntu 20.04.2 LTS on OCI
+  boot.kernelParams = [
+    "nvme.shutdown_timeout=10"
+    "nvme_core.shutdown_timeout=10"
+    "libiscsi.debug_libiscsi_eh=1"
+    "crash_kexec_post_notifiers"
+
+    # VNC console
+    "console=tty1"
+
+    # x86_64-linux
+    "console=ttyS0"
+
+    # aarch64-linux
+    "console=ttyAMA0,115200"
+  ];
+
+  boot.growPartition = true;
+
+  fileSystems."/" = {
+    device = "/dev/disk/by-label/nixos";
+    fsType = "ext4";
+    autoResize = true;
+  };
+
+  fileSystems."/boot" = lib.mkIf cfg.efi {
+    device = "/dev/disk/by-label/ESP";
+    fsType = "vfat";
+  };
+
+  boot.loader.efi.canTouchEfiVariables = false;
+  boot.loader.grub = {
+    device = if cfg.efi then "nodev" else "/dev/sda";
+    splashImage = null;
+    extraConfig = ''
+      serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1
+      terminal_input --append serial
+      terminal_output --append serial
+    '';
+    efiInstallAsRemovable = cfg.efi;
+    efiSupport = cfg.efi;
+  };
+
+  # https://docs.oracle.com/en-us/iaas/Content/Compute/Tasks/configuringntpservice.htm#Configuring_the_Oracle_Cloud_Infrastructure_NTP_Service_for_an_Instance
+  networking.timeServers = [ "169.254.169.254" ];
+
+  services.openssh.enable = true;
+
+  # Otherwise the instance may not have a working network-online.target,
+  # making the fetch-ssh-keys.service fail
+  networking.useNetworkd = lib.mkDefault true;
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/oci-config-user.nix b/nixpkgs/nixos/modules/virtualisation/oci-config-user.nix
new file mode 100644
index 000000000000..70c0b34efe7a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/oci-config-user.nix
@@ -0,0 +1,12 @@
+{ modulesPath, ... }:
+
+{
+  # To build the configuration or use nix-env, you need to run
+  # either nixos-rebuild --upgrade or nix-channel --update
+  # to fetch the nixos channel.
+
+  # This configures everything but bootstrap services,
+  # which only need to be run once and have already finished
+  # if you are able to see this comment.
+  imports = [ "${modulesPath}/virtualisation/oci-common.nix" ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/oci-containers.nix b/nixpkgs/nixos/modules/virtualisation/oci-containers.nix
new file mode 100644
index 000000000000..a4a40346f093
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/oci-containers.nix
@@ -0,0 +1,386 @@
+{ config, options, lib, pkgs, ... }:
+
+with lib;
+let
+  cfg = config.virtualisation.oci-containers;
+  proxy_env = config.networking.proxy.envVars;
+
+  defaultBackend = options.virtualisation.oci-containers.backend.default;
+
+  containerOptions =
+    { ... }: {
+
+      options = {
+
+        image = mkOption {
+          type = with types; str;
+          description = lib.mdDoc "OCI image to run.";
+          example = "library/hello-world";
+        };
+
+        imageFile = mkOption {
+          type = with types; nullOr package;
+          default = null;
+          description = lib.mdDoc ''
+            Path to an image file to load before running the image. This can
+            be used to bypass pulling the image from the registry.
+
+            The `image` attribute must match the name and
+            tag of the image contained in this file, as they will be used to
+            run the container with that image. If they do not match, the
+            image will be pulled from the registry as usual.
+          '';
+          example = literalExpression "pkgs.dockerTools.buildImage {...};";
+        };
+
+        login = {
+
+          username = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            description = lib.mdDoc "Username for login.";
+          };
+
+          passwordFile = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            description = lib.mdDoc "Path to file containing password.";
+            example = "/etc/nixos/dockerhub-password.txt";
+          };
+
+          registry = mkOption {
+            type = with types; nullOr str;
+            default = null;
+            description = lib.mdDoc "Registry where to login to.";
+            example = "https://docker.pkg.github.com";
+          };
+
+        };
+
+        cmd = mkOption {
+          type =  with types; listOf str;
+          default = [];
+          description = lib.mdDoc "Commandline arguments to pass to the image's entrypoint.";
+          example = literalExpression ''
+            ["--port=9000"]
+          '';
+        };
+
+        labels = mkOption {
+          type = with types; attrsOf str;
+          default = {};
+          description = lib.mdDoc "Labels to attach to the container at runtime.";
+          example = literalExpression ''
+            {
+              "traefik.https.routers.example.rule" = "Host(`example.container`)";
+            }
+          '';
+        };
+
+        entrypoint = mkOption {
+          type = with types; nullOr str;
+          description = lib.mdDoc "Override the default entrypoint of the image.";
+          default = null;
+          example = "/bin/my-app";
+        };
+
+        environment = mkOption {
+          type = with types; attrsOf str;
+          default = {};
+          description = lib.mdDoc "Environment variables to set for this container.";
+          example = literalExpression ''
+            {
+              DATABASE_HOST = "db.example.com";
+              DATABASE_PORT = "3306";
+            }
+        '';
+        };
+
+        environmentFiles = mkOption {
+          type = with types; listOf path;
+          default = [];
+          description = lib.mdDoc "Environment files for this container.";
+          example = literalExpression ''
+            [
+              /path/to/.env
+              /path/to/.env.secret
+            ]
+        '';
+        };
+
+        log-driver = mkOption {
+          type = types.str;
+          default = "journald";
+          description = lib.mdDoc ''
+            Logging driver for the container.  The default of
+            `"journald"` means that the container's logs will be
+            handled as part of the systemd unit.
+
+            For more details and a full list of logging drivers, refer to the respective backend's documentation.
+
+            For Docker:
+            [Docker engine documentation](https://docs.docker.com/engine/reference/run/#logging-drivers---log-driver)
+
+            For Podman:
+            Refer to the podman-run(1) man page.
+          '';
+        };
+
+        ports = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = lib.mdDoc ''
+            Network ports to publish from the container to the outer host.
+
+            Valid formats:
+            - `<ip>:<hostPort>:<containerPort>`
+            - `<ip>::<containerPort>`
+            - `<hostPort>:<containerPort>`
+            - `<containerPort>`
+
+            Both `hostPort` and `containerPort` can be specified as a range of
+            ports.  When specifying ranges for both, the number of container
+            ports in the range must match the number of host ports in the
+            range.  Example: `1234-1236:1234-1236/tcp`
+
+            When specifying a range for `hostPort` only, the `containerPort`
+            must *not* be a range.  In this case, the container port is published
+            somewhere within the specified `hostPort` range.
+            Example: `1234-1236:1234/tcp`
+
+            Refer to the
+            [Docker engine documentation](https://docs.docker.com/engine/reference/run/#expose-incoming-ports) for full details.
+          '';
+          example = literalExpression ''
+            [
+              "8080:9000"
+            ]
+          '';
+        };
+
+        user = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc ''
+            Override the username or UID (and optionally groupname or GID) used
+            in the container.
+          '';
+          example = "nobody:nogroup";
+        };
+
+        volumes = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = lib.mdDoc ''
+            List of volumes to attach to this container.
+
+            Note that this is a list of `"src:dst"` strings to
+            allow for `src` to refer to `/nix/store` paths, which
+            would be difficult with an attribute set.  There are
+            also a variety of mount options available as a third
+            field; please refer to the
+            [docker engine documentation](https://docs.docker.com/engine/reference/run/#volume-shared-filesystems) for details.
+          '';
+          example = literalExpression ''
+            [
+              "volume_name:/path/inside/container"
+              "/path/on/host:/path/inside/container"
+            ]
+          '';
+        };
+
+        workdir = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc "Override the default working directory for the container.";
+          example = "/var/lib/hello_world";
+        };
+
+        dependsOn = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = lib.mdDoc ''
+            Define which other containers this one depends on. They will be added to both After and Requires for the unit.
+
+            Use the same name as the attribute under `virtualisation.oci-containers.containers`.
+          '';
+          example = literalExpression ''
+            virtualisation.oci-containers.containers = {
+              node1 = {};
+              node2 = {
+                dependsOn = [ "node1" ];
+              }
+            }
+          '';
+        };
+
+        hostname = mkOption {
+          type = with types; nullOr str;
+          default = null;
+          description = lib.mdDoc "The hostname of the container.";
+          example = "hello-world";
+        };
+
+        extraOptions = mkOption {
+          type = with types; listOf str;
+          default = [];
+          description = lib.mdDoc "Extra options for {command}`${defaultBackend} run`.";
+          example = literalExpression ''
+            ["--network=host"]
+          '';
+        };
+
+        autoStart = mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            When enabled, the container is automatically started on boot.
+            If this option is set to false, the container has to be started on-demand via its service.
+          '';
+        };
+      };
+    };
+
+  isValidLogin = login: login.username != null && login.passwordFile != null && login.registry != null;
+
+  mkService = name: container: let
+    dependsOn = map (x: "${cfg.backend}-${x}.service") container.dependsOn;
+    escapedName = escapeShellArg name;
+    preStartScript = pkgs.writeShellApplication {
+      name = "pre-start";
+      runtimeInputs = [ ];
+      text = ''
+        ${cfg.backend} rm -f ${name} || true
+        ${optionalString (isValidLogin container.login) ''
+          ${cfg.backend} login \
+          ${container.login.registry} \
+          --username ${container.login.username} \
+          --password-stdin < ${container.login.passwordFile}
+        ''}
+        ${optionalString (container.imageFile != null) ''
+          ${cfg.backend} load -i ${container.imageFile}
+        ''}
+        ${optionalString (cfg.backend == "podman") ''
+          rm -f /run/podman-${escapedName}.ctr-id
+        ''}
+      '';
+    };
+  in {
+    wantedBy = [] ++ optional (container.autoStart) "multi-user.target";
+    after = lib.optionals (cfg.backend == "docker") [ "docker.service" "docker.socket" ]
+            # if imageFile is not set, the service needs the network to download the image from the registry
+            ++ lib.optionals (container.imageFile == null) [ "network-online.target" ]
+            ++ dependsOn;
+    requires = dependsOn;
+    environment = proxy_env;
+
+    path =
+      if cfg.backend == "docker" then [ config.virtualisation.docker.package ]
+      else if cfg.backend == "podman" then [ config.virtualisation.podman.package ]
+      else throw "Unhandled backend: ${cfg.backend}";
+
+    script = concatStringsSep " \\\n  " ([
+      "exec ${cfg.backend} run"
+      "--rm"
+      "--name=${escapedName}"
+      "--log-driver=${container.log-driver}"
+    ] ++ optional (container.entrypoint != null)
+      "--entrypoint=${escapeShellArg container.entrypoint}"
+      ++ optional (container.hostname != null)
+      "--hostname=${escapeShellArg container.hostname}"
+      ++ lib.optionals (cfg.backend == "podman") [
+        "--cidfile=/run/podman-${escapedName}.ctr-id"
+        "--cgroups=no-conmon"
+        "--sdnotify=conmon"
+        "-d"
+        "--replace"
+      ] ++ (mapAttrsToList (k: v: "-e ${escapeShellArg k}=${escapeShellArg v}") container.environment)
+      ++ map (f: "--env-file ${escapeShellArg f}") container.environmentFiles
+      ++ map (p: "-p ${escapeShellArg p}") container.ports
+      ++ optional (container.user != null) "-u ${escapeShellArg container.user}"
+      ++ map (v: "-v ${escapeShellArg v}") container.volumes
+      ++ (mapAttrsToList (k: v: "-l ${escapeShellArg k}=${escapeShellArg v}") container.labels)
+      ++ optional (container.workdir != null) "-w ${escapeShellArg container.workdir}"
+      ++ map escapeShellArg container.extraOptions
+      ++ [container.image]
+      ++ map escapeShellArg container.cmd
+    );
+
+    preStop = if cfg.backend == "podman"
+      then "[ $SERVICE_RESULT = success ] || podman stop --ignore --cidfile=/run/podman-${escapedName}.ctr-id"
+      else "[ $SERVICE_RESULT = success ] || ${cfg.backend} stop ${name}";
+    postStop =  if cfg.backend == "podman"
+      then "podman rm -f --ignore --cidfile=/run/podman-${escapedName}.ctr-id"
+      else "${cfg.backend} rm -f ${name} || true";
+
+    serviceConfig = {
+      ### There is no generalized way of supporting `reload` for docker
+      ### containers. Some containers may respond well to SIGHUP sent to their
+      ### init process, but it is not guaranteed; some apps have other reload
+      ### mechanisms, some don't have a reload signal at all, and some docker
+      ### images just have broken signal handling.  The best compromise in this
+      ### case is probably to leave ExecReload undefined, so `systemctl reload`
+      ### will at least result in an error instead of potentially undefined
+      ### behaviour.
+      ###
+      ### Advanced users can still override this part of the unit to implement
+      ### a custom reload handler, since the result of all this is a normal
+      ### systemd service from the perspective of the NixOS module system.
+      ###
+      # ExecReload = ...;
+      ###
+      ExecStartPre = [ "${preStartScript}/bin/pre-start" ];
+      TimeoutStartSec = 0;
+      TimeoutStopSec = 120;
+      Restart = "always";
+    } // optionalAttrs (cfg.backend == "podman") {
+      Environment="PODMAN_SYSTEMD_UNIT=podman-${name}.service";
+      Type="notify";
+      NotifyAccess="all";
+    };
+  };
+
+in {
+  imports = [
+    (
+      lib.mkChangedOptionModule
+      [ "docker-containers"  ]
+      [ "virtualisation" "oci-containers" ]
+      (oldcfg: {
+        backend = "docker";
+        containers = lib.mapAttrs (n: v: builtins.removeAttrs (v // {
+          extraOptions = v.extraDockerOptions or [];
+        }) [ "extraDockerOptions" ]) oldcfg.docker-containers;
+      })
+    )
+  ];
+
+  options.virtualisation.oci-containers = {
+
+    backend = mkOption {
+      type = types.enum [ "podman" "docker" ];
+      default = if versionAtLeast config.system.stateVersion "22.05" then "podman" else "docker";
+      description = lib.mdDoc "The underlying Docker implementation to use.";
+    };
+
+    containers = mkOption {
+      default = {};
+      type = types.attrsOf (types.submodule containerOptions);
+      description = lib.mdDoc "OCI (Docker) containers to run as systemd services.";
+    };
+
+  };
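+
+  # Usage sketch (illustrative only: the image name, ports, paths and values below
+  # are examples, not defaults of this module):
+  #
+  #   virtualisation.oci-containers = {
+  #     backend = "podman";
+  #     containers.webserver = {
+  #       image = "nginx:1.25";
+  #       ports = [ "127.0.0.1:8080:80" ];
+  #       volumes = [ "/var/www:/usr/share/nginx/html:ro" ];
+  #       environment.TZ = "UTC";
+  #     };
+  #   };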
+
+  config = lib.mkIf (cfg.containers != {}) (lib.mkMerge [
+    {
+      systemd.services = mapAttrs' (n: v: nameValuePair "${cfg.backend}-${n}" (mkService n v)) cfg.containers;
+    }
+    (lib.mkIf (cfg.backend == "podman") {
+      virtualisation.podman.enable = true;
+    })
+    (lib.mkIf (cfg.backend == "docker") {
+      virtualisation.docker.enable = true;
+    })
+  ]);
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/oci-image.nix b/nixpkgs/nixos/modules/virtualisation/oci-image.nix
new file mode 100644
index 000000000000..d4af5016dd71
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/oci-image.nix
@@ -0,0 +1,50 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.oci;
+in
+{
+  imports = [ ./oci-common.nix ];
+
+  config = {
+    system.build.OCIImage = import ../../lib/make-disk-image.nix {
+      inherit config lib pkgs;
+      name = "oci-image";
+      configFile = ./oci-config-user.nix;
+      format = "qcow2";
+      diskSize = 8192;
+      partitionTableType = if cfg.efi then "efi" else "legacy";
+    };
+
+    systemd.services.fetch-ssh-keys = {
+      description = "Fetch authorized_keys for root user";
+
+      wantedBy = [ "sshd.service" ];
+      before = [ "sshd.service" ];
+
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+
+      path  = [ pkgs.coreutils pkgs.curl ];
+      script = ''
+        mkdir -m 0700 -p /root/.ssh
+        if [ -f /root/.ssh/authorized_keys ]; then
+          echo "Authorized keys have already been downloaded"
+        else
+          echo "Downloading authorized keys from Instance Metadata Service v2"
+          curl -s -S -L \
+            -H "Authorization: Bearer Oracle" \
+            -o /root/.ssh/authorized_keys \
+            http://169.254.169.254/opc/v2/instance/metadata/ssh_authorized_keys
+          chmod 600 /root/.ssh/authorized_keys
+        fi
+      '';
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        StandardError = "journal+console";
+        StandardOutput = "journal+console";
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/oci-options.nix b/nixpkgs/nixos/modules/virtualisation/oci-options.nix
new file mode 100644
index 000000000000..0dfedc6a530c
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/oci-options.nix
@@ -0,0 +1,14 @@
+{ config, lib, pkgs, ... }:
+{
+  options = {
+    oci = {
+      efi = lib.mkOption {
+        default = true;
+        internal = true;
+        description = ''
+          Whether the OCI instance is using EFI.
+        '';
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/openstack-config.nix b/nixpkgs/nixos/modules/virtualisation/openstack-config.nix
new file mode 100644
index 000000000000..0ef7a3b50106
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/openstack-config.nix
@@ -0,0 +1,90 @@
+{ config, pkgs, lib, ... }:
+
+# image metadata:
+# hw_firmware_type=uefi
+
+let
+  inherit (lib) mkIf mkDefault;
+  cfg = config.openstack;
+  metadataFetcher = import ./openstack-metadata-fetcher.nix {
+    targetRoot = "/";
+    wgetExtraOptions = "--retry-connrefused";
+  };
+in
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+
+    # Note: While we do use the headless profile, we also explicitly
+    # turn on the serial console on tty1 below.
+    # Note that I could not find any documentation indicating tty1 was
+    # the correct choice. I picked tty1 because that is what one
+    # particular host was using.
+    ../profiles/headless.nix
+
+    # The Openstack Metadata service exposes data on an EC2 API also.
+    ./ec2-data.nix
+    ./amazon-init.nix
+  ];
+
+  config = {
+    fileSystems."/" = mkIf (!cfg.zfs.enable) {
+      device = "/dev/disk/by-label/nixos";
+      fsType = "ext4";
+      autoResize = true;
+    };
+
+    fileSystems."/boot" = mkIf (cfg.efi || cfg.zfs.enable) {
+      # The ZFS image uses a partition labeled ESP whether or not we're
+      # booting with EFI.
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.growPartition = true;
+    boot.kernelParams = [ "console=tty1" ];
+    boot.loader.grub.device = if (!cfg.efi) then "/dev/vda" else "nodev";
+    boot.loader.grub.efiSupport = cfg.efi;
+    boot.loader.grub.efiInstallAsRemovable = cfg.efi;
+    boot.loader.timeout = 1;
+    boot.loader.grub.extraConfig = ''
+      serial --unit=1 --speed=115200 --word=8 --parity=no --stop=1
+      terminal_output console serial
+      terminal_input console serial
+    '';
+
+    services.zfs.expandOnBoot = mkIf cfg.zfs.enable (lib.mkDefault "all");
+    boot.zfs.devNodes = mkIf cfg.zfs.enable "/dev/";
+
+    # Allow root logins
+    services.openssh = {
+      enable = true;
+      settings.PermitRootLogin = "prohibit-password";
+      settings.PasswordAuthentication = mkDefault false;
+    };
+
+    users.users.root.initialPassword = "foobar";
+
+    # Enable the serial console on tty1
+    systemd.services."serial-getty@tty1".enable = true;
+
+    # Force getting the hostname from Openstack metadata.
+    networking.hostName = mkDefault "";
+
+    systemd.services.openstack-init = {
+      path = [ pkgs.wget ];
+      description = "Fetch Metadata on startup";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "apply-ec2-data.service" "amazon-init.service" ];
+      wants = [ "network-online.target" ];
+      after = [ "network-online.target" ];
+      script = metadataFetcher;
+      restartIfChanged = false;
+      unitConfig.X-StopOnRemoval = false;
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/openstack-metadata-fetcher.nix b/nixpkgs/nixos/modules/virtualisation/openstack-metadata-fetcher.nix
new file mode 100644
index 000000000000..d62428b47a44
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/openstack-metadata-fetcher.nix
@@ -0,0 +1,22 @@
+{ targetRoot, wgetExtraOptions }:
+
+# OpenStack's metadata service aims to be EC2-compatible. Where
+# possible, try to keep the set of fetched metadata in sync with
+# ./ec2-metadata-fetcher.nix .
+''
+  metaDir=${targetRoot}etc/ec2-metadata
+  mkdir -m 0755 -p "$metaDir"
+  rm -f "$metaDir/*"
+
+  echo "getting instance metadata..."
+
+  wget_imds() {
+    wget ${wgetExtraOptions} "$@"
+  }
+
+  wget_imds -O "$metaDir/ami-manifest-path" http://169.254.169.254/1.0/meta-data/ami-manifest-path || true
+  # When no user-data is provided, the OpenStack metadata server doesn't expose the user-data route.
+  (umask 077 && wget_imds -O "$metaDir/user-data" http://169.254.169.254/1.0/user-data || rm -f "$metaDir/user-data")
+  wget_imds -O "$metaDir/hostname" http://169.254.169.254/1.0/meta-data/hostname || true
+  wget_imds -O "$metaDir/public-keys-0-openssh-key" http://169.254.169.254/1.0/meta-data/public-keys/0/openssh-key || true
+''
diff --git a/nixpkgs/nixos/modules/virtualisation/openstack-options.nix b/nixpkgs/nixos/modules/virtualisation/openstack-options.nix
new file mode 100644
index 000000000000..52f45de92ecb
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/openstack-options.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, ... }:
+let
+  inherit (lib) literalExpression types;
+in
+{
+  options = {
+    openstack = {
+      zfs = {
+        enable = lib.mkOption {
+          default = false;
+          internal = true;
+          description = lib.mdDoc ''
+            Whether the OpenStack instance uses a ZFS root.
+          '';
+        };
+
+        datasets = lib.mkOption {
+          description = lib.mdDoc ''
+            Datasets to create under the `tank` and `boot` zpools.
+
+            **NOTE:** This option is used only at image creation time, and
+            does not attempt to declaratively create or manage datasets
+            on an existing system.
+          '';
+
+          default = { };
+
+          type = types.attrsOf (types.submodule {
+            options = {
+              mount = lib.mkOption {
+                description = lib.mdDoc "Where to mount this dataset.";
+                type = types.nullOr types.str;
+                default = null;
+              };
+
+              properties = lib.mkOption {
+                description = lib.mdDoc "Properties to set on this dataset.";
+                type = types.attrsOf types.str;
+                default = { };
+              };
+            };
+          });
+        };
+      };
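+
+      # Example (these internal options are only used at image build time;
+      # the dataset name, mountpoint and property below are illustrative):
+      #
+      #   openstack.zfs = {
+      #     enable = true;
+      #     datasets."tank/home" = {
+      #       mount = "/home";
+      #       properties.compression = "zstd";
+      #     };
+      #   };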
+
+      efi = lib.mkOption {
+        default = pkgs.stdenv.hostPlatform.isAarch64;
+        defaultText = literalExpression "pkgs.stdenv.hostPlatform.isAarch64";
+        internal = true;
+        description = lib.mdDoc ''
+          Whether the instance is using EFI.
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf config.openstack.zfs.enable {
+    networking.hostId = lib.mkDefault "00000000";
+
+    fileSystems =
+      let
+        mountable = lib.filterAttrs (_: value: ((value.mount or null) != null)) config.openstack.zfs.datasets;
+      in
+      lib.mapAttrs'
+        (dataset: opts: lib.nameValuePair opts.mount {
+          device = dataset;
+          fsType = "zfs";
+        })
+        mountable;
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/openvswitch.nix b/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
new file mode 100644
index 000000000000..32646f60f8e0
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/openvswitch.nix
@@ -0,0 +1,145 @@
+# Systemd services for openvswitch
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.vswitch;
+
+in {
+
+  options.virtualisation.vswitch = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable Open vSwitch. A configuration database daemon
+        (ovsdb-server) will be started.
+        '';
+    };
+
+    resetOnStart = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to reset the Open vSwitch configuration database to a default
+        configuration on every start of the systemd `ovsdb.service`.
+        '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.openvswitch;
+      defaultText = literalExpression "pkgs.openvswitch";
+      description = lib.mdDoc ''
+        Open vSwitch package to use.
+      '';
+    };
+  };
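+
+  # Usage sketch (the bridge and port names are illustrative and not managed by
+  # this module):
+  #
+  #   virtualisation.vswitch.enable = true;
+  #
+  # Bridges can then be set up imperatively with the tools this module installs, e.g.:
+  #
+  #   ovs-vsctl add-br br0
+  #   ovs-vsctl add-port br0 eth0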
+
+  config = mkIf cfg.enable (let
+
+    # Where the communication sockets live
+    runDir = "/run/openvswitch";
+
+    # The path to an initialized version of the database
+    db = pkgs.stdenv.mkDerivation {
+      name = "vswitch.db";
+      dontUnpack = true;
+      buildPhase = "true";
+      buildInputs = with pkgs; [
+        cfg.package
+      ];
+      installPhase = "mkdir -p $out";
+    };
+
+  in {
+    environment.systemPackages = [ cfg.package ];
+    boot.kernelModules = [ "tun" "openvswitch" ];
+
+    boot.extraModulePackages = [ cfg.package ];
+
+    systemd.services.ovsdb = {
+      description = "Open_vSwitch Database Server";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "systemd-udev-settle.service" ];
+      path = [ cfg.package ];
+      restartTriggers = [ db cfg.package ];
+      # Create the config database
+      preStart =
+        ''
+        mkdir -p ${runDir}
+        mkdir -p /var/db/openvswitch
+        chmod +w /var/db/openvswitch
+        ${optionalString cfg.resetOnStart "rm -f /var/db/openvswitch/conf.db"}
+        if [[ ! -e /var/db/openvswitch/conf.db ]]; then
+          ${cfg.package}/bin/ovsdb-tool create \
+            "/var/db/openvswitch/conf.db" \
+            "${cfg.package}/share/openvswitch/vswitch.ovsschema"
+        fi
+        chmod -R +w /var/db/openvswitch
+        if ${cfg.package}/bin/ovsdb-tool needs-conversion /var/db/openvswitch/conf.db | grep -q "yes"
+        then
+          echo "Performing database upgrade"
+          ${cfg.package}/bin/ovsdb-tool convert /var/db/openvswitch/conf.db
+        else
+          echo "Database already up to date"
+        fi
+        '';
+      serviceConfig = {
+        ExecStart =
+          ''
+          ${cfg.package}/bin/ovsdb-server \
+            --remote=punix:${runDir}/db.sock \
+            --private-key=db:Open_vSwitch,SSL,private_key \
+            --certificate=db:Open_vSwitch,SSL,certificate \
+            --bootstrap-ca-cert=db:Open_vSwitch,SSL,ca_cert \
+            --unixctl=ovsdb.ctl.sock \
+            --pidfile=/run/openvswitch/ovsdb.pid \
+            --detach \
+            /var/db/openvswitch/conf.db
+          '';
+        Restart = "always";
+        RestartSec = 3;
+        PIDFile = "/run/openvswitch/ovsdb.pid";
+        # Use service type 'forking' to correctly determine when ovsdb-server is ready.
+        Type = "forking";
+      };
+      postStart = ''
+        ${cfg.package}/bin/ovs-vsctl --timeout 3 --retry --no-wait init
+      '';
+    };
+
+    systemd.services.ovs-vswitchd = {
+      description = "Open_vSwitch Daemon";
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "ovsdb.service" ];
+      after = [ "ovsdb.service" ];
+      path = [ cfg.package ];
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/ovs-vswitchd \
+          --pidfile=/run/openvswitch/ovs-vswitchd.pid \
+          --detach
+        '';
+        PIDFile = "/run/openvswitch/ovs-vswitchd.pid";
+        # Use service type 'forking' to correctly determine when vswitchd is ready.
+        Type = "forking";
+        Restart = "always";
+        RestartSec = 3;
+      };
+    };
+
+  });
+
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "vswitch" "ipsec" ] ''
+      Open vSwitch IPsec functionality has been removed because it depended on racoon,
+      which was removed from nixpkgs after being abandoned upstream.
+    '')
+  ];
+
+  meta.maintainers = with maintainers; [ netixx ];
+
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix b/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix
new file mode 100644
index 000000000000..dba8ce02b724
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/parallels-guest.nix
@@ -0,0 +1,145 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  prl-tools = config.hardware.parallels.package;
+in
+
+{
+
+  options = {
+    hardware.parallels = {
+
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          This enables Parallels Tools for Linux guests, along with the provided
+          video, mouse, and other hardware drivers.
+        '';
+      };
+
+      autoMountShares = mkOption {
+        type = types.bool;
+        default = true;
+        description = lib.mdDoc ''
+          Control the prlfsmountd service. While this service is running, shares cannot
+          be mounted manually through `mount -t prl_fs ...`, because the service will
+          remount them and trample any options that were set. Enabling it is recommended
+          for simple file sharing; for extended share use (such as for code), disable it
+          and mount shares manually.
+        '';
+      };
+
+      package = mkOption {
+        type = types.nullOr types.package;
+        default = config.boot.kernelPackages.prl-tools;
+        defaultText = "config.boot.kernelPackages.prl-tools";
+        example = literalExpression "config.boot.kernelPackages.prl-tools";
+        description = lib.mdDoc ''
+          Defines which package to use for prl-tools. Override to change the version.
+        '';
+      };
+    };
+
+  };
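+
+  # Usage sketch (illustrative; only meaningful inside a Parallels guest):
+  #
+  #   hardware.parallels = {
+  #     enable = true;
+  #     autoMountShares = false;  # mount shares manually instead
+  #   };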
+
+  config = mkIf config.hardware.parallels.enable {
+
+    services.udev.packages = [ prl-tools ];
+
+    environment.systemPackages = [ prl-tools ];
+
+    boot.extraModulePackages = [ prl-tools ];
+
+    boot.kernelModules = [ "prl_fs" "prl_fs_freeze" "prl_tg" ]
+      ++ optional (pkgs.stdenv.hostPlatform.system == "aarch64-linux") "prl_notifier";
+
+    services.timesyncd.enable = false;
+
+    systemd.services.prltoolsd = {
+      description = "Parallels Tools Service";
+      wantedBy = [ "multi-user.target" ];
+      path = [ prl-tools ];
+      serviceConfig = {
+        ExecStart = "${prl-tools}/bin/prltoolsd -f";
+        PIDFile = "/var/run/prltoolsd.pid";
+        WorkingDirectory = "${prl-tools}/bin";
+      };
+    };
+
+    systemd.services.prlfsmountd = mkIf config.hardware.parallels.autoMountShares {
+      description = "Parallels Guest File System Sharing Tool";
+      wantedBy = [ "multi-user.target" ];
+      path = [ prl-tools ];
+      serviceConfig = rec {
+        ExecStart = "${prl-tools}/sbin/prlfsmountd ${PIDFile}";
+        ExecStartPre = "${pkgs.coreutils}/bin/mkdir -p /media";
+        ExecStopPost = "${prl-tools}/sbin/prlfsmountd -u";
+        PIDFile = "/run/prlfsmountd.pid";
+        WorkingDirectory = "${prl-tools}/bin";
+      };
+    };
+
+    systemd.services.prlshprint = {
+      description = "Parallels Printing Tool";
+      wantedBy = [ "multi-user.target" ];
+      bindsTo = [ "cups.service" ];
+      path = [ prl-tools ];
+      serviceConfig = {
+        ExecStart = "${prl-tools}/bin/prlshprint";
+        WorkingDirectory = "${prl-tools}/bin";
+      };
+    };
+
+    systemd.user.services = {
+      prlcc = {
+        description = "Parallels Control Center";
+        wantedBy = [ "graphical-session.target" ];
+        path = [ prl-tools ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcc";
+          WorkingDirectory = "${prl-tools}/bin";
+        };
+      };
+      prldnd = {
+        description = "Parallels Drag And Drop Tool";
+        wantedBy = [ "graphical-session.target" ];
+        path = [ prl-tools ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prldnd";
+          WorkingDirectory = "${prl-tools}/bin";
+        };
+      };
+      prlcp = {
+        description = "Parallels Copy Paste Tool";
+        wantedBy = [ "graphical-session.target" ];
+        path = [ prl-tools ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlcp";
+          Restart = "always";
+          WorkingDirectory = "${prl-tools}/bin";
+        };
+      };
+      prlsga = {
+        description = "Parallels Shared Guest Applications Tool";
+        wantedBy = [ "graphical-session.target" ];
+        path = [ prl-tools ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlsga";
+          WorkingDirectory = "${prl-tools}/bin";
+        };
+      };
+      prlshprof = {
+        description = "Parallels Shared Profile Tool";
+        wantedBy = [ "graphical-session.target" ];
+        path = [ prl-tools ];
+        serviceConfig = {
+          ExecStart = "${prl-tools}/bin/prlshprof";
+          WorkingDirectory = "${prl-tools}/bin";
+        };
+      };
+    };
+
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/podman/default.nix b/nixpkgs/nixos/modules/virtualisation/podman/default.nix
new file mode 100644
index 000000000000..ec0b713e58b3
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/podman/default.nix
@@ -0,0 +1,245 @@
+{ config, lib, pkgs, ... }:
+let
+  cfg = config.virtualisation.podman;
+  json = pkgs.formats.json { };
+
+  inherit (lib) mkOption types;
+
+  podmanPackage = (pkgs.podman.override {
+    extraPackages = cfg.extraPackages
+      # setuid shadow
+      ++ [ "/run/wrappers" ]
+      ++ lib.optional (builtins.elem "zfs" config.boot.supportedFilesystems) config.boot.zfs.package;
+  });
+
+  # Provides a fake "docker" binary mapping to podman
+  dockerCompat = pkgs.runCommand "${podmanPackage.pname}-docker-compat-${podmanPackage.version}"
+    {
+      outputs = [ "out" "man" ];
+      inherit (podmanPackage) meta;
+    } ''
+    mkdir -p $out/bin
+    ln -s ${podmanPackage}/bin/podman $out/bin/docker
+
+    mkdir -p $man/share/man/man1
+    for f in ${podmanPackage.man}/share/man/man1/*; do
+      basename=$(basename $f | sed s/podman/docker/g)
+      ln -s $f $man/share/man/man1/$basename
+    done
+  '';
+
+in
+{
+  imports = [
+    (lib.mkRemovedOptionModule [ "virtualisation" "podman" "defaultNetwork" "dnsname" ]
+      "Use virtualisation.podman.defaultNetwork.settings.dns_enabled instead.")
+    (lib.mkRemovedOptionModule [ "virtualisation" "podman" "defaultNetwork" "extraPlugins" ]
+      "Netavark isn't compatible with CNI plugins.")
+    ./network-socket.nix
+  ];
+
+  meta = {
+    maintainers = lib.teams.podman.members;
+  };
+
+  options.virtualisation.podman = {
+
+    enable =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          This option enables Podman, a daemonless container engine for
+          developing, managing, and running OCI Containers on your Linux System.
+
+          It is a drop-in replacement for the {command}`docker` command.
+        '';
+      };
+
+    dockerSocket.enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Make the Podman socket available in place of the Docker socket, so
+        Docker tools can find the Podman socket.
+
+        Podman implements the Docker API.
+
+        Users must be in the `podman` group in order to connect. As
+        with Docker, members of this group can gain root access.
+      '';
+    };
+
+    dockerCompat = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Create an alias mapping {command}`docker` to {command}`podman`.
+      '';
+    };
+
+    enableNvidia = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Enable use of NVidia GPUs from within podman containers.
+      '';
+    };
+
+    extraPackages = mkOption {
+      type = with types; listOf package;
+      default = [ ];
+      example = lib.literalExpression ''
+        [
+          pkgs.gvisor
+        ]
+      '';
+      description = lib.mdDoc ''
+        Extra packages to be installed in the Podman wrapper.
+      '';
+    };
+
+    autoPrune = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Whether to periodically prune Podman resources. If enabled, a
+          systemd timer will run `podman system prune -f`
+          as specified by the `dates` option.
+        '';
+      };
+
+      flags = mkOption {
+        type = types.listOf types.str;
+        default = [];
+        example = [ "--all" ];
+        description = lib.mdDoc ''
+          Any additional flags passed to {command}`podman system prune`.
+        '';
+      };
+
+      dates = mkOption {
+        default = "weekly";
+        type = types.str;
+        description = lib.mdDoc ''
+          Specification (in the format described by
+          {manpage}`systemd.time(7)`) of the time at
+          which the prune will occur.
+        '';
+      };
+    };
+
+    package = lib.mkOption {
+      type = types.package;
+      default = podmanPackage;
+      internal = true;
+      description = lib.mdDoc ''
+        The final Podman package (including extra packages).
+      '';
+    };
+
+    defaultNetwork.settings = lib.mkOption {
+      type = json.type;
+      default = { };
+      example = lib.literalExpression "{ dns_enabled = true; }";
+      description = lib.mdDoc ''
+        Settings for podman's default network.
+      '';
+    };
+
+  };
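+
+  # Usage sketch (all values below are illustrative):
+  #
+  #   virtualisation.podman = {
+  #     enable = true;
+  #     dockerCompat = true;
+  #     autoPrune = { enable = true; dates = "weekly"; flags = [ "--all" ]; };
+  #     defaultNetwork.settings.dns_enabled = true;
+  #   };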
+
+  config = lib.mkIf cfg.enable
+    {
+      environment.systemPackages = [ cfg.package ]
+        ++ lib.optional cfg.dockerCompat dockerCompat;
+
+      # https://github.com/containers/podman/blob/097cc6eb6dd8e598c0e8676d21267b4edb11e144/docs/tutorials/basic_networking.md#default-network
+      environment.etc."containers/networks/podman.json" = lib.mkIf (cfg.defaultNetwork.settings != { }) {
+        source = json.generate "podman.json" ({
+          dns_enabled = false;
+          driver = "bridge";
+          id = "0000000000000000000000000000000000000000000000000000000000000000";
+          internal = false;
+          ipam_options = { driver = "host-local"; };
+          ipv6_enabled = false;
+          name = "podman";
+          network_interface = "podman0";
+          subnets = [{ gateway = "10.88.0.1"; subnet = "10.88.0.0/16"; }];
+        } // cfg.defaultNetwork.settings);
+      };
+
+      virtualisation.containers = {
+        enable = true; # Enable common /etc/containers configuration
+        containersConf.settings = {
+          network.network_backend = "netavark";
+        } // lib.optionalAttrs cfg.enableNvidia {
+          engine = {
+            conmon_env_vars = [ "PATH=${lib.makeBinPath [ pkgs.nvidia-podman ]}" ];
+            runtimes.nvidia = [ "${pkgs.nvidia-podman}/bin/nvidia-container-runtime" ];
+          };
+        };
+      };
+
+      systemd.packages = [ cfg.package ];
+
+      systemd.services.podman-prune = {
+        description = "Prune podman resources";
+
+        restartIfChanged = false;
+        unitConfig.X-StopOnRemoval = false;
+
+        serviceConfig.Type = "oneshot";
+
+        script = ''
+          ${cfg.package}/bin/podman system prune -f ${toString cfg.autoPrune.flags}
+        '';
+
+        startAt = lib.optional cfg.autoPrune.enable cfg.autoPrune.dates;
+        after = [ "podman.service" ];
+        requires = [ "podman.service" ];
+      };
+
+      systemd.sockets.podman.wantedBy = [ "sockets.target" ];
+      systemd.sockets.podman.socketConfig.SocketGroup = "podman";
+
+      systemd.user.sockets.podman.wantedBy = [ "sockets.target" ];
+
+      systemd.timers.podman-prune.timerConfig = lib.mkIf cfg.autoPrune.enable {
+        Persistent = true;
+        RandomizedDelaySec = 1800;
+      };
+
+      systemd.tmpfiles.packages = [
+        # The /run/podman rule interferes with our podman group, so we remove
+        # it and let the systemd socket logic take care of it.
+        (pkgs.runCommand "podman-tmpfiles-nixos" { package = cfg.package; } ''
+          mkdir -p $out/lib/tmpfiles.d/
+          grep -v 'D! /run/podman 0700 root root' \
+            <$package/lib/tmpfiles.d/podman.conf \
+            >$out/lib/tmpfiles.d/podman.conf
+        '')
+      ];
+
+      systemd.tmpfiles.rules =
+        lib.optionals cfg.dockerSocket.enable [
+          "L! /run/docker.sock - - - - /run/podman/podman.sock"
+        ];
+
+      users.groups.podman = { };
+
+      assertions = [
+        {
+          assertion = cfg.dockerCompat -> !config.virtualisation.docker.enable;
+          message = "Option dockerCompat conflicts with docker";
+        }
+        {
+          assertion = cfg.dockerSocket.enable -> !config.virtualisation.docker.enable;
+          message = ''
+            The options virtualisation.podman.dockerSocket.enable and virtualisation.docker.enable conflict, because only one can serve the socket.
+          '';
+        }
+      ];
+    };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/podman/network-socket-ghostunnel.nix b/nixpkgs/nixos/modules/virtualisation/podman/network-socket-ghostunnel.nix
new file mode 100644
index 000000000000..ade4926c94cd
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/podman/network-socket-ghostunnel.nix
@@ -0,0 +1,34 @@
+{ config, lib, pkg, ... }:
+let
+  inherit (lib)
+    mkOption
+    types
+    ;
+
+  cfg = config.virtualisation.podman.networkSocket;
+
+in
+{
+  options.virtualisation.podman.networkSocket = {
+    server = mkOption {
+      type = types.enum [ "ghostunnel" ];
+    };
+  };
+
+  config = lib.mkIf (cfg.enable && cfg.server == "ghostunnel") {
+
+    services.ghostunnel = {
+      enable = true;
+      servers."podman-socket" = {
+        inherit (cfg.tls) cert key cacert;
+        listen = "${cfg.listenAddress}:${toString cfg.port}";
+        target = "unix:/run/podman/podman.sock";
+        allowAll = lib.mkDefault true;
+      };
+    };
+    systemd.services.ghostunnel-server-podman-socket.serviceConfig.SupplementaryGroups = [ "podman" ];
+
+  };
+
+  meta.maintainers = lib.teams.podman.members ++ [ lib.maintainers.roberth ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/podman/network-socket.nix b/nixpkgs/nixos/modules/virtualisation/podman/network-socket.nix
new file mode 100644
index 000000000000..a10597175ab9
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/podman/network-socket.nix
@@ -0,0 +1,95 @@
+{ config, lib, pkg, ... }:
+let
+  inherit (lib)
+    mkOption
+    types
+    ;
+
+  cfg = config.virtualisation.podman.networkSocket;
+
+in
+{
+  imports = [
+    ./network-socket-ghostunnel.nix
+  ];
+
+  options.virtualisation.podman.networkSocket = {
+    enable = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Make the Podman and Docker compatibility API available over the network
+        with TLS client certificate authentication.
+
+        This allows Docker clients to connect with the equivalents of the Docker
+        CLI `-H` and `--tls*` family of options.
+
+        For certificate setup, see https://docs.docker.com/engine/security/protect-access/
+
+        This option is independent of [](#opt-virtualisation.podman.dockerSocket.enable).
+      '';
+    };
+
+    server = mkOption {
+      type = types.enum [ ];
+      description = lib.mdDoc ''
+        Choice of TLS proxy server.
+      '';
+      example = "ghostunnel";
+    };
+
+    openFirewall = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to open the port in the firewall.
+      '';
+    };
+
+    tls.cacert = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to CA certificate to use for client authentication.
+      '';
+    };
+
+    tls.cert = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to certificate describing the server.
+      '';
+    };
+
+    tls.key = mkOption {
+      type = types.path;
+      description = lib.mdDoc ''
+        Path to the private key corresponding to the server certificate.
+
+        Use a string for this setting. Otherwise it will be copied to the Nix
+        store first, where it is readable by any system process.
+      '';
+    };
+
+    port = mkOption {
+      type = types.port;
+      default = 2376;
+      description = lib.mdDoc ''
+        TCP port number for receiving TLS connections.
+      '';
+    };
+    listenAddress = mkOption {
+      type = types.str;
+      default = "0.0.0.0";
+      description = lib.mdDoc ''
+        Interface address for receiving TLS connections.
+      '';
+    };
+  };
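+
+  # A minimal sketch (the server choice and certificate paths are illustrative):
+  #
+  #   virtualisation.podman.networkSocket = {
+  #     enable = true;
+  #     server = "ghostunnel";
+  #     openFirewall = true;
+  #     tls.cacert = "/var/lib/podman-tls/ca.pem";
+  #     tls.cert = "/var/lib/podman-tls/server-cert.pem";
+  #     tls.key = "/var/lib/podman-tls/server-key.pem";
+  #   };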
+
+  config = {
+    networking.firewall.allowedTCPPorts =
+      lib.optional (cfg.enable && cfg.openFirewall) cfg.port;
+  };
+
+  meta.maintainers = lib.teams.podman.members ++ [ lib.maintainers.roberth ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/proxmox-image.nix b/nixpkgs/nixos/modules/virtualisation/proxmox-image.nix
new file mode 100644
index 000000000000..62778f2626f8
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/proxmox-image.nix
@@ -0,0 +1,303 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options.proxmox = {
+    qemuConf = {
+      # essential configs
+      boot = mkOption {
+        type = types.str;
+        default = "";
+        example = "order=scsi0;net0";
+        description = lib.mdDoc ''
+          Default boot device. PVE will try all devices in its default order if this value is empty.
+        '';
+      };
+      scsihw = mkOption {
+        type = types.str;
+        default = "virtio-scsi-pci";
+        example = "lsi";
+        description = lib.mdDoc ''
+          SCSI controller type. Must be one of the supported values given in
+          <https://pve.proxmox.com/wiki/Qemu/KVM_Virtual_Machines>
+        '';
+      };
+      virtio0 = mkOption {
+        type = types.str;
+        default = "local-lvm:vm-9999-disk-0";
+        example = "ceph:vm-123-disk-0";
+        description = lib.mdDoc ''
+          Configuration for the default virtio disk. It can be used as a cue for PVE to autodetect the target storage.
+          This parameter is required by PVE even if it isn't used.
+        '';
+      };
+      ostype = mkOption {
+        type = types.str;
+        default = "l26";
+        description = lib.mdDoc ''
+          Guest OS type
+        '';
+      };
+      cores = mkOption {
+        type = types.ints.positive;
+        default = 1;
+        description = lib.mdDoc ''
+          Guest core count
+        '';
+      };
+      memory = mkOption {
+        type = types.ints.positive;
+        default = 1024;
+        description = lib.mdDoc ''
+          Guest memory in MB
+        '';
+      };
+      bios = mkOption {
+        type = types.enum [ "seabios" "ovmf" ];
+        default = "seabios";
+        description = ''
+          Select BIOS implementation (seabios = Legacy BIOS, ovmf = UEFI).
+        '';
+      };
+
+      # optional configs
+      name = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}";
+        description = lib.mdDoc ''
+          VM name
+        '';
+      };
+      additionalSpace = mkOption {
+        type = types.str;
+        default = "512M";
+        example = "2048M";
+        description = lib.mdDoc ''
+          Additional disk space to be added to the image if diskSize "auto"
+          is used.
+        '';
+      };
+      bootSize = mkOption {
+        type = types.str;
+        default = "256M";
+        example = "512M";
+        description = lib.mdDoc ''
+          Size of the boot partition. Is only used if partitionTableType is
+          either "efi" or "hybrid".
+        '';
+      };
+      diskSize = mkOption {
+        type = types.str;
+        default = "auto";
+        example = "20480";
+        description = lib.mdDoc ''
+          The size of the disk, in megabytes.
+          If "auto", the size is calculated based on the contents copied to it,
+          with additionalSpace taken into account.
+        '';
+      };
+      net0 = mkOption {
+        type = types.commas;
+        default = "virtio=00:00:00:00:00:00,bridge=vmbr0,firewall=1";
+        description = lib.mdDoc ''
+          Configuration for the default interface. When restoring from VMA, check the
+          "unique" box to ensure the device MAC is randomized.
+        '';
+      };
+      serial0 = mkOption {
+        type = types.str;
+        default = "socket";
+        example = "/dev/ttyS0";
+        description = lib.mdDoc ''
+          Create a serial device inside the VM (n is 0 to 3), and pass through a host
+          serial device (e.g. /dev/ttyS0), or create a unix socket on the host side
+          (use `qm terminal` to open a terminal connection).
+        '';
+      };
+      agent = mkOption {
+        type = types.bool;
+        apply = x: if x then "1" else "0";
+        default = true;
+        description = lib.mdDoc ''
+          Expect the guest to have the QEMU guest agent running.
+        '';
+      };
+    };
+    qemuExtraConf = mkOption {
+      type = with types; attrsOf (oneOf [ str int ]);
+      default = {};
+      example = literalExpression ''
+        {
+          cpu = "host";
+          onboot = 1;
+        }
+      '';
+      description = lib.mdDoc ''
+        Additional options appended to qemu-server.conf
+      '';
+    };
+    partitionTableType = mkOption {
+      type = types.enum [ "efi" "hybrid" "legacy" "legacy+gpt" ];
+      description = ''
+        Partition table type to use. See make-disk-image.nix partitionTableType for details.
+        Defaults to 'legacy' for 'proxmox.qemuConf.bios="seabios"' (the default); other bios values default to 'efi'.
+        Use 'hybrid' to build grub-based hybrid bios+efi images.
+      '';
+      default = if config.proxmox.qemuConf.bios == "seabios" then "legacy" else "efi";
+      defaultText = lib.literalExpression ''if config.proxmox.qemuConf.bios == "seabios" then "legacy" else "efi"'';
+      example = "hybrid";
+    };
+    filenameSuffix = mkOption {
+      type = types.str;
+      default = config.proxmox.qemuConf.name;
+      example = "999-nixos_template";
+      description = lib.mdDoc ''
+        Filename of the image will be vzdump-qemu-''${filenameSuffix}.vma.zst.
+        This will also determine the default name of the VM on restoring the VMA.
+        Start this value with a number if you want the VMA to be detected as a backup of
+        any specific VMID.
+      '';
+    };
+  };
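+
+  # Build sketch (assumes this module is imported by the configuration; the
+  # nixos-config path is illustrative):
+  #
+  #   nix-build '<nixpkgs/nixos>' -A config.system.build.VMA \
+  #     -I nixos-config=./proxmox.nix
+  #
+  # The resulting vzdump-qemu-*.vma.zst can be restored on a PVE host, e.g. with qmrestore.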
+
+  config = let
+    cfg = config.proxmox;
+    cfgLine = name: value: ''
+      ${name}: ${builtins.toString value}
+    '';
+    virtio0Storage = builtins.head (builtins.split ":" cfg.qemuConf.virtio0);
+    cfgFile = fileName: properties: pkgs.writeTextDir fileName ''
+      # generated by NixOS
+      ${lib.concatStrings (lib.mapAttrsToList cfgLine properties)}
+      #qmdump#map:virtio0:drive-virtio0:${virtio0Storage}:raw:
+    '';
+    inherit (cfg) partitionTableType;
+    supportEfi = partitionTableType == "efi" || partitionTableType == "hybrid";
+    supportBios = partitionTableType == "legacy" || partitionTableType == "hybrid" || partitionTableType == "legacy+gpt";
+    hasBootPartition = partitionTableType == "efi" || partitionTableType == "hybrid";
+    hasNoFsPartition = partitionTableType == "hybrid" || partitionTableType == "legacy+gpt";
+  in {
+    assertions = [
+      {
+        assertion = config.boot.loader.systemd-boot.enable -> config.proxmox.qemuConf.bios == "ovmf";
+        message = "systemd-boot requires 'ovmf' bios";
+      }
+      {
+        assertion = partitionTableType == "efi" -> config.proxmox.qemuConf.bios == "ovmf";
+        message = "'efi' disk partitioning requires 'ovmf' bios";
+      }
+      {
+        assertion = partitionTableType == "legacy" -> config.proxmox.qemuConf.bios == "seabios";
+        message = "'legacy' disk partitioning requires 'seabios' bios";
+      }
+      {
+        assertion = partitionTableType == "legacy+gpt" -> config.proxmox.qemuConf.bios == "seabios";
+        message = "'legacy+gpt' disk partitioning requires 'seabios' bios";
+      }
+    ];
+    system.build.VMA = import ../../lib/make-disk-image.nix {
+      name = "proxmox-${cfg.filenameSuffix}";
+      inherit (cfg) partitionTableType;
+      postVM = let
+        # Build qemu with PVE's patch that adds support for the VMA format
+        vma = (pkgs.qemu_kvm.override {
+          alsaSupport = false;
+          pulseSupport = false;
+          sdlSupport = false;
+          jackSupport = false;
+          gtkSupport = false;
+          vncSupport = false;
+          smartcardSupport = false;
+          spiceSupport = false;
+          ncursesSupport = false;
+          libiscsiSupport = false;
+          tpmSupport = false;
+          numaSupport = false;
+          seccompSupport = false;
+          guestAgentSupport = false;
+        }).overrideAttrs ( super: rec {
+
+          version = "7.2.1";
+          src = pkgs.fetchurl {
+            url= "https://download.qemu.org/qemu-${version}.tar.xz";
+            sha256 = "sha256-jIVpms+dekOl/immTN1WNwsMLRrQdLr3CYqCTReq1zs=";
+          };
+          patches = [
+            # Proxmox' VMA tool is published as a particular patch upon QEMU
+            (pkgs.fetchpatch {
+              url =
+                let
+                  rev = "abb04bb6272c1202ca9face0827917552b9d06f6";
+                  path = "debian/patches/pve/0027-PVE-Backup-add-vma-backup-format-code.patch";
+                in "https://git.proxmox.com/?p=pve-qemu.git;a=blob_plain;hb=${rev};f=${path}";
+              hash = "sha256-3d0HHdvaExCry6zcULnziYnWIAnn24vECkI4sjj2BMg=";
+            })
+
+            # Proxmox' VMA tool uses O_DIRECT which fails on tmpfs
+            # Filed to upstream issue tracker: https://bugzilla.proxmox.com/show_bug.cgi?id=4710
+            (pkgs.writeText "inline.patch" ''
+                --- a/vma-writer.c   2023-05-01 15:11:13.361341177 +0200
+                +++ b/vma-writer.c   2023-05-01 15:10:51.785293129 +0200
+                @@ -306,7 +306,7 @@
+                             /* try to use O_NONBLOCK */
+                             fcntl(vmaw->fd, F_SETFL, fcntl(vmaw->fd, F_GETFL)|O_NONBLOCK);
+                         } else  {
+                -            oflags = O_NONBLOCK|O_DIRECT|O_WRONLY|O_EXCL;
+                +            oflags = O_NONBLOCK|O_WRONLY|O_EXCL;
+                             vmaw->fd = qemu_create(filename, oflags, 0644, errp);
+                         }
+            '')
+          ];
+
+          buildInputs = super.buildInputs ++ [ pkgs.libuuid ];
+          nativeBuildInputs = super.nativeBuildInputs ++ [ pkgs.perl ];
+
+        });
+      in
+      ''
+        ${vma}/bin/vma create "vzdump-qemu-${cfg.filenameSuffix}.vma" \
+          -c ${cfgFile "qemu-server.conf" (cfg.qemuConf // cfg.qemuExtraConf)}/qemu-server.conf drive-virtio0=$diskImage
+        rm $diskImage
+        ${pkgs.zstd}/bin/zstd "vzdump-qemu-${cfg.filenameSuffix}.vma"
+        mv "vzdump-qemu-${cfg.filenameSuffix}.vma.zst" $out/
+
+        mkdir -p $out/nix-support
+        echo "file vma $out/vzdump-qemu-${cfg.filenameSuffix}.vma.zst" >> $out/nix-support/hydra-build-products
+      '';
+      inherit (cfg.qemuConf) additionalSpace diskSize bootSize;
+      format = "raw";
+      inherit config lib pkgs;
+    };
+
+    boot = {
+      growPartition = true;
+      kernelParams = [ "console=ttyS0" ];
+      loader.grub = {
+        device = lib.mkDefault (if (hasNoFsPartition || supportBios) then
+          # Even if there is a separate no-fs partition ("/dev/disk/by-partlabel/no-fs" i.e. "/dev/vda2"),
+          # which will be used by the bootloader, do not set it as loader.grub.device.
+          # GRUB installation fails, unless the whole disk is selected.
+          "/dev/vda"
+        else
+          "nodev");
+        efiSupport = lib.mkDefault supportEfi;
+        efiInstallAsRemovable = lib.mkDefault supportEfi;
+      };
+
+      loader.timeout = 0;
+      initrd.availableKernelModules = [ "uas" "virtio_blk" "virtio_pci" ];
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+    fileSystems."/boot" = lib.mkIf hasBootPartition {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    services.qemuGuest.enable = lib.mkDefault true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/proxmox-lxc.nix b/nixpkgs/nixos/modules/virtualisation/proxmox-lxc.nix
new file mode 100644
index 000000000000..3d966d725a9a
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/proxmox-lxc.nix
@@ -0,0 +1,75 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+{
+  options.proxmoxLXC = {
+    privileged = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable privileged mounts
+      '';
+    };
+    manageNetwork = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to manage network interfaces through nix options.
+        When false, systemd-networkd is enabled to accept network
+        configuration from proxmox.
+      '';
+    };
+    manageHostName = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to manage the hostname through nix options.
+        When false, the hostname is picked up from /etc/hostname,
+        which is populated by proxmox.
+      '';
+    };
+  };
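+
+  # Build sketch (the nixos-config path is illustrative):
+  #
+  #   nix-build '<nixpkgs/nixos>' -A config.system.build.tarball \
+  #     -I nixos-config=./lxc.nix
+  #
+  # The resulting tarball can be uploaded to PVE as a container template.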
+
+  config =
+    let
+      cfg = config.proxmoxLXC;
+    in
+    {
+      system.build.tarball = pkgs.callPackage ../../lib/make-system-tarball.nix {
+        storeContents = [{
+          object = config.system.build.toplevel;
+          symlink = "none";
+        }];
+
+        contents = [{
+          source = config.system.build.toplevel + "/init";
+          target = "/sbin/init";
+        }];
+
+        extraCommands = "mkdir -p root etc/systemd/network";
+      };
+
+      boot = {
+        isContainer = true;
+        loader.initScript.enable = true;
+      };
+
+      networking = mkIf (!cfg.manageNetwork) {
+        useDHCP = false;
+        useHostResolvConf = false;
+        useNetworkd = true;
+        # pick up hostname from /etc/hostname generated by proxmox
+        hostName = mkIf (!cfg.manageHostName) (mkForce "");
+      };
+
+      services.openssh = {
+        enable = mkDefault true;
+        startWhenNeeded = mkDefault true;
+      };
+
+      systemd.mounts = mkIf (!cfg.privileged)
+        [{ where = "/sys/kernel/debug"; enable = false; }];
+
+    };
+}
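+
+# Illustrative usage sketch (not part of the module; the import path and the
+# build invocation below are assumptions): a guest configuration might look like
+#
+#   { modulesPath, ... }: {
+#     imports = [ (modulesPath + "/virtualisation/proxmox-lxc.nix") ];
+#     proxmoxLXC.manageNetwork = false;  # let Proxmox push the network config
+#   }
+#
+# and the LXC template tarball defined above could then be built with something like
+#   nix-build '<nixpkgs/nixos>' -A config.system.build.tarball -I nixos-config=./configuration.nix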
diff --git a/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix b/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix
new file mode 100644
index 000000000000..650fb2419160
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/qemu-guest-agent.nix
@@ -0,0 +1,45 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.services.qemuGuest;
+in {
+
+  options.services.qemuGuest = {
+      enable = mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc "Whether to enable the qemu guest agent.";
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.qemu_kvm.ga;
+        defaultText = literalExpression "pkgs.qemu_kvm.ga";
+        description = lib.mdDoc "The QEMU guest agent package.";
+      };
+  };
+
+  config = mkIf cfg.enable (
+      mkMerge [
+    {
+
+      services.udev.extraRules = ''
+        SUBSYSTEM=="virtio-ports", ATTR{name}=="org.qemu.guest_agent.0", TAG+="systemd", ENV{SYSTEMD_WANTS}="qemu-guest-agent.service"
+      '';
+
+      systemd.services.qemu-guest-agent = {
+        description = "Run the QEMU Guest Agent";
+        serviceConfig = {
+          ExecStart = "${cfg.package}/bin/qemu-ga --statedir /run/qemu-ga";
+          Restart = "always";
+          RestartSec = 0;
+          # Runtime directory and mode
+          RuntimeDirectory = "qemu-ga";
+          RuntimeDirectoryMode = "0755";
+        };
+      };
+    }
+  ]
+  );
+}
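+
+# Illustrative usage sketch (not part of the module): enabling the agent from a
+# system configuration is a one-liner,
+#   services.qemuGuest.enable = true;
+# after which the host can query the guest (e.g. IP addresses, fsfreeze) over
+# the virtio-serial port matched by the udev rule above.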
diff --git a/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix b/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
new file mode 100644
index 000000000000..6f275baf60dc
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/qemu-vm.nix
@@ -0,0 +1,1333 @@
+# This module creates a virtual machine from the NixOS configuration.
+# Building the `config.system.build.vm' attribute gives you a command
+# that starts a KVM/QEMU VM running the NixOS configuration defined in
+# `config'. By default, the Nix store is shared read-only with the
+# host, which makes (re)building VMs very efficient.
+
+{ config, lib, pkgs, options, ... }:
+
+with lib;
+
+let
+
+  qemu-common = import ../../lib/qemu-common.nix { inherit lib pkgs; };
+
+  cfg = config.virtualisation;
+
+  opt = options.virtualisation;
+
+  qemu = cfg.qemu.package;
+
+  hostPkgs = cfg.host.pkgs;
+
+  consoles = lib.concatMapStringsSep " " (c: "console=${c}") cfg.qemu.consoles;
+
+  driveOpts = { ... }: {
+
+    options = {
+
+      file = mkOption {
+        type = types.str;
+        description = lib.mdDoc "The file image used for this drive.";
+      };
+
+      driveExtraOpts = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = lib.mdDoc "Extra options passed to drive flag.";
+      };
+
+      deviceExtraOpts = mkOption {
+        type = types.attrsOf types.str;
+        default = {};
+        description = lib.mdDoc "Extra options passed to device flag.";
+      };
+
+      name = mkOption {
+        type = types.nullOr types.str;
+        default = null;
+        description =
+          lib.mdDoc "A name for the drive. Must be unique in the drives list. Not passed to qemu.";
+      };
+
+    };
+
+  };
+
+  selectPartitionTableLayout = { useEFIBoot, useDefaultFilesystems }:
+  if useDefaultFilesystems then
+    if useEFIBoot then "efi" else "legacy"
+  else "none";
+
+  driveCmdline = idx: { file, driveExtraOpts, deviceExtraOpts, ... }:
+    let
+      drvId = "drive${toString idx}";
+      mkKeyValue = generators.mkKeyValueDefault {} "=";
+      mkOpts = opts: concatStringsSep "," (mapAttrsToList mkKeyValue opts);
+      driveOpts = mkOpts (driveExtraOpts // {
+        index = idx;
+        id = drvId;
+        "if" = "none";
+        inherit file;
+      });
+      deviceOpts = mkOpts (deviceExtraOpts // {
+        drive = drvId;
+      });
+      device =
+        if cfg.qemu.diskInterface == "scsi" then
+          "-device lsi53c895a -device scsi-hd,${deviceOpts}"
+        else
+          "-device virtio-blk-pci,${deviceOpts}";
+    in
+      "-drive ${driveOpts} ${device}";
+
+  drivesCmdLine = drives: concatStringsSep "\\\n    " (imap1 driveCmdline drives);
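+
+  # For illustration (derived from driveCmdline above; the exact option order may differ):
+  # a drive entry such as
+  #   { file = ''"$NIX_DISK_IMAGE"''; driveExtraOpts.cache = "writeback"; deviceExtraOpts.bootindex = "1"; }
+  # is rendered for the default virtio disk interface roughly as
+  #   -drive cache=writeback,file="$NIX_DISK_IMAGE",id=drive1,if=none,index=1 -device virtio-blk-pci,bootindex=1,drive=drive1
+  # with indices starting at 1 because the drives are numbered with imap1.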
+
+  # Shell script to start the VM.
+  startVM =
+    ''
+      #! ${hostPkgs.runtimeShell}
+
+      export PATH=${makeBinPath [ hostPkgs.coreutils ]}''${PATH:+:}$PATH
+
+      set -e
+
+      # Create an empty ext4 filesystem image. A filesystem image does not
+      # contain a partition table but just a filesystem.
+      createEmptyFilesystemImage() {
+        local name=$1
+        local size=$2
+        local temp=$(mktemp)
+        ${qemu}/bin/qemu-img create -f raw "$temp" "$size"
+        ${hostPkgs.e2fsprogs}/bin/mkfs.ext4 -L ${rootFilesystemLabel} "$temp"
+        ${qemu}/bin/qemu-img convert -f raw -O qcow2 "$temp" "$name"
+        rm "$temp"
+      }
+
+      NIX_DISK_IMAGE=$(readlink -f "''${NIX_DISK_IMAGE:-${toString config.virtualisation.diskImage}}") || test -z "$NIX_DISK_IMAGE"
+
+      if test -n "$NIX_DISK_IMAGE" && ! test -e "$NIX_DISK_IMAGE"; then
+          echo "Disk image does not exist, creating the virtualisation disk image..."
+
+          ${if (cfg.useBootLoader && cfg.useDefaultFilesystems) then ''
+            # Create a writable qcow2 image using the systemImage as a backing
+            # image.
+
+            # CoW prevents the size from being attributed to the image.
+            # FIXME: raise this issue upstream.
+            ${qemu}/bin/qemu-img create \
+              -f qcow2 \
+              -b ${systemImage}/nixos.qcow2 \
+              -F qcow2 \
+              "$NIX_DISK_IMAGE"
+          '' else if cfg.useDefaultFilesystems then ''
+            createEmptyFilesystemImage "$NIX_DISK_IMAGE" "${toString cfg.diskSize}M"
+          '' else ''
+            # Create an empty disk image without a filesystem.
+            ${qemu}/bin/qemu-img create -f qcow2 "$NIX_DISK_IMAGE" "${toString cfg.diskSize}M"
+          ''
+          }
+          echo "Virtualisation disk image created."
+      fi
+
+      # Create a directory for storing temporary data of the running VM.
+      if [ -z "$TMPDIR" ] || [ -z "$USE_TMPDIR" ]; then
+          TMPDIR=$(mktemp -d nix-vm.XXXXXXXXXX --tmpdir)
+      fi
+
+      ${lib.optionalString (cfg.useNixStoreImage)
+        (if cfg.writableStore
+          then ''
+            # Create a writable copy/snapshot of the store image.
+            ${qemu}/bin/qemu-img create -f qcow2 -F qcow2 -b ${storeImage}/nixos.qcow2 "$TMPDIR"/store.img
+          ''
+          else ''
+            (
+              cd ${builtins.storeDir}
+              ${hostPkgs.erofs-utils}/bin/mkfs.erofs \
+                --force-uid=0 \
+                --force-gid=0 \
+                -L ${nixStoreFilesystemLabel} \
+                -U eb176051-bd15-49b7-9e6b-462e0b467019 \
+                -T 0 \
+                --exclude-regex="$(
+                  <${hostPkgs.closureInfo { rootPaths = [ config.system.build.toplevel regInfo ]; }}/store-paths \
+                    sed -e 's^.*/^^g' \
+                  | cut -c -10 \
+                  | ${hostPkgs.python3}/bin/python ${./includes-to-excludes.py} )" \
+                "$TMPDIR"/store.img \
+                . \
+                </dev/null >/dev/null
+            )
+          ''
+        )
+      }
+
+      # Create a directory for exchanging data with the VM.
+      mkdir -p "$TMPDIR/xchg"
+
+      ${lib.optionalString cfg.useHostCerts
+      ''
+        mkdir -p "$TMPDIR/certs"
+        if [ -e "$NIX_SSL_CERT_FILE" ]; then
+          cp -L "$NIX_SSL_CERT_FILE" "$TMPDIR"/certs/ca-certificates.crt
+        else
+          echo \$NIX_SSL_CERT_FILE should point to a valid file if virtualisation.useHostCerts is enabled.
+        fi
+      ''}
+
+      ${lib.optionalString cfg.useEFIBoot
+      ''
+        # Expose EFI variables; this is useful even when we are not using a bootloader (!).
+        # We might be interested in having EFI variable storage present even if we aren't booting via UEFI, hence
+        # no guard against `useBootLoader`.  Examples:
+        # - testing PXE boot or other EFI applications
+        # - direct-booting LinuxBoot, which `kexec()`s into a UEFI environment that can boot e.g. Windows
+        NIX_EFI_VARS=$(readlink -f "''${NIX_EFI_VARS:-${config.system.name}-efi-vars.fd}")
+        # VM needs writable EFI vars
+        if ! test -e "$NIX_EFI_VARS"; then
+        ${if cfg.useBootLoader then
+            # We still need the EFI var from the make-disk-image derivation
+            # because our "switch-to-configuration" process might
+            # write into it and we want to keep this data.
+            ''cp ${systemImage}/efi-vars.fd "$NIX_EFI_VARS"''
+            else
+            ''cp ${cfg.efi.variables} "$NIX_EFI_VARS"''
+          }
+          chmod 0644 "$NIX_EFI_VARS"
+        fi
+      ''}
+
+      ${lib.optionalString cfg.tpm.enable ''
+        NIX_SWTPM_DIR=$(readlink -f "''${NIX_SWTPM_DIR:-${config.system.name}-swtpm}")
+        mkdir -p "$NIX_SWTPM_DIR"
+        ${lib.getExe cfg.tpm.package} \
+          socket \
+          --tpmstate dir="$NIX_SWTPM_DIR" \
+          --ctrl type=unixio,path="$NIX_SWTPM_DIR"/socket,terminate \
+          --pid file="$NIX_SWTPM_DIR"/pid --daemon \
+          --tpm2 \
+          --log file="$NIX_SWTPM_DIR"/stdout,level=6
+
+        # Enable `fdflags` builtin in Bash
+        # We will need it to perform surgical modification of the file descriptor
+        # passed to the coprocess in order to remove `FD_CLOEXEC`, the flag that
+        # closes the file descriptor on exec.
+        # If left alone, it will cause the coprocess to read EOF when QEMU is
+        # `exec`'d at the end of this script. To work around that, we will just
+        # clear the `FD_CLOEXEC` bit as a first step.
+        enable -f ${hostPkgs.bash}/lib/bash/fdflags fdflags
+        # Leave a dangling subprocess because the swtpm ctrl socket has
+        # "terminate" set: when the last connection disconnects, swtpm stops.
+        # When qemu stops, or if the main shell process ends, the coproc will
+        # get signaled by virtue of the pipe between main and coproc ending,
+        # which in turn triggers a socat connect-disconnect to swtpm, which
+        # will stop it.
+        coproc waitingswtpm {
+          read || :
+          echo "" | ${lib.getExe hostPkgs.socat} STDIO UNIX-CONNECT:"$NIX_SWTPM_DIR"/socket
+        }
+        # Clear `FD_CLOEXEC` on the coprocess' file descriptor stdin.
+        fdflags -s-cloexec ''${waitingswtpm[1]}
+      ''}
+
+      cd "$TMPDIR"
+
+      ${lib.optionalString (cfg.emptyDiskImages != []) "idx=0"}
+      ${flip concatMapStrings cfg.emptyDiskImages (size: ''
+        if ! test -e "empty$idx.qcow2"; then
+            ${qemu}/bin/qemu-img create -f qcow2 "empty$idx.qcow2" "${toString size}M"
+        fi
+        idx=$((idx + 1))
+      '')}
+
+      # Start QEMU.
+      exec ${qemu-common.qemuBinary qemu} \
+          -name ${config.system.name} \
+          -m ${toString config.virtualisation.memorySize} \
+          -smp ${toString config.virtualisation.cores} \
+          -device virtio-rng-pci \
+          ${concatStringsSep " " config.virtualisation.qemu.networkingOptions} \
+          ${concatStringsSep " \\\n    "
+            (mapAttrsToList
+              (tag: share: "-virtfs local,path=${share.source},security_model=none,mount_tag=${tag}")
+              config.virtualisation.sharedDirectories)} \
+          ${drivesCmdLine config.virtualisation.qemu.drives} \
+          ${concatStringsSep " \\\n    " config.virtualisation.qemu.options} \
+          $QEMU_OPTS \
+          "$@"
+    '';
+
+
+  regInfo = hostPkgs.closureInfo { rootPaths = config.virtualisation.additionalPaths; };
+
+  # Use well-defined and persistent filesystem labels to identify block devices.
+  rootFilesystemLabel = "nixos";
+  espFilesystemLabel = "ESP"; # Hard-coded by make-disk-image.nix
+  nixStoreFilesystemLabel = "nix-store";
+
+  # The root drive is a raw disk which does not necessarily contain a
+  # filesystem or partition table. It thus cannot be identified via the typical
+  # persistent naming schemes (e.g. /dev/disk/by-{label, uuid, partlabel,
+  # partuuid}. Instead, supply a well-defined and persistent serial attribute
+  # via QEMU. Inside the running system, the disk can then be identified via
+  # the /dev/disk/by-id scheme.
+  rootDriveSerialAttr = "root";
+
+  # System image is akin to a complete NixOS install with
+  # a boot partition and root partition.
+  systemImage = import ../../lib/make-disk-image.nix {
+    inherit pkgs config lib;
+    additionalPaths = [ regInfo ];
+    format = "qcow2";
+    onlyNixStore = false;
+    label = rootFilesystemLabel;
+    partitionTableType = selectPartitionTableLayout { inherit (cfg) useDefaultFilesystems useEFIBoot; };
+    # The bootloader should be installed on the system image only if we are booting through bootloaders.
+    # However, if a user is not using our default filesystems, it is possible to have no ESP at all,
+    # or a strange partition table that's incompatible with the GRUB configuration.
+    # As a consequence, this may lead to disk image creation failures.
+    # To avoid this, we prefer to let the user figure out how to install the bootloader on their ESP/disk,
+    # usually by building their own disk image.
+    # TODO: if you are interested in a more fine-grained heuristic for `installBootLoader`
+    # that examines the actual contents of `cfg.fileSystems`, please send a PR.
+    installBootLoader = cfg.useBootLoader && cfg.useDefaultFilesystems;
+    touchEFIVars = cfg.useEFIBoot;
+    diskSize = "auto";
+    additionalSpace = "0M";
+    copyChannel = false;
+    OVMF = cfg.efi.OVMF;
+  };
+
+  storeImage = import ../../lib/make-disk-image.nix {
+    name = "nix-store-image";
+    inherit pkgs config lib;
+    additionalPaths = [ regInfo ];
+    format = "qcow2";
+    onlyNixStore = true;
+    label = nixStoreFilesystemLabel;
+    partitionTableType = "none";
+    installBootLoader = false;
+    touchEFIVars = false;
+    diskSize = "auto";
+    additionalSpace = "0M";
+    copyChannel = false;
+  };
+
+in
+
+{
+  imports = [
+    ../profiles/qemu-guest.nix
+    (mkRenamedOptionModule [ "virtualisation" "pathsInNixDB" ] [ "virtualisation" "additionalPaths" ])
+    (mkRemovedOptionModule [ "virtualisation" "bootDevice" ] "This option was renamed to `virtualisation.rootDevice`, as it was incorrectly named and misleading. Take the time to review what you want to do and look at the new options like `virtualisation.{bootLoaderDevice, bootPartition}`, open an issue in case of issues.")
+    (mkRemovedOptionModule [ "virtualisation" "efiVars" ] "This option was removed; it is possible to provide a template UEFI variable file with `virtualisation.efi.variables`. If this option is important to you, open an issue.")
+    (mkRemovedOptionModule [ "virtualisation" "persistBootDevice" ] "The boot device is always persisted if you use a bootloader through the root disk image; if this does not work for your use case, please examine carefully what the `virtualisation.{bootDevice, rootDevice, bootPartition}` options offer you and open an issue explaining your need.")
+  ];
+
+  options = {
+
+    virtualisation.fileSystems = options.fileSystems;
+
+    virtualisation.memorySize =
+      mkOption {
+        type = types.ints.positive;
+        default = 1024;
+        description =
+          lib.mdDoc ''
+            The memory size in megabytes of the virtual machine.
+          '';
+      };
+
+    virtualisation.msize =
+      mkOption {
+        type = types.ints.positive;
+        default = 16384;
+        description =
+          lib.mdDoc ''
+            The msize (maximum packet size) option passed to 9p file systems, in
+            bytes. Increasing this should increase performance significantly,
+            at the cost of higher RAM usage.
+          '';
+      };
+
+    virtualisation.diskSize =
+      mkOption {
+        type = types.nullOr types.ints.positive;
+        default = 1024;
+        description =
+          lib.mdDoc ''
+            The disk size in megabytes of the virtual machine.
+          '';
+      };
+
+    virtualisation.diskImage =
+      mkOption {
+        type = types.nullOr types.str;
+        default = "./${config.system.name}.qcow2";
+        defaultText = literalExpression ''"./''${config.system.name}.qcow2"'';
+        description =
+          lib.mdDoc ''
+            Path to the disk image containing the root filesystem.
+            The image will be created on startup if it does not
+            exist.
+
+            If null, a tmpfs will be used as the root filesystem and
+            the VM's state will not be persistent.
+          '';
+      };
+
+    virtualisation.bootLoaderDevice =
+      mkOption {
+        type = types.path;
+        default = "/dev/disk/by-id/virtio-${rootDriveSerialAttr}";
+        defaultText = literalExpression ''/dev/disk/by-id/virtio-${rootDriveSerialAttr}'';
+        example = "/dev/disk/by-id/virtio-boot-loader-device";
+        description =
+          lib.mdDoc ''
+            The path (inside the VM) to the device to boot from when legacy booting.
+          '';
+        };
+
+    virtualisation.bootPartition =
+      mkOption {
+        type = types.nullOr types.path;
+        default = if cfg.useEFIBoot then "/dev/disk/by-label/${espFilesystemLabel}" else null;
+        defaultText = literalExpression ''if cfg.useEFIBoot then "/dev/disk/by-label/${espFilesystemLabel}" else null'';
+        example = "/dev/disk/by-label/esp";
+        description =
+          lib.mdDoc ''
+            The path (inside the VM) to the device containing the EFI System Partition (ESP).
+
+            If you are *not* booting from UEFI firmware, this value defaults
+            to `null`. The ESP is mounted under `/boot`.
+          '';
+      };
+
+    virtualisation.rootDevice =
+      mkOption {
+        type = types.nullOr types.path;
+        default = "/dev/disk/by-label/${rootFilesystemLabel}";
+        defaultText = literalExpression ''/dev/disk/by-label/${rootFilesystemLabel}'';
+        example = "/dev/disk/by-label/nixos";
+        description =
+          lib.mdDoc ''
+            The path (inside the VM) to the device containing the root filesystem.
+          '';
+      };
+
+    virtualisation.emptyDiskImages =
+      mkOption {
+        type = types.listOf types.ints.positive;
+        default = [];
+        description =
+          lib.mdDoc ''
+            Additional disk images to provide to the VM. The value is
+            a list of sizes in megabytes, one for each disk. These disks are
+            writable by the VM.
+          '';
+      };
+
+    virtualisation.graphics =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            Whether to run QEMU with a graphics window, or in nographic mode.
+            The serial console is enabled in both cases, but this setting
+            changes the preferred console.
+          '';
+      };
+
+    virtualisation.resolution =
+      mkOption {
+        type = options.services.xserver.resolutions.type.nestedTypes.elemType;
+        default = { x = 1024; y = 768; };
+        description =
+          lib.mdDoc ''
+            The resolution of the virtual machine display.
+          '';
+      };
+
+    virtualisation.cores =
+      mkOption {
+        type = types.ints.positive;
+        default = 1;
+        description =
+          lib.mdDoc ''
+            Specify the number of cores the guest is permitted to use.
+            The number can be higher than the available cores on the
+            host system.
+          '';
+      };
+
+    virtualisation.sharedDirectories =
+      mkOption {
+        type = types.attrsOf
+          (types.submodule {
+            options.source = mkOption {
+              type = types.str;
+              description = lib.mdDoc "The path of the directory to share; can be a shell variable.";
+            };
+            options.target = mkOption {
+              type = types.path;
+              description = lib.mdDoc "The mount point of the directory inside the virtual machine";
+            };
+          });
+        default = { };
+        example = {
+          my-share = { source = "/path/to/be/shared"; target = "/mnt/shared"; };
+        };
+        description =
+          lib.mdDoc ''
+            An attribute set of directories that will be shared with the
+            virtual machine using VirtFS (9P filesystem over VirtIO).
+            The attribute name will be used as the 9P mount tag.
+          '';
+      };
+
+    virtualisation.additionalPaths =
+      mkOption {
+        type = types.listOf types.path;
+        default = [];
+        description =
+          lib.mdDoc ''
+            A list of paths whose closure should be made available to
+            the VM.
+
+            When 9p is used, the closure is registered in the Nix
+            database in the VM. All other paths in the host Nix store
+            appear in the guest Nix store as well, but are considered
+            garbage (because they are not registered in the Nix
+            database of the guest).
+
+            When {option}`virtualisation.useNixStoreImage` is
+            set, the closure is copied to the Nix store image.
+          '';
+      };
+
+    virtualisation.forwardPorts = mkOption {
+      type = types.listOf
+        (types.submodule {
+          options.from = mkOption {
+            type = types.enum [ "host" "guest" ];
+            default = "host";
+            description =
+              lib.mdDoc ''
+                Controls the direction in which the ports are mapped:
+
+                - `"host"` means traffic from the host ports
+                  is forwarded to the given guest port.
+                - `"guest"` means traffic from the guest ports
+                  is forwarded to the given host port.
+              '';
+          };
+          options.proto = mkOption {
+            type = types.enum [ "tcp" "udp" ];
+            default = "tcp";
+            description = lib.mdDoc "The protocol to forward.";
+          };
+          options.host.address = mkOption {
+            type = types.str;
+            default = "";
+            description = lib.mdDoc "The IPv4 address of the host.";
+          };
+          options.host.port = mkOption {
+            type = types.port;
+            description = lib.mdDoc "The host port to be mapped.";
+          };
+          options.guest.address = mkOption {
+            type = types.str;
+            default = "";
+            description = lib.mdDoc "The IPv4 address on the guest VLAN.";
+          };
+          options.guest.port = mkOption {
+            type = types.port;
+            description = lib.mdDoc "The guest port to be mapped.";
+          };
+        });
+      default = [];
+      example = lib.literalExpression
+        ''
+        [ # forward local port 2222 -> 22, to ssh into the VM
+          { from = "host"; host.port = 2222; guest.port = 22; }
+
+          # forward local port 80 -> 10.0.2.10:80 in the VLAN
+          { from = "guest";
+            guest.address = "10.0.2.10"; guest.port = 80;
+            host.address = "127.0.0.1"; host.port = 80;
+          }
+        ]
+        '';
+      description =
+        lib.mdDoc ''
+          When using the SLiRP user networking (default), this option allows
+          forwarding ports to/from the host/guest.
+
+          ::: {.warning}
+          If the NixOS firewall on the virtual machine is enabled, you also
+          have to open the guest ports to enable the traffic between host and
+          guest.
+          :::
+
+          ::: {.note}
+          Currently QEMU supports only IPv4 forwarding.
+          :::
+        '';
+    };
+
+    virtualisation.restrictNetwork =
+      mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description =
+          lib.mdDoc ''
+            If this option is enabled, the guest will be isolated, i.e. it will
+            not be able to contact the host and no guest IP packets will be
+            routed over the host to the outside. This option does not affect
+            any explicitly set forwarding rules.
+          '';
+      };
+
+    virtualisation.vlans =
+      mkOption {
+        type = types.listOf types.ints.unsigned;
+        default = if config.virtualisation.interfaces == {} then [ 1 ] else [ ];
+        defaultText = lib.literalExpression ''if config.virtualisation.interfaces == {} then [ 1 ] else [ ]'';
+        example = [ 1 2 ];
+        description =
+          lib.mdDoc ''
+            Virtual networks to which the VM is connected.  Each
+            number «N» in this list causes
+            the VM to have a virtual Ethernet interface attached to a
+            separate virtual network on which it will be assigned IP
+            address
+            `192.168.«N».«M»`,
+            where «M» is the index of this VM
+            in the list of VMs.
+          '';
+      };
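+
+    # For illustration (derived from the description above): with
+    #   virtualisation.vlans = [ 1 2 ];
+    # the first VM in a test network gets two virtual Ethernet interfaces with
+    # the addresses 192.168.1.1 and 192.168.2.1.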
+
+    virtualisation.interfaces = mkOption {
+      default = {};
+      example = {
+        enp1s0.vlan = 1;
+      };
+      description = lib.mdDoc ''
+        Network interfaces to add to the VM.
+      '';
+      type = with types; attrsOf (submodule {
+        options = {
+          vlan = mkOption {
+            type = types.ints.unsigned;
+            description = lib.mdDoc ''
+              VLAN to which the network interface is connected.
+            '';
+          };
+
+          assignIP = mkOption {
+            type = types.bool;
+            default = false;
+            description = lib.mdDoc ''
+              Automatically assign an IP address to the network interface using the same scheme as
+              virtualisation.vlans.
+            '';
+          };
+        };
+      });
+    };
+
+    virtualisation.writableStore =
+      mkOption {
+        type = types.bool;
+        default = cfg.mountHostNixStore;
+        defaultText = literalExpression "cfg.mountHostNixStore";
+        description =
+          lib.mdDoc ''
+            If enabled, the Nix store in the VM is made writable by
+            layering an overlay filesystem on top of the host's Nix
+            store.
+
+            By default, this is enabled if you mount a host Nix store.
+          '';
+      };
+
+    virtualisation.writableStoreUseTmpfs =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            Use a tmpfs for the writable store instead of writing to the VM's
+            own filesystem.
+          '';
+      };
+
+    networking.primaryIPAddress =
+      mkOption {
+        type = types.str;
+        default = "";
+        internal = true;
+        description = lib.mdDoc "Primary IP address used in /etc/hosts.";
+      };
+
+    virtualisation.host.pkgs = mkOption {
+      type = options.nixpkgs.pkgs.type;
+      default = pkgs;
+      defaultText = literalExpression "pkgs";
+      example = literalExpression ''
+        import pkgs.path { system = "x86_64-darwin"; }
+      '';
+      description = lib.mdDoc ''
+        Package set to use for the host-specific packages of the VM runner.
+        Changing this to e.g. a Darwin package set allows running NixOS VMs on Darwin.
+      '';
+    };
+
+    virtualisation.qemu = {
+      package =
+        mkOption {
+          type = types.package;
+          default = if hostPkgs.stdenv.hostPlatform.qemuArch == pkgs.stdenv.hostPlatform.qemuArch then hostPkgs.qemu_kvm else hostPkgs.qemu;
+          defaultText = literalExpression "if hostPkgs.stdenv.hostPlatform.qemuArch == pkgs.stdenv.hostPlatform.qemuArch then config.virtualisation.host.pkgs.qemu_kvm else config.virtualisation.host.pkgs.qemu";
+          example = literalExpression "pkgs.qemu_test";
+          description = lib.mdDoc "QEMU package to use.";
+        };
+
+      options =
+        mkOption {
+          type = types.listOf types.str;
+          default = [];
+          example = [ "-vga std" ];
+          description = lib.mdDoc "Options passed to QEMU.";
+        };
+
+      consoles = mkOption {
+        type = types.listOf types.str;
+        default = let
+          consoles = [ "${qemu-common.qemuSerialDevice},115200n8" "tty0" ];
+        in if cfg.graphics then consoles else reverseList consoles;
+        example = [ "console=tty1" ];
+        description = lib.mdDoc ''
+          The output console devices to pass to the kernel command line via the
+          `console` parameter, the primary console is the last
+          item of this list.
+
+          By default it enables both serial console and
+          `tty0`. The preferred console (last one) is based on
+          the value of {option}`virtualisation.graphics`.
+        '';
+      };
+
+      networkingOptions =
+        mkOption {
+          type = types.listOf types.str;
+          default = [ ];
+          example = [
+            "-net nic,netdev=user.0,model=virtio"
+            "-netdev user,id=user.0,\${QEMU_NET_OPTS:+,$QEMU_NET_OPTS}"
+          ];
+          description = lib.mdDoc ''
+            Networking-related command-line options that should be passed to qemu.
+            The default is to use userspace networking (SLiRP).
+
+            If you override this option, be advised to keep
+            ''${QEMU_NET_OPTS:+,$QEMU_NET_OPTS} (as seen in the example)
+            to keep the default runtime behaviour.
+          '';
+        };
+
+      drives =
+        mkOption {
+          type = types.listOf (types.submodule driveOpts);
+          description = lib.mdDoc "Drives passed to qemu.";
+        };
+
+      diskInterface =
+        mkOption {
+          type = types.enum [ "virtio" "scsi" "ide" ];
+          default = "virtio";
+          example = "scsi";
+          description = lib.mdDoc "The interface used for the virtual hard disks.";
+        };
+
+      guestAgent.enable =
+        mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Enable the Qemu guest agent.
+          '';
+        };
+
+      virtioKeyboard =
+        mkOption {
+          type = types.bool;
+          default = true;
+          description = lib.mdDoc ''
+            Enable the virtio-keyboard device.
+          '';
+        };
+    };
+
+    virtualisation.useNixStoreImage =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description = lib.mdDoc ''
+          Build and use a disk image for the Nix store, instead of
+          accessing the host's one through 9p.
+
+          For applications which do a lot of reads from the store,
+          this can drastically improve performance, but at the cost of
+          disk space and image build time.
+
+          As an alternative, you can use a bootloader which will provide you
+          with a full NixOS system image containing a Nix store and
+          avoid mounting the host nix store through
+          {option}`virtualisation.mountHostNixStore`.
+        '';
+      };
+
+    virtualisation.mountHostNixStore =
+      mkOption {
+        type = types.bool;
+        default = !cfg.useNixStoreImage && !cfg.useBootLoader;
+        defaultText = literalExpression "!cfg.useNixStoreImage && !cfg.useBootLoader";
+        description = lib.mdDoc ''
+          Mount the host Nix store as a 9p mount.
+        '';
+      };
+
+    virtualisation.directBoot = {
+      enable =
+        mkOption {
+          type = types.bool;
+          default = !cfg.useBootLoader;
+          defaultText = "!cfg.useBootLoader";
+          description =
+            lib.mdDoc ''
+              If enabled, the virtual machine will boot directly into the kernel instead of through a bootloader. Other relevant parameters such as the initrd are also passed to QEMU.
+
+              If you want to test netboot, consider disabling this option.
+
+              This will not boot / reboot correctly into a system that has switched to a different configuration on disk.
+
+              This is enabled by default if you don't enable bootloaders, but you can still enable a bootloader if you need.
+              Read more about this feature: <https://qemu-project.gitlab.io/qemu/system/linuxboot.html>.
+            '';
+        };
+      initrd =
+        mkOption {
+          type = types.str;
+          default = "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+          defaultText = "\${config.system.build.initialRamdisk}/\${config.system.boot.loader.initrdFile}";
+          description =
+            lib.mdDoc ''
+              In direct boot situations, you may want to influence which initrd is
+              loaded, in order to use your own customized payload.
+
+              This is useful if you want to test the netboot image without
+              testing the firmware or the loading part.
+            '';
+        };
+    };
+
+    virtualisation.useBootLoader =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            Use a boot loader to boot the system.
+            This allows, among other things, testing the boot loader.
+
+            If disabled, the kernel and initrd are directly booted,
+            forgoing any bootloader.
+          '';
+      };
+
+    virtualisation.useEFIBoot =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            If enabled, the virtual machine will provide an EFI boot
+            manager and EFI variable storage.
+            This takes effect even if useBootLoader == false (e.g. for direct boot).
+          '';
+        };
+
+    virtualisation.efi = {
+      OVMF = mkOption {
+        type = types.package;
+        default = (pkgs.OVMF.override {
+          secureBoot = cfg.useSecureBoot;
+        }).fd;
+        defaultText = ''(pkgs.OVMF.override {
+          secureBoot = cfg.useSecureBoot;
+        }).fd'';
+        description =
+        lib.mdDoc "OVMF firmware package, defaults to OVMF configured with secure boot if needed.";
+      };
+
+      firmware = mkOption {
+        type = types.path;
+        default = cfg.efi.OVMF.firmware;
+        defaultText = literalExpression "cfg.efi.OVMF.firmware";
+        description =
+          lib.mdDoc ''
+            Firmware binary for EFI implementation, defaults to OVMF.
+          '';
+      };
+
+      variables = mkOption {
+        type = types.path;
+        default = cfg.efi.OVMF.variables;
+        defaultText = literalExpression "cfg.efi.OVMF.variables";
+        description =
+          lib.mdDoc ''
+            Platform-specific flash binary for EFI variables, implementation-dependent on the EFI firmware.
+            Defaults to OVMF.
+          '';
+      };
+    };
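+
+    # Illustrative sketch (not part of the module): a UEFI test VM can be
+    # requested with
+    #   virtualisation.useEFIBoot = true;
+    #   virtualisation.useBootLoader = true;
+    # which makes the run script attach the OVMF firmware and a writable EFI
+    # variable store as the two pflash drives defined further below.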
+
+    virtualisation.tpm = {
+      enable = mkEnableOption "a TPM device in the virtual machine with a driver, using swtpm.";
+
+      package = mkPackageOptionMD cfg.host.pkgs "swtpm" { };
+
+      deviceModel = mkOption {
+        type = types.str;
+        default = ({
+          "i686-linux" = "tpm-tis";
+          "x86_64-linux" = "tpm-tis";
+          "ppc64-linux" = "tpm-spapr";
+          "armv7-linux" = "tpm-tis-device";
+          "aarch64-linux" = "tpm-tis-device";
+        }.${pkgs.hostPlatform.system} or (throw "Unsupported system for TPM2 emulation in QEMU"));
+        defaultText = ''
+          Based on the guest platform Linux system:
+
+          - `tpm-tis` for (i686, x86_64)
+          - `tpm-spapr` for ppc64
+          - `tpm-tis-device` for (armv7, aarch64)
+        '';
+        example = "tpm-tis-device";
+        description = lib.mdDoc "QEMU device model for the TPM, uses the appropriate default based on the guest platform system and the package passed.";
+      };
+    };
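+
+    # Illustrative sketch (not part of the module): setting
+    #   virtualisation.tpm.enable = true;
+    # starts an swtpm socket next to the VM state directory and attaches it as
+    # the configured deviceModel; the guest then sees an emulated TPM through
+    # the tpm_tis driver added to the initrd further below.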
+
+    virtualisation.useDefaultFilesystems =
+      mkOption {
+        type = types.bool;
+        default = true;
+        description =
+          lib.mdDoc ''
+            If enabled, the boot disk of the virtual machine will be
+            formatted and mounted with the default filesystems for
+            testing. Swap devices and LUKS will be disabled.
+
+            If disabled, a root filesystem has to be specified and
+            formatted (for example in the initial ramdisk).
+          '';
+      };
+
+    virtualisation.useSecureBoot =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            Enable Secure Boot support in the EFI firmware.
+          '';
+      };
+
+    virtualisation.bios =
+      mkOption {
+        type = types.nullOr types.package;
+        default = null;
+        description =
+          lib.mdDoc ''
+            An alternate BIOS (such as `qboot`) with which to start the VM.
+            Should contain a file named `bios.bin`.
+            If `null`, QEMU's builtin SeaBIOS will be used.
+          '';
+      };
+
+    virtualisation.useHostCerts =
+      mkOption {
+        type = types.bool;
+        default = false;
+        description =
+          lib.mdDoc ''
+            If enabled, when `NIX_SSL_CERT_FILE` is set on the host,
+            pass the CA certificates from the host to the VM.
+          '';
+      };
+
+  };
+
+  config = {
+
+    assertions =
+      lib.concatLists (lib.flip lib.imap cfg.forwardPorts (i: rule:
+        [
+          { assertion = rule.from == "guest" -> rule.proto == "tcp";
+            message =
+              ''
+                Invalid virtualisation.forwardPorts.<entry ${toString i}>.proto:
+                  Guest forwarding supports only TCP connections.
+              '';
+          }
+          { assertion = rule.from == "guest" -> lib.hasPrefix "10.0.2." rule.guest.address;
+            message =
+              ''
+                Invalid virtualisation.forwardPorts.<entry ${toString i}>.guest.address:
+                  The address must be in the default VLAN (10.0.2.0/24).
+              '';
+          }
+        ])) ++ [
+          { assertion = pkgs.stdenv.hostPlatform.is32bit -> cfg.memorySize < 2047;
+            message = ''
+              virtualisation.memorySize is above 2047, but QEMU can allocate at most 2047 MB of RAM on 32-bit systems.
+            '';
+          }
+          { assertion = cfg.directBoot.enable || cfg.directBoot.initrd == options.virtualisation.directBoot.initrd.default;
+            message =
+              ''
+                You changed the default of `virtualisation.directBoot.initrd` but you are not
+                using QEMU direct boot. This initrd will not be used in your current
+                boot configuration.
+
+                Either do not mutate `virtualisation.directBoot.initrd` or enable direct boot.
+
+                If you have a more advanced usecase, please open an issue or a pull request.
+              '';
+          }
+        ];
+
+    warnings =
+      optional (
+        cfg.writableStore &&
+        cfg.useNixStoreImage &&
+        opt.writableStore.highestPrio > lib.modules.defaultOverridePriority)
+        ''
+          You have enabled ${opt.useNixStoreImage} = true,
+          without setting ${opt.writableStore} = false.
+
+          This causes a store image to be written to the store, which is
+          costly, especially for the binary cache, and leads to more
+          frequent garbage collection.
+
+          If you really need this combination, you can set ${opt.writableStore}
+          explicitly to true, incur the cost and make this warning go away.
+          Otherwise, we recommend
+
+            ${opt.writableStore} = false;
+            ''
+      ++ optional (cfg.directBoot.enable && cfg.useBootLoader)
+        ''
+          You enabled both direct boot and a bootloader; QEMU will not boot your bootloader, rendering
+          `useBootLoader` useless. You might want to disable one of those options.
+        '';
+
+    # In UEFI boot, we use an EFI-only partition table layout, thus GRUB will fail when trying to install
+    # for both legacy and UEFI. In order to avoid this, we have to set "nodev" to force UEFI-only installs.
+    # Otherwise, we set the proper bootloader device for this.
+    # FIXME: make sense of this mess wrt multiple ESPs present in the system, probably use boot.efiSysMountpoint?
+    boot.loader.grub.device = mkVMOverride (if cfg.useEFIBoot then "nodev" else cfg.bootLoaderDevice);
+    boot.loader.grub.gfxmodeBios = with cfg.resolution; "${toString x}x${toString y}";
+
+    boot.initrd.kernelModules = optionals (cfg.useNixStoreImage && !cfg.writableStore) [ "erofs" ];
+
+    boot.loader.supportsInitrdSecrets = mkIf (!cfg.useBootLoader) (mkVMOverride false);
+
+    boot.initrd.postMountCommands = lib.mkIf (!config.boot.initrd.systemd.enable)
+      ''
+        # Mark this as a NixOS machine.
+        mkdir -p $targetRoot/etc
+        echo -n > $targetRoot/etc/NIXOS
+
+        # Fix the permissions on /tmp.
+        chmod 1777 $targetRoot/tmp
+
+        mkdir -p $targetRoot/boot
+
+        ${optionalString cfg.writableStore ''
+          echo "mounting overlay filesystem on /nix/store..."
+          mkdir -p -m 0755 $targetRoot/nix/.rw-store/store $targetRoot/nix/.rw-store/work $targetRoot/nix/store
+          mount -t overlay overlay $targetRoot/nix/store \
+            -o lowerdir=$targetRoot/nix/.ro-store,upperdir=$targetRoot/nix/.rw-store/store,workdir=$targetRoot/nix/.rw-store/work || fail
+        ''}
+      '';
+
+    systemd.tmpfiles.rules = lib.mkIf config.boot.initrd.systemd.enable [
+      "f /etc/NIXOS 0644 root root -"
+      "d /boot 0644 root root -"
+    ];
+
+    # After booting, register the closure of the paths in
+    # `virtualisation.additionalPaths' in the Nix database in the VM.  This
+    # allows Nix operations to work in the VM.  The path to the
+    # registration file is passed through the kernel command line to
+    # allow `system.build.toplevel' to be included.  (If we had a direct
+    # reference to ${regInfo} here, then we would get a cyclic
+    # dependency.)
+    boot.postBootCommands = lib.mkIf config.nix.enable
+      ''
+        if [[ "$(cat /proc/cmdline)" =~ regInfo=([^ ]*) ]]; then
+          ${config.nix.package.out}/bin/nix-store --load-db < ''${BASH_REMATCH[1]}
+        fi
+      '';
+
+    boot.initrd.availableKernelModules =
+      optional cfg.writableStore "overlay"
+      ++ optional (cfg.qemu.diskInterface == "scsi") "sym53c8xx"
+      ++ optional (cfg.tpm.enable) "tpm_tis";
+
+    virtualisation.additionalPaths = [ config.system.build.toplevel ];
+
+    virtualisation.sharedDirectories = {
+      nix-store = mkIf cfg.mountHostNixStore {
+        source = builtins.storeDir;
+        target = "/nix/store";
+      };
+      xchg = {
+        source = ''"$TMPDIR"/xchg'';
+        target = "/tmp/xchg";
+      };
+      shared = {
+        source = ''"''${SHARED_DIR:-$TMPDIR/xchg}"'';
+        target = "/tmp/shared";
+      };
+      certs = mkIf cfg.useHostCerts {
+        source = ''"$TMPDIR"/certs'';
+        target = "/etc/ssl/certs";
+      };
+    };
+
+    security.pki.installCACerts = mkIf cfg.useHostCerts false;
+
+    virtualisation.qemu.networkingOptions =
+      let
+        forwardingOptions = flip concatMapStrings cfg.forwardPorts
+          ({ proto, from, host, guest }:
+            if from == "host"
+              then "hostfwd=${proto}:${host.address}:${toString host.port}-" +
+                   "${guest.address}:${toString guest.port},"
+              else "'guestfwd=${proto}:${guest.address}:${toString guest.port}-" +
+                   "cmd:${pkgs.netcat}/bin/nc ${host.address} ${toString host.port}',"
+          );
+        restrictNetworkOption = lib.optionalString cfg.restrictNetwork "restrict=on,";
+      in
+      [
+        "-net nic,netdev=user.0,model=virtio"
+        "-netdev user,id=user.0,${forwardingOptions}${restrictNetworkOption}\"$QEMU_NET_OPTS\""
+      ];
+
+    virtualisation.qemu.options = mkMerge [
+      (mkIf cfg.qemu.virtioKeyboard [
+        "-device virtio-keyboard"
+      ])
+      (mkIf pkgs.stdenv.hostPlatform.isx86 [
+        "-usb" "-device usb-tablet,bus=usb-bus.0"
+      ])
+      (mkIf pkgs.stdenv.hostPlatform.isAarch [
+        "-device virtio-gpu-pci" "-device usb-ehci,id=usb0" "-device usb-kbd" "-device usb-tablet"
+      ])
+      (let
+        alphaNumericChars = lowerChars ++ upperChars ++ (map toString (range 0 9));
+        # Replace all non-alphanumeric characters with underscores
+        sanitizeShellIdent = s: concatMapStrings (c: if builtins.elem c alphaNumericChars then c else "_") (stringToCharacters s);
+      in mkIf cfg.directBoot.enable [
+        "-kernel \${NIXPKGS_QEMU_KERNEL_${sanitizeShellIdent config.system.name}:-${config.system.build.toplevel}/kernel}"
+        "-initrd ${cfg.directBoot.initrd}"
+        ''-append "$(cat ${config.system.build.toplevel}/kernel-params) init=${config.system.build.toplevel}/init regInfo=${regInfo}/registration ${consoles} $QEMU_KERNEL_PARAMS"''
+      ])
+      (mkIf cfg.useEFIBoot [
+        "-drive if=pflash,format=raw,unit=0,readonly=on,file=${cfg.efi.firmware}"
+        "-drive if=pflash,format=raw,unit=1,readonly=off,file=$NIX_EFI_VARS"
+      ])
+      (mkIf (cfg.bios != null) [
+        "-bios ${cfg.bios}/bios.bin"
+      ])
+      (mkIf (!cfg.graphics) [
+        "-nographic"
+      ])
+      (mkIf (cfg.tpm.enable) [
+        "-chardev socket,id=chrtpm,path=\"$NIX_SWTPM_DIR\"/socket"
+        "-tpmdev emulator,id=tpm_dev_0,chardev=chrtpm"
+        "-device ${cfg.tpm.deviceModel},tpmdev=tpm_dev_0"
+      ])
+    ];
+
+    virtualisation.qemu.drives = mkMerge [
+      (mkIf (cfg.diskImage != null) [{
+        name = "root";
+        file = ''"$NIX_DISK_IMAGE"'';
+        driveExtraOpts.cache = "writeback";
+        driveExtraOpts.werror = "report";
+        deviceExtraOpts.bootindex = "1";
+        deviceExtraOpts.serial = rootDriveSerialAttr;
+      }])
+      (mkIf cfg.useNixStoreImage [{
+        name = "nix-store";
+        file = ''"$TMPDIR"/store.img'';
+        deviceExtraOpts.bootindex = "2";
+        driveExtraOpts.format = if cfg.writableStore then "qcow2" else "raw";
+      }])
+      (imap0 (idx: _: {
+        file = "$(pwd)/empty${toString idx}.qcow2";
+        driveExtraOpts.werror = "report";
+      }) cfg.emptyDiskImages)
+    ];
+
+    # By default, use mkVMOverride to enable building test VMs (e.g. via
+    # `nixos-rebuild build-vm`) of a system configuration, where the regular
+    # value for the `fileSystems' attribute should be disregarded (since those
+    # filesystems don't necessarily exist in the VM). You can disable this
+    # override by setting `virtualisation.fileSystems = lib.mkForce { };`.
+    fileSystems = lib.mkIf (cfg.fileSystems != { }) (mkVMOverride cfg.fileSystems);
+
+    virtualisation.fileSystems = let
+      mkSharedDir = tag: share:
+        {
+          name =
+            if tag == "nix-store" && cfg.writableStore
+            then "/nix/.ro-store"
+            else share.target;
+          value.device = tag;
+          value.fsType = "9p";
+          value.neededForBoot = true;
+          value.options =
+            [ "trans=virtio" "version=9p2000.L"  "msize=${toString cfg.msize}" ]
+            ++ lib.optional (tag == "nix-store") "cache=loose";
+        };
+    in lib.mkMerge [
+      (lib.mapAttrs' mkSharedDir cfg.sharedDirectories)
+      {
+        "/" = lib.mkIf cfg.useDefaultFilesystems (if cfg.diskImage == null then {
+          device = "tmpfs";
+          fsType = "tmpfs";
+        } else {
+          device = cfg.rootDevice;
+          fsType = "ext4";
+        });
+        "/tmp" = lib.mkIf config.boot.tmp.useTmpfs {
+          device = "tmpfs";
+          fsType = "tmpfs";
+          neededForBoot = true;
+          # Sync with systemd's tmp.mount;
+          options = [ "mode=1777" "strictatime" "nosuid" "nodev" "size=${toString config.boot.tmp.tmpfsSize}" ];
+        };
+        "/nix/${if cfg.writableStore then ".ro-store" else "store"}" = lib.mkIf cfg.useNixStoreImage {
+          device = "/dev/disk/by-label/${nixStoreFilesystemLabel}";
+          neededForBoot = true;
+          options = [ "ro" ];
+        };
+        "/nix/.rw-store" = lib.mkIf (cfg.writableStore && cfg.writableStoreUseTmpfs) {
+          fsType = "tmpfs";
+          options = [ "mode=0755" ];
+          neededForBoot = true;
+        };
+        "/boot" = lib.mkIf (cfg.useBootLoader && cfg.bootPartition != null) {
+          device = cfg.bootPartition;
+          fsType = "vfat";
+          noCheck = true; # fsck fails on a r/o filesystem
+        };
+      }
+    ];
+
+    boot.initrd.systemd = lib.mkIf (config.boot.initrd.systemd.enable && cfg.writableStore) {
+      mounts = [{
+        where = "/sysroot/nix/store";
+        what = "overlay";
+        type = "overlay";
+        options = "lowerdir=/sysroot/nix/.ro-store,upperdir=/sysroot/nix/.rw-store/store,workdir=/sysroot/nix/.rw-store/work";
+        wantedBy = ["initrd-fs.target"];
+        before = ["initrd-fs.target"];
+        requires = ["rw-store.service"];
+        after = ["rw-store.service"];
+        unitConfig.RequiresMountsFor = "/sysroot/nix/.ro-store";
+      }];
+      services.rw-store = {
+        unitConfig = {
+          DefaultDependencies = false;
+          RequiresMountsFor = "/sysroot/nix/.rw-store";
+        };
+        serviceConfig = {
+          Type = "oneshot";
+          ExecStart = "/bin/mkdir -p -m 0755 /sysroot/nix/.rw-store/store /sysroot/nix/.rw-store/work /sysroot/nix/store";
+        };
+      };
+    };
+
+    swapDevices = (if cfg.useDefaultFilesystems then mkVMOverride else mkDefault) [ ];
+    boot.initrd.luks.devices = (if cfg.useDefaultFilesystems then mkVMOverride else mkDefault) {};
+
+    # Don't run ntpd in the guest.  It should get the correct time from KVM.
+    services.timesyncd.enable = false;
+
+    services.qemuGuest.enable = cfg.qemu.guestAgent.enable;
+
+    system.build.vm = hostPkgs.runCommand "nixos-vm" {
+      preferLocalBuild = true;
+      meta.mainProgram = "run-${config.system.name}-vm";
+    }
+      ''
+        mkdir -p $out/bin
+        ln -s ${config.system.build.toplevel} $out/system
+        ln -s ${hostPkgs.writeScript "run-nixos-vm" startVM} $out/bin/run-${config.system.name}-vm
+      '';
+
+    # When building a regular system configuration, override whatever
+    # video driver the host uses.
+    services.xserver.videoDrivers = mkVMOverride [ "modesetting" ];
+    services.xserver.defaultDepth = mkVMOverride 0;
+    services.xserver.resolutions = mkVMOverride [ cfg.resolution ];
+    services.xserver.monitorSection =
+      ''
+        # Set a higher refresh rate so that resolutions > 800x600 work.
+        HorizSync 30-140
+        VertRefresh 50-160
+      '';
+
+    # Wireless won't work in the VM.
+    networking.wireless.enable = mkVMOverride false;
+    services.connman.enable = mkVMOverride false;
+
+    # Speed up booting by not waiting for ARP.
+    networking.dhcpcd.extraConfig = "noarp";
+
+    networking.usePredictableInterfaceNames = false;
+
+    system.requiredKernelConfig = with config.lib.kernelConfig;
+      [ (isEnabled "VIRTIO_BLK")
+        (isEnabled "VIRTIO_PCI")
+        (isEnabled "VIRTIO_NET")
+        (isEnabled "EXT4_FS")
+        (isEnabled "NET_9P_VIRTIO")
+        (isEnabled "9P_FS")
+        (isYes "BLK_DEV")
+        (isYes "PCI")
+        (isYes "NETDEVICES")
+        (isYes "NET_CORE")
+        (isYes "INET")
+        (isYes "NETWORK_FILESYSTEMS")
+      ] ++ optionals (!cfg.graphics) [
+        (isYes "SERIAL_8250_CONSOLE")
+        (isYes "SERIAL_8250")
+      ] ++ optionals (cfg.writableStore) [
+        (isEnabled "OVERLAY_FS")
+      ];
+
+  };
+
+  # uses types of services/x11/xserver.nix
+  meta.buildDocsInSandbox = false;
+}
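+
+# Illustrative usage sketch (not part of the module): from a configuration that
+# includes this module, the VM runner can be built and started with
+#   nixos-rebuild build-vm    # builds the `config.system.build.vm` attribute
+#   ./result/bin/run-<hostname>-vm
+# Environment variables such as QEMU_OPTS, QEMU_NET_OPTS, NIX_DISK_IMAGE and
+# SHARED_DIR (all referenced in the start script above) tweak a run without
+# rebuilding.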
diff --git a/nixpkgs/nixos/modules/virtualisation/rosetta.nix b/nixpkgs/nixos/modules/virtualisation/rosetta.nix
new file mode 100644
index 000000000000..ee811b571b8f
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/rosetta.nix
@@ -0,0 +1,81 @@
+{ config, lib, pkgs, ... }:
+
+let
+  cfg = config.virtualisation.rosetta;
+  inherit (lib) types;
+in
+{
+  options = {
+    virtualisation.rosetta.enable = lib.mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Whether to enable [Rosetta](https://developer.apple.com/documentation/apple-silicon/about-the-rosetta-translation-environment) support.
+
+        This feature requires the system to be a virtualised guest on an Apple silicon host.
+
+        The default settings are suitable for the [UTM](https://docs.getutm.app/) virtualisation [package](https://search.nixos.org/packages?channel=unstable&show=utm&from=0&size=1&sort=relevance&type=packages&query=utm).
+        Make sure to select 'Apple Virtualization' as the virtualisation engine and then tick the 'Enable Rosetta' option.
+      '';
+    };
+
+    virtualisation.rosetta.mountPoint = lib.mkOption {
+      type = types.str;
+      default = "/run/rosetta";
+      internal = true;
+      description = lib.mdDoc ''
+        The mount point for the Rosetta runtime inside the guest system.
+
+        The proprietary runtime is exposed through a VirtioFS directory share and then mounted at this directory.
+      '';
+    };
+
+    virtualisation.rosetta.mountTag = lib.mkOption {
+      type = types.str;
+      default = "rosetta";
+      description = lib.mdDoc ''
+        The VirtioFS mount tag for the Rosetta runtime, exposed by the host's virtualisation software.
+
+        If supported, your virtualisation software should provide instructions on how to register the Rosetta runtime inside Linux guests.
+        These instructions should mention the name of the mount tag used for the VirtioFS directory share that contains the Rosetta runtime.
+      '';
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    assertions = [
+      {
+        assertion = pkgs.stdenv.hostPlatform.isAarch64;
+        message = "Rosetta is only supported on aarch64 systems";
+      }
+    ];
+
+    fileSystems."${cfg.mountPoint}" = {
+      device = cfg.mountTag;
+      fsType = "virtiofs";
+    };
+
+
+    nix.settings = {
+      extra-platforms = [ "x86_64-linux" ];
+      extra-sandbox-paths =  [
+        "/run/binfmt"
+        cfg.mountPoint
+      ];
+    };
+    boot.binfmt.registrations.rosetta = {
+      interpreter = "${cfg.mountPoint}/rosetta";
+
+      # The required flags for binfmt are documented by Apple:
+      # https://developer.apple.com/documentation/virtualization/running_intel_binaries_in_linux_vms_with_rosetta
+      magicOrExtension = ''\x7fELF\x02\x01\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x3e\x00'';
+      mask = ''\xff\xff\xff\xff\xff\xfe\xfe\x00\xff\xff\xff\xff\xff\xff\xff\xff\xfe\xff\xff\xff'';
+      fixBinary = true;
+      matchCredentials = true;
+      preserveArgvZero = false;
+
+      # Remove the shell wrapper and call the runtime directly
+      wrapInterpreterInShell = false;
+    };
+  };
+}
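+
+# Illustrative usage sketch (not part of the module): in an aarch64-linux guest
+# running under UTM / Apple Virtualization with Rosetta enabled,
+#   virtualisation.rosetta.enable = true;
+# lets the kernel run x86_64-linux binaries through the mounted Rosetta runtime,
+# and Nix additionally builds for x86_64-linux via the extra-platforms setting above.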
diff --git a/nixpkgs/nixos/modules/virtualisation/spice-usb-redirection.nix b/nixpkgs/nixos/modules/virtualisation/spice-usb-redirection.nix
new file mode 100644
index 000000000000..ab2b058c686f
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/spice-usb-redirection.nix
@@ -0,0 +1,26 @@
+{ config, pkgs, lib, ... }:
+{
+  options.virtualisation.spiceUSBRedirection.enable = lib.mkOption {
+    type = lib.types.bool;
+    default = false;
+    description = lib.mdDoc ''
+      Install the SPICE USB redirection helper with setuid
+      privileges. This allows unprivileged users to pass USB devices
+      connected to this machine to libvirt VMs, both local and
+      remote. Note that this allows users arbitrary access to USB
+      devices.
+    '';
+  };
+
+  config = lib.mkIf config.virtualisation.spiceUSBRedirection.enable {
+    environment.systemPackages = [ pkgs.spice-gtk ]; # For polkit actions
+    security.wrappers.spice-client-glib-usb-acl-helper = {
+      owner = "root";
+      group = "root";
+      capabilities = "cap_fowner+ep";
+      source = "${pkgs.spice-gtk}/bin/spice-client-glib-usb-acl-helper";
+    };
+  };
+
+  meta.maintainers = [ lib.maintainers.lheckemann ];
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/vagrant-guest.nix b/nixpkgs/nixos/modules/virtualisation/vagrant-guest.nix
new file mode 100644
index 000000000000..2fad376086e3
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/vagrant-guest.nix
@@ -0,0 +1,59 @@
+# Minimal configuration that vagrant depends on
+
+{ config, pkgs, ... }:
+let
+  # Vagrant uses an insecure shared private key by default, but we
+  # don't use the authorizedKeys attribute under users because the key should be
+  # removed on first boot and replaced with a random one. This script sets
+  # the correct permissions and installs the temporary key if no
+  # ~/.ssh/authorized_keys exists.
+  install-vagrant-ssh-key = pkgs.writeScriptBin "install-vagrant-ssh-key" ''
+    #!${pkgs.runtimeShell}
+    if [ ! -e ~/.ssh/authorized_keys ]; then
+      mkdir -m 0700 -p ~/.ssh
+      echo "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA6NF8iallvQVp22WDkTkyrtvp9eWW6A8YVr+kz4TjGYe7gHzIw+niNltGEFHzD8+v1I2YJ6oXevct1YeS0o9HZyN1Q9qgCgzUFtdOKLv6IedplqoPkcmF0aYet2PkEDo3MlTBckFXPITAMzF8dJSIFo9D8HfdOV0IAdx4O7PtixWKn5y2hMNG0zQPyUecp4pzC6kivAIhyfHilFR61RGL+GPXQ2MWZWFYbAGjyiYJnAmCP3NOTd0jMZEnDkbUvxhMmBYSdETk1rRgm+R4LOzFUGaHqHDLKLX+FIPKcF96hrucXzcWyLbIbEgE98OHlnVYCzRdK8jlqm8tehUc9c9WhQ== vagrant insecure public key" >> ~/.ssh/authorized_keys
+      chmod 0600 ~/.ssh/authorized_keys
+    fi
+  '';
+in
+{
+  # Enable the OpenSSH daemon.
+  services.openssh.enable = true;
+
+  # Packages used by Vagrant
+  environment.systemPackages = with pkgs; [
+    findutils
+    iputils
+    nettools
+    netcat
+    nfs-utils
+    rsync
+  ];
+
+  users.extraUsers.vagrant = {
+    isNormalUser    = true;
+    createHome      = true;
+    description     = "Vagrant user account";
+    extraGroups     = [ "users" "wheel" ];
+    home            = "/home/vagrant";
+    password        = "vagrant";
+    useDefaultShell = true;
+    uid             = 1000;
+  };
+
+  systemd.services.install-vagrant-ssh-key = {
+    description = "Vagrant SSH key install (if needed)";
+    after = [ "fs.target" ];
+    wants = [ "fs.target" ];
+    wantedBy = [ "multi-user.target" ];
+    serviceConfig = {
+      ExecStart = "${install-vagrant-ssh-key}/bin/install-vagrant-ssh-key";
+      User = "vagrant";
+      # So it won't be (needlessly) restarted:
+      RemainAfterExit = true;
+    };
+  };
+
+  security.sudo.wheelNeedsPassword = false;
+  security.sudo-rs.wheelNeedsPassword = false;
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/vagrant-virtualbox-image.nix b/nixpkgs/nixos/modules/virtualisation/vagrant-virtualbox-image.nix
new file mode 100644
index 000000000000..2a921894ab61
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/vagrant-virtualbox-image.nix
@@ -0,0 +1,60 @@
+# Vagrant + VirtualBox
+
+{ config, pkgs, ... }:
+
+{
+  imports = [
+    ./vagrant-guest.nix
+    ./virtualbox-image.nix
+  ];
+
+  virtualbox.params = {
+    audio = "none";
+    audioin = "off";
+    audioout = "off";
+    usb = "off";
+    usbehci = "off";
+  };
+  sound.enable = false;
+  documentation.man.enable = false;
+  documentation.nixos.enable = false;
+
+  users.extraUsers.vagrant.extraGroups = [ "vboxsf" ];
+
+  # Generate the box in v1 format, which is much simpler to produce.
+  # https://www.vagrantup.com/docs/boxes/format.html
+  system.build.vagrantVirtualbox = pkgs.runCommand
+    "virtualbox-vagrant.box"
+    {}
+    ''
+      mkdir workdir
+      cd workdir
+
+      # 1. create the metadata.json file
+      echo '{"provider":"virtualbox"}' > metadata.json
+
+      # 2. create a default Vagrantfile config
+      cat <<VAGRANTFILE > Vagrantfile
+      Vagrant.configure("2") do |config|
+        config.vm.base_mac = "0800275F0936"
+      end
+      VAGRANTFILE
+
+      # 3. add the exported VM files
+      tar xvf ${config.system.build.virtualBoxOVA}/*.ova
+
+      # 4. move the ovf to the fixed location
+      mv *.ovf box.ovf
+
+      # 5. generate OVF manifest file
+      rm *.mf
+      touch box.mf
+      for fname in *; do
+        checksum=$(sha256sum $fname | cut -d' ' -f 1)
+        echo "SHA256($fname)= $checksum" >> box.mf
+      done
+
+      # 6. compress everything back together
+      tar --owner=0 --group=0 --sort=name --numeric-owner -czf $out .
+    '';
+}
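As a sketch, the resulting box can be produced from a configuration that imports this module and then realising the `config.system.build.vagrantVirtualbox` attribute; the module path below is an assumption:

    { modulesPath, ... }:
    {
      # Pull in the Vagrant + VirtualBox image profile; building the
      # config.system.build.vagrantVirtualbox attribute yields the .box file.
      imports = [ (modulesPath + "/virtualisation/vagrant-virtualbox-image.nix") ];
    }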
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
new file mode 100644
index 000000000000..94f70c65436c
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-guest.nix
@@ -0,0 +1,93 @@
+# Module for VirtualBox guests.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.virtualbox.guest;
+  kernel = config.boot.kernelPackages;
+
+in
+
+{
+
+  ###### interface
+
+  options.virtualisation.virtualbox.guest = {
+    enable = mkOption {
+      default = false;
+      type = types.bool;
+      description = lib.mdDoc "Whether to enable the VirtualBox service and other guest additions.";
+    };
+
+    x11 = mkOption {
+      default = true;
+      type = types.bool;
+      description = lib.mdDoc "Whether to enable x11 graphics";
+    };
+  };
+
+  ###### implementation
+
+  config = mkIf cfg.enable (mkMerge [{
+    assertions = [{
+      assertion = pkgs.stdenv.hostPlatform.isx86;
+      message = "Virtualbox not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    }];
+
+    environment.systemPackages = [ kernel.virtualboxGuestAdditions ];
+
+    boot.extraModulePackages = [ kernel.virtualboxGuestAdditions ];
+
+    boot.supportedFilesystems = [ "vboxsf" ];
+    boot.initrd.supportedFilesystems = [ "vboxsf" ];
+
+    users.groups.vboxsf.gid = config.ids.gids.vboxsf;
+
+    systemd.services.virtualbox =
+      { description = "VirtualBox Guest Services";
+
+        wantedBy = [ "multi-user.target" ];
+        requires = [ "dev-vboxguest.device" ];
+        after = [ "dev-vboxguest.device" ];
+
+        unitConfig.ConditionVirtualization = "oracle";
+
+        serviceConfig.ExecStart = "@${kernel.virtualboxGuestAdditions}/bin/VBoxService VBoxService --foreground";
+      };
+
+    services.udev.extraRules =
+      ''
+        # /dev/vboxuser is necessary for VBoxClient to work.  Maybe we
+        # should restrict this to logged-in users.
+        KERNEL=="vboxuser",  OWNER="root", GROUP="root", MODE="0666"
+
+        # Allow systemd dependencies on vboxguest.
+        SUBSYSTEM=="misc", KERNEL=="vboxguest", TAG+="systemd"
+      '';
+  } (mkIf cfg.x11 {
+    services.xserver.videoDrivers = [ "vmware" "virtualbox" "modesetting" ];
+
+    services.xserver.config =
+      ''
+        Section "InputDevice"
+          Identifier "VBoxMouse"
+          Driver "vboxmouse"
+        EndSection
+      '';
+
+    services.xserver.serverLayoutSection =
+      ''
+        InputDevice "VBoxMouse"
+      '';
+
+    services.xserver.displayManager.sessionCommands =
+      ''
+        PATH=${makeBinPath [ pkgs.gnugrep pkgs.which pkgs.xorg.xorgserver.out ]}:$PATH \
+          ${kernel.virtualboxGuestAdditions}/bin/VBoxClient-all
+      '';
+  })]);
+
+}
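A minimal sketch of a guest configuration exercising these options:

    { ... }:
    {
      virtualisation.virtualbox.guest.enable = true;
      # Skip the X11 integration on headless guests.
      virtualisation.virtualbox.guest.x11 = false;
    }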
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix
new file mode 100644
index 000000000000..9741ea090f79
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-host.nix
@@ -0,0 +1,170 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.virtualbox.host;
+
+  virtualbox = cfg.package.override {
+    inherit (cfg) enableHardening headless enableWebService;
+    extensionPack = if cfg.enableExtensionPack then pkgs.virtualboxExtpack else null;
+  };
+
+  kernelModules = config.boot.kernelPackages.virtualbox.override {
+    inherit virtualbox;
+  };
+
+in
+
+{
+  options.virtualisation.virtualbox.host = {
+    enable = mkEnableOption (lib.mdDoc "VirtualBox") // {
+      description = lib.mdDoc ''
+        Whether to enable VirtualBox.
+
+        ::: {.note}
+        In order to pass USB devices from the host to the guests, the user
+        needs to be in the `vboxusers` group.
+        :::
+      '';
+    };
+
+    enableExtensionPack = mkEnableOption (lib.mdDoc "VirtualBox extension pack") // {
+      description = lib.mdDoc ''
+        Whether to install the Oracle Extension Pack for VirtualBox.
+
+        ::: {.important}
+        You must set `nixpkgs.config.allowUnfree = true` in
+        order to use this. This requires you to accept the VirtualBox PUEL.
+        :::
+      '';
+    };
+
+    package = mkOption {
+      type = types.package;
+      default = pkgs.virtualbox;
+      defaultText = literalExpression "pkgs.virtualbox";
+      description = lib.mdDoc ''
+        Which VirtualBox package to use.
+      '';
+    };
+
+    addNetworkInterface = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Automatically set up a vboxnet0 host-only network interface.
+      '';
+    };
+
+    enableHardening = mkOption {
+      type = types.bool;
+      default = true;
+      description = lib.mdDoc ''
+        Enable hardened VirtualBox, which ensures that only the binaries in the
+        system path get access to the devices exposed by the kernel modules,
+        rather than all users in the vboxusers group.
+
+        ::: {.important}
+        Disabling this can put your system's security at risk, as local users
+        in the vboxusers group can tamper with the VirtualBox device files.
+        :::
+      '';
+    };
+
+    headless = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Use a VirtualBox installation without the GUI and Qt dependency. Useful on servers
+        and when virtual machines are controlled only via SSH.
+      '';
+    };
+
+    enableWebService = mkOption {
+      type = types.bool;
+      default = false;
+      description = lib.mdDoc ''
+        Build the VirtualBox web service tool (vboxwebsrv) to allow managing VMs through web-based frontend tools. Useful for headless servers.
+      '';
+    };
+  };
+
+  config = mkIf cfg.enable (mkMerge [{
+    warnings = mkIf (pkgs.config.virtualbox.enableExtensionPack or false)
+      ["'nixpkgs.virtualbox.enableExtensionPack' has no effect, please use 'virtualisation.virtualbox.host.enableExtensionPack'"];
+    boot.kernelModules = [ "vboxdrv" "vboxnetadp" "vboxnetflt" ];
+    boot.extraModulePackages = [ kernelModules ];
+    environment.systemPackages = [ virtualbox ];
+
+    security.wrappers = let
+      mkSuid = program: {
+        source = "${virtualbox}/libexec/virtualbox/${program}";
+        owner = "root";
+        group = "vboxusers";
+        setuid = true;
+      };
+      executables = [
+        "VBoxHeadless"
+        "VBoxNetAdpCtl"
+        "VBoxNetDHCP"
+        "VBoxNetNAT"
+        "VBoxVolInfo"
+      ] ++ (lib.optionals (!cfg.headless) [
+        "VBoxSDL"
+        "VirtualBoxVM"
+      ]);
+    in mkIf cfg.enableHardening
+      (builtins.listToAttrs (map (x: { name = x; value = mkSuid x; }) executables));
+
+    users.groups.vboxusers.gid = config.ids.gids.vboxusers;
+
+    services.udev.extraRules =
+      ''
+        KERNEL=="vboxdrv",    OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        KERNEL=="vboxdrvu",   OWNER="root", GROUP="root",      MODE="0666", TAG+="systemd"
+        KERNEL=="vboxnetctl", OWNER="root", GROUP="vboxusers", MODE="0660", TAG+="systemd"
+        SUBSYSTEM=="usb_device", ACTION=="add", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
+        SUBSYSTEM=="usb", ACTION=="add", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh $major $minor $attr{bDeviceClass}"
+        SUBSYSTEM=="usb_device", ACTION=="remove", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
+        SUBSYSTEM=="usb", ACTION=="remove", ENV{DEVTYPE}=="usb_device", RUN+="${virtualbox}/libexec/virtualbox/VBoxCreateUSBNode.sh --remove $major $minor"
+      '';
+
+    # Since we lack the right setuid/setcap binaries, set up a host-only network by default.
+  } (mkIf cfg.addNetworkInterface {
+    systemd.services.vboxnet0 =
+      { description = "VirtualBox vboxnet0 Interface";
+        requires = [ "dev-vboxnetctl.device" ];
+        after = [ "dev-vboxnetctl.device" ];
+        wantedBy = [ "network.target" "sys-subsystem-net-devices-vboxnet0.device" ];
+        path = [ virtualbox ];
+        serviceConfig.RemainAfterExit = true;
+        serviceConfig.Type = "oneshot";
+        serviceConfig.PrivateTmp = true;
+        environment.VBOX_USER_HOME = "/tmp";
+        script =
+          ''
+            if ! [ -e /sys/class/net/vboxnet0 ]; then
+              VBoxManage hostonlyif create
+              cat /tmp/VBoxSVC.log >&2
+            fi
+          '';
+        postStop =
+          ''
+            VBoxManage hostonlyif remove vboxnet0
+          '';
+      };
+
+    networking.interfaces.vboxnet0.ipv4.addresses = [{ address = "192.168.56.1"; prefixLength = 24; }];
+    # Make sure NetworkManager won't assume this interface being up
+    # means we have internet access.
+    networking.networkmanager.unmanaged = ["vboxnet0"];
+  }) (mkIf config.networking.useNetworkd {
+    systemd.network.networks."40-vboxnet0".extraConfig = ''
+      [Link]
+      RequiredForOnline=no
+    '';
+  })
+
+]);
+}
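A sketch of a host configuration using these options; the user name is hypothetical, and the extension pack additionally requires allowing unfree packages, as the option description notes:

    { ... }:
    {
      virtualisation.virtualbox.host.enable = true;
      virtualisation.virtualbox.host.enableExtensionPack = true;
      nixpkgs.config.allowUnfree = true;
      # Members of vboxusers may pass USB devices to guests.
      users.users.alice.extraGroups = [ "vboxusers" ];
    }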
diff --git a/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix b/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix
new file mode 100644
index 000000000000..0da217fd1cb0
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/virtualbox-image.nix
@@ -0,0 +1,256 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualbox;
+
+in {
+
+  options = {
+    virtualbox = {
+      baseImageSize = mkOption {
+        type = with types; either (enum [ "auto" ]) int;
+        default = "auto";
+        example = 50 * 1024;
+        description = lib.mdDoc ''
+          The size of the VirtualBox base image in MiB.
+        '';
+      };
+      baseImageFreeSpace = mkOption {
+        type = with types; int;
+        default = 30 * 1024;
+        description = lib.mdDoc ''
+          Free space in the VirtualBox base image in MiB.
+        '';
+      };
+      memorySize = mkOption {
+        type = types.int;
+        default = 1536;
+        description = lib.mdDoc ''
+          The amount of RAM the VirtualBox appliance can use in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-ova-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = lib.mdDoc ''
+          The name of the derivation for the VirtualBox appliance.
+        '';
+      };
+      vmName = mkOption {
+        type = types.str;
+        default = "${config.system.nixos.distroName} ${config.system.nixos.label} (${pkgs.stdenv.hostPlatform.system})";
+        description = lib.mdDoc ''
+          The name of the VirtualBox appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.ova";
+        description = lib.mdDoc ''
+          The file name of the VirtualBox appliance.
+        '';
+      };
+      params = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
+        example = {
+          audio = "alsa";
+          rtcuseutc = "on";
+          usb = "off";
+        };
+        description = lib.mdDoc ''
+          Parameters passed to the VirtualBox appliance.
+
+          Run `VBoxManage modifyvm --help` to see more options.
+        '';
+      };
+      exportParams = mkOption {
+        type = with types; listOf (oneOf [ str int bool (listOf str) ]);
+        example = [
+          "--vsys" "0" "--vendor" "ACME Inc."
+        ];
+        default = [];
+        description = lib.mdDoc ''
+          Parameters passed to the VirtualBox export command.
+
+          Run `VBoxManage export --help` to see more options.
+        '';
+      };
+      extraDisk = mkOption {
+        description = lib.mdDoc ''
+          Optional extra disk/HDD configuration.
+          The disk will be an ext4 partition backed by a separate file.
+        '';
+        default = null;
+        example = {
+          label = "storage";
+          mountPoint = "/home/demo/storage";
+          size = 100 * 1024;
+        };
+        type = types.nullOr (types.submodule {
+          options = {
+            size = mkOption {
+              type = types.int;
+              description = lib.mdDoc "Size in MiB";
+            };
+            label = mkOption {
+              type = types.str;
+              default = "vm-extra-storage";
+              description = lib.mdDoc "Label for the disk partition";
+            };
+            mountPoint = mkOption {
+              type = types.str;
+              description = lib.mdDoc "Path where to mount this disk.";
+            };
+          };
+        });
+      };
+      postExportCommands = mkOption {
+        type = types.lines;
+        default = "";
+        example = ''
+          ${pkgs.cot}/bin/cot edit-hardware "$fn" \
+            -v vmx-14 \
+            --nics 2 \
+            --nic-types VMXNET3 \
+            --nic-names 'Nic name' \
+            --nic-networks 'Nic match' \
+            --network-descriptions 'Nic description' \
+            --scsi-subtypes VirtualSCSI
+        '';
+        description = lib.mdDoc ''
+          Extra commands to run after exporting the OVA to `$fn`.
+        '';
+      };
+      storageController = mkOption {
+        type = with types; attrsOf (oneOf [ str int bool (listOf str) ]);
+        example = {
+          name = "SCSI";
+          add = "scsi";
+          portcount = 16;
+          bootable = "on";
+          hostiocache = "on";
+        };
+        default = {
+          name = "SATA";
+          add = "sata";
+          portcount = 4;
+          bootable = "on";
+          hostiocache = "on";
+        };
+        description = lib.mdDoc ''
+          Parameters passed to the VirtualBox appliance. Must have at least
+          `name`.
+
+          Run `VBoxManage storagectl --help` to see more options.
+        '';
+      };
+    };
+  };
+
+  config = {
+
+    virtualbox.params = mkMerge [
+      (mapAttrs (name: mkDefault) {
+        acpi = "on";
+        vram = 32;
+        nictype1 = "virtio";
+        nic1 = "nat";
+        audiocontroller = "ac97";
+        audio = "alsa";
+        audioout = "on";
+        graphicscontroller = "vmsvga";
+        rtcuseutc = "on";
+        usb = "on";
+        usbehci = "on";
+        mouse = "usbtablet";
+      })
+      (mkIf (pkgs.stdenv.hostPlatform.system == "i686-linux") { pae = "on"; })
+    ];
+
+    system.build.virtualBoxOVA = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+
+      inherit pkgs lib config;
+      partitionTableType = "legacy";
+      diskSize = cfg.baseImageSize;
+      additionalSpace = "${toString cfg.baseImageFreeSpace}M";
+
+      postVM =
+        ''
+          export HOME=$PWD
+          export PATH=${pkgs.virtualbox}/bin:$PATH
+
+          echo "converting image to VirtualBox format..."
+          VBoxManage convertfromraw $diskImage disk.vdi
+
+          ${optionalString (cfg.extraDisk != null) ''
+            echo "creating extra disk: data-disk.raw"
+            dataDiskImage=data-disk.raw
+            truncate -s ${toString cfg.extraDisk.size}M $dataDiskImage
+
+            parted --script $dataDiskImage -- \
+              mklabel msdos \
+              mkpart primary ext4 1MiB -1
+            eval $(partx $dataDiskImage -o START,SECTORS --nr 1 --pairs)
+            mkfs.ext4 -F -L ${cfg.extraDisk.label} $dataDiskImage -E offset=$(sectorsToBytes $START) $(sectorsToKilobytes $SECTORS)K
+            echo "creating extra disk: data-disk.vdi"
+            VBoxManage convertfromraw $dataDiskImage data-disk.vdi
+          ''}
+
+          echo "creating VirtualBox VM..."
+          vmName="${cfg.vmName}";
+          VBoxManage createvm --name "$vmName" --register \
+            --ostype ${if pkgs.stdenv.hostPlatform.system == "x86_64-linux" then "Linux26_64" else "Linux26"}
+          VBoxManage modifyvm "$vmName" \
+            --memory ${toString cfg.memorySize} \
+            ${lib.cli.toGNUCommandLineShell { } cfg.params}
+          VBoxManage storagectl "$vmName" ${lib.cli.toGNUCommandLineShell { } cfg.storageController}
+          VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 0 --device 0 --type hdd \
+            --medium disk.vdi
+          ${optionalString (cfg.extraDisk != null) ''
+            VBoxManage storageattach "$vmName" --storagectl ${cfg.storageController.name} --port 1 --device 0 --type hdd \
+            --medium data-disk.vdi
+          ''}
+
+          echo "exporting VirtualBox VM..."
+          mkdir -p $out
+          fn="$out/${cfg.vmFileName}"
+          VBoxManage export "$vmName" --output "$fn" --options manifest ${escapeShellArgs cfg.exportParams}
+          ${cfg.postExportCommands}
+
+          rm -v $diskImage
+
+          mkdir -p $out/nix-support
+          echo "file ova $fn" >> $out/nix-support/hydra-build-products
+        '';
+    };
+
+    fileSystems = {
+      "/" = {
+        device = "/dev/disk/by-label/nixos";
+        autoResize = true;
+        fsType = "ext4";
+      };
+    } // (lib.optionalAttrs (cfg.extraDisk != null) {
+      ${cfg.extraDisk.mountPoint} = {
+        device = "/dev/disk/by-label/" + cfg.extraDisk.label;
+        autoResize = true;
+        fsType = "ext4";
+      };
+    });
+
+    boot.growPartition = true;
+    boot.loader.grub.device = "/dev/sda";
+
+    swapDevices = [{
+      device = "/var/swap";
+      size = 2048;
+    }];
+
+    virtualisation.virtualbox.guest.enable = true;
+
+  };
+}
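A sketch of how an appliance build might customise these options, assuming the module is imported via its path in nixpkgs:

    { modulesPath, ... }:
    {
      imports = [ (modulesPath + "/virtualisation/virtualbox-image.nix") ];
      virtualbox.memorySize = 2048;      # MiB of RAM for the appliance
      virtualbox.params.usb = "off";     # forwarded to VBoxManage modifyvm
      virtualbox.extraDisk = {
        label = "storage";
        mountPoint = "/data";
        size = 10 * 1024;                # MiB
      };
    }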
diff --git a/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix b/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix
new file mode 100644
index 000000000000..6880a257c2be
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/vmware-guest.nix
@@ -0,0 +1,86 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.vmware.guest;
+  open-vm-tools = if cfg.headless then pkgs.open-vm-tools-headless else pkgs.open-vm-tools;
+  xf86inputvmmouse = pkgs.xorg.xf86inputvmmouse;
+in
+{
+  imports = [
+    (mkRenamedOptionModule [ "services" "vmwareGuest" ] [ "virtualisation" "vmware" "guest" ])
+  ];
+
+  options.virtualisation.vmware.guest = {
+    enable = mkEnableOption (lib.mdDoc "VMWare Guest Support");
+    headless = mkOption {
+      type = types.bool;
+      default = !config.services.xserver.enable;
+      defaultText = "!config.services.xserver.enable";
+      description = lib.mdDoc "Whether to disable X11-related features.";
+    };
+  };
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.hostPlatform.isx86 || pkgs.stdenv.hostPlatform.isAarch64;
+      message = "VMWare guest is not currently supported on ${pkgs.stdenv.hostPlatform.system}";
+    } ];
+
+    boot.initrd.availableKernelModules = [ "mptspi" ];
+    boot.initrd.kernelModules = lib.optionals pkgs.stdenv.hostPlatform.isx86 [ "vmw_pvscsi" ];
+
+    environment.systemPackages = [ open-vm-tools ];
+
+    systemd.services.vmware =
+      { description = "VMWare Guest Service";
+        wantedBy = [ "multi-user.target" ];
+        after = [ "display-manager.service" ];
+        unitConfig.ConditionVirtualization = "vmware";
+        serviceConfig.ExecStart = "${open-vm-tools}/bin/vmtoolsd";
+      };
+
+    # Mount the vmblock for drag-and-drop and copy-and-paste.
+    systemd.mounts = mkIf (!cfg.headless) [
+      {
+        description = "VMware vmblock fuse mount";
+        documentation = [ "https://github.com/vmware/open-vm-tools/blob/master/open-vm-tools/vmblock-fuse/design.txt" ];
+        unitConfig.ConditionVirtualization = "vmware";
+        what = "${open-vm-tools}/bin/vmware-vmblock-fuse";
+        where = "/run/vmblock-fuse";
+        type = "fuse";
+        options = "subtype=vmware-vmblock,default_permissions,allow_other";
+        wantedBy = [ "multi-user.target" ];
+      }
+    ];
+
+    security.wrappers.vmware-user-suid-wrapper = mkIf (!cfg.headless) {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${open-vm-tools}/bin/vmware-user-suid-wrapper";
+      };
+
+    environment.etc.vmware-tools.source = "${open-vm-tools}/etc/vmware-tools/*";
+
+    services.xserver = mkIf (!cfg.headless) {
+      modules = [ xf86inputvmmouse ];
+
+      config = ''
+          Section "InputClass"
+            Identifier "VMMouse"
+            MatchDevicePath "/dev/input/event*"
+            MatchProduct "ImPS/2 Generic Wheel Mouse"
+            Driver "vmmouse"
+          EndSection
+        '';
+
+      displayManager.sessionCommands = ''
+          ${open-vm-tools}/bin/vmware-user-suid-wrapper
+        '';
+    };
+
+    services.udev.packages = [ open-vm-tools ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/vmware-host.nix b/nixpkgs/nixos/modules/virtualisation/vmware-host.nix
new file mode 100644
index 000000000000..4b2dc28aeac7
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/vmware-host.nix
@@ -0,0 +1,166 @@
+{ config, pkgs, lib, ... }:
+
+let
+  cfg = config.virtualisation.vmware.host;
+  wrapperDir = "/run/vmware/bin"; # Perfectly fits as /usr/local/bin
+  parentWrapperDir = dirOf wrapperDir;
+  vmwareWrappers = # Needed as a workaround for hardcoded paths
+    let mkVmwareSymlink =
+      program:
+      ''
+        ln -s "${config.security.wrapperDir}/${program}" $wrapperDir/${program}
+      '';
+    in
+    [
+      (mkVmwareSymlink "pkexec")
+      (mkVmwareSymlink "mount")
+      (mkVmwareSymlink "umount")
+    ];
+in
+{
+  options = with lib; {
+    virtualisation.vmware.host = {
+      enable = mkEnableOption (lib.mdDoc "VMware") // {
+        description = lib.mdDoc ''
+          This enables VMware host virtualisation for running VMs.
+
+          ::: {.important}
+          `vmware-vmx` can cause high CPU usage from kcompactd0 due to the
+          `Transparent Hugepages` feature in the kernel.
+          Add `[ "transparent_hugepage=never" ]` to
+          {option}`boot.kernelParams` to disable them.
+          :::
+
+          ::: {.note}
+          If that does not help, disable the `TRANSPARENT_HUGEPAGE` and
+          `COMPACTION` kernel config options and recompile the kernel.
+          :::
+        '';
+      };
+      package = mkOption {
+        type = types.package;
+        default = pkgs.vmware-workstation;
+        defaultText = literalExpression "pkgs.vmware-workstation";
+        description = lib.mdDoc "VMware host virtualisation package to use";
+      };
+      extraPackages = mkOption {
+        type = with types; listOf package;
+        default = with pkgs; [ ];
+        description = lib.mdDoc "Extra packages to be used with VMware host.";
+        example = "with pkgs; [ ntfs3g ]";
+      };
+      extraConfig = mkOption {
+        type = types.lines;
+        default = "";
+        description = lib.mdDoc "Add extra config to /etc/vmware/config";
+        example = ''
+          # Allow OpenGL and Vulkan acceleration on unsupported devices for guest vGPU
+          mks.gl.allowUnsupportedDrivers = "TRUE"
+          mks.vk.allowUnsupportedDevices = "TRUE"
+        '';
+      };
+    };
+  };
+
+  config = lib.mkIf cfg.enable {
+    boot.extraModulePackages = [ config.boot.kernelPackages.vmware ];
+    boot.extraModprobeConfig = "alias char-major-10-229 fuse";
+    boot.kernelModules = [ "vmw_pvscsi" "vmw_vmci" "vmmon" "vmnet" "fuse" ];
+
+    environment.systemPackages = [ cfg.package ] ++ cfg.extraPackages;
+    services.printing.drivers = [ cfg.package ];
+
+    environment.etc."vmware/config".text = ''
+      ${builtins.readFile "${cfg.package}/etc/vmware/config"}
+      ${cfg.extraConfig}
+    '';
+
+    environment.etc."vmware/bootstrap".source = "${cfg.package}/etc/vmware/bootstrap";
+    environment.etc."vmware/icu".source = "${cfg.package}/etc/vmware/icu";
+    environment.etc."vmware-installer".source = "${cfg.package}/etc/vmware-installer";
+
+    # SUID wrappers
+
+    security.wrappers = {
+      vmware-vmx = {
+        setuid = true;
+        owner = "root";
+        group = "root";
+        source = "${cfg.package}/lib/vmware/bin/.vmware-vmx-wrapped";
+      };
+    };
+
+    ###### wrappers activation script
+
+    system.activationScripts.vmwareWrappers =
+      lib.stringAfter [ "specialfs" "users" ]
+        ''
+          mkdir -p "${parentWrapperDir}"
+          chmod 755 "${parentWrapperDir}"
+          # We want to place the tmpdirs for the wrappers in the parent dir.
+          wrapperDir=$(mktemp --directory --tmpdir="${parentWrapperDir}" wrappers.XXXXXXXXXX)
+          chmod a+rx "$wrapperDir"
+          ${lib.concatStringsSep "\n" (vmwareWrappers)}
+          if [ -L ${wrapperDir} ]; then
+            # Atomically replace the symlink
+            # See https://axialcorps.com/2013/07/03/atomically-replacing-files-and-directories/
+            old=$(readlink -f ${wrapperDir})
+            if [ -e "${wrapperDir}-tmp" ]; then
+              rm --force --recursive "${wrapperDir}-tmp"
+            fi
+            ln --symbolic --force --no-dereference "$wrapperDir" "${wrapperDir}-tmp"
+            mv --no-target-directory "${wrapperDir}-tmp" "${wrapperDir}"
+            rm --force --recursive "$old"
+          else
+            # For initial setup
+            ln --symbolic "$wrapperDir" "${wrapperDir}"
+          fi
+        '';
+
+    # Services
+
+    systemd.services."vmware-authdlauncher" = {
+      description = "VMware Authentication Daemon";
+      serviceConfig = {
+        Type = "forking";
+        ExecStart = [ "${cfg.package}/bin/vmware-authdlauncher" ];
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.services."vmware-networks-configuration" = {
+      description = "VMware Networks Configuration Generation";
+      unitConfig.ConditionPathExists = "!/etc/vmware/networking";
+      serviceConfig = {
+        UMask = "0077";
+        ExecStart = [
+          "${cfg.package}/bin/vmware-networks --postinstall vmware-player,0,1"
+        ];
+        Type = "oneshot";
+        RemainAfterExit = "yes";
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.services."vmware-networks" = {
+      description = "VMware Networks";
+      after = [ "vmware-networks-configuration.service" ];
+      requires = [ "vmware-networks-configuration.service" ];
+      serviceConfig = {
+        Type = "forking";
+        ExecCondition = [ "${pkgs.kmod}/bin/modprobe vmnet" ];
+        ExecStart = [ "${cfg.package}/bin/vmware-networks --start" ];
+        ExecStop = [ "${cfg.package}/bin/vmware-networks --stop" ];
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+
+    systemd.services."vmware-usbarbitrator" = {
+      description = "VMware USB Arbitrator";
+      serviceConfig = {
+        ExecStart = [ "${cfg.package}/bin/vmware-usbarbitrator -f" ];
+      };
+      wantedBy = [ "multi-user.target" ];
+    };
+  };
+}
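A sketch of a host configuration using these options:

    { pkgs, ... }:
    {
      virtualisation.vmware.host.enable = true;
      # Extra tools made available to VMware (example from the option above).
      virtualisation.vmware.host.extraPackages = with pkgs; [ ntfs3g ];
      # Appended verbatim to /etc/vmware/config.
      virtualisation.vmware.host.extraConfig = ''
        mks.gl.allowUnsupportedDrivers = "TRUE"
      '';
    }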
diff --git a/nixpkgs/nixos/modules/virtualisation/vmware-image.nix b/nixpkgs/nixos/modules/virtualisation/vmware-image.nix
new file mode 100644
index 000000000000..a38713b4d4ee
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/vmware-image.nix
@@ -0,0 +1,91 @@
+{ config, pkgs, lib, ... }:
+
+with lib;
+
+let
+  boolToStr = value: if value then "on" else "off";
+  cfg = config.vmware;
+
+  subformats = [
+    "monolithicSparse"
+    "monolithicFlat"
+    "twoGbMaxExtentSparse"
+    "twoGbMaxExtentFlat"
+    "streamOptimized"
+  ];
+
+in {
+  options = {
+    vmware = {
+      baseImageSize = mkOption {
+        type = with types; either (enum [ "auto" ]) int;
+        default = "auto";
+        example = 2048;
+        description = lib.mdDoc ''
+          The size of the VMWare base image in MiB.
+        '';
+      };
+      vmDerivationName = mkOption {
+        type = types.str;
+        default = "nixos-vmware-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}";
+        description = lib.mdDoc ''
+          The name of the derivation for the VMWare appliance.
+        '';
+      };
+      vmFileName = mkOption {
+        type = types.str;
+        default = "nixos-${config.system.nixos.label}-${pkgs.stdenv.hostPlatform.system}.vmdk";
+        description = lib.mdDoc ''
+          The file name of the VMWare appliance.
+        '';
+      };
+      vmSubformat = mkOption {
+        type = types.enum subformats;
+        default = "monolithicSparse";
+        description = lib.mdDoc "Specifies which VMDK subformat to use.";
+      };
+      vmCompat6 = mkOption {
+        type = types.bool;
+        default = false;
+        example = true;
+        description = lib.mdDoc "Create a VMDK version 6 image (instead of version 4).";
+      };
+    };
+  };
+
+  config = {
+    system.build.vmwareImage = import ../../lib/make-disk-image.nix {
+      name = cfg.vmDerivationName;
+      postVM = ''
+        ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -o compat6=${boolToStr cfg.vmCompat6},subformat=${cfg.vmSubformat} -O vmdk $diskImage $out/${cfg.vmFileName}
+        rm $diskImage
+      '';
+      format = "raw";
+      diskSize = cfg.baseImageSize;
+      partitionTableType = "efi";
+      inherit config lib pkgs;
+    };
+
+    fileSystems."/" = {
+      device = "/dev/disk/by-label/nixos";
+      autoResize = true;
+      fsType = "ext4";
+    };
+
+    fileSystems."/boot" = {
+      device = "/dev/disk/by-label/ESP";
+      fsType = "vfat";
+    };
+
+    boot.growPartition = true;
+
+    boot.loader.grub = {
+      version = 2;
+      device = "nodev";
+      efiSupport = true;
+      efiInstallAsRemovable = true;
+    };
+
+    virtualisation.vmware.guest.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/waydroid.nix b/nixpkgs/nixos/modules/virtualisation/waydroid.nix
new file mode 100644
index 000000000000..46e5f901015d
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/waydroid.nix
@@ -0,0 +1,71 @@
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+
+  cfg = config.virtualisation.waydroid;
+  kernelPackages = config.boot.kernelPackages;
+  waydroidGbinderConf = pkgs.writeText "waydroid.conf" ''
+    [Protocol]
+    /dev/binder = aidl2
+    /dev/vndbinder = aidl2
+    /dev/hwbinder = hidl
+
+    [ServiceManager]
+    /dev/binder = aidl2
+    /dev/vndbinder = aidl2
+    /dev/hwbinder = hidl
+  '';
+
+in
+{
+
+  options.virtualisation.waydroid = {
+    enable = mkEnableOption (lib.mdDoc "Waydroid");
+  };
+
+  config = mkIf cfg.enable {
+    assertions = singleton {
+      assertion = versionAtLeast (getVersion config.boot.kernelPackages.kernel) "4.18";
+      message = "Waydroid needs user namespace support to work properly";
+    };
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isEnabled "ANDROID_BINDER_IPC")
+      (isEnabled "ANDROID_BINDERFS")
+      (isEnabled "ASHMEM") # FIXME Needs memfd support instead on Linux 5.18 and waydroid 1.2.1
+    ];
+
+    /* NOTE: we always enable this flag even if CONFIG_PSI_DEFAULT_DISABLED is not set,
+      as reading the kernel config is not always possible, and on kernels where PSI is
+      already enabled it is a no-op.
+    */
+    boot.kernelParams = [ "psi=1" ];
+
+    environment.etc."gbinder.d/waydroid.conf".source = waydroidGbinderConf;
+
+    environment.systemPackages = with pkgs; [ waydroid ];
+
+    networking.firewall.trustedInterfaces = [ "waydroid0" ];
+
+    virtualisation.lxc.enable = true;
+
+    systemd.services.waydroid-container = {
+      description = "Waydroid Container";
+
+      wantedBy = [ "multi-user.target" ];
+
+      serviceConfig = {
+        ExecStart = "${pkgs.waydroid}/bin/waydroid -w container start";
+        ExecStop = "${pkgs.waydroid}/bin/waydroid container stop";
+        ExecStopPost = "${pkgs.waydroid}/bin/waydroid session stop";
+      };
+    };
+
+    systemd.tmpfiles.rules = [
+      "d /var/lib/misc 0755 root root -" # for dnsmasq.leases
+    ];
+  };
+
+}
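A minimal sketch of enabling Waydroid; the compositor is an assumption, since Waydroid needs a running Wayland session but this module does not provide one:

    { ... }:
    {
      virtualisation.waydroid.enable = true;
      # Any Wayland compositor works; sway is only an example.
      programs.sway.enable = true;
    }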
diff --git a/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix b/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix
new file mode 100644
index 000000000000..792edc9b397d
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xe-guest-utilities.nix
@@ -0,0 +1,52 @@
+{ config, lib, pkgs, ... }:
+with lib;
+let
+  cfg = config.services.xe-guest-utilities;
+in {
+  options = {
+    services.xe-guest-utilities = {
+      enable = mkEnableOption (lib.mdDoc "the Xen guest utilities daemon");
+    };
+  };
+  config = mkIf cfg.enable {
+    services.udev.packages = [ pkgs.xe-guest-utilities ];
+    systemd.tmpfiles.rules = [ "d /run/xenstored 0755 - - -" ];
+
+    systemd.services.xe-daemon = {
+      description = "xen daemon file";
+      wantedBy    = [ "multi-user.target" ];
+      after = [ "xe-linux-distribution.service" ];
+      requires = [ "proc-xen.mount" ];
+      path = [ pkgs.coreutils pkgs.iproute2 ];
+      serviceConfig = {
+        PIDFile = "/run/xe-daemon.pid";
+        ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-daemon -p /run/xe-daemon.pid";
+        ExecStop = "${pkgs.procps}/bin/pkill -TERM -F /run/xe-daemon.pid";
+      };
+    };
+
+    systemd.services.xe-linux-distribution = {
+      description = "xen linux distribution service";
+      wantedBy    = [ "multi-user.target" ];
+      before = [ "xend.service" ];
+      path = [ pkgs.xe-guest-utilities pkgs.coreutils pkgs.gawk pkgs.gnused ];
+      serviceConfig = {
+        Type = "simple";
+        RemainAfterExit = "yes";
+        ExecStart = "${pkgs.xe-guest-utilities}/bin/xe-linux-distribution /var/cache/xe-linux-distribution";
+      };
+    };
+
+    systemd.mounts = [
+      { description = "Mount /proc/xen files";
+        what = "xenfs";
+        where = "/proc/xen";
+        type = "xenfs";
+        unitConfig = {
+          ConditionPathExists = "/proc/xen";
+          RefuseManualStop = "true";
+        };
+      }
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix b/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix
new file mode 100644
index 000000000000..8f361a7ac020
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xen-dom0.nix
@@ -0,0 +1,453 @@
+# Xen hypervisor (Dom0) support.
+
+{ config, lib, pkgs, ... }:
+
+with lib;
+
+let
+  cfg = config.virtualisation.xen;
+in
+
+{
+  imports = [
+    (mkRemovedOptionModule [ "virtualisation" "xen" "qemu" ] "You don't need this option anymore, it will work without it.")
+    (mkRenamedOptionModule [ "virtualisation" "xen" "qemu-package" ] [ "virtualisation" "xen" "package-qemu" ])
+  ];
+
+  ###### interface
+
+  options = {
+
+    virtualisation.xen.enable =
+      mkOption {
+        default = false;
+        type = types.bool;
+        description =
+          mdDoc ''
+            Setting this option enables the Xen hypervisor, a
+            virtualisation technology that allows multiple virtual
+            machines, known as *domains*, to run
+            concurrently on the physical machine.  NixOS runs as the
+            privileged *Domain 0*.  This option
+            requires a reboot to take effect.
+          '';
+      };
+
+    virtualisation.xen.package = mkOption {
+      type = types.package;
+      defaultText = literalExpression "pkgs.xen";
+      example = literalExpression "pkgs.xen-light";
+      description = lib.mdDoc ''
+        The package providing the Xen binaries.
+      '';
+      relatedPackages = [ "xen" "xen-light" ];
+    };
+
+    virtualisation.xen.package-qemu = mkOption {
+      type = types.package;
+      defaultText = literalExpression "pkgs.xen";
+      example = literalExpression "pkgs.qemu_xen-light";
+      description = lib.mdDoc ''
+        The package providing the QEMU binaries for dom0 QEMU and xendomains.
+      '';
+      relatedPackages = [ "xen"
+                          { name = "qemu_xen-light"; comment = "For use with pkgs.xen-light."; }
+                        ];
+    };
+
+    virtualisation.xen.bootParams =
+      mkOption {
+        default = [];
+        type = types.listOf types.str;
+        description = lib.mdDoc
+          ''
+            Parameters passed to the Xen hypervisor at boot time.
+          '';
+      };
+
+    virtualisation.xen.domain0MemorySize =
+      mkOption {
+        default = 0;
+        example = 512;
+        type = types.addCheck types.int (n: n >= 0);
+        description = lib.mdDoc
+          ''
+            Amount of memory (in MiB) allocated to Domain 0 on boot.
+            If set to 0, all memory is assigned to Domain 0.
+          '';
+      };
+
+    virtualisation.xen.bridge = {
+        name = mkOption {
+          default = "xenbr0";
+          type = types.str;
+          description = lib.mdDoc ''
+              Name of the bridge the Xen domUs connect to.
+            '';
+        };
+
+        address = mkOption {
+          type = types.str;
+          default = "172.16.0.1";
+          description = lib.mdDoc ''
+            IPv4 address of the bridge.
+          '';
+        };
+
+        prefixLength = mkOption {
+          type = types.addCheck types.int (n: n >= 0 && n <= 32);
+          default = 16;
+          description = lib.mdDoc ''
+            Subnet mask of the bridge interface, specified as the number of
+            bits in the prefix (e.g. `24`).
+            A DHCP server will provide IP addresses for the whole remaining
+            subnet.
+          '';
+        };
+
+        forwardDns = mkOption {
+          type = types.bool;
+          default = false;
+          description = lib.mdDoc ''
+            If set to `true`, DNS queries from the
+            hosts connected to the bridge will be forwarded to the DNS
+            servers specified in /etc/resolv.conf.
+            '';
+        };
+
+      };
+
+    virtualisation.xen.stored =
+      mkOption {
+        type = types.path;
+        description = lib.mdDoc
+          ''
+            Xen Store daemon to use. Defaults to oxenstored of the xen package.
+          '';
+      };
+
+    virtualisation.xen.domains = {
+        extraConfig = mkOption {
+          type = types.lines;
+          default = "";
+          description = lib.mdDoc
+            ''
+              Options defined here will override the defaults for xendomains.
+              The default options can be seen in the file included from
+              /etc/default/xendomains.
+            '';
+          };
+      };
+
+    virtualisation.xen.trace = mkEnableOption (lib.mdDoc "Xen tracing");
+
+  };
+
+
+  ###### implementation
+
+  config = mkIf cfg.enable {
+    assertions = [ {
+      assertion = pkgs.stdenv.isx86_64;
+      message = "Xen currently not supported on ${pkgs.stdenv.hostPlatform.system}";
+    } {
+      assertion = config.boot.loader.grub.enable && (config.boot.loader.grub.efiSupport == false);
+      message = "Xen currently does not support EFI boot";
+    } ];
+
+    virtualisation.xen.package = mkDefault pkgs.xen;
+    virtualisation.xen.package-qemu = mkDefault pkgs.xen;
+    virtualisation.xen.stored = mkDefault "${cfg.package}/bin/oxenstored";
+
+    environment.systemPackages = [ cfg.package ];
+
+    boot.kernelModules =
+      [ "xen-evtchn" "xen-gntdev" "xen-gntalloc" "xen-blkback" "xen-netback"
+        "xen-pciback" "evtchn" "gntdev" "netbk" "blkbk" "xen-scsibk"
+        "usbbk" "pciback" "xen-acpi-processor" "blktap2" "tun" "netxen_nic"
+        "xen_wdt" "xen-acpi-processor" "xen-privcmd" "xen-scsiback"
+        "xenfs"
+      ];
+
+    # The xenfs module is needed in system.activationScripts.xen, but
+    # the modprobe command there fails silently. Include xenfs in the
+    # initrd as a workaround.
+    boot.initrd.kernelModules = [ "xenfs" ];
+
+    # The radeonfb kernel module causes the screen to go black as soon
+    # as it's loaded, so don't load it.
+    boot.blacklistedKernelModules = [ "radeonfb" ];
+
+    # Increase the number of loopback devices from the default (8),
+    # which is way too small because every VM virtual disk requires a
+    # loopback device.
+    boot.extraModprobeConfig =
+      ''
+        options loop max_loop=64
+      '';
+
+    virtualisation.xen.bootParams = [] ++
+      optionals cfg.trace [ "loglvl=all" "guest_loglvl=all" ] ++
+      optional (cfg.domain0MemorySize != 0) "dom0_mem=${toString cfg.domain0MemorySize}M";
+
+    system.extraSystemBuilderCmds =
+      ''
+        ln -s ${cfg.package}/boot/xen.gz $out/xen.gz
+        echo "${toString cfg.bootParams}" > $out/xen-params
+      '';
+
+    # Mount the /proc/xen pseudo-filesystem.
+    system.activationScripts.xen =
+      ''
+        if [ -d /proc/xen ]; then
+            ${pkgs.kmod}/bin/modprobe xenfs 2> /dev/null
+            ${pkgs.util-linux}/bin/mountpoint -q /proc/xen || \
+                ${pkgs.util-linux}/bin/mount -t xenfs none /proc/xen
+        fi
+      '';
+
+    # Domain 0 requires a pvops-enabled kernel.
+    system.requiredKernelConfig = with config.lib.kernelConfig;
+      [ (isYes "XEN")
+        (isYes "X86_IO_APIC")
+        (isYes "ACPI")
+        (isYes "XEN_DOM0")
+        (isYes "PCI_XEN")
+        (isYes "XEN_DEV_EVTCHN")
+        (isYes "XENFS")
+        (isYes "XEN_COMPAT_XENFS")
+        (isYes "XEN_SYS_HYPERVISOR")
+        (isYes "XEN_GNTDEV")
+        (isYes "XEN_BACKEND")
+        (isModule "XEN_NETDEV_BACKEND")
+        (isModule "XEN_BLKDEV_BACKEND")
+        (isModule "XEN_PCIDEV_BACKEND")
+        (isYes "XEN_BALLOON")
+        (isYes "XEN_SCRUB_PAGES")
+      ];
+
+
+    environment.etc =
+      {
+        "xen/xl.conf".source = "${cfg.package}/etc/xen/xl.conf";
+        "xen/scripts".source = "${cfg.package}/etc/xen/scripts";
+        "default/xendomains".text = ''
+          source ${cfg.package}/etc/default/xendomains
+
+          ${cfg.domains.extraConfig}
+        '';
+      }
+      // optionalAttrs (builtins.compareVersions cfg.package.version "4.10" >= 0) {
+        # in V 4.10 oxenstored requires /etc/xen/oxenstored.conf to start
+        "xen/oxenstored.conf".source = "${cfg.package}/etc/xen/oxenstored.conf";
+      };
+
+    # Xen provides udev rules.
+    services.udev.packages = [ cfg.package ];
+
+    services.udev.path = [ pkgs.bridge-utils pkgs.iproute2 ];
+
+    systemd.services.xen-store = {
+      description = "Xen Store Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "network.target" "xen-store.socket" ];
+      requires = [ "xen-store.socket" ];
+      preStart = ''
+        export XENSTORED_ROOTDIR="/var/lib/xenstored"
+        rm -f "$XENSTORED_ROOTDIR"/tdb* &>/dev/null
+
+        mkdir -p /var/run
+        mkdir -p /var/log/xen # Running xl requires /var/log/xen and /var/lib/xen,
+        mkdir -p /var/lib/xen # so we create them here unconditionally.
+        grep -q control_d /proc/xen/capabilities
+        '';
+      serviceConfig = if (builtins.compareVersions cfg.package.version "4.8" < 0) then
+        { ExecStart = ''
+            ${cfg.stored}${optionalString cfg.trace " -T /var/log/xen/xenstored-trace.log"} --no-fork
+            '';
+        } else {
+          ExecStart = ''
+            ${cfg.package}/etc/xen/scripts/launch-xenstore
+            '';
+          Type            = "notify";
+          RemainAfterExit = true;
+          NotifyAccess    = "all";
+        };
+      postStart = ''
+        ${optionalString (builtins.compareVersions cfg.package.version "4.8" < 0) ''
+          time=0
+          timeout=30
+          # Wait for xenstored to actually come up, timing out after 30 seconds
+          while [ $time -lt $timeout ] && ! `${cfg.package}/bin/xenstore-read -s / >/dev/null 2>&1` ; do
+              time=$(($time+1))
+              sleep 1
+          done
+
+          # Exit if we timed out
+          if ! [ $time -lt $timeout ] ; then
+              echo "Could not start Xenstore Daemon"
+              exit 1
+          fi
+        ''}
+        echo "executing xen-init-dom0"
+        ${cfg.package}/lib/xen/bin/xen-init-dom0
+        '';
+    };
+
+    systemd.sockets.xen-store = {
+      description = "XenStore Socket for userspace API";
+      wantedBy = [ "sockets.target" ];
+      socketConfig = {
+        ListenStream = [ "/var/run/xenstored/socket" "/var/run/xenstored/socket_ro" ];
+        SocketMode = "0660";
+        SocketUser = "root";
+        SocketGroup = "root";
+      };
+    };
+
+
+    systemd.services.xen-console = {
+      description = "Xen Console Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-store.service" ];
+      requires = [ "xen-store.service" ];
+      preStart = ''
+        mkdir -p /var/run/xen
+        ${optionalString cfg.trace "mkdir -p /var/log/xen"}
+        grep -q control_d /proc/xen/capabilities
+        '';
+      serviceConfig = {
+        ExecStart = ''
+          ${cfg.package}/bin/xenconsoled\
+            ${optionalString ((builtins.compareVersions cfg.package.version "4.8" >= 0)) " -i"}\
+            ${optionalString cfg.trace " --log=all --log-dir=/var/log/xen"}
+          '';
+      };
+    };
+
+
+    systemd.services.xen-qemu = {
+      description = "Xen Qemu Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-console.service" ];
+      requires = [ "xen-store.service" ];
+      serviceConfig.ExecStart = ''
+        ${cfg.package-qemu}/${cfg.package-qemu.qemu-system-i386} \
+           -xen-attach -xen-domid 0 -name dom0 -M xenpv \
+           -nographic -monitor /dev/null -serial /dev/null -parallel /dev/null
+        '';
+    };
+
+
+    systemd.services.xen-watchdog = {
+      description = "Xen Watchdog Daemon";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-qemu.service" "xen-domains.service" ];
+      serviceConfig.ExecStart = "${cfg.package}/bin/xenwatchdogd 30 15";
+      serviceConfig.Type = "forking";
+      serviceConfig.RestartSec = "1";
+      serviceConfig.Restart = "on-failure";
+    };
+
+
+    systemd.services.xen-bridge = {
+      description = "Xen bridge";
+      wantedBy = [ "multi-user.target" ];
+      before = [ "xen-domains.service" ];
+      preStart = ''
+        mkdir -p /var/run/xen
+        touch /var/run/xen/dnsmasq.pid
+        touch /var/run/xen/dnsmasq.etherfile
+        touch /var/run/xen/dnsmasq.leasefile
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Usable\ range`
+        export XEN_BRIDGE_IP_RANGE_START="${"\${data[1]//[[:blank:]]/}"}"
+        export XEN_BRIDGE_IP_RANGE_END="${"\${data[2]//[[:blank:]]/}"}"
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ address`
+        export XEN_BRIDGE_NETWORK_ADDRESS="${"\${data[1]//[[:blank:]]/}"}"
+
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ mask`
+        export XEN_BRIDGE_NETMASK="${"\${data[1]//[[:blank:]]/}"}"
+
+        echo "${cfg.bridge.address} host gw dns" > /var/run/xen/dnsmasq.hostsfile
+
+        cat <<EOF > /var/run/xen/dnsmasq.conf
+        no-daemon
+        pid-file=/var/run/xen/dnsmasq.pid
+        interface=${cfg.bridge.name}
+        except-interface=lo
+        bind-interfaces
+        auth-zone=xen.local,$XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength}
+        domain=xen.local
+        addn-hosts=/var/run/xen/dnsmasq.hostsfile
+        expand-hosts
+        strict-order
+        no-hosts
+        bogus-priv
+        ${optionalString (!cfg.bridge.forwardDns) ''
+          no-resolv
+          no-poll
+          auth-server=dns.xen.local,${cfg.bridge.name}
+        ''}
+        filterwin2k
+        clear-on-reload
+        domain-needed
+        dhcp-hostsfile=/var/run/xen/dnsmasq.etherfile
+        dhcp-authoritative
+        dhcp-range=$XEN_BRIDGE_IP_RANGE_START,$XEN_BRIDGE_IP_RANGE_END
+        dhcp-no-override
+        no-ping
+        dhcp-leasefile=/var/run/xen/dnsmasq.leasefile
+        EOF
+
+        # DHCP
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p tcp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p udp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        # DNS
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p tcp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -I INPUT  -i ${cfg.bridge.name} -p udp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+
+        ${pkgs.bridge-utils}/bin/brctl addbr ${cfg.bridge.name}
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} ${cfg.bridge.address}
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} netmask $XEN_BRIDGE_NETMASK
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} up
+      '';
+      serviceConfig.ExecStart = "${pkgs.dnsmasq}/bin/dnsmasq --conf-file=/var/run/xen/dnsmasq.conf";
+      postStop = ''
+        IFS='-' read -a data <<< `${pkgs.sipcalc}/bin/sipcalc ${cfg.bridge.address}/${toString cfg.bridge.prefixLength} | grep Network\ address`
+        export XEN_BRIDGE_NETWORK_ADDRESS="${"\${data[1]//[[:blank:]]/}"}"
+
+        ${pkgs.inetutils}/bin/ifconfig ${cfg.bridge.name} down
+        ${pkgs.bridge-utils}/bin/brctl delbr ${cfg.bridge.name}
+
+        # DNS
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p udp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p tcp -d ${cfg.bridge.address} --dport 53 -m state --state NEW,ESTABLISHED -j ACCEPT
+        # DHCP
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p udp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+        ${pkgs.iptables}/bin/iptables -w -D INPUT  -i ${cfg.bridge.name} -p tcp -s $XEN_BRIDGE_NETWORK_ADDRESS/${toString cfg.bridge.prefixLength} --sport 68 --dport 67 -j ACCEPT
+      '';
+    };
+
+
+    systemd.services.xen-domains = {
+      description = "Xen domains - automatically starts, saves and restores Xen domains";
+      wantedBy = [ "multi-user.target" ];
+      after = [ "xen-bridge.service" "xen-qemu.service" ];
+      requires = [ "xen-bridge.service" "xen-qemu.service" ];
+      ## To prevent a race between dhcpcd and xend's bridge setup script
+      ## (which renames eth* to peth* and recreates eth* as a virtual
+      ## device), start dhcpcd after xend.
+      before = [ "dhcpd.service" ];
+      restartIfChanged = false;
+      serviceConfig.RemainAfterExit = "yes";
+      path = [ cfg.package cfg.package-qemu ];
+      environment.XENDOM_CONFIG = "${cfg.package}/etc/sysconfig/xendomains";
+      preStart = "mkdir -p /var/lock/subsys -m 755";
+      serviceConfig.ExecStart = "${cfg.package}/etc/init.d/xendomains start";
+      serviceConfig.ExecStop = "${cfg.package}/etc/init.d/xendomains stop";
+    };
+
+  };
+}
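A sketch of a Dom0 configuration using a few of these options:

    { ... }:
    {
      virtualisation.xen.enable = true;
      # Reserve 2 GiB for Domain 0 instead of assigning it all memory.
      virtualisation.xen.domain0MemorySize = 2048;
      virtualisation.xen.bridge = {
        name = "xenbr0";
        address = "172.16.0.1";
        prefixLength = 24;
      };
    }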
diff --git a/nixpkgs/nixos/modules/virtualisation/xen-domU.nix b/nixpkgs/nixos/modules/virtualisation/xen-domU.nix
new file mode 100644
index 000000000000..ce5a482b1145
--- /dev/null
+++ b/nixpkgs/nixos/modules/virtualisation/xen-domU.nix
@@ -0,0 +1,18 @@
+# Common configuration for Xen DomU NixOS virtual machines.
+
+{ ... }:
+
+{
+  boot.loader.grub.device = "nodev";
+
+  boot.initrd.kernelModules =
+    [ "xen-blkfront" "xen-tpmfront" "xen-kbdfront" "xen-fbfront"
+      "xen-netfront" "xen-pcifront" "xen-scsifront"
+    ];
+
+  # Send syslog messages to the Xen console.
+  services.syslogd.tty = "hvc0";
+
+  # Don't run ntpd, since we should get the correct time from Dom0.
+  services.timesyncd.enable = false;
+}
diff --git a/nixpkgs/nixos/release-combined.nix b/nixpkgs/nixos/release-combined.nix
new file mode 100644
index 000000000000..9b4b92be6f3a
--- /dev/null
+++ b/nixpkgs/nixos/release-combined.nix
@@ -0,0 +1,178 @@
+# This jobset defines the main NixOS channels (such as nixos-unstable
+# and nixos-14.04). The channel is updated every time the ‘tested’ job
+# succeeds, and all other jobs have finished (they may fail).
+
+{ nixpkgs ? { outPath = (import ../lib).cleanSource ./..; revCount = 56789; shortRev = "gfedcba"; }
+, stableBranch ? false
+, supportedSystems ? [ "aarch64-linux" "x86_64-linux" ]
+, limitedSupportedSystems ? [ "i686-linux" ]
+}:
+
+let
+
+  nixpkgsSrc = nixpkgs; # urgh
+
+  pkgs = import ./.. {};
+
+  removeMaintainers = set: if builtins.isAttrs set
+    then if (set.type or "") == "derivation"
+      then set // { meta = builtins.removeAttrs (set.meta or {}) [ "maintainers" ]; }
+      else pkgs.lib.mapAttrs (n: v: removeMaintainers v) set
+    else set;
+
+in rec {
+
+  nixos = removeMaintainers (import ./release.nix {
+    inherit stableBranch;
+    supportedSystems = supportedSystems ++ limitedSupportedSystems;
+    nixpkgs = nixpkgsSrc;
+  });
+
+  nixpkgs = builtins.removeAttrs (removeMaintainers (import ../pkgs/top-level/release.nix {
+    inherit supportedSystems;
+    nixpkgs = nixpkgsSrc;
+  })) [ "unstable" ];
+
+  tested =
+    let
+      onFullSupported = x: map (system: "${x}.${system}") supportedSystems;
+      onAllSupported = x: map (system: "${x}.${system}") (supportedSystems ++ limitedSupportedSystems);
+      onSystems = systems: x: map (system: "${x}.${system}")
+        (pkgs.lib.intersectLists systems (supportedSystems ++ limitedSupportedSystems));
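+      # For example, with the default arguments above, onFullSupported "nixos.manual"
+      # evaluates to [ "nixos.manual.aarch64-linux" "nixos.manual.x86_64-linux" ],
+      # i.e. one aggregate constituent per fully supported system.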
+    in pkgs.releaseTools.aggregate {
+      name = "nixos-${nixos.channel.version}";
+      meta = {
+        description = "Release-critical builds for the NixOS channel";
+        maintainers = with pkgs.lib.maintainers; [ eelco ];
+      };
+      constituents = pkgs.lib.concatLists [
+        [ "nixos.channel" ]
+        (onFullSupported "nixos.dummy")
+        (onAllSupported "nixos.iso_minimal")
+        (onSystems ["x86_64-linux" "aarch64-linux"] "nixos.amazonImage")
+        (onFullSupported "nixos.iso_plasma5")
+        (onFullSupported "nixos.iso_gnome")
+        (onFullSupported "nixos.manual")
+        (onSystems ["x86_64-linux"] "nixos.ova")
+        (onSystems ["aarch64-linux"] "nixos.sd_image")
+        (onFullSupported "nixos.tests.acme")
+        (onSystems ["x86_64-linux"] "nixos.tests.boot.biosCdrom")
+        (onSystems ["x86_64-linux"] "nixos.tests.boot.biosUsb")
+        (onFullSupported "nixos.tests.boot-stage1")
+        (onFullSupported "nixos.tests.boot.uefiCdrom")
+        (onFullSupported "nixos.tests.boot.uefiUsb")
+        (onFullSupported "nixos.tests.chromium")
+        (onFullSupported "nixos.tests.containers-imperative")
+        (onFullSupported "nixos.tests.containers-ip")
+        (onSystems ["x86_64-linux"] "nixos.tests.docker")
+        (onFullSupported "nixos.tests.ecryptfs")
+        (onFullSupported "nixos.tests.env")
+
+        # Way too many manual retries required on Hydra.
+        #  Apparently it's hard to track down the cause.
+        #  So let's depend just on the packages for now.
+        #(onFullSupported "nixos.tests.firefox-esr")
+        #(onFullSupported "nixos.tests.firefox")
+        # Note: only -unwrapped variants have a Hydra job.
+        (onFullSupported "nixpkgs.firefox-esr-unwrapped")
+        (onFullSupported "nixpkgs.firefox-unwrapped")
+
+        (onFullSupported "nixos.tests.firewall")
+        (onFullSupported "nixos.tests.fontconfig-default-fonts")
+        (onFullSupported "nixos.tests.gitlab")
+        (onFullSupported "nixos.tests.gnome")
+        (onFullSupported "nixos.tests.gnome-xorg")
+        (onSystems ["x86_64-linux"] "nixos.tests.hibernate")
+        (onFullSupported "nixos.tests.i3wm")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.btrfsSimple")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.btrfsSubvolDefault")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.btrfsSubvolEscape")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.btrfsSubvols")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.luksroot")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.lvm")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.separateBootFat")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.separateBoot")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.simpleLabels")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.simpleProvided")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.simpleUefiSystemdBoot")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.simple")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.swraid")
+        (onSystems ["x86_64-linux"] "nixos.tests.installer.zfsroot")
+        (onSystems ["x86_64-linux"] "nixos.tests.nixos-rebuild-specialisations")
+        (onFullSupported "nixos.tests.ipv6")
+        (onFullSupported "nixos.tests.keymap.azerty")
+        (onFullSupported "nixos.tests.keymap.colemak")
+        (onFullSupported "nixos.tests.keymap.dvorak")
+        (onFullSupported "nixos.tests.keymap.dvorak-programmer")
+        (onFullSupported "nixos.tests.keymap.neo")
+        (onFullSupported "nixos.tests.keymap.qwertz")
+        (onFullSupported "nixos.tests.latestKernel.login")
+        (onFullSupported "nixos.tests.lightdm")
+        (onFullSupported "nixos.tests.login")
+        (onFullSupported "nixos.tests.misc")
+        (onFullSupported "nixos.tests.mutableUsers")
+        (onFullSupported "nixos.tests.nat.firewall")
+        (onFullSupported "nixos.tests.nat.standalone")
+        (onFullSupported "nixos.tests.networking.scripted.bond")
+        (onFullSupported "nixos.tests.networking.scripted.bridge")
+        (onFullSupported "nixos.tests.networking.scripted.dhcpOneIf")
+        (onFullSupported "nixos.tests.networking.scripted.dhcpSimple")
+        (onFullSupported "nixos.tests.networking.scripted.link")
+        (onFullSupported "nixos.tests.networking.scripted.loopback")
+        (onFullSupported "nixos.tests.networking.scripted.macvlan")
+        (onFullSupported "nixos.tests.networking.scripted.privacy")
+        (onFullSupported "nixos.tests.networking.scripted.routes")
+        (onFullSupported "nixos.tests.networking.scripted.sit")
+        (onFullSupported "nixos.tests.networking.scripted.static")
+        (onFullSupported "nixos.tests.networking.scripted.virtual")
+        (onFullSupported "nixos.tests.networking.scripted.vlan")
+        (onFullSupported "nixos.tests.networking.networkd.bond")
+        (onFullSupported "nixos.tests.networking.networkd.bridge")
+        (onFullSupported "nixos.tests.networking.networkd.dhcpOneIf")
+        (onFullSupported "nixos.tests.networking.networkd.dhcpSimple")
+        (onFullSupported "nixos.tests.networking.networkd.link")
+        (onFullSupported "nixos.tests.networking.networkd.loopback")
+        # Fails nondeterministically (https://github.com/NixOS/nixpkgs/issues/96709)
+        #(onFullSupported "nixos.tests.networking.networkd.macvlan")
+        (onFullSupported "nixos.tests.networking.networkd.privacy")
+        (onFullSupported "nixos.tests.networking.networkd.routes")
+        (onFullSupported "nixos.tests.networking.networkd.sit")
+        (onFullSupported "nixos.tests.networking.networkd.static")
+        (onFullSupported "nixos.tests.networking.networkd.virtual")
+        (onFullSupported "nixos.tests.networking.networkd.vlan")
+        (onFullSupported "nixos.tests.systemd-networkd-ipv6-prefix-delegation")
+        (onFullSupported "nixos.tests.nfs3.simple")
+        (onFullSupported "nixos.tests.nfs4.simple")
+        (onSystems ["x86_64-linux"] "nixos.tests.oci-containers.podman")
+        (onFullSupported "nixos.tests.openssh")
+        (onFullSupported "nixos.tests.pantheon")
+        (onFullSupported "nixos.tests.php.fpm")
+        (onFullSupported "nixos.tests.php.httpd")
+        (onFullSupported "nixos.tests.php.pcre")
+        (onFullSupported "nixos.tests.plasma5")
+        (onSystems ["x86_64-linux"] "nixos.tests.podman")
+        (onFullSupported "nixos.tests.predictable-interface-names.predictableNetworkd")
+        (onFullSupported "nixos.tests.predictable-interface-names.predictable")
+        (onFullSupported "nixos.tests.predictable-interface-names.unpredictableNetworkd")
+        (onFullSupported "nixos.tests.predictable-interface-names.unpredictable")
+        (onFullSupported "nixos.tests.printing-service")
+        (onFullSupported "nixos.tests.printing-socket")
+        (onFullSupported "nixos.tests.proxy")
+        (onFullSupported "nixos.tests.sddm.default")
+        (onFullSupported "nixos.tests.shadow")
+        (onFullSupported "nixos.tests.simple")
+        (onFullSupported "nixos.tests.sway")
+        (onFullSupported "nixos.tests.switchTest")
+        (onFullSupported "nixos.tests.udisks2")
+        (onFullSupported "nixos.tests.xfce")
+        (onFullSupported "nixpkgs.emacs")
+        (onFullSupported "nixpkgs.jdk")
+        ["nixpkgs.tarball"]
+
+        # Ensure that nixpkgs-check-by-name is available in all release channels and nixos-unstable,
+        # so that a pre-built version can be used in CI for PR's on the corresponding development branches.
+        # See ../pkgs/test/nixpkgs-check-by-name/README.md
+        (onSystems ["x86_64-linux"] "nixpkgs.tests.nixpkgs-check-by-name")
+      ];
+    };
+}
diff --git a/nixpkgs/nixos/release-small.nix b/nixpkgs/nixos/release-small.nix
new file mode 100644
index 000000000000..6204dc731ad9
--- /dev/null
+++ b/nixpkgs/nixos/release-small.nix
@@ -0,0 +1,144 @@
+# This jobset is used to generate a NixOS channel that contains a
+# small subset of Nixpkgs, mostly useful for servers that need fast
+# security updates.
+#
+# Individual jobs can be tested by running:
+#
+#   nix-build nixos/release-small.nix -A <jobname>
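+#   e.g. nix-build nixos/release-small.nix -A nixos.tests.openssh.x86_64-linux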
+#
+{ nixpkgs ? { outPath = (import ../lib).cleanSource ./..; revCount = 56789; shortRev = "gfedcba"; }
+, stableBranch ? false
+, supportedSystems ? [ "aarch64-linux" "x86_64-linux" ] # no i686-linux
+}:
+
+let
+
+  nixpkgsSrc = nixpkgs; # urgh
+
+  pkgs = import ./.. { system = "x86_64-linux"; };
+
+  lib = pkgs.lib;
+
+  nixos' = import ./release.nix {
+    inherit stableBranch supportedSystems;
+    nixpkgs = nixpkgsSrc;
+  };
+
+  nixpkgs' = builtins.removeAttrs (import ../pkgs/top-level/release.nix {
+    inherit supportedSystems;
+    nixpkgs = nixpkgsSrc;
+  }) [ "unstable" ];
+
+in rec {
+
+  nixos = {
+    inherit (nixos') channel manual options iso_minimal amazonImage dummy;
+    tests = {
+      inherit (nixos'.tests)
+        acme
+        containers-imperative
+        containers-ip
+        firewall
+        ipv6
+        login
+        misc
+        nat
+        nfs3
+        openssh
+        php
+        predictable-interface-names
+        proxy
+        simple;
+      installer = {
+        inherit (nixos'.tests.installer)
+          lvm
+          separateBoot
+          simple;
+      };
+      boot = {
+        inherit (nixos'.tests.boot)
+          biosCdrom
+          uefiCdrom;
+      };
+    };
+  };
+
+  nixpkgs = {
+    inherit (nixpkgs')
+      apacheHttpd
+      cmake
+      cryptsetup
+      emacs
+      gettext
+      git
+      imagemagick
+      jdk
+      linux
+      mariadb
+      nginx
+      nodejs
+      openssh
+      php
+      postgresql
+      python
+      rsyslog
+      stdenv
+      subversion
+      tarball
+      vim
+      tests-stdenv-gcc-stageCompare;
+  };
+
+  tested = let
+    onSupported = x: map (system: "${x}.${system}") supportedSystems;
+    onSystems = systems: x: map (system: "${x}.${system}")
+      (pkgs.lib.intersectLists systems supportedSystems);
+  in pkgs.releaseTools.aggregate {
+    name = "nixos-${nixos.channel.version}";
+    meta = {
+      description = "Release-critical builds for the NixOS channel";
+      maintainers = [ lib.maintainers.eelco ];
+    };
+    constituents = lib.flatten [
+      [
+        "nixos.channel"
+        "nixpkgs.tarball"
+      ]
+      (map (onSystems [ "x86_64-linux" ]) [
+        "nixos.tests.boot.biosCdrom"
+        "nixos.tests.installer.lvm"
+        "nixos.tests.installer.separateBoot"
+        "nixos.tests.installer.simple"
+      ])
+      (map onSupported [
+        "nixos.dummy"
+        "nixos.iso_minimal"
+        "nixos.amazonImage"
+        "nixos.manual"
+        "nixos.tests.acme"
+        "nixos.tests.boot.uefiCdrom"
+        "nixos.tests.containers-imperative"
+        "nixos.tests.containers-ip"
+        "nixos.tests.firewall"
+        "nixos.tests.ipv6"
+        "nixos.tests.login"
+        "nixos.tests.misc"
+        "nixos.tests.nat.firewall"
+        "nixos.tests.nat.standalone"
+        "nixos.tests.nfs3.simple"
+        "nixos.tests.openssh"
+        "nixos.tests.php.fpm"
+        "nixos.tests.php.pcre"
+        "nixos.tests.predictable-interface-names.predictable"
+        "nixos.tests.predictable-interface-names.predictableNetworkd"
+        "nixos.tests.predictable-interface-names.unpredictable"
+        "nixos.tests.predictable-interface-names.unpredictableNetworkd"
+        "nixos.tests.proxy"
+        "nixos.tests.simple"
+        "nixpkgs.jdk"
+        "nixpkgs.tests-stdenv-gcc-stageCompare"
+      ])
+    ];
+  };
+
+}
diff --git a/nixpkgs/nixos/release.nix b/nixpkgs/nixos/release.nix
new file mode 100644
index 000000000000..2acc5ade7848
--- /dev/null
+++ b/nixpkgs/nixos/release.nix
@@ -0,0 +1,473 @@
+with import ../lib;
+
+{ nixpkgs ? { outPath = cleanSource ./..; revCount = 130979; shortRev = "gfedcba"; }
+, stableBranch ? false
+, supportedSystems ? [ "x86_64-linux" "aarch64-linux" ]
+, configuration ? {}
+}:
+
+with import ../pkgs/top-level/release-lib.nix { inherit supportedSystems; };
+
+let
+
+  version = fileContents ../.version;
+  versionSuffix =
+    (if stableBranch then "." else "pre") + "${toString nixpkgs.revCount}.${nixpkgs.shortRev}";
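+  # With the default nixpkgs argument above this evaluates to "pre130979.gfedcba";
+  # on a stable branch it would be ".130979.gfedcba".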
+
+  # Run the tests for each platform.  You can run a test by doing
+  # e.g. ‘nix-build release.nix -A tests.login.x86_64-linux’,
+  # or equivalently, ‘nix-build tests/login.nix’.
+  # See also nixosTests in pkgs/top-level/all-packages.nix
+  allTestsForSystem = system:
+    import ./tests/all-tests.nix {
+      inherit system;
+      pkgs = import ./.. { inherit system; };
+      callTest = config: {
+        ${system} = hydraJob config.test;
+      };
+    } // {
+      # for typechecking of the scripts and evaluation of
+      # the nodes, without running VMs.
+      allDrivers =
+        import ./tests/all-tests.nix {
+        inherit system;
+        pkgs = import ./.. { inherit system; };
+        callTest = config: {
+          ${system} = hydraJob config.driver;
+        };
+      };
+    };
+
+  allTests =
+    foldAttrs recursiveUpdate {} (map allTestsForSystem supportedSystems);
+
+  pkgs = import ./.. { system = "x86_64-linux"; };
+
+
+  versionModule = { config, ... }: {
+    system.nixos.versionSuffix = versionSuffix;
+    system.nixos.revision = nixpkgs.rev or nixpkgs.shortRev;
+
+    # At creation time we do not have state yet, so just default to latest.
+    system.stateVersion = config.system.nixos.version;
+  };
+
+  makeModules = module: rest: [ configuration versionModule module rest ];
+
+  makeIso =
+    { module, type, system, ... }:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules = makeModules module {
+        isoImage.isoBaseName = "nixos-${type}";
+      };
+    }).config.system.build.isoImage);
+
+
+  makeSdImage =
+    { module, system, ... }:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules = makeModules module {};
+    }).config.system.build.sdImage);
+
+
+  makeSystemTarball =
+    { module, maintainers ? ["viric"], system }:
+
+    with import ./.. { inherit system; };
+
+    let
+
+      config = (import lib/eval-config.nix {
+        inherit system;
+        modules = makeModules module {};
+      }).config;
+
+      tarball = config.system.build.tarball;
+
+    in
+      tarball //
+        { meta = {
+            description = "NixOS system tarball for ${system} - ${stdenv.hostPlatform.linux-kernel.name}";
+            maintainers = map (x: lib.maintainers.${x}) maintainers;
+          };
+          inherit config;
+        };
+
+
+  makeClosure = module: buildFromConfig module (config: config.system.build.toplevel);
+
+
+  buildFromConfig = module: sel: forAllSystems (system: hydraJob (sel (import ./lib/eval-config.nix {
+    inherit system;
+    modules = makeModules module
+      ({ ... }:
+      { fileSystems."/".device  = mkDefault "/dev/sda1";
+        boot.loader.grub.device = mkDefault "/dev/sda";
+      });
+  }).config));
+
+  makeNetboot = { module, system, ... }:
+    let
+      configEvaled = import lib/eval-config.nix {
+        inherit system;
+        modules = makeModules module {};
+      };
+      build = configEvaled.config.system.build;
+      kernelTarget = configEvaled.pkgs.stdenv.hostPlatform.linux-kernel.target;
+    in
+      configEvaled.pkgs.symlinkJoin {
+        name = "netboot";
+        paths = [
+          build.netbootRamdisk
+          build.kernel
+          build.netbootIpxeScript
+        ];
+        postBuild = ''
+          mkdir -p $out/nix-support
+          echo "file ${kernelTarget} ${build.kernel}/${kernelTarget}" >> $out/nix-support/hydra-build-products
+          echo "file initrd ${build.netbootRamdisk}/initrd" >> $out/nix-support/hydra-build-products
+          echo "file ipxe ${build.netbootIpxeScript}/netboot.ipxe" >> $out/nix-support/hydra-build-products
+        '';
+        preferLocalBuild = true;
+      };
+
+in rec {
+
+  channel = import lib/make-channel.nix { inherit pkgs nixpkgs version versionSuffix; };
+
+  manualHTML = buildFromConfig ({ ... }: { }) (config: config.system.build.manual.manualHTML);
+  manual = manualHTML; # TODO(@oxij): remove eventually
+  manualEpub = (buildFromConfig ({ ... }: { }) (config: config.system.build.manual.manualEpub));
+  nixos-configuration-reference-manpage = buildFromConfig ({ ... }: { }) (config: config.system.build.manual.nixos-configuration-reference-manpage);
+  options = (buildFromConfig ({ ... }: { }) (config: config.system.build.manual.optionsJSON)).x86_64-linux;
+
+
+  # Build the initial ramdisk so Hydra can keep track of its size over time.
+  initialRamdisk = buildFromConfig ({ ... }: { }) (config: config.system.build.initialRamdisk);
+
+  kexec = forMatchingSystems supportedSystems (system: (import lib/eval-config.nix {
+    inherit system;
+    modules = [
+      ./modules/installer/netboot/netboot-minimal.nix
+    ];
+  }).config.system.build.kexecTree);
+
+  netboot = forMatchingSystems supportedSystems (system: makeNetboot {
+    module = ./modules/installer/netboot/netboot-minimal.nix;
+    inherit system;
+  });
+
+  iso_minimal = forAllSystems (system: makeIso {
+    module = ./modules/installer/cd-dvd/installation-cd-minimal.nix;
+    type = "minimal";
+    inherit system;
+  });
+
+  iso_plasma5 = forMatchingSystems supportedSystems (system: makeIso {
+    module = ./modules/installer/cd-dvd/installation-cd-graphical-calamares-plasma5.nix;
+    type = "plasma5";
+    inherit system;
+  });
+
+  iso_gnome = forMatchingSystems supportedSystems (system: makeIso {
+    module = ./modules/installer/cd-dvd/installation-cd-graphical-calamares-gnome.nix;
+    type = "gnome";
+    inherit system;
+  });
+
+  # A variant with a more recent (but possibly less stable) kernel that might support more hardware.
+  # This variant keeps zfs support enabled, hoping it will build and work.
+  iso_minimal_new_kernel = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system: makeIso {
+    module = ./modules/installer/cd-dvd/installation-cd-minimal-new-kernel.nix;
+    type = "minimal-new-kernel";
+    inherit system;
+  });
+
+  # A variant with a more recent (but possibly less stable) kernel that might support more hardware.
+  # ZFS support disabled since it is unlikely to support the latest kernel.
+  iso_minimal_new_kernel_no_zfs = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system: makeIso {
+    module = ./modules/installer/cd-dvd/installation-cd-minimal-new-kernel-no-zfs.nix;
+    type = "minimal-new-kernel-no-zfs";
+    inherit system;
+  });
+
+  sd_image = forMatchingSystems [ "armv6l-linux" "armv7l-linux" "aarch64-linux" ] (system: makeSdImage {
+    module = {
+        armv6l-linux = ./modules/installer/sd-card/sd-image-raspberrypi-installer.nix;
+        armv7l-linux = ./modules/installer/sd-card/sd-image-armv7l-multiplatform-installer.nix;
+        aarch64-linux = ./modules/installer/sd-card/sd-image-aarch64-installer.nix;
+      }.${system};
+    inherit system;
+  });
+
+  sd_image_new_kernel = forMatchingSystems [ "aarch64-linux" ] (system: makeSdImage {
+    module = {
+        aarch64-linux = ./modules/installer/sd-card/sd-image-aarch64-new-kernel-installer.nix;
+      }.${system};
+    type = "minimal-new-kernel";
+    inherit system;
+  });
+
+  sd_image_new_kernel_no_zfs = forMatchingSystems [ "aarch64-linux" ] (system: makeSdImage {
+    module = {
+        aarch64-linux = ./modules/installer/sd-card/sd-image-aarch64-new-kernel-no-zfs-installer.nix;
+      }.${system};
+    type = "minimal-new-kernel-no-zfs";
+    inherit system;
+  });
+
+  # A bootable VirtualBox virtual appliance as an OVA file (i.e. packaged OVF).
+  ova = forMatchingSystems [ "x86_64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ versionModule
+          ./modules/installer/virtualbox-demo.nix
+        ];
+    }).config.system.build.virtualBoxOVA)
+
+  );
+
+  # KVM image for proxmox in VMA format
+  proxmoxImage = forMatchingSystems [ "x86_64-linux" ] (system:
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules = [
+        ./modules/virtualisation/proxmox-image.nix
+      ];
+    }).config.system.build.VMA)
+  );
+
+  # LXC tarball for proxmox
+  proxmoxLXC = forMatchingSystems [ "x86_64-linux" ] (system:
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules = [
+        ./modules/virtualisation/proxmox-lxc.nix
+      ];
+    }).config.system.build.tarball)
+  );
+
+  # A disk image that can be imported to Amazon EC2 and registered as an AMI
+  amazonImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/ec2/amazon-image.nix
+        ];
+    }).config.system.build.amazonImage)
+
+  );
+  amazonImageZfs = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/ec2/amazon-image-zfs.nix
+        ];
+    }).config.system.build.amazonImage)
+
+  );
+
+
+  # Test job for https://github.com/NixOS/nixpkgs/issues/121354 to test
+  # automatic sizing without blocking the channel.
+  amazonImageAutomaticSize = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/ec2/amazon-image.nix
+          ({ ... }: { amazonImage.sizeMB = "auto"; })
+        ];
+    }).config.system.build.amazonImage)
+
+  );
+
+  # An image that can be imported into lxd and used for container creation
+  lxdContainerImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-container-image.nix
+        ];
+    }).config.system.build.tarball)
+
+  );
+
+  lxdContainerImageSquashfs = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-container-image.nix
+        ];
+    }).config.system.build.squashfs)
+
+  );
+
+  # Metadata for the lxd image
+  lxdContainerMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-container-image.nix
+        ];
+    }).config.system.build.metadata)
+
+  );
+
+  # An image that can be imported into lxd and used for container creation
+  lxdVirtualMachineImage = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-virtual-machine-image.nix
+        ];
+    }).config.system.build.qemuImage)
+
+  );
+
+  # Metadata for the lxd image
+  lxdVirtualMachineImageMeta = forMatchingSystems [ "x86_64-linux" "aarch64-linux" ] (system:
+
+    with import ./.. { inherit system; };
+
+    hydraJob ((import lib/eval-config.nix {
+      inherit system;
+      modules =
+        [ configuration
+          versionModule
+          ./maintainers/scripts/lxd/lxd-virtual-machine-image.nix
+        ];
+    }).config.system.build.metadata)
+
+  );
+
+  # Ensure that all packages used by the minimal NixOS config end up in the channel.
+  dummy = forAllSystems (system: pkgs.runCommand "dummy"
+    { toplevel = (import lib/eval-config.nix {
+        inherit system;
+        modules = singleton ({ ... }:
+          { fileSystems."/".device  = mkDefault "/dev/sda1";
+            boot.loader.grub.device = mkDefault "/dev/sda";
+            system.stateVersion = mkDefault lib.trivial.release;
+          });
+      }).config.system.build.toplevel;
+      preferLocalBuild = true;
+    }
+    "mkdir $out; ln -s $toplevel $out/dummy");
+
+
+  # Provide container tarball for lxc, libvirt-lxc, docker-lxc, ...
+  containerTarball = forAllSystems (system: makeSystemTarball {
+    module = ./modules/virtualisation/lxc-container.nix;
+    inherit system;
+  });
+
+  tests = allTests;
+
+  /* Build a bunch of typical closures so that Hydra can keep track of
+     the evolution of closure sizes. */
+
+  closures = {
+
+    smallContainer = makeClosure ({ ... }:
+      { boot.isContainer = true;
+        services.openssh.enable = true;
+      });
+
+    tinyContainer = makeClosure ({ ... }:
+      { boot.isContainer = true;
+        imports = [ modules/profiles/minimal.nix ];
+      });
+
+    ec2 = makeClosure ({ ... }:
+      { imports = [ modules/virtualisation/amazon-image.nix ];
+      });
+
+    kde = makeClosure ({ ... }:
+      { services.xserver.enable = true;
+        services.xserver.displayManager.sddm.enable = true;
+        services.xserver.desktopManager.plasma5.enable = true;
+      });
+
+    xfce = makeClosure ({ ... }:
+      { services.xserver.enable = true;
+        services.xserver.desktopManager.xfce.enable = true;
+      });
+
+    gnome = makeClosure ({ ... }:
+      { services.xserver.enable = true;
+        services.xserver.displayManager.gdm.enable = true;
+        services.xserver.desktopManager.gnome.enable = true;
+      });
+
+    pantheon = makeClosure ({ ... }:
+      { services.xserver.enable = true;
+        services.xserver.desktopManager.pantheon.enable = true;
+      });
+
+    deepin = makeClosure ({ ... }:
+      { services.xserver.enable = true;
+        services.xserver.displayManager.lightdm.enable = true;
+        services.xserver.desktopManager.deepin.enable = true;
+      });
+
+    # Linux/Apache/PostgreSQL/PHP stack.
+    lapp = makeClosure ({ pkgs, ... }:
+      { services.httpd.enable = true;
+        services.httpd.adminAddr = "foo@example.org";
+        services.httpd.enablePHP = true;
+        services.postgresql.enable = true;
+        services.postgresql.package = pkgs.postgresql;
+      });
+  };
+}
diff --git a/nixpkgs/nixos/tests/3proxy.nix b/nixpkgs/nixos/tests/3proxy.nix
new file mode 100644
index 000000000000..83d39de018a3
--- /dev/null
+++ b/nixpkgs/nixos/tests/3proxy.nix
@@ -0,0 +1,187 @@
+{ lib, pkgs, ... }: {
+  name = "3proxy";
+  meta.maintainers = with lib.maintainers; [ misuzu ];
+
+  nodes = {
+    peer0 = { lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.1";
+            prefixLength = 24;
+          }
+          {
+            address = "216.58.211.111";
+            prefixLength = 24;
+          }
+        ];
+      };
+    };
+
+    peer1 = { lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.2";
+            prefixLength = 24;
+          }
+          {
+            address = "216.58.211.112";
+            prefixLength = 24;
+          }
+        ];
+      };
+      # Test that binding to [::] works when IPv6 is disabled.
+      networking.enableIPv6 = false;
+      services._3proxy = {
+        enable = true;
+        services = [
+          {
+            type = "admin";
+            bindPort = 9999;
+            auth = [ "none" ];
+          }
+          {
+            type = "proxy";
+            bindPort = 3128;
+            auth = [ "none" ];
+          }
+        ];
+      };
+      networking.firewall.allowedTCPPorts = [ 3128 9999 ];
+    };
+
+    peer2 = { lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.3";
+            prefixLength = 24;
+          }
+          {
+            address = "216.58.211.113";
+            prefixLength = 24;
+          }
+        ];
+      };
+      services._3proxy = {
+        enable = true;
+        services = [
+          {
+            type = "admin";
+            bindPort = 9999;
+            auth = [ "none" ];
+          }
+          {
+            type = "proxy";
+            bindPort = 3128;
+            auth = [ "iponly" ];
+            acl = [
+              {
+                rule = "allow";
+              }
+            ];
+          }
+        ];
+      };
+      networking.firewall.allowedTCPPorts = [ 3128 9999 ];
+    };
+
+    peer3 = { lib, pkgs, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.4";
+            prefixLength = 24;
+          }
+          {
+            address = "216.58.211.114";
+            prefixLength = 24;
+          }
+        ];
+      };
+      services._3proxy = {
+        enable = true;
+        usersFile = pkgs.writeText "3proxy.passwd" ''
+          admin:CR:$1$.GUV4Wvk$WnEVQtaqutD9.beO5ar1W/
+        '';
+        services = [
+          {
+            type = "admin";
+            bindPort = 9999;
+            auth = [ "none" ];
+          }
+          {
+            type = "proxy";
+            bindPort = 3128;
+            auth = [ "strong" ];
+            acl = [
+              {
+                rule = "allow";
+              }
+            ];
+          }
+        ];
+      };
+      networking.firewall.allowedTCPPorts = [ 3128 9999 ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    peer0.wait_for_unit("network-online.target")
+
+    peer1.wait_for_unit("3proxy.service")
+    peer1.wait_for_open_port(9999)
+
+    # test none auth
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://216.58.211.112:9999"
+    )
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://192.168.0.2:9999"
+    )
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.2:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
+
+    peer2.wait_for_unit("3proxy.service")
+    peer2.wait_for_open_port(9999)
+
+    # test iponly auth
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://216.58.211.113:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://192.168.0.3:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.3:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
+
+    peer3.wait_for_unit("3proxy.service")
+    peer3.wait_for_open_port(9999)
+
+    # test strong auth
+    peer0.succeed(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://admin:bigsecret\@192.168.0.4:3128 -S -O /dev/null http://216.58.211.114:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://admin:bigsecret\@192.168.0.4:3128 -S -O /dev/null http://192.168.0.4:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://216.58.211.114:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://192.168.0.4:9999"
+    )
+    peer0.fail(
+        "${pkgs.wget}/bin/wget -e use_proxy=yes -e http_proxy=http://192.168.0.4:3128 -S -O /dev/null http://127.0.0.1:9999"
+    )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/aaaaxy.nix b/nixpkgs/nixos/tests/aaaaxy.nix
new file mode 100644
index 000000000000..19861198c369
--- /dev/null
+++ b/nixpkgs/nixos/tests/aaaaxy.nix
@@ -0,0 +1,29 @@
+{ pkgs, lib, ... }: {
+  name = "aaaaxy";
+  meta.maintainers = with lib.maintainers; [ Luflosi ];
+
+  nodes.machine = {
+    imports = [
+      ./common/x11.nix
+    ];
+  };
+
+  # This starts the game from a known state, feeds it a prerecorded set of button presses
+  # and then checks if the final game state is identical to the expected state.
+  # This is also what AAAAXY's CI system does and serves as a good sanity check.
+  testScript = ''
+    machine.wait_for_x()
+
+    machine.succeed(
+      # benchmark.dem needs to be in a mutable directory,
+      # so we can't just refer to the file in the Nix store directly
+      "mkdir -p '/tmp/aaaaxy/assets/demos/'",
+      "ln -s '${pkgs.aaaaxy.testing_infra}/assets/demos/benchmark.dem' '/tmp/aaaaxy/assets/demos/'",
+      """
+        '${pkgs.aaaaxy.testing_infra}/scripts/regression-test-demo.sh' \
+        'aaaaxy' 'on track for Any%, All Paths, No Teleports and No Coil' \
+        '${pkgs.aaaaxy}/bin/aaaaxy' '/tmp/aaaaxy/assets/demos/benchmark.dem'
+      """,
+    )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/acme-dns.nix b/nixpkgs/nixos/tests/acme-dns.nix
new file mode 100644
index 000000000000..92d9498fe714
--- /dev/null
+++ b/nixpkgs/nixos/tests/acme-dns.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ ... }: {
+  name = "acme-dns";
+
+  nodes.machine = { pkgs, ... }: {
+    services.acme-dns = {
+      enable = true;
+      settings = {
+        general = rec {
+          domain = "acme-dns.home.arpa";
+          nsname = domain;
+          nsadmin = "admin.home.arpa";
+          records = [
+            "${domain}. A 127.0.0.1"
+            "${domain}. AAAA ::1"
+            "${domain}. NS ${domain}."
+          ];
+        };
+        logconfig.loglevel = "debug";
+      };
+    };
+    environment.systemPackages = with pkgs; [ curl bind ];
+  };
+
+  testScript = ''
+    import json
+
+    machine.wait_for_unit("acme-dns.service")
+    machine.wait_for_open_port(53) # dns
+    machine.wait_for_open_port(8080) # http api
+
+    result = machine.succeed("curl --fail -X POST http://localhost:8080/register")
+    print(result)
+
+    registration = json.loads(result)
+
+    machine.succeed(f'dig -t TXT @localhost {registration["fulldomain"]} | grep "SOA" | grep "admin.home.arpa"')
+
+    # acme-dns expects a TXT value of exactly 43 characters
+    txt = "___dummy_validation_token_for_txt_record___"
+
+    machine.succeed(
+      "curl --fail -X POST http://localhost:8080/update "
+      + f' -H "X-Api-User: {registration["username"]}"'
+      + f' -H "X-Api-Key: {registration["password"]}"'
+      + f' -d \'{{"subdomain":"{registration["subdomain"]}", "txt":"{txt}"}}\'''
+    )
+
+    assert txt in machine.succeed(f'dig -t TXT +short @localhost {registration["fulldomain"]}')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/acme.nix b/nixpkgs/nixos/tests/acme.nix
new file mode 100644
index 000000000000..e5f2d4c7934a
--- /dev/null
+++ b/nixpkgs/nixos/tests/acme.nix
@@ -0,0 +1,715 @@
+{ pkgs, lib, ... }: let
+  commonConfig = ./common/acme/client;
+
+  dnsServerIP = nodes: nodes.dnsserver.networking.primaryIPAddress;
+
+  dnsScript = nodes: let
+    dnsAddress = dnsServerIP nodes;
+  in pkgs.writeShellScript "dns-hook.sh" ''
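+    # Called by lego's "exec" dnsProvider (configured below) as
+    #   dns-hook.sh present|cleanup <record-name> <txt-value>
+    # and forwards the record to pebble-challtestsrv's management API on port 8055.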
+    set -euo pipefail
+    echo '[INFO]' "[$2]" 'dns-hook.sh' $*
+    if [ "$1" = "present" ]; then
+      ${pkgs.curl}/bin/curl --data '{"host": "'"$2"'", "value": "'"$3"'"}' http://${dnsAddress}:8055/set-txt
+    else
+      ${pkgs.curl}/bin/curl --data '{"host": "'"$2"'"}' http://${dnsAddress}:8055/clear-txt
+    fi
+  '';
+
+  dnsConfig = nodes: {
+    dnsProvider = "exec";
+    dnsPropagationCheck = false;
+    environmentFile = pkgs.writeText "wildcard.env" ''
+      EXEC_PATH=${dnsScript nodes}
+      EXEC_POLLING_INTERVAL=1
+      EXEC_PROPAGATION_TIMEOUT=1
+      EXEC_SEQUENCE_INTERVAL=1
+    '';
+  };
+
+  documentRoot = pkgs.runCommand "docroot" {} ''
+    mkdir -p "$out"
+    echo hello world > "$out/index.html"
+  '';
+
+  vhostBase = {
+    forceSSL = true;
+    locations."/".root = documentRoot;
+  };
+
+  vhostBaseHttpd = {
+    forceSSL = true;
+    inherit documentRoot;
+  };
+
+  simpleConfig = {
+    security.acme = {
+      certs."http.example.test" = {
+        listenHTTP = ":80";
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = [ 80 ];
+  };
+
+  # Base specialisation config for testing general ACME features
+  webserverBasicConfig = {
+    services.nginx.enable = true;
+    services.nginx.virtualHosts."a.example.test" = vhostBase // {
+      enableACME = true;
+    };
+  };
+
+  # Generate specialisations for testing a web server
+  mkServerConfigs = { server, group, vhostBaseData, extraConfig ? {} }: let
+    baseConfig = { nodes, config, specialConfig ? {} }: lib.mkMerge [
+      {
+        security.acme = {
+          defaults = (dnsConfig nodes);
+          # One manual wildcard cert
+          certs."example.test" = {
+            domain = "*.example.test";
+          };
+        };
+
+        users.users."${config.services."${server}".user}".extraGroups = ["acme"];
+
+        services."${server}" = {
+          enable = true;
+          virtualHosts = {
+            # Run-of-the-mill vhost using HTTP-01 validation
+            "${server}-http.example.test" = vhostBaseData // {
+              serverAliases = [ "${server}-http-alias.example.test" ];
+              enableACME = true;
+            };
+
+            # Another which inherits the DNS-01 config
+            "${server}-dns.example.test" = vhostBaseData // {
+              serverAliases = [ "${server}-dns-alias.example.test" ];
+              enableACME = true;
+              # Set acmeRoot to null instead of using the default of "/var/lib/acme/acme-challenge",
+              # because webroot and dnsProvider are mutually exclusive.
+              acmeRoot = null;
+            };
+
+            # One using the wildcard certificate
+            "${server}-wildcard.example.test" = vhostBaseData // {
+              serverAliases = [ "${server}-wildcard-alias.example.test" ];
+              useACMEHost = "example.test";
+            };
+          };
+        };
+
+        # Used to determine if service reload was triggered
+        systemd.targets."test-renew-${server}" = {
+          wants = [ "acme-${server}-http.example.test.service" ];
+          after = [ "acme-${server}-http.example.test.service" "${server}-config-reload.service" ];
+        };
+      }
+      specialConfig
+      extraConfig
+    ];
+  in {
+    "${server}".configuration = { nodes, config, ... }: baseConfig {
+      inherit nodes config;
+    };
+
+    # Test that the server reloads when an alias is removed (and subsequently that removal works in ACME)
+    "${server}-remove-alias".configuration = { nodes, config, ... }: baseConfig {
+      inherit nodes config;
+      specialConfig = {
+        # Remove an alias, but create a standalone vhost in its place for testing.
+        # This configuration results in certificate errors, since useACMEHost does not
+        # imply appending to extraDomains, and thus lets us validate that the SAN is removed.
+        services."${server}" = {
+          virtualHosts."${server}-http.example.test".serverAliases = lib.mkForce [];
+          virtualHosts."${server}-http-alias.example.test" = vhostBaseData // {
+            useACMEHost = "${server}-http.example.test";
+          };
+        };
+      };
+    };
+
+    # Test that the server reloads when only the acme configuration is changed.
+    "${server}-change-acme-conf".configuration = { nodes, config, ... }: baseConfig {
+      inherit nodes config;
+      specialConfig = {
+        security.acme.certs."${server}-http.example.test" = {
+          keyType = "ec384";
+          # Also test that postRun is exec'd as root
+          postRun = "id | grep root";
+        };
+      };
+    };
+  };
+
+in {
+  name = "acme";
+  meta = {
+    maintainers = lib.teams.acme.members;
+    # Hard timeout in seconds. Average run time is about 7 minutes.
+    timeout = 1800;
+  };
+
+  nodes = {
+    # The fake ACME server which will respond to client requests
+    acme = { nodes, ... }: {
+      imports = [ ./common/acme/server ];
+      networking.nameservers = lib.mkForce [ (dnsServerIP nodes) ];
+    };
+
+    # A fake DNS server which can be configured with records as desired
+    # Used to test DNS-01 challenge
+    dnsserver = { nodes, ... }: {
+      networking.firewall.allowedTCPPorts = [ 8055 53 ];
+      networking.firewall.allowedUDPPorts = [ 53 ];
+      systemd.services.pebble-challtestsrv = {
+        enable = true;
+        description = "Pebble ACME challenge test server";
+        wantedBy = [ "network.target" ];
+        serviceConfig = {
+          ExecStart = "${pkgs.pebble}/bin/pebble-challtestsrv -dns01 ':53' -defaultIPv6 '' -defaultIPv4 '${nodes.webserver.networking.primaryIPAddress}'";
+          # Required to bind on privileged ports.
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+        };
+      };
+    };
+
+    # A web server which will be the node requesting certs
+    webserver = { nodes, config, ... }: {
+      imports = [ commonConfig ];
+      networking.nameservers = lib.mkForce [ (dnsServerIP nodes) ];
+      networking.firewall.allowedTCPPorts = [ 80 443 ];
+
+      # OpenSSL will be used for more thorough certificate validation
+      environment.systemPackages = [ pkgs.openssl ];
+
+      # Set log level to info so that we can see when the service is reloaded
+      services.nginx.logError = "stderr info";
+
+      specialisation = {
+        # Tests HTTP-01 verification using Lego's built-in web server
+        http01lego.configuration = simpleConfig;
+
+        renew.configuration = lib.mkMerge [
+          simpleConfig
+          {
+            # Pebble issues certificates valid for 5 years, so validMinDays
+            # needs to exceed that to force a renewal for this test.
+            security.acme.certs."http.example.test".validMinDays = 9999;
+          }
+        ];
+
+        # Tests that account creds can be safely changed.
+        accountchange.configuration = lib.mkMerge [
+          simpleConfig
+          {
+            security.acme.certs."http.example.test".email = "admin@example.test";
+          }
+        ];
+
+        # First derivation used to test general ACME features
+        general.configuration = { ... }: let
+          caDomain = nodes.acme.test-support.acme.caDomain;
+          email = config.security.acme.defaults.email;
+          # Exit 99 to make it easier to track if this is the reason a renew failed
+          accountCreateTester = ''
+            test -e accounts/${caDomain}/${email}/account.json || exit 99
+          '';
+        in lib.mkMerge [
+          webserverBasicConfig
+          {
+            # Used to test that account creation is collated into one service.
+            # These should not run until after acme-finished-a.example.test.target
+            systemd.services."b.example.test".preStart = accountCreateTester;
+            systemd.services."c.example.test".preStart = accountCreateTester;
+
+            services.nginx.virtualHosts."b.example.test" = vhostBase // {
+              enableACME = true;
+            };
+            services.nginx.virtualHosts."c.example.test" = vhostBase // {
+              enableACME = true;
+            };
+          }
+        ];
+
+        # Test OCSP Stapling
+        ocsp-stapling.configuration = { ... }: lib.mkMerge [
+          webserverBasicConfig
+          {
+            security.acme.certs."a.example.test".ocspMustStaple = true;
+            services.nginx.virtualHosts."a.example.test" = {
+              extraConfig = ''
+                ssl_stapling on;
+                ssl_stapling_verify on;
+              '';
+            };
+          }
+        ];
+
+        # Validate service relationships by adding a slow-start service to nginx's wants.
+        # Reproducer for https://github.com/NixOS/nixpkgs/issues/81842
+        slow-startup.configuration = { ... }: lib.mkMerge [
+          webserverBasicConfig
+          {
+            systemd.services.my-slow-service = {
+              wantedBy = [ "multi-user.target" "nginx.service" ];
+              before = [ "nginx.service" ];
+              preStart = "sleep 5";
+              script = "${pkgs.python3}/bin/python -m http.server";
+            };
+
+            services.nginx.virtualHosts."slow.example.test" = {
+              forceSSL = true;
+              enableACME = true;
+              locations."/".proxyPass = "http://localhost:8000";
+            };
+          }
+        ];
+
+        concurrency-limit.configuration = {pkgs, ...}: lib.mkMerge [
+          webserverBasicConfig {
+            security.acme.maxConcurrentRenewals = 1;
+
+            services.nginx.virtualHosts = {
+              "f.example.test" = vhostBase // {
+                enableACME = true;
+              };
+              "g.example.test" = vhostBase // {
+                enableACME = true;
+              };
+              "h.example.test" = vhostBase // {
+                enableACME = true;
+              };
+            };
+
+            systemd.services = {
+              # check for mutual exclusion of starting renew services
+              "acme-f.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-f" ''
+                test "$(systemctl is-active acme-{g,h}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              "acme-g.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-g" ''
+                test "$(systemctl is-active acme-{f,h}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              "acme-h.example.test".serviceConfig.ExecPreStart = "+" + (pkgs.writeShellScript "test-h" ''
+                test "$(systemctl is-active acme-{g,f}.example.test.service | grep activating | wc -l)" -le 0
+                '');
+              };
+          }
+        ];
+
+        # Test lego internal server (listenHTTP option)
+        # Also tests useRoot option
+        lego-server.configuration = { ... }: {
+          security.acme.useRoot = true;
+          security.acme.certs."lego.example.test" = {
+            listenHTTP = ":80";
+            group = "nginx";
+          };
+          services.nginx.enable = true;
+          services.nginx.virtualHosts."lego.example.test" = {
+            useACMEHost = "lego.example.test";
+            onlySSL = true;
+          };
+        };
+
+      # Test compatibility with Caddy
+      # It only supports useACMEHost, hence not using mkServerConfigs
+      } // (let
+        baseCaddyConfig = { nodes, config, ... }: {
+          security.acme = {
+            defaults = (dnsConfig nodes);
+            # One manual wildcard cert
+            certs."example.test" = {
+              domain = "*.example.test";
+            };
+          };
+
+          users.users."${config.services.caddy.user}".extraGroups = ["acme"];
+
+          services.caddy = {
+            enable = true;
+            virtualHosts."a.example.test" = {
+              useACMEHost = "example.test";
+              extraConfig = ''
+                root * ${documentRoot}
+              '';
+            };
+          };
+        };
+      in {
+        caddy.configuration = baseCaddyConfig;
+
+        # Test that the server reloads when only the acme configuration is changed.
+        "caddy-change-acme-conf".configuration = { nodes, config, ... }: lib.mkMerge [
+          (baseCaddyConfig {
+            inherit nodes config;
+          })
+          {
+            security.acme.certs."example.test" = {
+              keyType = "ec384";
+            };
+          }
+        ];
+
+      # Test compatibility with Nginx
+      }) // (mkServerConfigs {
+          server = "nginx";
+          group = "nginx";
+          vhostBaseData = vhostBase;
+        })
+
+      # Test compatibility with Apache HTTPD
+        // (mkServerConfigs {
+          server = "httpd";
+          group = "wwwrun";
+          vhostBaseData = vhostBaseHttpd;
+          extraConfig = {
+            services.httpd.adminAddr = config.security.acme.defaults.email;
+          };
+        });
+    };
+
+    # The client will be used to curl the webserver to validate configuration
+    client = { nodes, ... }: {
+      imports = [ commonConfig ];
+      networking.nameservers = lib.mkForce [ (dnsServerIP nodes) ];
+
+      # OpenSSL will be used for more thorough certificate validation
+      environment.systemPackages = [ pkgs.openssl ];
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      caDomain = nodes.acme.test-support.acme.caDomain;
+      newServerSystem = nodes.webserver.config.system.build.toplevel;
+      switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
+    in
+    # Note, wait_for_unit does not work for oneshot services that do not have RemainAfterExit=true,
+    # this is because a oneshot goes from inactive => activating => inactive, and never
+    # reaches the active state. Targets do not have this issue.
+    ''
+      import time
+
+
+      TOTAL_RETRIES = 20
+
+
+      class BackoffTracker(object):
+          delay = 1
+          increment = 1
+
+          def handle_fail(self, retries, message) -> int:
+              assert retries < TOTAL_RETRIES, message
+
+              print(f"Retrying in {self.delay}s, {retries + 1}/{TOTAL_RETRIES}")
+              time.sleep(self.delay)
+
+              # Only increment after the first try
+              if retries == 0:
+                  self.delay += self.increment
+                  self.increment *= 2
+
+              return retries + 1
+
+
+      backoff = BackoffTracker()
+
+
+      def switch_to(node, name):
+          # On first switch, this will create a symlink to the current system so that we can
+          # quickly switch between derivations
+          root_specs = "/tmp/specialisation"
+          node.execute(
+            f"test -e {root_specs}"
+            f" || ln -s $(readlink /run/current-system)/specialisation {root_specs}"
+          )
+
+          switcher_path = f"/run/current-system/specialisation/{name}/bin/switch-to-configuration"
+          rc, _ = node.execute(f"test -e '{switcher_path}'")
+          if rc > 0:
+              switcher_path = f"/tmp/specialisation/{name}/bin/switch-to-configuration"
+
+          node.succeed(
+              f"{switcher_path} test"
+          )
+
+
+      # Ensures that the issuer of our cert matches the chain
+      # and is the issuer we expect it to be.
+      # It's a good validation to ensure that cert.pem and fullchain.pem
+      # are not still self-signed after verification.
+      def check_issuer(node, cert_name, issuer):
+          for fname in ("cert.pem", "fullchain.pem"):
+              actual_issuer = node.succeed(
+                  f"openssl x509 -noout -issuer -in /var/lib/acme/{cert_name}/{fname}"
+              ).partition("=")[2]
+              print(f"{fname} issuer: {actual_issuer}")
+              assert issuer.lower() in actual_issuer.lower()
+
+
+      # Ensure cert comes before chain in fullchain.pem
+      def check_fullchain(node, cert_name):
+          subject_data = node.succeed(
+              f"openssl crl2pkcs7 -nocrl -certfile /var/lib/acme/{cert_name}/fullchain.pem"
+              " | openssl pkcs7 -print_certs -noout"
+          )
+          for line in subject_data.lower().split("\n"):
+              if "subject" in line:
+                  print(f"First subject in fullchain.pem: {line}")
+                  assert cert_name.lower() in line
+                  return
+
+          assert False
+
+
+      def check_connection(node, domain, retries=0):
+          result = node.succeed(
+              "openssl s_client -brief -verify 2 -CAfile /tmp/ca.crt"
+              f" -servername {domain} -connect {domain}:443 < /dev/null 2>&1"
+          )
+
+          for line in result.lower().split("\n"):
+              if "verification" in line and "error" in line:
+                  retries = backoff.handle_fail(retries, f"Failed to connect to https://{domain}")
+                  return check_connection(node, domain, retries)
+
+
+      def check_connection_key_bits(node, domain, bits, retries=0):
+          result = node.succeed(
+              "openssl s_client -CAfile /tmp/ca.crt"
+              f" -servername {domain} -connect {domain}:443 < /dev/null"
+              " | openssl x509 -noout -text | grep -i Public-Key"
+          )
+          print("Key type:", result)
+
+          if bits not in result:
+              retries = backoff.handle_fail(retries, f"Did not find expected number of bits ({bits}) in key")
+              return check_connection_key_bits(node, domain, bits, retries)
+
+
+      def check_stapling(node, domain, retries=0):
+          # Pebble doesn't provide a full OCSP responder, so just check the URL
+          result = node.succeed(
+              "openssl s_client -CAfile /tmp/ca.crt"
+              f" -servername {domain} -connect {domain}:443 < /dev/null"
+              " | openssl x509 -noout -ocsp_uri"
+          )
+          print("OCSP Responder URL:", result)
+
+          if "${caDomain}:4002" not in result.lower():
+              retries = backoff.handle_fail(retries, "OCSP Stapling check failed")
+              return check_stapling(node, domain, retries)
+
+
+      def download_ca_certs(node, retries=0):
+          exit_code, _ = node.execute("curl https://${caDomain}:15000/roots/0 > /tmp/ca.crt")
+          exit_code_2, _ = node.execute(
+              "curl https://${caDomain}:15000/intermediate-keys/0 >> /tmp/ca.crt"
+          )
+
+          if exit_code + exit_code_2 > 0:
+              retries = backoff.handle_fail(retries, "Failed to connect to pebble to download root CA certs")
+              return download_ca_certs(node, retries)
+
+
+      start_all()
+
+      dnsserver.wait_for_unit("pebble-challtestsrv.service")
+      client.wait_for_unit("default.target")
+
+      client.succeed(
+          'curl --data \'{"host": "${caDomain}", "addresses": ["${nodes.acme.networking.primaryIPAddress}"]}\' http://${dnsServerIP nodes}:8055/add-a'
+      )
+
+      acme.wait_for_unit("network-online.target")
+      acme.wait_for_unit("pebble.service")
+
+      download_ca_certs(client)
+
+      # Perform http-01 w/ lego test first
+      with subtest("Can request certificate with Lego's built in web server"):
+          switch_to(webserver, "http01lego")
+          webserver.wait_for_unit("acme-finished-http.example.test.target")
+          check_fullchain(webserver, "http.example.test")
+          check_issuer(webserver, "http.example.test", "pebble")
+
+      # Perform renewal test
+      with subtest("Can renew certificates when they expire"):
+          hash = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
+          switch_to(webserver, "renew")
+          webserver.wait_for_unit("acme-finished-http.example.test.target")
+          check_fullchain(webserver, "http.example.test")
+          check_issuer(webserver, "http.example.test", "pebble")
+          hash_after = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
+          assert hash != hash_after
+
+      # Perform account change test
+      with subtest("Handles email change correctly"):
+          hash = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
+          switch_to(webserver, "accountchange")
+          webserver.wait_for_unit("acme-finished-http.example.test.target")
+          check_fullchain(webserver, "http.example.test")
+          check_issuer(webserver, "http.example.test", "pebble")
+          hash_after = webserver.succeed("sha256sum /var/lib/acme/http.example.test/cert.pem")
+          # Has to do a full run to register the account, which creates new certs.
+          assert hash != hash_after
+
+      # Perform general tests
+      switch_to(webserver, "general")
+
+      with subtest("Can request certificate with HTTP-01 challenge"):
+          webserver.wait_for_unit("acme-finished-a.example.test.target")
+          check_fullchain(webserver, "a.example.test")
+          check_issuer(webserver, "a.example.test", "pebble")
+          webserver.wait_for_unit("nginx.service")
+          check_connection(client, "a.example.test")
+
+      with subtest("Runs 1 cert for account creation before others"):
+          webserver.wait_for_unit("acme-finished-b.example.test.target")
+          webserver.wait_for_unit("acme-finished-c.example.test.target")
+          check_connection(client, "b.example.test")
+          check_connection(client, "c.example.test")
+
+      with subtest("Certificates and accounts have safe + valid permissions"):
+          # Nginx will set the group appropriately when enableACME is used
+          group = "nginx"
+          webserver.succeed(
+              f"test $(stat -L -c '%a %U %G' /var/lib/acme/a.example.test/*.pem | tee /dev/stderr | grep '640 acme {group}' | wc -l) -eq 5"
+          )
+          webserver.succeed(
+              f"test $(stat -L -c '%a %U %G' /var/lib/acme/.lego/a.example.test/**/a.example.test* | tee /dev/stderr | grep '600 acme {group}' | wc -l) -eq 4"
+          )
+          webserver.succeed(
+              f"test $(stat -L -c '%a %U %G' /var/lib/acme/a.example.test | tee /dev/stderr | grep '750 acme {group}' | wc -l) -eq 1"
+          )
+          webserver.succeed(
+              f"test $(find /var/lib/acme/accounts -type f -exec stat -L -c '%a %U %G' {{}} \\; | tee /dev/stderr | grep -v '600 acme {group}' | wc -l) -eq 0"
+          )
+
+      # Self-signed cert tests happen late so we aren't fighting the system init triggering cert renewal
+      with subtest("Can generate valid selfsigned certs"):
+          webserver.succeed("systemctl clean acme-a.example.test.service --what=state")
+          webserver.succeed("systemctl start acme-selfsigned-a.example.test.service")
+          check_fullchain(webserver, "a.example.test")
+          check_issuer(webserver, "a.example.test", "minica")
+          # Check selfsigned permissions
+          webserver.succeed(
+              f"test $(stat -L -c '%a %U %G' /var/lib/acme/a.example.test/*.pem | tee /dev/stderr | grep '640 acme {group}' | wc -l) -eq 5"
+          )
+          # Will succeed if nginx can load the certs
+          webserver.succeed("systemctl start nginx-config-reload.service")
+
+      with subtest("Correctly implements OCSP stapling"):
+          switch_to(webserver, "ocsp-stapling")
+          webserver.wait_for_unit("acme-finished-a.example.test.target")
+          check_stapling(client, "a.example.test")
+
+      with subtest("Can request certificate with HTTP-01 using lego's internal web server"):
+          switch_to(webserver, "lego-server")
+          webserver.wait_for_unit("acme-finished-lego.example.test.target")
+          webserver.wait_for_unit("nginx.service")
+          webserver.succeed("echo HENLO && systemctl cat nginx.service")
+          webserver.succeed("test \"$(stat -c '%U' /var/lib/acme/* | uniq)\" = \"root\"")
+          check_connection(client, "a.example.test")
+          check_connection(client, "lego.example.test")
+
+      with subtest("Can request certificate with HTTP-01 when nginx startup is delayed"):
+          webserver.execute("systemctl stop nginx")
+          switch_to(webserver, "slow-startup")
+          webserver.wait_for_unit("acme-finished-slow.example.test.target")
+          check_issuer(webserver, "slow.example.test", "pebble")
+          webserver.wait_for_unit("nginx.service")
+          check_connection(client, "slow.example.test")
+
+      with subtest("Can limit concurrency of running renewals"):
+          switch_to(webserver, "concurrency-limit")
+          webserver.wait_for_unit("acme-finished-f.example.test.target")
+          webserver.wait_for_unit("acme-finished-g.example.test.target")
+          webserver.wait_for_unit("acme-finished-h.example.test.target")
+          check_connection(client, "f.example.test")
+          check_connection(client, "g.example.test")
+          check_connection(client, "h.example.test")
+
+      with subtest("Works with caddy"):
+          switch_to(webserver, "caddy")
+          webserver.wait_for_unit("acme-finished-example.test.target")
+          webserver.wait_for_unit("caddy.service")
+          # FIXME reloading caddy is not sufficient to load new certs.
+          # Restart it manually until this is fixed.
+          webserver.succeed("systemctl restart caddy.service")
+          check_connection(client, "a.example.test")
+
+      with subtest("security.acme changes reflect on caddy"):
+          switch_to(webserver, "caddy-change-acme-conf")
+          webserver.wait_for_unit("acme-finished-example.test.target")
+          webserver.wait_for_unit("caddy.service")
+          # FIXME reloading caddy is not sufficient to load new certs.
+          # Restart it manually until this is fixed.
+          webserver.succeed("systemctl restart caddy.service")
+          check_connection_key_bits(client, "a.example.test", "384")
+
+      domains = ["http", "dns", "wildcard"]
+      for server, logsrc in [
+          ("nginx", "journalctl -n 30 -u nginx.service"),
+          ("httpd", "tail -n 30 /var/log/httpd/*.log"),
+      ]:
+          wait_for_server = lambda: webserver.wait_for_unit(f"{server}.service")
+          with subtest(f"Works with {server}"):
+              try:
+                  switch_to(webserver, server)
+                  # Skip wildcard domain for this check ([:-1])
+                  for domain in domains[:-1]:
+                      webserver.wait_for_unit(
+                          f"acme-finished-{server}-{domain}.example.test.target"
+                      )
+              except Exception as err:
+                  _, output = webserver.execute(
+                      f"{logsrc} && ls -al /var/lib/acme/acme-challenge"
+                  )
+                  print(output)
+                  raise err
+
+              wait_for_server()
+
+              for domain in domains[:-1]:
+                  check_issuer(webserver, f"{server}-{domain}.example.test", "pebble")
+              for domain in domains:
+                  check_connection(client, f"{server}-{domain}.example.test")
+                  check_connection(client, f"{server}-{domain}-alias.example.test")
+
+          test_domain = f"{server}-{domains[0]}.example.test"
+
+          with subtest(f"Can reload {server} when timer triggers renewal"):
+              # Switch to selfsigned first
+              webserver.succeed(f"systemctl clean acme-{test_domain}.service --what=state")
+              webserver.succeed(f"systemctl start acme-selfsigned-{test_domain}.service")
+              check_issuer(webserver, test_domain, "minica")
+              webserver.succeed(f"systemctl start {server}-config-reload.service")
+              webserver.succeed(f"systemctl start test-renew-{server}.target")
+              check_issuer(webserver, test_domain, "pebble")
+              check_connection(client, test_domain)
+
+          with subtest("Can remove an alias from a domain + cert is updated"):
+              test_alias = f"{server}-{domains[0]}-alias.example.test"
+              switch_to(webserver, f"{server}-remove-alias")
+              webserver.wait_for_unit(f"acme-finished-{test_domain}.target")
+              wait_for_server()
+              check_connection(client, test_domain)
+              rc, _s = client.execute(
+                  f"openssl s_client -CAfile /tmp/ca.crt -connect {test_alias}:443"
+                  " </dev/null 2>/dev/null | openssl x509 -noout -text"
+                  f" | grep DNS: | grep {test_alias}"
+              )
+              assert rc > 0, "Removed extraDomainName was not removed from the cert"
+
+          with subtest("security.acme changes reflect on web server"):
+              # Switch back to normal server config first, reset everything.
+              switch_to(webserver, server)
+              wait_for_server()
+              switch_to(webserver, f"{server}-change-acme-conf")
+              webserver.wait_for_unit(f"acme-finished-{test_domain}.target")
+              wait_for_server()
+              check_connection_key_bits(client, test_domain, "384")
+    '';
+}
diff --git a/nixpkgs/nixos/tests/activation/nix-channel.nix b/nixpkgs/nixos/tests/activation/nix-channel.nix
new file mode 100644
index 000000000000..d26ea98e56cc
--- /dev/null
+++ b/nixpkgs/nixos/tests/activation/nix-channel.nix
@@ -0,0 +1,26 @@
+{ lib, ... }:
+
+{
+
+  name = "activation-nix-channel";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = {
+    nix.channel.enable = true;
+  };
+
+  testScript = { nodes, ... }: ''
+    machine.start(allow_reboot=True)
+
+    assert machine.succeed("cat /root/.nix-channels") == "${nodes.machine.system.defaultChannel} nixos\n"
+
+    nixpkgs_unstable_channel = "https://nixos.org/channels/nixpkgs-unstable nixpkgs"
+    machine.succeed(f"echo '{nixpkgs_unstable_channel}' > /root/.nix-channels")
+
+    machine.reboot()
+
+    assert machine.succeed("cat /root/.nix-channels") == f"{nixpkgs_unstable_channel}\n"
+  '';
+
+}
diff --git a/nixpkgs/nixos/tests/activation/var.nix b/nixpkgs/nixos/tests/activation/var.nix
new file mode 100644
index 000000000000..1a546a7671c5
--- /dev/null
+++ b/nixpkgs/nixos/tests/activation/var.nix
@@ -0,0 +1,18 @@
+{ lib, ... }:
+
+{
+
+  name = "activation-var";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { };
+
+  testScript = ''
+    assert machine.succeed("stat -c '%a' /var/tmp") == "1777\n"
+    assert machine.succeed("stat -c '%a' /var/empty") == "555\n"
+    assert machine.succeed("stat -c '%U' /var/empty") == "root\n"
+    assert machine.succeed("stat -c '%G' /var/empty") == "root\n"
+    assert "i" in machine.succeed("lsattr -d /var/empty")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/adguardhome.nix b/nixpkgs/nixos/tests/adguardhome.nix
new file mode 100644
index 000000000000..a6f790b83f5f
--- /dev/null
+++ b/nixpkgs/nixos/tests/adguardhome.nix
@@ -0,0 +1,135 @@
+{
+  name = "adguardhome";
+
+  nodes = {
+    nullConf = { ... }: { services.adguardhome = { enable = true; }; };
+
+    emptyConf = { lib, ... }: {
+      services.adguardhome = {
+        enable = true;
+      };
+    };
+
+    declarativeConf = { ... }: {
+      services.adguardhome = {
+        enable = true;
+
+        mutableSettings = false;
+        settings = {
+          schema_version = 0;
+          dns = {
+            bind_host = "0.0.0.0";
+            bootstrap_dns = "127.0.0.1";
+          };
+        };
+      };
+    };
+
+    mixedConf = { ... }: {
+      services.adguardhome = {
+        enable = true;
+
+        mutableSettings = true;
+        settings = {
+          schema_version = 0;
+          dns = {
+            bind_host = "0.0.0.0";
+            bootstrap_dns = "127.0.0.1";
+          };
+        };
+      };
+    };
+
+    dhcpConf = { lib, ... }: {
+      virtualisation.vlans = [ 1 ];
+
+      networking = {
+        # Configure static IP for DHCP server
+        useDHCP = false;
+        interfaces."eth1" = lib.mkForce {
+          useDHCP = false;
+          ipv4 = {
+            addresses = [{
+              address = "10.0.10.1";
+              prefixLength = 24;
+            }];
+
+            routes = [{
+              address = "10.0.10.0";
+              prefixLength = 24;
+            }];
+          };
+        };
+
+        # Required for DHCP
+        firewall.allowedUDPPorts = [ 67 68 ];
+      };
+
+      services.adguardhome = {
+        enable = true;
+        allowDHCP = true;
+        mutableSettings = false;
+        settings = {
+          schema_version = 0;
+          dns = {
+            bind_host = "0.0.0.0";
+            bootstrap_dns = "127.0.0.1";
+          };
+          dhcp = {
+            # This implicitly enables CAP_NET_RAW
+            enabled = true;
+            interface_name = "eth1";
+            local_domain_name = "lan";
+            dhcpv4 = {
+              gateway_ip = "10.0.10.1";
+              range_start = "10.0.10.100";
+              range_end = "10.0.10.101";
+              subnet_mask = "255.255.255.0";
+            };
+          };
+        };
+      };
+    };
+
+    client = { lib, ... }: {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        interfaces.eth1 = {
+          useDHCP = true;
+          ipv4.addresses = lib.mkForce [ ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    with subtest("Minimal (settings = null) config test"):
+        nullConf.wait_for_unit("adguardhome.service")
+
+    with subtest("Default config test"):
+        emptyConf.wait_for_unit("adguardhome.service")
+        emptyConf.wait_for_open_port(3000)
+
+    with subtest("Declarative config test, DNS will be reachable"):
+        declarativeConf.wait_for_unit("adguardhome.service")
+        declarativeConf.wait_for_open_port(53)
+        declarativeConf.wait_for_open_port(3000)
+
+    with subtest("Mixed config test, check whether merging works"):
+        mixedConf.wait_for_unit("adguardhome.service")
+        mixedConf.wait_for_open_port(53)
+        mixedConf.wait_for_open_port(3000)
+        # Test whether merging works properly, even if nothing is changed
+        mixedConf.systemctl("restart adguardhome.service")
+        mixedConf.wait_for_unit("adguardhome.service")
+        mixedConf.wait_for_open_port(3000)
+
+    with subtest("Testing successful DHCP start"):
+        dhcpConf.wait_for_unit("adguardhome.service")
+        client.wait_for_unit("network-online.target")
+        # Test IP assignment via DHCP
+        dhcpConf.wait_until_succeeds("ping -c 5 10.0.10.100")
+        # Test hostname resolution over DHCP-provided DNS
+        dhcpConf.wait_until_succeeds("ping -c 5 client.lan")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/aesmd.nix b/nixpkgs/nixos/tests/aesmd.nix
new file mode 100644
index 000000000000..848e1c599201
--- /dev/null
+++ b/nixpkgs/nixos/tests/aesmd.nix
@@ -0,0 +1,102 @@
+{ pkgs, lib, ... }: {
+  name = "aesmd";
+  meta = {
+    maintainers = with lib.maintainers; [ trundle veehaitch ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    services.aesmd = {
+      enable = true;
+      settings = {
+        defaultQuotingType = "ecdsa_256";
+        proxyType = "direct";
+        whitelistUrl = "http://nixos.org";
+      };
+    };
+
+    # Should have access to the AESM socket
+    users.users."sgxtest" = {
+      isNormalUser = true;
+      extraGroups = [ "sgx" ];
+    };
+
+    # Should NOT have access to the AESM socket
+    users.users."nosgxtest".isNormalUser = true;
+
+    # We don't have a real SGX machine in NixOS tests
+    systemd.services.aesmd.unitConfig.AssertPathExists = lib.mkForce [ ];
+
+    specialisation = {
+      withQuoteProvider.configuration = { ... }: {
+        services.aesmd = {
+          quoteProviderLibrary = pkgs.sgx-azure-dcap-client;
+          environment = {
+            AZDCAP_DEBUG_LOG_LEVEL = "INFO";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
+    in
+    ''
+      def get_aesmd_pid():
+        status, main_pid = machine.systemctl("show --property MainPID --value aesmd.service")
+        assert status == 0, "Could not get MainPID of aesmd.service"
+        return main_pid.strip()
+
+      with subtest("aesmd.service starts"):
+        machine.wait_for_unit("aesmd.service")
+
+      main_pid = get_aesmd_pid()
+
+      with subtest("aesmd.service runtime directory permissions"):
+        runtime_dir = "/run/aesmd";
+        res = machine.succeed(f"stat -c '%a %U %G' {runtime_dir}").strip()
+        assert "750 aesmd sgx" == res, f"{runtime_dir} does not have the expected permissions: {res}"
+
+      with subtest("aesm.socket available on host"):
+        socket_path = "/var/run/aesmd/aesm.socket"
+        machine.wait_until_succeeds(f"test -S {socket_path}")
+        machine.succeed(f"test 777 -eq $(stat -c '%a' {socket_path})")
+        for op in [ "-r", "-w", "-x" ]:
+          machine.succeed(f"sudo -u sgxtest test {op} {socket_path}")
+          machine.fail(f"sudo -u nosgxtest test {op} {socket_path}")
+
+      with subtest("Copies white_list_cert_to_be_verify.bin"):
+        whitelist_path = "/var/opt/aesmd/data/white_list_cert_to_be_verify.bin"
+        whitelist_perms = machine.succeed(
+          f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/stat -c '%a' {whitelist_path}"
+        ).strip()
+        assert "644" == whitelist_perms, f"white_list_cert_to_be_verify.bin has permissions {whitelist_perms}"
+
+      with subtest("Writes and binds aesm.conf in service namespace"):
+        aesmd_config = machine.succeed(f"nsenter -m -t {main_pid} ${pkgs.coreutils}/bin/cat /etc/aesmd.conf")
+
+        assert aesmd_config == "whitelist url = http://nixos.org\nproxy type = direct\ndefault quoting type = ecdsa_256\n", "aesmd.conf differs"
+
+      with subtest("aesmd.service without quote provider library has correct LD_LIBRARY_PATH"):
+        status, environment = machine.systemctl("show --property Environment --value aesmd.service")
+        assert status == 0, "Could not get Environment of aesmd.service"
+        env_by_name = dict(entry.split("=", 1) for entry in environment.split())
+        assert not env_by_name["LD_LIBRARY_PATH"], "LD_LIBRARY_PATH is not empty"
+
+      with subtest("aesmd.service with quote provider library starts"):
+        machine.succeed('${specialisations}/withQuoteProvider/bin/switch-to-configuration test')
+        machine.wait_for_unit("aesmd.service")
+
+      main_pid = get_aesmd_pid()
+
+      with subtest("aesmd.service with quote provider library has correct LD_LIBRARY_PATH"):
+        ld_library_path = machine.succeed(f"xargs -0 -L1 -a /proc/{main_pid}/environ | grep LD_LIBRARY_PATH")
+        assert ld_library_path.startswith("LD_LIBRARY_PATH=${pkgs.sgx-azure-dcap-client}/lib:"), \
+          "LD_LIBRARY_PATH is not set to the configured quote provider library"
+
+      with subtest("aesmd.service with quote provider library has set AZDCAP_DEBUG_LOG_LEVEL"):
+        azdcap_debug_log_level = machine.succeed(f"xargs -0 -L1 -a /proc/{main_pid}/environ | grep AZDCAP_DEBUG_LOG_LEVEL")
+        assert azdcap_debug_log_level == "AZDCAP_DEBUG_LOG_LEVEL=INFO\n", "AZDCAP_DEBUG_LOG_LEVEL is not set to INFO"
+    '';
+}
diff --git a/nixpkgs/nixos/tests/agda.nix b/nixpkgs/nixos/tests/agda.nix
new file mode 100644
index 000000000000..6f51300111ac
--- /dev/null
+++ b/nixpkgs/nixos/tests/agda.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  hello-world = pkgs.writeText "hello-world" ''
+    {-# OPTIONS --guardedness #-}
+    open import IO
+    open import Level
+
+    main = run {0ℓ} (putStrLn "Hello World!")
+  '';
+in
+{
+  name = "agda";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ alexarice turion ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [
+      (pkgs.agda.withPackages {
+        pkgs = p: [ p.standard-library ];
+      })
+    ];
+    virtualisation.memorySize = 2000; # Agda uses a lot of memory
+  };
+
+  testScript = ''
+    assert (
+        "${pkgs.agdaPackages.lib.interfaceFile "Everything.agda"}" == "Everything.agdai"
+    ), "wrong interface file for Everything.agda"
+    assert (
+        "${pkgs.agdaPackages.lib.interfaceFile "tmp/Everything.agda.md"}" == "tmp/Everything.agdai"
+    ), "wrong interface file for tmp/Everything.agda.md"
+
+    # Minimal script that typechecks
+    machine.succeed("touch TestEmpty.agda")
+    machine.succeed("agda TestEmpty.agda")
+
+    # Hello world
+    machine.succeed(
+        "cp ${hello-world} HelloWorld.agda"
+    )
+    machine.succeed("agda -l standard-library -i . -c HelloWorld.agda")
+    # Check execution
+    assert "Hello World!" in machine.succeed(
+        "./HelloWorld"
+    ), "HelloWorld does not run properly"
+  '';
+}
+)
diff --git a/nixpkgs/nixos/tests/airsonic.nix b/nixpkgs/nixos/tests/airsonic.nix
new file mode 100644
index 000000000000..69f979726bce
--- /dev/null
+++ b/nixpkgs/nixos/tests/airsonic.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "airsonic";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ sumnerevans ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      services.airsonic = {
+        enable = true;
+        maxMemory = 800;
+      };
+    };
+
+  testScript = ''
+    def airsonic_is_up(_) -> bool:
+        status, _ = machine.execute("curl --fail http://localhost:4040/login")
+        return status == 0
+
+
+    machine.start()
+    machine.wait_for_unit("airsonic.service")
+    machine.wait_for_open_port(4040)
+
+    with machine.nested("Waiting for UI to work"):
+        retry(airsonic_is_up)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/akkoma.nix b/nixpkgs/nixos/tests/akkoma.nix
new file mode 100644
index 000000000000..287e2d485999
--- /dev/null
+++ b/nixpkgs/nixos/tests/akkoma.nix
@@ -0,0 +1,124 @@
+/*
+  End-to-end test for Akkoma.
+
+  Based in part on nixos/tests/pleroma.
+
+  TODO: Test federation.
+*/
+import ./make-test-python.nix ({ pkgs, package ? pkgs.akkoma, confined ? false, ... }:
+let
+  userPassword = "4LKOrGo8SgbPm1a6NclVU5Wb";
+
+  provisionUser = pkgs.writers.writeBashBin "provisionUser" ''
+    set -eu -o errtrace -o pipefail
+
+    pleroma_ctl user new jamy jamy@nixos.test --password '${userPassword}' --moderator --admin -y
+  '';
+
+  tlsCert = pkgs.runCommand "selfSignedCerts" {
+    nativeBuildInputs = with pkgs; [ openssl ];
+  } ''
+    mkdir -p $out
+    openssl req -x509 \
+      -subj '/CN=akkoma.nixos.test/' -days 49710 \
+      -addext 'subjectAltName = DNS:akkoma.nixos.test' \
+      -keyout "$out/key.pem" -newkey ed25519 \
+      -out "$out/cert.pem" -noenc
+  '';
+
+  sendToot = pkgs.writers.writeBashBin "sendToot" ''
+    set -eu -o errtrace -o pipefail
+
+    export REQUESTS_CA_BUNDLE="/etc/ssl/certs/ca-certificates.crt"
+
+    echo '${userPassword}' | ${pkgs.toot}/bin/toot login_cli -i "akkoma.nixos.test" -e "jamy@nixos.test"
+    echo "y" | ${pkgs.toot}/bin/toot post "hello world Jamy here"
+
+    # Retrieving timeline with toot currently broken due to incompatible timestamp format
+    # cf. <https://akkoma.dev/AkkomaGang/akkoma/issues/637> and <https://github.com/ihabunek/toot/issues/399>
+    #echo "y" | ${pkgs.toot}/bin/toot timeline | grep -F -q "hello world Jamy here"
+
+    # Test file upload
+    echo "y" | ${pkgs.toot}/bin/toot upload <(dd if=/dev/zero bs=1024 count=1024 status=none) \
+      | grep -F -q "https://akkoma.nixos.test/media"
+  '';
+
+  checkFe = pkgs.writers.writeBashBin "checkFe" ''
+    set -eu -o errtrace -o pipefail
+
+    paths=( / /static/{config,styles}.json /pleroma/admin/ )
+
+    for path in "''${paths[@]}"; do
+      diff \
+        <(${pkgs.curl}/bin/curl -f -S -s -o /dev/null -w '%{response_code}' "https://akkoma.nixos.test$path") \
+        <(echo -n 200)
+    done
+  '';
+
+  hosts = nodes: ''
+    ${nodes.akkoma.networking.primaryIPAddress} akkoma.nixos.test
+    ${nodes.client.networking.primaryIPAddress} client.nixos.test
+  '';
+in
+{
+  name = "akkoma";
+  nodes = {
+    client = { nodes, pkgs, config, ... }: {
+      security.pki.certificateFiles = [ "${tlsCert}/cert.pem" ];
+      networking.extraHosts = hosts nodes;
+    };
+
+    akkoma = { nodes, pkgs, config, ... }: {
+      networking.extraHosts = hosts nodes;
+      networking.firewall.allowedTCPPorts = [ 443 ];
+      environment.systemPackages = with pkgs; [ provisionUser ];
+      systemd.services.akkoma.confinement.enable = confined;
+
+      services.akkoma = {
+        enable = true;
+        package = package;
+        config = {
+          ":pleroma" = {
+            ":instance" = {
+              name = "NixOS test Akkoma server";
+              description = "NixOS test Akkoma server";
+              email = "akkoma@nixos.test";
+              notify_email = "akkoma@nixos.test";
+              registration_open = true;
+            };
+
+            ":media_proxy" = {
+              enabled = false;
+            };
+
+            "Pleroma.Web.Endpoint" = {
+              url.host = "akkoma.nixos.test";
+            };
+          };
+        };
+
+        nginx = {
+          addSSL = true;
+          sslCertificate = "${tlsCert}/cert.pem";
+          sslCertificateKey = "${tlsCert}/key.pem";
+        };
+      };
+
+      services.nginx.enable = true;
+      services.postgresql.enable = true;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+    akkoma.wait_for_unit('akkoma-initdb.service')
+    akkoma.systemctl('restart akkoma-initdb.service')  # test repeated initialisation
+    akkoma.wait_for_unit('akkoma.service')
+    akkoma.wait_for_file('/run/akkoma/socket')
+    akkoma.succeed('${provisionUser}/bin/provisionUser')
+    akkoma.wait_for_unit('nginx.service')
+    client.succeed('${sendToot}/bin/sendToot')
+    client.succeed('${checkFe}/bin/checkFe')
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/alice-lg.nix b/nixpkgs/nixos/tests/alice-lg.nix
new file mode 100644
index 000000000000..640e60030a04
--- /dev/null
+++ b/nixpkgs/nixos/tests/alice-lg.nix
@@ -0,0 +1,44 @@
+# This test does a basic functionality check for alice-lg
+
+{ system ? builtins.currentSystem
+, pkgs ? import ../.. { inherit system; config = { }; }
+}:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  inherit (pkgs.lib) optionalString;
+in
+makeTest {
+  name = "birdwatcher";
+  nodes = {
+    host1 = {
+      environment.systemPackages = with pkgs; [ jq ];
+      services.alice-lg = {
+        enable = true;
+        settings = {
+          server = {
+            listen_http = "[::]:7340";
+            enable_prefix_lookup = true;
+            asn = 1;
+            routes_store_refresh_parallelism = 5;
+            neighbors_store_refresh_parallelism = 10000;
+            routes_store_refresh_interval = 5;
+            neighbors_store_refresh_interval = 5;
+          };
+          housekeeping = {
+            interval = 5;
+            force_release_memory = true;
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    host1.wait_for_unit("alice-lg.service")
+    host1.wait_for_open_port(7340)
+    host1.succeed("curl http://[::]:7340 | grep 'Alice BGP Looking Glass'")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/all-terminfo.nix b/nixpkgs/nixos/tests/all-terminfo.nix
new file mode 100644
index 000000000000..dd47c66ee1c1
--- /dev/null
+++ b/nixpkgs/nixos/tests/all-terminfo.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ... }: rec {
+  name = "all-terminfo";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ jkarlson ];
+  };
+
+  nodes.machine = { pkgs, config, lib, ... }:
+    let
+      infoFilter = name: drv:
+        let
+          o = builtins.tryEval drv;
+        in
+        o.success && lib.isDerivation o.value && o.value ? outputs && builtins.elem "terminfo" o.value.outputs;
+      terminfos = lib.filterAttrs infoFilter pkgs;
+      excludedTerminfos = lib.filterAttrs (_: drv: !(builtins.elem drv.terminfo config.environment.systemPackages)) terminfos;
+      includedOuts = lib.filterAttrs (_: drv: builtins.elem drv.out config.environment.systemPackages) terminfos;
+    in
+    {
+      environment = {
+        enableAllTerminfo = true;
+        etc."terminfo-missing".text = builtins.concatStringsSep "\n" (builtins.attrNames excludedTerminfos);
+        etc."terminfo-extra-outs".text = builtins.concatStringsSep "\n" (builtins.attrNames includedOuts);
+      };
+    };
+
+  testScript =
+    ''
+      machine.fail("grep . /etc/terminfo-missing >&2")
+      machine.fail("grep . /etc/terminfo-extra-outs >&2")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/all-tests.nix b/nixpkgs/nixos/tests/all-tests.nix
new file mode 100644
index 000000000000..367e3da29336
--- /dev/null
+++ b/nixpkgs/nixos/tests/all-tests.nix
@@ -0,0 +1,942 @@
+{ system,
+  pkgs,
+
+  # Projects the test configuration into the desired value; usually
+  # the test runner: `config: config.test`.
+  callTest,
+
+}:
+# The return value of this function will be an attrset of arbitrary depth, with
+# the `anything` returned by callTest at its test leaves.
+# The tests not supported by `system` will be replaced with `{}`, so that
+# `passthru.tests` can contain links to those without breaking on architectures
+# where said tests are unsupported.
+# Example callTest that just extracts the derivation from the test:
+#   callTest = t: t.test;
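+#
+# A minimal sketch of how this file is typically consumed (illustrative only;
+# the system value and the relative path to nixpkgs are assumptions, not fixed
+# by this file):
+#   import ./all-tests.nix {
+#     system = "x86_64-linux";
+#     pkgs = import ../.. { system = "x86_64-linux"; };
+#     callTest = t: t.test;
+#   }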
+
+with pkgs.lib;
+
+let
+  discoverTests = val:
+    if isAttrs val
+    then
+      if hasAttr "test" val then callTest val
+      else mapAttrs (n: s: if n == "passthru" then s else discoverTests s) val
+    else if isFunction val
+      then
+        # Tests based on make-test-python.nix will return the second lambda
+        # in that file, which is then forwarded to the test definition
+        # following the `import make-test-python.nix` expression
+        # (if it is a function).
+        discoverTests (val { inherit system pkgs; })
+      else val;
+  handleTest = path: args:
+    discoverTests (import path ({ inherit system pkgs; } // args));
+  handleTestOn = systems: path: args:
+    if elem system systems then handleTest path args
+    else {};
+
+  nixosLib = import ../lib {
+    # Experimental features need testing too, but there's no point in warning
+    # about it, so we enable the feature flag.
+    featureFlags.minimalModules = {};
+  };
+  evalMinimalConfig = module: nixosLib.evalModules { modules = [ module ]; };
+
+  inherit
+    (rec {
+      doRunTest = arg: ((import ../lib/testing-python.nix { inherit system pkgs; }).evalTest {
+        imports = [ arg readOnlyPkgs ];
+      }).config.result;
+      findTests = tree:
+        if tree?recurseForDerivations && tree.recurseForDerivations
+        then
+          mapAttrs
+            (k: findTests)
+            (builtins.removeAttrs tree ["recurseForDerivations"])
+        else callTest tree;
+
+      runTest = arg: let r = doRunTest arg; in findTests r;
+      runTestOn = systems: arg:
+        if elem system systems then runTest arg
+        else {};
+    })
+    runTest
+    runTestOn
+    ;
+
+  # Using a single instance of nixpkgs makes test evaluation faster.
+  # To make sure we don't accidentally depend on a modified pkgs, we make the
+  # related options read-only. We need to test the right configuration.
+  #
+  # If your service depends on a nixpkgs setting, first try to avoid that, but
+  # otherwise, you can remove the readOnlyPkgs import and test your service as
+  # usual.
+  readOnlyPkgs =
+    # TODO: We currently accept this for nixosTests, so that the `pkgs` argument
+    #       is consistent with `pkgs` in `pkgs.nixosTests`. Can we reinitialize
+    #       it with `allowAliases = false`?
+    # warnIf pkgs.config.allowAliases "nixosTests: pkgs includes aliases."
+    {
+      _class = "nixosTest";
+      node.pkgs = pkgs;
+    };
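+
+  # A hedged sketch of the escape hatch mentioned above: a test that genuinely
+  # needs its own nixpkgs settings could be evaluated without the read-only
+  # module (the name `doRunTestMutable` is illustrative and not used below):
+  #   doRunTestMutable = arg: ((import ../lib/testing-python.nix { inherit system pkgs; }).evalTest {
+  #     imports = [ arg ];
+  #   }).config.result;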
+
+in {
+
+  # Testing the test driver
+  nixos-test-driver = {
+    extra-python-packages = handleTest ./nixos-test-driver/extra-python-packages.nix {};
+    lib-extend = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./nixos-test-driver/lib-extend.nix {};
+    node-name = runTest ./nixos-test-driver/node-name.nix;
+    busybox = runTest ./nixos-test-driver/busybox.nix;
+    driver-timeout = pkgs.runCommand "ensure-timeout-induced-failure" {
+      failed = pkgs.testers.testBuildFailure ((runTest ./nixos-test-driver/timeout.nix).config.rawTestDerivation);
+    } ''
+      grep -F "timeout reached; test terminating" $failed/testBuildFailure.log
+      # The program will always be terminated by SIGTERM (143) if it waits for the deadline thread.
+      [[ 143 = $(cat $failed/testBuildFailure.exit) ]]
+      touch $out
+    '';
+  };
+
+  # NixOS vm tests and non-vm unit tests
+
+  _3proxy = runTest ./3proxy.nix;
+  aaaaxy = runTest ./aaaaxy.nix;
+  acme = runTest ./acme.nix;
+  acme-dns = handleTest ./acme-dns.nix {};
+  adguardhome = runTest ./adguardhome.nix;
+  aesmd = runTestOn ["x86_64-linux"] ./aesmd.nix;
+  agate = runTest ./web-servers/agate.nix;
+  agda = handleTest ./agda.nix {};
+  airsonic = handleTest ./airsonic.nix {};
+  akkoma = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./akkoma.nix {};
+  akkoma-confined = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./akkoma.nix { confined = true; };
+  alice-lg = handleTest ./alice-lg.nix {};
+  allTerminfo = handleTest ./all-terminfo.nix {};
+  alps = handleTest ./alps.nix {};
+  amazon-init-shell = handleTest ./amazon-init-shell.nix {};
+  amazon-ssm-agent = handleTest ./amazon-ssm-agent.nix {};
+  amd-sev = runTest ./amd-sev.nix;
+  anbox = runTest ./anbox.nix;
+  anuko-time-tracker = handleTest ./anuko-time-tracker.nix {};
+  apcupsd = handleTest ./apcupsd.nix {};
+  apfs = runTest ./apfs.nix;
+  appliance-repart-image = runTest ./appliance-repart-image.nix;
+  apparmor = handleTest ./apparmor.nix {};
+  archi = handleTest ./archi.nix {};
+  atd = handleTest ./atd.nix {};
+  atop = handleTest ./atop.nix {};
+  atuin = handleTest ./atuin.nix {};
+  audiobookshelf = handleTest ./audiobookshelf.nix {};
+  auth-mysql = handleTest ./auth-mysql.nix {};
+  authelia = handleTest ./authelia.nix {};
+  avahi = handleTest ./avahi.nix {};
+  avahi-with-resolved = handleTest ./avahi.nix { networkd = true; };
+  babeld = handleTest ./babeld.nix {};
+  bazarr = handleTest ./bazarr.nix {};
+  bcachefs = handleTestOn ["x86_64-linux" "aarch64-linux"] ./bcachefs.nix {};
+  beanstalkd = handleTest ./beanstalkd.nix {};
+  bees = handleTest ./bees.nix {};
+  binary-cache = handleTest ./binary-cache.nix {};
+  bind = handleTest ./bind.nix {};
+  bird = handleTest ./bird.nix {};
+  birdwatcher = handleTest ./birdwatcher.nix {};
+  bitcoind = handleTest ./bitcoind.nix {};
+  bittorrent = handleTest ./bittorrent.nix {};
+  blockbook-frontend = handleTest ./blockbook-frontend.nix {};
+  blocky = handleTest ./blocky.nix {};
+  boot = handleTestOn ["x86_64-linux" "aarch64-linux"] ./boot.nix {};
+  bootspec = handleTestOn ["x86_64-linux"] ./bootspec.nix {};
+  boot-stage1 = handleTest ./boot-stage1.nix {};
+  borgbackup = handleTest ./borgbackup.nix {};
+  botamusique = handleTest ./botamusique.nix {};
+  bpf = handleTestOn ["x86_64-linux" "aarch64-linux"] ./bpf.nix {};
+  bpftune = handleTest ./bpftune.nix {};
+  breitbandmessung = handleTest ./breitbandmessung.nix {};
+  brscan5 = handleTest ./brscan5.nix {};
+  btrbk = handleTest ./btrbk.nix {};
+  btrbk-doas = handleTest ./btrbk-doas.nix {};
+  btrbk-no-timer = handleTest ./btrbk-no-timer.nix {};
+  btrbk-section-order = handleTest ./btrbk-section-order.nix {};
+  budgie = handleTest ./budgie.nix {};
+  buildbot = handleTest ./buildbot.nix {};
+  buildkite-agents = handleTest ./buildkite-agents.nix {};
+  c2fmzq = handleTest ./c2fmzq.nix {};
+  caddy = handleTest ./caddy.nix {};
+  cadvisor = handleTestOn ["x86_64-linux"] ./cadvisor.nix {};
+  cage = handleTest ./cage.nix {};
+  cagebreak = handleTest ./cagebreak.nix {};
+  calibre-web = handleTest ./calibre-web.nix {};
+  calibre-server = handleTest ./calibre-server.nix {};
+  castopod = handleTest ./castopod.nix {};
+  cassandra_3_0 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_3_0; };
+  cassandra_3_11 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_3_11; };
+  cassandra_4 = handleTest ./cassandra.nix { testPackage = pkgs.cassandra_4; };
+  ceph-multi-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-multi-node.nix {};
+  ceph-single-node = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node.nix {};
+  ceph-single-node-bluestore = handleTestOn [ "aarch64-linux" "x86_64-linux" ] ./ceph-single-node-bluestore.nix {};
+  certmgr = handleTest ./certmgr.nix {};
+  cfssl = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cfssl.nix {};
+  cgit = handleTest ./cgit.nix {};
+  charliecloud = handleTest ./charliecloud.nix {};
+  chromium = (handleTestOn ["aarch64-linux" "x86_64-linux"] ./chromium.nix {}).stable or {};
+  chrony = handleTestOn ["aarch64-linux" "x86_64-linux"] ./chrony.nix {};
+  chrony-ptp = handleTestOn ["aarch64-linux" "x86_64-linux"] ./chrony-ptp.nix {};
+  cinnamon = handleTest ./cinnamon.nix {};
+  cjdns = handleTest ./cjdns.nix {};
+  clickhouse = handleTest ./clickhouse.nix {};
+  cloud-init = handleTest ./cloud-init.nix {};
+  cloud-init-hostname = handleTest ./cloud-init-hostname.nix {};
+  cloudlog = handleTest ./cloudlog.nix {};
+  cntr = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cntr.nix {};
+  cockpit = handleTest ./cockpit.nix {};
+  cockroachdb = handleTestOn ["x86_64-linux"] ./cockroachdb.nix {};
+  coder = handleTest ./coder.nix {};
+  collectd = handleTest ./collectd.nix {};
+  connman = handleTest ./connman.nix {};
+  consul = handleTest ./consul.nix {};
+  consul-template = handleTest ./consul-template.nix {};
+  containers-bridge = handleTest ./containers-bridge.nix {};
+  containers-custom-pkgs = handleTest ./containers-custom-pkgs.nix {};
+  containers-ephemeral = handleTest ./containers-ephemeral.nix {};
+  containers-extra_veth = handleTest ./containers-extra_veth.nix {};
+  containers-hosts = handleTest ./containers-hosts.nix {};
+  containers-imperative = handleTest ./containers-imperative.nix {};
+  containers-ip = handleTest ./containers-ip.nix {};
+  containers-macvlans = handleTest ./containers-macvlans.nix {};
+  containers-names = handleTest ./containers-names.nix {};
+  containers-nested = handleTest ./containers-nested.nix {};
+  containers-physical_interfaces = handleTest ./containers-physical_interfaces.nix {};
+  containers-portforward = handleTest ./containers-portforward.nix {};
+  containers-reloadable = handleTest ./containers-reloadable.nix {};
+  containers-restart_networking = handleTest ./containers-restart_networking.nix {};
+  containers-tmpfs = handleTest ./containers-tmpfs.nix {};
+  containers-unified-hierarchy = handleTest ./containers-unified-hierarchy.nix {};
+  convos = handleTest ./convos.nix {};
+  corerad = handleTest ./corerad.nix {};
+  coturn = handleTest ./coturn.nix {};
+  couchdb = handleTest ./couchdb.nix {};
+  cri-o = handleTestOn ["aarch64-linux" "x86_64-linux"] ./cri-o.nix {};
+  cups-pdf = handleTest ./cups-pdf.nix {};
+  curl-impersonate = handleTest ./curl-impersonate.nix {};
+  custom-ca = handleTest ./custom-ca.nix {};
+  croc = handleTest ./croc.nix {};
+  darling = handleTest ./darling.nix {};
+  dae = handleTest ./dae.nix {};
+  dconf = handleTest ./dconf.nix {};
+  deconz = handleTest ./deconz.nix {};
+  deepin = handleTest ./deepin.nix {};
+  deluge = handleTest ./deluge.nix {};
+  dendrite = handleTest ./matrix/dendrite.nix {};
+  dex-oidc = handleTest ./dex-oidc.nix {};
+  dhparams = handleTest ./dhparams.nix {};
+  disable-installer-tools = handleTest ./disable-installer-tools.nix {};
+  discourse = handleTest ./discourse.nix {};
+  dnscrypt-proxy2 = handleTestOn ["x86_64-linux"] ./dnscrypt-proxy2.nix {};
+  dnscrypt-wrapper = runTestOn ["x86_64-linux"] ./dnscrypt-wrapper;
+  dnsdist = handleTest ./dnsdist.nix {};
+  doas = handleTest ./doas.nix {};
+  docker = handleTestOn ["aarch64-linux" "x86_64-linux"] ./docker.nix {};
+  docker-rootless = handleTestOn ["aarch64-linux" "x86_64-linux"] ./docker-rootless.nix {};
+  docker-registry = handleTest ./docker-registry.nix {};
+  docker-tools = handleTestOn ["x86_64-linux"] ./docker-tools.nix {};
+  docker-tools-cross = handleTestOn ["x86_64-linux" "aarch64-linux"] ./docker-tools-cross.nix {};
+  docker-tools-overlay = handleTestOn ["x86_64-linux"] ./docker-tools-overlay.nix {};
+  documize = handleTest ./documize.nix {};
+  documentation = pkgs.callPackage ../modules/misc/documentation/test.nix { inherit nixosLib; };
+  doh-proxy-rust = handleTest ./doh-proxy-rust.nix {};
+  dokuwiki = handleTest ./dokuwiki.nix {};
+  dolibarr = handleTest ./dolibarr.nix {};
+  domination = handleTest ./domination.nix {};
+  dovecot = handleTest ./dovecot.nix {};
+  drbd = handleTest ./drbd.nix {};
+  earlyoom = handleTestOn ["x86_64-linux"] ./earlyoom.nix {};
+  early-mount-options = handleTest ./early-mount-options.nix {};
+  ec2-config = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-config or {};
+  ec2-nixops = (handleTestOn ["x86_64-linux"] ./ec2.nix {}).boot-ec2-nixops or {};
+  ecryptfs = handleTest ./ecryptfs.nix {};
+  fscrypt = handleTest ./fscrypt.nix {};
+  fastnetmon-advanced = runTest ./fastnetmon-advanced.nix;
+  ejabberd = handleTest ./xmpp/ejabberd.nix {};
+  elk = handleTestOn ["x86_64-linux"] ./elk.nix {};
+  emacs-daemon = handleTest ./emacs-daemon.nix {};
+  endlessh = handleTest ./endlessh.nix {};
+  endlessh-go = handleTest ./endlessh-go.nix {};
+  engelsystem = handleTest ./engelsystem.nix {};
+  enlightenment = handleTest ./enlightenment.nix {};
+  env = handleTest ./env.nix {};
+  envfs = handleTest ./envfs.nix {};
+  envoy = handleTest ./envoy.nix {};
+  ergo = handleTest ./ergo.nix {};
+  ergochat = handleTest ./ergochat.nix {};
+  eris-server = handleTest ./eris-server.nix {};
+  esphome = handleTest ./esphome.nix {};
+  etc = pkgs.callPackage ../modules/system/etc/test.nix { inherit evalMinimalConfig; };
+  activation = pkgs.callPackage ../modules/system/activation/test.nix { };
+  activation-var = runTest ./activation/var.nix;
+  activation-nix-channel = runTest ./activation/nix-channel.nix;
+  etcd = handleTestOn ["x86_64-linux"] ./etcd.nix {};
+  etcd-cluster = handleTestOn ["x86_64-linux"] ./etcd-cluster.nix {};
+  etebase-server = handleTest ./etebase-server.nix {};
+  etesync-dav = handleTest ./etesync-dav.nix {};
+  evcc = handleTest ./evcc.nix {};
+  fail2ban = handleTest ./fail2ban.nix { };
+  fakeroute = handleTest ./fakeroute.nix {};
+  fancontrol = handleTest ./fancontrol.nix {};
+  fanout = handleTest ./fanout.nix {};
+  fcitx5 = handleTest ./fcitx5 {};
+  fenics = handleTest ./fenics.nix {};
+  ferm = handleTest ./ferm.nix {};
+  ferretdb = handleTest ./ferretdb.nix {};
+  firefox = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox; };
+  firefox-beta = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-beta; };
+  firefox-devedition = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-devedition; };
+  firefox-esr    = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr; }; # used in `tested` job
+  firefox-esr-115 = handleTest ./firefox.nix { firefoxPackage = pkgs.firefox-esr-115; };
+  firejail = handleTest ./firejail.nix {};
+  firewall = handleTest ./firewall.nix { nftables = false; };
+  firewall-nftables = handleTest ./firewall.nix { nftables = true; };
+  fish = handleTest ./fish.nix {};
+  flannel = handleTestOn ["x86_64-linux"] ./flannel.nix {};
+  floorp = handleTest ./firefox.nix { firefoxPackage = pkgs.floorp; };
+  fluentd = handleTest ./fluentd.nix {};
+  fluidd = handleTest ./fluidd.nix {};
+  fontconfig-default-fonts = handleTest ./fontconfig-default-fonts.nix {};
+  forgejo = handleTest ./forgejo.nix { };
+  freenet = handleTest ./freenet.nix {};
+  freeswitch = handleTest ./freeswitch.nix {};
+  freetube = discoverTests (import ./freetube.nix);
+  freshrss-sqlite = handleTest ./freshrss-sqlite.nix {};
+  freshrss-pgsql = handleTest ./freshrss-pgsql.nix {};
+  frigate = handleTest ./frigate.nix {};
+  frp = handleTest ./frp.nix {};
+  frr = handleTest ./frr.nix {};
+  fsck = handleTest ./fsck.nix {};
+  fsck-systemd-stage-1 = handleTest ./fsck.nix { systemdStage1 = true; };
+  ft2-clone = handleTest ./ft2-clone.nix {};
+  legit = handleTest ./legit.nix {};
+  mimir = handleTest ./mimir.nix {};
+  garage = handleTest ./garage {};
+  gemstash = handleTest ./gemstash.nix {};
+  gerrit = handleTest ./gerrit.nix {};
+  geth = handleTest ./geth.nix {};
+  ghostunnel = handleTest ./ghostunnel.nix {};
+  gitdaemon = handleTest ./gitdaemon.nix {};
+  gitea = handleTest ./gitea.nix { giteaPackage = pkgs.gitea; };
+  github-runner = handleTest ./github-runner.nix {};
+  gitlab = runTest ./gitlab.nix;
+  gitolite = handleTest ./gitolite.nix {};
+  gitolite-fcgiwrap = handleTest ./gitolite-fcgiwrap.nix {};
+  glusterfs = handleTest ./glusterfs.nix {};
+  gnome = handleTest ./gnome.nix {};
+  gnome-flashback = handleTest ./gnome-flashback.nix {};
+  gnome-xorg = handleTest ./gnome-xorg.nix {};
+  gnupg = handleTest ./gnupg.nix {};
+  go-neb = handleTest ./go-neb.nix {};
+  gobgpd = handleTest ./gobgpd.nix {};
+  gocd-agent = handleTest ./gocd-agent.nix {};
+  gocd-server = handleTest ./gocd-server.nix {};
+  gollum = handleTest ./gollum.nix {};
+  gonic = handleTest ./gonic.nix {};
+  google-oslogin = handleTest ./google-oslogin {};
+  goss = handleTest ./goss.nix {};
+  gotify-server = handleTest ./gotify-server.nix {};
+  gotosocial = runTest ./web-apps/gotosocial.nix;
+  grafana = handleTest ./grafana {};
+  grafana-agent = handleTest ./grafana-agent.nix {};
+  graphite = handleTest ./graphite.nix {};
+  graylog = handleTest ./graylog.nix {};
+  grocy = handleTest ./grocy.nix {};
+  grow-partition = runTest ./grow-partition.nix;
+  grub = handleTest ./grub.nix {};
+  guacamole-server = handleTest ./guacamole-server.nix {};
+  gvisor = handleTest ./gvisor.nix {};
+  hadoop = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop; };
+  hadoop_3_2 = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop_3_2; };
+  hadoop2 = import ./hadoop { inherit handleTestOn; package=pkgs.hadoop2; };
+  haka = handleTest ./haka.nix {};
+  haste-server = handleTest ./haste-server.nix {};
+  haproxy = handleTest ./haproxy.nix {};
+  hardened = handleTest ./hardened.nix {};
+  harmonia = runTest ./harmonia.nix;
+  headscale = handleTest ./headscale.nix {};
+  healthchecks = handleTest ./web-apps/healthchecks.nix {};
+  hbase2 = handleTest ./hbase.nix { package=pkgs.hbase2; };
+  hbase_2_4 = handleTest ./hbase.nix { package=pkgs.hbase_2_4; };
+  hbase3 = handleTest ./hbase.nix { package=pkgs.hbase3; };
+  hddfancontrol = handleTest ./hddfancontrol.nix {};
+  hedgedoc = handleTest ./hedgedoc.nix {};
+  herbstluftwm = handleTest ./herbstluftwm.nix {};
+  homepage-dashboard = handleTest ./homepage-dashboard.nix {};
+  honk = runTest ./honk.nix;
+  installed-tests = pkgs.recurseIntoAttrs (handleTest ./installed-tests {});
+  invidious = handleTest ./invidious.nix {};
+  livebook-service = handleTest ./livebook-service.nix {};
+  oci-containers = handleTestOn ["aarch64-linux" "x86_64-linux"] ./oci-containers.nix {};
+  odoo = handleTest ./odoo.nix {};
+  odoo15 = handleTest ./odoo.nix { package = pkgs.odoo15; };
+  # The 9pnet_virtio transport used to mount the /nix partition doesn't support
+  # hibernation. This test happens to work on x86_64-linux but
+  # not on other platforms.
+  hibernate = handleTestOn ["x86_64-linux"] ./hibernate.nix {};
+  hibernate-systemd-stage-1 = handleTestOn ["x86_64-linux"] ./hibernate.nix { systemdStage1 = true; };
+  hitch = handleTest ./hitch {};
+  hledger-web = handleTest ./hledger-web.nix {};
+  hocker-fetchdocker = handleTest ./hocker-fetchdocker {};
+  hockeypuck = handleTest ./hockeypuck.nix { };
+  home-assistant = handleTest ./home-assistant.nix {};
+  hostname = handleTest ./hostname.nix {};
+  hound = handleTest ./hound.nix {};
+  hub = handleTest ./git/hub.nix {};
+  hydra = handleTest ./hydra {};
+  i3wm = handleTest ./i3wm.nix {};
+  icingaweb2 = handleTest ./icingaweb2.nix {};
+  iftop = handleTest ./iftop.nix {};
+  incron = handleTest ./incron.nix {};
+  incus = pkgs.recurseIntoAttrs (handleTest ./incus { inherit handleTestOn; });
+  influxdb = handleTest ./influxdb.nix {};
+  influxdb2 = handleTest ./influxdb2.nix {};
+  initrd-network-openvpn = handleTest ./initrd-network-openvpn {};
+  initrd-network-ssh = handleTest ./initrd-network-ssh {};
+  initrd-luks-empty-passphrase = handleTest ./initrd-luks-empty-passphrase.nix {};
+  initrdNetwork = handleTest ./initrd-network.nix {};
+  initrd-secrets = handleTest ./initrd-secrets.nix {};
+  initrd-secrets-changing = handleTest ./initrd-secrets-changing.nix {};
+  input-remapper = handleTest ./input-remapper.nix {};
+  inspircd = handleTest ./inspircd.nix {};
+  installer = handleTest ./installer.nix {};
+  installer-systemd-stage-1 = handleTest ./installer-systemd-stage-1.nix {};
+  invoiceplane = handleTest ./invoiceplane.nix {};
+  iodine = handleTest ./iodine.nix {};
+  ipv6 = handleTest ./ipv6.nix {};
+  iscsi-multipath-root = handleTest ./iscsi-multipath-root.nix {};
+  iscsi-root = handleTest ./iscsi-root.nix {};
+  isso = handleTest ./isso.nix {};
+  jackett = handleTest ./jackett.nix {};
+  jellyfin = handleTest ./jellyfin.nix {};
+  jenkins = handleTest ./jenkins.nix {};
+  jenkins-cli = handleTest ./jenkins-cli.nix {};
+  jibri = handleTest ./jibri.nix {};
+  jirafeau = handleTest ./jirafeau.nix {};
+  jitsi-meet = handleTest ./jitsi-meet.nix {};
+  jool = import ./jool.nix { inherit pkgs runTest; };
+  k3s = handleTest ./k3s {};
+  kafka = handleTest ./kafka.nix {};
+  kanidm = handleTest ./kanidm.nix {};
+  karma = handleTest ./karma.nix {};
+  kavita = handleTest ./kavita.nix {};
+  kbd-setfont-decompress = handleTest ./kbd-setfont-decompress.nix {};
+  kbd-update-search-paths-patch = handleTest ./kbd-update-search-paths-patch.nix {};
+  kea = handleTest ./kea.nix {};
+  keepalived = handleTest ./keepalived.nix {};
+  keepassxc = handleTest ./keepassxc.nix {};
+  kerberos = handleTest ./kerberos/default.nix {};
+  kernel-generic = handleTest ./kernel-generic.nix {};
+  kernel-latest-ath-user-regd = handleTest ./kernel-latest-ath-user-regd.nix {};
+  keter = handleTest ./keter.nix {};
+  kexec = handleTest ./kexec.nix {};
+  keycloak = discoverTests (import ./keycloak.nix);
+  keyd = handleTest ./keyd.nix {};
+  keymap = handleTest ./keymap.nix {};
+  knot = handleTest ./knot.nix {};
+  komga = handleTest ./komga.nix {};
+  krb5 = discoverTests (import ./krb5 {});
+  ksm = handleTest ./ksm.nix {};
+  kthxbye = handleTest ./kthxbye.nix {};
+  kubernetes = handleTestOn ["x86_64-linux"] ./kubernetes {};
+  kubo = import ./kubo { inherit recurseIntoAttrs runTest; };
+  ladybird = handleTest ./ladybird.nix {};
+  languagetool = handleTest ./languagetool.nix {};
+  lanraragi = handleTest ./lanraragi.nix {};
+  latestKernel.login = handleTest ./login.nix { latestKernel = true; };
+  leaps = handleTest ./leaps.nix {};
+  lemmy = handleTest ./lemmy.nix {};
+  libinput = handleTest ./libinput.nix {};
+  libreddit = handleTest ./libreddit.nix {};
+  librenms = handleTest ./librenms.nix {};
+  libresprite = handleTest ./libresprite.nix {};
+  libreswan = handleTest ./libreswan.nix {};
+  librewolf = handleTest ./firefox.nix { firefoxPackage = pkgs.librewolf; };
+  libuiohook = handleTest ./libuiohook.nix {};
+  libvirtd = handleTest ./libvirtd.nix {};
+  lidarr = handleTest ./lidarr.nix {};
+  lightdm = handleTest ./lightdm.nix {};
+  lighttpd = handleTest ./lighttpd.nix {};
+  limesurvey = handleTest ./limesurvey.nix {};
+  listmonk = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./listmonk.nix {};
+  litestream = handleTest ./litestream.nix {};
+  lldap = handleTest ./lldap.nix {};
+  locate = handleTest ./locate.nix {};
+  login = handleTest ./login.nix {};
+  logrotate = handleTest ./logrotate.nix {};
+  loki = handleTest ./loki.nix {};
+  luks = handleTest ./luks.nix {};
+  lvm2 = handleTest ./lvm2 {};
+  lxd = pkgs.recurseIntoAttrs (handleTest ./lxd { inherit handleTestOn; });
+  lxd-image-server = handleTest ./lxd-image-server.nix {};
+  #logstash = handleTest ./logstash.nix {};
+  lorri = handleTest ./lorri/default.nix {};
+  maddy = discoverTests (import ./maddy { inherit handleTest; });
+  maestral = handleTest ./maestral.nix {};
+  magic-wormhole-mailbox-server = handleTest ./magic-wormhole-mailbox-server.nix {};
+  magnetico = handleTest ./magnetico.nix {};
+  mailcatcher = handleTest ./mailcatcher.nix {};
+  mailhog = handleTest ./mailhog.nix {};
+  mailman = handleTest ./mailman.nix {};
+  man = handleTest ./man.nix {};
+  mariadb-galera = handleTest ./mysql/mariadb-galera.nix {};
+  mastodon = discoverTests (import ./web-apps/mastodon { inherit handleTestOn; });
+  pixelfed = discoverTests (import ./web-apps/pixelfed { inherit handleTestOn; });
+  mate = handleTest ./mate.nix {};
+  matomo = handleTest ./matomo.nix {};
+  matrix-appservice-irc = handleTest ./matrix/appservice-irc.nix {};
+  matrix-conduit = handleTest ./matrix/conduit.nix {};
+  matrix-synapse = handleTest ./matrix/synapse.nix {};
+  matrix-synapse-workers = handleTest ./matrix/synapse-workers.nix {};
+  mattermost = handleTest ./mattermost.nix {};
+  mediamtx = handleTest ./mediamtx.nix {};
+  mediatomb = handleTest ./mediatomb.nix {};
+  mediawiki = handleTest ./mediawiki.nix {};
+  meilisearch = handleTest ./meilisearch.nix {};
+  memcached = handleTest ./memcached.nix {};
+  merecat = handleTest ./merecat.nix {};
+  metabase = handleTest ./metabase.nix {};
+  mindustry = handleTest ./mindustry.nix {};
+  minecraft = handleTest ./minecraft.nix {};
+  minecraft-server = handleTest ./minecraft-server.nix {};
+  minidlna = handleTest ./minidlna.nix {};
+  miniflux = handleTest ./miniflux.nix {};
+  minio = handleTest ./minio.nix {};
+  miriway = handleTest ./miriway.nix {};
+  misc = handleTest ./misc.nix {};
+  mjolnir = handleTest ./matrix/mjolnir.nix {};
+  mobilizon = handleTest ./mobilizon.nix {};
+  mod_perl = handleTest ./mod_perl.nix {};
+  molly-brown = handleTest ./molly-brown.nix {};
+  monica = handleTest ./web-apps/monica.nix {};
+  mongodb = handleTest ./mongodb.nix {};
+  moodle = handleTest ./moodle.nix {};
+  moonraker = handleTest ./moonraker.nix {};
+  morty = handleTest ./morty.nix {};
+  mosquitto = handleTest ./mosquitto.nix {};
+  moosefs = handleTest ./moosefs.nix {};
+  mpd = handleTest ./mpd.nix {};
+  mpv = handleTest ./mpv.nix {};
+  mtp = handleTest ./mtp.nix {};
+  multipass = handleTest ./multipass.nix {};
+  mumble = handleTest ./mumble.nix {};
+  # Fails on aarch64-linux at the PDF creation step - need to debug this on an
+  # aarch64 machine.
+  musescore = handleTestOn ["x86_64-linux"] ./musescore.nix {};
+  munin = handleTest ./munin.nix {};
+  mutableUsers = handleTest ./mutable-users.nix {};
+  mxisd = handleTest ./mxisd.nix {};
+  mysql = handleTest ./mysql/mysql.nix {};
+  mysql-autobackup = handleTest ./mysql/mysql-autobackup.nix {};
+  mysql-backup = handleTest ./mysql/mysql-backup.nix {};
+  mysql-replication = handleTest ./mysql/mysql-replication.nix {};
+  n8n = handleTest ./n8n.nix {};
+  nagios = handleTest ./nagios.nix {};
+  nar-serve = handleTest ./nar-serve.nix {};
+  nat.firewall = handleTest ./nat.nix { withFirewall = true; };
+  nat.standalone = handleTest ./nat.nix { withFirewall = false; };
+  nat.nftables.firewall = handleTest ./nat.nix { withFirewall = true; nftables = true; };
+  nat.nftables.standalone = handleTest ./nat.nix { withFirewall = false; nftables = true; };
+  nats = handleTest ./nats.nix {};
+  navidrome = handleTest ./navidrome.nix {};
+  nbd = handleTest ./nbd.nix {};
+  ncdns = handleTest ./ncdns.nix {};
+  ndppd = handleTest ./ndppd.nix {};
+  nebula = handleTest ./nebula.nix {};
+  netbird = handleTest ./netbird.nix {};
+  neo4j = handleTest ./neo4j.nix {};
+  netdata = handleTest ./netdata.nix {};
+  networking.networkd = handleTest ./networking.nix { networkd = true; };
+  networking.scripted = handleTest ./networking.nix { networkd = false; };
+  netbox_3_5 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_5; };
+  netbox_3_6 = handleTest ./web-apps/netbox.nix { netbox = pkgs.netbox_3_6; };
+  netbox-upgrade = handleTest ./web-apps/netbox-upgrade.nix {};
+  # TODO: put in networking.nix after the test becomes more complete
+  networkingProxy = handleTest ./networking-proxy.nix {};
+  nextcloud = handleTest ./nextcloud {};
+  nexus = handleTest ./nexus.nix {};
+  # TODO: Test nfsv3 + Kerberos
+  nfs3 = handleTest ./nfs { version = 3; };
+  nfs4 = handleTest ./nfs { version = 4; };
+  nghttpx = handleTest ./nghttpx.nix {};
+  nginx = handleTest ./nginx.nix {};
+  nginx-auth = handleTest ./nginx-auth.nix {};
+  nginx-etag = handleTest ./nginx-etag.nix {};
+  nginx-globalredirect = handleTest ./nginx-globalredirect.nix {};
+  nginx-http3 = handleTest ./nginx-http3.nix {};
+  nginx-modsecurity = handleTest ./nginx-modsecurity.nix {};
+  nginx-njs = handleTest ./nginx-njs.nix {};
+  nginx-proxyprotocol = handleTest ./nginx-proxyprotocol {};
+  nginx-pubhtml = handleTest ./nginx-pubhtml.nix {};
+  nginx-sso = handleTest ./nginx-sso.nix {};
+  nginx-status-page = handleTest ./nginx-status-page.nix {};
+  nginx-tmpdir = handleTest ./nginx-tmpdir.nix {};
+  nginx-unix-socket = handleTest ./nginx-unix-socket.nix {};
+  nginx-variants = handleTest ./nginx-variants.nix {};
+  nifi = handleTestOn ["x86_64-linux"] ./web-apps/nifi.nix {};
+  nitter = handleTest ./nitter.nix {};
+  nix-ld = handleTest ./nix-ld.nix {};
+  nix-serve = handleTest ./nix-serve.nix {};
+  nix-serve-ssh = handleTest ./nix-serve-ssh.nix {};
+  nixops = handleTest ./nixops/default.nix {};
+  nixos-generate-config = handleTest ./nixos-generate-config.nix {};
+  nixos-rebuild-install-bootloader = handleTestOn ["x86_64-linux"] ./nixos-rebuild-install-bootloader.nix {};
+  nixos-rebuild-specialisations = handleTestOn ["x86_64-linux"] ./nixos-rebuild-specialisations.nix {};
+  nixpkgs = pkgs.callPackage ../modules/misc/nixpkgs/test.nix { inherit evalMinimalConfig; };
+  node-red = handleTest ./node-red.nix {};
+  nomad = handleTest ./nomad.nix {};
+  non-default-filesystems = handleTest ./non-default-filesystems.nix {};
+  non-switchable-system = runTest ./non-switchable-system.nix;
+  noto-fonts = handleTest ./noto-fonts.nix {};
+  noto-fonts-cjk-qt-default-weight = handleTest ./noto-fonts-cjk-qt-default-weight.nix {};
+  novacomd = handleTestOn ["x86_64-linux"] ./novacomd.nix {};
+  nscd = handleTest ./nscd.nix {};
+  nsd = handleTest ./nsd.nix {};
+  ntfy-sh = handleTest ./ntfy-sh.nix {};
+  nzbget = handleTest ./nzbget.nix {};
+  nzbhydra2 = handleTest ./nzbhydra2.nix {};
+  oh-my-zsh = handleTest ./oh-my-zsh.nix {};
+  ombi = handleTest ./ombi.nix {};
+  openarena = handleTest ./openarena.nix {};
+  openldap = handleTest ./openldap.nix {};
+  opensearch = discoverTests (import ./opensearch.nix);
+  openresty-lua = handleTest ./openresty-lua.nix {};
+  opensmtpd = handleTest ./opensmtpd.nix {};
+  opensmtpd-rspamd = handleTest ./opensmtpd-rspamd.nix {};
+  opensnitch = handleTest ./opensnitch.nix {};
+  openssh = handleTest ./openssh.nix {};
+  octoprint = handleTest ./octoprint.nix {};
+  openstack-image-metadata = (handleTestOn ["x86_64-linux"] ./openstack-image.nix {}).metadata or {};
+  openstack-image-userdata = (handleTestOn ["x86_64-linux"] ./openstack-image.nix {}).userdata or {};
+  opentabletdriver = handleTest ./opentabletdriver.nix {};
+  opentelemetry-collector = handleTest ./opentelemetry-collector.nix {};
+  ocsinventory-agent = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./ocsinventory-agent.nix {};
+  owncast = handleTest ./owncast.nix {};
+  outline = handleTest ./outline.nix {};
+  image-contents = handleTest ./image-contents.nix {};
+  openvscode-server = handleTest ./openvscode-server.nix {};
+  orangefs = handleTest ./orangefs.nix {};
+  os-prober = handleTestOn ["x86_64-linux"] ./os-prober.nix {};
+  osquery = handleTestOn ["x86_64-linux"] ./osquery.nix {};
+  osrm-backend = handleTest ./osrm-backend.nix {};
+  overlayfs = handleTest ./overlayfs.nix {};
+  pacemaker = handleTest ./pacemaker.nix {};
+  packagekit = handleTest ./packagekit.nix {};
+  pam-file-contents = handleTest ./pam/pam-file-contents.nix {};
+  pam-oath-login = handleTest ./pam/pam-oath-login.nix {};
+  pam-u2f = handleTest ./pam/pam-u2f.nix {};
+  pam-ussh = handleTest ./pam/pam-ussh.nix {};
+  pam-zfs-key = handleTest ./pam/zfs-key.nix {};
+  pass-secret-service = handleTest ./pass-secret-service.nix {};
+  patroni = handleTestOn ["x86_64-linux"] ./patroni.nix {};
+  pantalaimon = handleTest ./matrix/pantalaimon.nix {};
+  pantheon = handleTest ./pantheon.nix {};
+  paperless = handleTest ./paperless.nix {};
+  parsedmarc = handleTest ./parsedmarc {};
+  pdns-recursor = handleTest ./pdns-recursor.nix {};
+  peerflix = handleTest ./peerflix.nix {};
+  peering-manager = handleTest ./web-apps/peering-manager.nix {};
+  peertube = handleTestOn ["x86_64-linux"] ./web-apps/peertube.nix {};
+  peroxide = handleTest ./peroxide.nix {};
+  pgadmin4 = handleTest ./pgadmin4.nix {};
+  pgbouncer = handleTest ./pgbouncer.nix {};
+  pgjwt = handleTest ./pgjwt.nix {};
+  pgmanage = handleTest ./pgmanage.nix {};
+  phosh = handleTest ./phosh.nix {};
+  photoprism = handleTest ./photoprism.nix {};
+  php = handleTest ./php {};
+  php81 = handleTest ./php { php = pkgs.php81; };
+  php82 = handleTest ./php { php = pkgs.php82; };
+  php83 = handleTest ./php { php = pkgs.php83; };
+  phylactery = handleTest ./web-apps/phylactery.nix {};
+  pict-rs = handleTest ./pict-rs.nix {};
+  pinnwand = handleTest ./pinnwand.nix {};
+  plantuml-server = handleTest ./plantuml-server.nix {};
+  plasma-bigscreen = handleTest ./plasma-bigscreen.nix {};
+  plasma5 = handleTest ./plasma5.nix {};
+  plasma5-systemd-start = handleTest ./plasma5-systemd-start.nix {};
+  plausible = handleTest ./plausible.nix {};
+  please = handleTest ./please.nix {};
+  pleroma = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./pleroma.nix {};
+  plikd = handleTest ./plikd.nix {};
+  plotinus = handleTest ./plotinus.nix {};
+  podgrab = handleTest ./podgrab.nix {};
+  podman = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/default.nix {};
+  podman-tls-ghostunnel = handleTestOn ["aarch64-linux" "x86_64-linux"] ./podman/tls-ghostunnel.nix {};
+  polaris = handleTest ./polaris.nix {};
+  pomerium = handleTestOn ["x86_64-linux"] ./pomerium.nix {};
+  portunus = handleTest ./portunus.nix { };
+  postfix = handleTest ./postfix.nix {};
+  postfix-raise-smtpd-tls-security-level = handleTest ./postfix-raise-smtpd-tls-security-level.nix {};
+  postfixadmin = handleTest ./postfixadmin.nix {};
+  postgis = handleTest ./postgis.nix {};
+  apache_datasketches = handleTest ./apache_datasketches.nix {};
+  postgresql = handleTest ./postgresql.nix {};
+  postgresql-jit = handleTest ./postgresql-jit.nix {};
+  postgresql-wal-receiver = handleTest ./postgresql-wal-receiver.nix {};
+  powerdns = handleTest ./powerdns.nix {};
+  powerdns-admin = handleTest ./powerdns-admin.nix {};
+  power-profiles-daemon = handleTest ./power-profiles-daemon.nix {};
+  pppd = handleTest ./pppd.nix {};
+  predictable-interface-names = handleTest ./predictable-interface-names.nix {};
+  printing-socket = handleTest ./printing.nix { socket = true; };
+  printing-service = handleTest ./printing.nix { socket = false; };
+  privoxy = handleTest ./privoxy.nix {};
+  prometheus = handleTest ./prometheus.nix {};
+  prometheus-exporters = handleTest ./prometheus-exporters.nix {};
+  prosody = handleTest ./xmpp/prosody.nix {};
+  prosody-mysql = handleTest ./xmpp/prosody-mysql.nix {};
+  proxy = handleTest ./proxy.nix {};
+  prowlarr = handleTest ./prowlarr.nix {};
+  pt2-clone = handleTest ./pt2-clone.nix {};
+  pykms = handleTest ./pykms.nix {};
+  public-inbox = handleTest ./public-inbox.nix {};
+  pufferpanel = handleTest ./pufferpanel.nix {};
+  pulseaudio = discoverTests (import ./pulseaudio.nix);
+  qboot = handleTestOn ["x86_64-linux" "i686-linux"] ./qboot.nix {};
+  qemu-vm-restrictnetwork = handleTest ./qemu-vm-restrictnetwork.nix {};
+  qemu-vm-volatile-root = runTest ./qemu-vm-volatile-root.nix;
+  qemu-vm-external-disk-image = runTest ./qemu-vm-external-disk-image.nix;
+  qgis = handleTest ./qgis.nix { qgisPackage = pkgs.qgis; };
+  qgis-ltr = handleTest ./qgis.nix { qgisPackage = pkgs.qgis-ltr; };
+  qownnotes = handleTest ./qownnotes.nix {};
+  quake3 = handleTest ./quake3.nix {};
+  quorum = handleTest ./quorum.nix {};
+  rabbitmq = handleTest ./rabbitmq.nix {};
+  radarr = handleTest ./radarr.nix {};
+  radicale = handleTest ./radicale.nix {};
+  ragnarwm = handleTest ./ragnarwm.nix {};
+  rasdaemon = handleTest ./rasdaemon.nix {};
+  readarr = handleTest ./readarr.nix {};
+  redis = handleTest ./redis.nix {};
+  redmine = handleTest ./redmine.nix {};
+  restartByActivationScript = handleTest ./restart-by-activation-script.nix {};
+  restic = handleTest ./restic.nix {};
+  retroarch = handleTest ./retroarch.nix {};
+  rkvm = handleTest ./rkvm {};
+  robustirc-bridge = handleTest ./robustirc-bridge.nix {};
+  roundcube = handleTest ./roundcube.nix {};
+  rosenpass = handleTest ./rosenpass.nix {};
+  rshim = handleTest ./rshim.nix {};
+  rspamd = handleTest ./rspamd.nix {};
+  rss2email = handleTest ./rss2email.nix {};
+  rstudio-server = handleTest ./rstudio-server.nix {};
+  rsyncd = handleTest ./rsyncd.nix {};
+  rsyslogd = handleTest ./rsyslogd.nix {};
+  rxe = handleTest ./rxe.nix {};
+  sabnzbd = handleTest ./sabnzbd.nix {};
+  samba = handleTest ./samba.nix {};
+  samba-wsdd = handleTest ./samba-wsdd.nix {};
+  sanoid = handleTest ./sanoid.nix {};
+  scaphandre = handleTest ./scaphandre.nix {};
+  schleuder = handleTest ./schleuder.nix {};
+  sddm = handleTest ./sddm.nix {};
+  seafile = handleTest ./seafile.nix {};
+  searx = handleTest ./searx.nix {};
+  service-runner = handleTest ./service-runner.nix {};
+  sftpgo = runTest ./sftpgo.nix;
+  sfxr-qt = handleTest ./sfxr-qt.nix {};
+  sgt-puzzles = handleTest ./sgt-puzzles.nix {};
+  shadow = handleTest ./shadow.nix {};
+  shadowsocks = handleTest ./shadowsocks {};
+  shattered-pixel-dungeon = handleTest ./shattered-pixel-dungeon.nix {};
+  shiori = handleTest ./shiori.nix {};
+  signal-desktop = handleTest ./signal-desktop.nix {};
+  simple = handleTest ./simple.nix {};
+  sing-box = handleTest ./sing-box.nix {};
+  slimserver = handleTest ./slimserver.nix {};
+  slurm = handleTest ./slurm.nix {};
+  smokeping = handleTest ./smokeping.nix {};
+  snapcast = handleTest ./snapcast.nix {};
+  snapper = handleTest ./snapper.nix {};
+  snipe-it = runTest ./web-apps/snipe-it.nix;
+  soapui = handleTest ./soapui.nix {};
+  soft-serve = handleTest ./soft-serve.nix {};
+  sogo = handleTest ./sogo.nix {};
+  solanum = handleTest ./solanum.nix {};
+  sonarr = handleTest ./sonarr.nix {};
+  sourcehut = handleTest ./sourcehut.nix {};
+  spacecookie = handleTest ./spacecookie.nix {};
+  spark = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./spark {};
+  sqlite3-to-mysql = handleTest ./sqlite3-to-mysql.nix {};
+  sslh = handleTest ./sslh.nix {};
+  ssh-audit = handleTest ./ssh-audit.nix {};
+  sssd = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd.nix {};
+  sssd-ldap = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./sssd-ldap.nix {};
+  stalwart-mail = handleTest ./stalwart-mail.nix {};
+  stargazer = runTest ./web-servers/stargazer.nix;
+  starship = handleTest ./starship.nix {};
+  static-web-server = handleTest ./web-servers/static-web-server.nix {};
+  step-ca = handleTestOn ["x86_64-linux"] ./step-ca.nix {};
+  stratis = handleTest ./stratis {};
+  strongswan-swanctl = handleTest ./strongswan-swanctl.nix {};
+  stunnel = handleTest ./stunnel.nix {};
+  sudo = handleTest ./sudo.nix {};
+  sudo-rs = handleTest ./sudo-rs.nix {};
+  swap-file-btrfs = handleTest ./swap-file-btrfs.nix {};
+  swap-partition = handleTest ./swap-partition.nix {};
+  swap-random-encryption = handleTest ./swap-random-encryption.nix {};
+  sway = handleTest ./sway.nix {};
+  switchTest = handleTest ./switch-test.nix {};
+  sympa = handleTest ./sympa.nix {};
+  syncthing = handleTest ./syncthing.nix {};
+  syncthing-no-settings = handleTest ./syncthing-no-settings.nix {};
+  syncthing-init = handleTest ./syncthing-init.nix {};
+  syncthing-many-devices = handleTest ./syncthing-many-devices.nix {};
+  syncthing-relay = handleTest ./syncthing-relay.nix {};
+  systemd = handleTest ./systemd.nix {};
+  systemd-analyze = handleTest ./systemd-analyze.nix {};
+  systemd-binfmt = handleTestOn ["x86_64-linux"] ./systemd-binfmt.nix {};
+  systemd-boot = handleTest ./systemd-boot.nix {};
+  systemd-bpf = handleTest ./systemd-bpf.nix {};
+  systemd-confinement = handleTest ./systemd-confinement.nix {};
+  systemd-coredump = handleTest ./systemd-coredump.nix {};
+  systemd-cryptenroll = handleTest ./systemd-cryptenroll.nix {};
+  systemd-credentials-tpm2 = handleTest ./systemd-credentials-tpm2.nix {};
+  systemd-escaping = handleTest ./systemd-escaping.nix {};
+  systemd-initrd-bridge = handleTest ./systemd-initrd-bridge.nix {};
+  systemd-initrd-btrfs-raid = handleTest ./systemd-initrd-btrfs-raid.nix {};
+  systemd-initrd-luks-fido2 = handleTest ./systemd-initrd-luks-fido2.nix {};
+  systemd-initrd-luks-keyfile = handleTest ./systemd-initrd-luks-keyfile.nix {};
+  systemd-initrd-luks-empty-passphrase = handleTest ./initrd-luks-empty-passphrase.nix { systemdStage1 = true; };
+  systemd-initrd-luks-password = handleTest ./systemd-initrd-luks-password.nix {};
+  systemd-initrd-luks-tpm2 = handleTest ./systemd-initrd-luks-tpm2.nix {};
+  systemd-initrd-modprobe = handleTest ./systemd-initrd-modprobe.nix {};
+  systemd-initrd-shutdown = handleTest ./systemd-shutdown.nix { systemdStage1 = true; };
+  systemd-initrd-simple = handleTest ./systemd-initrd-simple.nix {};
+  systemd-initrd-swraid = handleTest ./systemd-initrd-swraid.nix {};
+  systemd-initrd-vconsole = handleTest ./systemd-initrd-vconsole.nix {};
+  systemd-initrd-networkd = handleTest ./systemd-initrd-networkd.nix {};
+  systemd-initrd-networkd-ssh = handleTest ./systemd-initrd-networkd-ssh.nix {};
+  systemd-initrd-networkd-openvpn = handleTest ./initrd-network-openvpn { systemdStage1 = true; };
+  systemd-initrd-vlan = handleTest ./systemd-initrd-vlan.nix {};
+  systemd-journal = handleTest ./systemd-journal.nix {};
+  systemd-machinectl = handleTest ./systemd-machinectl.nix {};
+  systemd-networkd = handleTest ./systemd-networkd.nix {};
+  systemd-networkd-dhcpserver = handleTest ./systemd-networkd-dhcpserver.nix {};
+  systemd-networkd-dhcpserver-static-leases = handleTest ./systemd-networkd-dhcpserver-static-leases.nix {};
+  systemd-networkd-ipv6-prefix-delegation = handleTest ./systemd-networkd-ipv6-prefix-delegation.nix {};
+  systemd-networkd-vrf = handleTest ./systemd-networkd-vrf.nix {};
+  systemd-no-tainted = handleTest ./systemd-no-tainted.nix {};
+  systemd-nspawn = handleTest ./systemd-nspawn.nix {};
+  systemd-nspawn-configfile = handleTest ./systemd-nspawn-configfile.nix {};
+  systemd-oomd = handleTest ./systemd-oomd.nix {};
+  systemd-portabled = handleTest ./systemd-portabled.nix {};
+  systemd-repart = handleTest ./systemd-repart.nix {};
+  systemd-shutdown = handleTest ./systemd-shutdown.nix {};
+  systemd-sysupdate = runTest ./systemd-sysupdate.nix;
+  systemd-timesyncd = handleTest ./systemd-timesyncd.nix {};
+  systemd-user-tmpfiles-rules = handleTest ./systemd-user-tmpfiles-rules.nix {};
+  systemd-misc = handleTest ./systemd-misc.nix {};
+  systemd-userdbd = handleTest ./systemd-userdbd.nix {};
+  systemd-homed = handleTest ./systemd-homed.nix {};
+  tandoor-recipes = handleTest ./tandoor-recipes.nix {};
+  tang = handleTest ./tang.nix {};
+  taskserver = handleTest ./taskserver.nix {};
+  tayga = handleTest ./tayga.nix {};
+  teeworlds = handleTest ./teeworlds.nix {};
+  telegraf = handleTest ./telegraf.nix {};
+  teleport = handleTest ./teleport.nix {};
+  thelounge = handleTest ./thelounge.nix {};
+  terminal-emulators = handleTest ./terminal-emulators.nix {};
+  tiddlywiki = handleTest ./tiddlywiki.nix {};
+  tigervnc = handleTest ./tigervnc.nix {};
+  timescaledb = handleTest ./timescaledb.nix {};
+  promscale = handleTest ./promscale.nix {};
+  timezone = handleTest ./timezone.nix {};
+  tinc = handleTest ./tinc {};
+  tinydns = handleTest ./tinydns.nix {};
+  tinyproxy = handleTest ./tinyproxy.nix {};
+  tinywl = handleTest ./tinywl.nix {};
+  tmate-ssh-server = handleTest ./tmate-ssh-server.nix { };
+  tomcat = handleTest ./tomcat.nix {};
+  tor = handleTest ./tor.nix {};
+  traefik = handleTestOn ["aarch64-linux" "x86_64-linux"] ./traefik.nix {};
+  trafficserver = handleTest ./trafficserver.nix {};
+  transmission = handleTest ./transmission.nix { transmission = pkgs.transmission; };
+  transmission_4 = handleTest ./transmission.nix { transmission = pkgs.transmission_4; };
+  # tracee requires bpf
+  tracee = handleTestOn ["x86_64-linux"] ./tracee.nix {};
+  trezord = handleTest ./trezord.nix {};
+  trickster = handleTest ./trickster.nix {};
+  trilium-server = handleTestOn ["x86_64-linux"] ./trilium-server.nix {};
+  tsja = handleTest ./tsja.nix {};
+  tsm-client-gui = handleTest ./tsm-client-gui.nix {};
+  txredisapi = handleTest ./txredisapi.nix {};
+  tuptime = handleTest ./tuptime.nix {};
+  turbovnc-headless-server = handleTest ./turbovnc-headless-server.nix {};
+  tuxguitar = handleTest ./tuxguitar.nix {};
+  twingate = runTest ./twingate.nix;
+  typesense = handleTest ./typesense.nix {};
+  ucarp = handleTest ./ucarp.nix {};
+  udisks2 = handleTest ./udisks2.nix {};
+  ulogd = handleTest ./ulogd/ulogd.nix {};
+  unbound = handleTest ./unbound.nix {};
+  unifi = handleTest ./unifi.nix {};
+  unit-php = handleTest ./web-servers/unit-php.nix {};
+  upnp = handleTest ./upnp.nix {};
+  uptermd = handleTest ./uptermd.nix {};
+  uptime-kuma = handleTest ./uptime-kuma.nix {};
+  usbguard = handleTest ./usbguard.nix {};
+  user-activation-scripts = handleTest ./user-activation-scripts.nix {};
+  user-expiry = runTest ./user-expiry.nix;
+  user-home-mode = handleTest ./user-home-mode.nix {};
+  uwsgi = handleTest ./uwsgi.nix {};
+  v2ray = handleTest ./v2ray.nix {};
+  varnish60 = handleTest ./varnish.nix { package = pkgs.varnish60; };
+  varnish74 = handleTest ./varnish.nix { package = pkgs.varnish74; };
+  vault = handleTest ./vault.nix {};
+  vault-agent = handleTest ./vault-agent.nix {};
+  vault-dev = handleTest ./vault-dev.nix {};
+  vault-postgresql = handleTest ./vault-postgresql.nix {};
+  vaultwarden = handleTest ./vaultwarden.nix {};
+  vector = handleTest ./vector.nix {};
+  vengi-tools = handleTest ./vengi-tools.nix {};
+  victoriametrics = handleTest ./victoriametrics.nix {};
+  vikunja = handleTest ./vikunja.nix {};
+  virtualbox = handleTestOn ["x86_64-linux"] ./virtualbox.nix {};
+  vscode-remote-ssh = handleTestOn ["x86_64-linux"] ./vscode-remote-ssh.nix {};
+  vscodium = discoverTests (import ./vscodium.nix);
+  vsftpd = handleTest ./vsftpd.nix {};
+  warzone2100 = handleTest ./warzone2100.nix {};
+  wasabibackend = handleTest ./wasabibackend.nix {};
+  webhook = runTest ./webhook.nix;
+  wiki-js = handleTest ./wiki-js.nix {};
+  wine = handleTest ./wine.nix {};
+  wireguard = handleTest ./wireguard {};
+  without-nix = handleTest ./without-nix.nix {};
+  wmderland = handleTest ./wmderland.nix {};
+  wpa_supplicant = handleTest ./wpa_supplicant.nix {};
+  wordpress = handleTest ./wordpress.nix {};
+  wrappers = handleTest ./wrappers.nix {};
+  writefreely = handleTest ./web-apps/writefreely.nix {};
+  xandikos = handleTest ./xandikos.nix {};
+  xautolock = handleTest ./xautolock.nix {};
+  xfce = handleTest ./xfce.nix {};
+  xmonad = handleTest ./xmonad.nix {};
+  xmonad-xdg-autostart = handleTest ./xmonad-xdg-autostart.nix {};
+  xpadneo = handleTest ./xpadneo.nix {};
+  xrdp = handleTest ./xrdp.nix {};
+  xss-lock = handleTest ./xss-lock.nix {};
+  xterm = handleTest ./xterm.nix {};
+  xxh = handleTest ./xxh.nix {};
+  yabar = handleTest ./yabar.nix {};
+  yggdrasil = handleTest ./yggdrasil.nix {};
+  zammad = handleTest ./zammad.nix {};
+  zeronet-conservancy = handleTest ./zeronet-conservancy.nix {};
+  zfs = handleTest ./zfs.nix {};
+  zigbee2mqtt = handleTest ./zigbee2mqtt.nix {};
+  zoneminder = handleTest ./zoneminder.nix {};
+  zookeeper = handleTest ./zookeeper.nix {};
+  zram-generator = handleTest ./zram-generator.nix {};
+  zrepl = handleTest ./zrepl.nix {};
+  zsh-history = handleTest ./zsh-history.nix {};
+  zwave-js = handleTest ./zwave-js.nix {};
+}
diff --git a/nixpkgs/nixos/tests/alps.nix b/nixpkgs/nixos/tests/alps.nix
new file mode 100644
index 000000000000..9756f2d4da15
--- /dev/null
+++ b/nixpkgs/nixos/tests/alps.nix
@@ -0,0 +1,108 @@
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+in
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "alps";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hmenke ];
+  };
+
+  nodes = {
+    server = {
+      imports = [ ./common/user-account.nix ];
+      security.pki.certificateFiles = [
+        certs.ca.cert
+      ];
+      networking.extraHosts = ''
+        127.0.0.1 ${domain}
+      '';
+      networking.firewall.allowedTCPPorts = [ 25 465 993 ];
+      services.postfix = {
+        enable = true;
+        enableSubmission = true;
+        enableSubmissions = true;
+        tlsTrustedAuthorities = "${certs.ca.cert}";
+        sslCert = "${certs.${domain}.cert}";
+        sslKey = "${certs.${domain}.key}";
+      };
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+        sslCACert = "${certs.ca.cert}";
+        sslServerCert = "${certs.${domain}.cert}";
+        sslServerKey = "${certs.${domain}.key}";
+      };
+    };
+
+    client = { nodes, config, ... }: {
+      security.pki.certificateFiles = [
+        certs.ca.cert
+      ];
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} ${domain}
+      '';
+      services.alps = {
+        enable = true;
+        theme = "alps";
+        imaps = {
+          host = domain;
+          port = 993;
+        };
+        smtps = {
+          host = domain;
+          port = 465;
+        };
+      };
+      environment.systemPackages = [
+        (pkgs.writers.writePython3Bin "test-alps-login" { } ''
+          from urllib.request import build_opener, HTTPCookieProcessor, Request
+          from urllib.parse import urlencode, urljoin
+          from http.cookiejar import CookieJar
+
+          baseurl = "http://localhost:${toString config.services.alps.port}"
+          username = "alice"
+          password = "${nodes.server.config.users.users.alice.password}"
+          cookiejar = CookieJar()
+          cookieprocessor = HTTPCookieProcessor(cookiejar)
+          opener = build_opener(cookieprocessor)
+
+          data = urlencode({"username": username, "password": password}).encode()
+          req = Request(urljoin(baseurl, "login"), data=data, method="POST")
+          with opener.open(req) as ret:
+              # Check that the alps_session cookie is set
+              print(cookiejar)
+              assert any(cookie.name == "alps_session" for cookie in cookiejar)
+
+          req = Request(baseurl)
+          with opener.open(req) as ret:
+              # Check that the alps_session cookie is still there...
+              print(cookiejar)
+              assert any(cookie.name == "alps_session" for cookie in cookiejar)
+              # ...and that we have not been redirected back to the login page
+              print(ret.url)
+              assert ret.url == urljoin(baseurl, "mailbox/INBOX")
+
+          req = Request(urljoin(baseurl, "logout"))
+          with opener.open(req) as ret:
+              # Check that the alps_session cookie is now gone
+              print(cookiejar)
+              assert all(cookie.name != "alps_session" for cookie in cookiejar)
+        '')
+      ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    server.start()
+    server.wait_for_unit("postfix.service")
+    server.wait_for_unit("dovecot2.service")
+    server.wait_for_open_port(465)
+    server.wait_for_open_port(993)
+
+    client.start()
+    client.wait_for_unit("alps.service")
+    client.wait_for_open_port(${toString nodes.client.config.services.alps.port})
+    client.succeed("test-alps-login")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/amazon-init-shell.nix b/nixpkgs/nixos/tests/amazon-init-shell.nix
new file mode 100644
index 000000000000..3c040841b6d2
--- /dev/null
+++ b/nixpkgs/nixos/tests/amazon-init-shell.nix
@@ -0,0 +1,40 @@
+# This test verifies that the amazon-init service can treat the `user-data` EC2
+# metadata file as a shell script. If amazon-init detects that `user-data` is a
+# script (based on the presence of the shebang #! line), it executes it and
+# exits.
+# Note that other tests verify that amazon-init can treat user-data as a NixOS
+# configuration expression.
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+makeTest {
+  name = "amazon-init";
+  meta = with maintainers; {
+    maintainers = [ urbas ];
+  };
+  nodes.machine = { ... }:
+  {
+    imports = [ ../modules/profiles/headless.nix ../modules/virtualisation/amazon-init.nix ];
+    services.openssh.enable = true;
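+    # With an empty host name the test driver falls back to its default machine
+    # name, which is presumably why the test script below addresses the VM as
+    # `unnamed`.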
+    networking.hostName = "";
+    environment.etc."ec2-metadata/user-data" = {
+      text = ''
+        #!/usr/bin/bash
+
+        echo successful > /tmp/evidence
+      '';
+    };
+  };
+  testScript = ''
+    # To wait until amazon-init terminates its run
+    unnamed.wait_for_unit("amazon-init.service")
+
+    unnamed.succeed("grep -q successful /tmp/evidence")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/amazon-ssm-agent.nix b/nixpkgs/nixos/tests/amazon-ssm-agent.nix
new file mode 100644
index 000000000000..957e9e0e02c5
--- /dev/null
+++ b/nixpkgs/nixos/tests/amazon-ssm-agent.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "amazon-ssm-agent";
+  meta.maintainers = [ lib.maintainers.anthonyroussel ];
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.amazon-ssm-agent.enable = true;
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_file("/etc/amazon/ssm/seelog.xml")
+    machine.wait_for_file("/etc/amazon/ssm/amazon-ssm-agent.json")
+
+    machine.wait_for_unit("amazon-ssm-agent.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/amd-sev.nix b/nixpkgs/nixos/tests/amd-sev.nix
new file mode 100644
index 000000000000..bf9a50c10d0d
--- /dev/null
+++ b/nixpkgs/nixos/tests/amd-sev.nix
@@ -0,0 +1,56 @@
+{ lib, ... }: {
+  name = "amd-sev";
+  meta = {
+    maintainers = with lib.maintainers; [ trundle veehaitch ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    hardware.cpu.amd.sev.enable = true;
+    hardware.cpu.amd.sevGuest.enable = true;
+
+    specialisation.sevCustomUserGroup.configuration = {
+      users.groups.sevtest = { };
+
+      hardware.cpu.amd.sev = {
+        enable = true;
+        group = "root";
+        mode = "0600";
+      };
+      hardware.cpu.amd.sevGuest = {
+        enable = true;
+        group = "sevtest";
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
+    in
+    ''
+      machine.wait_for_unit("multi-user.target")
+
+      with subtest("Check default settings"):
+        out = machine.succeed("cat /etc/udev/rules.d/99-local.rules")
+        assert 'KERNEL=="sev", OWNER="root", GROUP="sev", MODE="0660"' in out
+        assert 'KERNEL=="sev-guest", OWNER="root", GROUP="sev-guest", MODE="0660"' in out
+
+        out = machine.succeed("cat /etc/group")
+        assert "sev:" in out
+        assert "sev-guest:" in out
+        assert "sevtest:" not in out
+
+      with subtest("Activate configuration with custom user/group"):
+        machine.succeed('${specialisations}/sevCustomUserGroup/bin/switch-to-configuration test')
+
+      with subtest("Check custom user and group"):
+        out = machine.succeed("cat /etc/udev/rules.d/99-local.rules")
+        assert 'KERNEL=="sev", OWNER="root", GROUP="root", MODE="0600"' in out
+        assert 'KERNEL=="sev-guest", OWNER="root", GROUP="sevtest", MODE="0660"' in out
+
+        out = machine.succeed("cat /etc/group")
+        assert "sev:" not in out
+        assert "sev-guest:" not in out
+        assert "sevtest:" in out
+    '';
+}
diff --git a/nixpkgs/nixos/tests/anbox.nix b/nixpkgs/nixos/tests/anbox.nix
new file mode 100644
index 000000000000..dfd6c13d9318
--- /dev/null
+++ b/nixpkgs/nixos/tests/anbox.nix
@@ -0,0 +1,36 @@
+{ lib, pkgs, ... }:
+
+{
+  name = "anbox";
+  meta.maintainers = with lib.maintainers; [ mvnetbiz ];
+
+  nodes.machine = { pkgs, config, ... }: {
+    imports = [
+      ./common/user-account.nix
+      ./common/x11.nix
+    ];
+
+    environment.systemPackages = with pkgs; [ android-tools ];
+
+    test-support.displayManager.auto.user = "alice";
+
+    virtualisation.anbox.enable = true;
+    boot.kernelPackages = pkgs.linuxPackages_5_15;
+    virtualisation.memorySize = 2500;
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.users.users.alice;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus";
+  in ''
+    machine.wait_for_x()
+
+    machine.wait_until_succeeds(
+        "sudo -iu alice ${bus} anbox wait-ready"
+    )
+
+    machine.wait_until_succeeds("adb shell true")
+
+    print(machine.succeed("adb devices"))
+  '';
+}
diff --git a/nixpkgs/nixos/tests/anuko-time-tracker.nix b/nixpkgs/nixos/tests/anuko-time-tracker.nix
new file mode 100644
index 000000000000..18c3bf5cf695
--- /dev/null
+++ b/nixpkgs/nixos/tests/anuko-time-tracker.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "anuko-time-tracker";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ michaelshmitty ];
+  };
+  nodes = {
+    machine = {
+      services.anuko-time-tracker.enable = true;
+    };
+  };
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("phpfpm-anuko-time-tracker")
+    machine.wait_for_open_port(80)
+    machine.wait_until_succeeds("curl -s --fail -L http://localhost/time.php | grep 'Anuko Time Tracker'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/apache_datasketches.nix b/nixpkgs/nixos/tests/apache_datasketches.nix
new file mode 100644
index 000000000000..2bf099ac7991
--- /dev/null
+++ b/nixpkgs/nixos/tests/apache_datasketches.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "postgis";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lsix ]; # TODO: Who's the maintainer now?
+  };
+
+  nodes = {
+    master =
+      { pkgs, ... }:
+
+      {
+        services.postgresql = let mypg = pkgs.postgresql_15; in {
+            enable = true;
+            package = mypg;
+            extraPlugins = with mypg.pkgs; [
+              apache_datasketches
+            ];
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    master.wait_for_unit("postgresql")
+    master.sleep(10)  # Hopefully this is long enough!!
+    master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION datasketches;'")
+    master.succeed("sudo -u postgres psql -c 'SELECT hll_sketch_to_string(hll_sketch_build(1));'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/apcupsd.nix b/nixpkgs/nixos/tests/apcupsd.nix
new file mode 100644
index 000000000000..287140f039d8
--- /dev/null
+++ b/nixpkgs/nixos/tests/apcupsd.nix
@@ -0,0 +1,41 @@
+let
+  # arbitrary address
+  ipAddr = "192.168.42.42";
+in
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "apcupsd";
+  meta.maintainers = with lib.maintainers; [ bjornfor ];
+
+  nodes = {
+    machine = {
+      services.apcupsd = {
+        enable = true;
+        configText = ''
+          UPSTYPE usb
+          BATTERYLEVEL 42
+          # Configure NISIP so that the only way apcaccess can work is to read
+          # this config.
+          NISIP ${ipAddr}
+        '';
+      };
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [{
+          address = ipAddr;
+          prefixLength = 24;
+        }];
+      };
+    };
+  };
+
+  # Check that the service starts, that the CLI (apcaccess) works and that it
+  # uses the config (ipAddr) defined in the service config.
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("apcupsd.service")
+    machine.wait_for_open_port(3551, "${ipAddr}")
+    res = machine.succeed("apcaccess")
+    expect_line = "MBATTCHG : 42 Percent"
+    assert expect_line in res, f"expected apcaccess output to contain '{expect_line}' but got '{res}'"
+    machine.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/apfs.nix b/nixpkgs/nixos/tests/apfs.nix
new file mode 100644
index 000000000000..15ed5aa7f573
--- /dev/null
+++ b/nixpkgs/nixos/tests/apfs.nix
@@ -0,0 +1,65 @@
+{ lib, ... }: {
+  name = "apfs";
+  meta.maintainers = with lib.maintainers; [ Luflosi ];
+
+  nodes.machine = {
+    virtualisation.emptyDiskImages = [ 1024 ];
+
+    boot.supportedFilesystems = [ "apfs" ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("basic.target")
+    machine.succeed("mkdir /tmp/mnt")
+
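+    # The two labels below differ by one character: the 256-character label
+    # should be rejected, while 255 characters (apparently the maximum) should
+    # be accepted.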
+    with subtest("mkapfs refuses to work with a label that is too long"):
+      machine.fail( "mkapfs -L '000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7F' /dev/vdb")
+
+    with subtest("mkapfs works with the maximum label length"):
+      machine.succeed("mkapfs -L '000102030405060708090A0B0C0D0E0F101112131415161718191A1B1C1D1E1F202122232425262728292A2B2C2D2E2F303132333435363738393A3B3C3D3E3F404142434445464748494A4B4C4D4E4F505152535455565758595A5B5C5D5E5F606162636465666768696A6B6C6D6E6F707172737475767778797A7B7C7D7E7' /dev/vdb")
+
+    with subtest("Enable case sensitivity and normalization sensitivity"):
+      machine.succeed(
+          "mkapfs -s -z /dev/vdb",
+          "mount -o cknodes,readwrite /dev/vdb /tmp/mnt",
+          "echo 'Hello World 1' > /tmp/mnt/test.txt",
+          "[ ! -f /tmp/mnt/TeSt.TxT ] || false", # Test case sensitivity
+          "echo 'Hello World 1' | diff - /tmp/mnt/test.txt",
+          "echo 'Hello World 2' > /tmp/mnt/\u0061\u0301.txt",
+          "echo 'Hello World 2' | diff - /tmp/mnt/\u0061\u0301.txt",
+          "[ ! -f /tmp/mnt/\u00e1.txt ] || false", # Test Unicode normalization sensitivity
+          "umount /tmp/mnt",
+          "apfsck /dev/vdb",
+      )
+    with subtest("Disable case sensitivity and normalization sensitivity"):
+      machine.succeed(
+          "mkapfs /dev/vdb",
+          "mount -o cknodes,readwrite /dev/vdb /tmp/mnt",
+          "echo 'bla bla bla' > /tmp/mnt/Test.txt",
+          "echo -n 'Hello World' > /tmp/mnt/test.txt",
+          "echo ' 1' >> /tmp/mnt/TEST.TXT",
+          "umount /tmp/mnt",
+          "apfsck /dev/vdb",
+          "mount -o cknodes,readwrite /dev/vdb /tmp/mnt",
+          "echo 'Hello World 1' | diff - /tmp/mnt/TeSt.TxT", # Test case insensitivity
+          "echo 'Hello World 2' > /tmp/mnt/\u0061\u0301.txt",
+          "echo 'Hello World 2' | diff - /tmp/mnt/\u0061\u0301.txt",
+          "echo 'Hello World 2' | diff - /tmp/mnt/\u00e1.txt", # Test Unicode normalization
+          "umount /tmp/mnt",
+          "apfsck /dev/vdb",
+      )
+    with subtest("Snapshots"):
+      machine.succeed(
+          "mkapfs /dev/vdb",
+          "mount -o cknodes,readwrite /dev/vdb /tmp/mnt",
+          "echo 'Hello World' > /tmp/mnt/test.txt",
+          "apfs-snap /tmp/mnt snap-1",
+          "rm /tmp/mnt/test.txt",
+          "umount /tmp/mnt",
+          "mount -o cknodes,readwrite,snap=snap-1 /dev/vdb /tmp/mnt",
+          "echo 'Hello World' | diff - /tmp/mnt/test.txt",
+          "umount /tmp/mnt",
+          "apfsck /dev/vdb",
+      )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/apparmor.nix b/nixpkgs/nixos/tests/apparmor.nix
new file mode 100644
index 000000000000..be91e9632849
--- /dev/null
+++ b/nixpkgs/nixos/tests/apparmor.nix
@@ -0,0 +1,85 @@
+import ./make-test-python.nix ({ pkgs, lib, ... } : {
+  name = "apparmor";
+  meta.maintainers = with lib.maintainers; [ julm ];
+
+  nodes.machine =
+    { lib, pkgs, config, ... }:
+    {
+      security.apparmor.enable = lib.mkDefault true;
+    };
+
+  testScript =
+    ''
+      machine.wait_for_unit("multi-user.target")
+
+      with subtest("AppArmor profiles are loaded"):
+          machine.succeed("systemctl status apparmor.service")
+
+      # AppArmor securityfs
+      with subtest("AppArmor securityfs is mounted"):
+          machine.succeed("mountpoint -q /sys/kernel/security")
+          machine.succeed("cat /sys/kernel/security/apparmor/profiles")
+
+      # Test apparmorRulesFromClosure by:
+      # 1. Prepending a string of the relevant packages' name and version on each line.
+      # 2. Sorting according to those strings.
+      # 3. Removing those prepended strings.
+      # 4. Using `diff` against the expected output.
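+      # (e.g. a rule for bash is prefixed with "bash-<version> ", so all rules
+      # belonging to one package sort next to each other before the prefix is
+      # stripped again)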
+      with subtest("apparmorRulesFromClosure"):
+          machine.succeed(
+              "${pkgs.diffutils}/bin/diff -u ${pkgs.writeText "expected.rules" ''
+                  mr ${pkgs.bash}/lib/**.so*,
+                  r ${pkgs.bash},
+                  r ${pkgs.bash}/etc/**,
+                  r ${pkgs.bash}/lib/**,
+                  r ${pkgs.bash}/share/**,
+                  x ${pkgs.bash}/foo/**,
+                  mr ${pkgs.glibc}/lib/**.so*,
+                  r ${pkgs.glibc},
+                  r ${pkgs.glibc}/etc/**,
+                  r ${pkgs.glibc}/lib/**,
+                  r ${pkgs.glibc}/share/**,
+                  x ${pkgs.glibc}/foo/**,
+                  mr ${pkgs.libcap}/lib/**.so*,
+                  r ${pkgs.libcap},
+                  r ${pkgs.libcap}/etc/**,
+                  r ${pkgs.libcap}/lib/**,
+                  r ${pkgs.libcap}/share/**,
+                  x ${pkgs.libcap}/foo/**,
+                  mr ${pkgs.libcap.lib}/lib/**.so*,
+                  r ${pkgs.libcap.lib},
+                  r ${pkgs.libcap.lib}/etc/**,
+                  r ${pkgs.libcap.lib}/lib/**,
+                  r ${pkgs.libcap.lib}/share/**,
+                  x ${pkgs.libcap.lib}/foo/**,
+                  mr ${pkgs.libidn2.out}/lib/**.so*,
+                  r ${pkgs.libidn2.out},
+                  r ${pkgs.libidn2.out}/etc/**,
+                  r ${pkgs.libidn2.out}/lib/**,
+                  r ${pkgs.libidn2.out}/share/**,
+                  x ${pkgs.libidn2.out}/foo/**,
+                  mr ${pkgs.libunistring}/lib/**.so*,
+                  r ${pkgs.libunistring},
+                  r ${pkgs.libunistring}/etc/**,
+                  r ${pkgs.libunistring}/lib/**,
+                  r ${pkgs.libunistring}/share/**,
+                  x ${pkgs.libunistring}/foo/**,
+                  mr ${pkgs.glibc.libgcc}/lib/**.so*,
+                  r ${pkgs.glibc.libgcc},
+                  r ${pkgs.glibc.libgcc}/etc/**,
+                  r ${pkgs.glibc.libgcc}/lib/**,
+                  r ${pkgs.glibc.libgcc}/share/**,
+                  x ${pkgs.glibc.libgcc}/foo/**,
+              ''} ${pkgs.runCommand "actual.rules" { preferLocalBuild = true; } ''
+                  ${pkgs.gnused}/bin/sed -e 's:^[^ ]* ${builtins.storeDir}/[^,/-]*-\([^/,]*\):\1 \0:' ${
+                      pkgs.apparmorRulesFromClosure {
+                        name = "ping";
+                        additionalRules = ["x $path/foo/**"];
+                      } [ pkgs.libcap ]
+                  } |
+                  ${pkgs.coreutils}/bin/sort -n -k1 |
+                  ${pkgs.gnused}/bin/sed -e 's:^[^ ]* ::' >$out
+              ''}"
+          )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/appliance-repart-image.nix b/nixpkgs/nixos/tests/appliance-repart-image.nix
new file mode 100644
index 000000000000..3f256db84621
--- /dev/null
+++ b/nixpkgs/nixos/tests/appliance-repart-image.nix
@@ -0,0 +1,116 @@
+# Tests building and running a GUID Partition Table (GPT) appliance image.
+# "Appliance" here means that the image does not contain the normal NixOS
+# infrastructure of a system profile and cannot be re-built via
+# `nixos-rebuild`.
+
+{ lib, ... }:
+
+let
+  rootPartitionLabel = "root";
+
+  bootLoaderConfigPath = "/loader/entries/nixos.conf";
+  kernelPath = "/EFI/nixos/kernel.efi";
+  initrdPath = "/EFI/nixos/initrd.efi";
+in
+{
+  name = "appliance-gpt-image";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+
+    imports = [ ../modules/image/repart.nix ];
+
+    virtualisation.directBoot.enable = false;
+    virtualisation.mountHostNixStore = false;
+    virtualisation.useEFIBoot = true;
+
+    # Disable boot loaders because we install one "manually".
+    # TODO(raitobezarius): revisit this when #244907 lands
+    boot.loader.grub.enable = false;
+
+    virtualisation.fileSystems = lib.mkForce {
+      "/" = {
+        device = "/dev/disk/by-partlabel/${rootPartitionLabel}";
+        fsType = "ext4";
+      };
+    };
+
+    image.repart = {
+      name = "appliance-gpt-image";
+      partitions = {
+        "esp" = {
+          contents =
+            let
+              efiArch = config.nixpkgs.hostPlatform.efiArch;
+            in
+            {
+              "/EFI/BOOT/BOOT${lib.toUpper efiArch}.EFI".source =
+                "${pkgs.systemd}/lib/systemd/boot/efi/systemd-boot${efiArch}.efi";
+
+              # TODO: create an abstraction for Boot Loader Specification (BLS) entries.
+              "${bootLoaderConfigPath}".source = pkgs.writeText "nixos.conf" ''
+                title NixOS
+                linux ${kernelPath}
+                initrd ${initrdPath}
+                options init=${config.system.build.toplevel}/init ${toString config.boot.kernelParams}
+              '';
+
+              "${kernelPath}".source =
+                "${config.boot.kernelPackages.kernel}/${config.system.boot.loader.kernelFile}";
+
+              "${initrdPath}".source =
+                "${config.system.build.initialRamdisk}/${config.system.boot.loader.initrdFile}";
+            };
+          repartConfig = {
+            Type = "esp";
+            Format = "vfat";
+            # Minimize = "guess" seems to not work very vell for vfat
+            # partitons. It's better to set a sensible default instead. The
+            # aarch64 kernel seems to generally be a little bigger than the
+            # x86_64 kernel. To stay on the safe side, leave some more slack
+            # for every platform other than x86_64.
+            SizeMinBytes = if config.nixpkgs.hostPlatform.isx86_64 then "64M" else "96M";
+          };
+        };
+        "root" = {
+          storePaths = [ config.system.build.toplevel ];
+          repartConfig = {
+            Type = "root";
+            Format = config.fileSystems."/".fsType;
+            Label = rootPartitionLabel;
+            Minimize = "guess";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    import os
+    import subprocess
+    import tempfile
+
+    tmp_disk_image = tempfile.NamedTemporaryFile()
+
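+    # Create a writable qcow2 overlay backed by the read-only raw image from the
+    # Nix store, so the VM can boot from it without modifying the original.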
+    subprocess.run([
+      "${nodes.machine.virtualisation.qemu.package}/bin/qemu-img",
+      "create",
+      "-f",
+      "qcow2",
+      "-b",
+      "${nodes.machine.system.build.image}/image.raw",
+      "-F",
+      "raw",
+      tmp_disk_image.name,
+    ])
+
+    # Set NIX_DISK_IMAGE so that the qemu script finds the right disk image.
+    os.environ['NIX_DISK_IMAGE'] = tmp_disk_image.name
+
+    bootctl_status = machine.succeed("bootctl status")
+    assert "${bootLoaderConfigPath}" in bootctl_status
+    assert "${kernelPath}" in bootctl_status
+    assert "${initrdPath}" in bootctl_status
+  '';
+}
diff --git a/nixpkgs/nixos/tests/archi.nix b/nixpkgs/nixos/tests/archi.nix
new file mode 100644
index 000000000000..59f2e940c005
--- /dev/null
+++ b/nixpkgs/nixos/tests/archi.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "archi";
+  meta.maintainers = with lib.maintainers; [ paumr ];
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    environment.systemPackages = with pkgs; [ archi ];
+  };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+
+    with subtest("createEmptyModel via CLI"):
+         machine.succeed("Archi -application com.archimatetool.commandline.app -consoleLog -nosplash --createEmptyModel --saveModel smoke.archimate")
+         machine.copy_from_vm("smoke.archimate", "")
+
+    with subtest("UI smoketest"):
+         machine.succeed("DISPLAY=:0 Archi --createEmptyModel >&2 &")
+         machine.wait_for_window("Archi")
+
+         # wait till main UI is open
+         machine.wait_for_text("Welcome to Archi")
+
+         machine.screenshot("welcome-screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/atd.nix b/nixpkgs/nixos/tests/atd.nix
new file mode 100644
index 000000000000..4342e9d7dc18
--- /dev/null
+++ b/nixpkgs/nixos/tests/atd.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "atd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ bjornfor ];
+  };
+
+  nodes.machine =
+    { ... }:
+    { services.atd.enable = true;
+      users.users.alice = { isNormalUser = true; };
+    };
+
+  # "at" has a resolution of 1 minute
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("atd.service")  # wait for atd to start
+    machine.fail("test -f ~root/at-1")
+    machine.fail("test -f ~alice/at-1")
+
+    machine.succeed("echo 'touch ~root/at-1' | at now+1min")
+    machine.succeed("su - alice -c \"echo 'touch at-1' | at now+1min\"")
+
+    machine.succeed("sleep 1.5m")
+
+    machine.succeed("test -f ~root/at-1")
+    machine.succeed("test -f ~alice/at-1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/atop.nix b/nixpkgs/nixos/tests/atop.nix
new file mode 100644
index 000000000000..f9335eecc20e
--- /dev/null
+++ b/nixpkgs/nixos/tests/atop.nix
@@ -0,0 +1,226 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let assertions = rec {
+  path = program: path: ''
+    with subtest("The path of ${program} should be ${path}"):
+        p = machine.succeed("type -p \"${program}\" | head -c -1")
+        assert p == "${path}", f"${program} is {p}, expected ${path}"
+  '';
+  unit = name: state: ''
+    with subtest("Unit ${name} should be ${state}"):
+        if "${state}" == "active":
+            machine.wait_for_unit("${name}")
+        else:
+            machine.require_unit_state("${name}", "${state}")
+  '';
+  version = ''
+    import re
+
+    with subtest("binary should report the correct version"):
+        pkgver = "${pkgs.atop.version}"
+        ver = re.sub(r'(?s)^Version: (\d\.\d\.\d).*', r'\1', machine.succeed("atop -V"))
+        assert ver == pkgver, f"Version is `{ver}`, expected `{pkgver}`"
+  '';
+  atoprc = contents:
+    if builtins.stringLength contents > 0 then ''
+      with subtest("/etc/atoprc should have the correct contents"):
+          f = machine.succeed("cat /etc/atoprc")
+          assert f == "${contents}", f"/etc/atoprc contents: '{f}', expected '${contents}'"
+    '' else ''
+      with subtest("/etc/atoprc should not be present"):
+          machine.succeed("test ! -e /etc/atoprc")
+    '';
+  wrapper = present:
+    if present then path "atop" "/run/wrappers/bin/atop" + ''
+      with subtest("Wrapper should be setuid root"):
+          stat = machine.succeed("stat --printf '%a %u' /run/wrappers/bin/atop")
+          assert stat == "4511 0", f"Wrapper stat is {stat}, expected '4511 0'"
+    ''
+    else path "atop" "/run/current-system/sw/bin/atop";
+  atopService = present:
+    if present then
+      unit "atop.service" "active"
+      + ''
+        with subtest("atop.service should write some data to /var/log/atop"):
+
+            def has_data_files(last: bool) -> bool:
+                files = int(machine.succeed("ls -1 /var/log/atop | wc -l"))
+                if files == 0:
+                    machine.log("Did not find at least one 1 data file")
+                    if not last:
+                        machine.log("Will retry...")
+                    return False
+                return True
+
+            with machine.nested("Waiting for data files"):
+                retry(has_data_files)
+      '' else unit "atop.service" "inactive";
+  atopRotateTimer = present:
+    unit "atop-rotate.timer" (if present then "active" else "inactive");
+  atopacctService = present:
+    if present then
+      unit "atopacct.service" "active"
+      + ''
+        with subtest("atopacct.service should enable process accounting"):
+            machine.wait_until_succeeds("test -f /run/pacct_source")
+
+        with subtest("atopacct.service should write data to /run/pacct_shadow.d"):
+
+            def has_data_files(last: bool) -> bool:
+                files = int(machine.succeed("ls -1 /run/pacct_shadow.d | wc -l"))
+                if files == 0:
+                    machine.log("Did not find at least one 1 data file")
+                    if not last:
+                        machine.log("Will retry...")
+                    return False
+                return True
+
+            with machine.nested("Waiting for data files"):
+                retry(has_data_files)
+      '' else unit "atopacct.service" "inactive";
+  netatop = present:
+    if present then
+      unit "netatop.service" "active"
+      + ''
+        with subtest("The netatop kernel module should be loaded"):
+            out = machine.succeed("modprobe -n -v netatop")
+            assert out == "", f"Module should be loaded already, but modprobe would have done {out}."
+      '' else ''
+      with subtest("The netatop kernel module should be absent"):
+          machine.fail("modprobe -n -v netatop")
+    '';
+  atopgpu = present:
+    if present then
+      (unit "atopgpu.service" "active") + (path "atopgpud" "/run/current-system/sw/bin/atopgpud")
+    else (unit "atopgpu.service" "inactive") + ''
+      with subtest("atopgpud should not be present"):
+          machine.fail("type -p atopgpud")
+    '';
+};
+in
+{
+  justThePackage = makeTest {
+    name = "atop-justThePackage";
+    nodes.machine = {
+      environment.systemPackages = [ pkgs.atop ];
+    };
+    testScript = with assertions; builtins.concatStringsSep "\n" [
+      version
+      (atoprc "")
+      (wrapper false)
+      (atopService false)
+      (atopRotateTimer false)
+      (atopacctService false)
+      (netatop false)
+      (atopgpu false)
+    ];
+  };
+  defaults = makeTest {
+    name = "atop-defaults";
+    nodes.machine = {
+      programs.atop = {
+        enable = true;
+      };
+    };
+    testScript = with assertions; builtins.concatStringsSep "\n" [
+      version
+      (atoprc "")
+      (wrapper false)
+      (atopService true)
+      (atopRotateTimer true)
+      (atopacctService true)
+      (netatop false)
+      (atopgpu false)
+    ];
+  };
+  minimal = makeTest {
+    name = "atop-minimal";
+    nodes.machine = {
+      programs.atop = {
+        enable = true;
+        atopService.enable = false;
+        atopRotateTimer.enable = false;
+        atopacctService.enable = false;
+      };
+    };
+    testScript = with assertions; builtins.concatStringsSep "\n" [
+      version
+      (atoprc "")
+      (wrapper false)
+      (atopService false)
+      (atopRotateTimer false)
+      (atopacctService false)
+      (netatop false)
+      (atopgpu false)
+    ];
+  };
+  netatop = makeTest {
+    name = "atop-netatop";
+    nodes.machine = {
+      programs.atop = {
+        enable = true;
+        netatop.enable = true;
+      };
+    };
+    testScript = with assertions; builtins.concatStringsSep "\n" [
+      version
+      (atoprc "")
+      (wrapper false)
+      (atopService true)
+      (atopRotateTimer true)
+      (atopacctService true)
+      (netatop true)
+      (atopgpu false)
+    ];
+  };
+  atopgpu = makeTest {
+    name = "atop-atopgpu";
+    nodes.machine = {
+      programs.atop = {
+        enable = true;
+        atopgpu.enable = true;
+      };
+    };
+    testScript = with assertions; builtins.concatStringsSep "\n" [
+      version
+      (atoprc "")
+      (wrapper false)
+      (atopService true)
+      (atopRotateTimer true)
+      (atopacctService true)
+      (netatop false)
+      (atopgpu true)
+    ];
+  };
+  everything = makeTest {
+    name = "atop-everything";
+    nodes.machine = {
+      programs.atop = {
+        enable = true;
+        settings = {
+          flags = "faf1";
+          interval = 2;
+        };
+        setuidWrapper.enable = true;
+        netatop.enable = true;
+        atopgpu.enable = true;
+      };
+    };
+    testScript = with assertions; builtins.concatStringsSep "\n" [
+      version
+      (atoprc "flags faf1\\ninterval 2\\n")
+      (wrapper true)
+      (atopService true)
+      (atopRotateTimer true)
+      (atopacctService true)
+      (netatop true)
+      (atopgpu true)
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/tests/atuin.nix b/nixpkgs/nixos/tests/atuin.nix
new file mode 100644
index 000000000000..3164c83c683d
--- /dev/null
+++ b/nixpkgs/nixos/tests/atuin.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  testPort = 8888;
+  testUser = "testerman";
+  testPass = "password";
+  testEmail = "test.testerman@test.com";
+in
+{
+  name = "atuin";
+  meta.maintainers = with lib.maintainers; [ devusb ];
+
+  nodes = {
+    server =
+      { ... }:
+      {
+        services.postgresql.enable = true;
+
+        services.atuin = {
+          enable = true;
+          port = testPort;
+          host = "0.0.0.0";
+          openFirewall = true;
+          openRegistration = true;
+        };
+      };
+
+    client =
+      { ... }:
+      { };
+
+  };
+
+  testScript = with pkgs; ''
+    start_all()
+
+    # wait for atuin server startup
+    server.wait_for_unit("atuin.service")
+    server.wait_for_open_port(${toString testPort})
+
+    # configure atuin client on server node
+    server.execute("mkdir -p ~/.config/atuin")
+    server.execute("echo 'sync_address = \"http://localhost:${toString testPort}\"' > ~/.config/atuin/config.toml")
+
+    # register with atuin server on server node
+    server.succeed("${atuin}/bin/atuin register -u ${testUser} -p ${testPass} -e ${testEmail}")
+    _, key = server.execute("${atuin}/bin/atuin key")
+
+    # store test record in atuin server and sync
+    server.succeed("ATUIN_SESSION=$(${atuin}/bin/atuin uuid) ${atuin}/bin/atuin history start 'shazbot'")
+    server.succeed("${atuin}/bin/atuin sync")
+
+    # configure atuin client on client node
+    client.execute("mkdir -p ~/.config/atuin")
+    client.execute("echo 'sync_address = \"http://server:${toString testPort}\"' > ~/.config/atuin/config.toml")
+
+    # log in to atuin server on client node
+    client.succeed(f"${atuin}/bin/atuin login -u ${testUser} -p ${testPass} -k \"{key}\"")
+
+    # pull records from atuin server
+    client.succeed("${atuin}/bin/atuin sync -f")
+
+    # check for test record
+    client.succeed("ATUIN_SESSION=$(${atuin}/bin/atuin uuid) ${atuin}/bin/atuin history list | grep shazbot")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/audiobookshelf.nix b/nixpkgs/nixos/tests/audiobookshelf.nix
new file mode 100644
index 000000000000..64bd415160ee
--- /dev/null
+++ b/nixpkgs/nixos/tests/audiobookshelf.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+with lib;
+
+{
+  name = "audiobookshelf";
+  meta.maintainers = with maintainers; [ wietsedv ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      services.audiobookshelf = {
+        enable = true;
+        port = 1234;
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("audiobookshelf.service")
+    machine.wait_for_open_port(1234)
+    machine.succeed("curl --fail http://localhost:1234/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/auth-mysql.nix b/nixpkgs/nixos/tests/auth-mysql.nix
new file mode 100644
index 000000000000..0ed4b050a69a
--- /dev/null
+++ b/nixpkgs/nixos/tests/auth-mysql.nix
@@ -0,0 +1,177 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  dbUser = "nixos_auth";
+  dbPassword = "topsecret123";
+  dbName = "auth";
+
+  mysqlUsername = "mysqltest";
+  mysqlPassword = "topsecretmysqluserpassword123";
+  mysqlGroup = "mysqlusers";
+
+  localUsername = "localtest";
+  localPassword = "topsecretlocaluserpassword123";
+
+  mysqlInit = pkgs.writeText "mysqlInit" ''
+      CREATE USER '${dbUser}'@'localhost' IDENTIFIED BY '${dbPassword}';
+      CREATE DATABASE ${dbName};
+      GRANT ALL PRIVILEGES ON ${dbName}.* TO '${dbUser}'@'localhost';
+      FLUSH PRIVILEGES;
+
+      USE ${dbName};
+      CREATE TABLE `groups` (
+        rowid int(11) NOT NULL auto_increment,
+        gid int(11) NOT NULL,
+        name char(255) NOT NULL,
+        PRIMARY KEY (rowid)
+      );
+
+      CREATE TABLE `users` (
+        name varchar(255) NOT NULL,
+        uid int(11) NOT NULL auto_increment,
+        gid int(11) NOT NULL,
+        password varchar(255) NOT NULL,
+        PRIMARY KEY (uid),
+        UNIQUE (name)
+      ) AUTO_INCREMENT=5000;
+
+      INSERT INTO `users` (name, uid, gid, password) VALUES
+      ('${mysqlUsername}', 5000, 5000, SHA2('${mysqlPassword}', 256));
+      INSERT INTO `groups` (name, gid) VALUES ('${mysqlGroup}', 5000);
+    '';
+in
+{
+  name = "auth-mysql";
+  meta.maintainers = with lib.maintainers; [ netali ];
+
+  nodes.machine =
+    { ... }:
+    {
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        settings.mysqld.bind-address = "127.0.0.1";
+        initialScript = mysqlInit;
+      };
+
+      users.users.${localUsername} = {
+        isNormalUser = true;
+        password = localPassword;
+      };
+
+      security.pam.services.login.makeHomeDir = true;
+
+      users.mysql = {
+        enable = true;
+        host = "127.0.0.1";
+        user = dbUser;
+        database = dbName;
+        passwordFile = "${builtins.toFile "dbPassword" dbPassword}";
+        pam = {
+          table = "users";
+          userColumn = "name";
+          passwordColumn = "password";
+          passwordCrypt = "sha256";
+          disconnectEveryOperation = true;
+        };
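+        # The NSS lookup queries below must return passwd/shadow/group-shaped rows; the
+        # %1$s / %1$u placeholders are substituted with the name or numeric id being looked up.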
+        nss = {
+          getpwnam = ''
+            SELECT name, 'x', uid, gid, name, CONCAT('/home/', name), "/run/current-system/sw/bin/bash" \
+            FROM users \
+            WHERE name='%1$s' \
+            LIMIT 1
+          '';
+          getpwuid = ''
+            SELECT name, 'x', uid, gid, name, CONCAT('/home/', name), "/run/current-system/sw/bin/bash" \
+            FROM users \
+            WHERE uid=%1$u \
+            LIMIT 1
+          '';
+          getspnam = ''
+            SELECT name, password, 1, 0, 99999, 7, 0, -1, 0 \
+            FROM users \
+            WHERE name='%1$s' \
+            LIMIT 1
+          '';
+          getpwent = ''
+            SELECT name, 'x', uid, gid, name, CONCAT('/home/', name), "/run/current-system/sw/bin/bash" \
+            FROM users
+          '';
+          getspent = ''
+            SELECT name, password, 1, 0, 99999, 7, 0, -1, 0 \
+            FROM users
+          '';
+          getgrnam = ''
+            SELECT name, 'x', gid FROM groups WHERE name='%1$s' LIMIT 1
+          '';
+          getgrgid = ''
+            SELECT name, 'x', gid FROM groups WHERE gid='%1$u' LIMIT 1
+          '';
+          getgrent = ''
+            SELECT name, 'x', gid FROM groups
+          '';
+          memsbygid = ''
+            SELECT name FROM users WHERE gid=%1$u
+          '';
+          gidsbymem = ''
+            SELECT gid FROM users WHERE name='%1$s'
+          '';
+        };
+      };
+    };
+
+  testScript = ''
+    def switch_to_tty(tty_number):
+        machine.fail(f"pgrep -f 'agetty.*tty{tty_number}'")
+        machine.send_key(f"alt-f{tty_number}")
+        machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]")
+        machine.wait_for_unit(f"getty@tty{tty_number}.service")
+        machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'")
+
+
+    def try_login(tty_number, username, password):
+        machine.wait_until_tty_matches(tty_number, "login: ")
+        machine.send_chars(f"{username}\n")
+        machine.wait_until_tty_matches(tty_number, f"login: {username}")
+        machine.wait_until_succeeds("pgrep login")
+        machine.wait_until_tty_matches(tty_number, "Password: ")
+        machine.send_chars(f"{password}\n")
+
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("mysql.service")
+    machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+
+    with subtest("Local login"):
+        switch_to_tty("2")
+        try_login("2", "${localUsername}", "${localPassword}")
+
+        machine.wait_until_succeeds("pgrep -u ${localUsername} bash")
+        machine.send_chars("id > local_id.txt\n")
+        machine.wait_for_file("/home/${localUsername}/local_id.txt")
+        machine.succeed("cat /home/${localUsername}/local_id.txt | grep 'uid=1000(${localUsername}) gid=100(users) groups=100(users)'")
+
+    with subtest("Local incorrect login"):
+        switch_to_tty("3")
+        try_login("3", "${localUsername}", "wrongpassword")
+
+        machine.wait_until_tty_matches("3", "Login incorrect")
+        machine.wait_until_tty_matches("3", "login:")
+
+    with subtest("MySQL login"):
+        switch_to_tty("4")
+        try_login("4", "${mysqlUsername}", "${mysqlPassword}")
+
+        machine.wait_until_succeeds("pgrep -u ${mysqlUsername} bash")
+        machine.send_chars("id > mysql_id.txt\n")
+        machine.wait_for_file("/home/${mysqlUsername}/mysql_id.txt")
+        machine.succeed("cat /home/${mysqlUsername}/mysql_id.txt | grep 'uid=5000(${mysqlUsername}) gid=5000(${mysqlGroup}) groups=5000(${mysqlGroup})'")
+
+    with subtest("MySQL incorrect login"):
+        switch_to_tty("5")
+        try_login("5", "${mysqlUsername}", "wrongpassword")
+
+        machine.wait_until_tty_matches("5", "Login incorrect")
+        machine.wait_until_tty_matches("5", "login:")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/authelia.nix b/nixpkgs/nixos/tests/authelia.nix
new file mode 100644
index 000000000000..679c65fea087
--- /dev/null
+++ b/nixpkgs/nixos/tests/authelia.nix
@@ -0,0 +1,169 @@
+# Test Authelia as an auth server for Traefik as a reverse proxy of a local web service
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "authelia";
+  meta.maintainers = with lib.maintainers; [ jk ];
+
+  nodes = {
+    authelia = { config, pkgs, lib, ... }: {
+      services.authelia.instances.testing = {
+        enable = true;
+        secrets.storageEncryptionKeyFile = "/etc/authelia/storageEncryptionKeyFile";
+        secrets.jwtSecretFile = "/etc/authelia/jwtSecretFile";
+        settings = {
+          authentication_backend.file.path = "/etc/authelia/users_database.yml";
+          access_control.default_policy = "one_factor";
+          session.domain = "example.com";
+          storage.local.path = "/tmp/db.sqlite3";
+          notifier.filesystem.filename = "/tmp/notifications.txt";
+        };
+      };
+
+      # These secrets must not be set from Nix in a real deployment, since anything written
+      # this way ends up world-readable in the Nix store. It is done here purely for testing!
+      environment.etc."authelia/storageEncryptionKeyFile" = {
+        mode = "0400";
+        user = "authelia-testing";
+        text = "you_must_generate_a_random_string_of_more_than_twenty_chars_and_configure_this";
+      };
+      environment.etc."authelia/jwtSecretFile" = {
+        mode = "0400";
+        user = "authelia-testing";
+        text = "a_very_important_secret";
+      };
+      environment.etc."authelia/users_database.yml" = {
+        mode = "0400";
+        user = "authelia-testing";
+        text = ''
+          users:
+            bob:
+              disabled: false
+              displayname: bob
+              # the plaintext password is "password"
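+              # (a hash like this one can be generated with authelia's own password-hashing
+              # helper, e.g. the "authelia crypto hash generate argon2" subcommand in recent
+              # releases; exact invocation depends on the authelia version)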
+              password: $argon2id$v=19$m=65536,t=3,p=4$2ohUAfh9yetl+utr4tLcCQ$AsXx0VlwjvNnCsa70u4HKZvFkC8Gwajr2pHGKcND/xs
+              email: bob@jim.com
+              groups:
+                - admin
+                - dev
+        '';
+      };
+
+      services.traefik = {
+        enable = true;
+
+        dynamicConfigOptions = {
+          tls.certificates =
+            let
+              certDir = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+                openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=example.com/CN=auth.example.com/CN=static.example.com' -days 36500
+                mkdir -p $out
+                cp key.pem cert.pem $out
+              '';
+            in
+            [{
+              certFile = "${certDir}/cert.pem";
+              keyFile = "${certDir}/key.pem";
+            }];
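+          # forwardAuth sends each incoming request to Authelia's /api/verify endpoint first;
+          # a 2xx response lets the request through, anything else is returned to the client.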
+          http.middlewares.authelia.forwardAuth = {
+            address = "http://localhost:9091/api/verify?rd=https%3A%2F%2Fauth.example.com%2F";
+            trustForwardHeader = true;
+            authResponseHeaders = [
+              "Remote-User"
+              "Remote-Groups"
+              "Remote-Email"
+              "Remote-Name"
+            ];
+          };
+          http.middlewares.authelia-basic.forwardAuth = {
+            address = "http://localhost:9091/api/verify?auth=basic";
+            trustForwardHeader = true;
+            authResponseHeaders = [
+              "Remote-User"
+              "Remote-Groups"
+              "Remote-Email"
+              "Remote-Name"
+            ];
+          };
+
+          http.routers.simplehttp = {
+            rule = "Host(`static.example.com`)";
+            tls = true;
+            entryPoints = "web";
+            service = "simplehttp";
+          };
+          http.routers.simplehttp-basic-auth = {
+            rule = "Host(`static-basic-auth.example.com`)";
+            tls = true;
+            entryPoints = "web";
+            service = "simplehttp";
+            middlewares = [ "authelia-basic@file" ];
+          };
+
+          http.services.simplehttp = {
+            loadBalancer.servers = [{
+              url = "http://localhost:8000";
+            }];
+          };
+
+          http.routers.authelia = {
+            rule = "Host(`auth.example.com`)";
+            tls = true;
+            entryPoints = "web";
+            service = "authelia@file";
+          };
+
+          http.services.authelia = {
+            loadBalancer.servers = [{
+              url = "http://localhost:9091";
+            }];
+          };
+        };
+
+        staticConfigOptions = {
+          global = {
+            checkNewVersion = false;
+            sendAnonymousUsage = false;
+          };
+
+          entryPoints.web.address = ":443";
+        };
+      };
+
+      systemd.services.simplehttp =
+        let fakeWebPageDir = pkgs.writeTextDir "index.html" "hello"; in
+        {
+          script = "${pkgs.python3}/bin/python -m http.server --directory ${fakeWebPageDir} 8000";
+          serviceConfig.Type = "simple";
+          wantedBy = [ "multi-user.target" ];
+        };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    authelia.wait_for_unit("simplehttp.service")
+    authelia.wait_for_unit("traefik.service")
+    authelia.wait_for_unit("authelia-testing.service")
+    authelia.wait_for_open_port(443)
+    authelia.wait_for_unit("multi-user.target")
+
+    with subtest("Check for authelia"):
+      # expect the login page
+      assert "Login - Authelia", "could not reach authelia" in \
+        authelia.succeed("curl --insecure -sSf -H Host:auth.example.com https://authelia:443/")
+
+    with subtest("Check contacting basic http server via traefik with https works"):
+      assert "hello", "could not reach raw static site" in \
+        authelia.succeed("curl --insecure -sSf -H Host:static.example.com https://authelia:443/")
+
+    with subtest("Test traefik and authelia"):
+      with subtest("No details fail"):
+        authelia.fail("curl --insecure -sSf -H Host:static-basic-auth.example.com https://authelia:443/")
+      with subtest("Incorrect details fail"):
+        authelia.fail("curl --insecure -sSf -u 'bob:wordpass' -H Host:static-basic-auth.example.com https://authelia:443/")
+        authelia.fail("curl --insecure -sSf -u 'alice:password' -H Host:static-basic-auth.example.com https://authelia:443/")
+      with subtest("Correct details pass"):
+        assert "hello", "could not reach authed static site with valid credentials" in \
+          authelia.succeed("curl --insecure -sSf -u 'bob:password' -H Host:static-basic-auth.example.com https://authelia:443/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/avahi.nix b/nixpkgs/nixos/tests/avahi.nix
new file mode 100644
index 000000000000..c53a95903291
--- /dev/null
+++ b/nixpkgs/nixos/tests/avahi.nix
@@ -0,0 +1,79 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+# bool: whether to use networkd in the tests
+, networkd ? false
+} @ args:
+
+# Test whether `avahi-daemon' and `libnss-mdns' work as expected.
+import ./make-test-python.nix {
+  name = "avahi";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes = let
+    cfg = { ... }: {
+      services.avahi = {
+        enable = true;
+        nssmdns = true;
+        publish.addresses = true;
+        publish.domain = true;
+        publish.enable = true;
+        publish.userServices = true;
+        publish.workstation = true;
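+        # reuse the example ssh service definition shipped with the avahi package, so the
+        # _ssh._tcp browse check at the end of the test has something to find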
+        extraServiceFiles.ssh = "${pkgs.avahi}/etc/avahi/services/ssh.service";
+      };
+    } // pkgs.lib.optionalAttrs (networkd) {
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+    };
+  in {
+    one = cfg;
+    two = cfg;
+  };
+
+  testScript = ''
+    start_all()
+
+    # mDNS.
+    one.wait_for_unit("network.target")
+    two.wait_for_unit("network.target")
+
+    one.succeed("avahi-resolve-host-name one.local | tee out >&2")
+    one.succeed('test "`cut -f1 < out`" = one.local')
+    one.succeed("avahi-resolve-host-name two.local | tee out >&2")
+    one.succeed('test "`cut -f1 < out`" = two.local')
+
+    two.succeed("avahi-resolve-host-name one.local | tee out >&2")
+    two.succeed('test "`cut -f1 < out`" = one.local')
+    two.succeed("avahi-resolve-host-name two.local | tee out >&2")
+    two.succeed('test "`cut -f1 < out`" = two.local')
+
+    # Basic DNS-SD.
+    one.succeed("avahi-browse -r -t _workstation._tcp | tee out >&2")
+    one.succeed("test `wc -l < out` -gt 0")
+    two.succeed("avahi-browse -r -t _workstation._tcp | tee out >&2")
+    two.succeed("test `wc -l < out` -gt 0")
+
+    # More DNS-SD.
+    one.execute('avahi-publish -s "This is a test" _test._tcp 123 one=1 >&2 &')
+    one.sleep(5)
+    two.succeed("avahi-browse -r -t _test._tcp | tee out >&2")
+    two.succeed("test `wc -l < out` -gt 0")
+
+    # NSS-mDNS.
+    one.succeed("getent hosts one.local >&2")
+    one.succeed("getent hosts two.local >&2")
+    two.succeed("getent hosts one.local >&2")
+    two.succeed("getent hosts two.local >&2")
+
+    # extra service definitions
+    one.succeed("avahi-browse -r -t _ssh._tcp | tee out >&2")
+    one.succeed("test `wc -l < out` -gt 0")
+    two.succeed("avahi-browse -r -t _ssh._tcp | tee out >&2")
+    two.succeed("test `wc -l < out` -gt 0")
+  '';
+} args
diff --git a/nixpkgs/nixos/tests/babeld.nix b/nixpkgs/nixos/tests/babeld.nix
new file mode 100644
index 000000000000..d4df6f86d089
--- /dev/null
+++ b/nixpkgs/nixos/tests/babeld.nix
@@ -0,0 +1,142 @@
+
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "babeld";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hexa ];
+  };
+
+  nodes =
+    { client = { pkgs, lib, ... }:
+      {
+        virtualisation.vlans = [ 10 ];
+
+        networking = {
+          useDHCP = false;
+          interfaces."eth1" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.10.2"; prefixLength = 24; } ];
+            ipv4.routes = lib.mkForce [ { address = "0.0.0.0"; prefixLength = 0; via = "192.168.10.1"; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:10::2"; prefixLength = 64; } ];
+            ipv6.routes = lib.mkForce [ { address = "::"; prefixLength = 0; via = "2001:db8:10::1"; } ];
+          };
+        };
+      };
+
+      local_router = { pkgs, lib, ... }:
+      {
+        virtualisation.vlans = [ 10 20 ];
+
+        networking = {
+          useDHCP = false;
+          firewall.enable = false;
+
+          interfaces."eth1" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.10.1"; prefixLength = 24; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:10::1"; prefixLength = 64; } ];
+          };
+
+          interfaces."eth2" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.20.1"; prefixLength = 24; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:20::1"; prefixLength = 64; } ];
+          };
+        };
+
+        services.babeld = {
+          enable = true;
+          interfaces.eth2 = {
+            hello-interval = 1;
+            type = "wired";
+          };
+          extraConfig = ''
+            local-port-readwrite 33123
+
+            import-table 254 # main
+            export-table 254 # main
+
+            in ip 192.168.10.0/24 deny
+            in ip 192.168.20.0/24 deny
+            in ip 2001:db8:10::/64 deny
+            in ip 2001:db8:20::/64 deny
+
+            in ip 192.168.30.0/24 allow
+            in ip 2001:db8:30::/64 allow
+
+            in deny
+
+            redistribute local proto 2
+            redistribute local deny
+          '';
+        };
+      };
+      remote_router = { pkgs, lib, ... }:
+      {
+        virtualisation.vlans = [ 20 30 ];
+
+        networking = {
+          useDHCP = false;
+          firewall.enable = false;
+
+          interfaces."eth1" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.20.2"; prefixLength = 24; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:20::2"; prefixLength = 64; } ];
+          };
+
+          interfaces."eth2" = {
+            ipv4.addresses = lib.mkForce [ { address = "192.168.30.1"; prefixLength = 24; } ];
+            ipv6.addresses = lib.mkForce [ { address = "2001:db8:30::1"; prefixLength = 64; } ];
+          };
+        };
+
+        services.babeld = {
+          enable = true;
+          interfaces.eth1 = {
+            hello-interval = 1;
+            type = "wired";
+          };
+          extraConfig = ''
+            local-port-readwrite 33123
+
+            import-table 254 # main
+            export-table 254 # main
+
+            in ip 192.168.20.0/24 deny
+            in ip 192.168.30.0/24 deny
+            in ip 2001:db8:20::/64 deny
+            in ip 2001:db8:30::/64 deny
+
+            in ip 192.168.10.0/24 allow
+            in ip 2001:db8:10::/64 allow
+
+            in deny
+
+            redistribute local proto 2
+            redistribute local deny
+          '';
+        };
+
+      };
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      client.wait_for_unit("network-online.target")
+      local_router.wait_for_unit("network-online.target")
+      remote_router.wait_for_unit("network-online.target")
+
+      local_router.wait_for_unit("babeld.service")
+      remote_router.wait_for_unit("babeld.service")
+
+      local_router.wait_until_succeeds("ip route get 192.168.30.1")
+      local_router.wait_until_succeeds("ip route get 2001:db8:30::1")
+
+      remote_router.wait_until_succeeds("ip route get 192.168.10.1")
+      remote_router.wait_until_succeeds("ip route get 2001:db8:10::1")
+
+      client.succeed("ping -c1 192.168.30.1")
+      client.succeed("ping -c1 2001:db8:30::1")
+
+      remote_router.succeed("ping -c1 192.168.10.2")
+      remote_router.succeed("ping -c1 2001:db8:10::2")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/bazarr.nix b/nixpkgs/nixos/tests/bazarr.nix
new file mode 100644
index 000000000000..aa0550e243ae
--- /dev/null
+++ b/nixpkgs/nixos/tests/bazarr.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+let
+  port = 42069;
+in
+{
+  name = "bazarr";
+  meta.maintainers = with lib.maintainers; [ d-xo ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      services.bazarr = {
+        enable = true;
+        listenPort = port;
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("bazarr.service")
+    machine.wait_for_open_port(${toString port})
+    machine.succeed("curl --fail http://localhost:${toString port}/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/bcachefs.nix b/nixpkgs/nixos/tests/bcachefs.nix
new file mode 100644
index 000000000000..ec3c2427f386
--- /dev/null
+++ b/nixpkgs/nixos/tests/bcachefs.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "bcachefs";
+  meta.maintainers = with pkgs.lib.maintainers; [ Madouura ];
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.emptyDiskImages = [ 4096 ];
+    networking.hostId = "deadbeef";
+    boot.supportedFilesystems = [ "bcachefs" ];
+    environment.systemPackages = with pkgs; [ parted keyutils ];
+  };
+
+  testScript = ''
+    machine.succeed("modprobe bcachefs")
+    machine.succeed("bcachefs version")
+    machine.succeed("ls /dev")
+
+    machine.succeed(
+        "mkdir /tmp/mnt",
+        "udevadm settle",
+        "parted --script /dev/vdb mklabel msdos",
+        "parted --script /dev/vdb -- mkpart primary 1024M 50% mkpart primary 50% -1s",
+        "udevadm settle",
+        "echo password | bcachefs format --encrypted --metadata_replicas 2 --label vtest /dev/vdb1 /dev/vdb2",
+        "echo password | bcachefs unlock -k session /dev/vdb1",
+        "echo password | mount -t bcachefs /dev/vdb1:/dev/vdb2 /tmp/mnt",
+        "udevadm settle",
+        "bcachefs fs usage /tmp/mnt",
+        "umount /tmp/mnt",
+        "udevadm settle",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/beanstalkd.nix b/nixpkgs/nixos/tests/beanstalkd.nix
new file mode 100644
index 000000000000..518f018408ad
--- /dev/null
+++ b/nixpkgs/nixos/tests/beanstalkd.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  pythonEnv = pkgs.python3.withPackages (p: [p.beanstalkc]);
+
+  produce = pkgs.writeScript "produce.py" ''
+    #!${pythonEnv.interpreter}
+    import beanstalkc
+
+    queue = beanstalkc.Connection(host='localhost', port=11300, parse_yaml=False);
+    queue.put(b'this is a job')
+    queue.put(b'this is another job')
+  '';
+
+  consume = pkgs.writeScript "consume.py" ''
+    #!${pythonEnv.interpreter}
+    import beanstalkc
+
+    queue = beanstalkc.Connection(host='localhost', port=11300, parse_yaml=False);
+
+    job = queue.reserve(timeout=0)
+    print(job.body.decode('utf-8'))
+    job.delete()
+  '';
+
+in
+{
+  name = "beanstalkd";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  nodes.machine =
+    { ... }:
+    { services.beanstalkd.enable = true;
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("beanstalkd.service")
+
+    machine.succeed("${produce}")
+    assert "this is a job\n" == machine.succeed(
+        "${consume}"
+    )
+    assert "this is another job\n" == machine.succeed(
+        "${consume}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/bees.nix b/nixpkgs/nixos/tests/bees.nix
new file mode 100644
index 000000000000..3ab9f38ada8f
--- /dev/null
+++ b/nixpkgs/nixos/tests/bees.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "bees";
+
+  nodes.machine = { config, pkgs, ... }: {
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.btrfs-progs}/bin/mkfs.btrfs -f -L aux1 /dev/vdb
+      ${pkgs.btrfs-progs}/bin/mkfs.btrfs -f -L aux2 /dev/vdc
+    '';
+    virtualisation.emptyDiskImages = [ 4096 4096 ];
+    virtualisation.fileSystems = {
+      "/aux1" = { # filesystem configured to be deduplicated
+        device = "/dev/disk/by-label/aux1";
+        fsType = "btrfs";
+      };
+      "/aux2" = { # filesystem not configured to be deduplicated
+        device = "/dev/disk/by-label/aux2";
+        fsType = "btrfs";
+      };
+    };
+    services.beesd.filesystems = {
+      aux1 = {
+        spec = "LABEL=aux1";
+        hashTableSizeMB = 16;
+        verbosity = "debug";
+      };
+    };
+  };
+
+  testScript =
+  let
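+    # helper script: exits 0 only if every dedup-me-{1,2} file under the given path reports
+    # nonzero "Set shared" bytes in `btrfs fi du`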
+    someContentIsShared = loc: pkgs.writeShellScript "some-content-is-shared" ''
+      [[ $(btrfs fi du -s --raw ${lib.escapeShellArg loc}/dedup-me-{1,2} | awk 'BEGIN { count=0; } NR>1 && $3 == 0 { count++ } END { print count }') -eq 0 ]]
+    '';
+  in ''
+    # stop the beesd instance that systemd started at boot, so the test files below can be staged before deduplication runs
+    machine.succeed("systemctl stop beesd@aux1.service")
+
+    machine.succeed(
+        "dd if=/dev/urandom of=/aux1/dedup-me-1 bs=1M count=8",
+        "cp --reflink=never /aux1/dedup-me-1 /aux1/dedup-me-2",
+        "cp --reflink=never /aux1/* /aux2/",
+        "sync",
+    )
+    machine.fail(
+        "${someContentIsShared "/aux1"}",
+        "${someContentIsShared "/aux2"}",
+    )
+    machine.succeed("systemctl start beesd@aux1.service")
+
+    # assert that "Set Shared" column is nonzero
+    machine.wait_until_succeeds(
+        "${someContentIsShared "/aux1"}",
+    )
+    machine.fail("${someContentIsShared "/aux2"}")
+
+    # assert that 16MB hash table size requested was honored
+    machine.succeed(
+        "[[ $(stat -c %s /aux1/.beeshome/beeshash.dat) = $(( 16 * 1024 * 1024)) ]]"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/binary-cache.nix b/nixpkgs/nixos/tests/binary-cache.nix
new file mode 100644
index 000000000000..bc1c6fb9a267
--- /dev/null
+++ b/nixpkgs/nixos/tests/binary-cache.nix
@@ -0,0 +1,60 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "binary-cache";
+  meta.maintainers = with lib.maintainers; [ thomasjm ];
+
+  nodes.machine =
+    { pkgs, ... }: {
+      imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      environment.systemPackages = [ pkgs.python3 ];
+      system.extraDependencies = [ pkgs.hello.inputDerivation ];
+      nix.extraOptions = ''
+        experimental-features = nix-command
+      '';
+    };
+
+  testScript = ''
+    # Build the cache, then remove it from the store
+    cachePath = machine.succeed("nix-build --no-out-link -E 'with import <nixpkgs> {}; mkBinaryCache { rootPaths = [hello]; }'").strip()
+    machine.succeed("cp -r %s/. /tmp/cache" % cachePath)
+    machine.succeed("nix-store --delete " + cachePath)
+
+    # Sanity test of cache structure
+    status, stdout = machine.execute("ls /tmp/cache")
+    cache_files = stdout.split()
+    assert ("nix-cache-info" in cache_files)
+    assert ("nar" in cache_files)
+
+    # Nix store ping should work
+    machine.succeed("nix store ping --store file:///tmp/cache")
+
+    # Cache should contain a .narinfo referring to "hello"
+    grepLogs = machine.succeed("grep -l 'StorePath: /nix/store/[[:alnum:]]*-hello-.*' /tmp/cache/*.narinfo")
+
+    # Get the store path referenced by the .narinfo
+    narInfoFile = grepLogs.strip()
+    narInfoContents = machine.succeed("cat " + narInfoFile)
+    import re
+    match = re.match(r"^StorePath: (/nix/store/[a-z0-9]*-hello-.*)$", narInfoContents, re.MULTILINE)
+    if not match: raise Exception("Couldn't find hello store path in cache")
+    storePath = match[1]
+
+    # Delete the store path
+    machine.succeed("nix-store --delete " + storePath)
+    machine.succeed("[ ! -d %s ] || exit 1" % storePath)
+
+    # Should be able to build hello using the cache
+    logs = machine.succeed("nix-build -A hello '<nixpkgs>' --option require-sigs false --option trusted-substituters file:///tmp/cache --option substituters file:///tmp/cache 2>&1")
+    logLines = logs.split("\n")
+    if not "this path will be fetched" in logLines[0]: raise Exception("Unexpected first log line")
+    def shouldBe(got, desired):
+      if got != desired: raise Exception("Expected '%s' but got '%s'" % (desired, got))
+    shouldBe(logLines[1], "  " + storePath)
+    shouldBe(logLines[2], "copying path '%s' from 'file:///tmp/cache'..." % storePath)
+    shouldBe(logLines[3], storePath)
+
+    # Store path should exist in the store now
+    machine.succeed("[ -d %s ] || exit 1" % storePath)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/bind.nix b/nixpkgs/nixos/tests/bind.nix
new file mode 100644
index 000000000000..15accbd49db4
--- /dev/null
+++ b/nixpkgs/nixos/tests/bind.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix {
+  name = "bind";
+
+  nodes.machine = { pkgs, lib, ... }: {
+    services.bind.enable = true;
+    services.bind.extraOptions = "empty-zones-enable no;";
+    services.bind.zones = lib.singleton {
+      name = ".";
+      master = true;
+      file = pkgs.writeText "root.zone" ''
+        $TTL 3600
+        . IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d )
+        . IN NS ns.example.org.
+
+        ns.example.org. IN A    192.168.0.1
+        ns.example.org. IN AAAA abcd::1
+
+        1.0.168.192.in-addr.arpa IN PTR ns.example.org.
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("bind.service")
+    machine.wait_for_open_port(53)
+    machine.succeed("host 192.168.0.1 127.0.0.1 | grep -qF ns.example.org")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/bird.nix b/nixpkgs/nixos/tests/bird.nix
new file mode 100644
index 000000000000..822a7caea9ba
--- /dev/null
+++ b/nixpkgs/nixos/tests/bird.nix
@@ -0,0 +1,129 @@
+# This test does a basic functionality check for bird2 and demonstrates a use
+# of the preCheckConfig option.
+
+{ system ? builtins.currentSystem
+, pkgs ? import ../.. { inherit system; config = { }; }
+}:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  inherit (pkgs.lib) optionalString;
+
+  makeBird2Host = hostId: { pkgs, ... }: {
+    virtualisation.vlans = [ 1 ];
+
+    environment.systemPackages = with pkgs; [ jq ];
+
+    networking = {
+      useNetworkd = true;
+      useDHCP = false;
+      firewall.enable = false;
+    };
+
+    systemd.network.networks."01-eth1" = {
+      name = "eth1";
+      networkConfig.Address = "10.0.0.${hostId}/24";
+    };
+
+    services.bird2 = {
+      enable = true;
+
+      config = ''
+        log syslog all;
+
+        debug protocols all;
+
+        router id 10.0.0.${hostId};
+
+        protocol device {
+        }
+
+        protocol kernel kernel4 {
+          ipv4 {
+            import none;
+            export all;
+          };
+        }
+
+        protocol static static4 {
+          ipv4;
+          include "static4.conf";
+        }
+
+        protocol ospf v2 ospf4 {
+          ipv4 {
+            export all;
+          };
+          area 0 {
+            interface "eth1" {
+              hello 5;
+              wait 5;
+            };
+          };
+        }
+
+        protocol kernel kernel6 {
+          ipv6 {
+            import none;
+            export all;
+          };
+        }
+
+        protocol static static6 {
+          ipv6;
+          include "static6.conf";
+        }
+
+        protocol ospf v3 ospf6 {
+          ipv6 {
+            export all;
+          };
+          area 0 {
+            interface "eth1" {
+              hello 5;
+              wait 5;
+            };
+          };
+        }
+      '';
+
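+      # static{4,6}.conf are only created at runtime by the tmpfiles rules below, so they do
+      # not exist when the configuration is validated at build time; preCheckConfig drops in
+      # dummy routes so the syntax check still passes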
+      preCheckConfig = ''
+        echo "route 1.2.3.4/32 blackhole;" > static4.conf
+        echo "route fd00::/128 blackhole;" > static6.conf
+      '';
+    };
+
+    systemd.tmpfiles.rules = [
+      "f /etc/bird/static4.conf - - - - route 10.10.0.${hostId}/32 blackhole;"
+      "f /etc/bird/static6.conf - - - - route fdff::${hostId}/128 blackhole;"
+    ];
+  };
+in
+makeTest {
+  name = "bird2";
+
+  nodes.host1 = makeBird2Host "1";
+  nodes.host2 = makeBird2Host "2";
+
+  testScript = ''
+    start_all()
+
+    host1.wait_for_unit("bird2.service")
+    host2.wait_for_unit("bird2.service")
+    host1.succeed("systemctl reload bird2.service")
+
+    with subtest("Waiting for advertised IPv4 routes"):
+      host1.wait_until_succeeds("ip --json r | jq -e 'map(select(.dst == \"10.10.0.2\")) | any'")
+      host2.wait_until_succeeds("ip --json r | jq -e 'map(select(.dst == \"10.10.0.1\")) | any'")
+    with subtest("Waiting for advertised IPv6 routes"):
+      host1.wait_until_succeeds("ip --json -6 r | jq -e 'map(select(.dst == \"fdff::2\")) | any'")
+      host2.wait_until_succeeds("ip --json -6 r | jq -e 'map(select(.dst == \"fdff::1\")) | any'")
+
+    with subtest("Check fake routes in preCheckConfig do not exists"):
+      host1.fail("ip --json r | jq -e 'map(select(.dst == \"1.2.3.4\")) | any'")
+      host2.fail("ip --json r | jq -e 'map(select(.dst == \"1.2.3.4\")) | any'")
+
+      host1.fail("ip --json -6 r | jq -e 'map(select(.dst == \"fd00::\")) | any'")
+      host2.fail("ip --json -6 r | jq -e 'map(select(.dst == \"fd00::\")) | any'")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/birdwatcher.nix b/nixpkgs/nixos/tests/birdwatcher.nix
new file mode 100644
index 000000000000..5c41b4d0e4f3
--- /dev/null
+++ b/nixpkgs/nixos/tests/birdwatcher.nix
@@ -0,0 +1,94 @@
+# This test does a basic functionality check for birdwatcher
+
+{ system ? builtins.currentSystem
+, pkgs ? import ../.. { inherit system; config = { }; }
+}:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  inherit (pkgs.lib) optionalString;
+in
+makeTest {
+  name = "birdwatcher";
+  nodes = {
+    host1 = {
+      environment.systemPackages = with pkgs; [ jq ];
+      services.bird2 = {
+        enable = true;
+        config = ''
+          log syslog all;
+
+          debug protocols all;
+
+          router id 10.0.0.1;
+
+          protocol device {
+          }
+
+          protocol kernel kernel4 {
+            ipv4 {
+              import none;
+              export all;
+            };
+          }
+
+          protocol kernel kernel6 {
+            ipv6 {
+              import none;
+              export all;
+            };
+          }
+        '';
+      };
+      services.birdwatcher = {
+        enable = true;
+        settings = ''
+          [server]
+          allow_from = []
+          allow_uncached = false
+          modules_enabled = ["status",
+                             "protocols",
+                             "protocols_bgp",
+                             "protocols_short",
+                             "routes_protocol",
+                             "routes_peer",
+                             "routes_table",
+                             "routes_table_filtered",
+                             "routes_table_peer",
+                             "routes_filtered",
+                             "routes_prefixed",
+                             "routes_noexport",
+                             "routes_pipe_filtered_count",
+                             "routes_pipe_filtered"
+                            ]
+          [status]
+          reconfig_timestamp_source = "bird"
+          reconfig_timestamp_match = "# created: (.*)"
+          filter_fields = []
+          [bird]
+          listen = "0.0.0.0:29184"
+          config = "/etc/bird/bird2.conf"
+          birdc  = "${pkgs.bird}/bin/birdc"
+          ttl = 5 # time to live (in minutes) for caching of cli output
+          [parser]
+          filter_fields = []
+          [cache]
+          use_redis = false # if not using redis cache, activate housekeeping to save memory!
+          [housekeeping]
+          interval = 5
+          force_release_memory = true
+        '';
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    host1.wait_for_unit("bird2.service")
+    host1.wait_for_unit("birdwatcher.service")
+    host1.wait_for_open_port(29184)
+    host1.succeed("curl http://[::]:29184/status | jq -r .status.message | grep 'Daemon is up and running'")
+    host1.succeed("curl http://[::]:29184/protocols | jq -r .protocols.device1.state | grep 'up'")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/bitcoind.nix b/nixpkgs/nixos/tests/bitcoind.nix
new file mode 100644
index 000000000000..7726a23d853e
--- /dev/null
+++ b/nixpkgs/nixos/tests/bitcoind.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "bitcoind";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [ _1000101 ];
+  };
+
+  nodes.machine = { ... }: {
+    services.bitcoind."mainnet" = {
+      enable = true;
+      rpc = {
+        port = 8332;
+        users.rpc.passwordHMAC = "acc2374e5f9ba9e62a5204d3686616cf$53abdba5e67a9005be6a27ca03a93ce09e58854bc2b871523a0d239a72968033";
+        users.rpc2.passwordHMAC = "1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225";
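+        # passwordHMAC uses bitcoind's rpcauth format "<salt>$<hex hmac>", where the hmac is,
+        # roughly, HMAC-SHA256 keyed with the salt over the plaintext password ("rpc" here, as
+        # used by the curl calls below); upstream ships share/rpcauth/rpcauth.py to generate it.
+        # Sketch of the derivation (illustrative only, not executed by this test):
+        #   python3 -c 'import hmac, hashlib; print(hmac.new(b"<salt>", b"rpc", hashlib.sha256).hexdigest())'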
+      };
+    };
+
+    environment.etc."test.blank".text = "";
+    services.bitcoind."testnet" = {
+      enable = true;
+      configFile = "/etc/test.blank";
+      testnet = true;
+      rpc = {
+        port = 18332;
+      };
+      extraCmdlineOptions = [ "-rpcuser=rpc" "-rpcpassword=rpc" "-rpcauth=rpc2:1495e4a3ad108187576c68f7f9b5ddc5$accce0881c74aa01bb8960ff3bdbd39f607fd33178147679e055a4ac35f53225" ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("bitcoind-mainnet.service")
+    machine.wait_for_unit("bitcoind-testnet.service")
+
+    machine.wait_until_succeeds(
+        'curl --fail --user rpc:rpc --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:8332 |  grep \'"chain":"main"\' '
+    )
+    machine.wait_until_succeeds(
+        'curl --fail --user rpc2:rpc2 --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:8332 |  grep \'"chain":"main"\' '
+    )
+    machine.wait_until_succeeds(
+        'curl --fail --user rpc:rpc --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:18332 |  grep \'"chain":"test"\' '
+    )
+    machine.wait_until_succeeds(
+        'curl --fail --user rpc2:rpc2 --data-binary \'{"jsonrpc": "1.0", "id":"curltest", "method": "getblockchaininfo", "params": [] }\' -H \'content-type: text/plain;\' localhost:18332 |  grep \'"chain":"test"\' '
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/bittorrent.nix b/nixpkgs/nixos/tests/bittorrent.nix
new file mode 100644
index 000000000000..4a73fea6a09d
--- /dev/null
+++ b/nixpkgs/nixos/tests/bittorrent.nix
@@ -0,0 +1,164 @@
+# This test runs a Bittorrent tracker on one machine, and verifies
+# that two client machines can download the torrent using
+# `transmission'.  The first client (behind a NAT router) downloads
+# from the initial seeder running on the tracker.  Then we kill the
+# initial seeder.  The second client downloads from the first client,
+# which only works if the first client successfully uses the UPnP-IGD
+# protocol to poke a hole in the NAT.
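+#
+# (The NAT mapping could in principle also be requested by hand from client1 with the bundled
+# miniupnpc tool, e.g. "upnpc -a 192.168.3.2 51413 51413 TCP" for transmission's default peer
+# port; illustrative only, since the test relies on transmission's built-in UPnP support.)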
+
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+
+  # Some random file to serve.
+  file = pkgs.hello.src;
+
+  internalRouterAddress = "192.168.3.1";
+  internalClient1Address = "192.168.3.2";
+  externalRouterAddress = "80.100.100.1";
+  externalClient2Address = "80.100.100.2";
+  externalTrackerAddress = "80.100.100.3";
+
+  download-dir = "/var/lib/transmission/Downloads";
+  transmissionConfig = { ... }: {
+    environment.systemPackages = [ pkgs.transmission ];
+    services.transmission = {
+      enable = true;
+      settings = {
+        dht-enabled = false;
+        message-level = 2;
+        inherit download-dir;
+      };
+    };
+  };
+in
+
+{
+  name = "bittorrent";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ domenkozar eelco rob bobvanderlinden ];
+  };
+
+  nodes = {
+    tracker = { pkgs, ... }: {
+      imports = [ transmissionConfig ];
+
+      virtualisation.vlans = [ 1 ];
+      networking.firewall.enable = false;
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalTrackerAddress; prefixLength = 24; }
+      ];
+
+      # We need Apache on the tracker to serve the torrents.
+      services.httpd = {
+        enable = true;
+        virtualHosts = {
+          "torrentserver.org" = {
+            adminAddr = "foo@example.org";
+            documentRoot = "/tmp";
+          };
+        };
+      };
+      services.opentracker.enable = true;
+    };
+
+    router = { pkgs, nodes, ... }: {
+      virtualisation.vlans = [ 1 2 ];
+      networking.nat.enable = true;
+      networking.nat.internalInterfaces = [ "eth2" ];
+      networking.nat.externalInterface = "eth1";
+      networking.firewall.enable = true;
+      networking.firewall.trustedInterfaces = [ "eth2" ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalRouterAddress; prefixLength = 24; }
+      ];
+      networking.interfaces.eth2.ipv4.addresses = [
+        { address = internalRouterAddress; prefixLength = 24; }
+      ];
+      services.miniupnpd = {
+        enable = true;
+        externalInterface = "eth1";
+        internalIPs = [ "eth2" ];
+        appendConfig = ''
+          ext_ip=${externalRouterAddress}
+        '';
+      };
+    };
+
+    client1 = { pkgs, nodes, ... }: {
+      imports = [ transmissionConfig ];
+      environment.systemPackages = [ pkgs.miniupnpc ];
+
+      virtualisation.vlans = [ 2 ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = internalClient1Address; prefixLength = 24; }
+      ];
+      networking.defaultGateway = internalRouterAddress;
+      networking.firewall.enable = false;
+    };
+
+    client2 = { pkgs, ... }: {
+      imports = [ transmissionConfig ];
+
+      virtualisation.vlans = [ 1 ];
+      networking.interfaces.eth0.ipv4.addresses = [];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = externalClient2Address; prefixLength = 24; }
+      ];
+      networking.firewall.enable = false;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+      start_all()
+
+      # Wait for network and miniupnpd.
+      router.wait_for_unit("network-online.target")
+      router.wait_for_unit("miniupnpd")
+
+      # Create the torrent.
+      tracker.succeed("mkdir ${download-dir}/data")
+      tracker.succeed(
+          "cp ${file} ${download-dir}/data/test.tar.bz2"
+      )
+      tracker.succeed(
+          "transmission-create ${download-dir}/data/test.tar.bz2 --private --tracker http://${externalTrackerAddress}:6969/announce --outfile /tmp/test.torrent"
+      )
+      tracker.succeed("chmod 644 /tmp/test.torrent")
+
+      # Start the tracker.  !!! use a less crappy tracker
+      tracker.wait_for_unit("network-online.target")
+      tracker.wait_for_unit("opentracker.service")
+      tracker.wait_for_open_port(6969)
+
+      # Start the initial seeder.
+      tracker.succeed(
+          "transmission-remote --add /tmp/test.torrent --no-portmap --no-dht --download-dir ${download-dir}/data"
+      )
+
+      # Now we should be able to download from the client behind the NAT.
+      tracker.wait_for_unit("httpd")
+      client1.wait_for_unit("network-online.target")
+      client1.succeed("transmission-remote --add http://${externalTrackerAddress}/test.torrent >&2 &")
+      client1.wait_for_file("${download-dir}/test.tar.bz2")
+      client1.succeed(
+          "cmp ${download-dir}/test.tar.bz2 ${file}"
+      )
+
+      # Bring down the initial seeder.
+      tracker.stop_job("transmission")
+
+      # Now download from the second client.  This can only succeed if
+      # the first client created a NAT hole in the router.
+      client2.wait_for_unit("network-online.target")
+      client2.succeed(
+          "transmission-remote --add http://${externalTrackerAddress}/test.torrent --no-portmap --no-dht >&2 &"
+      )
+      client2.wait_for_file("${download-dir}/test.tar.bz2")
+      client2.succeed(
+          "cmp ${download-dir}/test.tar.bz2 ${file}"
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/blockbook-frontend.nix b/nixpkgs/nixos/tests/blockbook-frontend.nix
new file mode 100644
index 000000000000..dca4f2f53cc1
--- /dev/null
+++ b/nixpkgs/nixos/tests/blockbook-frontend.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "blockbook-frontend";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [ _1000101 ];
+  };
+
+  nodes.machine = { ... }: {
+    services.blockbook-frontend."test" = {
+      enable = true;
+    };
+    services.bitcoind.mainnet = {
+      enable = true;
+      rpc = {
+        port = 8030;
+        users.rpc.passwordHMAC = "acc2374e5f9ba9e62a5204d3686616cf$53abdba5e67a9005be6a27ca03a93ce09e58854bc2b871523a0d239a72968033";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("blockbook-frontend-test.service")
+
+    machine.wait_for_open_port(9030)
+
+    machine.succeed("curl -sSfL http://localhost:9030 | grep 'Blockbook'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/blocky.nix b/nixpkgs/nixos/tests/blocky.nix
new file mode 100644
index 000000000000..18e7f45e1c73
--- /dev/null
+++ b/nixpkgs/nixos/tests/blocky.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix {
+  name = "blocky";
+
+  nodes = {
+    server = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.dnsutils ];
+      services.blocky = {
+        enable = true;
+
+        settings = {
+          customDNS = {
+            mapping = {
+              "printer.lan" = "192.168.178.3,2001:0db8:85a3:08d3:1319:8a2e:0370:7344";
+            };
+          };
+          upstream = {
+            default = [ "8.8.8.8" "1.1.1.1" ];
+          };
+          port = 53;
+          httpPort = 5000;
+          logLevel = "info";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    with subtest("Service test"):
+        server.wait_for_unit("blocky.service")
+        server.wait_for_open_port(53)
+        server.wait_for_open_port(5000)
+        server.succeed("dig @127.0.0.1 +short -x 192.168.178.3 | grep -qF printer.lan")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/boot-stage1.nix b/nixpkgs/nixos/tests/boot-stage1.nix
new file mode 100644
index 000000000000..f07802b8c31e
--- /dev/null
+++ b/nixpkgs/nixos/tests/boot-stage1.nix
@@ -0,0 +1,164 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "boot-stage1";
+
+  nodes.machine = { config, pkgs, lib, ... }: {
+    boot.extraModulePackages = let
+      compileKernelModule = name: source: pkgs.runCommandCC name rec {
+        inherit source;
+        kdev = config.boot.kernelPackages.kernel.dev;
+        kver = config.boot.kernelPackages.kernel.modDirVersion;
+        ksrc = "${kdev}/lib/modules/${kver}/build";
+        hardeningDisable = [ "pic" ];
+        nativeBuildInputs = kdev.moduleBuildDependencies;
+      } ''
+        echo "obj-m += $name.o" > Makefile
+        echo "$source" > "$name.c"
+        make -C "$ksrc" M=$(pwd) modules
+        install -vD "$name.ko" "$out/lib/modules/$kver/$name.ko"
+      '';
+
+      # This spawns a kthread which just waits until it gets a signal and
+      # terminates if that is the case. We want to make sure that nothing during
+      # the boot process kills any kthread by accident, like what happened in
+      # issue #15226.
+      kcanary = compileKernelModule "kcanary" ''
+        #include <linux/version.h>
+        #include <linux/init.h>
+        #include <linux/module.h>
+        #include <linux/kernel.h>
+        #include <linux/kthread.h>
+        #include <linux/sched.h>
+        #include <linux/signal.h>
+        #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0)
+        #include <linux/sched/signal.h>
+        #endif
+
+        MODULE_LICENSE("GPL");
+
+        struct task_struct *canaryTask;
+
+        static int kcanary(void *nothing)
+        {
+          allow_signal(SIGINT);
+          allow_signal(SIGTERM);
+          allow_signal(SIGKILL);
+          while (!kthread_should_stop()) {
+            set_current_state(TASK_INTERRUPTIBLE);
+            schedule_timeout_interruptible(msecs_to_jiffies(100));
+            if (signal_pending(current)) break;
+          }
+          return 0;
+        }
+
+        static int kcanaryInit(void)
+        {
+          kthread_run(&kcanary, NULL, "kcanary");
+          return 0;
+        }
+
+        static void kcanaryExit(void)
+        {
+          kthread_stop(canaryTask);
+        }
+
+        module_init(kcanaryInit);
+        module_exit(kcanaryExit);
+      '';
+
+    in lib.singleton kcanary;
+
+    boot.initrd.kernelModules = [ "kcanary" ];
+
+    boot.initrd.extraUtilsCommands = let
+      compile = name: source: pkgs.runCommandCC name { inherit source; } ''
+        mkdir -p "$out/bin"
+        echo "$source" | gcc -Wall -o "$out/bin/$name" -xc -
+      '';
+
+      daemonize = name: source: compile name ''
+        #include <stdio.h>
+        #include <unistd.h>
+
+        void runSource(void) {
+        ${source}
+        }
+
+        int main(void) {
+          if (fork() > 0) return 0;
+          setsid();
+          runSource();
+          return 1;
+        }
+      '';
+
+      mkCmdlineCanary = { name, cmdline ? "", source ? "" }: (daemonize name ''
+        char *argv[] = {"${cmdline}", NULL};
+        execvp("${name}-child", argv);
+      '') // {
+        child = compile "${name}-child" ''
+          #include <stdio.h>
+          #include <unistd.h>
+
+          int main(void) {
+            ${source}
+            while (1) sleep(1);
+            return 1;
+          }
+        '';
+      };
+
+      copyCanaries = lib.concatMapStrings (canary: ''
+        ${lib.optionalString (canary ? child) ''
+          copy_bin_and_libs "${canary.child}/bin/${canary.child.name}"
+        ''}
+        copy_bin_and_libs "${canary}/bin/${canary.name}"
+      '');
+
+    in copyCanaries [
+      # Simple canary process which just sleeps forever and should be killed by
+      # stage 2.
+      (daemonize "canary1" "while (1) sleep(1);")
+
+      # We want this canary process to try mimicking a kthread using a cmdline
+      # with a zero length so we can make sure that the process is properly
+      # killed in stage 1.
+      (mkCmdlineCanary {
+        name = "canary2";
+        source = ''
+          FILE *f;
+          f = fopen("/run/canary2.pid", "w");
+          fprintf(f, "%d\n", getpid());
+          fclose(f);
+        '';
+      })
+
+      # This canary process mimics a storage daemon, which we do NOT want to be
+      # killed before going into stage 2. For more on root storage daemons, see:
+      # https://www.freedesktop.org/wiki/Software/systemd/RootStorageDaemons/
+      (mkCmdlineCanary {
+        name = "canary3";
+        cmdline = "@canary3";
+      })
+    ];
+
+    boot.initrd.postMountCommands = ''
+      canary1
+      canary2
+      canary3
+      # Make sure canary 2's pidfile has been written, so that we can still look up
+      # its former pid after the killing spree that follows later in stage 1.
+      while [ ! -s /run/canary2.pid ]; do sleep 0.1; done
+    '';
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("test -s /run/canary2.pid")
+    machine.fail("pgrep -a canary1")
+    machine.fail("kill -0 $(< /run/canary2.pid)")
+    machine.succeed('pgrep -a -f "^@canary3$"')
+    machine.succeed('pgrep -a -f "^kcanary$"')
+  '';
+
+  meta.maintainers = with pkgs.lib.maintainers; [ aszlig ];
+})
diff --git a/nixpkgs/nixos/tests/boot.nix b/nixpkgs/nixos/tests/boot.nix
new file mode 100644
index 000000000000..ec2a9f6527c9
--- /dev/null
+++ b/nixpkgs/nixos/tests/boot.nix
@@ -0,0 +1,148 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  qemu-common = import ../lib/qemu-common.nix { inherit (pkgs) lib pkgs; };
+
+  iso =
+    (import ../lib/eval-config.nix {
+      inherit system;
+      modules = [
+        ../modules/installer/cd-dvd/installation-cd-minimal.nix
+        ../modules/testing/test-instrumentation.nix
+      ];
+    }).config.system.build.isoImage;
+
+  sd =
+    (import ../lib/eval-config.nix {
+      inherit system;
+      modules = [
+        ../modules/installer/sd-card/sd-image-x86_64.nix
+        ../modules/testing/test-instrumentation.nix
+        { sdImage.compressImage = false; }
+      ];
+    }).config.system.build.sdImage;
+
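+  # render an attrset of strings as a Python dict literal, for splicing into create_machine(...) below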
+  pythonDict = params: "\n    {\n        ${concatStringsSep ",\n        " (mapAttrsToList (name: param: "\"${name}\": \"${param}\"") params)},\n    }\n";
+
+  makeBootTest = name: extraConfig:
+    let
+      machineConfig = pythonDict ({
+        qemuBinary = qemu-common.qemuBinary pkgs.qemu_test;
+        qemuFlags = "-m 768";
+      } // extraConfig);
+    in
+      makeTest {
+        name = "boot-" + name;
+        nodes = { };
+        testScript =
+          ''
+            machine = create_machine(${machineConfig})
+            machine.start()
+            machine.wait_for_unit("multi-user.target")
+            machine.succeed("nix store verify --no-trust -r --option experimental-features nix-command /run/current-system")
+
+            with subtest("Check whether the channel got installed correctly"):
+                machine.succeed("nix-instantiate --dry-run '<nixpkgs>' -A hello")
+                machine.succeed("nix-env --dry-run -iA nixos.procps")
+
+            machine.shutdown()
+          '';
+      };
+
+  makeNetbootTest = name: extraConfig:
+    let
+      config = (import ../lib/eval-config.nix {
+          inherit system;
+          modules =
+            [ ../modules/installer/netboot/netboot.nix
+              ../modules/testing/test-instrumentation.nix
+              { key = "serial"; }
+            ];
+        }).config;
+      ipxeBootDir = pkgs.symlinkJoin {
+        name = "ipxeBootDir";
+        paths = [
+          config.system.build.netbootRamdisk
+          config.system.build.kernel
+          config.system.build.netbootIpxeScript
+        ];
+      };
+      machineConfig = pythonDict ({
+        qemuBinary = qemu-common.qemuBinary pkgs.qemu_test;
+        qemuFlags = "-boot order=n -m 2000";
+        netBackendArgs = "tftp=${ipxeBootDir},bootfile=netboot.ipxe";
+      } // extraConfig);
+    in
+      makeTest {
+        name = "boot-netboot-" + name;
+        nodes = { };
+        testScript = ''
+            machine = create_machine(${machineConfig})
+            machine.start()
+            machine.wait_for_unit("multi-user.target")
+            machine.shutdown()
+          '';
+      };
+  uefiBinary = {
+    x86_64-linux = "${pkgs.OVMF.fd}/FV/OVMF.fd";
+    aarch64-linux = "${pkgs.OVMF.fd}/FV/QEMU_EFI.fd";
+  }.${pkgs.stdenv.hostPlatform.system};
+in {
+    uefiCdrom = makeBootTest "uefi-cdrom" {
+      cdrom = "${iso}/iso/${iso.isoName}";
+      bios = uefiBinary;
+    };
+
+    uefiUsb = makeBootTest "uefi-usb" {
+      usb = "${iso}/iso/${iso.isoName}";
+      bios = uefiBinary;
+    };
+
+    uefiNetboot = makeNetbootTest "uefi" {
+      bios = uefiBinary;
+      # Custom ROM is needed for EFI PXE boot. I failed to understand exactly why, because QEMU should still use iPXE for EFI.
+      netFrontendArgs = "romfile=${pkgs.ipxe}/ipxe.efirom";
+    };
+} // optionalAttrs (pkgs.stdenv.hostPlatform.system == "x86_64-linux") {
+    biosCdrom = makeBootTest "bios-cdrom" {
+      cdrom = "${iso}/iso/${iso.isoName}";
+    };
+
+    biosUsb = makeBootTest "bios-usb" {
+      usb = "${iso}/iso/${iso.isoName}";
+    };
+
+    biosNetboot = makeNetbootTest "bios" {};
+
+    ubootExtlinux = let
+      sdImage = "${sd}/sd-image/${sd.imageName}";
+      mutableImage = "/tmp/linked-image.qcow2";
+
+      machineConfig = pythonDict {
+        bios = "${pkgs.ubootQemuX86}/u-boot.rom";
+        qemuFlags = "-m 768 -machine type=pc,accel=tcg -drive file=${mutableImage},if=ide,format=qcow2";
+      };
+    in makeTest {
+      name = "boot-uboot-extlinux";
+      nodes = { };
+      testScript = ''
+        import os
+
+        # Create a mutable linked image backed by the read-only SD image
+        if os.system("qemu-img create -f qcow2 -F raw -b ${sdImage} ${mutableImage}") != 0:
+            raise RuntimeError("Could not create mutable linked image")
+
+        machine = create_machine(${machineConfig})
+        machine.start()
+        machine.wait_for_unit("multi-user.target")
+        machine.succeed("nix store verify -r --no-trust --option experimental-features nix-command /run/current-system")
+        machine.shutdown()
+      '';
+    };
+}
diff --git a/nixpkgs/nixos/tests/bootspec.nix b/nixpkgs/nixos/tests/bootspec.nix
new file mode 100644
index 000000000000..9295500422a9
--- /dev/null
+++ b/nixpkgs/nixos/tests/bootspec.nix
@@ -0,0 +1,172 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  baseline = {
+    virtualisation.useBootLoader = true;
+  };
+  grub = {
+    boot.loader.grub.enable = true;
+  };
+  systemd-boot = {
+    boot.loader.systemd-boot.enable = true;
+  };
+  uefi = {
+    virtualisation.useEFIBoot = true;
+    boot.loader.efi.canTouchEfiVariables = true;
+    boot.loader.grub.efiSupport = true;
+    environment.systemPackages = [ pkgs.efibootmgr ];
+  };
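+  # Common profile for most of the tests below: UEFI + systemd-boot with bootspec enabled.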
+  standard = {
+    boot.bootspec.enable = true;
+
+    imports = [
+      baseline
+      systemd-boot
+      uefi
+    ];
+  };
+in
+{
+  basic = makeTest {
+    name = "systemd-boot-with-bootspec";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = standard;
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/boot.json")
+    '';
+  };
+
+  grub = makeTest {
+    name = "grub-with-bootspec";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      boot.bootspec.enable = true;
+
+      imports = [
+        baseline
+        grub
+        uefi
+      ];
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/boot.json")
+    '';
+  };
+
+  legacy-boot = makeTest {
+    name = "legacy-boot-with-bootspec";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      boot.bootspec.enable = true;
+
+      imports = [
+        baseline
+        grub
+      ];
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/boot.json")
+    '';
+  };
+
+  # Check that the initrd creates corresponding entries in the bootspec.
+  initrd = makeTest {
+    name = "bootspec-with-initrd";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      imports = [ standard ];
+      environment.systemPackages = [ pkgs.jq ];
+      # initrd is most likely enabled by default already, but we want to make it explicit here.
+      boot.initrd.enable = true;
+    };
+
+    testScript = ''
+      import json
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/boot.json")
+
+      bootspec = json.loads(machine.succeed("jq -r '.\"org.nixos.bootspec.v1\"' /run/current-system/boot.json"))
+
+      assert all(key in bootspec for key in ('initrd', 'initrdSecrets')), "Bootspec should contain initrd and initrdSecrets fields when initrd is enabled"
+    '';
+  };
+
+  # Check that specialisations create corresponding entries in bootspec.
+  specialisation = makeTest {
+    name = "bootspec-with-specialisation";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = {
+      imports = [ standard ];
+      environment.systemPackages = [ pkgs.jq ];
+      specialisation.something.configuration = {};
+    };
+
+    testScript = ''
+      import json
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /run/current-system/boot.json")
+      machine.succeed("test -e /run/current-system/specialisation/something/boot.json")
+
+      sp_in_parent = json.loads(machine.succeed("jq -r '.\"org.nixos.specialisation.v1\".something' /run/current-system/boot.json"))
+      sp_in_fs = json.loads(machine.succeed("cat /run/current-system/specialisation/something/boot.json"))
+
+      assert sp_in_parent['org.nixos.bootspec.v1'] == sp_in_fs['org.nixos.bootspec.v1'], "Bootspecs of the same specialisation are different!"
+    '';
+  };
+
+  # Check that extensions are propagated.
+  extensions = makeTest {
+    name = "bootspec-with-extensions";
+    meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+    nodes.machine = { config, ... }: {
+      imports = [ standard ];
+      environment.systemPackages = [ pkgs.jq ];
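+      # Record a custom vendor extension pointing at the os-release file; the test checks that
+      # boot.json references a file with identical contents.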
+      boot.bootspec.extensions = {
+        "org.nix-tests.product" = {
+          osRelease = config.environment.etc."os-release".source;
+        };
+      };
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      current_os_release = machine.succeed("cat /etc/os-release")
+      bootspec_os_release = machine.succeed("cat $(jq -r '.\"org.nix-tests.product\".osRelease' /run/current-system/boot.json)")
+
+      assert current_os_release == bootspec_os_release, "Filename referenced by extension has unexpected contents"
+    '';
+  };
+
+}
diff --git a/nixpkgs/nixos/tests/borgbackup.nix b/nixpkgs/nixos/tests/borgbackup.nix
new file mode 100644
index 000000000000..4160e727f047
--- /dev/null
+++ b/nixpkgs/nixos/tests/borgbackup.nix
@@ -0,0 +1,230 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  passphrase = "supersecret";
+  dataDir = "/ran:dom/data";
+  excludeFile = "not_this_file";
+  keepFile = "important_file";
+  keepFileData = "important_data";
+  localRepo = "/root/back:up";
+  archiveName = "my_archive";
+  remoteRepo = "borg@server:."; # No need to specify path
+  privateKey = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
+    RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
+    AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
+    9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+  publicKey = ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv root@client
+  '';
+  privateKeyAppendOnly = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLwAAAJC9YTxxvWE8
+    cQAAAAtzc2gtZWQyNTUxOQAAACBacZuz1ELGQdhI7PF6dGFafCDlvh8pSEc4cHjkW0QjLw
+    AAAEAAhV7wTl5dL/lz+PF/d4PnZXuG1Id6L/mFEiGT1tZsuFpxm7PUQsZB2Ejs8Xp0YVp8
+    IOW+HylIRzhweORbRCMvAAAADXJzY2h1ZXR6QGt1cnQ=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+  publicKeyAppendOnly = ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFpxm7PUQsZB2Ejs8Xp0YVp8IOW+HylIRzhweORbRCMv root@client
+  '';
+
+in {
+  name = "borgbackup";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [ dotlambda ];
+  };
+
+  nodes = {
+    client = { ... }: {
+      services.borgbackup.jobs = {
+
+        local = {
+          paths = dataDir;
+          repo = localRepo;
+          preHook = ''
+            # Don't append a timestamp
+            archiveName="${archiveName}"
+          '';
+          encryption = {
+            mode = "repokey";
+            inherit passphrase;
+          };
+          compression = "auto,zlib,9";
+          prune.keep = {
+            within = "1y";
+            yearly = 5;
+          };
+          exclude = [ "*/${excludeFile}" ];
+          postHook = "echo post";
+          startAt = [ ]; # Do not run automatically
+        };
+
+        remote = {
+          paths = dataDir;
+          repo = remoteRepo;
+          encryption.mode = "none";
+          startAt = [ ];
+          environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
+        };
+
+        remoteAppendOnly = {
+          paths = dataDir;
+          repo = remoteRepo;
+          encryption.mode = "none";
+          startAt = [ ];
+          environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly";
+        };
+
+        commandSuccess = {
+          dumpCommand = pkgs.writeScript "commandSuccess" ''
+            echo -n test
+          '';
+          repo = remoteRepo;
+          encryption.mode = "none";
+          startAt = [ ];
+          environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
+        };
+
+        commandFail = {
+          dumpCommand = "${pkgs.coreutils}/bin/false";
+          repo = remoteRepo;
+          encryption.mode = "none";
+          startAt = [ ];
+          environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
+        };
+
+        sleepInhibited = {
+          inhibitsSleep = true;
+          # Blocks indefinitely while "backing up" so that the test can check the sleep inhibitor while the job is running
+          dumpCommand = pkgs.writeScript "sleepInhibited" ''
+            cat /dev/zero
+          '';
+          repo = remoteRepo;
+          encryption.mode = "none";
+          startAt = [ ];
+          environment.BORG_RSH = "ssh -oStrictHostKeyChecking=no -i /root/id_ed25519";
+        };
+
+      };
+    };
+
+    server = { ... }: {
+      services.openssh = {
+        enable = true;
+        settings = {
+          PasswordAuthentication = false;
+          KbdInteractiveAuthentication = false;
+        };
+      };
+
+      services.borgbackup.repos.repo1 = {
+        authorizedKeys = [ publicKey ];
+        path = "/data/borgbackup";
+      };
+
+      # Second repo to make sure the authorizedKeys options are merged correctly
+      services.borgbackup.repos.repo2 = {
+        authorizedKeysAppendOnly = [ publicKeyAppendOnly ];
+        path = "/data/borgbackup";
+        quota = ".5G";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    client.fail('test -d "${remoteRepo}"')
+
+    client.succeed(
+        "cp ${privateKey} /root/id_ed25519"
+    )
+    client.succeed("chmod 0600 /root/id_ed25519")
+    client.succeed(
+        "cp ${privateKeyAppendOnly} /root/id_ed25519.appendOnly"
+    )
+    client.succeed("chmod 0600 /root/id_ed25519.appendOnly")
+
+    client.succeed("mkdir -p ${dataDir}")
+    client.succeed("touch ${dataDir}/${excludeFile}")
+    client.succeed("echo '${keepFileData}' > ${dataDir}/${keepFile}")
+
+    with subtest("local"):
+        borg = "BORG_PASSPHRASE='${passphrase}' borg"
+        client.systemctl("start --wait borgbackup-job-local")
+        client.fail("systemctl is-failed borgbackup-job-local")
+        # Make sure at least one archive has been created
+        assert int(client.succeed("{} list '${localRepo}' | wc -l".format(borg))) > 0
+        # Make sure excludeFile has been excluded
+        client.fail(
+            "{} list '${localRepo}::${archiveName}' | grep -qF '${excludeFile}'".format(borg)
+        )
+        # Make sure keepFile has the correct content
+        client.succeed("{} extract '${localRepo}::${archiveName}'".format(borg))
+        assert "${keepFileData}" in client.succeed("cat ${dataDir}/${keepFile}")
+        # Make sure the same is true when using `borg mount`
+        client.succeed(
+            "mkdir -p /mnt/borg && {} mount '${localRepo}::${archiveName}' /mnt/borg".format(
+                borg
+            )
+        )
+        assert "${keepFileData}" in client.succeed(
+            "cat /mnt/borg/${dataDir}/${keepFile}"
+        )
+
+    with subtest("remote"):
+        borg = "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519' borg"
+        server.wait_for_unit("sshd.service")
+        client.wait_for_unit("network.target")
+        client.systemctl("start --wait borgbackup-job-remote")
+        client.fail("systemctl is-failed borgbackup-job-remote")
+
+        # Make sure we can't access repos other than the specified one
+        client.fail("{} list borg\@server:wrong".format(borg))
+
+        # TODO: Make sure that data is actually deleted
+
+    with subtest("remoteAppendOnly"):
+        borg = (
+            "BORG_RSH='ssh -oStrictHostKeyChecking=no -i /root/id_ed25519.appendOnly' borg"
+        )
+        server.wait_for_unit("sshd.service")
+        client.wait_for_unit("network.target")
+        client.systemctl("start --wait borgbackup-job-remoteAppendOnly")
+        client.fail("systemctl is-failed borgbackup-job-remoteAppendOnly")
+
+        # Make sure we can't access repos other than the specified one
+        client.fail("{} list borg\@server:wrong".format(borg))
+
+        # TODO: Make sure that data is not actually deleted
+
+    with subtest("commandSuccess"):
+        server.wait_for_unit("sshd.service")
+        client.wait_for_unit("network.target")
+        client.systemctl("start --wait borgbackup-job-commandSuccess")
+        client.fail("systemctl is-failed borgbackup-job-commandSuccess")
+        id = client.succeed("borg-job-commandSuccess list | tail -n1 | cut -d' ' -f1").strip()
+        client.succeed(f"borg-job-commandSuccess extract ::{id} stdin")
+        assert "test" == client.succeed("cat stdin")
+
+    with subtest("commandFail"):
+        server.wait_for_unit("sshd.service")
+        client.wait_for_unit("network.target")
+        client.systemctl("start --wait borgbackup-job-commandFail")
+        client.succeed("systemctl is-failed borgbackup-job-commandFail")
+
+    with subtest("sleepInhibited"):
+        server.wait_for_unit("sshd.service")
+        client.wait_for_unit("network.target")
+        client.fail("systemd-inhibit --list | grep -q borgbackup")
+        client.systemctl("start borgbackup-job-sleepInhibited")
+        client.wait_until_succeeds("systemd-inhibit --list | grep -q borgbackup")
+        client.systemctl("stop borgbackup-job-sleepInhibited")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/botamusique.nix b/nixpkgs/nixos/tests/botamusique.nix
new file mode 100644
index 000000000000..ecb79cb69867
--- /dev/null
+++ b/nixpkgs/nixos/tests/botamusique.nix
@@ -0,0 +1,51 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+{
+  name = "botamusique";
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  nodes = {
+    machine = { config, ... }: {
+      networking.extraHosts = ''
+        127.0.0.1 all.api.radio-browser.info
+      '';
+
+      services.murmur = {
+        enable = true;
+        registerName = "NixOS tests";
+      };
+
+      services.botamusique = {
+        enable = true;
+        settings = {
+          server = {
+            channel = "NixOS tests";
+          };
+          bot = {
+            version = false;
+            auto_check_update = false;
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("murmur.service")
+    machine.wait_for_unit("botamusique.service")
+
+    machine.sleep(10)
+
+    machine.wait_until_succeeds(
+        "journalctl -u murmur.service -e | grep -q '<1:botamusique(-1)> Authenticated'"
+    )
+
+    with subtest("Check systemd hardening"):
+        output = machine.execute("systemctl show botamusique.service")[1]
+        machine.log(output)
+        output = machine.execute("systemd-analyze security botamusique.service")[1]
+        machine.log(output)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/bpf.nix b/nixpkgs/nixos/tests/bpf.nix
new file mode 100644
index 000000000000..150ed0958862
--- /dev/null
+++ b/nixpkgs/nixos/tests/bpf.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "bpf";
+  meta.maintainers = with pkgs.lib.maintainers; [ martinetd ];
+
+  nodes.machine = { pkgs, ... }: {
+    programs.bcc.enable = true;
+    environment.systemPackages = with pkgs; [ bpftrace ];
+  };
+
+  testScript = ''
+    ## bcc
+    # syscount -d 1 stops 1s after the probe starts, so it terminates on its own and works as a quick smoke test
+    print(machine.succeed("syscount -d 1"))
+
+    ## bpftrace
+    # list probes
+    machine.succeed("bpftrace -l")
+    # simple BEGIN probe (user probe on bpftrace itself)
+    print(machine.succeed("bpftrace -e 'BEGIN { print(\"ok\"); exit(); }'"))
+    # tracepoint
+    print(machine.succeed("bpftrace -e 'tracepoint:syscalls:sys_enter_* { print(probe); exit() }'"))
+    # kprobe
+    print(machine.succeed("bpftrace -e 'kprobe:schedule { print(probe); exit() }'"))
+    # BTF
+    print(machine.succeed("bpftrace -e 'kprobe:schedule { "
+        "    printf(\"tgid: %d\", ((struct task_struct*) curtask)->tgid); exit() "
+        "}'"))
+    # module BTF (bpftrace >= 0.17)
+    # test is currently disabled on aarch64 as kfunc does not work there yet
+    # https://github.com/iovisor/bpftrace/issues/2496
+    print(machine.succeed("uname -m | grep aarch64 || "
+        "bpftrace -e 'kfunc:nft_trans_alloc_gfp { "
+        "    printf(\"portid: %d\\n\", args->ctx->portid); "
+        "} BEGIN { exit() }'"))
+  '';
+})
diff --git a/nixpkgs/nixos/tests/bpftune.nix b/nixpkgs/nixos/tests/bpftune.nix
new file mode 100644
index 000000000000..c17bbcd11092
--- /dev/null
+++ b/nixpkgs/nixos/tests/bpftune.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+
+  name = "bpftune";
+
+  meta = {
+    maintainers = with lib.maintainers; [ nickcao ];
+  };
+
+  nodes = {
+    machine = { pkgs, ... }: {
+      services.bpftune.enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("bpftune.service")
+    machine.wait_for_console_text("bpftune works")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/breitbandmessung.nix b/nixpkgs/nixos/tests/breitbandmessung.nix
new file mode 100644
index 000000000000..78df0d5017eb
--- /dev/null
+++ b/nixpkgs/nixos/tests/breitbandmessung.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "breitbandmessung";
+  meta.maintainers = with lib.maintainers; [ b4dm4n ];
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [
+      ./common/user-account.nix
+      ./common/x11.nix
+    ];
+
+    # increase screen size to make the whole program visible
+    virtualisation.resolution = { x = 1280; y = 1024; };
+
+    test-support.displayManager.auto.user = "alice";
+
+    environment.systemPackages = with pkgs; [ breitbandmessung ];
+    environment.variables.XAUTHORITY = "/home/alice/.Xauthority";
+
+    # breitbandmessung is unfree
+    nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "breitbandmessung" ];
+  };
+
+  enableOCR = true;
+
+  testScript = ''
+    machine.wait_for_x()
+    machine.execute("su - alice -c breitbandmessung >&2  &")
+    machine.wait_for_window("Breitbandmessung")
+    machine.wait_for_text("Breitbandmessung")
+    machine.wait_for_text("Datenschutz")
+    machine.screenshot("breitbandmessung")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/brscan5.nix b/nixpkgs/nixos/tests/brscan5.nix
new file mode 100644
index 000000000000..9156a4cccfcf
--- /dev/null
+++ b/nixpkgs/nixos/tests/brscan5.nix
@@ -0,0 +1,43 @@
+# integration tests for brscan5 sane driver
+#
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "brscan5";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mattchrist ];
+  };
+
+  nodes.machine = { pkgs, ... }:
+    {
+      nixpkgs.config.allowUnfree = true;
+      hardware.sane = {
+        enable = true;
+        brscan5 = {
+          enable = true;
+          netDevices = {
+            "a" = { model="ADS-1200"; nodename="BRW0080927AFBCE"; };
+            "b" = { model="ADS-1200"; ip="192.168.1.2"; };
+          };
+        };
+      };
+    };
+
+  testScript = ''
+    import re
+    # sane loads libsane-brother5.so.1 successfully, and scanimage doesn't die
+    strace = machine.succeed('strace scanimage -L 2>&1').split("\n")
+    regexp = r'openat\(.*libsane-brother5.so.1", O_RDONLY\|O_CLOEXEC\) = \d\d*$'
+    assert len([x for x in strace if re.match(regexp,x)]) > 0
+
+    # module creates a config
+    cfg = machine.succeed('cat /etc/opt/brother/scanner/brscan5/brsanenetdevice.cfg')
+    assert 'DEVICE=a , "ADS-1200" , 0x4f9:0x459 , NODENAME=BRW0080927AFBCE' in cfg
+    assert 'DEVICE=b , "ADS-1200" , 0x4f9:0x459 , IP-ADDRESS=192.168.1.2' in cfg
+
+    # scanimage lists the two network scanners
+    scanimage = machine.succeed("scanimage -L")
+    print(scanimage)
+    assert """device `brother5:net1;dev0' is a Brother b ADS-1200""" in scanimage
+    assert """device `brother5:net1;dev1' is a Brother a ADS-1200""" in scanimage
+  '';
+})
diff --git a/nixpkgs/nixos/tests/btrbk-doas.nix b/nixpkgs/nixos/tests/btrbk-doas.nix
new file mode 100644
index 000000000000..1e3f8d56addb
--- /dev/null
+++ b/nixpkgs/nixos/tests/btrbk-doas.nix
@@ -0,0 +1,114 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+  let
+    privateKey = ''
+      -----BEGIN OPENSSH PRIVATE KEY-----
+      b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+      QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
+      RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
+      AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
+      9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
+      -----END OPENSSH PRIVATE KEY-----
+    '';
+    publicKey = ''
+      ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv
+    '';
+  in
+  {
+    name = "btrbk-doas";
+    meta = with pkgs.lib; {
+      maintainers = with maintainers; [ symphorien tu-maurice ];
+    };
+
+    nodes = {
+      archive = { ... }: {
+        security.sudo.enable = false;
+        security.doas.enable = true;
+        environment.systemPackages = with pkgs; [ btrfs-progs ];
+        # note: this makes the privateKey world readable.
+        # don't do it with real ssh keys.
+        environment.etc."btrbk_key".text = privateKey;
+        services.btrbk = {
+          extraPackages = [ pkgs.lz4 ];
+          instances = {
+            remote = {
+              onCalendar = "minutely";
+              settings = {
+                ssh_identity = "/etc/btrbk_key";
+                ssh_user = "btrbk";
+                stream_compress = "lz4";
+                volume = {
+                  "ssh://main/mnt" = {
+                    target = "/mnt";
+                    snapshot_dir = "btrbk/remote";
+                    subvolume = "to_backup";
+                  };
+                };
+              };
+            };
+          };
+        };
+      };
+
+      main = { ... }: {
+        security.sudo.enable = false;
+        security.doas.enable = true;
+        environment.systemPackages = with pkgs; [ btrfs-progs ];
+        services.openssh = {
+          enable = true;
+          passwordAuthentication = false;
+          kbdInteractiveAuthentication = false;
+        };
+        services.btrbk = {
+          extraPackages = [ pkgs.lz4 ];
+          sshAccess = [
+            {
+              key = publicKey;
+              roles = [ "source" "send" "info" "delete" ];
+            }
+          ];
+          instances = {
+            local = {
+              onCalendar = "minutely";
+              settings = {
+                volume = {
+                  "/mnt" = {
+                    snapshot_dir = "btrbk/local";
+                    subvolume = "to_backup";
+                  };
+                };
+              };
+            };
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      # create btrfs partition at /mnt
+      for machine in (archive, main):
+        machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1")
+        machine.succeed("mkfs.btrfs /data_fs")
+        machine.succeed("mkdir /mnt")
+        machine.succeed("mount /data_fs /mnt")
+
+      # what to backup and where
+      main.succeed("btrfs subvolume create /mnt/to_backup")
+      main.succeed("mkdir -p /mnt/btrbk/{local,remote}")
+
+      # check that local snapshots work
+      with subtest("local"):
+          main.succeed("echo foo > /mnt/to_backup/bar")
+          main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo")
+          main.succeed("echo bar > /mnt/to_backup/bar")
+          main.succeed("cat /mnt/btrbk/local/*/bar | grep foo")
+
+      # check that btrfs send/receive works and ssh access works
+      with subtest("remote"):
+          archive.wait_until_succeeds("cat /mnt/*/bar | grep bar")
+          main.succeed("echo baz > /mnt/to_backup/bar")
+          archive.succeed("cat /mnt/*/bar | grep bar")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/btrbk-no-timer.nix b/nixpkgs/nixos/tests/btrbk-no-timer.nix
new file mode 100644
index 000000000000..4fcab8839c89
--- /dev/null
+++ b/nixpkgs/nixos/tests/btrbk-no-timer.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+  {
+    name = "btrbk-no-timer";
+    meta.maintainers = with lib.maintainers; [ oxalica ];
+
+    nodes.machine = { ... }: {
+      environment.systemPackages = with pkgs; [ btrfs-progs ];
+      services.btrbk.instances.local = {
+        onCalendar = null;
+        settings.volume."/mnt" = {
+          snapshot_dir = "btrbk/local";
+          subvolume = "to_backup";
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      # Create btrfs partition at /mnt
+      machine.succeed("truncate --size=128M /data_fs")
+      machine.succeed("mkfs.btrfs /data_fs")
+      machine.succeed("mkdir /mnt")
+      machine.succeed("mount /data_fs /mnt")
+      machine.succeed("btrfs subvolume create /mnt/to_backup")
+      machine.succeed("mkdir -p /mnt/btrbk/local")
+
+      # The service should not have any triggering timer.
+      unit = machine.get_unit_info('btrbk-local.service')
+      assert "TriggeredBy" not in unit
+
+      # Manually starting the service should still work.
+      machine.succeed("echo foo > /mnt/to_backup/bar")
+      machine.start_job("btrbk-local.service")
+      machine.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/btrbk-section-order.nix b/nixpkgs/nixos/tests/btrbk-section-order.nix
new file mode 100644
index 000000000000..20f1afcf80ec
--- /dev/null
+++ b/nixpkgs/nixos/tests/btrbk-section-order.nix
@@ -0,0 +1,51 @@
+# This test validates the order of generated sections that may contain
+# other sections.
+# When a `volume` section has both `subvolume` and `target` children,
+# `target` must go before `subvolume`. Otherwise, `target` will become
+# a child of the last `subvolume` instead of `volume`, due to the
+# order-sensitive config format.
+#
+# Issue: https://github.com/NixOS/nixpkgs/issues/195660
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "btrbk-section-order";
+  meta.maintainers = with lib.maintainers; [ oxalica ];
+
+  nodes.machine = { ... }: {
+    services.btrbk.instances.local = {
+      onCalendar = null;
+      settings = {
+        timestamp_format = "long";
+        target."ssh://global-target/".ssh_user = "root";
+        volume."/btrfs" = {
+          snapshot_dir = "/volume-snapshots";
+          target."ssh://volume-target/".ssh_user = "root";
+          subvolume."@subvolume" = {
+            snapshot_dir = "/subvolume-snapshots";
+            target."ssh://subvolume-target/".ssh_user = "root";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("basic.target")
+    got = machine.succeed("cat /etc/btrbk/local.conf")
+    expect = """
+    backend btrfs-progs-sudo
+    timestamp_format long
+    target ssh://global-target/
+     ssh_user root
+    volume /btrfs
+     snapshot_dir /volume-snapshots
+     target ssh://volume-target/
+      ssh_user root
+     subvolume @subvolume
+      snapshot_dir /subvolume-snapshots
+      target ssh://subvolume-target/
+       ssh_user root
+    """.strip()
+    print(got)
+    assert got == expect
+  '';
+})
diff --git a/nixpkgs/nixos/tests/btrbk.nix b/nixpkgs/nixos/tests/btrbk.nix
new file mode 100644
index 000000000000..5261321dfa2c
--- /dev/null
+++ b/nixpkgs/nixos/tests/btrbk.nix
@@ -0,0 +1,112 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+  let
+    privateKey = ''
+      -----BEGIN OPENSSH PRIVATE KEY-----
+      b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+      QyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrwAAAJB+cF5HfnBe
+      RwAAAAtzc2gtZWQyNTUxOQAAACBx8UB04Q6Q/fwDFjakHq904PYFzG9pU2TJ9KXpaPMcrw
+      AAAEBN75NsJZSpt63faCuaD75Unko0JjlSDxMhYHAPJk2/xXHxQHThDpD9/AMWNqQer3Tg
+      9gXMb2lTZMn0pelo8xyvAAAADXJzY2h1ZXR6QGt1cnQ=
+      -----END OPENSSH PRIVATE KEY-----
+    '';
+    publicKey = ''
+      ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIHHxQHThDpD9/AMWNqQer3Tg9gXMb2lTZMn0pelo8xyv
+    '';
+  in
+  {
+    name = "btrbk";
+    meta = with pkgs.lib; {
+      maintainers = with maintainers; [ symphorien ];
+    };
+
+    nodes = {
+      archive = { ... }: {
+        environment.systemPackages = with pkgs; [ btrfs-progs ];
+        # note: this makes the privateKey world readable.
+        # don't do it with real ssh keys.
+        environment.etc."btrbk_key".text = privateKey;
+        services.btrbk = {
+          extraPackages = [ pkgs.lz4 ];
+          instances = {
+            remote = {
+              onCalendar = "minutely";
+              settings = {
+                ssh_identity = "/etc/btrbk_key";
+                ssh_user = "btrbk";
+                stream_compress = "lz4";
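+                # Pull-style backup: snapshot main:/mnt/to_backup over ssh (snapshots stay on main
+                # under btrbk/remote) and receive the backups into /mnt on this archive node.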
+                volume = {
+                  "ssh://main/mnt" = {
+                    target = "/mnt";
+                    snapshot_dir = "btrbk/remote";
+                    subvolume = "to_backup";
+                  };
+                };
+              };
+            };
+          };
+        };
+      };
+
+      main = { ... }: {
+        environment.systemPackages = with pkgs; [ btrfs-progs ];
+        services.openssh = {
+          enable = true;
+          settings = {
+            KbdInteractiveAuthentication = false;
+            PasswordAuthentication = false;
+          };
+        };
+        services.btrbk = {
+          extraPackages = [ pkgs.lz4 ];
+          sshAccess = [
+            {
+              key = publicKey;
+              roles = [ "source" "send" "info" "delete" ];
+            }
+          ];
+          instances = {
+            local = {
+              onCalendar = "minutely";
+              settings = {
+                volume = {
+                  "/mnt" = {
+                    snapshot_dir = "btrbk/local";
+                    subvolume = "to_backup";
+                  };
+                };
+              };
+            };
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      # create btrfs partition at /mnt
+      for machine in (archive, main):
+        machine.succeed("dd if=/dev/zero of=/data_fs bs=120M count=1")
+        machine.succeed("mkfs.btrfs /data_fs")
+        machine.succeed("mkdir /mnt")
+        machine.succeed("mount /data_fs /mnt")
+
+      # what to backup and where
+      main.succeed("btrfs subvolume create /mnt/to_backup")
+      main.succeed("mkdir -p /mnt/btrbk/{local,remote}")
+
+      # check that local snapshots work
+      with subtest("local"):
+          main.succeed("echo foo > /mnt/to_backup/bar")
+          main.wait_until_succeeds("cat /mnt/btrbk/local/*/bar | grep foo")
+          main.succeed("echo bar > /mnt/to_backup/bar")
+          main.succeed("cat /mnt/btrbk/local/*/bar | grep foo")
+
+      # check that btrfs send/receive works and ssh access works
+      with subtest("remote"):
+          archive.wait_until_succeeds("cat /mnt/*/bar | grep bar")
+          main.succeed("echo baz > /mnt/to_backup/bar")
+          archive.succeed("cat /mnt/*/bar | grep bar")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/budgie.nix b/nixpkgs/nixos/tests/budgie.nix
new file mode 100644
index 000000000000..19d9b2bd0bed
--- /dev/null
+++ b/nixpkgs/nixos/tests/budgie.nix
@@ -0,0 +1,68 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "budgie";
+
+  meta.maintainers = [ lib.maintainers.federicoschonborn ];
+
+  nodes.machine = { ... }: {
+    imports = [
+      ./common/user-account.nix
+    ];
+
+    services.xserver.enable = true;
+
+    services.xserver.displayManager = {
+      lightdm.enable = true;
+      autoLogin = {
+        enable = true;
+        user = "alice";
+      };
+    };
+
+    services.xserver.desktopManager.budgie = {
+      enable = true;
+      extraPlugins = [
+        pkgs.budgiePlugins.budgie-analogue-clock-applet
+      ];
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.users.users.alice;
+    in
+    ''
+      with subtest("Wait for login"):
+          # wait_for_x() checks graphical-session.target, which is expected to be
+          # inactive on Budgie until #228946 (i.e. systemd-managed gnome-session) is
+          # done upstream.
+          # https://github.com/BuddiesOfBudgie/budgie-desktop/blob/v10.7.2/src/session/budgie-desktop.in#L16
+          #
+          # Previously this was unconditionally touched by xsessionWrapper but was
+          # changed in #233981 (we have Budgie:GNOME in XDG_CURRENT_DESKTOP).
+          # machine.wait_for_x()
+          machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"')
+          machine.wait_for_file("${user.home}/.Xauthority")
+          machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+      with subtest("Check that logging in has given the user ownership of devices"):
+          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+      with subtest("Check if Budgie session components actually start"):
+          machine.wait_until_succeeds("pgrep budgie-daemon")
+          machine.wait_for_window("budgie-daemon")
+          machine.wait_until_succeeds("pgrep budgie-panel")
+          machine.wait_for_window("budgie-panel")
+          # We don't check xwininfo for this one.
+          # See https://github.com/NixOS/nixpkgs/pull/216737#discussion_r1155312754
+          machine.wait_until_succeeds("pgrep budgie-wm")
+
+      with subtest("Open MATE terminal"):
+          machine.succeed("su - ${user.name} -c 'DISPLAY=:0 mate-terminal >&2 &'")
+          machine.wait_for_window("Terminal")
+
+      with subtest("Check if budgie-wm has ever coredumped"):
+          machine.fail("coredumpctl --json=short | grep budgie-wm")
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/buildbot.nix b/nixpkgs/nixos/tests/buildbot.nix
new file mode 100644
index 000000000000..dbf68aba9467
--- /dev/null
+++ b/nixpkgs/nixos/tests/buildbot.nix
@@ -0,0 +1,108 @@
+# Test ensures buildbot master comes up correctly and workers can connect
+
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "buildbot";
+
+  nodes = {
+    bbmaster = { pkgs, ... }: {
+      services.buildbot-master = {
+        enable = true;
+
+        # NOTE: use a fake repo because there is no internet access in Hydra CI
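+        # factorySteps and changeSource are Python snippets that end up verbatim in the generated master.cfg.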
+        factorySteps = [
+          "steps.Git(repourl='git://gitrepo/fakerepo.git', mode='incremental')"
+          "steps.ShellCommand(command=['bash', 'fakerepo.sh'])"
+        ];
+        changeSource = [
+          "changes.GitPoller('git://gitrepo/fakerepo.git', workdir='gitpoller-workdir', branch='master', pollinterval=300)"
+        ];
+      };
+      networking.firewall.allowedTCPPorts = [ 8010 8011 9989 ];
+      environment.systemPackages = with pkgs; [ git buildbot-full ];
+    };
+
+    bbworker = { pkgs, ... }: {
+      services.buildbot-worker = {
+        enable = true;
+        masterUrl = "bbmaster:9989";
+      };
+      environment.systemPackages = with pkgs; [ git buildbot-worker ];
+    };
+
+    gitrepo = { pkgs, ... }: {
+      services.openssh.enable = true;
+      networking.firewall.allowedTCPPorts = [ 22 9418 ];
+      environment.systemPackages = with pkgs; [ git ];
+      systemd.services.git-daemon = {
+        description   = "Git daemon for the test";
+        wantedBy      = [ "multi-user.target" ];
+        after         = [ "network.target" "sshd.service" ];
+
+        serviceConfig.Restart = "always";
+        path = with pkgs; [ coreutils git openssh ];
+        environment = { HOME = "/root"; };
+        preStart = ''
+          git config --global user.name 'Nobody Fakeuser'
+          git config --global user.email 'nobody\@fakerepo.com'
+          rm -rvf /srv/repos/fakerepo.git /tmp/fakerepo
+          mkdir -pv /srv/repos/fakerepo ~/.ssh
+          ssh-keyscan -H gitrepo > ~/.ssh/known_hosts
+          cat ~/.ssh/known_hosts
+
+          mkdir -p /srv/repos/fakerepo
+          cd /srv/repos/fakerepo
+          rm -rf *
+          git init
+          echo -e '#!/bin/sh\necho fakerepo' > fakerepo.sh
+          cat fakerepo.sh
+          touch .git/git-daemon-export-ok
+          git add fakerepo.sh .git/git-daemon-export-ok
+          git commit -m fakerepo
+        '';
+        script = ''
+          git daemon --verbose --export-all --base-path=/srv/repos --reuseaddr
+        '';
+      };
+    };
+  };
+
+  testScript = ''
+    gitrepo.wait_for_unit("git-daemon.service")
+    gitrepo.wait_for_unit("multi-user.target")
+
+    with subtest("Repo is accessible via git daemon"):
+        bbmaster.wait_for_unit("network-online.target")
+        bbmaster.succeed("rm -rfv /tmp/fakerepo")
+        bbmaster.succeed("git clone git://gitrepo/fakerepo /tmp/fakerepo")
+
+    with subtest("Master service and worker successfully connect"):
+        bbmaster.wait_for_unit("buildbot-master.service")
+        bbmaster.wait_until_succeeds("curl --fail -s --head http://bbmaster:8010")
+        bbworker.wait_for_unit("network-online.target")
+        bbworker.succeed("nc -z bbmaster 8010")
+        bbworker.succeed("nc -z bbmaster 9989")
+        bbworker.wait_for_unit("buildbot-worker.service")
+
+    with subtest("Stop buildbot worker"):
+        bbmaster.succeed("systemctl -l --no-pager status buildbot-master")
+        bbmaster.succeed("systemctl stop buildbot-master")
+        bbworker.fail("nc -z bbmaster 8010")
+        bbworker.fail("nc -z bbmaster 9989")
+        bbworker.succeed("systemctl -l --no-pager status buildbot-worker")
+        bbworker.succeed("systemctl stop buildbot-worker")
+
+    with subtest("Buildbot daemon mode works"):
+        bbmaster.succeed(
+            "buildbot create-master /tmp",
+            "mv -fv /tmp/master.cfg.sample /tmp/master.cfg",
+            "sed -i 's/8010/8011/' /tmp/master.cfg",
+            "buildbot start /tmp",
+            "nc -z bbmaster 8011",
+        )
+        bbworker.wait_until_succeeds("curl --fail -s --head http://bbmaster:8011")
+        bbmaster.wait_until_succeeds("buildbot stop /tmp")
+        bbworker.fail("nc -z bbmaster 8011")
+  '';
+
+  meta.maintainers = with pkgs.lib.maintainers; [ ];
+})
diff --git a/nixpkgs/nixos/tests/buildkite-agents.nix b/nixpkgs/nixos/tests/buildkite-agents.nix
new file mode 100644
index 000000000000..a5abfdb5e2e5
--- /dev/null
+++ b/nixpkgs/nixos/tests/buildkite-agents.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "buildkite-agent";
+  meta.maintainers = with lib.maintainers; [ flokli ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.buildkite-agents = {
+      one = {
+        privateSshKeyPath = (import ./ssh-keys.nix pkgs).snakeOilPrivateKey;
+        tokenPath = (pkgs.writeText "my-token" "5678");
+      };
+      two = {
+        tokenPath = (pkgs.writeText "my-token" "1234");
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    # We can't wait for the unit to start up, since we obviously can't connect to Buildkite,
+    # but we can check whether the files are set up correctly.
+
+    machine.wait_for_file("/var/lib/buildkite-agent-one/buildkite-agent.cfg")
+    machine.wait_for_file("/var/lib/buildkite-agent-one/.ssh/id_rsa")
+
+    machine.wait_for_file("/var/lib/buildkite-agent-two/buildkite-agent.cfg")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/c2fmzq.nix b/nixpkgs/nixos/tests/c2fmzq.nix
new file mode 100644
index 000000000000..d8ec816c7d29
--- /dev/null
+++ b/nixpkgs/nixos/tests/c2fmzq.nix
@@ -0,0 +1,75 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "c2FmZQ";
+  meta.maintainers = with lib.maintainers; [ hmenke ];
+
+  nodes.machine = {
+    services.c2fmzq-server = {
+      enable = true;
+      port = 8080;
+      passphraseFile = builtins.toFile "pwfile" "hunter2"; # don't do this on real deployments
+      settings = {
+        verbose = 3; # debug
+      };
+    };
+    environment = {
+      sessionVariables = {
+        C2FMZQ_PASSPHRASE = "lol";
+        C2FMZQ_API_SERVER = "http://localhost:8080";
+      };
+      systemPackages = [
+        pkgs.c2fmzq
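+        # expect(1) wrapper that feeds $PASSWORD and the YES confirmation to the client's interactive prompts.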
+        (pkgs.writeScriptBin "c2FmZQ-client-wrapper" ''
+          #!${pkgs.expect}/bin/expect -f
+          spawn c2FmZQ-client {*}$argv
+          expect {
+            "Enter password:" { send "$env(PASSWORD)\r" }
+            "Type YES to confirm:" { send "YES\r" }
+            timeout { exit 1 }
+            eof { exit 0 }
+          }
+          interact
+        '')
+      ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    machine.start()
+    machine.wait_for_unit("c2fmzq-server.service")
+    machine.wait_for_open_port(8080)
+
+    with subtest("Create accounts for alice and bob"):
+        machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 create-account alice@example.com")
+        machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 create-account bob@example.com")
+
+    with subtest("Log in as alice"):
+        machine.succeed("PASSWORD=foobar c2FmZQ-client-wrapper -- -v 3 login alice@example.com")
+        msg = machine.succeed("c2FmZQ-client -v 3 status")
+        assert "Logged in as alice@example.com" in msg, f"ERROR: Not logged in as alice:\n{msg}"
+
+    with subtest("Create a new album, upload a file, and delete the uploaded file"):
+        machine.succeed("c2FmZQ-client -v 3 create-album 'Rarest Memes'")
+        machine.succeed("echo 'pls do not steal' > meme.txt")
+        machine.succeed("c2FmZQ-client -v 3 import meme.txt 'Rarest Memes'")
+        machine.succeed("c2FmZQ-client -v 3 sync")
+        machine.succeed("rm meme.txt")
+
+    with subtest("Share the album with bob"):
+        machine.succeed("c2FmZQ-client-wrapper -- -v 3 share 'Rarest Memes' bob@example.com")
+
+    with subtest("Log in as bob"):
+        machine.succeed("PASSWORD=fizzbuzz c2FmZQ-client-wrapper -- -v 3 login bob@example.com")
+        msg = machine.succeed("c2FmZQ-client -v 3 status")
+        assert "Logged in as bob@example.com" in msg, f"ERROR: Not logged in as bob:\n{msg}"
+
+    with subtest("Download the shared file"):
+        machine.succeed("c2FmZQ-client -v 3 download 'shared/Rarest Memes/meme.txt'")
+        machine.succeed("c2FmZQ-client -v 3 export 'shared/Rarest Memes/meme.txt' .")
+        msg = machine.succeed("cat meme.txt")
+        assert "pls do not steal\n" == msg, f"File content is not the same:\n{msg}"
+
+    with subtest("Test that PWA is served"):
+        msg = machine.succeed("curl -sSfL http://localhost:8080")
+        assert "c2FmZQ" in msg, f"Could not find 'c2FmZQ' in the output:\n{msg}"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/caddy.nix b/nixpkgs/nixos/tests/caddy.nix
new file mode 100644
index 000000000000..5a0d3539394b
--- /dev/null
+++ b/nixpkgs/nixos/tests/caddy.nix
@@ -0,0 +1,88 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "caddy";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ xfix Br1ght0ne ];
+  };
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.caddy.enable = true;
+      services.caddy.extraConfig = ''
+        http://localhost {
+          encode gzip
+
+          file_server
+          root * ${
+            pkgs.runCommand "testdir" {} ''
+              mkdir "$out"
+              echo hello world > "$out/example.html"
+            ''
+          }
+        }
+      '';
+      services.caddy.enableReload = true;
+
+      specialisation.config-reload.configuration = {
+        services.caddy.extraConfig = ''
+          http://localhost:8080 {
+          }
+        '';
+      };
+      specialisation.multiple-configs.configuration = {
+        services.caddy.virtualHosts = {
+          "http://localhost:8080" = { };
+          "http://localhost:8081" = { };
+        };
+      };
+      specialisation.rfc42.configuration = {
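+        # Exercise the freeform `settings` option, which is rendered to Caddy's native JSON config.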
+        services.caddy.settings = {
+          apps.http.servers.default = {
+            listen = [ ":80" ];
+            routes = [{
+              handle = [{
+                body = "hello world";
+                handler = "static_response";
+                status_code = 200;
+              }];
+            }];
+          };
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      justReloadSystem = "${nodes.webserver.system.build.toplevel}/specialisation/config-reload";
+      multipleConfigs = "${nodes.webserver.system.build.toplevel}/specialisation/multiple-configs";
+      rfc42Config = "${nodes.webserver.system.build.toplevel}/specialisation/rfc42";
+    in
+    ''
+      url = "http://localhost/example.html"
+      webserver.wait_for_unit("caddy")
+      webserver.wait_for_open_port(80)
+
+
+      with subtest("config is reloaded on nixos-rebuild switch"):
+          webserver.succeed(
+              "${justReloadSystem}/bin/switch-to-configuration test >&2"
+          )
+          webserver.wait_for_open_port(8080)
+          webserver.fail("journalctl -u caddy | grep -q -i stopped")
+          webserver.succeed("journalctl -u caddy | grep -q -i reloaded")
+
+      with subtest("multiple configs are correctly merged"):
+          webserver.succeed(
+              "${multipleConfigs}/bin/switch-to-configuration test >&2"
+          )
+          webserver.wait_for_open_port(8080)
+          webserver.wait_for_open_port(8081)
+
+      with subtest("rfc42 settings config"):
+          webserver.succeed(
+              "${rfc42Config}/bin/switch-to-configuration test >&2"
+          )
+          webserver.wait_for_open_port(80)
+          webserver.succeed("curl http://localhost | grep hello")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/cadvisor.nix b/nixpkgs/nixos/tests/cadvisor.nix
new file mode 100644
index 000000000000..70e068fcf21c
--- /dev/null
+++ b/nixpkgs/nixos/tests/cadvisor.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ lib, pkgs, ... } : {
+  name = "cadvisor";
+  meta.maintainers = with lib.maintainers; [ offline ];
+
+  nodes = {
+    machine = { ... }: {
+      services.cadvisor.enable = true;
+    };
+
+    influxdb = { lib, ... }: {
+      services.cadvisor.enable = true;
+      services.cadvisor.storageDriver = "influxdb";
+      services.influxdb.enable = true;
+    };
+  };
+
+  testScript =  ''
+      start_all()
+      machine.wait_for_unit("cadvisor.service")
+      machine.succeed("curl -f http://localhost:8080/containers/")
+
+      influxdb.wait_for_unit("influxdb.service")
+
+      # create influxdb database
+      influxdb.succeed(
+          'curl -f -XPOST http://localhost:8086/query --data-urlencode "q=CREATE DATABASE root"'
+      )
+
+      influxdb.wait_for_unit("cadvisor.service")
+      influxdb.succeed("curl -f http://localhost:8080/containers/")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/cage.nix b/nixpkgs/nixos/tests/cage.nix
new file mode 100644
index 000000000000..3b49185124f3
--- /dev/null
+++ b/nixpkgs/nixos/tests/cage.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "cage";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ matthewbauer ];
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+
+    fonts.packages = with pkgs; [ dejavu_fonts ];
+
+    services.cage = {
+      enable = true;
+      user = "alice";
+      program = "${pkgs.xterm}/bin/xterm";
+    };
+
+    # Need to switch to a different GPU driver than the default one (-vga std) so that Cage can launch:
+    virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    with subtest("Wait for cage to boot up"):
+        start_all()
+        machine.wait_for_file("/run/user/${toString user.uid}/wayland-0.lock")
+        machine.wait_until_succeeds("pgrep xterm")
+        machine.wait_for_text("alice@machine")
+        machine.screenshot("screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/cagebreak.nix b/nixpkgs/nixos/tests/cagebreak.nix
new file mode 100644
index 000000000000..1fef7cb57cfc
--- /dev/null
+++ b/nixpkgs/nixos/tests/cagebreak.nix
@@ -0,0 +1,65 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+let
+  cagebreakConfigfile = pkgs.writeText "config" ''
+    workspaces 1
+    escape C-t
+    bind t exec env DISPLAY=:0 ${pkgs.xterm}/bin/xterm -cm -pc
+  '';
+in
+{
+  name = "cagebreak";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ berbiche ];
+  };
+
+  nodes.machine = { config, ... }:
+  let
+    alice = config.users.users.alice;
+  in {
+    # Automatically login on tty1 as a normal user:
+    imports = [ ./common/user-account.nix ];
+    services.getty.autologinUser = "alice";
+    programs.bash.loginShellInit = ''
+      if [ "$(tty)" = "/dev/tty1" ]; then
+        set -e
+
+        mkdir -p ~/.config/cagebreak
+        cp -f ${cagebreakConfigfile} ~/.config/cagebreak/config
+
+        cagebreak
+      fi
+    '';
+
+    hardware.opengl.enable = true;
+    programs.xwayland.enable = true;
+    security.polkit.enable = true;
+    environment.systemPackages = [ pkgs.cagebreak pkgs.wayland-utils ];
+
+    # Need to switch to a different GPU driver than the default one (-vga std) so that Cagebreak can launch:
+    virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+    XDG_RUNTIME_DIR = "/run/user/${toString user.uid}";
+  in ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_file("${XDG_RUNTIME_DIR}/wayland-0")
+
+    with subtest("ensure wayland works with wayinfo from wallutils"):
+        print(machine.succeed("env XDG_RUNTIME_DIR=${XDG_RUNTIME_DIR} wayland-info"))
+
+    # TODO: Fix the XWayland test (log the cagebreak output to debug):
+    # with subtest("ensure xwayland works with xterm"):
+    #     machine.send_key("ctrl-t")
+    #     machine.send_key("t")
+    #     machine.wait_until_succeeds("pgrep xterm")
+    #     machine.wait_for_text("${user.name}@machine")
+    #     machine.screenshot("screen")
+    #     machine.send_key("ctrl-d")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/calibre-server.nix b/nixpkgs/nixos/tests/calibre-server.nix
new file mode 100644
index 000000000000..4b1753aaa704
--- /dev/null
+++ b/nixpkgs/nixos/tests/calibre-server.nix
@@ -0,0 +1,104 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  inherit (pkgs.lib) concatStringsSep maintainers mapAttrs mkMerge
+    removeSuffix splitString;
+
+  tests = {
+    default = {
+      calibreConfig = {};
+      calibreScript = ''
+        wait_for_unit("calibre-server.service")
+      '';
+    };
+    customLibrary = {
+      calibreConfig = {
+        libraries = [ "/var/lib/calibre-data" ];
+      };
+      calibreScript = ''
+        succeed("ls -la /var/lib/calibre-data")
+        wait_for_unit("calibre-server.service")
+      '';
+    };
+    multipleLibraries = {
+      calibreConfig = {
+        libraries = [ "/var/lib/calibre-data" "/var/lib/calibre-server" ];
+      };
+      calibreScript = ''
+        succeed("ls -la /var/lib/calibre-data")
+        succeed("ls -la /var/lib/calibre-server")
+        wait_for_unit("calibre-server.service")
+      '';
+    };
+    hostAndPort = {
+      calibreConfig = {
+        host = "127.0.0.1";
+        port = 8888;
+      };
+      calibreScript = ''
+        wait_for_unit("calibre-server.service")
+        wait_for_open_port(8888)
+        succeed("curl --fail http://127.0.0.1:8888")
+      '';
+    };
+    basicAuth = {
+      calibreConfig = {
+        host = "127.0.0.1";
+        port = 8888;
+        auth = {
+          enable = true;
+          mode = "basic";
+        };
+      };
+      calibreScript = ''
+        wait_for_unit("calibre-server.service")
+        wait_for_open_port(8888)
+        fail("curl --fail http://127.0.0.1:8888")
+      '';
+    };
+  };
+in
+mapAttrs
+  (test: testConfig: (makeTest (
+    let
+      nodeName = testConfig.nodeName or test;
+      calibreConfig = {
+        enable = true;
+        libraries = [ "/var/lib/calibre-server" ];
+      } // testConfig.calibreConfig or {};
+      librariesInitScript = path: ''
+        ${nodeName}.execute("touch /tmp/test.epub")
+        ${nodeName}.execute("zip -r /tmp/test.zip /tmp/test.epub")
+        ${nodeName}.execute("mkdir -p ${path}")
+        ${nodeName}.execute("calibredb add -d --with-library ${path} /tmp/test.zip")
+      '';
+    in
+    {
+      name = "calibre-server-${test}";
+
+      nodes.${nodeName} = mkMerge [{
+        environment.systemPackages = [ pkgs.zip ];
+        services.calibre-server = calibreConfig;
+      } testConfig.calibreProvider or { }];
+
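+      # Each calibreScript line is prefixed with the node name (unless it is a continuation line
+      # starting with a space or ")"), so the snippets above can call test-driver methods directly.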
+      testScript = ''
+        ${nodeName}.start()
+        ${concatStringsSep "\n" (map librariesInitScript calibreConfig.libraries)}
+        ${concatStringsSep "\n" (map (line:
+          if (builtins.substring 0 1 line == " " || builtins.substring 0 1 line == ")")
+          then line
+          else "${nodeName}.${line}"
+        ) (splitString "\n" (removeSuffix "\n" testConfig.calibreScript)))}
+        ${nodeName}.shutdown()
+      '';
+
+      meta = with maintainers; {
+        maintainers = [ gaelreyrol ];
+      };
+    }
+  )))
+  tests
diff --git a/nixpkgs/nixos/tests/calibre-web.nix b/nixpkgs/nixos/tests/calibre-web.nix
new file mode 100644
index 000000000000..aea9bca3ebe9
--- /dev/null
+++ b/nixpkgs/nixos/tests/calibre-web.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+
+    let
+      port = 3142;
+      defaultPort = 8083;
+    in
+      {
+        name = "calibre-web";
+        meta.maintainers = with lib.maintainers; [ pborzenkov ];
+
+        nodes = {
+          customized = { pkgs, ... }: {
+            services.calibre-web = {
+              enable = true;
+              listen.port = port;
+              options = {
+                calibreLibrary = "/tmp/books";
+                reverseProxyAuth = {
+                  enable = true;
+                  header = "X-User";
+                };
+              };
+            };
+            environment.systemPackages = [ pkgs.calibre ];
+          };
+        };
+        testScript = ''
+          start_all()
+
+          customized.succeed(
+              "mkdir /tmp/books && calibredb --library-path /tmp/books add -e --title test-book"
+          )
+          customized.succeed("systemctl restart calibre-web")
+          customized.wait_for_unit("calibre-web.service")
+          customized.wait_for_open_port(${toString port})
+          customized.succeed(
+              "curl --fail -H X-User:admin 'http://localhost:${toString port}' | grep test-book"
+          )
+        '';
+      }
+)
diff --git a/nixpkgs/nixos/tests/cassandra.nix b/nixpkgs/nixos/tests/cassandra.nix
new file mode 100644
index 000000000000..a19d525c3431
--- /dev/null
+++ b/nixpkgs/nixos/tests/cassandra.nix
@@ -0,0 +1,132 @@
+import ./make-test-python.nix ({ pkgs, lib, testPackage ? pkgs.cassandra, ... }:
+let
+  clusterName = "NixOS Automated-Test Cluster";
+
+  testRemoteAuth = lib.versionAtLeast testPackage.version "3.11";
+  jmxRoles = [{ username = "me"; password = "password"; }];
+  jmxRolesFile = ./cassandra-jmx-roles;
+  jmxAuthArgs = "-u ${(builtins.elemAt jmxRoles 0).username} -pw ${(builtins.elemAt jmxRoles 0).password}";
+  jmxPort = 7200;  # Non-standard port so the test doesn't accidentally pass with the default port
+  jmxPortStr = toString jmxPort;
+
+  # Would usually be assigned to 512M.
+  # Set it to a different value, so that we can check whether our config
+  # actually changes it.
+  numMaxHeapSize = "400";
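+  # Extract the max heap size (in MB) reported by `nodetool info`; checkHeapLimitCommand verifies it stays below numMaxHeapSize.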
+  getHeapLimitCommand = ''
+    nodetool info -p ${jmxPortStr} | grep "^Heap Memory" | awk '{print $NF}'
+  '';
+  checkHeapLimitCommand = pkgs.writeShellScript "check-heap-limit.sh" ''
+    [ 1 -eq "$(echo "$(${getHeapLimitCommand}) < ${numMaxHeapSize}" | ${pkgs.bc}/bin/bc)" ]
+  '';
+
+  cassandraCfg = ipAddress:
+    { enable = true;
+      inherit clusterName;
+      listenAddress = ipAddress;
+      rpcAddress = ipAddress;
+      seedAddresses = [ "192.168.1.1" ];
+      package = testPackage;
+      maxHeapSize = "${numMaxHeapSize}M";
+      heapNewSize = "100M";
+      inherit jmxPort;
+    };
+  nodeCfg = ipAddress: extra: {pkgs, config, ...}: rec {
+    environment.systemPackages = [ testPackage ];
+    networking = {
+      firewall.allowedTCPPorts = [ 7000 9042 services.cassandra.jmxPort ];
+      useDHCP = false;
+      interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+        { address = ipAddress; prefixLength = 24; }
+      ];
+    };
+    services.cassandra = cassandraCfg ipAddress // extra;
+  };
+in
+{
+  name = "cassandra-${testPackage.version}";
+  meta = {
+    maintainers = with lib.maintainers; [ johnazoidberg ];
+  };
+
+  nodes = {
+    cass0 = nodeCfg "192.168.1.1" {};
+    cass1 = nodeCfg "192.168.1.2" (lib.optionalAttrs testRemoteAuth { inherit jmxRoles; remoteJmx = true; });
+    cass2 = nodeCfg "192.168.1.3" { jvmOpts = [ "-Dcassandra.replace_address=cass1" ]; };
+  };
+
+  testScript = ''
+    # Check configuration
+    with subtest("Timers exist"):
+        cass0.succeed("systemctl list-timers | grep cassandra-full-repair.timer")
+        cass0.succeed("systemctl list-timers | grep cassandra-incremental-repair.timer")
+
+    with subtest("Can connect via cqlsh"):
+        cass0.wait_for_unit("cassandra.service")
+        cass0.wait_until_succeeds("nc -z cass0 9042")
+        cass0.succeed("echo 'show version;' | cqlsh cass0")
+
+    with subtest("Nodetool is operational"):
+        cass0.wait_for_unit("cassandra.service")
+        cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+        cass0.succeed("nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass0'")
+
+    with subtest("Cluster name was set"):
+        cass0.wait_for_unit("cassandra.service")
+        cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+        cass0.wait_until_succeeds(
+            "nodetool describecluster -p ${jmxPortStr} | grep 'Name: ${clusterName}'"
+        )
+
+    with subtest("Heap limit set correctly"):
+        # Nodetool takes a while before it can display info
+        cass0.wait_until_succeeds("nodetool info -p ${jmxPortStr}")
+        cass0.succeed("${checkHeapLimitCommand}")
+
+    # Check cluster interaction
+    with subtest("Bring up cluster"):
+        cass1.wait_for_unit("cassandra.service")
+        cass1.wait_until_succeeds(
+            "nodetool -p ${jmxPortStr} ${jmxAuthArgs} status | egrep -c '^UN' | grep 2"
+        )
+        cass0.succeed("nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass1'")
+  '' + lib.optionalString testRemoteAuth ''
+    with subtest("Remote authenticated jmx"):
+        # Doesn't work if not enabled
+        cass0.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+        cass1.fail("nc -z 192.168.1.1 ${jmxPortStr}")
+        cass1.fail("nodetool -p ${jmxPortStr} -h 192.168.1.1 status")
+
+        # Works if enabled
+        cass1.wait_until_succeeds("nc -z localhost ${jmxPortStr}")
+        cass0.succeed("nodetool -p ${jmxPortStr} -h 192.168.1.2 ${jmxAuthArgs} status")
+  '' + ''
+    with subtest("Break and fix node"):
+        cass1.block()
+        cass0.wait_until_succeeds(
+            "nodetool status -p ${jmxPortStr} --resolve-ip | egrep -c '^DN[[:space:]]+cass1'"
+        )
+        cass0.succeed("nodetool status -p ${jmxPortStr} | egrep -c '^UN'  | grep 1")
+        cass1.unblock()
+        cass1.wait_until_succeeds(
+            "nodetool -p ${jmxPortStr} ${jmxAuthArgs} status | egrep -c '^UN'  | grep 2"
+        )
+        cass0.succeed("nodetool status -p ${jmxPortStr} | egrep -c '^UN'  | grep 2")
+
+    with subtest("Replace crashed node"):
+        cass1.block()  # .crash() waits until it's fully shut down
+        cass2.start()
+        cass0.wait_until_fails(
+            "nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass1'"
+        )
+
+        cass2.wait_for_unit("cassandra.service")
+        cass0.wait_until_succeeds(
+            "nodetool status -p ${jmxPortStr} --resolve-ip | egrep '^UN[[:space:]]+cass2'"
+        )
+  '';
+
+  passthru = {
+    inherit testPackage;
+  };
+})
diff --git a/nixpkgs/nixos/tests/castopod.nix b/nixpkgs/nixos/tests/castopod.nix
new file mode 100644
index 000000000000..4435ec617d4e
--- /dev/null
+++ b/nixpkgs/nixos/tests/castopod.nix
@@ -0,0 +1,87 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "castopod";
+  meta = with lib.maintainers; {
+    maintainers = [ alexoundos misuzu ];
+  };
+  nodes.castopod = { nodes, ... }: {
+    networking.firewall.allowedTCPPorts = [ 80 ];
+    networking.extraHosts = ''
+      127.0.0.1 castopod.example.com
+    '';
+    services.castopod = {
+      enable = true;
+      database.createLocally = true;
+      localDomain = "castopod.example.com";
+    };
+    environment.systemPackages =
+      let
+        username = "admin";
+        email = "admin@castopod.example.com";
+        password = "v82HmEp5";
+        testRunner = pkgs.writers.writePython3Bin "test-runner"
+          {
+            libraries = [ pkgs.python3Packages.selenium ];
+            flakeIgnore = [
+              "E501"
+            ];
+          } ''
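+          # Drive the Castopod web installer in headless Firefox: create the
+          # superadmin account, then log in and wait for the admin dashboard.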
+          from selenium.webdriver.common.by import By
+          from selenium.webdriver import Firefox
+          from selenium.webdriver.firefox.options import Options
+          from selenium.webdriver.support.ui import WebDriverWait
+          from selenium.webdriver.support import expected_conditions as EC
+
+          options = Options()
+          options.add_argument('--headless')
+          driver = Firefox(options=options)
+          try:
+              driver.implicitly_wait(20)
+              driver.get('http://castopod.example.com/cp-install')
+
+              wait = WebDriverWait(driver, 10)
+
+              wait.until(EC.title_contains("installer"))
+
+              driver.find_element(By.CSS_SELECTOR, '#username').send_keys(
+                  '${username}'
+              )
+              driver.find_element(By.CSS_SELECTOR, '#email').send_keys(
+                  '${email}'
+              )
+              driver.find_element(By.CSS_SELECTOR, '#password').send_keys(
+                  '${password}'
+              )
+              driver.find_element(By.XPATH, "//button[contains(., 'Finish install')]").click()
+
+              wait.until(EC.title_contains("Auth"))
+
+              driver.find_element(By.CSS_SELECTOR, '#email').send_keys(
+                  '${email}'
+              )
+              driver.find_element(By.CSS_SELECTOR, '#password').send_keys(
+                  '${password}'
+              )
+              driver.find_element(By.XPATH, "//button[contains(., 'Login')]").click()
+
+              wait.until(EC.title_contains("Admin dashboard"))
+          finally:
+              driver.close()
+              driver.quit()
+        '';
+      in
+      [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
+  };
+  testScript = ''
+    start_all()
+    castopod.wait_for_unit("castopod-setup.service")
+    castopod.wait_for_file("/run/phpfpm/castopod.sock")
+    castopod.wait_for_unit("nginx.service")
+    castopod.wait_for_open_port(80)
+    castopod.wait_until_succeeds("curl -sS -f http://castopod.example.com")
+    castopod.succeed("curl -s http://localhost/cp-install | grep 'Create your Super Admin account' > /dev/null")
+
+    with subtest("Create superadmin and log in"):
+        castopod.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ceph-multi-node.nix b/nixpkgs/nixos/tests/ceph-multi-node.nix
new file mode 100644
index 000000000000..556546beee76
--- /dev/null
+++ b/nixpkgs/nixos/tests/ceph-multi-node.nix
@@ -0,0 +1,233 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  cfg = {
+    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+    monA = {
+      name = "a";
+      ip = "192.168.1.1";
+    };
+    osd0 = {
+      name = "0";
+      ip = "192.168.1.2";
+      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+    };
+    osd1 = {
+      name = "1";
+      ip = "192.168.1.3";
+      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+    };
+    osd2 = {
+      name = "2";
+      ip = "192.168.1.4";
+      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    };
+  };
+  generateCephConfig = { daemonConfig }: {
+    enable = true;
+    global = {
+      fsid = cfg.clusterId;
+      monHost = cfg.monA.ip;
+      monInitialMembers = cfg.monA.name;
+    };
+  } // daemonConfig;
+
+  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
+    virtualisation = {
+      emptyDiskImages = [ 20480 ];
+      vlans = [ 1 ];
+    };
+
+    networking = networkConfig;
+
+    environment.systemPackages = with pkgs; [
+      bash
+      sudo
+      ceph
+      xfsprogs
+      libressl.nc
+    ];
+
+    boot.kernelModules = [ "xfs" ];
+
+    services.ceph = cephConfig;
+  };
+
+  networkMonA = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.monA.ip; prefixLength = 24; }
+    ];
+    firewall = {
+      allowedTCPPorts = [ 6789 3300 ];
+      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
+    };
+  };
+  cephConfigMonA = generateCephConfig { daemonConfig = {
+    mon = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    mgr = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+  }; };
+
+  networkOsd = osd: {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = osd.ip; prefixLength = 24; }
+    ];
+    firewall = {
+      allowedTCPPortRanges = [ { from = 6800; to = 7300; } ];
+    };
+  };
+
+  cephConfigOsd = osd: generateCephConfig { daemonConfig = {
+    osd = {
+      enable = true;
+      daemons = [ osd.name ];
+    };
+  }; };
+
+  # The following deployment is based on the manual deployment described here:
+  # https://docs.ceph.com/docs/master/install/manual-deployment/
+  # For other ways to deploy a ceph cluster, look at the documentation at
+  # https://docs.ceph.com/docs/master/
+  testscript = { ... }: ''
+    start_all()
+
+    monA.wait_for_unit("network.target")
+    osd0.wait_for_unit("network.target")
+    osd1.wait_for_unit("network.target")
+    osd2.wait_for_unit("network.target")
+
+    # Bootstrap ceph-mon daemon
+    monA.succeed(
+        "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+        "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+        "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+        "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+        "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+        "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+        "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+        "systemctl start ceph-mon-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.succeed("ceph mon enable-msgr2")
+    monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
+
+    # Can't check ceph status until a mon is up
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
+    # Start the ceph-mgr daemon; it has no dependencies and hardly any setup
+    monA.succeed(
+        "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+        "systemctl start ceph-mgr-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mgr-a")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
+    # Send the admin keyring to the OSD machines
+    monA.succeed("cp /etc/ceph/ceph.client.admin.keyring /tmp/shared")
+    osd0.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+    osd1.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+    osd2.succeed("cp /tmp/shared/ceph.client.admin.keyring /etc/ceph")
+
+    # Bootstrap OSDs
+    osd0.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+        'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+    )
+    osd1.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+        'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+    )
+    osd2.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+        'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+    )
+
+    # Initialize the OSDs with regular filestore
+    osd0.succeed(
+        "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd0.name}",
+    )
+    osd1.succeed(
+        "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd1.name}",
+    )
+    osd2.succeed(
+        "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd2.name}",
+    )
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
+    monA.succeed(
+        "ceph osd pool create multi-node-test 32 32",
+        "ceph osd pool ls | grep 'multi-node-test'",
+        "ceph osd pool rename multi-node-test multi-node-other-test",
+        "ceph osd pool ls | grep 'multi-node-other-test'",
+    )
+    monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+    monA.succeed("ceph osd pool set multi-node-other-test size 2")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+    monA.fail(
+        "ceph osd pool ls | grep 'multi-node-test'",
+        "ceph osd pool delete multi-node-other-test multi-node-other-test --yes-i-really-really-mean-it",
+    )
+
+    # Shut down ceph on all machines in a very impolite way
+    monA.crash()
+    osd0.crash()
+    osd1.crash()
+    osd2.crash()
+
+    # Start it up
+    osd0.start()
+    osd1.start()
+    osd2.start()
+    monA.start()
+
+    # Ensure the cluster comes back up again
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+  '';
+in {
+  name = "basic-multi-node-ceph-cluster";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lejonet ];
+  };
+
+  nodes = {
+    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
+    osd0 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd0; networkConfig = networkOsd cfg.osd0; };
+    osd1 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd1; networkConfig = networkOsd cfg.osd1; };
+    osd2 = generateHost { pkgs = pkgs; cephConfig = cephConfigOsd cfg.osd2; networkConfig = networkOsd cfg.osd2; };
+  };
+
+  testScript = testscript;
+})
diff --git a/nixpkgs/nixos/tests/ceph-single-node-bluestore.nix b/nixpkgs/nixos/tests/ceph-single-node-bluestore.nix
new file mode 100644
index 000000000000..acaae4cf300e
--- /dev/null
+++ b/nixpkgs/nixos/tests/ceph-single-node-bluestore.nix
@@ -0,0 +1,196 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  cfg = {
+    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+    monA = {
+      name = "a";
+      ip = "192.168.1.1";
+    };
+    osd0 = {
+      name = "0";
+      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+    };
+    osd1 = {
+      name = "1";
+      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+    };
+    osd2 = {
+      name = "2";
+      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    };
+  };
+  generateCephConfig = { daemonConfig }: {
+    enable = true;
+    global = {
+      fsid = cfg.clusterId;
+      monHost = cfg.monA.ip;
+      monInitialMembers = cfg.monA.name;
+    };
+  } // daemonConfig;
+
+  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
+    virtualisation = {
+      emptyDiskImages = [ 20480 20480 20480 ];
+      vlans = [ 1 ];
+    };
+
+    networking = networkConfig;
+
+    environment.systemPackages = with pkgs; [
+      bash
+      sudo
+      ceph
+      xfsprogs
+    ];
+
+    boot.kernelModules = [ "xfs" ];
+
+    services.ceph = cephConfig;
+  };
+
+  networkMonA = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.monA.ip; prefixLength = 24; }
+    ];
+  };
+  cephConfigMonA = generateCephConfig { daemonConfig = {
+    mon = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    mgr = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    osd = {
+      enable = true;
+      daemons = [ cfg.osd0.name cfg.osd1.name cfg.osd2.name ];
+    };
+  }; };
+
+  # The following deployment is based on the manual deployment described here:
+  # https://docs.ceph.com/docs/master/install/manual-deployment/
+  # For other ways to deploy a ceph cluster, look at the documentation at
+  # https://docs.ceph.com/docs/master/
+  testscript = { ... }: ''
+    start_all()
+
+    monA.wait_for_unit("network.target")
+
+    # Bootstrap ceph-mon daemon
+    monA.succeed(
+        "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+        "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+        "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+        "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+        "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+        "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+        "systemctl start ceph-mon-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.succeed("ceph mon enable-msgr2")
+    monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
+
+    # Can't check ceph status until a mon is up
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
+    # Start the ceph-mgr daemon after copying in the keyring
+    monA.succeed(
+        "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+        "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+        "systemctl start ceph-mgr-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mgr-a")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
+    # Bootstrap OSDs
+    monA.succeed(
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd0.name}/type",
+        "ln -sf /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}/block",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd1.name}/type",
+        "ln -sf /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}/block",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "echo bluestore > /var/lib/ceph/osd/ceph-${cfg.osd2.name}/type",
+        "ln -sf /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}/block",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+        'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+    )
+
+    # Initialize the OSDs with bluestore
+    monA.succeed(
+        "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+        "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+        "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd0.name}",
+        "systemctl start ceph-osd-${cfg.osd1.name}",
+        "systemctl start ceph-osd-${cfg.osd2.name}",
+    )
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
+    monA.succeed(
+        "ceph osd pool create single-node-test 32 32",
+        "ceph osd pool ls | grep 'single-node-test'",
+        "ceph osd pool rename single-node-test single-node-other-test",
+        "ceph osd pool ls | grep 'single-node-other-test'",
+    )
+    monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+    monA.succeed(
+        "ceph osd getcrushmap -o crush",
+        "crushtool -d crush -o decrushed",
+        "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
+        "crushtool -c modcrush -o recrushed",
+        "ceph osd setcrushmap -i recrushed",
+        "ceph osd pool set single-node-other-test size 2",
+    )
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+    monA.fail(
+        "ceph osd pool ls | grep 'single-node-test'",
+        "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
+    )
+
+    # Shut down ceph by stopping ceph.target.
+    monA.succeed("systemctl stop ceph.target")
+
+    # Start it up
+    monA.succeed("systemctl start ceph.target")
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
+
+    # Ensure the cluster comes back up again
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+  '';
+in {
+  name = "basic-single-node-ceph-cluster-bluestore";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lukegb ];
+  };
+
+  nodes = {
+    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
+  };
+
+  testScript = testscript;
+})
diff --git a/nixpkgs/nixos/tests/ceph-single-node.nix b/nixpkgs/nixos/tests/ceph-single-node.nix
new file mode 100644
index 000000000000..4a5636fac156
--- /dev/null
+++ b/nixpkgs/nixos/tests/ceph-single-node.nix
@@ -0,0 +1,207 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  cfg = {
+    clusterId = "066ae264-2a5d-4729-8001-6ad265f50b03";
+    monA = {
+      name = "a";
+      ip = "192.168.1.1";
+    };
+    osd0 = {
+      name = "0";
+      key = "AQBCEJNa3s8nHRAANvdsr93KqzBznuIWm2gOGg==";
+      uuid = "55ba2294-3e24-478f-bee0-9dca4c231dd9";
+    };
+    osd1 = {
+      name = "1";
+      key = "AQBEEJNac00kExAAXEgy943BGyOpVH1LLlHafQ==";
+      uuid = "5e97a838-85b6-43b0-8950-cb56d554d1e5";
+    };
+    osd2 = {
+      name = "2";
+      key = "AQAdyhZeIaUlARAAGRoidDAmS6Vkp546UFEf5w==";
+      uuid = "ea999274-13d0-4dd5-9af9-ad25a324f72f";
+    };
+  };
+  generateCephConfig = { daemonConfig }: {
+    enable = true;
+    global = {
+      fsid = cfg.clusterId;
+      monHost = cfg.monA.ip;
+      monInitialMembers = cfg.monA.name;
+    };
+  } // daemonConfig;
+
+  generateHost = { pkgs, cephConfig, networkConfig, ... }: {
+    virtualisation = {
+      emptyDiskImages = [ 20480 20480 20480 ];
+      vlans = [ 1 ];
+    };
+
+    networking = networkConfig;
+
+    environment.systemPackages = with pkgs; [
+      bash
+      sudo
+      ceph
+      xfsprogs
+    ];
+
+    boot.kernelModules = [ "xfs" ];
+
+    services.ceph = cephConfig;
+  };
+
+  networkMonA = {
+    dhcpcd.enable = false;
+    interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+      { address = cfg.monA.ip; prefixLength = 24; }
+    ];
+  };
+  cephConfigMonA = generateCephConfig { daemonConfig = {
+    mon = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    mgr = {
+      enable = true;
+      daemons = [ cfg.monA.name ];
+    };
+    osd = {
+      enable = true;
+      daemons = [ cfg.osd0.name cfg.osd1.name cfg.osd2.name ];
+    };
+  }; };
+
+  # The following deployment is based on the manual deployment described here:
+  # https://docs.ceph.com/docs/master/install/manual-deployment/
+  # For other ways to deploy a ceph cluster, look at the documentation at
+  # https://docs.ceph.com/docs/master/
+  testscript = { ... }: ''
+    start_all()
+
+    monA.wait_for_unit("network.target")
+
+    # Bootstrap ceph-mon daemon
+    monA.succeed(
+        "sudo -u ceph ceph-authtool --create-keyring /tmp/ceph.mon.keyring --gen-key -n mon. --cap mon 'allow *'",
+        "sudo -u ceph ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'",
+        "sudo -u ceph ceph-authtool /tmp/ceph.mon.keyring --import-keyring /etc/ceph/ceph.client.admin.keyring",
+        "monmaptool --create --add ${cfg.monA.name} ${cfg.monA.ip} --fsid ${cfg.clusterId} /tmp/monmap",
+        "sudo -u ceph ceph-mon --mkfs -i ${cfg.monA.name} --monmap /tmp/monmap --keyring /tmp/ceph.mon.keyring",
+        "sudo -u ceph touch /var/lib/ceph/mon/ceph-${cfg.monA.name}/done",
+        "systemctl start ceph-mon-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.succeed("ceph mon enable-msgr2")
+    monA.succeed("ceph config set mon auth_allow_insecure_global_id_reclaim false")
+
+    # Can't check ceph status until a mon is up
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+
+    # Start the ceph-mgr daemon after copying in the keyring
+    monA.succeed(
+        "sudo -u ceph mkdir -p /var/lib/ceph/mgr/ceph-${cfg.monA.name}/",
+        "ceph auth get-or-create mgr.${cfg.monA.name} mon 'allow profile mgr' osd 'allow *' mds 'allow *' > /var/lib/ceph/mgr/ceph-${cfg.monA.name}/keyring",
+        "systemctl start ceph-mgr-${cfg.monA.name}",
+    )
+    monA.wait_for_unit("ceph-mgr-a")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+
+    # Bootstrap OSDs
+    monA.succeed(
+        "mkfs.xfs /dev/vdb",
+        "mkfs.xfs /dev/vdc",
+        "mkfs.xfs /dev/vdd",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "mount /dev/vdb /var/lib/ceph/osd/ceph-${cfg.osd0.name}",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "mount /dev/vdc /var/lib/ceph/osd/ceph-${cfg.osd1.name}",
+        "mkdir -p /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "mount /dev/vdd /var/lib/ceph/osd/ceph-${cfg.osd2.name}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd0.name}/keyring --name osd.${cfg.osd0.name} --add-key ${cfg.osd0.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd1.name}/keyring --name osd.${cfg.osd1.name} --add-key ${cfg.osd1.key}",
+        "ceph-authtool --create-keyring /var/lib/ceph/osd/ceph-${cfg.osd2.name}/keyring --name osd.${cfg.osd2.name} --add-key ${cfg.osd2.key}",
+        'echo \'{"cephx_secret": "${cfg.osd0.key}"}\' | ceph osd new ${cfg.osd0.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd1.key}"}\' | ceph osd new ${cfg.osd1.uuid} -i -',
+        'echo \'{"cephx_secret": "${cfg.osd2.key}"}\' | ceph osd new ${cfg.osd2.uuid} -i -',
+    )
+
+    # Initialize the OSDs with regular filestore
+    monA.succeed(
+        "ceph-osd -i ${cfg.osd0.name} --mkfs --osd-uuid ${cfg.osd0.uuid}",
+        "ceph-osd -i ${cfg.osd1.name} --mkfs --osd-uuid ${cfg.osd1.uuid}",
+        "ceph-osd -i ${cfg.osd2.name} --mkfs --osd-uuid ${cfg.osd2.uuid}",
+        "chown -R ceph:ceph /var/lib/ceph/osd",
+        "systemctl start ceph-osd-${cfg.osd0.name}",
+        "systemctl start ceph-osd-${cfg.osd1.name}",
+        "systemctl start ceph-osd-${cfg.osd2.name}",
+    )
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
+    monA.succeed(
+        "ceph osd pool create single-node-test 32 32",
+        "ceph osd pool ls | grep 'single-node-test'",
+        "ceph osd pool rename single-node-test single-node-other-test",
+        "ceph osd pool ls | grep 'single-node-other-test'",
+    )
+    monA.wait_until_succeeds("ceph -s | grep '2 pools, 33 pgs'")
+    monA.succeed(
+        "ceph osd getcrushmap -o crush",
+        "crushtool -d crush -o decrushed",
+        "sed 's/step chooseleaf firstn 0 type host/step chooseleaf firstn 0 type osd/' decrushed > modcrush",
+        "crushtool -c modcrush -o recrushed",
+        "ceph osd setcrushmap -i recrushed",
+        "ceph osd pool set single-node-other-test size 2",
+    )
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+    monA.wait_until_succeeds("ceph -s | grep '33 active+clean'")
+    monA.fail(
+        "ceph osd pool ls | grep 'single-node-test'",
+        "ceph osd pool delete single-node-other-test single-node-other-test --yes-i-really-really-mean-it",
+    )
+
+    # Shut down ceph by stopping ceph.target.
+    monA.succeed("systemctl stop ceph.target")
+
+    # Start it up
+    monA.succeed("systemctl start ceph.target")
+    monA.wait_for_unit("ceph-mon-${cfg.monA.name}")
+    monA.wait_for_unit("ceph-mgr-${cfg.monA.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd0.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd1.name}")
+    monA.wait_for_unit("ceph-osd-${cfg.osd2.name}")
+
+    # Ensure the cluster comes back up again
+    monA.succeed("ceph -s | grep 'mon: 1 daemons'")
+    monA.wait_until_succeeds("ceph -s | grep 'quorum ${cfg.monA.name}'")
+    monA.wait_until_succeeds("ceph osd stat | grep -e '3 osds: 3 up[^,]*, 3 in'")
+    monA.wait_until_succeeds("ceph -s | grep 'mgr: ${cfg.monA.name}(active,'")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+
+    # Enable the dashboard and recheck health
+    monA.succeed(
+        "ceph mgr module enable dashboard",
+        "ceph config set mgr mgr/dashboard/ssl false",
+        # default is 8080 but it's better to be explicit
+        "ceph config set mgr mgr/dashboard/server_port 8080",
+    )
+    monA.wait_for_open_port(8080)
+    monA.wait_until_succeeds("curl -q --fail http://localhost:8080")
+    monA.wait_until_succeeds("ceph -s | grep 'HEALTH_OK'")
+  '';
+in {
+  name = "basic-single-node-ceph-cluster";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lejonet johanot ];
+  };
+
+  nodes = {
+    monA = generateHost { pkgs = pkgs; cephConfig = cephConfigMonA; networkConfig = networkMonA; };
+  };
+
+  testScript = testscript;
+})
diff --git a/nixpkgs/nixos/tests/certmgr.nix b/nixpkgs/nixos/tests/certmgr.nix
new file mode 100644
index 000000000000..8f5b89487793
--- /dev/null
+++ b/nixpkgs/nixos/tests/certmgr.nix
@@ -0,0 +1,155 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+let
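+  # Builds a certmgr spec for the given host: certificates are requested from
+  # the local cfssl authority and written under /var/ssl for nginx to use.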
+  mkSpec = { host, service ? null, action }: {
+    inherit action;
+    authority = {
+      file = {
+        group = "nginx";
+        owner = "nginx";
+        path = "/var/ssl/${host}-ca.pem";
+      };
+      label = "www_ca";
+      profile = "three-month";
+      remote = "localhost:8888";
+    };
+    certificate = {
+      group = "nginx";
+      owner = "nginx";
+      path = "/var/ssl/${host}-cert.pem";
+    };
+    private_key = {
+      group = "nginx";
+      mode = "0600";
+      owner = "nginx";
+      path = "/var/ssl/${host}-key.pem";
+    };
+    request = {
+      CN = host;
+      hosts = [ host "www.${host}" ];
+      key = {
+        algo = "rsa";
+        size = 2048;
+      };
+      names = [
+        {
+          C = "US";
+          L = "San Francisco";
+          O = "Example, LLC";
+          ST = "CA";
+        }
+      ];
+    };
+    inherit service;
+  };
+
+  mkCertmgrTest = { svcManager, specs, testScript }: makeTest {
+    name = "certmgr-" + svcManager;
+    nodes = {
+      machine = { config, lib, pkgs, ... }: {
+        networking.firewall.allowedTCPPorts = with config.services; [ cfssl.port certmgr.metricsPort ];
+        networking.extraHosts = "127.0.0.1 imp.example.org decl.example.org";
+
+        services.cfssl.enable = true;
+        systemd.services.cfssl.after = [ "cfssl-init.service" "networking.target" ];
+
+        systemd.tmpfiles.rules = [ "d /var/ssl 777 root root" ];
+
+        systemd.services.cfssl-init = {
+          description = "Initialize the cfssl CA";
+          wantedBy    = [ "multi-user.target" ];
+          serviceConfig = {
+            User             = "cfssl";
+            Type             = "oneshot";
+            WorkingDirectory = config.services.cfssl.dataDir;
+          };
+          script = ''
+            ${pkgs.cfssl}/bin/cfssl genkey -initca ${pkgs.writeText "ca.json" (builtins.toJSON {
+              hosts = [ "ca.example.com" ];
+              key = {
+                algo = "rsa";
+                size = 4096;
+              };
+              names = [
+                {
+                  C = "US";
+                  L = "San Francisco";
+                  O = "Internet Widgets, LLC";
+                  OU = "Certificate Authority";
+                  ST = "California";
+                }
+              ];
+            })} | ${pkgs.cfssl}/bin/cfssljson -bare ca
+          '';
+        };
+
+        services.nginx = {
+          enable = true;
+          virtualHosts = lib.mkMerge (map (host: {
+            ${host} = {
+              sslCertificate = "/var/ssl/${host}-cert.pem";
+              sslCertificateKey = "/var/ssl/${host}-key.pem";
+              extraConfig = ''
+                ssl_protocols TLSv1 TLSv1.1 TLSv1.2;
+              '';
+              onlySSL = true;
+              serverName = host;
+              root = pkgs.writeTextDir "index.html" "It works!";
+            };
+          }) [ "imp.example.org" "decl.example.org" ]);
+        };
+
+        systemd.services.nginx.wantedBy = lib.mkForce [];
+
+        systemd.services.certmgr.after = [ "cfssl.service" ];
+        services.certmgr = {
+          enable = true;
+          inherit svcManager;
+          inherit specs;
+        };
+
+      };
+    };
+    inherit testScript;
+  };
+in
+{
+  systemd = mkCertmgrTest {
+    svcManager = "systemd";
+    specs = {
+      decl = mkSpec { host = "decl.example.org"; service = "nginx"; action ="restart"; };
+      imp = toString (pkgs.writeText "test.json" (builtins.toJSON (
+        mkSpec { host = "imp.example.org"; service = "nginx"; action = "restart"; }
+      )));
+    };
+    testScript = ''
+      machine.wait_for_unit("cfssl.service")
+      machine.wait_until_succeeds("ls /var/ssl/decl.example.org-ca.pem")
+      machine.wait_until_succeeds("ls /var/ssl/decl.example.org-key.pem")
+      machine.wait_until_succeeds("ls /var/ssl/decl.example.org-cert.pem")
+      machine.wait_until_succeeds("ls /var/ssl/imp.example.org-ca.pem")
+      machine.wait_until_succeeds("ls /var/ssl/imp.example.org-key.pem")
+      machine.wait_until_succeeds("ls /var/ssl/imp.example.org-cert.pem")
+      machine.wait_for_unit("nginx.service")
+      assert 1 < int(machine.succeed('journalctl -u nginx | grep "Starting Nginx" | wc -l'))
+      machine.succeed("curl --cacert /var/ssl/imp.example.org-ca.pem https://imp.example.org")
+      machine.succeed(
+          "curl --cacert /var/ssl/decl.example.org-ca.pem https://decl.example.org"
+      )
+    '';
+  };
+
+  command = mkCertmgrTest {
+    svcManager = "command";
+    specs = {
+      test = mkSpec { host = "command.example.org"; action = "touch /tmp/command.executed"; };
+    };
+    testScript = ''
+      machine.wait_for_unit("cfssl.service")
+      machine.wait_until_succeeds("stat /tmp/command.executed")
+    '';
+  };
+
+}
diff --git a/nixpkgs/nixos/tests/cfssl.nix b/nixpkgs/nixos/tests/cfssl.nix
new file mode 100644
index 000000000000..e673df3131f8
--- /dev/null
+++ b/nixpkgs/nixos/tests/cfssl.nix
@@ -0,0 +1,67 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "cfssl";
+
+  nodes.machine = { config, lib, pkgs, ... }:
+  {
+    networking.firewall.allowedTCPPorts = [ config.services.cfssl.port ];
+
+    services.cfssl.enable = true;
+    systemd.services.cfssl.after = [ "cfssl-init.service" ];
+
+    systemd.services.cfssl-init = {
+      description = "Initialize the cfssl CA";
+      wantedBy    = [ "multi-user.target" ];
+      serviceConfig = {
+        User             = "cfssl";
+        Type             = "oneshot";
+        WorkingDirectory = config.services.cfssl.dataDir;
+      };
+      script = with pkgs; ''
+        ${cfssl}/bin/cfssl genkey -initca ${pkgs.writeText "ca.json" (builtins.toJSON {
+          hosts = [ "ca.example.com" ];
+          key = {
+            algo = "rsa";
+            size = 4096;
+          };
+          names = [
+            {
+              C = "US";
+              L = "San Francisco";
+              O = "Internet Widgets, LLC";
+              OU = "Certificate Authority";
+              ST = "California";
+            }
+          ];
+        })} | ${cfssl}/bin/cfssljson -bare ca
+      '';
+    };
+  };
+
+  testScript =
+  let
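+    # Posts the CSR below to the local cfssl newkey API and stores the
+    # returned private key as /tmp/certificate-key.pem.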
+    cfsslrequest = with pkgs; writeScript "cfsslrequest" ''
+      curl -f -X POST -H "Content-Type: application/json" -d @${csr} \
+        http://localhost:8888/api/v1/cfssl/newkey | ${cfssl}/bin/cfssljson /tmp/certificate
+    '';
+    csr = pkgs.writeText "csr.json" (builtins.toJSON {
+      CN = "www.example.com";
+      hosts = [ "example.com" "www.example.com" ];
+      key = {
+        algo = "rsa";
+        size = 2048;
+      };
+      names = [
+        {
+          C = "US";
+          L = "San Francisco";
+          O = "Example Company, LLC";
+          OU = "Operations";
+          ST = "California";
+        }
+      ];
+    });
+  in
+    ''
+      machine.wait_for_unit("cfssl.service")
+      machine.wait_until_succeeds("${cfsslrequest}")
+      machine.succeed("ls /tmp/certificate-key.pem")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/cgit.nix b/nixpkgs/nixos/tests/cgit.nix
new file mode 100644
index 000000000000..6aed06adefdf
--- /dev/null
+++ b/nixpkgs/nixos/tests/cgit.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  robotsTxt = pkgs.writeText "cgit-robots.txt" ''
+    User-agent: *
+    Disallow: /
+  '';
+in {
+  name = "cgit";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ schnusch ];
+  };
+
+  nodes = {
+    server = { ... }: {
+      services.cgit."localhost" = {
+        enable = true;
+        package = pkgs.cgit.overrideAttrs ({ postInstall, ... }: {
+          postInstall = ''
+            ${postInstall}
+            cp ${robotsTxt} "$out/cgit/robots.txt"
+          '';
+        });
+        nginx.location = "/(c)git/";
+        repos = {
+          some-repo = {
+            path = "/srv/git/some-repo";
+            desc = "some-repo description";
+          };
+        };
+      };
+
+      environment.systemPackages = [ pkgs.git ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+
+    server.wait_for_unit("nginx.service")
+    server.wait_for_unit("network.target")
+    server.wait_for_open_port(80)
+
+    server.succeed("curl -fsS http://localhost/%28c%29git/cgit.css")
+
+    server.succeed("curl -fsS http://localhost/%28c%29git/robots.txt | diff -u - ${robotsTxt}")
+
+    server.succeed(
+        "curl -fsS http://localhost/%28c%29git/ | grep -F 'some-repo description'"
+    )
+
+    server.fail("curl -fsS http://localhost/robots.txt")
+
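+    # Populate the bare repository by pushing a single commit from a scratch clone.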
+    server.succeed("${pkgs.writeShellScript "setup-cgit-test-repo" ''
+      set -e
+      git init --bare -b master /srv/git/some-repo
+      git init -b master reference
+      cd reference
+      git remote add origin /srv/git/some-repo
+      date > date.txt
+      git add date.txt
+      git -c user.name=test -c user.email=test@localhost commit -m 'add date'
+      git push -u origin master
+    ''}")
+
+    server.succeed(
+        "curl -fsS 'http://localhost/%28c%29git/some-repo/plain/date.txt?id=master' | diff -u reference/date.txt -"
+    )
+
+    server.succeed(
+       "git clone http://localhost/%28c%29git/some-repo && diff -u reference/date.txt some-repo/date.txt"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/charliecloud.nix b/nixpkgs/nixos/tests/charliecloud.nix
new file mode 100644
index 000000000000..28c3e2f2dbf7
--- /dev/null
+++ b/nixpkgs/nixos/tests/charliecloud.nix
@@ -0,0 +1,43 @@
+# This test checks Charliecloud image construction and execution
+
+import ./make-test-python.nix ({ pkgs, ...} : let
+
+  dockerfile = pkgs.writeText "Dockerfile" ''
+    FROM nix
+    RUN mkdir /home /tmp
+    RUN touch /etc/passwd /etc/group
+    CMD ["true"]
+  '';
+
+in {
+  name = "charliecloud";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ bzizou ];
+  };
+
+  nodes = {
+    host = { ... }: {
+      environment.systemPackages = [ pkgs.charliecloud ];
+      virtualisation.docker.enable = true;
+      users.users.alice = {
+        isNormalUser = true;
+        extraGroups = [ "docker" ];
+      };
+    };
+  };
+
+  testScript = ''
+    host.start()
+    host.wait_for_unit("docker.service")
+    host.succeed(
+        'su - alice -c "docker load --input=${pkgs.dockerTools.examples.nix}"'
+    )
+    host.succeed(
+        "cp ${dockerfile} /home/alice/Dockerfile"
+    )
+    host.succeed('su - alice -c "ch-build -t hello ."')
+    host.succeed('su - alice -c "ch-builder2tar hello /var/tmp"')
+    host.succeed('su - alice -c "ch-tar2dir /var/tmp/hello.tar.gz /var/tmp"')
+    host.succeed('su - alice -c "ch-run /var/tmp/hello -- echo Running_From_Container_OK"')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/chromium.nix b/nixpkgs/nixos/tests/chromium.nix
new file mode 100644
index 000000000000..cdfdcc9bcdd2
--- /dev/null
+++ b/nixpkgs/nixos/tests/chromium.nix
@@ -0,0 +1,269 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, channelMap ? { # Maps "channels" to packages
+    stable        = pkgs.chromium;
+    beta          = pkgs.chromiumBeta;
+    dev           = pkgs.chromiumDev;
+    ungoogled     = pkgs.ungoogled-chromium;
+    chrome-stable = pkgs.google-chrome;
+    chrome-beta   = pkgs.google-chrome-beta;
+    chrome-dev    = pkgs.google-chrome-dev;
+  }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  user = "alice";
+
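+  # A minimal page whose title changes to "startup done" once it has fully
+  # loaded, so the test can tell when the browser is up.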
+  startupHTML = pkgs.writeText "chromium-startup.html" ''
+    <!DOCTYPE html>
+    <html>
+    <head>
+    <meta charset="UTF-8">
+    <title>Chromium startup notifier</title>
+    </head>
+    <body onload="javascript:document.title='startup done'">
+      <img src="file://${pkgs.fetchurl {
+        url = "https://nixos.org/logo/nixos-hex.svg";
+        sha256 = "07ymq6nw8kc22m7kzxjxldhiq8gzmc7f45kq2bvhbdm0w5s112s4";
+      }}" />
+    </body>
+    </html>
+  '';
+in
+
+mapAttrs (channel: chromiumPkg: makeTest {
+  name = "chromium-${channel}";
+  meta = {
+    maintainers = with maintainers; [ aszlig primeos ];
+  } // optionalAttrs (chromiumPkg.meta ? timeout) {
+    # https://github.com/NixOS/hydra/issues/591#issuecomment-435125621
+    # Note: optionalAttrs is used since meta.timeout is not set for Google Chrome
+    inherit (chromiumPkg.meta) timeout;
+  };
+
+  enableOCR = true;
+
+  nodes.machine = { ... }: {
+    imports = [ ./common/user-account.nix ./common/x11.nix ];
+    virtualisation.memorySize = 2047;
+    test-support.displayManager.auto.user = user;
+    environment = {
+      systemPackages = [ chromiumPkg ];
+      variables."XAUTHORITY" = "/home/alice/.Xauthority";
+    };
+  };
+
+  testScript = let
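+    # Writes an xdotool script to the store and returns the shell command that
+    # runs it, keeping the window-manipulation snippets below readable.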
+    xdo = name: text: let
+      xdoScript = pkgs.writeText "${name}.xdo" text;
+    in "${pkgs.xdotool}/bin/xdotool ${xdoScript}";
+  in ''
+    import shlex
+    import re
+    from contextlib import contextmanager
+
+
+    major_version = "${versions.major (getVersion chromiumPkg.name)}"
+
+
+    # Run as user alice
+    def ru(cmd):
+        return "su - ${user} -c " + shlex.quote(cmd)
+
+
+    def launch_browser():
+        """Launches the web browser with the correct options."""
+        # Determine the name of the binary:
+        pname = "${getName chromiumPkg.name}"
+        if pname.find("chromium") != -1:
+            binary = "chromium"  # Same name for all channels and ungoogled-chromium
+        elif pname == "google-chrome":
+            binary = "google-chrome-stable"
+        elif pname == "google-chrome-dev":
+            binary = "google-chrome-unstable"
+        else:  # For google-chrome-beta and as fallback:
+            binary = pname
+        # Add optional CLI options:
+        options = []
+        if int(major_version) > 95 and not pname.startswith("google-chrome"):
+            # Workaround to avoid a GPU crash:
+            options.append("--use-gl=swiftshader")
+        # Launch the process:
+        options.append("file://${startupHTML}")
+        machine.succeed(ru(f'ulimit -c unlimited; {binary} {shlex.join(options)} >&2 & disown'))
+        if binary.startswith("google-chrome"):
+            # Need to click away the first window:
+            machine.wait_for_text("Make Google Chrome the default browser")
+            machine.screenshot("google_chrome_default_browser_prompt")
+            machine.send_key("ret")
+
+
+    def create_new_win():
+        """Creates a new Chromium window."""
+        with machine.nested("Creating a new Chromium window"):
+            machine.wait_until_succeeds(
+                ru(
+                    "${xdo "create_new_win-select_main_window" ''
+                      search --onlyvisible --name "startup done"
+                      windowfocus --sync
+                      windowactivate --sync
+                    ''}"
+                )
+            )
+            machine.send_key("ctrl-n")
+            # Wait until the new window appears:
+            machine.wait_until_succeeds(
+                ru(
+                    "${xdo "create_new_win-wait_for_window" ''
+                      search --onlyvisible --name "New Tab"
+                      windowfocus --sync
+                      windowactivate --sync
+                    ''}"
+                )
+            )
+
+
+    def close_new_tab_win():
+        """Closes the Chromium window with the title "New Tab"."""
+        machine.wait_until_succeeds(
+            ru(
+                "${xdo "close_new_tab_win-select_main_window" ''
+                  search --onlyvisible --name "New Tab"
+                  windowfocus --sync
+                  windowactivate --sync
+                ''}"
+            )
+        )
+        machine.send_key("ctrl-w")
+        # Wait until the closed window disappears:
+        machine.wait_until_fails(
+            ru(
+                "${xdo "close_new_tab_win-wait_for_close" ''
+                  search --onlyvisible --name "New Tab"
+                ''}"
+            )
+        )
+
+
+    @contextmanager
+    def test_new_win(description, url, window_name):
+        create_new_win()
+        machine.wait_for_window("New Tab")
+        machine.send_chars(f"{url}\n")
+        machine.wait_for_window(window_name)
+        machine.screenshot(description)
+        machine.succeed(
+            ru(
+                "${xdo "copy-all" ''
+                  key --delay 1000 Ctrl+a Ctrl+c
+                ''}"
+            )
+        )
+        clipboard = machine.succeed(
+            ru("${pkgs.xclip}/bin/xclip -o")
+        )
+        if url == "chrome://gpu":
+            clipboard = ""  # TODO: We cannot copy the text via Ctrl+a
+        print(f"{description} window content:\n{clipboard}")
+        with machine.nested(description):
+            yield clipboard
+        # Close the newly created window:
+        machine.send_key("ctrl-w")
+
+
+    machine.wait_for_x()
+
+    launch_browser()
+
+    machine.wait_for_text("startup done")
+    machine.wait_until_succeeds(
+        ru(
+            "${xdo "check-startup" ''
+              search --sync --onlyvisible --name "startup done"
+              # close first start help popup
+              key -delay 1000 Escape
+              windowfocus --sync
+              windowactivate --sync
+            ''}"
+        )
+    )
+
+    create_new_win()
+    # Optional: Wait for the new tab page to fully load before taking the screenshot:
+    machine.wait_for_text("Web Store")
+    machine.screenshot("empty_windows")
+    close_new_tab_win()
+
+    machine.screenshot("startup_done")
+
+    with test_new_win("sandbox_info", "chrome://sandbox", "Sandbox Status") as clipboard:
+        filters = [
+            "layer 1 sandbox.*namespace",
+            "pid namespaces.*yes",
+            "network namespaces.*yes",
+            "seccomp.*sandbox.*yes",
+            "you are adequately sandboxed",
+        ]
+        if not all(
+            re.search(filter, clipboard, flags=re.DOTALL | re.IGNORECASE)
+            for filter in filters
+        ):
+            assert False, f"sandbox not working properly: {clipboard}"
+
+        machine.sleep(1)
+        machine.succeed(
+            ru(
+                "${xdo "find-window-after-copy" ''
+                  search --onlyvisible --name "Sandbox Status"
+                ''}"
+            )
+        )
+
+        clipboard = machine.succeed(
+            ru(
+                "echo void | ${pkgs.xclip}/bin/xclip -i >&2"
+            )
+        )
+        machine.succeed(
+            ru(
+                "${xdo "copy-sandbox-info" ''
+                  key --delay 1000 Ctrl+a Ctrl+c
+                ''}"
+            )
+        )
+
+        clipboard = machine.succeed(
+            ru("${pkgs.xclip}/bin/xclip -o")
+        )
+        if not all(
+            re.search(filter, clipboard, flags=re.DOTALL | re.IGNORECASE)
+            for filter in filters
+        ):
+            assert False, f"copying twice in a row does not work properly: {clipboard}"
+
+        machine.screenshot("after_copy_from_chromium")
+
+
+    with test_new_win("gpu_info", "chrome://gpu", "GPU Internals"):
+        # To check the text rendering (catches regressions like #131074):
+        machine.wait_for_text("Graphics Feature Status")
+        # TODO: Fix copying all of the text to the clipboard
+
+
+    with test_new_win("version_info", "chrome://version", "About Version") as clipboard:
+        filters = [
+            r"${chromiumPkg.version} \(Official Build",
+        ]
+        if not all(
+            re.search(filter, clipboard) for filter in filters
+        ):
+            assert False, "Version info not correct."
+
+
+    machine.shutdown()
+  '';
+}) channelMap
diff --git a/nixpkgs/nixos/tests/chrony-ptp.nix b/nixpkgs/nixos/tests/chrony-ptp.nix
new file mode 100644
index 000000000000..b2634a8cfc5c
--- /dev/null
+++ b/nixpkgs/nixos/tests/chrony-ptp.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ lib, ... }:
+{
+  name = "chrony-ptp";
+
+  meta = {
+    maintainers = with lib.maintainers; [ gkleen ];
+  };
+
+  nodes = {
+    qemuGuest = { lib, ... }: {
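+      # ptp_kvm exposes the host's clock to the guest as a PTP device, which
+      # chrony uses as a reference clock below.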
+      boot.kernelModules = [ "ptp_kvm" ];
+
+      services.chrony = {
+        enable = true;
+        extraConfig = ''
+          refclock PHC /dev/ptp_kvm poll 2 dpoll -2 offset 0 stratum 3
+        '';
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    qemuGuest.wait_for_unit('multi-user.target')
+    qemuGuest.succeed('systemctl is-active chronyd.service')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/chrony.nix b/nixpkgs/nixos/tests/chrony.nix
new file mode 100644
index 000000000000..578b1e32d50c
--- /dev/null
+++ b/nixpkgs/nixos/tests/chrony.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ lib, ... }:
+{
+  name = "chrony";
+
+  meta = {
+    maintainers = with lib.maintainers; [ fpletz ];
+  };
+
+  nodes = {
+    default = {
+      services.chrony.enable = true;
+    };
+    graphene-hardened = {
+      services.chrony.enable = true;
+      services.chrony.enableMemoryLocking = true;
+      environment.memoryAllocator.provider = "graphene-hardened";
+      # dhcpcd privsep is incompatible with graphene-hardened
+      networking.useNetworkd = true;
+    };
+  };
+
+  testScript = {nodes, ...} : let
+    graphene-hardened = nodes.graphene-hardened.system.build.toplevel;
+  in ''
+    default.start()
+    default.wait_for_unit('multi-user.target')
+    default.succeed('systemctl is-active chronyd.service')
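+    # Switch the running system to the graphene-hardened configuration and
+    # check that chronyd keeps working with the hardened memory allocator.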
+    default.succeed('${graphene-hardened}/bin/switch-to-configuration test')
+    default.succeed('systemctl is-active chronyd.service')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/cinnamon.nix b/nixpkgs/nixos/tests/cinnamon.nix
new file mode 100644
index 000000000000..7637b55a2b12
--- /dev/null
+++ b/nixpkgs/nixos/tests/cinnamon.nix
@@ -0,0 +1,82 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "cinnamon";
+
+  meta.maintainers = lib.teams.cinnamon.members;
+
+  nodes.machine = { ... }: {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.desktopManager.cinnamon.enable = true;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.users.users.alice;
+      env = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus DISPLAY=:0";
+      su = command: "su - ${user.name} -c '${env} ${command}'";
+
+      # Call JavaScript in cinnamon (the shell); returns a tuple (success, output),
+      # where `success` is true if the dbus call was successful and `output` is what
+      # the JavaScript evaluates to.
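+      # For example, eval "Main.runState" prints "(true, '2')" once the shell
+      # is fully up.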
+      eval = name: su "gdbus call --session -d org.Cinnamon -o /org/Cinnamon -m org.Cinnamon.Eval ${name}";
+    in
+    ''
+      machine.wait_for_unit("display-manager.service")
+
+      with subtest("Test if we can see username in slick-greeter"):
+          machine.wait_for_text("${user.description}")
+          machine.screenshot("slick_greeter_lightdm")
+
+      with subtest("Login with slick-greeter"):
+          machine.send_chars("${user.password}\n")
+          machine.wait_for_x()
+          machine.wait_for_file("${user.home}/.Xauthority")
+          machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+      with subtest("Check that logging in has given the user ownership of devices"):
+          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+      with subtest("Wait for the Cinnamon shell"):
+          # Correct output should be (true, '2')
+          # https://github.com/linuxmint/cinnamon/blob/5.4.0/js/ui/main.js#L183-L187
+          machine.wait_until_succeeds("${eval "Main.runState"} | grep -q 'true,..2'")
+
+      with subtest("Check if Cinnamon components actually start"):
+          for i in ["csd-media-keys", "cinnamon-killer-daemon", "xapp-sn-watcher", "nemo-desktop"]:
+            machine.wait_until_succeeds(f"pgrep -f {i}")
+          machine.wait_until_succeeds("journalctl -b --grep 'Loaded applet menu@cinnamon.org'")
+          machine.wait_until_succeeds("journalctl -b --grep 'calendar@cinnamon.org: Calendar events supported'")
+
+      with subtest("Open Cinnamon Settings"):
+          machine.succeed("${su "cinnamon-settings themes >&2 &"}")
+          machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'cinnamon-settings'")
+          machine.wait_for_text('(Style|Appearance|Color)')
+          machine.sleep(2)
+          machine.screenshot("cinnamon_settings")
+
+      with subtest("Lock the screen"):
+          machine.succeed("${su "cinnamon-screensaver-command -l >&2 &"}")
+          machine.wait_until_succeeds("${su "cinnamon-screensaver-command -q"} | grep 'The screensaver is active'")
+          machine.sleep(2)
+          machine.screenshot("cinnamon_screensaver")
+          machine.send_chars("${user.password}\n", delay=0.2)
+          machine.wait_until_succeeds("${su "cinnamon-screensaver-command -q"} | grep 'The screensaver is inactive'")
+          machine.sleep(2)
+
+      with subtest("Open GNOME Terminal"):
+          machine.succeed("${su "gnome-terminal"}")
+          machine.wait_until_succeeds("${eval "global.display.focus_window.wm_class"} | grep -i 'gnome-terminal'")
+          machine.sleep(2)
+
+      with subtest("Open virtual keyboard"):
+          machine.succeed("${su "dbus-send --print-reply --dest=org.Cinnamon /org/Cinnamon org.Cinnamon.ToggleKeyboard"}")
+          machine.wait_for_text('(Ctrl|Alt)')
+          machine.sleep(2)
+          machine.screenshot("cinnamon_virtual_keyboard")
+
+      with subtest("Check if Cinnamon has ever coredumped"):
+          machine.fail("coredumpctl --json=short | grep -E 'cinnamon|nemo'")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/cjdns.nix b/nixpkgs/nixos/tests/cjdns.nix
new file mode 100644
index 000000000000..dc5f371c74d8
--- /dev/null
+++ b/nixpkgs/nixos/tests/cjdns.nix
@@ -0,0 +1,121 @@
+let
+  carolKey = "2d2a338b46f8e4a8c462f0c385b481292a05f678e19a2b82755258cf0f0af7e2";
+  carolPubKey = "n932l3pjvmhtxxcdrqq2qpw5zc58f01vvjx01h4dtd1bb0nnu2h0.k";
+  carolPassword = "678287829ce4c67bc8b227e56d94422ee1b85fa11618157b2f591de6c6322b52";
+
+  basicConfig =
+    { ... }:
+    { services.cjdns.enable = true;
+
+      # Turning off DHCP isn't very realistic but makes
+      # the sequence of address assignment less stochastic.
+      networking.useDHCP = false;
+
+      # CJDNS output is incompatible with the XML log.
+      systemd.services.cjdns.serviceConfig.StandardOutput = "null";
+    };
+
+in
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "cjdns";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ehmry ];
+  };
+
+  nodes = { # Alice finds peers over ETHInterface.
+      alice =
+        { ... }:
+        { imports = [ basicConfig ];
+
+          services.cjdns.ETHInterface.bind = "eth1";
+
+          services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+          networking.firewall.allowedTCPPorts = [ 80 ];
+        };
+
+      # Bob explicitly connects to Carol over UDPInterface.
+      bob =
+        { ... }:
+
+        { imports = [ basicConfig ];
+
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = "192.168.0.2"; prefixLength = 24; }
+          ];
+
+          services.cjdns =
+            { UDPInterface =
+                { bind = "0.0.0.0:1024";
+                  connectTo."192.168.0.1:1024" =
+                    { password = carolPassword;
+                      publicKey = carolPubKey;
+                    };
+                };
+            };
+        };
+
+      # Carol listens on ETHInterface and UDPInterface,
+      # but knows neither Alice nor Bob.
+      carol =
+        { ... }:
+        { imports = [ basicConfig ];
+
+          environment.etc."cjdns.keys".text = ''
+            CJDNS_PRIVATE_KEY=${carolKey}
+            CJDNS_ADMIN_PASSWORD=FOOBAR
+          '';
+
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = "192.168.0.1"; prefixLength = 24; }
+          ];
+
+          services.cjdns =
+            { authorizedPasswords = [ carolPassword ];
+              ETHInterface.bind = "eth1";
+              UDPInterface.bind = "192.168.0.1:1024";
+            };
+          networking.firewall.allowedUDPPorts = [ 1024 ];
+        };
+
+    };
+
+  testScript =
+    ''
+      import re
+
+      start_all()
+
+      alice.wait_for_unit("cjdns.service")
+      bob.wait_for_unit("cjdns.service")
+      carol.wait_for_unit("cjdns.service")
+
+
+      def cjdns_ip(machine):
+          res = machine.succeed("ip -o -6 addr show dev tun0")
+          ip = re.split(r"\s+|/", res)[3]
+          machine.log("has ip {}".format(ip))
+          return ip
+
+
+      alice_ip6 = cjdns_ip(alice)
+      bob_ip6 = cjdns_ip(bob)
+      carol_ip6 = cjdns_ip(carol)
+
+      # ping a few times each to let the routing table establish itself
+
+      alice.succeed("ping -c 4 {}".format(carol_ip6))
+      bob.succeed("ping -c 4 {}".format(carol_ip6))
+
+      carol.succeed("ping -c 4 {}".format(alice_ip6))
+      carol.succeed("ping -c 4 {}".format(bob_ip6))
+
+      alice.succeed("ping -c 4 {}".format(bob_ip6))
+      bob.succeed("ping -c 4 {}".format(alice_ip6))
+
+      alice.wait_for_unit("httpd.service")
+
+      bob.succeed("curl --fail -g http://[{}]".format(alice_ip6))
+    '';
+})
diff --git a/nixpkgs/nixos/tests/clickhouse.nix b/nixpkgs/nixos/tests/clickhouse.nix
new file mode 100644
index 000000000000..77d6a7ab8be4
--- /dev/null
+++ b/nixpkgs/nixos/tests/clickhouse.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "clickhouse";
+  meta.maintainers = with pkgs.lib.maintainers; [ ];
+
+  nodes.machine = {
+    services.clickhouse.enable = true;
+    virtualisation.memorySize = 4096;
+  };
+
+  testScript =
+    let
+      # work around quote/substitution complexity between Nix, Python, bash and SQL.
+      tableDDL = pkgs.writeText "ddl.sql" "CREATE TABLE `demo` (`value` FixedString(10)) engine = MergeTree PARTITION BY value ORDER BY tuple();";
+      insertQuery = pkgs.writeText "insert.sql" "INSERT INTO `demo` (`value`) VALUES ('foo');";
+      selectQuery = pkgs.writeText "select.sql" "SELECT * from `demo`";
+    in
+      ''
+        machine.start()
+        machine.wait_for_unit("clickhouse.service")
+        machine.wait_for_open_port(9000)
+
+        machine.succeed(
+            "cat ${tableDDL} | clickhouse-client"
+        )
+        machine.succeed(
+            "cat ${insertQuery} | clickhouse-client"
+        )
+        machine.succeed(
+            "cat ${selectQuery} | clickhouse-client | grep foo"
+        )
+      '';
+})
diff --git a/nixpkgs/nixos/tests/cloud-init-hostname.nix b/nixpkgs/nixos/tests/cloud-init-hostname.nix
new file mode 100644
index 000000000000..7c657cc9f6f9
--- /dev/null
+++ b/nixpkgs/nixos/tests/cloud-init-hostname.nix
@@ -0,0 +1,46 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  # Hostname can also be set through "hostname" in user-data.
+  # This is how proxmox configures hostname through cloud-init.
+  metadataDrive = pkgs.stdenv.mkDerivation {
+    name = "metadata";
+    buildCommand = ''
+      mkdir -p $out/iso
+
+      cat << EOF > $out/iso/user-data
+      #cloud-config
+      hostname: testhostname
+      EOF
+
+      cat << EOF > $out/iso/meta-data
+      instance-id: iid-local02
+      EOF
+
+      ${pkgs.cdrkit}/bin/genisoimage -volid cidata -joliet -rock -o $out/metadata.iso $out/iso
+    '';
+  };
+
+in makeTest {
+  name = "cloud-init-hostname";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lewo illustris ];
+  };
+
+  nodes.machine2 = { ... }: {
+    virtualisation.qemu.options = [ "-cdrom" "${metadataDrive}/metadata.iso" ];
+    services.cloud-init.enable = true;
+    networking.hostName = "";
+  };
+
+  testScript = ''
+    unnamed.wait_for_unit("cloud-final.service")
+    assert "testhostname" in unnamed.succeed("hostname")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/cloud-init.nix b/nixpkgs/nixos/tests/cloud-init.nix
new file mode 100644
index 000000000000..786e01add7d4
--- /dev/null
+++ b/nixpkgs/nixos/tests/cloud-init.nix
@@ -0,0 +1,114 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  inherit (import ./ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+  metadataDrive = pkgs.stdenv.mkDerivation {
+    name = "metadata";
+    buildCommand = ''
+      mkdir -p $out/iso
+
+      cat << EOF > $out/iso/user-data
+      #cloud-config
+      write_files:
+      -   content: |
+                cloudinit
+          path: /tmp/cloudinit-write-file
+
+      users:
+        - default
+        - name: nixos
+          ssh_authorized_keys:
+            - "${snakeOilPublicKey}"
+      EOF
+
+      cat << EOF > $out/iso/meta-data
+      instance-id: iid-local01
+      local-hostname: "test"
+      public-keys:
+        - "${snakeOilPublicKey}"
+      EOF
+
+      cat << EOF > $out/iso/network-config
+      version: 1
+      config:
+          - type: physical
+            name: eth0
+            mac_address: '52:54:00:12:34:56'
+            subnets:
+            - type: static
+              address: '12.34.56.78'
+              netmask: '255.255.255.0'
+              gateway: '12.34.56.9'
+          - type: nameserver
+            address:
+            - '6.7.8.9'
+            search:
+            - 'example.com'
+      EOF
+      ${pkgs.cdrkit}/bin/genisoimage -volid cidata -joliet -rock -o $out/metadata.iso $out/iso
+      '';
+  };
+
+in makeTest {
+  name = "cloud-init";
+  meta.maintainers = with pkgs.lib.maintainers; [ lewo illustris ];
+  nodes.machine = { ... }:
+  {
+    virtualisation.qemu.options = [ "-cdrom" "${metadataDrive}/metadata.iso" ];
+    services.cloud-init = {
+      enable = true;
+      network.enable = true;
+    };
+    services.openssh.enable = true;
+    networking.hostName = "";
+    networking.useDHCP = false;
+  };
+  testScript = ''
+    # Wait until cloud-init has finished its run
+    unnamed.wait_for_unit("cloud-final.service")
+
+    unnamed.succeed("cat /tmp/cloudinit-write-file | grep -q 'cloudinit'")
+
+    # install snakeoil ssh key and provision .ssh/config file
+    unnamed.succeed("mkdir -p ~/.ssh")
+    unnamed.succeed(
+        "cat ${snakeOilPrivateKey} > ~/.ssh/id_snakeoil"
+    )
+    unnamed.succeed("chmod 600 ~/.ssh/id_snakeoil")
+
+    unnamed.wait_for_unit("sshd.service")
+
+    # we should be able to log in as the root user, as well as the created nixos user
+    unnamed.succeed(
+        "timeout 10 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil root@localhost 'true'"
+    )
+    unnamed.succeed(
+        "timeout 10 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil nixos@localhost 'true'"
+    )
+
+    # test changing hostname via cloud-init worked
+    assert (
+        unnamed.succeed(
+            "timeout 10 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o IdentityFile=~/.ssh/id_snakeoil nixos@localhost 'hostname'"
+        ).strip()
+        == "test"
+    )
+
+    # check IP and route configs
+    assert "default via 12.34.56.9 dev eth0 proto static" in unnamed.succeed("ip route")
+    assert "12.34.56.0/24 dev eth0 proto kernel scope link src 12.34.56.78" in unnamed.succeed("ip route")
+
+    # check nameserver and search configs
+    assert "6.7.8.9" in unnamed.succeed("resolvectl status")
+    assert "example.com" in unnamed.succeed("resolvectl status")
+
+  '';
+}
diff --git a/nixpkgs/nixos/tests/cloudlog.nix b/nixpkgs/nixos/tests/cloudlog.nix
new file mode 100644
index 000000000000..c99951c1b22c
--- /dev/null
+++ b/nixpkgs/nixos/tests/cloudlog.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "cloudlog";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ melling ];
+  };
+  nodes = {
+    machine = {
+      services.mysql.package = pkgs.mariadb;
+      services.cloudlog.enable = true;
+    };
+  };
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("phpfpm-cloudlog")
+    machine.wait_for_open_port(80)
+    machine.wait_until_succeeds("curl -s -L --fail http://localhost | grep 'Login - Cloudlog'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/cntr.nix b/nixpkgs/nixos/tests/cntr.nix
new file mode 100644
index 000000000000..598143beb6c0
--- /dev/null
+++ b/nixpkgs/nixos/tests/cntr.nix
@@ -0,0 +1,75 @@
+# Test for cntr tool
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; }, lib ? pkgs.lib }:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+
+  mkOCITest = backend:
+    makeTest {
+      name = "cntr-${backend}";
+
+      meta = { maintainers = with lib.maintainers; [ sorki mic92 ]; };
+
+      nodes = {
+        ${backend} = { pkgs, ... }: {
+          environment.systemPackages = [ pkgs.cntr ];
+          virtualisation.oci-containers = {
+            inherit backend;
+            containers.nginx = {
+              image = "nginx-container";
+              imageFile = pkgs.dockerTools.examples.nginx;
+              ports = [ "8181:80" ];
+            };
+          };
+        };
+      };
+
+      testScript = ''
+        start_all()
+        ${backend}.wait_for_unit("${backend}-nginx.service")
+        ${backend}.wait_for_open_port(8181)
+        # For some reason, the cntr command hangs when run without the &.
+        # As such, we have to do some messy things to ensure we check the exitcode and output in a race-condition-safe manner
+        ${backend}.execute(
+            "(cntr attach -t ${backend} nginx sh -- -c 'curl localhost | grep Hello' > /tmp/result; echo $? > /tmp/exitcode; touch /tmp/done) &"
+        )
+
+        ${backend}.wait_for_file("/tmp/done")
+        assert "0" == ${backend}.succeed("cat /tmp/exitcode").strip(), "non-zero exit code"
+        assert "Hello" in ${backend}.succeed("cat /tmp/result"), "no greeting in output"
+      '';
+    };
+
+  mkContainersTest = makeTest {
+    name = "cntr-containers";
+
+    meta = with pkgs.lib.maintainers; { maintainers = [ sorki mic92 ]; };
+
+    nodes.machine = { lib, ... }: {
+      environment.systemPackages = [ pkgs.cntr ];
+      containers.test = {
+        autoStart = true;
+        privateNetwork = true;
+        hostAddress = "172.16.0.1";
+        localAddress = "172.16.0.2";
+        config = { };
+      };
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("container@test.service")
+      # I haven't observed the same hanging behaviour in this version as in the OCI version, which necessitates this messy invocation, but it's probably better to be safe than sorry and use it here as well.
+      machine.execute(
+          "(cntr attach test sh -- -c 'ping -c5 172.16.0.1'; echo $? > /tmp/exitcode; touch /tmp/done) &"
+      )
+
+      machine.wait_for_file("/tmp/done")
+      assert "0" == machine.succeed("cat /tmp/exitcode").strip(), "non-zero exit code"
+    '';
+  };
+in {
+  nixos-container = mkContainersTest;
+} // (lib.foldl' (attrs: backend: attrs // { ${backend} = mkOCITest backend; })
+  { } [ "docker" "podman" ])
diff --git a/nixpkgs/nixos/tests/cockpit.nix b/nixpkgs/nixos/tests/cockpit.nix
new file mode 100644
index 000000000000..e7165b979014
--- /dev/null
+++ b/nixpkgs/nixos/tests/cockpit.nix
@@ -0,0 +1,136 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+
+  let
+    user = "alice"; # from ./common/user-account.nix
+    password = "foobar"; # from ./common/user-account.nix
+  in {
+    name = "cockpit";
+    meta = {
+      maintainers = with lib.maintainers; [ lucasew ];
+    };
+    nodes = {
+      server = { config, ... }: {
+        imports = [ ./common/user-account.nix ];
+        security.polkit.enable = true;
+        users.users.${user} = {
+          extraGroups = [ "wheel" ];
+        };
+        services.cockpit = {
+          enable = true;
+          openFirewall = true;
+          settings = {
+            WebService = {
+              Origins = "https://server:9090";
+            };
+          };
+        };
+      };
+      client = { config, ... }: {
+        imports = [ ./common/user-account.nix ];
+        environment.systemPackages = let
+          seleniumScript = pkgs.writers.writePython3Bin "selenium-script" {
+            libraries = with pkgs.python3Packages; [ selenium ];
+            } ''
+            from selenium import webdriver
+            from selenium.webdriver.common.by import By
+            from selenium.webdriver.firefox.options import Options
+            from selenium.webdriver.support.ui import WebDriverWait
+            from selenium.webdriver.support import expected_conditions as EC
+            from time import sleep
+
+
+            def log(msg):
+                from sys import stderr
+                print(f"[*] {msg}", file=stderr)
+
+
+            log("Initializing")
+
+            options = Options()
+            options.add_argument("--headless")
+
+            service = webdriver.FirefoxService(executable_path="${lib.getExe pkgs.geckodriver}")  # noqa: E501
+            driver = webdriver.Firefox(options=options, service=service)
+
+            driver.implicitly_wait(10)
+
+            log("Opening homepage")
+            driver.get("https://server:9090")
+
+            wait = WebDriverWait(driver, 60)
+
+
+            def wait_elem(by, query):
+                wait.until(EC.presence_of_element_located((by, query)))
+
+
+            def wait_title_contains(title):
+                wait.until(EC.title_contains(title))
+
+
+            def find_element(by, query):
+                return driver.find_element(by, query)
+
+
+            def set_value(elem, value):
+                script = 'arguments[0].value = arguments[1]'
+                return driver.execute_script(script, elem, value)
+
+
+            log("Waiting for the homepage to load")
+
+            # cockpit sets initial title as hostname
+            wait_title_contains("server")
+            wait_elem(By.CSS_SELECTOR, 'input#login-user-input')
+
+            log("Homepage loaded!")
+
+            log("Filling out username")
+            login_input = find_element(By.CSS_SELECTOR, 'input#login-user-input')
+            set_value(login_input, "${user}")
+
+            log("Filling out password")
+            password_input = find_element(By.CSS_SELECTOR, 'input#login-password-input')
+            set_value(password_input, "${password}")
+
+            log("Submitting credentials for login")
+            driver.find_element(By.CSS_SELECTOR, 'button#login-button').click()
+
+            # driver.implicitly_wait(1)
+            # driver.get("https://server:9090/system")
+
+            log("Waiting dashboard to load")
+            wait_title_contains("${user}@server")
+
+            log("Waiting for the frontend to initialize")
+            sleep(1)
+
+            log("Looking for that banner that tells about limited access")
+            container_iframe = find_element(By.CSS_SELECTOR, 'iframe.container-frame')
+            driver.switch_to.frame(container_iframe)
+
+            assert "Web console is running in limited access mode" in driver.page_source
+
+            driver.close()
+          '';
+        in with pkgs; [ firefox-unwrapped geckodriver seleniumScript ];
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      server.wait_for_open_port(9090)
+      server.wait_for_unit("network.target")
+      server.wait_for_unit("multi-user.target")
+      server.systemctl("start", "polkit")
+
+      client.wait_for_unit("multi-user.target")
+
+      client.succeed("curl -k https://server:9090 -o /dev/stderr")
+      print(client.succeed("whoami"))
+      client.succeed('PYTHONUNBUFFERED=1 selenium-script')
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/cockroachdb.nix b/nixpkgs/nixos/tests/cockroachdb.nix
new file mode 100644
index 000000000000..5b1e1a7dee1f
--- /dev/null
+++ b/nixpkgs/nixos/tests/cockroachdb.nix
@@ -0,0 +1,124 @@
+# This performs a full 'end-to-end' test of a multi-node CockroachDB cluster
+# using the built-in 'cockroach workload' command, to simulate a semi-realistic
+# test load. It generally takes anywhere from 3-5 minutes to run and uses 1-2GB
+# of RAM (though each of the 3 workers gets 2GB allocated).
+#
+# CockroachDB requires synchronized system clocks within a small error window
+# (~500ms by default) on each node in order to maintain a multi-node cluster.
+# Cluster joins that are outside this window will fail, and nodes that skew
+# outside the window after joining will promptly get kicked out.
+#
+# To accommodate this, we use QEMU/virtio infrastructure and load the 'ptp_kvm'
+# driver inside a guest. This driver allows the host machine to pass its clock
+# through to the guest as a hardware clock that appears as a Precision Time
+# Protocol (PTP) Clock device, generally /dev/ptp0. PTP devices can be measured
+# and used as hardware reference clocks (similar to an on-board GPS clock) by
+# NTP software. In our case, we use Chrony to synchronize to the reference
+# clock.
+#
+# This test is currently NOT enabled as a continuously-checked NixOS test.
+# Ideally, this test would be run by Hydra and Borg on all relevant changes,
+# except:
+#
+#   - Not every build machine is compatible with the ptp_kvm driver.
+#     Virtualized EC2 instances, for example, do not support loading the ptp_kvm
+#     driver into guests. However, bare metal builders (e.g. Packet) do seem to
+#     work just fine. In practice, this means x86_64-linux builds would fail
+#     randomly, depending on which build machine got the job. (This is probably
+#     worth some investigation; I imagine it's based on ptp_kvm's usage of paravirt
+#     support which may not be available in 'nested' environments.)
+#
+#   - ptp_kvm is not supported on aarch64, otherwise it seems likely Cockroach
+#     could be tested there, as well. This seems to be due to the usage of
+#     the TSC in ptp_kvm, which isn't supported (easily) on AArch64. (And:
+#     testing stuff, not just making sure it builds, is important to ensure
+#     aarch64 support remains viable.)
+#
+# For future developers who are reading this message, are daring and would want
+# to fix this, some options are:
+#
+#   - Just test a single node cluster instead (boring and less thorough).
+#   - Move all CI to bare metal packet builders, and we can at least do x86_64-linux.
+#   - Get virtualized clocking working in aarch64, somehow.
+#   - Add a 4th node that acts as an NTP service and uses no PTP clocks for
+#     references, at the client level. This bloats the node and memory
+#     requirements, but would probably allow both aarch64/x86_64 to work.
+#
+
+let
+
+  # Creates a node. If 'joinNode' parameter, a string containing an IP address,
+  # is non-null, then the CockroachDB server will attempt to join/connect to
+  # the cluster node specified at that address.
+  makeNode = locality: myAddr: joinNode:
+    { nodes, pkgs, lib, config, ... }:
+
+    {
+      # Bank/TPC-C benchmarks take some memory to complete
+      virtualisation.memorySize = 2048;
+
+      # Install the KVM PTP "Virtualized Clock" driver. This allows a /dev/ptp0
+      # device to appear as a reference clock, synchronized to the host clock.
+      # Because CockroachDB *requires* a time-synchronization mechanism for
+      # the system time in a cluster scenario, this is necessary for it to work.
+      boot.kernelModules = [ "ptp_kvm" ];
+
+      # Enable and configure Chrony, using the given virtualized clock passed
+      # through by KVM.
+      services.chrony.enable = true;
+      services.chrony.servers = lib.mkForce [ ];
+      services.chrony.extraConfig = ''
+        refclock PHC /dev/ptp0 poll 2 prefer require refid KVM
+        makestep 0.1 3
+      '';
+
+      # Enable CockroachDB. In order to ensure that Chrony has performed its
+      # first synchronization at boot-time (which may take ~10 seconds) before
+      # starting CockroachDB, we block the ExecStartPre directive using the
+      # 'waitsync' command. This ensures Cockroach doesn't have its system time
+      # leap forward out of nowhere during startup/execution.
+      #
+      # Note that the threshold for NTP-based skew in CockroachDB is ~500ms by
+      # default, so making sure it's started *after* accurate time
+      # synchronization is extremely important.
+      services.cockroachdb.enable = true;
+      services.cockroachdb.insecure = true;
+      services.cockroachdb.openPorts = true;
+      services.cockroachdb.locality = locality;
+      services.cockroachdb.listen.address = myAddr;
+      services.cockroachdb.join = lib.mkIf (joinNode != null) joinNode;
+
+      systemd.services.chronyd.unitConfig.ConditionPathExists = "/dev/ptp0";
+
+      # Hold startup until Chrony has performed its first measurement (which
+      # will probably result in a full timeskip, thanks to makestep)
+      systemd.services.cockroachdb.preStart = ''
+        ${pkgs.chrony}/bin/chronyc waitsync
+      '';
+    };
+
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "cockroachdb";
+  meta.maintainers = with pkgs.lib.maintainers;
+    [ thoughtpolice ];
+
+  nodes = {
+    node1 = makeNode "country=us,region=east,dc=1"  "192.168.1.1" null;
+    node2 = makeNode "country=us,region=west,dc=2b" "192.168.1.2" "192.168.1.1";
+    node3 = makeNode "country=eu,region=west,dc=2"  "192.168.1.3" "192.168.1.1";
+  };
+
+  # NOTE: All the nodes must start in order and you must NOT use start_all(), because
+  # there's otherwise no way to guarantee that node1 will start before the others try
+  # to join it.
+  testScript = ''
+    for node in node1, node2, node3:
+        node.start()
+        node.wait_for_unit("cockroachdb")
+    node1.succeed(
+        "cockroach sql --host=192.168.1.1 --insecure -e 'SHOW ALL CLUSTER SETTINGS' 2>&1",
+        "cockroach workload init bank 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
+        "cockroach workload run bank --duration=1m 'postgresql://root@192.168.1.1:26257?sslmode=disable'",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/coder.nix b/nixpkgs/nixos/tests/coder.nix
new file mode 100644
index 000000000000..12813827284b
--- /dev/null
+++ b/nixpkgs/nixos/tests/coder.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "coder";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ shyim ghuntley ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      services.coder = {
+        enable = true;
+        accessUrl = "http://localhost:3000";
+      };
+    };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("postgresql.service")
+    machine.wait_for_unit("coder.service")
+    machine.wait_for_open_port(3000)
+
+    machine.succeed("curl --fail http://localhost:3000")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/collectd.nix b/nixpkgs/nixos/tests/collectd.nix
new file mode 100644
index 000000000000..2480bdb5f917
--- /dev/null
+++ b/nixpkgs/nixos/tests/collectd.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "collectd";
+  meta = { };
+
+  nodes.machine =
+    { pkgs, lib, ... }:
+
+    {
+      services.collectd = {
+        enable = true;
+        extraConfig = lib.mkBefore ''
+          Interval 30
+        '';
+        plugins = {
+          rrdtool = ''
+            DataDir "/var/lib/collectd/rrd"
+          '';
+          load = "";
+        };
+      };
+      environment.systemPackages = [ pkgs.rrdtool ];
+    };
+
+  testScript = ''
+    machine.wait_for_unit("collectd.service")
+    hostname = machine.succeed("hostname").strip()
+    file = f"/var/lib/collectd/rrd/{hostname}/load/load.rrd"
+    machine.wait_for_file(file)
+    machine.succeed(f"rrdinfo {file} | logger")
+    # check that this file contains a shortterm metric
+    machine.succeed(f"rrdinfo {file} | grep -F 'ds[shortterm].min = '")
+    # check that interval was set before the plugins
+    machine.succeed(f"rrdinfo {file} | grep -F 'step = 30'")
+    # check that there are frequent updates
+    machine.succeed(f"cp {file} before")
+    machine.wait_until_fails(f"cmp before {file}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/common/acme/client/default.nix b/nixpkgs/nixos/tests/common/acme/client/default.nix
new file mode 100644
index 000000000000..503e610d1ac9
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/client/default.nix
@@ -0,0 +1,16 @@
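+# Configures a test node as an ACME client of the local pebble-based test CA:
+# it points the security.acme defaults at the test server's directory URL and
+# trusts the test root certificate. This assumes the test also defines a node
+# called `acme` that imports ./common/acme/server; a rough, illustrative-only
+# sketch of a consuming test:
+#
+#   {
+#     acme = import ./common/acme/server;
+#     client = { nodes, ... }: {
+#       imports = [ ./common/acme/client ];
+#       networking.nameservers = [ nodes.acme.networking.primaryIPAddress ];
+#     };
+#   }
+#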
+{ lib, nodes, pkgs, ... }:
+let
+  caCert = nodes.acme.test-support.acme.caCert;
+  caDomain = nodes.acme.test-support.acme.caDomain;
+
+in {
+  security.acme = {
+    acceptTerms = true;
+    defaults = {
+      server = "https://${caDomain}/dir";
+      email = "hostmaster@example.test";
+    };
+  };
+
+  security.pki.certificateFiles = [ caCert ];
+}
diff --git a/nixpkgs/nixos/tests/common/acme/server/README.md b/nixpkgs/nixos/tests/common/acme/server/README.md
new file mode 100644
index 000000000000..9de2b2c71029
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/README.md
@@ -0,0 +1,21 @@
+# Fake Certificate Authority for ACME testing
+
+This will set up a test node running [pebble](https://github.com/letsencrypt/pebble)
+to serve ACME certificate requests.
+
+## "Snake oil" certs
+
+The snake oil certs are hard coded into the repo for reasons explained [here](https://github.com/NixOS/nixpkgs/pull/91121#discussion_r505410235).
+The root of the issue is that Nix will hash the derivation based on the arguments
+to mkDerivation, not the output. [Minica](https://github.com/jsha/minica) will
+always generate a random certificate even if the arguments are unchanged. As a
+result, it's possible to end up in a situation where the cached and local
+generated certs mismatch and cause issues with testing.
+
+To generate new certificates, run the following commands:
+
+```bash
+nix-build generate-certs.nix
+cp result/* .
+rm result
+```
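+
+To sanity-check the regenerated files before committing them (optional; the
+commands below are only a suggestion, assuming the file names produced by
+`generate-certs.nix`), something like this should work:
+
+```bash
+openssl x509 -in acme.test.cert.pem -noout -subject -dates
+openssl verify -CAfile ca.cert.pem acme.test.cert.pem
+```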
diff --git a/nixpkgs/nixos/tests/common/acme/server/acme.test.cert.pem b/nixpkgs/nixos/tests/common/acme/server/acme.test.cert.pem
new file mode 100644
index 000000000000..48f488ab8f90
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/acme.test.cert.pem
@@ -0,0 +1,19 @@
+-----BEGIN CERTIFICATE-----
+MIIDLDCCAhSgAwIBAgIIajCXIUnozqQwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVbWluaWNhIHJvb3QgY2EgMjMwYjU4MB4XDTIyMTEyMTE3MTcxMFoXDTQyMTEy
+MTE3MTcxMFowFDESMBAGA1UEAxMJYWNtZS50ZXN0MIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEA5INxJwKDVYNfTnkXwvKM/SufBNjvxWZxlkaMFbkAN5wJ
+6HwuesRZE9IgfRO9N+rSq1U2lDBm9gFPERqsQJVZHHJ5kkaNUr89h25+wgX5emGy
+UV2KEpCFssDD4aSBF+b0sryguCa1ZRj9b+pdfRxiYaORjSh5UzlXZoRm9iwHdzHT
+oKLlmqozqzEt0o9qpZL8gv+rv8C5BGOY6hfXAHYmkWRt87FN5BkSjgEWiY++DOAU
+X0TdobdSTrs/xJP+IbadRchqTH2kiG0g2BoCSXUsl7Mdh4IOUeQGDz/F5tH8PAtz
+p3dyjdQEFex2J5tlScLfVHoCBKV3gpCg+Keuum2j8QIDAQABo3YwdDAOBgNVHQ8B
+Af8EBAMCBaAwHQYDVR0lBBYwFAYIKwYBBQUHAwEGCCsGAQUFBwMCMAwGA1UdEwEB
+/wQCMAAwHwYDVR0jBBgwFoAUvTCE3Lj/P6OWkmOGtsjcTcIDzkAwFAYDVR0RBA0w
+C4IJYWNtZS50ZXN0MA0GCSqGSIb3DQEBCwUAA4IBAQAvZM4Ik1NOXQfbPRgbolyL
+b3afsSHbhHl9B2f0HGi5EAPdwyeWZsK3BF+SKFGAW5BlXr2SSlW/MQOMiUKTadnS
+8xTOFc1Ws8JWWc82zQqWcOWEXhU+AI8p70sTVFeXPWwLFy3nBRwDH4ZPU8UFHeje
+YXqbfxrsdEFXrbCfWSzPQP24xqVt7n9Am/5XFGtDkRsYlVgLwq/F6lN9hO0/gYIx
+8NsZ8Xy+QvBlGL+z9Zo7EylB8bP9OBtOtEv9fZcnxgughieiTDs36GwdQRE2aI+d
+cG3lQX8NGxgcpDoH8+rNx7Uw7odg0gVbI3agyyvax6DPht+/bzXmHm8ogklGTOoG
+-----END CERTIFICATE-----
diff --git a/nixpkgs/nixos/tests/common/acme/server/acme.test.key.pem b/nixpkgs/nixos/tests/common/acme/server/acme.test.key.pem
new file mode 100644
index 000000000000..4837f19b3024
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/acme.test.key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEA5INxJwKDVYNfTnkXwvKM/SufBNjvxWZxlkaMFbkAN5wJ6Hwu
+esRZE9IgfRO9N+rSq1U2lDBm9gFPERqsQJVZHHJ5kkaNUr89h25+wgX5emGyUV2K
+EpCFssDD4aSBF+b0sryguCa1ZRj9b+pdfRxiYaORjSh5UzlXZoRm9iwHdzHToKLl
+mqozqzEt0o9qpZL8gv+rv8C5BGOY6hfXAHYmkWRt87FN5BkSjgEWiY++DOAUX0Td
+obdSTrs/xJP+IbadRchqTH2kiG0g2BoCSXUsl7Mdh4IOUeQGDz/F5tH8PAtzp3dy
+jdQEFex2J5tlScLfVHoCBKV3gpCg+Keuum2j8QIDAQABAoIBAHfnUHQ7qVYxfMzc
+VU+BneEqBmKwwf8+ZdOIaPDtBeQoCDrpDip05Ji15T48IUk5+hjUubLAQwZKYYaE
+DGZG918p4giS5IzKtCpgHDsKj4FbyglPn6dmFgFZjG7VtrcoBLXUrDB0fzHxDuqu
+eyeuwSCihzkeR6sXp3iveKcrKy+rA31aqWvJZb24qyAu1y8KIcf2ZMUiYcJF2kpL
+XZz4uyx4x/B9NE+PmLqo7x/9iS+p5aT2kWVCVUGmhII0ChFnWSnjxqecBMhWFY1O
+3U0lKhloj6UKBya91hGospEJdaLHpHCWUgYPvA5mG+48kqYkPkecmTf8Xha3TxPf
+g1qv3sECgYEA+hMO1qTlnqhBajCMcAGIlpRHwr97hQMdSylHBXob1xCnuTEJKHOo
+7UmQw9hJgD4JgYxcivg/OFErXdefbSae9NqSNdOshxmrxz6DFTN3Ms3WR1I1be3c
+B2mpGllMPbxJ3CKFet2CQSvOM9jfbK68R7Jlhiap0bESvWrT9ztUCWUCgYEA6e2Y
+iMNNo1dWushSMVvCkWR9CLAsnWnjFG4FYIPz/iuxJjRXDiWyR6x4WYjUx3ZBhpf5
+wVFUK7VaPJBfOn7KCan59dqOvL3LSB/5SupwRMecCEhYPQvSaxn4MNrx0Vi83O4C
+togyD9/UJ4ji+TXwMj2eMzwRspmO/26hXkQGzZ0CgYEA0qlLTrYKWOUUdgf/xjsE
+fRTcfsofm6VMAAz9rzd2TG3TXMZaGKGWJI5cTR7ejBG2oFNFgiwt1ZtLFPqXarOm
+JE4b7QwrwoN1mZqngiygtUOAxwQRzlEZkYUI1xFykG8VKURLfX0sRQpJ4pNHY56v
+LRazP5dCZ0rrpnVfql1oJaECgYEAxtvT728XcOOuNtpUBOGcZTynjds2EhsRjyx4
+JbQGlutNjMyxtLUW+RcEuBg5ydYdne1Tw6L/iqiALTwNuAxQdCaq9vT0oj41sPp9
+UdI53j5Rxji5yitilOlesylsqCpnYuhyJflhlV0RXQpg6LmRlyQKeEN4R/uCNGI3
+i4sIvYECgYEA4DC2qObfB0UkN81uGluwwM5rR04qvIc5xX3QIvHuIJOs/uP54daD
+OiEDTxTpiqDNsFL0Pyl07aL7jubHNqU/eQpQIEZRlDy4Mr31QSbQ9R2/NNBwHu22
+BnnNKzZ97T0NVgxJXOqcOlRGjwb/5OUDpaIClJY+GqilEdOeu7Pl3aA=
+-----END RSA PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/common/acme/server/ca.cert.pem b/nixpkgs/nixos/tests/common/acme/server/ca.cert.pem
new file mode 100644
index 000000000000..b6f2b9e3a91f
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/ca.cert.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDSzCCAjOgAwIBAgIIIwtYp+WlBbswDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVbWluaWNhIHJvb3QgY2EgMjMwYjU4MCAXDTIyMTEyMTE3MTcxMFoYDzIxMjIx
+MTIxMTcxNzEwWjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSAyMzBiNTgwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvqAoAyV8igrmBnU6T1nQDfkkQ
+HjQp+ANCthNCi4kGPOoTxrYrUMWa6d/aSIv5hKO2A+r2GdTeM1RvSo6GUr3GmsJc
+WUMbIsJ0SJSLQEyvmFPpzfV3NdfIt6vZRiqJbLt7yuDiZil33GdQEKYywJxIsCb2
+CSd55V1cZSiLItWEIURAhHhSxHabMRmIF/xZWxKFEDeagzXOxUBPAvIwzzqQroBv
+3vZhfgcAjCyS0crJ/E2Wa6GLKfFvaXGEj/KlXftwpbvFtnNBtmtJcNy9a8LJoOcA
+E+ZjD21hidnCc+Yag7LaR3ZtAVkpeRJ9rRNBkVP4rv2mq2skIkgDfY/F8smPAgMB
+AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr
+BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBS9MITcuP8/o5aS
+Y4a2yNxNwgPOQDAfBgNVHSMEGDAWgBS9MITcuP8/o5aSY4a2yNxNwgPOQDANBgkq
+hkiG9w0BAQsFAAOCAQEADCcgaxrI/pqjkYb0c3QHwfKCNz4khSWs/9tBpBfdxdUX
+uvG7rZzVW7pkzML+m4tSo2wm9sHRAgG+dIpzbSoRTouMntWlvYEnrr1SCw4NyBo1
+cwmNUz4JL+E3dnpI4FSOpyFyO87qL9ep0dxQEADWSppyCA762wfFpY+FvT6b/he8
+eDEc/Umjfm+X0tqNWx3aVoeyIJT46AeElry2IRLAk7z/vEPGFFzgd2Jh6Qsdeagk
+YkU0tFl9q9BotPYGlCMtVjmzbJtxh4uM9YCgiz1THzFjrUvfaTM8VjuBxbpoCZkS
+85mNhFZvNq8/cgYc0kYZOg8+jRdy87xmTRp64LBd6w==
+-----END CERTIFICATE-----
diff --git a/nixpkgs/nixos/tests/common/acme/server/ca.key.pem b/nixpkgs/nixos/tests/common/acme/server/ca.key.pem
new file mode 100644
index 000000000000..5d46c025788f
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/ca.key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAr6gKAMlfIoK5gZ1Ok9Z0A35JEB40KfgDQrYTQouJBjzqE8a2
+K1DFmunf2kiL+YSjtgPq9hnU3jNUb0qOhlK9xprCXFlDGyLCdEiUi0BMr5hT6c31
+dzXXyLer2UYqiWy7e8rg4mYpd9xnUBCmMsCcSLAm9gkneeVdXGUoiyLVhCFEQIR4
+UsR2mzEZiBf8WVsShRA3moM1zsVATwLyMM86kK6Ab972YX4HAIwsktHKyfxNlmuh
+iynxb2lxhI/ypV37cKW7xbZzQbZrSXDcvWvCyaDnABPmYw9tYYnZwnPmGoOy2kd2
+bQFZKXkSfa0TQZFT+K79pqtrJCJIA32PxfLJjwIDAQABAoIBAErEFJXnIIY47Cq+
+QS7t7e16uDCTGpLujLy9cQ83AzjTfrKyNuHS/HkGqRBpJqMrEN+tZTohHpkBciP4
+sRd9amd5gdb663RGZExIhGmNEdb/2F/BGYUHNvSpMQ1HL13VGSwE25mh8G6jMppC
+q+sYTq0lxT+d/96DgSyNpicqyYT2S2CTCRkWGAsc6KQwRpBYqoEqUeakyGfe2k85
+pj32H53Si/49fkWkQ9RciPdg7qcu7u/iegwAkkjKoATeEjNf0NqBlkWag1qU0UHR
+r2xDin+3ffEU2GQEwSvnGwlo7uyAN0UsryEWa9suuhX5T4eSWAMgTL4iVkh8Aa24
++YEFOGkCgYEA0DUb++31+nuxU8N+GPaPQXiob8C0RmSzSzSHJ3daJpzq8k576jqs
+3TgkhLDzQepcTYVU2ucn6+9ziXEsz4H06W3FNGktnyK4BRqYitt5TjZvPc+WTPhR
+0U+iUqBZilCAhUkIsNUiGvnMhz9VfcS/gn+NqhL7kvYi11/jAc4bbB0CgYEA1/oh
++t1ZKVLkbANrma/M8AX27Vl3k4jgOWGzFwAVD10zN31gGyVjv1knmG22pmL2+N+Z
+8CnVmdHQQQIWV1pYbgwRkvpnZWyH7AvHd9l1XLYyOU3VEpz+e2bpMtzesaza3UWW
+k8NELNE5sBopY939XkQ9G3aMXtbkx01zX+0BZJsCgYB+MdJ2TfKrEVGXfYPuSXLm
+seUVZu1dRSfOy1WnvBVuFenpV1yPyWSA6MhpjH7EUvIDIm8eBsERpZ6XjXslgpUY
+7ql6bM10CK0UmtwePYw2tZOTGUD2AgRFI0k1X28mAEkFgBC+bVAwnXsz9lUw15Fj
+3T/V9493savIcpu6uluwmQKBgQCE/I4jzFv0aAgiwlBlB6znNqT/LRHGFIgMjS4b
+QX+2QCsjRd4BmRo8XodVAmlvNozgXb6J9RiDaIAVJ1XeX9EHogLIP8ue1h8zp2Uh
+VRNBDScLxfMnTOgd0BZTrVCqkscJbKn1Pk0iU4pz9wf5aF10yAvgdzSjySqB1hzu
+uh8bdQKBgEpFIyhqfXf/NzchI5y23Cok14LFIPJ1yERD/B8taS7muVQwpgffy+Ld
+BH7dhafWSDVqIk1e6yl+82b4amleTEmDfopgc6FR7uPid1JoFxrwhnEfC3FjZamp
+1OzXAOE/mX3jHf1spqpB2J/rDVPKi934ocQVoWnBeRopGTXxzbed
+-----END RSA PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/common/acme/server/default.nix b/nixpkgs/nixos/tests/common/acme/server/default.nix
new file mode 100644
index 000000000000..2a2e3b08a1df
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/default.nix
@@ -0,0 +1,141 @@
+# The certificate for the ACME service is exported as:
+#
+#   config.test-support.acme.caCert
+#
+# This value can be used inside the configuration of other test nodes to inject
+# the test certificate into security.pki.certificateFiles or into package
+# overlays.
+#
+# Another value that's needed if you don't use a custom resolver (see below for
+# notes on that) is to add the acme node as a nameserver to every node
+# that needs to acquire certificates using ACME, because otherwise the API host
+# for acme.test can't be resolved.
+#
+# A configuration example of a full node setup using this would be this:
+#
+# {
+#   acme = import ./common/acme/server;
+#
+#   example = { nodes, ... }: {
+#     networking.nameservers = [
+#       nodes.acme.networking.primaryIPAddress
+#     ];
+#     security.pki.certificateFiles = [
+#       nodes.acme.test-support.acme.caCert
+#     ];
+#   };
+# }
+#
+# By default, this module runs a local resolver, generated using resolver.nix
+# from the parent directory to automatically discover all zones in the network.
+#
+# If you do not want this and want to use your own resolver, you can just
+# override networking.nameservers like this:
+#
+# {
+#   acme = { nodes, lib, ... }: {
+#     imports = [ ./common/acme/server ];
+#     networking.nameservers = lib.mkForce [
+#       nodes.myresolver.networking.primaryIPAddress
+#     ];
+#   };
+#
+#   myresolver = ...;
+# }
+#
+# Keep in mind that currently only _one_ resolver is supported; if you have
+# more than one resolver in networking.nameservers, only the first one will be
+# used.
+#
+# Also, whenever you use a resolver from a different test node, make sure it
+# is started _before_ the ACME service.
+{ config, pkgs, lib, ... }:
+let
+  testCerts = import ./snakeoil-certs.nix;
+  domain = testCerts.domain;
+
+  resolver = let
+    message = "You need to define a resolver for the acme test module.";
+    firstNS = lib.head config.networking.nameservers;
+  in if config.networking.nameservers == [] then throw message else firstNS;
+
+  pebbleConf.pebble = {
+    listenAddress = "0.0.0.0:443";
+    managementListenAddress = "0.0.0.0:15000";
+    # These certs and keys are used for the Web Front End (WFE)
+    certificate = testCerts.${domain}.cert;
+    privateKey = testCerts.${domain}.key;
+    httpPort = 80;
+    tlsPort = 443;
+    ocspResponderURL = "http://${domain}:4002";
+    strict = true;
+  };
+
+  pebbleConfFile = pkgs.writeText "pebble.conf" (builtins.toJSON pebbleConf);
+
+in {
+  imports = [ ../../resolver.nix ];
+
+  options.test-support.acme = {
+    caDomain = lib.mkOption {
+      type = lib.types.str;
+      readOnly = true;
+      default = domain;
+      description = lib.mdDoc ''
+        A domain name to use with the `nodes` attribute to
+        identify the CA server.
+      '';
+    };
+    caCert = lib.mkOption {
+      type = lib.types.path;
+      readOnly = true;
+      default = testCerts.ca.cert;
+      description = lib.mdDoc ''
+        A certificate file to use with the `nodes` attribute to
+        inject the test CA certificate used in the ACME server into
+        {option}`security.pki.certificateFiles`.
+      '';
+    };
+  };
+
+  config = {
+    test-support = {
+      resolver.enable = let
+        isLocalResolver = config.networking.nameservers == [ "127.0.0.1" ];
+      in lib.mkOverride 900 isLocalResolver;
+    };
+
+    # This has priority 140, because modules/testing/test-instrumentation.nix
+    # already overrides this with priority 150.
+    networking.nameservers = lib.mkOverride 140 [ "127.0.0.1" ];
+    networking.firewall.allowedTCPPorts = [ 80 443 15000 4002 ];
+
+    networking.extraHosts = ''
+      127.0.0.1 ${domain}
+      ${config.networking.primaryIPAddress} ${domain}
+    '';
+
+    systemd.services = {
+      pebble = {
+        enable = true;
+        description = "Pebble ACME server";
+        wantedBy = [ "network.target" ];
+        environment = {
+          # We're not testing lego, we're just testing our configuration.
+          # No need to sleep.
+          PEBBLE_VA_NOSLEEP = "1";
+        };
+
+        serviceConfig = {
+          RuntimeDirectory = "pebble";
+          WorkingDirectory = "/run/pebble";
+
+          # Required to bind on privileged ports.
+          AmbientCapabilities = [ "CAP_NET_BIND_SERVICE" ];
+
+          ExecStart = "${pkgs.pebble}/bin/pebble -config ${pebbleConfFile}";
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/common/acme/server/generate-certs.nix b/nixpkgs/nixos/tests/common/acme/server/generate-certs.nix
new file mode 100644
index 000000000000..4f38ca309b05
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/generate-certs.nix
@@ -0,0 +1,33 @@
+# Minica can provide a CA key and cert, plus a key
+# and cert for our fake CA server's Web Front End (WFE).
+{
+  pkgs ? import <nixpkgs> {},
+  minica ? pkgs.minica,
+  mkDerivation ? pkgs.stdenv.mkDerivation
+}:
+let
+  conf = import ./snakeoil-certs.nix;
+  domain = conf.domain;
+in mkDerivation {
+  name = "test-certs";
+  buildInputs = [ (minica.overrideAttrs (old: {
+    prePatch = ''
+      sed -i 's_NotAfter: time.Now().AddDate(2, 0, 30),_NotAfter: time.Now().AddDate(20, 0, 0),_' main.go
+    '';
+  })) ];
+  dontUnpack = true;
+
+  buildPhase = ''
+    minica \
+      --ca-key ca.key.pem \
+      --ca-cert ca.cert.pem \
+      --domains ${domain}
+  '';
+
+  installPhase = ''
+    mkdir -p $out
+    mv ca.*.pem $out/
+    mv ${domain}/key.pem $out/${domain}.key.pem
+    mv ${domain}/cert.pem $out/${domain}.cert.pem
+  '';
+}
diff --git a/nixpkgs/nixos/tests/common/acme/server/snakeoil-certs.nix b/nixpkgs/nixos/tests/common/acme/server/snakeoil-certs.nix
new file mode 100644
index 000000000000..11c3f7fc9290
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/acme/server/snakeoil-certs.nix
@@ -0,0 +1,13 @@
+let
+  domain = "acme.test";
+in {
+  inherit domain;
+  ca = {
+    cert = ./ca.cert.pem;
+    key = ./ca.key.pem;
+  };
+  "${domain}" = {
+    cert = ./. + "/${domain}.cert.pem";
+    key = ./. + "/${domain}.key.pem";
+  };
+}
diff --git a/nixpkgs/nixos/tests/common/auto-format-root-device.nix b/nixpkgs/nixos/tests/common/auto-format-root-device.nix
new file mode 100644
index 000000000000..fef8c7004991
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/auto-format-root-device.nix
@@ -0,0 +1,29 @@
+# This is a test utility that automatically formats
+# `config.virtualisation.rootDevice` in the initrd.
+# Note that when you are using
+# `boot.initrd.systemd.enable = true`, you can use
+# `virtualisation.fileSystems."/".autoFormat = true;`
+# instead.
+
+{ lib, config, pkgs, ... }:
+
+let
+  rootDevice = config.virtualisation.rootDevice;
+in
+{
+
+  boot.initrd.extraUtilsCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+    # We need mke2fs in the initrd.
+    copy_bin_and_libs ${pkgs.e2fsprogs}/bin/mke2fs
+  '';
+
+  boot.initrd.postDeviceCommands = lib.mkIf (!config.boot.initrd.systemd.enable) ''
+    # If the disk image appears to be empty, run mke2fs to
+    # initialise.
+    FSTYPE=$(blkid -o value -s TYPE ${rootDevice} || true)
+    PARTTYPE=$(blkid -o value -s PTTYPE ${rootDevice} || true)
+    if test -z "$FSTYPE" -a -z "$PARTTYPE"; then
+        mke2fs -t ext4 ${rootDevice}
+    fi
+  '';
+}
diff --git a/nixpkgs/nixos/tests/common/auto.nix b/nixpkgs/nixos/tests/common/auto.nix
new file mode 100644
index 000000000000..ac56bed4a88f
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/auto.nix
@@ -0,0 +1,55 @@
+{ config, lib, ... }:
+
+let
+  dmcfg = config.services.xserver.displayManager;
+  cfg = config.test-support.displayManager.auto;
+in
+{
+
+  ###### interface
+
+  options = {
+    test-support.displayManager.auto = {
+      enable = lib.mkOption {
+        default = false;
+        description = lib.mdDoc ''
+          Whether to enable the fake "auto" display manager, which
+          automatically logs in the user specified in the
+          {option}`user` option.  This is mostly useful for
+          automated tests.
+        '';
+      };
+
+      user = lib.mkOption {
+        default = "root";
+        description = lib.mdDoc "The user account to login automatically.";
+      };
+    };
+  };
+
+  ###### implementation
+
+  config = lib.mkIf cfg.enable {
+    services.xserver.displayManager = {
+      lightdm.enable = true;
+      autoLogin = {
+        enable = true;
+        user = cfg.user;
+      };
+    };
+
+    # lightdm by default doesn't allow auto login for root, which is
+    # required by some nixos tests. Override it here.
+    security.pam.services.lightdm-autologin.text = lib.mkForce ''
+        auth     requisite pam_nologin.so
+        auth     required  pam_succeed_if.so quiet
+        auth     required  pam_permit.so
+
+        account  include   lightdm
+
+        password include   lightdm
+
+        session  include   lightdm
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/common/ec2.nix b/nixpkgs/nixos/tests/common/ec2.nix
new file mode 100644
index 000000000000..1a64c464039b
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/ec2.nix
@@ -0,0 +1,73 @@
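+# Helper for building EC2-style image tests. A rough usage sketch from a
+# hypothetical calling test (the exact image, userData and script values
+# depend on that test, and are assumptions here):
+#
+#   with import ./common/ec2.nix { inherit makeTest pkgs; };
+#
+#   makeEc2Test {
+#     name = "example";
+#     inherit image;        # qcow2 image derivation to boot
+#     userData = "";        # EC2 user data served to the guest
+#     script = ''
+#       machine.wait_for_unit("multi-user.target")
+#     '';
+#   }
+#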
+{ pkgs, makeTest }:
+
+with pkgs.lib;
+
+{
+  makeEc2Test = { name, image, userData, script, hostname ? "ec2-instance", sshPublicKey ? null, meta ? {} }:
+    let
+      metaData = pkgs.stdenv.mkDerivation {
+        name = "metadata";
+        buildCommand = ''
+          mkdir -p $out/1.0/meta-data
+          ln -s ${pkgs.writeText "userData" userData} $out/1.0/user-data
+          echo "${hostname}" > $out/1.0/meta-data/hostname
+          echo "(unknown)" > $out/1.0/meta-data/ami-manifest-path
+        '' + optionalString (sshPublicKey != null) ''
+          mkdir -p $out/1.0/meta-data/public-keys/0
+          ln -s ${pkgs.writeText "sshPublicKey" sshPublicKey} $out/1.0/meta-data/public-keys/0/openssh-key
+        '';
+      };
+      indentLines = str: concatLines (map (s: "  " + s) (splitString "\n" str));
+    in makeTest {
+      name = "ec2-" + name;
+      nodes = {};
+      testScript = ''
+        import os
+        import subprocess
+        import tempfile
+
+        image_dir = os.path.join(
+            os.environ.get("TMPDIR", tempfile.gettempdir()), "tmp", "vm-state-machine"
+        )
+        os.makedirs(image_dir, mode=0o700, exist_ok=True)
+        disk_image = os.path.join(image_dir, "machine.qcow2")
+        subprocess.check_call(
+            [
+                "qemu-img",
+                "create",
+                "-f",
+                "qcow2",
+                "-F",
+                "qcow2",
+                "-o",
+                "backing_file=${image}",
+                disk_image,
+            ]
+        )
+        subprocess.check_call(["qemu-img", "resize", disk_image, "10G"])
+
+        # Note: we use net=169.0.0.0/8 rather than
+        # net=169.254.0.0/16 to prevent dhcpcd from getting horribly
+        # confused. (It would get a DHCP lease in the 169.254.*
+        # range, which it would then configure and promptly delete
+        # again when it deletes link-local addresses.) Ideally we'd
+        # turn off the DHCP server, but qemu does not have an option
+        # to do that.
+        start_command = (
+            "qemu-kvm -m 1024"
+            + " -device virtio-net-pci,netdev=vlan0"
+            + " -netdev 'user,id=vlan0,net=169.0.0.0/8,guestfwd=tcp:169.254.169.254:80-cmd:${pkgs.micro-httpd}/bin/micro_httpd ${metaData}'"
+            + f" -drive file={disk_image},if=virtio,werror=report"
+            + " $QEMU_OPTS"
+        )
+
+        machine = create_machine({"startCommand": start_command})
+        try:
+      '' + indentLines script + ''
+        finally:
+          machine.shutdown()
+      '';
+
+      inherit meta;
+    };
+}
diff --git a/nixpkgs/nixos/tests/common/gpg-keyring.nix b/nixpkgs/nixos/tests/common/gpg-keyring.nix
new file mode 100644
index 000000000000..fb8d07b1183e
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/gpg-keyring.nix
@@ -0,0 +1,21 @@
+{ pkgs, ... }:
+
+pkgs.runCommand "gpg-keyring" { nativeBuildInputs = [ pkgs.gnupg ]; } ''
+  mkdir -p $out
+  export GNUPGHOME=$out
+  cat > foo <<EOF
+    %echo Generating a basic OpenPGP key
+    %no-protection
+    Key-Type: EdDSA
+    Key-Curve: ed25519
+    Name-Real: Bob Foobar
+    Name-Email: bob@foo.bar
+    Expire-Date: 0
+    # Do a commit here, so that we can later print "done"
+    %commit
+    %echo done
+  EOF
+  gpg --batch --generate-key foo
+  rm $out/S.gpg-agent $out/S.gpg-agent.*
+  gpg --export bob@foo.bar -a > $out/pubkey.gpg
+''
diff --git a/nixpkgs/nixos/tests/common/resolver.nix b/nixpkgs/nixos/tests/common/resolver.nix
new file mode 100644
index 000000000000..609058a7374a
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/resolver.nix
@@ -0,0 +1,141 @@
+# This module automatically discovers zones in BIND and NSD NixOS
+# configurations and creates zones for all definitions of networking.extraHosts
+# (except those that point to 127.0.0.1 or ::1) within the current test network
+# and delegates these zones using a fake root zone served by a BIND recursive
+# name server.
+{ config, nodes, pkgs, lib, ... }:
+
+{
+  options.test-support.resolver.enable = lib.mkOption {
+    type = lib.types.bool;
+    default = true;
+    internal = true;
+    description = lib.mdDoc ''
+      Whether to enable the resolver that automatically discovers zones in the
+      test network.
+
+      This option is `true` by default, because the module
+      defining this option needs to be explicitly imported.
+
+      The reason this option exists is for the
+      {file}`nixos/tests/common/acme/server` module, which
+      needs that option to disable the resolver once the user has set its own
+      resolver.
+    '';
+  };
+
+  config = lib.mkIf config.test-support.resolver.enable {
+    networking.firewall.enable = false;
+    services.bind.enable = true;
+    services.bind.cacheNetworks = lib.mkForce [ "any" ];
+    services.bind.forwarders = lib.mkForce [];
+    services.bind.zones = lib.singleton {
+      name = ".";
+      file = let
+        addDot = zone: zone + lib.optionalString (!lib.hasSuffix "." zone) ".";
+        mkNsdZoneNames = zones: map addDot (lib.attrNames zones);
+        mkBindZoneNames = zones: map (zone: addDot zone.name) zones;
+        getZones = cfg: mkNsdZoneNames cfg.services.nsd.zones
+                     ++ mkBindZoneNames cfg.services.bind.zones;
+
+        getZonesForNode = attrs: {
+          ip = attrs.config.networking.primaryIPAddress;
+          zones = lib.filter (zone: zone != ".") (getZones attrs.config);
+        };
+
+        zoneInfo = lib.mapAttrsToList (lib.const getZonesForNode) nodes;
+
+        # A and AAAA resource records for all the definitions of
+        # networking.extraHosts except those for 127.0.0.1 or ::1.
+        #
+        # Internally this builds a list of entries of the form
+        # { host = NAME; ipv4 = ADDR; } or { host = NAME; ipv6 = ADDR; }, where
+        # ADDR is the IP address for that host, and renders them as records.
+        recordsFromExtraHosts = let
+          getHostsForNode = lib.const (n: n.config.networking.extraHosts);
+          allHostsList = lib.mapAttrsToList getHostsForNode nodes;
+          allHosts = lib.concatStringsSep "\n" allHostsList;
+
+          reIp = "[a-fA-F0-9.:]+";
+          reHost = "[a-zA-Z0-9.-]+";
+
+          matchAliases = str: let
+            matched = builtins.match "[ \t]+(${reHost})(.*)" str;
+            continue = lib.singleton (lib.head matched)
+                    ++ matchAliases (lib.last matched);
+          in lib.optional (matched != null) continue;
+
+          matchLine = str: let
+            result = builtins.match "[ \t]*(${reIp})[ \t]+(${reHost})(.*)" str;
+          in if result == null then null else {
+            ipAddr = lib.head result;
+            hosts = lib.singleton (lib.elemAt result 1)
+                 ++ matchAliases (lib.last result);
+          };
+
+          skipLine = str: let
+            rest = builtins.match "[^\n]*\n(.*)" str;
+          in if rest == null then "" else lib.head rest;
+
+          getEntries = str: acc: let
+            result = matchLine str;
+            next = getEntries (skipLine str);
+            newEntry = acc ++ lib.singleton result;
+            continue = if result == null then next acc else next newEntry;
+          in if str == "" then acc else continue;
+
+          isIPv6 = str: builtins.match ".*:.*" str != null;
+          loopbackIps = [ "127.0.0.1" "::1" ];
+          filterLoopback = lib.filter (e: !lib.elem e.ipAddr loopbackIps);
+
+          allEntries = lib.concatMap (entry: map (host: {
+            inherit host;
+            ${if isIPv6 entry.ipAddr then "ipv6" else "ipv4"} = entry.ipAddr;
+          }) entry.hosts) (filterLoopback (getEntries (allHosts + "\n") []));
+
+          mkRecords = entry: let
+            records = lib.optional (entry ? ipv6) "AAAA ${entry.ipv6}"
+                   ++ lib.optional (entry ? ipv4) "A ${entry.ipv4}";
+            mkRecord = typeAndData: "${entry.host}. IN ${typeAndData}";
+          in lib.concatMapStringsSep "\n" mkRecord records;
+
+        in lib.concatMapStringsSep "\n" mkRecords allEntries;
+
+        # All of the zones that are subdomains of existing zones.
+        # For example if there is only "example.com" the following zones would
+        # be 'subZones':
+        #
+        #  * foo.example.com.
+        #  * bar.example.com.
+        #
+        # While the following would *not* be 'subZones':
+        #
+        #  * example.com.
+        #  * com.
+        #
+        subZones = let
+          allZones = lib.concatMap (zi: zi.zones) zoneInfo;
+          isSubZoneOf = z1: z2: lib.hasSuffix z2 z1 && z1 != z2;
+        in lib.filter (z: lib.any (isSubZoneOf z) allZones) allZones;
+
+        # All the zones without 'subZones'.
+        filteredZoneInfo = map (zi: zi // {
+          zones = lib.filter (x: !lib.elem x subZones) zi.zones;
+        }) zoneInfo;
+
+      in pkgs.writeText "fake-root.zone" ''
+        $TTL 3600
+        . IN SOA ns.fakedns. admin.fakedns. ( 1 3h 1h 1w 1d )
+        ns.fakedns. IN A ${config.networking.primaryIPAddress}
+        . IN NS ns.fakedns.
+        ${lib.concatImapStrings (num: { ip, zones }: ''
+          ns${toString num}.fakedns. IN A ${ip}
+          ${lib.concatMapStrings (zone: ''
+          ${zone} IN NS ns${toString num}.fakedns.
+          '') zones}
+        '') (lib.filter (zi: zi.zones != []) filteredZoneInfo)}
+        ${recordsFromExtraHosts}
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/common/user-account.nix b/nixpkgs/nixos/tests/common/user-account.nix
new file mode 100644
index 000000000000..a57ee2d59ae3
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/user-account.nix
@@ -0,0 +1,15 @@
+{ ... }:
+
+{ users.users.alice =
+    { isNormalUser = true;
+      description = "Alice Foobar";
+      password = "foobar";
+      uid = 1000;
+    };
+
+  users.users.bob =
+    { isNormalUser = true;
+      description = "Bob Foobar";
+      password = "foobar";
+    };
+}
diff --git a/nixpkgs/nixos/tests/common/wayland-cage.nix b/nixpkgs/nixos/tests/common/wayland-cage.nix
new file mode 100644
index 000000000000..fd0700941392
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/wayland-cage.nix
@@ -0,0 +1,13 @@
+{ ... }:
+
+{
+  imports = [ ./user-account.nix ];
+  services.cage = {
+    enable = true;
+    user = "alice";
+  };
+
+  virtualisation = {
+    qemu.options = [ "-vga virtio" ];
+  };
+}
diff --git a/nixpkgs/nixos/tests/common/webroot/news-rss.xml b/nixpkgs/nixos/tests/common/webroot/news-rss.xml
new file mode 100644
index 000000000000..b8099bf0364a
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/webroot/news-rss.xml
@@ -0,0 +1,27 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<rss xmlns:blogChannel="http://backend.userland.com/blogChannelModule" version="2.0">
+ <channel>
+  <title>NixOS News</title><link>https://nixos.org</link>
+  <description>News for NixOS, the purely functional Linux distribution.</description>
+  <image>
+   <title>NixOS</title>
+   <url>https://nixos.org/logo/nixos-logo-only-hires.png</url><link>https://nixos.org/</link>
+  </image>
+  <item>
+   <title>NixOS 18.09 released</title><link>https://nixos.org/news.html</link>
+   <description>
+    <a href="https://github.com/NixOS/nixos-artwork/blob/master/releases/18.09-jellyfish/jellyfish.png">
+     <img class="inline" src="logo/nixos-logo-18.09-jellyfish-lores.png" alt="18.09 Jellyfish logo" with="100" height="87"/>
+    </a>
+      NixOS 18.09 “Jellyfish” has been released, the tenth stable release branch.
+      See the <a href="/nixos/manual/release-notes.html#sec-release-18.09">release notes</a>
+      for details. You can get NixOS 18.09 ISOs and VirtualBox appliances
+      from the <a href="nixos/download.html">download page</a>.
+      For information on how to upgrade from older release branches
+      to 18.09, check out the
+      <a href="/nixos/manual/index.html#sec-upgrading">manual section on upgrading</a>.
+    </description>
+   <pubDate>Sat Oct 06 2018 00:00:00 GMT</pubDate>
+  </item>
+ </channel>
+</rss>
diff --git a/nixpkgs/nixos/tests/common/x11.nix b/nixpkgs/nixos/tests/common/x11.nix
new file mode 100644
index 000000000000..0d76a0e972ff
--- /dev/null
+++ b/nixpkgs/nixos/tests/common/x11.nix
@@ -0,0 +1,17 @@
+{ lib, ... }:
+
+{
+  imports = [
+    ./auto.nix
+  ];
+
+  services.xserver.enable = true;
+
+  # Automatically log in.
+  test-support.displayManager.auto.enable = true;
+
+  # Use IceWM as the window manager.
+  # Don't use a desktop manager.
+  services.xserver.displayManager.defaultSession = lib.mkDefault "none+icewm";
+  services.xserver.windowManager.icewm.enable = true;
+}
diff --git a/nixpkgs/nixos/tests/connman.nix b/nixpkgs/nixos/tests/connman.nix
new file mode 100644
index 000000000000..348b2a895a63
--- /dev/null
+++ b/nixpkgs/nixos/tests/connman.nix
@@ -0,0 +1,77 @@
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+{
+  name = "connman";
+  meta = with lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  # Router running radvd on VLAN 1
+  nodes.router = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    virtualisation.vlans = [ 1 ];
+
+    boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true;
+
+    networking = {
+      useDHCP = false;
+      interfaces.eth1.ipv6.addresses =
+        [ { address = "fd12::1"; prefixLength = 64; } ];
+    };
+
+    services.radvd = {
+      enable = true;
+      config = ''
+        interface eth1 {
+          AdvSendAdvert on;
+          AdvManagedFlag on;
+          AdvOtherConfigFlag on;
+          prefix fd12::/64 {
+            AdvAutonomous off;
+          };
+        };
+      '';
+    };
+  };
+
+  # Client running connman, connected to VLAN 1
+  nodes.client = { ... }: {
+    virtualisation.vlans = [ 1 ];
+
+    # add a virtual wlan interface
+    boot.kernelModules = [ "mac80211_hwsim" ];
+    boot.extraModprobeConfig = ''
+      options mac80211_hwsim radios=1
+    '';
+
+    # Note: the overrides are needed because the wifi is
+    # disabled with mkVMOverride in qemu-vm.nix.
+    services.connman.enable = lib.mkOverride 0 true;
+    services.connman.networkInterfaceBlacklist = [ "eth0" ];
+    networking.wireless.enable = lib.mkOverride 0 true;
+    networking.wireless.interfaces = [ "wlan0" ];
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      with subtest("Router is ready"):
+          router.wait_for_unit("radvd.service")
+
+      with subtest("Daemons are running"):
+          client.wait_for_unit("wpa_supplicant-wlan0.service")
+          client.wait_for_unit("connman.service")
+          client.wait_until_succeeds("connmanctl state | grep -q ready")
+
+      with subtest("Wired interface is configured"):
+          client.wait_until_succeeds("ip -6 route | grep -q fd12::/64")
+          client.wait_until_succeeds("ping -c 1 fd12::1")
+
+      with subtest("Can set up a wireless access point"):
+          client.succeed("connmanctl enable wifi")
+          client.wait_until_succeeds("connmanctl tether wifi on nixos-test reproducibility | grep -q 'Enabled'")
+          client.wait_until_succeeds("iw wlan0 info | grep -q nixos-test")
+    '';
+})
+
diff --git a/nixpkgs/nixos/tests/consul-template.nix b/nixpkgs/nixos/tests/consul-template.nix
new file mode 100644
index 000000000000..cbffa94569e3
--- /dev/null
+++ b/nixpkgs/nixos/tests/consul-template.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ ... }: {
+  name = "consul-template";
+
+  nodes.machine = { ... }: {
+    services.consul-template.instances.example.settings = {
+      template = [{
+        contents = ''
+          {{ key "example" }}
+        '';
+        perms = "0600";
+        destination = "/example";
+      }];
+    };
+
+    services.consul = {
+      enable = true;
+      extraConfig = {
+        server = true;
+        bootstrap_expect = 1;
+        bind_addr = "127.0.0.1";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("consul.service")
+    machine.wait_for_open_port(8500)
+
+    machine.wait_for_unit("consul-template-example.service")
+
+    machine.wait_until_succeeds('consul kv put example example')
+
+    machine.wait_for_file("/example")
+    machine.succeed('grep "example" /example')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/consul.nix b/nixpkgs/nixos/tests/consul.nix
new file mode 100644
index 000000000000..6233234ff083
--- /dev/null
+++ b/nixpkgs/nixos/tests/consul.nix
@@ -0,0 +1,239 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  # Settings for both servers and agents
+  webUi = true;
+  retry_interval = "1s";
+  raft_multiplier = 1;
+
+  defaultExtraConfig = {
+    inherit retry_interval;
+    performance = {
+      inherit raft_multiplier;
+    };
+  };
+
+  allConsensusServerHosts = [
+    "192.168.1.1"
+    "192.168.1.2"
+    "192.168.1.3"
+  ];
+
+  allConsensusClientHosts = [
+    "192.168.2.1"
+    "192.168.2.2"
+  ];
+
+  firewallSettings = {
+    # See https://www.consul.io/docs/install/ports.html
+    allowedTCPPorts = [ 8301 8302 8600 8500 8300 ];
+    allowedUDPPorts = [ 8301 8302 8600 ];
+  };
+
+  client = index: { pkgs, ... }:
+    let
+      ip = builtins.elemAt allConsensusClientHosts index;
+    in
+      {
+        environment.systemPackages = [ pkgs.consul ];
+
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = ip; prefixLength = 16; }
+        ];
+        networking.firewall = firewallSettings;
+
+        services.consul = {
+          enable = true;
+          inherit webUi;
+          extraConfig = defaultExtraConfig // {
+            server = false;
+            retry_join = allConsensusServerHosts;
+            bind_addr = ip;
+          };
+        };
+      };
+
+  server = index: { pkgs, ... }:
+    let
+      numConsensusServers = builtins.length allConsensusServerHosts;
+      thisConsensusServerHost = builtins.elemAt allConsensusServerHosts index;
+      ip = thisConsensusServerHost; # since we already use IPs to identify servers
+    in
+      {
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = ip; prefixLength = 16; }
+        ];
+        networking.firewall = firewallSettings;
+
+        services.consul =
+          assert builtins.elem thisConsensusServerHost allConsensusServerHosts;
+          {
+            enable = true;
+            inherit webUi;
+            extraConfig = defaultExtraConfig // {
+              server = true;
+              bootstrap_expect = numConsensusServers;
+              # Tell Consul that we never intend to drop below this many servers.
+              # This ensures consensus is not permanently lost after a temporary outage.
+              # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040
+              autopilot.min_quorum = numConsensusServers;
+              retry_join =
+                # If there's only 1 node in the network, we allow self-join;
+                # otherwise, the node must not try to join itself, and join only the other servers.
+                # See https://github.com/hashicorp/consul/issues/2868
+                if numConsensusServers == 1
+                  then allConsensusServerHosts
+                  else builtins.filter (h: h != thisConsensusServerHost) allConsensusServerHosts;
+              bind_addr = ip;
+            };
+          };
+      };
+in {
+  name = "consul";
+
+  nodes = {
+    server1 = server 0;
+    server2 = server 1;
+    server3 = server 2;
+
+    client1 = client 0;
+    client2 = client 1;
+  };
+
+  testScript = ''
+    servers = [server1, server2, server3]
+    machines = [server1, server2, server3, client1, client2]
+
+    for m in machines:
+        m.wait_for_unit("consul.service")
+
+
+    def wait_for_healthy_servers():
+        # See https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040
+        # for why the `Voter` column of `list-peers` has that info.
+        # TODO: The `grep true` relies on the fact that currently in
+        #       the output like
+        #           # consul operator raft list-peers
+        #           Node     ID   Address           State     Voter  RaftProtocol
+        #           server3  ...  192.168.1.3:8300  leader    true   3
+        #           server2  ...  192.168.1.2:8300  follower  true   3
+        #           server1  ...  192.168.1.1:8300  follower  false  3
+        #       `Voter` is the only boolean column.
+        #       Change this to the more reliable way to be defined by
+        #       https://github.com/hashicorp/consul/issues/8118
+        #       once that ticket is closed.
+        for m in machines:
+            m.wait_until_succeeds(
+                "[ $(consul operator raft list-peers | grep true | wc -l) == 3 ]"
+            )
+
+
+    def wait_for_all_machines_alive():
+        """
+        Note that Serf-"alive" does not mean "Raft"-healthy;
+        see `wait_for_healthy_servers()` for that instead.
+        """
+        for m in machines:
+            m.wait_until_succeeds("[ $(consul members | grep -o alive | wc -l) == 5 ]")
+
+
+    wait_for_healthy_servers()
+    # Also wait for clients to be alive.
+    wait_for_all_machines_alive()
+
+    client1.succeed("consul kv put testkey 42")
+    client2.succeed("[ $(consul kv get testkey) == 42 ]")
+
+
+    def rolling_restart_test(proper_rolling_procedure=True):
+        """
+        Tests that the cluster can tolerate failures of any single server,
+        following the recommended rolling upgrade procedure from
+        https://www.consul.io/docs/upgrading#standard-upgrades.
+
+        Optionally, `proper_rolling_procedure=False` can be given
+        to wait only for each server to be back `Healthy`, not `Stable`
+        in the Raft consensus, see Consul setting `ServerStabilizationTime` and
+        https://github.com/hashicorp/consul/issues/8118#issuecomment-645330040.
+        """
+
+        for server in servers:
+            server.block()
+            server.systemctl("stop consul")
+
+            # Make sure the stopped peer is recognized as being down
+            client1.wait_until_succeeds(
+              f"[ $(consul members | grep {server.name} | grep -o -E 'failed|left' | wc -l) == 1 ]"
+            )
+
+            # For each client, wait until they have connection again
+            # using `kv get -recurse` before issuing commands.
+            client1.wait_until_succeeds("consul kv get -recurse")
+            client2.wait_until_succeeds("consul kv get -recurse")
+
+            # Do some consul actions while one server is down.
+            client1.succeed("consul kv put testkey 43")
+            client2.succeed("[ $(consul kv get testkey) == 43 ]")
+            client2.succeed("consul kv delete testkey")
+
+            server.unblock()
+            server.systemctl("start consul")
+
+            if proper_rolling_procedure:
+                # Wait for recovery.
+                wait_for_healthy_servers()
+            else:
+                # NOT proper rolling upgrade procedure, see above.
+                wait_for_all_machines_alive()
+
+            # Wait for client connections.
+            client1.wait_until_succeeds("consul kv get -recurse")
+            client2.wait_until_succeeds("consul kv get -recurse")
+
+            # Do some consul actions with server back up.
+            client1.succeed("consul kv put testkey 44")
+            client2.succeed("[ $(consul kv get testkey) == 44 ]")
+            client2.succeed("consul kv delete testkey")
+
+
+    def all_servers_crash_simultaneously_test():
+        """
+        Tests that the cluster will eventually come back after all
+        servers crash simultaneously.
+        """
+
+        for server in servers:
+            server.block()
+            server.systemctl("stop --no-block consul")
+
+        for server in servers:
+            # --no-block is async, so ensure it has been stopped by now
+            server.wait_until_fails("systemctl is-active --quiet consul")
+            server.unblock()
+            server.systemctl("start consul")
+
+        # Wait for recovery.
+        wait_for_healthy_servers()
+
+        # Wait for client connections.
+        client1.wait_until_succeeds("consul kv get -recurse")
+        client2.wait_until_succeeds("consul kv get -recurse")
+
+        # Do some consul actions with servers back up.
+        client1.succeed("consul kv put testkey 44")
+        client2.succeed("[ $(consul kv get testkey) == 44 ]")
+        client2.succeed("consul kv delete testkey")
+
+
+    # Run the tests.
+
+    print("rolling_restart_test()")
+    rolling_restart_test()
+
+    print("all_servers_crash_simultaneously_test()")
+    all_servers_crash_simultaneously_test()
+
+    print("rolling_restart_test(proper_rolling_procedure=False)")
+    rolling_restart_test(proper_rolling_procedure=False)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-bridge.nix b/nixpkgs/nixos/tests/containers-bridge.nix
new file mode 100644
index 000000000000..d2e16299edaa
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-bridge.nix
@@ -0,0 +1,99 @@
+let
+  hostIp = "192.168.0.1";
+  containerIp = "192.168.0.100/24";
+  hostIp6 = "fc00::1";
+  containerIp6 = "fc00::2/7";
+in
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-bridge";
+  meta = {
+    maintainers = with lib.maintainers; [ aristid aszlig eelco kampfschlaefer ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation.writableStore = true;
+
+      networking.bridges = {
+        br0 = {
+          interfaces = [];
+        };
+      };
+      networking.interfaces = {
+        br0 = {
+          ipv4.addresses = [{ address = hostIp; prefixLength = 24; }];
+          ipv6.addresses = [{ address = hostIp6; prefixLength = 7; }];
+        };
+      };
+
+      containers.webserver =
+        {
+          autoStart = true;
+          privateNetwork = true;
+          hostBridge = "br0";
+          localAddress = containerIp;
+          localAddress6 = containerIp6;
+          config =
+            { services.httpd.enable = true;
+              services.httpd.adminAddr = "foo@example.org";
+              networking.firewall.allowedTCPPorts = [ 80 ];
+            };
+        };
+
+      containers.web-noip =
+        {
+          autoStart = true;
+          privateNetwork = true;
+          hostBridge = "br0";
+          config =
+            { services.httpd.enable = true;
+              services.httpd.adminAddr = "foo@example.org";
+              networking.firewall.allowedTCPPorts = [ 80 ];
+            };
+        };
+
+
+      virtualisation.additionalPaths = [ pkgs.stdenv ];
+    };
+
+  testScript = ''
+    machine.wait_for_unit("default.target")
+    assert "webserver" in machine.succeed("nixos-container list")
+
+    with subtest("Start the webserver container"):
+        assert "up" in machine.succeed("nixos-container status webserver")
+
+    with subtest("Bridges exist inside containers"):
+        machine.succeed(
+            "nixos-container run webserver -- ip link show eth0",
+            "nixos-container run web-noip -- ip link show eth0",
+        )
+
+    ip = "${containerIp}".split("/")[0]
+    machine.succeed(f"ping -n -c 1 {ip}")
+    machine.succeed(f"curl --fail http://{ip}/ > /dev/null")
+
+    ip6 = "${containerIp6}".split("/")[0]
+    machine.succeed(f"ping -n -c 1 {ip6}")
+    machine.succeed(f"curl --fail http://[{ip6}]/ > /dev/null")
+
+    with subtest(
+        "nixos-container show-ip works in case of an ipv4 address "
+        + "with subnetmask in CIDR notation."
+    ):
+        result = machine.succeed("nixos-container show-ip webserver").rstrip()
+        assert result == ip
+
+    with subtest("Stop the container"):
+        machine.succeed("nixos-container stop webserver")
+        machine.fail(
+            f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null",
+            f"curl --fail --connect-timeout 2 http://[{ip6}]/ > /dev/null",
+        )
+
+    # Destroying a declarative container should fail.
+    machine.fail("nixos-container destroy webserver")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-custom-pkgs.nix b/nixpkgs/nixos/tests/containers-custom-pkgs.nix
new file mode 100644
index 000000000000..e8740ac63134
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-custom-pkgs.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+
+  customPkgs = pkgs.appendOverlays [ (self: super: {
+    hello = super.hello.overrideAttrs (old: {
+       name = "custom-hello";
+    });
+  }) ];
+
+in {
+  name = "containers-custom-pkgs";
+  meta = {
+    maintainers = with lib.maintainers; [ adisbladis erikarvstedt ];
+  };
+
+  nodes.machine = { config, ... }: {
+    assertions = let
+      helloName = (builtins.head config.containers.test.config.system.extraDependencies).name;
+    in [ {
+      assertion = helloName == "custom-hello";
+      message = "Unexpected value: ${helloName}";
+    } ];
+
+    containers.test = {
+      autoStart = true;
+      config = { pkgs, config, ... }: {
+        nixpkgs.pkgs = customPkgs;
+        system.extraDependencies = [ pkgs.hello ];
+      };
+    };
+  };
+
+  # This test only consists of evaluating the test machine
+  testScript = "pass";
+})
diff --git a/nixpkgs/nixos/tests/containers-ephemeral.nix b/nixpkgs/nixos/tests/containers-ephemeral.nix
new file mode 100644
index 000000000000..cb4b7d4eba0f
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-ephemeral.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-ephemeral";
+  meta = {
+    maintainers = with lib.maintainers; [ patryk27 ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.writableStore = true;
+
+    containers.webserver = {
+      ephemeral = true;
+      privateNetwork = true;
+      hostAddress = "10.231.136.1";
+      localAddress = "10.231.136.2";
+      config = {
+        services.nginx = {
+          enable = true;
+          virtualHosts.localhost = {
+            root = pkgs.runCommand "localhost" {} ''
+              mkdir "$out"
+              echo hello world > "$out/index.html"
+            '';
+          };
+        };
+        networking.firewall.allowedTCPPorts = [ 80 ];
+      };
+    };
+  };
+
+  testScript = ''
+    assert "webserver" in machine.succeed("nixos-container list")
+
+    machine.succeed("nixos-container start webserver")
+
+    with subtest("Container got its own root folder"):
+        machine.succeed("ls /run/nixos-containers/webserver")
+
+    with subtest("Container persistent directory is not created"):
+        machine.fail("ls /var/lib/nixos-containers/webserver")
+
+    # Since "start" returns after the container has reached
+    # multi-user.target, we should now be able to access it.
+    ip = machine.succeed("nixos-container show-ip webserver").rstrip()
+    machine.succeed(f"ping -n -c1 {ip}")
+    machine.succeed(f"curl --fail http://{ip}/ > /dev/null")
+
+    with subtest("Stop the container"):
+        machine.succeed("nixos-container stop webserver")
+        machine.fail(f"curl --fail --connect-timeout 2 http://{ip}/ > /dev/null")
+
+    with subtest("Container's root folder was removed"):
+        machine.fail("ls /run/nixos-containers/webserver")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-extra_veth.nix b/nixpkgs/nixos/tests/containers-extra_veth.nix
new file mode 100644
index 000000000000..f3e62265f6c4
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-extra_veth.nix
@@ -0,0 +1,91 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-extra_veth";
+  meta = {
+    maintainers = with lib.maintainers; [ kampfschlaefer ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation.writableStore = true;
+      virtualisation.vlans = [];
+
+      networking.useDHCP = false;
+      networking.bridges = {
+        br0 = {
+          interfaces = [];
+        };
+        br1 = { interfaces = []; };
+      };
+      networking.interfaces = {
+        br0 = {
+          ipv4.addresses = [{ address = "192.168.0.1"; prefixLength = 24; }];
+          ipv6.addresses = [{ address = "fc00::1"; prefixLength = 7; }];
+        };
+        br1 = {
+          ipv4.addresses = [{ address = "192.168.1.1"; prefixLength = 24; }];
+        };
+      };
+
+      containers.webserver =
+        {
+          autoStart = true;
+          privateNetwork = true;
+          hostBridge = "br0";
+          localAddress = "192.168.0.100/24";
+          localAddress6 = "fc00::2/7";
+          extraVeths = {
+            veth1 = { hostBridge = "br1"; localAddress = "192.168.1.100/24"; };
+            veth2 = { hostAddress = "192.168.2.1"; localAddress = "192.168.2.100"; };
+          };
+          config =
+            {
+              networking.firewall.allowedTCPPorts = [ 80 ];
+            };
+        };
+
+      virtualisation.additionalPaths = [ pkgs.stdenv ];
+    };
+
+  testScript =
+    ''
+      machine.wait_for_unit("default.target")
+      assert "webserver" in machine.succeed("nixos-container list")
+
+      with subtest("Status of the webserver container is up"):
+          assert "up" in machine.succeed("nixos-container status webserver")
+
+      with subtest("Ensure that the veths are inside the container"):
+          assert "state UP" in machine.succeed(
+              "nixos-container run webserver -- ip link show veth1"
+          )
+          assert "state UP" in machine.succeed(
+              "nixos-container run webserver -- ip link show veth2"
+          )
+
+      with subtest("Ensure the presence of the extra veths"):
+          assert "state UP" in machine.succeed("ip link show veth1")
+          assert "state UP" in machine.succeed("ip link show veth2")
+
+      with subtest("Ensure the veth1 is part of br1 on the host"):
+          assert "master br1" in machine.succeed("ip link show veth1")
+
+      with subtest("Ping on main veth"):
+          machine.succeed("ping -n -c 1 192.168.0.100")
+          machine.succeed("ping -n -c 1 fc00::2")
+
+      with subtest("Ping on the first extra veth"):
+          machine.succeed("ping -n -c 1 192.168.1.100 >&2")
+
+      with subtest("Ping on the second extra veth"):
+          machine.succeed("ping -n -c 1 192.168.2.100 >&2")
+
+      with subtest("Container can be stopped"):
+          machine.succeed("nixos-container stop webserver")
+          machine.fail("ping -n -c 1 192.168.1.100 >&2")
+          machine.fail("ping -n -c 1 192.168.2.100 >&2")
+
+      with subtest("Destroying a declarative container should fail"):
+          machine.fail("nixos-container destroy webserver")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/containers-hosts.nix b/nixpkgs/nixos/tests/containers-hosts.nix
new file mode 100644
index 000000000000..7bce7c997efe
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-hosts.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-hosts";
+  meta = {
+    maintainers = with lib.maintainers; [ montag451 ];
+  };
+
+  nodes.machine =
+    { lib, ... }:
+    {
+      virtualisation.vlans = [];
+
+      networking.bridges.br0.interfaces = [];
+      networking.interfaces.br0.ipv4.addresses = [
+        { address = "10.11.0.254"; prefixLength = 24; }
+      ];
+
+      # Force /etc/hosts to be the only source for host name resolution
+      environment.etc."nsswitch.conf".text = lib.mkForce ''
+        hosts: files
+      '';
+
+      containers.simple = {
+        autoStart = true;
+        privateNetwork = true;
+        localAddress = "10.10.0.1";
+        hostAddress = "10.10.0.254";
+
+        config = {};
+      };
+
+      containers.netmask = {
+        autoStart = true;
+        privateNetwork = true;
+        hostBridge = "br0";
+        localAddress = "10.11.0.1/24";
+
+        config = {};
+      };
+    };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("default.target")
+
+    with subtest("Ping the containers using the entries added in /etc/hosts"):
+        for host in "simple.containers", "netmask.containers":
+            machine.succeed(f"ping -n -c 1 {host}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-imperative.nix b/nixpkgs/nixos/tests/containers-imperative.nix
new file mode 100644
index 000000000000..18bec1db78e8
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-imperative.nix
@@ -0,0 +1,169 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-imperative";
+  meta = {
+    maintainers = with lib.maintainers; [ aristid aszlig eelco kampfschlaefer ];
+  };
+
+  nodes.machine =
+    { config, pkgs, lib, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+
+      # XXX: Sandbox setup fails while trying to hardlink files from the host's
+      #      store file system into the prepared chroot directory.
+      nix.settings.sandbox = false;
+      nix.settings.substituters = []; # don't try to access cache.nixos.org
+
+      virtualisation.writableStore = true;
+      # Make sure we always have all the required dependencies for creating a
+      # container available within the VM, because we don't have network access.
+      virtualisation.additionalPaths = let
+        emptyContainer = import ../lib/eval-config.nix {
+          modules = lib.singleton {
+            nixpkgs = { inherit (config.nixpkgs) localSystem; };
+
+            containers.foo.config = {};
+          };
+
+          # The system is inherited from the host above.
+          # Set it to null, to remove the "legacy" entrypoint's non-hermetic default.
+          system = null;
+        };
+      in with pkgs; [
+        stdenv stdenvNoCC emptyContainer.config.containers.foo.path
+        libxslt desktop-file-utils texinfo docbook5 libxml2
+        docbook_xsl_ns xorg.lndir documentation-highlighter
+      ];
+    };
+
+  testScript = let
+      tmpfilesContainerConfig = pkgs.writeText "container-config-tmpfiles" ''
+        {
+          systemd.tmpfiles.rules = [ "d /foo - - - - -" ];
+          systemd.services.foo = {
+            serviceConfig.Type = "oneshot";
+            script = "ls -al /foo";
+            wantedBy = [ "multi-user.target" ];
+          };
+        }
+      '';
+      brokenCfg = pkgs.writeText "broken.nix" ''
+        {
+          assertions = [
+            { assertion = false;
+              message = "I never evaluate";
+            }
+          ];
+        }
+      '';
+    in ''
+      with subtest("Make sure we have a NixOS tree (required by ‘nixos-container create’)"):
+          machine.succeed("PAGER=cat nix-env -qa -A nixos.hello >&2")
+
+      id1, id2 = None, None
+
+      with subtest("Create some containers imperatively"):
+          id1 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip()
+          machine.log(f"created container {id1}")
+
+          id2 = machine.succeed("nixos-container create foo --ensure-unique-name").rstrip()
+          machine.log(f"created container {id2}")
+
+          assert id1 != id2
+
+      with subtest(f"Put the root of {id2} into a bind mount"):
+          machine.succeed(
+              f"mv /var/lib/nixos-containers/{id2} /id2-bindmount",
+              f"mount --bind /id2-bindmount /var/lib/nixos-containers/{id1}",
+          )
+
+          ip1 = machine.succeed(f"nixos-container show-ip {id1}").rstrip()
+          ip2 = machine.succeed(f"nixos-container show-ip {id2}").rstrip()
+          assert ip1 != ip2
+
+      with subtest(
+          "Create a directory and a file we can later check if it still exists "
+          + "after destruction of the container"
+      ):
+          machine.succeed("mkdir /nested-bindmount")
+          machine.succeed("echo important data > /nested-bindmount/dummy")
+
+      with subtest(
+          "Create a directory with a dummy file and bind-mount it into both containers."
+      ):
+          for id in id1, id2:
+              important_path = f"/var/lib/nixos-containers/{id}/very/important/data"
+              machine.succeed(
+                  f"mkdir -p {important_path}",
+                  f"mount --bind /nested-bindmount {important_path}",
+              )
+
+      with subtest("Start one of them"):
+          machine.succeed(f"nixos-container start {id1}")
+
+      with subtest("Execute commands via the root shell"):
+          assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname")
+
+      with subtest("Execute a nix command via the root shell. (regression test for #40355)"):
+          machine.succeed(
+              f"nixos-container run {id1} -- nix-instantiate -E "
+              + '\'derivation { name = "empty"; builder = "false"; system = "false"; }\' '
+          )
+
+      with subtest("Stop and start (regression test for #4989)"):
+          machine.succeed(f"nixos-container stop {id1}")
+          machine.succeed(f"nixos-container start {id1}")
+
+      # clear serial backlog for next tests
+      machine.succeed("logger eat console backlog 3ea46eb2-7f82-4f70-b810-3f00e3dd4c4d")
+      machine.wait_for_console_text(
+          "eat console backlog 3ea46eb2-7f82-4f70-b810-3f00e3dd4c4d"
+      )
+
+      with subtest("Stop a container early"):
+          machine.succeed(f"nixos-container stop {id1}")
+          machine.succeed(f"nixos-container start {id1} >&2 &")
+          machine.wait_for_console_text("Stage 2")
+          machine.succeed(f"nixos-container stop {id1}")
+          machine.wait_for_console_text(f"Container {id1} exited successfully")
+          machine.succeed(f"nixos-container start {id1}")
+
+      with subtest("Stop a container without machined (regression test for #109695)"):
+          machine.systemctl("stop systemd-machined")
+          machine.succeed(f"nixos-container stop {id1}")
+          machine.wait_for_console_text(f"Container {id1} has been shut down")
+          machine.succeed(f"nixos-container start {id1}")
+
+      with subtest("tmpfiles are present"):
+          machine.log("creating container tmpfiles")
+          machine.succeed(
+              "nixos-container create tmpfiles --config-file ${tmpfilesContainerConfig}"
+          )
+          machine.log("created, starting…")
+          machine.succeed("nixos-container start tmpfiles")
+          machine.log("done starting, investigating…")
+          machine.succeed(
+              "echo $(nixos-container run tmpfiles -- systemctl is-active foo.service) | grep -q active;"
+          )
+          machine.succeed("nixos-container destroy tmpfiles")
+
+      with subtest("Execute commands via the root shell"):
+          assert "Linux" in machine.succeed(f"nixos-container run {id1} -- uname")
+
+      with subtest("Destroy the containers"):
+          for id in id1, id2:
+              machine.succeed(f"nixos-container destroy {id}")
+
+      with subtest("Check whether destruction of any container has killed important data"):
+          machine.succeed("grep -qF 'important data' /nested-bindmount/dummy")
+
+      with subtest("Ensure that the container path is gone"):
+          print(machine.succeed("ls -lsa /var/lib/nixos-containers"))
+          machine.succeed(f"test ! -e /var/lib/nixos-containers/{id1}")
+
+      with subtest("Ensure that a failed container creation doesn'leave any state"):
+          machine.fail(
+              "nixos-container create b0rk --config-file ${brokenCfg}"
+          )
+          machine.succeed("test ! -e /var/lib/nixos-containers/b0rk")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/containers-ip.nix b/nixpkgs/nixos/tests/containers-ip.nix
new file mode 100644
index 000000000000..ecead5c22f75
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-ip.nix
@@ -0,0 +1,74 @@
+let
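+  # Build a declarative httpd container with the given host and local addresses.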
+  webserverFor = hostAddress: localAddress: {
+    inherit hostAddress localAddress;
+    privateNetwork = true;
+    config = {
+      services.httpd = {
+        enable = true;
+        adminAddr = "foo@example.org";
+      };
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+  };
+
+in import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-ipv4-ipv6";
+  meta = {
+    maintainers = with lib.maintainers; [ aristid aszlig eelco kampfschlaefer ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }: {
+      imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation = {
+        writableStore = true;
+      };
+
+      containers.webserver4 = webserverFor "10.231.136.1" "10.231.136.2";
+      containers.webserver6 = webserverFor "fc00::2" "fc00::1";
+      virtualisation.additionalPaths = [ pkgs.stdenv ];
+    };
+
+  testScript = { nodes, ... }: ''
+    import time
+
+
+    def curl_host(ip):
+        # put [] around ipv6 addresses for curl
+        host = ip if ":" not in ip else f"[{ip}]"
+        return f"curl --fail --connect-timeout 2 http://{host}/ > /dev/null"
+
+
+    def get_ip(container):
+        # need to distinguish because show-ip won't work for ipv6
+        if container == "webserver4":
+            ip = machine.succeed(f"nixos-container show-ip {container}").rstrip()
+            assert ip == "${nodes.machine.config.containers.webserver4.localAddress}"
+            return ip
+        return "${nodes.machine.config.containers.webserver6.localAddress}"
+
+
+    for container in "webserver4", "webserver6":
+        assert container in machine.succeed("nixos-container list")
+
+        with subtest(f"Start container {container}"):
+            machine.succeed(f"nixos-container start {container}")
+            # wait 2s for container to start and network to be up
+            time.sleep(2)
+
+        # Since "start" returns after the container has reached
+        # multi-user.target, we should now be able to access it.
+
+        ip = get_ip(container)
+        with subtest(f"{container} reacts to pings and HTTP requests"):
+            machine.succeed(f"ping -n -c1 {ip}")
+            machine.succeed(curl_host(ip))
+
+        with subtest(f"Stop container {container}"):
+            machine.succeed(f"nixos-container stop {container}")
+            machine.fail(curl_host(ip))
+
+        # Destroying a declarative container should fail.
+        machine.fail(f"nixos-container destroy {container}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-macvlans.nix b/nixpkgs/nixos/tests/containers-macvlans.nix
new file mode 100644
index 000000000000..a0cea8db4a1a
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-macvlans.nix
@@ -0,0 +1,82 @@
+let
+  # container IPs on VLAN 1
+  containerIp1 = "192.168.1.253";
+  containerIp2 = "192.168.1.254";
+in
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-macvlans";
+  meta = {
+    maintainers = with lib.maintainers; [ montag451 ];
+  };
+
+  nodes = {
+
+    machine1 =
+      { lib, ... }:
+      {
+        virtualisation.vlans = [ 1 ];
+
+        # To be able to ping containers from the host, it is necessary
+        # to create a macvlan on the host on the VLAN 1 network.
+        networking.macvlans.mv-eth1-host = {
+          interface = "eth1";
+          mode = "bridge";
+        };
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [];
+        networking.interfaces.mv-eth1-host = {
+          ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+        };
+
+        containers.test1 = {
+          autoStart = true;
+          macvlans = [ "eth1" ];
+
+          config = {
+            networking.interfaces.mv-eth1 = {
+              ipv4.addresses = [ { address = containerIp1; prefixLength = 24; } ];
+            };
+          };
+        };
+
+        containers.test2 = {
+          autoStart = true;
+          macvlans = [ "eth1" ];
+
+          config = {
+            networking.interfaces.mv-eth1 = {
+              ipv4.addresses = [ { address = containerIp2; prefixLength = 24; } ];
+            };
+          };
+        };
+      };
+
+    machine2 =
+      { ... }:
+      {
+        virtualisation.vlans = [ 1 ];
+      };
+
+  };
+
+  testScript = ''
+    start_all()
+    machine1.wait_for_unit("default.target")
+    machine2.wait_for_unit("default.target")
+
+    with subtest(
+        "Ping between containers to check that macvlans are created in bridge mode"
+    ):
+        machine1.succeed("nixos-container run test1 -- ping -n -c 1 ${containerIp2}")
+
+    with subtest("Ping containers from the host (machine1)"):
+        machine1.succeed("ping -n -c 1 ${containerIp1}")
+        machine1.succeed("ping -n -c 1 ${containerIp2}")
+
+    with subtest(
+        "Ping containers from the second machine to check that containers are reachable from the outside"
+    ):
+        machine2.succeed("ping -n -c 1 ${containerIp1}")
+        machine2.succeed("ping -n -c 1 ${containerIp2}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-names.nix b/nixpkgs/nixos/tests/containers-names.nix
new file mode 100644
index 000000000000..721f64990724
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-names.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-names";
+  meta = {
+    maintainers = with lib.maintainers; [ patryk27 ];
+  };
+
+  nodes.machine = { ... }: {
+    # We're using the newest kernel, so that we can test containers with long names.
+    # Please see https://github.com/NixOS/nixpkgs/issues/38509 for details.
+    boot.kernelPackages = pkgs.linuxPackages_latest;
+
+    containers = let
+      container = subnet: {
+        autoStart = true;
+        privateNetwork = true;
+        hostAddress = "192.168.${subnet}.1";
+        localAddress = "192.168.${subnet}.2";
+        config = { };
+      };
+
+     in {
+      first = container "1";
+      second = container "2";
+      really-long-name = container "3";
+      really-long-long-name-2 = container "4";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("default.target")
+
+    machine.succeed("ip link show | grep ve-first")
+    machine.succeed("ip link show | grep ve-second")
+    machine.succeed("ip link show | grep ve-really-lFYWO")
+    machine.succeed("ip link show | grep ve-really-l3QgY")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-nested.nix b/nixpkgs/nixos/tests/containers-nested.nix
new file mode 100644
index 000000000000..4a9fb8f01e24
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-nested.nix
@@ -0,0 +1,30 @@
+# Test for NixOS' container nesting.
+
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nested";
+
+  meta = with pkgs.lib.maintainers; { maintainers = [ sorki ]; };
+
+  nodes.machine = { lib, ... }:
+    let
+      makeNested = subConf: {
+        containers.nested = {
+          autoStart = true;
+          privateNetwork = true;
+          config = subConf;
+        };
+      };
+    in makeNested (makeNested { });
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("container@nested.service")
+    machine.succeed("systemd-run --pty --machine=nested -- machinectl list | grep nested")
+    print(
+        machine.succeed(
+            "systemd-run --pty --machine=nested -- systemd-run --pty --machine=nested -- systemctl status"
+        )
+    )
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/containers-physical_interfaces.nix b/nixpkgs/nixos/tests/containers-physical_interfaces.nix
new file mode 100644
index 000000000000..e203f88786a3
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-physical_interfaces.nix
@@ -0,0 +1,131 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-physical_interfaces";
+  meta = {
+    maintainers = with lib.maintainers; [ kampfschlaefer ];
+  };
+
+  nodes = {
+    server = { ... }:
+      {
+        virtualisation.vlans = [ 1 ];
+
+        containers.server = {
+          privateNetwork = true;
+          interfaces = [ "eth1" ];
+
+          config = {
+            networking.interfaces.eth1.ipv4.addresses = [
+              { address = "10.10.0.1"; prefixLength = 24; }
+            ];
+            networking.firewall.enable = false;
+          };
+        };
+      };
+    bridged = { ... }: {
+      virtualisation.vlans = [ 1 ];
+
+      containers.bridged = {
+        privateNetwork = true;
+        interfaces = [ "eth1" ];
+
+        config = {
+          networking.bridges.br0.interfaces = [ "eth1" ];
+          networking.interfaces.br0.ipv4.addresses = [
+            { address = "10.10.0.2"; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+        };
+      };
+    };
+
+    bonded = { ... }: {
+      virtualisation.vlans = [ 1 ];
+
+      containers.bonded = {
+        privateNetwork = true;
+        interfaces = [ "eth1" ];
+
+        config = {
+          networking.bonds.bond0 = {
+            interfaces = [ "eth1" ];
+            driverOptions.mode = "active-backup";
+          };
+          networking.interfaces.bond0.ipv4.addresses = [
+            { address = "10.10.0.3"; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+        };
+      };
+    };
+
+    bridgedbond = { ... }: {
+      virtualisation.vlans = [ 1 ];
+
+      containers.bridgedbond = {
+        privateNetwork = true;
+        interfaces = [ "eth1" ];
+
+        config = {
+          networking.bonds.bond0 = {
+            interfaces = [ "eth1" ];
+            driverOptions.mode = "active-backup";
+          };
+          networking.bridges.br0.interfaces = [ "bond0" ];
+          networking.interfaces.br0.ipv4.addresses = [
+            { address = "10.10.0.4"; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("Prepare server"):
+        server.wait_for_unit("default.target")
+        server.succeed("ip link show dev eth1 >&2")
+
+    with subtest("Simple physical interface is up"):
+        server.succeed("nixos-container start server")
+        server.wait_for_unit("container@server")
+        server.succeed(
+            "systemctl -M server list-dependencies network-addresses-eth1.service >&2"
+        )
+
+        # The other tests will ping this container on its IP. Here we just check
+        # that the device is present in the container.
+        server.succeed("nixos-container run server -- ip a show dev eth1 >&2")
+
+    with subtest("Physical device in bridge in container can ping server"):
+        bridged.wait_for_unit("default.target")
+        bridged.succeed("nixos-container start bridged")
+        bridged.wait_for_unit("container@bridged")
+        bridged.succeed(
+            "systemctl -M bridged list-dependencies network-addresses-br0.service >&2",
+            "systemctl -M bridged status -n 30 -l network-addresses-br0.service",
+            "nixos-container run bridged -- ping -w 10 -c 1 -n 10.10.0.1",
+        )
+
+    with subtest("Physical device in bond in container can ping server"):
+        bonded.wait_for_unit("default.target")
+        bonded.succeed("nixos-container start bonded")
+        bonded.wait_for_unit("container@bonded")
+        bonded.succeed(
+            "systemctl -M bonded list-dependencies network-addresses-bond0 >&2",
+            "systemctl -M bonded status -n 30 -l network-addresses-bond0 >&2",
+            "nixos-container run bonded -- ping -w 10 -c 1 -n 10.10.0.1",
+        )
+
+    with subtest("Physical device in bond in bridge in container can ping server"):
+        bridgedbond.wait_for_unit("default.target")
+        bridgedbond.succeed("nixos-container start bridgedbond")
+        bridgedbond.wait_for_unit("container@bridgedbond")
+        bridgedbond.succeed(
+            "systemctl -M bridgedbond list-dependencies network-addresses-br0.service >&2",
+            "systemctl -M bridgedbond status -n 30 -l network-addresses-br0.service",
+            "nixos-container run bridgedbond -- ping -w 10 -c 1 -n 10.10.0.1",
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/containers-portforward.nix b/nixpkgs/nixos/tests/containers-portforward.nix
new file mode 100644
index 000000000000..b8c7aabc5a50
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-portforward.nix
@@ -0,0 +1,59 @@
+let
+  hostIp = "192.168.0.1";
+  hostPort = 10080;
+  containerIp = "192.168.0.100";
+  containerPort = 80;
+in
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-portforward";
+  meta = {
+    maintainers = with lib.maintainers; [ aristid aszlig eelco kampfschlaefer ianwookim ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation.writableStore = true;
+
+      containers.webserver =
+        { privateNetwork = true;
+          hostAddress = hostIp;
+          localAddress = containerIp;
+          forwardPorts = [ { protocol = "tcp"; hostPort = hostPort; containerPort = containerPort; } ];
+          config =
+            { services.httpd.enable = true;
+              services.httpd.adminAddr = "foo@example.org";
+              networking.firewall.allowedTCPPorts = [ 80 ];
+            };
+        };
+
+      virtualisation.additionalPaths = [ pkgs.stdenv ];
+    };
+
+  testScript =
+    ''
+      container_list = machine.succeed("nixos-container list")
+      assert "webserver" in container_list
+
+      # Start the webserver container.
+      machine.succeed("nixos-container start webserver")
+
+      # wait two seconds for the container to start and the network to be up
+      machine.sleep(2)
+
+      # Since "start" returns after the container has reached
+      # multi-user.target, we should now be able to access it.
+      # ip = machine.succeed("nixos-container show-ip webserver").strip()
+      machine.succeed("ping -n -c1 ${hostIp}")
+      machine.succeed("curl --fail http://${hostIp}:${toString hostPort}/ > /dev/null")
+
+      # Stop the container.
+      machine.succeed("nixos-container stop webserver")
+      machine.fail("curl --fail --connect-timeout 2 http://${hostIp}:${toString hostPort}/ > /dev/null")
+
+      # Destroying a declarative container should fail.
+      machine.fail("nixos-container destroy webserver")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/containers-reloadable.nix b/nixpkgs/nixos/tests/containers-reloadable.nix
new file mode 100644
index 000000000000..876e62c1da9e
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-reloadable.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  client_base = {
+    containers.test1 = {
+      autoStart = true;
+      config = {
+        environment.etc.check.text = "client_base";
+      };
+    };
+
+    # prevent make-test-python.nix from changing the IP
+    networking.interfaces = {
+      eth1.ipv4.addresses = lib.mkOverride 0 [ ];
+    };
+  };
+in {
+  name = "containers-reloadable";
+  meta = {
+    maintainers = with lib.maintainers; [ danbst ];
+  };
+
+  nodes = {
+    client = { ... }: {
+      imports = [ client_base ];
+    };
+
+    client_c1 = { lib, ... }: {
+      imports = [ client_base ];
+
+      containers.test1.config = {
+        environment.etc.check.text = lib.mkForce "client_c1";
+        services.httpd.enable = true;
+        services.httpd.adminAddr = "nixos@example.com";
+      };
+    };
+    client_c2 = { lib, ... }: {
+      imports = [ client_base ];
+
+      containers.test1.config = {
+        environment.etc.check.text = lib.mkForce "client_c2";
+        services.nginx.enable = true;
+      };
+    };
+  };
+
+  testScript = {nodes, ...}: let
+    c1System = nodes.client_c1.config.system.build.toplevel;
+    c2System = nodes.client_c2.config.system.build.toplevel;
+  in ''
+    client.start()
+    client.wait_for_unit("default.target")
+
+    assert "client_base" in client.succeed("nixos-container run test1 cat /etc/check")
+
+    with subtest("httpd is available after activating config1"):
+        client.succeed(
+            "${c1System}/bin/switch-to-configuration test >&2",
+            "[[ $(nixos-container run test1 cat /etc/check) == client_c1 ]] >&2",
+            "systemctl status httpd -M test1 >&2",
+        )
+
+    with subtest("httpd is not available any longer after switching to config2"):
+        client.succeed(
+            "${c2System}/bin/switch-to-configuration test >&2",
+            "[[ $(nixos-container run test1 cat /etc/check) == client_c2 ]] >&2",
+            "systemctl status nginx -M test1 >&2",
+        )
+        client.fail("systemctl status httpd -M test1 >&2")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/containers-restart_networking.nix b/nixpkgs/nixos/tests/containers-restart_networking.nix
new file mode 100644
index 000000000000..e1ad8157b288
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-restart_networking.nix
@@ -0,0 +1,113 @@
+let
+  client_base = {
+    networking.firewall.enable = false;
+
+    containers.webserver = {
+      autoStart = true;
+      privateNetwork = true;
+      hostBridge = "br0";
+      config = {
+        networking.firewall.enable = false;
+        networking.interfaces.eth0.ipv4.addresses = [
+          { address = "192.168.1.122"; prefixLength = 24; }
+        ];
+      };
+    };
+  };
+in import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "containers-restart_networking";
+  meta = {
+    maintainers = with lib.maintainers; [ kampfschlaefer ];
+  };
+
+  nodes = {
+    client = { lib, ... }: client_base // {
+      virtualisation.vlans = [ 1 ];
+
+      networking.bridges.br0 = {
+        interfaces = [];
+        rstp = false;
+      };
+      networking.interfaces = {
+        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
+        br0.ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+      };
+
+    };
+    client_eth1 = { lib, ... }: client_base // {
+      networking.bridges.br0 = {
+        interfaces = [ "eth1" ];
+        rstp = false;
+      };
+      networking.interfaces = {
+        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
+        br0.ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
+      };
+    };
+    client_eth1_rstp = { lib, ... }: client_base // {
+      networking.bridges.br0 = {
+        interfaces = [ "eth1" ];
+        rstp = true;
+      };
+      networking.interfaces = {
+        eth1.ipv4.addresses = lib.mkOverride 0 [ ];
+        br0.ipv4.addresses =  [ { address = "192.168.1.2"; prefixLength = 24; } ];
+      };
+    };
+  };
+
+  testScript = {nodes, ...}: let
+    originalSystem = nodes.client.config.system.build.toplevel;
+    eth1_bridged = nodes.client_eth1.config.system.build.toplevel;
+    eth1_rstp = nodes.client_eth1_rstp.config.system.build.toplevel;
+  in ''
+    client.start()
+
+    client.wait_for_unit("default.target")
+
+    with subtest("Initial configuration connectivity check"):
+        client.succeed("ping 192.168.1.122 -c 1 -n >&2")
+        client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2")
+
+        client.fail("ip l show eth1 |grep 'master br0' >&2")
+        client.fail("grep eth1 /run/br0.interfaces >&2")
+
+    with subtest("Bridged configuration without STP preserves connectivity"):
+        client.succeed(
+            "${eth1_bridged}/bin/switch-to-configuration test >&2"
+        )
+
+        client.succeed(
+            "ping 192.168.1.122 -c 1 -n >&2",
+            "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2",
+            "ip l show eth1 |grep 'master br0' >&2",
+            "grep eth1 /run/br0.interfaces >&2",
+        )
+
+    # Activating RSTP requires an additional service, so the bridge restarts and the container loses its connectivity.
+    # with subtest("Bridged configuration with STP"):
+    #     client.succeed("${eth1_rstp}/bin/switch-to-configuration test >&2")
+    #     client.execute("ip -4 a >&2")
+    #     client.execute("ip l >&2")
+    #
+    #     client.succeed(
+    #         "ping 192.168.1.122 -c 1 -n >&2",
+    #         "nixos-container run webserver -- ping -c 1 -n 192.168.1.2 >&2",
+    #         "ip l show eth1 |grep 'master br0' >&2",
+    #         "grep eth1 /run/br0.interfaces >&2",
+    #     )
+
+    with subtest("Reverting to initial configuration preserves connectivity"):
+        client.succeed(
+            "${originalSystem}/bin/switch-to-configuration test >&2"
+        )
+
+        client.succeed("ping 192.168.1.122 -c 1 -n >&2")
+        client.succeed("nixos-container run webserver -- ping -c 1 -n 192.168.1.1 >&2")
+
+        client.fail("ip l show eth1 |grep 'master br0' >&2")
+        client.fail("grep eth1 /run/br0.interfaces >&2")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/containers-tmpfs.nix b/nixpkgs/nixos/tests/containers-tmpfs.nix
new file mode 100644
index 000000000000..cf5b81656afe
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-tmpfs.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-tmpfs";
+  meta = {
+    maintainers = with lib.maintainers; [ patryk27 ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+    { imports = [ ../modules/installer/cd-dvd/channel.nix ];
+      virtualisation.writableStore = true;
+
+      containers.tmpfs =
+        {
+          autoStart = true;
+          tmpfs = [
+            # Mount var as a tmpfs
+            "/var"
+
+            # Add a nested mount inside a tmpfs
+            "/var/log"
+
+            # Add a tmpfs on a path that does not exist
+            "/some/random/path"
+          ];
+          config = { };
+        };
+
+      virtualisation.additionalPaths = [ pkgs.stdenv ];
+    };
+
+  testScript = ''
+      machine.wait_for_unit("default.target")
+      assert "tmpfs" in machine.succeed("nixos-container list")
+
+      with subtest("tmpfs container is up"):
+          assert "up" in machine.succeed("nixos-container status tmpfs")
+
+
+      def tmpfs_cmd(command):
+          return f"nixos-container run tmpfs -- {command} 2>/dev/null"
+
+
+      with subtest("/var is mounted as a tmpfs"):
+          machine.succeed(tmpfs_cmd("mountpoint -q /var"))
+
+      with subtest("/var/log is mounted as a tmpfs"):
+          assert "What: tmpfs" in machine.succeed(
+              tmpfs_cmd("systemctl status var-log.mount --no-pager")
+          )
+          machine.succeed(tmpfs_cmd("mountpoint -q /var/log"))
+
+      with subtest("/some/random/path is mounted as a tmpfs"):
+          assert "What: tmpfs" in machine.succeed(
+              tmpfs_cmd("systemctl status some-random-path.mount --no-pager")
+          )
+          machine.succeed(tmpfs_cmd("mountpoint -q /some/random/path"))
+
+      with subtest(
+          "files created in the container in a non-tmpfs directory are visible on the host."
+      ):
+          # Baseline: without a tmpfs in the way, files written in the container appear in the host's container directory.
+          machine.succeed(
+              tmpfs_cmd("touch /root/test.file"),
+              tmpfs_cmd("ls -l  /root | grep -q test.file"),
+              "test -e /var/lib/nixos-containers/tmpfs/root/test.file",
+          )
+
+      with subtest(
+          "/some/random/path is writable and that files created there are not "
+          + "in the hosts container dir but in the tmpfs"
+      ):
+          machine.succeed(
+              tmpfs_cmd("touch /some/random/path/test.file"),
+              tmpfs_cmd("test -e /some/random/path/test.file"),
+          )
+          machine.fail("test -e /var/lib/nixos-containers/tmpfs/some/random/path/test.file")
+
+      with subtest(
+          "files created in the hosts container dir in a path where a tmpfs "
+          + "file system has been mounted are not visible to the container as "
+          + "the do not exist in the tmpfs"
+      ):
+          machine.succeed(
+              "touch /var/lib/nixos-containers/tmpfs/var/test.file",
+              "test -e /var/lib/nixos-containers/tmpfs/var/test.file",
+              "ls -l /var/lib/nixos-containers/tmpfs/var/ | grep -q test.file 2>/dev/null",
+          )
+          machine.fail(tmpfs_cmd("ls -l /var | grep -q test.file"))
+    '';
+})
diff --git a/nixpkgs/nixos/tests/containers-unified-hierarchy.nix b/nixpkgs/nixos/tests/containers-unified-hierarchy.nix
new file mode 100644
index 000000000000..978d59e12c8a
--- /dev/null
+++ b/nixpkgs/nixos/tests/containers-unified-hierarchy.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "containers-unified-hierarchy";
+  meta = {
+    maintainers = with lib.maintainers; [ farnoy ];
+  };
+
+  nodes.machine = { ... }: {
+    containers = {
+      test-container = {
+        autoStart = true;
+        config = { };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("default.target")
+
+    machine.succeed("echo 'stat -fc %T /sys/fs/cgroup/ | grep cgroup2fs' | nixos-container root-login test-container")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/convos.nix b/nixpkgs/nixos/tests/convos.nix
new file mode 100644
index 000000000000..8fe5892da9e5
--- /dev/null
+++ b/nixpkgs/nixos/tests/convos.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+
+let
+  port = 3333;
+in
+{
+  name = "convos";
+  meta.maintainers = with lib.maintainers; [ sgo ];
+
+  nodes = {
+    machine =
+      { pkgs, ... }:
+      {
+        services.convos = {
+          enable = true;
+          listenPort = port;
+        };
+      };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("convos")
+    machine.wait_for_open_port(${toString port})
+    machine.succeed("journalctl -u convos | grep -q 'application available at.*${toString port}'")
+    machine.succeed("curl -f http://localhost:${toString port}/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/corerad.nix b/nixpkgs/nixos/tests/corerad.nix
new file mode 100644
index 000000000000..b6f5d7fc6f75
--- /dev/null
+++ b/nixpkgs/nixos/tests/corerad.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix (
+  {
+    name = "corerad";
+    nodes = {
+      router = {config, pkgs, ...}: {
+        config = {
+          # This machine simulates a router with IPv6 forwarding and a static IPv6 address.
+          boot.kernel.sysctl = {
+            "net.ipv6.conf.all.forwarding" = true;
+          };
+          networking.interfaces.eth1 = {
+            ipv6.addresses = [ { address = "fd00:dead:beef:dead::1"; prefixLength = 64; } ];
+          };
+          services.corerad = {
+            enable = true;
+            # Serve router advertisements to the client machine with prefix information matching
+            # any IPv6 /64 prefixes configured on this interface.
+            #
+            # This configuration is identical to the example in the CoreRAD NixOS module.
+            settings = {
+              interfaces = [
+                {
+                  name = "eth0";
+                  monitor = true;
+                }
+                {
+                  name = "eth1";
+                  advertise = true;
+                  prefix = [{ prefix = "::/64"; }];
+                }
+              ];
+              debug = {
+                address = "localhost:9430";
+                prometheus = true;
+              };
+            };
+          };
+        };
+      };
+      client = {config, pkgs, ...}: {
+        # Use IPv6 SLAAC from router advertisements, and install rdisc6 so we can
+        # trigger one immediately.
+        config = {
+          boot.kernel.sysctl = {
+            "net.ipv6.conf.all.autoconf" = true;
+          };
+          environment.systemPackages = with pkgs; [
+            ndisc6
+          ];
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      with subtest("Wait for CoreRAD and network ready"):
+          # Ensure networking is online and CoreRAD is ready.
+          router.wait_for_unit("network-online.target")
+          client.wait_for_unit("network-online.target")
+          router.wait_for_unit("corerad.service")
+
+          # Ensure the client can reach the router.
+          client.wait_until_succeeds("ping -c 1 fd00:dead:beef:dead::1")
+
+      with subtest("Verify SLAAC on client"):
+          # Trigger a router solicitation and verify a SLAAC address is assigned from
+          # the prefix configured on the router.
+          client.wait_until_succeeds("rdisc6 -1 -r 10 eth1")
+          client.wait_until_succeeds(
+              "ip -6 addr show dev eth1 | grep -q 'fd00:dead:beef:dead:'"
+          )
+
+          addrs = client.succeed("ip -6 addr show dev eth1")
+
+          assert (
+              "fd00:dead:beef:dead:" in addrs
+          ), "SLAAC prefix was not found in client addresses after router advertisement"
+          assert (
+              "/64 scope global temporary" in addrs
+          ), "SLAAC temporary address was not configured on client after router advertisement"
+
+      with subtest("Verify HTTP debug server is configured"):
+          out = router.succeed("curl -f localhost:9430/metrics")
+
+          assert (
+              "corerad_build_info" in out
+          ), "Build info metric was not found in Prometheus output"
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/coturn.nix b/nixpkgs/nixos/tests/coturn.nix
new file mode 100644
index 000000000000..b44bf8d06e39
--- /dev/null
+++ b/nixpkgs/nixos/tests/coturn.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "coturn";
+  nodes = {
+    default = {
+      services.coturn.enable = true;
+    };
+    secretsfile = {
+      boot.postBootCommands = ''
+        echo "some-very-secret-string" > /run/coturn-secret
+      '';
+      services.coturn = {
+        enable = true;
+        static-auth-secret-file = "/run/coturn-secret";
+      };
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      with subtest("by default works without configuration"):
+          default.wait_for_unit("coturn.service")
+
+      with subtest("works with static-auth-secret-file"):
+          secretsfile.wait_for_unit("coturn.service")
+          secretsfile.wait_for_open_port(3478)
+          secretsfile.succeed("grep 'some-very-secret-string' /run/coturn/turnserver.cfg")
+          # Forbidden IP, fails:
+          secretsfile.fail("${pkgs.coturn}/bin/turnutils_uclient -W some-very-secret-string 127.0.0.1 -DgX -e 127.0.0.1 -n 1 -c -y")
+          # allowed-peer-ip, should succeed:
+          secretsfile.succeed("${pkgs.coturn}/bin/turnutils_uclient -W some-very-secret-string 192.168.1.2 -DgX -e 192.168.1.2 -n 1 -c -y")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/couchdb.nix b/nixpkgs/nixos/tests/couchdb.nix
new file mode 100644
index 000000000000..cf6ca8e4548d
--- /dev/null
+++ b/nixpkgs/nixos/tests/couchdb.nix
@@ -0,0 +1,57 @@
+let
+  makeNode = couchpkg: user: passwd:
+    { pkgs, ... } :
+
+      { environment.systemPackages = [ pkgs.jq ];
+        services.couchdb.enable = true;
+        services.couchdb.package = couchpkg;
+        services.couchdb.adminUser = user;
+        services.couchdb.adminPass = passwd;
+      };
+  testuser = "testadmin";
+  testpass = "cowabunga";
+  testlogin = "${testuser}:${testpass}@";
+in
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+{
+  name = "couchdb";
+  meta.maintainers = [ ];
+
+  nodes = {
+    couchdb3 = makeNode pkgs.couchdb3 testuser testpass;
+  };
+
+  testScript = let
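+    # curlJqCheck renders a small shell script: query CouchDB with the given method
+    # and path, extract a field via jq, and exit non-zero unless it matches the
+    # expected value.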
+    curlJqCheck = login: action: path: jqexpr: result:
+      pkgs.writeScript "curl-jq-check-${action}-${path}.sh" ''
+        RESULT=$(curl -X ${action} http://${login}127.0.0.1:5984/${path} | jq -r '${jqexpr}')
+        echo $RESULT >&2
+        if [ "$RESULT" != "${result}" ]; then
+          exit 1
+        fi
+      '';
+  in ''
+    start_all()
+
+    couchdb3.wait_for_unit("couchdb.service")
+    couchdb3.wait_until_succeeds(
+        "${curlJqCheck testlogin "GET" "" ".couchdb" "Welcome"}"
+    )
+    couchdb3.wait_until_succeeds(
+        "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "0"}"
+    )
+    couchdb3.succeed("${curlJqCheck testlogin "PUT" "foo" ".ok" "true"}")
+    couchdb3.succeed(
+        "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "1"}"
+    )
+    couchdb3.succeed(
+        "${curlJqCheck testlogin "DELETE" "foo" ".ok" "true"}"
+    )
+    couchdb3.succeed(
+        "${curlJqCheck testlogin "GET" "_all_dbs" ". | length" "0"}"
+    )
+    couchdb3.succeed(
+        "${curlJqCheck testlogin "GET" "_node/couchdb@127.0.0.1" ".couchdb" "Welcome"}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/cri-o.nix b/nixpkgs/nixos/tests/cri-o.nix
new file mode 100644
index 000000000000..08e1e8f36b06
--- /dev/null
+++ b/nixpkgs/nixos/tests/cri-o.nix
@@ -0,0 +1,19 @@
+# This test runs CRI-O and verifies via critest
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "cri-o";
+  meta.maintainers = with pkgs.lib; teams.podman.members;
+
+  nodes = {
+    crio = {
+      virtualisation.cri-o.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    crio.wait_for_unit("crio.service")
+    crio.succeed(
+        "critest --ginkgo.focus='Runtime info' --runtime-endpoint unix:///var/run/crio/crio.sock"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/croc.nix b/nixpkgs/nixos/tests/croc.nix
new file mode 100644
index 000000000000..5d709eb3d1cb
--- /dev/null
+++ b/nixpkgs/nixos/tests/croc.nix
@@ -0,0 +1,51 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  client = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.croc ];
+  };
+  pass = pkgs.writeText "pass" "PassRelay";
+in {
+  name = "croc";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hax404 julm ];
+  };
+
+  nodes = {
+    relay = {
+      services.croc = {
+        enable = true;
+        pass = pass;
+        openFirewall = true;
+      };
+    };
+    sender = client;
+    receiver = client;
+  };
+
+  testScript = ''
+    start_all()
+
+    # wait until relay is up
+    relay.wait_for_unit("croc")
+    relay.wait_for_open_port(9009)
+    relay.wait_for_open_port(9010)
+    relay.wait_for_open_port(9011)
+    relay.wait_for_open_port(9012)
+    relay.wait_for_open_port(9013)
+
+    # generate testfiles and send them
+    sender.wait_for_unit("multi-user.target")
+    sender.execute("echo Hello World > testfile01.txt")
+    sender.execute("echo Hello Earth > testfile02.txt")
+    sender.execute(
+        "croc --pass ${pass} --relay relay send --code topSecret testfile01.txt testfile02.txt >&2 &"
+    )
+
+    # receive the testfiles and check them
+    receiver.succeed(
+        "croc --pass ${pass} --yes --relay relay topSecret"
+    )
+    assert "Hello World" in receiver.succeed("cat testfile01.txt")
+    assert "Hello Earth" in receiver.succeed("cat testfile02.txt")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/cups-pdf.nix b/nixpkgs/nixos/tests/cups-pdf.nix
new file mode 100644
index 000000000000..5602193b0408
--- /dev/null
+++ b/nixpkgs/nixos/tests/cups-pdf.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "cups-pdf";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/user-account.nix ];
+    environment.systemPackages = [ pkgs.poppler_utils ];
+    fonts.packages = [ pkgs.dejavu_fonts ];  # yields more OCR-able pdf
+    services.printing.cups-pdf.enable = true;
+    services.printing.cups-pdf.instances = {
+      opt = {};
+      noopt.installPrinter = false;
+    };
+    hardware.printers.ensurePrinters = [{
+      name = "noopt";
+      model = "CUPS-PDF_noopt.ppd";
+      deviceUri = "cups-pdf:/noopt";
+    }];
+  };
+
+  # We cannot check the files with pdftotext due to
+  # https://github.com/alexivkin/CUPS-PDF-to-PDF/issues/7,
+  # so we rasterize and OCR them instead; we need `imagemagickBig` as it has Ghostscript support.
+
+  testScript = ''
+    from subprocess import run
+    machine.wait_for_unit("multi-user.target")
+    for name in ("opt", "noopt"):
+        text = f"test text {name}".upper()
+        machine.wait_until_succeeds(f"lpstat -v {name}")
+        machine.succeed(f"su - alice -c 'echo -e \"\n  {text}\" | lp -d {name}'")
+        # wait until the pdf files are completely produced and readable by alice
+        machine.wait_until_succeeds(f"su - alice -c 'pdfinfo /var/spool/cups-pdf-{name}/users/alice/*.pdf'")
+        machine.succeed(f"cp /var/spool/cups-pdf-{name}/users/alice/*.pdf /tmp/{name}.pdf")
+        machine.copy_from_vm(f"/tmp/{name}.pdf", "")
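+        # The remaining steps run on the test driver rather than in the VM: rasterize
+        # the copied PDF with ImageMagick and OCR it to confirm the printed text.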
+        run(f"${pkgs.imagemagickBig}/bin/convert -density 300 $out/{name}.pdf $out/{name}.jpeg", shell=True, check=True)
+        assert text.encode() in run(f"${lib.getExe pkgs.tesseract} $out/{name}.jpeg stdout", shell=True, check=True, capture_output=True).stdout
+  '';
+
+  meta.maintainers = [ lib.maintainers.yarny ];
+})
diff --git a/nixpkgs/nixos/tests/curl-impersonate.nix b/nixpkgs/nixos/tests/curl-impersonate.nix
new file mode 100644
index 000000000000..7954e9e5584c
--- /dev/null
+++ b/nixpkgs/nixos/tests/curl-impersonate.nix
@@ -0,0 +1,157 @@
+/*
+  Test suite for curl-impersonate
+
+  Abstract:
+    Uses the test suite from the curl-impersonate source repo which:
+
+    1. Performs requests with libcurl and captures the TLS client-hello
+       packets with tcpdump to compare against known-good signatures
+    2. Spins up an nghttpd2 server to test client HTTP/2 headers against
+       known-good headers
+
+    See https://github.com/lwthiker/curl-impersonate/tree/main/tests/signatures
+    for details.
+
+  Notes:
+    - We need to run our own web server because the tests expect to reach
+      domains like wikipedia.org and the sandbox has no internet access.
+    - We need to perform (verifying) TLS handshakes without internet access.
+      We do that by creating a trusted CA, issuing a cert that includes
+      all of the test domains as subject-alternative names, and then spoofing
+      the hostnames in /etc/hosts.
+*/
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+  # Update with domains in TestImpersonate.TEST_URLS if needed from:
+  # https://github.com/lwthiker/curl-impersonate/blob/main/tests/test_impersonate.py
+  domains = [
+    "www.wikimedia.org"
+    "www.wikipedia.org"
+    "www.mozilla.org"
+    "www.apache.org"
+    "www.kernel.org"
+    "git-scm.com"
+  ];
+
+  tls-certs = let
+    # Configure CA with X.509 v3 extensions that would be trusted by curl
+    ca-cert-conf = pkgs.writeText "curl-impersonate-ca.cnf" ''
+      basicConstraints = critical, CA:TRUE
+      subjectKeyIdentifier = hash
+      authorityKeyIdentifier = keyid:always, issuer:always
+      keyUsage = critical, cRLSign, digitalSignature, keyCertSign
+    '';
+
+    # Configure leaf certificate with X.509 v3 extensions that would be trusted
+    # by curl and set subject-alternative names for test domains
+    tls-cert-conf = pkgs.writeText "curl-impersonate-tls.cnf" ''
+      basicConstraints = critical, CA:FALSE
+      subjectKeyIdentifier = hash
+      authorityKeyIdentifier = keyid:always, issuer:always
+      keyUsage = critical, nonRepudiation, digitalSignature, keyEncipherment, keyAgreement
+      extendedKeyUsage = critical, serverAuth
+      subjectAltName = @alt_names
+
+      [alt_names]
+      ${lib.concatStringsSep "\n" (lib.imap0 (idx: domain: "DNS.${toString idx} = ${domain}") domains)}
+    '';
+  in pkgs.runCommand "curl-impersonate-test-certs" {
+    nativeBuildInputs = [ pkgs.openssl ];
+  } ''
+    # create CA certificate and key
+    openssl req -newkey rsa:4096 -keyout ca-key.pem -out ca-csr.pem -nodes -subj '/CN=curl-impersonate-ca.nixos.test'
+    openssl x509 -req -sha512 -in ca-csr.pem -key ca-key.pem -out ca.pem -extfile ${ca-cert-conf} -days 36500
+    openssl x509 -in ca.pem -text
+
+    # create server certificate and key
+    openssl req -newkey rsa:4096 -keyout key.pem -out csr.pem -nodes -subj '/CN=curl-impersonate.nixos.test'
+    openssl x509 -req -sha512 -in csr.pem -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out cert.pem -extfile ${tls-cert-conf} -days 36500
+    openssl x509 -in cert.pem -text
+
+    # output CA cert and server cert and key
+    mkdir -p $out
+    cp key.pem cert.pem ca.pem $out
+  '';
+
+  # Test script
+  curl-impersonate-test = let
+    # Build miniature libcurl client used by test driver
+    minicurl = pkgs.runCommandCC "minicurl" {
+      buildInputs = [ pkgs.curl ];
+    } ''
+      mkdir -p $out/bin
+      $CC -Wall -Werror -o $out/bin/minicurl ${pkgs.curl-impersonate.src}/tests/minicurl.c `curl-config --libs`
+    '';
+  in pkgs.writeShellScript "curl-impersonate-test" ''
+    set -euxo pipefail
+
+    # Test driver requirements
+    export PATH="${with pkgs; lib.makeBinPath [
+      bash
+      coreutils
+      python3Packages.pytest
+      nghttp2
+      tcpdump
+    ]}"
+    export PYTHONPATH="${with pkgs.python3Packages; makePythonPath [
+      pyyaml
+      pytest-asyncio
+      dpkt
+    ]}"
+
+    # Prepare test root prefix
+    mkdir -p usr/{bin,lib}
+    cp -rs ${pkgs.curl-impersonate}/* ${minicurl}/* usr/
+
+    cp -r ${pkgs.curl-impersonate.src}/tests ./
+
+    # Run tests
+    cd tests
+    pytest . --install-dir ../usr --capture-interface eth1
+  '';
+in {
+  name = "curl-impersonate";
+
+  meta = with lib.maintainers; {
+    maintainers = [ lilyinstarlight ];
+  };
+
+  nodes = {
+    web = { nodes, pkgs, lib, config, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 443 ];
+
+      services = {
+        nginx = {
+          enable = true;
+          virtualHosts."curl-impersonate.nixos.test" = {
+            default = true;
+            addSSL = true;
+            sslCertificate = "${tls-certs}/cert.pem";
+            sslCertificateKey = "${tls-certs}/key.pem";
+          };
+        };
+      };
+    };
+
+    curl = { nodes, pkgs, lib, config, ... }: {
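+      # Resolve every test domain to the sandboxed web server and trust the test CA
+      # so TLS verification can succeed without internet access.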
+      networking.extraHosts = lib.concatStringsSep "\n" (map (domain: "${nodes.web.networking.primaryIPAddress}  ${domain}") domains);
+
+      security.pki.certificateFiles = [ "${tls-certs}/ca.pem" ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+
+    with subtest("Wait for network"):
+        web.wait_for_unit("network-online.target")
+        curl.wait_for_unit("network-online.target")
+
+    with subtest("Wait for web server"):
+        web.wait_for_unit("nginx.service")
+        web.wait_for_open_port(443)
+
+    with subtest("Run curl-impersonate tests"):
+        curl.succeed("${curl-impersonate-test}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/custom-ca.nix b/nixpkgs/nixos/tests/custom-ca.nix
new file mode 100644
index 000000000000..0fcdf81022d7
--- /dev/null
+++ b/nixpkgs/nixos/tests/custom-ca.nix
@@ -0,0 +1,195 @@
+# Checks that `security.pki` options are working in curl and the main browser
+# engines: Gecko (via Firefox), Chromium, QtWebEngine (via qutebrowser) and
+# WebKitGTK (via Midori). The test checks that certificates issued by a custom
+# trusted CA are accepted but those from an unknown CA are rejected.
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  inherit (pkgs) lib;
+
+  makeCert = { caName, domain }: pkgs.runCommand "example-cert"
+  { buildInputs = [ pkgs.gnutls ]; }
+  ''
+    mkdir $out
+
+    # CA cert template
+    cat >ca.template <<EOF
+    organization = "${caName}"
+    cn = "${caName}"
+    expiration_days = 365
+    ca
+    cert_signing_key
+    crl_signing_key
+    EOF
+
+    # server cert template
+    cat >server.template <<EOF
+    organization = "An example company"
+    cn = "${domain}"
+    expiration_days = 30
+    dns_name = "${domain}"
+    encryption_key
+    signing_key
+    EOF
+
+    # generate CA keypair
+    certtool                \
+      --generate-privkey    \
+      --key-type rsa        \
+      --sec-param High      \
+      --outfile $out/ca.key
+    certtool                     \
+      --generate-self-signed     \
+      --load-privkey $out/ca.key \
+      --template ca.template     \
+      --outfile $out/ca.crt
+
+    # generate server keypair
+    certtool                    \
+      --generate-privkey        \
+      --key-type rsa            \
+      --sec-param High          \
+      --outfile $out/server.key
+    certtool                            \
+      --generate-certificate            \
+      --load-privkey $out/server.key    \
+      --load-ca-privkey $out/ca.key     \
+      --load-ca-certificate $out/ca.crt \
+      --template server.template        \
+      --outfile $out/server.crt
+  '';
+
+  example-good-cert = makeCert
+    { caName = "Example good CA";
+      domain = "good.example.com";
+    };
+
+  example-bad-cert = makeCert
+    { caName = "Unknown CA";
+      domain = "bad.example.com";
+    };
+
+  webserverConfig =
+    { networking.hosts."127.0.0.1" = [ "good.example.com" "bad.example.com" ];
+      security.pki.certificateFiles = [ "${example-good-cert}/ca.crt" ];
+
+      services.nginx.enable = true;
+      services.nginx.virtualHosts."good.example.com" =
+        { onlySSL = true;
+          sslCertificate = "${example-good-cert}/server.crt";
+          sslCertificateKey = "${example-good-cert}/server.key";
+          locations."/".extraConfig = ''
+            add_header Content-Type text/plain;
+            return 200 'It works!';
+          '';
+        };
+      services.nginx.virtualHosts."bad.example.com" =
+        { onlySSL = true;
+          sslCertificate = "${example-bad-cert}/server.crt";
+          sslCertificateKey = "${example-bad-cert}/server.key";
+          locations."/".extraConfig = ''
+            add_header Content-Type text/plain;
+            return 200 'It does not work!';
+          '';
+        };
+    };
+
+  curlTest = makeTest {
+    name = "custom-ca-curl";
+    meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+    nodes.machine = { ... }: webserverConfig;
+    testScript = ''
+        with subtest("Good certificate is trusted in curl"):
+            machine.wait_for_unit("nginx")
+            machine.wait_for_open_port(443)
+            machine.succeed("curl -fv https://good.example.com")
+
+        with subtest("Unknown CA is untrusted in curl"):
+            machine.fail("curl -fv https://bad.example.com")
+    '';
+  };
+
+  mkBrowserTest = browser: testParams: makeTest {
+    name = "custom-ca-${browser}";
+    meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+    enableOCR = true;
+
+    nodes.machine = { pkgs, ... }:
+      { imports =
+          [ ./common/user-account.nix
+            ./common/x11.nix
+            webserverConfig
+          ];
+
+        # chromium-based browsers refuse to run as root
+        test-support.displayManager.auto.user = "alice";
+
+        # machine often runs out of memory with less
+        virtualisation.memorySize = 1024;
+
+        environment.systemPackages = [ pkgs.xdotool pkgs.${browser} ];
+      };
+
+    testScript = ''
+      from typing import Tuple
+      def execute_as(user: str, cmd: str) -> Tuple[int, str]:
+          """
+          Run a shell command as a specific user.
+          """
+          return machine.execute(f"sudo -u {user} {cmd}")
+
+
+      def wait_for_window_as(user: str, cls: str) -> None:
+          """
+          Wait until an X11 window of a given user appears.
+          """
+
+          def window_is_visible(last_try: bool) -> bool:
+              ret, stdout = execute_as(user, f"xdotool search --onlyvisible --class {cls}")
+              if last_try:
+                  machine.log(f"Last chance to match {cls} on the window list")
+              return ret == 0
+
+          with machine.nested("Waiting for a window to appear"):
+              retry(window_is_visible)
+
+
+      machine.start()
+      machine.wait_for_x()
+
+      command = "${browser} ${testParams.args or ""}"
+      with subtest("Good certificate is trusted in ${browser}"):
+          execute_as(
+              "alice", f"{command} https://good.example.com >&2 &"
+          )
+          wait_for_window_as("alice", "${browser}")
+          machine.sleep(4)
+          execute_as("alice", "xdotool key ctrl+r")  # reload to be safe
+          machine.wait_for_text("It works!")
+          machine.screenshot("good${browser}")
+          execute_as("alice", "xdotool key ctrl+w")  # close tab
+
+      with subtest("Unknown CA is untrusted in ${browser}"):
+          execute_as("alice", f"{command} https://bad.example.com >&2 &")
+          machine.wait_for_text("${testParams.error}")
+          machine.screenshot("bad${browser}")
+    '';
+  };
+
+in
+
+{
+  curl = curlTest;
+} // pkgs.lib.mapAttrs mkBrowserTest {
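+  # Each attribute maps a browser to the error text expected when it is pointed at
+  # the host serving the untrusted certificate (plus optional extra CLI arguments).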
+  firefox = { error = "Security Risk"; };
+  chromium = { error = "not private"; };
+  qutebrowser = { args = "-T"; error = "Certificate error"; };
+  midori = { args = "-p"; error = "Security"; };
+}
diff --git a/nixpkgs/nixos/tests/dae.nix b/nixpkgs/nixos/tests/dae.nix
new file mode 100644
index 000000000000..42a2eb5fe0be
--- /dev/null
+++ b/nixpkgs/nixos/tests/dae.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+
+  name = "dae";
+
+  meta = {
+    maintainers = with lib.maintainers; [ oluceps ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.nginx = {
+      enable = true;
+      statusPage = true;
+    };
+    services.dae = {
+      enable = true;
+      config = ''
+        global{}
+        routing{}
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("dae.service")
+
+    machine.wait_for_open_port(80)
+
+    machine.succeed("curl --fail --max-time 10 http://localhost")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/darling.nix b/nixpkgs/nixos/tests/darling.nix
new file mode 100644
index 000000000000..5665b4c2ffef
--- /dev/null
+++ b/nixpkgs/nixos/tests/darling.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  # Well, we _can_ cross-compile from Linux :)
+  hello = pkgs.runCommand "hello" {
+    sdk = "${pkgs.darling.sdk}/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX.sdk";
+    nativeBuildInputs = with pkgs.llvmPackages_14; [ clang-unwrapped lld ];
+    src = pkgs.writeText "hello.c" ''
+      #include <stdio.h>
+      int main() {
+        printf("Hello, Darling!\n");
+        return 0;
+      }
+    '';
+  } ''
+    clang \
+      -target x86_64-apple-darwin \
+      -fuse-ld=lld \
+      -nostdinc -nostdlib \
+      -mmacosx-version-min=10.15 \
+      --sysroot $sdk \
+      -isystem $sdk/usr/include \
+      -L $sdk/usr/lib -lSystem \
+      $src -o $out
+  '';
+in
+{
+  name = "darling";
+
+  meta.maintainers = with lib.maintainers; [ zhaofengli ];
+
+  nodes.machine = {
+    programs.darling.enable = true;
+  };
+
+  testScript = ''
+    start_all()
+
+    # Darling holds stdout until the server is shutdown
+    machine.succeed("darling ${hello} >hello.out")
+    machine.succeed("grep Hello hello.out")
+    machine.succeed("darling shutdown")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dconf.nix b/nixpkgs/nixos/tests/dconf.nix
new file mode 100644
index 000000000000..192c075540a4
--- /dev/null
+++ b/nixpkgs/nixos/tests/dconf.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix
+  ({ lib, ... }:
+  {
+    name = "dconf";
+
+    meta.maintainers = with lib.maintainers; [
+      linsui
+    ];
+
+    nodes.machine = { config, pkgs, lib, ... }: {
+      users.extraUsers.alice = { isNormalUser = true; };
+      programs.dconf = with lib.gvariant; {
+        enable = true;
+        profiles.user.databases = [
+          {
+            settings = {
+              "test/not".locked = mkInt32 1;
+              "test/is".locked = "locked";
+            };
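+            # Only /test/is/locked is listed below, so the test script expects writes
+            # to it to fail while /test/not/locked remains writable.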
+            locks = [
+              "/test/is/locked"
+            ];
+          }
+        ];
+      };
+    };
+
+    testScript = ''
+      machine.succeed("test $(dconf read -d /test/not/locked) == 1")
+      machine.succeed("test $(dconf read -d /test/is/locked) == \"'locked'\"")
+      machine.fail("sudo -u alice dbus-run-session -- dconf write /test/is/locked \"@s 'unlocked'\"")
+      machine.succeed("sudo -u alice dbus-run-session -- dconf write /test/not/locked \"@i 2\"")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/deconz.nix b/nixpkgs/nixos/tests/deconz.nix
new file mode 100644
index 000000000000..cbe721ba4925
--- /dev/null
+++ b/nixpkgs/nixos/tests/deconz.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  httpPort = 800;
+in
+{
+  name = "deconz";
+
+  meta.maintainers = with lib.maintainers; [
+    bjornfor
+  ];
+
+  nodes.machine = { config, pkgs, lib, ... }: {
+    nixpkgs.config.allowUnfree = true;
+    services.deconz = {
+      enable = true;
+      inherit httpPort;
+      extraArgs = [
+        "--dbg-err=2"
+        "--dbg-info=2"
+      ];
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("deconz.service")
+    machine.succeed("curl -sfL http://localhost:${toString httpPort}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/deepin.nix b/nixpkgs/nixos/tests/deepin.nix
new file mode 100644
index 000000000000..7b2e2430f31c
--- /dev/null
+++ b/nixpkgs/nixos/tests/deepin.nix
@@ -0,0 +1,57 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "deepin";
+
+  meta.maintainers = lib.teams.deepin.members;
+
+  nodes.machine = { ... }: {
+    imports = [
+      ./common/user-account.nix
+    ];
+
+    virtualisation.memorySize = 2048;
+
+    services.xserver.enable = true;
+
+    services.xserver.displayManager = {
+      lightdm.enable = true;
+      autoLogin = {
+        enable = true;
+        user = "alice";
+      };
+    };
+
+    services.xserver.desktopManager.deepin.enable = true;
+  };
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.users.users.alice;
+    in
+    ''
+      with subtest("Wait for login"):
+          machine.wait_for_x()
+          machine.wait_for_file("${user.home}/.Xauthority")
+          machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+      with subtest("Check that logging in has given the user ownership of devices"):
+          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+      with subtest("Check if DDE wm chooser actually start"):
+          machine.wait_until_succeeds("pgrep -f dde-wm-chooser")
+          machine.wait_for_window("dde-wm-chooser")
+          machine.execute("pkill dde-wm-chooser")
+
+
+      with subtest("Check if Deepin session components actually start"):
+          machine.wait_until_succeeds("pgrep -f dde-session-daemon")
+          machine.wait_for_window("dde-session-daemon")
+          machine.wait_until_succeeds("pgrep -f dde-desktop")
+          machine.wait_for_window("dde-desktop")
+
+      with subtest("Open deepin-terminal"):
+          machine.succeed("su - ${user.name} -c 'DISPLAY=:0 deepin-terminal >&2 &'")
+          machine.wait_for_window("deepin-terminal")
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/deluge.nix b/nixpkgs/nixos/tests/deluge.nix
new file mode 100644
index 000000000000..e8945fdea003
--- /dev/null
+++ b/nixpkgs/nixos/tests/deluge.nix
@@ -0,0 +1,63 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "deluge";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ flokli ];
+  };
+
+  nodes = {
+    simple = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-2_x;
+        web = {
+          enable = true;
+          openFirewall = true;
+        };
+      };
+    };
+
+    declarative = {
+      services.deluge = {
+        enable = true;
+        package = pkgs.deluge-2_x;
+        openFirewall = true;
+        declarative = true;
+        config = {
+          allow_remote = true;
+          download_location = "/var/lib/deluge/my-download";
+          daemon_port = 58846;
+          listen_ports = [ 6881 6889 ];
+        };
+        web = {
+          enable = true;
+          port = 3142;
+        };
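+        # Deluge auth file entries use the format <username>:<password>:<auth level>,
+        # where level 10 grants full admin access.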
+        authFile = pkgs.writeText "deluge-auth" ''
+          localclient:a7bef72a890:10
+          andrew:password:10
+          user3:anotherpass:5
+        '';
+      };
+    };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    simple.wait_for_unit("deluged")
+    simple.wait_for_unit("delugeweb")
+    simple.wait_for_open_port(8112)
+    declarative.wait_for_unit("network.target")
+    declarative.wait_until_succeeds("curl --fail http://simple:8112")
+
+    declarative.wait_for_unit("deluged")
+    declarative.wait_for_unit("delugeweb")
+    declarative.wait_until_succeeds("curl --fail http://declarative:3142")
+
+    # deluge-console always exits with 1. https://dev.deluge-torrent.org/ticket/3291
+    declarative.succeed(
+        "(deluge-console 'connect 127.0.0.1:58846 andrew password; help' || true) | grep -q 'rm.*Remove a torrent'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dex-oidc.nix b/nixpkgs/nixos/tests/dex-oidc.nix
new file mode 100644
index 000000000000..e54ae18ca937
--- /dev/null
+++ b/nixpkgs/nixos/tests/dex-oidc.nix
@@ -0,0 +1,78 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "dex-oidc";
+  meta.maintainers = with lib.maintainers; [ Flakebi ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = with pkgs; [ jq ];
+    services.dex = {
+      enable = true;
+      settings = {
+        issuer = "http://127.0.0.1:8080/dex";
+        storage = {
+          type = "postgres";
+          config.host = "/var/run/postgresql";
+        };
+        web.http = "127.0.0.1:8080";
+        oauth2.skipApprovalScreen = true;
+        staticClients = [
+          {
+            id = "oidcclient";
+            name = "Client";
+            redirectURIs = [ "https://example.com/callback" ];
+            secretFile = "/etc/dex/oidcclient";
+          }
+        ];
+        connectors = [
+          {
+            type = "mockPassword";
+            id = "mock";
+            name = "Example";
+            config = {
+              username = "admin";
+              password = "password";
+            };
+          }
+        ];
+      };
+    };
+
+    # This secret should not be set from Nix but provisioned through other means, so it does not end up in the world-readable Nix store.
+    environment.etc."dex/oidcclient" = {
+      mode = "0400";
+      user = "dex";
+      text = "oidcclientsecret";
+    };
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases =[ "dex" ];
+      ensureUsers = [
+        {
+          name = "dex";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+  };
+
+  testScript = ''
+    with subtest("Web server gets ready"):
+        machine.wait_for_unit("dex.service")
+        # Wait until server accepts connections
+        machine.wait_until_succeeds("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid'")
+
+    with subtest("Login"):
+        state = machine.succeed("curl -fs 'localhost:8080/dex/auth/mock?client_id=oidcclient&response_type=code&redirect_uri=https://example.com/callback&scope=openid' | sed -n 's/.*state=\\(.*\\)\">.*/\\1/p'").strip()
+        print(f"Got state {state}")
+        machine.succeed(f"curl -fs 'localhost:8080/dex/auth/mock/login?back=&state={state}' -d 'login=admin&password=password'")
+        code = machine.succeed(f"curl -fs localhost:8080/dex/approval?req={state} | sed -n 's/.*code=\\(.*\\)&amp;.*/\\1/p'").strip()
+        print(f"Got approval code {code}")
+        bearer = machine.succeed(f"curl -fs localhost:8080/dex/token -u oidcclient:oidcclientsecret -d 'grant_type=authorization_code&redirect_uri=https://example.com/callback&code={code}' | jq .access_token -r").strip()
+        print(f"Got access token {bearer}")
+
+    with subtest("Get userinfo"):
+        assert '"sub"' in machine.succeed(
+            f"curl -fs localhost:8080/dex/userinfo --oauth2-bearer {bearer}"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dhparams.nix b/nixpkgs/nixos/tests/dhparams.nix
new file mode 100644
index 000000000000..021042fafdb1
--- /dev/null
+++ b/nixpkgs/nixos/tests/dhparams.nix
@@ -0,0 +1,128 @@
+import ./make-test-python.nix {
+  name = "dhparams";
+
+  nodes.machine = { pkgs, ... }: {
+    security.dhparams.enable = true;
+    environment.systemPackages = [ pkgs.openssl ];
+
+    specialisation = {
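+      # Each genN specialisation is a successive system configuration; the test script
+      # switches between them to check that dhparams files are (re)generated and
+      # cleaned up as expected.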
+      gen1.configuration = { config, ... }: {
+        security.dhparams.params = {
+          # Use low values here because we don't want the test to run for ages.
+          foo.bits = 1024;
+          # Also use the old format to make sure the type is coerced in the right
+          # way.
+          bar = 1025;
+        };
+
+        systemd.services.foo = {
+          description = "Check systemd Ordering";
+          wantedBy = [ "multi-user.target" ];
+          unitConfig = {
+            # This is to make sure that the dhparams generation of foo occurs
+            # before this service so we need this service to start as early as
+            # possible to provoke a race condition.
+            DefaultDependencies = false;
+
+            # We check later whether the service has been started or not.
+            ConditionPathExists = config.security.dhparams.params.foo.path;
+          };
+          serviceConfig.Type = "oneshot";
+          serviceConfig.RemainAfterExit = true;
+          # The reason we only provide an ExecStop here is to ensure that we don't
+          # accidentally trigger an error because a file system is not yet ready
+          # during very early startup (we might not even have the Nix store
+          # available, for example if future changes in NixOS use systemd mount
+          # units to do early file system initialisation).
+          serviceConfig.ExecStop = "${pkgs.coreutils}/bin/true";
+        };
+      };
+      gen2.configuration = {
+        security.dhparams.params.foo.bits = 1026;
+      };
+      gen3.configuration = {};
+      gen4.configuration = {
+        security.dhparams.stateful = false;
+        security.dhparams.params.foo2.bits = 1027;
+        security.dhparams.params.bar2.bits = 1028;
+      };
+      gen5.configuration = {
+        security.dhparams.defaultBitSize = 1029;
+        security.dhparams.params.foo3 = {};
+        security.dhparams.params.bar3 = {};
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: let
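+    # getParamPath looks up the generated dhparams file path in a given specialisation;
+    # switchToGeneration produces the Python snippet that activates that specialisation.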
+    getParamPath = gen: name: let
+      node = "gen${toString gen}";
+    in nodes.machine.config.specialisation.${node}.configuration.security.dhparams.params.${name}.path;
+
+    switchToGeneration = gen: let
+      switchCmd = "${nodes.machine.config.system.build.toplevel}/specialisation/gen${toString gen}/bin/switch-to-configuration test";
+    in ''
+      with machine.nested("switch to generation ${toString gen}"):
+        machine.succeed("${switchCmd}")
+    '';
+
+  in ''
+    import re
+
+
+    def assert_param_bits(path, bits):
+        with machine.nested(f"check bit size of {path}"):
+            output = machine.succeed(f"openssl dhparam -in {path} -text")
+            pattern = re.compile(r"^\s*DH Parameters:\s+\((\d+)\s+bit\)\s*$", re.M)
+            match = pattern.match(output)
+            if match is None:
+                raise Exception("bla")
+            if match[1] != str(bits):
+                raise Exception(f"bit size should be {bits} but it is {match[1]} instead.")
+
+    machine.wait_for_unit("multi-user.target")
+    ${switchToGeneration 1}
+
+    with subtest("verify startup order"):
+        machine.succeed("systemctl is-active foo.service")
+
+    with subtest("check bit sizes of dhparam files"):
+        assert_param_bits("${getParamPath 1 "foo"}", 1024)
+        assert_param_bits("${getParamPath 1 "bar"}", 1025)
+
+    ${switchToGeneration 2}
+
+    with subtest("check whether bit size has changed"):
+        assert_param_bits("${getParamPath 2 "foo"}", 1026)
+
+    with subtest("ensure that dhparams file for 'bar' was deleted"):
+        machine.fail("test -e ${getParamPath 1 "bar"}")
+
+    ${switchToGeneration 3}
+
+    with subtest("ensure that 'security.dhparams.path' has been deleted"):
+        machine.fail("test -e ${nodes.machine.config.specialisation.gen3.configuration.security.dhparams.path}")
+
+    ${switchToGeneration 4}
+
+    with subtest("check bit sizes dhparam files"):
+        assert_param_bits(
+            "${getParamPath 4 "foo2"}", 1027
+        )
+        assert_param_bits(
+            "${getParamPath 4 "bar2"}", 1028
+        )
+
+    with subtest("check whether dhparam files are in the Nix store"):
+        machine.succeed(
+            "expr match ${getParamPath 4 "foo2"} ${builtins.storeDir}",
+            "expr match ${getParamPath 4 "bar2"} ${builtins.storeDir}",
+        )
+
+    ${switchToGeneration 5}
+
+    with subtest("check whether defaultBitSize works as intended"):
+        assert_param_bits("${getParamPath 5 "foo3"}", 1029)
+        assert_param_bits("${getParamPath 5 "bar3"}", 1029)
+  '';
+}
diff --git a/nixpkgs/nixos/tests/disable-installer-tools.nix b/nixpkgs/nixos/tests/disable-installer-tools.nix
new file mode 100644
index 000000000000..69f99122753a
--- /dev/null
+++ b/nixpkgs/nixos/tests/disable-installer-tools.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
+
+{
+  name = "disable-installer-tools";
+
+  nodes.machine =
+    { pkgs, lib, ... }:
+    {
+        system.disableInstallerTools = true;
+        boot.enableContainers = false;
+        environment.defaultPackages = [];
+    };
+
+  testScript = ''
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+
+      with subtest("nixos installer tools should not be included"):
+          machine.fail("which nixos-rebuild")
+          machine.fail("which nixos-install")
+          machine.fail("which nixos-generate-config")
+          machine.fail("which nixos-enter")
+          machine.fail("which nixos-version")
+          machine.fail("which nixos-build-vms")
+
+      with subtest("perl should not be included"):
+          machine.fail("which perl")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/discourse.nix b/nixpkgs/nixos/tests/discourse.nix
new file mode 100644
index 000000000000..3e69a314905c
--- /dev/null
+++ b/nixpkgs/nixos/tests/discourse.nix
@@ -0,0 +1,202 @@
+# This tests Discourse by:
+#  1. logging in as the admin user
+#  2. sending a private message to the admin user through the API
+#  3. replying to that message via email.
+
+import ./make-test-python.nix (
+  { pkgs, lib, package ? pkgs.discourse, ... }:
+  let
+    certs = import ./common/acme/server/snakeoil-certs.nix;
+    clientDomain = "client.fake.domain";
+    discourseDomain = certs.domain;
+    adminPassword = "eYAX85qmMJ5GZIHLaXGDAoszD7HSZp5d";
+    secretKeyBase = "381f4ac6d8f5e49d804dae72aa9c046431d2f34c656a705c41cd52fed9b4f6f76f51549f0b55db3b8b0dded7a00d6a381ebe9a4367d2d44f5e743af6628b4d42";
+    admin = {
+      email = "alice@${clientDomain}";
+      username = "alice";
+      fullName = "Alice Admin";
+      passwordFile = "${pkgs.writeText "admin-pass" adminPassword}";
+    };
+  in
+  {
+    name = "discourse";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ talyz ];
+    };
+
+    nodes.discourse =
+      { nodes, ... }:
+      {
+        virtualisation.memorySize = 2048;
+        virtualisation.cores = 4;
+        virtualisation.useNixStoreImage = true;
+        virtualisation.writableStore = false;
+
+        imports = [ common/user-account.nix ];
+
+        security.pki.certificateFiles = [
+          certs.ca.cert
+        ];
+
+        networking.extraHosts = ''
+          127.0.0.1 ${discourseDomain}
+          ${nodes.client.networking.primaryIPAddress} ${clientDomain}
+        '';
+
+        services.postfix = {
+          enableSubmission = true;
+          enableSubmissions = true;
+          submissionsOptions = {
+            smtpd_sasl_auth_enable = "yes";
+            smtpd_client_restrictions = "permit";
+          };
+        };
+
+        environment.systemPackages = [ pkgs.jq ];
+
+        services.postgresql.package = pkgs.postgresql_13;
+
+        services.discourse = {
+          enable = true;
+          inherit admin package;
+          hostname = discourseDomain;
+          sslCertificate = "${certs.${discourseDomain}.cert}";
+          sslCertificateKey = "${certs.${discourseDomain}.key}";
+          secretKeyBaseFile = "${pkgs.writeText "secret-key-base" secretKeyBase}";
+          enableACME = false;
+          mail.outgoing.serverAddress = clientDomain;
+          mail.incoming.enable = true;
+          siteSettings = {
+            posting = {
+              min_post_length = 5;
+              min_first_post_length = 5;
+              min_personal_message_post_length = 5;
+            };
+          };
+          unicornTimeout = 900;
+        };
+
+        networking.firewall.allowedTCPPorts = [ 25 465 ];
+      };
+
+    nodes.client =
+      { nodes, ... }:
+      {
+        imports = [ common/user-account.nix ];
+
+        security.pki.certificateFiles = [
+          certs.ca.cert
+        ];
+
+        networking.extraHosts = ''
+          127.0.0.1 ${clientDomain}
+          ${nodes.discourse.networking.primaryIPAddress} ${discourseDomain}
+        '';
+
+        services.dovecot2 = {
+          enable = true;
+          protocols = [ "imap" ];
+          modules = [ pkgs.dovecot_pigeonhole ];
+        };
+
+        services.postfix = {
+          enable = true;
+          origin = clientDomain;
+          relayDomains = [ clientDomain ];
+          config = {
+            compatibility_level = "2";
+            smtpd_banner = "ESMTP server";
+            myhostname = clientDomain;
+            mydestination = clientDomain;
+          };
+        };
+
+        environment.systemPackages =
+          let
+            replyToEmail = pkgs.writeScriptBin "reply-to-email" ''
+              #!${pkgs.python3.interpreter}
+              import imaplib
+              import smtplib
+              import ssl
+              import email.header
+              from email import message_from_bytes
+              from email.message import EmailMessage
+
+              with imaplib.IMAP4('localhost') as imap:
+                  imap.login('alice', 'foobar')
+                  imap.select()
+                  status, data = imap.search(None, 'ALL')
+                  assert status == 'OK'
+
+                  nums = data[0].split()
+                  assert len(nums) == 1
+
+                  status, msg_data = imap.fetch(nums[0], '(RFC822)')
+                  assert status == 'OK'
+
+              msg = email.message_from_bytes(msg_data[0][1])
+              subject = str(email.header.make_header(email.header.decode_header(msg['Subject'])))
+              reply_to = email.header.decode_header(msg['Reply-To'])[0][0]
+              message_id = email.header.decode_header(msg['Message-ID'])[0][0]
+              date = email.header.decode_header(msg['Date'])[0][0]
+
+              ctx = ssl.create_default_context()
+              with smtplib.SMTP_SSL(host='${discourseDomain}', context=ctx) as smtp:
+                  reply = EmailMessage()
+                  reply['Subject'] = 'Re: ' + subject
+                  reply['To'] = reply_to
+                  reply['From'] = 'alice@${clientDomain}'
+                  reply['In-Reply-To'] = message_id
+                  reply['References'] = message_id
+                  reply['Date'] = date
+                  reply.set_content("Test reply.")
+
+                  smtp.send_message(reply)
+                  smtp.quit()
+            '';
+          in
+            [ replyToEmail ];
+
+        networking.firewall.allowedTCPPorts = [ 25 ];
+      };
+
+
+    testScript = { nodes }:
+      let
+        request = builtins.toJSON {
+          title = "Private message";
+          raw = "This is a test message.";
+          target_recipients = admin.username;
+          archetype = "private_message";
+        };
+      in ''
+        discourse.start()
+        client.start()
+
+        discourse.wait_for_unit("discourse.service")
+        discourse.wait_for_file("/run/discourse/sockets/unicorn.sock")
+        discourse.wait_until_succeeds("curl -sS -f https://${discourseDomain}")
+        discourse.succeed(
+            "curl -sS -f https://${discourseDomain}/session/csrf -c cookie -b cookie -H 'Accept: application/json' | jq -r '\"X-CSRF-Token: \" + .csrf' > csrf_token",
+            "curl -sS -f https://${discourseDomain}/session -c cookie -b cookie -H @csrf_token -H 'Accept: application/json' -d 'login=${nodes.discourse.services.discourse.admin.username}' -d \"password=${adminPassword}\" | jq -e '.user.username == \"${nodes.discourse.services.discourse.admin.username}\"'",
+            "curl -sS -f https://${discourseDomain}/login -v -H 'Accept: application/json' -c cookie -b cookie 2>&1 | grep ${nodes.discourse.services.discourse.admin.username}",
+        )
+
+        client.wait_for_unit("postfix.service")
+        client.wait_for_unit("dovecot2.service")
+
+        discourse.succeed(
+            "sudo -u discourse discourse-rake api_key:create_master[master] >api_key",
+            'curl -sS -f https://${discourseDomain}/posts -X POST -H "Content-Type: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" -d \'${request}\' ',
+        )
+
+        client.wait_until_succeeds("reply-to-email")
+
+        discourse.wait_until_succeeds(
+            'curl -sS -f https://${discourseDomain}/topics/private-messages/system -H "Accept: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" | jq -e \'if .topic_list.topics[0].id != null then .topic_list.topics[0].id else null end\' >topic_id'
+        )
+        discourse.succeed(
+            'curl -sS -f https://${discourseDomain}/t/$(<topic_id) -H "Accept: application/json" -H "Api-Key: $(<api_key)" -H "Api-Username: system" | jq -e \'if .post_stream.posts[1].cooked == "<p>Test reply.</p>" then true else null end\' '
+        )
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/dnscrypt-proxy2.nix b/nixpkgs/nixos/tests/dnscrypt-proxy2.nix
new file mode 100644
index 000000000000..a75a745d3553
--- /dev/null
+++ b/nixpkgs/nixos/tests/dnscrypt-proxy2.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ... }: let
+  localProxyPort = 43;
+in {
+  name = "dnscrypt-proxy2";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ joachifm ];
+  };
+
+  nodes = {
+    # A client running the recommended setup: DNSCrypt proxy as a forwarder
+    # for a caching DNS client.
+    client =
+    { ... }:
+    {
+      security.apparmor.enable = true;
+
+      services.dnscrypt-proxy2.enable = true;
+      services.dnscrypt-proxy2.settings = {
+        listen_addresses = [ "127.0.0.1:${toString localProxyPort}" ];
+        sources.public-resolvers = {
+          urls = [ "https://download.dnscrypt.info/resolvers-list/v2/public-resolvers.md" ];
+          cache_file = "public-resolvers.md";
+          minisign_key = "RWQf6LRCGA9i53mlYecO4IzT51TGPpvWucNSCh1CBM0QTaLn73Y7GFO3";
+          refresh_delay = 72;
+        };
+      };
+
+      services.dnsmasq.enable = true;
+      services.dnsmasq.settings.server = [ "127.0.0.1#${toString localProxyPort}" ];
+    };
+  };
+
+  testScript = ''
+    client.wait_for_unit("dnsmasq")
+    client.wait_for_unit("dnscrypt-proxy2")
+    client.wait_until_succeeds("ss --numeric --udp --listening | grep -q ${toString localProxyPort}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dnscrypt-wrapper/default.nix b/nixpkgs/nixos/tests/dnscrypt-wrapper/default.nix
new file mode 100644
index 000000000000..1a794931dc50
--- /dev/null
+++ b/nixpkgs/nixos/tests/dnscrypt-wrapper/default.nix
@@ -0,0 +1,148 @@
+
+{ lib, pkgs, ... }:
+
+let
+  snakeoil = import ../common/acme/server/snakeoil-certs.nix;
+
+  hosts = lib.mkForce
+   { "fd::a" = [ "server" snakeoil.domain ];
+     "fd::b" = [ "client" ];
+   };
+in
+
+{
+  name = "dnscrypt-wrapper";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  nodes = {
+    server = {
+      networking.hosts = hosts;
+      networking.interfaces.eth1.ipv6.addresses = lib.singleton
+        { address = "fd::a"; prefixLength = 64; };
+
+        services.dnscrypt-wrapper =
+          { enable = true;
+            address = "[::]";
+            port = 5353;
+            keys.expiration = 5; # days
+            keys.checkInterval = 2;  # min
+            # The keypair was generated by the command:
+            # dnscrypt-wrapper --gen-provider-keypair \
+            #  --provider-name=2.dnscrypt-cert.server \
+            providerKey.public = "${./public.key}";
+            providerKey.secret = "${./secret.key}";
+          };
+
+        # nameserver
+        services.bind.enable = true;
+        services.bind.zones = lib.singleton
+          { name = ".";
+            master = true;
+            file = pkgs.writeText "root.zone" ''
+              $TTL 3600
+              . IN SOA example.org. admin.example.org. ( 1 3h 1h 1w 1d )
+              . IN NS example.org.
+              example.org. IN AAAA 2001:db8::1
+            '';
+          };
+
+        # webserver
+        services.nginx.enable = true;
+        services.nginx.virtualHosts.${snakeoil.domain} =
+          { onlySSL = true;
+            listenAddresses = [ "localhost" ];
+            sslCertificate = snakeoil.${snakeoil.domain}.cert;
+            sslCertificateKey = snakeoil.${snakeoil.domain}.key;
+            locations."/ip".extraConfig = ''
+              default_type text/plain;
+              return 200 "Ciao $remote_addr!\n";
+            '';
+          };
+
+        # demultiplex HTTP and DNS from port 443
+        services.sslh =
+          { enable = true;
+            method = "ev";
+            settings.transparent = true;
+            settings.listen = lib.mkForce
+              [ { host = "server"; port = "443"; is_udp = false; }
+                { host = "server"; port = "443"; is_udp = true; }
+              ];
+            settings.protocols =
+              [ # Send TLS to webserver (TCP)
+                { name = "tls"; host= "localhost"; port= "443"; }
+                # Send DNSCrypt to dnscrypt-wrapper (TCP or UDP)
+                { name = "anyprot"; host = "localhost"; port = "5353"; }
+                { name = "anyprot"; host = "localhost"; port = "5353"; is_udp = true;}
+              ];
+          };
+
+        networking.firewall.allowedTCPPorts = [ 443 ];
+        networking.firewall.allowedUDPPorts = [ 443 ];
+      };
+
+    client = {
+      networking.hosts = hosts;
+      networking.interfaces.eth1.ipv6.addresses = lib.singleton
+        { address = "fd::b"; prefixLength = 64; };
+
+      services.dnscrypt-proxy2.enable = true;
+      services.dnscrypt-proxy2.upstreamDefaults = false;
+      services.dnscrypt-proxy2.settings =
+        { server_names = [ "server" ];
+          listen_addresses = [ "[::1]:53" ];
+          cache = false;
+          # Computed using https://dnscrypt.info/stamps/
+          static.server.stamp =
+            "sdns://AQAAAAAAAAAADzE5Mi4xNjguMS4yOjQ0MyAUQdg6"
+            +"_RIIpK6pHkINhrv7nxwIG5c7b_m5NJVT3A1AXRYyLmRuc2NyeXB0LWNlcnQuc2VydmVy";
+        };
+      networking.nameservers = [ "::1" ];
+      security.pki.certificateFiles = [ snakeoil.ca.cert ];
+    };
+
+  };
+
+  testScript = ''
+    with subtest("The server can generate the ephemeral keypair"):
+        server.wait_for_unit("dnscrypt-wrapper")
+        server.wait_for_file("/var/lib/dnscrypt-wrapper/2.dnscrypt-cert.server.key")
+        server.wait_for_file("/var/lib/dnscrypt-wrapper/2.dnscrypt-cert.server.crt")
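+        # Compute a timestamp just short of the 5-day key lifetime; used below to fast-forward the clock and trigger rotation.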
+        almost_expiration = server.succeed("date --date '4days 23 hours 56min'").strip()
+
+    with subtest("The DNSCrypt client can connect to the server"):
+        server.wait_for_unit("sslh")
+        client.wait_until_succeeds("journalctl -u dnscrypt-proxy2 --grep '\[server\] OK'")
+
+    with subtest("HTTP client can connect to the server"):
+        server.wait_for_unit("nginx")
+        client.succeed("curl -s --fail https://${snakeoil.domain}/ip | grep -q fd::b")
+
+    with subtest("DNS queries over UDP are working"):
+        server.wait_for_unit("bind")
+        client.wait_for_open_port(53)
+        assert "2001:db8::1" in client.wait_until_succeeds(
+            "host -U example.org"
+        ), "The IP address of 'example.org' does not match 2001:db8::1"
+
+    with subtest("DNS queries over TCP are working"):
+        server.wait_for_unit("bind")
+        client.wait_for_open_port(53)
+        assert "2001:db8::1" in client.wait_until_succeeds(
+            "host -T example.org"
+        ), "The IP address of 'example.org' does not match 2001:db8::1"
+
+    with subtest("The server rotates the ephemeral keys"):
+        # advance time by a little less than 5 days
+        server.succeed(f"date -s '{almost_expiration}'")
+        client.succeed(f"date -s '{almost_expiration}'")
+        server.wait_for_file("/var/lib/dnscrypt-wrapper/oldkeys")
+
+    with subtest("The client can still connect to the server"):
+        client.systemctl("restart dnscrypt-proxy2")
+        client.wait_until_succeeds("host -T example.org")
+        client.wait_until_succeeds("host -U example.org")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/dnscrypt-wrapper/public.key b/nixpkgs/nixos/tests/dnscrypt-wrapper/public.key
new file mode 100644
index 000000000000..80232b97f529
--- /dev/null
+++ b/nixpkgs/nixos/tests/dnscrypt-wrapper/public.key
@@ -0,0 +1 @@
+(binary data: DNSCrypt provider public key, not representable as text)
\ No newline at end of file
diff --git a/nixpkgs/nixos/tests/dnscrypt-wrapper/secret.key b/nixpkgs/nixos/tests/dnscrypt-wrapper/secret.key
new file mode 100644
index 000000000000..01fbf8e08b7a
--- /dev/null
+++ b/nixpkgs/nixos/tests/dnscrypt-wrapper/secret.key
@@ -0,0 +1 @@
+(binary data: DNSCrypt provider secret key, not representable as text)
\ No newline at end of file
diff --git a/nixpkgs/nixos/tests/dnsdist.nix b/nixpkgs/nixos/tests/dnsdist.nix
new file mode 100644
index 000000000000..e72fa05ff282
--- /dev/null
+++ b/nixpkgs/nixos/tests/dnsdist.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix (
+  { pkgs, ... }: {
+    name = "dnsdist";
+    meta = with pkgs.lib; {
+      maintainers = with maintainers; [ jojosch ];
+    };
+
+    nodes.machine = { pkgs, lib, ... }: {
+      services.bind = {
+        enable = true;
+        extraOptions = "empty-zones-enable no;";
+        zones = lib.singleton {
+          name = ".";
+          master = true;
+          file = pkgs.writeText "root.zone" ''
+            $TTL 3600
+            . IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d )
+            . IN NS ns.example.org.
+
+            ns.example.org. IN A    192.168.0.1
+            ns.example.org. IN AAAA abcd::1
+
+            1.0.168.192.in-addr.arpa IN PTR ns.example.org.
+          '';
+        };
+      };
+      services.dnsdist = {
+        enable = true;
+        listenPort = 5353;
+        extraConfig = ''
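+          -- Forward every query received by dnsdist to the local BIND instance on port 53.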
+          newServer({address="127.0.0.1:53", name="local-bind"})
+        '';
+      };
+
+      environment.systemPackages = with pkgs; [ dig ];
+    };
+
+    testScript = ''
+      machine.wait_for_unit("bind.service")
+      machine.wait_for_open_port(53)
+      machine.succeed("dig @127.0.0.1 +short -x 192.168.0.1 | grep -qF ns.example.org")
+
+      machine.wait_for_unit("dnsdist.service")
+      machine.wait_for_open_port(5353)
+      machine.succeed("dig @127.0.0.1 -p 5353 +short -x 192.168.0.1 | grep -qF ns.example.org")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/doas.nix b/nixpkgs/nixos/tests/doas.nix
new file mode 100644
index 000000000000..2aa8b02caf57
--- /dev/null
+++ b/nixpkgs/nixos/tests/doas.nix
@@ -0,0 +1,96 @@
+# Some tests to ensure doas is working properly.
+import ./make-test-python.nix (
+  { lib, ... }: {
+    name = "doas";
+    meta.maintainers = with lib.maintainers; [ cole-h ];
+
+    nodes.machine =
+      { ... }:
+        {
+          users.groups = { foobar = {}; barfoo = {}; baz = { gid = 1337; }; };
+          users.users = {
+            test0 = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+            test1 = { isNormalUser = true; };
+            test2 = { isNormalUser = true; extraGroups = [ "foobar" ]; };
+            test3 = { isNormalUser = true; extraGroups = [ "barfoo" ]; };
+            test4 = { isNormalUser = true; extraGroups = [ "baz" ]; };
+            test5 = { isNormalUser = true; };
+            test6 = { isNormalUser = true; };
+            test7 = { isNormalUser = true; };
+          };
+
+          security.doas = {
+            enable = true;
+            wheelNeedsPassword = false;
+
+            extraRules = [
+              { users = [ "test1" ]; groups = [ "foobar" ]; }
+              { users = [ "test2" ]; noPass = true; setEnv = [ "CORRECT" "HORSE=BATTERY" ]; }
+              { groups = [ "barfoo" 1337 ]; noPass = true; }
+              { users = [ "test5" ]; noPass = true; keepEnv = true; runAs = "test1"; }
+              { users = [ "test6" ]; noPass = true; keepEnv = true; setEnv = [ "-STAPLE" ]; }
+              { users = [ "test7" ]; noPass = true; setEnv = [ "-SSH_AUTH_SOCK" ]; }
+            ];
+          };
+        };
+
+    testScript = ''
+      with subtest("users in wheel group should have passwordless doas"):
+          machine.succeed('su - test0 -c "doas -u root true"')
+
+      with subtest("test1 user should not be able to use doas without password"):
+          machine.fail('su - test1 -c "doas -n -u root true"')
+
+      with subtest("test2 user should be able to keep some env"):
+          if "CORRECT=1" not in machine.succeed('su - test2 -c "CORRECT=1 doas env"'):
+              raise Exception("failed to keep CORRECT")
+
+          if "HORSE=BATTERY" not in machine.succeed('su - test2 -c "doas env"'):
+              raise Exception("failed to setenv HORSE=BATTERY")
+
+      with subtest("users in group 'barfoo' shouldn't require password"):
+          machine.succeed("doas -u test3 doas -n -u root true")
+
+      with subtest("users in group 'baz' (GID 1337) shouldn't require password"):
+          machine.succeed("doas -u test4 doas -n -u root echo true")
+
+      with subtest("test5 user should be able to run commands under test1"):
+          machine.succeed("doas -u test5 doas -n -u test1 true")
+
+      with subtest("test5 user should not be able to run commands under root"):
+          machine.fail("doas -u test5 doas -n -u root true")
+
+      with subtest("test6 user should be able to keepenv"):
+          envs = ["BATTERY=HORSE", "CORRECT=false"]
+          out = machine.succeed(
+              'su - test6 -c "BATTERY=HORSE CORRECT=false STAPLE=Tr0ub4dor doas env"'
+          )
+
+          if not all(env in out for env in envs):
+              raise Exception("failed to keep BATTERY or CORRECT")
+          if "STAPLE=Tr0ub4dor" in out:
+              raise Exception("failed to exclude STAPLE")
+
+      with subtest("test7 should not have access to SSH_AUTH_SOCK"):
+          if "SSH_AUTH_SOCK=HOLEY" in machine.succeed(
+              'su - test7 -c "SSH_AUTH_SOCK=HOLEY doas env"'
+          ):
+              raise Exception("failed to exclude SSH_AUTH_SOCK")
+
+      # Test that the doas setuid wrapper precedes the unwrapped version in PATH after
+      # calling doas.
+      # The PATH set by doas is defined in
+      # ../../pkgs/tools/security/doas/0001-add-NixOS-specific-dirs-to-safe-PATH.patch
+      with subtest("recursive calls to doas from subprocesses should succeed"):
+          machine.succeed('doas -u test0 sh -c "doas -u test0 true"')
+
+      with subtest("test0 should inherit TERMINFO_DIRS from the user environment"):
+          dirs = machine.succeed(
+               "su - test0 -c 'doas -u root $SHELL -c \"echo \$TERMINFO_DIRS\"'"
+          )
+
+          if not "test0" in dirs:
+             raise Exception(f"user profile TERMINFO_DIRS is not preserved: {dirs}")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/docker-registry.nix b/nixpkgs/nixos/tests/docker-registry.nix
new file mode 100644
index 000000000000..db20cb52c3e3
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-registry.nix
@@ -0,0 +1,61 @@
+# This test runs docker-registry and checks that it works.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "docker-registry";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ globin ironpinguin ];
+  };
+
+  nodes = {
+    registry = { ... }: {
+      services.dockerRegistry.enable = true;
+      services.dockerRegistry.enableDelete = true;
+      services.dockerRegistry.port = 8080;
+      services.dockerRegistry.listenAddress = "0.0.0.0";
+      services.dockerRegistry.enableGarbageCollect = true;
+      networking.firewall.allowedTCPPorts = [ 8080 ];
+    };
+
+    client1 = { ... }: {
+      virtualisation.docker.enable = true;
+      virtualisation.docker.extraOptions = "--insecure-registry registry:8080";
+    };
+
+    client2 = { ... }: {
+      virtualisation.docker.enable = true;
+      virtualisation.docker.extraOptions = "--insecure-registry registry:8080";
+    };
+  };
+
+  testScript = ''
+    client1.start()
+    client1.wait_for_unit("docker.service")
+    client1.succeed("tar cv --files-from /dev/null | docker import - scratch")
+    client1.succeed("docker tag scratch registry:8080/scratch")
+
+    registry.start()
+    registry.wait_for_unit("docker-registry.service")
+    registry.wait_for_open_port(8080)
+    client1.succeed("docker push registry:8080/scratch")
+
+    client2.start()
+    client2.wait_for_unit("docker.service")
+    client2.succeed("docker pull registry:8080/scratch")
+    client2.succeed("docker images | grep scratch")
+
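+    # Delete the image manifest by its content digest so the garbage collector can reclaim the blobs.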
+    client2.succeed(
+        "curl -fsS -X DELETE registry:8080/v2/scratch/manifests/$(curl -fsS -I -H\"Accept: application/vnd.docker.distribution.manifest.v2+json\" registry:8080/v2/scratch/manifests/latest | grep Docker-Content-Digest | sed -e 's/Docker-Content-Digest: //' | tr -d '\\r')"
+    )
+
+    registry.systemctl("start docker-registry-garbage-collect.service")
+    registry.wait_until_fails("systemctl status docker-registry-garbage-collect.service")
+    registry.wait_for_unit("docker-registry.service")
+
+    registry.fail("ls -l /var/lib/docker-registry/docker/registry/v2/blobs/sha256/*/*/data")
+
+    client1.succeed("docker push registry:8080/scratch")
+    registry.succeed(
+        "ls -l /var/lib/docker-registry/docker/registry/v2/blobs/sha256/*/*/data"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker-rootless.nix b/nixpkgs/nixos/tests/docker-rootless.nix
new file mode 100644
index 000000000000..e2a926eb3cb0
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-rootless.nix
@@ -0,0 +1,41 @@
+# This test runs docker and checks that a simple container starts.
+
+import ./make-test-python.nix ({ lib, pkgs, ...} : {
+  name = "docker-rootless";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ abbradar ];
+  };
+
+  nodes = {
+    machine = { pkgs, ... }: {
+      virtualisation.docker.rootless.enable = true;
+
+      users.users.alice = {
+        uid = 1000;
+        isNormalUser = true;
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.config.users.users.alice;
+      sudo = lib.concatStringsSep " " [
+        "XDG_RUNTIME_DIR=/run/user/${toString user.uid}"
+        "DOCKER_HOST=unix:///run/user/${toString user.uid}/docker.sock"
+        "sudo" "--preserve-env=XDG_RUNTIME_DIR,DOCKER_HOST" "-u" "alice"
+      ];
+    in ''
+      machine.wait_for_unit("multi-user.target")
+
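+      # Lingering starts alice's systemd user instance without an interactive login, so her rootless docker user service can run.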
+      machine.succeed("loginctl enable-linger alice")
+      machine.wait_until_succeeds("${sudo} systemctl --user is-active docker.service")
+
+      machine.succeed("tar cv --files-from /dev/null | ${sudo} docker import - scratchimg")
+      machine.succeed(
+          "${sudo} docker run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+      )
+      machine.succeed("${sudo} docker ps | grep sleeping")
+      machine.succeed("${sudo} docker stop sleeping")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/docker-tools-cross.nix b/nixpkgs/nixos/tests/docker-tools-cross.nix
new file mode 100644
index 000000000000..14cb14ceeaea
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-tools-cross.nix
@@ -0,0 +1,80 @@
+# Not everyone has a suitable remote builder set up, so the cross-compilation
+# tests that _include_ running the result are separate. That way, most people
+# can run the majority of the test suite without the extra setup.
+
+
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+
+  remoteSystem =
+    if pkgs.stdenv.hostPlatform.system == "aarch64-linux"
+    then "x86_64-linux"
+    else "aarch64-linux";
+
+  remoteCrossPkgs = import ../.. /*nixpkgs*/ {
+    # NOTE: This is the machine that runs the build - "local" from the
+    #       perspective of the build script.
+    localSystem = remoteSystem;
+
+    # NOTE: Since this file can't control where the test will be _run_, we don't
+    #       cross-compile _to_ a different system but _from_ a different system.
+    crossSystem = pkgs.stdenv.hostPlatform.system;
+  };
+
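+  # Images cross-built from the remote package set; both must be loadable and runnable on the local test machine.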
+  hello1 = remoteCrossPkgs.dockerTools.buildImage {
+    name = "hello1";
+    tag = "latest";
+    copyToRoot = remoteCrossPkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ remoteCrossPkgs.hello ];
+    };
+  };
+
+  hello2 = remoteCrossPkgs.dockerTools.buildLayeredImage {
+    name = "hello2";
+    tag = "latest";
+    contents = remoteCrossPkgs.hello;
+  };
+
+in {
+  name = "docker-tools";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ roberth ];
+  };
+
+  nodes = {
+    docker = { ... }: {
+      virtualisation = {
+        diskSize = 2048;
+        docker.enable = true;
+      };
+    };
+  };
+
+  testScript = ''
+    docker.wait_for_unit("sockets.target")
+
+    with subtest("Ensure cross compiled buildImage image can run."):
+        docker.succeed(
+            "docker load --input='${hello1}'"
+        )
+        assert "Hello, world!" in docker.succeed(
+            "docker run --rm ${hello1.imageName} hello",
+        )
+        docker.succeed(
+            "docker rmi ${hello1.imageName}",
+        )
+
+    with subtest("Ensure cross compiled buildLayeredImage image can run."):
+        docker.succeed(
+            "docker load --input='${hello2}'"
+        )
+        assert "Hello, world!" in docker.succeed(
+            "docker run --rm ${hello2.imageName} hello",
+        )
+        docker.succeed(
+            "docker rmi ${hello2.imageName}",
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker-tools-overlay.nix b/nixpkgs/nixos/tests/docker-tools-overlay.nix
new file mode 100644
index 000000000000..6781388e639b
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-tools-overlay.nix
@@ -0,0 +1,33 @@
+# This test creates a simple GNU image with dockerTools and checks that it executes.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "docker-tools-overlay";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lnl7 roberth ];
+  };
+
+  nodes = {
+    docker =
+      { ... }:
+      {
+        virtualisation.docker.enable = true;
+        virtualisation.docker.storageDriver = "overlay";  # defaults to overlay2
+      };
+  };
+
+  testScript = ''
+      docker.wait_for_unit("sockets.target")
+
+      docker.succeed(
+          "docker load --input='${pkgs.dockerTools.examples.bash}'",
+          "docker run --rm ${pkgs.dockerTools.examples.bash.imageName} bash --version",
+      )
+
+      # Check that the nix store has correct user permissions depending on which
+      # storage driver is used; incorrectly built images can show up as read-only.
+      # drw-------  3 0 0   3 Apr 14 11:36 /nix
+      # drw------- 99 0 0 100 Apr 14 11:36 /nix/store
+      docker.succeed("docker run --rm -u 1000:1000 ${pkgs.dockerTools.examples.bash.imageName} bash --version")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/docker-tools.nix b/nixpkgs/nixos/tests/docker-tools.nix
new file mode 100644
index 000000000000..fcdfa586fd55
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker-tools.nix
@@ -0,0 +1,554 @@
+# This test creates a simple GNU image with dockerTools and checks that it executes.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  # nixpkgs#214434: dockerTools.buildImage fails to unpack base images
+  # containing duplicate layers when those duplicate tarballs
+  # appear under the manifest's 'Layers'. Docker can generate images
+  # like this even though dockerTools does not.
+  repeatedLayerTestImage =
+    let
+      # Rootfs diffs for layers 1 and 2 are identical (and empty)
+      layer1 = pkgs.dockerTools.buildImage { name = "empty"; };
+      layer2 = layer1.overrideAttrs (_: { fromImage = layer1; });
+      repeatedRootfsDiffs = pkgs.runCommandNoCC "image-with-links.tar" {
+        nativeBuildInputs = [pkgs.jq];
+      } ''
+        mkdir contents
+        tar -xf "${layer2}" -C contents
+        cd contents
+        first_rootfs=$(jq -r '.[0].Layers[0]' manifest.json)
+        second_rootfs=$(jq -r '.[0].Layers[1]' manifest.json)
+        target_rootfs=$(sha256sum "$first_rootfs" | cut -d' ' -f 1).tar
+
+        # Replace duplicated rootfs diffs with symlinks to one tarball
+        chmod -R ug+w .
+        mv "$first_rootfs" "$target_rootfs"
+        rm "$second_rootfs"
+        ln -s "../$target_rootfs" "$first_rootfs"
+        ln -s "../$target_rootfs" "$second_rootfs"
+
+        # Update manifest's layers to use the symlinks' target
+        cat manifest.json | \
+        jq ".[0].Layers[0] = \"$target_rootfs\"" |
+        jq ".[0].Layers[1] = \"$target_rootfs\"" > manifest.json.new
+        mv manifest.json.new manifest.json
+
+        tar --sort=name --hard-dereference -cf $out .
+        '';
+    in pkgs.dockerTools.buildImage {
+      fromImage = repeatedRootfsDiffs;
+      name = "repeated-layer-test";
+      tag = "latest";
+      copyToRoot = pkgs.bash;
+      # A runAsRoot script is required to force previous layers to be unpacked
+      runAsRoot = ''
+        echo 'runAsRoot has run.'
+      '';
+    };
+in {
+  name = "docker-tools";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lnl7 roberth ];
+  };
+
+  nodes = {
+    docker = { ... }: {
+      virtualisation = {
+        diskSize = 3072;
+        docker.enable = true;
+      };
+    };
+  };
+
+  testScript = with pkgs.dockerTools; ''
+    unix_time_second1 = "1970-01-01T00:00:01Z"
+
+    docker.wait_for_unit("sockets.target")
+
+    with subtest("includeStorePath"):
+        with subtest("assumption"):
+            docker.succeed("${examples.helloOnRoot} | docker load")
+            docker.succeed("docker run --rm hello | grep -i hello")
+            docker.succeed("docker image rm hello:latest")
+        with subtest("includeStorePath = false; breaks example"):
+            docker.succeed("${examples.helloOnRootNoStore} | docker load")
+            docker.fail("docker run --rm hello | grep -i hello")
+            docker.succeed("docker image rm hello:latest")
+        with subtest("includeStorePath = false; works with mounted store"):
+            docker.succeed("${examples.helloOnRootNoStore} | docker load")
+            docker.succeed("docker run --rm --volume ${builtins.storeDir}:${builtins.storeDir}:ro hello | grep -i hello")
+            docker.succeed("docker image rm hello:latest")
+
+    with subtest("Ensure Docker images use a stable date by default"):
+        docker.succeed(
+            "docker load --input='${examples.bash}'"
+        )
+        assert unix_time_second1 in docker.succeed(
+            "docker inspect ${examples.bash.imageName} "
+            + "| ${pkgs.jq}/bin/jq -r .[].Created",
+        )
+
+    docker.succeed("docker run --rm ${examples.bash.imageName} bash --version")
+    # Check imageTag attribute matches image
+    docker.succeed("docker images --format '{{.Tag}}' | grep -F '${examples.bash.imageTag}'")
+    docker.succeed("docker rmi ${examples.bash.imageName}")
+
+    # The remaining combinations
+    with subtest("Ensure imageTag attribute matches image"):
+        docker.succeed(
+            "docker load --input='${examples.bashNoTag}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTag.imageTag}'"
+        )
+        docker.succeed("docker rmi ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag}")
+
+        docker.succeed(
+            "docker load --input='${examples.bashNoTagLayered}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTagLayered.imageTag}'"
+        )
+        docker.succeed("docker rmi ${examples.bashNoTagLayered.imageName}:${examples.bashNoTagLayered.imageTag}")
+
+        docker.succeed(
+            "${examples.bashNoTagStreamLayered} | docker load"
+        )
+        docker.succeed(
+            "docker images --format '{{.Tag}}' | grep -F '${examples.bashNoTagStreamLayered.imageTag}'"
+        )
+        docker.succeed(
+            "docker rmi ${examples.bashNoTagStreamLayered.imageName}:${examples.bashNoTagStreamLayered.imageTag}"
+        )
+
+        docker.succeed(
+            "docker load --input='${examples.nixLayered}'"
+        )
+        docker.succeed("docker images --format '{{.Tag}}' | grep -F '${examples.nixLayered.imageTag}'")
+        docker.succeed("docker rmi ${examples.nixLayered.imageName}")
+
+
+    with subtest(
+        "Check if the nix store is correctly initialized by listing "
+        "dependencies of the installed Nix binary"
+    ):
+        docker.succeed(
+            "docker load --input='${examples.nix}'",
+            "docker run --rm ${examples.nix.imageName} nix-store -qR ${pkgs.nix}",
+            "docker rmi ${examples.nix.imageName}",
+        )
+
+    with subtest(
+        "Ensure (layered) nix store has correct permissions "
+        "and that the container starts when its process does not have uid 0"
+    ):
+        docker.succeed(
+            "docker load --input='${examples.bashLayeredWithUser}'",
+            "docker run -u somebody --rm ${examples.bashLayeredWithUser.imageName} ${pkgs.bash}/bin/bash -c 'test 555 == $(stat --format=%a /nix) && test 555 == $(stat --format=%a /nix/store)'",
+            "docker rmi ${examples.bashLayeredWithUser.imageName}",
+        )
+
+    with subtest("The nix binary symlinks are intact"):
+        docker.succeed(
+            "docker load --input='${examples.nix}'",
+            "docker run --rm ${examples.nix.imageName} ${pkgs.bash}/bin/bash -c 'test nix == $(readlink ${pkgs.nix}/bin/nix-daemon)'",
+            "docker rmi ${examples.nix.imageName}",
+        )
+
+    with subtest("The nix binary symlinks are intact when the image is layered"):
+        docker.succeed(
+            "docker load --input='${examples.nixLayered}'",
+            "docker run --rm ${examples.nixLayered.imageName} ${pkgs.bash}/bin/bash -c 'test nix == $(readlink ${pkgs.nix}/bin/nix-daemon)'",
+            "docker rmi ${examples.nixLayered.imageName}",
+        )
+
+    with subtest("The pullImage tool works"):
+        docker.succeed(
+            "docker load --input='${examples.testNixFromDockerHub}'",
+            "docker run --rm nix:2.2.1 nix-store --version",
+            "docker rmi nix:2.2.1",
+        )
+
+    with subtest("runAsRoot and entry point work"):
+        docker.succeed(
+            "docker load --input='${examples.nginx}'",
+            "docker run --name nginx -d -p 8000:80 ${examples.nginx.imageName}",
+        )
+        docker.wait_until_succeeds("curl -f http://localhost:8000/")
+        docker.succeed(
+            "docker rm --force nginx",
+            "docker rmi '${examples.nginx.imageName}'",
+        )
+
+    with subtest("A pulled image can be used as base image"):
+        docker.succeed(
+            "docker load --input='${examples.onTopOfPulledImage}'",
+            "docker run --rm ontopofpulledimage hello",
+            "docker rmi ontopofpulledimage",
+        )
+
+    with subtest("Regression test for issue #34779"):
+        docker.succeed(
+            "docker load --input='${examples.runAsRootExtraCommands}'",
+            "docker run --rm runasrootextracommands cat extraCommands",
+            "docker run --rm runasrootextracommands cat runAsRoot",
+            "docker rmi '${examples.runAsRootExtraCommands.imageName}'",
+        )
+
+    with subtest("Ensure Docker images can use an unstable date"):
+        docker.succeed(
+            "docker load --input='${examples.unstableDate}'"
+        )
+        assert unix_time_second1 not in docker.succeed(
+            "docker inspect ${examples.unstableDate.imageName} "
+            + "| ${pkgs.jq}/bin/jq -r .[].Created"
+        )
+
+    with subtest("Ensure Layered Docker images can use an unstable date"):
+        docker.succeed(
+            "docker load --input='${examples.unstableDateLayered}'"
+        )
+        assert unix_time_second1 not in docker.succeed(
+            "docker inspect ${examples.unstableDateLayered.imageName} "
+            + "| ${pkgs.jq}/bin/jq -r .[].Created"
+        )
+
+    with subtest("Ensure Layered Docker images work"):
+        docker.succeed(
+            "docker load --input='${examples.layered-image}'",
+            "docker run --rm ${examples.layered-image.imageName}",
+            "docker run --rm ${examples.layered-image.imageName} cat extraCommands",
+        )
+
+    with subtest("Ensure images built on top of layered Docker images work"):
+        docker.succeed(
+            "docker load --input='${examples.layered-on-top}'",
+            "docker run --rm ${examples.layered-on-top.imageName}",
+        )
+
+    with subtest("Ensure layered images built on top of layered Docker images work"):
+        docker.succeed(
+            "docker load --input='${examples.layered-on-top-layered}'",
+            "docker run --rm ${examples.layered-on-top-layered.imageName}",
+        )
+
+
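+    # Helper: the set of RootFS layer digests that `docker inspect` reports for an image.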
+    def set_of_layers(image_name):
+        return set(
+            docker.succeed(
+                f"docker inspect {image_name} "
+                + "| ${pkgs.jq}/bin/jq -r '.[] | .RootFS.Layers | .[]'"
+            ).split()
+        )
+
+
+    with subtest("Ensure layers are shared between images"):
+        docker.succeed(
+            "docker load --input='${examples.another-layered-image}'"
+        )
+        layers1 = set_of_layers("${examples.layered-image.imageName}")
+        layers2 = set_of_layers("${examples.another-layered-image.imageName}")
+        assert bool(layers1 & layers2)
+
+    with subtest("Ensure order of layers is correct"):
+        docker.succeed(
+            "docker load --input='${examples.layersOrder}'"
+        )
+
+        for index in 1, 2, 3:
+            assert f"layer{index}" in docker.succeed(
+                f"docker run --rm  ${examples.layersOrder.imageName} cat /tmp/layer{index}"
+            )
+
+    with subtest("Ensure layers unpacked in correct order before runAsRoot runs"):
+        assert "abc" in docker.succeed(
+            "docker load --input='${examples.layersUnpackOrder}'",
+            "docker run --rm ${examples.layersUnpackOrder.imageName} cat /layer-order"
+        )
+
+    with subtest("Ensure repeated base layers handled by buildImage"):
+        docker.succeed(
+            "docker load --input='${repeatedLayerTestImage}'",
+            "docker run --rm ${repeatedLayerTestImage.imageName} /bin/bash -c 'exit 0'"
+        )
+
+    with subtest("Ensure environment variables are correctly inherited"):
+        docker.succeed(
+            "docker load --input='${examples.environmentVariables}'"
+        )
+        out = docker.succeed("docker run --rm ${examples.environmentVariables.imageName} env")
+        env = out.splitlines()
+        assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
+        assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
+        assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
+
+    with subtest("Ensure environment variables of layered images are correctly inherited"):
+        docker.succeed(
+            "docker load --input='${examples.environmentVariablesLayered}'"
+        )
+        out = docker.succeed("docker run --rm ${examples.environmentVariablesLayered.imageName} env")
+        env = out.splitlines()
+        assert "FROM_PARENT=true" in env, "envvars from the parent should be preserved"
+        assert "FROM_CHILD=true" in env, "envvars from the child should be preserved"
+        assert "LAST_LAYER=child" in env, "envvars from the child should take priority"
+
+    with subtest(
+        "Ensure inherited environment variables of layered images are correctly resolved"
+    ):
+        # Read environment variables as stored in image config
+        config = docker.succeed(
+            "tar -xOf ${examples.environmentVariablesLayered} manifest.json | ${pkgs.jq}/bin/jq -r .[].Config"
+        ).strip()
+        out = docker.succeed(
+            f"tar -xOf ${examples.environmentVariablesLayered} {config} | ${pkgs.jq}/bin/jq -r '.config.Env | .[]'"
+        )
+        env = out.splitlines()
+        assert (
+            sum(entry.startswith("LAST_LAYER") for entry in env) == 1
+        ), "envvars overridden by child should be unique"
+
+    with subtest("Ensure image with only 2 layers can be loaded"):
+        docker.succeed(
+            "docker load --input='${examples.two-layered-image}'"
+        )
+
+    with subtest(
+        "Ensure the bulk layer doesn't miss store paths (regression test for #78744)"
+    ):
+        docker.succeed(
+            "docker load --input='${pkgs.dockerTools.examples.bulk-layer}'",
+            # Ensure the two output paths (ls and hello) are in the layer
+            "docker run bulk-layer ls /bin/hello",
+        )
+
+    with subtest(
+        "Ensure the bulk layer with a base image respects the number of maxLayers"
+    ):
+        docker.succeed(
+            "docker load --input='${pkgs.dockerTools.examples.layered-bulk-layer}'",
+            # Ensure the image runs correctly
+            "docker run layered-bulk-layer ls /bin/hello",
+        )
+
+        # Ensure the image has the correct number of layers
+        assert len(set_of_layers("layered-bulk-layer")) == 4
+
+    with subtest("Ensure only minimal paths are added to the store"):
+        # TODO: make an example that has no store paths, for example by making
+        #       busybox non-self-referential.
+
+        # This check tests that buildLayeredImage can build images that don't need a store.
+        docker.succeed(
+            "docker load --input='${pkgs.dockerTools.examples.no-store-paths}'"
+        )
+
+        docker.succeed("docker run --rm no-store-paths ls / >/dev/console")
+
+        # If busybox isn't self-referential, we need this line
+        #   docker.fail("docker run --rm no-store-paths ls /nix/store >/dev/console")
+        # However, it currently is self-referential, so we check that it is the
+        # only store path.
+        docker.succeed("diff <(docker run --rm no-store-paths ls /nix/store) <(basename ${pkgs.pkgsStatic.busybox}) >/dev/console")
+
+    with subtest("Ensure buildLayeredImage does not change store path contents."):
+        docker.succeed(
+            "docker load --input='${pkgs.dockerTools.examples.filesInStore}'",
+            "docker run --rm file-in-store nix-store --verify --check-contents",
+            "docker run --rm file-in-store |& grep 'some data'",
+        )
+
+    with subtest("Ensure cross compiled image can be loaded and has correct arch."):
+        docker.succeed(
+            "docker load --input='${pkgs.dockerTools.examples.cross}'",
+        )
+        assert (
+            docker.succeed(
+                "docker inspect ${pkgs.dockerTools.examples.cross.imageName} "
+                + "| ${pkgs.jq}/bin/jq -r .[].Architecture"
+            ).strip()
+            == "${if pkgs.stdenv.hostPlatform.system == "aarch64-linux" then "amd64" else "arm64"}"
+        )
+
+    with subtest("buildLayeredImage doesn't dereference /nix/store symlink layers"):
+        docker.succeed(
+            "docker load --input='${examples.layeredStoreSymlink}'",
+            "docker run --rm ${examples.layeredStoreSymlink.imageName} bash -c 'test -L ${examples.layeredStoreSymlink.passthru.symlink}'",
+            "docker rmi ${examples.layeredStoreSymlink.imageName}",
+        )
+
+    with subtest("buildImage supports registry/ prefix in image name"):
+        docker.succeed(
+            "docker load --input='${examples.prefixedImage}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}' | grep -F '${examples.prefixedImage.imageName}'"
+        )
+
+    with subtest("buildLayeredImage supports registry/ prefix in image name"):
+        docker.succeed(
+            "docker load --input='${examples.prefixedLayeredImage}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}' | grep -F '${examples.prefixedLayeredImage.imageName}'"
+        )
+
+    with subtest("buildLayeredImage supports running chown with fakeRootCommands"):
+        docker.succeed(
+            "docker load --input='${examples.layeredImageWithFakeRootCommands}'"
+        )
+        docker.succeed(
+            "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/alice | grep -E ^1000$'"
+        )
+
+    with subtest("Ensure docker load on merged images loads all of the constituent images"):
+        docker.succeed(
+            "docker load --input='${examples.mergedBashAndRedis}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.bash.imageName}-${examples.bash.imageTag}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.redis.imageName}-${examples.redis.imageTag}'"
+        )
+        docker.succeed("docker run --rm ${examples.bash.imageName} bash --version")
+        docker.succeed("docker run --rm ${examples.redis.imageName} redis-cli --version")
+        docker.succeed("docker rmi ${examples.bash.imageName}")
+        docker.succeed("docker rmi ${examples.redis.imageName}")
+
+    with subtest(
+        "Ensure docker load on merged images loads all of the constituent images (missing tags)"
+    ):
+        docker.succeed(
+            "docker load --input='${examples.mergedBashNoTagAndRedis}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.bashNoTag.imageName}-${examples.bashNoTag.imageTag}'"
+        )
+        docker.succeed(
+            "docker images --format '{{.Repository}}-{{.Tag}}' | grep -F '${examples.redis.imageName}-${examples.redis.imageTag}'"
+        )
+        # we need to explicitly specify the generated tag here
+        docker.succeed(
+            "docker run --rm ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag} bash --version"
+        )
+        docker.succeed("docker run --rm ${examples.redis.imageName} redis-cli --version")
+        docker.succeed("docker rmi ${examples.bashNoTag.imageName}:${examples.bashNoTag.imageTag}")
+        docker.succeed("docker rmi ${examples.redis.imageName}")
+
+    with subtest("mergeImages preserves owners of the original images"):
+        docker.succeed(
+            "docker load --input='${examples.mergedBashFakeRoot}'"
+        )
+        docker.succeed(
+            "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} sh -c 'stat -c '%u' /home/alice | grep -E ^1000$'"
+        )
+
+    with subtest("The image contains store paths referenced by the fakeRootCommands output"):
+        docker.succeed(
+            "docker run --rm ${examples.layeredImageWithFakeRootCommands.imageName} /hello/bin/layeredImageWithFakeRootCommands-hello"
+        )
+
+    with subtest("exportImage produces a valid tarball"):
+        docker.succeed(
+            "tar -tf ${examples.exportBash} | grep '\./bin/bash' > /dev/null"
+        )
+
+    with subtest("layered image fakeRootCommands with fakechroot works"):
+        docker.succeed("${examples.imageViaFakeChroot} | docker load")
+        docker.succeed("docker run --rm image-via-fake-chroot | grep -i hello")
+        docker.succeed("docker image rm image-via-fake-chroot:latest")
+
+    with subtest("Ensure bare paths in contents are loaded correctly"):
+        docker.succeed(
+            "docker load --input='${examples.build-image-with-path}'",
+            "docker run --rm build-image-with-path bash -c '[[ -e /hello.txt ]]'",
+            "docker rmi build-image-with-path",
+        )
+        docker.succeed(
+            "${examples.layered-image-with-path} | docker load",
+            "docker run --rm layered-image-with-path bash -c '[[ -e /hello.txt ]]'",
+            "docker rmi layered-image-with-path",
+        )
+
+    with subtest("Ensure correct architecture is present in manifests."):
+        docker.succeed("""
+            docker load --input='${examples.build-image-with-architecture}'
+            docker inspect build-image-with-architecture \
+              | ${pkgs.jq}/bin/jq -er '.[] | select(.Architecture=="arm64").Architecture'
+            docker rmi build-image-with-architecture
+        """)
+        docker.succeed("""
+            ${examples.layered-image-with-architecture} | docker load
+            docker inspect layered-image-with-architecture \
+              | ${pkgs.jq}/bin/jq -er '.[] | select(.Architecture=="arm64").Architecture'
+            docker rmi layered-image-with-architecture
+        """)
+
+    with subtest("etc"):
+        docker.succeed("${examples.etc} | docker load")
+        docker.succeed("docker run --rm etc | grep localhost")
+        docker.succeed("docker image rm etc:latest")
+
+    with subtest("image-with-certs"):
+        docker.succeed("<${examples.image-with-certs} docker load")
+        docker.succeed("docker run --rm image-with-certs:latest test -r /etc/ssl/certs/ca-bundle.crt")
+        docker.succeed("docker run --rm image-with-certs:latest test -r /etc/ssl/certs/ca-certificates.crt")
+        docker.succeed("docker run --rm image-with-certs:latest test -r /etc/pki/tls/certs/ca-bundle.crt")
+        docker.succeed("docker image rm image-with-certs:latest")
+
+    with subtest("buildNixShellImage: Can build a basic derivation"):
+        docker.succeed(
+            "${examples.nix-shell-basic} | docker load",
+            "docker run --rm nix-shell-basic bash -c 'buildDerivation && $out/bin/hello' | grep '^Hello, world!$'"
+        )
+
+    with subtest("buildNixShellImage: Runs the shell hook"):
+        docker.succeed(
+            "${examples.nix-shell-hook} | docker load",
+            "docker run --rm -it nix-shell-hook | grep 'This is the shell hook!'"
+        )
+
+    with subtest("buildNixShellImage: Sources stdenv, making build inputs available"):
+        docker.succeed(
+            "${examples.nix-shell-inputs} | docker load",
+            "docker run --rm -it nix-shell-inputs | grep 'Hello, world!'"
+        )
+
+    with subtest("buildNixShellImage: passAsFile works"):
+        docker.succeed(
+            "${examples.nix-shell-pass-as-file} | docker load",
+            "docker run --rm -it nix-shell-pass-as-file | grep 'this is a string'"
+        )
+
+    with subtest("buildNixShellImage: run argument works"):
+        docker.succeed(
+            "${examples.nix-shell-run} | docker load",
+            "docker run --rm -it nix-shell-run | grep 'This shell is not interactive'"
+        )
+
+    with subtest("buildNixShellImage: command argument works"):
+        docker.succeed(
+            "${examples.nix-shell-command} | docker load",
+            "docker run --rm -it nix-shell-command | grep 'This shell is interactive'"
+        )
+
+    with subtest("buildNixShellImage: home directory is writable by default"):
+        docker.succeed(
+            "${examples.nix-shell-writable-home} | docker load",
+            "docker run --rm -it nix-shell-writable-home"
+        )
+
+    with subtest("buildNixShellImage: home directory can be made non-existent"):
+        docker.succeed(
+            "${examples.nix-shell-nonexistent-home} | docker load",
+            "docker run --rm -it nix-shell-nonexistent-home"
+        )
+
+    with subtest("buildNixShellImage: can build derivations"):
+        docker.succeed(
+            "${examples.nix-shell-build-derivation} | docker load",
+            "docker run --rm -it nix-shell-build-derivation"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/docker.nix b/nixpkgs/nixos/tests/docker.nix
new file mode 100644
index 000000000000..93baa198088b
--- /dev/null
+++ b/nixpkgs/nixos/tests/docker.nix
@@ -0,0 +1,53 @@
+# This test runs docker and checks that a simple container starts.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "docker";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ nequissimus offline ];
+  };
+
+  nodes = {
+    docker =
+      { pkgs, ... }:
+        {
+          virtualisation.docker.enable = true;
+          virtualisation.docker.autoPrune.enable = true;
+          virtualisation.docker.package = pkgs.docker;
+
+          users.users = {
+            noprivs = {
+              isNormalUser = true;
+              description = "Can't access the docker daemon";
+              password = "foobar";
+            };
+
+            hasprivs = {
+              isNormalUser = true;
+              description = "Can access the docker daemon";
+              password = "foobar";
+              extraGroups = [ "docker" ];
+            };
+          };
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    docker.wait_for_unit("sockets.target")
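+    # Create a minimal "scratchimg" base image by importing an empty tarball.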
+    docker.succeed("tar cv --files-from /dev/null | docker import - scratchimg")
+    docker.succeed(
+        "docker run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+    )
+    docker.succeed("docker ps | grep sleeping")
+    docker.succeed("sudo -u hasprivs docker ps")
+    docker.fail("sudo -u noprivs docker ps")
+    docker.succeed("docker stop sleeping")
+
+    # Must match version 4 times to ensure client and server git commits and versions are correct
+    docker.succeed('[ $(docker version | grep ${pkgs.docker.version} | wc -l) = "4" ]')
+    docker.succeed("systemctl restart systemd-sysctl")
+    docker.succeed("grep 1 /proc/sys/net/ipv4/conf/all/forwarding")
+    docker.succeed("grep 1 /proc/sys/net/ipv4/conf/default/forwarding")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/documize.nix b/nixpkgs/nixos/tests/documize.nix
new file mode 100644
index 000000000000..3624c0c56769
--- /dev/null
+++ b/nixpkgs/nixos/tests/documize.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "documize";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.jq ];
+
+    services.documize = {
+      enable = true;
+      port = 3000;
+      dbtype = "postgresql";
+      db = "host=localhost port=5432 sslmode=disable user=documize password=documize dbname=documize";
+    };
+
+    systemd.services.documize-server = {
+      after = [ "postgresql.service" ];
+      requires = [ "postgresql.service" ];
+    };
+
+    services.postgresql = {
+      enable = true;
+      initialScript = pkgs.writeText "psql-init" ''
+        CREATE ROLE documize WITH LOGIN PASSWORD 'documize';
+        CREATE DATABASE documize WITH OWNER documize;
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("documize-server.service")
+    machine.wait_for_open_port(3000)
+
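+    # Extract the dbhash value embedded in the setup page; it has to be passed back to the /api/setup call below.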
+    dbhash = machine.succeed(
+        "curl -f localhost:3000 | grep 'property=\"dbhash' | grep -Po 'content=\"\\K[^\"]*'"
+    )
+
+    dbhash = dbhash.strip()
+
+    machine.succeed(
+        (
+            "curl -X POST"
+            " --data 'dbname=documize'"
+            " --data 'dbhash={}'"
+            " --data 'title=NixOS'"
+            " --data 'message=Docs'"
+            " --data 'firstname=Bob'"
+            " --data 'lastname=Foobar'"
+            " --data 'email=bob.foobar@nixos.org'"
+            " --data 'password=verysafe'"
+            " -f localhost:3000/api/setup"
+        ).format(dbhash)
+    )
+
+    machine.succeed(
+        'test "$(curl -f localhost:3000/api/public/meta | jq ".title" | xargs echo)" = "NixOS"'
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/doh-proxy-rust.nix b/nixpkgs/nixos/tests/doh-proxy-rust.nix
new file mode 100644
index 000000000000..8c743fe77e32
--- /dev/null
+++ b/nixpkgs/nixos/tests/doh-proxy-rust.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "doh-proxy-rust";
+  meta.maintainers = with lib.maintainers; [ stephank ];
+
+  nodes = {
+    machine = { pkgs, lib, ... }: {
+      services.bind = {
+        enable = true;
+        extraOptions = "empty-zones-enable no;";
+        zones = lib.singleton {
+          name = ".";
+          master = true;
+          file = pkgs.writeText "root.zone" ''
+            $TTL 3600
+            . IN SOA ns.example.org. admin.example.org. ( 1 3h 1h 1w 1d )
+            . IN NS ns.example.org.
+            ns.example.org. IN A    192.168.0.1
+          '';
+        };
+      };
+      services.doh-proxy-rust = {
+        enable = true;
+        flags = [
+          "--server-address=127.0.0.1:53"
+        ];
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    url = "http://localhost:3000/dns-query"
+    query = "AAABAAABAAAAAAAAAm5zB2V4YW1wbGUDb3JnAAABAAE="  # IN A ns.example.org.
+    bin_ip = r"$'\xC0\xA8\x00\x01'"  # 192.168.0.1, as shell binary string
+
+    machine.wait_for_unit("bind.service")
+    machine.wait_for_unit("doh-proxy-rust.service")
+    machine.wait_for_open_port(53)
+    machine.wait_for_open_port(3000)
+    machine.succeed(f"curl --fail -H 'Accept: application/dns-message' '{url}?dns={query}' | grep -F {bin_ip}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dokuwiki.nix b/nixpkgs/nixos/tests/dokuwiki.nix
new file mode 100644
index 000000000000..ce3102eec780
--- /dev/null
+++ b/nixpkgs/nixos/tests/dokuwiki.nix
@@ -0,0 +1,161 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  template-bootstrap3 = pkgs.stdenv.mkDerivation rec {
+    name = "bootstrap3";
+    version = "2022-07-27";
+    src = pkgs.fetchFromGitHub {
+      owner = "giterlizzi";
+      repo = "dokuwiki-template-bootstrap3";
+      rev = "v${version}";
+      hash = "sha256-B3Yd4lxdwqfCnfmZdp+i/Mzwn/aEuZ0ovagDxuR6lxo=";
+    };
+    installPhase = "mkdir -p $out; cp -R * $out/";
+  };
+
+
+  plugin-icalevents = pkgs.stdenv.mkDerivation rec {
+    name = "icalevents";
+    version = "2017-06-16";
+    src = pkgs.fetchzip {
+      stripRoot = false;
+      url = "https://github.com/real-or-random/dokuwiki-plugin-icalevents/releases/download/${version}/dokuwiki-plugin-icalevents-${version}.zip";
+      hash = "sha256-IPs4+qgEfe8AAWevbcCM9PnyI0uoyamtWeg4rEb+9Wc=";
+    };
+    installPhase = "mkdir -p $out; cp -R * $out/";
+  };
+
+  acronymsFile = pkgs.writeText "acronyms.local.conf" ''
+    r13y  reproducibility
+  '';
+
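+  # DokuWiki package with a custom acronyms file linked into its conf directory; used by site2 and exercised in the "Customizing Dokuwiki" subtest.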
+  dwWithAcronyms = pkgs.dokuwiki.overrideAttrs (prev: {
+    installPhase = prev.installPhase or "" + ''
+      ln -sf ${acronymsFile} $out/share/dokuwiki/conf/acronyms.local.conf
+    '';
+  });
+
+  mkNode = webserver: { ... }: {
+    services.dokuwiki = {
+      inherit webserver;
+
+      sites = {
+        "site1.local" = {
+          templates = [ template-bootstrap3 ];
+          settings = {
+            useacl = false;
+            userewrite = true;
+            template = "bootstrap3";
+          };
+        };
+        "site2.local" = {
+          package = dwWithAcronyms;
+          usersFile = "/var/lib/dokuwiki/site2.local/users.auth.php";
+          plugins = [ plugin-icalevents ];
+          settings = {
+            useacl = true;
+            superuser = "admin";
+            title._file = titleFile;
+            plugin.dummy.empty = "This is just for testing purposes";
+          };
+          acl = [
+            { page = "*";
+              actor = "@ALL";
+              level = "read"; }
+            { page = "acl-test";
+              actor = "@ALL";
+              level = "none"; }
+          ];
+          pluginsConfig = {
+            authad = false;
+            authldap = false;
+            authmysql = false;
+            authpgsql = false;
+            tag = false;
+            icalevents = true;
+          };
+        };
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = [ 80 ];
+    networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+  };
+
+  titleFile = pkgs.writeText "dokuwiki-title" "DokuWiki on site2";
+in {
+  name = "dokuwiki";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [
+      _1000101
+      onny
+      e1mo
+    ];
+  };
+
+  nodes = {
+    dokuwiki_nginx = mkNode "nginx";
+    dokuwiki_caddy = mkNode "caddy";
+  };
+
+  testScript = ''
+
+    start_all()
+
+    dokuwiki_nginx.wait_for_unit("nginx")
+    dokuwiki_caddy.wait_for_unit("caddy")
+
+    site_names = ["site1.local", "site2.local"]
+
+    for machine in (dokuwiki_nginx, dokuwiki_caddy):
+      for site_name in site_names:
+        machine.wait_for_unit(f"phpfpm-dokuwiki-{site_name}")
+
+        machine.succeed("curl -sSfL http://site1.local/ | grep 'DokuWiki'")
+        machine.fail("curl -sSfL 'http://site1.local/doku.php?do=login' | grep 'Login'")
+
+        machine.succeed("curl -sSfL http://site2.local/ | grep 'DokuWiki on site2'")
+        machine.succeed("curl -sSfL 'http://site2.local/doku.php?do=login' | grep 'Login'")
+
+        with subtest("ACL Operations"):
+          machine.succeed(
+            "echo 'admin:$2y$10$ijdBQMzSVV20SrKtCna8gue36vnsbVm2wItAXvdm876sshI4uwy6S:Admin:admin@example.test:user' >> /var/lib/dokuwiki/site2.local/users.auth.php",
+            "curl -sSfL -d 'u=admin&p=password' --cookie-jar cjar 'http://site2.local/doku.php?do=login'",
+            "curl -sSfL --cookie cjar --cookie-jar cjar 'http://site2.local/doku.php?do=login' | grep 'Logged in as: <bdi>Admin</bdi>'",
+          )
+
+          # Ensure the generated ACL is valid
+          machine.succeed(
+            "echo 'No Hello World! for @ALL here' >> /var/lib/dokuwiki/site2.local/data/pages/acl-test.txt",
+            "curl -sSL 'http://site2.local/doku.php?id=acl-test' | grep 'Permission Denied'"
+          )
+
+        with subtest("Customizing Dokuwiki"):
+          machine.succeed(
+            "echo 'r13y is awesome!' >> /var/lib/dokuwiki/site2.local/data/pages/acronyms-test.txt",
+            "curl -sSfL 'http://site2.local/doku.php?id=acronyms-test' | grep '<abbr title=\"reproducibility\">r13y</abbr>'",
+          )
+
+          # Test that plugins are (a) correctly loaded and (b) the configuration to enable them works
+          machine.succeed(
+              "echo '~~INFO:syntaxplugins~~' >> /var/lib/dokuwiki/site2.local/data/pages/plugin-list.txt",
+              "curl -sSfL 'http://site2.local/doku.php?id=plugin-list' | grep 'plugin:icalevents'",
+              "curl -sSfL 'http://site2.local/doku.php?id=plugin-list' | (! grep 'plugin:tag')",
+          )
+
+          # Test that the theme is applied and working correctly (no weird relative PHP import errors)
+          machine.succeed(
+            "curl -sSfL 'http://site1.local/doku.php' | grep 'bootstrap3/images/logo.png'",
+            "curl -sSfL 'http://site1.local/lib/exe/css.php' | grep 'bootstrap3'",
+            "curl -sSfL 'http://site1.local/lib/tpl/bootstrap3/css.php'",
+          )
+
+
+        # Just to ensure both webserver configurations consistently allow URL rewriting
+        with subtest("Rewriting"):
+          machine.succeed(
+            "echo 'Hello, NixOS!' >> /var/lib/dokuwiki/site1.local/data/pages/rewrite-test.txt",
+            "curl -sSfL http://site1.local/rewrite-test | grep 'Hello, NixOS!'",
+          )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/dolibarr.nix b/nixpkgs/nixos/tests/dolibarr.nix
new file mode 100644
index 000000000000..4fdee9e9698f
--- /dev/null
+++ b/nixpkgs/nixos/tests/dolibarr.nix
@@ -0,0 +1,59 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "dolibarr";
+  meta.maintainers = [ lib.maintainers.raitobezarius ];
+
+  nodes.machine =
+    { ... }:
+    {
+      services.dolibarr = {
+        enable = true;
+        domain = "localhost";
+        nginx = {
+          forceSSL = false;
+          enableACME = false;
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+
+  testScript = ''
+    from html.parser import HTMLParser
+    start_all()
+
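+    # Minimal HTML parser that grabs the hidden CSRF token from the login form.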
+    csrf_token = None
+    class TokenParser(HTMLParser):
+      def handle_starttag(self, tag, attrs):
+        global csrf_token  # assign to the module-level variable read by the login request below
+        attrs = dict(attrs) # attrs is an assoc list originally
+        if tag == 'input' and attrs.get('name') == 'token':
+            csrf_token = attrs.get('value')
+            print(f'[+] Caught CSRF token: {csrf_token}')
+      def handle_endtag(self, tag): pass
+      def handle_data(self, data): pass
+
+    machine.wait_for_unit("phpfpm-dolibarr.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(80)
+    # Sanity checks on URLs.
+    # machine.succeed("curl -fL http://localhost/index.php")
+    # machine.succeed("curl -fL http://localhost/")
+    # Perform installation.
+    machine.succeed('curl -fL -X POST http://localhost/install/check.php -F selectlang=auto')
+    machine.succeed('curl -fL -X POST http://localhost/install/fileconf.php -F selectlang=auto')
+    # First time is to write the configuration file correctly.
+    machine.succeed('curl -fL -X POST http://localhost/install/step1.php -F "testpost=ok" -F "action=set" -F "selectlang=auto"')
+    # Now, we have a proper conf.php in $stateDir.
+    assert 'nixos' in machine.succeed("cat /var/lib/dolibarr/conf.php")
+    machine.succeed('curl -fL -X POST http://localhost/install/step2.php --data "testpost=ok&action=set&dolibarr_main_db_character_set=utf8&dolibarr_main_db_collation=utf8_unicode_ci&selectlang=auto"')
+    machine.succeed('curl -fL -X POST http://localhost/install/step4.php --data "testpost=ok&action=set&selectlang=auto"')
+    machine.succeed('curl -fL -X POST http://localhost/install/step5.php --data "testpost=ok&action=set&login=root&pass=hunter2&pass_verif=hunter2&selectlang=auto"')
+    # Now, we have installed the machine, let's verify we still have the right configuration.
+    assert 'nixos' in machine.succeed("cat /var/lib/dolibarr/conf.php")
+    # We do not want any redirect now as we have installed the machine.
+    machine.succeed('curl -f -X GET http://localhost')
+    # Test authentication to the webservice.
+    parser = TokenParser()
+    parser.feed(machine.succeed('curl -f -X GET "http://localhost/index.php?mainmenu=login&username=root"'))
+    machine.succeed(f'curl -f -X POST "http://localhost/index.php?mainmenu=login&token={csrf_token}&username=root&password=hunter2"')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/domination.nix b/nixpkgs/nixos/tests/domination.nix
new file mode 100644
index 000000000000..409a7f3029c4
--- /dev/null
+++ b/nixpkgs/nixos/tests/domination.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "domination";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = [ pkgs.domination ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.execute("domination >&2 &")
+      machine.wait_for_window("Menu")
+      machine.wait_for_text(r"(New Game|Start Server|Load Game|Help Manual|Join Game|About|Play Online)")
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/dovecot.nix b/nixpkgs/nixos/tests/dovecot.nix
new file mode 100644
index 000000000000..5439387807fd
--- /dev/null
+++ b/nixpkgs/nixos/tests/dovecot.nix
@@ -0,0 +1,82 @@
+import ./make-test-python.nix {
+  name = "dovecot";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ common/user-account.nix ];
+    services.postfix.enable = true;
+    services.dovecot2 = {
+      enable = true;
+      protocols = [ "imap" "pop3" ];
+      modules = [ pkgs.dovecot_pigeonhole ];
+      mailUser = "vmail";
+      mailGroup = "vmail";
+    };
+    environment.systemPackages = let
+      sendTestMail = pkgs.writeScriptBin "send-testmail" ''
+        #!${pkgs.runtimeShell}
+        exec sendmail -vt <<MAIL
+        From: root@localhost
+        To: alice@localhost
+        Subject: Very important!
+
+        Hello world!
+        MAIL
+      '';
+
+      sendTestMailViaDeliveryAgent = pkgs.writeScriptBin "send-lda" ''
+        #!${pkgs.runtimeShell}
+
+        exec ${pkgs.dovecot}/libexec/dovecot/deliver -d bob <<MAIL
+        From: root@localhost
+        To: bob@localhost
+        Subject: Something else...
+
+        I'm running short of ideas!
+        MAIL
+      '';
+
+      testImap = pkgs.writeScriptBin "test-imap" ''
+        #!${pkgs.python3.interpreter}
+        import imaplib
+
+        with imaplib.IMAP4('localhost') as imap:
+          imap.login('alice', 'foobar')
+          imap.select()
+          status, refs = imap.search(None, 'ALL')
+          assert status == 'OK'
+          assert len(refs) == 1
+          status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+          assert status == 'OK'
+          assert msg[0][1].strip() == b'Hello world!'
+      '';
+
+      testPop = pkgs.writeScriptBin "test-pop" ''
+        #!${pkgs.python3.interpreter}
+        import poplib
+
+        pop = poplib.POP3('localhost')
+        try:
+          pop.user('bob')
+          pop.pass_('foobar')
+          assert len(pop.list()[1]) == 1
+          status, fullmail, size = pop.retr(1)
+          assert status.startswith(b'+OK ')
+          body = b"".join(fullmail[fullmail.index(b""):]).strip()
+          assert body == b"I'm running short of ideas!"
+        finally:
+          pop.quit()
+      '';
+
+    in [ sendTestMail sendTestMailViaDeliveryAgent testImap testPop ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("postfix.service")
+    machine.wait_for_unit("dovecot2.service")
+    machine.succeed("send-testmail")
+    machine.succeed("send-lda")
+    machine.wait_until_fails('[ "$(postqueue -p)" != "Mail queue is empty" ]')
+    machine.succeed("test-imap")
+    machine.succeed("test-pop")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/drbd.nix b/nixpkgs/nixos/tests/drbd.nix
new file mode 100644
index 000000000000..bede7206d706
--- /dev/null
+++ b/nixpkgs/nixos/tests/drbd.nix
@@ -0,0 +1,87 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+  let
+    drbdPort = 7789;
+
+    drbdConfig =
+      { nodes, ... }:
+      {
+        virtualisation.emptyDiskImages = [ 1 ];
+        networking.firewall.allowedTCPPorts = [ drbdPort ];
+
+        services.drbd = {
+          enable = true;
+          config = ''
+            global {
+              usage-count yes;
+            }
+
+            common {
+              net {
+                protocol C;
+                ping-int 1;
+              }
+            }
+
+            resource r0 {
+              volume 0 {
+                device    /dev/drbd0;
+                disk      /dev/vdb;
+                meta-disk internal;
+              }
+
+              on drbd1 {
+                address ${nodes.drbd1.config.networking.primaryIPAddress}:${toString drbdPort};
+              }
+
+              on drbd2 {
+                address ${nodes.drbd2.config.networking.primaryIPAddress}:${toString drbdPort};
+              }
+            }
+          '';
+        };
+      };
+  in
+  {
+    name = "drbd";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ ryantm astro ];
+    };
+
+    nodes.drbd1 = drbdConfig;
+    nodes.drbd2 = drbdConfig;
+
+    testScript = { nodes }: ''
+      drbd1.start()
+      drbd2.start()
+
+      drbd1.wait_for_unit("network.target")
+      drbd2.wait_for_unit("network.target")
+
+      drbd1.succeed(
+          "drbdadm create-md r0",
+          "drbdadm up r0",
+          "drbdadm primary r0 --force",
+      )
+
+      drbd2.succeed("drbdadm create-md r0", "drbdadm up r0")
+
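+      # Write a marker file on the primary, then demote it so the peer can take over.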
+      drbd1.succeed(
+          "mkfs.ext4 /dev/drbd0",
+          "mkdir -p /mnt/drbd",
+          "mount /dev/drbd0 /mnt/drbd",
+          "touch /mnt/drbd/hello",
+          "umount /mnt/drbd",
+          "drbdadm secondary r0",
+      )
+      drbd1.sleep(1)
+
+      drbd2.succeed(
+          "drbdadm primary r0",
+          "mkdir -p /mnt/drbd",
+          "mount /dev/drbd0 /mnt/drbd",
+          "ls /mnt/drbd/hello",
+      )
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/early-mount-options.nix b/nixpkgs/nixos/tests/early-mount-options.nix
new file mode 100644
index 000000000000..8be318ae13bc
--- /dev/null
+++ b/nixpkgs/nixos/tests/early-mount-options.nix
@@ -0,0 +1,19 @@
+# Test for https://github.com/NixOS/nixpkgs/pull/193469
+import ./make-test-python.nix {
+  name = "early-mount-options";
+
+  nodes.machine = {
+    virtualisation.fileSystems."/var" = {
+      options = [ "bind" "nosuid" "nodev" "noexec" ];
+      device = "/var";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    var_mount_info = machine.succeed("findmnt /var -n -o OPTIONS")
+    options = var_mount_info.strip().split(",")
+    assert "nosuid" in options and "nodev" in options and "noexec" in options
+  '';
+}
diff --git a/nixpkgs/nixos/tests/earlyoom.nix b/nixpkgs/nixos/tests/earlyoom.nix
new file mode 100644
index 000000000000..75bdf56899b3
--- /dev/null
+++ b/nixpkgs/nixos/tests/earlyoom.nix
@@ -0,0 +1,16 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "earlyoom";
+  meta = {
+    maintainers = with lib.maintainers; [ ncfavier ];
+  };
+
+  nodes.machine = {
+    services.earlyoom = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("earlyoom.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ec2.nix b/nixpkgs/nixos/tests/ec2.nix
new file mode 100644
index 000000000000..e649761d029d
--- /dev/null
+++ b/nixpkgs/nixos/tests/ec2.nix
@@ -0,0 +1,156 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+with import common/ec2.nix { inherit makeTest pkgs; };
+
+let
+  imageCfg = (import ../lib/eval-config.nix {
+    inherit system;
+    modules = [
+      ../maintainers/scripts/ec2/amazon-image.nix
+      ../modules/testing/test-instrumentation.nix
+      ../modules/profiles/qemu-guest.nix
+      {
+        # Hack to make the partition resizing work in QEMU.
+        boot.initrd.postDeviceCommands = mkBefore ''
+          ln -s vda /dev/xvda
+          ln -s vda1 /dev/xvda1
+        '';
+
+        # In a NixOS test the serial console is occupied by the "backdoor"
+        # (see testing/test-instrumentation.nix) and is incompatible with
+        # the configuration in virtualisation/amazon-image.nix.
+        systemd.services."serial-getty@ttyS0".enable = mkForce false;
+
+        # Needed by nixos-rebuild due to the lack of network
+        # access. Determined by trial and error.
+        system.extraDependencies = with pkgs; ( [
+          # Needed for a nixos-rebuild.
+          busybox
+          cloud-utils
+          desktop-file-utils
+          libxslt.bin
+          mkinitcpio-nfs-utils
+          stdenv
+          stdenvNoCC
+          texinfo
+          unionfs-fuse
+          xorg.lndir
+
+          # These are used in the configure-from-userdata tests
+          # for EC2. Httpd and valgrind are requested by the
+          # configuration.
+          apacheHttpd
+          apacheHttpd.doc
+          apacheHttpd.man
+          valgrind.doc
+        ]);
+      }
+    ];
+  }).config;
+  image = "${imageCfg.system.build.amazonImage}/${imageCfg.amazonImage.name}.vhd";
+
+  sshKeys = import ./ssh-keys.nix pkgs;
+  snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
+  snakeOilPrivateKeyFile = pkgs.writeText "private-key" snakeOilPrivateKey;
+  snakeOilPublicKey = sshKeys.snakeOilPublicKey;
+
+in {
+  boot-ec2-nixops = makeEc2Test {
+    name         = "nixops-userdata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey; # That's right folks! My user's key is also the host key!
+
+    userData = ''
+      SSH_HOST_ED25519_KEY_PUB:${snakeOilPublicKey}
+      SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey}
+    '';
+    script = ''
+      machine.start()
+      machine.wait_for_file("/etc/ec2-metadata/user-data")
+      machine.wait_for_unit("sshd.service")
+
+      machine.succeed("grep unknown /etc/ec2-metadata/ami-manifest-path")
+
+      # We have no keys configured on the client side yet, so this should fail
+      machine.fail("ssh -o BatchMode=yes localhost exit")
+
+      # Let's install our client private key
+      machine.succeed("mkdir -p ~/.ssh")
+
+      machine.copy_from_host_via_shell(
+          "${snakeOilPrivateKeyFile}", "~/.ssh/id_ed25519"
+      )
+      machine.succeed("chmod 600 ~/.ssh/id_ed25519")
+
+      # We haven't configured the host key yet, so this should still fail
+      machine.fail("ssh -o BatchMode=yes localhost exit")
+
+      # Add the host key; ssh should finally succeed
+      machine.succeed(
+          "echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts"
+      )
+      machine.succeed("ssh -o BatchMode=yes localhost exit")
+
+      # Test whether the root disk was resized.
+      blocks, block_size = map(int, machine.succeed("stat -c %b:%S -f /").split(":"))
+      GB = 1024 ** 3
+      assert 9.7 * GB <= blocks * block_size <= 10 * GB
+
+      # Just to make sure resizing is idempotent.
+      machine.shutdown()
+      machine.start()
+      machine.wait_for_file("/etc/ec2-metadata/user-data")
+    '';
+  };
+
+  boot-ec2-config = makeEc2Test {
+    name         = "config-userdata";
+    meta.broken = true; # amazon-init wants to download from the internet while building the system
+    inherit image;
+    sshPublicKey = snakeOilPublicKey;
+
+    # ### https://nixos.org/channels/nixos-unstable nixos
+    userData = ''
+      { pkgs, ... }:
+
+      {
+        imports = [
+          <nixpkgs/nixos/modules/virtualisation/amazon-image.nix>
+          <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
+        ];
+        environment.etc.testFile = {
+          text = "whoa";
+        };
+
+        networking.hostName = "ec2-test-vm"; # required by services.httpd
+
+        services.httpd = {
+          enable = true;
+          adminAddr = "test@example.org";
+          virtualHosts.localhost.documentRoot = "''${pkgs.valgrind.doc}/share/doc/valgrind/html";
+        };
+        networking.firewall.allowedTCPPorts = [ 80 ];
+      }
+    '';
+    script = ''
+      machine.start()
+
+      # amazon-init must succeed. if it fails, make the test fail
+      # immediately instead of timing out in wait_for_file.
+      machine.wait_for_unit("amazon-init.service")
+
+      machine.wait_for_file("/etc/testFile")
+      assert "whoa" in machine.succeed("cat /etc/testFile")
+
+      machine.wait_for_unit("httpd.service")
+      assert "Valgrind" in machine.succeed("curl http://localhost")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/ecryptfs.nix b/nixpkgs/nixos/tests/ecryptfs.nix
new file mode 100644
index 000000000000..1c67d307a00e
--- /dev/null
+++ b/nixpkgs/nixos/tests/ecryptfs.nix
@@ -0,0 +1,85 @@
+import ./make-test-python.nix ({ ... }:
+{
+  name = "ecryptfs";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/user-account.nix ];
+    boot.kernelModules = [ "ecryptfs" ];
+    security.pam.enableEcryptfs = true;
+    environment.systemPackages = with pkgs; [ keyutils ];
+  };
+
+  testScript = ''
+    def login_as_alice():
+        machine.wait_until_tty_matches("1", "login: ")
+        machine.send_chars("alice\n")
+        machine.wait_until_tty_matches("1", "Password: ")
+        machine.send_chars("foobar\n")
+        machine.wait_until_tty_matches("1", "alice\@machine")
+
+
+    def logout():
+        machine.send_chars("logout\n")
+        machine.wait_until_tty_matches("1", "login: ")
+
+
+    machine.wait_for_unit("default.target")
+
+    with subtest("Set alice up with a password and a home"):
+        machine.succeed("(echo foobar; echo foobar) | passwd alice")
+        machine.succeed("chown -R alice.users ~alice")
+
+    with subtest("Migrate alice's home"):
+        out = machine.succeed("echo foobar | ecryptfs-migrate-home -u alice")
+        machine.log(f"ecryptfs-migrate-home said: {out}")
+
+    with subtest("Log alice in (ecryptfs passwhrase is wrapped during first login)"):
+        login_as_alice()
+        machine.send_chars("logout\n")
+        machine.wait_until_tty_matches("1", "login: ")
+
+    # Explicitly unmount the private directory (ignoring failure); logging out alone does not reliably unmount it here.
+    machine.succeed("su alice -c ecryptfs-umount-private || true")
+    machine.sleep(1)
+
+    with subtest("check that encrypted home is not mounted"):
+        machine.fail("mount | grep ecryptfs")
+
+    with subtest("Show contents of the user keyring"):
+        out = machine.succeed("su - alice -c 'keyctl list \@u'")
+        machine.log(f"keyctl unlink said: {out}")
+
+    with subtest("Log alice again"):
+        login_as_alice()
+
+    with subtest("Create some files in encrypted home"):
+        machine.succeed("su alice -c 'touch ~alice/a'")
+        machine.succeed("su alice -c 'echo c > ~alice/b'")
+
+    with subtest("Logout"):
+        logout()
+
+    # Explicitly unmount the private directory (ignoring failure); logging out alone does not reliably unmount it here.
+    machine.succeed("su alice -c ecryptfs-umount-private || true")
+    machine.sleep(1)
+
+    with subtest("Check that the filesystem is not accessible"):
+        machine.fail("mount | grep ecryptfs")
+        machine.succeed("su alice -c 'test \! -f ~alice/a'")
+        machine.succeed("su alice -c 'test \! -f ~alice/b'")
+
+    with subtest("Log alice once more"):
+        login_as_alice()
+
+    with subtest("Check that the files are there"):
+        machine.sleep(1)
+        machine.succeed("su alice -c 'test -f ~alice/a'")
+        machine.succeed("su alice -c 'test -f ~alice/b'")
+        machine.succeed('test "$(cat ~alice/b)" = "c"')
+
+    with subtest("Catch https://github.com/NixOS/nixpkgs/issues/16766"):
+        machine.succeed("su alice -c 'ls -lh ~alice/'")
+
+    logout()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/elk.nix b/nixpkgs/nixos/tests/elk.nix
new file mode 100644
index 000000000000..900ea6320100
--- /dev/null
+++ b/nixpkgs/nixos/tests/elk.nix
@@ -0,0 +1,276 @@
+# To run the test on the unfree ELK use the following command:
+# cd path/to/nixpkgs
+# NIXPKGS_ALLOW_UNFREE=1 nix-build -A nixosTests.elk.unfree.ELK-6
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; },
+}:
+
+let
+  inherit (pkgs) lib;
+
+  esUrl = "http://localhost:9200";
+
+  mkElkTest = name : elk :
+    import ./make-test-python.nix ({
+    inherit name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ eelco offline basvandijk ];
+    };
+    nodes = {
+      one =
+        { pkgs, lib, ... }: {
+            # Not giving the machine at least 2060MB results in elasticsearch failing with the following error:
+            #
+            #   OpenJDK 64-Bit Server VM warning:
+            #     INFO: os::commit_memory(0x0000000085330000, 2060255232, 0)
+            #     failed; error='Cannot allocate memory' (errno=12)
+            #
+            #   There is insufficient memory for the Java Runtime Environment to continue.
+            #   Native memory allocation (mmap) failed to map 2060255232 bytes for committing reserved memory.
+            #
+            # When setting this to 2500 I got "Kernel panic - not syncing: Out of
+            # memory: compulsory panic_on_oom is enabled" so let's give it a bit more room:
+            # bit more room:
+            virtualisation.memorySize = 3000;
+
+            # For querying JSON objects returned from elasticsearch and kibana.
+            environment.systemPackages = [ pkgs.jq ];
+
+            services = {
+
+              journalbeat = {
+                enable = elk ? journalbeat;
+                package = elk.journalbeat;
+                extraConfig = pkgs.lib.mkOptionDefault (''
+                  logging:
+                    to_syslog: true
+                    level: warning
+                    metrics.enabled: false
+                  output.elasticsearch:
+                    hosts: [ "127.0.0.1:9200" ]
+                  journalbeat.inputs:
+                  - paths: []
+                    seek: cursor
+                '');
+              };
+
+              filebeat = {
+                enable = elk ? filebeat;
+                package = elk.filebeat;
+                inputs.journald.id = "everything";
+
+                inputs.log = {
+                  enabled = true;
+                  paths = [
+                    "/var/lib/filebeat/test"
+                  ];
+                };
+
+                settings = {
+                  logging.level = "info";
+                };
+              };
+
+              metricbeat = {
+                enable = true;
+                package = elk.metricbeat;
+                modules.system = {
+                  metricsets = ["cpu" "load" "memory" "network" "process" "process_summary" "uptime" "socket_summary"];
+                  enabled = true;
+                  period = "5s";
+                  processes = [".*"];
+                  cpu.metrics = ["percentages" "normalized_percentages"];
+                  core.metrics = ["percentages"];
+                };
+                settings = {
+                  output.elasticsearch = {
+                    hosts = ["127.0.0.1:9200"];
+                  };
+                };
+              };
+
+              logstash = {
+                enable = true;
+                package = elk.logstash;
+                inputConfig = ''
+                  exec { command => "echo -n flowers" interval => 1 type => "test" }
+                  exec { command => "echo -n dragons" interval => 1 type => "test" }
+                '';
+                filterConfig = ''
+                  if [message] =~ /dragons/ {
+                    drop {}
+                  }
+                '';
+                outputConfig = ''
+                  file {
+                    path => "/tmp/logstash.out"
+                    codec => line { format => "%{message}" }
+                  }
+                  elasticsearch {
+                    hosts => [ "${esUrl}" ]
+                  }
+                '';
+              };
+
+              elasticsearch = {
+                enable = true;
+                package = elk.elasticsearch;
+              };
+
+              elasticsearch-curator = {
+                enable = true;
+                actionYAML = ''
+                ---
+                actions:
+                  1:
+                    action: delete_indices
+                    description: >-
+                      Delete indices older than 1 second (based on index name), for logstash-
+                      prefixed indices. Ignore the error if the filter does not result in an
+                      actionable list of indices (ignore_empty_list) and exit cleanly.
+                    options:
+                      allow_ilm_indices: true
+                      ignore_empty_list: True
+                      disable_action: False
+                    filters:
+                    - filtertype: pattern
+                      kind: prefix
+                      value: logstash-
+                    - filtertype: age
+                      source: name
+                      direction: older
+                      timestring: '%Y.%m.%d'
+                      unit: seconds
+                      unit_count: 1
+                '';
+              };
+            };
+          };
+      };
+
+    passthru.elkPackages = elk;
+    testScript =
+      let
+        valueObject = lib.optionalString (lib.versionAtLeast elk.elasticsearch.version "7") ".value";
+      in ''
+      import json
+
+
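+      # Each helper below returns a shell pipeline: curl queries elasticsearch and
+      # jq -e sets the exit status depending on the reported hit count.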
+      def expect_hits(message):
+          dictionary = {"query": {"match": {"message": message}}}
+          return (
+              "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
+              + "-H 'Content-Type: application/json' "
+              + "-d '{}' ".format(json.dumps(dictionary))
+              + " | tee /dev/console"
+              + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
+          )
+
+
+      def expect_no_hits(message):
+          dictionary = {"query": {"match": {"message": message}}}
+          return (
+              "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
+              + "-H 'Content-Type: application/json' "
+              + "-d '{}' ".format(json.dumps(dictionary))
+              + " | tee /dev/console"
+              + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} == 0 end'"
+          )
+
+
+      def has_metricbeat():
+          dictionary = {"query": {"match": {"event.dataset": {"query": "system.cpu"}}}}
+          return (
+              "curl --silent --show-error --fail-with-body '${esUrl}/_search' "
+              + "-H 'Content-Type: application/json' "
+              + "-d '{}' ".format(json.dumps(dictionary))
+              + " | tee /dev/console"
+              + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
+          )
+
+
+      start_all()
+
+      one.wait_for_unit("elasticsearch.service")
+      one.wait_for_open_port(9200)
+
+      # Continue as long as the status is not "red". The status is probably
+      # "yellow" instead of "green" because we are using a single elasticsearch
+      # node which elasticsearch considers risky.
+      #
+      # TODO: extend this test with multiple elasticsearch nodes
+      #       and see if the status turns "green".
+      one.wait_until_succeeds(
+          "curl --silent --show-error --fail-with-body '${esUrl}/_cluster/health'"
+          + " | jq -es 'if . == [] then null else .[] | .status != \"red\" end'"
+      )
+
+      with subtest("Perform some simple logstash tests"):
+          one.wait_for_unit("logstash.service")
+          one.wait_until_succeeds("cat /tmp/logstash.out | grep flowers")
+          one.wait_until_succeeds("cat /tmp/logstash.out | grep -v dragons")
+
+      with subtest("Metricbeat is running"):
+          one.wait_for_unit("metricbeat.service")
+
+      with subtest("Metricbeat metrics arrive in elasticsearch"):
+          one.wait_until_succeeds(has_metricbeat())
+
+      with subtest("Logstash messages arive in elasticsearch"):
+          one.wait_until_succeeds(expect_hits("flowers"))
+          one.wait_until_succeeds(expect_no_hits("dragons"))
+
+    '' + lib.optionalString (elk ? journalbeat) ''
+      with subtest(
+          "A message logged to the journal is ingested by elasticsearch via journalbeat"
+      ):
+          one.wait_for_unit("journalbeat.service")
+          one.execute("echo 'Supercalifragilisticexpialidocious' | systemd-cat")
+          one.wait_until_succeeds(
+              expect_hits("Supercalifragilisticexpialidocious")
+          )
+    '' + lib.optionalString (elk ? filebeat) ''
+      with subtest(
+          "A message logged to the journal is ingested by elasticsearch via filebeat"
+      ):
+          one.wait_for_unit("filebeat.service")
+          one.execute("echo 'Superdupercalifragilisticexpialidocious' | systemd-cat")
+          one.wait_until_succeeds(
+              expect_hits("Superdupercalifragilisticexpialidocious")
+          )
+          one.execute(
+              "echo 'SuperdupercalifragilisticexpialidociousIndeed' >> /var/lib/filebeat/test"
+          )
+          one.wait_until_succeeds(
+              expect_hits("SuperdupercalifragilisticexpialidociousIndeed")
+          )
+    '' + ''
+      with subtest("Elasticsearch-curator works"):
+          one.systemctl("stop logstash")
+          one.systemctl("start elasticsearch-curator")
+          one.wait_until_succeeds(
+              '! curl --silent --show-error --fail-with-body "${esUrl}/_cat/indices" | grep logstash | grep ^'
+          )
+    '';
+  }) { inherit pkgs system; };
+in {
+  # We currently only package upstream binaries.
+  # Feel free to package an SSPL licensed source-based package!
+  # ELK-7 = mkElkTest "elk-7-oss" {
+  #   name = "elk-7";
+  #   elasticsearch = pkgs.elasticsearch7-oss;
+  #   logstash      = pkgs.logstash7-oss;
+  #   filebeat      = pkgs.filebeat7;
+  #   metricbeat    = pkgs.metricbeat7;
+  # };
+  unfree = lib.dontRecurseIntoAttrs {
+    ELK-7 = mkElkTest "elk-7" {
+      elasticsearch = pkgs.elasticsearch7;
+      logstash      = pkgs.logstash7;
+      filebeat      = pkgs.filebeat7;
+      metricbeat    = pkgs.metricbeat7;
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/emacs-daemon.nix b/nixpkgs/nixos/tests/emacs-daemon.nix
new file mode 100644
index 000000000000..310e93e19b0b
--- /dev/null
+++ b/nixpkgs/nixos/tests/emacs-daemon.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "emacs-daemon";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  enableOCR = true;
+
+  nodes.machine =
+    { ... }:
+
+    { imports = [ ./common/x11.nix ];
+      services.emacs = {
+        enable = true;
+        defaultEditor = true;
+      };
+
+      # Important to get the systemd service running for root
+      environment.variables.XDG_RUNTIME_DIR = "/run/user/0";
+
+      environment.variables.TEST_SYSTEM_VARIABLE = "system variable";
+    };
+
+  testScript = ''
+      machine.wait_for_unit("multi-user.target")
+
+      # checks that the EDITOR environment variable is set
+      machine.succeed('test $(basename "$EDITOR") = emacseditor')
+
+      # waits for the emacs service to be ready
+      machine.wait_until_succeeds(
+          "systemctl --user status emacs.service | grep 'Active: active'"
+      )
+
+      # connects to the daemon
+      machine.succeed("emacsclient --no-wait --frame-parameters='((display . \"'\"$DISPLAY\"'\"))' --create-frame $EDITOR >&2")
+
+      # checks that Emacs shows the edited filename
+      machine.wait_for_text("emacseditor")
+
+      # makes sure environment variables are accessible from Emacs
+      machine.succeed(
+          "emacsclient --eval '(getenv \"TEST_SYSTEM_VARIABLE\")' | grep -q 'system variable'"
+      )
+
+      machine.screenshot("emacsclient")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/empty-file b/nixpkgs/nixos/tests/empty-file
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/nixpkgs/nixos/tests/empty-file
diff --git a/nixpkgs/nixos/tests/endlessh-go.nix b/nixpkgs/nixos/tests/endlessh-go.nix
new file mode 100644
index 000000000000..b261dbf1c560
--- /dev/null
+++ b/nixpkgs/nixos/tests/endlessh-go.nix
@@ -0,0 +1,58 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "endlessh-go";
+  meta.maintainers = with lib.maintainers; [ azahi ];
+
+  nodes = {
+    server = { ... }: {
+      services.endlessh-go = {
+        enable = true;
+        prometheus.enable = true;
+        openFirewall = true;
+      };
+
+      specialisation = {
+        unprivileged.configuration = {
+          services.endlessh-go = {
+            port = 2222;
+            prometheus.port = 9229;
+          };
+        };
+
+        privileged.configuration = {
+          services.endlessh-go = {
+            port = 22;
+            prometheus.port = 92;
+          };
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ curl netcat ];
+    };
+  };
+
+  testScript = ''
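+    # Switching specialisations lets a single VM be tested with both the
+    # unprivileged and the privileged port configurations.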
+    def activate_specialisation(name: str):
+        server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2")
+
+    start_all()
+
+    with subtest("Unprivileged"):
+        activate_specialisation("unprivileged")
+        server.wait_for_unit("endlessh-go.service")
+        server.wait_for_open_port(2222)
+        server.wait_for_open_port(9229)
+        client.succeed("nc -dvW5 server 2222")
+        client.succeed("curl -kv server:9229/metrics")
+
+    with subtest("Privileged"):
+        activate_specialisation("privileged")
+        server.wait_for_unit("endlessh-go.service")
+        server.wait_for_open_port(22)
+        server.wait_for_open_port(92)
+        client.succeed("nc -dvW5 server 22")
+        client.succeed("curl -kv server:92/metrics")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/endlessh.nix b/nixpkgs/nixos/tests/endlessh.nix
new file mode 100644
index 000000000000..be742a749fdd
--- /dev/null
+++ b/nixpkgs/nixos/tests/endlessh.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "endlessh";
+  meta.maintainers = with lib.maintainers; [ azahi ];
+
+  nodes = {
+    server = { ... }: {
+      services.endlessh = {
+        enable = true;
+        openFirewall = true;
+      };
+
+      specialisation = {
+        unprivileged.configuration.services.endlessh.port = 2222;
+
+        privileged.configuration.services.endlessh.port = 22;
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ curl netcat ];
+    };
+  };
+
+  testScript = ''
+    def activate_specialisation(name: str):
+        server.succeed(f"/run/booted-system/specialisation/{name}/bin/switch-to-configuration test >&2")
+
+    start_all()
+
+    with subtest("Unprivileged"):
+        activate_specialisation("unprivileged")
+        server.wait_for_unit("endlessh.service")
+        server.wait_for_open_port(2222)
+        client.succeed("nc -dvW5 server 2222")
+
+    with subtest("Privileged"):
+        activate_specialisation("privileged")
+        server.wait_for_unit("endlessh.service")
+        server.wait_for_open_port(22)
+        client.succeed("nc -dvW5 server 22")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/engelsystem.nix b/nixpkgs/nixos/tests/engelsystem.nix
new file mode 100644
index 000000000000..7be3b8a5a1fe
--- /dev/null
+++ b/nixpkgs/nixos/tests/engelsystem.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+  {
+    name = "engelsystem";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ talyz ];
+    };
+
+    nodes.engelsystem =
+      { ... }:
+      {
+        services.engelsystem = {
+          enable = true;
+          domain = "engelsystem";
+          createDatabase = true;
+        };
+        networking.firewall.allowedTCPPorts = [ 80 443 ];
+        environment.systemPackages = with pkgs; [
+          xmlstarlet
+          libxml2
+        ];
+      };
+
+    testScript = ''
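+      # Log in through the web form: fetch the login page, extract the CSRF
+      # token with xmlstarlet and post the admin credentials.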
+      engelsystem.start()
+      engelsystem.wait_for_unit("phpfpm-engelsystem.service")
+      engelsystem.wait_until_succeeds("curl engelsystem/login -sS -f")
+      engelsystem.succeed(
+          "curl engelsystem/login -sS -f -c cookie | xmllint -html -xmlout - >login"
+      )
+      engelsystem.succeed(
+          "xml sel -T -t -m \"html/head/meta[@name='csrf-token']\" -v @content login >token"
+      )
+      engelsystem.succeed(
+          "curl engelsystem/login -sS -f -b cookie -F 'login=admin' -F 'password=asdfasdf' -F '_token=<token' -L | xmllint -html -xmlout - >news"
+      )
+      engelsystem.succeed(
+          "test 'News - Engelsystem' = \"$(xml sel -T -t -c html/head/title news)\""
+      )
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/enlightenment.nix b/nixpkgs/nixos/tests/enlightenment.nix
new file mode 100644
index 000000000000..bce14c1ddd5c
--- /dev/null
+++ b/nixpkgs/nixos/tests/enlightenment.nix
@@ -0,0 +1,96 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+{
+  name = "enlightenment";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ romildo ];
+  };
+
+  nodes.machine = { ... }:
+  {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.desktopManager.enlightenment.enable = true;
+    services.xserver.displayManager = {
+      lightdm.enable = true;
+      autoLogin = {
+        enable = true;
+        user = "alice";
+      };
+    };
+    hardware.pulseaudio.enable = true; # needed for the getfacl test; /dev/snd/* exists without it, but udev doesn't care then
+    environment.systemPackages = [ pkgs.xdotool ];
+    services.acpid.enable = true;
+    services.connman.enable = true;
+    services.connman.package = pkgs.connmanMinimal;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    with subtest("Ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+    with subtest("Check that logging in has given the user ownership of devices"):
+        machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+    with subtest("First time wizard"):
+        machine.wait_for_text("Default")  # Language
+        machine.screenshot("wizard1")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+        machine.screenshot("wizard2")
+
+        machine.wait_for_text("English")  # Keyboard (default)
+        machine.screenshot("wizard3")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Standard")  # Profile (default)
+        machine.screenshot("wizard4")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Title")  # Sizing (default)
+        machine.screenshot("wizard5")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("clicked")  # Windows Focus
+        machine.succeed("xdotool mousemove 512 370 click 1")  # Click
+        machine.screenshot("wizard6")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Connman")  # Network Management (default)
+        machine.screenshot("wizard7")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("BlusZ")  # Bluetooth Management (default)
+        machine.screenshot("wizard8")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("OpenGL")  # Compositing (default)
+        machine.screenshot("wizard9")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("update")  # Updates
+        machine.succeed("xdotool mousemove 512 495 click 1")  # Disable
+        machine.screenshot("wizard10")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("taskbar")  # Taskbar
+        machine.succeed("xdotool mousemove 480 410 click 1")  # Enable
+        machine.screenshot("wizard11")
+        machine.succeed("xdotool mousemove 512 740 click 1")  # Next
+
+        machine.wait_for_text("Home")  # The desktop
+        machine.screenshot("wizard12")
+
+    with subtest("Run Terminology"):
+        machine.succeed("terminology >&2 &")
+        machine.sleep(5)
+        machine.send_chars("ls --color -alF\n")
+        machine.sleep(2)
+        machine.screenshot("terminology")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/env.nix b/nixpkgs/nixos/tests/env.nix
new file mode 100644
index 000000000000..dec17b6b565a
--- /dev/null
+++ b/nixpkgs/nixos/tests/env.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "environment";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  nodes.machine = { pkgs, ... }:
+    {
+      boot.kernelPackages = pkgs.linuxPackages;
+      environment.etc.plainFile.text = ''
+        Hello World
+      '';
+      environment.etc."folder/with/file".text = ''
+        Foo Bar!
+      '';
+
+      environment.sessionVariables = {
+        TERMINFO_DIRS = "/run/current-system/sw/share/terminfo";
+        NIXCON = "awesome";
+      };
+    };
+
+  testScript = ''
+    machine.succeed('[ -L "/etc/plainFile" ]')
+    assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
+    machine.succeed('[ -d "/etc/folder" ]')
+    machine.succeed('[ -d "/etc/folder/with" ]')
+    machine.succeed('[ -L "/etc/folder/with/file" ]')
+    assert "Hello World" in machine.succeed('cat "/etc/plainFile"')
+
+    assert "/run/current-system/sw/share/terminfo" in machine.succeed(
+        "echo ''${TERMINFO_DIRS}"
+    )
+    assert "awesome" in machine.succeed("echo ''${NIXCON}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/envfs.nix b/nixpkgs/nixos/tests/envfs.nix
new file mode 100644
index 000000000000..3f9cd1edb595
--- /dev/null
+++ b/nixpkgs/nixos/tests/envfs.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+let
+  pythonShebang = pkgs.writeScript "python-shebang" ''
+    #!/usr/bin/python
+    print("OK")
+  '';
+
+  bashShebang = pkgs.writeScript "bash-shebang" ''
+    #!/usr/bin/bash
+    echo "OK"
+  '';
+in
+{
+  name = "envfs";
+  nodes.machine.services.envfs.enable = true;
+
+  testScript = ''
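+    # envfs mounts a FUSE filesystem on /usr/bin that resolves binaries from the
+    # calling process's PATH, which is what the PATH=... invocations below exercise.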
+    start_all()
+    machine.wait_until_succeeds("mountpoint -q /usr/bin/")
+    machine.succeed(
+        "PATH=${pkgs.coreutils}/bin /usr/bin/cp --version",
+        # check fallback paths
+        "PATH= /usr/bin/sh --version",
+        "PATH= /usr/bin/env --version",
+        "PATH= test -e /usr/bin/sh",
+        "PATH= test -e /usr/bin/env",
+        # no stat
+        "! test -e /usr/bin/cp",
+        # also picks up PATH that was set after execve
+        "! /usr/bin/hello",
+        "PATH=${pkgs.hello}/bin /usr/bin/hello",
+    )
+
+    out = machine.succeed("PATH=${pkgs.python3}/bin ${pythonShebang}")
+    print(out)
+    assert out == "OK\n"
+
+    out = machine.succeed("PATH=${pkgs.bash}/bin ${bashShebang}")
+    print(out)
+    assert out == "OK\n"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/envoy.nix b/nixpkgs/nixos/tests/envoy.nix
new file mode 100644
index 000000000000..1e4bfe626398
--- /dev/null
+++ b/nixpkgs/nixos/tests/envoy.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "envoy";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ cameronnemo ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    services.envoy.enable = true;
+    services.envoy.settings = {
+      admin = {
+        access_log_path = "/dev/null";
+        address = {
+          socket_address = {
+            protocol = "TCP";
+            address = "127.0.0.1";
+            port_value = 80;
+          };
+        };
+      };
+      static_resources = {
+        listeners = [];
+        clusters = [];
+      };
+    };
+    specialisation = {
+      withoutConfigValidation.configuration = { ... }: {
+        services.envoy = {
+          requireValidConfig = false;
+          settings.admin.access_log_path = lib.mkForce "/var/log/envoy/access.log";
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
+    in
+    ''
+      machine.start()
+
+      with subtest("envoy.service starts and responds with ready"):
+        machine.wait_for_unit("envoy.service")
+        machine.wait_for_open_port(80)
+        machine.wait_until_succeeds("curl -fsS localhost:80/ready")
+
+      with subtest("envoy.service works with config path not available at eval time"):
+        machine.succeed('${specialisations}/withoutConfigValidation/bin/switch-to-configuration test')
+        machine.wait_for_unit("envoy.service")
+        machine.wait_for_open_port(80)
+        machine.wait_until_succeeds("curl -fsS localhost:80/ready")
+        machine.succeed('test -f /var/log/envoy/access.log')
+    '';
+})
diff --git a/nixpkgs/nixos/tests/ergo.nix b/nixpkgs/nixos/tests/ergo.nix
new file mode 100644
index 000000000000..b49e0c9dfed7
--- /dev/null
+++ b/nixpkgs/nixos/tests/ergo.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ergo";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.ergo.enable = true;
+      services.ergo.api.keyHash = "324dcf027dd4a30a932c441f365a25e86b173defa4b8e58948253471b81b72cf";
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("ergo.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ergochat.nix b/nixpkgs/nixos/tests/ergochat.nix
new file mode 100644
index 000000000000..2e9dc55e648e
--- /dev/null
+++ b/nixpkgs/nixos/tests/ergochat.nix
@@ -0,0 +1,97 @@
+let
+  clients = [
+    "ircclient1"
+    "ircclient2"
+  ];
+  server = "ergochat";
+  ircPort = 6667;
+  channel = "nixos-cat";
+  iiDir = "/tmp/irc";
+in
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "ergochat";
+  nodes = {
+    "${server}" = {
+      networking.firewall.allowedTCPPorts = [ ircPort ];
+      services.ergochat = {
+        enable = true;
+        settings.server.motd = pkgs.writeText "ergo.motd" ''
+          The default MOTD doesn't contain the word "nixos" in it.
+          This one does.
+        '';
+      };
+    };
+  } // lib.listToAttrs (builtins.map (client: lib.nameValuePair client {
+    imports = [
+      ./common/user-account.nix
+    ];
+
+    systemd.services.ii = {
+      requires = [ "network.target" ];
+      wantedBy = [ "default.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStartPre = "mkdir -p ${iiDir}";
+        ExecStart = ''
+          ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir}
+        '';
+        User = "alice";
+      };
+    };
+  }) clients);
+
+  testScript =
+    let
+      msg = client: "Hello, my name is ${client}";
+      clientScript = client: [
+        ''
+          ${client}.wait_for_unit("network.target")
+          ${client}.systemctl("start ii")
+          ${client}.wait_for_unit("ii")
+          ${client}.wait_for_file("${iiDir}/${server}/out")
+        ''
+        # look for the custom text in the MOTD.
+        ''
+          ${client}.wait_until_succeeds("grep 'nixos' ${iiDir}/${server}/out")
+        ''
+        # wait until first PING from server arrives before joining,
+        # so we don't try it too early
+        ''
+          ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out")
+        ''
+        # join ${channel}
+        ''
+          ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in")
+          ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in")
+        ''
+        # send a greeting
+        ''
+          ${client}.succeed(
+              "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in"
+          )
+        ''
+        # check that all greetings arrived on all clients
+      ] ++ builtins.map (other: ''
+        ${client}.succeed(
+            "grep '${msg other}$' ${iiDir}/${server}/#${channel}/out"
+        )
+      '') clients;
+
+      # foldl', but requires a non-empty list instead of a start value
+      reduce = f: list:
+        builtins.foldl' f (builtins.head list) (builtins.tail list);
+    in ''
+      start_all()
+      ${server}.systemctl("status ergochat")
+      ${server}.wait_for_open_port(${toString ircPort})
+
+      # run clientScript for all clients so that every list
+      # entry is executed by every client before advancing
+      # to the next one.
+    '' + lib.concatStrings
+      (reduce
+        (lib.zipListsWith (cs: c: cs + c))
+        (builtins.map clientScript clients));
+})
diff --git a/nixpkgs/nixos/tests/eris-server.nix b/nixpkgs/nixos/tests/eris-server.nix
new file mode 100644
index 000000000000..a50db3afebf5
--- /dev/null
+++ b/nixpkgs/nixos/tests/eris-server.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "eris-server";
+  meta.maintainers = with lib.maintainers; [ ehmry ];
+
+  nodes.server = {
+    environment.systemPackages = [ pkgs.eris-go pkgs.nim.pkgs.eris ];
+    services.eris-server = {
+      enable = true;
+      decode = true;
+      listenHttp = "[::1]:80";
+      backends = [ "badger+file:///var/cache/eris.badger?get&put" ];
+      mountpoint = "/eris";
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("eris-server.service")
+    server.wait_for_open_port(5683)
+    server.wait_for_open_port(80)
+    server.succeed("eriscmd get http://[::1] $(echo 'Hail ERIS!' | eriscmd put coap+tcp://[::1]:5683)")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/esphome.nix b/nixpkgs/nixos/tests/esphome.nix
new file mode 100644
index 000000000000..5a318b65a723
--- /dev/null
+++ b/nixpkgs/nixos/tests/esphome.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  testPort = 6052;
+  unixSocket = "/run/esphome/esphome.sock";
+in
+{
+  name = "esphome";
+  meta.maintainers = with lib.maintainers; [ oddlama ];
+
+  nodes = {
+    esphomeTcp = { ... }:
+      {
+        services.esphome = {
+          enable = true;
+          port = testPort;
+          address = "0.0.0.0";
+          openFirewall = true;
+        };
+      };
+
+    esphomeUnix = { ... }:
+      {
+        services.esphome = {
+          enable = true;
+          enableUnixSocket = true;
+        };
+      };
+  };
+
+  testScript = ''
+    esphomeTcp.wait_for_unit("esphome.service")
+    esphomeTcp.wait_for_open_port(${toString testPort})
+    esphomeTcp.succeed("curl --fail http://localhost:${toString testPort}/")
+
+    esphomeUnix.wait_for_unit("esphome.service")
+    esphomeUnix.wait_for_file("${unixSocket}")
+    esphomeUnix.succeed("curl --fail --unix-socket ${unixSocket} http://localhost/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/etcd-cluster.nix b/nixpkgs/nixos/tests/etcd-cluster.nix
new file mode 100644
index 000000000000..c77c0dd73c25
--- /dev/null
+++ b/nixpkgs/nixos/tests/etcd-cluster.nix
@@ -0,0 +1,157 @@
+# This test runs a simple etcd cluster
+
+import ./make-test-python.nix ({ pkgs, ... } : let
+
+  runWithOpenSSL = file: cmd: pkgs.runCommand file {
+    buildInputs = [ pkgs.openssl ];
+  } cmd;
+
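+  # Generate a throwaway CA plus server and client certificates at build time.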
+  ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
+  ca_pem = runWithOpenSSL "ca.pem" ''
+    openssl req \
+      -x509 -new -nodes -key ${ca_key} \
+      -days 10000 -out $out -subj "/CN=etcd-ca"
+  '';
+  etcd_key = runWithOpenSSL "etcd-key.pem" "openssl genrsa -out $out 2048";
+  etcd_csr = runWithOpenSSL "etcd.csr" ''
+    openssl req \
+       -new -key ${etcd_key} \
+       -out $out -subj "/CN=etcd" \
+       -config ${openssl_cnf}
+  '';
+  etcd_cert = runWithOpenSSL "etcd.pem" ''
+    openssl x509 \
+      -req -in ${etcd_csr} \
+      -CA ${ca_pem} -CAkey ${ca_key} \
+      -CAcreateserial -out $out \
+      -days 365 -extensions v3_req \
+      -extfile ${openssl_cnf}
+  '';
+
+  etcd_client_key = runWithOpenSSL "etcd-client-key.pem"
+    "openssl genrsa -out $out 2048";
+
+  etcd_client_csr = runWithOpenSSL "etcd-client.csr" ''
+    openssl req \
+      -new -key ${etcd_client_key} \
+      -out $out -subj "/CN=etcd-client" \
+      -config ${client_openssl_cnf}
+  '';
+
+  etcd_client_cert = runWithOpenSSL "etcd-client.crt" ''
+    openssl x509 \
+      -req -in ${etcd_client_csr} \
+      -CA ${ca_pem} -CAkey ${ca_key} -CAcreateserial \
+      -out $out -days 365 -extensions v3_req \
+      -extfile ${client_openssl_cnf}
+  '';
+
+  openssl_cnf = pkgs.writeText "openssl.cnf" ''
+    req_extensions = v3_req
+    distinguished_name = req_distinguished_name
+    [req_distinguished_name]
+    [ v3_req ]
+    basicConstraints = CA:FALSE
+    keyUsage = digitalSignature, keyEncipherment
+    extendedKeyUsage = serverAuth, clientAuth
+    subjectAltName = @alt_names
+    [alt_names]
+    DNS.1 = node1
+    DNS.2 = node2
+    DNS.3 = node3
+    IP.1 = 127.0.0.1
+  '';
+
+  client_openssl_cnf = pkgs.writeText "client-openssl.cnf" ''
+    req_extensions = v3_req
+    distinguished_name = req_distinguished_name
+    [req_distinguished_name]
+    [ v3_req ]
+    basicConstraints = CA:FALSE
+    keyUsage = digitalSignature, keyEncipherment
+    extendedKeyUsage = clientAuth
+  '';
+
+  nodeConfig = {
+    services = {
+      etcd = {
+        enable = true;
+        keyFile = etcd_key;
+        certFile = etcd_cert;
+        trustedCaFile = ca_pem;
+        clientCertAuth = true;
+        listenClientUrls = ["https://127.0.0.1:2379"];
+        listenPeerUrls = ["https://0.0.0.0:2380"];
+      };
+    };
+
+    environment.variables = {
+      ETCD_CERT_FILE = "${etcd_client_cert}";
+      ETCD_KEY_FILE = "${etcd_client_key}";
+      ETCD_CA_FILE = "${ca_pem}";
+      ETCDCTL_ENDPOINTS = "https://127.0.0.1:2379";
+      ETCDCTL_CACERT = "${ca_pem}";
+      ETCDCTL_CERT = "${etcd_cert}";
+      ETCDCTL_KEY = "${etcd_key}";
+    };
+
+    networking.firewall.allowedTCPPorts = [ 2380 ];
+  };
+in {
+  name = "etcd-cluster";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    node1 = { ... }: {
+      require = [nodeConfig];
+      services.etcd = {
+        initialCluster = ["node1=https://node1:2380" "node2=https://node2:2380"];
+        initialAdvertisePeerUrls = ["https://node1:2380"];
+      };
+    };
+
+    node2 = { ... }: {
+      require = [nodeConfig];
+      services.etcd = {
+        initialCluster = ["node1=https://node1:2380" "node2=https://node2:2380"];
+        initialAdvertisePeerUrls = ["https://node2:2380"];
+      };
+    };
+
+    node3 = { ... }: {
+      require = [nodeConfig];
+      services.etcd = {
+        initialCluster = ["node1=https://node1:2380" "node2=https://node2:2380" "node3=https://node3:2380"];
+        initialAdvertisePeerUrls = ["https://node3:2380"];
+        initialClusterState = "existing";
+      };
+    };
+  };
+
+  testScript = ''
+    with subtest("should start etcd cluster"):
+        node1.start()
+        node2.start()
+        node1.wait_for_unit("etcd.service")
+        node2.wait_for_unit("etcd.service")
+        node2.wait_until_succeeds("etcdctl endpoint status")
+        node1.succeed("etcdctl put /foo/bar 'Hello world'")
+        node2.succeed("etcdctl get /foo/bar | grep 'Hello world'")
+
+    with subtest("should add another member"):
+        node1.wait_until_succeeds("etcdctl member add node3 --peer-urls=https://node3:2380")
+        node3.start()
+        node3.wait_for_unit("etcd.service")
+        node3.wait_until_succeeds("etcdctl member list | grep 'node3'")
+        node3.succeed("etcdctl endpoint status")
+
+    with subtest("should survive member crash"):
+        node3.crash()
+        node1.succeed("etcdctl endpoint status")
+        node1.succeed("etcdctl put /foo/bar 'Hello degraded world'")
+        node1.succeed("etcdctl get /foo/bar | grep 'Hello degraded world'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/etcd.nix b/nixpkgs/nixos/tests/etcd.nix
new file mode 100644
index 000000000000..79857778ae1b
--- /dev/null
+++ b/nixpkgs/nixos/tests/etcd.nix
@@ -0,0 +1,25 @@
+# This test runs a simple etcd node
+
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "etcd";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    node = { ... }: {
+      services.etcd.enable = true;
+    };
+  };
+
+  testScript = ''
+    with subtest("should start etcd node"):
+        node.start()
+        node.wait_for_unit("etcd.service")
+
+    with subtest("should write and read some values to etcd"):
+        node.succeed("etcdctl put /foo/bar 'Hello world'")
+        node.succeed("etcdctl get /foo/bar | grep 'Hello world'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/etebase-server.nix b/nixpkgs/nixos/tests/etebase-server.nix
new file mode 100644
index 000000000000..49bfccf359e2
--- /dev/null
+++ b/nixpkgs/nixos/tests/etebase-server.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  dataDir = "/var/lib/foobar";
+
+in {
+    name = "etebase-server";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ felschr ];
+    };
+
+    nodes.machine = { pkgs, ... }:
+      {
+        services.etebase-server = {
+          inherit dataDir;
+          enable = true;
+          settings.global.secret_file =
+            toString (pkgs.writeText "secret" "123456");
+        };
+      };
+
+    testScript = ''
+      machine.wait_for_unit("etebase-server.service")
+      machine.wait_for_open_port(8001)
+
+      with subtest("Database & src-version were created"):
+          machine.wait_for_file("${dataDir}/src-version")
+          assert (
+              "${pkgs.etebase-server}"
+              in machine.succeed("cat ${dataDir}/src-version")
+          )
+          machine.wait_for_file("${dataDir}/db.sqlite3")
+          machine.wait_for_file("${dataDir}/static")
+
+      with subtest("Only allow access from allowed_hosts"):
+          machine.succeed("curl -sSfL http://0.0.0.0:8001/")
+          machine.fail("curl -sSfL http://127.0.0.1:8001/")
+          machine.fail("curl -sSfL http://localhost:8001/")
+
+      with subtest("Run tests"):
+          machine.succeed("etebase-server check")
+          machine.succeed("etebase-server test")
+
+      with subtest("Create superuser"):
+          machine.succeed(
+              "etebase-server createsuperuser --no-input --username admin --email root@localhost"
+          )
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/etesync-dav.nix b/nixpkgs/nixos/tests/etesync-dav.nix
new file mode 100644
index 000000000000..f49152c60991
--- /dev/null
+++ b/nixpkgs/nixos/tests/etesync-dav.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+
+  name = "etesync-dav";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ _3699n ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.curl pkgs.etesync-dav ];
+  };
+
+  testScript =
+    ''
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("etesync-dav --version")
+      machine.execute("etesync-dav >&2 &")
+      machine.wait_for_open_port(37358)
+      with subtest("Check that the web interface is accessible"):
+          assert "Add User" in machine.succeed("curl -s http://localhost:37358/.web/add/")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/evcc.nix b/nixpkgs/nixos/tests/evcc.nix
new file mode 100644
index 000000000000..7ebdc6a6f5ab
--- /dev/null
+++ b/nixpkgs/nixos/tests/evcc.nix
@@ -0,0 +1,96 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+{
+  name = "evcc";
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  nodes = {
+    machine = { config, ... }: {
+      services.evcc = {
+        enable = true;
+        settings = {
+          network = {
+            schema = "http";
+            host = "localhost";
+            port = 7070;
+          };
+
+          log = "info";
+
+          site = {
+            title = "NixOS Test";
+            meters = {
+              grid = "grid";
+              pv = "pv";
+            };
+          };
+
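+          # The meters and the charger below are dummies backed by shell scripts,
+          # so the test needs no real hardware.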
+          meters = [ {
+            type = "custom";
+            name = "grid";
+            power = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo -4500'";
+            };
+          } {
+            type = "custom";
+            name = "pv";
+            power = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo 7500'";
+            };
+          } ];
+
+          chargers = [ {
+            name = "dummy-charger";
+            type = "custom";
+            status = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo charger status A'";
+            };
+            enabled = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo charger enabled state false'";
+            };
+            enable = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo set charger enabled state true'";
+            };
+            maxcurrent = {
+              source = "script";
+              cmd = "/bin/sh -c 'echo set charger max current 7200'";
+            };
+          } ];
+
+          loadpoints = [ {
+            title = "Dummy";
+            charger = "dummy-charger";
+          } ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("evcc.service")
+    machine.wait_for_open_port(7070)
+
+    with subtest("Check package version propagates into frontend"):
+        machine.fail(
+            "curl --fail http://localhost:7070 | grep '0.0.1-alpha'"
+        )
+        machine.succeed(
+            "curl --fail http://localhost:7070 | grep '${pkgs.evcc.version}'"
+        )
+
+    with subtest("Check journal for errors"):
+        _, output = machine.execute("journalctl -o cat -u evcc.service")
+        assert "FATAL" not in output
+
+    with subtest("Check systemd hardening"):
+        _, output = machine.execute("systemd-analyze security evcc.service | grep -v '✓'")
+        machine.log(output)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fail2ban.nix b/nixpkgs/nixos/tests/fail2ban.nix
new file mode 100644
index 000000000000..c3708575b702
--- /dev/null
+++ b/nixpkgs/nixos/tests/fail2ban.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "fail2ban";
+
+  nodes.machine = _: {
+    services.fail2ban = {
+      enable = true;
+      bantime-increment.enable = true;
+    };
+
+    services.openssh.enable = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    machine.wait_for_unit("fail2ban")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fakeroute.nix b/nixpkgs/nixos/tests/fakeroute.nix
new file mode 100644
index 000000000000..37b174524ab8
--- /dev/null
+++ b/nixpkgs/nixos/tests/fakeroute.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ lib, pkgs, ...} : {
+  name = "fakeroute";
+  meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    services.fakeroute.enable = true;
+    services.fakeroute.route =
+      [ "216.102.187.130" "4.0.1.122"
+        "198.116.142.34" "63.199.8.242"
+      ];
+    environment.systemPackages = [ pkgs.traceroute ];
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("fakeroute.service")
+      machine.succeed("traceroute 127.0.0.1 | grep -q 216.102.187.130")
+    '';
+})
+
diff --git a/nixpkgs/nixos/tests/fancontrol.nix b/nixpkgs/nixos/tests/fancontrol.nix
new file mode 100644
index 000000000000..ecb936097446
--- /dev/null
+++ b/nixpkgs/nixos/tests/fancontrol.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "fancontrol";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ evils ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    hardware.fancontrol.enable = true;
+    hardware.fancontrol.config = ''
+      INTERVAL=42
+      DEVPATH=hwmon1=devices/platform/dummy
+      DEVNAME=hwmon1=dummy
+      FCTEMPS=hwmon1/device/pwm1=hwmon1/device/temp1_input
+      FCFANS=hwmon1/device/pwm1=hwmon1/device/fan1_input
+      MINTEMP=hwmon1/device/pwm1=25
+      MAXTEMP=hwmon1/device/pwm1=65
+      MINSTART=hwmon1/device/pwm1=150
+      MINSTOP=hwmon1/device/pwm1=0
+    '';
+  };
+
+  # This configuration cannot be valid for the test VM, so it's expected to get an 'outdated' error.
+  testScript = ''
+    start_all()
+    # can't wait for unit fancontrol.service because it doesn't become active due to invalid config
+    # fancontrol.service is WantedBy multi-user.target
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed(
+        "journalctl -eu fancontrol | tee /dev/stderr | grep 'Configuration appears to be outdated'"
+    )
+    machine.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fanout.nix b/nixpkgs/nixos/tests/fanout.nix
new file mode 100644
index 000000000000..c36d34dcce0b
--- /dev/null
+++ b/nixpkgs/nixos/tests/fanout.nix
@@ -0,0 +1,30 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+}:
+import ./make-test-python.nix ({lib, pkgs, ...}: {
+  name = "fanout";
+  meta.maintainers = [ lib.maintainers.therishidesai ];
+
+  nodes = let
+    cfg = { ... }: {
+      services.fanout = {
+        enable = true;
+        fanoutDevices = 2;
+        bufferSize = 8192;
+      };
+    };
+  in {
+    machine = cfg;
+  };
+
+  testScript = ''
+    start_all()
+
+    # Wait for the system to finish booting.
+    machine.wait_for_unit("multi-user.target")
+
+    machine.succeed("test -c /dev/fanout0")
+    machine.succeed("test -c /dev/fanout1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fastnetmon-advanced.nix b/nixpkgs/nixos/tests/fastnetmon-advanced.nix
new file mode 100644
index 000000000000..b2d2713a9211
--- /dev/null
+++ b/nixpkgs/nixos/tests/fastnetmon-advanced.nix
@@ -0,0 +1,65 @@
+{ pkgs, lib, ... }:
+
+{
+  name = "fastnetmon-advanced";
+  meta.maintainers = lib.teams.wdz.members;
+
+  nodes = {
+    bird = { ... }: {
+      networking.firewall.allowedTCPPorts = [ 179 ];
+      services.bird2 = {
+        enable = true;
+        config = ''
+          router id 192.168.1.1;
+
+          protocol bgp fnm {
+            local 192.168.1.1 as 64513;
+            neighbor 192.168.1.2 as 64514;
+            multihop;
+            ipv4 {
+              import all;
+              export none;
+            };
+          }
+        '';
+      };
+    };
+    fnm = { ... }: {
+      networking.firewall.allowedTCPPorts = [ 179 ];
+      services.fastnetmon-advanced = {
+        enable = true;
+        settings = {
+          networks_list = [ "172.23.42.0/24" ];
+          gobgp = true;
+          gobgp_flow_spec_announces = true;
+        };
+        bgpPeers = {
+          bird = {
+            local_asn = 64514;
+            remote_asn = 64513;
+            local_address = "192.168.1.2";
+            remote_address = "192.168.1.1";
+
+            description = "Bird";
+            ipv4_unicast = true;
+            multihop = true;
+            active = true;
+          };
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+    fnm.wait_for_unit("fastnetmon.service")
+    bird.wait_for_unit("bird2.service")
+
+    fnm.wait_until_succeeds('journalctl -eu fastnetmon.service | grep "BGP daemon restarted correctly"')
+    fnm.wait_until_succeeds("journalctl -eu gobgp.service | grep BGP_FSM_OPENCONFIRM")
+    bird.wait_until_succeeds("birdc show protocol fnm | grep Estab")
+    fnm.wait_until_succeeds('journalctl -eu fastnetmon.service | grep "API server listening"')
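+    # Announce a blackhole for an address inside networks_list and check that bird learns the route over BGP.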
+    fnm.succeed("fcli set blackhole 172.23.42.123")
+    bird.succeed("birdc show route | grep 172.23.42.123")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/fcitx5/default.nix b/nixpkgs/nixos/tests/fcitx5/default.nix
new file mode 100644
index 000000000000..c113f2e2c052
--- /dev/null
+++ b/nixpkgs/nixos/tests/fcitx5/default.nix
@@ -0,0 +1,165 @@
+import ../make-test-python.nix ({ lib, ... }:
+rec {
+  name = "fcitx5";
+  meta.maintainers = with lib.maintainers; [ nevivurn ];
+
+  nodes.machine = { pkgs, ... }:
+  {
+    imports = [
+      ../common/user-account.nix
+    ];
+
+    environment.systemPackages = [
+      # To avoid clashing with xfce4-terminal
+      pkgs.alacritty
+    ];
+
+    services.xserver = {
+      enable = true;
+
+      displayManager = {
+        lightdm.enable = true;
+        autoLogin = {
+          enable = true;
+          user = "alice";
+        };
+      };
+
+      desktopManager.xfce.enable = true;
+    };
+
+    i18n.inputMethod = {
+      enabled = "fcitx5";
+      fcitx5.addons = [
+        pkgs.fcitx5-chinese-addons
+        pkgs.fcitx5-hangul
+        pkgs.fcitx5-m17n
+        pkgs.fcitx5-mozc
+      ];
+      fcitx5.settings = {
+        globalOptions = {
+          "Hotkey"."EnumerateSkipFirst" = "False";
+          "Hotkey/TriggerKeys"."0" = "Control+space";
+          "Hotkey/EnumerateForwardKeys"."0" = "Alt+Shift_L";
+          "Hotkey/EnumerateBackwardKeys"."0" = "Alt+Shift_R";
+        };
+        inputMethod = {
+          "GroupOrder" = {
+            "0" = "NixOS_test";
+          };
+          "Groups/0" = {
+            "Default Layout" = "us";
+            "DefaultIM" = "wbx";
+            "Name" = "NixOS_test";
+          };
+          "Groups/0/Items/0" = {
+            "Name" = "keyboard-us";
+          };
+          "Groups/0/Items/1" = {
+            "Layout" = "us";
+            "Name" = "wbx";
+          };
+          "Groups/0/Items/2" = {
+            "Layout" = "us";
+            "Name" = "hangul";
+          };
+          "Groups/0/Items/3" = {
+            "Layout" = "us";
+            "Name" = "m17n_sa_harvard-kyoto";
+          };
+          "Groups/0/Items/4" = {
+            "Layout" = "us";
+            "Name" = "mozc";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.users.users.alice;
+      xauth         = "${user.home}/.Xauthority";
+    in
+      ''
+            start_all()
+
+            machine.wait_for_x()
+            machine.wait_for_file("${xauth}")
+            machine.succeed("xauth merge ${xauth}")
+            machine.sleep(5)
+
+            machine.succeed("su - ${user.name} -c 'kill $(pgrep fcitx5)'")
+            machine.sleep(1)
+
+            machine.succeed("su - ${user.name} -c 'alacritty >&2 &'")
+            machine.succeed("su - ${user.name} -c 'fcitx5 >&2 &'")
+            machine.sleep(10)
+
+            ### Type on terminal
+            machine.send_chars("echo ")
+            machine.sleep(1)
+
+            ### Start fcitx Unicode input
+            machine.send_key("ctrl-alt-shift-u")
+            machine.sleep(1)
+
+            ### Search for smiling face
+            machine.send_chars("smil")
+            machine.sleep(1)
+
+            ### Navigate to the second one
+            machine.send_key("tab")
+            machine.sleep(1)
+
+            ### Choose it
+            machine.send_key("\n")
+            machine.sleep(1)
+
+            ### Start fcitx language input
+            machine.send_key("ctrl-spc")
+            machine.sleep(1)
+
+            ### Default wubi, enter 一下
+            machine.send_chars("gggh ")
+            machine.sleep(1)
+
+            ### Switch to Hangul
+            machine.send_key("alt-shift")
+            machine.sleep(1)
+
+            ### Enter 한
+            machine.send_chars("gks")
+            machine.sleep(1)
+
+            ### Switch to Harvard Kyoto
+            machine.send_key("alt-shift")
+            machine.sleep(1)
+
+            ### Enter क
+            machine.send_chars("ka")
+            machine.sleep(1)
+
+            ### Switch to Mozc
+            machine.send_key("alt-shift")
+            machine.sleep(1)
+
+            ### Enter か
+            machine.send_chars("ka\n")
+            machine.sleep(1)
+
+            ### Turn off Fcitx
+            machine.send_key("ctrl-spc")
+            machine.sleep(1)
+
+            ### Redirect typed characters to a file
+            machine.send_chars(" > fcitx_test.out\n")
+            machine.sleep(1)
+            machine.screenshot("terminal_chars")
+
+            ### Verify that file contents are as expected
+            file_content = machine.succeed("cat ${user.home}/fcitx_test.out")
+            assert file_content == "☺一下한कか\n"
+      ''
+  ;
+})
diff --git a/nixpkgs/nixos/tests/fenics.nix b/nixpkgs/nixos/tests/fenics.nix
new file mode 100644
index 000000000000..1d182cfc4499
--- /dev/null
+++ b/nixpkgs/nixos/tests/fenics.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  fenicsScript = pkgs.writeScript "poisson.py" ''
+    #!/usr/bin/env python
+    from dolfin import *
+
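+    # A minimal Poisson problem (the standard FEniCS demo): solve -laplace(u) = f on the unit square with Dirichlet boundary conditions.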
+    mesh = UnitSquareMesh(4, 4)
+    V = FunctionSpace(mesh, "Lagrange", 1)
+
+    def boundary(x):
+        return x[0] < DOLFIN_EPS or x[0] > 1.0 - DOLFIN_EPS
+
+    u0 = Constant(0.0)
+    bc = DirichletBC(V, u0, boundary)
+
+    u = TrialFunction(V)
+    v = TestFunction(V)
+    f = Expression("10*exp(-(pow(x[0] - 0.5, 2) + pow(x[1] - 0.5, 2)) / 0.02)", degree=2)
+    g = Expression("sin(5*x[0])", degree=2)
+    a = inner(grad(u), grad(v))*dx
+    L = f*v*dx + g*v*ds
+
+    u = Function(V)
+    solve(a == L, u, bc)
+    print(u)
+  '';
+in
+{
+  name = "fenics";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ knedlsepp ];
+  };
+
+  nodes = {
+    fenicsnode = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [
+        gcc
+        (python3.withPackages (ps: with ps; [ fenics ]))
+      ];
+    };
+  };
+  testScript =
+    { nodes, ... }:
+    ''
+      start_all()
+      fenicsnode.succeed("${fenicsScript}")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/ferm.nix b/nixpkgs/nixos/tests/ferm.nix
new file mode 100644
index 000000000000..be43877445eb
--- /dev/null
+++ b/nixpkgs/nixos/tests/ferm.nix
@@ -0,0 +1,75 @@
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "ferm";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mic92 ];
+  };
+
+  nodes =
+    { client =
+        { pkgs, ... }:
+        with pkgs.lib;
+        {
+          networking = {
+            dhcpcd.enable = false;
+            interfaces.eth1.ipv6.addresses = mkOverride 0 [ { address = "fd00::2"; prefixLength = 64; } ];
+            interfaces.eth1.ipv4.addresses = mkOverride 0 [ { address = "192.168.1.2"; prefixLength = 24; } ];
+          };
+      };
+      server =
+        { pkgs, ... }:
+        with pkgs.lib;
+        {
+          networking = {
+            dhcpcd.enable = false;
+            useNetworkd = true;
+            useDHCP = false;
+            interfaces.eth1.ipv6.addresses = mkOverride 0 [ { address = "fd00::1"; prefixLength = 64; } ];
+            interfaces.eth1.ipv4.addresses = mkOverride 0 [ { address = "192.168.1.1"; prefixLength = 24; } ];
+          };
+
+          services = {
+            ferm.enable = true;
+            ferm.config = ''
+              domain (ip ip6) table filter chain INPUT {
+                interface lo ACCEPT;
+                proto tcp dport 8080 REJECT reject-with tcp-reset;
+              }
+            '';
+            nginx.enable = true;
+            nginx.httpConfig = ''
+              server {
+                listen 80;
+                listen [::]:80;
+                listen 8080;
+                listen [::]:8080;
+
+                location /status { stub_status on; }
+              }
+            '';
+          };
+        };
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      client.wait_for_unit("network-online.target")
+      server.wait_for_unit("network-online.target")
+      server.wait_for_unit("ferm.service")
+      server.wait_for_unit("nginx.service")
+      server.wait_until_succeeds("ss -ntl | grep -q 80")
+
+      with subtest("port 80 is allowed"):
+          client.succeed("curl --fail -g http://192.168.1.1:80/status")
+          client.succeed("curl --fail -g http://[fd00::1]:80/status")
+
+      with subtest("port 8080 is not allowed"):
+          server.succeed("curl --fail -g http://192.168.1.1:8080/status")
+          server.succeed("curl --fail -g http://[fd00::1]:8080/status")
+
+          client.fail("curl --fail -g http://192.168.1.1:8080/status")
+          client.fail("curl --fail -g http://[fd00::1]:8080/status")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/ferretdb.nix b/nixpkgs/nixos/tests/ferretdb.nix
new file mode 100644
index 000000000000..7251198af77d
--- /dev/null
+++ b/nixpkgs/nixos/tests/ferretdb.nix
@@ -0,0 +1,64 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../.. { inherit system; }
+, ...
+}:
+let
+  lib = pkgs.lib;
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("ferretdb.service")
+    machine.wait_for_open_port(27017)
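+    # FerretDB speaks the MongoDB wire protocol on MongoDB's default port, so mongosh can connect to it directly.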
+    machine.succeed("mongosh --eval 'use myNewDatabase;' --eval 'db.myCollection.insertOne( { x: 1 } );'")
+  '';
+in
+with import ../lib/testing-python.nix { inherit system; };
+{
+
+  postgresql = makeTest
+    {
+      inherit testScript;
+      name = "ferretdb-postgresql";
+      meta.maintainers = with lib.maintainers; [ julienmalka ];
+
+      nodes.machine =
+        { pkgs, ... }:
+        {
+          services.ferretdb = {
+            enable = true;
+            settings.FERRETDB_HANDLER = "pg";
+            settings.FERRETDB_POSTGRESQL_URL = "postgres://ferretdb@localhost/ferretdb?host=/run/postgresql";
+          };
+
+          systemd.services.ferretdb.serviceConfig = {
+            Requires = "postgresql.service";
+            After = "postgresql.service";
+          };
+
+          services.postgresql = {
+            enable = true;
+            ensureDatabases = [ "ferretdb" ];
+            ensureUsers = [{
+              name = "ferretdb";
+              ensureDBOwnership = true;
+            }];
+          };
+
+          environment.systemPackages = with pkgs; [ mongosh ];
+        };
+    };
+
+  sqlite = makeTest
+    {
+      inherit testScript;
+      name = "ferretdb-sqlite";
+      meta.maintainers = with lib.maintainers; [ julienmalka ];
+
+      nodes.machine =
+        { pkgs, ... }:
+        {
+          services.ferretdb.enable = true;
+
+          environment.systemPackages = with pkgs; [ mongosh ];
+        };
+    };
+}
diff --git a/nixpkgs/nixos/tests/firefox.nix b/nixpkgs/nixos/tests/firefox.nix
new file mode 100644
index 000000000000..fbea95dc7523
--- /dev/null
+++ b/nixpkgs/nixos/tests/firefox.nix
@@ -0,0 +1,123 @@
+import ./make-test-python.nix ({ pkgs, firefoxPackage, ... }:
+{
+  name = firefoxPackage.pname;
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco shlevy ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      environment.systemPackages = [ pkgs.xdotool ];
+
+      programs.firefox = {
+        enable = true;
+        preferences."media.autoplay.default" = 0;
+        package = firefoxPackage;
+      };
+
+      # Create a virtual sound device, with mixing
+      # and all, for recording audio.
+      boot.kernelModules = [ "snd-aloop" ];
+      sound.enable = true;
+      sound.extraConfig = ''
+        pcm.!default {
+          type plug
+          slave.pcm pcm.dmixer
+        }
+        pcm.dmixer {
+          type dmix
+          ipc_key 1
+          slave {
+            pcm "hw:Loopback,0,0"
+            rate 48000
+            periods 128
+            period_time 0
+            period_size 1024
+            buffer_size 8192
+          }
+        }
+        pcm.recorder {
+          type hw
+          card "Loopback"
+          device 1
+          subdevice 0
+        }
+      '';
+
+      systemd.services.audio-recorder = {
+        description = "Record NixOS test audio to /tmp/record.wav";
+        script = "${pkgs.alsa-utils}/bin/arecord -D recorder -f S16_LE -r48000 /tmp/record.wav";
+      };
+
+    };
+
+  testScript = let
+    exe = firefoxPackage.unwrapped.binaryName;
+  in ''
+      from contextlib import contextmanager
+
+
+      @contextmanager
+      def record_audio(machine: Machine):
+          """
+          Perform actions while recording the
+          machine audio output.
+          """
+          machine.systemctl("start audio-recorder")
+          yield
+          machine.systemctl("stop audio-recorder")
+
+
+      def wait_for_sound(machine: Machine):
+          """
+          Wait until any sound has been emitted.
+          """
+          machine.wait_for_file("/tmp/record.wav")
+          while True:
+              # Get at most 2M of the recording
+              machine.execute("tail -c 2M /tmp/record.wav > /tmp/last")
+              # Get the exact size
+              size = int(machine.succeed("stat -c '%s' /tmp/last").strip())
+              # Compare it against /dev/zero using `cmp` (skipping 50B of WAVE header).
+              # If some non-NULL bytes are found it returns 1.
+              status, output = machine.execute(
+                  f"cmp -i 50 -n {size - 50} /tmp/last /dev/zero 2>&1"
+              )
+              if status == 1:
+                  break
+              machine.sleep(2)
+
+
+      machine.wait_for_x()
+
+      with subtest("Wait until Firefox has finished loading the Valgrind docs page"):
+          machine.execute(
+              "xterm -e '${exe} file://${pkgs.valgrind.doc}/share/doc/valgrind/html/index.html' >&2 &"
+          )
+          machine.wait_for_window("Valgrind")
+          machine.sleep(40)
+
+      with subtest("Check whether Firefox can play sound"):
+          with record_audio(machine):
+              machine.succeed(
+                  "${exe} file://${pkgs.sound-theme-freedesktop}/share/sounds/freedesktop/stereo/phone-incoming-call.oga >&2 &"
+              )
+              wait_for_sound(machine)
+          machine.copy_from_vm("/tmp/record.wav")
+
+      with subtest("Close sound test tab"):
+          machine.execute("xdotool key ctrl+w")
+
+      with subtest("Close default browser prompt"):
+          machine.execute("xdotool key space")
+
+      with subtest("Wait until Firefox draws the developer tool panel"):
+          machine.sleep(10)
+          machine.succeed("xwininfo -root -tree | grep Valgrind")
+          machine.screenshot("screen")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/firejail.nix b/nixpkgs/nixos/tests/firejail.nix
new file mode 100644
index 000000000000..6c42c37b2813
--- /dev/null
+++ b/nixpkgs/nixos/tests/firejail.nix
@@ -0,0 +1,91 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "firejail";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ sgo ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ./common/user-account.nix ];
+
+    programs.firejail = {
+      enable = true;
+      wrappedBinaries = {
+        bash-jailed  = "${pkgs.bash}/bin/bash";
+        bash-jailed2  = {
+          executable = "${pkgs.bash}/bin/bash";
+          extraArgs = [ "--private=~/firejail-home" ];
+        };
+      };
+    };
+
+    systemd.services.setupFirejailTest = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "multi-user.target" ];
+
+      environment = {
+        HOME = "/home/alice";
+      };
+
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        User = "alice";
+      };
+
+      script = ''
+        cd $HOME
+
+        mkdir .password-store && echo s3cret > .password-store/secret
+        mkdir my-secrets && echo s3cret > my-secrets/secret
+
+        echo publ1c > public
+
+        mkdir -p .config/firejail
+        echo 'blacklist ''${HOME}/my-secrets' > .config/firejail/globals.local
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+
+    # Test path acl with wrapper
+    machine.succeed("sudo -u alice bash-jailed -c 'cat ~/public' | grep -q publ1c")
+    machine.fail(
+        "sudo -u alice bash-jailed -c 'cat ~/.password-store/secret' | grep -q s3cret"
+    )
+    machine.fail("sudo -u alice bash-jailed -c 'cat ~/my-secrets/secret' | grep -q s3cret")
+
+    # Test extraArgs
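+    # --private=~/firejail-home replaces the home directory inside the jail, so the file written to ~/foo ends up in ~/firejail-home/foo on the host.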
+    machine.succeed("sudo -u alice mkdir /home/alice/firejail-home")
+    machine.succeed("sudo -u alice bash-jailed2 -c 'echo test > /home/alice/foo'")
+    machine.fail("sudo -u alice cat /home/alice/foo")
+    machine.succeed("sudo -u alice cat /home/alice/firejail-home/foo | grep test")
+
+    # Test path acl with firejail executable
+    machine.succeed("sudo -u alice firejail -- bash -c 'cat ~/public' | grep -q publ1c")
+    machine.fail(
+        "sudo -u alice firejail -- bash -c 'cat ~/.password-store/secret' | grep -q s3cret"
+    )
+    machine.fail(
+        "sudo -u alice firejail -- bash -c 'cat ~/my-secrets/secret' | grep -q s3cret"
+    )
+
+    # Disabling profiles
+    machine.succeed(
+        "sudo -u alice bash -c 'firejail --noprofile -- cat ~/.password-store/secret' | grep -q s3cret"
+    )
+
+    # CVE-2020-17367
+    machine.fail(
+        "sudo -u alice firejail --private-tmp id --output=/tmp/vuln1 && cat /tmp/vuln1"
+    )
+
+    # CVE-2020-17368
+    machine.fail(
+        "sudo -u alice firejail --private-tmp --output=/tmp/foo 'bash -c $(id>/tmp/vuln2;echo id)' && cat /tmp/vuln2"
+    )
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/firewall.nix b/nixpkgs/nixos/tests/firewall.nix
new file mode 100644
index 000000000000..dd7551f143a5
--- /dev/null
+++ b/nixpkgs/nixos/tests/firewall.nix
@@ -0,0 +1,68 @@
+# Test the firewall module.
+
+import ./make-test-python.nix ( { pkgs, nftables, ... } : {
+  name = "firewall" + pkgs.lib.optionalString nftables "-nftables";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes =
+    { walled =
+        { ... }:
+        { networking.firewall.enable = true;
+          networking.firewall.logRefusedPackets = true;
+          networking.nftables.enable = nftables;
+          services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+        };
+
+      # Dummy configuration to check whether firewall.service will be honored
+      # during system activation. This only needs to be different to the
+      # original walled configuration so that there is a change in the service
+      # file.
+      walled2 =
+        { ... }:
+        { networking.firewall.enable = true;
+          networking.firewall.rejectPackets = true;
+          networking.nftables.enable = nftables;
+        };
+
+      attacker =
+        { ... }:
+        { services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+          networking.firewall.enable = false;
+        };
+    };
+
+  testScript = { nodes, ... }: let
+    newSystem = nodes.walled2.config.system.build.toplevel;
+    unit = if nftables then "nftables" else "firewall";
+  in ''
+    start_all()
+
+    walled.wait_for_unit("${unit}")
+    walled.wait_for_unit("httpd")
+    attacker.wait_for_unit("network.target")
+
+    # Local connections should still work.
+    walled.succeed("curl -v http://localhost/ >&2")
+
+    # Connections to the firewalled machine should fail, but ping should succeed.
+    attacker.fail("curl --fail --connect-timeout 2 http://walled/ >&2")
+    attacker.succeed("ping -c 1 walled >&2")
+
+    # Outgoing connections/pings should still work.
+    walled.succeed("curl -v http://attacker/ >&2")
+    walled.succeed("ping -c 1 attacker >&2")
+
+    # If we stop the firewall, then connections should succeed.
+    walled.stop_job("${unit}")
+    attacker.succeed("curl -v http://walled/ >&2")
+
+    # Check whether activation of a new configuration reloads the firewall.
+    walled.succeed(
+        "${newSystem}/bin/switch-to-configuration test 2>&1 | grep -qF ${unit}.service"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fish.nix b/nixpkgs/nixos/tests/fish.nix
new file mode 100644
index 000000000000..3d9b13c6af70
--- /dev/null
+++ b/nixpkgs/nixos/tests/fish.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "fish";
+
+  nodes.machine =
+    { pkgs, ... }:
+
+    {
+      programs.fish.enable = true;
+      environment.systemPackages = with pkgs; [
+        coreutils
+        procps # its kill collides with coreutils' kill; included to test https://github.com/NixOS/nixpkgs/issues/56432
+      ];
+    };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_file("/etc/fish/generated_completions/coreutils.fish")
+      machine.wait_for_file("/etc/fish/generated_completions/kill.fish")
+      machine.succeed(
+          "fish -ic 'echo $fish_complete_path' | grep -q '/share/fish/completions /etc/fish/generated_completions /root/.local/share/fish/generated_completions$'"
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/flannel.nix b/nixpkgs/nixos/tests/flannel.nix
new file mode 100644
index 000000000000..7615732c20ca
--- /dev/null
+++ b/nixpkgs/nixos/tests/flannel.nix
@@ -0,0 +1,57 @@
+import ./make-test-python.nix ({ lib, ...} : {
+  name = "flannel";
+
+  meta = with lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = let
+    flannelConfig = { pkgs, ... } : {
+      services.flannel = {
+        enable = true;
+        backend = {
+          Type = "udp";
+          Port = 8285;
+        };
+        network = "10.1.0.0/16";
+        iface = "eth1";
+        etcd.endpoints = ["http://etcd:2379"];
+      };
+
+      networking.firewall.allowedUDPPorts = [ 8285 ];
+    };
+  in {
+    etcd = { ... }: {
+      services = {
+        etcd = {
+          enable = true;
+          listenClientUrls = ["http://0.0.0.0:2379"]; # requires ip-address for binding
+          listenPeerUrls = ["http://0.0.0.0:2380"]; # requires ip-address for binding
+          advertiseClientUrls = ["http://etcd:2379"];
+          initialAdvertisePeerUrls = ["http://etcd:2379"];
+          initialCluster = ["etcd=http://etcd:2379"];
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 2379 ];
+    };
+
+    node1 = flannelConfig;
+    node2 = flannelConfig;
+  };
+
+  testScript = ''
+    start_all()
+
+    node1.wait_for_unit("flannel.service")
+    node2.wait_for_unit("flannel.service")
+
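+    # Each node gets a flannel0 tunnel interface with an address from the 10.1.0.0/16 overlay; ping across the overlay to verify the UDP backend.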
+    node1.wait_until_succeeds("ip l show dev flannel0")
+    ip1 = node1.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
+    node2.wait_until_succeeds("ip l show dev flannel0")
+    ip2 = node2.succeed("ip -4 addr show flannel0 | grep -oP '(?<=inet).*(?=/)'")
+
+    node1.wait_until_succeeds(f"ping -c 1 {ip2}")
+    node2.wait_until_succeeds(f"ping -c 1 {ip1}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fluentd.nix b/nixpkgs/nixos/tests/fluentd.nix
new file mode 100644
index 000000000000..150638f246f2
--- /dev/null
+++ b/nixpkgs/nixos/tests/fluentd.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "fluentd";
+
+  nodes.machine = { pkgs, ... }: {
+    services.fluentd = {
+      enable = true;
+      config = ''
+        <source>
+          @type http
+          port 9880
+        </source>
+
+        <match **>
+          @type copy
+          <store>
+            @type file
+            format json
+            path /tmp/fluentd
+            symlink_path /tmp/current-log
+          </store>
+          <store>
+            @type stdout
+          </store>
+        </match>
+      '';
+    };
+  };
+
+  testScript = let
+    testMessage = "an example log message";
+
+    payload = pkgs.writeText "test-message.json" (builtins.toJSON {
+      inherit testMessage;
+    });
+  in ''
+    machine.start()
+    machine.wait_for_unit("fluentd.service")
+    machine.wait_for_open_port(9880)
+
+    machine.succeed(
+        "curl -fsSL -X POST -H 'Content-type: application/json' -d @${payload} http://localhost:9880/test.tag"
+    )
+
+    # blocking flush
+    machine.succeed("systemctl stop fluentd")
+
+    machine.succeed("grep '${testMessage}' /tmp/current-log")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fluidd.nix b/nixpkgs/nixos/tests/fluidd.nix
new file mode 100644
index 000000000000..82a2c1e4049f
--- /dev/null
+++ b/nixpkgs/nixos/tests/fluidd.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "fluidd";
+  meta.maintainers = with lib.maintainers; [ vtuan10 ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.fluidd = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(80)
+    machine.succeed("curl -sSfL http://localhost/ | grep 'fluidd'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/fontconfig-default-fonts.nix b/nixpkgs/nixos/tests/fontconfig-default-fonts.nix
new file mode 100644
index 000000000000..293dc43f91f3
--- /dev/null
+++ b/nixpkgs/nixos/tests/fontconfig-default-fonts.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ lib, ... }:
+{
+  name = "fontconfig-default-fonts";
+
+  meta.maintainers = with lib.maintainers; [
+    jtojnar
+  ];
+
+  nodes.machine = { config, pkgs, ... }: {
+    fonts.enableDefaultPackages = true; # Background fonts
+    fonts.packages = with pkgs; [
+      noto-fonts-color-emoji
+      cantarell-fonts
+      twitter-color-emoji
+      source-code-pro
+      gentium
+    ];
+    fonts.fontconfig.defaultFonts = {
+      serif = [ "Gentium Plus" ];
+      sansSerif = [ "Cantarell" ];
+      monospace = [ "Source Code Pro" ];
+      emoji = [ "Twitter Color Emoji" ];
+    };
+  };
+
+  testScript = ''
+    machine.succeed("fc-match serif | grep '\"Gentium Plus\"'")
+    machine.succeed("fc-match sans-serif | grep '\"Cantarell\"'")
+    machine.succeed("fc-match monospace | grep '\"Source Code Pro\"'")
+    machine.succeed("fc-match emoji | grep '\"Twitter Color Emoji\"'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/forgejo.nix b/nixpkgs/nixos/tests/forgejo.nix
new file mode 100644
index 000000000000..6acd6acb50fa
--- /dev/null
+++ b/nixpkgs/nixos/tests/forgejo.nix
@@ -0,0 +1,178 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  ## gpg --faked-system-time='20230301T010000!' --quick-generate-key snakeoil ed25519 sign
+  signingPrivateKey = ''
+    -----BEGIN PGP PRIVATE KEY BLOCK-----
+
+    lFgEY/6jkBYJKwYBBAHaRw8BAQdADXiZRV8RJUyC9g0LH04wLMaJL9WTc+szbMi7
+    5fw4yP8AAQCl8EwGfzSLm/P6fCBfA3I9znFb3MEHGCCJhJ6VtKYyRw7ktAhzbmFr
+    ZW9pbIiUBBMWCgA8FiEE+wUM6VW/NLtAdSixTWQt6LZ4x50FAmP+o5ACGwMFCQPC
+    ZwAECwkIBwQVCgkIBRYCAwEAAh4FAheAAAoJEE1kLei2eMedFTgBAKQs1oGFZrCI
+    TZP42hmBTKxGAI1wg7VSdDEWTZxut/2JAQDGgo2sa4VHMfj0aqYGxrIwfP2B7JHO
+    GCqGCRf9O/hzBA==
+    =9Uy3
+    -----END PGP PRIVATE KEY BLOCK-----
+  '';
+  signingPrivateKeyId = "4D642DE8B678C79D";
+
+  supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
+  makeGForgejoTest = type: nameValuePair type (makeTest {
+    name = "forgejo-${type}";
+    meta.maintainers = with maintainers; [ bendlas emilylange ];
+
+    nodes = {
+      server = { config, pkgs, ... }: {
+        virtualisation.memorySize = 2047;
+        services.forgejo = {
+          enable = true;
+          database = { inherit type; };
+          settings.service.DISABLE_REGISTRATION = true;
+          settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
+          settings.actions.ENABLED = true;
+        };
+        environment.systemPackages = [ config.services.forgejo.package pkgs.gnupg pkgs.jq pkgs.file ];
+        services.openssh.enable = true;
+
+        specialisation.runner = {
+          inheritParentConfig = true;
+          configuration.services.gitea-actions-runner.instances."test" = {
+            enable = true;
+            name = "ci";
+            url = "http://localhost:3000";
+            labels = [
+              # don't require docker/podman
+              "native:host"
+            ];
+            tokenFile = "/var/lib/forgejo/runner_token";
+          };
+        };
+        specialisation.dump = {
+          inheritParentConfig = true;
+          configuration.services.forgejo.dump = {
+            enable = true;
+            type = "tar.zst";
+            file = "dump.tar.zst";
+          };
+        };
+      };
+      client1 = { config, pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+      client2 = { config, pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+    };
+
+    testScript = { nodes, ... }:
+      let
+        inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+        serverSystem = nodes.server.system.build.toplevel;
+        dumpFile = with nodes.server.specialisation.dump.configuration.services.forgejo.dump; "${backupDir}/${file}";
+      in
+      ''
+        import json
+        GIT_SSH_COMMAND = "ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no"
+        REPO = "forgejo@server:test/repo"
+        PRIVK = "${snakeOilPrivateKey}"
+
+        start_all()
+
+        client1.succeed("mkdir /tmp/repo")
+        client1.succeed("mkdir -p $HOME/.ssh")
+        client1.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
+        client1.succeed("chmod 0400 $HOME/.ssh/privk")
+        client1.succeed("git -C /tmp/repo init")
+        client1.succeed("echo hello world > /tmp/repo/testfile")
+        client1.succeed("git -C /tmp/repo add .")
+        client1.succeed("git config --global user.email test@localhost")
+        client1.succeed("git config --global user.name test")
+        client1.succeed("git -C /tmp/repo commit -m 'Initial import'")
+        client1.succeed(f"git -C /tmp/repo remote add origin {REPO}")
+
+        server.wait_for_unit("forgejo.service")
+        server.wait_for_open_port(3000)
+        server.wait_for_open_port(22)
+        server.succeed("curl --fail http://localhost:3000/")
+
+        server.succeed(
+            "su -l forgejo -c 'gpg --homedir /var/lib/forgejo/data/home/.gnupg "
+            + "--import ${toString (pkgs.writeText "forgejo.key" signingPrivateKey)}'"
+        )
+
+        assert "BEGIN PGP PUBLIC KEY BLOCK" in server.succeed("curl http://localhost:3000/api/v1/signing-key.gpg")
+
+        server.succeed(
+            "curl --fail http://localhost:3000/user/sign_up | grep 'Registration is disabled. "
+            + "Please contact your site administrator.'"
+        )
+        server.succeed(
+            "su -l forgejo -c 'GITEA_WORK_DIR=/var/lib/forgejo gitea admin user create "
+            + "--username test --password totallysafe --email test@localhost'"
+        )
+
+        api_token = server.succeed(
+            "curl --fail -X POST http://test:totallysafe@localhost:3000/api/v1/users/test/tokens "
+            + "-H 'Accept: application/json' -H 'Content-Type: application/json' -d "
+            + "'{\"name\":\"token\",\"scopes\":[\"all\"]}' | jq '.sha1' | xargs echo -n"
+        )
+
+        server.succeed(
+            "curl --fail -X POST http://localhost:3000/api/v1/user/repos "
+            + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+            + f"-H 'Authorization: token {api_token}'"
+            + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
+        )
+
+        server.succeed(
+            "curl --fail -X POST http://localhost:3000/api/v1/user/keys "
+            + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+            + f"-H 'Authorization: token {api_token}'"
+            + ' -d \'{"key":"${snakeOilPublicKey}","read_only":true,"title":"SSH"}\'''
+        )
+
+        client1.succeed(
+            f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git -C /tmp/repo push origin master"
+        )
+
+        client2.succeed("mkdir -p $HOME/.ssh")
+        client2.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
+        client2.succeed("chmod 0400 $HOME/.ssh/privk")
+        client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git clone {REPO}")
+        client2.succeed('test "$(cat repo/testfile | xargs echo -n)" = "hello world"')
+
+        with subtest("Testing git protocol version=2 over ssh"):
+            git_protocol = client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' GIT_TRACE2_EVENT=true git -C repo fetch |& grep negotiated-version")
+            version = json.loads(git_protocol).get("value")
+            assert version == "2", f"git did not negotiate protocol version 2, but version {version} instead."
+
+        server.wait_until_succeeds(
+            'test "$(curl http://localhost:3000/api/v1/repos/test/repo/commits '
+            + '-H "Accept: application/json" | jq length)" = "1"',
+            timeout=10
+        )
+
+        with subtest("Testing runner registration"):
+            server.succeed(
+                "su -l forgejo -c 'GITEA_WORK_DIR=/var/lib/forgejo gitea actions generate-runner-token' | sed 's/^/TOKEN=/' | tee /var/lib/forgejo/runner_token"
+            )
+            server.succeed("${serverSystem}/specialisation/runner/bin/switch-to-configuration test")
+            server.wait_for_unit("gitea-runner-test.service")
+            server.succeed("journalctl -o cat -u gitea-runner-test.service | grep -q 'Runner registered successfully'")
+
+        with subtest("Testing backup service"):
+            server.succeed("${serverSystem}/specialisation/dump/bin/switch-to-configuration test")
+            server.systemctl("start forgejo-dump")
+            assert "Zstandard compressed data" in server.succeed("file ${dumpFile}")
+            server.copy_from_vm("${dumpFile}")
+      '';
+  });
+in
+
+listToAttrs (map makeGForgejoTest supportedDbTypes)
diff --git a/nixpkgs/nixos/tests/freenet.nix b/nixpkgs/nixos/tests/freenet.nix
new file mode 100644
index 000000000000..96dbb4caa129
--- /dev/null
+++ b/nixpkgs/nixos/tests/freenet.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "freenet";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ nagy ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.freenet.enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("freenet.service")
+    machine.wait_for_open_port(8888)
+    machine.wait_until_succeeds("curl -sfL http://localhost:8888/ | grep Freenet")
+    machine.succeed("systemctl stop freenet")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/freeswitch.nix b/nixpkgs/nixos/tests/freeswitch.nix
new file mode 100644
index 000000000000..bfb7339ec3c0
--- /dev/null
+++ b/nixpkgs/nixos/tests/freeswitch.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "freeswitch";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ misuzu ];
+  };
+  nodes = {
+    node0 = { config, lib, ... }: {
+      networking.useDHCP = false;
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          {
+            address = "192.168.0.1";
+            prefixLength = 24;
+          }
+        ];
+      };
+      services.freeswitch = {
+        enable = true;
+        enableReload = true;
+        configTemplate = "${config.services.freeswitch.package}/share/freeswitch/conf/minimal";
+      };
+    };
+  };
+  testScript = ''
+    node0.wait_for_unit("freeswitch.service")
+    # Wait for SIP port to be open
+    node0.wait_for_open_port(5060)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/freetube.nix b/nixpkgs/nixos/tests/freetube.nix
new file mode 100644
index 000000000000..f285384b68e0
--- /dev/null
+++ b/nixpkgs/nixos/tests/freetube.nix
@@ -0,0 +1,41 @@
+let
+  tests = {
+    wayland = { pkgs, ... }: {
+      imports = [ ./common/wayland-cage.nix ];
+      services.cage.program = "${pkgs.freetube}/bin/freetube";
+      virtualisation.memorySize = 2047;
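+      # NIXOS_OZONE_WL makes the Electron app run natively on Wayland; DISPLAY is set to a bogus value so it cannot fall back to X11.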
+      environment.variables.NIXOS_OZONE_WL = "1";
+      environment.variables.DISPLAY = "do not use";
+    };
+    xorg = { pkgs, ... }: {
+      imports = [ ./common/user-account.nix ./common/x11.nix ];
+      virtualisation.memorySize = 2047;
+      services.xserver.enable = true;
+      services.xserver.displayManager.sessionCommands = ''
+        ${pkgs.freetube}/bin/freetube
+      '';
+      test-support.displayManager.auto.user = "alice";
+    };
+  };
+
+  mkTest = name: machine:
+    import ./make-test-python.nix ({ pkgs, ... }: {
+      inherit name;
+      nodes = { "${name}" = machine; };
+      meta.maintainers = with pkgs.lib.maintainers; [ kirillrdy ];
+      enableOCR = true;
+
+      testScript = ''
+        start_all()
+        machine.wait_for_unit('graphical.target')
+        machine.wait_for_text('Your Subscription list is currently empty')
+        machine.send_key("ctrl-r")
+        machine.wait_for_text('Your Subscription list is currently empty')
+        machine.screenshot("main.png")
+        machine.send_key("ctrl-comma")
+        machine.wait_for_text('General Settings', timeout=30)
+        machine.screenshot("preferences.png")
+      '';
+    });
+in
+builtins.mapAttrs (k: v: mkTest k v { }) tests
diff --git a/nixpkgs/nixos/tests/freshrss-http-auth.nix b/nixpkgs/nixos/tests/freshrss-http-auth.nix
new file mode 100644
index 000000000000..d0ec3da31689
--- /dev/null
+++ b/nixpkgs/nixos/tests/freshrss-http-auth.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "freshrss";
+  meta.maintainers = with lib.maintainers; [ mattchrist ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.freshrss = {
+      enable = true;
+      baseUrl = "http://localhost";
+      dataDir = "/srv/freshrss";
+      authType = "http_auth";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_open_port(80)
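+    # With authType = "http_auth", FreshRSS trusts the Remote-User header set by the authenticating web server in front of it.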
+    response = machine.succeed("curl -vvv -s -H 'Host: freshrss' -H 'Remote-User: testuser' http://127.0.0.1:80/i/")
+    assert 'Account: testuser' in response, "http_auth method didn't work."
+  '';
+})
diff --git a/nixpkgs/nixos/tests/freshrss-pgsql.nix b/nixpkgs/nixos/tests/freshrss-pgsql.nix
new file mode 100644
index 000000000000..c685f4a8159b
--- /dev/null
+++ b/nixpkgs/nixos/tests/freshrss-pgsql.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "freshrss";
+  meta.maintainers = with lib.maintainers; [ etu stunkymonkey ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.freshrss = {
+      enable = true;
+      baseUrl = "http://localhost";
+      passwordFile = pkgs.writeText "password" "secret";
+      dataDir = "/srv/freshrss";
+      database = {
+        type = "pgsql";
+        port = 5432;
+        user = "freshrss";
+        passFile = pkgs.writeText "db-password" "db-secret";
+      };
+    };
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "freshrss" ];
+      ensureUsers = [
+        {
+          name = "freshrss";
+          ensureDBOwnership = true;
+        }
+      ];
+      initialScript = pkgs.writeText "postgresql-password" ''
+        CREATE ROLE freshrss WITH LOGIN PASSWORD 'db-secret' CREATEDB;
+      '';
+    };
+
+    systemd.services."freshrss-config" = {
+      requires = [ "postgresql.service" ];
+      after = [ "postgresql.service" ];
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_open_port(5432)
+    machine.wait_for_open_port(80)
+    response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://127.0.0.1:80/i/")
+    assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/freshrss-sqlite.nix b/nixpkgs/nixos/tests/freshrss-sqlite.nix
new file mode 100644
index 000000000000..b821c98a7e7a
--- /dev/null
+++ b/nixpkgs/nixos/tests/freshrss-sqlite.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "freshrss";
+  meta.maintainers = with lib.maintainers; [ etu stunkymonkey ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.freshrss = {
+      enable = true;
+      baseUrl = "http://localhost";
+      passwordFile = pkgs.writeText "password" "secret";
+      dataDir = "/srv/freshrss";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_open_port(80)
+    response = machine.succeed("curl -vvv -s -H 'Host: freshrss' http://127.0.0.1:80/i/")
+    assert '<title>Login · FreshRSS</title>' in response, "Login page didn't load successfully"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/frigate.nix b/nixpkgs/nixos/tests/frigate.nix
new file mode 100644
index 000000000000..836fe0d063f8
--- /dev/null
+++ b/nixpkgs/nixos/tests/frigate.nix
@@ -0,0 +1,60 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+{
+  name = "frigate";
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  nodes = {
+    machine = { config, ... }: {
+      services.frigate = {
+        enable = true;
+
+        hostname = "localhost";
+
+        settings = {
+          mqtt.enabled = false;
+
+          cameras.test = {
+            ffmpeg = {
+              input_args = "-fflags nobuffer -strict experimental -fflags +genpts+discardcorrupt -r 10 -use_wallclock_as_timestamps 1";
+              inputs = [ {
+                path = "http://127.0.0.1:8080";
+                roles = [
+                  "record"
+                ];
+              } ];
+            };
+          };
+
+          record.enabled = true;
+        };
+      };
+
+      systemd.services.video-stream = {
+        description = "Start a test stream that frigate can capture";
+        before = [
+          "frigate.service"
+        ];
+        wantedBy = [
+          "multi-user.target"
+        ];
+        serviceConfig = {
+          DynamicUser = true;
+          ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -re -f lavfi -i smptebars=size=800x600:rate=10 -f mpegts -listen 1 http://0.0.0.0:8080";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("frigate.service")
+
+    machine.wait_for_open_port(5001)
+
+    machine.succeed("curl http://localhost:5001")
+
+    machine.wait_for_file("/var/cache/frigate/test-*.mp4")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/frp.nix b/nixpkgs/nixos/tests/frp.nix
new file mode 100644
index 000000000000..2f5c0f8ec933
--- /dev/null
+++ b/nixpkgs/nixos/tests/frp.nix
@@ -0,0 +1,86 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "frp";
+  meta.maintainers = with lib.maintainers; [ zaldnoay janik ];
+  nodes = {
+    frps = {
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.1/24";
+      };
+
+      services.frp = {
+        enable = true;
+        role = "server";
+        settings = {
+          common = {
+            bind_port = 7000;
+            vhost_http_port = 80;
+          };
+        };
+      };
+    };
+
+
+    frpc = {
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.2/24";
+      };
+
+      services.httpd = {
+        enable = true;
+        adminAddr = "admin@example.com";
+        virtualHosts."test-appication" =
+        let
+          testdir = pkgs.writeTextDir "web/index.php" "<?php phpinfo();";
+        in
+        {
+          documentRoot = "${testdir}/web";
+          locations."/" = {
+            index = "index.php index.html";
+          };
+        };
+        phpPackage = pkgs.php81;
+        enablePHP = true;
+      };
+
+      services.frp = {
+        enable = true;
+        role = "client";
+        settings = {
+          common = {
+            server_addr = "10.0.0.1";
+            server_port = 7000;
+          };
+          web = {
+            type = "http";
+            local_port = 80;
+            custom_domains = "10.0.0.1";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    frps.wait_for_unit("frp.service")
+    frps.wait_for_open_port(80)
+    frpc.wait_for_unit("frp.service")
+    response = frpc.succeed("curl -fvvv -s http://127.0.0.1/")
+    assert "PHP Version ${pkgs.php81.version}" in response, "PHP version not detected"
+    response = frpc.succeed("curl -fvvv -s http://10.0.0.1/")
+    assert "PHP Version ${pkgs.php81.version}" in response, "PHP version not detected"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/frr.nix b/nixpkgs/nixos/tests/frr.nix
new file mode 100644
index 000000000000..598d7a7d2867
--- /dev/null
+++ b/nixpkgs/nixos/tests/frr.nix
@@ -0,0 +1,104 @@
+# This test runs FRR and checks if OSPF routing works.
+#
+# Network topology:
+#   [ client ]--net1--[ router1 ]--net2--[ router2 ]--net3--[ server ]
+#
+# All interfaces are in OSPF Area 0.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+
+    ifAddr = node: iface: (pkgs.lib.head node.config.networking.interfaces.${iface}.ipv4.addresses).address;
+
+    ospfConf1 = ''
+      router ospf
+        network 192.168.0.0/16 area 0
+    '';
+
+    ospfConf2 = ''
+      interface eth2
+        ip ospf hello-interval 1
+        ip ospf dead-interval 5
+      !
+      router ospf
+        network 192.168.0.0/16 area 0
+    '';
+
+  in
+    {
+      name = "frr";
+
+      meta = with pkgs.lib.maintainers; {
+        maintainers = [ hexa ];
+      };
+
+      nodes = {
+
+        client =
+          { nodes, ... }:
+          {
+            virtualisation.vlans = [ 1 ];
+            networking.defaultGateway = ifAddr nodes.router1 "eth1";
+          };
+
+        router1 =
+          { ... }:
+          {
+            virtualisation.vlans = [ 1 2 ];
+            boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+            networking.firewall.extraCommands = "iptables -A nixos-fw -i eth2 -p ospfigp -j ACCEPT";
+            services.frr.ospf = {
+              enable = true;
+              config = ospfConf1;
+            };
+
+            specialisation.ospf.configuration = {
+              services.frr.ospf.config = ospfConf2;
+            };
+          };
+
+        router2 =
+          { ... }:
+          {
+            virtualisation.vlans = [ 3 2 ];
+            boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+            networking.firewall.extraCommands = "iptables -A nixos-fw -i eth2 -p ospfigp -j ACCEPT";
+            services.frr.ospf = {
+              enable = true;
+              config = ospfConf2;
+            };
+          };
+
+        server =
+          { nodes, ... }:
+          {
+            virtualisation.vlans = [ 3 ];
+            networking.defaultGateway = ifAddr nodes.router2 "eth1";
+          };
+      };
+
+      testScript =
+        { nodes, ... }:
+        ''
+          start_all()
+
+          # Wait for the networking to start on all machines
+          for machine in client, router1, router2, server:
+              machine.wait_for_unit("network.target")
+
+          with subtest("Wait for Zebra and OSPFD"):
+              for gw in router1, router2:
+                  gw.wait_for_unit("zebra")
+                  gw.wait_for_unit("ospfd")
+
+          router1.succeed("${nodes.router1.config.system.build.toplevel}/specialisation/ospf/bin/switch-to-configuration test >&2")
+
+          with subtest("Wait for OSPF to form adjacencies"):
+              for gw in router1, router2:
+                  gw.wait_until_succeeds("vtysh -c 'show ip ospf neighbor' | grep Full")
+                  gw.wait_until_succeeds("vtysh -c 'show ip route' | grep '^O>'")
+
+          with subtest("Test ICMP"):
+              client.wait_until_succeeds("ping -c 3 server >&2")
+        '';
+    })
diff --git a/nixpkgs/nixos/tests/fsck.nix b/nixpkgs/nixos/tests/fsck.nix
new file mode 100644
index 000000000000..31ed8bdf78c0
--- /dev/null
+++ b/nixpkgs/nixos/tests/fsck.nix
@@ -0,0 +1,45 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, systemdStage1 ? false
+}:
+
+import ./make-test-python.nix {
+  name = "fsck";
+
+  nodes.machine = { lib, ... }: {
+    virtualisation.emptyDiskImages = [ 1 ];
+
+    virtualisation.fileSystems = {
+      "/mnt" = {
+        device = "/dev/vdb";
+        fsType = "ext4";
+        autoFormat = true;
+      };
+    };
+
+    boot.initrd.systemd.enable = systemdStage1;
+  };
+
+  testScript =  { nodes, ...}:
+  let
+    rootDevice = nodes.machine.virtualisation.rootDevice;
+  in
+  ''
+    machine.wait_for_unit("default.target")
+
+    with subtest("root fs is fsckd"):
+        machine.succeed("journalctl -b | grep '${if systemdStage1
+          then "fsck.*${builtins.baseNameOf rootDevice}.*clean"
+          else "fsck.ext4.*${rootDevice}"}'")
+
+    with subtest("mnt fs is fsckd"):
+        machine.succeed("journalctl -b | grep 'fsck.*vdb.*clean'")
+        machine.succeed(
+            "grep 'Requires=systemd-fsck@dev-vdb.service' /run/systemd/generator/mnt.mount"
+        )
+        machine.succeed(
+            "grep 'After=systemd-fsck@dev-vdb.service' /run/systemd/generator/mnt.mount"
+        )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/fscrypt.nix b/nixpkgs/nixos/tests/fscrypt.nix
new file mode 100644
index 000000000000..03367979359b
--- /dev/null
+++ b/nixpkgs/nixos/tests/fscrypt.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ ... }:
+{
+  name = "fscrypt";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/user-account.nix ];
+    security.pam.enableFscrypt = true;
+  };
+
+  testScript = ''
+    def login_as_alice():
+        machine.wait_until_tty_matches("1", "login: ")
+        machine.send_chars("alice\n")
+        machine.wait_until_tty_matches("1", "Password: ")
+        machine.send_chars("foobar\n")
+        machine.wait_until_tty_matches("1", "alice\@machine")
+
+
+    def logout():
+        machine.send_chars("logout\n")
+        machine.wait_until_tty_matches("1", "login: ")
+
+
+    machine.wait_for_unit("default.target")
+
+    with subtest("Enable fscrypt on filesystem"):
+        machine.succeed("tune2fs -O encrypt /dev/vda")
+        machine.succeed("fscrypt setup --quiet --force --time=1ms")
+
+    with subtest("Set up alice with an fscrypt-enabled home directory"):
+        machine.succeed("(echo foobar; echo foobar) | passwd alice")
+        machine.succeed("chown -R alice.users ~alice")
+        machine.succeed("echo foobar | fscrypt encrypt --skip-unlock --source=pam_passphrase --user=alice /home/alice")
+
+    with subtest("Create file as alice"):
+      login_as_alice()
+      machine.succeed("echo hello > /home/alice/world")
+      logout()
+      # Wait for logout to be processed
+      machine.sleep(1)
+
+    with subtest("File should not be readable without being logged in as alice"):
+      machine.fail("cat /home/alice/world")
+
+    with subtest("File should be readable again as alice"):
+      login_as_alice()
+      machine.succeed("cat /home/alice/world")
+      logout()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ft2-clone.nix b/nixpkgs/nixos/tests/ft2-clone.nix
new file mode 100644
index 000000000000..a8395d4ebaa6
--- /dev/null
+++ b/nixpkgs/nixos/tests/ft2-clone.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ft2-clone";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    sound.enable = true;
+    environment.systemPackages = [ pkgs.ft2-clone ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      # Add a dummy sound card, or the program won't start
+      machine.execute("modprobe snd-dummy")
+
+      machine.execute("ft2-clone >&2 &")
+
+      machine.wait_for_window(r"Fasttracker")
+      machine.sleep(5)
+      machine.wait_for_text(r"(Songlen|Repstart|Time|About|Nibbles|Help)")
+      machine.screenshot("screen")
+    '';
+})
+
diff --git a/nixpkgs/nixos/tests/garage/basic.nix b/nixpkgs/nixos/tests/garage/basic.nix
new file mode 100644
index 000000000000..88d747ea33b9
--- /dev/null
+++ b/nixpkgs/nixos/tests/garage/basic.nix
@@ -0,0 +1,98 @@
+args@{ mkNode, ver, ... }:
+(import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "garage-basic";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+  };
+
+  nodes = {
+    single_node = mkNode { replicationMode = "none"; };
+  };
+
+  testScript = ''
+    from typing import List
+    from dataclasses import dataclass
+    import re
+
+    start_all()
+
+    cur_version_regex = re.compile(r'Current cluster layout version: (?P<ver>\d*)')
+    key_creation_regex = re.compile(r'Key name: (?P<key_name>.*)\nKey ID: (?P<key_id>.*)\nSecret key: (?P<secret_key>.*)')
+
+    @dataclass
+    class S3Key:
+       key_name: str
+       key_id: str
+       secret_key: str
+
+    @dataclass
+    class GarageNode:
+       node_id: str
+       host: str
+
+    def get_node_fqn(machine: Machine) -> GarageNode:
+      node_id, host = machine.succeed("garage node id").split('@')
+      return GarageNode(node_id=node_id, host=host)
+
+    def get_node_id(machine: Machine) -> str:
+      return get_node_fqn(machine).node_id
+
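+    # Garage reports the current layout version; "garage layout apply" expects the next one, hence the + 1 below.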
+    def get_layout_version(machine: Machine) -> int:
+      version_data = machine.succeed("garage layout show")
+      m = cur_version_regex.search(version_data)
+      if m and m.group('ver') is not None:
+        return int(m.group('ver')) + 1
+      else:
+        raise ValueError('Cannot find current layout version')
+
+    def apply_garage_layout(machine: Machine, layouts: List[str]):
+       for layout in layouts:
+          machine.succeed(f"garage layout assign {layout}")
+       version = get_layout_version(machine)
+       machine.succeed(f"garage layout apply --version {version}")
+
+    def create_api_key(machine: Machine, key_name: str) -> S3Key:
+       output = machine.succeed(f"garage key ${if ver == "0_8" then "new --name" else "create"} {key_name}")
+       m = key_creation_regex.match(output)
+       if not m or not m.group('key_id') or not m.group('secret_key'):
+          raise ValueError('Cannot parse API key data')
+       return S3Key(key_name=key_name, key_id=m.group('key_id'), secret_key=m.group('secret_key'))
+
+    def get_api_key(machine: Machine, key_pattern: str) -> S3Key:
+       output = machine.succeed(f"garage key info {key_pattern}")
+       m = key_creation_regex.match(output)
+       if not m or not m.group('key_name') or not m.group('key_id') or not m.group('secret_key'):
+           raise ValueError('Cannot parse API key data')
+       return S3Key(key_name=m.group('key_name'), key_id=m.group('key_id'), secret_key=m.group('secret_key'))
+
+    def test_bucket_writes(node):
+      node.succeed("garage bucket create test-bucket")
+      s3_key = create_api_key(node, "test-api-key")
+      node.succeed("garage bucket allow --read --write test-bucket --key test-api-key")
+      other_s3_key = get_api_key(node, 'test-api-key')
+      assert other_s3_key.secret_key == s3_key.secret_key
+      node.succeed(
+        f"mc alias set test-garage http://[::1]:3900 {s3_key.key_id} {s3_key.secret_key} --api S3v4"
+      )
+      node.succeed("echo test | mc pipe test-garage/test-bucket/test.txt")
+      assert node.succeed("mc cat test-garage/test-bucket/test.txt").strip() == "test"
+
+    def test_bucket_over_http(node, bucket='test-bucket', url=None):
+      if url is None:
+         url = f"{bucket}.web.garage"
+
+      node.succeed(f'garage bucket website --allow {bucket}')
+      node.succeed(f'echo hello world | mc pipe test-garage/{bucket}/index.html')
+      assert (node.succeed(f"curl -H 'Host: {url}' http://localhost:3902")).strip() == 'hello world'
+
+    with subtest("Garage works as a single-node S3 storage"):
+      single_node.wait_for_unit("garage.service")
+      single_node.wait_for_open_port(3900)
+      # Now Garage is initialized.
+      single_node_id = get_node_id(single_node)
+      apply_garage_layout(single_node, [f'-z qemutest -c ${if ver == "0_8" then "1" else "1G"} "{single_node_id}"'])
+      # Now Garage is operational.
+      test_bucket_writes(single_node)
+      test_bucket_over_http(single_node)
+  '';
+})) args
diff --git a/nixpkgs/nixos/tests/garage/default.nix b/nixpkgs/nixos/tests/garage/default.nix
new file mode 100644
index 000000000000..a42236e9a5bb
--- /dev/null
+++ b/nixpkgs/nixos/tests/garage/default.nix
@@ -0,0 +1,54 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+with pkgs.lib;
+
+let
+    mkNode = package: { replicationMode, publicV6Address ? "::1" }: { pkgs, ... }: {
+      networking.interfaces.eth1.ipv6.addresses = [{
+        address = publicV6Address;
+        prefixLength = 64;
+      }];
+
+      networking.firewall.allowedTCPPorts = [ 3901 3902 ];
+
+      services.garage = {
+        enable = true;
+        inherit package;
+        settings = {
+          replication_mode = replicationMode;
+
+          rpc_bind_addr = "[::]:3901";
+          rpc_public_addr = "[${publicV6Address}]:3901";
+          rpc_secret = "5c1915fa04d0b6739675c61bf5907eb0fe3d9c69850c83820f51b4d25d13868c";
+
+          s3_api = {
+            s3_region = "garage";
+            api_bind_addr = "[::]:3900";
+            root_domain = ".s3.garage";
+          };
+
+          s3_web = {
+            bind_addr = "[::]:3902";
+            root_domain = ".web.garage";
+            index = "index.html";
+          };
+        };
+      };
+      environment.systemPackages = [ pkgs.minio-client ];
+
+      # Garage requires at least 1GiB of free disk space to run.
+      virtualisation.diskSize = 2 * 1024;
+    };
+in
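+  # Build a test matrix: one basic and one 3-node-replication test per supported Garage version.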
+  foldl
+  (matrix: ver: matrix // {
+    "basic${toString ver}" = import ./basic.nix { inherit system pkgs ver; mkNode = mkNode pkgs."garage_${ver}"; };
+    "with-3node-replication${toString ver}" = import ./with-3node-replication.nix { inherit system pkgs ver; mkNode = mkNode pkgs."garage_${ver}"; };
+  })
+  {}
+  [
+    "0_8"
+    "0_9"
+  ]
diff --git a/nixpkgs/nixos/tests/garage/with-3node-replication.nix b/nixpkgs/nixos/tests/garage/with-3node-replication.nix
new file mode 100644
index 000000000000..d4387b198d97
--- /dev/null
+++ b/nixpkgs/nixos/tests/garage/with-3node-replication.nix
@@ -0,0 +1,121 @@
+args@{ mkNode, ver, ... }:
+(import ../make-test-python.nix ({ pkgs, ...} :
+{
+  name = "garage-3node-replication";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+  };
+
+  nodes = {
+    node1 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::1"; };
+    node2 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::2"; };
+    node3 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::3"; };
+    node4 = mkNode { replicationMode = 3; publicV6Address = "fc00:1::4"; };
+  };
+
+  testScript = ''
+    from typing import List
+    from dataclasses import dataclass
+    import re
+    start_all()
+
+    cur_version_regex = re.compile(r'Current cluster layout version: (?P<ver>\d*)')
+    key_creation_regex = re.compile(r'Key name: (?P<key_name>.*)\nKey ID: (?P<key_id>.*)\nSecret key: (?P<secret_key>.*)')
+
+    @dataclass
+    class S3Key:
+       key_name: str
+       key_id: str
+       secret_key: str
+
+    @dataclass
+    class GarageNode:
+       node_id: str
+       host: str
+
+    def get_node_fqn(machine: Machine) -> GarageNode:
+      node_id, host = machine.succeed("garage node id").split('@')
+      return GarageNode(node_id=node_id, host=host)
+
+    def get_node_id(machine: Machine) -> str:
+      return get_node_fqn(machine).node_id
+
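+    # Garage reports the current layout version; "garage layout apply" expects the next one, hence the + 1 below.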
+    def get_layout_version(machine: Machine) -> int:
+      version_data = machine.succeed("garage layout show")
+      m = cur_version_regex.search(version_data)
+      if m and m.group('ver') is not None:
+        return int(m.group('ver')) + 1
+      else:
+        raise ValueError('Cannot find current layout version')
+
+    def apply_garage_layout(machine: Machine, layouts: List[str]):
+       for layout in layouts:
+          machine.succeed(f"garage layout assign {layout}")
+       version = get_layout_version(machine)
+       machine.succeed(f"garage layout apply --version {version}")
+
+    def create_api_key(machine: Machine, key_name: str) -> S3Key:
+       output = machine.succeed(f"garage key ${if ver == "0_8" then "new --name" else "create"} {key_name}")
+       m = key_creation_regex.match(output)
+       if not m or not m.group('key_id') or not m.group('secret_key'):
+          raise ValueError('Cannot parse API key data')
+       return S3Key(key_name=key_name, key_id=m.group('key_id'), secret_key=m.group('secret_key'))
+
+    def get_api_key(machine: Machine, key_pattern: str) -> S3Key:
+       output = machine.succeed(f"garage key info {key_pattern}")
+       m = key_creation_regex.match(output)
+       if not m or not m.group('key_name') or not m.group('key_id') or not m.group('secret_key'):
+           raise ValueError('Cannot parse API key data')
+       return S3Key(key_name=m.group('key_name'), key_id=m.group('key_id'), secret_key=m.group('secret_key'))
+
+    def test_bucket_writes(node):
+      node.succeed("garage bucket create test-bucket")
+      s3_key = create_api_key(node, "test-api-key")
+      node.succeed("garage bucket allow --read --write test-bucket --key test-api-key")
+      other_s3_key = get_api_key(node, 'test-api-key')
+      assert other_s3_key.secret_key == s3_key.secret_key
+      node.succeed(
+        f"mc alias set test-garage http://[::1]:3900 {s3_key.key_id} {s3_key.secret_key} --api S3v4"
+      )
+      node.succeed("echo test | mc pipe test-garage/test-bucket/test.txt")
+      assert node.succeed("mc cat test-garage/test-bucket/test.txt").strip() == "test"
+
+    def test_bucket_over_http(node, bucket='test-bucket', url=None):
+      if url is None:
+         url = f"{bucket}.web.garage"
+
+      node.succeed(f'garage bucket website --allow {bucket}')
+      node.succeed(f'echo hello world | mc pipe test-garage/{bucket}/index.html')
+      assert (node.succeed(f"curl -H 'Host: {url}' http://localhost:3902")).strip() == 'hello world'
+
+    with subtest("Garage works as a multi-node S3 storage"):
+      nodes = ('node1', 'node2', 'node3', 'node4')
+      rev_machines = {m.name: m for m in machines}
+      def get_machine(key): return rev_machines[key]
+      for key in nodes:
+        node = get_machine(key)
+        node.wait_for_unit("garage.service")
+        node.wait_for_open_port(3900)
+
+      # Garage is initialized on all nodes.
+      node_ids = {key: get_node_fqn(get_machine(key)) for key in nodes}
+
+      for key in nodes:
+        for other_key in nodes:
+          if other_key != key:
+            other_id = node_ids[other_key]
+            get_machine(key).succeed(f"garage node connect {other_id.node_id}@{other_id.host}")
+
+      # Provide multiple zones for the nodes.
+      zones = ["nixcon", "nixcon", "paris_meetup", "fosdem"]
+      apply_garage_layout(node1,
+      [
+        f'{ndata.node_id} -z {zones[index]} -c ${if ver == "0_8" then "1" else "1G"}'
+        for index, ndata in enumerate(node_ids.values())
+      ])
+      # Now Garage is operational.
+      test_bucket_writes(node1)
+      for node in nodes:
+         test_bucket_over_http(get_machine(node))
+  '';
+})) args
diff --git a/nixpkgs/nixos/tests/gemstash.nix b/nixpkgs/nixos/tests/gemstash.nix
new file mode 100644
index 000000000000..bc152e42e92e
--- /dev/null
+++ b/nixpkgs/nixos/tests/gemstash.nix
@@ -0,0 +1,51 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let common_meta = { maintainers = [ maintainers.viraptor ]; };
+in
+{
+  gemstash_works = makeTest {
+    name = "gemstash-works";
+    meta = common_meta;
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.gemstash = {
+        enable = true;
+      };
+    };
+
+    # gemstash responds to http requests
+    testScript = ''
+      machine.wait_for_unit("gemstash.service")
+      machine.wait_for_file("/var/lib/gemstash")
+      machine.wait_for_open_port(9292)
+      machine.succeed("curl http://localhost:9292")
+    '';
+  };
+
+  gemstash_custom_port = makeTest {
+    name = "gemstash-custom-port";
+    meta = common_meta;
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.gemstash = {
+        enable = true;
+        openFirewall = true;
+        settings = {
+          bind = "tcp://0.0.0.0:12345";
+        };
+      };
+    };
+
+    # gemstash responds to http requests
+    testScript = ''
+      machine.wait_for_unit("gemstash.service")
+      machine.wait_for_file("/var/lib/gemstash")
+      machine.wait_for_open_port(12345)
+      machine.succeed("curl http://localhost:12345")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/gerrit.nix b/nixpkgs/nixos/tests/gerrit.nix
new file mode 100644
index 000000000000..8ae9e89cf6b0
--- /dev/null
+++ b/nixpkgs/nixos/tests/gerrit.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  lfs = pkgs.fetchurl {
+    url = "https://gerrit-ci.gerritforge.com/job/plugin-lfs-bazel-master/90/artifact/bazel-bin/plugins/lfs/lfs.jar";
+    sha256 = "023b0kd8djm3cn1lf1xl67yv3j12yl8bxccn42lkfmwxjwjfqw6h";
+  };
+
+in {
+  name = "gerrit";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ flokli zimbatm ];
+  };
+
+  nodes = {
+    server =
+      { config, pkgs, ... }: {
+        networking.firewall.allowedTCPPorts = [ 80 2222 ];
+
+
+        services.gerrit = {
+          enable = true;
+          serverId = "aa76c84b-50b0-4711-a0a0-1ee30e45bbd0";
+          listenAddress = "[::]:80";
+          jvmHeapLimit = "1g";
+
+          plugins = [ lfs ];
+          builtinPlugins = [ "hooks" "webhooks" ];
+          settings = {
+            gerrit.canonicalWebUrl = "http://server";
+            lfs.plugin = "lfs";
+            plugins.allowRemoteAdmin = true;
+            sshd.listenAddress = "[::]:2222";
+            sshd.advertisedAddress = "[::]:2222";
+          };
+        };
+      };
+
+    client =
+      { ... }: {
+      };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("gerrit.service")
+    server.wait_for_open_port(80)
+    client.succeed("curl http://server")
+
+    server.wait_for_open_port(2222)
+    client.succeed("nc -z server 2222")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/geth.nix b/nixpkgs/nixos/tests/geth.nix
new file mode 100644
index 000000000000..dc6490db57c9
--- /dev/null
+++ b/nixpkgs/nixos/tests/geth.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "geth";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [ bachp ];
+  };
+
+  nodes.machine = { ... }: {
+    services.geth."mainnet" = {
+      enable = true;
+      http = {
+        enable = true;
+      };
+    };
+    services.geth."testnet" = {
+      enable = true;
+      port = 30304;
+      network = "goerli";
+      http = {
+        enable = true;
+        port = 18545;
+      };
+      authrpc = {
+        enable = true;
+        port = 18551;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("geth-mainnet.service")
+    machine.wait_for_unit("geth-testnet.service")
+    machine.wait_for_open_port(8545)
+    machine.wait_for_open_port(18545)
+
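+    # A freshly started node has no blocks yet, so eth.blockNumber is expected to report 0.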
+    machine.succeed(
+        'geth attach --exec "eth.blockNumber" http://localhost:8545 | grep \'^0$\' '
+    )
+
+    machine.succeed(
+        'geth attach --exec "eth.blockNumber" http://localhost:18545 | grep \'^0$\' '
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ghostunnel.nix b/nixpkgs/nixos/tests/ghostunnel.nix
new file mode 100644
index 000000000000..91a7b7085f67
--- /dev/null
+++ b/nixpkgs/nixos/tests/ghostunnel.nix
@@ -0,0 +1,104 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ghostunnel";
+  nodes = {
+    backend = { pkgs, ... }: {
+      services.nginx.enable = true;
+      services.nginx.virtualHosts."backend".root = pkgs.runCommand "webroot" {} ''
+        mkdir $out
+        echo hi >$out/hi.txt
+      '';
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+    service = { ... }: {
+      services.ghostunnel.enable = true;
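+      # Two tunnels: "plain-old" terminates TLS without client authentication,
+      # "client-cert" requires a client certificate signed by the test CA.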
+      services.ghostunnel.servers."plain-old" = {
+        listen = "0.0.0.0:443";
+        cert = "/root/service-cert.pem";
+        key = "/root/service-key.pem";
+        disableAuthentication = true;
+        target = "backend:80";
+        unsafeTarget = true;
+      };
+      services.ghostunnel.servers."client-cert" = {
+        listen = "0.0.0.0:1443";
+        cert = "/root/service-cert.pem";
+        key = "/root/service-key.pem";
+        cacert = "/root/ca.pem";
+        target = "backend:80";
+        allowCN = ["client"];
+        unsafeTarget = true;
+      };
+      networking.firewall.allowedTCPPorts = [ 443 1443 ];
+    };
+    client = { pkgs, ... }: {
+      environment.systemPackages = [
+        pkgs.curl
+      ];
+    };
+  };
+
+  testScript = ''
+
+    # prepare certificates
+
+    def cmd(command):
+      print(f"+{command}")
+      r = os.system(command)
+      if r != 0:
+        raise Exception(f"Command {command} failed with exit code {r}")
+
+    # Create CA
+    cmd("${pkgs.openssl}/bin/openssl genrsa -out ca-key.pem 4096")
+    cmd("${pkgs.openssl}/bin/openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -subj '/C=NL/ST=Zuid-Holland/L=The Hague/O=Stevige Balken en Planken B.V./OU=OpSec/CN=Certificate Authority' -out ca.pem")
+
+    # Create service
+    cmd("${pkgs.openssl}/bin/openssl genrsa -out service-key.pem 4096")
+    cmd("${pkgs.openssl}/bin/openssl req -subj '/CN=service' -sha256 -new -key service-key.pem -out service.csr")
+    cmd("echo subjectAltName = DNS:service,IP:127.0.0.1 >> extfile.cnf")
+    cmd("echo extendedKeyUsage = serverAuth >> extfile.cnf")
+    cmd("${pkgs.openssl}/bin/openssl x509 -req -days 365 -sha256 -in service.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out service-cert.pem -extfile extfile.cnf")
+
+    # Create client
+    cmd("${pkgs.openssl}/bin/openssl genrsa -out client-key.pem 4096")
+    cmd("${pkgs.openssl}/bin/openssl req -subj '/CN=client' -new -key client-key.pem -out client.csr")
+    cmd("echo extendedKeyUsage = clientAuth > extfile-client.cnf")
+    cmd("${pkgs.openssl}/bin/openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem -extfile extfile-client.cnf")
+
+    cmd("ls -al")
+
+    start_all()
+
+    # Configuration
+    service.copy_from_host("ca.pem", "/root/ca.pem")
+    service.copy_from_host("service-cert.pem", "/root/service-cert.pem")
+    service.copy_from_host("service-key.pem", "/root/service-key.pem")
+    client.copy_from_host("ca.pem", "/root/ca.pem")
+    client.copy_from_host("service-cert.pem", "/root/service-cert.pem")
+    client.copy_from_host("client-cert.pem", "/root/client-cert.pem")
+    client.copy_from_host("client-key.pem", "/root/client-key.pem")
+
+    backend.wait_for_unit("nginx.service")
+    service.wait_for_unit("multi-user.target")
+    service.wait_for_unit("multi-user.target")
+    client.wait_for_unit("multi-user.target")
+
+    # Check assumptions before the real test
+    client.succeed("bash -c 'diff <(curl -v --no-progress-meter http://backend/hi.txt) <(echo hi)'")
+
+    # Plain old simple TLS can connect, ignoring cert
+    client.succeed("bash -c 'diff <(curl -v --no-progress-meter --insecure https://service/hi.txt) <(echo hi)'")
+
+    # Plain old simple TLS provides correct signature with its cert
+    client.succeed("bash -c 'diff <(curl -v --no-progress-meter --cacert /root/ca.pem https://service/hi.txt) <(echo hi)'")
+
+    # Client can authenticate with certificate
+    client.succeed("bash -c 'diff <(curl -v --no-progress-meter --cert /root/client-cert.pem --key /root/client-key.pem --cacert /root/ca.pem https://service:1443/hi.txt) <(echo hi)'")
+
+    # Client must authenticate with certificate
+    client.fail("bash -c 'diff <(curl -v --no-progress-meter --cacert /root/ca.pem https://service:1443/hi.txt) <(echo hi)'")
+  '';
+
+  meta.maintainers = with pkgs.lib.maintainers; [
+    roberth
+  ];
+})
diff --git a/nixpkgs/nixos/tests/git/hub.nix b/nixpkgs/nixos/tests/git/hub.nix
new file mode 100644
index 000000000000..4f3189861a00
--- /dev/null
+++ b/nixpkgs/nixos/tests/git/hub.nix
@@ -0,0 +1,17 @@
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "hub";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  nodes.hub = { pkgs, ... }:
+    {
+      environment.systemPackages = [ pkgs.hub ];
+    };
+
+  testScript =
+    ''
+      assert "git version ${pkgs.git.version}\nhub version ${pkgs.hub.version}\n" in hub.succeed("hub version")
+      assert "These GitHub commands are provided by hub" in hub.succeed("hub help")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/gitdaemon.nix b/nixpkgs/nixos/tests/gitdaemon.nix
new file mode 100644
index 000000000000..bb07b6e97b7f
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitdaemon.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  hashes = pkgs.writeText "hashes" ''
+    b5bb9d8014a0f9b1d61e21e796d78dccdf1352f23cd32812f4850b878ae4944c  /project/bar
+  '';
+in {
+  name = "gitdaemon";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ tilpner ];
+  };
+
+  nodes = {
+    server =
+      { config, ... }: {
+        networking.firewall.allowedTCPPorts = [ config.services.gitDaemon.port ];
+
+        environment.systemPackages = [ pkgs.git ];
+
+        systemd.tmpfiles.rules = [
+          # type path mode user group age arg
+          " d    /git 0755 root root  -   -"
+        ];
+
+        services.gitDaemon = {
+          enable = true;
+          basePath = "/git";
+        };
+      };
+
+    client =
+      { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("create project.git"):
+        server.succeed(
+            "git init --bare /git/project.git",
+            "touch /git/project.git/git-daemon-export-ok",
+        )
+
+    with subtest("add file to project.git"):
+        server.succeed(
+            "git clone /git/project.git /project",
+            "echo foo > /project/bar",
+            "git config --global user.email 'you@example.com'",
+            "git config --global user.name 'Your Name'",
+            "git -C /project add bar",
+            "git -C /project commit -m 'quux'",
+            "git -C /project push",
+            "rm -r /project",
+        )
+
+    with subtest("git daemon starts"):
+        server.wait_for_unit("git-daemon.service")
+
+    server.wait_for_unit("network-online.target")
+    client.wait_for_unit("network-online.target")
+
+    with subtest("client can clone project.git"):
+        client.succeed(
+            "git clone git://server/project.git /project",
+            "sha256sum -c ${hashes}",
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gitea.nix b/nixpkgs/nixos/tests/gitea.nix
new file mode 100644
index 000000000000..f62c72bddddc
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitea.nix
@@ -0,0 +1,165 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  giteaPackage ? pkgs.gitea,
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  ## gpg --faked-system-time='20230301T010000!' --quick-generate-key snakeoil ed25519 sign
+  signingPrivateKey = ''
+    -----BEGIN PGP PRIVATE KEY BLOCK-----
+
+    lFgEY/6jkBYJKwYBBAHaRw8BAQdADXiZRV8RJUyC9g0LH04wLMaJL9WTc+szbMi7
+    5fw4yP8AAQCl8EwGfzSLm/P6fCBfA3I9znFb3MEHGCCJhJ6VtKYyRw7ktAhzbmFr
+    ZW9pbIiUBBMWCgA8FiEE+wUM6VW/NLtAdSixTWQt6LZ4x50FAmP+o5ACGwMFCQPC
+    ZwAECwkIBwQVCgkIBRYCAwEAAh4FAheAAAoJEE1kLei2eMedFTgBAKQs1oGFZrCI
+    TZP42hmBTKxGAI1wg7VSdDEWTZxut/2JAQDGgo2sa4VHMfj0aqYGxrIwfP2B7JHO
+    GCqGCRf9O/hzBA==
+    =9Uy3
+    -----END PGP PRIVATE KEY BLOCK-----
+  '';
+  signingPrivateKeyId = "4D642DE8B678C79D";
+
+  supportedDbTypes = [ "mysql" "postgres" "sqlite3" ];
+  makeGiteaTest = type: nameValuePair type (makeTest {
+    name = "${giteaPackage.pname}-${type}";
+    meta.maintainers = with maintainers; [ aanderse kolaente ma27 ];
+
+    nodes = {
+      server = { config, pkgs, ... }: {
+        virtualisation.memorySize = 2047;
+        services.gitea = {
+          enable = true;
+          database = { inherit type; };
+          package = giteaPackage;
+          metricsTokenFile = (pkgs.writeText "metrics_secret" "fakesecret").outPath;
+          settings.service.DISABLE_REGISTRATION = true;
+          settings."repository.signing".SIGNING_KEY = signingPrivateKeyId;
+          settings.actions.ENABLED = true;
+          settings.metrics.ENABLED = true;
+        };
+        environment.systemPackages = [ giteaPackage pkgs.gnupg pkgs.jq ];
+        services.openssh.enable = true;
+
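+        # Specialisation with the Actions runner enabled; the test switches into it
+        # once a registration token has been generated.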
+        specialisation.runner = {
+          inheritParentConfig = true;
+
+          configuration.services.gitea-actions-runner.instances."test" = {
+            enable = true;
+            name = "ci";
+            url = "http://localhost:3000";
+            labels = [
+              # don't require docker/podman
+              "native:host"
+            ];
+            tokenFile = "/var/lib/gitea/runner_token";
+          };
+        };
+      };
+      client1 = { config, pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+      client2 = { config, pkgs, ... }: {
+        environment.systemPackages = [ pkgs.git ];
+      };
+    };
+
+    testScript = { nodes, ... }: let
+      inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+      serverSystem = nodes.server.system.build.toplevel;
+    in ''
+      GIT_SSH_COMMAND = "ssh -i $HOME/.ssh/privk -o StrictHostKeyChecking=no"
+      REPO = "gitea@server:test/repo"
+      PRIVK = "${snakeOilPrivateKey}"
+
+      start_all()
+
+      client1.succeed("mkdir /tmp/repo")
+      client1.succeed("mkdir -p $HOME/.ssh")
+      client1.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
+      client1.succeed("chmod 0400 $HOME/.ssh/privk")
+      client1.succeed("git -C /tmp/repo init")
+      client1.succeed("echo hello world > /tmp/repo/testfile")
+      client1.succeed("git -C /tmp/repo add .")
+      client1.succeed("git config --global user.email test@localhost")
+      client1.succeed("git config --global user.name test")
+      client1.succeed("git -C /tmp/repo commit -m 'Initial import'")
+      client1.succeed(f"git -C /tmp/repo remote add origin {REPO}")
+
+      server.wait_for_unit("gitea.service")
+      server.wait_for_open_port(3000)
+      server.wait_for_open_port(22)
+      server.succeed("curl --fail http://localhost:3000/")
+
+      server.succeed(
+          "su -l gitea -c 'gpg --homedir /var/lib/gitea/data/home/.gnupg "
+          + "--import ${toString (pkgs.writeText "gitea.key" signingPrivateKey)}'"
+      )
+
+      assert "BEGIN PGP PUBLIC KEY BLOCK" in server.succeed("curl http://localhost:3000/api/v1/signing-key.gpg")
+
+      server.succeed(
+          "curl --fail http://localhost:3000/user/sign_up | grep 'Registration is disabled. "
+          + "Please contact your site administrator.'"
+      )
+      server.succeed(
+          "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea admin user create "
+          + "--username test --password totallysafe --email test@localhost'"
+      )
+
+      api_token = server.succeed(
+          "curl --fail -X POST http://test:totallysafe@localhost:3000/api/v1/users/test/tokens "
+          + "-H 'Accept: application/json' -H 'Content-Type: application/json' -d "
+          + "'{\"name\":\"token\",\"scopes\":[\"all\"]}' | jq '.sha1' | xargs echo -n"
+      )
+
+      server.succeed(
+          "curl --fail -X POST http://localhost:3000/api/v1/user/repos "
+          + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+          + f"-H 'Authorization: token {api_token}'"
+          + ' -d \'{"auto_init":false, "description":"string", "license":"mit", "name":"repo", "private":false}\'''
+      )
+
+      server.succeed(
+          "curl --fail -X POST http://localhost:3000/api/v1/user/keys "
+          + "-H 'Accept: application/json' -H 'Content-Type: application/json' "
+          + f"-H 'Authorization: token {api_token}'"
+          + ' -d \'{"key":"${snakeOilPublicKey}","read_only":true,"title":"SSH"}\'''
+      )
+
+      client1.succeed(
+          f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git -C /tmp/repo push origin master"
+      )
+
+      client2.succeed("mkdir -p $HOME/.ssh")
+      client2.succeed(f"cat {PRIVK} > $HOME/.ssh/privk")
+      client2.succeed("chmod 0400 $HOME/.ssh/privk")
+      client2.succeed(f"GIT_SSH_COMMAND='{GIT_SSH_COMMAND}' git clone {REPO}")
+      client2.succeed('test "$(cat repo/testfile | xargs echo -n)" = "hello world"')
+
+      server.wait_until_succeeds(
+          'test "$(curl http://localhost:3000/api/v1/repos/test/repo/commits '
+          + '-H "Accept: application/json" | jq length)" = "1"'
+      )
+
+      with subtest("Testing metrics endpoint"):
+          server.succeed('curl '
+                         + '-H "Authorization: Bearer fakesecret" '
+                         + 'http://localhost:3000/metrics '
+                         + '| grep gitea_accesses')
+
+      with subtest("Testing runner registration"):
+          server.succeed(
+              "su -l gitea -c 'GITEA_WORK_DIR=/var/lib/gitea gitea actions generate-runner-token' | sed 's/^/TOKEN=/' | tee /var/lib/gitea/runner_token"
+          )
+          server.succeed("${serverSystem}/specialisation/runner/bin/switch-to-configuration test")
+          server.wait_for_unit("gitea-runner-test.service")
+          server.succeed("journalctl -o cat -u gitea-runner-test.service | grep -q 'Runner registered successfully'")
+    '';
+  });
+in
+
+listToAttrs (map makeGiteaTest supportedDbTypes)
diff --git a/nixpkgs/nixos/tests/github-runner.nix b/nixpkgs/nixos/tests/github-runner.nix
new file mode 100644
index 000000000000..033365d6925c
--- /dev/null
+++ b/nixpkgs/nixos/tests/github-runner.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "github-runner";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ veehaitch ];
+  };
+  nodes.machine = { pkgs, ... }: {
+    services.github-runners.test = {
+      enable = true;
+      url = "https://github.com/yaxitech";
+      tokenFile = builtins.toFile "github-runner.token" "not-so-secret";
+    };
+
+    systemd.services.dummy-github-com = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "github-runner-test.service" ];
+      script = "${pkgs.netcat}/bin/nc -Fl 443 | true && touch /tmp/registration-connect";
+    };
+    networking.hosts."127.0.0.1" = [ "api.github.com" ];
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("dummy-github-com")
+
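+    # Registration against the fake api.github.com endpoint cannot complete,
+    # so the unit may never become active; check the journal instead.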
+    try:
+      machine.wait_for_unit("github-runner-test")
+    except Exception:
+      pass
+
+    out = machine.succeed("journalctl -u github-runner-test")
+    assert "Self-hosted runner registration" in out, "did not read runner registration header"
+
+    machine.wait_until_succeeds("test -f /tmp/registration-connect")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gitlab.nix b/nixpkgs/nixos/tests/gitlab.nix
new file mode 100644
index 000000000000..88cd774f815a
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitlab.nix
@@ -0,0 +1,437 @@
+# This test runs gitlab and performs the following tests:
+# - Creating users
+# - Pushing commits
+#   - over the API
+#   - over SSH
+# - Creating Merge Requests and merging them
+# - Opening and closing issues.
+# - Downloading repository archives as tar.gz and tar.bz2
+# Run with
+# [nixpkgs]$ nix-build -A nixosTests.gitlab
+
+{ pkgs, lib, ... }:
+
+let
+  inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+  initialRootPassword = "notproduction";
+  rootProjectId = "2";
+
+  aliceUsername = "alice";
+  aliceUserId = "2";
+  alicePassword = "R5twyCgU0uXC71wT9BBTCqLs6HFZ7h3L";
+  aliceProjectId = "1";
+  aliceProjectName = "test-alice";
+
+  bobUsername = "bob";
+  bobUserId = "3";
+  bobPassword = "XwkkBbl2SiIwabQzgcoaTbhsotijEEtF";
+  bobProjectId = "2";
+in {
+  name = "gitlab";
+  meta.maintainers = with lib.maintainers; [ globin yayayayaka ];
+
+  nodes = {
+    gitlab = { ... }: {
+      imports = [ common/user-account.nix ];
+
+      virtualisation.memorySize = if pkgs.stdenv.is64bit then 4096 else 2047;
+      virtualisation.cores = 4;
+      virtualisation.useNixStoreImage = true;
+      virtualisation.writableStore = false;
+
+      systemd.services.gitlab.serviceConfig.Restart = lib.mkForce "no";
+      systemd.services.gitlab-workhorse.serviceConfig.Restart = lib.mkForce "no";
+      systemd.services.gitaly.serviceConfig.Restart = lib.mkForce "no";
+      systemd.services.gitlab-sidekiq.serviceConfig.Restart = lib.mkForce "no";
+
+      services.nginx = {
+        enable = true;
+        recommendedProxySettings = true;
+        virtualHosts = {
+          localhost = {
+            locations."/".proxyPass = "http://unix:/run/gitlab/gitlab-workhorse.socket";
+          };
+        };
+      };
+
+      services.openssh.enable = true;
+
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+      };
+
+      systemd.services.gitlab-backup.environment.BACKUP = "dump";
+
+      services.gitlab = {
+        enable = true;
+        databasePasswordFile = pkgs.writeText "dbPassword" "xo0daiF4";
+        initialRootPasswordFile = pkgs.writeText "rootPassword" initialRootPassword;
+        smtp.enable = true;
+        pages = {
+          enable = true;
+          settings.pages-domain = "localhost";
+        };
+        extraConfig = {
+          incoming_email = {
+            enabled = true;
+            mailbox = "inbox";
+            address = "alice@localhost";
+            user = "alice";
+            password = "foobar";
+            host = "localhost";
+            port = 143;
+          };
+        };
+        secrets = {
+          secretFile = pkgs.writeText "secret" "Aig5zaic";
+          otpFile = pkgs.writeText "otpsecret" "Riew9mue";
+          dbFile = pkgs.writeText "dbsecret" "we2quaeZ";
+          jwsFile = pkgs.runCommand "oidcKeyBase" {} "${pkgs.openssl}/bin/openssl genrsa 2048 > $out";
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      auth = pkgs.writeText "auth.json" (builtins.toJSON {
+        grant_type = "password";
+        username = "root";
+        password = initialRootPassword;
+      });
+
+      createUserAlice = pkgs.writeText "create-user-alice.json" (builtins.toJSON rec {
+        username = aliceUsername;
+        name = username;
+        email = "alice@localhost";
+        password = alicePassword;
+        skip_confirmation = true;
+      });
+
+      createUserBob = pkgs.writeText "create-user-bob.json" (builtins.toJSON rec {
+        username = bobUsername;
+        name = username;
+        email = "bob@localhost";
+        password = bobPassword;
+        skip_confirmation = true;
+      });
+
+      aliceAuth = pkgs.writeText "alice-auth.json" (builtins.toJSON {
+        grant_type = "password";
+        username = aliceUsername;
+        password = alicePassword;
+      });
+
+      bobAuth = pkgs.writeText "bob-auth.json" (builtins.toJSON {
+        grant_type = "password";
+        username = bobUsername;
+        password = bobPassword;
+      });
+
+      aliceAddSSHKey = pkgs.writeText "alice-add-ssh-key.json" (builtins.toJSON {
+        id = aliceUserId;
+        title = "snakeoil@nixos";
+        key = snakeOilPublicKey;
+      });
+
+      createProjectAlice = pkgs.writeText "create-project-alice.json" (builtins.toJSON {
+        name = aliceProjectName;
+        visibility = "public";
+      });
+
+      putFile = pkgs.writeText "put-file.json" (builtins.toJSON {
+        branch = "master";
+        author_email = "author@example.com";
+        author_name = "Firstname Lastname";
+        content = "some content";
+        commit_message = "create a new file";
+      });
+
+      mergeRequest = pkgs.writeText "merge-request.json" (builtins.toJSON {
+        id = bobProjectId;
+        target_project_id = aliceProjectId;
+        source_branch = "master";
+        target_branch = "master";
+        title = "Add some other file";
+      });
+
+      newIssue = pkgs.writeText "new-issue.json" (builtins.toJSON {
+        title = "useful issue title";
+      });
+
+      closeIssue = pkgs.writeText "close-issue.json" (builtins.toJSON {
+        issue_iid = 1;
+        state_event = "close";
+      });
+
+      # Wait for all GitLab services to be fully started.
+      waitForServices = ''
+        gitlab.wait_for_unit("gitaly.service")
+        gitlab.wait_for_unit("gitlab-workhorse.service")
+        gitlab.wait_for_unit("gitlab-mailroom.service")
+        gitlab.wait_for_unit("gitlab.service")
+        gitlab.wait_for_unit("gitlab-pages.service")
+        gitlab.wait_for_unit("gitlab-sidekiq.service")
+        gitlab.wait_for_file("${nodes.gitlab.services.gitlab.statePath}/tmp/sockets/gitlab.socket")
+        gitlab.wait_until_succeeds("curl -sSf http://gitlab/users/sign_in")
+      '';
+
+      # The actual test of GitLab. Only push data to GitLab if
+      # `doSetup` is true.
+      test = doSetup: ''
+        GIT_SSH_COMMAND = "ssh -o StrictHostKeyChecking=accept-new -o UserKnownHostsFile=/dev/null"
+
+        gitlab.succeed(
+            "curl -isSf http://gitlab | grep -i location | grep http://gitlab/users/sign_in"
+        )
+        gitlab.succeed(
+            "${pkgs.sudo}/bin/sudo -u gitlab -H gitlab-rake gitlab:check 1>&2"
+        )
+        gitlab.succeed(
+            "echo \"Authorization: Bearer $(curl -X POST -H 'Content-Type: application/json' -d @${auth} http://gitlab/oauth/token | ${pkgs.jq}/bin/jq -r '.access_token')\" >/tmp/headers"
+        )
+      '' + lib.optionalString doSetup ''
+        with subtest("Create user Alice"):
+            gitlab.succeed(
+                """[ "$(curl -o /dev/null -w '%{http_code}' -X POST -H 'Content-Type: application/json' -H @/tmp/headers -d @${createUserAlice} http://gitlab/api/v4/users)" = "201" ]"""
+            )
+            gitlab.succeed(
+                "echo \"Authorization: Bearer $(curl -X POST -H 'Content-Type: application/json' -d @${aliceAuth} http://gitlab/oauth/token | ${pkgs.jq}/bin/jq -r '.access_token')\" >/tmp/headers-alice"
+            )
+
+        with subtest("Create user Bob"):
+            gitlab.succeed(
+                """ [ "$(curl -o /dev/null -w '%{http_code}' -X POST -H 'Content-Type: application/json' -H @/tmp/headers -d @${createUserBob} http://gitlab/api/v4/users)" = "201" ]"""
+            )
+            gitlab.succeed(
+                "echo \"Authorization: Bearer $(curl -X POST -H 'Content-Type: application/json' -d @${bobAuth} http://gitlab/oauth/token | ${pkgs.jq}/bin/jq -r '.access_token')\" >/tmp/headers-bob"
+            )
+
+        with subtest("Setup Git and SSH for Alice"):
+            gitlab.succeed("git config --global user.name Alice")
+            gitlab.succeed("git config --global user.email alice@nixos.invalid")
+            gitlab.succeed("mkdir -m 700 /root/.ssh")
+            gitlab.succeed("cat ${snakeOilPrivateKey} > /root/.ssh/id_ecdsa")
+            gitlab.succeed("chmod 600 /root/.ssh/id_ecdsa")
+            gitlab.succeed(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X POST \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-alice -d @${aliceAddSSHKey} \
+                    http://gitlab/api/v4/user/keys)" = "201" ]
+                """
+            )
+
+        with subtest("Create a new repository"):
+            # Alice creates a new repository
+            gitlab.succeed(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X POST \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-alice \
+                    -d @${createProjectAlice} \
+                    http://gitlab/api/v4/projects)" = "201" ]
+                """
+            )
+
+            # Alice commits an initial commit
+            gitlab.succeed(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X POST \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-alice \
+                    -d @${putFile} \
+                    http://gitlab/api/v4/projects/${aliceProjectId}/repository/files/some-file.txt)" = "201" ]"""
+            )
+
+        with subtest("git clone over HTTP"):
+            gitlab.succeed(
+                """git clone http://gitlab/alice/${aliceProjectName}.git clone-via-http""",
+                timeout=15
+            )
+
+        with subtest("Push a commit via SSH"):
+            gitlab.succeed(
+                f"""GIT_SSH_COMMAND="{GIT_SSH_COMMAND}" git clone gitlab@gitlab:alice/${aliceProjectName}.git""",
+                timeout=15
+            )
+            gitlab.succeed(
+                """echo "a commit sent over ssh" > ${aliceProjectName}/ssh.txt"""
+            )
+            gitlab.succeed(
+                """
+                cd ${aliceProjectName} || exit 1
+                git add .
+                """
+            )
+            gitlab.succeed(
+                """
+                cd ${aliceProjectName} || exit 1
+                git commit -m "Add a commit to be sent over ssh"
+                """
+            )
+            gitlab.succeed(
+                f"""
+                cd ${aliceProjectName} || exit 1
+                GIT_SSH_COMMAND="{GIT_SSH_COMMAND}" git push --set-upstream origin master
+                """,
+                timeout=15
+            )
+
+        with subtest("Fork a project"):
+            # Bob forks Alice's project
+            gitlab.succeed(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X POST \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-bob \
+                    http://gitlab/api/v4/projects/${aliceProjectId}/fork)" = "201" ]
+                """
+            )
+
+            # Bob creates a commit
+            gitlab.wait_until_succeeds(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X POST \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-bob \
+                    -d @${putFile} \
+                    http://gitlab/api/v4/projects/${bobProjectId}/repository/files/some-other-file.txt)" = "201" ]
+                """
+            )
+
+        with subtest("Create a Merge Request"):
+            # Bob opens a merge request against Alice's repository
+            gitlab.wait_until_succeeds(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X POST \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-bob \
+                    -d @${mergeRequest} \
+                    http://gitlab/api/v4/projects/${bobProjectId}/merge_requests)" = "201" ]
+                """
+            )
+
+            # Alice merges the MR
+            gitlab.wait_until_succeeds(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X PUT \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-alice \
+                    -d @${mergeRequest} \
+                    http://gitlab/api/v4/projects/${aliceProjectId}/merge_requests/1/merge)" = "200" ]
+                """
+            )
+
+        with subtest("Create an Issue"):
+            # Bob opens an issue on Alice's repository
+            gitlab.succeed(
+                """[ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X POST \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-bob \
+                    -d @${newIssue} \
+                    http://gitlab/api/v4/projects/${aliceProjectId}/issues)" = "201" ]
+                """
+            )
+
+            # Alice closes the issue
+            gitlab.wait_until_succeeds(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -X PUT \
+                    -H 'Content-Type: application/json' \
+                    -H @/tmp/headers-alice -d @${closeIssue} http://gitlab/api/v4/projects/${aliceProjectId}/issues/1)" = "200" ]
+                """
+            )
+      '' + ''
+        with subtest("Download archive.tar.gz"):
+            gitlab.succeed(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -H @/tmp/headers-alice \
+                    http://gitlab/api/v4/projects/${aliceProjectId}/repository/archive.tar.gz)" = "200" ]
+                """
+            )
+            gitlab.succeed(
+                """
+                curl \
+                    -H @/tmp/headers-alice \
+                    http://gitlab/api/v4/projects/${aliceProjectId}/repository/archive.tar.gz > /tmp/archive.tar.gz
+                """
+            )
+            gitlab.succeed("test -s /tmp/archive.tar.gz")
+
+        with subtest("Download archive.tar.bz2"):
+            gitlab.succeed(
+                """
+                [ "$(curl \
+                    -o /dev/null \
+                    -w '%{http_code}' \
+                    -H @/tmp/headers-alice \
+                    http://gitlab/api/v4/projects/${aliceProjectId}/repository/archive.tar.bz2)" = "200" ]
+                """
+            )
+            gitlab.succeed(
+                """
+                curl \
+                    -H @/tmp/headers-alice \
+                    http://gitlab/api/v4/projects/${aliceProjectId}/repository/archive.tar.bz2 > /tmp/archive.tar.bz2
+                """
+            )
+            gitlab.succeed("test -s /tmp/archive.tar.bz2")
+      '';
+
+  in ''
+      gitlab.start()
+    ''
+    + waitForServices
+    + test true
+    + ''
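+      # Exercise backup and restore: take a dump, wipe the state directory (except the backup)
+      # and the database, then restore from the dump.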
+      gitlab.systemctl("start gitlab-backup.service")
+      gitlab.wait_for_unit("gitlab-backup.service")
+      gitlab.wait_for_file("${nodes.gitlab.services.gitlab.statePath}/backup/dump_gitlab_backup.tar")
+      gitlab.systemctl("stop postgresql.service gitlab.target")
+      gitlab.succeed(
+          "find ${nodes.gitlab.services.gitlab.statePath} -mindepth 1 -maxdepth 1 -not -name backup -execdir rm -r {} +"
+      )
+      gitlab.succeed("systemd-tmpfiles --create")
+      gitlab.succeed("rm -rf ${nodes.gitlab.services.postgresql.dataDir}")
+      gitlab.systemctl("start gitlab-config.service gitaly.service gitlab-postgresql.service")
+      gitlab.wait_for_file("${nodes.gitlab.services.gitlab.statePath}/tmp/sockets/gitaly.socket")
+      gitlab.succeed(
+          "sudo -u gitlab -H gitlab-rake gitlab:backup:restore RAILS_ENV=production BACKUP=dump force=yes"
+      )
+      gitlab.systemctl("start gitlab.target")
+    ''
+    + waitForServices
+    + test false;
+}
diff --git a/nixpkgs/nixos/tests/gitolite-fcgiwrap.nix b/nixpkgs/nixos/tests/gitolite-fcgiwrap.nix
new file mode 100644
index 000000000000..abf1db37003a
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitolite-fcgiwrap.nix
@@ -0,0 +1,93 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+
+    let
+      user = "gitolite-admin";
+      password = "some_password";
+
+      # not used, but needed to set up gitolite
+      adminPublicKey = ''
+        ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7urFhAA90BTpGuEHeWWTY3W/g9PBxXNxfWhfbrm4Le root@client
+      '';
+    in
+      {
+        name = "gitolite-fcgiwrap";
+
+        meta = with pkgs.lib.maintainers; {
+          maintainers = [ bbigras ];
+        };
+
+        nodes = {
+
+          server =
+            { config, ... }:
+              {
+                networking.firewall.allowedTCPPorts = [ 80 ];
+
+                services.fcgiwrap.enable = true;
+                services.gitolite = {
+                  enable = true;
+                  adminPubkey = adminPublicKey;
+                };
+
+                services.nginx = {
+                  enable = true;
+                  recommendedProxySettings = true;
+                  virtualHosts."server".locations."/git".extraConfig = ''
+                    # turn off gzip as git objects are already well compressed
+                    gzip off;
+
+                    # use file based basic authentication
+                    auth_basic "Git Repository Authentication";
+                    auth_basic_user_file /etc/gitolite/htpasswd;
+
+                    # common FastCGI parameters are required
+                    include ${config.services.nginx.package}/conf/fastcgi_params;
+
+                    # strip the CGI program prefix
+                    fastcgi_split_path_info ^(/git)(.*)$;
+                    fastcgi_param PATH_INFO $fastcgi_path_info;
+
+                    # pass the authenticated user login (mandatory) to Gitolite
+                    fastcgi_param REMOTE_USER $remote_user;
+
+                    # pass git repository root directory and hosting user directory
+                    # these env variables can be set in a wrapper script
+                    fastcgi_param GIT_HTTP_EXPORT_ALL "";
+                    fastcgi_param GIT_PROJECT_ROOT /var/lib/gitolite/repositories;
+                    fastcgi_param GITOLITE_HTTP_HOME /var/lib/gitolite;
+                    fastcgi_param SCRIPT_FILENAME ${pkgs.gitolite}/bin/gitolite-shell;
+
+                    # use Unix domain socket or inet socket
+                    fastcgi_pass unix:/run/fcgiwrap.sock;
+                  '';
+                };
+
+                # WARNING: DON'T DO THIS IN PRODUCTION!
+                # This puts unhashed secrets directly into the Nix store for ease of testing.
+                environment.etc."gitolite/htpasswd".source = pkgs.runCommand "htpasswd" {} ''
+                  ${pkgs.apacheHttpd}/bin/htpasswd -bc "$out" ${user} ${password}
+                '';
+              };
+
+          client =
+            { pkgs, ... }:
+              {
+                environment.systemPackages = [ pkgs.git ];
+              };
+        };
+
+        testScript = ''
+          start_all()
+
+          server.wait_for_unit("gitolite-init.service")
+          server.wait_for_unit("nginx.service")
+          server.wait_for_file("/run/fcgiwrap.sock")
+
+          client.wait_for_unit("multi-user.target")
+          client.succeed(
+              "git clone http://${user}:${password}@server/git/gitolite-admin.git"
+          )
+        '';
+      }
+)
diff --git a/nixpkgs/nixos/tests/gitolite.nix b/nixpkgs/nixos/tests/gitolite.nix
new file mode 100644
index 000000000000..9b3af59e4fbd
--- /dev/null
+++ b/nixpkgs/nixos/tests/gitolite.nix
@@ -0,0 +1,138 @@
+import ./make-test-python.nix ({ pkgs, ...}:
+
+let
+  adminPrivateKey = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACDu7qxYQAPdAU6RrhB3llk2N1v4PTwcVzcX1oX265uC3gAAAJBJiYxDSYmM
+    QwAAAAtzc2gtZWQyNTUxOQAAACDu7qxYQAPdAU6RrhB3llk2N1v4PTwcVzcX1oX265uC3g
+    AAAEDE1W6vMwSEUcF1r7Hyypm/+sCOoDmKZgPxi3WOa1mD2u7urFhAA90BTpGuEHeWWTY3
+    W/g9PBxXNxfWhfbrm4LeAAAACGJmb0BtaW5pAQIDBAU=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+
+  adminPublicKey = ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIO7urFhAA90BTpGuEHeWWTY3W/g9PBxXNxfWhfbrm4Le root@client
+  '';
+
+  alicePrivateKey = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACBbeWvHh/AWGWI6EIc1xlSihyXtacNQ9KeztlW/VUy8wQAAAJAwVQ5VMFUO
+    VQAAAAtzc2gtZWQyNTUxOQAAACBbeWvHh/AWGWI6EIc1xlSihyXtacNQ9KeztlW/VUy8wQ
+    AAAEB7lbfkkdkJoE+4TKHPdPQWBKLSx+J54Eg8DaTr+3KoSlt5a8eH8BYZYjoQhzXGVKKH
+    Je1pw1D0p7O2Vb9VTLzBAAAACGJmb0BtaW5pAQIDBAU=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+
+  alicePublicKey = pkgs.writeText "id_ed25519.pub" ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIFt5a8eH8BYZYjoQhzXGVKKHJe1pw1D0p7O2Vb9VTLzB alice@client
+  '';
+
+  bobPrivateKey = pkgs.writeText "id_ed25519" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACCWTaJ1D9Xjxy6759FvQ9oXTes1lmWBciXPkEeqTikBMAAAAJDQBmNV0AZj
+    VQAAAAtzc2gtZWQyNTUxOQAAACCWTaJ1D9Xjxy6759FvQ9oXTes1lmWBciXPkEeqTikBMA
+    AAAEDM1IYYFUwk/IVxauha9kuR6bbRtT3gZ6ZA0GLb9txb/pZNonUP1ePHLrvn0W9D2hdN
+    6zWWZYFyJc+QR6pOKQEwAAAACGJmb0BtaW5pAQIDBAU=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+
+  bobPublicKey = pkgs.writeText "id_ed25519.pub" ''
+    ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJZNonUP1ePHLrvn0W9D2hdN6zWWZYFyJc+QR6pOKQEw bob@client
+  '';
+
+  gitoliteAdminConfSnippet = pkgs.writeText "gitolite-admin-conf-snippet" ''
+    repo alice-project
+        RW+     =   alice
+  '';
+in
+{
+  name = "gitolite";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ bjornfor ];
+  };
+
+  nodes = {
+
+    server =
+      { ... }:
+      {
+        services.gitolite = {
+          enable = true;
+          adminPubkey = adminPublicKey;
+        };
+        services.openssh.enable = true;
+      };
+
+    client =
+      { pkgs, ... }:
+      {
+        environment.systemPackages = [ pkgs.git ];
+        programs.ssh.extraConfig = ''
+          Host *
+            UserKnownHostsFile /dev/null
+            StrictHostKeyChecking no
+            # there's nobody around who can input a password
+            PreferredAuthentications publickey
+        '';
+        users.users.alice = { isNormalUser = true; };
+        users.users.bob = { isNormalUser = true; };
+      };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("can setup ssh keys on system"):
+        client.succeed(
+            "mkdir -p ~root/.ssh",
+            "cp ${adminPrivateKey} ~root/.ssh/id_ed25519",
+            "chmod 600 ~root/.ssh/id_ed25519",
+        )
+        client.succeed(
+            "sudo -u alice mkdir -p ~alice/.ssh",
+            "sudo -u alice cp ${alicePrivateKey} ~alice/.ssh/id_ed25519",
+            "sudo -u alice chmod 600 ~alice/.ssh/id_ed25519",
+        )
+        client.succeed(
+            "sudo -u bob mkdir -p ~bob/.ssh",
+            "sudo -u bob cp ${bobPrivateKey} ~bob/.ssh/id_ed25519",
+            "sudo -u bob chmod 600 ~bob/.ssh/id_ed25519",
+        )
+
+    with subtest("gitolite server starts"):
+        server.wait_for_unit("gitolite-init.service")
+        server.wait_for_unit("sshd.service")
+        client.succeed("ssh -n gitolite@server info")
+
+    with subtest("admin can clone and configure gitolite-admin.git"):
+        client.succeed(
+            "git clone gitolite@server:gitolite-admin.git",
+            "git config --global user.name 'System Administrator'",
+            "git config --global user.email root\@domain.example",
+            "cp ${alicePublicKey} gitolite-admin/keydir/alice.pub",
+            "cp ${bobPublicKey} gitolite-admin/keydir/bob.pub",
+            "(cd gitolite-admin && git add . && git commit -m 'Add keys for alice, bob' && git push)",
+            "cat ${gitoliteAdminConfSnippet} >> gitolite-admin/conf/gitolite.conf",
+            "(cd gitolite-admin && git add . && git commit -m 'Add repo for alice' && git push)",
+        )
+
+    with subtest("non-admins cannot clone gitolite-admin.git"):
+        client.fail("sudo -i -u alice git clone gitolite@server:gitolite-admin.git")
+        client.fail("sudo -i -u bob git clone gitolite@server:gitolite-admin.git")
+
+    with subtest("non-admins can clone testing.git"):
+        client.succeed("sudo -i -u alice git clone gitolite@server:testing.git")
+        client.succeed("sudo -i -u bob git clone gitolite@server:testing.git")
+
+    with subtest("alice can clone alice-project.git"):
+        client.succeed("sudo -i -u alice git clone gitolite@server:alice-project.git")
+
+    with subtest("bob cannot clone alice-project.git"):
+        client.fail("sudo -i -u bob git clone gitolite@server:alice-project.git")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/glusterfs.nix b/nixpkgs/nixos/tests/glusterfs.nix
new file mode 100644
index 000000000000..ef09264a0216
--- /dev/null
+++ b/nixpkgs/nixos/tests/glusterfs.nix
@@ -0,0 +1,68 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+
+let
+  client = { pkgs, ... } : {
+    environment.systemPackages = [ pkgs.glusterfs ];
+    virtualisation.fileSystems =
+      { "/gluster" =
+          { device = "server1:/gv0";
+            fsType = "glusterfs";
+          };
+      };
+  };
+
+  server = { pkgs, ... } : {
+    networking.firewall.enable = false;
+    services.glusterfs.enable = true;
+
+    # format the 1 GiB empty disk image (attached as /dev/vdb) that will hold the brick data
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb
+    '';
+
+    virtualisation.emptyDiskImages = [ 1024 ];
+
+    virtualisation.fileSystems =
+      { "/data" =
+          { device = "/dev/disk/by-label/data";
+            fsType = "ext4";
+          };
+      };
+  };
+in {
+  name = "glusterfs";
+
+  nodes = {
+    server1 = server;
+    server2 = server;
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    server1.wait_for_unit("glusterd.service")
+    server2.wait_for_unit("glusterd.service")
+
+    server1.wait_until_succeeds("gluster peer status")
+    server2.wait_until_succeeds("gluster peer status")
+
+    # establish initial contact
+    server1.succeed("gluster peer probe server2")
+    server1.succeed("gluster peer probe server1")
+
+    server1.succeed("gluster peer status | grep Connected")
+
+    # create volumes
+    server1.succeed("mkdir -p /data/vg0")
+    server2.succeed("mkdir -p /data/vg0")
+    server1.succeed("gluster volume create gv0 server1:/data/vg0 server2:/data/vg0")
+    server1.succeed("gluster volume start gv0")
+
+    # test clients
+    client1.wait_for_unit("gluster.mount")
+    client2.wait_for_unit("gluster.mount")
+
+    client1.succeed("echo test > /gluster/file1")
+    client2.succeed("grep test /gluster/file1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gnome-flashback.nix b/nixpkgs/nixos/tests/gnome-flashback.nix
new file mode 100644
index 000000000000..f486dabc5c40
--- /dev/null
+++ b/nixpkgs/nixos/tests/gnome-flashback.nix
@@ -0,0 +1,52 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "gnome-flashback";
+  meta.maintainers = lib.teams.gnome.members ++ [ lib.maintainers.chpatrick ];
+
+  nodes.machine = { nodes, ... }:
+    let
+      user = nodes.machine.users.users.alice;
+    in
+
+    { imports = [ ./common/user-account.nix ];
+
+      services.xserver.enable = true;
+
+      services.xserver.displayManager = {
+        gdm.enable = true;
+        gdm.debug = true;
+        autoLogin = {
+          enable = true;
+          user = user.name;
+        };
+      };
+
+      services.xserver.desktopManager.gnome.enable = true;
+      services.xserver.desktopManager.gnome.debug = true;
+      services.xserver.desktopManager.gnome.flashback.enableMetacity = true;
+      services.xserver.displayManager.defaultSession = "gnome-flashback-metacity";
+    };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.users.users.alice;
+    uid = toString user.uid;
+    xauthority = "/run/user/${uid}/gdm/Xauthority";
+  in ''
+      with subtest("Login to GNOME Flashback with GDM"):
+          machine.wait_for_x()
+          machine.wait_until_succeeds('journalctl -t gnome-session-binary --grep "Entering running state"')
+          # Wait for alice to be logged in
+          machine.wait_for_unit("default.target", "${user.name}")
+          machine.wait_for_file("${xauthority}")
+          machine.succeed("xauth merge ${xauthority}")
+          # Check that logging in has given the user ownership of devices
+          assert "alice" in machine.succeed("getfacl -p /dev/snd/timer")
+
+      with subtest("Wait for Metacity"):
+          machine.wait_until_succeeds("pgrep metacity")
+
+      with subtest("Regression test for #233920"):
+          machine.wait_until_succeeds("pgrep -fa gnome-flashback-media-keys")
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/gnome-xorg.nix b/nixpkgs/nixos/tests/gnome-xorg.nix
new file mode 100644
index 000000000000..7762fff5c3a2
--- /dev/null
+++ b/nixpkgs/nixos/tests/gnome-xorg.nix
@@ -0,0 +1,99 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "gnome-xorg";
+  meta = {
+    maintainers = lib.teams.gnome.members;
+  };
+
+  nodes.machine = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in
+
+    { imports = [ ./common/user-account.nix ];
+
+      services.xserver.enable = true;
+
+      services.xserver.displayManager = {
+        gdm.enable = true;
+        gdm.debug = true;
+        autoLogin = {
+          enable = true;
+          user = user.name;
+        };
+      };
+
+      services.xserver.desktopManager.gnome.enable = true;
+      services.xserver.desktopManager.gnome.debug = true;
+      services.xserver.displayManager.defaultSession = "gnome-xorg";
+
+      systemd.user.services = {
+        "org.gnome.Shell@x11" = {
+          serviceConfig = {
+            ExecStart = [
+              # Clear the list before overriding it.
+              ""
+              # Eval API is now internal so Shell needs to run in unsafe mode.
+              # TODO: improve test driver so that it supports openqa-like manipulation
+              # that would allow us to drop this mess.
+              "${pkgs.gnome.gnome-shell}/bin/gnome-shell --unsafe-mode"
+            ];
+          };
+        };
+      };
+
+    };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+    uid = toString user.uid;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus";
+    xauthority = "/run/user/${uid}/gdm/Xauthority";
+    display = "DISPLAY=:0.0";
+    env = "${bus} XAUTHORITY=${xauthority} ${display}";
+    gdbus = "${env} gdbus";
+    su = command: "su - ${user.name} -c '${env} ${command}'";
+
+    # Call javascript in gnome shell, returns a tuple (success, output), where
+    # `success` is true if the dbus call was successful and output is what the
+    # javascript evaluates to.
+    eval = "call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval";
+
+    # False when startup is done
+    startingUp = su "${gdbus} ${eval} Main.layoutManager._startingUp";
+
+    # Start Console
+    launchConsole = su "${bus} gapplication launch org.gnome.Console";
+
+    # Hopefully Console's wm class
+    wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
+  in ''
+      with subtest("Login to GNOME Xorg with GDM"):
+          machine.wait_for_x()
+          # Wait for alice to be logged in
+          machine.wait_for_unit("default.target", "${user.name}")
+          machine.wait_for_file("${xauthority}")
+          machine.succeed("xauth merge ${xauthority}")
+          # Check that logging in has given the user ownership of devices
+          assert "alice" in machine.succeed("getfacl -p /dev/snd/timer")
+
+      with subtest("Wait for GNOME Shell"):
+          # correct output should be (true, 'false')
+          machine.wait_until_succeeds(
+              "${startingUp} | grep -q 'true,..false'"
+          )
+
+      with subtest("Open Console"):
+          # Close the Activities view so that Shell can correctly track the focused window.
+          machine.send_key("esc")
+
+          machine.succeed(
+              "${launchConsole}"
+          )
+          # correct output should be (true, '"kgx"')
+          # For some reason, this deviates from Wayland.
+          machine.wait_until_succeeds(
+              "${wmClass} | grep -q  'true,...kgx'"
+          )
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/gnome.nix b/nixpkgs/nixos/tests/gnome.nix
new file mode 100644
index 000000000000..448a3350240c
--- /dev/null
+++ b/nixpkgs/nixos/tests/gnome.nix
@@ -0,0 +1,93 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "gnome";
+  meta.maintainers = lib.teams.gnome.members;
+
+  nodes.machine =
+    { ... }:
+
+    { imports = [ ./common/user-account.nix ];
+
+      services.xserver.enable = true;
+
+      services.xserver.displayManager = {
+        gdm.enable = true;
+        gdm.debug = true;
+        autoLogin = {
+          enable = true;
+          user = "alice";
+        };
+      };
+
+      services.xserver.desktopManager.gnome.enable = true;
+      services.xserver.desktopManager.gnome.debug = true;
+
+      systemd.user.services = {
+        "org.gnome.Shell@wayland" = {
+          serviceConfig = {
+            ExecStart = [
+              # Clear the list before overriding it.
+              ""
+              # Eval API is now internal so Shell needs to run in unsafe mode.
+              # TODO: improve test driver so that it supports openqa-like manipulation
+              # that would allow us to drop this mess.
+              "${pkgs.gnome.gnome-shell}/bin/gnome-shell --unsafe-mode"
+            ];
+          };
+        };
+      };
+
+    };
+
+  testScript = { nodes, ... }: let
+    # Keep line widths somewhat manageable
+    user = nodes.machine.config.users.users.alice;
+    uid = toString user.uid;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${uid}/bus";
+    gdbus = "${bus} gdbus";
+    su = command: "su - ${user.name} -c '${command}'";
+
+    # Call javascript in gnome shell, returns a tuple (success, output), where
+    # `success` is true if the dbus call was successful and output is what the
+    # javascript evaluates to.
+    eval = "call --session -d org.gnome.Shell -o /org/gnome/Shell -m org.gnome.Shell.Eval";
+
+    # False when startup is done
+    startingUp = su "${gdbus} ${eval} Main.layoutManager._startingUp";
+
+    # Start Console
+    launchConsole = su "${bus} gapplication launch org.gnome.Console";
+
+    # Hopefully Console's wm class
+    wmClass = su "${gdbus} ${eval} global.display.focus_window.wm_class";
+  in ''
+      with subtest("Login to GNOME with GDM"):
+          # wait for gdm to start
+          machine.wait_for_unit("display-manager.service")
+          # wait for the wayland server
+          machine.wait_for_file("/run/user/${uid}/wayland-0")
+          # wait for alice to be logged in
+          machine.wait_for_unit("default.target", "${user.name}")
+          # check that logging in has given the user ownership of devices
+          assert "alice" in machine.succeed("getfacl -p /dev/snd/timer")
+
+      with subtest("Wait for GNOME Shell"):
+          # correct output should be (true, 'false')
+          machine.wait_until_succeeds(
+              "${startingUp} | grep -q 'true,..false'"
+          )
+
+      with subtest("Open Console"):
+          # Close the Activities view so that Shell can correctly track the focused window.
+          machine.send_key("esc")
+
+          machine.succeed(
+              "${launchConsole}"
+          )
+          # correct output should be (true, '"org.gnome.Console"')
+          machine.wait_until_succeeds(
+              "${wmClass} | grep -q 'true,...org.gnome.Console'"
+          )
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/gnupg.nix b/nixpkgs/nixos/tests/gnupg.nix
new file mode 100644
index 000000000000..65a9a93007fd
--- /dev/null
+++ b/nixpkgs/nixos/tests/gnupg.nix
@@ -0,0 +1,118 @@
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+
+{
+  name = "gnupg";
+  meta = with lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  # server for testing SSH
+  nodes.server = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    users.users.alice.isNormalUser = true;
+    services.openssh.enable = true;
+  };
+
+  # machine for testing GnuPG
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    users.users.alice.isNormalUser = true;
+    services.getty.autologinUser = "alice";
+
+    environment.shellInit = ''
+      # preset a key passphrase in gpg-agent
+      preset_key() {
+        # find all keys
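+        # (ssh keys added via ssh-add are tracked by keygrip in gpg-agent's sshcontrol file)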
+        case "$1" in
+          ssh) grips=$(awk '/^[0-9A-F]/{print $1}' "''${GNUPGHOME:-$HOME/.gnupg}/sshcontrol") ;;
+          pgp) grips=$(gpg --with-keygrip --list-secret-keys | awk '/Keygrip/{print $3}') ;;
+        esac
+
+        # try to preset the passphrase for each key found
+        for grip in $grips; do
+          "$(gpgconf --list-dirs libexecdir)/gpg-preset-passphrase" -c -P "$2" "$grip"
+        done
+      }
+    '';
+
+    programs.gnupg.agent.enable = true;
+    programs.gnupg.agent.enableSSHSupport = true;
+  };
+
+  testScript =
+    ''
+      import shlex
+
+
+      def as_alice(command: str) -> str:
+          """
+          Wraps a command to run it as Alice in a login shell
+          """
+          quoted = shlex.quote(command)
+          return "su --login alice --command " + quoted
+
+
+      start_all()
+
+      with subtest("Wait for the autologin"):
+          machine.wait_until_tty_matches("1", "alice@machine")
+
+      with subtest("Can generate a PGP key"):
+          # Note: this needs a tty because of pinentry
+          machine.send_chars("gpg --gen-key\n")
+          machine.wait_until_tty_matches("1", "Real name:")
+          machine.send_chars("Alice\n")
+          machine.wait_until_tty_matches("1", "Email address:")
+          machine.send_chars("alice@machine\n")
+          machine.wait_until_tty_matches("1", "Change")
+          machine.send_chars("O\n")
+          machine.wait_until_tty_matches("1", "Please enter")
+          machine.send_chars("pgp_p4ssphrase\n")
+          machine.wait_until_tty_matches("1", "Please re-enter")
+          machine.send_chars("pgp_p4ssphrase\n")
+          machine.wait_until_tty_matches("1", "public and secret key created")
+
+      with subtest("Confirm the key is in the keyring"):
+          machine.wait_until_succeeds(as_alice("gpg --list-secret-keys | grep -q alice@machine"))
+
+      with subtest("Can generate and add an SSH key"):
+          machine.succeed(as_alice("ssh-keygen -t ed25519 -f alice -N ssh_p4ssphrase"))
+
+          # Note: apparently this must be run before using the OpenSSH agent
+          # socket for the first time in a tty. It's not needed for `ssh`
+          # because there's a hook that calls it automatically (only in NixOS).
+          machine.send_chars("gpg-connect-agent updatestartuptty /bye\n")
+
+          # Note: again, this needs a tty because of pinentry
+          machine.send_chars("ssh-add alice\n")
+          machine.wait_until_tty_matches("1", "Enter passphrase")
+          machine.send_chars("ssh_p4ssphrase\n")
+          machine.wait_until_tty_matches("1", "Please enter")
+          machine.send_chars("ssh_agent_p4ssphrase\n")
+          machine.wait_until_tty_matches("1", "Please re-enter")
+          machine.send_chars("ssh_agent_p4ssphrase\n")
+
+      with subtest("Confirm the SSH key has been registered"):
+          machine.wait_until_succeeds(as_alice("ssh-add -l | grep -q alice@machine"))
+
+      with subtest("Can preset the key passphrases in the agent"):
+          machine.succeed(as_alice("echo allow-preset-passphrase > .gnupg/gpg-agent.conf"))
+          machine.succeed(as_alice("pkill gpg-agent"))
+          machine.succeed(as_alice("preset_key pgp pgp_p4ssphrase"))
+          machine.succeed(as_alice("preset_key ssh ssh_agent_p4ssphrase"))
+
+      with subtest("Can encrypt and decrypt a message"):
+          machine.succeed(as_alice("echo Hello | gpg -e -r alice | gpg -d | grep -q Hello"))
+
+      with subtest("Can log into the server"):
+          # Install Alice's public key
+          public_key = machine.succeed(as_alice("cat alice.pub"))
+          server.succeed("mkdir /etc/ssh/authorized_keys.d")
+          server.succeed(f"printf '{public_key}' > /etc/ssh/authorized_keys.d/alice")
+
+          server.wait_for_open_port(22)
+          machine.succeed(as_alice("ssh -i alice -o StrictHostKeyChecking=no server exit"))
+    '';
+})
diff --git a/nixpkgs/nixos/tests/go-neb.nix b/nixpkgs/nixos/tests/go-neb.nix
new file mode 100644
index 000000000000..4bd03dcf3c6b
--- /dev/null
+++ b/nixpkgs/nixos/tests/go-neb.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "go-neb";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hexa maralorn ];
+  };
+
+  nodes = {
+    server = {
+      services.go-neb = {
+        enable = true;
+        baseUrl = "http://localhost";
+        secretFile = pkgs.writeText "secrets" "ACCESS_TOKEN=changeme";
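+        # values from secretFile (e.g. $ACCESS_TOKEN below) get substituted into the runtime config; the grep at the end of the test verifies this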
+        config = {
+          clients = [ {
+            UserId = "@test:localhost";
+            AccessToken = "$ACCESS_TOKEN";
+            HomeServerUrl = "http://localhost";
+            Sync = false;
+            AutoJoinRooms = false;
+            DisplayName = "neverbeseen";
+          } ];
+          services = [ {
+            ID = "wikipedia_service";
+            Type = "wikipedia";
+            UserID = "@test:localhost";
+            Config = { };
+          } ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("go-neb.service")
+    server.wait_until_succeeds("curl -fL http://localhost:4050/services/hooks/d2lraXBlZGlhX3NlcnZpY2U")
+    server.succeed(
+        "journalctl -eu go-neb -o cat | grep -q service_id=wikipedia_service",
+        "grep -q changeme /var/run/go-neb/config.yaml",
+    )
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/gobgpd.nix b/nixpkgs/nixos/tests/gobgpd.nix
new file mode 100644
index 000000000000..775f65d1199f
--- /dev/null
+++ b/nixpkgs/nixos/tests/gobgpd.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
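+    # helper: first statically configured IPv4 address of the given interface on a node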
+    ifAddr = node: iface: (pkgs.lib.head node.config.networking.interfaces.${iface}.ipv4.addresses).address;
+  in {
+    name = "gobgpd";
+
+    meta = with pkgs.lib.maintainers; { maintainers = [ higebu ]; };
+
+    nodes = {
+      node1 = { nodes, ... }: {
+        environment.systemPackages = [ pkgs.gobgp ];
+        networking.firewall.allowedTCPPorts = [ 179 ];
+        services.gobgpd = {
+          enable = true;
+          settings = {
+            global = {
+              config = {
+                as = 64512;
+                router-id = "192.168.255.1";
+              };
+            };
+            neighbors = [{
+              config = {
+                neighbor-address = ifAddr nodes.node2 "eth1";
+                peer-as = 64513;
+              };
+            }];
+          };
+        };
+      };
+      node2 = { nodes, ... }: {
+        environment.systemPackages = [ pkgs.gobgp ];
+        networking.firewall.allowedTCPPorts = [ 179 ];
+        services.gobgpd = {
+          enable = true;
+          settings = {
+            global = {
+              config = {
+                as = 64513;
+                router-id = "192.168.255.2";
+              };
+            };
+            neighbors = [{
+              config = {
+                neighbor-address = ifAddr nodes.node1 "eth1";
+                peer-as = 64512;
+              };
+            }];
+          };
+        };
+      };
+    };
+
+    testScript = { nodes, ... }: let
+      addr1 = ifAddr nodes.node1 "eth1";
+      addr2 = ifAddr nodes.node2 "eth1";
+    in
+      ''
+      start_all()
+
+      for node in node1, node2:
+          with subtest("should start gobgpd node"):
+              node.wait_for_unit("gobgpd.service")
+          with subtest("should open port 179"):
+              node.wait_for_open_port(179)
+
+      with subtest("should show neighbors by gobgp cli and BGP state should be ESTABLISHED"):
+          node1.wait_until_succeeds("gobgp neighbor ${addr2} | grep -q ESTABLISHED")
+          node2.wait_until_succeeds("gobgp neighbor ${addr1} | grep -q ESTABLISHED")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/gocd-agent.nix b/nixpkgs/nixos/tests/gocd-agent.nix
new file mode 100644
index 000000000000..9301a88ec05d
--- /dev/null
+++ b/nixpkgs/nixos/tests/gocd-agent.nix
@@ -0,0 +1,48 @@
+# verifies:
+#   1. GoCD agent starts
+#   2. GoCD agent responds
+#   3. GoCD agent is available on GoCD server using GoCD API
+#     3.1. https://api.go.cd/current/#get-all-agents
+
+let
+  serverUrl = "localhost:8153/go/api/agents";
+  header = "Accept: application/vnd.go.cd.v2+json";
+in
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "gocd-agent";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ grahamc swarren83 ];
+
+    # the GoCD agent needs to register with the autoregister key created on first server startup,
+    # but the NixOS module currently doesn't seem to provide a way to pass it in at runtime
+    broken = true;
+  };
+
+  nodes = {
+    agent =
+      { ... }:
+      {
+        virtualisation.memorySize = 2046;
+        services.gocd-agent = {
+          enable = true;
+        };
+        services.gocd-server = {
+          enable = true;
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    agent.wait_for_unit("gocd-server")
+    agent.wait_for_open_port(8153)
+    agent.wait_for_unit("gocd-agent")
+    agent.wait_until_succeeds(
+        "curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].uuid"
+    )
+    agent.succeed(
+        "curl ${serverUrl} -H '${header}' | ${pkgs.jq}/bin/jq -e ._embedded.agents[0].agent_state | grep Idle"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gocd-server.nix b/nixpkgs/nixos/tests/gocd-server.nix
new file mode 100644
index 000000000000..aff651c5278f
--- /dev/null
+++ b/nixpkgs/nixos/tests/gocd-server.nix
@@ -0,0 +1,28 @@
+# verifies:
+#   1. GoCD server starts
+#   2. GoCD server responds
+
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "gocd-server";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ swarren83 ];
+  };
+
+  nodes = {
+    server =
+      { ... }:
+      {
+        virtualisation.memorySize = 2046;
+        services.gocd-server.enable = true;
+      };
+  };
+
+  testScript = ''
+    server.start()
+    server.wait_for_unit("gocd-server")
+    server.wait_for_open_port(8153)
+    server.wait_until_succeeds("curl -s -f localhost:8153/go")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gollum.nix b/nixpkgs/nixos/tests/gollum.nix
new file mode 100644
index 000000000000..44d373e35262
--- /dev/null
+++ b/nixpkgs/nixos/tests/gollum.nix
@@ -0,0 +1,14 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "gollum";
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.gollum.enable = true;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    webserver.wait_for_unit("gollum")
+    webserver.wait_for_open_port(${toString nodes.webserver.services.gollum.port})
+  '';
+})
diff --git a/nixpkgs/nixos/tests/gonic.nix b/nixpkgs/nixos/tests/gonic.nix
new file mode 100644
index 000000000000..726d7da0970f
--- /dev/null
+++ b/nixpkgs/nixos/tests/gonic.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "gonic";
+
+  nodes.machine = { ... }: {
+    services.gonic = {
+      enable = true;
+      settings = {
+        music-path = [ "/tmp" ];
+        podcast-path = "/tmp";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("gonic")
+    machine.wait_for_open_port(4747)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/google-oslogin/default.nix b/nixpkgs/nixos/tests/google-oslogin/default.nix
new file mode 100644
index 000000000000..72c87d7153bd
--- /dev/null
+++ b/nixpkgs/nixos/tests/google-oslogin/default.nix
@@ -0,0 +1,74 @@
+import ../make-test-python.nix ({ pkgs, ... } :
+let
+  inherit (import ./../ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+    # don't check host keys or known hosts, use the snakeoil ssh key
+    ssh-config = builtins.toFile "ssh.conf" ''
+      UserKnownHostsFile=/dev/null
+      StrictHostKeyChecking=no
+      IdentityFile=~/.ssh/id_snakeoil
+    '';
+in {
+  name = "google-oslogin";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ adisbladis flokli ];
+  };
+
+  nodes = {
+    # the server provides both the mocked google metadata server and the ssh server
+    server = (import ./server.nix pkgs);
+
+    client = { ... }: {};
+  };
+  testScript = ''
+    MOCKUSER = "mockuser_nixos_org"
+    MOCKADMIN = "mockadmin_nixos_org"
+    start_all()
+
+    server.wait_for_unit("mock-google-metadata.service")
+    server.wait_for_open_port(80)
+
+    # mockserver should return a non-expired ssh key for both mockuser and mockadmin
+    server.succeed(
+        f'${pkgs.google-guest-oslogin}/bin/google_authorized_keys {MOCKUSER} | grep -q "${snakeOilPublicKey}"'
+    )
+    server.succeed(
+        f'${pkgs.google-guest-oslogin}/bin/google_authorized_keys {MOCKADMIN} | grep -q "${snakeOilPublicKey}"'
+    )
+
+    # install snakeoil ssh key on the client, and provision .ssh/config file
+    client.succeed("mkdir -p ~/.ssh")
+    client.succeed(
+        "cat ${snakeOilPrivateKey} > ~/.ssh/id_snakeoil"
+    )
+    client.succeed("chmod 600 ~/.ssh/id_snakeoil")
+    client.succeed("cp ${ssh-config} ~/.ssh/config")
+
+    client.wait_for_unit("network.target")
+    server.wait_for_unit("sshd.service")
+
+    # we should not be able to connect as non-existing user
+    client.fail("ssh ghost@server 'true'")
+
+    # we should be able to connect as mockuser
+    client.succeed(f"ssh {MOCKUSER}@server 'true'")
+    # but we shouldn't be able to sudo
+    client.fail(
+        f"ssh {MOCKUSER}@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
+    )
+
+    # we should also be able to log in as mockadmin
+    client.succeed(f"ssh {MOCKADMIN}@server 'true'")
+    # pam_oslogin_admin.so should now have generated a sudoers file
+    server.succeed(
+        f"find /run/google-sudoers.d | grep -q '/run/google-sudoers.d/{MOCKADMIN}'"
+    )
+
+    # and we should be able to sudo
+    client.succeed(
+        f"ssh {MOCKADMIN}@server '/run/wrappers/bin/sudo /run/current-system/sw/bin/id' | grep -q 'root'"
+    )
+  '';
+  })
+
diff --git a/nixpkgs/nixos/tests/google-oslogin/server.nix b/nixpkgs/nixos/tests/google-oslogin/server.nix
new file mode 100644
index 000000000000..3df41155c92d
--- /dev/null
+++ b/nixpkgs/nixos/tests/google-oslogin/server.nix
@@ -0,0 +1,27 @@
+{ pkgs, ... }:
+let
+  inherit (import ./../ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+in {
+  networking.firewall.allowedTCPPorts = [ 80 ];
+
+  systemd.services.mock-google-metadata = {
+    description = "Mock Google metadata service";
+    serviceConfig.Type = "simple";
+    serviceConfig.ExecStart = "${pkgs.python3}/bin/python ${./server.py}";
+    environment = {
+      SNAKEOIL_PUBLIC_KEY = snakeOilPublicKey;
+    };
+    wantedBy = [ "multi-user.target" ];
+    after = [ "network.target" ];
+  };
+
+  services.openssh.enable = true;
+  services.openssh.settings.KbdInteractiveAuthentication = false;
+  services.openssh.settings.PasswordAuthentication = false;
+
+  security.googleOsLogin.enable = true;
+
+  # make the mock metadata service above reachable at the well-known GCE metadata address
+  networking.interfaces.lo.ipv4.addresses = [ { address = "169.254.169.254"; prefixLength = 32; } ];
+}
diff --git a/nixpkgs/nixos/tests/google-oslogin/server.py b/nixpkgs/nixos/tests/google-oslogin/server.py
new file mode 100755
index 000000000000..622cd86b2619
--- /dev/null
+++ b/nixpkgs/nixos/tests/google-oslogin/server.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python3
+import json
+import sys
+import time
+import os
+import hashlib
+import base64
+
+from http.server import BaseHTTPRequestHandler, HTTPServer
+from urllib.parse import urlparse, parse_qs
+from typing import Dict
+
+SNAKEOIL_PUBLIC_KEY = os.environ['SNAKEOIL_PUBLIC_KEY']
+MOCKUSER="mockuser_nixos_org"
+MOCKADMIN="mockadmin_nixos_org"
+
+
+def w(msg: bytes):
+    sys.stderr.write(f"{msg}\n")
+    sys.stderr.flush()
+
+
+def gen_fingerprint(pubkey: str):
+    decoded_key = base64.b64decode(pubkey.encode("ascii").split()[1])
+    return hashlib.sha256(decoded_key).hexdigest()
+
+
+def gen_email(username: str):
+    """username seems to be a 21 characters long number string, so mimic that in a reproducible way"""
+    return str(int(hashlib.sha256(username.encode()).hexdigest(), 16))[0:21]
+
+
+def gen_mockuser(username: str, uid: str, gid: str, home_directory: str, snakeoil_pubkey: str) -> Dict:
+    snakeoil_pubkey_fingerprint = gen_fingerprint(snakeoil_pubkey)
+    # seems to be a 21-character-long number string, so mimic that in a reproducible way
+    email = gen_email(username)
+    return {
+        "loginProfiles": [
+            {
+                "name": email,
+                "posixAccounts": [
+                    {
+                        "primary": True,
+                        "username": username,
+                        "uid": uid,
+                        "gid": gid,
+                        "homeDirectory": home_directory,
+                        "operatingSystemType": "LINUX"
+                    }
+                ],
+                "sshPublicKeys": {
+                    snakeoil_pubkey_fingerprint: {
+                        "key": snakeoil_pubkey,
+                        "expirationTimeUsec": str((time.time() + 600) * 1000000),  # 10 minutes in the future
+                        "fingerprint": snakeoil_pubkey_fingerprint
+                    }
+                }
+            }
+        ]
+    }
+
+
+class ReqHandler(BaseHTTPRequestHandler):
+
+    def _send_json_ok(self, data: dict):
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+        out = json.dumps(data).encode()
+        w(out)
+        self.wfile.write(out)
+
+    def _send_json_success(self, success=True):
+        self.send_response(200)
+        self.send_header('Content-type', 'application/json')
+        self.end_headers()
+        out = json.dumps({"success": success}).encode()
+        w(out)
+        self.wfile.write(out)
+
+    def _send_404(self):
+        self.send_response(404)
+        self.end_headers()
+
+    def do_GET(self):
+        p = str(self.path)
+        pu = urlparse(p)
+        params = parse_qs(pu.query)
+
+        # users endpoint
+        if pu.path == "/computeMetadata/v1/oslogin/users":
+            # mockuser and mockadmin are allowed to login, both use the same snakeoil public key
+            if params.get('username') == [MOCKUSER] or params.get('uid') == ["1009719690"]:
+                username = MOCKUSER
+                uid = "1009719690"
+            elif params.get('username') == [MOCKADMIN] or params.get('uid') == ["1009719691"]:
+                username = MOCKADMIN
+                uid = "1009719691"
+            else:
+                self._send_404()
+                return
+
+            self._send_json_ok(gen_mockuser(username=username, uid=uid, gid=uid, home_directory=f"/home/{username}", snakeoil_pubkey=SNAKEOIL_PUBLIC_KEY))
+            return
+
+        # we need to provide something at the groups endpoint;
+        # the NSS module segfaults if we don't.
+        elif pu.path == "/computeMetadata/v1/oslogin/groups":
+            self._send_json_ok({
+                "posixGroups": [
+                    {"name" : "demo", "gid" : 4294967295}
+                ],
+            })
+            return
+
+        # authorize endpoint
+        elif pu.path == "/computeMetadata/v1/oslogin/authorize":
+            # is user allowed to login?
+            if params.get("policy") == ["login"]:
+                # mockuser and mockadmin are allowed to login
+                if params.get('email') == [gen_email(MOCKUSER)] or params.get('email') == [gen_email(MOCKADMIN)]:
+                    self._send_json_success()
+                    return
+                self._send_json_success(False)
+                return
+            # is user allowed to become root?
+            elif params.get("policy") == ["adminLogin"]:
+                # only mockadmin is allowed to become admin
+                self._send_json_success((params['email'] == [gen_email(MOCKADMIN)]))
+                return
+            # send 404 for other policies
+            else:
+                self._send_404()
+                return
+        else:
+            sys.stderr.write(f"Unhandled path: {p}\n")
+            sys.stderr.flush()
+            self.send_response(404)
+            self.end_headers()
+            self.wfile.write(b'')
+
+
+if __name__ == '__main__':
+    s = HTTPServer(('0.0.0.0', 80), ReqHandler)
+    s.serve_forever()
diff --git a/nixpkgs/nixos/tests/goss.nix b/nixpkgs/nixos/tests/goss.nix
new file mode 100644
index 000000000000..6b772d19215e
--- /dev/null
+++ b/nixpkgs/nixos/tests/goss.nix
@@ -0,0 +1,53 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "goss";
+  meta.maintainers = [ lib.maintainers.anthonyroussel ];
+
+  nodes.machine = {
+    environment.systemPackages = [ pkgs.jq ];
+
+    services.goss = {
+      enable = true;
+
+      environment = {
+        GOSS_FMT = "json";
+      };
+
+      settings = {
+        addr."tcp://localhost:8080" = {
+          reachable = true;
+          local-address = "127.0.0.1";
+        };
+        command."check-goss-version" = {
+          exec = "${lib.getExe pkgs.goss} --version";
+          exit-status = 0;
+        };
+        dns.localhost.resolvable = true;
+        file."/nix" = {
+          filetype = "directory";
+          exists = true;
+        };
+        group.root.exists = true;
+        kernel-param."kernel.ostype".value = "Linux";
+        service.goss = {
+          enabled = true;
+          running = true;
+        };
+        user.root.exists = true;
+      };
+    };
+  };
+
+  testScript = ''
+    import json
+
+    machine.wait_for_unit("goss.service")
+    machine.wait_for_open_port(8080)
+
+    with subtest("returns health status"):
+      result = json.loads(machine.succeed("curl -sS http://localhost:8080/healthz"))
+
+      assert len(result["results"]) == 10, f".results should be an array of 10 items, was {result['results']!r}"
+      assert result["summary"]["failed-count"] == 0, f".summary.failed-count should be zero, was {result['summary']['failed-count']}"
+      assert result["summary"]["test-count"] == 10, f".summary.test-count should be 10, was {result['summary']['test-count']}"
+    '';
+})
diff --git a/nixpkgs/nixos/tests/gotify-server.nix b/nixpkgs/nixos/tests/gotify-server.nix
new file mode 100644
index 000000000000..c8d7fa172a7b
--- /dev/null
+++ b/nixpkgs/nixos/tests/gotify-server.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "gotify-server";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.jq ];
+
+    services.gotify = {
+      enable = true;
+      port = 3000;
+    };
+  };
+
+  testScript = ''
+    machine.start()
+
+    machine.wait_for_unit("gotify-server.service")
+    machine.wait_for_open_port(3000)
+
+    token = machine.succeed(
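+    # Create an application token and a client token using Gotify's default admin account (admin:admin).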
+        "curl --fail -sS -X POST localhost:3000/application -F name=nixos "
+        + '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" '
+        + "| jq .token | xargs echo -n"
+    )
+
+    usertoken = machine.succeed(
+        "curl --fail -sS -X POST localhost:3000/client -F name=nixos "
+        + '-H "Authorization: Basic $(echo -ne "admin:admin" | base64 --wrap 0)" '
+        + "| jq .token | xargs echo -n"
+    )
+
+    machine.succeed(
+        f"curl --fail -sS -X POST 'localhost:3000/message?token={token}' -H 'Accept: application/json' "
+        + "-F title=Gotify -F message=Works"
+    )
+
+    title = machine.succeed(
+        f"curl --fail -sS 'localhost:3000/message?since=0&token={usertoken}' | jq '.messages|.[0]|.title' | xargs echo -n"
+    )
+
+    assert title == "Gotify"
+
+    # Ensure that the UI responds with a successful code and that the
+    # response is not empty
+    result = machine.succeed("curl -fsS localhost:3000")
+    assert result, "HTTP response from localhost:3000 must not be empty!"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/grafana-agent.nix b/nixpkgs/nixos/tests/grafana-agent.nix
new file mode 100644
index 000000000000..a9f34d8cea31
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana-agent.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+  let
+    nodes = {
+      machine = {
+        services.grafana-agent = {
+          enable = true;
+        };
+      };
+    };
+  in
+  {
+    name = "grafana-agent";
+
+    meta = with lib.maintainers; {
+      maintainers = [ zimbatm ];
+    };
+
+    inherit nodes;
+
+    testScript = ''
+      start_all()
+
+      with subtest("Grafana-agent is running"):
+          machine.wait_for_unit("grafana-agent.service")
+          machine.wait_for_open_port(12345)
+          machine.succeed(
+              "curl -sSfN http://127.0.0.1:12345/-/healthy"
+          )
+          machine.shutdown()
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/grafana/basic.nix b/nixpkgs/nixos/tests/grafana/basic.nix
new file mode 100644
index 000000000000..dd389bc8a3d1
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/basic.nix
@@ -0,0 +1,142 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  inherit (lib) mkMerge nameValuePair maintainers;
+
+  baseGrafanaConf = {
+    services.grafana = {
+      enable = true;
+      settings = {
+        analytics.reporting_enabled = false;
+
+        server = {
+          http_addr = "localhost";
+          domain = "localhost";
+        };
+
+        security = {
+          admin_user = "testadmin";
+          admin_password = "snakeoilpwd";
+        };
+      };
+    };
+  };
+
+  extraNodeConfs = {
+    sqlite = {};
+
+    socket = { config, ... }: {
+      services.grafana.settings.server = {
+        protocol = "socket";
+        socket = "/run/grafana/sock";
+        socket_gid = config.users.groups.nginx.gid;
+      };
+
+      users.users.grafana.extraGroups = [ "nginx" ];
+
+      services.nginx = {
+        enable = true;
+        recommendedProxySettings = true;
+        virtualHosts."_".locations."/".proxyPass = "http://unix:/run/grafana/sock";
+      };
+    };
+
+    declarativePlugins = {
+      services.grafana.declarativePlugins = [ pkgs.grafanaPlugins.grafana-clock-panel ];
+    };
+
+    postgresql = {
+      services.grafana.settings.database = {
+        host = "127.0.0.1:5432";
+        user = "grafana";
+      };
+      services.postgresql = {
+        enable = true;
+        ensureDatabases = [ "grafana" ];
+        ensureUsers = [{
+          name = "grafana";
+          ensureDBOwnership = true;
+        }];
+      };
+      systemd.services.grafana.after = [ "postgresql.service" ];
+    };
+
+    mysql = {
+      services.grafana.settings.database.user = "grafana";
+      services.mysql = {
+        enable = true;
+        ensureDatabases = [ "grafana" ];
+        ensureUsers = [{
+          name = "grafana";
+          ensurePermissions."grafana.*" = "ALL PRIVILEGES";
+        }];
+        package = pkgs.mariadb;
+      };
+      systemd.services.grafana.after = [ "mysql.service" ];
+    };
+  };
+
+  nodes = builtins.mapAttrs (_: val: mkMerge [ val baseGrafanaConf ]) extraNodeConfs;
+in {
+  name = "grafana-basic";
+
+  meta = with maintainers; {
+    maintainers = [ willibutz ];
+  };
+
+  inherit nodes;
+
+  testScript = ''
+    start_all()
+
+    with subtest("Declarative plugins installed"):
+        declarativePlugins.wait_for_unit("grafana.service")
+        declarativePlugins.wait_for_open_port(3000)
+        declarativePlugins.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/plugins | grep grafana-clock-panel"
+        )
+        declarativePlugins.shutdown()
+
+    with subtest("Successful API query as admin user with sqlite db"):
+        sqlite.wait_for_unit("grafana.service")
+        sqlite.wait_for_open_port(3000)
+        print(sqlite.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users -i"
+        ))
+        sqlite.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
+        )
+        sqlite.shutdown()
+
+    with subtest("Successful API query as admin user with sqlite db listening on socket"):
+        socket.wait_for_unit("grafana.service")
+        socket.wait_for_open_port(80)
+        print(socket.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1/api/org/users -i"
+        ))
+        socket.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1/api/org/users | grep admin\@localhost"
+        )
+        socket.shutdown()
+
+    with subtest("Successful API query as admin user with postgresql db"):
+        postgresql.wait_for_unit("grafana.service")
+        postgresql.wait_for_unit("postgresql.service")
+        postgresql.wait_for_open_port(3000)
+        postgresql.wait_for_open_port(5432)
+        postgresql.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
+        )
+        postgresql.shutdown()
+
+    with subtest("Successful API query as admin user with mysql db"):
+        mysql.wait_for_unit("grafana.service")
+        mysql.wait_for_unit("mysql.service")
+        mysql.wait_for_open_port(3000)
+        mysql.wait_for_open_port(3306)
+        mysql.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
+        )
+        mysql.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/grafana/default.nix b/nixpkgs/nixos/tests/grafana/default.nix
new file mode 100644
index 000000000000..9c2622571800
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/default.nix
@@ -0,0 +1,9 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+
+{
+  basic = import ./basic.nix { inherit system pkgs; };
+  provision = import ./provision { inherit system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/grafana/provision/contact-points.yaml b/nixpkgs/nixos/tests/grafana/provision/contact-points.yaml
new file mode 100644
index 000000000000..2a5f14e75e2d
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/contact-points.yaml
@@ -0,0 +1,9 @@
+apiVersion: 1
+
+contactPoints:
+  - name: "Test Contact Point"
+    receivers:
+      - uid: "test_contact_point"
+        type: prometheus-alertmanager
+        settings:
+          url: http://localhost:9000
diff --git a/nixpkgs/nixos/tests/grafana/provision/dashboards.yaml b/nixpkgs/nixos/tests/grafana/provision/dashboards.yaml
new file mode 100644
index 000000000000..dc83fe6b892d
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/dashboards.yaml
@@ -0,0 +1,6 @@
+apiVersion: 1
+
+providers:
+  - name: 'default'
+    options:
+      path: /var/lib/grafana/dashboards
diff --git a/nixpkgs/nixos/tests/grafana/provision/datasources.yaml b/nixpkgs/nixos/tests/grafana/provision/datasources.yaml
new file mode 100644
index 000000000000..ccf9481db7f3
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/datasources.yaml
@@ -0,0 +1,7 @@
+apiVersion: 1
+
+datasources:
+  - name: 'Test Datasource'
+    type: 'testdata'
+    access: 'proxy'
+    uid: 'test_datasource'
diff --git a/nixpkgs/nixos/tests/grafana/provision/default.nix b/nixpkgs/nixos/tests/grafana/provision/default.nix
new file mode 100644
index 000000000000..d33d16ce1209
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/default.nix
@@ -0,0 +1,256 @@
+import ../../make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  inherit (lib) mkMerge nameValuePair maintainers;
+
+  baseGrafanaConf = {
+    services.grafana = {
+      enable = true;
+      provision.enable = true;
+      settings = {
+        analytics.reporting_enabled = false;
+
+        server = {
+          http_addr = "localhost";
+          domain = "localhost";
+        };
+
+        security = {
+          admin_user = "testadmin";
+          admin_password = "$__file{${pkgs.writeText "pwd" "snakeoilpwd"}}";
+        };
+      };
+    };
+
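+    # Install the test dashboard into /var/lib/grafana/dashboards, where the provisioned dashboard providers point.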
+    systemd.tmpfiles.rules =
+      let
+        dashboard = pkgs.writeText "test.json" (builtins.readFile ./test_dashboard.json);
+      in
+      [
+        "d /var/lib/grafana/dashboards 0700 grafana grafana -"
+        "C+ /var/lib/grafana/dashboards/test.json - - - - ${dashboard}"
+      ];
+  };
+
+  extraNodeConfs = {
+    provisionLegacyNotifiers = {
+      services.grafana.provision = {
+        datasources.settings = {
+          apiVersion = 1;
+          datasources = [{
+            name = "Test Datasource";
+            type = "testdata";
+            access = "proxy";
+            uid = "test_datasource";
+          }];
+        };
+        dashboards.settings = {
+          apiVersion = 1;
+          providers = [{
+            name = "default";
+            options.path = "/var/lib/grafana/dashboards";
+          }];
+        };
+        notifiers = [{
+          uid = "test_notifiers";
+          name = "Test Notifiers";
+          type = "email";
+          settings = {
+            singleEmail = true;
+            addresses = "test@test.com";
+          };
+        }];
+      };
+    };
+    provisionNix = {
+      services.grafana.provision = {
+        datasources.settings = {
+          apiVersion = 1;
+          datasources = [{
+            name = "Test Datasource";
+            type = "testdata";
+            access = "proxy";
+            uid = "test_datasource";
+          }];
+        };
+
+        dashboards.settings = {
+          apiVersion = 1;
+          providers = [{
+            name = "default";
+            options.path = "/var/lib/grafana/dashboards";
+          }];
+        };
+
+        alerting = {
+          rules.settings = {
+            groups = [{
+              name = "test_rule_group";
+              folder = "test_folder";
+              interval = "60s";
+              rules = [{
+                uid = "test_rule";
+                title = "Test Rule";
+                condition = "A";
+                data = [{
+                  refId = "A";
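+                  # "-100" is the UID of Grafana's built-in expression pseudo-datasource (cf. datasource.type = "__expr__" below)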
+                  datasourceUid = "-100";
+                  model = {
+                    conditions = [{
+                      evaluator = {
+                        params = [ 3 ];
+                        type = "gt";
+                      };
+                      operator.type = "and";
+                      query.params = [ "A" ];
+                      reducer.type = "last";
+                      type = "query";
+                    }];
+                    datasource = {
+                      type = "__expr__";
+                      uid = "-100";
+                    };
+                    expression = "1==0";
+                    intervalMs = 1000;
+                    maxDataPoints = 43200;
+                    refId = "A";
+                    type = "math";
+                  };
+                }];
+                for = "60s";
+              }];
+            }];
+          };
+
+          contactPoints.settings = {
+            contactPoints = [{
+              name = "Test Contact Point";
+              receivers = [{
+                uid = "test_contact_point";
+                type = "prometheus-alertmanager";
+                settings.url = "http://localhost:9000";
+              }];
+            }];
+          };
+
+          policies.settings = {
+            policies = [{
+              receiver = "Test Contact Point";
+            }];
+          };
+
+          templates.settings = {
+            templates = [{
+              name = "Test Template";
+              template = "Test message";
+            }];
+          };
+
+          muteTimings.settings = {
+            muteTimes = [{
+              name = "Test Mute Timing";
+            }];
+          };
+        };
+      };
+    };
+
+    provisionYaml = {
+      services.grafana.provision = {
+        datasources.path = ./datasources.yaml;
+        dashboards.path = ./dashboards.yaml;
+        alerting = {
+          rules.path = ./rules.yaml;
+          contactPoints.path = ./contact-points.yaml;
+          policies.path = ./policies.yaml;
+          templates.path = ./templates.yaml;
+          muteTimings.path = ./mute-timings.yaml;
+        };
+      };
+    };
+
+    provisionYamlDirs = let
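+      # wrap each YAML file in a store directory to exercise passing directories (rather than single files) to the .path options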
+      mkdir = p: pkgs.writeTextDir (baseNameOf p) (builtins.readFile p);
+    in {
+      services.grafana.provision = {
+        datasources.path = mkdir ./datasources.yaml;
+        dashboards.path = mkdir ./dashboards.yaml;
+        alerting = {
+          rules.path = mkdir ./rules.yaml;
+          contactPoints.path = mkdir ./contact-points.yaml;
+          policies.path = mkdir ./policies.yaml;
+          templates.path = mkdir ./templates.yaml;
+          muteTimings.path = mkdir ./mute-timings.yaml;
+        };
+      };
+    };
+  };
+
+  nodes = builtins.mapAttrs (_: val: mkMerge [ val baseGrafanaConf ]) extraNodeConfs;
+in {
+  name = "grafana-provision";
+
+  meta = with maintainers; {
+    maintainers = [ kfears willibutz ];
+  };
+
+  inherit nodes;
+
+  testScript = ''
+    start_all()
+
+    nodeNix = ("Nix (new format)", provisionNix)
+    nodeYaml = ("Nix (YAML)", provisionYaml)
+    nodeYamlDir = ("Nix (YAML in dirs)", provisionYamlDirs)
+
+    for description, machine in [nodeNix, nodeYaml, nodeYamlDir]:
+        with subtest(f"Should start provision node: {description}"):
+            machine.wait_for_unit("grafana.service")
+            machine.wait_for_open_port(3000)
+
+        with subtest(f"Successful datasource provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/datasources/uid/test_datasource | grep Test\ Datasource"
+            )
+
+        with subtest(f"Successful dashboard provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/dashboards/uid/test_dashboard | grep Test\ Dashboard"
+            )
+
+        with subtest(f"Successful rule provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/alert-rules/test_rule | grep Test\ Rule"
+            )
+
+        with subtest(f"Successful contact point provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/contact-points | grep Test\ Contact\ Point"
+            )
+
+        with subtest(f"Successful policy provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/policies | grep Test\ Contact\ Point"
+            )
+
+        with subtest(f"Successful template provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/templates | grep Test\ Template"
+            )
+
+        with subtest("Successful mute timings provision with {description}"):
+            machine.succeed(
+                "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/v1/provisioning/mute-timings | grep Test\ Mute\ Timing"
+            )
+
+    with subtest("Successful notifiers provision"):
+        provisionLegacyNotifiers.wait_for_unit("grafana.service")
+        provisionLegacyNotifiers.wait_for_open_port(3000)
+        print(provisionLegacyNotifiers.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/alert-notifications/uid/test_notifiers"
+        ))
+        provisionLegacyNotifiers.succeed(
+            "curl -sSfN -u testadmin:snakeoilpwd http://127.0.0.1:3000/api/alert-notifications/uid/test_notifiers | grep Test\ Notifiers"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/grafana/provision/mute-timings.yaml b/nixpkgs/nixos/tests/grafana/provision/mute-timings.yaml
new file mode 100644
index 000000000000..1f47f7c18f0c
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/mute-timings.yaml
@@ -0,0 +1,4 @@
+apiVersion: 1
+
+muteTimes:
+  - name: "Test Mute Timing"
diff --git a/nixpkgs/nixos/tests/grafana/provision/policies.yaml b/nixpkgs/nixos/tests/grafana/provision/policies.yaml
new file mode 100644
index 000000000000..eb31126c4ba5
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/policies.yaml
@@ -0,0 +1,4 @@
+apiVersion: 1
+
+policies:
+  - receiver: "Test Contact Point"
diff --git a/nixpkgs/nixos/tests/grafana/provision/rules.yaml b/nixpkgs/nixos/tests/grafana/provision/rules.yaml
new file mode 100644
index 000000000000..946539c8cb69
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/rules.yaml
@@ -0,0 +1,36 @@
+apiVersion: 1
+
+groups:
+  - name: "test_rule_group"
+    folder: "test_group"
+    interval: 60s
+    rules:
+      - uid: "test_rule"
+        title: "Test Rule"
+        condition: A
+        data:
+          - refId: A
+            datasourceUid: '-100'
+            model:
+              conditions:
+                - evaluator:
+                    params:
+                      - 3
+                    type: gt
+                  operator:
+                    type: and
+                  query:
+                    params:
+                      - A
+                  reducer:
+                    type: last
+                  type: query
+              datasource:
+                type: __expr__
+                uid: '-100'
+              expression: 1==0
+              intervalMs: 1000
+              maxDataPoints: 43200
+              refId: A
+              type: math
+        for: 60s
diff --git a/nixpkgs/nixos/tests/grafana/provision/templates.yaml b/nixpkgs/nixos/tests/grafana/provision/templates.yaml
new file mode 100644
index 000000000000..09df247b3451
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/templates.yaml
@@ -0,0 +1,5 @@
+apiVersion: 1
+
+templates:
+  - name: "Test Template"
+    template: "Test message"
diff --git a/nixpkgs/nixos/tests/grafana/provision/test_dashboard.json b/nixpkgs/nixos/tests/grafana/provision/test_dashboard.json
new file mode 100644
index 000000000000..6e7a5b37f22b
--- /dev/null
+++ b/nixpkgs/nixos/tests/grafana/provision/test_dashboard.json
@@ -0,0 +1,47 @@
+{
+  "annotations": {
+    "list": [
+      {
+        "builtIn": 1,
+        "datasource": {
+          "type": "grafana",
+          "uid": "-- Grafana --"
+        },
+        "enable": true,
+        "hide": true,
+        "iconColor": "rgba(0, 211, 255, 1)",
+        "name": "Annotations & Alerts",
+        "target": {
+          "limit": 100,
+          "matchAny": false,
+          "tags": [],
+          "type": "dashboard"
+        },
+        "type": "dashboard"
+      }
+    ]
+  },
+  "editable": true,
+  "fiscalYearStartMonth": 0,
+  "graphTooltip": 0,
+  "id": 28,
+  "links": [],
+  "liveNow": false,
+  "panels": [],
+  "schemaVersion": 37,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "now-6h",
+    "to": "now"
+  },
+  "timepicker": {},
+  "timezone": "",
+  "title": "Test Dashboard",
+  "uid": "test_dashboard",
+  "version": 1,
+  "weekStart": ""
+}
diff --git a/nixpkgs/nixos/tests/graphite.nix b/nixpkgs/nixos/tests/graphite.nix
new file mode 100644
index 000000000000..de6cd8a50e17
--- /dev/null
+++ b/nixpkgs/nixos/tests/graphite.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ... } :
+{
+  name = "graphite";
+  nodes = {
+    one =
+      { ... }: {
+        time.timeZone = "UTC";
+        services.graphite = {
+          web = {
+            enable = true;
+            extraConfig = ''
+              SECRET_KEY = "abcd";
+            '';
+          };
+          carbon.enableCache = true;
+          seyren.enable = false;  # Implicitly requires openssl-1.0.2u which is marked insecure
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    one.wait_for_unit("default.target")
+    one.wait_for_unit("graphiteWeb.service")
+    one.wait_for_unit("carbonCache.service")
+    # The services above are of type "simple". systemd considers them active immediately
+    # even if they're still in preStart (which takes quite a while for graphiteWeb).
+    # Wait for ports to open so we're sure the services are up and listening.
+    one.wait_for_open_port(8080)
+    one.wait_for_open_port(2003)
+    one.succeed('echo "foo 1 `date +%s`" | nc -N localhost 2003')
+    one.wait_until_succeeds(
+        "curl 'http://localhost:8080/metrics/find/?query=foo&format=treejson' --silent | grep foo >&2"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/graylog.nix b/nixpkgs/nixos/tests/graylog.nix
new file mode 100644
index 000000000000..3f7cc3a91439
--- /dev/null
+++ b/nixpkgs/nixos/tests/graylog.nix
@@ -0,0 +1,114 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "graylog";
+  meta.maintainers = with lib.maintainers; [ ];
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.memorySize = 4096;
+    virtualisation.diskSize = 4096;
+
+    services.mongodb.enable = true;
+    services.elasticsearch.enable = true;
+    services.elasticsearch.extraConf = ''
+      network.publish_host: 127.0.0.1
+      network.bind_host: 127.0.0.1
+    '';
+
+    services.graylog = {
+      enable = true;
+      passwordSecret = "YGhZ59wXMrYOojx5xdgEpBpDw2N6FbhM4lTtaJ1KPxxmKrUvSlDbtWArwAWMQ5LKx1ojHEVrQrBMVRdXbRyZLqffoUzHfssc";
+      elasticsearchHosts = [ "http://localhost:9200" ];
+
+      # `echo -n "nixos" | shasum -a 256`
+      rootPasswordSha2 = "6ed332bcfa615381511d4d5ba44a293bb476f368f7e9e304f0dff50230d1a85b";
+    };
+
+    environment.systemPackages = [ pkgs.jq ];
+
+    systemd.services.graylog.path = [ pkgs.netcat ];
+    systemd.services.graylog.preStart = ''
+      until nc -z localhost 9200; do
+        sleep 2
+      done
+    '';
+  };
+
+  testScript = let
+    payloads.login = pkgs.writeText "login.json" (builtins.toJSON {
+      host = "127.0.0.1:9000";
+      username = "admin";
+      password = "nixos";
+    });
+
+    payloads.input = pkgs.writeText "input.json" (builtins.toJSON {
+      title = "Demo";
+      global = false;
+      type = "org.graylog2.inputs.gelf.udp.GELFUDPInput";
+      node = "@node@";
+      configuration = {
+        bind_address = "0.0.0.0";
+        decompress_size_limit = 8388608;
+        number_worker_threads = 1;
+        override_source = null;
+        port = 12201;
+        recv_buffer_size = 262144;
+      };
+    });
+
+    payloads.gelf_message = pkgs.writeText "gelf.json" (builtins.toJSON {
+      host = "example.org";
+      short_message = "A short message";
+      full_message = "A long message";
+      version = "1.1";
+      level = 5;
+      facility = "Test";
+    });
+  in ''
+    machine.start()
+    machine.wait_for_unit("graylog.service")
+    machine.wait_for_open_port(9000)
+    machine.succeed("curl -sSfL http://127.0.0.1:9000/")
+
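+    # Log in via the REST API and extract the session id from the JSON response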
+    session = machine.succeed(
+        "curl -X POST "
+        + "-sSfL http://127.0.0.1:9000/api/system/sessions "
+        + "-d $(cat ${payloads.login}) "
+        + "-H 'Content-Type: application/json' "
+        + "-H 'Accept: application/json' "
+        + "-H 'x-requested-by: cli' "
+        + "| jq .session_id | xargs echo"
+    ).rstrip()
+
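+    # Create a GELF UDP input, substituting this node's id into the input definition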
+    machine.succeed(
+        "curl -X POST "
+        + f"-sSfL http://127.0.0.1:9000/api/system/inputs -u {session}:session "
+        + '-d $(cat ${payloads.input} | sed -e "s,@node@,$(cat /var/lib/graylog/server/node-id),") '
+        + "-H 'Accept: application/json' "
+        + "-H 'Content-Type: application/json' "
+        + "-H 'x-requested-by: cli' "
+    )
+
+    machine.wait_until_succeeds(
+        "test \"$(curl -sSfL 'http://127.0.0.1:9000/api/cluster/inputstates' "
+        + f"-u {session}:session "
+        + "-H 'Accept: application/json' "
+        + "-H 'Content-Type: application/json' "
+        + "-H 'x-requested-by: cli'"
+        + "| jq 'to_entries[]|.value|.[0]|.state' | xargs echo"
+        + ')" = "RUNNING"'
+    )
+
+    machine.succeed(
+        "echo -n $(cat ${payloads.gelf_message}) | nc -w10 -u 127.0.0.1 12201"
+    )
+
+    machine.succeed(
+        'test "$(curl -X GET '
+        + "-sSfL 'http://127.0.0.1:9000/api/search/universal/relative?query=*' "
+        + f"-u {session}:session "
+        + "-H 'Accept: application/json' "
+        + "-H 'Content-Type: application/json' "
+        + "-H 'x-requested-by: cli'"
+        + ' | jq \'.total_results\' | xargs echo)" = "1"'
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/grocy.nix b/nixpkgs/nixos/tests/grocy.nix
new file mode 100644
index 000000000000..48bbc9f7d3fa
--- /dev/null
+++ b/nixpkgs/nixos/tests/grocy.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "grocy";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    services.grocy = {
+      enable = true;
+      hostName = "localhost";
+      nginx.enableSSL = false;
+    };
+    environment.systemPackages = [ pkgs.jq ];
+  };
+
+  testScript = ''
+    from base64 import b64encode
+    from urllib.parse import quote
+
+    machine.start()
+    machine.wait_for_open_port(80)
+    machine.wait_for_unit("multi-user.target")
+
+    machine.succeed("curl -sSf http://localhost")
+
+    machine.succeed(
+        "curl -c cookies -sSf -X POST http://localhost/login -d 'username=admin&password=admin'"
+    )
+
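+    # Extract the grocy_session cookie value from the curl cookie jar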
+    cookie = machine.succeed(
+        "grep -v '^#' cookies | awk '{ print $7 }' | sed -e '/^$/d' | perl -pe 'chomp'"
+    )
+
+    machine.succeed(
+        f"curl -sSf -X POST http://localhost/api/objects/tasks -b 'grocy_session={cookie}' "
+        + '-d \'{"assigned_to_user_id":1,"name":"Test Task","due_date":"1970-01-01"}\'''
+        + " --header 'Content-Type: application/json'"
+    )
+
+    task_name = machine.succeed(
+        f"curl -sSf http://localhost/api/tasks -b 'grocy_session={cookie}' --header 'Accept: application/json' | jq '.[].name' | xargs echo | perl -pe 'chomp'"
+    )
+
+    assert task_name == "Test Task"
+
+    machine.succeed("curl -sSI http://localhost/api/tasks 2>&1 | grep '401 Unauthorized'")
+
+    file_name = "test.txt"
+    file_name_base64 = b64encode(file_name.encode('ascii')).decode('ascii')
+    file_name_base64_urlencode = quote(file_name_base64)
+
+    machine.succeed(
+        f"echo Sample equipment manual > /tmp/{file_name}"
+    )
+
+    machine.succeed(
+        f"curl -sSf -X 'PUT' -b 'grocy_session={cookie}' "
+        + f" 'http://localhost/api/files/equipmentmanuals/{file_name_base64_urlencode}' "
+        + "  --header 'Accept: */*' "
+        + "  --header 'Content-Type: application/octet-stream' "
+        + f" --data-binary '@/tmp/{file_name}' "
+    )
+
+    machine.succeed(
+        f"curl -sSf -X 'GET' -b 'grocy_session={cookie}' "
+        + f" 'http://localhost/api/files/equipmentmanuals/{file_name_base64_urlencode}' "
+        + "  --header 'Accept: application/octet-stream' "
+        + f" | cmp /tmp/{file_name}"
+    )
+
+    machine.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/grow-partition.nix b/nixpkgs/nixos/tests/grow-partition.nix
new file mode 100644
index 000000000000..344910848dca
--- /dev/null
+++ b/nixpkgs/nixos/tests/grow-partition.nix
@@ -0,0 +1,83 @@
+{ lib, ... }:
+
+let
+  rootFslabel = "external";
+  rootFsDevice = "/dev/disk/by-label/${rootFslabel}";
+
+  externalModule = partitionTableType: { config, lib, pkgs, ... }: {
+    virtualisation.directBoot.enable = false;
+    virtualisation.mountHostNixStore = false;
+    virtualisation.useEFIBoot = partitionTableType == "efi";
+
+    # This stops the qemu-vm module from overriding the fileSystems option
+    # with virtualisation.fileSystems.
+    virtualisation.fileSystems = lib.mkForce { };
+
+
+    boot.loader.grub.enable = true;
+    boot.loader.grub.efiSupport = partitionTableType == "efi";
+    boot.loader.grub.efiInstallAsRemovable = partitionTableType == "efi";
+    boot.loader.grub.device = if partitionTableType == "efi" then "nodev" else "/dev/vda";
+
+    boot.growPartition = true;
+
+    fileSystems = {
+      "/".device = rootFsDevice;
+    };
+
+    system.build.diskImage = import ../lib/make-disk-image.nix {
+      inherit config lib pkgs;
+      label = rootFslabel;
+      inherit partitionTableType;
+      format = "raw";
+      bootSize = "128M";
+      additionalSpace = "0M";
+      copyChannel = false;
+    };
+  };
+in
+{
+  name = "grow-partition";
+
+  meta.maintainers = with lib.maintainers; [ arianvp ];
+
+  nodes = {
+    efi = externalModule "efi";
+    legacy = externalModule "legacy";
+    legacyGPT = externalModule "legacy+gpt";
+    hybrid = externalModule "hybrid";
+  };
+
+
+  testScript = { nodes, ... }:
+    lib.concatLines (lib.mapAttrsToList (name: node: ''
+    import os
+    import subprocess
+    import tempfile
+    import shutil
+
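+    # Copy the generated disk image to a temporary file and grow it by 32 MiB
+    # so growpart has something to expand into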
+    tmp_disk_image = tempfile.NamedTemporaryFile()
+
+    shutil.copyfile("${node.system.build.diskImage}/nixos.img", tmp_disk_image.name)
+
+    subprocess.run([
+      "${node.virtualisation.qemu.package}/bin/qemu-img",
+      "resize",
+      "-f",
+      "raw",
+      tmp_disk_image.name,
+      "+32M",
+    ])
+
+    # Set NIX_DISK_IMAGE so that the qemu script finds the right disk image.
+    os.environ['NIX_DISK_IMAGE'] = tmp_disk_image.name
+
+    ${name}.wait_for_unit("growpart.service")
+    systemd_growpart_logs = ${name}.succeed("journalctl --boot --unit growpart.service")
+    assert "CHANGED" in systemd_growpart_logs
+    ${name}.succeed("systemctl restart growpart.service")
+    systemd_growpart_logs = ${name}.succeed("journalctl --boot --unit growpart.service")
+    assert "NOCHANGE" in systemd_growpart_logs
+
+    '') nodes);
+}
diff --git a/nixpkgs/nixos/tests/grub.nix b/nixpkgs/nixos/tests/grub.nix
new file mode 100644
index 000000000000..e0875e70f6a5
--- /dev/null
+++ b/nixpkgs/nixos/tests/grub.nix
@@ -0,0 +1,60 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "grub";
+
+  meta = with lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  nodes.machine = { ... }: {
+    virtualisation.useBootLoader = true;
+
+    boot.loader.timeout = null;
+    boot.loader.grub = {
+      enable = true;
+      users.alice.password = "supersecret";
+
+      # OCR is not accurate enough
+      extraConfig = "serial; terminal_output serial";
+    };
+  };
+
+  testScript = ''
+    def grub_login_as(user, password):
+        """
+        Enters user and password to log into GRUB
+        """
+        machine.wait_for_console_text("Enter username:")
+        machine.send_chars(user + "\n")
+        machine.wait_for_console_text("Enter password:")
+        machine.send_chars(password + "\n")
+
+
+    def grub_select_all_configurations():
+        """
+        Selects "All configurations" from the GRUB menu
+        to trigger a login request.
+        """
+        machine.send_monitor_command("sendkey down")
+        machine.send_monitor_command("sendkey ret")
+
+
+    machine.start()
+
+    # wait for grub screen
+    machine.wait_for_console_text("GNU GRUB")
+
+    grub_select_all_configurations()
+    with subtest("Invalid credentials are rejected"):
+        grub_login_as("wronguser", "wrongsecret")
+        machine.wait_for_console_text("error: access denied.")
+
+    grub_select_all_configurations()
+    with subtest("Valid credentials are accepted"):
+        grub_login_as("alice", "supersecret")
+        machine.send_chars("\n")  # press enter to boot
+        machine.wait_for_console_text("Linux version")
+
+    with subtest("Machine boots correctly"):
+        machine.wait_for_unit("multi-user.target")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/guacamole-server.nix b/nixpkgs/nixos/tests/guacamole-server.nix
new file mode 100644
index 000000000000..48194fddfb22
--- /dev/null
+++ b/nixpkgs/nixos/tests/guacamole-server.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+{
+  name = "guacamole-server";
+
+  nodes = {
+    machine = {pkgs, ...}: {
+      services.guacamole-server = {
+        enable = true;
+        host = "0.0.0.0";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("guacamole-server.service")
+    machine.wait_for_open_port(4822)
+  '';
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+})
diff --git a/nixpkgs/nixos/tests/gvisor.nix b/nixpkgs/nixos/tests/gvisor.nix
new file mode 100644
index 000000000000..77ff29341bed
--- /dev/null
+++ b/nixpkgs/nixos/tests/gvisor.nix
@@ -0,0 +1,49 @@
+# This test runs a container through gvisor and checks that a simple container starts
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "gvisor";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ andrew-d ];
+  };
+
+  nodes = {
+    gvisor =
+      { pkgs, ... }:
+        {
+          virtualisation.docker = {
+            enable = true;
+            extraOptions = "--add-runtime runsc=${pkgs.gvisor}/bin/runsc";
+          };
+
+          networking = {
+            dhcpcd.enable = false;
+            defaultGateway = "192.168.1.1";
+            interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+              { address = "192.168.1.2"; prefixLength = 24; }
+            ];
+          };
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    gvisor.wait_for_unit("network.target")
+    gvisor.wait_for_unit("sockets.target")
+
+    # Start by verifying that gvisor itself works
+    output = gvisor.succeed(
+        "${pkgs.gvisor}/bin/runsc -alsologtostderr do ${pkgs.coreutils}/bin/echo hello world"
+    )
+    assert output.strip() == "hello world"
+
+    # Also test the Docker runtime
+    gvisor.succeed("tar cv --files-from /dev/null | docker import - scratchimg")
+    gvisor.succeed(
+        "docker run -d --name=sleeping --runtime=runsc -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+    )
+    gvisor.succeed("docker ps | grep sleeping")
+    gvisor.succeed("docker stop sleeping")
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/hadoop/default.nix b/nixpkgs/nixos/tests/hadoop/default.nix
new file mode 100644
index 000000000000..479690adc064
--- /dev/null
+++ b/nixpkgs/nixos/tests/hadoop/default.nix
@@ -0,0 +1,8 @@
+{ handleTestOn, package, ... }:
+
+{
+  all = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hadoop.nix { inherit package; };
+  hdfs = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hdfs.nix { inherit package; };
+  yarn = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./yarn.nix { inherit package; };
+  hbase = handleTestOn [ "x86_64-linux" "aarch64-linux" ] ./hbase.nix { inherit package; };
+}
diff --git a/nixpkgs/nixos/tests/hadoop/hadoop.nix b/nixpkgs/nixos/tests/hadoop/hadoop.nix
new file mode 100644
index 000000000000..6162ccfd33d4
--- /dev/null
+++ b/nixpkgs/nixos/tests/hadoop/hadoop.nix
@@ -0,0 +1,255 @@
+# This test is very comprehensive. It tests whether all Hadoop services work well with each other.
+# Run this when updating the Hadoop package or making significant changes to the Hadoop module.
+# For a more basic test, see hdfs.nix and yarn.nix.
+import ../make-test-python.nix ({ package, ... }: {
+  name = "hadoop-combined";
+
+  nodes =
+    let
+      coreSite = {
+        "fs.defaultFS" = "hdfs://ns1";
+      };
+      hdfsSite = {
+        # HA Quorum Journal Manager configuration
+        "dfs.nameservices" = "ns1";
+        "dfs.ha.namenodes.ns1" = "nn1,nn2";
+        "dfs.namenode.shared.edits.dir.ns1" = "qjournal://jn1:8485;jn2:8485;jn3:8485/ns1";
+        "dfs.namenode.rpc-address.ns1.nn1" = "nn1:8020";
+        "dfs.namenode.rpc-address.ns1.nn2" = "nn2:8020";
+        "dfs.namenode.servicerpc-address.ns1.nn1" = "nn1:8022";
+        "dfs.namenode.servicerpc-address.ns1.nn2" = "nn2:8022";
+        "dfs.namenode.http-address.ns1.nn1" = "nn1:9870";
+        "dfs.namenode.http-address.ns1.nn2" = "nn2:9870";
+
+        # Automatic failover configuration
+        "dfs.client.failover.proxy.provider.ns1" = "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider";
+        "dfs.ha.automatic-failover.enabled.ns1" = "true";
+        "dfs.ha.fencing.methods" = "shell(true)";
+        "ha.zookeeper.quorum" = "zk1:2181";
+      };
+      yarnSite = {
+        "yarn.resourcemanager.zk-address" = "zk1:2181";
+        "yarn.resourcemanager.ha.enabled" = "true";
+        "yarn.resourcemanager.ha.rm-ids" = "rm1,rm2";
+        "yarn.resourcemanager.hostname.rm1" = "rm1";
+        "yarn.resourcemanager.hostname.rm2" = "rm2";
+        "yarn.resourcemanager.ha.automatic-failover.enabled" = "true";
+        "yarn.resourcemanager.cluster-id" = "cluster1";
+        # yarn.resourcemanager.webapp.address needs to be defined even though yarn.resourcemanager.hostname is set. This shouldn't be necessary, but there's a bug in
+        # hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/amfilter/AmFilterInitializer.java:70
+        # that causes AM containers to fail otherwise.
+        "yarn.resourcemanager.webapp.address.rm1" = "rm1:8088";
+        "yarn.resourcemanager.webapp.address.rm2" = "rm2:8088";
+      };
+    in
+    {
+      zk1 = { ... }: {
+        services.zookeeper.enable = true;
+        networking.firewall.allowedTCPPorts = [ 2181 ];
+      };
+
+      # HDFS cluster
+      nn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.namenode = {
+            enable = true;
+            openFirewall = true;
+          };
+          hdfs.zkfc.enable = true;
+        };
+      };
+      nn2 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.namenode = {
+            enable = true;
+            openFirewall = true;
+          };
+          hdfs.zkfc.enable = true;
+        };
+      };
+
+      jn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+      jn2 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+      jn3 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.journalnode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+
+      dn1 = { ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite;
+          hdfs.datanode = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+
+      # YARN cluster
+      rm1 = { options, ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.resourcemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+      rm2 = { options, ... }: {
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.resourcemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+      nm1 = { options, ... }: {
+        virtualisation.memorySize = 2048;
+        services.hadoop = {
+          inherit package coreSite hdfsSite yarnSite;
+          yarn.nodemanager = {
+            enable = true;
+            openFirewall = true;
+          };
+        };
+      };
+      client = { options, ... }: {
+        services.hadoop = {
+          gatewayRole.enable = true;
+          inherit package coreSite hdfsSite yarnSite;
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    #### HDFS tests ####
+
+    zk1.wait_for_unit("network.target")
+    jn1.wait_for_unit("network.target")
+    jn2.wait_for_unit("network.target")
+    jn3.wait_for_unit("network.target")
+    nn1.wait_for_unit("network.target")
+    nn2.wait_for_unit("network.target")
+    dn1.wait_for_unit("network.target")
+
+    zk1.wait_for_unit("zookeeper")
+    jn1.wait_for_unit("hdfs-journalnode")
+    jn2.wait_for_unit("hdfs-journalnode")
+    jn3.wait_for_unit("hdfs-journalnode")
+
+    zk1.wait_for_open_port(2181)
+    jn1.wait_for_open_port(8480)
+    jn1.wait_for_open_port(8485)
+    jn2.wait_for_open_port(8480)
+    jn2.wait_for_open_port(8485)
+
+    # Namenodes must be stopped before initializing the cluster
+    nn1.succeed("systemctl stop hdfs-namenode")
+    nn2.succeed("systemctl stop hdfs-namenode")
+    nn1.succeed("systemctl stop hdfs-zkfc")
+    nn2.succeed("systemctl stop hdfs-zkfc")
+
+    # Initialize zookeeper for failover controller
+    nn1.succeed("sudo -u hdfs systemd-cat hdfs zkfc -formatZK")
+
+    # Format NN1 and start it
+    nn1.succeed("sudo -u hdfs systemd-cat hadoop namenode -format")
+    nn1.succeed("systemctl start hdfs-namenode")
+    nn1.wait_for_open_port(9870)
+    nn1.wait_for_open_port(8022)
+    nn1.wait_for_open_port(8020)
+
+    # Bootstrap NN2 from NN1 and start it
+    nn2.succeed("sudo -u hdfs systemd-cat hdfs namenode -bootstrapStandby")
+    nn2.succeed("systemctl start hdfs-namenode")
+    nn2.wait_for_open_port(9870)
+    nn2.wait_for_open_port(8022)
+    nn2.wait_for_open_port(8020)
+    nn1.succeed("systemd-cat netstat -tulpne")
+
+    # Start failover controllers
+    nn1.succeed("systemctl start hdfs-zkfc")
+    nn2.succeed("systemctl start hdfs-zkfc")
+
+    # DN should have started by now, but confirm anyway
+    dn1.wait_for_unit("hdfs-datanode")
+    # Print states of namenodes
+    client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
+    # Wait for cluster to exit safemode
+    client.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
+    client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
+    # test R/W
+    client.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
+    assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
+
+    # Test NN failover
+    nn1.succeed("systemctl stop hdfs-namenode")
+    assert "active" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
+    client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
+    assert "testfilecontents" in client.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
+
+    nn1.succeed("systemctl start hdfs-namenode")
+    nn1.wait_for_open_port(9870)
+    nn1.wait_for_open_port(8022)
+    nn1.wait_for_open_port(8020)
+    assert "standby" in client.succeed("sudo -u hdfs hdfs haadmin -getAllServiceState")
+    client.succeed("sudo -u hdfs systemd-cat hdfs haadmin -getAllServiceState")
+
+    #### YARN tests ####
+
+    rm1.wait_for_unit("network.target")
+    rm2.wait_for_unit("network.target")
+    nm1.wait_for_unit("network.target")
+
+    rm1.wait_for_unit("yarn-resourcemanager")
+    rm1.wait_for_open_port(8088)
+    rm2.wait_for_unit("yarn-resourcemanager")
+    rm2.wait_for_open_port(8088)
+
+    nm1.wait_for_unit("yarn-nodemanager")
+    nm1.wait_for_open_port(8042)
+    nm1.wait_for_open_port(8040)
+    client.wait_until_succeeds("yarn node -list | grep Nodes:1")
+    client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
+    client.succeed("sudo -u yarn systemd-cat yarn node -list")
+
+    # Test RM failover
+    rm1.succeed("systemctl stop yarn-resourcemanager")
+    assert "standby" not in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
+    client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
+    rm1.succeed("systemctl start yarn-resourcemanager")
+    rm1.wait_for_unit("yarn-resourcemanager")
+    rm1.wait_for_open_port(8088)
+    assert "standby" in client.succeed("sudo -u yarn yarn rmadmin -getAllServiceState")
+    client.succeed("sudo -u yarn systemd-cat yarn rmadmin -getAllServiceState")
+
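+    # Run the MapReduce pi example end to end to exercise HDFS and YARN together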
+    assert "Estimated value of Pi is" in client.succeed("HADOOP_USER_NAME=hdfs yarn jar $(readlink $(which yarn) | sed -r 's~bin/yarn~share/hadoop/mapreduce/hadoop-mapreduce-examples-*.jar~g') pi 2 10")
+    assert "SUCCEEDED" in client.succeed("yarn application -list -appStates FINISHED")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hadoop/hbase.nix b/nixpkgs/nixos/tests/hadoop/hbase.nix
new file mode 100644
index 000000000000..0416345682a8
--- /dev/null
+++ b/nixpkgs/nixos/tests/hadoop/hbase.nix
@@ -0,0 +1,109 @@
+# Test a minimal hbase cluster
+{ pkgs, ... }:
+import ../make-test-python.nix ({ hadoop ? pkgs.hadoop, hbase ? pkgs.hbase, ... }:
+with pkgs.lib;
+{
+  name = "hadoop-hbase";
+
+  nodes = let
+    coreSite = {
+      "fs.defaultFS" = "hdfs://namenode:8020";
+    };
+    defOpts = {
+      enable = true;
+      openFirewall = true;
+    };
+    zookeeperQuorum = "zookeeper";
+  in {
+    zookeeper = { ... }: {
+      services.zookeeper.enable = true;
+      networking.firewall.allowedTCPPorts = [ 2181 ];
+    };
+    namenode = { ... }: {
+      services.hadoop = {
+        hdfs = {
+          namenode = defOpts // { formatOnInit = true; };
+        };
+        inherit coreSite;
+      };
+    };
+    datanode = { ... }: {
+      virtualisation.diskSize = 8192;
+      services.hadoop = {
+        hdfs.datanode = defOpts;
+        inherit coreSite;
+      };
+    };
+
+    master = { ... }:{
+      services.hadoop = {
+        inherit coreSite;
+        hbase = {
+          inherit zookeeperQuorum;
+          master = defOpts // { initHDFS = true; };
+        };
+      };
+    };
+    regionserver = { ... }:{
+      services.hadoop = {
+        inherit coreSite;
+        hbase = {
+          inherit zookeeperQuorum;
+          regionServer = defOpts;
+        };
+      };
+    };
+    thrift = { ... }:{
+      services.hadoop = {
+        inherit coreSite;
+        hbase = {
+          inherit zookeeperQuorum;
+          thrift = defOpts;
+        };
+      };
+    };
+    rest = { ... }:{
+      services.hadoop = {
+        inherit coreSite;
+        hbase = {
+          inherit zookeeperQuorum;
+          rest = defOpts;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    # wait for HDFS cluster
+    namenode.wait_for_unit("hdfs-namenode")
+    namenode.wait_for_unit("network.target")
+    namenode.wait_for_open_port(8020)
+    namenode.wait_for_open_port(9870)
+    datanode.wait_for_unit("hdfs-datanode")
+    datanode.wait_for_unit("network.target")
+    datanode.wait_for_open_port(9864)
+    datanode.wait_for_open_port(9866)
+    datanode.wait_for_open_port(9867)
+
+    # wait for ZK
+    zookeeper.wait_for_unit("zookeeper")
+    zookeeper.wait_for_open_port(2181)
+
+    # wait for HBase to start up
+    master.wait_for_unit("hbase-master")
+    regionserver.wait_for_unit("hbase-regionserver")
+
+    assert "1 active master, 0 backup masters, 1 servers" in master.succeed("echo status | HADOOP_USER_NAME=hbase hbase shell -n")
+    regionserver.wait_until_succeeds("echo \"create 't1','f1'\" | HADOOP_USER_NAME=hbase hbase shell -n")
+    assert "NAME => 'f1'" in regionserver.succeed("echo \"describe 't1'\" | HADOOP_USER_NAME=hbase hbase shell -n")
+
+    rest.wait_for_open_port(8080)
+    assert "${hbase.version}" in regionserver.succeed("curl http://rest:8080/version/cluster")
+
+    thrift.wait_for_open_port(9090)
+  '';
+
+  meta.maintainers = with maintainers; [ illustris ];
+})
diff --git a/nixpkgs/nixos/tests/hadoop/hdfs.nix b/nixpkgs/nixos/tests/hadoop/hdfs.nix
new file mode 100644
index 000000000000..65686b371559
--- /dev/null
+++ b/nixpkgs/nixos/tests/hadoop/hdfs.nix
@@ -0,0 +1,83 @@
+# Test a minimal HDFS cluster with no HA
+import ../make-test-python.nix ({ package, lib, ... }:
+{
+  name = "hadoop-hdfs";
+
+  nodes = let
+    coreSite = {
+      "fs.defaultFS" = "hdfs://namenode:8020";
+      "hadoop.proxyuser.httpfs.groups" = "*";
+      "hadoop.proxyuser.httpfs.hosts" = "*";
+    };
+    in {
+    namenode = { pkgs, ... }: {
+      services.hadoop = {
+        inherit package;
+        hdfs = {
+          namenode = {
+            enable = true;
+            openFirewall = true;
+            formatOnInit = true;
+          };
+          httpfs = {
+            # The NixOS Hadoop module only supports webHDFS on 3.3 and newer
+            enable = lib.mkIf (lib.versionAtLeast package.version "3.3") true;
+            openFirewall = true;
+          };
+        };
+        inherit coreSite;
+      };
+    };
+    datanode = { pkgs, ... }: {
+      services.hadoop = {
+        inherit package;
+        hdfs.datanode = {
+          enable = true;
+          openFirewall = true;
+          dataDirs = [{
+            type = "DISK";
+            path = "/tmp/dn1";
+          }];
+        };
+        inherit coreSite;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    namenode.wait_for_unit("hdfs-namenode")
+    namenode.wait_for_unit("network.target")
+    namenode.wait_for_open_port(8020)
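+    # Dump listening sockets and the generated hdfs-site.xml to the journal for debugging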
+    namenode.succeed("systemd-cat ss -tulpne")
+    namenode.succeed("systemd-cat cat /etc/hadoop*/hdfs-site.xml")
+    namenode.wait_for_open_port(9870)
+
+    datanode.wait_for_unit("hdfs-datanode")
+    datanode.wait_for_unit("network.target")
+  '' + (if lib.versionAtLeast package.version "3" then ''
+    datanode.wait_for_open_port(9864)
+    datanode.wait_for_open_port(9866)
+    datanode.wait_for_open_port(9867)
+
+    datanode.succeed("curl -f http://datanode:9864")
+  '' else ''
+    datanode.wait_for_open_port(50075)
+    datanode.wait_for_open_port(50010)
+    datanode.wait_for_open_port(50020)
+
+    datanode.succeed("curl -f http://datanode:50075")
+  '' ) + ''
+    namenode.succeed("curl -f http://namenode:9870")
+
+    datanode.succeed("sudo -u hdfs hdfs dfsadmin -safemode wait")
+    datanode.succeed("echo testfilecontents | sudo -u hdfs hdfs dfs -put - /testfile")
+    assert "testfilecontents" in datanode.succeed("sudo -u hdfs hdfs dfs -cat /testfile")
+
+  '' + lib.optionalString (lib.versionAtLeast package.version "3.3" ) ''
+    namenode.wait_for_unit("hdfs-httpfs")
+    namenode.wait_for_open_port(14000)
+    assert "testfilecontents" in datanode.succeed("curl -f \"http://namenode:14000/webhdfs/v1/testfile?user.name=hdfs&op=OPEN\" 2>&1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hadoop/yarn.nix b/nixpkgs/nixos/tests/hadoop/yarn.nix
new file mode 100644
index 000000000000..08c8ff857d8c
--- /dev/null
+++ b/nixpkgs/nixos/tests/hadoop/yarn.nix
@@ -0,0 +1,45 @@
+# This only tests if YARN is able to start its services
+import ../make-test-python.nix ({ package, ... }: {
+  name = "hadoop-yarn";
+
+  nodes = {
+    resourcemanager = { ... }: {
+      services.hadoop = {
+        inherit package;
+        yarn.resourcemanager = {
+          enable = true;
+          openFirewall = true;
+        };
+      };
+    };
+    nodemanager = { options, lib, ... }: {
+      services.hadoop = {
+        inherit package;
+        yarn.nodemanager = {
+          enable = true;
+          openFirewall = true;
+        };
+        yarnSite = {
+          "yarn.resourcemanager.hostname" = "resourcemanager";
+          "yarn.nodemanager.log-dirs" = "/tmp/userlogs";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    resourcemanager.wait_for_unit("yarn-resourcemanager")
+    resourcemanager.wait_for_unit("network.target")
+    resourcemanager.wait_for_open_port(8031)
+    resourcemanager.wait_for_open_port(8088)
+
+    nodemanager.wait_for_unit("yarn-nodemanager")
+    nodemanager.wait_for_unit("network.target")
+    nodemanager.wait_for_open_port(8042)
+
+    resourcemanager.succeed("curl -f http://localhost:8088")
+    nodemanager.succeed("curl -f http://localhost:8042")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/haka.nix b/nixpkgs/nixos/tests/haka.nix
new file mode 100644
index 000000000000..dd65a6bcf115
--- /dev/null
+++ b/nixpkgs/nixos/tests/haka.nix
@@ -0,0 +1,24 @@
+# This test runs haka and probes it with hakactl
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "haka";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ tvestelind ];
+  };
+
+  nodes = {
+    haka =
+      { ... }:
+        {
+          services.haka.enable = true;
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    haka.wait_for_unit("haka.service")
+    haka.succeed("hakactl status")
+    haka.succeed("hakactl stop")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/haproxy.nix b/nixpkgs/nixos/tests/haproxy.nix
new file mode 100644
index 000000000000..555474d7f299
--- /dev/null
+++ b/nixpkgs/nixos/tests/haproxy.nix
@@ -0,0 +1,53 @@
+import ./make-test-python.nix ({ pkgs, ...}: {
+  name = "haproxy";
+  nodes = {
+    machine = { ... }: {
+      services.haproxy = {
+        enable = true;
+        config = ''
+          defaults
+            timeout connect 10s
+
+          backend http_server
+            mode http
+            server httpd [::1]:8000
+
+          frontend http
+            bind *:80
+            mode http
+            http-request use-service prometheus-exporter if { path /metrics }
+            use_backend http_server
+        '';
+      };
+      services.httpd = {
+        enable = true;
+        virtualHosts.localhost = {
+          documentRoot = pkgs.writeTextDir "index.txt" "We are all good!";
+          adminAddr = "notme@yourhost.local";
+          listen = [{
+            ip = "::1";
+            port = 8000;
+          }];
+        };
+      };
+    };
+  };
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("haproxy.service")
+    machine.wait_for_unit("httpd.service")
+    assert "We are all good!" in machine.succeed("curl -fk http://localhost:80/index.txt")
+    assert "haproxy_process_pool_allocated_bytes" in machine.succeed(
+        "curl -fk http://localhost:80/metrics"
+    )
+
+    with subtest("reload"):
+        machine.succeed("systemctl reload haproxy")
+        # wait some time to ensure the following request hits the reloaded haproxy
+        machine.sleep(5)
+        assert "We are all good!" in machine.succeed(
+            "curl -fk http://localhost:80/index.txt"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hardened.nix b/nixpkgs/nixos/tests/hardened.nix
new file mode 100644
index 000000000000..e38834961e13
--- /dev/null
+++ b/nixpkgs/nixos/tests/hardened.nix
@@ -0,0 +1,105 @@
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "hardened";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ joachifm ];
+  };
+
+  nodes.machine =
+    { lib, pkgs, config, ... }:
+    { users.users.alice = { isNormalUser = true; extraGroups = [ "proc" ]; };
+      users.users.sybil = { isNormalUser = true; group = "wheel"; };
+      imports = [ ../modules/profiles/hardened.nix ];
+      environment.memoryAllocator.provider = "graphene-hardened";
+      nix.settings.sandbox = false;
+      nixpkgs.overlays = [
+        (final: super: {
+          dhcpcd = super.dhcpcd.override { enablePrivSep = false; };
+        })
+      ];
+      virtualisation.emptyDiskImages = [ 4096 ];
+      boot.initrd.postDeviceCommands = ''
+        ${pkgs.dosfstools}/bin/mkfs.vfat -n EFISYS /dev/vdb
+      '';
+      virtualisation.fileSystems = {
+        "/efi" = {
+          device = "/dev/disk/by-label/EFISYS";
+          fsType = "vfat";
+          options = [ "noauto" ];
+        };
+      };
+      boot.extraModulePackages =
+        pkgs.lib.optional (pkgs.lib.versionOlder config.boot.kernelPackages.kernel.version "5.6")
+          config.boot.kernelPackages.wireguard;
+      boot.kernelModules = [ "wireguard" ];
+    };
+
+  testScript =
+    let
+      hardened-malloc-tests = pkgs.graphene-hardened-malloc.ld-preload-tests;
+    in
+    ''
+      machine.wait_for_unit("multi-user.target")
+
+
+      with subtest("AppArmor profiles are loaded"):
+          machine.succeed("systemctl status apparmor.service")
+
+
+      # AppArmor securityfs
+      with subtest("AppArmor securityfs is mounted"):
+          machine.succeed("mountpoint -q /sys/kernel/security")
+          machine.succeed("cat /sys/kernel/security/apparmor/profiles")
+
+
+      # Test loading out-of-tree modules
+      with subtest("Out-of-tree modules can be loaded"):
+          machine.succeed("grep -Fq wireguard /proc/modules")
+
+
+      # Test kernel module hardening
+      with subtest("No more kernel modules can be loaded"):
+          # note: this had better be a module we normally wouldn't load ...
+          machine.wait_for_unit("disable-kernel-module-loading.service")
+          machine.fail("modprobe dccp")
+
+
+      # Test userns
+      with subtest("User namespaces are restricted"):
+          machine.succeed("unshare --user true")
+          machine.fail("su -l alice -c 'unshare --user true'")
+
+
+      # Test dmesg restriction
+      with subtest("Regular users cannot access dmesg"):
+          machine.fail("su -l alice -c dmesg")
+
+
+      # Test access to kcore
+      with subtest("Kcore is inaccessible as root"):
+          machine.fail("cat /proc/kcore")
+
+
+      # Test deferred mount
+      with subtest("Deferred mounts work"):
+          machine.fail("mountpoint -q /efi")  # was deferred
+          machine.execute("mkdir -p /efi")
+          machine.succeed("mount /dev/disk/by-label/EFISYS /efi")
+          machine.succeed("mountpoint -q /efi")  # now mounted
+
+
+      # Test Nix dæmon usage
+      with subtest("nix-daemon cannot be used by all users"):
+          machine.fail("su -l nobody -s /bin/sh -c 'nix --extra-experimental-features nix-command ping-store'")
+          machine.succeed("su -l alice -c 'nix --extra-experimental-features nix-command ping-store'")
+
+
+      # Test kernel image protection
+      with subtest("The kernel image is protected"):
+          machine.fail("systemctl hibernate")
+          machine.fail("systemctl kexec")
+
+
+      with subtest("The hardened memory allocator works"):
+          machine.succeed("${hardened-malloc-tests}/bin/run-tests")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/harmonia.nix b/nixpkgs/nixos/tests/harmonia.nix
new file mode 100644
index 000000000000..6cf9ad4d2335
--- /dev/null
+++ b/nixpkgs/nixos/tests/harmonia.nix
@@ -0,0 +1,37 @@
+{ pkgs, lib, ... }:
+
+{
+  name = "harmonia";
+
+  nodes = {
+    harmonia = {
+      services.harmonia = {
+        enable = true;
+        signKeyPath = pkgs.writeText "cache-key" "cache.example.com-1:9FhO0w+7HjZrhvmzT1VlAZw4OSAlFGTgC24Seg3tmPl4gZBdwZClzTTHr9cVzJpwsRSYLTu7hEAQe3ljy92CWg==";
+        settings.priority = 35;
+      };
+
+      networking.firewall.allowedTCPPorts = [ 5000 ];
+      system.extraDependencies = [ pkgs.emptyFile ];
+    };
+
+    client01 = {
+      nix.settings = {
+        substituters = lib.mkForce [ "http://harmonia:5000" ];
+        trusted-public-keys = lib.mkForce [ "cache.example.com-1:eIGQXcGQpc00x6/XFcyacLEUmC07u4RAEHt5Y8vdglo=" ];
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+
+    harmonia.wait_for_unit("harmonia.service")
+
+    client01.wait_until_succeeds("curl -f http://harmonia:5000/nix-cache-info | grep '${toString nodes.harmonia.services.harmonia.settings.priority}' >&2")
+    client01.succeed("curl -f http://harmonia:5000/version | grep '${nodes.harmonia.services.harmonia.package.version}' >&2")
+
+    client01.succeed("cat /etc/nix/nix.conf >&2")
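+    # Substitute a store path from the harmonia binary cache into a separate chroot store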
+    client01.succeed("nix-store --realise ${pkgs.emptyFile} --store /root/other-store")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/haste-server.nix b/nixpkgs/nixos/tests/haste-server.nix
new file mode 100644
index 000000000000..9097c992c548
--- /dev/null
+++ b/nixpkgs/nixos/tests/haste-server.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  {
+    name = "haste-server";
+    meta.maintainers = with lib.maintainers; [ mkg20001 ];
+
+    nodes.machine = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [
+        curl
+        jq
+      ];
+
+      services.haste-server = {
+        enable = true;
+      };
+    };
+
+    testScript = ''
+      machine.wait_for_unit("haste-server")
+      machine.wait_until_succeeds("curl -s localhost:7777")
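+      # Post a document and fetch it back by the returned key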
+      machine.succeed('curl -s -X POST http://localhost:7777/documents -d "Hello World!" > bla')
+      machine.succeed('curl http://localhost:7777/raw/$(cat bla | jq -r .key) | grep "Hello World"')
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/hbase.nix b/nixpkgs/nixos/tests/hbase.nix
new file mode 100644
index 000000000000..7d8e32f81603
--- /dev/null
+++ b/nixpkgs/nixos/tests/hbase.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, lib, package ? pkgs.hbase, ... }:
+{
+  name = "hbase-standalone";
+
+  meta = with lib.maintainers; {
+    maintainers = [ illustris ];
+  };
+
+  nodes = {
+    hbase = { pkgs, ... }: {
+      services.hbase-standalone = {
+        enable = true;
+        inherit package;
+        # Needed for standalone mode in hbase 2+
+        # This setting and standalone mode are not suitable for production
+        settings."hbase.unsafe.stream.capability.enforce" = "false";
+      };
+      environment.systemPackages = with pkgs; [
+        package
+      ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+    hbase.wait_for_unit("hbase.service")
+    hbase.wait_until_succeeds("echo \"create 't1','f1'\" | sudo -u hbase hbase shell -n")
+    assert "NAME => 'f1'" in hbase.succeed("echo \"describe 't1'\" | sudo -u hbase hbase shell -n")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hddfancontrol.nix b/nixpkgs/nixos/tests/hddfancontrol.nix
new file mode 100644
index 000000000000..b5fa7ccb2c19
--- /dev/null
+++ b/nixpkgs/nixos/tests/hddfancontrol.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "hddfancontrol";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ benley ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    services.hddfancontrol.enable = true;
+    services.hddfancontrol.disks = ["/dev/vda"];
+    services.hddfancontrol.pwmPaths = ["/test/hwmon1/pwm1"];
+    services.hddfancontrol.extraArgs = ["--pwm-start-value=32"
+                                        "--pwm-stop-value=0"];
+
+    systemd.services.hddfancontrol_fixtures = {
+      description = "Install test fixtures for hddfancontrol";
+      serviceConfig = {
+        Type = "oneshot";
+      };
+      script = ''
+        mkdir -p /test/hwmon1
+        echo 255 > /test/hwmon1/pwm1
+        echo 2 > /test/hwmon1/pwm1_enable
+      '';
+      wantedBy = ["hddfancontrol.service"];
+      before = ["hddfancontrol.service"];
+    };
+
+    systemd.services.hddfancontrol.serviceConfig.ReadWritePaths = "/test";
+  };
+
+  # hddfancontrol.service will fail to start because qemu /dev/vda doesn't have
+  # any thermal interfaces, but it should ensure that fans appear to be running
+  # before it aborts.
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("journalctl -eu hddfancontrol.service|grep 'Setting fan speed'")
+    machine.shutdown()
+
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/headscale.nix b/nixpkgs/nixos/tests/headscale.nix
new file mode 100644
index 000000000000..80188b65dbfc
--- /dev/null
+++ b/nixpkgs/nixos/tests/headscale.nix
@@ -0,0 +1,82 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    tls-cert =
+      pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+        openssl req \
+          -x509 -newkey rsa:4096 -sha256 -days 365 \
+          -nodes -out cert.pem -keyout key.pem \
+          -subj '/CN=headscale' -addext "subjectAltName=DNS:headscale"
+
+        mkdir -p $out
+        cp key.pem cert.pem $out
+      '';
+  in {
+    name = "headscale";
+    meta.maintainers = with lib.maintainers; [ misterio77 ];
+
+    nodes = let
+      headscalePort = 8080;
+      stunPort = 3478;
+      peer = {
+        services.tailscale.enable = true;
+        security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
+      };
+    in {
+      peer1 = peer;
+      peer2 = peer;
+
+      headscale = {
+        services = {
+          headscale = {
+            enable = true;
+            port = headscalePort;
+            settings = {
+              server_url = "https://headscale";
+              ip_prefixes = [ "100.64.0.0/10" ];
+              derp.server = {
+                enabled = true;
+                region_id = 999;
+                stun_listen_addr = "0.0.0.0:${toString stunPort}";
+              };
+            };
+          };
+          nginx = {
+            enable = true;
+            virtualHosts.headscale = {
+              addSSL = true;
+              sslCertificate = "${tls-cert}/cert.pem";
+              sslCertificateKey = "${tls-cert}/key.pem";
+              locations."/" = {
+                proxyPass = "http://127.0.0.1:${toString headscalePort}";
+                proxyWebsockets = true;
+              };
+            };
+          };
+        };
+        networking.firewall = {
+          allowedTCPPorts = [ 80 443 ];
+          allowedUDPPorts = [ stunPort ];
+        };
+        environment.systemPackages = [ pkgs.headscale ];
+      };
+    };
+
+    testScript = ''
+      start_all()
+      headscale.wait_for_unit("headscale")
+      headscale.wait_for_open_port(443)
+
+      # Create headscale user and preauth-key
+      headscale.succeed("headscale users create test")
+      authkey = headscale.succeed("headscale preauthkeys -u test create --reusable")
+
+      # Connect peers
+      up_cmd = f"tailscale up --login-server 'https://headscale' --auth-key {authkey}"
+      peer1.execute(up_cmd)
+      peer2.execute(up_cmd)
+
+      # Check that they are reachable from the tailnet
+      peer1.wait_until_succeeds("tailscale ping peer2")
+      peer2.wait_until_succeeds("tailscale ping peer1")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/hedgedoc.nix b/nixpkgs/nixos/tests/hedgedoc.nix
new file mode 100644
index 000000000000..16e0dc14e947
--- /dev/null
+++ b/nixpkgs/nixos/tests/hedgedoc.nix
@@ -0,0 +1,96 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "hedgedoc";
+
+  meta = with lib.maintainers; {
+    maintainers = [ willibutz ];
+  };
+
+  nodes = {
+    hedgedocSqlite = { ... }: {
+      services.hedgedoc.enable = true;
+    };
+
+    hedgedocPostgresWithTCPSocket = { ... }: {
+      systemd.services.hedgedoc.after = [ "postgresql.service" ];
+      services = {
+        hedgedoc = {
+          enable = true;
+          settings.db = {
+            dialect = "postgres";
+            user = "hedgedoc";
+            password = "$DB_PASSWORD";
+            host = "localhost";
+            port = 5432;
+            database = "hedgedocdb";
+          };
+
+          /*
+           * Do not use pkgs.writeText for secrets as
+           * they will end up in the world-readable Nix store.
+           */
+          environmentFile = pkgs.writeText "hedgedoc-env" ''
+            DB_PASSWORD=snakeoilpassword
+          '';
+        };
+        postgresql = {
+          enable = true;
+          initialScript = pkgs.writeText "pg-init-script.sql" ''
+            CREATE ROLE hedgedoc LOGIN PASSWORD 'snakeoilpassword';
+            CREATE DATABASE hedgedocdb OWNER hedgedoc;
+          '';
+        };
+      };
+    };
+
+    hedgedocPostgresWithUNIXSocket = { ... }: {
+      systemd.services.hedgedoc.after = [ "postgresql.service" ];
+      services = {
+        hedgedoc = {
+          enable = true;
+          settings.db = {
+            dialect = "postgres";
+            user = "hedgedoc";
+            password = "$DB_PASSWORD";
+            host = "/run/postgresql";
+            database = "hedgedocdb";
+          };
+
+          environmentFile = pkgs.writeText "hedgedoc-env" ''
+            DB_PASSWORD=snakeoilpassword
+          '';
+        };
+        postgresql = {
+          enable = true;
+          initialScript = pkgs.writeText "pg-init-script.sql" ''
+            CREATE ROLE hedgedoc LOGIN PASSWORD 'snakeoilpassword';
+            CREATE DATABASE hedgedocdb OWNER hedgedoc;
+          '';
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("HedgeDoc sqlite"):
+        hedgedocSqlite.wait_for_unit("hedgedoc.service")
+        hedgedocSqlite.wait_for_open_port(3000)
+        hedgedocSqlite.wait_until_succeeds("curl -sSf http://localhost:3000/new")
+
+    with subtest("HedgeDoc postgres with TCP socket"):
+        hedgedocPostgresWithTCPSocket.wait_for_unit("postgresql.service")
+        hedgedocPostgresWithTCPSocket.wait_for_unit("hedgedoc.service")
+        hedgedocPostgresWithTCPSocket.wait_for_open_port(5432)
+        hedgedocPostgresWithTCPSocket.wait_for_open_port(3000)
+        hedgedocPostgresWithTCPSocket.wait_until_succeeds("curl -sSf http://localhost:3000/new")
+
+    with subtest("HedgeDoc postgres with UNIX socket"):
+        hedgedocPostgresWithUNIXSocket.wait_for_unit("postgresql.service")
+        hedgedocPostgresWithUNIXSocket.wait_for_unit("hedgedoc.service")
+        hedgedocPostgresWithUNIXSocket.wait_for_open_port(5432)
+        hedgedocPostgresWithUNIXSocket.wait_for_open_port(3000)
+        hedgedocPostgresWithUNIXSocket.wait_until_succeeds("curl -sSf http://localhost:3000/new")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/herbstluftwm.nix b/nixpkgs/nixos/tests/herbstluftwm.nix
new file mode 100644
index 000000000000..b6965914360e
--- /dev/null
+++ b/nixpkgs/nixos/tests/herbstluftwm.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix ({ lib, ...} : {
+  name = "herbstluftwm";
+
+  meta = {
+    maintainers = with lib.maintainers; [ thibautmarty ];
+  };
+
+  nodes.machine = { pkgs, lib, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = lib.mkForce "none+herbstluftwm";
+    services.xserver.windowManager.herbstluftwm.enable = true;
+    environment.systemPackages = [ pkgs.dzen2 ]; # needed for upstream provided panel
+  };
+
+  testScript = ''
+    with subtest("ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("/home/alice/.Xauthority")
+        machine.succeed("xauth merge ~alice/.Xauthority")
+
+    with subtest("ensure client is available"):
+        machine.succeed("herbstclient --version")
+
+    with subtest("ensure keybindings are set"):
+        machine.wait_until_succeeds("herbstclient list_keybinds | grep xterm")
+
+    with subtest("ensure panel starts"):
+        machine.wait_for_window("dzen title")
+
+    with subtest("ensure we can open a new terminal"):
+        machine.send_key("alt-ret")
+        machine.wait_for_window(r"alice.*?machine")
+        machine.sleep(2)
+        machine.screenshot("terminal")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hibernate.nix b/nixpkgs/nixos/tests/hibernate.nix
new file mode 100644
index 000000000000..296aa9ba68b9
--- /dev/null
+++ b/nixpkgs/nixos/tests/hibernate.nix
@@ -0,0 +1,55 @@
+# Test whether hibernation from a partition works.
+
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, systemdStage1 ? false
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+makeTest {
+  name = "hibernate";
+
+  nodes = {
+    machine = { config, lib, pkgs, ... }: {
+      imports = [
+        ./common/auto-format-root-device.nix
+      ];
+
+      systemd.services.backdoor.conflicts = [ "sleep.target" ];
+      powerManagement.resumeCommands = "systemctl --no-block restart backdoor.service";
+
+      virtualisation.emptyDiskImages = [ (2 * config.virtualisation.memorySize) ];
+      virtualisation.useNixStoreImage = true;
+
+      swapDevices = lib.mkOverride 0 [ { device = "/dev/vdc"; options = [ "x-systemd.makefs" ]; } ];
+      boot.resumeDevice = "/dev/vdc";
+      boot.initrd.systemd.enable = systemdStage1;
+    };
+  };
+
+  testScript = ''
+    # Drop in a file that checks whether we un-hibernated properly (and did not boot fresh)
+    machine.wait_for_unit("default.target")
+    machine.succeed(
+        "mkdir /run/test",
+        "mount -t ramfs -o size=1m ramfs /run/test",
+        "echo not persisted to disk > /run/test/suspended",
+    )
+
+    # Hibernate machine
+    machine.execute("systemctl hibernate >&2 &", check_return=False)
+    machine.wait_for_shutdown()
+
+    # Restore machine from hibernation, validate our ramfs file is there.
+    machine.start()
+    machine.succeed("grep 'not persisted to disk' /run/test/suspended")
+
+    # Ensure we don't restore from hibernation when booting again
+    machine.crash()
+    machine.wait_for_unit("default.target")
+    machine.fail("grep 'not persisted to disk' /run/test/suspended")
+  '';
+
+}
diff --git a/nixpkgs/nixos/tests/hitch/default.nix b/nixpkgs/nixos/tests/hitch/default.nix
new file mode 100644
index 000000000000..4283b9f7dffb
--- /dev/null
+++ b/nixpkgs/nixos/tests/hitch/default.nix
@@ -0,0 +1,33 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+{
+  name = "hitch";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ jflanglois ];
+  };
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.hitch = {
+      enable = true;
+      backend = "[127.0.0.1]:80";
+      pem-files = [
+        ./example.pem
+      ];
+    };
+
+    services.httpd = {
+      enable = true;
+      virtualHosts.localhost.documentRoot = ./example;
+      adminAddr = "noone@testing.nowhere";
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_unit("hitch.service")
+      machine.wait_for_open_port(443)
+      assert "We are all good!" in machine.succeed("curl -fk https://localhost:443/index.txt")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/hitch/example.pem b/nixpkgs/nixos/tests/hitch/example.pem
new file mode 100644
index 000000000000..fde6f3cbd19a
--- /dev/null
+++ b/nixpkgs/nixos/tests/hitch/example.pem
@@ -0,0 +1,53 @@
+-----BEGIN CERTIFICATE-----
+MIIEKTCCAxGgAwIBAgIJAIFAWQXSZ7lIMA0GCSqGSIb3DQEBCwUAMIGqMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEVMBMGA1UEBwwMUmVkd29vZCBD
+aXR5MRkwFwYDVQQKDBBUZXN0aW5nIDEyMyBJbmMuMRQwEgYDVQQLDAtJVCBTZXJ2
+aWNlczEYMBYGA1UEAwwPdGVzdGluZy5ub3doZXJlMSQwIgYJKoZIhvcNAQkBFhVu
+b29uZUB0ZXN0aW5nLm5vd2hlcmUwHhcNMTgwNDIzMDcxMTI5WhcNMTkwNDIzMDcx
+MTI5WjCBqjELMAkGA1UEBhMCVVMxEzARBgNVBAgMCkNhbGlmb3JuaWExFTATBgNV
+BAcMDFJlZHdvb2QgQ2l0eTEZMBcGA1UECgwQVGVzdGluZyAxMjMgSW5jLjEUMBIG
+A1UECwwLSVQgU2VydmljZXMxGDAWBgNVBAMMD3Rlc3Rpbmcubm93aGVyZTEkMCIG
+CSqGSIb3DQEJARYVbm9vbmVAdGVzdGluZy5ub3doZXJlMIIBIjANBgkqhkiG9w0B
+AQEFAAOCAQ8AMIIBCgKCAQEAxQq6AA9o/QErMbQwfgDF4mqXcvglRTwPr2zPE6Rv
+1g0ncRBSMM8iKbPapHM6qHNfg2e1fU2SFqzD6HkyZqHHLCgLzkdzswEcEjsMqiUP
+OR++5g4CWoQrdTi31itzYzCjnQ45BrAMrLEhBQgDTNwrEE+Tit0gpOGggtj/ktLk
+OD8BKa640lkmWEUGF18fd3rYTUC4hwM5qhAVXTe21vj9ZWsgprpQKdN61v0dCUap
+C5eAgvZ8Re+Cd0Id674hK4cJ4SekqfHKv/jLyIg3Vsdc9nkhmiC4O6KH5f1Zzq2i
+E4Kd5mnJDFxfSzIErKWmbhriLWsj3KEJ983AGLJ9hxQTAwIDAQABo1AwTjAdBgNV
+HQ4EFgQU76Mm6DP/BePJRQUNrJ9z038zjocwHwYDVR0jBBgwFoAU76Mm6DP/BePJ
+RQUNrJ9z038zjocwDAYDVR0TBAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAAZzt
+VdPaUqrvDAh5rMYqzYMJ3tj6daNYoX6CbTFoevK5J5D4FESM0D/FMKgpNiVz39kB
+8Cjaw5rPHMHY61rHz7JRDK1sWXsonwzCF21BK7Tx0G1CIfLpYHWYb/FfdWGROx+O
+hPgKuoMRWQB+txozkZp5BqWJmk5MOyFCDEXhMOmrfsJq0IYU6QaH3Lsf1oJRy4yU
+afFrT9o3DLOyYLG/j/HXijCu8DVjZVa4aboum79ecYzPjjGF1posrFUnvQiuAeYy
+t7cuHNUB8gW9lWR5J7tP8fzFWtIcyT2oRL8u3H+fXf0i4bW73wtOBOoeULBzBNE7
+6rphcSrQunSZQIc+hg==
+-----END CERTIFICATE-----
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQDFCroAD2j9ASsx
+tDB+AMXiapdy+CVFPA+vbM8TpG/WDSdxEFIwzyIps9qkczqoc1+DZ7V9TZIWrMPo
+eTJmoccsKAvOR3OzARwSOwyqJQ85H77mDgJahCt1OLfWK3NjMKOdDjkGsAyssSEF
+CANM3CsQT5OK3SCk4aCC2P+S0uQ4PwEprrjSWSZYRQYXXx93ethNQLiHAzmqEBVd
+N7bW+P1layCmulAp03rW/R0JRqkLl4CC9nxF74J3Qh3rviErhwnhJ6Sp8cq/+MvI
+iDdWx1z2eSGaILg7oofl/VnOraITgp3mackMXF9LMgSspaZuGuItayPcoQn3zcAY
+sn2HFBMDAgMBAAECggEAcaR8HijFHpab+PC5vxJnDuz3KEHiDQpU6ZJR5DxEnCm+
+A8GsBaaRR4gJpCspO5o/DiS0Ue55QUanPt8XqIXJv7fhBznCiw0qyYDxDviMzR94
+FGskBFySS+tIa+dnh1+4HY7kaO0Egl0udB5o+N1KoP+kUsSyXSYcUxsgW+fx5FW9
+22Ya3HNWnWxMCSfSGGlTFXGj2whf25SkL25dM9iblO4ZOx4MX8kaXij7TaYy8hMM
+Vf6/OMnXqtPKho+ctZZVKZkE9PxdS4f/pnp5EsdoOZwNBtfQ1WqVLWd3DlGWhnsH
+7L8ZSP2HkoI4Pd1wtkpOKZc+yM2bFXWa8WY4TcmpUQKBgQD33HxGdtmtZehrexSA
+/ZwWJlMslUsNz4Ivv6s7J4WCRhdh94+r9TWQP/yHdT9Ry5bvn84I5ZLUdp+aA962
+mvjz+GIglkCGpA7HU/hqurB1O63pj2cIDB8qhV21zjVIoqXcQ7IBJ+tqD79nF8vm
+h3KfuHUhuu1rayGepbtIyNhLdwKBgQDLgw4TJBg/QB8RzYECk78QnfZpCExsQA/z
+YJpc+dF2/nsid5R2u9jWzfmgHM2Jjo2/+ofRUaTqcFYU0K57CqmQkOLIzsbNQoYt
+e2NOANNVHiZLuzTZC2r3BrrkNbo3YvQzhAesUA5lS6LfrxBLUKiwo2LU9NlmJs3b
+UPVFYI0/1QKBgCswxIcS1sOcam+wNtZzWuuRKhUuvrFdY3YmlBPuwxj8Vb7AgMya
+IgdM3xhLmgkKzPZchm6OcpOLSCxyWDDBuHfq5E6BYCUWGW0qeLNAbNdA2wFD99Qz
+KIskSjwP/sD1dql3MmF5L1CABf5U6zb0i0jBv8ds50o8lNMsVgJM3UPpAoGBAL1+
+nzllb4pdi1CJWKnspoizfQCZsIdPM0r71V/jYY36MO+MBtpz2NlSWzAiAaQm74gl
+oBdgfT2qMg0Zro11BSRONEykdOolGkj5TiMQk7b65s+3VeMPRZ8UTis2d9kgs5/Q
+PVDODkl1nwfGu1ZVmW04BUujXVZHpYCkJm1eFMetAoGAImE7gWj+qRMhpbtCCGCg
+z06gDKvMrF6S+GJsvUoSyM8oUtfdPodI6gWAC65NfYkIiqbpCaEVNzfui73f5Lnz
+p5X1IbzhuH5UZs/k5A3OR2PPDbPs3lqEw7YJdBdLVRmO1o824uaXaJJwkL/1C+lq
+8dh1wV3CnynNmZApkz4vpzQ=
+-----END PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/hitch/example/index.txt b/nixpkgs/nixos/tests/hitch/example/index.txt
new file mode 100644
index 000000000000..0478b1c26351
--- /dev/null
+++ b/nixpkgs/nixos/tests/hitch/example/index.txt
@@ -0,0 +1 @@
+We are all good!
diff --git a/nixpkgs/nixos/tests/hledger-web.nix b/nixpkgs/nixos/tests/hledger-web.nix
new file mode 100644
index 000000000000..f8919f7d4bd0
--- /dev/null
+++ b/nixpkgs/nixos/tests/hledger-web.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  journal = pkgs.writeText "test.journal" ''
+    2010/01/10 Loan
+        assets:cash                 500$
+        income:loan                -500$
+    2010/01/10 NixOS Foundation donation
+        expenses:donation           250$
+        assets:cash                -250$
+  '';
+in
+rec {
+  name = "hledger-web";
+  meta.maintainers = with lib.maintainers; [ marijanp ];
+
+  nodes = rec {
+    server = { config, pkgs, ... }: {
+      services.hledger-web = {
+        host = "127.0.0.1";
+        port = 5000;
+        enable = true;
+        capabilities.manage = true;
+      };
+      networking.firewall.allowedTCPPorts = [ config.services.hledger-web.port ];
+      systemd.services.hledger-web.preStart = ''
+        ln -s ${journal} /var/lib/hledger-web/.hledger.journal
+      '';
+    };
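+    # The API server reuses the web server's configuration and additionally serves the JSON API.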
+    apiserver = { ... }: {
+      imports = [ server ];
+      services.hledger-web.serveApi = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("hledger-web.service")
+    server.wait_for_open_port(5000)
+    with subtest("Check if web UI is accessible"):
+        page = server.succeed("curl -L http://127.0.0.1:5000")
+        assert ".hledger.journal" in page
+
+    apiserver.wait_for_unit("hledger-web.service")
+    apiserver.wait_for_open_port(5000)
+    with subtest("Check if the JSON API is served"):
+        transactions = apiserver.succeed("curl -L http://127.0.0.1:5000/transactions")
+        assert "NixOS Foundation donation" in transactions
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hocker-fetchdocker/default.nix b/nixpkgs/nixos/tests/hocker-fetchdocker/default.nix
new file mode 100644
index 000000000000..b5c06126c2e8
--- /dev/null
+++ b/nixpkgs/nixos/tests/hocker-fetchdocker/default.nix
@@ -0,0 +1,16 @@
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "test-hocker-fetchdocker";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ixmatus ];
+    broken = true; # tries to download from registry-1.docker.io - how did this ever work?
+  };
+
+  nodes.machine = import ./machine.nix;
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("sockets.target")
+    machine.wait_until_succeeds("docker run registry-1.docker.io/v2/library/hello-world:latest")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hocker-fetchdocker/hello-world-container.nix b/nixpkgs/nixos/tests/hocker-fetchdocker/hello-world-container.nix
new file mode 100644
index 000000000000..a127875264e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/hocker-fetchdocker/hello-world-container.nix
@@ -0,0 +1,19 @@
+{ fetchDockerConfig, fetchDockerLayer, fetchdocker }:
+fetchdocker rec {
+    name = "hello-world";
+    registry = "https://registry-1.docker.io/v2/";
+    repository = "library";
+    imageName = "hello-world";
+    tag = "latest";
+    imageConfig = fetchDockerConfig {
+      inherit tag registry repository imageName;
+      sha256 = "1ivbd23hyindkahzfw4kahgzi6ibzz2ablmgsz6340vc6qr1gagj";
+    };
+    imageLayers = let
+      layer0 = fetchDockerLayer {
+        inherit registry repository imageName;
+        layerDigest = "ca4f61b1923c10e9eb81228bd46bee1dfba02b9c7dac1844527a734752688ede";
+        sha256 = "1plfd194fwvsa921ib3xkhms1yqxxrmx92r2h7myj41wjaqn2kya";
+      };
+      in [ layer0 ];
+  }
diff --git a/nixpkgs/nixos/tests/hocker-fetchdocker/machine.nix b/nixpkgs/nixos/tests/hocker-fetchdocker/machine.nix
new file mode 100644
index 000000000000..885adebe1498
--- /dev/null
+++ b/nixpkgs/nixos/tests/hocker-fetchdocker/machine.nix
@@ -0,0 +1,26 @@
+{ pkgs, ... }:
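+# Make the fetchdocker-built image available as pkgs.hello-world-container.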
+{ nixpkgs.config.packageOverrides = pkgs': {
+    hello-world-container = pkgs'.callPackage ./hello-world-container.nix { };
+  };
+
+  virtualisation.docker = {
+    enable  = true;
+    package = pkgs.docker;
+  };
+
+  systemd.services.docker-load-fetchdocker-image = {
+    description = "Docker load hello-world-container";
+    wantedBy    = [ "multi-user.target" ];
+    wants       = [ "docker.service" ];
+    after       = [ "docker.service" ];
+
+    script = ''
+      ${pkgs.hello-world-container}/compositeImage.sh | ${pkgs.docker}/bin/docker load
+    '';
+
+    serviceConfig = {
+      Type = "oneshot";
+    };
+  };
+}
+
diff --git a/nixpkgs/nixos/tests/hockeypuck.nix b/nixpkgs/nixos/tests/hockeypuck.nix
new file mode 100644
index 000000000000..675d6b226ad2
--- /dev/null
+++ b/nixpkgs/nixos/tests/hockeypuck.nix
@@ -0,0 +1,63 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+let
+  gpgKeyring = (pkgs.runCommand "gpg-keyring" { buildInputs = [ pkgs.gnupg ]; } ''
+    mkdir -p $out
+    export GNUPGHOME=$out
+    cat > foo <<EOF
+      %echo Generating a basic OpenPGP key
+      %no-protection
+      Key-Type: DSA
+      Key-Length: 1024
+      Subkey-Type: ELG-E
+      Subkey-Length: 1024
+      Name-Real: Foo Example
+      Name-Email: foo@example.org
+      Expire-Date: 0
+      # Do a commit here, so that we can later print "done"
+      %commit
+      %echo done
+    EOF
+    gpg --batch --generate-key foo
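+    # Remove the gpg-agent sockets; socket files cannot be part of a Nix store path.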
+    rm $out/S.gpg-agent $out/S.gpg-agent.*
+  '');
+in {
+  name = "hockeypuck";
+  meta.maintainers = with lib.maintainers; [ etu ];
+
+  nodes.machine = { ... }: {
+    # Used for test
+    environment.systemPackages = [ pkgs.gnupg ];
+
+    services.hockeypuck.enable = true;
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "hockeypuck" ];
+      ensureUsers = [{
+        name = "hockeypuck";
+        ensureDBOwnership = true;
+      }];
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("hockeypuck.service")
+    machine.wait_for_open_port(11371)
+
+    response = machine.succeed("curl -vvv -s http://127.0.0.1:11371/")
+
+    assert "<title>OpenPGP Keyserver</title>" in response, "HTML title not found"
+
+    # Copy the keyring
+    machine.succeed("cp -R ${gpgKeyring} /tmp/GNUPGHOME")
+
+    # Extract our GPG key id
+    keyId = machine.succeed("GNUPGHOME=/tmp/GNUPGHOME gpg --list-keys | grep dsa1024 --after-context=1 | grep -v dsa1024").strip()
+
+    # Send the key to our local keyserver
+    machine.succeed("GNUPGHOME=/tmp/GNUPGHOME gpg --keyserver hkp://127.0.0.1:11371 --send-keys " + keyId)
+
+    # Receive the key from our local keyserver to a separate directory
+    machine.succeed("GNUPGHOME=$(mktemp -d) gpg --keyserver hkp://127.0.0.1:11371 --recv-keys " + keyId)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/home-assistant.nix b/nixpkgs/nixos/tests/home-assistant.nix
new file mode 100644
index 000000000000..e1588088ba19
--- /dev/null
+++ b/nixpkgs/nixos/tests/home-assistant.nix
@@ -0,0 +1,241 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  configDir = "/var/lib/foobar";
+in {
+  name = "home-assistant";
+  meta.maintainers = lib.teams.home-assistant.members;
+
+  nodes.hass = { pkgs, ... }: {
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "hass" ];
+      ensureUsers = [{
+        name = "hass";
+        ensureDBOwnership = true;
+      }];
+    };
+
+    services.home-assistant = {
+      enable = true;
+      inherit configDir;
+
+      # provide dependencies through package overrides
+      package = (pkgs.home-assistant.override {
+        extraPackages = ps: with ps; [
+          colorama
+        ];
+        extraComponents = [
+          # test that the char-tty DeviceAllow rule is propagated into the service
+          "zha"
+        ];
+      });
+
+      # provide component dependencies explicitly from the module
+      extraComponents = [
+        "mqtt"
+      ];
+
+      # provide package for postgresql support
+      extraPackages = python3Packages: with python3Packages; [
+        psycopg2
+      ];
+
+      # test loading custom components
+      customComponents = with pkgs.home-assistant-custom-components; [
+        prometheus-sensor
+      ];
+
+      # test loading lovelace modules
+      customLovelaceModules = with pkgs.home-assistant-custom-lovelace-modules; [
+        mini-graph-card
+      ];
+
+      config = {
+        homeassistant = {
+          name = "Home";
+          time_zone = "UTC";
+          latitude = "0.0";
+          longitude = "0.0";
+          elevation = 0;
+        };
+
+        # configure the recorder component to use the postgresql db
+        recorder.db_url = "postgresql://@/hass";
+
+        # we can't load default_config, because the updater requires
+        # network access and would cause an error, so load frontend
+        # here explicitly.
+        # https://www.home-assistant.io/integrations/frontend/
+        frontend = {};
+
+        # include some popular integrations that absolutely shouldn't break
+        knx = {};
+        shelly = {};
+        zha = {};
+
+        # set up a wake-on-lan switch to test capset capability required
+        # for the ping suid wrapper
+        # https://www.home-assistant.io/integrations/wake_on_lan/
+        switch = [ {
+          platform = "wake_on_lan";
+          mac = "00:11:22:33:44:55";
+          host = "127.0.0.1";
+        } ];
+
+        # test component-based capability assignment (CAP_NET_BIND_SERVICE)
+        # https://www.home-assistant.io/integrations/emulated_hue/
+        emulated_hue = {
+          host_ip = "127.0.0.1";
+          listen_port = 80;
+        };
+
+        # https://www.home-assistant.io/integrations/logger/
+        logger = {
+          default = "info";
+        };
+      };
+
+      # configure the sample lovelace dashboard
+      lovelaceConfig = {
+        title = "My Awesome Home";
+        views = [{
+          title = "Example";
+          cards = [{
+            type = "markdown";
+            title = "Lovelace";
+            content = "Welcome to your **Lovelace UI**.";
+          }];
+        }];
+      };
+      lovelaceConfigWritable = true;
+    };
+
+    # Cause a configuration change inside `configuration.yaml` and verify that the process is reloaded.
+    specialisation.differentName = {
+      inheritParentConfig = true;
+      configuration.services.home-assistant.config.homeassistant.name = lib.mkForce "Test Home";
+    };
+
+    # Cause a configuration change that requires a service restart as we added a new runtime dependency
+    specialisation.newFeature = {
+      inheritParentConfig = true;
+      configuration.services.home-assistant.config.backup = {};
+    };
+
+    specialisation.removeCustomThings = {
+      inheritParentConfig = true;
+      configuration.services.home-assistant = {
+        customComponents = lib.mkForce [];
+        customLovelaceModules = lib.mkForce [];
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    system = nodes.hass.system.build.toplevel;
+  in
+  ''
+    import json
+
+    start_all()
+
+
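+    # Journal cursors let each subtest limit its log assertions to messages emitted after a given point.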
+    def get_journal_cursor() -> str:
+        exit, out = hass.execute("journalctl -u home-assistant.service -n1 -o json-pretty --output-fields=__CURSOR")
+        assert exit == 0
+        return json.loads(out)["__CURSOR"]
+
+
+    def get_journal_since(cursor) -> str:
+        exit, out = hass.execute(f"journalctl --after-cursor='{cursor}' -u home-assistant.service")
+        assert exit == 0
+        return out
+
+
+    def get_unit_property(property) -> str:
+        exit, out = hass.execute(f"systemctl show --property={property} home-assistant.service")
+        assert exit == 0
+        return out
+
+
+    def wait_for_homeassistant(cursor):
+        hass.wait_until_succeeds(f"journalctl --after-cursor='{cursor}' -u home-assistant.service | grep -q 'Home Assistant initialized in'")
+
+
+    hass.wait_for_unit("home-assistant.service")
+    cursor = get_journal_cursor()
+
+    with subtest("Check that YAML configuration file is in place"):
+        hass.succeed("test -L ${configDir}/configuration.yaml")
+
+    with subtest("Check the lovelace config is copied because lovelaceConfigWritable = true"):
+        hass.succeed("test -f ${configDir}/ui-lovelace.yaml")
+
+    with subtest("Check that Home Assistant's web interface and API can be reached"):
+        wait_for_homeassistant(cursor)
+        hass.wait_for_open_port(8123)
+        hass.succeed("curl --fail http://localhost:8123/lovelace")
+
+    with subtest("Check that custom components get installed"):
+        hass.succeed("test -f ${configDir}/custom_components/prometheus_sensor/manifest.json")
+        hass.wait_until_succeeds("journalctl -u home-assistant.service | grep -q 'We found a custom integration prometheus_sensor which has not been tested by Home Assistant'")
+
+    with subtest("Check that lovelace modules are referenced and fetchable"):
+        hass.succeed("grep -q 'mini-graph-card-bundle.js' '${configDir}/ui-lovelace.yaml'")
+        hass.succeed("curl --fail http://localhost:8123/local/nixos-lovelace-modules/mini-graph-card-bundle.js")
+
+    with subtest("Check that optional dependencies are in the PYTHONPATH"):
+        env = get_unit_property("Environment")
+        python_path = env.split("PYTHONPATH=")[1].split()[0]
+        for package in ["colorama", "paho-mqtt", "psycopg2"]:
+            assert package in python_path, f"{package} not in PYTHONPATH"
+
+    with subtest("Check that declaratively configured components get setup"):
+        journal = get_journal_since(cursor)
+        for domain in ["emulated_hue", "wake_on_lan"]:
+            assert f"Setup of domain {domain} took" in journal, f"{domain} setup missing"
+
+    with subtest("Check that capabilities are passed for emulated_hue to bind to port 80"):
+        hass.wait_for_open_port(80)
+        hass.succeed("curl --fail http://localhost:80/description.xml")
+
+    with subtest("Check extra components are considered in systemd unit hardening"):
+        hass.succeed("systemctl show -p DeviceAllow home-assistant.service | grep -q char-ttyUSB")
+
+    with subtest("Check service reloads when configuration changes"):
+        pid = hass.succeed("systemctl show --property=MainPID home-assistant.service")
+        cursor = get_journal_cursor()
+        hass.succeed("${system}/specialisation/differentName/bin/switch-to-configuration test")
+        new_pid = hass.succeed("systemctl show --property=MainPID home-assistant.service")
+        assert pid == new_pid, "The PID of the process should not change between process reloads"
+        wait_for_homeassistant(cursor)
+
+    with subtest("Check service restarts when dependencies change"):
+        pid = new_pid
+        cursor = get_journal_cursor()
+        hass.succeed("${system}/specialisation/newFeature/bin/switch-to-configuration test")
+        new_pid = hass.succeed("systemctl show --property=MainPID home-assistant.service")
+        assert pid != new_pid, "The PID of the process should change when its PYTHONPATH changes"
+        wait_for_homeassistant(cursor)
+
+    with subtest("Check that new components get setup after restart"):
+        journal = get_journal_since(cursor)
+        for domain in ["backup"]:
+            assert f"Setup of domain {domain} took" in journal, f"{domain} setup missing"
+
+    with subtest("Check custom components and custom lovelace modules get removed"):
+        cursor = get_journal_cursor()
+        hass.succeed("${system}/specialisation/removeCustomThings/bin/switch-to-configuration test")
+        hass.fail("grep -q 'mini-graph-card-bundle.js' '${configDir}/ui-lovelace.yaml'")
+        hass.fail("test -f ${configDir}/custom_components/prometheus_sensor/manifest.json")
+        wait_for_homeassistant(cursor)
+
+    with subtest("Check that no errors were logged"):
+        hass.fail("journalctl -u home-assistant -o cat | grep -q ERROR")
+
+    with subtest("Check systemd unit hardening"):
+        hass.log(hass.succeed("systemctl cat home-assistant.service"))
+        hass.log(hass.succeed("systemd-analyze security home-assistant.service"))
+  '';
+})
diff --git a/nixpkgs/nixos/tests/homepage-dashboard.nix b/nixpkgs/nixos/tests/homepage-dashboard.nix
new file mode 100644
index 000000000000..56e077f5ff6d
--- /dev/null
+++ b/nixpkgs/nixos/tests/homepage-dashboard.nix
@@ -0,0 +1,14 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "homepage-dashboard";
+  meta.maintainers = with lib.maintainers; [ jnsgruk ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.homepage-dashboard.enable = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("homepage-dashboard.service")
+    machine.wait_for_open_port(8082)
+    machine.succeed("curl --fail http://localhost:8082/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/honk.nix b/nixpkgs/nixos/tests/honk.nix
new file mode 100644
index 000000000000..71d86a592439
--- /dev/null
+++ b/nixpkgs/nixos/tests/honk.nix
@@ -0,0 +1,32 @@
+{ lib, ... }:
+
+{
+  name = "honk-server";
+
+  nodes = {
+    machine = { pkgs, ... }: {
+      services.honk = {
+        enable = true;
+        host = "0.0.0.0";
+        port = 8080;
+        username = "username";
+        passwordFile = "${pkgs.writeText "honk-password" "secure"}";
+        servername = "servername";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("honk.service")
+    machine.wait_for_open_port(8080)
+
+    machine.stop_job("honk")
+    machine.wait_for_closed_port(8080)
+
+    machine.start_job("honk")
+    machine.wait_for_open_port(8080)
+  '';
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+}
diff --git a/nixpkgs/nixos/tests/hostname.nix b/nixpkgs/nixos/tests/hostname.nix
new file mode 100644
index 000000000000..6122e2ffeb83
--- /dev/null
+++ b/nixpkgs/nixos/tests/hostname.nix
@@ -0,0 +1,72 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  makeHostNameTest = hostName: domain: fqdnOrNull:
+    let
+      fqdn = hostName + (optionalString (domain != null) ".${domain}");
+      getStr = str: # render a possibly-null or throwing string value, falling back to "null"
+        let res = builtins.tryEval str;
+        in if (res.success && res.value != null) then res.value else "null";
+    in
+    makeTest {
+      name = "hostname-${fqdn}";
+      meta = with pkgs.lib.maintainers; {
+        maintainers = [ primeos blitz ];
+      };
+
+      nodes.machine = { lib, ... }: {
+        networking.hostName = hostName;
+        networking.domain = domain;
+
+        environment.systemPackages = with pkgs; [
+          inetutils
+        ];
+      };
+
+      testScript = { nodes, ... }: ''
+        start_all()
+
+        machine = ${hostName}
+
+        machine.wait_for_unit("network-online.target")
+
+        # Test if NixOS computes the correct FQDN (either a FQDN or an error/null):
+        assert "${getStr nodes.machine.networking.fqdn}" == "${getStr fqdnOrNull}"
+
+        # The FQDN, domain name, and hostname detection should work as expected:
+        assert "${fqdn}" == machine.succeed("hostname --fqdn").strip()
+        assert "${optionalString (domain != null) domain}" == machine.succeed("dnsdomainname").strip()
+        assert (
+            "${hostName}"
+            == machine.succeed(
+                'hostnamectl status | grep "Static hostname" | cut -d: -f2'
+            ).strip()
+        )
+
+        # 127.0.0.1 and ::1 should resolve back to "localhost":
+        assert (
+            "localhost" == machine.succeed("getent hosts 127.0.0.1 | awk '{print $2}'").strip()
+        )
+        assert "localhost" == machine.succeed("getent hosts ::1 | awk '{print $2}'").strip()
+
+        # 127.0.0.2 should resolve back to the FQDN and hostname:
+        fqdn_and_host_name = "${optionalString (domain != null) "${hostName}.${domain} "}${hostName}"
+        assert (
+            fqdn_and_host_name
+            == machine.succeed("getent hosts 127.0.0.2 | awk '{print $2,$3}'").strip()
+        )
+      '';
+    };
+
+in
+{
+  noExplicitDomain = makeHostNameTest "ahost" null null;
+
+  explicitDomain = makeHostNameTest "ahost" "adomain" "ahost.adomain";
+}
diff --git a/nixpkgs/nixos/tests/hound.nix b/nixpkgs/nixos/tests/hound.nix
new file mode 100644
index 000000000000..a9b036deb0dd
--- /dev/null
+++ b/nixpkgs/nixos/tests/hound.nix
@@ -0,0 +1,59 @@
+# Test whether `houndd` can index a local git repository
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "hound";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ grahamc ];
+  };
+  nodes.machine = { pkgs, ... }: {
+    services.hound = {
+      enable = true;
+      config = ''
+        {
+          "max-concurrent-indexers": 1,
+          "dbpath": "/var/lib/hound/data",
+          "repos": {
+            "nix": {
+              "url": "file:///var/lib/hound/my-git"
+            }
+          }
+        }
+      '';
+    };
+
+    systemd.services.houndseed = {
+      description = "seed hound with a git repo";
+      requiredBy = [ "hound.service" ];
+      before = [ "hound.service" ];
+
+      serviceConfig = {
+        User = "hound";
+        Group = "hound";
+        WorkingDirectory = "/var/lib/hound";
+      };
+      path = [ pkgs.git ];
+      script = ''
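+        # Seed a bare repository that hound can then index over the file:// URL configured above.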
+        git config --global user.email "you@example.com"
+        git config --global user.name "Your Name"
+        git init my-git --bare
+        git init my-git-clone
+        cd my-git-clone
+        echo 'hi nix!' > hello
+        git add hello
+        git commit -m "hello there :)"
+        git remote add origin /var/lib/hound/my-git
+        git push origin master
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("network.target")
+    machine.wait_for_unit("hound.service")
+    machine.wait_for_open_port(6080)
+    machine.wait_until_succeeds(
+        "curl -f http://127.0.0.1:6080/api/v1/search\?stats\=fosho\&repos\=\*\&rng=%3A20\&q\=hi\&files\=\&i=nope | grep 'Filename' | grep 'hello'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/hydra/common.nix b/nixpkgs/nixos/tests/hydra/common.nix
new file mode 100644
index 000000000000..2bce03418e1f
--- /dev/null
+++ b/nixpkgs/nixos/tests/hydra/common.nix
@@ -0,0 +1,48 @@
+{ system, ... }:
+{
+  baseConfig = { pkgs, ... }: let
+    trivialJob = pkgs.writeTextDir "trivial.nix" ''
+     { trivial = builtins.derivation {
+         name = "trivial";
+         system = "${system}";
+         builder = "/bin/sh";
+         allowSubstitutes = false;
+         preferLocalBuild = true;
+         args = ["-c" "echo success > $out; exit 0"];
+       };
+     }
+    '';
+
+    createTrivialProject = pkgs.stdenv.mkDerivation {
+      name = "create-trivial-project";
+      dontUnpack = true;
+      nativeBuildInputs = [ pkgs.makeWrapper ];
+      installPhase = "install -m755 -D ${./create-trivial-project.sh} $out/bin/create-trivial-project.sh";
+      postFixup = ''
+        wrapProgram "$out/bin/create-trivial-project.sh" --prefix PATH ":" ${pkgs.lib.makeBinPath [ pkgs.curl ]} --set EXPR_PATH ${trivialJob}
+      '';
+    };
+  in {
+    virtualisation.memorySize = 2048;
+    time.timeZone = "UTC";
+    environment.systemPackages = [ createTrivialProject pkgs.jq ];
+    services.hydra = {
+      enable = true;
+      # Hydra needs these settings to start up, so we set harmless placeholder values.
+      hydraURL = "example.com";
+      notificationSender = "example@example.com";
+      extraConfig = ''
+        email_notification = 1
+      '';
+    };
+    services.postfix.enable = true;
+    nix = {
+      distributedBuilds = true;
+      buildMachines = [{
+        hostName = "localhost";
+        systems = [ system ];
+      }];
+      settings.substituters = [];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/hydra/create-trivial-project.sh b/nixpkgs/nixos/tests/hydra/create-trivial-project.sh
new file mode 100755
index 000000000000..5aae2d5bf90d
--- /dev/null
+++ b/nixpkgs/nixos/tests/hydra/create-trivial-project.sh
@@ -0,0 +1,59 @@
+#!/usr/bin/env bash
+#
+# This script creates a project and a jobset with an input of type
+# "path". The path is a local directory containing a Nix expression
+# that defines a job.
+# The EXPR_PATH environment variable must be set to that local path.
+
+set -e
+
+URL=http://localhost:3000
+USERNAME="admin"
+PASSWORD="admin"
+PROJECT_NAME="trivial"
+JOBSET_NAME="trivial"
+EXPR_PATH=${EXPR_PATH:-}
+
+if [ -z "$EXPR_PATH" ]; then
+   echo "Environment variable EXPR_PATH must be set"
+   exit 1
+fi
+
+mycurl() {
+  curl --referer "$URL" -H "Accept: application/json" -H "Content-Type: application/json" "$@"
+}
+
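+# Log in and store the session cookie used by the authenticated PUT requests below.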
+cat >data.json <<EOF
+{ "username": "$USERNAME", "password": "$PASSWORD" }
+EOF
+mycurl -X POST -d '@data.json' $URL/login -c hydra-cookie.txt
+
+cat >data.json <<EOF
+{
+  "displayname":"Trivial",
+  "enabled":"1",
+  "visible":"1"
+}
+EOF
+mycurl --silent -X PUT $URL/project/$PROJECT_NAME -d @data.json -b hydra-cookie.txt
+
+cat >data.json <<EOF
+{
+  "description": "Trivial",
+  "checkinterval": "60",
+  "enabled": "1",
+  "visible": "1",
+  "keepnr": "1",
+  "enableemail": true,
+  "emailoverride": "hydra@localhost",
+  "nixexprinput": "trivial",
+  "nixexprpath": "trivial.nix",
+  "inputs": {
+    "trivial": {
+      "value": "$EXPR_PATH",
+      "type": "path"
+    }
+  }
+}
+EOF
+mycurl --silent -X PUT $URL/jobset/$PROJECT_NAME/$JOBSET_NAME -d @data.json -b hydra-cookie.txt
diff --git a/nixpkgs/nixos/tests/hydra/default.nix b/nixpkgs/nixos/tests/hydra/default.nix
new file mode 100644
index 000000000000..98c3c6fbae9f
--- /dev/null
+++ b/nixpkgs/nixos/tests/hydra/default.nix
@@ -0,0 +1,59 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+
+  inherit (import ./common.nix { inherit system; }) baseConfig;
+
+  hydraPkgs = {
+    inherit (pkgs) hydra_unstable;
+  };
+
+  makeHydraTest = with pkgs.lib; name: package: makeTest {
+    name = "hydra-${name}";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ lewo ];
+    };
+
+    nodes.machine = { pkgs, lib, ... }: {
+      imports = [ baseConfig ];
+      services.hydra = { inherit package; };
+    };
+
+    testScript = ''
+      # let the system boot up
+      machine.wait_for_unit("multi-user.target")
+      # test whether the database is running
+      machine.wait_for_unit("postgresql.service")
+      # test whether the actual hydra daemons are running
+      machine.wait_for_unit("hydra-init.service")
+      machine.require_unit_state("hydra-queue-runner.service")
+      machine.require_unit_state("hydra-evaluator.service")
+      machine.require_unit_state("hydra-notify.service")
+
+      machine.succeed("hydra-create-user admin --role admin --password admin")
+
+      # wait for the web interface, then create a project with a trivial job
+      machine.wait_for_open_port(3000)
+      machine.succeed("create-trivial-project.sh")
+
+      # make sure the build has completed successfully
+      machine.wait_until_succeeds(
+          'curl -L -s http://localhost:3000/build/1 -H "Accept: application/json" |  jq .buildstatus | xargs test 0 -eq'
+      )
+
+      machine.wait_until_succeeds(
+          'journalctl -eu hydra-notify.service -o cat | grep -q "sending mail notification to hydra@localhost"'
+      )
+    '';
+  };
+
+in
+
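+# Produce one test per Hydra package attribute (currently just hydra_unstable).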
+mapAttrs makeHydraTest hydraPkgs
diff --git a/nixpkgs/nixos/tests/i3wm.nix b/nixpkgs/nixos/tests/i3wm.nix
new file mode 100644
index 000000000000..b216650d8192
--- /dev/null
+++ b/nixpkgs/nixos/tests/i3wm.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "i3wm";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ aszlig ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = lib.mkForce "none+i3";
+    services.xserver.windowManager.i3.enable = true;
+  };
+
+  testScript = { ... }: ''
+    with subtest("ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("/home/alice/.Xauthority")
+        machine.succeed("xauth merge ~alice/.Xauthority")
+
+    with subtest("ensure we get first configuration window"):
+        machine.wait_for_window(r".*?first configuration.*?")
+        machine.sleep(2)
+        machine.screenshot("started")
+
+    with subtest("ensure we generate and save a config"):
+        # press return to indicate we want to gen a new config
+        machine.send_key("\n")
+        machine.sleep(2)
+        machine.screenshot("preconfig")
+        # press alt then return to indicate we want to use alt as our Mod key
+        machine.send_key("alt")
+        machine.send_key("\n")
+        machine.sleep(2)
+        # make sure the config file is created before we continue
+        machine.wait_for_file("/home/alice/.config/i3/config")
+        machine.screenshot("postconfig")
+        machine.sleep(2)
+
+    with subtest("ensure we can open a new terminal"):
+        machine.send_key("alt-ret")
+        machine.sleep(2)
+        machine.wait_for_window(r"alice.*?machine")
+        machine.sleep(2)
+        machine.screenshot("terminal")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/icingaweb2.nix b/nixpkgs/nixos/tests/icingaweb2.nix
new file mode 100644
index 000000000000..e631e667bd50
--- /dev/null
+++ b/nixpkgs/nixos/tests/icingaweb2.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "icingaweb2";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ das_j ];
+  };
+
+  nodes = {
+    icingaweb2 = { config, pkgs, ... }: {
+      services.icingaweb2 = {
+        enable = true;
+
+        modulePackages = with pkgs.icingaweb2Modules; {
+          particles = theme-particles;
+          spring = theme-spring;
+        };
+
+        modules = {
+          doc.enable = true;
+          migrate.enable = true;
+          setup.enable = true;
+          test.enable = true;
+          translation.enable = true;
+        };
+
+        generalConfig = {
+          global = {
+            module_path = "${pkgs.icingaweb2}/modules";
+          };
+        };
+
+        authentications = {
+          icingaweb = {
+            backend = "external";
+          };
+        };
+
+        groupBackends = {
+          icingaweb = {
+            backend = "db";
+            resource = "icingaweb_db";
+          };
+        };
+
+        resources = {
+          # Not used, so no DB server needed
+          icingaweb_db = {
+            type = "db";
+            db = "mysql";
+            host = "localhost";
+            username = "icingaweb2";
+            password = "icingaweb2";
+            dbname = "icingaweb2";
+          };
+        };
+
+        roles = {
+          Administrators = {
+            users = "*";
+            permissions = "*";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    icingaweb2.wait_for_unit("multi-user.target")
+    icingaweb2.succeed("curl -sSf http://icingaweb2/authentication/login")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/iftop.nix b/nixpkgs/nixos/tests/iftop.nix
new file mode 100644
index 000000000000..933f115a8a5a
--- /dev/null
+++ b/nixpkgs/nixos/tests/iftop.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+{
+  name = "iftop";
+  meta.maintainers = with lib.maintainers; [ ma27 ];
+
+  nodes = {
+    withIftop = {
+      imports = [ ./common/user-account.nix ];
+      programs.iftop.enable = true;
+    };
+    withoutIftop = {
+      imports = [ ./common/user-account.nix ];
+      environment.systemPackages = [ pkgs.iftop ];
+    };
+  };
+
+  testScript = ''
+    with subtest("machine with iftop enabled"):
+        withIftop.wait_for_unit("default.target")
+        # limit to eth1 (eth0 is the test driver's control interface)
+        # and don't try name lookups
+        withIftop.succeed("su -l alice -c 'iftop -t -s 1 -n -i eth1'")
+
+    with subtest("machine without iftop"):
+        withoutIftop.wait_for_unit("default.target")
+        # check that iftop is installed but that user alice lacks the required capabilities
+        withoutIftop.succeed("iftop -t -s 1 -n -i eth1")
+        withoutIftop.fail("su -l alice -c 'iftop -t -s 1 -n -i eth1'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/image-contents.nix b/nixpkgs/nixos/tests/image-contents.nix
new file mode 100644
index 000000000000..858f7d8c68f4
--- /dev/null
+++ b/nixpkgs/nixos/tests/image-contents.nix
@@ -0,0 +1,62 @@
+# Tests the contents attribute of nixos/lib/make-disk-image.nix
+# including its user, group, and mode attributes.
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+with import common/ec2.nix { inherit makeTest pkgs; };
+
+let
+  config = (import ../lib/eval-config.nix {
+    inherit system;
+    modules = [
+      ../modules/testing/test-instrumentation.nix
+      ../modules/profiles/qemu-guest.nix
+      {
+        fileSystems."/".device = "/dev/disk/by-label/nixos";
+        boot.loader.grub.device = "/dev/vda";
+        boot.loader.timeout = 0;
+      }
+    ];
+  }).config;
+  image = (import ../lib/make-disk-image.nix {
+    inherit pkgs config;
+    lib = pkgs.lib;
+    format = "qcow2";
+    contents = [
+      {
+        source = pkgs.writeText "testFile" "contents";
+        target = "/testFile";
+        user = "1234";
+        group = "5678";
+        mode = "755";
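+        # 755 should show up as rwxr-xr-x in `ls -l`, which the test script asserts.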
+      }
+      {
+        source = ./.;
+        target = "/testDir";
+      }
+    ];
+  }) + "/nixos.qcow2";
+
+in makeEc2Test {
+  name = "image-contents";
+  inherit image;
+  userData = null;
+  script = ''
+    machine.start()
+    # Test that if contents includes a file, it is copied to the target.
+    assert "content" in machine.succeed("cat /testFile")
+    fileDetails = machine.succeed("ls -l /testFile")
+    assert "1234" in fileDetails
+    assert "5678" in fileDetails
+    assert "rwxr-xr-x" in fileDetails
+
+    # Test that if contents includes a directory, it is copied to the target.
+    dirList = machine.succeed("ls /testDir")
+    assert "image-contents.nix" in dirList
+  '';
+}
diff --git a/nixpkgs/nixos/tests/incron.nix b/nixpkgs/nixos/tests/incron.nix
new file mode 100644
index 000000000000..c978ff27dfad
--- /dev/null
+++ b/nixpkgs/nixos/tests/incron.nix
@@ -0,0 +1,52 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+{
+  name = "incron";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  nodes.machine =
+    { ... }:
+    { services.incron.enable = true;
+      services.incron.extraPackages = [ pkgs.coreutils ];
+      services.incron.systab = ''
+        /test IN_CREATE,IN_MODIFY,IN_CLOSE_WRITE,IN_MOVED_FROM,IN_MOVED_TO echo "$@/$# $%" >> /root/incron.log
+      '';
+
+      # ensure the directory to be monitored exists before incron is started
+      system.activationScripts.incronTest = ''
+        mkdir /test
+      '';
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("incron.service")
+
+    machine.succeed("test -d /test")
+    # create some activity for incron to monitor
+    machine.succeed("touch /test/file")
+    machine.succeed("echo foo >> /test/file")
+    machine.succeed("mv /test/file /root")
+    machine.succeed("mv /root/file /test")
+
+    machine.sleep(1)
+
+    # touch /test/file
+    machine.succeed("grep '/test/file IN_CREATE' /root/incron.log")
+
+    # echo foo >> /test/file
+    machine.succeed("grep '/test/file IN_MODIFY' /root/incron.log")
+    machine.succeed("grep '/test/file IN_CLOSE_WRITE' /root/incron.log")
+
+    # mv /test/file /root
+    machine.succeed("grep '/test/file IN_MOVED_FROM' /root/incron.log")
+
+    # mv /root/file /test
+    machine.succeed("grep '/test/file IN_MOVED_TO' /root/incron.log")
+
+    # ensure something unexpected is not present
+    machine.fail("grep 'IN_OPEN' /root/incron.log")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/incus/container.nix b/nixpkgs/nixos/tests/incus/container.nix
new file mode 100644
index 000000000000..79b9e2fbabdc
--- /dev/null
+++ b/nixpkgs/nixos/tests/incus/container.nix
@@ -0,0 +1,77 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+let
+  releases = import ../../release.nix {
+    configuration = {
+      # Building the documentation would make the test take unnecessarily long:
+      documentation.enable = lib.mkForce false;
+    };
+  };
+
+  container-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+  container-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+in
+{
+  name = "incus-container";
+
+  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+
+  nodes.machine = { ... }: {
+    virtualisation = {
+      # Ensure test VM has enough resources for creating and managing guests
+      cores = 2;
+      memorySize = 1024;
+      diskSize = 4096;
+
+      incus.enable = true;
+    };
+  };
+
+  testScript = ''
+    def instance_is_up(_) -> bool:
+        status, _ = machine.execute("incus exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+        return status == 0
+
+    def set_container(config):
+        machine.succeed(f"incus config set container {config}")
+        machine.succeed("incus restart container")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+
+    machine.wait_for_unit("incus.service")
+
+    # no preseed should mean no service
+    machine.fail("systemctl status incus-preseed.service")
+
+    machine.succeed("incus admin init --minimal")
+
+    with subtest("Container image can be imported"):
+        machine.succeed("incus image import ${container-image-metadata}/*/*.tar.xz ${container-image-rootfs}/*/*.tar.xz --alias nixos")
+
+    with subtest("Container can be launched and managed"):
+        machine.succeed("incus launch nixos container")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+        machine.succeed("echo true | incus exec container /run/current-system/sw/bin/bash -")
+
+    with subtest("Container CPU limits can be managed"):
+        set_container("limits.cpu 1")
+        cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
+        assert cpuinfo == "1", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 1, got: {cpuinfo}"
+
+        set_container("limits.cpu 2")
+        cpuinfo = machine.succeed("incus exec container grep -- -c ^processor /proc/cpuinfo").strip()
+        assert cpuinfo == "2", f"Wrong number of CPUs reported from /proc/cpuinfo, want: 2, got: {cpuinfo}"
+
+    with subtest("Container memory limits can be managed"):
+        set_container("limits.memory 64MB")
+        meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
+        meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
+        assert meminfo_bytes == "62500 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '62500 kB', got: '{meminfo_bytes}'"
+
+        set_container("limits.memory 128MB")
+        meminfo = machine.succeed("incus exec container grep -- MemTotal /proc/meminfo").strip()
+        meminfo_bytes = " ".join(meminfo.split(' ')[-2:])
+        assert meminfo_bytes == "125000 kB", f"Wrong amount of memory reported from /proc/meminfo, want: '125000 kB', got: '{meminfo_bytes}'"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/incus/default.nix b/nixpkgs/nixos/tests/incus/default.nix
new file mode 100644
index 000000000000..c88974605e30
--- /dev/null
+++ b/nixpkgs/nixos/tests/incus/default.nix
@@ -0,0 +1,14 @@
+{
+  system ? builtins.currentSystem,
+  config ? { },
+  pkgs ? import ../../.. { inherit system config; },
+  handleTestOn,
+}:
+{
+  container = import ./container.nix { inherit system pkgs; };
+  preseed = import ./preseed.nix { inherit system pkgs; };
+  socket-activated = import ./socket-activated.nix { inherit system pkgs; };
+  virtual-machine = handleTestOn [ "x86_64-linux" ] ./virtual-machine.nix {
+    inherit system pkgs;
+  };
+}
diff --git a/nixpkgs/nixos/tests/incus/preseed.nix b/nixpkgs/nixos/tests/incus/preseed.nix
new file mode 100644
index 000000000000..47b2d0cd6228
--- /dev/null
+++ b/nixpkgs/nixos/tests/incus/preseed.nix
@@ -0,0 +1,60 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+{
+  name = "incus-preseed";
+
+  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      incus.enable = true;
+
+      incus.preseed = {
+        networks = [
+          {
+            name = "nixostestbr0";
+            type = "bridge";
+            config = {
+              "ipv4.address" = "10.0.100.1/24";
+              "ipv4.nat" = "true";
+            };
+          }
+        ];
+        profiles = [
+          {
+            name = "nixostest_default";
+            devices = {
+              eth0 = {
+                name = "eth0";
+                network = "nixostestbr0";
+                type = "nic";
+              };
+              root = {
+                path = "/";
+                pool = "default";
+                size = "35GiB";
+                type = "disk";
+              };
+            };
+          }
+        ];
+        storage_pools = [
+          {
+            name = "nixostest_pool";
+            driver = "dir";
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("incus.service")
+    machine.wait_for_unit("incus-preseed.service")
+
+    with subtest("Verify preseed resources created"):
+      machine.succeed("incus profile show nixostest_default")
+      machine.succeed("incus network info nixostestbr0")
+      machine.succeed("incus storage show nixostest_pool")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/incus/socket-activated.nix b/nixpkgs/nixos/tests/incus/socket-activated.nix
new file mode 100644
index 000000000000..4d25b26a15f5
--- /dev/null
+++ b/nixpkgs/nixos/tests/incus/socket-activated.nix
@@ -0,0 +1,26 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+{
+  name = "incus-socket-activated";
+
+  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      incus.enable = true;
+      incus.socketActivation = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("incus.socket")
+
+    # ensure service is not running by default
+    machine.fail("systemctl is-active incus.service")
+    machine.fail("systemctl is-active incus-preseed.service")
+
+    # access the socket and ensure the service starts
+    machine.succeed("incus list")
+    machine.wait_for_unit("incus.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/incus/virtual-machine.nix b/nixpkgs/nixos/tests/incus/virtual-machine.nix
new file mode 100644
index 000000000000..bfa116679d43
--- /dev/null
+++ b/nixpkgs/nixos/tests/incus/virtual-machine.nix
@@ -0,0 +1,55 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  releases = import ../../release.nix {
+    configuration = {
+      # Building the documentation would make the test take unnecessarily long:
+      documentation.enable = lib.mkForce false;
+
+      # Our tests require `grep` & friends:
+      environment.systemPackages = with pkgs; [busybox];
+    };
+  };
+
+  vm-image-metadata = releases.lxdVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
+  vm-image-disk = releases.lxdVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
+
+  instance-name = "instance1";
+in
+{
+  name = "incus-virtual-machine";
+
+  meta.maintainers = with lib.maintainers; [ adamcstephens ];
+
+  nodes.machine = {...}: {
+    virtualisation = {
+      # Ensure test VM has enough resources for creating and managing guests
+      cores = 2;
+      memorySize = 1024;
+      diskSize = 4096;
+
+      incus.enable = true;
+    };
+  };
+
+  testScript = ''
+    def instance_is_up(_) -> bool:
+      status, _ = machine.execute("incus exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+      return status == 0
+
+    machine.wait_for_unit("incus.service")
+
+    machine.succeed("incus admin init --minimal")
+
+    with subtest("virtual-machine image can be imported"):
+        machine.succeed("incus image import ${vm-image-metadata}/*/*.tar.xz ${vm-image-disk}/nixos.qcow2 --alias nixos")
+
+    with subtest("virtual-machine can be launched and become available"):
+        machine.succeed("incus launch nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+
+    with subtest("lxd-agent is started"):
+        machine.succeed("incus exec ${instance-name} systemctl is-active lxd-agent")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/influxdb.nix b/nixpkgs/nixos/tests/influxdb.nix
new file mode 100644
index 000000000000..03026f8404be
--- /dev/null
+++ b/nixpkgs/nixos/tests/influxdb.nix
@@ -0,0 +1,40 @@
+# This test runs influxdb and checks if influxdb is up and running
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "influxdb";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    one = { ... }: {
+      services.influxdb.enable = true;
+      environment.systemPackages = [ pkgs.httpie ];
+    };
+  };
+
+  testScript = ''
+    import shlex
+
+    start_all()
+
+    one.wait_for_unit("influxdb.service")
+
+    # create database
+    one.succeed(
+        "curl -XPOST http://localhost:8086/query --data-urlencode 'q=CREATE DATABASE test'"
+    )
+
+    # write some points and run simple query
+    out = one.succeed(
+        "curl -XPOST 'http://localhost:8086/write?db=test' --data-binary 'cpu_load_short,host=server01,region=us-west value=0.64 1434055562000000000'"
+    )
+
+    qv = "SELECT value FROM cpu_load_short WHERE region='us-west'"
+    cmd = f'curl -GET "http://localhost:8086/query?db=test" --data-urlencode {shlex.quote("q="+ qv)}'
+    out = one.succeed(cmd)
+
+    assert "2015-06-11T20:46:02Z" in out
+    assert "0.64" in out
+  '';
+})
diff --git a/nixpkgs/nixos/tests/influxdb2.nix b/nixpkgs/nixos/tests/influxdb2.nix
new file mode 100644
index 000000000000..1631ac1d9408
--- /dev/null
+++ b/nixpkgs/nixos/tests/influxdb2.nix
@@ -0,0 +1,225 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "influxdb2";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    environment.systemPackages = [ pkgs.influxdb2-cli ];
+    # Use a very long RestartSec so that any restart needed to rewrite tokens
+    # must be triggered explicitly by the module rather than by Restart=on-failure behavior
+    systemd.services.influxdb2.serviceConfig.RestartSec = 6000;
+    services.influxdb2.enable = true;
+    services.influxdb2.provision = {
+      enable = true;
+      initialSetup = {
+        organization = "default";
+        bucket = "default";
+        passwordFile = pkgs.writeText "admin-pw" "ExAmPl3PA55W0rD";
+        tokenFile = pkgs.writeText "admin-token" "verysecureadmintoken";
+      };
+      organizations.someorg = {
+        buckets.somebucket = {};
+        auths.sometoken = {
+          description = "some auth token";
+          readBuckets = ["somebucket"];
+          writeBuckets = ["somebucket"];
+        };
+      };
+      users.someuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga";
+    };
+
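+    # Each specialisation below changes the provisioning config so the test can switch
+    # into it and verify that re-provisioning applies (or removes) exactly the expected resources.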
+    specialisation.withModifications.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.someorg.buckets.somebucket.present = false;
+        organizations.someorg.auths.sometoken.present = false;
+        users.someuser.present = false;
+
+        organizations.myorg = {
+          description = "Myorg description";
+          buckets.mybucket = {
+            description = "Mybucket description";
+          };
+          auths.mytoken = {
+            operator = true;
+            description = "operator token";
+            tokenFile = pkgs.writeText "tmp-tok" "someusertoken";
+          };
+        };
+        users.myuser.passwordFile = pkgs.writeText "tmp-pw" "abcgoiuhaoga";
+      };
+    };
+
+    specialisation.withParentDelete.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.someorg.present = false;
+        # Deleting the parent implies:
+        #organizations.someorg.buckets.somebucket.present = false;
+        #organizations.someorg.auths.sometoken.present = false;
+      };
+    };
+
+    specialisation.withNewTokens.configuration = { ... }: {
+      services.influxdb2.provision = {
+        organizations.default = {
+          auths.operator = {
+            operator = true;
+            description = "new optoken";
+            tokenFile = pkgs.writeText "tmp-tok" "newoptoken";
+          };
+          auths.allaccess = {
+            operator = true;
+            description = "new allaccess";
+            tokenFile = pkgs.writeText "tmp-tok" "newallaccess";
+          };
+          auths.specifics = {
+            description = "new specifics";
+            readPermissions = ["users" "tasks"];
+            writePermissions = ["tasks"];
+            tokenFile = pkgs.writeText "tmp-tok" "newspecificstoken";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
+      tokenArg = "--token verysecureadmintoken";
+    in ''
+      def assert_contains(haystack, needle):
+          if needle not in haystack:
+              print("The haystack that will cause the following exception is:")
+              print("---")
+              print(haystack)
+              print("---")
+              raise Exception(f"Expected string '{needle}' was not found")
+
+      def assert_lacks(haystack, needle):
+          if needle in haystack:
+              print("The haystack that will cause the following exception is:")
+              print("---")
+              print(haystack, end="")
+              print("---")
+              raise Exception(f"Unexpected string '{needle}' was found")
+
+      machine.wait_for_unit("influxdb2.service")
+
+      machine.fail("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:wrongpassword")
+      machine.succeed("curl --fail -X POST 'http://localhost:8086/api/v2/signin' -u admin:ExAmPl3PA55W0rD")
+
+      out = machine.succeed("influx org list ${tokenArg}")
+      assert_contains(out, "default")
+      assert_lacks(out, "myorg")
+      assert_contains(out, "someorg")
+
+      out = machine.succeed("influx bucket list ${tokenArg} --org default")
+      assert_contains(out, "default")
+
+      machine.fail("influx bucket list ${tokenArg} --org myorg")
+
+      out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+      assert_contains(out, "somebucket")
+
+      out = machine.succeed("influx user list ${tokenArg}")
+      assert_contains(out, "admin")
+      assert_lacks(out, "myuser")
+      assert_contains(out, "someuser")
+
+      out = machine.succeed("influx auth list ${tokenArg}")
+      assert_lacks(out, "operator token")
+      assert_contains(out, "some auth token")
+
+      with subtest("withModifications"):
+        machine.succeed('${specialisations}/withModifications/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_contains(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+        assert_lacks(out, "somebucket")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_lacks(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_lacks(out, "some auth token")
+
+        # Make sure the user token is also usable
+        machine.succeed("influx auth list --token someusertoken")
+
+      with subtest("keepsUnrelated"):
+        machine.succeed('${nodes.machine.system.build.toplevel}/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_contains(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org default")
+        assert_contains(out, "default")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org someorg")
+        assert_contains(out, "somebucket")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_contains(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_contains(out, "some auth token")
+
+      with subtest("withParentDelete"):
+        machine.succeed('${specialisations}/withParentDelete/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx org list ${tokenArg}")
+        assert_contains(out, "default")
+        assert_contains(out, "myorg")
+        assert_lacks(out, "someorg")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org default")
+        assert_contains(out, "default")
+
+        out = machine.succeed("influx bucket list ${tokenArg} --org myorg")
+        assert_contains(out, "mybucket")
+
+        machine.fail("influx bucket list ${tokenArg} --org someorg")
+
+        out = machine.succeed("influx user list ${tokenArg}")
+        assert_contains(out, "admin")
+        assert_contains(out, "myuser")
+        assert_contains(out, "someuser")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_lacks(out, "some auth token")
+
+      with subtest("withNewTokens"):
+        machine.succeed('${specialisations}/withNewTokens/bin/switch-to-configuration test')
+        machine.wait_for_unit("influxdb2.service")
+
+        out = machine.succeed("influx auth list ${tokenArg}")
+        assert_contains(out, "operator token")
+        assert_contains(out, "some auth token")
+        assert_contains(out, "new optoken")
+        assert_contains(out, "new allaccess")
+        assert_contains(out, "new specifics")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/initrd-luks-empty-passphrase.nix b/nixpkgs/nixos/tests/initrd-luks-empty-passphrase.nix
new file mode 100644
index 000000000000..a846c120415d
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-luks-empty-passphrase.nix
@@ -0,0 +1,105 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. {inherit system config; }
+, systemdStage1 ? false }:
+import ./make-test-python.nix ({ lib, pkgs, ... }: let
+
+  keyfile = pkgs.writeText "luks-keyfile" ''
+    MIGHAoGBAJ4rGTSo/ldyjQypd0kuS7k2OSsmQYzMH6TNj3nQ/vIUjDn7fqa3slt2
+    gV6EK3TmTbGc4tzC1v4SWx2m+2Bjdtn4Fs4wiBwn1lbRdC6i5ZYCqasTWIntWn+6
+    FllUkMD5oqjOR/YcboxG8Z3B5sJuvTP9llsF+gnuveWih9dpbBr7AgEC
+  '';
+
+in {
+  name = "initrd-luks-empty-passphrase";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = lib.optionals (!systemdStage1) [ ./common/auto-format-root-device.nix ];
+
+    virtualisation = {
+      emptyDiskImages = [ 512 ];
+      useBootLoader = true;
+      useEFIBoot = true;
+      # This requires access to a host Nix store,
+      # because the new root device is /dev/vdb,
+      # an empty 512 MiB drive that contains no Nix store.
+      mountHostNixStore = true;
+      fileSystems."/".autoFormat = lib.mkIf systemdStage1 true;
+    };
+
+    boot.loader.systemd-boot.enable = true;
+    boot.initrd.systemd = lib.mkIf systemdStage1 {
+      enable = true;
+      emergencyAccess = true;
+    };
+    environment.systemPackages = with pkgs; [ cryptsetup ];
+
+    specialisation.boot-luks-wrong-keyfile.configuration = {
+      boot.initrd.luks.devices = lib.mkVMOverride {
+        cryptroot = {
+          device = "/dev/vdb";
+          keyFile = "/etc/cryptroot.key";
+          tryEmptyPassphrase = true;
+          fallbackToPassword = !systemdStage1;
+        };
+      };
+      virtualisation.rootDevice = "/dev/mapper/cryptroot";
+      boot.initrd.secrets."/etc/cryptroot.key" = keyfile;
+    };
+
+    specialisation.boot-luks-missing-keyfile.configuration = {
+      boot.initrd.luks.devices = lib.mkVMOverride {
+        cryptroot = {
+          device = "/dev/vdb";
+          keyFile = "/etc/cryptroot.key";
+          tryEmptyPassphrase = true;
+          fallbackToPassword = !systemdStage1;
+        };
+      };
+      virtualisation.rootDevice = "/dev/mapper/cryptroot";
+    };
+  };
+
+  testScript = ''
+    # Format the LUKS volume with an empty passphrase, so boot first tries the
+    # configured keyfile and then has to fall back to the empty passphrase.
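+    # Both specialisations point keyFile at /etc/cryptroot.key: in the
+    # boot-luks-wrong-keyfile case the embedded initrd secret does not match
+    # the volume, and in the boot-luks-missing-keyfile case no such secret is
+    # embedded at all, so in both cases unlocking must succeed via the empty
+    # passphrase.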
+
+
+    def grub_select_boot_luks_wrong_key_file():
+        """
+        Selects "boot-luks" from the GRUB menu
+        to trigger a login request.
+        """
+        machine.send_monitor_command("sendkey down")
+        machine.send_monitor_command("sendkey down")
+        machine.send_monitor_command("sendkey ret")
+
+    def grub_select_boot_luks_missing_key_file():
+        """
+        Selects "boot-luks" from the GRUB menu
+        to trigger a login request.
+        """
+        machine.send_monitor_command("sendkey down")
+        machine.send_monitor_command("sendkey ret")
+
+    # Create encrypted volume
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("echo "" | cryptsetup luksFormat /dev/vdb --batch-mode")
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-wrong-keyfile.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Check if rootfs is on /dev/mapper/cryptroot
+    machine.wait_for_unit("multi-user.target")
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+
+    # Choose boot-luks-missing-keyfile specialisation
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-missing-keyfile.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Check if rootfs is on /dev/mapper/cryptroot
+    machine.wait_for_unit("multi-user.target")
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/initrd-network-openvpn/default.nix b/nixpkgs/nixos/tests/initrd-network-openvpn/default.nix
new file mode 100644
index 000000000000..769049905eb8
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-openvpn/default.nix
@@ -0,0 +1,164 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, systemdStage1 ? false
+}:
+
+import ../make-test-python.nix ({ lib, ...}:
+
+{
+  name = "initrd-network-openvpn";
+
+  nodes =
+    let
+
+      # Inlining of the shared secret for the
+      # OpenVPN server and client
+      secretblock = ''
+        secret [inline]
+        <secret>
+        ${lib.readFile ./shared.key}
+        </secret>
+        '';
+
+    in
+    {
+
+      # Minimal test case to check a successful boot, even with invalid config
+      minimalboot =
+        { ... }:
+        {
+          boot.initrd.systemd.enable = systemdStage1;
+          boot.initrd.network = {
+            enable = true;
+            openvpn = {
+              enable = true;
+              configuration = builtins.toFile "initrd.ovpn" "";
+            };
+          };
+        };
+
+      # initrd VPN client
+      ovpnclient =
+        { ... }:
+        {
+          virtualisation.useBootLoader = true;
+          virtualisation.vlans = [ 1 ];
+
+          boot.initrd = {
+            systemd.enable = systemdStage1;
+            systemd.extraBin.nc = "${pkgs.busybox}/bin/nc";
+            systemd.services.nc = {
+              requiredBy = ["initrd.target"];
+              after = ["network.target"];
+              serviceConfig = {
+                ExecStart = "/bin/nc -p 1234 -lke /bin/echo TESTVALUE";
+                Type = "oneshot";
+              };
+            };
+
+            # This command does not fork, so the VM stays in the state where
+            # only the initramfs is loaded
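+            # (the nc service defined above serves the same purpose when the
+            # systemd-based initrd is used)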
+            preLVMCommands =
+            ''
+              /bin/nc -p 1234 -lke /bin/echo TESTVALUE
+            '';
+
+            network = {
+              enable = true;
+
+              # Work around udhcpc only getting a lease on eth0
+              postCommands = ''
+                /bin/ip addr add 192.168.1.2/24 dev eth1
+              '';
+
+              # Example configuration for OpenVPN
+              # This is the main reason for this test
+              openvpn = {
+                enable = true;
+                configuration = "${./initrd.ovpn}";
+              };
+            };
+          };
+        };
+
+      # VPN server and gateway for ovpnclient between vlan 1 and 2
+      ovpnserver =
+        { ... }:
+        {
+          virtualisation.vlans = [ 1 2 ];
+
+          # Enable NAT and forward port 12345 to port 1234
+          networking.nat = {
+            enable = true;
+            internalInterfaces = [ "tun0" ];
+            externalInterface = "eth2";
+            forwardPorts = [ { destination = "10.8.0.2:1234";
+                               sourcePort = 12345; } ];
+          };
+
+          # Trust tun0 and allow the VPN Server to be reached
+          networking.firewall = {
+            trustedInterfaces = [ "tun0" ];
+            allowedUDPPorts = [ 1194 ];
+          };
+
+          # Minimal OpenVPN server configuration
+          services.openvpn.servers.testserver =
+          {
+            config = ''
+              dev tun0
+              ifconfig 10.8.0.1 10.8.0.2
+              cipher AES-256-CBC
+              ${secretblock}
+            '';
+          };
+        };
+
+      # Client that resides in the "external" VLAN
+      testclient =
+        { ... }:
+        {
+          virtualisation.vlans = [ 2 ];
+        };
+  };
+
+
+  testScript =
+    ''
+      # Minimal test case, checks whether enabling (with invalid config) harms
+      # the boot process
+      with subtest("Check for successful boot with broken openvpn config"):
+          minimalboot.start()
+          # If we get to multi-user.target, we booted successfully
+          minimalboot.wait_for_unit("multi-user.target")
+          minimalboot.shutdown()
+
+      # More elaborate test case: ovpnclient (which uses this module) can be
+      # reached by testclient only via ovpnserver, so successful connections
+      # indirectly prove that the initrd VPN client works.
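+      # Rough topology, as configured above:
+      #   testclient (vlan 2) -> ovpnserver 192.168.2.3:12345
+      #     -> NAT/port forward -> tun0 -> ovpnclient 10.8.0.2:1234
+      #   (ovpnclient is 192.168.1.2 on vlan 1, ovpnserver is 192.168.1.3)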
+      with subtest("Check for connection from initrd VPN client, config as file"):
+          ovpnserver.start()
+          testclient.start()
+          ovpnclient.start()
+
+          # Wait until the OpenVPN Server is available
+          ovpnserver.wait_for_unit("openvpn-testserver.service")
+          ovpnserver.succeed("ping -c 1 10.8.0.1")
+
+          # Wait for the client to connect
+          ovpnserver.wait_until_succeeds("ping -c 1 10.8.0.2")
+
+          # Wait until the testclient has network
+          testclient.wait_for_unit("network.target")
+
+          # Check that ovpnclient is reachable over vlan 1
+          ovpnserver.succeed("nc -w 2 192.168.1.2 1234 | grep -q TESTVALUE")
+
+          # Check that ovpnclient is reachable over tun0
+          ovpnserver.succeed("nc -w 2 10.8.0.2 1234 | grep -q TESTVALUE")
+
+          # Check that ovpnclient is reachable from testclient over the gateway
+          testclient.succeed("nc -w 2 192.168.2.3 12345 | grep -q TESTVALUE")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/initrd-network-openvpn/initrd.ovpn b/nixpkgs/nixos/tests/initrd-network-openvpn/initrd.ovpn
new file mode 100644
index 000000000000..3ada4130e868
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-openvpn/initrd.ovpn
@@ -0,0 +1,30 @@
+remote 192.168.1.3
+dev tun
+ifconfig 10.8.0.2 10.8.0.1
+# Only force VLAN 2 through the VPN
+route 192.168.2.0 255.255.255.0 10.8.0.1
+cipher AES-256-CBC
+secret [inline]
+<secret>
+#
+# 2048 bit OpenVPN static key
+#
+-----BEGIN OpenVPN Static key V1-----
+553aabe853acdfe51cd6fcfea93dcbb0
+c8797deadd1187606b1ea8f2315eb5e6
+67c0d7e830f50df45686063b189d6c6b
+aab8bb3430cc78f7bb1f78628d5c3742
+0cef4f53a5acab2894905f4499f95d8e
+e69b7b6748b17016f89e19e91481a9fd
+bf8c10651f41a1d4fdf5f438925a6733
+13cec8f04701eb47b8f7ffc48bc3d7af
+65f07bce766015b87c3db4d668c655ff
+be5a69522a8e60ccb217f8521681b45d
+27c0b70bdfbfbb426c7646d80adf7482
+3ddac58b25cb1c1bb100de974478b4c6
+8b45a94261a2405e99810cb2b3abd49f
+21b3198ada87ff3c4e656a008e540a8d
+e7811584363597599cce2040a68ac00e
+f2125540e0f7f4adc37cb3f0d922eeb7
+-----END OpenVPN Static key V1-----
+</secret>
diff --git a/nixpkgs/nixos/tests/initrd-network-openvpn/shared.key b/nixpkgs/nixos/tests/initrd-network-openvpn/shared.key
new file mode 100644
index 000000000000..248a91a3e3d5
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-openvpn/shared.key
@@ -0,0 +1,21 @@
+#
+# 2048 bit OpenVPN static key
+#
+-----BEGIN OpenVPN Static key V1-----
+553aabe853acdfe51cd6fcfea93dcbb0
+c8797deadd1187606b1ea8f2315eb5e6
+67c0d7e830f50df45686063b189d6c6b
+aab8bb3430cc78f7bb1f78628d5c3742
+0cef4f53a5acab2894905f4499f95d8e
+e69b7b6748b17016f89e19e91481a9fd
+bf8c10651f41a1d4fdf5f438925a6733
+13cec8f04701eb47b8f7ffc48bc3d7af
+65f07bce766015b87c3db4d668c655ff
+be5a69522a8e60ccb217f8521681b45d
+27c0b70bdfbfbb426c7646d80adf7482
+3ddac58b25cb1c1bb100de974478b4c6
+8b45a94261a2405e99810cb2b3abd49f
+21b3198ada87ff3c4e656a008e540a8d
+e7811584363597599cce2040a68ac00e
+f2125540e0f7f4adc37cb3f0d922eeb7
+-----END OpenVPN Static key V1-----
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/default.nix b/nixpkgs/nixos/tests/initrd-network-ssh/default.nix
new file mode 100644
index 000000000000..17b6c21ff1e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/default.nix
@@ -0,0 +1,73 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "initrd-network-ssh";
+  meta.maintainers = with lib.maintainers; [ willibutz emily ];
+
+  nodes = {
+    server =
+      { config, ... }:
+      {
+        boot.kernelParams = [
+          "ip=${config.networking.primaryIPAddress}:::255.255.255.0::eth1:none"
+        ];
+        boot.initrd.network = {
+          enable = true;
+          ssh = {
+            enable = true;
+            authorizedKeys = [ (lib.readFile ./id_ed25519.pub) ];
+            port = 22;
+            hostKeys = [ ./ssh_host_ed25519_key ];
+          };
+        };
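+        # Stay in the initrd until the client has connected over SSH and
+        # created /fnord (see testScript below), then power off.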
+        boot.initrd.preLVMCommands = ''
+          while true; do
+            if [ -f fnord ]; then
+              poweroff
+            fi
+            sleep 1
+          done
+        '';
+      };
+
+    client =
+      { config, ... }:
+      {
+        environment.etc = {
+          knownHosts = {
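+            # Assemble a known_hosts line of the form "server,<ip> <hostkey>";
+            # the <ip> is extracted from the networking.extraHosts entries that
+            # the test driver generates for the nodes.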
+            text = lib.concatStrings [
+              "server,"
+              "${toString (lib.head (lib.splitString " " (
+                toString (lib.elemAt (lib.splitString "\n" config.networking.extraHosts) 2)
+              )))} "
+              "${lib.readFile ./ssh_host_ed25519_key.pub}"
+            ];
+          };
+          sshKey = {
+            source = ./id_ed25519;
+            mode = "0600";
+          };
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    client.wait_for_unit("network.target")
+
+
+    def ssh_is_up(_) -> bool:
+        status, _ = client.execute("nc -z server 22")
+        return status == 0
+
+
+    with client.nested("waiting for SSH server to come up"):
+        retry(ssh_is_up)
+
+
+    client.succeed(
+        "ssh -i /etc/sshKey -o UserKnownHostsFile=/etc/knownHosts server 'touch /fnord'"
+    )
+    client.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/generate-keys.nix b/nixpkgs/nixos/tests/initrd-network-ssh/generate-keys.nix
new file mode 100644
index 000000000000..3d7978890ab0
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/generate-keys.nix
@@ -0,0 +1,10 @@
+with import ../../.. {};
+
+runCommand "gen-keys" {
+    buildInputs = [ openssh ];
+  }
+  ''
+    mkdir $out
+    ssh-keygen -q -t ed25519 -N "" -f $out/ssh_host_ed25519_key
+    ssh-keygen -q -t ed25519 -N "" -f $out/id_ed25519
+  ''
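+# A sketch of how the checked-in keys can be regenerated (assumes this file is
+# built from this directory):
+#   nix-build generate-keys.nix -o keys && cp keys/* .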
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519 b/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519
new file mode 100644
index 000000000000..f914b3f712fc
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACAVcX+32Yqig25RxRA8bel/f604wV0p/63um+Oku/3vfwAAAJi/AJZMvwCW
+TAAAAAtzc2gtZWQyNTUxOQAAACAVcX+32Yqig25RxRA8bel/f604wV0p/63um+Oku/3vfw
+AAAEAPLjQusjrB90Lk3996G3AbtTeK+XweNgxaegYnml/A/RVxf7fZiqKDblHFEDxt6X9/
+rTjBXSn/re6b46S7/e9/AAAAEG5peGJsZEBsb2NhbGhvc3QBAgMEBQ==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519.pub b/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519.pub
new file mode 100644
index 000000000000..40de4a8ac602
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIBVxf7fZiqKDblHFEDxt6X9/rTjBXSn/re6b46S7/e9/ nixbld@localhost
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key b/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key
new file mode 100644
index 000000000000..f1e29459b7a3
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDP9Mz6qlxdQqA4omrgbOlVsxSGONCJstjW9zqquajlIAAAAJg0WGFGNFhh
+RgAAAAtzc2gtZWQyNTUxOQAAACDP9Mz6qlxdQqA4omrgbOlVsxSGONCJstjW9zqquajlIA
+AAAEA0Hjs7LfFPdTf3ThGx6GNKvX0ItgzgXs91Z3oGIaF6S8/0zPqqXF1CoDiiauBs6VWz
+FIY40Imy2Nb3Oqq5qOUgAAAAEG5peGJsZEBsb2NhbGhvc3QBAgMEBQ==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key.pub b/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key.pub
new file mode 100644
index 000000000000..3aa1587e1dce
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network-ssh/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIM/0zPqqXF1CoDiiauBs6VWzFIY40Imy2Nb3Oqq5qOUg nixbld@localhost
diff --git a/nixpkgs/nixos/tests/initrd-network.nix b/nixpkgs/nixos/tests/initrd-network.nix
new file mode 100644
index 000000000000..f2483b7393de
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-network.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "initrd-network";
+
+  meta.maintainers = [ pkgs.lib.maintainers.eelco ];
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    boot.initrd.network.enable = true;
+    boot.initrd.network.postCommands =
+      ''
+        ip addr show
+        ip route show
+        ip addr | grep 10.0.2.15 || exit 1
+        ping -c1 10.0.2.2 || exit 1
+      '';
+    # Check if cleanup was done correctly
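+    # (by the time the root filesystem is mounted, the initrd-assigned address
+    # and routes must be gone again, so the grep and ping are expected to fail)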
+    boot.initrd.postMountCommands = lib.mkAfter
+      ''
+        ip addr show
+        ip route show
+        ip addr | grep 10.0.2.15 && exit 1
+        ping -c1 10.0.2.2 && exit 1
+      '';
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("ip addr show >&2")
+      machine.succeed("ip route show >&2")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/initrd-secrets-changing.nix b/nixpkgs/nixos/tests/initrd-secrets-changing.nix
new file mode 100644
index 000000000000..d6f9ef9ced83
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-secrets-changing.nix
@@ -0,0 +1,57 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, lib ? pkgs.lib
+, testing ? import ../lib/testing-python.nix { inherit system pkgs; }
+}:
+
+let
+  secret1InStore = pkgs.writeText "topsecret" "iamasecret1";
+  secret2InStore = pkgs.writeText "topsecret" "iamasecret2";
+in
+
+testing.makeTest {
+  name = "initrd-secrets-changing";
+
+  nodes.machine = { ... }: {
+    virtualisation.useBootLoader = true;
+
+    boot.loader.grub.device = "/dev/vda";
+
+    boot.initrd.secrets = {
+      "/test" = secret1InStore;
+      "/run/keys/test" = secret1InStore;
+    };
+    boot.initrd.postMountCommands = "cp /test /mnt-root/secret-from-initramfs";
+
+    specialisation.secrets2System.configuration = {
+      boot.initrd.secrets = lib.mkForce {
+        "/test" = secret2InStore;
+        "/run/keys/test" = secret2InStore;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("multi-user.target")
+    print(machine.succeed("cat /run/keys/test"))
+    machine.succeed(
+        "cmp ${secret1InStore} /secret-from-initramfs",
+        "cmp ${secret1InStore} /run/keys/test",
+    )
+    # Select the second boot entry corresponding to the specialisation secrets2System.
+    machine.succeed("grub-reboot 1")
+    machine.shutdown()
+
+    with subtest("Check that the specialisation's secrets are distinct despite identical kernels"):
+        machine.wait_for_unit("multi-user.target")
+        print(machine.succeed("cat /run/keys/test"))
+        machine.succeed(
+            "cmp ${secret2InStore} /secret-from-initramfs",
+            "cmp ${secret2InStore} /run/keys/test",
+        )
+        machine.shutdown()
+  '';
+}
diff --git a/nixpkgs/nixos/tests/initrd-secrets.nix b/nixpkgs/nixos/tests/initrd-secrets.nix
new file mode 100644
index 000000000000..0f3f83b0904e
--- /dev/null
+++ b/nixpkgs/nixos/tests/initrd-secrets.nix
@@ -0,0 +1,41 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, lib ? pkgs.lib
+, testing ? import ../lib/testing-python.nix { inherit system pkgs; }
+}:
+let
+  secretInStore = pkgs.writeText "topsecret" "iamasecret";
+  testWithCompressor = compressor: testing.makeTest {
+    name = "initrd-secrets-${compressor}";
+
+    meta.maintainers = [ lib.maintainers.lheckemann ];
+
+    nodes.machine = { ... }: {
+      virtualisation.useBootLoader = true;
+      boot.initrd.secrets = {
+        "/test" = secretInStore;
+
+        # This should *not* need to be copied in postMountCommands
+        "/run/keys/test" = secretInStore;
+      };
+      boot.initrd.postMountCommands = ''
+        cp /test /mnt-root/secret-from-initramfs
+      '';
+      boot.initrd.compressor = compressor;
+      # zstd compression is only supported by kernels 5.9 and later. Remove when 5.10 becomes the default.
+      boot.kernelPackages = pkgs.linuxPackages_latest;
+    };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed(
+          "cmp ${secretInStore} /secret-from-initramfs",
+          "cmp ${secretInStore} /run/keys/test",
+      )
+    '';
+  };
+in lib.flip lib.genAttrs testWithCompressor [
+  "cat" "gzip" "bzip2" "xz" "lzma" "lzop" "pigz" "pixz" "zstd"
+]
diff --git a/nixpkgs/nixos/tests/input-remapper.nix b/nixpkgs/nixos/tests/input-remapper.nix
new file mode 100644
index 000000000000..1b0350063f7f
--- /dev/null
+++ b/nixpkgs/nixos/tests/input-remapper.nix
@@ -0,0 +1,52 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+  {
+    name = "input-remapper";
+    meta = {
+      maintainers = with pkgs.lib.maintainers; [ LunNova ];
+    };
+
+    nodes.machine = { config, ... }:
+      let user = config.users.users.sybil; in
+      {
+        imports = [
+          ./common/user-account.nix
+          ./common/x11.nix
+        ];
+
+        services.xserver.enable = true;
+        services.input-remapper.enable = true;
+        users.users.sybil = { isNormalUser = true; group = "wheel"; };
+        test-support.displayManager.auto.user = user.name;
+        # Workaround: pkexec does not work in the test environment:
+        #   Error creating textual authentication agent:
+        #     Error opening current controlling terminal for the process (`/dev/tty'):
+        #     No such device or address
+        # Passwordless pkexec via the polkit module does not work either.
+        # To allow the program to run, we replace pkexec with sudo and enable
+        # passwordless sudo. This is not correct in general, but good enough
+        # for this test.
+        security.sudo = { enable = true; wheelNeedsPassword = false; };
+        security.wrappers.pkexec = pkgs.lib.mkForce
+          {
+            setuid = true;
+            owner = "root";
+            group = "root";
+            source = "${pkgs.sudo}/bin/sudo";
+          };
+      };
+
+    enableOCR = true;
+
+    testScript = { nodes, ... }: ''
+      start_all()
+      machine.wait_for_x()
+
+      machine.succeed("systemctl status input-remapper.service")
+      machine.execute("su - sybil -c input-remapper-gtk >&2 &")
+
+      machine.wait_for_text("Input Remapper")
+      machine.wait_for_text("Preset")
+      machine.wait_for_text("Change Key")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/inspircd.nix b/nixpkgs/nixos/tests/inspircd.nix
new file mode 100644
index 000000000000..f4d82054011c
--- /dev/null
+++ b/nixpkgs/nixos/tests/inspircd.nix
@@ -0,0 +1,93 @@
+let
+  clients = [
+    "ircclient1"
+    "ircclient2"
+  ];
+  server = "inspircd";
+  ircPort = 6667;
+  channel = "nixos-cat";
+  iiDir = "/tmp/irc";
+in
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "inspircd";
+  nodes = {
+    "${server}" = {
+      networking.firewall.allowedTCPPorts = [ ircPort ];
+      services.inspircd = {
+        enable = true;
+        package = pkgs.inspircdMinimal;
+        config = ''
+          <bind address="" port="${toString ircPort}" type="clients">
+          <connect name="main" allow="*" pingfreq="15">
+        '';
+      };
+    };
+  } // lib.listToAttrs (builtins.map (client: lib.nameValuePair client {
+    imports = [
+      ./common/user-account.nix
+    ];
+
+    systemd.services.ii = {
+      requires = [ "network.target" ];
+      wantedBy = [ "default.target" ];
+
+      serviceConfig = {
+        Type = "simple";
+        ExecPreStartPre = "mkdir -p ${iiDir}";
+        ExecStart = ''
+          ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir}
+        '';
+        User = "alice";
+      };
+    };
+  }) clients);
+
+  testScript =
+    let
+      msg = client: "Hello, my name is ${client}";
+      clientScript = client: [
+        ''
+          ${client}.wait_for_unit("network.target")
+          ${client}.systemctl("start ii")
+          ${client}.wait_for_unit("ii")
+          ${client}.wait_for_file("${iiDir}/${server}/out")
+        ''
+        # wait until first PING from server arrives before joining,
+        # so we don't try it too early
+        ''
+          ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out")
+        ''
+        # join ${channel}
+        ''
+          ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in")
+          ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in")
+        ''
+        # send a greeting
+        ''
+          ${client}.succeed(
+              "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in"
+          )
+        ''
+        # check that all greetings arrived on all clients
+      ] ++ builtins.map (other: ''
+        ${client}.succeed(
+            "grep '${msg other}$' ${iiDir}/${server}/#${channel}/out"
+        )
+      '') clients;
+
+      # foldl', but requires a non-empty list instead of a start value
+      reduce = f: list:
+        builtins.foldl' f (builtins.head list) (builtins.tail list);
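+      # e.g. reduce (lib.zipListsWith (a: b: a + b)) [ [ "x1" "x2" ] [ "y1" "y2" ] ]
+      # evaluates to [ "x1y1" "x2y2" ], so step N of every client's script is
+      # emitted before step N+1 of any client.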
+    in ''
+      start_all()
+      ${server}.wait_for_open_port(${toString ircPort})
+
+      # run clientScript for all clients so that every list
+      # entry is executed by every client before advancing
+      # to the next one.
+    '' + lib.concatStrings
+      (reduce
+        (lib.zipListsWith (cs: c: cs + c))
+        (builtins.map clientScript clients));
+})
diff --git a/nixpkgs/nixos/tests/installed-tests/appstream-qt.nix b/nixpkgs/nixos/tests/installed-tests/appstream-qt.nix
new file mode 100644
index 000000000000..d08187bfe466
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/appstream-qt.nix
@@ -0,0 +1,9 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.libsForQt5.appstream-qt;
+
+  testConfig = {
+    appstream.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/appstream.nix b/nixpkgs/nixos/tests/installed-tests/appstream.nix
new file mode 100644
index 000000000000..f71a095d4452
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/appstream.nix
@@ -0,0 +1,9 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.appstream;
+
+  testConfig = {
+    appstream.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/colord.nix b/nixpkgs/nixos/tests/installed-tests/colord.nix
new file mode 100644
index 000000000000..77e6b917fe68
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/colord.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.colord;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/default.nix b/nixpkgs/nixos/tests/installed-tests/default.nix
new file mode 100644
index 000000000000..e87edb2007e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/default.nix
@@ -0,0 +1,112 @@
+# NixOS tests that run a package's installed tests with gnome-desktop-testing-runner.
+# See https://wiki.gnome.org/Initiatives/GnomeGoals/InstalledTests
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; }
+}:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+
+  callInstalledTest = pkgs.newScope { inherit makeInstalledTest; };
+
+  makeInstalledTest =
+    { # Package to test. Needs to have an installedTests output
+      tested
+
+      # Config to inject into machine
+    , testConfig ? {}
+
+      # Test script snippet to inject before gnome-desktop-testing-runner begins.
+      # This is useful for any extra setup the environment may need before the runner starts.
+    , preTestScript ? ""
+
+      # Does test need X11?
+    , withX11 ? false
+
+      # Extra flags to pass to gnome-desktop-testing-runner.
+    , testRunnerFlags ? []
+
+      # Extra attributes to pass to makeTest.
+      # They will be recursively merged into the attrset created by this function.
+    , ...
+    }@args:
+    makeTest
+      (recursiveUpdate
+        rec {
+          name = tested.name;
+
+          meta = {
+            maintainers = tested.meta.maintainers or [];
+          };
+
+          nodes.machine = { ... }: {
+            imports = [
+              testConfig
+            ] ++ optional withX11 ../common/x11.nix;
+
+            environment.systemPackages = with pkgs; [ gnome-desktop-testing ];
+
+            # The installed tests need to be added to the test VM’s closure.
+            # Otherwise, their dependencies might not actually be registered
+            # as valid paths in the VM’s Nix store database,
+            # and `nix-store --query` commands run as part of the tests
+            # (for example when building Flatpak runtimes) will fail.
+            environment.variables.TESTED_PACKAGE_INSTALLED_TESTS = "${tested.installedTests}/share";
+          };
+
+          testScript =
+            optionalString withX11 ''
+              machine.wait_for_x()
+            '' +
+            optionalString (preTestScript != "") ''
+              ${preTestScript}
+            '' +
+            ''
+              machine.succeed(
+                  "gnome-desktop-testing-runner ${escapeShellArgs testRunnerFlags} -d '${tested.installedTests}/share'"
+              )
+            '';
+        }
+
+        (removeAttrs args [
+          "tested"
+          "testConfig"
+          "preTestScript"
+          "withX11"
+          "testRunnerFlags"
+        ])
+      );
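+  # Example caller, mirroring the per-package files below (e.g. upower.nix):
+  #   makeInstalledTest {
+  #     tested = pkgs.upower;
+  #     testConfig = { services.upower.enable = true; };
+  #   }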
+
+in
+
+{
+  appstream = callInstalledTest ./appstream.nix {};
+  appstream-qt = callInstalledTest ./appstream-qt.nix {};
+  colord = callInstalledTest ./colord.nix {};
+  flatpak = callInstalledTest ./flatpak.nix {};
+  flatpak-builder = callInstalledTest ./flatpak-builder.nix {};
+  fwupd = callInstalledTest ./fwupd.nix {};
+  gcab = callInstalledTest ./gcab.nix {};
+  gdk-pixbuf = callInstalledTest ./gdk-pixbuf.nix {};
+  geocode-glib = callInstalledTest ./geocode-glib.nix {};
+  gjs = callInstalledTest ./gjs.nix {};
+  glib-networking = callInstalledTest ./glib-networking.nix {};
+  gnome-photos = callInstalledTest ./gnome-photos.nix {};
+  graphene = callInstalledTest ./graphene.nix {};
+  gsconnect = callInstalledTest ./gsconnect.nix {};
+  json-glib = callInstalledTest ./json-glib.nix {};
+  ibus = callInstalledTest ./ibus.nix {};
+  libgdata = callInstalledTest ./libgdata.nix {};
+  glib-testing = callInstalledTest ./glib-testing.nix {};
+  libjcat = callInstalledTest ./libjcat.nix {};
+  libxmlb = callInstalledTest ./libxmlb.nix {};
+  malcontent = callInstalledTest ./malcontent.nix {};
+  ostree = callInstalledTest ./ostree.nix {};
+  pipewire = callInstalledTest ./pipewire.nix {};
+  upower = callInstalledTest ./upower.nix {};
+  xdg-desktop-portal = callInstalledTest ./xdg-desktop-portal.nix {};
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/flatpak-builder.nix b/nixpkgs/nixos/tests/installed-tests/flatpak-builder.nix
new file mode 100644
index 000000000000..d5e04fcf975c
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/flatpak-builder.nix
@@ -0,0 +1,15 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.flatpak-builder;
+
+  testConfig = {
+    services.flatpak.enable = true;
+    xdg.portal.enable = true;
+    xdg.portal.extraPortals = with pkgs; [ xdg-desktop-portal-gtk ];
+    environment.systemPackages = with pkgs; [ flatpak-builder ] ++ flatpak-builder.installedTestsDependencies;
+    virtualisation.diskSize = 2048;
+  };
+
+  testRunnerFlags = [ "--timeout" "3600" ];
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/flatpak.nix b/nixpkgs/nixos/tests/installed-tests/flatpak.nix
new file mode 100644
index 000000000000..9524d890c402
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/flatpak.nix
@@ -0,0 +1,17 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.flatpak;
+  withX11 = true;
+
+  testConfig = {
+    xdg.portal.enable = true;
+    xdg.portal.extraPortals = [ pkgs.xdg-desktop-portal-gtk ];
+    services.flatpak.enable = true;
+    environment.systemPackages = with pkgs; [ gnupg ostree python3 ];
+    virtualisation.memorySize = 2047;
+    virtualisation.diskSize = 3072;
+  };
+
+  testRunnerFlags = [ "--timeout" "3600" ];
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/fwupd.nix b/nixpkgs/nixos/tests/installed-tests/fwupd.nix
new file mode 100644
index 000000000000..c095a50dc836
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/fwupd.nix
@@ -0,0 +1,11 @@
+{ pkgs, lib, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.fwupd;
+
+  testConfig = {
+    services.fwupd.enable = true;
+    services.fwupd.daemonSettings.DisabledPlugins = lib.mkForce [ ]; # don't disable test plugin
+    services.fwupd.enableTestRemote = true;
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gcab.nix b/nixpkgs/nixos/tests/installed-tests/gcab.nix
new file mode 100644
index 000000000000..b24cc2e01267
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gcab.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gcab;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gdk-pixbuf.nix b/nixpkgs/nixos/tests/installed-tests/gdk-pixbuf.nix
new file mode 100644
index 000000000000..110efdbf710f
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gdk-pixbuf.nix
@@ -0,0 +1,13 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gdk-pixbuf;
+
+  testConfig = {
+    # Tests allocate a lot of memory trying to exploit a CVE
+    # but qemu-system-i386 has a 2047M memory limit
+    virtualisation.memorySize = if pkgs.stdenv.isi686 then 2047 else 4096;
+  };
+
+  testRunnerFlags = [ "--timeout" "1800" ];
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/geocode-glib.nix b/nixpkgs/nixos/tests/installed-tests/geocode-glib.nix
new file mode 100644
index 000000000000..fcb38c96ab0f
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/geocode-glib.nix
@@ -0,0 +1,13 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  testConfig = {
+    i18n.supportedLocales = [
+      "en_US.UTF-8/UTF-8"
+      # The tests require this locale to be available.
+      "en_GB.UTF-8/UTF-8"
+    ];
+  };
+
+  tested = pkgs.geocode-glib;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gjs.nix b/nixpkgs/nixos/tests/installed-tests/gjs.nix
new file mode 100644
index 000000000000..d12487cba249
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gjs.nix
@@ -0,0 +1,12 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gjs;
+  withX11 = true;
+
+  testConfig = {
+    environment.systemPackages = [
+      pkgs.gjs
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/glib-networking.nix b/nixpkgs/nixos/tests/installed-tests/glib-networking.nix
new file mode 100644
index 000000000000..b58d4df21fca
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/glib-networking.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.glib-networking;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/glib-testing.nix b/nixpkgs/nixos/tests/installed-tests/glib-testing.nix
new file mode 100644
index 000000000000..7a06cf792bdd
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/glib-testing.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.glib-testing;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gnome-photos.nix b/nixpkgs/nixos/tests/installed-tests/gnome-photos.nix
new file mode 100644
index 000000000000..bcb6479ee89c
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gnome-photos.nix
@@ -0,0 +1,35 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gnome-photos;
+
+  withX11 = true;
+
+  testConfig = {
+    programs.dconf.enable = true;
+    services.gnome.at-spi2-core.enable = true; # needed for dogtail
+    environment.systemPackages = with pkgs; [
+      # gsettings tool with access to gsettings-desktop-schemas
+      (stdenv.mkDerivation {
+        name = "desktop-gsettings";
+        dontUnpack = true;
+        nativeBuildInputs = [ glib wrapGAppsHook ];
+        buildInputs = [ gsettings-desktop-schemas ];
+        installPhase = ''
+          runHook preInstall
+          mkdir -p $out/bin
+          ln -s ${glib.bin}/bin/gsettings $out/bin/desktop-gsettings
+          runHook postInstall
+        '';
+      })
+    ];
+    services.dbus.packages = with pkgs; [ gnome-photos ];
+  };
+
+  preTestScript = ''
+    # dogtail needs accessibility enabled
+    machine.succeed(
+        "desktop-gsettings set org.gnome.desktop.interface toolkit-accessibility true 2>&1"
+    )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/graphene.nix b/nixpkgs/nixos/tests/installed-tests/graphene.nix
new file mode 100644
index 000000000000..e43339abd88c
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/graphene.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.graphene;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/gsconnect.nix b/nixpkgs/nixos/tests/installed-tests/gsconnect.nix
new file mode 100644
index 000000000000..ac39f7435786
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/gsconnect.nix
@@ -0,0 +1,7 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.gnomeExtensions.gsconnect;
+
+  withX11 = true;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/ibus.nix b/nixpkgs/nixos/tests/installed-tests/ibus.nix
new file mode 100644
index 000000000000..028c20c29f2d
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/ibus.nix
@@ -0,0 +1,17 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.ibus;
+
+  testConfig = {
+    i18n.supportedLocales = [ "all" ];
+    i18n.inputMethod.enabled = "ibus";
+    systemd.user.services.ibus-daemon = {
+      serviceConfig.ExecStart = "${pkgs.ibus}/bin/ibus-daemon --xim --verbose";
+      wantedBy = [ "graphical-session.target" ];
+      partOf = [ "graphical-session.target" ];
+    };
+  };
+
+  withX11 = true;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/json-glib.nix b/nixpkgs/nixos/tests/installed-tests/json-glib.nix
new file mode 100644
index 000000000000..3dfd3dd0b098
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/json-glib.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.json-glib;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/libgdata.nix b/nixpkgs/nixos/tests/installed-tests/libgdata.nix
new file mode 100644
index 000000000000..b0d39c042be4
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/libgdata.nix
@@ -0,0 +1,11 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.libgdata;
+
+  testConfig = {
+    # Without a TLS backend the tests fail:
+    #   GLib-GIO-DEBUG: _g_io_module_get_default: Found default implementation dummy (GDummyTlsBackend) for ‘gio-tls-backend’
+    #   Bail out! libgdata:ERROR:../gdata/tests/common.c:134:gdata_test_init: assertion failed (child_error == NULL): TLS support is not available (g-tls-error-quark, 0)
+    services.gnome.glib-networking.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/libjcat.nix b/nixpkgs/nixos/tests/installed-tests/libjcat.nix
new file mode 100644
index 000000000000..41493a730890
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/libjcat.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.libjcat;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/libxmlb.nix b/nixpkgs/nixos/tests/installed-tests/libxmlb.nix
new file mode 100644
index 000000000000..af2bbe9c35e2
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/libxmlb.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.libxmlb;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/malcontent.nix b/nixpkgs/nixos/tests/installed-tests/malcontent.nix
new file mode 100644
index 000000000000..d4e214c41988
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/malcontent.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.malcontent;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/ostree.nix b/nixpkgs/nixos/tests/installed-tests/ostree.nix
new file mode 100644
index 000000000000..90e09ad4ddf4
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/ostree.nix
@@ -0,0 +1,12 @@
+{ pkgs, lib, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.ostree;
+
+  testConfig = {
+    environment.systemPackages = with pkgs; [
+      gnupg
+      ostree
+    ];
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/pipewire.nix b/nixpkgs/nixos/tests/installed-tests/pipewire.nix
new file mode 100644
index 000000000000..6e69ada8612f
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/pipewire.nix
@@ -0,0 +1,5 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.pipewire;
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/upower.nix b/nixpkgs/nixos/tests/installed-tests/upower.nix
new file mode 100644
index 000000000000..a8e777a55527
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/upower.nix
@@ -0,0 +1,9 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.upower;
+
+  testConfig = {
+    services.upower.enable = true;
+  };
+}
diff --git a/nixpkgs/nixos/tests/installed-tests/xdg-desktop-portal.nix b/nixpkgs/nixos/tests/installed-tests/xdg-desktop-portal.nix
new file mode 100644
index 000000000000..90529d37ee0f
--- /dev/null
+++ b/nixpkgs/nixos/tests/installed-tests/xdg-desktop-portal.nix
@@ -0,0 +1,9 @@
+{ pkgs, makeInstalledTest, ... }:
+
+makeInstalledTest {
+  tested = pkgs.xdg-desktop-portal;
+
+  # Ton of breakage.
+  # https://github.com/flatpak/xdg-desktop-portal/pull/428
+  meta.broken = true;
+}
diff --git a/nixpkgs/nixos/tests/installer-systemd-stage-1.nix b/nixpkgs/nixos/tests/installer-systemd-stage-1.nix
new file mode 100644
index 000000000000..1b4c92b584b9
--- /dev/null
+++ b/nixpkgs/nixos/tests/installer-systemd-stage-1.nix
@@ -0,0 +1,37 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+{
+  # Some of these tests don't work with systemd stage 1 yet. Uncomment
+  # them when fixed.
+  inherit (import ./installer.nix { inherit system config pkgs; systemdStage1 = true; })
+    # bcache
+    bcachefsSimple
+    bcachefsEncrypted
+    btrfsSimple
+    btrfsSubvolDefault
+    btrfsSubvolEscape
+    btrfsSubvols
+    encryptedFSWithKeyfile
+    # grub1
+    luksroot
+    luksroot-format1
+    luksroot-format2
+    # lvm
+    separateBoot
+    separateBootFat
+    simple
+    simpleLabels
+    simpleProvided
+    simpleSpecialised
+    simpleUefiGrub
+    simpleUefiGrubSpecialisation
+    simpleUefiSystemdBoot
+    stratisRoot
+    swraid
+    zfsroot
+    ;
+
+}
diff --git a/nixpkgs/nixos/tests/installer.nix b/nixpkgs/nixos/tests/installer.nix
new file mode 100644
index 000000000000..e9ec28749850
--- /dev/null
+++ b/nixpkgs/nixos/tests/installer.nix
@@ -0,0 +1,1213 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; },
+  systemdStage1 ? false
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+
+  # The configuration to install.
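+  # The test script copies the resulting configuration.nix (together with the
+  # generated hardware-configuration.nix) into /mnt/etc/nixos before running
+  # nixos-install.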
+  makeConfig = { bootLoader, grubDevice, grubIdentifier, grubUseEfi
+               , extraConfig, forceGrubReinstallCount ? 0, flake ? false
+               }:
+    pkgs.writeText "configuration.nix" ''
+      { config, lib, pkgs, modulesPath, ... }:
+
+      { imports =
+          [ ./hardware-configuration.nix
+            ${if flake
+              then "" # Still included, but via installer/flake.nix
+              else "<nixpkgs/nixos/modules/testing/test-instrumentation.nix>"}
+          ];
+
+        networking.hostName = "thatworked";
+
+        documentation.enable = false;
+
+        # To ensure that we can rebuild the grub configuration during nixos-rebuild
+        system.extraDependencies = with pkgs; [ stdenvNoCC ];
+
+        ${optionalString systemdStage1 "boot.initrd.systemd.enable = true;"}
+
+        ${optionalString (bootLoader == "grub") ''
+          boot.loader.grub.extraConfig = "serial; terminal_output serial";
+          ${if grubUseEfi then ''
+            boot.loader.grub.device = "nodev";
+            boot.loader.grub.efiSupport = true;
+            boot.loader.grub.efiInstallAsRemovable = true; # XXX: needed for OVMF?
+          '' else ''
+            boot.loader.grub.device = "${grubDevice}";
+            boot.loader.grub.fsIdentifier = "${grubIdentifier}";
+          ''}
+
+          boot.loader.grub.configurationLimit = 100 + ${toString forceGrubReinstallCount};
+        ''}
+
+        ${optionalString (bootLoader == "systemd-boot") ''
+          boot.loader.systemd-boot.enable = true;
+        ''}
+
+        boot.initrd.secrets."/etc/secret" = ./secret;
+
+        users.users.alice = {
+          isNormalUser = true;
+          home = "/home/alice";
+          description = "Alice Foobar";
+        };
+
+        hardware.enableAllFirmware = lib.mkForce false;
+
+        ${replaceStrings ["\n"] ["\n  "] extraConfig}
+      }
+    '';
+
+
+  # The test script boots a NixOS VM, installs NixOS on an empty hard
+  # disk, and then reboots from the hard disk.  It's parameterized with
+  # a test script fragment `createPartitions', which must create
+  # partitions and filesystems.
+  testScriptFun = { bootLoader, createPartitions, grubDevice, grubUseEfi, grubIdentifier
+                  , postInstallCommands, preBootCommands, postBootCommands, extraConfig
+                  , testSpecialisationConfig, testFlakeSwitch
+                  }:
+    let iface = "virtio";
+        isEfi = bootLoader == "systemd-boot" || (bootLoader == "grub" && grubUseEfi);
+        bios  = if pkgs.stdenv.isAarch64 then "QEMU_EFI.fd" else "OVMF.fd";
+    in if !isEfi && !pkgs.stdenv.hostPlatform.isx86 then ''
+      machine.succeed("true")
+    '' else ''
+      def assemble_qemu_flags():
+          flags = "-cpu max"
+          ${if (system == "x86_64-linux" || system == "i686-linux")
+            then ''flags += " -m 1024"''
+            else ''flags += " -m 768 -enable-kvm -machine virt,gic-version=host"''
+          }
+          return flags
+
+
+      qemu_flags = {"qemuFlags": assemble_qemu_flags()}
+
+      import os
+
+      image_dir = machine.state_dir
+      disk_image = os.path.join(image_dir, "machine.qcow2")
+
+      hd_flags = {
+          "hdaInterface": "${iface}",
+          "hda": disk_image,
+      }
+      ${optionalString isEfi ''
+        hd_flags.update(
+            bios="${pkgs.OVMF.fd}/FV/${bios}"
+        )''
+      }
+      default_flags = {**hd_flags, **qemu_flags}
+
+
+      def create_machine_named(name):
+          return create_machine({**default_flags, "name": name})
+
+
+      machine.start()
+
+      with subtest("Assert readiness of login prompt"):
+          machine.succeed("echo hello")
+
+      with subtest("Wait for hard disks to appear in /dev"):
+          machine.succeed("udevadm settle")
+
+      ${createPartitions}
+
+      with subtest("Create the NixOS configuration"):
+          machine.succeed("nixos-generate-config --root /mnt")
+          machine.succeed("cat /mnt/etc/nixos/hardware-configuration.nix >&2")
+          machine.copy_from_host(
+              "${ makeConfig {
+                    inherit bootLoader grubDevice grubIdentifier
+                            grubUseEfi extraConfig;
+                  }
+              }",
+              "/mnt/etc/nixos/configuration.nix",
+          )
+          machine.copy_from_host("${pkgs.writeText "secret" "secret"}", "/mnt/etc/nixos/secret")
+
+      with subtest("Perform the installation"):
+          machine.succeed("nixos-install < /dev/null >&2")
+
+      with subtest("Do it again to make sure it's idempotent"):
+          machine.succeed("nixos-install < /dev/null >&2")
+
+      with subtest("Check that we can build things in nixos-enter"):
+          machine.succeed(
+              """
+              nixos-enter -- nix-build --option substitute false -E 'derivation {
+                  name = "t";
+                  builder = "/bin/sh";
+                  args = ["-c" "echo nixos-enter build > $out"];
+                  system = builtins.currentSystem;
+                  preferLocalBuild = true;
+              }'
+              """
+          )
+
+      ${postInstallCommands}
+
+      with subtest("Shutdown system after installation"):
+          machine.succeed("umount -R /mnt")
+          machine.succeed("sync")
+          machine.shutdown()
+
+      # Now see if we can boot the installation.
+      machine = create_machine_named("boot-after-install")
+
+      # For example to enter LUKS passphrase.
+      ${preBootCommands}
+
+      with subtest("Assert that /boot get mounted"):
+          machine.wait_for_unit("local-fs.target")
+          ${if bootLoader == "grub"
+              then ''machine.succeed("test -e /boot/grub")''
+              else ''machine.succeed("test -e /boot/loader/loader.conf")''
+          }
+
+      with subtest("Check whether /root has correct permissions"):
+          assert "700" in machine.succeed("stat -c '%a' /root")
+
+      with subtest("Assert swap device got activated"):
+          # uncomment once https://bugs.freedesktop.org/show_bug.cgi?id=86930 is resolved
+          machine.wait_for_unit("swap.target")
+          machine.succeed("cat /proc/swaps | grep -q /dev")
+
+      with subtest("Check that the store is in good shape"):
+          machine.succeed("nix-store --verify --check-contents >&2")
+
+      with subtest("Check whether the channel works"):
+          machine.succeed("nix-env -iA nixos.procps >&2")
+          assert ".nix-profile" in machine.succeed("type -tP ps | tee /dev/stderr")
+
+      with subtest(
+          "Check that the daemon works, and that non-root users can run builds "
+          "(this will build a new profile generation through the daemon)"
+      ):
+          machine.succeed("su alice -l -c 'nix-env -iA nixos.procps' >&2")
+
+      with subtest("Configure system with writable Nix store on next boot"):
+          # we're not using copy_from_host here because the installer image
+          # doesn't know about the host-guest sharing mechanism.
+          machine.copy_from_host_via_shell(
+              "${ makeConfig {
+                    inherit bootLoader grubDevice grubIdentifier
+                            grubUseEfi extraConfig;
+                    forceGrubReinstallCount = 1;
+                  }
+              }",
+              "/etc/nixos/configuration.nix",
+          )
+
+      with subtest("Check whether nixos-rebuild works"):
+          machine.succeed("nixos-rebuild switch >&2")
+
+      # FIXME: Nix 2.4 broke nixos-option, someone has to fix it.
+      # with subtest("Test nixos-option"):
+      #     kernel_modules = machine.succeed("nixos-option boot.initrd.kernelModules")
+      #     assert "virtio_console" in kernel_modules
+      #     assert "List of modules" in kernel_modules
+      #     assert "qemu-guest.nix" in kernel_modules
+
+      machine.shutdown()
+
+      # Check whether a writable store build works
+      machine = create_machine_named("rebuild-switch")
+      ${preBootCommands}
+      machine.wait_for_unit("multi-user.target")
+
+      # we're not using copy_from_host here because the installer image
+      # doesn't know about the host-guest sharing mechanism.
+      machine.copy_from_host_via_shell(
+          "${ makeConfig {
+                inherit bootLoader grubDevice grubIdentifier
+                grubUseEfi extraConfig;
+                forceGrubReinstallCount = 2;
+              }
+          }",
+          "/etc/nixos/configuration.nix",
+      )
+      machine.succeed("nixos-rebuild boot >&2")
+      machine.shutdown()
+
+      # And just to be sure, check that the machine still boots after
+      # "nixos-rebuild switch".
+      machine = create_machine_named("boot-after-rebuild-switch")
+      ${preBootCommands}
+      machine.wait_for_unit("network.target")
+
+      # Sanity check, is it the configuration.nix we generated?
+      hostname = machine.succeed("hostname").strip()
+      assert hostname == "thatworked"
+
+      ${postBootCommands}
+      machine.shutdown()
+
+      # Tests for validating clone configuration entries in grub menu
+    ''
+    + optionalString testSpecialisationConfig ''
+      # Reboot Machine
+      machine = create_machine_named("clone-default-config")
+      ${preBootCommands}
+      machine.wait_for_unit("multi-user.target")
+
+      with subtest("Booted configuration name should be 'Home'"):
+          # This is not the name that shows in the grub menu.
+          # The default configuration is always shown as "Default"
+          machine.succeed("cat /run/booted-system/configuration-name >&2")
+          assert "Home" in machine.succeed("cat /run/booted-system/configuration-name")
+
+      with subtest("We should **not** find a file named /etc/gitconfig"):
+          machine.fail("test -e /etc/gitconfig")
+
+      with subtest("Set grub to boot the second configuration"):
+          machine.succeed("grub-reboot 1")
+
+      ${postBootCommands}
+      machine.shutdown()
+
+      # Reboot Machine
+      machine = create_machine_named("clone-alternate-config")
+      ${preBootCommands}
+
+      machine.wait_for_unit("multi-user.target")
+      with subtest("Booted configuration name should be Work"):
+          machine.succeed("cat /run/booted-system/configuration-name >&2")
+          assert "Work" in machine.succeed("cat /run/booted-system/configuration-name")
+
+      with subtest("We should find a file named /etc/gitconfig"):
+          machine.succeed("test -e /etc/gitconfig")
+
+      ${postBootCommands}
+      machine.shutdown()
+    ''
+    + optionalString testFlakeSwitch ''
+      ${preBootCommands}
+      machine.start()
+
+      with subtest("Configure system with flake"):
+        # TODO: evaluate as user?
+        machine.succeed("""
+          mkdir /root/my-config
+          mv /etc/nixos/hardware-configuration.nix /root/my-config/
+          mv /etc/nixos/secret /root/my-config/
+          rm /etc/nixos/configuration.nix
+        """)
+        machine.copy_from_host_via_shell(
+          "${makeConfig {
+               inherit bootLoader grubDevice grubIdentifier grubUseEfi extraConfig;
+               forceGrubReinstallCount = 1;
+               flake = true;
+            }}",
+          "/root/my-config/configuration.nix",
+        )
+        machine.copy_from_host_via_shell(
+          "${./installer/flake.nix}",
+          "/root/my-config/flake.nix",
+        )
+        machine.succeed("""
+          # for some reason the image does not have `pkgs.path`, so
+          # we use readlink to find a Nixpkgs source.
+          pkgs=$(readlink -f /nix/var/nix/profiles/per-user/root/channels)/nixos
+          if ! [[ -e $pkgs/pkgs/top-level/default.nix ]]; then
+            echo 1>&2 "$pkgs does not seem to be a nixpkgs source. Please fix the test so that pkgs points to a nixpkgs source.";
+            exit 1;
+          fi
+          sed -e s^@nixpkgs@^$pkgs^ -i /root/my-config/flake.nix
+        """)
+
+      with subtest("Switch to flake based config"):
+        machine.succeed("nixos-rebuild switch --flake /root/my-config#xyz")
+
+      ${postBootCommands}
+      machine.shutdown()
+
+      ${preBootCommands}
+      machine.start()
+
+      machine.wait_for_unit("multi-user.target")
+
+      with subtest("nix-channel command is not available anymore"):
+        machine.succeed("! which nix-channel")
+
+      # Note that the channel profile is still present on disk, but configured
+      # not to be used.
+      with subtest("builtins.nixPath is now empty"):
+        machine.succeed("""
+          [[ "[ ]" == "$(nix-instantiate builtins.nixPath --eval --expr)" ]]
+        """)
+
+      with subtest("<nixpkgs> does not resolve"):
+        machine.succeed("""
+          ! nix-instantiate '<nixpkgs>' --eval --expr
+        """)
+
+      with subtest("Evaluate flake config in fresh env without nix-channel"):
+        machine.succeed("nixos-rebuild switch --flake /root/my-config#xyz")
+
+      with subtest("Evaluate flake config in fresh env without channel profiles"):
+        machine.succeed("""
+          (
+            exec 1>&2
+            rm -v /root/.nix-channels
+            rm -vrf ~/.nix-defexpr
+            rm -vrf /nix/var/nix/profiles/per-user/root/channels*
+          )
+        """)
+        machine.succeed("nixos-rebuild switch --flake /root/my-config#xyz")
+
+      ${postBootCommands}
+      machine.shutdown()
+    '';
+
+
+  makeInstallerTest = name:
+    { createPartitions
+    , postInstallCommands ? "", preBootCommands ? "", postBootCommands ? ""
+    , extraConfig ? ""
+    , extraInstallerConfig ? {}
+    , bootLoader ? "grub" # either "grub" or "systemd-boot"
+    , grubDevice ? "/dev/vda", grubIdentifier ? "uuid", grubUseEfi ? false
+    , enableOCR ? false, meta ? {}
+    , testSpecialisationConfig ? false
+    , testFlakeSwitch ? false
+    }:
+    makeTest {
+      inherit enableOCR;
+      name = "installer-" + name;
+      meta = {
+        # Put global maintainers here; individual maintainers go in the makeInstallerTest call.
+        maintainers = (meta.maintainers or []);
+      };
+      nodes = {
+
+        # The configuration of the machine used to run "nixos-install".
+        machine = { pkgs, ... }: {
+          imports = [
+            ../modules/profiles/installation-device.nix
+            ../modules/profiles/base.nix
+            extraInstallerConfig
+            ./common/auto-format-root-device.nix
+          ];
+
+          # In systemdStage1, also automatically format the device backing the
+          # root filesystem.
+          virtualisation.fileSystems."/".autoFormat = systemdStage1;
+
+          # builds stuff in the VM, needs more juice
+          virtualisation.diskSize = 8 * 1024;
+          virtualisation.cores = 8;
+          virtualisation.memorySize = 1536;
+
+          boot.initrd.systemd.enable = systemdStage1;
+
+          # Use a small /dev/vdb as the root disk for the
+          # installer. This ensures the target disk (/dev/vda) is
+          # the same during and after installation.
+          virtualisation.emptyDiskImages = [ 512 ];
+          virtualisation.rootDevice = "/dev/vdb";
+          virtualisation.bootLoaderDevice = "/dev/vda";
+          virtualisation.qemu.diskInterface = "virtio";
+
+          # We don't want to have any networking in the guest whatsoever.
+          # Also, if any vlans are enabled, the guest will reboot
+          # (with a different configuration for legacy reasons),
+          # and spend 5 minutes waiting for the vlan interface to show up
+          # (which will never happen).
+          virtualisation.vlans = [];
+
+          boot.loader.systemd-boot.enable = mkIf (bootLoader == "systemd-boot") true;
+
+          hardware.enableAllFirmware = mkForce false;
+
+          # The test cannot access the network, so any packages we
+          # need must be included in the VM.
+          system.extraDependencies = with pkgs; [
+            bintools
+            brotli
+            brotli.dev
+            brotli.lib
+            desktop-file-utils
+            docbook5
+            docbook_xsl_ns
+            kbd.dev
+            kmod.dev
+            libarchive.dev
+            libxml2.bin
+            libxslt.bin
+            nixos-artwork.wallpapers.simple-dark-gray-bottom
+            ntp
+            perlPackages.ListCompare
+            perlPackages.XMLLibXML
+            python3Minimal
+            # make-options-doc/default.nix
+            (let
+              self = pkgs.python3Minimal.override {
+                inherit self;
+                includeSiteCustomize = true;
+              };
+            in self.withPackages (p: [ p.mistune ]))
+            shared-mime-info
+            sudo
+            texinfo
+            unionfs-fuse
+            xorg.lndir
+
+            # Add curl so that, instead of seeing the test attempt to download curl's
+            # tarball, we can see what it is actually trying to download.
+            curl
+          ]
+          ++ optionals (bootLoader == "grub") (let
+            zfsSupport = lib.any (x: x == "zfs")
+              (extraInstallerConfig.boot.supportedFilesystems or []);
+          in [
+            (pkgs.grub2.override { inherit zfsSupport; })
+            (pkgs.grub2_efi.override { inherit zfsSupport; })
+          ]);
+
+          nix.settings = {
+            substituters = mkForce [];
+            hashed-mirrors = null;
+            connect-timeout = 1;
+          };
+        };
+
+      };
+
+      testScript = testScriptFun {
+        inherit bootLoader createPartitions postInstallCommands preBootCommands postBootCommands
+                grubDevice grubIdentifier grubUseEfi extraConfig
+                testSpecialisationConfig testFlakeSwitch;
+      };
+    };
+
+    makeLuksRootTest = name: luksFormatOpts: makeInstallerTest name {
+      createPartitions = ''
+        machine.succeed(
+            "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+            + " mkpart primary ext2 1M 100MB"  # /boot
+            + " mkpart primary linux-swap 100M 1024M"
+            + " mkpart primary 1024M -1s",  # LUKS
+            "udevadm settle",
+            "mkswap /dev/vda2 -L swap",
+            "swapon -L swap",
+            "modprobe dm_mod dm_crypt",
+            "echo -n supersecret | cryptsetup luksFormat ${luksFormatOpts} -q /dev/vda3 -",
+            "echo -n supersecret | cryptsetup luksOpen --key-file - /dev/vda3 cryptroot",
+            "mkfs.ext3 -L nixos /dev/mapper/cryptroot",
+            "mount LABEL=nixos /mnt",
+            "mkfs.ext3 -L boot /dev/vda1",
+            "mkdir -p /mnt/boot",
+            "mount LABEL=boot /mnt/boot",
+        )
+      '';
+      extraConfig = ''
+        boot.kernelParams = lib.mkAfter [ "console=tty0" ];
+      '';
+      enableOCR = true;
+      preBootCommands = ''
+        machine.start()
+        machine.wait_for_text("[Pp]assphrase for")
+        machine.send_chars("supersecret\n")
+      '';
+    };
+
+  # The (almost) simplest partitioning scheme: a swap partition and
+  # one big filesystem partition.
+  simple-test-config = {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary linux-swap 1M 1024M"
+          + " mkpart primary ext2 1024M -1s",
+          "udevadm settle",
+          "mkswap /dev/vda1 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda2",
+          "mount LABEL=nixos /mnt",
+      )
+    '';
+  };
+
+  simple-test-config-flake = simple-test-config // {
+    testFlakeSwitch = true;
+  };
+
+  simple-uefi-grub-config = {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel gpt"
+          + " mkpart ESP fat32 1M 100MiB"  # /boot
+          + " set 1 boot on"
+          + " mkpart primary linux-swap 100MiB 1024MiB"
+          + " mkpart primary ext2 1024MiB -1MiB",  # /
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+          "mkfs.vfat -n BOOT /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
+      )
+    '';
+    bootLoader = "grub";
+    grubUseEfi = true;
+  };
+
+  specialisation-test-extraconfig = {
+    extraConfig = ''
+      environment.systemPackages = [ pkgs.grub2 ];
+      boot.loader.grub.configurationName = "Home";
+      specialisation.work.configuration = {
+        boot.loader.grub.configurationName = lib.mkForce "Work";
+
+        environment.etc = {
+          "gitconfig".text = "
+            [core]
+              gitproxy = none for work.com
+              ";
+        };
+      };
+    '';
+    testSpecialisationConfig = true;
+  };
+  # disable zfs so we can support latest kernel if needed
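+  # (An empty meta.platforms marks zfs as unavailable, so modules that check its
+  # availability leave it out.)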
+  no-zfs-module = {
+    nixpkgs.overlays = [
+      (final: super: {
+        zfs = super.zfs.overrideAttrs (_: { meta.platforms = [ ]; });
+      })
+    ];
+  };
+in {
+
+  # !!! `parted mkpart' seems to silently create overlapping partitions.
+
+
+  # The (almost) simplest partitioning scheme: a swap partition and
+  # one big filesystem partition.
+  simple = makeInstallerTest "simple" simple-test-config;
+
+  switchToFlake = makeInstallerTest "switch-to-flake" simple-test-config-flake;
+
+  # Test cloned configurations with the simple grub configuration
+  simpleSpecialised = makeInstallerTest "simpleSpecialised" (simple-test-config // specialisation-test-extraconfig);
+
+  # Simple GPT/UEFI configuration using systemd-boot with 3 partitions: ESP, swap & root filesystem
+  simpleUefiSystemdBoot = makeInstallerTest "simpleUefiSystemdBoot" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel gpt"
+          + " mkpart ESP fat32 1M 100MiB"  # /boot
+          + " set 1 boot on"
+          + " mkpart primary linux-swap 100MiB 1024MiB"
+          + " mkpart primary ext2 1024MiB -1MiB",  # /
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+          "mkfs.vfat -n BOOT /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
+      )
+    '';
+    bootLoader = "systemd-boot";
+  };
+
+  simpleUefiGrub = makeInstallerTest "simpleUefiGrub" simple-uefi-grub-config;
+
+  # Test cloned configurations with the uefi grub configuration
+  simpleUefiGrubSpecialisation = makeInstallerTest "simpleUefiGrubSpecialisation" (simple-uefi-grub-config // specialisation-test-extraconfig);
+
+  # Same as the previous, but now with a separate /boot partition.
+  separateBoot = makeInstallerTest "separateBoot" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 100MB"  # /boot
+          + " mkpart primary linux-swap 100MB 1024M"
+          + " mkpart primary ext2 1024M -1s",  # /
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+      )
+    '';
+  };
+
+  # Same as the previous, but with fat32 /boot.
+  separateBootFat = makeInstallerTest "separateBootFat" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 100MB"  # /boot
+          + " mkpart primary linux-swap 100MB 1024M"
+          + " mkpart primary ext2 1024M -1s",  # /
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda3",
+          "mount LABEL=nixos /mnt",
+          "mkfs.vfat -n BOOT /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=BOOT /mnt/boot",
+      )
+    '';
+  };
+
+  # zfs on / with swap
+  zfsroot = makeInstallerTest "zfs-root" {
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "zfs" ];
+    };
+
+    extraConfig = ''
+      boot.supportedFilesystems = [ "zfs" ];
+
+      # Using by-uuid overrides the default of by-id, and is unique
+      # to the qemu disks, as they don't produce by-id paths for
+      # some reason.
+      boot.zfs.devNodes = "/dev/disk/by-uuid/";
+      networking.hostId = "00000000";
+    '';
+
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary 1M 100MB"  # bpool
+          + " mkpart primary linux-swap 100M 1024M"
+          + " mkpart primary 1024M -1s", # rpool
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "zpool create rpool /dev/vda3",
+          "zfs create -o mountpoint=legacy rpool/root",
+          "mount -t zfs rpool/root /mnt",
+          "zfs create -o mountpoint=legacy rpool/root/usr",
+          "mkdir /mnt/usr",
+          "mount -t zfs rpool/root/usr /mnt/usr",
+          "zpool create -o compatibility=grub2 bpool /dev/vda1",
+          "zfs create -o mountpoint=legacy bpool/boot",
+          "mkdir /mnt/boot",
+          "mount -t zfs bpool/boot /mnt/boot",
+          "udevadm settle",
+      )
+    '';
+
+    # umount & export bpool before shutdown
+    # this is a fix for "cannot import 'bpool': pool was previously in use from another system."
+    postInstallCommands = ''
+      machine.succeed("umount /mnt/boot")
+      machine.succeed("zpool export bpool")
+    '';
+  };
+
+  # Create two physical LVM partitions combined into one volume group
+  # that contains the logical swap and root partitions.
+  lvm = makeInstallerTest "lvm" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary 1M 2048M"  # PV1
+          + " set 1 lvm on"
+          + " mkpart primary 2048M -1s"  # PV2
+          + " set 2 lvm on",
+          "udevadm settle",
+          "pvcreate /dev/vda1 /dev/vda2",
+          "vgcreate MyVolGroup /dev/vda1 /dev/vda2",
+          "lvcreate --size 1G --name swap MyVolGroup",
+          "lvcreate --size 6G --name nixos MyVolGroup",
+          "mkswap -f /dev/MyVolGroup/swap -L swap",
+          "swapon -L swap",
+          "mkfs.xfs -L nixos /dev/MyVolGroup/nixos",
+          "mount LABEL=nixos /mnt",
+      )
+    '';
+  };
+
+  # Boot off an encrypted root partition with the default LUKS header format
+  luksroot = makeLuksRootTest "luksroot" "";
+
+  # Boot off an encrypted root partition with LUKS1 format
+  luksroot-format1 = makeLuksRootTest "luksroot-format1" "--type=LUKS1";
+
+  # Boot off an encrypted root partition with LUKS2 format
+  luksroot-format2 = makeLuksRootTest "luksroot-format2" "--type=LUKS2";
+
+  # Test whether opening an encrypted filesystem with a keyfile works.
+  # Checks for a regression where cryptsetup was missing when no LUKS device
+  # without a keyfile was configured.
+  encryptedFSWithKeyfile = makeInstallerTest "encryptedFSWithKeyfile" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+          + " mkpart primary ext2 1M 100MB"  # /boot
+          + " mkpart primary linux-swap 100M 1024M"
+          + " mkpart primary 1024M 1280M"  # LUKS with keyfile
+          + " mkpart primary 1280M -1s",
+          "udevadm settle",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/vda4",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir -p /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+          "modprobe dm_mod dm_crypt",
+          "echo -n supersecret > /mnt/keyfile",
+          "cryptsetup luksFormat -q /dev/vda3 --key-file /mnt/keyfile",
+          "cryptsetup luksOpen --key-file /mnt/keyfile /dev/vda3 crypt",
+          "mkfs.ext3 -L test /dev/mapper/crypt",
+          "cryptsetup luksClose crypt",
+          "mkdir -p /mnt/test",
+      )
+    '';
+    extraConfig = ''
+      fileSystems."/test" = {
+        device = "/dev/disk/by-label/test";
+        fsType = "ext3";
+        encrypted.enable = true;
+        encrypted.blkDev = "/dev/vda3";
+        encrypted.label = "crypt";
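+        # The initrd mounts the target root at /sysroot (systemd stage 1) or
+        # /mnt-root (scripted stage 1), hence the conditional keyfile path below.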
+        encrypted.keyFile = "/${if systemdStage1 then "sysroot" else "mnt-root"}/keyfile";
+      };
+    '';
+  };
+
+  # Full disk encryption (root, kernel and initrd encrypted) using GRUB, GPT/UEFI,
+  # LVM-on-LUKS and a keyfile in initrd.secrets to enter the passphrase once
+  fullDiskEncryption = makeInstallerTest "fullDiskEncryption" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda -- mklabel gpt"
+          + " mkpart ESP fat32 1M 100MiB"  # /boot/efi
+          + " set 1 boot on"
+          + " mkpart primary ext2 1024MiB -1MiB",  # LUKS
+          "udevadm settle",
+          "modprobe dm_mod dm_crypt",
+          "dd if=/dev/random of=luks.key bs=256 count=1",
+          "echo -n supersecret | cryptsetup luksFormat -q --pbkdf-force-iterations 1000 --type luks1 /dev/vda2 -",
+          "echo -n supersecret | cryptsetup luksAddKey -q --pbkdf-force-iterations 1000 --key-file - /dev/vda2 luks.key",
+          "echo -n supersecret | cryptsetup luksOpen --key-file - /dev/vda2 crypt",
+          "pvcreate /dev/mapper/crypt",
+          "vgcreate crypt /dev/mapper/crypt",
+          "lvcreate -L 100M -n swap crypt",
+          "lvcreate -l '100%FREE' -n nixos crypt",
+          "mkfs.vfat -n efi /dev/vda1",
+          "mkfs.ext4 -L nixos /dev/crypt/nixos",
+          "mkswap -L swap /dev/crypt/swap",
+          "mount LABEL=nixos /mnt",
+          "mkdir -p /mnt/{etc/nixos,boot/efi}",
+          "mount LABEL=efi /mnt/boot/efi",
+          "swapon -L swap",
+          "mv luks.key /mnt/etc/nixos/"
+      )
+    '';
+    bootLoader = "grub";
+    grubUseEfi = true;
+    extraConfig = ''
+      boot.loader.grub.enableCryptodisk = true;
+      boot.loader.efi.efiSysMountPoint = "/boot/efi";
+
+      boot.initrd.secrets."/luks.key" = ./luks.key;
+      boot.initrd.luks.devices.crypt =
+        { device  = "/dev/vda2";
+          keyFile = "/luks.key";
+        };
+    '';
+    enableOCR = true;
+    preBootCommands = ''
+      machine.start()
+      machine.wait_for_text("Enter passphrase for")
+      machine.send_chars("supersecret\n")
+    '';
+  };
+
+  swraid = makeInstallerTest "swraid" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda --"
+          + " mklabel msdos"
+          + " mkpart primary ext2 1M 100MB"  # /boot
+          + " mkpart extended 100M -1s"
+          + " mkpart logical 102M 3102M"  # md0 (root), first device
+          + " mkpart logical 3103M 6103M"  # md0 (root), second device
+          + " mkpart logical 6104M 6360M"  # md1 (swap), first device
+          + " mkpart logical 6361M 6617M",  # md1 (swap), second device
+          "udevadm settle",
+          "ls -l /dev/vda* >&2",
+          "cat /proc/partitions >&2",
+          "udevadm control --stop-exec-queue",
+          "mdadm --create --force /dev/md0 --metadata 1.2 --level=raid1 "
+          + "--raid-devices=2 /dev/vda5 /dev/vda6",
+          "mdadm --create --force /dev/md1 --metadata 1.2 --level=raid1 "
+          + "--raid-devices=2 /dev/vda7 /dev/vda8",
+          "udevadm control --start-exec-queue",
+          "udevadm settle",
+          "mkswap -f /dev/md1 -L swap",
+          "swapon -L swap",
+          "mkfs.ext3 -L nixos /dev/md0",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+          "udevadm settle",
+      )
+    '';
+    preBootCommands = ''
+      machine.start()
+      machine.fail("dmesg | grep 'immediate safe mode'")
+    '';
+  };
+
+  bcache = makeInstallerTest "bcache" {
+    createPartitions = ''
+      machine.succeed(
+          "flock /dev/vda parted --script /dev/vda --"
+          + " mklabel msdos"
+          + " mkpart primary ext2 1M 100MB"  # /boot
+          + " mkpart primary 100MB 512MB  "  # swap
+          + " mkpart primary 512MB 1024MB"  # Cache (typically SSD)
+          + " mkpart primary 1024MB -1s ",  # Backing device (typically HDD)
+          "modprobe bcache",
+          "udevadm settle",
+          "make-bcache -B /dev/vda4 -C /dev/vda3",
+          "udevadm settle",
+          "mkfs.ext3 -L nixos /dev/bcache0",
+          "mount LABEL=nixos /mnt",
+          "mkfs.ext3 -L boot /dev/vda1",
+          "mkdir /mnt/boot",
+          "mount LABEL=boot /mnt/boot",
+          "mkswap -f /dev/vda2 -L swap",
+          "swapon -L swap",
+      )
+    '';
+  };
+
+  bcachefsSimple = makeInstallerTest "bcachefs-simple" {
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "bcachefs" ];
+      imports = [ no-zfs-module ];
+    };
+
+    createPartitions = ''
+      machine.succeed(
+        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+        + " mkpart primary ext2 1M 100MB"          # /boot
+        + " mkpart primary linux-swap 100M 1024M"  # swap
+        + " mkpart primary 1024M -1s",             # /
+        "udevadm settle",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "mkfs.bcachefs -L root /dev/vda3",
+        "mount -t bcachefs /dev/vda3 /mnt",
+        "mkfs.ext3 -L boot /dev/vda1",
+        "mkdir -p /mnt/boot",
+        "mount /dev/vda1 /mnt/boot",
+      )
+    '';
+  };
+
+  bcachefsEncrypted = makeInstallerTest "bcachefs-encrypted" {
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "bcachefs" ];
+
+      # disable zfs so we can support latest kernel if needed
+      imports = [ no-zfs-module ];
+
+      environment.systemPackages = with pkgs; [ keyutils ];
+    };
+
+    extraConfig = ''
+      boot.kernelParams = lib.mkAfter [ "console=tty0" ];
+    '';
+
+    enableOCR = true;
+    preBootCommands = ''
+      machine.start()
+      # Enter it wrong once
+      machine.wait_for_text("enter passphrase for ")
+      machine.send_chars("wrong\n")
+      # Then enter it right.
+      machine.wait_for_text("enter passphrase for ")
+      machine.send_chars("password\n")
+    '';
+
+    createPartitions = ''
+      machine.succeed(
+        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+        + " mkpart primary ext2 1M 100MB"          # /boot
+        + " mkpart primary linux-swap 100M 1024M"  # swap
+        + " mkpart primary 1024M -1s",             # /
+        "udevadm settle",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "echo password | mkfs.bcachefs -L root --encrypted /dev/vda3",
+        "echo password | bcachefs unlock -k session /dev/vda3",
+        "echo password | mount -t bcachefs /dev/vda3 /mnt",
+        "mkfs.ext3 -L boot /dev/vda1",
+        "mkdir -p /mnt/boot",
+        "mount /dev/vda1 /mnt/boot",
+      )
+    '';
+  };
+
+  bcachefsMulti = makeInstallerTest "bcachefs-multi" {
+    extraInstallerConfig = {
+      boot.supportedFilesystems = [ "bcachefs" ];
+
+      # disable zfs so we can support latest kernel if needed
+      imports = [ no-zfs-module ];
+    };
+
+    createPartitions = ''
+      machine.succeed(
+        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+        + " mkpart primary ext2 1M 100MB"          # /boot
+        + " mkpart primary linux-swap 100M 1024M"  # swap
+        + " mkpart primary 1024M 4096M"            # /
+        + " mkpart primary 4096M -1s",             # /
+        "udevadm settle",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "mkfs.bcachefs -L root --metadata_replicas 2 --foreground_target ssd --promote_target ssd --background_target hdd --label ssd /dev/vda3 --label hdd /dev/vda4",
+        "mount -t bcachefs /dev/vda3:/dev/vda4 /mnt",
+        "mkfs.ext3 -L boot /dev/vda1",
+        "mkdir -p /mnt/boot",
+        "mount /dev/vda1 /mnt/boot",
+      )
+    '';
+  };
+
+  bcachefsLinuxTesting = makeInstallerTest "bcachefs-linux-testing" {
+    extraInstallerConfig = {
+      imports = [ no-zfs-module ];
+
+      boot = {
+        supportedFilesystems = [ "bcachefs" ];
+        kernelPackages = pkgs.linuxPackages_testing;
+      };
+    };
+
+    extraConfig = ''
+      boot.kernelPackages = pkgs.linuxPackages_testing;
+    '';
+
+    createPartitions = ''
+      machine.succeed(
+        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+        + " mkpart primary ext2 1M 100MB"          # /boot
+        + " mkpart primary linux-swap 100M 1024M"  # swap
+        + " mkpart primary 1024M -1s",             # /
+        "udevadm settle",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "mkfs.bcachefs -L root /dev/vda3",
+        "mount -t bcachefs /dev/vda3 /mnt",
+        "mkfs.ext3 -L boot /dev/vda1",
+        "mkdir -p /mnt/boot",
+        "mount /dev/vda1 /mnt/boot",
+      )
+    '';
+  };
+
+  bcachefsUpgradeToLinuxTesting = makeInstallerTest "bcachefs-upgrade-to-linux-testing" {
+    extraInstallerConfig = {
+      imports = [ no-zfs-module ];
+      boot.supportedFilesystems = [ "bcachefs" ];
+      # We don't have network access in the VM, we need this for `nixos-install`
+      system.extraDependencies = [ pkgs.linux_testing ];
+    };
+
+    extraConfig = ''
+      boot.kernelPackages = pkgs.linuxPackages_testing;
+    '';
+
+    createPartitions = ''
+      machine.succeed(
+        "flock /dev/vda parted --script /dev/vda -- mklabel msdos"
+        + " mkpart primary ext2 1M 100MB"          # /boot
+        + " mkpart primary linux-swap 100M 1024M"  # swap
+        + " mkpart primary 1024M -1s",             # /
+        "udevadm settle",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "mkfs.bcachefs -L root /dev/vda3",
+        "mount -t bcachefs /dev/vda3 /mnt",
+        "mkfs.ext3 -L boot /dev/vda1",
+        "mkdir -p /mnt/boot",
+        "mount /dev/vda1 /mnt/boot",
+      )
+    '';
+  };
+
+  # Test using labels to identify volumes in grub
+  simpleLabels = makeInstallerTest "simpleLabels" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.ext4 -L root /dev/vda3",
+          "mount LABEL=root /mnt",
+      )
+    '';
+    grubIdentifier = "label";
+  };
+
+  # Test using the provided disk name within grub
+  # TODO: Fix udev so the symlinks are unneeded in /dev/disk
+  simpleProvided = makeInstallerTest "simpleProvided" {
+    createPartitions = ''
+      uuid = "$(blkid -s UUID -o value /dev/vda2)"
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+100M -n 3:0:+1G -N 4 -t 1:ef02 -t 2:8300 "
+          + "-t 3:8200 -t 4:8300 -c 2:boot -c 4:root /dev/vda",
+          "mkswap /dev/vda3 -L swap",
+          "swapon -L swap",
+          "mkfs.ext4 -L boot /dev/vda2",
+          "mkfs.ext4 -L root /dev/vda4",
+      )
+      machine.execute(f"ln -s ../../vda2 /dev/disk/by-uuid/{uuid}")
+      machine.execute("ln -s ../../vda4 /dev/disk/by-label/root")
+      machine.succeed(
+          "mount /dev/disk/by-label/root /mnt",
+          "mkdir /mnt/boot",
+          f"mount /dev/disk/by-uuid/{uuid} /mnt/boot",
+      )
+    '';
+    grubIdentifier = "provided";
+  };
+
+  # Simple btrfs grub testing
+  btrfsSimple = makeInstallerTest "btrfsSimple" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.btrfs -L root /dev/vda3",
+          "mount LABEL=root /mnt",
+      )
+    '';
+  };
+
+  # Test to see if we can detect /boot and /nix on subvolumes
+  btrfsSubvols = makeInstallerTest "btrfsSubvols" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.btrfs -L root /dev/vda3",
+          "btrfs device scan",
+          "mount LABEL=root /mnt",
+          "btrfs subvol create /mnt/boot",
+          "btrfs subvol create /mnt/nixos",
+          "btrfs subvol create /mnt/nixos/default",
+          "umount /mnt",
+          "mount -o defaults,subvol=nixos/default LABEL=root /mnt",
+          "mkdir /mnt/boot",
+          "mount -o defaults,subvol=boot LABEL=root /mnt/boot",
+      )
+    '';
+  };
+
+  # Test to see if we can detect default and aux subvolumes correctly
+  btrfsSubvolDefault = makeInstallerTest "btrfsSubvolDefault" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.btrfs -L root /dev/vda3",
+          "btrfs device scan",
+          "mount LABEL=root /mnt",
+          "btrfs subvol create /mnt/badpath",
+          "btrfs subvol create /mnt/badpath/boot",
+          "btrfs subvol create /mnt/nixos",
+          "btrfs subvol set-default "
+          + "$(btrfs subvol list /mnt | grep 'nixos' | awk '{print $2}') /mnt",
+          "umount /mnt",
+          "mount -o defaults LABEL=root /mnt",
+          "mkdir -p /mnt/badpath/boot",  # Help ensure the detection mechanism
+          # is actually looking up subvolumes
+          "mkdir /mnt/boot",
+          "mount -o defaults,subvol=badpath/boot LABEL=root /mnt/boot",
+      )
+    '';
+  };
+
+  # Test to see if we can deal with subvols that need to be escaped in fstab
+  btrfsSubvolEscape = makeInstallerTest "btrfsSubvolEscape" {
+    createPartitions = ''
+      machine.succeed(
+          "sgdisk -Z /dev/vda",
+          "sgdisk -n 1:0:+1M -n 2:0:+1G -N 3 -t 1:ef02 -t 2:8200 -t 3:8300 -c 3:root /dev/vda",
+          "mkswap /dev/vda2 -L swap",
+          "swapon -L swap",
+          "mkfs.btrfs -L root /dev/vda3",
+          "btrfs device scan",
+          "mount LABEL=root /mnt",
+          "btrfs subvol create '/mnt/nixos in space'",
+          "btrfs subvol create /mnt/boot",
+          "umount /mnt",
+          "mount -o 'defaults,subvol=nixos in space' LABEL=root /mnt",
+          "mkdir /mnt/boot",
+          "mount -o defaults,subvol=boot LABEL=root /mnt/boot",
+      )
+    '';
+  };
+} // optionalAttrs systemdStage1 {
+  stratisRoot = makeInstallerTest "stratisRoot" {
+    createPartitions = ''
+      machine.succeed(
+        "sgdisk --zap-all /dev/vda",
+        "sgdisk --new=1:0:+100M --typecode=0:ef00 /dev/vda", # /boot
+        "sgdisk --new=2:0:+1G --typecode=0:8200 /dev/vda", # swap
+        "sgdisk --new=3:0:+5G --typecode=0:8300 /dev/vda", # /
+        "udevadm settle",
+
+        "mkfs.vfat /dev/vda1",
+        "mkswap /dev/vda2 -L swap",
+        "swapon -L swap",
+        "stratis pool create my-pool /dev/vda3",
+        "stratis filesystem create my-pool nixos",
+        "udevadm settle",
+
+        "mount /dev/stratis/my-pool/nixos /mnt",
+        "mkdir -p /mnt/boot",
+        "mount /dev/vda1 /mnt/boot"
+      )
+    '';
+    bootLoader = "systemd-boot";
+    extraInstallerConfig = { modulesPath, ...}: {
+      config = {
+        services.stratis.enable = true;
+        environment.systemPackages = [
+          pkgs.stratis-cli
+          pkgs.thin-provisioning-tools
+          pkgs.lvm2.bin
+          pkgs.stratisd.initrd
+        ];
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/installer/flake.nix b/nixpkgs/nixos/tests/installer/flake.nix
new file mode 100644
index 000000000000..4bbef44e34fc
--- /dev/null
+++ b/nixpkgs/nixos/tests/installer/flake.nix
@@ -0,0 +1,20 @@
+# This file gets copied into the installation
+
+{
+  # To keep things simple, we'll use an absolute path dependency here.
+  inputs.nixpkgs.url = "@nixpkgs@";
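+  # "@nixpkgs@" is a placeholder; the installer test substitutes a local nixpkgs
+  # path here (via sed) before running `nixos-rebuild switch --flake`.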
+
+  outputs = { nixpkgs, ... }: {
+
+    nixosConfigurations.xyz = nixpkgs.lib.nixosSystem {
+      modules = [
+        ./configuration.nix
+        ( nixpkgs + "/nixos/modules/testing/test-instrumentation.nix" )
+        {
+          # We don't need nix-channel anymore
+          nix.channel.enable = false;
+        }
+      ];
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/invidious.nix b/nixpkgs/nixos/tests/invidious.nix
new file mode 100644
index 000000000000..701e8e5e7a3f
--- /dev/null
+++ b/nixpkgs/nixos/tests/invidious.nix
@@ -0,0 +1,80 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "invidious";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ sbruder ];
+  };
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    services.invidious = {
+      enable = true;
+    };
+
+    specialisation = {
+      nginx.configuration = {
+        services.invidious = {
+          nginx.enable = true;
+          domain = "invidious.example.com";
+        };
+        services.nginx.virtualHosts."invidious.example.com" = {
+          forceSSL = false;
+          enableACME = false;
+        };
+        networking.hosts."127.0.0.1" = [ "invidious.example.com" ];
+      };
+      postgres-tcp.configuration = {
+        services.invidious = {
+          database = {
+            createLocally = false;
+            host = "127.0.0.1";
+            passwordFile = toString (pkgs.writeText "database-password" "correct horse battery staple");
+          };
+        };
+        # Normally not needed because when connecting to postgres over TCP/IP
+        # the database is most likely on another host.
+        systemd.services.invidious = {
+          after = [ "postgresql.service" ];
+          requires = [ "postgresql.service" ];
+        };
+        services.postgresql =
+          let
+            inherit (config.services.invidious.settings.db) dbname user;
+          in
+          {
+            enable = true;
+            initialScript = pkgs.writeText "init-postgres-with-password" ''
+              CREATE USER kemal WITH PASSWORD 'correct horse battery staple';
+              CREATE DATABASE invidious OWNER kemal;
+            '';
+          };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    def curl_assert_status_code(url, code, form=None):
+        assert int(machine.succeed(f"curl -s -o /dev/null -w %{{http_code}} {'-F ' + form + ' ' if form else '''}{url}")) == code
+
+
+    def activate_specialisation(name: str):
+        machine.succeed(f"${nodes.machine.config.system.build.toplevel}/specialisation/{name}/bin/switch-to-configuration test >&2")
+
+
+    url = "http://localhost:${toString nodes.machine.config.services.invidious.port}"
+    port = ${toString nodes.machine.config.services.invidious.port}
+
+    machine.wait_for_open_port(port)
+    curl_assert_status_code(f"{url}/search", 200)
+
+    activate_specialisation("nginx")
+    machine.wait_for_open_port(80)
+    curl_assert_status_code("http://invidious.example.com/search", 200)
+
+    # Remove the state so the `initialScript` gets run
+    machine.succeed("systemctl stop postgresql")
+    machine.succeed("rm -r /var/lib/postgresql")
+    activate_specialisation("postgres-tcp")
+    machine.wait_for_open_port(port)
+    curl_assert_status_code(f"{url}/search", 200)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/invoiceplane.nix b/nixpkgs/nixos/tests/invoiceplane.nix
new file mode 100644
index 000000000000..70ed96ee39f3
--- /dev/null
+++ b/nixpkgs/nixos/tests/invoiceplane.nix
@@ -0,0 +1,82 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "invoiceplane";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [
+      onny
+    ];
+  };
+
+  nodes = {
+    invoiceplane_caddy = { ... }: {
+      services.invoiceplane.webserver = "caddy";
+      services.invoiceplane.sites = {
+        "site1.local" = {
+          database.name = "invoiceplane1";
+          database.createLocally = true;
+          enable = true;
+        };
+        "site2.local" = {
+          database.name = "invoiceplane2";
+          database.createLocally = true;
+          enable = true;
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    invoiceplane_caddy.wait_for_unit("caddy")
+    invoiceplane_caddy.wait_for_open_port(80)
+    invoiceplane_caddy.wait_for_open_port(3306)
+
+    site_names = ["site1.local", "site2.local"]
+
+    for site_name in site_names:
+        machine.wait_for_unit(f"phpfpm-invoiceplane-{site_name}")
+
+        with subtest("Website returns welcome screen"):
+            assert "Please install InvoicePlane" in machine.succeed(f"curl -L {site_name}")
+
+        with subtest("Finish InvoicePlane setup"):
+          machine.succeed(
+            f"curl -sSfL --cookie-jar cjar {site_name}/setup/language"
+          )
+          csrf_token = machine.succeed(
+            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+          )
+          machine.succeed(
+            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&ip_lang=english&btn_continue=Continue' {site_name}/setup/language"
+          )
+          csrf_token = machine.succeed(
+            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+          )
+          machine.succeed(
+            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/prerequisites"
+          )
+          csrf_token = machine.succeed(
+            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+          )
+          machine.succeed(
+            f"curl -sSfL --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/configure_database"
+          )
+          csrf_token = machine.succeed(
+            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+          )
+          machine.succeed(
+            f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/install_tables"
+          )
+          csrf_token = machine.succeed(
+            "grep ip_csrf_cookie cjar | cut -f 7 | tr -d '\n'"
+          )
+          machine.succeed(
+            f"curl -sSfl --cookie cjar --cookie-jar cjar -d '_ip_csrf={csrf_token}&btn_continue=Continue' {site_name}/setup/upgrade_tables"
+          )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/iodine.nix b/nixpkgs/nixos/tests/iodine.nix
new file mode 100644
index 000000000000..41fb2e7778d4
--- /dev/null
+++ b/nixpkgs/nixos/tests/iodine.nix
@@ -0,0 +1,64 @@
+import ./make-test-python.nix (
+  { pkgs, ... }: let
+    domain = "whatever.example.com";
+    password = "false;foo;exit;withspecialcharacters";
+  in
+    {
+      name = "iodine";
+      nodes = {
+        server =
+          { ... }:
+
+            {
+              networking.firewall = {
+                allowedUDPPorts = [ 53 ];
+                trustedInterfaces = [ "dns0" ];
+              };
+              boot.kernel.sysctl = {
+                "net.ipv4.ip_forward" = 1;
+                "net.ipv6.ip_forward" = 1;
+              };
+
+              services.iodine.server = {
+                enable = true;
+                ip = "10.53.53.1/24";
+                passwordFile = "${builtins.toFile "password" password}";
+                inherit domain;
+              };
+
+              # test resource: accessible only via tunnel
+              services.openssh = {
+                enable = true;
+                openFirewall = false;
+              };
+            };
+
+        client =
+          { ... }: {
+            services.iodine.clients.testClient = {
+              # test that ProtectHome is "read-only"
+              passwordFile = "/root/pw";
+              relay = "server";
+              server = domain;
+            };
+            systemd.tmpfiles.rules = [
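+              # tmpfiles type "f": create /root/pw with the given mode/owner and write
+              # the iodine password as its contents.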
+              "f /root/pw 0666 root root - ${password}"
+            ];
+            environment.systemPackages = [
+              pkgs.nagiosPluginsOfficial
+            ];
+          };
+
+      };
+
+      testScript = ''
+        start_all()
+
+        server.wait_for_unit("sshd")
+        server.wait_for_unit("iodined")
+        client.wait_for_unit("iodine-testClient")
+
+        client.succeed("check_ssh -H 10.53.53.1")
+      '';
+    }
+)
diff --git a/nixpkgs/nixos/tests/ipv6.nix b/nixpkgs/nixos/tests/ipv6.nix
new file mode 100644
index 000000000000..75faa6f60201
--- /dev/null
+++ b/nixpkgs/nixos/tests/ipv6.nix
@@ -0,0 +1,130 @@
+# Test of IPv6 functionality in NixOS, including whether router
+# solicitation/advertisement using radvd works.
+
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "ipv6";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes =
+    {
+      # We use lib.mkForce here to remove the interface configuration
+      # provided by makeTest, so that the interfaces are all configured
+      # implicitly.
+
+      # This client should use privacy extensions fully, having a
+      # completely-default network configuration.
+      client_defaults.networking.interfaces = lib.mkForce {};
+
+      # Both of these clients should obtain temporary addresses, but
+      # not use them as the default source IP. We thus run the same
+      # checks against them — but the configuration resulting in this
+      # behaviour is different.
+
+      # Here, by using an altered default value for the global setting...
+      client_global_setting = {
+        networking.interfaces = lib.mkForce {};
+        networking.tempAddresses = "enabled";
+      };
+      # and here, by setting this on the interface explicitly.
+      client_interface_setting = {
+        networking.tempAddresses = "disabled";
+        networking.interfaces = lib.mkForce {
+          eth1.tempAddress = "enabled";
+        };
+      };
+
+      server =
+        { services.httpd.enable = true;
+          services.httpd.adminAddr = "foo@example.org";
+          networking.firewall.allowedTCPPorts = [ 80 ];
+        };
+
+      router =
+        { ... }:
+        { services.radvd.enable = true;
+          services.radvd.config =
+            ''
+              interface eth1 {
+                AdvSendAdvert on;
+                # ULA prefix (RFC 4193).
+                prefix fd60:cc69:b537:1::/64 { };
+              };
+            '';
+        };
+    };
+
+  testScript =
+    ''
+      import re
+
+      # Start the router first so that it responds to router solicitations.
+      router.wait_for_unit("radvd")
+
+      clients = [client_defaults, client_global_setting, client_interface_setting]
+
+      start_all()
+
+      for client in clients:
+          client.wait_for_unit("network.target")
+      server.wait_for_unit("network.target")
+      server.wait_for_unit("httpd.service")
+
+      # Wait until the given interface has a non-tentative address of
+      # the desired scope (i.e. has completed Duplicate Address
+      # Detection).
+      def wait_for_address(machine, iface, scope, temporary=False):
+          temporary_flag = "temporary" if temporary else "-temporary"
+          cmd = f"ip -o -6 addr show dev {iface} scope {scope} -tentative {temporary_flag}"
+
+          machine.wait_until_succeeds(f"[ `{cmd} | wc -l` -eq 1 ]")
+          output = machine.succeed(cmd)
+          ip = re.search(r"inet6 ([0-9a-f:]{2,})/", output).group(1)
+
+          if temporary:
+              scope = scope + " temporary"
+          machine.log(f"{scope} address on {iface} is {ip}")
+          return ip
+
+
+      with subtest("Loopback address can be pinged"):
+          client_defaults.succeed("ping -c 1 ::1 >&2")
+          client_defaults.fail("ping -c 1 2001:db8:: >&2")
+
+      with subtest("Local link addresses can be obtained and pinged"):
+          for client in clients:
+              client_ip = wait_for_address(client, "eth1", "link")
+              server_ip = wait_for_address(server, "eth1", "link")
+              client.succeed(f"ping -c 1 {client_ip}%eth1 >&2")
+              client.succeed(f"ping -c 1 {server_ip}%eth1 >&2")
+
+      with subtest("Global addresses can be obtained, pinged, and reached via http"):
+          for client in clients:
+              client_ip = wait_for_address(client, "eth1", "global")
+              server_ip = wait_for_address(server, "eth1", "global")
+              client.succeed(f"ping -c 1 {client_ip} >&2")
+              client.succeed(f"ping -c 1 {server_ip} >&2")
+              client.succeed(f"curl --fail -g http://[{server_ip}]")
+              client.fail(f"curl --fail -g http://[{client_ip}]")
+
+      with subtest(
+          "Privacy extensions: Global temporary address is used as default source address"
+      ):
+          ip = wait_for_address(client_defaults, "eth1", "global", temporary=True)
+          # Default route should have "src <temporary address>" in it
+          client_defaults.succeed(f"ip route get 2001:db8:: | grep 'src {ip}'")
+
+      for client, setting_desc in (
+          (client_global_setting, "global"),
+          (client_interface_setting, "interface"),
+      ):
+          with subtest(f'Privacy extensions: "enabled" through {setting_desc} setting'):
+              # We should be obtaining both a temporary address and an EUI-64 address...
+              ip = wait_for_address(client, "eth1", "global")
+              assert "ff:fe" in ip
+              ip_temp = wait_for_address(client, "eth1", "global", temporary=True)
+              # But using the EUI-64 one.
+              client.succeed(f"ip route get 2001:db8:: | grep 'src {ip}'")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/iscsi-multipath-root.nix b/nixpkgs/nixos/tests/iscsi-multipath-root.nix
new file mode 100644
index 000000000000..494a539b57e0
--- /dev/null
+++ b/nixpkgs/nixos/tests/iscsi-multipath-root.nix
@@ -0,0 +1,267 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+  let
+    initiatorName = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
+    targetName = "iqn.2003-01.org.linux-iscsi.target.x8664:sn.acf8fd9c23af";
+  in
+  {
+    name = "iscsi";
+    meta = {
+      maintainers = pkgs.lib.teams.deshaw.members;
+    };
+
+    nodes = {
+      target = { config, pkgs, lib, ... }: {
+        virtualisation.vlans = [ 1 2 ];
+        services.target = {
+          enable = true;
+          config = {
+            fabric_modules = [ ];
+            storage_objects = [
+              {
+                dev = "/dev/vdb";
+                name = "test";
+                plugin = "block";
+                write_back = true;
+                wwn = "92b17c3f-6b40-4168-b082-ceeb7b495522";
+              }
+            ];
+            targets = [
+              {
+                fabric = "iscsi";
+                tpgs = [
+                  {
+                    enable = true;
+                    attributes = {
+                      authentication = 0;
+                      generate_node_acls = 1;
+                    };
+                    luns = [
+                      {
+                        alias = "94dfe06967";
+                        alua_tg_pt_gp_name = "default_tg_pt_gp";
+                        index = 0;
+                        storage_object = "/backstores/block/test";
+                      }
+                    ];
+                    node_acls = [
+                      {
+                        mapped_luns = [
+                          {
+                            alias = "d42f5bdf8a";
+                            index = 0;
+                            tpg_lun = 0;
+                            write_protect = false;
+                          }
+                        ];
+                        node_wwn = initiatorName;
+                      }
+                    ];
+                    portals = [
+                      {
+                        ip_address = "0.0.0.0";
+                        iser = false;
+                        offload = false;
+                        port = 3260;
+                      }
+                    ];
+                    tag = 1;
+                  }
+                ];
+                wwn = targetName;
+              }
+            ];
+          };
+        };
+
+        networking.firewall.allowedTCPPorts = [ 3260 ];
+        networking.firewall.allowedUDPPorts = [ 3260 ];
+
+        virtualisation.memorySize = 2048;
+        virtualisation.emptyDiskImages = [ 2048 ];
+      };
+
+      initiatorAuto = { nodes, config, pkgs, ... }: {
+        virtualisation.vlans = [ 1 2 ];
+
+        services.multipath = {
+          enable = true;
+          defaults = ''
+            find_multipaths yes
+            user_friendly_names yes
+          '';
+          pathGroups = [
+            {
+              alias = 123456;
+              wwid = "3600140592b17c3f6b404168b082ceeb7";
+            }
+          ];
+        };
+
+        services.openiscsi = {
+          enable = true;
+          enableAutoLoginOut = true;
+          discoverPortal = "target";
+          name = initiatorName;
+        };
+
+        environment.systemPackages = with pkgs; [
+          xfsprogs
+        ];
+
+        environment.etc."initiator-root-disk-closure".source = nodes.initiatorRootDisk.config.system.build.toplevel;
+
+        nix.settings = {
+          substituters = lib.mkForce [ ];
+          hashed-mirrors = null;
+          connect-timeout = 1;
+        };
+      };
+
+      initiatorRootDisk = { config, pkgs, modulesPath, lib, ... }: {
+        boot.initrd.network.enable = true;
+        boot.loader.grub.enable = false;
+
+        boot.kernelParams = lib.mkOverride 5 (
+          [
+            "boot.shell_on_fail"
+            "console=tty1"
+            "ip=192.168.1.1:::255.255.255.0::ens9:none"
+            "ip=192.168.2.1:::255.255.255.0::ens10:none"
+          ]
+        );
+
+        # defaults to true, puts some code in the initrd that tries to mount an overlayfs on /nix/store
+        virtualisation.writableStore = false;
+        virtualisation.vlans = [ 1 2 ];
+
+        services.multipath = {
+          enable = true;
+          defaults = ''
+            find_multipaths yes
+            user_friendly_names yes
+          '';
+          pathGroups = [
+            {
+              alias = 123456;
+              wwid = "3600140592b17c3f6b404168b082ceeb7";
+            }
+          ];
+        };
+
+        fileSystems = lib.mkOverride 5 {
+          "/" = {
+            fsType = "xfs";
+            device = "/dev/mapper/123456";
+            options = [ "_netdev" ];
+          };
+        };
+
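+        # Pre-seed multipath's wwids file; each whitelisted WWID is wrapped in slashes.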
+        boot.initrd.extraFiles."etc/multipath/wwids".source = pkgs.writeText "wwids" "/3600140592b17c3f6b404168b082ceeb7/";
+
+        boot.iscsi-initiator = {
+          discoverPortal = "target";
+          name = initiatorName;
+          target = targetName;
+          extraIscsiCommands = ''
+            iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login
+          '';
+        };
+      };
+
+    };
+
+    testScript = { nodes, ... }: ''
+      target.start()
+      target.wait_for_unit("iscsi-target.service")
+
+      initiatorAuto.start()
+
+      initiatorAuto.wait_for_unit("iscsid.service")
+      initiatorAuto.wait_for_unit("iscsi.service")
+      initiatorAuto.get_unit_info("iscsi")
+
+      # Expecting this to fail since we should already know about 192.168.1.3
+      initiatorAuto.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.1.3 --login")
+      # Expecting this to succeed since we don't yet know about 192.168.2.3
+      initiatorAuto.succeed("iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login")
+
+      # /dev/sda is provided by iscsi on target
+      initiatorAuto.succeed("set -x; while ! test -e /dev/sda; do sleep 1; done")
+
+      initiatorAuto.succeed("mkfs.xfs /dev/sda")
+      initiatorAuto.succeed("mkdir /mnt")
+
+      # Start by verifying /dev/sda and /dev/sdb are both the same disk
+      initiatorAuto.succeed("mount /dev/sda /mnt")
+      initiatorAuto.succeed("touch /mnt/hi")
+      initiatorAuto.succeed("umount /mnt")
+
+      initiatorAuto.succeed("mount /dev/sdb /mnt")
+      initiatorAuto.succeed("test -e /mnt/hi")
+      initiatorAuto.succeed("umount /mnt")
+
+      initiatorAuto.succeed("systemctl restart multipathd")
+      initiatorAuto.succeed("systemd-cat multipath -ll")
+
+      # Install our RootDisk machine to 123456, the alias to the device that multipath is now managing
+      initiatorAuto.succeed("mount /dev/mapper/123456 /mnt")
+      initiatorAuto.succeed("mkdir -p /mnt/etc/{multipath,iscsi}")
+      initiatorAuto.succeed("cp -r /etc/multipath/wwids /mnt/etc/multipath/wwids")
+      initiatorAuto.succeed("cp -r /etc/iscsi/{nodes,send_targets} /mnt/etc/iscsi")
+      initiatorAuto.succeed(
+        "nixos-install --no-bootloader --no-root-passwd --system /etc/initiator-root-disk-closure"
+      )
+      initiatorAuto.succeed("umount /mnt")
+      initiatorAuto.shutdown()
+
+      initiatorRootDisk.start()
+      initiatorRootDisk.wait_for_unit("multi-user.target")
+      initiatorRootDisk.wait_for_unit("iscsid")
+
+      # Log in over both nodes
+      initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.1.3 --login")
+      initiatorRootDisk.fail("iscsiadm -m discovery -o update -t sendtargets -p 192.168.2.3 --login")
+      initiatorRootDisk.succeed("systemctl restart multipathd")
+      initiatorRootDisk.succeed("systemd-cat multipath -ll")
+
+      # Verify we can write and sync the root disk
+      initiatorRootDisk.succeed("mkdir /scratch")
+      initiatorRootDisk.succeed("touch /scratch/both-up")
+      initiatorRootDisk.succeed("sync /scratch")
+
+      # Verify we can write to the root with ens9 (sda, 192.168.1.3) down
+      initiatorRootDisk.succeed("ip link set ens9 down")
+      initiatorRootDisk.succeed("touch /scratch/ens9-down")
+      initiatorRootDisk.succeed("sync /scratch")
+      initiatorRootDisk.succeed("ip link set ens9 up")
+
+      # TODO: find a better way to wait until multipath notices the link is back
+      initiatorRootDisk.succeed("sleep 5")
+      initiatorRootDisk.succeed("touch /scratch/both-down")
+      initiatorRootDisk.succeed("sync /scratch")
+
+      # Verify we can write to the root with ens10 (sdb, 192.168.2.3) down
+      initiatorRootDisk.succeed("ip link set ens10 down")
+      initiatorRootDisk.succeed("touch /scratch/ens10-down")
+      initiatorRootDisk.succeed("sync /scratch")
+      initiatorRootDisk.succeed("ip link set ens10 up")
+      initiatorRootDisk.succeed("touch /scratch/ens10-down")
+      initiatorRootDisk.succeed("sync /scratch")
+
+      initiatorRootDisk.succeed("ip link set ens9 up")
+      initiatorRootDisk.succeed("ip link set ens10 up")
+      initiatorRootDisk.shutdown()
+
+      # Verify we can boot with the target's eth1 down, forcing
+      # it to multipath via the second link
+      target.succeed("ip link set eth1 down")
+      initiatorRootDisk.start()
+      initiatorRootDisk.wait_for_unit("multi-user.target")
+      initiatorRootDisk.wait_for_unit("iscsid")
+      initiatorRootDisk.succeed("test -e /scratch/both-up")
+    '';
+  }
+)
+
+
diff --git a/nixpkgs/nixos/tests/iscsi-root.nix b/nixpkgs/nixos/tests/iscsi-root.nix
new file mode 100644
index 000000000000..eb0719edc379
--- /dev/null
+++ b/nixpkgs/nixos/tests/iscsi-root.nix
@@ -0,0 +1,161 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+    let
+      initiatorName = "iqn.2020-08.org.linux-iscsi.initiatorhost:example";
+      targetName = "iqn.2003-01.org.linux-iscsi.target.x8664:sn.acf8fd9c23af";
+    in
+      {
+        name = "iscsi";
+        meta = {
+          maintainers = pkgs.lib.teams.deshaw.members
+          ++ (with pkgs.lib.maintainers; [ ajs124 ]);
+        };
+
+        nodes = {
+          target = { config, pkgs, lib, ... }: {
+            services.target = {
+              enable = true;
+              config = {
+                fabric_modules = [];
+                storage_objects = [
+                  {
+                    dev = "/dev/vdb";
+                    name = "test";
+                    plugin = "block";
+                    write_back = true;
+                    wwn = "92b17c3f-6b40-4168-b082-ceeb7b495522";
+                  }
+                ];
+                targets = [
+                  {
+                    fabric = "iscsi";
+                    tpgs = [
+                      {
+                        enable = true;
+                        attributes = {
+                          authentication = 0;
+                          generate_node_acls = 1;
+                        };
+                        luns = [
+                          {
+                            alias = "94dfe06967";
+                            alua_tg_pt_gp_name = "default_tg_pt_gp";
+                            index = 0;
+                            storage_object = "/backstores/block/test";
+                          }
+                        ];
+                        node_acls = [
+                          {
+                            mapped_luns = [
+                              {
+                                alias = "d42f5bdf8a";
+                                index = 0;
+                                tpg_lun = 0;
+                                write_protect = false;
+                              }
+                            ];
+                            node_wwn = initiatorName;
+                          }
+                        ];
+                        portals = [
+                          {
+                            ip_address = "0.0.0.0";
+                            iser = false;
+                            offload = false;
+                            port = 3260;
+                          }
+                        ];
+                        tag = 1;
+                      }
+                    ];
+                    wwn = targetName;
+                  }
+                ];
+              };
+            };
+
+            networking.firewall.allowedTCPPorts = [ 3260 ];
+            networking.firewall.allowedUDPPorts = [ 3260 ];
+
+            virtualisation.memorySize = 2048;
+            virtualisation.emptyDiskImages = [ 2048 ];
+          };
+
+          initiatorAuto = { nodes, config, pkgs, ... }: {
+            services.openiscsi = {
+              enable = true;
+              enableAutoLoginOut = true;
+              discoverPortal = "target";
+              name = initiatorName;
+            };
+
+            environment.systemPackages = with pkgs; [
+              xfsprogs
+            ];
+
+            system.extraDependencies = [ nodes.initiatorRootDisk.config.system.build.toplevel ];
+
+            nix.settings = {
+              substituters = lib.mkForce [];
+              hashed-mirrors = null;
+              connect-timeout = 1;
+            };
+          };
+
+          initiatorRootDisk = { config, pkgs, modulesPath, lib, ... }: {
+            boot.loader.grub.enable = false;
+            boot.kernelParams = lib.mkOverride 5 (
+              [
+                "boot.shell_on_fail"
+                "console=tty1"
+                "ip=${config.networking.primaryIPAddress}:::255.255.255.0::ens9:none"
+              ]
+            );
+
+            # Defaults to true, which puts code in the initrd that tries to mount an overlayfs on /nix/store
+            virtualisation.writableStore = false;
+
+            fileSystems = lib.mkOverride 5 {
+              "/" = {
+                fsType = "xfs";
+                device = "/dev/sda";
+                options = [ "_netdev" ];
+              };
+            };
+
+            boot.iscsi-initiator = {
+              discoverPortal = "target";
+              name = initiatorName;
+              target = targetName;
+            };
+          };
+        };
+
+        testScript = { nodes, ... }: ''
+          target.start()
+          target.wait_for_unit("iscsi-target.service")
+
+          initiatorAuto.start()
+
+          initiatorAuto.wait_for_unit("iscsid.service")
+          initiatorAuto.wait_for_unit("iscsi.service")
+          initiatorAuto.get_unit_info("iscsi")
+
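+          # Once the initiator has logged in, the exported LUN shows up as /dev/sda;
+          # format it and install the initiatorRootDisk system closure onto it.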
+          initiatorAuto.succeed("set -x; while ! test -e /dev/sda; do sleep 1; done")
+
+          initiatorAuto.succeed("mkfs.xfs /dev/sda")
+          initiatorAuto.succeed("mkdir /mnt && mount /dev/sda /mnt")
+          initiatorAuto.succeed(
+              "nixos-install --no-bootloader --no-root-passwd --system ${nodes.initiatorRootDisk.config.system.build.toplevel}"
+          )
+          initiatorAuto.succeed("umount /mnt && rmdir /mnt")
+          initiatorAuto.shutdown()
+
+          initiatorRootDisk.start()
+          initiatorRootDisk.wait_for_unit("multi-user.target")
+          initiatorRootDisk.wait_for_unit("iscsid")
+          initiatorRootDisk.succeed("touch test")
+          initiatorRootDisk.shutdown()
+        '';
+      }
+)
diff --git a/nixpkgs/nixos/tests/isso.nix b/nixpkgs/nixos/tests/isso.nix
new file mode 100644
index 000000000000..4ec8b5ec3593
--- /dev/null
+++ b/nixpkgs/nixos/tests/isso.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "isso";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.isso = {
+      enable = true;
+      settings = {
+        general = {
+          dbpath = "/var/lib/isso/comments.db";
+          host = "http://localhost";
+        };
+      };
+    };
+  };
+
+  testScript = let
+    port = 8080;
+  in
+  ''
+    machine.wait_for_unit("isso.service")
+
+    machine.wait_for_open_port(${toString port})
+
+    machine.succeed("curl --fail http://localhost:${toString port}/?uri")
+    machine.succeed("curl --fail http://localhost:${toString port}/js/embed.min.js")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jackett.nix b/nixpkgs/nixos/tests/jackett.nix
new file mode 100644
index 000000000000..bc8b724e8b4b
--- /dev/null
+++ b/nixpkgs/nixos/tests/jackett.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "jackett";
+  meta.maintainers = with lib.maintainers; [ etu ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.jackett.enable = true; };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("jackett.service")
+    machine.wait_for_open_port(9117)
+    machine.succeed("curl --fail http://localhost:9117/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jellyfin.nix b/nixpkgs/nixos/tests/jellyfin.nix
new file mode 100644
index 000000000000..7d3097b58629
--- /dev/null
+++ b/nixpkgs/nixos/tests/jellyfin.nix
@@ -0,0 +1,155 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+  {
+    name = "jellyfin";
+    meta.maintainers = with lib.maintainers; [ minijackson ];
+
+    nodes.machine =
+      { ... }:
+      {
+        services.jellyfin.enable = true;
+        environment.systemPackages = with pkgs; [ ffmpeg ];
+      };
+
+    # Documentation of the Jellyfin API: https://api.jellyfin.org/
+    # Beware, loading that page can be resource intensive
+    testScript =
+      let
+        payloads = {
+          auth = pkgs.writeText "auth.json" (builtins.toJSON {
+            Username = "jellyfin";
+          });
+          empty = pkgs.writeText "empty.json" (builtins.toJSON { });
+        };
+      in
+      ''
+        import json
+        from urllib.parse import urlencode
+
+        machine.wait_for_unit("jellyfin.service")
+        machine.wait_for_open_port(8096)
+        machine.succeed("curl --fail http://localhost:8096/")
+
+        machine.wait_until_succeeds("curl --fail http://localhost:8096/health | grep Healthy")
+
+        auth_header = 'MediaBrowser Client="NixOS Integration Tests", DeviceId="1337", Device="Apple II", Version="20.09"'
+
+
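+        # Helpers wrapping curl with the X-Emby-Authorization header expected by
+        # the Jellyfin API; api_post optionally attaches a JSON payload.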
+        def api_get(path):
+            return f"curl --fail 'http://localhost:8096{path}' -H 'X-Emby-Authorization:{auth_header}'"
+
+
+        def api_post(path, json_file=None):
+            if json_file:
+                return f"curl --fail -X post 'http://localhost:8096{path}' -d '@{json_file}' -H Content-Type:application/json -H 'X-Emby-Authorization:{auth_header}'"
+            else:
+                return f"curl --fail -X post 'http://localhost:8096{path}' -H 'X-Emby-Authorization:{auth_header}'"
+
+
+        with machine.nested("Wizard completes"):
+            machine.wait_until_succeeds(api_get("/Startup/Configuration"))
+            machine.succeed(api_get("/Startup/FirstUser"))
+            machine.succeed(api_post("/Startup/Complete"))
+
+        with machine.nested("Can login"):
+            auth_result_str = machine.succeed(
+                api_post(
+                    "/Users/AuthenticateByName",
+                    "${payloads.auth}",
+                )
+            )
+            auth_result = json.loads(auth_result_str)
+            auth_token = auth_result["AccessToken"]
+            auth_header += f", Token={auth_token}"
+
+            sessions_result_str = machine.succeed(api_get("/Sessions"))
+            sessions_result = json.loads(sessions_result_str)
+
+            this_session = [
+                session for session in sessions_result if session["DeviceId"] == "1337"
+            ]
+            if len(this_session) != 1:
+                raise Exception("Session not created")
+
+            me_str = machine.succeed(api_get("/Users/Me"))
+            me = json.loads(me_str)["Id"]
+
+        with machine.nested("Can add library"):
+            tempdir = machine.succeed("mktemp -d -p /var/lib/jellyfin").strip()
+            machine.succeed(f"chmod 755 '{tempdir}'")
+
+            # Generate a dummy video that we can test later
+            videofile = f"{tempdir}/Big Buck Bunny (2008) [1080p].mkv"
+            machine.succeed(f"ffmpeg -f lavfi -i testsrc2=duration=5 '{videofile}'")
+
+            add_folder_query = urlencode(
+                {
+                    "name": "My Library",
+                    "collectionType": "Movies",
+                    "paths": tempdir,
+                    "refreshLibrary": "true",
+                }
+            )
+
+            machine.succeed(
+                api_post(
+                    f"/Library/VirtualFolders?{add_folder_query}",
+                    "${payloads.empty}",
+                )
+            )
+
+
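+        # Poll the virtual folders until the library scan triggered above
+        # reports an Idle refresh status.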
+        def is_refreshed(_):
+            folders_str = machine.succeed(api_get("/Library/VirtualFolders"))
+            folders = json.loads(folders_str)
+            print(folders)
+            return all(folder["RefreshStatus"] == "Idle" for folder in folders)
+
+
+        retry(is_refreshed)
+
+        with machine.nested("Can identify videos"):
+            items = []
+
+            # For some reason, having the folder refreshed doesn't mean the
+            # movie was scanned
+            def has_movie(_):
+                global items
+
+                items_str = machine.succeed(
+                    api_get(f"/Users/{me}/Items?IncludeItemTypes=Movie&Recursive=true")
+                )
+                items = json.loads(items_str)["Items"]
+
+                return len(items) == 1
+
+            retry(has_movie)
+
+            video = items[0]["Id"]
+
+            item_info_str = machine.succeed(api_get(f"/Users/{me}/Items/{video}"))
+            item_info = json.loads(item_info_str)
+
+            if item_info["Name"] != "Big Buck Bunny":
+                raise Exception("Jellyfin failed to properly identify file")
+
+        with machine.nested("Can read videos"):
+            media_source_id = item_info["MediaSources"][0]["Id"]
+
+            machine.succeed(
+                "ffmpeg"
+                + f" -headers 'X-Emby-Authorization:{auth_header}'"
+                + f" -i http://localhost:8096/Videos/{video}/master.m3u8?mediaSourceId={media_source_id}"
+                + " /tmp/test.mkv"
+            )
+
+            duration = machine.succeed(
+                "ffprobe /tmp/test.mkv"
+                + " -show_entries format=duration"
+                + " -of compact=print_section=0:nokey=1"
+            )
+
+            if duration.strip() != "5.000000":
+                raise Exception("Downloaded video has wrong duration")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/jenkins-cli.nix b/nixpkgs/nixos/tests/jenkins-cli.nix
new file mode 100644
index 000000000000..f25e1604da33
--- /dev/null
+++ b/nixpkgs/nixos/tests/jenkins-cli.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ...} : rec {
+  name = "jenkins-cli";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ pamplemousse ];
+  };
+
+  nodes = {
+    machine =
+      { ... }:
+      {
+        services.jenkins = {
+          enable = true;
+          withCLI = true;
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("jenkins")
+
+    assert "JENKINS_URL" in machine.succeed("env")
+    assert "http://0.0.0.0:8080" in machine.succeed("echo $JENKINS_URL")
+
+    machine.succeed(
+        "jenkins-cli -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword)"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jenkins.nix b/nixpkgs/nixos/tests/jenkins.nix
new file mode 100644
index 000000000000..a8f621000654
--- /dev/null
+++ b/nixpkgs/nixos/tests/jenkins.nix
@@ -0,0 +1,123 @@
+# verifies:
+#   1. jenkins service starts on master node
+#   2. jenkins user can be extended on both master and slave
+#   3. jenkins service not started on slave node
+#   4. declarative jobs can be added and removed
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "jenkins";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ bjornfor coconnor domenkozar eelco ];
+  };
+
+  nodes = {
+
+    master =
+      { ... }:
+      { services.jenkins = {
+          enable = true;
+          jobBuilder = {
+            enable = true;
+            nixJobs = [
+              { job = {
+                  name = "job-1";
+                  builders = [
+                    { shell = ''
+                        echo "Running job-1"
+                      '';
+                    }
+                  ];
+                };
+              }
+
+              { job = {
+                  name = "folder-1";
+                  project-type = "folder";
+                };
+              }
+
+              { job = {
+                  name = "folder-1/job-2";
+                  builders = [
+                    { shell = ''
+                        echo "Running job-2"
+                      '';
+                    }
+                  ];
+                };
+              }
+            ];
+          };
+        };
+
+        specialisation.noJenkinsJobs.configuration = {
+          services.jenkins.jobBuilder.nixJobs = pkgs.lib.mkForce [];
+        };
+
+        # should have no effect
+        services.jenkinsSlave.enable = true;
+
+        users.users.jenkins.extraGroups = [ "users" ];
+
+        systemd.services.jenkins.serviceConfig.TimeoutStartSec = "6min";
+      };
+
+    slave =
+      { ... }:
+      { services.jenkinsSlave.enable = true;
+
+        users.users.jenkins.extraGroups = [ "users" ];
+      };
+
+  };
+
+  testScript = { nodes, ... }:
+    let
+      configWithoutJobs = "${nodes.master.system.build.toplevel}/specialisation/noJenkinsJobs";
+      jenkinsPort = nodes.master.services.jenkins.port;
+      jenkinsUrl = "http://localhost:${toString jenkinsPort}";
+    in ''
+    start_all()
+
+    master.wait_for_unit("default.target")
+
+    assert "Authentication required" in master.succeed("curl http://localhost:8080")
+
+    for host in master, slave:
+        groups = host.succeed("sudo -u jenkins groups")
+        assert "jenkins" in groups
+        assert "users" in groups
+
+    slave.fail("systemctl is-enabled jenkins.service")
+
+    slave.succeed("java -fullversion")
+
+    with subtest("jobs are declarative"):
+        # Check that jobs are created on disk.
+        master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/job-1/config.xml")
+        master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/config.xml")
+        master.wait_until_succeeds("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml")
+
+        # Verify that jenkins also sees the jobs.
+        out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs")
+        jobs = [x.strip() for x in out.splitlines()]
+        # Seeing jobs inside folders requires the Folders plugin
+        # (https://plugins.jenkins.io/cloudbees-folder/), which we don't have
+        # in this vanilla jenkins install, so limit ourselves to non-folder jobs.
+        assert jobs == ['job-1'], f"jobs != ['job-1']: {jobs}"
+
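+        # Switch to the specialisation that declares no jobs; the job builder
+        # should then remove the previously created jobs.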
+        master.succeed(
+            "${configWithoutJobs}/bin/switch-to-configuration test >&2"
+        )
+
+        # Check that jobs are removed from disk.
+        master.wait_until_fails("test -f /var/lib/jenkins/jobs/job-1/config.xml")
+        master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/config.xml")
+        master.wait_until_fails("test -f /var/lib/jenkins/jobs/folder-1/jobs/job-2/config.xml")
+
+        # Verify that jenkins also sees the jobs as removed.
+        out = master.succeed("${pkgs.jenkins}/bin/jenkins-cli -s ${jenkinsUrl} -auth admin:$(cat /var/lib/jenkins/secrets/initialAdminPassword) list-jobs")
+        jobs = [x.strip() for x in out.splitlines()]
+        assert jobs == [], f"jobs != []: {jobs}"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jibri.nix b/nixpkgs/nixos/tests/jibri.nix
new file mode 100644
index 000000000000..45e30af9a9a5
--- /dev/null
+++ b/nixpkgs/nixos/tests/jibri.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "jibri";
+  meta = with pkgs.lib; {
+    maintainers = teams.jitsi.members;
+  };
+
+    nodes.machine = { config, pkgs, ... }: {
+      virtualisation.memorySize = 5120;
+
+      services.jitsi-meet = {
+        enable = true;
+        hostName = "machine";
+        jibri.enable = true;
+      };
+      services.jibri.ignoreCert = true;
+      services.jitsi-videobridge.openFirewall = true;
+
+      networking.firewall.allowedTCPPorts = [ 80 443 ];
+
+      services.nginx.virtualHosts.machine = {
+        enableACME = true;
+        forceSSL = true;
+      };
+
+      security.acme.email = "me@example.org";
+      security.acme.acceptTerms = true;
+      security.acme.server = "https://example.com"; # self-signed only
+    };
+
+  testScript = ''
+    machine.wait_for_unit("jitsi-videobridge2.service")
+    machine.wait_for_unit("jicofo.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("prosody.service")
+    machine.wait_for_unit("jibri.service")
+
+    machine.wait_until_succeeds(
+        "journalctl -b -u prosody -o cat | grep -q 'Authenticated as focus@auth.machine'", timeout=31
+    )
+    machine.wait_until_succeeds(
+        "journalctl -b -u prosody -o cat | grep -q 'Authenticated as jvb@auth.machine'", timeout=32
+    )
+    machine.wait_until_succeeds(
+        "journalctl -b -u prosody -o cat | grep -q 'Authenticated as jibri@auth.machine'", timeout=33
+    )
+    machine.wait_until_succeeds(
+        "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'Joined MUC: jibribrewery@internal.machine'", timeout=34
+    )
+
+    assert '"busyStatus":"IDLE","health":{"healthStatus":"HEALTHY"' in machine.succeed(
+        "curl -X GET http://machine:2222/jibri/api/v1.0/health"
+    )
+    machine.succeed(
+        """curl -H "Content-Type: application/json" -X POST http://localhost:2222/jibri/api/v1.0/startService -d '{"sessionId": "RecordTest","callParams":{"callUrlInfo":{"baseUrl": "https://machine","callName": "TestCall"}},"callLoginParams":{"domain": "recorder.machine", "username": "recorder", "password": "'"$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"'" },"sinkType": "file"}'"""
+    )
+    machine.wait_until_succeeds(
+        "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'File recording service transitioning from state Starting up to Running'", timeout=35
+    )
+    machine.succeed(
+        """sleep 15 && curl -H "Content-Type: application/json" -X POST http://localhost:2222/jibri/api/v1.0/stopService -d '{"sessionId": "RecordTest","callParams":{"callUrlInfo":{"baseUrl": "https://machine","callName": "TestCall"}},"callLoginParams":{"domain": "recorder.machine", "username": "recorder", "password": "'"$(cat /var/lib/jitsi-meet/jibri-recorder-secret)"'" },"sinkType": "file"}'"""
+    )
+    machine.wait_until_succeeds(
+        "cat /var/log/jitsi/jibri/log.0.txt | grep -q 'Finalize script finished with exit value 0'", timeout=36
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jirafeau.nix b/nixpkgs/nixos/tests/jirafeau.nix
new file mode 100644
index 000000000000..dbfaf515e257
--- /dev/null
+++ b/nixpkgs/nixos/tests/jirafeau.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "jirafeau";
+  meta.maintainers = with lib.maintainers; [ davidtwco ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.jirafeau = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("phpfpm-jirafeau.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(80)
+    machine.succeed("curl -sSfL http://localhost/ | grep 'Jirafeau'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jitsi-meet.nix b/nixpkgs/nixos/tests/jitsi-meet.nix
new file mode 100644
index 000000000000..c39cd19e1f0a
--- /dev/null
+++ b/nixpkgs/nixos/tests/jitsi-meet.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "jitsi-meet";
+  meta = with pkgs.lib; {
+    maintainers = teams.jitsi.members;
+  };
+
+  nodes = {
+    client = { nodes, pkgs, ... }: {
+    };
+    server = { config, pkgs, ... }: {
+      services.jitsi-meet = {
+        enable = true;
+        hostName = "server";
+      };
+      services.jitsi-videobridge.openFirewall = true;
+
+      networking.firewall.allowedTCPPorts = [ 80 443 ];
+
+      services.nginx.virtualHosts.server = {
+        enableACME = true;
+        forceSSL = true;
+      };
+
+      security.acme.acceptTerms = true;
+      security.acme.defaults.email = "me@example.org";
+      security.acme.defaults.server = "https://example.com"; # self-signed only
+    };
+  };
+
+  testScript = ''
+    server.wait_for_unit("jitsi-videobridge2.service")
+    server.wait_for_unit("jicofo.service")
+    server.wait_for_unit("nginx.service")
+    server.wait_for_unit("prosody.service")
+
+    server.wait_until_succeeds(
+        "journalctl -b -u prosody -o cat | grep -q 'Authenticated as focus@auth.server'"
+    )
+    server.wait_until_succeeds(
+        "journalctl -b -u prosody -o cat | grep -q 'Authenticated as jvb@auth.server'"
+    )
+
+    client.wait_for_unit("network.target")
+    assert "<title>Jitsi Meet</title>" in client.succeed("curl -sSfkL http://server/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/jool.nix b/nixpkgs/nixos/tests/jool.nix
new file mode 100644
index 000000000000..93575f07b1c8
--- /dev/null
+++ b/nixpkgs/nixos/tests/jool.nix
@@ -0,0 +1,220 @@
+{ pkgs, runTest }:
+
+let
+  inherit (pkgs) lib;
+
+  ipv6Only = {
+    networking.useDHCP = false;
+    networking.interfaces.eth1.ipv4.addresses = lib.mkVMOverride [ ];
+  };
+
+  ipv4Only = {
+    networking.useDHCP = false;
+    networking.interfaces.eth1.ipv6.addresses = lib.mkVMOverride [ ];
+  };
+
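+  # Minimal HTTP responder built on netcat in a loop, returning a fixed message
+  # so the tests can verify traffic actually crosses the translator; `ip`
+  # selects the -4 or -6 flag.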
+  webserver = ip: msg: {
+    systemd.services.webserver = {
+      description = "Mock webserver";
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        while true; do
+        {
+          printf 'HTTP/1.0 200 OK\n'
+          printf 'Content-Length: ${toString (1 + builtins.stringLength msg)}\n'
+          printf '\n${msg}\n\n'
+        } | ${pkgs.libressl.nc}/bin/nc -${toString ip}nvl 80
+        done
+      '';
+    };
+    networking.firewall.allowedTCPPorts = [ 80 ];
+  };
+
+in
+
+{
+  siit = runTest {
+    # This test simulates the setup described in [1] with two IPv6 and
+    # IPv4-only devices on different subnets communicating through a border
+    # relay running Jool in SIIT mode.
+    # [1]: https://nicmx.github.io/Jool/en/run-vanilla.html
+    name = "jool-siit";
+    meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+    # Border relay
+    nodes.relay = {
+      virtualisation.vlans = [ 1 2 ];
+
+      # Enable packet routing
+      boot.kernel.sysctl = {
+        "net.ipv6.conf.all.forwarding" = 1;
+        "net.ipv4.conf.all.forwarding" = 1;
+      };
+
+      networking.useDHCP = false;
+      networking.interfaces = lib.mkVMOverride {
+        eth1.ipv6.addresses = [ { address = "fd::198.51.100.1"; prefixLength = 120; } ];
+        eth2.ipv4.addresses = [ { address = "192.0.2.1";  prefixLength = 24; } ];
+      };
+
+      networking.jool.enable = true;
+      networking.jool.siit.default.global.pool6 = "fd::/96";
+    };
+
+    # IPv6 only node
+    nodes.alice = {
+      imports = [ ipv6Only (webserver 6 "Hello, Bob!") ];
+
+      virtualisation.vlans = [ 1 ];
+      networking.interfaces.eth1.ipv6 = {
+        addresses = [ { address = "fd::198.51.100.8"; prefixLength = 120; } ];
+        routes    = [ { address = "fd::192.0.2.0"; prefixLength = 120;
+                        via = "fd::198.51.100.1"; } ];
+      };
+    };
+
+    # IPv4 only node
+    nodes.bob = {
+      imports = [ ipv4Only (webserver 4 "Hello, Alice!") ];
+
+      virtualisation.vlans = [ 2 ];
+      networking.interfaces.eth1.ipv4 = {
+        addresses = [ { address = "192.0.2.16"; prefixLength = 24; } ];
+        routes    = [ { address = "198.51.100.0"; prefixLength = 24;
+                        via = "192.0.2.1"; } ];
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      relay.wait_for_unit("jool-siit-default.service")
+      alice.wait_for_unit("network-addresses-eth1.service")
+      bob.wait_for_unit("network-addresses-eth1.service")
+
+      with subtest("Alice and Bob can't ping each other"):
+        relay.systemctl("stop jool-siit-default.service")
+        alice.fail("ping -c1 fd::192.0.2.16")
+        bob.fail("ping -c1 198.51.100.8")
+
+      with subtest("Alice and Bob can ping using the relay"):
+        relay.systemctl("start jool-siit-default.service")
+        alice.wait_until_succeeds("ping -c1 fd::192.0.2.16")
+        bob.wait_until_succeeds("ping -c1 198.51.100.8")
+
+      with subtest("Alice can connect to Bob's webserver"):
+        bob.wait_for_open_port(80)
+        alice.succeed("curl -vvv http://[fd::192.0.2.16] >&2")
+        alice.succeed("curl --fail -s http://[fd::192.0.2.16] | grep -q Alice")
+
+      with subtest("Bob can connect to Alices's webserver"):
+        alice.wait_for_open_port(80)
+        bob.succeed("curl --fail -s http://198.51.100.8 | grep -q Bob")
+    '';
+  };
+
+  nat64 = runTest {
+    # This test simulates the setup described in [1] with two IPv6-only nodes
+    # (a client and a homeserver) on the LAN subnet and an IPv4 node on the WAN.
+    # The router runs Jool in stateful NAT64 mode, masquerading the LAN and
+    # forwarding ports using static BIB entries.
+    # [1]: https://nicmx.github.io/Jool/en/run-nat64.html
+    name = "jool-nat64";
+    meta.maintainers = with lib.maintainers; [ rnhmjoj ];
+
+    # Router
+    nodes.router = {
+      virtualisation.vlans = [ 1 2 ];
+
+      # Enable packet routing
+      boot.kernel.sysctl = {
+        "net.ipv6.conf.all.forwarding" = 1;
+        "net.ipv4.conf.all.forwarding" = 1;
+      };
+
+      networking.useDHCP = false;
+      networking.interfaces = lib.mkVMOverride {
+        eth1.ipv6.addresses = [ { address = "2001:db8::1"; prefixLength = 96; } ];
+        eth2.ipv4.addresses = [ { address = "203.0.113.1"; prefixLength = 24; } ];
+      };
+
+      networking.jool.enable = true;
+      networking.jool.nat64.default = {
+        bib = [
+          { # forward HTTP 203.0.113.1 (router) → 2001:db8::9 (homeserver)
+            "protocol"     = "TCP";
+            "ipv4 address" = "203.0.113.1#80";
+            "ipv6 address" = "2001:db8::9#80";
+          }
+        ];
+        pool4 = [
+          # Ports for dynamic translation
+          { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          { protocol =  "UDP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          { protocol = "ICMP";  prefix = "203.0.113.1/32"; "port range" = "40001-65535"; }
+          # Ports for static BIB entries
+          { protocol =  "TCP";  prefix = "203.0.113.1/32"; "port range" = "80"; }
+        ];
+      };
+    };
+
+    # LAN client (IPv6 only)
+    nodes.client = {
+      imports = [ ipv6Only ];
+      virtualisation.vlans = [ 1 ];
+
+      networking.interfaces.eth1.ipv6 = {
+        addresses = [ { address = "2001:db8::8"; prefixLength = 96; } ];
+        routes    = [ { address = "64:ff9b::";   prefixLength = 96;
+                        via = "2001:db8::1"; } ];
+      };
+    };
+
+    # LAN server (IPv6 only)
+    nodes.homeserver = {
+      imports = [ ipv6Only (webserver 6 "Hello from IPv6!") ];
+
+      virtualisation.vlans = [ 1 ];
+      networking.interfaces.eth1.ipv6 = {
+        addresses = [ { address = "2001:db8::9"; prefixLength = 96; } ];
+        routes    = [ { address = "64:ff9b::";   prefixLength = 96;
+                        via = "2001:db8::1"; } ];
+      };
+    };
+
+    # WAN server (IPv4 only)
+    nodes.server = {
+      imports = [ ipv4Only (webserver 4 "Hello from IPv4!") ];
+
+      virtualisation.vlans = [ 2 ];
+      networking.interfaces.eth1.ipv4.addresses =
+        [ { address = "203.0.113.16"; prefixLength = 24; } ];
+    };
+
+    testScript = ''
+      start_all()
+
+      for node in [client, homeserver, server]:
+        node.wait_for_unit("network-addresses-eth1.service")
+
+      with subtest("Client can ping the WAN server"):
+        router.wait_for_unit("jool-nat64-default.service")
+        client.succeed("ping -c1 64:ff9b::203.0.113.16")
+
+      with subtest("Client can connect to the WAN webserver"):
+        server.wait_for_open_port(80)
+        client.succeed("curl --fail -s http://[64:ff9b::203.0.113.16] | grep -q IPv4!")
+
+      with subtest("Router BIB entries are correctly populated"):
+        router.succeed("jool bib display | grep -q 'Dynamic TCP.*2001:db8::8'")
+        router.succeed("jool bib display | grep -q 'Static TCP.*2001:db8::9'")
+
+      with subtest("WAN server can reach the LAN server"):
+        homeserver.wait_for_open_port(80)
+        server.succeed("curl --fail -s http://203.0.113.1 | grep -q IPv6!")
+    '';
+
+  };
+
+}
diff --git a/nixpkgs/nixos/tests/k3s/default.nix b/nixpkgs/nixos/tests/k3s/default.nix
new file mode 100644
index 000000000000..e168f8233c76
--- /dev/null
+++ b/nixpkgs/nixos/tests/k3s/default.nix
@@ -0,0 +1,13 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+, lib ? pkgs.lib
+}:
+let
+  allK3s = lib.filterAttrs (n: _: lib.strings.hasPrefix "k3s_" n) pkgs;
+in
+{
+  # Run a single node k3s cluster and verify a pod can run
+  single-node = lib.mapAttrs (_: k3s: import ./single-node.nix { inherit system pkgs k3s; }) allK3s;
+  # Run a multi-node k3s cluster and verify pod networking works across nodes
+  multi-node = lib.mapAttrs (_: k3s: import ./multi-node.nix { inherit system pkgs k3s; }) allK3s;
+}
diff --git a/nixpkgs/nixos/tests/k3s/multi-node.nix b/nixpkgs/nixos/tests/k3s/multi-node.nix
new file mode 100644
index 000000000000..932b4639b39c
--- /dev/null
+++ b/nixpkgs/nixos/tests/k3s/multi-node.nix
@@ -0,0 +1,183 @@
+import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
+  let
+    imageEnv = pkgs.buildEnv {
+      name = "k3s-pause-image-env";
+      paths = with pkgs; [ tini bashInteractive coreutils socat ];
+    };
+    pauseImage = pkgs.dockerTools.streamLayeredImage {
+      name = "test.local/pause";
+      tag = "local";
+      contents = imageEnv;
+      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+    };
+    # A daemonset that responds 'server' on port 8000
+    networkTestDaemonset = pkgs.writeText "test.yml" ''
+      apiVersion: apps/v1
+      kind: DaemonSet
+      metadata:
+        name: test
+        labels:
+          name: test
+      spec:
+        selector:
+          matchLabels:
+            name: test
+        template:
+          metadata:
+            labels:
+              name: test
+          spec:
+            containers:
+            - name: test
+              image: test.local/pause:local
+              imagePullPolicy: Never
+              resources:
+                limits:
+                  memory: 20Mi
+              command: ["socat", "TCP4-LISTEN:8000,fork", "EXEC:echo server"]
+    '';
+    tokenFile = pkgs.writeText "token" "p@s$w0rd";
+  in
+  {
+    name = "${k3s.name}-multi-node";
+
+    nodes = {
+      server = { pkgs, ... }: {
+        environment.systemPackages = with pkgs; [ gzip jq ];
+        # k3s uses enough resources the default vm fails.
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          role = "server";
+          package = k3s;
+          clusterInit = true;
+          extraFlags = builtins.toString [
+            "--disable" "coredns"
+            "--disable" "local-storage"
+            "--disable" "metrics-server"
+            "--disable" "servicelb"
+            "--disable" "traefik"
+            "--node-ip" "192.168.1.1"
+            "--pause-image" "test.local/pause:local"
+          ];
+        };
+        networking.firewall.allowedTCPPorts = [ 2379 2380 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.1";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+
+      server2 = { pkgs, ... }: {
+        environment.systemPackages = with pkgs; [ gzip jq ];
+        virtualisation.memorySize = 1536;
+        virtualisation.diskSize = 4096;
+
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          serverAddr = "https://192.168.1.1:6443";
+          clusterInit = false;
+          extraFlags = builtins.toString [
+            "--disable" "coredns"
+            "--disable" "local-storage"
+            "--disable" "metrics-server"
+            "--disable" "servicelb"
+            "--disable" "traefik"
+            "--node-ip" "192.168.1.3"
+            "--pause-image" "test.local/pause:local"
+          ];
+        };
+        networking.firewall.allowedTCPPorts = [ 2379 2380 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.3";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.3"; prefixLength = 24; }
+        ];
+      };
+
+      agent = { pkgs, ... }: {
+        virtualisation.memorySize = 1024;
+        virtualisation.diskSize = 2048;
+        services.k3s = {
+          inherit tokenFile;
+          enable = true;
+          role = "agent";
+          serverAddr = "https://192.168.1.3:6443";
+          extraFlags = lib.concatStringsSep " " [
+            "--pause-image" "test.local/pause:local"
+            "--node-ip" "192.168.1.2"
+          ];
+        };
+        networking.firewall.allowedTCPPorts = [ 6443 ];
+        networking.firewall.allowedUDPPorts = [ 8472 ];
+        networking.firewall.trustedInterfaces = [ "flannel.1" ];
+        networking.useDHCP = false;
+        networking.defaultGateway = "192.168.1.2";
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkForce [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+    };
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ euank ];
+    };
+
+    testScript = ''
+      machines = [server, server2, agent]
+      for m in machines:
+          m.start()
+          m.wait_for_unit("k3s")
+
+      is_aarch64 = "${toString pkgs.stdenv.isAarch64}" == "1"
+
+      # wait for the agent to show up
+      server.wait_until_succeeds("k3s kubectl get node agent")
+
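+      # On every node, sanity-check the kernel config (skipped on aarch64) and
+      # import the locally built pause image into containerd so pods need no
+      # network pulls.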
+      for m in machines:
+          # Fix-Me: Tests fail for 'aarch64-linux' as: "CONFIG_CGROUP_FREEZER: missing (fail)"
+          if not is_aarch64:
+              m.succeed("k3s check-config")
+          m.succeed(
+              "${pauseImage} | k3s ctr image import -"
+          )
+
+      server.succeed("k3s kubectl cluster-info")
+      # Also wait for our service account to show up; it takes a sec
+      server.wait_until_succeeds("k3s kubectl get serviceaccount default")
+
+      # Now create a pod on each node via a daemonset and verify they can talk to each other.
+      server.succeed("k3s kubectl apply -f ${networkTestDaemonset}")
+      server.wait_until_succeeds(f'[ "$(k3s kubectl get ds test -o json | jq .status.numberReady)" -eq {len(machines)} ]')
+
+      # Get pod IPs
+      pods = server.succeed("k3s kubectl get po -o json | jq '.items[].metadata.name' -r").splitlines()
+      pod_ips = [server.succeed(f"k3s kubectl get po {name} -o json | jq '.status.podIP' -cr").strip() for name in pods]
+
+      # Verify each server can ping each pod ip
+      for pod_ip in pod_ips:
+          server.succeed(f"ping -c 1 {pod_ip}")
+          agent.succeed(f"ping -c 1 {pod_ip}")
+
+      # Verify the pods can talk to each other
+      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[0]} -- socat TCP:{pod_ips[1]}:8000 -")
+      assert resp.strip() == "server"
+      resp = server.wait_until_succeeds(f"k3s kubectl exec {pods[1]} -- socat TCP:{pod_ips[0]}:8000 -")
+      assert resp.strip() == "server"
+
+      # Cleanup
+      server.succeed("k3s kubectl delete -f ${networkTestDaemonset}")
+
+      for m in machines:
+          m.shutdown()
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/k3s/single-node.nix b/nixpkgs/nixos/tests/k3s/single-node.nix
new file mode 100644
index 000000000000..e059603b9c9d
--- /dev/null
+++ b/nixpkgs/nixos/tests/k3s/single-node.nix
@@ -0,0 +1,85 @@
+import ../make-test-python.nix ({ pkgs, lib, k3s, ... }:
+  let
+    imageEnv = pkgs.buildEnv {
+      name = "k3s-pause-image-env";
+      paths = with pkgs; [ tini (hiPrio coreutils) busybox ];
+    };
+    pauseImage = pkgs.dockerTools.streamLayeredImage {
+      name = "test.local/pause";
+      tag = "local";
+      contents = imageEnv;
+      config.Entrypoint = [ "/bin/tini" "--" "/bin/sleep" "inf" ];
+    };
+    testPodYaml = pkgs.writeText "test.yml" ''
+      apiVersion: v1
+      kind: Pod
+      metadata:
+        name: test
+      spec:
+        containers:
+        - name: test
+          image: test.local/pause:local
+          imagePullPolicy: Never
+          command: ["sh", "-c", "sleep inf"]
+    '';
+  in
+  {
+    name = "${k3s.name}-single-node";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ euank ];
+    };
+
+    nodes.machine = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ k3s gzip ];
+
+      # k3s uses enough resources that the default VM fails.
+      virtualisation.memorySize = 1536;
+      virtualisation.diskSize = 4096;
+
+      services.k3s.enable = true;
+      services.k3s.role = "server";
+      services.k3s.package = k3s;
+      # Slightly reduce resource usage
+      services.k3s.extraFlags = builtins.toString [
+        "--disable" "coredns"
+        "--disable" "local-storage"
+        "--disable" "metrics-server"
+        "--disable" "servicelb"
+        "--disable" "traefik"
+        "--pause-image" "test.local/pause:local"
+      ];
+
+      users.users = {
+        noprivs = {
+          isNormalUser = true;
+          description = "Can't access k3s by default";
+          password = "*";
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("k3s")
+      machine.succeed("kubectl cluster-info")
+      machine.fail("sudo -u noprivs kubectl cluster-info")
+      '' # Fix-Me: Tests fail for 'aarch64-linux' as: "CONFIG_CGROUP_FREEZER: missing (fail)"
+      + lib.optionalString (!pkgs.stdenv.isAarch64) ''machine.succeed("k3s check-config")'' + ''
+
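+      # Stream the locally built pause image into containerd so the test pod
+      # can start without pulling from the network.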
+      machine.succeed(
+          "${pauseImage} | ctr image import -"
+      )
+
+      # Also wait for our service account to show up; it takes a sec
+      machine.wait_until_succeeds("kubectl get serviceaccount default")
+      machine.succeed("kubectl apply -f ${testPodYaml}")
+      machine.succeed("kubectl wait --for 'condition=Ready' pod/test")
+      machine.succeed("kubectl delete -f ${testPodYaml}")
+
+      # regression test for #176445
+      machine.fail("journalctl -o cat -u k3s.service | grep 'ipset utility not found'")
+
+      machine.shutdown()
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/kafka.nix b/nixpkgs/nixos/tests/kafka.nix
new file mode 100644
index 000000000000..864253fd8c73
--- /dev/null
+++ b/nixpkgs/nixos/tests/kafka.nix
@@ -0,0 +1,78 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with pkgs.lib;
+
+let
+  makeKafkaTest = name: kafkaPackage: (import ./make-test-python.nix ({
+    inherit name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ nequissimus ];
+    };
+
+    nodes = {
+      zookeeper1 = { ... }: {
+        services.zookeeper = {
+          enable = true;
+        };
+
+        networking.firewall.allowedTCPPorts = [ 2181 ];
+      };
+      kafka = { ... }: {
+        services.apache-kafka = {
+          enable = true;
+          extraProperties = ''
+            offsets.topic.replication.factor = 1
+            zookeeper.session.timeout.ms = 600000
+          '';
+          package = kafkaPackage;
+          zookeeper = "zookeeper1:2181";
+        };
+
+        networking.firewall.allowedTCPPorts = [ 9092 ];
+        # i686 tests: qemu-system-i386 can simulate max 2047MB RAM (not 2048)
+        virtualisation.memorySize = 2047;
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      zookeeper1.wait_for_unit("default.target")
+      zookeeper1.wait_for_unit("zookeeper.service")
+      zookeeper1.wait_for_open_port(2181)
+
+      kafka.wait_for_unit("default.target")
+      kafka.wait_for_unit("apache-kafka.service")
+      kafka.wait_for_open_port(9092)
+
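+      # Create a topic, produce a single message, and consume it back to verify
+      # the broker round-trip.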
+      kafka.wait_until_succeeds(
+          "${kafkaPackage}/bin/kafka-topics.sh --create "
+          + "--bootstrap-server localhost:9092 --partitions 1 "
+          + "--replication-factor 1 --topic testtopic"
+      )
+      kafka.succeed(
+          "echo 'test 1' | "
+          + "${kafkaPackage}/bin/kafka-console-producer.sh "
+          + "--broker-list localhost:9092 --topic testtopic"
+      )
+      assert "test 1" in kafka.succeed(
+          "${kafkaPackage}/bin/kafka-console-consumer.sh "
+          + "--bootstrap-server localhost:9092 --topic testtopic "
+          + "--from-beginning --max-messages 1"
+      )
+    '';
+  }) { inherit system; });
+
+in with pkgs; {
+  kafka_2_8  = makeKafkaTest "kafka_2_8"  apacheKafka_2_8;
+  kafka_3_0  = makeKafkaTest "kafka_3_0"  apacheKafka_3_0;
+  kafka_3_1  = makeKafkaTest "kafka_3_1"  apacheKafka_3_1;
+  kafka_3_2  = makeKafkaTest "kafka_3_2"  apacheKafka_3_2;
+  kafka_3_3  = makeKafkaTest "kafka_3_3"  apacheKafka_3_3;
+  kafka_3_4  = makeKafkaTest "kafka_3_4"  apacheKafka_3_4;
+  kafka_3_5  = makeKafkaTest "kafka_3_5"  apacheKafka_3_5;
+  kafka  = makeKafkaTest "kafka"  apacheKafka;
+}
diff --git a/nixpkgs/nixos/tests/kanidm.nix b/nixpkgs/nixos/tests/kanidm.nix
new file mode 100644
index 000000000000..3f5bca397740
--- /dev/null
+++ b/nixpkgs/nixos/tests/kanidm.nix
@@ -0,0 +1,128 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    certs = import ./common/acme/server/snakeoil-certs.nix;
+    serverDomain = certs.domain;
+
+    testCredentials = {
+      password = "Password1_cZPEwpCWvrReripJmAZdmVIZd8HHoHcl";
+    };
+  in
+  {
+    name = "kanidm";
+    meta.maintainers = with pkgs.lib.maintainers; [ erictapen Flakebi ];
+
+    nodes.server = { config, pkgs, lib, ... }: {
+      services.kanidm = {
+        enableServer = true;
+        serverSettings = {
+          origin = "https://${serverDomain}";
+          domain = serverDomain;
+          bindaddress = "[::]:443";
+          ldapbindaddress = "[::1]:636";
+          tls_chain = certs."${serverDomain}".cert;
+          tls_key = certs."${serverDomain}".key;
+        };
+      };
+
+      security.pki.certificateFiles = [ certs.ca.cert ];
+
+      networking.hosts."::1" = [ serverDomain ];
+      networking.firewall.allowedTCPPorts = [ 443 ];
+
+      users.users.kanidm.shell = pkgs.bashInteractive;
+
+      environment.systemPackages = with pkgs; [ kanidm openldap ripgrep ];
+    };
+
+    nodes.client = { pkgs, nodes, ... }: {
+      services.kanidm = {
+        enableClient = true;
+        clientSettings = {
+          uri = "https://${serverDomain}";
+          verify_ca = true;
+          verify_hostnames = true;
+        };
+        enablePam = true;
+        unixSettings = {
+          pam_allowed_login_groups = [ "shell" ];
+        };
+      };
+
+      networking.hosts."${nodes.server.networking.primaryIPAddress}" = [ serverDomain ];
+
+      security.pki.certificateFiles = [ certs.ca.cert ];
+    };
+
+    testScript = { nodes, ... }:
+      let
+        ldapBaseDN = builtins.concatStringsSep "," (map (s: "dc=" + s) (pkgs.lib.splitString "." serverDomain));
+
+        # We need access to the config file in the test script.
+        filteredConfig = pkgs.lib.converge
+          (pkgs.lib.filterAttrsRecursive (_: v: v != null))
+          nodes.server.services.kanidm.serverSettings;
+        serverConfigFile = (pkgs.formats.toml { }).generate "server.toml" filteredConfig;
+
+      in
+      ''
+        start_all()
+        server.wait_for_unit("kanidm.service")
+        client.wait_for_unit("network-online.target")
+
+        with subtest("Test HTTP interface"):
+            server.wait_until_succeeds("curl -Lsf https://${serverDomain} | grep Kanidm")
+
+        with subtest("Test LDAP interface"):
+            server.succeed("ldapsearch -H ldaps://${serverDomain}:636 -b '${ldapBaseDN}' -x '(name=test)'")
+
+        with subtest("Test CLI login"):
+            client.succeed("kanidm login -D anonymous")
+            client.succeed("kanidm self whoami | grep anonymous@${serverDomain}")
+            client.succeed("kanidm logout")
+
+        with subtest("Recover idm_admin account"):
+            idm_admin_password = server.succeed("su - kanidm -c 'kanidmd recover-account -c ${serverConfigFile} idm_admin 2>&1 | rg -o \'[A-Za-z0-9]{48}\' '").strip().removeprefix("'").removesuffix("'")
+
+        with subtest("Test unixd connection"):
+            client.wait_for_unit("kanidm-unixd.service")
+            client.wait_for_file("/run/kanidm-unixd/sock")
+            client.wait_until_succeeds("kanidm-unix status | grep working!")
+
+        with subtest("Test user creation"):
+            client.wait_for_unit("getty@tty1.service")
+            client.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+            client.wait_until_tty_matches("1", "login: ")
+            client.send_chars("root\n")
+            client.send_chars("kanidm login -D idm_admin\n")
+            client.wait_until_tty_matches("1", "Enter password: ")
+            client.send_chars(f"{idm_admin_password}\n")
+            client.wait_until_tty_matches("1", "Login Success for idm_admin")
+            client.succeed("kanidm person create testuser TestUser")
+            client.succeed("kanidm person posix set --shell \"$SHELL\" testuser")
+            client.send_chars("kanidm person posix set-password testuser\n")
+            client.wait_until_tty_matches("1", "Enter new")
+            client.send_chars("${testCredentials.password}\n")
+            client.wait_until_tty_matches("1", "Retype")
+            client.send_chars("${testCredentials.password}\n")
+            output = client.succeed("getent passwd testuser")
+            assert "TestUser" in output
+            client.succeed("kanidm group create shell")
+            client.succeed("kanidm group posix set shell")
+            client.succeed("kanidm group add-members shell testuser")
+
+        with subtest("Test user login"):
+            client.send_key("alt-f2")
+            client.wait_until_succeeds("[ $(fgconsole) = 2 ]")
+            client.wait_for_unit("getty@tty2.service")
+            client.wait_until_succeeds("pgrep -f 'agetty.*tty2'")
+            client.wait_until_tty_matches("2", "login: ")
+            client.send_chars("testuser\n")
+            client.wait_until_tty_matches("2", "login: testuser")
+            client.wait_until_succeeds("pgrep login")
+            client.wait_until_tty_matches("2", "Password: ")
+            client.send_chars("${testCredentials.password}\n")
+            client.wait_until_succeeds("systemctl is-active user@$(id -u testuser).service")
+            client.send_chars("touch done\n")
+            client.wait_for_file("/home/testuser@${serverDomain}/done")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/karma.nix b/nixpkgs/nixos/tests/karma.nix
new file mode 100644
index 000000000000..5ac2983b8aa3
--- /dev/null
+++ b/nixpkgs/nixos/tests/karma.nix
@@ -0,0 +1,84 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "karma";
+  nodes = {
+    server = { ... }: {
+      services.prometheus.alertmanager = {
+        enable = true;
+        logLevel = "debug";
+        port = 9093;
+        openFirewall = true;
+        configuration = {
+          global = {
+            resolve_timeout = "1m";
+          };
+          route = {
+            # Root route node
+            receiver = "test";
+            group_by = ["..."];
+            continue = false;
+            group_wait = "1s";
+            group_interval="15s";
+            repeat_interval = "24h";
+          };
+          receivers = [
+            {
+              name = "test";
+              webhook_configs = [
+                {
+                  url = "http://localhost:1234";
+                  send_resolved = true;
+                  max_alerts = 0;
+                }
+              ];
+            }
+          ];
+        };
+      };
+      services.karma = {
+        enable = true;
+        openFirewall = true;
+        settings = {
+          listen = {
+            address = "0.0.0.0";
+            port = 8081;
+          };
+          alertmanager = {
+            servers = [
+              {
+                name = "alertmanager";
+                uri = "https://127.0.0.1:9093";
+              }
+            ];
+          };
+          karma.name = "test-dashboard";
+          log.config = true;
+          log.requests = true;
+          log.timestamp = true;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("Wait for server to come up"):
+
+      server.wait_for_unit("alertmanager.service")
+      server.wait_for_unit("karma.service")
+
+      server.sleep(5) # wait for both services to settle
+
+      server.wait_for_open_port(9093)
+      server.wait_for_open_port(8081)
+
+    with subtest("Test alertmanager readiness"):
+      server.succeed("curl -s http://127.0.0.1:9093/-/ready")
+
+      # Karma only starts serving the dashboard once it has established connectivity to all alertmanagers in its config
+      # Therefore, this will fail if karma isn't able to reach alertmanager
+      server.succeed("curl -s http://127.0.0.1:8081")
+
+    server.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kavita.nix b/nixpkgs/nixos/tests/kavita.nix
new file mode 100644
index 000000000000..f27b3fffbcf6
--- /dev/null
+++ b/nixpkgs/nixos/tests/kavita.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "kavita";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ misterio77 ];
+  };
+
+  nodes = {
+    kavita = { config, pkgs, ... }: {
+      services.kavita = {
+        enable = true;
+        port = 5000;
+        tokenKeyFile = builtins.toFile "kavita.key" "QfpjFvjT83BLtZ74GE3U3Q==";
+      };
+    };
+  };
+
+  testScript = let
+    regUrl = "http://kavita:5000/api/Account/register";
+    payload = builtins.toFile "payload.json" (builtins.toJSON {
+      username = "foo";
+      password = "correcthorsebatterystaple";
+      email = "foo@bar";
+    });
+  in ''
+    kavita.start()
+    kavita.wait_for_unit("kavita.service")
+
+    # Check that static assets are working
+    kavita.wait_until_succeeds("curl http://kavita:5000/site.webmanifest | grep Kavita")
+
+    # Check that registration is working
+    kavita.succeed("curl -fX POST ${regUrl} --json @${payload}")
+    # But only for the first one
+    kavita.fail("curl -fX POST ${regUrl} --json @${payload}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kbd-setfont-decompress.nix b/nixpkgs/nixos/tests/kbd-setfont-decompress.nix
new file mode 100644
index 000000000000..810ef39cc11a
--- /dev/null
+++ b/nixpkgs/nixos/tests/kbd-setfont-decompress.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "kbd-setfont-decompress";
+
+  meta.maintainers = with lib.maintainers; [ oxalica ];
+
+  nodes.machine = { ... }: {};
+
+  testScript = ''
+    machine.succeed("gzip -cd ${pkgs.terminus_font}/share/consolefonts/ter-v16b.psf.gz >font.psf")
+    machine.succeed("gzip <font.psf >font.psf.gz")
+    machine.succeed("bzip2 <font.psf >font.psf.bz2")
+    machine.succeed("xz <font.psf >font.psf.xz")
+    machine.succeed("zstd <font.psf >font.psf.zst")
+    # setfont returns 0 even on error.
+    assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.gz  2>&1") == ""
+    assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.bz2 2>&1") == ""
+    assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.xz  2>&1") == ""
+    assert machine.succeed("PATH= ${pkgs.kbd}/bin/setfont font.psf.zst 2>&1") == ""
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kbd-update-search-paths-patch.nix b/nixpkgs/nixos/tests/kbd-update-search-paths-patch.nix
new file mode 100644
index 000000000000..746a809c4cdf
--- /dev/null
+++ b/nixpkgs/nixos/tests/kbd-update-search-paths-patch.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "kbd-update-search-paths-patch";
+
+  nodes.machine = { pkgs, options, ... }: {
+    console = {
+      packages = options.console.packages.default ++ [ pkgs.terminus_font ];
+    };
+  };
+
+  testScript = ''
+    command = "${pkgs.kbd}/bin/setfont ter-112n 2>&1"
+    (status, out) = machine.execute(command)
+    import re
+    pattern = re.compile(r".*Unable to find file:.*")
+    match = pattern.match(out)
+    if match:
+        raise Exception("command `{}` failed".format(command))
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kea.nix b/nixpkgs/nixos/tests/kea.nix
new file mode 100644
index 000000000000..c8ecf771fa13
--- /dev/null
+++ b/nixpkgs/nixos/tests/kea.nix
@@ -0,0 +1,186 @@
+# This test verifies DHCPv4 interaction between a client and a router.
+# For each successful DHCP allocation, a dynamic update request is sent
+# to a nameserver to register a name in the lan.nixos.test zone.
+# We then verify that client and router can ping each other, and
+# that the nameserver can resolve the client's FQDN to the correct IP
+# address.
+
+import ./make-test-python.nix ({ pkgs, lib, ...}: {
+  meta.maintainers = with lib.maintainers; [ hexa ];
+
+  name = "kea";
+
+  nodes = {
+    router = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+
+      networking = {
+        useDHCP = false;
+        firewall.allowedUDPPorts = [ 67 ];
+      };
+
+      systemd.network = {
+        enable = true;
+        networks = {
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig = {
+              Address = "10.0.0.1/29";
+            };
+          };
+        };
+      };
+
+      services.kea.dhcp4 = {
+        enable = true;
+        settings = {
+          valid-lifetime = 3600;
+          renew-timer = 900;
+          rebind-timer = 1800;
+
+          lease-database = {
+            type = "memfile";
+            persist = true;
+            name = "/var/lib/kea/dhcp4.leases";
+          };
+
+          interfaces-config = {
+            dhcp-socket-type = "raw";
+            interfaces = [
+              "eth1"
+            ];
+          };
+
+          subnet4 = [ {
+            subnet = "10.0.0.0/29";
+            pools = [ {
+              pool = "10.0.0.3 - 10.0.0.3";
+            } ];
+          } ];
+
+          # Enable communication between dhcp4 and a local dhcp-ddns
+          # instance.
+          # https://kea.readthedocs.io/en/kea-2.2.0/arm/dhcp4-srv.html#ddns-for-dhcpv4
+          dhcp-ddns = {
+            enable-updates = true;
+          };
+
+          ddns-send-updates = true;
+          ddns-qualifying-suffix = "lan.nixos.test.";
+        };
+      };
+
+      services.kea.dhcp-ddns = {
+        enable = true;
+        settings = {
+          forward-ddns = {
+            # Configure updates of a forward zone named `lan.nixos.test`
+            # hosted at the nameserver at 10.0.0.2
+            # https://kea.readthedocs.io/en/kea-2.2.0/arm/ddns.html#adding-forward-dns-servers
+            ddns-domains = [ {
+              name = "lan.nixos.test.";
+              # Use a TSIG key in production!
+              key-name = "";
+              dns-servers = [ {
+                ip-address = "10.0.0.2";
+                port = 53;
+              } ];
+            } ];
+          };
+        };
+      };
+    };
+
+    nameserver = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+
+      networking = {
+        useDHCP = false;
+        firewall.allowedUDPPorts = [ 53 ];
+      };
+
+      systemd.network = {
+        enable = true;
+        networks = {
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig = {
+              Address = "10.0.0.2/29";
+            };
+          };
+        };
+      };
+
+      services.resolved.enable = false;
+
+      # Set up an authoritative nameserver, serving the `lan.nixos.test`
+      # zone and configure an ACL that allows dynamic updates from
+      # the router's ip address.
+      # This ACL is likely insufficient for production usage. Please
+      # use TSIG keys.
+      services.knot = let
+        zone = pkgs.writeTextDir "lan.nixos.test.zone" ''
+          @ SOA ns.nixos.test nox.nixos.test 0 86400 7200 3600000 172800
+          @ NS nameserver
+          nameserver A 10.0.0.3
+          router A 10.0.0.1
+        '';
+        zonesDir = pkgs.buildEnv {
+          name = "knot-zones";
+          paths = [ zone ];
+        };
+      in {
+        enable = true;
+        extraArgs = [
+          "-v"
+        ];
+        settings = {
+          server.listen = [
+            "0.0.0.0@53"
+          ];
+
+          log.syslog.any = "info";
+
+          acl.dhcp_ddns = {
+            address = "10.0.0.1";
+            action = "update";
+          };
+
+          template.default = {
+            storage = zonesDir;
+            zonefile-sync = "-1";
+            zonefile-load = "difference-no-serial";
+            journal-content = "all";
+          };
+
+          zone."lan.nixos.test" = {
+            file = "lan.nixos.test.zone";
+            acl = [
+              "dhcp_ddns"
+            ];
+          };
+        };
+      };
+
+    };
+
+    client = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.useDHCP = true;
+      };
+    };
+  };
+  testScript = { ... }: ''
+    start_all()
+    router.wait_for_unit("kea-dhcp4-server.service")
+    client.wait_for_unit("systemd-networkd-wait-online.service")
+    client.wait_until_succeeds("ping -c 5 10.0.0.1")
+    router.wait_until_succeeds("ping -c 5 10.0.0.3")
+    nameserver.wait_until_succeeds("kdig +short client.lan.nixos.test @10.0.0.2 | grep -q 10.0.0.3")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/keepalived.nix b/nixpkgs/nixos/tests/keepalived.nix
new file mode 100644
index 000000000000..d0bf9d465200
--- /dev/null
+++ b/nixpkgs/nixos/tests/keepalived.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "keepalived";
+
+  nodes = {
+    node1 = { pkgs, ... }: {
+      networking.firewall.extraCommands = "iptables -A INPUT -p vrrp -j ACCEPT";
+      services.keepalived.enable = true;
+      services.keepalived.vrrpInstances.test = {
+        interface = "eth1";
+        state = "MASTER";
+        priority = 50;
+        virtualIps = [{ addr = "192.168.1.200"; }];
+        virtualRouterId = 1;
+      };
+      environment.systemPackages = [ pkgs.tcpdump ];
+    };
+    node2 = { pkgs, ... }: {
+      networking.firewall.extraCommands = "iptables -A INPUT -p vrrp -j ACCEPT";
+      services.keepalived.enable = true;
+      services.keepalived.vrrpInstances.test = {
+        interface = "eth1";
+        state = "MASTER";
+        priority = 100;
+        virtualIps = [{ addr = "192.168.1.200"; }];
+        virtualRouterId = 1;
+      };
+      environment.systemPackages = [ pkgs.tcpdump ];
+    };
+  };
+
+  testScript = ''
+    # wait for boot time delay to pass
+    for node in [node1, node2]:
+        node.wait_until_succeeds(
+            "systemctl show -p LastTriggerUSecMonotonic keepalived-boot-delay.timer | grep -vq 'LastTriggerUSecMonotonic=0'"
+        )
+        node.wait_for_unit("keepalived")
+    node2.wait_until_succeeds("ip addr show dev eth1 | grep -q 192.168.1.200")
+    node1.fail("ip addr show dev eth1 | grep -q 192.168.1.200")
+    node1.succeed("ping -c1 192.168.1.200")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/keepassxc.nix b/nixpkgs/nixos/tests/keepassxc.nix
new file mode 100644
index 000000000000..a4f452412cdf
--- /dev/null
+++ b/nixpkgs/nixos/tests/keepassxc.nix
@@ -0,0 +1,72 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "keepassxc";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ turion ];
+    timeout = 1800;
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [
+      ./common/user-account.nix
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+
+    # Regression test for https://github.com/NixOS/nixpkgs/issues/163482
+    qt = {
+      enable = true;
+      platformTheme = "gnome";
+      style = "adwaita-dark";
+    };
+
+    test-support.displayManager.auto.user = "alice";
+    environment.systemPackages = with pkgs; [
+      keepassxc
+      xdotool
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    aliceDo = cmd: ''machine.succeed("su - alice -c '${cmd}' >&2 &");'';
+    in ''
+    with subtest("Ensure X starts"):
+        start_all()
+        machine.wait_for_x()
+
+    with subtest("Can create database and entry with CLI"):
+        ${aliceDo "keepassxc-cli db-create -k foo.keyfile foo.kdbx"}
+        ${aliceDo "keepassxc-cli add --no-password -k foo.keyfile foo.kdbx bar"}
+
+    with subtest("Ensure KeePassXC starts"):
+        # start KeePassXC window
+        ${aliceDo "keepassxc >&2 &"}
+
+        machine.wait_for_text("KeePassXC ${pkgs.keepassxc.version}")
+        machine.screenshot("KeePassXC")
+
+    with subtest("Can open existing database"):
+        machine.send_key("ctrl-o")
+        machine.sleep(5)
+        # Regression check for #163482: ensure keepassxc has not crashed
+        machine.succeed("ps -e | grep keepassxc")
+        machine.wait_for_text("Open database")
+        machine.send_key("ret")
+
+        # Wait for the enter password screen to appear.
+        machine.wait_for_text("/home/alice/foo.kdbx")
+
+        # Provide the key file by typing its path (instead of clicking the "Browse" button)
+        machine.send_key("tab")
+        machine.send_chars("/home/alice/foo.keyfile")
+        machine.send_key("ret")
+        # Database is unlocked (doesn't have "[Locked]" in the title anymore)
+        machine.wait_for_text("foo.kdbx - KeePassXC")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kerberos/default.nix b/nixpkgs/nixos/tests/kerberos/default.nix
new file mode 100644
index 000000000000..f2f1a438918c
--- /dev/null
+++ b/nixpkgs/nixos/tests/kerberos/default.nix
@@ -0,0 +1,7 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+{
+  mit = import ./mit.nix { inherit system pkgs; };
+  heimdal = import ./heimdal.nix { inherit system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/kerberos/heimdal.nix b/nixpkgs/nixos/tests/kerberos/heimdal.nix
new file mode 100644
index 000000000000..47f9d0285aef
--- /dev/null
+++ b/nixpkgs/nixos/tests/kerberos/heimdal.nix
@@ -0,0 +1,42 @@
+import ../make-test-python.nix ({pkgs, ...}: {
+  name = "kerberos_server-heimdal";
+  nodes.machine = { config, libs, pkgs, ...}:
+  { services.kerberos_server =
+    { enable = true;
+      realms = {
+        "FOO.BAR".acl = [{principal = "admin"; access = ["add" "cpw"];}];
+      };
+    };
+    krb5 = {
+      enable = true;
+      kerberos = pkgs.heimdal;
+      libdefaults = {
+        default_realm = "FOO.BAR";
+      };
+      realms = {
+        "FOO.BAR" = {
+          admin_server = "machine";
+          kdc = "machine";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.succeed(
+        "kadmin -l init --realm-max-ticket-life='8 day' --realm-max-renewable-life='10 day' FOO.BAR",
+        "systemctl restart kadmind.service kdc.service",
+    )
+
+    for unit in ["kadmind", "kdc", "kpasswdd"]:
+        machine.wait_for_unit(f"{unit}.service")
+
+    machine.succeed(
+        "kadmin -l add --password=admin_pw --use-defaults admin",
+        "kadmin -l ext_keytab --keytab=admin.keytab admin",
+        "kadmin -p admin -K admin.keytab add --password=alice_pw --use-defaults alice",
+        "kadmin -l ext_keytab --keytab=alice.keytab alice",
+        "kinit -kt alice.keytab alice",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kerberos/mit.nix b/nixpkgs/nixos/tests/kerberos/mit.nix
new file mode 100644
index 000000000000..7e427ffef0ba
--- /dev/null
+++ b/nixpkgs/nixos/tests/kerberos/mit.nix
@@ -0,0 +1,41 @@
+import ../make-test-python.nix ({pkgs, ...}: {
+  name = "kerberos_server-mit";
+  nodes.machine = { config, libs, pkgs, ...}:
+  { services.kerberos_server =
+    { enable = true;
+      realms = {
+        "FOO.BAR".acl = [{principal = "admin"; access = ["add" "cpw"];}];
+      };
+    };
+    krb5 = {
+      enable = true;
+      kerberos = pkgs.krb5;
+      libdefaults = {
+        default_realm = "FOO.BAR";
+      };
+      realms = {
+        "FOO.BAR" = {
+          admin_server = "machine";
+          kdc = "machine";
+        };
+      };
+    };
+    users.extraUsers.alice = { isNormalUser = true; };
+  };
+
+  testScript = ''
+    machine.succeed(
+        "kdb5_util create -s -r FOO.BAR -P master_key",
+        "systemctl restart kadmind.service kdc.service",
+    )
+
+    for unit in ["kadmind", "kdc"]:
+        machine.wait_for_unit(f"{unit}.service")
+
+    machine.succeed(
+        "kadmin.local add_principal -pw admin_pw admin",
+        "kadmin -p admin -w admin_pw addprinc -pw alice_pw alice",
+        "echo alice_pw | sudo -u alice kinit",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kernel-generic.nix b/nixpkgs/nixos/tests/kernel-generic.nix
new file mode 100644
index 000000000000..352deb521a47
--- /dev/null
+++ b/nixpkgs/nixos/tests/kernel-generic.nix
@@ -0,0 +1,49 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}@args:
+
+with pkgs.lib;
+
+let
+  testsForLinuxPackages = linuxPackages: (import ./make-test-python.nix ({ pkgs, ... }: {
+    name = "kernel-${linuxPackages.kernel.version}";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ nequissimus atemu ma27 ];
+    };
+
+    nodes.machine = { ... }:
+      {
+        boot.kernelPackages = linuxPackages;
+      };
+
+    testScript =
+      ''
+        assert "Linux" in machine.succeed("uname -s")
+        assert "${linuxPackages.kernel.modDirVersion}" in machine.succeed("uname -a")
+      '';
+  }) args);
+  kernels = pkgs.linuxKernel.vanillaPackages // {
+    inherit (pkgs.linuxKernel.packages)
+      linux_4_19_hardened
+      linux_5_4_hardened
+      linux_5_10_hardened
+      linux_5_15_hardened
+      linux_6_1_hardened
+      linux_6_5_hardened
+      linux_rt_5_4
+      linux_rt_5_10
+      linux_rt_5_15
+      linux_rt_6_1
+      linux_libre
+
+      linux_testing;
+  };
+
+in mapAttrs (_: lP: testsForLinuxPackages lP) kernels // {
+  passthru = {
+    inherit testsForLinuxPackages;
+
+    testsForKernel = kernel: testsForLinuxPackages (pkgs.linuxPackagesFor kernel);
+  };
+}
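+# Illustrative usage (hypothetical invocation; attribute names follow the
+# `kernels` set above):
+#   nix-build nixos/tests/kernel-generic.nix -A linux_6_1
+# `passthru.testsForKernel` can be applied to an arbitrary kernel derivation
+# to run the same assertions against it.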
diff --git a/nixpkgs/nixos/tests/kernel-latest-ath-user-regd.nix b/nixpkgs/nixos/tests/kernel-latest-ath-user-regd.nix
new file mode 100644
index 000000000000..09e1da9d2aff
--- /dev/null
+++ b/nixpkgs/nixos/tests/kernel-latest-ath-user-regd.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "kernel-latest-ath-user-regd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ veehaitch ];
+  };
+
+  nodes.machine = { pkgs, ... }:
+    {
+      boot.kernelPackages = pkgs.linuxPackages_latest;
+      networking.wireless.athUserRegulatoryDomain = true;
+    };
+
+  testScript =
+    ''
+      assert "CONFIG_ATH_USER_REGD=y" in machine.succeed("zcat /proc/config.gz")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/keter.nix b/nixpkgs/nixos/tests/keter.nix
new file mode 100644
index 000000000000..1cc2ffbde0a0
--- /dev/null
+++ b/nixpkgs/nixos/tests/keter.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    port = 81;
+  in
+  {
+    name = "keter";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ jappie ];
+    };
+
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.keter = {
+        enable = true;
+
+        globalKeterConfig = {
+          cli-port = 123; # just adding this to exercise the freeform settings type
+          listeners = [{
+            host = "*4";
+            inherit port;
+          }];
+        };
+        bundle = {
+          appName = "test-bundle";
+          domain = "localhost";
+          executable = pkgs.writeShellScript "run" ''
+            ${pkgs.python3}/bin/python -m http.server $PORT
+          '';
+        };
+      };
+    };
+
+    testScript =
+      ''
+        machine.wait_for_unit("keter.service")
+
+        machine.wait_for_open_port(${toString port})
+        machine.wait_for_console_text("Activating app test-bundle with hosts: localhost")
+
+
+        machine.succeed("curl --fail http://localhost:${toString port}/")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/kexec.nix b/nixpkgs/nixos/tests/kexec.nix
new file mode 100644
index 000000000000..4d1be497b8ba
--- /dev/null
+++ b/nixpkgs/nixos/tests/kexec.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "kexec";
+  meta = with lib.maintainers; {
+    maintainers = [ flokli lassulus ];
+  };
+
+  nodes = {
+    node1 = { ... }: {
+      virtualisation.vlans = [ ];
+      virtualisation.memorySize = 4 * 1024;
+    };
+
+    node2 = { modulesPath, ... }: {
+      virtualisation.vlans = [ ];
+      environment.systemPackages = [ pkgs.hello ];
+      imports = [
+        "${modulesPath}/installer/netboot/netboot-minimal.nix"
+        "${modulesPath}/testing/test-instrumentation.nix"
+        "${modulesPath}/profiles/qemu-guest.nix"
+      ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    # Test whether reboot via kexec works.
+    node1.wait_for_unit("multi-user.target")
+    node1.succeed('kexec --load /run/current-system/kernel --initrd /run/current-system/initrd --command-line "$(</proc/cmdline)"')
+    node1.execute("systemctl kexec >&2 &", check_return=False)
+    node1.connected = False
+    node1.connect()
+    node1.wait_for_unit("multi-user.target")
+
+    # Check if the machine with netboot-minimal.nix profile boots up
+    node2.wait_for_unit("multi-user.target")
+    node2.shutdown()
+
+    # Kexec node1 to the toplevel of node2 via the kexec-boot script
+    node1.succeed('touch /run/foo')
+    node1.fail('hello')
+    node1.execute('${nodes.node2.system.build.kexecTree}/kexec-boot', check_output=False)
+    node1.connected = False
+    node1.connect()
+    node1.wait_for_unit("multi-user.target")
+    node1.succeed('! test -e /run/foo')
+    node1.succeed('hello')
+    node1.succeed('[ "$(hostname)" = "node2" ]')
+
+    node1.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/keycloak.nix b/nixpkgs/nixos/tests/keycloak.nix
new file mode 100644
index 000000000000..228e57d1cdd6
--- /dev/null
+++ b/nixpkgs/nixos/tests/keycloak.nix
@@ -0,0 +1,183 @@
+# This tests Keycloak: it starts the service, creates a realm with an
+# OIDC client and a user, and simulates the user logging in to the
+# client using their Keycloak login.
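+#
+# Rough flow of the test script below: obtain an admin API token, import
+# a realm containing the test client and user, fetch the client secret,
+# post the login form, exchange the authorization code for an access
+# token, and finally check the userinfo endpoint.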
+
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  frontendUrl = "https://${certs.domain}";
+
+  keycloakTest = import ./make-test-python.nix (
+    { pkgs, databaseType, ... }:
+    let
+      initialAdminPassword = "h4Iho\"JFn't2>iQIR9";
+      adminPasswordFile = pkgs.writeText "admin-password" "${initialAdminPassword}";
+    in
+    {
+      name = "keycloak";
+      meta = with pkgs.lib.maintainers; {
+        maintainers = [ talyz ];
+      };
+
+      nodes = {
+        keycloak = { config, ... }: {
+          security.pki.certificateFiles = [
+            certs.ca.cert
+          ];
+
+          networking.extraHosts = ''
+            127.0.0.1 ${certs.domain}
+          '';
+
+          services.keycloak = {
+            enable = true;
+            settings = {
+              hostname = certs.domain;
+            };
+            inherit initialAdminPassword;
+            sslCertificate = "${certs.${certs.domain}.cert}";
+            sslCertificateKey = "${certs.${certs.domain}.key}";
+            database = {
+              type = databaseType;
+              username = "bogus";
+              name = "also bogus";
+              passwordFile = "${pkgs.writeText "dbPassword" ''wzf6\"vO"Cb\nP>p#6;c&o?eu=q'THE'''H''''E''}";
+            };
+            plugins = with config.services.keycloak.package.plugins; [
+              keycloak-discord
+              keycloak-metrics-spi
+            ];
+          };
+          environment.systemPackages = with pkgs; [
+            xmlstarlet
+            html-tidy
+            jq
+          ];
+        };
+      };
+
+      testScript =
+        let
+          client = {
+            clientId = "test-client";
+            name = "test-client";
+            redirectUris = [ "urn:ietf:wg:oauth:2.0:oob" ];
+          };
+
+          user = {
+            firstName = "Chuck";
+            lastName = "Testa";
+            username = "chuck.testa";
+            email = "chuck.testa@example.com";
+          };
+
+          password = "password1234";
+
+          realm = {
+            enabled = true;
+            realm = "test-realm";
+            clients = [ client ];
+            users = [(
+              user // {
+                enabled = true;
+                credentials = [{
+                  type = "password";
+                  temporary = false;
+                  value = password;
+                }];
+              }
+            )];
+          };
+
+          realmDataJson = pkgs.writeText "realm-data.json" (builtins.toJSON realm);
+
+          jqCheckUserinfo = pkgs.writeText "check-userinfo.jq" ''
+            if {
+              "firstName": .given_name,
+              "lastName": .family_name,
+              "username": .preferred_username,
+              "email": .email
+            } != ${builtins.toJSON user} then
+              error("Wrong user info!")
+            else
+              empty
+            end
+          '';
+        in ''
+          keycloak.start()
+          keycloak.wait_for_unit("keycloak.service")
+          keycloak.wait_for_open_port(443)
+          keycloak.wait_until_succeeds("curl -sSf ${frontendUrl}")
+
+          ### Realm Setup ###
+
+          # Get an admin interface access token
+          keycloak.succeed("""
+              curl -sSf -d 'client_id=admin-cli' \
+                   -d 'username=admin' \
+                   -d "password=$(<${adminPasswordFile})" \
+                   -d 'grant_type=password' \
+                   '${frontendUrl}/realms/master/protocol/openid-connect/token' \
+                   | jq -r '"Authorization: bearer " + .access_token' >admin_auth_header
+          """)
+
+          # Register the metrics SPI
+          keycloak.succeed(
+              """${pkgs.jre}/bin/keytool -import -alias snakeoil -file ${certs.ca.cert} -storepass aaaaaa -keystore cacert.jks -noprompt""",
+              """KC_OPTS='-Djavax.net.ssl.trustStore=cacert.jks -Djavax.net.ssl.trustStorePassword=aaaaaa' kcadm.sh config credentials --server '${frontendUrl}' --realm master --user admin --password "$(<${adminPasswordFile})" """,
+              """KC_OPTS='-Djavax.net.ssl.trustStore=cacert.jks -Djavax.net.ssl.trustStorePassword=aaaaaa' kcadm.sh update events/config -s 'eventsEnabled=true' -s 'adminEventsEnabled=true' -s 'eventsListeners+=metrics-listener'""",
+              """curl -sSf '${frontendUrl}/realms/master/metrics' | grep '^keycloak_admin_event_UPDATE'"""
+          )
+
+          # Publish the realm, including a test OIDC client and user
+          keycloak.succeed(
+              "curl -sSf -H @admin_auth_header -X POST -H 'Content-Type: application/json' -d @${realmDataJson} '${frontendUrl}/admin/realms/'"
+          )
+
+          # Generate and save the client secret. To do this we need
+          # Keycloak's internal id for the client.
+          keycloak.succeed(
+              "curl -sSf -H @admin_auth_header '${frontendUrl}/admin/realms/${realm.realm}/clients?clientId=${client.name}' | jq -r '.[].id' >client_id",
+              "curl -sSf -H @admin_auth_header -X POST '${frontendUrl}/admin/realms/${realm.realm}/clients/'$(<client_id)'/client-secret' | jq -r .value >client_secret",
+          )
+
+
+          ### Authentication Testing ###
+
+          # Start the login process by sending an initial request to the
+          # OIDC authentication endpoint, saving the returned page. Tidy
+          # up the HTML (XmlStarlet is picky) and extract the login form
+          # post url.
+          keycloak.succeed(
+              "curl -sSf -c cookie '${frontendUrl}/realms/${realm.realm}/protocol/openid-connect/auth?client_id=${client.name}&redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob&scope=openid+email&response_type=code&response_mode=query&nonce=qw4o89g3qqm' >login_form",
+              "tidy -asxml -q -m login_form || true",
+              "xml sel -T -t -m \"_:html/_:body/_:div/_:div/_:div/_:div/_:div/_:div/_:form[@id='kc-form-login']\" -v @action login_form >form_post_url",
+          )
+
+          # Post the login form and save the response. Once again tidy up
+          # the HTML, then extract the authorization code.
+          keycloak.succeed(
+              "curl -sSf -L -b cookie -d 'username=${user.username}' -d 'password=${password}' -d 'credentialId=' \"$(<form_post_url)\" >auth_code_html",
+              "tidy -asxml -q -m auth_code_html || true",
+              "xml sel -T -t -m \"_:html/_:body/_:div/_:div/_:div/_:div/_:div/_:input[@id='code']\" -v @value auth_code_html >auth_code",
+          )
+
+          # Exchange the authorization code for an access token.
+          keycloak.succeed(
+              "curl -sSf -d grant_type=authorization_code -d code=$(<auth_code) -d client_id=${client.name} -d client_secret=$(<client_secret) -d redirect_uri=urn%3Aietf%3Awg%3Aoauth%3A2.0%3Aoob '${frontendUrl}/realms/${realm.realm}/protocol/openid-connect/token' | jq -r '\"Authorization: bearer \" + .access_token' >auth_header"
+          )
+
+          # Use the access token on the OIDC userinfo endpoint and check
+          # that the returned user info matches what we initialized the
+          # realm with.
+          keycloak.succeed(
+              "curl -sSf -H @auth_header '${frontendUrl}/realms/${realm.realm}/protocol/openid-connect/userinfo' | jq -f ${jqCheckUserinfo}"
+          )
+        '';
+    }
+  );
+in
+{
+  postgres = keycloakTest { databaseType = "postgresql"; };
+  mariadb = keycloakTest { databaseType = "mariadb"; };
+  mysql = keycloakTest { databaseType = "mysql"; };
+}
diff --git a/nixpkgs/nixos/tests/keyd.nix b/nixpkgs/nixos/tests/keyd.nix
new file mode 100644
index 000000000000..bfc4558b64bb
--- /dev/null
+++ b/nixpkgs/nixos/tests/keyd.nix
@@ -0,0 +1,89 @@
+# The test template is taken from `./keymap.nix`.
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  readyFile = "/tmp/readerReady";
+  resultFile = "/tmp/readerResult";
+
+  testReader = pkgs.writeScript "test-input-reader" ''
+    rm -f ${resultFile} ${resultFile}.tmp
+    logger "testReader: START: Waiting for $1 characters, expecting '$2'."
+    touch ${readyFile}
+    read -r -N $1 chars
+    rm -f ${readyFile}
+    if [ "$chars" == "$2" ]; then
+      logger -s "testReader: PASS: Got '$2' as expected." 2>${resultFile}.tmp
+    else
+      logger -s "testReader: FAIL: Expected '$2' but got '$chars'." 2>${resultFile}.tmp
+    fi
+    # rename after the file is written to prevent a race condition
+    mv  ${resultFile}.tmp ${resultFile}
+  '';
+
+
+  mkKeyboardTest = name: { default, test }: with pkgs.lib; makeTest {
+    inherit name;
+
+    nodes.machine = {
+      services.keyd = {
+        enable = true;
+        keyboards = { inherit default; };
+      };
+    };
+
+    testScript = ''
+      import shlex
+
+      machine.wait_for_unit("keyd.service")
+
+      def run_test_case(cmd, test_case_name, inputs, expected):
+          with subtest(test_case_name):
+              assert len(inputs) == len(expected)
+              machine.execute("rm -f ${readyFile} ${resultFile}")
+              # set up process that expects all the keys to be entered
+              machine.succeed(
+                  "{} {} {} {} >&2 &".format(
+                      cmd,
+                      "${testReader}",
+                      len(inputs),
+                      shlex.quote("".join(expected)),
+                  )
+              )
+              # wait for reader to be ready
+              machine.wait_for_file("${readyFile}")
+              # send all keys
+              for key in inputs:
+                  machine.send_key(key)
+              # wait for result and check
+              machine.wait_for_file("${resultFile}")
+              machine.succeed("grep -q 'PASS:' ${resultFile}")
+      test = ${builtins.toJSON test}
+      run_test_case("openvt -sw --", "${name}", test["press"], test["expect"])
+    '';
+  };
+
+in
+pkgs.lib.mapAttrs mkKeyboardTest {
+  swap-ab_and_ctrl-as-shift = {
+    test.press = [ "a" "ctrl-b" "c" "alt_r-h" ];
+    test.expect = [ "b" "A" "c" "q" ];
+
+    default = {
+      settings.main = {
+        "a" = "b";
+        "b" = "a";
+        "control" = "oneshot(shift)";
+        "rightalt" = "layer(rightalt)";
+      };
+      extraConfig = ''
+        [rightalt:G]
+        h = q
+      '';
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/keymap.nix b/nixpkgs/nixos/tests/keymap.nix
new file mode 100644
index 000000000000..e8973a50f852
--- /dev/null
+++ b/nixpkgs/nixos/tests/keymap.nix
@@ -0,0 +1,233 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  readyFile  = "/tmp/readerReady";
+  resultFile = "/tmp/readerResult";
+
+  testReader = pkgs.writeScript "test-input-reader" ''
+    rm -f ${resultFile} ${resultFile}.tmp
+    logger "testReader: START: Waiting for $1 characters, expecting '$2'."
+    touch ${readyFile}
+    read -r -N $1 chars
+    rm -f ${readyFile}
+
+    if [ "$chars" == "$2" ]; then
+      logger -s "testReader: PASS: Got '$2' as expected." 2>${resultFile}.tmp
+    else
+      logger -s "testReader: FAIL: Expected '$2' but got '$chars'." 2>${resultFile}.tmp
+    fi
+    # rename after the file is written to prevent a race condition
+    mv  ${resultFile}.tmp ${resultFile}
+  '';
+
+
+  mkKeyboardTest = layout: { extraConfig ? {}, tests }: with pkgs.lib; makeTest {
+    name = "keymap-${layout}";
+
+    nodes.machine.console.keyMap = mkOverride 900 layout;
+    nodes.machine.services.xserver.desktopManager.xterm.enable = false;
+    nodes.machine.services.xserver.xkb.layout = mkOverride 900 layout;
+    nodes.machine.imports = [ ./common/x11.nix extraConfig ];
+
+    testScript = ''
+      import json
+      import shlex
+
+
+      def run_test_case(cmd, xorg_keymap, test_case_name, inputs, expected):
+          with subtest(test_case_name):
+              assert len(inputs) == len(expected)
+              machine.execute("rm -f ${readyFile} ${resultFile}")
+
+              # set up process that expects all the keys to be entered
+              machine.succeed(
+                  "{} {} {} {} >&2 &".format(
+                      cmd,
+                      "${testReader}",
+                      len(inputs),
+                      shlex.quote("".join(expected)),
+                  )
+              )
+
+              if xorg_keymap:
+                  # make sure the xterm window is open and has focus
+                  machine.wait_for_window("testterm")
+                  machine.wait_until_succeeds(
+                      "${pkgs.xdotool}/bin/xdotool search --sync --onlyvisible "
+                      "--class testterm windowfocus --sync"
+                  )
+
+              # wait for reader to be ready
+              machine.wait_for_file("${readyFile}")
+
+              # send all keys
+              for key in inputs:
+                  machine.send_key(key)
+
+              # wait for result and check
+              machine.wait_for_file("${resultFile}")
+              machine.succeed("grep -q 'PASS:' ${resultFile}")
+
+
+      with open("${pkgs.writeText "tests.json" (builtins.toJSON tests)}") as json_file:
+          tests = json.load(json_file)
+
+      # These environments used to run in the opposite order, causing the
+      # following error at openvt startup.
+      #
+      # openvt: Couldn't deallocate console 1
+      #
+      # This error did not appear in successful runs.
+      # I don't know the exact cause, but it seems that openvt and X are
+      # fighting over the virtual terminal. This does not appear to be a problem
+      # when the X test runs first.
+      keymap_environments = {
+          "Xorg Keymap": "DISPLAY=:0 xterm -title testterm -class testterm -fullscreen -e",
+          "VT Keymap": "openvt -sw --",
+      }
+
+      machine.wait_for_x()
+
+      for keymap_env_name, command in keymap_environments.items():
+          with subtest(keymap_env_name):
+              for test_case_name, test_data in tests.items():
+                  run_test_case(
+                      command,
+                      False,
+                      test_case_name,
+                      test_data["qwerty"],
+                      test_data["expect"],
+                  )
+    '';
+  };
+
+in pkgs.lib.mapAttrs mkKeyboardTest {
+  azerty = {
+    tests = {
+      azqw.qwerty = [ "q" "w" ];
+      azqw.expect = [ "a" "z" ];
+      altgr.qwerty = [ "alt_r-2" "alt_r-3" "alt_r-4" "alt_r-5" "alt_r-6" ];
+      altgr.expect = [ "~"       "#"       "{"       "["       "|"       ];
+    };
+
+    extraConfig.console.keyMap = "fr";
+    extraConfig.services.xserver.xkb.layout = "fr";
+  };
+
+  bone = {
+    tests = {
+      layer1.qwerty = [ "f"           "j"                     ];
+      layer1.expect = [ "e"           "n"                     ];
+      layer2.qwerty = [ "shift-f"     "shift-j"     "shift-6" ];
+      layer2.expect = [ "E"           "N"           "$"       ];
+      layer3.qwerty = [ "caps_lock-d" "caps_lock-f"           ];
+      layer3.expect = [ "{"           "}"                     ];
+    };
+
+    extraConfig.console.keyMap = "bone";
+    extraConfig.services.xserver.xkb.layout = "de";
+    extraConfig.services.xserver.xkb.variant = "bone";
+  };
+
+  colemak = {
+    tests = {
+      homerow.qwerty = [ "a" "s" "d" "f" "j" "k" "l" "semicolon" ];
+      homerow.expect = [ "a" "r" "s" "t" "n" "e" "i" "o"         ];
+    };
+
+    extraConfig.console.keyMap = "colemak";
+    extraConfig.services.xserver.xkb.layout = "us";
+    extraConfig.services.xserver.xkb.variant = "colemak";
+  };
+
+  dvorak = {
+    tests = {
+      homerow.qwerty = [ "a" "s" "d" "f" "j" "k" "l" "semicolon" ];
+      homerow.expect = [ "a" "o" "e" "u" "h" "t" "n" "s"         ];
+      symbols.qwerty = [ "q" "w" "e" "minus" "equal" ];
+      symbols.expect = [ "'" "," "." "["     "]"     ];
+    };
+
+    extraConfig.console.keyMap = "dvorak";
+    extraConfig.services.xserver.xkb.layout = "us";
+    extraConfig.services.xserver.xkb.variant = "dvorak";
+  };
+
+  dvorak-programmer = {
+    tests = {
+      homerow.qwerty = [ "a" "s" "d" "f" "j" "k" "l" "semicolon" ];
+      homerow.expect = [ "a" "o" "e" "u" "h" "t" "n" "s"         ];
+      numbers.qwerty = map (x: "shift-${x}")
+                       [ "1" "2" "3" "4" "5" "6" "7" "8" "9" "0" "minus" ];
+      numbers.expect = [ "%" "7" "5" "3" "1" "9" "0" "2" "4" "6" "8" ];
+      symbols.qwerty = [ "1" "2" "3" "4" "5" "6" "7" "8" "9" "0" "minus" ];
+      symbols.expect = [ "&" "[" "{" "}" "(" "=" "*" ")" "+" "]" "!" ];
+    };
+
+    extraConfig.console.keyMap = "dvorak-programmer";
+    extraConfig.services.xserver.xkb.layout = "us";
+    extraConfig.services.xserver.xkb.variant = "dvp";
+  };
+
+  neo = {
+    tests = {
+      layer1.qwerty = [ "f"           "j"                     ];
+      layer1.expect = [ "e"           "n"                     ];
+      layer2.qwerty = [ "shift-f"     "shift-j"     "shift-6" ];
+      layer2.expect = [ "E"           "N"           "$"       ];
+      layer3.qwerty = [ "caps_lock-d" "caps_lock-f"           ];
+      layer3.expect = [ "{"           "}"                     ];
+    };
+
+    extraConfig.console.keyMap = "neo";
+    extraConfig.services.xserver.xkb.layout = "de";
+    extraConfig.services.xserver.xkb.variant = "neo";
+  };
+
+  qwertz = {
+    tests = {
+      zy.qwerty = [ "z" "y" ];
+      zy.expect = [ "y" "z" ];
+      altgr.qwerty = map (x: "alt_r-${x}")
+                     [ "q" "less" "7" "8" "9" "0" ];
+      altgr.expect = [ "@" "|"    "{" "[" "]" "}" ];
+    };
+
+    extraConfig.console.keyMap = "de";
+    extraConfig.services.xserver.xkb.layout = "de";
+  };
+
+  custom = {
+    tests = {
+      us.qwerty = [ "a" "b" "g" "d" "z" "shift-2" "shift-3" ];
+      us.expect = [ "a" "b" "g" "d" "z" "@" "#" ];
+      greek.qwerty = map (x: "alt_r-${x}")
+                     [ "a" "b" "g" "d" "z" ];
+      greek.expect = [ "α" "β" "γ" "δ" "ζ" ];
+    };
+
+    extraConfig.console.useXkbConfig = true;
+    extraConfig.services.xserver.xkb.layout = "us-greek";
+    extraConfig.services.xserver.xkb.extraLayouts.us-greek =
+      { description = "US layout with alt-gr greek";
+        languages   = [ "eng" ];
+        symbolsFile = pkgs.writeText "us-greek" ''
+          xkb_symbols "us-greek"
+          {
+            include "us(basic)"
+            include "level3(ralt_switch)"
+            key <LatA> { [ a, A, Greek_alpha ] };
+            key <LatB> { [ b, B, Greek_beta  ] };
+            key <LatG> { [ g, G, Greek_gamma ] };
+            key <LatD> { [ d, D, Greek_delta ] };
+            key <LatZ> { [ z, Z, Greek_zeta  ] };
+          };
+        '';
+      };
+  };
+}
diff --git a/nixpkgs/nixos/tests/knot.nix b/nixpkgs/nixos/tests/knot.nix
new file mode 100644
index 000000000000..44efd93b6fa9
--- /dev/null
+++ b/nixpkgs/nixos/tests/knot.nix
@@ -0,0 +1,200 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+let
+  common = {
+    networking.firewall.enable = false;
+    networking.useDHCP = false;
+  };
+  exampleZone = pkgs.writeTextDir "example.com.zone" ''
+      @ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
+      @       NS      ns1
+      @       NS      ns2
+      ns1     A       192.168.0.1
+      ns1     AAAA    fd00::1
+      ns2     A       192.168.0.2
+      ns2     AAAA    fd00::2
+      www     A       192.0.2.1
+      www     AAAA    2001:DB8::1
+      sub     NS      ns.example.com.
+  '';
+  delegatedZone = pkgs.writeTextDir "sub.example.com.zone" ''
+      @ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
+      @       NS      ns1.example.com.
+      @       NS      ns2.example.com.
+      @       A       192.0.2.2
+      @       AAAA    2001:DB8::2
+  '';
+
+  knotZonesEnv = pkgs.buildEnv {
+    name = "knot-zones";
+    paths = [ exampleZone delegatedZone ];
+  };
+  # DO NOT USE pkgs.writeText IN PRODUCTION. This puts secrets in the Nix store!
+  tsigFile = pkgs.writeText "tsig.conf" ''
+    key:
+      - id: xfr_key
+        algorithm: hmac-sha256
+        secret: zOYgOgnzx3TGe5J5I/0kxd7gTcxXhLYMEq3Ek3fY37s=
+  '';
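+  # For real deployments, generate the TSIG secret outside the Nix store
+  # (e.g. with `keymgr -t xfr_key hmac-sha256`; invocation given as an
+  # assumption) and point `services.knot.keyFiles` at the resulting file.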
+in {
+  name = "knot";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hexa ];
+  };
+
+
+  nodes = {
+    primary = { lib, ... }: {
+      imports = [ common ];
+
+      # trigger sched_setaffinity syscall
+      virtualisation.cores = 2;
+
+      networking.interfaces.eth1 = {
+        ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.1"; prefixLength = 24; }
+        ];
+        ipv6.addresses = lib.mkForce [
+          { address = "fd00::1"; prefixLength = 64; }
+        ];
+      };
+      services.knot.enable = true;
+      services.knot.extraArgs = [ "-v" ];
+      services.knot.keyFiles = [ tsigFile ];
+      services.knot.settings = {
+        server = {
+          listen = [
+            "0.0.0.0@53"
+            "::@53"
+           ];
+          automatic-acl = true;
+        };
+
+        acl.secondary_acl = {
+          address = "192.168.0.2";
+          key = "xfr_key";
+          action = "transfer";
+        };
+
+        remote.secondary.address = "192.168.0.2@53";
+
+        template.default = {
+          storage = knotZonesEnv;
+          notify = [ "secondary" ];
+          acl = [ "secondary_acl" ];
+          dnssec-signing = true;
+          # Input-only zone files
+          # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-3
+          # keeps Knot from writing the zone files back, since they live in the read-only Nix store
+          zonefile-sync = -1;
+          zonefile-load = "difference";
+          journal-content = "changes";
+        };
+
+        zone = {
+          "example.com".file = "example.com.zone";
+          "sub.example.com".file = "sub.example.com.zone";
+        };
+
+        log.syslog.any = "info";
+      };
+    };
+
+    secondary = { lib, ... }: {
+      imports = [ common ];
+      networking.interfaces.eth1 = {
+        ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.2"; prefixLength = 24; }
+        ];
+        ipv6.addresses = lib.mkForce [
+          { address = "fd00::2"; prefixLength = 64; }
+        ];
+      };
+      services.knot.enable = true;
+      services.knot.keyFiles = [ tsigFile ];
+      services.knot.extraArgs = [ "-v" ];
+      services.knot.settings = {
+        server = {
+          listen = [
+            "0.0.0.0@53"
+            "::@53"
+          ];
+          automatic-acl = true;
+        };
+
+        remote.primary = {
+          address = "192.168.0.1@53";
+          key = "xfr_key";
+        };
+
+        template.default = {
+          master = "primary";
+          # zonefileless setup
+          # https://www.knot-dns.cz/docs/2.8/html/operation.html#example-2
+          zonefile-sync = "-1";
+          zonefile-load = "none";
+          journal-content = "all";
+        };
+
+        zone = {
+          "example.com".file = "example.com.zone";
+          "sub.example.com".file = "sub.example.com.zone";
+        };
+
+        log.syslog.any = "info";
+      };
+    };
+    client = { lib, nodes, ... }: {
+      imports = [ common ];
+      networking.interfaces.eth1 = {
+        ipv4.addresses = [
+          { address = "192.168.0.3"; prefixLength = 24; }
+        ];
+        ipv6.addresses = [
+          { address = "fd00::3"; prefixLength = 64; }
+        ];
+      };
+      environment.systemPackages = [ pkgs.knot-dns ];
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    primary4 = (lib.head nodes.primary.config.networking.interfaces.eth1.ipv4.addresses).address;
+    primary6 = (lib.head nodes.primary.config.networking.interfaces.eth1.ipv6.addresses).address;
+
+    secondary4 = (lib.head nodes.secondary.config.networking.interfaces.eth1.ipv4.addresses).address;
+    secondary6 = (lib.head nodes.secondary.config.networking.interfaces.eth1.ipv6.addresses).address;
+  in ''
+    import re
+
+    start_all()
+
+    client.wait_for_unit("network.target")
+    primary.wait_for_unit("knot.service")
+    secondary.wait_for_unit("knot.service")
+
+
+    def test(host, query_type, query, pattern):
+        out = client.succeed(f"khost -t {query_type} {query} {host}").strip()
+        client.log(f"{host} replied with: {out}")
+        assert re.search(pattern, out), f'Did not match "{pattern}"'
+
+
+    for host in ("${primary4}", "${primary6}", "${secondary4}", "${secondary6}"):
+        with subtest(f"Interrogate {host}"):
+            test(host, "SOA", "example.com", r"start of authority.*noc\.example\.com\.")
+            test(host, "A", "example.com", r"has no [^ ]+ record")
+            test(host, "AAAA", "example.com", r"has no [^ ]+ record")
+
+            test(host, "A", "www.example.com", r"address 192.0.2.1$")
+            test(host, "AAAA", "www.example.com", r"address 2001:db8::1$")
+
+            test(host, "NS", "sub.example.com", r"nameserver is ns\d\.example\.com.$")
+            test(host, "A", "sub.example.com", r"address 192.0.2.2$")
+            test(host, "AAAA", "sub.example.com", r"address 2001:db8::2$")
+
+            test(host, "RRSIG", "www.example.com", r"RR set signature is")
+            test(host, "DNSKEY", "example.com", r"DNSSEC key is")
+
+    primary.log(primary.succeed("systemd-analyze security knot.service | grep -v '✓'"))
+  '';
+})
diff --git a/nixpkgs/nixos/tests/komga.nix b/nixpkgs/nixos/tests/komga.nix
new file mode 100644
index 000000000000..d48d19bbbdd3
--- /dev/null
+++ b/nixpkgs/nixos/tests/komga.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "komga";
+  meta.maintainers = with lib.maintainers; [ govanify ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.komga = {
+        enable = true;
+        port = 1234;
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("komga.service")
+    machine.wait_for_open_port(1234)
+    machine.succeed("curl --fail http://localhost:1234/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/krb5/default.nix b/nixpkgs/nixos/tests/krb5/default.nix
new file mode 100644
index 000000000000..dd5b2f37202e
--- /dev/null
+++ b/nixpkgs/nixos/tests/krb5/default.nix
@@ -0,0 +1,5 @@
+{ system ? builtins.currentSystem }:
+{
+  example-config = import ./example-config.nix { inherit system; };
+  deprecated-config = import ./deprecated-config.nix { inherit system; };
+}
diff --git a/nixpkgs/nixos/tests/krb5/deprecated-config.nix b/nixpkgs/nixos/tests/krb5/deprecated-config.nix
new file mode 100644
index 000000000000..aca29ae6ca2b
--- /dev/null
+++ b/nixpkgs/nixos/tests/krb5/deprecated-config.nix
@@ -0,0 +1,50 @@
+# Verifies that the configuration suggested in deprecated example values
+# will result in the expected output.
+
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "krb5-with-deprecated-config";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  nodes.machine =
+    { ... }: {
+      krb5 = {
+        enable = true;
+        defaultRealm = "ATHENA.MIT.EDU";
+        domainRealm = "athena.mit.edu";
+        kdc = "kerberos.mit.edu";
+        kerberosAdminServer = "kerberos.mit.edu";
+      };
+    };
+
+  testScript =
+    let snapshot = pkgs.writeText "krb5-with-deprecated-config.conf" ''
+      [libdefaults]
+        default_realm = ATHENA.MIT.EDU
+
+      [realms]
+        ATHENA.MIT.EDU = {
+          admin_server = kerberos.mit.edu
+          kdc = kerberos.mit.edu
+        }
+
+      [domain_realm]
+        .athena.mit.edu = ATHENA.MIT.EDU
+        athena.mit.edu = ATHENA.MIT.EDU
+
+      [capaths]
+
+
+      [appdefaults]
+
+
+      [plugins]
+
+    '';
+  in ''
+    machine.succeed(
+        "diff /etc/krb5.conf ${snapshot}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/krb5/example-config.nix b/nixpkgs/nixos/tests/krb5/example-config.nix
new file mode 100644
index 000000000000..9a5c3b2af249
--- /dev/null
+++ b/nixpkgs/nixos/tests/krb5/example-config.nix
@@ -0,0 +1,112 @@
+# Verifies that the configuration suggested in (non-deprecated) example values
+# will result in the expected output.
+
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "krb5-with-example-config";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }: {
+      krb5 = {
+        enable = true;
+        kerberos = pkgs.krb5;
+        libdefaults = {
+          default_realm = "ATHENA.MIT.EDU";
+        };
+        realms = {
+          "ATHENA.MIT.EDU" = {
+            admin_server = "athena.mit.edu";
+            kdc = [
+              "athena01.mit.edu"
+              "athena02.mit.edu"
+            ];
+          };
+        };
+        domain_realm = {
+          "example.com" = "EXAMPLE.COM";
+          ".example.com" = "EXAMPLE.COM";
+        };
+        capaths = {
+          "ATHENA.MIT.EDU" = {
+            "EXAMPLE.COM" = ".";
+          };
+          "EXAMPLE.COM" = {
+            "ATHENA.MIT.EDU" = ".";
+          };
+        };
+        appdefaults = {
+          pam = {
+            debug = false;
+            ticket_lifetime = 36000;
+            renew_lifetime = 36000;
+            max_timeout = 30;
+            timeout_shift = 2;
+            initial_timeout = 1;
+          };
+        };
+        plugins = {
+          ccselect = {
+            disable = "k5identity";
+          };
+        };
+        extraConfig = ''
+          [logging]
+            kdc          = SYSLOG:NOTICE
+            admin_server = SYSLOG:NOTICE
+            default      = SYSLOG:NOTICE
+        '';
+      };
+    };
+
+  testScript =
+    let snapshot = pkgs.writeText "krb5-with-example-config.conf" ''
+      [libdefaults]
+        default_realm = ATHENA.MIT.EDU
+
+      [realms]
+        ATHENA.MIT.EDU = {
+          admin_server = athena.mit.edu
+          kdc = athena01.mit.edu
+          kdc = athena02.mit.edu
+        }
+
+      [domain_realm]
+        .example.com = EXAMPLE.COM
+        example.com = EXAMPLE.COM
+
+      [capaths]
+        ATHENA.MIT.EDU = {
+          EXAMPLE.COM = .
+        }
+        EXAMPLE.COM = {
+          ATHENA.MIT.EDU = .
+        }
+
+      [appdefaults]
+        pam = {
+          debug = false
+          initial_timeout = 1
+          max_timeout = 30
+          renew_lifetime = 36000
+          ticket_lifetime = 36000
+          timeout_shift = 2
+        }
+
+      [plugins]
+        ccselect = {
+          disable = k5identity
+        }
+
+      [logging]
+        kdc          = SYSLOG:NOTICE
+        admin_server = SYSLOG:NOTICE
+        default      = SYSLOG:NOTICE
+    '';
+  in ''
+    machine.succeed(
+        "diff /etc/krb5.conf ${snapshot}"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ksm.nix b/nixpkgs/nixos/tests/ksm.nix
new file mode 100644
index 000000000000..026d2ee85a24
--- /dev/null
+++ b/nixpkgs/nixos/tests/ksm.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ lib, ...} :
+
+{
+  name = "ksm";
+  meta = with lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    hardware.ksm.enable = true;
+    hardware.ksm.sleep = 300;
+  };
+
+  testScript =
+    ''
+      machine.start()
+      machine.wait_until_succeeds("test $(</sys/kernel/mm/ksm/run) -eq 1")
+      machine.wait_until_succeeds("test $(</sys/kernel/mm/ksm/sleep_millisecs) -eq 300")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/kthxbye.nix b/nixpkgs/nixos/tests/kthxbye.nix
new file mode 100644
index 000000000000..5ca0917ec8e7
--- /dev/null
+++ b/nixpkgs/nixos/tests/kthxbye.nix
@@ -0,0 +1,110 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "kthxbye";
+
+  meta = with lib.maintainers; {
+    maintainers = [ nukaduka ];
+  };
+
+  nodes.server = { ... }: {
+    environment.systemPackages = with pkgs; [ prometheus-alertmanager ];
+    services.prometheus = {
+      enable = true;
+
+      globalConfig = {
+        scrape_interval = "5s";
+        scrape_timeout = "5s";
+        evaluation_interval = "5s";
+      };
+
+      scrapeConfigs = [
+        {
+          job_name = "prometheus";
+          scrape_interval = "5s";
+          static_configs = [
+            {
+              targets = [ "localhost:9090" ];
+            }
+          ];
+        }
+      ];
+
+      rules = [
+        ''
+          groups:
+            - name: test
+              rules:
+                - alert: node_up
+                  expr: up != 0
+                  for: 5s
+                  labels:
+                    severity: bottom of the barrel
+                  annotations:
+                    summary: node is fine
+        ''
+      ];
+
+      alertmanagers = [
+        {
+          static_configs = [
+            {
+              targets = [
+                "localhost:9093"
+              ];
+            }
+          ];
+        }
+      ];
+
+      alertmanager = {
+        enable = true;
+        openFirewall = true;
+        configuration.route = {
+          receiver = "test";
+          group_wait = "5s";
+          group_interval = "5s";
+          group_by = [ "..." ];
+        };
+        configuration.receivers = [
+          {
+            name = "test";
+            webhook_configs = [
+              {
+                url = "http://localhost:1234";
+              }
+            ];
+          }
+        ];
+      };
+    };
+
+    services.kthxbye = {
+      enable = true;
+      openFirewall = true;
+      extendIfExpiringIn = "30s";
+      logJSON = true;
+      maxDuration = "15m";
+      interval = "5s";
+    };
+  };
+
+  testScript = ''
+    with subtest("start the server"):
+      start_all()
+      server.wait_for_unit("prometheus.service")
+      server.wait_for_unit("alertmanager.service")
+      server.wait_for_unit("kthxbye.service")
+
+      server.sleep(2) # wait for units to settle
+      server.systemctl("restart kthxbye.service") # make sure kthxbye comes up after alertmanager
+      server.sleep(2)
+
+    with subtest("set up test silence which expires in 20s"):
+      server.succeed('amtool --alertmanager.url "http://localhost:9093" silence add alertname="node_up" -a "nixosTest" -d "20s" -c "ACK! this server is fine!!"')
+
+    with subtest("wait for 21 seconds and check if the silence is still active"):
+      server.sleep(21)
+      server.systemctl("status kthxbye.service")
+      server.succeed("amtool --alertmanager.url 'http://localhost:9093' silence | grep 'ACK'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/kubernetes/base.nix b/nixpkgs/nixos/tests/kubernetes/base.nix
new file mode 100644
index 000000000000..ba7b2d9b1d2d
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/base.nix
@@ -0,0 +1,107 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; }
+}:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  mkKubernetesBaseTest =
+    { name, domain ? "my.zyx", test, machines
+    , extraConfiguration ? null }:
+    let
+      masterName = head (filter (machineName: any (role: role == "master") machines.${machineName}.roles) (attrNames machines));
+      master = machines.${masterName};
+      extraHosts = ''
+        ${master.ip}  etcd.${domain}
+        ${master.ip}  api.${domain}
+        ${concatMapStringsSep "\n" (machineName: "${machines.${machineName}.ip}  ${machineName}.${domain}") (attrNames machines)}
+      '';
+      wrapKubectl = with pkgs; runCommand "wrap-kubectl" { nativeBuildInputs = [ makeWrapper ]; } ''
+        mkdir -p $out/bin
+        makeWrapper ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl --set KUBECONFIG "/etc/kubernetes/cluster-admin.kubeconfig"
+      '';
+    in makeTest {
+      inherit name;
+
+      nodes = mapAttrs (machineName: machine:
+        { config, pkgs, lib, nodes, ... }:
+          mkMerge [
+            {
+              boot.postBootCommands = "rm -fr /var/lib/kubernetes/secrets /tmp/shared/*";
+              virtualisation.memorySize = mkDefault 1536;
+              virtualisation.diskSize = mkDefault 4096;
+              networking = {
+                inherit domain extraHosts;
+                primaryIPAddress = mkForce machine.ip;
+
+                firewall = {
+                  allowedTCPPorts = [
+                    10250 # kubelet
+                  ];
+                  trustedInterfaces = ["mynet"];
+
+                  extraCommands = concatMapStrings  (node: ''
+                    iptables -A INPUT -s ${node.networking.primaryIPAddress} -j ACCEPT
+                  '') (attrValues nodes);
+                };
+              };
+              programs.bash.enableCompletion = true;
+              environment.systemPackages = [ wrapKubectl ];
+              services.flannel.iface = "eth1";
+              services.kubernetes = {
+                proxy.hostname = "${masterName}.${domain}";
+
+                easyCerts = true;
+                inherit (machine) roles;
+                apiserver = {
+                  securePort = 443;
+                  advertiseAddress = master.ip;
+                };
+                masterAddress = "${masterName}.${config.networking.domain}";
+              };
+            }
+            (optionalAttrs (any (role: role == "master") machine.roles) {
+              networking.firewall.allowedTCPPorts = [
+                443 # kubernetes apiserver
+              ];
+            })
+            (optionalAttrs (machine ? extraConfiguration) (machine.extraConfiguration { inherit config pkgs lib nodes; }))
+            (optionalAttrs (extraConfiguration != null) (extraConfiguration { inherit config pkgs lib nodes; }))
+          ]
+      ) machines;
+
+      testScript = ''
+        start_all()
+      '' + test;
+    };
+
+  mkKubernetesMultiNodeTest = attrs: mkKubernetesBaseTest ({
+    machines = {
+      machine1 = {
+        roles = ["master"];
+        ip = "192.168.1.1";
+      };
+      machine2 = {
+        roles = ["node"];
+        ip = "192.168.1.2";
+      };
+    };
+  } // attrs // {
+    name = "kubernetes-${attrs.name}-multinode";
+  });
+
+  mkKubernetesSingleNodeTest = attrs: mkKubernetesBaseTest ({
+    machines = {
+      machine1 = {
+        roles = ["master" "node"];
+        ip = "192.168.1.1";
+      };
+    };
+  } // attrs // {
+    name = "kubernetes-${attrs.name}-singlenode";
+  });
+in {
+  inherit mkKubernetesBaseTest mkKubernetesSingleNodeTest mkKubernetesMultiNodeTest;
+}
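+# Illustrative consumption (assumed call pattern; see ./dns.nix and ./rbac.nix):
+# a test file imports this module and builds, for example,
+#   mkKubernetesSingleNodeTest (base // { test = "..."; })
+# where `base` provides name, domain and extraConfiguration.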
diff --git a/nixpkgs/nixos/tests/kubernetes/default.nix b/nixpkgs/nixos/tests/kubernetes/default.nix
new file mode 100644
index 000000000000..a3de9ed115d4
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/default.nix
@@ -0,0 +1,13 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+let
+  dns = import ./dns.nix { inherit system pkgs; };
+  rbac = import ./rbac.nix { inherit system pkgs; };
+in
+{
+  dns-single-node = dns.singlenode.test;
+  dns-multi-node = dns.multinode.test;
+  rbac-single-node = rbac.singlenode.test;
+  rbac-multi-node = rbac.multinode.test;
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/dns.nix b/nixpkgs/nixos/tests/kubernetes/dns.nix
new file mode 100644
index 000000000000..1b7145eb5d5e
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/dns.nix
@@ -0,0 +1,159 @@
+{ system ? builtins.currentSystem, pkgs ? import ../../.. { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+  domain = "my.zyx";
+
+  redisPod = pkgs.writeText "redis-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "redis";
+    metadata.labels.name = "redis";
+    spec.containers = [{
+      name = "redis";
+      image = "redis";
+      args = ["--bind" "0.0.0.0"];
+      imagePullPolicy = "Never";
+      ports = [{
+        name = "redis-server";
+        containerPort = 6379;
+      }];
+    }];
+  });
+
+  redisService = pkgs.writeText "redis-service.json" (builtins.toJSON {
+    kind = "Service";
+    apiVersion = "v1";
+    metadata.name = "redis";
+    spec = {
+      ports = [{port = 6379; targetPort = 6379;}];
+      selector = {name = "redis";};
+    };
+  });
+
+  redisImage = pkgs.dockerTools.buildImage {
+    name = "redis";
+    tag = "latest";
+    copyToRoot = pkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ pkgs.redis pkgs.bind.host ];
+    };
+    config.Entrypoint = ["/bin/redis-server"];
+  };
+
+  probePod = pkgs.writeText "probe-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "probe";
+    metadata.labels.name = "probe";
+    spec.containers = [{
+      name = "probe";
+      image = "probe";
+      args = [ "-f" ];
+      tty = true;
+      imagePullPolicy = "Never";
+    }];
+  });
+
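+  # Busybox-based probe image; /bin/host from pkgs.bind.host lets the test resolve service names from inside the pod.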
+  probeImage = pkgs.dockerTools.buildImage {
+    name = "probe";
+    tag = "latest";
+    copyToRoot = pkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ pkgs.bind.host pkgs.busybox ];
+    };
+    config.Entrypoint = ["/bin/tail"];
+  };
+
+  extraConfiguration = { config, pkgs, lib, ... }: {
+    environment.systemPackages = [ pkgs.bind.host ];
+    services.dnsmasq.enable = true;
+    services.dnsmasq.settings.server = [
+      "/cluster.local/${config.services.kubernetes.addons.dns.clusterIp}#53"
+    ];
+  };
+
+  base = {
+    name = "dns";
+    inherit domain extraConfiguration;
+  };
+
+  singleNodeTest = {
+    test = ''
+      # prepare machine1 for test
+      machine1.wait_until_succeeds("kubectl get node machine1.${domain} | grep -w Ready")
+      machine1.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${redisImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisPod}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisService}"
+      )
+      machine1.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${probeImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${probePod}"
+      )
+
+      # check if pods are running
+      machine1.wait_until_succeeds("kubectl get pod redis | grep Running")
+      machine1.wait_until_succeeds("kubectl get pod probe | grep Running")
+      machine1.wait_until_succeeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'")
+
+      # check dns on host (dnsmasq)
+      machine1.succeed("host redis.default.svc.cluster.local")
+
+      # check dns inside the container
+      machine1.succeed("kubectl exec probe -- /bin/host redis.default.svc.cluster.local")
+    '';
+  };
+
+  multiNodeTest = {
+    test = ''
+      # Node token exchange
+      machine1.wait_until_succeeds(
+          "cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret"
+      )
+      machine2.wait_until_succeeds(
+          "cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join"
+      )
+
+      # prepare machines for test
+      machine1.wait_until_succeeds("kubectl get node machine2.${domain} | grep -w Ready")
+      machine2.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${redisImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisPod}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${redisService}"
+      )
+      machine2.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${probeImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${probePod}"
+      )
+
+      # check if pods are running
+      machine1.wait_until_succeeds("kubectl get pod redis | grep Running")
+      machine1.wait_until_succeeds("kubectl get pod probe | grep Running")
+      machine1.wait_until_succeeds("kubectl get pods -n kube-system | grep 'coredns.*1/1'")
+
+      # check dns on hosts (dnsmasq)
+      machine1.succeed("host redis.default.svc.cluster.local")
+      machine2.succeed("host redis.default.svc.cluster.local")
+
+      # check dns inside the container
+      machine1.succeed("kubectl exec probe -- /bin/host redis.default.svc.cluster.local")
+    '';
+  };
+in {
+  singlenode = mkKubernetesSingleNodeTest (base // singleNodeTest);
+  multinode = mkKubernetesMultiNodeTest (base // multiNodeTest);
+}
diff --git a/nixpkgs/nixos/tests/kubernetes/rbac.nix b/nixpkgs/nixos/tests/kubernetes/rbac.nix
new file mode 100644
index 000000000000..779eafbb1d24
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubernetes/rbac.nix
@@ -0,0 +1,168 @@
+{ system ? builtins.currentSystem, pkgs ? import ../../.. { inherit system; } }:
+with import ./base.nix { inherit system; };
+let
+
+  roServiceAccount = pkgs.writeText "ro-service-account.json" (builtins.toJSON {
+    kind = "ServiceAccount";
+    apiVersion = "v1";
+    metadata = {
+      name = "read-only";
+      namespace = "default";
+    };
+  });
+
+  roRoleBinding = pkgs.writeText "ro-role-binding.json" (builtins.toJSON {
+    apiVersion = "rbac.authorization.k8s.io/v1";
+    kind = "RoleBinding";
+    metadata = {
+      name = "read-pods";
+      namespace = "default";
+    };
+    roleRef = {
+      apiGroup = "rbac.authorization.k8s.io";
+      kind = "Role";
+      name = "pod-reader";
+    };
+    subjects = [{
+      kind = "ServiceAccount";
+      name = "read-only";
+      namespace = "default";
+    }];
+  });
+
+  roRole = pkgs.writeText "ro-role.json" (builtins.toJSON {
+    apiVersion = "rbac.authorization.k8s.io/v1";
+    kind = "Role";
+    metadata = {
+      name = "pod-reader";
+      namespace = "default";
+    };
+    rules = [{
+      apiGroups = [""];
+      resources = ["pods"];
+      verbs = ["get" "list" "watch"];
+    }];
+  });
+
+  kubectlPod = pkgs.writeText "kubectl-pod.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "kubectl";
+    metadata.namespace = "default";
+    metadata.labels.name = "kubectl";
+    spec.serviceAccountName = "read-only";
+    spec.containers = [{
+      name = "kubectl";
+      image = "kubectl:latest";
+      command = ["/bin/tail" "-f"];
+      imagePullPolicy = "Never";
+      tty = true;
+    }];
+  });
+
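+  # Second pod manifest, baked into the kubectl image below so the in-pod kubectl can try (and be denied) to create it under the read-only role.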
+  kubectlPod2 = pkgs.writeTextDir "kubectl-pod-2.json" (builtins.toJSON {
+    kind = "Pod";
+    apiVersion = "v1";
+    metadata.name = "kubectl-2";
+    metadata.namespace = "default";
+    metadata.labels.name = "kubectl-2";
+    spec.serviceAccountName = "read-only";
+    spec.containers = [{
+      name = "kubectl-2";
+      image = "kubectl:latest";
+      command = ["/bin/tail" "-f"];
+      imagePullPolicy = "Never";
+      tty = true;
+    }];
+  });
+
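+  # Copy only the kubectl binary into its own store path so the image below does not pull in the full kubernetes closure.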
+  copyKubectl = pkgs.runCommand "copy-kubectl" { } ''
+    mkdir -p $out/bin
+    cp ${pkgs.kubernetes}/bin/kubectl $out/bin/kubectl
+  '';
+
+  kubectlImage = pkgs.dockerTools.buildImage {
+    name = "kubectl";
+    tag = "latest";
+    copyToRoot = pkgs.buildEnv {
+      name = "image-root";
+      pathsToLink = [ "/bin" ];
+      paths = [ copyKubectl pkgs.busybox kubectlPod2 ];
+    };
+    config.Entrypoint = ["/bin/sh"];
+  };
+
+  base = {
+    name = "rbac";
+  };
+
+  singlenode = base // {
+    test = ''
+      machine1.wait_until_succeeds("kubectl get node machine1.my.zyx | grep -w Ready")
+
+      machine1.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${kubectlImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roServiceAccount}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRole}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRoleBinding}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${kubectlPod}"
+      )
+
+      machine1.wait_until_succeeds("kubectl get pod kubectl | grep Running")
+
+      machine1.wait_until_succeeds("kubectl exec kubectl -- kubectl get pods")
+      machine1.fail("kubectl exec kubectl -- kubectl create -f /kubectl-pod-2.json")
+      machine1.fail("kubectl exec kubectl -- kubectl delete pods -l name=kubectl")
+    '';
+  };
+
+  multinode = base // {
+    test = ''
+      # Node token exchange
+      machine1.wait_until_succeeds(
+          "cp -f /var/lib/cfssl/apitoken.secret /tmp/shared/apitoken.secret"
+      )
+      machine2.wait_until_succeeds(
+          "cat /tmp/shared/apitoken.secret | nixos-kubernetes-node-join"
+      )
+
+      machine1.wait_until_succeeds("kubectl get node machine2.my.zyx | grep -w Ready")
+
+      machine2.wait_until_succeeds(
+          "${pkgs.gzip}/bin/zcat ${kubectlImage} | ${pkgs.containerd}/bin/ctr -n k8s.io image import -"
+      )
+
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roServiceAccount}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRole}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl apply -f ${roRoleBinding}"
+      )
+      machine1.wait_until_succeeds(
+          "kubectl create -f ${kubectlPod}"
+      )
+
+      machine1.wait_until_succeeds("kubectl get pod kubectl | grep Running")
+
+      machine1.wait_until_succeeds("kubectl exec kubectl -- kubectl get pods")
+      machine1.fail("kubectl exec kubectl -- kubectl create -f /kubectl-pod-2.json")
+      machine1.fail("kubectl exec kubectl -- kubectl delete pods -l name=kubectl")
+    '';
+  };
+
+in {
+  singlenode = mkKubernetesSingleNodeTest singlenode;
+  multinode = mkKubernetesMultiNodeTest multinode;
+}
diff --git a/nixpkgs/nixos/tests/kubo/default.nix b/nixpkgs/nixos/tests/kubo/default.nix
new file mode 100644
index 000000000000..629922fc366d
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubo/default.nix
@@ -0,0 +1,5 @@
+{ recurseIntoAttrs, runTest }:
+recurseIntoAttrs {
+  kubo = runTest ./kubo.nix;
+  kubo-fuse = runTest ./kubo-fuse.nix;
+}
diff --git a/nixpkgs/nixos/tests/kubo/kubo-fuse.nix b/nixpkgs/nixos/tests/kubo/kubo-fuse.nix
new file mode 100644
index 000000000000..71a5bf61649f
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubo/kubo-fuse.nix
@@ -0,0 +1,42 @@
+{ lib, ...} : {
+  name = "kubo-fuse";
+  meta = with lib.maintainers; {
+    maintainers = [ mguentner Luflosi ];
+  };
+
+  nodes.machine = { config, ... }: {
+    services.kubo = {
+      enable = true;
+      autoMount = true;
+    };
+    users.users.alice = {
+      isNormalUser = true;
+      extraGroups = [ config.services.kubo.group ];
+    };
+    users.users.bob = {
+      isNormalUser = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("FUSE mountpoint"):
+        machine.fail("echo a | su bob -l -c 'ipfs add --quieter'")
+        # The FUSE mount functionality is broken as of v0.13.0 and v0.17.0.
+        # See https://github.com/ipfs/kubo/issues/9044.
+        # Workaround: using CID Version 1 avoids that.
+        ipfs_hash = machine.succeed(
+            "echo fnord3 | su alice -l -c 'ipfs add --quieter --cid-version=1'"
+        ).strip()
+
+        machine.succeed(f"cat /ipfs/{ipfs_hash} | grep fnord3")
+
+    with subtest("Unmounting of /ipns and /ipfs"):
+        # Force Kubo to crash and wait for it to restart
+        machine.systemctl("kill --signal=SIGKILL ipfs.service")
+        machine.wait_for_unit("ipfs.service", timeout = 30)
+
+        machine.succeed(f"cat /ipfs/{ipfs_hash} | grep fnord3")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/kubo/kubo.nix b/nixpkgs/nixos/tests/kubo/kubo.nix
new file mode 100644
index 000000000000..7965ad277385
--- /dev/null
+++ b/nixpkgs/nixos/tests/kubo/kubo.nix
@@ -0,0 +1,53 @@
+{ lib, ...} : {
+  name = "kubo";
+  meta = with lib.maintainers; {
+    maintainers = [ mguentner Luflosi ];
+  };
+
+  nodes.machine = { config, ... }: {
+    services.kubo = {
+      enable = true;
+      # The module also adds a Unix domain socket API address; see the module definition.
+      startWhenNeeded = true;
+      settings.Addresses.API = "/ip4/127.0.0.1/tcp/2324";
+      dataDir = "/mnt/ipfs";
+    };
+    users.users.alice = {
+      isNormalUser = true;
+      extraGroups = [ config.services.kubo.group ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("Automatic socket activation"):
+        ipfs_hash = machine.succeed(
+            "echo fnord0 | su alice -l -c 'ipfs add --quieter'"
+        )
+        machine.succeed(f"ipfs cat /ipfs/{ipfs_hash.strip()} | grep fnord0")
+
+    machine.stop_job("ipfs")
+
+    with subtest("IPv4 socket activation"):
+        machine.succeed("ipfs --api /ip4/127.0.0.1/tcp/2324 id")
+        ipfs_hash = machine.succeed(
+            "echo fnord | ipfs --api /ip4/127.0.0.1/tcp/2324 add --quieter"
+        )
+        machine.succeed(f"ipfs cat /ipfs/{ipfs_hash.strip()} | grep fnord")
+
+    machine.stop_job("ipfs")
+
+    with subtest("Unix domain socket activation"):
+        ipfs_hash = machine.succeed(
+            "echo fnord2 | ipfs --api /unix/run/ipfs.sock add --quieter"
+        )
+        machine.succeed(
+            f"ipfs --api /unix/run/ipfs.sock cat /ipfs/{ipfs_hash.strip()} | grep fnord2"
+        )
+
+    with subtest("Setting dataDir works properly with the hardened systemd unit"):
+        machine.succeed("test -e /mnt/ipfs/config")
+        machine.succeed("test ! -e /var/lib/ipfs/")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/ladybird.nix b/nixpkgs/nixos/tests/ladybird.nix
new file mode 100644
index 000000000000..4e9ab9a36d13
--- /dev/null
+++ b/nixpkgs/nixos/tests/ladybird.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "ladybird";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = [
+      pkgs.ladybird
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.succeed("echo '<!DOCTYPE html><html><body><h1>Hello world</h1></body></html>' > page.html")
+      machine.execute("ladybird file://$(pwd)/page.html >&2 &")
+      machine.wait_for_window("Ladybird")
+      machine.sleep(5)
+      machine.wait_for_text("Hello world")
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/languagetool.nix b/nixpkgs/nixos/tests/languagetool.nix
new file mode 100644
index 000000000000..e4ab2a47064e
--- /dev/null
+++ b/nixpkgs/nixos/tests/languagetool.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let port = 8082;
+in {
+  name = "languagetool";
+  meta = with lib.maintainers; { maintainers = [ fbeffa ]; };
+
+  nodes.machine = { ... }:
+    {
+      services.languagetool.enable = true;
+      services.languagetool.port = port;
+    };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("languagetool.service")
+    machine.wait_for_open_port(${toString port})
+    machine.wait_until_succeeds('curl -d "language=en-US" -d "text=a simple test" http://localhost:${toString port}/v2/check')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lanraragi.nix b/nixpkgs/nixos/tests/lanraragi.nix
new file mode 100644
index 000000000000..f513ac9d252b
--- /dev/null
+++ b/nixpkgs/nixos/tests/lanraragi.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "lanraragi";
+  meta.maintainers = with lib.maintainers; [ tomasajt ];
+
+  nodes = {
+    machine1 = { pkgs, ... }: {
+      services.lanraragi.enable = true;
+    };
+    machine2 = { pkgs, ... }: {
+      services.lanraragi = {
+        enable = true;
+        passwordFile = pkgs.writeText "lrr-test-pass" ''
+          ultra-secure-password
+        '';
+        port = 4000;
+        redis = {
+          port = 4001;
+          passwordFile = pkgs.writeText "redis-lrr-test-pass" ''
+            still-a-very-secure-password
+          '';
+        };
+      };
+    };
+
+
+  };
+
+  testScript = ''
+    start_all()
+
+    machine1.wait_for_unit("lanraragi.service")
+    machine1.wait_until_succeeds("curl -f localhost:3000")
+    machine1.succeed("[ $(curl -o /dev/null -X POST 'http://localhost:3000/login' --data-raw 'password=kamimamita' -w '%{http_code}') -eq 302 ]")
+
+    machine2.wait_for_unit("lanraragi.service")
+    machine2.wait_until_succeeds("curl -f localhost:4000")
+    machine2.succeed("[ $(curl -o /dev/null -X POST 'http://localhost:4000/login' --data-raw 'password=ultra-secure-password' -w '%{http_code}') -eq 302 ]")
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/leaps.nix b/nixpkgs/nixos/tests/leaps.nix
new file mode 100644
index 000000000000..5cc387c86a45
--- /dev/null
+++ b/nixpkgs/nixos/tests/leaps.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ pkgs,  ... }:
+
+{
+  name = "leaps";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ qknight ];
+  };
+
+  nodes =
+    {
+      client = { };
+
+      server =
+        { services.leaps = {
+            enable = true;
+            port = 6666;
+            path = "/leaps/";
+          };
+          networking.firewall.enable = false;
+        };
+    };
+
+  testScript =
+    ''
+      start_all()
+      server.wait_for_open_port(6666)
+      client.wait_for_unit("network.target")
+      assert "leaps" in client.succeed(
+          "${pkgs.curl}/bin/curl -f http://server:6666/leaps/"
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/legit.nix b/nixpkgs/nixos/tests/legit.nix
new file mode 100644
index 000000000000..a71fb1743c76
--- /dev/null
+++ b/nixpkgs/nixos/tests/legit.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+let
+  port = 5000;
+  scanPath = "/var/lib/legit";
+in
+{
+  name = "legit-web";
+  meta.maintainers = [ lib.maintainers.ratsclub ];
+
+  nodes = {
+    server = { config, pkgs, ... }: {
+      services.legit = {
+        enable = true;
+        settings = {
+          server.port = 5000;
+          repo = { inherit scanPath; };
+        };
+      };
+
+      environment.systemPackages = [ pkgs.git ];
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      strPort = builtins.toString port;
+    in
+    ''
+      start_all()
+
+      server.wait_for_unit("network.target")
+      server.wait_for_unit("legit.service")
+
+      server.wait_until_succeeds(
+          "curl -f http://localhost:${strPort}"
+      )
+
+      server.succeed("${pkgs.writeShellScript "setup-legit-test-repo" ''
+        set -e
+        git init --bare -b master ${scanPath}/some-repo
+        git init -b master reference
+        cd reference
+        git remote add origin ${scanPath}/some-repo
+        date > date.txt
+        git add date.txt
+        git -c user.name=test -c user.email=test@localhost commit -m 'add date'
+        git push -u origin master
+      ''}")
+
+      server.wait_until_succeeds(
+          "curl -f http://localhost:${strPort}/some-repo"
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/lemmy.nix b/nixpkgs/nixos/tests/lemmy.nix
new file mode 100644
index 000000000000..de2c4938fe23
--- /dev/null
+++ b/nixpkgs/nixos/tests/lemmy.nix
@@ -0,0 +1,97 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  uiPort = 1234;
+  backendPort = 5678;
+  lemmyNodeName = "server";
+in
+{
+  name = "lemmy";
+  meta = with lib.maintainers; { maintainers = [ mightyiam ]; };
+
+  nodes = {
+    client = { };
+
+    "${lemmyNodeName}" = {
+      services.lemmy = {
+        enable = true;
+        ui.port = uiPort;
+        database.createLocally = true;
+        settings = {
+          hostname = "http://${lemmyNodeName}";
+          port = backendPort;
+          # Without setup, the /feeds/* and /nodeinfo/* API endpoints won't return 200
+          setup = {
+            admin_username = "mightyiam";
+            site_name = "Lemmy FTW";
+            admin_email = "mightyiam@example.com";
+          };
+        };
+        adminPasswordFile = /etc/lemmy-admin-password.txt;
+        caddy.enable = true;
+      };
+
+      environment.etc."lemmy-admin-password.txt".text = "ThisIsWhatIUseEverywhereTryIt";
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      # pict-rs seems to need more than 1025114112 bytes
+      virtualisation.memorySize = 2000;
+    };
+  };
+
+  testScript = ''
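+    # lemmyNodeName is "server", so this binds that VM's machine object to a stable local name.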
+    server = ${lemmyNodeName}
+
+    with subtest("the merged config is secure"):
+        server.wait_for_unit("lemmy.service")
+        config_permissions = server.succeed("stat --format %A /run/lemmy/config.hjson").rstrip()
+        assert config_permissions == "-rw-------", f"merged config permissions {config_permissions} are insecure"
+        directory_permissions = server.succeed("stat --format %A /run/lemmy").rstrip()
+        assert directory_permissions[5] == directory_permissions[8] == "-", "merged config can be replaced"
+
+    with subtest("the backend starts and responds"):
+        server.wait_for_open_port(${toString backendPort})
+        server.succeed("curl --fail localhost:${toString backendPort}/api/v3/site")
+
+    with subtest("the UI starts and responds"):
+        server.wait_for_unit("lemmy-ui.service")
+        server.wait_for_open_port(${toString uiPort})
+        server.succeed("curl --fail localhost:${toString uiPort}")
+
+    with subtest("Lemmy-UI responds through the caddy reverse proxy"):
+        server.wait_for_unit("network-online.target")
+        server.wait_for_unit("caddy.service")
+        server.wait_for_open_port(80)
+        body = server.execute("curl --fail --location ${lemmyNodeName}")[1]
+        assert "Lemmy" in body, f"String Lemmy not found in response for ${lemmyNodeName}: \n{body}"
+
+    with subtest("the server is exposed externally"):
+        client.wait_for_unit("network-online.target")
+        client.succeed("curl -v --fail ${lemmyNodeName}")
+
+    with subtest("caddy correctly routes backend requests"):
+        # Make sure we are not hitting frontend
+        server.execute("systemctl stop lemmy-ui.service")
+
+        def assert_http_code(url, expected_http_code, extra_curl_args=""):
+            _, http_code = server.execute(f'curl --silent -o /dev/null {extra_curl_args} --fail --write-out "%{{http_code}}" {url}')
+            assert http_code == str(expected_http_code), f"expected http code {expected_http_code}, got {http_code}"
+
+        # Caddy responds with HTTP code 502 if it cannot handle the requested path
+        assert_http_code("${lemmyNodeName}/obviously-wrong-path/", 502)
+
+        assert_http_code("${lemmyNodeName}/static/js/client.js", 200)
+        assert_http_code("${lemmyNodeName}/api/v3/site", 200)
+
+        # A 404 confirms that the request goes to the backend
+        # No path can return 200 until after we upload an image to pict-rs
+        assert_http_code("${lemmyNodeName}/pictrs/", 404)
+
+        assert_http_code("${lemmyNodeName}/feeds/all.xml", 200)
+        assert_http_code("${lemmyNodeName}/nodeinfo/2.0.json", 200)
+
+        assert_http_code("${lemmyNodeName}/some-other-made-up-path/", 404, "-X POST")
+        assert_http_code("${lemmyNodeName}/some-other-path", 404, "-H 'Accept: application/activity+json'")
+        assert_http_code("${lemmyNodeName}/some-other-path", 404, "-H 'Accept: application/ld+json; profile=\"https://www.w3.org/ns/activitystreams\"'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/libinput.nix b/nixpkgs/nixos/tests/libinput.nix
new file mode 100644
index 000000000000..9b6fa159b999
--- /dev/null
+++ b/nixpkgs/nixos/tests/libinput.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ ... }:
+
+{
+  name = "libinput";
+
+  nodes.machine = { ... }:
+    {
+      imports = [
+        ./common/x11.nix
+        ./common/user-account.nix
+      ];
+
+      test-support.displayManager.auto.user = "alice";
+
+      services.xserver.libinput = {
+        enable = true;
+        mouse = {
+          naturalScrolling = true;
+          leftHanded = true;
+          middleEmulation = false;
+          horizontalScrolling = false;
+        };
+      };
+    };
+
+  testScript = ''
+    def expect_xserver_option(option, value):
+        machine.succeed(f"""cat /var/log/X.0.log | grep -F 'Option "{option}" "{value}"'""")
+
+    machine.start()
+    machine.wait_for_x()
+    machine.succeed("""cat /var/log/X.0.log | grep -F "Using input driver 'libinput'" """)
+    expect_xserver_option("NaturalScrolling", "on")
+    expect_xserver_option("LeftHanded", "on")
+    expect_xserver_option("MiddleEmulation", "off")
+    expect_xserver_option("HorizontalScrolling", "off")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/libreddit.nix b/nixpkgs/nixos/tests/libreddit.nix
new file mode 100644
index 000000000000..ecf347b9e12e
--- /dev/null
+++ b/nixpkgs/nixos/tests/libreddit.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "libreddit";
+  meta.maintainers = with lib.maintainers; [ fab ];
+
+  nodes.machine = {
+    services.libreddit.enable = true;
+    # Test CAP_NET_BIND_SERVICE
+    services.libreddit.port = 80;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("libreddit.service")
+    machine.wait_for_open_port(80)
+    # Query a page that does not require Internet access
+    machine.succeed("curl --fail http://localhost:80/settings")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/librenms.nix b/nixpkgs/nixos/tests/librenms.nix
new file mode 100644
index 000000000000..c59f56a32316
--- /dev/null
+++ b/nixpkgs/nixos/tests/librenms.nix
@@ -0,0 +1,108 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  api_token = "f87f42114e44b63ad1b9e3c3d33d6fbe"; # random md5 hash
+  wrong_api_token = "e68ba041fcf1eab923a7a6de3af5f726"; # another random md5 hash
+in {
+  name = "librenms";
+  meta.maintainers = lib.teams.wdz.members;
+
+  nodes.librenms = {
+    time.timeZone = "Europe/Berlin";
+
+    environment.systemPackages = with pkgs; [
+      curl
+      jq
+    ];
+
+    services.librenms = {
+      enable = true;
+      hostname = "librenms";
+      database = {
+        createLocally = true;
+        host = "localhost";
+        database = "librenms";
+        username = "librenms";
+        passwordFile = pkgs.writeText "librenms-db-pass" "librenmsdbpass";
+      };
+      nginx = {
+        default = true;
+      };
+      enableOneMinutePolling = true;
+      settings = {
+        enable_billing = true;
+      };
+    };
+
+    # systemd oneshot to create a dummy admin user and an API token for testing
+    systemd.services.lnms-api-init = {
+      description = "LibreNMS API init";
+      after = [ "librenms-setup.service" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        User = "root";
+        Group = "root";
+      };
+      script = ''
+        API_USER_NAME=api
+        API_TOKEN=${api_token} # random md5 hash
+
+        # we don't need to know the password, it just has to exist
+        API_USER_PASS=$(${pkgs.pwgen}/bin/pwgen -s 64 1)
+        ${pkgs.librenms}/artisan user:add $API_USER_NAME -r admin -p $API_USER_PASS
+        API_USER_ID=$(${pkgs.mariadb}/bin/mysql -D librenms -N -B -e "SELECT user_id FROM users WHERE username = '$API_USER_NAME';")
+
+        ${pkgs.mariadb}/bin/mysql -D librenms -e "INSERT INTO api_tokens (user_id, token_hash, description) VALUES ($API_USER_ID, '$API_TOKEN', 'API User')"
+      '';
+    };
+  };
+
+  nodes.snmphost = {
+    networking.firewall.allowedUDPPorts = [ 161 ];
+
+    systemd.services.snmpd = {
+      description = "snmpd";
+      after = [ "network-online.target" ];
+      wants = [ "network-online.target" ];
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "forking";
+        User = "root";
+        Group = "root";
+        ExecStart = let
+          snmpd-config = pkgs.writeText "snmpd-config" ''
+            com2sec readonly default public
+
+            group MyROGroup v2c        readonly
+            view all    included  .1                               80
+            access MyROGroup ""      any       noauth    exact  all    none   none
+
+            syslocation Testcity, Testcountry
+            syscontact Testi mc Test <test@example.com>
+          '';
+        in "${pkgs.net-snmp}/bin/snmpd -c ${snmpd-config} -C";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    snmphost.wait_until_succeeds("pgrep snmpd")
+
+    librenms.wait_for_unit("lnms-api-init.service")
+    librenms.wait_for_open_port(80)
+
+    # Test that we can authenticate against the API
+    librenms.succeed("curl --fail -H 'X-Auth-Token: ${api_token}' http://localhost/api/v0")
+    librenms.fail("curl --fail -H 'X-Auth-Token: ${wrong_api_token}' http://localhost/api/v0")
+
+    # add snmphost as a device
+    librenms.succeed("curl --fail -X POST -d '{\"hostname\":\"snmphost\",\"version\":\"v2c\",\"community\":\"public\"}' -H 'X-Auth-Token: ${api_token}' http://localhost/api/v0/devices")
+
+    # wait until snmphost gets polled
+    librenms.wait_until_succeeds("test $(curl -H 'X-Auth-Token: ${api_token}' http://localhost/api/v0/devices/snmphost | jq -Mr .devices[0].last_polled) != 'null'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/libresprite.nix b/nixpkgs/nixos/tests/libresprite.nix
new file mode 100644
index 000000000000..16d272acfa0f
--- /dev/null
+++ b/nixpkgs/nixos/tests/libresprite.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "libresprite";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = [
+      pkgs.imagemagick
+      pkgs.libresprite
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.succeed("convert -font DejaVu-Sans +antialias label:'IT WORKS' image.png")
+      machine.execute("libresprite image.png >&2 &")
+      machine.wait_for_window("LibreSprite v${pkgs.libresprite.version}")
+      machine.wait_for_text("IT WORKS")
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/libreswan.nix b/nixpkgs/nixos/tests/libreswan.nix
new file mode 100644
index 000000000000..aadba941fab1
--- /dev/null
+++ b/nixpkgs/nixos/tests/libreswan.nix
@@ -0,0 +1,136 @@
+# This test sets up a host-to-host IPsec VPN between Alice and Bob, each on
+# their own network, with Eve as the only route between them. We check that
+# Eve can eavesdrop on the plaintext traffic between Alice and Bob, but that
+# once they enable the secure tunnel her spying becomes ineffective.
+
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+
+  # IPsec tunnel between Alice and Bob
+  tunnelConfig = {
+    services.libreswan.enable = true;
+    services.libreswan.connections.tunnel =
+      ''
+        leftid=@alice
+        left=fd::a
+        rightid=@bob
+        right=fd::b
+        authby=secret
+        auto=add
+      '';
+    environment.etc."ipsec.d/tunnel.secrets" =
+      { text = ''@alice @bob : PSK "j1JbIi9WY07rxwcNQ6nbyThKCf9DGxWOyokXIQcAQUnafsNTUJxfsxwk9WYK8fHj"'';
+        mode = "600";
+      };
+  };
+
+  # Common network setup
+  baseNetwork = {
+    # shared hosts file
+    extraHosts = lib.mkVMOverride ''
+      fd::a alice
+      fd::b bob
+      fd::e eve
+    '';
+    # remove all automatic addresses
+    useDHCP = false;
+    interfaces.eth1.ipv4.addresses = lib.mkVMOverride [];
+    interfaces.eth2.ipv4.addresses = lib.mkVMOverride [];
+    # open a port for testing
+    firewall.allowedUDPPorts = [ 1234 ];
+  };
+
+  # Adds an address and route from a to b via Eve
+  addRoute = a: b: {
+    interfaces.eth1.ipv6.addresses =
+      [ { address = a; prefixLength = 64; } ];
+    interfaces.eth1.ipv6.routes =
+      [ { address = b; prefixLength = 128; via = "fd::e"; } ];
+  };
+
+in
+
+{
+  name = "libreswan";
+  meta = with lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  # Our protagonist
+  nodes.alice = { ... }: {
+    virtualisation.vlans = [ 1 ];
+    networking = baseNetwork // addRoute "fd::a" "fd::b";
+  } // tunnelConfig;
+
+  # Her best friend
+  nodes.bob = { ... }: {
+    virtualisation.vlans = [ 2 ];
+    networking = baseNetwork // addRoute "fd::b" "fd::a";
+  } // tunnelConfig;
+
+  # The malicious network operator
+  nodes.eve = { ... }: {
+    virtualisation.vlans = [ 1 2 ];
+    networking = lib.mkMerge
+      [ baseNetwork
+        { interfaces.br0.ipv6.addresses =
+            [ { address = "fd::e"; prefixLength = 64; } ];
+          bridges.br0.interfaces = [ "eth1" "eth2" ];
+        }
+      ];
+    environment.systemPackages = [ pkgs.tcpdump ];
+    boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true;
+  };
+
+  testScript =
+    ''
+      def alice_to_bob(msg: str):
+          """
+          Sends a message as Alice to Bob
+          """
+          bob.execute("nc -lu ::0 1234 >/tmp/msg &")
+          alice.sleep(1)
+          alice.succeed(f"echo '{msg}' | nc -uw 0 bob 1234")
+          bob.succeed(f"grep '{msg}' /tmp/msg")
+
+
+      def eavesdrop():
+          """
+          Starts eavesdropping on Alice and Bob
+          """
+          match = "src host alice and dst host bob"
+          eve.execute(f"tcpdump -i br0 -c 1 -Avv {match} >/tmp/log &")
+
+
+      start_all()
+
+      with subtest("Network is up"):
+          alice.wait_until_succeeds("ping -c1 bob")
+          alice.succeed("systemctl restart ipsec")
+          bob.succeed("systemctl restart ipsec")
+
+      with subtest("Eve can eavesdrop cleartext traffic"):
+          eavesdrop()
+          alice_to_bob("I secretly love turnip")
+          eve.sleep(1)
+          eve.succeed("grep turnip /tmp/log")
+
+      with subtest("Libreswan is ready"):
+          alice.wait_for_unit("ipsec")
+          bob.wait_for_unit("ipsec")
+          alice.succeed("ipsec verify 1>&2")
+
+      with subtest("Alice and Bob can start the tunnel"):
+          alice.execute("ipsec auto --start tunnel >&2 &")
+          bob.succeed("ipsec auto --start tunnel")
+          # apparently this is needed to "wake" the tunnel
+          bob.execute("ping -c1 alice")
+
+      with subtest("Eve no longer can eavesdrop"):
+          eavesdrop()
+          alice_to_bob("Just kidding, I actually like rhubarb")
+          eve.sleep(1)
+          eve.fail("grep rhubarb /tmp/log")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/libuiohook.nix b/nixpkgs/nixos/tests/libuiohook.nix
new file mode 100644
index 000000000000..66c5033d9688
--- /dev/null
+++ b/nixpkgs/nixos/tests/libuiohook.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "libuiohook";
+  meta = with lib.maintainers; { maintainers = [ anoa ]; };
+
+  nodes.client = { nodes, ... }:
+      let user = nodes.client.config.users.users.alice;
+      in {
+        imports = [ ./common/user-account.nix ./common/x11.nix ];
+
+        environment.systemPackages = [ pkgs.libuiohook.test ];
+
+        test-support.displayManager.auto.user = user.name;
+      };
+
+  testScript = { nodes, ... }:
+    let user = nodes.client.config.users.users.alice;
+    in ''
+      client.wait_for_x()
+      client.succeed("su - alice -c ${pkgs.libuiohook.test}/share/uiohook_tests >&2 &")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/libvirtd.nix b/nixpkgs/nixos/tests/libvirtd.nix
new file mode 100644
index 000000000000..41d06cc9643f
--- /dev/null
+++ b/nixpkgs/nixos/tests/libvirtd.nix
@@ -0,0 +1,68 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "libvirtd";
+  meta.maintainers = with pkgs.lib.maintainers; [ fpletz ];
+
+  nodes = {
+    virthost =
+      { pkgs, ... }:
+      {
+        virtualisation = {
+          cores = 2;
+          memorySize = 2048;
+
+          libvirtd.enable = true;
+          libvirtd.hooks.qemu.is_working = "${pkgs.writeShellScript "testHook.sh" ''
+            touch /tmp/qemu_hook_is_working
+          ''}";
+        };
+        boot.supportedFilesystems = [ "zfs" ];
+        networking.hostId = "deadbeef"; # needed for zfs
+        networking.nameservers = [ "192.168.122.1" ];
+        security.polkit.enable = true;
+        environment.systemPackages = with pkgs; [ virt-manager ];
+      };
+  };
+
+  testScript = let
+    nixosInstallISO = (import ../release.nix {}).iso_minimal.${pkgs.stdenv.hostPlatform.system};
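+    # A graceful "shutdown" is only attempted on x86_64; other platforms simply destroy the domain.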
+    virshShutdownCmd = if pkgs.stdenv.isx86_64 then "shutdown" else "destroy";
+  in ''
+    start_all()
+
+    virthost.wait_for_unit("multi-user.target")
+
+    with subtest("enable default network"):
+      virthost.succeed("virsh net-start default")
+      virthost.succeed("virsh net-autostart default")
+      virthost.succeed("virsh net-info default")
+
+    with subtest("check if partition disk pools works with parted"):
+      virthost.succeed("fallocate -l100m /tmp/foo; losetup /dev/loop0 /tmp/foo; echo 'label: dos' | sfdisk /dev/loop0")
+      virthost.succeed("virsh pool-create-as foo disk --source-dev /dev/loop0 --target /dev")
+      virthost.succeed("virsh vol-create-as foo loop0p1 25MB")
+      virthost.succeed("virsh vol-create-as foo loop0p2 50MB")
+
+    with subtest("check if virsh zfs pools work"):
+      virthost.succeed("fallocate -l100m /tmp/zfs; losetup /dev/loop1 /tmp/zfs;")
+      virthost.succeed("zpool create zfs_loop /dev/loop1")
+      virthost.succeed("virsh pool-define-as --name zfs_storagepool --source-name zfs_loop --type zfs")
+      virthost.succeed("virsh pool-start zfs_storagepool")
+      virthost.succeed("virsh vol-create-as zfs_storagepool disk1 25MB")
+
+    with subtest("check if nixos install iso boots, network and autostart works"):
+      virthost.succeed(
+        "virt-install -n nixos --osinfo nixos-unstable --memory 1024 --graphics none --disk `find ${nixosInstallISO}/iso -type f | head -n1`,readonly=on --import --noautoconsole --autostart"
+      )
+      virthost.succeed("virsh domstate nixos | grep running")
+      virthost.wait_until_succeeds("ping -c 1 nixos")
+      virthost.succeed("virsh ${virshShutdownCmd} nixos")
+      virthost.wait_until_succeeds("virsh domstate nixos | grep 'shut off'")
+      virthost.shutdown()
+      virthost.wait_for_unit("multi-user.target")
+      virthost.wait_until_succeeds("ping -c 1 nixos")
+
+    with subtest("test if hooks are linked and run"):
+      virthost.succeed("ls /var/lib/libvirt/hooks/qemu.d/is_working")
+      virthost.succeed("ls /tmp/qemu_hook_is_working")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lidarr.nix b/nixpkgs/nixos/tests/lidarr.nix
new file mode 100644
index 000000000000..8230dda53736
--- /dev/null
+++ b/nixpkgs/nixos/tests/lidarr.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "lidarr";
+  meta.maintainers = with lib.maintainers; [ etu ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.lidarr.enable = true; };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("lidarr.service")
+    machine.wait_for_open_port(8686)
+    machine.succeed("curl --fail http://localhost:8686/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lightdm.nix b/nixpkgs/nixos/tests/lightdm.nix
new file mode 100644
index 000000000000..94cebd4a630a
--- /dev/null
+++ b/nixpkgs/nixos/tests/lightdm.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "lightdm";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ aszlig ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.displayManager.lightdm.enable = true;
+    services.xserver.displayManager.defaultSession = "none+icewm";
+    services.xserver.windowManager.icewm.enable = true;
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    start_all()
+    machine.wait_for_text("${user.description}")
+    machine.screenshot("lightdm")
+    machine.send_chars("${user.password}\n")
+    machine.wait_for_file("${user.home}/.Xauthority")
+    machine.succeed("xauth merge ${user.home}/.Xauthority")
+    machine.wait_for_window("^IceWM ")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lighttpd.nix b/nixpkgs/nixos/tests/lighttpd.nix
new file mode 100644
index 000000000000..daef1584a45c
--- /dev/null
+++ b/nixpkgs/nixos/tests/lighttpd.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "lighttpd";
+  meta.maintainers = with lib.maintainers; [ bjornfor ];
+
+  nodes = {
+    server = {
+      services.lighttpd.enable = true;
+      services.lighttpd.document-root = pkgs.runCommand "document-root" {} ''
+        mkdir -p "$out"
+        echo "hello nixos test" > "$out/file.txt"
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("lighttpd.service")
+    res = server.succeed("curl --fail http://localhost/file.txt")
+    assert "hello nixos test" in res, f"bad server response: '{res}'"
+    server.succeed("systemctl reload lighttpd")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/limesurvey.nix b/nixpkgs/nixos/tests/limesurvey.nix
new file mode 100644
index 000000000000..9a3193991f35
--- /dev/null
+++ b/nixpkgs/nixos/tests/limesurvey.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "limesurvey";
+  meta.maintainers = [ pkgs.lib.maintainers.aanderse ];
+
+  nodes.machine = { ... }: {
+    services.limesurvey = {
+      enable = true;
+      virtualHost = {
+        hostName = "example.local";
+        adminAddr = "root@example.local";
+      };
+    };
+
+    # limesurvey won't work without a dot in the hostname
+    networking.hosts."127.0.0.1" = [ "example.local" ];
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("phpfpm-limesurvey.service")
+    assert "The following surveys are available" in machine.succeed(
+        "curl -f http://example.local/"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/listmonk.nix b/nixpkgs/nixos/tests/listmonk.nix
new file mode 100644
index 000000000000..938c36026a7f
--- /dev/null
+++ b/nixpkgs/nixos/tests/listmonk.nix
@@ -0,0 +1,76 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "listmonk";
+  meta.maintainers = with lib.maintainers; [ raitobezarius ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.mailhog.enable = true;
+    services.listmonk = {
+      enable = true;
+      settings = {
+        admin_username = "listmonk";
+        admin_password = "hunter2";
+      };
+      database = {
+        createLocally = true;
+        # https://github.com/knadh/listmonk/blob/174a48f252a146d7e69dab42724e3329dbe25ebe/internal/messenger/email/email.go#L18-L27
+        settings.smtp = [ {
+          enabled = true;
+          host = "localhost";
+          port = 1025;
+          tls_type = "none";
+        }];
+      };
+    };
+  };
+
+  testScript = ''
+    import json
+
+    start_all()
+
+    basic_auth = "listmonk:hunter2"
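+    # Helper: build an authenticated curl command against the local listmonk API on port 9000.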
+    def generate_listmonk_request(type, url, data=None):
+       if data is None: data = {}
+       json_data = json.dumps(data)
+       return f'curl -u "{basic_auth}" -X {type} "http://localhost:9000/api/{url}" -H "Content-Type: application/json; charset=utf-8" --data-raw \'{json_data}\'''
+
+    machine.wait_for_unit("mailhog.service")
+    machine.wait_for_unit("postgresql.service")
+    machine.wait_for_unit("listmonk.service")
+    machine.wait_for_open_port(1025)
+    machine.wait_for_open_port(8025)
+    machine.wait_for_open_port(9000)
+    machine.succeed("[[ -f /var/lib/listmonk/.db_settings_initialized ]]")
+
+    assert json.loads(machine.succeed(generate_listmonk_request("GET", 'health')))['data'], 'Health endpoint returned unexpected value'
+
+    # A sample subscriber is guaranteed to exist at install-time
+    # A sample transactional template is guaranteed to exist at install-time
+    subscribers = json.loads(machine.succeed(generate_listmonk_request('GET', "subscribers")))['data']['results']
+    templates = json.loads(machine.succeed(generate_listmonk_request('GET', "templates")))['data']
+    tx_template = templates[2]
+
+    # Test transactional endpoint
+    print(machine.succeed(
+      generate_listmonk_request('POST', 'tx', data={'subscriber_id': subscribers[0]['id'], 'template_id': tx_template['id']})
+    ))
+
+    assert 'Welcome Anon Doe' in machine.succeed(
+        "curl --fail http://localhost:8025/api/v2/messages"
+    ), "Failed to find Welcome John Doe inside the messages API endpoint"
+
+    # Test campaign endpoint
+    # Based on https://github.com/knadh/listmonk/blob/174a48f252a146d7e69dab42724e3329dbe25ebe/cmd/campaigns.go#L549 as docs do not exist.
+    campaign_data = json.loads(machine.succeed(
+      generate_listmonk_request('POST', 'campaigns/1/test', data={'template_id': templates[0]['id'], 'subscribers': ['john@example.com'], 'name': 'Test', 'subject': 'NixOS is great', 'lists': [1], 'messenger': 'email'})
+    ))
+
+    assert campaign_data['data']  # This is a boolean asserting if the test was successful or not: https://github.com/knadh/listmonk/blob/174a48f252a146d7e69dab42724e3329dbe25ebe/cmd/campaigns.go#L626
+
+    messages = json.loads(machine.succeed(
+        "curl --fail http://localhost:8025/api/v2/messages"
+    ))
+
+    assert messages['total'] == 2
+  '';
+})
diff --git a/nixpkgs/nixos/tests/litestream.nix b/nixpkgs/nixos/tests/litestream.nix
new file mode 100644
index 000000000000..a281d8538694
--- /dev/null
+++ b/nixpkgs/nixos/tests/litestream.nix
@@ -0,0 +1,101 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "litestream";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ jwygoda ];
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.litestream = {
+        enable = true;
+        settings = {
+          dbs = [
+            {
+              path = "/var/lib/grafana/data/grafana.db";
+              replicas = [{
+                url = "sftp://foo:bar@127.0.0.1:22/home/foo/grafana";
+              }];
+            }
+          ];
+        };
+      };
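+      # Once Grafana has created its SQLite database, relax permissions so the litestream user (added to the grafana group below) can read and replicate it.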
+      systemd.services.grafana.serviceConfig.ExecStartPost = "+" + pkgs.writeShellScript "grant-grafana-permissions" ''
+        timeout=10
+
+        while [ ! -f /var/lib/grafana/data/grafana.db ];
+        do
+          if [ "$timeout" == 0 ]; then
+            echo "ERROR: Timeout while waiting for /var/lib/grafana/data/grafana.db."
+            exit 1
+          fi
+
+          sleep 1
+
+          ((timeout--))
+        done
+
+        find /var/lib/grafana -type d -exec chmod -v 775 {} \;
+        find /var/lib/grafana -type f -exec chmod -v 660 {} \;
+      '';
+      services.openssh = {
+        enable = true;
+        allowSFTP = true;
+        listenAddresses = [ { addr = "127.0.0.1"; port = 22; } ];
+      };
+      services.grafana = {
+        enable = true;
+        settings = {
+          security = {
+            admin_user = "admin";
+            admin_password = "admin";
+          };
+
+          server = {
+            http_addr = "localhost";
+            http_port = 3000;
+          };
+
+          database = {
+            type = "sqlite3";
+            path = "/var/lib/grafana/data/grafana.db";
+            wal = true;
+          };
+        };
+      };
+      users.users.foo = {
+        isNormalUser = true;
+        password = "bar";
+      };
+      users.users.litestream.extraGroups = [ "grafana" ];
+    };
+
+  testScript = ''
+    start_all()
+    machine.wait_until_succeeds("test -d /home/foo/grafana")
+    machine.wait_for_open_port(3000)
+    machine.succeed("""
+        curl -sSfN -X PUT -H "Content-Type: application/json" -d '{
+          "oldPassword": "admin",
+          "newPassword": "newpass",
+          "confirmNew": "newpass"
+        }' http://admin:admin@127.0.0.1:3000/api/user/password
+    """)
+    # https://litestream.io/guides/systemd/#simulating-a-disaster
+    machine.systemctl("stop litestream.service")
+    machine.succeed(
+        "rm -f /var/lib/grafana/data/grafana.db "
+        "/var/lib/grafana/data/grafana.db-shm "
+        "/var/lib/grafana/data/grafana.db-wal"
+    )
+    machine.succeed(
+        "litestream restore /var/lib/grafana/data/grafana.db "
+        "&& chown grafana:grafana /var/lib/grafana/data/grafana.db "
+        "&& chmod 660 /var/lib/grafana/data/grafana.db"
+    )
+    machine.systemctl("restart grafana.service")
+    machine.wait_for_open_port(3000)
+    machine.succeed(
+        "curl -sSfN -u admin:newpass http://127.0.0.1:3000/api/org/users | grep admin\@localhost"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/livebook-service.nix b/nixpkgs/nixos/tests/livebook-service.nix
new file mode 100644
index 000000000000..9397e3cb75ff
--- /dev/null
+++ b/nixpkgs/nixos/tests/livebook-service.nix
@@ -0,0 +1,43 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "livebook-service";
+
+  nodes = {
+    machine = { config, pkgs, ... }: {
+      imports = [
+        ./common/user-account.nix
+      ];
+
+      services.livebook = {
+        enableUserService = true;
+        port = 20123;
+        environmentFile = pkgs.writeText "livebook.env" ''
+          LIVEBOOK_PASSWORD = "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx";
+        '';
+        options = {
+          cookie = "chocolate chip";
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.config.users.users.alice;
+      sudo = lib.concatStringsSep " " [
+        "XDG_RUNTIME_DIR=/run/user/${toString user.uid}"
+        "sudo"
+        "--preserve-env=XDG_RUNTIME_DIR"
+        "-u"
+        "alice"
+      ];
+    in
+    ''
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("loginctl enable-linger alice")
+      machine.wait_until_succeeds("${sudo} systemctl --user is-active livebook.service")
+      machine.wait_for_open_port(20123)
+
+      machine.succeed("curl -L localhost:20123 | grep 'Type password'")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/lldap.nix b/nixpkgs/nixos/tests/lldap.nix
new file mode 100644
index 000000000000..d6c3a865aa04
--- /dev/null
+++ b/nixpkgs/nixos/tests/lldap.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ ... }: {
+  name = "lldap";
+
+  nodes.machine = { pkgs, ... }: {
+    services.lldap = {
+      enable = true;
+      settings = {
+        verbose = true;
+        ldap_base_dn = "dc=example,dc=com";
+      };
+    };
+    environment.systemPackages = [ pkgs.openldap ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("lldap.service")
+    machine.wait_for_open_port(3890)
+    machine.wait_for_open_port(17170)
+
+    machine.succeed("curl --location --fail http://localhost:17170/")
+
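+    # Bind as the admin account; no admin password is configured above, so lldap's default credentials are used here.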
+    print(
+      machine.succeed('ldapsearch -H ldap://localhost:3890 -D uid=admin,ou=people,dc=example,dc=com -b "ou=people,dc=example,dc=com" -w password')
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/locate.nix b/nixpkgs/nixos/tests/locate.nix
new file mode 100644
index 000000000000..e8ba41812a8f
--- /dev/null
+++ b/nixpkgs/nixos/tests/locate.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+  let inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+  in {
+    name = "locate";
+    meta.maintainers = with pkgs.lib.maintainers; [ chkno ];
+
+    nodes = rec {
+      a = {
+        environment.systemPackages = with pkgs; [ sshfs ];
+        virtualisation.fileSystems = {
+          "/ssh" = {
+            device = "alice@b:/";
+            fsType = "fuse.sshfs";
+            options = [
+              "allow_other"
+              "IdentityFile=/privkey"
+              "noauto"
+              "StrictHostKeyChecking=no"
+              "UserKnownHostsFile=/dev/null"
+            ];
+          };
+        };
+        services.locate = {
+          enable = true;
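+          # systemd calendar expression: run updatedb every five seconds so the test quickly gets a fresh locatedb.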
+          interval = "*:*:0/5";
+        };
+      };
+      b = {
+        services.openssh.enable = true;
+        users.users.alice = {
+          isNormalUser = true;
+          openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      # Set up sshfs mount
+      a.succeed(
+          "(umask 077; cat ${snakeOilPrivateKey} > /privkey)"
+      )
+      b.succeed("touch /file-on-b-machine")
+      b.wait_for_open_port(22)
+      a.succeed("mkdir /ssh")
+      a.succeed("mount /ssh")
+
+      # Core locatedb functionality
+      a.succeed("touch /file-on-a-machine-1")
+      a.wait_for_file("/var/cache/locatedb")
+      a.wait_until_succeeds("locate file-on-a-machine-1")
+
+      # Wait for a second update to make sure we're using a locatedb from a run
+      # that began after the sshfs mount
+      a.succeed("touch /file-on-a-machine-2")
+      a.wait_until_succeeds("locate file-on-a-machine-2")
+
+      # We shouldn't be able to see files on the other machine
+      a.fail("locate file-on-b-machine")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/login.nix b/nixpkgs/nixos/tests/login.nix
new file mode 100644
index 000000000000..67f5764a0a16
--- /dev/null
+++ b/nixpkgs/nixos/tests/login.nix
@@ -0,0 +1,68 @@
+import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
+
+{
+  name = "login";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes.machine =
+    { pkgs, lib, ... }:
+    { boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest;
+      sound.enable = true; # needed for the factl test, /dev/snd/* exists without them but udev doesn't care then
+    };
+
+  testScript = ''
+      machine.start(allow_reboot = True)
+
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+      machine.screenshot("postboot")
+
+      with subtest("create user"):
+          machine.succeed("useradd -m alice")
+          machine.succeed("(echo foobar; echo foobar) | passwd alice")
+
+      with subtest("Check whether switching VTs works"):
+          machine.fail("pgrep -f 'agetty.*tty2'")
+          machine.send_key("alt-f2")
+          machine.wait_until_succeeds("[ $(fgconsole) = 2 ]")
+          machine.wait_for_unit("getty@tty2.service")
+          machine.wait_until_succeeds("pgrep -f 'agetty.*tty2'")
+
+      with subtest("Log in as alice on a virtual console"):
+          machine.wait_until_tty_matches("2", "login: ")
+          machine.send_chars("alice\n")
+          machine.wait_until_tty_matches("2", "login: alice")
+          machine.wait_until_succeeds("pgrep login")
+          machine.wait_until_tty_matches("2", "Password: ")
+          machine.send_chars("foobar\n")
+          machine.wait_until_succeeds("pgrep -u alice bash")
+          machine.send_chars("touch done\n")
+          machine.wait_for_file("/home/alice/done")
+
+      with subtest("Systemd gives and removes device ownership as needed"):
+          machine.succeed("getfacl /dev/snd/timer | grep -q alice")
+          machine.send_key("alt-f1")
+          machine.wait_until_succeeds("[ $(fgconsole) = 1 ]")
+          machine.fail("getfacl /dev/snd/timer | grep -q alice")
+          machine.succeed("chvt 2")
+          machine.wait_until_succeeds("getfacl /dev/snd/timer | grep -q alice")
+
+      with subtest("Virtual console logout"):
+          machine.send_chars("exit\n")
+          machine.wait_until_fails("pgrep -u alice bash")
+          machine.screenshot("getty")
+
+      with subtest("Check whether ctrl-alt-delete works"):
+          boot_id1 = machine.succeed("cat /proc/sys/kernel/random/boot_id").strip()
+          assert boot_id1 != ""
+
+          machine.reboot()
+
+          boot_id2 = machine.succeed("cat /proc/sys/kernel/random/boot_id").strip()
+          assert boot_id2 != ""
+
+          assert boot_id1 != boot_id2
+  '';
+})
diff --git a/nixpkgs/nixos/tests/logrotate.nix b/nixpkgs/nixos/tests/logrotate.nix
new file mode 100644
index 000000000000..bcbe89c259ae
--- /dev/null
+++ b/nixpkgs/nixos/tests/logrotate.nix
@@ -0,0 +1,123 @@
+# Test that the logrotate service works and is enabled by default
+
+let
+  importTest = { ... }: {
+    services.logrotate.settings.import = {
+      olddir = false;
+    };
+  };
+
+in
+
+import ./make-test-python.nix ({ pkgs, ... }: rec {
+  name = "logrotate";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ martinetd ];
+  };
+
+  nodes = {
+    defaultMachine = { ... }: { };
+    failingMachine = { ... }: {
+      services.logrotate.configFile = pkgs.writeText "logrotate.conf" ''
+        # self-written config file
+        su notarealuser notagroupeither
+      '';
+    };
+    machine = { config, ... }: {
+      imports = [ importTest ];
+
+      services.logrotate.settings = {
+        # remove default frequency header and add another
+        header = {
+          frequency = null;
+          delaycompress = true;
+        };
+        # extra global setting... affecting nothing
+        last_line = {
+          global = true;
+          priority = 2000;
+          shred = true;
+        };
+        # using mail somewhere should add --mail to the logrotate invocation
+        sendmail = {
+          mail = "user@domain.tld";
+        };
+        # postrotate should be suffixed by 'endscript'
+        postrotate = {
+          postrotate = "touch /dev/null";
+        };
+        # check that checkConfig works as expected: there is nothing to check here
+        # except that building the file succeeds
+        checkConf = {
+          su = "root utmp";
+          createolddir = "0750 root utmp";
+          create = "root utmp";
+          "create " = "0750 root utmp";
+        };
+        # multiple paths should be aggregated
+        multipath = {
+          files = [ "file1" "file2" ];
+        };
+        # overriding imported path should keep existing attributes
+        # (e.g. olddir is still set)
+        import = {
+          notifempty = true;
+        };
+      };
+    };
+  };
+
+  testScript =
+    ''
+      with subtest("whether logrotate works"):
+          # we must rotate once first to create logrotate stamp
+          defaultMachine.succeed("systemctl start logrotate.service")
+          # we need to wait for console text once here to
+          # clear the console buffer up to this point for the next wait
+          defaultMachine.wait_for_console_text('logrotate.service: Deactivated successfully')
+
+          defaultMachine.succeed(
+              # wtmp is present in default config.
+              "rm -f /var/log/wtmp*",
+              # we need to give it at least 1MB
+              "dd if=/dev/zero of=/var/log/wtmp bs=2M count=1",
+
+              # move into the future and check rotation.
+              "date -s 'now + 1 month + 1 day'")
+          defaultMachine.wait_for_console_text('logrotate.service: Deactivated successfully')
+          defaultMachine.succeed(
+              # check rotate worked
+              "[ -e /var/log/wtmp.1 ]",
+          )
+      with subtest("default config does not have mail"):
+          defaultMachine.fail("systemctl cat logrotate.service | grep -- --mail")
+      with subtest("using mails adds mail option"):
+          machine.succeed("systemctl cat logrotate.service | grep -- --mail")
+      with subtest("check generated config matches expectation"):
+          machine.succeed(
+              # copy conf to /tmp/logrotate.conf for easy grep
+              "conf=$(systemctl cat logrotate | grep -oE '/nix/store[^ ]*logrotate.conf'); cp $conf /tmp/logrotate.conf",
+              "! grep weekly /tmp/logrotate.conf",
+              "grep -E '^delaycompress' /tmp/logrotate.conf",
+              "tail -n 1 /tmp/logrotate.conf | grep shred",
+              "sed -ne '/\"sendmail\" {/,/}/p' /tmp/logrotate.conf | grep 'mail user@domain.tld'",
+              "sed -ne '/\"postrotate\" {/,/}/p' /tmp/logrotate.conf | grep endscript",
+              "grep '\"file1\"\n\"file2\" {' /tmp/logrotate.conf",
+              "sed -ne '/\"import\" {/,/}/p' /tmp/logrotate.conf | grep noolddir",
+          )
+          # also check configFile option
+          failingMachine.succeed(
+              "conf=$(systemctl cat logrotate | grep -oE '/nix/store[^ ]*logrotate.conf'); cp $conf /tmp/logrotate.conf",
+              "grep 'self-written config' /tmp/logrotate.conf",
+          )
+      with subtest("Check logrotate-checkconf service"):
+          machine.wait_for_unit("logrotate-checkconf.service")
+          # wait_for_unit also asserts for success, so wait for
+          # parent target instead and check manually.
+          failingMachine.wait_for_unit("multi-user.target")
+          info = failingMachine.get_unit_info("logrotate-checkconf.service")
+          if info["ActiveState"] != "failed":
+              raise Exception('logrotate-checkconf.service was not failed')
+
+    '';
+})
diff --git a/nixpkgs/nixos/tests/loki.nix b/nixpkgs/nixos/tests/loki.nix
new file mode 100644
index 000000000000..470f80e9db63
--- /dev/null
+++ b/nixpkgs/nixos/tests/loki.nix
@@ -0,0 +1,56 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+{
+  name = "loki";
+
+  meta = with lib.maintainers; {
+    maintainers = [ willibutz ];
+  };
+
+  nodes.machine = { ... }: {
+    services.loki = {
+      enable = true;
+      configFile = "${pkgs.grafana-loki.src}/cmd/loki/loki-local-config.yaml";
+    };
+    services.promtail = {
+      enable = true;
+      configuration = {
+        server = {
+          http_listen_port = 9080;
+          grpc_listen_port = 0;
+        };
+        clients = [ { url = "http://localhost:3100/loki/api/v1/push"; } ];
+        scrape_configs = [
+          {
+            job_name = "system";
+            static_configs = [
+              {
+                targets = [ "localhost" ];
+                labels = {
+                  job = "varlogs";
+                  __path__ = "/var/log/*log";
+                };
+              }
+            ];
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("loki.service")
+    machine.wait_for_unit("promtail.service")
+    machine.wait_for_open_port(3100)
+    machine.wait_for_open_port(9080)
+    machine.succeed("echo 'Loki Ingestion Test' > /var/log/testlog")
+    # should not have access to journal unless specified
+    machine.fail(
+        "systemctl show --property=SupplementaryGroups promtail | grep -q systemd-journal"
+    )
+    machine.wait_until_succeeds(
+        "${pkgs.grafana-loki}/bin/logcli --addr='http://localhost:3100' query --no-labels '{job=\"varlogs\",filename=\"/var/log/testlog\"}' | grep -q 'Loki Ingestion Test'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lorri/builder.sh b/nixpkgs/nixos/tests/lorri/builder.sh
new file mode 100644
index 000000000000..b586b2bf7985
--- /dev/null
+++ b/nixpkgs/nixos/tests/lorri/builder.sh
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+printf "%s" "${name:?}" > "${out:?}"
diff --git a/nixpkgs/nixos/tests/lorri/default.nix b/nixpkgs/nixos/tests/lorri/default.nix
new file mode 100644
index 000000000000..a4bdc92490ce
--- /dev/null
+++ b/nixpkgs/nixos/tests/lorri/default.nix
@@ -0,0 +1,28 @@
+import ../make-test-python.nix {
+  name = "lorri";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ../../modules/profiles/minimal.nix ];
+    environment.systemPackages = [ pkgs.lorri ];
+  };
+
+  testScript = ''
+    # Copy files over
+    machine.succeed(
+        "cp '${./fake-shell.nix}' shell.nix"
+    )
+    machine.succeed(
+        "cp '${./builder.sh}' builder.sh"
+    )
+
+    # Start the daemon and wait until it is ready
+    machine.execute("lorri daemon > lorri.stdout 2> lorri.stderr &")
+    machine.wait_until_succeeds("grep --fixed-strings 'ready' lorri.stdout")
+
+    # Ping the daemon
+    machine.succeed("lorri internal ping shell.nix")
+
+    # Wait for the daemon to finish the build
+    machine.wait_until_succeeds("grep --fixed-strings 'Completed' lorri.stdout")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/lorri/fake-shell.nix b/nixpkgs/nixos/tests/lorri/fake-shell.nix
new file mode 100644
index 000000000000..9de9d247e542
--- /dev/null
+++ b/nixpkgs/nixos/tests/lorri/fake-shell.nix
@@ -0,0 +1,5 @@
+derivation {
+  system = builtins.currentSystem;
+  name = "fake-shell";
+  builder = ./builder.sh;
+}
diff --git a/nixpkgs/nixos/tests/luks.nix b/nixpkgs/nixos/tests/luks.nix
new file mode 100644
index 000000000000..da1d0c63b95d
--- /dev/null
+++ b/nixpkgs/nixos/tests/luks.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "luks";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/auto-format-root-device.nix ];
+
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 512 ];
+      useBootLoader = true;
+      useEFIBoot = true;
+      # To boot off the encrypted disk, we need an init script which comes from the Nix store
+      mountHostNixStore = true;
+    };
+    boot.loader.systemd-boot.enable = true;
+
+    boot.kernelParams = lib.mkOverride 5 [ "console=tty1" ];
+
+    environment.systemPackages = with pkgs; [ cryptsetup ];
+
+    specialisation = rec {
+      boot-luks.configuration = {
+        boot.initrd.luks.devices = lib.mkVMOverride {
+          # We have two disks but type only one passphrase - key reuse is in place
+          cryptroot.device = "/dev/vdb";
+          cryptroot2.device = "/dev/vdc";
+        };
+        virtualisation.rootDevice = "/dev/mapper/cryptroot";
+      };
+      boot-luks-custom-keymap.configuration = lib.mkMerge [
+        boot-luks.configuration
+        {
+          console.keyMap = "neo";
+        }
+      ];
+    };
+  };
+
+  enableOCR = true;
+
+  testScript = ''
+    # Create encrypted volume
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -")
+    machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -")
+
+    # Boot from the encrypted disk
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Boot and decrypt the disk
+    machine.start()
+    machine.wait_for_text("Passphrase for")
+    machine.send_chars("supersecret\n")
+    machine.wait_for_unit("multi-user.target")
+
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+
+    # Boot from the encrypted disk with custom keymap
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks-custom-keymap.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Boot and decrypt the disk
+    machine.start()
+    machine.wait_for_text("Passphrase for")
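+    # With the neo console keymap configured in this specialisation, these key presses should correspond to typing "supersecret".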
+    machine.send_chars("havfkhfrkfl\n")
+    machine.wait_for_unit("multi-user.target")
+
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lvm2/default.nix b/nixpkgs/nixos/tests/lvm2/default.nix
new file mode 100644
index 000000000000..e0358ec2806f
--- /dev/null
+++ b/nixpkgs/nixos/tests/lvm2/default.nix
@@ -0,0 +1,45 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+, lib ? pkgs.lib
+, kernelVersionsToTest ? [ "4.19" "5.4" "5.10" "5.15" "6.1" "latest" ]
+}:
+
+# For quickly running a test, the nixosTests.lvm2.lvm-thinpool-linux-latest attribute is recommended
+let
+  tests = let callTest = p: lib.flip (import p) { inherit system pkgs; }; in {
+    thinpool = { test = callTest ./thinpool.nix; kernelFilter = lib.id; };
+    # we would like to test all versions, but the kernel module currently does not compile against the other versions
+    vdo = { test = callTest ./vdo.nix; kernelFilter = lib.filter (v: v == "5.15"); };
+
+
+    # systemd in stage 1
+    raid-sd-stage-1 = {
+      test = callTest ./systemd-stage-1.nix;
+      kernelFilter = lib.id;
+      flavour = "raid";
+    };
+    thinpool-sd-stage-1 = {
+      test = callTest ./systemd-stage-1.nix;
+      kernelFilter = lib.id;
+      flavour = "thinpool";
+    };
+    vdo-sd-stage-1 = {
+      test = callTest ./systemd-stage-1.nix;
+      kernelFilter = lib.filter (v: v == "5.15");
+      flavour = "vdo";
+    };
+  };
+in
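+# Generate one attribute per test/kernel combination, named "lvm-<test>-linux-<version>"; kernels rejected by a test's kernelFilter produce an empty value and are filtered out again.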
+lib.listToAttrs (
+  lib.filter (x: x.value != {}) (
+    lib.flip lib.concatMap kernelVersionsToTest (version:
+      let
+        v' = lib.replaceStrings [ "." ] [ "_" ] version;
+      in
+      lib.flip lib.mapAttrsToList tests (name: t:
+        lib.nameValuePair "lvm-${name}-linux-${v'}" (lib.optionalAttrs (builtins.elem version (t.kernelFilter kernelVersionsToTest)) (t.test ({ kernelPackages = pkgs."linuxPackages_${v'}"; } // builtins.removeAttrs t [ "test" "kernelFilter" ])))
+      )
+    )
+  )
+)
diff --git a/nixpkgs/nixos/tests/lvm2/systemd-stage-1.nix b/nixpkgs/nixos/tests/lvm2/systemd-stage-1.nix
new file mode 100644
index 000000000000..b581f2b23507
--- /dev/null
+++ b/nixpkgs/nixos/tests/lvm2/systemd-stage-1.nix
@@ -0,0 +1,106 @@
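+# Parameterised over "flavour" (raid, thinpool, vdo): each flavour supplies its own preparation commands, extra NixOS config and final check below.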
+{ kernelPackages ? null, flavour }: let
+  preparationCode = {
+    raid = ''
+      machine.succeed("vgcreate test_vg /dev/vdb /dev/vdc")
+      machine.succeed("lvcreate -L 512M --type raid0 test_vg -n test_lv")
+    '';
+
+    thinpool = ''
+      machine.succeed("vgcreate test_vg /dev/vdb")
+      machine.succeed("lvcreate -L 512M -T test_vg/test_thin_pool")
+      machine.succeed("lvcreate -n test_lv -V 16G --thinpool test_thin_pool test_vg")
+    '';
+
+    vdo = ''
+      machine.succeed("vgcreate test_vg /dev/vdb")
+      machine.succeed("lvcreate --type vdo -n test_lv -L 6G -V 12G test_vg/vdo_pool_lv")
+    '';
+  }.${flavour};
+
+  extraConfig = {
+    raid = {
+      boot.initrd.kernelModules = [
+        "dm-raid"
+        "raid0"
+      ];
+    };
+
+    thinpool = {
+      services.lvm = {
+        boot.thin.enable = true;
+        dmeventd.enable = true;
+      };
+    };
+
+    vdo = {
+      services.lvm = {
+        boot.vdo.enable = true;
+        dmeventd.enable = true;
+      };
+    };
+  }.${flavour};
+
+  extraCheck = {
+    raid = ''
+      "test_lv" in machine.succeed("lvs --select segtype=raid0")
+    '';
+
+    thinpool = ''
+      "test_lv" in machine.succeed("lvs --select segtype=thin-pool")
+    '';
+
+    vdo = ''
+      "test_lv" in machine.succeed("lvs --select segtype=vdo")
+    '';
+  }.${flavour};
+
+in import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "lvm2-${flavour}-systemd-stage-1";
+  meta.maintainers = with pkgs.lib.maintainers; [ das_j ];
+
+  nodes.machine = { pkgs, lib, ... }: {
+    imports = [ extraConfig ];
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 8192 8192 ];
+      useBootLoader = true;
+      useEFIBoot = true;
+      # To boot off the LVM disk, we need an init script which comes from the Nix store.
+      mountHostNixStore = true;
+    };
+    boot.loader.systemd-boot.enable = true;
+    boot.loader.efi.canTouchEfiVariables = true;
+
+    environment.systemPackages = with pkgs; [ e2fsprogs ]; # for mkfs.ext4
+    boot = {
+      initrd.systemd = {
+        enable = true;
+        emergencyAccess = true;
+      };
+      initrd.services.lvm.enable = true;
+      kernelPackages = lib.mkIf (kernelPackages != null) kernelPackages;
+    };
+
+    specialisation.boot-lvm.configuration.virtualisation.rootDevice = "/dev/test_vg/test_lv";
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    # Create a VG for the root
+    ${preparationCode}
+    machine.succeed("mkfs.ext4 /dev/test_vg/test_lv")
+    machine.succeed("mkdir -p /mnt && mount /dev/test_vg/test_lv /mnt && echo hello > /mnt/test && umount /mnt")
+
+    # Boot from LVM
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-lvm.conf")
+    machine.succeed("sync")
+    machine.crash()
+    machine.wait_for_unit("multi-user.target")
+
+    # Ensure we have successfully booted from LVM
+    assert "(initrd)" in machine.succeed("systemd-analyze")  # booted with systemd in stage 1
+    assert "/dev/mapper/test_vg-test_lv on / type ext4" in machine.succeed("mount")
+    assert "hello" in machine.succeed("cat /test")
+    ${extraCheck}
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lvm2/thinpool.nix b/nixpkgs/nixos/tests/lvm2/thinpool.nix
new file mode 100644
index 000000000000..14781a8a6045
--- /dev/null
+++ b/nixpkgs/nixos/tests/lvm2/thinpool.nix
@@ -0,0 +1,34 @@
+{ kernelPackages ? null }:
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "lvm2-thinpool";
+  meta.maintainers = with pkgs.lib.maintainers; [ ajs124 ];
+
+  nodes.machine = { pkgs, lib, ... }: {
+    virtualisation.emptyDiskImages = [ 4096 ];
+    services.lvm = {
+      boot.thin.enable = true;
+      dmeventd.enable = true;
+    };
+    environment.systemPackages = with pkgs; [ xfsprogs ];
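+    # Let dmeventd auto-extend the thin pool by 10% once it is 80% full; the 1 GiB write in the test script should overflow the 512 MiB pool and trigger a resize (checked via journalctl below).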
+    environment.etc."lvm/lvm.conf".text = ''
+      activation/thin_pool_autoextend_percent = 10
+      activation/thin_pool_autoextend_threshold = 80
+    '';
+    boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+  };
+
+  testScript = let
+    mkXfsFlags = lib.optionalString (lib.versionOlder kernelPackages.kernel.version "5.10") "-m bigtime=0 -m inobtcount=0";
+  in ''
+    machine.succeed("vgcreate test_vg /dev/vdb")
+    machine.succeed("lvcreate -L 512M -T test_vg/test_thin_pool")
+    machine.succeed("lvcreate -n test_lv -V 16G --thinpool test_thin_pool test_vg")
+    machine.succeed("mkfs.xfs ${mkXfsFlags} /dev/test_vg/test_lv")
+    machine.succeed("mkdir /mnt; mount /dev/test_vg/test_lv /mnt")
+    assert "/dev/mapper/test_vg-test_lv" == machine.succeed("findmnt -no SOURCE /mnt").strip()
+    machine.succeed("dd if=/dev/zero of=/mnt/empty.file bs=1M count=1024")
+    machine.succeed("journalctl -u dm-event.service | grep \"successfully resized\"")
+    machine.succeed("umount /mnt")
+    machine.succeed("vgchange -a n")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lvm2/vdo.nix b/nixpkgs/nixos/tests/lvm2/vdo.nix
new file mode 100644
index 000000000000..5b014c2f7222
--- /dev/null
+++ b/nixpkgs/nixos/tests/lvm2/vdo.nix
@@ -0,0 +1,27 @@
+{ kernelPackages ? null }:
+import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "lvm2-vdo";
+  meta.maintainers = with pkgs.lib.maintainers; [ ajs124 ];
+
+  nodes.machine = { pkgs, lib, ... }: {
+    # Minimum required size for VDO volume: 5063921664 bytes
+    virtualisation.emptyDiskImages = [ 8192 ];
+    services.lvm = {
+      boot.vdo.enable = true;
+      dmeventd.enable = true;
+    };
+    environment.systemPackages = with pkgs; [ xfsprogs ];
+    boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+  };
+
+  testScript = ''
+    machine.succeed("vgcreate test_vg /dev/vdb")
+    machine.succeed("lvcreate --type vdo -n vdo_lv -L 6G -V 12G test_vg/vdo_pool_lv")
+    machine.succeed("mkfs.xfs -K /dev/test_vg/vdo_lv")
+    machine.succeed("mkdir /mnt; mount /dev/test_vg/vdo_lv /mnt")
+    assert "/dev/mapper/test_vg-vdo_lv" == machine.succeed("findmnt -no SOURCE /mnt").strip()
+    machine.succeed("umount /mnt")
+    machine.succeed("vdostats")
+    machine.succeed("vgchange -a n")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lxd-image-server.nix b/nixpkgs/nixos/tests/lxd-image-server.nix
new file mode 100644
index 000000000000..619542bdd945
--- /dev/null
+++ b/nixpkgs/nixos/tests/lxd-image-server.nix
@@ -0,0 +1,94 @@
+import ./make-test-python.nix ({ pkgs, lib, ... } :
+
+let
+  lxd-image = import ../release.nix {
+    configuration = {
+      # Building the documentation makes the test take unnecessarily long:
+      documentation.enable = lib.mkForce false;
+    };
+  };
+
+  lxd-image-metadata = lxd-image.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-rootfs = lxd-image.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+
+in {
+  name = "lxd-image-server";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mkg20001 patryk27 ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      cores = 2;
+
+      memorySize = 2048;
+      diskSize = 4096;
+
+      lxc.lxcfs.enable = true;
+      lxd.enable = true;
+    };
+
+    security.pki.certificates = [
+      (builtins.readFile ./common/acme/server/ca.cert.pem)
+    ];
+
+    services.nginx = {
+      enable = true;
+    };
+
+    services.lxd-image-server = {
+      enable = true;
+      nginx = {
+        enable = true;
+        domain = "acme.test";
+      };
+    };
+
+    services.nginx.virtualHosts."acme.test" = {
+      enableACME = false;
+      sslCertificate = ./common/acme/server/acme.test.cert.pem;
+      sslCertificateKey = ./common/acme/server/acme.test.key.pem;
+    };
+
+    networking.hosts = {
+      "::1" = [ "acme.test" ];
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    machine.wait_for_file("/var/lib/lxd/unix.socket")
+
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
+
+    # lxd expects the pool's directory to already exist
+    machine.succeed("mkdir /var/lxd-pool")
+
+    machine.succeed(
+        "lxd init --minimal"
+    )
+
+    machine.succeed(
+        "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
+    )
+
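+    # Path that lxd-image-server publishes over simplestreams; images moved here should become pullable through the "img:" remote added below.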
+    loc = "/var/www/simplestreams/images/iats/nixos/amd64/default/v1"
+
+    with subtest("push image to server"):
+        machine.succeed("lxc launch nixos test")
+        machine.sleep(5)
+        machine.succeed("lxc stop -f test")
+        machine.succeed("lxc publish --public test --alias=testimg")
+        machine.succeed("lxc image export testimg")
+        machine.succeed("ls >&2")
+        machine.succeed("mkdir -p " + loc)
+        machine.succeed("mv *.tar.gz " + loc)
+
+    with subtest("pull image from server"):
+        machine.succeed("lxc remote add img https://acme.test --protocol=simplestreams")
+        machine.succeed("lxc image list img: >&2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lxd/container.nix b/nixpkgs/nixos/tests/lxd/container.nix
new file mode 100644
index 000000000000..0ebe73d872f2
--- /dev/null
+++ b/nixpkgs/nixos/tests/lxd/container.nix
@@ -0,0 +1,132 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+let
+  releases = import ../../release.nix {
+    configuration = {
+      # Building the documentation makes the test take unnecessarily long:
+      documentation.enable = lib.mkForce false;
+
+      # Our tests require `grep` & friends:
+      environment.systemPackages = with pkgs; [ busybox ];
+    };
+  };
+
+  lxd-image-metadata = releases.lxdContainerMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-rootfs = releases.lxdContainerImage.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-rootfs-squashfs = releases.lxdContainerImageSquashfs.${pkgs.stdenv.hostPlatform.system};
+
+in {
+  name = "lxd-container";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ patryk27 adamcstephens ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      diskSize = 6144;
+
+      # Since we're testing `limits.cpu`, we need a known number of
+      # cores to rely on
+      cores = 2;
+
+      # Ditto, for `limits.memory`
+      memorySize = 512;
+
+      lxc.lxcfs.enable = true;
+      lxd.enable = true;
+    };
+  };
+
+  testScript = ''
+    def instance_is_up(_) -> bool:
+      status, _ = machine.execute("lxc exec container --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+      return status == 0
+
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    machine.wait_for_file("/var/lib/lxd/unix.socket")
+
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
+
+    # no preseed should mean no service
+    machine.fail("systemctl status lxd-preseed.service")
+
+    machine.succeed("lxd init --minimal")
+
+    machine.succeed(
+        "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs}/*/*.tar.xz --alias nixos"
+    )
+
+    with subtest("Container can be managed"):
+        machine.succeed("lxc launch nixos container")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+        machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
+        machine.succeed("lxc delete -f container")
+
+    with subtest("Squashfs image is functional"):
+        machine.succeed(
+            "lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-rootfs-squashfs} --alias nixos-squashfs"
+        )
+        machine.succeed("lxc launch nixos-squashfs container")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+        machine.succeed("echo true | lxc exec container /run/current-system/sw/bin/bash -")
+        machine.succeed("lxc delete -f container")
+
+    with subtest("Container is mounted with lxcfs inside"):
+        machine.succeed("lxc launch nixos container")
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
+
+        ## ---------- ##
+        ## limits.cpu ##
+
+        machine.succeed("lxc config set container limits.cpu 1")
+        machine.succeed("lxc restart container")
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
+
+        assert (
+            "1"
+            == machine.succeed("lxc exec container grep -- -c ^processor /proc/cpuinfo").strip()
+        )
+
+        machine.succeed("lxc config set container limits.cpu 2")
+        machine.succeed("lxc restart container")
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
+
+        assert (
+            "2"
+            == machine.succeed("lxc exec container grep -- -c ^processor /proc/cpuinfo").strip()
+        )
+
+        ## ------------- ##
+        ## limits.memory ##
+
+        machine.succeed("lxc config set container limits.memory 64MB")
+        machine.succeed("lxc restart container")
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
+
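+        # limits.memory uses decimal units: 64 MB = 64,000,000 bytes, which /proc/meminfo reports as 62500 kB (KiB).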
+        assert (
+            "MemTotal:          62500 kB"
+            == machine.succeed("lxc exec container grep -- MemTotal /proc/meminfo").strip()
+        )
+
+        machine.succeed("lxc config set container limits.memory 128MB")
+        machine.succeed("lxc restart container")
+        with machine.nested("Waiting for instance to start and be usable"):
+            retry(instance_is_up)
+
+        assert (
+            "MemTotal:         125000 kB"
+            == machine.succeed("lxc exec container grep -- MemTotal /proc/meminfo").strip()
+        )
+
+        machine.succeed("lxc delete -f container")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lxd/default.nix b/nixpkgs/nixos/tests/lxd/default.nix
new file mode 100644
index 000000000000..20afdd5e48bb
--- /dev/null
+++ b/nixpkgs/nixos/tests/lxd/default.nix
@@ -0,0 +1,12 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. {inherit system config;},
+  handleTestOn,
+}: {
+  container = import ./container.nix {inherit system pkgs;};
+  nftables = import ./nftables.nix {inherit system pkgs;};
+  preseed = import ./preseed.nix {inherit system pkgs;};
+  ui = import ./ui.nix {inherit system pkgs;};
+  virtual-machine = handleTestOn ["x86_64-linux"] ./virtual-machine.nix { inherit system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/lxd/nftables.nix b/nixpkgs/nixos/tests/lxd/nftables.nix
new file mode 100644
index 000000000000..d98bd4952906
--- /dev/null
+++ b/nixpkgs/nixos/tests/lxd/nftables.nix
@@ -0,0 +1,50 @@
+# This test makes sure that lxd stops implicitly depending on iptables when
+# the user enables nftables.
+#
+# It has been extracted from `lxd.nix` for clarity, and because switching from
+# iptables to nftables requires a full reboot, which is a bit hard inside NixOS
+# tests.
+
+import ../make-test-python.nix ({ pkgs, ...} : {
+  name = "lxd-nftables";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ patryk27 ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      lxd.enable = true;
+    };
+
+    networking = {
+      firewall.enable = false;
+      nftables.enable = true;
+      nftables.tables."filter".family = "inet";
+      nftables.tables."filter".content = ''
+          chain incoming {
+            type filter hook input priority 0;
+            policy accept;
+          }
+
+          chain forward {
+            type filter hook forward priority 0;
+            policy accept;
+          }
+
+          chain output {
+            type filter hook output priority 0;
+            policy accept;
+          }
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("network.target")
+
+    with subtest("When nftables are enabled, lxd doesn't depend on iptables anymore"):
+        machine.succeed("lsmod | grep nf_tables")
+        machine.fail("lsmod | grep ip_tables")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lxd/preseed.nix b/nixpkgs/nixos/tests/lxd/preseed.nix
new file mode 100644
index 000000000000..7d89b9f56daa
--- /dev/null
+++ b/nixpkgs/nixos/tests/lxd/preseed.nix
@@ -0,0 +1,71 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } :
+
+{
+  name = "lxd-preseed";
+
+  meta = {
+    maintainers = with lib.maintainers; [ adamcstephens ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      diskSize = 4096;
+
+      lxc.lxcfs.enable = true;
+      lxd.enable = true;
+
+      lxd.preseed = {
+        networks = [
+          {
+            name = "nixostestbr0";
+            type = "bridge";
+            config = {
+              "ipv4.address" = "10.0.100.1/24";
+              "ipv4.nat" = "true";
+            };
+          }
+        ];
+        profiles = [
+          {
+            name = "nixostest_default";
+            devices = {
+              eth0 = {
+                name = "eth0";
+                network = "nixostestbr0";
+                type = "nic";
+              };
+              root = {
+                path = "/";
+                pool = "default";
+                size = "35GiB";
+                type = "disk";
+              };
+            };
+          }
+        ];
+        storage_pools = [
+          {
+            name = "nixostest_pool";
+            driver = "dir";
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    def wait_for_preseed(_) -> bool:
+      _, output = machine.systemctl("is-active lxd-preseed.service")
+      return ("inactive" in output)
+
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    with machine.nested("Waiting for preseed to complete"):
+      retry(wait_for_preseed)
+
+    with subtest("Verify preseed resources created"):
+      machine.succeed("lxc profile show nixostest_default")
+      machine.succeed("lxc network info nixostestbr0")
+      machine.succeed("lxc storage show nixostest_pool")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lxd/ui.nix b/nixpkgs/nixos/tests/lxd/ui.nix
new file mode 100644
index 000000000000..86cb30d8c2b6
--- /dev/null
+++ b/nixpkgs/nixos/tests/lxd/ui.nix
@@ -0,0 +1,35 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "lxd-ui";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ jnsgruk ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      lxd.enable = true;
+      lxd.ui.enable = true;
+    };
+
+    environment.systemPackages = [ pkgs.curl ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    machine.wait_for_file("/var/lib/lxd/unix.socket")
+
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
+
+    # Configure LXC listen address
+    machine.succeed("lxc config set core.https_address :8443")
+    machine.succeed("systemctl restart lxd")
+
+    # Check that the LXD_UI environment variable is populated in the systemd unit
+    machine.succeed("cat /etc/systemd/system/lxd.service | grep 'LXD_UI'")
+
+    # Ensure the endpoint returns an HTML page with 'LXD UI' in the title
+    machine.succeed("curl -kLs https://localhost:8443/ui | grep '<title>LXD UI</title>'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/lxd/virtual-machine.nix b/nixpkgs/nixos/tests/lxd/virtual-machine.nix
new file mode 100644
index 000000000000..93705e9350c5
--- /dev/null
+++ b/nixpkgs/nixos/tests/lxd/virtual-machine.nix
@@ -0,0 +1,64 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  releases = import ../../release.nix {
+    configuration = {
+      # Building the documentation makes the test take unnecessarily long:
+      documentation.enable = lib.mkForce false;
+
+      # Our tests require `grep` & friends:
+      environment.systemPackages = with pkgs; [busybox];
+    };
+  };
+
+  lxd-image-metadata = releases.lxdVirtualMachineImageMeta.${pkgs.stdenv.hostPlatform.system};
+  lxd-image-disk = releases.lxdVirtualMachineImage.${pkgs.stdenv.hostPlatform.system};
+
+  instance-name = "instance1";
+in {
+  name = "lxd-virtual-machine";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [adamcstephens];
+  };
+
+  nodes.machine = {lib, ...}: {
+    virtualisation = {
+      diskSize = 4096;
+
+      cores = 2;
+
+      # Ensure we have enough memory for the nested virtual machine
+      memorySize = 1024;
+
+      lxc.lxcfs.enable = true;
+      lxd.enable = true;
+    };
+  };
+
+  testScript = ''
+    def instance_is_up(_) -> bool:
+      status, _ = machine.execute("lxc exec ${instance-name} --disable-stdin --force-interactive /run/current-system/sw/bin/true")
+      return status == 0
+
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("lxd.service")
+    machine.wait_for_file("/var/lib/lxd/unix.socket")
+
+    # Wait for lxd to settle
+    machine.succeed("lxd waitready")
+
+    machine.succeed("lxd init --minimal")
+
+    with subtest("virtual-machine image can be imported"):
+        machine.succeed("lxc image import ${lxd-image-metadata}/*/*.tar.xz ${lxd-image-disk}/nixos.qcow2 --alias nixos")
+
+    with subtest("virtual-machine can be launched and become available"):
+        machine.succeed("lxc launch nixos ${instance-name} --vm --config limits.memory=512MB --config security.secureboot=false")
+        with machine.nested("Waiting for instance to start and be usable"):
+          retry(instance_is_up)
+
+    with subtest("lxd-agent is started"):
+        machine.succeed("lxc exec ${instance-name} systemctl is-active lxd-agent")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/maddy/default.nix b/nixpkgs/nixos/tests/maddy/default.nix
new file mode 100644
index 000000000000..043906863e64
--- /dev/null
+++ b/nixpkgs/nixos/tests/maddy/default.nix
@@ -0,0 +1,6 @@
+{ handleTest }:
+
+{
+  unencrypted = handleTest ./unencrypted.nix { };
+  tls = handleTest ./tls.nix { };
+}
diff --git a/nixpkgs/nixos/tests/maddy/tls.nix b/nixpkgs/nixos/tests/maddy/tls.nix
new file mode 100644
index 000000000000..44da4cf2a3cf
--- /dev/null
+++ b/nixpkgs/nixos/tests/maddy/tls.nix
@@ -0,0 +1,94 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+let
+  certs = import ../common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+in {
+  name = "maddy-tls";
+  meta = with pkgs.lib.maintainers; { maintainers = [ onny ]; };
+
+  nodes = {
+    server = { options, ... }: {
+      services.maddy = {
+        enable = true;
+        hostname = domain;
+        primaryDomain = domain;
+        openFirewall = true;
+        ensureAccounts = [ "postmaster@${domain}" ];
+        ensureCredentials = {
+          # Do not use this in production. This will make passwords world-readable
+          # in the Nix store
+          "postmaster@${domain}".passwordFile = "${pkgs.writeText "postmaster" "test"}";
+        };
+        tls = {
+          loader = "file";
+          certificates = [{
+            certPath = "${certs.${domain}.cert}";
+            keyPath = "${certs.${domain}.key}";
+          }];
+        };
+        # Enable TLS listeners. Configuring this via the module is not yet
+        # implemented.
+        config = builtins.replaceStrings [
+          "imap tcp://0.0.0.0:143"
+          "submission tcp://0.0.0.0:587"
+        ] [
+          "imap tls://0.0.0.0:993 tcp://0.0.0.0:143"
+          "submission tls://0.0.0.0:465 tcp://0.0.0.0:587"
+        ] options.services.maddy.config.default;
+      };
+      # Not covered by openFirewall yet
+      networking.firewall.allowedTCPPorts = [ 993 465 ];
+    };
+
+    client = { nodes, ... }: {
+      security.pki.certificateFiles = [
+        certs.ca.cert
+      ];
+      networking.extraHosts = ''
+        ${nodes.server.networking.primaryIPAddress} ${domain}
+     '';
+      environment.systemPackages = [
+        (pkgs.writers.writePython3Bin "send-testmail" { } ''
+          import smtplib
+          import ssl
+          from email.mime.text import MIMEText
+
+          context = ssl.create_default_context()
+          msg = MIMEText("Hello World")
+          msg['Subject'] = 'Test'
+          msg['From'] = "postmaster@${domain}"
+          msg['To'] = "postmaster@${domain}"
+          with smtplib.SMTP_SSL(host='${domain}', port=465, context=context) as smtp:
+              smtp.login('postmaster@${domain}', 'test')
+              smtp.sendmail(
+                'postmaster@${domain}', 'postmaster@${domain}', msg.as_string()
+              )
+        '')
+        (pkgs.writers.writePython3Bin "test-imap" { } ''
+          import imaplib
+
+          with imaplib.IMAP4_SSL('${domain}') as imap:
+              imap.login('postmaster@${domain}', 'test')
+              imap.select()
+              status, refs = imap.search(None, 'ALL')
+              assert status == 'OK'
+              assert len(refs) == 1
+              status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+              assert status == 'OK'
+              assert msg[0][1].strip() == b"Hello World"
+        '')
+      ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("maddy.service")
+    server.wait_for_open_port(143)
+    server.wait_for_open_port(993)
+    server.wait_for_open_port(587)
+    server.wait_for_open_port(465)
+    client.succeed("send-testmail")
+    client.succeed("test-imap")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/maddy/unencrypted.nix b/nixpkgs/nixos/tests/maddy/unencrypted.nix
new file mode 100644
index 000000000000..2420d461e4e7
--- /dev/null
+++ b/nixpkgs/nixos/tests/maddy/unencrypted.nix
@@ -0,0 +1,60 @@
+import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "maddy-unencrypted";
+  meta = with pkgs.lib.maintainers; { maintainers = [ onny ]; };
+
+  nodes = {
+    server = { ... }: {
+      services.maddy = {
+        enable = true;
+        hostname = "server";
+        primaryDomain = "server";
+        openFirewall = true;
+        ensureAccounts = [ "postmaster@server" ];
+        ensureCredentials = {
+          # Do not use this in production. This will make passwords world-readable
+          # in the Nix store
+          "postmaster@server".passwordFile = "${pkgs.writeText "postmaster" "test"}";
+        };
+      };
+    };
+
+    client = { ... }: {
+      environment.systemPackages = [
+        (pkgs.writers.writePython3Bin "send-testmail" { } ''
+          import smtplib
+          from email.mime.text import MIMEText
+
+          msg = MIMEText("Hello World")
+          msg['Subject'] = 'Test'
+          msg['From'] = "postmaster@server"
+          msg['To'] = "postmaster@server"
+          with smtplib.SMTP('server', 587) as smtp:
+              smtp.login('postmaster@server', 'test')
+              smtp.sendmail('postmaster@server', 'postmaster@server', msg.as_string())
+        '')
+        (pkgs.writers.writePython3Bin "test-imap" { } ''
+          import imaplib
+
+          with imaplib.IMAP4('server') as imap:
+              imap.login('postmaster@server', 'test')
+              imap.select()
+              status, refs = imap.search(None, 'ALL')
+              assert status == 'OK'
+              assert len(refs) == 1
+              status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+              assert status == 'OK'
+              assert msg[0][1].strip() == b"Hello World"
+        '')
+      ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("maddy.service")
+    server.wait_for_open_port(143)
+    server.wait_for_open_port(587)
+    client.succeed("send-testmail")
+    client.succeed("test-imap")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/maestral.nix b/nixpkgs/nixos/tests/maestral.nix
new file mode 100644
index 000000000000..67a265926187
--- /dev/null
+++ b/nixpkgs/nixos/tests/maestral.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "maestral";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ peterhoeg ];
+  };
+
+  nodes =
+    let
+      common = attrs:
+        pkgs.lib.recursiveUpdate
+          {
+            imports = [ ./common/user-account.nix ];
+            systemd.user.services.maestral = {
+              description = "Maestral Dropbox Client";
+              serviceConfig.Type = "exec";
+            };
+          }
+          attrs;
+
+    in
+    {
+      cli = { ... }: common {
+        systemd.user.services.maestral = {
+          wantedBy = [ "default.target" ];
+          serviceConfig.ExecStart = "${pkgs.maestral}/bin/maestral start --foreground";
+        };
+      };
+
+      gui = { ... }: common {
+        services.xserver = {
+          enable = true;
+          displayManager.sddm.enable = true;
+          displayManager.defaultSession = "plasma";
+          desktopManager.plasma5.enable = true;
+          desktopManager.plasma5.runUsingSystemd = true;
+          displayManager.autoLogin = {
+            enable = true;
+            user = "alice";
+          };
+        };
+
+        systemd.user.services = {
+          maestral = {
+            wantedBy = [ "graphical-session.target" ];
+            serviceConfig.ExecStart = "${pkgs.maestral-gui}/bin/maestral_qt";
+          };
+          # PowerDevil doesn't like our VM
+          plasma-powerdevil.enable = false;
+        };
+      };
+    };
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.cli.users.users.alice;
+    in
+    ''
+      start_all()
+
+      with subtest("CLI"):
+        # we need SOME way to give the user an active login session
+        cli.execute("loginctl enable-linger ${user.name}")
+        cli.systemctl("start user@${toString user.uid}")
+        cli.wait_for_unit("maestral.service", "${user.name}")
+
+      with subtest("GUI"):
+        gui.wait_for_x()
+        gui.wait_for_file("/tmp/xauth_*")
+        gui.succeed("xauth merge /tmp/xauth_*")
+        gui.wait_for_window("^Desktop ")
+        gui.wait_for_unit("maestral.service", "${user.name}")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/magic-wormhole-mailbox-server.nix b/nixpkgs/nixos/tests/magic-wormhole-mailbox-server.nix
new file mode 100644
index 000000000000..54088ac60f28
--- /dev/null
+++ b/nixpkgs/nixos/tests/magic-wormhole-mailbox-server.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "magic-wormhole-mailbox-server";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    server = { ... }: {
+      networking.firewall.allowedTCPPorts = [ 4000 ];
+      services.magic-wormhole-mailbox-server.enable = true;
+    };
+
+    client_alice = { ... }: {
+      networking.firewall.enable = false;
+      environment.systemPackages = [ pkgs.magic-wormhole ];
+    };
+
+    client_bob = { ... }: {
+      environment.systemPackages = [ pkgs.magic-wormhole ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    # Start the wormhole relay server
+    server.wait_for_unit("magic-wormhole-mailbox-server.service")
+    server.wait_for_open_port(4000)
+
+    # Create a secret file and send it to Bob
+    client_alice.succeed("echo mysecret > secretfile")
+    client_alice.succeed("wormhole --relay-url=ws://server:4000/v1 send -0 secretfile >&2 &")
+
+    # Retrieve a secret file from Alice and check its content
+    client_bob.succeed("wormhole --relay-url=ws://server:4000/v1 receive -0 --accept-file")
+    client_bob.succeed("grep mysecret secretfile")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/magnetico.nix b/nixpkgs/nixos/tests/magnetico.nix
new file mode 100644
index 000000000000..ee84aacaf7a7
--- /dev/null
+++ b/nixpkgs/nixos/tests/magnetico.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  port = 8081;
+in
+{
+  name = "magnetico";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    networking.firewall.allowedTCPPorts = [ 9000 ];
+
+    services.magnetico = {
+      enable = true;
+      crawler.port = 9000;
+      web.port = port;
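+      # Presumably the bcrypt hash of "password"; the curl check below authenticates as user:password against it.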
+      web.credentials.user = "$2y$12$P88ZF6soFthiiAeXnz64aOWDsY3Dw7Yw8fZ6GtiqFNjknD70zDmNe";
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("magneticod")
+      machine.wait_for_unit("magneticow")
+      machine.wait_for_open_port(${toString port})
+      machine.succeed(
+          "${pkgs.curl}/bin/curl --fail "
+          + "-u user:password http://localhost:${toString port}"
+      )
+      machine.fail(
+          "${pkgs.curl}/bin/curl --fail "
+          + "-u user:wrongpwd http://localhost:${toString port}"
+      )
+      machine.shutdown()
+    '';
+})
diff --git a/nixpkgs/nixos/tests/mailcatcher.nix b/nixpkgs/nixos/tests/mailcatcher.nix
new file mode 100644
index 000000000000..627ef56617e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/mailcatcher.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "mailcatcher";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      services.mailcatcher.enable = true;
+
+      programs.msmtp = {
+        enable = true;
+        accounts.default = {
+          host = "localhost";
+          port = 1025;
+        };
+      };
+
+      environment.systemPackages = [ pkgs.mailutils ];
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("mailcatcher.service")
+    machine.wait_for_open_port(1025)
+    machine.succeed(
+        'echo "this is the body of the email" | mail -s "subject" root@example.org'
+    )
+    assert "this is the body of the email" in machine.succeed(
+        "curl -f http://localhost:1080/messages/1.source"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mailhog.nix b/nixpkgs/nixos/tests/mailhog.nix
new file mode 100644
index 000000000000..e3c2da37a3c8
--- /dev/null
+++ b/nixpkgs/nixos/tests/mailhog.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "mailhog";
+  meta.maintainers = with lib.maintainers; [ jojosch ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.mailhog.enable = true;
+
+    environment.systemPackages = with pkgs; [ swaks ];
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("mailhog.service")
+    machine.wait_for_open_port(1025)
+    machine.wait_for_open_port(8025)
+    machine.succeed(
+        'echo "this is the body of the email" | swaks --to root@example.org --body - --server localhost:1025'
+    )
+    assert "this is the body of the email" in machine.succeed(
+        "curl --fail http://localhost:8025/api/v2/messages"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mailman.nix b/nixpkgs/nixos/tests/mailman.nix
new file mode 100644
index 000000000000..f9b43861a12f
--- /dev/null
+++ b/nixpkgs/nixos/tests/mailman.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix {
+  name = "mailman";
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = with pkgs; [ mailutils ];
+
+    services.mailman.enable = true;
+    services.mailman.serve.enable = true;
+    services.mailman.siteOwner = "postmaster@example.com";
+    services.mailman.webHosts = [ "example.com" ];
+
+    services.postfix.enable = true;
+    services.postfix.destination = [ "example.com" "example.net" ];
+    services.postfix.relayDomains = [ "hash:/var/lib/mailman/data/postfix_domains" ];
+    services.postfix.config.local_recipient_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" "proxy:unix:passwd.byname" ];
+    services.postfix.config.transport_maps = [ "hash:/var/lib/mailman/data/postfix_lmtp" ];
+
+    users.users.user = { isNormalUser = true; };
+
+    virtualisation.memorySize = 2048;
+
+    specialisation.restApiPassFileSystem.configuration = {
+      services.mailman.restApiPassFile = "/var/lib/mailman/pass";
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    restApiPassFileSystem = "${nodes.machine.system.build.toplevel}/specialisation/restApiPassFileSystem";
+  in ''
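+    # The list member user@example.net maps to the local account "user", so delivery should land in its Maildir under /var/spool/mail/user (as the grep below assumes).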
+    def check_mail(_) -> bool:
+        status, _ = machine.execute("grep -q hello /var/spool/mail/user/new/*")
+        return status == 0
+
+    def try_api(_) -> bool:
+        status, _ = machine.execute("curl -s http://localhost:8001/")
+        return status == 0
+
+    def wait_for_api():
+        with machine.nested("waiting for Mailman REST API to be available"):
+            retry(try_api)
+
+    machine.wait_for_unit("mailman.service")
+    wait_for_api()
+
+    with subtest("subscription and delivery"):
+        creds = machine.succeed("su -s /bin/sh -c 'mailman info' mailman | grep '^REST credentials: ' | sed 's/^REST credentials: //'").strip()
+        machine.succeed(f"curl --fail-with-body -sLSu {creds} -d mail_host=example.com http://localhost:8001/3.1/domains")
+        machine.succeed(f"curl --fail-with-body -sLSu {creds} -d fqdn_listname=list@example.com http://localhost:8001/3.1/lists")
+        machine.succeed(f"curl --fail-with-body -sLSu {creds} -d list_id=list.example.com -d subscriber=root@example.com -d pre_confirmed=True -d pre_verified=True -d send_welcome_message=False http://localhost:8001/3.1/members")
+        machine.succeed(f"curl --fail-with-body -sLSu {creds} -d list_id=list.example.com -d subscriber=user@example.net -d pre_confirmed=True -d pre_verified=True -d send_welcome_message=False http://localhost:8001/3.1/members")
+        machine.succeed("mail -a 'From: root@example.com' -s hello list@example.com < /dev/null")
+        with machine.nested("waiting for mail from list"):
+            retry(check_mail)
+
+    with subtest("Postorius"):
+        machine.succeed("curl --fail-with-body -sILS http://localhost/")
+
+    with subtest("restApiPassFile"):
+        machine.succeed("echo secretpassword > /var/lib/mailman/pass")
+        machine.succeed("${restApiPassFileSystem}/bin/switch-to-configuration test >&2")
+        machine.succeed("grep secretpassword /etc/mailman.cfg")
+        machine.succeed("su -s /bin/sh -c 'mailman info' mailman | grep secretpassword")
+        wait_for_api()
+        machine.succeed("curl --fail-with-body -sLSu restadmin:secretpassword http://localhost:8001/3.1/domains")
+        machine.succeed("curl --fail-with-body -sILS http://localhost/")
+
+    with subtest("service locking"):
+        machine.fail("su -s /bin/sh -c 'mailman start' mailman")
+        machine.execute("systemctl kill --signal=SIGKILL mailman")
+        machine.succeed("systemctl restart mailman")
+        wait_for_api()
+  '';
+}
diff --git a/nixpkgs/nixos/tests/make-test-python.nix b/nixpkgs/nixos/tests/make-test-python.nix
new file mode 100644
index 000000000000..28569f1d2955
--- /dev/null
+++ b/nixpkgs/nixos/tests/make-test-python.nix
@@ -0,0 +1,9 @@
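+# Thin wrapper around the Python test driver: runs makeTest on the given test expression, passing pkgs and lib through when it is a function.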
+f: {
+  system ? builtins.currentSystem,
+  pkgs ? import ../.. { inherit system; config = {}; overlays = []; },
+  ...
+} @ args:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+makeTest (if pkgs.lib.isFunction f then f (args // { inherit pkgs; inherit (pkgs) lib; }) else f)
diff --git a/nixpkgs/nixos/tests/man.nix b/nixpkgs/nixos/tests/man.nix
new file mode 100644
index 000000000000..1ff5af4e8059
--- /dev/null
+++ b/nixpkgs/nixos/tests/man.nix
@@ -0,0 +1,100 @@
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+  manImplementations = [
+    "mandoc"
+    "man-db"
+  ];
+
+  machineNames = builtins.map machineSafe manImplementations;
+
+  makeConfig = useImpl: {
+    # Note: mandoc currently can't index symlinked section directories.
+    # So if a man section comes from one package exclusively (e.g.
+    # 1p from man-pages-posix and 2 from man-pages), it isn't searchable.
+    environment.systemPackages = [
+      pkgs.man-pages
+      pkgs.openssl
+      pkgs.libunwind
+    ];
+
+    documentation = {
+      enable = true;
+      nixos.enable = lib.mkForce true;
+      dev.enable = true;
+      man = {
+        enable = true;
+        generateCaches = true;
+      } // lib.listToAttrs (builtins.map (impl: {
+        name = impl;
+        value = {
+          enable = useImpl == impl;
+        };
+      }) manImplementations);
+    };
+  };
+
+  machineSafe = builtins.replaceStrings [ "-" ] [ "_" ];
+in {
+  name = "man";
+  meta.maintainers = [ lib.maintainers.sternenseemann ];
+
+  nodes = lib.listToAttrs (builtins.map (i: {
+    name = machineSafe i;
+    value = makeConfig i;
+  }) manImplementations);
+
+  testScript = ''
+    import re
+    start_all()
+
+    def match_man_k(page, section, haystack):
+      """
+      Check if the man page {page}({section}) occurs in
+      the output of `man -k` given as haystack. Note:
+      This is not super reliable, e.g. it can't deal
+      with man pages that are in multiple sections.
+      """
+
+      for line in haystack.split("\n"):
+        # man -k can look like this:
+        # page(3) - bla
+        # page (3) - bla
+        # pagea, pageb (3, 3P) - foo
+        # pagea, pageb, pagec(3) - bar
+        pages = line.split("(")[0]
+        sections = re.search("\\([a-zA-Z1-9, ]+\\)", line)
+        if sections is None:
+          continue
+        else:
+          sections = sections.group(0)[1:-1]
+
+        if page in pages and f'{section}' in sections:
+          return True
+
+      return False
+
+  '' + lib.concatMapStrings (machine: ''
+    with subtest("Test direct man page lookups in ${machine}"):
+      # man works
+      ${machine}.succeed("man man > /dev/null")
+      # devman works
+      ${machine}.succeed("man 3 libunwind > /dev/null")
+      # NixOS configuration man page is installed
+      ${machine}.succeed("man configuration.nix > /dev/null")
+
+    with subtest("Test generateCaches via man -k in ${machine}"):
+      expected = [
+        ("openssl", "ssl", 3),
+        ("unwind", "libunwind", 3),
+        ("user", "useradd", 8),
+        ("user", "userdel", 8),
+        ("mem", "free", 3),
+        ("mem", "free", 1),
+      ]
+
+      for (keyword, page, section) in expected:
+        matches = ${machine}.succeed(f"man -k {keyword}")
+        if not match_man_k(page, section, matches):
+          raise Exception(f"{page}({section}) missing in matches: {matches}")
+  '') machineNames;
+})
diff --git a/nixpkgs/nixos/tests/mate.nix b/nixpkgs/nixos/tests/mate.nix
new file mode 100644
index 000000000000..78ba59c5fc20
--- /dev/null
+++ b/nixpkgs/nixos/tests/mate.nix
@@ -0,0 +1,58 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "mate";
+
+  meta = {
+    maintainers = lib.teams.mate.members;
+  };
+
+  nodes.machine = { ... }: {
+    imports = [
+      ./common/user-account.nix
+    ];
+
+    services.xserver.enable = true;
+
+    services.xserver.displayManager = {
+      lightdm.enable = true;
+      autoLogin = {
+        enable = true;
+        user = "alice";
+      };
+    };
+
+    services.xserver.desktopManager.mate.enable = true;
+
+    # Silence log spam due to no sound drivers loaded:
+    # ALSA lib confmisc.c:855:(parse_card) cannot find card '0'
+    hardware.pulseaudio.enable = true;
+  };
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.users.users.alice;
+    in
+    ''
+      with subtest("Wait for login"):
+          machine.wait_for_x()
+          machine.wait_for_file("${user.home}/.Xauthority")
+          machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+      with subtest("Check that logging in has given the user ownership of devices"):
+          machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+      with subtest("Check if MATE session components actually start"):
+          machine.wait_until_succeeds("pgrep marco")
+          machine.wait_for_window("marco")
+          machine.wait_until_succeeds("pgrep mate-panel")
+          machine.wait_for_window("Top Panel")
+          machine.wait_for_window("Bottom Panel")
+          machine.wait_until_succeeds("pgrep caja")
+          machine.wait_for_window("Caja")
+
+      with subtest("Open MATE terminal"):
+          machine.succeed("su - ${user.name} -c 'DISPLAY=:0.0 mate-terminal >&2 &'")
+          machine.wait_for_window("Terminal")
+          machine.sleep(20)
+          machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/matomo.nix b/nixpkgs/nixos/tests/matomo.nix
new file mode 100644
index 000000000000..7dbef63136aa
--- /dev/null
+++ b/nixpkgs/nixos/tests/matomo.nix
@@ -0,0 +1,50 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  matomoTest = package:
+  makeTest {
+    name = "matomo";
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.matomo = {
+        package = package;
+        enable = true;
+        nginx = {
+          forceSSL = false;
+          enableACME = false;
+        };
+      };
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+      };
+      services.nginx.enable = true;
+    };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("mysql.service")
+      machine.wait_for_unit("phpfpm-matomo.service")
+      machine.wait_for_unit("nginx.service")
+
+      # Without the grep, the command does not produce valid UTF-8 for some reason.
+      with subtest("welcome screen loads"):
+          machine.succeed(
+              "curl -sSfL http://localhost/ | grep '<title>Matomo[^<]*Installation'"
+          )
+    '';
+  };
+in {
+  matomo = matomoTest pkgs.matomo // {
+    name = "matomo";
+    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ];
+  };
+  matomo-beta = matomoTest pkgs.matomo-beta // {
+    name = "matomo-beta";
+    meta.maintainers = with maintainers; [ florianjacob kiwi mmilata twey boozedog ];
+  };
+}
diff --git a/nixpkgs/nixos/tests/matrix/appservice-irc.nix b/nixpkgs/nixos/tests/matrix/appservice-irc.nix
new file mode 100644
index 000000000000..78c53024ca6c
--- /dev/null
+++ b/nixpkgs/nixos/tests/matrix/appservice-irc.nix
@@ -0,0 +1,225 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+  let
+    homeserverUrl = "http://homeserver:8008";
+  in
+  {
+    name = "matrix-appservice-irc";
+    meta = {
+      maintainers = pkgs.matrix-appservice-irc.meta.maintainers;
+    };
+
+    nodes = {
+      homeserver = { pkgs, ... }: {
+        # We'll switch to this once the config is copied into place
+        specialisation.running.configuration = {
+          services.matrix-synapse = {
+            enable = true;
+            settings = {
+              database.name = "sqlite3";
+              app_service_config_files = [ "/registration.yml" ];
+
+              enable_registration = true;
+
+              # don't use this in production, always use some form of verification
+              enable_registration_without_verification = true;
+
+              listeners = [ {
+                # The default but tls=false
+                bind_addresses = [
+                  "0.0.0.0"
+                ];
+                port = 8008;
+                resources = [ {
+                  "compress" = true;
+                  "names" = [ "client" ];
+                } {
+                  "compress" = false;
+                  "names" = [ "federation" ];
+                } ];
+                tls = false;
+                type = "http";
+              } ];
+            };
+          };
+
+          networking.firewall.allowedTCPPorts = [ 8008 ];
+        };
+      };
+
+      ircd = { pkgs, ... }: {
+        services.ngircd = {
+          enable = true;
+          config = ''
+            [Global]
+              Name = ircd.ircd
+              Info = Server Info Text
+              AdminInfo1 = _
+
+            [Channel]
+              Name = #test
+              Topic = a cool place
+
+            [Options]
+              PAM = no
+          '';
+        };
+        networking.firewall.allowedTCPPorts = [ 6667 ];
+      };
+
+      appservice = { pkgs, ... }: {
+        services.matrix-appservice-irc = {
+          enable = true;
+          registrationUrl = "http://appservice:8009";
+
+          settings = {
+            homeserver.url = homeserverUrl;
+            homeserver.domain = "homeserver";
+
+            ircService.servers."ircd" = {
+              name = "IRCd";
+              port = 6667;
+              dynamicChannels = {
+                enabled = true;
+                aliasTemplate = "#irc_$CHANNEL";
+              };
+            };
+          };
+        };
+
+        networking.firewall.allowedTCPPorts = [ 8009 ];
+      };
+
+      client = { pkgs, ... }: {
+        environment.systemPackages = [
+          (pkgs.writers.writePython3Bin "do_test"
+          {
+            libraries = [ pkgs.python3Packages.matrix-nio ];
+            flakeIgnore = [
+              # We don't live in the dark ages anymore.
+              # Whitespace-heavy languages like Python easily overrun
+              # 79 characters.
+              "E501"
+            ];
+          } ''
+              import sys
+              import socket
+              import functools
+              from time import sleep
+              import asyncio
+
+              from nio import AsyncClient, RoomMessageText, JoinResponse
+
+
+              async def matrix_room_message_text_callback(matrix: AsyncClient, msg: str, _r, e):
+                  print("Received matrix text message: ", e)
+                  if msg in e.body:
+                      print("Received hi from IRC")
+                      await matrix.close()
+                      exit(0)  # Actual exit point
+
+
+              class IRC:
+                  def __init__(self):
+                      sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+                      sock.connect(("ircd", 6667))
+                      sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
+                      sock.send(b"USER bob bob bob :bob\n")
+                      sock.send(b"NICK bob\n")
+                      self.sock = sock
+
+                  def join(self, room: str):
+                      self.sock.send(f"JOIN {room}\n".encode())
+
+                  def privmsg(self, room: str, msg: str):
+                      self.sock.send(f"PRIVMSG {room} :{msg}\n".encode())
+
+                  def expect_msg(self, body: str):
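+                      # Read raw IRC traffic until the expected text shows up in the accumulated buffer.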
+                      buffer = ""
+                      while True:
+                          buf = self.sock.recv(1024).decode()
+                          buffer += buf
+                          if body in buffer:
+                              return
+
+
+              async def run(homeserver: str):
+                  irc = IRC()
+
+                  matrix = AsyncClient(homeserver)
+                  response = await matrix.register("alice", "foobar")
+                  print("Matrix register response: ", response)
+
+                  response = await matrix.join("#irc_#test:homeserver")
+                  print("Matrix join room response:", response)
+                  assert isinstance(response, JoinResponse)
+                  room_id = response.room_id
+
+                  irc.join("#test")
+                  # FIXME: what are we waiting on here? Matrix? IRC? Both?
+                  # A fixed 10s sleep seems fragile on busy hydra machines.
+                  sleep(10)
+
+                  # Exchange messages
+                  print("Sending text message to matrix room")
+                  response = await matrix.room_send(
+                      room_id=room_id,
+                      message_type="m.room.message",
+                      content={"msgtype": "m.text", "body": "hi from matrix"},
+                  )
+                  print("Matrix room send response: ", response)
+                  irc.privmsg("#test", "hi from irc")
+
+                  print("Waiting for the matrix message to appear on the IRC side...")
+                  irc.expect_msg("hi from matrix")
+
+                  callback = functools.partial(
+                      matrix_room_message_text_callback, matrix, "hi from irc"
+                  )
+                  matrix.add_event_callback(callback, RoomMessageText)
+
+                  print("Waiting for matrix message...")
+                  await matrix.sync_forever()
+
+                  exit(1)  # Unreachable
+
+
+              if __name__ == "__main__":
+                  asyncio.run(run(sys.argv[1]))
+            ''
+          )
+        ];
+      };
+    };
+
+    testScript = ''
+      import pathlib
+      import os
+
+      start_all()
+
+      ircd.wait_for_unit("ngircd.service")
+      ircd.wait_for_open_port(6667)
+
+      with subtest("start the appservice"):
+          appservice.wait_for_unit("matrix-appservice-irc.service")
+          appservice.wait_for_open_port(8009)
+
+      with subtest("copy the registration file"):
+          appservice.copy_from_vm("/var/lib/matrix-appservice-irc/registration.yml")
+          homeserver.copy_from_host(
+              str(pathlib.Path(os.environ.get("out", os.getcwd())) / "registration.yml"), "/"
+          )
+          homeserver.succeed("chmod 444 /registration.yml")
+
+      with subtest("start the homeserver"):
+          homeserver.succeed(
+              "/run/current-system/specialisation/running/bin/switch-to-configuration test >&2"
+          )
+
+          homeserver.wait_for_unit("matrix-synapse.service")
+          homeserver.wait_for_open_port(8008)
+
+      with subtest("ensure messages can be exchanged"):
+          client.succeed("do_test ${homeserverUrl} >&2")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/matrix/conduit.nix b/nixpkgs/nixos/tests/matrix/conduit.nix
new file mode 100644
index 000000000000..2b81c23598eb
--- /dev/null
+++ b/nixpkgs/nixos/tests/matrix/conduit.nix
@@ -0,0 +1,97 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+  let
+    name = "conduit";
+  in
+  {
+    name = "matrix-conduit";
+
+    nodes = {
+      conduit = args: {
+        services.matrix-conduit = {
+          enable = true;
+          settings.global.server_name = name;
+          settings.global.allow_registration = true;
+          extraEnvironment.RUST_BACKTRACE = "yes";
+        };
+        services.nginx = {
+          enable = true;
+          virtualHosts.${name} = {
+            enableACME = false;
+            forceSSL = false;
+            enableSSL = false;
+
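+            # Proxy Matrix client traffic to conduit's listener on [::1]:6167.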
+            locations."/_matrix" = {
+              proxyPass = "http://[::1]:6167";
+            };
+          };
+        };
+        networking.firewall.allowedTCPPorts = [ 80 ];
+      };
+      client = { pkgs, ... }: {
+        environment.systemPackages = [
+          (
+            pkgs.writers.writePython3Bin "do_test"
+              { libraries = [ pkgs.python3Packages.matrix-nio ]; } ''
+              import asyncio
+
+              from nio import AsyncClient
+
+
+              async def main() -> None:
+                  # Connect to conduit
+                  client = AsyncClient("http://conduit:80", "alice")
+
+                  # Register as user alice
+                  response = await client.register("alice", "my-secret-password")
+
+                  # Log in as user alice
+                  response = await client.login("my-secret-password")
+
+                  # Create a new room
+                  response = await client.room_create(federate=False)
+                  room_id = response.room_id
+
+                  # Join the room
+                  response = await client.join(room_id)
+
+                  # Send a message to the room
+                  response = await client.room_send(
+                      room_id=room_id,
+                      message_type="m.room.message",
+                      content={
+                          "msgtype": "m.text",
+                          "body": "Hello conduit!"
+                      }
+                  )
+
+                  # Sync responses
+                  response = await client.sync(timeout=30000)
+
+                  # Check the message was received by conduit
+                  last_message = response.rooms.join[room_id].timeline.events[-1].body
+                  assert last_message == "Hello conduit!"
+
+                  # Leave the room
+                  response = await client.room_leave(room_id)
+
+                  # Close the client
+                  await client.close()
+
+              asyncio.get_event_loop().run_until_complete(main())
+            ''
+          )
+        ];
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      with subtest("start conduit"):
+            conduit.wait_for_unit("conduit.service")
+            conduit.wait_for_open_port(80)
+
+      with subtest("ensure messages can be exchanged"):
+            client.succeed("do_test")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/matrix/dendrite.nix b/nixpkgs/nixos/tests/matrix/dendrite.nix
new file mode 100644
index 000000000000..82e71d912130
--- /dev/null
+++ b/nixpkgs/nixos/tests/matrix/dendrite.nix
@@ -0,0 +1,101 @@
+import ../make-test-python.nix (
+  { pkgs, ... }:
+    let
+      homeserverUrl = "http://homeserver:8008";
+
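+      # Generate dendrite's Matrix signing key at build time using the
+      # generate-keys tool shipped with the dendrite package.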
+      private_key = pkgs.runCommand "matrix_key.pem" {
+        buildInputs = [ pkgs.dendrite ];
+      } "generate-keys --private-key $out";
+    in
+      {
+        name = "dendrite";
+        meta = with pkgs.lib; {
+          maintainers = teams.matrix.members;
+        };
+
+        nodes = {
+          homeserver = { pkgs, ... }: {
+            services.dendrite = {
+              enable = true;
+              loadCredential = [ "test_private_key:${private_key}" ];
+              openRegistration = true;
+              settings = {
+                global.server_name = "test-dendrite-server.com";
+                global.private_key = "$CREDENTIALS_DIRECTORY/test_private_key";
+                client_api.registration_disabled = false;
+              };
+            };
+
+            networking.firewall.allowedTCPPorts = [ 8008 ];
+          };
+
+          client = { pkgs, ... }: {
+            environment.systemPackages = [
+              (
+                pkgs.writers.writePython3Bin "do_test"
+                  { libraries = [ pkgs.python3Packages.matrix-nio ]; } ''
+                  import asyncio
+
+                  from nio import AsyncClient
+
+
+                  async def main() -> None:
+                      # Connect to dendrite
+                      client = AsyncClient("http://homeserver:8008", "alice")
+
+                      # Register as user alice
+                      response = await client.register("alice", "my-secret-password")
+
+                      # Log in as user alice
+                      response = await client.login("my-secret-password")
+
+                      # Create a new room
+                      response = await client.room_create(federate=False)
+                      room_id = response.room_id
+
+                      # Join the room
+                      response = await client.join(room_id)
+
+                      # Send a message to the room
+                      response = await client.room_send(
+                          room_id=room_id,
+                          message_type="m.room.message",
+                          content={
+                              "msgtype": "m.text",
+                              "body": "Hello world!"
+                          }
+                      )
+
+                      # Sync responses
+                      response = await client.sync(timeout=30000)
+
+                      # Check the message was received by dendrite
+                      last_message = response.rooms.join[room_id].timeline.events[-1].body
+                      assert last_message == "Hello world!"
+
+                      # Leave the room
+                      response = await client.room_leave(room_id)
+
+                      # Close the client
+                      await client.close()
+
+                  asyncio.get_event_loop().run_until_complete(main())
+                ''
+              )
+            ];
+          };
+        };
+
+        testScript = ''
+          start_all()
+
+          with subtest("start the homeserver"):
+              homeserver.wait_for_unit("dendrite.service")
+              homeserver.wait_for_open_port(8008)
+
+          with subtest("ensure messages can be exchanged"):
+              client.succeed("do_test")
+        '';
+
+      }
+)
diff --git a/nixpkgs/nixos/tests/matrix/mjolnir.nix b/nixpkgs/nixos/tests/matrix/mjolnir.nix
new file mode 100644
index 000000000000..8a888b17a3d7
--- /dev/null
+++ b/nixpkgs/nixos/tests/matrix/mjolnir.nix
@@ -0,0 +1,176 @@
+import ../make-test-python.nix (
+  { pkgs, ... }:
+  let
+    # Set up SSL certs for Synapse to be happy.
+    runWithOpenSSL = file: cmd: pkgs.runCommand file
+      {
+        buildInputs = [ pkgs.openssl ];
+      }
+      cmd;
+
+    ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
+    ca_pem = runWithOpenSSL "ca.pem" ''
+      openssl req \
+        -x509 -new -nodes -key ${ca_key} \
+        -days 10000 -out $out -subj "/CN=snakeoil-ca"
+    '';
+    key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048";
+    csr = runWithOpenSSL "matrix.csr" ''
+      openssl req \
+         -new -key ${key} \
+         -out $out -subj "/CN=localhost" \
+    '';
+    cert = runWithOpenSSL "matrix_cert.pem" ''
+      openssl x509 \
+        -req -in ${csr} \
+        -CA ${ca_pem} -CAkey ${ca_key} \
+        -CAcreateserial -out $out \
+        -days 365
+    '';
+  in
+  {
+    name = "mjolnir";
+    meta = with pkgs.lib; {
+      maintainers = teams.matrix.members;
+    };
+
+    nodes = {
+      homeserver = { pkgs, ... }: {
+        services.matrix-synapse = {
+          enable = true;
+          settings = {
+            database.name = "sqlite3";
+            tls_certificate_path = "${cert}";
+            tls_private_key_path = "${key}";
+            enable_registration = true;
+            enable_registration_without_verification = true;
+            registration_shared_secret = "supersecret-registration";
+
+            listeners = [ {
+              # The default but tls=false
+              bind_addresses = [
+                "0.0.0.0"
+              ];
+              port = 8448;
+              resources = [ {
+                compress = true;
+                names = [ "client" ];
+              } {
+                compress = false;
+                names = [ "federation" ];
+              } ];
+              tls = false;
+              type = "http";
+              x_forwarded = false;
+            } ];
+          };
+        };
+
+        networking.firewall.allowedTCPPorts = [ 8448 ];
+
+        environment.systemPackages = [
+          (pkgs.writeShellScriptBin "register_mjolnir_user" ''
+            exec ${pkgs.matrix-synapse}/bin/register_new_matrix_user \
+              -u mjolnir \
+              -p mjolnir-password \
+              --admin \
+              --shared-secret supersecret-registration \
+              http://localhost:8448
+          ''
+          )
+          (pkgs.writeShellScriptBin "register_moderator_user" ''
+            exec ${pkgs.matrix-synapse}/bin/register_new_matrix_user \
+              -u moderator \
+              -p moderator-password \
+              --no-admin \
+              --shared-secret supersecret-registration \
+              http://localhost:8448
+          ''
+          )
+        ];
+      };
+
+      mjolnir = { pkgs, ... }: {
+        services.mjolnir = {
+          enable = true;
+          homeserverUrl = "http://homeserver:8448";
+          pantalaimon = {
+            enable = true;
+            username = "mjolnir";
+            passwordFile = pkgs.writeText "password.txt" "mjolnir-password";
+            # otherwise mjolnir tries to connect to ::1, which pantalaimon does not listen on
+            options.listenAddress = "127.0.0.1";
+          };
+          managementRoom = "#moderators:homeserver";
+        };
+      };
+
+      client = { pkgs, ... }: {
+        environment.systemPackages = [
+          (pkgs.writers.writePython3Bin "create_management_room_and_invite_mjolnir"
+            { libraries = with pkgs.python3Packages; [
+                matrix-nio
+              ] ++ matrix-nio.optional-dependencies.e2e;
+            } ''
+            import asyncio
+
+            from nio import (
+                AsyncClient,
+                EnableEncryptionBuilder
+            )
+
+
+            async def main() -> None:
+                client = AsyncClient("http://homeserver:8448", "moderator")
+
+                await client.login("moderator-password")
+
+                room = await client.room_create(
+                    name="Moderators",
+                    alias="moderators",
+                    initial_state=[EnableEncryptionBuilder().as_dict()],
+                )
+
+                await client.join(room.room_id)
+                await client.room_invite(room.room_id, "@mjolnir:homeserver")
+
+            asyncio.run(main())
+          ''
+          )
+        ];
+      };
+    };
+
+    testScript = ''
+      with subtest("start homeserver"):
+        homeserver.start()
+
+        homeserver.wait_for_unit("matrix-synapse.service")
+        homeserver.wait_until_succeeds("curl --fail -L http://localhost:8448/")
+
+      with subtest("register users"):
+        # register mjolnir user
+        homeserver.succeed("register_mjolnir_user")
+        # register moderator user
+        homeserver.succeed("register_moderator_user")
+
+      with subtest("start mjolnir"):
+        mjolnir.start()
+
+        # wait for pantalaimon to be ready
+        mjolnir.wait_for_unit("pantalaimon-mjolnir.service")
+        mjolnir.wait_for_unit("mjolnir.service")
+
+        mjolnir.wait_until_succeeds("curl --fail -L http://localhost:8009/")
+
+      with subtest("ensure mjolnir can be invited to the management room"):
+        client.start()
+
+        client.wait_until_succeeds("curl --fail -L http://homeserver:8448/")
+
+        client.succeed("create_management_room_and_invite_mjolnir")
+
+        mjolnir.wait_for_console_text("Startup complete. Now monitoring rooms")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/matrix/pantalaimon.nix b/nixpkgs/nixos/tests/matrix/pantalaimon.nix
new file mode 100644
index 000000000000..b5d649e6517a
--- /dev/null
+++ b/nixpkgs/nixos/tests/matrix/pantalaimon.nix
@@ -0,0 +1,88 @@
+import ../make-test-python.nix (
+  { pkgs, ... }:
+  let
+    pantalaimonInstanceName = "testing";
+
+    # Set up SSL certs for Synapse to be happy.
+    runWithOpenSSL = file: cmd: pkgs.runCommand file
+      {
+        buildInputs = [ pkgs.openssl ];
+      }
+      cmd;
+
+    ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
+    ca_pem = runWithOpenSSL "ca.pem" ''
+      openssl req \
+        -x509 -new -nodes -key ${ca_key} \
+        -days 10000 -out $out -subj "/CN=snakeoil-ca"
+    '';
+    key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048";
+    csr = runWithOpenSSL "matrix.csr" ''
+      openssl req \
+         -new -key ${key} \
+         -out $out -subj "/CN=localhost" \
+    '';
+    cert = runWithOpenSSL "matrix_cert.pem" ''
+      openssl x509 \
+        -req -in ${csr} \
+        -CA ${ca_pem} -CAkey ${ca_key} \
+        -CAcreateserial -out $out \
+        -days 365
+    '';
+  in
+  {
+    name = "pantalaimon";
+    meta = with pkgs.lib; {
+      maintainers = teams.matrix.members;
+    };
+
+    nodes.machine = { pkgs, ... }: {
+      services.pantalaimon-headless.instances.${pantalaimonInstanceName} = {
+        homeserver = "https://localhost:8448";
+        listenAddress = "0.0.0.0";
+        listenPort = 8888;
+        logLevel = "debug";
+        ssl = false;
+      };
+
+      services.matrix-synapse = {
+        enable = true;
+        settings = {
+          listeners = [ {
+            port = 8448;
+            bind_addresses = [
+              "127.0.0.1"
+              "::1"
+            ];
+            type = "http";
+            tls = true;
+            x_forwarded = false;
+            resources = [ {
+              names = [
+                "client"
+              ];
+              compress = true;
+            } {
+              names = [
+                "federation"
+              ];
+              compress = false;
+            } ];
+          } ];
+          database.name = "sqlite3";
+          tls_certificate_path = "${cert}";
+          tls_private_key_path = "${key}";
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("pantalaimon-${pantalaimonInstanceName}.service")
+      machine.wait_for_unit("matrix-synapse.service")
+      machine.wait_until_succeeds(
+          "curl --fail -L http://localhost:8888/"
+      )
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/matrix/synapse-workers.nix b/nixpkgs/nixos/tests/matrix/synapse-workers.nix
new file mode 100644
index 000000000000..e90301aeae9e
--- /dev/null
+++ b/nixpkgs/nixos/tests/matrix/synapse-workers.nix
@@ -0,0 +1,50 @@
+import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "matrix-synapse-workers";
+  meta = with pkgs.lib; {
+    maintainers = teams.matrix.members;
+  };
+
+  nodes = {
+    homeserver =
+      { pkgs
+      , nodes
+      , ...
+      }: {
+        services.postgresql = {
+          enable = true;
+          initialScript = pkgs.writeText "synapse-init.sql" ''
+            CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
+            CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
+            TEMPLATE template0
+            LC_COLLATE = "C"
+            LC_CTYPE = "C";
+          '';
+        };
+
+        services.matrix-synapse = {
+          enable = true;
+          settings = {
+            database = {
+              name = "psycopg2";
+              args.password = "synapse";
+            };
+            enable_registration = true;
+            enable_registration_without_verification = true;
+
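+            # Route outbound federation through the dedicated worker defined below.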
+            federation_sender_instances = [ "federation_sender" ];
+          };
+          configureRedisLocally = true;
+          workers = {
+            "federation_sender" = { };
+          };
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    homeserver.wait_for_unit("matrix-synapse.service");
+    homeserver.wait_for_unit("matrix-synapse-worker-federation_sender.service");
+  '';
+})
diff --git a/nixpkgs/nixos/tests/matrix/synapse.nix b/nixpkgs/nixos/tests/matrix/synapse.nix
new file mode 100644
index 000000000000..98b077469192
--- /dev/null
+++ b/nixpkgs/nixos/tests/matrix/synapse.nix
@@ -0,0 +1,234 @@
+import ../make-test-python.nix ({ pkgs, ... } : let
+
+
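+  # Snakeoil CA and server certificate so Synapse can serve TLS during the test.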
+  runWithOpenSSL = file: cmd: pkgs.runCommand file {
+    buildInputs = [ pkgs.openssl ];
+  } cmd;
+
+
+  ca_key = runWithOpenSSL "ca-key.pem" "openssl genrsa -out $out 2048";
+  ca_pem = runWithOpenSSL "ca.pem" ''
+    openssl req \
+      -x509 -new -nodes -key ${ca_key} \
+      -days 10000 -out $out -subj "/CN=snakeoil-ca"
+  '';
+  key = runWithOpenSSL "matrix_key.pem" "openssl genrsa -out $out 2048";
+  csr = runWithOpenSSL "matrix.csr" ''
+    openssl req \
+       -new -key ${key} \
+       -out $out -subj "/CN=localhost" \
+  '';
+  cert = runWithOpenSSL "matrix_cert.pem" ''
+    openssl x509 \
+      -req -in ${csr} \
+      -CA ${ca_pem} -CAkey ${ca_key} \
+      -CAcreateserial -out $out \
+      -days 365
+  '';
+
+
+  mailerCerts = import ../common/acme/server/snakeoil-certs.nix;
+  mailerDomain = mailerCerts.domain;
+  registrationSharedSecret = "unsecure123";
+  testUser = "alice";
+  testPassword = "alicealice";
+  testEmail = "alice@example.com";
+
+  listeners = [ {
+    port = 8448;
+    bind_addresses = [
+      "127.0.0.1"
+      "::1"
+    ];
+    type = "http";
+    tls = true;
+    x_forwarded = false;
+    resources = [ {
+      names = [
+        "client"
+      ];
+      compress = true;
+    } {
+      names = [
+        "federation"
+      ];
+      compress = false;
+    } ];
+  } ];
+
+in {
+
+  name = "matrix-synapse";
+  meta = with pkgs.lib; {
+    maintainers = teams.matrix.members;
+  };
+
+  nodes = {
+    # Since 0.33.0, matrix-synapse doesn't allow underscores in server names
+    serverpostgres = { pkgs, nodes, config, ... }: let
+      mailserverIP = nodes.mailserver.config.networking.primaryIPAddress;
+    in
+    {
+      services.matrix-synapse = {
+        enable = true;
+        settings = {
+          inherit listeners;
+          database = {
+            name = "psycopg2";
+            args.password = "synapse";
+          };
+          redis = {
+            enabled = true;
+            host = "localhost";
+            port = config.services.redis.servers.matrix-synapse.port;
+          };
+          tls_certificate_path = "${cert}";
+          tls_private_key_path = "${key}";
+          registration_shared_secret = registrationSharedSecret;
+          public_baseurl = "https://example.com";
+          email = {
+            smtp_host = mailerDomain;
+            smtp_port = 25;
+            require_transport_security = true;
+            notif_from = "matrix <matrix@${mailerDomain}>";
+            app_name = "Matrix";
+          };
+        };
+      };
+      services.postgresql = {
+        enable = true;
+
+        # The database name and user are configured by the following options:
+        #   - services.matrix-synapse.database_name
+        #   - services.matrix-synapse.database_user
+        #
+        # The values used here represent the default values of the module.
+        initialScript = pkgs.writeText "synapse-init.sql" ''
+          CREATE ROLE "matrix-synapse" WITH LOGIN PASSWORD 'synapse';
+          CREATE DATABASE "matrix-synapse" WITH OWNER "matrix-synapse"
+            TEMPLATE template0
+            LC_COLLATE = "C"
+            LC_CTYPE = "C";
+        '';
+      };
+
+      services.redis.servers.matrix-synapse = {
+        enable = true;
+        port = 6380;
+      };
+
+      networking.extraHosts = ''
+        ${mailserverIP} ${mailerDomain}
+      '';
+
+      security.pki.certificateFiles = [
+        mailerCerts.ca.cert ca_pem
+      ];
+
+      environment.systemPackages = let
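+        # Send a test mail to the postfix node over STARTTLS to verify mail delivery works.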
+        sendTestMailStarttls = pkgs.writeScriptBin "send-testmail-starttls" ''
+          #!${pkgs.python3.interpreter}
+          import smtplib
+          import ssl
+
+          ctx = ssl.create_default_context()
+
+          with smtplib.SMTP('${mailerDomain}') as smtp:
+            smtp.ehlo()
+            smtp.starttls(context=ctx)
+            smtp.ehlo()
+            smtp.sendmail('matrix@${mailerDomain}', '${testEmail}', 'Subject: Test STARTTLS\n\nTest data.')
+            smtp.quit()
+         '';
+
+        obtainTokenAndRegisterEmail = let
+          # Adding the email through the API is quite complicated: it involves more
+          # than one step and some client-side calculation.
+          insertEmailForAlice = pkgs.writeText "alice-email.sql" ''
+            INSERT INTO user_threepids (user_id, medium, address, validated_at, added_at) VALUES ('${testUser}@serverpostgres', 'email', '${testEmail}', '1629149927271', '1629149927270');
+          '';
+        in
+        pkgs.writeScriptBin "obtain-token-and-register-email" ''
+          #!${pkgs.runtimeShell}
+          set -o errexit
+          set -o pipefail
+          set -o nounset
+          su postgres -c "psql -d matrix-synapse -f ${insertEmailForAlice}"
+          curl --fail -XPOST 'https://localhost:8448/_matrix/client/r0/account/password/email/requestToken' -d '{"email":"${testEmail}","client_secret":"foobar","send_attempt":1}' -v
+        '';
+        in [ sendTestMailStarttls pkgs.matrix-synapse obtainTokenAndRegisterEmail ];
+    };
+
+    # test mail delivery
+    mailserver = args: let
+    in
+    {
+      security.pki.certificateFiles = [
+        mailerCerts.ca.cert
+      ];
+
+      networking.firewall.enable = false;
+
+      services.postfix = {
+        enable = true;
+        hostname = "${mailerDomain}";
+        # open relay for subnet
+        networksStyle = "subnet";
+        enableSubmission = true;
+        tlsTrustedAuthorities = "${mailerCerts.ca.cert}";
+        sslCert = "${mailerCerts.${mailerDomain}.cert}";
+        sslKey = "${mailerCerts.${mailerDomain}.key}";
+
+        # blackhole transport
+        transport = "example.com discard:silently";
+
+        config = {
+          debug_peer_level = "10";
+          smtpd_relay_restrictions = [
+            "permit_mynetworks" "reject_unauth_destination"
+          ];
+
+          # disable obsolete TLS protocols that old versions of twisted still try to use
+          smtpd_tls_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3";
+          smtp_tls_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3";
+          smtpd_tls_mandatory_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3";
+          smtp_tls_mandatory_protocols = "TLSv1.3, TLSv1.2, !TLSv1.1, !TLSv1, !SSLv2, !SSLv3";
+        };
+      };
+    };
+
+    serversqlite = args: {
+      services.matrix-synapse = {
+        enable = true;
+        settings = {
+          inherit listeners;
+          database.name = "sqlite3";
+          tls_certificate_path = "${cert}";
+          tls_private_key_path = "${key}";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    mailserver.wait_for_unit("postfix.service")
+    serverpostgres.succeed("send-testmail-starttls")
+    serverpostgres.wait_for_unit("matrix-synapse.service")
+    serverpostgres.wait_until_succeeds(
+        "curl --fail -L --cacert ${ca_pem} https://localhost:8448/"
+    )
+    serverpostgres.wait_until_succeeds(
+        "journalctl -u matrix-synapse.service | grep -q 'Connected to redis'"
+    )
+    serverpostgres.require_unit_state("postgresql.service")
+    serverpostgres.succeed("register_new_matrix_user -u ${testUser} -p ${testPassword} -a -k ${registrationSharedSecret} https://localhost:8448/")
+    serverpostgres.succeed("obtain-token-and-register-email")
+    serversqlite.wait_for_unit("matrix-synapse.service")
+    serversqlite.wait_until_succeeds(
+        "curl --fail -L --cacert ${ca_pem} https://localhost:8448/"
+    )
+    serversqlite.succeed("[ -e /var/lib/matrix-synapse/homeserver.db ]")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/mattermost.nix b/nixpkgs/nixos/tests/mattermost.nix
new file mode 100644
index 000000000000..e11201f05357
--- /dev/null
+++ b/nixpkgs/nixos/tests/mattermost.nix
@@ -0,0 +1,140 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  host = "smoke.test";
+  port = "8065";
+  url = "http://${host}:${port}";
+  siteName = "NixOS Smoke Tests, Inc.";
+
+  makeMattermost = mattermostConfig:
+    { config, ... }: {
+      environment.systemPackages = [
+        pkgs.mattermost
+        pkgs.curl
+        pkgs.jq
+      ];
+      networking.hosts = {
+        "127.0.0.1" = [ host ];
+      };
+      services.mattermost = lib.recursiveUpdate {
+        enable = true;
+        inherit siteName;
+        listenAddress = "0.0.0.0:${port}";
+        siteUrl = url;
+        extraConfig = {
+          SupportSettings.AboutLink = "https://nixos.org";
+        };
+      } mattermostConfig;
+    };
+in
+{
+  name = "mattermost";
+
+  nodes = {
+    mutable = makeMattermost {
+      mutableConfig = true;
+      extraConfig.SupportSettings.HelpLink = "https://search.nixos.org";
+    };
+    mostlyMutable = makeMattermost {
+      mutableConfig = true;
+      preferNixConfig = true;
+      plugins = let
+        mattermostDemoPlugin = pkgs.fetchurl {
+          url = "https://github.com/mattermost/mattermost-plugin-demo/releases/download/v0.9.0/com.mattermost.demo-plugin-0.9.0.tar.gz";
+          sha256 = "1h4qi34gcxcx63z8wiqcf2aaywmvv8lys5g8gvsk13kkqhlmag25";
+        };
+      in [
+        mattermostDemoPlugin
+      ];
+    };
+    immutable = makeMattermost {
+      mutableConfig = false;
+      extraConfig.SupportSettings.HelpLink = "https://search.nixos.org";
+    };
+    environmentFile = makeMattermost {
+      mutableConfig = false;
+      extraConfig.SupportSettings.AboutLink = "https://example.org";
+      environmentFile = pkgs.writeText "mattermost-env" ''
+        MM_SUPPORTSETTINGS_ABOUTLINK=https://nixos.org
+      '';
+    };
+  };
+
+  testScript = let
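+    # Fetch the client config from the API and assert that the site name, the
+    # Mattermost version, and the given jq expression all match.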
+    expectConfig = jqExpression: pkgs.writeShellScript "expect-config" ''
+      set -euo pipefail
+      echo "Expecting config to match: "${lib.escapeShellArg jqExpression} >&2
+      curl ${lib.escapeShellArg url} >/dev/null
+      config="$(curl ${lib.escapeShellArg "${url}/api/v4/config/client?format=old"})"
+      echo "Config: $(echo "$config" | ${pkgs.jq}/bin/jq)" >&2
+      [[ "$(echo "$config" | ${pkgs.jq}/bin/jq -r ${lib.escapeShellArg ".SiteName == $siteName and .Version == ($mattermostName / $sep)[-1] and (${jqExpression})"} --arg siteName ${lib.escapeShellArg siteName} --arg mattermostName ${lib.escapeShellArg pkgs.mattermost.name} --arg sep '-')" = "true" ]]
+    '';
+
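+    # Rewrite config.json in place with the given jq expression, simulating a
+    # manual edit of the on-disk configuration.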
+    setConfig = jqExpression: pkgs.writeShellScript "set-config" ''
+      set -euo pipefail
+      mattermostConfig=/var/lib/mattermost/config/config.json
+      newConfig="$(${pkgs.jq}/bin/jq -r ${lib.escapeShellArg jqExpression} $mattermostConfig)"
+      rm -f $mattermostConfig
+      echo "$newConfig" > "$mattermostConfig"
+    '';
+
+  in
+  ''
+    start_all()
+
+    ## Mutable node tests ##
+    mutable.wait_for_unit("mattermost.service")
+    mutable.wait_for_open_port(8065)
+
+    # Get the initial config
+    mutable.succeed("${expectConfig ''.AboutLink == "https://nixos.org" and .HelpLink == "https://search.nixos.org"''}")
+
+    # Edit the config
+    mutable.succeed("${setConfig ''.SupportSettings.AboutLink = "https://mattermost.com"''}")
+    mutable.succeed("${setConfig ''.SupportSettings.HelpLink = "https://nixos.org/nixos/manual"''}")
+    mutable.systemctl("restart mattermost.service")
+    mutable.wait_for_open_port(8065)
+
+    # AboutLink and HelpLink should be changed
+    mutable.succeed("${expectConfig ''.AboutLink == "https://mattermost.com" and .HelpLink == "https://nixos.org/nixos/manual"''}")
+
+    ## Mostly mutable node tests ##
+    mostlyMutable.wait_for_unit("mattermost.service")
+    mostlyMutable.wait_for_open_port(8065)
+
+    # Get the initial config
+    mostlyMutable.succeed("${expectConfig ''.AboutLink == "https://nixos.org"''}")
+
+    # Edit the config
+    mostlyMutable.succeed("${setConfig ''.SupportSettings.AboutLink = "https://mattermost.com"''}")
+    mostlyMutable.succeed("${setConfig ''.SupportSettings.HelpLink = "https://nixos.org/nixos/manual"''}")
+    mostlyMutable.systemctl("restart mattermost.service")
+    mostlyMutable.wait_for_open_port(8065)
+
+    # AboutLink should be overridden by NixOS configuration; HelpLink should be what we set above
+    mostlyMutable.succeed("${expectConfig ''.AboutLink == "https://nixos.org" and .HelpLink == "https://nixos.org/nixos/manual"''}")
+
+    ## Immutable node tests ##
+    immutable.wait_for_unit("mattermost.service")
+    immutable.wait_for_open_port(8065)
+
+    # Get the initial config
+    immutable.succeed("${expectConfig ''.AboutLink == "https://nixos.org" and .HelpLink == "https://search.nixos.org"''}")
+
+    # Edit the config
+    immutable.succeed("${setConfig ''.SupportSettings.AboutLink = "https://mattermost.com"''}")
+    immutable.succeed("${setConfig ''.SupportSettings.HelpLink = "https://nixos.org/nixos/manual"''}")
+    immutable.systemctl("restart mattermost.service")
+    immutable.wait_for_open_port(8065)
+
+    # Our edits should be ignored on restart
+    immutable.succeed("${expectConfig ''.AboutLink == "https://nixos.org" and .HelpLink == "https://search.nixos.org"''}")
+
+
+    ## Environment File node tests ##
+    environmentFile.wait_for_unit("mattermost.service")
+    environmentFile.wait_for_open_port(8065)
+
+    # Settings in the environment file should override settings set otherwise
+    environmentFile.succeed("${expectConfig ''.AboutLink == "https://nixos.org"''}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mediamtx.nix b/nixpkgs/nixos/tests/mediamtx.nix
new file mode 100644
index 000000000000..8cacd02631d9
--- /dev/null
+++ b/nixpkgs/nixos/tests/mediamtx.nix
@@ -0,0 +1,57 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+{
+  name = "mediamtx";
+  meta.maintainers = with lib.maintainers; [ fpletz ];
+
+  nodes = {
+    machine = { config, ... }: {
+      services.mediamtx = {
+        enable = true;
+        settings = {
+          metrics = true;
+          paths.all.source = "publisher";
+        };
+      };
+
+      systemd.services.rtmp-publish = {
+        description = "Publish an RTMP stream to mediamtx";
+        after = [ "mediamtx.service" ];
+        bindsTo = [ "mediamtx.service" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          DynamicUser = true;
+          Restart = "on-failure";
+          RestartSec = "1s";
+          TimeoutStartSec = "10s";
+          ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -re -f lavfi -i smptebars=size=800x600:rate=10 -c libx264 -f flv rtmp://localhost:1935/test";
+        };
+      };
+
+      systemd.services.rtmp-receive = {
+        description = "Receive an RTMP stream from mediamtx";
+        after = [ "rtmp-publish.service" ];
+        bindsTo = [ "rtmp-publish.service" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          DynamicUser = true;
+          Restart = "on-failure";
+          RestartSec = "1s";
+          TimeoutStartSec = "10s";
+          ExecStart = "${lib.getBin pkgs.ffmpeg-headless}/bin/ffmpeg -y -re -i rtmp://localhost:1935/test -f flv /dev/null";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("mediamtx.service")
+    machine.wait_for_unit("rtmp-publish.service")
+    machine.wait_for_unit("rtmp-receive.service")
+    machine.wait_for_open_port(9998)
+    machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"publish\".*1$'")
+    machine.succeed("curl http://localhost:9998/metrics | grep '^rtmp_conns.*state=\"read\".*1$'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mediatomb.nix b/nixpkgs/nixos/tests/mediatomb.nix
new file mode 100644
index 000000000000..9c84aa3e92a5
--- /dev/null
+++ b/nixpkgs/nixos/tests/mediatomb.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix {
+  name = "mediatomb";
+
+  nodes = {
+    server = {
+      services.mediatomb = {
+        enable = true;
+        serverName = "Gerbera";
+        interface = "eth1";
+        openFirewall = true;
+        mediaDirectories = [
+          {
+            path = "/var/lib/gerbera/pictures";
+            recursive = false;
+            hidden-files = false;
+          }
+          {
+            path = "/var/lib/gerbera/audio";
+            recursive = true;
+            hidden-files = false;
+          }
+        ];
+      };
+      systemd.tmpfiles.rules = [
+        "d /var/lib/gerbera/pictures 0770 mediatomb mediatomb"
+        "d /var/lib/gerbera/audio 0770 mediatomb mediatomb"
+      ];
+    };
+
+    client = {};
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("mediatomb")
+    server.wait_until_succeeds("nc -z 192.168.1.2 49152")
+    server.succeed("curl -v --fail http://server:49152/")
+
+    client.wait_for_unit("multi-user.target")
+    page = client.succeed("curl -v --fail http://server:49152/")
+    assert "Gerbera" in page and "MediaTomb" not in page
+  '';
+}
diff --git a/nixpkgs/nixos/tests/mediawiki.nix b/nixpkgs/nixos/tests/mediawiki.nix
new file mode 100644
index 000000000000..e30cc55ff616
--- /dev/null
+++ b/nixpkgs/nixos/tests/mediawiki.nix
@@ -0,0 +1,93 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; },
+}:
+
+let
+  shared = {
+    services.mediawiki.enable = true;
+    services.mediawiki.httpd.virtualHost.hostName = "localhost";
+    services.mediawiki.httpd.virtualHost.adminAddr = "root@example.com";
+    services.mediawiki.passwordFile = pkgs.writeText "password" "correcthorsebatterystaple";
+    services.mediawiki.extensions = {
+      Matomo = pkgs.fetchzip {
+        url = "https://github.com/DaSchTour/matomo-mediawiki-extension/archive/v4.0.1.tar.gz";
+        sha256 = "0g5rd3zp0avwlmqagc59cg9bbkn3r7wx7p6yr80s644mj6dlvs1b";
+      };
+      ParserFunctions = null;
+    };
+  };
+
+  testLib = import ../lib/testing-python.nix {
+    inherit system pkgs;
+    extraConfigurations = [ shared ];
+  };
+in
+{
+  mysql = testLib.makeTest {
+    name = "mediawiki-mysql";
+    nodes.machine = {
+      services.mediawiki.database.type = "mysql";
+    };
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("phpfpm-mediawiki.service")
+
+      page = machine.succeed("curl -fL http://localhost/")
+      assert "MediaWiki has been installed" in page
+    '';
+  };
+
+  postgresql = testLib.makeTest {
+    name = "mediawiki-postgres";
+    nodes.machine = {
+      services.mediawiki.database.type = "postgres";
+    };
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("phpfpm-mediawiki.service")
+
+      page = machine.succeed("curl -fL http://localhost/")
+      assert "MediaWiki has been installed" in page
+    '';
+  };
+
+  nohttpd = testLib.makeTest {
+    name = "mediawiki-nohttpd";
+    nodes.machine = {
+      services.mediawiki.webserver = "none";
+    };
+    testScript = { nodes, ... }: ''
+      start_all()
+      machine.wait_for_unit("phpfpm-mediawiki.service")
+      env = (
+        "SCRIPT_NAME=/index.php",
+        "SCRIPT_FILENAME=${nodes.machine.services.mediawiki.finalPackage}/share/mediawiki/index.php",
+        "REMOTE_ADDR=127.0.0.1",
+        'QUERY_STRING=title=Main_Page',
+        "REQUEST_METHOD=GET",
+      );
+      page = machine.succeed(f"{' '.join(env)} ${pkgs.fcgi}/bin/cgi-fcgi -bind -connect ${nodes.machine.services.phpfpm.pools.mediawiki.socket}")
+      assert "MediaWiki has been installed" in page, f"no 'MediaWiki has been installed' in:\n{page}"
+    '';
+  };
+
+  nginx = testLib.makeTest {
+    name = "mediawiki-nginx";
+    nodes.machine = {
+      services.mediawiki.webserver = "nginx";
+    };
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("phpfpm-mediawiki.service")
+      machine.wait_for_unit("nginx.service")
+
+      page = machine.succeed("curl -fL http://localhost/")
+      assert "MediaWiki has been installed" in page
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/meilisearch.nix b/nixpkgs/nixos/tests/meilisearch.nix
new file mode 100644
index 000000000000..c31dcb0559db
--- /dev/null
+++ b/nixpkgs/nixos/tests/meilisearch.nix
@@ -0,0 +1,61 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    listenAddress = "127.0.0.1";
+    listenPort = 7700;
+    apiUrl = "http://${listenAddress}:${toString listenPort}";
+    uid = "movies";
+    indexJSON = pkgs.writeText "index.json" (builtins.toJSON { inherit uid; });
+    moviesJSON = pkgs.fetchurl {
+      url = "https://github.com/meilisearch/meilisearch/raw/v0.23.1/datasets/movies/movies.json";
+      sha256 = "1r3srld63dpmg9yrmysm6xl175661j5cspi93mk5q2wf8xwn50c5";
+    };
+  in {
+    name = "meilisearch";
+    meta.maintainers = with lib.maintainers; [ Br1ght0ne ];
+
+    nodes.machine = { ... }: {
+      environment.systemPackages = with pkgs; [ curl jq ];
+      services.meilisearch = {
+        enable = true;
+        inherit listenAddress listenPort;
+      };
+    };
+
+    testScript = ''
+      import json
+
+      start_all()
+
+      machine.wait_for_unit("meilisearch")
+      machine.wait_for_open_port(7700)
+
+      with subtest("check version"):
+          version = json.loads(machine.succeed("curl ${apiUrl}/version"))
+          assert version["pkgVersion"] == "${pkgs.meilisearch.version}"
+
+      with subtest("create index"):
+          machine.succeed(
+              "curl -X POST -H 'Content-Type: application/json' ${apiUrl}/indexes --data @${indexJSON}"
+          )
+          indexes = json.loads(machine.succeed("curl ${apiUrl}/indexes"))
+          assert indexes["total"] == 1, "index wasn't created"
+
+      with subtest("add documents"):
+          response = json.loads(
+              machine.succeed(
+                  "curl -X POST -H 'Content-Type: application/json' ${apiUrl}/indexes/${uid}/documents --data-binary @${moviesJSON}"
+              )
+          )
+          task_uid = response["taskUid"]
+          machine.wait_until_succeeds(
+              f"curl ${apiUrl}/tasks/{task_uid} | jq -e '.status == \"succeeded\"'"
+          )
+
+      with subtest("search"):
+          response = json.loads(
+              machine.succeed("curl ${apiUrl}/indexes/movies/search?q=hero")
+          )
+          print(response)
+          assert len(response["hits"]) >= 1, "no results found"
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/memcached.nix b/nixpkgs/nixos/tests/memcached.nix
new file mode 100644
index 000000000000..6549995110d7
--- /dev/null
+++ b/nixpkgs/nixos/tests/memcached.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "memcached";
+
+  nodes.machine = {
+    imports = [ ../modules/profiles/minimal.nix ];
+    services.memcached.enable = true;
+  };
+
+  testScript = let
+    testScript = pkgs.writers.writePython3 "test_memcache" {
+      libraries = with pkgs.python3Packages; [ memcached ];
+    } ''
+      import memcache
+      c = memcache.Client(['localhost:11211'])
+      c.set('key', 'value')
+      assert 'value' == c.get('key')
+    '';
+  in ''
+    machine.start()
+    machine.wait_for_unit("memcached.service")
+    machine.wait_for_open_port(11211)
+    machine.succeed("${testScript}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/merecat.nix b/nixpkgs/nixos/tests/merecat.nix
new file mode 100644
index 000000000000..9d8f66165ee9
--- /dev/null
+++ b/nixpkgs/nixos/tests/merecat.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "merecat";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.merecat = {
+      enable = true;
+      settings = {
+        hostname = "localhost";
+        virtual-host = true;
+        directory = toString (pkgs.runCommand "merecat-webdir" {} ''
+          mkdir -p $out/foo.localhost $out/bar.localhost
+          echo '<h1>Hello foo</h1>' > $out/foo.localhost/index.html
+          echo '<h1>Hello bar</h1>' > $out/bar.localhost/index.html
+        '');
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("merecat")
+    machine.wait_for_open_port(80)
+    machine.succeed("curl --fail foo.localhost | grep 'Hello foo'")
+    machine.succeed("curl --fail bar.localhost | grep 'Hello bar'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/metabase.nix b/nixpkgs/nixos/tests/metabase.nix
new file mode 100644
index 000000000000..1b25071902e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/metabase.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "metabase";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.metabase.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("metabase.service")
+    machine.wait_for_open_port(3000)
+    machine.wait_until_succeeds("curl -fL http://localhost:3000/setup | grep Metabase")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mimir.nix b/nixpkgs/nixos/tests/mimir.nix
new file mode 100644
index 000000000000..f1b30d261472
--- /dev/null
+++ b/nixpkgs/nixos/tests/mimir.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "mimir";
+  nodes = {
+    server = { ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      services.mimir.enable = true;
+      services.mimir.configuration = {
+        ingester.ring.replication_factor = 1;
+      };
+
+      services.telegraf.enable = true;
+      services.telegraf.extraConfig = {
+        agent.interval = "1s";
+        agent.flush_interval = "1s";
+        inputs.exec = {
+          commands = [
+            "${pkgs.coreutils}/bin/echo 'foo i=42i'"
+          ];
+          data_format = "influx";
+        };
+        outputs = {
+          http = {
+            # test remote write
+            url = "http://localhost:8080/api/v1/push";
+
+            # Data format to output.
+            data_format = "prometheusremotewrite";
+
+            headers = {
+              Content-Type = "application/x-protobuf";
+              Content-Encoding = "snappy";
+              X-Scope-OrgID = "nixos";
+              X-Prometheus-Remote-Write-Version = "0.1.0";
+            };
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("mimir.service")
+    server.wait_for_unit("telegraf.service")
+    server.wait_for_open_port(8080)
+    server.wait_until_succeeds(
+        "curl -H 'X-Scope-OrgID: nixos' http://127.0.0.1:8080/prometheus/api/v1/label/host/values | jq -r '.data[0]' | grep server"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mindustry.nix b/nixpkgs/nixos/tests/mindustry.nix
new file mode 100644
index 000000000000..b3f5423c601b
--- /dev/null
+++ b/nixpkgs/nixos/tests/mindustry.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "mindustry";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = [ pkgs.mindustry ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.execute("mindustry >&2 &")
+      machine.wait_for_window("Mindustry")
+      # Loading can take a while. Avoid wasting cycles on OCR during that time
+      machine.sleep(60)
+      machine.wait_for_text(r"(Play|Database|Editor|Mods|Settings|Quit)")
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/minecraft-server.nix b/nixpkgs/nixos/tests/minecraft-server.nix
new file mode 100644
index 000000000000..6e733bb96c1c
--- /dev/null
+++ b/nixpkgs/nixos/tests/minecraft-server.nix
@@ -0,0 +1,40 @@
+let
+  seed = "2151901553968352745";
+  rcon-pass = "foobar";
+  rcon-port = 43000;
+in import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "minecraft-server";
+  meta = with pkgs.lib.maintainers; { maintainers = [ nequissimus ]; };
+
+  nodes.server = { ... }: {
+    environment.systemPackages = [ pkgs.mcrcon ];
+
+    nixpkgs.config.allowUnfree = true;
+
+    services.minecraft-server = {
+      declarative = true;
+      enable = true;
+      eula = true;
+      serverProperties = {
+        enable-rcon = true;
+        level-seed = seed;
+        level-type = "flat";
+        generate-structures = false;
+        online-mode = false;
+        "rcon.password" = rcon-pass;
+        "rcon.port" = rcon-port;
+      };
+    };
+
+    virtualisation.memorySize = 2047;
+  };
+
+  testScript = ''
+    server.wait_for_unit("minecraft-server")
+    server.wait_for_open_port(${toString rcon-port})
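+    # Query the world seed over RCON and check it matches the declarative level-seed.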
+    assert "${seed}" in server.succeed(
+        "mcrcon -H localhost -P ${toString rcon-port} -p '${rcon-pass}' -c 'seed'"
+    )
+    server.succeed("systemctl stop minecraft-server")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/minecraft.nix b/nixpkgs/nixos/tests/minecraft.nix
new file mode 100644
index 000000000000..1c34f04b4df2
--- /dev/null
+++ b/nixpkgs/nixos/tests/minecraft.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "minecraft";
+  meta = with lib.maintainers; { maintainers = [ nequissimus ]; };
+
+  nodes.client = { nodes, ... }:
+      let user = nodes.client.config.users.users.alice;
+      in {
+        imports = [ ./common/user-account.nix ./common/x11.nix ];
+
+        environment.systemPackages = [ pkgs.minecraft ];
+
+        nixpkgs.config.allowUnfree = true;
+
+        test-support.displayManager.auto.user = user.name;
+      };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }:
+    let user = nodes.client.config.users.users.alice;
+    in ''
+      client.wait_for_x()
+      client.execute("su - alice -c minecraft-launcher >&2 &")
+      client.wait_for_text("Create a new Microsoft account")
+      client.sleep(10)
+      client.screenshot("launcher")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/minidlna.nix b/nixpkgs/nixos/tests/minidlna.nix
new file mode 100644
index 000000000000..32721819634e
--- /dev/null
+++ b/nixpkgs/nixos/tests/minidlna.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "minidlna";
+
+  nodes = {
+    server =
+      { ... }:
+      {
+        imports = [ ../modules/profiles/minimal.nix ];
+        services.minidlna.enable = true;
+        services.minidlna.openFirewall = true;
+        services.minidlna.settings = {
+          log_level = "error";
+          media_dir = [
+            "PV,/tmp/stuff"
+          ];
+          friendly_name = "rpi3";
+          root_container = "B";
+          notify_interval = 60;
+          album_art_names = [
+            "Cover.jpg/cover.jpg/AlbumArtSmall.jpg/albumartsmall.jpg"
+            "AlbumArt.jpg/albumart.jpg/Album.jpg/album.jpg"
+            "Folder.jpg/folder.jpg/Thumb.jpg/thumb.jpg"
+          ];
+        };
+      };
+    client = { ... }: { };
+  };
+
+  testScript =
+  ''
+    start_all()
+    server.succeed("mkdir -p /tmp/stuff && chown minidlna: /tmp/stuff")
+    server.wait_for_unit("minidlna")
+    server.wait_for_open_port(8200)
+    # requests must be made *by IP* to avoid triggering minidlna's
+    # DNS-rebinding protection
+    server.succeed("curl --fail http://$(getent ahostsv4 localhost | head -n1 | cut -f 1 -d ' '):8200/")
+    client.succeed("curl --fail http://$(getent ahostsv4 server | head -n1 | cut -f 1 -d ' '):8200/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/miniflux.nix b/nixpkgs/nixos/tests/miniflux.nix
new file mode 100644
index 000000000000..a3af53db0e7a
--- /dev/null
+++ b/nixpkgs/nixos/tests/miniflux.nix
@@ -0,0 +1,87 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  port = 3142;
+  username = "alice";
+  password = "correcthorsebatterystaple";
+  defaultPort = 8080;
+  defaultUsername = "admin";
+  defaultPassword = "password";
+  adminCredentialsFile = pkgs.writeText "admin-credentials" ''
+            ADMIN_USERNAME=${defaultUsername}
+            ADMIN_PASSWORD=${defaultPassword}
+          '';
+  customAdminCredentialsFile = pkgs.writeText "admin-credentials" ''
+            ADMIN_USERNAME=${username}
+            ADMIN_PASSWORD=${password}
+          '';
+
+in
+{
+  name = "miniflux";
+  meta.maintainers = [ ];
+
+  nodes = {
+    default =
+      { ... }:
+      {
+        security.apparmor.enable = true;
+        services.miniflux = {
+          enable = true;
+          inherit adminCredentialsFile;
+        };
+      };
+
+    withoutSudo =
+      { ... }:
+      {
+        security.apparmor.enable = true;
+        services.miniflux = {
+          enable = true;
+          inherit adminCredentialsFile;
+        };
+        security.sudo.enable = false;
+      };
+
+    customized =
+      { ... }:
+      {
+        security.apparmor.enable = true;
+        services.miniflux = {
+          enable = true;
+          config = {
+            CLEANUP_FREQUENCY = "48";
+            LISTEN_ADDR = "localhost:${toString port}";
+          };
+          adminCredentialsFile = customAdminCredentialsFile;
+        };
+      };
+  };
+  testScript = ''
+    start_all()
+
+    default.wait_for_unit("miniflux.service")
+    default.wait_for_open_port(${toString defaultPort})
+    default.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep OK")
+    default.succeed(
+        "curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep '\"is_admin\":true'"
+    )
+    default.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""')
+
+    withoutSudo.wait_for_unit("miniflux.service")
+    withoutSudo.wait_for_open_port(${toString defaultPort})
+    withoutSudo.succeed("curl --fail 'http://localhost:${toString defaultPort}/healthcheck' | grep OK")
+    withoutSudo.succeed(
+        "curl 'http://localhost:${toString defaultPort}/v1/me' -u '${defaultUsername}:${defaultPassword}' -H Content-Type:application/json | grep '\"is_admin\":true'"
+    )
+    withoutSudo.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""')
+
+    customized.wait_for_unit("miniflux.service")
+    customized.wait_for_open_port(${toString port})
+    customized.succeed("curl --fail 'http://localhost:${toString port}/healthcheck' | grep OK")
+    customized.succeed(
+        "curl 'http://localhost:${toString port}/v1/me' -u '${username}:${password}' -H Content-Type:application/json | grep '\"is_admin\":true'"
+    )
+    customized.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/minio.nix b/nixpkgs/nixos/tests/minio.nix
new file mode 100644
index 000000000000..ece4864f771c
--- /dev/null
+++ b/nixpkgs/nixos/tests/minio.nix
@@ -0,0 +1,72 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    accessKey = "BKIKJAA5BMMU2RHO6IBB";
+    secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
+    minioPythonScript = pkgs.writeScript "minio-test.py" ''
+      #! ${pkgs.python3.withPackages(ps: [ ps.minio ])}/bin/python
+      import io
+      import os
+      from minio import Minio
+      minioClient = Minio('localhost:9000',
+                    access_key='${accessKey}',
+                    secret_key='${secretKey}',
+                    secure=False)
+      sio = io.BytesIO()
+      sio.write(b'Test from Python')
+      sio.seek(0, os.SEEK_END)
+      sio_len = sio.tell()
+      sio.seek(0)
+      minioClient.put_object('test-bucket', 'test.txt', sio, sio_len, content_type='text/plain')
+    '';
+    rootCredentialsFile = "/etc/nixos/minio-root-credentials";
+    credsPartial = pkgs.writeText "minio-credentials-partial" ''
+      MINIO_ROOT_USER=${accessKey}
+    '';
+    credsFull = pkgs.writeText "minio-credentials-full" ''
+      MINIO_ROOT_USER=${accessKey}
+      MINIO_ROOT_PASSWORD=${secretKey}
+    '';
+  in
+  {
+    name = "minio";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ bachp ];
+    };
+
+    nodes = {
+      machine = { pkgs, ... }: {
+        services.minio = {
+          enable = true;
+          inherit rootCredentialsFile;
+        };
+        environment.systemPackages = [ pkgs.minio-client ];
+
+        # Minio requires at least 1GiB of free disk space to run.
+        virtualisation.diskSize = 4 * 1024;
+      };
+    };
+
+    testScript = ''
+      import time
+
+      start_all()
+      # simulate manually editing root credentials file
+      machine.wait_for_unit("multi-user.target")
+      machine.copy_from_host("${credsPartial}", "${rootCredentialsFile}")
+      time.sleep(3)
+      machine.copy_from_host("${credsFull}", "${rootCredentialsFile}")
+
+      machine.wait_for_unit("minio.service")
+      machine.wait_for_open_port(9000)
+
+      # Create a test bucket on the server
+      machine.succeed(
+          "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4"
+      )
+      machine.succeed("mc mb minio/test-bucket")
+      machine.succeed("${minioPythonScript}")
+      assert "test-bucket" in machine.succeed("mc ls minio")
+      assert "Test from Python" in machine.succeed("mc cat minio/test-bucket/test.txt")
+      machine.shutdown()
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/miriway.nix b/nixpkgs/nixos/tests/miriway.nix
new file mode 100644
index 000000000000..f12c4d5ecc41
--- /dev/null
+++ b/nixpkgs/nixos/tests/miriway.nix
@@ -0,0 +1,125 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "miriway";
+
+  meta = {
+    maintainers = with lib.maintainers; [ OPNA2608 ];
+  };
+
+  nodes.machine = { config, ... }: {
+    imports = [
+      ./common/auto.nix
+      ./common/user-account.nix
+    ];
+
+    # Without extra memory, the test very rarely gets interrupted by the oom-killer
+    virtualisation.memorySize = 2047;
+
+    test-support.displayManager.auto = {
+      enable = true;
+      user = "alice";
+    };
+
+    services.xserver = {
+      enable = true;
+      displayManager.defaultSession = lib.mkForce "miriway";
+    };
+
+    programs.miriway = {
+      enable = true;
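+      # The ctrl-alt=a binding below forces Alacritty onto X11 (XWayland) by making the Wayland display unusable.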
+      config = ''
+        add-wayland-extensions=all
+        enable-x11=
+
+        ctrl-alt=t:foot --maximized
+        ctrl-alt=a:env WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY=invalid alacritty --option window.startup_mode=maximized
+
+        shell-component=dbus-update-activation-environment --systemd DISPLAY WAYLAND_DISPLAY
+
+        shell-component=foot --maximized
+      '';
+    };
+
+    environment = {
+      shellAliases = {
+        test-wayland = "wayland-info | tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok";
+        test-x11 = "glinfo | tee /tmp/test-x11.out && touch /tmp/test-x11-exit-ok";
+      };
+
+      systemPackages = with pkgs; [
+        mesa-demos
+        wayland-utils
+        foot
+        alacritty
+      ];
+
+      # To help with OCR
+      etc."xdg/foot/foot.ini".text = lib.generators.toINI { } {
+        main = {
+          font = "inconsolata:size=16";
+        };
+        colors = rec {
+          foreground = "000000";
+          background = "ffffff";
+          regular2 = foreground;
+        };
+      };
+      etc."xdg/alacritty/alacritty.yml".text = lib.generators.toYAML { } {
+        font = rec {
+          normal.family = "Inconsolata";
+          bold.family = normal.family;
+          italic.family = normal.family;
+          bold_italic.family = normal.family;
+          size = 16;
+        };
+        colors = rec {
+          primary = {
+            foreground = "0x000000";
+            background = "0xffffff";
+          };
+          normal = {
+            green = primary.foreground;
+          };
+        };
+      };
+    };
+
+    fonts.packages = [ pkgs.inconsolata ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+
+    # Wait for Miriway to complete startup
+    machine.wait_for_file("/run/user/1000/wayland-0")
+    machine.succeed("pgrep miriway-shell")
+    machine.screenshot("miriway_launched")
+
+    # Test Wayland
+    # We let Miriway start the first terminal, as we might get stuck if it's not ready to process the first keybind
+    # machine.send_key("ctrl-alt-t")
+    machine.wait_for_text("alice@machine")
+    machine.send_chars("test-wayland\n")
+    machine.wait_for_file("/tmp/test-wayland-exit-ok")
+    machine.copy_from_vm("/tmp/test-wayland.out")
+    machine.screenshot("foot_wayland_info")
+    # Only succeeds when a mouse is moved inside an interactive session?
+    # machine.send_chars("exit\n")
+    # machine.wait_until_fails("pgrep foot")
+    machine.succeed("pkill foot")
+
+    # Test XWayland
+    machine.send_key("ctrl-alt-a")
+    machine.wait_for_text("alice@machine")
+    machine.send_chars("test-x11\n")
+    machine.wait_for_file("/tmp/test-x11-exit-ok")
+    machine.copy_from_vm("/tmp/test-x11.out")
+    machine.screenshot("alacritty_glinfo")
+    # Only succeeds when a mouse is moved inside an interactive session?
+    # machine.send_chars("exit\n")
+    # machine.wait_until_fails("pgrep alacritty")
+    machine.succeed("pkill alacritty")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/misc.nix b/nixpkgs/nixos/tests/misc.nix
new file mode 100644
index 000000000000..e7842debba7a
--- /dev/null
+++ b/nixpkgs/nixos/tests/misc.nix
@@ -0,0 +1,164 @@
+# Miscellaneous small tests that don't warrant their own VM run.
+
+import ./make-test-python.nix ({ lib, pkgs, ...} : let
+  foo = pkgs.writeText "foo" "Hello World";
+in {
+  name = "misc";
+  meta.maintainers = with lib.maintainers; [ eelco ];
+
+  nodes.machine =
+    { lib, ... }:
+    { swapDevices = lib.mkOverride 0
+        [ { device = "/root/swapfile"; size = 128; } ];
+      environment.variables.EDITOR = lib.mkOverride 0 "emacs";
+      documentation.nixos.enable = lib.mkOverride 0 true;
+      systemd.tmpfiles.rules = [ "d /tmp 1777 root root 10d" ];
+      systemd.tmpfiles.settings."10-test"."/tmp/somefile".d = {};
+      virtualisation.fileSystems = { "/tmp2" =
+        { fsType = "tmpfs";
+          options = [ "mode=1777" "noauto" ];
+        };
+        # Tests https://discourse.nixos.org/t/how-to-make-a-derivations-executables-have-the-s-permission/8555
+        "/user-mount/point" = {
+          device = "/user-mount/source";
+          fsType = "none";
+          options = [ "bind" "rw" "user" "noauto" ];
+        };
+        "/user-mount/denied-point" = {
+          device = "/user-mount/denied-source";
+          fsType = "none";
+          options = [ "bind" "rw" "noauto" ];
+        };
+      };
+      systemd.automounts = lib.singleton
+        { wantedBy = [ "multi-user.target" ];
+          where = "/tmp2";
+        };
+      users.users.sybil = { isNormalUser = true; group = "wheel"; };
+      users.users.alice = { isNormalUser = true; };
+      security.sudo = { enable = true; wheelNeedsPassword = false; };
+      boot.kernel.sysctl."vm.swappiness" = 1;
+      boot.kernelParams = [ "vsyscall=emulate" ];
+      system.extraDependencies = [ foo ];
+    };
+
+  testScript =
+    ''
+      import json
+
+
+      def get_path_info(path):
+          result = machine.succeed(f"nix --option experimental-features nix-command path-info --json {path}")
+          parsed = json.loads(result)
+          return parsed
+
+
+      with subtest("nix-db"):
+          info = get_path_info("${foo}")
+          print(info)
+
+          if (
+              info[0]["narHash"]
+              != "sha256-BdMdnb/0eWy3EddjE83rdgzWWpQjfWPAj3zDIFMD3Ck="
+          ):
+              raise Exception("narHash not set")
+
+          if info[0]["narSize"] != 128:
+              raise Exception("narSize not set")
+
+      with subtest("nixos-version"):
+          machine.succeed("[ `nixos-version | wc -w` = 2 ]")
+
+      with subtest("nixos-rebuild"):
+          assert "NixOS module" in machine.succeed("nixos-rebuild --help")
+
+      with subtest("Sanity check for uid/gid assignment"):
+          assert "4" == machine.succeed("id -u messagebus").strip()
+          assert "4" == machine.succeed("id -g messagebus").strip()
+          assert "users:x:100:" == machine.succeed("getent group users").strip()
+
+      with subtest("Regression test for GMP aborts on QEMU."):
+          machine.succeed("expr 1 + 2")
+
+      with subtest("the swap file got created"):
+          machine.wait_for_unit("root-swapfile.swap")
+          machine.succeed("ls -l /root/swapfile | grep 134217728")
+
+      with subtest("whether kernel.poweroff_cmd is set"):
+          machine.succeed('[ -x "$(cat /proc/sys/kernel/poweroff_cmd)" ]')
+
+      with subtest("whether the io cgroupv2 controller is properly enabled"):
+          machine.succeed("grep -q '\\bio\\b' /sys/fs/cgroup/cgroup.controllers")
+
+      with subtest("whether we have a reboot record in wtmp"):
+          # reboot the VM so wtmp contains a fresh boot record
+          machine.shutdown()
+          machine.start()
+          machine.wait_for_unit("multi-user.target")
+          machine.succeed("last | grep reboot >&2")
+
+      with subtest("whether we can override environment variables"):
+          machine.succeed('[ "$EDITOR" = emacs ]')
+
+      with subtest("whether hostname (and by extension nss_myhostname) works"):
+          assert "machine" == machine.succeed("hostname").strip()
+          assert "machine" == machine.succeed("hostname -s").strip()
+
+      with subtest("whether systemd-udevd automatically loads modules for our hardware"):
+          machine.succeed("systemctl start systemd-udev-settle.service")
+          machine.wait_for_unit("systemd-udev-settle.service")
+          assert "mousedev" in machine.succeed("lsmod")
+
+      with subtest("whether systemd-tmpfiles-clean works"):
+          machine.succeed(
+              "touch /tmp/foo", "systemctl start systemd-tmpfiles-clean", "[ -e /tmp/foo ]"
+          )
+          # move into the future
+          machine.succeed(
+              'date -s "@$(($(date +%s) + 1000000))"',
+              "systemctl start systemd-tmpfiles-clean",
+          )
+          machine.fail("[ -e /tmp/foo ]")
+
+      with subtest("whether systemd-tmpfiles settings works"):
+          machine.succeed("[ -e /tmp/somefile ]")
+
+      with subtest("whether automounting works"):
+          machine.fail("grep '/tmp2 tmpfs' /proc/mounts")
+          machine.succeed("touch /tmp2/x")
+          machine.succeed("grep '/tmp2 tmpfs' /proc/mounts")
+
+      with subtest(
+          "Whether mounting by a user is possible with the `user` option in fstab (#95444)"
+      ):
+          machine.succeed("mkdir -p /user-mount/source")
+          machine.succeed("touch /user-mount/source/file")
+          machine.succeed("chmod -R a+Xr /user-mount/source")
+          machine.succeed("mkdir /user-mount/point")
+          machine.succeed("chown alice:users /user-mount/point")
+          machine.succeed("su - alice -c 'mount /user-mount/point'")
+          machine.succeed("su - alice -c 'ls /user-mount/point/file'")
+      with subtest(
+          "Whether mounting by a user is denied without the `user` option in  fstab"
+      ):
+          machine.succeed("mkdir -p /user-mount/denied-source")
+          machine.succeed("touch /user-mount/denied-source/file")
+          machine.succeed("chmod -R a+Xr /user-mount/denied-source")
+          machine.succeed("mkdir /user-mount/denied-point")
+          machine.succeed("chown alice:users /user-mount/denied-point")
+          machine.fail("su - alice -c 'mount /user-mount/denied-point'")
+
+      with subtest("shell-vars"):
+          machine.succeed('[ -n "$NIX_PATH" ]')
+
+      with subtest("nix-db"):
+          machine.succeed("nix-store -qR /run/current-system | grep nixos-")
+
+      with subtest("Test sysctl"):
+          machine.wait_for_unit("systemd-sysctl.service")
+          assert "1" == machine.succeed("sysctl -ne vm.swappiness").strip()
+          machine.execute("sysctl vm.swappiness=60")
+          assert "60" == machine.succeed("sysctl -ne vm.swappiness").strip()
+
+      with subtest("Test boot parameters"):
+          assert "vsyscall=emulate" in machine.succeed("cat /proc/cmdline")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/mobilizon.nix b/nixpkgs/nixos/tests/mobilizon.nix
new file mode 100644
index 000000000000..398c8530dc56
--- /dev/null
+++ b/nixpkgs/nixos/tests/mobilizon.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ lib, ... }:
+  let
+    certs = import ./common/acme/server/snakeoil-certs.nix;
+    mobilizonDomain = certs.domain;
+    port = 41395;
+  in
+
+  {
+    name = "mobilizon";
+    meta.maintainers = with lib.maintainers; [ minijackson erictapen ];
+
+    nodes.server =
+      { pkgs, ... }:
+      {
+        services.mobilizon = {
+          enable = true;
+          settings = {
+            ":mobilizon" = {
+              ":instance" = {
+                name = "Test Mobilizon";
+                hostname = mobilizonDomain;
+              };
+              "Mobilizon.Web.Endpoint".http.port = port;
+            };
+          };
+        };
+
+        services.postgresql.package = pkgs.postgresql_14;
+
+        security.pki.certificateFiles = [ certs.ca.cert ];
+
+        services.nginx.virtualHosts."${mobilizonDomain}" = {
+          enableACME = lib.mkForce false;
+          sslCertificate = certs.${mobilizonDomain}.cert;
+          sslCertificateKey = certs.${mobilizonDomain}.key;
+        };
+
+        networking.hosts."::1" = [ mobilizonDomain ];
+      };
+
+    testScript = ''
+      server.wait_for_unit("mobilizon.service")
+      server.wait_for_open_port(${toString port})
+      server.succeed("curl --fail https://${mobilizonDomain}/")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/mod_perl.nix b/nixpkgs/nixos/tests/mod_perl.nix
new file mode 100644
index 000000000000..f29d79ea6206
--- /dev/null
+++ b/nixpkgs/nixos/tests/mod_perl.nix
@@ -0,0 +1,53 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "mod_perl";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ sgo ];
+  };
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    services.httpd = {
+      enable = true;
+      adminAddr = "admin@localhost";
+      virtualHosts."modperl" =
+        let
+          inc = pkgs.writeTextDir "ModPerlTest.pm" ''
+            package ModPerlTest;
+            use strict;
+            use Apache2::RequestRec ();
+            use Apache2::RequestIO ();
+            use Apache2::Const -compile => qw(OK);
+            sub handler {
+              my $r = shift;
+              $r->content_type('text/plain');
+              print "Hello mod_perl!\n";
+              return Apache2::Const::OK;
+            }
+            1;
+          '';
+          startup = pkgs.writeScript "startup.pl" ''
+            use lib "${inc}",
+              split ":","${with pkgs.perl.pkgs; makeFullPerlPath ([ mod_perl2 ])}";
+            1;
+          '';
+        in
+        {
+          extraConfig = ''
+            PerlRequire ${startup}
+          '';
+          locations."/modperl" = {
+            extraConfig = ''
+              SetHandler perl-script
+              PerlResponseHandler ModPerlTest
+            '';
+          };
+        };
+      enablePerl = true;
+    };
+  };
+  testScript = { ... }: ''
+    machine.wait_for_unit("httpd.service")
+    response = machine.succeed("curl -fvvv -s http://127.0.0.1:80/modperl")
+    assert "Hello mod_perl!" in response, "/modperl handler did not respond"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/molly-brown.nix b/nixpkgs/nixos/tests/molly-brown.nix
new file mode 100644
index 000000000000..bfc036e81ba0
--- /dev/null
+++ b/nixpkgs/nixos/tests/molly-brown.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+  let testString = "NixOS Gemini test successful";
+  in {
+
+    name = "molly-brown";
+    meta = with pkgs.lib.maintainers; { maintainers = [ ehmry ]; };
+
+    nodes = {
+
+      geminiServer = { config, pkgs, ... }:
+        let
+          inherit (config.networking) hostName;
+          cfg = config.services.molly-brown;
+        in {
+
+          environment.systemPackages = [
+            (pkgs.writeScriptBin "test-gemini" ''
+              #!${pkgs.python3}/bin/python
+
+              import socket
+              import ssl
+              import tempfile
+              import textwrap
+              import urllib.parse
+
+              url = "gemini://geminiServer/init.gmi"
+              parsed_url = urllib.parse.urlparse(url)
+
+              s = socket.create_connection((parsed_url.netloc, 1965))
+              context = ssl.SSLContext()
+              context.check_hostname = False
+              context.verify_mode = ssl.CERT_NONE
+              s = context.wrap_socket(s, server_hostname=parsed_url.netloc)
+              s.sendall((url + "\r\n").encode("UTF-8"))
+              fp = s.makefile("rb")
+              print(fp.readline().strip())
+              print(fp.readline().strip())
+              print(fp.readline().strip())
+            '')
+          ];
+
+          networking.firewall.allowedTCPPorts = [ cfg.settings.Port ];
+
+          services.molly-brown = {
+            enable = true;
+            docBase = "/tmp/docs";
+            certPath = "/tmp/cert.pem";
+            keyPath = "/tmp/key.pem";
+          };
+
+          systemd.services.molly-brown.preStart = ''
+            ${pkgs.openssl}/bin/openssl genrsa -out "/tmp/key.pem"
+            ${pkgs.openssl}/bin/openssl req -new \
+              -subj "/CN=${config.networking.hostName}" \
+              -key "/tmp/key.pem" -out /tmp/request.pem
+            ${pkgs.openssl}/bin/openssl x509 -req -days 3650 \
+              -in /tmp/request.pem -signkey "/tmp/key.pem" -out "/tmp/cert.pem"
+
+            mkdir -p "${cfg.settings.DocBase}"
+            echo "${testString}" > "${cfg.settings.DocBase}/test.gmi"
+          '';
+        };
+    };
+    testScript = ''
+      geminiServer.wait_for_unit("molly-brown")
+      geminiServer.wait_for_open_port(1965)
+      geminiServer.succeed("test-gemini")
+    '';
+
+  })
diff --git a/nixpkgs/nixos/tests/mongodb.nix b/nixpkgs/nixos/tests/mongodb.nix
new file mode 100644
index 000000000000..1afc891817af
--- /dev/null
+++ b/nixpkgs/nixos/tests/mongodb.nix
@@ -0,0 +1,50 @@
+# This test starts MongoDB and runs a query using the mongo shell.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    testQuery = pkgs.writeScript "nixtest.js" ''
+      db.greetings.insert({ "greeting": "hello" });
+      print(db.greetings.findOne().greeting);
+    '';
+
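+    # Run the same start/query/shutdown cycle against each MongoDB package under test.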
+    runMongoDBTest = pkg: ''
+      node.execute("(rm -rf data || true) && mkdir data")
+      node.execute(
+          "${pkg}/bin/mongod --fork --logpath logs --dbpath data"
+      )
+      node.wait_for_open_port(27017)
+
+      assert "hello" in node.succeed(
+          "${pkg}/bin/mongo ${testQuery}"
+      )
+
+      node.execute(
+          "${pkg}/bin/mongod --shutdown --dbpath data"
+      )
+      node.wait_for_closed_port(27017)
+    '';
+
+  in {
+    name = "mongodb";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ bluescreen303 offline rvl phile314 ];
+    };
+
+    nodes = {
+      node = {...}: {
+        environment.systemPackages = with pkgs; [
+          mongodb-4_4
+          mongodb-5_0
+        ];
+      };
+    };
+
+    testScript = ''
+      node.start()
+    ''
+      + runMongoDBTest pkgs.mongodb-4_4
+      + runMongoDBTest pkgs.mongodb-5_0
+      + ''
+        node.shutdown()
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/moodle.nix b/nixpkgs/nixos/tests/moodle.nix
new file mode 100644
index 000000000000..8fd011e0cb21
--- /dev/null
+++ b/nixpkgs/nixos/tests/moodle.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "moodle";
+  meta.maintainers = [ lib.maintainers.aanderse ];
+
+  nodes.machine =
+    { ... }:
+    { services.moodle.enable = true;
+      services.moodle.virtualHost.hostName = "localhost";
+      services.moodle.virtualHost.adminAddr = "root@example.com";
+      services.moodle.initialPassword = "correcthorsebatterystaple";
+
+      # Ensure the virtual machine has enough memory to avoid errors like:
+      # Fatal error: Out of memory (allocated 152047616) (tried to allocate 33554440 bytes)
+      virtualisation.memorySize = 2000;
+    };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("phpfpm-moodle.service", timeout=1800)
+    machine.wait_until_succeeds("curl http://localhost/ | grep 'You are not logged in'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/moonraker.nix b/nixpkgs/nixos/tests/moonraker.nix
new file mode 100644
index 000000000000..b0a93a4a608b
--- /dev/null
+++ b/nixpkgs/nixos/tests/moonraker.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "moonraker";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ zhaofengli ];
+  };
+
+  nodes = {
+    printer = { config, pkgs, ... }: {
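+      # Moonraker needs polkit to control system services (allowSystemControl); the test later stops klipper via its API.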
+      security.polkit.enable = true;
+
+      services.moonraker = {
+        enable = true;
+        allowSystemControl = true;
+
+        settings = {
+          authorization = {
+            trusted_clients = [ "127.0.0.0/8" "::1/128" ];
+          };
+        };
+      };
+
+      services.klipper = {
+        enable = true;
+
+        user = "moonraker";
+        group = "moonraker";
+
+        # No MCU is configured, so Klipper won't even enter the `ready` state
+        settings = {};
+      };
+    };
+  };
+
+  testScript = ''
+    printer.start()
+
+    printer.wait_for_unit("klipper.service")
+    printer.wait_for_unit("moonraker.service")
+    printer.wait_until_succeeds("curl http://localhost:7125/printer/info | grep -v 'Not Found' >&2", timeout=30)
+
+    with subtest("Check that we can perform system-level operations"):
+        printer.succeed("curl -X POST http://localhost:7125/machine/services/stop?service=klipper | grep ok >&2")
+        printer.wait_until_succeeds("systemctl --no-pager show klipper.service | grep ActiveState=inactive", timeout=10)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/moosefs.nix b/nixpkgs/nixos/tests/moosefs.nix
new file mode 100644
index 000000000000..0dc08748b828
--- /dev/null
+++ b/nixpkgs/nixos/tests/moosefs.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix ({ pkgs, ... } :
+
+let
+  master = { pkgs, ... } : {
+    # the database is stored in memory;
+    # the server crashes with the default memory size
+    virtualisation.memorySize = 1024;
+
+    services.moosefs.master = {
+      enable = true;
+      openFirewall = true;
+      exports = [
+        "* / rw,alldirs,admin,maproot=0:0"
+        "* . rw"
+      ];
+    };
+  };
+
+  chunkserver = { pkgs, ... } : {
+    virtualisation.emptyDiskImages = [ 4096 ];
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb
+    '';
+
+    fileSystems = pkgs.lib.mkVMOverride {
+      "/data" = {
+        device = "/dev/disk/by-label/data";
+        fsType = "ext4";
+      };
+    };
+
+    services.moosefs = {
+      masterHost = "master";
+      chunkserver = {
+        openFirewall = true;
+        enable = true;
+        hdds = [ "~/data" ];
+      };
+    };
+  };
+
+  metalogger = { pkgs, ... } : {
+    services.moosefs = {
+      masterHost = "master";
+      metalogger.enable = true;
+    };
+  };
+
+  client = { pkgs, ... } : {
+    services.moosefs.client.enable = true;
+  };
+
+in {
+  name = "moosefs";
+
+  nodes= {
+    inherit master;
+    inherit metalogger;
+    chunkserver1 = chunkserver;
+    chunkserver2 = chunkserver;
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    # prepare master server
+    master.start()
+    master.wait_for_unit("multi-user.target")
+    master.succeed("mfsmaster-init")
+    master.succeed("systemctl restart mfs-master")
+    master.wait_for_unit("mfs-master.service")
+
+    metalogger.wait_for_unit("mfs-metalogger.service")
+
+    for chunkserver in [chunkserver1, chunkserver2]:
+        chunkserver.wait_for_unit("multi-user.target")
+        chunkserver.succeed("chown moosefs:moosefs /data")
+        chunkserver.succeed("systemctl restart mfs-chunkserver")
+        chunkserver.wait_for_unit("mfs-chunkserver.service")
+
+    for client in [client1, client2]:
+        client.wait_for_unit("multi-user.target")
+        client.succeed("mkdir /moosefs")
+        client.succeed("mount -t moosefs master:/ /moosefs")
+
+    client1.succeed("echo test > /moosefs/file")
+    client2.succeed("grep test /moosefs/file")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/morty.nix b/nixpkgs/nixos/tests/morty.nix
new file mode 100644
index 000000000000..9909596820d3
--- /dev/null
+++ b/nixpkgs/nixos/tests/morty.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "morty";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ leenaars ];
+  };
+
+  nodes =
+    { mortyProxyWithKey =
+
+      { ... }:
+      { services.morty = {
+        enable = true;
+        key = "78a9cd0cfee20c672f78427efb2a2a96036027f0";
+        port = 3001;
+        };
+      };
+
+    };
+
+  testScript =
+    { ... }:
+    ''
+      mortyProxyWithKey.wait_for_unit("default.target")
+      mortyProxyWithKey.wait_for_open_port(3001)
+      mortyProxyWithKey.succeed("curl -fL 127.0.0.1:3001 | grep MortyProxy")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/mosquitto.nix b/nixpkgs/nixos/tests/mosquitto.nix
new file mode 100644
index 000000000000..c0980b23e78f
--- /dev/null
+++ b/nixpkgs/nixos/tests/mosquitto.nix
@@ -0,0 +1,213 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  port = 1888;
+  tlsPort = 1889;
+  anonPort = 1890;
+  password = "VERY_secret";
+  hashedPassword = "$7$101$/WJc4Mp+I+uYE9sR$o7z9rD1EYXHPwEP5GqQj6A7k4W1yVbePlb8TqNcuOLV9WNCiDgwHOB0JHC1WCtdkssqTBduBNUnUGd6kmZvDSw==";
+  topic = "test/foo";
+
+  snakeOil = pkgs.runCommand "snakeoil-certs" {
+    buildInputs = [ pkgs.gnutls.bin ];
+    caTemplate = pkgs.writeText "snakeoil-ca.template" ''
+      cn = server
+      expiration_days = -1
+      cert_signing_key
+      ca
+    '';
+    certTemplate = pkgs.writeText "snakeoil-cert.template" ''
+      cn = server
+      expiration_days = -1
+      tls_www_server
+      encryption_key
+      signing_key
+    '';
+    userCertTemplate = pkgs.writeText "snakeoil-user-cert.template" ''
+      organization = snakeoil
+      cn = client1
+      expiration_days = -1
+      tls_www_client
+      encryption_key
+      signing_key
+    '';
+  } ''
+    mkdir "$out"
+
+    certtool -p --bits 2048 --outfile "$out/ca.key"
+    certtool -s --template "$caTemplate" --load-privkey "$out/ca.key" \
+                --outfile "$out/ca.crt"
+    certtool -p --bits 2048 --outfile "$out/server.key"
+    certtool -c --template "$certTemplate" \
+                --load-ca-privkey "$out/ca.key" \
+                --load-ca-certificate "$out/ca.crt" \
+                --load-privkey "$out/server.key" \
+                --outfile "$out/server.crt"
+
+    certtool -p --bits 2048 --outfile "$out/client1.key"
+    certtool -c --template "$userCertTemplate" \
+                --load-privkey "$out/client1.key" \
+                --load-ca-privkey "$out/ca.key" \
+                --load-ca-certificate "$out/ca.crt" \
+                --outfile "$out/client1.crt"
+  '';
+
+in {
+  name = "mosquitto";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [ pennae peterhoeg ];
+  };
+
+  nodes = let
+    client = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ mosquitto ];
+    };
+  in {
+    server = { pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ port tlsPort anonPort ];
+      networking.useNetworkd = true;
+      services.mosquitto = {
+        enable = true;
+        settings = {
+          sys_interval = 1;
+        };
+        listeners = [
+          {
+            inherit port;
+            users = {
+              password_store = {
+                inherit password;
+              };
+              password_file = {
+                passwordFile = pkgs.writeText "mqtt-password" password;
+              };
+              hashed_store = {
+                inherit hashedPassword;
+              };
+              hashed_file = {
+                hashedPasswordFile = pkgs.writeText "mqtt-hashed-password" hashedPassword;
+              };
+
+              reader = {
+                inherit password;
+                acl = [
+                  "read ${topic}"
+                  "read $SYS/#" # so we always have something to read
+                ];
+              };
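+  # Three variants are tested: the module defaults, a system without sudo, and a custom listen port with custom admin credentials.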
+              writer = {
+                inherit password;
+                acl = [ "write ${topic}" ];
+              };
+            };
+          }
+          {
+            port = tlsPort;
+            users.client1 = {
+              acl = [ "read $SYS/#" ];
+            };
+            settings = {
+              cafile = "${snakeOil}/ca.crt";
+              certfile = "${snakeOil}/server.crt";
+              keyfile = "${snakeOil}/server.key";
+              require_certificate = true;
+              use_identity_as_username = true;
+            };
+          }
+          {
+            port = anonPort;
+            omitPasswordAuth = true;
+            settings.allow_anonymous = true;
+            acl = [ "pattern read #" ];
+            users = {
+              anonWriter = {
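+                # password auth is omitted on this listener, so this value is never checked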
+                password = "<ignored>" + password;
+                acl = [ "write ${topic}" ];
+              };
+            };
+          }
+        ];
+      };
+    };
+
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    def mosquitto_cmd(binary, user, topic, port):
+        return (
+            "mosquitto_{} "
+            "-V mqttv311 "
+            "-h server "
+            "-p {} "
+            "-u {} "
+            "-P '${password}' "
+            "-t '{}'"
+        ).format(binary, port, user, topic)
+
+
+    def publish(args, user, topic="${topic}", port=${toString port}):
+        return "{} {}".format(mosquitto_cmd("pub", user, topic, port), args)
+
+    def subscribe(args, user, topic="${topic}", port=${toString port}):
+        return "{} -W 5 -C 1 {}".format(mosquitto_cmd("sub", user, topic, port), args)
+
+    def parallel(*fns):
+        from threading import Thread
+        threads = [ Thread(target=fn) for fn in fns ]
+        for t in threads: t.start()
+        for t in threads: t.join()
+
+    def wait_uuid(uuid):
+        server.wait_for_console_text(uuid)
+        return None
+
+
+    start_all()
+    server.wait_for_unit("mosquitto.service")
+
+    with subtest("check passwords"):
+        client1.succeed(publish("-m test", "password_store"))
+        client1.succeed(publish("-m test", "password_file"))
+        client1.succeed(publish("-m test", "hashed_store"))
+        client1.succeed(publish("-m test", "hashed_file"))
+
+    with subtest("check acl"):
+        client1.succeed(subscribe("", "reader", topic="$SYS/#"))
+        client1.fail(subscribe("", "writer", topic="$SYS/#"))
+
+        parallel(
+            lambda: client1.succeed(subscribe("-i 3688cdd7-aa07-42a4-be22-cb9352917e40", "reader")),
+            lambda: [
+                wait_uuid("3688cdd7-aa07-42a4-be22-cb9352917e40"),
+                client2.succeed(publish("-m test", "writer"))
+            ])
+
+        parallel(
+            lambda: client1.fail(subscribe("-i 24ff16a2-ae33-4a51-9098-1b417153c712", "reader")),
+            lambda: [
+                wait_uuid("24ff16a2-ae33-4a51-9098-1b417153c712"),
+                client2.succeed(publish("-m test", "reader"))
+            ])
+
+    with subtest("check tls"):
+        client1.succeed(
+            subscribe(
+                "--cafile ${snakeOil}/ca.crt "
+                "--cert ${snakeOil}/client1.crt "
+                "--key ${snakeOil}/client1.key",
+                topic="$SYS/#",
+                port=${toString tlsPort},
+                user="no_such_user"))
+
+    with subtest("check omitPasswordAuth"):
+        parallel(
+            lambda: client1.succeed(subscribe("-i fd56032c-d9cb-4813-a3b4-6be0e04c8fc3",
+                "anonReader", port=${toString anonPort})),
+            lambda: [
+                wait_uuid("fd56032c-d9cb-4813-a3b4-6be0e04c8fc3"),
+                client2.succeed(publish("-m test", "anonWriter", port=${toString anonPort}))
+            ])
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mpd.nix b/nixpkgs/nixos/tests/mpd.nix
new file mode 100644
index 000000000000..52d9c7fd33a1
--- /dev/null
+++ b/nixpkgs/nixos/tests/mpd.nix
@@ -0,0 +1,134 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    track = pkgs.fetchurl {
+      # Sourced from http://freemusicarchive.org/music/Blue_Wave_Theory/Surf_Music_Month_Challenge/Skyhawk_Beach_fade_in
+      # License: http://creativecommons.org/licenses/by-sa/4.0/
+
+      name = "Blue_Wave_Theory-Skyhawk_Beach.mp3";
+      url = "https://freemusicarchive.org/file/music/ccCommunity/Blue_Wave_Theory/Surf_Music_Month_Challenge/Blue_Wave_Theory_-_04_-_Skyhawk_Beach.mp3";
+      sha256 = "0xw417bxkx4gqqy139bb21yldi37xx8xjfxrwaqa0gyw19dl6mgp";
+    };
+
+    defaultCfg = rec {
+      user = "mpd";
+      group = "mpd";
+      dataDir = "/var/lib/mpd";
+      musicDirectory = "${dataDir}/music";
+    };
+
+    defaultMpdCfg = with defaultCfg; {
+      inherit dataDir musicDirectory user group;
+      enable = true;
+    };
+
+    musicService = { user, group, musicDirectory }: {
+      description = "Sets up the music file(s) for MPD to use.";
+      requires = [ "mpd.service" ];
+      after = [ "mpd.service" ];
+      wantedBy = [ "default.target" ];
+      script = ''
+        cp ${track} ${musicDirectory}
+      '';
+      serviceConfig = {
+        User = user;
+        Group = group;
+      };
+    };
+
+    mkServer = { mpd, musicService, }:
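+      # load the dummy ALSA driver so the servers have a sound card to output to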
+      { boot.kernelModules = [ "snd-dummy" ];
+        sound.enable = true;
+        services.mpd = mpd;
+        systemd.services.musicService = musicService;
+      };
+  in {
+    name = "mpd";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ emmanuelrosa ];
+    };
+
+  nodes =
+    { client =
+      { ... }: { };
+
+      serverALSA =
+        { ... }: lib.mkMerge [
+          (mkServer {
+            mpd = defaultMpdCfg // {
+              network.listenAddress = "any";
+              extraConfig = ''
+                audio_output {
+                  type "alsa"
+                  name "ALSA"
+                  mixer_type "null"
+                }
+              '';
+            };
+            musicService = with defaultMpdCfg; musicService { inherit user group musicDirectory; };
+          })
+          { networking.firewall.allowedTCPPorts = [ 6600 ]; }
+        ];
+
+      serverPulseAudio =
+        { ... }: lib.mkMerge [
+          (mkServer {
+            mpd = defaultMpdCfg // {
+              extraConfig = ''
+                audio_output {
+                  type "pulse"
+                  name "The Pulse"
+                }
+              '';
+            };
+
+            musicService = with defaultCfg; musicService { inherit user group musicDirectory; };
+          })
+          {
+            hardware.pulseaudio = {
+              enable = true;
+              systemWide = true;
+              tcp.enable = true;
+              tcp.anonymousClients.allowAll = true;
+            };
+            systemd.services.mpd.environment.PULSE_SERVER = "localhost";
+          }
+        ];
+    };
+
+  testScript = ''
+    mpc = "${pkgs.mpc-cli}/bin/mpc --wait"
+
+    # Connects to the given server and attempts to play a tune.
+    def play_some_music(server):
+        server.wait_for_unit("mpd.service")
+        server.succeed(f"{mpc} update")
+        _, tracks = server.execute(f"{mpc} ls")
+
+        for track in tracks.splitlines():
+            server.succeed(f"{mpc} add {track}")
+
+        _, added_tracks = server.execute(f"{mpc} playlist")
+
+        # Check we succeeded adding audio tracks to the playlist
+        assert len(added_tracks.splitlines()) > 0
+
+        server.succeed(f"{mpc} play")
+
+        _, output = server.execute(f"{mpc} status")
+        # Assure audio track is playing
+        assert "playing" in output
+
+        server.succeed(f"{mpc} stop")
+
+
+    play_some_music(serverALSA)
+    play_some_music(serverPulseAudio)
+
+    client.wait_for_unit("multi-user.target")
+    client.succeed(f"{mpc} -h serverALSA status")
+
+    # The PulseAudio-based server is configured not to accept external client connections
+    # to perform the following test:
+    client.fail(f"{mpc} -h serverPulseAudio status")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mpich-example.c b/nixpkgs/nixos/tests/mpich-example.c
new file mode 100644
index 000000000000..c48e3c45b72e
--- /dev/null
+++ b/nixpkgs/nixos/tests/mpich-example.c
@@ -0,0 +1,21 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <mpi.h>
+
+int
+main (int argc, char *argv[])
+{
+  int rank, size, length;
+  char name[BUFSIZ];
+
+  MPI_Init (&argc, &argv);
+  MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+  MPI_Comm_size (MPI_COMM_WORLD, &size);
+  MPI_Get_processor_name (name, &length);
+
+  printf ("%s: hello world from process %d of %d\n", name, rank, size);
+
+  MPI_Finalize ();
+
+  return EXIT_SUCCESS;
+}
diff --git a/nixpkgs/nixos/tests/mpv.nix b/nixpkgs/nixos/tests/mpv.nix
new file mode 100644
index 000000000000..32a81cbe2495
--- /dev/null
+++ b/nixpkgs/nixos/tests/mpv.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+let
+  port = toString 4321;
+in
+{
+  name = "mpv";
+  meta.maintainers = with lib.maintainers; [ zopieux ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      environment.systemPackages = [
+        pkgs.curl
+        (pkgs.wrapMpv pkgs.mpv-unwrapped {
+          scripts = [ pkgs.mpvScripts.simple-mpv-webui ];
+        })
+      ];
+    };
+
+  testScript = ''
+    machine.execute("set -m; mpv --script-opts=webui-port=${port} --idle=yes >&2 &")
+    machine.wait_for_open_port(${port})
+    assert "<title>simple-mpv-webui" in machine.succeed("curl -s localhost:${port}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mtp.nix b/nixpkgs/nixos/tests/mtp.nix
new file mode 100644
index 000000000000..8f0835d75d3f
--- /dev/null
+++ b/nixpkgs/nixos/tests/mtp.nix
@@ -0,0 +1,109 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "mtp";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ matthewcroughan nixinator ];
+  };
+
+  nodes =
+  {
+    client = { config, pkgs, ... }: {
+      # D-Bus runs only once a user session is created, which means a user has to
+      # log in. Here, we log in as root. Once logged in, the gvfs-daemon service
+      # runs as UID 0 in User-0.service.
+      services.getty.autologinUser = "root";
+
+      # XDG_RUNTIME_DIR is needed for running systemd-user services such as
+      # gvfs-daemon as root.
+      environment.variables.XDG_RUNTIME_DIR = "/run/user/0";
+
+      environment.systemPackages = with pkgs; [ usbutils glib jmtpfs tree ];
+      services.gvfs.enable = true;
+
+      # Creates a usb-mtp device inside the VM, which is mapped to the host's
+      # /tmp folder, it is able to write files to this location, but only has
+      # permissions to read its own creations.
+      virtualisation.qemu.options = [
+        "-usb"
+        "-device usb-mtp,rootdir=/tmp,readonly=false"
+      ];
+    };
+  };
+
+
+  testScript = { nodes, ... }:
+    let
+      # Creates a list of QEMU MTP devices matching USB ID (46f4:0004). This
+      # value can be sourced in a shell script. This is so we can loop over the
+      # devices we find, as this test may want to use more than one MTP device
+      # in the future.
+      mtpDevices = pkgs.writeScript "mtpDevices.sh" ''
+        export mtpDevices=$(lsusb -d 46f4:0004 | awk {'print $2","$4'} | sed 's/[:-]/ /g')
+      '';
+      # QEMU is only capable of creating an MTP device that uses the Picture
+      # Transfer Protocol. This means that gvfs must use gphoto2:// rather than
+      # mtp:// when mounting.
+      # https://github.com/qemu/qemu/blob/970bc16f60937bcfd334f14c614bd4407c247961/hw/usb/dev-mtp.c#L278
+      gvfs = rec {
+        mountAllMtpDevices = pkgs.writeScript "mountAllMtpDevices.sh" ''
+          set -e
+          source ${mtpDevices}
+          for i in $mtpDevices
+          do
+            gio mount "gphoto2://[usb:$i]/"
+          done
+        '';
+        unmountAllMtpDevices = pkgs.writeScript "unmountAllMtpDevices.sh" ''
+          set -e
+          source ${mtpDevices}
+          for i in $mtpDevices
+          do
+            gio mount -u "gphoto2://[usb:$i]/"
+          done
+        '';
+        # gvfsTest:
+        # 1. Creates a 10M test file
+        # 2. Copies it to the device using GIO tools
+        # 3. Checks for corruption with `diff`
+        # 4. Removes the file, then unmounts the disks.
+        gvfsTest = pkgs.writeScript "gvfsTest.sh" ''
+          set -e
+          source ${mtpDevices}
+          ${mountAllMtpDevices}
+          dd if=/dev/urandom of=testFile10M bs=1M count=10
+          for i in $mtpDevices
+          do
+            gio copy ./testFile10M gphoto2://[usb:$i]/
+            ls -lah /run/user/0/gvfs/*/testFile10M
+            gio remove gphoto2://[usb:$i]/testFile10M
+          done
+          ${unmountAllMtpDevices}
+        '';
+      };
+      jmtpfs = {
+        # jmtpfsTest:
+        # 1. Mounts the device on a dir named `phone` using jmtpfs
+        # 2. Puts the current Nixpkgs libmtp version into a file
+        # 3. Checks for corruption with `diff`
+        # 4. Prints the directory tree
+        jmtpfsTest = pkgs.writeScript "jmtpfsTest.sh" ''
+          set -e
+          mkdir phone
+          jmtpfs phone
+          echo "${pkgs.libmtp.version}" > phone/tmp/testFile
+          echo "${pkgs.libmtp.version}" > testFile
+          diff phone/tmp/testFile testFile
+          tree phone
+        '';
+      };
+    in
+    # Using >&2 allows the results of the scripts to be printed to the terminal
+    # when building this test with Nix. Scripts would otherwise complete
+    # silently.
+    ''
+    start_all()
+    client.wait_for_unit("multi-user.target")
+    client.wait_for_unit("dbus.service")
+    client.succeed("${gvfs.gvfsTest} >&2")
+    client.succeed("${jmtpfs.jmtpfsTest} >&2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/multipass.nix b/nixpkgs/nixos/tests/multipass.nix
new file mode 100644
index 000000000000..0980e9195f5a
--- /dev/null
+++ b/nixpkgs/nixos/tests/multipass.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  multipass-image = import ../release.nix {
+    configuration = {
+      # Building documentation makes the test unnecessarily take a longer time:
+      documentation.enable = lib.mkForce false;
+    };
+  };
+
+in
+{
+  name = "multipass";
+
+  meta.maintainers = [ lib.maintainers.jnsgruk ];
+
+  nodes.machine = { lib, ... }: {
+    virtualisation = {
+      cores = 1;
+      memorySize = 1024;
+      diskSize = 4096;
+
+      multipass.enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("sockets.target")
+    machine.wait_for_unit("multipass.service")
+    machine.wait_for_file("/var/lib/multipass/data/multipassd/network/multipass_subnet")
+
+    # Wait for Multipass to settle
+    machine.sleep(1)
+
+    machine.succeed("multipass list")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mumble.nix b/nixpkgs/nixos/tests/mumble.nix
new file mode 100644
index 000000000000..8eee454721a1
--- /dev/null
+++ b/nixpkgs/nixos/tests/mumble.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  client = { pkgs, ... }: {
+    imports = [ ./common/x11.nix ];
+    environment.systemPackages = [ pkgs.mumble ];
+  };
+
+  # outside of tests, this file should obviously not come from the nix store
+  envFile = pkgs.writeText "nixos-test-mumble-murmurd.env" ''
+    MURMURD_PASSWORD=testpassword
+  '';
+
+in
+{
+  name = "mumble";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ thoughtpolice eelco ];
+  };
+
+  nodes = {
+    server = { config, ... }: {
+      security.apparmor.enable = true;
+      services.murmur.enable = true;
+      services.murmur.registerName = "NixOS tests";
+      services.murmur.password = "$MURMURD_PASSWORD";
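+      # the $MURMURD_PASSWORD placeholder is substituted from the environment file at service start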
+      services.murmur.environmentFile = envFile;
+      networking.firewall.allowedTCPPorts = [ config.services.murmur.port ];
+    };
+
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("murmur.service")
+    client1.wait_for_x()
+    client2.wait_for_x()
+
+    client1.execute("mumble mumble://client1:testpassword\@server/test >&2 &")
+    client2.execute("mumble mumble://client2:testpassword\@server/test >&2 &")
+
+    # cancel client audio configuration
+    client1.wait_for_window(r"Audio Tuning Wizard")
+    client2.wait_for_window(r"Audio Tuning Wizard")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+    client1.send_key("esc")
+    client2.send_key("esc")
+
+    # cancel client cert configuration
+    client1.wait_for_window(r"Certificate Management")
+    client2.wait_for_window(r"Certificate Management")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+    client1.send_key("esc")
+    client2.send_key("esc")
+
+    # accept server certificate
+    client1.wait_for_window(r"^Mumble$")
+    client2.wait_for_window(r"^Mumble$")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+    client1.send_chars("y")
+    client2.send_chars("y")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+
+    # sometimes the wrong one of the two windows is focused; switch focus and press "y" again
+    client1.send_key("alt-tab")
+    client2.send_key("alt-tab")
+    server.sleep(5)  # wait because mumble is slow to register event handlers
+    client1.send_chars("y")
+    client2.send_chars("y")
+
+    # Find clients in logs
+    server.wait_until_succeeds(
+        "journalctl -eu murmur -o cat | grep -q 'client1.\+Authenticated'"
+    )
+    server.wait_until_succeeds(
+        "journalctl -eu murmur -o cat | grep -q 'client2.\+Authenticated'"
+    )
+
+    server.sleep(5)  # wait to get screenshot
+    client1.screenshot("screen1")
+    client2.screenshot("screen2")
+
+    # check if apparmor denied anything
+    server.fail('journalctl -b --no-pager --grep "^audit: .*apparmor=\\"DENIED\\""')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/munin.nix b/nixpkgs/nixos/tests/munin.nix
new file mode 100644
index 000000000000..4ec17e0339df
--- /dev/null
+++ b/nixpkgs/nixos/tests/munin.nix
@@ -0,0 +1,44 @@
+# This test runs basic munin setup with node and cron job running on the same
+# machine.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "munin";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ domenkozar eelco ];
+  };
+
+  nodes = {
+    one =
+      { config, ... }:
+        {
+          services = {
+            munin-node = {
+              enable = true;
+              # disable a failing plugin to prevent irrelevant error message, see #23049
+              disabledPlugins = [ "apc_nis" ];
+            };
+            munin-cron = {
+             enable = true;
+             hosts = ''
+               [${config.networking.hostName}]
+               address localhost
+             '';
+            };
+          };
+
+          # make the systemd timer fire more often so the test does not wait for the default interval
+          systemd.timers.munin-cron.timerConfig.OnCalendar = pkgs.lib.mkForce "*:*:0/10";
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    with subtest("ensure munin-node starts and listens on 4949"):
+        one.wait_for_unit("munin-node.service")
+        one.wait_for_open_port(4949)
+    with subtest("ensure munin-cron output is correct"):
+        one.wait_for_file("/var/lib/munin/one/one-uptime-uptime-g.rrd")
+        one.wait_for_file("/var/www/munin/one/index.html")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/musescore.nix b/nixpkgs/nixos/tests/musescore.nix
new file mode 100644
index 000000000000..6aeb0558a49d
--- /dev/null
+++ b/nixpkgs/nixos/tests/musescore.nix
@@ -0,0 +1,106 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  # Make sure we don't have to go through the startup tutorial
+  customMuseScoreConfig = pkgs.writeText "MuseScore4.ini" ''
+    [application]
+    hasCompletedFirstLaunchSetup=true
+
+    [project]
+    preferredScoreCreationMode=1
+    '';
+in
+{
+  name = "musescore";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ turion ];
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = with pkgs; [
+      musescore
+      pdfgrep
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript = { ... }: ''
+    start_all()
+    machine.wait_for_x()
+
+    # Inject custom settings
+    machine.succeed("mkdir -p /root/.config/MuseScore/")
+    machine.succeed(
+        "cp ${customMuseScoreConfig} /root/.config/MuseScore/MuseScore4.ini"
+    )
+
+    # Start MuseScore window
+    machine.execute("DISPLAY=:0.0 mscore >&2 &")
+
+    # Wait until MuseScore has launched
+    machine.wait_for_window("MuseScore 4")
+
+    # Wait until the window has completely initialised
+    machine.wait_for_text("MuseScore 4")
+
+    machine.screenshot("MuseScore0")
+
+    # Create a new score
+    machine.send_key("ctrl-n")
+
+    # Wait until the creation wizard appears
+    machine.wait_for_window("New score")
+
+    machine.screenshot("MuseScore1")
+
+    machine.send_key("tab")
+    machine.send_key("tab")
+    machine.send_key("tab")
+    machine.send_key("tab")
+    machine.send_key("right")
+    machine.send_key("right")
+    machine.send_key("ret")
+
+    machine.sleep(1)
+
+    # Type the beginning of https://de.wikipedia.org/wiki/Alle_meine_Entchen
+    machine.send_chars("cdef6gg5aaaa7g")
+    machine.sleep(1)
+
+    machine.screenshot("MuseScore2")
+
+    # Go to the export dialogue and create a PDF
+    machine.send_key("alt-f")
+    machine.sleep(1)
+    machine.send_key("e")
+
+    # Wait until the export dialogue appears.
+    machine.wait_for_text("Export")
+
+    machine.screenshot("MuseScore3")
+
+    machine.send_key("shift-tab")
+    machine.sleep(1)
+    machine.send_key("ret")
+    machine.sleep(1)
+    machine.send_key("ret")
+
+    machine.screenshot("MuseScore4")
+
+    # Wait until PDF is exported
+    machine.wait_for_file('"/root/Documents/MuseScore4/Scores/Untitled score.pdf"')
+
+    # Check that it contains the title of the score
+    machine.succeed('pdfgrep "Untitled score" "/root/Documents/MuseScore4/Scores/Untitled score.pdf"')
+
+    machine.screenshot("MuseScore5")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mutable-users.nix b/nixpkgs/nixos/tests/mutable-users.nix
new file mode 100644
index 000000000000..ebe32e6487ef
--- /dev/null
+++ b/nixpkgs/nixos/tests/mutable-users.nix
@@ -0,0 +1,73 @@
+# Mutable users tests.
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "mutable-users";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ gleber ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      users.mutableUsers = false;
+    };
+    mutable = { ... }: {
+      users.mutableUsers = true;
+      users.users.dry-test.isNormalUser = true;
+    };
+  };
+
+  testScript = {nodes, ...}: let
+    immutableSystem = nodes.machine.config.system.build.toplevel;
+    mutableSystem = nodes.mutable.config.system.build.toplevel;
+  in ''
+    machine.start()
+    machine.wait_for_unit("default.target")
+
+    # The machine starts in immutable mode. Add a user and check that
+    # reactivating the configuration removes that user again.
+    with subtest("Machine in immutable mode"):
+        assert "foobar" not in machine.succeed("cat /etc/passwd")
+        machine.succeed("sudo useradd foobar")
+        assert "foobar" in machine.succeed("cat /etc/passwd")
+        machine.succeed(
+            "${immutableSystem}/bin/switch-to-configuration test"
+        )
+        assert "foobar" not in machine.succeed("cat /etc/passwd")
+
+    # In immutable mode passwd is not wrapped, while in mutable mode it is
+    # wrapped.
+    with subtest("Password is wrapped in mutable mode"):
+        assert "/run/current-system/" in machine.succeed("which passwd")
+        machine.succeed(
+            "${mutableSystem}/bin/switch-to-configuration test"
+        )
+        assert "/run/wrappers/" in machine.succeed("which passwd")
+
+    with subtest("dry-activation does not change files"):
+        machine.succeed('test -e /home/dry-test')  # home was created
+        machine.succeed('rm -rf /home/dry-test')
+
+        files_to_check = ['/etc/group',
+                          '/etc/passwd',
+                          '/etc/shadow',
+                          '/etc/subuid',
+                          '/etc/subgid',
+                          '/var/lib/nixos/uid-map',
+                          '/var/lib/nixos/gid-map',
+                          '/var/lib/nixos/declarative-groups',
+                          '/var/lib/nixos/declarative-users'
+                         ]
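+        # Record checksums and stat output so we can verify that dry-activation leaves these files untouched.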
+        expected_hashes = {}
+        expected_stats = {}
+        for file in files_to_check:
+            expected_hashes[file] = machine.succeed(f"sha256sum {file}")
+            expected_stats[file] = machine.succeed(f"stat {file}")
+
+        machine.succeed("/run/current-system/bin/switch-to-configuration dry-activate")
+
+        machine.fail('test -e /home/dry-test')  # home was not recreated
+        for file in files_to_check:
+            assert machine.succeed(f"sha256sum {file}") == expected_hashes[file]
+            assert machine.succeed(f"stat {file}") == expected_stats[file]
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mxisd.nix b/nixpkgs/nixos/tests/mxisd.nix
new file mode 100644
index 000000000000..354612a8a53d
--- /dev/null
+++ b/nixpkgs/nixos/tests/mxisd.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, ... } : {
+
+  name = "mxisd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mguentner ];
+  };
+
+  nodes = {
+    server = args : {
+      services.mxisd.enable = true;
+      services.mxisd.matrix.domain = "example.org";
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("mxisd.service")
+    server.wait_for_open_port(8090)
+    server.succeed("curl -Ssf 'http://127.0.0.1:8090/_matrix/identity/api/v1'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/mysql/common.nix b/nixpkgs/nixos/tests/mysql/common.nix
new file mode 100644
index 000000000000..1cf52347f4c7
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql/common.nix
@@ -0,0 +1,10 @@
+{ lib, pkgs }: {
+  mariadbPackages = lib.filterAttrs (n: _: lib.hasPrefix "mariadb" n) (import ../../../pkgs/servers/sql/mariadb pkgs);
+  mysqlPackages = {
+    inherit (pkgs) mysql80;
+  };
+  perconaPackages = {
+    inherit (pkgs) percona-server_8_0;
+  };
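+  # Derives a test name from a package's major.minor version, e.g. "mariadb_106" for a 10.6.x package.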
+  mkTestName = pkg: "mariadb_${builtins.replaceStrings ["."] [""] (lib.versions.majorMinor pkg.version)}";
+}
diff --git a/nixpkgs/nixos/tests/mysql/mariadb-galera.nix b/nixpkgs/nixos/tests/mysql/mariadb-galera.nix
new file mode 100644
index 000000000000..c9962f49c02f
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql/mariadb-galera.nix
@@ -0,0 +1,250 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; },
+  lib ? pkgs.lib
+}:
+
+let
+  inherit (import ./common.nix { inherit pkgs lib; }) mkTestName mariadbPackages;
+
+  makeTest = import ./../make-test-python.nix;
+
+  # Common user configuration
+  makeGaleraTest = {
+    mariadbPackage,
+    name ? mkTestName mariadbPackage,
+    galeraPackage ? pkgs.mariadb-galera
+  }: makeTest {
+    name = "${name}-galera-mariabackup";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ izorkin ajs124 das_j ];
+    };
+
+    # The test creates two Galera clusters of three nodes each and checks that state snapshot
+    # transfer (SST) works with both the mariabackup and rsync methods. Each cluster is exercised
+    # by creating databases and tables on one node, inserting rows, and checking that the data
+    # shows up on the other nodes.
+    nodes = let
+      mkGaleraNode = {
+        id,
+        method
+      }: let
+        address = "192.168.1.${toString id}";
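+        # Nodes 1 and 4 bootstrap their respective three-node clusters (1-3 and 4-6).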
+        isFirstClusterNode = id == 1 || id == 4;
+      in {
+        users = {
+          users.testuser = {
+            isSystemUser = true;
+            group = "testusers";
+          };
+          groups.testusers = { };
+        };
+
+        networking = {
+          interfaces.eth1 = {
+            ipv4.addresses = [
+              { inherit address; prefixLength = 24; }
+            ];
+          };
+          extraHosts = lib.concatMapStringsSep "\n" (i: "192.168.1.${toString i} galera_0${toString i}") (lib.range 1 6);
+          firewall.allowedTCPPorts = [ 3306 4444 4567 4568 ];
+          firewall.allowedUDPPorts = [ 4567 ];
+        };
+        systemd.services.mysql = {
+          path = with pkgs; [
+            bash
+            gawk
+            gnutar
+            gzip
+            inetutils
+            iproute2
+            netcat
+            procps
+            pv
+            rsync
+            socat
+            stunnel
+            which
+          ];
+        };
+        services.mysql = {
+          enable = true;
+          package = mariadbPackage;
+          ensureDatabases = lib.mkIf isFirstClusterNode [ "testdb" ];
+          ensureUsers = lib.mkIf isFirstClusterNode [{
+            name = "testuser";
+            ensurePermissions = {
+              "testdb.*" = "ALL PRIVILEGES";
+            };
+          }];
+          initialScript = lib.mkIf isFirstClusterNode (pkgs.writeText "mariadb-init.sql" ''
+            GRANT ALL PRIVILEGES ON *.* TO 'check_repl'@'localhost' IDENTIFIED BY 'check_pass' WITH GRANT OPTION;
+            FLUSH PRIVILEGES;
+          '');
+          settings = {
+            mysqld = {
+              bind_address = "0.0.0.0";
+            };
+            galera = {
+              wsrep_on = "ON";
+              wsrep_debug = "NONE";
+              wsrep_retry_autocommit = "3";
+              wsrep_provider = "${galeraPackage}/lib/galera/libgalera_smm.so";
+              wsrep_cluster_address = "gcomm://"
+                + lib.optionalString (id == 2 || id == 3) "galera_01,galera_02,galera_03"
+                + lib.optionalString (id == 5 || id == 6) "galera_04,galera_05,galera_06";
+              wsrep_cluster_name = "galera";
+              wsrep_node_address = address;
+              wsrep_node_name = "galera_0${toString id}";
+              wsrep_sst_method = method;
+              wsrep_sst_auth = "check_repl:check_pass";
+              binlog_format = "ROW";
+              enforce_storage_engine = "InnoDB";
+              innodb_autoinc_lock_mode = "2";
+            };
+          };
+        };
+      };
+    in {
+      galera_01 = mkGaleraNode {
+        id = 1;
+        method = "mariabackup";
+      };
+
+      galera_02 = mkGaleraNode {
+        id = 2;
+        method = "mariabackup";
+      };
+
+      galera_03 = mkGaleraNode {
+        id = 3;
+        method = "mariabackup";
+      };
+
+      galera_04 = mkGaleraNode {
+        id = 4;
+        method = "rsync";
+      };
+
+      galera_05 = mkGaleraNode {
+        id = 5;
+        method = "rsync";
+      };
+
+      galera_06 = mkGaleraNode {
+        id = 6;
+        method = "rsync";
+      };
+
+    };
+
+    testScript = ''
+      galera_01.start()
+      galera_01.wait_for_unit("mysql")
+      galera_01.wait_for_open_port(3306)
+      galera_01.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+      )
+      galera_01.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (37);'"
+      )
+      galera_02.start()
+      galera_02.wait_for_unit("mysql")
+      galera_02.wait_for_open_port(3306)
+      galera_03.start()
+      galera_03.wait_for_unit("mysql")
+      galera_03.wait_for_open_port(3306)
+      galera_02.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 37"
+      )
+      galera_02.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+      )
+      galera_02.succeed("systemctl stop mysql")
+      galera_01.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (38);'"
+      )
+      galera_03.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+      )
+      galera_01.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (39);'"
+      )
+      galera_02.succeed("systemctl start mysql")
+      galera_02.wait_for_open_port(3306)
+      galera_02.succeed(
+          "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+      )
+      galera_03.succeed(
+          "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+      )
+      galera_01.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db3;' -N | grep 39"
+      )
+      galera_02.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db2;' -N | grep 38"
+      )
+      galera_03.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 37"
+      )
+      galera_01.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
+      galera_02.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db2;'")
+      galera_03.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db1;'")
+      galera_01.crash()
+      galera_02.crash()
+      galera_03.crash()
+
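+      # Repeat the same checks on the second cluster, which uses the rsync SST method.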
+      galera_04.start()
+      galera_04.wait_for_unit("mysql")
+      galera_04.wait_for_open_port(3306)
+      galera_04.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db1 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+      )
+      galera_04.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db1 values (41);'"
+      )
+      galera_05.start()
+      galera_05.wait_for_unit("mysql")
+      galera_05.wait_for_open_port(3306)
+      galera_06.start()
+      galera_06.wait_for_unit("mysql")
+      galera_06.wait_for_open_port(3306)
+      galera_05.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 41"
+      )
+      galera_05.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db2 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+      )
+      galera_05.succeed("systemctl stop mysql")
+      galera_04.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db2 values (42);'"
+      )
+      galera_06.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; create table db3 (test_id INT, PRIMARY KEY (test_id)) ENGINE = InnoDB;'"
+      )
+      galera_04.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; insert into db3 values (43);'"
+      )
+      galera_05.succeed("systemctl start mysql")
+      galera_05.wait_for_open_port(3306)
+      galera_05.succeed(
+          "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_cluster_size.*3'"
+      )
+      galera_06.succeed(
+          "sudo -u testuser mysql -u testuser -e 'show status' -N | grep 'wsrep_local_state_comment.*Synced'"
+      )
+      galera_04.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db3;' -N | grep 43"
+      )
+      galera_05.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db2;' -N | grep 42"
+      )
+      galera_06.succeed(
+          "sudo -u testuser mysql -u testuser -e 'use testdb; select test_id from db1;' -N | grep 41"
+      )
+      galera_04.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db3;'")
+      galera_05.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db2;'")
+      galera_06.succeed("sudo -u testuser mysql -u testuser -e 'use testdb; drop table db1;'")
+    '';
+  };
+in
+  lib.mapAttrs (_: mariadbPackage: makeGaleraTest { inherit mariadbPackage; }) mariadbPackages
diff --git a/nixpkgs/nixos/tests/mysql/mysql-autobackup.nix b/nixpkgs/nixos/tests/mysql/mysql-autobackup.nix
new file mode 100644
index 000000000000..b49466db0a9c
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql/mysql-autobackup.nix
@@ -0,0 +1,53 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; },
+  lib ? pkgs.lib
+}:
+
+let
+  inherit (import ./common.nix { inherit pkgs lib; }) mkTestName mariadbPackages;
+
+  makeTest = import ./../make-test-python.nix;
+
+  makeAutobackupTest = {
+    package,
+    name ? mkTestName package,
+  }: makeTest {
+    name = "${name}-automysqlbackup";
+    meta.maintainers = [ lib.maintainers.aanderse ];
+
+    nodes.machine = {
+      services.mysql = {
+        inherit package;
+        enable = true;
+        initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
+      };
+
+      services.automysqlbackup.enable = true;
+    };
+
+    testScript = ''
+      start_all()
+
+      # Need to have mysql started so that it can be populated with data.
+      machine.wait_for_unit("mysql.service")
+
+      with subtest("Wait for testdb to be fully populated (5 rows)."):
+          machine.wait_until_succeeds(
+              "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+          )
+
+      with subtest("Do a backup and wait for it to start"):
+          machine.start_job("automysqlbackup.service")
+          machine.wait_for_job("automysqlbackup.service")
+
+      with subtest("wait for backup file and check that data appears in backup"):
+          machine.wait_for_file("/var/backup/mysql/daily/testdb")
+          machine.succeed(
+              "${pkgs.gzip}/bin/zcat /var/backup/mysql/daily/testdb/daily_testdb_*.sql.gz | grep hello"
+          )
+      '';
+  };
+in
+  lib.mapAttrs (_: package: makeAutobackupTest { inherit package; }) mariadbPackages
diff --git a/nixpkgs/nixos/tests/mysql/mysql-backup.nix b/nixpkgs/nixos/tests/mysql/mysql-backup.nix
new file mode 100644
index 000000000000..968f56dd3c9b
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql/mysql-backup.nix
@@ -0,0 +1,71 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; },
+  lib ? pkgs.lib
+}:
+
+let
+  inherit (import ./common.nix { inherit pkgs lib; }) mkTestName mariadbPackages;
+
+  makeTest = import ./../make-test-python.nix;
+
+  makeBackupTest = {
+    package,
+    name ? mkTestName package
+  }: makeTest {
+    name = "${name}-backup";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ rvl ];
+    };
+
+    nodes = {
+      master = { pkgs, ... }: {
+        services.mysql = {
+          inherit package;
+          enable = true;
+          initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
+        };
+
+        services.mysqlBackup = {
+          enable = true;
+          databases = [ "doesnotexist" "testdb" ];
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      # Delete backup file that may be left over from a previous test run.
+      # This is not needed on Hydra but useful for repeated local test runs.
+      master.execute("rm -f /var/backup/mysql/testdb.gz")
+
+      # Need to have mysql started so that it can be populated with data.
+      master.wait_for_unit("mysql.service")
+
+      # Wait for testdb to be fully populated (5 rows).
+      master.wait_until_succeeds(
+          "mysql -u root -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+      )
+
+      # Do a backup and wait for it to start
+      master.start_job("mysql-backup.service")
+
+      # Wait for the backup to fail because of the nonexistent database 'doesnotexist'
+      master.wait_until_fails("systemctl is-active -q mysql-backup.service")
+
+      # Wait for the backup file and check that the data appears in the backup
+      master.wait_for_file("/var/backup/mysql/testdb.gz")
+      master.succeed(
+          "${pkgs.gzip}/bin/zcat /var/backup/mysql/testdb.gz | grep hello"
+      )
+
+      # Check that a failed backup is logged
+      master.succeed(
+          "journalctl -u mysql-backup.service | grep 'fail.*doesnotexist' > /dev/null"
+      )
+    '';
+  };
+in
+  lib.mapAttrs (_: package: makeBackupTest { inherit package; }) mariadbPackages
diff --git a/nixpkgs/nixos/tests/mysql/mysql-replication.nix b/nixpkgs/nixos/tests/mysql/mysql-replication.nix
new file mode 100644
index 000000000000..8f1695eb97e2
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql/mysql-replication.nix
@@ -0,0 +1,101 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; },
+  lib ? pkgs.lib
+}:
+
+let
+  inherit (import ./common.nix { inherit pkgs lib; }) mkTestName mariadbPackages;
+
+  replicateUser = "replicate";
+  replicatePassword = "secret";
+
+  makeTest = import ./../make-test-python.nix;
+
+  makeReplicationTest = {
+    package,
+    name ? mkTestName package,
+  }: makeTest {
+    name = "${name}-replication";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ ajs124 das_j ];
+    };
+
+    nodes = {
+      primary = {
+        services.mysql = {
+          inherit package;
+          enable = true;
+          replication.role = "master";
+          replication.slaveHost = "%";
+          replication.masterUser = replicateUser;
+          replication.masterPassword = replicatePassword;
+          initialDatabases = [ { name = "testdb"; schema = ./testdb.sql; } ];
+        };
+        networking.firewall.allowedTCPPorts = [ 3306 ];
+      };
+
+      secondary1 = { nodes, ... }: {
+        services.mysql = {
+          inherit package;
+          enable = true;
+          replication.role = "slave";
+          replication.serverId = 2;
+          replication.masterHost = nodes.primary.networking.hostName;
+          replication.masterUser = replicateUser;
+          replication.masterPassword = replicatePassword;
+        };
+      };
+
+      secondary2 = { nodes, ... }: {
+        services.mysql = {
+          inherit package;
+          enable = true;
+          replication.role = "slave";
+          replication.serverId = 3;
+          replication.masterHost = nodes.primary.networking.hostName;
+          replication.masterUser = replicateUser;
+          replication.masterPassword = replicatePassword;
+        };
+      };
+    };
+
+    testScript = ''
+      primary.start()
+      primary.wait_for_unit("mysql")
+      primary.wait_for_open_port(3306)
+      # Wait for testdb to be fully populated (5 rows).
+      primary.wait_until_succeeds(
+          "sudo -u mysql mysql -u mysql -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+      )
+
+      secondary1.start()
+      secondary2.start()
+      secondary1.wait_for_unit("mysql")
+      secondary1.wait_for_open_port(3306)
+      secondary2.wait_for_unit("mysql")
+      secondary2.wait_for_open_port(3306)
+
+      # Wait for replication to finish on both secondaries
+      secondary1.wait_until_succeeds(
+          "sudo -u mysql mysql -u mysql -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+      )
+      secondary2.wait_until_succeeds(
+          "sudo -u mysql mysql -u mysql -D testdb -N -B -e 'select count(id) from tests' | grep -q 5"
+      )
+
+      secondary2.succeed("systemctl stop mysql")
+      primary.succeed(
+          "echo 'insert into testdb.tests values (123, 456);' | sudo -u mysql mysql -u mysql -N"
+      )
+      secondary2.succeed("systemctl start mysql")
+      secondary2.wait_for_unit("mysql")
+      secondary2.wait_for_open_port(3306)
+      secondary2.wait_until_succeeds(
+          "echo 'select * from testdb.tests where Id = 123;' | sudo -u mysql mysql -u mysql -N | grep 456"
+      )
+    '';
+  };
+in
+  lib.mapAttrs (_: package: makeReplicationTest { inherit package; }) mariadbPackages
diff --git a/nixpkgs/nixos/tests/mysql/mysql.nix b/nixpkgs/nixos/tests/mysql/mysql.nix
new file mode 100644
index 000000000000..3e059cad09e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql/mysql.nix
@@ -0,0 +1,151 @@
+{
+  system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../../.. { inherit system config; },
+  lib ? pkgs.lib
+}:
+
+let
+  inherit (import ./common.nix { inherit pkgs lib; }) mkTestName mariadbPackages mysqlPackages perconaPackages;
+
+  makeTest = import ./../make-test-python.nix;
+  # Set up common users
+  makeMySQLTest = {
+    package,
+    name ? mkTestName package,
+    useSocketAuth ? true,
+    hasMroonga ? true,
+    hasRocksDB ? pkgs.stdenv.hostPlatform.is64bit
+  }: makeTest {
+    inherit name;
+    meta = with lib.maintainers; {
+      maintainers = [ ajs124 das_j ];
+    };
+
+    nodes = {
+      ${name} =
+        { pkgs, ... }: {
+
+          users = {
+            groups.testusers = { };
+
+            users.testuser = {
+              isSystemUser = true;
+              group = "testusers";
+            };
+
+            users.testuser2 = {
+              isSystemUser = true;
+              group = "testusers";
+            };
+          };
+
+          services.mysql = {
+            enable = true;
+            initialDatabases = [
+              { name = "testdb3"; schema = ./testdb.sql; }
+            ];
+            # note that using pkgs.writeText here is generally not a good idea,
+            # as it will store the password in world-readable /nix/store ;)
+            initialScript = pkgs.writeText "mysql-init.sql" (if (!useSocketAuth) then ''
+              CREATE USER 'testuser3'@'localhost' IDENTIFIED BY 'secure';
+              GRANT ALL PRIVILEGES ON testdb3.* TO 'testuser3'@'localhost';
+            '' else ''
+              ALTER USER root@localhost IDENTIFIED WITH unix_socket;
+              DELETE FROM mysql.user WHERE password = ''' AND plugin = ''';
+              DELETE FROM mysql.user WHERE user = ''';
+              FLUSH PRIVILEGES;
+            '');
+
+            ensureDatabases = [ "testdb" "testdb2" ];
+            ensureUsers = [{
+              name = "testuser";
+              ensurePermissions = {
+                "testdb.*" = "ALL PRIVILEGES";
+              };
+            } {
+              name = "testuser2";
+              ensurePermissions = {
+                "testdb2.*" = "ALL PRIVILEGES";
+              };
+            }];
+            inherit package;
+            settings = {
+              mysqld = {
+                plugin-load-add = lib.optional hasMroonga "ha_mroonga.so"
+                  ++ lib.optional hasRocksDB "ha_rocksdb.so";
+              };
+            };
+          };
+        };
+    };
+
+    testScript = ''
+      start_all()
+
+      machine = ${name}
+      machine.wait_for_unit("mysql")
+      machine.succeed(
+          "echo 'use testdb; create table tests (test_id INT, PRIMARY KEY (test_id));' | sudo -u testuser mysql -u testuser"
+      )
+      machine.succeed(
+          "echo 'use testdb; insert into tests values (42);' | sudo -u testuser mysql -u testuser"
+      )
+      # Ensure testuser2 is not able to insert into testdb as mysql testuser2
+      machine.fail(
+          "echo 'use testdb; insert into tests values (23);' | sudo -u testuser2 mysql -u testuser2"
+      )
+      # Ensure testuser2 is not able to authenticate as mysql testuser
+      machine.fail(
+          "echo 'use testdb; insert into tests values (23);' | sudo -u testuser2 mysql -u testuser"
+      )
+      machine.succeed(
+          "echo 'use testdb; select test_id from tests;' | sudo -u testuser mysql -u testuser -N | grep 42"
+      )
+
+      ${lib.optionalString hasMroonga ''
+        # Check if Mroonga plugin works
+        machine.succeed(
+            "echo 'use testdb; create table mroongadb (test_id INT, PRIMARY KEY (test_id)) ENGINE = Mroonga;' | sudo -u testuser mysql -u testuser"
+        )
+        machine.succeed(
+            "echo 'use testdb; insert into mroongadb values (25);' | sudo -u testuser mysql -u testuser"
+        )
+        machine.succeed(
+            "echo 'use testdb; select test_id from mroongadb;' | sudo -u testuser mysql -u testuser -N | grep 25"
+        )
+        machine.succeed(
+            "echo 'use testdb; drop table mroongadb;' | sudo -u testuser mysql -u testuser"
+        )
+      ''}
+
+      ${lib.optionalString hasRocksDB ''
+        # Check if RocksDB plugin works
+        machine.succeed(
+            "echo 'use testdb; create table rocksdb (test_id INT, PRIMARY KEY (test_id)) ENGINE = RocksDB;' | sudo -u testuser mysql -u testuser"
+        )
+        machine.succeed(
+            "echo 'use testdb; insert into rocksdb values (28);' | sudo -u testuser mysql -u testuser"
+        )
+        machine.succeed(
+            "echo 'use testdb; select test_id from rocksdb;' | sudo -u testuser mysql -u testuser -N | grep 28"
+        )
+        machine.succeed(
+            "echo 'use testdb; drop table rocksdb;' | sudo -u testuser mysql -u testuser"
+        )
+      ''}
+    '';
+  };
+in
+  lib.mapAttrs (_: package: makeMySQLTest {
+    inherit package;
+    hasRocksDB = false; hasMroonga = false; useSocketAuth = false;
+  }) mysqlPackages
+  // (lib.mapAttrs (_: package: makeMySQLTest {
+    inherit package;
+  }) mariadbPackages)
+  // (lib.mapAttrs (_: package: makeMySQLTest {
+    inherit package;
+    name = "percona_8_0";
+    hasMroonga = false; useSocketAuth = false;
+  }) perconaPackages)
diff --git a/nixpkgs/nixos/tests/mysql/testdb.sql b/nixpkgs/nixos/tests/mysql/testdb.sql
new file mode 100644
index 000000000000..3c68c49ae82c
--- /dev/null
+++ b/nixpkgs/nixos/tests/mysql/testdb.sql
@@ -0,0 +1,11 @@
+create table tests
+( Id   INTEGER      NOT NULL,
+  Name VARCHAR(255) NOT NULL,
+  primary key(Id)
+);
+
+insert into tests values (1, 'a');
+insert into tests values (2, 'b');
+insert into tests values (3, 'c');
+insert into tests values (4, 'd');
+insert into tests values (5, 'hello');
diff --git a/nixpkgs/nixos/tests/n8n.nix b/nixpkgs/nixos/tests/n8n.nix
new file mode 100644
index 000000000000..0a12192d5c71
--- /dev/null
+++ b/nixpkgs/nixos/tests/n8n.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ lib, ... }:
+let
+  port = 5678;
+  webhookUrl = "http://example.com";
+in
+{
+  name = "n8n";
+  meta.maintainers = with lib.maintainers; [ freezeboy k900 ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      services.n8n = {
+        enable = true;
+        webhookUrl = webhookUrl;
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("n8n.service")
+    machine.wait_for_console_text("Editor is now accessible via")
+    machine.succeed("curl --fail -vvv http://localhost:${toString port}/")
+    machine.succeed("grep -qF ${webhookUrl} /etc/systemd/system/n8n.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nagios.nix b/nixpkgs/nixos/tests/nagios.nix
new file mode 100644
index 000000000000..b6e45fc103af
--- /dev/null
+++ b/nixpkgs/nixos/tests/nagios.nix
@@ -0,0 +1,116 @@
+import ./make-test-python.nix (
+  { pkgs, ... }: {
+    name = "nagios";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ symphorien ];
+    };
+
+    nodes.machine = { lib, ... }: let
+      writer = pkgs.writeShellScript "write" ''
+        set -x
+        echo "$@"  >> /tmp/notifications
+      '';
+    in
+      {
+        # tested service
+        services.sshd.enable = true;
+        # nagios
+        services.nagios = {
+          enable = true;
+          # make state transitions faster
+          extraConfig.interval_length = "5";
+          objectDefs =
+            (map (x: "${pkgs.nagios}/etc/objects/${x}.cfg") [ "templates" "timeperiods" "commands" ]) ++ [
+              (
+                pkgs.writeText "objects.cfg" ''
+                  # notifications are written to /tmp/notifications
+                  define command {
+                  command_name notify-host-by-file
+                  command_line ${writer} "$HOSTNAME is $HOSTSTATE$"
+                  }
+                  define command {
+                  command_name notify-service-by-file
+                  command_line ${writer} "$SERVICEDESC$ is $SERVICESTATE$"
+                  }
+
+                  # nagios boilerplate
+                  define contact {
+                  contact_name                    alice
+                  alias                           alice
+                  host_notifications_enabled      1
+                  service_notifications_enabled   1
+                  service_notification_period     24x7
+                  host_notification_period        24x7
+                  service_notification_options    w,u,c,r,f,s
+                  host_notification_options       d,u,r,f,s
+                  service_notification_commands   notify-service-by-file
+                  host_notification_commands      notify-host-by-file
+                  email                           foo@example.com
+                  }
+                  define contactgroup {
+                  contactgroup_name   admins
+                  alias               Admins
+                  members alice
+                  }
+                  define hostgroup{
+                  hostgroup_name  allhosts
+                  alias  All hosts
+                  }
+
+                  # monitored objects
+                  define host {
+                  use         generic-host
+                  host_name   localhost
+                  alias       localhost
+                  address     localhost
+                  hostgroups  allhosts
+                  contact_groups admins
+                  # make state transitions faster.
+                  max_check_attempts 2
+                  check_interval 1
+                  retry_interval 1
+                  }
+                  define service {
+                  use                 generic-service
+                  host_name           localhost
+                  service_description ssh
+                  check_command       check_ssh
+                  # make state transitions faster.
+                  max_check_attempts 2
+                  check_interval 1
+                  retry_interval 1
+                  }
+                ''
+              )
+            ];
+        };
+      };
+
+    testScript = { ... }: ''
+      with subtest("ensure sshd starts"):
+          machine.wait_for_unit("sshd.service")
+
+
+      with subtest("ensure nagios starts"):
+          machine.wait_for_file("/var/log/nagios/current")
+
+
+      def assert_notify(text):
+          machine.wait_for_file("/tmp/notifications")
+          real = machine.succeed("cat /tmp/notifications").strip()
+          print(f"got {real!r}, expected {text!r}")
+          assert text == real
+
+
+      with subtest("ensure we get a notification when sshd is down"):
+          machine.succeed("systemctl stop sshd")
+          assert_notify("ssh is CRITICAL")
+
+
+      with subtest("ensure tests can succeed"):
+          machine.succeed("systemctl start sshd")
+          machine.succeed("rm /tmp/notifications")
+          assert_notify("ssh is OK")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/nar-serve.nix b/nixpkgs/nixos/tests/nar-serve.nix
new file mode 100644
index 000000000000..f6197567b822
--- /dev/null
+++ b/nixpkgs/nixos/tests/nar-serve.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+  {
+    name = "nar-serve";
+    meta.maintainers = [ lib.maintainers.rizary ];
+    nodes =
+      {
+        server = { pkgs, ... }: {
+          services.nginx = {
+            enable = true;
+            virtualHosts.default.root = "/var/www";
+          };
+          services.nar-serve = {
+            enable = true;
+            # Connect to the localhost nginx instead of the default
+            # https://cache.nixos.org
+            cacheURL = "http://localhost/";
+          };
+          environment.systemPackages = [
+            pkgs.hello
+            pkgs.curl
+          ];
+
+          networking.firewall.allowedTCPPorts = [ 8383 ];
+
+          # virtualisation.diskSize = 2 * 1024;
+        };
+      };
+    testScript = ''
+      import os
+
+      start_all()
+
+      # Create a fake binary cache, with Nginx serving the static files
+      server.succeed(
+          "nix --experimental-features nix-command copy --to file:///var/www ${pkgs.hello}"
+      )
+      server.wait_for_unit("nginx.service")
+      server.wait_for_open_port(80)
+
+      # Check that nar-serve can return the content of the derivation
+      drvName = os.path.basename("${pkgs.hello}")
+      drvHash = drvName.split("-")[0]
+      server.wait_for_unit("nar-serve.service")
+      server.succeed(
+          "curl -o hello -f http://localhost:8383/nix/store/{}/bin/hello".format(drvHash)
+      )
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/nat.nix b/nixpkgs/nixos/tests/nat.nix
new file mode 100644
index 000000000000..0b617cea7774
--- /dev/null
+++ b/nixpkgs/nixos/tests/nat.nix
@@ -0,0 +1,115 @@
+# This is a simple distributed test involving a topology with two
+# separate virtual networks - the "inside" and the "outside" - with a
+# client on the inside network, a server on the outside network, and a
+# router connected to both that performs Network Address Translation
+# for the client.
+import ./make-test-python.nix ({ pkgs, lib, withFirewall, nftables ? false, ... }:
+  let
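+    # Which systemd unit applies the NAT rules depends on the backend under test.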
+    unit = if nftables then "nftables" else (if withFirewall then "firewall" else "nat");
+
+    routerBase =
+      lib.mkMerge [
+        { virtualisation.vlans = [ 2 1 ];
+          networking.firewall.enable = withFirewall;
+          networking.firewall.filterForward = nftables;
+          networking.nftables.enable = nftables;
+          networking.nat.internalIPs = [ "192.168.1.0/24" ];
+          networking.nat.externalInterface = "eth1";
+        }
+      ];
+  in
+  {
+    name = "nat" + (lib.optionalString nftables "Nftables")
+                 + (if withFirewall then "WithFirewall" else "Standalone");
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ eelco rob ];
+    };
+
+    nodes =
+      { client =
+          { pkgs, nodes, ... }:
+          lib.mkMerge [
+            { virtualisation.vlans = [ 1 ];
+              networking.defaultGateway =
+                (pkgs.lib.head nodes.router.config.networking.interfaces.eth2.ipv4.addresses).address;
+              networking.nftables.enable = nftables;
+            }
+          ];
+
+        router =
+        { ... }: lib.mkMerge [
+          routerBase
+          { networking.nat.enable = true; }
+        ];
+
+        routerDummyNoNat =
+        { ... }: lib.mkMerge [
+          routerBase
+          { networking.nat.enable = false; }
+        ];
+
+        server =
+          { ... }:
+          { virtualisation.vlans = [ 2 ];
+            networking.firewall.enable = false;
+            services.httpd.enable = true;
+            services.httpd.adminAddr = "foo@example.org";
+            services.vsftpd.enable = true;
+            services.vsftpd.anonymousUser = true;
+          };
+      };
+
+    testScript =
+      { nodes, ... }: let
+        routerDummyNoNatClosure = nodes.routerDummyNoNat.config.system.build.toplevel;
+        routerClosure = nodes.router.config.system.build.toplevel;
+      in ''
+        client.start()
+        router.start()
+        server.start()
+
+        # The router should have access to the server.
+        server.wait_for_unit("network.target")
+        server.wait_for_unit("httpd")
+        router.wait_for_unit("network.target")
+        router.succeed("curl --fail http://server/ >&2")
+
+        # The client should also be able to connect via the NAT router.
+        router.wait_for_unit("${unit}")
+        client.wait_for_unit("network.target")
+        client.succeed("curl --fail http://server/ >&2")
+        client.succeed("ping -c 1 server >&2")
+
+        # Test whether passive FTP works.
+        server.wait_for_unit("vsftpd")
+        server.succeed("echo Hello World > /home/ftp/foo.txt")
+        client.succeed("curl -v ftp://server/foo.txt >&2")
+
+        # Test whether active FTP works.
+        client.fail("curl -v -P - ftp://server/foo.txt >&2")
+
+        # Test ICMP.
+        client.succeed("ping -c 1 router >&2")
+        router.succeed("ping -c 1 client >&2")
+
+        # If we turn off NAT, the client shouldn't be able to reach the server.
+        router.succeed(
+            "${routerDummyNoNatClosure}/bin/switch-to-configuration test 2>&1"
+        )
+        client.fail("curl --fail --connect-timeout 5 http://server/ >&2")
+        client.fail("ping -c 1 server >&2")
+
+        # And make sure that reloading the NAT job works.
+        router.succeed(
+            "${routerClosure}/bin/switch-to-configuration test 2>&1"
+        )
+        # FIXME: this should not be necessary, but nat.service is not started because
+        #        network.target is not triggered
+        #        (https://github.com/NixOS/nixpkgs/issues/16230#issuecomment-226408359)
+        ${lib.optionalString (!withFirewall && !nftables) ''
+          router.succeed("systemctl start nat.service")
+        ''}
+        client.succeed("curl --fail http://server/ >&2")
+        client.succeed("ping -c 1 server >&2")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/nats.nix b/nixpkgs/nixos/tests/nats.nix
new file mode 100644
index 000000000000..c650904e53bf
--- /dev/null
+++ b/nixpkgs/nixos/tests/nats.nix
@@ -0,0 +1,63 @@
+let
+
+  port = 4222;
+  username = "client";
+  password = "password";
+  topic = "foo.bar";
+
+in import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "nats";
+  meta = with pkgs.lib; { maintainers = with maintainers; [ c0deaddict ]; };
+
+  nodes = let
+    client = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ natscli ];
+    };
+  in {
+    server = { pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ port ];
+      services.nats = {
+        inherit port;
+        enable = true;
+        settings = {
+          authorization = {
+            users = [{
+              user = username;
+              inherit password;
+            }];
+          };
+        };
+      };
+    };
+
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = let file = "/tmp/msg";
+  in ''
+    def nats_cmd(*args):
+        return (
+            "nats "
+            "--server=nats://server:${toString port} "
+            "--user=${username} "
+            "--password=${password} "
+            "{}"
+        ).format(" ".join(args))
+
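+    # Run the given callables concurrently, so the subscriber is already listening when the publisher sends.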
+    def parallel(*fns):
+        from threading import Thread
+        threads = [ Thread(target=fn) for fn in fns ]
+        for t in threads: t.start()
+        for t in threads: t.join()
+
+    start_all()
+    server.wait_for_unit("nats.service")
+
+    with subtest("pub sub"):
+        parallel(
+            lambda: client1.succeed(nats_cmd("sub", "--count", "1", "${topic}")),
+            lambda: client2.succeed("sleep 2 && {}".format(nats_cmd("pub", "${topic}", "hello"))),
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/navidrome.nix b/nixpkgs/nixos/tests/navidrome.nix
new file mode 100644
index 000000000000..7315aef62401
--- /dev/null
+++ b/nixpkgs/nixos/tests/navidrome.nix
@@ -0,0 +1,12 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "navidrome";
+
+  nodes.machine = { ... }: {
+    services.navidrome.enable = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("navidrome")
+    machine.wait_for_open_port(4533)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nbd.nix b/nixpkgs/nixos/tests/nbd.nix
new file mode 100644
index 000000000000..b4aaf29ee4e5
--- /dev/null
+++ b/nixpkgs/nixos/tests/nbd.nix
@@ -0,0 +1,103 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    listenPort = 30123;
+    testString = "It works!";
+    mkCreateSmallFileService = { path, loop ? false }: {
+      script = ''
+        ${pkgs.coreutils}/bin/dd if=/dev/zero of=${path} bs=1K count=100
+        ${pkgs.lib.optionalString loop
+          "${pkgs.util-linux}/bin/losetup --find ${path}"}
+      '';
+      serviceConfig = {
+        Type = "oneshot";
+      };
+      wantedBy = [ "multi-user.target" ];
+      before = [ "nbd-server.service" ];
+    };
+  in
+  {
+    name = "nbd";
+
+    nodes = {
+      server = { config, pkgs, ... }: {
+        # Create some small files of zeros to use as the nbd disks
+        ## `vault-pub.disk` is accessible from any IP
+        systemd.services.create-pub-file =
+          mkCreateSmallFileService { path = "/vault-pub.disk"; };
+        ## `vault-priv.disk` is accessible only from localhost.
+        ## It's also a loopback device to test exporting /dev/...
+        systemd.services.create-priv-file =
+          mkCreateSmallFileService { path = "/vault-priv.disk"; loop = true; };
+        ## `aaa.disk` is just here because "[aaa]" sorts before
+        ## "[generic]" lexicographically, and nbd-server breaks if
+        ## "[generic]" isn't the first section.
+        systemd.services.create-aaa-file =
+          mkCreateSmallFileService { path = "/aaa.disk"; };
+
+        # Needed only for nbd-client used in the tests.
+        environment.systemPackages = [ pkgs.nbd ];
+
+        # Open the nbd port in the firewall
+        networking.firewall.allowedTCPPorts = [ listenPort ];
+
+        # Run the nbd server and expose the small file created above
+        services.nbd.server = {
+          enable = true;
+          exports = {
+            aaa = {
+              path = "/aaa.disk";
+            };
+            vault-pub = {
+              path = "/vault-pub.disk";
+            };
+            vault-priv = {
+              path = "/dev/loop0";
+              allowAddresses = [ "127.0.0.1" "::1" ];
+            };
+          };
+          listenAddress = "0.0.0.0";
+          listenPort = listenPort;
+        };
+      };
+
+      client = { config, pkgs, ... }: {
+        programs.nbd.enable = true;
+      };
+    };
+
+    testScript = ''
+      testString = "${testString}"
+
+      start_all()
+      server.wait_for_open_port(${toString listenPort})
+
+      # Client: Connect to the server, write a small string to the nbd disk, and cleanly disconnect
+      client.succeed("nbd-client server ${toString listenPort} /dev/nbd0 -name vault-pub -persist")
+      client.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc")
+      client.succeed("nbd-client -d /dev/nbd0")
+
+      # Server: Check that the string written by the client is indeed in the file
+      foundString = server.succeed(f"dd status=none if=/vault-pub.disk count={len(testString)}")[:len(testString)]
+      if foundString != testString:
+         raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. Found: '{foundString}'")
+
+      # Client: Fail to connect to the private disk
+      client.fail("nbd-client server ${toString listenPort} /dev/nbd0 -name vault-priv -persist")
+
+      # Server: Successfully connect to the private disk
+      server.succeed("nbd-client localhost ${toString listenPort} /dev/nbd0 -name vault-priv -persist")
+      server.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc")
+      foundString = server.succeed(f"dd status=none if=/dev/loop0 count={len(testString)}")[:len(testString)]
+      if foundString != testString:
+         raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. Found: '{foundString}'")
+      server.succeed("nbd-client -d /dev/nbd0")
+
+      # Server: Successfully connect to the aaa disk
+      server.succeed("nbd-client localhost ${toString listenPort} /dev/nbd0 -name aaa -persist")
+      server.succeed(f"echo '{testString}' | dd of=/dev/nbd0 conv=notrunc")
+      foundString = server.succeed(f"dd status=none if=/aaa.disk count={len(testString)}")[:len(testString)]
+      if foundString != testString:
+         raise Exception(f"Read the wrong string from nbd disk. Expected: '{testString}'. Found: '{foundString}'")
+      server.succeed("nbd-client -d /dev/nbd0")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/ncdns.nix b/nixpkgs/nixos/tests/ncdns.nix
new file mode 100644
index 000000000000..3ce39ed3cb55
--- /dev/null
+++ b/nixpkgs/nixos/tests/ncdns.nix
@@ -0,0 +1,93 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+let
+  fakeReply = pkgs.writeText "namecoin-reply.json" ''
+  { "error": null,
+    "id": 1,
+    "result": {
+      "address": "T31q8ucJ4dI1xzhxQ5QispfECld5c7Xw",
+      "expired": false,
+      "expires_in": 2248,
+      "height": 438155,
+      "name": "d/test",
+      "txid": "db61c0b2540ba0c1a2c8cc92af703a37002e7566ecea4dbf8727c7191421edfb",
+      "value": "{\"ip\": \"1.2.3.4\", \"email\": \"root@test.bit\",\"info\": \"Fake record\"}",
+      "vout": 0
+    }
+  }
+  '';
+
+  # Disabled because DNSSEC does not currently validate,
+  # see https://github.com/namecoin/ncdns/issues/127
+  dnssec = false;
+
+in
+
+{
+  name = "ncdns";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  nodes.server = { ... }: {
+    networking.nameservers = [ "::1" ];
+
+    services.namecoind.rpc = {
+      address = "::1";
+      user = "namecoin";
+      password = "secret";
+      port = 8332;
+    };
+
+    # Fake namecoin RPC server because we can't
+    # run a full node in a test.
+    systemd.services.namecoind = {
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        while true; do
+          echo -e "HTTP/1.1 200 OK\n\n $(<${fakeReply})\n" \
+            | ${pkgs.netcat}/bin/nc -N -l ::1 8332
+        done
+      '';
+    };
+
+    services.ncdns = {
+      enable = true;
+      dnssec.enable = dnssec;
+      identity.hostname   = "example.com";
+      identity.hostmaster = "root@example.com";
+      identity.address    = "1.0.0.1";
+    };
+
+    services.pdns-recursor.enable = true;
+    services.pdns-recursor.resolveNamecoin = true;
+
+    environment.systemPackages = [ pkgs.dnsutils ];
+  };
+
+  testScript =
+    (lib.optionalString dnssec ''
+      with subtest("DNSSEC keys have been generated"):
+          server.wait_for_unit("ncdns")
+          server.wait_for_file("/var/lib/ncdns/bit.key")
+          server.wait_for_file("/var/lib/ncdns/bit-zone.key")
+
+      with subtest("DNSKEY bit record is present"):
+          server.wait_for_unit("pdns-recursor")
+          server.wait_for_open_port(53)
+          server.succeed("host -t DNSKEY bit")
+    '') +
+    ''
+      with subtest("can resolve a .bit name"):
+          server.wait_for_unit("namecoind")
+          server.wait_for_unit("ncdns")
+          server.wait_for_open_port(8332)
+          assert "1.2.3.4" in server.succeed("dig @localhost -p 5333 test.bit")
+
+      with subtest("SOA record has identity information"):
+          assert "example.com" in server.succeed("dig SOA @localhost -p 5333 bit")
+
+      with subtest("bit. zone forwarding works"):
+          server.wait_for_unit("pdns-recursor")
+          assert "1.2.3.4" in server.succeed("host test.bit")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/ndppd.nix b/nixpkgs/nixos/tests/ndppd.nix
new file mode 100644
index 000000000000..e79e2a097b40
--- /dev/null
+++ b/nixpkgs/nixos/tests/ndppd.nix
@@ -0,0 +1,60 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "ndppd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fpletz ];
+  };
+
+  nodes = {
+    upstream = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.tcpdump ];
+      networking.useDHCP = false;
+      networking.interfaces = {
+        eth1 = {
+          ipv6.addresses = [
+            { address = "fd23::1"; prefixLength = 112; }
+          ];
+          ipv6.routes = [
+            { address = "fd42::";
+              prefixLength = 112;
+            }
+          ];
+        };
+      };
+    };
+    server = { pkgs, ... }: {
+      boot.kernel.sysctl = {
+        "net.ipv6.conf.all.forwarding" = "1";
+        "net.ipv6.conf.default.forwarding" = "1";
+      };
+      environment.systemPackages = [ pkgs.tcpdump ];
+      networking.useDHCP = false;
+      networking.interfaces = {
+        eth1 = {
+          ipv6.addresses = [
+            { address = "fd23::2"; prefixLength = 112; }
+          ];
+        };
+      };
+      services.ndppd = {
+        enable = true;
+        proxies.eth1.rules."fd42::/112" = {};
+      };
+      containers.client = {
+        autoStart = true;
+        privateNetwork = true;
+        hostAddress = "192.168.255.1";
+        localAddress = "192.168.255.2";
+        hostAddress6 = "fd42::1";
+        localAddress6 = "fd42::2";
+        config = {};
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("multi-user.target")
+    upstream.wait_for_unit("multi-user.target")
+    upstream.wait_until_succeeds("ping -c5 fd42::2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nebula.nix b/nixpkgs/nixos/tests/nebula.nix
new file mode 100644
index 000000000000..89b91d89fcb3
--- /dev/null
+++ b/nixpkgs/nixos/tests/nebula.nix
@@ -0,0 +1,308 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+
+  # We'll need to be able to trade cert files between nodes via scp.
+  inherit (import ./ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+  makeNebulaNode = { config, ... }: name: extraConfig: lib.mkMerge [
+    {
+      # Expose nebula for doing cert signing.
+      environment.systemPackages = [ pkgs.nebula ];
+      users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+      services.openssh.enable = true;
+      networking.interfaces.eth1.useDHCP = false;
+
+      services.nebula.networks.smoke = {
+        # Note that these paths won't exist when the machine is first booted.
+        ca = "/etc/nebula/ca.crt";
+        cert = "/etc/nebula/${name}.crt";
+        key = "/etc/nebula/${name}.key";
+        listen = { host = "0.0.0.0"; port = 4242; };
+      };
+    }
+    extraConfig
+  ];
+
+in
+{
+  name = "nebula";
+
+  nodes = {
+
+    lighthouse = { ... } @ args:
+      makeNebulaNode args "lighthouse" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
+          address = "192.168.1.1";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          isLighthouse = true;
+          isRelay = true;
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+    allowAny = { ... } @ args:
+      makeNebulaNode args "allowAny" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
+          address = "192.168.1.2";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          relays = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+    allowFromLighthouse = { ... } @ args:
+      makeNebulaNode args "allowFromLighthouse" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
+          address = "192.168.1.3";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          relays = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
+          };
+        };
+      };
+
+    allowToLighthouse = { ... } @ args:
+      makeNebulaNode args "allowToLighthouse" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
+          address = "192.168.1.4";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          enable = true;
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          relays = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+    disabled = { ... } @ args:
+      makeNebulaNode args "disabled" {
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
+          address = "192.168.1.5";
+          prefixLength = 24;
+        }];
+
+        services.nebula.networks.smoke = {
+          enable = false;
+          staticHostMap = { "10.0.100.1" = [ "192.168.1.1:4242" ]; };
+          isLighthouse = false;
+          lighthouses = [ "10.0.100.1" ];
+          relays = [ "10.0.100.1" ];
+          firewall = {
+            outbound = [ { port = "any"; proto = "any"; host = "lighthouse"; } ];
+            inbound = [ { port = "any"; proto = "any"; host = "any"; } ];
+          };
+        };
+      };
+
+  };
+
+  testScript = let
+
+    setUpPrivateKey = name: ''
+      ${name}.start()
+      ${name}.succeed(
+          "mkdir -p /root/.ssh",
+          "chown 700 /root/.ssh",
+          "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
+          "chown 600 /root/.ssh/id_snakeoil",
+          "mkdir -p /root"
+      )
+    '';
+
+    # From what I can tell, StrictHostKeyChecking=no is necessary for ssh to work between machines.
+    sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil";
+
+    restartAndCheckNebula = name: ip: ''
+      ${name}.systemctl("restart nebula@smoke.service")
+      ${name}.succeed("ping -c5 ${ip}")
+    '';
+
+    # Create a keypair on the client node, then use the public key to sign a cert on the lighthouse.
+    signKeysFor = name: ip: ''
+      lighthouse.wait_for_unit("sshd.service")
+      ${name}.wait_for_unit("sshd.service")
+      ${name}.succeed(
+          "mkdir -p /etc/nebula",
+          "nebula-cert keygen -out-key /etc/nebula/${name}.key -out-pub /etc/nebula/${name}.pub",
+          "scp ${sshOpts} /etc/nebula/${name}.pub root@192.168.1.1:/root/${name}.pub",
+      )
+      lighthouse.succeed(
+          'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "${name}" -groups "${name}" -ip "${ip}" -in-pub /root/${name}.pub -out-crt /root/${name}.crt'
+      )
+      ${name}.succeed(
+          "scp ${sshOpts} root@192.168.1.1:/root/${name}.crt /etc/nebula/${name}.crt",
+          "scp ${sshOpts} root@192.168.1.1:/etc/nebula/ca.crt /etc/nebula/ca.crt",
+          '(id nebula-smoke >/dev/null && chown -R nebula-smoke:nebula-smoke /etc/nebula) || true'
+      )
+    '';
+
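+    # Generates a Python snippet that reads a node's eth1 address; used by the iptables helpers below.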
+    getPublicIp = node: ''
+      ${node}.succeed("ip --brief addr show eth1 | awk '{print $3}' | tail -n1 | cut -d/ -f1").strip()
+    '';
+
+    # Never do this for anything security critical! (Thankfully it's just a test.)
+    # Restart Nebula right after the mutual block and/or restore so the state is fresh.
+    blockTrafficBetween = nodeA: nodeB: ''
+      node_a = ${getPublicIp nodeA}
+      node_b = ${getPublicIp nodeB}
+      ${nodeA}.succeed("iptables -I INPUT -s " + node_b + " -j DROP")
+      ${nodeB}.succeed("iptables -I INPUT -s " + node_a + " -j DROP")
+      ${nodeA}.systemctl("restart nebula@smoke.service")
+      ${nodeB}.systemctl("restart nebula@smoke.service")
+    '';
+    allowTrafficBetween = nodeA: nodeB: ''
+      node_a = ${getPublicIp nodeA}
+      node_b = ${getPublicIp nodeB}
+      ${nodeA}.succeed("iptables -D INPUT -s " + node_b + " -j DROP")
+      ${nodeB}.succeed("iptables -D INPUT -s " + node_a + " -j DROP")
+      ${nodeA}.systemctl("restart nebula@smoke.service")
+      ${nodeB}.systemctl("restart nebula@smoke.service")
+    '';
+  in ''
+    # Create the certificate and sign the lighthouse's keys.
+    ${setUpPrivateKey "lighthouse"}
+    lighthouse.succeed(
+        "mkdir -p /etc/nebula",
+        'nebula-cert ca -name "Smoke Test" -out-crt /etc/nebula/ca.crt -out-key /etc/nebula/ca.key',
+        'nebula-cert sign -ca-crt /etc/nebula/ca.crt -ca-key /etc/nebula/ca.key -name "lighthouse" -groups "lighthouse" -ip "10.0.100.1/24" -out-crt /etc/nebula/lighthouse.crt -out-key /etc/nebula/lighthouse.key',
+        'chown -R nebula-smoke:nebula-smoke /etc/nebula'
+    )
+
+    # Reboot the lighthouse and verify that the nebula service comes up on boot.
+    # Since rebooting takes a while, we'll just restart the service on the other nodes.
+    lighthouse.shutdown()
+    lighthouse.start()
+    lighthouse.wait_for_unit("nebula@smoke.service")
+    lighthouse.succeed("ping -c5 10.0.100.1")
+
+    # Create keys for allowAny's nebula service and test that it comes up.
+    ${setUpPrivateKey "allowAny"}
+    ${signKeysFor "allowAny" "10.0.100.2/24"}
+    ${restartAndCheckNebula "allowAny" "10.0.100.2"}
+
+    # Create keys for allowFromLighthouse's nebula service and test that it comes up.
+    ${setUpPrivateKey "allowFromLighthouse"}
+    ${signKeysFor "allowFromLighthouse" "10.0.100.3/24"}
+    ${restartAndCheckNebula "allowFromLighthouse" "10.0.100.3"}
+
+    # Create keys for allowToLighthouse's nebula service and test that it comes up.
+    ${setUpPrivateKey "allowToLighthouse"}
+    ${signKeysFor "allowToLighthouse" "10.0.100.4/24"}
+    ${restartAndCheckNebula "allowToLighthouse" "10.0.100.4"}
+
+    # Create keys for disabled's nebula service and test that it does not come up.
+    ${setUpPrivateKey "disabled"}
+    ${signKeysFor "disabled" "10.0.100.5/24"}
+    disabled.fail("systemctl status nebula@smoke.service")
+    disabled.fail("ping -c5 10.0.100.5")
+
+    # The lighthouse can ping allowAny and allowFromLighthouse but not disabled
+    lighthouse.succeed("ping -c3 10.0.100.2")
+    lighthouse.succeed("ping -c3 10.0.100.3")
+    lighthouse.fail("ping -c3 10.0.100.5")
+
+    # allowAny can ping the lighthouse, but not allowFromLighthouse because of its inbound firewall
+    allowAny.succeed("ping -c3 10.0.100.1")
+    allowAny.fail("ping -c3 10.0.100.3")
+
+    # allowFromLighthouse can ping the lighthouse and allowAny
+    allowFromLighthouse.succeed("ping -c3 10.0.100.1")
+    allowFromLighthouse.succeed("ping -c3 10.0.100.2")
+
+    # block allowFromLighthouse <-> allowAny, and allowFromLighthouse -> allowAny should still work.
+    ${blockTrafficBetween "allowFromLighthouse" "allowAny"}
+    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+    ${allowTrafficBetween "allowFromLighthouse" "allowAny"}
+    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+
+    # allowToLighthouse can ping the lighthouse but not allowAny or allowFromLighthouse
+    allowToLighthouse.succeed("ping -c3 10.0.100.1")
+    allowToLighthouse.fail("ping -c3 10.0.100.2")
+    allowToLighthouse.fail("ping -c3 10.0.100.3")
+
+    # allowAny can ping allowFromLighthouse now that allowFromLighthouse pinged it first
+    allowAny.succeed("ping -c3 10.0.100.3")
+
+    # block allowAny <-> allowFromLighthouse, and allowAny -> allowFromLighthouse should still work.
+    ${blockTrafficBetween "allowAny" "allowFromLighthouse"}
+    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+    allowAny.succeed("ping -c10 10.0.100.3")
+    ${allowTrafficBetween "allowAny" "allowFromLighthouse"}
+    allowFromLighthouse.succeed("ping -c10 10.0.100.2")
+    allowAny.succeed("ping -c10 10.0.100.3")
+
+    # allowToLighthouse can ping allowAny if allowAny pings it first
+    allowAny.succeed("ping -c3 10.0.100.4")
+    allowToLighthouse.succeed("ping -c3 10.0.100.2")
+
+    # block allowToLighthouse <-> allowAny, and allowAny <-> allowToLighthouse should still work.
+    ${blockTrafficBetween "allowAny" "allowToLighthouse"}
+    allowAny.succeed("ping -c10 10.0.100.4")
+    allowToLighthouse.succeed("ping -c10 10.0.100.2")
+    ${allowTrafficBetween "allowAny" "allowToLighthouse"}
+    allowAny.succeed("ping -c10 10.0.100.4")
+    allowToLighthouse.succeed("ping -c10 10.0.100.2")
+
+    # block lighthouse <-> allowFromLighthouse and allowAny <-> allowFromLighthouse; allowFromLighthouse won't get to allowAny
+    ${blockTrafficBetween "allowFromLighthouse" "lighthouse"}
+    ${blockTrafficBetween "allowFromLighthouse" "allowAny"}
+    allowFromLighthouse.fail("ping -c3 10.0.100.2")
+    ${allowTrafficBetween "allowFromLighthouse" "lighthouse"}
+    ${allowTrafficBetween "allowFromLighthouse" "allowAny"}
+    allowFromLighthouse.succeed("ping -c3 10.0.100.2")
+
+    # block lighthouse <-> allowAny, allowAny <-> allowFromLighthouse, and allowAny <-> allowToLighthouse; allowAny won't get to allowFromLighthouse or allowToLighthouse
+    ${blockTrafficBetween "allowAny" "lighthouse"}
+    ${blockTrafficBetween "allowAny" "allowFromLighthouse"}
+    ${blockTrafficBetween "allowAny" "allowToLighthouse"}
+    allowFromLighthouse.fail("ping -c3 10.0.100.2")
+    allowAny.fail("ping -c3 10.0.100.3")
+    allowAny.fail("ping -c3 10.0.100.4")
+    ${allowTrafficBetween "allowAny" "lighthouse"}
+    ${allowTrafficBetween "allowAny" "allowFromLighthouse"}
+    ${allowTrafficBetween "allowAny" "allowToLighthouse"}
+    allowFromLighthouse.succeed("ping -c3 10.0.100.2")
+    allowAny.succeed("ping -c3 10.0.100.3")
+    allowAny.succeed("ping -c3 10.0.100.4")
+
+    # block lighthouse <-> allowToLighthouse and allowToLighthouse <-> allowAny; allowToLighthouse won't get to allowAny
+    ${blockTrafficBetween "allowToLighthouse" "lighthouse"}
+    ${blockTrafficBetween "allowToLighthouse" "allowAny"}
+    allowAny.fail("ping -c3 10.0.100.4")
+    allowToLighthouse.fail("ping -c3 10.0.100.2")
+    ${allowTrafficBetween "allowToLighthouse" "lighthouse"}
+    ${allowTrafficBetween "allowToLighthouse" "allowAny"}
+    allowAny.succeed("ping -c3 10.0.100.4")
+    allowToLighthouse.succeed("ping -c3 10.0.100.2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/neo4j.nix b/nixpkgs/nixos/tests/neo4j.nix
new file mode 100644
index 000000000000..0b57f5b2e038
--- /dev/null
+++ b/nixpkgs/nixos/tests/neo4j.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix {
+  name = "neo4j";
+
+  nodes = {
+    server =
+      { ... }:
+
+      {
+        virtualisation.memorySize = 4096;
+        virtualisation.diskSize = 1024;
+
+        services.neo4j.enable = true;
+        # HTTPS and Bolt require TLS certificates to be available, so disable them for this test
+        services.neo4j.https.enable = false;
+        services.neo4j.bolt.enable = false;
+      };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("neo4j.service")
+    server.wait_for_open_port(7474)
+    server.succeed("curl -f http://localhost:7474/")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/netbird.nix b/nixpkgs/nixos/tests/netbird.nix
new file mode 100644
index 000000000000..ef793cfe9881
--- /dev/null
+++ b/nixpkgs/nixos/tests/netbird.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "netbird";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ misuzu ];
+  };
+
+  nodes = {
+    node = { ... }: {
+      services.netbird.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    node.wait_for_unit("netbird.service")
+    node.wait_for_file("/var/run/netbird/sock")
+    node.succeed("netbird status | grep -q 'Daemon status: NeedsLogin'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/netdata.nix b/nixpkgs/nixos/tests/netdata.nix
new file mode 100644
index 000000000000..e3438f63404e
--- /dev/null
+++ b/nixpkgs/nixos/tests/netdata.nix
@@ -0,0 +1,41 @@
+# This test runs netdata and checks for data via apps.plugin
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "netdata";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ cransom raitobezarius ];
+  };
+
+  nodes = {
+    netdata =
+      { pkgs, ... }:
+        {
+          environment.systemPackages = with pkgs; [ curl jq netdata ];
+          services.netdata.enable = true;
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    netdata.wait_for_unit("netdata.service")
+
+    # wait for the service to listen before sending a request
+    netdata.wait_for_open_port(19999)
+
+    # check if the netdata main page loads.
+    netdata.succeed("curl --fail http://localhost:19999/")
+    netdata.succeed("sleep 4")
+
+    # check if netdata can read disk ops for root-owned processes.
+    # the selected dimension is reported as negative values, so a negative sum
+    # means data is being collected; this verifies both that netdata is working
+    # and that apps.plugin has the elevated capabilities it needs.
+    url = "http://localhost:19999/api/v1/data\?chart=user.root_disk_physical_io"
+    filter = '[.data[range(10)][2]] | add | . < 0'
+    cmd = f"curl -s {url} | jq -e '{filter}'"
+    netdata.wait_until_succeeds(cmd)
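+    # (For reference — response shape assumed here, see the netdata API docs for the
+    # authoritative format: /api/v1/data returns JSON roughly like
+    #   {"labels": ["time", ...], "data": [[timestamp, dim1, dim2], ...]}
+    # and the jq filter above sums index 2 of the first ten rows.)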
+
+    # check if the control socket is available
+    netdata.succeed("sudo netdatacli ping")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/networking-proxy.nix b/nixpkgs/nixos/tests/networking-proxy.nix
new file mode 100644
index 000000000000..330bac2588a5
--- /dev/null
+++ b/nixpkgs/nixos/tests/networking-proxy.nix
@@ -0,0 +1,134 @@
+# Test whether `networking.proxy' works as expected.
+
+# TODO: use a real proxy node and put this test into networking.nix
+# TODO: test whether nix tools work as expected behind a proxy
+
+let default-config = {
+        imports = [ ./common/user-account.nix ];
+
+        services.xserver.enable = false;
+
+      };
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "networking-proxy";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [  ];
+  };
+
+  nodes = {
+    # no proxy
+    machine =
+      { ... }:
+
+      default-config;
+
+    # proxy default
+    machine2 =
+      { ... }:
+
+      default-config // {
+        networking.proxy.default = "http://user:pass@host:port";
+      };
+
+    # specific proxy options
+    machine3 =
+      { ... }:
+
+      default-config //
+      {
+        networking.proxy = {
+          # has no effect: it is overridden by the more specific options below
+          default = "http://user:pass@host:port";
+          # advanced proxy setup
+          httpProxy = "123-http://user:pass@http-host:port";
+          httpsProxy = "456-http://user:pass@https-host:port";
+          rsyncProxy = "789-http://user:pass@rsync-host:port";
+          ftpProxy = "101112-http://user:pass@ftp-host:port";
+          noProxy = "131415-127.0.0.1,localhost,.localdomain";
+        };
+      };
+
+    # mix default + proxy options
+    machine4 =
+      { ... }:
+
+      default-config // {
+        networking.proxy = {
+          # sets every *_proxy environment variable
+          default = "000-http://user:pass@default-host:port";
+          # except for these two
+          rsyncProxy = "123-http://user:pass@http-host:port";
+          noProxy = "131415-127.0.0.1,localhost,.localdomain";
+        };
+      };
+    };
+
+  testScript =
+    ''
+      from typing import Dict, Optional
+
+
+      def get_machine_env(machine: Machine, user: Optional[str] = None) -> Dict[str, str]:
+          """
+          Gets the environment from a given machine, and returns it as a
+          dictionary in the form:
+              {"lowercase_var_name": "value"}
+
+          Duplicate environment variables with the same name
+          (e.g. "foo" and "FOO") are handled in an undefined manner.
+          """
+          if user is not None:
+              env = machine.succeed("su - {} -c 'env -0'".format(user))
+          else:
+              env = machine.succeed("env -0")
+          ret = {}
+          for line in env.split("\0"):
+              if "=" not in line:
+                  continue
+
+              key, val = line.split("=", 1)
+              ret[key.lower()] = val
+          return ret
+
+
+      start_all()
+
+      with subtest("no proxy"):
+          assert "proxy" not in machine.succeed("env").lower()
+          assert "proxy" not in machine.succeed("su - alice -c env").lower()
+
+      with subtest("default proxy"):
+          assert "proxy" in machine2.succeed("env").lower()
+          assert "proxy" in machine2.succeed("su - alice -c env").lower()
+
+      with subtest("explicitly-set proxy"):
+          env = get_machine_env(machine3)
+          assert "123" in env["http_proxy"]
+          assert "456" in env["https_proxy"]
+          assert "789" in env["rsync_proxy"]
+          assert "101112" in env["ftp_proxy"]
+          assert "131415" in env["no_proxy"]
+
+          env = get_machine_env(machine3, "alice")
+          assert "123" in env["http_proxy"]
+          assert "456" in env["https_proxy"]
+          assert "789" in env["rsync_proxy"]
+          assert "101112" in env["ftp_proxy"]
+          assert "131415" in env["no_proxy"]
+
+      with subtest("default proxy + some other specifics"):
+          env = get_machine_env(machine4)
+          assert "000" in env["http_proxy"]
+          assert "000" in env["https_proxy"]
+          assert "123" in env["rsync_proxy"]
+          assert "000" in env["ftp_proxy"]
+          assert "131415" in env["no_proxy"]
+
+          env = get_machine_env(machine4, "alice")
+          assert "000" in env["http_proxy"]
+          assert "000" in env["https_proxy"]
+          assert "123" in env["rsync_proxy"]
+          assert "000" in env["ftp_proxy"]
+          assert "131415" in env["no_proxy"]
+    '';
+})
diff --git a/nixpkgs/nixos/tests/networking.nix b/nixpkgs/nixos/tests/networking.nix
new file mode 100644
index 000000000000..768d0cfa2238
--- /dev/null
+++ b/nixpkgs/nixos/tests/networking.nix
@@ -0,0 +1,1065 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+# bool: whether to use networkd in the tests
+, networkd }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  qemu-common = import ../lib/qemu-common.nix { inherit (pkgs) lib pkgs; };
+
+  router = { config, pkgs, lib, ... }:
+    with pkgs.lib;
+    let
+      vlanIfs = range 1 (length config.virtualisation.vlans);
+    in {
+      environment.systemPackages = [ pkgs.iptables ]; # to debug firewall rules
+      virtualisation.vlans = [ 1 2 3 ];
+      boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true;
+      networking = {
+        useDHCP = false;
+        useNetworkd = networkd;
+        firewall.checkReversePath = true;
+        firewall.allowedUDPPorts = [ 547 ];
+        interfaces = mkOverride 0 (listToAttrs (forEach vlanIfs (n:
+          nameValuePair "eth${toString n}" {
+            ipv4.addresses = [ { address = "192.168.${toString n}.1"; prefixLength = 24; } ];
+            ipv6.addresses = [ { address = "fd00:1234:5678:${toString n}::1"; prefixLength = 64; } ];
+          })));
+      };
+      services.kea = {
+        dhcp4 = {
+          enable = true;
+          settings = {
+            interfaces-config = {
+              interfaces = map (n: "eth${toString n}") vlanIfs;
+              dhcp-socket-type = "raw";
+              service-sockets-require-all = true;
+              service-sockets-max-retries = 5;
+              service-sockets-retry-wait-time = 2500;
+            };
+            subnet4 = map (n: {
+              id = n;
+              subnet = "192.168.${toString n}.0/24";
+              pools = [{ pool = "192.168.${toString n}.3 - 192.168.${toString n}.254"; }];
+              option-data = [{ name = "routers"; data = "192.168.${toString n}.1"; }];
+
+              reservations = [{
+                hw-address = qemu-common.qemuNicMac n 1;
+                hostname = "client${toString n}";
+                ip-address = "192.168.${toString n}.2";
+              }];
+            }) vlanIfs;
+          };
+        };
+        dhcp6 = {
+          enable = true;
+          settings = {
+            interfaces-config = {
+              interfaces = map (n: "eth${toString n}") vlanIfs;
+              service-sockets-require-all = true;
+              service-sockets-max-retries = 5;
+              service-sockets-retry-wait-time = 2500;
+            };
+
+            subnet6 = map (n: {
+              id = n;
+              subnet = "fd00:1234:5678:${toString n}::/64";
+              interface = "eth${toString n}";
+              pools = [{ pool = "fd00:1234:5678:${toString n}::2-fd00:1234:5678:${toString n}::2"; }];
+            }) vlanIfs;
+          };
+        };
+      };
+      services.radvd = {
+        enable = true;
+        config = flip concatMapStrings vlanIfs (n: ''
+          interface eth${toString n} {
+            AdvSendAdvert on;
+            AdvManagedFlag on;
+            AdvOtherConfigFlag on;
+
+            prefix fd00:1234:5678:${toString n}::/64 {
+              AdvAutonomous off;
+            };
+          };
+        '');
+      };
+    };
+
+  testCases = {
+    loopback = {
+      name = "Loopback";
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        networking.useDHCP = false;
+        networking.useNetworkd = networkd;
+      };
+      testScript = ''
+        start_all()
+        client.wait_for_unit("network.target")
+        loopback_addresses = client.succeed("ip addr show lo")
+        assert "inet 127.0.0.1/8" in loopback_addresses
+        assert "inet6 ::1/128" in loopback_addresses
+      '';
+    };
+    static = {
+      name = "Static";
+      nodes.router = router;
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        virtualisation.interfaces.enp2s0.vlan = 2;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          defaultGateway = { address = "192.168.1.1"; interface = "enp1s0"; };
+          defaultGateway6 = { address = "fd00:1234:5678:1::1"; interface = "enp1s0"; };
+          interfaces.enp1s0.ipv4.addresses = [
+            { address = "192.168.1.2"; prefixLength = 24; }
+            { address = "192.168.1.3"; prefixLength = 32; }
+            { address = "192.168.1.10"; prefixLength = 32; }
+          ];
+          interfaces.enp2s0.ipv4.addresses = [
+            { address = "192.168.2.2"; prefixLength = 24; }
+          ];
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          client.wait_for_unit("network.target")
+          router.wait_for_unit("network-online.target")
+
+          with subtest("Make sure DHCP server is not started"):
+              client.fail("systemctl status kea-dhcp4-server.service")
+              client.fail("systemctl status kea-dhcp6-server.service")
+
+          with subtest("Test vlan 1"):
+              client.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client.wait_until_succeeds("ping -c 1 192.168.1.3")
+              client.wait_until_succeeds("ping -c 1 192.168.1.10")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+              router.wait_until_succeeds("ping -c 1 192.168.1.3")
+              router.wait_until_succeeds("ping -c 1 192.168.1.10")
+
+          with subtest("Test vlan 2"):
+              client.wait_until_succeeds("ping -c 1 192.168.2.1")
+              client.wait_until_succeeds("ping -c 1 192.168.2.2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.2.1")
+              router.wait_until_succeeds("ping -c 1 192.168.2.2")
+
+          with subtest("Test default gateway"):
+              router.wait_until_succeeds("ping -c 1 192.168.3.1")
+              client.wait_until_succeeds("ping -c 1 192.168.3.1")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:3::1")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:3::1")
+        '';
+    };
+    routeType = {
+      name = "RouteType";
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        networking = {
+          useDHCP = false;
+          useNetworkd = networkd;
+          interfaces.eth1.ipv4.routes = [{
+            address = "192.168.1.127";
+            prefixLength = 32;
+            type = "local";
+          }];
+        };
+      };
+      testScript = ''
+        start_all()
+        client.wait_for_unit("network.target")
+        client.succeed("ip -4 route list table local | grep 'local 192.168.1.127'")
+      '';
+    };
+    dhcpDefault = {
+      name = "useDHCP-by-default";
+      nodes.router = router;
+      nodes.client = { lib, ... }: {
+        # Disable test driver default config
+        networking.interfaces = lib.mkForce {
+          # Make sure DHCP defaults correctly even when some unrelated config
+          # is set on the interface (nothing, in this case).
+          enp1s0 = {};
+        };
+        networking.useNetworkd = networkd;
+        virtualisation.interfaces.enp1s0.vlan = 1;
+      };
+      testScript = ''
+        start_all()
+        client.wait_for_unit("multi-user.target")
+        client.wait_until_succeeds("ip addr show dev enp1s0 | grep '192.168.1'")
+        client.shell_interact()
+        client.succeed("ping -c 1 192.168.1.1")
+        router.succeed("ping -c 1 192.168.1.1")
+        router.succeed("ping -c 1 192.168.1.2")
+        client.succeed("ping -c 1 192.168.1.2")
+      '';
+    };
+    dhcpSimple = {
+      name = "SimpleDHCP";
+      nodes.router = router;
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        virtualisation.interfaces.enp2s0.vlan = 2;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.enp1s0.useDHCP = true;
+          interfaces.enp2s0.useDHCP = true;
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          client.wait_for_unit("network.target")
+          router.wait_for_unit("network-online.target")
+
+          with subtest("Wait until we have an ip address on each interface"):
+              client.wait_until_succeeds("ip addr show dev enp1s0 | grep -q '192.168.1'")
+              client.wait_until_succeeds("ip addr show dev enp1s0 | grep -q 'fd00:1234:5678:1:'")
+              client.wait_until_succeeds("ip addr show dev enp2s0 | grep -q '192.168.2'")
+              client.wait_until_succeeds("ip addr show dev enp2s0 | grep -q 'fd00:1234:5678:2:'")
+
+          with subtest("Test vlan 1"):
+              client.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::2")
+
+          with subtest("Test vlan 2"):
+              client.wait_until_succeeds("ping -c 1 192.168.2.1")
+              client.wait_until_succeeds("ping -c 1 192.168.2.2")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:2::1")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:2::2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.2.1")
+              router.wait_until_succeeds("ping -c 1 192.168.2.2")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:2::1")
+              router.wait_until_succeeds("ping -c 1 fd00:1234:5678:2::2")
+        '';
+    };
+    dhcpOneIf = {
+      name = "OneInterfaceDHCP";
+      nodes.router = router;
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        virtualisation.interfaces.enp2s0.vlan = 2;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.enp1s0 = {
+            mtu = 1343;
+            useDHCP = true;
+          };
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to come up"):
+              client.wait_for_unit("network.target")
+              router.wait_for_unit("network.target")
+
+          with subtest("Wait until we have an ip address on each interface"):
+              client.wait_until_succeeds("ip addr show dev enp1s0 | grep -q '192.168.1'")
+
+          with subtest("ensure MTU is set"):
+              assert "mtu 1343" in client.succeed("ip link show dev enp1s0")
+
+          with subtest("Test vlan 1"):
+              client.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client.wait_until_succeeds("ping -c 1 192.168.1.2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+
+          with subtest("Test vlan 2"):
+              client.wait_until_succeeds("ping -c 1 192.168.2.1")
+              client.fail("ping -c 1 192.168.2.2")
+
+              router.wait_until_succeeds("ping -c 1 192.168.2.1")
+              router.fail("ping -c 1 192.168.2.2")
+        '';
+    };
+    bond = let
+      node = address: { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        virtualisation.interfaces.enp2s0.vlan = 2;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          bonds.bond0 = {
+            interfaces = [ "enp1s0" "enp2s0" ];
+            driverOptions.mode = "802.3ad";
+          };
+          interfaces.bond0.ipv4.addresses = mkOverride 0
+            [ { inherit address; prefixLength = 30; } ];
+        };
+      };
+    in {
+      name = "Bond";
+      nodes.client1 = node "192.168.1.1";
+      nodes.client2 = node "192.168.1.2";
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to come up"):
+              client1.wait_for_unit("network.target")
+              client2.wait_for_unit("network.target")
+
+          with subtest("Test bonding"):
+              client1.wait_until_succeeds("ping -c 2 192.168.1.1")
+              client1.wait_until_succeeds("ping -c 2 192.168.1.2")
+
+              client2.wait_until_succeeds("ping -c 2 192.168.1.1")
+              client2.wait_until_succeeds("ping -c 2 192.168.1.2")
+
+          with subtest("Verify bonding mode"):
+              for client in client1, client2:
+                  client.succeed('grep -q "Bonding Mode: IEEE 802.3ad Dynamic link aggregation" /proc/net/bonding/bond0')
+        '';
+    };
+    bridge = let
+      node = { address, vlan }: { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = vlan;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.enp1s0.ipv4.addresses = [ { inherit address; prefixLength = 24; } ];
+        };
+      };
+    in {
+      name = "Bridge";
+      nodes.client1 = node { address = "192.168.1.2"; vlan = 1; };
+      nodes.client2 = node { address = "192.168.1.3"; vlan = 2; };
+      nodes.router = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        virtualisation.interfaces.enp2s0.vlan = 2;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          bridges.bridge.interfaces = [ "enp1s0" "enp2s0" ];
+          interfaces.eth1.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.eth2.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.bridge.ipv4.addresses = mkOverride 0
+            [ { address = "192.168.1.1"; prefixLength = 24; } ];
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to come up"):
+              for machine in client1, client2, router:
+                  machine.wait_for_unit("network.target")
+
+          with subtest("Test bridging"):
+              client1.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client1.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client1.wait_until_succeeds("ping -c 1 192.168.1.3")
+
+              client2.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client2.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client2.wait_until_succeeds("ping -c 1 192.168.1.3")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+              router.wait_until_succeeds("ping -c 1 192.168.1.3")
+        '';
+    };
+    macvlan = {
+      name = "MACVLAN";
+      nodes.router = router;
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        environment.systemPackages = [ pkgs.iptables ]; # to debug firewall rules
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          firewall.logReversePathDrops = true; # to debug firewall rules
+          # reverse path filtering rules for the macvlan interface seem
+          # to be incorrect, causing the test to fail. Disable temporarily.
+          firewall.checkReversePath = false;
+          macvlans.macvlan.interface = "enp1s0";
+          interfaces.enp1s0.useDHCP = true;
+          interfaces.macvlan.useDHCP = true;
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to come up"):
+              client.wait_for_unit("network.target")
+              router.wait_for_unit("network.target")
+
+          with subtest("Wait until we have an ip address on each interface"):
+              client.wait_until_succeeds("ip addr show dev enp1s0 | grep -q '192.168.1'")
+              client.wait_until_succeeds("ip addr show dev macvlan | grep -q '192.168.1'")
+
+          with subtest("Print lots of diagnostic information"):
+              router.log("**********************************************")
+              router.succeed("ip addr >&2")
+              router.succeed("ip route >&2")
+              router.execute("iptables-save >&2")
+              client.log("==============================================")
+              client.succeed("ip addr >&2")
+              client.succeed("ip route >&2")
+              client.execute("iptables-save >&2")
+              client.log("##############################################")
+
+          with subtest("Test macvlan creates routable ips"):
+              client.wait_until_succeeds("ping -c 1 192.168.1.1")
+              client.wait_until_succeeds("ping -c 1 192.168.1.2")
+              client.wait_until_succeeds("ping -c 1 192.168.1.3")
+
+              router.wait_until_succeeds("ping -c 1 192.168.1.1")
+              router.wait_until_succeeds("ping -c 1 192.168.1.2")
+              router.wait_until_succeeds("ping -c 1 192.168.1.3")
+        '';
+    };
+    fou = {
+      name = "foo-over-udp";
+      nodes.machine = { ... }: {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.enp1s0.ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+          fooOverUDP = {
+            fou1 = { port = 9001; };
+            fou2 = { port = 9002; protocol = 41; };
+            fou3 = mkIf (!networkd)
+              { port = 9003; local.address = "192.168.1.1"; };
+            fou4 = mkIf (!networkd)
+              { port = 9004; local = { address = "192.168.1.1"; dev = "enp1s0"; }; };
+          };
+        };
+        systemd.services = {
+          fou3-fou-encap.after = optional (!networkd) "network-addresses-enp1s0.service";
+        };
+      };
+      testScript = { ... }:
+        ''
+          import json
+
+          machine.wait_for_unit("network.target")
+          fous = json.loads(machine.succeed("ip -json fou show"))
+          assert {"port": 9001, "gue": None, "family": "inet"} in fous, "fou1 exists"
+          assert {"port": 9002, "ipproto": 41, "family": "inet"} in fous, "fou2 exists"
+        '' + optionalString (!networkd) ''
+          assert {
+              "port": 9003,
+              "gue": None,
+              "family": "inet",
+              "local": "192.168.1.1",
+          } in fous, "fou3 exists"
+          assert {
+              "port": 9004,
+              "gue": None,
+              "family": "inet",
+              "local": "192.168.1.1",
+              "dev": "enp1s0",
+          } in fous, "fou4 exists"
+        '';
+    };
+    sit = let
+      node = { address4, remote, address6 }: { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          sits.sit = {
+            inherit remote;
+            local = address4;
+            dev = "enp1s0";
+          };
+          interfaces.enp1s0.ipv4.addresses = mkOverride 0
+            [ { address = address4; prefixLength = 24; } ];
+          interfaces.sit.ipv6.addresses = mkOverride 0
+            [ { address = address6; prefixLength = 64; } ];
+        };
+      };
+    in {
+      name = "Sit";
+      # note on firewalling: the two nodes are explicitly asymmetric.
+      # client1 sends SIT packets in UDP, but accepts only proto-41 incoming.
+      # client2 does the reverse, sending in proto-41 and accepting only UDP incoming.
+      # that way we'll notice when either SIT itself or FOU breaks.
+      nodes.client1 = args@{ pkgs, ... }:
+        mkMerge [
+          (node { address4 = "192.168.1.1"; remote = "192.168.1.2"; address6 = "fc00::1"; } args)
+          {
+            networking = {
+              firewall.extraCommands = "iptables -A INPUT -p 41 -j ACCEPT";
+              sits.sit.encapsulation = { type = "fou"; port = 9001; };
+            };
+          }
+        ];
+      nodes.client2 = args@{ pkgs, ... }:
+        mkMerge [
+          (node { address4 = "192.168.1.2"; remote = "192.168.1.1"; address6 = "fc00::2"; } args)
+          {
+            networking = {
+              firewall.allowedUDPPorts = [ 9001 ];
+              fooOverUDP.fou1 = { port = 9001; protocol = 41; };
+            };
+          }
+        ];
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to be configured"):
+              client1.wait_for_unit("network.target")
+              client2.wait_for_unit("network.target")
+
+              # Print diagnostic information
+              client1.succeed("ip addr >&2")
+              client2.succeed("ip addr >&2")
+
+          with subtest("Test ipv6"):
+              client1.wait_until_succeeds("ping -c 1 fc00::1")
+              client1.wait_until_succeeds("ping -c 1 fc00::2")
+
+              client2.wait_until_succeeds("ping -c 1 fc00::1")
+              client2.wait_until_succeeds("ping -c 1 fc00::2")
+        '';
+    };
+    gre = let
+      node = { pkgs, ... }: with pkgs.lib; {
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          firewall.extraCommands = "ip6tables -A nixos-fw -p gre -j nixos-fw-accept";
+        };
+      };
+    in {
+      name = "GRE";
+      nodes.client1 = args@{ pkgs, ... }:
+        mkMerge [
+          (node args)
+          {
+            virtualisation.vlans = [ 1 2 4 ];
+            networking = {
+              greTunnels = {
+                greTunnel = {
+                  local = "192.168.2.1";
+                  remote = "192.168.2.2";
+                  dev = "eth2";
+                  ttl = 225;
+                  type = "tap";
+                };
+                gre6Tunnel = {
+                  local = "fd00:1234:5678:4::1";
+                  remote = "fd00:1234:5678:4::2";
+                  dev = "eth3";
+                  ttl = 255;
+                  type = "tun6";
+                };
+              };
+              bridges.bridge.interfaces = [ "greTunnel" "eth1" ];
+              interfaces.eth1.ipv4.addresses = mkOverride 0 [];
+              interfaces.bridge.ipv4.addresses = mkOverride 0 [
+                { address = "192.168.1.1"; prefixLength = 24; }
+              ];
+              interfaces.eth3.ipv6.addresses = [
+                { address = "fd00:1234:5678:4::1"; prefixLength = 64; }
+              ];
+              interfaces.gre6Tunnel.ipv6.addresses = mkOverride 0 [
+                { address = "fc00::1"; prefixLength = 64; }
+              ];
+            };
+          }
+        ];
+      nodes.client2 = args@{ pkgs, ... }:
+        mkMerge [
+          (node args)
+          {
+            virtualisation.vlans = [ 2 3 4 ];
+            networking = {
+              greTunnels = {
+                greTunnel = {
+                  local = "192.168.2.2";
+                  remote = "192.168.2.1";
+                  dev = "eth1";
+                  ttl = 225;
+                  type = "tap";
+                };
+                gre6Tunnel = {
+                  local = "fd00:1234:5678:4::2";
+                  remote = "fd00:1234:5678:4::1";
+                  dev = "eth3";
+                  ttl = 255;
+                  type = "tun6";
+                };
+              };
+              bridges.bridge.interfaces = [ "greTunnel" "eth2" ];
+              interfaces.eth2.ipv4.addresses = mkOverride 0 [];
+              interfaces.bridge.ipv4.addresses = mkOverride 0 [
+                { address = "192.168.1.2"; prefixLength = 24; }
+              ];
+              interfaces.eth3.ipv6.addresses = [
+                { address = "fd00:1234:5678:4::2"; prefixLength = 64; }
+              ];
+              interfaces.gre6Tunnel.ipv6.addresses = mkOverride 0 [
+                { address = "fc00::2"; prefixLength = 64; }
+              ];
+            };
+          }
+        ];
+      testScript = { ... }:
+        ''
+          import json
+          start_all()
+
+          with subtest("Wait for networking to be configured"):
+              client1.wait_for_unit("network.target")
+              client2.wait_for_unit("network.target")
+
+              # Print diagnostic information
+              client1.succeed("ip addr >&2")
+              client2.succeed("ip addr >&2")
+
+          with subtest("Test GRE tunnel bridge over VLAN"):
+              client1.wait_until_succeeds("ping -c 1 192.168.1.2")
+
+              client2.wait_until_succeeds("ping -c 1 192.168.1.1")
+
+              client1.wait_until_succeeds("ping -c 1 fc00::2")
+
+              client2.wait_until_succeeds("ping -c 1 fc00::1")
+
+          with subtest("Test GRE tunnel TTL"):
+              links = json.loads(client1.succeed("ip -details -json link show greTunnel"))
+              assert links[0]['linkinfo']['info_data']['ttl'] == 225, "ttl not set for greTunnel"
+
+              links = json.loads(client2.succeed("ip -details -json link show gre6Tunnel"))
+              assert links[0]['linkinfo']['info_data']['ttl'] == 255, "ttl not set for gre6Tunnel"
+        '';
+    };
+    vlan = let
+      node = address: { pkgs, ... }: with pkgs.lib; {
+        #virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          vlans.vlan = {
+            id = 1;
+            interface = "eth0";
+          };
+          interfaces.eth0.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.eth1.ipv4.addresses = mkOverride 0 [ ];
+          interfaces.vlan.ipv4.addresses = mkOverride 0
+            [ { inherit address; prefixLength = 24; } ];
+        };
+      };
+    in {
+      name = "vlan";
+      nodes.client1 = node "192.168.1.1";
+      nodes.client2 = node "192.168.1.2";
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to be configured"):
+              client1.wait_for_unit("network.target")
+              client2.wait_for_unit("network.target")
+
+          with subtest("Test vlan is setup"):
+              client1.succeed("ip addr show dev vlan >&2")
+              client2.succeed("ip addr show dev vlan >&2")
+        '';
+    };
+    vlan-ping = let
+        baseIP = number: "10.10.10.${number}";
+        vlanIP = number: "10.1.1.${number}";
+        baseInterface = "enp1s0";
+        vlanInterface = "vlan42";
+        node = number: {pkgs, ... }: with pkgs.lib; {
+          virtualisation.interfaces.enp1s0.vlan = 1;
+          networking = {
+            #useNetworkd = networkd;
+            useDHCP = false;
+            vlans.${vlanInterface} = { id = 42; interface = baseInterface; };
+            interfaces.${baseInterface}.ipv4.addresses = mkOverride 0 [{ address = baseIP number; prefixLength = 24; }];
+            interfaces.${vlanInterface}.ipv4.addresses = mkOverride 0 [{ address = vlanIP number; prefixLength = 24; }];
+          };
+        };
+
+        serverNodeNum = "1";
+        clientNodeNum = "2";
+
+    in {
+      name = "vlan-ping";
+      nodes.server = node serverNodeNum;
+      nodes.client = node clientNodeNum;
+      testScript = { ... }:
+        ''
+          start_all()
+
+          with subtest("Wait for networking to be configured"):
+              server.wait_for_unit("network.target")
+              client.wait_for_unit("network.target")
+
+          with subtest("Test ping on base interface in setup"):
+              client.succeed("ping -I ${baseInterface} -c 1 ${baseIP serverNodeNum}")
+              server.succeed("ping -I ${baseInterface} -c 1 ${baseIP clientNodeNum}")
+
+          with subtest("Test ping on vlan subinterface in setup"):
+              client.succeed("ping -I ${vlanInterface} -c 1 ${vlanIP serverNodeNum}")
+              server.succeed("ping -I ${vlanInterface} -c 1 ${vlanIP clientNodeNum}")
+        '';
+    };
+    virtual = {
+      name = "Virtual";
+      nodes.machine = {
+        networking.useNetworkd = networkd;
+        networking.useDHCP = false;
+        networking.interfaces.tap0 = {
+          ipv4.addresses = [ { address = "192.168.1.1"; prefixLength = 24; } ];
+          ipv6.addresses = [ { address = "2001:1470:fffd:2096::"; prefixLength = 64; } ];
+          virtual = true;
+          mtu = 1342;
+          macAddress = "02:de:ad:be:ef:01";
+        };
+        networking.interfaces.tun0 = {
+          ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
+          ipv6.addresses = [ { address = "2001:1470:fffd:2097::"; prefixLength = 64; } ];
+          virtual = true;
+          mtu = 1343;
+        };
+      };
+
+      testScript = ''
+        targetList = """
+        tap0: tap persist user 0
+        tun0: tun persist user 0
+        """.strip()
+
+        with subtest("Wait for networking to come up"):
+            machine.start()
+            machine.wait_for_unit("network.target")
+
+        with subtest("Test interfaces set up"):
+            list = machine.succeed("ip tuntap list | sort").strip()
+            assert (
+                list == targetList
+            ), """
+            The list of virtual interfaces does not match the expected one:
+            Result:
+              {}
+            Expected:
+              {}
+            """.format(
+                list, targetList
+            )
+        with subtest("Test MTU and MAC Address are configured"):
+            machine.wait_until_succeeds("ip link show dev tap0 | grep 'mtu 1342'")
+            machine.wait_until_succeeds("ip link show dev tun0 | grep 'mtu 1343'")
+            assert "02:de:ad:be:ef:01" in machine.succeed("ip link show dev tap0")
+      '' # network-addresses-* only exist in scripted networking
+      + optionalString (!networkd) ''
+        with subtest("Test interfaces clean up"):
+            machine.succeed("systemctl stop network-addresses-tap0")
+            machine.sleep(10)
+            machine.succeed("systemctl stop network-addresses-tun0")
+            machine.sleep(10)
+            residue = machine.succeed("ip tuntap list")
+            assert (
+                residue == ""
+            ), "Some virtual interface has not been properly cleaned:\n{}".format(residue)
+      '';
+    };
+    privacy = {
+      name = "Privacy";
+      nodes.router = { ... }: {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = true;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.enp1s0.ipv6.addresses = singleton {
+            address = "fd00:1234:5678:1::1";
+            prefixLength = 64;
+          };
+        };
+        services.radvd = {
+          enable = true;
+          config = ''
+            interface enp1s0 {
+              AdvSendAdvert on;
+              AdvManagedFlag on;
+              AdvOtherConfigFlag on;
+
+              prefix fd00:1234:5678:1::/64 {
+                AdvAutonomous on;
+                AdvOnLink on;
+              };
+            };
+          '';
+        };
+      };
+      nodes.client_with_privacy = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.enp1s0 = {
+            tempAddress = "default";
+            ipv4.addresses = mkOverride 0 [ ];
+            ipv6.addresses = mkOverride 0 [ ];
+            useDHCP = true;
+          };
+        };
+      };
+      nodes.client = { pkgs, ... }: with pkgs.lib; {
+        virtualisation.interfaces.enp1s0.vlan = 1;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+          interfaces.enp1s0 = {
+            tempAddress = "enabled";
+            ipv4.addresses = mkOverride 0 [ ];
+            ipv6.addresses = mkOverride 0 [ ];
+            useDHCP = true;
+          };
+        };
+      };
+      testScript = { ... }:
+        ''
+          start_all()
+
+          client.wait_for_unit("network.target")
+          client_with_privacy.wait_for_unit("network.target")
+          router.wait_for_unit("network-online.target")
+
+          with subtest("Wait until we have an ip address"):
+              client_with_privacy.wait_until_succeeds(
+                  "ip addr show dev enp1s0 | grep -q 'fd00:1234:5678:1:'"
+              )
+              client.wait_until_succeeds("ip addr show dev enp1s0 | grep -q 'fd00:1234:5678:1:'")
+
+          with subtest("Test vlan 1"):
+              client_with_privacy.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+              client.wait_until_succeeds("ping -c 1 fd00:1234:5678:1::1")
+
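+          # An EUI-64 interface identifier embeds "ff:fe" between the two halves
+          # of the MAC address, which is what the greps below look for: a privacy
+          # (temporary) address must not match the pattern, an EUI-64 one must.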
+          with subtest("Test address used is temporary"):
+              client_with_privacy.wait_until_succeeds(
+                  "! ip route get fd00:1234:5678:1::1 | grep -q ':[a-f0-9]*ff:fe[a-f0-9]*:'"
+              )
+
+          with subtest("Test address used is EUI-64"):
+              client.wait_until_succeeds(
+                  "ip route get fd00:1234:5678:1::1 | grep -q ':[a-f0-9]*ff:fe[a-f0-9]*:'"
+              )
+        '';
+    };
+    routes = {
+      name = "routes";
+      nodes.machine = {
+        networking.useNetworkd = networkd;
+        networking.useDHCP = false;
+        networking.interfaces.eth0 = {
+          ipv4.addresses = [ { address = "192.168.1.2"; prefixLength = 24; } ];
+          ipv6.addresses = [ { address = "2001:1470:fffd:2097::"; prefixLength = 64; } ];
+          ipv6.routes = [
+            { address = "fdfd:b3f0::"; prefixLength = 48; }
+            { address = "2001:1470:fffd:2098::"; prefixLength = 64; via = "fdfd:b3f0::1"; }
+          ];
+          ipv4.routes = [
+            { address = "10.0.0.0"; prefixLength = 16; options = {
+              mtu = "1500";
+              # Explicitly set scope because iproute and systemd-networkd
+              # disagree on what the scope should be
+              # if the type is the default "unicast"
+              scope = "link";
+            }; }
+            { address = "192.168.2.0"; prefixLength = 24; via = "192.168.1.1"; }
+          ];
+        };
+        virtualisation.vlans = [ ];
+      };
+
+      testScript = ''
+        targetIPv4Table = [
+            "10.0.0.0/16 proto static scope link mtu 1500",
+            "192.168.1.0/24 proto kernel scope link src 192.168.1.2",
+            "192.168.2.0/24 via 192.168.1.1 proto static",
+        ]
+
+        targetIPv6Table = [
+            "2001:1470:fffd:2097::/64 proto kernel metric 256 pref medium",
+            "2001:1470:fffd:2098::/64 via fdfd:b3f0::1 proto static metric 1024 pref medium",
+            "fdfd:b3f0::/48 proto static metric 1024 pref medium",
+        ]
+
+        machine.start()
+        machine.wait_for_unit("network.target")
+
+        with subtest("test routing tables"):
+            ipv4Table = machine.succeed("ip -4 route list dev eth0 | head -n3").strip()
+            ipv6Table = machine.succeed("ip -6 route list dev eth0 | head -n3").strip()
+            assert [
+                l.strip() for l in ipv4Table.splitlines()
+            ] == targetIPv4Table, """
+              The IPv4 routing table does not match the expected one:
+                Result:
+                  {}
+                Expected:
+                  {}
+              """.format(
+                ipv4Table, targetIPv4Table
+            )
+            assert [
+                l.strip() for l in ipv6Table.splitlines()
+            ] == targetIPv6Table, """
+              The IPv6 routing table does not match the expected one:
+                Result:
+                  {}
+                Expected:
+                  {}
+              """.format(
+                ipv6Table, targetIPv6Table
+            )
+
+      '' + optionalString (!networkd) ''
+        with subtest("test clean-up of the tables"):
+            machine.succeed("systemctl stop network-addresses-eth0")
+            ipv4Residue = machine.succeed("ip -4 route list dev eth0 | head -n-3").strip()
+            ipv6Residue = machine.succeed("ip -6 route list dev eth0 | head -n-3").strip()
+            assert (
+                ipv4Residue == ""
+            ), "The IPv4 routing table has not been properly cleaned:\n{}".format(ipv4Residue)
+            assert (
+                ipv6Residue == ""
+            ), "The IPv6 routing table has not been properly cleaned:\n{}".format(ipv6Residue)
+      '';
+    };
+    rename = if networkd then {
+      name = "RenameInterface";
+      nodes.machine = { pkgs, ... }: {
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+        };
+        systemd.network.links."10-custom_name" = {
+          matchConfig.MACAddress = "52:54:00:12:01:01";
+          linkConfig.Name = "custom_name";
+        };
+      };
+      testScript = ''
+        machine.succeed("udevadm settle")
+        print(machine.succeed("ip link show dev custom_name"))
+      '';
+    } else {
+      name = "RenameInterface";
+      nodes = { };
+      testScript = "";
+    };
+    # even with networkd disabled, systemd.network.links should still work
+    # (as it's handled by udev, not networkd)
+    link = {
+      name = "Link";
+      nodes.client = { pkgs, ... }: {
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+        };
+        systemd.network.links."50-foo" = {
+          matchConfig = {
+            Name = "foo";
+            Driver = "dummy";
+          };
+          linkConfig.MTUBytes = "1442";
+        };
+      };
+      testScript = ''
+        print(client.succeed("ip l add name foo type dummy"))
+        print(client.succeed("stat /etc/systemd/network/50-foo.link"))
+        client.succeed("udevadm settle")
+        assert "mtu 1442" in client.succeed("ip l show dev foo")
+      '';
+    };
+    wlanInterface = let
+      testMac = "06:00:00:00:02:00";
+    in {
+      name = "WlanInterface";
+      nodes.machine = { pkgs, ... }: {
+        boot.kernelModules = [ "mac80211_hwsim" ];
+        networking.wlanInterfaces = {
+          wlan0 = { device = "wlan0"; };
+          wap0 = { device = "wlan0"; mac = testMac; };
+        };
+      };
+      testScript = ''
+        machine.start()
+        machine.wait_for_unit("network.target")
+        machine.wait_until_succeeds("ip address show wap0 | grep -q ${testMac}")
+        machine.fail("ip address show wlan0 | grep -q ${testMac}")
+      '';
+    };
+    naughtyInterfaceNames = let
+      ifnames = [
+        # flags of ip-address
+        "home" "temporary" "optimistic"
+        "bridge_slave" "flush"
+        # flags of ip-route
+        "up" "type" "nomaster" "address"
+        # other
+        "very_loong_name" "lowerUpper" "-"
+      ];
+    in {
+      name = "naughtyInterfaceNames";
+      nodes.machine = { pkgs, ... }: {
+        networking.useNetworkd = networkd;
+        networking.bridges = listToAttrs
+          (flip map ifnames
+             (name: { inherit name; value.interfaces = []; }));
+      };
+      testScript = ''
+        machine.start()
+        machine.wait_for_unit("network.target")
+        for ifname in ${builtins.toJSON ifnames}:
+            machine.wait_until_succeeds(f"ip link show dev '{ifname}' | grep -q '{ifname}'")
+      '';
+    };
+    caseSensitiveRenaming = {
+      name = "CaseSensitiveRenaming";
+      nodes.machine = { pkgs, ... }: {
+        virtualisation.interfaces.enCustom.vlan = 11;
+        networking = {
+          useNetworkd = networkd;
+          useDHCP = false;
+        };
+      };
+      testScript = ''
+        machine.succeed("udevadm settle")
+        print(machine.succeed("ip link show dev enCustom"))
+        machine.wait_until_succeeds("ip link show dev enCustom | grep -q 52:54:00:12:0b:01")
+      '';
+    };
+  };
+
+in mapAttrs (const (attrs: makeTest (attrs // {
+  name = "${attrs.name}-Networking-${if networkd then "Networkd" else "Scripted"}";
+}))) testCases
diff --git a/nixpkgs/nixos/tests/nextcloud/basic.nix b/nixpkgs/nixos/tests/nextcloud/basic.nix
new file mode 100644
index 000000000000..ab1d8353dba0
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/basic.nix
@@ -0,0 +1,118 @@
+args@{ pkgs, nextcloudVersion ? 22, ... }:
+
+(import ../make-test-python.nix ({ pkgs, ...}: let
+  adminpass = "notproduction";
+  adminuser = "root";
+in {
+  name = "nextcloud-basic";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ globin eqyiel ];
+  };
+
+  nodes = rec {
+    # The only thing the client needs to do is download a file.
+    client = { ... }: {
+      services.davfs2.enable = true;
+      system.activationScripts.davfs2-secrets = ''
+        echo "http://nextcloud/remote.php/dav/files/${adminuser} ${adminuser} ${adminpass}" > /tmp/davfs2-secrets
+        chmod 600 /tmp/davfs2-secrets
+      '';
+      virtualisation.fileSystems = {
+        "/mnt/dav" = {
+          device = "http://nextcloud/remote.php/dav/files/${adminuser}";
+          fsType = "davfs";
+          options = let
+            davfs2Conf = (pkgs.writeText "davfs2.conf" "secrets /tmp/davfs2-secrets");
+          in [ "conf=${davfs2Conf}" "x-systemd.automount" "noauto"];
+        };
+      };
+    };
+
+    nextcloud = { config, pkgs, ... }: let
+      cfg = config;
+    in {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      systemd.tmpfiles.rules = [
+        "d /var/lib/nextcloud-data 0750 nextcloud nginx - -"
+      ];
+
+      services.nextcloud = {
+        enable = true;
+        datadir = "/var/lib/nextcloud-data";
+        hostName = "nextcloud";
+        database.createLocally = true;
+        config = {
+          # Don't inherit adminuser since "root" is supposed to be the default
+          adminpassFile = "${pkgs.writeText "adminpass" adminpass}"; # Don't try this at home!
+          dbtableprefix = "nixos_";
+        };
+        package = pkgs.${"nextcloud" + (toString nextcloudVersion)};
+        autoUpdateApps = {
+          enable = true;
+          startAt = "20:00";
+        };
+        phpExtraExtensions = all: [ all.bz2 ];
+      };
+
+      environment.systemPackages = [ cfg.services.nextcloud.occ ];
+    };
+
+    nextcloudWithoutMagick = args@{ config, pkgs, lib, ... }:
+      lib.mkMerge
+      [ (nextcloud args)
+        { services.nextcloud.enableImagemagick = false; } ];
+  };
+
+  testScript = { nodes, ... }: let
+    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
+      "''${@}"
+    '';
+    copySharedFile = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${withRcloneEnv} ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+
+    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
+      #!${pkgs.runtimeShell}
+      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
+    '';
+
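+    # Dump the reference graph of a derivation and grep it for a string;
+    # used below to check whether the VM closure pulls in the imagick PHP
+    # extension.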
+    findInClosure = what: drv: pkgs.runCommand "find-in-closure" { exportReferencesGraph = [ "graph" drv ]; inherit what; } ''
+      test -e graph
+      grep "$what" graph >$out || true
+    '';
+    nextcloudUsesImagick = findInClosure "imagick" nodes.nextcloud.system.build.vm;
+    nextcloudWithoutDoesntUseIt = findInClosure "imagick" nodes.nextcloudWithoutMagick.system.build.vm;
+  in ''
+    assert open("${nextcloudUsesImagick}").read() != ""
+    assert open("${nextcloudWithoutDoesntUseIt}").read() == ""
+
+    nextcloud.start()
+    client.start()
+    nextcloud.wait_for_unit("multi-user.target")
+    # This is just to ensure the nextcloud-occ program is working
+    nextcloud.succeed("nextcloud-occ status")
+    nextcloud.succeed("curl -sSf http://nextcloud/login")
+    # Ensure that no OpenSSL 1.1 is used.
+    nextcloud.succeed(
+        "${nodes.nextcloud.services.phpfpm.pools.nextcloud.phpPackage}/bin/php -i | grep 'OpenSSL Library Version' | awk -F'=>' '{ print $2 }' | awk '{ print $2 }' | grep -v 1.1"
+    )
+    nextcloud.succeed(
+        "${withRcloneEnv} ${copySharedFile}"
+    )
+    client.wait_for_unit("multi-user.target")
+    nextcloud.succeed("test -f /var/lib/nextcloud-data/data/root/files/test-shared-file")
+    client.succeed(
+        "${withRcloneEnv} ${diffSharedFile}"
+    )
+    assert "hi" in client.succeed("cat /mnt/dav/test-shared-file")
+    nextcloud.succeed("grep -vE '^HBEGIN:oc_encryption_module' /var/lib/nextcloud-data/data/root/files/test-shared-file")
+  '';
+})) args
diff --git a/nixpkgs/nixos/tests/nextcloud/default.nix b/nixpkgs/nixos/tests/nextcloud/default.nix
new file mode 100644
index 000000000000..19d04b28b4f9
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/default.nix
@@ -0,0 +1,25 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+
+with pkgs.lib;
+
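+# Build the test matrix: a basic test plus one per database/cache
+# combination, for every Nextcloud major version listed below.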
+foldl
+  (matrix: ver: matrix // {
+    "basic${toString ver}" = import ./basic.nix { inherit system pkgs; nextcloudVersion = ver; };
+    "with-postgresql-and-redis${toString ver}" = import ./with-postgresql-and-redis.nix {
+      inherit system pkgs;
+      nextcloudVersion = ver;
+    };
+    "with-mysql-and-memcached${toString ver}" = import ./with-mysql-and-memcached.nix {
+      inherit system pkgs;
+      nextcloudVersion = ver;
+    };
+    "with-declarative-redis-and-secrets${toString ver}" = import ./with-declarative-redis-and-secrets.nix {
+      inherit system pkgs;
+      nextcloudVersion = ver;
+    };
+  })
+{ }
+  [ 26 27 ]
diff --git a/nixpkgs/nixos/tests/nextcloud/with-declarative-redis-and-secrets.nix b/nixpkgs/nixos/tests/nextcloud/with-declarative-redis-and-secrets.nix
new file mode 100644
index 000000000000..e638f2e5b861
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/with-declarative-redis-and-secrets.nix
@@ -0,0 +1,122 @@
+args@{ nextcloudVersion ? 27, ... }:
+(import ../make-test-python.nix ({ pkgs, ...}: let
+  adminuser = "custom_admin_username";
+  # This will be used both for redis and postgresql
+  pass = "hunter2";
+  # Don't do this at home; use a file outside of the Nix store instead
+  passFile = toString (pkgs.writeText "pass-file" ''
+    ${pass}
+  '');
+in {
+  name = "nextcloud-with-declarative-redis";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eqyiel ma27 ];
+  };
+
+  nodes = {
+    # The only thing the client needs to do is download a file.
+    client = { ... }: {};
+
+    nextcloud = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.nextcloud = {
+        enable = true;
+        hostName = "nextcloud";
+        package = pkgs.${"nextcloud" + (toString nextcloudVersion)};
+        caching = {
+          apcu = false;
+          redis = true;
+          memcached = false;
+        };
+        # This test also validates that we can use an "external" database
+        database.createLocally = false;
+        config = {
+          dbtype = "pgsql";
+          dbname = "nextcloud";
+          dbuser = adminuser;
+          dbpassFile = passFile;
+          adminuser = adminuser;
+          adminpassFile = passFile;
+        };
+        secretFile = "/etc/nextcloud-secrets.json";
+
+        extraOptions.redis = {
+          dbindex = 0;
+          timeout = 1.5;
+          # password handled via secretfile below
+        };
+        configureRedis = true;
+      };
+
+      services.redis.servers."nextcloud" = {
+        enable = true;
+        port = 6379;
+        requirePass = "secret";
+      };
+
+      systemd.services.nextcloud-setup = {
+        requires = ["postgresql.service"];
+        after = [ "postgresql.service" ];
+      };
+
+      services.postgresql = {
+        enable = true;
+      };
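+      # Provision the role and database used by Nextcloud, reading the
+      # password from passFile.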
+      systemd.services.postgresql.postStart = pkgs.lib.mkAfter ''
+        password=$(cat ${passFile})
+        ${config.services.postgresql.package}/bin/psql <<EOF
+          CREATE ROLE ${adminuser} WITH LOGIN PASSWORD '$password' CREATEDB;
+          CREATE DATABASE nextcloud;
+          GRANT ALL PRIVILEGES ON DATABASE nextcloud TO ${adminuser};
+        EOF
+      '';
+
+      # This file is meant to contain secret options which should
+      # not go into the nix store. Here it is just used to set the
+      # redis password.
+      environment.etc."nextcloud-secrets.json".text = ''
+        {
+          "redis": {
+            "password": "secret"
+          }
+        }
+      '';
+    };
+  };
+
+  testScript = let
+    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${pass})"
+      "''${@}"
+    '';
+    copySharedFile = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+
+    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
+      #!${pkgs.runtimeShell}
+      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
+    '';
+  in ''
+    start_all()
+    nextcloud.wait_for_unit("multi-user.target")
+    nextcloud.succeed("curl -sSf http://nextcloud/login")
+    nextcloud.succeed(
+        "${withRcloneEnv} ${copySharedFile}"
+    )
+    client.wait_for_unit("multi-user.target")
+    client.succeed(
+        "${withRcloneEnv} ${diffSharedFile}"
+    )
+
+    # redis cache should not be empty
+    nextcloud.fail('test "[]" = "$(redis-cli --json KEYS "*")"')
+  '';
+})) args
diff --git a/nixpkgs/nixos/tests/nextcloud/with-mysql-and-memcached.nix b/nixpkgs/nixos/tests/nextcloud/with-mysql-and-memcached.nix
new file mode 100644
index 000000000000..035a7fdcb0c8
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/with-mysql-and-memcached.nix
@@ -0,0 +1,79 @@
+args@{ pkgs, nextcloudVersion ? 22, ... }:
+
+(import ../make-test-python.nix ({ pkgs, ...}: let
+  adminpass = "hunter2";
+  adminuser = "root";
+in {
+  name = "nextcloud-with-mysql-and-memcached";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  nodes = {
+    # The only thing the client needs to do is download a file.
+    client = { ... }: {};
+
+    nextcloud = { config, pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.nextcloud = {
+        enable = true;
+        hostName = "nextcloud";
+        https = true;
+        package = pkgs.${"nextcloud" + (toString nextcloudVersion)};
+        caching = {
+          apcu = true;
+          redis = false;
+          memcached = true;
+        };
+        database.createLocally = true;
+        config = {
+          dbtype = "mysql";
+          # Don't inherit adminuser since "root" is supposed to be the default
+          adminpassFile = "${pkgs.writeText "adminpass" adminpass}"; # Don't try this at home!
+        };
+      };
+
+      services.memcached.enable = true;
+    };
+  };
+
+  testScript = let
+    configureMemcached = pkgs.writeScript "configure-memcached" ''
+      #!${pkgs.runtimeShell}
+      nextcloud-occ config:system:set memcached_servers 0 0 --value 127.0.0.1 --type string
+      nextcloud-occ config:system:set memcached_servers 0 1 --value 11211 --type integer
+      nextcloud-occ config:system:set memcache.local --value '\OC\Memcache\APCu' --type string
+      nextcloud-occ config:system:set memcache.distributed --value '\OC\Memcache\Memcached' --type string
+    '';
+    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
+    '';
+    copySharedFile = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+
+    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
+      #!${pkgs.runtimeShell}
+      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
+    '';
+  in ''
+    start_all()
+    nextcloud.wait_for_unit("multi-user.target")
+    nextcloud.succeed("${configureMemcached}")
+    nextcloud.succeed("curl -sSf http://nextcloud/login")
+    nextcloud.succeed(
+        "${withRcloneEnv} ${copySharedFile}"
+    )
+    client.wait_for_unit("multi-user.target")
+    client.succeed(
+        "${withRcloneEnv} ${diffSharedFile}"
+    )
+  '';
+})) args
diff --git a/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix b/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix
new file mode 100644
index 000000000000..586bf50fd939
--- /dev/null
+++ b/nixpkgs/nixos/tests/nextcloud/with-postgresql-and-redis.nix
@@ -0,0 +1,96 @@
+args@{ pkgs, nextcloudVersion ? 22, ... }:
+
+(import ../make-test-python.nix ({ pkgs, ...}: let
+  adminpass = "hunter2";
+  adminuser = "custom-admin-username";
+in {
+  name = "nextcloud-with-postgresql-and-redis";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eqyiel ];
+  };
+
+  nodes = {
+    # The only thing the client needs to do is download a file.
+    client = { ... }: {};
+
+    nextcloud = { config, pkgs, lib, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.nextcloud = {
+        enable = true;
+        hostName = "nextcloud";
+        package = pkgs.${"nextcloud" + (toString nextcloudVersion)};
+        caching = {
+          apcu = false;
+          redis = true;
+          memcached = false;
+        };
+        database.createLocally = true;
+        config = {
+          dbtype = "pgsql";
+          inherit adminuser;
+          adminpassFile = toString (pkgs.writeText "admin-pass-file" ''
+            ${adminpass}
+          '');
+          trustedProxies = [ "::1" ];
+        };
+        notify_push = {
+          enable = true;
+          logLevel = "debug";
+        };
+        extraAppsEnable = true;
+        extraApps = {
+          inherit (pkgs."nextcloud${lib.versions.major config.services.nextcloud.package.version}Packages".apps) notify_push;
+        };
+      };
+
+      services.redis.servers."nextcloud".enable = true;
+      services.redis.servers."nextcloud".port = 6379;
+    };
+  };
+
+  testScript = let
+    configureRedis = pkgs.writeScript "configure-redis" ''
+      #!${pkgs.runtimeShell}
+      nextcloud-occ config:system:set redis 'host' --value 'localhost' --type string
+      nextcloud-occ config:system:set redis 'port' --value 6379 --type integer
+      nextcloud-occ config:system:set memcache.local --value '\OC\Memcache\Redis' --type string
+      nextcloud-occ config:system:set memcache.locking --value '\OC\Memcache\Redis' --type string
+    '';
+    withRcloneEnv = pkgs.writeScript "with-rclone-env" ''
+      #!${pkgs.runtimeShell}
+      export RCLONE_CONFIG_NEXTCLOUD_TYPE=webdav
+      export RCLONE_CONFIG_NEXTCLOUD_URL="http://nextcloud/remote.php/dav/files/${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_VENDOR="nextcloud"
+      export RCLONE_CONFIG_NEXTCLOUD_USER="${adminuser}"
+      export RCLONE_CONFIG_NEXTCLOUD_PASS="$(${pkgs.rclone}/bin/rclone obscure ${adminpass})"
+      "''${@}"
+    '';
+    copySharedFile = pkgs.writeScript "copy-shared-file" ''
+      #!${pkgs.runtimeShell}
+      echo 'hi' | ${pkgs.rclone}/bin/rclone rcat nextcloud:test-shared-file
+    '';
+
+    diffSharedFile = pkgs.writeScript "diff-shared-file" ''
+      #!${pkgs.runtimeShell}
+      diff <(echo 'hi') <(${pkgs.rclone}/bin/rclone cat nextcloud:test-shared-file)
+    '';
+  in ''
+    start_all()
+    nextcloud.wait_for_unit("multi-user.target")
+    nextcloud.succeed("${configureRedis}")
+    nextcloud.succeed("curl -sSf http://nextcloud/login")
+    nextcloud.succeed(
+        "${withRcloneEnv} ${copySharedFile}"
+    )
+    client.wait_for_unit("multi-user.target")
+    client.execute("${pkgs.nextcloud-notify_push.passthru.test_client}/bin/test_client http://nextcloud ${adminuser} ${adminpass} >&2 &")
+    client.succeed(
+        "${withRcloneEnv} ${diffSharedFile}"
+    )
+    nextcloud.wait_until_succeeds("journalctl -u nextcloud-notify_push | grep -q \"Sending ping to ${adminuser}\"")
+
+    # redis cache should not be empty
+    nextcloud.fail('test "[]" = "$(redis-cli --json KEYS "*")"')
+  '';
+})) args
diff --git a/nixpkgs/nixos/tests/nexus.nix b/nixpkgs/nixos/tests/nexus.nix
new file mode 100644
index 000000000000..87bb4d2eb58a
--- /dev/null
+++ b/nixpkgs/nixos/tests/nexus.nix
@@ -0,0 +1,32 @@
+# verifies:
+#   1. nexus service starts on server
+#   2. nexus service can start up on server (creating the database and other initial state)
+#   3. the web application is reachable via HTTP
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "nexus";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ironpinguin ];
+  };
+
+  nodes = {
+
+    server =
+      { ... }:
+      { virtualisation.memorySize = 2047; # qemu-system-i386 has a 2047M limit
+        virtualisation.diskSize = 8192;
+
+        services.nexus.enable = true;
+      };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("nexus")
+    server.wait_for_open_port(8081)
+
+    server.succeed("curl -f 127.0.0.1:8081")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nfs/default.nix b/nixpkgs/nixos/tests/nfs/default.nix
new file mode 100644
index 000000000000..6bc803c91b46
--- /dev/null
+++ b/nixpkgs/nixos/tests/nfs/default.nix
@@ -0,0 +1,9 @@
+{ version ? 4
+, system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}: {
+  simple = import ./simple.nix { inherit version system pkgs; };
+} // pkgs.lib.optionalAttrs (version == 4) {
+  # TODO: Test kerberos + nfsv3
+  kerberos = import ./kerberos.nix { inherit version system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/nfs/kerberos.nix b/nixpkgs/nixos/tests/nfs/kerberos.nix
new file mode 100644
index 000000000000..a7d08bc628c6
--- /dev/null
+++ b/nixpkgs/nixos/tests/nfs/kerberos.nix
@@ -0,0 +1,131 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+
+let
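+  # Kerberos client settings shared by both nodes: hosts under nfs.test
+  # belong to the NFS.TEST realm, whose KDC and admin server run on the
+  # server node.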
+  krb5 =
+    { enable = true;
+      domain_realm."nfs.test"   = "NFS.TEST";
+      libdefaults.default_realm = "NFS.TEST";
+      realms."NFS.TEST" =
+        { admin_server = "server.nfs.test";
+          kdc = "server.nfs.test";
+        };
+    };
+
+  hosts =
+    ''
+      192.168.1.1 client.nfs.test
+      192.168.1.2 server.nfs.test
+    '';
+
+  users = {
+    users.alice = {
+        isNormalUser = true;
+        name = "alice";
+        uid = 1000;
+      };
+  };
+
+in
+
+{
+  name = "nfsv4-with-kerberos";
+
+  nodes = {
+    client = { lib, ... }:
+      { inherit krb5 users;
+
+        networking.extraHosts = hosts;
+        networking.domain = "nfs.test";
+        networking.hostName = "client";
+
+        virtualisation.fileSystems =
+          { "/data" = {
+              device  = "server.nfs.test:/";
+              fsType  = "nfs";
+              options = [ "nfsvers=4" "sec=krb5p" "noauto" ];
+            };
+          };
+      };
+
+    server = { lib, ...}:
+      { inherit krb5 users;
+
+        networking.extraHosts = hosts;
+        networking.domain = "nfs.test";
+        networking.hostName = "server";
+
+        networking.firewall.allowedTCPPorts = [
+          111  # rpc
+          2049 # nfs
+          88   # kerberos
+          749  # kerberos admin
+        ];
+
+        services.kerberos_server.enable = true;
+        services.kerberos_server.realms =
+          { "NFS.TEST".acl =
+            [ { access = "all"; principal = "admin/admin"; } ];
+          };
+
+        services.nfs.server.enable = true;
+        services.nfs.server.createMountPoints = true;
+        services.nfs.server.exports =
+          ''
+            /data *(rw,no_root_squash,fsid=0,sec=krb5p)
+          '';
+      };
+  };
+
+  testScript =
+    ''
+      server.succeed("mkdir -p /data/alice")
+      server.succeed("chown alice:users /data/alice")
+
+      # set up kerberos database
+      server.succeed(
+          "kdb5_util create -s -r NFS.TEST -P master_key",
+          "systemctl restart kadmind.service kdc.service",
+      )
+      server.wait_for_unit("kadmind.service")
+      server.wait_for_unit("kdc.service")
+
+      # create principals
+      server.succeed(
+          "kadmin.local add_principal -randkey nfs/server.nfs.test",
+          "kadmin.local add_principal -randkey nfs/client.nfs.test",
+          "kadmin.local add_principal -pw admin_pw admin/admin",
+          "kadmin.local add_principal -pw alice_pw alice",
+      )
+
+      # add principals to server keytab
+      server.succeed("kadmin.local ktadd nfs/server.nfs.test")
+      server.succeed("systemctl start rpc-gssd.service rpc-svcgssd.service")
+      server.wait_for_unit("rpc-gssd.service")
+      server.wait_for_unit("rpc-svcgssd.service")
+
+      client.wait_for_unit("network-online.target")
+
+      # add principals to client keytab
+      client.succeed("echo admin_pw | kadmin -p admin/admin ktadd nfs/client.nfs.test")
+      client.succeed("systemctl start rpc-gssd.service")
+      client.wait_for_unit("rpc-gssd.service")
+
+      with subtest("nfs share mounts"):
+          client.succeed("systemctl restart data.mount")
+          client.wait_for_unit("data.mount")
+
+      with subtest("permissions on nfs share are enforced"):
+          client.fail("su alice -c 'ls /data'")
+          client.succeed("su alice -c 'echo alice_pw | kinit'")
+          client.succeed("su alice -c 'ls /data'")
+
+          client.fail("su alice -c 'echo bla >> /data/foo'")
+          client.succeed("su alice -c 'echo bla >> /data/alice/foo'")
+          server.succeed("test -e /data/alice/foo")
+
+      with subtest("uids/gids are mapped correctly on nfs share"):
+          ids = client.succeed("stat -c '%U %G' /data/alice").split()
+          expected = ["alice", "users"]
+          assert ids == expected, f"ids incorrect: got {ids} expected {expected}"
+    '';
+})
diff --git a/nixpkgs/nixos/tests/nfs/simple.nix b/nixpkgs/nixos/tests/nfs/simple.nix
new file mode 100644
index 000000000000..026da9563bc0
--- /dev/null
+++ b/nixpkgs/nixos/tests/nfs/simple.nix
@@ -0,0 +1,95 @@
+import ../make-test-python.nix ({ pkgs, version ? 4, ... }:
+
+let
+
+  client =
+    { pkgs, ... }:
+    { virtualisation.fileSystems =
+        { "/data" =
+           { # NFSv4 exposes the export with fsid=0 as the virtual root directory
+             device = if (version == 4) then "server:/" else "server:/data";
+             fsType = "nfs";
+             options = [ "vers=${toString version}" ];
+           };
+        };
+      networking.firewall.enable = false; # FIXME: only open statd
+    };
+
+in
+
+{
+  name = "nfs";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes =
+    { client1 = client;
+      client2 = client;
+
+      server =
+        { ... }:
+        { services.nfs.server.enable = true;
+          services.nfs.server.exports =
+            ''
+              /data 192.168.1.0/255.255.255.0(rw,no_root_squash,no_subtree_check,fsid=0)
+            '';
+          services.nfs.server.createMountPoints = true;
+          networking.firewall.enable = false; # FIXME: figure out what ports need to be allowed
+        };
+    };
+
+  testScript =
+    ''
+      import time
+
+      server.wait_for_unit("nfs-server")
+      server.succeed("systemctl start network-online.target")
+      server.wait_for_unit("network-online.target")
+
+      start_all()
+
+      client1.wait_for_unit("data.mount")
+      client1.succeed("echo bla > /data/foo")
+      server.succeed("test -e /data/foo")
+
+      client2.wait_for_unit("data.mount")
+      client2.succeed("echo bla > /data/bar")
+      server.succeed("test -e /data/bar")
+
+      with subtest("restarting 'nfs-server' works correctly"):
+          server.succeed("systemctl restart nfs-server")
+          # will take 90 seconds due to the NFS grace period
+          client2.succeed("echo bla >> /data/bar")
+
+      with subtest("can get a lock"):
+          client2.succeed("time flock -n -s /data/lock true")
+
+      with subtest("client 2 fails to acquire lock held by client 1"):
+          client1.succeed("flock -x /data/lock -c 'touch locked; sleep 100000' >&2 &")
+          client1.wait_for_file("locked")
+          client2.fail("flock -n -s /data/lock true")
+
+      with subtest("client 2 obtains lock after resetting client 1"):
+          client2.succeed(
+              "flock -x /data/lock -c 'echo acquired; touch locked; sleep 100000' >&2 &"
+          )
+          client1.crash()
+          client1.start()
+          client2.wait_for_file("locked")
+
+      with subtest("locks survive server reboot"):
+          client1.wait_for_unit("data.mount")
+          server.shutdown()
+          server.start()
+          client1.succeed("touch /data/xyzzy")
+          client1.fail("time flock -n -s /data/lock true")
+
+      with subtest("unmounting during shutdown happens quickly"):
+          t1 = time.monotonic()
+          client1.shutdown()
+          duration = time.monotonic() - t1
+          # FIXME: regressed in kernel 6.1.28, temporarily disabled while investigating
+          # assert duration < 30, f"shutdown took too long ({duration} seconds)"
+    '';
+})
diff --git a/nixpkgs/nixos/tests/nghttpx.nix b/nixpkgs/nixos/tests/nghttpx.nix
new file mode 100644
index 000000000000..11cac332827d
--- /dev/null
+++ b/nixpkgs/nixos/tests/nghttpx.nix
@@ -0,0 +1,61 @@
+let
+  nginxRoot = "/run/nginx";
+in
+  import ./make-test-python.nix ({...}: {
+    name  = "nghttpx";
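+    # Topology: "webserver" serves a static file via nginx, "proxy" runs
+    # nghttpx forwarding plain-HTTP requests to it, and "client" fetches
+    # the file through the proxy.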
+    nodes = {
+      webserver = {
+        networking.firewall.allowedTCPPorts = [ 80 ];
+        systemd.services.nginx = {
+          preStart = ''
+            mkdir -p ${nginxRoot}
+            echo "Hello world!" > ${nginxRoot}/hello-world.txt
+          '';
+        };
+
+        services.nginx = {
+          enable = true;
+          virtualHosts.server = {
+            locations."/".root = nginxRoot;
+          };
+        };
+      };
+
+      proxy = {
+        networking.firewall.allowedTCPPorts = [ 80 ];
+        services.nghttpx = {
+          enable = true;
+          frontends = [
+            { server = {
+                host = "*";
+                port = 80;
+              };
+
+              params = {
+                tls = "no-tls";
+              };
+            }
+          ];
+          backends = [
+            { server = {
+                host = "webserver";
+                port = 80;
+              };
+              patterns = [ "/" ];
+              params.proto = "http/1.1";
+            }
+          ];
+        };
+      };
+
+      client = {};
+    };
+
+    testScript = ''
+      start_all()
+
+      webserver.wait_for_open_port(80)
+      proxy.wait_for_open_port(80)
+      client.wait_until_succeeds("curl -s --fail http://proxy/hello-world.txt")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/nginx-auth.nix b/nixpkgs/nixos/tests/nginx-auth.nix
new file mode 100644
index 000000000000..a85426dda871
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-auth.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-auth";
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.nginx = let
+        root = pkgs.runCommand "testdir" {} ''
+          mkdir "$out"
+          echo hello world > "$out/index.html"
+        '';
+      in {
+        enable = true;
+
+        virtualHosts.lockedroot = {
+          inherit root;
+          basicAuth.alice = "pwofa";
+        };
+
+        virtualHosts.lockedsubdir = {
+          inherit root;
+          locations."/sublocation/" = {
+            alias = "${root}/";
+            basicAuth.bob = "pwofb";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    webserver.fail("curl --fail --resolve lockedroot:80:127.0.0.1 http://lockedroot")
+    webserver.succeed(
+        "curl --fail --resolve lockedroot:80:127.0.0.1 http://alice:pwofa@lockedroot"
+    )
+
+    webserver.succeed("curl --fail --resolve lockedsubdir:80:127.0.0.1 http://lockedsubdir")
+    webserver.fail(
+        "curl --fail --resolve lockedsubdir:80:127.0.0.1 http://lockedsubdir/sublocation/index.html"
+    )
+    webserver.succeed(
+        "curl --fail --resolve lockedsubdir:80:127.0.0.1 http://bob:pwofb@lockedsubdir/sublocation/index.html"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-etag.nix b/nixpkgs/nixos/tests/nginx-etag.nix
new file mode 100644
index 000000000000..6f45eacf8b41
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-etag.nix
@@ -0,0 +1,88 @@
+import ./make-test-python.nix {
+  name = "nginx-etag";
+
+  nodes = {
+    server = { pkgs, lib, ... }: {
+      networking.firewall.enable = false;
+      services.nginx.enable = true;
+      services.nginx.virtualHosts.server = {
+        root = pkgs.runCommandLocal "testdir" {} ''
+          mkdir "$out"
+          cat > "$out/test.js" <<EOF
+          document.getElementById('foobar').setAttribute('foo', 'bar');
+          EOF
+          cat > "$out/index.html" <<EOF
+          <!DOCTYPE html>
+          <div id="foobar">test</div>
+          <script src="test.js"></script>
+          EOF
+        '';
+      };
+
+      specialisation.pass-checks.configuration = {
+        services.nginx.virtualHosts.server = {
+          root = lib.mkForce (pkgs.runCommandLocal "testdir2" {} ''
+            mkdir "$out"
+            cat > "$out/test.js" <<EOF
+            document.getElementById('foobar').setAttribute('foo', 'yay');
+            EOF
+            cat > "$out/index.html" <<EOF
+            <!DOCTYPE html>
+            <div id="foobar">test</div>
+            <script src="test.js"></script>
+            EOF
+          '');
+        };
+      };
+    };
+
+    client = { pkgs, lib, ... }: {
+      environment.systemPackages = let
+        testRunner = pkgs.writers.writePython3Bin "test-runner" {
+          libraries = [ pkgs.python3Packages.selenium ];
+        } ''
+          import os
+          import time
+
+          from selenium.webdriver import Firefox
+          from selenium.webdriver.firefox.options import Options
+
+          options = Options()
+          options.add_argument('--headless')
+          driver = Firefox(options=options)
+
+          driver.implicitly_wait(20)
+          driver.get('http://server/')
+          driver.find_element('xpath', '//div[@foo="bar"]')
+          open('/tmp/passed_stage1', 'w')
+
+          while not os.path.exists('/tmp/proceed'):
+              time.sleep(0.5)
+
+          driver.get('http://server/')
+          driver.find_element('xpath', '//div[@foo="yay"]')
+          open('/tmp/passed', 'w')
+        '';
+      in [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    inherit (nodes.server.config.system.build) toplevel;
+    newSystem = "${toplevel}/specialisation/pass-checks";
+  in ''
+    start_all()
+
+    server.wait_for_unit("nginx.service")
+    client.wait_for_unit("multi-user.target")
+    client.execute("test-runner >&2 &")
+    client.wait_for_file("/tmp/passed_stage1")
+
+    server.succeed(
+        "${newSystem}/bin/switch-to-configuration test >&2"
+    )
+    client.succeed("touch /tmp/proceed")
+
+    client.wait_for_file("/tmp/passed")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/nginx-globalredirect.nix b/nixpkgs/nixos/tests/nginx-globalredirect.nix
new file mode 100644
index 000000000000..5f5f4f344d82
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-globalredirect.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-globalredirect";
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.nginx = {
+        enable = true;
+        virtualHosts.localhost = {
+          globalRedirect = "other.example.com";
+          # Add an exception
+          locations."/noredirect".return = "200 'foo'";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    webserver.succeed("curl --fail -si http://localhost/alf | grep '^Location:.*/alf'")
+    webserver.fail("curl --fail -si http://localhost/noredirect | grep '^Location:'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-http3.nix b/nixpkgs/nixos/tests/nginx-http3.nix
new file mode 100644
index 000000000000..fc9f31037f98
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-http3.nix
@@ -0,0 +1,97 @@
+import ./make-test-python.nix ({lib, pkgs, ...}:
+let
+  hosts = ''
+    192.168.2.101 acme.test
+  '';
+
+in
+{
+  name = "nginx-http3";
+  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
+
+  nodes = {
+    server = { pkgs, ... }: {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.101"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 443 ];
+        firewall.allowedUDPPorts = [ 443 ];
+      };
+
+      security.pki.certificates = [
+        (builtins.readFile ./common/acme/server/ca.cert.pem)
+      ];
+
+      services.nginx = {
+        enable = true;
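+        # HTTP/3 support requires the QUIC-enabled nginx build.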
+        package = pkgs.nginxQuic;
+
+        virtualHosts."acme.test" = {
+          onlySSL = true;
+          sslCertificate = ./common/acme/server/acme.test.cert.pem;
+          sslCertificateKey = ./common/acme/server/acme.test.key.pem;
+          http2 = true;
+          http3 = true;
+          http3_hq = false;
+          quic = true;
+          reuseport = true;
+          root = lib.mkForce (pkgs.runCommandLocal "testdir" {} ''
+            mkdir "$out"
+            cat > "$out/index.html" <<EOF
+            <html><body>Hello World!</body></html>
+            EOF
+            cat > "$out/example.txt" <<EOF
+            Check http3 protocol.
+            EOF
+          '');
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.curlHTTP3 ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.201"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+
+      security.pki.certificates = [
+        (builtins.readFile ./common/acme/server/ca.cert.pem)
+      ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("nginx")
+    server.wait_for_open_port(443)
+
+    # Check HTTP/3 connections
+    client.succeed("curl --verbose --http3-only https://acme.test | grep 'Hello World!'")
+
+    # Check downloads
+    client.succeed("curl --verbose --http3-only https://acme.test/example.txt --output /tmp/example.txt")
+    client.succeed("cat /tmp/example.txt | grep 'Check http3 protocol.'")
+
+    # Check header reading
+    client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'content-type'")
+    client.succeed("curl --verbose --http3-only --head https://acme.test | grep 'HTTP/3 200'")
+    client.succeed("curl --verbose --http3-only --head https://acme.test/error | grep 'HTTP/3 404'")
+
+    # Check that a custom User-Agent is passed through and logged
+    client.succeed("curl --verbose --http3-only --user-agent 'Curl test 3.0' https://acme.test")
+    server.succeed("cat /var/log/nginx/access.log | grep 'Curl test 3.0'")
+
+    server.shutdown()
+    client.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-modsecurity.nix b/nixpkgs/nixos/tests/nginx-modsecurity.nix
new file mode 100644
index 000000000000..3c41da3e8d9b
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-modsecurity.nix
@@ -0,0 +1,39 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "nginx-modsecurity";
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    services.nginx = {
+      enable = true;
+      additionalModules = [ pkgs.nginxModules.modsecurity ];
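+      # The rules below block every HEAD request (phase 1) and deny access
+      # to secret.html (phase 2); the test script exercises both.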
+      virtualHosts.localhost =
+        let modsecurity_conf = pkgs.writeText "modsecurity.conf" ''
+          SecRuleEngine On
+          SecDefaultAction "phase:1,log,auditlog,deny,status:403"
+          SecDefaultAction "phase:2,log,auditlog,deny,status:403"
+          SecRule REQUEST_METHOD   "HEAD"        "id:100, phase:1, block"
+          SecRule REQUEST_FILENAME "secret.html" "id:101, phase:2, block"
+        '';
+        testroot = pkgs.runCommand "testroot" {} ''
+          mkdir -p $out
+          echo "<html><body>Hello World!</body></html>" > $out/index.html
+          echo "s3cret" > $out/secret.html
+        '';
+      in {
+        root = testroot;
+        extraConfig = ''
+          modsecurity on;
+          modsecurity_rules_file ${modsecurity_conf};
+        '';
+      };
+    };
+  };
+  testScript = ''
+    machine.wait_for_unit("nginx")
+
+    response = machine.wait_until_succeeds("curl -fvvv -s http://127.0.0.1/")
+    assert "Hello World!" in response
+
+    machine.fail("curl -fvvv -X HEAD -s http://127.0.0.1/")
+    machine.fail("curl -fvvv -s http://127.0.0.1/secret.html")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-njs.nix b/nixpkgs/nixos/tests/nginx-njs.nix
new file mode 100644
index 000000000000..72be16384f1b
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-njs.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "nginx-njs";
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    services.nginx = {
+      enable = true;
+      additionalModules = [ pkgs.nginxModules.njs ];
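+      # Serve "/" from a small njs handler instead of a static file.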
+      commonHttpConfig = ''
+        js_import http from ${builtins.toFile "http.js" ''
+          function hello(r) {
+              r.return(200, "Hello world!");
+          }
+          export default {hello};
+        ''};
+      '';
+      virtualHosts."localhost".locations."/".extraConfig = ''
+        js_content http.hello;
+      '';
+    };
+  };
+  testScript = ''
+    machine.wait_for_unit("nginx")
+
+    response = machine.wait_until_succeeds("curl -fvvv -s http://127.0.0.1/")
+    assert "Hello world!" == response, f"Expected 'Hello world!', got '{response}'"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-proxyprotocol/_.test.nix.cert.pem b/nixpkgs/nixos/tests/nginx-proxyprotocol/_.test.nix.cert.pem
new file mode 100644
index 000000000000..e5cea72610b9
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-proxyprotocol/_.test.nix.cert.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDLjCCAhagAwIBAgIIP2+4GFxOYMgwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVbWluaWNhIHJvb3QgY2EgNGU3NTJiMB4XDTIzMDEzMDAzNDExOFoXDTQzMDEz
+MDAzNDExOFowFTETMBEGA1UEAwwKKi50ZXN0Lm5peDCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAMarJSCzelnzTMT5GMoIKA/MXBNk5j277uI2Gq2MCky/
+DlBpx+tjSsKsz6QLBduKMF8OH5AgjrVAKQAtsVPDseY0Qcyx/5dgJjkdO4on+DFb
+V0SJ3ZhYPKACrqQ1SaoG+Xup37puw7sVR13J7oNvP6fAYRcjYqCiFC7VMjJNG4dR
+251jvWWidSc7v5CYw2AxrngtBgHeQuyG9QCJ1DRH8h6ioV7IeonwReN7noYtTWh8
+NDjGnw9HH2nYMcL91E+DWCxWVmbC9/orvYOT7u0Orho0t1w9BB0/zzcdojwQpMCv
+HahEmFQmdGbWTuI4caBeaDBJVsSwKlTcxLSS4MAZ0c8CAwEAAaN3MHUwDgYDVR0P
+AQH/BAQDAgWgMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggrBgEFBQcDAjAMBgNVHRMB
+Af8EAjAAMB8GA1UdIwQYMBaAFGyXySYI3gL88d7GHnGMU6wpiBf2MBUGA1UdEQQO
+MAyCCioudGVzdC5uaXgwDQYJKoZIhvcNAQELBQADggEBAJ/DpwiLVBgWyozsn++f
+kR4m0dUjnuCgpHo2EMoMZh+9og+OC0vq6WITXHaJytB3aBMxFOUTim3vwxPyWPXX
+/vy+q6jJ6QMLx1J3VIWZdmXsT+qLGbVzL/4gNoaRsLPGO06p3yVjhas+OBFx1Fee
+6kTHb82S/dzBojOJLRRo18CU9yw0FUXOPqN7HF7k2y+Twe6+iwCuCKGSFcvmRjxe
+bWy11C921bTienW0Rmq6ppFWDaUNYP8kKpMN2ViAvc0tyF6wwk5lyOiqCR+pQHJR
+H/J4qSeKDchYLKECuzd6SySz8FW/xPKogQ28zba+DBD86hpqiEJOBzxbrcN3cjUn
+7N4=
+-----END CERTIFICATE-----
diff --git a/nixpkgs/nixos/tests/nginx-proxyprotocol/_.test.nix.key.pem b/nixpkgs/nixos/tests/nginx-proxyprotocol/_.test.nix.key.pem
new file mode 100644
index 000000000000..ed2b17af0bf6
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-proxyprotocol/_.test.nix.key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAxqslILN6WfNMxPkYyggoD8xcE2TmPbvu4jYarYwKTL8OUGnH
+62NKwqzPpAsF24owXw4fkCCOtUApAC2xU8Ox5jRBzLH/l2AmOR07iif4MVtXRInd
+mFg8oAKupDVJqgb5e6nfum7DuxVHXcnug28/p8BhFyNioKIULtUyMk0bh1HbnWO9
+ZaJ1Jzu/kJjDYDGueC0GAd5C7Ib1AInUNEfyHqKhXsh6ifBF43uehi1NaHw0OMaf
+D0cfadgxwv3UT4NYLFZWZsL3+iu9g5Pu7Q6uGjS3XD0EHT/PNx2iPBCkwK8dqESY
+VCZ0ZtZO4jhxoF5oMElWxLAqVNzEtJLgwBnRzwIDAQABAoIBAFuNGOH184cqKJGI
+3RSVJ6kIGtJRKA0A4vfZyPd61nBBhx4lcRyXOCd4LYPCFKP0DZBwWLk5V6pM89gC
+NnqMbxnPsRbcXBVtGJAvWXW0L5rHJfMOuVBwMRfnxIUljVnONv/264PlcUtwZd/h
+o4lsJeBvNg7MnrG5nyVp1+T4RZxYm1P86HLp5zyT+fdj4Cr82b9j6QpxGXEfm1jV
+QA1xr1ZkrV8fgETyaE0TBIKcdt6xNfv1mpI1RE5gaP/YzcCs/mL+G0kMar4l7pO/
+6OHXTvHz+W3G6Xlha7Wq1ADoqYz2K7VoL/OgSQhIxRNujyWR6lir7eladVrKkCzu
+uzFi/HECgYEA0vSNCIK3useSypMPHhYUVNbZ4hbK0WgqSAxfJQtL3nC7KviVMAXj
+IKVR90xuzJB+ih88KCJpH84JH90paMpW0Gq1yEae90bnWa8Nj7ULLS/Zuj0WrelU
++DEGbx47IUPOtiLBxooxFKyIVhX3hWRwZ0pokSQzbgb5zYnlM6tqZ3cCgYEA8Rb2
+wtt0XmqEQedFacs4fobJoVWMcETjpuxYp0m5Kje/4QkptZIbspXGBgNtPBBRGg51
+AYSu8wYkGEueI77KiFDgY8AAkpOk2MrMVPszjOhUiO1oEfbT6ynOY5RDOuXcY6jo
+8RpSk46VkfVxt6LVmappqcVFtVWcAjdGfXeSLmkCgYAWP7SgMSkvidzxgJEXmzyJ
+th9EuSKq81GCR8vBHG/kBf+3iIAzkGtkBgufCXCmIpc1+hVeJkLwF8rekXTMmIqP
+cLG7bbdWXSQJUW0cuvtyyJkuC0NZFELh6knDbmzOFVi33PKS/gAvLgMzER4J843n
+VvGwXSEPeazfAKwrxuhyAQKBgQCOm5TPYlyNVNhy20h18d2zCivOoPn3luhKXtd5
+7OP4kw2PIYpoesqjcnC2MeS1eLlgfli70y5hVqqXLHOYlUzcIWr51iMAkREbo6oG
+QqkVmoAWlsfOiICGRC5vPM4f0sPwt4NCyt05p0fWFKd1hn5u7Ryfba90OfWUYfny
+UX5IsQKBgQCswer4Qc3UepkiYxGwSTxgIh4kYlmamU2I00Kar4uFAr9JsCbk98f0
+kaCUNZjrrvTwgRmdhwcpMDiMW/F4QkNk0I2unHcoAvzNop6c22VhHJU2XJhrQ57h
+n1iPiw0NLXiA4RQwMUMjtt3nqlpLOTXGtsF8TmpWPcAN2QcTxOutzw==
+-----END RSA PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/nginx-proxyprotocol/ca.cert.pem b/nixpkgs/nixos/tests/nginx-proxyprotocol/ca.cert.pem
new file mode 100644
index 000000000000..c0b2cc8f3df2
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-proxyprotocol/ca.cert.pem
@@ -0,0 +1,20 @@
+-----BEGIN CERTIFICATE-----
+MIIDSzCCAjOgAwIBAgIITnUr3xFw4oEwDQYJKoZIhvcNAQELBQAwIDEeMBwGA1UE
+AxMVbWluaWNhIHJvb3QgY2EgNGU3NTJiMCAXDTIzMDEzMDAzNDExOFoYDzIxMjMw
+MTMwMDM0MTE4WjAgMR4wHAYDVQQDExVtaW5pY2Egcm9vdCBjYSA0ZTc1MmIwggEi
+MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC1SrJT9k3zXIXApEyL5UDlw7F6
+MMOqE5d+8ZwMccHbEKLu0ssNRY+j31tnNYQ/r5iCNeNgUZccKBgzdU0ysyw5n4tw
+0y+MTD9fCfUXYcc8pJRPRolo6zxYO9W7WJr0nfJZ+p7zFRAjRCmzXdnZjKz0EGcg
+x9mHwn//3SuLt1ItK1n3aZ6im9NlcVtunDe3lCSL0tRgy7wDGNvWDZMO49jk4AFU
+BlMqScuiNpUzYgCxNaaGMuH3M0f0YyRAxSs6FWewLtqTIaVql7HL+3PcGAhvlKEZ
+fvfaf80F9aWI88sbEddTA0s5837zEoDwGpZl3K5sPU/O3MVEHIhAY5ICG0IBAgMB
+AAGjgYYwgYMwDgYDVR0PAQH/BAQDAgKEMB0GA1UdJQQWMBQGCCsGAQUFBwMBBggr
+BgEFBQcDAjASBgNVHRMBAf8ECDAGAQH/AgEAMB0GA1UdDgQWBBRsl8kmCN4C/PHe
+xh5xjFOsKYgX9jAfBgNVHSMEGDAWgBRsl8kmCN4C/PHexh5xjFOsKYgX9jANBgkq
+hkiG9w0BAQsFAAOCAQEAmvgpU+q+TBbz+9Y2rdiIeTfeDXtMNPf+nKI3zxYztRGC
+MoKP6jCQaFSQra4BVumFLV38DoqR1pOV1ojkiyO5c/9Iym/1Wmm8LeqgsHNqSgyS
+C7wvBcb/N9PzIBQFq/RiboDoC7bqK/0zQguCmBtGceH+AVpQyfXM+P78B1EkHozu
+67igP8GfouPp2s4Vd5P2XGkA6vMgYCtFEnCbtmmo7C8B+ymhD/D9axpMKQ1OaBg9
+jfqLOlk+Rc2nYZuaDjnUmlTkYjC6EwCNe9weYkSJgQ9QzoGJLIRARsdQdsp3C2fZ
+l2UZKkDJ2GPrrc+TdaGXZTYi0uMmvQsEKZXtqAzorQ==
+-----END CERTIFICATE-----
diff --git a/nixpkgs/nixos/tests/nginx-proxyprotocol/ca.key.pem b/nixpkgs/nixos/tests/nginx-proxyprotocol/ca.key.pem
new file mode 100644
index 000000000000..717948f5b879
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-proxyprotocol/ca.key.pem
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpQIBAAKCAQEAtUqyU/ZN81yFwKRMi+VA5cOxejDDqhOXfvGcDHHB2xCi7tLL
+DUWPo99bZzWEP6+YgjXjYFGXHCgYM3VNMrMsOZ+LcNMvjEw/Xwn1F2HHPKSUT0aJ
+aOs8WDvVu1ia9J3yWfqe8xUQI0Qps13Z2Yys9BBnIMfZh8J//90ri7dSLStZ92me
+opvTZXFbbpw3t5Qki9LUYMu8Axjb1g2TDuPY5OABVAZTKknLojaVM2IAsTWmhjLh
+9zNH9GMkQMUrOhVnsC7akyGlapexy/tz3BgIb5ShGX732n/NBfWliPPLGxHXUwNL
+OfN+8xKA8BqWZdyubD1PztzFRByIQGOSAhtCAQIDAQABAoIBAQCLeAWs1kWtvTYg
+t8UzspC0slItAKrmgt//hvxYDoPmdewC8yPG+AbDOSfmRKOTIxGeyro79UjdHnNP
+0yQqpvCU/AqYJ7/inR37jXuCG3TdUHfQbSF1F9N6xb1tvYKoQYKaelYiB8g8eUnj
+dYYM+U5tDNlpvJW6/YTfYFUJzWRo3i8jj5lhbkjcJDvdOhVxMXNXJgJAymu1KysE
+N1da2l4fzmuoN82wFE9KMyYSn+LOLWBReQQmXHZPP+2LjRIVrWoFoV49k2Ylp9tH
+yeaFx1Ya/wVx3PRnSW+zebWDcc0bAua9XU3Fi42yRq5iXOyoXHyefDfJoId7+GAO
+IF2qRw9hAoGBAM1O1l4ceOEDsEBh7HWTvmfwVfkXgT6VHeI6LGEjb88FApXgT+wT
+1s1IWVVOigLl9OKQbrjqlg9xgzrPDHYRwu5/Oz3X2WaH6wlF+d+okoqls6sCEAeo
+GfzF3sKOHQyIYjttCXE5G38uhIgVFFFfK97AbUiY8egYBr0zjVXK7xINAoGBAOIN
+1pDBFBQIoKj64opm/G9lJBLUpWLBFdWXhXS6q2jNsdY1mLMRmu/RBaKSfGz7W1a/
+a2WBedjcnTWJ/84tBsn4Qj5tLl8xkcXiN/pslWzg724ZnVsbyxM9KvAdXAma3F0g
+2EsYq8mhvbAEkpE+aoM6jwOJBnMhTRZrNMKN2lbFAoGAHmZWB4lfvLG3H1FgmehO
+gUVs9X0tff7GdgD3IUsF+zlasKaOLv6hB7R2xdLjTJqQMBwCyQ6zOYYtUD/oMHNg
+0b+1HesgHbZybuUVorBrQmxWtjOP/BJABtWlrlkso/Zt1S7H/yPdlm9k4GF+qK3W
+6RzFEcLTzvH/zXQcsV9jFuECgYEAhaX+1KiC0XFkY2OpaoCHAOlAUa3NdjyIRzcF
+XUU8MINkgCxB8qUXAHCJL1wCGoDluL0FpwbM3m1YuR200tYGLIUNzVDJ2Ng6wk8E
+H5fxJGU8ydB1Gzescdx5NWt2Tet0G89ecc/NSTHKL3YUnbDUUm/dvA5YdNscc4PA
+tsIdc60CgYEArvU1MwqGQUTDKUmaM2t3qm70fbwmOViHfyTWpn4aAQR3sK16iJMm
+V+dka62L/VYs5CIbzXvCioyugUMZGJi/zIwrViRzqJQbNnPADAW4lG88UxXqHHAH
+q33ivjgd9omGFb37saKOmR44KmjUIDvSIZF4W3EPwAMEyl5mM31Ryns=
+-----END RSA PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/nginx-proxyprotocol/default.nix b/nixpkgs/nixos/tests/nginx-proxyprotocol/default.nix
new file mode 100644
index 000000000000..2ff7debfcbe2
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-proxyprotocol/default.nix
@@ -0,0 +1,148 @@
+let
+  certs = import ./snakeoil-certs.nix;
+in
+import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-proxyprotocol";
+
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+  };
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      environment.systemPackages = [ pkgs.netcat ];
+      security.pki.certificateFiles = [
+        certs.ca.cert
+      ];
+
+      networking.extraHosts = ''
+        127.0.0.5 proxy.test.nix
+        127.0.0.5 noproxy.test.nix
+        127.0.0.3 direct-nossl.test.nix
+        127.0.0.4 unsecure-nossl.test.nix
+        127.0.0.2 direct-noproxy.test.nix
+        127.0.0.1 direct-proxy.test.nix
+      '';
+      services.nginx = {
+        enable = true;
+        defaultListen = [
+          { addr = "127.0.0.1"; proxyProtocol = true; ssl = true; }
+          { addr = "127.0.0.2"; }
+          { addr = "127.0.0.3"; ssl = false; }
+          { addr = "127.0.0.4"; ssl = false; proxyProtocol = true; }
+        ];
+        commonHttpConfig = ''
+          log_format pcombined '(proxy_protocol=$proxy_protocol_addr) - (remote_addr=$remote_addr) - (realip=$realip_remote_addr) - (upstream=) - (remote_user=$remote_user) [$time_local] '
+                        '"$request" $status $body_bytes_sent '
+                        '"$http_referer" "$http_user_agent"';
+          access_log /var/log/nginx/access.log pcombined;
+          error_log /var/log/nginx/error.log;
+        '';
+        virtualHosts =
+        let
+          commonConfig = {
+            locations."/".return = "200 '$remote_addr'";
+            extraConfig = ''
+              set_real_ip_from 127.0.0.5/32;
+              real_ip_header proxy_protocol;
+            '';
+          };
+        in
+        {
+          "*.test.nix" = commonConfig // {
+            sslCertificate = certs."*.test.nix".cert;
+            sslCertificateKey = certs."*.test.nix".key;
+            forceSSL = true;
+          };
+          "direct-nossl.test.nix" = commonConfig;
+          "unsecure-nossl.test.nix" = commonConfig // {
+            extraConfig = ''
+              real_ip_header proxy_protocol;
+            '';
+          };
+        };
+      };
+
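+      # sniproxy routes incoming TLS connections by SNI: proxy.test.nix is
+      # forwarded with a PROXY protocol header so nginx can recover the
+      # original client address, noproxy.test.nix without one.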
+      services.sniproxy = {
+        enable = true;
+        config = ''
+          error_log {
+            syslog daemon
+          }
+          access_log {
+            syslog daemon
+          }
+          listener 127.0.0.5:443 {
+            protocol tls
+            source 127.0.0.5
+          }
+          table {
+            ^proxy\.test\.nix$   127.0.0.1 proxy_protocol
+            ^noproxy\.test\.nix$ 127.0.0.2
+          }
+        '';
+      };
+    };
+  };
+
+  testScript = ''
+    def check_origin_ip(src_ip: str, dst_url: str, failure: bool = False, proxy_protocol: bool = False, expected_ip: str | None = None):
+      check = webserver.fail if failure else webserver.succeed
+      if expected_ip is None:
+        expected_ip = src_ip
+
+      return check(f"curl {'--haproxy-protocol' if proxy_protocol else '''} --interface {src_ip} --fail -L {dst_url} | grep '{expected_ip}'")
+
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_unit("sniproxy")
+    # This should be closed by virtue of ssl = true;
+    webserver.wait_for_closed_port(80, "127.0.0.1")
+    # This should be open by virtue of no explicit ssl
+    webserver.wait_for_open_port(80, "127.0.0.2")
+    # This should be open by virtue of ssl = true;
+    webserver.wait_for_open_port(443, "127.0.0.1")
+    # This should be open by virtue of no explicit ssl
+    webserver.wait_for_open_port(443, "127.0.0.2")
+    # This should be open by sniproxy
+    webserver.wait_for_open_port(443, "127.0.0.5")
+    # This should be closed by sniproxy
+    webserver.wait_for_closed_port(80, "127.0.0.5")
+
+    # Sanity checks for the NGINX module
+    # Direct HTTP connection to NGINX without TLS; this checks that ssl = false; works.
+    check_origin_ip("127.0.0.10", "http://direct-nossl.test.nix/")
+    # webserver.execute("openssl s_client -showcerts -connect direct-noproxy.test.nix:443")
+    # direct-HTTP connection to NGINX with TLS
+    check_origin_ip("127.0.0.10", "http://direct-noproxy.test.nix/")
+    check_origin_ip("127.0.0.10", "https://direct-noproxy.test.nix/")
+    # Well, sniproxy is not listening on 80 and cannot redirect
+    check_origin_ip("127.0.0.10", "http://proxy.test.nix/", failure=True)
+    check_origin_ip("127.0.0.10", "http://noproxy.test.nix/", failure=True)
+
+    # Actual PROXY protocol related tests
+    # Connecting through sniproxy should pass through the originating IP address.
+    check_origin_ip("127.0.0.10", "https://proxy.test.nix/")
+    # Connecting through sniproxy to a non-PROXY protocol enabled listener should not pass the originating IP address.
+    check_origin_ip("127.0.0.10", "https://noproxy.test.nix/", expected_ip="127.0.0.5")
+
+    # Attack tests against spoofing
+    # Let's try to spoof our IP address by connecting directly to the PROXY protocol listener.
+    # FIXME(RaitoBezarius): rewrite this using Python + (Scapy|something else), as it is unfortunately too broken.
+    # Or wait for upstream curl patch.
+    # def generate_attacker_request(original_ip: str, target_ip: str, dst_url: str):
+    #     return f"""PROXY TCP4 {original_ip} {target_ip} 80 80
+    #     GET / HTTP/1.1
+    #     Host: {dst_url}
+
+    #     """
+    # def spoof(original_ip: str, target_ip: str, dst_url: str, tls: bool = False, expect_failure: bool = True):
+    #   method = webserver.fail if expect_failure else webserver.succeed
+    #   port = 443 if tls else 80
+    #   print(webserver.execute(f"cat <<EOF | nc {target_ip} {port}\n{generate_attacker_request(original_ip, target_ip, dst_url)}\nEOF"))
+    #   return method(f"cat <<EOF | nc {target_ip} {port} | grep {original_ip}\n{generate_attacker_request(original_ip, target_ip, dst_url)}\nEOF")
+
+    # check_origin_ip("127.0.0.10", "http://unsecure-nossl.test.nix", proxy_protocol=True)
+    # spoof("1.1.1.1", "127.0.0.4", "direct-nossl.test.nix")
+    # spoof("1.1.1.1", "127.0.0.4", "unsecure-nossl.test.nix", expect_failure=False)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-proxyprotocol/generate-certs.nix b/nixpkgs/nixos/tests/nginx-proxyprotocol/generate-certs.nix
new file mode 100644
index 000000000000..b2315062035e
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-proxyprotocol/generate-certs.nix
@@ -0,0 +1,30 @@
+# Minica generates a CA key and cert, plus a key and cert for the
+# wildcard test domain used by the proxy-protocol tests.
+{
+  pkgs ? import <nixpkgs> {},
+  minica ? pkgs.minica,
+  runCommandCC ? pkgs.runCommandCC,
+}:
+let
+  conf = import ./snakeoil-certs.nix;
+  domain = conf.domain;
+  domainSanitized = pkgs.lib.replaceStrings ["*"] ["_"] domain;
+in
+  runCommandCC "generate-tests-certs" {
+    buildInputs = [
+      (minica.overrideAttrs (old: {
+        postPatch = ''
+          sed -i 's_NotAfter: time.Now().AddDate(2, 0, 30),_NotAfter: time.Now().AddDate(20, 0, 0),_' main.go
+        '';
+      }))
+    ];
+
+  } ''
+    minica \
+      --ca-key ca.key.pem \
+      --ca-cert ca.cert.pem \
+      --domains "${domain}"
+
+    mkdir -p $out
+    mv ca.*.pem $out/
+    mv ${domainSanitized}/key.pem $out/${domainSanitized}.key.pem
+    mv ${domainSanitized}/cert.pem $out/${domainSanitized}.cert.pem
+  ''
diff --git a/nixpkgs/nixos/tests/nginx-proxyprotocol/snakeoil-certs.nix b/nixpkgs/nixos/tests/nginx-proxyprotocol/snakeoil-certs.nix
new file mode 100644
index 000000000000..61af6351ca65
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-proxyprotocol/snakeoil-certs.nix
@@ -0,0 +1,14 @@
+let
+  domain = "*.test.nix";
+  domainSanitized = "_.test.nix";
+in {
+  inherit domain;
+  ca = {
+    cert = ./ca.cert.pem;
+    key = ./ca.key.pem;
+  };
+  "${domain}" = {
+    cert = ./. + "/${domainSanitized}.cert.pem";
+    key = ./. + "/${domainSanitized}.key.pem";
+  };
+}
diff --git a/nixpkgs/nixos/tests/nginx-pubhtml.nix b/nixpkgs/nixos/tests/nginx-pubhtml.nix
new file mode 100644
index 000000000000..bff24c99d41a
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-pubhtml.nix
@@ -0,0 +1,21 @@
+import ./make-test-python.nix {
+  name = "nginx-pubhtml";
+
+  nodes.machine = { pkgs, ... }: {
+    systemd.services.nginx.serviceConfig.ProtectHome = "read-only";
+    services.nginx.enable = true;
+    services.nginx.virtualHosts.localhost = {
+      locations."~ ^/\\~([a-z0-9_]+)(/.*)?$".alias = "/home/$1/public_html$2";
+    };
+    users.users.foo.isNormalUser = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx")
+    machine.wait_for_open_port(80)
+    machine.succeed("chmod 0711 /home/foo")
+    machine.succeed("su -c 'mkdir -p /home/foo/public_html' foo")
+    machine.succeed("su -c 'echo bar > /home/foo/public_html/bar.txt' foo")
+    machine.succeed('test "$(curl -fvvv http://localhost/~foo/bar.txt)" = bar')
+  '';
+}
diff --git a/nixpkgs/nixos/tests/nginx-sso.nix b/nixpkgs/nixos/tests/nginx-sso.nix
new file mode 100644
index 000000000000..221c5f4ed905
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-sso.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-sso";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ delroth ];
+  };
+
+  nodes.machine = {
+    services.nginx.sso = {
+      enable = true;
+      configuration = {
+        listen = { addr = "127.0.0.1"; port = 8080; };
+
+        providers.token.tokens = {
+          myuser = "MyToken";
+        };
+
+        acl = {
+          rule_sets = [
+            {
+              rules = [ { field = "x-application"; equals = "MyApp"; } ];
+              allow = [ "myuser" ];
+            }
+          ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("nginx-sso.service")
+    machine.wait_for_open_port(8080)
+
+    with subtest("No valid user -> 401"):
+        machine.fail("curl -sSf http://localhost:8080/auth")
+
+    with subtest("Valid user but no matching ACL -> 403"):
+        machine.fail(
+            "curl -sSf -H 'Authorization: Token MyToken' http://localhost:8080/auth"
+        )
+
+    with subtest("Valid user and matching ACL -> 200"):
+        machine.succeed(
+            "curl -sSf -H 'Authorization: Token MyToken' -H 'X-Application: MyApp' http://localhost:8080/auth"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-status-page.nix b/nixpkgs/nixos/tests/nginx-status-page.nix
new file mode 100644
index 000000000000..ff2c0940379c
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-status-page.nix
@@ -0,0 +1,72 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx-status-page";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ h7x4 ];
+  };
+
+  nodes = {
+    webserver = { ... }: {
+      virtualisation.vlans = [ 1 ];
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.1/24";
+      };
+
+      services.nginx = {
+        enable = true;
+        statusPage = true;
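+        # The status page is only reachable from localhost, so the remote
+        # client below is expected to receive a 403 on /nginx_status.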
+        virtualHosts."localhost".locations."/index.html".return = "200 'hello world\n'";
+      };
+
+      environment.systemPackages = with pkgs; [ curl ];
+    };
+
+    client = { ... }: {
+      virtualisation.vlans = [ 1 ];
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.2/24";
+      };
+
+      environment.systemPackages = with pkgs; [ curl ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_port(80)
+
+    def expect_http_code(node, code, url):
+        http_code = node.succeed(f"curl -w '%{{http_code}}' '{url}'")
+        assert http_code.split("\n")[-1].strip() == code, \
+          f"expected {code} but got following response:\n{http_code}"
+
+    with subtest("localhost can access status page"):
+        expect_http_code(webserver, "200", "http://localhost/nginx_status")
+
+    with subtest("localhost can access other page"):
+        expect_http_code(webserver, "200", "http://localhost/index.html")
+
+    with subtest("client can not access status page"):
+        expect_http_code(client, "403", "http://10.0.0.1/nginx_status")
+
+    with subtest("client can access other page"):
+        expect_http_code(client, "200", "http://10.0.0.1/index.html")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-tmpdir.nix b/nixpkgs/nixos/tests/nginx-tmpdir.nix
new file mode 100644
index 000000000000..f26f992ffe1b
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-tmpdir.nix
@@ -0,0 +1,60 @@
+let
+  dst-dir = "/run/nginx-test-tmpdir-uploads";
+in
+  import ./make-test-python.nix {
+    name = "nginx-tmpdir";
+
+    nodes.machine = { pkgs, ... }: {
+      environment.etc."tmpfiles.d/nginx-uploads.conf".text = "d ${dst-dir} 0755 nginx nginx 1d";
+
+      # overwrite tmp.conf with a short age; systemd-tmpfiles will log a "duplicate line" notice for it
+      systemd.tmpfiles.rules = [
+        "q /tmp 1777 root root 1min"
+      ];
+
+      services.nginx.enable = true;
+      # simple upload service using the nginx client body temp path
+      services.nginx.virtualHosts = {
+        localhost = {
+          locations."~ ^/upload/([0-9a-zA-Z-.]*)$" = {
+            extraConfig = ''
+              alias ${dst-dir}/$1;
+              client_body_in_file_only clean;
+              dav_methods PUT;
+              create_full_put_path on;
+              dav_access group:rw all:r;
+            '';
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      machine.wait_for_unit("nginx")
+      machine.wait_for_open_port(80)
+
+      with subtest("Needed prerequisite --http-client-body-temp-path=/tmp/nginx_client_body and private temp"):
+        machine.succeed("touch /tmp/systemd-private-*-nginx.service-*/tmp/nginx_client_body")
+
+      with subtest("Working upload of test setup"):
+        machine.succeed("curl -X PUT http://localhost/upload/test1 --fail --data-raw 'Raw data 1'")
+        machine.succeed('test "$(cat ${dst-dir}/test1)" = "Raw data 1"')
+
+      # let the tmpfiles clean service do its job
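+      # (the q rule above ages /tmp entries after 1min, so once systemd-tmpfiles-clean
+      # runs, the /tmp/touched marker created below should be gone)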
+      machine.succeed("touch /tmp/touched")
+      machine.wait_until_succeeds(
+        "sleep 15 && systemctl start systemd-tmpfiles-clean.service && [ ! -f /tmp/touched ]",
+        timeout=150
+      )
+
+      with subtest("Working upload after cleaning"):
+        machine.succeed("curl -X PUT http://localhost/upload/test2 --fail --data-raw 'Raw data 2'")
+        machine.succeed('test "$(cat ${dst-dir}/test2)" = "Raw data 2"')
+
+      # manually remove the nginx temp dir
+      machine.succeed("rm -r --interactive=never /tmp/systemd-private-*-nginx.service-*/tmp/nginx_client_body")
+
+      with subtest("Broken upload after manual temp dir removal"):
+        machine.fail("curl -X PUT http://localhost/upload/test3 --fail --data-raw 'Raw data 3'")
+    '';
+  }
diff --git a/nixpkgs/nixos/tests/nginx-unix-socket.nix b/nixpkgs/nixos/tests/nginx-unix-socket.nix
new file mode 100644
index 000000000000..4640eaa171bd
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-unix-socket.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  nginxSocketPath = "/var/run/nginx/test.sock";
+in
+{
+  name = "nginx-unix-socket";
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.nginx = {
+        enable = true;
+        virtualHosts.localhost = {
+          serverName = "localhost";
+          listen = [{ addr = "unix:${nginxSocketPath}"; }];
+          locations."/test".return = "200 'foo'";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    webserver.wait_for_unit("nginx")
+    webserver.wait_for_open_unix_socket("${nginxSocketPath}")
+
+    webserver.succeed("curl --fail --silent --unix-socket '${nginxSocketPath}' http://localhost/test | grep '^foo$'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nginx-variants.nix b/nixpkgs/nixos/tests/nginx-variants.nix
new file mode 100644
index 000000000000..0faa0127669d
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx-variants.nix
@@ -0,0 +1,33 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
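+# Build one test per nginx variant package listed at the bottom of this file.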
+builtins.listToAttrs (
+  builtins.map
+    (nginxName:
+      {
+        name = nginxName;
+        value = makeTest {
+          name = "nginx-variant-${nginxName}";
+
+          nodes.machine = { pkgs, ... }: {
+            services.nginx = {
+              enable = true;
+              virtualHosts.localhost.locations."/".return = "200 'foo'";
+              package = pkgs."${nginxName}";
+            };
+          };
+
+          testScript = ''
+            machine.wait_for_unit("nginx")
+            machine.wait_for_open_port(80)
+            machine.succeed('test "$(curl -fvvv http://localhost/)" = foo')
+          '';
+        };
+      }
+    )
+    [ "nginxStable" "nginxMainline" "nginxQuic" "nginxShibboleth" "openresty" "tengine" ]
+)
diff --git a/nixpkgs/nixos/tests/nginx.nix b/nixpkgs/nixos/tests/nginx.nix
new file mode 100644
index 000000000000..8b1f921ec520
--- /dev/null
+++ b/nixpkgs/nixos/tests/nginx.nix
@@ -0,0 +1,137 @@
+# verifies:
+#   1. nginx generates a config file with the shared http context definitions
+#      placed above the generated virtual host configs.
+#   2. whether the ETag header is properly generated whenever we're serving
+#      files in Nix store paths
+#   3. nginx doesn't restart on configuration changes (only reloads)
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nginx";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mbbx6spp danbst ];
+  };
+
+  nodes = {
+    webserver = { pkgs, lib, ... }: {
+      services.nginx.enable = true;
+      services.nginx.commonHttpConfig = ''
+        log_format ceeformat '@cee: {"status":"$status",'
+          '"request_time":$request_time,'
+          '"upstream_response_time":$upstream_response_time,'
+          '"pipe":"$pipe","bytes_sent":$bytes_sent,'
+          '"connection":"$connection",'
+          '"remote_addr":"$remote_addr",'
+          '"host":"$host",'
+          '"timestamp":"$time_iso8601",'
+          '"request":"$request",'
+          '"http_referer":"$http_referer",'
+          '"upstream_addr":"$upstream_addr"}';
+      '';
+      services.nginx.virtualHosts."0.my.test" = {
+        extraConfig = ''
+          access_log syslog:server=unix:/dev/log,facility=user,tag=mytag,severity=info ceeformat;
+          location /favicon.ico { allow all; access_log off; log_not_found off; }
+        '';
+      };
+
+      services.nginx.virtualHosts.localhost = {
+        root = pkgs.runCommand "testdir" {} ''
+          mkdir "$out"
+          echo hello world > "$out/index.html"
+        '';
+      };
+
+      services.nginx.enableReload = true;
+
+      specialisation.etagSystem.configuration = {
+        services.nginx.virtualHosts.localhost = {
+          root = lib.mkForce (pkgs.runCommand "testdir2" {} ''
+            mkdir "$out"
+            echo content changed > "$out/index.html"
+          '');
+        };
+      };
+
+      specialisation.justReloadSystem.configuration = {
+        services.nginx.virtualHosts."1.my.test".listen = [ { addr = "127.0.0.1"; port = 8080; }];
+      };
+
+      specialisation.reloadRestartSystem.configuration = {
+        services.nginx.package = pkgs.nginxMainline;
+      };
+
+      specialisation.reloadWithErrorsSystem.configuration = {
+        services.nginx.package = pkgs.nginxMainline;
+        services.nginx.virtualHosts."!@$$(#*%".locations."~@#*$*!)".proxyPass = ";;;";
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    etagSystem = "${nodes.webserver.system.build.toplevel}/specialisation/etagSystem";
+    justReloadSystem = "${nodes.webserver.system.build.toplevel}/specialisation/justReloadSystem";
+    reloadRestartSystem = "${nodes.webserver.system.build.toplevel}/specialisation/reloadRestartSystem";
+    reloadWithErrorsSystem = "${nodes.webserver.system.build.toplevel}/specialisation/reloadWithErrorsSystem";
+  in ''
+    url = "http://localhost/index.html"
+
+
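+    # Fetch the ETag served for the store-path root, then verify that a
+    # conditional request with If-None-Match returns 304 Not Modified.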
+    def check_etag():
+        etag = webserver.succeed(
+            f'curl -v {url} 2>&1 | sed -n -e "s/^< etag: *//ip"'
+        ).rstrip()
+        http_code = webserver.succeed(
+            f"curl -w '%{{http_code}}' --head --fail -H 'If-None-Match: {etag}' {url}"
+        )
+        assert http_code.split("\n")[-1] == "304"
+
+        return etag
+
+
+    def wait_for_nginx_on_port(port):
+        webserver.wait_for_unit("nginx")
+        webserver.wait_for_open_port(port)
+
+
+    # nginx can be ready before multi-user.target, in which case switching to
+    # a different configuration might not realize it needs to restart nginx.
+    webserver.wait_for_unit("multi-user.target")
+
+    wait_for_nginx_on_port(80)
+
+    with subtest("check ETag if serving Nix store paths"):
+        old_etag = check_etag()
+        webserver.succeed(
+            "${etagSystem}/bin/switch-to-configuration test >&2"
+        )
+        wait_for_nginx_on_port(80)
+        new_etag = check_etag()
+        assert old_etag != new_etag
+
+    with subtest("config is reloaded on nixos-rebuild switch"):
+        webserver.succeed(
+            "${justReloadSystem}/bin/switch-to-configuration test >&2"
+        )
+        wait_for_nginx_on_port(8080)
+        webserver.fail("journalctl -u nginx | grep -q -i stopped")
+        webserver.succeed("journalctl -u nginx | grep -q -i reloaded")
+
+    with subtest("restart when nginx package changes"):
+        webserver.succeed(
+            "${reloadRestartSystem}/bin/switch-to-configuration test >&2"
+        )
+        wait_for_nginx_on_port(80)
+        webserver.succeed("journalctl -u nginx | grep -q -i stopped")
+
+    with subtest("nixos-rebuild --switch should fail when there are configuration errors"):
+        webserver.fail(
+            "${reloadWithErrorsSystem}/bin/switch-to-configuration test >&2"
+        )
+        webserver.succeed("[[ $(systemctl is-failed nginx-config-reload) == failed ]]")
+        webserver.succeed("[[ $(systemctl is-failed nginx) == active ]]")
+        # Just to make sure the operation is idempotent: during development there was
+        # a situation where the error showed up on the first run but disappeared on
+        # subsequent rebuilds.
+        webserver.fail(
+            "${reloadWithErrorsSystem}/bin/switch-to-configuration test >&2"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nitter.nix b/nixpkgs/nixos/tests/nitter.nix
new file mode 100644
index 000000000000..8bc55ba8c69f
--- /dev/null
+++ b/nixpkgs/nixos/tests/nitter.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "nitter";
+  meta.maintainers = with pkgs.lib.maintainers; [ erdnaxe ];
+
+  nodes.machine = {
+    services.nitter.enable = true;
+    # Test CAP_NET_BIND_SERVICE
+    services.nitter.server.port = 80;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nitter.service")
+    machine.wait_for_open_port(80)
+    machine.succeed("curl --fail http://localhost:80/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nix-ld.nix b/nixpkgs/nixos/tests/nix-ld.nix
new file mode 100644
index 000000000000..8733f5b0c397
--- /dev/null
+++ b/nixpkgs/nixos/tests/nix-ld.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ lib, pkgs, ...} :
+{
+  name = "nix-ld";
+  nodes.machine = { pkgs, ... }: {
+    programs.nix-ld.enable = true;
+    environment.systemPackages = [
+      (pkgs.runCommand "patched-hello" {} ''
+        install -D -m755 ${pkgs.hello}/bin/hello $out/bin/hello
+        patchelf $out/bin/hello --set-interpreter $(cat ${pkgs.nix-ld}/nix-support/ldpath)
+      '')
+    ];
+  };
+  testScript = ''
+    start_all()
+    machine.succeed("hello")
+ '';
+})
diff --git a/nixpkgs/nixos/tests/nix-serve-ssh.nix b/nixpkgs/nixos/tests/nix-serve-ssh.nix
new file mode 100644
index 000000000000..1eb8d5b395b1
--- /dev/null
+++ b/nixpkgs/nixos/tests/nix-serve-ssh.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let inherit (import ./ssh-keys.nix pkgs)
+      snakeOilPrivateKey snakeOilPublicKey;
+    ssh-config = builtins.toFile "ssh.conf" ''
+      UserKnownHostsFile=/dev/null
+      StrictHostKeyChecking=no
+    '';
+in
+   { name = "nix-ssh-serve";
+     meta.maintainers = [ lib.maintainers.shlevy ];
+     nodes =
+       { server.nix.sshServe =
+           { enable = true;
+             keys = [ snakeOilPublicKey ];
+             protocol = "ssh-ng";
+           };
+         server.nix.package = pkgs.nix;
+         client.nix.package = pkgs.nix;
+       };
+     testScript = ''
+       start_all()
+
+       client.succeed("mkdir -m 700 /root/.ssh")
+       client.succeed(
+           "cat ${ssh-config} > /root/.ssh/config"
+       )
+       client.succeed(
+           "cat ${snakeOilPrivateKey} > /root/.ssh/id_ecdsa"
+       )
+       client.succeed("chmod 600 /root/.ssh/id_ecdsa")
+
+       client.succeed("nix-store --add /etc/machine-id > mach-id-path")
+
+       server.wait_for_unit("sshd")
+
+       client.fail("diff /root/other-store$(cat mach-id-path) /etc/machine-id")
+       # Currently due to shared store this is a noop :(
+       client.succeed("nix copy --experimental-features 'nix-command' --to ssh-ng://nix-ssh@server $(cat mach-id-path)")
+       client.succeed(
+           "nix-store --realise $(cat mach-id-path) --store /root/other-store --substituters ssh-ng://nix-ssh@server"
+       )
+       client.succeed("diff /root/other-store$(cat mach-id-path) /etc/machine-id")
+     '';
+   }
+)
diff --git a/nixpkgs/nixos/tests/nix-serve.nix b/nixpkgs/nixos/tests/nix-serve.nix
new file mode 100644
index 000000000000..3aa913f81107
--- /dev/null
+++ b/nixpkgs/nixos/tests/nix-serve.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "nix-serve";
+  nodes.machine = { pkgs, ... }: {
+    services.nix-serve.enable = true;
+    environment.systemPackages = [
+      pkgs.hello
+    ];
+  };
+  testScript = let
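+    # Extract the base-32 hash component of hello's store path; it is used
+    # below to request the corresponding NAR from nix-serve.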
+    pkgHash = builtins.head (
+      builtins.match "${builtins.storeDir}/([^-]+).+" (toString pkgs.hello)
+    );
+  in ''
+    start_all()
+    machine.wait_for_unit("nix-serve.service")
+    machine.wait_for_open_port(5000)
+    machine.succeed(
+        "curl --fail -g http://0.0.0.0:5000/nar/${pkgHash}.nar -o /tmp/hello.nar"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nixops/default.nix b/nixpkgs/nixos/tests/nixops/default.nix
new file mode 100644
index 000000000000..b8f747b2a19f
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixops/default.nix
@@ -0,0 +1,114 @@
+{ pkgs, ... }:
+let
+  inherit (pkgs) lib;
+
+  tests = {
+    # TODO: uncomment stable
+    #  - Blocked on https://github.com/NixOS/nixpkgs/issues/138584 which has a
+    #    PR in staging: https://github.com/NixOS/nixpkgs/pull/139986
+    #  - Alternatively, blocked on a NixOps 2 release
+    #    https://github.com/NixOS/nixops/issues/1242
+    # stable = testsLegacyNetwork { nixopsPkg = pkgs.nixops; };
+    unstable = testsForPackage { nixopsPkg = pkgs.nixops_unstable; };
+
+    # inherit testsForPackage;
+  };
+
+  testsForPackage = args: lib.recurseIntoAttrs {
+    legacyNetwork = testLegacyNetwork args;
+    passthru.override = args': testsForPackage (args // args');
+  };
+
+  testLegacyNetwork = { nixopsPkg, ... }: pkgs.nixosTest ({
+    name = "nixops-legacy-network";
+    nodes = {
+      deployer = { config, lib, nodes, pkgs, ... }: {
+        imports = [ ../../modules/installer/cd-dvd/channel.nix ];
+        environment.systemPackages = [ nixopsPkg ];
+        nix.settings.substituters = lib.mkForce [ ];
+        users.users.person.isNormalUser = true;
+        virtualisation.writableStore = true;
+        virtualisation.additionalPaths = [
+          pkgs.hello
+          pkgs.figlet
+        ];
+
+        # TODO: make this efficient, https://github.com/NixOS/nixpkgs/issues/180529
+        system.includeBuildDependencies = true;
+      };
+      server = { lib, ... }: {
+        imports = [ ./legacy/base-configuration.nix ];
+      };
+    };
+
+    testScript = { nodes }:
+      let
+        deployerSetup = pkgs.writeScript "deployerSetup" ''
+          #!${pkgs.runtimeShell}
+          set -eux -o pipefail
+          cp --no-preserve=mode -r ${./legacy} unicorn
+          cp --no-preserve=mode ${../ssh-keys.nix} unicorn/ssh-keys.nix
+          mkdir -p ~/.ssh
+          cp ${snakeOilPrivateKey} ~/.ssh/id_ed25519
+          chmod 0400 ~/.ssh/id_ed25519
+        '';
+        serverNetworkJSON = pkgs.writeText "server-network.json"
+          (builtins.toJSON nodes.server.system.build.networkConfig);
+      in
+      ''
+        import shlex
+
+        def deployer_do(cmd):
+            cmd = shlex.quote(cmd)
+            return deployer.succeed(f"su person -l -c {cmd} &>/dev/console")
+
+        start_all()
+
+        deployer_do("cat /etc/hosts")
+
+        deployer_do("${deployerSetup}")
+        deployer_do("cp ${serverNetworkJSON} unicorn/server-network.json")
+
+        # Establish that ssh works, regardless of nixops
+        # Easy way to accept the server host key too.
+        server.wait_for_open_port(22)
+        deployer.wait_for_unit("network.target")
+
+        # Put newlines on console, to flush the console reader's line buffer
+        # in case nixops' last output did not end in a newline, as is the case
+        # with a status line (if implemented?)
+        deployer.succeed("while sleep 60s; do echo [60s passed]; done >&2 &")
+
+        deployer_do("cd ~/unicorn; ssh -oStrictHostKeyChecking=accept-new root@server echo hi")
+
+        # Create and deploy
+        deployer_do("cd ~/unicorn; nixops create")
+
+        deployer_do("cd ~/unicorn; nixops deploy --confirm")
+
+        deployer_do("cd ~/unicorn; nixops ssh server 'hello | figlet'")
+      '';
+  });
+
+  inherit (import ../ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+
+  /*
+    Return a store path with a closure containing everything including
+    derivations and all build dependency outputs, all the way down.
+  */
+  allDrvOutputs = pkg:
+    let name = "allDrvOutputs-${pkg.pname or pkg.name or "unknown"}";
+    in
+    pkgs.runCommand name { refs = pkgs.writeReferencesToFile pkg.drvPath; } ''
+      touch $out
+      while read ref; do
+        case $ref in
+          *.drv)
+            cat $ref >>$out
+            ;;
+        esac
+      done <$refs
+    '';
+
+in
+tests
diff --git a/nixpkgs/nixos/tests/nixops/legacy/base-configuration.nix b/nixpkgs/nixos/tests/nixops/legacy/base-configuration.nix
new file mode 100644
index 000000000000..7f1c07a5c4a9
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixops/legacy/base-configuration.nix
@@ -0,0 +1,31 @@
+{ lib, modulesPath, pkgs, ... }:
+let
+  ssh-keys =
+    if builtins.pathExists ../../ssh-keys.nix
+    then # Outside sandbox
+      ../../ssh-keys.nix
+    else # In sandbox
+      ./ssh-keys.nix;
+
+  inherit (import ssh-keys pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+in
+{
+  imports = [
+    (modulesPath + "/virtualisation/qemu-vm.nix")
+    (modulesPath + "/testing/test-instrumentation.nix")
+  ];
+  virtualisation.writableStore = true;
+  nix.settings.substituters = lib.mkForce [ ];
+  virtualisation.graphics = false;
+  documentation.enable = false;
+  services.qemuGuest.enable = true;
+  boot.loader.grub.enable = false;
+
+  services.openssh.enable = true;
+  users.users.root.openssh.authorizedKeys.keys = [
+    snakeOilPublicKey
+  ];
+  security.pam.services.sshd.limits =
+    [{ domain = "*"; item = "memlock"; type = "-"; value = 1024; }];
+}
diff --git a/nixpkgs/nixos/tests/nixops/legacy/nixops.nix b/nixpkgs/nixos/tests/nixops/legacy/nixops.nix
new file mode 100644
index 000000000000..795dc2a71825
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixops/legacy/nixops.nix
@@ -0,0 +1,15 @@
+{
+  network = {
+    description = "Legacy Network using <nixpkgs> and legacy state.";
+    # NB this is not really what makes it a legacy network; lack of flakes is.
+    storage.legacy = { };
+  };
+  server = { lib, pkgs, ... }: {
+    deployment.targetEnv = "none";
+    imports = [
+      ./base-configuration.nix
+      (lib.modules.importJSON ./server-network.json)
+    ];
+    environment.systemPackages = [ pkgs.hello pkgs.figlet ];
+  };
+}
diff --git a/nixpkgs/nixos/tests/nixos-generate-config.nix b/nixpkgs/nixos/tests/nixos-generate-config.nix
new file mode 100644
index 000000000000..e1c2f29e0673
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-generate-config.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ lib, ... } : {
+  name = "nixos-generate-config";
+  meta.maintainers = with lib.maintainers; [ basvandijk ];
+  nodes.machine = {
+    system.nixos-generate-config.configuration = ''
+      # OVERRIDDEN
+      { config, pkgs, ... }: {
+        imports = [ ./hardware-configuration.nix ];
+      $bootLoaderConfig
+      $desktopConfiguration
+      }
+    '';
+
+    system.nixos-generate-config.desktopConfiguration = [''
+      # DESKTOP
+      services.xserver.displayManager.gdm.enable = true;
+      services.xserver.desktopManager.gnome.enable = true;
+    ''];
+  };
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("nixos-generate-config")
+
+    # Test if the configuration really is overridden
+    machine.succeed("grep 'OVERRIDDEN' /etc/nixos/configuration.nix")
+
+    # Test if desktop configuration really is overridden
+    machine.succeed("grep 'DESKTOP' /etc/nixos/configuration.nix")
+
+    # Test if the Perl variable $bootLoaderConfig is spliced correctly
+    machine.succeed(
+        "grep 'boot\\.loader\\.grub\\.enable = true;' /etc/nixos/configuration.nix"
+    )
+
+    # Test if the Perl variable $desktopConfiguration is spliced correctly
+    machine.succeed(
+        "grep 'services\\.xserver\\.desktopManager\\.gnome\\.enable = true;' /etc/nixos/configuration.nix"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix b/nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix
new file mode 100644
index 000000000000..3ade90ea24a7
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-rebuild-install-bootloader.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nixos-rebuild-install-bootloader";
+
+  nodes = {
+    machine = { lib, pkgs, ... }: {
+      imports = [
+        ../modules/profiles/installation-device.nix
+        ../modules/profiles/base.nix
+      ];
+
+      nix.settings = {
+        substituters = lib.mkForce [ ];
+        hashed-mirrors = null;
+        connect-timeout = 1;
+      };
+
+      system.includeBuildDependencies = true;
+
+      virtualisation = {
+        cores = 2;
+        memorySize = 2048;
+      };
+
+      virtualisation.useBootLoader = true;
+    };
+  };
+
+  testScript =
+    let
+      configFile = pkgs.writeText "configuration.nix" ''
+        { lib, pkgs, ... }: {
+          imports = [
+            ./hardware-configuration.nix
+            <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          ];
+
+          boot.loader.grub = {
+            enable = true;
+            device = "/dev/vda";
+            forceInstall = true;
+          };
+
+          documentation.enable = false;
+        }
+      '';
+
+    in
+    ''
+      machine.start()
+      machine.succeed("udevadm settle")
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("nixos-generate-config")
+      machine.copy_from_host(
+          "${configFile}",
+          "/etc/nixos/configuration.nix",
+      )
+      machine.succeed("nixos-rebuild switch")
+
+      # Need to run `nixos-rebuild` twice because the first run will install
+      # GRUB anyway
+      with subtest("Switch system again and install bootloader"):
+          result = machine.succeed("nixos-rebuild switch --install-bootloader")
+          # install-grub2.pl messages
+          assert "updating GRUB 2 menu..." in result
+          assert "installing the GRUB 2 boot loader on /dev/vda..." in result
+          # GRUB message
+          assert "Installation finished. No error reported." in result
+          # at this point we've tested regression #262724, but haven't tested the bootloader itself
+          # TODO: figure out how to tell the test driver to start the bootloader instead of
+          # booting into the kernel directly.
+    '';
+})
diff --git a/nixpkgs/nixos/tests/nixos-rebuild-specialisations.nix b/nixpkgs/nixos/tests/nixos-rebuild-specialisations.nix
new file mode 100644
index 000000000000..444ff7a3b977
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-rebuild-specialisations.nix
@@ -0,0 +1,120 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nixos-rebuild-specialisations";
+
+  nodes = {
+    machine = { lib, pkgs, ... }: {
+      imports = [
+        ../modules/profiles/installation-device.nix
+        ../modules/profiles/base.nix
+      ];
+
+      nix.settings = {
+        substituters = lib.mkForce [ ];
+        hashed-mirrors = null;
+        connect-timeout = 1;
+      };
+
+      system.includeBuildDependencies = true;
+
+      system.extraDependencies = [
+        # Not part of the initial build apparently?
+        pkgs.grub2
+      ];
+
+      virtualisation = {
+        cores = 2;
+        memorySize = 2048;
+      };
+    };
+  };
+
+  testScript =
+    let
+      configFile = pkgs.writeText "configuration.nix" ''
+        { lib, pkgs, ... }: {
+          imports = [
+            ./hardware-configuration.nix
+            <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          ];
+
+          boot.loader.grub = {
+            enable = true;
+            device = "/dev/vda";
+            forceInstall = true;
+          };
+
+          documentation.enable = false;
+
+          environment.systemPackages = [
+            (pkgs.writeShellScriptBin "parent" "")
+          ];
+
+          specialisation.foo = {
+            inheritParentConfig = true;
+
+            configuration = { ... }: {
+              environment.systemPackages = [
+                (pkgs.writeShellScriptBin "foo" "")
+              ];
+            };
+          };
+
+          specialisation.bar = {
+            inheritParentConfig = true;
+
+            configuration = { ... }: {
+              environment.systemPackages = [
+                (pkgs.writeShellScriptBin "bar" "")
+              ];
+            };
+          };
+        }
+      '';
+
+    in
+    ''
+      machine.start()
+      machine.succeed("udevadm settle")
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("nixos-generate-config")
+      machine.copy_from_host(
+          "${configFile}",
+          "/etc/nixos/configuration.nix",
+      )
+
+      with subtest("Switch to the base system"):
+          machine.succeed("nixos-rebuild switch")
+          machine.succeed("parent")
+          machine.fail("foo")
+          machine.fail("bar")
+
+      with subtest("Switch from base system into a specialization"):
+          machine.succeed("nixos-rebuild switch --specialisation foo")
+          machine.succeed("parent")
+          machine.succeed("foo")
+          machine.fail("bar")
+
+      with subtest("Switch from specialization into another specialization"):
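+          # -c is the short form of --specialisation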
+          machine.succeed("nixos-rebuild switch -c bar")
+          machine.succeed("parent")
+          machine.fail("foo")
+          machine.succeed("bar")
+
+      with subtest("Switch from specialization into the base system"):
+          machine.succeed("nixos-rebuild switch")
+          machine.succeed("parent")
+          machine.fail("foo")
+          machine.fail("bar")
+
+      with subtest("Switch into specialization using `nixos-rebuild test`"):
+          machine.succeed("nixos-rebuild test --specialisation foo")
+          machine.succeed("parent")
+          machine.succeed("foo")
+          machine.fail("bar")
+
+      with subtest("Make sure nonsense command combinations are forbidden"):
+          machine.fail("nixos-rebuild boot --specialisation foo")
+          machine.fail("nixos-rebuild boot -c foo")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/nixos-test-driver/busybox.nix b/nixpkgs/nixos/tests/nixos-test-driver/busybox.nix
new file mode 100644
index 000000000000..426f4494436e
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-test-driver/busybox.nix
@@ -0,0 +1,16 @@
+{
+  name = "Test that basic tests work when busybox is installed";
+
+  nodes = {
+    machine = ({ pkgs, ... }: {
+      environment.systemPackages = [
+        pkgs.busybox
+      ];
+    });
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/nixos-test-driver/extra-python-packages.nix b/nixpkgs/nixos/tests/nixos-test-driver/extra-python-packages.nix
new file mode 100644
index 000000000000..1146bedd996f
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-test-driver/extra-python-packages.nix
@@ -0,0 +1,13 @@
+import ../make-test-python.nix ({ ... }:
+  {
+    name = "extra-python-packages";
+
+    extraPythonPackages = p: [ p.numpy ];
+
+    nodes = { };
+
+    testScript = ''
+      import numpy as np
+      assert repr(np.zeros(4)) == "array([0., 0., 0., 0.])"
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/nixos-test-driver/lib-extend.nix b/nixpkgs/nixos/tests/nixos-test-driver/lib-extend.nix
new file mode 100644
index 000000000000..4fb7cf494aed
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-test-driver/lib-extend.nix
@@ -0,0 +1,31 @@
+{ pkgs, ... }:
+
+let
+  patchedPkgs = pkgs.extend (new: old: {
+    lib = old.lib.extend (self: super: {
+      sorry_dave = "sorry dave";
+    });
+  });
+
+  testBody = {
+    name = "demo lib overlay";
+
+    nodes = {
+      machine = { lib, ... }: {
+        environment.etc."got-lib-overlay".text = lib.sorry_dave;
+      };
+    };
+
+    # We don't need to run an actual test. Instead we build the `machine` configuration
+    # and call it a day, because that already proves that `lib` is wired up correctly.
+    # See the attrset returned at the bottom of this file.
+    testScript = "";
+  };
+
+  inherit (patchedPkgs.testers) nixosTest runNixOSTest;
+  evaluationNixosTest = nixosTest testBody;
+  evaluationRunNixOSTest = runNixOSTest testBody;
+in {
+  nixosTest = evaluationNixosTest.driver.nodes.machine.system.build.toplevel;
+  runNixOSTest = evaluationRunNixOSTest.driver.nodes.machine.system.build.toplevel;
+}
diff --git a/nixpkgs/nixos/tests/nixos-test-driver/node-name.nix b/nixpkgs/nixos/tests/nixos-test-driver/node-name.nix
new file mode 100644
index 000000000000..31386813a516
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-test-driver/node-name.nix
@@ -0,0 +1,33 @@
+{
+  name = "nixos-test-driver.node-name";
+  nodes = {
+    "ok" = { };
+
+    # Valid node name, but not a great host name.
+    "one_two" = { };
+
+    # Valid node name, good host name
+    "a-b" = { };
+
+    # TODO: would be nice to test these eval failures
+    # Not allowed by lib/testing/network.nix (yet?)
+    # "foo.bar" = { };
+    # Not allowed.
+    # "not ok" = { }; # not ok
+  };
+
+  testScript = ''
+    start_all()
+
+    with subtest("python vars exist and machines are reachable through test backdoor"):
+      ok.succeed("true")
+      one_two.succeed("true")
+      a_b.succeed("true")
+
+    with subtest("hostname is derived from the node name"):
+      ok.succeed("hostname | tee /dev/stderr | grep '^ok$'")
+      one_two.succeed("hostname | tee /dev/stderr | grep '^onetwo$'")
+      a_b.succeed("hostname | tee /dev/stderr | grep '^a-b$'")
+
+  '';
+}
diff --git a/nixpkgs/nixos/tests/nixos-test-driver/timeout.nix b/nixpkgs/nixos/tests/nixos-test-driver/timeout.nix
new file mode 100644
index 000000000000..29bd85d2498e
--- /dev/null
+++ b/nixpkgs/nixos/tests/nixos-test-driver/timeout.nix
@@ -0,0 +1,15 @@
+{
+  name = "Test that sleep of 6 seconds fails a timeout of 5 seconds";
+  globalTimeout = 5;
+
+  nodes = {
+    machine = ({ pkgs, ... }: {
+    });
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("sleep 6")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/node-red.nix b/nixpkgs/nixos/tests/node-red.nix
new file mode 100644
index 000000000000..5f5960d68295
--- /dev/null
+++ b/nixpkgs/nixos/tests/node-red.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "nodered";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ matthewcroughan ];
+  };
+
+  nodes = {
+    client = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.curl ];
+    };
+    nodered = { config, pkgs, ... }: {
+      services.node-red = {
+        enable = true;
+        openFirewall = true;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    nodered.wait_for_unit("node-red.service")
+    nodered.wait_for_open_port(1880)
+
+    client.wait_for_unit("multi-user.target")
+
+    with subtest("Check that the Node-RED webserver can be reached."):
+        assert "<title>Node-RED</title>" in client.succeed(
+            "curl -sSf http://nodered:1880/ | grep title"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nomad.nix b/nixpkgs/nixos/tests/nomad.nix
new file mode 100644
index 000000000000..51b11a8fef90
--- /dev/null
+++ b/nixpkgs/nixos/tests/nomad.nix
@@ -0,0 +1,97 @@
+import ./make-test-python.nix (
+  { lib, ... }: {
+    name = "nomad";
+    nodes = {
+      default_server = { pkgs, lib, ... }: {
+        networking = {
+          interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [{
+            address = "192.168.1.1";
+            prefixLength = 16;
+          }];
+        };
+
+        environment.etc."nomad.custom.json".source =
+          (pkgs.formats.json { }).generate "nomad.custom.json" {
+            region = "universe";
+            datacenter = "earth";
+          };
+
+        services.nomad = {
+          enable = true;
+
+          settings = {
+            server = {
+              enabled = true;
+              bootstrap_expect = 1;
+            };
+          };
+
+          extraSettingsPaths = [ "/etc/nomad.custom.json" ];
+          enableDocker = false;
+        };
+      };
+
+      custom_state_dir_server = { pkgs, lib, ... }: {
+        networking = {
+          interfaces.eth1.ipv4.addresses = lib.mkOverride 0 [{
+            address = "192.168.1.1";
+            prefixLength = 16;
+          }];
+        };
+
+        environment.etc."nomad.custom.json".source =
+          (pkgs.formats.json { }).generate "nomad.custom.json" {
+            region = "universe";
+            datacenter = "earth";
+          };
+
+        services.nomad = {
+          enable = true;
+          dropPrivileges = false;
+
+          settings = {
+            data_dir = "/nomad/data/dir";
+            server = {
+              enabled = true;
+              bootstrap_expect = 1;
+            };
+          };
+
+          extraSettingsPaths = [ "/etc/nomad.custom.json" ];
+          enableDocker = false;
+        };
+
+        systemd.services.nomad.serviceConfig.ExecStartPre = "${pkgs.writeShellScript "mk_data_dir" ''
+          set -euxo pipefail
+
+          ${pkgs.coreutils}/bin/mkdir -p /nomad/data/dir
+        ''}";
+      };
+    };
+
+    testScript = ''
+      def test_nomad_server(server):
+          server.wait_for_unit("nomad.service")
+
+          # wait for healthy server
+          server.wait_until_succeeds(
+              "[ $(nomad operator raft list-peers | grep true | wc -l) == 1 ]"
+          )
+
+          # wait for server liveness
+          server.succeed("[ $(nomad server members | grep -o alive | wc -l) == 1 ]")
+
+          # check the region
+          server.succeed("nomad server members | grep -o universe")
+
+          # check the datacenter
+          server.succeed("[ $(nomad server members | grep -o earth | wc -l) == 1 ]")
+
+
+      servers = [default_server, custom_state_dir_server]
+
+      for server in servers:
+          test_nomad_server(server)
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/non-default-filesystems.nix b/nixpkgs/nixos/tests/non-default-filesystems.nix
new file mode 100644
index 000000000000..08a17107dd2f
--- /dev/null
+++ b/nixpkgs/nixos/tests/non-default-filesystems.nix
@@ -0,0 +1,172 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+{
+  bind = makeTest {
+    name = "non-default-filesystem-bind";
+
+    nodes.machine = { ... }: {
+      virtualisation.writableStore = false;
+
+      virtualisation.fileSystems."/test-bind-dir/bind" = {
+        device = "/";
+        neededForBoot = true;
+        options = [ "bind" ];
+      };
+
+      virtualisation.fileSystems."/test-bind-file/bind" = {
+        depends = [ "/nix/store" ];
+        device = builtins.toFile "empty" "";
+        neededForBoot = true;
+        options = [ "bind" ];
+      };
+    };
+
+    testScript = ''
+      machine.wait_for_unit("multi-user.target")
+    '';
+  };
+
+  btrfs = makeTest
+    {
+      name = "non-default-filesystems-btrfs";
+
+      nodes.machine =
+        { config, pkgs, lib, ... }:
+        let
+          disk = config.virtualisation.rootDevice;
+        in
+        {
+          virtualisation.rootDevice = "/dev/vda";
+          virtualisation.useDefaultFilesystems = false;
+
+          boot.initrd.availableKernelModules = [ "btrfs" ];
+          boot.supportedFilesystems = [ "btrfs" ];
+
+          boot.initrd.postDeviceCommands = ''
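+            # If the root device has no filesystem yet, format it with btrfs and
+            # create the root and home subvolumes that are mounted below.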
+            FSTYPE=$(blkid -o value -s TYPE ${disk} || true)
+            if test -z "$FSTYPE"; then
+              modprobe btrfs
+              ${pkgs.btrfs-progs}/bin/mkfs.btrfs ${disk}
+
+              mkdir /nixos
+              mount -t btrfs ${disk} /nixos
+
+              ${pkgs.btrfs-progs}/bin/btrfs subvolume create /nixos/root
+              ${pkgs.btrfs-progs}/bin/btrfs subvolume create /nixos/home
+
+              umount /nixos
+            fi
+          '';
+
+          virtualisation.fileSystems = {
+            "/" = {
+              device = disk;
+              fsType = "btrfs";
+              options = [ "subvol=/root" ];
+            };
+
+            "/home" = {
+              device = disk;
+              fsType = "btrfs";
+              options = [ "subvol=/home" ];
+            };
+          };
+        };
+
+      testScript = ''
+        machine.wait_for_unit("multi-user.target")
+
+        with subtest("BTRFS filesystems are mounted correctly"):
+          machine.succeed("grep -E '/dev/vda / btrfs rw,relatime,space_cache=v2,subvolid=[0-9]+,subvol=/root 0 0' /proc/mounts")
+          machine.succeed("grep -E '/dev/vda /home btrfs rw,relatime,space_cache=v2,subvolid=[0-9]+,subvol=/home 0 0' /proc/mounts")
+      '';
+    };
+
+  erofs =
+    let
+      fsImage = "/tmp/non-default-filesystem.img";
+    in
+    makeTest {
+      name = "non-default-filesystems-erofs";
+
+      meta.maintainers = with maintainers; [ nikstur ];
+
+      nodes.machine = _: {
+        virtualisation.qemu.drives = [{
+          name = "non-default-filesystem";
+          file = fsImage;
+        }];
+
+        virtualisation.fileSystems."/non-default" = {
+          device = "/dev/vdb";
+          fsType = "erofs";
+          neededForBoot = true;
+        };
+      };
+
+      testScript = ''
+        import subprocess
+        import tempfile
+
+        with tempfile.TemporaryDirectory() as tmp_dir:
+          with open(f"{tmp_dir}/filesystem", "w") as f:
+              f.write("erofs")
+
+          subprocess.run([
+            "${pkgs.erofs-utils}/bin/mkfs.erofs",
+            "${fsImage}",
+            tmp_dir,
+          ])
+
+        machine.start()
+        machine.wait_for_unit("default.target")
+
+        file_contents = machine.succeed("cat /non-default/filesystem")
+        assert "erofs" in file_contents
+      '';
+    };
+
+  squashfs =
+    let
+      fsImage = "/tmp/non-default-filesystem.img";
+    in
+    makeTest {
+      name = "non-default-filesystems-squashfs";
+
+      meta.maintainers = with maintainers; [ nikstur ];
+
+      nodes.machine = {
+        virtualisation.qemu.drives = [{
+          name = "non-default-filesystem";
+          file = fsImage;
+          deviceExtraOpts.serial = "non-default";
+        }];
+
+        virtualisation.fileSystems."/non-default" = {
+          device = "/dev/disk/by-id/virtio-non-default";
+          fsType = "squashfs";
+          neededForBoot = true;
+        };
+      };
+
+      testScript = ''
+        import subprocess
+
+        with open("filesystem", "w") as f:
+          f.write("squashfs")
+
+        subprocess.run([
+          "${pkgs.squashfsTools}/bin/mksquashfs",
+          "filesystem",
+          "${fsImage}",
+        ])
+
+        assert "squashfs" in machine.succeed("cat /non-default/filesystem")
+      '';
+    };
+}
diff --git a/nixpkgs/nixos/tests/non-switchable-system.nix b/nixpkgs/nixos/tests/non-switchable-system.nix
new file mode 100644
index 000000000000..54bede75453b
--- /dev/null
+++ b/nixpkgs/nixos/tests/non-switchable-system.nix
@@ -0,0 +1,15 @@
+{ lib, ... }:
+
+{
+  name = "non-switchable-system";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = {
+    system.switch.enable = false;
+  };
+
+  testScript = ''
+    machine.succeed("test ! -e /run/current-system/bin/switch-to-configuration")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/noto-fonts-cjk-qt-default-weight.nix b/nixpkgs/nixos/tests/noto-fonts-cjk-qt-default-weight.nix
new file mode 100644
index 000000000000..c2e0cb3adaeb
--- /dev/null
+++ b/nixpkgs/nixos/tests/noto-fonts-cjk-qt-default-weight.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "noto-fonts-cjk-qt";
+  meta.maintainers = with lib.maintainers; [ oxalica ];
+
+  nodes.machine = {
+    imports = [ ./common/x11.nix ];
+    fonts = {
+      enableDefaultPackages = false;
+      fonts = [ pkgs.noto-fonts-cjk-sans ];
+    };
+  };
+
+  testScript =
+    let
+      script = pkgs.writers.writePython3 "qt-default-weight" {
+        libraries = [ pkgs.python3Packages.pyqt6 ];
+      } ''
+        from PyQt6.QtWidgets import QApplication
+        from PyQt6.QtGui import QFont, QRawFont
+
+        app = QApplication([])
+        f = QRawFont.fromFont(QFont("Noto Sans CJK SC", 20))
+
+        assert f.styleName() == "Regular", f.styleName()
+      '';
+    in ''
+      machine.wait_for_x()
+      machine.succeed("${script}")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/noto-fonts.nix b/nixpkgs/nixos/tests/noto-fonts.nix
new file mode 100644
index 000000000000..b871f5f51729
--- /dev/null
+++ b/nixpkgs/nixos/tests/noto-fonts.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "noto-fonts";
+  meta.maintainers = with lib.maintainers; [ nickcao midchildan ];
+
+  nodes.machine = {
+    imports = [ ./common/x11.nix ];
+    environment.systemPackages = [ pkgs.gedit ];
+    fonts = {
+      enableDefaultPackages = false;
+      fonts = with pkgs;[
+        noto-fonts
+        noto-fonts-cjk-sans
+        noto-fonts-cjk-serif
+        noto-fonts-color-emoji
+      ];
+      fontconfig.defaultFonts = {
+        serif = [ "Noto Serif" "Noto Serif CJK SC" ];
+        sansSerif = [ "Noto Sans" "Noto Sans CJK SC" ];
+        monospace = [ "Noto Sans Mono" "Noto Sans Mono CJK SC" ];
+        emoji = [ "Noto Color Emoji" ];
+      };
+    };
+  };
+
+  testScript =
+    # extracted from http://www.clagnut.com/blog/2380/
+    let testText = builtins.toFile "test.txt" ''
+      the quick brown fox jumps over the lazy dog
+      視野無限廣，窗外有藍天
+      Eĥoŝanĝo ĉiuĵaŭde.
+      いろはにほへと ちりぬるを わかよたれそ つねならむ うゐのおくやま けふこえて あさきゆめみし ゑひもせす
+      다람쥐 헌 쳇바퀴에 타고파
+      中国智造，慧及全球
+    ''; in
+    ''
+      machine.wait_for_x()
+      machine.succeed("gedit ${testText} >&2 &")
+      machine.wait_for_window(".* - gedit")
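+      # give gedit some time to render the test text before taking a screenshot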
+      machine.sleep(10)
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/novacomd.nix b/nixpkgs/nixos/tests/novacomd.nix
new file mode 100644
index 000000000000..d47d212fb2ec
--- /dev/null
+++ b/nixpkgs/nixos/tests/novacomd.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "novacomd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ dtzWill ];
+  };
+
+  nodes.machine = { ... }: {
+    services.novacomd.enable = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("novacomd.service")
+
+    with subtest("Make sure the daemon is really listening"):
+        machine.wait_for_open_port(6968)
+        machine.succeed("novacom -l")
+
+    with subtest("Stop the daemon, double-check novacom fails if daemon isn't working"):
+        machine.stop_job("novacomd")
+        machine.fail("novacom -l")
+
+    with subtest("Make sure the daemon starts back up again"):
+        machine.start_job("novacomd")
+        # make sure the daemon is really listening
+        machine.wait_for_open_port(6968)
+        machine.succeed("novacom -l")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nscd.nix b/nixpkgs/nixos/tests/nscd.nix
new file mode 100644
index 000000000000..356c6d2e2a54
--- /dev/null
+++ b/nixpkgs/nixos/tests/nscd.nix
@@ -0,0 +1,142 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  # build a getent that itself doesn't see anything in /etc/hosts and
+  # /etc/nsswitch.conf, by using libredirect to steer its own requests to
+  # /dev/null.
+  # This means it /has/ to go via nscd to actually resolve any of the
+  # additionally configured hosts.
+  getent' = pkgs.writeScript "getent-without-etc-hosts" ''
+    export NIX_REDIRECTS=/etc/hosts=/dev/null:/etc/nsswitch.conf=/dev/null
+    export LD_PRELOAD=${pkgs.libredirect}/lib/libredirect.so
+    exec getent "$@"
+  '';
+in
+{
+  name = "nscd";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ common/user-account.nix ];
+    networking.extraHosts = ''
+      2001:db8::1 somehost.test
+      192.0.2.1 somehost.test
+    '';
+
+    systemd.services.sockdump = {
+      wantedBy = [ "multi-user.target" ];
+      path = [
+        # necessary for bcc to unpack kernel headers and invoke modprobe
+        pkgs.gnutar
+        pkgs.xz.bin
+        pkgs.kmod
+      ];
+      environment.PYTHONUNBUFFERED = "1";
+
+      serviceConfig = {
+        ExecStart = "${pkgs.sockdump}/bin/sockdump /var/run/nscd/socket";
+        Restart = "on-failure";
+        RestartSec = "1";
+        Type = "simple";
+      };
+    };
+
+    specialisation = {
+      withGlibcNscd.configuration = { ... }: {
+        services.nscd.enableNsncd = false;
+      };
+      withUnscd.configuration = { ... }: {
+        services.nscd.enableNsncd = false;
+        services.nscd.package = pkgs.unscd;
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      specialisations = "${nodes.machine.system.build.toplevel}/specialisation";
+    in
+    ''
+      # Regression test for https://github.com/NixOS/nixpkgs/issues/50273
+      def test_dynamic_user():
+          with subtest("DynamicUser actually allocates a user"):
+              assert "iamatest" in machine.succeed(
+                  "systemd-run --pty --property=Type=oneshot --property=DynamicUser=yes --property=User=iamatest whoami"
+              )
+
+      # Test resolution of somehost.test with getent', to make sure we go via
+      # nscd protocol
+      def test_host_lookups():
+          with subtest("host lookups via nscd protocol"):
+              # ahosts
+              output = machine.succeed("${getent'} ahosts somehost.test")
+              assert "192.0.2.1" in output
+              assert "2001:db8::1" in output
+
+              # ahostsv4
+              output = machine.succeed("${getent'} ahostsv4 somehost.test")
+              assert "192.0.2.1" in output
+              assert "2001:db8::1" not in output
+
+              # ahostsv6
+              output = machine.succeed("${getent'} ahostsv6 somehost.test")
+              assert "192.0.2.1" not in output
+              assert "2001:db8::1" in output
+
+              # reverse lookups (hosts)
+              assert "somehost.test" in machine.succeed("${getent'} hosts 2001:db8::1")
+              assert "somehost.test" in machine.succeed("${getent'} hosts 192.0.2.1")
+
+
+      # Test host resolution via nss modules works
+      # We rely on nss-myhostname in this case, which resolves *.localhost and
+      # _gateway.
+      # We don't need to use getent' here, as non-glibc nss modules can only be
+      # discovered via nscd.
+      def test_nss_myhostname():
+          with subtest("nss-myhostname provides hostnames (ahosts)"):
+              # ahosts
+              output = machine.succeed("getent ahosts foobar.localhost")
+              assert "::1" in output
+              assert "127.0.0.1" in output
+
+              # ahostsv4
+              output = machine.succeed("getent ahostsv4 foobar.localhost")
+              assert "::1" not in output
+              assert "127.0.0.1" in output
+
+              # ahostsv6
+              output = machine.succeed("getent ahostsv6 foobar.localhost")
+              assert "::1" in output
+              assert "127.0.0.1" not in output
+
+      start_all()
+      machine.wait_for_unit("default.target")
+
+      # give sockdump some time to finish attaching.
+      machine.sleep(5)
+
+      # Run all tests with the default nscd implementation (nsncd).
+      test_dynamic_user()
+      test_host_lookups()
+      test_nss_myhostname()
+
+      with subtest("glibc-nscd"):
+          machine.succeed('${specialisations}/withGlibcNscd/bin/switch-to-configuration test')
+          machine.wait_for_unit("default.target")
+
+          test_dynamic_user()
+          test_host_lookups()
+          test_nss_myhostname()
+
+      with subtest("unscd"):
+          machine.succeed('${specialisations}/withUnscd/bin/switch-to-configuration test')
+          machine.wait_for_unit("default.target")
+
+          # known to fail, unscd doesn't load external NSS modules
+          # test_dynamic_user()
+
+          test_host_lookups()
+
+          # known to fail, unscd doesn't load external NSS modules
+          # test_nss_myhostname()
+    '';
+})
diff --git a/nixpkgs/nixos/tests/nsd.nix b/nixpkgs/nixos/tests/nsd.nix
new file mode 100644
index 000000000000..eea5a82f6f92
--- /dev/null
+++ b/nixpkgs/nixos/tests/nsd.nix
@@ -0,0 +1,109 @@
+let
+  common = { pkgs, ... }: {
+    networking.firewall.enable = false;
+    networking.useDHCP = false;
+    # for a host utility with IPv6 support
+    environment.systemPackages = [ pkgs.bind ];
+  };
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "nsd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ aszlig ];
+  };
+
+  nodes = {
+    clientv4 = { lib, nodes, ... }: {
+      imports = [ common ];
+      networking.nameservers = lib.mkForce [
+        (lib.head nodes.server.config.networking.interfaces.eth1.ipv4.addresses).address
+      ];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = "192.168.0.2"; prefixLength = 24; }
+      ];
+    };
+
+    clientv6 = { lib, nodes, ... }: {
+      imports = [ common ];
+      networking.nameservers = lib.mkForce [
+        (lib.head nodes.server.config.networking.interfaces.eth1.ipv6.addresses).address
+      ];
+      networking.interfaces.eth1.ipv6.addresses = [
+        { address = "dead:beef::2"; prefixLength = 24; }
+      ];
+    };
+
+    server = { lib, ... }: {
+      imports = [ common ];
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = "192.168.0.1"; prefixLength = 24; }
+      ];
+      networking.interfaces.eth1.ipv6.addresses = [
+        { address = "dead:beef::1"; prefixLength = 64; }
+      ];
+      services.nsd.enable = true;
+      services.nsd.rootServer = true;
+      services.nsd.interfaces = lib.mkForce [];
+      services.nsd.keys."tsig.example.com." = {
+        algorithm = "hmac-sha256";
+        keyFile = pkgs.writeTextFile { name = "tsig.example.com."; text = "aR3FJA92+bxRSyosadsJ8Aeeav5TngQW/H/EF9veXbc="; };
+      };
+      services.nsd.zones."example.com.".data = ''
+        @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600
+        ipv4 A 1.2.3.4
+        ipv6 AAAA abcd::eeff
+        deleg NS ns.example.com
+        ns A 192.168.0.1
+        ns AAAA dead:beef::1
+      '';
+      services.nsd.zones."example.com.".provideXFR = [ "0.0.0.0 tsig.example.com." ];
+      services.nsd.zones."deleg.example.com.".data = ''
+        @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600
+        @ A 9.8.7.6
+        @ AAAA fedc::bbaa
+      '';
+      services.nsd.zones.".".data = ''
+        @ SOA ns.example.com noc.example.com 666 7200 3600 1209600 3600
+        root A 1.8.7.4
+        root AAAA acbd::4
+      '';
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    clientv4.wait_for_unit("network.target")
+    clientv6.wait_for_unit("network.target")
+    server.wait_for_unit("nsd.service")
+
+    with subtest("server tsig.example.com."):
+        expected_tsig = "  secret: \"aR3FJA92+bxRSyosadsJ8Aeeav5TngQW/H/EF9veXbc=\"\n"
+        tsig=server.succeed("cat /var/lib/nsd/private/tsig.example.com.")
+        assert expected_tsig == tsig, f"Expected /var/lib/nsd/private/tsig.example.com. to contain '{expected_tsig}', but found '{tsig}'"
+
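+    # Query the record type from the matching client (v4 or v6) with `host` and
+    # check its output against the expected regular expression.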
+    def assert_host(type, rr, query, expected):
+        self = clientv4 if type == 4 else clientv6
+        out = self.succeed(f"host -{type} -t {rr} {query}").rstrip()
+        self.log(f"output: {out}")
+        import re
+        assert re.search(
+            expected, out
+        ), f"DNS IPv{type} query on {query} gave '{out}' instead of '{expected}'"
+
+
+    for ipv in 4, 6:
+        with subtest(f"IPv{ipv}"):
+            assert_host(ipv, "a", "example.com", "has no [^ ]+ record")
+            assert_host(ipv, "aaaa", "example.com", "has no [^ ]+ record")
+
+            assert_host(ipv, "soa", "example.com", "SOA.*?noc\.example\.com")
+            assert_host(ipv, "a", "ipv4.example.com", "address 1.2.3.4$")
+            assert_host(ipv, "aaaa", "ipv6.example.com", "address abcd::eeff$")
+
+            assert_host(ipv, "a", "deleg.example.com", "address 9.8.7.6$")
+            assert_host(ipv, "aaaa", "deleg.example.com", "address fedc::bbaa$")
+
+            assert_host(ipv, "a", "root", "address 1.8.7.4$")
+            assert_host(ipv, "aaaa", "root", "address acbd::4$")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ntfy-sh.nix b/nixpkgs/nixos/tests/ntfy-sh.nix
new file mode 100644
index 000000000000..ec2e645bacb5
--- /dev/null
+++ b/nixpkgs/nixos/tests/ntfy-sh.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix {
+  name = "ntfy-sh";
+
+  nodes.machine = { ... }: {
+    services.ntfy-sh.enable = true;
+    services.ntfy-sh.settings.base-url = "http://localhost:2586";
+  };
+
+  testScript = ''
+    import json
+
+    msg = "Test notification"
+
+    machine.wait_for_unit("multi-user.target")
+
+    machine.wait_for_open_port(2586)
+
+    machine.succeed(f"curl -d '{msg}' localhost:2586/test")
+
+    notif = json.loads(machine.succeed("curl -s localhost:2586/test/json?poll=1"))
+
+    assert msg == notif["message"], "Wrong message"
+
+    machine.succeed("ntfy user list")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/nzbget.nix b/nixpkgs/nixos/tests/nzbget.nix
new file mode 100644
index 000000000000..e45031d5629c
--- /dev/null
+++ b/nixpkgs/nixos/tests/nzbget.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "nzbget";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ aanderse flokli ];
+  };
+
+  nodes = {
+    server = { ... }: {
+      services.nzbget.enable = true;
+
+      # provide some test settings
+      services.nzbget.settings = {
+        "MainDir" = "/var/lib/nzbget";
+        "DirectRename" = true;
+        "DiskSpace" = 0;
+        "Server1.Name" = "this is a test";
+      };
+
+      # hack, don't add (unfree) unrar to nzbget's path,
+      # so we can run this test in CI
+      systemd.services.nzbget.path = pkgs.lib.mkForce [ pkgs.p7zip ];
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    start_all()
+
+    server.wait_for_unit("nzbget.service")
+    server.wait_for_unit("network.target")
+    server.wait_for_open_port(6789)
+    assert "This file is part of nzbget" in server.succeed(
+        "curl -f -s -u nzbget:tegbzn6789 http://127.0.0.1:6789"
+    )
+    server.succeed(
+        "${pkgs.nzbget}/bin/nzbget -n -o Control_iP=127.0.0.1 -o Control_port=6789 -o Control_password=tegbzn6789 -V"
+    )
+
+    config = server.succeed("${nodes.server.systemd.services.nzbget.serviceConfig.ExecStart} --printconfig")
+
+    # confirm the test settings are applied
+    assert 'MainDir = "/var/lib/nzbget"' in config
+    assert 'DirectRename = "yes"' in config
+    assert 'DiskSpace = "0"' in config
+    assert 'Server1.Name = "this is a test"' in config
+  '';
+})
diff --git a/nixpkgs/nixos/tests/nzbhydra2.nix b/nixpkgs/nixos/tests/nzbhydra2.nix
new file mode 100644
index 000000000000..e1d528cd9520
--- /dev/null
+++ b/nixpkgs/nixos/tests/nzbhydra2.nix
@@ -0,0 +1,14 @@
+import ./make-test-python.nix ({ lib, ... }:
+  {
+    name = "nzbhydra2";
+    meta.maintainers = with lib.maintainers; [ jamiemagee ];
+
+    nodes.machine = { pkgs, ... }: { services.nzbhydra2.enable = true; };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("nzbhydra2.service")
+      machine.wait_for_open_port(5076)
+      machine.succeed("curl --fail http://localhost:5076/")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/oci-containers.nix b/nixpkgs/nixos/tests/oci-containers.nix
new file mode 100644
index 000000000000..1afa9df36dfa
--- /dev/null
+++ b/nixpkgs/nixos/tests/oci-containers.nix
@@ -0,0 +1,42 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, lib ? pkgs.lib
+}:
+
+let
+
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+
+  mkOCITest = backend: makeTest {
+    name = "oci-containers-${backend}";
+
+    meta.maintainers = lib.teams.serokell.members
+                       ++ (with lib.maintainers; [ adisbladis benley mkaito ]);
+
+    nodes = {
+      ${backend} = { pkgs, ... }: {
+        virtualisation.oci-containers = {
+          inherit backend;
+          containers.nginx = {
+            image = "nginx-container";
+            imageFile = pkgs.dockerTools.examples.nginx;
+            ports = ["8181:80"];
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
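+      # each backend runs an nginx container whose port 80 is published on host port 8181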
+      ${backend}.wait_for_unit("${backend}-nginx.service")
+      ${backend}.wait_for_open_port(8181)
+      ${backend}.wait_until_succeeds("curl -f http://localhost:8181 | grep Hello")
+    '';
+  };
+
+in
+lib.foldl' (attrs: backend: attrs // { ${backend} = mkOCITest backend; }) {} [
+  "docker"
+  "podman"
+]
diff --git a/nixpkgs/nixos/tests/ocsinventory-agent.nix b/nixpkgs/nixos/tests/ocsinventory-agent.nix
new file mode 100644
index 000000000000..67b0c8c91103
--- /dev/null
+++ b/nixpkgs/nixos/tests/ocsinventory-agent.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "ocsinventory-agent";
+
+  nodes.machine = { pkgs, ... }: {
+    services.ocsinventory-agent = {
+      enable = true;
+      settings = {
+        debug = true;
+        local = "/var/lib/ocsinventory-agent/reports";
+        tag = "MY_INVENTORY_TAG";
+      };
+    };
+  };
+
+  testScript = ''
+    path = "/var/lib/ocsinventory-agent/reports"
+
+    # Run the agent to generate the inventory file in offline mode
+    start_all()
+    machine.succeed("mkdir -p {}".format(path))
+    machine.wait_for_unit("ocsinventory-agent.service")
+    machine.wait_until_succeeds("journalctl -u ocsinventory-agent.service | grep 'Inventory saved in'")
+
+    # Fetch the path to the generated inventory file
+    report_file = machine.succeed("find {}/*.ocs -type f | head -n1".format(path))
+
+    with subtest("Check the tag value"):
+      tag = machine.succeed(
+        "${pkgs.libxml2}/bin/xmllint --xpath 'string(/REQUEST/CONTENT/ACCOUNTINFO/KEYVALUE)' {}".format(report_file)
+      ).rstrip()
+      assert tag == "MY_INVENTORY_TAG", f"tag is not valid, was '{tag}'"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/octoprint.nix b/nixpkgs/nixos/tests/octoprint.nix
new file mode 100644
index 000000000000..15a2d677d4cf
--- /dev/null
+++ b/nixpkgs/nixos/tests/octoprint.nix
@@ -0,0 +1,61 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  apikey = "testapikey";
+in
+{
+  name = "octoprint";
+  meta.maintainers = with lib.maintainers; [ gador ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = with pkgs; [ jq ];
+    services.octoprint = {
+      enable = true;
+      extraConfig = {
+        server = {
+          firstRun = false;
+        };
+        api = {
+          enabled = true;
+          key = apikey;
+        };
+        plugins = {
+          # these need internet access and pollute the output with connection failed errors
+          _disabled = [ "softwareupdate" "announcements" "pluginmanager" ];
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    import json
+
+    @polling_condition
+    def octoprint_running():
+        machine.succeed("pgrep octoprint")
+
+    with subtest("Wait for octoprint service to start"):
+        machine.wait_for_unit("octoprint.service")
+        machine.wait_until_succeeds("pgrep octoprint")
+
+    with subtest("Wait for final boot"):
+        # this file appears when octoprint is almost finished starting
+        machine.wait_for_file("/var/lib/octoprint/uploads")
+
+    # octoprint takes some time to start, so the request is retried in case it takes longer.
+    # --retry-all-errors is necessary, since octoprint reports a 404 error while it is not yet ready
+    curl_cmd = "curl --retry-all-errors --connect-timeout 5 --max-time 10 --retry 5 --retry-delay 0 \
+                --retry-max-time 40 -X GET --header 'X-API-Key: ${apikey}' "
+
+    # used to fail early, in case octoprint first starts and then crashes
+    with octoprint_running: # type: ignore[union-attr]
+        with subtest("Check for web interface"):
+            machine.wait_until_succeeds("curl -s localhost:5000")
+
+        with subtest("Check API"):
+            version = json.loads(machine.succeed(curl_cmd + "localhost:5000/api/version"))
+            server = json.loads(machine.succeed(curl_cmd + "localhost:5000/api/server"))
+            assert version["server"] == "${pkgs.octoprint.version}"
+            assert server["safemode"] is None
+  '';
+})
diff --git a/nixpkgs/nixos/tests/odoo.nix b/nixpkgs/nixos/tests/odoo.nix
new file mode 100644
index 000000000000..00ae4a2137d1
--- /dev/null
+++ b/nixpkgs/nixos/tests/odoo.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ pkgs, lib, package ? pkgs.odoo, ...} : {
+  name = "odoo";
+  meta.maintainers = with lib.maintainers; [ mkg20001 ];
+
+  nodes = {
+    server = { ... }: {
+      services.nginx = {
+        enable = true;
+        recommendedProxySettings = true;
+      };
+
+      services.odoo = {
+        enable = true;
+        package = package;
+        domain = "localhost";
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+  ''
+    server.wait_for_unit("odoo.service")
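+    # Odoo itself listens on 8069; nginx proxies it on port 80 for the "localhost" vhost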
+    server.wait_until_succeeds("curl -s http://localhost:8069/web/database/selector | grep '<title>Odoo</title>'")
+    server.succeed("curl -s http://localhost/web/database/selector | grep '<title>Odoo</title>'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/oh-my-zsh.nix b/nixpkgs/nixos/tests/oh-my-zsh.nix
new file mode 100644
index 000000000000..1d5227e36236
--- /dev/null
+++ b/nixpkgs/nixos/tests/oh-my-zsh.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "oh-my-zsh";
+
+  nodes.machine = { pkgs, ... }:
+
+    {
+      programs.zsh = {
+        enable = true;
+        ohMyZsh.enable = true;
+      };
+    };
+
+  testScript = ''
+    start_all()
+    machine.succeed("touch ~/.zshrc")
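+    # sourcing /etc/zshrc should point $ZSH at the oh-my-zsh package in the store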
+    machine.succeed("zsh -c 'source /etc/zshrc && echo $ZSH | grep oh-my-zsh-${pkgs.oh-my-zsh.version}'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ombi.nix b/nixpkgs/nixos/tests/ombi.nix
new file mode 100644
index 000000000000..fb3a37c978e3
--- /dev/null
+++ b/nixpkgs/nixos/tests/ombi.nix
@@ -0,0 +1,16 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "ombi";
+  meta.maintainers = with lib.maintainers; [ woky ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.ombi.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("ombi.service")
+    machine.wait_for_open_port(5000)
+    machine.succeed("curl --fail http://localhost:5000/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/openarena.nix b/nixpkgs/nixos/tests/openarena.nix
new file mode 100644
index 000000000000..63dc1b9a6857
--- /dev/null
+++ b/nixpkgs/nixos/tests/openarena.nix
@@ -0,0 +1,71 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  client =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      hardware.opengl.driSupport = true;
+      environment.systemPackages = [ pkgs.openarena ];
+    };
+
+in {
+  name = "openarena";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fpletz ];
+  };
+
+  nodes =
+    { server =
+        { services.openarena = {
+            enable = true;
+            extraFlags = [ "+set g_gametype 0" "+map oa_dm7" "+addbot Angelyss" "+addbot Arachna" ];
+            openPorts = true;
+          };
+        };
+
+      client1 = client;
+      client2 = client;
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      server.wait_for_unit("openarena")
+      server.wait_until_succeeds("ss --numeric --udp --listening | grep -q 27960")
+
+      client1.wait_for_x()
+      client2.wait_for_x()
+
+      client1.execute("openarena +set r_fullscreen 0 +set name Foo +connect server >&2 &")
+      client2.execute("openarena +set r_fullscreen 0 +set name Bar +connect server >&2 &")
+
+      server.wait_until_succeeds(
+          "journalctl -u openarena -e | grep -q 'Foo.*entered the game'"
+      )
+      server.wait_until_succeeds(
+          "journalctl -u openarena -e | grep -q 'Bar.*entered the game'"
+      )
+
+      server.sleep(10)  # wait for a while to get a nice screenshot
+
+      client1.screenshot("screen_client1_1")
+      client2.screenshot("screen_client2_1")
+
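+      # cut client1 off from the virtual network for a while, then reconnect it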
+      client1.block()
+
+      server.sleep(10)
+
+      client1.screenshot("screen_client1_2")
+      client2.screenshot("screen_client2_2")
+
+      client1.unblock()
+
+      server.sleep(10)
+
+      client1.screenshot("screen_client1_3")
+      client2.screenshot("screen_client2_3")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/openldap.nix b/nixpkgs/nixos/tests/openldap.nix
new file mode 100644
index 000000000000..47d6a91843f1
--- /dev/null
+++ b/nixpkgs/nixos/tests/openldap.nix
@@ -0,0 +1,156 @@
+import ./make-test-python.nix ({ pkgs, ... }: let
+  dbContents = ''
+    dn: dc=example
+    objectClass: domain
+    dc: example
+
+    dn: ou=users,dc=example
+    objectClass: organizationalUnit
+    ou: users
+  '';
+
+  ldifConfig = ''
+    dn: cn=config
+    cn: config
+    objectClass: olcGlobal
+    olcLogLevel: stats
+
+    dn: cn=schema,cn=config
+    cn: schema
+    objectClass: olcSchemaConfig
+
+    include: file://${pkgs.openldap}/etc/schema/core.ldif
+    include: file://${pkgs.openldap}/etc/schema/cosine.ldif
+    include: file://${pkgs.openldap}/etc/schema/inetorgperson.ldif
+
+    dn: olcDatabase={0}config,cn=config
+    olcDatabase: {0}config
+    objectClass: olcDatabaseConfig
+    olcRootDN: cn=root,cn=config
+    olcRootPW: configpassword
+
+    dn: olcDatabase={1}mdb,cn=config
+    objectClass: olcDatabaseConfig
+    objectClass: olcMdbConfig
+    olcDatabase: {1}mdb
+    olcDbDirectory: /var/db/openldap
+    olcDbIndex: objectClass eq
+    olcSuffix: dc=example
+    olcRootDN: cn=root,dc=example
+    olcRootPW: notapassword
+  '';
+
+  ldapClientConfig = {
+    enable = true;
+    loginPam = false;
+    nsswitch = false;
+    server = "ldap://";
+    base = "dc=example";
+  };
+
+in {
+  name = "openldap";
+
+  nodes.machine = { pkgs, ... }: {
+    environment.etc."openldap/root_password".text = "notapassword";
+
+    users.ldap = ldapClientConfig;
+
+    services.openldap = {
+      enable = true;
+      urlList = [ "ldapi:///" "ldap://" ];
+      settings = {
+        children = {
+          "cn=schema".includes = [
+            "${pkgs.openldap}/etc/schema/core.ldif"
+            "${pkgs.openldap}/etc/schema/cosine.ldif"
+            "${pkgs.openldap}/etc/schema/inetorgperson.ldif"
+            "${pkgs.openldap}/etc/schema/nis.ldif"
+          ];
+          "olcDatabase={0}config" = {
+            attrs = {
+              objectClass = [ "olcDatabaseConfig" ];
+              olcDatabase = "{0}config";
+              olcRootDN = "cn=root,cn=config";
+              olcRootPW = "configpassword";
+            };
+          };
+          "olcDatabase={1}mdb" = {
+            # This tests string, base64 and path values, as well as lists of string values
+            attrs = {
+              objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
+              olcDatabase = "{1}mdb";
+              olcDbDirectory = "/var/lib/openldap/db";
+              olcSuffix = "dc=example";
+              olcRootDN = {
+                # cn=root,dc=example
+                base64 = "Y249cm9vdCxkYz1leGFtcGxl";
+              };
+              olcRootPW = {
+                path = "/etc/openldap/root_password";
+              };
+            };
+          };
+        };
+      };
+    };
+
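+    # Three variants are exercised via specialisations: declarative database contents,
+    # a mutable on-disk configuration, and a manually managed configDir.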
+    specialisation = {
+      declarativeContents.configuration = { ... }: {
+        services.openldap.declarativeContents."dc=example" = dbContents;
+      };
+      mutableConfig.configuration = { ... }: {
+        services.openldap = {
+          declarativeContents."dc=example" = dbContents;
+          mutableConfig = true;
+        };
+      };
+      manualConfigDir = {
+        inheritParentConfig = false;
+        configuration = { ... }: {
+          users.ldap = ldapClientConfig;
+          services.openldap = {
+            enable = true;
+            configDir = "/var/db/slapd.d";
+          };
+        };
+      };
+    };
+  };
+  testScript = { nodes, ... }: let
+    specializations = "${nodes.machine.system.build.toplevel}/specialisation";
+    changeRootPw = ''
+      dn: olcDatabase={1}mdb,cn=config
+      changetype: modify
+      replace: olcRootPW
+      olcRootPW: foobar
+    '';
+  in ''
+    # Test startup with empty DB
+    machine.wait_for_unit("openldap.service")
+
+    with subtest("declarative contents"):
+      machine.succeed('${specializations}/declarativeContents/bin/switch-to-configuration test')
+      machine.wait_for_unit("openldap.service")
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword')
+      machine.fail('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}')
+
+    with subtest("mutable config"):
+      machine.succeed('${specializations}/mutableConfig/bin/switch-to-configuration test')
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword')
+      machine.succeed('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}')
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w foobar')
+
+    with subtest("manual config dir"):
+      machine.succeed(
+        'mkdir /var/db/slapd.d /var/db/openldap',
+        'slapadd -F /var/db/slapd.d -n0 -l ${pkgs.writeText "config.ldif" ldifConfig}',
+        'slapadd -F /var/db/slapd.d -n1 -l ${pkgs.writeText "contents.ldif" dbContents}',
+        'chown -R openldap:openldap /var/db/slapd.d /var/db/openldap',
+        '${specializations}/manualConfigDir/bin/switch-to-configuration test',
+      )
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w notapassword')
+      machine.succeed('ldapmodify -D cn=root,cn=config -w configpassword -f ${pkgs.writeText "rootpw.ldif" changeRootPw}')
+      machine.succeed('ldapsearch -LLL -D "cn=root,dc=example" -w foobar')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/openresty-lua.nix b/nixpkgs/nixos/tests/openresty-lua.nix
new file mode 100644
index 000000000000..9e987398f51d
--- /dev/null
+++ b/nixpkgs/nixos/tests/openresty-lua.nix
@@ -0,0 +1,101 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    lualibs = [
+      pkgs.lua.pkgs.markdown
+    ];
+
+    getPath = lib: type: "${lib}/share/lua/${pkgs.lua.luaversion}/?.${type}";
+    getLuaPath = lib: getPath lib "lua";
+    luaPath = lib.concatStringsSep ";" (map getLuaPath lualibs);
+  in
+  {
+    name = "openresty-lua";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ bbigras ];
+    };
+
+    nodes = {
+      webserver = { pkgs, lib, ... }: {
+        networking = {
+          extraHosts = ''
+            127.0.0.1 default.test
+            127.0.0.1 sandbox.test
+          '';
+        };
+        services.nginx = {
+          enable = true;
+          package = pkgs.openresty;
+
+          commonHttpConfig = ''
+            lua_package_path '${luaPath};;';
+          '';
+
+          virtualHosts."default.test" = {
+            default = true;
+            locations."/" = {
+              extraConfig = ''
+                default_type text/html;
+                access_by_lua '
+                  local markdown = require "markdown"
+                  markdown("source")
+                ';
+              '';
+            };
+          };
+
+          virtualHosts."sandbox.test" = {
+            locations."/test1-write" = {
+              extraConfig = ''
+                content_by_lua_block {
+                  local create = os.execute('${pkgs.coreutils}/bin/mkdir /tmp/test1-read')
+                  local create = os.execute('${pkgs.coreutils}/bin/touch /tmp/test1-read/foo.txt')
+                  local echo = os.execute('${pkgs.coreutils}/bin/echo worked > /tmp/test1-read/foo.txt')
+                }
+              '';
+            };
+            locations."/test1-read" = {
+              root = "/tmp";
+            };
+            locations."/test2-write" = {
+              extraConfig = ''
+                content_by_lua_block {
+                  local create = os.execute('${pkgs.coreutils}/bin/mkdir /var/web/test2-read')
+                  local create = os.execute('${pkgs.coreutils}/bin/touch /var/web/test2-read/bar.txt')
+                  local echo = os.execute('${pkgs.coreutils}/bin/echo error-worked > /var/web/test2-read/bar.txt')
+                }
+              '';
+            };
+            locations."/test2-read" = {
+              root = "/var/web";
+            };
+          };
+        };
+      };
+    };
+
+    testScript = { nodes, ... }:
+      ''
+        url = "http://localhost"
+
+        webserver.wait_for_unit("nginx")
+        webserver.wait_for_open_port(80)
+
+        http_code = webserver.succeed(
+          f"curl -w '%{{http_code}}' --head --fail {url}"
+        )
+        assert http_code.split("\n")[-1] == "200"
+
+        # This test checks the creation and reading of a file in sandbox mode.
+        # Checking write in temporary folder
+        webserver.succeed("$(curl -vvv http://sandbox.test/test1-write)")
+        webserver.succeed('test "$(curl -fvvv http://sandbox.test/test1-read/foo.txt)" = worked')
+        # Check writing to a protected folder. In sandbox mode, the nginx service gets
+        # /var/web mounted read-only, so the write must not go through.
+        webserver.succeed("mkdir -p /var/web")
+        webserver.succeed("chown nginx:nginx /var/web")
+        webserver.succeed("$(curl -vvv http://sandbox.test/test2-write)")
+        assert "404 Not Found" in webserver.succeed(
+            "curl -vvv -s http://sandbox.test/test2-read/bar.txt"
+        )
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/opensearch.nix b/nixpkgs/nixos/tests/opensearch.nix
new file mode 100644
index 000000000000..2887ac967765
--- /dev/null
+++ b/nixpkgs/nixos/tests/opensearch.nix
@@ -0,0 +1,47 @@
+let
+  opensearchTest =
+    import ./make-test-python.nix (
+      { pkgs, lib, extraSettings ? {} }: {
+        name = "opensearch";
+        meta.maintainers = with pkgs.lib.maintainers; [ shyim ];
+
+        nodes.machine = lib.mkMerge [
+          {
+            virtualisation.memorySize = 2048;
+            services.opensearch.enable = true;
+          }
+          extraSettings
+        ];
+
+        testScript = ''
+          machine.start()
+          machine.wait_for_unit("opensearch.service")
+          machine.wait_for_open_port(9200)
+
+          machine.succeed(
+              "curl --fail localhost:9200"
+          )
+        '';
+      });
+in
+{
+  opensearch = opensearchTest {};
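+  # same test, but with a non-default data directory and a dedicated user/group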
+  opensearchCustomPathAndUser = opensearchTest {
+    extraSettings = {
+      services.opensearch.dataDir = "/var/opensearch_test";
+      services.opensearch.user = "open_search";
+      services.opensearch.group = "open_search";
+      systemd.tmpfiles.rules = [
+        "d /var/opensearch_test 0700 open_search open_search -"
+      ];
+      users = {
+        groups.open_search = {};
+        users.open_search = {
+          description = "OpenSearch daemon user";
+          group = "open_search";
+          isSystemUser = true;
+        };
+      };
+    };
+  };
+}
diff --git a/nixpkgs/nixos/tests/opensmtpd-rspamd.nix b/nixpkgs/nixos/tests/opensmtpd-rspamd.nix
new file mode 100644
index 000000000000..19969a7b47dd
--- /dev/null
+++ b/nixpkgs/nixos/tests/opensmtpd-rspamd.nix
@@ -0,0 +1,141 @@
+import ./make-test-python.nix {
+  name = "opensmtpd-rspamd";
+
+  nodes = {
+    smtp1 = { pkgs, ... }: {
+      imports = [ common/user-account.nix ];
+      networking = {
+        firewall.allowedTCPPorts = [ 25 143 ];
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = [ pkgs.opensmtpd ];
+      services.opensmtpd = {
+        enable = true;
+        extraServerArgs = [ "-v" ];
+        serverConfiguration = ''
+          listen on 0.0.0.0
+          action dovecot_deliver mda \
+            "${pkgs.dovecot}/libexec/dovecot/deliver -d %{user.username}"
+          match from any for local action dovecot_deliver
+
+          action do_relay relay
+          # DO NOT DO THIS IN PRODUCTION!
+          # Setting up authentication requires a certificate which is painful in
+          # a test environment, but THIS WOULD BE DANGEROUS OUTSIDE OF A
+          # WELL-CONTROLLED ENVIRONMENT!
+          match from any for any action do_relay
+        '';
+      };
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+        mailLocation = "maildir:~/mail";
+        protocols = [ "imap" ];
+      };
+    };
+
+    smtp2 = { pkgs, ... }: {
+      imports = [ common/user-account.nix ];
+      networking = {
+        firewall.allowedTCPPorts = [ 25 143 ];
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = [ pkgs.opensmtpd ];
+      services.rspamd = {
+        enable = true;
+        locals."worker-normal.inc".text = ''
+          bind_socket = "127.0.0.1:11333";
+        '';
+      };
+      services.opensmtpd = {
+        enable = true;
+        extraServerArgs = [ "-v" ];
+        serverConfiguration = ''
+          filter rspamd proc-exec "${pkgs.opensmtpd-filter-rspamd}/bin/filter-rspamd"
+          listen on 0.0.0.0 filter rspamd
+          action dovecot_deliver mda \
+            "${pkgs.dovecot}/libexec/dovecot/deliver -d %{user.username}"
+          match from any for local action dovecot_deliver
+        '';
+      };
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+        mailLocation = "maildir:~/mail";
+        protocols = [ "imap" ];
+      };
+    };
+
+    client = { pkgs, ... }: {
+      networking = {
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.3"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = let
+        sendTestMail = pkgs.writeScriptBin "send-a-test-mail" ''
+          #!${pkgs.python3.interpreter}
+          import smtplib, sys
+
+          with smtplib.SMTP('192.168.1.1') as smtp:
+            smtp.sendmail('alice@[192.168.1.1]', 'bob@[192.168.1.2]', """
+              From: alice@smtp1
+              To: bob@smtp2
+              Subject: Test
+
+              Hello World
+              Here goes the spam test
+              XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X
+            """)
+        '';
+
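+        # rspamd on smtp2 rejects the GTUBE test mail, so alice on smtp1 should get a bounce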
+        checkMailBounced = pkgs.writeScriptBin "check-mail-bounced" ''
+          #!${pkgs.python3.interpreter}
+          import imaplib
+
+          with imaplib.IMAP4('192.168.1.1', 143) as imap:
+            imap.login('alice', 'foobar')
+            imap.select()
+            status, refs = imap.search(None, 'ALL')
+            assert status == 'OK'
+            assert len(refs) == 1
+            status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+            assert status == 'OK'
+            content = msg[0][1]
+            print("===> content:", content)
+            assert b"An error has occurred while attempting to deliver a message" in content
+        '';
+      in [ sendTestMail checkMailBounced ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    client.wait_for_unit("network-online.target")
+    smtp1.wait_for_unit("opensmtpd")
+    smtp2.wait_for_unit("opensmtpd")
+    smtp2.wait_for_unit("rspamd")
+    smtp2.wait_for_unit("dovecot2")
+
+    # To prevent sporadic failures during daemon startup, make sure
+    # services are listening on their ports before sending requests
+    smtp1.wait_for_open_port(25)
+    smtp2.wait_for_open_port(25)
+    smtp2.wait_for_open_port(143)
+    smtp2.wait_for_open_port(11333)
+
+    client.succeed("send-a-test-mail")
+    smtp1.wait_until_fails("smtpctl show queue | egrep .")
+    client.succeed("check-mail-bounced >&2")
+  '';
+
+  meta.timeout = 1800;
+}
diff --git a/nixpkgs/nixos/tests/opensmtpd.nix b/nixpkgs/nixos/tests/opensmtpd.nix
new file mode 100644
index 000000000000..17c1a569ba0d
--- /dev/null
+++ b/nixpkgs/nixos/tests/opensmtpd.nix
@@ -0,0 +1,125 @@
+import ./make-test-python.nix {
+  name = "opensmtpd";
+
+  nodes = {
+    smtp1 = { pkgs, ... }: {
+      imports = [ common/user-account.nix ];
+      networking = {
+        firewall.allowedTCPPorts = [ 25 ];
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = [ pkgs.opensmtpd ];
+      services.opensmtpd = {
+        enable = true;
+        extraServerArgs = [ "-v" ];
+        serverConfiguration = ''
+          listen on 0.0.0.0
+          action do_relay relay
+          # DO NOT DO THIS IN PRODUCTION!
+          # Setting up authentication requires a certificate which is painful in
+          # a test environment, but THIS WOULD BE DANGEROUS OUTSIDE OF A
+          # WELL-CONTROLLED ENVIRONMENT!
+          match from any for any action do_relay
+        '';
+      };
+    };
+
+    smtp2 = { pkgs, ... }: {
+      imports = [ common/user-account.nix ];
+      networking = {
+        firewall.allowedTCPPorts = [ 25 143 ];
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = [ pkgs.opensmtpd ];
+      services.opensmtpd = {
+        enable = true;
+        extraServerArgs = [ "-v" ];
+        serverConfiguration = ''
+          listen on 0.0.0.0
+          action dovecot_deliver mda \
+            "${pkgs.dovecot}/libexec/dovecot/deliver -d %{user.username}"
+          match from any for local action dovecot_deliver
+        '';
+      };
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+        mailLocation = "maildir:~/mail";
+        protocols = [ "imap" ];
+      };
+    };
+
+    client = { pkgs, ... }: {
+      networking = {
+        useDHCP = false;
+        interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.3"; prefixLength = 24; }
+        ];
+      };
+      environment.systemPackages = let
+        sendTestMail = pkgs.writeScriptBin "send-a-test-mail" ''
+          #!${pkgs.python3.interpreter}
+          import smtplib, sys
+
+          with smtplib.SMTP('192.168.1.1') as smtp:
+            smtp.sendmail('alice@[192.168.1.1]', 'bob@[192.168.1.2]', """
+              From: alice@smtp1
+              To: bob@smtp2
+              Subject: Test
+
+              Hello World
+            """)
+        '';
+
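+        # the mail is relayed from smtp1 to smtp2 and should end up in bob's maildir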
+        checkMailLanded = pkgs.writeScriptBin "check-mail-landed" ''
+          #!${pkgs.python3.interpreter}
+          import imaplib
+
+          with imaplib.IMAP4('192.168.1.2', 143) as imap:
+            imap.login('bob', 'foobar')
+            imap.select()
+            status, refs = imap.search(None, 'ALL')
+            assert status == 'OK'
+            assert len(refs) == 1
+            status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+            assert status == 'OK'
+            content = msg[0][1]
+            print("===> content:", content)
+            split = content.split(b'\r\n')
+            print("===> split:", split)
+            lastline = split[-3]
+            print("===> lastline:", lastline)
+            assert lastline.strip() == b'Hello World'
+        '';
+      in [ sendTestMail checkMailLanded ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    client.wait_for_unit("network-online.target")
+    smtp1.wait_for_unit("opensmtpd")
+    smtp2.wait_for_unit("opensmtpd")
+    smtp2.wait_for_unit("dovecot2")
+
+    # To prevent sporadic failures during daemon startup, make sure
+    # services are listening on their ports before sending requests
+    smtp1.wait_for_open_port(25)
+    smtp2.wait_for_open_port(25)
+    smtp2.wait_for_open_port(143)
+
+    client.succeed("send-a-test-mail")
+    smtp1.wait_until_fails("smtpctl show queue | egrep .")
+    smtp2.wait_until_fails("smtpctl show queue | egrep .")
+    client.succeed("check-mail-landed >&2")
+  '';
+
+  meta.timeout = 1800;
+}
diff --git a/nixpkgs/nixos/tests/opensnitch.nix b/nixpkgs/nixos/tests/opensnitch.nix
new file mode 100644
index 000000000000..d84e4e0a935b
--- /dev/null
+++ b/nixpkgs/nixos/tests/opensnitch.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "opensnitch";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ onny ];
+  };
+
+  nodes = {
+    server =
+      { ... }: {
+        networking.firewall.allowedTCPPorts = [ 80 ];
+        services.caddy = {
+          enable = true;
+          virtualHosts."localhost".extraConfig = ''
+            respond "Hello, world!"
+          '';
+        };
+      };
+
+    clientBlocked =
+      { ... }: {
+        services.opensnitch = {
+          enable = true;
+          settings.DefaultAction = "deny";
+        };
+      };
+
+    clientAllowed =
+      { ... }: {
+        services.opensnitch = {
+          enable = true;
+          settings.DefaultAction = "deny";
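+          # with a default-deny policy, only the rule below lets curl reach the network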
+          rules = {
+            opensnitch = {
+              name = "curl";
+              enabled = true;
+              action = "allow";
+              duration = "always";
+              operator = {
+                type = "simple";
+                sensitive = false;
+                operand = "process.path";
+                data = "${pkgs.curl}/bin/curl";
+              };
+            };
+          };
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    server.wait_for_unit("caddy.service")
+    server.wait_for_open_port(80)
+
+    clientBlocked.wait_for_unit("opensnitchd.service")
+    clientBlocked.fail("curl http://server")
+
+    clientAllowed.wait_for_unit("opensnitchd.service")
+    clientAllowed.succeed("curl http://server")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/openssh.nix b/nixpkgs/nixos/tests/openssh.nix
new file mode 100644
index 000000000000..799497477993
--- /dev/null
+++ b/nixpkgs/nixos/tests/openssh.nix
@@ -0,0 +1,187 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let inherit (import ./ssh-keys.nix pkgs)
+      snakeOilPrivateKey snakeOilPublicKey;
+in {
+  name = "openssh";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ aszlig eelco ];
+  };
+
+  nodes = {
+
+    server =
+      { ... }:
+
+      {
+        services.openssh.enable = true;
+        security.pam.services.sshd.limits =
+          [ { domain = "*"; item = "memlock"; type = "-"; value = 1024; } ];
+        users.users.root.openssh.authorizedKeys.keys = [
+          snakeOilPublicKey
+        ];
+      };
+
+    server-lazy =
+      { ... }:
+
+      {
+        services.openssh = { enable = true; startWhenNeeded = true; };
+        security.pam.services.sshd.limits =
+          [ { domain = "*"; item = "memlock"; type = "-"; value = 1024; } ];
+        users.users.root.openssh.authorizedKeys.keys = [
+          snakeOilPublicKey
+        ];
+      };
+
+    server-localhost-only =
+      { ... }:
+
+      {
+        services.openssh = {
+          enable = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } ];
+        };
+      };
+
+    server-localhost-only-lazy =
+      { ... }:
+
+      {
+        services.openssh = {
+          enable = true; startWhenNeeded = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } ];
+        };
+      };
+
+    server-match-rule =
+      { ... }:
+
+      {
+        services.openssh = {
+          enable = true; listenAddresses = [ { addr = "127.0.0.1"; port = 22; } { addr = "[::]"; port = 22; } ];
+          extraConfig = ''
+            # Combined test for two (predictable) Match criteria
+            Match LocalAddress 127.0.0.1 LocalPort 22
+              PermitRootLogin yes
+
+            # Separate tests for individual Match criteria
+            Match User root
+              PermitRootLogin yes
+            Match Group root
+              PermitRootLogin yes
+            Match Host nohost.example
+              PermitRootLogin yes
+            Match LocalAddress 127.0.0.1
+              PermitRootLogin yes
+            Match LocalPort 22
+              PermitRootLogin yes
+            Match RDomain nohost.example
+              PermitRootLogin yes
+            Match Address 127.0.0.1
+              PermitRootLogin yes
+          '';
+        };
+      };
+
+    server_allowedusers =
+      { ... }:
+
+      {
+        services.openssh = { enable = true; settings.AllowUsers = [ "alice" "bob" ]; };
+        users.groups = { alice = { }; bob = { }; carol = { }; };
+        users.users = {
+          alice = { isNormalUser = true; group = "alice"; openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; };
+          bob = { isNormalUser = true; group = "bob"; openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; };
+          carol = { isNormalUser = true; group = "carol"; openssh.authorizedKeys.keys = [ snakeOilPublicKey ]; };
+        };
+      };
+
+    client =
+      { ... }: { };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("sshd", timeout=30)
+    server_localhost_only.wait_for_unit("sshd", timeout=30)
+    server_match_rule.wait_for_unit("sshd", timeout=30)
+
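+    # the startWhenNeeded servers are socket-activated, so only sshd.socket is active until a client connects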
+    server_lazy.wait_for_unit("sshd.socket", timeout=30)
+    server_localhost_only_lazy.wait_for_unit("sshd.socket", timeout=30)
+
+    with subtest("manual-authkey"):
+        client.succeed("mkdir -m 700 /root/.ssh")
+        client.succeed(
+            '${pkgs.openssh}/bin/ssh-keygen -t ed25519 -f /root/.ssh/id_ed25519 -N ""'
+        )
+        public_key = client.succeed(
+            "${pkgs.openssh}/bin/ssh-keygen -y -f /root/.ssh/id_ed25519"
+        )
+        public_key = public_key.strip()
+        client.succeed("chmod 600 /root/.ssh/id_ed25519")
+
+        server.succeed("mkdir -m 700 /root/.ssh")
+        server.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key))
+        server_lazy.succeed("mkdir -m 700 /root/.ssh")
+        server_lazy.succeed("echo '{}' > /root/.ssh/authorized_keys".format(public_key))
+
+        client.wait_for_unit("network.target")
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'echo hello world' >&2",
+            timeout=30
+        )
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server 'ulimit -l' | grep 1024",
+            timeout=30
+        )
+
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'echo hello world' >&2",
+            timeout=30
+        )
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no server-lazy 'ulimit -l' | grep 1024",
+            timeout=30
+        )
+
+    with subtest("configured-authkey"):
+        client.succeed(
+            "cat ${snakeOilPrivateKey} > privkey.snakeoil"
+        )
+        client.succeed("chmod 600 privkey.snakeoil")
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server true",
+            timeout=30
+        )
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil server-lazy true",
+            timeout=30
+        )
+
+    with subtest("localhost-only"):
+        server_localhost_only.succeed("ss -nlt | grep '127.0.0.1:22'")
+        server_localhost_only_lazy.succeed("ss -nlt | grep '127.0.0.1:22'")
+
+    with subtest("match-rules"):
+        server_match_rule.succeed("ss -nlt | grep '127.0.0.1:22'")
+
+    with subtest("allowed-users"):
+        client.succeed(
+            "cat ${snakeOilPrivateKey} > privkey.snakeoil"
+        )
+        client.succeed("chmod 600 privkey.snakeoil")
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil alice@server_allowedusers true",
+            timeout=30
+        )
+        client.succeed(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil bob@server_allowedusers true",
+            timeout=30
+        )
+        client.fail(
+            "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil carol@server_allowedusers true",
+            timeout=30
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/openstack-image.nix b/nixpkgs/nixos/tests/openstack-image.nix
new file mode 100644
index 000000000000..0b57dfb8e7eb
--- /dev/null
+++ b/nixpkgs/nixos/tests/openstack-image.nix
@@ -0,0 +1,98 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+with import common/ec2.nix { inherit makeTest pkgs; };
+
+let
+  image = (import ../lib/eval-config.nix {
+    inherit system;
+    modules = [
+      ../maintainers/scripts/openstack/openstack-image.nix
+      ../modules/testing/test-instrumentation.nix
+      ../modules/profiles/qemu-guest.nix
+      {
+        # Needed by nixos-rebuild due to lack of network access.
+        system.extraDependencies = with pkgs; [
+          stdenv
+        ];
+      }
+    ];
+  }).config.system.build.openstackImage + "/nixos.qcow2";
+
+  sshKeys = import ./ssh-keys.nix pkgs;
+  snakeOilPrivateKey = sshKeys.snakeOilPrivateKey.text;
+  snakeOilPrivateKeyFile = pkgs.writeText "private-key" snakeOilPrivateKey;
+  snakeOilPublicKey = sshKeys.snakeOilPublicKey;
+
+in {
+  metadata = makeEc2Test {
+    name = "openstack-ec2-metadata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey;
+    userData = ''
+      SSH_HOST_ED25519_KEY_PUB:${snakeOilPublicKey}
+      SSH_HOST_ED25519_KEY:${replaceStrings ["\n"] ["|"] snakeOilPrivateKey}
+    '';
+    script = ''
+      machine.start()
+      machine.wait_for_file("/etc/ec2-metadata/user-data")
+      machine.wait_for_unit("sshd.service")
+
+      machine.succeed("grep unknown /etc/ec2-metadata/ami-manifest-path")
+
+      # We have no keys configured on the client side yet, so this should fail
+      machine.fail("ssh -o BatchMode=yes localhost exit")
+
+      # Let's install our client private key
+      machine.succeed("mkdir -p ~/.ssh")
+
+      machine.copy_from_host_via_shell(
+          "${snakeOilPrivateKeyFile}", "~/.ssh/id_ed25519"
+      )
+      machine.succeed("chmod 600 ~/.ssh/id_ed25519")
+
+      # We haven't configured the host key yet, so this should still fail
+      machine.fail("ssh -o BatchMode=yes localhost exit")
+
+      # Add the host key; ssh should finally succeed
+      machine.succeed(
+          "echo localhost,127.0.0.1 ${snakeOilPublicKey} > ~/.ssh/known_hosts"
+      )
+      machine.succeed("ssh -o BatchMode=yes localhost exit")
+
+      # Just to make sure resizing is idempotent.
+      machine.shutdown()
+      machine.start()
+      machine.wait_for_file("/etc/ec2-metadata/user-data")
+    '';
+  };
+
+  userdata = makeEc2Test {
+    name = "openstack-ec2-metadata";
+    inherit image;
+    sshPublicKey = snakeOilPublicKey;
+    userData = ''
+      { pkgs, ... }:
+      {
+        imports = [
+          <nixpkgs/nixos/modules/virtualisation/openstack-config.nix>
+          <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          <nixpkgs/nixos/modules/profiles/qemu-guest.nix>
+        ];
+        environment.etc.testFile = {
+          text = "whoa";
+        };
+      }
+    '';
+    script = ''
+      machine.start()
+      machine.wait_for_file("/etc/testFile")
+      assert "whoa" in machine.succeed("cat /etc/testFile")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/opentabletdriver.nix b/nixpkgs/nixos/tests/opentabletdriver.nix
new file mode 100644
index 000000000000..b7583f6dd264
--- /dev/null
+++ b/nixpkgs/nixos/tests/opentabletdriver.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ( { pkgs, ... }: let
+  testUser = "alice";
+in {
+  name = "opentabletdriver";
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ thiagokokada ];
+  };
+
+  nodes.machine = { pkgs, ... }:
+    {
+      imports = [
+        ./common/user-account.nix
+        ./common/x11.nix
+      ];
+      test-support.displayManager.auto.user = testUser;
+      hardware.opentabletdriver.enable = true;
+    };
+
+  testScript =
+    ''
+      machine.start()
+      machine.wait_for_x()
+      machine.wait_for_unit("opentabletdriver.service", "${testUser}")
+
+      machine.succeed("cat /etc/udev/rules.d/99-opentabletdriver.rules")
+      # Will fail if service is not running
+      # Needs to run as the same user that started the service
+      machine.succeed("su - ${testUser} -c 'otd detect'")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/opentelemetry-collector.nix b/nixpkgs/nixos/tests/opentelemetry-collector.nix
new file mode 100644
index 000000000000..9a56a22ca47e
--- /dev/null
+++ b/nixpkgs/nixos/tests/opentelemetry-collector.nix
@@ -0,0 +1,76 @@
+import ./make-test-python.nix ({ pkgs, ...} : let
+  port = 4318;
+in {
+  name = "opentelemetry-collector";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ tylerjl ];
+  };
+
+  nodes.machine = { ... }: {
+    networking.firewall.allowedTCPPorts = [ port ];
+    services.opentelemetry-collector = {
+      enable = true;
+      settings = {
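+        # accept OTLP over HTTP and write everything to the journal via the logging exporter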
+        exporters.logging.verbosity = "detailed";
+        receivers.otlp.protocols.http = {};
+        service = {
+          pipelines.logs = {
+            receivers = [ "otlp" ];
+            exporters = [ "logging" ];
+          };
+        };
+      };
+    };
+    virtualisation.forwardPorts = [{
+      host.port = port;
+      guest.port = port;
+    }];
+  };
+
+  extraPythonPackages = p: [
+    p.requests
+    p.types-requests
+  ];
+
+  # Send a log event through the OTLP pipeline and check for its
+  # presence in the collector logs.
+  testScript = /* python */ ''
+    import requests
+    import time
+
+    from uuid import uuid4
+
+    flag = str(uuid4())
+
+    machine.wait_for_unit("opentelemetry-collector.service")
+    machine.wait_for_open_port(${toString port})
+
+    event = {
+        "resourceLogs": [
+            {
+                "resource": {"attributes": []},
+                "scopeLogs": [
+                    {
+                        "logRecords": [
+                            {
+                                "timeUnixNano": str(time.time_ns()),
+                                "severityNumber": 9,
+                                "severityText": "Info",
+                                "name": "logTest",
+                                "body": {
+                                    "stringValue": flag
+                                },
+                                "attributes": []
+                            },
+                        ]
+                    }
+                ]
+            }
+        ]
+    }
+
+    response = requests.post("http://localhost:${toString port}/v1/logs", json=event)
+    assert response.status_code == 200
+    assert flag in machine.execute("journalctl -u opentelemetry-collector")[-1]
+  '';
+})
diff --git a/nixpkgs/nixos/tests/openvscode-server.nix b/nixpkgs/nixos/tests/openvscode-server.nix
new file mode 100644
index 000000000000..cbff8e09c593
--- /dev/null
+++ b/nixpkgs/nixos/tests/openvscode-server.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+{
+  name = "openvscode-server";
+
+  nodes = {
+    machine = {pkgs, ...}: {
+      services.openvscode-server = {
+        enable = true;
+        withoutConnectionToken = true;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("openvscode-server.service")
+    machine.wait_for_open_port(3000)
+    machine.succeed("curl -k --fail http://localhost:3000", timeout=10)
+  '';
+
+  meta.maintainers = [ lib.maintainers.drupol ];
+})
diff --git a/nixpkgs/nixos/tests/orangefs.nix b/nixpkgs/nixos/tests/orangefs.nix
new file mode 100644
index 000000000000..4e67a7fb8efe
--- /dev/null
+++ b/nixpkgs/nixos/tests/orangefs.nix
@@ -0,0 +1,82 @@
+import ./make-test-python.nix ({ ... } :
+
+let
+  server = { pkgs, ... } : {
+    networking.firewall.allowedTCPPorts = [ 3334 ];
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.e2fsprogs}/bin/mkfs.ext4 -L data /dev/vdb
+    '';
+
+    virtualisation.emptyDiskImages = [ 4096 ];
+
+    virtualisation.fileSystems =
+      { "/data" =
+          { device = "/dev/disk/by-label/data";
+            fsType = "ext4";
+          };
+      };
+
+    services.orangefs.server = {
+      enable = true;
+      dataStorageSpace = "/data/storage";
+      metadataStorageSpace = "/data/meta";
+      servers = {
+        server1 = "tcp://server1:3334";
+        server2 = "tcp://server2:3334";
+      };
+    };
+  };
+
+  client = { lib, ... } : {
+    networking.firewall.enable = true;
+
+    services.orangefs.client = {
+      enable = true;
+      fileSystems = [{
+        target = "tcp://server1:3334/orangefs";
+        mountPoint = "/orangefs";
+      }];
+    };
+  };
+
+in {
+  name = "orangefs";
+
+  nodes = {
+    server1 = server;
+    server2 = server;
+
+    client1 = client;
+    client2 = client;
+  };
+
+  testScript = ''
+    # format storage
+    for server in server1, server2:
+        server.start()
+        server.wait_for_unit("multi-user.target")
+        server.succeed("mkdir -p /data/storage /data/meta")
+        server.succeed("chown orangefs:orangefs /data/storage /data/meta")
+        server.succeed("chmod 0770 /data/storage /data/meta")
+        server.succeed(
+            "sudo -g orangefs -u orangefs pvfs2-server -f /etc/orangefs/server.conf"
+        )
+
+    # start services after storage is formatted on all machines
+    for server in server1, server2:
+        server.succeed("systemctl start orangefs-server.service")
+
+    with subtest("clients can reach and mount the FS"):
+        for client in client1, client2:
+            client.start()
+            client.wait_for_unit("orangefs-client.service")
+            # Both servers need to be reachable
+            client.succeed("pvfs2-check-server -h server1 -f orangefs -n tcp -p 3334")
+            client.succeed("pvfs2-check-server -h server2 -f orangefs -n tcp -p 3334")
+            client.wait_for_unit("orangefs.mount")
+
+    with subtest("R/W test between clients"):
+        client1.succeed("echo test > /orangefs/file1")
+        client2.succeed("grep test /orangefs/file1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/os-prober.nix b/nixpkgs/nixos/tests/os-prober.nix
new file mode 100644
index 000000000000..dae1306bd69d
--- /dev/null
+++ b/nixpkgs/nixos/tests/os-prober.nix
@@ -0,0 +1,133 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+let
+  # A filesystem image with a (presumably) bootable debian
+  debianImage = pkgs.vmTools.diskImageFuns.debian11i386 {
+    # os-prober cannot detect systems installed on disks without a partition table
+    # so we create the disk ourselves
+    createRootFS = with pkgs; ''
+      ${parted}/bin/parted --script /dev/vda mklabel msdos
+      ${parted}/sbin/parted --script /dev/vda -- mkpart primary ext2 1M -1s
+      mkdir /mnt
+      ${e2fsprogs}/bin/mkfs.ext4 -O '^metadata_csum_seed' /dev/vda1
+      ${util-linux}/bin/mount -t ext4 /dev/vda1 /mnt
+
+      if test -e /mnt/.debug; then
+        exec ${bash}/bin/sh
+      fi
+      touch /mnt/.debug
+
+      mkdir /mnt/proc /mnt/dev /mnt/sys
+    '';
+    extraPackages = [
+      # /etc/os-release
+      "base-files"
+      # make the disk bootable-looking
+      "grub2" "linux-image-686"
+    ];
+    # install grub
+    postInstall = ''
+      ln -sf /proc/self/mounts /etc/mtab
+      PATH=/usr/bin:/bin:/usr/sbin:/sbin $chroot /mnt \
+        grub-install /dev/vda --force
+      PATH=/usr/bin:/bin:/usr/sbin:/sbin $chroot /mnt \
+        update-grub
+    '';
+  };
+
+  # a part of the configuration of the test vm
+  simpleConfig = {
+    boot.loader.grub = {
+      enable = true;
+      useOSProber = true;
+      device = "/dev/vda";
+      # vda is a filesystem without partition table
+      forceInstall = true;
+    };
+    nix.settings = {
+      substituters = lib.mkForce [];
+      hashed-mirrors = null;
+      connect-timeout = 1;
+    };
+    # save some memory
+    documentation.enable = false;
+  };
+  # /etc/nixos/configuration.nix for the vm
+  configFile = pkgs.writeText "configuration.nix" ''
+    {config, pkgs, lib, ...}: ({
+    imports =
+          [ ./hardware-configuration.nix
+            <nixpkgs/nixos/modules/testing/test-instrumentation.nix>
+          ];
+    } // lib.importJSON ${
+      pkgs.writeText "simpleConfig.json" (builtins.toJSON simpleConfig)
+    })
+  '';
+in {
+  name = "os-prober";
+
+  nodes.machine = { config, pkgs, ... }: (simpleConfig // {
+      imports = [ ../modules/profiles/installation-device.nix
+                  ../modules/profiles/base.nix ];
+      virtualisation.memorySize = 1300;
+      # To add the secondary disk:
+      virtualisation.qemu.options = [ "-drive index=2,file=${debianImage}/disk-image.qcow2,read-only,if=virtio" ];
+
+      # The test cannot access the network, so any packages
+      # nixos-rebuild needs must be included in the VM.
+      system.extraDependencies = with pkgs;
+        [
+          bintools
+          brotli
+          brotli.dev
+          brotli.lib
+          desktop-file-utils
+          docbook5
+          docbook_xsl_ns
+          grub2
+          kbd
+          kbd.dev
+          kmod.dev
+          libarchive
+          libarchive.dev
+          libxml2.bin
+          libxslt.bin
+          nixos-artwork.wallpapers.simple-dark-gray-bottom
+          ntp
+          perlPackages.ListCompare
+          perlPackages.XMLLibXML
+          python3Minimal
+          shared-mime-info
+          stdenv
+          sudo
+          texinfo
+          unionfs-fuse
+          xorg.lndir
+
+          # add curl so that rather than seeing the test attempt to download
+          # curl's tarball, we see what it's trying to download
+          curl
+        ];
+  });
+
+  testScript = ''
+    machine.start()
+    machine.succeed("udevadm settle")
+    machine.wait_for_unit("multi-user.target")
+    print(machine.succeed("lsblk"))
+
+    # check that os-prober works standalone
+    machine.succeed(
+        "${pkgs.os-prober}/bin/os-prober | grep /dev/vdb1"
+    )
+
+    # rebuild and test that debian is available in the grub menu
+    machine.succeed("nixos-generate-config")
+    machine.copy_from_host(
+        "${configFile}",
+        "/etc/nixos/configuration.nix",
+    )
+    machine.succeed("nixos-rebuild boot --show-trace >&2")
+
+    machine.succeed("egrep 'menuentry.*debian' /boot/grub/grub.cfg")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/osquery.nix b/nixpkgs/nixos/tests/osquery.nix
new file mode 100644
index 000000000000..e98e7c1baf04
--- /dev/null
+++ b/nixpkgs/nixos/tests/osquery.nix
@@ -0,0 +1,52 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  config_refresh = "10";
+  nullvalue = "NULL";
+  utc = false;
+in
+{
+  name = "osquery";
+  meta.maintainers = with lib.maintainers; [ znewman01 lewo ];
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.osquery = {
+      enable = true;
+
+      settings.options = { inherit nullvalue utc; };
+      flags = {
+        inherit config_refresh;
+        nullvalue = "IGNORED";
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+    let
+      cfg = nodes.machine.services.osquery;
+    in
+    ''
+      machine.start()
+      machine.wait_for_unit("osqueryd.service")
+
+      # Stop the osqueryd service so that we can use osqueryi to check information stored in the database.
+      machine.wait_until_succeeds("systemctl stop osqueryd.service")
+
+      # osqueryd was able to query information about the host.
+      machine.succeed("echo 'SELECT address FROM etc_hosts LIMIT 1;' | osqueryi | tee /dev/console | grep -q '127.0.0.1'")
+
+      # osquery binaries respect configuration from the Nix config option.
+      machine.succeed("echo 'SELECT value FROM osquery_flags WHERE name = \"utc\";' | osqueryi | tee /dev/console | grep -q ${lib.boolToString utc}")
+
+      # osquery binaries respect configuration from the Nix flags option.
+      machine.succeed("echo 'SELECT value FROM osquery_flags WHERE name = \"config_refresh\";' | osqueryi | tee /dev/console | grep -q ${config_refresh}")
+
+      # Demonstrate that osquery binaries prefer configuration plugin options over CLI flags.
+      # https://osquery.readthedocs.io/en/latest/deployment/configuration/#options.
+      machine.succeed("echo 'SELECT value FROM osquery_flags WHERE name = \"nullvalue\";' | osqueryi | tee /dev/console | grep -q ${nullvalue}")
+
+      # Module creates directories for default database_path and pidfile flag values.
+      machine.succeed("test -d $(dirname ${cfg.flags.database_path})")
+      machine.succeed("test -d $(dirname ${cfg.flags.pidfile})")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/osrm-backend.nix b/nixpkgs/nixos/tests/osrm-backend.nix
new file mode 100644
index 000000000000..b0e65a2ae1c1
--- /dev/null
+++ b/nixpkgs/nixos/tests/osrm-backend.nix
@@ -0,0 +1,57 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  port = 5000;
+in {
+  name = "osrm-backend";
+  meta.maintainers = [ lib.maintainers.erictapen ];
+
+  nodes.machine = { config, pkgs, ... }:{
+
+    services.osrm = {
+      enable = true;
+      inherit port;
+      dataFile = let
+        filename = "monaco";
+        osrm-data = pkgs.stdenv.mkDerivation {
+          name = "osrm-data";
+
+          buildInputs = [ pkgs.osrm-backend ];
+
+          # This is a pbf file of Monaco, downloaded on 2019-01-04 from
+          # http://download.geofabrik.de/europe/monaco-latest.osm.pbf
+          # As apparently no provider of OSM files guarantees immutability,
+          # it is hosted as a gist on GitHub.
+          src = pkgs.fetchgit {
+            url = "https://gist.github.com/erictapen/01e39f73a6c856eac53ba809a94cdb83";
+            rev = "9b1ff0f24deb40e5cf7df51f843dbe860637b8ce";
+            sha256 = "1scqhmrfnpwsy5i2a9jpggqnvfgj4hv9p4qyvc79321pzkbv59nx";
+          };
+
+          buildCommand = ''
+            cp $src/${filename}.osm.pbf .
+            ${pkgs.osrm-backend}/bin/osrm-extract -p ${pkgs.osrm-backend}/share/osrm/profiles/car.lua ${filename}.osm.pbf
+            ${pkgs.osrm-backend}/bin/osrm-partition ${filename}.osrm
+            ${pkgs.osrm-backend}/bin/osrm-customize ${filename}.osrm
+            mkdir -p $out
+            cp ${filename}* $out/
+          '';
+        };
+      in "${osrm-data}/${filename}.osrm";
+    };
+
+    environment.systemPackages = [ pkgs.jq ];
+  };
+
+  testScript = let
+    query = "http://localhost:${toString port}/route/v1/driving/7.41720,43.73304;7.42463,43.73886?steps=true";
+  in ''
+    machine.wait_for_unit("osrm.service")
+    machine.wait_for_open_port(${toString port})
+    assert "Boulevard Rainier III" in machine.succeed(
+        "curl --fail --silent '${query}' | jq .waypoints[0].name"
+    )
+    assert "Avenue de la Costa" in machine.succeed(
+        "curl --fail --silent '${query}' | jq .waypoints[1].name"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/outline.nix b/nixpkgs/nixos/tests/outline.nix
new file mode 100644
index 000000000000..e45be37f5d3b
--- /dev/null
+++ b/nixpkgs/nixos/tests/outline.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  accessKey = "BKIKJAA5BMMU2RHO6IBB";
+  secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
+  secretKeyFile = pkgs.writeText "outline-secret-key" ''
+    ${secretKey}
+  '';
+  rootCredentialsFile = pkgs.writeText "minio-credentials-full" ''
+    MINIO_ROOT_USER=${accessKey}
+    MINIO_ROOT_PASSWORD=${secretKey}
+  '';
+in
+{
+  name = "outline";
+
+  meta.maintainers = with lib.maintainers; [ xanderio ];
+
+  nodes = {
+    outline = { pkgs, config, ... }: {
+      nixpkgs.config.allowUnfree = true;
+      environment.systemPackages = [ pkgs.minio-client ];
+      services.outline = {
+        enable = true;
+        forceHttps = false;
+        storage = {
+          inherit accessKey secretKeyFile;
+          uploadBucketUrl = "http://localhost:9000";
+          uploadBucketName = "outline";
+          region = config.services.minio.region;
+        };
+      };
+      services.minio = {
+        enable = true;
+        inherit rootCredentialsFile;
+      };
+    };
+  };
+
+  testScript =
+    ''
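+      # With a single node, the test driver also exposes it under the generic name
+      # `machine`, so `machine` and `outline` below refer to the same VM.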
+      machine.wait_for_unit("minio.service")
+      machine.wait_for_open_port(9000)
+
+      # Create a test bucket on the server
+      machine.succeed(
+          "mc config host add minio http://localhost:9000 ${accessKey} ${secretKey} --api s3v4"
+      )
+      machine.succeed("mc mb minio/outline")
+
+      outline.wait_for_unit("outline.service")
+      outline.wait_for_open_port(3000)
+      outline.succeed("curl --fail http://localhost:3000/")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/overlayfs.nix b/nixpkgs/nixos/tests/overlayfs.nix
new file mode 100644
index 000000000000..6dab6760c5b9
--- /dev/null
+++ b/nixpkgs/nixos/tests/overlayfs.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "overlayfs";
+  meta.maintainers = with pkgs.lib.maintainers; [ bachp ];
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.emptyDiskImages = [ 512 ];
+    networking.hostId = "deadbeef";
+    environment.systemPackages = with pkgs; [ parted ];
+  };
+
+  testScript = ''
+    machine.succeed("ls /dev")
+
+    machine.succeed("mkdir -p /tmp/mnt")
+
+    # Test ext4 + overlayfs
+    machine.succeed(
+      'mkfs.ext4 -F -L overlay-ext4 /dev/vdb',
+      'mount -t ext4 /dev/vdb /tmp/mnt',
+      'mkdir -p /tmp/mnt/upper /tmp/mnt/lower /tmp/mnt/work /tmp/mnt/merged',
+      # Setup some existing files
+      'echo Replace > /tmp/mnt/lower/replace.txt',
+      'echo Append > /tmp/mnt/lower/append.txt',
+      'echo Overwrite > /tmp/mnt/lower/overwrite.txt',
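+      # lowerdir is the read-only layer, upperdir receives all changes, and workdir
+      # is overlayfs' internal scratch space (it must be on the same filesystem as upperdir)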
+      'mount -t overlay overlay -o lowerdir=/tmp/mnt/lower,upperdir=/tmp/mnt/upper,workdir=/tmp/mnt/work /tmp/mnt/merged',
+      # Test new
+      'echo New > /tmp/mnt/merged/new.txt',
+      '[[ "$(cat /tmp/mnt/merged/new.txt)" == New ]]',
+      # Test replace
+      '[[ "$(cat /tmp/mnt/merged/replace.txt)" == Replace ]]',
+      'echo Replaced > /tmp/mnt/merged/replace-tmp.txt',
+      'mv /tmp/mnt/merged/replace-tmp.txt /tmp/mnt/merged/replace.txt',
+      '[[ "$(cat /tmp/mnt/merged/replace.txt)" == Replaced ]]',
+      # Overwrite
+      '[[ "$(cat /tmp/mnt/merged/overwrite.txt)" == Overwrite ]]',
+      'echo Overwritten > /tmp/mnt/merged/overwrite.txt',
+      '[[ "$(cat /tmp/mnt/merged/overwrite.txt)" == Overwritten ]]',
+      # Test append
+      '[[ "$(cat /tmp/mnt/merged/append.txt)" == Append ]]',
+      'echo ed >> /tmp/mnt/merged/append.txt',
+      '[[ "$(cat /tmp/mnt/merged/append.txt)" == "Append\ned" ]]',
+      'umount /tmp/mnt/merged',
+      'umount /tmp/mnt',
+      'udevadm settle',
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/owncast.nix b/nixpkgs/nixos/tests/owncast.nix
new file mode 100644
index 000000000000..debb34f5009d
--- /dev/null
+++ b/nixpkgs/nixos/tests/owncast.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "owncast";
+  meta = with pkgs.lib.maintainers; { maintainers = [ MayNiklas ]; };
+
+  nodes = {
+    client = { pkgs, ... }: with pkgs.lib; {
+      networking = {
+        dhcpcd.enable = false;
+        interfaces.eth1.ipv6.addresses = mkOverride 0 [ { address = "fd00::2"; prefixLength = 64; } ];
+        interfaces.eth1.ipv4.addresses = mkOverride 0 [ { address = "192.168.1.2"; prefixLength = 24; } ];
+      };
+    };
+    server = { pkgs, ... }: with pkgs.lib; {
+      networking = {
+        dhcpcd.enable = false;
+        useNetworkd = true;
+        useDHCP = false;
+        interfaces.eth1.ipv6.addresses = mkOverride 0 [ { address = "fd00::1"; prefixLength = 64; } ];
+        interfaces.eth1.ipv4.addresses = mkOverride 0 [ { address = "192.168.1.1"; prefixLength = 24; } ];
+
+        firewall.allowedTCPPorts = [ 8080 ];
+      };
+
+      services.owncast = {
+        enable = true;
+        listen = "0.0.0.0";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    client.wait_for_unit("network-online.target")
+    server.wait_for_unit("network-online.target")
+    server.wait_for_unit("owncast.service")
+    server.wait_until_succeeds("ss -ntl | grep -q 8080")
+
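+    # The status API should be reachable over both the static IPv4 and IPv6
+    # addresses configured above.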
+    client.succeed("curl http://192.168.1.1:8080/api/status")
+    client.succeed("curl http://[fd00::1]:8080/api/status")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pacemaker.nix b/nixpkgs/nixos/tests/pacemaker.nix
new file mode 100644
index 000000000000..684557614953
--- /dev/null
+++ b/nixpkgs/nixos/tests/pacemaker.nix
@@ -0,0 +1,110 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: rec {
+  name = "pacemaker";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ astro ];
+  };
+
+  nodes =
+    let
+      node = i: {
+        networking.interfaces.eth1.ipv4.addresses = [ {
+          address = "192.168.0.${toString i}";
+          prefixLength = 24;
+        } ];
+
+        services.corosync = {
+          enable = true;
+          clusterName = "zentralwerk-network";
+          nodelist = lib.imap (i: name: {
+            nodeid = i;
+            inherit name;
+            ring_addrs = [
+              (builtins.head nodes.${name}.networking.interfaces.eth1.ipv4.addresses).address
+            ];
+          }) (builtins.attrNames nodes);
+        };
+        environment.etc."corosync/authkey" = {
+          source = builtins.toFile "authkey"
+            # minimum length: 128 bytes
+            "testtesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttesttest";
+          mode = "0400";
+        };
+
+        services.pacemaker.enable = true;
+
+        # used for pacemaker resource
+        systemd.services.ha-cat = {
+          description = "Highly available netcat";
+          serviceConfig.ExecStart = "${pkgs.netcat}/bin/nc -l discard";
+        };
+      };
+    in {
+      node1 = node 1;
+      node2 = node 2;
+      node3 = node 3;
+    };
+
+  # sets up pacemaker with resources configuration, then crashes a
+  # node and waits for service restart on another node
+  testScript =
+    let
+      resources = builtins.toFile "cib-resources.xml" ''
+        <resources>
+          <primitive id="cat" class="systemd" type="ha-cat">
+            <operations>
+              <op id="stop-cat" name="start" interval="0" timeout="1s"/>
+              <op id="start-cat" name="start" interval="0" timeout="1s"/>
+              <op id="monitor-cat" name="monitor" interval="1s" timeout="1s"/>
+            </operations>
+          </primitive>
+        </resources>
+      '';
+    in ''
+      import re
+      import time
+
+      start_all()
+
+      ${lib.concatMapStrings (node: ''
+        ${node}.wait_until_succeeds("corosync-quorumtool")
+        ${node}.wait_for_unit("pacemaker.service")
+      '') (builtins.attrNames nodes)}
+
+      # No STONITH device
+      node1.succeed("crm_attribute -t crm_config -n stonith-enabled -v false")
+      # Configure the cat resource
+      node1.succeed("cibadmin --replace --scope resources --xml-file ${resources}")
+
+      # wait until the service is started
+      while True:
+        output = node1.succeed("crm_resource -r cat --locate")
+        match = re.search("is running on: (.+)", output)
+        if match:
+          for machine in machines:
+            if machine.name == match.group(1):
+              current_node = machine
+          break
+        time.sleep(1)
+
+      current_node.log("Service running here!")
+      current_node.crash()
+
+      # pick another node that's still up
+      for machine in machines:
+        if machine.booted:
+          check_node = machine
+      # find where the service has been started next
+      while True:
+        output = check_node.succeed("crm_resource -r cat --locate")
+        match = re.search("is running on: (.+)", output)
+        # output will remain the old current_node until the crash is detected by pacemaker
+        if match and match.group(1) != current_node.name:
+          for machine in machines:
+            if machine.name == match.group(1):
+              next_node = machine
+          break
+        time.sleep(1)
+
+      next_node.log("Service migrated here!")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/packagekit.nix b/nixpkgs/nixos/tests/packagekit.nix
new file mode 100644
index 000000000000..5769c6c9a8d4
--- /dev/null
+++ b/nixpkgs/nixos/tests/packagekit.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "packagekit";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ peterhoeg ];
+  };
+
+  nodes.machine = { ... }: {
+    environment.systemPackages = with pkgs; [ dbus ];
+    services.packagekit = {
+      enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    # send a dbus message to activate the service
+    machine.succeed(
+        "dbus-send --system --type=method_call --print-reply --dest=org.freedesktop.PackageKit /org/freedesktop/PackageKit org.freedesktop.DBus.Introspectable.Introspect"
+    )
+
+    # so now it should be running
+    machine.wait_for_unit("packagekit.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pam/pam-file-contents.nix b/nixpkgs/nixos/tests/pam/pam-file-contents.nix
new file mode 100644
index 000000000000..2bafd90618e9
--- /dev/null
+++ b/nixpkgs/nixos/tests/pam/pam-file-contents.nix
@@ -0,0 +1,26 @@
+let
+  name = "pam";
+in
+import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "pam-file-contents";
+
+  nodes.machine = { ... }: {
+    imports = [ ../../modules/profiles/minimal.nix ];
+
+    krb5.enable = true;
+
+    users = {
+      mutableUsers = false;
+      users = {
+        user = {
+          isNormalUser = true;
+        };
+      };
+    };
+  };
+
+  testScript = builtins.replaceStrings
+    [ "@@pam_ccreds@@" "@@pam_krb5@@" ]
+    [ pkgs.pam_ccreds.outPath pkgs.pam_krb5.outPath ]
+    (builtins.readFile ./test_chfn.py);
+})
diff --git a/nixpkgs/nixos/tests/pam/pam-oath-login.nix b/nixpkgs/nixos/tests/pam/pam-oath-login.nix
new file mode 100644
index 000000000000..dd6ef4a0abcb
--- /dev/null
+++ b/nixpkgs/nixos/tests/pam/pam-oath-login.nix
@@ -0,0 +1,108 @@
+import ../make-test-python.nix ({ ... }:
+
+let
+  oathSnakeoilSecret = "cdd4083ef8ff1fa9178c6d46bfb1a3";
+
+  # In HOTP mode the one-time password is derived from a counter of how many
+  # tokens have been generated. The counter starts at 0 and advances each time
+  # a token is accepted, so the expected passwords are deterministic for this
+  # test.
+  #
+  # Generated in nix-shell -p oath-toolkit
+  # via: oathtool -v -d6 -w10 cdd4083ef8ff1fa9178c6d46bfb1a3
+  # and picking the first two:
+  oathSnakeOilPassword1 = "143349";
+  oathSnakeOilPassword2 = "801753";
+
+  alicePassword = "foobar";
+  # Generated via: mkpasswd -m sha-512 and passing in "foobar"
+  hashedAlicePassword = "$6$MsMrE1q.1HrCgTS$Vq2e/uILzYjSN836TobAyN9xh9oi7EmCmucnZID25qgPoibkw8qTCugiAPnn4eCGvn1A.7oEBFJaaGUaJsQQY.";
+
+in
+{
+  name = "pam-oath-login";
+
+  nodes.machine =
+    { ... }:
+    {
+      security.pam.oath = {
+        enable = true;
+      };
+
+      users.users.alice = {
+        isNormalUser = true;
+        name = "alice";
+        uid = 1000;
+        hashedPassword = hashedAlicePassword;
+        extraGroups = [ "wheel" ];
+        createHome = true;
+        home = "/home/alice";
+      };
+
+
+      systemd.services.setupOathSnakeoilFile = {
+        wantedBy = [ "default.target" ];
+        before = [ "default.target" ];
+        serviceConfig = {
+          Type = "oneshot";
+          RemainAfterExit = true;
+        };
+        script = ''
+          touch /etc/users.oath
+          chmod 600 /etc/users.oath
+          chown root /etc/users.oath
+          echo "HOTP/E/6 alice - ${oathSnakeoilSecret}" > /etc/users.oath
+        '';
+      };
+    };
+
+  testScript = ''
+    def switch_to_tty(tty_number):
+        machine.fail(f"pgrep -f 'agetty.*tty{tty_number}'")
+        machine.send_key(f"alt-f{tty_number}")
+        machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]")
+        machine.wait_for_unit(f"getty@tty{tty_number}.service")
+        machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'")
+
+
+    def enter_user_alice(tty_number):
+        machine.wait_until_tty_matches(tty_number, "login: ")
+        machine.send_chars("alice\n")
+        machine.wait_until_tty_matches(tty_number, "login: alice")
+        machine.wait_until_succeeds("pgrep login")
+        machine.wait_until_tty_matches(tty_number, "One-time password")
+
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+    machine.screenshot("postboot")
+
+    with subtest("Invalid password"):
+        switch_to_tty("2")
+        enter_user_alice("2")
+
+        machine.send_chars("${oathSnakeOilPassword1}\n")
+        machine.wait_until_tty_matches("2", "Password: ")
+        machine.send_chars("blorg\n")
+        machine.wait_until_tty_matches("2", "Login incorrect")
+
+    with subtest("Invalid oath token"):
+        switch_to_tty("3")
+        enter_user_alice("3")
+
+        machine.send_chars("000000\n")
+        machine.wait_until_tty_matches("3", "Login incorrect")
+        machine.wait_until_tty_matches("3", "login:")
+
+    with subtest("Happy path: Both passwords are mandatory to get us in"):
+        switch_to_tty("4")
+        enter_user_alice("4")
+
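+        # The first generated token was already consumed in the "Invalid password"
+        # subtest (pam_oath advances the HOTP counter once a token verifies), so
+        # the second token is expected here.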
+        machine.send_chars("${oathSnakeOilPassword2}\n")
+        machine.wait_until_tty_matches("4", "Password: ")
+        machine.send_chars("${alicePassword}\n")
+
+        machine.wait_until_succeeds("pgrep -u alice bash")
+        machine.send_chars("touch  done4\n")
+        machine.wait_for_file("/home/alice/done4")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/pam/pam-u2f.nix b/nixpkgs/nixos/tests/pam/pam-u2f.nix
new file mode 100644
index 000000000000..46e307a3f125
--- /dev/null
+++ b/nixpkgs/nixos/tests/pam/pam-u2f.nix
@@ -0,0 +1,26 @@
+import ../make-test-python.nix ({ ... }:
+
+{
+  name = "pam-u2f";
+
+  nodes.machine =
+    { ... }:
+    {
+      security.pam.u2f = {
+        control = "required";
+        cue = true;
+        debug = true;
+        enable = true;
+        interactive = true;
+        origin = "nixos-test";
+      };
+    };
+
+  testScript =
+    ''
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed(
+          'egrep "auth required .*/lib/security/pam_u2f.so.*cue.*debug.*interactive.*origin=nixos-test" /etc/pam.d/ -R'
+      )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/pam/pam-ussh.nix b/nixpkgs/nixos/tests/pam/pam-ussh.nix
new file mode 100644
index 000000000000..ba0570dbf97d
--- /dev/null
+++ b/nixpkgs/nixos/tests/pam/pam-ussh.nix
@@ -0,0 +1,70 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }:
+
+let
+  testOnlySSHCredentials = pkgs.runCommand "pam-ussh-test-ca" {
+    nativeBuildInputs = [ pkgs.openssh ];
+  } ''
+    mkdir $out
+    ssh-keygen -t ed25519 -N "" -f $out/ca
+
+    ssh-keygen -t ed25519 -N "" -f $out/alice
+    ssh-keygen -s $out/ca -I "alice user key" -n "alice,root" -V 19700101:forever $out/alice.pub
+
+    ssh-keygen -t ed25519 -N "" -f $out/bob
+    ssh-keygen -s $out/ca -I "bob user key" -n "bob" -V 19700101:forever $out/bob.pub
+  '';
+  makeTestScript = user: pkgs.writeShellScript "pam-ussh-${user}-test-script" ''
+    set -euo pipefail
+
+    eval $(${pkgs.openssh}/bin/ssh-agent)
+
+    mkdir -p $HOME/.ssh
+    chmod 700 $HOME/.ssh
+    cp ${testOnlySSHCredentials}/${user}{,.pub,-cert.pub} $HOME/.ssh
+    chmod 600 $HOME/.ssh/${user}
+    chmod 644 $HOME/.ssh/${user}{,-cert}.pub
+
+    set -x
+
+    ${pkgs.openssh}/bin/ssh-add $HOME/.ssh/${user}
+    ${pkgs.openssh}/bin/ssh-add -l >&2
+
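+    # pam_ussh asks the ssh-agent for a certificate signed by the configured CA;
+    # only alice's certificate carries the authorized "root" principal, so only
+    # alice's sudo is expected to succeed.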
+    exec sudo id -u -n
+  '';
+in {
+  name = "pam-ussh";
+  meta.maintainers = with lib.maintainers; [ lukegb ];
+
+  nodes.machine =
+    { ... }:
+    {
+      users.users.alice = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+      users.users.bob = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+
+      security.pam.ussh = {
+        enable = true;
+        authorizedPrincipals = "root";
+        caFile = "${testOnlySSHCredentials}/ca.pub";
+      };
+
+      security.sudo = {
+        enable = true;
+        extraConfig = ''
+          Defaults lecture="never"
+        '';
+      };
+    };
+
+  testScript =
+    ''
+      with subtest("alice should be allowed to escalate to root"):
+        machine.succeed(
+            'su -c "${makeTestScript "alice"}" -l alice | grep root'
+        )
+
+      with subtest("bob should not be allowed to escalate to root"):
+        machine.fail(
+            'su -c "${makeTestScript "bob"}" -l bob | grep root'
+        )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/pam/test_chfn.py b/nixpkgs/nixos/tests/pam/test_chfn.py
new file mode 100644
index 000000000000..3cfbb3908e9d
--- /dev/null
+++ b/nixpkgs/nixos/tests/pam/test_chfn.py
@@ -0,0 +1,28 @@
+expected_lines = {
+    "account required pam_unix.so",
+    "account sufficient @@pam_krb5@@/lib/security/pam_krb5.so",
+    "auth [default=die success=done] @@pam_ccreds@@/lib/security/pam_ccreds.so action=validate use_first_pass",
+    "auth [default=ignore success=1 service_err=reset] @@pam_krb5@@/lib/security/pam_krb5.so use_first_pass",
+    "auth required pam_deny.so",
+    "auth sufficient @@pam_ccreds@@/lib/security/pam_ccreds.so action=store use_first_pass",
+    "auth sufficient pam_rootok.so",
+    "auth sufficient pam_unix.so likeauth try_first_pass",
+    "password sufficient @@pam_krb5@@/lib/security/pam_krb5.so use_first_pass",
+    "password sufficient pam_unix.so nullok yescrypt",
+    "session optional @@pam_krb5@@/lib/security/pam_krb5.so",
+    "session required pam_env.so conffile=/etc/pam/environment readenv=0",
+    "session required pam_unix.so",
+}
+actual_lines = set(machine.succeed("cat /etc/pam.d/chfn").splitlines())
+
+stripped_lines = set([line.split("#")[0].rstrip() for line in actual_lines])
+missing_lines = expected_lines - stripped_lines
+extra_lines = stripped_lines - expected_lines
+non_functional_lines = set([line for line in extra_lines if line == ""])
+unexpected_functional_lines = extra_lines - non_functional_lines
+
+with subtest("All expected lines are in the file"):
+    assert not missing_lines, f"Missing lines: {missing_lines}"
+
+with subtest("All remaining lines are empty or comments"):
+    assert not unexpected_functional_lines, f"Unexpected lines: {unexpected_functional_lines}"
diff --git a/nixpkgs/nixos/tests/pam/zfs-key.nix b/nixpkgs/nixos/tests/pam/zfs-key.nix
new file mode 100644
index 000000000000..4f54c287e91a
--- /dev/null
+++ b/nixpkgs/nixos/tests/pam/zfs-key.nix
@@ -0,0 +1,83 @@
+import ../make-test-python.nix ({ ... }:
+
+  let
+    userPassword = "password";
+    mismatchPass = "mismatch";
+  in
+  {
+    name = "pam-zfs-key";
+
+    nodes.machine =
+      { ... }: {
+        boot.supportedFilesystems = [ "zfs" ];
+
+        networking.hostId = "12345678";
+
+        security.pam.zfs.enable = true;
+
+        users.users = {
+          alice = {
+            isNormalUser = true;
+            password = userPassword;
+          };
+          bob = {
+            isNormalUser = true;
+            password = userPassword;
+          };
+        };
+      };
+
+    testScript = { nodes, ... }:
+      let
+        homes = nodes.machine.security.pam.zfs.homes;
+        pool = builtins.head (builtins.split "/" homes);
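+        # e.g. if `homes` is "rpool/home", `pool` is "rpool"; the test creates
+        # that pool on a file-backed vdev below.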
+      in
+      ''
+        machine.wait_for_unit("multi-user.target")
+        machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+
+        with subtest("Create encrypted ZFS datasets"):
+          machine.succeed("truncate -s 64M /testpool.img")
+          machine.succeed("zpool create -O canmount=off '${pool}' /testpool.img")
+          machine.succeed("zfs create -o canmount=off -p '${homes}'")
+          machine.succeed("echo ${userPassword} | zfs create -o canmount=noauto -o encryption=on -o keyformat=passphrase '${homes}/alice'")
+          machine.succeed("zfs unload-key '${homes}/alice'")
+          machine.succeed("echo ${mismatchPass} | zfs create -o canmount=noauto -o encryption=on -o keyformat=passphrase '${homes}/bob'")
+          machine.succeed("zfs unload-key '${homes}/bob'")
+
+        with subtest("Switch to tty2"):
+          machine.fail("pgrep -f 'agetty.*tty2'")
+          machine.send_key("alt-f2")
+          machine.wait_until_succeeds("[ $(fgconsole) = 2 ]")
+          machine.wait_for_unit("getty@tty2.service")
+          machine.wait_until_succeeds("pgrep -f 'agetty.*tty2'")
+
+        with subtest("Log in as user with home locked by login password"):
+          machine.wait_until_tty_matches("2", "login: ")
+          machine.send_chars("alice\n")
+          machine.wait_until_tty_matches("2", "login: alice")
+          machine.wait_until_succeeds("pgrep login")
+          machine.wait_until_tty_matches("2", "Password: ")
+          machine.send_chars("${userPassword}\n")
+          machine.wait_until_succeeds("pgrep -u alice bash")
+          machine.succeed("mount | grep ${homes}/alice")
+
+        with subtest("Switch to tty3"):
+          machine.fail("pgrep -f 'agetty.*tty3'")
+          machine.send_key("alt-f3")
+          machine.wait_until_succeeds("[ $(fgconsole) = 3 ]")
+          machine.wait_for_unit("getty@tty3.service")
+          machine.wait_until_succeeds("pgrep -f 'agetty.*tty3'")
+
+        with subtest("Log in as user with home locked by password different from login"):
+          machine.wait_until_tty_matches("3", "login: ")
+          machine.send_chars("bob\n")
+          machine.wait_until_tty_matches("3", "login: bob")
+          machine.wait_until_succeeds("pgrep login")
+          machine.wait_until_tty_matches("3", "Password: ")
+          machine.send_chars("${userPassword}\n")
+          machine.wait_until_succeeds("pgrep -u bob bash")
+          machine.fail("mount | grep ${homes}/bob")
+      '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/pantheon.nix b/nixpkgs/nixos/tests/pantheon.nix
new file mode 100644
index 000000000000..be1351283d99
--- /dev/null
+++ b/nixpkgs/nixos/tests/pantheon.nix
@@ -0,0 +1,85 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+{
+  name = "pantheon";
+
+  meta.maintainers = lib.teams.pantheon.members;
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+
+    services.xserver.enable = true;
+    services.xserver.desktopManager.pantheon.enable = true;
+
+    environment.systemPackages = [ pkgs.xdotool ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.users.users.alice;
+    bob = nodes.machine.users.users.bob;
+  in ''
+    machine.wait_for_unit("display-manager.service")
+
+    with subtest("Test we can see usernames in elementary-greeter"):
+        machine.wait_for_text("${user.description}")
+        # OCR was struggling with this one.
+        # machine.wait_for_text("${bob.description}")
+        # Ensure the password box is focused by clicking it.
+        # Workaround for https://github.com/NixOS/nixpkgs/issues/211366.
+        machine.succeed("XAUTHORITY=/var/lib/lightdm/.Xauthority DISPLAY=:0 xdotool mousemove 512 505 click 1")
+        machine.sleep(2)
+        machine.screenshot("elementary_greeter_lightdm")
+
+    with subtest("Login with elementary-greeter"):
+        machine.send_chars("${user.password}\n")
+        machine.wait_for_x()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+    with subtest("Check that logging in has given the user ownership of devices"):
+        machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+    with subtest("Check if pantheon session components actually start"):
+        machine.wait_until_succeeds("pgrep gala")
+        machine.wait_for_window("gala")
+        machine.wait_until_succeeds("pgrep -f io.elementary.wingpanel")
+        machine.wait_for_window("io.elementary.wingpanel")
+        machine.wait_until_succeeds("pgrep plank")
+        machine.wait_for_window("plank")
+        machine.wait_until_succeeds("pgrep -f gsd-media-keys")
+        machine.wait_for_unit("bamfdaemon.service", "${user.name}")
+        machine.wait_for_unit("io.elementary.files.xdg-desktop-portal.service", "${user.name}")
+
+    with subtest("Open elementary videos"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.videos >&2 &'")
+        machine.sleep(2)
+        machine.wait_for_window("io.elementary.videos")
+        machine.wait_for_text("No Videos Open")
+
+    with subtest("Open elementary calendar"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.calendar >&2 &'")
+        machine.sleep(2)
+        machine.wait_for_window("io.elementary.calendar")
+
+    with subtest("Open system settings"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.switchboard >&2 &'")
+        # Wait for all plugins to be loaded before we check if the window is still there.
+        machine.sleep(5)
+        machine.wait_for_window("io.elementary.switchboard")
+
+    with subtest("Open elementary terminal"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0 io.elementary.terminal >&2 &'")
+        machine.wait_for_window("io.elementary.terminal")
+
+    with subtest("Check if gala has ever coredumped"):
+        machine.fail("coredumpctl --json=short | grep gala")
+        # So you can see the dock in the below screenshot.
+        machine.succeed("su - ${user.name} -c 'DISPLAY=:0 xdotool mousemove 450 1000 >&2 &'")
+        machine.sleep(10)
+        machine.screenshot("screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/paperless.nix b/nixpkgs/nixos/tests/paperless.nix
new file mode 100644
index 000000000000..6a51cc522bdc
--- /dev/null
+++ b/nixpkgs/nixos/tests/paperless.nix
@@ -0,0 +1,89 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "paperless";
+  meta.maintainers = with lib.maintainers; [ erikarvstedt Flakebi ];
+
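+  # The nodes are built from a self-referencing attribute set so that the postgres
+  # node can reuse the simple node's configuration via `imports`.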
+  nodes = let self = {
+    simple = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [ imagemagick jq ];
+      services.paperless = {
+        enable = true;
+        passwordFile = builtins.toFile "password" "admin";
+      };
+    };
+    postgres = { config, pkgs, ... }: {
+      imports = [ self.simple ];
+      services.postgresql = {
+        enable = true;
+        ensureDatabases = [ "paperless" ];
+        ensureUsers = [
+          { name = config.services.paperless.user;
+            ensureDBOwnership = true;
+          }
+        ];
+      };
+      services.paperless.extraConfig = {
+        PAPERLESS_DBHOST = "/run/postgresql";
+      };
+    };
+  }; in self;
+
+  testScript = ''
+    import json
+
+    def test_paperless(node):
+      node.wait_for_unit("paperless-consumer.service")
+
+      with subtest("Add a document via the file system"):
+        node.succeed(
+          "convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black "
+          "-annotate +5+20 'hello world 16-10-2005' /var/lib/paperless/consume/doc.png"
+        )
+
+      with subtest("Web interface gets ready"):
+        node.wait_for_unit("paperless-web.service")
+        # Wait until server accepts connections
+        node.wait_until_succeeds("curl -fs localhost:28981")
+
+      # Required for consuming documents via the web interface
+      with subtest("Task-queue gets ready"):
+        node.wait_for_unit("paperless-task-queue.service")
+
+      with subtest("Add a png document via the web interface"):
+        node.succeed(
+          "convert -size 400x40 xc:white -font 'DejaVu-Sans' -pointsize 20 -fill black "
+          "-annotate +5+20 'hello web 16-10-2005' /tmp/webdoc.png"
+        )
+        node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.png -fs localhost:28981/api/documents/post_document/")
+
+      with subtest("Add a txt document via the web interface"):
+        node.succeed(
+          "echo 'hello web 16-10-2005' > /tmp/webdoc.txt"
+        )
+        node.wait_until_succeeds("curl -u admin:admin -F document=@/tmp/webdoc.txt -fs localhost:28981/api/documents/post_document/")
+
+      with subtest("Documents are consumed"):
+        node.wait_until_succeeds(
+          "(($(curl -u admin:admin -fs localhost:28981/api/documents/ | jq .count) == 3))"
+        )
+        docs = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/"))['results']
+        assert "2005-10-16" in docs[0]['created']
+        assert "2005-10-16" in docs[1]['created']
+        assert "2005-10-16" in docs[2]['created']
+
+      # Detects gunicorn issues, see PR #190888
+      with subtest("Document metadata can be accessed"):
+        metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/1/metadata/"))
+        assert "original_checksum" in metadata
+
+        metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/2/metadata/"))
+        assert "original_checksum" in metadata
+
+        metadata = json.loads(node.succeed("curl -u admin:admin -fs localhost:28981/api/documents/3/metadata/"))
+        assert "original_checksum" in metadata
+
+    test_paperless(simple)
+    simple.send_monitor_command("quit")
+    simple.wait_for_shutdown()
+    test_paperless(postgres)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/parsedmarc/default.nix b/nixpkgs/nixos/tests/parsedmarc/default.nix
new file mode 100644
index 000000000000..1feadcb7f39b
--- /dev/null
+++ b/nixpkgs/nixos/tests/parsedmarc/default.nix
@@ -0,0 +1,230 @@
+# This tests parsedmarc by sending a report to its monitored email
+# address and reading the results out of Elasticsearch.
+
+{ pkgs, ... }@args:
+let
+  inherit (import ../../lib/testing-python.nix args) makeTest;
+  inherit (pkgs) lib;
+
+  dmarcTestReport = builtins.fetchurl {
+    name = "dmarc-test-report";
+    url = "https://github.com/domainaware/parsedmarc/raw/f45ab94e0608088e0433557608d9f4e9517d3afe/samples/aggregate/estadocuenta1.infonacot.gob.mx!example.com!1536853302!1536939702!2940.xml.zip";
+    sha256 = "0dq64cj49711kbja27pjl2hy0d3azrjxg91kqrh40x46fkn1dwkx";
+  };
+
+  sendEmail = address:
+    pkgs.writeScriptBin "send-email" ''
+      #!${pkgs.python3.interpreter}
+      import smtplib
+      from email import encoders
+      from email.mime.base import MIMEBase
+      from email.mime.multipart import MIMEMultipart
+      from email.mime.text import MIMEText
+
+      sender_email = "dmarc_tester@fake.domain"
+      receiver_email = "${address}"
+
+      message = MIMEMultipart()
+      message["From"] = sender_email
+      message["To"] = receiver_email
+      message["Subject"] = "DMARC test"
+
+      message.attach(MIMEText("Testing parsedmarc", "plain"))
+
+      attachment = MIMEBase("application", "zip")
+
+      with open("${dmarcTestReport}", "rb") as report:
+          attachment.set_payload(report.read())
+
+      encoders.encode_base64(attachment)
+
+      attachment.add_header(
+          "Content-Disposition",
+          "attachment; filename= estadocuenta1.infonacot.gob.mx!example.com!1536853302!1536939702!2940.xml.zip",
+      )
+
+      message.attach(attachment)
+      text = message.as_string()
+
+      with smtplib.SMTP('localhost') as server:
+          server.sendmail(sender_email, receiver_email, text)
+          server.quit()
+    '';
+in
+{
+  localMail = makeTest
+    {
+      name = "parsedmarc-local-mail";
+      meta = with lib.maintainers; {
+        maintainers = [ talyz ];
+      };
+
+      nodes.parsedmarc =
+        { nodes, ... }:
+        {
+          virtualisation.memorySize = 2048;
+
+          services.postfix = {
+            enableSubmission = true;
+            enableSubmissions = true;
+            submissionsOptions = {
+              smtpd_sasl_auth_enable = "yes";
+              smtpd_client_restrictions = "permit";
+            };
+          };
+
+          services.parsedmarc = {
+            enable = true;
+            provision = {
+              geoIp = false;
+              localMail = {
+                enable = true;
+                hostname = "localhost";
+              };
+            };
+          };
+
+          environment.systemPackages = [
+            (sendEmail "dmarc@localhost")
+            pkgs.jq
+          ];
+        };
+
+      testScript = { nodes }:
+        let
+          esPort = toString nodes.parsedmarc.config.services.elasticsearch.port;
+          valueObject = lib.optionalString (lib.versionAtLeast nodes.parsedmarc.config.services.elasticsearch.package.version "7") ".value";
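+          # Elasticsearch >= 7 reports hits.total as an object ({value, relation}),
+          # hence the optional ".value" suffix in the jq filters below.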
+        in ''
+          parsedmarc.start()
+          parsedmarc.wait_for_unit("postfix.service")
+          parsedmarc.wait_for_unit("dovecot2.service")
+          parsedmarc.wait_for_unit("parsedmarc.service")
+          parsedmarc.wait_until_succeeds(
+              "curl -sS -f http://localhost:${esPort}"
+          )
+
+          parsedmarc.fail(
+              "curl -sS -f http://localhost:${esPort}/_search?q=report_id:2940"
+              + " | tee /dev/console"
+              + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
+          )
+          parsedmarc.succeed("send-email")
+          parsedmarc.wait_until_succeeds(
+              "curl -sS -f http://localhost:${esPort}/_search?q=report_id:2940"
+              + " | tee /dev/console"
+              + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
+          )
+        '';
+    };
+
+  externalMail =
+    let
+      certs = import ../common/acme/server/snakeoil-certs.nix;
+      mailDomain = certs.domain;
+      parsedmarcDomain = "parsedmarc.fake.domain";
+    in
+      makeTest {
+        name = "parsedmarc-external-mail";
+        meta = with lib.maintainers; {
+          maintainers = [ talyz ];
+        };
+
+        nodes = {
+          parsedmarc =
+            { nodes, ... }:
+            {
+              virtualisation.memorySize = 2048;
+
+              security.pki.certificateFiles = [
+                certs.ca.cert
+              ];
+
+              networking.extraHosts = ''
+                127.0.0.1 ${parsedmarcDomain}
+                ${nodes.mail.config.networking.primaryIPAddress} ${mailDomain}
+              '';
+
+              services.parsedmarc = {
+                enable = true;
+                provision.geoIp = false;
+                settings.imap = {
+                  host = mailDomain;
+                  port = 993;
+                  ssl = true;
+                  user = "alice";
+                  password = "${pkgs.writeText "imap-password" "foobar"}";
+                };
+              };
+
+              environment.systemPackages = [
+                pkgs.jq
+              ];
+            };
+
+          mail =
+            { nodes, ... }:
+            {
+              imports = [ ../common/user-account.nix ];
+
+              networking.extraHosts = ''
+                127.0.0.1 ${mailDomain}
+                ${nodes.parsedmarc.config.networking.primaryIPAddress} ${parsedmarcDomain}
+              '';
+
+              services.dovecot2 = {
+                enable = true;
+                protocols = [ "imap" ];
+                sslCACert = "${certs.ca.cert}";
+                sslServerCert = "${certs.${mailDomain}.cert}";
+                sslServerKey = "${certs.${mailDomain}.key}";
+              };
+
+              services.postfix = {
+                enable = true;
+                origin = mailDomain;
+                config = {
+                  myhostname = mailDomain;
+                  mydestination = mailDomain;
+                };
+                enableSubmission = true;
+                enableSubmissions = true;
+                submissionsOptions = {
+                  smtpd_sasl_auth_enable = "yes";
+                  smtpd_client_restrictions = "permit";
+                };
+              };
+              environment.systemPackages = [ (sendEmail "alice@${mailDomain}") ];
+
+              networking.firewall.allowedTCPPorts = [ 993 ];
+            };
+        };
+
+        testScript = { nodes }:
+          let
+            esPort = toString nodes.parsedmarc.config.services.elasticsearch.port;
+            valueObject = lib.optionalString (lib.versionAtLeast nodes.parsedmarc.config.services.elasticsearch.package.version "7") ".value";
+          in ''
+            mail.start()
+            mail.wait_for_unit("postfix.service")
+            mail.wait_for_unit("dovecot2.service")
+
+            parsedmarc.start()
+            parsedmarc.wait_for_unit("parsedmarc.service")
+            parsedmarc.wait_until_succeeds(
+                "curl -sS -f http://localhost:${esPort}"
+            )
+
+            parsedmarc.fail(
+                "curl -sS -f http://localhost:${esPort}/_search?q=report_id:2940"
+                + " | tee /dev/console"
+                + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
+            )
+            mail.succeed("send-email")
+            parsedmarc.wait_until_succeeds(
+                "curl -sS -f http://localhost:${esPort}/_search?q=report_id:2940"
+                + " | tee /dev/console"
+                + " | jq -es 'if . == [] then null else .[] | .hits.total${valueObject} > 0 end'"
+            )
+          '';
+      };
+}
diff --git a/nixpkgs/nixos/tests/pass-secret-service.nix b/nixpkgs/nixos/tests/pass-secret-service.nix
new file mode 100644
index 000000000000..e0dddf0ad29e
--- /dev/null
+++ b/nixpkgs/nixos/tests/pass-secret-service.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "pass-secret-service";
+  meta.maintainers = [ lib.maintainers.aidalgol ];
+
+  nodes.machine = { nodes, pkgs, ... }:
+    {
+      imports = [ ./common/user-account.nix ];
+
+      services.passSecretService.enable = true;
+
+      environment.systemPackages = [
+        # Create a script that tries to make a request to the D-Bus secrets API.
+        (pkgs.writers.writePython3Bin "secrets-dbus-init"
+          {
+            libraries = [ pkgs.python3Packages.secretstorage ];
+          } ''
+          import secretstorage
+          print("Initializing dbus connection...")
+          connection = secretstorage.dbus_init()
+          print("Requesting default collection...")
+          collection = secretstorage.get_default_collection(connection)
+          print("Done!  dbus-org.freedesktop.secrets should now be active.")
+        '')
+        pkgs.pass
+      ];
+
+      programs.gnupg = {
+        agent.enable = true;
+        agent.pinentryFlavor = "tty";
+        dirmngr.enable = true;
+      };
+    };
+
+  # Some of the commands are run via a virtual console because they need to be
+  # run under a real login session, with D-Bus running in the environment.
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.config.users.users.alice;
+      gpg-uid = "alice@example.net";
+      gpg-pw = "foobar9000";
+      ready-file = "/tmp/secrets-dbus-init.done";
+    in
+    ''
+      # Initialise the pass(1) storage.
+      machine.succeed("""
+        sudo -u alice gpg --pinentry-mode loopback --batch --passphrase ${gpg-pw} \
+        --quick-gen-key ${gpg-uid} \
+      """)
+      machine.succeed("sudo -u alice pass init ${gpg-uid}")
+
+      with subtest("Service is not running on login"):
+          machine.wait_until_tty_matches("1", "login: ")
+          machine.send_chars("alice\n")
+          machine.wait_until_tty_matches("1", "login: alice")
+          machine.wait_until_succeeds("pgrep login")
+          machine.wait_until_tty_matches("1", "Password: ")
+          machine.send_chars("${user.password}\n")
+          machine.wait_until_succeeds("pgrep -u alice bash")
+
+          _, output = machine.systemctl("status dbus-org.freedesktop.secrets --no-pager", "alice")
+          assert "Active: inactive (dead)" in output
+
+      with subtest("Service starts after a client tries to talk to the D-Bus API"):
+          machine.send_chars("secrets-dbus-init; touch ${ready-file}\n")
+          machine.wait_for_file("${ready-file}")
+          _, output = machine.systemctl("status dbus-org.freedesktop.secrets --no-pager", "alice")
+          assert "Active: active (running)" in output
+    '';
+})
diff --git a/nixpkgs/nixos/tests/patroni.nix b/nixpkgs/nixos/tests/patroni.nix
new file mode 100644
index 000000000000..1f15cd59677a
--- /dev/null
+++ b/nixpkgs/nixos/tests/patroni.nix
@@ -0,0 +1,206 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+  let
+    nodesIps = [
+      "192.168.1.1"
+      "192.168.1.2"
+      "192.168.1.3"
+    ];
+
+    createNode = index: { pkgs, ... }:
+      let
+        ip = builtins.elemAt nodesIps index; # since we already use IPs to identify servers
+      in
+      {
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = ip; prefixLength = 16; }
+        ];
+
+        networking.firewall.allowedTCPPorts = [ 5432 8008 5010 ];
+
+        environment.systemPackages = [ pkgs.jq ];
+
+        services.patroni = {
+
+          enable = true;
+
+          postgresqlPackage = pkgs.postgresql_14.withPackages (p: [ p.pg_safeupdate ]);
+
+          scope = "cluster1";
+          name = "node${toString(index + 1)}";
+          nodeIp = ip;
+          otherNodesIps = builtins.filter (h: h != ip) nodesIps;
+          softwareWatchdog = true;
+
+          settings = {
+            bootstrap = {
+              dcs = {
+                ttl = 30;
+                loop_wait = 10;
+                retry_timeout = 10;
+                maximum_lag_on_failover = 1048576;
+              };
+              initdb = [
+                { encoding = "UTF8"; }
+                "data-checksums"
+              ];
+            };
+
+            postgresql = {
+              use_pg_rewind = true;
+              use_slots = true;
+              authentication = {
+                replication = {
+                  username = "replicator";
+                };
+                superuser = {
+                  username = "postgres";
+                };
+                rewind = {
+                  username = "rewind";
+                };
+              };
+              parameters = {
+                listen_addresses = "${ip}";
+                wal_level = "replica";
+                hot_standby_feedback = "on";
+                unix_socket_directories = "/tmp";
+              };
+              pg_hba = [
+                "host replication replicator 192.168.1.0/24 md5"
+                # Unsafe, do not use for anything other than tests
+                "host all all 0.0.0.0/0 trust"
+              ];
+            };
+
+            etcd3 = {
+              host = "192.168.1.4:2379";
+            };
+          };
+
+          environmentFiles = {
+            PATRONI_REPLICATION_PASSWORD = pkgs.writeText "replication-password" "postgres";
+            PATRONI_SUPERUSER_PASSWORD = pkgs.writeText "superuser-password" "postgres";
+            PATRONI_REWIND_PASSWORD = pkgs.writeText "rewind-password" "postgres";
+          };
+        };
+
+        # We always want to restart so the tests never hang
+        systemd.services.patroni.serviceConfig.StartLimitIntervalSec = 0;
+      };
+  in
+  {
+    name = "patroni";
+
+    nodes = {
+      node1 = createNode 0;
+      node2 = createNode 1;
+      node3 = createNode 2;
+
+      etcd = { pkgs, ... }: {
+
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.1.4"; prefixLength = 16; }
+        ];
+
+        services.etcd = {
+          enable = true;
+          listenClientUrls = [ "http://192.168.1.4:2379" ];
+        };
+
+        networking.firewall.allowedTCPPorts = [ 2379 ];
+      };
+
+      client = { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.postgresql_14 ];
+
+        networking.interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+          { address = "192.168.2.1"; prefixLength = 16; }
+        ];
+
+        services.haproxy = {
+          enable = true;
+          config = ''
+            global
+                maxconn 100
+
+            defaults
+                log global
+                mode tcp
+                retries 2
+                timeout client 30m
+                timeout connect 4s
+                timeout server 30m
+                timeout check 5s
+
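+            # Patroni's REST API on port 8008 answers the http check with 200 only
+            # on the primary, so haproxy routes clients to the current leader.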
+            listen cluster1
+                bind 127.0.0.1:5432
+                option httpchk
+                http-check expect status 200
+                default-server inter 3s fall 3 rise 2 on-marked-down shutdown-sessions
+                ${builtins.concatStringsSep "\n" (map (ip: "server postgresql_${ip}_5432 ${ip}:5432 maxconn 100 check port 8008") nodesIps)}
+          '';
+        };
+      };
+    };
+
+
+
+    testScript = ''
+      nodes = [node1, node2, node3]
+
+      def wait_for_all_nodes_ready(expected_replicas=2):
+          booted_nodes = filter(lambda node: node.booted, nodes)
+          for node in booted_nodes:
+              print(node.succeed("patronictl list cluster1"))
+              node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'length') == {expected_replicas + 1} ]")
+              node.wait_until_succeeds("[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Leader$\"))) | map(select(.State | test(\"^running$\"))) | length') == 1 ]")
+              node.wait_until_succeeds(f"[ $(patronictl list -f json cluster1 | jq 'map(select(.Role | test(\"^Replica$\"))) | map(select(.State | test(\"^running$\"))) | length') == {expected_replicas} ]")
+              print(node.succeed("patronictl list cluster1"))
+          client.wait_until_succeeds("psql -h 127.0.0.1 -U postgres --command='select 1;'")
+
+      def run_dummy_queries():
+          client.succeed("psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='insert into dummy(val) values (101);'")
+          client.succeed("test $(psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='select val from dummy where val = 101;') -eq 101")
+          client.succeed("psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='delete from dummy where val = 101;'")
+
+      start_all()
+
+      etcd.wait_for_unit("etcd.service")
+
+      with subtest("should bootstrap a new patroni cluster"):
+          wait_for_all_nodes_ready()
+
+      with subtest("should be able to insert and select"):
+          client.succeed("psql -h 127.0.0.1 -U postgres --command='create table dummy as select * from generate_series(1, 100) as val;'")
+          client.succeed("test $(psql -h 127.0.0.1 -U postgres --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100")
+
+      with subtest("should restart after all nodes are crashed"):
+          for node in nodes:
+              node.crash()
+          for node in nodes:
+              node.start()
+          wait_for_all_nodes_ready()
+
+      with subtest("should be able to run queries while any one node is crashed"):
+          masterNodeName = node1.succeed("patronictl list -f json cluster1 | jq '.[] | select(.Role | test(\"^Leader$\")) | .Member' -r").strip()
+          masterNodeIndex = int(masterNodeName[len(masterNodeName)-1]) - 1
+
+          # Move the master node to the end of the list to avoid multiple failovers (makes the test faster and more consistent)
+          nodes.append(nodes.pop(masterNodeIndex))
+
+          for node in nodes:
+              node.crash()
+              wait_for_all_nodes_ready(1)
+
+              # Execute some queries while a node is down.
+              run_dummy_queries()
+
+              # Restart crashed node.
+              node.start()
+              wait_for_all_nodes_ready()
+
+              # Execute some queries with the node back up.
+              run_dummy_queries()
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/pdns-recursor.nix b/nixpkgs/nixos/tests/pdns-recursor.nix
new file mode 100644
index 000000000000..14f1b7ea8a35
--- /dev/null
+++ b/nixpkgs/nixos/tests/pdns-recursor.nix
@@ -0,0 +1,15 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "powerdns-recursor";
+
+  nodes.server = { ... }: {
+    services.pdns-recursor.enable = true;
+    services.pdns-recursor.exportHosts = true;
+    networking.hosts."192.0.2.1" = [ "example.com" ];
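+    # exportHosts makes the recursor answer for the entries in networking.hosts
+    # (/etc/hosts), which is what the lookup in the test script relies on.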
+  };
+
+  testScript = ''
+    server.wait_for_unit("pdns-recursor")
+    server.wait_for_open_port(53)
+    assert "192.0.2.1" in server.succeed("host example.com localhost")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/peerflix.nix b/nixpkgs/nixos/tests/peerflix.nix
new file mode 100644
index 000000000000..4800413783b1
--- /dev/null
+++ b/nixpkgs/nixos/tests/peerflix.nix
@@ -0,0 +1,23 @@
+# This test runs peerflix and checks if peerflix starts
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "peerflix";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ offline ];
+  };
+
+  nodes = {
+    peerflix =
+      { ... }:
+        {
+          services.peerflix.enable = true;
+        };
+    };
+
+  testScript = ''
+    start_all()
+
+    peerflix.wait_for_unit("peerflix.service")
+    peerflix.wait_until_succeeds("curl -f localhost:9000")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/peroxide.nix b/nixpkgs/nixos/tests/peroxide.nix
new file mode 100644
index 000000000000..12e196484164
--- /dev/null
+++ b/nixpkgs/nixos/tests/peroxide.nix
@@ -0,0 +1,16 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "peroxide";
+  meta.maintainers = with lib.maintainers; [ aidalgol ];
+
+  nodes.machine =
+    { config, pkgs, ... }: {
+      networking.hostName = "nixos";
+      services.peroxide.enable = true;
+    };
+
+  testScript = ''
+    machine.wait_for_unit("peroxide.service")
+    machine.wait_for_open_port(1143) # IMAP
+    machine.wait_for_open_port(1025) # SMTP
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pgadmin4.nix b/nixpkgs/nixos/tests/pgadmin4.nix
new file mode 100644
index 000000000000..3ee7ed19fa1c
--- /dev/null
+++ b/nixpkgs/nixos/tests/pgadmin4.nix
@@ -0,0 +1,53 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+{
+  name = "pgadmin4";
+  meta.maintainers = with lib.maintainers; [ mkg20001 gador ];
+
+  nodes.machine = { pkgs, ... }: {
+
+    imports = [ ./common/user-account.nix ];
+
+    environment.systemPackages = with pkgs; [
+      wget
+      curl
+      pgadmin4-desktopmode
+    ];
+
+    services.postgresql = {
+      enable = true;
+      authentication = ''
+        host    all             all             localhost               trust
+      '';
+    };
+
+    services.pgadmin = {
+      port = 5051;
+      enable = true;
+      initialEmail = "bruh@localhost.de";
+      initialPasswordFile = pkgs.writeText "pw" "bruh2012!";
+    };
+  };
+
+  testScript = ''
+    with subtest("Check pgadmin module"):
+      machine.wait_for_unit("postgresql")
+      machine.wait_for_unit("pgadmin")
+      machine.wait_until_succeeds("curl -sS localhost:5051")
+      machine.wait_until_succeeds("curl -sS localhost:5051/login | grep \"<title>pgAdmin 4</title>\" > /dev/null")
+      # check for missing support files (css, js etc). Should catch not-generated files during build. See e.g. https://github.com/NixOS/nixpkgs/pull/229184
+      machine.succeed("wget -nv --level=1 --spider --recursive localhost:5051/login")
+
+    # The pgadmin4 module saves its configuration to /etc/pgadmin/config_system.py.
+    # pgadmin4-desktopmode tries to read that file as well. This normally fails with a PermissionError, as the config
+    # file is owned by the user of the pgadmin module. With the check-system-config-dir.patch this only emits a
+    # warning and continues without reading the file.
+    # If pgadmin4-desktopmode were run as root (something one really shouldn't do), it could read the config file and
+    # then fail, because that configuration is wrong for desktop mode.
+    with subtest("Check pgadmin standalone desktop mode"):
+      machine.execute("sudo -u alice pgadmin4 >&2 &", timeout=60)
+      machine.wait_until_succeeds("curl -sS localhost:5050")
+      machine.wait_until_succeeds("curl -sS localhost:5050/browser/ | grep \"<title>pgAdmin 4</title>\" > /dev/null")
+      machine.succeed("wget -nv --level=1 --spider --recursive localhost:5050/browser")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pgbouncer.nix b/nixpkgs/nixos/tests/pgbouncer.nix
new file mode 100644
index 000000000000..bb5afd35ee28
--- /dev/null
+++ b/nixpkgs/nixos/tests/pgbouncer.nix
@@ -0,0 +1,59 @@
+import ./make-test-python.nix ({ pkgs, ... } :
+let
+  testAuthFile = pkgs.writeTextFile {
+    name = "authFile";
+    text = ''
+      "testuser" "testpass"
+    '';
+  };
+in
+{
+  name = "pgbouncer";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ _1000101 ];
+  };
+  nodes = {
+    one = { config, pkgs, ... }: {
+
+      systemd.services.postgresql = {
+        postStart = ''
+          ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER ROLE testuser WITH LOGIN PASSWORD 'testpass'";
+          ${pkgs.postgresql}/bin/psql -U postgres -c "ALTER DATABASE testdb OWNER TO testuser;";
+        '';
+      };
+
+      services = {
+        postgresql = {
+          enable = true;
+          ensureDatabases = [ "testdb" ];
+          ensureUsers = [
+          {
+            name = "testuser";
+          }];
+          authentication = ''
+            local testdb testuser scram-sha-256
+          '';
+        };
+
+        pgbouncer = {
+          enable = true;
+          listenAddress = "localhost";
+          databases = { test = "host=/run/postgresql/ port=5432 auth_user=testuser dbname=testdb"; };
+          authType = "scram-sha-256";
+          authFile = testAuthFile;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    one.wait_for_unit("default.target")
+    one.require_unit_state("pgbouncer.service", "active")
+
+    # Test if we can make a query through PgBouncer
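+    # (6432 is PgBouncer's default listen port; "test" is the logical database
+    # mapped onto testdb in the configuration above.)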
+    one.wait_until_succeeds(
+        "psql 'postgres://testuser:testpass@localhost:6432/test' -c 'SELECT 1;'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pgjwt.nix b/nixpkgs/nixos/tests/pgjwt.nix
new file mode 100644
index 000000000000..4793a3e31503
--- /dev/null
+++ b/nixpkgs/nixos/tests/pgjwt.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+
+with pkgs; {
+  name = "pgjwt";
+  meta = with lib.maintainers; {
+    maintainers = [ spinus willibutz ];
+  };
+
+  nodes = {
+    master = { ... }:
+    {
+      services.postgresql = {
+        enable = true;
+        extraPlugins = [ pgjwt pgtap ];
+      };
+    };
+  };
+
+  testScript = { nodes, ... }:
+  let
+    sqlSU = "${nodes.master.config.services.postgresql.superUser}";
+    pgProve = "${pkgs.perlPackages.TAPParserSourceHandlerpgTAP}";
+  in
+  ''
+    start_all()
+    master.wait_for_unit("postgresql")
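+    # Inject the required CREATE EXTENSION statements into pgjwt's bundled test
+    # suite, then run it with pg_prove as the database superuser.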
+    master.succeed(
+        "${pkgs.gnused}/bin/sed -e '12 i CREATE EXTENSION pgcrypto;\\nCREATE EXTENSION pgtap;\\nSET search_path TO tap,public;' ${pgjwt.src}/test.sql > /tmp/test.sql"
+    )
+    master.succeed(
+        "${pkgs.sudo}/bin/sudo -u ${sqlSU} PGOPTIONS=--search_path=tap,public ${pgProve}/bin/pg_prove -d postgres -v -f /tmp/test.sql"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pgmanage.nix b/nixpkgs/nixos/tests/pgmanage.nix
new file mode 100644
index 000000000000..6f8f2f965340
--- /dev/null
+++ b/nixpkgs/nixos/tests/pgmanage.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ pkgs, ... } :
+let
+  role     = "test";
+  password = "secret";
+  conn     = "local";
+in
+{
+  name = "pgmanage";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ basvandijk ];
+  };
+  nodes = {
+    one = { config, pkgs, ... }: {
+      services = {
+        postgresql = {
+          enable = true;
+          initialScript = pkgs.writeText "pg-init-script" ''
+            CREATE ROLE ${role} SUPERUSER LOGIN PASSWORD '${password}';
+          '';
+        };
+        pgmanage = {
+          enable = true;
+          connections = {
+            ${conn} = "hostaddr=127.0.0.1 port=${toString config.services.postgresql.port} dbname=postgres";
+          };
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    one.wait_for_unit("default.target")
+    one.require_unit_state("pgmanage.service", "active")
+
+    # Test if we can log in.
+    one.wait_until_succeeds(
+        "curl 'http://localhost:8080/pgmanage/auth' --data 'action=login&connname=${conn}&username=${role}&password=${password}' --fail"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/phosh.nix b/nixpkgs/nixos/tests/phosh.nix
new file mode 100644
index 000000000000..78d6da31beee
--- /dev/null
+++ b/nixpkgs/nixos/tests/phosh.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix ({ pkgs, ...}: let
+  pin = "1234";
+in {
+  name = "phosh";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ tomfitzhenry zhaofengli ];
+  };
+
+  nodes = {
+    phone = { config, pkgs, ... }: {
+      users.users.nixos = {
+        isNormalUser = true;
+        password = pin;
+      };
+
+      services.xserver.desktopManager.phosh = {
+        enable = true;
+        user = "nixos";
+        group = "users";
+
+        phocConfig = {
+          outputs.Virtual-1 = {
+            scale = 2;
+          };
+        };
+      };
+
+      systemd.services.phosh = {
+        environment = {
+          # Accelerated graphics fail on phoc 0.20 (wlroots 0.15)
+          "WLR_RENDERER" = "pixman";
+        };
+      };
+
+      virtualisation.resolution = { x = 720; y = 1440; };
+      virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci,xres=720,yres=1440" ];
+    };
+  };
+
+  enableOCR = true;
+
+  testScript = ''
+    import time
+
+    start_all()
+    phone.wait_for_unit("phosh.service")
+
+    with subtest("Check that we can see the lock screen info page"):
+        # Saturday, January 1
+        phone.succeed("timedatectl set-time '2022-01-01 07:00'")
+
+        phone.wait_for_text("Saturday")
+        phone.screenshot("01lockinfo")
+
+    with subtest("Check that we can unlock the screen"):
+        phone.send_chars("${pin}", delay=0.2)
+        time.sleep(1)
+        phone.screenshot("02unlock")
+
+        phone.send_chars("\n")
+
+        phone.wait_for_text("All Apps")
+        phone.screenshot("03launcher")
+
+    with subtest("Check the on-screen keyboard shows"):
+        phone.send_chars("setting", delay=0.2)
+        phone.wait_for_text("123") # A button on the OSK
+        phone.screenshot("04osk")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/photoprism.nix b/nixpkgs/nixos/tests/photoprism.nix
new file mode 100644
index 000000000000..a77ab59f5c9a
--- /dev/null
+++ b/nixpkgs/nixos/tests/photoprism.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "photoprism";
+  meta.maintainers = with lib.maintainers; [ stunkymonkey ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.photoprism = {
+      enable = true;
+      port = 8080;
+      originalsPath = "/media/photos/";
+      passwordFile = pkgs.writeText "password" "secret";
+    };
+    environment.extraInit = ''
+      mkdir -p /media/photos
+    '';
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_open_port(8080)
+    response = machine.succeed("curl -vvv -s -H 'Host: photoprism' http://127.0.0.1:8080/library/login")
+    assert '<title>PhotoPrism</title>' in response, "Login page didn't load successfully"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/php/default.nix b/nixpkgs/nixos/tests/php/default.nix
new file mode 100644
index 000000000000..c0386385753f
--- /dev/null
+++ b/nixpkgs/nixos/tests/php/default.nix
@@ -0,0 +1,16 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+, php ? pkgs.php
+}:
+
+let
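+  # PHP with the apcu extension enabled on top of the default extension set.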
+  php' = php.buildEnv {
+    extensions = { enabled, all }: with all; enabled ++ [ apcu ];
+  };
+in
+{
+  fpm = import ./fpm.nix { inherit system pkgs; php = php'; };
+  httpd = import ./httpd.nix { inherit system pkgs; php = php'; };
+  pcre = import ./pcre.nix { inherit system pkgs; php = php'; };
+}
diff --git a/nixpkgs/nixos/tests/php/fpm.nix b/nixpkgs/nixos/tests/php/fpm.nix
new file mode 100644
index 000000000000..64b61a377e28
--- /dev/null
+++ b/nixpkgs/nixos/tests/php/fpm.nix
@@ -0,0 +1,59 @@
+import ../make-test-python.nix ({ pkgs, lib, php, ... }: {
+  name = "php-${php.version}-fpm-nginx-test";
+  meta.maintainers = lib.teams.php.members;
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    environment.systemPackages = [ php ];
+
+    services.nginx = {
+      enable = true;
+
+      virtualHosts."phpfpm" =
+        let
+          testdir = pkgs.writeTextDir "web/index.php" "<?php phpinfo();";
+        in
+        {
+          root = "${testdir}/web";
+          locations."~ \\.php$".extraConfig = ''
+            fastcgi_pass unix:${config.services.phpfpm.pools.foobar.socket};
+            fastcgi_index index.php;
+            include ${config.services.nginx.package}/conf/fastcgi_params;
+            include ${pkgs.nginx}/conf/fastcgi.conf;
+          '';
+          locations."/" = {
+            tryFiles = "$uri $uri/ index.php";
+            index = "index.php index.html index.htm";
+          };
+        };
+    };
+
+    services.phpfpm.pools."foobar" = {
+      user = "nginx";
+      phpPackage = php;
+      settings = {
+        "listen.group" = "nginx";
+        "listen.mode" = "0600";
+        "listen.owner" = "nginx";
+        "pm" = "dynamic";
+        "pm.max_children" = 5;
+        "pm.max_requests" = 500;
+        "pm.max_spare_servers" = 3;
+        "pm.min_spare_servers" = 1;
+        "pm.start_servers" = 2;
+      };
+    };
+  };
+  testScript = { ... }: ''
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("phpfpm-foobar.service")
+
+    # Check that we get an evaluated PHP page back
+    response = machine.succeed("curl -fvvv -s http://127.0.0.1:80/")
+    assert "PHP Version ${php.version}" in response, "PHP version not detected"
+
+    # Check that the database drivers and some other extensions are loaded
+    for ext in ["json", "opcache", "pdo_mysql", "pdo_pgsql", "pdo_sqlite", "apcu"]:
+        assert ext in response, f"Missing {ext} extension"
+        machine.succeed(f'test -n "$(php -m | grep -i {ext})"')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/php/httpd.nix b/nixpkgs/nixos/tests/php/httpd.nix
new file mode 100644
index 000000000000..b6dfbeeaed52
--- /dev/null
+++ b/nixpkgs/nixos/tests/php/httpd.nix
@@ -0,0 +1,34 @@
+import ../make-test-python.nix ({ pkgs, lib, php, ... }: {
+  name = "php-${php.version}-httpd-test";
+  meta.maintainers = lib.teams.php.members;
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    services.httpd = {
+      enable = true;
+      adminAddr = "admin@phpfpm";
+      virtualHosts."phpfpm" =
+        let
+          testdir = pkgs.writeTextDir "web/index.php" "<?php phpinfo();";
+        in
+        {
+          documentRoot = "${testdir}/web";
+          locations."/" = {
+            index = "index.php index.html";
+          };
+        };
+      phpPackage = php;
+      enablePHP = true;
+    };
+  };
+  testScript = { ... }: ''
+    machine.wait_for_unit("httpd.service")
+
+    # Check that we get an evaluated PHP page back
+    response = machine.succeed("curl -fvvv -s http://127.0.0.1:80/")
+    assert "PHP Version ${php.version}" in response, "PHP version not detected"
+
+    # Check that the database drivers and some other extensions are loaded
+    for ext in ["json", "opcache", "pdo_mysql", "pdo_pgsql", "pdo_sqlite"]:
+        assert ext in response, f"Missing {ext} extension"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/php/pcre.nix b/nixpkgs/nixos/tests/php/pcre.nix
new file mode 100644
index 000000000000..8e37d5dcf97b
--- /dev/null
+++ b/nixpkgs/nixos/tests/php/pcre.nix
@@ -0,0 +1,52 @@
+let
+  testString = "can-use-subgroups";
+in
+import ../make-test-python.nix ({ pkgs, lib, php, ... }: {
+  name = "php-${php.version}-httpd-pcre-jit-test";
+  meta.maintainers = lib.teams.php.members;
+
+  nodes.machine = { lib, pkgs, ... }: {
+    time.timeZone = "UTC";
+    services.httpd = {
+      enable = true;
+      adminAddr = "please@dont.contact";
+      phpPackage = php;
+      enablePHP = true;
+      phpOptions = "pcre.jit = true";
+      extraConfig =
+        let
+          testRoot = pkgs.writeText "index.php"
+            ''
+              <?php
+              preg_match('/(${testString})/', '${testString}', $result);
+              var_dump($result);
+            '';
+        in
+        ''
+          Alias / ${testRoot}/
+
+          <Directory ${testRoot}>
+            Require all granted
+          </Directory>
+        '';
+    };
+  };
+  testScript = let
+    # The PCRE JIT SEAlloc feature does not play well with fork().
+    # The feature needs to either be disabled or PHP configured correctly.
+    # More information in https://bugs.php.net/bug.php?id=78927 and https://bugs.php.net/bug.php?id=78630
+    pcreJitSeallocForkIssue = pkgs.writeText "pcre-jit-sealloc-issue.php" ''
+      <?php
+      preg_match('/nixos/', 'nixos');
+      $pid = pcntl_fork();
+      pcntl_wait($pid);
+    '';
+  in ''
+      machine.wait_for_unit("httpd.service")
+      # Ensure php evaluation by matching on the var_dump syntax
+      response = machine.succeed("curl -fvvv -s http://127.0.0.1:80/index.php")
+      expected = 'string(${toString (builtins.stringLength testString)}) "${testString}"'
+      assert expected in response, "Does not appear to be able to use subgroups."
+      machine.succeed("${php}/bin/php -f ${pcreJitSeallocForkIssue}")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/pict-rs.nix b/nixpkgs/nixos/tests/pict-rs.nix
new file mode 100644
index 000000000000..4315e9fb6e90
--- /dev/null
+++ b/nixpkgs/nixos/tests/pict-rs.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  {
+    name = "pict-rs";
+    meta.maintainers = with lib.maintainers; [ happysalada ];
+
+    nodes.machine = { ... }: {
+      environment.systemPackages = with pkgs; [ curl jq ];
+      services.pict-rs.enable = true;
+    };
+
+    testScript = ''
+      start_all()
+
+      machine.wait_for_unit("pict-rs")
+      machine.wait_for_open_port(8080)
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/pinnwand.nix b/nixpkgs/nixos/tests/pinnwand.nix
new file mode 100644
index 000000000000..42b26e08c189
--- /dev/null
+++ b/nixpkgs/nixos/tests/pinnwand.nix
@@ -0,0 +1,93 @@
+import ./make-test-python.nix ({ pkgs, ...}:
+let
+  port = 8000;
+  baseUrl = "http://server:${toString port}";
+in
+{
+  name = "pinnwand";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hexa ];
+  };
+
+  nodes = {
+    server = { config, ... }:
+    {
+      networking.firewall.allowedTCPPorts = [
+        port
+      ];
+
+      services.pinnwand = {
+        enable = true;
+        port = port;
+      };
+    };
+
+    client = { pkgs, ... }:
+    {
+      environment.systemPackages = [
+        pkgs.steck
+
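+        # Helper that writes a steck client configuration pointing at the pinnwand server.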
+        (pkgs.writers.writePython3Bin "setup-steck.py" {
+          libraries = with pkgs.python3.pkgs; [ appdirs toml ];
+          flakeIgnore = [
+            "E501"
+          ];
+        }
+        ''
+          import appdirs
+          import toml
+          import os
+
+          CONFIG = {
+              "base": "${baseUrl}/",
+              "confirm": False,
+              "magic": True,
+              "ignore": True
+          }
+
+          os.makedirs(appdirs.user_config_dir('steck'))
+          with open(os.path.join(appdirs.user_config_dir('steck'), 'steck.toml'), "w") as fd:
+              toml.dump(CONFIG, fd)
+        '')
+      ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("pinnwand.service")
+    client.wait_for_unit("network.target")
+
+    # create steck.toml config file
+    client.succeed("setup-steck.py")
+
+    # wait until the server running pinnwand is reachable
+    client.wait_until_succeeds("ping -c1 server")
+
+    # make sure pinnwand is listening
+    server.wait_for_open_port(${toString port})
+
+    # send the contents of /etc/machine-id
+    response = client.succeed("steck paste /etc/machine-id")
+
+    # parse the steck response
+    raw_url = None
+    removal_link = None
+    for line in response.split("\n"):
+        if line.startswith("View link:"):
+            raw_url = f"${baseUrl}/raw/{line.split('/')[-1]}"
+        if line.startswith("Removal link:"):
+            removal_link = line.split(":", 1)[1]
+
+    # check whether paste matches what we sent
+    client.succeed(f"curl {raw_url} > /tmp/machine-id")
+    client.succeed("diff /tmp/machine-id /etc/machine-id")
+
+    # remove paste and check that it's not available any more
+    client.succeed(f"curl {removal_link}")
+    client.fail(f"curl --fail {raw_url}")
+
+    server.log(server.execute("systemd-analyze security pinnwand | grep '✗'")[1])
+  '';
+})
diff --git a/nixpkgs/nixos/tests/plantuml-server.nix b/nixpkgs/nixos/tests/plantuml-server.nix
new file mode 100644
index 000000000000..460c30919aec
--- /dev/null
+++ b/nixpkgs/nixos/tests/plantuml-server.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "plantuml-server";
+  meta.maintainers = with lib.maintainers; [ anthonyroussel ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.plantuml-server.enable = true;
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("plantuml-server.service")
+    machine.wait_for_open_port(8080)
+
+    with subtest("Generate chart"):
+      chart_id = machine.succeed("curl -sSf http://localhost:8080/plantuml/coder -d 'Alice -> Bob'")
+      machine.succeed("curl -sSf http://localhost:8080/plantuml/txt/{}".format(chart_id))
+  '';
+})
diff --git a/nixpkgs/nixos/tests/plasma-bigscreen.nix b/nixpkgs/nixos/tests/plasma-bigscreen.nix
new file mode 100644
index 000000000000..2fe90fa9b539
--- /dev/null
+++ b/nixpkgs/nixos/tests/plasma-bigscreen.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "plasma-bigscreen";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ttuegel k900 ];
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.displayManager.sddm.enable = true;
+    services.xserver.displayManager.defaultSession = "plasma-bigscreen-x11";
+    services.xserver.desktopManager.plasma5.bigscreen.enable = true;
+    services.xserver.displayManager.autoLogin = {
+      enable = true;
+      user = "alice";
+    };
+
+    users.users.alice.extraGroups = ["uinput"];
+  };
+
+  testScript = { nodes, ... }: ''
+    with subtest("Wait for login"):
+        start_all()
+        machine.wait_for_file("/tmp/xauth_*")
+        machine.succeed("xauth merge /tmp/xauth_*")
+
+    with subtest("Check plasmashell started"):
+        machine.wait_until_succeeds("pgrep plasmashell")
+        machine.wait_for_window("Plasma Big Screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/plasma5-systemd-start.nix b/nixpkgs/nixos/tests/plasma5-systemd-start.nix
new file mode 100644
index 000000000000..31a313af308b
--- /dev/null
+++ b/nixpkgs/nixos/tests/plasma5-systemd-start.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "plasma5-systemd-start";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ oxalica ];
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+    services.xserver = {
+      enable = true;
+      displayManager.sddm.enable = true;
+      displayManager.defaultSession = "plasma";
+      desktopManager.plasma5.enable = true;
+      desktopManager.plasma5.runUsingSystemd = true;
+      displayManager.autoLogin = {
+        enable = true;
+        user = "alice";
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    with subtest("Wait for login"):
+        start_all()
+        machine.wait_for_file("/tmp/xauth_*")
+        machine.succeed("xauth merge /tmp/xauth_*")
+
+    with subtest("Check plasmashell started"):
+        machine.wait_until_succeeds("pgrep plasmashell")
+        machine.wait_for_window("^Desktop ")
+
+    status, result = machine.systemctl('--no-pager show plasma-plasmashell.service', user='alice')
+    assert status == 0, 'Service not found'
+    assert 'ActiveState=active' in result.split('\n'), 'Systemd service not active'
+  '';
+})
diff --git a/nixpkgs/nixos/tests/plasma5.nix b/nixpkgs/nixos/tests/plasma5.nix
new file mode 100644
index 000000000000..fb8a5b73832e
--- /dev/null
+++ b/nixpkgs/nixos/tests/plasma5.nix
@@ -0,0 +1,67 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "plasma5";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ttuegel ];
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [ ./common/user-account.nix ];
+    services.xserver.enable = true;
+    services.xserver.displayManager.sddm.enable = true;
+    services.xserver.displayManager.defaultSession = "plasma";
+    services.xserver.desktopManager.plasma5.enable = true;
+    environment.plasma5.excludePackages = [ pkgs.plasma5Packages.elisa ];
+    services.xserver.displayManager.autoLogin = {
+      enable = true;
+      user = "alice";
+    };
+    hardware.pulseaudio.enable = true; # needed for the getfacl test; /dev/snd/* exists without it, but udev doesn't set ACLs on it then
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.users.users.alice;
+    xdo = "${pkgs.xdotool}/bin/xdotool";
+  in ''
+    with subtest("Wait for login"):
+        start_all()
+        machine.wait_for_file("/tmp/xauth_*")
+        machine.succeed("xauth merge /tmp/xauth_*")
+
+    with subtest("Check plasmashell started"):
+        machine.wait_until_succeeds("pgrep plasmashell")
+        machine.wait_for_window("^Desktop ")
+
+    with subtest("Check that KDED is running"):
+        machine.succeed("pgrep kded5")
+
+    with subtest("Check that logging in has given the user ownership of devices"):
+        machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+    with subtest("Ensure Elisa is not installed"):
+        machine.fail("which elisa")
+
+    machine.succeed("su - ${user.name} -c 'xauth merge /tmp/xauth_*'")
+
+    with subtest("Run Dolphin"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 dolphin >&2 &'")
+        machine.wait_for_window(" Dolphin")
+
+    with subtest("Run Konsole"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 konsole >&2 &'")
+        machine.wait_for_window("Konsole")
+
+    with subtest("Run systemsettings"):
+        machine.execute("su - ${user.name} -c 'DISPLAY=:0.0 systemsettings5 >&2 &'")
+        machine.wait_for_window("Settings")
+
+    with subtest("Wait to get a screenshot"):
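+        # Open the application launcher (Alt+F1), then let xdotool sleep so the desktop settles.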
+        machine.execute(
+            "${xdo} key Alt+F1 sleep 10"
+        )
+        machine.screenshot("screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/plausible.nix b/nixpkgs/nixos/tests/plausible.nix
new file mode 100644
index 000000000000..9c26c509a5ab
--- /dev/null
+++ b/nixpkgs/nixos/tests/plausible.nix
@@ -0,0 +1,52 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "plausible";
+  meta = with lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.memorySize = 4096;
+    services.plausible = {
+      enable = true;
+      adminUser = {
+        email = "admin@example.org";
+        passwordFile = "${pkgs.writeText "pwd" "foobar"}";
+        activate = true;
+      };
+      server = {
+        baseUrl = "http://localhost:8000";
+        secretKeybaseFile = "${pkgs.writeText "dont-try-this-at-home" "nannannannannannannannannannannannannannannannannannannan_batman!"}";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("plausible.service")
+    machine.wait_for_open_port(8000)
+
+    # Ensure that the software does not make the machine
+    # listen on any public interfaces by default.
+    machine.fail("ss -tlpn 'src = 0.0.0.0 or src = [::]' | grep LISTEN")
+
+    machine.succeed("curl -f localhost:8000 >&2")
+
+    machine.succeed("curl -f localhost:8000/js/script.js >&2")
+
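+    # Scrape the CSRF token from the login form so that the login POST below is accepted.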
+    csrf_token = machine.succeed(
+        "curl -c /tmp/cookies localhost:8000/login | grep '_csrf_token' | sed -E 's,.*value=\"(.*)\".*,\\1,g'"
+    )
+
+    machine.succeed(
+        f"curl -b /tmp/cookies -f -X POST localhost:8000/login -F email=admin@example.org -F password=foobar -F _csrf_token={csrf_token.strip()} -D headers"
+    )
+
+    # By ensuring that the user is redirected to the dashboard after login, we
+    # also make sure that the automatic verification of the module works.
+    machine.succeed(
+        "[[ $(grep 'location: ' headers | cut -d: -f2- | xargs echo) == /sites* ]]"
+    )
+
+    machine.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/please.nix b/nixpkgs/nixos/tests/please.nix
new file mode 100644
index 000000000000..af825ae4b9b3
--- /dev/null
+++ b/nixpkgs/nixos/tests/please.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix ({ lib, ... }:
+{
+  name = "please";
+  meta.maintainers = with lib.maintainers; [ azahi ];
+
+  nodes.machine =
+    { ... }:
+    {
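+      # Create six normal users (user0..user5); only user0 is added to the wheel group.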
+      users.users = lib.mkMerge [
+        (lib.listToAttrs (map
+          (n: lib.nameValuePair n { isNormalUser = true; })
+          (lib.genList (x: "user${toString x}") 6)))
+        {
+          user0.extraGroups = [ "wheel" ];
+        }
+      ];
+
+      security.please = {
+        enable = true;
+        wheelNeedsPassword = false;
+        settings = {
+          user2_run_true_as_root = {
+            name = "user2";
+            target = "root";
+            rule = "/run/current-system/sw/bin/true";
+            require_pass = false;
+          };
+          user4_edit_etc_hosts_as_root = {
+            name = "user4";
+            type = "edit";
+            target = "root";
+            rule = "/etc/hosts";
+            editmode = 644;
+            require_pass = false;
+          };
+        };
+      };
+    };
+
+  testScript = ''
+    with subtest("root: can run anything by default"):
+        machine.succeed('please true')
+    with subtest("root: can edit anything by default"):
+        machine.succeed('EDITOR=cat pleaseedit /etc/hosts')
+
+    with subtest("user0: can run as root because it's in the wheel group"):
+        machine.succeed('su - user0 -c "please -u root true"')
+    with subtest("user1: cannot run as root because it's not in the wheel group"):
+        machine.fail('su - user1 -c "please -u root true"')
+
+    with subtest("user0: can edit as root"):
+        machine.succeed('su - user0 -c "EDITOR=cat pleaseedit /etc/hosts"')
+    with subtest("user1: cannot edit as root"):
+        machine.fail('su - user1 -c "EDITOR=cat pleaseedit /etc/hosts"')
+
+    with subtest("user2: can run 'true' as root"):
+        machine.succeed('su - user2 -c "please -u root true"')
+    with subtest("user3: cannot run 'true' as root"):
+        machine.fail('su - user3 -c "please -u root true"')
+
+    with subtest("user4: can edit /etc/hosts"):
+        machine.succeed('su - user4 -c "EDITOR=cat pleaseedit /etc/hosts"')
+    with subtest("user5: cannot edit /etc/hosts"):
+        machine.fail('su - user5 -c "EDITOR=cat pleaseedit /etc/hosts"')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pleroma.nix b/nixpkgs/nixos/tests/pleroma.nix
new file mode 100644
index 000000000000..08a01585f877
--- /dev/null
+++ b/nixpkgs/nixos/tests/pleroma.nix
@@ -0,0 +1,254 @@
+/*
+  Pleroma E2E VM test.
+
+  Abstract:
+  =========
+  Using pleroma, postgresql, a local CA cert, an nginx reverse proxy
+  and a toot-based client, we're going to:
+
+  1. Provision a pleroma service from scratch (pleroma config + postgres db).
+  2. Create a "jamy" admin user.
+  3. Send a toot from this user.
+  4. Send an upload from this user.
+  5. Check that the toot is part of the server's public timeline.
+
+  Notes:
+  - We need a fully functional TLS setup without having any access to
+    the internet. We do that by issuing a self-signed cert, adding this
+    cert to the hosts' PKI trust store and finally spoofing the
+    hostnames using /etc/hosts.
+  - For this NixOS test, we *had* to store some DB-related and
+    pleroma-related secrets in the Nix store. Keep in mind the store is
+    world-readable; it is the worst possible place to store *any*
+    secret. **DO NOT DO THIS IN A REAL-WORLD DEPLOYMENT**.
+*/
+
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+  send-toot = pkgs.writeScriptBin "send-toot" ''
+    set -eux
+    # toot uses the requests library internally. This library sadly
+    # embeds its own certificate store instead of relying on the
+    # system one, so we override that default here.
+    export REQUESTS_CA_BUNDLE=/etc/ssl/certs/ca-certificates.crt
+
+    echo "jamy-password" | toot login_cli -i "pleroma.nixos.test" -e "jamy@nixos.test"
+    echo "Login OK"
+
+    # Send a toot then verify it's part of the public timeline
+    echo "y" | toot post "hello world Jamy here"
+    echo "Send toot OK"
+    echo "y" | toot timeline | grep -c "hello world Jamy here"
+    echo "Get toot from timeline OK"
+
+    # Test file upload
+    echo "y" | toot upload ${db-seed} | grep -c "https://pleroma.nixos.test/media"
+    echo "File upload OK"
+
+    echo "====================================================="
+    echo "=                   SUCCESS                         ="
+    echo "=                                                   ="
+    echo "=    We were able to send a toot + an upload and    ="
+    echo "=   retrieve both of them in the public timeline.   ="
+    echo "====================================================="
+  '';
+
+  provision-db = pkgs.writeScriptBin "provision-db" ''
+    set -eux
+    sudo -u postgres psql -f ${db-seed}
+  '';
+
+  test-db-passwd = "SccZOvTGM//BMrpoQj68JJkjDkMGb4pHv2cECWiI+XhVe3uGJTLI0vFV/gDlZ5jJ";
+
+  /* For this NixOS test, we *had* to store this secret in the Nix store.
+    Keep in mind the store is world-readable; it is the worst possible
+    place to store *any* secret. **DO NOT DO THIS IN A REAL-WORLD
+    DEPLOYMENT**. */
+  db-seed = pkgs.writeText "provision.psql" ''
+    CREATE USER pleroma WITH ENCRYPTED PASSWORD '${test-db-passwd}';
+    CREATE DATABASE pleroma OWNER pleroma;
+    \c pleroma;
+    --Extensions made by ecto.migrate that need superuser access
+    CREATE EXTENSION IF NOT EXISTS citext;
+    CREATE EXTENSION IF NOT EXISTS pg_trgm;
+    CREATE EXTENSION IF NOT EXISTS "uuid-ossp";
+  '';
+
+  pleroma-conf = ''
+    import Config
+
+    config :pleroma, Pleroma.Web.Endpoint,
+       url: [host: "pleroma.nixos.test", scheme: "https", port: 443],
+       http: [ip: {127, 0, 0, 1}, port: 4000]
+
+    config :pleroma, :instance,
+      name: "NixOS test pleroma server",
+      email: "pleroma@nixos.test",
+      notify_email: "pleroma@nixos.test",
+      limit: 5000,
+      registrations_open: true
+
+    config :pleroma, :media_proxy,
+      enabled: false,
+      redirect_on_failure: true
+      #base_url: "https://cache.pleroma.social"
+
+    config :pleroma, Pleroma.Repo,
+      adapter: Ecto.Adapters.Postgres,
+      username: "pleroma",
+      password: "${test-db-passwd}",
+      database: "pleroma",
+      hostname: "localhost",
+      pool_size: 10,
+      prepare: :named,
+      parameters: [
+        plan_cache_mode: "force_custom_plan"
+      ]
+
+    config :pleroma, :database, rum_enabled: false
+    config :pleroma, :instance, static_dir: "/var/lib/pleroma/static"
+    config :pleroma, Pleroma.Uploaders.Local, uploads: "/var/lib/pleroma/uploads"
+    config :pleroma, configurable_from_database: false
+  '';
+
+  /* For this NixOS test, we *had* to store this secret in the Nix store.
+    Keep in mind the store is world-readable; it is the worst possible
+    place to store *any* secret. **DO NOT DO THIS IN A REAL-WORLD
+    DEPLOYMENT**.
+    In a real-world deployment, you'd handle this either by:
+    - manually uploading your pleroma secrets to /var/lib/pleroma/secrets.exs
+    - using a deployment tool such as morph or NixOps to deploy your secrets.
+  */
+  pleroma-conf-secret = pkgs.writeText "secrets.exs" ''
+    import Config
+
+    config :joken, default_signer: "PS69/wMW7X6FIQPABt9lwvlZvgrJIncfiAMrK9J5mjVus/7/NJJi1DsDA1OghBE5"
+
+    config :pleroma, Pleroma.Web.Endpoint,
+       secret_key_base: "NvfmU7lYaQrmmxt4NACm0AaAfN9t6WxsrX0NCB4awkGHvr1S7jyshlEmrjaPFhhq",
+       signing_salt: "3L41+BuJ"
+
+    config :web_push_encryption, :vapid_details,
+      subject: "mailto:pleroma@nixos.test",
+      public_key: "BKjfNX9-UqAcncaNqERQtF7n9pKrB0-MO-juv6U5E5XQr_Tg5D-f8AlRjduAguDpyAngeDzG8MdrTejMSL4VF30",
+      private_key: "k7o9onKMQrgMjMb6l4fsxSaXO0BTNAer5MVSje3q60k"
+  '';
+
+  /* For this NixOS test, we *had* to store this secret in the Nix store.
+    Keep in mind the store is world-readable; it is the worst possible
+    place to store *any* secret. **DO NOT DO THIS IN A REAL-WORLD
+    DEPLOYMENT**.
+    In a real-world deployment, you'd handle this either by:
+    - manually uploading your pleroma secrets to /var/lib/pleroma/secrets.exs
+    - using a deployment tool such as morph or NixOps to deploy your secrets.
+    */
+  provision-secrets = pkgs.writeScriptBin "provision-secrets" ''
+    set -eux
+    cp "${pleroma-conf-secret}" "/var/lib/pleroma/secrets.exs"
+    chown pleroma:pleroma /var/lib/pleroma/secrets.exs
+  '';
+
+  /* For this NixOS test, we *had* to store this secret in the Nix store.
+    Keep in mind the store is world-readable; it is the worst possible
+    place to store *any* secret. **DO NOT DO THIS IN A REAL-WORLD
+    DEPLOYMENT**.
+  */
+  provision-user = pkgs.writeScriptBin "provision-user" ''
+    set -eux
+
+    # Waiting for pleroma to be up.
+    timeout 5m bash -c 'while [[ "$(curl -s -o /dev/null -w '%{http_code}' https://pleroma.nixos.test/api/v1/instance)" != "200" ]]; do sleep 2; done'
+    # TODO: remove the RELEASE_COOKIE bit when https://github.com/NixOS/nixpkgs/issues/166229 gets fixed.
+    RELEASE_COOKIE="/var/lib/pleroma/.cookie" \
+      pleroma_ctl user new jamy jamy@nixos.test --password 'jamy-password' --moderator --admin -y
+  '';
+
+  tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    mkdir -p $out
+    openssl req -x509 \
+      -subj '/CN=pleroma.nixos.test/' -days 49710 \
+      -addext 'subjectAltName = DNS:pleroma.nixos.test' \
+      -keyout "$out/key.pem" -newkey ed25519 \
+      -out "$out/cert.pem" -noenc
+  '';
+
+  hosts = nodes: ''
+    ${nodes.pleroma.networking.primaryIPAddress} pleroma.nixos.test
+    ${nodes.client.networking.primaryIPAddress} client.nixos.test
+  '';
+  in {
+  name = "pleroma";
+  nodes = {
+    client = { nodes, pkgs, config, ... }: {
+      security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
+      networking.extraHosts = hosts nodes;
+      environment.systemPackages = with pkgs; [
+        pkgs.toot
+        send-toot
+      ];
+    };
+    pleroma = { nodes, pkgs, config, ... }: {
+      security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
+      networking.extraHosts = hosts nodes;
+      networking.firewall.enable = false;
+      environment.systemPackages = with pkgs; [
+        provision-db
+        provision-secrets
+        provision-user
+      ];
+      services = {
+        pleroma = {
+          enable = true;
+          configs = [
+            pleroma-conf
+          ];
+        };
+        postgresql = {
+          enable = true;
+          package = pkgs.postgresql_12;
+        };
+        nginx = {
+          enable = true;
+          virtualHosts."pleroma.nixos.test" = {
+            addSSL = true;
+            sslCertificate = "${tls-cert}/cert.pem";
+            sslCertificateKey = "${tls-cert}/key.pem";
+            locations."/" = {
+              proxyPass = "http://127.0.0.1:4000";
+              extraConfig = ''
+                add_header 'Access-Control-Allow-Origin' '*' always;
+                add_header 'Access-Control-Allow-Methods' 'POST, PUT, DELETE, GET, PATCH, OPTIONS' always;
+                add_header 'Access-Control-Allow-Headers' 'Authorization, Content-Type, Idempotency-Key' always;
+                add_header 'Access-Control-Expose-Headers' 'Link, X-RateLimit-Reset, X-RateLimit-Limit, X-RateLimit-Remaining, X-Request-Id' always;
+                if ($request_method = OPTIONS) {
+                    return 204;
+                }
+                add_header X-XSS-Protection "1; mode=block";
+                add_header X-Permitted-Cross-Domain-Policies none;
+                add_header X-Frame-Options DENY;
+                add_header X-Content-Type-Options nosniff;
+                add_header Referrer-Policy same-origin;
+                add_header X-Download-Options noopen;
+                proxy_http_version 1.1;
+                proxy_set_header Upgrade $http_upgrade;
+                proxy_set_header Connection "upgrade";
+                proxy_set_header Host $host;
+                client_max_body_size 16m;
+              '';
+            };
+          };
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    pleroma.wait_for_unit("postgresql.service")
+    pleroma.succeed("provision-db")
+    pleroma.succeed("provision-secrets")
+    pleroma.systemctl("restart pleroma.service")
+    pleroma.wait_for_unit("pleroma.service")
+    pleroma.succeed("provision-user")
+    client.succeed("send-toot")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/plikd.nix b/nixpkgs/nixos/tests/plikd.nix
new file mode 100644
index 000000000000..97c254a5f7b0
--- /dev/null
+++ b/nixpkgs/nixos/tests/plikd.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "plikd";
+  meta = with lib.maintainers; {
+    maintainers = [ freezeboy ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    services.plikd.enable = true;
+    environment.systemPackages = [ pkgs.plik ];
+  };
+
+  testScript = ''
+    # Service basic test
+    machine.wait_for_unit("plikd")
+
+    # Network test
+    machine.wait_for_open_port(8080)
+    machine.succeed("curl --fail -v http://localhost:8080")
+
+    # Application test
+    machine.execute("echo test > /tmp/data.txt")
+    machine.succeed("plik --server http://localhost:8080 /tmp/data.txt | grep curl")
+
+    machine.succeed("diff data.txt /tmp/data.txt")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/plotinus.nix b/nixpkgs/nixos/tests/plotinus.nix
new file mode 100644
index 000000000000..b6ebab9b0198
--- /dev/null
+++ b/nixpkgs/nixos/tests/plotinus.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "plotinus";
+  meta = {
+    maintainers = pkgs.plotinus.meta.maintainers;
+  };
+
+  nodes.machine =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      programs.plotinus.enable = true;
+      environment.systemPackages = [ pkgs.gnome.gnome-calculator pkgs.xdotool ];
+    };
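+      # Emulate a 720x1440 portrait phone display.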
+
+  testScript = ''
+    machine.wait_for_x()
+    machine.succeed("gnome-calculator >&2 &")
+    machine.wait_for_window("gnome-calculator")
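+    # Focus the calculator window and open the Plotinus command palette with Ctrl+Shift+P.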
+    machine.succeed(
+        "xdotool search --sync --onlyvisible --class gnome-calculator "
+        + "windowfocus --sync key --clearmodifiers --delay 1 'ctrl+shift+p'"
+    )
+    machine.sleep(5)  # wait for the popup
+    machine.succeed("xdotool key --delay 100 p r e f e r e n c e s Return")
+    machine.wait_for_window("Preferences")
+    machine.screenshot("screen")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/podgrab.nix b/nixpkgs/nixos/tests/podgrab.nix
new file mode 100644
index 000000000000..dc9dfebaf49b
--- /dev/null
+++ b/nixpkgs/nixos/tests/podgrab.nix
@@ -0,0 +1,34 @@
+let
+  defaultPort = 8080;
+  customPort = 4242;
+in
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "podgrab";
+
+  nodes = {
+    default = { ... }: {
+      services.podgrab.enable = true;
+    };
+
+    customized = { ... }: {
+      services.podgrab = {
+        enable = true;
+        port = customPort;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    default.wait_for_unit("podgrab")
+    default.wait_for_open_port(${toString defaultPort})
+    default.succeed("curl --fail http://localhost:${toString defaultPort}")
+
+    customized.wait_for_unit("podgrab")
+    customized.wait_for_open_port(${toString customPort})
+    customized.succeed("curl --fail http://localhost:${toString customPort}")
+  '';
+
+  meta.maintainers = with pkgs.lib.maintainers; [ ambroisie ];
+})
diff --git a/nixpkgs/nixos/tests/podman/default.nix b/nixpkgs/nixos/tests/podman/default.nix
new file mode 100644
index 000000000000..0e1f420f2a7d
--- /dev/null
+++ b/nixpkgs/nixos/tests/podman/default.nix
@@ -0,0 +1,183 @@
+import ../make-test-python.nix (
+  { pkgs, lib, ... }: {
+    name = "podman";
+    meta = {
+      maintainers = lib.teams.podman.members;
+    };
+
+    nodes = {
+      rootful = { pkgs, ... }: {
+        virtualisation.podman.enable = true;
+
+        # hack to ensure that podman built with and without zfs in extraPackages is cached
+        boot.supportedFilesystems = [ "zfs" ];
+        networking.hostId = "00000000";
+      };
+      rootless = { pkgs, ... }: {
+        virtualisation.podman.enable = true;
+
+        users.users.alice = {
+          isNormalUser = true;
+        };
+      };
+      dns = { pkgs, ... }: {
+        virtualisation.podman.enable = true;
+
+        virtualisation.podman.defaultNetwork.settings.dns_enabled = true;
+
+        networking.firewall.allowedUDPPorts = [ 53 ];
+      };
+      docker = { pkgs, ... }: {
+        virtualisation.podman.enable = true;
+
+        virtualisation.podman.dockerSocket.enable = true;
+
+        environment.systemPackages = [
+          pkgs.docker-client
+        ];
+
+        users.users.alice = {
+          isNormalUser = true;
+          extraGroups = [ "podman" ];
+        };
+
+        users.users.mallory = {
+          isNormalUser = true;
+        };
+      };
+    };
+
+    testScript = ''
+      import shlex
+
+
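+      # Helper: run a command as the given (rootless) user through a login shell.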
+      def su_cmd(cmd, user = "alice"):
+          cmd = shlex.quote(cmd)
+          return f"su {user} -l -c {cmd}"
+
+
+      rootful.wait_for_unit("sockets.target")
+      rootless.wait_for_unit("sockets.target")
+      dns.wait_for_unit("sockets.target")
+      docker.wait_for_unit("sockets.target")
+      start_all()
+
+      with subtest("Run container as root with runc"):
+          rootful.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+          rootful.succeed(
+              "podman run --runtime=runc -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+          )
+          rootful.succeed("podman ps | grep sleeping")
+          rootful.succeed("podman stop sleeping")
+          rootful.succeed("podman rm sleeping")
+
+      with subtest("Run container as root with crun"):
+          rootful.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+          rootful.succeed(
+              "podman run --runtime=crun -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+          )
+          rootful.succeed("podman ps | grep sleeping")
+          rootful.succeed("podman stop sleeping")
+          rootful.succeed("podman rm sleeping")
+
+      with subtest("Run container as root with the default backend"):
+          rootful.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+          rootful.succeed(
+              "podman run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+          )
+          rootful.succeed("podman ps | grep sleeping")
+          rootful.succeed("podman stop sleeping")
+          rootful.succeed("podman rm sleeping")
+
+      # start systemd session for rootless
+      rootless.succeed("loginctl enable-linger alice")
+      rootless.succeed(su_cmd("whoami"))
+      rootless.sleep(1)
+
+      with subtest("Run container rootless with runc"):
+          rootless.succeed(su_cmd("tar cv --files-from /dev/null | podman import - scratchimg"))
+          rootless.succeed(
+              su_cmd(
+                  "podman run --runtime=runc -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+              )
+          )
+          rootless.succeed(su_cmd("podman ps | grep sleeping"))
+          rootless.succeed(su_cmd("podman stop sleeping"))
+          rootless.succeed(su_cmd("podman rm sleeping"))
+
+      with subtest("Run container rootless with crun"):
+          rootless.succeed(su_cmd("tar cv --files-from /dev/null | podman import - scratchimg"))
+          rootless.succeed(
+              su_cmd(
+                  "podman run --runtime=crun -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+              )
+          )
+          rootless.succeed(su_cmd("podman ps | grep sleeping"))
+          rootless.succeed(su_cmd("podman stop sleeping"))
+          rootless.succeed(su_cmd("podman rm sleeping"))
+
+      with subtest("Run container rootless with the default backend"):
+          rootless.succeed(su_cmd("tar cv --files-from /dev/null | podman import - scratchimg"))
+          rootless.succeed(
+              su_cmd(
+                  "podman run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg /bin/sleep 10"
+              )
+          )
+          rootless.succeed(su_cmd("podman ps | grep sleeping"))
+          rootless.succeed(su_cmd("podman stop sleeping"))
+          rootless.succeed(su_cmd("podman rm sleeping"))
+
+      with subtest("rootlessport"):
+          rootless.succeed(su_cmd("tar cv --files-from /dev/null | podman import - scratchimg"))
+          rootless.succeed(
+              su_cmd(
+                  "podman run -d -p 9000:8888 --name=rootlessport -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin -w ${pkgs.writeTextDir "index.html" "<h1>Testing</h1>"} scratchimg ${pkgs.python3}/bin/python -m http.server 8888"
+              )
+          )
+          rootless.succeed(su_cmd("podman ps | grep rootlessport"))
+          rootless.wait_until_succeeds(su_cmd("${pkgs.curl}/bin/curl localhost:9000 | grep Testing"))
+          rootless.succeed(su_cmd("podman stop rootlessport"))
+          rootless.succeed(su_cmd("podman rm rootlessport"))
+
+      with subtest("Run container with init"):
+          rootful.succeed(
+              "tar cv -C ${pkgs.pkgsStatic.busybox} . | podman import - busybox"
+          )
+          pid = rootful.succeed("podman run --rm busybox readlink /proc/self").strip()
+          assert pid == "1"
+          pid = rootful.succeed("podman run --rm --init busybox readlink /proc/self").strip()
+          assert pid == "2"
+
+      with subtest("aardvark-dns"):
+          dns.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+          dns.succeed(
+              "podman run -d --name=webserver -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin -w ${pkgs.writeTextDir "index.html" "<h1>Testing</h1>"} scratchimg ${pkgs.python3}/bin/python -m http.server 8000"
+          )
+          dns.succeed("podman ps | grep webserver")
+          dns.wait_until_succeeds(
+              "podman run --rm --name=client -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin scratchimg ${pkgs.curl}/bin/curl http://webserver:8000 | grep Testing"
+          )
+          dns.succeed("podman stop webserver")
+          dns.succeed("podman rm webserver")
+
+      with subtest("A podman member can use the docker cli"):
+          docker.succeed(su_cmd("docker version"))
+
+      with subtest("Run container via docker cli"):
+          docker.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+          docker.succeed(
+            "docker run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin localhost/scratchimg /bin/sleep 10"
+          )
+          docker.succeed("docker ps | grep sleeping")
+          docker.succeed("podman ps | grep sleeping")
+          docker.succeed("docker stop sleeping")
+          docker.succeed("docker rm sleeping")
+
+      with subtest("A podman non-member can not use the docker cli"):
+          docker.fail(su_cmd("docker version", user="mallory"))
+
+      # TODO: add docker-compose test
+
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/podman/tls-ghostunnel.nix b/nixpkgs/nixos/tests/podman/tls-ghostunnel.nix
new file mode 100644
index 000000000000..52c31dc21f10
--- /dev/null
+++ b/nixpkgs/nixos/tests/podman/tls-ghostunnel.nix
@@ -0,0 +1,147 @@
+/*
+  This test runs podman as a backend for the Docker CLI.
+ */
+import ../make-test-python.nix (
+  { pkgs, lib, ... }:
+
+  let gen-ca = pkgs.writeScript "gen-ca" ''
+    # Create CA
+    PATH="${pkgs.openssl}/bin:$PATH"
+    openssl genrsa -out ca-key.pem 4096
+    openssl req -new -x509 -days 365 -key ca-key.pem -sha256 -subj '/C=NL/ST=Zuid-Holland/L=The Hague/O=Stevige Balken en Planken B.V./OU=OpSec/CN=Certificate Authority' -out ca.pem
+
+    # Create service
+    openssl genrsa -out podman-key.pem 4096
+    openssl req -subj '/CN=podman' -sha256 -new -key podman-key.pem -out service.csr
+    echo subjectAltName = DNS:podman,IP:127.0.0.1 >> extfile.cnf
+    echo extendedKeyUsage = serverAuth >> extfile.cnf
+    openssl x509 -req -days 365 -sha256 -in service.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out podman-cert.pem -extfile extfile.cnf
+
+    # Create client
+    openssl genrsa -out client-key.pem 4096
+    openssl req -subj '/CN=client' -new -key client-key.pem -out client.csr
+    echo extendedKeyUsage = clientAuth > extfile-client.cnf
+    openssl x509 -req -days 365 -sha256 -in client.csr -CA ca.pem -CAkey ca-key.pem -CAcreateserial -out client-cert.pem -extfile extfile-client.cnf
+
+    # Create CA 2
+    PATH="${pkgs.openssl}/bin:$PATH"
+    openssl genrsa -out ca-2-key.pem 4096
+    openssl req -new -x509 -days 365 -key ca-2-key.pem -sha256 -subj '/C=NL/ST=Zuid-Holland/L=The Hague/O=Stevige Balken en Planken B.V./OU=OpSec/CN=Certificate Authority' -out ca-2.pem
+
+    # Create client signed by CA 2
+    openssl genrsa -out client-2-key.pem 4096
+    openssl req -subj '/CN=client' -new -key client-2-key.pem -out client-2.csr
+    echo extendedKeyUsage = clientAuth > extfile-client.cnf
+    openssl x509 -req -days 365 -sha256 -in client-2.csr -CA ca-2.pem -CAkey ca-2-key.pem -CAcreateserial -out client-2-cert.pem -extfile extfile-client.cnf
+
+    '';
+  in
+  {
+    name = "podman-tls-ghostunnel";
+    meta = {
+      maintainers = lib.teams.podman.members ++ [ lib.maintainers.roberth ];
+    };
+
+    nodes = {
+      podman =
+        { pkgs, ... }:
+        {
+          virtualisation.podman.enable = true;
+          virtualisation.podman.dockerSocket.enable = true;
+          virtualisation.podman.networkSocket = {
+            enable = true;
+            openFirewall = true;
+            server = "ghostunnel";
+            tls.cert = "/root/podman-cert.pem";
+            tls.key = "/root/podman-key.pem";
+            tls.cacert = "/root/ca.pem";
+          };
+
+          environment.systemPackages = [
+            pkgs.docker-client
+          ];
+
+          users.users.alice = {
+            isNormalUser = true;
+            home = "/home/alice";
+            description = "Alice Foobar";
+            extraGroups = ["podman"];
+          };
+
+        };
+
+      client = { ... }: {
+        environment.systemPackages = [
+          # Installs the docker _client_ only
+          # Normally, you'd want `virtualisation.docker.enable = true;`.
+          pkgs.docker-client
+        ];
+        environment.variables.DOCKER_HOST = "podman:2376";
+        environment.variables.DOCKER_TLS_VERIFY = "1";
+      };
+    };
+
+    testScript = ''
+      import os
+      import shlex
+
+
+      def su_cmd(user, cmd):
+          cmd = shlex.quote(cmd)
+          return f"su {user} -l -c {cmd}"
+
+      def cmd(command):
+          print(f"+{command}")
+          r = os.system(command)
+          if r != 0:
+              raise Exception(f"Command {command} failed with exit code {r}")
+
+      start_all()
+      cmd("${gen-ca}")
+
+      podman.copy_from_host("ca.pem", "/root/ca.pem")
+      podman.copy_from_host("podman-cert.pem", "/root/podman-cert.pem")
+      podman.copy_from_host("podman-key.pem", "/root/podman-key.pem")
+
+      client.copy_from_host("ca.pem", "/root/.docker/ca.pem")
+      # client.copy_from_host("podman-cert.pem", "/root/podman-cert.pem")
+      client.copy_from_host("client-cert.pem", "/root/.docker/cert.pem")
+      client.copy_from_host("client-key.pem", "/root/.docker/key.pem")
+
+      # TODO (ghostunnel): add file watchers so the restart isn't necessary
+      podman.succeed("systemctl reset-failed && systemctl restart ghostunnel-server-podman-socket.service")
+
+      podman.wait_for_unit("sockets.target")
+      podman.wait_for_unit("ghostunnel-server-podman-socket.service")
+
+      with subtest("Root docker cli also works"):
+          podman.succeed("docker version")
+
+      with subtest("A podman member can also still use the docker cli"):
+          podman.succeed(su_cmd("alice", "docker version"))
+
+      with subtest("Run container remotely via docker cli"):
+          client.succeed("docker version")
+
+          # via socket would be nicer
+          podman.succeed("tar cv --files-from /dev/null | podman import - scratchimg")
+
+          client.succeed(
+            "docker run -d --name=sleeping -v /nix/store:/nix/store -v /run/current-system/sw/bin:/bin localhost/scratchimg /bin/sleep 10"
+          )
+          client.succeed("docker ps | grep sleeping")
+          podman.succeed("docker ps | grep sleeping")
+          client.succeed("docker stop sleeping")
+          client.succeed("docker rm sleeping")
+
+      with subtest("Clients without cert will be denied"):
+          client.succeed("rm /root/.docker/{cert,key}.pem")
+          client.fail("docker version")
+
+      with subtest("Clients with wrong cert will be denied"):
+          client.copy_from_host("client-2-cert.pem", "/root/.docker/cert.pem")
+          client.copy_from_host("client-2-key.pem", "/root/.docker/key.pem")
+          client.fail("docker version")
+
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/polaris.nix b/nixpkgs/nixos/tests/polaris.nix
new file mode 100644
index 000000000000..bb105d600032
--- /dev/null
+++ b/nixpkgs/nixos/tests/polaris.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "polaris";
+  meta.maintainers = with lib.maintainers; [ pbsds ];
+
+  nodes.machine =
+    { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      services.polaris = {
+        enable = true;
+        port = 5050;
+        settings.users = [
+          {
+            name = "test_user";
+            password = "very_secret_password";
+            admin = true;
+          }
+        ];
+      };
+    };
+
+  testScript = ''
+    machine.wait_for_unit("polaris.service")
+    machine.wait_for_open_port(5050)
+    machine.succeed("curl http://localhost:5050/api/version")
+    machine.succeed("curl -X GET http://localhost:5050/api/initial_setup -H  'accept: application/json' | jq -e '.has_any_users == true'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pomerium.nix b/nixpkgs/nixos/tests/pomerium.nix
new file mode 100644
index 000000000000..abaf56c518e0
--- /dev/null
+++ b/nixpkgs/nixos/tests/pomerium.nix
@@ -0,0 +1,109 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "pomerium";
+  meta = with lib.maintainers; {
+    maintainers = [ lukegb ];
+  };
+
+  nodes = let base = myIP: { pkgs, lib, ... }: {
+    virtualisation.vlans = [ 1 ];
+    networking = {
+      dhcpcd.enable = false;
+      firewall.allowedTCPPorts = [ 80 443 ];
+      hosts = {
+        "192.168.1.1" = [ "pomerium" "pom-auth" ];
+        "192.168.1.2" = [ "backend" "dummy-oidc" ];
+      };
+      interfaces.eth1.ipv4.addresses = pkgs.lib.mkOverride 0 [
+        { address = myIP; prefixLength = 24; }
+      ];
+    };
+  }; in {
+    pomerium = { pkgs, lib, ... }: {
+      imports = [ (base "192.168.1.1") ];
+      environment.systemPackages = with pkgs; [ chromium ];
+      services.pomerium = {
+        enable = true;
+        settings = {
+          address = ":80";
+          insecure_server = true;
+          authenticate_service_url = "http://pom-auth";
+
+          idp_provider = "oidc";
+          idp_scopes = [ "oidc" ];
+          idp_client_id = "dummy";
+          idp_provider_url = "http://dummy-oidc";
+
+          policy = [{
+            from = "https://my.website";
+            to = "http://192.168.1.2";
+            allow_public_unauthenticated_access = true;
+            preserve_host_header = true;
+          } {
+            from = "https://login.required";
+            to = "http://192.168.1.2";
+            allowed_domains = [ "my.domain" ];
+            preserve_host_header = true;
+          }];
+        };
+        secretsFile = pkgs.writeText "pomerium-secrets" ''
+          # 12345678901234567890123456789012 in base64
+          COOKIE_SECRET=MTIzNDU2Nzg5MDEyMzQ1Njc4OTAxMjM0NTY3ODkwMTI=
+          IDP_CLIENT_SECRET=dummy
+        '';
+      };
+    };
+    backend = { pkgs, lib, ... }: {
+      imports = [ (base "192.168.1.2") ];
+      services.nginx.enable = true;
+      services.nginx.virtualHosts."my.website" = {
+        root = pkgs.runCommand "testdir" {} ''
+          mkdir "$out"
+          echo hello world > "$out/index.html"
+        '';
+      };
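+      # Serve a minimal static OIDC discovery document and a placeholder login page
+      # so Pomerium has an identity provider to talk to without internet access.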
+      services.nginx.virtualHosts."dummy-oidc" = {
+        root = pkgs.runCommand "testdir" {} ''
+          mkdir -p "$out/.well-known"
+          cat <<EOF >"$out/.well-known/openid-configuration"
+            {
+              "issuer": "http://dummy-oidc",
+              "authorization_endpoint": "http://dummy-oidc/auth.txt",
+              "token_endpoint": "http://dummy-oidc/token",
+              "jwks_uri": "http://dummy-oidc/jwks.json",
+              "userinfo_endpoint": "http://dummy-oidc/userinfo",
+              "id_token_signing_alg_values_supported": ["RS256"]
+            }
+          EOF
+          echo hello I am login page >"$out/auth.txt"
+        '';
+      };
+    };
+  };
+
+  testScript = { ... }: ''
+    backend.wait_for_unit("nginx")
+    backend.wait_for_open_port(80)
+
+    pomerium.wait_for_unit("pomerium")
+    pomerium.wait_for_open_port(80)
+
+    with subtest("no authentication required"):
+        pomerium.succeed(
+            "curl --resolve my.website:80:127.0.0.1 http://my.website | grep 'hello world'"
+        )
+
+    with subtest("login required"):
+        pomerium.succeed(
+            "curl -I --resolve login.required:80:127.0.0.1 http://login.required | grep pom-auth"
+        )
+        pomerium.succeed(
+            "curl -L --resolve login.required:80:127.0.0.1 http://login.required | grep 'hello I am login page'"
+        )
+
+    with subtest("ui"):
+        pomerium.succeed(
+            # Check for a string that only appears if the UI is displayed correctly
+            "chromium --no-sandbox --headless --disable-gpu --dump-dom --host-resolver-rules='MAP login.required 127.0.0.1:80' http://login.required/.pomerium | grep 'contact your administrator'"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/portunus.nix b/nixpkgs/nixos/tests/portunus.nix
new file mode 100644
index 000000000000..6fcae7e1c4ce
--- /dev/null
+++ b/nixpkgs/nixos/tests/portunus.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "portunus";
+  meta.maintainers = with lib.maintainers; [ SuperSandro2000 ];
+
+  nodes.machine = _: {
+    services.portunus = {
+      enable = true;
+      ldap.suffix = "dc=example,dc=org";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("portunus.service")
+    machine.succeed("curl --fail -vvv http://localhost:8080/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/postfix-raise-smtpd-tls-security-level.nix b/nixpkgs/nixos/tests/postfix-raise-smtpd-tls-security-level.nix
new file mode 100644
index 000000000000..2a6c85a3a920
--- /dev/null
+++ b/nixpkgs/nixos/tests/postfix-raise-smtpd-tls-security-level.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix {
+  name = "postfix";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ common/user-account.nix ];
+    services.postfix = {
+      enable = true;
+      enableSubmissions = true;
+      submissionsOptions = {
+        smtpd_tls_security_level = "none";
+      };
+    };
+
+    environment.systemPackages = let
+      checkConfig = pkgs.writeScriptBin "check-config" ''
+        #!${pkgs.python3.interpreter}
+        import sys
+
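+        # Scan master.cf: once inside the "submissions" stanza, look for a
+        # continuation line that raises smtpd_tls_security_level to "encrypt".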
+        state = 1
+        success = False
+
+        with open("/etc/postfix/master.cf") as masterCf:
+          for line in masterCf:
+            if state == 1 and line.startswith("submissions"):
+              state = 2
+            elif state == 2 and line.startswith(" ") and "smtpd_tls_security_level=encrypt" in line:
+              success = True
+            elif state == 2 and not line.startswith(" "):
+              state = 3
+        if not success:
+          sys.exit(1)
+      '';
+
+    in [ checkConfig ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("postfix.service")
+    machine.succeed("check-config")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/postfix.nix b/nixpkgs/nixos/tests/postfix.nix
new file mode 100644
index 000000000000..1dbe6a4c5193
--- /dev/null
+++ b/nixpkgs/nixos/tests/postfix.nix
@@ -0,0 +1,77 @@
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+in
+import ./make-test-python.nix {
+  name = "postfix";
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ common/user-account.nix ];
+    services.postfix = {
+      enable = true;
+      enableSubmission = true;
+      enableSubmissions = true;
+      tlsTrustedAuthorities = "${certs.ca.cert}";
+      sslCert = "${certs.${domain}.cert}";
+      sslKey = "${certs.${domain}.key}";
+      submissionsOptions = {
+          smtpd_sasl_auth_enable = "yes";
+          smtpd_client_restrictions = "permit";
+          milter_macro_daemon_name = "ORIGINATING";
+      };
+    };
+
+    security.pki.certificateFiles = [
+      certs.ca.cert
+    ];
+
+    networking.extraHosts = ''
+      127.0.0.1 ${domain}
+    '';
+
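+    # Three helper scripts exercise plain SMTP, SMTP upgraded via STARTTLS, and
+    # implicit TLS (SMTPS); the snakeoil CA registered above lets certificate
+    # verification succeed for the TLS variants.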
+    environment.systemPackages = let
+      sendTestMail = pkgs.writeScriptBin "send-testmail" ''
+        #!${pkgs.python3.interpreter}
+        import smtplib
+
+        with smtplib.SMTP('${domain}') as smtp:
+          smtp.sendmail('root@localhost', 'alice@localhost', 'Subject: Test\n\nTest data.')
+          smtp.quit()
+      '';
+
+      sendTestMailStarttls = pkgs.writeScriptBin "send-testmail-starttls" ''
+        #!${pkgs.python3.interpreter}
+        import smtplib
+        import ssl
+
+        ctx = ssl.create_default_context()
+
+        with smtplib.SMTP('${domain}') as smtp:
+          smtp.ehlo()
+          smtp.starttls(context=ctx)
+          smtp.ehlo()
+          smtp.sendmail('root@localhost', 'alice@localhost', 'Subject: Test STARTTLS\n\nTest data.')
+          smtp.quit()
+      '';
+
+      sendTestMailSmtps = pkgs.writeScriptBin "send-testmail-smtps" ''
+        #!${pkgs.python3.interpreter}
+        import smtplib
+        import ssl
+
+        ctx = ssl.create_default_context()
+
+        with smtplib.SMTP_SSL(host='${domain}', context=ctx) as smtp:
+          smtp.sendmail('root@localhost', 'alice@localhost', 'Subject: Test SMTPS\n\nTest data.')
+          smtp.quit()
+      '';
+    in [ sendTestMail sendTestMailStarttls sendTestMailSmtps ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("postfix.service")
+    machine.succeed("send-testmail")
+    machine.succeed("send-testmail-starttls")
+    machine.succeed("send-testmail-smtps")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/postfixadmin.nix b/nixpkgs/nixos/tests/postfixadmin.nix
new file mode 100644
index 000000000000..b2712f4699ae
--- /dev/null
+++ b/nixpkgs/nixos/tests/postfixadmin.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "postfixadmin";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ globin ];
+  };
+
+  nodes = {
+    postfixadmin = { config, pkgs, ... }: {
+      services.postfixadmin = {
+        enable = true;
+        hostName = "postfixadmin";
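+        # bcrypt hash used as the setup password for the setup.php call in the test script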
+        setupPasswordFile = pkgs.writeText "insecure-test-setup-pw-file" "$2y$10$r0p63YCjd9rb9nHrV9UtVuFgGTmPDLKu.0UIJoQTkWCZZze2iuB1m";
+      };
+      services.nginx.virtualHosts.postfixadmin = {
+        forceSSL = false;
+        enableACME = false;
+      };
+    };
+  };
+
+  testScript = ''
+    postfixadmin.start()
+    postfixadmin.wait_for_unit("postgresql.service")
+    postfixadmin.wait_for_unit("phpfpm-postfixadmin.service")
+    postfixadmin.wait_for_unit("nginx.service")
+    postfixadmin.succeed(
+        "curl -sSfL http://postfixadmin/setup.php -X POST -F 'setup_password=not production'"
+    )
+    postfixadmin.succeed("curl -sSfL http://postfixadmin/ | grep 'Mail admins login here'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/postgis.nix b/nixpkgs/nixos/tests/postgis.nix
new file mode 100644
index 000000000000..d0685abc510c
--- /dev/null
+++ b/nixpkgs/nixos/tests/postgis.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "postgis";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lsix ];
+  };
+
+  nodes = {
+    master =
+      { pkgs, ... }:
+
+      {
+        services.postgresql = let mypg = pkgs.postgresql; in {
+            enable = true;
+            package = mypg;
+            extraPlugins = with mypg.pkgs; [
+              postgis
+            ];
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    master.wait_for_unit("postgresql")
+    master.sleep(10)  # give the server extra time to finish starting up before creating the extensions
+    master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis;'")
+    master.succeed("sudo -u postgres psql -c 'CREATE EXTENSION postgis_topology;'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/postgresql-jit.nix b/nixpkgs/nixos/tests/postgresql-jit.nix
new file mode 100644
index 000000000000..baf26b8da2b3
--- /dev/null
+++ b/nixpkgs/nixos/tests/postgresql-jit.nix
@@ -0,0 +1,48 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  inherit (pkgs) lib;
+  packages = builtins.attrNames (import ../../pkgs/servers/sql/postgresql pkgs);
+
+  mkJitTest = packageName: makeTest {
+    name = packageName;
+    meta.maintainers = with lib.maintainers; [ ma27 ];
+    nodes.machine = { pkgs, lib, ... }: {
+      services.postgresql = {
+        enable = true;
+        enableJIT = true;
+        package = pkgs.${packageName};
+        initialScript = pkgs.writeText "init.sql" ''
+          create table demo (id int);
+          insert into demo (id) select generate_series(1, 5);
+        '';
+      };
+    };
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("postgresql.service")
+
+      with subtest("JIT is enabled"):
+          machine.succeed("sudo -u postgres psql <<<'show jit;' | grep 'on'")
+
+      with subtest("Test JIT works fine"):
+          output = machine.succeed(
+              "cat ${pkgs.writeText "test.sql" ''
+                set jit_above_cost = 1;
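+                -- with jit_above_cost = 1 the planner JIT-compiles even this trivial
+                -- query, so the EXPLAIN output should contain a "JIT:" section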
+                EXPLAIN ANALYZE SELECT CONCAT('jit result = ', SUM(id)) FROM demo;
+                SELECT CONCAT('jit result = ', SUM(id)) from demo;
+              ''} | sudo -u postgres psql"
+          )
+          assert "JIT:" in output
+          assert "jit result = 15" in output
+
+      machine.shutdown()
+    '';
+  };
+in
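+# Build one JIT test per attribute name of the PostgreSQL package set.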
+lib.genAttrs packages mkJitTest
diff --git a/nixpkgs/nixos/tests/postgresql-wal-receiver.nix b/nixpkgs/nixos/tests/postgresql-wal-receiver.nix
new file mode 100644
index 000000000000..b0bd7711dbcd
--- /dev/null
+++ b/nixpkgs/nixos/tests/postgresql-wal-receiver.nix
@@ -0,0 +1,119 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  lib = pkgs.lib;
+
+  # Makes a test for a PostgreSQL package, given by name and looked up from `pkgs`.
+  makePostgresqlWalReceiverTest = postgresqlPackage:
+  {
+    name = postgresqlPackage;
+    value =
+      let
+        pkg = pkgs."${postgresqlPackage}";
+        postgresqlDataDir = "/var/lib/postgresql/${pkg.psqlSchema}";
+        replicationUser = "wal_receiver_user";
+        replicationSlot = "wal_receiver_slot";
+        replicationConn = "postgresql://${replicationUser}@localhost";
+        baseBackupDir = "/tmp/pg_basebackup";
+        walBackupDir = "/tmp/pg_wal";
+        atLeast12 = lib.versionAtLeast pkg.version "12.0";
+
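+        # PostgreSQL 12 replaced recovery.conf with an empty recovery.signal file;
+        # for those versions restore_command is set in postgresql.conf instead
+        # (see the mkIf atLeast12 settings below).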
+        recoveryFile = if atLeast12
+            then pkgs.writeTextDir "recovery.signal" ""
+            else pkgs.writeTextDir "recovery.conf" "restore_command = 'cp ${walBackupDir}/%f %p'";
+
+      in makeTest {
+        name = "postgresql-wal-receiver-${postgresqlPackage}";
+        meta.maintainers = with lib.maintainers; [ pacien ];
+
+        nodes.machine = { ... }: {
+          services.postgresql = {
+            package = pkg;
+            enable = true;
+            settings = lib.mkMerge [
+              {
+                wal_level = "archive"; # alias for replica on pg >= 9.6
+                max_wal_senders = 10;
+                max_replication_slots = 10;
+              }
+              (lib.mkIf atLeast12 {
+                restore_command = "cp ${walBackupDir}/%f %p";
+                recovery_end_command = "touch recovery.done";
+              })
+            ];
+            authentication = ''
+              host replication ${replicationUser} all trust
+            '';
+            initialScript = pkgs.writeText "init.sql" ''
+              create user ${replicationUser} replication;
+              select * from pg_create_physical_replication_slot('${replicationSlot}');
+            '';
+          };
+
+          services.postgresqlWalReceiver.receivers.main = {
+            postgresqlPackage = pkg;
+            connection = replicationConn;
+            slot = replicationSlot;
+            directory = walBackupDir;
+          };
+          # This only speeds up the test; there is no timing race. The service is set
+          # to always restart, and the default RestartSec of 60s is fine on a real
+          # system but too long for a test.
+          systemd.services.postgresql-wal-receiver-main.serviceConfig.RestartSec = lib.mkForce 5;
+        };
+
+        testScript = ''
+          # make an initial base backup
+          machine.wait_for_unit("postgresql")
+          machine.wait_for_unit("postgresql-wal-receiver-main")
+          # The WAL receiver health-checks PostgreSQL every 5 seconds, so wait to be
+          # sure both sides have connected to each other (required only for 9.4).
+          machine.sleep(5)
+          machine.succeed(
+              "${pkg}/bin/pg_basebackup --dbname=${replicationConn} --pgdata=${baseBackupDir}"
+          )
+
+          # create a dummy table with 100 records
+          machine.succeed(
+              "sudo -u postgres psql --command='create table dummy as select * from generate_series(1, 100) as val;'"
+          )
+
+          # stop postgres and destroy data
+          machine.systemctl("stop postgresql")
+          machine.systemctl("stop postgresql-wal-receiver-main")
+          machine.succeed("rm -r ${postgresqlDataDir}/{base,global,pg_*}")
+
+          # restore the base backup
+          machine.succeed(
+              "cp -r ${baseBackupDir}/* ${postgresqlDataDir} && chown postgres:postgres -R ${postgresqlDataDir}"
+          )
+
+          # prepare WAL and recovery
+          machine.succeed("chmod a+rX -R ${walBackupDir}")
+          machine.execute(
+              "for part in ${walBackupDir}/*.partial; do mv $part ''${part%%.*}; done"
+          )  # make use of partial segments too
+          machine.succeed(
+              "cp ${recoveryFile}/* ${postgresqlDataDir}/ && chmod 666 ${postgresqlDataDir}/recovery*"
+          )
+
+          # replay WAL
+          machine.systemctl("start postgresql")
+          machine.wait_for_file("${postgresqlDataDir}/recovery.done")
+          machine.systemctl("restart postgresql")
+          machine.wait_for_unit("postgresql")
+
+          # check that our records have been restored
+          machine.succeed(
+              "test $(sudo -u postgres psql --pset='pager=off' --tuples-only --command='select count(distinct val) from dummy;') -eq 100"
+          )
+        '';
+      };
+    };
+
+# Maps the generic function over all attributes of PostgreSQL packages
+in builtins.listToAttrs (map makePostgresqlWalReceiverTest (builtins.attrNames (import ../../pkgs/servers/sql/postgresql pkgs)))
diff --git a/nixpkgs/nixos/tests/postgresql.nix b/nixpkgs/nixos/tests/postgresql.nix
new file mode 100644
index 000000000000..c0dd24cf6ad2
--- /dev/null
+++ b/nixpkgs/nixos/tests/postgresql.nix
@@ -0,0 +1,224 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  postgresql-versions = import ../../pkgs/servers/sql/postgresql pkgs;
+  test-sql = pkgs.writeText "postgresql-test" ''
+    CREATE EXTENSION pgcrypto; -- just to check if lib loading works
+    CREATE TABLE sth (
+      id int
+    );
+    INSERT INTO sth (id) VALUES (1);
+    INSERT INTO sth (id) VALUES (1);
+    INSERT INTO sth (id) VALUES (1);
+    INSERT INTO sth (id) VALUES (1);
+    INSERT INTO sth (id) VALUES (1);
+    CREATE TABLE xmltest ( doc xml );
+    INSERT INTO xmltest (doc) VALUES ('<test>ok</test>'); -- check if libxml2 enabled
+  '';
+  make-postgresql-test = postgresql-name: postgresql-package: backup-all: makeTest {
+    name = postgresql-name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ zagy ];
+    };
+
+    nodes.machine = {...}:
+      {
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+        };
+
+        services.postgresqlBackup = {
+          enable = true;
+          databases = optional (!backup-all) "postgres";
+        };
+      };
+
+    testScript = let
+      backupName = if backup-all then "all" else "postgres";
+      backupService = if backup-all then "postgresqlBackup" else "postgresqlBackup-postgres";
+      backupFileBase = "/var/backup/postgresql/${backupName}";
+    in ''
+      def check_count(statement, lines):
+          return 'test $(sudo -u postgres psql postgres -tAc "{}"|wc -l) -eq {}'.format(
+              statement, lines
+          )
+
+
+      machine.start()
+      machine.wait_for_unit("postgresql")
+
+      with subtest("Postgresql is available just after unit start"):
+          machine.succeed(
+              "cat ${test-sql} | sudo -u postgres psql"
+          )
+
+      with subtest("Postgresql survives restart (bug #1735)"):
+          machine.shutdown()
+          import time
+          time.sleep(2)
+          machine.start()
+          machine.wait_for_unit("postgresql")
+
+      machine.fail(check_count("SELECT * FROM sth;", 3))
+      machine.succeed(check_count("SELECT * FROM sth;", 5))
+      machine.fail(check_count("SELECT * FROM sth;", 4))
+      machine.succeed(check_count("SELECT xpath('/test/text()', doc) FROM xmltest;", 1))
+
+      with subtest("Backup service works"):
+          machine.succeed(
+              "systemctl start ${backupService}.service",
+              "zcat ${backupFileBase}.sql.gz | grep '<test>ok</test>'",
+              "ls -hal /var/backup/postgresql/ >/dev/console",
+              "stat -c '%a' ${backupFileBase}.sql.gz | grep 600",
+          )
+      with subtest("Backup service removes prev files"):
+          machine.succeed(
+              # Create dummy prev files.
+              "touch ${backupFileBase}.prev.sql{,.gz,.zstd}",
+              "chown postgres:postgres ${backupFileBase}.prev.sql{,.gz,.zstd}",
+
+              # Run backup.
+              "systemctl start ${backupService}.service",
+              "ls -hal /var/backup/postgresql/ >/dev/console",
+
+              # Since nothing has changed in the database, the cur and prev files
+              # should match.
+              "zcat ${backupFileBase}.sql.gz | grep '<test>ok</test>'",
+              "cmp ${backupFileBase}.sql.gz ${backupFileBase}.prev.sql.gz",
+
+              # The prev files with unused suffix should be removed.
+              "[ ! -f '${backupFileBase}.prev.sql' ]",
+              "[ ! -f '${backupFileBase}.prev.sql.zstd' ]",
+
+              # Both cur and prev file should only be accessible by the postgres user.
+              "stat -c '%a' ${backupFileBase}.sql.gz | grep 600",
+              "stat -c '%a' '${backupFileBase}.prev.sql.gz' | grep 600",
+          )
+      with subtest("Backup service fails gracefully"):
+          # Sabotage the backup process
+          machine.succeed("rm /run/postgresql/.s.PGSQL.5432")
+          machine.fail(
+              "systemctl start ${backupService}.service",
+          )
+          machine.succeed(
+              "ls -hal /var/backup/postgresql/ >/dev/console",
+              "zcat ${backupFileBase}.prev.sql.gz | grep '<test>ok</test>'",
+              "stat ${backupFileBase}.in-progress.sql.gz",
+          )
+          # In a previous version, the second run would overwrite prev.sql.gz,
+          # so we test a second run as well.
+          machine.fail(
+              "systemctl start ${backupService}.service",
+          )
+          machine.succeed(
+              "stat ${backupFileBase}.in-progress.sql.gz",
+              "zcat ${backupFileBase}.prev.sql.gz | grep '<test>ok</test>'",
+          )
+
+
+      with subtest("Initdb works"):
+          machine.succeed("sudo -u postgres initdb -D /tmp/testpostgres2")
+
+      machine.shutdown()
+    '';
+
+  };
+
+  mk-ensure-clauses-test = postgresql-name: postgresql-package: makeTest {
+    name = postgresql-name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ zagy ];
+    };
+
+    nodes.machine = {...}:
+      {
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+          ensureUsers = [
+            {
+              name = "all-clauses";
+              ensureClauses = {
+                superuser = true;
+                createdb = true;
+                createrole = true;
+                "inherit" = true;
+                login = true;
+                replication = true;
+                bypassrls = true;
+              };
+            }
+            {
+              name = "default-clauses";
+            }
+          ];
+        };
+      };
+
+    testScript = let
+      getClausesQuery = user: pkgs.lib.concatStringsSep " "
+        [
+          "SELECT row_to_json(row)"
+          "FROM ("
+          "SELECT"
+            "rolsuper,"
+            "rolinherit,"
+            "rolcreaterole,"
+            "rolcreatedb,"
+            "rolcanlogin,"
+            "rolreplication,"
+            "rolbypassrls"
+          "FROM pg_roles"
+          "WHERE rolname = '${user}'"
+          ") row;"
+        ];
+    in ''
+      import json
+      machine.start()
+      machine.wait_for_unit("postgresql")
+
+      with subtest("All user permissions are set according to the ensureClauses attr"):
+          clauses = json.loads(
+            machine.succeed(
+                "sudo -u postgres psql -tc \"${getClausesQuery "all-clauses"}\""
+            )
+          )
+          print(clauses)
+          assert clauses['rolsuper'], 'expected user with clauses to have superuser clause'
+          assert clauses['rolinherit'], 'expected user with clauses to have inherit clause'
+          assert clauses['rolcreaterole'], 'expected user with clauses to have create role clause'
+          assert clauses['rolcreatedb'], 'expected user with clauses to have create db clause'
+          assert clauses['rolcanlogin'], 'expected user with clauses to have login clause'
+          assert clauses['rolreplication'], 'expected user with clauses to have replication clause'
+          assert clauses['rolbypassrls'], 'expected user with clauses to have bypassrls clause'
+
+      with subtest("All user permissions default when ensureClauses is not provided"):
+          clauses = json.loads(
+            machine.succeed(
+                "sudo -u postgres psql -tc \"${getClausesQuery "default-clauses"}\""
+            )
+          )
+          assert not clauses['rolsuper'], 'expected user with no clauses set to have default superuser clause'
+          assert clauses['rolinherit'], 'expected user with no clauses set to have default inherit clause'
+          assert not clauses['rolcreaterole'], 'expected user with no clauses set to have default create role clause'
+          assert not clauses['rolcreatedb'], 'expected user with no clauses set to have default create db clause'
+          assert clauses['rolcanlogin'], 'expected user with no clauses set to have default login clause'
+          assert not clauses['rolreplication'], 'expected user with no clauses set to have default replication clause'
+          assert not clauses['rolbypassrls'], 'expected user with no clauses set to have default bypassrls clause'
+
+      machine.shutdown()
+    '';
+  };
+in
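+  # For every packaged PostgreSQL version, build three tests: the basic test,
+  # a variant that backs up all databases, and an ensureClauses variant.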
+  concatMapAttrs (name: package: {
+    ${name} = make-postgresql-test name package false;
+    ${name + "-backup-all"} = make-postgresql-test "${name + "-backup-all"}" package true;
+    ${name + "-clauses"} = mk-ensure-clauses-test name package;
+  }) postgresql-versions
diff --git a/nixpkgs/nixos/tests/power-profiles-daemon.nix b/nixpkgs/nixos/tests/power-profiles-daemon.nix
new file mode 100644
index 000000000000..c887cde4b829
--- /dev/null
+++ b/nixpkgs/nixos/tests/power-profiles-daemon.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "power-profiles-daemon";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mvnetbiz ];
+  };
+  nodes.machine = { pkgs, ... }: {
+    security.polkit.enable = true;
+    services.power-profiles-daemon.enable = true;
+    environment.systemPackages = [ pkgs.glib ];
+  };
+
+  testScript = ''
+    def get_profile():
+        return machine.succeed(
+            """gdbus call --system --dest net.hadess.PowerProfiles --object-path /net/hadess/PowerProfiles \
+    --method org.freedesktop.DBus.Properties.Get 'net.hadess.PowerProfiles' 'ActiveProfile'
+    """
+        )
+
+
+    def set_profile(profile):
+        return machine.succeed(
+            """gdbus call --system --dest net.hadess.PowerProfiles --object-path /net/hadess/PowerProfiles \
+    --method org.freedesktop.DBus.Properties.Set 'net.hadess.PowerProfiles' 'ActiveProfile' "<'{profile}'>"
+    """.format(
+                profile=profile
+            )
+        )
+
+
+    machine.wait_for_unit("multi-user.target")
+
+    set_profile("power-saver")
+    profile = get_profile()
+    if "power-saver" not in profile:
+        raise Exception("Unable to set power-saver profile")
+
+
+    set_profile("balanced")
+    profile = get_profile()
+    if "balanced" not in profile:
+        raise Exception("Unable to set balanced profile")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/powerdns-admin.nix b/nixpkgs/nixos/tests/powerdns-admin.nix
new file mode 100644
index 000000000000..d326d74a9826
--- /dev/null
+++ b/nixpkgs/nixos/tests/powerdns-admin.nix
@@ -0,0 +1,139 @@
+# Test powerdns-admin
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+let
+  defaultConfig = ''
+    BIND_ADDRESS = '127.0.0.1'
+    PORT = 8000
+    CAPTCHA_ENABLE = False
+  '';
+
+  makeAppTest = name: configs: makeTest {
+    name = "powerdns-admin-${name}";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ Flakebi zhaofengli ];
+    };
+
+    nodes.server = { pkgs, config, ... }: mkMerge ([
+      {
+        services.powerdns-admin = {
+          enable = true;
+          secretKeyFile = "/etc/powerdns-admin/secret";
+          saltFile = "/etc/powerdns-admin/salt";
+        };
+        # It's insecure to have secrets in the world-readable nix store, but this is just a test
+        environment.etc."powerdns-admin/secret".text = "secret key";
+        environment.etc."powerdns-admin/salt".text = "salt";
+        environment.systemPackages = [
+          (pkgs.writeShellScriptBin "run-test" config.system.build.testScript)
+        ];
+      }
+    ] ++ configs);
+
+    testScript = ''
+      server.wait_for_unit("powerdns-admin.service")
+      server.wait_until_succeeds("run-test", timeout=10)
+    '';
+  };
+
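+  # Each test node is assembled from one "backend" fragment and one "listen"
+  # fragment of this matrix; the combinations under test are listed at the bottom.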
+  matrix = {
+    backend = {
+      mysql = {
+        services.powerdns-admin = {
+          config = ''
+            ${defaultConfig}
+            SQLALCHEMY_DATABASE_URI = 'mysql://powerdnsadmin@/powerdnsadmin?unix_socket=/run/mysqld/mysqld.sock'
+          '';
+        };
+        systemd.services.powerdns-admin = {
+          after = [ "mysql.service" ];
+          serviceConfig.BindPaths = "/run/mysqld";
+        };
+
+        services.mysql = {
+          enable = true;
+          package = pkgs.mariadb;
+          ensureDatabases = [ "powerdnsadmin" ];
+          ensureUsers = [
+            {
+              name = "powerdnsadmin";
+              ensurePermissions = {
+                "powerdnsadmin.*" = "ALL PRIVILEGES";
+              };
+            }
+          ];
+        };
+      };
+      postgresql = {
+        services.powerdns-admin = {
+          config = ''
+            ${defaultConfig}
+            SQLALCHEMY_DATABASE_URI = 'postgresql://powerdnsadmin@/powerdnsadmin?host=/run/postgresql'
+          '';
+        };
+        systemd.services.powerdns-admin = {
+          after = [ "postgresql.service" ];
+          serviceConfig.BindPaths = "/run/postgresql";
+        };
+
+        services.postgresql = {
+          enable = true;
+          ensureDatabases = [ "powerdnsadmin" ];
+          ensureUsers = [
+            {
+              name = "powerdnsadmin";
+              ensureDBOwnership = true;
+            }
+          ];
+        };
+      };
+    };
+    listen = {
+      tcp = {
+        services.powerdns-admin.extraArgs = [ "-b" "127.0.0.1:8000" ];
+        system.build.testScript = ''
+          set -euxo pipefail
+          curl -sSf http://127.0.0.1:8000/
+
+          # Create account to check that the database migrations ran
+          csrf_token="$(curl -sSfc session http://127.0.0.1:8000/register | grep _csrf_token | cut -d\" -f6)"
+          # Outputs 'Redirecting' if successful
+          curl -sSfb session http://127.0.0.1:8000/register \
+            -F "_csrf_token=$csrf_token" \
+            -F "firstname=first" \
+            -F "lastname=last" \
+            -F "email=a@example.com" \
+            -F "username=user" \
+            -F "password=password" \
+            -F "rpassword=password" | grep Redirecting
+
+          # Login
+          # Outputs 'Redirecting' if successful
+          curl -sSfb session http://127.0.0.1:8000/login \
+            -F "_csrf_token=$csrf_token" \
+            -F "username=user" \
+            -F "password=password" | grep Redirecting
+
+          # Check that we are logged in; this redirects to /admin/setting/pdns if we are
+          curl -sSfb session http://127.0.0.1:8000/dashboard/ | grep /admin/setting
+        '';
+      };
+      unix = {
+        services.powerdns-admin.extraArgs = [ "-b" "unix:/run/powerdns-admin/http.sock" ];
+        system.build.testScript = ''
+          curl -sSf --unix-socket /run/powerdns-admin/http.sock http://somehost/
+        '';
+      };
+    };
+  };
+in
+with matrix; {
+  postgresql = makeAppTest "postgresql" [ backend.postgresql listen.tcp ];
+  mysql = makeAppTest "mysql" [ backend.mysql listen.tcp ];
+  unix-listener = makeAppTest "unix-listener" [ backend.postgresql listen.unix ];
+}
diff --git a/nixpkgs/nixos/tests/powerdns.nix b/nixpkgs/nixos/tests/powerdns.nix
new file mode 100644
index 000000000000..599d5ea67efe
--- /dev/null
+++ b/nixpkgs/nixos/tests/powerdns.nix
@@ -0,0 +1,62 @@
+# This test runs PowerDNS authoritative server with the
+# generic MySQL backend (gmysql) to connect to a
+# MariaDB server using UNIX sockets authentication.
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "powerdns";
+
+  nodes.server = { ... }: {
+    services.powerdns.enable = true;
+    services.powerdns.extraConfig = ''
+      launch=gmysql
+      gmysql-user=pdns
+      zone-cache-refresh-interval=0
+    '';
+
+    services.mysql = {
+      enable = true;
+      package = pkgs.mariadb;
+      ensureDatabases = [ "powerdns" ];
+      ensureUsers = lib.singleton
+        { name = "pdns";
+          ensurePermissions = { "powerdns.*" = "ALL PRIVILEGES"; };
+        };
+    };
+
+    environment.systemPackages = with pkgs;
+      [ dnsutils powerdns mariadb ];
+  };
+
+  testScript = ''
+    with subtest("PowerDNS database exists"):
+        server.wait_for_unit("mysql")
+        server.succeed("echo 'SHOW DATABASES;' | sudo -u pdns mysql -u pdns >&2")
+
+    with subtest("Loading the MySQL schema works"):
+        server.succeed(
+            "sudo -u pdns mysql -u pdns -D powerdns <"
+            "${pkgs.powerdns}/share/doc/pdns/schema.mysql.sql"
+        )
+
+    with subtest("PowerDNS server starts"):
+        server.wait_for_unit("pdns")
+        server.succeed("dig version.bind txt chaos @127.0.0.1 >&2")
+
+    with subtest("Adding an example zone works"):
+        # Run pdnsutil as the pdns user so it can read the server configuration
+        pdnsutil = "sudo -u pdns pdnsutil "
+        server.succeed(f"{pdnsutil} create-zone example.com ns1.example.com")
+        server.succeed(f"{pdnsutil} add-record  example.com ns1 A 192.168.1.2")
+
+    with subtest("Querying the example zone works"):
+        reply = server.succeed("dig +noall +answer ns1.example.com @127.0.0.1")
+        assert (
+            "192.168.1.2" in reply
+        ), f"""
+        The reply does not contain the expected IP address:
+          Expected:
+            ns1.example.com.        3600    IN      A       192.168.1.2
+          Reply:
+            {reply}"""
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pppd.nix b/nixpkgs/nixos/tests/pppd.nix
new file mode 100644
index 000000000000..d599f918036f
--- /dev/null
+++ b/nixpkgs/nixos/tests/pppd.nix
@@ -0,0 +1,64 @@
+import ./make-test-python.nix (
+  let
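+    # Shared CHAP credentials in pppd's chap-secrets format
+    # (client, server, secret, allowed IPs); both nodes install this file.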
+    chap-secrets = {
+      text = ''"flynn" * "reindeerflotilla" *'';
+      mode = "0640";
+    };
+  in {
+    name = "pppd";
+
+    nodes = {
+      server = {config, pkgs, ...}: {
+        config = {
+          # Run a PPPoE access concentrator server. It will spawn an
+          # appropriate PPP server process when a PPPoE client sets up a
+          # PPPoE session.
+          systemd.services.pppoe-server = {
+            restartTriggers = [
+              config.environment.etc."ppp/pppoe-server-options".source
+              config.environment.etc."ppp/chap-secrets".source
+            ];
+            after = ["network.target"];
+            serviceConfig = {
+              ExecStart = "${pkgs.rpPPPoE}/sbin/pppoe-server -F -O /etc/ppp/pppoe-server-options -q ${pkgs.ppp}/sbin/pppd -I eth1 -L 192.0.2.1 -R 192.0.2.2";
+            };
+            wantedBy = ["multi-user.target"];
+          };
+          environment.etc = {
+            "ppp/pppoe-server-options".text = ''
+              lcp-echo-interval 10
+              lcp-echo-failure 2
+              plugin pppoe.so
+              require-chap
+              nobsdcomp
+              noccp
+              novj
+            '';
+            "ppp/chap-secrets" = chap-secrets;
+          };
+        };
+      };
+      client = {config, pkgs, ...}: {
+        services.pppd = {
+          enable = true;
+          peers.test = {
+            config = ''
+              plugin pppoe.so eth1
+              name "flynn"
+              noipdefault
+              persist
+              noauth
+              debug
+            '';
+          };
+        };
+        environment.etc."ppp/chap-secrets" = chap-secrets;
+      };
+    };
+
+    testScript = ''
+      start_all()
+      client.wait_until_succeeds("ping -c1 -W1 192.0.2.1")
+      server.wait_until_succeeds("ping -c1 -W1 192.0.2.2")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/predictable-interface-names.nix b/nixpkgs/nixos/tests/predictable-interface-names.nix
new file mode 100644
index 000000000000..51d5e8ae59b9
--- /dev/null
+++ b/nixpkgs/nixos/tests/predictable-interface-names.nix
@@ -0,0 +1,60 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  testCombinations = pkgs.lib.cartesianProductOfSets {
+    predictable = [true false];
+    withNetworkd = [true false];
+    systemdStage1 = [true false];
+  };
+in pkgs.lib.listToAttrs (builtins.map ({ predictable, withNetworkd, systemdStage1 }: {
+  name = pkgs.lib.optionalString (!predictable) "un" + "predictable"
+       + pkgs.lib.optionalString withNetworkd "Networkd"
+       + pkgs.lib.optionalString systemdStage1 "SystemdStage1";
+  value = makeTest {
+    name = pkgs.lib.optionalString (!predictable) "un" + "predictableInterfaceNames"
+         + pkgs.lib.optionalString withNetworkd "-with-networkd"
+         + pkgs.lib.optionalString systemdStage1 "-systemd-stage-1";
+    meta = {};
+
+    nodes.machine = { lib, ... }: let
+      script = ''
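+        # With predictable names enabled the legacy name eth0 must not exist,
+        # hence the inverted check below.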
+        ip link
+        if ${lib.optionalString predictable "!"} ip link show eth0; then
+          echo Success
+        else
+          exit 1
+        fi
+      '';
+    in {
+      networking.usePredictableInterfaceNames = lib.mkForce predictable;
+      networking.useNetworkd = withNetworkd;
+      networking.dhcpcd.enable = !withNetworkd;
+      networking.useDHCP = !withNetworkd;
+
+      # Check if predictable interface names are working in stage-1
+      boot.initrd.postDeviceCommands = lib.mkIf (!systemdStage1) script;
+
+      boot.initrd.systemd = lib.mkIf systemdStage1 {
+        enable = true;
+        initrdBin = [ pkgs.iproute2 ];
+        services.systemd-udev-settle.wantedBy = ["initrd.target"];
+        services.check-interfaces = {
+          requiredBy = ["initrd.target"];
+          after = ["systemd-udev-settle.service"];
+          serviceConfig.Type = "oneshot";
+          path = [ pkgs.iproute2 ];
+          inherit script;
+        };
+      };
+    };
+
+    testScript = ''
+      print(machine.succeed("ip link"))
+      machine.${if predictable then "fail" else "succeed"}("ip link show eth0")
+    '';
+  };
+}) testCombinations)
diff --git a/nixpkgs/nixos/tests/printing.nix b/nixpkgs/nixos/tests/printing.nix
new file mode 100644
index 000000000000..29c5d810f215
--- /dev/null
+++ b/nixpkgs/nixos/tests/printing.nix
@@ -0,0 +1,122 @@
+# Test printing via CUPS.
+
+import ./make-test-python.nix (
+{ pkgs
+, socket ? true # whether to use socket activation
+, ...
+}:
+
+{
+  name = "printing";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ domenkozar eelco matthewbauer ];
+  };
+
+  nodes.server = { ... }: {
+    services.printing = {
+      enable = true;
+      stateless = true;
+      startWhenNeeded = socket;
+      listenAddresses = [ "*:631" ];
+      defaultShared = true;
+      openFirewall = true;
+      extraConf = ''
+        <Location />
+          Order allow,deny
+          Allow from all
+        </Location>
+      '';
+    };
+    # Add an HP Deskjet printer connected via USB to the server.
+    hardware.printers.ensurePrinters = [{
+      name = "DeskjetLocal";
+      deviceUri = "usb://foobar/printers/foobar";
+      model = "drv:///sample.drv/deskjet.ppd";
+    }];
+  };
+
+  nodes.client = { ... }: {
+    services.printing.enable = true;
+    services.printing.startWhenNeeded = socket;
+    # Add printer to the client as well, via IPP.
+    hardware.printers.ensurePrinters = [{
+      name = "DeskjetRemote";
+      deviceUri = "ipp://server/printers/DeskjetLocal";
+      model = "drv:///sample.drv/deskjet.ppd";
+    }];
+    hardware.printers.ensureDefaultPrinter = "DeskjetRemote";
+  };
+
+  testScript = ''
+    import os
+    import re
+
+    start_all()
+
+    with subtest("Make sure that cups is up on both sides and printers are set up"):
+        server.wait_for_unit("cups.${if socket then "socket" else "service"}")
+        client.wait_for_unit("cups.${if socket then "socket" else "service"}")
+
+    assert "scheduler is running" in client.succeed("lpstat -r")
+
+    with subtest("UNIX socket is used for connections"):
+        assert "/var/run/cups/cups.sock" in client.succeed("lpstat -H")
+
+    with subtest("HTTP server is available too"):
+        client.succeed("curl --fail http://localhost:631/")
+        client.succeed(f"curl --fail http://{server.name}:631/")
+        server.fail(f"curl --fail --connect-timeout 2 http://{client.name}:631/")
+
+    with subtest("LP status checks"):
+        assert "DeskjetRemote accepting requests" in client.succeed("lpstat -a")
+        assert "DeskjetLocal accepting requests" in client.succeed(
+            f"lpstat -h {server.name}:631 -a"
+        )
+        client.succeed("cupsdisable DeskjetRemote")
+        out = client.succeed("lpq")
+        print(out)
+        assert re.search(
+            "DeskjetRemote is not ready.*no entries",
+            client.succeed("lpq"),
+            flags=re.DOTALL,
+        )
+        client.succeed("cupsenable DeskjetRemote")
+        assert re.match(
+            "DeskjetRemote is ready.*no entries", client.succeed("lpq"), flags=re.DOTALL
+        )
+
+    # Test printing various file types.
+    for file in [
+        "${pkgs.groff.doc}/share/doc/*/examples/mom/penguin.pdf",
+        "${pkgs.groff.doc}/share/doc/*/meref.ps",
+        "${pkgs.cups.out}/share/doc/cups/images/cups.png",
+        "${pkgs.pcre.doc}/share/doc/pcre/pcre.txt",
+    ]:
+        file_name = os.path.basename(file)
+        with subtest(f"print {file_name}"):
+            # Print the file on the client.
+            print(client.succeed("lpq"))
+            client.succeed(f"lp {file}")
+            client.wait_until_succeeds(
+                f"lpq; lpq | grep -q -E 'active.*root.*{file_name}'"
+            )
+
+            # Ensure that a raw PCL file appeared in the server's queue
+            # (showing that the right filters have been applied).  Of
+            # course, since there is no actual USB printer attached, the
+            # file will stay in the queue forever.
+            server.wait_for_file("/var/spool/cups/d*-001")
+            server.wait_until_succeeds(f"lpq -a | grep -q -E '{file_name}'")
+
+            # Delete the job on the client.  It should disappear on the
+            # server as well.
+            client.succeed("lprm")
+            client.wait_until_succeeds("lpq -a | grep -q -E 'no entries'")
+
+            retry(lambda _: "no entries" in server.succeed("lpq -a"))
+
+            # The queue is empty already, so this should be safe.
+            # Otherwise, pairs of "c*"-"d*-001" files might persist.
+            server.execute("rm /var/spool/cups/*")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/privoxy.nix b/nixpkgs/nixos/tests/privoxy.nix
new file mode 100644
index 000000000000..2d95c4522a01
--- /dev/null
+++ b/nixpkgs/nixos/tests/privoxy.nix
@@ -0,0 +1,113 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  # Note: For some reason Privoxy can't issue valid
+  # certificates if the CA is generated using gnutls :(
+  certs = pkgs.runCommand "example-certs"
+    { buildInputs = [ pkgs.openssl ]; }
+    ''
+      mkdir $out
+
+      # generate CA keypair
+      openssl req -new -nodes -x509 \
+        -extensions v3_ca -keyout $out/ca.key \
+        -out $out/ca.crt -days 365 \
+        -subj "/O=Privoxy CA/CN=Privoxy CA"
+
+      # generate server key/signing request
+      openssl genrsa -out $out/server.key 3072
+      openssl req -new -key $out/server.key \
+        -out server.csr -sha256 \
+        -subj "/O=An unhappy server./CN=example.com"
+
+      # sign the request/generate the certificate
+      openssl x509 -req -in server.csr -CA $out/ca.crt \
+      -CAkey $out/ca.key -CAcreateserial -out $out/server.crt \
+      -days 500 -sha256
+    '';
+in
+
+{
+  name = "privoxy";
+  meta = with lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  nodes.machine = { ... }: {
+    services.nginx.enable = true;
+    services.nginx.virtualHosts."example.com" = {
+      addSSL = true;
+      sslCertificate = "${certs}/server.crt";
+      sslCertificateKey = "${certs}/server.key";
+      locations."/".root = pkgs.writeTextFile
+        { name = "bad-day";
+          destination = "/how-are-you/index.html";
+          text = "I've had a bad day!\n";
+        };
+      locations."/ads".extraConfig = ''
+        return 200 "Hot Nixpkgs PRs in your area. Click here!\n";
+      '';
+    };
+
+    services.privoxy = {
+      enable = true;
+      inspectHttps = true;
+      settings = {
+        ca-cert-file = "${certs}/ca.crt";
+        ca-key-file  = "${certs}/ca.key";
+        debug = 65536;
+      };
+      userActions = ''
+        {+filter{positive}}
+        example.com
+
+        {+block{Fake ads}}
+        example.com/ads
+      '';
+      userFilters = ''
+        FILTER: positive This is a filter example.
+        s/bad/great/ig
+      '';
+    };
+
+    security.pki.certificateFiles = [ "${certs}/ca.crt" ];
+
+    networking.hosts."::1" = [ "example.com" ];
+    networking.proxy.httpProxy = "http://localhost:8118";
+    networking.proxy.httpsProxy = "http://localhost:8118";
+  };
+
+  testScript =
+    ''
+      with subtest("Privoxy is running"):
+          machine.wait_for_unit("privoxy")
+          machine.wait_for_open_port(8118)
+          machine.succeed("curl -f http://config.privoxy.org")
+
+      with subtest("Privoxy can filter http requests"):
+          machine.wait_for_open_port(80)
+          assert "great day" in machine.succeed(
+              "curl -sfL http://example.com/how-are-you? | tee /dev/stderr"
+          )
+
+      with subtest("Privoxy can filter https requests"):
+          machine.wait_for_open_port(443)
+          assert "great day" in machine.succeed(
+              "curl -sfL https://example.com/how-are-you? | tee /dev/stderr"
+          )
+
+      with subtest("Blocks are working"):
+          machine.wait_for_open_port(443)
+          machine.fail("curl -f https://example.com/ads 1>&2")
+          machine.succeed("curl -f https://example.com/PRIVOXY-FORCE/ads 1>&2")
+
+      with subtest("Temporary certificates are cleaned"):
+          # Count current certificates
+          machine.succeed("test $(ls /run/privoxy/certs | wc -l) -gt 0")
+          # Forward in time 12 days, trigger the timer..
+          machine.succeed("date -s \"$(date --date '12 days')\"")
+          machine.systemctl("start systemd-tmpfiles-clean")
+          # ...and count again
+          machine.succeed("test $(ls /run/privoxy/certs | wc -l) -eq 0")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/prometheus-exporters.nix b/nixpkgs/nixos/tests/prometheus-exporters.nix
new file mode 100644
index 000000000000..7840130d4a36
--- /dev/null
+++ b/nixpkgs/nixos/tests/prometheus-exporters.nix
@@ -0,0 +1,1692 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+let
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+  inherit (pkgs.lib) concatStringsSep maintainers mapAttrs mkMerge
+    removeSuffix replaceStrings singleton splitString makeBinPath;
+
+  /*
+    * The attrset `exporterTests` contains one attribute
+    * for each exporter test. Each of these attributes
+    * is expected to be an attrset containing:
+    *
+    *  `exporterConfig`:
+    *    this attribute set contains config for the exporter itself
+    *
+    *  `exporterTest`
+    *    this attribute set contains test instructions
+    *
+    *  `metricProvider` (optional)
+    *    this attribute contains additional machine config
+    *
+    *  `nodeName` (optional)
+    *    override an incompatible testnode name
+    *
+    *  Example:
+    *    exporterTests.<exporterName> = {
+    *      exporterConfig = {
+    *        enable = true;
+    *      };
+    *      metricProvider = {
+    *        services.<metricProvider>.enable = true;
+    *      };
+    *      exporterTest = ''
+    *        wait_for_unit("prometheus-<exporterName>-exporter.service")
+    *        wait_for_open_port(1234)
+    *        succeed("curl -sSf 'localhost:1234/metrics'")
+    *      '';
+    *    };
+    *
+    *  # this would generate the following test config:
+    *
+    *    nodes.<exporterName> = {
+    *      services.prometheus.exporters.<exporterName> = {
+    *        enable = true;
+    *      };
+    *      services.<metricProvider>.enable = true;
+    *    };
+    *
+    *    testScript = ''
+    *      <exporterName>.start()
+    *      <exporterName>.wait_for_unit("prometheus-<exporterName>-exporter.service")
+    *      <exporterName>.wait_for_open_port(1234)
+    *      <exporterName>.succeed("curl -sSf 'localhost:1234/metrics'")
+    *      <exporterName>.shutdown()
+    *    '';
+  */
+
+  exporterTests = {
+    apcupsd = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.apcupsd.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("apcupsd.service")
+        wait_for_open_port(3551)
+        wait_for_unit("prometheus-apcupsd-exporter.service")
+        wait_for_open_port(9162)
+        succeed("curl -sSf http://localhost:9162/metrics | grep 'apcupsd_info'")
+      '';
+    };
+
+    artifactory = {
+      exporterConfig = {
+        enable = true;
+        artiUsername = "artifactory-username";
+        artiPassword = "artifactory-password";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-artifactory-exporter.service")
+        wait_for_open_port(9531)
+        succeed(
+            "curl -sSf http://localhost:9531/metrics | grep 'artifactory_up'"
+        )
+      '';
+    };
+
+    bind = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.bind.enable = true;
+        services.bind.extraConfig = ''
+          statistics-channels {
+            inet 127.0.0.1 port 8053 allow { localhost; };
+          };
+        '';
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-bind-exporter.service")
+        wait_for_open_port(9119)
+        succeed(
+            "curl -sSf http://localhost:9119/metrics | grep 'bind_query_recursions_total 0'"
+        )
+      '';
+    };
+
+    bird = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.bird2.enable = true;
+        services.bird2.config = ''
+          router id 127.0.0.1;
+
+          protocol kernel MyObviousTestString {
+            ipv4 {
+              import all;
+              export none;
+            };
+          }
+
+          protocol device {
+          }
+        '';
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-bird-exporter.service")
+        wait_for_open_port(9324)
+        wait_until_succeeds(
+            "curl -sSf http://localhost:9324/metrics | grep 'MyObviousTestString'"
+        )
+      '';
+    };
+
+    bitcoin = {
+      exporterConfig = {
+        enable = true;
+        rpcUser = "bitcoinrpc";
+        rpcPasswordFile = pkgs.writeText "password" "hunter2";
+      };
+      metricProvider = {
+        services.bitcoind.default.enable = true;
+        services.bitcoind.default.rpc.users.bitcoinrpc.passwordHMAC = "e8fe33f797e698ac258c16c8d7aadfbe$872bdb8f4d787367c26bcfd75e6c23c4f19d44a69f5d1ad329e5adf3f82710f7";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-bitcoin-exporter.service")
+        wait_for_unit("bitcoind-default.service")
+        wait_for_open_port(9332)
+        succeed("curl -sSf http://localhost:9332/metrics | grep '^bitcoin_blocks '")
+      '';
+    };
+
+    blackbox = {
+      exporterConfig = {
+        enable = true;
+        configFile = pkgs.writeText "config.yml" (builtins.toJSON {
+          modules.icmp_v6 = {
+            prober = "icmp";
+            icmp.preferred_ip_protocol = "ip6";
+          };
+        });
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-blackbox-exporter.service")
+        wait_for_open_port(9115)
+        succeed(
+            "curl -sSf 'http://localhost:9115/probe?target=localhost&module=icmp_v6' | grep 'probe_success 1'"
+        )
+      '';
+    };
+
+    collectd = {
+      exporterConfig = {
+        enable = true;
+        extraFlags = [ "--web.collectd-push-path /collectd" ];
+      };
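+      # The JSON payload is collapsed to a single line so it can be written with
+      # one echo; the DATE placeholder is replaced with the current epoch time by
+      # the sed call before the payload is POSTed to the push path.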
+      exporterTest = let postData = replaceStrings [ "\n" ] [ "" ] ''
+        [{
+          "values":[23],
+          "dstypes":["gauge"],
+          "type":"gauge",
+          "interval":1000,
+          "host":"testhost",
+          "plugin":"testplugin",
+          "time":DATE
+        }]
+      ''; in
+        ''
+          wait_for_unit("prometheus-collectd-exporter.service")
+          wait_for_open_port(9103)
+          succeed(
+              'echo \'${postData}\'> /tmp/data.json'
+          )
+          succeed('sed -ie "s DATE $(date +%s) " /tmp/data.json')
+          succeed(
+              "curl -sSfH 'Content-Type: application/json' -X POST --data @/tmp/data.json localhost:9103/collectd"
+          )
+          succeed(
+              "curl -sSf localhost:9103/metrics | grep 'collectd_testplugin_gauge{instance=\"testhost\"} 23'"
+          )
+        '';
+    };
+
+    dnsmasq = {
+      exporterConfig = {
+        enable = true;
+        leasesPath = "/var/lib/dnsmasq/dnsmasq.leases";
+      };
+      metricProvider = {
+        services.dnsmasq.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-dnsmasq-exporter.service")
+        wait_for_open_port(9153)
+        succeed("curl -sSf http://localhost:9153/metrics | grep 'dnsmasq_leases 0'")
+      '';
+    };
+
+    # Access to a WHOIS server is required to properly test this exporter, so
+    # just perform a basic sanity check that the exporter is running and the
+    # probe returns a failure.
+    domain = {
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-domain-exporter.service")
+        wait_for_open_port(9222)
+        succeed("curl -sSf 'http://localhost:9222/probe?target=nixos.org'")
+      '';
+    };
+
+    dovecot = {
+      exporterConfig = {
+        enable = true;
+        scopes = [ "global" ];
+        socketPath = "/var/run/dovecot2/old-stats";
+        user = "root"; # <- don't use user root in production
+      };
+      metricProvider = {
+        services.dovecot2.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-dovecot-exporter.service")
+        wait_for_open_port(9166)
+        succeed(
+            "curl -sSf http://localhost:9166/metrics | grep 'dovecot_up{scope=\"global\"} 1'"
+        )
+      '';
+    };
+
+    exportarr-sonarr = {
+      nodeName = "exportarr_sonarr";
+      exporterConfig = {
+        enable = true;
+        url = "http://127.0.0.1:8989";
+        # Testing with real data is tricky because the API key cannot be preconfigured.
+        apiKeyFile = pkgs.writeText "dummy-api-key" "eccff6a992bc2e4b88e46d064b26bb4e";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-exportarr-sonarr-exporter.service")
+        wait_for_open_port(9707)
+        succeed("curl -sSf 'http://localhost:9707/metrics'")
+      '';
+    };
+
+    fastly = {
+      exporterConfig = {
+        enable = true;
+        tokenPath = pkgs.writeText "token" "abc123";
+      };
+
+      # noop: fastly's exporter can't start without first talking to fastly
+      # see: https://github.com/peterbourgon/fastly-exporter/issues/87
+      exporterTest = ''
+        succeed("true");
+      '';
+    };
+
+    fritzbox = {
+      # TODO add proper test case
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-fritzbox-exporter.service")
+        wait_for_open_port(9133)
+        succeed(
+            "curl -sSf http://localhost:9133/metrics | grep 'fritzbox_exporter_collect_errors 0'"
+        )
+      '';
+    };
+
+    graphite = {
+      exporterConfig = {
+        enable = true;
+        port = 9108;
+        graphitePort = 9109;
+        mappingSettings.mappings = [{
+          match = "test.*.*";
+          name = "testing";
+          labels = {
+            protocol = "$1";
+            author = "$2";
+          };
+        }];
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-graphite-exporter.service")
+        wait_for_open_port(9108)
+        wait_for_open_port(9109)
+        succeed("echo test.tcp.foo-bar 1234 $(date +%s) | nc -w1 localhost 9109")
+        succeed("curl -sSf http://localhost:9108/metrics | grep 'testing{author=\"foo-bar\",protocol=\"tcp\"} 1234'")
+      '';
+    };
+
+    idrac = {
+      exporterConfig = {
+        enable = true;
+        port = 9348;
+        configuration = {
+          hosts = {
+            default = { username = "username"; password = "password"; };
+          };
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-idrac-exporter.service")
+        wait_for_open_port(9348)
+        wait_until_succeeds("curl localhost:9348")
+      '';
+    };
+
+    influxdb = {
+      exporterConfig = {
+        enable = true;
+        sampleExpiry = "3s";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-influxdb-exporter.service")
+        wait_for_open_port(9122)
+        succeed(
+          "curl -XPOST http://localhost:9122/write --data-binary 'influxdb_exporter,distro=nixos,added_in=21.09 value=1'"
+        )
+        succeed(
+          "curl -sSf http://localhost:9122/metrics | grep 'nixos'"
+        )
+        execute("sleep 5")
+        fail(
+          "curl -sSf http://localhost:9122/metrics | grep 'nixos'"
+        )
+      '';
+    };
+
+    ipmi = {
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-ipmi-exporter.service")
+        wait_for_open_port(9290)
+        succeed(
+          "curl -sSf http://localhost:9290/metrics | grep 'ipmi_scrape_duration_seconds'"
+        )
+      '';
+    };
+
+    jitsi = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        systemd.services.prometheus-jitsi-exporter.after = [ "jitsi-videobridge2.service" ];
+        services.jitsi-videobridge = {
+          enable = true;
+          colibriRestApi = true;
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("jitsi-videobridge2.service")
+        wait_for_open_port(8080)
+        wait_for_unit("prometheus-jitsi-exporter.service")
+        wait_for_open_port(9700)
+        wait_until_succeeds(
+            'journalctl -eu prometheus-jitsi-exporter.service -o cat | grep "key=participants"'
+        )
+        succeed("curl -sSf 'localhost:9700/metrics' | grep 'jitsi_participants 0'")
+      '';
+    };
+
+    json = {
+      exporterConfig = {
+        enable = true;
+        url = "http://localhost";
+        configFile = pkgs.writeText "json-exporter-conf.json" (builtins.toJSON {
+          modules = {
+            default = {
+              metrics = [
+                { name = "json_test_metric"; path = "{ .test }"; }
+              ];
+            };
+          };
+        });
+      };
+      metricProvider = {
+        systemd.services.prometheus-json-exporter.after = [ "nginx.service" ];
+        services.nginx = {
+          enable = true;
+          virtualHosts.localhost.locations."/".extraConfig = ''
+            return 200 "{\"test\":1}";
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_open_port(80)
+        wait_for_unit("prometheus-json-exporter.service")
+        wait_for_open_port(7979)
+        succeed(
+            "curl -sSf 'localhost:7979/probe?target=http://localhost' | grep 'json_test_metric 1'"
+        )
+      '';
+    };
+
+    kea = let
+      controlSocketPathV4 = "/run/kea-dhcp4/dhcp4.sock";
+      controlSocketPathV6 = "/run/kea-dhcp6/dhcp6.sock";
+    in
+    {
+      exporterConfig = {
+        enable = true;
+        controlSocketPaths = [
+          controlSocketPathV4
+          controlSocketPathV6
+        ];
+      };
+      metricProvider = {
+        services.kea = {
+          dhcp4 = {
+            enable = true;
+            settings = {
+              control-socket = {
+                socket-type = "unix";
+                socket-name = controlSocketPathV4;
+              };
+            };
+          };
+          dhcp6 = {
+            enable = true;
+            settings = {
+              control-socket = {
+                socket-type = "unix";
+                socket-name = controlSocketPathV6;
+              };
+            };
+          };
+        };
+      };
+
+      exporterTest = ''
+        wait_for_unit("kea-dhcp4-server.service")
+        wait_for_unit("kea-dhcp6-server.service")
+        wait_for_file("${controlSocketPathV4}")
+        wait_for_file("${controlSocketPathV6}")
+        wait_for_unit("prometheus-kea-exporter.service")
+        wait_for_open_port(9547)
+        succeed(
+            "curl --fail localhost:9547/metrics | grep 'packets_received_total'"
+        )
+      '';
+    };
+
+    knot = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.knot = {
+          enable = true;
+          extraArgs = [ "-v" ];
+          settingsFile = pkgs.writeText "knot.conf" ''
+            server:
+              listen: 127.0.0.1@53
+
+            template:
+              - id: default
+                global-module: mod-stats
+                dnssec-signing: off
+                zonefile-sync: -1
+                journal-db: /var/lib/knot/journal
+                kasp-db: /var/lib/knot/kasp
+                timer-db: /var/lib/knot/timer
+                zonefile-load: difference
+                storage: ${pkgs.buildEnv {
+                  name = "foo";
+                  paths = [
+                    (pkgs.writeTextDir "test.zone" ''
+                      @ SOA ns.example.com. noc.example.com. 2019031301 86400 7200 3600000 172800
+                      @       NS      ns1
+                      @       NS      ns2
+                      ns1     A       192.168.0.1
+                    '')
+                  ];
+                }}
+
+            mod-stats:
+              - id: custom
+                edns-presence: on
+                query-type: on
+
+            zone:
+              - domain: test
+                file: test.zone
+                module: mod-stats/custom
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("knot.service")
+        wait_for_unit("prometheus-knot-exporter.service")
+        wait_for_open_port(9433)
+        succeed("curl -sSf 'localhost:9433' | grep '2\.019031301'")
+      '';
+    };
+
+    keylight = {
+      # A hardware device is required to properly test this exporter, so just
+      # perform a couple of basic sanity checks: the exporter is running,
+      # rejects requests without a target, and fails for an unreachable target.
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-keylight-exporter.service")
+        wait_for_open_port(9288)
+        succeed(
+            "curl -sS --write-out '%{http_code}' -o /dev/null http://localhost:9288/metrics | grep '400'"
+        )
+        succeed(
+            "curl -sS --write-out '%{http_code}' -o /dev/null http://localhost:9288/metrics?target=nosuchdevice | grep '500'"
+        )
+      '';
+    };
+
+    lnd = {
+      exporterConfig = {
+        enable = true;
+        lndTlsPath = "/var/lib/lnd/tls.cert";
+        lndMacaroonDir = "/var/lib/lnd";
+        extraFlags = [ "--lnd.network=regtest" ];
+      };
+      metricProvider = {
+        systemd.services.prometheus-lnd-exporter.serviceConfig.RestartSec = 15;
+        systemd.services.prometheus-lnd-exporter.after = [ "lnd.service" ];
+        services.bitcoind.regtest = {
+          enable = true;
+          extraConfig = ''
+            rpcauth=bitcoinrpc:e8fe33f797e698ac258c16c8d7aadfbe$872bdb8f4d787367c26bcfd75e6c23c4f19d44a69f5d1ad329e5adf3f82710f7
+            zmqpubrawblock=tcp://127.0.0.1:28332
+            zmqpubrawtx=tcp://127.0.0.1:28333
+          '';
+          extraCmdlineOptions = [ "-regtest" ];
+        };
+        systemd.services.lnd = {
+          serviceConfig.ExecStart = ''
+            ${pkgs.lnd}/bin/lnd \
+              --datadir=/var/lib/lnd \
+              --tlscertpath=/var/lib/lnd/tls.cert \
+              --tlskeypath=/var/lib/lnd/tls.key \
+              --logdir=/var/log/lnd \
+              --bitcoin.active \
+              --bitcoin.regtest \
+              --bitcoin.node=bitcoind \
+              --bitcoind.rpcuser=bitcoinrpc \
+              --bitcoind.rpcpass=hunter2 \
+              --bitcoind.zmqpubrawblock=tcp://127.0.0.1:28332 \
+              --bitcoind.zmqpubrawtx=tcp://127.0.0.1:28333 \
+              --readonlymacaroonpath=/var/lib/lnd/readonly.macaroon
+          '';
+          serviceConfig.StateDirectory = "lnd";
+          wantedBy = [ "multi-user.target" ];
+          after = [ "network.target" ];
+        };
+        # Initialize the wallet; this creates the macaroon needed by the exporter.
+        systemd.services.lnd.postStart = ''
+          ${pkgs.curl}/bin/curl \
+            --retry 20 \
+            --retry-delay 1 \
+            --retry-connrefused \
+            --cacert /var/lib/lnd/tls.cert \
+            -X GET \
+            https://localhost:8080/v1/genseed | ${pkgs.jq}/bin/jq -c '.cipher_seed_mnemonic' > /tmp/seed
+          ${pkgs.curl}/bin/curl \
+            --retry 20 \
+            --retry-delay 1 \
+            --retry-connrefused \
+            --cacert /var/lib/lnd/tls.cert \
+            -X POST \
+            -d "{\"wallet_password\": \"asdfasdfasdf\", \"cipher_seed_mnemonic\": $(cat /tmp/seed | tr -d '\n')}" \
+            https://localhost:8080/v1/initwallet
+        '';
+      };
+      exporterTest = ''
+        wait_for_unit("lnd.service")
+        wait_for_open_port(10009)
+        wait_for_unit("prometheus-lnd-exporter.service")
+        wait_for_open_port(9092)
+        succeed("curl -sSf localhost:9092/metrics | grep '^lnd_peer_count'")
+      '';
+    };
+
+    mail = {
+      exporterConfig = {
+        enable = true;
+        configuration = {
+          monitoringInterval = "2s";
+          mailCheckTimeout = "10s";
+          servers = [{
+            name = "testserver";
+            server = "localhost";
+            port = 25;
+            from = "mail-exporter@localhost";
+            to = "mail-exporter@localhost";
+            detectionDir = "/var/spool/mail/mail-exporter/new";
+          }];
+        };
+      };
+      metricProvider = {
+        services.postfix.enable = true;
+        systemd.services.prometheus-mail-exporter = {
+          after = [ "postfix.service" ];
+          requires = [ "postfix.service" ];
+          serviceConfig = {
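+            # Create the maildir (relative to WorkingDirectory=/var/spool/mail)
+            # that postfix delivers to and the exporter's detectionDir points at.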
+            ExecStartPre = [
+              "${pkgs.writeShellScript "create-maildir" ''
+                mkdir -p -m 0700 mail-exporter/new
+              ''}"
+            ];
+            ProtectHome = true;
+            ReadOnlyPaths = "/";
+            ReadWritePaths = "/var/spool/mail";
+            WorkingDirectory = "/var/spool/mail";
+          };
+        };
+        users.users.mailexporter = {
+          isSystemUser = true;
+          group = "mailexporter";
+        };
+        users.groups.mailexporter = {};
+      };
+      exporterTest = ''
+        wait_for_unit("postfix.service")
+        wait_for_unit("prometheus-mail-exporter.service")
+        wait_for_open_port(9225)
+        wait_until_succeeds(
+            "curl -sSf http://localhost:9225/metrics | grep 'mail_deliver_success{configname=\"testserver\"} 1'"
+        )
+      '';
+    };
+
+    mikrotik = {
+      exporterConfig = {
+        enable = true;
+        extraFlags = [ "-timeout=1s" ];
+        configuration = {
+          devices = [
+            {
+              name = "router";
+              address = "192.168.42.48";
+              user = "prometheus";
+              password = "shh";
+            }
+          ];
+          features = {
+            bgp = true;
+            dhcp = true;
+            dhcpl = true;
+            dhcpv6 = true;
+            health = true;
+            routes = true;
+            poe = true;
+            pools = true;
+            optics = true;
+            w60g = true;
+            wlansta = true;
+            wlanif = true;
+            monitor = true;
+            ipsec = true;
+          };
+        };
+      };
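+      # No RouterOS device is reachable at the configured address, so the
+      # scrape is expected to fail; this only checks that the exporter itself
+      # runs and reports the failed collection.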
+      exporterTest = ''
+        wait_for_unit("prometheus-mikrotik-exporter.service")
+        wait_for_open_port(9436)
+        succeed(
+            "curl -sSf http://localhost:9436/metrics | grep 'mikrotik_scrape_collector_success{device=\"router\"} 0'"
+        )
+      '';
+    };
+
+    modemmanager = {
+      exporterConfig = {
+        enable = true;
+        refreshRate = "10s";
+      };
+      metricProvider = {
+        # ModemManager is installed when NetworkManager is enabled. Ensure it is
+        # started and wanted by both NetworkManager and the exporter so that
+        # everything comes up in the right order.
+        networking.networkmanager.enable = true;
+        systemd.services.ModemManager = {
+          enable = true;
+          wantedBy = [ "NetworkManager.service" "prometheus-modemmanager-exporter.service" ];
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("ModemManager.service")
+        wait_for_unit("prometheus-modemmanager-exporter.service")
+        wait_for_open_port(9539)
+        succeed(
+            "curl -sSf http://localhost:9539/metrics | grep 'modemmanager_info'"
+        )
+      '';
+    };
+
+    mysqld = {
+      exporterConfig = {
+        enable = true;
+        runAsLocalSuperUser = true;
+        configFile = pkgs.writeText "test-prometheus-exporter-mysqld-config.my-cnf" ''
+          [client]
+          user = exporter
+          password = snakeoilpassword
+        '';
+      };
+      metricProvider = {
+        services.mysql = {
+          enable = true;
+          package = pkgs.mariadb;
+          initialScript = pkgs.writeText "mysql-init-script.sql" ''
+            CREATE USER 'exporter'@'localhost'
+            IDENTIFIED BY 'snakeoilpassword'
+            WITH MAX_USER_CONNECTIONS 3;
+            GRANT PROCESS, REPLICATION CLIENT, SLAVE MONITOR, SELECT ON *.* TO 'exporter'@'localhost';
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-mysqld-exporter.service")
+        wait_for_open_port(9104)
+        wait_for_unit("mysql.service")
+        succeed("curl -sSf http://localhost:9104/metrics | grep 'mysql_up 1'")
+        systemctl("stop mysql.service")
+        succeed("curl -sSf http://localhost:9104/metrics | grep 'mysql_up 0'")
+        systemctl("start mysql.service")
+        wait_for_unit("mysql.service")
+        succeed("curl -sSf http://localhost:9104/metrics | grep 'mysql_up 1'")
+      '';
+    };
+
+    nextcloud = {
+      exporterConfig = {
+        enable = true;
+        passwordFile = "/var/nextcloud-pwfile";
+        url = "http://localhost";
+      };
+      metricProvider = {
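+        # Helper unit that installs the snakeoil password file readable only by
+        # the exporter user, ordered before the exporter via requiredBy/before.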
+        systemd.services.nc-pwfile =
+          let
+            passfile = (pkgs.writeText "pwfile" "snakeoilpw");
+          in
+          {
+            requiredBy = [ "prometheus-nextcloud-exporter.service" ];
+            before = [ "prometheus-nextcloud-exporter.service" ];
+            serviceConfig.ExecStart = ''
+              ${pkgs.coreutils}/bin/install -o nextcloud-exporter -m 0400 ${passfile} /var/nextcloud-pwfile
+            '';
+          };
+        services.nginx = {
+          enable = true;
+          virtualHosts."localhost" = {
+            basicAuth.nextcloud-exporter = "snakeoilpw";
+            locations."/" = {
+              root = "${pkgs.prometheus-nextcloud-exporter.src}/serverinfo/testdata";
+              tryFiles = "/negative-space.json =404";
+            };
+          };
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_unit("prometheus-nextcloud-exporter.service")
+        wait_for_open_port(9205)
+        succeed("curl -sSf http://localhost:9205/metrics | grep 'nextcloud_up 1'")
+      '';
+    };
+
+    nginx = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.nginx = {
+          enable = true;
+          statusPage = true;
+          virtualHosts."test".extraConfig = "return 204;";
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_unit("prometheus-nginx-exporter.service")
+        wait_for_open_port(9113)
+        succeed("curl -sSf http://localhost:9113/metrics | grep 'nginx_up 1'")
+      '';
+    };
+
+    nginxlog = {
+      exporterConfig = {
+        enable = true;
+        group = "nginx";
+        settings = {
+          namespaces = [
+            {
+              name = "filelogger";
+              source = {
+                files = [ "/var/log/nginx/filelogger.access.log" ];
+              };
+            }
+            {
+              name = "syslogger";
+              source = {
+                syslog = {
+                  listen_address = "udp://127.0.0.1:10000";
+                  format = "rfc3164";
+                  tags = [ "nginx" ];
+                };
+              };
+            }
+          ];
+        };
+      };
+      metricProvider = {
+        services.nginx = {
+          enable = true;
+          httpConfig = ''
+            server {
+              listen 80;
+              server_name filelogger.local;
+              access_log /var/log/nginx/filelogger.access.log;
+            }
+            server {
+              listen 81;
+              server_name syslogger.local;
+              access_log syslog:server=127.0.0.1:10000,tag=nginx,severity=info;
+            }
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_unit("prometheus-nginxlog-exporter.service")
+        wait_for_open_port(9117)
+        wait_for_open_port(80)
+        wait_for_open_port(81)
+        succeed("curl http://localhost")
+        execute("sleep 1")
+        succeed(
+            "curl -sSf http://localhost:9117/metrics | grep 'filelogger_http_response_count_total' | grep 1"
+        )
+        succeed("curl http://localhost:81")
+        execute("sleep 1")
+        succeed(
+            "curl -sSf http://localhost:9117/metrics | grep 'syslogger_http_response_count_total' | grep 1"
+        )
+      '';
+    };
+
+    node = {
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-node-exporter.service")
+        wait_for_open_port(9100)
+        succeed(
+            "curl -sSf http://localhost:9100/metrics | grep 'node_exporter_build_info{.\\+} 1'"
+        )
+      '';
+    };
+
+    openldap = {
+      exporterConfig = {
+        enable = true;
+        ldapCredentialFile = "${pkgs.writeText "exporter.yml" ''
+          ldapUser: "cn=root,dc=example"
+          ldapPass: "notapassword"
+        ''}";
+      };
+      metricProvider = {
+        services.openldap = {
+          enable = true;
+          settings.children = {
+            "cn=schema".includes = [
+              "${pkgs.openldap}/etc/schema/core.ldif"
+              "${pkgs.openldap}/etc/schema/cosine.ldif"
+              "${pkgs.openldap}/etc/schema/inetorgperson.ldif"
+              "${pkgs.openldap}/etc/schema/nis.ldif"
+            ];
+            "olcDatabase={1}mdb" = {
+              attrs = {
+                objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
+                olcDatabase = "{1}mdb";
+                olcDbDirectory = "/var/db/openldap";
+                olcSuffix = "dc=example";
+                olcRootDN = {
+                  # cn=root,dc=example
+                  base64 = "Y249cm9vdCxkYz1leGFtcGxl";
+                };
+                olcRootPW = {
+                  path = "${pkgs.writeText "rootpw" "notapassword"}";
+                };
+              };
+            };
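+            # Expose the cn=monitor backend and allow authenticated users (such
+            # as the exporter's bind DN) to read it.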
+            "olcDatabase={2}monitor".attrs = {
+              objectClass = [ "olcDatabaseConfig" ];
+              olcDatabase = "{2}monitor";
+              olcAccess = [ "to dn.subtree=cn=monitor by users read" ];
+            };
+          };
+          declarativeContents."dc=example" = ''
+            dn: dc=example
+            objectClass: domain
+            dc: example
+
+            dn: ou=users,dc=example
+            objectClass: organizationalUnit
+            ou: users
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-openldap-exporter.service")
+        wait_for_open_port(389)
+        wait_for_open_port(9330)
+        wait_until_succeeds(
+            "curl -sSf http://localhost:9330/metrics | grep 'openldap_scrape{result=\"ok\"} 1'"
+        )
+      '';
+    };
+
+    openvpn = {
+      exporterConfig = {
+        enable = true;
+        group = "openvpn";
+        statusPaths = [ "/run/openvpn-test" ];
+      };
+      metricProvider = {
+        users.groups.openvpn = { };
+        services.openvpn.servers.test = {
+          config = ''
+            dev tun
+            status /run/openvpn-test
+            status-version 3
+          '';
+          up = "chmod g+r /run/openvpn-test";
+        };
+        systemd.services."openvpn-test".serviceConfig.Group = "openvpn";
+      };
+      exporterTest = ''
+        wait_for_unit("openvpn-test.service")
+        wait_for_unit("prometheus-openvpn-exporter.service")
+        succeed("curl -sSf http://localhost:9176/metrics | grep 'openvpn_up{.*} 1'")
+      '';
+    };
+
+    pgbouncer = {
+      exporterConfig = {
+        enable = true;
+        connectionStringFile = pkgs.writeText "connection.conf" "postgres://admin:@localhost:6432/pgbouncer?sslmode=disable";
+      };
+
+      metricProvider = {
+        services.postgresql.enable = true;
+        services.pgbouncer = {
+          # https://github.com/prometheus-community/pgbouncer_exporter#pgbouncer-configuration
+          ignoreStartupParameters = "extra_float_digits";
+          enable = true;
+          listenAddress = "*";
+          databases = { postgres = "host=/run/postgresql/ port=5432 auth_user=postgres dbname=postgres"; };
+          authType = "any";
+          maxClientConn = 99;
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("postgresql.service")
+        wait_for_unit("pgbouncer.service")
+        wait_for_unit("prometheus-pgbouncer-exporter.service")
+        wait_for_open_port(9127)
+        succeed("curl -sSf http://localhost:9127/metrics | grep 'pgbouncer_up 1'")
+        succeed(
+            "curl -sSf http://localhost:9127/metrics | grep 'pgbouncer_config_max_client_connections 99'"
+        )
+      '';
+    };
+
+    php-fpm = {
+      nodeName = "php_fpm";
+      exporterConfig = {
+        enable = true;
+        environmentFile = pkgs.writeTextFile {
+          name = "/tmp/prometheus-php-fpm-exporter.env";
+          text = ''
+            PHP_FPM_SCRAPE_URI="tcp://127.0.0.1:9000/status"
+          '';
+        };
+      };
+      metricProvider = {
+        users.users."php-fpm-exporter" = {
+          isSystemUser = true;
+          group  = "php-fpm-exporter";
+        };
+        users.groups."php-fpm-exporter" = {};
+        services.phpfpm.pools."php-fpm-exporter" = {
+          user = "php-fpm-exporter";
+          group = "php-fpm-exporter";
+          settings = {
+            "pm" = "dynamic";
+            "pm.max_children" = 32;
+            "pm.max_requests" = 500;
+            "pm.start_servers" = 2;
+            "pm.min_spare_servers" = 2;
+            "pm.max_spare_servers" = 5;
+            "pm.status_path" = "/status";
+            "listen" = "127.0.0.1:9000";
+            "listen.allowed_clients" = "127.0.0.1";
+          };
+          phpEnv."PATH" = makeBinPath [ pkgs.php ];
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("phpfpm-php-fpm-exporter.service")
+        wait_for_unit("prometheus-php-fpm-exporter.service")
+        succeed("curl -sSf http://localhost:9253/metrics | grep 'phpfpm_up{.*} 1'")
+      '';
+    };
+
+    postfix = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.postfix.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-postfix-exporter.service")
+        wait_for_file("/var/lib/postfix/queue/public/showq")
+        wait_for_open_port(9154)
+        wait_until_succeeds(
+            "curl -sSf http://localhost:9154/metrics | grep 'postfix_up{path=\"/var/lib/postfix/queue/public/showq\"} 1'"
+        )
+        succeed(
+            "curl -sSf http://localhost:9154/metrics | grep 'postfix_smtpd_connects_total 0'"
+        )
+        succeed("curl -sSf http://localhost:9154/metrics | grep 'postfix_up{.*} 1'")
+      '';
+    };
+
+    postgres = {
+      exporterConfig = {
+        enable = true;
+        runAsLocalSuperUser = true;
+      };
+      metricProvider = {
+        services.postgresql.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-postgres-exporter.service")
+        wait_for_open_port(9187)
+        wait_for_unit("postgresql.service")
+        succeed(
+            "curl -sSf http://localhost:9187/metrics | grep 'pg_exporter_last_scrape_error 0'"
+        )
+        succeed("curl -sSf http://localhost:9187/metrics | grep 'pg_up 1'")
+        systemctl("stop postgresql.service")
+        succeed(
+            "curl -sSf http://localhost:9187/metrics | grep -v 'pg_exporter_last_scrape_error 0'"
+        )
+        succeed("curl -sSf http://localhost:9187/metrics | grep 'pg_up 0'")
+        systemctl("start postgresql.service")
+        wait_for_unit("postgresql.service")
+        succeed(
+            "curl -sSf http://localhost:9187/metrics | grep 'pg_exporter_last_scrape_error 0'"
+        )
+        succeed("curl -sSf http://localhost:9187/metrics | grep 'pg_up 1'")
+      '';
+    };
+
+    process = {
+      exporterConfig = {
+        enable = true;
+        settings.process_names = [
+          # Remove nix store path from process name
+          { name = "{{.Matches.Wrapped}} {{ .Matches.Args }}"; cmdline = [ "^/nix/store[^ ]*/(?P<Wrapped>[^ /]*) (?P<Args>.*)" ]; }
+        ];
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-process-exporter.service")
+        wait_for_open_port(9256)
+        wait_until_succeeds(
+            "curl -sSf localhost:9256/metrics | grep -q '{}'".format(
+                'namedprocess_namegroup_cpu_seconds_total{groupname="process-exporter '
+            )
+        )
+      '';
+    };
+
+    pve = let
+      pveExporterEnvFile = pkgs.writeTextFile {
+        name = "pve.env";
+        text = ''
+          PVE_USER="test_user@pam"
+          PVE_PASSWORD="hunter3"
+          PVE_VERIFY_SSL="false"
+        '';
+      };
+    in {
+      exporterConfig = {
+        enable = true;
+        environmentFile = pveExporterEnvFile;
+      };
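+      # There is no Proxmox VE API to talk to inside the test VM, so only
+      # check that the exporter starts and answers HTTP on its port.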
+      exporterTest = ''
+        wait_for_unit("prometheus-pve-exporter.service")
+        wait_for_open_port(9221)
+        wait_until_succeeds("curl localhost:9221")
+      '';
+    };
+
+    py-air-control = {
+      nodeName = "py_air_control";
+      exporterConfig = {
+        enable = true;
+        deviceHostname = "127.0.0.1";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-py-air-control-exporter.service")
+        wait_for_open_port(9896)
+        succeed(
+            "curl -sSf http://localhost:9896/metrics | grep 'py_air_control_sampling_error_total'"
+        )
+      '';
+    };
+
+    redis = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider.services.redis.servers."".enable = true;
+      exporterTest = ''
+        wait_for_unit("redis.service")
+        wait_for_unit("prometheus-redis-exporter.service")
+        wait_for_open_port(6379)
+        wait_for_open_port(9121)
+        wait_until_succeeds("curl -sSf localhost:9121/metrics | grep 'redis_up 1'")
+      '';
+    };
+
+    rspamd = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        services.rspamd.enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("rspamd.service")
+        wait_for_unit("prometheus-rspamd-exporter.service")
+        wait_for_open_port(11334)
+        wait_for_open_port(7980)
+        wait_until_succeeds(
+            "curl -sSf 'localhost:7980/probe?target=http://localhost:11334/stat' | grep 'rspamd_scanned{host=\"rspamd\"} 0'"
+        )
+      '';
+    };
+
+    rtl_433 = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        # Mock rtl_433 binary that emits a dummy JSON event stream for the exporter to parse.
+        nixpkgs.overlays = [
+          (self: super: {
+            rtl_433 = self.runCommand "rtl_433" { } ''
+              mkdir -p "$out/bin"
+              cat <<EOF > "$out/bin/rtl_433"
+              #!/bin/sh
+              while true; do
+                printf '{"time" : "2020-04-26 13:37:42", "model" : "zopieux", "id" : 55, "channel" : 3, "temperature_C" : 18.000}\n'
+                sleep 4
+              done
+              EOF
+              chmod +x "$out/bin/rtl_433"
+            '';
+          })
+        ];
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-rtl_433-exporter.service")
+        wait_for_open_port(9550)
+        wait_until_succeeds(
+            "curl -sSf localhost:9550/metrics | grep '{}'".format(
+                'rtl_433_temperature_celsius{channel="3",id="55",location="",model="zopieux"} 18'
+            )
+        )
+      '';
+    };
+
+    sabnzbd = {
+      exporterConfig = {
+        enable = true;
+        servers = [{
+          baseUrl = "http://localhost:8080";
+          apiKeyFile = "/var/sabnzbd-apikey";
+        }];
+      };
+
+      metricProvider = {
+        services.sabnzbd.enable = true;
+
+        # sabnzbd requires unrar, which is unfree, so allow just that package.
+        nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (pkgs.lib.getName pkg) [ "unrar" ];
+
+        # Extract the generated API key before the exporter starts.
+        systemd.services.sabnzbd-apikey = {
+          requires = [ "sabnzbd.service" ];
+          after = [ "sabnzbd.service" ];
+          requiredBy = [ "prometheus-sabnzbd-exporter.service" ];
+          before = [ "prometheus-sabnzbd-exporter.service" ];
+          script = ''
+            grep -Po '^api_key = \K.+' /var/lib/sabnzbd/sabnzbd.ini > /var/sabnzbd-apikey
+          '';
+        };
+      };
+
+      exporterTest = ''
+        wait_for_unit("sabnzbd.service")
+        wait_for_unit("prometheus-sabnzbd-exporter.service")
+        wait_for_open_port(8080)
+        wait_for_open_port(9387)
+        wait_until_succeeds(
+            "curl -sSf 'localhost:9387/metrics' | grep 'sabnzbd_queue_size{sabnzbd_instance=\"http://localhost:8080\"} 0.0'"
+        )
+      '';
+    };
+
+    scaphandre = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        boot.kernelModules = [ "intel_rapl_common" ];
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-scaphandre-exporter.service")
+        wait_for_open_port(8080)
+        wait_until_succeeds(
+            "curl -sSf 'localhost:8080/metrics'"
+        )
+      '';
+    };
+
+    shelly = {
+      exporterConfig = {
+        enable = true;
+        metrics-file = "${pkgs.writeText "test.json" ''{}''}";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-shelly-exporter.service")
+        wait_for_open_port(9784)
+        wait_until_succeeds(
+            "curl -sSf 'localhost:9784/metrics'"
+        )
+      '';
+    };
+
+    script = {
+      exporterConfig = {
+        enable = true;
+        settings.scripts = [
+          { name = "success"; script = "sleep 1"; }
+        ];
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-script-exporter.service")
+        wait_for_open_port(9172)
+        wait_until_succeeds(
+            "curl -sSf 'localhost:9172/probe?name=success' | grep -q '{}'".format(
+                'script_success{script="success"} 1'
+            )
+        )
+      '';
+    };
+
+    smartctl = {
+      exporterConfig = {
+        enable = true;
+        devices = [
+          "/dev/vda"
+        ];
+      };
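+      # The virtual test disk exposes no SMART data, so only check that the
+      # exporter logs the expected probe failure for /dev/vda.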
+      exporterTest = ''
+        wait_until_succeeds(
+            'journalctl -eu prometheus-smartctl-exporter.service -o cat | grep "Unable to detect device type"'
+        )
+      '';
+    };
+
+    smokeping = {
+      exporterConfig = {
+        enable = true;
+        hosts = [ "127.0.0.1" ];
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-smokeping-exporter.service")
+        wait_for_open_port(9374)
+        wait_until_succeeds(
+            "curl -sSf localhost:9374/metrics | grep '{}' | grep -v ' 0$'".format(
+                'smokeping_requests_total{host="127.0.0.1",ip="127.0.0.1",source=""} '
+            )
+        )
+        wait_until_succeeds(
+            "curl -sSf localhost:9374/metrics | grep '{}'".format(
+                'smokeping_response_ttl{host="127.0.0.1",ip="127.0.0.1",source=""}'
+            )
+        )
+      '';
+    };
+
+    snmp = {
+      exporterConfig = {
+        enable = true;
+        configuration.default = {
+          version = 2;
+          auth.community = "public";
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-snmp-exporter.service")
+        wait_for_open_port(9116)
+        succeed("curl -sSf localhost:9116/metrics | grep 'snmp_request_errors_total 0'")
+      '';
+    };
+
+    sql = {
+      exporterConfig = {
+        configuration.jobs.points = {
+          interval = "1m";
+          connections = [
+            "postgres://prometheus-sql-exporter@/data?host=/run/postgresql&sslmode=disable"
+          ];
+          queries = {
+            points = {
+              labels = [ "name" ];
+              help = "Amount of points accumulated per person";
+              values = [ "amount" ];
+              query = "SELECT SUM(amount) as amount, name FROM points GROUP BY name";
+            };
+          };
+        };
+        enable = true;
+        user = "prometheus-sql-exporter";
+      };
+      metricProvider = {
+        services.postgresql = {
+          enable = true;
+          initialScript = builtins.toFile "init.sql" ''
+            CREATE DATABASE data;
+            \c data;
+            CREATE TABLE points (amount INT, name TEXT);
+            INSERT INTO points(amount, name) VALUES (1, 'jack');
+            INSERT INTO points(amount, name) VALUES (2, 'jill');
+            INSERT INTO points(amount, name) VALUES (3, 'jack');
+
+            CREATE USER "prometheus-sql-exporter";
+            GRANT ALL PRIVILEGES ON DATABASE data TO "prometheus-sql-exporter";
+            GRANT SELECT ON points TO "prometheus-sql-exporter";
+          '';
+        };
+        systemd.services.prometheus-sql-exporter.after = [ "postgresql.service" ];
+      };
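+      # Two distinct "name" labels (jack and jill) should yield exactly two
+      # sql_points series in the exporter output.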
+      exporterTest = ''
+        wait_for_unit("prometheus-sql-exporter.service")
+        wait_for_open_port(9237)
+        succeed("curl http://localhost:9237/metrics | grep -c 'sql_points{' | grep 2")
+      '';
+    };
+
+    statsd = {
+      exporterConfig = {
+        enable = true;
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-statsd-exporter.service")
+        wait_for_open_port(9102)
+        succeed("curl http://localhost:9102/metrics | grep 'statsd_exporter_build_info{'")
+        wait_until_succeeds(
+          "echo 'test.udp:1|c' > /dev/udp/localhost/9125 && \
+          curl http://localhost:9102/metrics | grep 'test_udp 1'",
+          timeout=10
+        )
+        wait_until_succeeds(
+          "echo 'test.tcp:1|c' > /dev/tcp/localhost/9125 && \
+          curl http://localhost:9102/metrics | grep 'test_tcp 1'",
+          timeout=10
+        )
+      '';
+    };
+
+    surfboard = {
+      exporterConfig = {
+        enable = true;
+        modemAddress = "localhost";
+      };
+      metricProvider = {
+        systemd.services.prometheus-surfboard-exporter.after = [ "nginx.service" ];
+        services.nginx = {
+          enable = true;
+          virtualHosts.localhost.locations."/cgi-bin/status".extraConfig = ''
+            return 204;
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("nginx.service")
+        wait_for_open_port(80)
+        wait_for_unit("prometheus-surfboard-exporter.service")
+        wait_for_open_port(9239)
+        succeed("curl -sSf localhost:9239/metrics | grep 'surfboard_up 1'")
+      '';
+    };
+
+    systemd = {
+      exporterConfig = {
+        enable = true;
+
+        extraFlags = [
+          "--systemd.collector.enable-restart-count"
+        ];
+      };
+      metricProvider = { };
+      exporterTest = ''
+        wait_for_unit("prometheus-systemd-exporter.service")
+        wait_for_open_port(9558)
+        wait_until_succeeds(
+            "curl -sSf localhost:9558/metrics | grep '{}'".format(
+                'systemd_unit_state{name="basic.target",state="active",type="target"} 1'
+            )
+        )
+        succeed(
+            "curl -sSf localhost:9558/metrics | grep '{}'".format(
+                'systemd_service_restart_total{name="prometheus-systemd-exporter.service"} 0'
+            )
+        )
+      '';
+    };
+
+    tor = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        # Note: this does not connect the test environment to the Tor network.
+        # Client, relay, bridge, and exit connections are disabled by default.
+        services.tor.enable = true;
+        services.tor.settings.ControlPort = 9051;
+      };
+      exporterTest = ''
+        wait_for_unit("tor.service")
+        wait_for_open_port(9051)
+        wait_for_unit("prometheus-tor-exporter.service")
+        wait_for_open_port(9130)
+        succeed("curl -sSf localhost:9130/metrics | grep 'tor_version{.\\+} 1'")
+      '';
+    };
+
+    unpoller = {
+      nodeName = "unpoller";
+      exporterConfig.enable = true;
+      exporterConfig.controllers = [{ }];
+      exporterTest = ''
+        wait_until_succeeds(
+            'journalctl -eu prometheus-unpoller-exporter.service -o cat | grep "Connection Error"'
+        )
+      '';
+    };
+
+    unbound = {
+      exporterConfig = {
+        enable = true;
+        unbound.host = "unix:///run/unbound/unbound.ctl";
+      };
+      metricProvider = {
+        services.unbound = {
+          enable = true;
+          localControlSocketPath = "/run/unbound/unbound.ctl";
+        };
+        systemd.services.prometheus-unbound-exporter.serviceConfig = {
+          SupplementaryGroups = [ "unbound" ];
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("unbound.service")
+        wait_for_unit("prometheus-unbound-exporter.service")
+        wait_for_open_port(9167)
+        wait_until_succeeds("curl -sSf localhost:9167/metrics | grep 'unbound_up 1'")
+      '';
+    };
+
+    v2ray = {
+      exporterConfig = {
+        enable = true;
+      };
+
+      metricProvider = {
+        systemd.services.prometheus-v2ray-exporter.after = [ "v2ray.service" ];
+        services.v2ray = {
+          enable = true;
+          config = {
+            stats = {};
+            api = {
+              tag = "api";
+              services = [ "StatsService" ];
+            };
+            inbounds = [
+              {
+                port = 1080;
+                listen = "127.0.0.1";
+                protocol = "http";
+              }
+              {
+                listen = "127.0.0.1";
+                port = 54321;
+                protocol = "dokodemo-door";
+                settings = { address = "127.0.0.1"; };
+                tag = "api";
+              }
+            ];
+            outbounds = [
+              {
+                protocol = "freedom";
+              }
+              {
+                protocol = "freedom";
+                settings = {};
+                tag = "api";
+              }
+            ];
+            routing = {
+              strategy = "rules";
+              settings = {
+                rules = [
+                  {
+                    inboundTag = [ "api" ];
+                    outboundTag = "api";
+                    type = "field";
+                  }
+                ];
+              };
+            };
+          };
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-v2ray-exporter.service")
+        wait_for_open_port(9299)
+        succeed("curl -sSf localhost:9299/scrape | grep 'v2ray_up 1'")
+      '';
+    };
+
+    varnish = {
+      exporterConfig = {
+        enable = true;
+        instance = "/var/spool/varnish/varnish";
+        group = "varnish";
+      };
+      metricProvider = {
+        systemd.services.prometheus-varnish-exporter.after = [
+          "varnish.service"
+        ];
+        services.varnish = {
+          enable = true;
+          config = ''
+            vcl 4.0;
+            backend default {
+              .host = "127.0.0.1";
+              .port = "80";
+            }
+          '';
+        };
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-varnish-exporter.service")
+        wait_for_open_port(6081)
+        wait_for_open_port(9131)
+        succeed("curl -sSf http://localhost:9131/metrics | grep 'varnish_up 1'")
+      '';
+    };
+
+    wireguard = let
+      snakeoil = import ./wireguard/snakeoil-keys.nix;
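+      # The exporter labels peer metrics with the peer's base64 public key, so
+      # strip the trailing newline from the snakeoil key before grepping for it.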
+      publicKeyWithoutNewlines = replaceStrings [ "\n" ] [ "" ] snakeoil.peer1.publicKey;
+    in
+      {
+        exporterConfig.enable = true;
+        metricProvider = {
+          networking.wireguard.interfaces.wg0 = {
+            ips = [ "10.23.42.1/32" "fc00::1/128" ];
+            listenPort = 23542;
+
+            inherit (snakeoil.peer0) privateKey;
+
+            peers = singleton {
+              allowedIPs = [ "10.23.42.2/32" "fc00::2/128" ];
+
+              inherit (snakeoil.peer1) publicKey;
+            };
+          };
+          systemd.services.prometheus-wireguard-exporter.after = [ "wireguard-wg0.service" ];
+        };
+        exporterTest = ''
+          wait_for_unit("prometheus-wireguard-exporter.service")
+          wait_for_open_port(9586)
+          wait_until_succeeds(
+              "curl -sSf http://localhost:9586/metrics | grep '${publicKeyWithoutNewlines}'"
+          )
+        '';
+      };
+
+    zfs = {
+      exporterConfig = {
+        enable = true;
+      };
+      metricProvider = {
+        boot.supportedFilesystems = [ "zfs" ];
+        networking.hostId = "7327ded7";
+      };
+      exporterTest = ''
+        wait_for_unit("prometheus-zfs-exporter.service")
+        wait_for_unit("zfs.target")
+        wait_for_open_port(9134)
+        wait_until_succeeds("curl -f localhost:9134/metrics | grep 'zfs_scrape_collector_success{.*} 1'")
+      '';
+    };
+  };
+in
+mapAttrs
+  (exporter: testConfig: (makeTest (
+    let
+      nodeName = testConfig.nodeName or exporter;
+
+    in
+    {
+      name = "prometheus-${exporter}-exporter";
+
+      nodes.${nodeName} = mkMerge [{
+        services.prometheus.exporters.${exporter} = testConfig.exporterConfig;
+      } testConfig.metricProvider or { }];
+
+      testScript = ''
+        ${nodeName}.start()
+        ${concatStringsSep "\n" (map (line:
+          if (builtins.substring 0 1 line == " " || builtins.substring 0 1 line == ")")
+          then line
+          else "${nodeName}.${line}"
+        ) (splitString "\n" (removeSuffix "\n" testConfig.exporterTest)))}
+        ${nodeName}.shutdown()
+      '';
+
+      meta = with maintainers; {
+        maintainers = [ willibutz ];
+      };
+    }
+  )))
+  exporterTests
diff --git a/nixpkgs/nixos/tests/prometheus.nix b/nixpkgs/nixos/tests/prometheus.nix
new file mode 100644
index 000000000000..011127389377
--- /dev/null
+++ b/nixpkgs/nixos/tests/prometheus.nix
@@ -0,0 +1,349 @@
+let
+  grpcPort   = 19090;
+  queryPort  =  9090;
+  minioPort  =  9000;
+  pushgwPort =  9091;
+  frontPort  =  9092;
+
+  s3 = {
+    accessKey = "BKIKJAA5BMMU2RHO6IBB";
+    secretKey = "V7f1CwQqAcwo80UEIJEjc5gVQUSSx5ohQ9GSrr12";
+  };
+
+  objstore.config = {
+    type = "S3";
+    config = {
+      bucket = "thanos-bucket";
+      endpoint = "s3:${toString minioPort}";
+      region =  "us-east-1";
+      access_key = s3.accessKey;
+      secret_key = s3.secretKey;
+      insecure = true;
+      signature_version2 = false;
+      put_user_metadata = {};
+      http_config = {
+        idle_conn_timeout = "0s";
+        insecure_skip_verify = false;
+      };
+      trace = {
+        enable = false;
+      };
+    };
+  };
+
+in import ./make-test-python.nix {
+  name = "prometheus";
+
+  nodes = {
+    prometheus = { pkgs, ... }: {
+      virtualisation.diskSize = 2 * 1024;
+      virtualisation.memorySize = 2048;
+      environment.systemPackages = [ pkgs.jq ];
+      networking.firewall.allowedTCPPorts = [ grpcPort ];
+      services.prometheus = {
+        enable = true;
+        enableReload = true;
+        scrapeConfigs = [
+          {
+            job_name = "prometheus";
+            static_configs = [
+              {
+                targets = [ "127.0.0.1:${toString queryPort}" ];
+                labels = { instance = "localhost"; };
+              }
+            ];
+          }
+          {
+            job_name = "pushgateway";
+            scrape_interval = "1s";
+            static_configs = [
+              {
+                targets = [ "127.0.0.1:${toString pushgwPort}" ];
+              }
+            ];
+          }
+        ];
+        rules = [
+          ''
+            groups:
+              - name: test
+                rules:
+                  - record: testrule
+                    expr: count(up{job="prometheus"})
+          ''
+        ];
+        globalConfig = {
+          external_labels = {
+            some_label = "required by thanos";
+          };
+        };
+        extraFlags = [
+          # Required by thanos
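+          # (equal, short block durations so TSDB blocks are cut and uploaded
+          # to S3 quickly enough for the test)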
+          "--storage.tsdb.min-block-duration=5s"
+          "--storage.tsdb.max-block-duration=5s"
+        ];
+      };
+      services.prometheus.pushgateway = {
+        enable = true;
+        web.listen-address = ":${toString pushgwPort}";
+        persistMetrics = true;
+        persistence.interval = "1s";
+        stateDir = "prometheus-pushgateway";
+      };
+      services.thanos = {
+        sidecar = {
+          enable = true;
+          grpc-address = "0.0.0.0:${toString grpcPort}";
+          inherit objstore;
+        };
+
+        # TODO: Add some tests for these services:
+        #rule = {
+        #  enable = true;
+        #  http-address = "0.0.0.0:19194";
+        #  grpc-address = "0.0.0.0:19193";
+        #  query.addresses = [
+        #    "localhost:19191"
+        #  ];
+        #  labels = {
+        #    just = "some";
+        #    nice = "labels";
+        #  };
+        #};
+        #
+        #receive = {
+        #  http-address = "0.0.0.0:19195";
+        #  enable = true;
+        #  labels = {
+        #    just = "some";
+        #    nice = "labels";
+        #  };
+        #};
+      };
+      # Adds a "specialisation" of the above config which allows us to
+      # "switch" to it and see if the services.prometheus.enableReload
+      # functionality actually reloads the prometheus service instead of
+      # restarting it.
+      specialisation = {
+        "prometheus-config-change" = {
+          configuration = {
+            environment.systemPackages = [ pkgs.yq ];
+
+            # This configuration just adds a new prometheus job
+            # to scrape the node_exporter metrics of the s3 machine.
+            services.prometheus = {
+              scrapeConfigs = [
+                {
+                  job_name = "s3-node_exporter";
+                  static_configs = [
+                    {
+                      targets = [ "s3:9100" ];
+                    }
+                  ];
+                }
+              ];
+            };
+          };
+        };
+      };
+    };
+
+    query = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      services.thanos.query = {
+        enable = true;
+        http-address = "0.0.0.0:${toString queryPort}";
+        endpoints = [
+          "prometheus:${toString grpcPort}"
+        ];
+      };
+      services.thanos.query-frontend = {
+        enable = true;
+        http-address = "0.0.0.0:${toString frontPort}";
+        query-frontend.downstream-url = "http://127.0.0.1:${toString queryPort}";
+      };
+    };
+
+    store = { pkgs, ... }: {
+      virtualisation.diskSize = 2 * 1024;
+      virtualisation.memorySize = 2048;
+      environment.systemPackages = with pkgs; [ jq thanos ];
+      services.thanos.store = {
+        enable = true;
+        http-address = "0.0.0.0:10902";
+        grpc-address = "0.0.0.0:${toString grpcPort}";
+        inherit objstore;
+        sync-block-duration = "1s";
+      };
+      services.thanos.compact = {
+        enable = true;
+        http-address = "0.0.0.0:10903";
+        inherit objstore;
+        consistency-delay = "5s";
+      };
+      services.thanos.query = {
+        enable = true;
+        http-address = "0.0.0.0:${toString queryPort}";
+        endpoints = [
+          "localhost:${toString grpcPort}"
+        ];
+      };
+    };
+
+    s3 = { pkgs, ... } : {
+      # Minio requires at least 1GiB of free disk space to run.
+      virtualisation = {
+        diskSize = 2 * 1024;
+      };
+      networking.firewall.allowedTCPPorts = [ minioPort ];
+
+      services.minio = {
+        enable = true;
+        inherit (s3) accessKey secretKey;
+      };
+
+      environment.systemPackages = [ pkgs.minio-client ];
+
+      services.prometheus.exporters.node = {
+        enable = true;
+        openFirewall = true;
+      };
+    };
+  };
+
+  testScript = { nodes, ... } : ''
+    import json
+
+    # Before starting the other machines we first make sure that our S3 service is online
+    # and has a bucket added for thanos:
+    s3.start()
+    s3.wait_for_unit("minio.service")
+    s3.wait_for_open_port(${toString minioPort})
+    s3.succeed(
+        "mc config host add minio "
+        + "http://localhost:${toString minioPort} "
+        + "${s3.accessKey} ${s3.secretKey} --api s3v4",
+        "mc mb minio/thanos-bucket",
+    )
+
+    # Now that s3 has started we can start the other machines:
+    for machine in prometheus, query, store:
+        machine.start()
+
+    # Check if prometheus responds to requests:
+    prometheus.wait_for_unit("prometheus.service")
+
+    prometheus.wait_for_open_port(${toString queryPort})
+    prometheus.succeed("curl -sf http://127.0.0.1:${toString queryPort}/metrics")
+
+    # Let's test if pushing a metric to the pushgateway succeeds:
+    prometheus.wait_for_unit("pushgateway.service")
+    prometheus.succeed(
+        "echo 'some_metric 3.14' | "
+        + "curl -f --data-binary \@- "
+        + "http://127.0.0.1:${toString pushgwPort}/metrics/job/some_job"
+    )
+
+    # Now check whether that metric gets ingested by prometheus.
+    # Since we'll check for the metric several times on different machines
+    # we abstract the test using the following function:
+
+    # Helper to check that the metric "some_metric" has been received and has the correct value.
+    def wait_for_metric(machine):
+        return machine.wait_until_succeeds(
+            "curl -sf 'http://127.0.0.1:${toString queryPort}/api/v1/query?query=some_metric' | "
+            + "jq '.data.result[0].value[1]' | grep '\"3.14\"'"
+        )
+
+
+    wait_for_metric(prometheus)
+
+    # Let's test if the pushgateway persists metrics to the configured location.
+    prometheus.wait_until_succeeds("test -e /var/lib/prometheus-pushgateway/metrics")
+
+    # Test thanos
+    prometheus.wait_for_unit("thanos-sidecar.service")
+
+    # Test if the Thanos query service can correctly retrieve the metric that was sent above.
+    query.wait_for_unit("thanos-query.service")
+    wait_for_metric(query)
+
+    # Test Thanos query frontend service
+    query.wait_for_unit("thanos-query-frontend.service")
+    query.succeed("curl -sS http://localhost:${toString frontPort}/-/healthy")
+
+    # Test if the Thanos sidecar has correctly uploaded its TSDB to S3, if the
+    # Thanos storage service has correctly downloaded it from S3 and if the Thanos
+    # query service running on $store can correctly retrieve the metric:
+    store.wait_for_unit("thanos-store.service")
+    wait_for_metric(store)
+
+    store.wait_for_unit("thanos-compact.service")
+
+    # Test if the Thanos bucket command is able to retrieve blocks from the S3 bucket
+    # and check if the blocks have the correct labels:
+    store.succeed(
+        "thanos tools bucket ls "
+        + "--objstore.config-file=${nodes.store.config.services.thanos.store.objstore.config-file} "
+        + "--output=json | "
+        + "jq .thanos.labels.some_label | "
+        + "grep 'required by thanos'"
+    )
+
+    # Check if switching to a NixOS configuration that changes the prometheus
+    # configuration reloads (instead of restarts) prometheus before the switch
+    # finishes successfully:
+    with subtest("config change reloads prometheus"):
+        # We check if prometheus has finished reloading by looking for the message
+        # "Completed loading of configuration file" in the journal between the start
+        # and finish of switching to the new NixOS configuration.
+        #
+        # To mark the start we record the journal cursor before starting the switch:
+        cursor_before_switching = json.loads(
+            prometheus.succeed("journalctl -n1 -o json --output-fields=__CURSOR")
+        )["__CURSOR"]
+
+        # Now we switch:
+        prometheus_config_change = prometheus.succeed(
+            "readlink /run/current-system/specialisation/prometheus-config-change"
+        ).strip()
+        prometheus.succeed(prometheus_config_change + "/bin/switch-to-configuration test")
+
+        # Next we retrieve all logs since the start of switching:
+        logs_after_starting_switching = prometheus.succeed(
+            """
+              journalctl --after-cursor='{cursor_before_switching}' -o json --output-fields=MESSAGE
+            """.format(
+                cursor_before_switching=cursor_before_switching
+            )
+        )
+
+        # Finally we check if the message "Completed loading of configuration file"
+        # occurs before the "finished switching to system configuration" message:
+        finished_switching_msg = (
+            "finished switching to system configuration " + prometheus_config_change
+        )
+        reloaded_before_switching_finished = False
+        finished_switching = False
+        for log_line in logs_after_starting_switching.split("\n"):
+            msg = json.loads(log_line)["MESSAGE"]
+            if "Completed loading of configuration file" in msg:
+                reloaded_before_switching_finished = True
+            if msg == finished_switching_msg:
+                finished_switching = True
+                break
+
+        assert reloaded_before_switching_finished
+        assert finished_switching
+
+        # Check if the reloaded config includes the new s3-node_exporter job:
+        prometheus.succeed(
+          """
+            curl -sf http://127.0.0.1:${toString queryPort}/api/v1/status/config \
+              | jq -r .data.yaml \
+              | yq '.scrape_configs | any(.job_name == "s3-node_exporter")' \
+              | grep true
+          """
+        )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/promscale.nix b/nixpkgs/nixos/tests/promscale.nix
new file mode 100644
index 000000000000..d4825b6d7f55
--- /dev/null
+++ b/nixpkgs/nixos/tests/promscale.nix
@@ -0,0 +1,60 @@
+# Mostly copied from ./timescaledb.nix, which was copied from ./postgresql.nix,
+# as it seemed inappropriate to test additional extensions for postgresql there.
+
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  postgresql-versions = import ../../pkgs/servers/sql/postgresql pkgs;
+  test-sql = pkgs.writeText "postgresql-test" ''
+    CREATE USER promscale SUPERUSER PASSWORD 'promscale';
+    CREATE DATABASE promscale OWNER promscale;
+  '';
+
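+  # Build one VM test per applicable PostgreSQL version, each loading the
+  # timescaledb and promscale extensions and running promscale against it.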
+  make-postgresql-test = postgresql-name: postgresql-package: makeTest {
+    name = postgresql-name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ anpin ];
+    };
+
+    nodes.machine = { config, pkgs, ... }:
+      {
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+          extraPlugins = with postgresql-package.pkgs; [
+            timescaledb
+            promscale_extension
+          ];
+          settings = { shared_preload_libraries = "timescaledb, promscale"; };
+        };
+        environment.systemPackages = with pkgs; [ promscale ];
+      };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("postgresql")
+      with subtest("Postgresql with extensions timescaledb and promscale is available just after unit start"):
+          print(machine.succeed("sudo -u postgres psql -f ${test-sql}"))
+          machine.succeed("sudo -u postgres psql promscale -c 'SHOW shared_preload_libraries;' | grep promscale")
+          machine.succeed(
+            "promscale --db.name promscale --db.password promscale --db.user promscale --db.ssl-mode allow --startup.install-extensions --startup.only"
+          )
+      machine.succeed("sudo -u postgres psql promscale -c 'SELECT ps_trace.get_trace_retention_period();' | grep '(1 row)'")
+      machine.shutdown()
+    '';
+  };
+  # Version 15 is not supported yet.
+  applicablePostgresqlVersions = filterAttrs (_: value: versionAtLeast value.version "12" && !(versionAtLeast value.version "15")) postgresql-versions;
+in
+mapAttrs'
+  (name: package: {
+    inherit name;
+    value = make-postgresql-test name package;
+  })
+  applicablePostgresqlVersions
diff --git a/nixpkgs/nixos/tests/prowlarr.nix b/nixpkgs/nixos/tests/prowlarr.nix
new file mode 100644
index 000000000000..af669afd5700
--- /dev/null
+++ b/nixpkgs/nixos/tests/prowlarr.nix
@@ -0,0 +1,16 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "prowlarr";
+  meta.maintainers = with lib.maintainers; [ jdreaver ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.prowlarr.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("prowlarr.service")
+    machine.wait_for_open_port(9696)
+    machine.succeed("curl --fail http://localhost:9696/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/proxy.nix b/nixpkgs/nixos/tests/proxy.nix
new file mode 100644
index 000000000000..f8a3d576903e
--- /dev/null
+++ b/nixpkgs/nixos/tests/proxy.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  backend = { pkgs, ... }: {
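+    # Each back-end serves some static content (the valgrind manual) over HTTP.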
+    services.httpd = {
+      enable = true;
+      adminAddr = "foo@example.org";
+      virtualHosts.localhost.documentRoot = "${pkgs.valgrind.doc}/share/doc/valgrind/html";
+    };
+    networking.firewall.allowedTCPPorts = [ 80 ];
+  };
+in {
+  name = "proxy";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes = {
+    proxy = { nodes, ... }: {
+      services.httpd = {
+        enable = true;
+        adminAddr = "bar@example.org";
+        extraModules = [ "proxy_balancer" "lbmethod_byrequests" ];
+        extraConfig = ''
+          ExtendedStatus on
+        '';
+        virtualHosts.localhost = {
+          extraConfig = ''
+            <Location /server-status>
+              Require all granted
+              SetHandler server-status
+            </Location>
+
+            <Proxy balancer://cluster>
+              Require all granted
+              BalancerMember http://${nodes.backend1.config.networking.hostName} retry=0
+              BalancerMember http://${nodes.backend2.config.networking.hostName} retry=0
+            </Proxy>
+
+            ProxyStatus       full
+            ProxyPass         /server-status !
+            ProxyPass         /       balancer://cluster/
+            ProxyPassReverse  /       balancer://cluster/
+
+            # For testing; don't want to wait forever for dead backend servers.
+            ProxyTimeout      5
+          '';
+        };
+      };
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+
+    backend1 = backend;
+    backend2 = backend;
+
+    client = { ... }: { };
+  };
+
+  testScript = ''
+    start_all()
+
+    proxy.wait_for_unit("httpd")
+    backend1.wait_for_unit("httpd")
+    backend2.wait_for_unit("httpd")
+    client.wait_for_unit("network.target")
+
+    # With the back-ends up, the proxy should work.
+    client.succeed("curl --fail http://proxy/")
+
+    client.succeed("curl --fail http://proxy/server-status")
+
+    # Block the first back-end.
+    backend1.block()
+
+    # The proxy should still work.
+    client.succeed("curl --fail http://proxy/")
+    client.succeed("curl --fail http://proxy/")
+
+    # Block the second back-end.
+    backend2.block()
+
+    # Now the proxy should fail as well.
+    client.fail("curl --fail http://proxy/")
+
+    # But if the second back-end comes back, the proxy should start
+    # working again.
+    backend2.unblock()
+    client.succeed("curl --fail http://proxy/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pt2-clone.nix b/nixpkgs/nixos/tests/pt2-clone.nix
new file mode 100644
index 000000000000..ea4329c4a980
--- /dev/null
+++ b/nixpkgs/nixos/tests/pt2-clone.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "pt2-clone";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    sound.enable = true;
+    environment.systemPackages = [ pkgs.pt2-clone ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      # Add a dummy sound card, or the program won't start
+      machine.execute("modprobe snd-dummy")
+
+      machine.execute("pt2-clone >&2 &")
+
+      machine.wait_for_window(r"ProTracker")
+      machine.sleep(5)
+      # One of the few words that actually get recognized
+      if "LENGTH" not in machine.get_screen_text():
+          raise Exception("Program did not start successfully")
+      machine.screenshot("screen")
+    '';
+})
+
diff --git a/nixpkgs/nixos/tests/public-inbox.nix b/nixpkgs/nixos/tests/public-inbox.nix
new file mode 100644
index 000000000000..4d06d3e1738e
--- /dev/null
+++ b/nixpkgs/nixos/tests/public-inbox.nix
@@ -0,0 +1,230 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  orga = "example";
+  domain = "${orga}.localdomain";
+
+  tls-cert = pkgs.runCommand "selfSignedCert" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -days 36500 \
+      -subj '/CN=machine.${domain}'
+    install -D -t $out key.pem cert.pem
+  '';
+in
+{
+  name = "public-inbox";
+
+  meta.maintainers = with pkgs.lib.maintainers; [ julm ];
+
+  nodes.machine = { config, pkgs, nodes, ... }: let
+    inherit (config.services) gitolite public-inbox;
+    # Git repositories paths in Gitolite.
+    # Only their baseNameOf is used for configuring public-inbox.
+    repositories = [
+      "user/repo1"
+      "user/repo2"
+    ];
+  in
+  {
+    virtualisation.diskSize = 1 * 1024;
+    virtualisation.memorySize = 1 * 1024;
+    networking.domain = domain;
+
+    security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
+    # If using security.acme:
+    #security.acme.certs."${domain}".postRun = ''
+    #  systemctl try-restart public-inbox-nntpd public-inbox-imapd
+    #'';
+
+    services.public-inbox = {
+      enable = true;
+      postfix.enable = true;
+      openFirewall = true;
+      settings.publicinbox = {
+        css = [ "href=https://machine.${domain}/style/light.css" ];
+        nntpserver = [ "nntps://machine.${domain}" ];
+        wwwlisting = "match=domain";
+      };
+      mda = {
+        enable = true;
+        args = [ "--no-precheck" ]; # Allow Bcc:
+      };
+      http = {
+        enable = true;
+        port = "/run/public-inbox-http.sock";
+        #port = 8080;
+        args = ["-W0"];
+        mounts = [
+          "https://machine.${domain}/inbox"
+        ];
+      };
+      nntp = {
+        enable = true;
+        #port = 563;
+        args = ["-W0"];
+        cert = "${tls-cert}/cert.pem";
+        key = "${tls-cert}/key.pem";
+      };
+      imap = {
+        enable = true;
+        #port = 993;
+        args = ["-W0"];
+        cert = "${tls-cert}/cert.pem";
+        key = "${tls-cert}/key.pem";
+      };
+      inboxes = lib.recursiveUpdate (lib.genAttrs (map baseNameOf repositories) (repo: {
+        address = [
+          # Routed to the "public-inbox:" transport in services.postfix.transport
+          "${repo}@${domain}"
+        ];
+        description = ''
+          ${repo}@${domain} :
+          discussions about ${repo}.
+        '';
+        url = "https://machine.${domain}/inbox/${repo}";
+        newsgroup = "inbox.comp.${orga}.${repo}";
+        coderepo = [ repo ];
+      }))
+      {
+        repo2 = {
+          hide = [
+            "imap" # FIXME: doesn't work for IMAP as of public-inbox 1.6.1
+            "manifest"
+            "www"
+          ];
+        };
+      };
+      settings.coderepo = lib.listToAttrs (map (path: lib.nameValuePair (baseNameOf path) {
+        dir = "/var/lib/gitolite/repositories/${path}.git";
+        cgitUrl = "https://git.${domain}/${path}.git";
+      }) repositories);
+    };
+
+    # Use gitolite to store Git repositories listed in coderepo entries
+    services.gitolite = {
+      enable = true;
+      adminPubkey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIJmoTOQnGqX+//us5oye8UuE+tQBx9QEM7PN13jrwgqY root@localhost";
+    };
+    systemd.services.public-inbox-httpd = {
+      serviceConfig.SupplementaryGroups = [ gitolite.group ];
+    };
+
+    # Use nginx as a reverse proxy for public-inbox-httpd
+    services.nginx = {
+      enable = true;
+      recommendedGzipSettings = true;
+      recommendedOptimisation = true;
+      recommendedTlsSettings = true;
+      recommendedProxySettings = true;
+      virtualHosts."machine.${domain}" = {
+        forceSSL = true;
+        sslCertificate = "${tls-cert}/cert.pem";
+        sslCertificateKey = "${tls-cert}/key.pem";
+        locations."/".return = "302 /inbox";
+        locations."= /inbox".return = "302 /inbox/";
+        locations."/inbox".proxyPass = "http://unix:${public-inbox.http.port}:/inbox";
+        # If using TCP instead of a Unix socket:
+        #locations."/inbox".proxyPass = "http://127.0.0.1:${toString public-inbox.http.port}/inbox";
+        # Referred to by settings.publicinbox.css
+        # See http://public-inbox.org/meta/_/text/color/
+        locations."= /style/light.css".alias = pkgs.writeText "light.css" ''
+          * { background:#fff; color:#000 }
+
+          a { color:#00f; text-decoration:none }
+          a:visited { color:#808 }
+
+          *.q { color:#008 }
+
+          *.add { color:#060 }
+          *.del {color:#900 }
+          *.head { color:#000 }
+          *.hunk { color:#960 }
+
+          .hl.num { color:#f30 } /* number */
+          .hl.esc { color:#f0f } /* escape character */
+          .hl.str { color:#f30 } /* string */
+          .hl.ppc { color:#c3c } /* preprocessor */
+          .hl.pps { color:#f30 } /* preprocessor string */
+          .hl.slc { color:#099 } /* single-line comment */
+          .hl.com { color:#099 } /* multi-line comment */
+          /* .hl.opt { color:#ccc } */ /* operator */
+          /* .hl.ipl { color:#ccc } */ /* interpolation */
+
+          /* keyword groups kw[a-z] */
+          .hl.kwa { color:#f90 }
+          .hl.kwb { color:#060 }
+          .hl.kwc { color:#f90 }
+          /* .hl.kwd { color:#ccc } */
+        '';
+      };
+    };
+
+    services.postfix = {
+      enable = true;
+      setSendmail = true;
+      #sslCert = "${tls-cert}/cert.pem";
+      #sslKey = "${tls-cert}/key.pem";
+      recipientDelimiter = "+";
+    };
+
+    environment.systemPackages = [
+      pkgs.mailutils
+      pkgs.openssl
+    ];
+
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("public-inbox-init.service")
+
+    # Very basic check that Gitolite can work;
+    # Gitolite is not needed for the rest of this testScript
+    machine.wait_for_unit("gitolite-init.service")
+
+    # List inboxes through public-inbox-httpd
+    machine.wait_for_unit("nginx.service")
+    machine.succeed("curl -L https://machine.${domain} | grep repo1@${domain}")
+    # The repo2 inbox is hidden
+    machine.fail("curl -L https://machine.${domain} | grep repo2@${domain}")
+    machine.wait_for_unit("public-inbox-httpd.service")
+
+    # Send a mail and read it through public-inbox-httpd
+    # This must also work when a recipientDelimiter is configured.
+    machine.wait_for_unit("postfix.service")
+    machine.succeed("mail -t <${pkgs.writeText "mail" ''
+      Subject: Testing mail
+      From: root@localhost
+      To: repo1+extension@${domain}
+      Message-ID: <repo1@root-1>
+      Content-Type: text/plain; charset=utf-8
+      Content-Disposition: inline
+
+      This is a testing mail.
+    ''}")
+    machine.sleep(10)
+    machine.succeed("curl -L 'https://machine.${domain}/inbox/repo1/repo1@root-1/T/#u' | grep 'This is a testing mail.'")
+
+    # Read a mail through public-inbox-imapd
+    machine.wait_for_open_port(993)
+    machine.wait_for_unit("public-inbox-imapd.service")
+    machine.succeed("openssl s_client -ign_eof -crlf -connect machine.${domain}:993 <${pkgs.writeText "imap-commands" ''
+      tag login anonymous@${domain} anonymous
+      tag SELECT INBOX.comp.${orga}.repo1.0
+      tag FETCH 1 (BODY[HEADER])
+      tag LOGOUT
+    ''} | grep '^Message-ID: <repo1@root-1>'")
+
+    # TODO: Read a mail through public-inbox-nntpd
+    #machine.wait_for_open_port(563)
+    #machine.wait_for_unit("public-inbox-nntpd.service")
+
+    # Delete a mail.
+    # Note that using an extension not listed in the addresses
+    # requires passing --all
+    machine.succeed("curl -L https://machine.${domain}/inbox/repo1/repo1@root-1/raw | sudo -u public-inbox public-inbox-learn rm --all")
+    machine.fail("curl -L https://machine.${domain}/inbox/repo1/repo1@root-1/T/#u | grep 'This is a testing mail.'")
+
+    # Compact the database
+    machine.succeed("sudo -u public-inbox public-inbox-compact --all")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pufferpanel.nix b/nixpkgs/nixos/tests/pufferpanel.nix
new file mode 100644
index 000000000000..e7b09c13f90b
--- /dev/null
+++ b/nixpkgs/nixos/tests/pufferpanel.nix
@@ -0,0 +1,74 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "pufferpanel";
+  meta.maintainers = [ lib.maintainers.tie ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.pufferpanel ];
+    services.pufferpanel = {
+      enable = true;
+      extraPackages = [ pkgs.netcat ];
+      environment = {
+        PUFFER_PANEL_REGISTRATIONENABLED = "false";
+        PUFFER_PANEL_SETTINGS_COMPANYNAME = "NixOS";
+      };
+    };
+  };
+
+  testScript = ''
+    import shlex
+    import json
+
+    curl = "curl --fail-with-body --silent"
+    baseURL = "http://localhost:8080"
+    adminName = "admin"
+    adminEmail = "admin@nixos.org"
+    adminPass = "admin"
+    adminCreds = json.dumps({
+      "email": adminEmail,
+      "password": adminPass,
+    })
+    stopCode = 9 # SIGKILL
+    serverPort = 1337
+    serverDefinition = json.dumps({
+      "name": "netcat",
+      "node": 0,
+      "users": [
+        adminName,
+      ],
+      "type": "netcat",
+      "run": {
+        "stopCode": stopCode,
+        "command": f"nc -l {serverPort}",
+      },
+      "environment": {
+        "type": "standard",
+      },
+    })
+
+    start_all()
+
+    machine.wait_for_unit("pufferpanel.service")
+    machine.wait_for_open_port(5657) # SFTP
+    machine.wait_for_open_port(8080) # HTTP
+
+    # Note that PufferPanel does not initialize the database unless necessary.
+    # The /api/config endpoint creates the database file and triggers migrations.
+    # On success, we run a command to create the administrator user that we use to
+    # interact with the HTTP API.
+    resp = json.loads(machine.succeed(f"{curl} {baseURL}/api/config"))
+    assert resp["branding"]["name"] == "NixOS", "Invalid company name in configuration"
+    assert resp["registrationEnabled"] == False, "Expected registration to be disabled"
+
+    machine.succeed(f"pufferpanel --workDir /var/lib/pufferpanel user add --admin --name {adminName} --email {adminEmail} --password {adminPass}")
+
+    resp = json.loads(machine.succeed(f"{curl} -d '{adminCreds}' {baseURL}/auth/login"))
+    assert "servers.admin" in resp["scopes"], "User is not administrator"
+    token = resp["session"]
+    authHeader = shlex.quote(f"Authorization: Bearer {token}")
+
+    resp = json.loads(machine.succeed(f"{curl} -H {authHeader} -H 'Content-Type: application/json' -d '{serverDefinition}' {baseURL}/api/servers"))
+    serverID = resp["id"]
+    machine.succeed(f"{curl} -X POST -H {authHeader} {baseURL}/proxy/daemon/server/{serverID}/start")
+    machine.wait_for_open_port(serverPort)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/pulseaudio.nix b/nixpkgs/nixos/tests/pulseaudio.nix
new file mode 100644
index 000000000000..dc8e33ccd559
--- /dev/null
+++ b/nixpkgs/nixos/tests/pulseaudio.nix
@@ -0,0 +1,80 @@
+let
+  mkTest = { systemWide ? false , fullVersion ? false }:
+    import ./make-test-python.nix ({ pkgs, lib, ... }:
+      let
+        testFile = pkgs.fetchurl {
+          url =
+            "https://file-examples.com/storage/fe5947fd2362fc197a3c2df/2017/11/file_example_MP3_700KB.mp3";
+          hash = "sha256-+iggJW8s0/LfA/okfXsB550/55Q0Sq3OoIzuBrzOPJQ=";
+        };
+
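+        # Helper: play the test file through PulseAudio ("play") and through ALSA ("aplay"), then create a marker file on success.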
+        makeTestPlay = key:
+          { sox, alsa-utils }:
+          pkgs.writeScriptBin key ''
+            set -euxo pipefail
+            ${sox}/bin/play ${testFile}
+            ${sox}/bin/sox ${testFile} -t wav - | ${alsa-utils}/bin/aplay
+            touch /tmp/${key}_success
+          '';
+
+        testers = builtins.mapAttrs makeTestPlay {
+          testPlay = { inherit (pkgs) sox alsa-utils; };
+          testPlay32 = { inherit (pkgs.pkgsi686Linux) sox alsa-utils; };
+        };
+      in {
+        name = "pulseaudio${lib.optionalString fullVersion "Full"}${lib.optionalString systemWide "-systemWide"}";
+        meta = with pkgs.lib.maintainers; {
+          maintainers = [ synthetica ] ++ pkgs.pulseaudio.meta.maintainers;
+        };
+
+        nodes.machine = { ... }:
+
+          {
+            imports = [ ./common/wayland-cage.nix ];
+            hardware.pulseaudio = {
+              enable = true;
+              support32Bit = true;
+              inherit systemWide;
+            } // lib.optionalAttrs fullVersion {
+              package = pkgs.pulseaudioFull;
+            };
+
+            environment.systemPackages = [ testers.testPlay pkgs.pavucontrol ]
+              ++ lib.optional pkgs.stdenv.isx86_64 testers.testPlay32;
+          } // lib.optionalAttrs systemWide {
+            users.users.alice.extraGroups = [ "pulse-access" ];
+            systemd.services.pulseaudio.wantedBy = [ "multi-user.target" ];
+          };
+
+        enableOCR = true;
+
+        testScript = { ... }: ''
+          machine.wait_until_succeeds("pgrep xterm")
+          machine.wait_for_text("alice@machine")
+
+          machine.send_chars("testPlay \n")
+          machine.wait_for_file("/tmp/testPlay_success")
+          ${lib.optionalString pkgs.stdenv.isx86_64 ''
+            machine.send_chars("testPlay32 \n")
+            machine.wait_for_file("/tmp/testPlay32_success")
+          ''}
+          machine.screenshot("testPlay")
+
+          ${lib.optionalString (!systemWide) ''
+            machine.send_chars("pacmd info && touch /tmp/pacmd_success\n")
+            machine.wait_for_file("/tmp/pacmd_success")
+          ''}
+
+          # Pavucontrol only loads when Pulseaudio is running. If it isn't, the
+          # text "Dummy Output" (sound device name) will never show.
+          machine.send_chars("pavucontrol\n")
+          machine.wait_for_text("Dummy Output")
+          machine.screenshot("Pavucontrol")
+        '';
+      });
+in builtins.mapAttrs (key: val: mkTest val) {
+  user = { systemWide = false; fullVersion = false; };
+  system = { systemWide = true; fullVersion = false; };
+  userFull = { systemWide = false; fullVersion = true; };
+  systemFull = { systemWide = true; fullVersion = true; };
+}
diff --git a/nixpkgs/nixos/tests/pykms.nix b/nixpkgs/nixos/tests/pykms.nix
new file mode 100644
index 000000000000..14d776a2f113
--- /dev/null
+++ b/nixpkgs/nixos/tests/pykms.nix
@@ -0,0 +1,14 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  {
+    name = "pykms-test";
+    meta.maintainers = with pkgs.lib.maintainers; [ zopieux ];
+
+    nodes.machine = { config, lib, pkgs, ... }: {
+      services.pykms.enable = true;
+    };
+
+    testScript = ''
+      machine.wait_for_unit("pykms.service")
+      machine.succeed("${pkgs.pykms}/bin/client")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/qboot.nix b/nixpkgs/nixos/tests/qboot.nix
new file mode 100644
index 000000000000..29d999be58e5
--- /dev/null
+++ b/nixpkgs/nixos/tests/qboot.nix
@@ -0,0 +1,13 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "qboot";
+
+  nodes.machine = { ... }: {
+    virtualisation.bios = pkgs.qboot;
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/qemu-vm-external-disk-image.nix b/nixpkgs/nixos/tests/qemu-vm-external-disk-image.nix
new file mode 100644
index 000000000000..a229fc5e3963
--- /dev/null
+++ b/nixpkgs/nixos/tests/qemu-vm-external-disk-image.nix
@@ -0,0 +1,73 @@
+# Tests that you can boot from an external disk image with the qemu-vm module.
+# "External" here means that the image was not produced within the qemu-vm
+# module and relies on the fileSystems option also being set outside the
+# qemu-vm module. Most notably, this tests that the qemu-vm module can be
+# prevented from overriding fileSystems with virtualisation.fileSystems, so
+# you don't have to replicate the previously set fileSystems there.
+
+{ lib, ... }:
+
+let
+  rootFslabel = "external";
+  rootFsDevice = "/dev/disk/by-label/${rootFslabel}";
+
+  externalModule = { config, lib, pkgs, ... }: {
+    boot.loader.systemd-boot.enable = true;
+
+    fileSystems = {
+      "/".device = rootFsDevice;
+    };
+
+    system.build.diskImage = import ../lib/make-disk-image.nix {
+      inherit config lib pkgs;
+      label = rootFslabel;
+      partitionTableType = "efi";
+      format = "qcow2";
+      bootSize = "32M";
+      additionalSpace = "0M";
+      copyChannel = false;
+    };
+  };
+in
+{
+  name = "qemu-vm-external-disk-image";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    virtualisation.directBoot.enable = false;
+    virtualisation.mountHostNixStore = false;
+    virtualisation.useEFIBoot = true;
+
+    # This stops the qemu-vm module from overriding the fileSystems option
+    # with virtualisation.fileSystems.
+    virtualisation.fileSystems = lib.mkForce { };
+
+    imports = [ externalModule ];
+  };
+
+  testScript = { nodes, ... }: ''
+    import os
+    import subprocess
+    import tempfile
+
+    tmp_disk_image = tempfile.NamedTemporaryFile()
+
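+    # Create a copy-on-write qcow2 overlay backed by the pre-built image so the test never writes to the image in the Nix store.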
+    subprocess.run([
+      "${nodes.machine.virtualisation.qemu.package}/bin/qemu-img",
+      "create",
+      "-f",
+      "qcow2",
+      "-b",
+      "${nodes.machine.system.build.diskImage}/nixos.qcow2",
+      "-F",
+      "qcow2",
+      tmp_disk_image.name,
+    ])
+
+    # Set NIX_DISK_IMAGE so that the qemu script finds the right disk image.
+    os.environ['NIX_DISK_IMAGE'] = tmp_disk_image.name
+
+    machine.succeed("findmnt --kernel --source ${rootFsDevice} --target /")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/qemu-vm-restrictnetwork.nix b/nixpkgs/nixos/tests/qemu-vm-restrictnetwork.nix
new file mode 100644
index 000000000000..49a105ef1076
--- /dev/null
+++ b/nixpkgs/nixos/tests/qemu-vm-restrictnetwork.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({
+  name = "qemu-vm-restrictnetwork";
+
+  nodes = {
+    unrestricted = { config, pkgs, ... }: {
+      virtualisation.restrictNetwork = false;
+    };
+
+    restricted = { config, pkgs, ... }: {
+      virtualisation.restrictNetwork = true;
+    };
+  };
+
+  testScript = ''
+    import os
+
+    if os.fork() == 0:
+      # Start some HTTP server on the qemu host to test guest isolation.
+      from http.server import HTTPServer, BaseHTTPRequestHandler
+      HTTPServer(("", 8000), BaseHTTPRequestHandler).serve_forever()
+
+    else:
+      start_all()
+      unrestricted.wait_for_unit("network-online.target")
+      restricted.wait_for_unit("network-online.target")
+
+      # Guests should be able to reach each other on the same VLAN.
+      unrestricted.succeed("ping -c1 restricted")
+      restricted.succeed("ping -c1 unrestricted")
+
+      # Only the unrestricted guest should be able to reach host services.
+      # 10.0.2.2 is QEMU's user-networking gateway, which maps to the host's loopback interface.
+      unrestricted.succeed("curl -s http://10.0.2.2:8000")
+      restricted.fail("curl -s http://10.0.2.2:8000")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/qemu-vm-volatile-root.nix b/nixpkgs/nixos/tests/qemu-vm-volatile-root.nix
new file mode 100644
index 000000000000..bc8fd853409d
--- /dev/null
+++ b/nixpkgs/nixos/tests/qemu-vm-volatile-root.nix
@@ -0,0 +1,17 @@
+# Test that the root filesystem is a volatile tmpfs.
+
+{ lib, ... }:
+
+{
+  name = "qemu-vm-volatile-root";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes.machine = _: {
+    virtualisation.diskImage = null;
+  };
+
+  testScript = ''
+    machine.succeed("findmnt --kernel --types tmpfs /")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/qgis.nix b/nixpkgs/nixos/tests/qgis.nix
new file mode 100644
index 000000000000..7706b8c07747
--- /dev/null
+++ b/nixpkgs/nixos/tests/qgis.nix
@@ -0,0 +1,30 @@
+import ./make-test-python.nix ({ pkgs, lib, qgisPackage, ... }:
+  let
+    testScript = pkgs.writeTextFile {
+      name = "qgis-test.py";
+      text = (builtins.readFile ../../pkgs/applications/gis/qgis/test.py);
+    };
+  in
+  {
+    name = "qgis";
+    meta = {
+      maintainers = with lib; [ teams.geospatial.members ];
+    };
+
+    nodes = {
+      machine = { pkgs, ... }: {
+        virtualisation.diskSize = 2 * 1024;
+
+        imports = [ ./common/x11.nix ];
+        environment.systemPackages = [ qgisPackage ];
+
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      machine.succeed("${qgisPackage}/bin/qgis --version | grep 'QGIS ${qgisPackage.version}'")
+      machine.succeed("${qgisPackage}/bin/qgis --code ${testScript}")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/qownnotes.nix b/nixpkgs/nixos/tests/qownnotes.nix
new file mode 100644
index 000000000000..93801cb98702
--- /dev/null
+++ b/nixpkgs/nixos/tests/qownnotes.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix ({ lib, pkgs, ...} :
+
+{
+  name = "qownnotes";
+  meta.maintainers = [ lib.maintainers.pbek ];
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [
+      ./common/user-account.nix
+      ./common/x11.nix
+    ];
+
+    test-support.displayManager.auto.user = "alice";
+    environment.systemPackages = [
+      pkgs.qownnotes
+      pkgs.xdotool
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
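+    # Run a command as the test user "alice", in the background, with output going to the console log.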
+    aliceDo = cmd: ''machine.succeed("su - alice -c '${cmd}' >&2 &");'';
+    in ''
+    with subtest("Ensure X starts"):
+        start_all()
+        machine.wait_for_x()
+
+    with subtest("Check QOwnNotes version on CLI"):
+        ${aliceDo "qownnotes --version"}
+
+        machine.wait_for_console_text("QOwnNotes ${pkgs.qownnotes.version}")
+
+    with subtest("Ensure QOwnNotes starts"):
+        # start QOwnNotes window
+        ${aliceDo "qownnotes"}
+
+        machine.wait_for_text("Welcome to QOwnNotes")
+        machine.screenshot("QOwnNotes-Welcome")
+
+    with subtest("Finish first-run wizard"):
+        # The wizard should show up now
+        machine.wait_for_text("Note folder")
+        machine.send_key("ret")
+        machine.wait_for_console_text("Note path '/home/alice/Notes' was now created.")
+        machine.wait_for_text("Panel layout")
+        machine.send_key("ret")
+        machine.wait_for_text("Nextcloud")
+        machine.send_key("ret")
+        machine.wait_for_text("App metric")
+        machine.send_key("ret")
+
+        # The main window should now show up
+        machine.wait_for_text("QOwnNotes - ${pkgs.qownnotes.version}")
+        machine.wait_for_open_port(22222)
+        machine.wait_for_console_text("QOwnNotes server listening on port 22222")
+
+        machine.screenshot("QOwnNotes-DemoNote")
+
+    with subtest("Create a new note"):
+        machine.send_key("ctrl-n")
+        machine.sleep(1)
+        machine.send_chars("This is a NixOS test!\n")
+        machine.wait_for_text("This is a NixOS test!")
+
+        machine.screenshot("QOwnNotes-NewNote")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/quake3.nix b/nixpkgs/nixos/tests/quake3.nix
new file mode 100644
index 000000000000..2d8c5207001c
--- /dev/null
+++ b/nixpkgs/nixos/tests/quake3.nix
@@ -0,0 +1,95 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+let
+
+  # Build Quake with coverage instrumentation.
+  overrides = pkgs:
+    {
+      quake3game = pkgs.quake3game.override (args: {
+        stdenv = pkgs.stdenvAdapters.addCoverageInstrumentation args.stdenv;
+      });
+    };
+
+  # Allow only the demo data packages, and only if they are licensed unfreeRedistributable.
+  unfreePredicate = pkg: let
+    allowPackageNames = [ "quake3-demodata" "quake3-pointrelease" ];
+    allowLicenses = [ lib.licenses.unfreeRedistributable ];
+  in lib.elem pkg.pname allowPackageNames &&
+     lib.elem (pkg.meta.license or null) allowLicenses;
+
+  client =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      hardware.opengl.driSupport = true;
+      environment.systemPackages = [ pkgs.quake3demo ];
+      nixpkgs.config.packageOverrides = overrides;
+      nixpkgs.config.allowUnfreePredicate = unfreePredicate;
+    };
+
+in
+
+rec {
+  name = "quake3";
+  meta = with lib.maintainers; {
+    maintainers = [ domenkozar eelco ];
+  };
+
+  # TODO: lcov doesn't work atm
+  #makeCoverageReport = true;
+
+  nodes =
+    { server =
+        { pkgs, ... }:
+
+        { systemd.services.quake3-server =
+            { wantedBy = [ "multi-user.target" ];
+              script =
+                "${pkgs.quake3demo}/bin/quake3-server +set g_gametype 0 " +
+                "+map q3dm7 +addbot grunt +addbot daemia 2> /tmp/log";
+            };
+          nixpkgs.config.packageOverrides = overrides;
+          nixpkgs.config.allowUnfreePredicate = unfreePredicate;
+          networking.firewall.allowedUDPPorts = [ 27960 ];
+        };
+
+      client1 = client;
+      client2 = client;
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      server.wait_for_unit("quake3-server")
+      client1.wait_for_x()
+      client2.wait_for_x()
+
+      client1.execute("quake3 +set r_fullscreen 0 +set name Foo +connect server &")
+      client2.execute("quake3 +set r_fullscreen 0 +set name Bar +connect server &")
+
+      server.wait_until_succeeds("grep -q 'Foo.*entered the game' /tmp/log")
+      server.wait_until_succeeds("grep -q 'Bar.*entered the game' /tmp/log")
+
+      server.sleep(10)  # wait for a while to get a nice screenshot
+
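+      # Cut client1 off from the network; the server and client2 should keep running while it is blocked.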
+      client1.block()
+
+      server.sleep(20)
+
+      client1.screenshot("screen1")
+      client2.screenshot("screen2")
+
+      client1.unblock()
+
+      server.sleep(10)
+
+      client1.screenshot("screen3")
+      client2.screenshot("screen4")
+
+      client1.shutdown()
+      client2.shutdown()
+      server.stop_job("quake3-server")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/quorum.nix b/nixpkgs/nixos/tests/quorum.nix
new file mode 100644
index 000000000000..31669eb7fc38
--- /dev/null
+++ b/nixpkgs/nixos/tests/quorum.nix
@@ -0,0 +1,102 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  keystore =  {
+    address = "9377bc3936de934c497e22917b81aa8774ac3bb0";
+    crypto = {
+      cipher = "aes-128-ctr";
+      ciphertext = "ad8341d8ef225650403fd366c955f41095e438dd966a3c84b3d406818c1e366c";
+      cipherparams = {
+        iv = "2a09f7a72fd6dff7c43150ff437e6ac2";
+      };
+      kdf = "scrypt";
+      kdfparams = {
+        dklen = 32;
+        n = 262144;
+        p = 1;
+        r = 8;
+        salt = "d1a153845bb80cd6274c87c5bac8ac09fdfac5ff131a6f41b5ed319667f12027";
+      };
+      mac = "a9621ad88fa1d042acca6fc2fcd711f7e05bfbadea3f30f379235570c8e270d3";
+    };
+    id = "89e847a3-1527-42f6-a321-77de0a14ce02";
+    version = 3;
+  };
+  keystore-file = pkgs.writeText "keystore-file" (builtins.toJSON keystore);
+in
+{
+  name = "quorum";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.quorum = {
+        enable = true;
+        permissioned = false;
+        staticNodes = [ "enode://dd333ec28f0a8910c92eb4d336461eea1c20803eed9cf2c056557f986e720f8e693605bba2f4e8f289b1162e5ac7c80c914c7178130711e393ca76abc1d92f57@0.0.0.0:30303?discport=0" ];
+        genesis = {
+          alloc = {
+            "189d23d201b03ae1cf9113672df29a5d672aefa3" = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+            "44b07d2c28b8ed8f02b45bd84ac7d9051b3349e6" = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+            "4c1ccd426833b9782729a212c857f2f03b7b4c0d" = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+            "7ae555d0f6faad7930434abdaac2274fd86ab516" = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+            c1056df7c02b6f1a353052eaf0533cc7cb743b52 = {
+              balance = "0x446c3b15f9926687d2c40534fdb564000000000000";
+            };
+          };
+          coinbase = "0x0000000000000000000000000000000000000000";
+          config = {
+            byzantiumBlock = 1;
+            chainId = 10;
+            eip150Block = 1;
+            eip150Hash =
+              "0x0000000000000000000000000000000000000000000000000000000000000000";
+            eip155Block = 1;
+            eip158Block = 1;
+            isQuorum = true;
+            istanbul = {
+              epoch = 30000;
+              policy = 0;
+            };
+          };
+        difficulty = "0x1";
+        extraData =
+          "0x0000000000000000000000000000000000000000000000000000000000000000f8aff869944c1ccd426833b9782729a212c857f2f03b7b4c0d94189d23d201b03ae1cf9113672df29a5d672aefa39444b07d2c28b8ed8f02b45bd84ac7d9051b3349e694c1056df7c02b6f1a353052eaf0533cc7cb743b52947ae555d0f6faad7930434abdaac2274fd86ab516b8410000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000c0";
+        gasLimit = "0xe0000000";
+        gasUsed = "0x0";
+        mixHash =
+          "0x63746963616c2062797a616e74696e65206661756c7420746f6c6572616e6365";
+        nonce = "0x0";
+        number = "0x0";
+        parentHash =
+          "0x0000000000000000000000000000000000000000000000000000000000000000";
+        timestamp = "0x5cffc201";
+      };
+     };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.succeed("mkdir -p /var/lib/quorum/keystore")
+    machine.succeed(
+        'cp ${keystore-file} /var/lib/quorum/keystore/UTC--2020-03-23T11-08-34.144812212Z--${keystore.address}'
+    )
+    machine.succeed(
+        "echo fe2725c4e8f7617764b845e8d939a65c664e7956eb47ed7d934573f16488efc1 > /var/lib/quorum/nodekey"
+    )
+    machine.succeed("systemctl restart quorum")
+    machine.wait_for_unit("quorum.service")
+    machine.sleep(15)
+    machine.succeed('geth attach /var/lib/quorum/geth.ipc --exec "eth.accounts" | grep ${keystore.address}')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/rabbitmq.nix b/nixpkgs/nixos/tests/rabbitmq.nix
new file mode 100644
index 000000000000..040679e68d98
--- /dev/null
+++ b/nixpkgs/nixos/tests/rabbitmq.nix
@@ -0,0 +1,61 @@
+# This test runs rabbitmq and checks if rabbitmq is up and running.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  # In real life, you would keep this out of your repository and deploy it to a
+  # safe location by secure means.
+  configKeyPath = pkgs.writeText "fake-config-key" "hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01";
+in
+{
+  name = "rabbitmq";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco offline ];
+  };
+
+  nodes.machine = {
+    services.rabbitmq = {
+      enable = true;
+      managementPlugin.enable = true;
+
+      # To encrypt:
+      # rabbitmqctl --quiet encode --cipher blowfish_cfb64 --hash sha256 \
+      #   --iterations 10000 '<<"dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw">>' \
+      #   "hOjWzSEn2Z7cHzKOcf6i183O2NdjurSuoMDIIv01" ;
+      config = ''
+        [ { rabbit
+          , [ {default_user, <<"alice">>}
+            , { default_pass
+              , {encrypted,<<"oKKxyTze9PYmsEfl6FG1MxIUhxY7WPQL7HBoMPRC/1ZOdOZbtr9+DxjWW3e1D5SL48n3D9QOsGD0cOgYG7Qdvb7Txrepw8w=">>}
+              }
+            , {config_entry_decoder
+              , [ {passphrase, {file, <<"${configKeyPath}">>}}
+                , {cipher, blowfish_cfb64}
+                , {hash, sha256}
+                , {iterations, 10000}
+                ]
+              }
+            % , {rabbitmq_management, [{path_prefix, "/_queues"}]}
+            ]
+          }
+        ].
+      '';
+    };
+    # Ensure there is sufficient extra disk space for rabbitmq to be happy
+    virtualisation.diskSize = 1024;
+  };
+
+  testScript = ''
+    machine.start()
+
+    machine.wait_for_unit("rabbitmq.service")
+    machine.wait_until_succeeds(
+        'su -s ${pkgs.runtimeShell} rabbitmq -c "rabbitmqctl status"'
+    )
+    machine.wait_for_open_port(15672)
+
+    # The password is the plaintext that was encrypted with rabbitmqctl encode above.
+    machine.wait_until_succeeds(
+        '${pkgs.rabbitmq-java-client}/bin/PerfTest --time 10 --uri amqp://alice:dJT8isYu6t0Xb6u56rPglSj1vK51SlNVlXfwsRxw@localhost'
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/radarr.nix b/nixpkgs/nixos/tests/radarr.nix
new file mode 100644
index 000000000000..bf9eb11c2b12
--- /dev/null
+++ b/nixpkgs/nixos/tests/radarr.nix
@@ -0,0 +1,16 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "radarr";
+  meta.maintainers = with lib.maintainers; [ etu ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.radarr.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("radarr.service")
+    machine.wait_for_open_port(7878)
+    machine.succeed("curl --fail http://localhost:7878/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/radicale.nix b/nixpkgs/nixos/tests/radicale.nix
new file mode 100644
index 000000000000..66650dce4a00
--- /dev/null
+++ b/nixpkgs/nixos/tests/radicale.nix
@@ -0,0 +1,95 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+let
+  user = "someuser";
+  password = "some_password";
+  port = "5232";
+  filesystem_folder = "/data/radicale";
+
+  cli = "${pkgs.calendar-cli}/bin/calendar-cli --caldav-user ${user} --caldav-pass ${password}";
+in {
+  name = "radicale3";
+  meta.maintainers = with lib.maintainers; [ dotlambda ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.radicale = {
+      enable = true;
+      settings = {
+        auth = {
+          type = "htpasswd";
+          htpasswd_filename = "/etc/radicale/users";
+          htpasswd_encryption = "bcrypt";
+        };
+        storage = {
+          inherit filesystem_folder;
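+          # Commit every change to the storage repository so the test can count commits later.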
+          hook = "git add -A && (git diff --cached --quiet || git commit -m 'Changes by '%(user)s)";
+        };
+        logging.level = "info";
+      };
+      rights = {
+        principal = {
+          user = ".+";
+          collection = "{user}";
+          permissions = "RW";
+        };
+        calendars = {
+          user = ".+";
+          collection = "{user}/[^/]+";
+          permissions = "rw";
+        };
+      };
+    };
+    systemd.services.radicale.path = [ pkgs.git ];
+    environment.systemPackages = [ pkgs.git ];
+    systemd.tmpfiles.rules = [ "d ${filesystem_folder} 0750 radicale radicale -" ];
+    # WARNING: DON'T DO THIS IN PRODUCTION!
+    # This puts unhashed secrets directly into the Nix store for ease of testing.
+    environment.etc."radicale/users".source = pkgs.runCommand "htpasswd" {} ''
+      ${pkgs.apacheHttpd}/bin/htpasswd -bcB "$out" ${user} ${password}
+    '';
+  };
+  testScript = ''
+    machine.wait_for_unit("radicale.service")
+    machine.wait_for_open_port(${port})
+
+    machine.succeed("sudo -u radicale git -C ${filesystem_folder} init")
+    machine.succeed(
+        "sudo -u radicale git -C ${filesystem_folder} config --local user.email radicale@example.com"
+    )
+    machine.succeed(
+        "sudo -u radicale git -C ${filesystem_folder} config --local user.name radicale"
+    )
+
+    with subtest("Test calendar and event creation"):
+        machine.succeed(
+            "${cli} --caldav-url http://localhost:${port}/${user} calendar create cal"
+        )
+        machine.succeed("test -d ${filesystem_folder}/collection-root/${user}/cal")
+        machine.succeed('test -z "$(ls ${filesystem_folder}/collection-root/${user}/cal)"')
+        machine.succeed(
+            "${cli} --caldav-url http://localhost:${port}/${user}/cal calendar add 2021-04-23 testevent"
+        )
+        machine.succeed('test -n "$(ls ${filesystem_folder}/collection-root/${user}/cal)"')
+        (status, stdout) = machine.execute(
+            "sudo -u radicale git -C ${filesystem_folder} log --format=oneline | wc -l"
+        )
+        assert status == 0, "git log failed"
+        assert stdout == "3\n", "there should be exactly 3 commits"
+
+    with subtest("Test rights file"):
+        machine.fail(
+            "${cli} --caldav-url http://localhost:${port}/${user} calendar create sub/cal"
+        )
+        machine.fail(
+            "${cli} --caldav-url http://localhost:${port}/otheruser calendar create cal"
+        )
+
+    with subtest("Test web interface"):
+        machine.succeed("curl --fail http://${user}:${password}@localhost:${port}/.web/")
+
+    with subtest("Test security"):
+        output = machine.succeed("systemd-analyze security radicale.service")
+        machine.log(output)
+        assert output[-9:-1] == "SAFE :-}"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ragnarwm.nix b/nixpkgs/nixos/tests/ragnarwm.nix
new file mode 100644
index 000000000000..f7c588b92008
--- /dev/null
+++ b/nixpkgs/nixos/tests/ragnarwm.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ lib, ...} : {
+  name = "ragnarwm";
+
+  meta = {
+    maintainers = with lib.maintainers; [ sigmanificient ];
+  };
+
+  nodes.machine = { pkgs, lib, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = lib.mkForce "ragnar";
+    services.xserver.windowManager.ragnarwm.enable = true;
+
+    # Setup the default terminal of Ragnar
+    environment.systemPackages = [ pkgs.alacritty ];
+  };
+
+  testScript = ''
+    with subtest("ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("/home/alice/.Xauthority")
+        machine.succeed("xauth merge ~alice/.Xauthority")
+
+    with subtest("ensure we can open a new terminal"):
+        # Sleep a bit before the test; it seems to help with sending keys reliably
+        machine.sleep(2)
+        machine.send_key("meta_l-ret")
+        machine.wait_for_window(r"alice.*?machine")
+        machine.sleep(2)
+        machine.screenshot("terminal")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/rasdaemon.nix b/nixpkgs/nixos/tests/rasdaemon.nix
new file mode 100644
index 000000000000..7f30a3b81ab5
--- /dev/null
+++ b/nixpkgs/nixos/tests/rasdaemon.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, ... } : {
+  name = "rasdaemon";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ evils ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    hardware.rasdaemon = {
+      enable = true;
+      # should be enabled by default, just making sure
+      record = true;
+      # nonsense label
+      labels = ''
+        vendor: none
+          product: none
+          model: none
+            DIMM_0: 0.0.0;
+      '';
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      # confirm rasdaemon is running and has a valid database
+      # some disk errors detected in qemu for some reason ¯\_(ツ)_/¯
+      machine.succeed("ras-mc-ctl --errors | tee /dev/stderr | grep -q 'No .* errors.'")
+      # confirm the supplied labels text made it into the system
+      machine.succeed("grep -q 'vendor: none' /etc/ras/dimm_labels.d/labels >&2")
+      machine.shutdown()
+    '';
+})
diff --git a/nixpkgs/nixos/tests/readarr.nix b/nixpkgs/nixos/tests/readarr.nix
new file mode 100644
index 000000000000..7c144e2ee02f
--- /dev/null
+++ b/nixpkgs/nixos/tests/readarr.nix
@@ -0,0 +1,14 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "readarr";
+  meta.maintainers = with lib.maintainers; [ jocelynthode ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.readarr.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("readarr.service")
+    machine.wait_for_open_port(8787)
+    machine.succeed("curl --fail http://localhost:8787/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/redis.nix b/nixpkgs/nixos/tests/redis.nix
new file mode 100644
index 000000000000..94b50d07be6d
--- /dev/null
+++ b/nixpkgs/nixos/tests/redis.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+{
+  name = "redis";
+  meta.maintainers = with lib.maintainers; [ flokli ];
+
+  nodes = {
+    machine =
+      { pkgs, lib, ... }:
+
+      {
+        services.redis.servers."".enable = true;
+        services.redis.servers."test".enable = true;
+
+        users.users = lib.listToAttrs (map (suffix: lib.nameValuePair "member${suffix}" {
+          createHome = false;
+          description = "A member of the redis${suffix} group";
+          isNormalUser = true;
+          extraGroups = [ "redis${suffix}" ];
+        }) ["" "-test"]);
+      };
+  };
+
+  testScript = { nodes, ... }: let
+    inherit (nodes.machine.config.services) redis;
+    in ''
+    start_all()
+    machine.wait_for_unit("redis")
+    machine.wait_for_unit("redis-test")
+
+    # The unnamed Redis server still opens a port for backward-compatibility
+    machine.wait_for_open_port(6379)
+
+    machine.wait_for_file("${redis.servers."".unixSocket}")
+    machine.wait_for_file("${redis.servers."test".unixSocket}")
+
+    # The unix socket is accessible to the redis group
+    machine.succeed('su member -c "redis-cli ping | grep PONG"')
+    machine.succeed('su member-test -c "redis-cli ping | grep PONG"')
+
+    machine.succeed("redis-cli ping | grep PONG")
+    machine.succeed("redis-cli -s ${redis.servers."".unixSocket} ping | grep PONG")
+    machine.succeed("redis-cli -s ${redis.servers."test".unixSocket} ping | grep PONG")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/redmine.nix b/nixpkgs/nixos/tests/redmine.nix
new file mode 100644
index 000000000000..621b3e6a36ee
--- /dev/null
+++ b/nixpkgs/nixos/tests/redmine.nix
@@ -0,0 +1,44 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  redmineTest = { name, type }: makeTest {
+    name = "redmine-${name}";
+    nodes.machine = { config, pkgs, ... }: {
+      services.redmine = {
+        enable = true;
+        package = pkgs.redmine;
+        database.type = type;
+        plugins = {
+          redmine_env_auth = pkgs.fetchurl {
+            url = "https://github.com/Intera/redmine_env_auth/archive/0.7.zip";
+            sha256 = "1xb8lyarc7mpi86yflnlgyllh9hfwb9z304f19dx409gqpia99sc";
+          };
+        };
+        themes = {
+          dkuk-redmine_alex_skin = pkgs.fetchurl {
+            url = "https://bitbucket.org/dkuk/redmine_alex_skin/get/1842ef675ef3.zip";
+            sha256 = "0hrin9lzyi50k4w2bd2b30vrf1i4fi1c0gyas5801wn8i7kpm9yl";
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("redmine.service")
+      machine.wait_for_open_port(3000)
+      machine.succeed("curl --fail http://localhost:3000/")
+    '';
+  } // {
+    meta.maintainers = [ maintainers.aanderse ];
+  };
+in {
+  mysql = redmineTest { name = "mysql"; type = "mysql2"; };
+  pgsql = redmineTest { name = "pgsql"; type = "postgresql"; };
+}
diff --git a/nixpkgs/nixos/tests/restart-by-activation-script.nix b/nixpkgs/nixos/tests/restart-by-activation-script.nix
new file mode 100644
index 000000000000..0ac079e0101e
--- /dev/null
+++ b/nixpkgs/nixos/tests/restart-by-activation-script.nix
@@ -0,0 +1,73 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "restart-by-activation-script";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ das_j ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    systemd.services.restart-me = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "${pkgs.coreutils}/bin/true";
+      };
+    };
+
+    systemd.services.reload-me = {
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig = rec {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        ExecStart = "${pkgs.coreutils}/bin/true";
+        ExecReload = ExecStart;
+      };
+    };
+
+    system.activationScripts.test = {
+      supportsDryActivation = true;
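+      # With supportsDryActivation, this script also runs on dry activation and must then only touch the dry-activation lists.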
+      text = ''
+        if [ -e /test-the-activation-script ]; then
+          if [ "$NIXOS_ACTION" != dry-activate ]; then
+            touch /activation-was-run
+            echo restart-me.service > /run/nixos/activation-restart-list
+            echo reload-me.service > /run/nixos/activation-reload-list
+          else
+            echo restart-me.service > /run/nixos/dry-activation-restart-list
+            echo reload-me.service > /run/nixos/dry-activation-reload-list
+          fi
+        fi
+      '';
+    };
+  };
+
+  testScript = /* python */ ''
+    machine.wait_for_unit("multi-user.target")
+
+    with subtest("nothing happens when the activation script does nothing"):
+        out = machine.succeed("/run/current-system/bin/switch-to-configuration dry-activate 2>&1")
+        assert 'restart' not in out
+        assert 'reload' not in out
+        out = machine.succeed("/run/current-system/bin/switch-to-configuration test")
+        assert 'restart' not in out
+        assert 'reload' not in out
+
+    machine.succeed("touch /test-the-activation-script")
+
+    with subtest("dry activation"):
+        out = machine.succeed("/run/current-system/bin/switch-to-configuration dry-activate 2>&1")
+        assert 'would restart the following units: restart-me.service' in out
+        assert 'would reload the following units: reload-me.service' in out
+        machine.fail("test -f /run/nixos/dry-activation-restart-list")
+        machine.fail("test -f /run/nixos/dry-activation-reload-list")
+
+    with subtest("real activation"):
+        out = machine.succeed("/run/current-system/bin/switch-to-configuration test 2>&1")
+        assert 'restarting the following units: restart-me.service' in out
+        assert 'reloading the following units: reload-me.service' in out
+        machine.fail("test -f /run/nixos/activation-restart-list")
+        machine.fail("test -f /run/nixos/activation-reload-list")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/restic.nix b/nixpkgs/nixos/tests/restic.nix
new file mode 100644
index 000000000000..4111720cf6be
--- /dev/null
+++ b/nixpkgs/nixos/tests/restic.nix
@@ -0,0 +1,195 @@
+import ./make-test-python.nix (
+  { pkgs, ... }:
+
+  let
+    remoteRepository = "/root/restic-backup";
+    remoteFromFileRepository = "/root/restic-backup-from-file";
+    remoteNoInitRepository = "/root/restic-backup-no-init";
+    rcloneRepository = "rclone:local:/root/restic-rclone-backup";
+
+    backupPrepareCommand = ''
+      touch /root/backupPrepareCommand
+      test ! -e /root/backupCleanupCommand
+    '';
+
+    backupCleanupCommand = ''
+      rm /root/backupPrepareCommand
+      touch /root/backupCleanupCommand
+    '';
+
+    testDir = pkgs.stdenvNoCC.mkDerivation {
+      name = "test-files-to-backup";
+      unpackPhase = "true";
+      installPhase = ''
+        mkdir $out
+        echo some_file > $out/some_file
+        echo some_other_file > $out/some_other_file
+        mkdir $out/a_dir
+        echo a_file > $out/a_dir/a_file
+      '';
+    };
+
+    passwordFile = "${pkgs.writeText "password" "correcthorsebatterystaple"}";
+    paths = [ "/opt" ];
+    exclude = [ "/opt/excluded_file_*" ];
+    pruneOpts = [
+      "--keep-daily 2"
+      "--keep-weekly 1"
+      "--keep-monthly 1"
+      "--keep-yearly 99"
+    ];
+  in
+  {
+    name = "restic";
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ bbigras i077 ];
+    };
+
+    nodes = {
+      server =
+        { pkgs, ... }:
+        {
+          services.restic.backups = {
+            remotebackup = {
+              inherit passwordFile paths exclude pruneOpts backupPrepareCommand backupCleanupCommand;
+              repository = remoteRepository;
+              initialize = true;
+              timerConfig = null; # has no effect here, just checking that it doesn't break the service
+            };
+            remote-from-file-backup = {
+              inherit passwordFile exclude pruneOpts;
+              initialize = true;
+              repositoryFile = pkgs.writeText "repositoryFile" remoteFromFileRepository;
+              paths = [ "/opt/a_dir" ];
+              dynamicFilesFrom = ''
+                find /opt -mindepth 1 -maxdepth 1 ! -name a_dir # all files in /opt except for a_dir
+              '';
+            };
+            remote-noinit-backup = {
+              inherit passwordFile exclude pruneOpts paths;
+              initialize = false;
+              repository = remoteNoInitRepository;
+            };
+            rclonebackup = {
+              inherit passwordFile paths exclude pruneOpts;
+              initialize = true;
+              repository = rcloneRepository;
+              rcloneConfig = {
+                type = "local";
+                one_file_system = true;
+              };
+
+              # This gets overridden by rcloneConfig.type
+              rcloneConfigFile = pkgs.writeText "rclone.conf" ''
+                [local]
+                type=ftp
+              '';
+            };
+            remoteprune = {
+              inherit passwordFile;
+              repository = remoteRepository;
+              pruneOpts = [ "--keep-last 1" ];
+            };
+            custompackage = {
+              inherit passwordFile paths;
+              repository = "some-fake-repository";
+              package = pkgs.writeShellScriptBin "restic" ''
+                echo "$@" >> /root/fake-restic.log;
+              '';
+
+              pruneOpts = [ "--keep-last 1" ];
+              checkOpts = [ "--some-check-option" ];
+            };
+          };
+
+          environment.sessionVariables.RCLONE_CONFIG_LOCAL_TYPE = "local";
+        };
+    };
+
+    testScript = ''
+      server.start()
+      server.wait_for_unit("dbus.socket")
+      server.fail(
+          "restic-remotebackup snapshots",
+          "restic-remote-from-file-backup snapshots",
+          "restic-rclonebackup snapshots",
+          "grep 'backup.* /opt' /root/fake-restic.log",
+      )
+      server.succeed(
+          # set up
+          "cp -rT ${testDir} /opt",
+          "touch /opt/excluded_file_1 /opt/excluded_file_2",
+          "mkdir -p /root/restic-rclone-backup",
+          "restic-remote-noinit-backup init",
+
+          # test that remotebackup runs custom commands and produces a snapshot
+          "timedatectl set-time '2016-12-13 13:45'",
+          "systemctl start restic-backups-remotebackup.service",
+          "rm /root/backupCleanupCommand",
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+
+          # test that restoring that snapshot produces the same directory
+          "mkdir /tmp/restore-1",
+          "restic-remotebackup restore latest -t /tmp/restore-1",
+          "diff -ru ${testDir} /tmp/restore-1/opt",
+
+          # test that remote-from-file-backup produces a snapshot
+          "systemctl start restic-backups-remote-from-file-backup.service",
+          'restic-remote-from-file-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+
+          # test that remote-noinit-backup produces a snapshot
+          "systemctl start restic-backups-remote-noinit-backup.service",
+          'restic-remote-noinit-backup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+
+          # test that restoring that snapshot produces the same directory
+          "mkdir /tmp/restore-2",
+          "${pkgs.restic}/bin/restic -r ${remoteRepository} -p ${passwordFile} restore latest -t /tmp/restore-2",
+          "diff -ru ${testDir} /tmp/restore-2/opt",
+
+          # test that rclonebackup produces a snapshot
+          "systemctl start restic-backups-rclonebackup.service",
+          'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+
+          # test that custompackage runs both `restic backup` and `restic check` with reasonable commandlines
+          "systemctl start restic-backups-custompackage.service",
+          "grep 'backup' /root/fake-restic.log",
+          "grep 'check.* --some-check-option' /root/fake-restic.log",
+
+          # test that we can create four snapshots in remotebackup and rclonebackup
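+          # (the clock is advanced between runs so the snapshots get distinct timestamps for the prune policies to act on)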
+          "timedatectl set-time '2017-12-13 13:45'",
+          "systemctl start restic-backups-remotebackup.service",
+          "rm /root/backupCleanupCommand",
+          "systemctl start restic-backups-rclonebackup.service",
+
+          "timedatectl set-time '2018-12-13 13:45'",
+          "systemctl start restic-backups-remotebackup.service",
+          "rm /root/backupCleanupCommand",
+          "systemctl start restic-backups-rclonebackup.service",
+
+          "timedatectl set-time '2018-12-14 13:45'",
+          "systemctl start restic-backups-remotebackup.service",
+          "rm /root/backupCleanupCommand",
+          "systemctl start restic-backups-rclonebackup.service",
+
+          "timedatectl set-time '2018-12-15 13:45'",
+          "systemctl start restic-backups-remotebackup.service",
+          "rm /root/backupCleanupCommand",
+          "systemctl start restic-backups-rclonebackup.service",
+
+          "timedatectl set-time '2018-12-16 13:45'",
+          "systemctl start restic-backups-remotebackup.service",
+          "rm /root/backupCleanupCommand",
+          "systemctl start restic-backups-rclonebackup.service",
+
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
+          'restic-rclonebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 4"',
+
+          # test that remoteprune brings us back to 1 snapshot in remotebackup
+          "systemctl start restic-backups-remoteprune.service",
+          'restic-remotebackup snapshots --json | ${pkgs.jq}/bin/jq "length | . == 1"',
+
+      )
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/retroarch.nix b/nixpkgs/nixos/tests/retroarch.nix
new file mode 100644
index 000000000000..0e5f60aa8be2
--- /dev/null
+++ b/nixpkgs/nixos/tests/retroarch.nix
@@ -0,0 +1,49 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+  {
+    name = "retroarch";
+    meta = with pkgs.lib; { maintainers = teams.libretro.members ++ [ maintainers.j0hax ]; };
+
+    nodes.machine = { ... }:
+
+      {
+        imports = [ ./common/user-account.nix ];
+        services.xserver.enable = true;
+        services.xserver.desktopManager.retroarch = {
+          enable = true;
+          package = pkgs.retroarchBare;
+        };
+        services.xserver.displayManager = {
+          sddm.enable = true;
+          defaultSession = "RetroArch";
+          autoLogin = {
+            enable = true;
+            user = "alice";
+          };
+        };
+      };
+
+    testScript = { nodes, ... }:
+      let
+        user = nodes.machine.config.users.users.alice;
+        xdo = "${pkgs.xdotool}/bin/xdotool";
+      in ''
+        with subtest("Wait for login"):
+            start_all()
+            machine.wait_for_file("/tmp/xauth_*")
+            machine.succeed("xauth merge /tmp/xauth_*")
+
+        with subtest("Check RetroArch started"):
+            machine.wait_until_succeeds("pgrep retroarch")
+            machine.wait_for_window("^RetroArch ")
+
+        with subtest("Check configuration created"):
+            machine.wait_for_file("${user.home}/.config/retroarch/retroarch.cfg")
+
+        with subtest("Wait to get a screenshot"):
+            machine.execute(
+                "${xdo} key Alt+F1 sleep 10"
+            )
+            machine.screenshot("screen")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/rkvm/cert.pem b/nixpkgs/nixos/tests/rkvm/cert.pem
new file mode 100644
index 000000000000..933efe520578
--- /dev/null
+++ b/nixpkgs/nixos/tests/rkvm/cert.pem
@@ -0,0 +1,18 @@
+-----BEGIN CERTIFICATE-----
+MIIC3jCCAcagAwIBAgIUWW1hb9xdRtxAhA42jkS89goW9LUwDQYJKoZIhvcNAQEL
+BQAwDzENMAsGA1UEAwwEcmt2bTAeFw0yMzA4MjIxOTI1NDlaFw0zMzA4MTkxOTI1
+NDlaMA8xDTALBgNVBAMMBHJrdm0wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK
+AoIBAQCuBsh0+LDXN4b2o/PJjzuiZ9Yv9Pz1Oho9WRiXtNIuHTRdBCcht/iu3PGF
+ICIX+H3dqQOziGSCTAQGJD2p+1ik8d+boJbpa0oxXuHuomsMAT3mib3GpipQoBLP
+KaEbWEsvQbr3RMx8WOtG4dmRQFzSVVtmAXyM0pNyisd4eUCplyIl9gsRJIvsO/0M
+OkgOZW9XLfKiAWlZoyXEkBmPAshg3EkwQtmwxPA/NgWbAOW3zJKSChxnnGYiuIIu
+R/wJ8OQXHP6boQLQGUhCWBKa1uK1gEBmV3Pj6uK8RzTkQq6/47F5sPa6VfqQYdyl
+TCs9bSqHXZjqMBoiSp22uH6+Lh9RAgMBAAGjMjAwMA8GA1UdEQQIMAaHBAoAAAEw
+HQYDVR0OBBYEFEh9HEsnY3dfNKVyPWDbwfR0qHopMA0GCSqGSIb3DQEBCwUAA4IB
+AQB/r+K20JqegUZ/kepPxIU95YY81aUUoxvLbu4EAgh8o46Fgm75qrTZPg4TaIZa
+wtVejekrF+p3QVf0ErUblh/iCjTZPSzCmKHZt8cc9OwTH7bt3bx7heknzLDyIa5z
+szAL+6241UggQ5n5NUGn5+xZHA7TMe47xAZPaRMlCQ/tp5pWFjH6WSSQSP5t4Ag9
+ObhY+uudFjmWi3QIBTr3iIscbWx7tD8cjus7PzM7+kszSDRV04xb6Ox8JzW9MKIN
+GwgwVgs3zCuyqBmTGnR1og3aMk6VtlyZUYE78uuc+fMBxqoBZ0mykeOp0Tbzgtf7
+gPkYcQ6vonoQhuTXYj/NrY+b
+-----END CERTIFICATE-----
diff --git a/nixpkgs/nixos/tests/rkvm/default.nix b/nixpkgs/nixos/tests/rkvm/default.nix
new file mode 100644
index 000000000000..22425948d8bf
--- /dev/null
+++ b/nixpkgs/nixos/tests/rkvm/default.nix
@@ -0,0 +1,104 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+let
+  # Generated with
+  #
+  # nix shell .#rkvm --command "rkvm-certificate-gen --ip-addresses 10.0.0.1 cert.pem key.pem"
+  #
+  snakeoil-cert = ./cert.pem;
+  snakeoil-key = ./key.pem;
+in
+{
+  name = "rkvm";
+
+  nodes = {
+    server = { pkgs, ... }: {
+      imports = [ ../common/user-account.nix ];
+
+      virtualisation.vlans = [ 1 ];
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.1/24";
+      };
+
+      services.getty.autologinUser = "alice";
+
+      services.rkvm.server = {
+        enable = true;
+        settings = {
+          certificate = snakeoil-cert;
+          key = snakeoil-key;
+          password = "snakeoil";
+          switch-keys = [ "left-alt" "right-alt" ];
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      imports = [ ../common/user-account.nix ];
+
+      virtualisation.vlans = [ 1 ];
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+
+      systemd.network.networks."01-eth1" = {
+        name = "eth1";
+        networkConfig.Address = "10.0.0.2/24";
+      };
+
+      services.getty.autologinUser = "alice";
+
+      services.rkvm.client = {
+        enable = true;
+        settings = {
+          server = "10.0.0.1:5258";
+          certificate = snakeoil-cert;
+          key = snakeoil-key;
+          password = "snakeoil";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    server.wait_for_unit("getty@tty1.service")
+    server.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+    server.wait_for_unit("rkvm-server")
+    server.wait_for_open_port(5258)
+
+    client.wait_for_unit("getty@tty1.service")
+    client.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+    client.wait_for_unit("rkvm-client")
+
+    server.sleep(1)
+
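+    # The configured switch-keys (left-alt + right-alt) toggle which machine receives the input.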
+    # Switch to client
+    server.send_key("alt-alt_r", delay=0.2)
+    server.send_chars("echo 'hello client' > /tmp/test.txt\n")
+
+    # Switch to server
+    server.send_key("alt-alt_r", delay=0.2)
+    server.send_chars("echo 'hello server' > /tmp/test.txt\n")
+
+    server.sleep(1)
+
+    client.systemctl("stop rkvm-client.service")
+    server.systemctl("stop rkvm-server.service")
+
+    server_file = server.succeed("cat /tmp/test.txt")
+    assert server_file.strip() == "hello server"
+
+    client_file = client.succeed("cat /tmp/test.txt")
+    assert client_file.strip() == "hello client"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/rkvm/key.pem b/nixpkgs/nixos/tests/rkvm/key.pem
new file mode 100644
index 000000000000..7197decff8d3
--- /dev/null
+++ b/nixpkgs/nixos/tests/rkvm/key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvQIBADANBgkqhkiG9w0BAQEFAASCBKcwggSjAgEAAoIBAQCuBsh0+LDXN4b2
+o/PJjzuiZ9Yv9Pz1Oho9WRiXtNIuHTRdBCcht/iu3PGFICIX+H3dqQOziGSCTAQG
+JD2p+1ik8d+boJbpa0oxXuHuomsMAT3mib3GpipQoBLPKaEbWEsvQbr3RMx8WOtG
+4dmRQFzSVVtmAXyM0pNyisd4eUCplyIl9gsRJIvsO/0MOkgOZW9XLfKiAWlZoyXE
+kBmPAshg3EkwQtmwxPA/NgWbAOW3zJKSChxnnGYiuIIuR/wJ8OQXHP6boQLQGUhC
+WBKa1uK1gEBmV3Pj6uK8RzTkQq6/47F5sPa6VfqQYdylTCs9bSqHXZjqMBoiSp22
+uH6+Lh9RAgMBAAECggEABo2V1dBu5E51zsAiFCMdypdLZEyUNphvWC5h3oXowONz
+pH8ICYfXyEnkma/kk2+ALy0dSRDn6/94dVIUX7Fpx0hJCcoJyhSysK+TJWfIonqX
+ffYOMeFG8vicIgs+GFKs/hoPtB5LREbFkUqRj/EoWE6Y3aX3roaCwTZC8vaUk0OK
+54gExcNXRwQtFmfM9BiPT76F2J641NVsddgKumrryMi605CgZ57OFfSYEena6T3t
+JbQ1TKB3SH1LvSQIspyp56E3bjh8bcwSh72g88YxWZI9yarOesmyU+fXnmVqcBc+
+CiJDX3Te1C2GIkBiH3HZJo4P88aXrkJ7J8nub/812QKBgQDfCHjBy5uWzzbDnqZc
+cllIyUqMHq1iY2/btdZQbz83maZhQhH2UL4Zvoa7qgMX7Ou5jn1xpDaMeXNaajGK
+Fz66nmqQEUFX1i+2md2J8TeKD37yUJRdlrMiAc+RNp5wiOH9EI18g2m6h/nj3s/P
+MdNyxsz+wqOiJT0sZatarKiFhQKBgQDHv+lPy4OPH1MeSv5vmv3Pa41O/CeiPy+T
+gi6nEZayVRVog3zF9T6gNIHrZ1fdIppWPiPXv9fmC3s/IVEftLG6YC+MAfigYhiz
+Iceoal0iJJ8DglzOhlKgHEnxEwENCz8aJxjpvbxHHcpvgXdBSEVfHvVqDkAFTsvF
+JA5YTmqGXQKBgQCL6uqm2S7gq1o12p+PO4VbrjwAL3aiVLNl6Gtsxn2oSdIhDavr
+FLhNukMYFA4gwlcXb5au5k/6TG7bd+dgNDj8Jkm/27NcgVgpe9mJojQvfo0rQvXw
+yIvUd8JZ3SQEgTsU4X+Bb4eyp39TPwKrfxyh0qnj4QN6w1XfNmELX2nRaQKBgEq6
+a0ik9JTovSnKGKIcM/QTYow4HYO/a8cdnuJ13BDfb+DnwBg3BbTdr/UndmGOfnrh
+SHuAk/7GMNePWVApQ4xcS61vV1p5GJB7hLxm/my1kp+3d4z0B5lKvAbqeywsFvFr
+yxA3IWbhqEhLARh1Ny684EdLCXxy3Bzmvk8fFw8pAoGAGkt9pJC2wkk9fnJIHq+f
+h/WnEO0YrGzYnVA+RyCNKrimRd+GylGHJ/Ev6PRZvMwyGE7RCB+fHVrrEcEJAcxL
+SaOg5NA8cwrG+UpTQqi4gt6tCW87afVCyL6dC/E8giJlzI0LY9DnFGoVqYL0qJvm
+Sj4SU0fyLsW/csOLd5T+Bf8=
+-----END PRIVATE KEY-----
diff --git a/nixpkgs/nixos/tests/robustirc-bridge.nix b/nixpkgs/nixos/tests/robustirc-bridge.nix
new file mode 100644
index 000000000000..8493fd628212
--- /dev/null
+++ b/nixpkgs/nixos/tests/robustirc-bridge.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "robustirc-bridge";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hax404 ];
+  };
+
+  nodes = {
+    bridge = {
+      services.robustirc-bridge = {
+        enable = true;
+        extraFlags = [
+          "-listen localhost:6667"
+          "-network example.com"
+        ];
+      };
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      bridge.wait_for_unit("robustirc-bridge.service")
+      bridge.wait_for_open_port(1080)
+      bridge.wait_for_open_port(6667)
+    '';
+})
diff --git a/nixpkgs/nixos/tests/rosenpass.nix b/nixpkgs/nixos/tests/rosenpass.nix
new file mode 100644
index 000000000000..ec4046c8c035
--- /dev/null
+++ b/nixpkgs/nixos/tests/rosenpass.nix
@@ -0,0 +1,217 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  deviceName = "rp0";
+
+  server = {
+    ip = "fe80::1";
+    wg = {
+      public = "mQufmDFeQQuU/fIaB2hHgluhjjm1ypK4hJr1cW3WqAw=";
+      secret = "4N5Y1dldqrpsbaEiY8O0XBUGUFf8vkvtBtm8AoOX7Eo=";
+      listen = 10000;
+    };
+  };
+  client = {
+    ip = "fe80::2";
+    wg = {
+      public = "Mb3GOlT7oS+F3JntVKiaD7SpHxLxNdtEmWz/9FMnRFU=";
+      secret = "uC5dfGMv7Oxf5UDfdPkj6rZiRZT2dRWp5x8IQxrNcUE=";
+    };
+  };
+in
+{
+  name = "rosenpass";
+
+  nodes =
+    let
+      shared = peer: { config, modulesPath, ... }: {
+        imports = [ "${modulesPath}/services/networking/rosenpass.nix" ];
+
+        boot.kernelModules = [ "wireguard" ];
+
+        services.rosenpass = {
+          enable = true;
+          defaultDevice = deviceName;
+          settings = {
+            verbosity = "Verbose";
+            public_key = "/etc/rosenpass/pqpk";
+            secret_key = "/etc/rosenpass/pqsk";
+          };
+        };
+
+        networking.firewall.allowedUDPPorts = [ 9999 ];
+
+        systemd.network = {
+          enable = true;
+          networks."rosenpass" = {
+            matchConfig.Name = deviceName;
+            networkConfig.IPForward = true;
+            address = [ "${peer.ip}/64" ];
+          };
+
+          netdevs."10-rp0" = {
+            netdevConfig = {
+              Kind = "wireguard";
+              Name = deviceName;
+            };
+            wireguardConfig.PrivateKeyFile = "/etc/wireguard/wgsk";
+          };
+        };
+
+        environment.etc."wireguard/wgsk" = {
+          text = peer.wg.secret;
+          user = "systemd-network";
+          group = "systemd-network";
+        };
+      };
+    in
+    {
+      server = {
+        imports = [ (shared server) ];
+
+        networking.firewall.allowedUDPPorts = [ server.wg.listen ];
+
+        systemd.network.netdevs."10-${deviceName}" = {
+          wireguardConfig.ListenPort = server.wg.listen;
+          wireguardPeers = [
+            {
+              wireguardPeerConfig = {
+                AllowedIPs = [ "::/0" ];
+                PublicKey = client.wg.public;
+              };
+            }
+          ];
+        };
+
+        services.rosenpass.settings = {
+          listen = [ "0.0.0.0:9999" ];
+          peers = [
+            {
+              public_key = "/etc/rosenpass/peers/client/pqpk";
+              peer = client.wg.public;
+            }
+          ];
+        };
+      };
+      client = {
+        imports = [ (shared client) ];
+
+        systemd.network.netdevs."10-${deviceName}".wireguardPeers = [
+          {
+            wireguardPeerConfig = {
+              AllowedIPs = [ "::/0" ];
+              PublicKey = server.wg.public;
+              Endpoint = "server:${builtins.toString server.wg.listen}";
+            };
+          }
+        ];
+
+        services.rosenpass.settings.peers = [
+          {
+            public_key = "/etc/rosenpass/peers/server/pqpk";
+            endpoint = "server:9999";
+            peer = server.wg.public;
+          }
+        ];
+      };
+    };
+
+  testScript = { ... }: ''
+    from os import system
+
+    # Full path to rosenpass in the store, to avoid fiddling with `$PATH`.
+    rosenpass = "${pkgs.rosenpass}/bin/rosenpass"
+
+    # Path in `/etc` where keys will be placed.
+    etc = "/etc/rosenpass"
+
+    start_all()
+
+    for machine in [server, client]:
+        machine.wait_for_unit("multi-user.target")
+
+    # Gently stop Rosenpass to avoid crashes during key generation/distribution.
+    for machine in [server, client]:
+        machine.execute("systemctl stop rosenpass.service")
+
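+    # Generate a keypair for each machine on the test driver, install it on that machine and hand the public key to its peer.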
+    for (name, machine, remote) in [("server", server, client), ("client", client, server)]:
+        pk, sk = f"{name}.pqpk", f"{name}.pqsk"
+        system(f"{rosenpass} gen-keys --force --secret-key {sk} --public-key {pk}")
+        machine.copy_from_host(sk, f"{etc}/pqsk")
+        machine.copy_from_host(pk, f"{etc}/pqpk")
+        remote.copy_from_host(pk, f"{etc}/peers/{name}/pqpk")
+
+    for machine in [server, client]:
+        machine.execute("systemctl start rosenpass.service")
+
+    for machine in [server, client]:
+        machine.wait_for_unit("rosenpass.service")
+
+    with subtest("ping"):
+        client.succeed("ping -c 2 -i 0.5 ${server.ip}%${deviceName}")
+
+    with subtest("preshared-keys"):
+        # Rosenpass works by setting the WireGuard preshared key at regular intervals.
+        # While it is not active no key is set, so the output of `wg show` contains "none";
+        # once it is active the key is set and "none" no longer appears in the output.
+        for machine in [server, client]:
+            machine.wait_until_succeeds("wg show all preshared-keys | grep --invert-match none", timeout=5)
+  '';
+
+  # NOTE: The configuration below is only used for interactive sessions (i.e. development/debugging).
+  interactive.nodes =
+    let
+      inherit (import ./ssh-keys.nix pkgs) snakeOilPublicKey snakeOilPrivateKey;
+
+      sshAndKeyGeneration = {
+        services.openssh.enable = true;
+        users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+        environment.systemPackages = [
+          (pkgs.writeShellApplication {
+            name = "gen-keys";
+            runtimeInputs = [ pkgs.rosenpass ];
+            text = ''
+              HOST="$(hostname)"
+              if [ "$HOST" == "server" ]
+              then
+                PEER="client"
+              else
+                PEER="server"
+              fi
+
+              # Generate keypair.
+              mkdir -vp /etc/rosenpass/peers/$PEER
+              rosenpass gen-keys --force --secret-key /etc/rosenpass/pqsk --public-key /etc/rosenpass/pqpk
+
+              # Set up SSH key.
+              mkdir -p /root/.ssh
+              cp ${snakeOilPrivateKey} /root/.ssh/id_ecdsa
+              chmod 0400 /root/.ssh/id_ecdsa
+
+              # Copy public key to other peer.
+              # shellcheck disable=SC2029
+              ssh -o StrictHostKeyChecking=no $PEER "mkdir -pv /etc/rosenpass/peers/$HOST"
+              scp /etc/rosenpass/pqpk "$PEER:/etc/rosenpass/peers/$HOST/pqpk"
+            '';
+          })
+        ];
+      };
+
+      # Use kmscon <https://www.freedesktop.org/wiki/Software/kmscon/>
+      # to provide a slightly nicer console, and while we're at it,
+      # also use a nice font.
+      # With kmscon, we can for example zoom in/out using [Ctrl] + [+]
+      # and [Ctrl] + [-].
+      niceConsoleAndAutologin.services.kmscon = {
+        enable = true;
+        autologinUser = "root";
+        fonts = [{
+          name = "Fira Code";
+          package = pkgs.fira-code;
+        }];
+      };
+    in
+    {
+      server = sshAndKeyGeneration // niceConsoleAndAutologin;
+      client = sshAndKeyGeneration // niceConsoleAndAutologin;
+    };
+})
diff --git a/nixpkgs/nixos/tests/roundcube.nix b/nixpkgs/nixos/tests/roundcube.nix
new file mode 100644
index 000000000000..763f10a7a2dd
--- /dev/null
+++ b/nixpkgs/nixos/tests/roundcube.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "roundcube";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ globin ];
+  };
+
+  nodes = {
+    roundcube = { config, pkgs, ... }: {
+      services.roundcube = {
+        enable = true;
+        hostName = "roundcube";
+        database.password = "not production";
+        package = pkgs.roundcube.withPlugins (plugins: [ plugins.persistent_login ]);
+        plugins = [ "persistent_login" ];
+        dicts = with pkgs.aspellDicts; [ en fr de ];
+      };
+      services.nginx.virtualHosts.roundcube = {
+        forceSSL = false;
+        enableACME = false;
+      };
+    };
+  };
+
+  testScript = ''
+    roundcube.start
+    roundcube.wait_for_unit("postgresql.service")
+    roundcube.wait_for_unit("phpfpm-roundcube.service")
+    roundcube.wait_for_unit("nginx.service")
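+    # "Keep me logged in" is provided by the persistent_login plugin, so this also verifies the plugin is loaded.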
+    roundcube.succeed("curl -sSfL http://roundcube/ | grep 'Keep me logged in'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/rshim.nix b/nixpkgs/nixos/tests/rshim.nix
new file mode 100644
index 000000000000..bb5cce028ae7
--- /dev/null
+++ b/nixpkgs/nixos/tests/rshim.nix
@@ -0,0 +1,25 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+{
+  basic = makeTest {
+    name = "rshim";
+    meta.maintainers = with maintainers; [ nikstur ];
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.rshim.enable = true;
+    };
+
+    testScript = { nodes, ... }: ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      print(machine.succeed("systemctl status rshim.service"))
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/rspamd.nix b/nixpkgs/nixos/tests/rspamd.nix
new file mode 100644
index 000000000000..26895fbad3f3
--- /dev/null
+++ b/nixpkgs/nixos/tests/rspamd.nix
@@ -0,0 +1,313 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  initMachine = ''
+    start_all()
+    machine.wait_for_unit("rspamd.service")
+    machine.succeed("id rspamd >/dev/null")
+  '';
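+  # Assert that a socket exists with the expected owner, group and (octal) access mode.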
+  checkSocket = socket: user: group: mode: ''
+    machine.succeed(
+        "ls ${socket} >/dev/null",
+        '[[ "$(stat -c %U ${socket})" == "${user}" ]]',
+        '[[ "$(stat -c %G ${socket})" == "${group}" ]]',
+        '[[ "$(stat -c %a ${socket})" == "${mode}" ]]',
+    )
+  '';
+  simple = name: enableIPv6: makeTest {
+    name = "rspamd-${name}";
+    nodes.machine = {
+      services.rspamd.enable = true;
+      networking.enableIPv6 = enableIPv6;
+    };
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_open_port(11334)
+      machine.wait_for_unit("rspamd.service")
+      machine.succeed("id rspamd >/dev/null")
+      ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "660" }
+      machine.sleep(10)
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.conf"))
+      machine.log(
+          machine.succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf")
+      )
+      machine.log(machine.succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"))
+      machine.log(machine.succeed("systemctl cat rspamd.service"))
+      machine.log(machine.succeed("curl http://localhost:11334/auth"))
+      machine.log(machine.succeed("curl http://127.0.0.1:11334/auth"))
+      ${optionalString enableIPv6 ''machine.log(machine.succeed("curl http://[::1]:11334/auth"))''}
+      # would not reformat
+    '';
+  };
+in
+{
+  simple = simple "simple" true;
+  ipv4only = simple "ipv4only" false;
+  deprecated = makeTest {
+    name = "rspamd-deprecated";
+    nodes.machine = {
+      services.rspamd = {
+        enable = true;
+        workers.normal.bindSockets = [{
+          socket = "/run/rspamd/rspamd.sock";
+          mode = "0600";
+          owner = "rspamd";
+          group = "rspamd";
+        }];
+        workers.controller.bindSockets = [{
+          socket = "/run/rspamd/rspamd-worker.sock";
+          mode = "0666";
+          owner = "rspamd";
+          group = "rspamd";
+        }];
+      };
+    };
+
+    testScript = ''
+      ${initMachine}
+      machine.wait_for_file("/run/rspamd/rspamd.sock")
+      ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "600" }
+      ${checkSocket "/run/rspamd/rspamd-worker.sock" "rspamd" "rspamd" "666" }
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.conf"))
+      machine.log(
+          machine.succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf")
+      )
+      machine.log(machine.succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"))
+      machine.log(machine.succeed("rspamc -h /run/rspamd/rspamd-worker.sock stat"))
+      machine.log(
+          machine.succeed(
+              "curl --unix-socket /run/rspamd/rspamd-worker.sock http://localhost/ping"
+          )
+      )
+    '';
+  };
+
+  bindports = makeTest {
+    name = "rspamd-bindports";
+    nodes.machine = {
+      services.rspamd = {
+        enable = true;
+        workers.normal.bindSockets = [{
+          socket = "/run/rspamd/rspamd.sock";
+          mode = "0600";
+          owner = "rspamd";
+          group = "rspamd";
+        }];
+        workers.controller.bindSockets = [{
+          socket = "/run/rspamd/rspamd-worker.sock";
+          mode = "0666";
+          owner = "rspamd";
+          group = "rspamd";
+        }];
+        workers.controller2 = {
+          type = "controller";
+          bindSockets = [ "0.0.0.0:11335" ];
+          extraConfig = ''
+            static_dir = "''${WWWDIR}";
+            secure_ip = null;
+            password = "verysecretpassword";
+          '';
+        };
+      };
+    };
+
+    testScript = ''
+      ${initMachine}
+      machine.wait_for_file("/run/rspamd/rspamd.sock")
+      ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "600" }
+      ${checkSocket "/run/rspamd/rspamd-worker.sock" "rspamd" "rspamd" "666" }
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.conf"))
+      machine.log(
+          machine.succeed("grep 'CONFDIR/worker-controller.inc' /etc/rspamd/rspamd.conf")
+      )
+      machine.log(machine.succeed("grep 'CONFDIR/worker-normal.inc' /etc/rspamd/rspamd.conf"))
+      machine.log(
+          machine.succeed(
+              "grep 'LOCAL_CONFDIR/override.d/worker-controller2.inc' /etc/rspamd/rspamd.conf"
+          )
+      )
+      machine.log(
+          machine.succeed(
+              "grep 'verysecretpassword' /etc/rspamd/override.d/worker-controller2.inc"
+          )
+      )
+      machine.wait_until_succeeds(
+          "journalctl -u rspamd | grep -i 'starting controller process' >&2"
+      )
+      machine.log(machine.succeed("rspamc -h /run/rspamd/rspamd-worker.sock stat"))
+      machine.log(
+          machine.succeed(
+              "curl --unix-socket /run/rspamd/rspamd-worker.sock http://localhost/ping"
+          )
+      )
+      machine.log(machine.succeed("curl http://localhost:11335/ping"))
+    '';
+  };
+  customLuaRules = makeTest {
+    name = "rspamd-custom-lua-rules";
+    nodes.machine = {
+      environment.etc."tests/no-muh.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<mah@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+      '';
+      environment.etc."tests/muh.eml".text = ''
+        From: Cow<cow@example.com>
+        To: Sheep2<mah@example.com>
+        Subject: Evil cows
+
+        Cows are majestic creatures don't Muh agree?
+      '';
+      services.rspamd = {
+        enable = true;
+        locals = {
+          "antivirus.conf" = mkIf false { text = ''
+              clamav {
+                action = "reject";
+                symbol = "CLAM_VIRUS";
+                type = "clamav";
+                log_clean = true;
+                servers = "/run/clamav/clamd.ctl";
+              }
+            '';};
+          "redis.conf" = {
+            enable = false;
+            text = ''
+              servers = "127.0.0.1";
+            '';
+          };
+          "groups.conf".text = ''
+            group "cows" {
+              symbol {
+                NO_MUH = {
+                  weight = 1.0;
+                  description = "Mails should not muh";
+                }
+              }
+            }
+          '';
+        };
+        localLuaRules = pkgs.writeText "rspamd.local.lua" ''
+          local rspamd_logger = require "rspamd_logger"
+          rspamd_config.NO_MUH = {
+            callback = function (task)
+              local parts = task:get_text_parts()
+              if parts then
+                for _,part in ipairs(parts) do
+                  local content = tostring(part:get_content())
+                  rspamd_logger.infox(rspamd_config, 'Found content %s', content)
+                  local found = string.find(content, "Muh");
+                  rspamd_logger.infox(rspamd_config, 'Found muh %s', tostring(found))
+                  if found then
+                    return true
+                  end
+                end
+              end
+              return false
+            end,
+            score = 5.0,
+            description = 'Allow no cows',
+            group = "cows",
+          }
+          rspamd_logger.infox(rspamd_config, 'Work dammit!!!')
+        '';
+      };
+    };
+    testScript = ''
+      ${initMachine}
+      machine.wait_for_open_port(11334)
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.conf"))
+      machine.log(machine.succeed("cat /etc/rspamd/rspamd.local.lua"))
+      machine.log(machine.succeed("cat /etc/rspamd/local.d/groups.conf"))
+      # Verify that redis.conf was not written
+      machine.fail("cat /etc/rspamd/local.d/redis.conf >&2")
+      # Verify that antivirus.conf was not written
+      machine.fail("cat /etc/rspamd/local.d/antivirus.conf >&2")
+      ${checkSocket "/run/rspamd/rspamd.sock" "rspamd" "rspamd" "660" }
+      machine.log(
+          machine.succeed("curl --unix-socket /run/rspamd/rspamd.sock http://localhost/ping")
+      )
+      machine.log(machine.succeed("rspamc -h 127.0.0.1:11334 stat"))
+      machine.log(machine.succeed("cat /etc/tests/no-muh.eml | rspamc -h 127.0.0.1:11334"))
+      machine.log(
+          machine.succeed("cat /etc/tests/muh.eml | rspamc -h 127.0.0.1:11334 symbols")
+      )
+      machine.wait_until_succeeds("journalctl -u rspamd | grep -i muh >&2")
+      machine.log(
+          machine.fail(
+              "cat /etc/tests/no-muh.eml | rspamc -h 127.0.0.1:11334 symbols | grep NO_MUH"
+          )
+      )
+      machine.log(
+          machine.succeed(
+              "cat /etc/tests/muh.eml | rspamc -h 127.0.0.1:11334 symbols | grep NO_MUH"
+          )
+      )
+    '';
+  };
+  postfixIntegration = makeTest {
+    name = "rspamd-postfix-integration";
+    nodes.machine = {
+      environment.systemPackages = with pkgs; [ msmtp ];
+      environment.etc."tests/gtube.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<tester@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+
+        XJS*C4JDBQADN1.NSBN3*2IDNEN*GTUBE-STANDARD-ANTI-UBE-TEST-EMAIL*C.34X
+      '';
+      environment.etc."tests/example.eml".text = ''
+        From: Sheep1<bah@example.com>
+        To: Sheep2<tester@example.com>
+        Subject: Evil cows
+
+        I find cows to be evil don't you?
+      '';
+      users.users.tester = {
+        isNormalUser = true;
+        password = "test";
+      };
+      services.postfix = {
+        enable = true;
+        destination = ["example.com"];
+      };
+      services.rspamd = {
+        enable = true;
+        postfix.enable = true;
+        workers.rspamd_proxy.type = "rspamd_proxy";
+      };
+    };
+    testScript = ''
+      ${initMachine}
+      machine.wait_for_open_port(11334)
+      machine.wait_for_open_port(25)
+      ${checkSocket "/run/rspamd/rspamd-milter.sock" "rspamd" "postfix" "660" }
+      machine.log(machine.succeed("rspamc -h 127.0.0.1:11334 stat"))
+      machine.log(
+          machine.succeed(
+              "msmtp --host=localhost -t --read-envelope-from < /etc/tests/example.eml"
+          )
+      )
+      machine.log(
+          machine.fail(
+              "msmtp --host=localhost -t --read-envelope-from < /etc/tests/gtube.eml"
+          )
+      )
+
+      machine.wait_until_fails('[ "$(postqueue -p)" != "Mail queue is empty" ]')
+      machine.fail("journalctl -u postfix | grep -i error >&2")
+      machine.fail("journalctl -u postfix | grep -i warning >&2")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/rss2email.nix b/nixpkgs/nixos/tests/rss2email.nix
new file mode 100644
index 000000000000..f32326feb50f
--- /dev/null
+++ b/nixpkgs/nixos/tests/rss2email.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix {
+  name = "rss2email";
+
+  nodes = {
+    server = { pkgs, ... }: {
+      imports = [ common/user-account.nix ];
+      services.nginx = {
+        enable = true;
+        virtualHosts."127.0.0.1".root = ./common/webroot;
+      };
+      services.rss2email = {
+        enable = true;
+        to = "alice@localhost";
+        interval = "1";
+        config.from = "test@example.org";
+        feeds = {
+          nixos = { url = "http://127.0.0.1/news-rss.xml"; };
+        };
+      };
+      services.opensmtpd = {
+        enable = true;
+        extraServerArgs = [ "-v" ];
+        serverConfiguration = ''
+          listen on 127.0.0.1
+          action dovecot_deliver mda \
+            "${pkgs.dovecot}/libexec/dovecot/deliver -d %{user.username}"
+          match from any for local action dovecot_deliver
+        '';
+      };
+      services.dovecot2 = {
+        enable = true;
+        enableImap = true;
+        mailLocation = "maildir:~/mail";
+        protocols = [ "imap" ];
+      };
+      environment.systemPackages = let
+        checkMailLanded = pkgs.writeScriptBin "check-mail-landed" ''
+          #!${pkgs.python3.interpreter}
+          import imaplib
+
+          with imaplib.IMAP4('127.0.0.1', 143) as imap:
+            imap.login('alice', 'foobar')
+            imap.select()
+            status, refs = imap.search(None, 'ALL')
+            print("=====> Result of search for all:", status, refs)
+            assert status == 'OK'
+            assert len(refs) > 0
+            status, msg = imap.fetch(refs[0], 'BODY[TEXT]')
+            assert status == 'OK'
+        '';
+      in [ pkgs.opensmtpd checkMailLanded ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("network-online.target")
+    server.wait_for_unit("opensmtpd")
+    server.wait_for_unit("dovecot2")
+    server.wait_for_unit("nginx")
+    server.wait_for_unit("rss2email")
+
+    server.wait_until_succeeds("check-mail-landed >&2")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/rstudio-server.nix b/nixpkgs/nixos/tests/rstudio-server.nix
new file mode 100644
index 000000000000..dd5fe3e5b440
--- /dev/null
+++ b/nixpkgs/nixos/tests/rstudio-server.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  {
+    name = "rstudio-server-test";
+    meta.maintainers = with pkgs.lib.maintainers; [ jbedo cfhammill ];
+
+    nodes.machine = { config, lib, pkgs, ... }: {
+      services.rstudio-server.enable = true;
+    };
+
+    nodes.customPackageMachine = { config, lib, pkgs, ... }: {
+      services.rstudio-server = {
+        enable = true;
+        package = pkgs.rstudioServerWrapper.override { packages = [ pkgs.rPackages.ggplot2 ]; };
+      };
+    };
+
+    testScript = ''
+      machine.wait_for_unit("rstudio-server.service")
+      machine.succeed("curl -f -vvv -s http://127.0.0.1:8787")
+
+      customPackageMachine.wait_for_unit("rstudio-server.service")
+      customPackageMachine.succeed("curl -f -vvv -s http://127.0.0.1:8787")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/rsyncd.nix b/nixpkgs/nixos/tests/rsyncd.nix
new file mode 100644
index 000000000000..44464e42f28d
--- /dev/null
+++ b/nixpkgs/nixos/tests/rsyncd.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "rsyncd";
+  meta.maintainers = with pkgs.lib.maintainers; [ ehmry ];
+
+  nodes = let
+    mkNode = socketActivated:
+      { config, ... }: {
+        networking.firewall.allowedTCPPorts = [ config.services.rsyncd.port ];
+        services.rsyncd = {
+          enable = true;
+          inherit socketActivated;
+          settings = {
+            global = {
+              "reverse lookup" = false;
+              "forward lookup" = false;
+            };
+            tmp = {
+              path = "/nix/store";
+              comment = "test module";
+            };
+          };
+        };
+      };
+  in {
+    a = mkNode false;
+    b = mkNode true;
+  };
+
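+  # Node "a" runs rsyncd as a standalone daemon, node "b" uses systemd socket activation.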
+  testScript = ''
+    start_all()
+    a.wait_for_unit("rsync")
+    b.wait_for_unit("sockets.target")
+    b.succeed("rsync a::")
+    a.succeed("rsync b::")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/rsyslogd.nix b/nixpkgs/nixos/tests/rsyslogd.nix
new file mode 100644
index 000000000000..049acdcd4393
--- /dev/null
+++ b/nixpkgs/nixos/tests/rsyslogd.nix
@@ -0,0 +1,40 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+{
+  test1 = makeTest {
+    name = "rsyslogd-test1";
+    meta.maintainers = [ pkgs.lib.maintainers.aanderse ];
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.rsyslogd.enable = true;
+      services.journald.forwardToSyslog = false;
+    };
+
+    # ensure rsyslogd does not receive messages from journald when forwarding is explicitly disabled
+    testScript = ''
+      machine.wait_for_unit("default.target")
+      machine.fail("test -f /var/log/messages")
+    '';
+  };
+
+  test2 = makeTest {
+    name = "rsyslogd-test2";
+    meta.maintainers = [ pkgs.lib.maintainers.aanderse ];
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.rsyslogd.enable = true;
+    };
+
+    # ensure rsyslogd is receiving messages from journald
+    testScript = ''
+      machine.wait_for_unit("default.target")
+      machine.succeed("test -f /var/log/messages")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/rxe.nix b/nixpkgs/nixos/tests/rxe.nix
new file mode 100644
index 000000000000..10753c4ed0c8
--- /dev/null
+++ b/nixpkgs/nixos/tests/rxe.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ ... } :
+
+let
+  node = { pkgs, ... } : {
+    networking = {
+      firewall = {
+        allowedUDPPorts = [ 4791 ]; # open RoCE port
+        allowedTCPPorts = [ 4800 ]; # port for test utils
+      };
+      rxe = {
+        enable = true;
+        interfaces = [ "eth1" ];
+      };
+    };
+
+    environment.systemPackages = with pkgs; [ rdma-core screen ];
+  };
+
+in {
+  name = "rxe";
+
+  nodes = {
+    server = node;
+    client = node;
+  };
+
+  testScript = ''
+    # Test if rxe interface comes up
+    server.wait_for_unit("default.target")
+    server.succeed("systemctl status rxe.service")
+    server.succeed("ibv_devices | grep rxe_eth1")
+
+    client.wait_for_unit("default.target")
+
+    # ping pong tests
+    for proto in "rc", "uc", "ud", "srq":
+        server.succeed(
+            "screen -dmS {0}_pingpong ibv_{0}_pingpong -p 4800 -s 1024 -g0".format(proto)
+        )
+        client.succeed("sleep 2; ibv_{}_pingpong -p 4800 -s 1024 -g0 server".format(proto))
+
+    server.succeed("screen -dmS rping rping -s -a server -C 10")
+    client.succeed("sleep 2; rping -c -a server -C 10")
+  '';
+})
+
+
diff --git a/nixpkgs/nixos/tests/sabnzbd.nix b/nixpkgs/nixos/tests/sabnzbd.nix
new file mode 100644
index 000000000000..64cb655b4315
--- /dev/null
+++ b/nixpkgs/nixos/tests/sabnzbd.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "sabnzbd";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [ jojosch ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    services.sabnzbd = {
+      enable = true;
+    };
+
+    # unrar is unfree
+    nixpkgs.config.allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [ "unrar" ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("sabnzbd.service")
+    machine.wait_until_succeeds(
+        "curl --fail -L http://localhost:8080/"
+    )
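+    # Log the SABCTools line from sabnzbd's log and make sure no version problem is reported.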
+    _, out = machine.execute("grep SABCTools /var/lib/sabnzbd/logs/sabnzbd.log")
+    machine.log(out)
+    machine.fail("grep 'SABCTools disabled: no correct version found!' /var/lib/sabnzbd/logs/sabnzbd.log")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/samba-wsdd.nix b/nixpkgs/nixos/tests/samba-wsdd.nix
new file mode 100644
index 000000000000..666a626d1b4a
--- /dev/null
+++ b/nixpkgs/nixos/tests/samba-wsdd.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "samba-wsdd";
+  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
+
+  nodes = {
+    client_wsdd = { pkgs, ... }: {
+      services.samba-wsdd = {
+        enable = true;
+        openFirewall = true;
+        interface = "eth1";
+        workgroup = "WORKGROUP";
+        hostname = "CLIENT-WSDD";
+        discovery = true;
+        extraOptions = [ "--no-host" ];
+      };
+    };
+
+    server_wsdd = { ... }: {
+      services.samba-wsdd = {
+        enable = true;
+        openFirewall = true;
+        interface = "eth1";
+        workgroup = "WORKGROUP";
+        hostname = "SERVER-WSDD";
+      };
+    };
+  };
+
+  testScript = ''
+    client_wsdd.start()
+    client_wsdd.wait_for_unit("samba-wsdd")
+
+    server_wsdd.start()
+    server_wsdd.wait_for_unit("samba-wsdd")
+
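+    # Discovery is enabled on the client, so querying its control socket should list the server.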
+    client_wsdd.wait_until_succeeds(
+        "echo list | ${pkgs.libressl.nc}/bin/nc -N -U /run/wsdd/wsdd.sock | grep -i SERVER-WSDD"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/samba.nix b/nixpkgs/nixos/tests/samba.nix
new file mode 100644
index 000000000000..252c3dd9c76e
--- /dev/null
+++ b/nixpkgs/nixos/tests/samba.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "samba";
+
+  meta.maintainers = [ pkgs.lib.maintainers.eelco ];
+
+  nodes =
+    { client =
+        { pkgs, ... }:
+        { virtualisation.fileSystems =
+            { "/public" = {
+                fsType = "cifs";
+                device = "//server/public";
+                options = [ "guest" ];
+              };
+            };
+        };
+
+      server =
+        { ... }:
+        { services.samba.enable = true;
+          services.samba.openFirewall = true;
+          services.samba.shares.public =
+            { path = "/public";
+              "read only" = true;
+              browseable = "yes";
+              "guest ok" = "yes";
+              comment = "Public samba share.";
+            };
+        };
+    };
+
+  # client# [    4.542997] mount[777]: sh: systemd-ask-password: command not found
+
+  testScript =
+    ''
+      server.start()
+      server.wait_for_unit("samba.target")
+      server.succeed("mkdir -p /public; echo bar > /public/foo")
+
+      client.start()
+      client.wait_for_unit("remote-fs.target")
+      client.succeed("[[ $(cat /public/foo) = bar ]]")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/sanoid.nix b/nixpkgs/nixos/tests/sanoid.nix
new file mode 100644
index 000000000000..411ebcead9f6
--- /dev/null
+++ b/nixpkgs/nixos/tests/sanoid.nix
@@ -0,0 +1,130 @@
+import ./make-test-python.nix ({ pkgs, ... }: let
+  inherit (import ./ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+  commonConfig = { pkgs, ... }: {
+    virtualisation.emptyDiskImages = [ 2048 ];
+    boot.supportedFilesystems = [ "zfs" ];
+    environment.systemPackages = [ pkgs.parted ];
+  };
+in {
+  name = "sanoid";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lopsided98 ];
+  };
+
+  nodes = {
+    source = { ... }: {
+      imports = [ commonConfig ];
+      networking.hostId = "daa82e91";
+
+      programs.ssh.extraConfig = ''
+        UserKnownHostsFile=/dev/null
+        StrictHostKeyChecking=no
+      '';
+
+      services.sanoid = {
+        enable = true;
+        templates.test = {
+          hourly = 12;
+          daily = 1;
+          monthly = 1;
+          yearly = 1;
+
+          autosnap = true;
+        };
+        datasets."pool/sanoid".use_template = [ "test" ];
+        datasets."pool/compat".useTemplate = [ "test" ];
+        extraArgs = [ "--verbose" ];
+      };
+
+      services.syncoid = {
+        enable = true;
+        sshKey = "/var/lib/syncoid/id_ecdsa";
+        commands = {
+          # Sync snapshot taken by sanoid
+          "pool/sanoid" = {
+            target = "root@target:pool/sanoid";
+            extraArgs = [ "--no-sync-snap" "--create-bookmark" ];
+          };
+          # Take snapshot and sync
+          "pool/syncoid".target = "root@target:pool/syncoid";
+
+          # Test pool without parent (regression test for https://github.com/NixOS/nixpkgs/pull/180111)
+          "pool".target = "root@target:pool/full-pool";
+
+          # Test backward compatible options (regression test for https://github.com/NixOS/nixpkgs/issues/181561)
+          "pool/compat" = {
+            target = "root@target:pool/compat";
+            extraArgs = [ "--no-sync-snap" ];
+          };
+        };
+      };
+    };
+    target = { ... }: {
+      imports = [ commonConfig ];
+      networking.hostId = "dcf39d36";
+
+      services.openssh.enable = true;
+      users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+    };
+  };
+
+  testScript = ''
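+    # Partition the spare disk on each machine and create the ZFS pools and datasets used by the test.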
+    source.succeed(
+        "mkdir /mnt",
+        "parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s",
+        "udevadm settle",
+        "zpool create pool -R /mnt /dev/vdb1",
+        "zfs create pool/sanoid",
+        "zfs create pool/compat",
+        "zfs create pool/syncoid",
+        "udevadm settle",
+    )
+    target.succeed(
+        "mkdir /mnt",
+        "parted --script /dev/vdb -- mklabel msdos mkpart primary 1024M -1s",
+        "udevadm settle",
+        "zpool create pool -R /mnt /dev/vdb1",
+        "udevadm settle",
+    )
+
+    source.succeed(
+        "mkdir -m 700 -p /var/lib/syncoid",
+        "cat '${snakeOilPrivateKey}' > /var/lib/syncoid/id_ecdsa",
+        "chmod 600 /var/lib/syncoid/id_ecdsa",
+        "chown -R syncoid:syncoid /var/lib/syncoid/",
+    )
+
+    assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set before snapshotting"
+    assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set before snapshotting"
+    assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set before snapshotting"
+
+    # Take snapshot with sanoid
+    source.succeed("touch /mnt/pool/sanoid/test.txt")
+    source.succeed("touch /mnt/pool/compat/test.txt")
+    source.systemctl("start --wait sanoid.service")
+
+    assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set after snapshotting"
+    assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set after snapshotting"
+    assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set after snapshotting"
+
+    # Sync snapshots
+    target.wait_for_open_port(22)
+    source.succeed("touch /mnt/pool/syncoid/test.txt")
+    source.systemctl("start --wait syncoid-pool-sanoid.service")
+    target.succeed("cat /mnt/pool/sanoid/test.txt")
+    source.systemctl("start --wait syncoid-pool-syncoid.service")
+    target.succeed("cat /mnt/pool/syncoid/test.txt")
+
+    source.systemctl("start --wait syncoid-pool.service")
+    target.succeed("[[ -d /mnt/pool/full-pool/syncoid ]]")
+
+    source.systemctl("start --wait syncoid-pool-compat.service")
+    target.succeed("cat /mnt/pool/compat/test.txt")
+
+    assert len(source.succeed("zfs allow pool")) == 0, "Pool shouldn't have delegated permissions set after syncing snapshots"
+    assert len(source.succeed("zfs allow pool/sanoid")) == 0, "Sanoid dataset shouldn't have delegated permissions set after syncing snapshots"
+    assert len(source.succeed("zfs allow pool/syncoid")) == 0, "Syncoid dataset shouldn't have delegated permissions set after syncing snapshots"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/scaphandre.nix b/nixpkgs/nixos/tests/scaphandre.nix
new file mode 100644
index 000000000000..f0a411748503
--- /dev/null
+++ b/nixpkgs/nixos/tests/scaphandre.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix {
+  name = "scaphandre";
+
+  nodes = {
+    scaphandre = { pkgs, ... } : {
+      boot.kernelModules = [ "intel_rapl_common" ];
+
+      environment.systemPackages = [ pkgs.scaphandre ];
+    };
+  };
+
+  testScript = { nodes, ... } : ''
+    scaphandre.start()
+    scaphandre.wait_until_succeeds(
+        "scaphandre stdout -t 4",
+    )
+  '';
+}
diff --git a/nixpkgs/nixos/tests/schleuder.nix b/nixpkgs/nixos/tests/schleuder.nix
new file mode 100644
index 000000000000..e57ef66bb8f9
--- /dev/null
+++ b/nixpkgs/nixos/tests/schleuder.nix
@@ -0,0 +1,126 @@
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+in
+import ./make-test-python.nix {
+  name = "schleuder";
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/user-account.nix ];
+    services.postfix = {
+      enable = true;
+      enableSubmission = true;
+      tlsTrustedAuthorities = "${certs.ca.cert}";
+      sslCert = "${certs.${domain}.cert}";
+      sslKey = "${certs.${domain}.key}";
+      inherit domain;
+      destination = [ domain ];
+      localRecipients = [ "root" "alice" "bob" ];
+    };
+    services.schleuder = {
+      enable = true;
+      # Don't do it like this in production! The point of this setting
+      # is to allow loading secrets from _outside_ the world-readable
+      # Nix store.
+      extraSettingsFile = pkgs.writeText "schleuder-api-keys.yml" ''
+        api:
+          valid_api_keys:
+            - fnord
+      '';
+      lists = [ "security@${domain}" ];
+      settings.api = {
+        tls_cert_file = "${certs.${domain}.cert}";
+        tls_key_file = "${certs.${domain}.key}";
+      };
+    };
+
+    environment.systemPackages = [
+      pkgs.gnupg
+      pkgs.msmtp
+      (pkgs.writeScriptBin "do-test" ''
+        #!${pkgs.runtimeShell}
+        set -exuo pipefail
+
+        # Generate a GPG key with no passphrase and export it
+        sudo -u alice gpg --passphrase-fd 0 --batch --yes --quick-generate-key 'alice@${domain}' rsa4096 sign,encr < <(echo)
+        sudo -u alice gpg --armor --export alice@${domain} > alice.asc
+        # Create a new mailing list with alice as the owner, and alice's key
+        schleuder-cli list new security@${domain} alice@${domain} alice.asc
+
+        # Send an email from a non-member of the list. Use --auto-from so we don't have to specify who it's from twice.
+        msmtp --auto-from security@${domain} --host=${domain} --port=25 --tls --tls-starttls <<EOF
+          Subject: really big security issue!!
+          From: root@${domain}
+
+          I found a big security problem!
+        EOF
+
+        # Wait for delivery
+        (set +o pipefail; journalctl -f -n 1000 -u postfix | grep -m 1 'delivered to maildir')
+
+        # There should be exactly one email
+        mail=(/var/spool/mail/alice/new/*)
+        [[ "''${#mail[@]}" = 1 ]]
+
+        # Find the fingerprint of the mailing list key
+        read list_key_fp address < <(schleuder-cli keys list security@${domain} | grep security@)
+        schleuder-cli keys export security@${domain} $list_key_fp > list.asc
+
+        # Import the key into alice's keyring, so we can verify it as well as decrypting
+        sudo -u alice gpg --import <list.asc
+        # And perform the decryption.
+        sudo -u alice gpg -d $mail >decrypted
+        # And check that the text matches.
+        grep "big security problem" decrypted
+      '')
+
+      # For debugging:
+      # pkgs.vim pkgs.openssl pkgs.sqliteinteractive
+    ];
+
+    security.pki.certificateFiles = [ certs.ca.cert ];
+
+    # Since we don't have internet here, use dnsmasq to provide MX records from /etc/hosts
+    services.dnsmasq = {
+      enable = true;
+      settings.selfmx = true;
+    };
+
+    networking.extraHosts = ''
+      127.0.0.1 ${domain}
+    '';
+
+    # schleuder-cli's config is not quite optimal in several ways:
+    # - A fingerprint _must_ be pinned, it doesn't even have an option
+    #   to trust the PKI
+    # - It compares certificate fingerprints rather than key
+    #   fingerprints, so renewals break the pin (though that's not
+    #   relevant for this test)
+    # - It compares them as strings, which means we need to match the
+    #   expected format exactly. This means removing the :s and
+    #   lowercasing it.
+    # Refs:
+    # https://0xacab.org/schleuder/schleuder-cli/-/issues/16
+    # https://0xacab.org/schleuder/schleuder-cli/-/blob/f8895b9f47083d8c7b99a2797c93f170f3c6a3c0/lib/schleuder-cli/helper.rb#L230-238
+    systemd.tmpfiles.rules = let cliconfig = pkgs.runCommand "schleuder-cli.yml"
+      {
+        nativeBuildInputs = [ pkgs.jq pkgs.openssl ];
+      } ''
+      fp=$(openssl x509 -in ${certs.${domain}.cert} -noout -fingerprint -sha256 | cut -d = -f 2 | tr -d : | tr 'A-Z' 'a-z')
+      cat > $out <<EOF
+      host: localhost
+      port: 4443
+      tls_fingerprint: "$fp"
+      api_key: fnord
+      EOF
+    ''; in
+      [
+        "L+ /root/.schleuder-cli/schleuder-cli.yml - - - - ${cliconfig}"
+      ];
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_until_succeeds("nc -z localhost 4443")
+    machine.succeed("do-test")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/sddm.nix b/nixpkgs/nixos/tests/sddm.nix
new file mode 100644
index 000000000000..b6c05deac05e
--- /dev/null
+++ b/nixpkgs/nixos/tests/sddm.nix
@@ -0,0 +1,67 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  inherit (pkgs) lib;
+
+  tests = {
+    default = {
+      name = "sddm";
+
+      nodes.machine = { ... }: {
+        imports = [ ./common/user-account.nix ];
+        services.xserver.enable = true;
+        services.xserver.displayManager.sddm.enable = true;
+        services.xserver.displayManager.defaultSession = "none+icewm";
+        services.xserver.windowManager.icewm.enable = true;
+      };
+
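+      # OCR is needed so that wait_for_text() below can read the SDDM greeter.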
+      enableOCR = true;
+
+      testScript = { nodes, ... }: let
+        user = nodes.machine.users.users.alice;
+      in ''
+        start_all()
+        machine.wait_for_text("(?i)select your user")
+        machine.screenshot("sddm")
+        machine.send_chars("${user.password}\n")
+        machine.wait_for_file("/tmp/xauth_*")
+        machine.succeed("xauth merge /tmp/xauth_*")
+        machine.wait_for_window("^IceWM ")
+      '';
+    };
+
+    autoLogin = {
+      name = "sddm-autologin";
+      meta = with pkgs.lib.maintainers; {
+        maintainers = [ ttuegel ];
+      };
+
+      nodes.machine = { ... }: {
+        imports = [ ./common/user-account.nix ];
+        services.xserver.enable = true;
+        services.xserver.displayManager = {
+          sddm.enable = true;
+          autoLogin = {
+            enable = true;
+            user = "alice";
+          };
+        };
+        services.xserver.displayManager.defaultSession = "none+icewm";
+        services.xserver.windowManager.icewm.enable = true;
+      };
+
+      testScript = { nodes, ... }: ''
+        start_all()
+        machine.wait_for_file("/tmp/xauth_*")
+        machine.succeed("xauth merge /tmp/xauth_*")
+        machine.wait_for_window("^IceWM ")
+      '';
+    };
+  };
+in
+  lib.mapAttrs (lib.const makeTest) tests
diff --git a/nixpkgs/nixos/tests/seafile.nix b/nixpkgs/nixos/tests/seafile.nix
new file mode 100644
index 000000000000..78e735f4fed7
--- /dev/null
+++ b/nixpkgs/nixos/tests/seafile.nix
@@ -0,0 +1,115 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    client = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.seafile-shared pkgs.curl ];
+    };
+  in {
+    name = "seafile";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ kampfschlaefer schmittlauch ];
+    };
+
+    nodes = {
+      server = { config, pkgs, ... }: {
+        services.seafile = {
+          enable = true;
+          ccnetSettings.General.SERVICE_URL = "http://server";
+          adminEmail = "admin@example.com";
+          initialAdminPassword = "seafile_password";
+        };
+        services.nginx = {
+          enable = true;
+          virtualHosts."server" = {
+            locations."/".proxyPass = "http://unix:/run/seahub/gunicorn.sock";
+            locations."/seafhttp" = {
+              proxyPass = "http://127.0.0.1:8082";
+              extraConfig = ''
+                rewrite ^/seafhttp(.*)$ $1 break;
+                client_max_body_size 0;
+                proxy_connect_timeout  36000s;
+                proxy_read_timeout  36000s;
+                proxy_send_timeout  36000s;
+                send_timeout  36000s;
+                proxy_http_version 1.1;
+              '';
+            };
+          };
+        };
+        networking.firewall = { allowedTCPPorts = [ 80 ]; };
+      };
+      client1 = client pkgs;
+      client2 = client pkgs;
+    };
+
+    testScript = ''
+      start_all()
+
+      with subtest("start seaf-server"):
+          server.wait_for_unit("seaf-server.service")
+          server.wait_for_file("/run/seafile/seafile.sock")
+
+      with subtest("start seahub"):
+          server.wait_for_unit("seahub.service")
+          server.wait_for_unit("nginx.service")
+          server.wait_for_file("/run/seahub/gunicorn.sock")
+
+      with subtest("client1 fetch seahub page"):
+          client1.succeed("curl -L http://server | grep 'Log In' >&2")
+
+      with subtest("client1 connect"):
+          client1.wait_for_unit("default.target")
+          client1.succeed("seaf-cli init -d . >&2")
+          client1.succeed("seaf-cli start >&2")
+          client1.succeed(
+              "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password >&2"
+          )
+
+          libid = client1.succeed(
+              'seaf-cli create -s http://server -n test01 -u admin\@example.com -p seafile_password -t "first test library"'
+          ).strip()
+
+          client1.succeed(
+              "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test01"
+          )
+          client1.fail(
+              "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test02"
+          )
+
+          client1.succeed(
+              f"seaf-cli download -l {libid} -s http://server -u admin\@example.com -p seafile_password -d . >&2"
+          )
+
+          client1.wait_until_succeeds("seaf-cli status |grep synchronized >&2")
+
+          client1.succeed("ls -la >&2")
+          client1.succeed("ls -la test01 >&2")
+
+          client1.execute("echo bla > test01/first_file")
+
+          client1.wait_until_succeeds("seaf-cli status |grep synchronized >&2")
+
+      with subtest("client2 sync"):
+          client2.wait_for_unit("default.target")
+
+          client2.succeed("seaf-cli init -d . >&2")
+          client2.succeed("seaf-cli start >&2")
+
+          client2.succeed(
+              "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password >&2"
+          )
+
+          libid = client2.succeed(
+              "seaf-cli list-remote -s http://server -u admin\@example.com -p seafile_password |grep test01 |cut -d' ' -f 2"
+          ).strip()
+
+          client2.succeed(
+              f"seaf-cli download -l {libid} -s http://server -u admin\@example.com -p seafile_password -d . >&2"
+          )
+
+          client2.wait_until_succeeds("seaf-cli status |grep synchronized >&2")
+
+          client2.succeed("ls -la test01 >&2")
+
+          client2.succeed('[ `cat test01/first_file` = "bla" ]')
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/searx.nix b/nixpkgs/nixos/tests/searx.nix
new file mode 100644
index 000000000000..2f808cb65266
--- /dev/null
+++ b/nixpkgs/nixos/tests/searx.nix
@@ -0,0 +1,114 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+{
+  name = "searx";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ rnhmjoj ];
+  };
+
+  # basic setup: searx running the built-in webserver
+  nodes.base = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    services.searx = {
+      enable = true;
+      environmentFile = pkgs.writeText "secrets" ''
+        WOLFRAM_API_KEY  = sometoken
+        SEARX_SECRET_KEY = somesecret
+      '';
+
+      settings.server =
+        { port = "8080";
+          bind_address = "0.0.0.0";
+          secret_key = "@SEARX_SECRET_KEY@";
+        };
+      settings.engines = [
+        { name = "wolframalpha";
+          api_key = "@WOLFRAM_API_KEY@";
+          engine = "wolframalpha_api";
+        }
+        { name = "startpage";
+          shortcut = "start";
+        }
+      ];
+    };
+
+  };
+
+  # fancy setup: run in uWSGI and use nginx as proxy
+  nodes.fancy = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    services.searx = {
+      enable = true;
+      # searx refuses to run if the default secret_key is left unchanged
+      settings.server.secret_key = "somesecret";
+
+      runInUwsgi = true;
+      uwsgiConfig = {
+        # serve using the uwsgi protocol
+        socket = "/run/searx/uwsgi.sock";
+        chmod-socket = "660";
+
+        # use /searx as url "mountpoint"
+        mount = "/searx=searx.webapp:application";
+        module = "";
+        manage-script-name = true;
+      };
+    };
+
+    # use nginx as reverse proxy
+    services.nginx.enable = true;
+    services.nginx.virtualHosts.localhost = {
+      locations."/searx".extraConfig =
+        ''
+          include ${pkgs.nginx}/conf/uwsgi_params;
+          uwsgi_pass unix:/run/searx/uwsgi.sock;
+        '';
+      locations."/searx/static/".alias = "${pkgs.searx}/share/static/";
+    };
+
+    # allow nginx access to the searx socket
+    users.users.nginx.extraGroups = [ "searx" ];
+
+  };
+
+  testScript =
+    ''
+      base.start()
+
+      with subtest("Settings have been merged"):
+          base.wait_for_unit("searx-init")
+          base.wait_for_file("/run/searx/settings.yml")
+          output = base.succeed(
+              "${pkgs.yq-go}/bin/yq eval"
+              " '.engines[] | select(.name==\"startpage\") | .shortcut'"
+              " /run/searx/settings.yml"
+          ).strip()
+          assert output == "start", "Settings not merged"
+
+      with subtest("Environment variables have been substituted"):
+          base.succeed("grep -q somesecret /run/searx/settings.yml")
+          base.succeed("grep -q sometoken /run/searx/settings.yml")
+          base.copy_from_vm("/run/searx/settings.yml")
+
+      with subtest("Basic setup is working"):
+          base.wait_for_open_port(8080)
+          base.wait_for_unit("searx")
+          base.succeed(
+              "${pkgs.curl}/bin/curl --fail http://localhost:8080"
+          )
+          base.shutdown()
+
+      with subtest("Nginx+uWSGI setup is working"):
+          fancy.start()
+          fancy.wait_for_open_port(80)
+          fancy.wait_for_unit("uwsgi")
+          fancy.succeed(
+              "${pkgs.curl}/bin/curl --fail http://localhost/searx >&2"
+          )
+          fancy.succeed(
+              "${pkgs.curl}/bin/curl --fail http://localhost/searx/static/themes/oscar/js/bootstrap.min.js >&2"
+          )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/service-runner.nix b/nixpkgs/nixos/tests/service-runner.nix
new file mode 100644
index 000000000000..79d96f739a6c
--- /dev/null
+++ b/nixpkgs/nixos/tests/service-runner.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "service-runner";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ roberth ];
+  };
+
+  nodes = {
+    machine = { pkgs, lib, ... }: {
+      services.nginx.enable = true;
+      services.nginx.virtualHosts.machine.root = pkgs.runCommand "webroot" {} ''
+        mkdir $out
+        echo 'yay' >$out/index.html
+      '';
+      systemd.services.nginx.enable = false;
+    };
+
+  };
+
+  testScript = { nodes, ... }: ''
+    url = "http://localhost/index.html"
+
+    with subtest("check systemd.services.nginx.runner"):
+        machine.fail(f"curl {url}")
+        machine.succeed(
+            """
+            mkdir -p /run/nginx /var/log/nginx /var/cache/nginx
+            ${nodes.machine.config.systemd.services.nginx.runner} >&2 &
+            echo $!>my-nginx.pid
+            """
+        )
+        machine.wait_for_open_port(80)
+        machine.succeed(f"curl -f {url}")
+        machine.succeed("kill -INT $(cat my-nginx.pid)")
+        machine.wait_for_closed_port(80)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sftpgo.nix b/nixpkgs/nixos/tests/sftpgo.nix
new file mode 100644
index 000000000000..a5bb1981d2c3
--- /dev/null
+++ b/nixpkgs/nixos/tests/sftpgo.nix
@@ -0,0 +1,382 @@
+# SFTPGo NixOS test
+#
+# This NixOS test sets up a basic test scenario for the SFTPGo module
+# and covers the following scenarios:
+# - uploading a file via sftp
+# - downloading the file over sftp
+# - assert that the ACLs are respected
+# - share a file between alice and bob (using sftp)
+# - assert that eve cannot access the shared folder between alice and bob.
+#
+# Additional test coverage for the remaining protocols (i.e. ftp, http and webdav)
+# would be nice to have in the future.
+{ pkgs, lib, ...  }:
+
+let
+  inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+
+  # Returns an attributeset of users who are not system users.
+  normalUsers = config:
+    lib.filterAttrs (name: user: user.isNormalUser) config.users.users;
+
+  # Returns true if a user is a member of the given group
+  isMemberOf =
+    config:
+    # str
+    groupName:
+    # users.users attrset
+    user:
+      lib.any (x: x == user.name) config.users.groups.${groupName}.members;
+
+  # Generates a valid SFTPGo user configuration for a given user
+  # Will be converted to JSON and loaded on application startup.
+  generateUserAttrSet =
+    config:
+    # attrset returned by config.users.users.<username>
+    user: {
+      # 0: user is disabled, login is not allowed
+      # 1: user is enabled
+      status = 1;
+
+      username = user.name;
+      password = ""; # disables password authentication
+      public_keys = user.openssh.authorizedKeys.keys;
+      email = "${user.name}@example.com";
+
+      # User home directory on the local filesystem
+      home_dir = "${config.services.sftpgo.dataDir}/users/${user.name}";
+
+      # Defines a mapping between virtual SFTPGo paths and filesystem paths outside the user home directory.
+      #
+      # Supported for the local filesystem only. Folders that do not yet exist in the
+      # data provider are created there automatically, but the corresponding directories
+      # on the filesystem have to be created beforehand (see the tmpfiles rules below).
+      virtual_folders =
+        lib.optional (isMemberOf config sharedFolderName user) {
+          name = sharedFolderName;
+          mapped_path = "${config.services.sftpgo.dataDir}/${sharedFolderName}";
+          virtual_path = "/${sharedFolderName}";
+        };
+
+      # Defines the ACL on the virtual filesystem
+      permissions =
+        lib.recursiveUpdate {
+          "/" = [ "list" ];     # read-only top level directory
+          "/private" = [ "*" ]; # private subdirectory, not shared with others
+        } (lib.optionalAttrs (isMemberOf config "shared" user) {
+          "/shared" = [ "*" ];
+        });
+
+      filters = {
+        allowed_ip = [];
+        denied_ip = [];
+        web_client = [
+          "password-change-disabled"
+          "password-reset-disabled"
+          "api-key-auth-change-disabled"
+        ];
+      };
+
+      upload_bandwidth = 0; # unlimited
+      download_bandwidth = 0; # unlimited
+      expiration_date = 0; # means no expiration
+      max_sessions = 0;
+      quota_size = 0;
+      quota_files = 0;
+    };
+
+  # Generates a json file containing a static configuration
+  # of users and folders to import to SFTPGo.
+  loadDataJson = config: pkgs.writeText "users-and-folders.json" (builtins.toJSON {
+    users =
+      lib.mapAttrsToList (name: user: generateUserAttrSet config user) (normalUsers config);
+
+    folders = [
+      {
+        name = sharedFolderName;
+        description = "shared folder";
+
+        # 0: local filesystem
+        # 1: AWS S3 compatible
+        # 2: Google Cloud Storage
+        filesystem.provider = 0;
+
+        # Mapped path on the local filesystem
+        mapped_path = "${config.services.sftpgo.dataDir}/${sharedFolderName}";
+
+        # All users in the matching group gain access
+        users = config.users.groups.${sharedFolderName}.members;
+      }
+    ];
+  });
+
+  # Generated Host Key for connecting to SFTPGo's sftp subsystem.
+  snakeOilHostKey = pkgs.writeText "sftpgo_ed25519_host_key" ''
+    -----BEGIN OPENSSH PRIVATE KEY-----
+    b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+    QyNTUxOQAAACBOtQu6U135yxtrvUqPoozUymkjoNNPVK6rqjS936RLtQAAAJAXOMoSFzjK
+    EgAAAAtzc2gtZWQyNTUxOQAAACBOtQu6U135yxtrvUqPoozUymkjoNNPVK6rqjS936RLtQ
+    AAAEAoRLEV1VD80mg314ObySpfrCcUqtWoOSS3EtMPPhx08U61C7pTXfnLG2u9So+ijNTK
+    aSOg009UrquqNL3fpEu1AAAADHNmdHBnb0BuaXhvcwE=
+    -----END OPENSSH PRIVATE KEY-----
+  '';
+
+  adminUsername = "admin";
+  adminPassword = "secretadminpassword";
+  aliceUsername = "alice";
+  alicePassword = "secretalicepassword";
+  bobUsername = "bob";
+  bobPassword = "secretbobpassword";
+  eveUsername = "eve";
+  evePassword = "secretevepassword";
+  sharedFolderName = "shared";
+
+  # A file for testing uploading via SFTP
+  testFile = pkgs.writeText "test.txt" "hello world";
+  sharedFile = pkgs.writeText "shared.txt" "shared content";
+
+  # Define the port for exposing SFTP
+  sftpPort = 2022;
+
+  # Define the port for exposing HTTP
+  httpPort = 8080;
+in
+{
+  name = "sftpgo";
+
+  meta.maintainers = with lib.maintainers; [ yayayayaka ];
+
+  nodes = {
+    server = { nodes, ... }: {
+      networking.firewall.allowedTCPPorts = [ sftpPort httpPort ];
+
+      # Configure the PostgreSQL database used by SFTPGo as its data provider
+      services.postgresql = {
+        enable = true;
+        ensureDatabases = [ "sftpgo" ];
+        ensureUsers = [{
+          name = "sftpgo";
+          ensureDBOwnership = true;
+        }];
+      };
+
+      services.sftpgo = {
+        enable = true;
+
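+        # Provision the statically generated users and folders on startup.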
+        loadDataFile = (loadDataJson nodes.server);
+
+        settings = {
+          data_provider = {
+            driver = "postgresql";
+            name = "sftpgo";
+            username = "sftpgo";
+            host = "/run/postgresql";
+            port = 5432;
+
+            # Enables the possibility to create an initial admin user on first startup.
+            create_default_admin = true;
+          };
+
+          httpd.bindings = [
+            {
+              address = ""; # listen on all interfaces
+              port = httpPort;
+              enable_https = false;
+
+              enable_web_client = true;
+              enable_web_admin = true;
+            }
+          ];
+
+          # Enable sftpd
+          sftpd = {
+            bindings = [{
+              address = ""; # listen on all interfaces
+              port = sftpPort;
+            }];
+            host_keys = [ snakeOilHostKey ];
+            password_authentication = false;
+            keyboard_interactive_authentication = false;
+          };
+        };
+      };
+
+      systemd.services.sftpgo = {
+        after = [ "postgresql.service" ];
+        environment = {
+          # Update existing users
+          SFTPGO_LOADDATA_MODE = "0";
+          SFTPGO_DEFAULT_ADMIN_USERNAME = adminUsername;
+
+          # This will end up in cleartext in the systemd service.
+          # Don't use this approach in production!
+          SFTPGO_DEFAULT_ADMIN_PASSWORD = adminPassword;
+        };
+      };
+
+      # Sets up the folder hierarchy on the local filesystem
+      systemd.tmpfiles.rules =
+        let
+          sftpgoUser = nodes.server.services.sftpgo.user;
+          sftpgoGroup = nodes.server.services.sftpgo.group;
+          statePath = nodes.server.services.sftpgo.dataDir;
+        in [
+          # Create state directory
+          "d ${statePath} 0750 ${sftpgoUser} ${sftpgoGroup} -"
+          "d ${statePath}/users 0750 ${sftpgoUser} ${sftpgoGroup} -"
+
+          # Create the shared folder directory
+          "d ${statePath}/${sharedFolderName} 2770 ${sftpgoUser} ${sharedFolderName}   -"
+        ]
+        ++ lib.mapAttrsToList (name: user:
+          # Create private user directories
+          ''
+            d ${statePath}/users/${user.name} 0700 ${sftpgoUser} ${sftpgoGroup} -
+            d ${statePath}/users/${user.name}/private 0700 ${sftpgoUser} ${sftpgoGroup} -
+          ''
+        ) (normalUsers nodes.server);
+
+      users.users =
+        let
+          commonAttrs = {
+            isNormalUser = true;
+            openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+          };
+        in {
+          # SFTPGo admin user
+          admin = commonAttrs // {
+            password = adminPassword;
+          };
+
+          # Alice and bob share folders with each other
+          alice = commonAttrs // {
+            password = alicePassword;
+            extraGroups = [ sharedFolderName ];
+          };
+
+          bob = commonAttrs // {
+            password = bobPassword;
+            extraGroups = [ sharedFolderName ];
+          };
+
+          # Eve has no shared folders
+          eve = commonAttrs // {
+            password = evePassword;
+          };
+        };
+
+      users.groups.${sharedFolderName} = {};
+
+      specialisation = {
+        # A specialisation for asserting that SFTPGo can bind to privileged ports.
+        privilegedPorts.configuration = { ... }: {
+          networking.firewall.allowedTCPPorts = [ 22 80 ];
+          services.sftpgo = {
+            settings = {
+              sftpd.bindings = lib.mkForce [{
+                address = "";
+                port = 22;
+              }];
+
+              httpd.bindings = lib.mkForce [{
+                address = "";
+                port = 80;
+              }];
+            };
+          };
+        };
+      };
+    };
+
+    client = { nodes, ... }: {
+      # Add the SFTPGo host key to the global known_hosts file
+      programs.ssh.knownHosts =
+        let
+          commonAttrs = {
+            publicKey = "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIE61C7pTXfnLG2u9So+ijNTKaSOg009UrquqNL3fpEu1";
+          };
+        in {
+          "server" = commonAttrs;
+          "[server]:2022" = commonAttrs;
+        };
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    # A function to generate test cases for whether
+    # a specified username is expected to access the shared folder.
+    accessSharedFoldersSubtest =
+      { # The username to run as
+        username
+        # Whether the tests are expected to succeed or not
+      , shouldSucceed ? true
+      }: ''
+        with subtest("Test whether ${username} can access shared folders"):
+            client.${if shouldSucceed then "succeed" else "fail"}("sftp -P ${toString sftpPort} -b ${
+              pkgs.writeText "${username}-ls-${sharedFolderName}" ''
+                ls ${sharedFolderName}
+              ''
+            } ${username}@server")
+      '';
+    statePath = nodes.server.services.sftpgo.dataDir;
+  in ''
+    start_all()
+
+    client.wait_for_unit("default.target")
+    server.wait_for_unit("sftpgo.service")
+
+    with subtest("web client"):
+        client.wait_until_succeeds("curl -sSf http://server:${toString httpPort}/web/client/login")
+
+        # Ensure sftpgo found the static folder
+        client.wait_until_succeeds("curl -o /dev/null -sSf http://server:${toString httpPort}/static/favicon.ico")
+
+    with subtest("Setup SSH keys"):
+        client.succeed("mkdir -m 700 /root/.ssh")
+        client.succeed("cat ${snakeOilPrivateKey} > /root/.ssh/id_ecdsa")
+        client.succeed("chmod 600 /root/.ssh/id_ecdsa")
+
+    with subtest("Copy a file over sftp"):
+        client.wait_until_succeeds("scp -P ${toString sftpPort} ${toString testFile} alice@server:/private/${testFile.name}")
+        server.succeed("test -s ${statePath}/users/alice/private/${testFile.name}")
+
+        # The configured ACL should prevent uploading files to the root directory
+        client.fail("scp -P ${toString sftpPort} ${toString testFile} alice@server:/")
+
+    with subtest("Attempting an interactive SSH sessions must fail"):
+        client.fail("ssh -p ${toString sftpPort} alice@server")
+
+    ${accessSharedFoldersSubtest {
+      username = "alice";
+      shouldSucceed = true;
+    }}
+
+    ${accessSharedFoldersSubtest {
+      username = "bob";
+      shouldSucceed = true;
+    }}
+
+    ${accessSharedFoldersSubtest {
+      username = "eve";
+      shouldSucceed = false;
+    }}
+
+    with subtest("Test sharing files"):
+        # Alice uploads a file to the shared folder
+        client.succeed("scp -P ${toString sftpPort} ${toString sharedFile} alice@server:/${sharedFolderName}/${sharedFile.name}")
+        server.succeed("test -s ${statePath}/${sharedFolderName}/${sharedFile.name}")
+
+        # Bob downloads the file from the shared folder
+        client.succeed("scp -P ${toString sftpPort} bob@server:/shared/${sharedFile.name} ${sharedFile.name}")
+        client.succeed("test -s ${sharedFile.name}")
+
+        # Eve must not be able to get the file from the shared folder
+        client.fail("scp -P ${toString sftpPort} eve@server:/shared/${sharedFile.name}")
+
+    server.succeed("/run/current-system/specialisation/privilegedPorts/bin/switch-to-configuration test")
+
+    client.wait_until_succeeds("sftp -P 22 -b ${pkgs.writeText "get-hello-world.txt" ''
+      get /private/${testFile.name}
+    ''} alice@server")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/sfxr-qt.nix b/nixpkgs/nixos/tests/sfxr-qt.nix
new file mode 100644
index 000000000000..976b9b11fc66
--- /dev/null
+++ b/nixpkgs/nixos/tests/sfxr-qt.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "sfxr-qt";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    sound.enable = true;
+    environment.systemPackages = [ pkgs.sfxr-qt ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      # Add a dummy sound card, or the program won't start
+      machine.execute("modprobe snd-dummy")
+
+      machine.execute("sfxr-qt >&2 &")
+
+      machine.wait_for_window(r"sfxr")
+      machine.sleep(10)
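+      # The OCR occasionally misses the first letter of "Frequency",
+      # so only match on the rest of the word.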
+      machine.wait_for_text("requency")
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/sgt-puzzles.nix b/nixpkgs/nixos/tests/sgt-puzzles.nix
new file mode 100644
index 000000000000..4c5210bfce77
--- /dev/null
+++ b/nixpkgs/nixos/tests/sgt-puzzles.nix
@@ -0,0 +1,34 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+{
+  name = "sgt-puzzles";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ tomfitzhenry ];
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = with pkgs; [
+      sgt-puzzles
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }:
+  ''
+    start_all()
+    machine.wait_for_x()
+
+    machine.execute("mines >&2 &")
+
+    machine.wait_for_window("Mines")
+    machine.wait_for_text("Marked")
+    machine.screenshot("mines")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/shadow.nix b/nixpkgs/nixos/tests/shadow.nix
new file mode 100644
index 000000000000..a027af7e450b
--- /dev/null
+++ b/nixpkgs/nixos/tests/shadow.nix
@@ -0,0 +1,172 @@
+let
+  password1 = "foobar";
+  password2 = "helloworld";
+  password3 = "bazqux";
+  password4 = "asdf123";
+  hashed_bcrypt = "$2b$05$8xIEflrk2RxQtcVXbGIxs.Vl0x7dF1/JSv3cyX6JJt0npzkTCWvxK"; # fnord
+  hashed_yeshash = "$y$j9T$d8Z4EAf8P1SvM/aDFbxMS0$VnTXMp/Hnc7QdCBEaLTq5ZFOAFo2/PM0/xEAFuOE88."; # fnord
+  hashed_sha512crypt = "$6$ymzs8WINZ5wGwQcV$VC2S0cQiX8NVukOLymysTPn4v1zJoJp3NGyhnqyv/dAf4NWZsBWYveQcj6gEJr4ZUjRBRjM0Pj1L8TCQ8hUUp0"; # meow
+in import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "shadow";
+  meta = with pkgs.lib.maintainers; { maintainers = [ nequissimus ]; };
+
+  nodes.shadow = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.shadow ];
+
+    users = {
+      mutableUsers = true;
+      users.emma = {
+        isNormalUser = true;
+        password = password1;
+        shell = pkgs.bash;
+      };
+      users.layla = {
+        isNormalUser = true;
+        password = password2;
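+        # pkgs.shadow resolves to its nologin shell; exercised in the
+        # "nologin shell" subtest below.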
+        shell = pkgs.shadow;
+      };
+      users.ash = {
+        isNormalUser = true;
+        password = password4;
+        shell = pkgs.bash;
+      };
+      users.berta = {
+        isNormalUser = true;
+        hashedPasswordFile = (pkgs.writeText "hashed_bcrypt" hashed_bcrypt).outPath;
+        shell = pkgs.bash;
+      };
+      users.yesim = {
+        isNormalUser = true;
+        hashedPassword = hashed_yeshash;
+        shell = pkgs.bash;
+      };
+      users.leo = {
+        isNormalUser = true;
+        initialHashedPassword = "!";
+        hashedPassword = hashed_sha512crypt; # should take precedence over initialHashedPassword
+        shell = pkgs.bash;
+      };
+    };
+  };
+
+  testScript = ''
+    shadow.wait_for_unit("multi-user.target")
+    shadow.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+
+    with subtest("Normal login"):
+        shadow.send_key("alt-f2")
+        shadow.wait_until_succeeds("[ $(fgconsole) = 2 ]")
+        shadow.wait_for_unit("getty@tty2.service")
+        shadow.wait_until_succeeds("pgrep -f 'agetty.*tty2'")
+        shadow.wait_until_tty_matches("2", "login: ")
+        shadow.send_chars("emma\n")
+        shadow.wait_until_tty_matches("2", "login: emma")
+        shadow.wait_until_succeeds("pgrep login")
+        shadow.sleep(2)
+        shadow.send_chars("${password1}\n")
+        shadow.send_chars("whoami > /tmp/1\n")
+        shadow.wait_for_file("/tmp/1")
+        assert "emma" in shadow.succeed("cat /tmp/1")
+
+    with subtest("Switch user"):
+        shadow.send_chars("su - ash\n")
+        shadow.sleep(2)
+        shadow.send_chars("${password4}\n")
+        shadow.sleep(2)
+        shadow.send_chars("whoami > /tmp/3\n")
+        shadow.wait_for_file("/tmp/3")
+        assert "ash" in shadow.succeed("cat /tmp/3")
+
+    with subtest("Change password"):
+        shadow.send_key("alt-f3")
+        shadow.wait_until_succeeds("[ $(fgconsole) = 3 ]")
+        shadow.wait_for_unit("getty@tty3.service")
+        shadow.wait_until_succeeds("pgrep -f 'agetty.*tty3'")
+        shadow.wait_until_tty_matches("3", "login: ")
+        shadow.send_chars("emma\n")
+        shadow.wait_until_tty_matches("3", "login: emma")
+        shadow.wait_until_succeeds("pgrep login")
+        shadow.sleep(2)
+        shadow.send_chars("${password1}\n")
+        shadow.send_chars("passwd\n")
+        shadow.sleep(2)
+        shadow.send_chars("${password1}\n")
+        shadow.sleep(2)
+        shadow.send_chars("${password3}\n")
+        shadow.sleep(2)
+        shadow.send_chars("${password3}\n")
+        shadow.sleep(2)
+        shadow.send_key("alt-f4")
+        shadow.wait_until_succeeds("[ $(fgconsole) = 4 ]")
+        shadow.wait_for_unit("getty@tty4.service")
+        shadow.wait_until_succeeds("pgrep -f 'agetty.*tty4'")
+        shadow.wait_until_tty_matches("4", "login: ")
+        shadow.send_chars("emma\n")
+        shadow.wait_until_tty_matches("4", "login: emma")
+        shadow.wait_until_succeeds("pgrep login")
+        shadow.sleep(2)
+        shadow.send_chars("${password1}\n")
+        shadow.wait_until_tty_matches("4", "Login incorrect")
+        shadow.wait_until_tty_matches("4", "login:")
+        shadow.send_chars("emma\n")
+        shadow.wait_until_tty_matches("4", "login: emma")
+        shadow.wait_until_succeeds("pgrep login")
+        shadow.sleep(2)
+        shadow.send_chars("${password3}\n")
+        shadow.send_chars("whoami > /tmp/2\n")
+        shadow.wait_for_file("/tmp/2")
+        assert "emma" in shadow.succeed("cat /tmp/2")
+
+    with subtest("Groups"):
+        assert "foobar" not in shadow.succeed("groups emma")
+        shadow.succeed("groupadd foobar")
+        shadow.succeed("usermod -a -G foobar emma")
+        assert "foobar" in shadow.succeed("groups emma")
+
+    with subtest("nologin shell"):
+        shadow.send_key("alt-f5")
+        shadow.wait_until_succeeds("[ $(fgconsole) = 5 ]")
+        shadow.wait_for_unit("getty@tty5.service")
+        shadow.wait_until_succeeds("pgrep -f 'agetty.*tty5'")
+        shadow.wait_until_tty_matches("5", "login: ")
+        shadow.send_chars("layla\n")
+        shadow.wait_until_tty_matches("5", "login: layla")
+        shadow.wait_until_succeeds("pgrep login")
+        shadow.send_chars("${password2}\n")
+        shadow.wait_until_tty_matches("5", "login:")
+
+    with subtest("check alternate password hashes"):
+        shadow.send_key("alt-f6")
+        shadow.wait_until_succeeds("[ $(fgconsole) = 6 ]")
+        for u in ["berta", "yesim"]:
+            shadow.wait_for_unit("getty@tty6.service")
+            shadow.wait_until_succeeds("pgrep -f 'agetty.*tty6'")
+            shadow.wait_until_tty_matches("6", "login: ")
+            shadow.send_chars(f"{u}\n")
+            shadow.wait_until_tty_matches("6", f"login: {u}")
+            shadow.wait_until_succeeds("pgrep login")
+            shadow.sleep(2)
+            shadow.send_chars("fnord\n")
+            shadow.send_chars(f"whoami > /tmp/{u}\n")
+            shadow.wait_for_file(f"/tmp/{u}")
+            print(shadow.succeed(f"cat /tmp/{u}"))
+            assert u in shadow.succeed(f"cat /tmp/{u}")
+            shadow.send_chars("logout\n")
+
+    with subtest("Ensure hashedPassword does not get overridden by initialHashedPassword"):
+        shadow.send_key("alt-f6")
+        shadow.wait_until_succeeds("[ $(fgconsole) = 6 ]")
+        shadow.wait_for_unit("getty@tty6.service")
+        shadow.wait_until_succeeds("pgrep -f 'agetty.*tty6'")
+        shadow.wait_until_tty_matches("6", "login: ")
+        shadow.send_chars("leo\n")
+        shadow.wait_until_tty_matches("6", "login: leo")
+        shadow.wait_until_succeeds("pgrep login")
+        shadow.sleep(2)
+        shadow.send_chars("meow\n")
+        shadow.send_chars("whoami > /tmp/leo\n")
+        shadow.wait_for_file("/tmp/leo")
+        assert "leo" in shadow.succeed("cat /tmp/leo")
+        shadow.send_chars("logout\n")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/shadowsocks/common.nix b/nixpkgs/nixos/tests/shadowsocks/common.nix
new file mode 100644
index 000000000000..82a63771b03a
--- /dev/null
+++ b/nixpkgs/nixos/tests/shadowsocks/common.nix
@@ -0,0 +1,85 @@
+{ name
+, plugin ? null
+, pluginOpts ? ""
+}:
+
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
+    inherit name;
+    meta = {
+      maintainers = with lib.maintainers; [ hmenke ];
+    };
+
+    nodes = {
+      server = {
+        boot.kernel.sysctl."net.ipv4.ip_forward" = "1";
+        networking.useDHCP = false;
+        networking.interfaces.eth1.ipv4.addresses = [
+          { address = "192.168.0.1"; prefixLength = 24; }
+        ];
+        networking.firewall.rejectPackets = true;
+        networking.firewall.allowedTCPPorts = [ 8488 ];
+        networking.firewall.allowedUDPPorts = [ 8488 ];
+        services.shadowsocks = {
+          enable = true;
+          encryptionMethod = "chacha20-ietf-poly1305";
+          password = "pa$$w0rd";
+          localAddress = [ "0.0.0.0" ];
+          port = 8488;
+          fastOpen = false;
+          mode = "tcp_and_udp";
+        } // lib.optionalAttrs (plugin != null) {
+          inherit plugin;
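+          # The leading "server" option makes the SIP003 plugin run in server mode.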
+          pluginOpts = "server;${pluginOpts}";
+        };
+        services.nginx = {
+          enable = true;
+          virtualHosts.server = {
+            locations."/".root = pkgs.writeTextDir "index.html" "It works!";
+          };
+        };
+      };
+
+      client = {
+        networking.useDHCP = false;
+        networking.interfaces.eth1.ipv4.addresses = [
+          { address = "192.168.0.2"; prefixLength = 24; }
+        ];
+        systemd.services.shadowsocks-client = {
+          description = "connect to shadowsocks";
+          after = [ "network.target" ];
+          wantedBy = [ "multi-user.target" ];
+          path = with pkgs; [ shadowsocks-libev ];
+          script = ''
+            exec ss-local \
+                -s 192.168.0.1 \
+                -p 8488 \
+                -l 1080 \
+                -k 'pa$$w0rd' \
+                -m chacha20-ietf-poly1305 \
+                -a nobody \
+                ${lib.optionalString (plugin != null) ''
+                  --plugin "${plugin}" --plugin-opts "${pluginOpts}"
+                ''}
+          '';
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      server.wait_for_unit("shadowsocks-libev.service")
+      server.wait_for_unit("nginx.service")
+      client.wait_for_unit("shadowsocks-client.service")
+
+      client.fail(
+          "${pkgs.curl}/bin/curl 192.168.0.1:80"
+      )
+
+      msg = client.succeed(
+          "${pkgs.curl}/bin/curl --socks5 localhost:1080 192.168.0.1:80"
+      )
+      assert msg == "It works!", "Could not connect through shadowsocks"
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/shadowsocks/default.nix b/nixpkgs/nixos/tests/shadowsocks/default.nix
new file mode 100644
index 000000000000..37a8c3c9d0d3
--- /dev/null
+++ b/nixpkgs/nixos/tests/shadowsocks/default.nix
@@ -0,0 +1,16 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+}:
+
+{
+  "basic" = import ./common.nix {
+    name = "basic";
+  };
+
+  "v2ray-plugin" = import ./common.nix {
+    name = "v2ray-plugin";
+    plugin = "${pkgs.shadowsocks-v2ray-plugin}/bin/v2ray-plugin";
+    pluginOpts = "host=nixos.org";
+  };
+}
diff --git a/nixpkgs/nixos/tests/shattered-pixel-dungeon.nix b/nixpkgs/nixos/tests/shattered-pixel-dungeon.nix
new file mode 100644
index 000000000000..b4ac1670b5ca
--- /dev/null
+++ b/nixpkgs/nixos/tests/shattered-pixel-dungeon.nix
@@ -0,0 +1,28 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "shattered-pixel-dungeon";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    sound.enable = true;
+    environment.systemPackages = [ pkgs.shattered-pixel-dungeon ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.execute("shattered-pixel-dungeon >&2 &")
+      machine.wait_for_window(r"Shattered Pixel Dungeon")
+      machine.wait_for_text("Enter")
+      machine.screenshot("screen")
+    '';
+})
+
diff --git a/nixpkgs/nixos/tests/shiori.nix b/nixpkgs/nixos/tests/shiori.nix
new file mode 100644
index 000000000000..d0f68b903f8c
--- /dev/null
+++ b/nixpkgs/nixos/tests/shiori.nix
@@ -0,0 +1,80 @@
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+
+{
+  name = "shiori";
+  meta.maintainers = with lib.maintainers; [ minijackson ];
+
+  nodes.machine =
+    { ... }:
+    { services.shiori.enable = true; };
+
+  testScript = let
+    authJSON = pkgs.writeText "auth.json" (builtins.toJSON {
+      username = "shiori";
+      password = "gopher";
+      owner = true;
+    });
+
+    insertBookmark = {
+      url = "http://example.org";
+      title = "Example Bookmark";
+    };
+
+    insertBookmarkJSON = pkgs.writeText "insertBookmark.json" (builtins.toJSON insertBookmark);
+  in ''
+    import json
+
+    machine.wait_for_unit("shiori.service")
+    machine.wait_for_open_port(8080)
+    machine.succeed("curl --fail http://localhost:8080/")
+    machine.succeed("curl --fail --location http://localhost:8080/ | grep -i shiori")
+
+    with subtest("login"):
+        auth_json = machine.succeed(
+            "curl --fail --location http://localhost:8080/api/login "
+            "-X POST -H 'Content-Type:application/json' -d @${authJSON}"
+        )
+        auth_ret = json.loads(auth_json)
+        session_id = auth_ret["session"]
+
+    with subtest("bookmarks"):
+        with subtest("first use no bookmarks"):
+            bookmarks_json = machine.succeed(
+                (
+                    "curl --fail --location http://localhost:8080/api/bookmarks "
+                    "-H 'X-Session-Id:{}'"
+                ).format(session_id)
+            )
+
+            if json.loads(bookmarks_json)["bookmarks"] != []:
+                raise Exception("Shiori have a bookmark on first use")
+
+        with subtest("insert bookmark"):
+            machine.succeed(
+                (
+                    "curl --fail --location http://localhost:8080/api/bookmarks "
+                    "-X POST -H 'X-Session-Id:{}' "
+                    "-H 'Content-Type:application/json' -d @${insertBookmarkJSON}"
+                ).format(session_id)
+            )
+
+        with subtest("get inserted bookmark"):
+            bookmarks_json = machine.succeed(
+                (
+                    "curl --fail --location http://localhost:8080/api/bookmarks "
+                    "-H 'X-Session-Id:{}'"
+                ).format(session_id)
+            )
+
+            bookmarks = json.loads(bookmarks_json)["bookmarks"]
+            if len(bookmarks) != 1:
+                raise Exception("Shiori didn't save the bookmark")
+
+            bookmark = bookmarks[0]
+            if (
+                bookmark["url"] != "${insertBookmark.url}"
+                or bookmark["title"] != "${insertBookmark.title}"
+            ):
+                raise Exception("Inserted bookmark doesn't have same URL or title")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/signal-desktop.nix b/nixpkgs/nixos/tests/signal-desktop.nix
new file mode 100644
index 000000000000..f146804a958d
--- /dev/null
+++ b/nixpkgs/nixos/tests/signal-desktop.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  sqlcipher-signal = pkgs.writeShellScriptBin "sqlcipher" ''
+    set -eu
+
+    readonly CFG=~/.config/Signal/config.json
+    readonly KEY="$(${pkgs.jq}/bin/jq --raw-output '.key' $CFG)"
+    readonly DB="$1"
+    readonly SQL="SELECT * FROM sqlite_master where type='table'"
+    ${pkgs.sqlcipher}/bin/sqlcipher "$DB" "PRAGMA key = \"x'$KEY'\"; $SQL"
+  '';
+in {
+  name = "signal-desktop";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ flokli primeos ];
+  };
+
+  nodes.machine = { ... }:
+
+  {
+    imports = [
+      ./common/user-account.nix
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    test-support.displayManager.auto.user = "alice";
+    environment.systemPackages = with pkgs; [
+      signal-desktop file sqlite sqlcipher-signal
+    ];
+  };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    start_all()
+    machine.wait_for_x()
+
+    # start signal desktop
+    machine.execute("su - alice -c signal-desktop >&2 &")
+
+    # Wait for the Signal window to appear. Since the tests are usually
+    # run sandboxed and therefore without internet access, we cannot wait
+    # for the message "Link your phone ...". Nor should we wait for
+    # the "Failed to connect to server" message, because when this test
+    # is run manually it will not be sandboxed.
+    machine.wait_for_text("Signal")
+    machine.wait_for_text("File Edit View Window Help")
+    machine.screenshot("signal_desktop")
+
+    # Test if the database is encrypted to prevent these issues:
+    # - https://github.com/NixOS/nixpkgs/issues/108772
+    # - https://github.com/NixOS/nixpkgs/pull/117555
+    print(machine.succeed("su - alice -c 'file ~/.config/Signal/sql/db.sqlite'"))
+    machine.fail(
+        "su - alice -c 'file ~/.config/Signal/sql/db.sqlite' | grep -e SQLite -e database"
+    )
+    # Only SQLCipher should be able to read the encrypted DB:
+    machine.fail(
+        "su - alice -c 'sqlite3 ~/.config/Signal/sql/db.sqlite .tables'"
+    )
+    print(machine.succeed(
+        "su - alice -c 'sqlcipher ~/.config/Signal/sql/db.sqlite'"
+    ))
+  '';
+})
diff --git a/nixpkgs/nixos/tests/simple.nix b/nixpkgs/nixos/tests/simple.nix
new file mode 100644
index 000000000000..c36287b4e843
--- /dev/null
+++ b/nixpkgs/nixos/tests/simple.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "simple";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.shutdown()
+    '';
+})
diff --git a/nixpkgs/nixos/tests/sing-box.nix b/nixpkgs/nixos/tests/sing-box.nix
new file mode 100644
index 000000000000..582d594be3fd
--- /dev/null
+++ b/nixpkgs/nixos/tests/sing-box.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+
+  name = "sing-box";
+
+  meta = {
+    maintainers = with lib.maintainers; [ nickcao ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.nginx = {
+      enable = true;
+      statusPage = true;
+    };
+    services.sing-box = {
+      enable = true;
+      settings = {
+        inbounds = [{
+          type = "mixed";
+          tag = "inbound";
+          listen = "127.0.0.1";
+          listen_port = 1080;
+          users = [{
+            username = "user";
+            password = { _secret = pkgs.writeText "password" "supersecret"; };
+          }];
+        }];
+        outbounds = [{
+          type = "direct";
+          tag = "outbound";
+        }];
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_unit("sing-box.service")
+
+    machine.wait_for_open_port(80)
+    machine.wait_for_open_port(1080)
+
+    machine.succeed("curl --fail --max-time 10 --proxy http://user:supersecret@localhost:1080 http://localhost")
+    machine.fail("curl --fail --max-time 10 --proxy http://user:supervillain@localhost:1080 http://localhost")
+    machine.succeed("curl --fail --max-time 10 --proxy socks5://user:supersecret@localhost:1080 http://localhost")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/slimserver.nix b/nixpkgs/nixos/tests/slimserver.nix
new file mode 100644
index 000000000000..c3f7b6fde4de
--- /dev/null
+++ b/nixpkgs/nixos/tests/slimserver.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "slimserver";
+  meta.maintainers = with pkgs.lib.maintainers; [ adamcstephens ];
+
+  nodes.machine = { ... }: {
+    services.slimserver.enable = true;
+    services.squeezelite = {
+      enable = true;
+      extraArguments = "-s 127.0.0.1 -d slimproto=info";
+    };
+    sound.enable = true;
+    boot.initrd.kernelModules = ["snd-dummy"];
+  };
+
+  testScript =
+    ''
+      import json
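+      # JSON-RPC request asking player 0 for its id, i.e. the player's MAC address.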
+      rpc_get_player = {
+          "id": 1,
+          "method": "slim.request",
+          "params":[0,["player", "id", "0", "?"]]
+      }
+
+      with subtest("slimserver is started"):
+          machine.wait_for_unit("slimserver.service")
+          # give slimserver a moment to report errors
+          machine.sleep(2)
+
+      with subtest('slimserver module errors are not reported'):
+          machine.fail("journalctl -u slimserver.service | grep 'throw_exception'")
+          machine.fail("journalctl -u slimserver.service | grep 'not installed'")
+          machine.fail("journalctl -u slimserver.service | grep 'not found'")
+          machine.fail("journalctl -u slimserver.service | grep 'The following CPAN modules were found but cannot work with Logitech Media Server'")
+          machine.fail("journalctl -u slimserver.service | grep 'please use the buildme.sh'")
+
+      with subtest('slimserver is ready'):
+          machine.wait_for_open_port(9000)
+          machine.wait_until_succeeds("journalctl -u slimserver.service | grep 'Completed dbOptimize Scan'")
+
+      with subtest("squeezelite player successfully connects to slimserver"):
+          machine.wait_for_unit("squeezelite.service")
+          machine.wait_until_succeeds("journalctl -u squeezelite.service | grep 'slimproto:937 connected'")
+          player_mac = machine.wait_until_succeeds("journalctl -eu squeezelite.service | grep 'sendHELO:148 mac:'").strip().split(" ")[-1]
+          player_id = machine.succeed(f"curl http://localhost:9000/jsonrpc.js -g -X POST -d '{json.dumps(rpc_get_player)}'")
+          assert player_mac == json.loads(player_id)["result"]["_id"], "squeezelite player not found"
+    '';
+})
diff --git a/nixpkgs/nixos/tests/slurm.nix b/nixpkgs/nixos/tests/slurm.nix
new file mode 100644
index 000000000000..a6b02e970b0c
--- /dev/null
+++ b/nixpkgs/nixos/tests/slurm.nix
@@ -0,0 +1,168 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+let
+    slurmconfig = {
+      services.slurm = {
+        controlMachine = "control";
+        nodeName = [ "node[1-3] CPUs=1 State=UNKNOWN" ];
+        partitionName = [ "debug Nodes=node[1-3] Default=YES MaxTime=INFINITE State=UP" ];
+        extraConfig = ''
+          AccountingStorageHost=dbd
+          AccountingStorageType=accounting_storage/slurmdbd
+        '';
+      };
+      environment.systemPackages = [ mpitest ];
+      networking.firewall.enable = false;
+      systemd.tmpfiles.rules = [
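+        # All slurm daemons authenticate to each other through a shared munge key.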
+        "f /etc/munge/munge.key 0400 munge munge - mungeverryweakkeybuteasytointegratoinatest"
+      ];
+    };
+
+    mpitest = let
+      mpitestC = pkgs.writeText "mpitest.c" ''
+        #include <stdio.h>
+        #include <stdlib.h>
+        #include <mpi.h>
+
+        int
+        main (int argc, char *argv[])
+        {
+          int rank, size, length;
+          char name[512];
+
+          MPI_Init (&argc, &argv);
+          MPI_Comm_rank (MPI_COMM_WORLD, &rank);
+          MPI_Comm_size (MPI_COMM_WORLD, &size);
+          MPI_Get_processor_name (name, &length);
+
+          if ( rank == 0 ) printf("size=%d\n", size);
+
+          printf ("%s: hello world from process %d of %d\n", name, rank, size);
+
+          MPI_Finalize ();
+
+          return EXIT_SUCCESS;
+        }
+      '';
+    in pkgs.runCommand "mpitest" {} ''
+      mkdir -p $out/bin
+      ${pkgs.openmpi}/bin/mpicc ${mpitestC} -o $out/bin/mpitest
+    '';
+in {
+  name = "slurm";
+
+  meta.maintainers = [ lib.maintainers.markuskowa ];
+
+  nodes =
+    let
+    computeNode =
+      { ...}:
+      {
+        imports = [ slurmconfig ];
+        # TODO: the slurmd and slurmctld ports should be configuration options
+        # and automatically allowed by the firewall.
+        services.slurm = {
+          client.enable = true;
+        };
+      };
+    in {
+
+    control =
+      { ...}:
+      {
+        imports = [ slurmconfig ];
+        services.slurm = {
+          server.enable = true;
+        };
+      };
+
+    submit =
+      { ...}:
+      {
+        imports = [ slurmconfig ];
+        services.slurm = {
+          enableStools = true;
+        };
+      };
+
+    dbd =
+      { pkgs, ... } :
+      let
+        passFile = pkgs.writeText "dbdpassword" "password123";
+      in {
+        networking.firewall.enable = false;
+        systemd.tmpfiles.rules = [
+          "f /etc/munge/munge.key 0400 munge munge - mungeverryweakkeybuteasytointegratoinatest"
+        ];
+        services.slurm.dbdserver = {
+          enable = true;
+          storagePassFile = "${passFile}";
+        };
+        services.mysql = {
+          enable = true;
+          package = pkgs.mariadb;
+          initialScript = pkgs.writeText "mysql-init.sql" ''
+            CREATE USER 'slurm'@'localhost' IDENTIFIED BY 'password123';
+            GRANT ALL PRIVILEGES ON slurm_acct_db.* TO 'slurm'@'localhost';
+          '';
+          ensureDatabases = [ "slurm_acct_db" ];
+          ensureUsers = [{
+            ensurePermissions = { "slurm_acct_db.*" = "ALL PRIVILEGES"; };
+            name = "slurm";
+          }];
+          settings.mysqld = {
+            # recommendations from: https://slurm.schedmd.com/accounting.html#mysql-configuration
+            innodb_buffer_pool_size="1024M";
+            innodb_log_file_size="64M";
+            innodb_lock_wait_timeout=900;
+          };
+        };
+      };
+
+    node1 = computeNode;
+    node2 = computeNode;
+    node3 = computeNode;
+  };
+
+
+  testScript =
+  ''
+  start_all()
+
+  # Make sure DBD is up after DB initialization
+  with subtest("can_start_slurmdbd"):
+      dbd.succeed("systemctl restart slurmdbd")
+      dbd.wait_for_unit("slurmdbd.service")
+      dbd.wait_for_open_port(6819)
+
+  # there needs to be an entry for the current
+  # cluster in the database before slurmctld is restarted
+  with subtest("add_account"):
+      control.succeed("sacctmgr -i add cluster default")
+      # check for cluster entry
+      control.succeed("sacctmgr list cluster | awk '{ print $1 }' | grep default")
+
+  with subtest("can_start_slurmctld"):
+      control.succeed("systemctl restart slurmctld")
+      control.wait_for_unit("slurmctld.service")
+
+  with subtest("can_start_slurmd"):
+      for node in [node1, node2, node3]:
+          node.succeed("systemctl restart slurmd.service")
+          node.wait_for_unit("slurmd")
+
+  # Test that the cluster works and can distribute jobs.
+
+  with subtest("run_distributed_command"):
+      # Run `hostname` on 3 nodes of the partition (so on all 3 nodes).
+      # The output must contain the 3 different names
+      submit.succeed("srun -N 3 hostname | sort | uniq | wc -l | xargs test 3 -eq")
+
+      with subtest("check_slurm_dbd"):
+          # find the srun job from above in the database
+          control.succeed("sleep 5")
+          control.succeed("sacct | grep hostname")
+
+  with subtest("run_PMIx_mpitest"):
+      submit.succeed("srun -N 3 --mpi=pmix mpitest | grep size=3")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/smokeping.nix b/nixpkgs/nixos/tests/smokeping.nix
new file mode 100644
index 000000000000..04f813964291
--- /dev/null
+++ b/nixpkgs/nixos/tests/smokeping.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "smokeping";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ cransom ];
+  };
+
+  nodes = {
+    sm =
+      { ... }:
+      {
+        networking.domain = "example.com"; # FQDN: sm.example.com
+        services.smokeping = {
+          enable = true;
+          port = 8081;
+          mailHost = "127.0.0.2";
+          probeConfig = ''
+            + FPing
+            binary = /run/wrappers/bin/fping
+            offset = 0%
+          '';
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    sm.wait_for_unit("smokeping")
+    sm.wait_for_unit("thttpd")
+    sm.wait_for_file("/var/lib/smokeping/data/Local/LocalMachine.rrd")
+    sm.succeed("curl -s -f localhost:8081/smokeping.fcgi?target=Local")
+    # Check that there's a helpful page without explicit path as well.
+    sm.succeed("curl -s -f localhost:8081")
+    sm.succeed("ls /var/lib/smokeping/cache/Local/LocalMachine_mini.png")
+    sm.succeed("ls /var/lib/smokeping/cache/index.html")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/snapcast.nix b/nixpkgs/nixos/tests/snapcast.nix
new file mode 100644
index 000000000000..9b62e4724e75
--- /dev/null
+++ b/nixpkgs/nixos/tests/snapcast.nix
@@ -0,0 +1,90 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  port = 10004;
+  tcpPort = 10005;
+  httpPort = 10080;
+  tcpStreamPort = 10006;
+  bufferSize = 742;
+in {
+  name = "snapcast";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hexa ];
+  };
+
+  nodes = {
+    server = {
+      services.snapserver = {
+        enable = true;
+        port = port;
+        tcp.port = tcpPort;
+        http.port = httpPort;
+        openFirewall = true;
+        buffer = bufferSize;
+        streams = {
+          mpd = {
+            type = "pipe";
+            location = "/run/snapserver/mpd";
+            query.mode = "create";
+          };
+          bluetooth = {
+            type = "pipe";
+            location = "/run/snapserver/bluetooth";
+          };
+          tcp = {
+            type = "tcp";
+            location = "127.0.0.1:${toString tcpStreamPort}";
+          };
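+          # A meta stream plays the first of the listed source streams that is active.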
+          meta = {
+            type = "meta";
+            location = "/mpd/bluetooth/tcp";
+          };
+        };
+      };
+      environment.systemPackages = [ pkgs.snapcast ];
+    };
+    client = {
+      environment.systemPackages = [ pkgs.snapcast ];
+    };
+  };
+
+  testScript = ''
+    import json
+
+    get_rpc_version = {"id": "1", "jsonrpc": "2.0", "method": "Server.GetRPCVersion"}
+
+    start_all()
+
+    server.wait_for_unit("snapserver.service")
+    server.wait_until_succeeds("ss -ntl | grep -q ${toString port}")
+    server.wait_until_succeeds("ss -ntl | grep -q ${toString tcpPort}")
+    server.wait_until_succeeds("ss -ntl | grep -q ${toString httpPort}")
+    server.wait_until_succeeds("ss -ntl | grep -q ${toString tcpStreamPort}")
+
+    with subtest("check that pipes are created"):
+        server.succeed("test -p /run/snapserver/mpd")
+        server.succeed("test -p /run/snapserver/bluetooth")
+
+    with subtest("test tcp json-rpc"):
+        server.succeed(f"echo '{json.dumps(get_rpc_version)}' | nc -w 1 localhost ${toString tcpPort}")
+
+    with subtest("test http json-rpc"):
+        server.succeed(
+            "curl --fail http://localhost:${toString httpPort}/jsonrpc -d '{json.dumps(get_rpc_version)}'"
+        )
+
+    with subtest("test a ipv6 connection"):
+        server.execute("systemd-run --unit=snapcast-local-client snapclient -h ::1 -p ${toString port}")
+        server.wait_until_succeeds(
+            "journalctl -o cat -u snapserver.service | grep -q 'Hello from'"
+        )
+        server.wait_until_succeeds("journalctl -o cat -u snapcast-local-client | grep -q 'buffer: ${toString bufferSize}'")
+
+    with subtest("test a connection"):
+        client.execute("systemd-run --unit=snapcast-client snapclient -h server -p ${toString port}")
+        server.wait_until_succeeds(
+            "journalctl -o cat -u snapserver.service | grep -q 'Hello from'"
+        )
+        client.wait_until_succeeds("journalctl -o cat -u snapcast-client | grep -q 'buffer: ${toString bufferSize}'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/snapper.nix b/nixpkgs/nixos/tests/snapper.nix
new file mode 100644
index 000000000000..674523584fda
--- /dev/null
+++ b/nixpkgs/nixos/tests/snapper.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ ... }:
+{
+  name = "snapper";
+
+  nodes.machine = { pkgs, lib, ... }: {
+    boot.initrd.postDeviceCommands = ''
+      ${pkgs.btrfs-progs}/bin/mkfs.btrfs -f -L aux /dev/vdb
+    '';
+
+    virtualisation.emptyDiskImages = [ 4096 ];
+
+    virtualisation.fileSystems = {
+      "/home" = {
+        device = "/dev/disk/by-label/aux";
+        fsType = "btrfs";
+      };
+    };
+    services.snapper.configs.home.SUBVOLUME = "/home";
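+    # Exclude paths under /nix from snapshot comparisons.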
+    services.snapper.filters = "/nix";
+  };
+
+  testScript = ''
+    machine.succeed("btrfs subvolume create /home/.snapshots")
+    machine.succeed("snapper -c home list")
+    machine.succeed("snapper -c home create --description empty")
+    machine.succeed("echo test > /home/file")
+    machine.succeed("snapper -c home create --description file")
+    machine.succeed("snapper -c home status 1..2")
+    machine.succeed("snapper -c home undochange 1..2")
+    machine.fail("ls /home/file")
+    machine.succeed("snapper -c home delete 2")
+    machine.succeed("systemctl --wait start snapper-timeline.service")
+    machine.succeed("systemctl --wait start snapper-cleanup.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/soapui.nix b/nixpkgs/nixos/tests/soapui.nix
new file mode 100644
index 000000000000..3a2d11a16756
--- /dev/null
+++ b/nixpkgs/nixos/tests/soapui.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "soapui";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+
+    environment.systemPackages = [ pkgs.soapui ];
+  };
+
+  testScript = ''
+    machine.wait_for_x()
+    machine.succeed("soapui >&2 &")
+    machine.wait_for_window(r"SoapUI \d+\.\d+\.\d+")
+    machine.sleep(1)
+    machine.screenshot("soapui")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/soft-serve.nix b/nixpkgs/nixos/tests/soft-serve.nix
new file mode 100644
index 000000000000..1c4cb4c95819
--- /dev/null
+++ b/nixpkgs/nixos/tests/soft-serve.nix
@@ -0,0 +1,102 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+  sshPort = 8231;
+  httpPort = 8232;
+  statsPort = 8233;
+  gitPort = 8418;
+in
+{
+  name = "soft-serve";
+  meta.maintainers = with lib.maintainers; [ dadada ];
+  nodes = {
+    client = { pkgs, ... }: {
+      environment.systemPackages = with pkgs; [
+        curl
+        git
+        openssh
+      ];
+      environment.etc.sshKey = {
+        source = snakeOilPrivateKey;
+        mode = "0600";
+      };
+    };
+
+    server =
+      { config, ... }:
+      {
+        services.soft-serve = {
+          enable = true;
+          settings = {
+            name = "TestServer";
+            ssh.listen_addr = ":${toString sshPort}";
+            git.listen_addr = ":${toString gitPort}";
+            http.listen_addr = ":${toString httpPort}";
+            stats.listen_addr = ":${toString statsPort}";
+            initial_admin_keys = [ snakeOilPublicKey ];
+          };
+        };
+        networking.firewall.allowedTCPPorts = [ sshPort httpPort statsPort ];
+      };
+  };
+
+  testScript =
+    { ... }:
+    ''
+      SSH_PORT = ${toString sshPort}
+      HTTP_PORT = ${toString httpPort}
+      STATS_PORT = ${toString statsPort}
+      KEY = "${snakeOilPublicKey}"
+      SSH_KEY = "/etc/sshKey"
+      SSH_COMMAND = f"ssh -p {SSH_PORT} -i {SSH_KEY} -o StrictHostKeyChecking=no"
+      TEST_DIR = "/tmp/test"
+      GIT = f"git -C {TEST_DIR}"
+
+      for machine in client, server:
+          machine.wait_for_unit("network.target")
+
+      server.wait_for_unit("soft-serve.service")
+      server.wait_for_open_port(SSH_PORT)
+
+      with subtest("Get info"):
+          status, test = client.execute(f"{SSH_COMMAND} server info")
+          if status != 0:
+              raise Exception("Failed to get SSH info")
+          key = " ".join(KEY.split(" ")[0:2])
+          if key not in test:
+              raise Exception("Admin key must be configured correctly")
+
+      with subtest("Create user"):
+          client.succeed(f"{SSH_COMMAND} server user create beatrice")
+          client.succeed(f"{SSH_COMMAND} server user info beatrice")
+
+      with subtest("Create repo"):
+          client.succeed(f"git init {TEST_DIR}")
+          client.succeed(f"{GIT} config --global user.email you@example.com")
+          client.succeed(f"touch {TEST_DIR}/foo")
+          client.succeed(f"{GIT} add foo")
+          client.succeed(f"{GIT} commit --allow-empty -m test")
+          client.succeed(f"{GIT} remote add origin git@server:test")
+          client.succeed(f"GIT_SSH_COMMAND='{SSH_COMMAND}' {GIT} push -u origin master")
+          client.execute("rm -r /tmp/test")
+
+      server.wait_for_open_port(HTTP_PORT)
+
+      with subtest("Clone over HTTP"):
+          client.succeed(f"curl --connect-timeout 10 http://server:{HTTP_PORT}/")
+          client.succeed(f"git clone http://server:{HTTP_PORT}/test /tmp/test")
+          client.execute("rm -r /tmp/test")
+
+      with subtest("Clone over SSH"):
+          client.succeed(f"GIT_SSH_COMMAND='{SSH_COMMAND}' git clone git@server:test /tmp/test")
+          client.execute("rm -r /tmp/test")
+
+      with subtest("Get stats over HTTP"):
+          server.wait_for_open_port(STATS_PORT)
+          status, test = client.execute(f"curl --connect-timeout 10 http://server:{STATS_PORT}/metrics")
+          if status != 0:
+              raise Exception("Failed to get metrics from status port")
+          if not "go_gc_duration_seconds_count" in test:
+              raise Exception("Metrics did not contain key 'go_gc_duration_seconds_count'")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/sogo.nix b/nixpkgs/nixos/tests/sogo.nix
new file mode 100644
index 000000000000..acdad8d0f473
--- /dev/null
+++ b/nixpkgs/nixos/tests/sogo.nix
@@ -0,0 +1,58 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "sogo";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ajs124 das_j ];
+  };
+
+  nodes = {
+    sogo = { config, pkgs, ... }: {
+      services.nginx.enable = true;
+
+      services.mysql = {
+        enable = true;
+        package = pkgs.mariadb;
+        ensureDatabases = [ "sogo" ];
+        ensureUsers = [{
+          name = "sogo";
+          ensurePermissions = {
+            "sogo.*" = "ALL PRIVILEGES";
+          };
+        }];
+      };
+
+      services.sogo = {
+        enable = true;
+        timezone = "Europe/Berlin";
+        extraConfig = ''
+          WOWorkersCount = 1;
+
+          SOGoUserSources = (
+            {
+              type = sql;
+              userPasswordAlgorithm = md5;
+              viewURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_users";
+              canAuthenticate = YES;
+              id = users;
+            }
+          );
+
+          SOGoProfileURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_user_profile";
+          OCSFolderInfoURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_folder_info";
+          OCSSessionsFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_sessions_folder";
+          OCSEMailAlarmsFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_alarms_folder";
+          OCSStoreURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_store";
+          OCSAclURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_acl";
+          OCSCacheFolderURL = "mysql://sogo@%2Frun%2Fmysqld%2Fmysqld.sock/sogo/sogo_cache_folder";
+        '';
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    sogo.wait_for_unit("multi-user.target")
+    sogo.wait_for_open_port(20000)
+    sogo.wait_for_open_port(80)
+    sogo.succeed("curl -sSfL http://sogo/SOGo")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/solanum.nix b/nixpkgs/nixos/tests/solanum.nix
new file mode 100644
index 000000000000..1ecf91bce40b
--- /dev/null
+++ b/nixpkgs/nixos/tests/solanum.nix
@@ -0,0 +1,97 @@
+let
+  clients = [
+    "ircclient1"
+    "ircclient2"
+  ];
+  server = "solanum";
+  ircPort = 6667;
+  channel = "nixos-cat";
+  iiDir = "/tmp/irc";
+in
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "solanum";
+  nodes = {
+    "${server}" = {
+      networking.firewall.allowedTCPPorts = [ ircPort ];
+      services.solanum = {
+        enable = true;
+        motd = ''
+          The default MOTD doesn't contain the word "nixos" in it.
+          This one does.
+        '';
+      };
+    };
+  } // lib.listToAttrs (builtins.map (client: lib.nameValuePair client {
+    imports = [
+      ./common/user-account.nix
+    ];
+
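+    # ii is a minimalist, filesystem-based IRC client: each server/channel
+    # becomes a directory with "in" and "out" files, which the test script
+    # below writes to and greps.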
+    systemd.services.ii = {
+      requires = [ "network.target" ];
+      wantedBy = [ "default.target" ];
+
+      # create ii's state directory before the client starts
+      preStart = "mkdir -p ${iiDir}";
+
+      serviceConfig = {
+        Type = "simple";
+        ExecStart = ''
+          ${lib.getBin pkgs.ii}/bin/ii -n ${client} -s ${server} -i ${iiDir}
+        '';
+        User = "alice";
+      };
+    };
+  }) clients);
+
+  testScript =
+    let
+      msg = client: "Hello, my name is ${client}";
+      clientScript = client: [
+        ''
+          ${client}.wait_for_unit("network.target")
+          ${client}.systemctl("start ii")
+          ${client}.wait_for_unit("ii")
+          ${client}.wait_for_file("${iiDir}/${server}/out")
+        ''
+        # look for the custom text in the MOTD.
+        ''
+          ${client}.wait_until_succeeds("grep 'nixos' ${iiDir}/${server}/out")
+        ''
+        # wait until first PING from server arrives before joining,
+        # so we don't try it too early
+        ''
+          ${client}.wait_until_succeeds("grep 'PING' ${iiDir}/${server}/out")
+        ''
+        # join ${channel}
+        ''
+          ${client}.succeed("echo '/j #${channel}' > ${iiDir}/${server}/in")
+          ${client}.wait_for_file("${iiDir}/${server}/#${channel}/in")
+        ''
+        # send a greeting
+        ''
+          ${client}.succeed(
+              "echo '${msg client}' > ${iiDir}/${server}/#${channel}/in"
+          )
+        ''
+        # check that all greetings arrived on all clients
+      ] ++ builtins.map (other: ''
+        ${client}.succeed(
+            "grep '${msg other}$' ${iiDir}/${server}/#${channel}/out"
+        )
+      '') clients;
+
+      # foldl', but requires a non-empty list instead of a start value
+      reduce = f: list:
+        builtins.foldl' f (builtins.head list) (builtins.tail list);
+    in ''
+      start_all()
+      ${server}.systemctl("status solanum")
+      ${server}.wait_for_open_port(${toString ircPort})
+
+      # interleave the clientScript lists so that every client executes
+      # a given step before any client advances to the next one.
+    '' + lib.concatStrings
+      (reduce
+        (lib.zipListsWith (cs: c: cs + c))
+        (builtins.map clientScript clients));
+})
diff --git a/nixpkgs/nixos/tests/sonarr.nix b/nixpkgs/nixos/tests/sonarr.nix
new file mode 100644
index 000000000000..57e6b72db3a3
--- /dev/null
+++ b/nixpkgs/nixos/tests/sonarr.nix
@@ -0,0 +1,16 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "sonarr";
+  meta.maintainers = with lib.maintainers; [ etu ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.sonarr.enable = true; };
+
+  testScript = ''
+    machine.wait_for_unit("sonarr.service")
+    machine.wait_for_open_port(8989)
+    machine.succeed("curl --fail http://localhost:8989/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sourcehut.nix b/nixpkgs/nixos/tests/sourcehut.nix
new file mode 100644
index 000000000000..0b258acc2af1
--- /dev/null
+++ b/nixpkgs/nixos/tests/sourcehut.nix
@@ -0,0 +1,252 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  domain = "sourcehut.localdomain";
+
+  # Note that wildcard certificates just under the TLD (e.g. *.com)
+  # would be rejected by clients like curl.
+  tls-cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -days 36500 \
+      -subj '/CN=${domain}' -extensions v3_req \
+      -addext 'subjectAltName = DNS:*.${domain}'
+    install -D -t $out key.pem cert.pem
+  '';
+
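+  # Minimal NixOS disk image handed to builds.sr.ht as a build VM:
+  # passwordless SSH, a "build" user and the network setup the build
+  # service expects.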
+  images = {
+    nixos.unstable.x86_64 =
+      let
+        systemConfig = { pkgs, ... }: {
+          # passwordless ssh server
+          services.openssh = {
+            enable = true;
+            settings = {
+              PermitRootLogin = "yes";
+              PermitEmptyPasswords = true;
+            };
+          };
+
+          users = {
+            mutableUsers = false;
+            # build user
+            extraUsers."build" = {
+              isNormalUser = true;
+              uid = 1000;
+              extraGroups = [ "wheel" ];
+              password = "";
+            };
+            users.root.password = "";
+          };
+
+          security.sudo.wheelNeedsPassword = false;
+          nix.settings.trusted-users = [ "root" "build" ];
+          documentation.nixos.enable = false;
+
+          # builds.sr.ht-image-specific network settings
+          networking = {
+            hostName = "build";
+            dhcpcd.enable = false;
+            defaultGateway.address = "10.0.2.2";
+            usePredictableInterfaceNames = false;
+            interfaces."eth0".ipv4.addresses = [{
+              address = "10.0.2.15";
+              prefixLength = 25;
+            }];
+            enableIPv6 = false;
+            nameservers = [
+              # OpenNIC anycast
+              "185.121.177.177"
+              "169.239.202.202"
+              # Google
+              "8.8.8.8"
+            ];
+            firewall.allowedTCPPorts = [ 22 ];
+          };
+
+          environment.systemPackages = [
+            pkgs.gitMinimal
+            #pkgs.mercurial
+            pkgs.curl
+            pkgs.gnupg
+          ];
+        };
+        qemuConfig = { pkgs, ... }: {
+          imports = [ systemConfig ];
+          fileSystems."/".device = "/dev/disk/by-label/nixos";
+          boot.initrd.availableKernelModules = [
+            "ahci"
+            "ehci_pci"
+            "sd_mod"
+            "usb_storage"
+            "usbhid"
+            "virtio_balloon"
+            "virtio_blk"
+            "virtio_pci"
+            "virtio_ring"
+            "xhci_pci"
+          ];
+          boot.loader = {
+            grub = {
+              version = 2;
+              device = "/dev/vda";
+            };
+            timeout = 0;
+          };
+        };
+        config = (import (pkgs.path + "/nixos/lib/eval-config.nix") {
+          inherit pkgs; modules = [ qemuConfig ];
+          system = "x86_64-linux";
+        }).config;
+      in
+      import (pkgs.path + "/nixos/lib/make-disk-image.nix") {
+        inherit pkgs lib config;
+        diskSize = 16000;
+        format = "qcow2-compressed";
+        contents = [
+          { source = pkgs.writeText "gitconfig" ''
+              [user]
+                name = builds.sr.ht
+                email = build@sr.ht
+            '';
+            target = "/home/build/.gitconfig";
+            user = "build";
+            group = "users";
+            mode = "644";
+          }
+        ];
+      };
+  };
+
+in
+{
+  name = "sourcehut";
+
+  meta.maintainers = [ pkgs.lib.maintainers.tomberek ];
+
+  nodes.machine = { config, pkgs, nodes, ... }: {
+    # buildsrht needs space
+    virtualisation.diskSize = 4 * 1024;
+    virtualisation.memorySize = 2 * 1024;
+    networking.domain = domain;
+    networking.enableIPv6 = false;
+    networking.extraHosts = ''
+      ${config.networking.primaryIPAddress} builds.${domain}
+      ${config.networking.primaryIPAddress} git.${domain}
+      ${config.networking.primaryIPAddress} meta.${domain}
+    '';
+
+    services.sourcehut = {
+      enable = true;
+      nginx.enable = true;
+      nginx.virtualHost = {
+        forceSSL = true;
+        sslCertificate = "${tls-cert}/cert.pem";
+        sslCertificateKey = "${tls-cert}/key.pem";
+      };
+      postgresql.enable = true;
+      redis.enable = true;
+
+      meta.enable = true;
+      builds = {
+        enable = true;
+        # FIXME: see why it does not seem to activate fully.
+        #enableWorker = true;
+        inherit images;
+      };
+      git.enable = true;
+
+      settings."sr.ht" = {
+        global-domain = config.networking.domain;
+        service-key = pkgs.writeText "service-key" "8b327279b77e32a3620e2fc9aabce491cc46e7d821fd6713b2a2e650ce114d01";
+        network-key = pkgs.writeText "network-key" "cEEmc30BRBGkgQZcHFksiG7hjc6_dK1XR2Oo5Jb9_nQ=";
+      };
+      settings."builds.sr.ht" = {
+        oauth-client-secret = pkgs.writeText "buildsrht-oauth-client-secret" "2260e9c4d9b8dcedcef642860e0504bc";
+        oauth-client-id = "299db9f9c2013170";
+      };
+      settings."git.sr.ht" = {
+        oauth-client-secret = pkgs.writeText "gitsrht-oauth-client-secret" "3597288dc2c716e567db5384f493b09d";
+        oauth-client-id = "d07cb713d920702e";
+      };
+      settings.webhooks.private-key = pkgs.writeText "webhook-key" "Ra3IjxgFiwG9jxgp4WALQIZw/BMYt30xWiOsqD0J7EA=";
+      settings.mail = {
+        smtp-from = "root+hut@${domain}";
+        # WARNING: take care to keep pgp-privkey outside the Nix store in production,
+        # or use LoadCredentialEncrypted=
+        pgp-privkey = toString (pkgs.writeText "sourcehut.pgp-privkey" ''
+          -----BEGIN PGP PRIVATE KEY BLOCK-----
+
+          lFgEYqDRORYJKwYBBAHaRw8BAQdAehGoy36FUx2OesYm07be2rtLyvR5Pb/ltstd
+          Gk7hYQoAAP9X4oPmxxrHN8LewBpWITdBomNqlHoiP7mI0nz/BOPJHxEktDZuaXhv
+          cy90ZXN0cy9zb3VyY2VodXQgPHJvb3QraHV0QHNvdXJjZWh1dC5sb2NhbGRvbWFp
+          bj6IlwQTFgoAPxYhBPqjgjnL8RHN4JnADNicgXaYm0jJBQJioNE5AhsDBQkDwmcA
+          BgsJCAcDCgUVCgkICwUWAwIBAAIeBQIXgAAKCRDYnIF2mJtIySVCAP9e2nHsVHSi
+          2B1YGZpVG7Xf36vxljmMkbroQy+0gBPwRwEAq+jaiQqlbGhQ7R/HMFcAxBIVsq8h
+          Aw1rngsUd0o3dAicXQRioNE5EgorBgEEAZdVAQUBAQdAXZV2Sd5ZNBVTBbTGavMv
+          D6ORrUh8z7TI/3CsxCE7+yADAQgHAAD/c1RU9xH+V/uI1fE7HIn/zL0LUPpsuce2
+          cH++g4u3kBgTOYh+BBgWCgAmFiEE+qOCOcvxEc3gmcAM2JyBdpibSMkFAmKg0TkC
+          GwwFCQPCZwAACgkQ2JyBdpibSMlKagD/cTre6p1m8QuJ7kwmCFRSz5tBzIuYMMgN
+          xtT7dmS91csA/35fWsOykSiFRojQ7ccCSUTHL7ApF2EbL968tP/D2hIG
+          =Hjoc
+          -----END PGP PRIVATE KEY BLOCK-----
+        '');
+        pgp-pubkey = pkgs.writeText "sourcehut.pgp-pubkey" ''
+          -----BEGIN PGP PUBLIC KEY BLOCK-----
+
+          mDMEYqDRORYJKwYBBAHaRw8BAQdAehGoy36FUx2OesYm07be2rtLyvR5Pb/ltstd
+          Gk7hYQq0Nm5peG9zL3Rlc3RzL3NvdXJjZWh1dCA8cm9vdCtodXRAc291cmNlaHV0
+          LmxvY2FsZG9tYWluPoiXBBMWCgA/FiEE+qOCOcvxEc3gmcAM2JyBdpibSMkFAmKg
+          0TkCGwMFCQPCZwAGCwkIBwMKBRUKCQgLBRYDAgEAAh4FAheAAAoJENicgXaYm0jJ
+          JUIA/17acexUdKLYHVgZmlUbtd/fq/GWOYyRuuhDL7SAE/BHAQCr6NqJCqVsaFDt
+          H8cwVwDEEhWyryEDDWueCxR3Sjd0CLg4BGKg0TkSCisGAQQBl1UBBQEBB0BdlXZJ
+          3lk0FVMFtMZq8y8Po5GtSHzPtMj/cKzEITv7IAMBCAeIfgQYFgoAJhYhBPqjgjnL
+          8RHN4JnADNicgXaYm0jJBQJioNE5AhsMBQkDwmcAAAoJENicgXaYm0jJSmoA/3E6
+          3uqdZvELie5MJghUUs+bQcyLmDDIDcbU+3ZkvdXLAP9+X1rDspEohUaI0O3HAklE
+          xy+wKRdhGy/evLT/w9oSBg==
+          =pJD7
+          -----END PGP PUBLIC KEY BLOCK-----
+        '';
+        pgp-key-id = "0xFAA38239CBF111CDE099C00CD89C8176989B48C9";
+      };
+    };
+
+    networking.firewall.allowedTCPPorts = [ 443 ];
+    security.pki.certificateFiles = [ "${tls-cert}/cert.pem" ];
+    services.nginx = {
+      enable = true;
+      recommendedGzipSettings = true;
+      recommendedOptimisation = true;
+      recommendedTlsSettings = true;
+      recommendedProxySettings = true;
+    };
+
+    services.postgresql = {
+      enable = true;
+      enableTCPIP = false;
+      settings.unix_socket_permissions = "0770";
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+
+    # Testing metasrht
+    machine.wait_for_unit("metasrht-api.service")
+    machine.wait_for_unit("metasrht.service")
+    machine.wait_for_unit("metasrht-webhooks.service")
+    machine.wait_for_open_port(5000)
+    machine.succeed("curl -sL http://localhost:5000 | grep meta.${domain}")
+    machine.succeed("curl -sL http://meta.${domain} | grep meta.${domain}")
+
+    # Testing buildsrht
+    machine.wait_for_unit("buildsrht.service")
+    machine.wait_for_open_port(5002)
+    machine.succeed("curl -sL http://localhost:5002 | grep builds.${domain}")
+    #machine.wait_for_unit("buildsrht-worker.service")
+
+    # Testing gitsrht
+    machine.wait_for_unit("gitsrht-api.service")
+    machine.wait_for_unit("gitsrht.service")
+    machine.wait_for_unit("gitsrht-webhooks.service")
+    machine.succeed("curl -sL http://git.${domain} | grep git.${domain}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/spacecookie.nix b/nixpkgs/nixos/tests/spacecookie.nix
new file mode 100644
index 000000000000..a640657d8a6b
--- /dev/null
+++ b/nixpkgs/nixos/tests/spacecookie.nix
@@ -0,0 +1,56 @@
+let
+  gopherRoot   = "/tmp/gopher";
+  gopherHost   = "gopherd";
+  gopherClient = "client";
+  fileContent  = "Hello Gopher!\n";
+  fileName     = "file.txt";
+in
+  import ./make-test-python.nix ({...}: {
+    name = "spacecookie";
+    nodes = {
+      ${gopherHost} = {
+        systemd.services.spacecookie = {
+          preStart = ''
+            mkdir -p ${gopherRoot}/directory
+            printf "%s" "${fileContent}" > ${gopherRoot}/${fileName}
+          '';
+        };
+
+        services.spacecookie = {
+          enable = true;
+          openFirewall = true;
+          settings = {
+            root = gopherRoot;
+            hostname = gopherHost;
+          };
+        };
+      };
+
+      ${gopherClient} = {};
+    };
+
+    testScript = ''
+      start_all()
+
+      # spacecookie runs with Type=notify, so the unit being active
+      # also means the gopher port is open
+      ${gopherHost}.wait_for_unit("spacecookie.service")
+      ${gopherClient}.wait_for_unit("network.target")
+
+      fileResponse = ${gopherClient}.succeed("curl -f -s gopher://${gopherHost}/0/${fileName}")
+
+      # the response should be exactly the file we created
+      if fileResponse != "${builtins.replaceStrings [ "\n" ] [ "\\n" ] fileContent}":
+          raise Exception("Unexpected file response")
+
+      # sanity check on the directory listing: we serve a directory and a file
+      # via gopher, so the directory listing should have exactly two entries,
+      # one with gopher file type 0 (file) and one with file type 1 (directory).
+      dirResponse = ${gopherClient}.succeed("curl -f -s gopher://${gopherHost}")
+      dirEntries = [l[0] for l in dirResponse.split("\n") if len(l) > 0]
+      dirEntries.sort()
+
+      if dirEntries != ["0", "1"]:
+          raise Exception("Unexpected directory response")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/spark/default.nix b/nixpkgs/nixos/tests/spark/default.nix
new file mode 100644
index 000000000000..462f0d23a403
--- /dev/null
+++ b/nixpkgs/nixos/tests/spark/default.nix
@@ -0,0 +1,28 @@
+import ../make-test-python.nix ({...}: {
+  name = "spark";
+
+  nodes = {
+    worker = { nodes, pkgs, ... }: {
+      services.spark.worker = {
+        enable = true;
+        master = "master:7077";
+      };
+      virtualisation.memorySize = 2048;
+    };
+    master = { config, pkgs, ... }: {
+      services.spark.master = {
+        enable = true;
+        bind = "0.0.0.0";
+      };
+      networking.firewall.allowedTCPPorts = [ 22 7077 8080 ];
+    };
+  };
+
+  testScript = ''
+    master.wait_for_unit("spark-master.service")
+    worker.wait_for_unit("spark-worker.service")
+    worker.copy_from_host( "${./spark_sample.py}", "/spark_sample.py" )
+    assert "<title>Spark Master at spark://" in worker.succeed("curl -sSfkL http://master:8080/")
+    worker.succeed("spark-submit --master spark://master:7077 --executor-memory 512m --executor-cores 1 /spark_sample.py")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/spark/spark_sample.py b/nixpkgs/nixos/tests/spark/spark_sample.py
new file mode 100644
index 000000000000..c4939451eae0
--- /dev/null
+++ b/nixpkgs/nixos/tests/spark/spark_sample.py
@@ -0,0 +1,40 @@
+from pyspark.sql import Row, SparkSession
+from pyspark.sql import functions as F
+from pyspark.sql.functions import udf
+from pyspark.sql.types import *
+from pyspark.sql.functions import explode
+
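+# explode_col splits a weight into chunks of 10.0 plus any remainder,
+# e.g. 23.0 -> [10.0, 10.0, 3.0].  After bias_weight is normalised to sum
+# to 100, the three input rows explode into 12 rows, as asserted below.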
+def explode_col(weight):
+    return int(weight//10) * [10.0] + ([] if weight%10==0 else [weight%10])
+
+spark = SparkSession.builder.getOrCreate()
+
+dataSchema = [
+    StructField("feature_1", FloatType()),
+    StructField("feature_2", FloatType()),
+    StructField("bias_weight", FloatType())
+]
+
+data = [
+    Row(0.1, 0.2, 10.32),
+    Row(0.32, 1.43, 12.8),
+    Row(1.28, 1.12, 0.23)
+]
+
+df = spark.createDataFrame(spark.sparkContext.parallelize(data), StructType(dataSchema))
+
+normalizing_constant = 100
+sum_bias_weight = df.select(F.sum('bias_weight')).collect()[0][0]
+normalizing_factor = normalizing_constant / sum_bias_weight
+df = df.withColumn('normalized_bias_weight', df.bias_weight * normalizing_factor)
+df = df.drop('bias_weight')
+df = df.withColumnRenamed('normalized_bias_weight', 'bias_weight')
+
+my_udf = udf(lambda x: explode_col(x), ArrayType(FloatType()))
+df1 = df.withColumn('explode_val', my_udf(df.bias_weight))
+df1 = df1.withColumn("explode_val_1", explode(df1.explode_val)).drop("explode_val")
+df1 = df1.drop('bias_weight').withColumnRenamed('explode_val_1', 'bias_weight')
+
+df1.show()
+
+assert df1.count() == 12
diff --git a/nixpkgs/nixos/tests/sqlite3-to-mysql.nix b/nixpkgs/nixos/tests/sqlite3-to-mysql.nix
new file mode 100644
index 000000000000..f18a442157e7
--- /dev/null
+++ b/nixpkgs/nixos/tests/sqlite3-to-mysql.nix
@@ -0,0 +1,65 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+/*
+  This test replaces the usual pytestCheckHook phase of sqlite3-to-mysql,
+  because its test suite needs a running MySQL/MariaDB instance.
+*/
+
+{
+  name = "sqlite3-to-mysql";
+  meta.maintainers = with lib.maintainers; [ gador ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = with pkgs; [
+      sqlite3-to-mysql
+      # create one coherent python environment
+      (python3.withPackages
+        (ps: sqlite3-to-mysql.propagatedBuildInputs ++
+          [
+            python3Packages.pytest
+            python3Packages.pytest-mock
+            python3Packages.pytest-timeout
+            python3Packages.factory-boy
+            python3Packages.docker # only needed so import does not fail
+            sqlite3-to-mysql
+          ])
+      )
+    ];
+    services.mysql = {
+      package = pkgs.mariadb;
+      enable = true;
+      # from https://github.com/techouse/sqlite3-to-mysql/blob/master/tests/conftest.py
+      # and https://github.com/techouse/sqlite3-to-mysql/blob/master/.github/workflows/test.yml
+      initialScript = pkgs.writeText "mysql-init.sql" ''
+        create database test_db DEFAULT CHARACTER SET utf8mb4;
+        create user tester identified by 'testpass';
+        grant all on test_db.* to tester;
+        create user tester@localhost identified by 'testpass';
+        grant all on test_db.* to tester@localhost;
+      '';
+      settings = {
+        mysqld = {
+          character-set-server = "utf8mb4";
+          collation-server = "utf8mb4_unicode_ci";
+          log_warnings = 1;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("mysql")
+
+    machine.succeed(
+         "sqlite3mysql --version | grep ${pkgs.sqlite3-to-mysql.version}"
+    )
+
+    # invalid_database_name: assert '1045 (28000): Access denied' in "1044 (42000): Access denied [...]
+    # invalid_database_user: does not return non-zero exit for some reason
+    # test_version: has problems importing sqlite3_to_mysql and determining the version
+    machine.succeed(
+         "cd ${pkgs.sqlite3-to-mysql.src} \
+          && pytest -v --no-docker -k \"not test_invalid_database_name and not test_invalid_database_user and not test_version\""
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ssh-audit.nix b/nixpkgs/nixos/tests/ssh-audit.nix
new file mode 100644
index 000000000000..bd6255b8044d
--- /dev/null
+++ b/nixpkgs/nixos/tests/ssh-audit.nix
@@ -0,0 +1,103 @@
+import ./make-test-python.nix (
+  {pkgs, ...}: let
+    sshKeys = import (pkgs.path + "/nixos/tests/ssh-keys.nix") pkgs;
+    sshUsername = "any-user";
+    serverName = "server";
+    clientName = "client";
+    sshAuditPort = 2222;
+  in {
+    name = "ssh";
+
+    nodes = {
+      "${serverName}" = {
+        networking.firewall.allowedTCPPorts = [
+          sshAuditPort
+        ];
+        services.openssh.enable = true;
+        users.users."${sshUsername}" = {
+          isNormalUser = true;
+          openssh.authorizedKeys.keys = [
+            sshKeys.snakeOilPublicKey
+          ];
+        };
+      };
+      "${clientName}" = {
+        programs.ssh = {
+          ciphers = [
+            "aes128-ctr"
+            "aes128-gcm@openssh.com"
+            "aes192-ctr"
+            "aes256-ctr"
+            "aes256-gcm@openssh.com"
+            "chacha20-poly1305@openssh.com"
+          ];
+          extraConfig = ''
+            IdentitiesOnly yes
+          '';
+          hostKeyAlgorithms = [
+            "rsa-sha2-256"
+            "rsa-sha2-256-cert-v01@openssh.com"
+            "rsa-sha2-512"
+            "rsa-sha2-512-cert-v01@openssh.com"
+            "sk-ssh-ed25519-cert-v01@openssh.com"
+            "sk-ssh-ed25519@openssh.com"
+            "ssh-ed25519"
+            "ssh-ed25519-cert-v01@openssh.com"
+          ];
+          kexAlgorithms = [
+            "curve25519-sha256"
+            "curve25519-sha256@libssh.org"
+            "diffie-hellman-group-exchange-sha256"
+            "diffie-hellman-group16-sha512"
+            "diffie-hellman-group18-sha512"
+            "sntrup761x25519-sha512@openssh.com"
+          ];
+          macs = [
+            "hmac-sha2-256-etm@openssh.com"
+            "hmac-sha2-512-etm@openssh.com"
+            "umac-128-etm@openssh.com"
+          ];
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      ${serverName}.wait_for_open_port(22)
+
+      # Should pass SSH server audit
+      ${serverName}.succeed("${pkgs.ssh-audit}/bin/ssh-audit 127.0.0.1")
+
+      # Wait for client to be able to connect to the server
+      ${clientName}.wait_for_unit("network-online.target")
+
+      # Set up trusted private key
+      ${clientName}.succeed("cat ${sshKeys.snakeOilPrivateKey} > privkey.snakeoil")
+      ${clientName}.succeed("chmod 600 privkey.snakeoil")
+
+      # Fail fast and disable interactivity
+      ssh_options = "-o BatchMode=yes -o ConnectTimeout=1 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null"
+
+      # Should deny root user
+      ${clientName}.fail(f"ssh {ssh_options} root@${serverName} true")
+
+      # Should deny non-root user password login
+      ${clientName}.fail(f"ssh {ssh_options} -o PasswordAuthentication=yes ${sshUsername}@${serverName} true")
+
+      # Should allow non-root user certificate login
+      ${clientName}.succeed(f"ssh {ssh_options} -i privkey.snakeoil ${sshUsername}@${serverName} true")
+
+      # Should pass SSH client audit
+      service_name = "ssh-audit.service"
+      ${serverName}.succeed(f"systemd-run --unit={service_name} ${pkgs.ssh-audit}/bin/ssh-audit --client-audit --port=${toString sshAuditPort}")
+      ${clientName}.sleep(5) # We can't use wait_for_open_port because ssh-audit exits as soon as anything talks to it
+      ${clientName}.execute(
+          f"ssh {ssh_options} -i privkey.snakeoil -p ${toString sshAuditPort} ${sshUsername}@${serverName} true",
+          check_return=False,
+          timeout=10
+      )
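+
+      # systemd-run returned immediately, so fetch the audit unit's exit
+      # status and propagate it: a failed client audit fails the test.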
+      ${serverName}.succeed(f"exit $(systemctl show --property=ExecMainStatus --value {service_name})")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/ssh-keys.nix b/nixpkgs/nixos/tests/ssh-keys.nix
new file mode 100644
index 000000000000..df9ff38a3b22
--- /dev/null
+++ b/nixpkgs/nixos/tests/ssh-keys.nix
@@ -0,0 +1,15 @@
+pkgs:
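+# Throw-away ("snake oil") SSH key pair shared by the NixOS tests; do not
+# use it anywhere outside the test VMs.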
+{ snakeOilPrivateKey = pkgs.writeText "privkey.snakeoil" ''
+    -----BEGIN EC PRIVATE KEY-----
+    MHcCAQEEIHQf/khLvYrQ8IOika5yqtWvI0oquHlpRLTZiJy5dRJmoAoGCCqGSM49
+    AwEHoUQDQgAEKF0DYGbBwbj06tA3fd/+yP44cvmwmHBWXZCKbS+RQlAKvLXMWkpN
+    r1lwMyJZoSGgBHoUahoYjTh9/sJL7XLJtA==
+    -----END EC PRIVATE KEY-----
+  '';
+
+  snakeOilPublicKey = pkgs.lib.concatStrings [
+    "ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHA"
+    "yNTYAAABBBChdA2BmwcG49OrQN33f/sj+OHL5sJhwVl2Qim0vkUJQCry1zFpKTa"
+    "9ZcDMiWaEhoAR6FGoaGI04ff7CS+1yybQ= snakeoil"
+  ];
+}
diff --git a/nixpkgs/nixos/tests/sslh.nix b/nixpkgs/nixos/tests/sslh.nix
new file mode 100644
index 000000000000..30ffd389d442
--- /dev/null
+++ b/nixpkgs/nixos/tests/sslh.nix
@@ -0,0 +1,75 @@
+import ./make-test-python.nix {
+  name = "sslh";
+
+  nodes = {
+    server = { pkgs, lib, ... }: {
+      networking.firewall.allowedTCPPorts = [ 443 ];
+      networking.interfaces.eth1.ipv6.addresses = [
+        {
+          address = "fe00:aa:bb:cc::2";
+          prefixLength = 64;
+        }
+      ];
+      services.sslh = {
+        enable = true;
+        settings.transparent = true;
+        settings.protocols = [
+          { name = "ssh"; service = "ssh"; host = "localhost"; port = "22"; probe = "builtin"; }
+          { name = "http"; host = "localhost"; port = "80"; probe = "builtin"; }
+        ];
+      };
+      services.openssh.enable = true;
+      users.users.root.openssh.authorizedKeys.keyFiles = [ ./initrd-network-ssh/id_ed25519.pub ];
+      services.nginx = {
+        enable = true;
+        virtualHosts."localhost" = {
+          addSSL = false;
+          default = true;
+          root = pkgs.runCommand "testdir" {} ''
+            mkdir "$out"
+            echo hello world > "$out/index.html"
+          '';
+        };
+      };
+    };
+    client = { ... }: {
+      networking.interfaces.eth1.ipv6.addresses = [
+        {
+          address = "fe00:aa:bb:cc::1";
+          prefixLength = 64;
+        }
+      ];
+      networking.hosts."fe00:aa:bb:cc::2" = [ "server" ];
+      environment.etc.sshKey = {
+        source = ./initrd-network-ssh/id_ed25519; # don't use this anywhere else
+        mode = "0600";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("sslh.service")
+    server.wait_for_unit("nginx.service")
+    server.wait_for_unit("sshd.service")
+    server.wait_for_open_port(80)
+    server.wait_for_open_port(443)
+    server.wait_for_open_port(22)
+
+    for arg in ["-6", "-4"]:
+        client.wait_until_succeeds(f"ping {arg} -c1 server")
+
+        # check that ssh through sslh works
+        client.succeed(
+            f"ssh {arg} -p 443 -i /etc/sshKey -o StrictHostKeyChecking=accept-new server 'echo $SSH_CONNECTION > /tmp/foo{arg}'"
+        )
+
+        # check that 1/ the above ssh command had an effect 2/ transparent proxying really works
+        ip = "fe00:aa:bb:cc::1" if arg == "-6" else "192.168.1."
+        server.succeed(f"grep '{ip}' /tmp/foo{arg}")
+
+        # check that http through sslh works
+        assert client.succeed(f"curl -f {arg} http://server:443").strip() == "hello world"
+  '';
+}
diff --git a/nixpkgs/nixos/tests/sssd-ldap.nix b/nixpkgs/nixos/tests/sssd-ldap.nix
new file mode 100644
index 000000000000..60f3b1a415da
--- /dev/null
+++ b/nixpkgs/nixos/tests/sssd-ldap.nix
@@ -0,0 +1,173 @@
+let
+  dbDomain = "example.org";
+  dbSuffix = "dc=example,dc=org";
+
+  ldapRootUser = "admin";
+  ldapRootPassword = "foobar";
+
+  testUser = "alice";
+  testPassword = "foobar";
+  testNewPassword = "barfoo";
+in
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "sssd-ldap";
+
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ bbigras s1341 ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    security.pam.services.systemd-user.makeHomeDir = true;
+    environment.etc."cert.pem".text = builtins.readFile ./common/acme/server/acme.test.cert.pem;
+    environment.etc."key.pem".text = builtins.readFile ./common/acme/server/acme.test.key.pem;
+    services.openldap = {
+      enable = true;
+      urlList = [ "ldap:///" "ldaps:///" ];
+      settings = {
+        attrs = {
+          olcTLSCACertificateFile = "/etc/cert.pem";
+          olcTLSCertificateFile = "/etc/cert.pem";
+          olcTLSCertificateKeyFile = "/etc/key.pem";
+          olcTLSCipherSuite = "HIGH:MEDIUM:+3DES:+RC4:+aNULL";
+          olcTLSCRLCheck = "none";
+          olcTLSVerifyClient = "never";
+          olcTLSProtocolMin = "3.1";
+        };
+        children = {
+          "cn=schema".includes = [
+            "${pkgs.openldap}/etc/schema/core.ldif"
+            "${pkgs.openldap}/etc/schema/cosine.ldif"
+            "${pkgs.openldap}/etc/schema/inetorgperson.ldif"
+            "${pkgs.openldap}/etc/schema/nis.ldif"
+          ];
+          "olcDatabase={1}mdb" = {
+            attrs = {
+              objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
+              olcDatabase = "{1}mdb";
+              olcDbDirectory = "/var/lib/openldap/db";
+              olcSuffix = dbSuffix;
+              olcRootDN = "cn=${ldapRootUser},${dbSuffix}";
+              olcRootPW = ldapRootPassword;
+              olcAccess = [
+                /*
+                  custom access rules for userPassword attributes
+                  */
+                ''
+                  {0}to attrs=userPassword
+                                    by self write
+                                    by anonymous auth
+                                    by * none''
+
+                /*
+                  allow read on anything else
+                  */
+                ''
+                  {1}to *
+                                    by * read''
+              ];
+            };
+          };
+        };
+      };
+      declarativeContents = {
+        ${dbSuffix} = ''
+          dn: ${dbSuffix}
+          objectClass: top
+          objectClass: dcObject
+          objectClass: organization
+          o: ${dbDomain}
+
+          dn: ou=posix,${dbSuffix}
+          objectClass: top
+          objectClass: organizationalUnit
+
+          dn: ou=accounts,ou=posix,${dbSuffix}
+          objectClass: top
+          objectClass: organizationalUnit
+
+          dn: uid=${testUser},ou=accounts,ou=posix,${dbSuffix}
+          objectClass: person
+          objectClass: posixAccount
+          userPassword: ${testPassword}
+          homeDirectory: /home/${testUser}
+          uidNumber: 1234
+          gidNumber: 1234
+          cn: ""
+          sn: ""
+        '';
+      };
+    };
+
+    services.sssd = {
+      enable = true;
+      # just for testing purposes, don't put this into the Nix store in production!
+      environmentFile = "${pkgs.writeText "ldap-root" "LDAP_BIND_PW=${ldapRootPassword}"}";
+      config = ''
+        [sssd]
+        config_file_version = 2
+        services = nss, pam, sudo
+        domains = ${dbDomain}
+
+        [domain/${dbDomain}]
+        auth_provider = ldap
+        id_provider = ldap
+        ldap_uri = ldaps://127.0.0.1:636
+        ldap_tls_reqcert = allow
+        ldap_tls_cacert = /etc/cert.pem
+        ldap_search_base = ${dbSuffix}
+        ldap_default_bind_dn = cn=${ldapRootUser},${dbSuffix}
+        ldap_default_authtok_type = password
+        ldap_default_authtok = $LDAP_BIND_PW
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("openldap.service")
+    machine.wait_for_unit("sssd.service")
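+    # The first lookup can race with sssd's LDAP backend coming online;
+    # if it fails, wait for the backend and retry.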
+    result = machine.execute("getent passwd ${testUser}")
+    if result[0] == 0:
+      assert "${testUser}" in result[1]
+    else:
+      machine.wait_for_console_text("Backend is online")
+      machine.succeed("getent passwd ${testUser}")
+
+    with subtest("Log in as ${testUser}"):
+        machine.wait_until_tty_matches("1", "login: ")
+        machine.send_chars("${testUser}\n")
+        machine.wait_until_tty_matches("1", "login: ${testUser}")
+        machine.wait_until_succeeds("pgrep login")
+        machine.wait_until_tty_matches("1", "Password: ")
+        machine.send_chars("${testPassword}\n")
+        machine.wait_until_succeeds("pgrep -u ${testUser} bash")
+        machine.send_chars("touch done\n")
+        machine.wait_for_file("/home/${testUser}/done")
+
+    with subtest("Change ${testUser}'s password"):
+        machine.send_chars("passwd\n")
+        machine.wait_until_tty_matches("1", "Current Password: ")
+        machine.send_chars("${testPassword}\n")
+        machine.wait_until_tty_matches("1", "New Password: ")
+        machine.send_chars("${testNewPassword}\n")
+        machine.wait_until_tty_matches("1", "Reenter new Password: ")
+        machine.send_chars("${testNewPassword}\n")
+        machine.wait_until_tty_matches("1", "passwd: password updated successfully")
+
+    with subtest("Log in as ${testUser} with new password in virtual console 2"):
+        machine.send_key("alt-f2")
+        machine.wait_until_succeeds("[ $(fgconsole) = 2 ]")
+        machine.wait_for_unit("getty@tty2.service")
+        machine.wait_until_succeeds("pgrep -f 'agetty.*tty2'")
+
+        machine.wait_until_tty_matches("2", "login: ")
+        machine.send_chars("${testUser}\n")
+        machine.wait_until_tty_matches("2", "login: ${testUser}")
+        machine.wait_until_succeeds("pgrep login")
+        machine.wait_until_tty_matches("2", "Password: ")
+        machine.send_chars("${testNewPassword}\n")
+        machine.wait_until_succeeds("pgrep -u ${testUser} bash")
+        machine.send_chars("touch done2\n")
+        machine.wait_for_file("/home/${testUser}/done2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sssd.nix b/nixpkgs/nixos/tests/sssd.nix
new file mode 100644
index 000000000000..c8d356e074ad
--- /dev/null
+++ b/nixpkgs/nixos/tests/sssd.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "sssd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ bbigras ];
+  };
+  nodes.machine = { pkgs, ... }: {
+    services.sssd.enable = true;
+  };
+
+  testScript = ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_unit("sssd.service")
+      machine.succeed("sssctl config-check")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/stalwart-mail.nix b/nixpkgs/nixos/tests/stalwart-mail.nix
new file mode 100644
index 000000000000..b5589966a160
--- /dev/null
+++ b/nixpkgs/nixos/tests/stalwart-mail.nix
@@ -0,0 +1,117 @@
+# Rudimentary test checking that the Stalwart email server can:
+# - receive some message through SMTP submission, then
+# - serve this message through IMAP.
+
+let
+  certs = import ./common/acme/server/snakeoil-certs.nix;
+  domain = certs.domain;
+
+in import ./make-test-python.nix ({ lib, ... }: {
+  name = "stalwart-mail";
+
+  nodes.main = { pkgs, ... }: {
+    security.pki.certificateFiles = [ certs.ca.cert ];
+
+    services.stalwart-mail = {
+      enable = true;
+      settings = {
+        server.hostname = domain;
+
+        certificate."snakeoil" = {
+          cert = "file://${certs.${domain}.cert}";
+          private-key = "file://${certs.${domain}.key}";
+        };
+
+        server.tls = {
+          certificate = "snakeoil";
+          enable = true;
+          implicit = false;
+        };
+
+        server.listener = {
+          "smtp-submission" = {
+            bind = [ "[::]:587" ];
+            protocol = "smtp";
+          };
+
+          "imap" = {
+            bind = [ "[::]:143" ];
+            protocol = "imap";
+          };
+        };
+
+        session.auth.mechanisms = [ "PLAIN" ];
+        session.auth.directory = "in-memory";
+        jmap.directory = "in-memory";  # shared with imap
+
+        session.rcpt.directory = "in-memory";
+        queue.outbound.next-hop = [ "local" ];
+
+        directory."in-memory" = {
+          type = "memory";
+          users = [
+            {
+              name = "alice";
+              secret = "foobar";
+              email = [ "alice@${domain}" ];
+            }
+            {
+              name = "bob";
+              secret = "foobar";
+              email = [ "bob@${domain}" ];
+            }
+          ];
+        };
+      };
+    };
+
+    environment.systemPackages = [
+      (pkgs.writers.writePython3Bin "test-smtp-submission" { } ''
+        from smtplib import SMTP
+
+        with SMTP('localhost', 587) as smtp:
+            smtp.starttls()
+            smtp.login('alice', 'foobar')
+            smtp.sendmail(
+                'alice@${domain}',
+                'bob@${domain}',
+                """
+                    From: alice@${domain}
+                    To: bob@${domain}
+                    Subject: Some test message
+
+                    This is a test message.
+                """.strip()
+            )
+      '')
+
+      (pkgs.writers.writePython3Bin "test-imap-read" { } ''
+        from imaplib import IMAP4
+
+        with IMAP4('localhost') as imap:
+            imap.starttls()
+            imap.login('bob', 'foobar')
+            imap.select('"All Mail"')
+            status, [ref] = imap.search(None, 'ALL')
+            assert status == 'OK'
+            [msgId] = ref.split()
+            status, msg = imap.fetch(msgId, 'BODY[TEXT]')
+            assert status == 'OK'
+            assert msg[0][1].strip() == b'This is a test message.'
+      '')
+    ];
+  };
+
+  testScript = /* python */ ''
+    main.wait_for_unit("stalwart-mail.service")
+    main.wait_for_open_port(587)
+    main.wait_for_open_port(143)
+
+    main.succeed("test-smtp-submission")
+    main.succeed("test-imap-read")
+  '';
+
+  meta = {
+    maintainers = with lib.maintainers; [ happysalada pacien ];
+  };
+})
diff --git a/nixpkgs/nixos/tests/starship.nix b/nixpkgs/nixos/tests/starship.nix
new file mode 100644
index 000000000000..48a4be6caf17
--- /dev/null
+++ b/nixpkgs/nixos/tests/starship.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "starship";
+  meta.maintainers = pkgs.starship.meta.maintainers;
+
+  nodes.machine = {
+    programs = {
+      fish.enable = true;
+      zsh.enable = true;
+
+      starship = {
+        enable = true;
+        settings.format = "<starship>";
+      };
+    };
+
+    environment.systemPackages = map
+      (shell: pkgs.writeScriptBin "expect-${shell}" ''
+        #!${pkgs.expect}/bin/expect -f
+
+        spawn env TERM=xterm ${shell} -i
+
+        expect "<starship>" {
+          send "exit\n"
+        } timeout {
+          send_user "\n${shell} failed to display Starship\n"
+          exit 1
+        }
+
+        expect eof
+      '')
+      [ "bash" "fish" "zsh" ];
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("default.target")
+
+    machine.succeed("expect-bash")
+    machine.succeed("expect-fish")
+    machine.succeed("expect-zsh")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/step-ca.nix b/nixpkgs/nixos/tests/step-ca.nix
new file mode 100644
index 000000000000..a855b590232d
--- /dev/null
+++ b/nixpkgs/nixos/tests/step-ca.nix
@@ -0,0 +1,77 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+    test-certificates = pkgs.runCommandLocal "test-certificates" { } ''
+      mkdir -p $out
+      echo insecure-root-password > $out/root-password-file
+      echo insecure-intermediate-password > $out/intermediate-password-file
+      ${pkgs.step-cli}/bin/step certificate create "Example Root CA" $out/root_ca.crt $out/root_ca.key --password-file=$out/root-password-file --profile root-ca
+      ${pkgs.step-cli}/bin/step certificate create "Example Intermediate CA 1" $out/intermediate_ca.crt $out/intermediate_ca.key --password-file=$out/intermediate-password-file --ca-password-file=$out/root-password-file --profile intermediate-ca --ca $out/root_ca.crt --ca-key $out/root_ca.key
+    '';
+  in
+  {
+    name = "step-ca";
+    nodes =
+      {
+        caserver =
+          { config, pkgs, ... }: {
+            services.step-ca = {
+              enable = true;
+              address = "0.0.0.0";
+              port = 8443;
+              openFirewall = true;
+              intermediatePasswordFile = "${test-certificates}/intermediate-password-file";
+              settings = {
+                dnsNames = [ "caserver" ];
+                root = "${test-certificates}/root_ca.crt";
+                crt = "${test-certificates}/intermediate_ca.crt";
+                key = "${test-certificates}/intermediate_ca.key";
+                db = {
+                  type = "badger";
+                  dataSource = "/var/lib/step-ca/db";
+                };
+                authority = {
+                  provisioners = [
+                    {
+                      type = "ACME";
+                      name = "acme";
+                    }
+                  ];
+                };
+              };
+            };
+          };
+
+        caclient =
+          { config, pkgs, ... }: {
+            security.acme.defaults.server = "https://caserver:8443/acme/acme/directory";
+            security.acme.defaults.email = "root@example.org";
+            security.acme.acceptTerms = true;
+
+            security.pki.certificateFiles = [ "${test-certificates}/root_ca.crt" ];
+
+            networking.firewall.allowedTCPPorts = [ 80 443 ];
+
+            services.nginx = {
+              enable = true;
+              virtualHosts = {
+                "caclient" = {
+                  forceSSL = true;
+                  enableACME = true;
+                };
+              };
+            };
+          };
+
+        catester = { config, pkgs, ... }: {
+          security.pki.certificateFiles = [ "${test-certificates}/root_ca.crt" ];
+        };
+      };
+
+    testScript =
+      ''
+        catester.start()
+        caserver.wait_for_unit("step-ca.service")
+        caclient.wait_for_unit("acme-finished-caclient.target")
+        catester.succeed("curl https://caclient/ | grep \"Welcome to nginx!\"")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/stratis/default.nix b/nixpkgs/nixos/tests/stratis/default.nix
new file mode 100644
index 000000000000..42daadd5fcaa
--- /dev/null
+++ b/nixpkgs/nixos/tests/stratis/default.nix
@@ -0,0 +1,8 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../../.. { inherit system; }
+}:
+
+{
+  simple = import ./simple.nix { inherit system pkgs; };
+  encryption = import ./encryption.nix { inherit system pkgs; };
+}
diff --git a/nixpkgs/nixos/tests/stratis/encryption.nix b/nixpkgs/nixos/tests/stratis/encryption.nix
new file mode 100644
index 000000000000..81b5f92b4ac4
--- /dev/null
+++ b/nixpkgs/nixos/tests/stratis/encryption.nix
@@ -0,0 +1,32 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+  {
+    name = "stratis";
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ nickcao ];
+    };
+
+    nodes.machine = { pkgs, ... }: {
+      services.stratis.enable = true;
+      virtualisation.emptyDiskImages = [ 2048 ];
+    };
+
+    testScript =
+      let
+        testkey1 = pkgs.writeText "testkey1" "supersecret1";
+        testkey2 = pkgs.writeText "testkey2" "supersecret2";
+      in
+      ''
+        machine.wait_for_unit("stratisd")
+        # test creation of encrypted pool and filesystem
+        machine.succeed("stratis key  set    testkey1  --keyfile-path ${testkey1}")
+        machine.succeed("stratis key  set    testkey2  --keyfile-path ${testkey2}")
+        machine.succeed("stratis pool create testpool /dev/vdb --key-desc testkey1")
+        machine.succeed("stratis fs   create testpool testfs")
+        # test rebinding encrypted pool
+        machine.succeed("stratis pool rebind keyring  testpool testkey2")
+        # test restarting encrypted pool
+        machine.succeed("stratis pool stop  --name testpool")
+        machine.succeed("stratis pool start --name testpool --unlock-method keyring")
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/stratis/simple.nix b/nixpkgs/nixos/tests/stratis/simple.nix
new file mode 100644
index 000000000000..543789f59c05
--- /dev/null
+++ b/nixpkgs/nixos/tests/stratis/simple.nix
@@ -0,0 +1,39 @@
+import ../make-test-python.nix ({ pkgs, ... }:
+  {
+    name = "stratis";
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ nickcao ];
+    };
+
+    nodes.machine = { pkgs, ... }: {
+      services.stratis.enable = true;
+      virtualisation.emptyDiskImages = [ 2048 1024 1024 1024 ];
+    };
+
+    testScript = ''
+      machine.wait_for_unit("stratisd")
+      # test pool creation
+      machine.succeed("stratis pool create     testpool /dev/vdb")
+      machine.succeed("stratis pool add-data   testpool /dev/vdc")
+      machine.succeed("stratis pool init-cache testpool /dev/vdd")
+      machine.succeed("stratis pool add-cache  testpool /dev/vde")
+      # test filesystem creation and rename
+      machine.succeed("stratis filesystem create testpool testfs0")
+      machine.succeed("stratis filesystem rename testpool testfs0 testfs1")
+      # test snapshot
+      machine.succeed("mkdir -p /mnt/testfs1 /mnt/testfs2")
+      machine.wait_for_file("/dev/stratis/testpool/testfs1")
+      machine.succeed("mount /dev/stratis/testpool/testfs1 /mnt/testfs1")
+      machine.succeed("echo test0 > /mnt/testfs1/test0")
+      machine.succeed("echo test1 > /mnt/testfs1/test1")
+      machine.succeed("stratis filesystem snapshot testpool testfs1 testfs2")
+      machine.succeed("echo test2 > /mnt/testfs1/test1")
+      machine.wait_for_file("/dev/stratis/testpool/testfs2")
+      machine.succeed("mount /dev/stratis/testpool/testfs2 /mnt/testfs2")
+      assert "test0" in machine.succeed("cat /mnt/testfs1/test0")
+      assert "test0" in machine.succeed("cat /mnt/testfs2/test0")
+      assert "test2" in machine.succeed("cat /mnt/testfs1/test1")
+      assert "test1" in machine.succeed("cat /mnt/testfs2/test1")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/strongswan-swanctl.nix b/nixpkgs/nixos/tests/strongswan-swanctl.nix
new file mode 100644
index 000000000000..0cf181ee62a5
--- /dev/null
+++ b/nixpkgs/nixos/tests/strongswan-swanctl.nix
@@ -0,0 +1,148 @@
+# This strongswan-swanctl test is based on:
+# https://www.strongswan.org/testing/testresults/swanctl/rw-psk-ipv4/index.html
+# https://github.com/strongswan/strongswan/tree/master/testing/tests/swanctl/rw-psk-ipv4
+#
+# The roadwarrior carol sets up a connection to gateway moon. The authentication
+# is based on pre-shared keys and IPv4 addresses. Upon the successful
+# establishment of the IPsec tunnels, the specified updown script automatically
+# inserts iptables-based firewall rules that allow the tunneled traffic to
+# pass. To test both the tunnel and the firewall, carol pings the client
+# alice behind the gateway moon.
+#
+#     alice                       moon                        carol
+#      eth1------vlan_0------eth1        eth2------vlan_1------eth1
+#   192.168.0.1         192.168.0.3  192.168.1.3           192.168.1.2
+#
+# See the NixOS manual for how to run this test:
+# https://nixos.org/nixos/manual/index.html#sec-running-nixos-tests-interactively
+
+import ./make-test-python.nix ({ pkgs, ...} :
+
+let
+  allowESP = "iptables --insert INPUT --protocol ESP --jump ACCEPT";
+
+  # Shared VPN settings:
+  vlan0         = "192.168.0.0/24";
+  carolIp       = "192.168.1.2";
+  moonIp        = "192.168.1.3";
+  version       = 2;
+  secret        = "0sFpZAZqEN6Ti9sqt4ZP5EWcqx";
+  esp_proposals = [ "aes128gcm128-x25519" ];
+  proposals     = [ "aes128-sha256-x25519" ];
+in {
+  name = "strongswan-swanctl";
+  meta.maintainers = with pkgs.lib.maintainers; [ basvandijk ];
+  nodes = {
+
+    alice = { ... } : {
+      virtualisation.vlans = [ 0 ];
+      networking = {
+        dhcpcd.enable = false;
+        defaultGateway = "192.168.0.3";
+      };
+    };
+
+    moon = { config, ...} :
+      let strongswan = config.services.strongswan-swanctl.package;
+      in {
+        virtualisation.vlans = [ 0 1 ];
+        networking = {
+          dhcpcd.enable = false;
+          firewall = {
+            allowedUDPPorts = [ 4500 500 ];
+            extraCommands = allowESP;
+          };
+          nat = {
+            enable             = true;
+            internalIPs        = [ vlan0 ];
+            internalInterfaces = [ "eth1" ];
+            externalIP         = moonIp;
+            externalInterface  = "eth2";
+          };
+        };
+        environment.systemPackages = [ strongswan ];
+        services.strongswan-swanctl = {
+          enable = true;
+          swanctl = {
+            connections = {
+              rw = {
+                local_addrs = [ moonIp ];
+                local.main = {
+                  auth = "psk";
+                };
+                remote.main = {
+                  auth = "psk";
+                };
+                children = {
+                  net = {
+                    local_ts = [ vlan0 ];
+                    updown = "${strongswan}/libexec/ipsec/_updown iptables";
+                    inherit esp_proposals;
+                  };
+                };
+                inherit version;
+                inherit proposals;
+              };
+            };
+            secrets = {
+              ike.carol = {
+                id.main = carolIp;
+                inherit secret;
+              };
+            };
+          };
+        };
+      };
+
+    carol = { config, ...} :
+      let strongswan = config.services.strongswan-swanctl.package;
+      in {
+        virtualisation.vlans = [ 1 ];
+        networking = {
+          dhcpcd.enable = false;
+          firewall.extraCommands = allowESP;
+        };
+        environment.systemPackages = [ strongswan ];
+        services.strongswan-swanctl = {
+          enable = true;
+          swanctl = {
+            connections = {
+              home = {
+                local_addrs = [ carolIp ];
+                remote_addrs = [ moonIp ];
+                local.main = {
+                  auth = "psk";
+                  id = carolIp;
+                };
+                remote.main = {
+                  auth = "psk";
+                  id = moonIp;
+                };
+                children = {
+                  home = {
+                    remote_ts = [ vlan0 ];
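+                    # a trap policy lets the ping below trigger the
+                    # tunnel negotiation on first use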
+                    start_action = "trap";
+                    updown = "${strongswan}/libexec/ipsec/_updown iptables";
+                    inherit esp_proposals;
+                  };
+                };
+                inherit version;
+                inherit proposals;
+              };
+            };
+            secrets = {
+              ike.moon = {
+                id.main = moonIp;
+                inherit secret;
+              };
+            };
+          };
+        };
+      };
+
+  };
+  testScript = ''
+    start_all()
+    carol.wait_until_succeeds("ping -c 1 alice")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/stunnel.nix b/nixpkgs/nixos/tests/stunnel.nix
new file mode 100644
index 000000000000..07fba435d4df
--- /dev/null
+++ b/nixpkgs/nixos/tests/stunnel.nix
@@ -0,0 +1,179 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  stunnelCommon = {
+    services.stunnel = {
+      enable = true;
+      user = "stunnel";
+    };
+    users.groups.stunnel = { };
+    users.users.stunnel = {
+      isSystemUser = true;
+      group = "stunnel";
+    };
+  };
+  makeCert = { config, pkgs, ... }: {
+    systemd.services.create-test-cert = {
+      wantedBy = [ "sysinit.target" ];
+      before = [ "sysinit.target" ];
+      unitConfig.DefaultDependencies = false;
+      script = ''
+        ${pkgs.openssl}/bin/openssl req -batch -x509 -newkey rsa -nodes -out /test-cert.pem -keyout /test-key.pem -subj /CN=${config.networking.hostName}
+        ( umask 077; cat /test-key.pem /test-cert.pem > /test-key-and-cert.pem )
+        chown stunnel /test-key.pem /test-key-and-cert.pem
+    '';
+    };
+  };
+  serverCommon = { pkgs, ... }: {
+    networking.firewall.allowedTCPPorts = [ 443 ];
+    services.stunnel.servers.https = {
+      accept = "443";
+      connect = 80;
+      cert = "/test-key-and-cert.pem";
+    };
+    systemd.services.simple-webserver = {
+      wantedBy = [ "multi-user.target" ];
+      script = ''
+        cd /etc/webroot
+        ${pkgs.python3}/bin/python -m http.server 80
+      '';
+    };
+  };
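+  # copyCert emits Python test-script lines that read the self-signed
+  # certificate from `src` and write it to `filename` on `dest`, for use
+  # as a trust anchor there.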
+  copyCert = src: dest: filename: ''
+    from shlex import quote
+    ${src}.wait_for_file("/test-key-and-cert.pem")
+    server_cert = ${src}.succeed("cat /test-cert.pem")
+    ${dest}.succeed("echo %s > ${filename}" % quote(server_cert))
+  '';
+
+in {
+  basicServer = makeTest {
+    name = "basicServer";
+
+    nodes = {
+      client = { };
+      server = {
+        imports = [ makeCert serverCommon stunnelCommon ];
+        environment.etc."webroot/index.html".text = "well met";
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      ${copyCert "server" "client" "/authorized-server-cert.crt"}
+
+      server.wait_for_unit("simple-webserver")
+      server.wait_for_unit("stunnel")
+
+      client.succeed("curl --fail --cacert /authorized-server-cert.crt https://server/ > out")
+      client.succeed('[[ "$(< out)" == "well met" ]]')
+    '';
+  };
+
+  serverAndClient = makeTest {
+    name = "serverAndClient";
+
+    nodes = {
+      client = {
+        imports = [ stunnelCommon ];
+        services.stunnel.clients = {
+          httpsClient = {
+            accept = "80";
+            connect = "server:443";
+            CAFile = "/authorized-server-cert.crt";
+          };
+          httpsClientWithHostVerify = {
+            accept = "81";
+            connect = "server:443";
+            CAFile = "/authorized-server-cert.crt";
+            verifyHostname = "server";
+          };
+          httpsClientWithHostVerifyFail = {
+            accept = "82";
+            connect = "server:443";
+            CAFile = "/authorized-server-cert.crt";
+            verifyHostname = "wronghostname";
+          };
+        };
+      };
+      server = {
+        imports = [ makeCert serverCommon stunnelCommon ];
+        environment.etc."webroot/index.html".text = "hello there";
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      ${copyCert "server" "client" "/authorized-server-cert.crt"}
+
+      server.wait_for_unit("simple-webserver")
+      server.wait_for_unit("stunnel")
+
+      # In case stunnel came up before we got the server's cert copied over
+      client.succeed("systemctl reload-or-restart stunnel")
+
+      client.succeed("curl --fail http://localhost/ > out")
+      client.succeed('[[ "$(< out)" == "hello there" ]]')
+
+      client.succeed("curl --fail http://localhost:81/ > out")
+      client.succeed('[[ "$(< out)" == "hello there" ]]')
+
+      client.fail("curl --fail http://localhost:82/ > out")
+      client.succeed('[[ "$(< out)" == "" ]]')
+    '';
+  };
+
+  mutualAuth = makeTest {
+    name = "mutualAuth";
+
+    nodes = rec {
+      client = {
+        imports = [ makeCert stunnelCommon ];
+        services.stunnel.clients.authenticated-https = {
+          accept = "80";
+          connect = "server:443";
+          verifyPeer = true;
+          CAFile = "/authorized-server-cert.crt";
+          cert = "/test-cert.pem";
+          key = "/test-key.pem";
+        };
+      };
+      wrongclient = client;
+      server = {
+        imports = [ makeCert serverCommon stunnelCommon ];
+        services.stunnel.servers.https = {
+          CAFile = "/authorized-client-certs.crt";
+          verifyPeer = true;
+        };
+        environment.etc."webroot/index.html".text = "secret handshake";
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      ${copyCert "server" "client" "/authorized-server-cert.crt"}
+      ${copyCert "client" "server" "/authorized-client-certs.crt"}
+      ${copyCert "server" "wrongclient" "/authorized-server-cert.crt"}
+
+      # In case stunnel came up before we got the cross-certs in place
+      client.succeed("systemctl reload-or-restart stunnel")
+      server.succeed("systemctl reload-or-restart stunnel")
+      wrongclient.succeed("systemctl reload-or-restart stunnel")
+
+      server.wait_for_unit("simple-webserver")
+      client.fail("curl --fail --insecure https://server/ > out")
+      client.succeed('[[ "$(< out)" == "" ]]')
+      client.succeed("curl --fail http://localhost/ > out")
+      client.succeed('[[ "$(< out)" == "secret handshake" ]]')
+      wrongclient.fail("curl --fail http://localhost/ > out")
+      wrongclient.succeed('[[ "$(< out)" == "" ]]')
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/sudo-rs.nix b/nixpkgs/nixos/tests/sudo-rs.nix
new file mode 100644
index 000000000000..6006863217b6
--- /dev/null
+++ b/nixpkgs/nixos/tests/sudo-rs.nix
@@ -0,0 +1,101 @@
+# Some tests to ensure sudo-rs is working properly.
+{ pkgs, ... }:
+let
+  inherit (pkgs.lib) mkIf optionalString;
+  password = "helloworld";
+in
+  import ./make-test-python.nix ({ lib, pkgs, ...} : {
+    name = "sudo-rs";
+    meta.maintainers = pkgs.sudo-rs.meta.maintainers;
+
+    nodes.machine =
+      { lib, ... }:
+      {
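+        # faketty gives the `su`/`sudo` invocations in the test script a pseudo-terminal (sudo-rs expects a TTY when prompting for a password).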
+        environment.systemPackages = [ pkgs.faketty ];
+        users.groups = { foobar = {}; barfoo = {}; baz = { gid = 1337; }; };
+        users.users = {
+          test0 = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+          test1 = { isNormalUser = true; password = password; };
+          test2 = { isNormalUser = true; extraGroups = [ "foobar" ]; password = password; };
+          test3 = { isNormalUser = true; extraGroups = [ "barfoo" ]; };
+          test4 = { isNormalUser = true; extraGroups = [ "baz" ]; };
+          test5 = { isNormalUser = true; };
+        };
+
+        security.sudo.enable = false;
+
+        security.sudo-rs = {
+          enable = true;
+          package = pkgs.sudo-rs;
+          wheelNeedsPassword = false;
+
+          extraRules = [
+            # SUDOERS SYNTAX CHECK (test whether the module produces valid output;
+            # errors are detected by the visudo checks).
+
+            # These should not create any entries
+            { users = [ "notest1" ]; commands = [ ]; }
+            { commands = [ { command = "ALL"; options = [ ]; } ]; }
+
+            # Test defining commands with the options syntax, though not setting any options
+            { users = [ "notest2" ]; commands = [ { command = "ALL"; options = [ ]; } ]; }
+
+
+            # CONFIGURATION FOR TEST CASES
+            { users = [ "test1" ]; groups = [ "foobar" ]; commands = [ "ALL" ]; }
+            { groups = [ "barfoo" 1337 ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; }
+            { users = [ "test5" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" ]; } ]; runAs = "test1:barfoo"; }
+          ];
+        };
+      };
+
+    nodes.strict = { ... }: {
+      environment.systemPackages = [ pkgs.faketty ];
+      users.users = {
+        admin = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+        noadmin = { isNormalUser = true; };
+      };
+
+      security.sudo.enable = false;
+
+      security.sudo-rs = {
+        package = pkgs.sudo-rs;
+        enable = true;
+        wheelNeedsPassword = false;
+        execWheelOnly = true;
+      };
+    };
+
+    testScript =
+      ''
+        with subtest("users in wheel group should have passwordless sudo"):
+            machine.succeed('faketty -- su - test0 -c "sudo -u root true"')
+
+        with subtest("test1 user should have sudo with password"):
+            machine.succeed('faketty -- su - test1 -c "echo ${password} | sudo -S -u root true"')
+
+        with subtest("test1 user should not be able to use sudo without password"):
+            machine.fail('faketty -- su - test1 -c "sudo -n -u root true"')
+
+        with subtest("users in group 'foobar' should be able to use sudo with password"):
+            machine.succeed('faketty -- su - test2 -c "echo ${password} | sudo -S -u root true"')
+
+        with subtest("users in group 'barfoo' should be able to use sudo without password"):
+            machine.succeed("sudo -u test3 sudo -n -u root true")
+
+        with subtest("users in group 'baz' (GID 1337)"):
+            machine.succeed("sudo -u test4 sudo -n -u root echo true")
+
+        with subtest("test5 user should be able to run commands under test1"):
+            machine.succeed("sudo -u test5 sudo -n -u test1 true")
+
+        with subtest("test5 user should not be able to run commands under root"):
+            machine.fail("sudo -u test5 sudo -n -u root true 2>/dev/null")
+
+        with subtest("users in wheel should be able to run sudo despite execWheelOnly"):
+            strict.succeed('faketty -- su - admin -c "sudo -u root true"')
+
+        with subtest("non-wheel users should be unable to run sudo thanks to execWheelOnly"):
+            strict.fail('faketty -- su - noadmin -c "sudo --help"')
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/sudo.nix b/nixpkgs/nixos/tests/sudo.nix
new file mode 100644
index 000000000000..1fe478f0bff1
--- /dev/null
+++ b/nixpkgs/nixos/tests/sudo.nix
@@ -0,0 +1,103 @@
+# Some tests to ensure sudo is working properly.
+
+let
+  password = "helloworld";
+in
+  import ./make-test-python.nix ({ lib, pkgs, ...} : {
+    name = "sudo";
+    meta.maintainers = pkgs.sudo.meta.maintainers;
+
+    nodes.machine =
+      { lib, ... }:
+      {
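+        # "baz" gets a fixed GID so that an extraRules entry below can refer to the group numerically (1337).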
+        users.groups = { foobar = {}; barfoo = {}; baz = { gid = 1337; }; };
+        users.users = {
+          test0 = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+          test1 = { isNormalUser = true; password = password; };
+          test2 = { isNormalUser = true; extraGroups = [ "foobar" ]; password = password; };
+          test3 = { isNormalUser = true; extraGroups = [ "barfoo" ]; };
+          test4 = { isNormalUser = true; extraGroups = [ "baz" ]; };
+          test5 = { isNormalUser = true; };
+        };
+
+        security.sudo = {
+          # Explicitly _not_ defining 'enable = true;' here, to check that sudo is enabled by default
+
+          wheelNeedsPassword = false;
+
+          extraConfig = ''
+            Defaults lecture="never"
+          '';
+
+          extraRules = [
+            # SUDOERS SYNTAX CHECK (test whether the module produces valid output;
+            # errors are detected by the visudo checks).
+
+            # These should not create any entries
+            { users = [ "notest1" ]; commands = [ ]; }
+            { commands = [ { command = "ALL"; options = [ ]; } ]; }
+
+            # Test defining commands with the options syntax, though not setting any options
+            { users = [ "notest2" ]; commands = [ { command = "ALL"; options = [ ]; } ]; }
+
+
+            # CONFIGURATION FOR TEST CASES
+            { users = [ "test1" ]; groups = [ "foobar" ]; commands = [ "ALL" ]; }
+            { groups = [ "barfoo" 1337 ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" "NOSETENV" ]; } ]; }
+            { users = [ "test5" ]; commands = [ { command = "ALL"; options = [ "NOPASSWD" "SETENV" ]; } ]; runAs = "test1:barfoo"; }
+          ];
+        };
+      };
+
+    nodes.strict = { ... }: {
+      users.users = {
+        admin = { isNormalUser = true; extraGroups = [ "wheel" ]; };
+        noadmin = { isNormalUser = true; };
+      };
+
+      security.sudo = {
+        enable = true;
+        wheelNeedsPassword = false;
+        execWheelOnly = true;
+      };
+    };
+
+    testScript =
+      ''
+        with subtest("users in wheel group should have passwordless sudo"):
+            machine.succeed('su - test0 -c "sudo -u root true"')
+
+        with subtest("test1 user should have sudo with password"):
+            machine.succeed('su - test1 -c "echo ${password} | sudo -S -u root true"')
+
+        with subtest("test1 user should not be able to use sudo without password"):
+            machine.fail('su - test1 -c "sudo -n -u root true"')
+
+        with subtest("users in group 'foobar' should be able to use sudo with password"):
+            machine.succeed('su - test2 -c "echo ${password} | sudo -S -u root true"')
+
+        with subtest("users in group 'barfoo' should be able to use sudo without password"):
+            machine.succeed("sudo -u test3 sudo -n -u root true")
+
+        with subtest("users in group 'baz' (GID 1337)"):
+            machine.succeed("sudo -u test4 sudo -n -u root echo true")
+
+        with subtest("test5 user should be able to run commands under test1"):
+            machine.succeed("sudo -u test5 sudo -n -u test1 true")
+
+        with subtest("test5 user should not be able to run commands under root"):
+            machine.fail("sudo -u test5 sudo -n -u root true")
+
+        with subtest("test5 user should be able to keep their environment"):
+            machine.succeed("sudo -u test5 sudo -n -E -u test1 true")
+
+        with subtest("users in group 'barfoo' should not be able to keep their environment"):
+            machine.fail("sudo -u test3 sudo -n -E -u root true")
+
+        with subtest("users in wheel should be able to run sudo despite execWheelOnly"):
+            strict.succeed('su - admin -c "sudo -u root true"')
+
+        with subtest("non-wheel users should be unable to run sudo thanks to execWheelOnly"):
+            strict.fail('su - noadmin -c "sudo --help"')
+      '';
+  })
diff --git a/nixpkgs/nixos/tests/swap-file-btrfs.nix b/nixpkgs/nixos/tests/swap-file-btrfs.nix
new file mode 100644
index 000000000000..35b9fb4fa50a
--- /dev/null
+++ b/nixpkgs/nixos/tests/swap-file-btrfs.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ lib, ... }:
+{
+  name = "swap-file-btrfs";
+
+  meta.maintainers = with lib.maintainers; [ oxalica ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    {
+      virtualisation.useDefaultFilesystems = false;
+
+      virtualisation.rootDevice = "/dev/vda";
+
+      boot.initrd.postDeviceCommands = ''
+        ${pkgs.btrfs-progs}/bin/mkfs.btrfs --label root /dev/vda
+      '';
+
+      virtualisation.fileSystems = {
+        "/" = {
+          device = "/dev/disk/by-label/root";
+          fsType = "btrfs";
+        };
+      };
+
+      swapDevices = [
+        {
+          device = "/var/swapfile";
+          size = 1; # 1MiB.
+        }
+      ];
+    };
+
+  testScript = ''
+    machine.wait_for_unit('var-swapfile.swap')
+    # Ensure the swap file creation script ran to completion without failing when creating the swap file
+    machine.fail("systemctl is-failed --quiet mkswap-var-swapfile.service")
+    machine.succeed("stat --file-system --format=%T /var/swapfile | grep btrfs")
+    # First run. Auto creation.
+    machine.succeed("swapon --show | grep /var/swapfile")
+
+    machine.shutdown()
+    machine.start()
+
+    # Second run. Use it as-is.
+    machine.wait_for_unit('var-swapfile.swap')
+    # Ensure the swap file creation script ran to completion without failing when the swap file already exists
+    machine.fail("systemctl is-failed --quiet mkswap-var-swapfile.service")
+    machine.succeed("swapon --show | grep /var/swapfile")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/swap-partition.nix b/nixpkgs/nixos/tests/swap-partition.nix
new file mode 100644
index 000000000000..ddcaeb95453e
--- /dev/null
+++ b/nixpkgs/nixos/tests/swap-partition.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "swap-partition";
+
+  nodes.machine =
+    { config, pkgs, lib, ... }:
+    {
+      virtualisation.useDefaultFilesystems = false;
+
+      virtualisation.rootDevice = "/dev/vda1";
+
+      boot.initrd.postDeviceCommands = ''
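+        # On first boot, split /dev/vda into a root partition and a ~250MiB swap partition, then format them below.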
+        if ! test -b /dev/vda1; then
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mklabel msdos
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary 1MiB -250MiB
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary -250MiB 100%
+          sync
+        fi
+
+        FSTYPE=$(blkid -o value -s TYPE /dev/vda1 || true)
+        if test -z "$FSTYPE"; then
+          ${pkgs.e2fsprogs}/bin/mke2fs -t ext4 -L root /dev/vda1
+          ${pkgs.util-linux}/bin/mkswap --label swap /dev/vda2
+        fi
+      '';
+
+      virtualisation.fileSystems = {
+        "/" = {
+          device = "/dev/disk/by-label/root";
+          fsType = "ext4";
+        };
+      };
+
+      swapDevices = [
+        {
+          device = "/dev/disk/by-label/swap";
+        }
+      ];
+    };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    with subtest("Swap is active"):
+      # Doesn't matter if the numbers reported by `free` are slightly off due to unit conversions.
+      machine.succeed("free -h | grep -E 'Swap:\s+2[45][0-9]Mi'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/swap-random-encryption.nix b/nixpkgs/nixos/tests/swap-random-encryption.nix
new file mode 100644
index 000000000000..9e919db65dde
--- /dev/null
+++ b/nixpkgs/nixos/tests/swap-random-encryption.nix
@@ -0,0 +1,80 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "swap-random-encryption";
+
+  nodes.machine =
+    { config, pkgs, lib, ... }:
+    {
+      environment.systemPackages = [ pkgs.cryptsetup ];
+
+      virtualisation.useDefaultFilesystems = false;
+
+      virtualisation.rootDevice = "/dev/vda1";
+
+      boot.initrd.postDeviceCommands = ''
+        if ! test -b /dev/vda1; then
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mklabel msdos
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary 1MiB -250MiB
+          ${pkgs.parted}/bin/parted --script /dev/vda -- mkpart primary -250MiB 100%
+          sync
+        fi
+
+        FSTYPE=$(blkid -o value -s TYPE /dev/vda1 || true)
+        if test -z "$FSTYPE"; then
+          ${pkgs.e2fsprogs}/bin/mke2fs -t ext4 -L root /dev/vda1
+        fi
+      '';
+
+      virtualisation.fileSystems = {
+        "/" = {
+          device = "/dev/disk/by-label/root";
+          fsType = "ext4";
+        };
+      };
+
+      swapDevices = [
+        {
+          device = "/dev/vda2";
+
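+          # Encrypt swap with a fresh random key on every boot; the cipher, key size and sector size are verified by the test script below.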
+          randomEncryption = {
+            enable = true;
+            cipher = "aes-xts-plain64";
+            keySize = 512;
+            sectorSize = 4096;
+          };
+        }
+      ];
+    };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    with subtest("Swap is active"):
+      # Doesn't matter if the numbers reported by `free` are slightly off due to unit conversions.
+      machine.succeed("free -h | grep -E 'Swap:\s+2[45][0-9]Mi'")
+
+    with subtest("Swap device has 4k sector size"):
+      import json
+      result = json.loads(machine.succeed("lsblk -Jo PHY-SEC,LOG-SEC /dev/mapper/dev-vda2"))
+      block_devices = result["blockdevices"]
+      if len(block_devices) != 1:
+        raise Exception("lsblk output did not report exactly one block device")
+
+      swapDevice = block_devices[0]
+      if not (swapDevice["phy-sec"] == 4096 and swapDevice["log-sec"] == 4096):
+        raise Exception("swap device does not have the sector size specified in the configuration")
+
+    with subtest("Swap encrypt has assigned cipher and keysize"):
+      import re
+
+      results = machine.succeed("cryptsetup status dev-vda2").splitlines()
+
+      cipher_pattern = re.compile(r"\s*cipher:\s+aes-xts-plain64\s*")
+      if not any(cipher_pattern.fullmatch(line) for line in results):
+        raise Exception ("swap device encryption does not use the cipher specified in the configuration")
+
+      key_size_pattern = re.compile(r"\s*keysize:\s+512\s+bits\s*")
+      if not any(key_size_pattern.fullmatch(line) for line in results):
+        raise Exception ("swap device encryption does not use the key size specified in the configuration")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sway.nix b/nixpkgs/nixos/tests/sway.nix
new file mode 100644
index 000000000000..695d4a770810
--- /dev/null
+++ b/nixpkgs/nixos/tests/sway.nix
@@ -0,0 +1,193 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "sway";
+  meta = {
+    maintainers = with lib.maintainers; [ primeos synthetica ];
+  };
+
+  # testScriptWithTypes:49: error: Cannot call function of unknown type
+  #           (machine.succeed if succeed else machine.execute)(
+  #           ^
+  # Found 1 error in 1 file (checked 1 source file)
+  skipTypeCheck = true;
+
+  nodes.machine = { config, ... }: {
+    # Automatically login on tty1 as a normal user:
+    imports = [ ./common/user-account.nix ];
+    services.getty.autologinUser = "alice";
+
+    environment = {
+      # For glinfo and wayland-info:
+      systemPackages = with pkgs; [ mesa-demos wayland-utils alacritty ];
+      # Use a fixed SWAYSOCK path (for swaymsg):
+      variables = {
+        "SWAYSOCK" = "/tmp/sway-ipc.sock";
+        # TODO: Investigate if we can get hardware acceleration to work (via
+        # virtio-gpu and Virgil). We currently have to use the Pixman software
+        # renderer since the GLES2 renderer doesn't work inside the VM (even
+        # with WLR_RENDERER_ALLOW_SOFTWARE):
+        # "WLR_RENDERER_ALLOW_SOFTWARE" = "1";
+        "WLR_RENDERER" = "pixman";
+      };
+      # For convenience:
+      shellAliases = {
+        test-x11 = "glinfo | tee /tmp/test-x11.out && touch /tmp/test-x11-exit-ok";
+        test-wayland = "wayland-info | tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok";
+      };
+
+      # To help with OCR:
+      etc."xdg/foot/foot.ini".text = lib.generators.toINI { } {
+        main = {
+          font = "inconsolata:size=14";
+        };
+        colors = rec {
+          foreground = "000000";
+          background = "ffffff";
+          regular2 = foreground;
+        };
+      };
+
+      etc."gpg-agent.conf".text = ''
+        pinentry-timeout 86400
+      '';
+    };
+
+    fonts.packages = [ pkgs.inconsolata ];
+
+    # Automatically configure and start Sway when logging in on tty1:
+    programs.bash.loginShellInit = ''
+      if [ "$(tty)" = "/dev/tty1" ]; then
+        set -e
+
+        mkdir -p ~/.config/sway
+        sed s/Mod4/Mod1/ /etc/sway/config > ~/.config/sway/config
+
+        sway --validate
+        sway && touch /tmp/sway-exit-ok
+      fi
+    '';
+
+    programs.sway.enable = true;
+
+    # To test pinentry via gpg-agent:
+    programs.gnupg.agent.enable = true;
+
+    # Need to switch to a different GPU driver than the default one (-vga std) so that Sway can launch:
+    virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
+  };
+
+  testScript = { nodes, ... }: ''
+    import shlex
+    import json
+
+    q = shlex.quote
+    NODE_GROUPS = ["nodes", "floating_nodes"]
+
+
+    def swaymsg(command: str = "", succeed=True, type="command"):
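+        """Run swaymsg as user alice and return its parsed JSON output; with succeed=False, a call that produced no output returns None."""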
+        assert command != "" or type != "command", "Must specify command or type"
+        shell = q(f"swaymsg -t {q(type)} -- {q(command)}")
+        with machine.nested(
+            f"sending swaymsg {shell!r}" + " (allowed to fail)" * (not succeed)
+        ):
+            ret = (machine.succeed if succeed else machine.execute)(
+                f"su - alice -c {shell}"
+            )
+
+        # machine.execute returns (status, output); keep only the output.
+        if not succeed:
+            _, ret = ret
+
+        if not succeed and not ret:
+            return None
+
+        parsed = json.loads(ret)
+        return parsed
+
+
+    def walk(tree):
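+        """Yield the given tree node and, recursively, every node in its "nodes" and "floating_nodes" groups."""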
+        yield tree
+        for group in NODE_GROUPS:
+            for node in tree.get(group, []):
+                yield from walk(node)
+
+
+    def wait_for_window(pattern):
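+        """Retry until the sway tree contains a window whose name includes `pattern`."""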
+        def func(last_chance):
+            nodes = (node["name"] for node in walk(swaymsg(type="get_tree")))
+
+            if last_chance:
+                nodes = list(nodes)
+                machine.log(f"Last call! Current list of windows: {nodes}")
+
+            return any(pattern in name for name in nodes)
+
+        retry(func)
+
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+
+    # To check the version:
+    print(machine.succeed("sway --version"))
+
+    # Wait for Sway to complete startup:
+    machine.wait_for_file("/run/user/1000/wayland-1")
+    machine.wait_for_file("/tmp/sway-ipc.sock")
+
+    # Test XWayland (foot does not support X):
+    swaymsg("exec WINIT_UNIX_BACKEND=x11 WAYLAND_DISPLAY=invalid alacritty")
+    wait_for_window("alice@machine")
+    machine.send_chars("test-x11\n")
+    machine.wait_for_file("/tmp/test-x11-exit-ok")
+    print(machine.succeed("cat /tmp/test-x11.out"))
+    machine.copy_from_vm("/tmp/test-x11.out")
+    machine.screenshot("alacritty_glinfo")
+    machine.succeed("pkill alacritty")
+
+    # Start a terminal (foot) on workspace 3:
+    machine.send_key("alt-3")
+    machine.sleep(3)
+    machine.send_key("alt-ret")
+    wait_for_window("alice@machine")
+    machine.send_chars("test-wayland\n")
+    machine.wait_for_file("/tmp/test-wayland-exit-ok")
+    print(machine.succeed("cat /tmp/test-wayland.out"))
+    machine.copy_from_vm("/tmp/test-wayland.out")
+    machine.screenshot("foot_wayland_info")
+    machine.send_key("alt-shift-q")
+    machine.wait_until_fails("pgrep foot")
+
+    # Test gpg-agent starting pinentry-gnome3 via D-Bus (tests if
+    # $WAYLAND_DISPLAY is correctly imported into the D-Bus user env):
+    swaymsg("exec mkdir -p ~/.gnupg")
+    swaymsg("exec cp /etc/gpg-agent.conf ~/.gnupg")
+
+    swaymsg("exec DISPLAY=INVALID gpg --no-tty --yes --quick-generate-key test", succeed=False)
+    machine.wait_until_succeeds("pgrep --exact gpg")
+    wait_for_window("gpg")
+    machine.succeed("pgrep --exact gpg")
+    machine.screenshot("gpg_pinentry")
+    machine.send_key("alt-shift-q")
+    machine.wait_until_fails("pgrep --exact gpg")
+
+    # Test swaynag:
+    def get_height():
+        return [node['rect']['height'] for node in walk(swaymsg(type="get_tree")) if node['focused']][0]
+
+    before = get_height()
+    machine.send_key("alt-shift-e")
+    retry(lambda _: get_height() < before)
+    machine.screenshot("sway_exit")
+
+    swaymsg("exec swaylock")
+    machine.wait_until_succeeds("pgrep -x swaylock")
+    machine.sleep(3)
+    machine.send_chars("${nodes.machine.config.users.users.alice.password}")
+    machine.send_key("ret")
+    machine.wait_until_fails("pgrep -x swaylock")
+
+    # Exit Sway and verify process exit status 0:
+    swaymsg("exit", succeed=False)
+    machine.wait_until_fails("pgrep -x sway")
+    machine.wait_for_file("/tmp/sway-exit-ok")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/switch-test.nix b/nixpkgs/nixos/tests/switch-test.nix
new file mode 100644
index 000000000000..5ffdf180d5e3
--- /dev/null
+++ b/nixpkgs/nixos/tests/switch-test.nix
@@ -0,0 +1,1409 @@
+# Test configuration switching.
+
+import ./make-test-python.nix ({ lib, pkgs, ...} : let
+
+  # Simple service that can either be socket-activated or that will
+  # listen on port 1234 if not socket-activated.
+  # A connection to the socket causes 'hello' to be written to the client.
+  socketTest = pkgs.writeScript "socket-test.py" /* python */ ''
+    #!${pkgs.python3}/bin/python3
+
+    from socketserver import TCPServer, StreamRequestHandler
+    import socket
+    import os
+
+
+    class Handler(StreamRequestHandler):
+        def handle(self):
+            self.wfile.write("hello".encode("utf-8"))
+
+
+    class Server(TCPServer):
+        def __init__(self, server_address, handler_cls):
+            listenFds = os.getenv('LISTEN_FDS')
+            if listenFds is None or int(listenFds) < 1:
+                print(f'Binding to {server_address}')
+                TCPServer.__init__(
+                        self, server_address, handler_cls, bind_and_activate=True)
+            else:
+                TCPServer.__init__(
+                        self, server_address, handler_cls, bind_and_activate=False)
+                # Override socket
+                print(f'Got activated by {os.getenv("LISTEN_FDNAMES")} '
+                      f'with {listenFds} FDs')
+                self.socket = socket.fromfd(3, self.address_family,
+                                            self.socket_type)
+
+
+    if __name__ == "__main__":
+        server = Server(("localhost", 1234), Handler)
+        server.serve_forever()
+  '';
+
+in {
+  name = "switch-test";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ gleber das_j ];
+  };
+
+  nodes = {
+    machine = { pkgs, lib, ... }: {
+      environment.systemPackages = [ pkgs.socat ]; # for the socket activation stuff
+      users.mutableUsers = false;
+
+      # For boot/switch testing
+      system.build.installBootLoader = lib.mkForce (pkgs.writeShellScript "install-dummy-loader" ''
+        echo "installing dummy bootloader"
+        touch /tmp/bootloader-installed
+      '');
+
+      specialisation = rec {
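+        # Each specialisation below is a target configuration that the test script switches to in order to exercise one particular unit-change code path.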
+        brokenInitInterface.configuration.config.system.extraSystemBuilderCmds = ''
+          echo "systemd 0" > $out/init-interface-version
+        '';
+
+        modifiedSystemConf.configuration.systemd.extraConfig = ''
+          # Hello world!
+        '';
+
+        addedMount.configuration.virtualisation.fileSystems."/test" = {
+          device = "tmpfs";
+          fsType = "tmpfs";
+        };
+
+        addedMountOptsModified.configuration = {
+          imports = [ addedMount.configuration ];
+          virtualisation.fileSystems."/test".options = [ "x-test" ];
+        };
+
+        addedMountDevModified.configuration = {
+          imports = [ addedMountOptsModified.configuration ];
+          virtualisation.fileSystems."/test".device = lib.mkForce "ramfs";
+        };
+
+        storeMountModified.configuration = {
+          virtualisation.fileSystems."/".device = lib.mkForce "auto";
+        };
+
+        swap.configuration.swapDevices = lib.mkVMOverride [
+          { device = "/swapfile"; size = 1; }
+        ];
+
+        simpleService.configuration = {
+          systemd.services.test = {
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/true";
+              ExecReload = "${pkgs.coreutils}/bin/true";
+            };
+          };
+        };
+
+        simpleServiceSeparateActivationScript.configuration = {
+          system.activatable = false;
+          systemd.services.test = {
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/true";
+              ExecReload = "${pkgs.coreutils}/bin/true";
+            };
+          };
+        };
+
+        simpleServiceDifferentDescription.configuration = {
+          imports = [ simpleService.configuration ];
+          systemd.services.test.description = "Test unit";
+        };
+
+        simpleServiceModified.configuration = {
+          imports = [ simpleService.configuration ];
+          systemd.services.test.serviceConfig.X-Test = true;
+        };
+
+        simpleServiceNostop.configuration = {
+          imports = [ simpleService.configuration ];
+          systemd.services.test.stopIfChanged = false;
+        };
+
+        simpleServiceReload.configuration = {
+          imports = [ simpleService.configuration ];
+          systemd.services.test = {
+            reloadIfChanged = true;
+            serviceConfig.ExecReload = "${pkgs.coreutils}/bin/true";
+          };
+        };
+
+        simpleServiceNorestart.configuration = {
+          imports = [ simpleService.configuration ];
+          systemd.services.test.restartIfChanged = false;
+        };
+
+        simpleServiceFailing.configuration = {
+          imports = [ simpleServiceModified.configuration ];
+          systemd.services.test.serviceConfig.ExecStart = lib.mkForce "${pkgs.coreutils}/bin/false";
+        };
+
+        autorestartService.configuration = {
+          # A service that immediately goes into restarting (but without failing)
+          systemd.services.autorestart = {
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              Type = "simple";
+              Restart = "always";
+              RestartSec = "20y"; # Should be long enough
+              ExecStart = "${pkgs.coreutils}/bin/true";
+            };
+          };
+        };
+
+        autorestartServiceFailing.configuration = {
+          imports = [ autorestartService.configuration ];
+          systemd.services.autorestart.serviceConfig = {
+            ExecStart = lib.mkForce "${pkgs.coreutils}/bin/false";
+          };
+        };
+
+        simpleServiceWithExtraSection.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.packages = [ (pkgs.writeTextFile {
+            name = "systemd-extra-section";
+            destination = "/etc/systemd/system/test.service";
+            text = ''
+              [X-Test]
+              X-Test-Value=a
+            '';
+          }) ];
+        };
+
+        simpleServiceWithExtraSectionOtherName.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.packages = [ (pkgs.writeTextFile {
+            name = "systemd-extra-section";
+            destination = "/etc/systemd/system/test.service";
+            text = ''
+              [X-Test2]
+              X-Test-Value=a
+            '';
+          }) ];
+        };
+
+        simpleServiceWithInstallSection.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.packages = [ (pkgs.writeTextFile {
+            name = "systemd-extra-section";
+            destination = "/etc/systemd/system/test.service";
+            text = ''
+              [Install]
+              WantedBy=multi-user.target
+            '';
+          }) ];
+        };
+
+        simpleServiceWithExtraKey.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.services.test.serviceConfig."X-Test" = "test";
+        };
+
+        simpleServiceWithExtraKeyOtherValue.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.services.test.serviceConfig."X-Test" = "test2";
+        };
+
+        simpleServiceWithExtraKeyOtherName.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.services.test.serviceConfig."X-Test2" = "test";
+        };
+
+        simpleServiceReloadTrigger.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.services.test.reloadTriggers = [ "/dev/null" ];
+        };
+
+        simpleServiceReloadTriggerModified.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.services.test.reloadTriggers = [ "/dev/zero" ];
+        };
+
+        simpleServiceReloadTriggerModifiedAndSomethingElse.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.services.test = {
+            reloadTriggers = [ "/dev/zero" ];
+            serviceConfig."X-Test" = "test";
+          };
+        };
+
+        simpleServiceReloadTriggerModifiedSomethingElse.configuration = {
+          imports = [ simpleServiceNostop.configuration ];
+          systemd.services.test.serviceConfig."X-Test" = "test";
+        };
+
+        unitWithBackslash.configuration = {
+          systemd.services."escaped\\x2ddash" = {
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/true";
+              ExecReload = "${pkgs.coreutils}/bin/true";
+            };
+          };
+        };
+
+        unitWithBackslashModified.configuration = {
+          imports = [ unitWithBackslash.configuration ];
+          systemd.services."escaped\\x2ddash".serviceConfig.X-Test = "test";
+        };
+
+        unitStartingWithDash.configuration = {
+          systemd.services."-" = {
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/true";
+            };
+          };
+        };
+
+        unitStartingWithDashModified.configuration = {
+          imports = [ unitStartingWithDash.configuration ];
+          systemd.services."-" = {
+            reloadIfChanged = true;
+            serviceConfig.ExecReload = "${pkgs.coreutils}/bin/true";
+          };
+        };
+
+        unitWithRequirement.configuration = {
+          systemd.services.required-service = {
+            wantedBy = [ "multi-user.target" ];
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/true";
+              ExecReload = "${pkgs.coreutils}/bin/true";
+            };
+          };
+          systemd.services.test-service = {
+            wantedBy = [ "multi-user.target" ];
+            requires = [ "required-service.service" ];
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/true";
+              ExecReload = "${pkgs.coreutils}/bin/true";
+            };
+          };
+        };
+
+        unitWithRequirementModified.configuration = {
+          imports = [ unitWithRequirement.configuration ];
+          systemd.services.required-service.serviceConfig.X-Test = "test";
+          systemd.services.test-service.reloadTriggers = [ "test" ];
+        };
+
+        unitWithRequirementModifiedNostart.configuration = {
+          imports = [ unitWithRequirement.configuration ];
+          systemd.services.test-service.unitConfig.RefuseManualStart = true;
+        };
+
+        unitWithTemplate.configuration = {
+          systemd.services."instantiated@".serviceConfig = {
+            Type = "oneshot";
+            RemainAfterExit = true;
+            ExecStart = "${pkgs.coreutils}/bin/true";
+            ExecReload = "${pkgs.coreutils}/bin/true";
+          };
+          systemd.services."instantiated@one" = {
+            wantedBy = [ "multi-user.target" ];
+            overrideStrategy = "asDropin";
+          };
+          systemd.services."instantiated@two" = {
+            wantedBy = [ "multi-user.target" ];
+            overrideStrategy = "asDropin";
+          };
+        };
+
+        unitWithTemplateModified.configuration = {
+          imports = [ unitWithTemplate.configuration ];
+          systemd.services."instantiated@".serviceConfig.X-Test = "test";
+        };
+
+        restart-and-reload-by-activation-script.configuration = {
+          systemd.services = rec {
+            simple-service = {
+              # No wantedBy so we can check whether the activation script's restart list actually triggers them
+              serviceConfig = {
+                Type = "oneshot";
+                RemainAfterExit = true;
+                ExecStart = "${pkgs.coreutils}/bin/true";
+                ExecReload = "${pkgs.coreutils}/bin/true";
+              };
+            };
+            "templated-simple-service@" = simple-service;
+            "templated-simple-service@instance".overrideStrategy = "asDropin";
+
+            simple-restart-service = simple-service // {
+              stopIfChanged = false;
+            };
+            "templated-simple-restart-service@" = simple-restart-service;
+            "templated-simple-restart-service@instance".overrideStrategy = "asDropin";
+
+            simple-reload-service = simple-service // {
+              reloadIfChanged = true;
+            };
+            "templated-simple-reload-service@" = simple-reload-service;
+            "templated-simple-reload-service@instance".overrideStrategy = "asDropin";
+
+            no-restart-service = simple-service // {
+              restartIfChanged = false;
+            };
+            "templated-no-restart-service@" = no-restart-service;
+            "templated-no-restart-service@instance".overrideStrategy = "asDropin";
+
+            reload-triggers = simple-service // {
+              wantedBy = [ "multi-user.target" ];
+            };
+            "templated-reload-triggers@" = simple-service;
+            "templated-reload-triggers@instance" = {
+              overrideStrategy = "asDropin";
+              wantedBy = [ "multi-user.target" ];
+            };
+
+            reload-triggers-and-restart-by-as = simple-service;
+            "templated-reload-triggers-and-restart-by-as@" = reload-triggers-and-restart-by-as;
+            "templated-reload-triggers-and-restart-by-as@instance".overrideStrategy = "asDropin";
+
+            reload-triggers-and-restart = simple-service // {
+              stopIfChanged = false; # easier to check for this
+              wantedBy = [ "multi-user.target" ];
+            };
+            "templated-reload-triggers-and-restart@" = simple-service;
+            "templated-reload-triggers-and-restart@instance" = {
+              overrideStrategy = "asDropin";
+              stopIfChanged = false; # easier to check for this
+              wantedBy = [ "multi-user.target" ];
+            };
+          };
+
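+          # Activation script that appends units to the (dry-)activation restart/reload lists, which switch-to-configuration then processes.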
+          system.activationScripts.restart-and-reload-test = {
+            supportsDryActivation = true;
+            deps = [];
+            text = ''
+              if [ "$NIXOS_ACTION" = dry-activate ]; then
+                f=/run/nixos/dry-activation-restart-list
+                g=/run/nixos/dry-activation-reload-list
+              else
+                f=/run/nixos/activation-restart-list
+                g=/run/nixos/activation-reload-list
+              fi
+              cat <<EOF >> "$f"
+              simple-service.service
+              simple-restart-service.service
+              simple-reload-service.service
+              no-restart-service.service
+              reload-triggers-and-restart-by-as.service
+              templated-simple-service@instance.service
+              templated-simple-restart-service@instance.service
+              templated-simple-reload-service@instance.service
+              templated-no-restart-service@instance.service
+              templated-reload-triggers-and-restart-by-as@instance.service
+              EOF
+
+              cat <<EOF >> "$g"
+              reload-triggers.service
+              reload-triggers-and-restart-by-as.service
+              reload-triggers-and-restart.service
+              templated-reload-triggers@instance.service
+              templated-reload-triggers-and-restart-by-as@instance.service
+              templated-reload-triggers-and-restart@instance.service
+              EOF
+            '';
+          };
+        };
+
+        restart-and-reload-by-activation-script-modified.configuration = {
+          imports = [ restart-and-reload-by-activation-script.configuration ];
+          systemd.services.reload-triggers-and-restart.serviceConfig.X-Modified = "test";
+          systemd.services."templated-reload-triggers-and-restart@instance" = {
+            overrideStrategy = "asDropin";
+            serviceConfig.X-Modified = "test";
+          };
+        };
+
+        simple-socket.configuration = {
+          systemd.services.socket-activated = {
+            description = "A socket-activated service";
+            stopIfChanged = lib.mkDefault false;
+            serviceConfig = {
+              ExecStart = socketTest;
+              ExecReload = "${pkgs.coreutils}/bin/true";
+            };
+          };
+          systemd.sockets.socket-activated = {
+            wantedBy = [ "sockets.target" ];
+            listenStreams = [ "/run/test.sock" ];
+            socketConfig.SocketMode = lib.mkDefault "0777";
+          };
+        };
+
+        simple-socket-service-modified.configuration = {
+          imports = [ simple-socket.configuration ];
+          systemd.services.socket-activated.serviceConfig.X-Test = "test";
+        };
+
+        simple-socket-stop-if-changed.configuration = {
+          imports = [ simple-socket.configuration ];
+          systemd.services.socket-activated.stopIfChanged = true;
+        };
+
+        simple-socket-stop-if-changed-and-reloadtrigger.configuration = {
+          imports = [ simple-socket.configuration ];
+          systemd.services.socket-activated = {
+            stopIfChanged = true;
+            reloadTriggers = [ "test" ];
+          };
+        };
+
+        mount.configuration = {
+          systemd.mounts = [
+            {
+              description = "Testmount";
+              what = "tmpfs";
+              type = "tmpfs";
+              where = "/testmount";
+              options = "size=1M";
+              wantedBy = [ "local-fs.target" ];
+            }
+          ];
+        };
+
+        mountOptionsModified.configuration = {
+          systemd.mounts = [
+            {
+              description = "Testmount";
+              what = "tmpfs";
+              type = "tmpfs";
+              where = "/testmount";
+              options = "size=10M";
+              wantedBy = [ "local-fs.target" ];
+            }
+          ];
+        };
+
+        mountModified.configuration = {
+          systemd.mounts = [
+            {
+              description = "Testmount";
+              what = "ramfs";
+              type = "ramfs";
+              where = "/testmount";
+              options = "size=10M";
+              wantedBy = [ "local-fs.target" ];
+            }
+          ];
+        };
+
+        timer.configuration = {
+          systemd.timers.test-timer = {
+            wantedBy = [ "timers.target" ];
+            timerConfig.OnCalendar = "@1395716396"; # chosen by fair dice roll
+          };
+          systemd.services.test-timer = {
+            serviceConfig = {
+              Type = "oneshot";
+              ExecStart = "${pkgs.coreutils}/bin/true";
+            };
+          };
+        };
+
+        timerModified.configuration = {
+          imports = [ timer.configuration ];
+          systemd.timers.test-timer.timerConfig.OnCalendar = lib.mkForce "Fri 2012-11-23 16:00:00";
+        };
+
+        hybridSleepModified.configuration = {
+          systemd.targets.hybrid-sleep.unitConfig.X-Test = true;
+        };
+
+        target.configuration = {
+          systemd.targets.test-target.wantedBy = [ "multi-user.target" ];
+          # We use this service to figure out whether the target was modified.
+          # This is the only way because targets are filtered and therefore not
+          # printed when they are started/stopped.
+          systemd.services.test-service = {
+            bindsTo = [ "test-target.target" ];
+            serviceConfig.ExecStart = "${pkgs.coreutils}/bin/sleep infinity";
+          };
+        };
+
+        targetModified.configuration = {
+          imports = [ target.configuration ];
+          systemd.targets.test-target.unitConfig.X-Test = true;
+        };
+
+        targetModifiedStopOnReconfig.configuration = {
+          imports = [ target.configuration ];
+          systemd.targets.test-target.unitConfig.X-StopOnReconfiguration = true;
+        };
+
+        path.configuration = {
+          systemd.paths.test-watch = {
+            wantedBy = [ "paths.target" ];
+            pathConfig.PathExists = "/testpath";
+          };
+          systemd.services.test-watch = {
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/touch /testpath-modified";
+            };
+          };
+        };
+
+        pathModified.configuration = {
+          imports = [ path.configuration ];
+          systemd.paths.test-watch.pathConfig.PathExists = lib.mkForce "/testpath2";
+        };
+
+        slice.configuration = {
+          systemd.slices.testslice.sliceConfig.MemoryMax = "1"; # don't allow memory allocation
+          systemd.services.testservice = {
+            serviceConfig = {
+              Type = "oneshot";
+              RemainAfterExit = true;
+              ExecStart = "${pkgs.coreutils}/bin/true";
+              Slice = "testslice.slice";
+            };
+          };
+        };
+
+        sliceModified.configuration = {
+          imports = [ slice.configuration ];
+          systemd.slices.testslice.sliceConfig.MemoryMax = lib.mkForce null;
+        };
+      };
+    };
+
+    other = {
+      users.mutableUsers = true;
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    originalSystem = nodes.machine.system.build.toplevel;
+    otherSystem = nodes.other.system.build.toplevel;
+    machine = nodes.machine.system.build.toplevel;
+
+    # Ensures failures propagate through the pipe (via pipefail); otherwise a
+    # failing switch-to-configuration would be hidden by the success of `tee`.
+    stderrRunner = pkgs.writeScript "stderr-runner" ''
+      #! ${pkgs.runtimeShell}
+      set -e
+      set -o pipefail
+      exec env -i "$@" | tee /dev/stderr
+    '';
+
+    # Returns a comma-separated representation of the given list in sorted
+    # order, matching the output format of switch-to-configuration.pl
+    sortedUnits = xs: lib.concatStringsSep ", " (builtins.sort builtins.lessThan xs);
+  in /* python */ ''
+    def switch_to_specialisation(system, name, action="test", fail=False):
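+        """Run the switch-to-configuration script of the given specialisation (or of the base system if name is empty) with the given action and return its output."""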
+        if name == "":
+            switcher = f"{system}/bin/switch-to-configuration"
+        else:
+            switcher = f"{system}/specialisation/{name}/bin/switch-to-configuration"
+        return run_switch(switcher, action, fail)
+
+    # Like switch_to_specialisation above, but takes the switch-to-configuration script (the "switcher") directly.
+    def run_switch(switcher, action="test", fail=False):
+        out = machine.fail(f"{switcher} {action} 2>&1") if fail \
+            else machine.succeed(f"{switcher} {action} 2>&1")
+        assert_lacks(out, "switch-to-configuration line")  # Perl warnings
+        return out
+
+    def assert_contains(haystack, needle):
+        if needle not in haystack:
+            print("The haystack that will cause the following exception is:")
+            print("---")
+            print(haystack)
+            print("---")
+            raise Exception(f"Expected string '{needle}' was not found")
+
+    def assert_lacks(haystack, needle):
+        if needle in haystack:
+            print("The haystack that will cause the following exception is:")
+            print("---")
+            print(haystack, end="")
+            print("---")
+            raise Exception(f"Unexpected string '{needle}' was found")
+
+
+    machine.wait_for_unit("multi-user.target")
+
+    machine.succeed(
+        "${stderrRunner} ${originalSystem}/bin/switch-to-configuration test"
+    )
+    # This tests whether the /etc/os-release parser works which is a fallback
+    # when /etc/NIXOS is missing. If the parser does not work, switch-to-configuration
+    # would fail.
+    machine.succeed("rm /etc/NIXOS")
+    machine.succeed(
+        "${stderrRunner} ${otherSystem}/bin/switch-to-configuration test"
+    )
+
+
+    with subtest("actions"):
+        # boot action
+        machine.fail("test -f /tmp/bootloader-installed")
+        out = switch_to_specialisation("${machine}", "simpleService", action="boot")
+        assert_contains(out, "installing dummy bootloader")
+        assert_lacks(out, "activating the configuration...")  # good indicator of a system activation
+        machine.succeed("test -f /tmp/bootloader-installed")
+        machine.succeed("rm /tmp/bootloader-installed")
+
+        # switch action
+        machine.fail("test -f /tmp/bootloader-installed")
+        out = switch_to_specialisation("${machine}", "", action="switch")
+        assert_contains(out, "installing dummy bootloader")
+        assert_contains(out, "activating the configuration...")  # good indicator of a system activation
+        machine.succeed("test -f /tmp/bootloader-installed")
+
+        # test and dry-activate actions are tested further down below
+
+        # invalid action fails the script
+        switch_to_specialisation("${machine}", "", action="broken-action", fail=True)
+        # no action fails the script
+        assert "Usage:" in machine.fail("${machine}/bin/switch-to-configuration 2>&1")
+
+    with subtest("init interface version"):
+        # Do not try to switch to an invalid init interface version
+        assert "incompatible" in switch_to_specialisation("${machine}", "brokenInitInterface", fail=True)
+
+    with subtest("systemd restarts"):
+        # systemd is restarted when its system.conf changes
+        out = switch_to_specialisation("${machine}", "modifiedSystemConf")
+        assert_contains(out, "restarting systemd...")
+
+    with subtest("continuing from an aborted switch"):
+        # An aborted switch will write into a file what it tried to start
+        # and a second switch should continue from this
+        machine.succeed("echo dbus.service > /run/nixos/start-list")
+        out = switch_to_specialisation("${machine}", "modifiedSystemConf")
+        assert_contains(out, "starting the following units: dbus.service\n")
+
+    with subtest("fstab mounts"):
+        switch_to_specialisation("${machine}", "")
+        # add a mountpoint
+        out = switch_to_specialisation("${machine}", "addedMount")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: test.mount\n")
+        # modify the mountpoint's options
+        out = switch_to_specialisation("${machine}", "addedMountOptsModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: test.mount\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # modify the device
+        out = switch_to_specialisation("${machine}", "addedMountDevModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.mount\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # modify both
+        out = switch_to_specialisation("${machine}", "addedMount")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.mount\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # remove the mount
+        out = switch_to_specialisation("${machine}", "")
+        assert_contains(out, "stopping the following units: test.mount\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: dbus.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # change something about the / mount
+        out = switch_to_specialisation("${machine}", "storeMountModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_contains(out, "NOT restarting the following changed units: -.mount")
+        assert_contains(out, "reloading the following units: dbus.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+    with subtest("swaps"):
+        switch_to_specialisation("${machine}", "")
+        # add a swap
+        out = switch_to_specialisation("${machine}", "swap")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: dbus.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: swapfile.swap")
+        # remove it
+        out = switch_to_specialisation("${machine}", "")
+        assert_contains(out, "stopping swap device: /swapfile")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: dbus.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+    with subtest("services"):
+        switch_to_specialisation("${machine}", "")
+        # Nothing happens when nothing is changed
+        out = switch_to_specialisation("${machine}", "")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Start a simple service
+        out = switch_to_specialisation("${machine}", "simpleService")
+        assert_lacks(out, "installing dummy bootloader")  # test does not install a bootloader
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: dbus.service\n")  # huh
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: test.service\n")
+
+        # Not changing anything doesn't do anything
+        out = switch_to_specialisation("${machine}", "simpleService")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Only changing the description does nothing
+        out = switch_to_specialisation("${machine}", "simpleServiceDifferentDescription")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Restart the simple service
+        out = switch_to_specialisation("${machine}", "simpleServiceModified")
+        assert_contains(out, "stopping the following units: test.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_contains(out, "\nstarting the following units: test.service\n")
+        assert_lacks(out, "the following new units were started:")
+
+        # Restart the service with stopIfChanged=false
+        out = switch_to_specialisation("${machine}", "simpleServiceNostop")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Reload the service with reloadIfChanged=true
+        out = switch_to_specialisation("${machine}", "simpleServiceReload")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: test.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Nothing happens when restartIfChanged=false
+        out = switch_to_specialisation("${machine}", "simpleServiceNorestart")
+        assert_lacks(out, "stopping the following units:")
+        assert_contains(out, "NOT restarting the following changed units: test.service\n")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Dry mode shows different messages
+        out = switch_to_specialisation("${machine}", "simpleService", action="dry-activate")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        assert_contains(out, "would start the following units: test.service\n")
+
+        out = switch_to_specialisation("${machine}", "", action="test")
+
+        # Ensure the service can be started when the activation script isn't in toplevel
+        # This is a lot like "Start a simple service", except activation-only deps could be gc-ed
+        out = run_switch("${nodes.machine.specialisation.simpleServiceSeparateActivationScript.configuration.system.build.separateActivationScript}/bin/switch-to-configuration");
+        assert_lacks(out, "installing dummy bootloader")  # test does not install a bootloader
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: dbus.service\n")  # huh
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: test.service\n")
+        machine.succeed("! test -e /run/current-system/activate")
+        machine.succeed("! test -e /run/current-system/dry-activate")
+        machine.succeed("! test -e /run/current-system/bin/switch-to-configuration")
+
+        # Ensure \ works in unit names
+        out = switch_to_specialisation("${machine}", "unitWithBackslash")
+        assert_contains(out, "stopping the following units: test.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: escaped\\x2ddash.service\n")
+
+        out = switch_to_specialisation("${machine}", "unitWithBackslashModified")
+        assert_contains(out, "stopping the following units: escaped\\x2ddash.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_contains(out, "\nstarting the following units: escaped\\x2ddash.service\n")
+        assert_lacks(out, "the following new units were started:")
+
+        # Ensure units can start with a dash
+        out = switch_to_specialisation("${machine}", "unitStartingWithDash")
+        assert_contains(out, "stopping the following units: escaped\\x2ddash.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: -.service\n")
+
+        # The regression only occurs when reloading units
+        out = switch_to_specialisation("${machine}", "unitStartingWithDashModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: -.service")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Ensure units that require changed units are properly reloaded
+        out = switch_to_specialisation("${machine}", "unitWithRequirement")
+        assert_contains(out, "stopping the following units: -.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: required-service.service, test-service.service\n")
+
+        out = switch_to_specialisation("${machine}", "unitWithRequirementModified")
+        assert_contains(out, "stopping the following units: required-service.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_contains(out, "\nstarting the following units: required-service.service, test-service.service\n")
+        assert_lacks(out, "the following new units were started:")
+
+        # Unless the unit asks to be not restarted
+        out = switch_to_specialisation("${machine}", "unitWithRequirementModifiedNostart")
+        assert_contains(out, "stopping the following units: required-service.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_contains(out, "\nstarting the following units: required-service.service\n")
+        assert_lacks(out, "the following new units were started:")
+
+        # Ensure templated units are restarted when the base unit changes
+        switch_to_specialisation("${machine}", "unitWithTemplate")
+        out = switch_to_specialisation("${machine}", "unitWithTemplateModified")
+        assert_contains(out, "stopping the following units: instantiated@one.service, instantiated@two.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_contains(out, "\nstarting the following units: instantiated@one.service, instantiated@two.service\n")
+        assert_lacks(out, "the following new units were started:")
+
+    with subtest("failing units"):
+        # Let the simple service fail
+        switch_to_specialisation("${machine}", "simpleServiceModified")
+        out = switch_to_specialisation("${machine}", "simpleServiceFailing", fail=True)
+        assert_contains(out, "stopping the following units: test.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_contains(out, "\nstarting the following units: test.service\n")
+        assert_lacks(out, "the following new units were started:")
+        assert_contains(out, "warning: the following units failed: test.service\n")
+        assert_contains(out, "Main PID:")  # output of systemctl
+
+        # A unit that gets into autorestart without failing is not treated as failed
+        out = switch_to_specialisation("${machine}", "autorestartService")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: autorestart.service\n")
+        machine.systemctl('stop autorestart.service')  # cancel the 20y timer
+
+        # Switching to the same system should do nothing (especially not treat the unit as failed)
+        out = switch_to_specialisation("${machine}", "autorestartService")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: autorestart.service\n")
+        machine.systemctl('stop autorestart.service')  # cancel the 20y timer
+
+        # If systemd thinks the unit has failed and is in autorestart, we should show it as failed
+        out = switch_to_specialisation("${machine}", "autorestartServiceFailing", fail=True)
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        assert_contains(out, "warning: the following units failed: autorestart.service\n")
+        assert_contains(out, "Main PID:")  # output of systemctl
+
+    with subtest("unit file parser"):
+        # Switch to a well-known state
+        switch_to_specialisation("${machine}", "simpleServiceNostop")
+
+        # Add a section
+        out = switch_to_specialisation("${machine}", "simpleServiceWithExtraSection")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Rename it
+        out = switch_to_specialisation("${machine}", "simpleServiceWithExtraSectionOtherName")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Remove it
+        out = switch_to_specialisation("${machine}", "simpleServiceNostop")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # [Install] section is ignored
+        out = switch_to_specialisation("${machine}", "simpleServiceWithInstallSection")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Add a key
+        out = switch_to_specialisation("${machine}", "simpleServiceWithExtraKey")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Change its value
+        out = switch_to_specialisation("${machine}", "simpleServiceWithExtraKeyOtherValue")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Rename it
+        out = switch_to_specialisation("${machine}", "simpleServiceWithExtraKeyOtherName")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Remove it
+        out = switch_to_specialisation("${machine}", "simpleServiceNostop")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Add a reload trigger
+        out = switch_to_specialisation("${machine}", "simpleServiceReloadTrigger")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: test.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Modify the reload trigger
+        out = switch_to_specialisation("${machine}", "simpleServiceReloadTriggerModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: test.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Modify the reload trigger and something else
+        out = switch_to_specialisation("${machine}", "simpleServiceReloadTriggerModifiedAndSomethingElse")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Remove the reload trigger
+        out = switch_to_specialisation("${machine}", "simpleServiceReloadTriggerModifiedSomethingElse")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+    with subtest("restart and reload by activation script"):
+        switch_to_specialisation("${machine}", "simpleServiceNorestart")
+        out = switch_to_specialisation("${machine}", "restart-and-reload-by-activation-script")
+        assert_contains(out, "stopping the following units: test.service\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "restarting the following units:")
+        assert_contains(out, "\nstarting the following units: ${sortedUnits [
+          "no-restart-service.service"
+          "reload-triggers-and-restart-by-as.service"
+          "simple-reload-service.service"
+          "simple-restart-service.service"
+          "simple-service.service"
+          "templated-no-restart-service@instance.service"
+          "templated-reload-triggers-and-restart-by-as@instance.service"
+          "templated-simple-reload-service@instance.service"
+          "templated-simple-restart-service@instance.service"
+          "templated-simple-service@instance.service"
+        ]}\n")
+        assert_contains(out, "the following new units were started: ${sortedUnits [
+          "no-restart-service.service"
+          "reload-triggers-and-restart-by-as.service"
+          "reload-triggers-and-restart.service"
+          "reload-triggers.service"
+          "simple-reload-service.service"
+          "simple-restart-service.service"
+          "simple-service.service"
+          "system-templated\\\\x2dno\\\\x2drestart\\\\x2dservice.slice"
+          "system-templated\\\\x2dreload\\\\x2dtriggers.slice"
+          "system-templated\\\\x2dreload\\\\x2dtriggers\\\\x2dand\\\\x2drestart.slice"
+          "system-templated\\\\x2dreload\\\\x2dtriggers\\\\x2dand\\\\x2drestart\\\\x2dby\\\\x2das.slice"
+          "system-templated\\\\x2dsimple\\\\x2dreload\\\\x2dservice.slice"
+          "system-templated\\\\x2dsimple\\\\x2drestart\\\\x2dservice.slice"
+          "system-templated\\\\x2dsimple\\\\x2dservice.slice"
+          "templated-no-restart-service@instance.service"
+          "templated-reload-triggers-and-restart-by-as@instance.service"
+          "templated-reload-triggers-and-restart@instance.service"
+          "templated-reload-triggers@instance.service"
+          "templated-simple-reload-service@instance.service"
+          "templated-simple-restart-service@instance.service"
+          "templated-simple-service@instance.service"
+        ]}\n")
+        # Switch to the same system where the example services get restarted
+        # and reloaded by the activation script
+        out = switch_to_specialisation("${machine}", "restart-and-reload-by-activation-script")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: ${sortedUnits [
+          "reload-triggers-and-restart.service"
+          "reload-triggers.service"
+          "simple-reload-service.service"
+          "templated-reload-triggers-and-restart@instance.service"
+          "templated-reload-triggers@instance.service"
+          "templated-simple-reload-service@instance.service"
+        ]}\n")
+        assert_contains(out, "restarting the following units: ${sortedUnits [
+          "reload-triggers-and-restart-by-as.service"
+          "simple-restart-service.service"
+          "simple-service.service"
+          "templated-reload-triggers-and-restart-by-as@instance.service"
+          "templated-simple-restart-service@instance.service"
+          "templated-simple-service@instance.service"
+        ]}\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # Switch to a modified system and check that modified services get restarted,
+        # while the activation script's request to merely reload them is ignored.
+        out = switch_to_specialisation("${machine}", "restart-and-reload-by-activation-script-modified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: ${sortedUnits [
+          "reload-triggers.service"
+          "simple-reload-service.service"
+          "templated-reload-triggers@instance.service"
+          "templated-simple-reload-service@instance.service"
+        ]}\n")
+        assert_contains(out, "restarting the following units: ${sortedUnits [
+          "reload-triggers-and-restart-by-as.service"
+          "reload-triggers-and-restart.service"
+          "simple-restart-service.service"
+          "simple-service.service"
+          "templated-reload-triggers-and-restart-by-as@instance.service"
+          "templated-reload-triggers-and-restart@instance.service"
+          "templated-simple-restart-service@instance.service"
+          "templated-simple-service@instance.service"
+        ]}\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # The same, but in dry mode
+        out = switch_to_specialisation("${machine}", "restart-and-reload-by-activation-script", action="dry-activate")
+        assert_lacks(out, "would stop the following units:")
+        assert_lacks(out, "would NOT stop the following changed units:")
+        assert_contains(out, "would reload the following units: ${sortedUnits [
+          "reload-triggers.service"
+          "simple-reload-service.service"
+          "templated-reload-triggers@instance.service"
+          "templated-simple-reload-service@instance.service"
+        ]}\n")
+        assert_contains(out, "would restart the following units: ${sortedUnits [
+          "reload-triggers-and-restart-by-as.service"
+          "reload-triggers-and-restart.service"
+          "simple-restart-service.service"
+          "simple-service.service"
+          "templated-reload-triggers-and-restart-by-as@instance.service"
+          "templated-reload-triggers-and-restart@instance.service"
+          "templated-simple-restart-service@instance.service"
+          "templated-simple-service@instance.service"
+        ]}\n")
+        assert_lacks(out, "\nwould start the following units:")
+
+    with subtest("socket-activated services"):
+        # Socket-activated services don't get started, just the socket
+        machine.fail("[ -S /run/test.sock ]")
+        out = switch_to_specialisation("${machine}", "simple-socket")
+        # assert_lacks(out, "stopping the following units:") not relevant
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: socket-activated.socket\n")
+        machine.succeed("[ -S /run/test.sock ]")
+
+        # Changing a non-activated service does nothing
+        out = switch_to_specialisation("${machine}", "simple-socket-service-modified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        machine.succeed("[ -S /run/test.sock ]")
+        # The unit is properly activated when the socket is accessed
+        if machine.succeed("socat - UNIX-CONNECT:/run/test.sock") != "hello":
+            raise Exception("Socket was not properly activated")  # idk how that would happen tbh
+
+        # Changing an activated service with stopIfChanged=false restarts the service
+        out = switch_to_specialisation("${machine}", "simple-socket")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: socket-activated.service\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        machine.succeed("[ -S /run/test.sock ]")
+        # Socket-activation of the unit still works
+        if machine.succeed("socat - UNIX-CONNECT:/run/test.sock") != "hello":
+            raise Exception("Socket was not properly activated after the service was restarted")
+
+        # Changing an activated service with stopIfChanged=true stops the service and
+        # socket and starts the socket
+        out = switch_to_specialisation("${machine}", "simple-socket-stop-if-changed")
+        assert_contains(out, "stopping the following units: socket-activated.service, socket-activated.socket\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_contains(out, "\nstarting the following units: socket-activated.socket\n")
+        assert_lacks(out, "the following new units were started:")
+        machine.succeed("[ -S /run/test.sock ]")
+        # Socket-activation of the unit still works
+        if machine.succeed("socat - UNIX-CONNECT:/run/test.sock") != "hello":
+            raise Exception("Socket was not properly activated after the service was restarted")
+
+        # Changing a reload trigger of a socket-activated unit only reloads it
+        out = switch_to_specialisation("${machine}", "simple-socket-stop-if-changed-and-reloadtrigger")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: socket-activated.service\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units: socket-activated.socket")
+        assert_lacks(out, "the following new units were started:")
+        machine.succeed("[ -S /run/test.sock ]")
+        # Socket-activation of the unit still works
+        if machine.succeed("socat - UNIX-CONNECT:/run/test.sock") != "hello":
+            raise Exception("Socket was not properly activated after the service was restarted")
+
+    with subtest("mounts"):
+        switch_to_specialisation("${machine}", "mount")
+        out = machine.succeed("mount | grep 'on /testmount'")
+        assert_contains(out, "size=1024k")
+        # Changing options reloads the unit
+        out = switch_to_specialisation("${machine}", "mountOptionsModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_contains(out, "reloading the following units: testmount.mount\n")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # It changed
+        out = machine.succeed("mount | grep 'on /testmount'")
+        assert_contains(out, "size=10240k")
+        # Changing anything but `Options=` restarts the unit
+        out = switch_to_specialisation("${machine}", "mountModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: testmount.mount\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # It changed
+        out = machine.succeed("mount | grep 'on /testmount'")
+        assert_contains(out, "ramfs")
+
+    with subtest("timers"):
+        switch_to_specialisation("${machine}", "timer")
+        out = machine.succeed("systemctl show test-timer.timer")
+        assert_contains(out, "OnCalendar=2014-03-25 02:59:56 UTC")
+        out = switch_to_specialisation("${machine}", "timerModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_contains(out, "\nrestarting the following units: test-timer.timer\n")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        # It changed
+        out = machine.succeed("systemctl show test-timer.timer")
+        assert_contains(out, "OnCalendar=Fri 2012-11-23 16:00:00")
+
+    with subtest("targets"):
+        # Modifying some special targets like hybrid-sleep.target does nothing
+        out = switch_to_specialisation("${machine}", "hybridSleepModified")
+        assert_contains(out, "stopping the following units: test-timer.timer\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+
+        # Adding a new target starts it
+        out = switch_to_specialisation("${machine}", "target")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: test-target.target\n")
+
+        # Changing a target doesn't print anything because the unit is filtered
+        machine.systemctl("start test-service.service")
+        out = switch_to_specialisation("${machine}", "targetModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        machine.succeed("systemctl is-active test-service.service")  # target was not restarted
+
+        # With X-StopOnReconfiguration, the target gets stopped and started
+        out = switch_to_specialisation("${machine}", "targetModifiedStopOnReconfig")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        machine.fail("systemctl is-active test-service.servce")  # target was restarted
+
+        # Remove the target by switching to the old specialisation
+        out = switch_to_specialisation("${machine}", "timerModified")
+        assert_contains(out, "stopping the following units: test-target.target\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: test-timer.timer\n")
+
+    with subtest("paths"):
+        out = switch_to_specialisation("${machine}", "path")
+        assert_contains(out, "stopping the following units: test-timer.timer\n")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_contains(out, "the following new units were started: test-watch.path\n")
+        machine.fail("test -f /testpath-modified")
+
+        # touch the file, unit should be triggered
+        machine.succeed("touch /testpath")
+        machine.wait_until_succeeds("test -f /testpath-modified")
+        machine.succeed("rm /testpath /testpath-modified")
+        machine.systemctl("stop test-watch.service")
+        switch_to_specialisation("${machine}", "pathModified")
+        machine.succeed("touch /testpath")
+        machine.fail("test -f /testpath-modified")
+        machine.succeed("touch /testpath2")
+        machine.wait_until_succeeds("test -f /testpath-modified")
+
+    # This test ensures that changes to slice configuration get applied.
+    # We test this by having a slice that allows no memory allocation at
+    # all and starting a service within it. If the service crashes, the slice
+    # is applied and if we modify the slice to allow memory allocation, the
+    # service should successfully start.
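+    # One plausible shape for such a specialisation (a sketch with assumed
+    # names, not necessarily the test's actual Nix definition):
+    #   systemd.slices.testslice.sliceConfig.MemoryMax = "0";
+    #   systemd.services.testservice = {
+    #     serviceConfig.Slice = "testslice.slice";
+    #     serviceConfig.ExecStart = "...";
+    #   };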
+    with subtest("slices"):
+        machine.succeed("echo 0 > /proc/sys/vm/panic_on_oom")  # allow OOMing
+        out = switch_to_specialisation("${machine}", "slice")
+        # assert_lacks(out, "stopping the following units:") not relevant
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        machine.fail("systemctl start testservice.service")
+
+        out = switch_to_specialisation("${machine}", "sliceModified")
+        assert_lacks(out, "stopping the following units:")
+        assert_lacks(out, "NOT restarting the following changed units:")
+        assert_lacks(out, "reloading the following units:")
+        assert_lacks(out, "\nrestarting the following units:")
+        assert_lacks(out, "\nstarting the following units:")
+        assert_lacks(out, "the following new units were started:")
+        machine.succeed("systemctl start testservice.service")
+        machine.succeed("echo 1 > /proc/sys/vm/panic_on_oom")  # disallow OOMing
+  '';
+})
diff --git a/nixpkgs/nixos/tests/sympa.nix b/nixpkgs/nixos/tests/sympa.nix
new file mode 100644
index 000000000000..80daa4134f75
--- /dev/null
+++ b/nixpkgs/nixos/tests/sympa.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "sympa";
+  meta.maintainers = with lib.maintainers; [ mmilata ];
+
+  nodes.machine =
+    { ... }:
+    {
+
+      services.sympa = {
+        enable = true;
+        domains = {
+          "lists.example.org" = {
+            webHost = "localhost";
+          };
+        };
+        listMasters = [ "bob@example.org" ];
+        web.enable = true;
+        web.https = false;
+        database = {
+          type = "PostgreSQL";
+          createLocally = true;
+        };
+      };
+    };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("sympa.service")
+    machine.wait_for_unit("wwsympa.service")
+    assert "Mailing lists service" in machine.succeed(
+        "curl --fail --insecure -L http://localhost/"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/syncthing-init.nix b/nixpkgs/nixos/tests/syncthing-init.nix
new file mode 100644
index 000000000000..97fcf2ad28d1
--- /dev/null
+++ b/nixpkgs/nixos/tests/syncthing-init.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: let
+
+  testId = "7CFNTQM-IMTJBHJ-3UWRDIU-ZGQJFR6-VCXZ3NB-XUH3KZO-N52ITXR-LAIYUAU";
+
+in {
+  name = "syncthing-init";
+  meta.maintainers = with pkgs.lib.maintainers; [ lassulus ];
+
+  nodes.machine = {
+    services.syncthing = {
+      enable = true;
+      settings.devices.testDevice = {
+        id = testId;
+      };
+      settings.folders.testFolder = {
+        path = "/tmp/test";
+        devices = [ "testDevice" ];
+      };
+      settings.gui.user = "guiUser";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("syncthing-init.service")
+    config = machine.succeed("cat /var/lib/syncthing/.config/syncthing/config.xml")
+
+    assert "testFolder" in config
+    assert "${testId}" in config
+    assert "guiUser" in config
+  '';
+})
diff --git a/nixpkgs/nixos/tests/syncthing-many-devices.nix b/nixpkgs/nixos/tests/syncthing-many-devices.nix
new file mode 100644
index 000000000000..2251bf077453
--- /dev/null
+++ b/nixpkgs/nixos/tests/syncthing-many-devices.nix
@@ -0,0 +1,203 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+# This nixosTest is supposed to check the following:
+#
+# - Whether syncthing's API handles multiple requests for many devices, see
+#   https://github.com/NixOS/nixpkgs/issues/260262
+#
+# - Whether the bash script generated for syncthing-init.service removes devices
+#   and folders that are not present in the user's configuration, which is
+#   partly injected into the script. See also:
+#   https://github.com/NixOS/nixpkgs/issues/259256
+#
+
+let
+  # Just a long path not to copy paste
+  configPath = "/var/lib/syncthing/.config/syncthing/config.xml";
+
+  # The testScript later iterates over this and the other attribute sets defined
+  # here. These settings are kept separate from the rest because they carry an
+  # ID and are therefore checked differently with xmllint.
+  settingsWithId = {
+    devices = {
+      # All of the device IDs used here were generated by the following command:
+      #
+      #    (${pkgs.syncthing}/bin/syncthing generate --home /tmp/foo\
+      #       | grep ID: | sed 's/.*ID: *//') && rm -rf /tmp/foo
+      #
+      # See also discussion at:
+      # https://forum.syncthing.net/t/how-to-generate-dummy-device-ids/20927/8
+      test_device1.id  = "IVTZ5XF-EF3GKFT-GS4AZLG-IT6H2ZP-6WK75SF-AFXQXJJ-BNRZ4N6-XPDKVAU";
+      test_device2.id  = "5C35H56-Z2GFF4F-F3IVD4B-GJYVWIE-SMDBJZN-GI66KWP-52JIQGN-4AVLYAM";
+      test_device3.id  = "XKLSKHE-BZOHV7B-WQZACEF-GTH36NP-6JSBB6L-RXS3M7C-EEVWO2L-C5B4OAJ";
+      test_device4.id  = "APN5Q7J-35GZETO-5KCLF35-ZA7KBWK-HGWPBNG-FERF24R-UTLGMEX-4VJ6PQX";
+      test_device5.id  = "D4YXQEE-5MK6LIK-BRU5QWM-ZRXJCK2-N3RQBJE-23JKTQQ-LYGDPHF-RFPZIQX";
+      test_device6.id  = "TKMCH64-T44VSLI-6FN2YLF-URBZOBR-ATO4DYX-GEDRIII-CSMRQAI-UAQMDQG";
+      test_device7.id  = "472EEBG-Q4PZCD4-4CX6PGF-XS3FSQ2-UFXBZVB-PGNXWLX-7FKBLER-NJ3EMAR";
+      test_device8.id  = "HW6KUMK-WTBG24L-2HZQXLO-TGJSG2M-2JG3FHX-5OGYRUJ-T6L5NN7-L364QAZ";
+      test_device9.id  = "YAE24AP-7LSVY4T-J74ZSEM-A2IK6RB-FGA35TP-AG4CSLU-ED4UYYY-2J2TDQU";
+      test_device10.id = "277XFSB-OFMQOBI-3XGNGUE-Y7FWRV3-QQDADIY-QIIPQ26-EOGTYKW-JP2EXAI";
+      test_device11.id = "2WWXVTN-Q3QWAAY-XFORMRM-2FDI5XZ-OGN33BD-XOLL42R-DHLT2ML-QYXDQAU";
+    };
+    # Generates a few folders with IDs and paths as written...
+    folders = lib.pipe 6 [
+      (builtins.genList (x: {
+        name = "/var/lib/syncthing/test_folder${builtins.toString x}";
+        value = {
+          id = "DontDeleteMe${builtins.toString x}";
+        };
+      }))
+      builtins.listToAttrs
+    ];
+  };
+  # Non-default options; later we check whether they were applied.
+  settingsWithoutId = {
+    options = {
+      autoUpgradeIntervalH = 0;
+      urAccepted = -1;
+    };
+    gui = {
+      theme = "dark";
+    };
+  };
+  # Used later when checking whether settings were set in config.xml:
+  checkSettingWithId = { t # t for type
+  , id
+  , not ? false
+  }: ''
+    print("Searching for a ${t} with id ${id}")
+    configVal_${t} = machine.succeed(
+        "${pkgs.libxml2}/bin/xmllint "
+        "--xpath 'string(//${t}[@id=\"${id}\"]/@id)' ${configPath}"
+    )
+    print("${t}.id = {}".format(configVal_${t}))
+    assert "${id}" ${if not then "not" else ""} in configVal_${t}
+  '';
+  # Same as checkSettingWithId, but for 'options' and 'gui'
+  checkSettingWithoutId = { t # t for type
+  , n # n for name
+  , v # v for value
+  , not ? false
+  }: ''
+    print("checking whether setting ${t}.${n} is set to ${v}")
+    configVal_${t}_${n} = machine.succeed(
+        "${pkgs.libxml2}/bin/xmllint "
+        "--xpath 'string(/configuration/${t}/${n})' ${configPath}"
+    )
+    print("${t}.${n} = {}".format(configVal_${t}_${n}))
+    assert "${v}" ${if not then "not" else ""} in configVal_${t}_${n}
+  '';
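+  # For example (a sketch of the generated Python), checkSettingWithoutId
+  # { t = "gui"; n = "theme"; v = "dark"; } expands to roughly:
+  #   configVal_gui_theme = machine.succeed(
+  #       "<xmllint> --xpath 'string(/configuration/gui/theme)' <configPath>"
+  #   )
+  #   assert "dark" in configVal_gui_theme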
+  # Defining this helper for the IDs to delete avoids some duplication: we check
+  # that they were added after our script ran (and before the systemd unit's
+  # bash script ran), and afterwards that the systemd unit removed them as
+  # expected.
+  checkSettingsToDelete = {
+    not
+  }: lib.pipe IDsToDelete [
+    (lib.mapAttrsToList (t: id:
+      checkSettingWithId {
+        inherit t id;
+        inherit not;
+      }
+    ))
+    lib.concatStrings
+  ];
+  # These IDs are added to syncthing using the API, similarly to how the
+  # generated systemd unit's bash script does it. We add them here and expect
+  # the systemd unit's bash script to remove them when it runs.
+  IDsToDelete = {
+    # Also created using the syncthing generate command above
+    device = "LZ2CTHT-3W2M7BC-CMKDFZL-DLUQJFS-WJR73PA-NZGODWG-DZBHCHI-OXTQXAK";
+    # Intentionally this is a substring of the IDs of the 'test_folder's, as
+    # explained in: https://github.com/NixOS/nixpkgs/issues/259256
+    folder = "DeleteMe";
+  };
+  addDeviceToDeleteScript = pkgs.writers.writeBash "syncthing-add-device-to-delete.sh" ''
+    set -euo pipefail
+
+    export RUNTIME_DIRECTORY=/tmp
+
+    curl() {
+        # get the api key by parsing the config.xml
+        while
+            ! ${pkgs.libxml2}/bin/xmllint \
+                --xpath 'string(configuration/gui/apikey)' \
+                ${configPath} \
+                >"$RUNTIME_DIRECTORY/api_key"
+        do sleep 1; done
+
+        (printf "X-API-Key: "; cat "$RUNTIME_DIRECTORY/api_key") >"$RUNTIME_DIRECTORY/headers"
+
+        ${pkgs.curl}/bin/curl -sSLk -H "@$RUNTIME_DIRECTORY/headers" \
+            --retry 1000 --retry-delay 1 --retry-all-errors \
+            "$@"
+    }
+    curl -d ${lib.escapeShellArg (builtins.toJSON { deviceID = IDsToDelete.device;})} \
+        -X POST 127.0.0.1:8384/rest/config/devices
+    curl -d ${lib.escapeShellArg (builtins.toJSON { id = IDsToDelete.folder;})} \
+        -X POST 127.0.0.1:8384/rest/config/folders
+  '';
+in {
+  name = "syncthing-init";
+  meta.maintainers = with lib.maintainers; [ doronbehar ];
+
+  nodes.machine = {
+    services.syncthing = {
+      enable = true;
+      overrideDevices = true;
+      overrideFolders = true;
+      settings = settingsWithoutId // settingsWithId;
+    };
+  };
+  testScript = ''
+    machine.wait_for_unit("syncthing-init.service")
+  '' + (lib.pipe settingsWithId [
+    # Check that folders and devices were added properly and that all IDs exist
+    (lib.mapAttrsRecursive (path: id:
+      checkSettingWithId {
+        # plural -> solitary
+        t = (lib.removeSuffix "s" (builtins.elemAt path 0));
+        inherit id;
+      }
+    ))
+    # Get all the values we applied the above function upon
+    (lib.collect builtins.isString)
+    lib.concatStrings
+  ]) + (lib.pipe settingsWithoutId [
+    # Check that all other syncthing.settings were added properly with correct
+    # values
+    (lib.mapAttrsRecursive (path: value:
+      checkSettingWithoutId {
+        t = (builtins.elemAt path 0);
+        n = (builtins.elemAt path 1);
+        v = (builtins.toString value);
+      }
+    ))
+    # Get all the values we applied the above function upon
+    (lib.collect builtins.isString)
+    lib.concatStrings
+  ]) + ''
+    # Run the script on the machine
+    machine.succeed("${addDeviceToDeleteScript}")
+  '' + (checkSettingsToDelete {
+    not = false;
+  }) + ''
+    # Useful for debugging later
+    machine.copy_from_vm("${configPath}", "before")
+
+    machine.systemctl("restart syncthing-init.service")
+    machine.wait_for_unit("syncthing-init.service")
+  '' + (checkSettingsToDelete {
+    not = true;
+  }) + ''
+    # Useful for debugging later
+    machine.copy_from_vm("${configPath}", "after")
+
+    # Copy the systemd unit's bash script, to inspect it for debugging.
+    mergeScript = machine.succeed(
+        "systemctl cat syncthing-init.service | "
+        "${pkgs.initool}/bin/initool g - Service ExecStart --value-only"
+    ).strip()  # strip the trailing newline
+    machine.copy_from_vm(mergeScript, "")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/syncthing-no-settings.nix b/nixpkgs/nixos/tests/syncthing-no-settings.nix
new file mode 100644
index 000000000000..fee122b5e35c
--- /dev/null
+++ b/nixpkgs/nixos/tests/syncthing-no-settings.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "syncthing";
+  meta.maintainers = with pkgs.lib.maintainers; [ chkno ];
+
+  nodes = {
+    a = {
+      environment.systemPackages = with pkgs; [ curl libxml2 syncthing ];
+      services.syncthing = {
+        enable = true;
+      };
+    };
+  };
+  # Test that no syncthing-init.service systemd unit is created.
+  #
+  testScript = /* python */ ''
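+    # awk exits with status 1 as soon as a line's first field is exactly
+    # "syncthing-init.service"; since the pipeline's exit status is awk's,
+    # succeed() fails precisely when that unit file exists.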
+    a.succeed("systemctl list-unit-files | awk '$1 == \"syncthing-init.service\" {exit 1;}'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/syncthing-relay.nix b/nixpkgs/nixos/tests/syncthing-relay.nix
new file mode 100644
index 000000000000..3d70b1eda7b2
--- /dev/null
+++ b/nixpkgs/nixos/tests/syncthing-relay.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "syncthing-relay";
+  meta.maintainers = with pkgs.lib.maintainers; [ delroth ];
+
+  nodes.machine = {
+    environment.systemPackages = [ pkgs.jq ];
+    services.syncthing.relay = {
+      enable = true;
+      providedBy = "nixos-test";
+      pools = [];  # Don't connect to any pool while testing.
+      port = 12345;
+      statusPort = 12346;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("syncthing-relay.service")
+    machine.wait_for_open_port(12345)
+    machine.wait_for_open_port(12346)
+
+    out = machine.succeed(
+        "curl -sSf http://localhost:12346/status | jq -r '.options.\"provided-by\"'"
+    )
+    assert "nixos-test" in out
+  '';
+})
diff --git a/nixpkgs/nixos/tests/syncthing.nix b/nixpkgs/nixos/tests/syncthing.nix
new file mode 100644
index 000000000000..aff1d8744130
--- /dev/null
+++ b/nixpkgs/nixos/tests/syncthing.nix
@@ -0,0 +1,65 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "syncthing";
+  meta.maintainers = with pkgs.lib.maintainers; [ chkno ];
+
+  nodes = rec {
+    a = {
+      environment.systemPackages = with pkgs; [ curl libxml2 syncthing ];
+      services.syncthing = {
+        enable = true;
+        openDefaultPorts = true;
+      };
+    };
+    b = a;
+  };
+
+  testScript = ''
+    import json
+    import shlex
+
+    confdir = "/var/lib/syncthing/.config/syncthing"
+
+
+    def addPeer(host, name, deviceID):
+        APIKey = host.succeed(
+            "xmllint --xpath 'string(configuration/gui/apikey)' %s/config.xml" % confdir
+        ).strip()
+        oldConf = host.succeed(
+            "curl -Ssf -H 'X-API-Key: %s' 127.0.0.1:8384/rest/config" % APIKey
+        )
+        conf = json.loads(oldConf)
+        conf["devices"].append({"deviceID": deviceID, "id": name})
+        conf["folders"].append(
+            {
+                "devices": [{"deviceID": deviceID}],
+                "id": "foo",
+                "path": "/var/lib/syncthing/foo",
+                "rescanIntervalS": 1,
+            }
+        )
+        newConf = json.dumps(conf)
+        host.succeed(
+            "curl -Ssf -H 'X-API-Key: %s' 127.0.0.1:8384/rest/config -X PUT -d %s"
+            % (APIKey, shlex.quote(newConf))
+        )
+
+
+    start_all()
+    a.wait_for_unit("syncthing.service")
+    b.wait_for_unit("syncthing.service")
+    a.wait_for_open_port(22000)
+    b.wait_for_open_port(22000)
+
+    aDeviceID = a.succeed("syncthing -home=%s -device-id" % confdir).strip()
+    bDeviceID = b.succeed("syncthing -home=%s -device-id" % confdir).strip()
+    addPeer(a, "b", bDeviceID)
+    addPeer(b, "a", aDeviceID)
+
+    a.wait_for_file("/var/lib/syncthing/foo")
+    b.wait_for_file("/var/lib/syncthing/foo")
+    a.succeed("echo a2b > /var/lib/syncthing/foo/a2b")
+    b.succeed("echo b2a > /var/lib/syncthing/foo/b2a")
+    a.wait_for_file("/var/lib/syncthing/foo/b2a")
+    b.wait_for_file("/var/lib/syncthing/foo/a2b")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-analyze.nix b/nixpkgs/nixos/tests/systemd-analyze.nix
new file mode 100644
index 000000000000..31588e2b41aa
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-analyze.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, latestKernel ? false, ... }:
+
+{
+  name = "systemd-analyze";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ raskin ];
+  };
+
+  nodes.machine =
+    { pkgs, lib, ... }:
+    { boot.kernelPackages = lib.mkIf latestKernel pkgs.linuxPackages_latest;
+      sound.enable = true; # needed for the factl test; /dev/snd/* exists without it, but udev doesn't pick it up then
+    };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    # We create a special output directory to copy it as a whole
+    with subtest("Prepare output dir"):
+        machine.succeed("mkdir systemd-analyze")
+
+
+    # Save the output into a file with given name inside the common
+    # output directory
+    def run_systemd_analyze(args, name):
+        tgt_dir = "systemd-analyze"
+        machine.succeed(
+            "systemd-analyze {} > {}/{} 2> {}/{}.err".format(
+                " ".join(args), tgt_dir, name, tgt_dir, name
+            )
+        )
+
+
+    with subtest("Print statistics"):
+        run_systemd_analyze(["blame"], "blame.txt")
+        run_systemd_analyze(["critical-chain"], "critical-chain.txt")
+        run_systemd_analyze(["dot"], "dependencies.dot")
+        run_systemd_analyze(["plot"], "systemd-analyze.svg")
+
+    # Copy the main graph into $out (the toplevel), and also copy the entire
+    # output directory with the additional data.
+    with subtest("Copying the resulting data into $out"):
+        machine.copy_from_vm("systemd-analyze/", "")
+        machine.copy_from_vm("systemd-analyze/systemd-analyze.svg", "")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-binfmt.nix b/nixpkgs/nixos/tests/systemd-binfmt.nix
new file mode 100644
index 000000000000..b16fda0ddb1a
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-binfmt.nix
@@ -0,0 +1,90 @@
+# Teach the kernel how to run armv7l and aarch64-linux binaries,
+# and run GNU Hello for these architectures.
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  expectArgv0 = xpkgs: xpkgs.runCommandCC "expect-argv0" {
+    src = pkgs.writeText "expect-argv0.c" ''
+      #include <stdio.h>
+      #include <string.h>
+
+      int main(int argc, char **argv) {
+        fprintf(stderr, "Our argv[0] is %s\n", argv[0]);
+
+        if (strcmp(argv[0], argv[1])) {
+          fprintf(stderr, "ERROR: argv[0] is %s, should be %s\n", argv[0], argv[1]);
+          return 1;
+        }
+
+        return 0;
+      }
+    '';
+  } ''
+    $CC -o $out $src
+  '';
+in {
+  basic = makeTest {
+    name = "systemd-binfmt";
+    nodes.machine = {
+      boot.binfmt.emulatedSystems = [
+        "armv7l-linux"
+        "aarch64-linux"
+      ];
+    };
+
+    testScript = let
+      helloArmv7l = pkgs.pkgsCross.armv7l-hf-multiplatform.hello;
+      helloAarch64 = pkgs.pkgsCross.aarch64-multiplatform.hello;
+    in ''
+      machine.start()
+
+      assert "world" in machine.succeed(
+          "${helloArmv7l}/bin/hello"
+      )
+
+      assert "world" in machine.succeed(
+          "${helloAarch64}/bin/hello"
+      )
+    '';
+  };
+
+  preserveArgvZero = makeTest {
+    name = "systemd-binfmt-preserve-argv0";
+    nodes.machine = {
+      boot.binfmt.emulatedSystems = [
+        "aarch64-linux"
+      ];
+    };
+    testScript = let
+      testAarch64 = expectArgv0 pkgs.pkgsCross.aarch64-multiplatform;
+    in ''
+      machine.start()
+      machine.succeed("exec -a meow ${testAarch64} meow")
+    '';
+  };
+
+  ldPreload = makeTest {
+    name = "systemd-binfmt-ld-preload";
+    nodes.machine = {
+      boot.binfmt.emulatedSystems = [
+        "aarch64-linux"
+      ];
+    };
+    testScript = let
+      helloAarch64 = pkgs.pkgsCross.aarch64-multiplatform.hello;
+      libredirectAarch64 = pkgs.pkgsCross.aarch64-multiplatform.libredirect;
+    in ''
+      machine.start()
+
+      assert "error" not in machine.succeed(
+          "LD_PRELOAD='${libredirectAarch64}/lib/libredirect.so' ${helloAarch64}/bin/hello 2>&1"
+      ).lower()
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/systemd-boot-ovmf-broken-fat-driver.patch b/nixpkgs/nixos/tests/systemd-boot-ovmf-broken-fat-driver.patch
new file mode 100644
index 000000000000..ef547c02f918
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-boot-ovmf-broken-fat-driver.patch
@@ -0,0 +1,25 @@
+From d87a7513c6f2f2824203032ef27caeb84892ed7e Mon Sep 17 00:00:00 2001
+From: Will Fancher <elvishjerricco@gmail.com>
+Date: Tue, 30 May 2023 16:53:20 -0400
+Subject: [PATCH] Intentionally break the fat driver
+
+---
+ FatPkg/EnhancedFatDxe/ReadWrite.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/FatPkg/EnhancedFatDxe/ReadWrite.c b/FatPkg/EnhancedFatDxe/ReadWrite.c
+index 8f525044d1f1..32c62ff7817b 100644
+--- a/FatPkg/EnhancedFatDxe/ReadWrite.c
++++ b/FatPkg/EnhancedFatDxe/ReadWrite.c
+@@ -216,6 +216,11 @@ FatIFileAccess (
+   Volume = OFile->Volume;

+   Task   = NULL;

+ 

++  if (*BufferSize > (10U * 1024U * 1024U)) {

++    IFile->Position += 10U * 1024U * 1024U;

++    return EFI_BAD_BUFFER_SIZE;

++  }

++

+   //

+   // Write to a directory is unsupported

+   //

diff --git a/nixpkgs/nixos/tests/systemd-boot.nix b/nixpkgs/nixos/tests/systemd-boot.nix
new file mode 100644
index 000000000000..256a18532b0a
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-boot.nix
@@ -0,0 +1,325 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  common = {
+    virtualisation.useBootLoader = true;
+    virtualisation.useEFIBoot = true;
+    boot.loader.systemd-boot.enable = true;
+    boot.loader.efi.canTouchEfiVariables = true;
+    environment.systemPackages = [ pkgs.efibootmgr ];
+  };
+in
+{
+  basic = makeTest {
+    name = "systemd-boot";
+    meta.maintainers = with pkgs.lib.maintainers; [ danielfullmer julienmalka ];
+
+    nodes.machine = common;
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /boot/loader/entries/nixos-generation-1.conf")
+
+      # Ensure we actually booted using systemd-boot
+      # Magic number is the vendor UUID used by systemd-boot.
+      machine.succeed(
+          "test -e /sys/firmware/efi/efivars/LoaderEntrySelected-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f"
+      )
+
+      # "bootctl install" should have created an EFI entry
+      machine.succeed('efibootmgr | grep "Linux Boot Manager"')
+    '';
+  };
+
+  # Check that specialisations create corresponding boot entries.
+  specialisation = makeTest {
+    name = "systemd-boot-specialisation";
+    meta.maintainers = with pkgs.lib.maintainers; [ lukegb julienmalka ];
+
+    nodes.machine = { pkgs, lib, ... }: {
+      imports = [ common ];
+      specialisation.something.configuration = {};
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed(
+          "test -e /boot/loader/entries/nixos-generation-1-specialisation-something.conf"
+      )
+      machine.succeed(
+          "grep -q 'title NixOS (something)' /boot/loader/entries/nixos-generation-1-specialisation-something.conf"
+      )
+    '';
+  };
+
+  # Boot without having created an EFI entry--instead using default "/EFI/BOOT/BOOTX64.EFI"
+  fallback = makeTest {
+    name = "systemd-boot-fallback";
+    meta.maintainers = with pkgs.lib.maintainers; [ danielfullmer julienmalka ];
+
+    nodes.machine = { pkgs, lib, ... }: {
+      imports = [ common ];
+      boot.loader.efi.canTouchEfiVariables = mkForce false;
+    };
+
+    testScript = ''
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      machine.succeed("test -e /boot/loader/entries/nixos-generation-1.conf")
+
+      # Ensure we actually booted using systemd-boot
+      # Magic number is the vendor UUID used by systemd-boot.
+      machine.succeed(
+          "test -e /sys/firmware/efi/efivars/LoaderEntrySelected-4a67b082-0a4c-41cf-b6c7-440b29bb8c4f"
+      )
+
+      # "bootctl install" should _not_ have created an EFI entry
+      machine.fail('efibootmgr | grep "Linux Boot Manager"')
+    '';
+  };
+
+  update = makeTest {
+    name = "systemd-boot-update";
+    meta.maintainers = with pkgs.lib.maintainers; [ danielfullmer julienmalka ];
+
+    nodes.machine = common;
+
+    testScript = ''
+      machine.succeed("mount -o remount,rw /boot")
+
+      # Replace version inside sd-boot with something older. See magic[] string in systemd src/boot/efi/boot.c
+      machine.succeed(
+          """
+        find /boot -iname '*boot*.efi' -print0 | \
+        xargs -0 -I '{}' sed -i 's/#### LoaderInfo: systemd-boot .* ####/#### LoaderInfo: systemd-boot 000.0-1-notnixos ####/' '{}'
+      """
+      )
+
+      output = machine.succeed("/run/current-system/bin/switch-to-configuration boot")
+      assert "updating systemd-boot from 000.0-1-notnixos to " in output, "Couldn't find systemd-boot update message"
+    '';
+  };
+
+  memtest86 = makeTest {
+    name = "systemd-boot-memtest86";
+    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+
+    nodes.machine = { pkgs, lib, ... }: {
+      imports = [ common ];
+      boot.loader.systemd-boot.memtest86.enable = true;
+    };
+
+    testScript = ''
+      machine.succeed("test -e /boot/loader/entries/memtest86.conf")
+      machine.succeed("test -e /boot/efi/memtest86/memtest.efi")
+    '';
+  };
+
+  netbootxyz = makeTest {
+    name = "systemd-boot-netbootxyz";
+    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+
+    nodes.machine = { pkgs, lib, ... }: {
+      imports = [ common ];
+      boot.loader.systemd-boot.netbootxyz.enable = true;
+    };
+
+    testScript = ''
+      machine.succeed("test -e /boot/loader/entries/o_netbootxyz.conf")
+      machine.succeed("test -e /boot/efi/netbootxyz/netboot.xyz.efi")
+    '';
+  };
+
+  entryFilename = makeTest {
+    name = "systemd-boot-entry-filename";
+    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+
+    nodes.machine = { pkgs, lib, ... }: {
+      imports = [ common ];
+      boot.loader.systemd-boot.memtest86.enable = true;
+      boot.loader.systemd-boot.memtest86.entryFilename = "apple.conf";
+    };
+
+    testScript = ''
+      machine.fail("test -e /boot/loader/entries/memtest86.conf")
+      machine.succeed("test -e /boot/loader/entries/apple.conf")
+      machine.succeed("test -e /boot/efi/memtest86/memtest.efi")
+    '';
+  };
+
+  extraEntries = makeTest {
+    name = "systemd-boot-extra-entries";
+    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+
+    nodes.machine = { pkgs, lib, ... }: {
+      imports = [ common ];
+      boot.loader.systemd-boot.extraEntries = {
+        "banana.conf" = ''
+          title banana
+        '';
+      };
+    };
+
+    testScript = ''
+      machine.succeed("test -e /boot/loader/entries/banana.conf")
+      machine.succeed("test -e /boot/efi/nixos/.extra-files/loader/entries/banana.conf")
+    '';
+  };
+
+  extraFiles = makeTest {
+    name = "systemd-boot-extra-files";
+    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+
+    nodes.machine = { pkgs, lib, ... }: {
+      imports = [ common ];
+      boot.loader.systemd-boot.extraFiles = {
+        "efi/fruits/tomato.efi" = pkgs.netbootxyz-efi;
+      };
+    };
+
+    testScript = ''
+      machine.succeed("test -e /boot/efi/fruits/tomato.efi")
+      machine.succeed("test -e /boot/efi/nixos/.extra-files/efi/fruits/tomato.efi")
+    '';
+  };
+
+  switch-test = makeTest {
+    name = "systemd-boot-switch-test";
+    meta.maintainers = with pkgs.lib.maintainers; [ Enzime julienmalka ];
+
+    nodes = {
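+      # "common" doubles as a node here so its toplevel can be built and
+      # switched to from within `machine` (see baseSystem in the test script).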
+      inherit common;
+
+      machine = { pkgs, nodes, ... }: {
+        imports = [ common ];
+        boot.loader.systemd-boot.extraFiles = {
+          "efi/fruits/tomato.efi" = pkgs.netbootxyz-efi;
+        };
+
+        # These are configs for different nodes, but we'll use them here in `machine`
+        system.extraDependencies = [
+          nodes.common.system.build.toplevel
+          nodes.with_netbootxyz.system.build.toplevel
+        ];
+      };
+
+      with_netbootxyz = { pkgs, ... }: {
+        imports = [ common ];
+        boot.loader.systemd-boot.netbootxyz.enable = true;
+      };
+    };
+
+    testScript = { nodes, ... }: let
+      originalSystem = nodes.machine.system.build.toplevel;
+      baseSystem = nodes.common.system.build.toplevel;
+      finalSystem = nodes.with_netbootxyz.system.build.toplevel;
+    in ''
+      machine.succeed("test -e /boot/efi/fruits/tomato.efi")
+      machine.succeed("test -e /boot/efi/nixos/.extra-files/efi/fruits/tomato.efi")
+
+      with subtest("remove files when no longer needed"):
+          machine.succeed("${baseSystem}/bin/switch-to-configuration boot")
+          machine.fail("test -e /boot/efi/fruits/tomato.efi")
+          machine.fail("test -d /boot/efi/fruits")
+          machine.succeed("test -d /boot/efi/nixos/.extra-files")
+          machine.fail("test -e /boot/efi/nixos/.extra-files/efi/fruits/tomato.efi")
+          machine.fail("test -d /boot/efi/nixos/.extra-files/efi/fruits")
+
+      with subtest("files are added back when needed again"):
+          machine.succeed("${originalSystem}/bin/switch-to-configuration boot")
+          machine.succeed("test -e /boot/efi/fruits/tomato.efi")
+          machine.succeed("test -e /boot/efi/nixos/.extra-files/efi/fruits/tomato.efi")
+
+      with subtest("simultaneously removing and adding files works"):
+          machine.succeed("${finalSystem}/bin/switch-to-configuration boot")
+          machine.fail("test -e /boot/efi/fruits/tomato.efi")
+          machine.fail("test -e /boot/efi/nixos/.extra-files/efi/fruits/tomato.efi")
+          machine.succeed("test -e /boot/loader/entries/o_netbootxyz.conf")
+          machine.succeed("test -e /boot/efi/netbootxyz/netboot.xyz.efi")
+          machine.succeed("test -e /boot/efi/nixos/.extra-files/loader/entries/o_netbootxyz.conf")
+          machine.succeed("test -e /boot/efi/nixos/.extra-files/efi/netbootxyz/netboot.xyz.efi")
+    '';
+  };
+
+  garbage-collect-entry = makeTest {
+    name = "systemd-boot-switch-test";
+    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
+
+    nodes = {
+      inherit common;
+      machine = { pkgs, nodes, ... }: {
+        imports = [ common ];
+
+        # These are configs for different nodes, but we'll use them here in `machine`
+        system.extraDependencies = [
+          nodes.common.system.build.toplevel
+        ];
+      };
+    };
+
+    testScript = { nodes, ... }:
+      let
+        baseSystem = nodes.common.system.build.toplevel;
+      in
+      ''
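+        # Setting the profile creates generation 2; deleting generation 1 and
+        # switching should garbage-collect its boot entry.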
+        machine.succeed("nix-env -p /nix/var/nix/profiles/system --set ${baseSystem}")
+        machine.succeed("nix-env -p /nix/var/nix/profiles/system --delete-generations 1")
+        machine.succeed("${baseSystem}/bin/switch-to-configuration boot")
+        machine.fail("test -e /boot/loader/entries/nixos-generation-1.conf")
+        machine.succeed("test -e /boot/loader/entries/nixos-generation-2.conf")
+      '';
+  };
+
+  # Some UEFI firmwares fail on large reads. Now that systemd-boot loads initrd
+  # itself, systems with such firmware won't boot without this fix
+  uefiLargeFileWorkaround = makeTest {
+    name = "uefi-large-file-workaround";
+    meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
+    nodes.machine = { pkgs, ... }: {
+      imports = [common];
+      virtualisation.efi.OVMF = pkgs.OVMF.overrideAttrs (old: {
+        # This patch deliberately breaks the FAT driver in EDK2 to
+        # exhibit (part of) the firmware bug that we are testing
+        # for. Files greater than 10MiB will fail to be read in a
+        # single Read() call, so systemd-boot will fail to load the
+        # initrd without a workaround. The number 10MiB was chosen
+        # because if it were smaller than the kernel size, even the
+        # LoadImage call would fail, which is not the failure mode
+        # we're testing for. It needs to be between the kernel size
+        # and the initrd size.
+        patches = old.patches or [] ++ [ ./systemd-boot-ovmf-broken-fat-driver.patch ];
+      });
+    };
+
+    testScript = ''
+      machine.wait_for_unit("multi-user.target")
+    '';
+  };
+
+  no-bootspec = makeTest
+    {
+      name = "systemd-boot-no-bootspec";
+      meta.maintainers = with pkgs.lib.maintainers; [ julienmalka ];
+
+      nodes.machine = {
+        imports = [ common ];
+        boot.bootspec.enable = false;
+      };
+
+      testScript = ''
+        machine.start()
+        machine.wait_for_unit("multi-user.target")
+      '';
+    };
+}
diff --git a/nixpkgs/nixos/tests/systemd-bpf.nix b/nixpkgs/nixos/tests/systemd-bpf.nix
new file mode 100644
index 000000000000..e11347a2a817
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-bpf.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "systemd-bpf";
+  meta = with lib.maintainers; {
+    maintainers = [ veehaitch ];
+  };
+  nodes = {
+    node1 = {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.ipv4.addresses = [
+          { address = "192.168.1.1"; prefixLength = 24; }
+        ];
+      };
+    };
+
+    node2 = {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.ipv4.addresses = [
+          { address = "192.168.1.2"; prefixLength = 24; }
+        ];
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    node1.wait_for_unit("systemd-networkd-wait-online.service")
+    node2.wait_for_unit("systemd-networkd-wait-online.service")
+
+    with subtest("test RestrictNetworkInterfaces= works"):
+      node1.succeed("ping -c 5 192.168.1.2")
+      node1.succeed("systemd-run -t -p RestrictNetworkInterfaces='eth1' ping -c 5 192.168.1.2")
+      node1.fail("systemd-run -t -p RestrictNetworkInterfaces='lo' ping -c 5 192.168.1.2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-confinement.nix b/nixpkgs/nixos/tests/systemd-confinement.nix
new file mode 100644
index 000000000000..428888d41a20
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-confinement.nix
@@ -0,0 +1,184 @@
+import ./make-test-python.nix {
+  name = "systemd-confinement";
+
+  nodes.machine = { pkgs, lib, ... }: let
+    testServer = pkgs.writeScript "testserver.sh" ''
+      #!${pkgs.runtimeShell}
+      export PATH=${lib.escapeShellArg "${pkgs.coreutils}/bin"}
+      ${lib.escapeShellArg pkgs.runtimeShell} 2>&1
+      echo "exit-status:$?"
+    '';
+
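+    # chroot-exec sends a command to the socket-activated test service for the
+    # current step (recorded in /teststep) and propagates its exit status.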
+    testClient = pkgs.writeScriptBin "chroot-exec" ''
+      #!${pkgs.runtimeShell} -e
+      output="$(echo "$@" | nc -NU "/run/test$(< /teststep).sock")"
+      ret="$(echo "$output" | sed -nre '$s/^exit-status:([0-9]+)$/\1/p')"
+      echo "$output" | head -n -1
+      exit "''${ret:-1}"
+    '';
+
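+    # Each test step becomes a socket-activated, confined service listening on
+    # /run/test<N>.sock, so chroot-exec can run commands inside its namespace.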
+    mkTestStep = num: {
+      testScript,
+      config ? {},
+      serviceName ? "test${toString num}",
+    }: {
+      systemd.sockets.${serviceName} = {
+        description = "Socket for Test Service ${toString num}";
+        wantedBy = [ "sockets.target" ];
+        socketConfig.ListenStream = "/run/test${toString num}.sock";
+        socketConfig.Accept = true;
+      };
+
+      systemd.services."${serviceName}@" = {
+        description = "Confined Test Service ${toString num}";
+        confinement = (config.confinement or {}) // { enable = true; };
+        serviceConfig = (config.serviceConfig or {}) // {
+          ExecStart = testServer;
+          StandardInput = "socket";
+        };
+      } // removeAttrs config [ "confinement" "serviceConfig" ];
+
+      __testSteps = lib.mkOrder num (''
+        machine.succeed("echo ${toString num} > /teststep")
+      '' + testScript);
+    };
+
+  in {
+    imports = lib.imap1 mkTestStep [
+      { config.confinement.mode = "chroot-only";
+        testScript = ''
+          with subtest("chroot-only confinement"):
+              paths = machine.succeed('chroot-exec ls -1 / | paste -sd,').strip()
+              assert_eq(paths, "bin,nix,run")
+              uid = machine.succeed('chroot-exec id -u').strip()
+              assert_eq(uid, "0")
+              machine.succeed("chroot-exec chown 65534 /bin")
+        '';
+      }
+      { testScript = ''
+          with subtest("full confinement with APIVFS"):
+              machine.fail("chroot-exec ls -l /etc")
+              machine.fail("chroot-exec chown 65534 /bin")
+              assert_eq(machine.succeed('chroot-exec id -u').strip(), "0")
+              machine.succeed("chroot-exec chown 0 /bin")
+        '';
+      }
+      { config.serviceConfig.BindReadOnlyPaths = [ "/etc" ];
+        testScript = ''
+          with subtest("check existence of bind-mounted /etc"):
+              passwd = machine.succeed('chroot-exec cat /etc/passwd').strip()
+              assert len(passwd) > 0, "/etc/passwd must not be empty"
+        '';
+      }
+      { config.serviceConfig.User = "chroot-testuser";
+        config.serviceConfig.Group = "chroot-testgroup";
+        testScript = ''
+          with subtest("check if User/Group really runs as non-root"):
+              machine.succeed("chroot-exec ls -l /dev")
+              uid = machine.succeed('chroot-exec id -u').strip()
+              assert uid != "0", "UID of chroot-testuser shouldn't be 0"
+              machine.fail("chroot-exec touch /bin/test")
+        '';
+      }
+      (let
+        symlink = pkgs.runCommand "symlink" {
+          target = pkgs.writeText "symlink-target" "got me\n";
+        } "ln -s \"$target\" \"$out\"";
+      in {
+        config.confinement.packages = lib.singleton symlink;
+        testScript = ''
+          with subtest("check if symlinks are properly bind-mounted"):
+              machine.fail("chroot-exec test -e /etc")
+              text = machine.succeed('chroot-exec cat ${symlink}').strip()
+              assert_eq(text, "got me")
+        '';
+      })
+      { config.serviceConfig.User = "chroot-testuser";
+        config.serviceConfig.Group = "chroot-testgroup";
+        config.serviceConfig.StateDirectory = "testme";
+        testScript = ''
+          with subtest("check if StateDirectory works"):
+              machine.succeed("chroot-exec touch /tmp/canary")
+              machine.succeed('chroot-exec "echo works > /var/lib/testme/foo"')
+              machine.succeed('test "$(< /var/lib/testme/foo)" = works')
+              machine.succeed("test ! -e /tmp/canary")
+        '';
+      }
+      { testScript = ''
+          with subtest("check if /bin/sh works"):
+              machine.succeed(
+                  "chroot-exec test -e /bin/sh",
+                  'test "$(chroot-exec \'/bin/sh -c "echo bar"\')" = bar',
+              )
+        '';
+      }
+      { config.confinement.binSh = null;
+        testScript = ''
+          with subtest("check if suppressing /bin/sh works"):
+              machine.succeed("chroot-exec test ! -e /bin/sh")
+              machine.succeed('test "$(chroot-exec \'/bin/sh -c "echo foo"\')" != foo')
+        '';
+      }
+      { config.confinement.binSh = "${pkgs.hello}/bin/hello";
+        testScript = ''
+          with subtest("check if we can set /bin/sh to something different"):
+              machine.succeed("chroot-exec test -e /bin/sh")
+              machine.succeed('test "$(chroot-exec /bin/sh -g foo)" = foo')
+        '';
+      }
+      { config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
+        testScript = ''
+          with subtest("check if only Exec* dependencies are included"):
+              machine.succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" != eek')
+        '';
+      }
+      { config.environment.FOOBAR = pkgs.writeText "foobar" "eek\n";
+        config.confinement.fullUnit = true;
+        testScript = ''
+          with subtest("check if all unit dependencies are included"):
+              machine.succeed('test "$(chroot-exec \'cat "$FOOBAR"\')" = eek')
+        '';
+      }
+      { serviceName = "shipped-unitfile";
+        config.confinement.mode = "chroot-only";
+        testScript = ''
+          with subtest("check if shipped unit file still works"):
+              machine.succeed(
+                  'chroot-exec \'kill -9 $$ 2>&1 || :\' | '
+                  'grep -q "Too many levels of symbolic links"'
+              )
+        '';
+      }
+    ];
+
+    options.__testSteps = lib.mkOption {
+      type = lib.types.lines;
+      description = lib.mdDoc "All of the test steps combined as a single script.";
+    };
+
+    config.environment.systemPackages = lib.singleton testClient;
+    config.systemd.packages = lib.singleton (pkgs.writeTextFile {
+      name = "shipped-unitfile";
+      destination = "/etc/systemd/system/shipped-unitfile@.service";
+      text = ''
+        [Service]
+        SystemCallFilter=~kill
+        SystemCallErrorNumber=ELOOP
+      '';
+    });
+
+    config.users.groups.chroot-testgroup = {};
+    config.users.users.chroot-testuser = {
+      isSystemUser = true;
+      description = "Chroot Test User";
+      group = "chroot-testgroup";
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    def assert_eq(a, b):
+        assert a == b, f"{a} != {b}"
+
+    machine.wait_for_unit("multi-user.target")
+  '' + nodes.machine.config.__testSteps;
+}
diff --git a/nixpkgs/nixos/tests/systemd-coredump.nix b/nixpkgs/nixos/tests/systemd-coredump.nix
new file mode 100644
index 000000000000..62137820878b
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-coredump.nix
@@ -0,0 +1,44 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+
+  crasher = pkgs.writeCBin "crasher" "int main;";
+
+  commonConfig = {
+    systemd.services.crasher.serviceConfig = {
+      ExecStart = "${crasher}/bin/crasher";
+      StateDirectory = "crasher";
+      WorkingDirectory = "%S/crasher";
+      Restart = "no";
+    };
+  };
+
+in
+
+{
+  name = "systemd-coredump";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ squalus ];
+  };
+
+  nodes.machine1 = { pkgs, lib, ... }: commonConfig;
+  nodes.machine2 = { pkgs, lib, ... }: lib.recursiveUpdate commonConfig {
+    systemd.coredump.enable = false;
+    systemd.package = pkgs.systemd.override {
+      withCoredump = false;
+    };
+  };
+
+  testScript = ''
+    with subtest("systemd-coredump enabled"):
+      machine1.wait_for_unit("multi-user.target")
+      machine1.wait_for_unit("systemd-coredump.socket")
+      machine1.systemctl("start crasher");
+      machine1.wait_until_succeeds("coredumpctl list | grep crasher", timeout=10)
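+      # The dump is captured by systemd-coredump, so no core file is written
+      # to the service's working directory.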
+      machine1.fail("stat /var/lib/crasher/core")
+
+    with subtest("systemd-coredump disabled"):
+      machine2.systemctl("start crasher");
+      machine2.wait_until_succeeds("stat /var/lib/crasher/core", timeout=10)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-credentials-tpm2.nix b/nixpkgs/nixos/tests/systemd-credentials-tpm2.nix
new file mode 100644
index 000000000000..bf7418312236
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-credentials-tpm2.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+{
+  name = "systemd-credentials-tpm2";
+
+  meta = {
+    maintainers = with pkgs.lib.maintainers; [ tmarkus ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.tpm.enable = true;
+    environment.systemPackages = with pkgs; [ diffutils ];
+  };
+
+  testScript = ''
+    CRED_NAME = "testkey"
+    CRED_RAW_FILE = f"/root/{CRED_NAME}"
+    CRED_FILE = f"/root/{CRED_NAME}.cred"
+
+    def systemd_run(machine, cmd):
+        machine.log(f"Executing command (via systemd-run): \"{cmd}\"")
+
+        (status, out) = machine.execute( " ".join([
+            "systemd-run",
+            "--service-type=exec",
+            "--quiet",
+            "--wait",
+            "-E PATH=\"$PATH\"",
+            "-p StandardOutput=journal",
+            "-p StandardError=journal",
+            f"-p LoadCredentialEncrypted={CRED_NAME}:{CRED_FILE}",
+            f"$SHELL -c '{cmd}'"
+            ]) )
+
+        if status != 0:
+            raise Exception(f"systemd_run failed (status {status})")
+
+        machine.log("systemd-run finished successfully")
+
+    machine.wait_for_unit("multi-user.target")
+
+    with subtest("Check whether TPM device exists"):
+        machine.succeed("test -e /dev/tpm0")
+        machine.succeed("test -e /dev/tpmrm0")
+
+    with subtest("Check whether systemd-creds detects TPM2 correctly"):
+        cmd = "systemd-creds has-tpm2"
+        machine.log(f"Running \"{cmd}\"")
+        (status, _) = machine.execute(cmd)
+
+        # Check exit code equals 0 or 1 (1 means firmware support is missing, which is OK here)
+        if status != 0 and status != 1:
+            raise Exception("systemd-creds failed to detect TPM2")
+
+    with subtest("Encrypt credential using systemd-creds"):
+        machine.succeed(f"dd if=/dev/urandom of={CRED_RAW_FILE} bs=1k count=16")
+        machine.succeed(f"systemd-creds --with-key=host+tpm2 encrypt --name=testkey {CRED_RAW_FILE} {CRED_FILE}")
+
+    with subtest("Write provided credential and check for equality"):
+        CRED_OUT_FILE = f"/root/{CRED_NAME}.out"
+        systemd_run(machine, f"systemd-creds cat testkey > {CRED_OUT_FILE}")
+        machine.succeed(f"cmp --silent -- {CRED_RAW_FILE} {CRED_OUT_FILE}")
+
+    with subtest("Check whether systemd service can see credential in systemd-creds list"):
+        systemd_run(machine, f"systemd-creds list | grep {CRED_NAME}")
+
+    with subtest("Check whether systemd service can access credential in $CREDENTIALS_DIRECTORY"):
+        systemd_run(machine, f"cmp --silent -- $CREDENTIALS_DIRECTORY/{CRED_NAME} {CRED_RAW_FILE}")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-cryptenroll.nix b/nixpkgs/nixos/tests/systemd-cryptenroll.nix
new file mode 100644
index 000000000000..034aae1d5e95
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-cryptenroll.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "systemd-cryptenroll";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ymatsiuk ];
+  };
+
+  nodes.machine = { pkgs, lib, ... }: {
+    environment.systemPackages = [ pkgs.cryptsetup ];
+    virtualisation = {
+      emptyDiskImages = [ 512 ];
+      tpm.enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.start()
+
+    # Verify the TPM device is available and accessible by systemd-cryptenroll
+    machine.succeed("test -e /dev/tpm0")
+    machine.succeed("test -e /dev/tpmrm0")
+    machine.succeed("systemd-cryptenroll --tpm2-device=list")
+
+    # Create LUKS partition
+    machine.succeed("echo -n lukspass | cryptsetup luksFormat -q /dev/vdb -")
+    # Enroll new LUKS key and bind it to Secure Boot state
+    # For more details on PASSWORD variable, check the following issue:
+    # https://github.com/systemd/systemd/issues/20955
+    machine.succeed("PASSWORD=lukspass systemd-cryptenroll --tpm2-device=auto --tpm2-pcrs=7 /dev/vdb")
+    # Add LUKS partition to /etc/crypttab to test auto unlock
+    machine.succeed("echo 'luks /dev/vdb - tpm2-device=auto' >> /etc/crypttab")
+
+    machine.shutdown()
+    machine.start()
+
+    # Test LUKS partition automatic unlock on boot
+    machine.wait_for_unit("systemd-cryptsetup@luks.service")
+    # Wipe TPM2 slot
+    machine.succeed("systemd-cryptenroll --wipe-slot=tpm2 /dev/vdb")
+  '';
+})
+
diff --git a/nixpkgs/nixos/tests/systemd-escaping.nix b/nixpkgs/nixos/tests/systemd-escaping.nix
new file mode 100644
index 000000000000..29d2ed1aa352
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-escaping.nix
@@ -0,0 +1,45 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  echoAll = pkgs.writeScript "echo-all" ''
+    #! ${pkgs.runtimeShell}
+    for s in "$@"; do
+      printf '%s\n' "$s"
+    done
+  '';
+  # deliberately using a local empty file instead of pkgs.emptyFile to have
+  # a non-store path in the test
+  args = [ "a%Nything" "lang=\${LANG}" ";" "/bin/sh -c date" ./empty-file 4.2 23 ];
+in
+{
+  name = "systemd-escaping";
+
+  nodes.machine = { pkgs, lib, utils, ... }: {
+    systemd.services.echo =
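+      # The asserts check that escapeSystemdExecArgs rejects lists, attrsets,
+      # null, booleans and functions at evaluation time.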
+      assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ [] ])).success;
+      assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ {} ])).success;
+      assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ null ])).success;
+      assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ false ])).success;
+      assert !(builtins.tryEval (utils.escapeSystemdExecArgs [ (_:_) ])).success;
+      { description = "Echo to the journal";
+        serviceConfig.Type = "oneshot";
+        serviceConfig.ExecStart = ''
+          ${echoAll} ${utils.escapeSystemdExecArgs args}
+        '';
+      };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("systemctl start echo.service")
+    # skip the first 'Starting <service> ...' line
+    logs = machine.succeed("journalctl -u echo.service -o cat").splitlines()[1:]
+    assert "a%Nything" == logs[0]
+    assert "lang=''${LANG}" == logs[1]
+    assert ";" == logs[2]
+    assert "/bin/sh -c date" == logs[3]
+    assert "/nix/store/ij3gw72f4n5z4dz6nnzl1731p9kmjbwr-empty-file" == logs[4]
+    assert "4.2" in logs[5] # toString produces extra fractional digits!
+    assert "23" == logs[6]
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-homed.nix b/nixpkgs/nixos/tests/systemd-homed.nix
new file mode 100644
index 000000000000..ecc92e98eddc
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-homed.nix
@@ -0,0 +1,99 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  password = "foobar";
+  newPass = "barfoo";
+in
+{
+  name = "systemd-homed";
+  nodes.machine = { config, pkgs, ... }: {
+    services.homed.enable = true;
+
+    users.users.test-normal-user = {
+      extraGroups = [ "wheel" ];
+      isNormalUser = true;
+      initialPassword = password;
+    };
+  };
+  testScript = ''
+    def switchTTY(number):
+      machine.send_key(f"alt-f{number}")
+      machine.wait_until_succeeds(f"[ $(fgconsole) = {number} ]")
+      machine.wait_for_unit(f"getty@tty{number}.service")
+      machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{number}'")
+
+    machine.wait_for_unit("multi-user.target")
+
+    # Smoke test to make sure the pam changes didn't break regular users.
+    machine.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+    with subtest("login as regular user"):
+      switchTTY(2)
+      machine.wait_until_tty_matches("2", "login: ")
+      machine.send_chars("test-normal-user\n")
+      machine.wait_until_tty_matches("2", "login: test-normal-user")
+      machine.wait_until_tty_matches("2", "Password: ")
+      machine.send_chars("${password}\n")
+      machine.wait_until_succeeds("pgrep -u test-normal-user bash")
+      machine.send_chars("whoami > /tmp/1\n")
+      machine.wait_for_file("/tmp/1")
+      assert "test-normal-user" in machine.succeed("cat /tmp/1")
+
+    with subtest("create homed encrypted user"):
+      # TODO: Figure out how to pass password manually.
+      #
+      # This environment variable is used for homed internal testing
+      # and is not documented.
+      machine.succeed("NEWPASSWORD=${password} homectl create --shell=/run/current-system/sw/bin/bash --storage=luks -G wheel test-homed-user")
+
+    with subtest("login as homed user"):
+      switchTTY(3)
+      machine.wait_until_tty_matches("3", "login: ")
+      machine.send_chars("test-homed-user\n")
+      machine.wait_until_tty_matches("3", "login: test-homed-user")
+      machine.wait_until_tty_matches("3", "Password: ")
+      machine.send_chars("${password}\n")
+      machine.wait_until_succeeds("pgrep -t tty3 -u test-homed-user bash")
+      machine.send_chars("whoami > /tmp/2\n")
+      machine.wait_for_file("/tmp/2")
+      assert "test-homed-user" in machine.succeed("cat /tmp/2")
+
+    with subtest("change homed user password"):
+      switchTTY(4)
+      machine.wait_until_tty_matches("4", "login: ")
+      machine.send_chars("test-homed-user\n")
+      machine.wait_until_tty_matches("4", "login: test-homed-user")
+      machine.wait_until_tty_matches("4", "Password: ")
+      machine.send_chars("${password}\n")
+      machine.wait_until_succeeds("pgrep -t tty4 -u test-homed-user bash")
+      machine.send_chars("passwd\n")
+      # homed does it in a weird order, it asks for new passes, then it asks
+      # for the old one.
+      machine.sleep(2)
+      machine.send_chars("${newPass}\n")
+      machine.sleep(2)
+      machine.send_chars("${newPass}\n")
+      machine.sleep(4)
+      machine.send_chars("${password}\n")
+      machine.wait_until_fails("pgrep -t tty4 passwd")
+
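+      # While retrying the login below, keep checking that the old password no
+      # longer opens a shell on tty5.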
+      @polling_condition
+      def not_logged_in_tty5():
+        machine.fail("pgrep -t tty5 bash")
+
+      switchTTY(5)
+      with not_logged_in_tty5: # type: ignore[union-attr]
+        machine.wait_until_tty_matches("5", "login: ")
+        machine.send_chars("test-homed-user\n")
+        machine.wait_until_tty_matches("5", "login: test-homed-user")
+        machine.wait_until_tty_matches("5", "Password: ")
+        machine.send_chars("${password}\n")
+        machine.wait_until_tty_matches("5", "Password incorrect or not sufficient for authentication of user test-homed-user.")
+        machine.wait_until_tty_matches("5", "Sorry, try again: ")
+      machine.send_chars("${newPass}\n")
+      machine.send_chars("whoami > /tmp/4\n")
+      machine.wait_for_file("/tmp/4")
+      assert "test-homed-user" in machine.succeed("cat /tmp/4")
+
+    with subtest("homed user should be in wheel according to NSS"):
+      machine.succeed("userdbctl group wheel -s io.systemd.NameServiceSwitch | grep test-homed-user")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-bridge.nix b/nixpkgs/nixos/tests/systemd-initrd-bridge.nix
new file mode 100644
index 000000000000..f48a46ff2b93
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-bridge.nix
@@ -0,0 +1,63 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "systemd-initrd-bridge";
+  meta.maintainers = [ lib.maintainers.majiir ];
+
+  # Tests bridge interface configuration in systemd-initrd.
+  #
+  # The 'a' and 'b' nodes are connected to a 'bridge' node through different
+  # links. The 'bridge' node configures a bridge across them. It waits forever
+  # in initrd (stage 1) with networking enabled. 'a' and 'b' ping 'bridge' to
+  # test connectivity with the bridge interface. Then, 'a' pings 'b' to test
+  # the bridge itself.
+
+  nodes = {
+    bridge = { config, lib, ... }: {
+      boot.initrd.systemd.enable = true;
+      boot.initrd.network.enable = true;
+      boot.initrd.systemd.services.boot-blocker = {
+        before = [ "initrd.target" ];
+        wantedBy = [ "initrd.target" ];
+        script = "sleep infinity";
+        serviceConfig.Type = "oneshot";
+      };
+
+      networking.primaryIPAddress = "192.168.1.${toString config.virtualisation.test.nodeNumber}";
+
+      virtualisation.vlans = [ 1 2 ];
+      networking.bridges.br0.interfaces = [ "eth1" "eth2" ];
+
+      networking.interfaces = {
+        eth1.ipv4.addresses = lib.mkForce [];
+        eth2.ipv4.addresses = lib.mkForce [];
+        br0.ipv4.addresses = [{
+          address = config.networking.primaryIPAddress;
+          prefixLength = 24;
+        }];
+      };
+    };
+
+    a = {
+      virtualisation.vlans = [ 1 ];
+    };
+
+    b = { config, ... }: {
+      virtualisation.vlans = [ 2 ];
+      networking.primaryIPAddress = lib.mkForce "192.168.1.${toString config.virtualisation.test.nodeNumber}";
+      networking.interfaces.eth1.ipv4.addresses = lib.mkForce [{
+        address = config.networking.primaryIPAddress;
+        prefixLength = 24;
+      }];
+    };
+  };
+
+  testScript = ''
+    start_all()
+    a.wait_for_unit("network.target")
+    b.wait_for_unit("network.target")
+
+    a.succeed("ping -n -w 10 -c 1 bridge >&2")
+    b.succeed("ping -n -w 10 -c 1 bridge >&2")
+
+    a.succeed("ping -n -w 10 -c 1 b >&2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-btrfs-raid.nix b/nixpkgs/nixos/tests/systemd-initrd-btrfs-raid.nix
new file mode 100644
index 000000000000..9196033789cb
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-btrfs-raid.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-btrfs-raid";
+
+  nodes.machine = { pkgs, ... }: {
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 512 ];
+      useBootLoader = true;
+      # Booting off the BTRFS RAID requires an available init script from the Nix store
+      mountHostNixStore = true;
+      useEFIBoot = true;
+    };
+    boot.loader.systemd-boot.enable = true;
+    boot.loader.efi.canTouchEfiVariables = true;
+
+    environment.systemPackages = with pkgs; [ btrfs-progs ];
+    boot.initrd.systemd = {
+      enable = true;
+      emergencyAccess = true;
+    };
+
+    specialisation.boot-btrfs-raid.configuration = {
+      fileSystems = lib.mkVMOverride {
+        "/".fsType = lib.mkForce "btrfs";
+      };
+      virtualisation.rootDevice = "/dev/vdb";
+    };
+  };
+
+  testScript = ''
+    # Create RAID
+    machine.succeed("mkfs.btrfs -d raid0 /dev/vdb /dev/vdc")
+    machine.succeed("mkdir -p /mnt && mount /dev/vdb /mnt && echo hello > /mnt/test && umount /mnt")
+
+    # Boot from the RAID
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-btrfs-raid.conf")
+    machine.succeed("sync")
+    machine.crash()
+    machine.wait_for_unit("multi-user.target")
+
+    # Ensure we have successfully booted from the RAID
+    assert "(initrd)" in machine.succeed("systemd-analyze")  # booted with systemd in stage 1
+    assert "/dev/vdb on / type btrfs" in machine.succeed("mount")
+    assert "hello" in machine.succeed("cat /test")
+    assert "Total devices 2" in machine.succeed("btrfs filesystem show")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-luks-fido2.nix b/nixpkgs/nixos/tests/systemd-initrd-luks-fido2.nix
new file mode 100644
index 000000000000..f9f75ab7f301
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-luks-fido2.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-luks-fido2";
+
+  nodes.machine = { pkgs, config, ... }: {
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 ];
+      useBootLoader = true;
+      # Booting off the encrypted disk requires having a Nix store available for the init script
+      mountHostNixStore = true;
+      useEFIBoot = true;
+      qemu.package = lib.mkForce (pkgs.qemu_test.override { canokeySupport = true; });
+      qemu.options = [ "-device canokey,file=/tmp/canokey-file" ];
+    };
+    boot.loader.systemd-boot.enable = true;
+
+    boot.initrd.systemd.enable = true;
+
+    environment.systemPackages = with pkgs; [ cryptsetup ];
+
+    specialisation.boot-luks.configuration = {
+      boot.initrd.luks.devices = lib.mkVMOverride {
+        cryptroot = {
+          device = "/dev/vdb";
+          crypttabExtraOpts = [ "fido2-device=auto" ];
+        };
+      };
+      virtualisation.rootDevice = "/dev/mapper/cryptroot";
+      virtualisation.fileSystems."/".autoFormat = true;
+    };
+  };
+
+  testScript = ''
+    # Create encrypted volume
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -")
+    machine.succeed("PASSWORD=supersecret SYSTEMD_LOG_LEVEL=debug systemd-cryptenroll --fido2-device=auto /dev/vdb |& systemd-cat")
+
+    # Boot from the encrypted disk
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Boot and decrypt the disk
+    machine.wait_for_unit("multi-user.target")
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-luks-keyfile.nix b/nixpkgs/nixos/tests/systemd-initrd-luks-keyfile.nix
new file mode 100644
index 000000000000..617c003484b9
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-luks-keyfile.nix
@@ -0,0 +1,56 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: let
+
+  keyfile = pkgs.writeText "luks-keyfile" ''
+    MIGHAoGBAJ4rGTSo/ldyjQypd0kuS7k2OSsmQYzMH6TNj3nQ/vIUjDn7fqa3slt2
+    gV6EK3TmTbGc4tzC1v4SWx2m+2Bjdtn4Fs4wiBwn1lbRdC6i5ZYCqasTWIntWn+6
+    FllUkMD5oqjOR/YcboxG8Z3B5sJuvTP9llsF+gnuveWih9dpbBr7AgEC
+  '';
+
+in {
+  name = "systemd-initrd-luks-keyfile";
+
+  nodes.machine = { pkgs, ... }: {
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 ];
+      useBootLoader = true;
+      # Necessary to boot off the encrypted disk because it requires a init script coming from the Nix store
+      mountHostNixStore = true;
+      useEFIBoot = true;
+    };
+    boot.loader.systemd-boot.enable = true;
+
+    environment.systemPackages = with pkgs; [ cryptsetup ];
+    boot.initrd.systemd = {
+      enable = true;
+      emergencyAccess = true;
+    };
+
+    specialisation.boot-luks.configuration = {
+      boot.initrd.luks.devices = lib.mkVMOverride {
+        cryptroot = {
+          device = "/dev/vdb";
+          keyFile = "/etc/cryptroot.key";
+        };
+      };
+      virtualisation.rootDevice = "/dev/mapper/cryptroot";
+      virtualisation.fileSystems."/".autoFormat = true;
+      boot.initrd.secrets."/etc/cryptroot.key" = keyfile;
+    };
+  };
+
+  testScript = ''
+    # Create encrypted volume
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("cryptsetup luksFormat -q --iter-time=1 -d ${keyfile} /dev/vdb")
+
+    # Boot from the encrypted disk
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Boot and decrypt the disk
+    machine.wait_for_unit("multi-user.target")
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-luks-password.nix b/nixpkgs/nixos/tests/systemd-initrd-luks-password.nix
new file mode 100644
index 000000000000..66b5022d87fd
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-luks-password.nix
@@ -0,0 +1,56 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-luks-password";
+
+  nodes.machine = { pkgs, ... }: {
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 512 ];
+      useBootLoader = true;
+      # Booting off the encrypted disk requires an available init script
+      mountHostNixStore = true;
+      useEFIBoot = true;
+    };
+    boot.loader.systemd-boot.enable = true;
+
+    environment.systemPackages = with pkgs; [ cryptsetup ];
+    boot.initrd.systemd = {
+      enable = true;
+      emergencyAccess = true;
+    };
+
+    specialisation.boot-luks.configuration = {
+      boot.initrd.luks.devices = lib.mkVMOverride {
+        # We have two disks and only type one password - key reuse is in place
+        cryptroot.device = "/dev/vdb";
+        cryptroot2.device = "/dev/vdc";
+      };
+      virtualisation.rootDevice = "/dev/mapper/cryptroot";
+      virtualisation.fileSystems."/".autoFormat = true;
+      # test mounting device unlocked in initrd after switching root
+      virtualisation.fileSystems."/cryptroot2".device = "/dev/mapper/cryptroot2";
+    };
+  };
+
+  testScript = ''
+    # Create encrypted volume
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -")
+    machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdc -")
+    machine.succeed("echo -n supersecret | cryptsetup luksOpen   -q               /dev/vdc cryptroot2")
+    machine.succeed("mkfs.ext4 /dev/mapper/cryptroot2")
+
+    # Boot from the encrypted disk
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Boot and decrypt the disk
+    machine.start()
+    machine.wait_for_console_text("Please enter passphrase for disk cryptroot")
+    machine.send_console("supersecret\n")
+    machine.wait_for_unit("multi-user.target")
+
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount"), "/dev/mapper/cryptroot do not appear in mountpoints list"
+    assert "/dev/mapper/cryptroot2 on /cryptroot2 type ext4" in machine.succeed("mount")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-luks-tpm2.nix b/nixpkgs/nixos/tests/systemd-initrd-luks-tpm2.nix
new file mode 100644
index 000000000000..e292acfd1c5f
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-luks-tpm2.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-luks-tpm2";
+
+  nodes.machine = { pkgs, ... }: {
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 ];
+      useBootLoader = true;
+      # Booting off the TPM2-encrypted device requires an available init script
+      mountHostNixStore = true;
+      useEFIBoot = true;
+      tpm.enable = true;
+    };
+    boot.loader.systemd-boot.enable = true;
+
+    boot.initrd.availableKernelModules = [ "tpm_tis" ];
+
+    environment.systemPackages = with pkgs; [ cryptsetup ];
+    boot.initrd.systemd = {
+      enable = true;
+    };
+
+    specialisation.boot-luks.configuration = {
+      boot.initrd.luks.devices = lib.mkVMOverride {
+        cryptroot = {
+          device = "/dev/vdb";
+          crypttabExtraOpts = [ "tpm2-device=auto" ];
+        };
+      };
+      virtualisation.rootDevice = "/dev/mapper/cryptroot";
+      virtualisation.fileSystems."/".autoFormat = true;
+    };
+  };
+
+  testScript = ''
+    # Create encrypted volume
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("echo -n supersecret | cryptsetup luksFormat -q --iter-time=1 /dev/vdb -")
+    machine.succeed("PASSWORD=supersecret SYSTEMD_LOG_LEVEL=debug systemd-cryptenroll --tpm2-pcrs= --tpm2-device=auto /dev/vdb |& systemd-cat")
+
+    # Boot from the encrypted disk
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-luks.conf")
+    machine.succeed("sync")
+    machine.crash()
+
+    # Boot and decrypt the disk
+    machine.wait_for_unit("multi-user.target")
+    assert "/dev/mapper/cryptroot on / type ext4" in machine.succeed("mount")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-modprobe.nix b/nixpkgs/nixos/tests/systemd-initrd-modprobe.nix
new file mode 100644
index 000000000000..0f93492176b4
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-modprobe.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-modprobe";
+
+  nodes.machine = { pkgs, ... }: {
+    testing.initrdBackdoor = true;
+    boot.initrd.systemd.enable = true;
+    boot.initrd.kernelModules = [ "loop" ]; # Load module in initrd.
+    boot.extraModprobeConfig = ''
+      options loop max_loop=42
+    '';
+  };
+
+  testScript = ''
+    machine.wait_for_unit("initrd.target")
+    max_loop = machine.succeed("cat /sys/module/loop/parameters/max_loop")
+    assert int(max_loop) == 42, "Parameter should be respected for initrd kernel modules"
+
+    # Make sure it sticks in stage 2
+    machine.switch_root()
+    machine.wait_for_unit("multi-user.target")
+    max_loop = machine.succeed("cat /sys/module/loop/parameters/max_loop")
+    assert int(max_loop) == 42, "Parameter should be respected for initrd kernel modules"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-networkd-ssh.nix b/nixpkgs/nixos/tests/systemd-initrd-networkd-ssh.nix
new file mode 100644
index 000000000000..d4c168f40e29
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-networkd-ssh.nix
@@ -0,0 +1,60 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "systemd-initrd-network-ssh";
+  meta.maintainers = [ lib.maintainers.elvishjerricco ];
+
+  nodes = {
+    server = { config, pkgs, ... }: {
+      testing.initrdBackdoor = true;
+      boot.initrd.systemd.enable = true;
+      boot.initrd.systemd.contents."/etc/msg".text = "foo";
+      boot.initrd.network = {
+        enable = true;
+        ssh = {
+          enable = true;
+          authorizedKeys = [ (lib.readFile ./initrd-network-ssh/id_ed25519.pub) ];
+          port = 22;
+          hostKeys = [ ./initrd-network-ssh/ssh_host_ed25519_key ];
+        };
+      };
+    };
+
+    client = { config, ... }: {
+      environment.etc = {
+        knownHosts = {
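+          # Build a known_hosts line of the form "server,<ip> <hostkey>",
+          # extracting the server's IP from the generated extraHosts entries.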
+          text = lib.concatStrings [
+            "server,"
+            "${
+              toString (lib.head (lib.splitString " " (toString
+                (lib.elemAt (lib.splitString "\n" config.networking.extraHosts) 2))))
+            } "
+            "${lib.readFile ./initrd-network-ssh/ssh_host_ed25519_key.pub}"
+          ];
+        };
+        sshKey = {
+          source = ./initrd-network-ssh/id_ed25519;
+          mode = "0600";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    def ssh_is_up(_) -> bool:
+        status, _ = client.execute("nc -z server 22")
+        return status == 0
+
+    client.wait_for_unit("network.target")
+    with client.nested("waiting for SSH server to come up"):
+        retry(ssh_is_up)
+
+    msg = client.succeed(
+        "ssh -i /etc/sshKey -o UserKnownHostsFile=/etc/knownHosts server 'cat /etc/msg'"
+    )
+    assert "foo" in msg
+
+    server.switch_root()
+    server.wait_for_unit("multi-user.target")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-networkd.nix b/nixpkgs/nixos/tests/systemd-initrd-networkd.nix
new file mode 100644
index 000000000000..9c4ddb6e4b36
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-networkd.nix
@@ -0,0 +1,93 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+, lib ? pkgs.lib
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  inherit (lib.maintainers) elvishjerricco;
+
+  common = {
+    boot.initrd.systemd = {
+      enable = true;
+      network.wait-online.timeout = 10;
+      network.wait-online.anyInterface = true;
+      targets.network-online.requiredBy = [ "initrd.target" ];
+      services.systemd-networkd-wait-online.requiredBy =
+        [ "network-online.target" ];
+      initrdBin = [ pkgs.iproute2 pkgs.iputils pkgs.gnugrep ];
+    };
+    testing.initrdBackdoor = true;
+    boot.initrd.network.enable = true;
+  };
+
+  mkFlushTest = flush: script: makeTest {
+    name = "systemd-initrd-network-${lib.optionalString (!flush) "no-"}flush";
+    meta.maintainers = [ elvishjerricco ];
+
+    nodes.machine = {
+      imports = [ common ];
+
+      boot.initrd.network.flushBeforeStage2 = flush;
+      systemd.services.check-flush = {
+        requiredBy = ["multi-user.target"];
+        before = ["network-pre.target" "multi-user.target"];
+        wants = ["network-pre.target"];
+        unitConfig.DefaultDependencies = false;
+        serviceConfig.Type = "oneshot";
+        path = [ pkgs.iproute2 pkgs.iputils pkgs.gnugrep ];
+        inherit script;
+      };
+    };
+
+    testScript = ''
+      machine.wait_for_unit("network-online.target")
+      machine.succeed(
+          "ip addr | grep 10.0.2.15",
+          "ping -c1 10.0.2.2",
+      )
+      machine.switch_root()
+
+      machine.wait_for_unit("multi-user.target")
+    '';
+  };
+
+in {
+  basic = makeTest {
+    name = "systemd-initrd-network";
+    meta.maintainers = [ elvishjerricco ];
+
+    nodes.machine = common;
+
+    testScript = ''
+      machine.wait_for_unit("network-online.target")
+      machine.succeed(
+          "ip addr | grep 10.0.2.15",
+          "ping -c1 10.0.2.2",
+      )
+      machine.switch_root()
+
+      # Make sure the systemd-network user was set correctly in initrd
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("[ $(stat -c '%U,%G' /run/systemd/netif/links) = systemd-network,systemd-network ]")
+      machine.succeed("ip addr show >&2")
+      machine.succeed("ip route show >&2")
+    '';
+  };
+
+  doFlush = mkFlushTest true ''
+    if ip addr | grep 10.0.2.15; then
+      echo "Network configuration survived switch-root; flushBeforeStage2 failed"
+      exit 1
+    fi
+  '';
+
+  dontFlush = mkFlushTest false ''
+    if ! (ip addr | grep 10.0.2.15); then
+      echo "Network configuration didn't survive switch-root"
+      exit 1
+    fi
+  '';
+}
diff --git a/nixpkgs/nixos/tests/systemd-initrd-simple.nix b/nixpkgs/nixos/tests/systemd-initrd-simple.nix
new file mode 100644
index 000000000000..2b7283a82193
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-simple.nix
@@ -0,0 +1,48 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-simple";
+
+  nodes.machine = { pkgs, ... }: {
+    testing.initrdBackdoor = true;
+    boot.initrd.systemd.enable = true;
+    virtualisation.fileSystems."/".autoResize = true;
+  };
+
+  testScript = ''
+    import subprocess
+
+    with subtest("testing initrd backdoor"):
+        machine.wait_for_unit("initrd.target")
+        machine.succeed("systemctl status initrd-fs.target")
+        machine.switch_root()
+
+    with subtest("handover to stage-2 systemd works"):
+        machine.wait_for_unit("multi-user.target")
+        machine.succeed("systemd-analyze | grep -q '(initrd)'")  # direct handover
+        machine.succeed("touch /testfile")  # / is writable
+        machine.fail("touch /nix/store/testfile")  # /nix/store is not writable
+        # Special filesystems are mounted by systemd
+        machine.succeed("[ -e /run/booted-system ]") # /run
+        machine.succeed("[ -e /sys/class ]") # /sys
+        machine.succeed("[ -e /dev/null ]") # /dev
+        machine.succeed("[ -e /proc/1 ]") # /proc
+        # stage-2-init mounted more special filesystems
+        machine.succeed("[ -e /dev/shm ]") # /dev/shm
+        machine.succeed("[ -e /dev/pts/ptmx ]") # /dev/pts
+        machine.succeed("[ -e /run/keys ]") # /run/keys
+
+    with subtest("groups work"):
+        machine.fail("journalctl -b 0 | grep 'systemd-udevd.*Unknown group.*ignoring'")
+
+    with subtest("growfs works"):
+        oldAvail = machine.succeed("df --output=avail / | sed 1d")
+        machine.shutdown()
+
+        subprocess.check_call(["qemu-img", "resize", "vm-state-machine/machine.qcow2", "+1G"])
+
+        machine.start()
+        machine.switch_root()
+        newAvail = machine.succeed("df --output=avail / | sed 1d")
+
+        assert int(oldAvail) < int(newAvail), "File system did not grow"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-swraid.nix b/nixpkgs/nixos/tests/systemd-initrd-swraid.nix
new file mode 100644
index 000000000000..d00e67b5705a
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-swraid.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-swraid";
+
+  nodes.machine = { pkgs, ... }: {
+    # Use systemd-boot
+    virtualisation = {
+      emptyDiskImages = [ 512 512 ];
+      useBootLoader = true;
+      # Booting off the RAID requires an available init script
+      mountHostNixStore = true;
+      useEFIBoot = true;
+    };
+    boot.loader.systemd-boot.enable = true;
+    boot.loader.efi.canTouchEfiVariables = true;
+
+    environment.systemPackages = with pkgs; [ mdadm e2fsprogs ]; # for mdadm and mkfs.ext4
+    boot.swraid = {
+      enable = true;
+      mdadmConf = ''
+        ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc
+      '';
+    };
+    environment.etc."mdadm.conf".text = ''
+      MAILADDR test@example.com
+    '';
+    boot.initrd = {
+      systemd = {
+        enable = true;
+        emergencyAccess = true;
+      };
+      kernelModules = [ "raid0" ];
+    };
+
+    specialisation.boot-swraid.configuration.virtualisation.rootDevice = "/dev/disk/by-label/testraid";
+    # This protects against a regression. We do not have to switch to it.
+    # It's sufficient to trigger its evaluation.
+    specialisation.build-old-initrd.configuration.boot.initrd.systemd.enable = lib.mkForce false;
+  };
+
+  testScript = ''
+    # Create RAID
+    machine.succeed("mdadm --create --force /dev/md0 -n 2 --level=raid1 /dev/vdb /dev/vdc --metadata=0.90")
+    machine.succeed("mkfs.ext4 -L testraid /dev/md0")
+    machine.succeed("mkdir -p /mnt && mount /dev/md0 /mnt && echo hello > /mnt/test && umount /mnt")
+
+    # Boot from the RAID
+    machine.succeed("bootctl set-default nixos-generation-1-specialisation-boot-swraid.conf")
+    machine.succeed("sync")
+    machine.crash()
+    machine.wait_for_unit("multi-user.target")
+
+    # Ensure we have successfully booted from the RAID
+    assert "(initrd)" in machine.succeed("systemd-analyze")  # booted with systemd in stage 1
+    assert "/dev/md0 on / type ext4" in machine.succeed("mount")
+    assert "hello" in machine.succeed("cat /test")
+    assert "md0" in machine.succeed("cat /proc/mdstat")
+
+    expected_config = """MAILADDR test@example.com
+
+    ARRAY /dev/md0 devices=/dev/vdb,/dev/vdc
+    """
+    got_config = machine.execute("cat /etc/mdadm.conf")[1]
+    assert expected_config == got_config, repr((expected_config, got_config))
+    machine.wait_for_unit("mdmonitor.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-vconsole.nix b/nixpkgs/nixos/tests/systemd-initrd-vconsole.nix
new file mode 100644
index 000000000000..d4c2a57680c1
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-vconsole.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "systemd-initrd-vconsole";
+
+  nodes.machine = { pkgs, ... }: {
+    boot.kernelParams = lib.mkAfter [ "rd.systemd.unit=rescue.target" "loglevel=3" "udev.log_level=3" "systemd.log_level=warning" ];
+
+    boot.initrd.systemd = {
+      enable = true;
+      emergencyAccess = true;
+    };
+
+    console = {
+      earlySetup = true;
+      keyMap = "colemak";
+    };
+  };
+
+  testScript = ''
+    # Boot into rescue shell in initrd
+    machine.start()
+    machine.wait_for_console_text("Press Enter for maintenance")
+    machine.send_console("\n")
+
+    # Wait for shell to become ready
+    for _ in range(300):
+      machine.send_console("printf '%s to receive commands:\\n' Ready\n")
+      try:
+        machine.wait_for_console_text("Ready to receive commands:", timeout=1)
+        break
+      except Exception:
+        continue
+    else:
+      raise RuntimeError("Rescue shell never became ready")
+
+    # Check keymap
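+    # On the colemak layout, the QWERTY key sequence "asdfjkl;" produces
+    # "arstneio".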
+    machine.send_console("(printf '%s to receive text:\\n' Ready && read text && echo \"$text\") </dev/tty1\n")
+    machine.wait_for_console_text("Ready to receive text:")
+    for key in "asdfjkl;\n":
+      machine.send_key(key)
+    machine.wait_for_console_text("arstneio")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-initrd-vlan.nix b/nixpkgs/nixos/tests/systemd-initrd-vlan.nix
new file mode 100644
index 000000000000..5060163a047d
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-initrd-vlan.nix
@@ -0,0 +1,59 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "systemd-initrd-vlan";
+  meta.maintainers = [ lib.maintainers.majiir ];
+
+  # Tests VLAN interface configuration in systemd-initrd.
+  #
+  # Two nodes are configured for a tagged VLAN. (Note that they also still have
+  # their ordinary eth0 and eth1 interfaces, which are not VLAN-tagged.)
+  #
+  # The 'server' node waits forever in initrd (stage 1) with networking
+  # enabled. The 'client' node pings it to test network connectivity.
+
+  nodes = let
+    network = id: {
+      networking = {
+        vlans."eth1.10" = {
+          id = 10;
+          interface = "eth1";
+        };
+        interfaces."eth1.10" = {
+          ipv4.addresses = [{
+            address = "192.168.10.${id}";
+            prefixLength = 24;
+          }];
+        };
+      };
+    };
+  in {
+    # Node that will use initrd networking.
+    server = network "1" // {
+      boot.initrd.systemd.enable = true;
+      boot.initrd.network.enable = true;
+      boot.initrd.systemd.services.boot-blocker = {
+        before = [ "initrd.target" ];
+        wantedBy = [ "initrd.target" ];
+        script = "sleep infinity";
+        serviceConfig.Type = "oneshot";
+      };
+    };
+
+    # Node that will ping the server.
+    client = network "2";
+  };
+
+  testScript = ''
+    start_all()
+    client.wait_for_unit("network.target")
+
+    # Wait for the regular (untagged) interface to be up.
+    def server_is_up(_) -> bool:
+        status, _ = client.execute("ping -n -c 1 server >&2")
+        return status == 0
+    with client.nested("waiting for server to come up"):
+        retry(server_is_up)
+
+    # Try to ping the (tagged) VLAN interface.
+    client.succeed("ping -n -w 10 -c 1 192.168.10.1 >&2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-journal.nix b/nixpkgs/nixos/tests/systemd-journal.nix
new file mode 100644
index 000000000000..d2063a3b9a44
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-journal.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "systemd-journal";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lewo ];
+  };
+
+  nodes.machine = { pkgs, lib, ... }: {
+    services.journald.enableHttpGateway = true;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+
+    machine.succeed("journalctl --grep=systemd")
+
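+    # systemd-journal-gatewayd serves the journal over HTTP on port 19531 by default.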
+    machine.succeed(
+        "${pkgs.curl}/bin/curl -s localhost:19531/machine | ${pkgs.jq}/bin/jq -e '.hostname == \"machine\"'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-machinectl.nix b/nixpkgs/nixos/tests/systemd-machinectl.nix
new file mode 100644
index 000000000000..b8ed0c33e8e4
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-machinectl.nix
@@ -0,0 +1,114 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+  let
+
+    container = {
+      # We re-use the NixOS container option ...
+      boot.isContainer = true;
+      # ... and revert unwanted defaults
+      networking.useHostResolvConf = false;
+
+      # use networkd to obtain systemd network setup
+      networking.useNetworkd = true;
+      networking.useDHCP = false;
+
+      # systemd-nspawn expects /sbin/init
+      boot.loader.initScript.enable = true;
+
+      imports = [ ../modules/profiles/minimal.nix ];
+    };
+
+    containerSystem = (import ../lib/eval-config.nix {
+      inherit (pkgs) system;
+      modules = [ container ];
+    }).config.system.build.toplevel;
+
+    containerName = "container";
+    containerRoot = "/var/lib/machines/${containerName}";
+
+  in
+  {
+    name = "systemd-machinectl";
+
+    nodes.machine = { lib, ... }: {
+      # use networkd to obtain systemd network setup
+      networking.useNetworkd = true;
+      networking.useDHCP = false;
+
+      # do not try to access cache.nixos.org
+      nix.settings.substituters = lib.mkForce [ ];
+
+      # auto-start container
+      systemd.targets.machines.wants = [ "systemd-nspawn@${containerName}.service" ];
+
+      virtualisation.additionalPaths = [ containerSystem ];
+
+      # not needed, but we want to test the nspawn file generation
+      systemd.nspawn.${containerName} = { };
+
+      systemd.services."systemd-nspawn@${containerName}" = {
+        serviceConfig.Environment = [
+          # Disable tmpfs for /tmp
+          "SYSTEMD_NSPAWN_TMPFS_TMP=0"
+        ];
+        overrideStrategy = "asDropin";
+      };
+    };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("default.target");
+
+      # Install container
+      machine.succeed("mkdir -p ${containerRoot}");
+      # Workaround for nixos-install
+      machine.succeed("chmod o+rx /var/lib/machines");
+      machine.succeed("nixos-install --root ${containerRoot} --system ${containerSystem} --no-channel-copy --no-root-passwd");
+
+      # Allow systemd-nspawn to apply user namespace on immutable files
+      machine.succeed("chattr -i ${containerRoot}/var/empty");
+
+      # Test machinectl start
+      machine.succeed("machinectl start ${containerName}");
+      machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target");
+
+      # Test nss_mymachines without nscd
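+      # Setting LD_LIBRARY_PATH lets getent load the mymachines NSS module
+      # directly from the system profile instead of going through nscd.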
+      machine.succeed('LD_LIBRARY_PATH="/run/current-system/sw/lib" getent -s hosts:mymachines hosts ${containerName}');
+
+      # Test nss_mymachines via nscd
+      machine.succeed("getent hosts ${containerName}");
+
+      # Test systemd-nspawn network configuration
+      machine.succeed("ping -n -c 1 ${containerName}");
+
+      # Test systemd-nspawn uses a user namespace
+      machine.succeed("test $(machinectl status ${containerName} | grep 'UID Shift: ' | wc -l) = 1")
+
+      # Test systemd-nspawn reboot
+      machine.succeed("machinectl shell ${containerName} /run/current-system/sw/bin/reboot");
+      machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target");
+
+      # Test machinectl reboot
+      machine.succeed("machinectl reboot ${containerName}");
+      machine.wait_until_succeeds("systemctl -M ${containerName} is-active default.target");
+
+      # Restart machine
+      machine.shutdown()
+      machine.start()
+      machine.wait_for_unit("default.target");
+
+      # Test auto-start
+      machine.succeed("machinectl show ${containerName}")
+
+      # Test machinectl stop
+      machine.succeed("machinectl stop ${containerName}");
+      machine.wait_until_succeeds("test $(systemctl is-active systemd-nspawn@${containerName}) = inactive");
+
+      # Test tmpfs for /tmp
+      machine.fail("mountpoint /tmp");
+
+      # Show how to delete the container
+      machine.succeed("chattr -i ${containerRoot}/var/empty");
+      machine.succeed("rm -rf ${containerRoot}");
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/systemd-misc.nix b/nixpkgs/nixos/tests/systemd-misc.nix
new file mode 100644
index 000000000000..0ddd51100463
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-misc.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  exampleScript = pkgs.writeTextFile {
+    name = "example.sh";
+    text = ''
+      #! ${pkgs.runtimeShell} -e
+
+      while true; do
+          echo "Example script running" >&2
+          ${pkgs.coreutils}/bin/sleep 1
+      done
+    '';
+    executable = true;
+  };
+
+  unitFile = pkgs.writeTextFile {
+    name = "example.service";
+    text = ''
+      [Unit]
+      Description=Example systemd service unit file
+
+      [Service]
+      ExecStart=${exampleScript}
+
+      [Install]
+      WantedBy=multi-user.target
+    '';
+  };
+in
+{
+  name = "systemd-misc";
+
+  nodes.machine = { pkgs, lib, ... }: {
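+    # Add a writable directory to systemd's unit search path; the test copies
+    # example.service into it at runtime.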
+    boot.extraSystemdUnitPaths = [ "/etc/systemd-rw/system" ];
+
+    users.users.limited = {
+      isNormalUser = true;
+      uid = 1000;
+    };
+
+    systemd.units."user-1000.slice.d/limits.conf" = {
+      text = ''
+        [Slice]
+        TasksAccounting=yes
+        TasksMax=100
+      '';
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("mkdir -p /etc/systemd-rw/system")
+    machine.succeed(
+        "cp ${unitFile} /etc/systemd-rw/system/example.service"
+    )
+    machine.succeed("systemctl start example.service")
+    machine.succeed("systemctl status example.service | grep 'Active: active'")
+
+    machine.succeed("systemctl show --property TasksMax --value user-1000.slice | grep 100")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-networkd-dhcpserver-static-leases.nix b/nixpkgs/nixos/tests/systemd-networkd-dhcpserver-static-leases.nix
new file mode 100644
index 000000000000..f6d5411aa5ca
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-networkd-dhcpserver-static-leases.nix
@@ -0,0 +1,81 @@
+# In contrast to systemd-networkd-dhcpserver, this test configures
+# the router with a static DHCP lease for the client's MAC address.
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "systemd-networkd-dhcpserver-static-leases";
+  meta = with lib.maintainers; {
+    maintainers = [ veehaitch ];
+  };
+  nodes = {
+    router = {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+      systemd.network = {
+        networks = {
+          # systemd-networkd will load the first network unit file
+          # that matches, ordered lexicographically by filename.
+          # /etc/systemd/network/{40-eth1,99-main}.network already
+          # exists. This network unit must be loaded for the test,
+          # however, hence the name.
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig = {
+              DHCPServer = true;
+              Address = "10.0.0.1/24";
+            };
+            dhcpServerStaticLeases = [{
+              dhcpServerStaticLeaseConfig = {
+                MACAddress = "02:de:ad:be:ef:01";
+                Address = "10.0.0.10";
+              };
+            }];
+          };
+        };
+      };
+    };
+
+    client = {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1 = {
+          useDHCP = true;
+          macAddress = "02:de:ad:be:ef:01";
+        };
+      };
+
+      # This setting is important to have the router assign the
+      # configured lease based on the client's MAC address. Also see:
+      # https://github.com/systemd/systemd/issues/21368#issuecomment-982193546
+      systemd.network.networks."40-eth1".dhcpV4Config.ClientIdentifier = "mac";
+    };
+  };
+  testScript = ''
+    start_all()
+
+    with subtest("check router network configuration"):
+      router.wait_for_unit("systemd-networkd-wait-online.service")
+      eth1_status = router.succeed("networkctl status eth1")
+      assert "Network File: /etc/systemd/network/01-eth1.network" in eth1_status, \
+        "The router interface eth1 is not using the expected network file"
+      assert "10.0.0.1" in eth1_status, "Did not find expected router IPv4"
+
+    with subtest("check client network configuration"):
+      client.wait_for_unit("systemd-networkd-wait-online.service")
+      eth1_status = client.succeed("networkctl status eth1")
+      assert "Network File: /etc/systemd/network/40-eth1.network" in eth1_status, \
+        "The client interface eth1 is not using the expected network file"
+      assert "10.0.0.10" in eth1_status, "Did not find expected client IPv4"
+
+    with subtest("router and client can reach each other"):
+      client.wait_until_succeeds("ping -c 5 10.0.0.1")
+      router.wait_until_succeeds("ping -c 5 10.0.0.10")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-networkd-dhcpserver.nix b/nixpkgs/nixos/tests/systemd-networkd-dhcpserver.nix
new file mode 100644
index 000000000000..cf0ccb744211
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-networkd-dhcpserver.nix
@@ -0,0 +1,109 @@
+# This test predominantly exercises the systemd-networkd DHCP server by
+# setting up a DHCP server and client, and ensuring they are mutually
+# reachable via the DHCP-allocated address.
+# Two DHCP servers are set up on bridge VLANs, testing to make sure that
+# bridge VLAN settings are correctly applied.
+#
+# br0 ----untagged---v
+#                    +---PVID 1+VLAN 2---[bridge]---PVID 2---eth1
+# vlan2 ---VLAN 2----^
+import ./make-test-python.nix ({pkgs, ...}: {
+  name = "systemd-networkd-dhcpserver";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+  nodes = {
+    router = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+      };
+      systemd.network = {
+        netdevs = {
+          br0 = {
+            enable = true;
+            netdevConfig = {
+              Name = "br0";
+              Kind = "bridge";
+            };
+            extraConfig = ''
+              [Bridge]
+              VLANFiltering=yes
+              DefaultPVID=none
+            '';
+          };
+          vlan2 = {
+            enable = true;
+            netdevConfig = {
+              Name = "vlan2";
+              Kind = "vlan";
+            };
+            vlanConfig.Id = 2;
+          };
+        };
+        networks = {
+          # systemd-networkd will load the first network unit file
+          # that matches, ordered lexicographically by filename.
+          # /etc/systemd/network/{40-eth1,99-main}.network already
+          # exists. This network unit must be loaded for the test,
+          # however, hence the name.
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig.Bridge = "br0";
+            bridgeVLANs = [
+              { bridgeVLANConfig = { PVID = 2; EgressUntagged = 2; }; }
+            ];
+          };
+          "02-br0" = {
+            name = "br0";
+            networkConfig = {
+              DHCPServer = true;
+              Address = "10.0.0.1/24";
+              VLAN = ["vlan2"];
+            };
+            dhcpServerConfig = {
+              PoolOffset = 100;
+              PoolSize = 1;
+            };
+            bridgeVLANs = [
+              { bridgeVLANConfig = { PVID = 1; EgressUntagged = 1; }; }
+              { bridgeVLANConfig = { VLAN = 2; }; }
+            ];
+          };
+          "02-vlan2" = {
+            name = "vlan2";
+            networkConfig = {
+              DHCPServer = true;
+              Address = "10.0.2.1/24";
+            };
+            dhcpServerConfig = {
+              PoolOffset = 100;
+              PoolSize = 1;
+            };
+          };
+        };
+      };
+    };
+
+    client = { config, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1.useDHCP = true;
+      };
+    };
+  };
+  testScript = { ... }: ''
+    start_all()
+    router.wait_for_unit("systemd-networkd-wait-online.service")
+    client.wait_for_unit("systemd-networkd-wait-online.service")
+    client.wait_until_succeeds("ping -c 5 10.0.2.1")
+    router.wait_until_succeeds("ping -c 5 10.0.2.100")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix b/nixpkgs/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
new file mode 100644
index 000000000000..54f371e6c070
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-networkd-ipv6-prefix-delegation.nix
@@ -0,0 +1,335 @@
+# This test verifies that we can request and assign IPv6 prefixes from upstream
+# (e.g. ISP) routers.
+# The setup consists of three VMs: one for the ISP, one acting as the residential
+# router, and a third as a client machine in the residential network.
+#
+# There are two VLANs in this test:
+# - VLAN 1 is the connection between the ISP and the router
+# - VLAN 2 is the connection between the router and the client
+
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "systemd-networkd-ipv6-prefix-delegation";
+  meta = with lib.maintainers; {
+    maintainers = [ andir hexa ];
+  };
+  nodes = {
+
+    # The ISP router's job is to delegate IPv6 prefixes via DHCPv6. Like with
+    # regular IPv6 auto-configuration it will also emit IPv6 router
+    # advertisements (RAs). Those RAs will not carry a prefix but instead set
+    # the "Managed" flag to indicate to the receiving nodes that they should
+    # attempt DHCPv6.
+    #
+    # Note: On the ISP's device we don't really care if we are using networkd in
+    # this example. That being said we can't use it (yet) as networkd doesn't
+    # implement the serving side of DHCPv6. We will use ISC Kea for that task.
+    isp = { lib, pkgs, ... }: {
+      virtualisation.vlans = [ 1 ];
+      networking = {
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1 = lib.mkForce {}; # Don't use scripted networking
+      };
+
+      systemd.network = {
+        enable = true;
+
+        networks = {
+          "eth1" = {
+            matchConfig.Name = "eth1";
+            address = [
+              "2001:DB8::1/64"
+            ];
+            networkConfig.IPForward = true;
+          };
+        };
+      };
+
+      # Since we want to program the routes that we delegate to the "customer"
+      # into our routing table we must provide kea with the required capability.
+      systemd.services.kea-dhcp6-server.serviceConfig = {
+        AmbientCapabilities = [ "CAP_NET_ADMIN" ];
+        CapabilityBoundingSet = [ "CAP_NET_ADMIN" ];
+      };
+
+      services = {
+        # Configure the DHCPv6 server to hand out both IA_NA and IA_PD.
+        #
+        # We will hand out /48 prefixes from the subnet 2001:DB8:1000::/36.
+        # That gives us ~4k prefixes. That should be enough for this test.
+        #
+        # Since (usually) you will not receive a prefix with the router
+        # advertisements we also hand out /128 leases from the range
+        # 2001:DB8:0000:0000:FFFF::/112.
+        kea.dhcp6 = {
+          enable = true;
+          settings = {
+            interfaces-config.interfaces = [ "eth1" ];
+            subnet6 = [ {
+              interface = "eth1";
+              subnet = "2001:DB8::/32";
+              pd-pools = [ {
+                prefix = "2001:DB8:1000::";
+                prefix-len = 36;
+                delegated-len = 48;
+              } ];
+              pools = [ {
+                pool = "2001:DB8:0000:0000::-2001:DB8:0FFF:FFFF::FFFF";
+              } ];
+            } ];
+
+            # This is the glue between Kea and the Kernel FIB. DHCPv6
+            # rightfully has no concept of setting up a route in your
+            # FIB. This step really depends on your setup.
+            #
+            # In a production environment your DHCPv6 server is likely
+            # not the router. You might want to consider BGP, NETCONF
+            # calls, … in those cases.
+            #
+            # In this example we use the run-script hook, which lets us
+            # execute anything and passes information via the environment.
+            # https://kea.readthedocs.io/en/kea-2.2.0/arm/hooks.html#run-script-run-script-support-for-external-hook-scripts
+            hooks-libraries = [ {
+              library = "${pkgs.kea}/lib/kea/hooks/libdhcp_run_script.so";
+              parameters = {
+                name = pkgs.writeShellScript "kea-run-hooks" ''
+                  export PATH="${lib.makeBinPath (with pkgs; [ coreutils iproute2 ])}"
+
+                  set -euxo pipefail
+
+                  leases6_committed() {
+                    for i in $(seq $LEASES6_SIZE); do
+                      idx=$((i-1))
+                      prefix_var="LEASES6_AT''${idx}_ADDRESS"
+                      plen_var="LEASES6_AT''${idx}_PREFIX_LEN"
+
+                      ip -6 route replace ''${!prefix_var}/''${!plen_var} via $QUERY6_REMOTE_ADDR dev $QUERY6_IFACE_NAME
+                    done
+                  }
+
+                  unknown_handler() {
+                    echo "Unhandled function call ''${*}"
+                    exit 123
+                  }
+
+                  case "$1" in
+                      "leases6_committed")
+                          leases6_committed
+                          ;;
+                      *)
+                          unknown_handler "''${@}"
+                          ;;
+                  esac
+                '';
+                sync = false;
+              };
+            } ];
+          };
+        };
+
+        # Finally we have to set up the router advertisements. While we could be
+        # using networkd or bird for this task, `radvd` is probably the most
+        # venerable of them all. It was made explicitly for this purpose and
+        # the configuration is much more straightforward than what networkd
+        # requires.
+        # As outlined above we will have to set the `Managed` flag as otherwise
+        # the clients will not know if they should do DHCPv6. (Some do
+        # anyway/always)
+        radvd = {
+          enable = true;
+          config = ''
+            interface eth1 {
+              AdvSendAdvert on;
+              AdvManagedFlag on;
+              AdvOtherConfigFlag off; # we don't really have DNS or NTP or anything like that to distribute
+              prefix ::/64 {
+                AdvOnLink on;
+                AdvAutonomous on;
+              };
+            };
+          '';
+        };
+
+      };
+    };
+
+    # This will be our (residential) router that receives the IPv6 prefix (IA_PD)
+    # and /128 (IA_NA) allocation.
+    #
+    # Here we will actually start using networkd.
+    router = {
+      virtualisation.vlans = [ 1 2 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+
+      boot.kernel.sysctl = {
+        # we want to forward packets from the ISP to the client and back.
+        "net.ipv6.conf.all.forwarding" = 1;
+      };
+
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+        # Consider enabling this in production and generating firewall rules
+        # for forwarding/input from the configured interfaces so you do not
+        # have to manage rules in multiple places.
+        firewall.enable = false;
+      };
+
+      systemd.network = {
+        networks = {
+          # systemd-networkd will load the first network unit file
+          # that matches, ordered lexicographically by filename.
+          # /etc/systemd/network/{40-eth1,99-main}.network already
+          # exists. This network unit must be loaded for the test,
+          # however, hence the name.
+
+          # Configuration of the interface to the ISP.
+          # We must accept RAs and request the PD prefix.
+          "01-eth1" = {
+            name = "eth1";
+            networkConfig = {
+              Description = "ISP interface";
+              IPv6AcceptRA = true;
+              #DHCP = false; # no need for legacy IP
+            };
+            linkConfig = {
+              # We care about this interface when talking about being "online".
+              # If this interface is in the `routable` state we can reach
+              # others and they should be able to reach us.
+              RequiredForOnline = "routable";
+            };
+            # This configures the DHCPv6 client part towards the ISP's DHCPv6 server.
+            dhcpV6Config = {
+              # We have to include a request for a prefix in our DHCPv6 client
+              # request packets.
+              # Otherwise the upstream DHCPv6 server wouldn't know if we want a
+              # prefix or not. Note: On some installations it makes sense to
+              # always force that option on the DHCPv6 server since there are
+              # certain CPEs that just do not set this field but happily
+              # accept the delegated prefix.
+              PrefixDelegationHint  = "::/48";
+            };
+            ipv6SendRAConfig = {
+              # Let networkd know that we would very much like to use DHCPv6
+              # to obtain the "managed" information. Not sure why they can't
+              # just take that from the upstream RAs.
+              Managed = true;
+            };
+          };
+
+          # Interface to the client. Here we should redistribute a /64 from
+          # the prefix we received from the ISP.
+          "01-eth2" = {
+            name = "eth2";
+            networkConfig = {
+              Description = "Client interface";
+              # The client shouldn't be allowed to send us RAs, that would be weird.
+              IPv6AcceptRA = false;
+
+              # Delegate prefixes from the DHCPv6 PD pool.
+              DHCPPrefixDelegation = true;
+              IPv6SendRA = true;
+            };
+
+            # In a production environment you should consider setting these as well:
+            # ipv6SendRAConfig = {
+              #EmitDNS = true;
+              #EmitDomains = true;
+              #DNS = "fe80::1"; # or whatever "well known" IP your router will have on the inside.
+            # };
+
+            # This adds a "random" ULA prefix to the interface that is being
+            # advertised to the clients.
+            # Not used in this test.
+            # ipv6Prefixes = [
+            #   {
+            #     ipv6PrefixConfig = {
+            #       AddressAutoconfiguration = true;
+            #       PreferredLifetimeSec = 1800;
+            #       ValidLifetimeSec = 1800;
+            #     };
+            #   }
+            # ];
+          };
+
+          # Finally we are going to add a static IPv6 unique local address to
+          # the "lo" interface. This will serve as an ICMPv6 echo target to
+          # verify connectivity from the client to the router.
+          "01-lo" = {
+            name = "lo";
+            addresses = [
+              { addressConfig.Address = "FD42::1/128"; }
+            ];
+          };
+        };
+      };
+
+      # make the network-online target a requirement; we wait for it in our test script
+      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
+    };
+
+    # This is the client behind the router. We should be receiving router
+    # advertisements for both the ULA and the delegated prefix.
+    # All we have to do is boot with the default (networkd) configuration.
+    client = {
+      virtualisation.vlans = [ 2 ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking = {
+        useNetworkd = true;
+        useDHCP = false;
+      };
+
+      # make the network-online target a requirement; we wait for it in our test script
+      systemd.targets.network-online.wantedBy = [ "multi-user.target" ];
+    };
+  };
+
+  testScript = ''
+    # First start the router and wait for it to reach a state where we are
+    # certain networkd is up and it is able to send out RAs
+    router.start()
+    router.wait_for_unit("systemd-networkd.service")
+
+    # After that we can boot the client and wait for the network online target.
+    # Since we only care about IPv6 that should not involve waiting for legacy
+    # IP leases.
+    client.start()
+    client.wait_for_unit("network-online.target")
+
+    # the static address on the router should be reachable
+    client.wait_until_succeeds("ping -6 -c 1 FD42::1")
+
+    # the global IP of the ISP router should still not be reachable
+    router.fail("ping -6 -c 1 2001:DB8::1")
+
+    # Once we have internal connectivity boot up the ISP
+    isp.start()
+
+    # Since "being online" has no real meaning for the ISP, we just wait for
+    # the target where all the units have been started. It probably still
+    # takes a few more seconds for all the RA timers to fire, etc.
+    isp.wait_for_unit("multi-user.target")
+
+    # wait until the uplink interface has a good status
+    router.wait_for_unit("network-online.target")
+    router.wait_until_succeeds("ping -6 -c1 2001:DB8::1")
+
+    # shortly after that the client should have received its global IPv6
+    # address and thus be able to ping the ISP
+    client.wait_until_succeeds("ping -6 -c1 2001:DB8::1")
+
+    # verify that we got a globally scoped address on eth1 from the
+    # documentation prefix
+    ip_output = client.succeed("ip --json -6 address show dev eth1")
+
+    import json
+
+    ip_json = json.loads(ip_output)[0]
+    assert any(
+        addr["local"].upper().startswith("2001:DB8:")
+        for addr in ip_json["addr_info"]
+        if addr["scope"] == "global"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-networkd-vrf.nix b/nixpkgs/nixos/tests/systemd-networkd-vrf.nix
new file mode 100644
index 000000000000..d4227526a30d
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-networkd-vrf.nix
@@ -0,0 +1,182 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: let
+  inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+
+  mkNode = vlan: id: {
+    virtualisation.vlans = [ vlan ];
+    networking = {
+      useDHCP = false;
+      useNetworkd = true;
+    };
+
+    systemd.network = {
+      enable = true;
+
+      networks."10-eth${toString vlan}" = {
+        matchConfig.Name = "eth${toString vlan}";
+        linkConfig.RequiredForOnline = "no";
+        networkConfig = {
+          Address = "192.168.${toString vlan}.${toString id}/24";
+          IPForward = "yes";
+        };
+      };
+    };
+  };
+in {
+  name = "systemd-networkd-vrf";
+  meta.maintainers = with lib.maintainers; [ ma27 ];
+
+  nodes = {
+    client = { pkgs, ... }: {
+      virtualisation.vlans = [ 1 2 ];
+
+      networking = {
+        useDHCP = false;
+        useNetworkd = true;
+        firewall.checkReversePath = "loose";
+      };
+
+      systemd.network = {
+        enable = true;
+
+        netdevs."10-vrf1" = {
+          netdevConfig = {
+            Kind = "vrf";
+            Name = "vrf1";
+            MTUBytes = "1300";
+          };
+          vrfConfig.Table = 23;
+        };
+        netdevs."10-vrf2" = {
+          netdevConfig = {
+            Kind = "vrf";
+            Name = "vrf2";
+            MTUBytes = "1300";
+          };
+          vrfConfig.Table = 42;
+        };
+
+        networks."10-vrf1" = {
+          matchConfig.Name = "vrf1";
+          networkConfig.IPForward = "yes";
+          routes = [
+            { routeConfig = { Destination = "192.168.1.2"; Metric = 100; }; }
+          ];
+        };
+        networks."10-vrf2" = {
+          matchConfig.Name = "vrf2";
+          networkConfig.IPForward = "yes";
+          routes = [
+            { routeConfig = { Destination = "192.168.2.3"; Metric = 100; }; }
+          ];
+        };
+
+        networks."10-eth1" = {
+          matchConfig.Name = "eth1";
+          linkConfig.RequiredForOnline = "no";
+          networkConfig = {
+            VRF = "vrf1";
+            Address = "192.168.1.1/24";
+            IPForward = "yes";
+          };
+        };
+        networks."10-eth2" = {
+          matchConfig.Name = "eth2";
+          linkConfig.RequiredForOnline = "no";
+          networkConfig = {
+            VRF = "vrf2";
+            Address = "192.168.2.1/24";
+            IPForward = "yes";
+          };
+        };
+      };
+    };
+
+    node1 = lib.mkMerge [
+      (mkNode 1 2)
+      {
+        services.openssh.enable = true;
+        users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+      }
+    ];
+
+    node2 = mkNode 2 3;
+    node3 = mkNode 2 4;
+  };
+
+  testScript = ''
+    import json
+
+    def compare(raw_json, to_compare):
+        data = json.loads(raw_json)
+        assert len(data) >= len(to_compare)
+        for i, row in enumerate(to_compare):
+            actual = data[i]
+            assert len(row.keys()) > 0
+            for key, value in row.items():
+                assert value == actual[key], f"""
+                  In entry {i}, value {key}: got: {actual[key]}, expected {value}
+                """
+
+
+    start_all()
+
+    client.wait_for_unit("network.target")
+    node1.wait_for_unit("network.target")
+    node2.wait_for_unit("network.target")
+    node3.wait_for_unit("network.target")
+
+    # Check that networkd properly configures the main routing table
+    # and the routing tables for the VRF.
+    with subtest("check vrf routing tables"):
+        compare(
+            client.succeed("ip --json -4 route list"),
+            [
+                {"dst": "192.168.1.2", "dev": "vrf1", "metric": 100},
+                {"dst": "192.168.2.3", "dev": "vrf2", "metric": 100}
+            ]
+        )
+        compare(
+            client.succeed("ip --json -4 route list table 23"),
+            [
+                {"dst": "192.168.1.0/24", "dev": "eth1", "prefsrc": "192.168.1.1"},
+                {"type": "local", "dst": "192.168.1.1", "dev": "eth1", "prefsrc": "192.168.1.1"},
+                {"type": "broadcast", "dev": "eth1", "prefsrc": "192.168.1.1", "dst": "192.168.1.255"}
+            ]
+        )
+        compare(
+            client.succeed("ip --json -4 route list table 42"),
+            [
+                {"dst": "192.168.2.0/24", "dev": "eth2", "prefsrc": "192.168.2.1"},
+                {"type": "local", "dst": "192.168.2.1", "dev": "eth2", "prefsrc": "192.168.2.1"},
+                {"type": "broadcast", "dev": "eth2", "prefsrc": "192.168.2.1", "dst": "192.168.2.255"}
+            ]
+        )
+
+    # Ensure that other nodes are reachable via ICMP through the VRF.
+    with subtest("icmp through vrf works"):
+        client.succeed("ping -c5 192.168.1.2")
+        client.succeed("ping -c5 192.168.2.3")
+
+    # Test whether TCP through a VRF IP is possible.
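+    # "ip vrf exec vrf1" runs ssh with its sockets bound to the vrf1 device,
+    # so the connection uses the VRF's routing table.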
+    with subtest("tcp traffic through vrf works"):
+        node1.wait_for_open_port(22)
+        client.succeed(
+            "cat ${snakeOilPrivateKey} > privkey.snakeoil"
+        )
+        client.succeed("chmod 600 privkey.snakeoil")
+        client.succeed(
+            "ulimit -l 2048; ip vrf exec vrf1 ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i privkey.snakeoil root@192.168.1.2 true"
+        )
+
+    # Only configured routes through the VRF from the main routing table should
+    # work. Additional IPs are only reachable when binding to the vrf interface.
+    with subtest("only routes from main routing table work by default"):
+        client.fail("ping -c5 192.168.2.4")
+        client.succeed("ping -I vrf2 -c5 192.168.2.4")
+
+    client.shutdown()
+    node1.shutdown()
+    node2.shutdown()
+    node3.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-networkd.nix b/nixpkgs/nixos/tests/systemd-networkd.nix
new file mode 100644
index 000000000000..6b241b93d511
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-networkd.nix
@@ -0,0 +1,123 @@
+let generateNodeConf = { lib, pkgs, config, privk, pubk, peerId, nodeId, ...}: {
+      imports = [ common/user-account.nix ];
+      systemd.services.systemd-networkd.environment.SYSTEMD_LOG_LEVEL = "debug";
+      networking.useNetworkd = true;
+      networking.useDHCP = false;
+      networking.firewall.enable = false;
+      virtualisation.vlans = [ 1 ];
+      environment.systemPackages = with pkgs; [ wireguard-tools ];
+      systemd.network = {
+        enable = true;
+        config = {
+          routeTables.custom = 23;
+        };
+        netdevs = {
+          "90-wg0" = {
+            netdevConfig = { Kind = "wireguard"; Name = "wg0"; };
+            wireguardConfig = {
+              # NOTE: we're storing the wireguard private key in the
+              #       store for this test. Do not do this in the real
+              #       world. Keep in mind the nix store is
+              #       world-readable.
+              PrivateKeyFile = pkgs.writeText "wg0-priv" privk;
+              ListenPort = 51820;
+              FirewallMark = 42;
+            };
+            wireguardPeers = [ {wireguardPeerConfig={
+              Endpoint = "192.168.1.${peerId}:51820";
+              PublicKey = pubk;
+              PresharedKeyFile = pkgs.writeText "psk.key" "yTL3sCOL33Wzi6yCnf9uZQl/Z8laSE+zwpqOHC4HhFU=";
+              AllowedIPs = [ "10.0.0.${peerId}/32" ];
+              PersistentKeepalive = 15;
+            };}];
+          };
+        };
+        networks = {
+          "99-nope" = {
+            matchConfig.Name = "eth*";
+            linkConfig.Unmanaged = true;
+          };
+          "90-wg0" = {
+            matchConfig = { Name = "wg0"; };
+            address = [ "10.0.0.${nodeId}/32" ];
+            routes = [
+              { routeConfig = { Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; }; }
+              { routeConfig = { Gateway = "10.0.0.${nodeId}"; Destination = "10.0.0.0/24"; Table = "custom"; }; }
+            ];
+          };
+          "30-eth1" = {
+            matchConfig = { Name = "eth1"; };
+            address = [
+              "192.168.1.${nodeId}/24"
+              "fe80::${nodeId}/64"
+            ];
+            routingPolicyRules = [
+              { routingPolicyRuleConfig = { Table = 10; IncomingInterface = "eth1"; Family = "both"; };}
+              { routingPolicyRuleConfig = { Table = 20; OutgoingInterface = "eth1"; };}
+              { routingPolicyRuleConfig = { Table = 30; From = "192.168.1.1"; To = "192.168.1.2"; SourcePort = 666 ; DestinationPort = 667; };}
+              { routingPolicyRuleConfig = { Table = 40; IPProtocol = "tcp"; InvertRule = true; };}
+              { routingPolicyRuleConfig = { Table = 50; IncomingInterface = "eth1"; Family = "ipv4"; };}
+            ];
+          };
+        };
+      };
+    };
+in import ./make-test-python.nix ({pkgs, ... }: {
+  name = "networkd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ picnoir ];
+  };
+  nodes = {
+    node1 = { pkgs, ... }@attrs:
+    let localConf = {
+        privk = "GDiXWlMQKb379XthwX0haAbK6hTdjblllpjGX0heP00=";
+        pubk = "iRxpqj42nnY0Qz8MAQbSm7bXxXP5hkPqWYIULmvW+EE=";
+        nodeId = "1";
+        peerId = "2";
+    };
+    in generateNodeConf (attrs // localConf);
+
+    node2 = { pkgs, ... }@attrs:
+    let localConf = {
+        privk = "eHxSI2jwX/P4AOI0r8YppPw0+4NZnjOxfbS5mt06K2k=";
+        pubk = "27s0OvaBBdHoJYkH9osZpjpgSOVNw+RaKfboT/Sfq0g=";
+        nodeId = "2";
+        peerId = "1";
+    };
+    in generateNodeConf (attrs // localConf);
+  };
+testScript = ''
+    start_all()
+    node1.wait_for_unit("systemd-networkd-wait-online.service")
+    node2.wait_for_unit("systemd-networkd-wait-online.service")
+
+    # ================================
+    # Networkd Config
+    # ================================
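+    # routeTables.custom = 23 maps the name "custom" to table 23; networkd
+    # writes this as RouteTable=custom:23 in networkd.conf, and the wg0 route
+    # with Table = "custom" must end up in that table.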
+    node1.succeed("grep RouteTable=custom:23 /etc/systemd/networkd.conf")
+    node1.succeed("sudo ip route show table custom | grep '10.0.0.0/24 via 10.0.0.1 dev wg0 proto static'")
+
+    # ================================
+    # Wireguard
+    # ================================
+    node1.succeed("ping -c 5 10.0.0.2")
+    node2.succeed("ping -c 5 10.0.0.1")
+    # Is the fwmark set?
+    node2.succeed("wg | grep -q 42")
+
+    # ================================
+    # Routing Policies
+    # ================================
+    # Testing all the routingPolicyRuleConfig members:
+    # Table + IncomingInterface
+    node1.succeed("sudo ip rule | grep 'from all iif eth1 lookup 10'")
+    # OutgoingInterface
+    node1.succeed("sudo ip rule | grep 'from all oif eth1 lookup 20'")
+    # From + To + SourcePort + DestinationPort
+    node1.succeed(
+        "sudo ip rule | grep 'from 192.168.1.1 to 192.168.1.2 sport 666 dport 667 lookup 30'"
+    )
+    # IPProtocol + InvertRule
+    node1.succeed("sudo ip rule | grep 'not from all ipproto tcp lookup 40'")
+'';
+})
diff --git a/nixpkgs/nixos/tests/systemd-no-tainted.nix b/nixpkgs/nixos/tests/systemd-no-tainted.nix
new file mode 100644
index 000000000000..f0504065f2a4
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-no-tainted.nix
@@ -0,0 +1,14 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "systemd-no-tainted";
+
+  nodes.machine = { };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    with subtest("systemctl should not report tainted with unmerged-usr"):
+        output = machine.succeed("systemctl status")
+        print(output)
+        assert "Tainted" not in output
+        assert "unmerged-usr" not in output
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-nspawn-configfile.nix b/nixpkgs/nixos/tests/systemd-nspawn-configfile.nix
new file mode 100644
index 000000000000..12ab21b7f9b5
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-nspawn-configfile.nix
@@ -0,0 +1,128 @@
+import ./make-test-python.nix ({ lib, ... }:
+let
+  execOptions = [
+    "Boot"
+    "ProcessTwo"
+    "Parameters"
+    "Environment"
+    "User"
+    "WorkingDirectory"
+    "PivotRoot"
+    "Capability"
+    "DropCapability"
+    "NoNewPrivileges"
+    "KillSignal"
+    "Personality"
+    "MachineID"
+    "PrivateUsers"
+    "NotifyReady"
+    "SystemCallFilter"
+    "LimitCPU"
+    "LimitFSIZE"
+    "LimitDATA"
+    "LimitSTACK"
+    "LimitCORE"
+    "LimitRSS"
+    "LimitNOFILE"
+    "LimitAS"
+    "LimitNPROC"
+    "LimitMEMLOCK"
+    "LimitLOCKS"
+    "LimitSIGPENDING"
+    "LimitMSGQUEUE"
+    "LimitNICE"
+    "LimitRTPRIO"
+    "LimitRTTIME"
+    "OOMScoreAdjust"
+    "CPUAffinity"
+    "Hostname"
+    "ResolvConf"
+    "Timezone"
+    "LinkJournal"
+    "Ephemeral"
+    "AmbientCapability"
+  ];
+
+  filesOptions = [
+    "ReadOnly"
+    "Volatile"
+    "Bind"
+    "BindReadOnly"
+    "TemporaryFileSystem"
+    "Overlay"
+    "OverlayReadOnly"
+    "PrivateUsersChown"
+    "BindUser"
+    "Inaccessible"
+    "PrivateUsersOwnership"
+  ];
+
+  networkOptions = [
+    "Private"
+    "VirtualEthernet"
+    "VirtualEthernetExtra"
+    "Interface"
+    "MACVLAN"
+    "IPVLAN"
+    "Bridge"
+    "Zone"
+    "Port"
+  ];
+
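+  # Map every option name to the dummy value "testdata" so that its presence
+  # in the generated .nspawn file can later be checked with grep.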
+  optionsToConfig = opts: builtins.listToAttrs (map (n: lib.nameValuePair n "testdata") opts);
+
+  grepForOptions = opts: ''node.succeed(
+    "for o in ${builtins.concatStringsSep " " opts} ; do grep --quiet $o ${configFile} || exit 1 ; done"
+  )'';
+
+  unitName = "options-test";
+  configFile = "/etc/systemd/nspawn/${unitName}.nspawn";
+
+in
+{
+  name = "systemd-nspawn-configfile";
+
+  nodes = {
+    node = { pkgs, ... }: {
+      systemd.nspawn."${unitName}" = {
+        enable = true;
+
+        execConfig = optionsToConfig execOptions // {
+          Boot = true;
+          ProcessTwo = true;
+          NotifyReady = true;
+        };
+
+        filesConfig = optionsToConfig filesOptions // {
+          ReadOnly = true;
+          Volatile = "state";
+          PrivateUsersChown = true;
+          PrivateUsersOwnership = "auto";
+        };
+
+        networkConfig = optionsToConfig networkOptions // {
+          Private = true;
+          VirtualEthernet = true;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    node.wait_for_file("${configFile}")
+
+    with subtest("Test for presence of all specified options in config file"):
+      ${grepForOptions execOptions}
+      ${grepForOptions filesOptions}
+      ${grepForOptions networkOptions}
+
+    with subtest("Test for absence of misspelled option 'MachineId' (instead of 'MachineID')"):
+      node.fail("grep --quiet MachineId ${configFile}")
+  '';
+
+  meta.maintainers = [
+    lib.maintainers.zi3m5f
+  ];
+})
diff --git a/nixpkgs/nixos/tests/systemd-nspawn.nix b/nixpkgs/nixos/tests/systemd-nspawn.nix
new file mode 100644
index 000000000000..1a4251ef069e
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-nspawn.nix
@@ -0,0 +1,51 @@
+import ./make-test-python.nix ({pkgs, lib, ...}:
+let
+  gpgKeyring = import ./common/gpg-keyring.nix { inherit pkgs; };
+
+  nspawnImages = (pkgs.runCommand "localhost" { buildInputs = [ pkgs.coreutils pkgs.gnupg ]; } ''
+    mkdir -p $out
+    cd $out
+
+    # produce a testimage.raw
+    dd if=/dev/urandom of=$out/testimage.raw bs=$((1024*1024+7)) count=5
+
+    # produce a testimage2.tar.xz, containing the hello store path
+    tar cvJpf testimage2.tar.xz ${pkgs.hello}
+
+    # produce signature(s)
+    sha256sum testimage* > SHA256SUMS
+    export GNUPGHOME="$(mktemp -d)"
+    cp -R ${gpgKeyring}/* $GNUPGHOME
+    gpg --batch --sign --detach-sign --output SHA256SUMS.gpg SHA256SUMS
+  '');
+in {
+  name = "systemd-nspawn";
+
+  nodes = {
+    server = { pkgs, ... }: {
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      services.nginx = {
+        enable = true;
+        virtualHosts."server".root = nspawnImages;
+      };
+    };
+    client = { pkgs, ... }: {
+      environment.etc."systemd/import-pubring.gpg".source = "${gpgKeyring}/pubkey.gpg";
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("nginx.service")
+    client.wait_for_unit("network-online.target")
+    client.succeed("machinectl pull-raw --verify=signature http://server/testimage.raw")
+    client.succeed(
+        "cmp /var/lib/machines/testimage.raw ${nspawnImages}/testimage.raw"
+    )
+    client.succeed("machinectl pull-tar --verify=signature http://server/testimage2.tar.xz")
+    client.succeed(
+        "cmp /var/lib/machines/testimage2/${pkgs.hello}/bin/hello ${pkgs.hello}/bin/hello"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-oomd.nix b/nixpkgs/nixos/tests/systemd-oomd.nix
new file mode 100644
index 000000000000..55c4c1350000
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-oomd.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+{
+  name = "systemd-oomd";
+
+  # This test is a simplified version of systemd's testsuite-55.
+  # https://github.com/systemd/systemd/blob/v251/test/units/testsuite-55.sh
+  nodes.machine = { pkgs, ... }: {
+    # Limit VM resource usage.
+    virtualisation.memorySize = 1024;
+    systemd.oomd.extraConfig.DefaultMemoryPressureDurationSec = "1s";
+
+    systemd.slices.workload = {
+      description = "Test slice for memory pressure kills";
+      sliceConfig = {
+        MemoryAccounting = true;
+        ManagedOOMMemoryPressure = "kill";
+        ManagedOOMMemoryPressureLimit = "10%";
+      };
+    };
+
+    systemd.services.testbloat = {
+      description = "Create a lot of memory pressure";
+      serviceConfig = {
+        Slice = "workload.slice";
+        MemoryHigh = "5M";
+        ExecStart = "${pkgs.coreutils}/bin/tail /dev/zero";
+      };
+    };
+
+    systemd.services.testchill = {
+      description = "No memory pressure";
+      serviceConfig = {
+        Slice = "workload.slice";
+        MemoryHigh = "3M";
+        ExecStart = "${pkgs.coreutils}/bin/sleep infinity";
+      };
+    };
+  };
+
+  testScript = ''
+    # Start the system.
+    machine.wait_for_unit("multi-user.target")
+    machine.succeed("oomctl")
+
+    machine.succeed("systemctl start testchill.service")
+    with subtest("OOMd should kill the bad service"):
+        machine.fail("systemctl start --wait testbloat.service")
+        assert machine.get_unit_info("testbloat.service")["Result"] == "oom-kill"
+
+    with subtest("Service without memory pressure should be untouched"):
+        machine.require_unit_state("testchill.service", "active")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-portabled.nix b/nixpkgs/nixos/tests/systemd-portabled.nix
new file mode 100644
index 000000000000..ef38258b0d86
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-portabled.nix
@@ -0,0 +1,51 @@
+import ./make-test-python.nix ({pkgs, lib, ...}: let
+  demo-program = pkgs.writeShellScriptBin "demo" ''
+      while ${pkgs.coreutils}/bin/sleep 3; do
+          echo Hello World > /dev/null
+      done
+  '';
+  demo-service = pkgs.writeText "demo.service" ''
+    [Unit]
+    Description=demo service
+    Requires=demo.socket
+    After=demo.socket
+
+    [Service]
+    Type=simple
+    ExecStart=${demo-program}/bin/demo
+    Restart=always
+
+    [Install]
+    WantedBy=multi-user.target
+    Also=demo.socket
+  '';
+  demo-socket = pkgs.writeText "demo.socket" ''
+    [Unit]
+    Description=demo socket
+
+    [Socket]
+    ListenStream=/run/demo.sock
+    SocketMode=0666
+
+    [Install]
+    WantedBy=sockets.target
+  '';
+  demo-portable = pkgs.portableService {
+    pname = "demo";
+    version = "1.0";
+    description = ''A demo "Portable Service" for a shell program built with nix'';
+    units = [ demo-service demo-socket ];
+  };
+in {
+
+  name = "systemd-portabled";
+  nodes.machine = {};
+  testScript = ''
+    machine.succeed("portablectl")
+    machine.wait_for_unit("systemd-portabled.service")
+    machine.succeed("portablectl attach --now --runtime ${demo-portable}/demo_1.0.raw")
+    machine.wait_for_unit("demo.service")
+    machine.succeed("portablectl detach --now --runtime demo_1.0")
+    machine.fail("systemctl status demo.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-repart.nix b/nixpkgs/nixos/tests/systemd-repart.nix
new file mode 100644
index 000000000000..3914d5b32397
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-repart.nix
@@ -0,0 +1,182 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  # A testScript fragment that prepares a disk with some empty, unpartitioned
+  # space and uses it to boot the test. It takes a single argument `machine`
+  # from which the diskImage is extracted.
+  useDiskImage = machine: ''
+    import os
+    import shutil
+    import subprocess
+    import tempfile
+
+    tmp_disk_image = tempfile.NamedTemporaryFile()
+
+    shutil.copyfile("${machine.system.build.diskImage}/nixos.img", tmp_disk_image.name)
+
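+    # Grow the raw image so there is unpartitioned space for systemd-repart to use.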
+    subprocess.run([
+      "${machine.config.virtualisation.qemu.package}/bin/qemu-img",
+      "resize",
+      "-f",
+      "raw",
+      tmp_disk_image.name,
+      "+32M",
+    ])
+
+    # Set NIX_DISK_IMAGE so that the qemu script finds the right disk image.
+    os.environ['NIX_DISK_IMAGE'] = tmp_disk_image.name
+  '';
+
+  common = { config, pkgs, lib, ... }: {
+    virtualisation.useDefaultFilesystems = false;
+    virtualisation.fileSystems = {
+      "/" = {
+        device = "/dev/vda2";
+        fsType = "ext4";
+      };
+    };
+
+    # systemd-repart operates on disks with a partition table. The qemu module,
+    # however, creates separate filesystem images without a partition table, so
+    # we have to create a disk image manually.
+    #
+    # This creates two partitions, an ESP available as /dev/vda1 and the root
+    # partition available as /dev/vda2.
+    system.build.diskImage = import ../lib/make-disk-image.nix {
+      inherit config pkgs lib;
+      # Use a raw format disk so that it can be resized before starting the
+      # test VM.
+      format = "raw";
+      # Keep the image as small as possible but leave some room for changes.
+      bootSize = "32M";
+      additionalSpace = "0M";
+      # GPT with an EFI System Partition is the typical use case for
+      # systemd-repart because it does not support MBR.
+      partitionTableType = "efi";
+      # We do not actually care much about the content of the partitions, so we
+      # do not need a bootloader installed.
+      installBootLoader = false;
+      # Improve determinism by not copying a channel.
+      copyChannel = false;
+    };
+  };
+in
+{
+  basic = makeTest {
+    name = "systemd-repart";
+    meta.maintainers = with maintainers; [ nikstur ];
+
+    nodes.machine = { config, pkgs, ... }: {
+      imports = [ common ];
+
+      boot.initrd.systemd.enable = true;
+
+      boot.initrd.systemd.repart.enable = true;
+      systemd.repart.partitions = {
+        "10-root" = {
+          Type = "linux-generic";
+        };
+      };
+    };
+
+    testScript = { nodes, ... }: ''
+      ${useDiskImage nodes.machine}
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      systemd_repart_logs = machine.succeed("journalctl --boot --unit systemd-repart.service")
+      assert "Growing existing partition 1." in systemd_repart_logs
+    '';
+  };
+
+  after-initrd = makeTest {
+    name = "systemd-repart-after-initrd";
+    meta.maintainers = with maintainers; [ nikstur ];
+
+    nodes.machine = { config, pkgs, ... }: {
+      imports = [ common ];
+
+      systemd.repart.enable = true;
+      systemd.repart.partitions = {
+        "10-root" = {
+          Type = "linux-generic";
+        };
+      };
+    };
+
+    testScript = { nodes, ... }: ''
+      ${useDiskImage nodes.machine}
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      systemd_repart_logs = machine.succeed("journalctl --unit systemd-repart.service")
+      assert "Growing existing partition 1." in systemd_repart_logs
+    '';
+  };
+
+  create-root = makeTest {
+    name = "systemd-repart-create-root";
+    meta.maintainers = with maintainers; [ nikstur ];
+
+    nodes.machine = { config, lib, pkgs, ... }: {
+      virtualisation.useDefaultFilesystems = false;
+      virtualisation.fileSystems = {
+        "/" = {
+          device = "/dev/disk/by-partlabel/created-root";
+          fsType = "ext4";
+        };
+        "/nix/store" = {
+          device = "/dev/vda2";
+          fsType = "ext4";
+        };
+      };
+
+      # Create an image containing only the Nix store. This enables creating
+      # the root partition with systemd-repart and then successfully booting
+      # into a working system.
+      #
+      # This creates two partitions, an ESP available as /dev/vda1 and the Nix
+      # store available as /dev/vda2.
+      system.build.diskImage = import ../lib/make-disk-image.nix {
+        inherit config pkgs lib;
+        onlyNixStore = true;
+        format = "raw";
+        bootSize = "32M";
+        additionalSpace = "0M";
+        partitionTableType = "efi";
+        installBootLoader = false;
+        copyChannel = false;
+      };
+
+      boot.initrd.systemd.enable = true;
+
+      boot.initrd.systemd.repart.enable = true;
+      boot.initrd.systemd.repart.device = "/dev/vda";
+      systemd.repart.partitions = {
+        "10-root" = {
+          Type = "root";
+          Label = "created-root";
+          Format = "ext4";
+        };
+      };
+    };
+
+    testScript = { nodes, ... }: ''
+      ${useDiskImage nodes.machine}
+
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+
+      systemd_repart_logs = machine.succeed("journalctl --boot --unit systemd-repart.service")
+      assert "Adding new partition 2 to partition table." in systemd_repart_logs
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/systemd-shutdown.nix b/nixpkgs/nixos/tests/systemd-shutdown.nix
new file mode 100644
index 000000000000..ca6754046f57
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-shutdown.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, systemdStage1 ? false, ...} : let
+  msg = "Shutting down NixOS";
+in {
+  name = "systemd-shutdown";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ das_j ];
+  };
+
+  nodes.machine = {
+    imports = [ ../modules/profiles/minimal.nix ];
+    systemd.shutdownRamfs.contents."/etc/systemd/system-shutdown/shutdown-message".source = pkgs.writeShellScript "shutdown-message" ''
+      echo "${msg}"
+    '';
+    boot.initrd.systemd.enable = systemdStage1;
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    # .shutdown() would wait for the machine to power off
+    machine.succeed("systemctl poweroff")
+    # Message printed by systemd-shutdown
+    machine.wait_for_console_text("Unmounting '/oldroot'")
+    machine.wait_for_console_text("${msg}")
+    # Don't try to sync filesystems
+    machine.wait_for_shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-sysupdate.nix b/nixpkgs/nixos/tests/systemd-sysupdate.nix
new file mode 100644
index 000000000000..37811605dbb2
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-sysupdate.nix
@@ -0,0 +1,66 @@
+# Tests downloading a signed update artifact from a server to a target machine.
+# This test does not rely on the `systemd.timer` units provided by the
+# `systemd-sysupdate` module but triggers the `systemd-sysupdate` service
+# manually to make the test more robust.
+
+{ lib, pkgs, ... }:
+
+let
+  gpgKeyring = import ./common/gpg-keyring.nix { inherit pkgs; };
+in
+{
+  name = "systemd-sysupdate";
+
+  meta.maintainers = with lib.maintainers; [ nikstur ];
+
+  nodes = {
+    server = { pkgs, ... }: {
+      networking.firewall.enable = false;
+      services.nginx = {
+        enable = true;
+        virtualHosts."server" = {
+          root = pkgs.runCommand "sysupdate-artifacts" { buildInputs = [ pkgs.gnupg ]; } ''
+            mkdir -p $out
+            cd $out
+
+            echo "nixos" > nixos_1.efi
+            sha256sum nixos_1.efi > SHA256SUMS
+
+            export GNUPGHOME="$(mktemp -d)"
+            cp -R ${gpgKeyring}/* $GNUPGHOME
+
+            gpg --batch --sign --detach-sign --output SHA256SUMS.gpg SHA256SUMS
+          '';
+        };
+      };
+    };
+
+    target = {
+      systemd.sysupdate = {
+        enable = true;
+        transfers = {
+          "uki" = {
+            Source = {
+              Type = "url-file";
+              Path = "http://server/";
+              MatchPattern = "nixos_@v.efi";
+            };
+            Target = {
+              Path = "/boot/EFI/Linux";
+              MatchPattern = "nixos_@v.efi";
+            };
+          };
+        };
+      };
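+      # The "uki" transfer above should end up as a sysupdate.d/*.transfer
+      # file roughly along these lines (a sketch, not necessarily the module's
+      # literal output):
+      #
+      #   [Source]
+      #   Type=url-file
+      #   Path=http://server/
+      #   MatchPattern=nixos_@v.efi
+      #
+      #   [Target]
+      #   Path=/boot/EFI/Linux
+      #   MatchPattern=nixos_@v.efi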
+
+      environment.etc."systemd/import-pubring.gpg".source = "${gpgKeyring}/pubkey.gpg";
+    };
+  };
+
+  testScript = ''
+    server.wait_for_unit("nginx.service")
+
+    target.succeed("systemctl start systemd-sysupdate")
+    assert "nixos" in target.wait_until_succeeds("cat /boot/EFI/Linux/nixos_1.efi", timeout=5)
+  '';
+}
diff --git a/nixpkgs/nixos/tests/systemd-timesyncd.nix b/nixpkgs/nixos/tests/systemd-timesyncd.nix
new file mode 100644
index 000000000000..f38d06be1516
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-timesyncd.nix
@@ -0,0 +1,53 @@
+# Regression test for systemd-timesyncd having moved the state directory without
+# upstream providing a migration path. https://github.com/systemd/systemd/issues/12131
+
+import ./make-test-python.nix (let
+  common = { lib, ... }: {
+    # override the `false` value from the qemu-vm base profile
+    services.timesyncd.enable = lib.mkForce true;
+  };
+  mkVM = conf: { imports = [ conf common ]; };
+in {
+  name = "systemd-timesyncd";
+  nodes = {
+    current = mkVM {};
+    pre1909 = mkVM ({lib, ... }: {
+      # create the path that should be migrated by our activation script when
+      # upgrading to a newer nixos version
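+      # i.e. the layout DynamicUser produced back then: the real state lives
+      # in /var/lib/private/systemd/timesync and /var/lib/systemd/timesync is
+      # only a symlink pointing at it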
+      system.stateVersion = "19.03";
+      systemd.tmpfiles.rules = [
+        "r /var/lib/systemd/timesync -"
+        "d /var/lib/systemd -"
+        "d /var/lib/private/systemd/timesync -"
+        "L /var/lib/systemd/timesync - - - - /var/lib/private/systemd/timesync"
+        "d /var/lib/private/systemd/timesync - systemd-timesync systemd-timesync -"
+      ];
+    });
+  };
+
+  testScript = ''
+    start_all()
+    current.succeed("systemctl status systemd-timesyncd.service")
+    # on a new install with a recent systemd there should not be any
+    # leftovers from the dynamic user mess
+    current.succeed("test -e /var/lib/systemd/timesync")
+    current.succeed("test ! -L /var/lib/systemd/timesync")
+
+    # timesyncd should be running on the upgrading system since we fixed the
+    # file bits in the activation script
+    pre1909.succeed("systemctl status systemd-timesyncd.service")
+
+    # the path should be gone after the migration
+    pre1909.succeed("test ! -e /var/lib/private/systemd/timesync")
+
+    # and the new path should no longer be a symlink
+    pre1909.succeed("test -e /var/lib/systemd/timesync")
+    pre1909.succeed("test ! -L /var/lib/systemd/timesync")
+
+    # After a restart things should still work; the activation scripts must
+    # not fail and break the boot.
+    pre1909.shutdown()
+    pre1909.start()
+    pre1909.succeed("systemctl status systemd-timesyncd.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-user-tmpfiles-rules.nix b/nixpkgs/nixos/tests/systemd-user-tmpfiles-rules.nix
new file mode 100644
index 000000000000..bf29b4b57be3
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-user-tmpfiles-rules.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "systemd-user-tmpfiles-rules";
+
+  meta = with lib.maintainers; {
+    maintainers = [ schnusch ];
+  };
+
+  nodes.machine = { ... }: {
+    users.users = {
+      alice.isNormalUser = true;
+      bob.isNormalUser = true;
+    };
+
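+    # In these rules %h expands to the home directory of the user the per-user
+    # service manager runs as; rules under users.<name>.rules apply only to
+    # that user.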
+    systemd.user.tmpfiles = {
+      rules = [
+        "d %h/user_tmpfiles_created"
+      ];
+      users.alice.rules = [
+        "d %h/only_alice"
+      ];
+    };
+  };
+
+  testScript = { ... }: ''
+    machine.succeed("loginctl enable-linger alice bob")
+
+    machine.wait_until_succeeds("systemctl --user --machine=alice@ is-active systemd-tmpfiles-setup.service")
+    machine.succeed("[ -d ~alice/user_tmpfiles_created ]")
+    machine.succeed("[ -d ~alice/only_alice ]")
+
+    machine.wait_until_succeeds("systemctl --user --machine=bob@ is-active systemd-tmpfiles-setup.service")
+    machine.succeed("[ -d ~bob/user_tmpfiles_created ]")
+    machine.succeed("[ ! -e ~bob/only_alice ]")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd-userdbd.nix b/nixpkgs/nixos/tests/systemd-userdbd.nix
new file mode 100644
index 000000000000..5d0233ffd9fb
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd-userdbd.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "systemd-userdbd";
+  nodes.machine = { config, pkgs, ... }: {
+    services.userdbd.enable = true;
+
+    users.users.test-user-nss = {
+      isNormalUser = true;
+    };
+
+    environment.etc."userdb/test-user-dropin.user".text = builtins.toJSON {
+      userName = "test-user-dropin";
+    };
+
+    environment.systemPackages = with pkgs; [ libvarlink ];
+  };
+  testScript = ''
+    import json
+    from shlex import quote
+
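+    # Query the userdb varlink API through the io.systemd.Multiplexer service;
+    # both the NSS-backed user and the JSON drop-in user should be resolvable.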
+    def getUserRecord(name):
+      Interface = "unix:/run/systemd/userdb/io.systemd.Multiplexer/io.systemd.UserDatabase"
+      payload = json.dumps({
+        "service": "io.systemd.Multiplexer",
+        "userName": name
+      })
+      return json.loads(machine.succeed(f"varlink call {Interface}.GetUserRecord {quote(payload)}"))
+
+    machine.wait_for_unit("systemd-userdbd.socket")
+    getUserRecord("test-user-nss")
+    getUserRecord("test-user-dropin")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/systemd.nix b/nixpkgs/nixos/tests/systemd.nix
new file mode 100644
index 000000000000..1a39cc73c886
--- /dev/null
+++ b/nixpkgs/nixos/tests/systemd.nix
@@ -0,0 +1,205 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "systemd";
+
+  nodes.machine = { lib, ... }: {
+    imports = [ common/user-account.nix common/x11.nix ];
+
+    virtualisation.emptyDiskImages = [ 512 512 ];
+
+    environment.systemPackages = [ pkgs.cryptsetup ];
+
+    virtualisation.fileSystems = {
+      "/test-x-initrd-mount" = {
+        device = "/dev/vdb";
+        fsType = "ext2";
+        autoFormat = true;
+        noCheck = true;
+        options = [ "x-initrd.mount" ];
+      };
+    };
+
+    systemd.extraConfig = "DefaultEnvironment=\"XXX_SYSTEM=foo\"";
+    systemd.user.extraConfig = "DefaultEnvironment=\"XXX_USER=bar\"";
+    services.journald.extraConfig = "Storage=volatile";
+    test-support.displayManager.auto.user = "alice";
+
+    systemd.shutdown.test = pkgs.writeScript "test.shutdown" ''
+      #!${pkgs.runtimeShell}
+      PATH=${lib.makeBinPath (with pkgs; [ util-linux coreutils ])}
+      mount -t 9p shared -o trans=virtio,version=9p2000.L /tmp/shared
+      touch /tmp/shared/shutdown-test
+      umount /tmp/shared
+    '';
+
+    systemd.services.oncalendar-test = {
+      description = "calendar test";
+      # Japan does not have DST which makes the test a little bit simpler
+      startAt = "Wed 10:00 Asia/Tokyo";
+      script = "true";
+    };
+
+    systemd.services.testservice1 = {
+      description = "Test Service 1";
+      wantedBy = [ "multi-user.target" ];
+      serviceConfig.Type = "oneshot";
+      script = ''
+        if [ "$XXX_SYSTEM" = foo ]; then
+          touch /system_conf_read
+        fi
+      '';
+    };
+
+    systemd.user.services.testservice2 = {
+      description = "Test Service 2";
+      wantedBy = [ "default.target" ];
+      serviceConfig.Type = "oneshot";
+      script = ''
+        if [ "$XXX_USER" = bar ]; then
+          touch "$HOME/user_conf_read"
+        fi
+      '';
+    };
+
+    systemd.watchdog = {
+      device = "/dev/watchdog";
+      runtimeTime = "30s";
+      rebootTime = "10min";
+      kexecTime = "5min";
+    };
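+    # These options should end up as WatchdogDevice=, RuntimeWatchdogSec=,
+    # RebootWatchdogSec= and KExecWatchdogSec= in /etc/systemd/system.conf,
+    # which is what the watchdog subtest below checks.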
+  };
+
+  testScript = ''
+    import re
+    import subprocess
+
+    machine.wait_for_x()
+    # wait for user services
+    machine.wait_for_unit("default.target", "alice")
+
+    with subtest("systemctl edit suggests --runtime"):
+        # --runtime is suggested when using `systemctl edit`
+        ret, out = machine.execute("systemctl edit testservice1.service 2>&1")
+        assert ret == 1
+        assert out.rstrip("\n") == "The unit-directory '/etc/systemd/system' is read-only on NixOS, so it's not possible to edit system-units directly. Use 'systemctl edit --runtime' instead."
+        # Editing without `--runtime` is allowed for user services. It still
+        # fails here, but only because we're not on a tty, which conveniently
+        # ensures that the NixOS-specific error from above doesn't appear.
+        _, out = machine.execute("systemctl --user edit testservice2.service 2>&1")
+        assert out.rstrip("\n") == "Cannot edit units if not on a tty."
+
+    # Regression test for https://github.com/NixOS/nixpkgs/issues/105049
+    with subtest("systemd reads timezone database in /etc/zoneinfo"):
+        timer = machine.succeed("TZ=UTC systemctl show --property=TimersCalendar oncalendar-test.timer")
+        assert re.search("next_elapse=Wed ....-..-.. 01:00:00 UTC", timer), f"got {timer.strip()}"
+
+    # Regression test for https://github.com/NixOS/nixpkgs/issues/35415
+    with subtest("configuration files are recognized by systemd"):
+        machine.succeed("test -e /system_conf_read")
+        machine.succeed("test -e /home/alice/user_conf_read")
+        machine.succeed("test -z $(ls -1 /var/log/journal)")
+
+    with subtest("regression test for https://bugs.freedesktop.org/show_bug.cgi?id=77507"):
+        retcode, output = machine.execute("systemctl status testservice1.service")
+        assert retcode in [0, 3]  # https://bugs.freedesktop.org/show_bug.cgi?id=77507
+
+    # Regression test for https://github.com/NixOS/nixpkgs/issues/35268
+    with subtest("file system with x-initrd.mount is not unmounted"):
+        machine.succeed("mountpoint -q /test-x-initrd-mount")
+        machine.shutdown()
+
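+        # Convert the VM's qcow2 state image to raw and inspect it from the
+        # host with dumpe2fs: if the initrd unmounted the filesystem properly,
+        # its state flag reads "clean".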
+        subprocess.check_call(
+            [
+                "qemu-img",
+                "convert",
+                "-O",
+                "raw",
+                "vm-state-machine/empty0.qcow2",
+                "x-initrd-mount.raw",
+            ]
+        )
+        extinfo = subprocess.check_output(
+            [
+                "${pkgs.e2fsprogs}/bin/dumpe2fs",
+                "x-initrd-mount.raw",
+            ]
+        ).decode("utf-8")
+        assert (
+            re.search(r"^Filesystem state: *clean$", extinfo, re.MULTILINE) is not None
+        ), ("File system was not cleanly unmounted: " + extinfo)
+
+    # Regression test for https://github.com/NixOS/nixpkgs/pull/91232
+    with subtest("setting transient hostnames works"):
+        machine.succeed("hostnamectl set-hostname --transient machine-transient")
+        machine.fail("hostnamectl set-hostname machine-all")
+
+    with subtest("systemd-shutdown works"):
+        machine.shutdown()
+        machine.wait_for_unit("multi-user.target")
+        machine.succeed("test -e /tmp/shared/shutdown-test")
+
+    # Test settings from /etc/sysctl.d/50-default.conf are applied
+    with subtest("systemd sysctl settings are applied"):
+        machine.wait_for_unit("multi-user.target")
+        assert "fq_codel" in machine.succeed("sysctl net.core.default_qdisc")
+
+    # Test systemd is configured to manage a watchdog
+    with subtest("systemd manages hardware watchdog"):
+        machine.wait_for_unit("multi-user.target")
+
+        # It seems that the device's path doesn't appear in 'systemctl show' so
+        # check it separately.
+        assert "WatchdogDevice=/dev/watchdog" in machine.succeed(
+            "cat /etc/systemd/system.conf"
+        )
+
+        output = machine.succeed("systemctl show | grep Watchdog")
+        # assert "RuntimeWatchdogUSec=30s" in output
+        # For some reason RuntimeWatchdogUSec doesn't seem to be updated here.
+        assert "RebootWatchdogUSec=10min" in output
+        assert "KExecWatchdogUSec=5min" in output
+
+    # Test systemd cryptsetup support
+    with subtest("systemd successfully reads /etc/crypttab and unlocks volumes"):
+        # create a luks volume and put a filesystem on it
+        machine.succeed(
+            "echo -n supersecret | cryptsetup luksFormat -q /dev/vdc -",
+            "echo -n supersecret | cryptsetup luksOpen --key-file - /dev/vdc foo",
+            "mkfs.ext3 /dev/mapper/foo",
+        )
+
+        # create a keyfile and /etc/crypttab
+        machine.succeed("echo -n supersecret > /var/lib/luks-keyfile")
+        machine.succeed("chmod 600 /var/lib/luks-keyfile")
+        machine.succeed("echo 'luks1 /dev/vdc /var/lib/luks-keyfile luks' > /etc/crypttab")
+
+        # after a reboot, systemd should unlock the volume and we should be able to mount it
+        machine.shutdown()
+        machine.succeed("systemctl status systemd-cryptsetup@luks1.service")
+        machine.succeed("mkdir -p /tmp/luks1")
+        machine.succeed("mount /dev/mapper/luks1 /tmp/luks1")
+
+    # Do some IP traffic
+    output_ping = machine.succeed(
+        "systemd-run --wait -- ping -c 1 127.0.0.1 2>&1"
+    )
+
+    with subtest("systemd reports accounting data on system.slice"):
+        output = machine.succeed("systemctl status system.slice")
+        assert "CPU:" in output
+        assert "Memory:" in output
+
+        assert "IP:" in output
+        assert "0B in, 0B out" not in output
+
+        assert "IO:" in output
+        assert "0B read, 0B written" not in output
+
+    with subtest("systemd per-unit accounting works"):
+        assert "IP traffic received: 84B" in output_ping
+        assert "IP traffic sent: 84B" in output_ping
+
+    with subtest("systemd environment is properly set"):
+        machine.systemctl("daemon-reexec")  # Rewrites /proc/1/environ
+        machine.succeed("grep -q TZDIR=/etc/zoneinfo /proc/1/environ")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tandoor-recipes.nix b/nixpkgs/nixos/tests/tandoor-recipes.nix
new file mode 100644
index 000000000000..18beaac6f062
--- /dev/null
+++ b/nixpkgs/nixos/tests/tandoor-recipes.nix
@@ -0,0 +1,41 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "tandoor-recipes";
+  meta.maintainers = with lib.maintainers; [ ambroisie ];
+
+  nodes.machine = { pkgs, ... }: {
+    services.tandoor-recipes = {
+      enable = true;
+      extraConfig = {
+        DB_ENGINE = "django.db.backends.postgresql";
+        POSTGRES_HOST = "/run/postgresql";
+        POSTGRES_USER = "tandoor_recipes";
+        POSTGRES_DB = "tandoor_recipes";
+      };
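+      # Connecting through the unix socket in /run/postgresql relies on
+      # PostgreSQL's peer authentication, so no password is configured.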
+    };
+
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "tandoor_recipes" ];
+      ensureUsers = [
+        {
+          name = "tandoor_recipes";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+
+    systemd.services = {
+      tandoor-recipes = {
+        after = [ "postgresql.service" ];
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("tandoor-recipes.service")
+
+    with subtest("Web interface gets ready"):
+        # Wait until server accepts connections
+        machine.wait_until_succeeds("curl -fs localhost:8080")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tang.nix b/nixpkgs/nixos/tests/tang.nix
new file mode 100644
index 000000000000..10486a9feb8c
--- /dev/null
+++ b/nixpkgs/nixos/tests/tang.nix
@@ -0,0 +1,81 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "tang";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ jfroche ];
+  };
+
+  nodes.server =
+    { config
+    , pkgs
+    , modulesPath
+    , ...
+    }: {
+      imports = [
+        "${modulesPath}/../tests/common/auto-format-root-device.nix"
+      ];
+      virtualisation = {
+        emptyDiskImages = [ 512 ];
+        useBootLoader = true;
+        useEFIBoot = true;
+        # This requires access to the host Nix store, because the new root
+        # device is /dev/vdb, an empty 512 MiB drive that contains no Nix
+        # store.
+        mountHostNixStore = true;
+      };
+
+      boot.loader.systemd-boot.enable = true;
+
+      networking.interfaces.eth1.ipv4.addresses = [
+        { address = "192.168.0.1"; prefixLength = 24; }
+      ];
+
+      environment.systemPackages = with pkgs; [ clevis tang cryptsetup ];
+      services.tang = {
+        enable = true;
+        ipAddressAllow = [ "127.0.0.1/32" ];
+      };
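+      # ipAddressAllow presumably maps to IPAddressAllow=/IPAddressDeny= on
+      # tangd.socket; the "Check systemd access list" subtest relies on
+      # requests arriving via 192.168.0.1 being rejected.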
+    };
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("sockets.target")
+
+    with subtest("Check keys are generated"):
+      machine.wait_until_succeeds("curl -v http://127.0.0.1:7654/adv")
+      key = machine.wait_until_succeeds("tang-show-keys 7654")
+
+    with subtest("Check systemd access list"):
+      machine.succeed("ping -c 3 192.168.0.1")
+      machine.fail("curl -v --connect-timeout 3 http://192.168.0.1:7654/adv")
+
+    with subtest("Check basic encrypt and decrypt message"):
+      machine.wait_until_succeeds(f"""echo 'Hello World' | clevis encrypt tang '{{ "url": "http://127.0.0.1:7654", "thp":"{key}"}}' > /tmp/encrypted""")
+      decrypted = machine.wait_until_succeeds("clevis decrypt < /tmp/encrypted")
+      assert decrypted.strip() == "Hello World"
+      machine.wait_until_succeeds("tang-show-keys 7654")
+
+    with subtest("Check encrypt and decrypt disk"):
+      machine.succeed("cryptsetup luksFormat --force-password --batch-mode /dev/vdb <<<'password'")
+      machine.succeed(f"""clevis luks bind -s1 -y -f -d /dev/vdb tang '{{ "url": "http://127.0.0.1:7654", "thp":"{key}" }}' <<< 'password' """)
+      clevis_luks = machine.succeed("clevis luks list -d /dev/vdb")
+      assert clevis_luks.strip() == """1: tang '{"url":"http://127.0.0.1:7654"}'"""
+      machine.succeed("clevis luks unlock -d /dev/vdb")
+      machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +")
+      machine.succeed("clevis luks unlock -d /dev/vdb")
+      machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +")
+      # without tang available, unlock should fail
+      machine.succeed("systemctl stop tangd.socket")
+      machine.fail("clevis luks unlock -d /dev/vdb")
+      machine.succeed("systemctl start tangd.socket")
+
+    with subtest("Rotate server keys"):
+      machine.succeed("${pkgs.tang}/libexec/tangd-rotate-keys -d /var/lib/tang")
+      machine.succeed("clevis luks unlock -d /dev/vdb")
+      machine.succeed("find /dev/mapper -name 'luks*' -exec cryptsetup close {} +")
+
+    with subtest("Test systemd service security"):
+        output = machine.succeed("systemd-analyze security tangd@.service")
+        machine.log(output)
+        assert output[-9:-1] == "SAFE :-}"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/taskserver.nix b/nixpkgs/nixos/tests/taskserver.nix
new file mode 100644
index 000000000000..254bc8822f89
--- /dev/null
+++ b/nixpkgs/nixos/tests/taskserver.nix
@@ -0,0 +1,275 @@
+import ./make-test-python.nix ({ pkgs, ... }: let
+  snakeOil = pkgs.runCommand "snakeoil-certs" {
+    outputs = [ "out" "cacert" "cert" "key" "crl" ];
+    buildInputs = [ pkgs.gnutls.bin ];
+    caTemplate = pkgs.writeText "snakeoil-ca.template" ''
+      cn = server
+      expiration_days = -1
+      cert_signing_key
+      ca
+    '';
+    certTemplate = pkgs.writeText "snakeoil-cert.template" ''
+      cn = server
+      expiration_days = -1
+      tls_www_server
+      encryption_key
+      signing_key
+    '';
+    crlTemplate = pkgs.writeText "snakeoil-crl.template" ''
+      expiration_days = -1
+    '';
+    userCertTemplate = pkgs.writeText "snakeoil-user-cert.template" ''
+      organization = snakeoil
+      cn = server
+      expiration_days = -1
+      tls_www_client
+      encryption_key
+      signing_key
+    '';
+  } ''
+    certtool -p --bits 4096 --outfile ca.key
+    certtool -s --template "$caTemplate" --load-privkey ca.key \
+                --outfile "$cacert"
+    certtool -p --bits 4096 --outfile "$key"
+    certtool -c --template "$certTemplate" \
+                --load-ca-privkey ca.key \
+                --load-ca-certificate "$cacert" \
+                --load-privkey "$key" \
+                --outfile "$cert"
+    certtool --generate-crl --template "$crlTemplate" \
+                            --load-ca-privkey ca.key \
+                            --load-ca-certificate "$cacert" \
+                            --outfile "$crl"
+
+    mkdir "$out"
+
+    # Stripping key information before the actual PEM-encoded values is solely
+    # to make test output a bit less verbose when copying the client key to the
+    # actual client.
+    certtool -p --bits 4096 | sed -n \
+      -e '/^----* *BEGIN/,/^----* *END/p' > "$out/alice.key"
+
+    certtool -c --template "$userCertTemplate" \
+                --load-privkey "$out/alice.key" \
+                --load-ca-privkey ca.key \
+                --load-ca-certificate "$cacert" \
+                --outfile "$out/alice.cert"
+  '';
+
+in {
+  name = "taskserver";
+
+  nodes = rec {
+    server = {
+      services.taskserver.enable = true;
+      services.taskserver.listenHost = "::";
+      services.taskserver.openFirewall = true;
+      services.taskserver.fqdn = "server";
+      services.taskserver.organisations = {
+        testOrganisation.users = [ "alice" "foo" ];
+        anotherOrganisation.users = [ "bob" ];
+      };
+
+      specialisation.manual-config.configuration = {
+        services.taskserver.pki.manual = {
+          ca.cert = snakeOil.cacert;
+          server.cert = snakeOil.cert;
+          server.key = snakeOil.key;
+          server.crl = snakeOil.crl;
+        };
+      };
+    };
+
+    client1 = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.taskwarrior pkgs.gnutls ];
+      users.users.alice.isNormalUser = true;
+      users.users.bob.isNormalUser = true;
+      users.users.foo.isNormalUser = true;
+      users.users.bar.isNormalUser = true;
+    };
+
+    client2 = client1;
+  };
+
+  testScript = { nodes, ... }: let
+    cfg = nodes.server.config.services.taskserver;
+    portStr = toString cfg.listenPort;
+    specialisations = "${nodes.server.system.build.toplevel}/specialisation";
+    newServerSystem = "${specialisations}/manual-config";
+    switchToNewServer = "${newServerSystem}/bin/switch-to-configuration test";
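+    # "manual-config" is a specialisation of the server's system closure;
+    # switching to it at runtime swaps the automatically generated PKI for the
+    # snakeoil certificates defined above.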
+  in ''
+    from shlex import quote
+
+
+    def su(user, cmd):
+        return f"su - {user} -c {quote(cmd)}"
+
+
+    def no_extra_init(client, org, user):
+        pass
+
+
+    def setup_clients_for(org, user, extra_init=no_extra_init):
+        for client in [client1, client2]:
+            with client.nested(f"initialize client for user {user}"):
+                client.succeed(
+                    su(user, f"rm -rf /home/{user}/.task"),
+                    su(user, "task rc.confirmation=no config confirmation no"),
+                )
+
+                exportinfo = server.succeed(f"nixos-taskserver user export {org} {user}")
+
+                with client.nested("importing taskwarrior configuration"):
+                    client.succeed(su(user, f"eval {quote(exportinfo)} >&2"))
+
+                extra_init(client, org, user)
+
+                client.succeed(su(user, "task config taskd.server server:${portStr} >&2"))
+
+                client.succeed(su(user, "task sync init >&2"))
+
+
+    def restart_server():
+        server.systemctl("restart taskserver.service")
+        server.wait_for_open_port(${portStr})
+
+
+    def re_add_imperative_user():
+        with server.nested("(re-)add imperative user bar"):
+            server.execute("nixos-taskserver org remove imperativeOrg")
+            server.succeed(
+                "nixos-taskserver org add imperativeOrg",
+                "nixos-taskserver user add imperativeOrg bar",
+            )
+            setup_clients_for("imperativeOrg", "bar")
+
+
+    def test_sync(user):
+        with subtest(f"sync for user {user}"):
+            client1.succeed(su(user, "task add foo >&2"))
+            client1.succeed(su(user, "task sync >&2"))
+            client2.fail(su(user, "task list >&2"))
+            client2.succeed(su(user, "task sync >&2"))
+            client2.succeed(su(user, "task list >&2"))
+
+
+    def check_client_cert(user):
+        # debug level 3 is a workaround for gnutls issue https://gitlab.com/gnutls/gnutls/-/issues/1040
+        cmd = (
+            f"gnutls-cli -d 3"
+            f" --x509cafile=/home/{user}/.task/keys/ca.cert"
+            f" --x509keyfile=/home/{user}/.task/keys/private.key"
+            f" --x509certfile=/home/{user}/.task/keys/public.cert"
+            f" --port=${portStr} server < /dev/null"
+        )
+        return su(user, cmd)
+
+
+    # Explicitly start the VMs so that we don't accidentally start newServer
+    server.start()
+    client1.start()
+    client2.start()
+
+    server.wait_for_unit("taskserver.service")
+
+    server.succeed(
+        "nixos-taskserver user list testOrganisation | grep -qxF alice",
+        "nixos-taskserver user list testOrganisation | grep -qxF foo",
+        "nixos-taskserver user list anotherOrganisation | grep -qxF bob",
+    )
+
+    server.wait_for_open_port(${portStr})
+
+    client1.wait_for_unit("multi-user.target")
+    client2.wait_for_unit("multi-user.target")
+
+    setup_clients_for("testOrganisation", "alice")
+    setup_clients_for("testOrganisation", "foo")
+    setup_clients_for("anotherOrganisation", "bob")
+
+    for user in ["alice", "bob", "foo"]:
+        test_sync(user)
+
+    server.fail("nixos-taskserver user add imperativeOrg bar")
+    re_add_imperative_user()
+
+    test_sync("bar")
+
+    with subtest("checking certificate revocation of user bar"):
+        client1.succeed(check_client_cert("bar"))
+
+        server.succeed("nixos-taskserver user remove imperativeOrg bar")
+        restart_server()
+
+        client1.fail(check_client_cert("bar"))
+
+        client1.succeed(su("bar", "task add destroy everything >&2"))
+        client1.fail(su("bar", "task sync >&2"))
+
+    re_add_imperative_user()
+
+    with subtest("checking certificate revocation of org imperativeOrg"):
+        client1.succeed(check_client_cert("bar"))
+
+        server.succeed("nixos-taskserver org remove imperativeOrg")
+        restart_server()
+
+        client1.fail(check_client_cert("bar"))
+
+        client1.succeed(su("bar", "task add destroy even more >&2"))
+        client1.fail(su("bar", "task sync >&2"))
+
+    re_add_imperative_user()
+
+    with subtest("check whether declarative config overrides user bar"):
+        restart_server()
+        test_sync("bar")
+
+
+    def init_manual_config(client, org, user):
+        cfgpath = f"/home/{user}/.task"
+
+        client.copy_from_host(
+            "${snakeOil.cacert}",
+            f"{cfgpath}/ca.cert",
+        )
+        for file in ["alice.key", "alice.cert"]:
+            client.copy_from_host(
+                f"${snakeOil}/{file}",
+                f"{cfgpath}/{file}",
+            )
+
+        for file in [f"{user}.key", f"{user}.cert"]:
+            client.copy_from_host(
+                f"${snakeOil}/{file}",
+                f"{cfgpath}/{file}",
+            )
+
+        client.succeed(
+            su("alice", f"task config taskd.ca {cfgpath}/ca.cert"),
+            su("alice", f"task config taskd.key {cfgpath}/{user}.key"),
+            su(user, f"task config taskd.certificate {cfgpath}/{user}.cert"),
+        )
+
+
+    with subtest("check manual configuration"):
+        # Remove the keys from automatic CA creation, to make sure the new
+        # generation doesn't use keys from before.
+        server.succeed("rm -rf ${cfg.dataDir}/keys/* >&2")
+
+        server.succeed(
+            "${switchToNewServer} >&2"
+        )
+        server.wait_for_unit("taskserver.service")
+        server.wait_for_open_port(${portStr})
+
+        server.succeed(
+            "nixos-taskserver org add manualOrg",
+            "nixos-taskserver user add manualOrg alice",
+        )
+
+        setup_clients_for("manualOrg", "alice", init_manual_config)
+
+        test_sync("alice")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tayga.nix b/nixpkgs/nixos/tests/tayga.nix
new file mode 100644
index 000000000000..44974f6efea8
--- /dev/null
+++ b/nixpkgs/nixos/tests/tayga.nix
@@ -0,0 +1,235 @@
+# This test verifies that we can ping an IPv4-only server from an IPv6-only
+# client via a NAT64 router. The hosts and networks are configured as follows:
+#
+#        +------
+# Client | eth1    Address: 2001:db8::2/64
+#        |  |      Route:   64:ff9b::/96 via 2001:db8::1
+#        +--|---
+#           | VLAN 3
+#        +--|---
+#        | eth2    Address: 2001:db8::1/64
+# Router |
+#        | nat64   Address: 64:ff9b::1/128
+#        |         Route:   64:ff9b::/96
+#        |         Address: 192.0.2.0/32
+#        |         Route:   192.0.2.0/24
+#        |
+#        | eth1    Address: 100.64.0.1/24
+#        +--|---
+#           | VLAN 2
+#        +--|---
+# Server | eth1    Address: 100.64.0.2/24
+#        |         Route:   192.0.2.0/24 via 100.64.0.1
+#        +------
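+#
+# NAT64 embeds the server's IPv4 address in the low 32 bits of the Well-Known
+# Prefix, so the client reaches it as 64:ff9b::100.64.0.2 (equivalently
+# 64:ff9b::6440:2).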
+
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+{
+  name = "tayga";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hax404 ];
+  };
+
+  nodes = {
+    # The server is configured with static IPv4 addresses. RFC 6052 Section 3.1
+    # disallows the mapping of non-global IPv4 addresses like RFC 1918 into the
+    # Well-Known Prefix 64:ff9b::/96. TAYGA also does not allow the mapping of
+    # documentation space (RFC 5737). To circumvent this, 100.64.0.2/24 from
+    # RFC 6598 (Carrier-Grade NAT) is used here.
+    # To reach the IPv4 address pool of the NAT64 gateway, a static route is
+    # configured. In normal cases, where the router would also source-NAT the
+    # pool addresses to a single IPv4 address, this would not be needed.
+    server = {
+      virtualisation.vlans = [
+        2 # towards router
+      ];
+      networking = {
+        useDHCP = false;
+        interfaces.eth1 = lib.mkForce {};
+      };
+      systemd.network = {
+        enable = true;
+        networks."vlan1" = {
+          matchConfig.Name = "eth1";
+          address = [
+            "100.64.0.2/24"
+          ];
+          routes = [
+            { routeConfig = { Destination = "192.0.2.0/24"; Gateway = "100.64.0.1"; }; }
+          ];
+        };
+      };
+    };
+
+    # The router is configured with static IPv4 addresses towards the server
+    # and IPv6 addresses towards the client. For NAT64, the Well-Known Prefix
+    # 64:ff9b::/96 is used. NAT64 is done with TAYGA, which provides the
+    # tun interface "nat64" and does the translation over it: IPv6 packets are
+    # sent to this interface and come back as IPv4 packets, and vice versa.
+    # As TAYGA only translates IPv6 addresses to dedicated IPv4 addresses, it
+    # needs a pool of IPv4 addresses which must be at least as big as the
+    # expected number of clients. In this test, the packets from the pool are
+    # routed directly towards the client. In normal cases, there would be a
+    # second source NAT44 to map all clients behind one IPv4 address.
+    router_systemd = {
+      boot.kernel.sysctl = {
+        "net.ipv4.ip_forward" = 1;
+        "net.ipv6.conf.all.forwarding" = 1;
+      };
+
+      virtualisation.vlans = [
+        2 # towards server
+        3 # towards client
+      ];
+
+      networking = {
+        useDHCP = false;
+        useNetworkd = true;
+        firewall.enable = false;
+        interfaces.eth1 = lib.mkForce {
+          ipv4 = {
+            addresses = [ { address = "100.64.0.1"; prefixLength = 24; } ];
+          };
+        };
+        interfaces.eth2 = lib.mkForce {
+          ipv6 = {
+            addresses = [ { address = "2001:db8::1"; prefixLength = 64; } ];
+          };
+        };
+      };
+
+      services.tayga = {
+        enable = true;
+        ipv4 = {
+          address = "192.0.2.0";
+          router = {
+            address = "192.0.2.1";
+          };
+          pool = {
+            address = "192.0.2.0";
+            prefixLength = 24;
+          };
+        };
+        ipv6 = {
+          address = "2001:db8::1";
+          router = {
+            address = "64:ff9b::1";
+          };
+          pool = {
+            address = "64:ff9b::";
+            prefixLength = 96;
+          };
+        };
+      };
+    };
+
+    router_nixos = {
+      boot.kernel.sysctl = {
+        "net.ipv4.ip_forward" = 1;
+        "net.ipv6.conf.all.forwarding" = 1;
+      };
+
+      virtualisation.vlans = [
+        2 # towards server
+        3 # towards client
+      ];
+
+      networking = {
+        useDHCP = false;
+        firewall.enable = false;
+        interfaces.eth1 = lib.mkForce {
+          ipv4 = {
+            addresses = [ { address = "100.64.0.1"; prefixLength = 24; } ];
+          };
+        };
+        interfaces.eth2 = lib.mkForce {
+          ipv6 = {
+            addresses = [ { address = "2001:db8::1"; prefixLength = 64; } ];
+          };
+        };
+      };
+
+      services.tayga = {
+        enable = true;
+        ipv4 = {
+          address = "192.0.2.0";
+          router = {
+            address = "192.0.2.1";
+          };
+          pool = {
+            address = "192.0.2.0";
+            prefixLength = 24;
+          };
+        };
+        ipv6 = {
+          address = "2001:db8::1";
+          router = {
+            address = "64:ff9b::1";
+          };
+          pool = {
+            address = "64:ff9b::";
+            prefixLength = 96;
+          };
+        };
+      };
+    };
+
+    # The client is configured with static IPv6 addresses. It has also a static
+    # route for the NAT64 IP space where the IPv4 addresses are mapped in. In
+    # normal cases, there would be only a default route.
+    client = {
+      virtualisation.vlans = [
+        3 # towards router
+      ];
+
+      networking = {
+        useDHCP = false;
+        interfaces.eth1 = lib.mkForce {};
+      };
+
+      systemd.network = {
+        enable = true;
+        networks."vlan1" = {
+          matchConfig.Name = "eth1";
+          address = [
+            "2001:db8::2/64"
+          ];
+          routes = [
+            { routeConfig = { Destination = "64:ff9b::/96"; Gateway = "2001:db8::1"; }; }
+          ];
+        };
+      };
+      environment.systemPackages = [ pkgs.mtr ];
+    };
+  };
+
+  testScript = ''
+    # start client and server
+    for machine in client, server:
+      machine.wait_for_unit("network-online.target")
+      machine.log(machine.execute("ip addr")[1])
+      machine.log(machine.execute("ip route")[1])
+      machine.log(machine.execute("ip -6 route")[1])
+
+    # test systemd-networkd and nixos-scripts based router
+    for router in router_systemd, router_nixos:
+      router.start()
+      router.wait_for_unit("network-online.target")
+      router.wait_for_unit("tayga.service")
+      router.log(router.execute("ip addr")[1])
+      router.log(router.execute("ip route")[1])
+      router.log(router.execute("ip -6 route")[1])
+
+      with subtest("Wait for tayga"):
+        router.wait_for_unit("tayga.service")
+
+      with subtest("Test ICMP"):
+        client.wait_until_succeeds("ping -c 3 64:ff9b::100.64.0.2 >&2")
+
+      with subtest("Test ICMP and show a traceroute"):
+        client.wait_until_succeeds("mtr --show-ips --report-wide 64:ff9b::100.64.0.2 >&2")
+
+      router.log(router.execute("systemd-analyze security tayga.service")[1])
+      router.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/teeworlds.nix b/nixpkgs/nixos/tests/teeworlds.nix
new file mode 100644
index 000000000000..ac2c996955c8
--- /dev/null
+++ b/nixpkgs/nixos/tests/teeworlds.nix
@@ -0,0 +1,55 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  client =
+    { pkgs, ... }:
+
+    { imports = [ ./common/x11.nix ];
+      environment.systemPackages = [ pkgs.teeworlds ];
+    };
+
+in {
+  name = "teeworlds";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ hax404 ];
+  };
+
+  nodes =
+    { server =
+      { services.teeworlds = {
+          enable = true;
+          openPorts = true;
+        };
+      };
+
+      client1 = client;
+      client2 = client;
+    };
+
+  testScript =
+    ''
+      start_all()
+
+      server.wait_for_unit("teeworlds.service")
+      server.wait_until_succeeds("ss --numeric --udp --listening | grep -q 8303")
+
+      client1.wait_for_x()
+      client2.wait_for_x()
+
+      client1.execute("teeworlds 'player_name Alice;connect server' >&2 &")
+      server.wait_until_succeeds(
+          'journalctl -u teeworlds -e | grep --extended-regexp -q "team_join player=\'[0-9]:Alice"'
+      )
+
+      client2.execute("teeworlds 'player_name Bob;connect server' >&2 &")
+      server.wait_until_succeeds(
+          'journalctl -u teeworlds -e | grep --extended-regexp -q "team_join player=\'[0-9]:Bob"'
+      )
+
+      server.sleep(10)  # wait for a while to get a nice screenshot
+
+      client1.screenshot("screen_client1")
+      client2.screenshot("screen_client2")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/telegraf.nix b/nixpkgs/nixos/tests/telegraf.nix
new file mode 100644
index 000000000000..c3cdb1645213
--- /dev/null
+++ b/nixpkgs/nixos/tests/telegraf.nix
@@ -0,0 +1,33 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "telegraf";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mic92 ];
+  };
+
+  nodes.machine = { ... }: {
+    services.telegraf.enable = true;
+    services.telegraf.environmentFiles = [(pkgs.writeText "secrets" ''
+      SECRET=example
+    '')];
+    services.telegraf.extraConfig = {
+      agent.interval = "1s";
+      agent.flush_interval = "1s";
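+      # The exec input emits one line of InfluxDB line protocol; $SECRET
+      # expands to "example", which becomes the measurement name the test
+      # later greps for in /tmp/metrics.out.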
+      inputs.exec = {
+        commands = [
+          "${pkgs.runtimeShell} -c 'echo $SECRET,tag=a i=42i'"
+        ];
+        timeout = "5s";
+        data_format = "influx";
+      };
+      outputs.file.files = ["/tmp/metrics.out"];
+      outputs.file.data_format = "influx";
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("telegraf.service")
+    machine.wait_until_succeeds("grep -q example /tmp/metrics.out")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/teleport.nix b/nixpkgs/nixos/tests/teleport.nix
new file mode 100644
index 000000000000..cdf762b12844
--- /dev/null
+++ b/nixpkgs/nixos/tests/teleport.nix
@@ -0,0 +1,115 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+, lib ? pkgs.lib
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+  packages = with pkgs; {
+    "default" = teleport;
+    "11" = teleport_11;
+  };
+
+  minimal = package: {
+    services.teleport = {
+      enable = true;
+      inherit package;
+    };
+  };
+
+  client = package: {
+    services.teleport = {
+      enable = true;
+      inherit package;
+      settings = {
+        teleport = {
+          nodename = "client";
+          advertise_ip = "192.168.1.20";
+          auth_token = "8d1957b2-2ded-40e6-8297-d48156a898a9";
+          auth_servers = [ "192.168.1.10:3025" ];
+          log.severity = "DEBUG";
+        };
+        ssh_service = {
+          enabled = true;
+          labels = {
+            role = "client";
+          };
+        };
+        proxy_service.enabled = false;
+        auth_service.enabled = false;
+      };
+    };
+    networking.interfaces.eth1.ipv4.addresses = [{
+      address = "192.168.1.20";
+      prefixLength = 24;
+    }];
+  };
+
+  server = package: {
+    services.teleport = {
+      enable = true;
+      inherit package;
+      settings = {
+        teleport = {
+          nodename = "server";
+          advertise_ip = "192.168.1.10";
+        };
+        ssh_service.enabled = true;
+        proxy_service.enabled = true;
+        auth_service = {
+          enabled = true;
+          tokens = [ "node:8d1957b2-2ded-40e6-8297-d48156a898a9" ];
+        };
+      };
+      diag.enable = true;
+      insecure.enable = true;
+    };
+    networking = {
+      firewall.allowedTCPPorts = [ 3025 ];
+      interfaces.eth1.ipv4.addresses = [{
+        address = "192.168.1.10";
+        prefixLength = 24;
+      }];
+    };
+  };
+in
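+# Build a minimal_<name> and a basic_<name> test for every Teleport package
+# listed above.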
+lib.concatMapAttrs
+  (name: package: {
+    "minimal_${name}" = makeTest {
+      # minimal setup should always work
+      name = "teleport-minimal-setup";
+      meta.maintainers = with pkgs.lib.maintainers; [ justinas ];
+      nodes.minimal = minimal package;
+
+      testScript = ''
+        minimal.wait_for_open_port(3025)
+        minimal.wait_for_open_port(3080)
+        minimal.wait_for_open_port(3022)
+      '';
+    };
+
+    "basic_${name}" = makeTest {
+      # basic server and client test
+      name = "teleport-server-client";
+      meta.maintainers = with pkgs.lib.maintainers; [ justinas ];
+      nodes = {
+        server = server package;
+        client = client package;
+      };
+
+      testScript = ''
+        with subtest("teleport ready"):
+            server.wait_for_open_port(3025)
+            client.wait_for_open_port(3022)
+
+        with subtest("check applied configuration"):
+            server.wait_until_succeeds("tctl get nodes --format=json | ${pkgs.jq}/bin/jq -e '.[] | select(.spec.hostname==\"client\") | .metadata.labels.role==\"client\"'")
+            server.wait_for_open_port(3000)
+            client.succeed("journalctl -u teleport.service --grep='DEBU'")
+            server.succeed("journalctl -u teleport.service --grep='Starting teleport in insecure mode.'")
+      '';
+    };
+  })
+  packages
diff --git a/nixpkgs/nixos/tests/terminal-emulators.nix b/nixpkgs/nixos/tests/terminal-emulators.nix
new file mode 100644
index 000000000000..b52801c898eb
--- /dev/null
+++ b/nixpkgs/nixos/tests/terminal-emulators.nix
@@ -0,0 +1,216 @@
+# Terminal emulators all present a pretty similar interface.
+# That gives us an opportunity to easily test their basic functionality with a single codebase.
+#
+# There are two tests run on each terminal emulator
+# - can it successfully execute a command passed on the cmdline?
+# - can it successfully display a colour?
+# The latter is used as a proxy for "can it display text?", without going through all the intricacies of OCR.
+#
+# 256-colour terminal mode is used to display the test colour, since it has a universally-applicable palette (unlike 8- and 16- colour, where the colours are implementation-defined), and it is widely supported (unlike 24-bit colour).
+#
+# Future work:
+# - Wayland support (both for testing the existing terminals, and for testing wayland-only terminals like foot and havoc)
+# - Test keyboard input? (skipped for now, to eliminate the possibility of race conditions and focus issues)
+
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let tests = {
+      alacritty.pkg = p: p.alacritty;
+
+      # times out after spending many hours
+      #contour.pkg = p: p.contour;
+      #contour.cmd = "contour $command";
+
+      cool-retro-term.pkg = p: p.cool-retro-term;
+      cool-retro-term.colourTest = false; # broken by gloss effect
+
+      ctx.pkg = p: p.ctx;
+      ctx.pinkValue = "#FE0065";
+
+      darktile.pkg = p: p.darktile;
+
+      deepin-terminal.pkg = p: p.deepin.deepin-terminal;
+
+      eterm.pkg = p: p.eterm;
+      eterm.executable = "Eterm";
+      eterm.pinkValue = "#D40055";
+
+      germinal.pkg = p: p.germinal;
+
+      gnome-terminal.pkg = p: p.gnome.gnome-terminal;
+
+      guake.pkg = p: p.guake;
+      guake.cmd = "SHELL=$command guake --show";
+      guake.kill = true;
+
+      hyper.pkg = p: p.hyper;
+
+      kermit.pkg = p: p.kermit-terminal;
+
+      kgx.pkg = p: p.kgx;
+      kgx.cmd = "kgx -e $command";
+      kgx.kill = true;
+
+      kitty.pkg = p: p.kitty;
+      kitty.cmd = "kitty $command";
+
+      konsole.pkg = p: p.plasma5Packages.konsole;
+
+      lxterminal.pkg = p: p.lxterminal;
+
+      mate-terminal.pkg = p: p.mate.mate-terminal;
+      mate-terminal.cmd = "SHELL=$command mate-terminal --disable-factory"; # factory mode uses dbus, and we don't have a proper dbus session set up
+
+      mlterm.pkg = p: p.mlterm;
+
+      mrxvt.pkg = p: p.mrxvt;
+
+      qterminal.pkg = p: p.lxqt.qterminal;
+      qterminal.kill = true;
+
+      rio.pkg = p: p.rio;
+      rio.cmd = "rio -e $command";
+      rio.pinkValue = "#FF1261";
+
+      roxterm.pkg = p: p.roxterm;
+      roxterm.cmd = "roxterm -e $command";
+
+      sakura.pkg = p: p.sakura;
+
+      st.pkg = p: p.st;
+      st.kill = true;
+
+      stupidterm.pkg = p: p.stupidterm;
+      stupidterm.cmd = "stupidterm -- $command";
+
+      terminator.pkg = p: p.terminator;
+      terminator.cmd = "terminator -e $command";
+
+      terminology.pkg = p: p.enlightenment.terminology;
+      terminology.cmd = "SHELL=$command terminology --no-wizard=true";
+      terminology.colourTest = false; # broken by gloss effect
+
+      termite.pkg = p: p.termite;
+
+      termonad.pkg = p: p.termonad;
+
+      tilda.pkg = p: p.tilda;
+
+      tilix.pkg = p: p.tilix;
+      tilix.cmd = "tilix -e $command";
+
+      urxvt.pkg = p: p.rxvt-unicode;
+
+      wayst.pkg = p: p.wayst;
+      wayst.pinkValue = "#FF0066";
+
+      # times out after spending many hours
+      #wezterm.pkg = p: p.wezterm;
+
+      xfce4-terminal.pkg = p: p.xfce.xfce4-terminal;
+
+      xterm.pkg = p: p.xterm;
+    };
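+# Each entry above may override: executable (the binary name), cmd (how to run
+# a command inside the terminal), colourTest (whether to run the colour
+# check), pinkValue (the exact shade the terminal renders for 256-colour index
+# 198) and kill (whether the terminal has to be pkill'ed after the command).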
+in mapAttrs (name: { pkg, executable ? name, cmd ? "SHELL=$command ${executable}", colourTest ? true, pinkValue ? "#FF0087", kill ? false }: makeTest
+{
+  name = "terminal-emulator-${name}";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ jjjollyjim ];
+  };
+
+  nodes.machine = { pkgsInner, ... }:
+
+  {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+
+    # Hyper (and any other electron-based terminals) won't run as root
+    test-support.displayManager.auto.user = "alice";
+
+    environment.systemPackages = [
+      (pkg pkgs)
+      (pkgs.writeShellScriptBin "report-success" ''
+        echo 1 > /tmp/term-ran-successfully
+        ${optionalString kill "pkill ${executable}"}
+      '')
+      (pkgs.writeShellScriptBin "display-colour" ''
+        # A 256-colour background colour code for pink, then spaces.
+        #
+        # Background is used rather than foreground to minimize the effect of anti-aliasing.
+        #
+        # Keep adding more in case the window is partially offscreen to the left or requires
+        # a change to correctly redraw after initialising the window (as with ctx).
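+        #
+        # Colour index 198 in the xterm 256-colour cube is rgb(255, 0, 135),
+        # i.e. #FF0087, the default pinkValue checked for in the screenshot.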
+
+        while :
+        do
+            echo -ne "\e[48;5;198m                   "
+            sleep 0.5
+        done
+        sleep infinity
+      '')
+      (pkgs.writeShellScriptBin "run-in-this-term" "sudo -u alice run-in-this-term-wrapped $1")
+
+      (pkgs.writeShellScriptBin "run-in-this-term-wrapped" "command=\"$(which \"$1\")\"; ${cmd}")
+    ];
+
+    # Helpful reminder to add this test to passthru.tests
+    warnings = if !((pkg pkgs) ? "passthru" && (pkg pkgs).passthru ? "tests") then [ "The package for ${name} doesn't have a passthru.tests" ] else [ ];
+  };
+
+  # We need imagemagick, though not tesseract
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+  in ''
+    with subtest("wait for x"):
+        start_all()
+        machine.wait_for_x()
+
+    with subtest("have the terminal run a command"):
+        # We run this command synchronously, so we can be certain the exit codes are happy
+        machine.${if kill then "execute" else "succeed"}("run-in-this-term report-success")
+        machine.wait_for_file("/tmp/term-ran-successfully")
+    ${optionalString colourTest ''
+
+    import tempfile
+    import subprocess
+
+
+    def check_for_pink(final=False) -> bool:
+        with tempfile.NamedTemporaryFile() as tmpin:
+            machine.send_monitor_command("screendump {}".format(tmpin.name))
+
+            cmd = 'convert {} -define histogram:unique-colors=true -format "%c" histogram:info:'.format(
+                tmpin.name
+            )
+            ret = subprocess.run(cmd, shell=True, capture_output=True)
+            if ret.returncode != 0:
+                raise Exception(
+                    "image analysis failed with exit code {}".format(ret.returncode)
+                )
+
+            text = ret.stdout.decode("utf-8")
+            return "${pinkValue}" in text
+
+
+    with subtest("ensuring no pink is present without the terminal"):
+        assert (
+            check_for_pink() == False
+        ), "Pink was present on the screen before we even launched a terminal!"
+
+    with subtest("have the terminal display a colour"):
+        # We run this command in the background
+        assert machine.shell is not None
+        machine.shell.send(b"(run-in-this-term display-colour |& systemd-cat -t terminal) &\n")
+
+        with machine.nested("Waiting for the screen to have pink on it:"):
+            retry(check_for_pink)
+  ''}'';
+}
+
+  ) tests
diff --git a/nixpkgs/nixos/tests/thelounge.nix b/nixpkgs/nixos/tests/thelounge.nix
new file mode 100644
index 000000000000..8d5a37d46c46
--- /dev/null
+++ b/nixpkgs/nixos/tests/thelounge.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix {
+  name = "thelounge";
+
+  nodes = {
+    private = { config, pkgs, ... }: {
+      services.thelounge = {
+        enable = true;
+        plugins = [ pkgs.theLoungePlugins.themes.solarized ];
+      };
+    };
+
+    public = { config, pkgs, ... }: {
+      services.thelounge = {
+        enable = true;
+        public = true;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    for machine in machines:
+      machine.wait_for_unit("thelounge.service")
+      machine.wait_for_open_port(9000)
+
+    private.wait_until_succeeds("journalctl -u thelounge.service | grep thelounge-theme-solarized")
+    private.wait_until_succeeds("journalctl -u thelounge.service | grep 'in private mode'")
+    public.wait_until_succeeds("journalctl -u thelounge.service | grep 'in public mode'")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/tiddlywiki.nix b/nixpkgs/nixos/tests/tiddlywiki.nix
new file mode 100644
index 000000000000..822711b8939c
--- /dev/null
+++ b/nixpkgs/nixos/tests/tiddlywiki.nix
@@ -0,0 +1,69 @@
+import ./make-test-python.nix ({ ... }: {
+  name = "tiddlywiki";
+  nodes = {
+    default = {
+      services.tiddlywiki.enable = true;
+    };
+    configured = {
+      boot.postBootCommands = ''
+        echo "username,password
+        somelogin,somesecret" > /var/lib/wikiusers.csv
+      '';
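+      # The credentials path below is resolved relative to the wiki directory
+      # (/var/lib/tiddlywiki), so "../wikiusers.csv" points at the file
+      # created above.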
+      services.tiddlywiki = {
+        enable = true;
+        listenOptions = {
+          port = 3000;
+          credentials = "../wikiusers.csv";
+          readers = "(authenticated)";
+        };
+      };
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      with subtest("by default works without configuration"):
+          default.wait_for_unit("tiddlywiki.service")
+
+      with subtest("by default available on port 8080 without auth"):
+          default.wait_for_unit("tiddlywiki.service")
+          default.wait_for_open_port(8080)
+          # we output to /dev/null here to avoid a python UTF-8 decode error
+          # but the check will still fail if the service doesn't respond
+          default.succeed("curl --fail -o /dev/null 127.0.0.1:8080")
+
+      with subtest("by default creates empty wiki"):
+          default.succeed("test -f /var/lib/tiddlywiki/tiddlywiki.info")
+
+      with subtest("configured on port 3000 with basic auth"):
+          configured.wait_for_unit("tiddlywiki.service")
+          configured.wait_for_open_port(3000)
+          configured.fail("curl --fail -o /dev/null 127.0.0.1:3000")
+          configured.succeed(
+              "curl --fail -o /dev/null 127.0.0.1:3000 --user somelogin:somesecret"
+          )
+
+      with subtest("restart preserves changes"):
+          # given running wiki
+          default.wait_for_unit("tiddlywiki.service")
+          # with some changes
+          default.succeed(
+              'curl --fail --request PUT --header \'X-Requested-With:TiddlyWiki\' \
+              --data \'{ "title": "title", "text": "content" }\' \
+              --url 127.0.0.1:8080/recipes/default/tiddlers/somepage '
+          )
+          default.succeed("sleep 2")
+
+          # when wiki is cycled
+          default.systemctl("restart tiddlywiki.service")
+          default.wait_for_unit("tiddlywiki.service")
+          default.wait_for_open_port(8080)
+
+          # the change is preserved
+          default.succeed(
+              "curl --fail -o /dev/null 127.0.0.1:8080/recipes/default/tiddlers/somepage"
+          )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/tigervnc.nix b/nixpkgs/nixos/tests/tigervnc.nix
new file mode 100644
index 000000000000..ed575682d933
--- /dev/null
+++ b/nixpkgs/nixos/tests/tigervnc.nix
@@ -0,0 +1,53 @@
+{ system ? builtins.currentSystem
+, config ? {}
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+makeTest {
+  name = "tigervnc";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lheckemann ];
+  };
+
+  nodes = {
+    server = { pkgs, ...}: {
+      environment.systemPackages = with pkgs; [
+        tigervnc # for Xvnc
+        xorg.xwininfo
+        imagemagickBig # for display with working label: support
+      ];
+      networking.firewall.allowedTCPPorts = [ 5901 ];
+    };
+
+    client = { pkgs, ... }: {
+      imports = [ ./common/x11.nix ];
+      # for vncviewer
+      environment.systemPackages = [ pkgs.tigervnc ];
+    };
+  };
+
+  enableOCR = true;
+
+  testScript = ''
+    start_all()
+
+    for host in [server, client]:
+        host.succeed("echo foobar | vncpasswd -f > vncpasswd")
+
+    server.succeed("Xvnc -geometry 720x576 :1 -PasswordFile vncpasswd >&2 &")
+    server.wait_until_succeeds("nc -z localhost 5901", timeout=10)
+    server.succeed("DISPLAY=:1 xwininfo -root | grep 720x576")
+    server.execute("DISPLAY=:1 display -size 360x200 -font sans -gravity south label:'HELLO VNC WORLD' >&2 &")
+
+    client.wait_for_x()
+    client.execute("vncviewer server:1 -PasswordFile vncpasswd >&2 &")
+    client.wait_for_window(r"VNC")
+    client.screenshot("screenshot")
+    text = client.get_screen_text()
+    # Displayed text
+    assert 'HELLO VNC WORLD' in text
+    # Client window title
+    assert 'TigerVNC' in text
+  '';
+}
diff --git a/nixpkgs/nixos/tests/timescaledb.nix b/nixpkgs/nixos/tests/timescaledb.nix
new file mode 100644
index 000000000000..00a7f9af09fb
--- /dev/null
+++ b/nixpkgs/nixos/tests/timescaledb.nix
@@ -0,0 +1,93 @@
+# Mostly copied from ./postgresql.nix, as it seemed inappropriate to
+# test additional PostgreSQL extensions there.
+
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  postgresql-versions = import ../../pkgs/servers/sql/postgresql pkgs;
+  test-sql = pkgs.writeText "postgresql-test" ''
+    CREATE EXTENSION timescaledb;
+    CREATE EXTENSION timescaledb_toolkit;
+
+    CREATE TABLE sth (
+      time TIMESTAMPTZ NOT NULL,
+      value DOUBLE PRECISION
+    );
+
+    SELECT create_hypertable('sth', 'time');
+
+    INSERT INTO sth (time, value) VALUES
+    ('2003-04-12 04:05:06 America/New_York', 1.0),
+    ('2003-04-12 04:05:07 America/New_York', 2.0),
+    ('2003-04-12 04:05:08 America/New_York', 3.0),
+    ('2003-04-12 04:05:09 America/New_York', 4.0),
+    ('2003-04-12 04:05:10 America/New_York', 5.0)
+    ;
+
+    WITH t AS (
+      SELECT
+        time_bucket('1 day'::interval, time) AS dt,
+        stats_agg(value) AS stats
+      FROM sth
+      GROUP BY time_bucket('1 day'::interval, time)
+    )
+    SELECT
+      average(stats)
+    FROM t;
+  '';
+  make-postgresql-test = postgresql-name: postgresql-package: makeTest {
+    name = postgresql-name;
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ typetetris ];
+    };
+
+    nodes.machine = { ... }:
+      {
+        services.postgresql = {
+          enable = true;
+          package = postgresql-package;
+          extraPlugins = with postgresql-package.pkgs; [
+            timescaledb
+            timescaledb_toolkit
+          ];
+          settings = { shared_preload_libraries = "timescaledb, timescaledb_toolkit"; };
+        };
+      };
+
+    testScript = ''
+      def check_count(statement, lines):
+          return 'test $(sudo -u postgres psql postgres -tAc "{}"|wc -l) -eq {}'.format(
+              statement, lines
+          )
+
+
+      machine.start()
+      machine.wait_for_unit("postgresql")
+
+      with subtest("Postgresql with extensions timescaledb and timescaledb_toolkit is available just after unit start"):
+          machine.succeed(
+              "sudo -u postgres psql -f ${test-sql}"
+          )
+
+      machine.fail(check_count("SELECT * FROM sth;", 3))
+      machine.succeed(check_count("SELECT * FROM sth;", 5))
+      machine.fail(check_count("SELECT * FROM sth;", 4))
+
+      machine.shutdown()
+    '';
+
+  };
+  applicablePostgresqlVersions = filterAttrs (_: value: versionAtLeast value.version "12") postgresql-versions;
+in
+mapAttrs'
+  (name: package: {
+    inherit name;
+    value = make-postgresql-test name package;
+  })
+  applicablePostgresqlVersions
diff --git a/nixpkgs/nixos/tests/timezone.nix b/nixpkgs/nixos/tests/timezone.nix
new file mode 100644
index 000000000000..7fc9a5058eee
--- /dev/null
+++ b/nixpkgs/nixos/tests/timezone.nix
@@ -0,0 +1,50 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "timezone";
+  meta.maintainers = with pkgs.lib.maintainers; [ lheckemann ];
+
+  nodes = {
+    node_eutz = { pkgs, ... }: {
+      time.timeZone = "Europe/Amsterdam";
+    };
+
+    node_nulltz = { pkgs, ... }: {
+      time.timeZone = null;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+      node_eutz.wait_for_unit("dbus.socket")
+
+      with subtest("static - Ensure timezone change gives the correct result"):
+          node_eutz.fail("timedatectl set-timezone Asia/Tokyo")
+          date_result = node_eutz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"')
+          assert date_result == "1970-01-01 01:00:00\n", "Timezone seems to be wrong"
+
+      node_nulltz.wait_for_unit("dbus.socket")
+
+      with subtest("imperative - Ensure timezone defaults to UTC"):
+          date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"')
+          print(date_result)
+          assert (
+              date_result == "1970-01-01 00:00:00\n"
+          ), "Timezone seems to be wrong (not UTC)"
+
+      with subtest("imperative - Ensure timezone adjustment produces expected result"):
+          node_nulltz.succeed("timedatectl set-timezone Asia/Tokyo")
+
+          # Adjustment should be taken into account
+          date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"')
+          print(date_result)
+          assert date_result == "1970-01-01 09:00:00\n", "Timezone was not adjusted"
+
+      with subtest("imperative - Ensure timezone adjustment persists across reboot"):
+          # Adjustment should persist across a reboot
+          node_nulltz.shutdown()
+          node_nulltz.wait_for_unit("dbus.socket")
+          date_result = node_nulltz.succeed('date -d @0 "+%Y-%m-%d %H:%M:%S"')
+          print(date_result)
+          assert (
+              date_result == "1970-01-01 09:00:00\n"
+          ), "Timezone adjustment was not persisted"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tinc/default.nix b/nixpkgs/nixos/tests/tinc/default.nix
new file mode 100644
index 000000000000..31b675ad35c0
--- /dev/null
+++ b/nixpkgs/nixos/tests/tinc/default.nix
@@ -0,0 +1,139 @@
+import ../make-test-python.nix ({ lib, ... }:
+  let
+    snakeoil-keys = import ./snakeoil-keys.nix;
+
+    hosts = lib.attrNames snakeoil-keys;
+
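+    # Picks the address of the first subnet declared for `name` under the
+    # myNetwork host settings; used below to put that address on the node's
+    # tinc interface.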
+    subnetOf = name: config:
+      let
+        subnets = config.services.tinc.networks.myNetwork.hostSettings.${name}.subnets;
+      in
+      (builtins.head subnets).address;
+
+    makeTincHost = name: { subnet, extraConfig ? { } }: lib.mkMerge [
+      {
+        subnets = [{ address = subnet; }];
+        settings = {
+          Ed25519PublicKey = snakeoil-keys.${name}.ed25519Public;
+        };
+        rsaPublicKey = snakeoil-keys.${name}.rsaPublic;
+      }
+      extraConfig
+    ];
+
+    makeTincNode = { config, ... }: name: extraConfig: lib.mkMerge [
+      {
+        services.tinc.networks.myNetwork = {
+          inherit name;
+          rsaPrivateKeyFile =
+            builtins.toFile "rsa.priv" snakeoil-keys.${name}.rsaPrivate;
+          ed25519PrivateKeyFile =
+            builtins.toFile "ed25519.priv" snakeoil-keys.${name}.ed25519Private;
+
+          hostSettings = lib.mapAttrs makeTincHost {
+            static = {
+              subnet = "10.0.0.11";
+              # Only specify the addresses on the node's vlans; Tinc does not
+              # seem to try each one, contrary to what the documentation suggests.
+              extraConfig.addresses = map
+                (vlan: { address = "192.168.${toString vlan}.11"; port = 655; })
+                config.virtualisation.vlans;
+            };
+            dynamic1 = { subnet = "10.0.0.21"; };
+            dynamic2 = { subnet = "10.0.0.22"; };
+          };
+        };
+
+        networking.useDHCP = false;
+
+        networking.interfaces."tinc.myNetwork" = {
+          virtual = true;
+          virtualType = "tun";
+          ipv4.addresses = [{
+            address = subnetOf name config;
+            prefixLength = 24;
+          }];
+        };
+
+        # Prevents a race condition between the NixOS network-addresses service
+        # and tinc creating the interface.
+        # See: https://github.com/NixOS/nixpkgs/issues/27070
+        systemd.services."tinc.myNetwork" = {
+          after = [ "network-addresses-tinc.myNetwork.service" ];
+          requires = [ "network-addresses-tinc.myNetwork.service" ];
+        };
+
+        networking.firewall.allowedTCPPorts = [ 655 ];
+        networking.firewall.allowedUDPPorts = [ 655 ];
+      }
+      extraConfig
+    ];
+
+  in
+  {
+    name = "tinc";
+    meta.maintainers = with lib.maintainers; [ minijackson ];
+
+    nodes = {
+
+      static = { ... } @ args:
+        makeTincNode args "static" {
+          virtualisation.vlans = [ 1 2 ];
+
+          networking.interfaces.eth1.ipv4.addresses = [{
+            address = "192.168.1.11";
+            prefixLength = 24;
+          }];
+
+          networking.interfaces.eth2.ipv4.addresses = [{
+            address = "192.168.2.11";
+            prefixLength = 24;
+          }];
+        };
+
+
+      dynamic1 = { ... } @ args:
+        makeTincNode args "dynamic1" {
+          virtualisation.vlans = [ 1 ];
+        };
+
+      dynamic2 = { ... } @ args:
+        makeTincNode args "dynamic2" {
+          virtualisation.vlans = [ 2 ];
+        };
+
+    };
+
+    testScript = ''
+      start_all()
+
+      static.wait_for_unit("tinc.myNetwork.service")
+      dynamic1.wait_for_unit("tinc.myNetwork.service")
+      dynamic2.wait_for_unit("tinc.myNetwork.service")
+
+      # Static is accessible by the other hosts
+      dynamic1.succeed("ping -c5 192.168.1.11")
+      dynamic2.succeed("ping -c5 192.168.2.11")
+
+      # The other hosts are in separate vlans
+      dynamic1.fail("ping -c5 192.168.2.11")
+      dynamic2.fail("ping -c5 192.168.1.11")
+
+      # Each host can ping themselves through Tinc
+      static.succeed("ping -c5 10.0.0.11")
+      dynamic1.succeed("ping -c5 10.0.0.21")
+      dynamic2.succeed("ping -c5 10.0.0.22")
+
+      # Static is accessible by the other hosts through Tinc
+      dynamic1.succeed("ping -c5 10.0.0.11")
+      dynamic2.succeed("ping -c5 10.0.0.11")
+
+      # Static can access the other hosts through Tinc
+      static.succeed("ping -c5 10.0.0.21")
+      static.succeed("ping -c5 10.0.0.22")
+
+      # The other hosts in separate vlans can access each other through Tinc
+      dynamic1.succeed("ping -c5 10.0.0.22")
+      dynamic2.succeed("ping -c5 10.0.0.21")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/tinc/snakeoil-keys.nix b/nixpkgs/nixos/tests/tinc/snakeoil-keys.nix
new file mode 100644
index 000000000000..650e57d61d4a
--- /dev/null
+++ b/nixpkgs/nixos/tests/tinc/snakeoil-keys.nix
@@ -0,0 +1,157 @@
+{
+  static = {
+    ed25519Private = ''
+      -----BEGIN ED25519 PRIVATE KEY-----
+      IPR+ur5LfVdm6VlR1+FGIkbkL8Enkb9sejBa/JP6tXkg/vHoraIp70srb6jAUFm5
+      3YbCJiBjLW3dy16qM5PovBoWtr5hoqYYA9dFLOys8FBUFFsIGfKhnbk7g25iwxbO
+      -----END ED25519 PRIVATE KEY-----
+    '';
+
+    ed25519Public = "AqV7aeIqKGGQfXxijMLfRAVRBLixnS45G5OoduIc8mD";
+
+    rsaPrivate = ''
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEpAIBAAKCAQEAxDHl0TIhhT2yH5rT+Q7MLnj+Ir8bbs3uaPqnzcxWzN1EfVP8
+      TWt5fSTrF2Dc78Kyu5ZNALrp7tUj0GZAegp1YeYJ28p3qTwCveywtCwbB4dI987S
+      yJwq95kE9aoyLa+cT99VwSTdb2YowQv2tWj/idxE3oJ+qZjy9tE5mysXm7jmTQDx
+      +U0XmNe6MHjKXc01Ener41u0ykJLeUfdgJ1zEyM2rQGtaHpIXfMT6kmxCaMcAMLg
+      YFpI38/1pQGQtROKdGOaUomx2m058bkMsJhTiBjESiLRDElRGxmMJ732crGJP0GR
+      ChJkaX/CnxHq7R0daZfwoTVHRu6N7WDbFQL5twIDAQABAoIBAQCM/fLTIHyYXRr5
+      vXFhxXGUYBz56W6UdWdEiAU5TwR92vFSQ53IIVlARtyvg0ui/b8mMcAKq0hb+03u
+      gN0LFyL+BKvHCLxvoRGzXTorcJrIET+t3jL6OchjANNgnDvNOytQ9wWQdKaxXLAi
+      8y8LdXZWozXW1d6ikKjiGL+WNCSWIcq83ktSJZcohihptU9Un16FYQzdolSC8RtI
+      XyT7i1ye6hW/wJTJxqZ4taX3EPat85kXS234VGSqg9bb2A1yE+U8Rq37bf8AKldJ
+      NUQB3JyxnkYGJcqvzDmz139+744VWxDRvXDA5vU29LC6f8bGBvwEttD98QW+pgmB
+      1NBU1Uo5AoGBAOzUk6k74h1RarwXaftjh/9Pures0CfNNnrkJApzFCh4bAoHNxq6
+      SSXqLcc/vvX2+YaZ72nn5YTo+JLQP6evM9oUaqRMAxa3nzoNCtF8U2r48UWmoUQE
+      aZCYbD3m7IVWFacCKRVaVTMZMTTicypSnXcbCSIEH8PRs9+L4jkHgql9AoGBANQT
+      TZECVhIaQnyRiKWlUE8G1QKzXIxjmfyirBe+ftlIG2XMXasAtQ4VRxpnorgqUnIH
+      BVrIbvRx21zlqwZbrZvyb1jHWRoyi1cqBPijpYBUm5LbV2jgHPhnfhRVqdD4CDKj
+      NQzIQrNymFaMWAoOQv/DE3g+Txr0fm9Ztu8ZRXZDAoGAHh3SQT0aPfwyhIS9t3gq
+      vS7YYa8aMVWJTgthAessbxERPB06xq1Vy/qBo8rZb9HeXV2J8n/I0iQGKDVPQvWm
+      tF7QSOBZrDPhjbJG4+jZesr5c5ADBfFBs1+OtDh/b11JF5nQu6RnHT5g4YbCemlT
+      GOhZOvgnSfGK3CyfsfzggskCgYEAmpKDK5kPUNxw70hH16v5L9Bj+zbt0qlZ+Ag8
+      9IV1ATuMNJNTBitay6v4iidVM3QtaUzyuytxq5s87qW7FMRHcm2ueH+70ttaMiq/
+      OtZT74g7aDuUpy0KEIemHn4dauENYJMSPIHOE+sHW7WpCZNBhBcUHsUTdSsU6GX0
+      bqr1tO8CgYBpZdR2OoX/rn8nwjmtBOH38aPnCpaAfdI2Eq2Lg6DjksP6TBt53a+R
+      m1lk6Kt37BPPZQ85SBr7ywvDgUzfoD7uSmHujF2JUHPsdrg9nx7pNIGlW6DlS9OU
+      oNXGAJ/6/y6F8uDbToUfrwFq5tKMypEEa32kFtxb9f0XQ5fSgHrBEw==
+      -----END RSA PRIVATE KEY-----
+    '';
+
+    rsaPublic = ''
+      -----BEGIN RSA PUBLIC KEY-----
+      MIIBCgKCAQEAxDHl0TIhhT2yH5rT+Q7MLnj+Ir8bbs3uaPqnzcxWzN1EfVP8TWt5
+      fSTrF2Dc78Kyu5ZNALrp7tUj0GZAegp1YeYJ28p3qTwCveywtCwbB4dI987SyJwq
+      95kE9aoyLa+cT99VwSTdb2YowQv2tWj/idxE3oJ+qZjy9tE5mysXm7jmTQDx+U0X
+      mNe6MHjKXc01Ener41u0ykJLeUfdgJ1zEyM2rQGtaHpIXfMT6kmxCaMcAMLgYFpI
+      38/1pQGQtROKdGOaUomx2m058bkMsJhTiBjESiLRDElRGxmMJ732crGJP0GRChJk
+      aX/CnxHq7R0daZfwoTVHRu6N7WDbFQL5twIDAQAB
+      -----END RSA PUBLIC KEY-----
+    '';
+  };
+
+  dynamic1 = {
+    ed25519Private = ''
+      -----BEGIN ED25519 PRIVATE KEY-----
+      wHNC2IMXfYtL4ehdsCX154HBvlIZYEiTOnXtckWMUtEAiX9fu7peyBkp9q+yOy9c
+      xsNyssLL78lt0GoweCxlu3Sza2oBQAcwb+6tuv7P/bqzcG005uCwquyCz8LVymXA
+      -----END ED25519 PRIVATE KEY-----
+    '';
+
+    ed25519Public = "t0smNaAEAH8mver77+z/m6MnBNdurAsqrswM/Sls5FA";
+
+    rsaPrivate = ''
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEpAIBAAKCAQEApukYNGFNWvVlmx75LyOE7MEcd/ViV+yEyk+4cIBXYJ3Ouw+/
+      oEuh8ghQfsiUtbUPR6hPYhX2ZV8XGhuU2nAXVQV0sfZ8pdkbHQ6wHUqFcUIQAVvS
+      Wpm2DvZM8jkbCPP64/x5nukPwQ8VoNnb62rWGzbcj7rOeb7ndMK0TpX5Wwv8F297
+      nKTNCEDbK3DLTj3VD+QGnw6AoEt5i44vViAWZBXuHLHWTDC0Nq8GG+9TKODkEwt5
+      4dgN2X9f+WTVAYhZT3SayHLqIFIMQunN89RpWwhHSW+JIRfAfuT1TbP+wA5ptDeI
+      ktCkJwWyv4hK6l800BJ9GW1nbId5LPa58ipaVwIDAQABAoIBAHcw3WgKVAMwWm57
+      n9ZZtwKapInFYYUIEYungj5UaBFGn+pVRLJjUDJWXaUr94YK1e6F8qpIpLufPBAY
+      wiN7CC5exwaOzlRgxUvqwTkpjkFiu6s8tuqb+baVjD0tKnEqSW+lS/R+2hEzhG5p
+      JPLoSB0HAFpjPC8UdJSctcWos3if3mvOGkGCKyTkrwaJgECDfD+lZ+NBIAiYLSps
+      jWLE+XlY1+nfPdLUQ+TRSv3IikJ/CWbvJLl9EE1tKhkY564KytwZrkIdJlc7NyRO
+      HpzhyMzHu1GLsr+OsBZByNNUxEPU+bzkDQluRXUSIUs9zZoBiCQr3o04qGPTEX9n
+      pNU60gECgYEA3Uf+c80eqzjDxv+O0YzC+9x6A+yMrV56siGkKRPMlrSqjX7iE2Yg
+      tUjD25kEvtaFuB3f/7zp3h4O/VLZgXreRtXHvdrfoyyJGHvHIyCGm8sw8CEWsKo4
+      1LgZUzdPJRkXJq1zOgS0r1xsA1UDC4s02Ww2HwNeVWtmLUyCpA+B/ccCgYEAwRk9
+      tbe82eq1a85zZiPVXP2qvDH5+Vz9YiMky8xsBnoxmz2siR+NdvWBLcE2VDIY8MK1
+      9a1dz2a7cAHQBrtWtACFVY4zvr69DumApjbQRClDYpJ42tp2VbzlMcUDIoKudRQV
+      CObhrE4w4yfVizXFyH9+4Tsg5NzVYuGg9fUJ/vECgYEAoRz7KouNqfMhsLF/5hkM
+      Gt9zw4mm/9ALm8kcwn/U9WHD0FQy/Rbd98BsQmaOavi80cqGvqhoyz2tgkqhbUHt
+      tzuOPDCxphgWFcqBupTDDYoLLruYzraRvGfyoIFj0coL7jBZ9kNY31l2l5J9LhmE
+      OE4utbP5Kk6RTagocpWL+x8CgYB48CwcIcWf3kZeDOFtuUeqhB1o3Qwox7rSuhwT
+      oCaQL/vdtNTY1PAu7zhGxdoXBYFlWS3JfxlgCoGedyQo8zAscJ8RpIx4DNIwAsLW
+      V0I9TnKry/zxZR30OOh7MV7zQFGvdjJubtwspJQt0QcHt1f2aRO4UOYbMMxcr9+1
+      7BCkoQKBgQDBEtg1hx9zYGg1WN2TBSvh6NShi9S23r6IZ3Up8vz6Z2rcwB3UuhKi
+      xluI2ZFwM9s+7UOpaGC+hnc1aMHDEguYOPXoIzvebbYAdN4AkrsJ5d0r1GoEe64E
+      UXxrfuv5LeJ/vkUgWof+U3/jGOVvrjzi5y1xOC0r3kiSpMa85s1dhQ==
+      -----END RSA PRIVATE KEY-----
+    '';
+
+    rsaPublic = ''
+      -----BEGIN RSA PUBLIC KEY-----
+      MIIBCgKCAQEApukYNGFNWvVlmx75LyOE7MEcd/ViV+yEyk+4cIBXYJ3Ouw+/oEuh
+      8ghQfsiUtbUPR6hPYhX2ZV8XGhuU2nAXVQV0sfZ8pdkbHQ6wHUqFcUIQAVvSWpm2
+      DvZM8jkbCPP64/x5nukPwQ8VoNnb62rWGzbcj7rOeb7ndMK0TpX5Wwv8F297nKTN
+      CEDbK3DLTj3VD+QGnw6AoEt5i44vViAWZBXuHLHWTDC0Nq8GG+9TKODkEwt54dgN
+      2X9f+WTVAYhZT3SayHLqIFIMQunN89RpWwhHSW+JIRfAfuT1TbP+wA5ptDeIktCk
+      JwWyv4hK6l800BJ9GW1nbId5LPa58ipaVwIDAQAB
+      -----END RSA PUBLIC KEY-----
+    '';
+  };
+
+  dynamic2 = {
+    ed25519Private = ''
+      -----BEGIN ED25519 PRIVATE KEY-----
+      oUx9JdIstZLMj3ZPD8mP3ITsUscCTIXhNF3VKFUVi/ma5uk50/1vrEohfDraiMxj
+      gAWthpkhnFzUbp+YlOHE7/Z3h1a/br2/tk8DoZ5PV6ufoV1MaBlGdu+TZgeZou0t
+      -----END ED25519 PRIVATE KEY-----
+    '';
+
+    ed25519Public = "f2dYt2/2q9fLJ/AaW+Tlu7HaVNjWQpRnr/UGoXGqLdL";
+
+    rsaPrivate = ''
+      -----BEGIN RSA PRIVATE KEY-----
+      MIIEpAIBAAKCAQEAtQfijPX3BwOAs2Y0EuNjcBmsI90uYqNAonrFgTtcVwERIVE6
+      p6alSEakazhByujBg3jI8oPKC8eO0IJ7x/BWcgxqaw8hsPfJZFnRlwEcU5kK4c+j
+      UNS+hJOXp0x97T1edLpSFHDK9bZ2necblHKG5MsI4UsxEa+CZ0yoIybwWCDmYuya
+      PvE7CeNNa+CIOUbtPVoN4p/aBj0vZeerNBBuodNkglKRxj4l9wD9uOx4S9sdK5lu
+      q/rkxlViBoXRAshT+G2d/u/7/WPoiKB3QJcF33z8UfrlsTRnDDqOMSGisTPSv2LK
+      4QLN4hWOGXAYQqZcxTkvvjl62mCDuoy0TM+CKQIDAQABAoIBAFKpMAxXf52nPswr
+      /dkmFVCpmE2kADsv+iJ21tpkpYxgw1aoRZUp5cyz3P3MaVZio4IJ1A/Ql6B7Vb3l
+      5ulr170p6CnMdgDdlAsLbEV8T1foyOxFKHiPPBNDZXsR1WpPnGLGdRY6TqKV12HQ
+      lmpZRTkRcJOXBufhcTUD7r5mWFaUoZ7so6VxR4L4Tzcgv1Rl4S6jgnHOQdO6lj47
+      BaPjpBb+hplJ4wsRm91dQ7JApYq25XZwyxnBwQ2zAwb46wsuFxDPHlSc4wU7qTt6
+      x2omm33Xy2cm8L1XQhrassZzldSnAyaLBh9DC3+vFPLODDxdz5M2kpHujYYctRhv
+      CICMYJUCgYEA7mWVYuw0S8FNjaLx6n9Q1hr9d9vAFDd3NEaegH586xvhYNxf6n+C
+      2zZloVLEsX0UnBU/6ZtLAUfxUIqlvDS2r1VjSYG5SNxM6/vyGl17Niu1jC8nzf7M
+      V1WtDCHhT4ikZCuNkAldtgI7CXVdCVO/fTqVhjk4hDblJo7VsCZSZysCgYEAwmXp
+      TwlDHapDqA8UxClZuxS8k+2hthny3ihRPCuT34yqAz074zYG97ZBKwIa4Lm1vnkc
+      mwU7yR2aK7IYeU4ScfWm1mLjkW5iaNV/sG7iTz/RP4mBAs3KSGmuhhz8sFWcXByU
+      IZyvMJvC+FpgJQJn/Xc8ZmdImvXlZd6k8v4/kfsCgYEA6VzFPB2OH63slb4w42SX
+      o86t2dtiDigxZxnN5GhtLdSP7borpigF10JLf/y+kCOpvhRLCQk8Bdf/z+C41iAf
+      yEhktbrnvfvwzHxHhSmHCAMHZ19trodCTiePCrZLkQhoK6o6nAmfEyDh26NoXE3/
+      v71OSyLOQRZfgDwHz7PjrBsCgYAe0zojpjxWP+FqjLmmQUhROgCNFGlIDuVMBOic
+      uexAznVG/ja42KBSNzwuLa9FYy1Gfr3idvn78g24UA1BbvfNyj4iUJv1O6OvK+uL
+      dom8N0pe4NbsMuWYhel+qqoG7AxXLtDuY4IEGy7XYr1MIQ2MS5PwSQBiUguGE7/k
+      KBy8cQKBgQCyC9R8VWJxQLqJxZGa9Ful01bSuntB5OLRfEjFCCuGiY/3Vj+mCiQL
+      GOfMOi2jrcnSNgUm0uevmiFCq9m7QiPiAcSYKXPWhsz/55jJIGcZy8bwyhZ2s2Mg
+      BGeZgj4RFORidqkt5g/KJz0+Wp6Ks4sLoCvOzkpeXvLzFVyzGkihrw==
+      -----END RSA PRIVATE KEY-----
+    '';
+
+    rsaPublic = ''
+      -----BEGIN RSA PUBLIC KEY-----
+      MIIBCgKCAQEAtQfijPX3BwOAs2Y0EuNjcBmsI90uYqNAonrFgTtcVwERIVE6p6al
+      SEakazhByujBg3jI8oPKC8eO0IJ7x/BWcgxqaw8hsPfJZFnRlwEcU5kK4c+jUNS+
+      hJOXp0x97T1edLpSFHDK9bZ2necblHKG5MsI4UsxEa+CZ0yoIybwWCDmYuyaPvE7
+      CeNNa+CIOUbtPVoN4p/aBj0vZeerNBBuodNkglKRxj4l9wD9uOx4S9sdK5luq/rk
+      xlViBoXRAshT+G2d/u/7/WPoiKB3QJcF33z8UfrlsTRnDDqOMSGisTPSv2LK4QLN
+      4hWOGXAYQqZcxTkvvjl62mCDuoy0TM+CKQIDAQAB
+      -----END RSA PUBLIC KEY-----
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/tinydns.nix b/nixpkgs/nixos/tests/tinydns.nix
new file mode 100644
index 000000000000..124508bc004b
--- /dev/null
+++ b/nixpkgs/nixos/tests/tinydns.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ lib, ...} : {
+  name = "tinydns";
+  meta = {
+    maintainers = with lib.maintainers; [ basvandijk ];
+  };
+  nodes = {
+    nameserver = { config, lib, ... } : let
+      ip = (lib.head config.networking.interfaces.eth1.ipv4.addresses).address;
+    in {
+      networking.nameservers = [ ip ];
+      services.tinydns = {
+        enable = true;
+        inherit ip;
+        data = ''
+          .foo.bar:${ip}
+          +.bla.foo.bar:1.2.3.4:300
+        '';
+      };
+    };
+  };
+  testScript = ''
+    nameserver.start()
+    nameserver.wait_for_unit("tinydns.service")
+
+    # We query tinydns a few times to trigger the bug:
+    #
+    #   nameserver # [    6.105872] mmap: tinydns (842): VmData 331776 exceed data ulimit 300000. Update limits or use boot option ignore_rlimit_data.
+    #
+    # which was reported in https://github.com/NixOS/nixpkgs/issues/119066.
+    # Without the patch <nixpkgs/pkgs/tools/networking/djbdns/softlimit.patch>
+    # it fails on the 10th iteration.
+    nameserver.succeed(
+        """
+          for i in {1..100}; do
+            host bla.foo.bar 192.168.1.1 | grep '1\.2\.3\.4'
+          done
+        """
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tinyproxy.nix b/nixpkgs/nixos/tests/tinyproxy.nix
new file mode 100644
index 000000000000..b8448d4c23b6
--- /dev/null
+++ b/nixpkgs/nixos/tests/tinyproxy.nix
@@ -0,0 +1,20 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "tinyproxy";
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.tinyproxy = {
+      enable = true;
+      settings = {
+        Listen = "127.0.0.1";
+        Port = 8080;
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("tinyproxy.service")
+    machine.wait_for_open_port(8080)
+
+    machine.succeed('curl -s http://localhost:8080 | grep -i tinyproxy')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tinywl.nix b/nixpkgs/nixos/tests/tinywl.nix
new file mode 100644
index 000000000000..9199866b57af
--- /dev/null
+++ b/nixpkgs/nixos/tests/tinywl.nix
@@ -0,0 +1,59 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+  {
+    name = "tinywl";
+    meta = {
+      maintainers = with lib.maintainers; [ primeos ];
+    };
+
+    nodes.machine = { config, ... }: {
+      # Automatically login on tty1 as a normal user:
+      imports = [ ./common/user-account.nix ];
+      services.getty.autologinUser = "alice";
+      security.polkit.enable = true;
+
+      environment = {
+        systemPackages = with pkgs; [ tinywl foot wayland-utils ];
+      };
+
+      hardware.opengl.enable = true;
+
+      # Automatically start TinyWL when logging in on tty1:
+      programs.bash.loginShellInit = ''
+        if [ "$(tty)" = "/dev/tty1" ]; then
+          set -e
+          test ! -e /tmp/tinywl.log # Only start tinywl once
+          readonly TEST_CMD="wayland-info |& tee /tmp/test-wayland.out && touch /tmp/test-wayland-exit-ok; read"
+          readonly FOOT_CMD="foot sh -c '$TEST_CMD'"
+          tinywl -s "$FOOT_CMD" |& tee /tmp/tinywl.log
+          touch /tmp/tinywl-exit-ok
+        fi
+      '';
+
+      # Switch to a different GPU driver (default: -vga std), otherwise TinyWL segfaults:
+      virtualisation.qemu.options = [ "-vga none -device virtio-gpu-pci" ];
+    };
+
+    testScript = { nodes, ... }: ''
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+
+      # Wait for complete startup:
+      machine.wait_until_succeeds("pgrep tinywl")
+      machine.wait_for_file("/run/user/1000/wayland-0")
+      machine.wait_until_succeeds("pgrep foot")
+      machine.wait_for_file("/tmp/test-wayland-exit-ok")
+
+      # Make a screenshot and save the result:
+      machine.screenshot("tinywl_foot")
+      print(machine.succeed("cat /tmp/test-wayland.out"))
+      machine.copy_from_vm("/tmp/test-wayland.out")
+
+      # Terminate cleanly:
+      machine.send_key("alt-esc")
+      machine.wait_until_fails("pgrep foot")
+      machine.wait_until_fails("pgrep tinywl")
+      machine.wait_for_file("/tmp/tinywl-exit-ok")
+      machine.copy_from_vm("/tmp/tinywl.log")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/tmate-ssh-server.nix b/nixpkgs/nixos/tests/tmate-ssh-server.nix
new file mode 100644
index 000000000000..122434c505c1
--- /dev/null
+++ b/nixpkgs/nixos/tests/tmate-ssh-server.nix
@@ -0,0 +1,74 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+let
+  inherit (import ./ssh-keys.nix pkgs)
+    snakeOilPrivateKey snakeOilPublicKey;
+
+  setUpPrivateKey = name: ''
+    ${name}.succeed(
+        "mkdir -p /root/.ssh",
+        "chown 700 /root/.ssh",
+        "cat '${snakeOilPrivateKey}' > /root/.ssh/id_snakeoil",
+        "chown 600 /root/.ssh/id_snakeoil",
+    )
+    ${name}.wait_for_file("/root/.ssh/id_snakeoil")
+  '';
+
+  sshOpts = "-oStrictHostKeyChecking=no -oUserKnownHostsFile=/dev/null -oIdentityFile=/root/.ssh/id_snakeoil";
+
+in
+{
+  name = "tmate-ssh-server";
+  nodes =
+    {
+      server = { ... }: {
+        services.tmate-ssh-server = {
+          enable = true;
+          port = 2223;
+          openFirewall = true;
+        };
+      };
+      client = { ... }: {
+        environment.systemPackages = [ pkgs.tmate ];
+        services.openssh.enable = true;
+        users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+      };
+      client2 = { ... }: {
+        environment.systemPackages = [ pkgs.openssh ];
+      };
+    };
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("tmate-ssh-server.service")
+    server.wait_for_open_port(2223)
+    server.wait_for_file("/etc/tmate-ssh-server-keys/ssh_host_ed25519_key.pub")
+    server.wait_for_file("/etc/tmate-ssh-server-keys/ssh_host_rsa_key.pub")
+    server.succeed("tmate-client-config > /tmp/tmate.conf")
+    server.wait_for_file("/tmp/tmate.conf")
+
+    ${setUpPrivateKey "server"}
+    client.wait_for_unit("sshd.service")
+    client.wait_for_open_port(22)
+    server.succeed("scp ${sshOpts} /tmp/tmate.conf client:/tmp/tmate.conf")
+
+    client.wait_for_file("/tmp/tmate.conf")
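+    # Drive an interactive session on the console: log in as root, start tmate
+    # with the generated config, dismiss the intro screen ("q"), then ask tmate
+    # for the ssh command that client2 uses to join the session.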
+    client.send_chars("root\n")
+    client.sleep(2)
+    client.send_chars("tmate -f /tmp/tmate.conf\n")
+    client.sleep(2)
+    client.send_chars("q")
+    client.sleep(2)
+    client.send_chars("tmate display -p '#{tmate_ssh}' > /tmp/ssh_command\n")
+    client.wait_for_file("/tmp/ssh_command")
+    ssh_cmd = client.succeed("cat /tmp/ssh_command")
+
+    client2.succeed("mkdir -p ~/.ssh; ssh-keyscan -p 2223 server > ~/.ssh/known_hosts")
+    client2.send_chars("root\n")
+    client2.sleep(2)
+    client2.send_chars(ssh_cmd.strip() + "\n")
+    client2.sleep(2)
+    client2.send_chars("touch /tmp/client_2\n")
+
+    client.wait_for_file("/tmp/client_2")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tomcat.nix b/nixpkgs/nixos/tests/tomcat.nix
new file mode 100644
index 000000000000..a5f219e104ad
--- /dev/null
+++ b/nixpkgs/nixos/tests/tomcat.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "tomcat";
+
+  nodes.machine = { pkgs, ... }: {
+    services.tomcat = {
+      enable = true;
+      axis2.enable = true;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("tomcat.service")
+    machine.wait_for_open_port(8080)
+    machine.wait_for_file("/var/tomcat/webapps/examples");
+
+    machine.succeed(
+        "curl -sS --fail http://localhost:8080/examples/servlets/servlet/HelloWorldExample | grep 'Hello World!'"
+    )
+    machine.succeed(
+        "curl -sS --fail http://localhost:8080/examples/jsp/jsp2/simpletag/hello.jsp | grep 'Hello, world!'"
+    )
+    machine.succeed(
+        "curl -sS --fail http://localhost:8080/axis2/axis2-web/HappyAxis.jsp | grep 'Found Axis2'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tor.nix b/nixpkgs/nixos/tests/tor.nix
new file mode 100644
index 000000000000..b55fbf91232c
--- /dev/null
+++ b/nixpkgs/nixos/tests/tor.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "tor";
+  meta.maintainers = with lib.maintainers; [ joachifm ];
+
+  nodes.client = { pkgs, ... }: {
+    boot.kernelParams = [ "audit=0" "apparmor=0" "quiet" ];
+    networking.firewall.enable = false;
+    networking.useDHCP = false;
+
+    environment.systemPackages = [ pkgs.netcat ];
+    services.tor.enable = true;
+    services.tor.client.enable = true;
+    services.tor.settings.ControlPort = 9051;
+  };
+
+  testScript = ''
+    client.wait_for_unit("tor.service")
+    client.wait_for_open_port(9051)
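+    # An unauthenticated GETINFO must be rejected with Tor's
+    # "514 Authentication required." reply; that is enough to show the
+    # ControlPort is answering.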
+    assert "514 Authentication required." in client.succeed(
+        "echo GETINFO version | nc 127.0.0.1 9051"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tracee.nix b/nixpkgs/nixos/tests/tracee.nix
new file mode 100644
index 000000000000..3dadc0f9fdb3
--- /dev/null
+++ b/nixpkgs/nixos/tests/tracee.nix
@@ -0,0 +1,68 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "tracee-integration";
+  meta.maintainers = pkgs.tracee.meta.maintainers;
+
+  nodes = {
+    machine = { config, pkgs, ... }: {
+      # EventFilters/trace_only_events_from_new_containers and
+      # Test_EventFilters/trace_only_events_from_"dockerd"_binary_and_contain_it's_pid
+      # require docker/dockerd
+      virtualisation.docker.enable = true;
+
+      environment.systemPackages = with pkgs; [
+        # required by Test_EventFilters/trace_events_from_ls_and_which_binary_in_separate_scopes
+        which
+        # build the go integration tests as a binary
+        (tracee.overrideAttrs (oa: {
+          pname = oa.pname + "-integration";
+          postPatch = oa.postPatch or "" + ''
+            # prepare tester.sh (which will be embedded in the test binary)
+            patchShebangs tests/integration/tester.sh
+
+            # fix the test to look at nixos paths for running programs
+            substituteInPlace tests/integration/integration_test.go \
+              --replace "bin=/usr/bin/" "comm=" \
+              --replace "binary=/usr/bin/" "comm=" \
+              --replace "/usr/bin/dockerd" "dockerd" \
+              --replace "/usr/bin" "/run/current-system/sw/bin"
+          '';
+          nativeBuildInputs = oa.nativeBuildInputs or [ ] ++ [ makeWrapper ];
+          buildPhase = ''
+            runHook preBuild
+            # just build the static lib we need for the go test binary
+            make $makeFlags ''${enableParallelBuilding:+-j$NIX_BUILD_CORES} bpf-core ./dist/btfhub
+
+            # then compile the tests to be run later
+            CGO_LDFLAGS="$(pkg-config --libs libbpf)" go test -tags core,ebpf,integration -p 1 -c -o $GOPATH/tracee-integration ./tests/integration/...
+            runHook postBuild
+          '';
+          doCheck = false;
+          outputs = [ "out" ];
+          installPhase = ''
+            mkdir -p $out/bin
+            mv $GOPATH/tracee-integration $out/bin/
+          '';
+          doInstallCheck = false;
+
+          meta = oa.meta // {
+            outputsToInstall = [];
+          };
+        }))
+      ];
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("docker.service")
+
+    with subtest("run integration tests"):
+      # EventFilters/trace_only_events_from_new_containers also requires a container called "alpine"
+      machine.succeed('tar c -C ${pkgs.pkgsStatic.busybox} . | docker import - alpine --change "ENTRYPOINT [\"sleep\"]"')
+
+      # Test_EventFilters/trace_event_set_in_a_specific_scope expects to be in a dir that includes "integration"
+      print(machine.succeed(
+        'mkdir /tmp/integration',
+        'cd /tmp/integration && tracee-integration -test.v'
+      ))
+  '';
+})
diff --git a/nixpkgs/nixos/tests/traefik.nix b/nixpkgs/nixos/tests/traefik.nix
new file mode 100644
index 000000000000..ce808e6ec95a
--- /dev/null
+++ b/nixpkgs/nixos/tests/traefik.nix
@@ -0,0 +1,98 @@
+# Test Traefik as a reverse proxy of a local web service
+# and a Docker container.
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "traefik";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ joko ];
+  };
+
+  nodes = {
+    client = { config, pkgs, ... }: {
+      environment.systemPackages = [ pkgs.curl ];
+    };
+    traefik = { config, pkgs, ... }: {
+      virtualisation.oci-containers = {
+        backend = "docker";
+        containers.nginx = {
+          extraOptions = [
+            "-l"
+            "traefik.enable=true"
+            "-l"
+            "traefik.http.routers.nginx.entrypoints=web"
+            "-l"
+            "traefik.http.routers.nginx.rule=Host(`nginx.traefik.test`)"
+          ];
+          image = "nginx-container";
+          imageFile = pkgs.dockerTools.examples.nginx;
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+
+      services.traefik = {
+        enable = true;
+
+        dynamicConfigOptions = {
+          http.routers.simplehttp = {
+            rule = "Host(`simplehttp.traefik.test`)";
+            entryPoints = [ "web" ];
+            service = "simplehttp";
+          };
+
+          http.services.simplehttp = {
+            loadBalancer.servers = [{
+              url = "http://127.0.0.1:8000";
+            }];
+          };
+        };
+
+        staticConfigOptions = {
+          global = {
+            checkNewVersion = false;
+            sendAnonymousUsage = false;
+          };
+
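+          # The backslash escapes Nix interpolation, so the literal ${HTTP_PORT}
+          # ends up in the generated static config; environmentFiles below
+          # supplies its value at service start (presumably via the module's
+          # envsubst pre-start step).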
+          entryPoints.web.address = ":\${HTTP_PORT}";
+
+          providers.docker.exposedByDefault = false;
+        };
+        environmentFiles = [(pkgs.writeText "traefik.env" ''
+          HTTP_PORT=80
+        '')];
+      };
+
+      systemd.services.simplehttp = {
+        script = "${pkgs.python3}/bin/python -m http.server 8000";
+        serviceConfig.Type = "simple";
+        wantedBy = [ "multi-user.target" ];
+      };
+
+      users.users.traefik.extraGroups = [ "docker" ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    traefik.wait_for_unit("docker-nginx.service")
+    traefik.wait_until_succeeds("docker ps | grep nginx-container")
+    traefik.wait_for_unit("simplehttp.service")
+    traefik.wait_for_unit("traefik.service")
+    traefik.wait_for_open_port(80)
+    traefik.wait_for_unit("multi-user.target")
+
+    client.wait_for_unit("multi-user.target")
+
+    client.wait_until_succeeds("curl -sSf -H Host:nginx.traefik.test http://traefik/")
+
+    with subtest("Check that a container can be reached via Traefik"):
+        assert "Hello from NGINX" in client.succeed(
+            "curl -sSf -H Host:nginx.traefik.test http://traefik/"
+        )
+
+    with subtest("Check that dynamic configuration works"):
+        assert "Directory listing for " in client.succeed(
+            "curl -sSf -H Host:simplehttp.traefik.test http://traefik/"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/trafficserver.nix b/nixpkgs/nixos/tests/trafficserver.nix
new file mode 100644
index 000000000000..e4557c6c50e5
--- /dev/null
+++ b/nixpkgs/nixos/tests/trafficserver.nix
@@ -0,0 +1,178 @@
+# verifies:
+#   1. Traffic Server is able to start
+#   2. Traffic Server spawns traffic_crashlog upon startup
+#   3. Traffic Server proxies HTTP requests according to URL remapping rules
+#      in 'services.trafficserver.remap'
+#   4. Traffic Server applies per-map settings specified with the conf_remap
+#      plugin
+#   5. Traffic Server caches HTTP responses
+#   6. Traffic Server processes HTTP PUSH requests
+#   7. Traffic Server can load the healthchecks plugin
+#   8. Traffic Server logs HTTP traffic as configured
+#
+# uses:
+#   - bin/traffic_manager
+#   - bin/traffic_server
+#   - bin/traffic_crashlog
+#   - bin/traffic_cache_tool
+#   - bin/traffic_ctl
+#   - bin/traffic_logcat
+#   - bin/traffic_logstats
+#   - bin/tspush
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "trafficserver";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ midchildan ];
+  };
+
+  nodes = {
+    ats = { pkgs, lib, config, ... }: let
+      user = config.users.users.trafficserver.name;
+      group = config.users.groups.trafficserver.name;
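+      # healthchecks.so rule format appears to be:
+      #   <URI path> <status file> <mime type> <code when file exists> <code when missing>
+      # so /status returns 200 once /tmp/ats.status exists and 500 before that.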
+      healthchecks = pkgs.writeText "healthchecks.conf" ''
+        /status /tmp/ats.status text/plain 200 500
+      '';
+    in {
+      services.trafficserver.enable = true;
+
+      services.trafficserver.records = {
+        proxy.config.http.server_ports = "80 80:ipv6";
+        proxy.config.hostdb.host_file.path = "/etc/hosts";
+        proxy.config.log.max_space_mb_headroom = 0;
+        proxy.config.http.push_method_enabled = 1;
+
+        # check that cache storage is usable before accepting traffic
+        proxy.config.http.wait_for_cache = 2;
+      };
+
+      services.trafficserver.plugins = [
+        { path = "healthchecks.so"; arg = toString healthchecks; }
+        { path = "xdebug.so"; }
+      ];
+
+      services.trafficserver.remap = ''
+        map http://httpbin.test http://httpbin
+        map http://pristine-host-hdr.test http://httpbin \
+          @plugin=conf_remap.so \
+          @pparam=proxy.config.url_remap.pristine_host_hdr=1
+        map http://ats/tspush http://httpbin/cache \
+          @plugin=conf_remap.so \
+          @pparam=proxy.config.http.cache.required_headers=0
+      '';
+
+      services.trafficserver.storage = ''
+        /dev/vdb volume=1
+      '';
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      virtualisation.emptyDiskImages = [ 256 ];
+      services.udev.extraRules = ''
+        KERNEL=="vdb", OWNER="${user}", GROUP="${group}"
+      '';
+    };
+
+    httpbin = { pkgs, lib, ... }: let
+      python = pkgs.python3.withPackages
+        (ps: with ps; [ httpbin gunicorn gevent ]);
+    in {
+      systemd.services.httpbin = {
+        enable = true;
+        after = [ "network.target" ];
+        wantedBy = [ "multi-user.target" ];
+        serviceConfig = {
+          ExecStart = "${python}/bin/gunicorn -b 0.0.0.0:80 httpbin:app -k gevent";
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+    };
+
+    client = { pkgs, lib, ... }: {
+      environment.systemPackages = with pkgs; [ curl ];
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    sampleFile = pkgs.writeText "sample.txt" ''
+      It's the season of White Album.
+    '';
+  in ''
+    import json
+    import re
+
+    ats.wait_for_unit("trafficserver")
+    ats.wait_for_open_port(80)
+    httpbin.wait_for_unit("httpbin")
+    httpbin.wait_for_open_port(80)
+    client.wait_for_unit("network-online.target")
+
+    with subtest("Traffic Server is running"):
+        out = ats.succeed("traffic_ctl server status")
+        assert out.strip() == "Proxy -- on"
+
+    with subtest("traffic_crashlog is running"):
+        ats.succeed("pgrep -f traffic_crashlog")
+
+    with subtest("basic remapping works"):
+        out = client.succeed("curl -vv -H 'Host: httpbin.test' http://ats/headers")
+        assert json.loads(out)["headers"]["Host"] == "httpbin"
+
+    with subtest("conf_remap plugin works"):
+        out = client.succeed(
+            "curl -vv -H 'Host: pristine-host-hdr.test' http://ats/headers"
+        )
+        assert json.loads(out)["headers"]["Host"] == "pristine-host-hdr.test"
+
+    with subtest("caching works"):
+        out = client.succeed(
+            "curl -vv -D - -H 'Host: httpbin.test' -H 'X-Debug: X-Cache' http://ats/cache/60 -o /dev/null"
+        )
+        assert "X-Cache: miss" in out
+
+        out = client.succeed(
+            "curl -vv -D - -H 'Host: httpbin.test' -H 'X-Debug: X-Cache' http://ats/cache/60 -o /dev/null"
+        )
+        assert "X-Cache: hit-fresh" in out
+
+    with subtest("pushing to cache works"):
+        url = "http://ats/tspush"
+
+        ats.succeed(f"echo {url} > /tmp/urls.txt")
+        out = ats.succeed(
+            f"tspush -f '${sampleFile}' -u {url}"
+        )
+        assert "HTTP/1.0 201 Created" in out, "cache push failed"
+
+        out = ats.succeed(
+            "traffic_cache_tool --spans /etc/trafficserver/storage.config find --input /tmp/urls.txt"
+        )
+        assert "Span: /dev/vdb" in out, "cache not stored on disk"
+
+        out = client.succeed(f"curl {url}").strip()
+        expected = (
+            open("${sampleFile}").read().strip()
+        )
+        assert out == expected, "cache content mismatch"
+
+    with subtest("healthcheck plugin works"):
+        out = client.succeed("curl -vv http://ats/status -o /dev/null -w '%{http_code}'")
+        assert out.strip() == "500"
+
+        ats.succeed("touch /tmp/ats.status")
+
+        out = client.succeed("curl -vv http://ats/status -o /dev/null -w '%{http_code}'")
+        assert out.strip() == "200"
+
+    with subtest("logging works"):
+        access_log_path = "/var/log/trafficserver/squid.blog"
+        ats.wait_for_file(access_log_path)
+
+        out = ats.succeed(f"traffic_logcat {access_log_path}").split("\n")[0]
+        expected = "^\S+ \S+ \S+ TCP_MISS/200 \S+ GET http://httpbin/headers - DIRECT/httpbin application/json$"
+        assert re.fullmatch(expected, out) is not None, "no matching logs"
+
+        out = json.loads(ats.succeed(f"traffic_logstats -jf {access_log_path}"))
+        assert isinstance(out, dict)
+        assert out["total"]["error.total"]["req"] == "0", "unexpected log stat"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/transmission.nix b/nixpkgs/nixos/tests/transmission.nix
new file mode 100644
index 000000000000..03fc9a421510
--- /dev/null
+++ b/nixpkgs/nixos/tests/transmission.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, transmission, ... }: {
+  name = "transmission";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ coconnor ];
+  };
+
+  nodes.machine = { ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+
+    networking.firewall.allowedTCPPorts = [ 9091 ];
+
+    security.apparmor.enable = true;
+
+    services.transmission.enable = true;
+    services.transmission.package = transmission;
+  };
+
+  testScript =
+    ''
+      start_all()
+      machine.wait_for_unit("transmission")
+      machine.shutdown()
+    '';
+})
diff --git a/nixpkgs/nixos/tests/trezord.nix b/nixpkgs/nixos/tests/trezord.nix
new file mode 100644
index 000000000000..fb60cb4aff10
--- /dev/null
+++ b/nixpkgs/nixos/tests/trezord.nix
@@ -0,0 +1,19 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "trezord";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [ mmahut _1000101 ];
+  };
+  nodes = {
+    machine = { ... }: {
+      services.trezord.enable = true;
+      services.trezord.emulator.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("trezord.service")
+    machine.wait_for_open_port(21325)
+    machine.wait_until_succeeds("curl -fL http://localhost:21325/status/ | grep Version")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/trickster.nix b/nixpkgs/nixos/tests/trickster.nix
new file mode 100644
index 000000000000..acb2e735c39f
--- /dev/null
+++ b/nixpkgs/nixos/tests/trickster.nix
@@ -0,0 +1,37 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "trickster";
+  meta = with pkgs.lib; {
+    maintainers = with maintainers; [ _1000101 ];
+  };
+
+  nodes = {
+    prometheus = { ... }: {
+      services.prometheus.enable = true;
+      networking.firewall.allowedTCPPorts = [ 9090 ];
+    };
+    trickster = { ... }: {
+      services.trickster.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+    prometheus.wait_for_unit("prometheus.service")
+    prometheus.wait_for_open_port(9090)
+    prometheus.wait_until_succeeds(
+        "curl -fL http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'"
+    )
+    trickster.wait_for_unit("trickster.service")
+    trickster.wait_for_open_port(8082)
+    trickster.wait_for_open_port(9090)
+    trickster.wait_until_succeeds(
+        "curl -fL http://localhost:8082/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'"
+    )
+    trickster.wait_until_succeeds(
+        "curl -fL http://prometheus:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'"
+    )
+    trickster.wait_until_succeeds(
+        "curl -fL http://localhost:9090/metrics | grep 'promhttp_metric_handler_requests_total{code=\"500\"} 0'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/trilium-server.nix b/nixpkgs/nixos/tests/trilium-server.nix
new file mode 100644
index 000000000000..6346575b33da
--- /dev/null
+++ b/nixpkgs/nixos/tests/trilium-server.nix
@@ -0,0 +1,53 @@
+import ./make-test-python.nix ({ ... }: {
+  name = "trilium-server";
+  nodes = {
+    default = {
+      services.trilium-server.enable = true;
+    };
+    configured = {
+      services.trilium-server = {
+        enable = true;
+        dataDir = "/data/trilium";
+      };
+    };
+
+    nginx = {
+      services.trilium-server = {
+        enable = true;
+        nginx.enable = true;
+        nginx.hostName = "trilium.example.com";
+      };
+    };
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      with subtest("by default works without configuration"):
+          default.wait_for_unit("trilium-server.service")
+
+      with subtest("by default available on port 8080"):
+          default.wait_for_unit("trilium-server.service")
+          default.wait_for_open_port(8080)
+          # we output to /dev/null here to avoid a python UTF-8 decode error
+          # but the check will still fail if the service doesn't respond
+          default.succeed("curl --fail -o /dev/null 127.0.0.1:8080")
+
+      with subtest("by default creates empty document"):
+          default.wait_for_unit("trilium-server.service")
+          default.succeed("test -f /var/lib/trilium/document.db")
+
+      with subtest("configured with custom data store"):
+          configured.wait_for_unit("trilium-server.service")
+          configured.succeed("test -f /data/trilium/document.db")
+
+      with subtest("nginx with custom host name"):
+          nginx.wait_for_unit("trilium-server.service")
+          nginx.wait_for_unit("nginx.service")
+
+          nginx.succeed(
+              "curl --resolve 'trilium.example.com:80:127.0.0.1' http://trilium.example.com/"
+          )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/tsja.nix b/nixpkgs/nixos/tests/tsja.nix
new file mode 100644
index 000000000000..176783088d8d
--- /dev/null
+++ b/nixpkgs/nixos/tests/tsja.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "tsja";
+  meta = {
+    maintainers = with lib.maintainers; [ chayleaf ];
+  };
+
+  nodes = {
+    master =
+      { config, ... }:
+
+      {
+        services.postgresql = {
+          enable = true;
+          extraPlugins = with config.services.postgresql.package.pkgs; [
+            tsja
+          ];
+        };
+      };
+  };
+
+  testScript = ''
+    start_all()
+    master.wait_for_unit("postgresql")
+    master.succeed("sudo -u postgres psql -f /run/current-system/sw/share/postgresql/extension/libtsja_dbinit.sql")
+    # make sure "日本語" is parsed as a separate lexeme
+    master.succeed("""
+      sudo -u postgres \\
+        psql -c "SELECT * FROM ts_debug('japanese', 'PostgreSQLã§æ—¥æœ¬èªžã®ãƒ†ã‚­ã‚¹ãƒˆæ¤œç´¢ãŒã§ãã¾ã™ã€‚')" \\
+          | grep "{日本語}"
+    """)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/tsm-client-gui.nix b/nixpkgs/nixos/tests/tsm-client-gui.nix
new file mode 100644
index 000000000000..e11501da53d0
--- /dev/null
+++ b/nixpkgs/nixos/tests/tsm-client-gui.nix
@@ -0,0 +1,57 @@
+# The tsm-client GUI first tries to connect to a server.
+# We can't simulate a server, so we just check if
+# it reports the correct connection failure error.
+# After that the test persuades the GUI
+# to show its main application window
+# and verifies some configuration information.
+
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "tsm-client";
+
+  enableOCR = true;
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/x11.nix ];
+    programs.tsmClient = {
+      enable = true;
+      package = pkgs.tsm-client-withGui;
+      defaultServername = "testserver";
+      servers.testserver = {
+        # 192.0.0.8 is a "dummy address" according to RFC 7600
+        server = "192.0.0.8";
+        node = "SOME-NODE";
+        passwdDir = "/tmp";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.succeed("which dsmj")  # fail early if this is missing
+    machine.wait_for_x()
+    machine.execute("DSM_LOG=/tmp dsmj -optfile=/dev/null >&2 &")
+
+    # does it report the "TCP/IP connection failure" error code?
+    machine.wait_for_window("IBM Spectrum Protect")
+    machine.wait_for_text("ANS2610S")
+    machine.send_key("esc")
+
+    # it asks to continue to restore a local backupset now;
+    # "yes" (return) leads to the main application window
+    machine.wait_for_text("backupset")
+    machine.send_key("ret")
+
+    # main window: navigate to "Connection Information"
+    machine.wait_for_text("Welcome")
+    machine.send_key("alt-f")  # "File" menu
+    machine.send_key("c")  # "Connection Information"
+
+    # "Connection Information" dialog box
+    machine.wait_for_window("Connection Information")
+    machine.wait_for_text("SOME-NODE")
+    machine.wait_for_text("${pkgs.tsm-client.passthru.unwrapped.version}")
+
+    machine.shutdown()
+  '';
+
+  meta.maintainers = [ lib.maintainers.yarny ];
+})
diff --git a/nixpkgs/nixos/tests/tuptime.nix b/nixpkgs/nixos/tests/tuptime.nix
new file mode 100644
index 000000000000..93410de7bdf5
--- /dev/null
+++ b/nixpkgs/nixos/tests/tuptime.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "tuptime";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ evils ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ../modules/profiles/minimal.nix ];
+    services.tuptime.enable = true;
+  };
+
+  testScript =
+    ''
+      # see if it starts
+      start_all()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("tuptime | grep 'System startups:[[:blank:]]*1'")
+      machine.succeed("tuptime | grep 'System uptime:[[:blank:]]*100.0%'")
+      machine.shutdown()
+
+      # restart machine and see if it correctly reports the reboot
+      machine.start()
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("tuptime | grep 'System startups:[[:blank:]]*2'")
+      machine.succeed("tuptime | grep 'System shutdowns:[[:blank:]]*1 ok'")
+      machine.shutdown()
+    '';
+})
+
diff --git a/nixpkgs/nixos/tests/turbovnc-headless-server.nix b/nixpkgs/nixos/tests/turbovnc-headless-server.nix
new file mode 100644
index 000000000000..a155f9f907b2
--- /dev/null
+++ b/nixpkgs/nixos/tests/turbovnc-headless-server.nix
@@ -0,0 +1,172 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "turbovnc-headless-server";
+  meta = {
+    maintainers = with lib.maintainers; [ nh2 ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+
+    environment.systemPackages = with pkgs; [
+      glxinfo
+      procps # for `pkill`, `pidof` in the test
+      scrot # for screenshotting Xorg
+      turbovnc
+    ];
+
+    programs.turbovnc.ensureHeadlessSoftwareOpenGL = true;
+
+    networking.firewall = {
+      # Reject instead of drop, for failures instead of hangs.
+      rejectPackets = true;
+      allowedTCPPorts = [
+        5900 # VNC :0, for seeing what's going on in the server
+      ];
+    };
+
+    # So that we can ssh into the VM, see e.g.
+    # http://blog.patapon.info/nixos-local-vm/#accessing-the-vm-with-ssh
+    services.openssh.enable = true;
+    services.openssh.settings.PermitRootLogin = "yes";
+    users.extraUsers.root.password = "";
+    users.mutableUsers = false;
+  };
+
+  testScript = ''
+    def wait_until_terminated_or_succeeds(
+        termination_check_shell_command,
+        success_check_shell_command,
+        get_detail_message_fn,
+        retries=60,
+        retry_sleep=0.5,
+    ):
+        def check_success():
+            command_exit_code, _output = machine.execute(success_check_shell_command)
+            return command_exit_code == 0
+
+        for _ in range(retries):
+            exit_check_exit_code, _output = machine.execute(termination_check_shell_command)
+            is_terminated = exit_check_exit_code != 0
+            if is_terminated:
+                if check_success():
+                    return
+                else:
+                    details = get_detail_message_fn()
+                    raise Exception(
+                        f"termination check ({termination_check_shell_command}) triggered without command succeeding ({success_check_shell_command}); details: {details}"
+                    )
+            else:
+                if check_success():
+                    return
+            import time
+            time.sleep(retry_sleep)
+
+        if not check_success():
+            details = get_detail_message_fn()
+            raise Exception(
+                f"action timed out ({success_check_shell_command}); details: {details}"
+            )
+
+
+    # Below we use the pattern:
+    #     (cmd | tee stdout.log) 3>&1 1>&2 2>&3 | tee stderr.log
+    # to capture both stderr and stdout while also teeing them, see:
+    # https://unix.stackexchange.com/questions/6430/how-to-redirect-stderr-and-stdout-to-different-files-and-also-display-in-termina/6431#6431
+
+
+    # Starts headless VNC server, backgrounding it.
+    def start_xvnc():
+        xvnc_command = " ".join(
+            [
+                "Xvnc",
+                ":0",
+                "-iglx",
+                "-auth /root/.Xauthority",
+                "-geometry 1240x900",
+                "-depth 24",
+                "-rfbwait 5000",
+                "-deferupdate 1",
+                "-verbose",
+                "-securitytypes none",
+                # We don't enforce localhost listening such that we
+                # can connect from outside the VM using
+                #     env QEMU_NET_OPTS=hostfwd=tcp::5900-:5900 $(nix-build nixos/tests/turbovnc-headless-server.nix -A driver)/bin/nixos-test-driver
+                # for testing purposes, and so that we can in the future
+                # add another test case that connects the TurboVNC client.
+                # "-localhost",
+            ]
+        )
+        machine.execute(
+            # Note trailing & for backgrounding.
+            f"({xvnc_command} | tee /tmp/Xvnc.stdout) 3>&1 1>&2 2>&3 | tee /tmp/Xvnc.stderr >&2 &",
+        )
+
+
+    # Waits for the server log message that tells us that GLX is ready
+    # (requires `-verbose` above), avoiding a race with the screenshotting below.
+    def wait_until_xvnc_glx_ready():
+        machine.wait_until_succeeds("test -f /tmp/Xvnc.stderr")
+        wait_until_terminated_or_succeeds(
+            termination_check_shell_command="pidof Xvnc",
+            success_check_shell_command="grep 'GLX: Initialized DRISWRAST' /tmp/Xvnc.stderr",
+            get_detail_message_fn=lambda: "Contents of /tmp/Xvnc.stderr:\n"
+            + machine.succeed("cat /tmp/Xvnc.stderr"),
+        )
+
+
+    # Checks that we detect glxgears failing when
+    # `LIBGL_DRIVERS_PATH=/nonexistent` is set
+    # (in which case software rendering should not work).
+    def test_glxgears_failing_with_bad_driver_path():
+        machine.execute(
+            # Note trailing & for backgrounding.
+            "(env DISPLAY=:0 LIBGL_DRIVERS_PATH=/nonexistent glxgears -info | tee /tmp/glxgears-should-fail.stdout) 3>&1 1>&2 2>&3 | tee /tmp/glxgears-should-fail.stderr >&2 &"
+        )
+        machine.wait_until_succeeds("test -f /tmp/glxgears-should-fail.stderr")
+        wait_until_terminated_or_succeeds(
+            termination_check_shell_command="pidof glxgears",
+            success_check_shell_command="grep 'libGL error: failed to load driver: swrast' /tmp/glxgears-should-fail.stderr",
+            get_detail_message_fn=lambda: "Contents of /tmp/glxgears-should-fail.stderr:\n"
+            + machine.succeed("cat /tmp/glxgears-should-fail.stderr"),
+        )
+        machine.wait_until_fails("pidof glxgears")
+
+
+    # Starts glxgears, backgrounding it. Waits until it prints the `GL_RENDERER`.
+    # Does not quit glxgears.
+    def test_glxgears_prints_renderer():
+        machine.execute(
+            # Note trailing & for backgrounding.
+            "(env DISPLAY=:0 glxgears -info | tee /tmp/glxgears.stdout) 3>&1 1>&2 2>&3 | tee /tmp/glxgears.stderr >&2 &"
+        )
+        machine.wait_until_succeeds("test -f /tmp/glxgears.stderr")
+        wait_until_terminated_or_succeeds(
+            termination_check_shell_command="pidof glxgears",
+            success_check_shell_command="grep 'GL_RENDERER' /tmp/glxgears.stdout",
+            get_detail_message_fn=lambda: "Contents of /tmp/glxgears.stderr:\n"
+            + machine.succeed("cat /tmp/glxgears.stderr"),
+        )
+
+
+    with subtest("Start Xvnc"):
+        start_xvnc()
+        wait_until_xvnc_glx_ready()
+
+    with subtest("Ensure bad driver path makes glxgears fail"):
+        test_glxgears_failing_with_bad_driver_path()
+
+    with subtest("Run 3D application (glxgears)"):
+        test_glxgears_prints_renderer()
+
+        # Take screenshot; should display the glxgears.
+        machine.succeed("scrot --display :0 /tmp/glxgears.png")
+
+    # Copy files down.
+    machine.copy_from_vm("/tmp/glxgears.png")
+    machine.copy_from_vm("/tmp/glxgears.stdout")
+    machine.copy_from_vm("/tmp/glxgears-should-fail.stdout")
+    machine.copy_from_vm("/tmp/glxgears-should-fail.stderr")
+    machine.copy_from_vm("/tmp/Xvnc.stdout")
+    machine.copy_from_vm("/tmp/Xvnc.stderr")
+  '';
+
+})
diff --git a/nixpkgs/nixos/tests/tuxguitar.nix b/nixpkgs/nixos/tests/tuxguitar.nix
new file mode 100644
index 000000000000..00833024bfea
--- /dev/null
+++ b/nixpkgs/nixos/tests/tuxguitar.nix
@@ -0,0 +1,24 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "tuxguitar";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+
+    environment.systemPackages = [ pkgs.tuxguitar ];
+  };
+
+  testScript = ''
+    machine.wait_for_x()
+    machine.succeed("tuxguitar >&2 &")
+    machine.wait_for_window("TuxGuitar - Untitled.tg")
+    machine.sleep(1)
+    machine.screenshot("tuxguitar")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/twingate.nix b/nixpkgs/nixos/tests/twingate.nix
new file mode 100644
index 000000000000..f8bede09d9f2
--- /dev/null
+++ b/nixpkgs/nixos/tests/twingate.nix
@@ -0,0 +1,14 @@
+{
+  name = "twingate";
+
+  nodes.machine.services.twingate.enable = true;
+
+  testScript = { nodes, ... }: ''
+    machine.wait_for_unit("twingate.service")
+    machine.succeed("twingate --version | grep '${nodes.machine.services.twingate.package.version}' >&2")
+    machine.succeed("twingate config log-level 'debug'")
+    machine.systemctl("restart twingate.service")
+    machine.succeed("grep 'debug' /etc/twingate/log_level.conf >&2")
+    machine.succeed("twingate config log-level | grep 'debug' >&2")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/txredisapi.nix b/nixpkgs/nixos/tests/txredisapi.nix
new file mode 100644
index 000000000000..47c2ba6d3749
--- /dev/null
+++ b/nixpkgs/nixos/tests/txredisapi.nix
@@ -0,0 +1,29 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "txredisapi";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ dandellion ];
+  };
+
+  nodes = {
+    machine =
+      { pkgs, ... }:
+
+      {
+        services.redis.servers."".enable = true;
+
+        environment.systemPackages = with pkgs; [ (python3.withPackages (ps: [ ps.twisted ps.txredisapi ps.mock ]))];
+      };
+  };
+
+  testScript = { nodes, ... }: let
+    inherit (nodes.machine.config.services) redis;
+    in ''
+    start_all()
+    machine.wait_for_unit("redis")
+    machine.wait_for_file("${redis.servers."".unixSocket}")
+    machine.succeed("ln -s ${redis.servers."".unixSocket} /tmp/redis.sock")
+
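+    # Run the upstream txredisapi test suite with twisted.trial against the local Redis instance.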
+    tests = machine.succeed("PYTHONPATH=\"${pkgs.python3Packages.txredisapi.src}\" python -m twisted.trial ${pkgs.python3Packages.txredisapi.src}/tests")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/typesense.nix b/nixpkgs/nixos/tests/typesense.nix
new file mode 100644
index 000000000000..4f07a2e194be
--- /dev/null
+++ b/nixpkgs/nixos/tests/typesense.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ pkgs, ... }: let
+  testPort = 8108;
+in {
+  name = "typesense";
+  meta.maintainers = with pkgs.lib.maintainers; [ oddlama ];
+
+  nodes.machine = { ... }: {
+    services.typesense = {
+      enable = true;
+      apiKeyFile = pkgs.writeText "typesense-api-key" "dummy";
+      settings.server = {
+        api-port = testPort;
+        api-address = "0.0.0.0";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("typesense.service")
+    machine.wait_for_open_port(${toString testPort})
+    assert machine.succeed("curl --fail http://localhost:${toString testPort}/health") == '{"ok":true}'
+  '';
+})
diff --git a/nixpkgs/nixos/tests/ucarp.nix b/nixpkgs/nixos/tests/ucarp.nix
new file mode 100644
index 000000000000..1f60f770d3a8
--- /dev/null
+++ b/nixpkgs/nixos/tests/ucarp.nix
@@ -0,0 +1,66 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+let
+  addrShared = "192.168.0.1";
+  addrHostA = "192.168.0.10";
+  addrHostB = "192.168.0.11";
+
+  mkUcarpHost = addr: { config, pkgs, lib, ... }: {
+    networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
+      { address = addr; prefixLength = 24; }
+    ];
+
+    networking.ucarp = {
+      enable = true;
+      interface = "eth1";
+      srcIp = addr;
+      vhId = 1;
+      passwordFile = "${pkgs.writeText "ucarp-pass" "secure"}";
+      addr = addrShared;
+      upscript = pkgs.writeScript "upscript" ''
+        #!/bin/sh
+        ${pkgs.iproute2}/bin/ip addr add "$2"/24 dev "$1"
+      '';
+      downscript = pkgs.writeScript "downscript" ''
+        #!/bin/sh
+        ${pkgs.iproute2}/bin/ip addr del "$2"/24 dev "$1"
+      '';
+    };
+  };
+in {
+  name = "ucarp";
+  meta.maintainers = with lib.maintainers; [ oxzi ];
+
+  nodes = {
+    hostA = mkUcarpHost addrHostA;
+    hostB = mkUcarpHost addrHostB;
+  };
+
+  testScript = ''
+    def is_master(host):
+      ipOutput = host.succeed("ip addr show dev eth1")
+      return "inet ${addrShared}/24" in ipOutput
+
+
+    start_all()
+
+    # First, let both hosts start and let a master node be selected
+    for host, peer in [(hostA, "${addrHostB}"), (hostB, "${addrHostA}")]:
+      host.wait_for_unit("ucarp.service")
+      host.succeed(f"ping -c 1 {peer}")
+
+    hostA.sleep(5)
+
+    hostA_master, hostB_master = is_master(hostA), is_master(hostB)
+    assert hostA_master != hostB_master, "exactly one host must be master"
+
+    master_host = hostA if hostA_master else hostB
+    backup_host = hostB if hostA_master else hostA
+
+    # Let's crash the master host and let the backup take over
+    master_host.crash()
+
+    backup_host.sleep(5)
+    assert is_master(backup_host), "backup did not take over"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/udisks2.nix b/nixpkgs/nixos/tests/udisks2.nix
new file mode 100644
index 000000000000..8cc148750c7b
--- /dev/null
+++ b/nixpkgs/nixos/tests/udisks2.nix
@@ -0,0 +1,72 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+
+  stick = pkgs.fetchurl {
+    url = "https://nixos.org/~eelco/nix/udisks-test.img.xz";
+    sha256 = "0was1xgjkjad91nipzclaz5biv3m4b2nk029ga6nk7iklwi19l8b";
+  };
+
+in
+
+{
+  name = "udisks2";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ eelco ];
+  };
+
+  nodes.machine =
+    { ... }:
+    { services.udisks2.enable = true;
+      imports = [ ./common/user-account.nix ];
+
+      security.polkit.extraConfig =
+        ''
+          polkit.addRule(function(action, subject) {
+            if (subject.user == "alice") return "yes";
+          });
+        '';
+    };
+
+  testScript =
+    ''
+      import lzma
+
+      machine.systemctl("start udisks2")
+      machine.wait_for_unit("udisks2.service")
+
+      with lzma.open(
+          "${stick}"
+      ) as data, open(machine.state_dir / "usbstick.img", "wb") as stick:
+          stick.write(data.read())
+
+      machine.succeed("udisksctl info -b /dev/vda >&2")
+      machine.fail("udisksctl info -b /dev/sda1")
+
+      # Attach a USB stick and wait for it to show up.
+      machine.send_monitor_command(
+          f"drive_add 0 id=stick,if=none,file={stick.name},format=raw"
+      )
+      machine.send_monitor_command("device_add usb-storage,id=stick,drive=stick")
+      machine.wait_until_succeeds("udisksctl info -b /dev/sda1")
+      machine.succeed("udisksctl info -b /dev/sda1 | grep 'IdLabel:.*USBSTICK'")
+
+      # Mount the stick as a non-root user and read and write files on it.
+      machine.succeed("su - alice -c 'udisksctl info -b /dev/sda1'")
+      machine.succeed("su - alice -c 'udisksctl mount -b /dev/sda1'")
+      machine.succeed(
+          "su - alice -c 'cat /run/media/alice/USBSTICK/test.txt' | grep -q 'Hello World'"
+      )
+      machine.succeed("su - alice -c 'echo foo > /run/media/alice/USBSTICK/bar.txt'")
+
+      # Unmounting the stick should make the mountpoint disappear.
+      machine.succeed("su - alice -c 'udisksctl unmount -b /dev/sda1'")
+      machine.fail("[ -d /run/media/alice/USBSTICK ]")
+
+      # Remove the USB stick.
+      machine.send_monitor_command("device_del stick")
+      machine.wait_until_fails("udisksctl info -b /dev/sda1")
+      machine.fail("[ -e /dev/sda ]")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/ulogd/ulogd.nix b/nixpkgs/nixos/tests/ulogd/ulogd.nix
new file mode 100644
index 000000000000..0fa92229a100
--- /dev/null
+++ b/nixpkgs/nixos/tests/ulogd/ulogd.nix
@@ -0,0 +1,56 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "ulogd";
+
+  meta.maintainers = with lib.maintainers; [ p-h ];
+
+  nodes.machine = { ... }: {
+    networking.firewall.enable = false;
+    networking.nftables.enable = true;
+    networking.nftables.ruleset = ''
+      table inet filter {
+        chain input {
+          type filter hook input priority 0;
+          icmp type { echo-request, echo-reply } log group 2 accept
+        }
+
+        chain output {
+          type filter hook output priority 0; policy accept;
+          icmp type { echo-request, echo-reply } log group 2 accept
+        }
+
+        chain forward {
+          type filter hook forward priority 0; policy drop;
+        }
+
+      }
+    '';
+    services.ulogd = {
+      enable = true;
+      settings = {
+        global = {
+          logfile = "/var/log/ulogd.log";
+          stack = [
+            "log1:NFLOG,base1:BASE,ifi1:IFINDEX,ip2str1:IP2STR,print1:PRINTPKT,emu1:LOGEMU"
+            "log1:NFLOG,base1:BASE,pcap1:PCAP"
+          ];
+        };
+
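+        # The NFLOG group must match the "log group 2" statements in the nftables ruleset above.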
+        log1.group = 2;
+
+        pcap1 = {
+          sync = 1;
+          file = "/var/log/ulogd.pcap";
+        };
+
+        emu1 = {
+          sync = 1;
+          file = "/var/log/ulogd_pkts.log";
+        };
+      };
+    };
+
+    environment.systemPackages = with pkgs; [ tcpdump ];
+  };
+
+  testScript = lib.readFile ./ulogd.py;
+})
diff --git a/nixpkgs/nixos/tests/ulogd/ulogd.py b/nixpkgs/nixos/tests/ulogd/ulogd.py
new file mode 100644
index 000000000000..d20daa4d733a
--- /dev/null
+++ b/nixpkgs/nixos/tests/ulogd/ulogd.py
@@ -0,0 +1,48 @@
+start_all()
+machine.wait_for_unit("ulogd.service")
+machine.wait_for_unit("network-online.target")
+
+with subtest("Ulogd is running"):
+    machine.succeed("pgrep ulogd >&2")
+
+# All packets show up twice in the logs: once from the input chain and once from the output chain
+with subtest("Logs are collected"):
+    machine.succeed("ping -f 127.0.0.1 -c 5 >&2")
+    machine.succeed("sleep 2")
+    machine.wait_until_succeeds("du /var/log/ulogd.pcap")
+    _, echo_request_packets = machine.execute("tcpdump -r /var/log/ulogd.pcap icmp[0] == 8 and host 127.0.0.1")
+    expected, actual = 5 * 2, len(echo_request_packets.splitlines())
+    assert expected == actual, f"Expected {expected} ICMP request packets from pcap, got: {actual}"
+    _, echo_reply_packets = machine.execute("tcpdump -r /var/log/ulogd.pcap icmp[0] == 0 and host 127.0.0.1")
+    expected, actual = 5 * 2, len(echo_reply_packets.splitlines())
+    assert expected == actual, f"Expected {expected} ICMP reply packets from pcap, got: {actual}"
+
+    machine.wait_until_succeeds("du /var/log/ulogd_pkts.log")
+    _, echo_request_packets = machine.execute("grep TYPE=8 /var/log/ulogd_pkts.log")
+    expected, actual = 5 * 2, len(echo_request_packets.splitlines())
+    assert expected == actual, f"Expected {expected} ICMP request packets from logfile, got: {actual}"
+    _, echo_reply_packets = machine.execute("grep TYPE=0 /var/log/ulogd_pkts.log")
+    expected, actual = 5 * 2, len(echo_reply_packets.splitlines())
+    assert expected == actual, f"Expected {expected} ICMP reply packets from logfile, got: {actual}"
+
+with subtest("Reloading service reopens log file"):
+    machine.succeed("mv /var/log/ulogd.pcap /var/log/old_ulogd.pcap")
+    machine.succeed("mv /var/log/ulogd_pkts.log /var/log/old_ulogd_pkts.log")
+    machine.succeed("systemctl reload ulogd.service")
+    machine.succeed("ping -f 127.0.0.1 -c 5 >&2")
+    machine.succeed("sleep 2")
+    machine.wait_until_succeeds("du /var/log/ulogd.pcap")
+    _, echo_request_packets = machine.execute("tcpdump -r /var/log/ulogd.pcap icmp[0] == 8 and host 127.0.0.1")
+    expected, actual = 5 * 2, len(echo_request_packets.splitlines())
+    assert expected == actual, f"Expected {expected} packets, got: {actual}"
+    _, echo_reply_packets = machine.execute("tcpdump -r /var/log/ulogd.pcap icmp[0] == 0 and host 127.0.0.1")
+    expected, actual = 5 * 2, len(echo_reply_packets.splitlines())
+    assert expected == actual, f"Expected {expected} packets, got: {actual}"
+
+    machine.wait_until_succeeds("du /var/log/ulogd_pkts.log")
+    _, echo_request_packets = machine.execute("grep TYPE=8 /var/log/ulogd_pkts.log")
+    expected, actual = 5 * 2, len(echo_request_packets.splitlines())
+    assert expected == actual, f"Expected {expected} ICMP request packets from logfile, got: {actual}"
+    _, echo_reply_packets = machine.execute("grep TYPE=0 /var/log/ulogd_pkts.log")
+    expected, actual = 5 * 2, len(echo_reply_packets.splitlines())
+    assert expected == actual, f"Expected {expected} ICMP reply packets from logfile, got: {actual}"
diff --git a/nixpkgs/nixos/tests/unbound.nix b/nixpkgs/nixos/tests/unbound.nix
new file mode 100644
index 000000000000..f6732390b434
--- /dev/null
+++ b/nixpkgs/nixos/tests/unbound.nix
@@ -0,0 +1,315 @@
+/*
+ Test that our unbound module indeed works as most users would expect.
+ There are a few settings that we must consider when modifying the test. The
+ usual use-cases for unbound are
+   * running a recursive DNS resolver on the local machine
+   * running a recursive DNS resolver on the local machine, forwarding to a local DNS server via UDP/53 & TCP/53
+   * running a recursive DNS resolver on the local machine, forwarding to a local DNS server via TCP/853 (DoT)
+   * running a recursive DNS resolver on a machine in the network awaiting input from clients over TCP/53 & UDP/53
+   * running a recursive DNS resolver on a machine in the network awaiting input from clients over TCP/853 (DoT)
+
+ In the below test setup we are trying to implement all of those use cases.
+
+ Another aspect that we cover is access to the local control UNIX socket. It
+ can optionally be enabled and users can optionally be in a group to gain
+ access. Users that are not in the group (except for root) should not have
+ access to that socket. Also, when there is no socket configured, users
+ shouldn't be able to access the control socket at all. Not even root.
+*/
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  let
+    # common client configuration that we can just use for the multitude of
+    # clients we are constructing
+    common = { lib, pkgs, ... }: {
+      config = {
+        environment.systemPackages = [ pkgs.knot-dns ];
+
+        # disable the root anchor update as we do not have internet access during
+        # the test execution
+        services.unbound.enableRootTrustAnchor = false;
+
+        # we want to test the full-variant of the package to also get DoH support
+        services.unbound.package = pkgs.unbound-full;
+      };
+    };
+
+    cert = pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+      openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=dns.example.local'
+      mkdir -p $out
+      cp key.pem cert.pem $out
+    '';
+  in
+  {
+    name = "unbound";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ andir ];
+    };
+
+    nodes = {
+
+      # The server that actually serves our zones; this tests unbound's authoritative mode
+      authoritative = { lib, pkgs, config, ... }: {
+        imports = [ common ];
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.1"; prefixLength = 24; }
+        ];
+        networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+          { address = "fd21::1"; prefixLength = 64; }
+        ];
+        networking.firewall.allowedTCPPorts = [ 53 ];
+        networking.firewall.allowedUDPPorts = [ 53 ];
+
+        services.unbound = {
+          enable = true;
+          settings = {
+            server = {
+              interface = [ "192.168.0.1" "fd21::1" "::1" "127.0.0.1" ];
+              access-control = [ "192.168.0.0/24 allow" "fd21::/64 allow" "::1 allow" "127.0.0.0/8 allow" ];
+              local-data = [
+                ''"example.local. IN A 1.2.3.4"''
+                ''"example.local. IN AAAA abcd::eeff"''
+              ];
+            };
+          };
+        };
+      };
+
+      # The resolver that forwards (only) to the authoritative server and
+      # listens on UDP/53, TCP/53, TCP/853 (DoT) & TCP/443 (DoH).
+      resolver = { lib, nodes, ... }: {
+        imports = [ common ];
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.2"; prefixLength = 24; }
+        ];
+        networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+          { address = "fd21::2"; prefixLength = 64; }
+        ];
+        networking.firewall.allowedTCPPorts = [
+          53 # regular DNS
+          853 # DNS over TLS
+          443 # DNS over HTTPS
+        ];
+        networking.firewall.allowedUDPPorts = [ 53 ];
+
+        services.unbound = {
+          enable = true;
+          settings = {
+            server = {
+              interface = [ "::1" "127.0.0.1" "192.168.0.2" "fd21::2"
+                            "192.168.0.2@853" "fd21::2@853" "::1@853" "127.0.0.1@853"
+                            "192.168.0.2@443" "fd21::2@443" "::1@443" "127.0.0.1@443" ];
+              access-control = [ "192.168.0.0/24 allow" "fd21::/64 allow" "::1 allow" "127.0.0.0/8 allow" ];
+              tls-service-pem = "${cert}/cert.pem";
+              tls-service-key = "${cert}/key.pem";
+            };
+            forward-zone = [
+              {
+                name = ".";
+                forward-addr = [
+                  (lib.head nodes.authoritative.config.networking.interfaces.eth1.ipv6.addresses).address
+                  (lib.head nodes.authoritative.config.networking.interfaces.eth1.ipv4.addresses).address
+                ];
+              }
+            ];
+          };
+        };
+      };
+
+      # machine that runs a local unbound that will be reconfigured during test execution
+      local_resolver = { lib, nodes, config, ... }: {
+        imports = [ common ];
+        networking.interfaces.eth1.ipv4.addresses = lib.mkForce [
+          { address = "192.168.0.3"; prefixLength = 24; }
+        ];
+        networking.interfaces.eth1.ipv6.addresses = lib.mkForce [
+          { address = "fd21::3"; prefixLength = 64; }
+        ];
+        networking.firewall.allowedTCPPorts = [
+          53 # regular DNS
+        ];
+        networking.firewall.allowedUDPPorts = [ 53 ];
+
+        services.unbound = {
+          enable = true;
+          settings = {
+            server = {
+              interface = [ "::1" "127.0.0.1" ];
+              access-control = [ "::1 allow" "127.0.0.0/8 allow" ];
+            };
+            include = "/etc/unbound/extra*.conf";
+          };
+          localControlSocketPath = "/run/unbound/unbound.ctl";
+        };
+
+        users.users = {
+          # user that is permitted to access the unix socket
+          someuser = {
+            isSystemUser = true;
+            group = "someuser";
+            extraGroups = [
+              config.users.users.unbound.group
+            ];
+          };
+
+          # user that is not permitted to access the unix socket
+          unauthorizeduser = {
+            isSystemUser = true;
+            group = "unauthorizeduser";
+          };
+
+        };
+        users.groups = {
+          someuser = {};
+          unauthorizeduser = {};
+        };
+
+        # Used for testing configuration reloading
+        environment.etc = {
+          "unbound-extra1.conf".text = ''
+            forward-zone:
+            name: "example.local."
+            forward-addr: ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address}
+            forward-addr: ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}
+          '';
+          "unbound-extra2.conf".text = ''
+            auth-zone:
+              name: something.local.
+              zonefile: ${pkgs.writeText "zone" ''
+                something.local. IN A 3.4.5.6
+              ''}
+          '';
+        };
+      };
+
+
+      # plain node that only has network access and doesn't run any part of the
+      # resolver software locally
+      client = { lib, nodes, ... }: {
+        imports = [ common ];
+        networking.nameservers = [
+          (lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address
+          (lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address
+        ];
+        networking.interfaces.eth1.ipv4.addresses = [
+          { address = "192.168.0.10"; prefixLength = 24; }
+        ];
+        networking.interfaces.eth1.ipv6.addresses = [
+          { address = "fd21::10"; prefixLength = 64; }
+        ];
+      };
+    };
+
+    testScript = { nodes, ... }: ''
+      import typing
+
+      zone = "example.local."
+      records = [("AAAA", "abcd::eeff"), ("A", "1.2.3.4")]
+
+
+      def query(
+          machine,
+          host: str,
+          query_type: str,
+          query: str,
+          expected: typing.Optional[str] = None,
+          args: typing.Optional[typing.List[str]] = None,
+      ):
+          """
+          Execute a single query and compare the result with expectation
+          """
+          text_args = ""
+          if args:
+              text_args = " ".join(args)
+
+          out = machine.succeed(
+              f"kdig {text_args} {query} {query_type} @{host} +short"
+          ).strip()
+          machine.log(f"{host} replied with {out}")
+          if expected:
+              assert expected == out, f"Expected `{expected}` but got `{out}`"
+
+
+      def test(machine, remotes, /, doh=False, zone=zone, records=records, args=[]):
+          """
+          Run queries for the given remotes on the given machine.
+          """
+          for query_type, expected in records:
+              for remote in remotes:
+                  query(machine, remote, query_type, zone, expected, args)
+                  query(machine, remote, query_type, zone, expected, ["+tcp"] + args)
+                  if doh:
+                      query(
+                          machine,
+                          remote,
+                          query_type,
+                          zone,
+                          expected,
+                          ["+tcp", "+tls"] + args,
+                      )
+                      query(
+                          machine,
+                          remote,
+                          query_type,
+                          zone,
+                          expected,
+                          ["+https"] + args,
+                      )
+
+
+      client.start()
+      authoritative.wait_for_unit("unbound.service")
+
+      # verify that we can resolve locally
+      with subtest("test the authoritative servers local responses"):
+          test(authoritative, ["::1", "127.0.0.1"])
+
+      resolver.wait_for_unit("unbound.service")
+
+      with subtest("root is unable to use unbounc-control when the socket is not configured"):
+          resolver.succeed("which unbound-control")  # the binary must exist
+          resolver.fail("unbound-control list_forwards")  # the invocation must fail
+
+      # verify that the resolver is able to resolve on all the local protocols
+      with subtest("test that the resolver resolves on all protocols and transports"):
+          test(resolver, ["::1", "127.0.0.1"], doh=True)
+
+      resolver.wait_for_unit("multi-user.target")
+
+      with subtest("client should be able to query the resolver"):
+          test(client, ["${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address}", "${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}"], doh=True)
+
+      # discard the client we do not need anymore
+      client.shutdown()
+
+      local_resolver.wait_for_unit("multi-user.target")
+
+      # link a new config file to /etc/unbound/extra1.conf
+      local_resolver.succeed("ln -s /etc/unbound-extra1.conf /etc/unbound/extra1.conf")
+
+      # reload the server & ensure the forwarding works
+      with subtest("test that the local resolver resolves on all protocols and transports"):
+          local_resolver.succeed("systemctl reload unbound")
+          print(local_resolver.succeed("journalctl -u unbound -n 1000"))
+          test(local_resolver, ["::1", "127.0.0.1"], args=["+timeout=60"])
+
+      with subtest("test that we can use the unbound control socket"):
+          out = local_resolver.succeed(
+              "sudo -u someuser -- unbound-control list_forwards"
+          ).strip()
+
+          # Thank you black! Can't really break this line into a readable version.
+          expected = "example.local. IN forward ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv6.addresses).address} ${(lib.head nodes.resolver.config.networking.interfaces.eth1.ipv4.addresses).address}"
+          assert out == expected, f"Expected `{expected}` but got `{out}` instead."
+          local_resolver.fail("sudo -u unauthorizeduser -- unbound-control list_forwards")
+
+
+      # link a new config file to /etc/unbound/extra2.conf
+      local_resolver.succeed("ln -sf /etc/unbound-extra2.conf /etc/unbound/extra2.conf")
+
+      # reload the server & ensure the new local zone works
+      with subtest("test that we can query the new local zone"):
+          local_resolver.succeed("unbound-control reload")
+          r = [("A", "3.4.5.6")]
+          test(local_resolver, ["::1", "127.0.0.1"], zone="something.local.", records=r)
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/unifi.nix b/nixpkgs/nixos/tests/unifi.nix
new file mode 100644
index 000000000000..d371bafd6965
--- /dev/null
+++ b/nixpkgs/nixos/tests/unifi.nix
@@ -0,0 +1,38 @@
+# Test UniFi controller
+
+{ system ? builtins.currentSystem
+, config ? { allowUnfree = true; }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  makeAppTest = unifi: makeTest {
+    name = "unifi-controller-${unifi.version}";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ patryk27 zhaofengli ];
+    };
+
+    nodes.server = {
+      nixpkgs.config = config;
+
+      services.unifi = {
+        enable = true;
+        unifiPackage = unifi;
+        openFirewall = false;
+      };
+    };
+
+    testScript = ''
+      server.wait_for_unit("unifi.service")
+      server.wait_until_succeeds("curl -Lk https://localhost:8443 >&2", timeout=300)
+    '';
+  };
+in with pkgs; {
+  unifiLTS = makeAppTest unifiLTS;
+  unifi5 = makeAppTest unifi5;
+  unifi6 = makeAppTest unifi6;
+  unifi7 = makeAppTest unifi7;
+}
diff --git a/nixpkgs/nixos/tests/upnp.nix b/nixpkgs/nixos/tests/upnp.nix
new file mode 100644
index 000000000000..af7cc1fe2413
--- /dev/null
+++ b/nixpkgs/nixos/tests/upnp.nix
@@ -0,0 +1,96 @@
+# This tests whether UPnP port mappings can be created using Miniupnpd
+# and Miniupnpc.
+# It runs a Miniupnpd service on one machine, and verifies
+# a client can indeed create a port mapping using Miniupnpc. If
+# this succeeds, an external client will try to connect to the port
+# mapping.
+
+import ./make-test-python.nix ({ pkgs, ... }:
+
+let
+  internalRouterAddress = "192.168.3.1";
+  internalClient1Address = "192.168.3.2";
+  externalRouterAddress = "80.100.100.1";
+  externalClient2Address = "80.100.100.2";
+in
+{
+  name = "upnp";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ bobvanderlinden ];
+  };
+
+  nodes =
+    {
+      router =
+        { pkgs, nodes, ... }:
+        { virtualisation.vlans = [ 1 2 ];
+          networking.nat.enable = true;
+          networking.nat.internalInterfaces = [ "eth2" ];
+          networking.nat.externalInterface = "eth1";
+          networking.firewall.enable = true;
+          networking.firewall.trustedInterfaces = [ "eth2" ];
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = externalRouterAddress; prefixLength = 24; }
+          ];
+          networking.interfaces.eth2.ipv4.addresses = [
+            { address = internalRouterAddress; prefixLength = 24; }
+          ];
+          services.miniupnpd = {
+            enable = true;
+            externalInterface = "eth1";
+            internalIPs = [ "eth2" ];
+            appendConfig = ''
+              ext_ip=${externalRouterAddress}
+            '';
+          };
+        };
+
+      client1 =
+        { pkgs, nodes, ... }:
+        { environment.systemPackages = [ pkgs.miniupnpc pkgs.netcat ];
+          virtualisation.vlans = [ 2 ];
+          networking.defaultGateway = internalRouterAddress;
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = internalClient1Address; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+
+          services.httpd.enable = true;
+          services.httpd.virtualHosts.localhost = {
+            listen = [{ ip = "*"; port = 9000; }];
+            adminAddr = "foo@example.org";
+            documentRoot = "/tmp";
+          };
+        };
+
+      client2 =
+        { pkgs, ... }:
+        { environment.systemPackages = [ pkgs.miniupnpc ];
+          virtualisation.vlans = [ 1 ];
+          networking.interfaces.eth1.ipv4.addresses = [
+            { address = externalClient2Address; prefixLength = 24; }
+          ];
+          networking.firewall.enable = false;
+        };
+    };
+
+  testScript =
+    { nodes, ... }:
+    ''
+      start_all()
+
+      # Wait for network and miniupnpd.
+      router.wait_for_unit("network-online.target")
+      # $router.wait_for_unit("nat")
+      router.wait_for_unit("firewall.service")
+      router.wait_for_unit("miniupnpd")
+
+      client1.wait_for_unit("network-online.target")
+
+      client1.succeed("upnpc -a ${internalClient1Address} 9000 9000 TCP")
+
+      client1.wait_for_unit("httpd")
+      client2.wait_until_succeeds("curl -f http://${externalRouterAddress}:9000/")
+    '';
+
+})
diff --git a/nixpkgs/nixos/tests/uptermd.nix b/nixpkgs/nixos/tests/uptermd.nix
new file mode 100644
index 000000000000..429e3c9dd5ff
--- /dev/null
+++ b/nixpkgs/nixos/tests/uptermd.nix
@@ -0,0 +1,65 @@
+import ./make-test-python.nix ({ pkgs, ...}:
+
+let
+  client = {pkgs, ...}:{
+    environment.systemPackages = [ pkgs.upterm ];
+  };
+in
+{
+  name = "uptermd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fleaz ];
+  };
+
+  nodes = {
+    server = {config, ...}: {
+      services.uptermd = {
+        enable = true;
+        openFirewall = true;
+        port = 1337;
+      };
+    };
+    client1 = client;
+    client2 = client;
+  };
+
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("uptermd.service")
+    server.wait_for_unit("network-online.target")
+
+    # wait for upterm port to be reachable
+    client1.wait_until_succeeds("nc -z -v server 1337")
+
+    # Add SSH hostkeys from the server to both clients
+    # uptermd needs an '@cert-authority' entry, so we need to modify the known_hosts file
+    client1.execute("mkdir -p ~/.ssh && ssh -o StrictHostKeyChecking=no -p 1337 server ls")
+    client1.execute("echo @cert-authority $(cat ~/.ssh/known_hosts) > ~/.ssh/known_hosts")
+    client2.execute("mkdir -p ~/.ssh && ssh -o StrictHostKeyChecking=no -p 1337 server ls")
+    client2.execute("echo @cert-authority $(cat ~/.ssh/known_hosts) > ~/.ssh/known_hosts")
+
+    client1.wait_for_unit("multi-user.target")
+    client1.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+    client1.wait_until_tty_matches("1", "login: ")
+    client1.send_chars("root\n")
+    client1.wait_until_succeeds("pgrep -u root bash")
+
+    client1.execute("ssh-keygen -t ed25519 -N \"\" -f /root/.ssh/id_ed25519")
+    client1.send_chars("TERM=xterm upterm host --server ssh://server:1337 --force-command hostname -- bash > /tmp/session-details\n")
+    client1.wait_for_file("/tmp/session-details")
+    client1.send_key("q")
+
+    # the upterm client can't connect if we don't have a keypair
+    client2.execute("ssh-keygen -t ed25519 -N \"\" -f /root/.ssh/id_ed25519")
+
+    # Grep the ssh connect command from the output of 'upterm host'
+    ssh_command = client1.succeed("grep 'SSH Session' /tmp/session-details | cut -d':' -f2-").strip()
+
+    # Connect with client2. Because we used '--force-command hostname' we should get "client1" as the output
+    output = client2.succeed(ssh_command)
+
+    assert output.strip() == "client1"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/uptime-kuma.nix b/nixpkgs/nixos/tests/uptime-kuma.nix
new file mode 100644
index 000000000000..00e2008a5257
--- /dev/null
+++ b/nixpkgs/nixos/tests/uptime-kuma.nix
@@ -0,0 +1,17 @@
+import ./make-test-python.nix ({ lib, ... }:
+
+{
+  name = "uptime-kuma";
+  meta.maintainers = with lib.maintainers; [ julienmalka ];
+
+  nodes.machine =
+    { pkgs, ... }:
+    { services.uptime-kuma.enable = true; };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("uptime-kuma.service")
+    machine.wait_for_open_port(3001)
+    machine.succeed("curl --fail http://localhost:3001/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/usbguard.nix b/nixpkgs/nixos/tests/usbguard.nix
new file mode 100644
index 000000000000..d6d3a80c5d23
--- /dev/null
+++ b/nixpkgs/nixos/tests/usbguard.nix
@@ -0,0 +1,62 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "usbguard";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ tnias ];
+  };
+
+  nodes.machine =
+    { ... }:
+    {
+      services.usbguard = {
+        enable = true;
+        IPCAllowedUsers = [ "alice" "root" ];
+
+        # As virtual USB devices get attached to the "QEMU USB Hub", we need to
+        # allow hubs; otherwise we would have to allow the hub explicitly as well.
+        rules = ''
+          allow with-interface equals { 09:00:00 }
+        '';
+      };
+      imports = [ ./common/user-account.nix ];
+    };
+
+  testScript = ''
+    # create a blank disk image for our fake USB stick
+    with open(machine.state_dir / "usbstick.img", "wb") as stick:
+        stick.write(b"\x00" * (1024 * 1024))
+
+    # wait for machine to have started and the usbguard service to be up
+    machine.wait_for_unit("usbguard.service")
+
+    with subtest("IPC access control"):
+        # User "alice" is allowed to access the IPC interface
+        machine.succeed("su alice -c 'usbguard list-devices'")
+
+        # User "bob" is not allowed to access the IPC interface
+        machine.fail("su bob -c 'usbguard list-devices'")
+
+    with subtest("check basic functionality"):
+        # at this point we expect that no USB HDD is connected
+        machine.fail("usbguard list-devices | grep -E 'QEMU USB HARDDRIVE'")
+
+        # insert usb device
+        machine.send_monitor_command(
+            f"drive_add 0 id=stick,if=none,file={stick.name},format=raw"
+        )
+        machine.send_monitor_command("device_add usb-storage,id=stick,drive=stick")
+
+        # the attached USB HDD should show up after a short while
+        machine.wait_until_succeeds("usbguard list-devices | grep -E 'QEMU USB HARDDRIVE'")
+
+        # at this point there should be a **blocked** USB HDD
+        machine.succeed("usbguard list-devices | grep -E 'block.*QEMU USB HARDDRIVE'")
+        machine.fail("usbguard list-devices | grep -E ' allow .*QEMU USB HARDDRIVE'")
+
+        # allow storage devices
+        machine.succeed("usbguard allow-device 'with-interface { 08:*:* }'")
+
+        # at this point there should be an **allowed** USB HDD
+        machine.succeed("usbguard list-devices | grep -E ' allow .*QEMU USB HARDDRIVE'")
+        machine.fail("usbguard list-devices | grep -E ' block .*QEMU USB HARDDRIVE'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/user-activation-scripts.nix b/nixpkgs/nixos/tests/user-activation-scripts.nix
new file mode 100644
index 000000000000..ebd96b019e92
--- /dev/null
+++ b/nixpkgs/nixos/tests/user-activation-scripts.nix
@@ -0,0 +1,36 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "user-activation-scripts";
+  meta = with lib.maintainers; { maintainers = [ chkno ]; };
+
+  nodes.machine = {
+    system.userActivationScripts.foo = "mktemp ~/user-activation-ran.XXXXXX";
+    users.users.alice = {
+      initialPassword = "pass1";
+      isNormalUser = true;
+    };
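+    # The "r" tmpfiles rule removes %h/file-to-remove when user activation runs; verified below.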
+    systemd.user.tmpfiles.users.alice.rules = [ "r %h/file-to-remove" ];
+  };
+
+  testScript = ''
+    def verify_user_activation_run_count(n):
+        machine.succeed(
+            '[[ "$(find /home/alice/ -name user-activation-ran.\\* | wc -l)" == %s ]]' % n
+        )
+
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("getty@tty1.service")
+    machine.wait_until_tty_matches("1", "login: ")
+    machine.send_chars("alice\n")
+    machine.wait_until_tty_matches("1", "Password: ")
+    machine.send_chars("pass1\n")
+    machine.send_chars("touch login-ok\n")
+    machine.wait_for_file("/home/alice/login-ok")
+    verify_user_activation_run_count(1)
+
+    machine.succeed("touch /home/alice/file-to-remove")
+    machine.succeed("/run/current-system/bin/switch-to-configuration test")
+    verify_user_activation_run_count(2)
+    machine.succeed("[[ ! -f /home/alice/file-to-remove ]] || false")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/user-expiry.nix b/nixpkgs/nixos/tests/user-expiry.nix
new file mode 100644
index 000000000000..bcaed7a0ccb0
--- /dev/null
+++ b/nixpkgs/nixos/tests/user-expiry.nix
@@ -0,0 +1,70 @@
+let
+  alice = "alice";
+  bob = "bob";
+  eve = "eve";
+  passwd = "pass1";
+in
+{
+  name = "user-expiry";
+
+  nodes = {
+    machine = {
+      users.users = {
+        ${alice} = {
+          initialPassword = passwd;
+          isNormalUser = true;
+          expires = "1990-01-01";
+        };
+        ${bob} = {
+          initialPassword = passwd;
+          isNormalUser = true;
+          expires = "2990-01-01";
+        };
+        ${eve} = {
+          initialPassword = passwd;
+          isNormalUser = true;
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    def switch_to_tty(tty_number):
+      machine.fail(f"pgrep -f 'agetty.*tty{tty_number}'")
+      machine.send_key(f"alt-f{tty_number}")
+      machine.wait_until_succeeds(f"[ $(fgconsole) = {tty_number} ]")
+      machine.wait_for_unit(f"getty@tty{tty_number}.service")
+      machine.wait_until_succeeds(f"pgrep -f 'agetty.*tty{tty_number}'")
+
+
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("getty@tty1.service")
+
+    with subtest("${alice} cannot login"):
+      machine.wait_until_tty_matches("1", "login: ")
+      machine.send_chars("${alice}\n")
+      machine.wait_until_tty_matches("1", "Password: ")
+      machine.send_chars("${passwd}\n")
+
+      machine.wait_until_succeeds("journalctl --grep='account ${alice} has expired \\(account expired\\)'")
+      machine.wait_until_tty_matches("1", "login: ")
+
+    with subtest("${bob} can login"):
+      switch_to_tty(2)
+      machine.wait_until_tty_matches("2", "login: ")
+      machine.send_chars("${bob}\n")
+      machine.wait_until_tty_matches("2", "Password: ")
+      machine.send_chars("${passwd}\n")
+
+      machine.wait_until_succeeds("pgrep -u ${bob} bash")
+
+    with subtest("${eve} can login"):
+      switch_to_tty(3)
+      machine.wait_until_tty_matches("3", "login: ")
+      machine.send_chars("${eve}\n")
+      machine.wait_until_tty_matches("3", "Password: ")
+      machine.send_chars("${passwd}\n")
+
+      machine.wait_until_succeeds("pgrep -u ${eve} bash")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/user-home-mode.nix b/nixpkgs/nixos/tests/user-home-mode.nix
new file mode 100644
index 000000000000..070cb0b75cc9
--- /dev/null
+++ b/nixpkgs/nixos/tests/user-home-mode.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "user-home-mode";
+  meta = with lib.maintainers; { maintainers = [ fbeffa ]; };
+
+  nodes.machine = {
+    users.users.alice = {
+      initialPassword = "pass1";
+      isNormalUser = true;
+    };
+    users.users.bob = {
+      initialPassword = "pass2";
+      isNormalUser = true;
+      homeMode = "750";
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("getty@tty1.service")
+    machine.wait_until_tty_matches("1", "login: ")
+    machine.send_chars("alice\n")
+    machine.wait_until_tty_matches("1", "Password: ")
+    machine.send_chars("pass1\n")
+    machine.succeed('[ "$(stat -c %a /home/alice)" == "700" ]')
+    machine.succeed('[ "$(stat -c %a /home/bob)" == "750" ]')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/uwsgi.nix b/nixpkgs/nixos/tests/uwsgi.nix
new file mode 100644
index 000000000000..62da9e0a7168
--- /dev/null
+++ b/nixpkgs/nixos/tests/uwsgi.nix
@@ -0,0 +1,81 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "uwsgi";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lnl7 ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    users.users.hello  =
+      { isSystemUser = true;
+        group = "hello";
+      };
+    users.groups.hello = { };
+
+    services.uwsgi = {
+      enable = true;
+      plugins = [ "python3" "php" ];
+      capabilities = [ "CAP_NET_BIND_SERVICE" ];
+      instance.type = "emperor";
+
+      instance.vassals.hello = {
+        type = "normal";
+        immediate-uid = "hello";
+        immediate-gid = "hello";
+        module = "wsgi:application";
+        http = ":80";
+        cap = "net_bind_service";
+        pythonPackages = self: [ self.flask ];
+        chdir = pkgs.writeTextDir "wsgi.py" ''
+          from flask import Flask
+          import subprocess
+          application = Flask(__name__)
+
+          @application.route("/")
+          def hello():
+              return "Hello, World!"
+
+          @application.route("/whoami")
+          def whoami():
+              whoami = "${pkgs.coreutils}/bin/whoami"
+              proc = subprocess.run(whoami, capture_output=True)
+              return proc.stdout.decode().strip()
+        '';
+      };
+
+      instance.vassals.php = {
+        type = "normal";
+        master = true;
+        workers = 2;
+        http-socket = ":8000";
+        http-socket-modifier1 = 14;
+        php-index = "index.php";
+        php-docroot = pkgs.writeTextDir "index.php" ''
+          <?php echo "Hello World\n"; ?>
+        '';
+      };
+    };
+  };
+
+  testScript =
+    ''
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_unit("uwsgi.service")
+
+      with subtest("uWSGI has started"):
+          machine.wait_for_unit("uwsgi.service")
+
+      with subtest("Vassal can bind on port <1024"):
+          machine.wait_for_open_port(80)
+          hello = machine.succeed("curl -f http://machine").strip()
+          assert "Hello, World!" in hello, f"Excepted 'Hello, World!', got '{hello}'"
+
+      with subtest("Vassal is running as dedicated user"):
+          username = machine.succeed("curl -f http://machine/whoami").strip()
+          assert username == "hello", f"Expected 'hello', got '{username}'"
+
+      with subtest("PHP plugin is working"):
+          machine.wait_for_open_port(8000)
+          assert "Hello World" in machine.succeed("curl -fv http://machine:8000")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/v2ray.nix b/nixpkgs/nixos/tests/v2ray.nix
new file mode 100644
index 000000000000..9eee962c64e4
--- /dev/null
+++ b/nixpkgs/nixos/tests/v2ray.nix
@@ -0,0 +1,91 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: let
+
+  v2rayUser = {
+    # A random UUID.
+    id = "a6a46834-2150-45f8-8364-0f6f6ab32384";
+    alterId = 0; # Non-zero support will be disabled in the future.
+  };
+
+  # 1080 [http proxy] -> 1081 [vmess] -> direct
+  v2rayConfig = {
+    inbounds = [
+      {
+        tag = "http_in";
+        port = 1080;
+        listen = "127.0.0.1";
+        protocol = "http";
+      }
+      {
+        tag = "vmess_in";
+        port = 1081;
+        listen = "127.0.0.1";
+        protocol = "vmess";
+        settings.clients = [ v2rayUser ];
+      }
+    ];
+    outbounds = [
+      {
+        tag = "vmess_out";
+        protocol = "vmess";
+        settings.vnext = [{
+          address = "127.0.0.1";
+          port = 1081;
+          users = [ v2rayUser ];
+        }];
+      }
+      {
+        tag = "direct";
+        protocol = "freedom";
+      }
+    ];
+    routing.rules = [
+      {
+        type = "field";
+        inboundTag = "http_in";
+        outboundTag = "vmess_out";
+      }
+      {
+        type = "field";
+        inboundTag = "vmess_in";
+        outboundTag = "direct";
+      }
+
+      # Assert assets "geoip" and "geosite" are accessible.
+      {
+        type = "field";
+        ip = [ "geoip:private" ];
+        domain = [ "geosite:category-ads" ];
+        outboundTag = "direct";
+      }
+    ];
+  };
+
+in {
+  name = "v2ray";
+  meta = with lib.maintainers; {
+    maintainers = [ servalcatty ];
+  };
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.curl ];
+    services.v2ray = {
+      enable = true;
+      config = v2rayConfig;
+    };
+    services.httpd = {
+      enable = true;
+      adminAddr = "foo@example.org";
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("httpd.service")
+    machine.wait_for_unit("v2ray.service")
+    machine.wait_for_open_port(80)
+    machine.wait_for_open_port(1080)
+    machine.succeed(
+        "curl --fail --max-time 10 --proxy http://localhost:1080 http://localhost"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/varnish.nix b/nixpkgs/nixos/tests/varnish.nix
new file mode 100644
index 000000000000..9dcdeec9d8c8
--- /dev/null
+++ b/nixpkgs/nixos/tests/varnish.nix
@@ -0,0 +1,55 @@
+{
+  system ? builtins.currentSystem
+, pkgs ? import ../.. { inherit system; }
+, package
+}:
+import ./make-test-python.nix ({ pkgs, ... }: let
+  testPath = pkgs.hello;
+in {
+  name = "varnish";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ajs124 ];
+  };
+
+  nodes = {
+    varnish = { config, pkgs, ... }: {
+        services.nix-serve = {
+          enable = true;
+        };
+
+        services.varnish = {
+          inherit package;
+          enable = true;
+          http_address = "0.0.0.0:80";
+          config = ''
+            vcl 4.0;
+
+            backend nix-serve {
+              .host = "127.0.0.1";
+              .port = "${toString config.services.nix-serve.port}";
+            }
+          '';
+        };
+
+        networking.firewall.allowedTCPPorts = [ 80 ];
+        system.extraDependencies = [ testPath ];
+      };
+
+    client = { lib, ... }: {
+      nix.settings = {
+        require-sigs = false;
+        substituters = lib.mkForce [ "http://varnish" ];
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    varnish.wait_for_open_port(80)
+
+    client.wait_until_succeeds("curl -f http://varnish/nix-cache-info");
+
+    client.wait_until_succeeds("nix-store -r ${testPath}");
+    client.succeed("${testPath}/bin/hello");
+  '';
+})
diff --git a/nixpkgs/nixos/tests/vault-agent.nix b/nixpkgs/nixos/tests/vault-agent.nix
new file mode 100644
index 000000000000..dc86c829b67a
--- /dev/null
+++ b/nixpkgs/nixos/tests/vault-agent.nix
@@ -0,0 +1,52 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "vault-agent";
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.vault-agent.instances.example.settings = {
+      vault.address = config.environment.variables.VAULT_ADDR;
+
+      auto_auth = [{
+        method = [{
+          type = "token_file";
+          config.token_file_path = pkgs.writeText "vault-token" config.environment.variables.VAULT_TOKEN;
+        }];
+      }];
+
+      template = [{
+        contents = ''
+          {{- with secret "secret/example" }}
+          {{ .Data.data.key }}"
+          {{- end }}
+        '';
+        perms = "0600";
+        destination = "/example";
+      }];
+    };
+
+    services.vault = {
+      enable = true;
+      dev = true;
+      devRootTokenID = config.environment.variables.VAULT_TOKEN;
+    };
+
+    environment = {
+      systemPackages = [ pkgs.vault ];
+      variables = {
+        VAULT_ADDR = "http://localhost:8200";
+        VAULT_TOKEN = "root";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("vault.service")
+    machine.wait_for_open_port(8200)
+
+    machine.wait_until_succeeds('vault kv put secret/example key=example')
+
+    machine.wait_for_unit("vault-agent-example.service")
+
+    machine.wait_for_file("/example")
+    machine.succeed('grep "example" /example')
+  '';
+})
diff --git a/nixpkgs/nixos/tests/vault-dev.nix b/nixpkgs/nixos/tests/vault-dev.nix
new file mode 100644
index 000000000000..ba9a1015cc13
--- /dev/null
+++ b/nixpkgs/nixos/tests/vault-dev.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "vault-dev";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lnl7 mic92 ];
+  };
+  nodes.machine = { pkgs, config, ... }: {
+    environment.systemPackages = [ pkgs.vault ];
+    environment.variables.VAULT_ADDR = "http://127.0.0.1:8200";
+    environment.variables.VAULT_TOKEN = "phony-secret";
+
+    services.vault = {
+      enable = true;
+      dev = true;
+      devRootTokenID = config.environment.variables.VAULT_TOKEN;
+    };
+  };
+
+  testScript = ''
+    import json
+    start_all()
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_unit("vault.service")
+    machine.wait_for_open_port(8200)
+    out = machine.succeed("vault status -format=json")
+    print(out)
+    status = json.loads(out)
+    assert status.get("initialized") == True
+    machine.succeed("vault kv put secret/foo bar=baz")
+    out = machine.succeed("vault kv get -format=json secret/foo")
+    print(out)
+    status = json.loads(out)
+    assert status.get("data", {}).get("data", {}).get("bar") == "baz"
+  '';
+})
diff --git a/nixpkgs/nixos/tests/vault-postgresql.nix b/nixpkgs/nixos/tests/vault-postgresql.nix
new file mode 100644
index 000000000000..e0e5881c6da7
--- /dev/null
+++ b/nixpkgs/nixos/tests/vault-postgresql.nix
@@ -0,0 +1,69 @@
+/* This test checks that
+    - multiple config files can be loaded
+    - the storage backend can be in a file outside the nix store
+      as is required for security (required because while confidentiality is
+      always covered, availability isn't)
+    - the postgres integration works
+ */
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "vault-postgresql";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lnl7 roberth ];
+  };
+  nodes.machine = { lib, pkgs, ... }: {
+    environment.systemPackages = [ pkgs.vault ];
+    environment.variables.VAULT_ADDR = "http://127.0.0.1:8200";
+    services.vault.enable = true;
+    services.vault.extraSettingsPaths = [ "/run/vault.hcl" ];
+
+    systemd.services.vault = {
+      after = [
+        "postgresql.service"
+      ];
+      # Try for about 10 minutes rather than the default of 5 attempts.
+      serviceConfig.RestartSec = 1;
+      serviceConfig.StartLimitBurst = 600;
+    };
+    # systemd.services.vault.unitConfig.RequiresMountsFor = "/run/keys/";
+
+    services.postgresql.enable = true;
+    services.postgresql.initialScript = pkgs.writeText "init.psql" ''
+      CREATE USER vaultuser WITH ENCRYPTED PASSWORD 'thisisthepass';
+      GRANT CONNECT ON DATABASE postgres TO vaultuser;
+
+      -- https://www.vaultproject.io/docs/configuration/storage/postgresql
+      CREATE TABLE vault_kv_store (
+        parent_path TEXT COLLATE "C" NOT NULL,
+        path        TEXT COLLATE "C",
+        key         TEXT COLLATE "C",
+        value       BYTEA,
+        CONSTRAINT pkey PRIMARY KEY (path, key)
+      );
+      CREATE INDEX parent_path_idx ON vault_kv_store (parent_path);
+
+      GRANT SELECT, INSERT, UPDATE, DELETE ON ALL TABLES IN SCHEMA public TO vaultuser;
+    '';
+  };
+
+  testScript =
+    ''
+      secretConfig = """
+          storage "postgresql" {
+            connection_url = "postgres://vaultuser:thisisthepass@localhost/postgres?sslmode=disable"
+          }
+          """
+
+      start_all()
+
+      machine.wait_for_unit("multi-user.target")
+      machine.succeed("cat >/root/vault.hcl <<EOF\n%s\nEOF\n" % secretConfig)
+      machine.succeed(
+          "install --owner vault --mode 0400 /root/vault.hcl /run/vault.hcl; rm /root/vault.hcl"
+      )
+      machine.wait_for_unit("vault.service")
+      machine.wait_for_open_port(8200)
+      machine.succeed("vault operator init")
+      machine.succeed("vault status || test $? -eq 2")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/vault.nix b/nixpkgs/nixos/tests/vault.nix
new file mode 100644
index 000000000000..1b0a26a4487f
--- /dev/null
+++ b/nixpkgs/nixos/tests/vault.nix
@@ -0,0 +1,25 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+{
+  name = "vault";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ lnl7 ];
+  };
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.vault ];
+    environment.variables.VAULT_ADDR = "http://127.0.0.1:8200";
+    services.vault.enable = true;
+  };
+
+  testScript =
+    ''
+      start_all()
+
+      machine.wait_for_unit("multi-user.target")
+      machine.wait_for_unit("vault.service")
+      machine.wait_for_open_port(8200)
+      machine.succeed("vault operator init")
+      # vault now returns exit code 2 for sealed vaults
+      machine.fail("vault status")
+      machine.succeed("vault status || test $? -eq 2")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/vaultwarden.nix b/nixpkgs/nixos/tests/vaultwarden.nix
new file mode 100644
index 000000000000..9d2f0e6ab060
--- /dev/null
+++ b/nixpkgs/nixos/tests/vaultwarden.nix
@@ -0,0 +1,198 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../.. { inherit system config; }
+}:
+
+# These tests will:
+#  * Set up a vaultwarden server
+#  * Have Firefox use the web vault to create an account, log in, and save a password to the vault
+#  * Have the bw cli log in and read that password from the vault
+#
+# Note that Firefox must be on the same machine as the server for WebCrypto APIs to be available (or HTTPS must be configured)
+#
+# The same tests should work without modification on the official bitwarden server, if we ever package that.
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+let
+  backends = [ "sqlite" "mysql" "postgresql" ];
+
+  dbPassword = "please_dont_hack";
+
+  userEmail = "meow@example.com";
+  userPassword = "also_super_secret_ZJWpBKZi668QGt"; # Must be complex to avoid interstitial warning on the signup page
+
+  storedPassword = "seeeecret";
+
+  makeVaultwardenTest = backend: makeTest {
+    name = "vaultwarden-${backend}";
+    meta = {
+      maintainers = with pkgs.lib.maintainers; [ jjjollyjim ];
+    };
+
+    nodes = {
+      server = { pkgs, ... }:
+        let backendConfig = {
+          mysql = {
+            services.mysql = {
+              enable = true;
+              initialScript = pkgs.writeText "mysql-init.sql" ''
+                CREATE DATABASE bitwarden;
+                CREATE USER 'bitwardenuser'@'localhost' IDENTIFIED BY '${dbPassword}';
+                GRANT ALL ON `bitwarden`.* TO 'bitwardenuser'@'localhost';
+                FLUSH PRIVILEGES;
+              '';
+              package = pkgs.mariadb;
+            };
+
+            services.vaultwarden.config.databaseUrl = "mysql://bitwardenuser:${dbPassword}@localhost/bitwarden";
+
+            systemd.services.vaultwarden.after = [ "mysql.service" ];
+          };
+
+          postgresql = {
+            services.postgresql = {
+              enable = true;
+              initialScript = pkgs.writeText "postgresql-init.sql" ''
+                CREATE USER bitwardenuser WITH PASSWORD '${dbPassword}';
+                CREATE DATABASE bitwarden WITH OWNER bitwardenuser;
+              '';
+            };
+
+            services.vaultwarden.config.databaseUrl = "postgresql://bitwardenuser:${dbPassword}@localhost/bitwarden";
+
+            systemd.services.vaultwarden.after = [ "postgresql.service" ];
+          };
+
+          sqlite = { };
+        };
+        in
+        mkMerge [
+          backendConfig.${backend}
+          {
+            services.vaultwarden = {
+              enable = true;
+              dbBackend = backend;
+              config = {
+                rocketAddress = "0.0.0.0";
+                rocketPort = 80;
+              };
+            };
+
+            networking.firewall.allowedTCPPorts = [ 80 ];
+
+            environment.systemPackages =
+              let
+                testRunner = pkgs.writers.writePython3Bin "test-runner"
+                  {
+                    libraries = [ pkgs.python3Packages.selenium ];
+                    flakeIgnore = [
+                      "E501"
+                    ];
+                  } ''
+
+                  from selenium.webdriver.common.by import By
+                  from selenium.webdriver import Firefox
+                  from selenium.webdriver.firefox.options import Options
+                  from selenium.webdriver.support.ui import WebDriverWait
+                  from selenium.webdriver.support import expected_conditions as EC
+
+                  options = Options()
+                  options.add_argument('--headless')
+                  driver = Firefox(options=options)
+
+                  driver.implicitly_wait(20)
+                  driver.get('http://localhost/#/register')
+
+                  wait = WebDriverWait(driver, 10)
+
+                  wait.until(EC.title_contains("Create account"))
+
+                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_email').send_keys(
+                      '${userEmail}'
+                  )
+                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_name').send_keys(
+                      'A Cat'
+                  )
+                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_master-password').send_keys(
+                      '${userPassword}'
+                  )
+                  driver.find_element(By.CSS_SELECTOR, 'input#register-form_input_confirm-master-password').send_keys(
+                      '${userPassword}'
+                  )
+                  if driver.find_element(By.CSS_SELECTOR, 'input#checkForBreaches').is_selected():
+                      driver.find_element(By.CSS_SELECTOR, 'input#checkForBreaches').click()
+
+                  driver.find_element(By.XPATH, "//button[contains(., 'Create account')]").click()
+
+                  wait.until_not(EC.title_contains("Create account"))
+
+                  driver.find_element(By.XPATH, "//button[contains(., 'Continue')]").click()
+
+                  driver.find_element(By.CSS_SELECTOR, 'input#login_input_master-password').send_keys(
+                      '${userPassword}'
+                  )
+                  driver.find_element(By.XPATH, "//button[contains(., 'Log in')]").click()
+
+                  wait.until(EC.title_contains("Vaults"))
+
+                  driver.find_element(By.XPATH, "//button[contains(., 'New item')]").click()
+
+                  driver.find_element(By.CSS_SELECTOR, 'input#name').send_keys(
+                      'secrets'
+                  )
+                  driver.find_element(By.CSS_SELECTOR, 'input#loginPassword').send_keys(
+                      '${storedPassword}'
+                  )
+
+                  driver.find_element(By.XPATH, "//button[contains(., 'Save')]").click()
+                '';
+              in
+              [ pkgs.firefox-unwrapped pkgs.geckodriver testRunner ];
+
+          }
+        ];
+
+      client = { pkgs, ... }:
+        {
+          environment.systemPackages = [ pkgs.bitwarden-cli ];
+        };
+    };
+
+    testScript = ''
+      start_all()
+      server.wait_for_unit("vaultwarden.service")
+      server.wait_for_open_port(80)
+
+      with subtest("configure the cli"):
+          client.succeed("bw --nointeraction config server http://server")
+
+      with subtest("can't login to nonexistent account"):
+          client.fail(
+              "bw --nointeraction --raw login ${userEmail} ${userPassword}"
+          )
+
+      with subtest("use the web interface to sign up, log in, and save a password"):
+          server.succeed("PYTHONUNBUFFERED=1 systemd-cat -t test-runner test-runner")
+
+      with subtest("log in with the cli"):
+          key = client.succeed(
+              "bw --nointeraction --raw login ${userEmail} ${userPassword}"
+          ).strip()
+
+      with subtest("sync with the cli"):
+          client.succeed(f"bw --nointeraction --raw --session {key} sync -f")
+
+      with subtest("get the password with the cli"):
+          password = client.succeed(
+              f"bw --nointeraction --raw --session {key} list items | ${pkgs.jq}/bin/jq -r .[].login.password"
+          )
+          assert password.strip() == "${storedPassword}"
+    '';
+  };
+in
+builtins.listToAttrs (
+  map
+    (backend: { name = backend; value = makeVaultwardenTest backend; })
+    backends
+)
diff --git a/nixpkgs/nixos/tests/vector.nix b/nixpkgs/nixos/tests/vector.nix
new file mode 100644
index 000000000000..a55eb4e012c5
--- /dev/null
+++ b/nixpkgs/nixos/tests/vector.nix
@@ -0,0 +1,37 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../.. { inherit system config; } }:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+{
+  test1 = makeTest {
+    name = "vector-test1";
+    meta.maintainers = [ pkgs.lib.maintainers.happysalada ];
+
+    nodes.machine = { config, pkgs, ... }: {
+      services.vector = {
+        enable = true;
+        journaldAccess = true;
+        settings = {
+          sources.journald.type = "journald";
+
+          sinks = {
+            file = {
+              type = "file";
+              inputs = [ "journald" ];
+              path = "/var/lib/vector/logs.log";
+              encoding = { codec = "json"; };
+            };
+          };
+        };
+      };
+    };
+
+    # Ensure Vector is forwarding journald messages to the configured file sink
+    testScript = ''
+      machine.wait_for_unit("vector.service")
+      machine.wait_for_file("/var/lib/vector/logs.log")
+    '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/vengi-tools.nix b/nixpkgs/nixos/tests/vengi-tools.nix
new file mode 100644
index 000000000000..fd7567991487
--- /dev/null
+++ b/nixpkgs/nixos/tests/vengi-tools.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "vengi-tools";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = [ pkgs.vengi-tools ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.execute("vengi-voxedit >&2 &")
+      machine.wait_for_window("voxedit")
+      # Let the window load fully
+      machine.sleep(15)
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/victoriametrics.nix b/nixpkgs/nixos/tests/victoriametrics.nix
new file mode 100644
index 000000000000..5e364b67bf87
--- /dev/null
+++ b/nixpkgs/nixos/tests/victoriametrics.nix
@@ -0,0 +1,33 @@
+# This test runs victoriametrics and checks that it is up and running
+
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "victoriametrics";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ yorickvp ];
+  };
+
+  nodes = {
+    one = { ... }: {
+      services.victoriametrics.enable = true;
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    one.wait_for_unit("victoriametrics.service")
+
+    # write some points and run simple query
+    out = one.succeed(
+        "curl -f -d 'measurement,tag1=value1,tag2=value2 field1=123,field2=1.23' -X POST 'http://localhost:8428/write'"
+    )
+    cmd = (
+        """curl -f -s -G 'http://localhost:8428/api/v1/export' -d 'match={__name__!=""}'"""
+    )
+    # data takes a while to appear
+    one.wait_until_succeeds(f"[[ $({cmd} | wc -l) -ne 0 ]]")
+    out = one.succeed(cmd)
+    assert '"values":[123]' in out
+    assert '"values":[1.23]' in out
+  '';
+})
diff --git a/nixpkgs/nixos/tests/vikunja.nix b/nixpkgs/nixos/tests/vikunja.nix
new file mode 100644
index 000000000000..60fd5ce13854
--- /dev/null
+++ b/nixpkgs/nixos/tests/vikunja.nix
@@ -0,0 +1,64 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "vikunja";
+
+  meta.maintainers = with lib.maintainers; [ leona ];
+
+  nodes = {
+    vikunjaSqlite = { ... }: {
+      services.vikunja = {
+        enable = true;
+        database = {
+          type = "sqlite";
+        };
+        frontendScheme = "http";
+        frontendHostname = "localhost";
+      };
+      services.nginx.enable = true;
+    };
+    vikunjaPostgresql = { pkgs, ... }: {
+      services.vikunja = {
+        enable = true;
+        database = {
+          type = "postgres";
+          user = "vikunja-api";
+          database = "vikunja-api";
+          host = "/run/postgresql";
+        };
+        frontendScheme = "http";
+        frontendHostname = "localhost";
+        port = 9090;
+      };
+      services.postgresql = {
+        enable = true;
+        ensureDatabases = [ "vikunja-api" ];
+        ensureUsers = [
+          { name = "vikunja-api";
+            ensureDBOwnership = true;
+          }
+        ];
+      };
+      services.nginx.enable = true;
+    };
+  };
+
+  testScript =
+    ''
+      vikunjaSqlite.wait_for_unit("vikunja-api.service")
+      vikunjaSqlite.wait_for_open_port(3456)
+      vikunjaSqlite.succeed("curl --fail http://localhost:3456/api/v1/info")
+
+      vikunjaSqlite.wait_for_unit("nginx.service")
+      vikunjaSqlite.wait_for_open_port(80)
+      vikunjaSqlite.succeed("curl --fail http://localhost/api/v1/info")
+      vikunjaSqlite.succeed("curl --fail http://localhost")
+
+      vikunjaPostgresql.wait_for_unit("vikunja-api.service")
+      vikunjaPostgresql.wait_for_open_port(9090)
+      vikunjaPostgresql.succeed("curl --fail http://localhost:9090/api/v1/info")
+
+      vikunjaPostgresql.wait_for_unit("nginx.service")
+      vikunjaPostgresql.wait_for_open_port(80)
+      vikunjaPostgresql.succeed("curl --fail http://localhost/api/v1/info")
+      vikunjaPostgresql.succeed("curl --fail http://localhost")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/virtualbox.nix b/nixpkgs/nixos/tests/virtualbox.nix
new file mode 100644
index 000000000000..e522d0679e15
--- /dev/null
+++ b/nixpkgs/nixos/tests/virtualbox.nix
@@ -0,0 +1,522 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; },
+  debug ? false,
+  enableUnfree ? false,
+  use64bitGuest ? true
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  testVMConfig = vmName: attrs: { config, pkgs, lib, ... }: let
+    guestAdditions = pkgs.linuxPackages.virtualboxGuestAdditions;
+
+    miniInit = ''
+      #!${pkgs.runtimeShell} -xe
+      export PATH="${lib.makeBinPath [ pkgs.coreutils pkgs.util-linux ]}"
+
+      mkdir -p /run/dbus /var
+      ln -s /run /var
+      cat > /etc/passwd <<EOF
+      root:x:0:0::/root:/bin/false
+      messagebus:x:1:1::/run/dbus:/bin/false
+      EOF
+      cat > /etc/group <<EOF
+      root:x:0:
+      messagebus:x:1:
+      EOF
+
+      "${pkgs.dbus}/bin/dbus-daemon" --fork \
+        --config-file="${pkgs.dbus}/share/dbus-1/system.conf"
+
+      ${guestAdditions}/bin/VBoxService
+      ${(attrs.vmScript or (const "")) pkgs}
+
+      i=0
+      while [ ! -e /mnt-root/shutdown ]; do
+        sleep 10
+        i=$(($i + 10))
+        [ $i -le 120 ] || fail
+      done
+
+      rm -f /mnt-root/boot-done /mnt-root/shutdown
+    '';
+  in {
+    boot.kernelParams = [
+      "console=tty0" "console=ttyS0" "ignore_loglevel"
+      "boot.trace" "panic=1" "boot.panic_on_fail"
+      "init=${pkgs.writeScript "mini-init.sh" miniInit}"
+    ];
+
+    fileSystems."/" = {
+      device = "vboxshare";
+      fsType = "vboxsf";
+    };
+
+    virtualisation.virtualbox.guest.enable = true;
+
+    boot.initrd.kernelModules = [
+      "af_packet" "vboxsf"
+      "virtio" "virtio_pci" "virtio_ring" "virtio_net" "vboxguest"
+    ];
+
+    boot.initrd.extraUtilsCommands = ''
+      copy_bin_and_libs "${guestAdditions}/bin/mount.vboxsf"
+      copy_bin_and_libs "${pkgs.util-linux}/bin/unshare"
+      ${(attrs.extraUtilsCommands or (const "")) pkgs}
+    '';
+
+    boot.initrd.postMountCommands = ''
+      touch /mnt-root/boot-done
+      hostname "${vmName}"
+      mkdir -p /nix/store
+      unshare -m ${escapeShellArg pkgs.runtimeShell} -c '
+        mount -t vboxsf nixstore /nix/store
+        exec "$stage2Init"
+      '
+      poweroff -f
+    '';
+
+    system.requiredKernelConfig = with config.lib.kernelConfig; [
+      (isYes "SERIAL_8250_CONSOLE")
+      (isYes "SERIAL_8250")
+    ];
+
+    networking.usePredictableInterfaceNames = false;
+  };
+
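+  # When debug is enabled, tail the given log file (and its rotations) into the journal under the given tag; otherwise emit a Python no-op.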
+  mkLog = logfile: tag: let
+    rotated = map (i: "${logfile}.${toString i}") (range 1 9);
+    all = concatMapStringsSep " " (f: "\"${f}\"") ([logfile] ++ rotated);
+    logcmd = "tail -F ${all} 2> /dev/null | logger -t \"${tag}\"";
+  in if debug then "machine.execute(ru('${logcmd} & disown'))" else "pass";
+
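+  # Build a bootable VDI disk image for the guest: a minimal NixOS kernel/initrd plus GRUB, assembled as a raw image and converted afterwards.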
+  testVM = vmName: vmScript: let
+    cfg = (import ../lib/eval-config.nix {
+      system = if use64bitGuest then "x86_64-linux" else "i686-linux";
+      modules = [
+        ../modules/profiles/minimal.nix
+        (testVMConfig vmName vmScript)
+      ];
+    }).config;
+  in pkgs.vmTools.runInLinuxVM (pkgs.runCommand "virtualbox-image" {
+    preVM = ''
+      mkdir -p "$out"
+      diskImage="$(pwd)/qimage"
+      ${pkgs.vmTools.qemu}/bin/qemu-img create -f raw "$diskImage" 100M
+    '';
+
+    postVM = ''
+      echo "creating VirtualBox disk image..."
+      ${pkgs.vmTools.qemu}/bin/qemu-img convert -f raw -O vdi \
+        "$diskImage" "$out/disk.vdi"
+    '';
+
+    buildInputs = [ pkgs.util-linux pkgs.perl ];
+  } ''
+    ${pkgs.parted}/sbin/parted --script /dev/vda mklabel msdos
+    ${pkgs.parted}/sbin/parted --script /dev/vda -- mkpart primary ext2 1M -1s
+    ${pkgs.e2fsprogs}/sbin/mkfs.ext4 /dev/vda1
+    ${pkgs.e2fsprogs}/sbin/tune2fs -c 0 -i 0 /dev/vda1
+    mkdir /mnt
+    mount /dev/vda1 /mnt
+    cp "${cfg.system.build.kernel}/bzImage" /mnt/linux
+    cp "${cfg.system.build.initialRamdisk}/initrd" /mnt/initrd
+
+    ${pkgs.grub2}/bin/grub-install --boot-directory=/mnt /dev/vda
+
+    cat > /mnt/grub/grub.cfg <<GRUB
+    set root=hd0,1
+    linux /linux ${concatStringsSep " " cfg.boot.kernelParams}
+    initrd /initrd
+    boot
+    GRUB
+    umount /mnt
+  '');
+
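+  # Compose the VBoxManage flags, the host-side log socket/service units, and the Python helper functions for one test VM.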
+  createVM = name: attrs: let
+    mkFlags = concatStringsSep " ";
+
+    sharePath = "/home/alice/vboxshare-${name}";
+
+    createFlags = mkFlags [
+      "--ostype ${if use64bitGuest then "Linux26_64" else "Linux26"}"
+      "--register"
+    ];
+
+    vmFlags = mkFlags ([
+      "--uart1 0x3F8 4"
+      "--uartmode1 client /run/virtualbox-log-${name}.sock"
+      "--memory 768"
+      "--audio none"
+    ] ++ (attrs.vmFlags or []));
+
+    controllerFlags = mkFlags [
+      "--name SATA"
+      "--add sata"
+      "--bootable on"
+      "--hostiocache on"
+    ];
+
+    diskFlags = mkFlags [
+      "--storagectl SATA"
+      "--port 0"
+      "--device 0"
+      "--type hdd"
+      "--mtype immutable"
+      "--medium ${testVM name attrs}/disk.vdi"
+    ];
+
+    sharedFlags = mkFlags [
+      "--name vboxshare"
+      "--hostpath ${sharePath}"
+    ];
+
+    nixstoreFlags = mkFlags [
+      "--name nixstore"
+      "--hostpath /nix/store"
+      "--readonly"
+    ];
+  in {
+    machine = {
+      systemd.sockets."vboxtestlog-${name}" = {
+        description = "VirtualBox Test Machine Log Socket For ${name}";
+        wantedBy = [ "sockets.target" ];
+        before = [ "multi-user.target" ];
+        socketConfig.ListenStream = "/run/virtualbox-log-${name}.sock";
+        socketConfig.Accept = true;
+      };
+
+      systemd.services."vboxtestlog-${name}@" = {
+        description = "VirtualBox Test Machine Log For ${name}";
+        serviceConfig.StandardInput = "socket";
+        serviceConfig.StandardOutput = "journal";
+        serviceConfig.SyslogIdentifier = "GUEST-${name}";
+        serviceConfig.ExecStart = "${pkgs.coreutils}/bin/cat";
+      };
+    };
+
+    testSubs = ''
+
+
+      ${name}_sharepath = "${sharePath}"
+
+
+      def check_running_${name}():
+          cmd = "VBoxManage list runningvms | grep -q '^\"${name}\"'"
+          (status, _) = machine.execute(ru(cmd))
+          return status == 0
+
+
+      def cleanup_${name}():
+          if check_running_${name}():
+              machine.execute(ru("VBoxManage controlvm ${name} poweroff"))
+          machine.succeed("rm -rf ${sharePath}")
+          machine.succeed("mkdir -p ${sharePath}")
+          machine.succeed("chown alice:users ${sharePath}")
+
+
+      def create_vm_${name}():
+          cleanup_${name}()
+          vbm("createvm --name ${name} ${createFlags}")
+          vbm("modifyvm ${name} ${vmFlags}")
+          vbm("setextradata ${name} VBoxInternal/PDM/HaltOnReset 1")
+          vbm("storagectl ${name} ${controllerFlags}")
+          vbm("storageattach ${name} ${diskFlags}")
+          vbm("sharedfolder add ${name} ${sharedFlags}")
+          vbm("sharedfolder add ${name} ${nixstoreFlags}")
+
+          ${mkLog "$HOME/VirtualBox VMs/${name}/Logs/VBox.log" "HOST-${name}"}
+
+
+      def destroy_vm_${name}():
+          cleanup_${name}()
+          vbm("unregistervm ${name} --delete")
+
+
+      def wait_for_vm_boot_${name}():
+          machine.execute(
+              ru(
+                  "set -e; i=0; "
+                  "while ! test -e ${sharePath}/boot-done; do "
+                  "sleep 10; i=$(($i + 10)); [ $i -le 3600 ]; "
+                  "VBoxManage list runningvms | grep -q '^\"${name}\"'; "
+                  "done"
+              )
+          )
+
+
+      def wait_for_ip_${name}(interface):
+          property = f"/VirtualBox/GuestInfo/Net/{interface}/V4/IP"
+          getip = f"VBoxManage guestproperty get ${name} {property} | sed -n -e 's/^Value: //p'"
+
+          ip = machine.succeed(
+              ru(
+                  "for i in $(seq 1000); do "
+                  f'if ipaddr="$({getip})" && [ -n "$ipaddr" ]; then '
+                  'echo "$ipaddr"; exit 0; '
+                  "fi; "
+                  "sleep 1; "
+                  "done; "
+                  "echo 'Could not get IPv4 address for ${name}!' >&2; "
+                  "exit 1"
+              )
+          ).strip()
+          return ip
+
+
+      def wait_for_startup_${name}(nudge=lambda: None):
+          for _ in range(0, 130, 10):
+              machine.sleep(10)
+              if check_running_${name}():
+                  return
+              nudge()
+          raise Exception("VirtualBox VM didn't start up within 2 minutes")
+
+
+      def wait_for_shutdown_${name}():
+          for _ in range(0, 130, 10):
+              machine.sleep(10)
+              if not check_running_${name}():
+                  return
+          raise Exception("VirtualBox VM didn't shut down within 2 minutes")
+
+
+      def shutdown_vm_${name}():
+          machine.succeed(ru("touch ${sharePath}/shutdown"))
+          machine.execute(
+              "set -e; i=0; "
+              "while test -e ${sharePath}/shutdown "
+              "        -o -e ${sharePath}/boot-done; do "
+              "sleep 1; i=$(($i + 1)); [ $i -le 3600 ]; "
+              "done"
+          )
+          wait_for_shutdown_${name}()
+    '';
+  };
+
+  hostonlyVMFlags = [
+    "--nictype1 virtio"
+    "--nictype2 virtio"
+    "--nic2 hostonly"
+    "--hostonlyadapter2 vboxnet0"
+  ];
+
+  # The VirtualBox Oracle Extension Pack lets you use USB 3.0 (xHCI).
+  enableExtensionPackVMFlags = [
+    "--usbxhci on"
+  ];
+
+  dhcpScript = pkgs: ''
+    ${pkgs.dhcpcd}/bin/dhcpcd eth0 eth1
+
+    otherIP="$(${pkgs.netcat}/bin/nc -l 1234 || :)"
+    ${pkgs.iputils}/bin/ping -I eth1 -c1 "$otherIP"
+    echo "$otherIP reachable" | ${pkgs.netcat}/bin/nc -l 5678 || :
+  '';
+
+  sysdDetectVirt = pkgs: ''
+    ${pkgs.systemd}/bin/systemd-detect-virt > /mnt-root/result
+  '';
+
+  vboxVMs = mapAttrs createVM {
+    simple = {};
+
+    detectvirt.vmScript = sysdDetectVirt;
+
+    test1.vmFlags = hostonlyVMFlags;
+    test1.vmScript = dhcpScript;
+
+    test2.vmFlags = hostonlyVMFlags;
+    test2.vmScript = dhcpScript;
+
+    headless.virtualisation.virtualbox.headless = true;
+    headless.services.xserver.enable = false;
+  };
+
+  vboxVMsWithExtpack = mapAttrs createVM {
+    testExtensionPack.vmFlags = enableExtensionPackVMFlags;
+  };
+
+  mkVBoxTest = useExtensionPack: vms: name: testScript: makeTest {
+    name = "virtualbox-${name}";
+
+    nodes.machine = { lib, config, ... }: {
+      imports = let
+        mkVMConf = name: val: val.machine // { key = "${name}-config"; };
+        vmConfigs = mapAttrsToList mkVMConf vms;
+      in [ ./common/user-account.nix ./common/x11.nix ] ++ vmConfigs;
+      virtualisation.memorySize = 2048;
+      virtualisation.qemu.options = ["-cpu" "kvm64,svm=on,vmx=on"];
+      virtualisation.virtualbox.host.enable = true;
+      test-support.displayManager.auto.user = "alice";
+      users.users.alice.extraGroups = let
+        inherit (config.virtualisation.virtualbox.host) enableHardening;
+      in lib.mkIf enableHardening (lib.singleton "vboxusers");
+      virtualisation.virtualbox.host.enableExtensionPack = useExtensionPack;
+      nixpkgs.config.allowUnfree = useExtensionPack;
+    };
+
+    testScript = ''
+      from shlex import quote
+      ${concatStrings (mapAttrsToList (_: getAttr "testSubs") vms)}
+
+      def ru(cmd: str) -> str:
+          return f"su - alice -c {quote(cmd)}"
+
+
+      def vbm(cmd: str) -> str:
+          return machine.succeed(ru(f"VBoxManage {cmd}"))
+
+
+      def remove_uuids(output: str) -> str:
+          return "\n".join(
+              [line for line in (output or "").splitlines() if not line.startswith("UUID:")]
+          )
+
+
+      machine.wait_for_x()
+
+      ${mkLog "$HOME/.config/VirtualBox/VBoxSVC.log" "HOST-SVC"}
+
+      ${testScript}
+      # (keep black happy)
+    '';
+
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ aszlig ];
+    };
+  };
+
+  unfreeTests = mapAttrs (mkVBoxTest true vboxVMsWithExtpack) {
+    enable-extension-pack = ''
+      create_vm_testExtensionPack()
+      vbm("startvm testExtensionPack")
+      wait_for_startup_testExtensionPack()
+      machine.screenshot("cli_started")
+      wait_for_vm_boot_testExtensionPack()
+      machine.screenshot("cli_booted")
+
+      with machine.nested("Checking for privilege escalation"):
+          machine.fail("test -e '/root/VirtualBox VMs'")
+          machine.fail("test -e '/root/.config/VirtualBox'")
+          machine.succeed("test -e '/home/alice/VirtualBox VMs'")
+
+      shutdown_vm_testExtensionPack()
+      destroy_vm_testExtensionPack()
+    '';
+  };
+
+in mapAttrs (mkVBoxTest false vboxVMs) {
+  simple-gui = ''
+    # Home to select Tools, down to move to the VM, enter to start it.
+    def send_vm_startup():
+        machine.send_key("home")
+        machine.send_key("down")
+        machine.send_key("ret")
+
+
+    create_vm_simple()
+    machine.succeed(ru("VirtualBox >&2 &"))
+    machine.wait_until_succeeds(ru("xprop -name 'Oracle VM VirtualBox Manager'"))
+    machine.sleep(5)
+    machine.screenshot("gui_manager_started")
+    send_vm_startup()
+    machine.screenshot("gui_manager_sent_startup")
+    wait_for_startup_simple(send_vm_startup)
+    machine.screenshot("gui_started")
+    wait_for_vm_boot_simple()
+    machine.screenshot("gui_booted")
+    shutdown_vm_simple()
+    machine.sleep(5)
+    machine.screenshot("gui_stopped")
+    machine.send_key("ctrl-q")
+    machine.sleep(5)
+    machine.screenshot("gui_manager_stopped")
+    destroy_vm_simple()
+  '';
+
+  simple-cli = ''
+    create_vm_simple()
+    vbm("startvm simple")
+    wait_for_startup_simple()
+    machine.screenshot("cli_started")
+    wait_for_vm_boot_simple()
+    machine.screenshot("cli_booted")
+
+    with machine.nested("Checking for privilege escalation"):
+        machine.fail("test -e '/root/VirtualBox VMs'")
+        machine.fail("test -e '/root/.config/VirtualBox'")
+        machine.succeed("test -e '/home/alice/VirtualBox VMs'")
+
+    shutdown_vm_simple()
+    destroy_vm_simple()
+  '';
+
+  headless = ''
+    create_vm_headless()
+    machine.succeed(ru("VBoxHeadless --startvm headless >&2 & disown %1"))
+    wait_for_startup_headless()
+    wait_for_vm_boot_headless()
+    shutdown_vm_headless()
+    destroy_vm_headless()
+  '';
+
+  host-usb-permissions = ''
+    import sys
+
+    user_usb = remove_uuids(vbm("list usbhost"))
+    print(user_usb, file=sys.stderr)
+    root_usb = remove_uuids(machine.succeed("VBoxManage list usbhost"))
+    print(root_usb, file=sys.stderr)
+
+    if user_usb != root_usb:
+        raise Exception("USB host devices differ for root and normal user")
+    if "<none>" in user_usb:
+        raise Exception("No USB host devices found")
+  '';
+
+  systemd-detect-virt = ''
+    create_vm_detectvirt()
+    vbm("startvm detectvirt")
+    wait_for_startup_detectvirt()
+    wait_for_vm_boot_detectvirt()
+    shutdown_vm_detectvirt()
+    result = machine.succeed(f"cat '{detectvirt_sharepath}/result'").strip()
+    destroy_vm_detectvirt()
+    if result != "oracle":
+        raise Exception(f'systemd-detect-virt returned "{result}" instead of "oracle"')
+  '';
+
+  net-hostonlyif = ''
+    create_vm_test1()
+    create_vm_test2()
+
+    vbm("startvm test1")
+    wait_for_startup_test1()
+    wait_for_vm_boot_test1()
+
+    vbm("startvm test2")
+    wait_for_startup_test2()
+    wait_for_vm_boot_test2()
+
+    machine.screenshot("net_booted")
+
+    test1_ip = wait_for_ip_test1(1)
+    test2_ip = wait_for_ip_test2(1)
+
+    machine.succeed(f"echo '{test2_ip}' | nc -N '{test1_ip}' 1234")
+    machine.succeed(f"echo '{test1_ip}' | nc -N '{test2_ip}' 1234")
+
+    machine.wait_until_succeeds(f"nc -N '{test1_ip}' 5678 < /dev/null >&2")
+    machine.wait_until_succeeds(f"nc -N '{test2_ip}' 5678 < /dev/null >&2")
+
+    shutdown_vm_test1()
+    shutdown_vm_test2()
+
+    destroy_vm_test1()
+    destroy_vm_test2()
+  '';
+} // (optionalAttrs enableUnfree unfreeTests)
diff --git a/nixpkgs/nixos/tests/vscode-remote-ssh.nix b/nixpkgs/nixos/tests/vscode-remote-ssh.nix
new file mode 100644
index 000000000000..de7cc6badc9a
--- /dev/null
+++ b/nixpkgs/nixos/tests/vscode-remote-ssh.nix
@@ -0,0 +1,124 @@
+import ./make-test-python.nix ({ lib, ... }@args: let
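+  # Extend pkgs so that only the unfree VS Code packages required by this test are allowed.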
+  pkgs = args.pkgs.extend (self: super: {
+    stdenv = super.stdenv.override {
+      config = super.config // {
+        allowUnfreePredicate = pkg: builtins.elem (lib.getName pkg) [
+          "vscode" "vscode-with-extensions" "vscode-extension-ms-vscode-remote-remote-ssh"
+        ];
+      };
+    };
+  });
+
+  inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+
+  inherit (pkgs.vscode.passthru) rev vscodeServer;
+in {
+  name = "vscode-remote-ssh";
+  meta.maintainers = with lib.maintainers; [ Enzime ];
+
+  nodes = let
+    serverAddress = "192.168.0.2";
+    clientAddress = "192.168.0.1";
+  in {
+    server = { ... }: {
+      networking.interfaces.eth1.ipv4.addresses = [ { address = serverAddress; prefixLength = 24; } ];
+      services.openssh.enable = true;
+      users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+      virtualisation.additionalPaths = with pkgs; [ patchelf bintools stdenv.cc.cc.lib ];
+    };
+    client = { ... }: {
+      imports = [ ./common/x11.nix ./common/user-account.nix ];
+      networking.interfaces.eth1.ipv4.addresses = [ { address = clientAddress; prefixLength = 24; } ];
+      networking.hosts.${serverAddress} = [ "server" ];
+      test-support.displayManager.auto.user = "alice";
+      environment.systemPackages = [
+        (pkgs.vscode-with-extensions.override {
+          vscodeExtensions = [
+            pkgs.vscode-extensions.ms-vscode-remote.remote-ssh
+          ];
+        })
+      ];
+    };
+  };
+
+  enableOCR = true;
+
+  testScript = let
+    jq = "${pkgs.jq}/bin/jq";
+
+    sshConfig = builtins.toFile "ssh.conf" ''
+      UserKnownHostsFile=/dev/null
+      StrictHostKeyChecking=no
+    '';
+
+    vscodeConfig = builtins.toFile "settings.json" ''
+      {
+        "window.zoomLevel": 1,
+        "security.workspace.trust.startupPrompt": "always"
+      }
+    '';
+  in ''
+    def connect_with_remote_ssh(screenshot, should_succeed):
+      print(f"connect_with_remote_ssh({screenshot=}, {should_succeed=})")
+
+      if server.execute("test -d ~/.vscode-server")[0] == 0:
+        server.succeed("rm -r ~/.vscode-server")
+
+      server.succeed("mkdir -p ~/.vscode-server/bin")
+      server.succeed("cp -r ${vscodeServer} ~/.vscode-server/bin/${rev}")
+
+      client.succeed("sudo -u alice code --remote=ssh-remote+root@server /root")
+      client.wait_for_window("Visual Studio Code")
+
+      client.wait_for_text("Do you trust the authors" if should_succeed else "Disconnected from SSH")
+      client.screenshot(screenshot)
+
+      if should_succeed:
+        # Press the Don't Trust button
+        client.send_key("tab")
+        client.send_key("tab")
+        client.send_key("tab")
+        client.send_key("\n")
+      else:
+        # Close the error dialog
+        client.send_key("esc")
+
+      # Don't send Ctrl-q too quickly, otherwise it might not get sent to VS Code
+      client.sleep(1)
+      client.send_key("ctrl-q")
+      client.wait_until_fails("pidof code")
+
+
+    start_all()
+    server.wait_for_open_port(22)
+
+    VSCODE_COMMIT = server.execute("${jq} -r .commit ${pkgs.vscode}/lib/vscode/resources/app/product.json")[1].rstrip()
+    SERVER_COMMIT = server.execute("${jq} -r .commit ${vscodeServer}/product.json")[1].rstrip()
+
+    print(f"{VSCODE_COMMIT=} {SERVER_COMMIT=}")
+    assert VSCODE_COMMIT == SERVER_COMMIT, "VSCODE_COMMIT and SERVER_COMMIT do not match"
+
+    client.wait_until_succeeds("ping -c1 server")
+    client.succeed("sudo -u alice mkdir ~alice/.ssh")
+    client.succeed("sudo -u alice install -Dm 600 ${snakeOilPrivateKey} ~alice/.ssh/id_ecdsa")
+    client.succeed("sudo -u alice install ${sshConfig} ~alice/.ssh/config")
+    client.succeed("sudo -u alice install -Dm 644 ${vscodeConfig} ~alice/.config/Code/User/settings.json")
+
+    client.wait_for_x()
+    client.wait_for_file("~alice/.Xauthority")
+    client.succeed("xauth merge ~alice/.Xauthority")
+    # Move the mouse out of the way
+    client.succeed("${pkgs.xdotool}/bin/xdotool mousemove 0 0")
+
+    with subtest("fails to connect when nixpkgs isn't available"):
+      server.fail("nix-build '<nixpkgs>' -A hello")
+      connect_with_remote_ssh(screenshot="no_node_installed", should_succeed=False)
+      server.succeed("test -e ~/.vscode-server/bin/${rev}/node")
+      server.fail("~/.vscode-server/bin/${rev}/node -v")
+
+    with subtest("connects when server can patch Node"):
+      server.succeed("mkdir -p /nix/var/nix/profiles/per-user/root/channels")
+      server.succeed("ln -s ${pkgs.path} /nix/var/nix/profiles/per-user/root/channels/nixos")
+      connect_with_remote_ssh(screenshot="build_node_with_nix", should_succeed=True)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/vscodium.nix b/nixpkgs/nixos/tests/vscodium.nix
new file mode 100644
index 000000000000..d817ce927ff8
--- /dev/null
+++ b/nixpkgs/nixos/tests/vscodium.nix
@@ -0,0 +1,79 @@
+let
+  tests = {
+    wayland = { pkgs, ... }: {
+      imports = [ ./common/wayland-cage.nix ];
+
+      services.cage.program = "${pkgs.vscodium}/bin/codium";
+
+      environment.variables.NIXOS_OZONE_WL = "1";
+      environment.variables.DISPLAY = "do not use";
+
+      fonts.packages = with pkgs; [ dejavu_fonts ];
+    };
+    xorg = { pkgs, ... }: {
+      imports = [ ./common/user-account.nix ./common/x11.nix ];
+
+      virtualisation.memorySize = 2047;
+      services.xserver.enable = true;
+      services.xserver.displayManager.sessionCommands = ''
+        ${pkgs.vscodium}/bin/codium
+      '';
+      test-support.displayManager.auto.user = "alice";
+    };
+  };
+
+  mkTest = name: machine:
+    import ./make-test-python.nix ({ pkgs, ... }: {
+      inherit name;
+
+      nodes = { "${name}" = machine; };
+
+      meta = with pkgs.lib.maintainers; {
+        maintainers = [ synthetica turion ];
+      };
+      enableOCR = true;
+
+      testScript = ''
+        @polling_condition
+        def codium_running():
+            machine.succeed('pgrep -x codium')
+
+
+        start_all()
+
+        machine.wait_for_unit('graphical.target')
+
+        codium_running.wait() # type: ignore[union-attr]
+        with codium_running: # type: ignore[union-attr]
+            # Wait until VSCodium is visible; the start page shows "Welcome".
+            machine.wait_for_text('Welcome')
+            machine.screenshot('start_screen')
+
+            test_string = 'testfile'
+
+            # Create a new file
+            machine.send_key('ctrl-n')
+            machine.wait_for_text('Untitled')
+            machine.screenshot('empty_editor')
+
+            # Type a string
+            machine.send_chars(test_string)
+            machine.wait_for_text(test_string)
+            machine.screenshot('editor')
+
+            # Save the file
+            machine.send_key('ctrl-s')
+            machine.wait_for_text('(Save|Desktop|alice|Size)')
+            machine.screenshot('save_window')
+            machine.send_key('ret')
+
+            # (the default filename is the first line of the file)
+            machine.wait_for_file(f'/home/alice/{test_string}')
+
+        # machine.send_key('ctrl-q')
+        # machine.wait_until_fails('pgrep -x codium')
+      '';
+    });
+
+in
+builtins.mapAttrs (k: v: mkTest k v { }) tests
diff --git a/nixpkgs/nixos/tests/vsftpd.nix b/nixpkgs/nixos/tests/vsftpd.nix
new file mode 100644
index 000000000000..6eaf32b22583
--- /dev/null
+++ b/nixpkgs/nixos/tests/vsftpd.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "vsftpd";
+
+  nodes = {
+    server = {
+      services.vsftpd = {
+        enable = true;
+        userlistDeny = false;
+        localUsers = true;
+        userlist = [ "ftp-test-user" ];
+        writeEnable = true;
+        localRoot = "/tmp";
+      };
+      networking.firewall.enable = false;
+
+      users = {
+        users.ftp-test-user = {
+          isSystemUser = true;
+          password = "ftp-test-password";
+          group = "ftp-test-group";
+        };
+        groups.ftp-test-group = {};
+      };
+    };
+
+    client = {};
+  };
+
+  testScript = ''
+    client.start()
+    server.wait_for_unit("vsftpd")
+    server.wait_for_open_port(21)
+
+    client.succeed("curl -u ftp-test-user:ftp-test-password ftp://server")
+    client.succeed('echo "this is a test" > /tmp/test.file.up')
+    client.succeed("curl -v -T /tmp/test.file.up -u ftp-test-user:ftp-test-password ftp://server")
+    client.succeed("curl -u ftp-test-user:ftp-test-password ftp://server/test.file.up > /tmp/test.file.down")
+    client.succeed("diff /tmp/test.file.up /tmp/test.file.down")
+    assert client.succeed("cat /tmp/test.file.up") == server.succeed("cat /tmp/test.file.up")
+    assert client.succeed("cat /tmp/test.file.down") == server.succeed("cat /tmp/test.file.up")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/warzone2100.nix b/nixpkgs/nixos/tests/warzone2100.nix
new file mode 100644
index 000000000000..568e04a46999
--- /dev/null
+++ b/nixpkgs/nixos/tests/warzone2100.nix
@@ -0,0 +1,26 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "warzone2100";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    imports = [
+      ./common/x11.nix
+    ];
+
+    services.xserver.enable = true;
+    environment.systemPackages = [ pkgs.warzone2100 ];
+  };
+
+  enableOCR = true;
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.execute("warzone2100 >&2 &")
+      machine.wait_for_window("Warzone 2100")
+      machine.wait_for_text(r"(Single Player|Multi Player|Tutorial|Options|Quit Game)")
+      machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/wasabibackend.nix b/nixpkgs/nixos/tests/wasabibackend.nix
new file mode 100644
index 000000000000..75730fe24d09
--- /dev/null
+++ b/nixpkgs/nixos/tests/wasabibackend.nix
@@ -0,0 +1,38 @@
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "wasabibackend";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ mmahut ];
+  };
+
+  nodes = {
+    machine = { ... }: {
+      services.wasabibackend = {
+        enable = true;
+        network = "testnet";
+        rpc = {
+          user = "alice";
+          port = 18332;
+        };
+      };
+      services.bitcoind."testnet" = {
+        enable = true;
+        testnet = true;
+        rpc.users = {
+          alice.passwordHMAC = "e7096bc21da60b29ecdbfcdb2c3acc62$f948e61cb587c399358ed99c6ed245a41460b4bf75125d8330c9f6fcc13d7ae7";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("wasabibackend.service")
+    machine.wait_until_succeeds(
+        "grep 'Wasabi Backend started' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt"
+    )
+    machine.sleep(5)
+    machine.succeed(
+        "grep 'Config is successfully initialized' /var/lib/wasabibackend/.walletwasabi/backend/Logs.txt"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/gotosocial.nix b/nixpkgs/nixos/tests/web-apps/gotosocial.nix
new file mode 100644
index 000000000000..6d279ab63a79
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/gotosocial.nix
@@ -0,0 +1,28 @@
+{ lib, ... }:
+{
+  name = "gotosocial";
+  meta.maintainers = with lib.maintainers; [ misuzu ];
+
+  nodes.machine = { pkgs, ... }: {
+    environment.systemPackages = [ pkgs.jq ];
+    services.gotosocial = {
+      enable = true;
+      setupPostgresqlDB = true;
+      settings = {
+        host = "localhost:8081";
+        port = 8081;
+      };
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("gotosocial.service")
+    machine.wait_for_unit("postgresql.service")
+    machine.wait_for_open_port(8081)
+
+    # check user registration via cli
+    machine.succeed("curl -sS -f http://localhost:8081/nodeinfo/2.0 | jq '.usage.users.total' | grep -q '^0$'")
+    machine.succeed("gotosocial-admin account create --username nickname --email email@example.com --password kurtz575VPeBgjVm")
+    machine.succeed("curl -sS -f http://localhost:8081/nodeinfo/2.0 | jq '.usage.users.total' | grep -q '^1$'")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/web-apps/healthchecks.nix b/nixpkgs/nixos/tests/web-apps/healthchecks.nix
new file mode 100644
index 000000000000..41c40cd5dd8d
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/healthchecks.nix
@@ -0,0 +1,42 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "healthchecks";
+
+  meta = with lib.maintainers; {
+    maintainers = [ phaer ];
+  };
+
+  nodes.machine = { ... }: {
+    services.healthchecks = {
+      enable = true;
+      settings = {
+        SITE_NAME = "MyUniqueInstance";
+        COMPRESS_ENABLED = "True";
+        SECRET_KEY_FILE = pkgs.writeText "secret"
+          "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("healthchecks.target")
+    machine.wait_until_succeeds("journalctl --since -1m --unit healthchecks --grep Listening")
+
+    with subtest("Home screen loads"):
+        machine.succeed(
+            "curl -sSfL http://localhost:8000 | grep '<title>Sign In'"
+        )
+
+    with subtest("Setting SITE_NAME via freeform option works"):
+        machine.succeed(
+            "curl -sSfL http://localhost:8000 | grep 'MyUniqueInstance</title>'"
+        )
+
+    with subtest("Manage script works"):
+        # The "shell" subcommand should succeed; it needs python in PATH.
+        assert "foo\n" == machine.succeed("echo 'print(\"foo\")' | sudo -u healthchecks healthchecks-manage shell")
+
+        # Shouldn't fail when not called by the healthchecks user either
+        assert "foo\n" == machine.succeed("echo 'print(\"foo\")' | healthchecks-manage shell")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/mastodon/default.nix b/nixpkgs/nixos/tests/web-apps/mastodon/default.nix
new file mode 100644
index 000000000000..411ebfcd731b
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/mastodon/default.nix
@@ -0,0 +1,9 @@
+{ system ? builtins.currentSystem, handleTestOn }:
+let
+  supportedSystems = [ "x86_64-linux" "i686-linux" "aarch64-linux" ];
+
+in
+{
+  standard = handleTestOn supportedSystems ./standard.nix { inherit system; };
+  remote-postgresql = handleTestOn supportedSystems ./remote-postgresql.nix { inherit system; };
+}
diff --git a/nixpkgs/nixos/tests/web-apps/mastodon/remote-postgresql.nix b/nixpkgs/nixos/tests/web-apps/mastodon/remote-postgresql.nix
new file mode 100644
index 000000000000..6548883db452
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/mastodon/remote-postgresql.nix
@@ -0,0 +1,162 @@
+import ../../make-test-python.nix ({pkgs, ...}:
+let
+  cert = pkgs: pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=mastodon.local' -days 36500
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+
+  hosts = ''
+    192.168.2.103 mastodon.local
+  '';
+
+in
+{
+  name = "mastodon-remote-postgresql";
+  meta.maintainers = with pkgs.lib.maintainers; [ erictapen izorkin ];
+
+  nodes = {
+    database = { config, ... }: {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.102"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ config.services.postgresql.port ];
+      };
+
+      services.postgresql = {
+        enable = true;
+        # TODO remove once https://github.com/NixOS/nixpkgs/pull/266270 is resolved.
+        package = pkgs.postgresql_14;
+        enableTCPIP = true;
+        authentication = ''
+          hostnossl mastodon_local mastodon_test 192.168.2.201/32 md5
+        '';
+        initialScript = pkgs.writeText "postgresql_init.sql" ''
+          CREATE ROLE mastodon_test LOGIN PASSWORD 'SoDTZcISc3f1M1LJsRLT';
+          CREATE DATABASE mastodon_local TEMPLATE template0 ENCODING UTF8;
+          GRANT ALL PRIVILEGES ON DATABASE mastodon_local TO mastodon_test;
+        '';
+      };
+    };
+
+    nginx = { nodes, ... }: {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.103"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 80 443 ];
+      };
+
+      security = {
+        pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      };
+
+      services.nginx = {
+        enable = true;
+        recommendedProxySettings = true;
+        virtualHosts."mastodon.local" = {
+          root = "/var/empty";
+          forceSSL = true;
+          enableACME = pkgs.lib.mkForce false;
+          sslCertificate = "${cert pkgs}/cert.pem";
+          sslCertificateKey = "${cert pkgs}/key.pem";
+          locations."/" = {
+            tryFiles = "$uri @proxy";
+          };
+          locations."@proxy" = {
+            proxyPass = "http://192.168.2.201:${toString nodes.server.services.mastodon.webPort}";
+            proxyWebsockets = true;
+          };
+        };
+      };
+    };
+
+    server = { config, pkgs, ... }: {
+      virtualisation.memorySize = 2048;
+
+      environment = {
+        etc = {
+          "mastodon/password-postgresql-db".text = ''
+            SoDTZcISc3f1M1LJsRLT
+          '';
+        };
+      };
+
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.201"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [
+          config.services.mastodon.webPort
+          config.services.mastodon.sidekiqPort
+        ];
+      };
+
+      services.mastodon = {
+        enable = true;
+        configureNginx = false;
+        localDomain = "mastodon.local";
+        enableUnixSocket = false;
+        streamingProcesses = 2;
+        database = {
+          createLocally = false;
+          host = "192.168.2.102";
+          port = 5432;
+          name = "mastodon_local";
+          user = "mastodon_test";
+          passwordFile = "/etc/mastodon/password-postgresql-db";
+        };
+        smtp = {
+          createLocally = false;
+          fromAddress = "mastodon@mastodon.local";
+        };
+        extraConfig = {
+          BIND = "0.0.0.0";
+          EMAIL_DOMAIN_ALLOWLIST = "example.com";
+          RAILS_SERVE_STATIC_FILES = "true";
+          TRUSTED_PROXY_IP = "192.168.2.103";
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.202"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+
+      security = {
+        pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      };
+    };
+  };
+
+  testScript = import ./script.nix {
+    inherit pkgs;
+    extraInit = ''
+      nginx.wait_for_unit("nginx.service")
+      nginx.wait_for_open_port(443)
+      database.wait_for_unit("postgresql.service")
+      database.wait_for_open_port(5432)
+    '';
+    extraShutdown = ''
+      nginx.shutdown()
+      database.shutdown()
+    '';
+  };
+})
diff --git a/nixpkgs/nixos/tests/web-apps/mastodon/script.nix b/nixpkgs/nixos/tests/web-apps/mastodon/script.nix
new file mode 100644
index 000000000000..afb7c0e0a0eb
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/mastodon/script.nix
@@ -0,0 +1,53 @@
+{ pkgs
+, extraInit ? ""
+, extraShutdown ? ""
+}:
+
+''
+  start_all()
+
+  ${extraInit}
+
+  server.wait_for_unit("redis-mastodon.service")
+  server.wait_for_unit("mastodon-sidekiq-all.service")
+  server.wait_for_unit("mastodon-streaming.target")
+  server.wait_for_unit("mastodon-web.service")
+  server.wait_for_open_port(55001)
+
+  # Check that mastodon-media-auto-remove is scheduled
+  server.succeed("systemctl status mastodon-media-auto-remove.timer")
+
+  # Check Mastodon version from remote client
+  client.succeed("curl --fail https://mastodon.local/api/v1/instance | jq -r '.version' | grep '${pkgs.mastodon.version}'")
+
+  # Check access from remote client
+  client.succeed("curl --fail https://mastodon.local/about | grep 'Mastodon hosted on mastodon.local'")
+  client.succeed("curl --fail $(curl https://mastodon.local/api/v1/instance 2> /dev/null | jq -r .thumbnail) --output /dev/null")
+
+  # Simple sanity checks of tootctl commands
+  # Check Mastodon version
+  server.succeed("mastodon-tootctl version | grep '${pkgs.mastodon.version}'")
+
+  # Manage accounts
+  server.succeed("mastodon-tootctl email_domain_blocks add example.com")
+  server.succeed("mastodon-tootctl email_domain_blocks list | grep example.com")
+  server.fail("mastodon-tootctl email_domain_blocks list | grep mastodon.local")
+  server.fail("mastodon-tootctl accounts create alice --email=alice@example.com")
+  server.succeed("mastodon-tootctl email_domain_blocks remove example.com")
+  server.succeed("mastodon-tootctl accounts create bob --email=bob@example.com")
+  server.succeed("mastodon-tootctl accounts approve bob")
+  server.succeed("mastodon-tootctl accounts delete bob")
+
+  # Manage IP access
+  server.succeed("mastodon-tootctl ip_blocks add 192.168.0.0/16 --severity=no_access")
+  server.succeed("mastodon-tootctl ip_blocks export | grep 192.168.0.0/16")
+  server.fail("mastodon-tootctl ip_blocks export | grep 172.16.0.0/16")
+  client.fail("curl --fail https://mastodon.local/about")
+  server.succeed("mastodon-tootctl ip_blocks remove 192.168.0.0/16")
+  client.succeed("curl --fail https://mastodon.local/about")
+
+  server.shutdown()
+  client.shutdown()
+
+  ${extraShutdown}
+''
diff --git a/nixpkgs/nixos/tests/web-apps/mastodon/standard.nix b/nixpkgs/nixos/tests/web-apps/mastodon/standard.nix
new file mode 100644
index 000000000000..e5eb30fef597
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/mastodon/standard.nix
@@ -0,0 +1,96 @@
+import ../../make-test-python.nix ({pkgs, ...}:
+let
+  cert = pkgs: pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=mastodon.local' -days 36500
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+
+  hosts = ''
+    192.168.2.101 mastodon.local
+  '';
+
+in
+{
+  name = "mastodon-standard";
+  meta.maintainers = with pkgs.lib.maintainers; [ erictapen izorkin turion ];
+
+  nodes = {
+    server = { pkgs, ... }: {
+
+      virtualisation.memorySize = 2048;
+
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.101"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+        firewall.allowedTCPPorts = [ 80 443 ];
+      };
+
+      security = {
+        pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      };
+
+      services.redis.servers.mastodon = {
+        enable = true;
+        bind = "127.0.0.1";
+        port = 31637;
+      };
+
+      # TODO remove once https://github.com/NixOS/nixpkgs/pull/266270 is resolved.
+      services.postgresql.package = pkgs.postgresql_14;
+
+      services.mastodon = {
+        enable = true;
+        configureNginx = true;
+        localDomain = "mastodon.local";
+        enableUnixSocket = false;
+        streamingProcesses = 2;
+        smtp = {
+          createLocally = false;
+          fromAddress = "mastodon@mastodon.local";
+        };
+        extraConfig = {
+          EMAIL_DOMAIN_ALLOWLIST = "example.com";
+        };
+      };
+
+      services.nginx = {
+        virtualHosts."mastodon.local" = {
+          enableACME = pkgs.lib.mkForce false;
+          sslCertificate = "${cert pkgs}/cert.pem";
+          sslCertificateKey = "${cert pkgs}/key.pem";
+        };
+      };
+    };
+
+    client = { pkgs, ... }: {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.102"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = hosts;
+      };
+
+      security = {
+        pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      };
+    };
+  };
+
+  testScript = import ./script.nix {
+    inherit pkgs;
+    extraInit = ''
+      server.wait_for_unit("nginx.service")
+      server.wait_for_open_port(443)
+      server.wait_for_unit("postgresql.service")
+      server.wait_for_open_port(5432)
+    '';
+  };
+})
diff --git a/nixpkgs/nixos/tests/web-apps/monica.nix b/nixpkgs/nixos/tests/web-apps/monica.nix
new file mode 100644
index 000000000000..29f5cb85bb13
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/monica.nix
@@ -0,0 +1,33 @@
+import ../make-test-python.nix ({pkgs, ...}:
+let
+  cert = pkgs.runCommand "selfSignedCerts" { nativeBuildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=localhost' -days 36500
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+in
+{
+  name = "monica";
+
+  nodes = {
+    machine = {pkgs, ...}: {
+      services.monica = {
+        enable = true;
+        hostname = "localhost";
+        appKeyFile = "${pkgs.writeText "keyfile" "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"}";
+        nginx = {
+          forceSSL = true;
+          sslCertificate = "${cert}/cert.pem";
+          sslCertificateKey = "${cert}/key.pem";
+        };
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit("monica-setup.service")
+    machine.wait_for_open_port(443)
+    machine.succeed("curl -k --fail https://localhost", timeout=10)
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/netbox-upgrade.nix b/nixpkgs/nixos/tests/web-apps/netbox-upgrade.nix
new file mode 100644
index 000000000000..b5403eb678bc
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/netbox-upgrade.nix
@@ -0,0 +1,87 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }: let
+  oldNetbox = pkgs.netbox_3_5;
+  newNetbox = pkgs.netbox_3_6;
+in {
+  name = "netbox-upgrade";
+
+  meta = with lib.maintainers; {
+    maintainers = [ minijackson raitobezarius ];
+  };
+
+  nodes.machine = { config, ... }: {
+    virtualisation.memorySize = 2048;
+    services.netbox = {
+      enable = true;
+      package = oldNetbox;
+      secretKeyFile = pkgs.writeText "secret" ''
+        abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+      '';
+    };
+
+    services.nginx = {
+      enable = true;
+
+      recommendedProxySettings = true;
+
+      virtualHosts.netbox = {
+        default = true;
+        locations."/".proxyPass = "http://localhost:${toString config.services.netbox.port}";
+        locations."/static/".alias = "/var/lib/netbox/static/";
+      };
+    };
+
+    users.users.nginx.extraGroups = [ "netbox" ];
+
+    networking.firewall.allowedTCPPorts = [ 80 ];
+
+    specialisation.upgrade.configuration.services.netbox.package = lib.mkForce newNetbox;
+  };
+
+  testScript = { nodes, ... }:
+    let
+      apiVersion = version: lib.pipe version [
+        (lib.splitString ".")
+        (lib.take 2)
+        (lib.concatStringsSep ".")
+      ];
+      oldApiVersion = apiVersion oldNetbox.version;
+      newApiVersion = apiVersion newNetbox.version;
+    in
+    ''
+      start_all()
+      machine.wait_for_unit("netbox.target")
+      machine.wait_for_unit("nginx.service")
+      machine.wait_until_succeeds("journalctl --since -1m --unit netbox --grep Listening")
+
+      def api_version(headers):
+          header = [header for header in headers.splitlines() if header.startswith("API-Version:")][0]
+          return header.split()[1]
+
+      def check_api_version(version):
+          headers = machine.succeed(
+            "curl -sSfL http://localhost/api/ --head -H 'Content-Type: application/json'"
+          )
+          assert api_version(headers) == version
+
+      with subtest("NetBox version is the old one"):
+          check_api_version("${oldApiVersion}")
+
+      # Somehow, even though netbox-housekeeping.service has After=netbox.service,
+      # netbox-housekeeping.service and netbox.service still get started at the
+      # same time, making netbox-housekeeping fail (it can't really do its
+      # housekeeping job if the database is not correctly formed).
+      #
+      # So we don't check that the upgrade went well, we just check that
+      # netbox.service is active, and that netbox-housekeeping can be run
+      # successfully afterwards.
+      #
+      # This is not good UX, but the system should be working nonetheless.
+      machine.execute("${nodes.machine.system.build.toplevel}/specialisation/upgrade/bin/switch-to-configuration test >&2")
+
+      machine.wait_for_unit("netbox.service")
+      machine.succeed("systemctl start netbox-housekeeping.service")
+
+      with subtest("NetBox version is the new one"):
+          check_api_version("${newApiVersion}")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/netbox.nix b/nixpkgs/nixos/tests/web-apps/netbox.nix
new file mode 100644
index 000000000000..233f16a8fe0d
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/netbox.nix
@@ -0,0 +1,318 @@
+let
+  ldapDomain = "example.org";
+  ldapSuffix = "dc=example,dc=org";
+
+  ldapRootUser = "admin";
+  ldapRootPassword = "foobar";
+
+  testUser = "alice";
+  testPassword = "verySecure";
+  testGroup = "netbox-users";
+in import ../make-test-python.nix ({ lib, pkgs, netbox, ... }: {
+  name = "netbox";
+
+  meta = with lib.maintainers; {
+    maintainers = [ minijackson n0emis ];
+  };
+
+  nodes.machine = { config, ... }: {
+    virtualisation.memorySize = 2048;
+    services.netbox = {
+      enable = true;
+      package = netbox;
+      secretKeyFile = pkgs.writeText "secret" ''
+        abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+      '';
+
+      enableLdap = true;
+      ldapConfigPath = pkgs.writeText "ldap_config.py" ''
+        import ldap
+        from django_auth_ldap.config import LDAPSearch, PosixGroupType
+
+        AUTH_LDAP_SERVER_URI = "ldap://localhost/"
+
+        AUTH_LDAP_USER_SEARCH = LDAPSearch(
+            "ou=accounts,ou=posix,${ldapSuffix}",
+            ldap.SCOPE_SUBTREE,
+            "(uid=%(user)s)",
+        )
+
+        AUTH_LDAP_GROUP_SEARCH = LDAPSearch(
+            "ou=groups,ou=posix,${ldapSuffix}",
+            ldap.SCOPE_SUBTREE,
+            "(objectClass=posixGroup)",
+        )
+        AUTH_LDAP_GROUP_TYPE = PosixGroupType()
+
+        # Mirror LDAP group assignments.
+        AUTH_LDAP_MIRROR_GROUPS = True
+
+        # For more granular permissions, we can map LDAP groups to Django groups.
+        AUTH_LDAP_FIND_GROUP_PERMS = True
+      '';
+    };
+
+    services.nginx = {
+      enable = true;
+
+      recommendedProxySettings = true;
+
+      virtualHosts.netbox = {
+        default = true;
+        locations."/".proxyPass = "http://localhost:${toString config.services.netbox.port}";
+        locations."/static/".alias = "/var/lib/netbox/static/";
+      };
+    };
+
+    # Adapted from the sssd-ldap NixOS test
+    services.openldap = {
+      enable = true;
+      settings = {
+        children = {
+          "cn=schema".includes = [
+            "${pkgs.openldap}/etc/schema/core.ldif"
+            "${pkgs.openldap}/etc/schema/cosine.ldif"
+            "${pkgs.openldap}/etc/schema/inetorgperson.ldif"
+            "${pkgs.openldap}/etc/schema/nis.ldif"
+          ];
+          "olcDatabase={1}mdb" = {
+            attrs = {
+              objectClass = [ "olcDatabaseConfig" "olcMdbConfig" ];
+              olcDatabase = "{1}mdb";
+              olcDbDirectory = "/var/lib/openldap/db";
+              olcSuffix = ldapSuffix;
+              olcRootDN = "cn=${ldapRootUser},${ldapSuffix}";
+              olcRootPW = ldapRootPassword;
+            };
+          };
+        };
+      };
+      declarativeContents = {
+        ${ldapSuffix} = ''
+          dn: ${ldapSuffix}
+          objectClass: top
+          objectClass: dcObject
+          objectClass: organization
+          o: ${ldapDomain}
+
+          dn: ou=posix,${ldapSuffix}
+          objectClass: top
+          objectClass: organizationalUnit
+
+          dn: ou=accounts,ou=posix,${ldapSuffix}
+          objectClass: top
+          objectClass: organizationalUnit
+
+          dn: uid=${testUser},ou=accounts,ou=posix,${ldapSuffix}
+          objectClass: person
+          objectClass: posixAccount
+          userPassword: ${testPassword}
+          homeDirectory: /home/${testUser}
+          uidNumber: 1234
+          gidNumber: 1234
+          cn: ""
+          sn: ""
+
+          dn: ou=groups,ou=posix,${ldapSuffix}
+          objectClass: top
+          objectClass: organizationalUnit
+
+          dn: cn=${testGroup},ou=groups,ou=posix,${ldapSuffix}
+          objectClass: posixGroup
+          gidNumber: 2345
+          memberUid: ${testUser}
+        '';
+      };
+    };
+
+    users.users.nginx.extraGroups = [ "netbox" ];
+
+    networking.firewall.allowedTCPPorts = [ 80 ];
+  };
+
+  testScript = let
+    changePassword = pkgs.writeText "change-password.py" ''
+      from django.contrib.auth.models import User
+      u = User.objects.get(username='netbox')
+      u.set_password('netbox')
+      u.save()
+    '';
+  in ''
+    from typing import Any, Dict
+    import json
+
+    start_all()
+    machine.wait_for_unit("netbox.target")
+    machine.wait_until_succeeds("journalctl --since -1m --unit netbox --grep Listening")
+
+    with subtest("Home screen loads"):
+        machine.succeed(
+            "curl -sSfL http://[::1]:8001 | grep '<title>Home | NetBox</title>'"
+        )
+
+    with subtest("Staticfiles are generated"):
+        machine.succeed("test -e /var/lib/netbox/static/netbox.js")
+
+    with subtest("Superuser can be created"):
+        machine.succeed(
+            "netbox-manage createsuperuser --noinput --username netbox --email netbox@example.com"
+        )
+        # Django doesn't have a "clean" way of inputting the password from the command line
+        machine.succeed("cat '${changePassword}' | netbox-manage shell")
+
+    machine.wait_for_unit("network.target")
+
+    with subtest("Home screen loads from nginx"):
+        machine.succeed(
+            "curl -sSfL http://localhost | grep '<title>Home | NetBox</title>'"
+        )
+
+    with subtest("Staticfiles can be fetched"):
+        machine.succeed("curl -sSfL http://localhost/static/netbox.js")
+        machine.succeed("curl -sSfL http://localhost/static/docs/")
+
+    with subtest("Can interact with API"):
+        json.loads(
+            machine.succeed("curl -sSfL -H 'Accept: application/json' 'http://localhost/api/'")
+        )
+
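+    # Provision an API token via NetBox's token provisioning endpoint and
+    # return its key for the authenticated helpers below.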
+    def login(username: str, password: str):
+        encoded_data = json.dumps({"username": username, "password": password})
+        uri = "/users/tokens/provision/"
+        result = json.loads(
+            machine.succeed(
+                "curl -sSfL "
+                "-X POST "
+                "-H 'Accept: application/json' "
+                "-H 'Content-Type: application/json' "
+                f"'http://localhost/api{uri}' "
+                f"--data '{encoded_data}'"
+            )
+        )
+        return result["key"]
+
+    with subtest("Can login"):
+        auth_token = login("netbox", "netbox")
+
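+    # Thin curl wrappers around the REST API, authenticated with the token from above.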
+    def get(uri: str):
+        return json.loads(
+            machine.succeed(
+                "curl -sSfL "
+                "-H 'Accept: application/json' "
+                f"-H 'Authorization: Token {auth_token}' "
+                f"'http://localhost/api{uri}'"
+            )
+        )
+
+    def delete(uri: str):
+        return machine.succeed(
+            "curl -sSfL "
+            f"-X DELETE "
+            "-H 'Accept: application/json' "
+            f"-H 'Authorization: Token {auth_token}' "
+            f"'http://localhost/api{uri}'"
+        )
+
+
+    def data_request(uri: str, method: str, data: Dict[str, Any]):
+        encoded_data = json.dumps(data)
+        return json.loads(
+            machine.succeed(
+                "curl -sSfL "
+                f"-X {method} "
+                "-H 'Accept: application/json' "
+                "-H 'Content-Type: application/json' "
+                f"-H 'Authorization: Token {auth_token}' "
+                f"'http://localhost/api{uri}' "
+                f"--data '{encoded_data}'"
+            )
+        )
+
+    def post(uri: str, data: Dict[str, Any]):
+      return data_request(uri, "POST", data)
+
+    def patch(uri: str, data: Dict[str, Any]):
+      return data_request(uri, "PATCH", data)
+
+    with subtest("Can create objects"):
+        result = post("/dcim/sites/", {"name": "Test site", "slug": "test-site"})
+        site_id = result["id"]
+
+        # Example from:
+        # http://netbox.extra.cea.fr/static/docs/integrations/rest-api/#creating-a-new-object
+        post("/ipam/prefixes/", {"prefix": "192.0.2.0/24", "site": site_id})
+
+        result = post(
+            "/dcim/manufacturers/",
+            {"name": "Test manufacturer", "slug": "test-manufacturer"}
+        )
+        manufacturer_id = result["id"]
+
+        # There was an issue with device-types before NetBox 3.4.0
+        result = post(
+            "/dcim/device-types/",
+            {
+                "model": "Test device type",
+                "manufacturer": manufacturer_id,
+                "slug": "test-device-type",
+            },
+        )
+        device_type_id = result["id"]
+
+    with subtest("Can list objects"):
+        result = get("/dcim/sites/")
+
+        assert result["count"] == 1
+        assert result["results"][0]["id"] == site_id
+        assert result["results"][0]["name"] == "Test site"
+        assert result["results"][0]["description"] == ""
+
+        result = get("/dcim/device-types/")
+        assert result["count"] == 1
+        assert result["results"][0]["id"] == device_type_id
+        assert result["results"][0]["model"] == "Test device type"
+
+    with subtest("Can update objects"):
+        new_description = "Test site description"
+        patch(f"/dcim/sites/{site_id}/", {"description": new_description})
+        result = get(f"/dcim/sites/{site_id}/")
+        assert result["description"] == new_description
+
+    with subtest("Can delete objects"):
+        # Delete a device-type since no object depends on it
+        delete(f"/dcim/device-types/{device_type_id}/")
+
+        result = get("/dcim/device-types/")
+        assert result["count"] == 0
+
+    with subtest("Can use the GraphQL API"):
+        encoded_data = json.dumps({
+            "query": "query { prefix_list { prefix, site { id, description } } }",
+        })
+        result = json.loads(
+            machine.succeed(
+                "curl -sSfL "
+                "-H 'Accept: application/json' "
+                "-H 'Content-Type: application/json' "
+                f"-H 'Authorization: Token {auth_token}' "
+                "'http://localhost/graphql/' "
+                f"--data '{encoded_data}'"
+            )
+        )
+
+        assert len(result["data"]["prefix_list"]) == 1
+        assert result["data"]["prefix_list"][0]["prefix"] == "192.0.2.0/24"
+        assert result["data"]["prefix_list"][0]["site"]["id"] == str(site_id)
+        assert result["data"]["prefix_list"][0]["site"]["description"] == new_description
+
+    with subtest("Can login with LDAP"):
+        machine.wait_for_unit("openldap.service")
+        login("alice", "${testPassword}")
+
+    with subtest("Can associate LDAP groups"):
+        result = get("/users/users/?username=${testUser}")
+
+        assert result["count"] == 1
+        assert any(group["name"] == "${testGroup}" for group in result["results"][0]["groups"])
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/nifi.nix b/nixpkgs/nixos/tests/web-apps/nifi.nix
new file mode 100644
index 000000000000..92f7fa231df3
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/nifi.nix
@@ -0,0 +1,30 @@
+import ../make-test-python.nix ({pkgs, ...}:
+{
+  name = "nifi";
+  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
+
+  nodes = {
+    nifi = { pkgs, ... }: {
+      virtualisation = {
+        memorySize = 2048;
+        diskSize = 4096;
+      };
+      services.nifi = {
+        enable = true;
+        enableHTTPS = false;
+      };
+    };
+  };
+
+  testScript = ''
+    nifi.start()
+
+    nifi.wait_for_unit("nifi.service")
+    nifi.wait_for_open_port(8080)
+
+    # Check if NiFi is running
+    nifi.succeed("curl --fail http://127.0.0.1:8080/nifi/login 2> /dev/null | grep 'NiFi Login'")
+
+    nifi.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/peering-manager.nix b/nixpkgs/nixos/tests/web-apps/peering-manager.nix
new file mode 100644
index 000000000000..3f0acd560d13
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/peering-manager.nix
@@ -0,0 +1,40 @@
+import ../make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "peering-manager";
+
+  meta = with lib.maintainers; {
+    maintainers = [ yuka ];
+  };
+
+  nodes.machine = { ... }: {
+    services.peering-manager = {
+      enable = true;
+      secretKeyFile = pkgs.writeText "secret" ''
+        abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789
+      '';
+    };
+  };
+
+  testScript = { nodes }: ''
+    machine.start()
+    machine.wait_for_unit("peering-manager.target")
+    machine.wait_until_succeeds("journalctl --since -1m --unit peering-manager --grep Listening")
+
+    print(machine.succeed(
+        "curl -sSfL http://[::1]:8001"
+    ))
+    with subtest("Home screen loads"):
+        machine.succeed(
+            "curl -sSfL http://[::1]:8001 | grep '<title>Home - Peering Manager</title>'"
+        )
+    with subtest("checks succeed"):
+        machine.succeed(
+            "systemctl stop peering-manager peering-manager-rq"
+        )
+        machine.succeed(
+            "sudo -u postgres psql -c 'ALTER USER \"peering-manager\" WITH SUPERUSER;'"
+        )
+        machine.succeed(
+            "cd ${nodes.machine.system.build.peeringManagerPkg}/opt/peering-manager ; peering-manager-manage test --no-input"
+        )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/peertube.nix b/nixpkgs/nixos/tests/web-apps/peertube.nix
new file mode 100644
index 000000000000..0e5f39c08a02
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/peertube.nix
@@ -0,0 +1,139 @@
+import ../make-test-python.nix ({pkgs, ...}:
+{
+  name = "peertube";
+  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
+
+  nodes = {
+    database = {
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.10"; prefixLength = 24; }
+          ];
+        };
+        firewall.allowedTCPPorts = [ 5432 31638 ];
+      };
+
+      services.postgresql = {
+        enable = true;
+        enableTCPIP = true;
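+        # Allow the PeerTube node (192.168.2.11) to authenticate with a password.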
+        authentication = ''
+          hostnossl peertube_local peertube_test 192.168.2.11/32 md5
+        '';
+        initialScript = pkgs.writeText "postgresql_init.sql" ''
+          CREATE ROLE peertube_test LOGIN PASSWORD '0gUN0C1mgST6czvjZ8T9';
+          CREATE DATABASE peertube_local TEMPLATE template0 ENCODING UTF8;
+          GRANT ALL PRIVILEGES ON DATABASE peertube_local TO peertube_test;
+          \connect peertube_local
+          CREATE EXTENSION IF NOT EXISTS pg_trgm;
+          CREATE EXTENSION IF NOT EXISTS unaccent;
+        '';
+      };
+
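+      # Bind Redis on the test network so the PeerTube node can reach it.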
+      services.redis.servers.peertube = {
+        enable = true;
+        bind = "0.0.0.0";
+        requirePass = "turrQfaQwnanGbcsdhxy";
+        port = 31638;
+      };
+    };
+
+    server = { pkgs, ... }: {
+      environment = {
+        etc = {
+          "peertube/secrets-peertube".text = ''
+            063d9c60d519597acef26003d5ecc32729083965d09181ef3949200cbe5f09ee
+          '';
+          "peertube/password-posgressql-db".text = ''
+            0gUN0C1mgST6czvjZ8T9
+          '';
+          "peertube/password-redis-db".text = ''
+            turrQfaQwnanGbcsdhxy
+          '';
+        };
+      };
+
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.11"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.2.11 peertube.local
+        '';
+        firewall.allowedTCPPorts = [ 9000 ];
+      };
+
+      services.peertube = {
+        enable = true;
+        localDomain = "peertube.local";
+        enableWebHttps = false;
+
+        secrets = {
+          secretsFile = "/etc/peertube/secrets-peertube";
+        };
+
+        database = {
+          host = "192.168.2.10";
+          name = "peertube_local";
+          user = "peertube_test";
+          passwordFile = "/etc/peertube/password-postgresql-db";
+        };
+
+        redis = {
+          host = "192.168.2.10";
+          port = 31638;
+          passwordFile = "/etc/peertube/password-redis-db";
+        };
+
+        settings = {
+          listen = {
+            hostname = "0.0.0.0";
+          };
+          instance = {
+            name = "PeerTube Test Server";
+          };
+        };
+      };
+    };
+
+    client = {
+      environment.systemPackages = [ pkgs.jq ];
+      networking = {
+        interfaces.eth1 = {
+          ipv4.addresses = [
+            { address = "192.168.2.12"; prefixLength = 24; }
+          ];
+        };
+        extraHosts = ''
+          192.168.2.11 peertube.local
+        '';
+      };
+    };
+
+  };
+
+  testScript = ''
+    start_all()
+
+    database.wait_for_unit("postgresql.service")
+    database.wait_for_unit("redis-peertube.service")
+
+    database.wait_for_open_port(5432)
+    database.wait_for_open_port(31638)
+
+    server.wait_for_unit("peertube.service")
+    server.wait_for_open_port(9000)
+
+    # Check if PeerTube is running
+    client.succeed("curl --fail http://peertube.local:9000/api/v1/config/about | jq -r '.instance.name' | grep 'PeerTube\ Test\ Server'")
+
+    # Check PeerTube CLI version
+    assert "${pkgs.peertube.version}" in server.succeed('su - peertube -s /bin/sh -c "peertube --version"')
+
+    client.shutdown()
+    server.shutdown()
+    database.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/phylactery.nix b/nixpkgs/nixos/tests/web-apps/phylactery.nix
new file mode 100644
index 000000000000..cf2689d2300d
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/phylactery.nix
@@ -0,0 +1,20 @@
+import ../make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "phylactery";
+
+  nodes.machine = { ... }: {
+    services.phylactery = {
+      enable = true;
+      port = 8080;
+      library = "/tmp";
+    };
+  };
+
+  testScript = ''
+    start_all()
+    machine.wait_for_unit('phylactery')
+    machine.wait_for_open_port(8080)
+    machine.wait_until_succeeds('curl localhost:8080')
+  '';
+
+  meta.maintainers = with lib.maintainers; [ McSinyx ];
+})
diff --git a/nixpkgs/nixos/tests/web-apps/pixelfed/default.nix b/nixpkgs/nixos/tests/web-apps/pixelfed/default.nix
new file mode 100644
index 000000000000..4464ebe43486
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/pixelfed/default.nix
@@ -0,0 +1,8 @@
+{ system ? builtins.currentSystem, handleTestOn }:
+let
+  supportedSystems = [ "x86_64-linux" "i686-linux" ];
+
+in
+{
+  standard = handleTestOn supportedSystems ./standard.nix { inherit system; };
+}
diff --git a/nixpkgs/nixos/tests/web-apps/pixelfed/standard.nix b/nixpkgs/nixos/tests/web-apps/pixelfed/standard.nix
new file mode 100644
index 000000000000..9260e27af960
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/pixelfed/standard.nix
@@ -0,0 +1,38 @@
+import ../../make-test-python.nix ({pkgs, ...}:
+{
+  name = "pixelfed-standard";
+  meta.maintainers = with pkgs.lib.maintainers; [ raitobezarius ];
+
+  nodes = {
+    server = { pkgs, ... }: {
+      services.pixelfed = {
+        enable = true;
+        domain = "pixelfed.local";
+        # Configure NGINX.
+        nginx = {};
+        secretFile = (pkgs.writeText "secrets.env" ''
+          # Snakeoil secret; any random 32-character secret from a CSPRNG will do.
+          APP_KEY=adKK9EcY8Hcj3PLU7rzG9rJ6KKTOtYfA
+        '');
+        settings."FORCE_HTTPS_URLS" = false;
+      };
+    };
+  };
+
+  testScript = ''
+    # Wait for Pixelfed PHP pool
+    server.wait_for_unit("phpfpm-pixelfed.service")
+    # Wait for NGINX
+    server.wait_for_unit("nginx.service")
+    # Wait for HTTP port
+    server.wait_for_open_port(80)
+    # Access the homepage.
+    server.succeed("curl -H 'Host: pixelfed.local' http://localhost")
+    # Create an account
+    server.succeed("pixelfed-manage user:create --name=test --username=test --email=test@test.com --password=test")
+    # Create a OAuth token.
+    # TODO: figure out how to use it to send a image/toot
+    # server.succeed("pixelfed-manage passport:client --personal")
+    # server.succeed("curl -H 'Host: pixefed.local' -H 'Accept: application/json' -H 'Authorization: Bearer secret' -F'status'='test' http://localhost/api/v1/statuses")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-apps/snipe-it.nix b/nixpkgs/nixos/tests/web-apps/snipe-it.nix
new file mode 100644
index 000000000000..123d7742056b
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/snipe-it.nix
@@ -0,0 +1,101 @@
+/*
+Snipe-IT NixOS test
+
+It covers the following scenario:
+- Installation
+- Backup and restore
+
+Scenarios NOT covered by this test (but perhaps in the future):
+- Sending and receiving emails
+*/
+{ pkgs, ... }: let
+  siteName = "NixOS Snipe-IT Test Instance";
+in {
+  name = "snipe-it";
+
+  meta.maintainers = with pkgs.lib.maintainers; [ yayayayaka ];
+
+  nodes = {
+    snipeit = { ... }: {
+      services.snipe-it = {
+        enable = true;
+        appKeyFile = toString (pkgs.writeText "snipe-it-app-key" "uTqGUN5GUmUrh/zSAYmhyzRk62pnpXICyXv9eeITI8k=");
+        hostName = "localhost";
+        database.createLocally = true;
+        mail = {
+          driver = "smtp";
+          encryption = "tls";
+          host = "localhost";
+          port = 1025;
+          from.name = "Snipe-IT NixOS test";
+          from.address = "snipe-it@localhost";
+          replyTo.address = "snipe-it@localhost";
+          user = "snipe-it@localhost";
+          passwordFile = toString (pkgs.writeText "snipe-it-mail-pass" "a-secure-mail-password");
+        };
+      };
+    };
+  };
+
+  testScript = { nodes }: let
+    backupPath = "${nodes.snipeit.services.snipe-it.dataDir}/storage/app/backups";
+
+    # Snipe-IT has been installed successfully if the site name shows up on the login page
+    checkLoginPage = { shouldSucceed ? true }: ''
+      snipeit.${if shouldSucceed then "succeed" else "fail"}("""curl http://localhost/login | grep '${siteName}'""")
+    '';
+  in ''
+    start_all()
+
+    snipeit.wait_for_unit("nginx.service")
+    snipeit.wait_for_unit("snipe-it-setup.service")
+
+    # Create an admin user
+    snipeit.succeed(
+        """
+        snipe-it snipeit:create-admin \
+            --username="admin" \
+            --email="janedoe@localhost" \
+            --password="extremesecurepassword" \
+            --first_name="Jane" \
+            --last_name="Doe"
+        """
+    )
+
+    with subtest("Circumvent the pre-flight setup by just writing some settings into the database ourself"):
+        snipeit.succeed(
+            """
+            mysql -D ${nodes.snipeit.services.snipe-it.database.name} -e "INSERT INTO settings (id, user_id, site_name) VALUES ('1', '1', '${siteName}');"
+            """
+        )
+
+        # Usually these are generated during the pre-flight setup
+        snipeit.succeed("snipe-it passport:keys")
+
+
+    # Login page should now contain the configured site name
+    ${checkLoginPage {}}
+
+    with subtest("Test Backup and restore"):
+        snipeit.succeed("snipe-it snipeit:backup")
+
+        # One zip file should have been created
+        snipeit.succeed("""[ "$(ls -1 "${backupPath}" | wc -l)" -eq 1 ]""")
+
+        # Purge the state
+        snipeit.succeed("snipe-it migrate:fresh --force")
+
+        # Login page should disappear
+        ${checkLoginPage { shouldSucceed = false; }}
+
+        # Restore the state
+        snipeit.succeed(
+            """
+            snipe-it snipeit:restore --force $(find "${backupPath}/" -type f -name "*.zip")
+            """
+        )
+
+        # Login page should be back again
+        ${checkLoginPage {}}
+  '';
+}
diff --git a/nixpkgs/nixos/tests/web-apps/writefreely.nix b/nixpkgs/nixos/tests/web-apps/writefreely.nix
new file mode 100644
index 000000000000..ce614909706b
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-apps/writefreely.nix
@@ -0,0 +1,44 @@
+{ system ? builtins.currentSystem, config ? { }
+, pkgs ? import ../../.. { inherit system config; } }:
+
+with import ../../lib/testing-python.nix { inherit system pkgs; };
+with pkgs.lib;
+
+let
+  writefreelyTest = { name, type }:
+    makeTest {
+      name = "writefreely-${name}";
+
+      nodes.machine = { config, pkgs, ... }: {
+        services.writefreely = {
+          enable = true;
+          host = "localhost:3000";
+          admin.name = "nixos";
+
+          database = {
+            inherit type;
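+            # Only the MySQL variant needs a locally provisioned database server;
+            # SQLite needs no extra service.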
+            createLocally = type == "mysql";
+            passwordFile = pkgs.writeText "db-pass" "pass";
+          };
+
+          settings.server.port = 3000;
+        };
+      };
+
+      testScript = ''
+        start_all()
+        machine.wait_for_unit("writefreely.service")
+        machine.wait_for_open_port(3000)
+        machine.succeed("curl --fail http://localhost:3000")
+      '';
+    };
+in {
+  sqlite = writefreelyTest {
+    name = "sqlite";
+    type = "sqlite3";
+  };
+  mysql = writefreelyTest {
+    name = "mysql";
+    type = "mysql";
+  };
+}
diff --git a/nixpkgs/nixos/tests/web-servers/agate.nix b/nixpkgs/nixos/tests/web-servers/agate.nix
new file mode 100644
index 000000000000..0de27b6f7d8d
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-servers/agate.nix
@@ -0,0 +1,27 @@
+{ pkgs, lib, ... }:
+{
+  name = "agate";
+  meta = with lib.maintainers; { maintainers = [ jk ]; };
+
+  nodes = {
+    geminiserver = { pkgs, ... }: {
+      services.agate = {
+        enable = true;
+        hostnames = [ "localhost" ];
+        contentDir = pkgs.writeTextDir "index.gmi" ''
+          # Hello NixOS!
+        '';
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    geminiserver.wait_for_unit("agate")
+    geminiserver.wait_for_open_port(1965)
+
+    with subtest("check is serving over gemini"):
+      response = geminiserver.succeed("${pkgs.gemget}/bin/gemget --header -o - gemini://localhost:1965")
+      print(response)
+      assert "Hello NixOS!" in response
+  '';
+}
diff --git a/nixpkgs/nixos/tests/web-servers/stargazer.nix b/nixpkgs/nixos/tests/web-servers/stargazer.nix
new file mode 100644
index 000000000000..c522cfee5dbc
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-servers/stargazer.nix
@@ -0,0 +1,31 @@
+{ pkgs, lib, ... }:
+{
+  name = "stargazer";
+  meta = with lib.maintainers; { maintainers = [ gaykitty ]; };
+
+  nodes = {
+    geminiserver = { pkgs, ... }: {
+      services.stargazer = {
+        enable = true;
+        routes = [
+          {
+            route = "localhost";
+            root = toString (pkgs.writeTextDir "index.gmi" ''
+              # Hello NixOS!
+            '');
+          }
+        ];
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    geminiserver.wait_for_unit("stargazer")
+    geminiserver.wait_for_open_port(1965)
+
+    with subtest("check is serving over gemini"):
+      response = geminiserver.succeed("${pkgs.gmni}/bin/gmni -j once -i -N gemini://localhost:1965")
+      print(response)
+      assert "Hello NixOS!" in response
+  '';
+}
diff --git a/nixpkgs/nixos/tests/web-servers/static-web-server.nix b/nixpkgs/nixos/tests/web-servers/static-web-server.nix
new file mode 100644
index 000000000000..da1a9bdec5d2
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-servers/static-web-server.nix
@@ -0,0 +1,32 @@
+import ../make-test-python.nix ({ pkgs, lib, ... } : {
+  name = "static-web-server";
+  meta = {
+    maintainers = with lib.maintainers; [ mac-chaffee ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    services.static-web-server = {
+      enable = true;
+      listen = "[::]:8080";
+      root = toString (pkgs.writeTextDir "nixos-test.html" ''
+        <h1>Hello NixOS!</h1>
+      '');
+      configuration = {
+        general = { directory-listing = true; };
+      };
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_unit("static-web-server.socket")
+    machine.wait_for_open_port(8080)
+    # We don't use wait_until_succeeds() because we're testing socket
+    # activation, which must already work for the very first request
+    response = machine.succeed("curl -fsS localhost:8080")
+    assert "nixos-test.html" in response, "The directory listing page did not include a link to our nixos-test.html file"
+    response = machine.succeed("curl -fsS localhost:8080/nixos-test.html")
+    assert "Hello NixOS!" in response
+    machine.wait_for_unit("static-web-server.service")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/web-servers/unit-php.nix b/nixpkgs/nixos/tests/web-servers/unit-php.nix
new file mode 100644
index 000000000000..f0df371945e5
--- /dev/null
+++ b/nixpkgs/nixos/tests/web-servers/unit-php.nix
@@ -0,0 +1,52 @@
+import ../make-test-python.nix ({pkgs, ...}:
+let
+  testdir = pkgs.writeTextDir "www/info.php" "<?php phpinfo();";
+
+in {
+  name = "unit-php-test";
+  meta.maintainers = with pkgs.lib.maintainers; [ izorkin ];
+
+  nodes.machine = { config, lib, pkgs, ... }: {
+    services.unit = {
+      enable = true;
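+      # NGINX Unit takes its configuration as JSON; build it from a Nix attribute set.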
+      config = pkgs.lib.strings.toJSON {
+        listeners."*:9081".application = "php_81";
+        applications.php_81 = {
+          type = "php 8.1";
+          processes = 1;
+          user = "testuser";
+          group = "testgroup";
+          root = "${testdir}/www";
+          index = "info.php";
+          options.file = "${pkgs.unit.usedPhp81}/lib/php.ini";
+        };
+      };
+    };
+    users = {
+      users.testuser = {
+        isSystemUser = true;
+        uid = 1080;
+        group = "testgroup";
+      };
+      groups.testgroup = {
+        gid = 1080;
+      };
+    };
+  };
+  testScript = ''
+    machine.start()
+
+    machine.wait_for_unit("unit.service")
+    machine.wait_for_open_port(9081)
+
+    # Check that we get evaluated PHP output back
+    response = machine.succeed("curl -f -vvv -s http://127.0.0.1:9081/")
+    assert "PHP Version ${pkgs.unit.usedPhp81.version}" in response, "PHP version not detected"
+
+    # Check that the database drivers and some other extensions are loaded
+    for ext in ["json", "opcache", "pdo_mysql", "pdo_pgsql", "pdo_sqlite"]:
+        assert ext in response, f"Missing {ext} extension"
+
+    machine.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/webhook.nix b/nixpkgs/nixos/tests/webhook.nix
new file mode 100644
index 000000000000..ed7051408640
--- /dev/null
+++ b/nixpkgs/nixos/tests/webhook.nix
@@ -0,0 +1,65 @@
+{ pkgs, ... }:
+let
+  forwardedPort = 19000;
+  internalPort = 9000;
+in
+{
+  name = "webhook";
+
+  nodes = {
+    webhookMachine = { pkgs, ... }: {
+      virtualisation.forwardPorts = [{
+        host.port = forwardedPort;
+        guest.port = internalPort;
+      }];
+      services.webhook = {
+        enable = true;
+        port = internalPort;
+        openFirewall = true;
+        hooks = {
+          echo = {
+            execute-command = "echo";
+            response-message = "Webhook is reachable!";
+          };
+        };
+        hooksTemplated = {
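+          # Hooks defined here are passed to webhook as templates, so the
+          # getenv call below is filled in from `environment` at startup.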
+          echoTemplate = ''
+            {
+              "id": "echo-template",
+              "execute-command": "echo",
+              "response-message": "{{ getenv "WEBHOOK_MESSAGE" }}"
+            }
+          '';
+        };
+        environment.WEBHOOK_MESSAGE = "Templates are working!";
+      };
+    };
+  };
+
+  extraPythonPackages = p: [
+    p.requests
+    p.types-requests
+  ];
+
+  testScript = { nodes, ... }: ''
+    import requests
+    webhookMachine.wait_for_unit("webhook")
+    webhookMachine.wait_for_open_port(${toString internalPort})
+
+    with subtest("Check that webhooks can be called externally"):
+      response = requests.get("http://localhost:${toString forwardedPort}/hooks/echo")
+      print(f"Response code: {response.status_code}")
+      print("Response: %r" % response.content)
+
+      assert response.status_code == 200
+      assert response.content == b"Webhook is reachable!"
+
+    with subtest("Check that templated webhooks can be called externally"):
+      response = requests.get("http://localhost:${toString forwardedPort}/hooks/echo-template")
+      print(f"Response code: {response.status_code}")
+      print("Response: %r" % response.content)
+
+      assert response.status_code == 200
+      assert response.content == b"Templates are working!"
+  '';
+}
diff --git a/nixpkgs/nixos/tests/wiki-js.nix b/nixpkgs/nixos/tests/wiki-js.nix
new file mode 100644
index 000000000000..8b3c51935a6c
--- /dev/null
+++ b/nixpkgs/nixos/tests/wiki-js.nix
@@ -0,0 +1,153 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} : {
+  name = "wiki-js";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ma27 ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    virtualisation.memorySize = 2047;
+    services.wiki-js = {
+      enable = true;
+      settings.db.host = "/run/postgresql";
+      settings.db.user = "wiki-js";
+      settings.db.db = "wiki-js";
+      settings.logLevel = "debug";
+    };
+    services.postgresql = {
+      enable = true;
+      ensureDatabases = [ "wiki-js" ];
+      ensureUsers = [
+        { name = "wiki-js";
+          ensureDBOwnership = true;
+        }
+      ];
+    };
+    systemd.services.wiki-js = {
+      requires = [ "postgresql.service" ];
+      after = [ "postgresql.service" ];
+    };
+    environment.systemPackages = with pkgs; [ jq ];
+  };
+
+  testScript = let
+    payloads.finalize = pkgs.writeText "finalize.json" (builtins.toJSON {
+      adminEmail = "webmaster@example.com";
+      adminPassword = "notapassword";
+      adminPasswordConfirm = "notapassword";
+      siteUrl = "http://localhost:3000";
+      telemetry = false;
+    });
+    payloads.login = pkgs.writeText "login.json" (builtins.toJSON [{
+      operationName = null;
+      extensions = {};
+      query = ''
+        mutation ($username: String!, $password: String!, $strategy: String!) {
+          authentication {
+            login(username: $username, password: $password, strategy: $strategy) {
+              responseResult {
+                succeeded
+                errorCode
+                slug
+                message
+                __typename
+              }
+              jwt
+              mustChangePwd
+              mustProvideTFA
+              mustSetupTFA
+              continuationToken
+              redirect
+              tfaQRImage
+              __typename
+            }
+            __typename
+          }
+        }
+      '';
+      variables = {
+        password = "notapassword";
+        strategy = "local";
+        username = "webmaster@example.com";
+      };
+    }]);
+    payloads.content = pkgs.writeText "content.json" (builtins.toJSON [{
+      extensions = {};
+      operationName = null;
+      query = ''
+        mutation ($content: String!, $description: String!, $editor: String!, $isPrivate: Boolean!, $isPublished: Boolean!, $locale: String!, $path: String!, $publishEndDate: Date, $publishStartDate: Date, $scriptCss: String, $scriptJs: String, $tags: [String]!, $title: String!) {
+          pages {
+            create(content: $content, description: $description, editor: $editor, isPrivate: $isPrivate, isPublished: $isPublished, locale: $locale, path: $path, publishEndDate: $publishEndDate, publishStartDate: $publishStartDate, scriptCss: $scriptCss, scriptJs: $scriptJs, tags: $tags, title: $title) {
+              responseResult {
+                succeeded
+                errorCode
+                slug
+                message
+                __typename
+              }
+              page {
+                id
+                updatedAt
+                __typename
+              }
+              __typename
+            }
+            __typename
+          }
+        }
+      '';
+      variables = {
+        content = "# Header\n\nHello world!";
+        description = "";
+        editor = "markdown";
+        isPrivate = false;
+        isPublished = true;
+        locale = "en";
+        path = "home";
+        publishEndDate = "";
+        publishStartDate = "";
+        scriptCss = "";
+        scriptJs = "";
+        tags = [];
+        title = "Hello world";
+      };
+    }]);
+  in ''
+    machine.start()
+    machine.wait_for_unit("multi-user.target")
+    machine.wait_for_open_port(3000)
+
+    machine.succeed("curl -sSf localhost:3000")
+
+    with subtest("Setup"):
+        result = machine.succeed(
+            "curl -sSf localhost:3000/finalize -X POST -d "
+            + "@${payloads.finalize} -H 'Content-Type: application/json' "
+            + "| jq .ok | xargs echo"
+        )
+        assert result.strip() == "true", f"Expected true, got {result}"
+
+        # During the setup the service gets restarted, so we use this
+        # to check if the setup is done.
+        machine.wait_until_fails("curl -sSf localhost:3000")
+        machine.wait_until_succeeds("curl -sSf localhost:3000")
+
+    with subtest("Base functionality"):
+        auth = machine.succeed(
+            "curl -sSf localhost:3000/graphql -X POST "
+            + "-d @${payloads.login} -H 'Content-Type: application/json' "
+            + "| jq '.[0].data.authentication.login.jwt' | xargs echo"
+        ).strip()
+
+        assert auth
+
+        create = machine.succeed(
+            "curl -sSf localhost:3000/graphql -X POST "
+            + "-d @${payloads.content} -H 'Content-Type: application/json' "
+            + f"-H 'Authorization: Bearer {auth}' "
+            + "| jq '.[0].data.pages.create.responseResult.succeeded'|xargs echo"
+        )
+        assert create.strip() == "true", f"Expected true, got {create}"
+
+    machine.shutdown()
+  '';
+})
diff --git a/nixpkgs/nixos/tests/wine.nix b/nixpkgs/nixos/tests/wine.nix
new file mode 100644
index 000000000000..7cbe7ac94f1e
--- /dev/null
+++ b/nixpkgs/nixos/tests/wine.nix
@@ -0,0 +1,51 @@
+{ system ? builtins.currentSystem
+, pkgs ? import ../.. { inherit system; config = { }; }
+}:
+
+let
+  inherit (pkgs.lib) concatMapStrings listToAttrs optionals optionalString;
+  inherit (import ../lib/testing-python.nix { inherit system pkgs; }) makeTest;
+
+  hello32 = "${pkgs.pkgsCross.mingw32.hello}/bin/hello.exe";
+  hello64 = "${pkgs.pkgsCross.mingwW64.hello}/bin/hello.exe";
+
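+  # Build one test per (package set, variant) pair that runs the cross-compiled
+  # hello.exe binaries under Wine.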
+  makeWineTest = packageSet: exes: variant: rec {
+    name = "${packageSet}-${variant}";
+    value = makeTest {
+      inherit name;
+      meta = with pkgs.lib.maintainers; { maintainers = [ chkno ]; };
+
+      nodes.machine = { pkgs, ... }: {
+        environment.systemPackages = [ pkgs."${packageSet}"."${variant}" ];
+        virtualisation.diskSize = 800;
+      };
+
+      testScript = ''
+        machine.wait_for_unit("multi-user.target")
+        ${concatMapStrings (exe: ''
+          greeting = machine.succeed(
+              "bash -c 'wine ${exe} 2> >(tee wine-stderr >&2)'"
+          )
+          assert 'Hello, world!' in greeting
+        ''
+        # only the full version contains Gecko, but the error is not printed reliably in other variants
+        + optionalString (variant == "full") ''
+          machine.fail(
+              "fgrep 'Could not find Wine Gecko. HTML rendering will be disabled.' wine-stderr"
+          )
+        '') exes}
+      '';
+    };
+  };
+
+  variants = [ "base" "full" "minimal" "staging" "unstable" "wayland" ];
+
+in
+listToAttrs (
+  map (makeWineTest "winePackages" [ hello32 ]) variants
+  ++ optionals pkgs.stdenv.is64bit
+    (map (makeWineTest "wineWowPackages" [ hello32 hello64 ])
+         # This wayland combination times out after spending many hours.
+         # https://hydra.nixos.org/job/nixos/trunk-combined/nixos.tests.wine.wineWowPackages-wayland.x86_64-linux
+         (pkgs.lib.remove "wayland" variants))
+)
diff --git a/nixpkgs/nixos/tests/wireguard/basic.nix b/nixpkgs/nixos/tests/wireguard/basic.nix
new file mode 100644
index 000000000000..96b0a681c364
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/basic.nix
@@ -0,0 +1,73 @@
+import ../make-test-python.nix ({ pkgs, lib, kernelPackages ? null, ...} :
+  let
+    wg-snakeoil-keys = import ./snakeoil-keys.nix;
+    peer = (import ./make-peer.nix) { inherit lib; };
+  in
+  {
+    name = "wireguard";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ ma27 ];
+    };
+
+    nodes = {
+      peer0 = peer {
+        ip4 = "192.168.0.1";
+        ip6 = "fd00::1";
+        extraConfig = {
+          boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+          networking.firewall.allowedUDPPorts = [ 23542 ];
+          networking.wireguard.interfaces.wg0 = {
+            ips = [ "10.23.42.1/32" "fc00::1/128" ];
+            listenPort = 23542;
+
+            inherit (wg-snakeoil-keys.peer0) privateKey;
+
+            peers = lib.singleton {
+              allowedIPs = [ "10.23.42.2/32" "fc00::2/128" ];
+
+              inherit (wg-snakeoil-keys.peer1) publicKey;
+            };
+          };
+        };
+      };
+
+      peer1 = peer {
+        ip4 = "192.168.0.2";
+        ip6 = "fd00::2";
+        extraConfig = {
+          boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+          networking.wireguard.interfaces.wg0 = {
+            ips = [ "10.23.42.2/32" "fc00::2/128" ];
+            listenPort = 23542;
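+            # allowedIPs below covers 0.0.0.0/0 and ::/0, so don't turn it into
+            # routes automatically; postSetup adds explicit host routes instead.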
+            allowedIPsAsRoutes = false;
+
+            inherit (wg-snakeoil-keys.peer1) privateKey;
+
+            peers = lib.singleton {
+              allowedIPs = [ "0.0.0.0/0" "::/0" ];
+              endpoint = "192.168.0.1:23542";
+              persistentKeepalive = 25;
+
+              inherit (wg-snakeoil-keys.peer0) publicKey;
+            };
+
+            postSetup = let inherit (pkgs) iproute2; in ''
+              ${iproute2}/bin/ip route replace 10.23.42.1/32 dev wg0
+              ${iproute2}/bin/ip route replace fc00::1/128 dev wg0
+            '';
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      peer0.wait_for_unit("wireguard-wg0.service")
+      peer1.wait_for_unit("wireguard-wg0.service")
+
+      peer1.succeed("ping -c5 fc00::1")
+      peer1.succeed("ping -c5 10.23.42.1")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/wireguard/default.nix b/nixpkgs/nixos/tests/wireguard/default.nix
new file mode 100644
index 000000000000..c30f1b74770b
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/default.nix
@@ -0,0 +1,28 @@
+{ system ? builtins.currentSystem
+, config ? { }
+, pkgs ? import ../../.. { inherit system config; }
+, kernelVersionsToTest ? [ "5.4" "latest" ]
+}:
+
+with pkgs.lib;
+
+let
+  tests = let callTest = p: args: import p ({ inherit system pkgs; } // args); in {
+    basic = callTest ./basic.nix;
+    namespaces = callTest ./namespaces.nix;
+    wg-quick = callTest ./wg-quick.nix;
+    wg-quick-nftables = args: callTest ./wg-quick.nix ({ nftables = true; } // args);
+    generated = callTest ./generated.nix;
+  };
+in
+
+listToAttrs (
+  flip concatMap kernelVersionsToTest (version:
+    let
+      v' = replaceStrings [ "." ] [ "_" ] version;
+    in
+    flip mapAttrsToList tests (name: test:
+      nameValuePair "wireguard-${name}-linux-${v'}" (test { kernelPackages = pkgs."linuxPackages_${v'}"; })
+    )
+  )
+)
diff --git a/nixpkgs/nixos/tests/wireguard/generated.nix b/nixpkgs/nixos/tests/wireguard/generated.nix
new file mode 100644
index 000000000000..c58f7a75071e
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/generated.nix
@@ -0,0 +1,63 @@
+import ../make-test-python.nix ({ pkgs, lib, kernelPackages ? null, ... } : {
+  name = "wireguard-generated";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ma27 grahamc ];
+  };
+
+  nodes = {
+    peer1 = {
+      boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+      networking.firewall.allowedUDPPorts = [ 12345 ];
+      networking.wireguard.interfaces.wg0 = {
+        ips = [ "10.10.10.1/24" ];
+        listenPort = 12345;
+        privateKeyFile = "/etc/wireguard/private";
+        generatePrivateKeyFile = true;
+
+      };
+    };
+
+    peer2 = {
+      boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+      networking.firewall.allowedUDPPorts = [ 12345 ];
+      networking.wireguard.interfaces.wg0 = {
+        ips = [ "10.10.10.2/24" ];
+        listenPort = 12345;
+        privateKeyFile = "/etc/wireguard/private";
+        generatePrivateKeyFile = true;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    peer1.wait_for_unit("wireguard-wg0.service")
+    peer2.wait_for_unit("wireguard-wg0.service")
+
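+    # The peers are wired up imperatively here: their public keys only exist
+    # after generatePrivateKeyFile has created the private keys at boot.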
+    retcode, peer1pubkey = peer1.execute("wg pubkey < /etc/wireguard/private")
+    if retcode != 0:
+        raise Exception("Could not read public key from peer1")
+
+    retcode, peer2pubkey = peer2.execute("wg pubkey < /etc/wireguard/private")
+    if retcode != 0:
+        raise Exception("Could not read public key from peer2")
+
+    peer1.succeed(
+        "wg set wg0 peer {} allowed-ips 10.10.10.2/32 endpoint 192.168.1.2:12345 persistent-keepalive 1".format(
+            peer2pubkey.strip()
+        )
+    )
+    peer1.succeed("ip route replace 10.10.10.2/32 dev wg0 table main")
+
+    peer2.succeed(
+        "wg set wg0 peer {} allowed-ips 10.10.10.1/32 endpoint 192.168.1.1:12345 persistent-keepalive 1".format(
+            peer1pubkey.strip()
+        )
+    )
+    peer2.succeed("ip route replace 10.10.10.1/32 dev wg0 table main")
+
+    peer1.succeed("ping -c1 10.10.10.2")
+    peer2.succeed("ping -c1 10.10.10.1")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/wireguard/make-peer.nix b/nixpkgs/nixos/tests/wireguard/make-peer.nix
new file mode 100644
index 000000000000..d2740549738b
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/make-peer.nix
@@ -0,0 +1,23 @@
+{ lib, ... }: { ip4, ip6, extraConfig }:
+lib.mkMerge [
+  {
+    boot.kernel.sysctl = {
+      "net.ipv6.conf.all.forwarding" = "1";
+      "net.ipv6.conf.default.forwarding" = "1";
+      "net.ipv4.ip_forward" = "1";
+    };
+
+    networking.useDHCP = false;
+    networking.interfaces.eth1 = {
+      ipv4.addresses = [{
+        address = ip4;
+        prefixLength = 24;
+      }];
+      ipv6.addresses = [{
+        address = ip6;
+        prefixLength = 64;
+      }];
+    };
+  }
+  extraConfig
+]
diff --git a/nixpkgs/nixos/tests/wireguard/namespaces.nix b/nixpkgs/nixos/tests/wireguard/namespaces.nix
new file mode 100644
index 000000000000..d0eb009e1107
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/namespaces.nix
@@ -0,0 +1,83 @@
+let
+  listenPort = 12345;
+  socketNamespace = "foo";
+  interfaceNamespace = "bar";
+  node = {
+    networking.wireguard.interfaces.wg0 = {
+      listenPort = listenPort;
+      ips = [ "10.10.10.1/24" ];
+      privateKeyFile = "/etc/wireguard/private";
+      generatePrivateKeyFile = true;
+    };
+  };
+
+in
+
+import ../make-test-python.nix ({ pkgs, lib, kernelPackages ? null, ... } : {
+  name = "wireguard-with-namespaces";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ asymmetric ];
+  };
+
+  nodes = {
+    # interface should be created in the socketNamespace
+    # and not moved from there
+    peer0 = pkgs.lib.attrsets.recursiveUpdate node {
+      boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+      networking.wireguard.interfaces.wg0 = {
+        preSetup = ''
+          ip netns add ${socketNamespace}
+        '';
+        inherit socketNamespace;
+      };
+    };
+    # interface should be created in the init namespace
+    # and moved to the interfaceNamespace
+    peer1 = pkgs.lib.attrsets.recursiveUpdate node {
+      boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+      networking.wireguard.interfaces.wg0 = {
+        preSetup = ''
+          ip netns add ${interfaceNamespace}
+        '';
+        mtu = 1280;
+        inherit interfaceNamespace;
+      };
+    };
+    # interface should be created in the socketNamespace
+    # and moved to the interfaceNamespace
+    peer2 = pkgs.lib.attrsets.recursiveUpdate node {
+      boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+      networking.wireguard.interfaces.wg0 = {
+        preSetup = ''
+          ip netns add ${socketNamespace}
+          ip netns add ${interfaceNamespace}
+        '';
+        inherit socketNamespace interfaceNamespace;
+      };
+    };
+    # interface should be created in the socketNamespace
+    # and moved to the init namespace
+    peer3 = pkgs.lib.attrsets.recursiveUpdate node {
+      boot = lib.mkIf (kernelPackages != null) { inherit kernelPackages; };
+      networking.wireguard.interfaces.wg0 = {
+        preSetup = ''
+          ip netns add ${socketNamespace}
+        '';
+        inherit socketNamespace;
+        interfaceNamespace = "init";
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    for machine in peer0, peer1, peer2, peer3:
+        machine.wait_for_unit("wireguard-wg0.service")
+
+    peer0.succeed("ip -n ${socketNamespace} link show wg0")
+    peer1.succeed("ip -n ${interfaceNamespace} link show wg0")
+    peer2.succeed("ip -n ${interfaceNamespace} link show wg0")
+    peer3.succeed("ip link show wg0")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/wireguard/snakeoil-keys.nix b/nixpkgs/nixos/tests/wireguard/snakeoil-keys.nix
new file mode 100644
index 000000000000..c979f0e0c8a9
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/snakeoil-keys.nix
@@ -0,0 +1,12 @@
+{
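+  # Throwaway test keys. Pairs like these can be generated with, for example,
+  # `wg genkey | tee privatekey | wg pubkey`; never reuse them outside of tests.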
+  peer0 = {
+    privateKey = "OPuVRS2T0/AtHDp3PXkNuLQYDiqJaBEEnYe42BSnJnQ=";
+    publicKey = "IujkG119YPr2cVQzJkSLYCdjpHIDjvr/qH1w1tdKswY=";
+  };
+
+  peer1 = {
+    privateKey = "uO8JVo/sanx2DOM0L9GUEtzKZ82RGkRnYgpaYc7iXmg=";
+    # readFile'd keys may have trailing newlines, emulate this
+    publicKey = "Ks9yRJIi/0vYgRmn14mIOQRwkcUGBujYINbMpik2SBI=\n";
+  };
+}
diff --git a/nixpkgs/nixos/tests/wireguard/wg-quick.nix b/nixpkgs/nixos/tests/wireguard/wg-quick.nix
new file mode 100644
index 000000000000..ec2b8d7f2d9d
--- /dev/null
+++ b/nixpkgs/nixos/tests/wireguard/wg-quick.nix
@@ -0,0 +1,80 @@
+import ../make-test-python.nix ({ pkgs, lib, kernelPackages ? null, nftables ? false, ... }:
+  let
+    wg-snakeoil-keys = import ./snakeoil-keys.nix;
+    peer = import ./make-peer.nix { inherit lib; };
+    commonConfig = {
+      boot.kernelPackages = lib.mkIf (kernelPackages != null) kernelPackages;
+      networking.nftables.enable = nftables;
+      # Make sure the iptables compatibility layer is unavailable when nftables is enabled
+      boot.blacklistedKernelModules = lib.mkIf nftables [ "nft_compat" ];
+    };
+  in
+  {
+    name = "wg-quick";
+    meta = with pkgs.lib.maintainers; {
+      maintainers = [ d-xo ];
+    };
+
+    nodes = {
+      peer0 = peer {
+        ip4 = "192.168.0.1";
+        ip6 = "fd00::1";
+        extraConfig = lib.mkMerge [
+          commonConfig
+          {
+            networking.firewall.allowedUDPPorts = [ 23542 ];
+            networking.wg-quick.interfaces.wg0 = {
+              address = [ "10.23.42.1/32" "fc00::1/128" ];
+              listenPort = 23542;
+
+              inherit (wg-snakeoil-keys.peer0) privateKey;
+
+              peers = lib.singleton {
+                allowedIPs = [ "10.23.42.2/32" "fc00::2/128" ];
+
+                inherit (wg-snakeoil-keys.peer1) publicKey;
+              };
+
+              dns = [ "10.23.42.2" "fc00::2" "wg0" ];
+            };
+          }
+        ];
+      };
+
+      peer1 = peer {
+        ip4 = "192.168.0.2";
+        ip6 = "fd00::2";
+        extraConfig = lib.mkMerge [
+          commonConfig
+          {
+            networking.useNetworkd = true;
+            networking.wg-quick.interfaces.wg0 = {
+              address = [ "10.23.42.2/32" "fc00::2/128" ];
+              inherit (wg-snakeoil-keys.peer1) privateKey;
+
+              peers = lib.singleton {
+                allowedIPs = [ "0.0.0.0/0" "::/0" ];
+                endpoint = "192.168.0.1:23542";
+                persistentKeepalive = 25;
+
+                inherit (wg-snakeoil-keys.peer0) publicKey;
+              };
+
+              dns = [ "10.23.42.1" "fc00::1" "wg0" ];
+            };
+          }
+        ];
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      peer0.wait_for_unit("wg-quick-wg0.service")
+      peer1.wait_for_unit("wg-quick-wg0.service")
+
+      peer1.succeed("ping -c5 fc00::1")
+      peer1.succeed("ping -c5 10.23.42.1")
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/without-nix.nix b/nixpkgs/nixos/tests/without-nix.nix
new file mode 100644
index 000000000000..b21e9f2844f5
--- /dev/null
+++ b/nixpkgs/nixos/tests/without-nix.nix
@@ -0,0 +1,32 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "without-nix";
+  meta = with lib.maintainers; {
+    maintainers = [ ericson2314 ];
+  };
+
+  nodes.machine = { ... }: {
+    nix.enable = false;
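+    # Make evaluation fail loudly if anything in the system still pulls in a Nix package.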
+    nixpkgs.overlays = [
+      (self: super: {
+        nix = throw "don't want to use pkgs.nix";
+        nixVersions = lib.mapAttrs (k: throw "don't want to use pkgs.nixVersions.${k}") super.nixVersions;
+        # aliases, some deprecated
+        nix_2_3 = throw "don't want to use pkgs.nix_2_3";
+        nix_2_4 = throw "don't want to use pkgs.nix_2_4";
+        nix_2_5 = throw "don't want to use pkgs.nix_2_5";
+        nix_2_6 = throw "don't want to use pkgs.nix_2_6";
+        nixFlakes = throw "don't want to use pkgs.nixFlakes";
+        nixStable = throw "don't want to use pkgs.nixStable";
+        nixUnstable = throw "don't want to use pkgs.nixUnstable";
+        nixStatic = throw "don't want to use pkgs.nixStatic";
+      })
+    ];
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.succeed("which which")
+    machine.fail("which nix")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/wmderland.nix b/nixpkgs/nixos/tests/wmderland.nix
new file mode 100644
index 000000000000..ebfd443763e1
--- /dev/null
+++ b/nixpkgs/nixos/tests/wmderland.nix
@@ -0,0 +1,54 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "wmderland";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ takagiy ];
+  };
+
+  nodes.machine = { lib, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = lib.mkForce "none+wmderland";
+    services.xserver.windowManager.wmderland.enable = true;
+
+    systemd.services.setupWmderlandConfig = {
+      wantedBy = [ "multi-user.target" ];
+      before = [ "multi-user.target" ];
+      environment = {
+        HOME = "/home/alice";
+      };
+      serviceConfig = {
+        Type = "oneshot";
+        RemainAfterExit = true;
+        User = "alice";
+      };
+      script = let
+        config = pkgs.writeText "config" ''
+             set $Mod = Mod1
+             bindsym $Mod+Return exec ${pkgs.xterm}/bin/xterm -cm -pc
+        '';
+      in ''
+        mkdir -p $HOME/.config/wmderland
+        cp ${config} $HOME/.config/wmderland/config
+      '';
+    };
+  };
+
+  testScript = { ... }: ''
+    with subtest("ensure x starts"):
+        machine.wait_for_x()
+        machine.wait_for_file("/home/alice/.Xauthority")
+        machine.succeed("xauth merge ~alice/.Xauthority")
+
+    with subtest("ensure we can open a new terminal"):
+        machine.send_key("alt-ret")
+        machine.wait_until_succeeds("pgrep xterm")
+        machine.wait_for_window(r"alice.*?machine")
+        machine.screenshot("terminal")
+
+    with subtest("ensure we can communicate through ipc with wmderlandc"):
+        # Kills the previously open xterm
+        machine.succeed("pgrep xterm")
+        machine.execute("DISPLAY=:0 wmderlandc kill")
+        machine.fail("pgrep xterm")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/wordpress.nix b/nixpkgs/nixos/tests/wordpress.nix
new file mode 100644
index 000000000000..592af9a094f1
--- /dev/null
+++ b/nixpkgs/nixos/tests/wordpress.nix
@@ -0,0 +1,101 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }:
+
+rec {
+  name = "wordpress";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [
+      flokli
+      grahamc # under duress!
+      mmilata
+    ];
+  };
+
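+  # Generate one node per WordPress version and web server (httpd, nginx, caddy).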
+  nodes = lib.foldl (a: version: let
+    package = pkgs."wordpress${version}";
+  in a // {
+    "wp${version}_httpd" = _: {
+      services.httpd.adminAddr = "webmaster@site.local";
+      services.httpd.logPerVirtualHost = true;
+
+      services.wordpress.webserver = "httpd";
+      services.wordpress.sites = {
+        "site1.local" = {
+          database.tablePrefix = "site1_";
+          inherit package;
+        };
+        "site2.local" = {
+          database.tablePrefix = "site2_";
+          inherit package;
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+    };
+
+    "wp${version}_nginx" = _: {
+      services.wordpress.webserver = "nginx";
+      services.wordpress.sites = {
+        "site1.local" = {
+          database.tablePrefix = "site1_";
+          inherit package;
+        };
+        "site2.local" = {
+          database.tablePrefix = "site2_";
+          inherit package;
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+    };
+
+    "wp${version}_caddy" = _: {
+      services.wordpress.webserver = "caddy";
+      services.wordpress.sites = {
+        "site1.local" = {
+          database.tablePrefix = "site1_";
+          inherit package;
+        };
+        "site2.local" = {
+          database.tablePrefix = "site2_";
+          inherit package;
+        };
+      };
+
+      networking.firewall.allowedTCPPorts = [ 80 ];
+      networking.hosts."127.0.0.1" = [ "site1.local" "site2.local" ];
+    };
+  }) {} [
+    "6_3" "6_4"
+  ];
+
+  testScript = ''
+    import re
+
+    start_all()
+
+    ${lib.concatStrings (lib.mapAttrsToList (name: value: ''
+      ${name}.wait_for_unit("${(value null).services.wordpress.webserver}")
+    '') nodes)}
+
+    site_names = ["site1.local", "site2.local"]
+
+    for machine in (${lib.concatStringsSep ", " (builtins.attrNames nodes)}):
+        for site_name in site_names:
+            machine.wait_for_unit(f"phpfpm-wordpress-{site_name}")
+
+            with subtest("website returns welcome screen"):
+                assert "Welcome to the famous" in machine.succeed(f"curl -L {site_name}")
+
+            with subtest("wordpress-init went through"):
+                info = machine.get_unit_info(f"wordpress-init-{site_name}")
+                assert info["Result"] == "success"
+
+            with subtest("secret keys are set"):
+                pattern = re.compile(r"^define.*NONCE_SALT.{64,};$", re.MULTILINE)
+                assert pattern.search(
+                    machine.succeed(f"cat /var/lib/wordpress/{site_name}/secret-keys.php")
+                )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/wpa_supplicant.nix b/nixpkgs/nixos/tests/wpa_supplicant.nix
new file mode 100644
index 000000000000..8c701ca7d5f7
--- /dev/null
+++ b/nixpkgs/nixos/tests/wpa_supplicant.nix
@@ -0,0 +1,210 @@
+import ./make-test-python.nix ({ pkgs, lib, ...}:
+{
+  name = "wpa_supplicant";
+  meta = with lib.maintainers; {
+    maintainers = [ oddlama rnhmjoj ];
+  };
+
+  nodes = let
+    machineWithHostapd = extraConfigModule: { ... }: {
+      imports = [
+        ../modules/profiles/minimal.nix
+        extraConfigModule
+      ];
+
+      # add a virtual wlan interface
+      boot.kernelModules = [ "mac80211_hwsim" ];
+
+      # wireless access point
+      services.hostapd = {
+        enable = true;
+        radios.wlan0 = {
+          band = "2g";
+          countryCode = "US";
+          networks = {
+            wlan0 = {
+              ssid = "nixos-test-sae";
+              authentication = {
+                mode = "wpa3-sae";
+                saePasswords = [ { password = "reproducibility"; } ];
+              };
+              bssid = "02:00:00:00:00:00";
+            };
+            wlan0-1 = {
+              ssid = "nixos-test-mixed";
+              authentication = {
+                mode = "wpa3-sae-transition";
+                saeAddToMacAllow = true;
+                saePasswordsFile = pkgs.writeText "password" "reproducibility";
+                wpaPasswordFile = pkgs.writeText "password" "reproducibility";
+              };
+              bssid = "02:00:00:00:00:01";
+            };
+            wlan0-2 = {
+              ssid = "nixos-test-wpa2";
+              authentication = {
+                mode = "wpa2-sha256";
+                wpaPassword = "reproducibility";
+              };
+              bssid = "02:00:00:00:00:02";
+            };
+          };
+        };
+      };
+
+      # wireless client
+      networking.wireless = {
+        # the override is needed because the wifi is
+        # disabled with mkVMOverride in qemu-vm.nix.
+        enable = lib.mkOverride 0 true;
+        userControlled.enable = true;
+        interfaces = [ "wlan1" ];
+        fallbackToWPA2 = lib.mkDefault true;
+
+        # networks will be added on-demand below for the specific
+        # network that should be tested
+
+        # secrets
+        environmentFile = pkgs.writeText "wpa-secrets" ''
+          PSK_NIXOS_TEST="reproducibility"
+        '';
+      };
+    };
+  in {
+    basic = { ... }: {
+      imports = [ ../modules/profiles/minimal.nix ];
+
+      # add a virtual wlan interface
+      boot.kernelModules = [ "mac80211_hwsim" ];
+
+      # wireless client
+      networking.wireless = {
+        # the override is needed because the wifi is
+        # disabled with mkVMOverride in qemu-vm.nix.
+        enable = lib.mkOverride 0 true;
+        userControlled.enable = true;
+        interfaces = [ "wlan1" ];
+        fallbackToWPA2 = true;
+
+        networks = {
+          # test WPA2 fallback
+          mixed-wpa = {
+            psk = "password";
+            authProtocols = [ "WPA-PSK" "SAE" ];
+          };
+          sae-only = {
+            psk = "password";
+            authProtocols = [ "SAE" ];
+          };
+
+          # secrets substitution test cases
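+          # (@NAME@ tokens are replaced with the value of NAME from the
+          # environmentFile below; undefined names and stray @ characters
+          # are left untouched)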
+          test1.psk = "@PSK_VALID@";              # should be replaced
+          test2.psk = "@PSK_SPECIAL@";            # should be replaced
+          test3.psk = "@PSK_MISSING@";            # should not be replaced
+          test4.psk = "P@ssowrdWithSome@tSymbol"; # should not be replaced
+        };
+
+        # secrets
+        environmentFile = pkgs.writeText "wpa-secrets" ''
+          PSK_VALID="S0m3BadP4ssw0rd";
+          # taken from https://github.com/minimaxir/big-list-of-naughty-strings
+          PSK_SPECIAL=",./;'[]\-= <>?:\"{}|_+ !@#$%^\&*()`~";
+        '';
+      };
+    };
+
+    # Test connecting to the SAE-only hotspot using SAE
+    machineSae = machineWithHostapd {
+      networking.wireless = {
+        fallbackToWPA2 = false;
+        networks.nixos-test-sae = {
+          psk = "@PSK_NIXOS_TEST@";
+          authProtocols = [ "SAE" ];
+        };
+      };
+    };
+
+    # Test connecting to the SAE and WPA2 mixed hotspot using SAE
+    machineMixedUsingSae = machineWithHostapd {
+      networking.wireless = {
+        fallbackToWPA2 = false;
+        networks.nixos-test-mixed = {
+          psk = "@PSK_NIXOS_TEST@";
+          authProtocols = [ "SAE" ];
+        };
+      };
+    };
+
+    # Test connecting to the SAE and WPA2 mixed hotspot using WPA2
+    machineMixedUsingWpa2 = machineWithHostapd {
+      networking.wireless = {
+        fallbackToWPA2 = true;
+        networks.nixos-test-mixed = {
+          psk = "@PSK_NIXOS_TEST@";
+          authProtocols = [ "WPA-PSK-SHA256" ];
+        };
+      };
+    };
+
+    # Test connecting to the WPA2 legacy hotspot using WPA2
+    machineWpa2 = machineWithHostapd {
+      networking.wireless = {
+        fallbackToWPA2 = true;
+        networks.nixos-test-wpa2 = {
+          psk = "@PSK_NIXOS_TEST@";
+          authProtocols = [ "WPA-PSK-SHA256" ];
+        };
+      };
+    };
+  };
+
+  testScript =
+    ''
+      config_file = "/run/wpa_supplicant/wpa_supplicant.conf"
+
+      with subtest("Configuration file is inaccessible to other users"):
+          basic.wait_for_file(config_file)
+          basic.fail(f"sudo -u nobody ls {config_file}")
+
+      with subtest("Secrets variables have been substituted"):
+          basic.fail(f"grep -q @PSK_VALID@ {config_file}")
+          basic.fail(f"grep -q @PSK_SPECIAL@ {config_file}")
+          basic.succeed(f"grep -q @PSK_MISSING@ {config_file}")
+          basic.succeed(f"grep -q P@ssowrdWithSome@tSymbol {config_file}")
+
+      with subtest("WPA2 fallbacks have been generated"):
+          assert int(basic.succeed(f"grep -c sae-only {config_file}")) == 1
+          assert int(basic.succeed(f"grep -c mixed-wpa {config_file}")) == 2
+
+      # save file for manual inspection
+      basic.copy_from_vm(config_file)
+
+      with subtest("Daemon is running and accepting connections"):
+          basic.wait_for_unit("wpa_supplicant-wlan1.service")
+          status = basic.succeed("wpa_cli -i wlan1 status")
+          assert "Failed to connect" not in status, \
+                 "Failed to connect to the daemon"
+
+      machineSae.wait_for_unit("hostapd.service")
+      machineSae.copy_from_vm("/run/hostapd/wlan0.hostapd.conf")
+      with subtest("Daemon can connect to the SAE access point using SAE"):
+          machineSae.wait_until_succeeds(
+            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
+          )
+
+      with subtest("Daemon can connect to the SAE and WPA2 mixed access point using SAE"):
+          machineMixedUsingSae.wait_until_succeeds(
+            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
+          )
+
+      with subtest("Daemon can connect to the SAE and WPA2 mixed access point using WPA2"):
+          machineMixedUsingWpa2.wait_until_succeeds(
+            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
+          )
+
+      with subtest("Daemon can connect to the WPA2 access point using WPA2"):
+          machineWpa2.wait_until_succeeds(
+            "wpa_cli -i wlan1 status | grep -q wpa_state=COMPLETED"
+          )
+    '';
+})
diff --git a/nixpkgs/nixos/tests/wrappers.nix b/nixpkgs/nixos/tests/wrappers.nix
new file mode 100644
index 000000000000..1d4fa85d7399
--- /dev/null
+++ b/nixpkgs/nixos/tests/wrappers.nix
@@ -0,0 +1,112 @@
+import ./make-test-python.nix ({ pkgs, ... }:
+let
+  userUid = 1000;
+  usersGid = 100;
+  busybox = pkgs : pkgs.busybox.override {
+    # Without this, the busybox binary drops euid to ruid for most applets, including id.
+    # See https://bugs.busybox.net/show_bug.cgi?id=15101
+    extraConfig = "CONFIG_FEATURE_SUID n";
+  };
+in
+{
+  name = "wrappers";
+
+  nodes.machine = { config, pkgs, ... }: {
+    ids.gids.users = usersGid;
+
+    users.users = {
+      regular = {
+        uid = userUid;
+        isNormalUser = true;
+      };
+    };
+
+    security.apparmor.enable = true;
+
+    security.wrappers = {
+      suidRoot = {
+        owner = "root";
+        group = "root";
+        setuid = true;
+        source = "${busybox pkgs}/bin/busybox";
+        program = "suid_root_busybox";
+      };
+      sgidRoot = {
+        owner = "root";
+        group = "root";
+        setgid = true;
+        source = "${busybox pkgs}/bin/busybox";
+        program = "sgid_root_busybox";
+      };
+      withChown = {
+        owner = "root";
+        group = "root";
+        source = "${pkgs.libcap}/bin/capsh";
+        program = "capsh_with_chown";
+        capabilities = "cap_chown+ep";
+      };
+    };
+  };
+
+  testScript =
+    ''
+      def cmd_as_regular(cmd):
+        return "su -l regular -c '{0}'".format(cmd)
+
+      def test_as_regular(cmd, expected):
+        out = machine.succeed(cmd_as_regular(cmd)).strip()
+        assert out == expected, "Expected {0} to output {1}, but got {2}".format(cmd, expected, out)
+
+      def test_as_regular_in_userns_mapped_as_root(cmd, expected):
+        out = machine.succeed(f"su -l regular -c '${pkgs.util-linux}/bin/unshare -rm {cmd}'").strip()
+        assert out == expected, "Expected {0} to output {1}, but got {2}".format(cmd, expected, out)
+
+      test_as_regular('${busybox pkgs}/bin/busybox id -u', '${toString userUid}')
+      test_as_regular('${busybox pkgs}/bin/busybox id -ru', '${toString userUid}')
+      test_as_regular('${busybox pkgs}/bin/busybox id -g', '${toString usersGid}')
+      test_as_regular('${busybox pkgs}/bin/busybox id -rg', '${toString usersGid}')
+
+      test_as_regular('/run/wrappers/bin/suid_root_busybox id -u', '0')
+      test_as_regular('/run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('/run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}')
+      test_as_regular('/run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}')
+
+      test_as_regular('/run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}')
+      test_as_regular('/run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('/run/wrappers/bin/sgid_root_busybox id -g', '0')
+      test_as_regular('/run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}')
+
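+      # Inside a user namespace where the regular user is mapped to root
+      # (unshare -rm), all real and effective ids already report as 0; these
+      # checks only verify that the wrappers still execute there.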
+      test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -u', '0')
+      test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -ru', '0')
+      test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -g', '0')
+      test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/suid_root_busybox id -rg', '0')
+
+      test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -u', '0')
+      test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -ru', '0')
+      test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -g', '0')
+      test_as_regular_in_userns_mapped_as_root('/run/wrappers/bin/sgid_root_busybox id -rg', '0')
+
+      # Test that in nonewprivs environment the wrappers simply exec their target.
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -u', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -g', '${toString usersGid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/suid_root_busybox id -rg', '${toString usersGid}')
+
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -u', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -ru', '${toString userUid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -g', '${toString usersGid}')
+      test_as_regular('${pkgs.util-linux}/bin/setpriv --no-new-privs /run/wrappers/bin/sgid_root_busybox id -rg', '${toString usersGid}')
+
+      # We are only testing the permitted set, because it's easiest to look at with capsh.
+      machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_CHOWN'))
+      machine.fail(cmd_as_regular('${pkgs.libcap}/bin/capsh --has-p=CAP_SYS_ADMIN'))
+      machine.succeed(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_CHOWN'))
+      machine.fail(cmd_as_regular('/run/wrappers/bin/capsh_with_chown --has-p=CAP_SYS_ADMIN'))
+
+      # Test that ping, the only user of the apparmor policy includes generated
+      # by the wrappers module, still works. Ideally this would live in a test
+      # for the module that actually generates the apparmor policy for ping,
+      # but there's no convenient test for that one.
+      machine.succeed("ping -c 1 127.0.0.1")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/xandikos.nix b/nixpkgs/nixos/tests/xandikos.nix
new file mode 100644
index 000000000000..69d78ee21e76
--- /dev/null
+++ b/nixpkgs/nixos/tests/xandikos.nix
@@ -0,0 +1,70 @@
+import ./make-test-python.nix (
+  { pkgs, lib, ... }:
+
+    {
+      name = "xandikos";
+
+      meta.maintainers = with lib.maintainers; [ _0x4A6F ];
+
+      nodes = {
+        xandikos_client = {};
+        xandikos_default = {
+          networking.firewall.allowedTCPPorts = [ 8080 ];
+          services.xandikos.enable = true;
+        };
+        xandikos_proxy = {
+          networking.firewall.allowedTCPPorts = [ 80 8080 ];
+          services.xandikos.enable = true;
+          services.xandikos.address = "localhost";
+          services.xandikos.port = 8080;
+          services.xandikos.routePrefix = "/xandikos-prefix/";
+          services.xandikos.extraOptions = [
+            "--defaults"
+          ];
+          services.nginx = {
+            enable = true;
+            recommendedProxySettings = true;
+            virtualHosts."xandikos" = {
+              serverName = "xandikos.local";
+              basicAuth.xandikos = "snakeOilPassword";
+              locations."/xandikos/" = {
+                proxyPass = "http://localhost:8080/xandikos-prefix/";
+              };
+            };
+          };
+        };
+      };
+
+      testScript = ''
+        start_all()
+
+        with subtest("Xandikos default"):
+            xandikos_default.wait_for_unit("multi-user.target")
+            xandikos_default.wait_for_unit("xandikos.service")
+            xandikos_default.wait_for_open_port(8080)
+            xandikos_default.succeed("curl --fail http://localhost:8080/")
+            xandikos_default.succeed(
+                "curl -s --fail --location http://localhost:8080/ | grep -i Xandikos"
+            )
+            xandikos_client.wait_for_unit("network.target")
+            xandikos_client.fail("curl --fail http://xandikos_default:8080/")
+
+        with subtest("Xandikos proxy"):
+            xandikos_proxy.wait_for_unit("multi-user.target")
+            xandikos_proxy.wait_for_unit("xandikos.service")
+            xandikos_proxy.wait_for_open_port(8080)
+            xandikos_proxy.succeed("curl --fail http://localhost:8080/")
+            xandikos_proxy.succeed(
+                "curl -s --fail --location http://localhost:8080/ | grep -i Xandikos"
+            )
+            xandikos_client.wait_for_unit("network.target")
+            xandikos_client.fail("curl --fail http://xandikos_proxy:8080/")
+            xandikos_client.succeed(
+                "curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/ | grep -i Xandikos"
+            )
+            xandikos_client.succeed(
+                "curl -s --fail -u xandikos:snakeOilPassword -H 'Host: xandikos.local' http://xandikos_proxy/xandikos/user/ | grep -i Xandikos"
+            )
+      '';
+    }
+)
diff --git a/nixpkgs/nixos/tests/xautolock.nix b/nixpkgs/nixos/tests/xautolock.nix
new file mode 100644
index 000000000000..cf81c4a1cf05
--- /dev/null
+++ b/nixpkgs/nixos/tests/xautolock.nix
@@ -0,0 +1,22 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+{
+  name = "xautolock";
+  meta.maintainers = [ ];
+
+  nodes.machine = {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+
+    test-support.displayManager.auto.user = "bob";
+    services.xserver.xautolock.enable = true;
+    services.xserver.xautolock.time = 1;
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_x()
+    machine.fail("pgrep xlock")
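+    # xautolock.time = 1 locks the session after one minute of inactivity, so xlock must be running after two minutes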
+    machine.sleep(120)
+    machine.succeed("pgrep xlock")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xfce.nix b/nixpkgs/nixos/tests/xfce.nix
new file mode 100644
index 000000000000..9620e9188cbf
--- /dev/null
+++ b/nixpkgs/nixos/tests/xfce.nix
@@ -0,0 +1,75 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "xfce";
+
+  nodes.machine =
+    { pkgs, ... }:
+
+    {
+      imports = [
+        ./common/user-account.nix
+      ];
+
+      services.xserver.enable = true;
+
+      services.xserver.displayManager = {
+        lightdm.enable = true;
+        autoLogin = {
+          enable = true;
+          user = "alice";
+        };
+      };
+
+      services.xserver.desktopManager.xfce.enable = true;
+      environment.systemPackages = [ pkgs.xfce.xfce4-whiskermenu-plugin ];
+
+      hardware.pulseaudio.enable = true; # needed for the getfacl test; /dev/snd/* exists without it, but udev doesn't apply ACLs to it then
+
+    };
+
+  enableOCR = true;
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.users.users.alice;
+    bus = "DBUS_SESSION_BUS_ADDRESS=unix:path=/run/user/${toString user.uid}/bus";
+  in ''
+      with subtest("Wait for login"):
+        machine.wait_for_x()
+        machine.wait_for_file("${user.home}/.Xauthority")
+        machine.succeed("xauth merge ${user.home}/.Xauthority")
+
+      with subtest("Check that logging in has given the user ownership of devices"):
+        machine.succeed("getfacl -p /dev/snd/timer | grep -q ${user.name}")
+
+      with subtest("Check if Xfce components actually start"):
+        machine.wait_for_window("xfce4-panel")
+        machine.wait_for_window("Desktop")
+        for i in ["xfwm4", "xfsettingsd", "xfdesktop", "xfce4-screensaver", "xfce4-notifyd", "xfconfd"]:
+          machine.wait_until_succeeds(f"pgrep -f {i}")
+
+      with subtest("Open whiskermenu"):
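+        # put whiskermenu into the first panel slot via xfconf, then restart the panel so it picks up the change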
+        machine.succeed("su - ${user.name} -c 'DISPLAY=:0 ${bus} xfconf-query -c xfce4-panel -p /plugins/plugin-1 -t string -s whiskermenu -n >&2 &'")
+        machine.succeed("su - ${user.name} -c 'DISPLAY=:0 ${bus} xfconf-query -c xfce4-panel -p /plugins/plugin-1/stay-on-focus-out -t bool -s true -n >&2 &'")
+        machine.succeed("su - ${user.name} -c 'DISPLAY=:0 ${bus} xfce4-panel -r >&2 &'")
+        machine.wait_until_succeeds("journalctl -b --grep 'xfce4-panel: Restarting' -t xsession")
+        machine.sleep(5)
+        machine.wait_until_succeeds("pgrep -f libwhiskermenu")
+        machine.succeed("su - ${user.name} -c 'DISPLAY=:0 ${bus} xfce4-popup-whiskermenu >&2 &'")
+        machine.wait_for_text('Mail Reader')
+        # Close the menu.
+        machine.succeed("su - ${user.name} -c 'DISPLAY=:0 ${bus} xfce4-popup-whiskermenu >&2 &'")
+
+      with subtest("Open Xfce terminal"):
+        machine.succeed("su - ${user.name} -c 'DISPLAY=:0 xfce4-terminal >&2 &'")
+        machine.wait_for_window("Terminal")
+
+      with subtest("Open Thunar"):
+        machine.succeed("su - ${user.name} -c 'DISPLAY=:0 thunar >&2 &'")
+        machine.wait_for_window("Thunar")
+        machine.wait_for_text('(Pictures|Public|Templates|Videos)')
+
+      with subtest("Check if any coredumps are found"):
+        machine.succeed("(coredumpctl --json=short 2>&1 || true) | grep 'No coredumps found'")
+        machine.sleep(10)
+        machine.screenshot("screen")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/xmonad-xdg-autostart.nix b/nixpkgs/nixos/tests/xmonad-xdg-autostart.nix
new file mode 100644
index 000000000000..2577a9ce2ea1
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmonad-xdg-autostart.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ lib, ... }: {
+  name = "xmonad-xdg-autostart";
+  meta.maintainers = with lib.maintainers; [ oxalica ];
+
+  nodes.machine = { pkgs, config, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = "none+xmonad";
+    services.xserver.windowManager.xmonad.enable = true;
+    services.xserver.desktopManager.runXdgAutostartIfNone = true;
+
+    environment.systemPackages = [
+      (pkgs.writeTextFile {
+        name = "test-xdg-autostart";
+        destination = "/etc/xdg/autostart/test-xdg-autostart.desktop";
+        text = ''
+          [Desktop Entry]
+          Name=test-xdg-autostart
+          Type=Application
+          Terminal=false
+          Exec=${pkgs.coreutils}/bin/touch ${config.users.users.alice.home}/xdg-autostart-executed
+        '';
+      })
+    ];
+  };
+
+  testScript = { nodes, ... }:
+    let
+      user = nodes.machine.config.users.users.alice;
+    in
+    ''
+      machine.wait_for_x()
+      machine.wait_for_file("${user.home}/xdg-autostart-executed")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/xmonad.nix b/nixpkgs/nixos/tests/xmonad.nix
new file mode 100644
index 000000000000..ec48c3e11275
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmonad.nix
@@ -0,0 +1,117 @@
+import ./make-test-python.nix ({ pkgs, ...}:
+
+let
+  mkConfig = name: keys: ''
+    import XMonad
+    import XMonad.Operations (restart)
+    import XMonad.Util.EZConfig
+    import XMonad.Util.SessionStart
+    import Control.Monad (when)
+    import Text.Printf (printf)
+    import System.Posix.Process (executeFile)
+    import System.Info (arch,os)
+    import System.Environment (getArgs)
+    import System.FilePath ((</>))
+
+    main = do
+      dirs <- getDirectories
+      launch (def { startupHook = startup } `additionalKeysP` myKeys) dirs
+
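+    -- touch the marker file on every (re)start; the first start only records that the session started, later restarts also spawn an xterm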
+    startup = isSessionStart >>= \sessInit ->
+      spawn "touch /tmp/${name}"
+        >> if sessInit then setSessionStarted else spawn "xterm"
+
+    myKeys = [${builtins.concatStringsSep ", " keys}]
+
+    compiledConfig = printf "xmonad-%s-%s" arch os
+
+    compileRestart resume = do
+      dirs <- asks directories
+
+      whenX (recompile dirs True) $
+        when resume writeStateToFile
+          *> catchIO
+            ( do
+                args <- getArgs
+                executeFile (cacheDir dirs </> compiledConfig) False args Nothing
+            )
+  '';
+
+  oldKeys =
+    [ ''("M-C-x", spawn "xterm")''
+      ''("M-q", restart "xmonad" True)''
+      ''("M-C-q", compileRestart True)''
+      ''("M-C-t", spawn "touch /tmp/somefile")'' # create somefile
+    ];
+
+  newKeys =
+    [ ''("M-C-x", spawn "xterm")''
+      ''("M-q", restart "xmonad" True)''
+      ''("M-C-q", compileRestart True)''
+      ''("M-C-r", spawn "rm /tmp/somefile")'' # delete somefile
+    ];
+
+  newConfig = pkgs.writeText "xmonad.hs" (mkConfig "newXMonad" newKeys);
+in {
+  name = "xmonad";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ nequissimus ivanbrennan ];
+  };
+
+  nodes.machine = { pkgs, ... }: {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+    test-support.displayManager.auto.user = "alice";
+    services.xserver.displayManager.defaultSession = "none+xmonad";
+    services.xserver.windowManager.xmonad = {
+      enable = true;
+      enableConfiguredRecompile = true;
+      enableContribAndExtras = true;
+      extraPackages = with pkgs.haskellPackages; haskellPackages: [ xmobar ];
+      config = mkConfig "oldXMonad" oldKeys;
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.machine.config.users.users.alice;
+  in ''
+    machine.wait_for_x()
+    machine.wait_for_file("${user.home}/.Xauthority")
+    machine.succeed("xauth merge ${user.home}/.Xauthority")
+    machine.send_key("alt-ctrl-x")
+    machine.wait_for_window("${user.name}.*machine")
+    machine.sleep(1)
+    machine.screenshot("terminal1")
+    machine.succeed("rm /tmp/oldXMonad")
+    machine.send_key("alt-q")
+    machine.wait_for_file("/tmp/oldXMonad")
+    machine.wait_for_window("${user.name}.*machine")
+    machine.sleep(1)
+    machine.screenshot("terminal2")
+
+    # /tmp/somefile should not exist yet
+    machine.fail("stat /tmp/somefile")
+
+    # original config has a keybinding that creates somefile
+    machine.send_key("alt-ctrl-t")
+    machine.wait_for_file("/tmp/somefile")
+
+    # set up the new config
+    machine.succeed("mkdir -p ${user.home}/.config/xmonad")
+    machine.copy_from_host("${newConfig}", "${user.home}/.config/xmonad/xmonad.hs")
+
+    # recompile xmonad using the new config
+    machine.send_key("alt-ctrl-q")
+    machine.wait_for_file("/tmp/newXMonad")
+
+    # new config has a keybinding that deletes somefile
+    machine.send_key("alt-ctrl-r")
+    machine.wait_until_fails("stat /tmp/somefile", timeout=30)
+
+    # restart with the old config, and confirm the old keybinding is back
+    machine.succeed("rm /tmp/oldXMonad")
+    machine.send_key("alt-q")
+    machine.wait_for_file("/tmp/oldXMonad")
+    machine.send_key("alt-ctrl-t")
+    machine.wait_for_file("/tmp/somefile")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xmpp/ejabberd.nix b/nixpkgs/nixos/tests/xmpp/ejabberd.nix
new file mode 100644
index 000000000000..1a807b27b6f6
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmpp/ejabberd.nix
@@ -0,0 +1,278 @@
+import ../make-test-python.nix ({ pkgs, ... }: {
+  name = "ejabberd";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+  nodes = {
+    client = { nodes, pkgs, ... }: {
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} example.com
+      '';
+
+      environment.systemPackages = [
+        (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
+      ];
+    };
+    server = { config, pkgs, ... }: {
+      networking.extraHosts = ''
+        ${config.networking.primaryIPAddress} example.com
+      '';
+
+      services.ejabberd = {
+        enable = true;
+        configFile = "/etc/ejabberd.yml";
+      };
+
+      environment.etc."ejabberd.yml" = {
+        user = "ejabberd";
+        mode = "0600";
+        text = ''
+          loglevel: 3
+
+          hosts:
+            - "example.com"
+
+          listen:
+            -
+              port: 5222
+              module: ejabberd_c2s
+              zlib: false
+              max_stanza_size: 65536
+              shaper: c2s_shaper
+              access: c2s
+            -
+              port: 5269
+              ip: "::"
+              module: ejabberd_s2s_in
+            -
+              port: 5347
+              ip: "127.0.0.1"
+              module: ejabberd_service
+              access: local
+              shaper: fast
+            -
+              port: 5444
+              module: ejabberd_http
+              request_handlers:
+                "/upload": mod_http_upload
+
+          ## Disabling digest-md5 SASL authentication. digest-md5 requires plain-text
+          ## password storage (see auth_password_format option).
+          disable_sasl_mechanisms: "digest-md5"
+
+          ## Outgoing S2S options
+          ## Preferred address families (which to try first) and connect timeout
+          ## in seconds.
+          outgoing_s2s_families:
+             - ipv4
+             - ipv6
+
+          ## auth_method: Method used to authenticate the users.
+          ## The default method is the internal.
+          ## If you want to use a different method,
+          ## comment this line and enable the correct ones.
+          auth_method: internal
+
+          ## Store the plain passwords or hashed for SCRAM:
+          ## auth_password_format: plain
+          auth_password_format: scram
+
+          ###'  TRAFFIC SHAPERS
+          shaper:
+            # in B/s
+            normal: 1000000
+            fast: 50000000
+
+          ## This option specifies the maximum number of elements in the queue
+          ## of the FSM. Refer to the documentation for details.
+          max_fsm_queue: 1000
+
+          ###'   ACCESS CONTROL LISTS
+          acl:
+            ## The 'admin' ACL grants administrative privileges to XMPP accounts.
+            ## You can put here as many accounts as you want.
+            admin:
+               user:
+                 - "root": "example.com"
+
+            ## Local users: don't modify this.
+            local:
+              user_regexp: ""
+
+            ## Loopback network
+            loopback:
+              ip:
+                - "127.0.0.0/8"
+                - "::1/128"
+                - "::FFFF:127.0.0.1/128"
+
+          ###'  SHAPER RULES
+          shaper_rules:
+            ## Maximum number of simultaneous sessions allowed for a single user:
+            max_user_sessions: 10
+            ## Maximum number of offline messages that users can have:
+            max_user_offline_messages:
+              - 5000: admin
+              - 1024
+            ## For C2S connections, all users except admins use the "normal" shaper
+            c2s_shaper:
+              - none: admin
+              - normal
+            ## All S2S connections use the "fast" shaper
+            s2s_shaper: fast
+
+          ###'  ACCESS RULES
+          access_rules:
+            ## This rule allows access only for local users:
+            local:
+              - allow: local
+            ## Only non-blocked users can use c2s connections:
+            c2s:
+              - deny: blocked
+              - allow
+            ## Only admins can send announcement messages:
+            announce:
+              - allow: admin
+            ## Only admins can use the configuration interface:
+            configure:
+              - allow: admin
+            ## Only accounts of the local ejabberd server can create rooms:
+            muc_create:
+              - allow: local
+            ## Only accounts on the local ejabberd server can create Pubsub nodes:
+            pubsub_createnode:
+              - allow: local
+            ## In-band registration allows registration of any possible username.
+            ## To disable in-band registration, replace 'allow' with 'deny'.
+            register:
+              - allow
+            ## Only allow to register from localhost
+            trusted_network:
+              - allow: loopback
+
+          ## ===============
+          ## API PERMISSIONS
+          ## ===============
+          ##
+          ## This section allows you to define who and using what method
+          ## can execute commands offered by ejabberd.
+          ##
+          ## By default "console commands" section allow executing all commands
+          ## issued using ejabberdctl command, and "admin access" section allows
+          ## users in admin acl that connect from 127.0.0.1 to  execute all
+          ## commands except start and stop with any available access method
+          ## (ejabberdctl, http-api, xmlrpc depending what is enabled on server).
+          ##
+          ## If you remove "console commands" there will be one added by
+          ## default allowing executing all commands, but if you just change
+          ## permissions in it, version from config file will be used instead
+          ## of default one.
+          ##
+          api_permissions:
+            "console commands":
+              from:
+                - ejabberd_ctl
+              who: all
+              what: "*"
+
+          language: "en"
+
+          ###'  MODULES
+          ## Modules enabled in all ejabberd virtual hosts.
+          modules:
+            mod_adhoc: {}
+            mod_announce: # recommends mod_adhoc
+              access: announce
+            mod_blocking: {} # requires mod_privacy
+            mod_caps: {}
+            mod_carboncopy: {}
+            mod_client_state: {}
+            mod_configure: {} # requires mod_adhoc
+            ## mod_delegation: {} # for xep0356
+            mod_disco: {}
+            #mod_irc:
+            #  host: "irc.@HOST@"
+            #  default_encoding: "utf-8"
+            ## mod_bosh: {}
+            ## mod_http_fileserver:
+            ##   docroot: "/var/www"
+            ##   accesslog: "/var/log/ejabberd/access.log"
+            mod_http_upload:
+              thumbnail: false # otherwise needs the identify command from ImageMagick installed
+              put_url: "http://@HOST@:5444/upload"
+            ##   # docroot: "@HOME@/upload"
+            #mod_http_upload_quota:
+            #  max_days: 14
+            mod_last: {}
+            ## XEP-0313: Message Archive Management
+            ## You might want to setup a SQL backend for MAM because the mnesia database is
+            ## limited to 2GB which might be exceeded on large servers
+            mod_mam: {}
+            mod_muc:
+              host: "muc.@HOST@"
+              access:
+                - allow
+              access_admin:
+                - allow: admin
+              access_create: muc_create
+              access_persistent: muc_create
+            mod_muc_admin: {}
+            mod_muc_log: {}
+            mod_offline:
+              access_max_user_messages: max_user_offline_messages
+            mod_ping: {}
+            ## mod_pres_counter:
+            ##   count: 5
+            ##   interval: 60
+            mod_privacy: {}
+            mod_private: {}
+            mod_roster:
+                versioning: true
+            mod_shared_roster: {}
+            mod_stats: {}
+            mod_time: {}
+            mod_vcard:
+              search: false
+            mod_vcard_xupdate: {}
+            ## Convert all avatars posted by Android clients from WebP to JPEG
+            mod_avatar: {}
+            #  convert:
+            #    webp: jpeg
+            mod_version: {}
+            mod_stream_mgmt: {}
+            ##   The module for S2S dialback (XEP-0220). Please note that you cannot
+            ##   rely solely on dialback if you want to federate with other servers,
+            ##   because a lot of servers have dialback disabled and instead rely on
+            ##   PKIX authentication. Make sure you have proper certificates installed
+            ##   and check your accessibility at https://check.messaging.one/
+            mod_s2s_dialback: {}
+            mod_pubsub:
+              plugins:
+                - "pep"
+            mod_push: {}
+        '';
+      };
+
+      networking.firewall.enable = false;
+    };
+  };
+
+  testScript = { nodes, ... }: ''
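+    # run ejabberdctl as the ejabberd user by passing it to su as the login shell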
+    ejabberd_prefix = "su ejabberd -s $(which ejabberdctl) "
+
+    server.wait_for_unit("ejabberd.service")
+
+    assert "status: started" in server.succeed(ejabberd_prefix + "status")
+
+    server.succeed(
+        ejabberd_prefix + "register azurediamond example.com hunter2",
+        ejabberd_prefix + "register cthon98 example.com nothunter2",
+    )
+    server.fail(ejabberd_prefix + "register asdf wrong.domain")
+    client.succeed("send-message")
+    server.succeed(
+        ejabberd_prefix + "unregister cthon98 example.com",
+        ejabberd_prefix + "unregister azurediamond example.com",
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xmpp/prosody-mysql.nix b/nixpkgs/nixos/tests/xmpp/prosody-mysql.nix
new file mode 100644
index 000000000000..40f3e308a04e
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmpp/prosody-mysql.nix
@@ -0,0 +1,124 @@
+let
+  cert = pkgs: pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=example.com/CN=uploads.example.com/CN=conference.example.com' -days 36500
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+  createUsers = pkgs: pkgs.writeScriptBin "create-prosody-users" ''
+    #!${pkgs.bash}/bin/bash
+    set -e
+
+    # Creates and sets passwords for the two XMPP test users.
+    #
+    # Doing this in a bash script instead of in the test script allows us
+    # to easily provision the users when running the test interactively.
+
+    prosodyctl register cthon98 example.com nothunter2
+    prosodyctl register azurediamond example.com hunter2
+  '';
+  delUsers = pkgs: pkgs.writeScriptBin "delete-prosody-users" ''
+    #!${pkgs.bash}/bin/bash
+    set -e
+
+    # Deletes the test users.
+    #
+    # Doing this in a bash script instead of in the test script allows us
+    # to easily remove the users when running the test interactively.
+
+    prosodyctl deluser cthon98@example.com
+    prosodyctl deluser azurediamond@example.com
+  '';
+in import ../make-test-python.nix {
+  name = "prosody-mysql";
+  nodes = {
+    client = { nodes, pkgs, config, ... }: {
+      security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      console.keyMap = "fr-bepo";
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} example.com
+        ${nodes.server.config.networking.primaryIPAddress} conference.example.com
+        ${nodes.server.config.networking.primaryIPAddress} uploads.example.com
+      '';
+      environment.systemPackages = [
+        (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = nodes.server.config.networking.primaryIPAddress; })
+      ];
+    };
+    server = { config, pkgs, ... }: {
+      nixpkgs.overlays = [
+        (self: super: {
+          prosody = super.prosody.override {
+            withExtraLuaPackages = p: [ p.luadbi-mysql ];
+          };
+        })
+      ];
+      security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      console.keyMap = "fr-bepo";
+      networking.extraHosts = ''
+        ${config.networking.primaryIPAddress} example.com
+        ${config.networking.primaryIPAddress} conference.example.com
+        ${config.networking.primaryIPAddress} uploads.example.com
+      '';
+      networking.firewall.enable = false;
+      environment.systemPackages = [
+        (createUsers pkgs)
+        (delUsers pkgs)
+      ];
+      services.prosody = {
+        enable = true;
+        ssl.cert = "${cert pkgs}/cert.pem";
+        ssl.key = "${cert pkgs}/key.pem";
+        virtualHosts.example = {
+          domain = "example.com";
+          enabled = true;
+          ssl.cert = "${cert pkgs}/cert.pem";
+          ssl.key = "${cert pkgs}/key.pem";
+        };
+        muc = [
+          {
+            domain = "conference.example.com";
+          }
+        ];
+        uploadHttp = {
+          domain = "uploads.example.com";
+        };
+        extraConfig = ''
+          storage = "sql"
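+          -- "mysql" is the database node defined below; test VMs can reach each other by node name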
+          sql = {
+            driver = "MySQL";
+            database = "prosody";
+            host = "mysql";
+            port = 3306;
+            username = "prosody";
+            password = "password123";
+          };
+        '';
+      };
+    };
+    mysql = { config, pkgs, ... }: {
+      networking.firewall.enable = false;
+      services.mysql = {
+        enable = true;
+        initialScript = pkgs.writeText "mysql_init.sql" ''
+          CREATE DATABASE prosody;
+          CREATE USER 'prosody'@'server' IDENTIFIED BY 'password123';
+          GRANT ALL PRIVILEGES ON prosody.* TO 'prosody'@'server';
+          FLUSH PRIVILEGES;
+        '';
+        package = pkgs.mariadb;
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    # Check with mysql storage
+    mysql.wait_for_unit("mysql.service")
+    server.wait_for_unit("prosody.service")
+    server.succeed('prosodyctl status | grep "Prosody is running"')
+
+    server.succeed("create-prosody-users")
+    client.succeed("send-message")
+    server.succeed("delete-prosody-users")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/xmpp/prosody.nix b/nixpkgs/nixos/tests/xmpp/prosody.nix
new file mode 100644
index 000000000000..045ae6430fd4
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmpp/prosody.nix
@@ -0,0 +1,93 @@
+let
+  cert = pkgs: pkgs.runCommand "selfSignedCerts" { buildInputs = [ pkgs.openssl ]; } ''
+    openssl req -x509 -newkey rsa:4096 -keyout key.pem -out cert.pem -nodes -subj '/CN=example.com/CN=uploads.example.com/CN=conference.example.com' -days 36500
+    mkdir -p $out
+    cp key.pem cert.pem $out
+  '';
+  createUsers = pkgs: pkgs.writeScriptBin "create-prosody-users" ''
+    #!${pkgs.bash}/bin/bash
+    set -e
+
+    # Creates and sets passwords for the two XMPP test users.
+    #
+    # Doing this in a bash script instead of in the test script allows us
+    # to easily provision the users when running the test interactively.
+
+    prosodyctl register cthon98 example.com nothunter2
+    prosodyctl register azurediamond example.com hunter2
+  '';
+  delUsers = pkgs: pkgs.writeScriptBin "delete-prosody-users" ''
+    #!${pkgs.bash}/bin/bash
+    set -e
+
+    # Deletes the test users.
+    #
+    # Doing this in a bash script instead of in the test script allows us
+    # to easily remove the users when running the test interactively.
+
+    prosodyctl deluser cthon98@example.com
+    prosodyctl deluser azurediamond@example.com
+  '';
+in import ../make-test-python.nix {
+  name = "prosody";
+  nodes = {
+    client = { nodes, pkgs, config, ... }: {
+      security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      console.keyMap = "fr-bepo";
+      networking.extraHosts = ''
+        ${nodes.server.config.networking.primaryIPAddress} example.com
+        ${nodes.server.config.networking.primaryIPAddress} conference.example.com
+        ${nodes.server.config.networking.primaryIPAddress} uploads.example.com
+      '';
+      environment.systemPackages = [
+        (pkgs.callPackage ./xmpp-sendmessage.nix { connectTo = "example.com"; })
+      ];
+    };
+    server = { config, pkgs, ... }: {
+      security.pki.certificateFiles = [ "${cert pkgs}/cert.pem" ];
+      console.keyMap = "fr-bepo";
+      networking.extraHosts = ''
+        ${config.networking.primaryIPAddress} example.com
+        ${config.networking.primaryIPAddress} conference.example.com
+        ${config.networking.primaryIPAddress} uploads.example.com
+      '';
+      networking.firewall.enable = false;
+      environment.systemPackages = [
+        (createUsers pkgs)
+        (delUsers pkgs)
+      ];
+      services.prosody = {
+        enable = true;
+        ssl.cert = "${cert pkgs}/cert.pem";
+        ssl.key = "${cert pkgs}/key.pem";
+        virtualHosts.example = {
+          domain = "example.com";
+          enabled = true;
+          ssl.cert = "${cert pkgs}/cert.pem";
+          ssl.key = "${cert pkgs}/key.pem";
+        };
+        muc = [
+          {
+            domain = "conference.example.com";
+          }
+        ];
+        uploadHttp = {
+          domain = "uploads.example.com";
+        };
+      };
+    };
+  };
+
+  testScript = { nodes, ... }: ''
+    # Check with sqlite storage
+    start_all()
+    server.wait_for_unit("prosody.service")
+    server.succeed('prosodyctl status | grep "Prosody is running"')
+
+    server.succeed("create-prosody-users")
+    client.succeed("send-message")
+    server.succeed("delete-prosody-users")
+  '';
+}
diff --git a/nixpkgs/nixos/tests/xmpp/xmpp-sendmessage.nix b/nixpkgs/nixos/tests/xmpp/xmpp-sendmessage.nix
new file mode 100644
index 000000000000..8ccac0612491
--- /dev/null
+++ b/nixpkgs/nixos/tests/xmpp/xmpp-sendmessage.nix
@@ -0,0 +1,90 @@
+{ writeScriptBin, writeText, python3, connectTo ? "localhost" }:
+let
+  dummyFile = writeText "dummy-file" ''
+    Dear dog,
+
+    Please find this *really* important attachment.
+
+    Yours truly,
+    Bob
+  '';
+in writeScriptBin "send-message" ''
+#!${(python3.withPackages (ps: [ ps.slixmpp ])).interpreter}
+import logging
+import sys
+import signal
+from types import MethodType
+
+from slixmpp import ClientXMPP
+from slixmpp.exceptions import IqError, IqTimeout
+
+
+class CthonTest(ClientXMPP):
+
+    def __init__(self, jid, password):
+        ClientXMPP.__init__(self, jid, password)
+        self.add_event_handler("session_start", self.session_start)
+        self.test_succeeded = False
+
+    async def session_start(self, event):
+        try:
+            # Exceptions in event handlers are printed to stderr but not
+            # propagated, they do not make the script terminate with a non-zero
+            # exit code. We use the `test_succeeded` flag as a workaround and
+            # check it later at the end of the script to exit with a proper
+            # exit code.
+            # Additionally, this flag ensures that this event handler has been
+            # actually run by ClientXMPP, which may well not be the case.
+            await self.test_xmpp_server()
+            self.test_succeeded = True
+        finally:
+            # Even if an exception happens in `test_xmpp_server()`, we still
+            # need to disconnect explicitly, otherwise the process will hang
+            # forever.
+            self.disconnect(wait=True)
+
+    async def test_xmpp_server(self):
+        log = logging.getLogger(__name__)
+        self.send_presence()
+        self.get_roster()
+        # Sending a test message
+        self.send_message(mto="azurediamond@example.com", mbody="Hello, this is dog.", mtype="chat")
+        log.info('Message sent')
+
+        # Test http upload (XEP_0363)
+        try:
+            url = await self['xep_0363'].upload_file("${dummyFile}",timeout=10)
+        except:
+            log.error("ERROR: Cannot run upload command. XEP_0363 seems broken")
+            sys.exit(1)
+        log.info('Upload success!')
+
+        # Test MUC
+        # TODO: use join_muc_wait() after slixmpp 1.8.0 is released.
+        self.plugin['xep_0045'].join_muc('testMucRoom', 'cthon98')
+        log.info('MUC join success!')
+        log.info('XMPP SCRIPT TEST SUCCESS')
+
+def timeout_handler(signalnum, stackframe):
+    print('ERROR: xmpp-sendmessage timed out')
+    sys.exit(1)
+
+if __name__ == '__main__':
+    signal.signal(signal.SIGALRM, timeout_handler)
+    signal.alarm(120)
+    logging.basicConfig(level=logging.DEBUG,
+                        format='%(levelname)-8s %(message)s')
+
+    ct = CthonTest('cthon98@example.com', 'nothunter2')
+    ct.register_plugin('xep_0071')
+    ct.register_plugin('xep_0128')
+    # HTTP Upload
+    ct.register_plugin('xep_0363')
+    # MUC
+    ct.register_plugin('xep_0045')
+    ct.connect(("${connectTo}", 5222))
+    ct.process(forever=False)
+
+    if not ct.test_succeeded:
+        sys.exit(1)
+''
diff --git a/nixpkgs/nixos/tests/xpadneo.nix b/nixpkgs/nixos/tests/xpadneo.nix
new file mode 100644
index 000000000000..c7b72831fce8
--- /dev/null
+++ b/nixpkgs/nixos/tests/xpadneo.nix
@@ -0,0 +1,18 @@
+import ./make-test-python.nix ({ lib, pkgs, ... }: {
+  name = "xpadneo";
+  meta.maintainers = with lib.maintainers; [ kira-bruneau ];
+
+  nodes = {
+    machine = {
+      config.hardware.xpadneo.enable = true;
+    };
+  };
+
+  # This is just a sanity check to make sure the module was
+  # loaded. We'd have to find some way to mock an xbox controller if
+  # we wanted more in-depth testing.
+  testScript = ''
+    machine.start()
+    machine.succeed("modinfo hid_xpadneo | grep 'version:\s\+${pkgs.linuxPackages.xpadneo.version}'")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xrdp.nix b/nixpkgs/nixos/tests/xrdp.nix
new file mode 100644
index 000000000000..f277d4b79525
--- /dev/null
+++ b/nixpkgs/nixos/tests/xrdp.nix
@@ -0,0 +1,47 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "xrdp";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes = {
+    server = { pkgs, ... }: {
+      imports = [ ./common/user-account.nix ];
+      services.xrdp.enable = true;
+      services.xrdp.defaultWindowManager = "${pkgs.xterm}/bin/xterm";
+      networking.firewall.allowedTCPPorts = [ 3389 ];
+    };
+
+    client = { pkgs, ... }: {
+      imports = [ ./common/x11.nix ./common/user-account.nix ];
+      test-support.displayManager.auto.user = "alice";
+      environment.systemPackages = [ pkgs.freerdp ];
+      services.xrdp.enable = true;
+      services.xrdp.defaultWindowManager = "${pkgs.icewm}/bin/icewm";
+    };
+  };
+
+  testScript = { nodes, ... }: let
+    user = nodes.client.config.users.users.alice;
+  in ''
+    start_all()
+
+    client.wait_for_x()
+    client.wait_for_file("${user.home}/.Xauthority")
+    client.succeed("xauth merge ${user.home}/.Xauthority")
+
+    client.sleep(5)
+
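+    # type the freerdp command into an xterm; /cert-tofu accepts the server certificate on first use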
+    client.execute("xterm >&2 &")
+    client.sleep(1)
+    client.send_chars("xfreerdp /cert-tofu /w:640 /h:480 /v:127.0.0.1 /u:${user.name} /p:${user.password}\n")
+    client.sleep(5)
+    client.screenshot("localrdp")
+
+    client.execute("xterm >&2 &")
+    client.sleep(1)
+    client.send_chars("xfreerdp /cert-tofu /w:640 /h:480 /v:server /u:${user.name} /p:${user.password}\n")
+    client.sleep(5)
+    client.screenshot("remoterdp")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xss-lock.nix b/nixpkgs/nixos/tests/xss-lock.nix
new file mode 100644
index 000000000000..e4e41a5aa797
--- /dev/null
+++ b/nixpkgs/nixos/tests/xss-lock.nix
@@ -0,0 +1,40 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "xss-lock";
+  meta.maintainers = [ ];
+
+  nodes = {
+    simple = {
+      imports = [ ./common/x11.nix ./common/user-account.nix ];
+      programs.xss-lock.enable = true;
+      test-support.displayManager.auto.user = "alice";
+    };
+
+    custom_lockcmd = { pkgs, ... }: {
+      imports = [ ./common/x11.nix ./common/user-account.nix ];
+      test-support.displayManager.auto.user = "alice";
+
+      programs.xss-lock = {
+        enable = true;
+        extraOptions = [ "-n" "${pkgs.libnotify}/bin/notify-send 'About to sleep!'"];
+        lockerCommand = "${pkgs.xlockmore}/bin/xlock -mode ant";
+      };
+    };
+  };
+
+  testScript = ''
+    def perform_xsslock_test(machine, lockCmd):
+        machine.start()
+        machine.wait_for_x()
+        machine.wait_for_unit("xss-lock.service", "alice")
+        machine.fail(f"pgrep {lockCmd}")
+        machine.succeed("su -l alice -c 'xset dpms force standby'")
+        machine.wait_until_succeeds(f"pgrep {lockCmd}")
+
+
+    with subtest("simple"):
+        perform_xsslock_test(simple, "i3lock")
+
+    with subtest("custom_cmd"):
+        perform_xsslock_test(custom_lockcmd, "xlock")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/xterm.nix b/nixpkgs/nixos/tests/xterm.nix
new file mode 100644
index 000000000000..745d33e8a0d5
--- /dev/null
+++ b/nixpkgs/nixos/tests/xterm.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "xterm";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ nequissimus ];
+  };
+
+  nodes.machine = { pkgs, ... }:
+    {
+      imports = [ ./common/x11.nix ];
+      services.xserver.desktopManager.xterm.enable = false;
+    };
+
+  testScript =
+    ''
+      machine.wait_for_x()
+      machine.succeed("DISPLAY=:0 xterm -title testterm -class testterm -fullscreen >&2 &")
+      machine.sleep(2)
+      machine.send_chars("echo $XTERM_VERSION >> /tmp/xterm_version\n")
+      machine.wait_for_file("/tmp/xterm_version")
+      assert "${pkgs.xterm.version}" in machine.succeed("cat /tmp/xterm_version")
+      machine.screenshot("window")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/xxh.nix b/nixpkgs/nixos/tests/xxh.nix
new file mode 100644
index 000000000000..3af8e53779e3
--- /dev/null
+++ b/nixpkgs/nixos/tests/xxh.nix
@@ -0,0 +1,67 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+
+  let
+    inherit (import ./ssh-keys.nix pkgs) snakeOilPrivateKey snakeOilPublicKey;
+    xxh-shell-zsh = pkgs.stdenv.mkDerivation {
+      pname = "xxh-shell-zsh";
+      version = "";
+      src = pkgs.fetchFromGitHub {
+        owner = "xxh";
+        repo = "xxh-shell-zsh";
+        # rarely gets updated; when it does, we can simply bump the hash
+        rev = "91e1f84f8d6e0852c3235d4813f341230cac439f";
+        sha256 = "sha256-Y1FrIRxTd0yooK+ZzKcCd6bLSy5E2fRXYAzrIsm7rIc=";
+      };
+
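+      # patch the plugin's build script to use the pre-fetched zsh tarball
+      # instead of downloading it, so the build works offline inside the VM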
+      postPatch = ''
+        substituteInPlace build.sh \
+          --replace "echo Install wget or curl" "cp ${zsh-portable-binary} zsh-5.8-linux-x86_64.tar.gz" \
+          --replace "command -v curl" "command -v this-should-not-trigger"
+      '';
+
+      installPhase = ''
+        mkdir -p $out
+        mv * $out/
+      '';
+    };
+
+    zsh-portable-binary = pkgs.fetchurl {
+      # kept in sync with https://github.com/xxh/xxh-shell-zsh/tree/master/build.sh#L27
+      url = "https://github.com/romkatv/zsh-bin/releases/download/v3.0.1/zsh-5.8-linux-x86_64.tar.gz";
+      sha256 = "sha256-i8flMd2Isc0uLoeYQNDnOGb/kK3oTFVqQgIx7aOAIIo=";
+    };
+  in
+  {
+    name = "xxh";
+    meta = with lib.maintainers; {
+      maintainers = [ lom ];
+    };
+
+    nodes = {
+      server = { ... }: {
+        services.openssh.enable = true;
+        users.users.root.openssh.authorizedKeys.keys = [ snakeOilPublicKey ];
+      };
+
+      client = { ... }: {
+        programs.zsh.enable = true;
+        users.users.root.shell = pkgs.zsh;
+        environment.systemPackages = with pkgs; [ xxh git ];
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      client.succeed("mkdir -m 700 /root/.ssh")
+
+      client.succeed(
+         "cat ${snakeOilPrivateKey} > /root/.ssh/id_ecdsa"
+      )
+      client.succeed("chmod 600 /root/.ssh/id_ecdsa")
+
+      server.wait_for_unit("sshd")
+
+      client.succeed("xxh server -i /root/.ssh/id_ecdsa +hc \'echo $0\' +i +s zsh +I xxh-shell-zsh+path+${xxh-shell-zsh} | grep -Fq '/root/.xxh/.xxh/shells/xxh-shell-zsh/build/zsh-bin/bin/zsh'")
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/yabar.nix b/nixpkgs/nixos/tests/yabar.nix
new file mode 100644
index 000000000000..212a8ce4bbf5
--- /dev/null
+++ b/nixpkgs/nixos/tests/yabar.nix
@@ -0,0 +1,27 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }: {
+  name = "yabar";
+  meta.maintainers = [ ];
+
+  nodes.machine = {
+    imports = [ ./common/x11.nix ./common/user-account.nix ];
+
+    test-support.displayManager.auto.user = "bob";
+
+    programs.yabar.enable = true;
+    programs.yabar.bars = {
+      top.indicators.date.exec = "YABAR_DATE";
+    };
+  };
+
+  testScript = ''
+    machine.start()
+    machine.wait_for_x()
+
+    # confirm proper startup
+    machine.wait_for_unit("yabar.service", "bob")
+    machine.sleep(10)
+    machine.wait_for_unit("yabar.service", "bob")
+
+    machine.screenshot("top_bar")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/yggdrasil.nix b/nixpkgs/nixos/tests/yggdrasil.nix
new file mode 100644
index 000000000000..70d148380bf7
--- /dev/null
+++ b/nixpkgs/nixos/tests/yggdrasil.nix
@@ -0,0 +1,172 @@
+let
+  aliceIp6 = "202:b70:9b0b:cf34:f93c:8f18:bbfd:7034";
+  aliceKeys = {
+    PublicKey = "3e91ec9e861960d86e1ce88051f97c435bdf2859640ab681dfa906eb45ad5182";
+    PrivateKey = "a867f9e078e4ce58d310cf5acd4622d759e2a21df07e1d6fc380a2a26489480d3e91ec9e861960d86e1ce88051f97c435bdf2859640ab681dfa906eb45ad5182";
+  };
+  bobIp6 = "202:a483:73a4:9f2d:a559:4a19:bc9:8458";
+  bobPrefix = "302:a483:73a4:9f2d";
+  bobConfig = {
+    InterfacePeers = {
+      eth1 = [ "tcp://192.168.1.200:12345" ];
+    };
+    MulticastInterfaces = [ {
+      Regex = ".*";
+      Beacon = true;
+      Listen = true;
+      Port = 54321;
+      Priority = 0;
+    } ];
+    PublicKey = "2b6f918b6c1a4b54d6bcde86cf74e074fb32ead4ee439b7930df2aa60c825186";
+    PrivateKey = "0c4a24acd3402722ce9277ed179f4a04b895b49586493c25fbaed60653d857d62b6f918b6c1a4b54d6bcde86cf74e074fb32ead4ee439b7930df2aa60c825186";
+  };
+  danIp6 = bobPrefix + "::2";
+
+in import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "yggdrasil";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ gazally ];
+  };
+
+  nodes = rec {
+    # Alice is listening for peerings on a specified port,
+    # but has multicast peering disabled.  Alice has part of her
+    # yggdrasil config in Nix and part of it in a file.
+    alice =
+      { ... }:
+      {
+        networking = {
+          interfaces.eth1.ipv4.addresses = [{
+            address = "192.168.1.200";
+            prefixLength = 24;
+          }];
+          firewall.allowedTCPPorts = [ 80 12345 ];
+        };
+        services.httpd.enable = true;
+        services.httpd.adminAddr = "foo@example.org";
+
+        services.yggdrasil = {
+          enable = true;
+          settings = {
+            Listen = ["tcp://0.0.0.0:12345"];
+            MulticastInterfaces = [ ];
+          };
+          configFile = toString (pkgs.writeTextFile {
+                         name = "yggdrasil-alice-conf";
+                         text = builtins.toJSON aliceKeys;
+                       });
+        };
+      };
+
+    # Bob is set up to peer with Alice, and also to do local multicast
+    # peering.  Bob's yggdrasil config is in a file.
+    bob =
+      { ... }:
+      {
+        networking.firewall.allowedTCPPorts = [ 54321 ];
+        services.yggdrasil = {
+          enable = true;
+          openMulticastPort = true;
+          configFile = toString (pkgs.writeTextFile {
+                         name = "yggdrasil-bob-conf";
+                         text = builtins.toJSON bobConfig;
+                       });
+        };
+
+        boot.kernel.sysctl."net.ipv6.conf.all.forwarding" = 1;
+
+        networking = {
+          bridges.br0.interfaces = [ ];
+          interfaces.br0 = {
+            ipv6.addresses = [{
+              address = bobPrefix + "::1";
+              prefixLength = 64;
+            }];
+          };
+        };
+
+        # dan is a node inside a container running on bob's host.
+        containers.dan = {
+          autoStart = true;
+          privateNetwork = true;
+          hostBridge = "br0";
+          config = { config, pkgs, ... }: {
+            networking.interfaces.eth0.ipv6 = {
+              addresses = [{
+                address = bobPrefix + "::2";
+                prefixLength = 64;
+              }];
+              routes = [{
+                address = "200::";
+                prefixLength = 7;
+                via = bobPrefix + "::1";
+              }];
+            };
+            services.httpd.enable = true;
+            services.httpd.adminAddr = "foo@example.org";
+            networking.firewall.allowedTCPPorts = [ 80 ];
+          };
+        };
+      };
+
+    # Carol only does local peering.  Carol's yggdrasil config is all Nix.
+    carol =
+      { ... }:
+      {
+        networking.firewall.allowedTCPPorts = [ 43210 ];
+        services.yggdrasil = {
+          enable = true;
+          extraArgs = [ "-loglevel" "error" ];
+          denyDhcpcdInterfaces = [ "ygg0" ];
+          settings = {
+            IfTAPMode = true;
+            IfName = "ygg0";
+            MulticastInterfaces = [
+              {
+                Port = 43210;
+              }
+            ];
+            openMulticastPort = true;
+          };
+          persistentKeys = true;
+        };
+      };
+    };
+
+  testScript =
+    ''
+      import re
+
+      # Give Alice a head start so she is ready when Bob calls.
+      alice.start()
+      alice.wait_for_unit("yggdrasil.service")
+
+      bob.start()
+      carol.start()
+      bob.wait_for_unit("default.target")
+      carol.wait_for_unit("yggdrasil.service")
+
+      ip_addr_show = "ip -o -6 addr show dev ygg0 scope global"
+      carol.wait_until_succeeds(f"[ `{ip_addr_show} | grep -v tentative | wc -l` -ge 1 ]")
+      carol_ip6 = re.split(" +|/", carol.succeed(ip_addr_show))[3]
+
+      # If Alice can talk to Carol, then Bob's outbound peering and Carol's
+      # local peering have succeeded and everybody is connected.
+      alice.wait_until_succeeds(f"ping -c 1 {carol_ip6}")
+      alice.succeed("ping -c 1 ${bobIp6}")
+
+      bob.succeed("ping -c 1 ${aliceIp6}")
+      bob.succeed(f"ping -c 1 {carol_ip6}")
+
+      carol.succeed("ping -c 1 ${aliceIp6}")
+      carol.succeed("ping -c 1 ${bobIp6}")
+      carol.succeed("ping -c 1 ${bobPrefix}::1")
+      carol.succeed("ping -c 8 ${danIp6}")
+
+      carol.fail("journalctl -u dhcpcd | grep ygg0")
+
+      alice.wait_for_unit("httpd.service")
+      carol.succeed("curl --fail -g http://[${aliceIp6}]")
+      carol.succeed("curl --fail -g http://[${danIp6}]")
+    '';
+})
diff --git a/nixpkgs/nixos/tests/zammad.nix b/nixpkgs/nixos/tests/zammad.nix
new file mode 100644
index 000000000000..7a2d40e82b3e
--- /dev/null
+++ b/nixpkgs/nixos/tests/zammad.nix
@@ -0,0 +1,60 @@
+import ./make-test-python.nix (
+  { lib, pkgs, ... }:
+
+  {
+    name = "zammad";
+
+    meta.maintainers = with lib.maintainers; [ garbas taeer n0emis ];
+
+    nodes.machine = { config, ... }: {
+      services.zammad.enable = true;
+      services.zammad.secretKeyBaseFile = pkgs.writeText "secret" ''
+        52882ef142066e09ab99ce816ba72522e789505caba224a52d750ec7dc872c2c371b2fd19f16b25dfbdd435a4dd46cb3df9f82eb63fafad715056bdfe25740d6
+      '';
+
+      systemd.services.zammad-locale-cheat =
+        let cfg = config.services.zammad; in
+        {
+          serviceConfig = {
+            Type = "simple";
+            Restart = "always";
+
+            User = "zammad";
+            Group = "zammad";
+            PrivateTmp = true;
+            StateDirectory = "zammad";
+            WorkingDirectory = cfg.dataDir;
+          };
+          wantedBy = [ "zammad-web.service" ];
+          description = "Hack in the locale files so zammad doesn't try to access the internet";
+          script = ''
+            mkdir -p ./config/translations
+            VERSION=$(cat ${cfg.package}/VERSION)
+
+            # If these files are not in place, zammad will try to access the internet.
+            # For the test, we only need to supply en-us.
+            echo '[{"locale":"en-us","alias":"en","name":"English (United States)","active":true,"dir":"ltr"}]' \
+              > ./config/locales-$VERSION.yml
+            echo '[{"locale":"en-us","format":"time","source":"date","target":"mm/dd/yyyy","target_initial":"mm/dd/yyyy"},{"locale":"en-us","format":"time","source":"timestamp","target":"mm/dd/yyyy HH:MM","target_initial":"mm/dd/yyyy HH:MM"}]' \
+              > ./config/translations/en-us-$VERSION.yml
+          '';
+        };
+    };
+
+    testScript = ''
+      start_all()
+      machine.wait_for_unit("postgresql.service")
+      machine.wait_for_unit("zammad-web.service")
+      machine.wait_for_unit("zammad-websocket.service")
+      machine.wait_for_unit("zammad-scheduler.service")
+      # wait for zammad to fully come up
+      machine.sleep(120)
+
+      # Without the grep, the command does not produce valid UTF-8 for some reason.
+      with subtest("welcome screen loads"):
+          machine.succeed(
+              "curl -sSfL http://localhost:3000/ | grep '<title>Zammad Helpdesk</title>'"
+          )
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/zeronet-conservancy.nix b/nixpkgs/nixos/tests/zeronet-conservancy.nix
new file mode 100644
index 000000000000..8cb649cbdaab
--- /dev/null
+++ b/nixpkgs/nixos/tests/zeronet-conservancy.nix
@@ -0,0 +1,25 @@
+let
+  port = 43110;
+in
+import ./make-test-python.nix ({ pkgs, ... }: {
+  name = "zeronet-conservancy";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ fgaz ];
+  };
+
+  nodes.machine = { config, pkgs, ... }: {
+    services.zeronet = {
+      enable = true;
+      package = pkgs.zeronet-conservancy;
+      inherit port;
+    };
+  };
+
+  testScript = ''
+    machine.wait_for_unit("zeronet.service")
+
+    machine.wait_for_open_port(${toString port})
+
+    machine.succeed("curl --fail -H 'Accept: text/html, application/xml, */*' localhost:${toString port}/Stats")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/zfs.nix b/nixpkgs/nixos/tests/zfs.nix
new file mode 100644
index 000000000000..3454fbaf78fe
--- /dev/null
+++ b/nixpkgs/nixos/tests/zfs.nix
@@ -0,0 +1,258 @@
+{ system ? builtins.currentSystem,
+  config ? {},
+  pkgs ? import ../.. { inherit system config; }
+}:
+
+with import ../lib/testing-python.nix { inherit system pkgs; };
+
+let
+
+  makeZfsTest = name:
+    { kernelPackage ? if enableUnstable
+                      then pkgs.zfsUnstable.latestCompatibleLinuxPackages
+                      else pkgs.linuxPackages
+    , enableUnstable ? false
+    , enableSystemdStage1 ? false
+    , extraTest ? ""
+    }:
+    makeTest {
+      name = "zfs-" + name;
+      meta = with pkgs.lib.maintainers; {
+        maintainers = [ adisbladis elvishjerricco ];
+      };
+
+      nodes.machine = { pkgs, lib, ... }:
+        let
+          usersharePath = "/var/lib/samba/usershares";
+        in {
+        virtualisation = {
+          emptyDiskImages = [ 4096 4096 ];
+          useBootLoader = true;
+          useEFIBoot = true;
+        };
+        boot.loader.systemd-boot.enable = true;
+        boot.loader.timeout = 0;
+        boot.loader.efi.canTouchEfiVariables = true;
+        networking.hostId = "deadbeef";
+        boot.kernelPackages = kernelPackage;
+        boot.supportedFilesystems = [ "zfs" ];
+        boot.zfs.enableUnstable = enableUnstable;
+        boot.initrd.systemd.enable = enableSystemdStage1;
+
+        environment.systemPackages = [ pkgs.parted ];
+
+        # /dev/disk/by-id doesn't get populated in the NixOS test framework
+        boot.zfs.devNodes = "/dev/disk/by-uuid";
+
+        specialisation.samba.configuration = {
+          services.samba = {
+            enable = true;
+            extraConfig = ''
+              registry shares = yes
+              usershare path = ${usersharePath}
+              usershare allow guests = yes
+              usershare max shares = 100
+              usershare owner only = no
+            '';
+          };
+          systemd.services.samba-smbd.serviceConfig.ExecStartPre =
+            "${pkgs.coreutils}/bin/mkdir -m +t -p ${usersharePath}";
+          virtualisation.fileSystems = {
+            "/tmp/mnt" = {
+              device = "rpool/root";
+              fsType = "zfs";
+            };
+          };
+        };
+
+        specialisation.encryption.configuration = {
+          boot.zfs.requestEncryptionCredentials = [ "automatic" ];
+          virtualisation.fileSystems."/automatic" = {
+            device = "automatic";
+            fsType = "zfs";
+          };
+          virtualisation.fileSystems."/manual" = {
+            device = "manual";
+            fsType = "zfs";
+          };
+          virtualisation.fileSystems."/manual/encrypted" = {
+            device = "manual/encrypted";
+            fsType = "zfs";
+            options = [ "noauto" ];
+          };
+          virtualisation.fileSystems."/manual/httpkey" = {
+            device = "manual/httpkey";
+            fsType = "zfs";
+            options = [ "noauto" ];
+          };
+        };
+
+        specialisation.forcepool.configuration = {
+          systemd.services.zfs-import-forcepool.wantedBy = lib.mkVMOverride [ "forcepool.mount" ];
+          systemd.targets.zfs.wantedBy = lib.mkVMOverride [];
+          boot.zfs.forceImportAll = true;
+          virtualisation.fileSystems."/forcepool" = {
+            device = "forcepool";
+            fsType = "zfs";
+            options = [ "noauto" ];
+          };
+        };
+
+        services.nginx = {
+          enable = true;
+          virtualHosts = {
+            localhost = {
+              locations = {
+                "/zfskey" = {
+                  return = ''200 "httpkeyabc"'';
+                };
+              };
+            };
+          };
+        };
+      };
+
+      testScript = ''
+        machine.wait_for_unit("multi-user.target")
+        machine.succeed(
+            "zpool status",
+            "parted --script /dev/vdb mklabel msdos",
+            "parted --script /dev/vdb -- mkpart primary 1024M -1s",
+            "parted --script /dev/vdc mklabel msdos",
+            "parted --script /dev/vdc -- mkpart primary 1024M -1s",
+        )
+
+        with subtest("sharesmb works"):
+            machine.succeed(
+                "zpool create rpool /dev/vdb1",
+                "zfs create -o mountpoint=legacy rpool/root",
+                # shared datasets cannot have a legacy mountpoint
+                "zfs create rpool/shared_smb",
+                "bootctl set-default nixos-generation-1-specialisation-samba.conf",
+                "sync",
+            )
+            machine.crash()
+            machine.wait_for_unit("multi-user.target")
+            machine.succeed("zfs set sharesmb=on rpool/shared_smb")
+            machine.succeed(
+                "smbclient -gNL localhost | grep rpool_shared_smb",
+                "umount /tmp/mnt",
+                "zpool destroy rpool",
+            )
+
+        with subtest("encryption works"):
+            machine.succeed(
+                'echo password | zpool create -O mountpoint=legacy '
+                + "-O encryption=aes-256-gcm -O keyformat=passphrase automatic /dev/vdb1",
+                "zpool create -O mountpoint=legacy manual /dev/vdc1",
+                "echo otherpass | zfs create "
+                + "-o encryption=aes-256-gcm -o keyformat=passphrase manual/encrypted",
+                "zfs create -o encryption=aes-256-gcm -o keyformat=passphrase "
+                + "-o keylocation=http://localhost/zfskey manual/httpkey",
+                "bootctl set-default nixos-generation-1-specialisation-encryption.conf",
+                "sync",
+                "zpool export automatic",
+                "zpool export manual",
+            )
+            machine.crash()
+            machine.start()
+            machine.wait_for_console_text("Starting password query on")
+            machine.send_console("password\n")
+            machine.wait_for_unit("multi-user.target")
+            machine.succeed(
+                "zfs get -Ho value keystatus manual/encrypted | grep -Fx unavailable",
+                "echo otherpass | zfs load-key manual/encrypted",
+                "systemctl start manual-encrypted.mount",
+                "zfs load-key manual/httpkey",
+                "systemctl start manual-httpkey.mount",
+                "umount /automatic /manual/encrypted /manual/httpkey /manual",
+                "zpool destroy automatic",
+                "zpool destroy manual",
+            )
+
+        with subtest("boot.zfs.forceImportAll works"):
+            machine.succeed(
+                "rm /etc/hostid",
+                "zgenhostid deadcafe",
+                "zpool create forcepool /dev/vdb1 -O mountpoint=legacy",
+                "bootctl set-default nixos-generation-1-specialisation-forcepool.conf",
+                "rm /etc/hostid",
+                "sync",
+            )
+            machine.crash()
+            machine.wait_for_unit("multi-user.target")
+            machine.fail("zpool import forcepool")
+            machine.succeed(
+                "systemctl start forcepool.mount",
+                "mount | grep forcepool",
+            )
+      '' + extraTest;
+
+    };
+
+
+in {
+
+  stable = makeZfsTest "stable" { };
+
+  unstable = makeZfsTest "unstable" {
+    enableUnstable = true;
+  };
+
+  unstableWithSystemdStage1 = makeZfsTest "unstable" {
+    enableUnstable = true;
+    enableSystemdStage1 = true;
+  };
+
+  installer = (import ./installer.nix { }).zfsroot;
+
+  expand-partitions = makeTest {
+    name = "multi-disk-zfs";
+    nodes = {
+      machine = { pkgs, ... }: {
+        environment.systemPackages = [ pkgs.parted ];
+        boot.supportedFilesystems = [ "zfs" ];
+        networking.hostId = "00000000";
+
+        virtualisation = {
+          emptyDiskImages = [ 20480 20480 20480 20480 20480 20480 ];
+        };
+
+        specialisation.resize.configuration = {
+          services.zfs.expandOnBoot = [ "tank" ];
+        };
+      };
+    };
+
+    testScript = { nodes, ... }:
+      ''
+        start_all()
+        machine.wait_for_unit("default.target")
+        print(machine.succeed('mount'))
+
+        print(machine.succeed('parted --script /dev/vdb -- mklabel gpt'))
+        print(machine.succeed('parted --script /dev/vdb -- mkpart primary 1M 70M'))
+
+        print(machine.succeed('parted --script /dev/vdc -- mklabel gpt'))
+        print(machine.succeed('parted --script /dev/vdc -- mkpart primary 1M 70M'))
+
+        print(machine.succeed('zpool create tank mirror /dev/vdb1 /dev/vdc1 mirror /dev/vdd /dev/vde mirror /dev/vdf /dev/vdg'))
+        print(machine.succeed('zpool list -v'))
+        print(machine.succeed('mount'))
+        start_size = int(machine.succeed('df -k --output=size /tank | tail -n1').strip())
+
+        print(machine.succeed("/run/current-system/specialisation/resize/bin/switch-to-configuration test >&2"))
+        machine.wait_for_unit("zpool-expand-pools.service")
+        machine.wait_for_unit("zpool-expand@tank.service")
+
+        print(machine.succeed('zpool list -v'))
+        new_size = int(machine.succeed('df -k --output=size /tank | tail -n1').strip())
+
+        if (new_size - start_size) > 20000000:
+          print("Disk grew appropriately.")
+        else:
+          print(f"Disk went from {start_size} to {new_size}, which doesn't seem right.")
+          exit(1)
+      '';
+  };
+}
diff --git a/nixpkgs/nixos/tests/zigbee2mqtt.nix b/nixpkgs/nixos/tests/zigbee2mqtt.nix
new file mode 100644
index 000000000000..1a40d175df83
--- /dev/null
+++ b/nixpkgs/nixos/tests/zigbee2mqtt.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ pkgs, lib, ... }:
+  {
+    name = "zigbee2mqtt";
+    nodes.machine = { pkgs, ... }:
+      {
+        services.zigbee2mqtt = {
+          enable = true;
+        };
+
+        systemd.services.zigbee2mqtt.serviceConfig.DevicePolicy = lib.mkForce "auto";
+      };
+
+    testScript = ''
+      machine.wait_for_unit("zigbee2mqtt.service")
+      machine.wait_until_fails("systemctl status zigbee2mqtt.service")
+      machine.succeed(
+          "journalctl -eu zigbee2mqtt | grep \"Error: Error while opening serialport 'Error: Error: No such file or directory, cannot open /dev/ttyACM0'\""
+      )
+
+      machine.log(machine.succeed("systemd-analyze security zigbee2mqtt.service"))
+    '';
+  }
+)
diff --git a/nixpkgs/nixos/tests/zoneminder.nix b/nixpkgs/nixos/tests/zoneminder.nix
new file mode 100644
index 000000000000..3c97bc8282d2
--- /dev/null
+++ b/nixpkgs/nixos/tests/zoneminder.nix
@@ -0,0 +1,23 @@
+import ./make-test-python.nix ({ lib, ...}:
+
+{
+  name = "zoneminder";
+  meta.maintainers = with lib.maintainers; [ danielfullmer ];
+
+  nodes.machine = { ... }:
+  {
+    services.zoneminder = {
+      enable = true;
+      database.createLocally = true;
+      database.username = "zoneminder";
+    };
+    time.timeZone = "America/New_York";
+  };
+
+  testScript = ''
+    machine.wait_for_unit("zoneminder.service")
+    machine.wait_for_unit("nginx.service")
+    machine.wait_for_open_port(8095)
+    machine.succeed("curl --fail http://localhost:8095/")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/zookeeper.nix b/nixpkgs/nixos/tests/zookeeper.nix
new file mode 100644
index 000000000000..0ee2673886a7
--- /dev/null
+++ b/nixpkgs/nixos/tests/zookeeper.nix
@@ -0,0 +1,46 @@
+import ./make-test-python.nix ({ pkgs, ...} :
+let
+
+  perlEnv = pkgs.perl.withPackages (p: [p.NetZooKeeper]);
+
+in {
+  name = "zookeeper";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ nequissimus ztzg ];
+  };
+
+  nodes = {
+    server = { ... }: {
+      services.zookeeper = {
+        enable = true;
+      };
+
+      networking.firewall.allowedTCPPorts = [ 2181 ];
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    server.wait_for_unit("zookeeper")
+    server.wait_for_unit("network.target")
+    server.wait_for_open_port(2181)
+
+    server.wait_until_succeeds(
+        "${pkgs.zookeeper}/bin/zkCli.sh -server localhost:2181 create /foo bar"
+    )
+    server.wait_until_succeeds(
+        "${pkgs.zookeeper}/bin/zkCli.sh -server localhost:2181 set /foo hello"
+    )
+    server.wait_until_succeeds(
+        "${pkgs.zookeeper}/bin/zkCli.sh -server localhost:2181 get /foo | grep hello"
+    )
+
+    server.wait_until_succeeds(
+        "${perlEnv}/bin/perl -E 'use Net::ZooKeeper qw(:acls); $z=Net::ZooKeeper->new(q(localhost:2181)); $z->create(qw(/perl foo acl), ZOO_OPEN_ACL_UNSAFE) || die $z->get_error()'"
+    )
+    server.wait_until_succeeds(
+        "${perlEnv}/bin/perl -E 'use Net::ZooKeeper qw(:acls); $z=Net::ZooKeeper->new(q(localhost:2181)); $z->get(qw(/perl)) eq qw(foo) || die $z->get_error()'"
+    )
+  '';
+})
diff --git a/nixpkgs/nixos/tests/zram-generator.nix b/nixpkgs/nixos/tests/zram-generator.nix
new file mode 100644
index 000000000000..2be7bd2e05b1
--- /dev/null
+++ b/nixpkgs/nixos/tests/zram-generator.nix
@@ -0,0 +1,42 @@
+import ./make-test-python.nix {
+  name = "zram-generator";
+
+  nodes = {
+    single = { ... }: {
+      virtualisation = {
+        emptyDiskImages = [ 512 ];
+      };
+      zramSwap = {
+        enable = true;
+        priority = 10;
+        algorithm = "lz4";
+        swapDevices = 1;
+        memoryPercent = 30;
+        memoryMax = 10 * 1024 * 1024;
+        writebackDevice = "/dev/vdb";
+      };
+    };
+    machine = { ... }: {
+      zramSwap = {
+        enable = true;
+        priority = 10;
+        algorithm = "lz4";
+        swapDevices = 2;
+        memoryPercent = 30;
+        memoryMax = 10 * 1024 * 1024;
+      };
+    };
+  };
+
+  testScript = ''
+    single.wait_for_unit("systemd-zram-setup@zram0.service")
+
+    machine.wait_for_unit("systemd-zram-setup@zram0.service")
+    machine.wait_for_unit("systemd-zram-setup@zram1.service")
+    zram = machine.succeed("zramctl --noheadings --raw")
+    swap = machine.succeed("swapon --show --noheadings")
+    for i in range(2):
+        assert f"/dev/zram{i} lz4 10M" in zram
+        assert f"/dev/zram{i} partition  10M" in swap
+  '';
+}
diff --git a/nixpkgs/nixos/tests/zrepl.nix b/nixpkgs/nixos/tests/zrepl.nix
new file mode 100644
index 000000000000..b16c7eddc7ae
--- /dev/null
+++ b/nixpkgs/nixos/tests/zrepl.nix
@@ -0,0 +1,68 @@
+import ./make-test-python.nix (
+  {
+    name = "zrepl";
+
+    nodes.host = {config, pkgs, ...}: {
+      config = {
+        # Prerequisites for ZFS and tests.
+        boot.supportedFilesystems = [ "zfs" ];
+        environment.systemPackages = [ pkgs.zrepl ];
+        networking.hostId = "deadbeef";
+        services.zrepl = {
+          enable = true;
+          settings = {
+            # Enable Prometheus output for status assertions.
+            global.monitoring = [{
+              type = "prometheus";
+              listen = ":9811";
+            }];
+            # Create a periodic snapshot job for an ephemeral zpool.
+            jobs = [{
+              name = "snap_test";
+              type = "snap";
+
+              filesystems."test" = true;
+              snapshotting = {
+                type = "periodic";
+                prefix = "zrepl_";
+                interval = "1s";
+              };
+
+              pruning.keep = [{
+                type = "last_n";
+                count = 8;
+              }];
+            }];
+          };
+        };
+      };
+    };
+
+    testScript = ''
+      start_all()
+
+      with subtest("Wait for zrepl and network ready"):
+          host.wait_for_unit("network-online.target")
+          host.wait_for_unit("zrepl.service")
+
+      with subtest("Create test zpool"):
+          # ZFS requires 64MiB minimum pool size.
+          host.succeed("fallocate -l 64MiB /root/zpool.img")
+          host.succeed("zpool create test /root/zpool.img")
+
+      with subtest("Check for completed zrepl snapshot"):
+          # zrepl periodic snapshot job creates a snapshot with this prefix.
+          host.wait_until_succeeds("zfs list -t snapshot | grep -q zrepl_")
+
+      with subtest("Verify HTTP monitoring server is configured"):
+          out = host.succeed("curl -f localhost:9811/metrics")
+
+          assert (
+              "zrepl_start_time" in out
+          ), "zrepl start time metric was not found in Prometheus output"
+
+          assert (
+              "zrepl_zfs_snapshot_duration_count{filesystem=\"test\"}" in out
+          ), "zrepl snapshot counter for test was not found in Prometheus output"
+    '';
+  })
diff --git a/nixpkgs/nixos/tests/zsh-history.nix b/nixpkgs/nixos/tests/zsh-history.nix
new file mode 100644
index 000000000000..64f32a07e215
--- /dev/null
+++ b/nixpkgs/nixos/tests/zsh-history.nix
@@ -0,0 +1,35 @@
+import ./make-test-python.nix ({ pkgs, ...} : {
+  name = "zsh-history";
+  meta = with pkgs.lib.maintainers; {
+    maintainers = [ ];
+  };
+
+  nodes.default = { ... }: {
+    programs = {
+      zsh.enable = true;
+    };
+    environment.systemPackages = [ pkgs.zsh-history ];
+    programs.zsh.interactiveShellInit = ''
+      source ${pkgs.zsh-history.out}/share/zsh/init.zsh
+    '';
+    users.users.root.shell = "${pkgs.zsh}/bin/zsh";
+  };
+
+  testScript = ''
+    start_all()
+    default.wait_for_unit("multi-user.target")
+    default.wait_until_succeeds("pgrep -f 'agetty.*tty1'")
+
+    # Login
+    default.wait_until_tty_matches("1", "login: ")
+    default.send_chars("root\n")
+    default.wait_until_tty_matches("1", r"\nroot@default\b")
+
+    # Generate some history
+    default.send_chars("echo foobar\n")
+    default.wait_until_tty_matches("1", "foobar")
+
+    # Ensure that command was recorded in history
+    default.succeed("/run/current-system/sw/bin/history list | grep -q foobar")
+  '';
+})
diff --git a/nixpkgs/nixos/tests/zwave-js.nix b/nixpkgs/nixos/tests/zwave-js.nix
new file mode 100644
index 000000000000..9239e6964fd7
--- /dev/null
+++ b/nixpkgs/nixos/tests/zwave-js.nix
@@ -0,0 +1,31 @@
+import ./make-test-python.nix ({ pkgs, lib, ...} :
+
+let
+  secretsConfigFile = pkgs.writeText "secrets.json" (builtins.toJSON {
+    securityKeys = {
+      "S0_Legacy" = "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA";
+    };
+  });
+in {
+  name = "zwave-js";
+  meta.maintainers = with lib.maintainers; [ graham33 ];
+
+  nodes = {
+    machine = { config, ... }: {
+      services.zwave-js = {
+        enable = true;
+        serialPort = "/dev/null";
+        extraFlags = ["--mock-driver"];
+        inherit secretsConfigFile;
+      };
+    };
+  };
+
+  testScript = ''
+    start_all()
+
+    machine.wait_for_unit("zwave-js.service")
+    machine.wait_for_open_port(3000)
+    machine.wait_until_succeeds("journalctl --since -1m --unit zwave-js --grep 'ZwaveJS server listening'")
+  '';
+})